diff -Nru sqlite3-3.4.2/aclocal.m4 sqlite3-3.6.16/aclocal.m4 --- sqlite3-3.4.2/aclocal.m4 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/aclocal.m4 2009-06-12 03:37:45.000000000 +0100 @@ -1,7 +1,7 @@ -# generated automatically by aclocal 1.9.6 -*- Autoconf -*- +# generated automatically by aclocal 1.10.2 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005 Free Software Foundation, Inc. +# 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -12,102 +12,187 @@ # PARTICULAR PURPOSE. # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) -# serial 51 Debian 1.5.24-1 AC_PROG_LIBTOOL +# serial 56 LT_INIT -# AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) -# ----------------------------------------------------------- -# If this macro is not defined by Autoconf, define it here. -m4_ifdef([AC_PROVIDE_IFELSE], - [], - [m4_define([AC_PROVIDE_IFELSE], - [m4_ifdef([AC_PROVIDE_$1], - [$2], [$3])])]) +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. +m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) -# AC_PROG_LIBTOOL -# --------------- -AC_DEFUN([AC_PROG_LIBTOOL], -[AC_REQUIRE([_AC_PROG_LIBTOOL])dnl -dnl If AC_PROG_CXX has already been expanded, run AC_LIBTOOL_CXX -dnl immediately, otherwise, hook it in at the end of AC_PROG_CXX. 
- AC_PROVIDE_IFELSE([AC_PROG_CXX], - [AC_LIBTOOL_CXX], - [define([AC_PROG_CXX], defn([AC_PROG_CXX])[AC_LIBTOOL_CXX - ])]) -dnl And a similar setup for Fortran 77 support - AC_PROVIDE_IFELSE([AC_PROG_F77], - [AC_LIBTOOL_F77], - [define([AC_PROG_F77], defn([AC_PROG_F77])[AC_LIBTOOL_F77 -])]) - -dnl Quote A][M_PROG_GCJ so that aclocal doesn't bring it in needlessly. -dnl If either AC_PROG_GCJ or A][M_PROG_GCJ have already been expanded, run -dnl AC_LIBTOOL_GCJ immediately, otherwise, hook it in at the end of both. - AC_PROVIDE_IFELSE([AC_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ], - [AC_LIBTOOL_GCJ], - [ifdef([AC_PROG_GCJ], - [define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[AC_LIBTOOL_GCJ])]) - ifdef([A][M_PROG_GCJ], - [define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[AC_LIBTOOL_GCJ])]) - ifdef([LT_AC_PROG_GCJ], - [define([LT_AC_PROG_GCJ], - defn([LT_AC_PROG_GCJ])[AC_LIBTOOL_GCJ])])])]) -])])# AC_PROG_LIBTOOL +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) -# _AC_PROG_LIBTOOL -# ---------------- -AC_DEFUN([_AC_PROG_LIBTOOL], -[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl -AC_BEFORE([$0],[AC_LIBTOOL_CXX])dnl -AC_BEFORE([$0],[AC_LIBTOOL_F77])dnl -AC_BEFORE([$0],[AC_LIBTOOL_GCJ])dnl +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed -LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" +LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl -# Prevent multiple expansion -define([AC_PROG_LIBTOOL], []) -])# _AC_PROG_LIBTOOL +_LT_SETUP +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) -# AC_LIBTOOL_SETUP -# ---------------- -AC_DEFUN([AC_LIBTOOL_SETUP], -[AC_PREREQ(2.50)dnl -AC_REQUIRE([AC_ENABLE_SHARED])dnl -AC_REQUIRE([AC_ENABLE_STATIC])dnl -AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl -AC_REQUIRE([AC_CANONICAL_HOST])dnl + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
+m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. +m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([AC_PROG_LD])dnl -AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl -AC_REQUIRE([AC_PROG_NM])dnl - +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl AC_REQUIRE([AC_PROG_LN_S])dnl -AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl -# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! -AC_REQUIRE([AC_OBJEXT])dnl -AC_REQUIRE([AC_EXEEXT])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi -AC_LIBTOOL_SYS_MAX_CMD_LEN -AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE -AC_LIBTOOL_OBJDIR +_LT_CHECK_OBJDIR -AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl -_LT_AC_PROG_ECHO_BACKSLASH +m4_require([_LT_TAG_COMPILER])dnl +_LT_PROG_ECHO_BACKSLASH case $host_os in aix3*) @@ -123,1452 +208,2222 @@ # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. -Xsed='sed -e 1s/^X//' -[sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'] +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. -[double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'] +double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' -# Constants: -rm="rm -f" - # Global variables: -default_ofile=libtool +ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). 
libext=a -ltmain="$ac_aux_dir/ltmain.sh" -ofile="$default_ofile" -with_gnu_ld="$lt_cv_prog_gnu_ld" -AC_CHECK_TOOL(AR, ar, false) -AC_CHECK_TOOL(RANLIB, ranlib, :) -AC_CHECK_TOOL(STRIP, strip, :) +with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables -test -z "$AR" && AR=ar -test -z "$AR_FLAGS" && AR_FLAGS=cru -test -z "$AS" && AS=as test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$DLLTOOL" && DLLTOOL=dlltool test -z "$LD" && LD=ld -test -z "$LN_S" && LN_S="ln -s" -test -z "$MAGIC_CMD" && MAGIC_CMD=file -test -z "$NM" && NM=nm -test -z "$SED" && SED=sed -test -z "$OBJDUMP" && OBJDUMP=objdump -test -z "$RANLIB" && RANLIB=: -test -z "$STRIP" && STRIP=: test -z "$ac_objext" && ac_objext=o -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" -fi - _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then - AC_PATH_MAGIC + _LT_PATH_MAGIC fi ;; esac -AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) -AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], -enable_win32_dll=yes, enable_win32_dll=no) - -AC_ARG_ENABLE([libtool-lock], - [AC_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP -AC_ARG_WITH([pic], - [AC_HELP_STRING([--with-pic], - [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], - [pic_mode="$withval"], - [pic_mode=default]) -test -z "$pic_mode" && pic_mode=default -# Use C for the default configuration in the libtool script -tagname= -AC_LIBTOOL_LANG_C_CONFIG -_LT_AC_TAGCONFIG -])# AC_LIBTOOL_SETUP +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN -# _LT_AC_SYS_COMPILER -# ------------------- -AC_DEFUN([_LT_AC_SYS_COMPILER], -[AC_REQUIRE([AC_PROG_CC])dnl -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -# Allow CC to be a program name with arguments. 
-compiler=$CC -])# _LT_AC_SYS_COMPILER +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) -# _LT_CC_BASENAME(CC) -# ------------------- -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. -AC_DEFUN([_LT_CC_BASENAME], -[for cc_temp in $1""; do - case $cc_temp in - compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; - distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) ]) -# _LT_COMPILER_BOILERPLATE -# ------------------------ -# Check for compiler boilerplate output or warnings with -# the simple compiler test code. -AC_DEFUN([_LT_COMPILER_BOILERPLATE], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* -])# _LT_COMPILER_BOILERPLATE +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) -# _LT_LINKER_BOILERPLATE -# ---------------------- -# Check for linker boilerplate output or warnings with -# the simple link test code. -AC_DEFUN([_LT_LINKER_BOILERPLATE], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* -])# _LT_LINKER_BOILERPLATE -# _LT_AC_SYS_LIBPATH_AIX -# ---------------------- -# Links a minimal program and checks the executable -# for the system default hardcoded library path. In most cases, -# this is /usr/lib:/lib, but when the MPI compilers are used -# the location of the communication and MPI libs are included too. -# If we don't find anything, use the default library path according -# to the aix ld manual. -AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_LINK_IFELSE(AC_LANG_PROGRAM,[ -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. 
-if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi],[]) -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -])# _LT_AC_SYS_LIBPATH_AIX +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. +m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) -# _LT_AC_SHELL_INIT(ARG) -# ---------------------- -AC_DEFUN([_LT_AC_SHELL_INIT], -[ifdef([AC_DIVERSION_NOTICE], - [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], - [AC_DIVERT_PUSH(NOTICE)]) -$1 -AC_DIVERT_POP -])# _LT_AC_SHELL_INIT +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) -# _LT_AC_PROG_ECHO_BACKSLASH -# -------------------------- -# Add some code to the start of the generated configure script which -# will find an echo command which doesn't interpret backslashes. -AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH], -[_LT_AC_SHELL_INIT([ -# Check that we are running under the correct shell. -SHELL=${CONFIG_SHELL-/bin/sh} -case X$ECHO in -X*--fallback-echo) - # Remove one level of quotation (which was required for Make). 
- ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` - ;; -esac +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) -echo=${ECHO-echo} -if test "X[$]1" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift -elif test "X[$]1" = X--fallback-echo; then - # Avoid inline document here, it may be left over - : -elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then - # Yippee, $echo works! - : -else - # Restart under the correct shell. - exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} -fi -if test "X[$]1" = X--fallback-echo; then - # used as fallback echo - shift - cat </dev/null 2>&1 && unset CDPATH -if test -z "$ECHO"; then -if test "X${echo_test_string+set}" != Xset; then -# find a string as large as possible, as long as the shell can cope with it - for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do - # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... - if (echo_test_string=`eval $cmd`) 2>/dev/null && - echo_test_string=`eval $cmd` && - (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null - then - break - fi - done -fi +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# ='`$ECHO "X$" | $Xsed -e "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) -if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - : -else - # The Solaris, AIX, and Digital Unix default echo programs unquote - # backslashes. This makes it impossible to quote backslashes using - # echo "$something" | sed 's/\\/\\\\/g' - # - # So, first we look for a working echo in the user's PATH. 
- lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for dir in $PATH /usr/ucb; do - IFS="$lt_save_ifs" - if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && - test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$dir/echo" - break - fi - done - IFS="$lt_save_ifs" +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags="_LT_TAGS"dnl +]) - if test "X$echo" = Xecho; then - # We didn't find a better echo, so look for alternatives. - if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # This shell has a builtin print -r that does the trick. - echo='print -r' - elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && - test "X$CONFIG_SHELL" != X/bin/ksh; then - # If we have ksh, try running configure again with it. - ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} - export ORIGINAL_CONFIG_SHELL - CONFIG_SHELL=/bin/ksh - export CONFIG_SHELL - exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} - else - # Try using printf. - echo='printf %s\n' - if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # Cool, printf works - : - elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL - export CONFIG_SHELL - SHELL="$CONFIG_SHELL" - export SHELL - echo="$CONFIG_SHELL [$]0 --fallback-echo" - elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$CONFIG_SHELL [$]0 --fallback-echo" - else - # maybe with a smaller string... - prev=: - for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do - if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null - then - break - fi - prev="$cmd" - done +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. 
+# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) - if test "$prev" != 'sed 50q "[$]0"'; then - echo_test_string=`eval $prev` - export echo_test_string - exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} - else - # Oops. We lost completely, so just stick with echo. - echo=echo - fi - fi - fi - fi -fi -fi -# Copy echo and quote the copy suitably for passing to libtool from -# the Makefile, instead of quoting the original, which is used later. -ECHO=$echo -if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then - ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" -fi +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. +m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) -AC_SUBST(ECHO) -])])# _LT_AC_PROG_ECHO_BACKSLASH +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) -# _LT_AC_LOCK -# ----------- -AC_DEFUN([_LT_AC_LOCK], -[AC_ARG_ENABLE([libtool-lock], - [AC_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE="32" +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into `config.status', and then the shell code to quote escape them in +# for loops in `config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. 
+m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; - *ELF-64*) - HPUX_IA64_MODE="64" + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out which ABI we are using. - echo '[#]line __oline__ "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - if test "$lt_cv_prog_gnu_ld" = yes; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* - ;; +done -x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ -s390*-*linux*|sparc*-*linux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_i386" - ;; - ppc64-*linux*|powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - ppc*-*linux*|powerpc*-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac +# Double-quote double-evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
- SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -belf" - AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, - [AC_LANG_PUSH(C) - AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) - AC_LANG_POP]) - if test x"$lt_cv_cc_needs_belf" != x"yes"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS="$SAVE_CFLAGS" - fi - ;; -sparc*-*solaris*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) LD="${LD-ld} -64" ;; - esac + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac - fi - rm -rf conftest* - ;; +done -AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], -[*-*-cygwin* | *-*-mingw* | *-*-pw32*) - AC_CHECK_TOOL(DLLTOOL, dlltool, false) - AC_CHECK_TOOL(AS, as, false) - AC_CHECK_TOOL(OBJDUMP, objdump, false) +# Fix-up fallback echo if it was mangled by the above quoting rules. +case \$lt_ECHO in +*'\\\[$]0 --fallback-echo"')dnl " + lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\` ;; - ]) esac -need_locks="$enable_libtool_lock" +_LT_OUTPUT_LIBTOOL_INIT +]) -])# _LT_AC_LOCK +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. +AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +cat >"$CONFIG_LT" <<_LTEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate a libtool stub with the current configuration. + +lt_cl_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AS_SHELL_SANITIZE +_AS_PREPARE -# AC_LIBTOOL_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------------------- -# Check whether the given compiler option works -AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], -[AC_REQUIRE([LT_AC_PROG_SED]) -AC_CACHE_CHECK([$1], [$2], - [$2=no - ifelse([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$3" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - fi - $rm conftest* -]) +exec AS_MESSAGE_FD>&1 +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +\`$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to ." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. + +Copyright (C) 2008 Free Software Foundation, Inc. +This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." -if test x"[$]$2" = xyes; then - ifelse([$5], , :, [$5]) -else - ifelse([$6], , :, [$6]) -fi -])# AC_LIBTOOL_COMPILER_OPTION +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; -# AC_LIBTOOL_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [ACTION-SUCCESS], [ACTION-FAILURE]) -# ------------------------------------------------------------ -# Check whether the given compiler option works -AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_CACHE_CHECK([$1], [$2], - [$2=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $3" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&AS_MESSAGE_LOG_FD - $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - else - $2=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" -]) + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done -if test x"[$]$2" = xyes; then - ifelse([$4], , :, [$4]) -else - ifelse([$5], , :, [$5]) +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null fi -])# AC_LIBTOOL_LINKER_OPTION +_LTEOF +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. 
+if test "$no_create" != yes; then + lt_cl_success=: + test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" + exec AS_MESSAGE_LOG_FD>/dev/null + $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false + exec AS_MESSAGE_LOG_FD>>config.log + $lt_cl_success || AS_EXIT(1) +fi +])# LT_OUTPUT -# AC_LIBTOOL_SYS_MAX_CMD_LEN -# -------------------------- -AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], -[# find the maximum length of command line arguments -AC_MSG_CHECKING([the maximum length of command line arguments]) -AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl - i=0 - teststring="ABCD" - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" - cygwin* | mingw*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS - netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) - # This has been around since 386BSD, at least. Likely further. 
- if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; +_LT_EOF - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. - lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - while (test "X"`$SHELL [$]0 --fallback-echo "X$teststring" 2>/dev/null` \ - = "XX$teststring") >/dev/null 2>&1 && - new_result=`expr "X$teststring" : ".*" 2>&1` && - lt_cv_sys_max_cmd_len=$new_result && - test $i != 17 # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - teststring= - # Add a significant safety factor because C++ compilers can tack on massive - # amounts of additional arguments before passing them to the linker. - # It appears as though 1/2 is a usable value. - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF ;; esac -]) -if test -n $lt_cv_sys_max_cmd_len ; then - AC_MSG_RESULT($lt_cv_sys_max_cmd_len) -else - AC_MSG_RESULT(none) -fi -])# AC_LIBTOOL_SYS_MAX_CMD_LEN + _LT_PROG_LTMAIN -# _LT_AC_CHECK_DLFCN -# ------------------ -AC_DEFUN([_LT_AC_CHECK_DLFCN], -[AC_CHECK_HEADERS(dlfcn.h)dnl -])# _LT_AC_CHECK_DLFCN + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + _LT_PROG_XSI_SHELLFNS -# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, -# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) -# --------------------------------------------------------------------- -AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF], -[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl -if test "$cross_compiling" = yes; then : - [$4] -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext < -#endif + sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) -#include + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). +# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif -#ifdef __cplusplus -extern "C" void exit (int); -#endif +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG -void fnord() { int i=42;} -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - /* dlclose (self); */ - } - else - puts (dlerror ()); +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG - exit (status); -}] -EOF - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) $1 ;; - x$lt_dlneed_uscore) $2 ;; - x$lt_dlunknown|x*) $3 ;; - esac - else : - # compilation failed - $3 - fi -fi -rm -fr conftest* -])# _LT_AC_TRY_DLOPEN_SELF +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. +AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) -# AC_LIBTOOL_DLOPEN_SELF -# ---------------------- -AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], -[AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl -if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - case $host_os in - beos*) - lt_cv_dlopen="load_add_on" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl - mingw* | pw32*) - lt_cv_dlopen="LoadLibrary" - lt_cv_dlopen_libs= - ;; +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl - cygwin*) - lt_cv_dlopen="dlopen" - lt_cv_dlopen_libs= - ;; +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} - darwin*) - # if libdl is installed we need to link against it - AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ - lt_cv_dlopen="dyld" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ]) - ;; +# If no C compiler flags were specified, use CFLAGS. 
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - *) - AC_CHECK_FUNC([shl_load], - [lt_cv_dlopen="shl_load"], - [AC_CHECK_LIB([dld], [shl_load], - [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], - [AC_CHECK_FUNC([dlopen], - [lt_cv_dlopen="dlopen"], - [AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], - [AC_CHECK_LIB([svld], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], - [AC_CHECK_LIB([dld], [dld_link], - [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) - ]) - ]) - ]) - ]) - ]) - ;; - esac +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER - if test "x$lt_cv_dlopen" != xno; then - enable_dlopen=yes - else - enable_dlopen=no - fi - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS="$CPPFLAGS" - test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE - save_LDFLAGS="$LDFLAGS" - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - save_LIBS="$LIBS" - LIBS="$lt_cv_dlopen_libs $LIBS" +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE - AC_CACHE_CHECK([whether a program can dlopen itself], - lt_cv_dlopen_self, [dnl - _LT_AC_TRY_DLOPEN_SELF( - lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, - lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. 
+ rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" ]) - - if test "x$lt_cv_dlopen_self" = xyes; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - AC_CACHE_CHECK([whether a statically linked program can dlopen itself], - lt_cv_dlopen_self_static, [dnl - _LT_AC_TRY_DLOPEN_SELF( - lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, - lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) - ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= fi - - CPPFLAGS="$save_CPPFLAGS" - LDFLAGS="$save_LDFLAGS" - LIBS="$save_LIBS" ;; esac +]) - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; +# _LT_DARWIN_LINKER_FEATURES +# -------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; esac -fi -])# AC_LIBTOOL_DLOPEN_SELF - - -# 
AC_LIBTOOL_PROG_CC_C_O([TAGNAME]) -# --------------------------------- -# Check to see if options -c and -o are simultaneously supported by compiler -AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl -AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], - [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)], - [_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - fi - fi - chmod u+w . 2>&AS_MESSAGE_LOG_FD - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. 
- rmdir conftest - $rm conftest* + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=echo + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi ]) -])# AC_LIBTOOL_PROG_CC_C_O +# _LT_SYS_MODULE_PATH_AIX +# ----------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +AC_LINK_IFELSE(AC_LANG_PROGRAM,[ +lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\(.*\)$/\1/ + p + } + }' +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +# Check for a 64-bit object if we didn't find anything. 
+if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +fi],[]) +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi +])# _LT_SYS_MODULE_PATH_AIX -# AC_LIBTOOL_SYS_HARD_LINK_LOCKS([TAGNAME]) -# ----------------------------------------- -# Check to see if we can do hard links to lock some files if needed -AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], -[AC_REQUIRE([_LT_AC_LOCK])dnl - -hard_links="nottested" -if test "$_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - AC_MSG_CHECKING([if we can lock with hard links]) - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - AC_MSG_RESULT([$hard_links]) - if test "$hard_links" = no; then - AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) - need_locks=warn - fi -else - need_locks=no -fi -])# AC_LIBTOOL_SYS_HARD_LINK_LOCKS +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[ifdef([AC_DIVERSION_NOTICE], + [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], + [AC_DIVERT_PUSH(NOTICE)]) +$1 +AC_DIVERT_POP +])# _LT_SHELL_INIT -# AC_LIBTOOL_OBJDIR -# ----------------- -AC_DEFUN([AC_LIBTOOL_OBJDIR], -[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], -[rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null]) -objdir=$lt_cv_objdir -])# AC_LIBTOOL_OBJDIR +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Add some code to the start of the generated configure script which +# will find an echo command which doesn't interpret backslashes. +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[_LT_SHELL_INIT([ +# Check that we are running under the correct shell. +SHELL=${CONFIG_SHELL-/bin/sh} -# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH([TAGNAME]) -# ---------------------------------------------- -# Check hardcoding attributes. -AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], -[AC_MSG_CHECKING([how to hardcode library paths into programs]) -_LT_AC_TAGVAR(hardcode_action, $1)= -if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ - test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ - test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then +case X$lt_ECHO in +X*--fallback-echo) + # Remove one level of quotation (which was required for Make). + ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` + ;; +esac - # We can hardcode non-existant directories. - if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)" != no && - test "$_LT_AC_TAGVAR(hardcode_minus_L, $1)" != no; then - # Linking always hardcodes the temporary library directory. - _LT_AC_TAGVAR(hardcode_action, $1)=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - _LT_AC_TAGVAR(hardcode_action, $1)=immediate - fi +ECHO=${lt_ECHO-echo} +if test "X[$]1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. 
+ shift +elif test "X[$]1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then + # Yippee, $ECHO works! + : else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - _LT_AC_TAGVAR(hardcode_action, $1)=unsupported + # Restart under the correct shell. + exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} fi -AC_MSG_RESULT([$_LT_AC_TAGVAR(hardcode_action, $1)]) -if test "$_LT_AC_TAGVAR(hardcode_action, $1)" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless +if test "X[$]1" = X--fallback-echo; then + # used as fallback echo + shift + cat <<_LT_EOF +[$]* +_LT_EOF + exit 0 fi -])# AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH - -# AC_LIBTOOL_SYS_LIB_STRIP -# ------------------------ -AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP], -[striplib= -old_striplib= -AC_MSG_CHECKING([whether stripping libraries is possible]) -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - AC_MSG_RESULT([yes]) -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) -fi - ;; - *) - AC_MSG_RESULT([no]) - ;; - esac -fi -])# AC_LIBTOOL_SYS_LIB_STRIP +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH +if test -z "$lt_ECHO"; then + if test "X${echo_test_string+set}" != Xset; then + # find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if { echo_test_string=`eval $cmd`; } 2>/dev/null && + { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null + then + break + fi + done + fi -# AC_LIBTOOL_SYS_DYNAMIC_LINKER -# ----------------------------- -# PORTME Fill in your ld.so characteristics -AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_MSG_CHECKING([dynamic linker characteristics]) -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -m4_if($1,[],[ -if test "$GCC" = yes; then - case $host_os in - darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; - *) lt_awk_arg="/^libraries:/" ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$lt_search_path_spec" | grep ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. 
- lt_search_path_spec=`echo "$lt_search_path_spec" | $SED -e 's/;/ /g'` + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : else - lt_search_path_spec=`echo "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary. - lt_tmp_lt_search_path_spec= - lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path/$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" - else - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`echo $lt_tmp_lt_search_path_spec | awk ' -BEGIN {RS=" "; FS="/|\n";} { - lt_foo=""; - lt_count=0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo="/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[[lt_foo]]++; } - if (lt_freq[[lt_foo]] == 1) { print lt_foo; } -}'` - sys_lib_search_path_spec=`echo $lt_search_path_spec` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi]) -need_lib_prefix=unknown -hardcode_into_libs=no + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$dir/echo" + break + fi + done + IFS="$lt_save_ifs" -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH + if test "X$ECHO" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + ECHO='print -r' + elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} + else + # Try using printf. 
+ ECHO='printf %s\n' + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + ECHO="$CONFIG_SHELL [$]0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$CONFIG_SHELL [$]0 --fallback-echo" + else + # maybe with a smaller string... + prev=: - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; + for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do + if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null + then + break + fi + prev="$cmd" + done -aix4* | aix5*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[[01]] | aix4.[[01]].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then - : - else - can_build_shared=no + if test "$prev" != 'sed 50q "[$]0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} + else + # Oops. We lost completely, so just stick with echo. + ECHO=echo + fi + fi fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' fi - shlibpath_var=LIBPATH fi - ;; - -amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; +fi -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; +# Copy echo and quote the copy suitably for passing to libtool from +# the Makefile, instead of quoting the original, which is used later. +lt_ECHO=$ECHO +if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then + lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" +fi -bsdi[[45]]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; +AC_SUBST(lt_ECHO) +]) +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], + [An echo program that does not interpret backslashes]) +])# _LT_PROG_ECHO_BACKSLASH -cygwin* | mingw* | pw32*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $rm \$dlpath' - shlibpath_overrides_runpath=yes +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | [grep ';[c-zC-Z]:/' >/dev/null]; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. 
We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - ;; +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; esac - ;; - - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH + fi + rm -rf conftest* ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - m4_if([$1], [],[ - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line __oline__ "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* ;; -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* ;; -freebsd1*) - dynamic_linker=no +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[[123]]*) objformat=aout ;; - *) objformat=elf ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; esac fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[[01]]* | freebsdelf3.[[01]]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ - freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac + rm -rf conftest* ;; +esac -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. 
- version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[AC_CHECK_TOOL(AR, ar, false) +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1]) + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" ;; esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. 
- postinstall_cmds='chmod 555 $lib' - ;; + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +])# _LT_CMD_OLD_ARCHIVE -interix[[3-9]]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; -irix5* | irix6* | nonstopux*) - case $host_os in +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+ # a 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \
+ = "XX$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+])
+if test -n $lt_cv_sys_max_cmd_len ; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+ [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform.
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case 
$lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. +m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || + test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` + else + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. 
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. 
+ library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$host_os in + yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH printed by + # mingw gcc, but we are running on Cygwin. Gcc prints its search + # path with ; separators, and with drive letters. 
We can handle the + # drive letters (cygwin fileutils understands them), so leave them, + # especially as we might pass files found there to a mingw objdump, + # which wouldn't understand a cygwinified path. Ahh. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + ;; + + *) + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac + dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[123]]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555. 
+ postinstall_cmds='chmod 555 $lib' + ;; + +interix[[3-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then @@ -1619,6 +2474,17 @@ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no + # Some binutils ld are patched to set DT_RUNPATH + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. @@ -1626,7 +2492,7 @@ # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi @@ -1639,23 +2505,11 @@ dynamic_linker='GNU/Linux ld.so' ;; -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - netbsd*) version_type=sunos need_lib_prefix=no need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' @@ -1676,14 +2530,16 @@ shlibpath_overrides_runpath=yes ;; -nto-qnx*) - version_type=linux +*nto* | *qnx*) + version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' ;; openbsd*) @@ -1692,13 +2548,13 @@ need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[[89]] | openbsd2.[[89]].*) shlibpath_overrides_runpath=no @@ -1770,7 +2626,6 @@ sni) shlibpath_overrides_runpath=no need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' runpath_var=LD_RUN_PATH ;; siemens) @@ -1801,13 +2656,12 @@ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - shlibpath_overrides_runpath=no else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - shlibpath_overrides_runpath=yes case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" @@ -1817,295 +2671,83 @@ sys_lib_dlsearch_path_spec='/usr/lib' ;; -uts4*) +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux + need_lib_prefix=no + need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes ;; -*) - dynamic_linker=no - ;; -esac -AC_MSG_RESULT([$dynamic_linker]) -test "$dynamic_linker" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi -])# AC_LIBTOOL_SYS_DYNAMIC_LINKER - - -# _LT_AC_TAGCONFIG -# ---------------- -AC_DEFUN([_LT_AC_TAGCONFIG], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_ARG_WITH([tags], - [AC_HELP_STRING([--with-tags@<:@=TAGS@:>@], - [include additional configurations @<:@automatic@:>@])], - [tagnames="$withval"]) - -if test -f "$ltmain" && test -n "$tagnames"; then - if test ! -f "${ofile}"; then - AC_MSG_WARN([output file `$ofile' does not exist]) - fi - - if test -z "$LTCC"; then - eval "`$SHELL ${ofile} --config | grep '^LTCC='`" - if test -z "$LTCC"; then - AC_MSG_WARN([output file `$ofile' does not look like a libtool script]) - else - AC_MSG_WARN([using `LTCC=$LTCC', extracted from `$ofile']) - fi - fi - if test -z "$LTCFLAGS"; then - eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" - fi - - # Extract list of available tagged configurations in $ofile. - # Note that this assumes the entire list is on one line. 
- available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` - - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for tagname in $tagnames; do - IFS="$lt_save_ifs" - # Check whether tagname contains only valid characters - case `$echo "X$tagname" | $Xsed -e 's:[[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]]::g'` in - "") ;; - *) AC_MSG_ERROR([invalid tag name: $tagname]) - ;; - esac - - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null - then - AC_MSG_ERROR([tag name \"$tagname\" already exists]) - fi - - # Update the list of available tags. - if test -n "$tagname"; then - echo appending configuration tag \"$tagname\" to $ofile - - case $tagname in - CXX) - if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - AC_LIBTOOL_LANG_CXX_CONFIG - else - tagname="" - fi - ;; - - F77) - if test -n "$F77" && test "X$F77" != "Xno"; then - AC_LIBTOOL_LANG_F77_CONFIG - else - tagname="" - fi - ;; - - GCJ) - if test -n "$GCJ" && test "X$GCJ" != "Xno"; then - AC_LIBTOOL_LANG_GCJ_CONFIG - else - tagname="" - fi - ;; - - RC) - AC_LIBTOOL_LANG_RC_CONFIG - ;; - - *) - AC_MSG_ERROR([Unsupported tag name: $tagname]) - ;; - esac - - # Append the new tag name to the list of available tags. - if test -n "$tagname" ; then - available_tags="$available_tags $tagname" - fi - fi - done - IFS="$lt_save_ifs" - - # Now substitute the updated list of available tags. - if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then - mv "${ofile}T" "$ofile" - chmod +x "$ofile" - else - rm -f "${ofile}T" - AC_MSG_ERROR([unable to update list of available tagged configurations.]) - fi -fi -])# _LT_AC_TAGCONFIG - - -# AC_LIBTOOL_DLOPEN -# ----------------- -# enable checks for dlopen support -AC_DEFUN([AC_LIBTOOL_DLOPEN], - [AC_BEFORE([$0],[AC_LIBTOOL_SETUP]) -])# AC_LIBTOOL_DLOPEN - - -# AC_LIBTOOL_WIN32_DLL -# -------------------- -# declare package support for building win32 DLLs -AC_DEFUN([AC_LIBTOOL_WIN32_DLL], -[AC_BEFORE([$0], [AC_LIBTOOL_SETUP]) -])# AC_LIBTOOL_WIN32_DLL - - -# AC_ENABLE_SHARED([DEFAULT]) -# --------------------------- -# implement the --enable-shared flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. -AC_DEFUN([AC_ENABLE_SHARED], -[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([shared], - [AC_HELP_STRING([--enable-shared@<:@=PKGS@:>@], - [build shared libraries @<:@default=]AC_ENABLE_SHARED_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_shared=]AC_ENABLE_SHARED_DEFAULT) -])# AC_ENABLE_SHARED - - -# AC_DISABLE_SHARED -# ----------------- -# set the default shared flag to --disable-shared -AC_DEFUN([AC_DISABLE_SHARED], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_SHARED(no) -])# AC_DISABLE_SHARED - - -# AC_ENABLE_STATIC([DEFAULT]) -# --------------------------- -# implement the --enable-static flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
-AC_DEFUN([AC_ENABLE_STATIC], -[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([static], - [AC_HELP_STRING([--enable-static@<:@=PKGS@:>@], - [build static libraries @<:@default=]AC_ENABLE_STATIC_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_static=]AC_ENABLE_STATIC_DEFAULT) -])# AC_ENABLE_STATIC - - -# AC_DISABLE_STATIC -# ----------------- -# set the default static flag to --disable-static -AC_DEFUN([AC_DISABLE_STATIC], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_STATIC(no) -])# AC_DISABLE_STATIC - - -# AC_ENABLE_FAST_INSTALL([DEFAULT]) -# --------------------------------- -# implement the --enable-fast-install flag -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. -AC_DEFUN([AC_ENABLE_FAST_INSTALL], -[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl -AC_ARG_ENABLE([fast-install], - [AC_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], - [optimize for fast installation @<:@default=]AC_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_fast_install=]AC_ENABLE_FAST_INSTALL_DEFAULT) -])# AC_ENABLE_FAST_INSTALL - - -# AC_DISABLE_FAST_INSTALL -# ----------------------- -# set the default to --disable-fast-install -AC_DEFUN([AC_DISABLE_FAST_INSTALL], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -AC_ENABLE_FAST_INSTALL(no) -])# AC_DISABLE_FAST_INSTALL +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no -# AC_LIBTOOL_PICMODE([MODE]) -# -------------------------- -# implement the --with-pic flag -# MODE is either `yes' or `no'. If omitted, it defaults to `both'. -AC_DEFUN([AC_LIBTOOL_PICMODE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl -pic_mode=ifelse($#,1,$1,default) -])# AC_LIBTOOL_PICMODE +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi -# AC_PROG_EGREP -# ------------- -# This is predefined starting with Autoconf 2.54, so this conditional -# definition can be removed once we require Autoconf 2.54 or later. 
-m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP], -[AC_CACHE_CHECK([for egrep], [ac_cv_prog_egrep], - [if echo a | (grep -E '(a|b)') >/dev/null 2>&1 - then ac_cv_prog_egrep='grep -E' - else ac_cv_prog_egrep='egrep' - fi]) - EGREP=$ac_cv_prog_egrep - AC_SUBST([EGREP]) -])]) +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. + The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER -# AC_PATH_TOOL_PREFIX -# ------------------- +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- # find a file program which can recognize shared library -AC_DEFUN([AC_PATH_TOOL_PREFIX], -[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in @@ -2118,7 +2760,7 @@ dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. - ac_dummy="ifelse([$2], , $PATH, [$2])" + ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. @@ -2133,7 +2775,7 @@ $EGREP "$file_magic_regex" > /dev/null; then : else - cat <&2 + cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. 
@@ -2144,7 +2786,7 @@ *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org -EOF +_LT_EOF fi ;; esac fi @@ -2161,37 +2803,47 @@ else AC_MSG_RESULT(no) fi -])# AC_PATH_TOOL_PREFIX +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) -# AC_PATH_MAGIC -# ------------- + +# _LT_PATH_MAGIC +# -------------- # find a file program which can recognize a shared library -AC_DEFUN([AC_PATH_MAGIC], -[AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then - AC_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi -])# AC_PATH_MAGIC +])# _LT_PATH_MAGIC -# AC_PROG_LD +# LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker -AC_DEFUN([AC_PROG_LD], -[AC_ARG_WITH([gnu-ld], - [AC_HELP_STRING([--with-gnu-ld], - [assume the C compiler uses GNU ld @<:@default=no@:>@])], - [test "$withval" = no || with_gnu_ld=yes], - [with_gnu_ld=no]) -AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_REQUIRE([AC_PROG_CC])dnl +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. @@ -2208,9 +2860,9 @@ [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld - ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` - while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do - ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; @@ -2260,15 +2912,24 @@ AC_MSG_RESULT(no) fi test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) -AC_PROG_LD_GNU -])# AC_PROG_LD +_LT_PATH_LD_GNU +AC_SUBST([LD]) +_LT_TAGDECL([], [LD], [1], [The linker used to build libraries]) +])# LT_PATH_LD -# AC_PROG_LD_GNU -# -------------- -AC_DEFUN([AC_PROG_LD_GNU], -[AC_REQUIRE([AC_PROG_EGREP])dnl -AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld, +# Old names: +AU_ALIAS([AM_PROG_LD], [LT_PATH_LD]) +AU_ALIAS([AC_PROG_LD], [LT_PATH_LD]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_LD], []) +dnl AC_DEFUN([AC_PROG_LD], []) + + +# _LT_PATH_LD_GNU +#- -------------- +m4_defun([_LT_PATH_LD_GNU], +[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld, [# I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 /dev/null; then + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. 
@@ -2425,8 +3096,8 @@ lt_cv_deplibs_check_method=pass_all ;; -netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' @@ -2439,12 +3110,12 @@ lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; -nto-qnx*) - lt_cv_deplibs_check_method=unknown +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all ;; openbsd*) - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' @@ -2463,6 +3134,10 @@ lt_cv_deplibs_check_method=pass_all ;; +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + sysv4 | sysv4.3*) case $host_vendor in motorola) @@ -2490,7 +3165,7 @@ esac ;; -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) +tpf*) lt_cv_deplibs_check_method=pass_all ;; esac @@ -2498,14 +3173,20 @@ file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown -])# AC_DEPLIBS_CHECK_METHOD +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method == "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD -# AC_PROG_NM + +# LT_PATH_NM # ---------- -# find the pathname to a BSD-compatible name lister -AC_DEFUN([AC_PROG_NM], -[AC_CACHE_CHECK([for BSD-compatible nm], lt_cv_path_NM, +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" @@ -2547,16 +3228,51 @@ done IFS="$lt_save_ifs" done - test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm + : ${lt_cv_path_NM=no} fi]) -NM="$lt_cv_path_NM" -])# AC_PROG_NM +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. 
+ AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :) + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) -# AC_CHECK_LIBM -# ------------- + +# LT_LIB_M +# -------- # check for math library -AC_DEFUN([AC_CHECK_LIBM], +AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in @@ -2571,4024 +3287,4686 @@ AC_CHECK_LIB(m, cos, LIBM="-lm") ;; esac -])# AC_CHECK_LIBM +AC_SUBST([LIBM]) +])# LT_LIB_M - -# AC_LIBLTDL_CONVENIENCE([DIRECTORY]) -# ----------------------------------- -# sets LIBLTDL to the link flags for the libltdl convenience library and -# LTDLINCL to the include flags for the libltdl header and adds -# --enable-ltdl-convenience to the configure arguments. Note that -# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, -# it is assumed to be `libltdl'. LIBLTDL will be prefixed with -# '${top_builddir}/' and LTDLINCL will be prefixed with '${top_srcdir}/' -# (note the single quotes!). If your package is not flat and you're not -# using automake, define top_builddir and top_srcdir appropriately in -# the Makefiles. -AC_DEFUN([AC_LIBLTDL_CONVENIENCE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl - case $enable_ltdl_convenience in - no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;; - "") enable_ltdl_convenience=yes - ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;; - esac - LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la - LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) - # For backwards non-gettext consistent compatibility... - INCLTDL="$LTDLINCL" -])# AC_LIBLTDL_CONVENIENCE - - -# AC_LIBLTDL_INSTALLABLE([DIRECTORY]) -# ----------------------------------- -# sets LIBLTDL to the link flags for the libltdl installable library and -# LTDLINCL to the include flags for the libltdl header and adds -# --enable-ltdl-install to the configure arguments. Note that -# AC_CONFIG_SUBDIRS is not called here. If DIRECTORY is not provided, -# and an installed libltdl is not found, it is assumed to be `libltdl'. -# LIBLTDL will be prefixed with '${top_builddir}/'# and LTDLINCL with -# '${top_srcdir}/' (note the single quotes!). If your package is not -# flat and you're not using automake, define top_builddir and top_srcdir -# appropriately in the Makefiles. -# In the future, this macro may have to be called after AC_PROG_LIBTOOL. 
-AC_DEFUN([AC_LIBLTDL_INSTALLABLE], -[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl - AC_CHECK_LIB(ltdl, lt_dlinit, - [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no], - [if test x"$enable_ltdl_install" = xno; then - AC_MSG_WARN([libltdl not installed, but installation disabled]) - else - enable_ltdl_install=yes - fi - ]) - if test x"$enable_ltdl_install" = x"yes"; then - ac_configure_args="$ac_configure_args --enable-ltdl-install" - LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la - LTDLINCL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl']) - else - ac_configure_args="$ac_configure_args --enable-ltdl-install=no" - LIBLTDL="-lltdl" - LTDLINCL= - fi - # For backwards non-gettext consistent compatibility... - INCLTDL="$LTDLINCL" -])# AC_LIBLTDL_INSTALLABLE +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) -# AC_LIBTOOL_CXX -# -------------- -# enable support for C++ libraries -AC_DEFUN([AC_LIBTOOL_CXX], -[AC_REQUIRE([_LT_AC_LANG_CXX]) -])# AC_LIBTOOL_CXX +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= -# _LT_AC_LANG_CXX -# --------------- -AC_DEFUN([_LT_AC_LANG_CXX], -[AC_REQUIRE([AC_PROG_CXX]) -AC_REQUIRE([_LT_AC_PROG_CXXCPP]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}CXX]) -])# _LT_AC_LANG_CXX +if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' -# _LT_AC_PROG_CXXCPP -# ------------------ -AC_DEFUN([_LT_AC_PROG_CXXCPP], -[ -AC_REQUIRE([AC_PROG_CXX]) -if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - AC_PROG_CXXCPP + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi -])# _LT_AC_PROG_CXXCPP - -# AC_LIBTOOL_F77 -# -------------- -# enable support for Fortran 77 libraries -AC_DEFUN([AC_LIBTOOL_F77], -[AC_REQUIRE([_LT_AC_LANG_F77]) -])# AC_LIBTOOL_F77 - - -# _LT_AC_LANG_F77 -# --------------- -AC_DEFUN([_LT_AC_LANG_F77], -[AC_REQUIRE([AC_PROG_F77]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}F77]) -])# _LT_AC_LANG_F77 - - -# AC_LIBTOOL_GCJ -# -------------- -# enable support for GCJ libraries -AC_DEFUN([AC_LIBTOOL_GCJ], -[AC_REQUIRE([_LT_AC_LANG_GCJ]) -])# AC_LIBTOOL_GCJ - - -# _LT_AC_LANG_GCJ -# --------------- -AC_DEFUN([_LT_AC_LANG_GCJ], -[AC_PROVIDE_IFELSE([AC_PROG_GCJ],[], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],[], - [AC_PROVIDE_IFELSE([LT_AC_PROG_GCJ],[], - [ifdef([AC_PROG_GCJ],[AC_REQUIRE([AC_PROG_GCJ])], - [ifdef([A][M_PROG_GCJ],[AC_REQUIRE([A][M_PROG_GCJ])], - [AC_REQUIRE([A][C_PROG_GCJ_OR_A][M_PROG_GCJ])])])])])]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}GCJ]) -])# _LT_AC_LANG_GCJ - - -# AC_LIBTOOL_RC -# ------------- -# enable support for Windows resource files -AC_DEFUN([AC_LIBTOOL_RC], -[AC_REQUIRE([LT_AC_PROG_RC]) -_LT_AC_SHELL_INIT([tagnames=${tagnames+${tagnames},}RC]) -])# AC_LIBTOOL_RC - - -# AC_LIBTOOL_LANG_C_CONFIG -# ------------------------ -# Ensure that the configuration vars for the C compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. 
-AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG], [_LT_AC_LANG_C_CONFIG]) -AC_DEFUN([_LT_AC_LANG_C_CONFIG], -[lt_save_CC="$CC" -AC_LANG_PUSH(C) - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' -_LT_AC_SYS_COMPILER +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_NM])dnl +AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] -AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) -AC_LIBTOOL_SYS_LIB_STRIP -AC_LIBTOOL_DLOPEN_SELF - -# Report which library types will actually be built -AC_MSG_CHECKING([if libtool supports shared libraries]) -AC_MSG_RESULT([$can_build_shared]) +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' -AC_MSG_CHECKING([whether to build shared libraries]) -test "$can_build_shared" = "no" && enable_shared=no +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' -# On AIX, shared libraries and static libraries use the same namespace, and -# are all built from PIC. +# Define system-specific variables. case $host_os in -aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi +aix*) + symcode='[[BCDT]]' ;; - -aix4* | aix5*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' fi - ;; + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; esac -AC_MSG_RESULT([$enable_shared]) - -AC_MSG_CHECKING([whether to build static libraries]) -# Make sure either enable_shared or enable_static is yes. -test "$enable_shared" = yes || enable_static=yes -AC_MSG_RESULT([$enable_static]) - -AC_LIBTOOL_CONFIG($1) - -AC_LANG_POP -CC="$lt_save_CC" -])# AC_LIBTOOL_LANG_C_CONFIG - -# AC_LIBTOOL_LANG_CXX_CONFIG -# -------------------------- -# Ensure that the configuration vars for the C compiler are -# suitably defined. 
Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. -AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG], [_LT_AC_LANG_CXX_CONFIG(CXX)]) -AC_DEFUN([_LT_AC_LANG_CXX_CONFIG], -[AC_LANG_PUSH(C++) -AC_REQUIRE([AC_PROG_CXX]) -AC_REQUIRE([_LT_AC_PROG_CXXCPP]) - -_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_AC_TAGVAR(allow_undefined_flag, $1)= -_LT_AC_TAGVAR(always_export_symbols, $1)=no -_LT_AC_TAGVAR(archive_expsym_cmds, $1)= -_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_direct, $1)=no -_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= -_LT_AC_TAGVAR(hardcode_minus_L, $1)=no -_LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported -_LT_AC_TAGVAR(hardcode_automatic, $1)=no -_LT_AC_TAGVAR(module_cmds, $1)= -_LT_AC_TAGVAR(module_expsym_cmds, $1)= -_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown -_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_AC_TAGVAR(no_undefined_flag, $1)= -_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= -_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac -# Dependencies to place before and after the object being linked: -_LT_AC_TAGVAR(predep_objects, $1)= -_LT_AC_TAGVAR(postdep_objects, $1)= -_LT_AC_TAGVAR(predeps, $1)= -_LT_AC_TAGVAR(postdeps, $1)= -_LT_AC_TAGVAR(compiler_lib_search_path, $1)= +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" -# Source file extension for C++ test sources. -ac_ext=cpp +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -# Object file extension for compiled C++ test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do -# Code to be used in simple link tests -lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_AC_SYS_COMPILER + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. 
+ lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE + # Check to see that the pipe works correctly. + pipe_works=no -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_LD=$LD -lt_save_GCC=$GCC -GCC=$GXX -lt_save_with_gnu_ld=$with_gnu_ld -lt_save_path_LD=$lt_cv_path_LD -if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then - lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx -else - $as_unset lt_cv_prog_gnu_ld -fi -if test -n "${lt_cv_path_LDCXX+set}"; then - lt_cv_path_LD=$lt_cv_path_LDCXX -else - $as_unset lt_cv_path_LD -fi -test -z "${LDCXX+set}" || LD=$LDCXX -CC=${CXX-"c++"} -compiler=$CC -_LT_AC_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF -# We don't want -fno-exception wen compiling C++ code, so set the -# no_builtin_flag separately -if test "$GXX" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' -else - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= -fi + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi -if test "$GXX" = yes; then - # Set up default GNU C++ configuration + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +#ifdef __cplusplus +extern "C" { +#endif - AC_PROG_LD +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - # Check if GNU C++ uses GNU ld as the underlying linker, since the - # archiving commands below assume that GNU ld is being used. - if test "$with_gnu_ld" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + cat <<_LT_EOF >> conftest.$ac_ext - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' +/* The mapping between symbol names and symbols. 
*/ +const struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; - # If archive_cmds runs LD, not CC, wlarc should be empty - # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to - # investigate it a little bit more. (MM) - wlarc='${wl}' +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif - # ancient GNU ld didn't support --whole-archive et. al. - if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ - grep 'no-whole-archive' > /dev/null; then - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_save_LIBS="$LIBS" + lt_save_CFLAGS="$CFLAGS" + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS="$lt_save_LIBS" + CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi else - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else - with_gnu_ld=no - wlarc= - - # A generic and very simple default shared library creation - # command for GNU C++ for the case where it uses the native - # linker, instead of GNU ld. If possible, this setting should - # overridden to take advantage of the native linker features on - # the platform it is being used on. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 fi + rm -rf conftest* conftst* - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) else - GXX=no - with_gnu_ld=no - wlarc= + AC_MSG_RESULT(ok) fi -# PORTME: fill in a description of your system's C++ link characteristics -AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -_LT_AC_TAGVAR(ld_shlibs, $1)=yes -case $host_os in - aix3*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. 
If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) - for ld_flag in $LDFLAGS; do - case $ld_flag in - *-brtl*) - aix_use_runtimelinking=yes - break - ;; - esac - done - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +]) # _LT_CMD_GLOBAL_SYMBOLS - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_AC_TAGVAR(archive_cmds, $1)='' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - if test "$GXX" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty executable. 
- _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" +AC_MSG_CHECKING([for $compiler option to produce PIC]) +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else + case $host_os in + aix*) + # All AIX code is PIC. if test "$host_cpu" = ia64; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared libraries. - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi - fi - ;; - - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - chorus*) - case $cc_basename in - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; + ;; - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... 
- _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - if test "$GXX" = yes ; then - lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib 
$libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; - dgux*) - case $cc_basename in - ec++*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - ghcx*) - # Green Hills C++ Compiler - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - freebsd[[12]]*) - # C++ shared libraries reported to be fairly broken before switch to ELF - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - freebsd-elf*) - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - ;; - freebsd* | dragonfly*) - # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF - # conventions - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - ;; - gnu*) - ;; - hpux9*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. ;; - aCC*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[[-]]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; - *) - if test "$GXX" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; - esac - ;; - hpux10*|hpux11*) - if test $with_gnu_ld = no; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. case $host_cpu in - hppa*64*|ia64*) ;; + hppa*64*) + ;; *) - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; esac - fi - case $host_cpu in - hppa*64*|ia64*) - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes; then - if test $with_gnu_ld = no; then - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - interix[[3-9]]*) - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - irix5* | irix6*) - case $cc_basename in - CC*) - # SGI C++ - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - # Archives containing C++ object files must be created using - # "CC -ar", where "CC" is the IRIX C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' - ;; - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' - fi + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes ;; - esac - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - ;; - icpc*) - # Intel C++ - with_gnu_ld=yes - # version 8.0 and above of icpc choke on multiply defined symbols - # if we add $predep_objects and $postdep_objects, however 7.1 and - # earlier do not add the objects themselves. 
- case `$CC -V 2>&1` in - *"Version 7."*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 8.0 or newer - tmp_idyn= - case $host_cpu in - ia64*) tmp_idyn=' -i_dynamic';; - esac - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' - ;; - pgCC*) - # Portland Group C++ compiler - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - ;; - cxx*) - # Compaq C++ - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' - - runpath_var=LD_RUN_PATH - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - - # Not sure whether something based on - # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 - # would be better. - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; esac ;; - esac - ;; - lynxos*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - m88k*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - mvs*) - case $cc_basename in - cxx*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' - wlarc= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - fi - # Workaround some broken pre-1.5 toolchains - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' - ;; - openbsd2*) - # C++ shared libraries are fairly broken - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - openbsd*) - if test -f /usr/libexec/ld.so; then - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_AC_TAGVAR(whole_archive_flag_spec, 
$1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - fi - output_verbose_link_cmd='echo' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - osf3*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - osf4* | osf5*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. 
- _LT_AC_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # the KAI C++ compiler. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ - echo "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ - $rm $lib.exp' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - - else - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ ;; - esac - ;; - psos*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - lcc*) - # Lucid - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? ;; - esac - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_AC_TAGVAR(archive_cmds_need_lc,$1)=yes - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. - # Supported since Solaris 2.6 (maybe 2.5.1?) - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - - # The C++ compiler must be used to create the archive. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; - *) - # GNU C++ compiler with Solaris linker - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' - if $CC --version | grep -v '^2\.7' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - else - # g++ 2.7 appears to require `-G' NOT `-shared' on this - # platform. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - fi - - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + linux* | k*bsd*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC*) + # IBM XL 8.0 on PPC + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; *) - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac ;; - esac - fi + esac ;; - esac - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + lynxos*) ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + m88k*) ;; - esac - ;; - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. - # So that behaviour is only enabled if SCOABSPATH is set to a - # non-empty value in the environment. Most likely only useful for - # creating official distributions of packages. - # This is a hack until libtool officially supports absolute path - # names for shared libraries. 
- _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' + netbsd*) ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no + psos*) + ;; + solaris*) + case $cc_basename in + CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac - ;; - vxworks*) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; -esac -AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) -test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no - -_LT_AC_TAGVAR(GCC, $1)="$GXX" -_LT_AC_TAGVAR(LD, $1)="$LD" - -AC_LIBTOOL_POSTDEP_PREDEP($1) -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) - -AC_LIBTOOL_CONFIG($1) - -AC_LANG_POP -CC=$lt_save_CC -LDCXX=$LD -LD=$lt_save_LD -GCC=$lt_save_GCC -with_gnu_ldcxx=$with_gnu_ld -with_gnu_ld=$lt_save_with_gnu_ld -lt_cv_path_LDCXX=$lt_cv_path_LD -lt_cv_path_LD=$lt_save_path_LD -lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld -lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld -])# AC_LIBTOOL_LANG_CXX_CONFIG - -# AC_LIBTOOL_POSTDEP_PREDEP([TAGNAME]) -# ------------------------------------ -# Figure out "hidden" library dependencies from verbose -# compiler output when linking a shared library. -# Parse the compiler output and extract the necessary -# objects, libraries and library flags. -AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ -dnl we can't use the lt_simple_compile_test_code here, -dnl because it contains code intended for an executable, -dnl not a library. It's possible we should let each -dnl tag define a new lt_????_link_test_code variable, -dnl but it's only used here... -ifelse([$1],[],[cat > conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext < conftest.$ac_ext <&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - # - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - if test "$solaris_use_stlport4" != yes; then - _LT_AC_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' - fi - ;; - esac - ;; - -solaris*) - case $cc_basename in - CC*) - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. 
Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - - # Adding this requires a known-good setup of shared libraries for - # Sun compiler versions before 5.6, else PIC objects from an old - # archive will be linked into the output, leading to subtle bugs. - if test "$solaris_use_stlport4" != yes; then - _LT_AC_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' - fi - ;; - esac - ;; -esac -]) - -case " $_LT_AC_TAGVAR(postdeps, $1) " in -*" -lc "*) _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no ;; -esac -])# AC_LIBTOOL_POSTDEP_PREDEP - -# AC_LIBTOOL_LANG_F77_CONFIG -# -------------------------- -# Ensure that the configuration vars for the C compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. -AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG], [_LT_AC_LANG_F77_CONFIG(F77)]) -AC_DEFUN([_LT_AC_LANG_F77_CONFIG], -[AC_REQUIRE([AC_PROG_F77]) -AC_LANG_PUSH(Fortran 77) - -_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_AC_TAGVAR(allow_undefined_flag, $1)= -_LT_AC_TAGVAR(always_export_symbols, $1)=no -_LT_AC_TAGVAR(archive_expsym_cmds, $1)= -_LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_direct, $1)=no -_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_AC_TAGVAR(hardcode_libdir_separator, $1)= -_LT_AC_TAGVAR(hardcode_minus_L, $1)=no -_LT_AC_TAGVAR(hardcode_automatic, $1)=no -_LT_AC_TAGVAR(module_cmds, $1)= -_LT_AC_TAGVAR(module_expsym_cmds, $1)= -_LT_AC_TAGVAR(link_all_deplibs, $1)=unknown -_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_AC_TAGVAR(no_undefined_flag, $1)= -_LT_AC_TAGVAR(whole_archive_flag_spec, $1)= -_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for f77 test sources. -ac_ext=f - -# Object file extension for compiled f77 test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="\ - subroutine t - return - end -" - -# Code to be used in simple link tests -lt_simple_link_test_code="\ - program t - end -" - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_AC_SYS_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -CC=${F77-"f77"} -compiler=$CC -_LT_AC_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) - -AC_MSG_CHECKING([if libtool supports shared libraries]) -AC_MSG_RESULT([$can_build_shared]) - -AC_MSG_CHECKING([whether to build shared libraries]) -test "$can_build_shared" = "no" && enable_shared=no - -# On AIX, shared libraries and static libraries use the same namespace, and -# are all built from PIC. -case $host_os in -aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; -aix4* | aix5*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no fi - ;; -esac -AC_MSG_RESULT([$enable_shared]) - -AC_MSG_CHECKING([whether to build static libraries]) -# Make sure either enable_shared or enable_static is yes. 
-test "$enable_shared" = yes || enable_static=yes -AC_MSG_RESULT([$enable_static]) - -_LT_AC_TAGVAR(GCC, $1)="$G77" -_LT_AC_TAGVAR(LD, $1)="$LD" - -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) - -AC_LIBTOOL_CONFIG($1) - -AC_LANG_POP -CC="$lt_save_CC" -])# AC_LIBTOOL_LANG_F77_CONFIG - +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' -# AC_LIBTOOL_LANG_GCJ_CONFIG -# -------------------------- -# Ensure that the configuration vars for the C compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. -AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG], [_LT_AC_LANG_GCJ_CONFIG(GCJ)]) -AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG], -[AC_LANG_SAVE + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; -# Source file extension for Java test sources. -ac_ext=java + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; -# Object file extension for compiled Java test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; -# Code to be used in simple compile tests -lt_simple_compile_test_code="class foo {}" + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; -# Code to be used in simple link tests -lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_AC_SYS_COMPILER + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -CC=${GCJ-"gcj"} -compiler=$CC -_LT_AC_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. 
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; -# GCJ did not exist at the time GCC didn't implicitly link libc in. -_LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; -_LT_AC_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; -AC_LIBTOOL_PROG_COMPILER_NO_RTTI($1) -AC_LIBTOOL_PROG_COMPILER_PIC($1) -AC_LIBTOOL_PROG_CC_C_O($1) -AC_LIBTOOL_SYS_HARD_LINK_LOCKS($1) -AC_LIBTOOL_PROG_LD_SHLIBS($1) -AC_LIBTOOL_SYS_DYNAMIC_LINKER($1) -AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH($1) + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; -AC_LIBTOOL_CONFIG($1) + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; -AC_LANG_RESTORE -CC="$lt_save_CC" -])# AC_LIBTOOL_LANG_GCJ_CONFIG + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; -# AC_LIBTOOL_LANG_RC_CONFIG -# ------------------------- -# Ensure that the configuration vars for the Windows resource compiler are -# suitably defined. Those variables are subsequently used by -# AC_LIBTOOL_CONFIG to write the compiler configuration to `libtool'. -AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG], [_LT_AC_LANG_RC_CONFIG(RC)]) -AC_DEFUN([_LT_AC_LANG_RC_CONFIG], -[AC_LANG_SAVE + linux* | k*bsd*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. 
+ lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + pgcc* | pgf77* | pgf90* | pgf95*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl*) + # IBM XL C 8.0/Fortran 10.1 on PPC + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Sun\ F*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + esac + ;; + esac + ;; -# Source file extension for RC test sources. -ac_ext=rc + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; -# Object file extension for compiled RC test sources. -objext=o -_LT_AC_TAGVAR(objext, $1)=$objext + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; -# Code to be used in simple compile tests -lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; -# Code to be used in simple link tests -lt_simple_link_test_code="$lt_simple_compile_test_code" + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_AC_SYS_COMPILER + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; -# Allow CC to be a program name with arguments. 
-lt_save_CC="$CC" -CC=${RC-"windres"} -compiler=$CC -_LT_AC_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) -_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; -AC_LIBTOOL_CONFIG($1) + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; -AC_LANG_RESTORE -CC="$lt_save_CC" -])# AC_LIBTOOL_LANG_RC_CONFIG + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; -# AC_LIBTOOL_CONFIG([TAGNAME]) -# ---------------------------- -# If TAGNAME is not passed, then create an initial libtool script -# with a default configuration from the untagged config vars. Otherwise -# add code to config.status for appending the configuration named by -# TAGNAME from the matching tagged config vars. -AC_DEFUN([AC_LIBTOOL_CONFIG], -[# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. 
- for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - _LT_AC_TAGVAR(compiler, $1) \ - _LT_AC_TAGVAR(CC, $1) \ - _LT_AC_TAGVAR(LD, $1) \ - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1) \ - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1) \ - _LT_AC_TAGVAR(lt_prog_compiler_static, $1) \ - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) \ - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1) \ - _LT_AC_TAGVAR(thread_safe_flag_spec, $1) \ - _LT_AC_TAGVAR(whole_archive_flag_spec, $1) \ - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) \ - _LT_AC_TAGVAR(old_archive_cmds, $1) \ - _LT_AC_TAGVAR(old_archive_from_new_cmds, $1) \ - _LT_AC_TAGVAR(predep_objects, $1) \ - _LT_AC_TAGVAR(postdep_objects, $1) \ - _LT_AC_TAGVAR(predeps, $1) \ - _LT_AC_TAGVAR(postdeps, $1) \ - _LT_AC_TAGVAR(compiler_lib_search_path, $1) \ - _LT_AC_TAGVAR(archive_cmds, $1) \ - _LT_AC_TAGVAR(archive_expsym_cmds, $1) \ - _LT_AC_TAGVAR(postinstall_cmds, $1) \ - _LT_AC_TAGVAR(postuninstall_cmds, $1) \ - _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) \ - _LT_AC_TAGVAR(allow_undefined_flag, $1) \ - _LT_AC_TAGVAR(no_undefined_flag, $1) \ - _LT_AC_TAGVAR(export_symbols_cmds, $1) \ - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) \ - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) \ - _LT_AC_TAGVAR(hardcode_libdir_separator, $1) \ - _LT_AC_TAGVAR(hardcode_automatic, $1) \ - _LT_AC_TAGVAR(module_cmds, $1) \ - _LT_AC_TAGVAR(module_expsym_cmds, $1) \ - _LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) \ - _LT_AC_TAGVAR(fix_srcfile_path, $1) \ - _LT_AC_TAGVAR(exclude_expsyms, $1) \ - _LT_AC_TAGVAR(include_expsyms, $1); do - - case $var in - _LT_AC_TAGVAR(old_archive_cmds, $1) | \ - _LT_AC_TAGVAR(old_archive_from_new_cmds, $1) | \ - _LT_AC_TAGVAR(archive_cmds, $1) | \ - _LT_AC_TAGVAR(archive_expsym_cmds, $1) | \ - _LT_AC_TAGVAR(module_cmds, $1) | \ - _LT_AC_TAGVAR(module_expsym_cmds, $1) | \ - _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) | \ - _LT_AC_TAGVAR(export_symbols_cmds, $1) | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. 
- eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; + *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac - done - - case $lt_echo in - *'\[$]0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\[$]0 --fallback-echo"[$]/[$]0 --fallback-echo"/'` + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; - esac - -ifelse([$1], [], - [cfgfile="${ofile}T" - trap "$rm \"$cfgfile\"; exit 1" 1 2 15 - $rm -f "$cfgfile" - AC_MSG_NOTICE([creating $ofile])], - [cfgfile="$ofile"]) - - cat <<__EOF__ >> "$cfgfile" -ifelse([$1], [], -[#! $SHELL + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac +AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) -# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. -# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) -# NOTE: Changes made to this file will be lost: look at ltmain.sh. # -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 -# Free Software Foundation, Inc. -# -# This file is part of GNU Libtool: -# Originally by Gordon Matzigkeit , 1996 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. +# Check to make sure the PIC flag actually works. # -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + # -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# Check to make sure the static flag actually works. # -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# A sed program that does not truncate output. 
-SED=$lt_SED - -# Sed that helps us avoid accidentally triggering echo(1) options like -n. -Xsed="$SED -e 1s/^X//" - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# The names of the tagged configurations supported by this script. -available_tags= - -# ### BEGIN LIBTOOL CONFIG], -[# ### BEGIN LIBTOOL TAG CONFIG: $tagname]) - -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: - -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL - -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared - -# Whether or not to build static libraries. -build_old_libs=$enable_static - -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$_LT_AC_TAGVAR(archive_cmds_need_lc, $1) - -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$_LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1) - -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install - -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os -# An echo program that does not interpret backslashes. -echo=$lt_echo +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. +m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. 
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac -# A C compiler. -LTCC=$lt_LTCC + _LT_TAGVAR(ld_shlibs, $1)=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac -# A language-specific compiler. -CC=$lt_[]_LT_AC_TAGVAR(compiler, $1) + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 -# Is the compiler the GNU C compiler? -with_gcc=$_LT_AC_TAGVAR(GCC, $1) +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. -# An ERE matcher. -EGREP=$lt_EGREP +_LT_EOF + fi + ;; -# The linker used to build libraries. -LD=$lt_[]_LT_AC_TAGVAR(LD, $1) + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; -# Whether we need hard or soft links. -LN_S=$lt_LN_S + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; -# A BSD-compatible nm program. 
-NM=$lt_NM + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' -# A symbol stripping program -STRIP=$lt_STRIP + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; -# Used on cygwin: DLL creation program. 
-DLLTOOL="$DLLTOOL" + gnu* | linux* | tpf* | k*bsd*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag= + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi -# Used on cygwin: assembler. -AS="$AS" + case $cc_basename in + xlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; -# The name of the directory that contains temporary libtool files. 
-objdir=$objdir + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 -# How to pass a linker flag through the compiler. -wl=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. -# Object file suffix (normally "o"). -objext="$ac_objext" +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; -# Old archive suffix (normally "a"). -libext="$libext" + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. -# Executable file suffix (normally ""). -exeext="$exeext" +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; -# Additional compiler flags for building library objects. -pic_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) -pic_mode=$pic_mode + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# What is the maximum length of a command? 
-max_cmd_len=$lt_cv_sys_max_cmd_len + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_[]_LT_AC_TAGVAR(lt_cv_prog_compiler_c_o, $1) + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; -# Must we lock files when doing compilation? -need_locks=$lt_need_locks + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac -# Do we need a version for libraries? -need_version=$need_version + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi -# Whether dlopen is supported. -dlopen_support=$enable_dlopen + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. -# Whether dlopen of programs is supported. 
-dlopen_self=$enable_dlopen_self + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_static, $1) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_[]_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_[]_LT_AC_TAGVAR(export_dynamic_flag_spec, $1) + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_[]_LT_AC_TAGVAR(whole_archive_flag_spec, $1) + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_[]_LT_AC_TAGVAR(thread_safe_flag_spec, $1) + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; -# Library versioning type. -version_type=$version_type + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Format of library name prefix. -libname_spec=$lt_libname_spec + freebsd1*) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; -# List of archive names. First name is the real one, the rest are links. 
-# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_cmds, $1) -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_new_cmds, $1) + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_[]_LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1) + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; -# Commands used to build and install a shared archive. 
-archive_cmds=$lt_[]_LT_AC_TAGVAR(archive_cmds, $1) -archive_expsym_cmds=$lt_[]_LT_AC_TAGVAR(archive_expsym_cmds, $1) -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_[]_LT_AC_TAGVAR(module_cmds, $1) -module_expsym_cmds=$lt_[]_LT_AC_TAGVAR(module_expsym_cmds, $1) + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_[]_LT_AC_TAGVAR(predep_objects, $1) + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; -# Dependencies to place after the objects being linked to create a -# shared library. 
-postdep_objects=$lt_[]_LT_AC_TAGVAR(postdep_objects, $1) + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE(int foo(void) {}, + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + ) + LDFLAGS="$save_LDFLAGS" + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) + *nto* | *qnx*) + ;; -# Method to check whether dependent libraries are shared objects. 
-deplibs_check_method=$lt_deplibs_check_method + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_[]_LT_AC_TAGVAR(allow_undefined_flag, $1) + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; -# Flag that forces no undefined symbols. 
-no_undefined_flag=$lt_[]_LT_AC_TAGVAR(no_undefined_flag, $1) + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) 
+ if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; -# This is the shared library runtime path variable. -runpath_var=$runpath_var + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; -# This is the shared library path variable. -shlibpath_var=$shlibpath_var + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' -# Is shlibpath searched before the hard-coded library search path? 
-shlibpath_overrides_runpath=$shlibpath_overrides_runpath + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; -# How to hardcode a shared library path into an executable. -hardcode_action=$_LT_AC_TAGVAR(hardcode_action, $1) + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1) + *) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_[]_LT_AC_TAGVAR(hardcode_libdir_separator, $1) + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$_LT_AC_TAGVAR(hardcode_direct, $1) +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. 
-hardcode_minus_L=$_LT_AC_TAGVAR(hardcode_minus_L, $1) +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$_LT_AC_TAGVAR(hardcode_shlibpath_var, $1) +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$_LT_AC_TAGVAR(hardcode_automatic, $1) + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_MSG_CHECKING([whether -lc should be explicitly linked in]) + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)]) + ;; + esac + fi + ;; +esac -# Whether libtool must link a program against all its dependency libraries. 
-link_all_deplibs=$_LT_AC_TAGVAR(link_all_deplibs, $1) +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + [Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. + This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], + [[If ld is used when linking, flag to hardcode $libdir into a binary + during linking. 
This must work even if $libdir does not exist]]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting ${shlibpath_var} if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [fix_srcfile_path], [1], + [Fix the shell variable $srcfile for the compiler]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) +_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC="$CC" +AC_LANG_PUSH(C) -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path +# Source file extension for C test sources. +ac_ext=c -# Set to yes if exported symbols are required. -always_export_symbols=$_LT_AC_TAGVAR(always_export_symbols, $1) +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext -# The commands to list exported symbols. -export_symbols_cmds=$lt_[]_LT_AC_TAGVAR(export_symbols_cmds, $1) +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" -# The commands to extract the exported symbol list from a shared archive. 
-extract_expsyms_cmds=$lt_extract_expsyms_cmds +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_[]_LT_AC_TAGVAR(exclude_expsyms, $1) +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC -# Symbols that must always be exported. -include_expsyms=$lt_[]_LT_AC_TAGVAR(include_expsyms, $1) +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE -ifelse([$1],[], -[# ### END LIBTOOL CONFIG], -[# ### END LIBTOOL TAG CONFIG: $tagname]) +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) -__EOF__ + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no -ifelse([$1],[], [ + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. case $host_os in aix3*) - cat <<\EOF >> "$cfgfile" + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -EOF + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi ;; esac + AC_MSG_RESULT([$enable_shared]) - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) - mv -f "$cfgfile" "$ofile" || \ - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" -]) -else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. 
- ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi + _LT_CONFIG($1) fi -])# AC_LIBTOOL_CONFIG - - -# AC_LIBTOOL_PROG_COMPILER_NO_RTTI([TAGNAME]) -# ------------------------------------------- -AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], -[AC_REQUIRE([_LT_AC_SYS_COMPILER])dnl - -_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG -if test "$GCC" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' - AC_LIBTOOL_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], - lt_cv_prog_compiler_rtti_exceptions, - [-fno-rtti -fno-exceptions], [], - [_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +# _LT_PROG_CXX +# ------------ +# Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++ +# compiler, we have our own version here. +m4_defun([_LT_PROG_CXX], +[ +pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes]) +AC_PROG_CXX +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes fi -])# AC_LIBTOOL_PROG_COMPILER_NO_RTTI +popdef([AC_MSG_ERROR]) +])# _LT_PROG_CXX +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_CXX], []) -# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE -# --------------------------------- -AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], -[AC_REQUIRE([AC_CANONICAL_HOST]) -AC_REQUIRE([LT_AC_PROG_SED]) -AC_REQUIRE([AC_PROG_NM]) -AC_REQUIRE([AC_OBJEXT]) -# Check for command to grab the raw symbol name followed by C symbol from nm. -AC_MSG_CHECKING([command to parse $NM output from $compiler object]) -AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], -[ -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[[BCDEGRST]]' -# Regexp to match symbols that can be accessed directly from C. -sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_CXX_CONFIG], +[AC_REQUIRE([_LT_PROG_CXX])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no -# Transform an extracted symbol line into a proper C declaration -lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" +# Source file extension for C++ test sources. +ac_ext=cpp -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext -# Define system-specific variables. -case $host_os in -aix*) - symcode='[[BCDT]]' - ;; -cygwin* | mingw* | pw32*) - symcode='[[ABCDGISTW]]' - ;; -hpux*) # Its linker distinguishes data from code symbols - if test "$host_cpu" = ia64; then - symcode='[[ABCDEGRST]]' +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC=$CC + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld fi - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - ;; -linux* | k*bsd*-gnu) - if test "$host_cpu" = ia64; then - symcode='[[ABCDGIRSTW]]' - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD fi - ;; -irix* | nonstopux*) - symcode='[[BCDEGRST]]' - ;; -osf*) - symcode='[[BCDEGQRST]]' - ;; -solaris*) - symcode='[[BDRT]]' - ;; -sco3.2v5*) - symcode='[[DT]]' - ;; -sysv4.2uw2*) - symcode='[[DT]]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[[ABDT]]' - ;; -sysv4) - symcode='[[DFNSTU]]' - ;; -esac - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[[ABCDGIRSTW]]' ;; -esac - -# Try without a prefix undercore, then with it. -for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) - # Write the raw and C identifiers. - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi - # Check to see that the pipe works correctly. - pipe_works=no + if test "$GXX" = yes; then + # Set up default GNU C++ configuration - rm -f conftest* - cat > conftest.$ac_ext < $nlist) && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. 
+ if test "$with_gnu_ld" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. - if grep ' nm_test_var$' "$nlist" >/dev/null; then - if grep ' nm_test_func$' "$nlist" >/dev/null; then - cat < conftest.$ac_ext -#ifdef __cplusplus -extern "C" { -#endif - -EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' - - cat <> conftest.$ac_ext -#if defined (__STDC__) && __STDC__ -# define lt_ptr_t void * -#else -# define lt_ptr_t char * -# define const -#endif - -/* The mapping between symbol names and symbols. */ -const struct { - const char *name; - lt_ptr_t address; -} -lt_preloaded_symbols[[]] = -{ -EOF - $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext - cat <<\EOF >> conftest.$ac_ext - {0, (lt_ptr_t) 0} -}; + with_gnu_ld=no + wlarc= -#ifdef __cplusplus -} -#endif -EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext - lt_save_LIBS="$LIBS" - lt_save_CFLAGS="$CFLAGS" - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$_LT_AC_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then - pipe_works=yes - fi - LIBS="$lt_save_LIBS" - CFLAGS="$lt_save_CFLAGS" - else - echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD - fi - else - echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD - cat conftest.$ac_ext >&5 - fi - rm -f conftest* conftst* - # Do not use the global_symbol_pipe unless it works. 
- if test "$pipe_works" = yes; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done -]) -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - AC_MSG_RESULT(failed) -else - AC_MSG_RESULT(ok) -fi -]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + else + GXX=no + with_gnu_ld=no + wlarc= + fi -# AC_LIBTOOL_PROG_COMPILER_PIC([TAGNAME]) -# --------------------------------------- -AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC], -[_LT_AC_TAGVAR(lt_prog_compiler_wl, $1)= -_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= -_LT_AC_TAGVAR(lt_prog_compiler_static, $1)= + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no -AC_MSG_CHECKING([for $compiler option to produce PIC]) - ifelse([$1],[CXX],[ - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | cygwin* | os2* | pw32*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - *djgpp*) - # DJGPP does not support shared libraries at all - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. 
- ;; - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - case $host_os in - aix4* | aix5*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. 
+ _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + _LT_TAGVAR(ld_shlibs, $1)=no fi ;; + chorus*) - case $cc_basename in - cxch68*) - # Green Hills C++ Compiler - # _LT_AC_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - esac + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - esac - ;; + dgux*) - case $cc_basename in - ec++*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - ghcx*) + ghcx*) # Green Hills C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - *) + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - esac - ;; + esac + ;; + + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + freebsd* | dragonfly*) - # FreeBSD uses GNU C++ - ;; - hpux9* | hpux10* | hpux11*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - if test "$host_cpu" != ia64; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - fi + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + gnu*) + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - aCC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + aCC*) case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; - *) + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi ;; - esac - ;; - interix*) - # This is c89, which is MS Visual C++ (no shared libs) - # Anyone wants to do a port? - ;; - irix5* | irix6* | nonstopux*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - # CC pic flag -KPIC is the default. + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; - *) + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes ;; - esac - ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # KAI C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; - icpc* | ecpc*) + icpc* | ecpc* ) # Intel C++ - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgCC*) - # Portland Group C++ compiler. 
- _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 will use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' 
+ ;; cxx*) # Compaq C++ - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' + ;; + xl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; + mvs*) - case $cc_basename in - cxx*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; - netbsd* | netbsdelf*-gnu) + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=echo + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi ;; + osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac ;; - RCC*) + RCC*) # Rational C++ 2.4.1 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - cxx*) - # Digital/Compaq C++ - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # Make sure the PIC flag is empty. 
It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' ;; *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi ;; - esac - ;; + esac + ;; + psos*) - ;; + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + solaris*) - case $cc_basename in - CC*) + case $cc_basename in + CC*) # Sun C++ 4.2, 5.x and Centerline C++ - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='echo' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; - gcx*) + gcx*) # Green Hills C++ Compiler - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; - *) + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. + _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi ;; - esac - ;; - sunos4*) + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + case $cc_basename in - CC*) - # Sun C++ 4.x - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - lcc*) - # Lucid - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac - ;; + ;; + tandem*) - case $cc_basename in - NCC*) + case $cc_basename in + NCC*) # NonStop-UX NCC 3.20 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - esac - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - case $cc_basename in - CC*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no ;; - esac - ;; + esac + ;; + vxworks*) - ;; + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; esac - fi -], + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. 
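# For illustration only (the paths are hypothetical): given verbose link
# output such as
#   /usr/lib/crtbegin.o -L/usr/lib/gcc conftest.o -lstdc++ -lm /usr/lib/crtend.o
# the parsing loop below records everything seen before conftest.$objext as
# "pre" dependencies and everything after it as "post" dependencies:
#   predep_objects=/usr/lib/crtbegin.o
#   compiler_lib_search_path=-L/usr/lib/gcc
#   postdeps="-lstdc++ -lm"
#   postdep_objects=/usr/lib/crtend.o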
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +# Dependencies to place before and after the object being linked: +_LT_TAGVAR(predep_objects, $1)= +_LT_TAGVAR(postdep_objects, $1)= +_LT_TAGVAR(predeps, $1)= +_LT_TAGVAR(postdeps, $1)= +_LT_TAGVAR(compiler_lib_search_path, $1)= + +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +int a; +void foo (void) { a = 0; } +_LT_EOF +], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF +], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer*4 a + a=0 + return + end +_LT_EOF +], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer a + a=0 + return + end +_LT_EOF +], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +public class foo { + private int a; + public void bar (void) { + a = 0; + } +}; +_LT_EOF +]) +dnl Parse the compiler output and extract the necessary +dnl objects, libraries and library flags. +if AC_TRY_EVAL(ac_compile); then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + else + prev= + fi + + if test "$pre_test_object_deps_done" = no; then + case $p in + -L* | -R*) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)="${prev}${p}" + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi + ;; + + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)="$p" + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)="$p" + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. 
+ rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f confest.$objext + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_PROG_F77 +# ------------ +# Since AC_PROG_F77 is broken, in that it returns the empty string +# if there is no fortran compiler, we have our own version here. +m4_defun([_LT_PROG_F77], [ - if test "$GCC" = yes; then - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' +pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes]) +AC_PROG_F77 +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi +popdef([AC_MSG_ERROR]) +])# _LT_PROG_F77 + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_F77], []) + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_F77_CONFIG], +[AC_REQUIRE([_LT_PROG_F77])dnl +AC_LANG_PUSH(Fortran 77) + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + CC=${F77-"f77"} + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... 
+ _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_PROG_FC +# ----------- +# Since AC_PROG_FC is broken, in that it returns the empty string +# if there is no fortran compiler, we have our own version here. +m4_defun([_LT_PROG_FC], +[ +pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes]) +AC_PROG_FC +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi +popdef([AC_MSG_ERROR]) +])# _LT_PROG_FC + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([_LT_PROG_FC], []) + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_FC_CONFIG], +[AC_REQUIRE([_LT_PROG_FC])dnl +AC_LANG_PUSH(Fortran) + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + CC=${FC-"f95"} + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. case $host_os in - aix*) - # All AIX code is PIC. 
- if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" +fi # test "$_lt_disable_FC" != yes - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; +AC_LANG_POP +])# _LT_LANG_FC_CONFIG - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; +# Source file extension for Java test sources. +ac_ext=java - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - enable_shared=no - ;; +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. 
- case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. - case $host_os in - aix*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-qnocommon' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - esac - ;; +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' - ;; +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE - hpux9* | hpux10* | hpux11*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - ;; +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) - irix5* | irix6* | nonstopux*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC (with -KPIC) is the default. - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no - newsos6) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds - linux* | k*bsd*-gnu) - case $cc_basename in - icc* | ecc*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - ccc*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All Alpha code is PIC. 
- _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C 5.9 - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - *Sun\ F*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='' - ;; - esac - ;; - esac - ;; +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC="$lt_save_CC" +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC="$lt_save_CC" +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. 
+m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. + if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef - osf3* | osf4* | osf5*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All OSF/1 code is PIC. - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) - rdos*) - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - solaris*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; - *) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; - esac - ;; +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. 
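# For illustration, the XSI probe below relies only on ${var##pattern} and
# ${var%pattern} expansion, $((...)) arithmetic and ${#var}; it can be
# reproduced by hand in any candidate shell, e.g.:
#   _d=a/b/c
#   test "${_d##*/},${_d%/*}" = c,a/b && test $(( 1 + 1 )) -eq 2 \
#     && test ${#_d} -eq 5 && echo "xsi_shell would be yes"
# The "+=" probe works the same way: after `foo=bar', `eval "foo+=baz"' leaves
# foo=barbaz only in shells with appending assignment (e.g. bash, ksh93).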
+m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) - sunos4*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl - sysv4 | sysv4.2uw2* | sysv4.3*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_XSI_SHELLFNS +# --------------------- +# Bourne and XSI compatible variants of some useful shell functions. +m4_defun([_LT_PROG_XSI_SHELLFNS], +[case $xsi_shell in + yes) + cat << \_LT_EOF >> "$cfgfile" + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac +} - sysv4*MP*) - if test -d /usr/nec ;then - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; +# func_basename file +func_basename () +{ + func_basename_result="${1##*/}" +} - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. 
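# For example, `func_dirname_and_basename sub/dir/file.lo "" "."' sets
# func_dirname_result=sub/dir and func_basename_result=file.lo; for a bare
# `file.lo' the dirname result falls back to the third argument, ".".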
+func_dirname_and_basename () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}" +} - unicos*) - _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +func_stripname () +{ + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. + func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"} +} - uts4*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=${1%%=*} + func_opt_split_arg=${1#*=} +} - *) - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -]) -AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) +# func_lo2o object +func_lo2o () +{ + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac +} -# -# Check to make sure the PIC flag actually works. -# -if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then - AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], - _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), - [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], - [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in - "" | " "*) ;; - *) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)" ;; - esac], - [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_AC_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) -fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - *) - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])" - ;; -esac +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=${1%.*}.lo +} -# -# Check to make sure the static flag actually works. -# -wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_AC_TAGVAR(lt_prog_compiler_static, $1)\" -AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], - _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), - $lt_tmp_static_flag, - [], - [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) -]) +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=$(( $[*] )) +} +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=${#1} +} -# AC_LIBTOOL_PROG_LD_SHLIBS([TAGNAME]) -# ------------------------------------ -# See if the linker supports building shared libraries. -AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS], -[AC_REQUIRE([LT_AC_PROG_SED])dnl -AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -ifelse([$1],[CXX],[ - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - case $host_os in - aix4* | aix5*) - # If we're using GNU nm, then we don't want the "-C" option. 
- # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - else - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - fi - ;; - pw32*) - _LT_AC_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" - ;; - cygwin* | mingw*) - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' - ;; - linux* | k*bsd*-gnu) - _LT_AC_TAGVAR(link_all_deplibs, $1)=no - ;; - *) - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - ;; - esac -],[ - runpath_var= - _LT_AC_TAGVAR(allow_undefined_flag, $1)= - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=no - _LT_AC_TAGVAR(archive_cmds, $1)= - _LT_AC_TAGVAR(archive_expsym_cmds, $1)= - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)= - _LT_AC_TAGVAR(old_archive_from_expsyms_cmds, $1)= - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - _LT_AC_TAGVAR(thread_safe_flag_spec, $1)= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_minus_L, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(link_all_deplibs, $1)=unknown - _LT_AC_TAGVAR(hardcode_automatic, $1)=no - _LT_AC_TAGVAR(module_cmds, $1)= - _LT_AC_TAGVAR(module_expsym_cmds, $1)= - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - _LT_AC_TAGVAR(include_expsyms, $1)= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - extract_expsyms_cmds= - # Just being paranoid about ensuring that cc_basename is set. - _LT_CC_BASENAME([$compiler]) - case $host_os in - cygwin* | mingw* | pw32*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. 
- if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no +_LT_EOF ;; + *) # Bourne compatible functions. + cat << \_LT_EOF >> "$cfgfile" + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} + +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` +} + +dnl func_dirname_and_basename +dnl A portable version of this function is already defined in general.m4sh +dnl so there is no need for it here. + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac +} - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' +# sed scripts: +my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' +my_sed_long_arg='1s/^-[[^=]]*=//' - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - fi - supports_anon_versioning=no - case `$LD -v 2>/dev/null` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` + func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` +} - # See if GNU ld supports shared libraries. - case $host_os in - aix3* | aix4* | aix5*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - _LT_AC_TAGVAR(ld_shlibs, $1)=no - cat <&2 +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` +} -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. 
+# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'` +} -EOF - fi - ;; +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "$[@]"` +} - amigaos*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - - # Samuel A. Falvo II reports - # that the semantics of dynamic libraries on AmigaOS, at least up - # to version 4, is to share data among multiple programs linked - # with the same dynamic library. Since this doesn't match the - # behavior of shared libraries on other platforms, we can't use - # them. - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` +} - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; +_LT_EOF +esac - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=no - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' +case $lt_shell_append in + yes) + cat << \_LT_EOF >> "$cfgfile" + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "$[1]+=\$[2]" +} +_LT_EOF + ;; + *) + cat << \_LT_EOF >> "$cfgfile" - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; +# func_append var value +# Append VALUE to the end of shell variable VAR. 
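# (Shells without `+=' get the fallback below, which re-expands the variable
#  instead; either way `foo=bar; func_append foo baz' leaves foo=barbaz.)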
+func_append () +{ + eval "$[1]=\$$[1]\$[2]" +} - interix[[3-9]]*) - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; +_LT_EOF + ;; + esac +]) - gnu* | linux* | k*bsd*-gnu) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - tmp_addflag= - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - *) - tmp_sharedflag='-shared' ;; - esac - _LT_AC_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. 
- if test $supports_anon_versioning = yes; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - $echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - _LT_AC_TAGVAR(link_all_deplibs, $1)=no - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; +# serial 6 ltoptions.m4 - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) - solaris*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - _LT_AC_TAGVAR(ld_shlibs, $1)=no - cat <&2 -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + -EOF - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. 
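# For example, _LT_SET_OPTION([LT_INIT], [win32-dll]) records the option by
# defining the mangled name _LT_OPTION_LT_INIT__win32_dll (any character
# outside [a-zA-Z0-9_] becomes `_'), so a later
#   _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no])
# finds the marker via m4_ifdef and skips the default assignment.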
+m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS -_LT_EOF - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - sunos4*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], 
+[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) - if test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no; then - runpath_var= - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - fi - ;; - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - else - _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\[$]2 == "T") || (\[$]2 == "D") || (\[$]2 == "B")) && ([substr](\[$]3,1,1) != ".")) { print \[$]3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) - _LT_AC_TAGVAR(archive_cmds, $1)='' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) - if test "$GCC" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_AC_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_AC_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_AC_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_AC_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - _LT_AC_SYS_LIBPATH_AIX - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. 
- _LT_AC_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared libraries. - _LT_AC_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; +# win32-dll +# --------- +# Declare package support for building win32 dll's. +LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes - amigaos*) - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - # see comment about different semantics on the GNU ld section - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac - bsdi[[45]]*) - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic - ;; +test -z "$AS" && AS=as +_LT_DECL([], [AS], [0], [Assembler program])dnl - cygwin* | mingw* | pw32*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='true' - # FIXME: Should let the user specify the lib program. 
- _LT_AC_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' - _LT_AC_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' - _LT_AC_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_automatic, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - if test "$GCC" = yes ; then - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi - ;; +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl +])# win32-dll - dgux*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o 
$lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) - freebsd1*) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED - hpux9*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_direct, $1)=yes +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) - hpux10*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - fi - ;; +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) - hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - case $host_cpu in - hppa*64*|ia64*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - fi +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - ;; + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) - newsos6) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) - openbsd*) - if test -f /usr/libexec/ld.so; then - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - else - case $host_os in - openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - ;; - *) - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - ;; - esac - fi - else - _LT_AC_TAGVAR(ld_shlibs, $1)=no - fi - ;; +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) - os2*) - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_AC_TAGVAR(archive_cmds, $1)='$echo "LIBRARY $libname 
INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - _LT_AC_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) - osf3*) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - else - _LT_AC_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ - $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' - # Both c and cxx compiler support -rpath directly - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - fi - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - solaris*) - _LT_AC_TAGVAR(no_undefined_flag, $1)=' -z text' - if test "$GCC" = yes; then - wlarc='${wl}' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' - else - wlarc='' - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs 
$linker_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes fi - ;; - esac - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - ;; - - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes - _LT_AC_TAGVAR(hardcode_minus_L, $1)=yes - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no + done + IFS="$lt_save_ifs" ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) - sysv4) - case $host_vendor in - sni) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' - _LT_AC_TAGVAR(hardcode_direct, $1)=no - ;; - motorola) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) - sysv4.3*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' - ;; +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) - sysv4*MP*) - if test -d /usr/nec; then - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - _LT_AC_TAGVAR(ld_shlibs, $1)=yes - fi - ;; +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. 
If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - _LT_AC_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - _LT_AC_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' - runpath_var='LD_RUN_PATH' +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) - if test "$GCC" = yes; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC - uts4*) - _LT_AC_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) - *) - _LT_AC_TAGVAR(ld_shlibs, $1)=no - ;; - esac - fi +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) ]) -AC_MSG_RESULT([$_LT_AC_TAGVAR(ld_shlibs, $1)]) -test "$_LT_AC_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) + +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # -# Do we need to explicitly link libc? +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 # -case "x$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)" in -x|xyes) - # Assume -lc should be added - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $_LT_AC_TAGVAR(archive_cmds, $1) in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. 
- ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - AC_MSG_CHECKING([whether -lc should be explicitly linked in]) - $rm conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext +# serial 6 ltsugar.m4 - if AC_TRY_EVAL(ac_compile) 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) - pic_flag=$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$_LT_AC_TAGVAR(allow_undefined_flag, $1) - _LT_AC_TAGVAR(allow_undefined_flag, $1)= - if AC_TRY_EVAL(_LT_AC_TAGVAR(archive_cmds, $1) 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) - then - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no - else - _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=yes - fi - _LT_AC_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - AC_MSG_RESULT([$_LT_AC_TAGVAR(archive_cmds_need_lc, $1)]) - ;; - esac - fi - ;; -esac -])# AC_LIBTOOL_PROG_LD_SHLIBS +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) -# _LT_AC_FILE_LTDLL_C -# ------------------- -# Be careful that the start marker always follows a newline. -AC_DEFUN([_LT_AC_FILE_LTDLL_C], [ -# /* ltdll.c starts here */ -# #define WIN32_LEAN_AND_MEAN -# #include -# #undef WIN32_LEAN_AND_MEAN -# #include -# -# #ifndef __CYGWIN__ -# # ifdef __CYGWIN32__ -# # define __CYGWIN__ __CYGWIN32__ -# # endif -# #endif -# -# #ifdef __cplusplus -# extern "C" { -# #endif -# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved); -# #ifdef __cplusplus -# } -# #endif -# -# #ifdef __CYGWIN__ -# #include -# DECLARE_CYGWIN_DLL( DllMain ); -# #endif -# HINSTANCE __hDllInstance_base; +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). 
# -# BOOL APIENTRY -# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved) -# { -# __hDllInstance_base = hInst; -# return TRUE; -# } -# /* ltdll.c ends here */ -])# _LT_AC_FILE_LTDLL_C +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) -# _LT_AC_TAGVAR(VARNAME, [TAGNAME]) -# --------------------------------- -AC_DEFUN([_LT_AC_TAGVAR], [ifelse([$2], [], [$1], [$1_$2])]) +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. +m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) -# old names -AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL]) -AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) -AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) -AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) -AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) -AC_DEFUN([AM_PROG_LD], [AC_PROG_LD]) -AC_DEFUN([AM_PROG_NM], [AC_PROG_NM]) -# This is just to silence aclocal about the macro not being used -ifelse([AC_DISABLE_FAST_INSTALL]) +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) -AC_DEFUN([LT_AC_PROG_GCJ], -[AC_CHECK_TOOL(GCJ, gcj, no) - test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" - AC_SUBST(GCJFLAGS) -]) -AC_DEFUN([LT_AC_PROG_RC], -[AC_CHECK_TOOL(RC, windres, no) +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) +# ltversion.m4 -- version numbers 
-*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# Generated from ltversion.in. + +# serial 3012 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.2.6]) +m4_define([LT_PACKAGE_REVISION], [1.3012]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.2.6' +macro_revision='1.3012' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) -# Cheap backport of AS_EXECUTABLE_P and required macros -# from Autoconf 2.59; we should not use $as_executable_p directly. - -# _AS_TEST_PREPARE -# ---------------- -m4_ifndef([_AS_TEST_PREPARE], -[m4_defun([_AS_TEST_PREPARE], -[if test -x / >/dev/null 2>&1; then - as_executable_p='test -x' -else - as_executable_p='test -f' -fi -])])# _AS_TEST_PREPARE +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. -# AS_EXECUTABLE_P -# --------------- -# Check whether a file is executable. -m4_ifndef([AS_EXECUTABLE_P], -[m4_defun([AS_EXECUTABLE_P], -[AS_REQUIRE([_AS_TEST_PREPARE])dnl -$as_executable_p $1[]dnl -])])# AS_EXECUTABLE_P +# serial 4 lt~obsolete.m4 -# NOTE: This macro has been submitted for inclusion into # -# GNU Autoconf as AC_PROG_SED. When it is available in # -# a released version of Autoconf we should remove this # -# macro and use it instead. # -# LT_AC_PROG_SED -# -------------- -# Check for a fully-functional sed program, that truncates -# as few characters as possible. Prefer GNU sed if found. -AC_DEFUN([LT_AC_PROG_SED], -[AC_MSG_CHECKING([for a sed that does not truncate output]) -AC_CACHE_VAL(lt_cv_path_SED, -[# Loop through the user's path and test for sed and gsed. -# Then use that list of sed's as ones to test for truncation. -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for lt_ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - if AS_EXECUTABLE_P(["$as_dir/$lt_ac_prog$ac_exec_ext"]); then - lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" - fi - done - done -done -IFS=$as_save_IFS -lt_ac_max=0 -lt_ac_count=0 -# Add /usr/xpg4/bin/sed as it is typically found on Solaris -# along with /bin/sed that truncates output. -for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do - test ! -f $lt_ac_sed && continue - cat /dev/null > conftest.in - lt_ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >conftest.in - # Check for GNU sed and select it if it is found. 
- if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then - lt_cv_path_SED=$lt_ac_sed - break - fi - while true; do - cat conftest.in conftest.in >conftest.tmp - mv conftest.tmp conftest.in - cp conftest.in conftest.nl - echo >>conftest.nl - $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break - cmp -s conftest.out conftest.nl || break - # 10000 chars as input seems more than enough - test $lt_ac_count -gt 10 && break - lt_ac_count=`expr $lt_ac_count + 1` - if test $lt_ac_count -gt $lt_ac_max; then - lt_ac_max=$lt_ac_count - lt_cv_path_SED=$lt_ac_sed - fi - done -done -]) -SED=$lt_cv_path_SED -AC_SUBST([SED]) -AC_MSG_RESULT([$SED]) -]) +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) +# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], 
[AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/addopcodes.awk /tmp/3ARg2Grji7/sqlite3-3.6.16/addopcodes.awk --- sqlite3-3.4.2/addopcodes.awk 2005-11-24 22:22:30.000000000 +0000 +++ sqlite3-3.6.16/addopcodes.awk 2009-05-05 04:39:49.000000000 +0100 @@ -14,19 +14,18 @@ if( max<$3 ) max = $3 } END { - printf "#define TK_%-29s %4d\n", "TO_TEXT", max+1 - printf "#define TK_%-29s %4d\n", "TO_BLOB", max+2 - printf "#define TK_%-29s %4d\n", "TO_NUMERIC", max+3 - printf "#define TK_%-29s %4d\n", "TO_INT", max+4 - printf "#define TK_%-29s %4d\n", "TO_REAL", max+5 - printf "#define TK_%-29s %4d\n", "END_OF_FILE", max+6 - printf "#define TK_%-29s %4d\n", "ILLEGAL", max+7 - printf "#define TK_%-29s %4d\n", "SPACE", max+8 - printf "#define TK_%-29s %4d\n", "UNCLOSED_STRING", max+9 - printf "#define TK_%-29s %4d\n", "COMMENT", max+10 - printf "#define TK_%-29s %4d\n", "FUNCTION", max+11 - printf "#define TK_%-29s %4d\n", "COLUMN", max+12 - printf "#define TK_%-29s %4d\n", "AGG_FUNCTION", max+13 - printf "#define TK_%-29s %4d\n", "AGG_COLUMN", max+14 - printf "#define TK_%-29s %4d\n", "CONST_FUNC", max+15 + printf "#define TK_%-29s %4d\n", "TO_TEXT", ++max + printf "#define TK_%-29s %4d\n", "TO_BLOB", ++max + printf "#define TK_%-29s %4d\n", "TO_NUMERIC", ++max + printf "#define TK_%-29s %4d\n", "TO_INT", ++max + printf "#define TK_%-29s %4d\n", "TO_REAL", ++max + printf "#define TK_%-29s %4d\n", "END_OF_FILE", ++max + printf "#define TK_%-29s %4d\n", "ILLEGAL", ++max + printf "#define TK_%-29s %4d\n", "SPACE", ++max + printf "#define TK_%-29s %4d\n", "UNCLOSED_STRING", ++max + printf "#define TK_%-29s %4d\n", "FUNCTION", ++max + printf "#define TK_%-29s %4d\n", "COLUMN", ++max + printf "#define TK_%-29s %4d\n", "AGG_FUNCTION", ++max + printf "#define TK_%-29s %4d\n", "AGG_COLUMN", ++max + printf "#define TK_%-29s %4d\n", "CONST_FUNC", ++max } Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/2005osaward.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/2005osaward.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/nocopy.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/nocopy.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/powered_by_sqlite.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/powered_by_sqlite.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/SQLite_big.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/SQLite_big.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/SQLite.eps and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/SQLite.eps differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/SQLite.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/SQLite.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/art/SQLiteLogo3.tiff and /tmp/3ARg2Grji7/sqlite3-3.6.16/art/SQLiteLogo3.tiff differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/config.guess /tmp/3ARg2Grji7/sqlite3-3.6.16/config.guess --- sqlite3-3.4.2/config.guess 2007-06-12 13:17:59.000000000 +0100 +++ sqlite3-3.6.16/config.guess 2009-05-05 04:39:49.000000000 +0100 @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, # Inc. 
-timestamp='2007-01-15' +timestamp='2007-07-22' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -347,7 +347,7 @@ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; - i86pc:SunOS:5.*:*) + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) @@ -808,12 +808,15 @@ i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; - x86:Interix*:[3456]*) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - EM64T:Interix*:[3456]* | authenticamd:Interix*:[3456]*) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; + *:Interix*:[3456]*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + EM64T | authenticamd) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/config.h.in /tmp/3ARg2Grji7/sqlite3-3.6.16/config.h.in --- sqlite3-3.4.2/config.h.in 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/config.h.in 2009-06-12 03:37:45.000000000 +0100 @@ -0,0 +1,104 @@ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the `fdatasync' function. */ +#undef HAVE_FDATASYNC + +/* Define to 1 if you have the `gmtime_r' function. */ +#undef HAVE_GMTIME_R + +/* Define to 1 if the system has the type `int16_t'. */ +#undef HAVE_INT16_T + +/* Define to 1 if the system has the type `int32_t'. */ +#undef HAVE_INT32_T + +/* Define to 1 if the system has the type `int64_t'. */ +#undef HAVE_INT64_T + +/* Define to 1 if the system has the type `int8_t'. */ +#undef HAVE_INT8_T + +/* Define to 1 if the system has the type `intptr_t'. */ +#undef HAVE_INTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `localtime_r' function. */ +#undef HAVE_LOCALTIME_R + +/* Define to 1 if you have the `localtime_s' function. */ +#undef HAVE_LOCALTIME_S + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if the system has the type `uint16_t'. */ +#undef HAVE_UINT16_T + +/* Define to 1 if the system has the type `uint32_t'. */ +#undef HAVE_UINT32_T + +/* Define to 1 if the system has the type `uint64_t'. */ +#undef HAVE_UINT64_T + +/* Define to 1 if the system has the type `uint8_t'. */ +#undef HAVE_UINT8_T + +/* Define to 1 if the system has the type `uintptr_t'. */ +#undef HAVE_UINTPTR_T + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the `usleep' function. */ +#undef HAVE_USLEEP + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LT_OBJDIR + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. 
*/ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Number of bits in a file offset, on hosts where this is settable. */ +#undef _FILE_OFFSET_BITS + +/* Define for large files, on AIX-style hosts. */ +#undef _LARGE_FILES diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/config.sub /tmp/3ARg2Grji7/sqlite3-3.6.16/config.sub --- sqlite3-3.4.2/config.sub 2007-06-12 13:17:59.000000000 +0100 +++ sqlite3-3.6.16/config.sub 2009-05-05 04:39:49.000000000 +0100 @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, # Inc. -timestamp='2007-01-18' +timestamp='2007-06-28' # This file is (in principle) common to ALL GNU software. # The presence of a machine in this file suggests that SOME GNU software @@ -475,8 +475,8 @@ basic_machine=craynv-cray os=-unicosmp ;; - cr16c) - basic_machine=cr16c-unknown + cr16) + basic_machine=cr16-unknown os=-elf ;; crds | unos) @@ -683,6 +683,10 @@ basic_machine=i386-pc os=-mingw32 ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; miniframe) basic_machine=m68000-convergent ;; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/configure /tmp/3ARg2Grji7/sqlite3-3.6.16/configure --- sqlite3-3.4.2/configure 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/configure 2009-06-25 12:45:57.000000000 +0100 @@ -1,9 +1,9 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.61. +# Generated by GNU Autoconf 2.62 for sqlite 3.6.16. # # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## --------------------- ## @@ -15,7 +15,7 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST @@ -37,17 +37,45 @@ as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. 
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then - echo "#! /bin/sh" >conf$$.sh - echo "exit 0" >>conf$$.sh - chmod +x conf$$.sh - if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then - PATH_SEPARATOR=';' - else - PATH_SEPARATOR=: - fi - rm -f conf$$.sh + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } fi # Support unset when possible. @@ -63,8 +91,6 @@ # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) -as_nl=' -' IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. @@ -87,7 +113,7 @@ as_myself=$0 fi if test ! -f "$as_myself"; then - echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi @@ -100,17 +126,10 @@ PS4='+ ' # NLS nuisances. -for as_var in \ - LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ - LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ - LC_TELEPHONE LC_TIME -do - if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then - eval $as_var=C; export $as_var - else - ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var - fi -done +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && @@ -132,7 +151,7 @@ $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || -echo X/"$0" | +$as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q @@ -158,7 +177,7 @@ as_have_required=no fi - if test $as_have_required = yes && (eval ": + if test $as_have_required = yes && (eval ": (as_func_return () { (exit \$1) } @@ -240,7 +259,7 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST @@ -261,7 +280,7 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST @@ -341,10 +360,10 @@ if test "x$CONFIG_SHELL" != x; then for as_var in BASH_ENV ENV - do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var - done - export CONFIG_SHELL - exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} + do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + done + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} fi @@ -413,9 +432,10 @@ test \$exitcode = 0") || { echo No shell found that supports shell functions. - echo Please tell autoconf@gnu.org about your system, - echo including any error possibly output before this - echo message + echo Please tell bug-autoconf@gnu.org about your system, + echo including any error possibly output before this message. + echo This can help us improve future autoconf versions. + echo Configuration will now proceed without shell functions. } @@ -451,7 +471,7 @@ s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || - { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems @@ -479,7 +499,6 @@ *) ECHO_N='-n';; esac - if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr @@ -492,19 +511,22 @@ rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir - mkdir conf$$.dir + mkdir conf$$.dir 2>/dev/null fi -echo >conf$$.file -if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else as_ln_s='cp -p' -elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln + fi else as_ln_s='cp -p' fi @@ -529,10 +551,10 @@ as_test_x=' eval sh -c '\'' if test -d "$1"; then - test -d "$1/."; + test -d "$1/."; else case $1 in - -*)set "./$1";; + -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi @@ -553,22 +575,22 @@ # Check that we are running under the correct shell. SHELL=${CONFIG_SHELL-/bin/sh} -case X$ECHO in +case X$lt_ECHO in X*--fallback-echo) # Remove one level of quotation (which was required for Make). - ECHO=`echo "$ECHO" | sed 's,\\\\\$\\$0,'$0','` + ECHO=`echo "$lt_ECHO" | sed 's,\\\\\$\\$0,'$0','` ;; esac -echo=${ECHO-echo} +ECHO=${lt_ECHO-echo} if test "X$1" = X--no-reexec; then # Discard the --no-reexec flag, and continue. shift elif test "X$1" = X--fallback-echo; then # Avoid inline document here, it may be left over : -elif test "X`($echo '\t') 2>/dev/null`" = 'X\t' ; then - # Yippee, $echo works! +elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then + # Yippee, $ECHO works! : else # Restart under the correct shell. 
@@ -578,9 +600,9 @@ if test "X$1" = X--fallback-echo; then # used as fallback echo shift - cat </dev/null 2>&1 && unset CDPATH -if test -z "$ECHO"; then -if test "X${echo_test_string+set}" != Xset; then -# find a string as large as possible, as long as the shell can cope with it - for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do - # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... - if (echo_test_string=`eval $cmd`) 2>/dev/null && - echo_test_string=`eval $cmd` && - (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null - then - break - fi - done -fi +if test -z "$lt_ECHO"; then + if test "X${echo_test_string+set}" != Xset; then + # find a string as large as possible, as long as the shell can cope with it + for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do + # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... + if { echo_test_string=`eval $cmd`; } 2>/dev/null && + { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null + then + break + fi + done + fi -if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - : -else - # The Solaris, AIX, and Digital Unix default echo programs unquote - # backslashes. This makes it impossible to quote backslashes using - # echo "$something" | sed 's/\\/\\\\/g' - # - # So, first we look for a working echo in the user's PATH. + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + : + else + # The Solaris, AIX, and Digital Unix default echo programs unquote + # backslashes. This makes it impossible to quote backslashes using + # echo "$something" | sed 's/\\/\\\\/g' + # + # So, first we look for a working echo in the user's PATH. - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for dir in $PATH /usr/ucb; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for dir in $PATH /usr/ucb; do + IFS="$lt_save_ifs" + if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && + test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && + echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$dir/echo" + break + fi + done IFS="$lt_save_ifs" - if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && - test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$dir/echo" - break - fi - done - IFS="$lt_save_ifs" - if test "X$echo" = Xecho; then - # We didn't find a better echo, so look for alternatives. - if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # This shell has a builtin print -r that does the trick. - echo='print -r' - elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) && - test "X$CONFIG_SHELL" != X/bin/ksh; then - # If we have ksh, try running configure again with it. - ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} - export ORIGINAL_CONFIG_SHELL - CONFIG_SHELL=/bin/ksh - export CONFIG_SHELL - exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} - else - # Try using printf. 
- echo='printf %s\n' - if test "X`($echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # Cool, printf works - : - elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL - export CONFIG_SHELL - SHELL="$CONFIG_SHELL" - export SHELL - echo="$CONFIG_SHELL $0 --fallback-echo" - elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - echo="$CONFIG_SHELL $0 --fallback-echo" + if test "X$ECHO" = Xecho; then + # We didn't find a better echo, so look for alternatives. + if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # This shell has a builtin print -r that does the trick. + ECHO='print -r' + elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && + test "X$CONFIG_SHELL" != X/bin/ksh; then + # If we have ksh, try running configure again with it. + ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} + export ORIGINAL_CONFIG_SHELL + CONFIG_SHELL=/bin/ksh + export CONFIG_SHELL + exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"} else - # maybe with a smaller string... - prev=: + # Try using printf. + ECHO='printf %s\n' + if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && + echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + # Cool, printf works + : + elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL + export CONFIG_SHELL + SHELL="$CONFIG_SHELL" + export SHELL + ECHO="$CONFIG_SHELL $0 --fallback-echo" + elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` && + test "X$echo_testing_string" = 'X\t' && + echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` && + test "X$echo_testing_string" = "X$echo_test_string"; then + ECHO="$CONFIG_SHELL $0 --fallback-echo" + else + # maybe with a smaller string... + prev=: - for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do - if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null - then - break - fi - prev="$cmd" - done + for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do + if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null + then + break + fi + prev="$cmd" + done - if test "$prev" != 'sed 50q "$0"'; then - echo_test_string=`eval $prev` - export echo_test_string - exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} - else - # Oops. We lost completely, so just stick with echo. 
- echo=echo - fi + if test "$prev" != 'sed 50q "$0"'; then + echo_test_string=`eval $prev` + export echo_test_string + exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"} + else + # Oops. We lost completely, so just stick with echo. + ECHO=echo + fi + fi fi fi fi fi -fi # Copy echo and quote the copy suitably for passing to libtool from # the Makefile, instead of quoting the original, which is used later. -ECHO=$echo -if test "X$ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then - ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" +lt_ECHO=$ECHO +if test "X$lt_ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then + lt_ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo" fi -tagnames=${tagnames+${tagnames},}CXX - -tagnames=${tagnames+${tagnames},}F77 - exec 7<&0 </dev/null 6>&1 # Name of the host. @@ -724,13 +741,12 @@ SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. -PACKAGE_NAME= -PACKAGE_TARNAME= -PACKAGE_VERSION= -PACKAGE_STRING= -PACKAGE_BUGREPORT= +PACKAGE_NAME='sqlite' +PACKAGE_TARNAME='sqlite' +PACKAGE_VERSION='3.6.16' +PACKAGE_STRING='sqlite 3.6.16' +PACKAGE_BUGREPORT='' -ac_unique_file="src/sqlite.h.in" # Factoring default headers for most tests. ac_includes_default="\ #include <stdio.h> @@ -804,6 +820,7 @@ build_alias host_alias target_alias +LIBTOOL build build_cpu build_vendor @@ -822,40 +839,43 @@ SED GREP EGREP +FGREP +LD +DUMPBIN +ac_ct_DUMPBIN +NM LN_S -ECHO +OBJDUMP AR -RANLIB STRIP +RANLIB +lt_ECHO +DSYMUTIL +NMEDIT +LIPO +OTOOL +OTOOL64 CPP -CXX -CXXFLAGS -ac_ct_CXX -CXXCPP -F77 -FFLAGS -ac_ct_F77 -LIBTOOL INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA AWK +TCLSH_CMD +TCLLIBDIR program_prefix VERSION RELEASE VERSION_NUMBER BUILD_CC -BUILD_CFLAGS -THREADSAFE -TARGET_THREAD_LIB +SQLITE_THREADSAFE XTHREADCONNECT THREADSOVERRIDELOCKS ALLOWRELEASE TEMP_STORE BUILD_EXEEXT -OS_UNIX -OS_WIN -OS_OS2 +SQLITE_OS_UNIX +SQLITE_OS_WIN +SQLITE_OS_OS2 TARGET_EXEEXT TCL_VERSION TCL_BIN_DIR @@ -873,9 +893,38 @@ TARGET_READLINE_INC TARGET_HAVE_READLINE TARGET_DEBUG +USE_AMALGAMATION +OPT_FEATURE_FLAGS +USE_GCOV +BUILD_CFLAGS LIBOBJS LTLIBOBJS' ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_shared +enable_static +with_pic +enable_fast_install +with_gnu_ld +enable_libtool_lock +enable_largefile +with_hints +enable_threadsafe +enable_cross_thread_connections +enable_threads_override_locks +enable_releasemode +enable_tempstore +enable_tcl +with_tcl +enable_readline +with_readline_lib +with_readline_inc +enable_debug +enable_amalgamation +enable_load_extension +enable_gcov +' ac_precious_vars='build_alias host_alias target_alias @@ -885,17 +934,14 @@ LIBS CPPFLAGS CPP -CXX -CXXFLAGS -CCC -CXXCPP -F77 -FFLAGS' +TCLLIBDIR' # Initialize some variables set by options. ac_init_help= ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null @@ -929,7 +975,7 @@ localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE}' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' @@ -994,13 +1040,21 @@ datarootdir=$ac_optarg ;; -disable-* | --disable-*) - ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names.
- expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && - { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } - ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` - eval enable_$ac_feature=no ;; + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; @@ -1013,13 +1067,21 @@ dvidir=$ac_optarg ;; -enable-* | --enable-*) - ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. - expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && - { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 { (exit 1); exit 1; }; } - ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` - eval enable_$ac_feature=\$ac_optarg ;; + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ @@ -1210,22 +1272,38 @@ ac_init_version=: ;; -with-* | --with-*) - ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. - expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && - { echo "$as_me: error: invalid package name: $ac_package" >&2 + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } - ac_package=`echo $ac_package | sed 's/[-.]/_/g'` - eval with_$ac_package=\$ac_optarg ;; + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) - ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. 
- expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && - { echo "$as_me: error: invalid package name: $ac_package" >&2 + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 { (exit 1); exit 1; }; } - ac_package=`echo $ac_package | sed 's/[-.]/_/g'` - eval with_$ac_package=no ;; + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. @@ -1245,7 +1323,7 @@ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; - -*) { echo "$as_me: error: unrecognized option: $ac_option + -*) { $as_echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; @@ -1254,16 +1332,16 @@ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && - { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. - echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; @@ -1272,22 +1350,38 @@ if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` - { echo "$as_me: error: missing argument to $ac_option" >&2 + { $as_echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi -# Be sure to have absolute directory names. +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) { $as_echo "$as_me: error: Unrecognized options: $ac_unrecognized_opts" >&2 + { (exit 1); exit 1; }; } ;; + *) $as_echo "$as_me: WARNING: Unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac - { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; } done @@ -1302,7 +1396,7 @@ if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe - echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. 
If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes @@ -1318,10 +1412,10 @@ ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - { echo "$as_me: error: Working directory cannot be determined" >&2 + { $as_echo "$as_me: error: Working directory cannot be determined" >&2 { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - { echo "$as_me: error: pwd does not report name of working directory" >&2 + { $as_echo "$as_me: error: pwd does not report name of working directory" >&2 { (exit 1); exit 1; }; } @@ -1329,12 +1423,12 @@ if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$0" || -$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$0" : 'X\(//\)[^/]' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -echo X"$0" | + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -1361,12 +1455,12 @@ fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2 + cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2 { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. @@ -1393,7 +1487,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures this package to adapt to many kinds of systems. +\`configure' configures sqlite 3.6.16 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1415,9 +1509,9 @@ Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] + [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] + [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify @@ -1427,25 +1521,25 @@ For better control, use the options below. 
Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/sqlite] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF @@ -1457,10 +1551,13 @@ fi if test -n "$ac_init_help"; then - + case $ac_init_help in + short | recursive ) echo "Configuration of sqlite 3.6.16:";; + esac cat <<\_ACEOF Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-shared[=PKGS] build shared libraries [default=yes] @@ -1468,6 +1565,7 @@ --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --disable-libtool-lock avoid locking (might break parallel builds) + --disable-largefile omit support for large files --enable-threadsafe Support threadsafe operation --enable-cross-thread-connections Allow connection sharing across threads @@ -1479,14 +1577,17 @@ --disable-tcl do not build TCL extension --disable-readline disable readline support [default=detect] --enable-debug enable debugging & verbose explain + --disable-amalgamation Disable the amalgamation and instead build all files + separately + --enable-load-extension Enable loading of external extensions + --enable-gcov Enable coverage testing using gcov Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-gnu-ld assume the C compiler uses GNU ld [default=no] 
--with-pic try to use only PIC/non-PIC objects [default=use both] - --with-tags[=TAGS] include additional configurations [automatic] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-hints=FILE Read configuration options from FILE --with-tcl=DIR directory containing tcl configuration (tclConfig.sh) @@ -1502,11 +1603,7 @@ CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if you have headers in a nonstandard directory CPP C preprocessor - CXX C++ compiler command - CXXFLAGS C++ compiler flags - CXXCPP C++ preprocessor - F77 Fortran 77 compiler command - FFLAGS Fortran 77 compiler flags + TCLLIBDIR Where to install tcl plugin Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. @@ -1518,15 +1615,17 @@ if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) - ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; @@ -1562,7 +1661,7 @@ echo && $SHELL "$ac_srcdir/configure" --help=recursive else - echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done @@ -1571,11 +1670,11 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -configure -generated by GNU Autoconf 2.61 +sqlite configure 3.6.16 +generated by GNU Autoconf 2.62 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF @@ -1585,8 +1684,8 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by $as_me, which was -generated by GNU Autoconf 2.61. Invocation command line was +It was created by sqlite $as_me 3.6.16, which was +generated by GNU Autoconf 2.62. Invocation command line was $ $0 $@ @@ -1622,7 +1721,7 @@ do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - echo "PATH: $as_dir" + $as_echo "PATH: $as_dir" done IFS=$as_save_IFS @@ -1657,7 +1756,7 @@ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) - ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; @@ -1709,11 +1808,12 @@ case $ac_val in #( *${as_nl}*) case $ac_var in #( - *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 -echo "$as_me: WARNING: Cache variable $ac_var contains a newline."
>&2;} ;; + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +$as_echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) $as_unset $ac_var ;; esac ;; esac @@ -1743,9 +1843,9 @@ do eval ac_val=\$$ac_var case $ac_val in - *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac - echo "$ac_var='\''$ac_val'\''" + $as_echo "$ac_var='\''$ac_val'\''" done | sort echo @@ -1760,9 +1860,9 @@ do eval ac_val=\$$ac_var case $ac_val in - *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac - echo "$ac_var='\''$ac_val'\''" + $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi @@ -1778,8 +1878,8 @@ echo fi test "$ac_signal" != 0 && - echo "$as_me: caught signal $ac_signal" - echo "$as_me: exit $exit_status" + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && @@ -1821,21 +1921,24 @@ # Let the site file select an alternate cache file if it wants to. -# Prefer explicitly selected file to automatically selected ones. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE if test -n "$CONFIG_SITE"; then - set x "$CONFIG_SITE" + ac_site_file1=$CONFIG_SITE elif test "x$prefix" != xNONE; then - set x "$prefix/share/config.site" "$prefix/etc/config.site" + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site else - set x "$ac_default_prefix/share/config.site" \ - "$ac_default_prefix/etc/config.site" + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site fi -shift -for ac_site_file +for ac_site_file in "$ac_site_file1" "$ac_site_file2" do + test "x$ac_site_file" = xNONE && continue if test -r "$ac_site_file"; then - { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 -echo "$as_me: loading site script $ac_site_file" >&6;} + { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi @@ -1845,16 +1948,16 @@ # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. if test -f "$cache_file"; then - { echo "$as_me:$LINENO: loading cache $cache_file" >&5 -echo "$as_me: loading cache $cache_file" >&6;} + { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . 
"./$cache_file";; esac fi else - { echo "$as_me:$LINENO: creating cache $cache_file" >&5 -echo "$as_me: creating cache $cache_file" >&6;} + { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi @@ -1868,29 +1971,38 @@ eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) - { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) - { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 -echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then - { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 -echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 -echo "$as_me: former value: $ac_old_val" >&2;} - { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 -echo "$as_me: current value: $ac_new_val" >&2;} - ac_cache_corrupted=: + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. 
if test "$ac_new_set" = set; then case $ac_new_val in - *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in @@ -1900,10 +2012,10 @@ fi done if $ac_cache_corrupted; then - { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 -echo "$as_me: error: changes in the environment can compromise the build" >&2;} - { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 -echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi @@ -1923,6 +2035,14 @@ + + + + + + + + ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -1931,83 +2051,49 @@ +sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'` +if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then +{ { $as_echo "$as_me:$LINENO: error: configure script is out of date: + configure \$PACKAGE_VERSION = $PACKAGE_VERSION + top level VERSION file = $sqlite_version_sanity_check +please regen with autoconf" >&5 +$as_echo "$as_me: error: configure script is out of date: + configure \$PACKAGE_VERSION = $PACKAGE_VERSION + top level VERSION file = $sqlite_version_sanity_check +please regen with autoconf" >&2;} + { (exit 1); exit 1; }; } +fi + # The following RCS revision string applies to configure.in -# $Revision: 1.29 $ +# $Revision: 1.73 $ ######### # Programs needed # -# Check whether --enable-shared was given. -if test "${enable_shared+set}" = set; then - enableval=$enable_shared; p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_shared=yes -fi +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:$LINENO: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.2.6' +macro_revision='1.3012' + + + + + + + + -# Check whether --enable-static was given. -if test "${enable_static+set}" = set; then - enableval=$enable_static; p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_static=yes -fi -# Check whether --enable-fast-install was given. 
-if test "${enable_fast_install+set}" = set; then - enableval=$enable_fast_install; p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_fast_install=yes -fi +ltmain="$ac_aux_dir/ltmain.sh" ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do @@ -2026,8 +2112,8 @@ fi done if test -z "$ac_aux_dir"; then - { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 -echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} + { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 +$as_echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} { (exit 1); exit 1; }; } fi @@ -2042,34 +2128,34 @@ # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - { { echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 -echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} + { { $as_echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 +$as_echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} { (exit 1); exit 1; }; } -{ echo "$as_me:$LINENO: checking build system type" >&5 -echo $ECHO_N "checking build system type... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking build system type" >&5 +$as_echo_n "checking build system type... 
" >&6; } if test "${ac_cv_build+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && - { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 -echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { { $as_echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +$as_echo "$as_me: error: cannot guess build type; you must specify one" >&2;} { (exit 1); exit 1; }; } ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 -echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} { (exit 1); exit 1; }; } fi -{ echo "$as_me:$LINENO: result: $ac_cv_build" >&5 -echo "${ECHO_T}$ac_cv_build" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; -*) { { echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 -echo "$as_me: error: invalid value of canonical build" >&2;} +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 +$as_echo "$as_me: error: invalid value of canonical build" >&2;} { (exit 1); exit 1; }; };; esac build=$ac_cv_build @@ -2086,27 +2172,27 @@ case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac -{ echo "$as_me:$LINENO: checking host system type" >&5 -echo $ECHO_N "checking host system type... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } if test "${ac_cv_host+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 -echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} { (exit 1); exit 1; }; } fi fi -{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5 -echo "${ECHO_T}$ac_cv_host" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; -*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 -echo "$as_me: error: invalid value of canonical host" >&2;} +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +$as_echo "$as_me: error: invalid value of canonical host" >&2;} { (exit 1); exit 1; }; };; esac host=$ac_cv_host @@ -2131,10 +2217,10 @@ if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_prog_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. @@ -2147,7 +2233,7 @@ for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2158,11 +2244,11 @@ fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { echo "$as_me:$LINENO: result: $CC" >&5 -echo "${ECHO_T}$CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi @@ -2171,10 +2257,10 @@ ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. @@ -2187,7 +2273,7 @@ for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2198,11 +2284,11 @@ fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 -echo "${ECHO_T}$ac_ct_CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then @@ -2210,10 +2296,10 @@ else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; @@ -2228,10 +2314,10 @@ if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
@@ -2244,7 +2330,7 @@ for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2255,11 +2341,11 @@ fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { echo "$as_me:$LINENO: result: $CC" >&5 -echo "${ECHO_T}$CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi @@ -2268,10 +2354,10 @@ if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. @@ -2289,7 +2375,7 @@ continue fi ac_cv_prog_CC="cc" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2312,11 +2398,11 @@ fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { echo "$as_me:$LINENO: result: $CC" >&5 -echo "${ECHO_T}$CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi @@ -2327,10 +2413,10 @@ do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } if test "${ac_cv_prog_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. @@ -2343,7 +2429,7 @@ for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2354,11 +2440,11 @@ fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { echo "$as_me:$LINENO: result: $CC" >&5 -echo "${ECHO_T}$CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi @@ -2371,10 +2457,10 @@ do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } if test "${ac_cv_prog_ac_ct_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. @@ -2387,7 +2473,7 @@ for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2398,11 +2484,11 @@ fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 -echo "${ECHO_T}$ac_ct_CC" >&6; } + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi @@ -2414,10 +2500,10 @@ else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; @@ -2429,44 +2515,48 @@ fi -test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&5 -echo "$as_me: error: no acceptable C compiler found in \$PATH +$as_echo "$as_me: error: no acceptable C compiler found in \$PATH See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } # Provide some information about the compiler. -echo "$as_me:$LINENO: checking for C compiler version" >&5 -ac_compiler=`set X $ac_compile; echo $2` +$as_echo "$as_me:$LINENO: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 { (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -v >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -v >&5") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { (ac_try="$ac_compiler -V >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compiler -V >&5") 2>&5 ac_status=$? 
- echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } cat >conftest.$ac_ext <<_ACEOF @@ -2485,27 +2575,22 @@ } _ACEOF ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.exe b.out" +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. -{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 -echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; } -ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` -# -# List of possible output files, starting from the most likely. -# The algorithm is not robust to junk in `.', hence go to wildcards (a.*) -# only as a last resort. b.out is created by i960 compilers. -ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out' -# -# The IRIX 6 linker writes into existing files which may not be -# executable, retaining their permissions. Remove them first so a -# subsequent execution test works. +{ $as_echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + ac_rmfiles= for ac_file in $ac_files do case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done @@ -2516,10 +2601,11 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_link_default") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' @@ -2530,7 +2616,7 @@ do test -f "$ac_file" || continue case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most @@ -2557,15 +2643,15 @@ ac_file='' fi -{ echo "$as_me:$LINENO: result: $ac_file" >&5 -echo "${ECHO_T}$ac_file" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } if test -z "$ac_file"; then - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { echo "$as_me:$LINENO: error: C compiler cannot create executables +{ { $as_echo "$as_me:$LINENO: error: C compiler cannot create executables See \`config.log' for more details." >&5 -echo "$as_me: error: C compiler cannot create executables +$as_echo "$as_me: error: C compiler cannot create executables See \`config.log' for more details." >&2;} { (exit 77); exit 77; }; } fi @@ -2574,8 +2660,8 @@ # Check that the compiler produces executables we can run. 
If not, either # the compiler is broken, or we cross compile. -{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5 -echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } # FIXME: These cross compiler hacks should be removed for Autoconf 3.0 # If not cross compiling, check that we can run a simple program. if test "$cross_compiling" != yes; then @@ -2584,49 +2670,51 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_try") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else - { { echo "$as_me:$LINENO: error: cannot run C compiled programs. + { { $as_echo "$as_me:$LINENO: error: cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&5 -echo "$as_me: error: cannot run C compiled programs. +$as_echo "$as_me: error: cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi fi fi -{ echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } +{ $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } -rm -f a.out a.exe conftest$ac_cv_exeext b.out +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. -{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 -echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; } -{ echo "$as_me:$LINENO: result: $cross_compiling" >&5 -echo "${ECHO_T}$cross_compiling" >&6; } +{ $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +{ $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } -{ echo "$as_me:$LINENO: checking for suffix of executables" >&5 -echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will @@ -2635,31 +2723,31 @@ for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else - { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link + { { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +$as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi rm -f conftest$ac_cv_exeext -{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 -echo "${ECHO_T}$ac_cv_exeext" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT -{ echo "$as_me:$LINENO: checking for suffix of object files" >&5 -echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } if test "${ac_cv_objext+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ @@ -2682,40 +2770,41 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;; + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&5 -echo "$as_me: error: cannot compute suffix of object files: cannot compile +$as_echo "$as_me: error: cannot compute suffix of object files: cannot compile See \`config.log' for more details." >&2;} { (exit 1); exit 1; }; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi -{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 -echo "${ECHO_T}$ac_cv_objext" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT -{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 -echo $ECHO_N "checking whether we are using the GNU C compiler... 
$ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if test "${ac_cv_c_compiler_gnu+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. */ @@ -2741,20 +2830,21 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_compiler_gnu=no @@ -2764,15 +2854,19 @@ ac_cv_c_compiler_gnu=$ac_compiler_gnu fi -{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 -echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; } -GCC=`test $ac_compiler_gnu = yes && echo yes` +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS -{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 -echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... " >&6; } if test "${ac_cv_prog_cc_g+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes @@ -2799,20 +2893,21 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 CFLAGS="" @@ -2837,20 +2932,21 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then : else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_c_werror_flag=$ac_save_c_werror_flag @@ -2876,20 +2972,21 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 @@ -2904,8 +3001,8 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi -{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 -echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then @@ -2921,10 +3018,10 @@ CFLAGS= fi fi -{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 -echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if test "${ac_cv_prog_cc_c89+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC @@ -2995,20 +3092,21 @@ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>conftest.er1 ac_status=$? grep -v '^ *+' conftest.er1 >conftest.err rm -f conftest.er1 cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg else - echo "$as_me: failed program was:" >&5 + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 @@ -3024,15 +3122,15 @@ # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) - { echo "$as_me:$LINENO: result: none needed" >&5 -echo "${ECHO_T}none needed" >&6; } ;; + { $as_echo "$as_me:$LINENO: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; xno) - { echo "$as_me:$LINENO: result: unsupported" >&5 -echo "${ECHO_T}unsupported" >&6; } ;; + { $as_echo "$as_me:$LINENO: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" - { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 -echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;; + { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac @@ -3042,101 +3140,121 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 -echo $ECHO_N "checking for a sed that does not truncate output... $ECHO_C" >&6; } -if test "${lt_cv_path_SED+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - # Loop through the user's path and test for sed and gsed. -# Then use that list of sed's as ones to test for truncation. -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +{ $as_echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if test "${ac_cv_path_SED+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + $as_unset ac_script || ac_script= + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for lt_ac_prog in sed gsed; do + for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$lt_ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$lt_ac_prog$ac_exec_ext"; }; then - lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" - fi + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue +# Check for GNU ac_path_SED and select it if it is found. 
+ # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS -lt_ac_max=0 -lt_ac_count=0 -# Add /usr/xpg4/bin/sed as it is typically found on Solaris -# along with /bin/sed that truncates output. -for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do - test ! -f $lt_ac_sed && continue - cat /dev/null > conftest.in - lt_ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >conftest.in - # Check for GNU sed and select it if it is found. - if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then - lt_cv_path_SED=$lt_ac_sed - break + if test -z "$ac_cv_path_SED"; then + { { $as_echo "$as_me:$LINENO: error: no acceptable sed could be found in \$PATH" >&5 +$as_echo "$as_me: error: no acceptable sed could be found in \$PATH" >&2;} + { (exit 1); exit 1; }; } fi - while true; do - cat conftest.in conftest.in >conftest.tmp - mv conftest.tmp conftest.in - cp conftest.in conftest.nl - echo >>conftest.nl - $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break - cmp -s conftest.out conftest.nl || break - # 10000 chars as input seems more than enough - test $lt_ac_count -gt 10 && break - lt_ac_count=`expr $lt_ac_count + 1` - if test $lt_ac_count -gt $lt_ac_max; then - lt_ac_max=$lt_ac_count - lt_cv_path_SED=$lt_ac_sed - fi - done -done +else + ac_cv_path_SED=$SED +fi fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed -SED=$lt_cv_path_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" -{ echo "$as_me:$LINENO: result: $SED" >&5 -echo "${ECHO_T}$SED" >&6; } -{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 -echo $ECHO_N "checking for grep that handles long lines and -e... $ECHO_C" >&6; } -if test "${ac_cv_path_GREP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - # Extract the first word of "grep ggrep" to use in msg output -if test -z "$GREP"; then -set dummy grep ggrep; ac_prog_name=$2 + + + + + + + + + +{ $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } if test "${ac_cv_path_GREP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else + if test -z "$GREP"; then ac_path_GREP_found=false -# Loop through the user's path and test for each of PROGNAME-LIST -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue - # Check for GNU ac_path_GREP and select it if it is found. + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue +# Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" - echo 'GREP' >> "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` @@ -3151,74 +3269,60 @@ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac - - $ac_path_GREP_found && break 3 + $ac_path_GREP_found && break 3 + done done done - -done IFS=$as_save_IFS - - -fi - -GREP="$ac_cv_path_GREP" -if test -z "$GREP"; then - { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 -echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + if test -z "$ac_cv_path_GREP"; then + { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +$as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } -fi - + fi else ac_cv_path_GREP=$GREP fi - fi -{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 -echo "${ECHO_T}$ac_cv_path_GREP" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" -{ echo "$as_me:$LINENO: checking for egrep" >&5 -echo $ECHO_N "checking for egrep... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } if test "${ac_cv_path_EGREP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else - # Extract the first word of "egrep" to use in msg output -if test -z "$EGREP"; then -set dummy egrep; ac_prog_name=$2 -if test "${ac_cv_path_EGREP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else + if test -z "$EGREP"; then ac_path_EGREP_found=false -# Loop through the user's path and test for each of PROGNAME-LIST -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue - # Check for GNU ac_path_EGREP and select it if it is found. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue +# Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" - echo 'EGREP' >> "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ac_count=`expr $ac_count + 1` @@ -3233,36 +3337,115 @@ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac - - $ac_path_EGREP_found && break 3 + $ac_path_EGREP_found && break 3 + done done done - -done IFS=$as_save_IFS - - -fi - -EGREP="$ac_cv_path_EGREP" -if test -z "$EGREP"; then - { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 -echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + if test -z "$ac_cv_path_EGREP"; then + { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +$as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} { (exit 1); exit 1; }; } -fi - + fi else ac_cv_path_EGREP=$EGREP fi - fi fi -{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 -echo "${ECHO_T}$ac_cv_path_EGREP" >&6; } +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" +{ $as_echo "$as_me:$LINENO: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... " >&6; } +if test "${ac_cv_path_FGREP+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue +# Check for GNU ac_path_FGREP and select it if it is found. 
+ # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done +done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + { { $as_echo "$as_me:$LINENO: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +$as_echo "$as_me: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then @@ -3274,8 +3457,8 @@ ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. - { echo "$as_me:$LINENO: checking for ld used by $CC" >&5 -echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6; } + { $as_echo "$as_me:$LINENO: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw @@ -3288,9 +3471,9 @@ [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld - ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` - while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do - ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; @@ -3304,14 +3487,14 @@ ;; esac elif test "$with_gnu_ld" = yes; then - { echo "$as_me:$LINENO: checking for GNU ld" >&5 -echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6; } + { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } else - { echo "$as_me:$LINENO: checking for non-GNU ld" >&5 -echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6; } + { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... 
" >&6; } fi if test "${lt_cv_path_LD+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR @@ -3341,19 +3524,19 @@ LD="$lt_cv_path_LD" if test -n "$LD"; then - { echo "$as_me:$LINENO: result: $LD" >&5 -echo "${ECHO_T}$LD" >&6; } + { $as_echo "$as_me:$LINENO: result: $LD" >&5 +$as_echo "$LD" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 -echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} +test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 +$as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} { (exit 1); exit 1; }; } -{ echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 -echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if test "${lt_cv_prog_gnu_ld+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 -echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6; } +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_gnu_ld" >&5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld -{ echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 -echo $ECHO_N "checking for $LD option to reload object files... $ECHO_C" >&6; } -if test "${lt_cv_ld_reload_flag+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_ld_reload_flag='-r' -fi -{ echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 -echo "${ECHO_T}$lt_cv_ld_reload_flag" >&6; } -reload_flag=$lt_cv_ld_reload_flag -case $reload_flag in -"" | " "*) ;; -*) reload_flag=" $reload_flag" ;; -esac -reload_cmds='$LD$reload_flag -o $output$reload_objs' -case $host_os in - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' - else - reload_cmds='$LD$reload_flag -o $output$reload_objs' - fi - ;; -esac -{ echo "$as_me:$LINENO: checking for BSD-compatible nm" >&5 -echo $ECHO_N "checking for BSD-compatible nm... $ECHO_C" >&6; } + + + + + + +{ $as_echo "$as_me:$LINENO: checking for BSD- or MS-compatible name lister (nm)" >&5 +$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if test "${lt_cv_path_NM+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. @@ -3441,1081 +3606,720 @@ done IFS="$lt_save_ifs" done - test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm + : ${lt_cv_path_NM=no} fi fi -{ echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 -echo "${ECHO_T}$lt_cv_path_NM" >&6; } -NM="$lt_cv_path_NM" +{ $as_echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$ac_tool_prefix"; then + for ac_prog in "dumpbin -symbols" "link -dump -symbols" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_DUMPBIN+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -{ echo "$as_me:$LINENO: checking whether ln -s works" >&5 -echo $ECHO_N "checking whether ln -s works... $ECHO_C" >&6; } -LN_S=$as_ln_s -if test "$LN_S" = "ln -s"; then - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:$LINENO: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } else - { echo "$as_me:$LINENO: result: no, using $LN_S" >&5 -echo "${ECHO_T}no, using $LN_S" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -{ echo "$as_me:$LINENO: checking how to recognize dependent libraries" >&5 -echo $ECHO_N "checking how to recognize dependent libraries... $ECHO_C" >&6; } -if test "${lt_cv_deplibs_check_method+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_file_magic_cmd='$MAGIC_CMD' -lt_cv_file_magic_test_file= -lt_cv_deplibs_check_method='unknown' -# Need to set the preceding variable on all platforms that support -# interlibrary dependencies. -# 'none' -- dependencies not supported. -# `unknown' -- same as none, but documents that we really don't know. -# 'pass_all' -- all dependencies passed with no checks. -# 'test_compile' -- check by making test program. -# 'file_magic [[regex]]' -- check by looking for files in library path -# which responds to the $file_magic_cmd with a given extended regex. -# If you have `file' or equivalent on your system and you're not sure -# whether `pass_all' will *always* work, you probably want this one. -case $host_os in -aix4* | aix5*) - lt_cv_deplibs_check_method=pass_all - ;; + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in "dumpbin -symbols" "link -dump -symbols" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -beos*) - lt_cv_deplibs_check_method=pass_all - ;; +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -bsdi[45]*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' - lt_cv_file_magic_cmd='/usr/bin/file -L' - lt_cv_file_magic_test_file=/shlib/libc.so - ;; -cygwin*) - # func_win32_libid is a shell function defined in ltmain.sh - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - ;; + test -n "$ac_ct_DUMPBIN" && break +done -mingw* | pw32*) - # Base MSYS/MinGW do not provide the 'file' command needed by - # func_win32_libid shell function, so use a weaker test based on 'objdump', - # unless we find 'file', for example because we are cross-compiling. - if ( file / ) >/dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" else - lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' - lt_cv_file_magic_cmd='$OBJDUMP -f' + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN fi - ;; +fi -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. 
- lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" fi - ;; +fi +test -z "$NM" && NM=nm -gnu*) - lt_cv_deplibs_check_method=pass_all - ;; -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; -interix[3-9]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' - ;; -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; -# This must be Linux ELF. -linux* | k*bsd*-gnu) - lt_cv_deplibs_check_method=pass_all - ;; -netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' +{ $as_echo "$as_me:$LINENO: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... " >&6; } +if test "${lt_cv_nm_interface+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:3741: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:3744: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:3747: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; + rm -f conftest* +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } -nto-qnx*) - lt_cv_deplibs_check_method=unknown - ;; +{ $as_echo "$as_me:$LINENO: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... 
" >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi -openbsd*) - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - fi - ;; +# find the maximum length of command line arguments +{ $as_echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if test "${lt_cv_sys_max_cmd_len+set}" = set; then + $as_echo_n "(cached) " >&6 +else + i=0 + teststring="ABCD" -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; ;; - ncr) - lt_cv_deplibs_check_method=pass_all + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" - lt_cv_file_magic_test_file=/lib/libc.so + + osf*) + # Dr. 
Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi ;; - siemens) - lt_cv_deplibs_check_method=pass_all + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 ;; - pc) - lt_cv_deplibs_check_method=pass_all + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`$SHELL $0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ + = "XX$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi ;; esac - ;; -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; -esac +fi +if test -n $lt_cv_sys_max_cmd_len ; then + { $as_echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:$LINENO: result: none" >&5 +$as_echo "none" >&6; } fi -{ echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 -echo "${ECHO_T}$lt_cv_deplibs_check_method" >&6; } -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown +max_cmd_len=$lt_cv_sys_max_cmd_len -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -# Allow CC to be a program name with arguments. -compiler=$CC +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +{ $as_echo "$as_me:$LINENO: checking whether the shell understands some XSI constructs" >&5 +$as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; } +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +{ $as_echo "$as_me:$LINENO: result: $xsi_shell" >&5 +$as_echo "$xsi_shell" >&6; } + + +{ $as_echo "$as_me:$LINENO: checking whether the shell understands \"+=\"" >&5 +$as_echo_n "checking whether the shell understands \"+=\"... " >&6; } +lt_shell_append=no +( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +{ $as_echo "$as_me:$LINENO: result: $lt_shell_append" >&5 +$as_echo "$lt_shell_append" >&6; } -# Check whether --enable-libtool-lock was given. -if test "${enable_libtool_lock+set}" = set; then - enableval=$enable_libtool_lock; +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false fi -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE="32" - ;; - *ELF-64*) - HPUX_IA64_MODE="64" - ;; - esac - fi - rm -rf conftest* + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' ;; -*-*-irix6*) - # Find out which ABI we are using. - echo '#line 3698 "configure"' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - if test "$lt_cv_prog_gnu_ld" = yes; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' ;; +esac -x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ -s390*-*linux*|sparc*-*linux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_i386" - ;; - ppc64-*linux*|powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - ppc*-*linux*|powerpc*-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -belf" - { echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 -echo $ECHO_N "checking whether the C compiler needs -belf... $ECHO_C" >&6; } -if test "${lt_cv_cc_needs_belf+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - lt_cv_cc_needs_belf=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - lt_cv_cc_needs_belf=no -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -fi -{ echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 -echo "${ECHO_T}$lt_cv_cc_needs_belf" >&6; } - if test x"$lt_cv_cc_needs_belf" != x"yes"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS="$SAVE_CFLAGS" - fi - ;; -sparc*-*solaris*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) LD="${LD-ld} -64" ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; +{ $as_echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... 
" >&6; } +if test "${lt_cv_ld_reload_flag+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; esac -need_locks="$enable_libtool_lock" -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 -echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if test "${ac_cv_prog_CPP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - : -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - # Broken: success on invalid input. 
-continue -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_OBJDUMP+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then - break -fi - - done - ac_cv_prog_CPP=$CPP +done +IFS=$as_save_IFS fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP fi -{ echo "$as_me:$LINENO: result: $CPP" >&5 -echo "${ECHO_T}$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - : +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - # Broken: fails on valid input. -continue + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -rm -f conftest.err conftest.$ac_ext - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - # Broken: success on invalid input. -continue +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then + $as_echo_n "(cached) " >&6 else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS - # Passes both tests. -ac_preproc_ok=: -break fi - -rm -f conftest.err conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then - : +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } else - { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details." >&5 -echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details." >&2;} - { (exit 1); exit 1; }; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5 -echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; } -if test "${ac_cv_header_stdc+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? 
- grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - ac_cv_header_stdc=yes + OBJDUMP=$ac_ct_OBJDUMP + fi else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_cv_header_stdc=no + OBJDUMP="$ac_cv_prog_OBJDUMP" fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +test -z "$OBJDUMP" && OBJDUMP=objdump -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then - : -else - ac_cv_header_stdc=no -fi -rm -f conftest* -fi -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then - : -else - ac_cv_header_stdc=no -fi -rm -f conftest* -fi -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then - : -else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -rm -f conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { ac_try='./conftest$ac_exeext' - { (case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - : + +{ $as_echo "$as_me:$LINENO: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if test "${lt_cv_deplibs_check_method+set}" = set; then + $as_echo_n "(cached) " >&6 else - echo "$as_me: program exited with status $ac_status" >&5 -echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. 
+# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. -( exit $ac_status ) -ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext -fi +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; +beos*) + lt_cv_deplibs_check_method=pass_all + ;; -fi -fi -{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 -echo "${ECHO_T}$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; -cat >>confdefs.h <<\_ACEOF -#define STDC_HEADERS 1 -_ACEOF +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; -fi +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; -# On IRIX 5.3, sys/types and inttypes.h are conflicting. +cegcc) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; +# This must be Linux ELF. +linux* | k*bsd*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do -as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` -{ echo "$as_me:$LINENO: checking for $ac_header" >&5 -echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } -if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$ac_includes_default +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; -#include <$ac_header> -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then - eval "$as_ac_Header=yes" -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; - eval "$as_ac_Header=no" -fi +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -ac_res=`eval echo '${'$as_ac_Header'}'` - { echo "$as_me:$LINENO: result: $ac_res" >&5 -echo "${ECHO_T}$ac_res" >&6; } -if test `eval echo '${'$as_ac_Header'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; -fi +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; -done +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; -for ac_header in dlfcn.h -do -as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` -if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then - { echo "$as_me:$LINENO: checking for $ac_header" >&5 -echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } -if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -fi -ac_res=`eval echo '${'$as_ac_Header'}'` - { echo "$as_me:$LINENO: result: $ac_res" >&5 -echo "${ECHO_T}$ac_res" >&6; } -else - # Is the header compilable? -{ echo "$as_me:$LINENO: checking $ac_header usability" >&5 -echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$ac_includes_default -#include <$ac_header> -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then - ac_header_compiler=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_header_compiler=no fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 -echo "${ECHO_T}$ac_header_compiler" >&6; } -# Is the header present? -{ echo "$as_me:$LINENO: checking $ac_header presence" >&5 -echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include <$ac_header> -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - ac_header_preproc=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_header_preproc=no -fi -rm -f conftest.err conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 -echo "${ECHO_T}$ac_header_preproc" >&6; } -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in - yes:no: ) - { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 -echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 -echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} - ac_header_preproc=yes - ;; - no:yes:* ) - { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 -echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 -echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 -echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 -echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} - { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 -echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} - ;; -esac -{ echo "$as_me:$LINENO: checking for $ac_header" >&5 -echo $ECHO_N "checking for $ac_header... 
$ECHO_C" >&6; } -if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - eval "$as_ac_Header=\$ac_header_preproc" -fi -ac_res=`eval echo '${'$as_ac_Header'}'` - { echo "$as_me:$LINENO: result: $ac_res" >&5 -echo "${ECHO_T}$ac_res" >&6; } -fi -if test `eval echo '${'$as_ac_Header'}'` = yes; then - cat >>confdefs.h <<_ACEOF -#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF -fi -done -ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu -if test -z "$CXX"; then - if test -n "$CCC"; then - CXX=$CCC - else - if test -n "$ac_tool_prefix"; then - for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_CXX+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_AR+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$CXX"; then - ac_cv_prog_CXX="$CXX" # Let the user override the test. + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -4524,8 +4328,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_AR="${ac_tool_prefix}ar" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -4534,32 +4338,28 @@ fi fi -CXX=$ac_cv_prog_CXX -if test -n "$CXX"; then - { echo "$as_me:$LINENO: result: $CXX" >&5 -echo "${ECHO_T}$CXX" >&6; } +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:$LINENO: result: $AR" >&5 +$as_echo "$AR" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi - test -n "$CXX" && break - done fi -if test -z "$CXX"; then - ac_ct_CXX=$CXX - for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_AR+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$ac_ct_CXX"; then - ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. 
+ if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -4568,8 +4368,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CXX="$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_ac_ct_AR="ar" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -4578,914 +4378,318 @@ fi fi -ac_ct_CXX=$ac_cv_prog_ac_ct_CXX -if test -n "$ac_ct_CXX"; then - { echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 -echo "${ECHO_T}$ac_ct_CXX" >&6; } +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi - - test -n "$ac_ct_CXX" && break -done - - if test "x$ac_ct_CXX" = x; then - CXX="g++" + if test "x$ac_ct_AR" = x; then + AR="false" else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; esac - CXX=$ac_ct_CXX + AR=$ac_ct_AR fi +else + AR="$ac_cv_prog_AR" fi +test -z "$AR" && AR=ar +test -z "$AR_FLAGS" && AR_FLAGS=cru + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_STRIP+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 fi +done +done +IFS=$as_save_IFS + fi -# Provide some information about the compiler. -echo "$as_me:$LINENO: checking for C++ compiler version" >&5 -ac_compiler=`set X $ac_compile; echo $2` -{ (ac_try="$ac_compiler --version >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler --version >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); } -{ (ac_try="$ac_compiler -v >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler -v >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } -{ (ac_try="$ac_compiler -V >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler -V >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - -{ echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 -echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6; } -if test "${ac_cv_cxx_compiler_gnu+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:$LINENO: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - ac_compiler_gnu=yes +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then + $as_echo_n "(cached) " >&6 else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS - ac_compiler_gnu=no fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_cxx_compiler_gnu=$ac_compiler_gnu - fi -{ echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 -echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6; } -GXX=`test $ac_compiler_gnu = yes && echo yes` -ac_test_CXXFLAGS=${CXXFLAGS+set} -ac_save_CXXFLAGS=$CXXFLAGS -{ echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 -echo $ECHO_N "checking whether $CXX accepts -g... 
$ECHO_C" >&6; } -if test "${ac_cv_prog_cxx_g+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_save_cxx_werror_flag=$ac_cxx_werror_flag - ac_cxx_werror_flag=yes - ac_cv_prog_cxx_g=no - CXXFLAGS="-g" - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -int -main () -{ +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - ac_cv_prog_cxx_g=yes + STRIP=$ac_ct_STRIP + fi else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + STRIP="$ac_cv_prog_STRIP" +fi - CXXFLAGS="" - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +test -z "$STRIP" && STRIP=: -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - : -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cxx_werror_flag=$ac_save_cxx_werror_flag - CXXFLAGS="-g" - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then - ac_cv_prog_cxx_g=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_RANLIB+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:$LINENO: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_cxx_werror_flag=$ac_save_cxx_werror_flag + fi -{ echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 -echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6; } -if test "$ac_test_CXXFLAGS" = set; then - CXXFLAGS=$ac_save_CXXFLAGS -elif test $ac_cv_prog_cxx_g = yes; then - if test "$GXX" = yes; then - CXXFLAGS="-g -O2" - else - CXXFLAGS="-g" - fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test "$GXX" = yes; then - CXXFLAGS="-O2" - else - CXXFLAGS= + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 fi -fi -ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu - - +done +done +IFS=$as_save_IFS -if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu -{ echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 -echo $ECHO_N "checking how to run the C++ preprocessor... 
$ECHO_C" >&6; } -if test -z "$CXXCPP"; then - if test "${ac_cv_prog_CXXCPP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } else - # Double quotes because CXXCPP needs to be expanded - for CXXCPP in "$CXX -E" "/lib/cpp" - do - ac_preproc_ok=false -for ac_cxx_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || - test ! -s conftest.err - }; then - : + RANLIB=$ac_ct_RANLIB + fi else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - # Broken: fails on valid input. -continue + RANLIB="$ac_cv_prog_RANLIB" fi -rm -f conftest.err conftest.$ac_ext +test -z "$RANLIB" && RANLIB=: - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || - test ! -s conftest.err - }; then - # Broken: success on invalid input. -continue -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
-rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then - break -fi - done - ac_cv_prog_CXXCPP=$CXXCPP -fi - CXXCPP=$ac_cv_prog_CXXCPP -else - ac_cv_prog_CXXCPP=$CXXCPP -fi -{ echo "$as_me:$LINENO: result: $CXXCPP" >&5 -echo "${ECHO_T}$CXXCPP" >&6; } -ac_preproc_ok=false -for ac_cxx_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || - test ! -s conftest.err - }; then - : -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= - # Broken: fails on valid input. -continue +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi -rm -f conftest.err conftest.$ac_ext - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -#include -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || - test ! -s conftest.err - }; then - # Broken: success on invalid input. -continue -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then - : -else - { { echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check -See \`config.log' for more details." >&5 -echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check -See \`config.log' for more details." 
>&2;} - { (exit 1); exit 1; }; } -fi -ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu -fi -ac_ext=f -ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' -ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_f77_compiler_gnu -if test -n "$ac_tool_prefix"; then - for ac_prog in g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 xlf90 f90 pgf90 pghpf epcf90 gfortran g95 xlf95 f95 fort ifort ifc efc pgf95 lf95 ftn - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_F77+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -n "$F77"; then - ac_cv_prog_F77="$F77" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_F77="$ac_tool_prefix$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done -IFS=$as_save_IFS -fi -fi -F77=$ac_cv_prog_F77 -if test -n "$F77"; then - { echo "$as_me:$LINENO: result: $F77" >&5 -echo "${ECHO_T}$F77" >&6; } -else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi - test -n "$F77" && break - done -fi -if test -z "$F77"; then - ac_ct_F77=$F77 - for ac_prog in g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 xlf90 f90 pgf90 pghpf epcf90 gfortran g95 xlf95 f95 fort ifort ifc efc pgf95 lf95 ftn -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_ac_ct_F77+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -n "$ac_ct_F77"; then - ac_cv_prog_ac_ct_F77="$ac_ct_F77" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_F77="$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done -IFS=$as_save_IFS -fi -fi -ac_ct_F77=$ac_cv_prog_ac_ct_F77 -if test -n "$ac_ct_F77"; then - { echo "$as_me:$LINENO: result: $ac_ct_F77" >&5 -echo "${ECHO_T}$ac_ct_F77" >&6; } -else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi - test -n "$ac_ct_F77" && break -done - if test "x$ac_ct_F77" = x; then - F77="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools -whose name does not start with the host triplet. If you think this -configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools -whose name does not start with the host triplet. 
If you think this -configuration is useful to you, please write to autoconf@gnu.org." >&2;} -ac_tool_warned=yes ;; -esac - F77=$ac_ct_F77 - fi -fi -# Provide some information about the compiler. -echo "$as_me:$LINENO: checking for Fortran 77 compiler version" >&5 -ac_compiler=`set X $ac_compile; echo $2` -{ (ac_try="$ac_compiler --version >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler --version >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } -{ (ac_try="$ac_compiler -v >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler -v >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } -{ (ac_try="$ac_compiler -V >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compiler -V >&5") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } -rm -f a.out - -# If we don't use `.F' as extension, the preprocessor is not run on the -# input file. (Note that this only needs to work for GNU compilers.) -ac_save_ext=$ac_ext -ac_ext=F -{ echo "$as_me:$LINENO: checking whether we are using the GNU Fortran 77 compiler" >&5 -echo $ECHO_N "checking whether we are using the GNU Fortran 77 compiler... $ECHO_C" >&6; } -if test "${ac_cv_f77_compiler_gnu+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF - program main -#ifndef __GNUC__ - choke me -#endif - - end -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_f77_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - ac_compiler_gnu=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_compiler_gnu=no -fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_f77_compiler_gnu=$ac_compiler_gnu - -fi -{ echo "$as_me:$LINENO: result: $ac_cv_f77_compiler_gnu" >&5 -echo "${ECHO_T}$ac_cv_f77_compiler_gnu" >&6; } -ac_ext=$ac_save_ext -ac_test_FFLAGS=${FFLAGS+set} -ac_save_FFLAGS=$FFLAGS -FFLAGS= -{ echo "$as_me:$LINENO: checking whether $F77 accepts -g" >&5 -echo $ECHO_N "checking whether $F77 accepts -g... $ECHO_C" >&6; } -if test "${ac_cv_prog_f77_g+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - FFLAGS=-g -cat >conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_f77_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then - ac_cv_prog_f77_g=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_cv_prog_f77_g=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ echo "$as_me:$LINENO: result: $ac_cv_prog_f77_g" >&5 -echo "${ECHO_T}$ac_cv_prog_f77_g" >&6; } -if test "$ac_test_FFLAGS" = set; then - FFLAGS=$ac_save_FFLAGS -elif test $ac_cv_prog_f77_g = yes; then - if test "x$ac_cv_f77_compiler_gnu" = xyes; then - FFLAGS="-g -O2" - else - FFLAGS="-g" - fi -else - if test "x$ac_cv_f77_compiler_gnu" = xyes; then - FFLAGS="-O2" - else - FFLAGS= - fi -fi -G77=`test $ac_compiler_gnu = yes && echo yes` -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -# Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! -# find the maximum length of command line arguments -{ echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 -echo $ECHO_N "checking the maximum length of command line arguments... $ECHO_C" >&6; } -if test "${lt_cv_sys_max_cmd_len+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - i=0 - teststring="ABCD" - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - cygwin* | mingw*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. 
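# A standalone sketch of the same probe as the BSD sysctl branch above and the
# generic getconf fallback further down: read the kernel's ARG_MAX and keep
# three quarters of it as a safety zone.  getconf/sysctl availability and the
# variable name max_cmd_len are assumptions of this example, not libtool names.
max_cmd_len=`(getconf ARG_MAX) 2>/dev/null`
if test -z "$max_cmd_len" && test -x /sbin/sysctl; then
  max_cmd_len=`/sbin/sysctl -n kern.argmax 2>/dev/null`
fi
if test -n "$max_cmd_len"; then
  max_cmd_len=`expr $max_cmd_len / 4`
  max_cmd_len=`expr $max_cmd_len \* 3`
else
  max_cmd_len=8192    # conservative floor, as used for Win9x/ME and AmigaOS above
fi
echo "usable command-line length: $max_cmd_len bytes"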
- lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - while (test "X"`$SHELL $0 --fallback-echo "X$teststring" 2>/dev/null` \ - = "XX$teststring") >/dev/null 2>&1 && - new_result=`expr "X$teststring" : ".*" 2>&1` && - lt_cv_sys_max_cmd_len=$new_result && - test $i != 17 # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - teststring= - # Add a significant safety factor because C++ compilers can tack on massive - # amounts of additional arguments before passing them to the linker. - # It appears as though 1/2 is a usable value. - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; - esac -fi -if test -n $lt_cv_sys_max_cmd_len ; then - { echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5 -echo "${ECHO_T}$lt_cv_sys_max_cmd_len" >&6; } -else - { echo "$as_me:$LINENO: result: none" >&5 -echo "${ECHO_T}none" >&6; } -fi +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +# Allow CC to be a program name with arguments. +compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. -{ echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 -echo $ECHO_N "checking command to parse $NM output from $compiler object... $ECHO_C" >&6; } +{ $as_echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. @@ -5497,33 +4701,18 @@ # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' -# Transform an extracted symbol line into a proper C declaration -lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern int \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - # Define system-specific variables. 
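# The symbol pipe being configured here can be reproduced by hand.  This sketch
# compiles a two-symbol test file, extracts the symbols with nm, and turns them
# into C declarations; the file names and the simplified '[BCDT]' symbol-code
# class are illustrative, and a typical ELF/GNU toolchain is assumed.
cat > sym_sketch.c <<'EOF'
char nm_test_var;
void nm_test_func(void){}
EOF
cc -c sym_sketch.c -o sym_sketch.o
# first sed: raw nm line -> "CODE name name", like lt_cv_sys_global_symbol_pipe
# second sed: extracted line -> extern declaration, like ..._symbol_to_cdecl
nm sym_sketch.o \
  | sed -n -e 's/^.*[ ]\([BCDT]\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p' \
  | sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' \
           -e 's/^[BCDT]* .* \(.*\)$/extern char \1;/p'
# expected output, roughly:
#   extern char nm_test_var;
#   extern int nm_test_func();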
case $host_os in aix*) symcode='[BCDT]' ;; -cygwin* | mingw* | pw32*) +cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; -hpux*) # Its linker distinguishes data from code symbols +hpux*) if test "$host_cpu" = ia64; then symcode='[ABCDEGRST]' fi - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - ;; -linux* | k*bsd*-gnu) - if test "$host_cpu" = ia64; then - symcode='[ABCDGIRSTW]' - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'" - fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' @@ -5548,56 +4737,84 @@ ;; esac +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) - opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[ABCDGIRSTW]' ;; -esac - -# Try without a prefix undercore, then with it. +# Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* - cat > conftest.$ac_ext < conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; -void nm_test_func(){} +void nm_test_func(void); +void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} -EOF +_LT_EOF if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then # Now try to grab the symbols. nlist=conftest.nm if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5 (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then @@ -5607,42 +4824,44 @@ fi # Make sure that we snagged all the symbols we need. - if grep ' nm_test_var$' "$nlist" >/dev/null; then - if grep ' nm_test_func$' "$nlist" >/dev/null; then - cat < conftest.$ac_ext + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext #ifdef __cplusplus extern "C" { #endif -EOF +_LT_EOF # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | grep -v main >> conftest.$ac_ext' + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - cat <> conftest.$ac_ext -#if defined (__STDC__) && __STDC__ -# define lt_ptr_t void * -#else -# define lt_ptr_t char * -# define const -#endif + cat <<_LT_EOF >> conftest.$ac_ext -/* The mapping between symbol names and symbols. */ +/* The mapping between symbol names and symbols. */ const struct { const char *name; - lt_ptr_t address; + void *address; } -lt_preloaded_symbols[] = +lt__PROGRAM__LTX_preloaded_symbols[] = { -EOF - $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr_t) \&\2},/" < "$nlist" | grep -v main >> conftest.$ac_ext - cat <<\EOF >> conftest.$ac_ext - {0, (lt_ptr_t) 0} + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} }; +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + #ifdef __cplusplus } #endif -EOF +_LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_save_LIBS="$LIBS" @@ -5652,7 +4871,7 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 (eval $ac_link) 2>&5 ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } && test -s conftest${ac_exeext}; then pipe_works=yes fi @@ -5671,7 +4890,7 @@ echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi - rm -f conftest* conftst* + rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then @@ -5687,87 +4906,270 @@ lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - { echo "$as_me:$LINENO: result: failed" >&5 -echo "${ECHO_T}failed" >&6; } + { $as_echo "$as_me:$LINENO: result: failed" >&5 +$as_echo "failed" >&6; } else - { echo "$as_me:$LINENO: result: ok" >&5 -echo "${ECHO_T}ok" >&6; } + { $as_echo "$as_me:$LINENO: result: ok" >&5 +$as_echo "ok" >&6; } fi -{ echo "$as_me:$LINENO: checking for objdir" >&5 -echo $ECHO_N "checking for objdir... $ECHO_C" >&6; } -if test "${lt_cv_objdir+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null -fi -{ echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 -echo "${ECHO_T}$lt_cv_objdir" >&6; } -objdir=$lt_cv_objdir -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -Xsed='sed -e 1s/^X//' -sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g' -# Same as above, but do not quote variable references. -double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g' -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' -# Constants: -rm="rm -f" -# Global variables: -default_ofile=libtool -can_build_shared=yes -# All known linkers require a `.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a -ltmain="$ac_aux_dir/ltmain.sh" -ofile="$default_ofile" -with_gnu_ld="$lt_cv_prog_gnu_ld" -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. -set dummy ${ac_tool_prefix}ar; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_AR+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + + + + + + + + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line 4969 "configure"' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if test "${lt_cv_cc_needs_belf+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$AR"; then - ac_cv_prog_AR="$AR" # Let the user override the test. + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + lt_cv_cc_needs_belf=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + lt_cv_cc_needs_belf=no +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_DSYMUTIL+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -5776,8 +5178,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_AR="${ac_tool_prefix}ar" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5786,28 +5188,28 @@ fi fi -AR=$ac_cv_prog_AR -if test -n "$AR"; then - { echo "$as_me:$LINENO: result: $AR" >&5 -echo "${ECHO_T}$AR" >&6; } +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:$LINENO: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi fi -if test -z "$ac_cv_prog_AR"; then - ac_ct_AR=$AR - # Extract the first word of "ar", so it can be a program name with args. -set dummy ar; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_ac_ct_AR+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$ac_ct_AR"; then - ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -5816,8 +5218,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_AR="ar" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5826,44 +5228,44 @@ fi fi -ac_ct_AR=$ac_cv_prog_ac_ct_AR -if test -n "$ac_ct_AR"; then - { echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 -echo "${ECHO_T}$ac_ct_AR" >&6; } +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi - if test "x$ac_ct_AR" = x; then - AR="false" + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; esac - AR=$ac_ct_AR + DSYMUTIL=$ac_ct_DSYMUTIL fi else - AR="$ac_cv_prog_AR" + DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_RANLIB+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_NMEDIT+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -5872,8 +5274,8 @@ test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5882,28 +5284,28 @@ fi fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { echo "$as_me:$LINENO: result: $RANLIB" >&5 -echo "${ECHO_T}$RANLIB" >&6; } +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:$LINENO: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -5912,8 +5314,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5922,44 +5324,44 @@ fi fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5 -echo "${ECHO_T}$ac_ct_RANLIB" >&6; } +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." 
>&2;} ac_tool_warned=yes ;; esac - RANLIB=$ac_ct_RANLIB + NMEDIT=$ac_ct_NMEDIT fi else - RANLIB="$ac_cv_prog_RANLIB" + NMEDIT="$ac_cv_prog_NMEDIT" fi -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_STRIP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_LIPO+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -5968,8 +5370,8 @@ test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5978,28 +5380,28 @@ fi fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { echo "$as_me:$LINENO: result: $STRIP" >&5 -echo "${ECHO_T}$STRIP" >&6; } +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:$LINENO: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then + $as_echo_n "(cached) " >&6 else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -6008,8 +5410,8 @@ test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_STRIP="strip" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -6018,5841 +5420,3346 @@ fi fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5 -echo "${ECHO_T}$ac_ct_STRIP" >&6; } +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi - if test "x$ac_ct_STRIP" = x; then - STRIP=":" + if test "x$ac_ct_LIPO" = x; then + LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) -{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&5 -echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools whose name does not start with the host triplet. If you think this configuration is useful to you, please write to autoconf@gnu.org." >&2;} ac_tool_warned=yes ;; esac - STRIP=$ac_ct_STRIP + LIPO=$ac_ct_LIPO fi else - STRIP="$ac_cv_prog_STRIP" + LIPO="$ac_cv_prog_LIPO" fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_OTOOL+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -old_CC="$CC" -old_CFLAGS="$CFLAGS" - -# Set sane defaults for various variables -test -z "$AR" && AR=ar -test -z "$AR_FLAGS" && AR_FLAGS=cru -test -z "$AS" && AS=as -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$DLLTOOL" && DLLTOOL=dlltool -test -z "$LD" && LD=ld -test -z "$LN_S" && LN_S="ln -s" -test -z "$MAGIC_CMD" && MAGIC_CMD=file -test -z "$NM" && NM=nm -test -z "$SED" && SED=sed -test -z "$OBJDUMP" && OBJDUMP=objdump -test -z "$RANLIB" && RANLIB=: -test -z "$STRIP" && STRIP=: -test -z "$ac_objext" && ac_objext=o - -# Determine commands to create old-style static archives. 
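# Expanded by hand, the old_archive_cmds defined in this block are just the
# classic ar/ranlib sequence; libfoo.a and the object files are placeholder
# names for illustration.
ar cru libfoo.a foo.o bar.o    # $AR $AR_FLAGS $oldlib$oldobjs
ranlib libfoo.a                # appended when $RANLIB is available
chmod 644 libfoo.a             # old_postinstall_cmds
# (on OpenBSD the postinstall step uses "$RANLIB -t" instead, as the case below shows)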
-old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" fi - -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:$LINENO: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -# Only perform the check for file, if the check method requires it -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - { echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 -echo $ECHO_N "checking for ${ac_tool_prefix}file... $ECHO_C" >&6; } -if test "${lt_cv_path_MAGIC_CMD+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then + $as_echo_n "(cached) " >&6 else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f $ac_dir/${ac_tool_prefix}file; then - lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <&2 + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" fi -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - { echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 -echo "${ECHO_T}$MAGIC_CMD" >&6; } + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_OTOOL64+set}" = set; then + $as_echo_n "(cached) " >&6 else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - { echo "$as_me:$LINENO: checking for file" >&5 -echo $ECHO_N "checking for file... $ECHO_C" >&6; } -if test "${lt_cv_path_MAGIC_CMD+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:$LINENO: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. 
- if test -f $ac_dir/file; then - lt_cv_path_MAGIC_CMD="$ac_dir/file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <&2 + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org -EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; -esac fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - { echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 -echo "${ECHO_T}$MAGIC_CMD" >&6; } +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" else - MAGIC_CMD=: + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +$as_echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 fi +else + OTOOL64="$ac_cv_prog_OTOOL64" fi - fi - ;; -esac -enable_dlopen=no -enable_win32_dll=no -# Check whether --enable-libtool-lock was given. -if test "${enable_libtool_lock+set}" = set; then - enableval=$enable_libtool_lock; -fi -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes -# Check whether --with-pic was given. 
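# For reference, the file_magic deplibs check whose `file` helper was located
# above boils down to running `file` on a candidate library and grepping its
# output for a platform pattern.  The regex and library path below are only
# examples for an ELF system, not values libtool computes.
file_magic_regex='ELF [0-9][0-9]*-bit [LM]SB shared object'
if file /usr/lib/libz.so 2>/dev/null | grep -E "$file_magic_regex" >/dev/null; then
  echo "recognized as a shared library"
fi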
-if test "${with_pic+set}" = set; then - withval=$with_pic; pic_mode="$withval" -else - pic_mode=default -fi -test -z "$pic_mode" && pic_mode=default -# Use C for the default configuration in the libtool script -tagname= -lt_save_CC="$CC" -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -# Source file extension for C test sources. -ac_ext=c -# Object file extension for compiled C test sources. -objext=o -objext=$objext -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -# Allow CC to be a program name with arguments. -compiler=$CC -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* - -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* -lt_prog_compiler_no_builtin_flag= -if test "$GCC" = yes; then - lt_prog_compiler_no_builtin_flag=' -fno-builtin' -{ echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 -echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... $ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_prog_compiler_rtti_exceptions=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-fno-rtti -fno-exceptions" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6327: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:6331: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_rtti_exceptions=yes - fi - fi - $rm conftest* -fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6; } -if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then - lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" + { $as_echo "$as_me:$LINENO: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if test "${lt_cv_apple_cc_single_mod+set}" = set; then + $as_echo_n "(cached) " >&6 else - : + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + { $as_echo "$as_me:$LINENO: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if test "${lt_cv_ld_exported_symbols_list+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -fi +int +main () +{ -lt_prog_compiler_wl= -lt_prog_compiler_pic= -lt_prog_compiler_static= + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + lt_cv_ld_exported_symbols_list=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 -echo $ECHO_N "checking for $compiler option to produce PIC... 
$ECHO_C" >&6; } + lt_cv_ld_exported_symbols_list=no +fi - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_static='-static' +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - fi - ;; - - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' - ;; + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test "${ac_cv_prog_CPP+set}" = set; then + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic='-DDLL_EXPORT' - ;; + # Broken: fails on valid input. +continue +fi - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - ;; +rm -f conftest.err conftest.$ac_ext - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - lt_prog_compiler_can_build_shared=no - enable_shared=no - ;; + # Passes both tests. +ac_preproc_ok=: +break +fi - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic=-Kconform_pic - fi - ;; +rm -f conftest.err conftest.$ac_ext - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - ;; +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. 
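# In use, the two knobs chosen in this section are straightforward: the PIC
# flag is added when compiling objects destined for a shared library, and the
# "wl" prefix lets the compiler driver forward raw options to the linker.
# foo.c, libfoo.so and the rpath below are illustrative names only.
cc -fPIC -c foo.c                                        # lt_prog_compiler_pic
cc -shared -o libfoo.so foo.o \
   -Wl,-soname,libfoo.so.1 -Wl,-rpath,/usr/local/lib     # lt_prog_compiler_wl='-Wl,'
# Compilers that do not understand -Wl, use a different spelling, e.g. Sun's
# '-Qoption ld <flag>', which is what the case statement below selects.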
- case $host_os in - aix*) - lt_prog_compiler_wl='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - else - lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' - fi - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - lt_prog_compiler_pic='-qnocommon' - lt_prog_compiler_wl='-Wl,' - ;; - esac - ;; + done + ac_cv_prog_CPP=$CPP - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic='-DDLL_EXPORT' - ;; - - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static='${wl}-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static='-non_shared' - ;; - - newsos6) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:$LINENO: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + # <limits.h> exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - linux* | k*bsd*-gnu) - case $cc_basename in - icc* | ecc*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' - ;; - ccc*) - lt_prog_compiler_wl='-Wl,' - # All Alpha code is PIC. 
- lt_prog_compiler_static='-non_shared' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Wl,' - ;; - *Sun\ F*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='' - ;; - esac - ;; - esac - ;; + # Broken: fails on valid input. +continue +fi - osf3* | osf4* | osf5*) - lt_prog_compiler_wl='-Wl,' - # All OSF/1 code is PIC. - lt_prog_compiler_static='-non_shared' - ;; +rm -f conftest.err conftest.$ac_ext - rdos*) - lt_prog_compiler_static='-non_shared' - ;; + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <ac_nonexistent.h> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - solaris*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; - esac - ;; + # Passes both tests. +ac_preproc_ok=: +break +fi - sunos4*) - lt_prog_compiler_wl='-Qoption ld ' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; +rm -f conftest.err conftest.$ac_ext - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { $as_echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi - sysv4*MP*) - if test -d /usr/nec ;then - lt_prog_compiler_pic='-Kconform_pic' - lt_prog_compiler_static='-Bstatic' - fi - ;; +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - unicos*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_can_build_shared=no - ;; +{ $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if test "${ac_cv_header_stdc+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include <float.h> - uts4*) - lt_prog_compiler_pic='-pic' - lt_prog_compiler_static='-Bstatic' - ;; +int +main () +{ - *) - lt_prog_compiler_can_build_shared=no - ;; - esac - fi + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_header_stdc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic" >&6; } + ac_cv_header_stdc=no +fi -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic"; then +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 -echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <string.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : else - lt_prog_compiler_pic_works=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic -DPIC" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6617: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:6621: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works=yes - fi - fi - $rm conftest* + ac_cv_header_stdc=no +fi +rm -f conftest* fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works" >&6; } -if test x"$lt_prog_compiler_pic_works" = xyes; then - case $lt_prog_compiler_pic in - "" | " "*) ;; - *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; - esac +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <stdlib.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then + : else - lt_prog_compiler_pic= - lt_prog_compiler_can_build_shared=no + ac_cv_header_stdc=no fi +rm -f conftest* fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic= - ;; - *) - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; -esac -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" -{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then + : else - lt_prog_compiler_static_works=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works=yes - fi - else - lt_prog_compiler_static_works=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <ctype.h> +#include <stdlib.h> +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + $as_echo "$as_me: program exited with status $ac_status" >&5 +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -rf conftest.dSYM +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works" >&6; } -if test x"$lt_prog_compiler_static_works" = xyes; then - : -else - lt_prog_compiler_static= + +fi fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF +fi -{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 -echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_c_o+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +# On IRIX 5.3, sys/types and inttypes.h are conflicting. + + + + + + + + + +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 else - lt_cv_prog_compiler_c_o=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:6721: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:6725: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 
2>&5 - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. - rmdir conftest - $rm conftest* +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + eval "$as_ac_Header=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + eval "$as_ac_Header=no" fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_c_o" >&6; } +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if test `eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF -hard_links="nottested" -if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 -echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { echo "$as_me:$LINENO: result: $hard_links" >&5 -echo "${ECHO_T}$hard_links" >&6; } - if test "$hard_links" = no; then - { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 -echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no fi -{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } +done - runpath_var= - allow_undefined_flag= - enable_shared_with_static_runtimes=no - archive_cmds= - archive_expsym_cmds= - old_archive_From_new_cmds= - old_archive_from_expsyms_cmds= - export_dynamic_flag_spec= - whole_archive_flag_spec= - thread_safe_flag_spec= - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld= - hardcode_libdir_separator= - hardcode_direct=no - hardcode_minus_L=no - hardcode_shlibpath_var=unsupported - link_all_deplibs=unknown - hardcode_automatic=no - module_cmds= - module_expsym_cmds= - always_export_symbols=no - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. 
Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - exclude_expsyms="_GLOBAL_OFFSET_TABLE_" - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - extract_expsyms_cmds= - # Just being paranoid about ensuring that cc_basename is set. - for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` - case $host_os in - cygwin* | mingw* | pw32*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; - esac - ld_shlibs=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' +for ac_header in dlfcn.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir' - export_dynamic_flag_spec='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - whole_archive_flag_spec= - fi - supports_anon_versioning=no - case `$LD -v 2>/dev/null` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + eval "$as_ac_Header=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - # See if GNU ld supports shared libraries. 
- case $host_os in - aix3* | aix4* | aix5*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - ld_shlibs=no - cat <&2 + eval "$as_ac_Header=no" +fi -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if test `eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF -EOF - fi - ;; +fi - amigaos*) - archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes +done - # Samuel A. Falvo II reports - # that the semantics of dynamic libraries on AmigaOS, at least up - # to version 4, is to share data among multiple programs linked - # with the same dynamic library. Since this doesn't match the - # behavior of shared libraries on other platforms, we can't use - # them. - ld_shlibs=no - ;; - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - ld_shlibs=no - fi - ;; - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, - # as there is no search path for DLLs. - hardcode_libdir_flag_spec='-L$libdir' - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' +# Set options - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... 
- archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs=no - fi - ;; - interix[3-9]*) - hardcode_direct=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - export_dynamic_flag_spec='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - gnu* | linux* | k*bsd*-gnu) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - tmp_addflag= - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - *) - tmp_sharedflag='-shared' ;; - esac - archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + enable_dlopen=no + - if test $supports_anon_versioning = yes; then - archive_expsym_cmds='$echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - $echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + enable_win32_dll=no + + + # Check whether --enable-shared was given. 
+if test "${enable_shared+set}" = set; then + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes fi - link_all_deplibs=no - else - ld_shlibs=no - fi + done + IFS="$lt_save_ifs" ;; + esac +else + enable_shared=yes +fi - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - solaris*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - ld_shlibs=no - cat <&2 -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. -EOF - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs=no - cat <<_LT_EOF 1>&2 -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. -_LT_EOF - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - ;; - sunos4*) - archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; + # Check whether --enable-static was given. 
+if test "${enable_static+set}" = set; then + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" ;; esac +else + enable_static=yes +fi - if test "$ld_shlibs" = no; then - runpath_var= - hardcode_libdir_flag_spec= - export_dynamic_flag_spec= - whole_archive_flag_spec= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag=unsupported - always_export_symbols=yes - archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - hardcode_direct=unsupported - fi - ;; - - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
- archive_cmds='' - hardcode_direct=yes - hardcode_libdir_separator=':' - link_all_deplibs=yes - if test "$GCC" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L=yes - hardcode_libdir_flag_spec='-L$libdir' - hardcode_libdir_separator= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi +# Check whether --with-pic was given. 
+if test "${with_pic+set}" = set; then + withval=$with_pic; pic_mode="$withval" else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - + pic_mode=default fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' - allow_undefined_flag="-z nodefs" - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +test -z "$pic_mode" && pic_mode=default -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - no_undefined_flag=' ${wl}-bernotok' - allow_undefined_flag=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec='$convenience' - archive_cmds_need_lc=yes - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + # Check whether --enable-fast-install was given. 
+if test "${enable_fast_install+set}" = set; then + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes fi - fi + done + IFS="$lt_save_ifs" ;; + esac +else + enable_fast_install=yes +fi - amigaos*) - archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - # see comment about different semantics on the GNU ld section - ld_shlibs=no - ;; - bsdi[45]*) - export_dynamic_flag_spec=-rdynamic - ;; - cygwin* | mingw* | pw32*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_From_new_cmds='true' - # FIXME: Should let the user specify the lib program. 
- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' - fix_srcfile_path='`cygpath -w "$srcfile"`' - enable_shared_with_static_runtimes=yes - ;; - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[012]) - allow_undefined_flag='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[012]) - allow_undefined_flag='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - allow_undefined_flag='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - archive_cmds_need_lc=no - hardcode_direct=no - hardcode_automatic=yes - hardcode_shlibpath_var=unsupported - whole_archive_flag_spec='' - link_all_deplibs=yes - if test "$GCC" = yes ; then - output_verbose_link_cmd='echo' - archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - archive_cmds='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - ld_shlibs=no - ;; - esac - fi - ;; - dgux*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - freebsd1*) - ld_shlibs=no - ;; - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). 
- freebsd2.2*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) - archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - hpux9*) - if test "$GCC" = yes; then - archive_cmds='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - export_dynamic_flag_spec='${wl}-E' - ;; +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" - hpux10*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' - hardcode_direct=yes - export_dynamic_flag_spec='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - fi - ;; - hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - case $host_cpu in - hppa*64*|ia64*) - hardcode_libdir_flag_spec_ld='+b $libdir' - hardcode_direct=no - hardcode_shlibpath_var=no - ;; - *) - hardcode_direct=yes - export_dynamic_flag_spec='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L=yes - ;; - esac - fi - ;; - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec_ld='-rpath $libdir' - fi - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - link_all_deplibs=yes - ;; - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - newsos6) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_shlibpath_var=no - ;; - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct=yes - hardcode_shlibpath_var=no - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - export_dynamic_flag_spec='${wl}-E' - else - case $host_os in - openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-R$libdir' - ;; - *) - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - ;; - esac - fi - else - ld_shlibs=no - fi - ;; - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - old_archive_From_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - osf3*) - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - fi - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - ;; - osf4* | osf5*) # as osf3* with the addition of -msym flag - if 
test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ - $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec='-rpath $libdir' - fi - hardcode_libdir_separator=: - ;; - solaris*) - no_undefined_flag=' -z text' - if test "$GCC" = yes; then - wlarc='${wl}' - archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' - else - wlarc='' - archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_shlibpath_var=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - whole_archive_flag_spec='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs=yes - ;; - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec='-L$libdir' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - sysv4) - case $host_vendor in - sni) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds='$CC -r -o $output$reload_objs' - hardcode_direct=no - ;; - motorola) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var=no - ;; - sysv4.3*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - export_dynamic_flag_spec='-Bexport' - ;; - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs=yes - fi - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag='${wl}-z,text' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - runpath_var='LD_RUN_PATH' - if test "$GCC" = yes; then - archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - no_undefined_flag='${wl}-z,text' - allow_undefined_flag='${wl}-z,nodefs' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - hardcode_libdir_separator=':' - link_all_deplibs=yes - export_dynamic_flag_spec='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - if test "$GCC" = yes; then - archive_cmds='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - uts4*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - *) - ld_shlibs=no - ;; - esac - fi -{ echo "$as_me:$LINENO: result: $ld_shlibs" >&5 -echo "${ECHO_T}$ld_shlibs" >&6; } -test "$ld_shlibs" = no && can_build_shared=no +test -z "$LN_S" && LN_S="ln -s" -# -# Do we need to explicitly link libc? -# -case "x$archive_cmds_need_lc" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc=yes - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $archive_cmds in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. 
If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 -echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } - $rm conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl - pic_flag=$lt_prog_compiler_pic - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag - allow_undefined_flag= - if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 - (eval $archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - then - archive_cmds_need_lc=no - else - archive_cmds_need_lc=yes - fi - allow_undefined_flag=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - { echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 -echo "${ECHO_T}$archive_cmds_need_lc" >&6; } - ;; - esac - fi - ;; -esac -{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6; } -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -if test "$GCC" = yes; then - case $host_os in - darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; - *) lt_awk_arg="/^libraries:/" ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$lt_search_path_spec" | grep ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`echo "$lt_search_path_spec" | $SED -e 's/;/ /g'` - else - lt_search_path_spec=`echo "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary. 
- lt_tmp_lt_search_path_spec= - lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path/$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" - else - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`echo $lt_tmp_lt_search_path_spec | awk ' -BEGIN {RS=" "; FS="/|\n";} { - lt_foo=""; - lt_count=0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo="/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[lt_foo]++; } - if (lt_freq[lt_foo] == 1) { print lt_foo; } -}'` - sys_lib_search_path_spec=`echo $lt_search_path_spec` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -need_lib_prefix=unknown -hardcode_into_libs=no -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; -aix4* | aix5*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; -amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; -bsdi[45]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; -cygwin* | mingw* | pw32*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $rm \$dlpath' - shlibpath_overrides_runpath=yes +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; +{ $as_echo "$as_me:$LINENO: checking for objdir" >&5 +$as_echo_n "checking for objdir... 
" >&6; } +if test "${lt_cv_objdir+set}" = set; then + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; -freebsd1*) - dynamic_linker=no - ;; +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[123]*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. - postinstall_cmds='chmod 555 $lib' - ;; -interix[3-9]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; -# This must be Linux ELF. 
-linux* | k*bsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. - dynamic_linker='GNU/Linux ld.so' - ;; -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; -nto-qnx*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; -openbsd*) - version_type=sunos - sys_lib_dlsearch_path_spec="/usr/lib" - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
- case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[89] | openbsd2.[89].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES fi ;; +esac -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' -rdos*) - dynamic_linker=no - ;; +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; +# Sed substitution to delay expansion of an escaped single quote. 
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' -sysv4 | sysv4.3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; esac - ;; +done +cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=freebsd-elf - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - if test "$with_gnu_ld" = yes; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - shlibpath_overrides_runpath=no - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - shlibpath_overrides_runpath=yes - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. 
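For context, the cc_basename loop a few lines up skips known wrapper programs (the libtool compile script, ccache, distcc, purify) and any leading options, then strips the directory part and the cross-compile prefix from the first real compiler word. A rough standalone equivalent, using plain sed instead of libtool's $ECHO/$Xsed pair; the CC and host_alias values below are hypothetical examples:

# Illustrative sketch (not part of aclocal.m4): derive cc_basename from $CC.
CC="ccache x86_64-linux-gnu-gcc -std=gnu99"   # hypothetical example value
host_alias=x86_64-linux-gnu                   # hypothetical example value
for cc_temp in $CC ""; do
  case $cc_temp in
    compile | */compile | ccache | */ccache) ;;   # skip wrapper scripts
    distcc | */distcc | purify | */purify) ;;
    -*) ;;                                        # skip leading options
    *) break ;;
  esac
done
cc_basename=`echo "$cc_temp" | sed -e 's%.*/%%' -e "s%^$host_alias-%%"`
echo "$cc_basename"                           # prints: gcc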
;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org -*) - dynamic_linker=no +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac -{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 -echo "${ECHO_T}$dynamic_linker" >&6; } -test "$dynamic_linker" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi -{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } -hardcode_action= -if test -n "$hardcode_libdir_flag_spec" || \ - test -n "$runpath_var" || \ - test "X$hardcode_automatic" = "Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, )" != no && - test "$hardcode_minus_L" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action=immediate - fi +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- hardcode_action=unsupported + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } fi -{ echo "$as_me:$LINENO: result: $hardcode_action" >&5 -echo "${ECHO_T}$hardcode_action" >&6; } -if test "$hardcode_action" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi -striplib= -old_striplib= -{ echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 -echo $ECHO_N "checking whether stripping libraries is possible... $ECHO_C" >&6; } -if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:$LINENO: checking for file" >&5 +$as_echo_n "checking for file... " >&6; } +if test "${lt_cv_path_MAGIC_CMD+set}" = set; then + $as_echo_n "(cached) " >&6 else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } - else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi - ;; - *) - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } - ;; - esac + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac fi -if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } else - lt_cv_dlopen=no - lt_cv_dlopen_libs= + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi - case $host_os in - beos*) - lt_cv_dlopen="load_add_on" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - mingw* | pw32*) - lt_cv_dlopen="LoadLibrary" - lt_cv_dlopen_libs= - ;; + else + MAGIC_CMD=: + fi +fi - cygwin*) - lt_cv_dlopen="dlopen" - lt_cv_dlopen_libs= - ;; + fi + ;; +esac - darwin*) - # if libdl is installed we need to link against it - { echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 -echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6; } -if test "${ac_cv_lib_dl_dlopen+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +# Use C for the default configuration in the libtool script -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_dl_dlopen=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 -echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6; } -if test $ac_cv_lib_dl_dlopen = yes; then - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" -else +# Source file extension for C test sources. +ac_ext=c - lt_cv_dlopen="dyld" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes +# Object file extension for compiled C test sources. +objext=o +objext=$objext -fi +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" - ;; +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' - *) - { echo "$as_me:$LINENO: checking for shl_load" >&5 -echo $ECHO_N "checking for shl_load... 
$ECHO_C" >&6; } -if test "${ac_cv_func_shl_load+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* Define shl_load to an innocuous variant, in case declares shl_load. - For example, HP-UX 11i declares gettimeofday. */ -#define shl_load innocuous_shl_load -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char shl_load (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ -#ifdef __STDC__ -# include -#else -# include -#endif -#undef shl_load -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_shl_load || defined __stub___shl_load -choke me -#endif -int -main () -{ -return shl_load (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_func_shl_load=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cv_func_shl_load=no -fi +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -fi -{ echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 -echo "${ECHO_T}$ac_cv_func_shl_load" >&6; } -if test $ac_cv_func_shl_load = yes; then - lt_cv_dlopen="shl_load" -else - { echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 -echo $ECHO_N "checking for shl_load in -ldld... $ECHO_C" >&6; } -if test "${ac_cv_lib_dld_shl_load+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); -int -main () -{ -return shl_load (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_dld_shl_load=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +# Allow CC to be a program name with arguments. +compiler=$CC - ac_cv_lib_dld_shl_load=no -fi +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 -echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6; } -if test $ac_cv_lib_dld_shl_load = yes; then - lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" -else - { echo "$as_me:$LINENO: checking for dlopen" >&5 -echo $ECHO_N "checking for dlopen... $ECHO_C" >&6; } -if test "${ac_cv_func_dlopen+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* Define dlopen to an innocuous variant, in case declares dlopen. - For example, HP-UX 11i declares gettimeofday. */ -#define dlopen innocuous_dlopen +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char dlopen (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* -#ifdef __STDC__ -# include -#else -# include -#endif -#undef dlopen +if test -n "$compiler"; then -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_dlopen || defined __stub___dlopen -choke me -#endif +lt_prog_compiler_no_builtin_flag= -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_func_dlopen=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +if test "$GCC" = yes; then + lt_prog_compiler_no_builtin_flag=' -fno-builtin' - ac_cv_func_dlopen=no -fi + { $as_echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:6838: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:6842: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext fi -{ echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 -echo "${ECHO_T}$ac_cv_func_dlopen" >&6; } -if test $ac_cv_func_dlopen = yes; then - lt_cv_dlopen="dlopen" -else - { echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 -echo $ECHO_N "checking for dlopen in -ldl... $ECHO_C" >&6; } -if test "${ac_cv_lib_dl_dlopen+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_dl_dlopen=yes +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_cv_lib_dl_dlopen=no + : fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 -echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6; } -if test $ac_cv_lib_dl_dlopen = yes; then - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" -else - { echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 -echo $ECHO_N "checking for dlopen in -lsvld... $ECHO_C" >&6; } -if test "${ac_cv_lib_svld_dlopen+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsvld $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_svld_dlopen=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cv_lib_svld_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 -echo "${ECHO_T}$ac_cv_lib_svld_dlopen" >&6; } -if test $ac_cv_lib_svld_dlopen = yes; then - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" -else - { echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 -echo $ECHO_N "checking for dld_link in -ldld... $ECHO_C" >&6; } -if test "${ac_cv_lib_dld_dld_link+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dld_link (); -int -main () -{ -return dld_link (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? 
- grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_dld_dld_link=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cv_lib_dld_dld_link=no -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 -echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6; } -if test $ac_cv_lib_dld_dld_link = yes; then - lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" -fi + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= +{ $as_echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -fi + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; -fi + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; -fi + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; -fi + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; -fi + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; - ;; - esac + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; - if test "x$lt_cv_dlopen" != xno; then - enable_dlopen=yes + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac else - enable_dlopen=no - fi + # PORTME Check for flag to pass linker flags through the system compiler. 
+ case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS="$CPPFLAGS" - test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; - save_LDFLAGS="$LDFLAGS" - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; - save_LIBS="$LIBS" - LIBS="$lt_cv_dlopen_libs $LIBS" + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; - { echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 -echo $ECHO_N "checking whether a program can dlopen itself... $ECHO_C" >&6; } -if test "${lt_cv_dlopen_self+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test "$cross_compiling" = yes; then : - lt_cv_dlopen_self=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <&1 | sed 5q` in + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Sun\ F*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + esac + ;; + esac + ;; -#if HAVE_DLFCN_H -#include -#endif + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; -#include + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. 
*/ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif + rdos*) + lt_prog_compiler_static='-non_shared' + ;; -#ifdef __cplusplus -extern "C" void exit (int); -#endif + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; -void fnord() { int i=42;} -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - /* dlclose (self); */ - } - else - puts (dlerror ()); + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; - exit (status); -} -EOF - if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? - case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self=no - fi -fi -rm -fr conftest* + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; -fi -{ echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 -echo "${ECHO_T}$lt_cv_dlopen_self" >&6; } + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; - if test "x$lt_cv_dlopen_self" = xyes; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - { echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 -echo $ECHO_N "checking whether a statically linked program can dlopen itself... $ECHO_C" >&6; } -if test "${lt_cv_dlopen_self_static+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test "$cross_compiling" = yes; then : - lt_cv_dlopen_self_static=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext < -#endif + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi -#include +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac +{ $as_echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5 +$as_echo "$lt_prog_compiler_pic" >&6; } -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. 
*/ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif -#ifdef __cplusplus -extern "C" void exit (int); -#endif -void fnord() { int i=42;} -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - /* dlclose (self); */ - } - else - puts (dlerror ()); - exit (status); -} -EOF - if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 - (eval $ac_link) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? - case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self_static=no - fi +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } +if test "${lt_cv_prog_compiler_pic_works+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7177: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:7181: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + fi -rm -fr conftest* +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi fi -{ echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 -echo "${ECHO_T}$lt_cv_dlopen_self_static" >&6; } - fi - CPPFLAGS="$save_CPPFLAGS" - LDFLAGS="$save_LDFLAGS" - LIBS="$save_LIBS" - ;; - esac - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi -# Report which library types will actually be built -{ echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 -echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6; } -{ echo "$as_me:$LINENO: result: $can_build_shared" >&5 -echo "${ECHO_T}$can_build_shared" >&6; } - -{ echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 -echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6; } -test "$can_build_shared" = "no" && enable_shared=no -# On AIX, shared libraries and static libraries use the same namespace, and -# are all built from PIC. -case $host_os in -aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if test "${lt_cv_prog_compiler_static_works+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" -aix4* | aix5*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; -esac -{ echo "$as_me:$LINENO: result: $enable_shared" >&5 -echo "${ECHO_T}$enable_shared" >&6; } +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } -{ echo "$as_me:$LINENO: checking whether to build static libraries" >&5 -echo $ECHO_N "checking whether to build static libraries... 
$ECHO_C" >&6; } -# Make sure either enable_shared or enable_static is yes. -test "$enable_shared" = yes || enable_static=yes -{ echo "$as_me:$LINENO: result: $enable_static" >&5 -echo "${ECHO_T}$enable_static" >&6; } - -# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - compiler \ - CC \ - LD \ - lt_prog_compiler_wl \ - lt_prog_compiler_pic \ - lt_prog_compiler_static \ - lt_prog_compiler_no_builtin_flag \ - export_dynamic_flag_spec \ - thread_safe_flag_spec \ - whole_archive_flag_spec \ - enable_shared_with_static_runtimes \ - old_archive_cmds \ - old_archive_from_new_cmds \ - predep_objects \ - postdep_objects \ - predeps \ - postdeps \ - compiler_lib_search_path \ - archive_cmds \ - archive_expsym_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - old_archive_from_expsyms_cmds \ - allow_undefined_flag \ - no_undefined_flag \ - export_symbols_cmds \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ - hardcode_automatic \ - module_cmds \ - module_expsym_cmds \ - lt_cv_prog_compiler_c_o \ - fix_srcfile_path \ - exclude_expsyms \ - include_expsyms; do - - case $var in - old_archive_cmds | \ - old_archive_from_new_cmds | \ - archive_cmds | \ - archive_expsym_cmds | \ - module_cmds | \ - module_expsym_cmds | \ - old_archive_from_expsyms_cmds | \ - export_symbols_cmds | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" - ;; - *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" - ;; - esac - done +if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi - case $lt_echo in - *'\$0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` - ;; - esac -cfgfile="${ofile}T" - trap "$rm \"$cfgfile\"; exit 1" 1 2 15 - $rm -f "$cfgfile" - { echo "$as_me:$LINENO: creating $ofile" >&5 -echo "$as_me: creating $ofile" >&6;} - cat <<__EOF__ >> "$cfgfile" -#! $SHELL -# `$echo "$cfgfile" | sed 's%^.*/%%'` - Provide generalized library-building support services. 
-# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) -# NOTE: Changes made to this file will be lost: look at ltmain.sh. -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 -# Free Software Foundation, Inc. -# -# This file is part of GNU Libtool: -# Originally by Gordon Matzigkeit , 1996 -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. -# A sed program that does not truncate output. -SED=$lt_SED -# Sed that helps us avoid accidentally triggering echo(1) options like -n. -Xsed="$SED -e 1s/^X//" -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# The names of the tagged configurations supported by this script. -available_tags= - -# ### BEGIN LIBTOOL CONFIG - -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test "${lt_cv_prog_compiler_c_o+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7282: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:7286: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 
2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } -# Whether or not to build static libraries. -build_old_libs=$enable_static -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os + { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test "${lt_cv_prog_compiler_c_o+set}" = set; then + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext -# An echo program that does not interpret backslashes. -echo=$lt_echo + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:7337: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:7341: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } -# A C compiler. -LTCC=$lt_LTCC -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS -# A language-specific compiler. -CC=$lt_compiler -# Is the compiler the GNU C compiler? -with_gcc=$GCC +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:$LINENO: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi -# An ERE matcher. -EGREP=$lt_EGREP -# The linker used to build libraries. -LD=$lt_LD -# Whether we need hard or soft links. -LN_S=$lt_LN_S -# A BSD-compatible nm program. -NM=$lt_NM -# A symbol stripping program -STRIP=$lt_STRIP -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD + { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac -# Used on cygwin: assembler. -AS="$AS" + ld_shlibs=yes + if test "$with_gnu_ld" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' -# The name of the directory that contains temporary libtool files. -objdir=$objdir + # Set some defaults for GNU ld with shared library support. 
These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl +*** Warning: the GNU linker, at least up to release 2.9.1, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to modify your PATH +*** so that a non-GNU linker is found, and then restart. -# Object file suffix (normally "o"). -objext="$ac_objext" +_LT_EOF + fi + ;; -# Old archive suffix (normally "a"). -libext="$libext" + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; -# Executable file suffix (normally ""). -exeext="$exeext" + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec='-L$libdir' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -# Additional compiler flags for building library objects. 
-pic_flag=$lt_lt_prog_compiler_pic -pic_mode=$pic_mode + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len - -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o - -# Must we lock files when doing compilation? -need_locks=$lt_need_locks + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; -# Do we need the lib prefix for modules? 
-need_lib_prefix=$need_lib_prefix + gnu* | linux* | tpf* | k*bsd*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag= + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + xl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -# Do we need a version for libraries? -need_version=$need_version + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi -# Whether dlopen is supported. -dlopen_support=$enable_dlopen + case $cc_basename in + xlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' + archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; -# Whether dlopen of programs is supported. 
-dlopen_self=$enable_dlopen_self + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_thread_safe_flag_spec +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; -# Library versioning type. -version_type=$version_type + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; -# Format of library name prefix. 
-libname_spec=$lt_libname_spec + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_old_archive_cmds -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
-# Commands used to build and install a shared archive. -archive_cmds=$lt_archive_cmds -archive_expsym_cmds=$lt_archive_expsym_cmds -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='${wl}-f,' -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_module_cmds -module_expsym_cmds=$lt_module_expsym_cmds + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib + export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_predep_objects - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_postdep_objects - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_predeps - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_postdeps - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_compiler_lib_search_path +int +main () +{ -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method - -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd - -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag - -# Flag that forces no undefined symbols. 
-no_undefined_flag=$lt_no_undefined_flag - -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds - -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval - -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl +lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\(.*\)$/\1/ + p + } + }' +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +fi +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address -# This is the shared library runtime path variable. -runpath_var=$runpath_var +fi -# This is the shared library path variable. -shlibpath_var=$shlibpath_var +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action +int +main () +{ -# Whether we should hardcode library paths into libraries. 
-hardcode_into_libs=$hardcode_into_libs + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec +lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\(.*\)$/\1/ + p + } + }' +aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +# Check for a 64-bit object if we didn't find anything. +if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +fi +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator +fi -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$hardcode_direct +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$hardcode_minus_L + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. 
-hardcode_shlibpath_var=$hardcode_shlibpath_var + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$hardcode_automatic + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + fix_srcfile_path='`cygpath -w "$srcfile"`' + enable_shared_with_static_runtimes=yes + ;; -# Whether libtool must link a program against all its dependency libraries. 
-link_all_deplibs=$link_all_deplibs + darwin* | rhapsody*) -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + whole_archive_flag_spec='' + link_all_deplibs=yes + allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=echo + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path + else + ld_shlibs=no + fi -# Set to yes if exported symbols are required. -always_export_symbols=$always_export_symbols + ;; -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds + freebsd1*) + ld_shlibs=no + ;; -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; -# Symbols that must always be exported. -include_expsyms=$lt_include_expsyms + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; -# ### END LIBTOOL CONFIG + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) + archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; -__EOF__ + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; - case $host_os in - aix3*) - cat <<\EOF >> "$cfgfile" + hpux10*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -EOF - ;; - esac - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '$q' "$ltmain" >> "$cfgfile" || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || \ - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" + hpux11*) + if test "$GCC" = yes -a "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: -else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. 
- ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi -fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + cat >conftest.$ac_ext <<_ACEOF +int foo(void) {} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -CC="$lt_save_CC" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Check whether --with-tags was given. -if test "${with_tags+set}" = set; then - withval=$with_tags; tagnames="$withval" fi +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; -if test -f "$ltmain" && test -n "$tagnames"; then - if test ! 
-f "${ofile}"; then - { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not exist" >&5 -echo "$as_me: WARNING: output file \`$ofile' does not exist" >&2;} - fi - - if test -z "$LTCC"; then - eval "`$SHELL ${ofile} --config | grep '^LTCC='`" - if test -z "$LTCC"; then - { echo "$as_me:$LINENO: WARNING: output file \`$ofile' does not look like a libtool script" >&5 -echo "$as_me: WARNING: output file \`$ofile' does not look like a libtool script" >&2;} - else - { echo "$as_me:$LINENO: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&5 -echo "$as_me: WARNING: using \`LTCC=$LTCC', extracted from \`$ofile'" >&2;} - fi - fi - if test -z "$LTCFLAGS"; then - eval "`$SHELL ${ofile} --config | grep '^LTCFLAGS='`" - fi - - # Extract list of available tagged configurations in $ofile. - # Note that this assumes the entire list is on one line. - available_tags=`grep "^available_tags=" "${ofile}" | $SED -e 's/available_tags=\(.*$\)/\1/' -e 's/\"//g'` + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for tagname in $tagnames; do - IFS="$lt_save_ifs" - # Check whether tagname contains only valid characters - case `$echo "X$tagname" | $Xsed -e 's:[-_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890,/]::g'` in - "") ;; - *) { { echo "$as_me:$LINENO: error: invalid tag name: $tagname" >&5 -echo "$as_me: error: invalid tag name: $tagname" >&2;} - { (exit 1); exit 1; }; } - ;; - esac + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "${ofile}" > /dev/null - then - { { echo "$as_me:$LINENO: error: tag name \"$tagname\" already exists" >&5 -echo "$as_me: error: tag name \"$tagname\" already exists" >&2;} - { (exit 1); exit 1; }; } - fi + *nto* | *qnx*) + ;; - # Update the list of available tags. 
- if test -n "$tagname"; then - echo appending configuration tag \"$tagname\" to $ofile - - case $tagname in - CXX) - if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry 
${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' -archive_cmds_need_lc_CXX=no -allow_undefined_flag_CXX= -always_export_symbols_CXX=no -archive_expsym_cmds_CXX= -export_dynamic_flag_spec_CXX= -hardcode_direct_CXX=no -hardcode_libdir_flag_spec_CXX= -hardcode_libdir_flag_spec_ld_CXX= -hardcode_libdir_separator_CXX= -hardcode_minus_L_CXX=no -hardcode_shlibpath_var_CXX=unsupported -hardcode_automatic_CXX=no -module_cmds_CXX= -module_expsym_cmds_CXX= -link_all_deplibs_CXX=unknown -old_archive_cmds_CXX=$old_archive_cmds -no_undefined_flag_CXX= -whole_archive_flag_spec_CXX= -enable_shared_with_static_runtimes_CXX=no - -# Dependencies to place before and after the object being linked: -predep_objects_CXX= -postdep_objects_CXX= -predeps_CXX= -postdeps_CXX= -compiler_lib_search_path_CXX= + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; -# Source file extension for C++ test sources. -ac_ext=cpp + solaris*) + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; -# Object file extension for compiled C++ test sources. -objext=o -objext_CXX=$objext + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; -# Code to be used in simple link tests -lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; -# ltmain only uses $CC for tagged configurations so make sure $CC is set. + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; -# Allow CC to be a program name with arguments. -compiler=$CC + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* + *) + ld_shlibs=no + ;; + esac + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_LD=$LD -lt_save_GCC=$GCC -GCC=$GXX -lt_save_with_gnu_ld=$with_gnu_ld -lt_save_path_LD=$lt_cv_path_LD -if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then - lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx -else - $as_unset lt_cv_prog_gnu_ld -fi -if test -n "${lt_cv_path_LDCXX+set}"; then - lt_cv_path_LD=$lt_cv_path_LDCXX -else - $as_unset lt_cv_path_LD -fi -test -z "${LDCXX+set}" || LD=$LDCXX -CC=${CXX-"c++"} -compiler=$CC -compiler_CXX=$CC -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +{ $as_echo "$as_me:$LINENO: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no +with_gnu_ld=$with_gnu_ld -# We don't want -fno-exception wen compiling C++ code, so set the -# no_builtin_flag separately -if test "$GXX" = yes; then - lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' -else - lt_prog_compiler_no_builtin_flag_CXX= -fi -if test "$GXX" = yes; then - # Set up default GNU C++ configuration -# Check whether --with-gnu-ld was given. -if test "${with_gnu_ld+set}" = set; then - withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes -else - with_gnu_ld=no -fi -ac_prog=ld -if test "$GCC" = yes; then - # Check if gcc -print-prog-name=ld gives a path. - { echo "$as_me:$LINENO: checking for ld used by $CC" >&5 -echo $ECHO_N "checking for ld used by $CC... $ECHO_C" >&6; } - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. 
- [\\/]* | ?:[\\/]*) - re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`echo $ac_prog| $SED 's%\\\\%/%g'` - while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do - ac_prog=`echo $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD="$ac_prog" + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test "$with_gnu_ld" = yes; then - { echo "$as_me:$LINENO: checking for GNU ld" >&5 -echo $ECHO_N "checking for GNU ld... $ECHO_C" >&6; } -else - { echo "$as_me:$LINENO: checking for non-GNU ld" >&5 -echo $ECHO_N "checking for non-GNU ld... $ECHO_C" >&6; } -fi -if test "${lt_cv_path_LD+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -z "$LD"; then - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD="$ac_dir/$ac_prog" - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. - case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext -LD="$lt_cv_path_LD" -if test -n "$LD"; then - { echo "$as_me:$LINENO: result: $LD" >&5 -echo "${ECHO_T}$LD" >&6; } -else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi -test -z "$LD" && { { echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5 -echo "$as_me: error: no acceptable ld found in \$PATH" >&2;} - { (exit 1); exit 1; }; } -{ echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5 -echo $ECHO_N "checking if the linker ($LD) is GNU ld... $ECHO_C" >&6; } -if test "${lt_cv_prog_gnu_ld+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - # I'd rather use --version here, but apparently some GNU lds only accept -v. -case `$LD -v 2>&1 &5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\"") >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } + then + archive_cmds_need_lc=no + else + archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + { $as_echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5 +$as_echo "$archive_cmds_need_lc" >&6; } + ;; + esac + fi ;; esac -fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_gnu_ld" >&5 -echo "${ECHO_T}$lt_cv_prog_gnu_ld" >&6; } -with_gnu_ld=$lt_cv_prog_gnu_ld - # Check if GNU C++ uses GNU ld as the underlying linker, since the - # archiving commands below assume that GNU ld is being used. - if test "$with_gnu_ld" = yes; then - archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' - export_dynamic_flag_spec_CXX='${wl}--export-dynamic' - # If archive_cmds runs LD, not CC, wlarc should be empty - # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to - # investigate it a little bit more. (MM) - wlarc='${wl}' - # ancient GNU ld didn't support --whole-archive et. al. - if eval "`$CC -print-prog-name=ld` --help 2>&1" | \ - grep 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - whole_archive_flag_spec_CXX= - fi - else - with_gnu_ld=no - wlarc= - # A generic and very simple default shared library creation - # command for GNU C++ for the case where it uses the native - # linker, instead of GNU ld. If possible, this setting should - # overridden to take advantage of the native linker features on - # the platform it is being used on. - archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - fi - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' -else - GXX=no - with_gnu_ld=no - wlarc= -fi -# PORTME: fill in a description of your system's C++ link characteristics -{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } -ld_shlibs_CXX=yes -case $host_os in - aix3*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - aix_use_runtimelinking=no - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) - for ld_flag in $LDFLAGS; do - case $ld_flag in - *-brtl*) - aix_use_runtimelinking=yes - break - ;; - esac - done - ;; - esac - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. 
If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - archive_cmds_CXX='' - hardcode_direct_CXX=yes - hardcode_libdir_separator_CXX=':' - link_all_deplibs_CXX=yes - - if test "$GXX" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct_CXX=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L_CXX=yes - hardcode_libdir_flag_spec_CXX='-L$libdir' - hardcode_libdir_separator_CXX= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols_CXX=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag_CXX='-berok' - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. 
-if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds_CXX="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' - allow_undefined_flag_CXX="-z nodefs" - archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - no_undefined_flag_CXX=' ${wl}-bernotok' - allow_undefined_flag_CXX=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec_CXX='$convenience' - archive_cmds_need_lc_CXX=yes - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag_CXX=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. 
This deserves some investigation. FIXME - archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - ld_shlibs_CXX=no - fi - ;; - chorus*) - case $cc_basename in - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - esac - ;; - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, - # as there is no search path for DLLs. - hardcode_libdir_flag_spec_CXX='-L$libdir' - allow_undefined_flag_CXX=unsupported - always_export_symbols_CXX=no - enable_shared_with_static_runtimes_CXX=yes - - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs_CXX=no - fi - ;; - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[012]) - allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[012]) - allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - archive_cmds_need_lc_CXX=no - hardcode_direct_CXX=no - hardcode_automatic_CXX=yes - hardcode_shlibpath_var_CXX=unsupported - whole_archive_flag_spec_CXX='' - link_all_deplibs_CXX=yes - - if test "$GXX" = yes ; then - lt_int_apple_cc_single_mod=no - output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes - fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds_CXX='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds_CXX='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > 
$output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - archive_cmds_CXX='$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj ${wl}-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - ld_shlibs_CXX=no - ;; - esac - fi - ;; - dgux*) - case $cc_basename in - ec++*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - ghcx*) - # Green Hills C++ Compiler - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - esac - ;; - freebsd[12]*) - # C++ shared libraries reported to be fairly broken before switch to ELF - ld_shlibs_CXX=no - ;; - freebsd-elf*) - archive_cmds_need_lc_CXX=no - ;; - freebsd* | dragonfly*) - # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF - # conventions - ld_shlibs_CXX=yes - ;; - gnu*) - ;; - hpux9*) - hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_CXX=: - export_dynamic_flag_spec_CXX='${wl}-E' - hardcode_direct_CXX=yes - hardcode_minus_L_CXX=yes # Not in the search PATH, - # but as the default - # location of the library. - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - aCC*) - archive_cmds_CXX='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "[-]L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes; then - archive_cmds_CXX='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - fi - ;; - esac - ;; - hpux10*|hpux11*) - if test $with_gnu_ld = no; then - hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_CXX=: - case $host_cpu in - hppa*64*|ia64*) ;; - *) - export_dynamic_flag_spec_CXX='${wl}-E' - ;; - esac - fi - case $host_cpu in - hppa*64*|ia64*) - hardcode_direct_CXX=no - hardcode_shlibpath_var_CXX=no - ;; - *) - hardcode_direct_CXX=yes - hardcode_minus_L_CXX=yes # Not in the search PATH, - # but as the default - # location of the library. - ;; - esac - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - aCC*) - case $host_cpu in - hppa*64*) - archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | grep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes; then - if test $with_gnu_ld = no; then - case $host_cpu in - hppa*64*) - archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi - else - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - fi - ;; - esac - ;; - interix[3-9]*) - hardcode_direct_CXX=no - hardcode_shlibpath_var_CXX=no - hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' - export_dynamic_flag_spec_CXX='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. 
To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - irix5* | irix6*) - case $cc_basename in - CC*) - # SGI C++ - archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - # Archives containing C++ object files must be created using - # "CC -ar", where "CC" is the IRIX C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' - ;; - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then - archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` -o $lib' - fi - fi - link_all_deplibs_CXX=yes - ;; - esac - hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_CXX=: - ;; - linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | grep "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - - hardcode_libdir_flag_spec_CXX='${wl}--rpath,$libdir' - export_dynamic_flag_spec_CXX='${wl}--export-dynamic' - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' - ;; - icpc*) - # Intel C++ - with_gnu_ld=yes - # version 8.0 and above of icpc choke on multiply defined symbols - # if we add $predep_objects and $postdep_objects, however 7.1 and - # earlier do not add the objects themselves. - case `$CC -V 2>&1` in - *"Version 7."*) - archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 8.0 or newer - tmp_idyn= - case $host_cpu in - ia64*) tmp_idyn=' -i_dynamic';; - esac - archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - esac - archive_cmds_need_lc_CXX=no - hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' - export_dynamic_flag_spec_CXX='${wl}--export-dynamic' - whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' - ;; - pgCC*) - # Portland Group C++ compiler - archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - - hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' - export_dynamic_flag_spec_CXX='${wl}--export-dynamic' - whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - ;; - cxx*) - # Compaq C++ - archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec_CXX='-rpath $libdir' - hardcode_libdir_separator_CXX=: - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - no_undefined_flag_CXX=' -zdefs' - archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' - hardcode_libdir_flag_spec_CXX='-R$libdir' - whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - - # Not sure whether something based on - # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 - # would be better. - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' - ;; - esac - ;; - esac - ;; - lynxos*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - m88k*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - mvs*) - case $cc_basename in - cxx*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - esac - ;; - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' - wlarc= - hardcode_libdir_flag_spec_CXX='-R$libdir' - hardcode_direct_CXX=yes - hardcode_shlibpath_var_CXX=no - fi - # Workaround some broken pre-1.5 toolchains - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' - ;; - openbsd2*) - # C++ shared libraries are fairly broken - ld_shlibs_CXX=no - ;; - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct_CXX=yes - hardcode_shlibpath_var_CXX=no - archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' - export_dynamic_flag_spec_CXX='${wl}-E' - whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - fi - output_verbose_link_cmd='echo' - else - ld_shlibs_CXX=no - fi - ;; - osf3*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. 
- archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' - hardcode_libdir_separator_CXX=: - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - cxx*) - allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - - hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_CXX=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_CXX=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - else - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - fi - ;; - esac - ;; - osf4* | osf5*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' - hardcode_libdir_separator_CXX=: - - # Archives containing C++ object files must be created using - # the KAI C++ compiler. 
- old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - cxx*) - allow_undefined_flag_CXX=' -expect_unresolved \*' - archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ - echo "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~ - $rm $lib.exp' - - hardcode_libdir_flag_spec_CXX='-rpath $libdir' - hardcode_libdir_separator_CXX=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - - hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_CXX=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "\-L"' - else - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - fi - ;; - esac - ;; - psos*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - lcc*) - # Lucid - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - esac - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - archive_cmds_need_lc_CXX=yes - no_undefined_flag_CXX=' -zdefs' - archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - hardcode_libdir_flag_spec_CXX='-R$libdir' - hardcode_shlibpath_var_CXX=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. - # Supported since Solaris 2.6 (maybe 2.5.1?) - whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' - ;; - esac - link_all_deplibs_CXX=yes - output_verbose_link_cmd='echo' - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' - ;; - gcx*) - # Green Hills C++ Compiler - archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - # The C++ compiler must be used to create the archive. - old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' - ;; - *) - # GNU C++ compiler with Solaris linker - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - no_undefined_flag_CXX=' ${wl}-z ${wl}defs' - if $CC --version | grep -v '^2\.7' > /dev/null; then - archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - else - # g++ 2.7 appears to require `-G' NOT `-shared' on this - # platform. 
- archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - archive_expsym_cmds_CXX='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | grep \"\-L\"" - fi - hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - ;; - esac - fi - ;; - esac - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag_CXX='${wl}-z,text' - archive_cmds_need_lc_CXX=no - hardcode_shlibpath_var_CXX=no - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. - # So that behaviour is only enabled if SCOABSPATH is set to a - # non-empty value in the environment. Most likely only useful for - # creating official distributions of packages. - # This is a hack until libtool officially supports absolute path - # names for shared libraries. 
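# A standalone sketch, not part of the patch: the ${SCOABSPATH:+...} construct
# used by the archive commands below is plain shell parameter expansion; the
# install directory is prepended to the soname only when SCOABSPATH is set to
# a non-empty value.  install_libdir and soname here are made-up example
# values, assuming a POSIX shell.
install_libdir=/usr/local/lib
soname=libfoo.so.1
unset SCOABSPATH
echo "relative soname: ${SCOABSPATH:+${install_libdir}/}${soname}"   # libfoo.so.1
SCOABSPATH=yes
echo "absolute soname: ${SCOABSPATH:+${install_libdir}/}${soname}"   # /usr/local/lib/libfoo.so.1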
- no_undefined_flag_CXX='${wl}-z,text' - allow_undefined_flag_CXX='${wl}-z,nodefs' - archive_cmds_need_lc_CXX=no - hardcode_shlibpath_var_CXX=no - hardcode_libdir_flag_spec_CXX='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - hardcode_libdir_separator_CXX=':' - link_all_deplibs_CXX=yes - export_dynamic_flag_spec_CXX='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - archive_cmds_CXX='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_CXX='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - esac - ;; - vxworks*) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; - *) - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no - ;; -esac -{ echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 -echo "${ECHO_T}$ld_shlibs_CXX" >&6; } -test "$ld_shlibs_CXX" = no && can_build_shared=no -GCC_CXX="$GXX" -LD_CXX="$LD" -cat > conftest.$ac_ext <&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; then - # Parse the compiler output and extract the necessary - # objects, libraries and library flags. - # Sentinel used to keep track of whether or not we are before - # the conftest object file. - pre_test_object_deps_done=no - - # The `*' in the case matches for architectures that use `case' in - # $output_verbose_cmd can trigger glob expansion during the loop - # eval without this substitution. - output_verbose_link_cmd=`$echo "X$output_verbose_link_cmd" | $Xsed -e "$no_glob_subst"` - - for p in `eval $output_verbose_link_cmd`; do - case $p in - - -L* | -R* | -l*) - # Some compilers place space between "-{L,R}" and the path. - # Remove the space. - if test $p = "-L" \ - || test $p = "-R"; then - prev=$p - continue - else - prev= - fi - if test "$pre_test_object_deps_done" = no; then - case $p in - -L* | -R*) - # Internal compiler library paths should come after those - # provided the user. The postdeps already come after the - # user supplied libs so there is no need to process them. - if test -z "$compiler_lib_search_path_CXX"; then - compiler_lib_search_path_CXX="${prev}${p}" - else - compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" - fi - ;; - # The "-l" case would never come before the object being - # linked, so don't bother handling this case. - esac - else - if test -z "$postdeps_CXX"; then - postdeps_CXX="${prev}${p}" - else - postdeps_CXX="${postdeps_CXX} ${prev}${p}" - fi - fi - ;; - *.$objext) - # This assumes that the test object file only shows up - # once in the compiler output. 
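# For illustration only (fabricated data, simplified -L/-R/-l handling): how
# the parsing loop in this hunk walks the compiler's verbose link output and
# uses the conftest object as a sentinel to decide whether each object or
# flag belongs before or after the user's own objects.
fake_output='/usr/lib/crt1.o /usr/lib/crti.o -L/usr/lib/gcc conftest.o -lstdc++ -lm /usr/lib/crtn.o'
pre_done=no; predeps=; postobjs=; searchdirs=; postlibs=
for p in $fake_output; do
  case $p in
    conftest.o) pre_done=yes ;;                  # everything after this is a postdep
    -L* | -R*)  searchdirs="$searchdirs $p" ;;   # compiler-internal search paths
    -l*)        postlibs="$postlibs $p" ;;       # libraries the compiler adds itself
    *.o) if test "$pre_done" = no; then
           predeps="$predeps $p"                 # startup objects placed first
         else
           postobjs="$postobjs $p"               # finalization objects placed last
         fi ;;
  esac
done
echo "predep objects:$predeps"
echo "postdep objects:$postobjs"
echo "search path:$searchdirs"
echo "postdeps:$postlibs"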
- if test "$p" = "conftest.$objext"; then - pre_test_object_deps_done=yes - continue - fi - if test "$pre_test_object_deps_done" = no; then - if test -z "$predep_objects_CXX"; then - predep_objects_CXX="$p" - else - predep_objects_CXX="$predep_objects_CXX $p" - fi - else - if test -z "$postdep_objects_CXX"; then - postdep_objects_CXX="$p" - else - postdep_objects_CXX="$postdep_objects_CXX $p" - fi - fi - ;; - *) ;; # Ignore the rest. - esac - done - # Clean up. - rm -f a.out a.exe -else - echo "libtool.m4: error: problem compiling CXX test program" -fi -$rm -f confest.$objext -# PORTME: override above test on systems where it is broken -case $host_os in -interix[3-9]*) - # Interix 3.5 installs completely hosed .la files for C++, so rather than - # hack all around it, let's just trust "g++" to DTRT. - predep_objects_CXX= - postdep_objects_CXX= - postdeps_CXX= - ;; -linux*) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - # - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - if test "$solaris_use_stlport4" != yes; then - postdeps_CXX='-library=Cstd -library=Crun' - fi - ;; - esac - ;; -solaris*) - case $cc_basename in - CC*) - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - # Adding this requires a known-good setup of shared libraries for - # Sun compiler versions before 5.6, else PIC objects from an old - # archive will be linked into the output, leading to subtle bugs. - if test "$solaris_use_stlport4" != yes; then - postdeps_CXX='-library=Cstd -library=Crun' - fi - ;; - esac - ;; -esac -case " $postdeps_CXX " in -*" -lc "*) archive_cmds_need_lc_CXX=no ;; -esac -lt_prog_compiler_wl_CXX= -lt_prog_compiler_pic_CXX= -lt_prog_compiler_static_CXX= -{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 -echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_static_CXX='-static' - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_CXX='-Bstatic' - fi - ;; - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' - ;; - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | cygwin* | os2* | pw32*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). 
- # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic_CXX='-DDLL_EXPORT' - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic_CXX='-fno-common' - ;; - *djgpp*) - # DJGPP does not support shared libraries at all - lt_prog_compiler_pic_CXX= - ;; - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic_CXX=-Kconform_pic - fi - ;; - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - ;; - *) - lt_prog_compiler_pic_CXX='-fPIC' - ;; - esac - ;; - *) - lt_prog_compiler_pic_CXX='-fPIC' - ;; - esac - else - case $host_os in - aix4* | aix5*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_CXX='-Bstatic' - else - lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' - fi - ;; - chorus*) - case $cc_basename in - cxch68*) - # Green Hills C++ Compiler - # _LT_AC_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" - ;; - esac - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - lt_prog_compiler_pic_CXX='-qnocommon' - lt_prog_compiler_wl_CXX='-Wl,' - ;; - esac - ;; - dgux*) - case $cc_basename in - ec++*) - lt_prog_compiler_pic_CXX='-KPIC' - ;; - ghcx*) - # Green Hills C++ Compiler - lt_prog_compiler_pic_CXX='-pic' - ;; - *) - ;; - esac - ;; - freebsd* | dragonfly*) - # FreeBSD uses GNU C++ - ;; - hpux9* | hpux10* | hpux11*) - case $cc_basename in - CC*) - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' - if test "$host_cpu" != ia64; then - lt_prog_compiler_pic_CXX='+Z' - fi - ;; - aCC*) - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic_CXX='+Z' - ;; - esac - ;; - *) - ;; - esac - ;; - interix*) - # This is c89, which is MS Visual C++ (no shared libs) - # Anyone wants to do a port? - ;; - irix5* | irix6* | nonstopux*) - case $cc_basename in - CC*) - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_static_CXX='-non_shared' - # CC pic flag -KPIC is the default. - ;; - *) - ;; - esac - ;; - linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # KAI C++ Compiler - lt_prog_compiler_wl_CXX='--backend -Wl,' - lt_prog_compiler_pic_CXX='-fPIC' - ;; - icpc* | ecpc*) - # Intel C++ - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_pic_CXX='-KPIC' - lt_prog_compiler_static_CXX='-static' - ;; - pgCC*) - # Portland Group C++ compiler. - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_pic_CXX='-fpic' - lt_prog_compiler_static_CXX='-Bstatic' - ;; - cxx*) - # Compaq C++ - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. 
- lt_prog_compiler_pic_CXX= - lt_prog_compiler_static_CXX='-non_shared' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - lt_prog_compiler_pic_CXX='-KPIC' - lt_prog_compiler_static_CXX='-Bstatic' - lt_prog_compiler_wl_CXX='-Qoption ld ' - ;; - esac - ;; - esac - ;; - lynxos*) - ;; - m88k*) - ;; - mvs*) - case $cc_basename in - cxx*) - lt_prog_compiler_pic_CXX='-W c,exportall' - ;; - *) - ;; - esac - ;; - netbsd* | netbsdelf*-gnu) - ;; - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - lt_prog_compiler_wl_CXX='--backend -Wl,' - ;; - RCC*) - # Rational C++ 2.4.1 - lt_prog_compiler_pic_CXX='-pic' - ;; - cxx*) - # Digital/Compaq C++ - lt_prog_compiler_wl_CXX='-Wl,' - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - lt_prog_compiler_pic_CXX= - lt_prog_compiler_static_CXX='-non_shared' - ;; - *) - ;; - esac - ;; - psos*) - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - lt_prog_compiler_pic_CXX='-KPIC' - lt_prog_compiler_static_CXX='-Bstatic' - lt_prog_compiler_wl_CXX='-Qoption ld ' - ;; - gcx*) - # Green Hills C++ Compiler - lt_prog_compiler_pic_CXX='-PIC' - ;; - *) - ;; - esac - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - lt_prog_compiler_pic_CXX='-pic' - lt_prog_compiler_static_CXX='-Bstatic' - ;; - lcc*) - # Lucid - lt_prog_compiler_pic_CXX='-pic' - ;; - *) - ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - lt_prog_compiler_pic_CXX='-KPIC' - ;; - *) - ;; - esac - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - case $cc_basename in - CC*) - lt_prog_compiler_wl_CXX='-Wl,' - lt_prog_compiler_pic_CXX='-KPIC' - lt_prog_compiler_static_CXX='-Bstatic' - ;; - esac - ;; - vxworks*) - ;; - *) - lt_prog_compiler_can_build_shared_CXX=no - ;; - esac - fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_CXX" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_CXX" >&6; } -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic_CXX"; then -{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 -echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_prog_compiler_pic_works_CXX=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:11603: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:11607: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. 
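# Sketch of the PIC probe in this hunk, with gcc and -fPIC as assumed example
# values: compile a trivial source with the candidate flag and accept the flag
# only if an object is produced and stderr shows nothing beyond the usual
# boilerplate warnings.
cat > conftest.c <<'EOF'
int t(void) { return 0; }
EOF
gcc -c conftest.c 2>baseline.err                # warnings we get anyway
gcc -fPIC -DPIC -c conftest.c 2>pic.err         # warnings with the PIC flag added
if test -s conftest.o && cmp -s baseline.err pic.err; then
  echo "-fPIC works"
else
  echo "-fPIC rejected (extra warnings or no object produced)"
fi
rm -f conftest.c conftest.o baseline.err pic.err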
- $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_CXX=yes - fi - fi - $rm conftest* -fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_CXX" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_CXX" >&6; } -if test x"$lt_prog_compiler_pic_works_CXX" = xyes; then - case $lt_prog_compiler_pic_CXX in - "" | " "*) ;; - *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; - esac -else - lt_prog_compiler_pic_CXX= - lt_prog_compiler_can_build_shared_CXX=no -fi -fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic_CXX= - ;; - *) - lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" - ;; -esac -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" -{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_CXX+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_prog_compiler_static_works_CXX=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_CXX=yes - fi - else - lt_prog_compiler_static_works_CXX=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" -fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_CXX" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_CXX" >&6; } -if test x"$lt_prog_compiler_static_works_CXX" = xyes; then - : -else - lt_prog_compiler_static_CXX= -fi -{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 -echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_c_o_CXX+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_prog_compiler_c_o_CXX=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:11707: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:11711: \$? 
= $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o_CXX=yes - fi - fi - chmod u+w . 2>&5 - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. - rmdir conftest - $rm conftest* -fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_CXX" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_c_o_CXX" >&6; } -hard_links="nottested" -if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 -echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { echo "$as_me:$LINENO: result: $hard_links" >&5 -echo "${ECHO_T}$hard_links" >&6; } - if test "$hard_links" = no; then - { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 -echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no -fi -{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } - export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - case $host_os in - aix4* | aix5*) - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - fi - ;; - pw32*) - export_symbols_cmds_CXX="$ltdll_cmds" - ;; - cygwin* | mingw*) - export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' - ;; - linux* | k*bsd*-gnu) - link_all_deplibs_CXX=no - ;; - *) - export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - ;; - esac -{ echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 -echo "${ECHO_T}$ld_shlibs_CXX" >&6; } -test "$ld_shlibs_CXX" = no && can_build_shared=no -# -# Do we need to explicitly link libc? 
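# Sketch only, with gcc as an assumed compiler: the check that follows answers
# this question by running the link command verbosely and looking for " -lc "
# in the output; if the compiler driver already passes -lc to the linker,
# adding it again is unnecessary.
cat > conftest.c <<'EOF'
int t(void) { return 0; }
EOF
gcc -fPIC -c conftest.c
if gcc -shared conftest.o -v 2>&1 | grep ' -lc ' >/dev/null; then
  echo "driver already links -lc"
else
  echo "-lc must be added explicitly"
fi
rm -f conftest.c conftest.o a.out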
-# -case "x$archive_cmds_need_lc_CXX" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc_CXX=yes - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $archive_cmds_CXX in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 -echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } - $rm conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl_CXX - pic_flag=$lt_prog_compiler_pic_CXX - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag_CXX - allow_undefined_flag_CXX= - if { (eval echo "$as_me:$LINENO: \"$archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 - (eval $archive_cmds_CXX 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - then - archive_cmds_need_lc_CXX=no - else - archive_cmds_need_lc_CXX=yes - fi - allow_undefined_flag_CXX=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_CXX" >&5 -echo "${ECHO_T}$archive_cmds_need_lc_CXX" >&6; } - ;; - esac - fi - ;; -esac -{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6; } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` + else + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. 
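# Standalone illustration, assuming gcc as the compiler: the search path above
# is obtained from the compiler itself via -print-search-dirs; the multilib
# and de-duplication steps that follow in this hunk are omitted here.
dirs=`gcc -print-search-dirs | grep '^libraries:' \
        | sed -e 's/^libraries://' -e 's,=/,/,g' -e 's/:/ /g'`
for d in $dirs; do
  test -d "$d" && echo "library search dir: $d"
done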
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi library_names_spec= libname_spec='lib$name' soname_spec= @@ -11866,7 +8773,6 @@ version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" - need_lib_prefix=unknown hardcode_into_libs=no @@ -11884,7 +8790,7 @@ soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) +aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no @@ -11903,7 +8809,7 @@ aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no @@ -11929,9 +8835,18 @@ ;; amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac ;; beos*) @@ -11954,25 +8869,28 @@ # libtool to hard-code these into programs ;; -cygwin* | mingw* | pw32*) +cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) + yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ - $rm \$dlpath' + $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in @@ -11981,20 +8899,20 @@ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" ;; - mingw*) + mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then + sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH printed by # mingw gcc, but we are running on Cygwin. Gcc prints its search # path with ; separators, and with drive letters. We can handle the # drive letters (cygwin fileutils understands them), so leave them, # especially as we might pass files found there to a mingw objdump, # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi ;; pw32*) @@ -12018,12 +8936,13 @@ version_type=darwin need_lib_prefix=no need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; @@ -12051,7 +8970,14 @@ *) objformat=elf ;; esac fi - version_type=freebsd-$objformat + # Handle Gentoo/FreeBSD as it was Linux + case $host_vendor in + gentoo) + version_type=linux ;; + *) + version_type=freebsd-$objformat ;; + esac + case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' @@ -12062,6 +8988,12 @@ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; + linux) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + need_lib_prefix=no + need_version=no + ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in @@ -12116,18 +9048,18 @@ fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; 
- hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH @@ -12204,6 +9136,64 @@ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no + # Some binutils ld are patched to set DT_RUNPATH + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then + shlibpath_overrides_runpath=yes +fi + +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. 
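# Sketch of the DT_RUNPATH probe added in this hunk; gcc and /foo are example
# values, objdump stands in for $OBJDUMP.  Link a trivial program with an
# rpath flag and ask objdump whether the linker recorded it as RUNPATH (new
# dtags) rather than RPATH.
cat > conftest.c <<'EOF'
int main(void) { return 0; }
EOF
gcc conftest.c -Wl,-rpath,/foo -o conftest
if objdump -p conftest | grep 'RUNPATH.*/foo' >/dev/null; then
  echo "ld sets DT_RUNPATH: shlibpath_overrides_runpath=yes"
else
  echo "ld sets DT_RPATH only"
fi
rm -f conftest.c conftest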
@@ -12211,7 +9201,7 @@ # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi @@ -12224,23 +9214,11 @@ dynamic_linker='GNU/Linux ld.so' ;; -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - netbsd*) version_type=sunos need_lib_prefix=no need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' @@ -12261,14 +9239,16 @@ shlibpath_overrides_runpath=yes ;; -nto-qnx*) - version_type=linux +*nto* | *qnx*) + version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' ;; openbsd*) @@ -12277,13 +9257,13 @@ need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no @@ -12355,7 +9335,6 @@ sni) shlibpath_overrides_runpath=no need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' runpath_var=LD_RUN_PATH ;; siemens) @@ -12386,13 +9365,12 @@ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - shlibpath_overrides_runpath=no else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - shlibpath_overrides_runpath=yes case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" @@ -12402,6 +9380,17 @@ sys_lib_dlsearch_path_spec='/usr/lib' ;; +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + uts4*) version_type=linux library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' @@ -12413,8 +9402,8 @@ dynamic_linker=no ;; esac -{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 -echo "${ECHO_T}$dynamic_linker" >&6; } +{ $as_echo "$as_me:$LINENO: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" @@ -12422,8707 +9411,6250 @@ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi -{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } -hardcode_action_CXX= -if test -n "$hardcode_libdir_flag_spec_CXX" || \ - test -n "$runpath_var_CXX" || \ - test "X$hardcode_automatic_CXX" = "Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct_CXX" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, CXX)" != no && - test "$hardcode_minus_L_CXX" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_CXX=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action_CXX=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- hardcode_action_CXX=unsupported +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi -{ echo "$as_me:$LINENO: result: $hardcode_action_CXX" >&5 -echo "${ECHO_T}$hardcode_action_CXX" >&6; } - -if test "$hardcode_action_CXX" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi -# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - compiler_CXX \ - CC_CXX \ - LD_CXX \ - lt_prog_compiler_wl_CXX \ - lt_prog_compiler_pic_CXX \ - lt_prog_compiler_static_CXX \ - lt_prog_compiler_no_builtin_flag_CXX \ - export_dynamic_flag_spec_CXX \ - thread_safe_flag_spec_CXX \ - whole_archive_flag_spec_CXX \ - enable_shared_with_static_runtimes_CXX \ - old_archive_cmds_CXX \ - old_archive_from_new_cmds_CXX \ - predep_objects_CXX \ - postdep_objects_CXX \ - predeps_CXX \ - postdeps_CXX \ - compiler_lib_search_path_CXX \ - archive_cmds_CXX \ - archive_expsym_cmds_CXX \ - postinstall_cmds_CXX \ - postuninstall_cmds_CXX \ - old_archive_from_expsyms_cmds_CXX \ - allow_undefined_flag_CXX \ - no_undefined_flag_CXX \ - export_symbols_cmds_CXX \ - hardcode_libdir_flag_spec_CXX \ - hardcode_libdir_flag_spec_ld_CXX \ - hardcode_libdir_separator_CXX \ - hardcode_automatic_CXX \ - module_cmds_CXX \ - module_expsym_cmds_CXX \ - lt_cv_prog_compiler_c_o_CXX \ - fix_srcfile_path_CXX \ - exclude_expsyms_CXX \ - include_expsyms_CXX; do - - case $var in - old_archive_cmds_CXX | \ - old_archive_from_new_cmds_CXX | \ - archive_cmds_CXX | \ - archive_expsym_cmds_CXX | \ - module_cmds_CXX | \ - module_expsym_cmds_CXX | \ - old_archive_from_expsyms_cmds_CXX | \ - export_symbols_cmds_CXX | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. 
- eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" - ;; - *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" - ;; - esac - done - - case $lt_echo in - *'\$0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` - ;; - esac - -cfgfile="$ofile" - - cat <<__EOF__ >> "$cfgfile" -# ### BEGIN LIBTOOL TAG CONFIG: $tagname -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared -# Whether or not to build static libraries. -build_old_libs=$enable_static -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc_CXX -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os -# An echo program that does not interpret backslashes. -echo=$lt_echo -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS -# A C compiler. -LTCC=$lt_LTCC -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS -# A language-specific compiler. -CC=$lt_compiler_CXX -# Is the compiler the GNU C compiler? -with_gcc=$GCC_CXX -# An ERE matcher. -EGREP=$lt_EGREP -# The linker used to build libraries. -LD=$lt_LD_CXX -# Whether we need hard or soft links. -LN_S=$lt_LN_S -# A BSD-compatible nm program. -NM=$lt_NM -# A symbol stripping program -STRIP=$lt_STRIP -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" -# Used on cygwin: assembler. -AS="$AS" -# The name of the directory that contains temporary libtool files. -objdir=$objdir -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl_CXX -# Object file suffix (normally "o"). -objext="$ac_objext" -# Old archive suffix (normally "a"). -libext="$libext" -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' -# Executable file suffix (normally ""). -exeext="$exeext" -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic_CXX -pic_mode=$pic_mode -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX -# Must we lock files when doing compilation? -need_locks=$lt_need_locks -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix -# Do we need a version for libraries? -need_version=$need_version -# Whether dlopen is supported. -dlopen_support=$enable_dlopen -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static_CXX -# Compiler flag to turn off builtin functions. 
-no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_thread_safe_flag_spec_CXX -# Library versioning type. -version_type=$version_type -# Format of library name prefix. -libname_spec=$lt_libname_spec -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_old_archive_cmds_CXX -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX -# Commands used to build and install a shared archive. -archive_cmds=$lt_archive_cmds_CXX -archive_expsym_cmds=$lt_archive_expsym_cmds_CXX -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_module_cmds_CXX -module_expsym_cmds=$lt_module_expsym_cmds_CXX -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_predep_objects_CXX - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_postdep_objects_CXX - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_predeps_CXX - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_postdeps_CXX - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_compiler_lib_search_path_CXX -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag_CXX -# Flag that forces no undefined symbols. -no_undefined_flag=$lt_no_undefined_flag_CXX -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address -# This is the shared library runtime path variable. -runpath_var=$runpath_var -# This is the shared library path variable. 
-shlibpath_var=$shlibpath_var -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action_CXX -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX - -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX - -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$hardcode_direct_CXX - -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$hardcode_minus_L_CXX - -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX - -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$hardcode_automatic_CXX -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" -# Whether libtool must link a program against all its dependency libraries. -link_all_deplibs=$link_all_deplibs_CXX -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path -# Set to yes if exported symbols are required. -always_export_symbols=$always_export_symbols_CXX -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds_CXX -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms_CXX -# Symbols that must always be exported. -include_expsyms=$lt_include_expsyms_CXX -# ### END LIBTOOL TAG CONFIG: $tagname -__EOF__ + { $as_echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test "X$hardcode_automatic" = "Xyes" ; then -else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. - ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" + # We can hardcode non-existent directories. 
+ if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported fi +{ $as_echo "$as_me:$LINENO: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } +if test "$hardcode_action" = relink || + test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -CC=$lt_save_CC -LDCXX=$LD -LD=$lt_save_LD -GCC=$lt_save_GCC -with_gnu_ldcxx=$with_gnu_ld -with_gnu_ld=$lt_save_with_gnu_ld -lt_cv_path_LDCXX=$lt_cv_path_LD -lt_cv_path_LD=$lt_save_path_LD -lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld -lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld - - else - tagname="" - fi - ;; - - F77) - if test -n "$F77" && test "X$F77" != "Xno"; then - -ac_ext=f -ac_compile='$F77 -c $FFLAGS conftest.$ac_ext >&5' -ac_link='$F77 -o conftest$ac_exeext $FFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_f77_compiler_gnu - - -archive_cmds_need_lc_F77=no -allow_undefined_flag_F77= -always_export_symbols_F77=no -archive_expsym_cmds_F77= -export_dynamic_flag_spec_F77= -hardcode_direct_F77=no -hardcode_libdir_flag_spec_F77= -hardcode_libdir_flag_spec_ld_F77= -hardcode_libdir_separator_F77= -hardcode_minus_L_F77=no -hardcode_automatic_F77=no -module_cmds_F77= -module_expsym_cmds_F77= -link_all_deplibs_F77=unknown -old_archive_cmds_F77=$old_archive_cmds -no_undefined_flag_F77= -whole_archive_flag_spec_F77= -enable_shared_with_static_runtimes_F77=no - -# Source file extension for f77 test sources. -ac_ext=f - -# Object file extension for compiled f77 test sources. -objext=o -objext_F77=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="\ - subroutine t - return - end -" -# Code to be used in simple link tests -lt_simple_link_test_code="\ - program t - end -" -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -# Allow CC to be a program name with arguments. 
-compiler=$CC + if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -CC=${F77-"f77"} -compiler=$CC -compiler_F77=$CC -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` - - -{ echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 -echo $ECHO_N "checking if libtool supports shared libraries... $ECHO_C" >&6; } -{ echo "$as_me:$LINENO: result: $can_build_shared" >&5 -echo "${ECHO_T}$can_build_shared" >&6; } - -{ echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 -echo $ECHO_N "checking whether to build shared libraries... $ECHO_C" >&6; } -test "$can_build_shared" = "no" && enable_shared=no - -# On AIX, shared libraries and static libraries use the same namespace, and -# are all built from PIC. -case $host_os in -aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; -aix4* | aix5*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; esac -{ echo "$as_me:$LINENO: result: $enable_shared" >&5 -echo "${ECHO_T}$enable_shared" >&6; } +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_dl_dlopen=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -{ echo "$as_me:$LINENO: checking whether to build static libraries" >&5 -echo $ECHO_N "checking whether to build static libraries... $ECHO_C" >&6; } -# Make sure either enable_shared or enable_static is yes. -test "$enable_shared" = yes || enable_static=yes -{ echo "$as_me:$LINENO: result: $enable_static" >&5 -echo "${ECHO_T}$enable_static" >&6; } - -GCC_F77="$G77" -LD_F77="$LD" - -lt_prog_compiler_wl_F77= -lt_prog_compiler_pic_F77= -lt_prog_compiler_static_F77= + ac_cv_lib_dl_dlopen=no +fi -{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 -echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else - if test "$GCC" = yes; then - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_static_F77='-static' + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_F77='-Bstatic' - fi - ;; +fi - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - lt_prog_compiler_pic_F77='-m68020 -resident32 -malways-restore-a4' - ;; + ;; - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; + *) + { $as_echo "$as_me:$LINENO: checking for shl_load" >&5 +$as_echo_n "checking for shl_load... " >&6; } +if test "${ac_cv_func_shl_load+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define shl_load to an innocuous variant, in case declares shl_load. + For example, HP-UX 11i declares gettimeofday. */ +#define shl_load innocuous_shl_load - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic_F77='-DDLL_EXPORT' - ;; +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char shl_load (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic_F77='-fno-common' - ;; +#ifdef __STDC__ +# include +#else +# include +#endif - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. 
- ;; +#undef shl_load - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - lt_prog_compiler_can_build_shared_F77=no - enable_shared=no - ;; +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_shl_load || defined __stub___shl_load +choke me +#endif - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic_F77=-Kconform_pic - fi - ;; +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_func_shl_load=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic_F77='-fPIC' - ;; - esac - ;; + ac_cv_func_shl_load=no +fi - *) - lt_prog_compiler_pic_F77='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. - case $host_os in - aix*) - lt_prog_compiler_wl_F77='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_F77='-Bstatic' - else - lt_prog_compiler_static_F77='-bnso -bI:/lib/syscalls.exp' - fi - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - lt_prog_compiler_pic_F77='-qnocommon' - lt_prog_compiler_wl_F77='-Wl,' - ;; - esac - ;; +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5 +$as_echo "$ac_cv_func_shl_load" >&6; } +if test $ac_cv_func_shl_load = yes; then + lt_cv_dlopen="shl_load" +else + { $as_echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if test "${ac_cv_lib_dld_shl_load+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic_F77='-DDLL_EXPORT' - ;; +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_dld_shl_load=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl_F77='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic_F77='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static_F77='${wl}-a ${wl}archive' - ;; + ac_cv_lib_dld_shl_load=no +fi - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl_F77='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static_F77='-non_shared' - ;; - - newsos6) - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - ;; - - linux* | k*bsd*-gnu) - case $cc_basename in - icc* | ecc*) - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_pic_F77='-fpic' - lt_prog_compiler_static_F77='-Bstatic' - ;; - ccc*) - lt_prog_compiler_wl_F77='-Wl,' - # All Alpha code is PIC. - lt_prog_compiler_static_F77='-non_shared' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - lt_prog_compiler_wl_F77='-Wl,' - ;; - *Sun\ F*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - lt_prog_compiler_wl_F77='' - ;; - esac - ;; - esac - ;; - - osf3* | osf4* | osf5*) - lt_prog_compiler_wl_F77='-Wl,' - # All OSF/1 code is PIC. 
- lt_prog_compiler_static_F77='-non_shared' - ;; - - rdos*) - lt_prog_compiler_static_F77='-non_shared' - ;; - - solaris*) - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - lt_prog_compiler_wl_F77='-Qoption ld ';; - *) - lt_prog_compiler_wl_F77='-Wl,';; - esac - ;; - - sunos4*) - lt_prog_compiler_wl_F77='-Qoption ld ' - lt_prog_compiler_pic_F77='-PIC' - lt_prog_compiler_static_F77='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec ;then - lt_prog_compiler_pic_F77='-Kconform_pic' - lt_prog_compiler_static_F77='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_pic_F77='-KPIC' - lt_prog_compiler_static_F77='-Bstatic' - ;; - - unicos*) - lt_prog_compiler_wl_F77='-Wl,' - lt_prog_compiler_can_build_shared_F77=no - ;; +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test $ac_cv_lib_dld_shl_load = yes; then + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" +else + { $as_echo "$as_me:$LINENO: checking for dlopen" >&5 +$as_echo_n "checking for dlopen... " >&6; } +if test "${ac_cv_func_dlopen+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define dlopen to an innocuous variant, in case declares dlopen. + For example, HP-UX 11i declares gettimeofday. */ +#define dlopen innocuous_dlopen - uts4*) - lt_prog_compiler_pic_F77='-pic' - lt_prog_compiler_static_F77='-Bstatic' - ;; +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char dlopen (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ - *) - lt_prog_compiler_can_build_shared_F77=no - ;; - esac - fi +#ifdef __STDC__ +# include +#else +# include +#endif -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_F77" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_F77" >&6; } +#undef dlopen -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic_F77"; then +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_dlopen || defined __stub___dlopen +choke me +#endif -{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 -echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... 
$ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_F77+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_func_dlopen=yes else - lt_prog_compiler_pic_works_F77=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic_F77" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:13284: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:13288: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_F77=yes - fi - fi - $rm conftest* + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_func_dlopen=no fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_F77" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_F77" >&6; } -if test x"$lt_prog_compiler_pic_works_F77" = xyes; then - case $lt_prog_compiler_pic_F77 in - "" | " "*) ;; - *) lt_prog_compiler_pic_F77=" $lt_prog_compiler_pic_F77" ;; - esac -else - lt_prog_compiler_pic_F77= - lt_prog_compiler_can_build_shared_F77=no +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5 +$as_echo "$ac_cv_func_dlopen" >&6; } +if test $ac_cv_func_dlopen = yes; then + lt_cv_dlopen="dlopen" +else + { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if test "${ac_cv_lib_dl_dlopen+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ -fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic_F77= - ;; - *) - lt_prog_compiler_pic_F77="$lt_prog_compiler_pic_F77" - ;; +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; esac - -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl_F77 eval lt_tmp_static_flag=\"$lt_prog_compiler_static_F77\" -{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_F77+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_dl_dlopen=yes else - lt_prog_compiler_static_works_F77=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_F77=yes - fi - else - lt_prog_compiler_static_works_F77=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_lib_dl_dlopen=no fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_F77" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_F77" >&6; } -if test x"$lt_prog_compiler_static_works_F77" = xyes; then - : -else - lt_prog_compiler_static_F77= +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi - - -{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 -echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... 
$ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_c_o_F77+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test $ac_cv_lib_dl_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else - lt_cv_prog_compiler_c_o_F77=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext + { $as_echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... " >&6; } +if test "${ac_cv_lib_svld_dlopen+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:13388: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:13392: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o_F77=yes - fi - fi - chmod u+w . 2>&5 - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. - rmdir conftest - $rm conftest* +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_svld_dlopen=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_lib_svld_dlopen=no fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_F77" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_c_o_F77" >&6; } +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test $ac_cv_lib_svld_dlopen = yes; then + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { $as_echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if test "${ac_cv_lib_dld_dld_link+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -hard_links="nottested" -if test "$lt_cv_prog_compiler_c_o_F77" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 -echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { echo "$as_me:$LINENO: result: $hard_links" >&5 -echo "${ECHO_T}$hard_links" >&6; } - if test "$hard_links" = no; then - { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 -echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} - need_locks=warn - fi +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_dld_dld_link=yes else - need_locks=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_dld_dld_link=no fi -{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... 
$ECHO_C" >&6; } +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test $ac_cv_lib_dld_dld_link = yes; then + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" +fi - runpath_var= - allow_undefined_flag_F77= - enable_shared_with_static_runtimes_F77=no - archive_cmds_F77= - archive_expsym_cmds_F77= - old_archive_From_new_cmds_F77= - old_archive_from_expsyms_cmds_F77= - export_dynamic_flag_spec_F77= - whole_archive_flag_spec_F77= - thread_safe_flag_spec_F77= - hardcode_libdir_flag_spec_F77= - hardcode_libdir_flag_spec_ld_F77= - hardcode_libdir_separator_F77= - hardcode_direct_F77=no - hardcode_minus_L_F77=no - hardcode_shlibpath_var_F77=unsupported - link_all_deplibs_F77=unknown - hardcode_automatic_F77=no - module_cmds_F77= - module_expsym_cmds_F77= - always_export_symbols_F77=no - export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms_F77= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - exclude_expsyms_F77="_GLOBAL_OFFSET_TABLE_" - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - extract_expsyms_cmds= - # Just being paranoid about ensuring that cc_basename is set. - for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` - case $host_os in - cygwin* | mingw* | pw32*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; - esac +fi - ld_shlibs_F77=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec_F77='${wl}--rpath ${wl}$libdir' - export_dynamic_flag_spec_F77='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. 
- if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec_F77="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - whole_archive_flag_spec_F77= - fi - supports_anon_versioning=no - case `$LD -v 2>/dev/null` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac +fi - # See if GNU ld supports shared libraries. - case $host_os in - aix3* | aix4* | aix5*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - ld_shlibs_F77=no - cat <&2 -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. +fi -EOF - fi - ;; - amigaos*) - archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_minus_L_F77=yes - - # Samuel A. Falvo II reports - # that the semantics of dynamic libraries on AmigaOS, at least up - # to version 4, is to share data among multiple programs linked - # with the same dynamic library. Since this doesn't match the - # behavior of shared libraries on other platforms, we can't use - # them. - ld_shlibs_F77=no - ;; +fi - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag_F77=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - archive_cmds_F77='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - ld_shlibs_F77=no - fi - ;; - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, F77) is actually meaningless, - # as there is no search path for DLLs. - hardcode_libdir_flag_spec_F77='-L$libdir' - allow_undefined_flag_F77=unsupported - always_export_symbols_F77=no - enable_shared_with_static_runtimes_F77=yes - export_symbols_cmds_F77='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' +fi - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... 
- archive_expsym_cmds_F77='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs_F77=no - fi - ;; + ;; + esac - interix[3-9]*) - hardcode_direct_F77=no - hardcode_shlibpath_var_F77=no - hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' - export_dynamic_flag_spec_F77='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - archive_cmds_F77='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds_F77='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi - gnu* | linux* | k*bsd*-gnu) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - tmp_addflag= - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - whole_archive_flag_spec_F77='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec_F77='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - *) - tmp_sharedflag='-shared' ;; - esac - archive_cmds_F77='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - if test $supports_anon_versioning = yes; then - archive_expsym_cmds_F77='$echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - $echo "local: *; };" >> 
$output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - link_all_deplibs_F77=no - else - ld_shlibs_F77=no - fi - ;; + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds_F77='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" - solaris*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - ld_shlibs_F77=no - cat <&2 + { $as_echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5 +$as_echo_n "checking whether a program can dlopen itself... " >&6; } +if test "${lt_cv_dlopen_self+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line 10150 "configure" +#include "confdefs.h" -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. +#if HAVE_DLFCN_H +#include +#endif -EOF - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs_F77=no - fi - ;; +#include - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs_F77=no - cat <<_LT_EOF 1>&2 +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif -_LT_EOF - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' - archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' - else - ld_shlibs_F77=no - fi - ;; - esac - ;; +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; - sunos4*) - archive_cmds_F77='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct_F77=yes - hardcode_shlibpath_var_F77=no - ;; + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs_F77=no - fi - ;; + return status; +} +_LT_EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* - if test "$ld_shlibs_F77" = no; then - runpath_var= - hardcode_libdir_flag_spec_F77= - export_dynamic_flag_spec_F77= - whole_archive_flag_spec_F77= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag_F77=unsupported - always_export_symbols_F77=yes - archive_expsym_cmds_F77='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L_F77=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - hardcode_direct_F77=unsupported - fi - ;; - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. 
- # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - export_symbols_cmds_F77='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds_F77='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no +fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5 +$as_echo "$lt_cv_dlopen_self" >&6; } - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { $as_echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5 +$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } +if test "${lt_cv_dlopen_self_static+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line 10246 "configure" +#include "confdefs.h" - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi +#if HAVE_DLFCN_H +#include +#endif - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. +#include - archive_cmds_F77='' - hardcode_direct_F77=yes - hardcode_libdir_separator_F77=':' - link_all_deplibs_F77=yes +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif - if test "$GCC" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct_F77=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L_F77=yes - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_libdir_separator_F77= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. 
The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols_F77=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag_F77='-berok' - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF - program main +void fnord() { int i=42;} +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; - end -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_f77_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + /* dlclose (self); */ + } + else + puts (dlerror ()); -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + return status; +} +_LT_EOF + if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +rm -fr conftest* fi +{ $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds_F77="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec_F77='${wl}-R $libdir:/usr/lib:/lib' - allow_undefined_flag_F77="-z nodefs" - archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF - program main + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac - end -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_f77_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 -fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - hardcode_libdir_flag_spec_F77='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. 
- no_undefined_flag_F77=' ${wl}-bernotok' - allow_undefined_flag_F77=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec_F77='$convenience' - archive_cmds_need_lc_F77=yes - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds_F77="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - amigaos*) - archive_cmds_F77='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_minus_L_F77=yes - # see comment about different semantics on the GNU ld section - ld_shlibs_F77=no - ;; - bsdi[45]*) - export_dynamic_flag_spec_F77=-rdynamic - ;; - cygwin* | mingw* | pw32*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - hardcode_libdir_flag_spec_F77=' ' - allow_undefined_flag_F77=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - archive_cmds_F77='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_From_new_cmds_F77='true' - # FIXME: Should let the user specify the lib program. 
- old_archive_cmds_F77='lib -OUT:$oldlib$oldobjs$old_deplibs' - fix_srcfile_path_F77='`cygpath -w "$srcfile"`' - enable_shared_with_static_runtimes_F77=yes - ;; - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[012]) - allow_undefined_flag_F77='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[012]) - allow_undefined_flag_F77='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - allow_undefined_flag_F77='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - archive_cmds_need_lc_F77=no - hardcode_direct_F77=no - hardcode_automatic_F77=yes - hardcode_shlibpath_var_F77=unsupported - whole_archive_flag_spec_F77='' - link_all_deplibs_F77=yes - if test "$GCC" = yes ; then - output_verbose_link_cmd='echo' - archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - archive_cmds_F77='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - ld_shlibs_F77=no - ;; - esac - fi - ;; - dgux*) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_shlibpath_var_F77=no - ;; - freebsd1*) - ld_shlibs_F77=no - ;; - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). 
- freebsd2.2*) - archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec_F77='-R$libdir' - hardcode_direct_F77=yes - hardcode_shlibpath_var_F77=no - ;; - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_F77=yes - hardcode_minus_L_F77=yes - hardcode_shlibpath_var_F77=no - ;; - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) - archive_cmds_F77='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec_F77='-R$libdir' - hardcode_direct_F77=yes - hardcode_shlibpath_var_F77=no - ;; - hpux9*) - if test "$GCC" = yes; then - archive_cmds_F77='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds_F77='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_F77=: - hardcode_direct_F77=yes - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L_F77=yes - export_dynamic_flag_spec_F77='${wl}-E' - ;; - hpux10*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_F77='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_F77=: - hardcode_direct_F77=yes - export_dynamic_flag_spec_F77='${wl}-E' +striplib= +old_striplib= +{ $as_echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L_F77=yes - fi - ;; - hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds_F77='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_F77='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_F77='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec_F77='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_F77=: - case $host_cpu in - hppa*64*|ia64*) - hardcode_libdir_flag_spec_ld_F77='+b $libdir' - hardcode_direct_F77=no - hardcode_shlibpath_var_F77=no - ;; - *) - hardcode_direct_F77=yes - export_dynamic_flag_spec_F77='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L_F77=yes - ;; - esac - fi - ;; - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - archive_cmds_F77='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - archive_cmds_F77='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec_ld_F77='-rpath $libdir' - fi - hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_F77=: - link_all_deplibs_F77=yes - ;; - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds_F77='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec_F77='-R$libdir' - hardcode_direct_F77=yes - hardcode_shlibpath_var_F77=no - ;; - newsos6) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_F77=yes - hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_F77=: - hardcode_shlibpath_var_F77=no - ;; - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct_F77=yes - hardcode_shlibpath_var_F77=no - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' - export_dynamic_flag_spec_F77='${wl}-E' - else - case $host_os in - openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) - archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_F77='-R$libdir' - ;; - *) - archive_cmds_F77='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - 
hardcode_libdir_flag_spec_F77='${wl}-rpath,$libdir' - ;; - esac - fi - else - ld_shlibs_F77=no - fi - ;; - os2*) - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_minus_L_F77=yes - allow_undefined_flag_F77=unsupported - archive_cmds_F77='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - old_archive_From_new_cmds_F77='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - osf3*) - if test "$GCC" = yes; then - allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - allow_undefined_flag_F77=' -expect_unresolved \*' - archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - fi - hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_F77=: - ;; - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag_F77=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_F77='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec_F77='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag_F77=' -expect_unresolved \*' - archive_cmds_F77='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds_F77='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ - $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec_F77='-rpath $libdir' - fi - hardcode_libdir_separator_F77=: - ;; + # Report which library types will actually be built + { $as_echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... 
" >&6; } + { $as_echo "$as_me:$LINENO: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } - solaris*) - no_undefined_flag_F77=' -z text' - if test "$GCC" = yes; then - wlarc='${wl}' - archive_cmds_F77='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' - else - wlarc='' - archive_cmds_F77='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds_F77='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - fi - hardcode_libdir_flag_spec_F77='-R$libdir' - hardcode_shlibpath_var_F77=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - whole_archive_flag_spec_F77='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - whole_archive_flag_spec_F77='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs_F77=yes - ;; + { $as_echo "$as_me:$LINENO: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test "$can_build_shared" = "no" && enable_shared=no - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds_F77='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_F77='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_direct_F77=yes - hardcode_minus_L_F77=yes - hardcode_shlibpath_var_F77=no - ;; + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; - sysv4) - case $host_vendor in - sni) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_F77=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- archive_cmds_F77='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds_F77='$CC -r -o $output$reload_objs' - hardcode_direct_F77=no - ;; - motorola) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_F77=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var_F77=no - ;; + aix[4-9]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + { $as_echo "$as_me:$LINENO: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } - sysv4.3*) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var_F77=no - export_dynamic_flag_spec_F77='-Bexport' - ;; + { $as_echo "$as_me:$LINENO: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + { $as_echo "$as_me:$LINENO: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var_F77=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs_F77=yes - fi - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag_F77='${wl}-z,text' - archive_cmds_need_lc_F77=no - hardcode_shlibpath_var_F77=no - runpath_var='LD_RUN_PATH' - if test "$GCC" = yes; then - archive_cmds_F77='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_F77='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. 
- no_undefined_flag_F77='${wl}-z,text' - allow_undefined_flag_F77='${wl}-z,nodefs' - archive_cmds_need_lc_F77=no - hardcode_shlibpath_var_F77=no - hardcode_libdir_flag_spec_F77='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - hardcode_libdir_separator_F77=':' - link_all_deplibs_F77=yes - export_dynamic_flag_spec_F77='${wl}-Bexport' - runpath_var='LD_RUN_PATH' +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - if test "$GCC" = yes; then - archive_cmds_F77='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_F77='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_F77='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; +CC="$lt_save_CC" - uts4*) - archive_cmds_F77='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_F77='-L$libdir' - hardcode_shlibpath_var_F77=no - ;; - *) - ld_shlibs_F77=no - ;; - esac - fi -{ echo "$as_me:$LINENO: result: $ld_shlibs_F77" >&5 -echo "${ECHO_T}$ld_shlibs_F77" >&6; } -test "$ld_shlibs_F77" = no && can_build_shared=no -# -# Do we need to explicitly link libc? -# -case "x$archive_cmds_need_lc_F77" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc_F77=yes - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $archive_cmds_F77 in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 -echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } - $rm conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl_F77 - pic_flag=$lt_prog_compiler_pic_F77 - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag_F77 - allow_undefined_flag_F77= - if { (eval echo "$as_me:$LINENO: \"$archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 - (eval $archive_cmds_F77 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - then - archive_cmds_need_lc_F77=no - else - archive_cmds_need_lc_F77=yes - fi - allow_undefined_flag_F77=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_F77" >&5 -echo "${ECHO_T}$archive_cmds_need_lc_F77" >&6; } - ;; - esac - fi - ;; -esac -{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... 
$ECHO_C" >&6; } -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown -hardcode_into_libs=no -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; -aix4* | aix5*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; -amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; + ac_config_commands="$ac_config_commands libtool" -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; -bsdi[45]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; -cygwin* | mingw* | pw32*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $rm \$dlpath' - shlibpath_overrides_runpath=yes +# Only expand once: - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. 
But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; +done +IFS=$as_save_IFS -freebsd1*) - dynamic_linker=no - ;; +rm -rf conftest.one conftest.two conftest.dir -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install else - case $host_os in - freebsd[123]*) objformat=aout ;; - *) objformat=elf ;; - esac + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; +fi +{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. - postinstall_cmds='chmod 555 $lib' - ;; +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' -interix[3-9]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if test "${ac_cv_prog_AWK+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:$LINENO: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -# This must be Linux ELF. -linux* | k*bsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi + test -n "$AWK" && break +done - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. 
- dynamic_linker='GNU/Linux ld.so' - ;; -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -nto-qnx*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -openbsd*) - version_type=sunos - sys_lib_dlsearch_path_spec="/usr/lib" - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. - case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[89] | openbsd2.[89].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes - fi - ;; - -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; +######### +# Enable large file support (if special flags are necessary) +# +# Check whether --enable-largefile was given. +if test "${enable_largefile+set}" = set; then + enableval=$enable_largefile; +fi + +if test "$enable_largefile" != no; then + + { $as_echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5 +$as_echo_n "checking for special C compiler options needed for large files... " >&6; } +if test "${ac_cv_sys_largefile_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_sys_largefile_CC=no + if test "$GCC" != yes; then + ac_save_CC=$CC + while :; do + # IRIX 6.2 and later do not support large files by default, + # so use the C compiler's -n32 option if that helps. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; + ; + return 0; +} +_ACEOF + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -rdos*) - dynamic_linker=no - ;; -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; +fi -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; +rm -f core conftest.err conftest.$ac_objext + CC="$CC -n32" + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_sys_largefile_CC=' -n32'; break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -sysv4 | sysv4.3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; +fi -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=freebsd-elf - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - if test "$with_gnu_ld" = yes; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - shlibpath_overrides_runpath=no - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - shlibpath_overrides_runpath=yes - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac +rm -f core conftest.err conftest.$ac_objext + break + done + CC=$ac_save_CC + rm -f conftest.$ac_ext + fi +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5 +$as_echo "$ac_cv_sys_largefile_CC" >&6; } + if test "$ac_cv_sys_largefile_CC" != no; then + CC=$CC$ac_cv_sys_largefile_CC fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; + { $as_echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } +if test "${ac_cv_sys_file_offset_bits+set}" = set; then + $as_echo_n "(cached) " >&6 +else + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; +int +main () +{ -*) - dynamic_linker=no - ;; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; esac -{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 -echo "${ECHO_T}$dynamic_linker" >&6; } -test "$dynamic_linker" = no && can_build_shared=no +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_sys_file_offset_bits=no; break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi -{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } -hardcode_action_F77= -if test -n "$hardcode_libdir_flag_spec_F77" || \ - test -n "$runpath_var_F77" || \ - test "X$hardcode_automatic_F77" = "Xyes" ; then +fi - # We can hardcode non-existant directories. - if test "$hardcode_direct_F77" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, F77)" != no && - test "$hardcode_minus_L_F77" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_F77=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action_F77=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action_F77=unsupported -fi -{ echo "$as_me:$LINENO: result: $hardcode_action_F77" >&5 -echo "${ECHO_T}$hardcode_action_F77" >&6; } - -if test "$hardcode_action_F77" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - - -# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. 
- for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - compiler_F77 \ - CC_F77 \ - LD_F77 \ - lt_prog_compiler_wl_F77 \ - lt_prog_compiler_pic_F77 \ - lt_prog_compiler_static_F77 \ - lt_prog_compiler_no_builtin_flag_F77 \ - export_dynamic_flag_spec_F77 \ - thread_safe_flag_spec_F77 \ - whole_archive_flag_spec_F77 \ - enable_shared_with_static_runtimes_F77 \ - old_archive_cmds_F77 \ - old_archive_from_new_cmds_F77 \ - predep_objects_F77 \ - postdep_objects_F77 \ - predeps_F77 \ - postdeps_F77 \ - compiler_lib_search_path_F77 \ - archive_cmds_F77 \ - archive_expsym_cmds_F77 \ - postinstall_cmds_F77 \ - postuninstall_cmds_F77 \ - old_archive_from_expsyms_cmds_F77 \ - allow_undefined_flag_F77 \ - no_undefined_flag_F77 \ - export_symbols_cmds_F77 \ - hardcode_libdir_flag_spec_F77 \ - hardcode_libdir_flag_spec_ld_F77 \ - hardcode_libdir_separator_F77 \ - hardcode_automatic_F77 \ - module_cmds_F77 \ - module_expsym_cmds_F77 \ - lt_cv_prog_compiler_c_o_F77 \ - fix_srcfile_path_F77 \ - exclude_expsyms_F77 \ - include_expsyms_F77; do - - case $var in - old_archive_cmds_F77 | \ - old_archive_from_new_cmds_F77 | \ - archive_cmds_F77 | \ - archive_expsym_cmds_F77 | \ - module_cmds_F77 | \ - module_expsym_cmds_F77 | \ - old_archive_from_expsyms_cmds_F77 | \ - export_symbols_cmds_F77 | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" - ;; - *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" - ;; - esac - done - - case $lt_echo in - *'\$0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` - ;; - esac - -cfgfile="$ofile" - - cat <<__EOF__ >> "$cfgfile" -# ### BEGIN LIBTOOL TAG CONFIG: $tagname - -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: - -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL - -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared - -# Whether or not to build static libraries. -build_old_libs=$enable_static +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#define _FILE_OFFSET_BITS 64 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -# Whether or not to add -lc for building shared libraries. 
-build_libtool_need_lc=$archive_cmds_need_lc_F77 + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_sys_file_offset_bits=64; break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_F77 -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install +fi -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_file_offset_bits=unknown + break +done +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5 +$as_echo "$ac_cv_sys_file_offset_bits" >&6; } +case $ac_cv_sys_file_offset_bits in #( + no | unknown) ;; + *) +cat >>confdefs.h <<_ACEOF +#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +_ACEOF +;; +esac +rm -rf conftest* + if test $ac_cv_sys_file_offset_bits = unknown; then + { $as_echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5 +$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } +if test "${ac_cv_sys_large_files+set}" = set; then + $as_echo_n "(cached) " >&6 +else + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_sys_large_files=no; break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# An echo program that does not interpret backslashes. -echo=$lt_echo -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS +fi -# A C compiler. -LTCC=$lt_LTCC +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#define _LARGE_FILES 1 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_sys_large_files=1; break +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# A language-specific compiler. -CC=$lt_compiler_F77 -# Is the compiler the GNU C compiler? -with_gcc=$GCC_F77 +fi -# An ERE matcher. -EGREP=$lt_EGREP +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_large_files=unknown + break +done +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5 +$as_echo "$ac_cv_sys_large_files" >&6; } +case $ac_cv_sys_large_files in #( + no | unknown) ;; + *) +cat >>confdefs.h <<_ACEOF +#define _LARGE_FILES $ac_cv_sys_large_files +_ACEOF +;; +esac +rm -rf conftest* + fi +fi -# The linker used to build libraries. -LD=$lt_LD_F77 -# Whether we need hard or soft links. -LN_S=$lt_LN_S +######### +# Check for needed/wanted data types +{ $as_echo "$as_me:$LINENO: checking for int8_t" >&5 +$as_echo_n "checking for int8_t... " >&6; } +if test "${ac_cv_type_int8_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_int8_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (int8_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((int8_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# A BSD-compatible nm program. -NM=$lt_NM + ac_cv_type_int8_t=yes +fi -# A symbol stripping program -STRIP=$lt_STRIP +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" +fi -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_int8_t" >&5 +$as_echo "$ac_cv_type_int8_t" >&6; } +if test $ac_cv_type_int8_t = yes; then -# Used on cygwin: assembler. -AS="$AS" +cat >>confdefs.h <<_ACEOF +#define HAVE_INT8_T 1 +_ACEOF -# The name of the directory that contains temporary libtool files. -objdir=$objdir -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds - -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl_F77 - -# Object file suffix (normally "o"). -objext="$ac_objext" - -# Old archive suffix (normally "a"). -libext="$libext" - -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' - -# Executable file suffix (normally ""). -exeext="$exeext" - -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic_F77 -pic_mode=$pic_mode - -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len - -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o_F77 - -# Must we lock files when doing compilation? -need_locks=$lt_need_locks - -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix - -# Do we need a version for libraries? -need_version=$need_version - -# Whether dlopen is supported. -dlopen_support=$enable_dlopen - -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self - -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static - -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static_F77 - -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_F77 - -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_F77 - -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec_F77 - -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_thread_safe_flag_spec_F77 - -# Library versioning type. -version_type=$version_type - -# Format of library name prefix. -libname_spec=$lt_libname_spec - -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec - -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec - -# Commands used to build and install an old-style archive. 
-RANLIB=$lt_RANLIB -old_archive_cmds=$lt_old_archive_cmds_F77 -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds - -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_F77 - -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_F77 - -# Commands used to build and install a shared archive. -archive_cmds=$lt_archive_cmds_F77 -archive_expsym_cmds=$lt_archive_expsym_cmds_F77 -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds - -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_module_cmds_F77 -module_expsym_cmds=$lt_module_expsym_cmds_F77 - -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib - -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_predep_objects_F77 - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_postdep_objects_F77 - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_predeps_F77 - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_postdeps_F77 - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_compiler_lib_search_path_F77 - -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method - -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd - -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag_F77 - -# Flag that forces no undefined symbols. -no_undefined_flag=$lt_no_undefined_flag_F77 - -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds - -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval - -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe - -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl - -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - -# This is the shared library runtime path variable. -runpath_var=$runpath_var - -# This is the shared library path variable. -shlibpath_var=$shlibpath_var - -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath - -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action_F77 - -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs - -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_F77 - -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_F77 - -# Whether we need a single -rpath flag with a separated argument. 
-hardcode_libdir_separator=$lt_hardcode_libdir_separator_F77 - -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$hardcode_direct_F77 - -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$hardcode_minus_L_F77 - -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var_F77 - -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$hardcode_automatic_F77 - -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" - -# Whether libtool must link a program against all its dependency libraries. -link_all_deplibs=$link_all_deplibs_F77 - -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec - -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec - -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path - -# Set to yes if exported symbols are required. -always_export_symbols=$always_export_symbols_F77 - -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds_F77 - -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds - -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms_F77 - -# Symbols that must always be exported. -include_expsyms=$lt_include_expsyms_F77 - -# ### END LIBTOOL TAG CONFIG: $tagname - -__EOF__ - - -else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. - ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi fi +{ $as_echo "$as_me:$LINENO: checking for int16_t" >&5 +$as_echo_n "checking for int16_t... " >&6; } +if test "${ac_cv_type_int16_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_int16_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (int16_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default +int +main () +{ +if (sizeof ((int16_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_type_int16_t=yes +fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -CC="$lt_save_CC" - - else - tagname="" - fi - ;; - - GCJ) - if test -n "$GCJ" && test "X$GCJ" != "Xno"; then - - -# Source file extension for Java test sources. -ac_ext=java - -# Object file extension for compiled Java test sources. -objext=o -objext_GCJ=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="class foo {}" - -# Code to be used in simple link tests -lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }' - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Allow CC to be a program name with arguments. -compiler=$CC +fi -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_int16_t" >&5 +$as_echo "$ac_cv_type_int16_t" >&6; } +if test $ac_cv_type_int16_t = yes; then -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +cat >>confdefs.h <<_ACEOF +#define HAVE_INT16_T 1 +_ACEOF -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -CC=${GCJ-"gcj"} -compiler=$CC -compiler_GCJ=$CC -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` - - -# GCJ did not exist at the time GCC didn't implicitly link libc in. -archive_cmds_need_lc_GCJ=no - -old_archive_cmds_GCJ=$old_archive_cmds - - -lt_prog_compiler_no_builtin_flag_GCJ= - -if test "$GCC" = yes; then - lt_prog_compiler_no_builtin_flag_GCJ=' -fno-builtin' - - -{ echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 -echo $ECHO_N "checking if $compiler supports -fno-rtti -fno-exceptions... 
$ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_prog_compiler_rtti_exceptions=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-fno-rtti -fno-exceptions" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15588: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:15592: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_rtti_exceptions=yes - fi - fi - $rm conftest* - -fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_rtti_exceptions" >&6; } - -if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then - lt_prog_compiler_no_builtin_flag_GCJ="$lt_prog_compiler_no_builtin_flag_GCJ -fno-rtti -fno-exceptions" -else - : -fi - -fi - -lt_prog_compiler_wl_GCJ= -lt_prog_compiler_pic_GCJ= -lt_prog_compiler_static_GCJ= - -{ echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5 -echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_static_GCJ='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_GCJ='-Bstatic' - fi - ;; - - amigaos*) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. - lt_prog_compiler_pic_GCJ='-m68020 -resident32 -malways-restore-a4' - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic_GCJ='-fno-common' - ;; - - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. 
- lt_prog_compiler_can_build_shared_GCJ=no - enable_shared=no - ;; - - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic_GCJ=-Kconform_pic - fi - ;; - - hpux*) - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic_GCJ='-fPIC' - ;; - esac - ;; - - *) - lt_prog_compiler_pic_GCJ='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. - case $host_os in - aix*) - lt_prog_compiler_wl_GCJ='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static_GCJ='-Bstatic' - else - lt_prog_compiler_static_GCJ='-bnso -bI:/lib/syscalls.exp' - fi - ;; - darwin*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - case $cc_basename in - xlc*) - lt_prog_compiler_pic_GCJ='-qnocommon' - lt_prog_compiler_wl_GCJ='-Wl,' - ;; - esac - ;; - - mingw* | cygwin* | pw32* | os2*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' - ;; - - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl_GCJ='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic_GCJ='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static_GCJ='${wl}-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl_GCJ='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static_GCJ='-non_shared' - ;; - - newsos6) - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - - linux* | k*bsd*-gnu) - case $cc_basename in - icc* | ecc*) - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_pic_GCJ='-fpic' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - ccc*) - lt_prog_compiler_wl_GCJ='-Wl,' - # All Alpha code is PIC. - lt_prog_compiler_static_GCJ='-non_shared' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - lt_prog_compiler_wl_GCJ='-Wl,' - ;; - *Sun\ F*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - lt_prog_compiler_wl_GCJ='' - ;; - esac - ;; - esac - ;; - - osf3* | osf4* | osf5*) - lt_prog_compiler_wl_GCJ='-Wl,' - # All OSF/1 code is PIC. 
- lt_prog_compiler_static_GCJ='-non_shared' - ;; - - rdos*) - lt_prog_compiler_static_GCJ='-non_shared' - ;; - - solaris*) - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - lt_prog_compiler_wl_GCJ='-Qoption ld ';; - *) - lt_prog_compiler_wl_GCJ='-Wl,';; - esac - ;; - - sunos4*) - lt_prog_compiler_wl_GCJ='-Qoption ld ' - lt_prog_compiler_pic_GCJ='-PIC' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec ;then - lt_prog_compiler_pic_GCJ='-Kconform_pic' - lt_prog_compiler_static_GCJ='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_pic_GCJ='-KPIC' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - - unicos*) - lt_prog_compiler_wl_GCJ='-Wl,' - lt_prog_compiler_can_build_shared_GCJ=no - ;; - - uts4*) - lt_prog_compiler_pic_GCJ='-pic' - lt_prog_compiler_static_GCJ='-Bstatic' - ;; - - *) - lt_prog_compiler_can_build_shared_GCJ=no - ;; - esac - fi - -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_GCJ" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_GCJ" >&6; } - -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic_GCJ"; then - -{ echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 -echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_prog_compiler_pic_works_GCJ=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic_GCJ" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15878: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:15882: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_GCJ=yes - fi - fi - $rm conftest* - -fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_GCJ" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_GCJ" >&6; } - -if test x"$lt_prog_compiler_pic_works_GCJ" = xyes; then - case $lt_prog_compiler_pic_GCJ in - "" | " "*) ;; - *) lt_prog_compiler_pic_GCJ=" $lt_prog_compiler_pic_GCJ" ;; - esac -else - lt_prog_compiler_pic_GCJ= - lt_prog_compiler_can_build_shared_GCJ=no -fi - fi -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic_GCJ= - ;; - *) - lt_prog_compiler_pic_GCJ="$lt_prog_compiler_pic_GCJ" - ;; -esac - -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl_GCJ eval lt_tmp_static_flag=\"$lt_prog_compiler_static_GCJ\" -{ echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_GCJ+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +{ $as_echo "$as_me:$LINENO: checking for int32_t" >&5 +$as_echo_n "checking for int32_t... " >&6; } +if test "${ac_cv_type_int32_t+set}" = set; then + $as_echo_n "(cached) " >&6 else - lt_prog_compiler_static_works_GCJ=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_GCJ=yes - fi - else - lt_prog_compiler_static_works_GCJ=yes - fi - fi - $rm conftest* - LDFLAGS="$save_LDFLAGS" - -fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_GCJ" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_GCJ" >&6; } - -if test x"$lt_prog_compiler_static_works_GCJ" = xyes; then - : -else - lt_prog_compiler_static_GCJ= -fi - - -{ echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5 -echo $ECHO_N "checking if $compiler supports -c -o file.$ac_objext... $ECHO_C" >&6; } -if test "${lt_cv_prog_compiler_c_o_GCJ+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - lt_cv_prog_compiler_c_o_GCJ=no - $rm -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15982: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:15986: \$? 
= $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o_GCJ=yes - fi - fi - chmod u+w . 2>&5 - $rm conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $rm out/ii_files/* && rmdir out/ii_files - $rm out/* && rmdir out - cd .. - rmdir conftest - $rm conftest* - -fi -{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o_GCJ" >&5 -echo "${ECHO_T}$lt_cv_prog_compiler_c_o_GCJ" >&6; } - - -hard_links="nottested" -if test "$lt_cv_prog_compiler_c_o_GCJ" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - { echo "$as_me:$LINENO: checking if we can lock with hard links" >&5 -echo $ECHO_N "checking if we can lock with hard links... $ECHO_C" >&6; } - hard_links=yes - $rm conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { echo "$as_me:$LINENO: result: $hard_links" >&5 -echo "${ECHO_T}$hard_links" >&6; } - if test "$hard_links" = no; then - { echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 -echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no -fi - -{ echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared libraries... $ECHO_C" >&6; } - - runpath_var= - allow_undefined_flag_GCJ= - enable_shared_with_static_runtimes_GCJ=no - archive_cmds_GCJ= - archive_expsym_cmds_GCJ= - old_archive_From_new_cmds_GCJ= - old_archive_from_expsyms_cmds_GCJ= - export_dynamic_flag_spec_GCJ= - whole_archive_flag_spec_GCJ= - thread_safe_flag_spec_GCJ= - hardcode_libdir_flag_spec_GCJ= - hardcode_libdir_flag_spec_ld_GCJ= - hardcode_libdir_separator_GCJ= - hardcode_direct_GCJ=no - hardcode_minus_L_GCJ=no - hardcode_shlibpath_var_GCJ=unsupported - link_all_deplibs_GCJ=unknown - hardcode_automatic_GCJ=no - module_cmds_GCJ= - module_expsym_cmds_GCJ= - always_export_symbols_GCJ=no - export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms_GCJ= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - exclude_expsyms_GCJ="_GLOBAL_OFFSET_TABLE_" - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - extract_expsyms_cmds= - # Just being paranoid about ensuring that cc_basename is set. 
- for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` - - case $host_os in - cygwin* | mingw* | pw32*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; - esac - - ld_shlibs_GCJ=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec_GCJ='${wl}--rpath ${wl}$libdir' - export_dynamic_flag_spec_GCJ='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | grep 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec_GCJ="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - whole_archive_flag_spec_GCJ= - fi - supports_anon_versioning=no - case `$LD -v 2>/dev/null` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix3* | aix4* | aix5*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - ld_shlibs_GCJ=no - cat <&2 - -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. - -EOF - fi - ;; - - amigaos*) - archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_minus_L_GCJ=yes - - # Samuel A. Falvo II reports - # that the semantics of dynamic libraries on AmigaOS, at least up - # to version 4, is to share data among multiple programs linked - # with the same dynamic library. Since this doesn't match the - # behavior of shared libraries on other platforms, we can't use - # them. - ld_shlibs_GCJ=no - ;; - - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag_GCJ=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. 
FIXME - archive_cmds_GCJ='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - ld_shlibs_GCJ=no - fi - ;; - - cygwin* | mingw* | pw32*) - # _LT_AC_TAGVAR(hardcode_libdir_flag_spec, GCJ) is actually meaningless, - # as there is no search path for DLLs. - hardcode_libdir_flag_spec_GCJ='-L$libdir' - allow_undefined_flag_GCJ=unsupported - always_export_symbols_GCJ=no - enable_shared_with_static_runtimes_GCJ=yes - export_symbols_cmds_GCJ='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' - - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - archive_expsym_cmds_GCJ='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs_GCJ=no - fi - ;; - - interix[3-9]*) - hardcode_direct_GCJ=no - hardcode_shlibpath_var_GCJ=no - hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' - export_dynamic_flag_spec_GCJ='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- archive_cmds_GCJ='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds_GCJ='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - - gnu* | linux* | k*bsd*-gnu) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - tmp_addflag= - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - whole_archive_flag_spec_GCJ='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec_GCJ='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $echo \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - *) - tmp_sharedflag='-shared' ;; - esac - archive_cmds_GCJ='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - - if test $supports_anon_versioning = yes; then - archive_expsym_cmds_GCJ='$echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - $echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - link_all_deplibs_GCJ=no - else - ld_shlibs_GCJ=no - fi - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds_GCJ='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - ld_shlibs_GCJ=no - cat <&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. 
- -EOF - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs_GCJ=no - fi - ;; - - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs_GCJ=no - cat <<_LT_EOF 1>&2 - -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib' - archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname,\${SCOABSPATH:+${install_libdir}/}$soname,-retain-symbols-file,$export_symbols -o $lib' - else - ld_shlibs_GCJ=no - fi - ;; - esac - ;; - - sunos4*) - archive_cmds_GCJ='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs_GCJ=no - fi - ;; - esac - - if test "$ld_shlibs_GCJ" = no; then - runpath_var= - hardcode_libdir_flag_spec_GCJ= - export_dynamic_flag_spec_GCJ= - whole_archive_flag_spec_GCJ= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag_GCJ=unsupported - always_export_symbols_GCJ=yes - archive_expsym_cmds_GCJ='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L_GCJ=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - hardcode_direct_GCJ=unsupported - fi - ;; - - aix4* | aix5*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. 
- # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | grep 'GNU' > /dev/null; then - export_symbols_cmds_GCJ='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds_GCJ='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$2 == "T") || (\$2 == "D") || (\$2 == "B")) && (substr(\$3,1,1) != ".")) { print \$3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - archive_cmds_GCJ='' - hardcode_direct_GCJ=yes - hardcode_libdir_separator_GCJ=':' - link_all_deplibs_GCJ=yes - - if test "$GCC" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct_GCJ=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L_GCJ=yes - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_libdir_separator_GCJ= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols_GCJ=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag_GCJ='-berok' - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? 
- grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - -fi - -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds_GCJ="\$CC"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec_GCJ='${wl}-R $libdir:/usr/lib:/lib' - allow_undefined_flag_GCJ="-z nodefs" - archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an empty executable. - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - -fi - -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hardcode_libdir_flag_spec_GCJ='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. 
- no_undefined_flag_GCJ=' ${wl}-bernotok' - allow_undefined_flag_GCJ=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec_GCJ='$convenience' - archive_cmds_need_lc_GCJ=yes - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds_GCJ="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - - amigaos*) - archive_cmds_GCJ='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_minus_L_GCJ=yes - # see comment about different semantics on the GNU ld section - ld_shlibs_GCJ=no - ;; - - bsdi[45]*) - export_dynamic_flag_spec_GCJ=-rdynamic - ;; - - cygwin* | mingw* | pw32*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - hardcode_libdir_flag_spec_GCJ=' ' - allow_undefined_flag_GCJ=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - archive_cmds_GCJ='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | $SED -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_From_new_cmds_GCJ='true' - # FIXME: Should let the user specify the lib program. 
- old_archive_cmds_GCJ='lib -OUT:$oldlib$oldobjs$old_deplibs' - fix_srcfile_path_GCJ='`cygpath -w "$srcfile"`' - enable_shared_with_static_runtimes_GCJ=yes - ;; - - darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[012]) - allow_undefined_flag_GCJ='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[012]) - allow_undefined_flag_GCJ='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - allow_undefined_flag_GCJ='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac - archive_cmds_need_lc_GCJ=no - hardcode_direct_GCJ=no - hardcode_automatic_GCJ=yes - hardcode_shlibpath_var_GCJ=unsupported - whole_archive_flag_spec_GCJ='' - link_all_deplibs_GCJ=yes - if test "$GCC" = yes ; then - output_verbose_link_cmd='echo' - archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - case $cc_basename in - xlc*) - output_verbose_link_cmd='echo' - archive_cmds_GCJ='$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}`echo $rpath/$soname` $xlcverstring' - module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -qmkshrobj $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-install_name ${wl}$rpath/$soname $xlcverstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - ;; - *) - ld_shlibs_GCJ=no - ;; - esac - fi - ;; - - dgux*) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_shlibpath_var_GCJ=no - ;; - - freebsd1*) - ld_shlibs_GCJ=no - ;; - - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). 
- freebsd2.2*) - archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec_GCJ='-R$libdir' - hardcode_direct_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_GCJ=yes - hardcode_minus_L_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) - archive_cmds_GCJ='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec_GCJ='-R$libdir' - hardcode_direct_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - hpux9*) - if test "$GCC" = yes; then - archive_cmds_GCJ='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds_GCJ='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - hardcode_direct_GCJ=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L_GCJ=yes - export_dynamic_flag_spec_GCJ='${wl}-E' - ;; - - hpux10*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_GCJ='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - - hardcode_direct_GCJ=yes - export_dynamic_flag_spec_GCJ='${wl}-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L_GCJ=yes - fi - ;; - - hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds_GCJ='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_GCJ='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds_GCJ='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec_GCJ='${wl}+b ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - - case $host_cpu in - hppa*64*|ia64*) - hardcode_libdir_flag_spec_ld_GCJ='+b $libdir' - hardcode_direct_GCJ=no - hardcode_shlibpath_var_GCJ=no - ;; - *) - hardcode_direct_GCJ=yes - export_dynamic_flag_spec_GCJ='${wl}-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L_GCJ=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - archive_cmds_GCJ='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - archive_cmds_GCJ='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec_ld_GCJ='-rpath $libdir' - fi - hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - link_all_deplibs_GCJ=yes - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds_GCJ='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec_GCJ='-R$libdir' - hardcode_direct_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - newsos6) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_GCJ=yes - hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - hardcode_shlibpath_var_GCJ=no - ;; - - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct_GCJ=yes - hardcode_shlibpath_var_GCJ=no - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' - export_dynamic_flag_spec_GCJ='${wl}-E' - else - case $host_os in - openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) - archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_GCJ='-R$libdir' - ;; - *) - archive_cmds_GCJ='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec_GCJ='${wl}-rpath,$libdir' - ;; - esac - fi - else - ld_shlibs_GCJ=no - fi - ;; - - os2*) - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_minus_L_GCJ=yes - allow_undefined_flag_GCJ=unsupported - archive_cmds_GCJ='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - old_archive_From_new_cmds_GCJ='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - - osf3*) - if test "$GCC" = yes; then - allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - allow_undefined_flag_GCJ=' -expect_unresolved \*' - archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' 
- fi - hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator_GCJ=: - ;; - - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag_GCJ=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds_GCJ='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec_GCJ='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag_GCJ=' -expect_unresolved \*' - archive_cmds_GCJ='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds_GCJ='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ - $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib~$rm $lib.exp' - - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec_GCJ='-rpath $libdir' - fi - hardcode_libdir_separator_GCJ=: - ;; - - solaris*) - no_undefined_flag_GCJ=' -z text' - if test "$GCC" = yes; then - wlarc='${wl}' - archive_cmds_GCJ='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' - else - wlarc='' - archive_cmds_GCJ='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds_GCJ='$echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' - fi - hardcode_libdir_flag_spec_GCJ='-R$libdir' - hardcode_shlibpath_var_GCJ=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - whole_archive_flag_spec_GCJ='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - whole_archive_flag_spec_GCJ='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs_GCJ=yes - ;; - - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds_GCJ='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_GCJ='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_direct_GCJ=yes - hardcode_minus_L_GCJ=yes - hardcode_shlibpath_var_GCJ=no - ;; - - sysv4) - case $host_vendor in - sni) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_GCJ=yes # is this really true??? 
- ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. - archive_cmds_GCJ='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds_GCJ='$CC -r -o $output$reload_objs' - hardcode_direct_GCJ=no - ;; - motorola) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct_GCJ=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var_GCJ=no - ;; - - sysv4.3*) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var_GCJ=no - export_dynamic_flag_spec_GCJ='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var_GCJ=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs_GCJ=yes - fi - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag_GCJ='${wl}-z,text' - archive_cmds_need_lc_GCJ=no - hardcode_shlibpath_var_GCJ=no - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - archive_cmds_GCJ='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_GCJ='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - no_undefined_flag_GCJ='${wl}-z,text' - allow_undefined_flag_GCJ='${wl}-z,nodefs' - archive_cmds_need_lc_GCJ=no - hardcode_shlibpath_var_GCJ=no - hardcode_libdir_flag_spec_GCJ='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - hardcode_libdir_separator_GCJ=':' - link_all_deplibs_GCJ=yes - export_dynamic_flag_spec_GCJ='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - archive_cmds_GCJ='$CC -shared ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds_GCJ='$CC -G ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds_GCJ='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,\${SCOABSPATH:+${install_libdir}/}$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - uts4*) - archive_cmds_GCJ='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec_GCJ='-L$libdir' - hardcode_shlibpath_var_GCJ=no - ;; - - *) - ld_shlibs_GCJ=no - ;; - esac - fi - -{ echo "$as_me:$LINENO: result: $ld_shlibs_GCJ" >&5 -echo "${ECHO_T}$ld_shlibs_GCJ" >&6; } -test "$ld_shlibs_GCJ" = no && can_build_shared=no - -# -# Do we need to explicitly link libc? 
-# -case "x$archive_cmds_need_lc_GCJ" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc_GCJ=yes - - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $archive_cmds_GCJ in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5 -echo $ECHO_N "checking whether -lc should be explicitly linked in... $ECHO_C" >&6; } - $rm conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl_GCJ - pic_flag=$lt_prog_compiler_pic_GCJ - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag_GCJ - allow_undefined_flag_GCJ= - if { (eval echo "$as_me:$LINENO: \"$archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1\"") >&5 - (eval $archive_cmds_GCJ 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - then - archive_cmds_need_lc_GCJ=no - else - archive_cmds_need_lc_GCJ=yes - fi - allow_undefined_flag_GCJ=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $rm conftest* - { echo "$as_me:$LINENO: result: $archive_cmds_need_lc_GCJ" >&5 -echo "${ECHO_T}$archive_cmds_need_lc_GCJ" >&6; } - ;; - esac - fi - ;; -esac - -{ echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5 -echo $ECHO_N "checking dynamic linker characteristics... $ECHO_C" >&6; } -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" - -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; - -aix4* | aix5*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. 
- case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[45]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i;echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $rm \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if echo "$sys_lib_search_path_spec" | grep ';[c-zC-Z]:/' >/dev/null; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`echo "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; - - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${versuffix}$shared_ext ${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd1*) - dynamic_linker=no - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. 
- if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[123]*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; - -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. 
- postinstall_cmds='chmod 555 $lib' - ;; - -interix[3-9]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -# This must be Linux ELF. -linux* | k*bsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi - - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. 
- dynamic_linker='GNU/Linux ld.so' - ;; - -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -nto-qnx*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -openbsd*) - version_type=sunos - sys_lib_dlsearch_path_spec="/usr/lib" - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
- case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[89] | openbsd2.[89].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes - fi - ;; - -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; - -rdos*) - dynamic_linker=no - ;; - -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - export_dynamic_flag_spec='${wl}-Blargedynsym' - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=freebsd-elf - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - if test "$with_gnu_ld" = yes; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - shlibpath_overrides_runpath=no - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - 
shlibpath_overrides_runpath=yes - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; - -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; + ac_cv_type_int32_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (int32_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((int32_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; esac -{ echo "$as_me:$LINENO: result: $dynamic_linker" >&5 -echo "${ECHO_T}$dynamic_linker" >&6; } -test "$dynamic_linker" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -{ echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5 -echo $ECHO_N "checking how to hardcode library paths into programs... $ECHO_C" >&6; } -hardcode_action_GCJ= -if test -n "$hardcode_libdir_flag_spec_GCJ" || \ - test -n "$runpath_var_GCJ" || \ - test "X$hardcode_automatic_GCJ" = "Xyes" ; then - - # We can hardcode non-existant directories. - if test "$hardcode_direct_GCJ" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_AC_TAGVAR(hardcode_shlibpath_var, GCJ)" != no && - test "$hardcode_minus_L_GCJ" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action_GCJ=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action_GCJ=immediate - fi +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- hardcode_action_GCJ=unsupported -fi -{ echo "$as_me:$LINENO: result: $hardcode_action_GCJ" >&5 -echo "${ECHO_T}$hardcode_action_GCJ" >&6; } + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -if test "$hardcode_action_GCJ" = relink; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless + ac_cv_type_int32_t=yes fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. - for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - compiler_GCJ \ - CC_GCJ \ - LD_GCJ \ - lt_prog_compiler_wl_GCJ \ - lt_prog_compiler_pic_GCJ \ - lt_prog_compiler_static_GCJ \ - lt_prog_compiler_no_builtin_flag_GCJ \ - export_dynamic_flag_spec_GCJ \ - thread_safe_flag_spec_GCJ \ - whole_archive_flag_spec_GCJ \ - enable_shared_with_static_runtimes_GCJ \ - old_archive_cmds_GCJ \ - old_archive_from_new_cmds_GCJ \ - predep_objects_GCJ \ - postdep_objects_GCJ \ - predeps_GCJ \ - postdeps_GCJ \ - compiler_lib_search_path_GCJ \ - archive_cmds_GCJ \ - archive_expsym_cmds_GCJ \ - postinstall_cmds_GCJ \ - postuninstall_cmds_GCJ \ - old_archive_from_expsyms_cmds_GCJ \ - allow_undefined_flag_GCJ \ - no_undefined_flag_GCJ \ - export_symbols_cmds_GCJ \ - hardcode_libdir_flag_spec_GCJ \ - hardcode_libdir_flag_spec_ld_GCJ \ - hardcode_libdir_separator_GCJ \ - hardcode_automatic_GCJ \ - module_cmds_GCJ \ - module_expsym_cmds_GCJ \ - lt_cv_prog_compiler_c_o_GCJ \ - fix_srcfile_path_GCJ \ - exclude_expsyms_GCJ \ - include_expsyms_GCJ; do - - case $var in - old_archive_cmds_GCJ | \ - old_archive_from_new_cmds_GCJ | \ - archive_cmds_GCJ | \ - archive_expsym_cmds_GCJ | \ - module_cmds_GCJ | \ - module_expsym_cmds_GCJ | \ - old_archive_from_expsyms_cmds_GCJ | \ - export_symbols_cmds_GCJ | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. 
- eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" - ;; - *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" - ;; - esac - done - case $lt_echo in - *'\$0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` - ;; - esac +fi -cfgfile="$ofile" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_int32_t" >&5 +$as_echo "$ac_cv_type_int32_t" >&6; } +if test $ac_cv_type_int32_t = yes; then - cat <<__EOF__ >> "$cfgfile" -# ### BEGIN LIBTOOL TAG CONFIG: $tagname +cat >>confdefs.h <<_ACEOF +#define HAVE_INT32_T 1 +_ACEOF -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL +fi +{ $as_echo "$as_me:$LINENO: checking for int64_t" >&5 +$as_echo_n "checking for int64_t... " >&6; } +if test "${ac_cv_type_int64_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_int64_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (int64_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((int64_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared + ac_cv_type_int64_t=yes +fi -# Whether or not to build static libraries. -build_old_libs=$enable_static +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc_GCJ -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_GCJ +fi -# Whether or not to optimize for fast installation. 
-fast_install=$enable_fast_install +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_int64_t" >&5 +$as_echo "$ac_cv_type_int64_t" >&6; } +if test $ac_cv_type_int64_t = yes; then -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os +cat >>confdefs.h <<_ACEOF +#define HAVE_INT64_T 1 +_ACEOF -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os -# An echo program that does not interpret backslashes. -echo=$lt_echo +fi +{ $as_echo "$as_me:$LINENO: checking for intptr_t" >&5 +$as_echo_n "checking for intptr_t... " >&6; } +if test "${ac_cv_type_intptr_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_intptr_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (intptr_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((intptr_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS + ac_cv_type_intptr_t=yes +fi -# A C compiler. -LTCC=$lt_LTCC +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS -# A language-specific compiler. -CC=$lt_compiler_GCJ +fi -# Is the compiler the GNU C compiler? -with_gcc=$GCC_GCJ +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_intptr_t" >&5 +$as_echo "$ac_cv_type_intptr_t" >&6; } +if test $ac_cv_type_intptr_t = yes; then -# An ERE matcher. -EGREP=$lt_EGREP +cat >>confdefs.h <<_ACEOF +#define HAVE_INTPTR_T 1 +_ACEOF -# The linker used to build libraries. -LD=$lt_LD_GCJ -# Whether we need hard or soft links. -LN_S=$lt_LN_S +fi +{ $as_echo "$as_me:$LINENO: checking for uint8_t" >&5 +$as_echo_n "checking for uint8_t... 
" >&6; } +if test "${ac_cv_type_uint8_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_uint8_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (uint8_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((uint8_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# A BSD-compatible nm program. -NM=$lt_NM + ac_cv_type_uint8_t=yes +fi -# A symbol stripping program -STRIP=$lt_STRIP +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" +fi -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uint8_t" >&5 +$as_echo "$ac_cv_type_uint8_t" >&6; } +if test $ac_cv_type_uint8_t = yes; then -# Used on cygwin: assembler. -AS="$AS" +cat >>confdefs.h <<_ACEOF +#define HAVE_UINT8_T 1 +_ACEOF -# The name of the directory that contains temporary libtool files. -objdir=$objdir -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds +fi +{ $as_echo "$as_me:$LINENO: checking for uint16_t" >&5 +$as_echo_n "checking for uint16_t... " >&6; } +if test "${ac_cv_type_uint16_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_uint16_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (uint16_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((uint16_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl_GCJ + ac_cv_type_uint16_t=yes +fi -# Object file suffix (normally "o"). -objext="$ac_objext" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Old archive suffix (normally "a"). -libext="$libext" -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' +fi -# Executable file suffix (normally ""). -exeext="$exeext" +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uint16_t" >&5 +$as_echo "$ac_cv_type_uint16_t" >&6; } +if test $ac_cv_type_uint16_t = yes; then -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic_GCJ -pic_mode=$pic_mode +cat >>confdefs.h <<_ACEOF +#define HAVE_UINT16_T 1 +_ACEOF -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o_GCJ +fi +{ $as_echo "$as_me:$LINENO: checking for uint32_t" >&5 +$as_echo_n "checking for uint32_t... " >&6; } +if test "${ac_cv_type_uint32_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_uint32_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (uint32_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default +int +main () +{ +if (sizeof ((uint32_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Must we lock files when doing compilation? -need_locks=$lt_need_locks + ac_cv_type_uint32_t=yes +fi -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Do we need a version for libraries? -need_version=$need_version -# Whether dlopen is supported. -dlopen_support=$enable_dlopen +fi -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uint32_t" >&5 +$as_echo "$ac_cv_type_uint32_t" >&6; } +if test $ac_cv_type_uint32_t = yes; then -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static +cat >>confdefs.h <<_ACEOF +#define HAVE_UINT32_T 1 +_ACEOF -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static_GCJ -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_GCJ +fi +{ $as_echo "$as_me:$LINENO: checking for uint64_t" >&5 +$as_echo_n "checking for uint64_t... " >&6; } +if test "${ac_cv_type_uint64_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_uint64_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (uint64_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((uint64_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_GCJ + ac_cv_type_uint64_t=yes +fi -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec_GCJ +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_thread_safe_flag_spec_GCJ -# Library versioning type. -version_type=$version_type +fi -# Format of library name prefix. -libname_spec=$lt_libname_spec +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uint64_t" >&5 +$as_echo "$ac_cv_type_uint64_t" >&6; } +if test $ac_cv_type_uint64_t = yes; then -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec +cat >>confdefs.h <<_ACEOF +#define HAVE_UINT64_T 1 +_ACEOF -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_old_archive_cmds_GCJ -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds +fi +{ $as_echo "$as_me:$LINENO: checking for uintptr_t" >&5 +$as_echo_n "checking for uintptr_t... " >&6; } +if test "${ac_cv_type_uintptr_t+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_type_uintptr_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof (uintptr_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((uintptr_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_GCJ + ac_cv_type_uintptr_t=yes +fi -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_GCJ +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Commands used to build and install a shared archive. -archive_cmds=$lt_archive_cmds_GCJ -archive_expsym_cmds=$lt_archive_expsym_cmds_GCJ -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_module_cmds_GCJ -module_expsym_cmds=$lt_module_expsym_cmds_GCJ +fi -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uintptr_t" >&5 +$as_echo "$ac_cv_type_uintptr_t" >&6; } +if test $ac_cv_type_uintptr_t = yes; then -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_predep_objects_GCJ - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_postdep_objects_GCJ - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_predeps_GCJ - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_postdeps_GCJ - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_compiler_lib_search_path_GCJ +cat >>confdefs.h <<_ACEOF +#define HAVE_UINTPTR_T 1 +_ACEOF -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd +fi -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag_GCJ -# Flag that forces no undefined symbols. -no_undefined_flag=$lt_no_undefined_flag_GCJ +######### +# Check for needed/wanted headers -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl +for ac_header in sys/types.h stdlib.h stdint.h inttypes.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? 
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 +$as_echo_n "checking $ac_header usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + ac_header_compiler=no +fi -# This is the shared library runtime path variable. -runpath_var=$runpath_var +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } -# This is the shared library path variable. -shlibpath_var=$shlibpath_var +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 +$as_echo_n "checking $ac_header presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath + ac_header_preproc=no +fi -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action_GCJ +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" 
>&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_GCJ + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_GCJ - -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator_GCJ - -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$hardcode_direct_GCJ - -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$hardcode_minus_L_GCJ - -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var_GCJ - -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$hardcode_automatic_GCJ +fi +if test `eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" +fi -# Whether libtool must link a program against all its dependency libraries. 
-link_all_deplibs=$link_all_deplibs_GCJ
+done
-# Compile-time system search path for libraries
-sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
-# Run-time system search path for libraries
-sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+#########
+# Figure out whether or not we have these functions
+#
-# Fix the shell variable \$srcfile for the compiler.
-fix_srcfile_path=$lt_fix_srcfile_path
-# Set to yes if exported symbols are required.
-always_export_symbols=$always_export_symbols_GCJ
-# The commands to list exported symbols.
-export_symbols_cmds=$lt_export_symbols_cmds_GCJ
-# The commands to extract the exported symbol list from a shared archive.
-extract_expsyms_cmds=$lt_extract_expsyms_cmds
-# Symbols that should not be listed in the preloaded symbols.
-exclude_expsyms=$lt_exclude_expsyms_GCJ
+for ac_func in usleep fdatasync localtime_r gmtime_r localtime_s
+do
+as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5
+$as_echo_n "checking for $ac_func... " >&6; }
+if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $ac_func innocuous_$ac_func
-# Symbols that must always be exported.
-include_expsyms=$lt_include_expsyms_GCJ
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
-# ### END LIBTOOL TAG CONFIG: $tagname
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
-__EOF__
+#undef $ac_func
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$ac_func || defined __stub___$ac_func
+choke me
+#endif
+int
+main ()
+{
+return $ac_func ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ eval "$as_ac_var=yes"
else
- # If there is no Makefile yet, we rely on a make rule to execute
- # `config.status --recheck' to rerun these tests and create the
- # libtool script then. 
- ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi -fi + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + eval "$as_ac_var=no" +fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if test `eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF -CC="$lt_save_CC" +fi +done - else - tagname="" - fi - ;; - RC) +######### +# By default, we use the amalgamation (this may be changed below...) +# +USE_AMALGAMATION=1 +######### +# See whether we can run specific tclsh versions known to work well; +# if not, then we fall back to plain tclsh. +# TODO: try other versions before falling back? +# +for ac_prog in tclsh8.4 tclsh +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_TCLSH_CMD+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$TCLSH_CMD"; then + ac_cv_prog_TCLSH_CMD="$TCLSH_CMD" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_TCLSH_CMD="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -# Source file extension for RC test sources. -ac_ext=rc +fi +fi +TCLSH_CMD=$ac_cv_prog_TCLSH_CMD +if test -n "$TCLSH_CMD"; then + { $as_echo "$as_me:$LINENO: result: $TCLSH_CMD" >&5 +$as_echo "$TCLSH_CMD" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -# Object file extension for compiled RC test sources. -objext=o -objext_RC=$objext -# Code to be used in simple compile tests -lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + test -n "$TCLSH_CMD" && break +done +test -n "$TCLSH_CMD" || TCLSH_CMD="none" -# Code to be used in simple link tests -lt_simple_link_test_code="$lt_simple_compile_test_code" +if test "$TCLSH_CMD" = "none"; then + # If we can't find a local tclsh, then building the amalgamation will fail. + # We act as though --disable-amalgamation has been used. + echo "Warning: can't find tclsh - defaulting to non-amalgamation build." + USE_AMALGAMATION=0 + TCLSH_CMD="tclsh" +fi -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +if test "x${TCLLIBDIR+set}" != "xset" ; then + TCLLIBDIR='$(libdir)' + for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` ; do + TCLLIBDIR=$i + break + done + TCLLIBDIR="${TCLLIBDIR}/sqlite3" +fi -# Allow CC to be a program name with arguments. 
-compiler=$CC +######### +# Set up an appropriate program prefix +# +if test "$program_prefix" = "NONE"; then + program_prefix="" +fi -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$rm conftest* -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` +{ $as_echo "$as_me:$LINENO: Version set to $VERSION" >&5 +$as_echo "$as_me: Version set to $VERSION" >&6;} +RELEASE=`cat $srcdir/VERSION` +{ $as_echo "$as_me:$LINENO: Release set to $RELEASE" >&5 +$as_echo "$as_me: Release set to $RELEASE" >&6;} -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -CC=${RC-"windres"} -compiler=$CC -compiler_RC=$CC -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` +VERSION_NUMBER=`cat $srcdir/VERSION \ + | sed 's/[^0-9]/ /g' \ + | awk '{printf "%d%03d%03d",$1,$2,$3}'` +{ $as_echo "$as_me:$LINENO: Version number set to $VERSION_NUMBER" >&5 +$as_echo "$as_me: Version number set to $VERSION_NUMBER" >&6;} -lt_cv_prog_compiler_c_o_RC=yes -# The else clause should only fire when bootstrapping the -# libtool distribution, otherwise you forgot to ship ltmain.sh -# with your package, and you will get complaints that there are -# no rules to generate ltmain.sh. -if test -f "$ltmain"; then - # See if we are running on zsh, and set the options which allow our commands through - # without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - # Now quote all the things that may contain metacharacters while being - # careful not to overquote the AC_SUBSTed values. We take copies of the - # variables and quote the copies for generation of the libtool script. 
- for var in echo old_CC old_CFLAGS AR AR_FLAGS EGREP RANLIB LN_S LTCC LTCFLAGS NM \ - SED SHELL STRIP \ - libname_spec library_names_spec soname_spec extract_expsyms_cmds \ - old_striplib striplib file_magic_cmd finish_cmds finish_eval \ - deplibs_check_method reload_flag reload_cmds need_locks \ - lt_cv_sys_global_symbol_pipe lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - sys_lib_search_path_spec sys_lib_dlsearch_path_spec \ - old_postinstall_cmds old_postuninstall_cmds \ - compiler_RC \ - CC_RC \ - LD_RC \ - lt_prog_compiler_wl_RC \ - lt_prog_compiler_pic_RC \ - lt_prog_compiler_static_RC \ - lt_prog_compiler_no_builtin_flag_RC \ - export_dynamic_flag_spec_RC \ - thread_safe_flag_spec_RC \ - whole_archive_flag_spec_RC \ - enable_shared_with_static_runtimes_RC \ - old_archive_cmds_RC \ - old_archive_from_new_cmds_RC \ - predep_objects_RC \ - postdep_objects_RC \ - predeps_RC \ - postdeps_RC \ - compiler_lib_search_path_RC \ - archive_cmds_RC \ - archive_expsym_cmds_RC \ - postinstall_cmds_RC \ - postuninstall_cmds_RC \ - old_archive_from_expsyms_cmds_RC \ - allow_undefined_flag_RC \ - no_undefined_flag_RC \ - export_symbols_cmds_RC \ - hardcode_libdir_flag_spec_RC \ - hardcode_libdir_flag_spec_ld_RC \ - hardcode_libdir_separator_RC \ - hardcode_automatic_RC \ - module_cmds_RC \ - module_expsym_cmds_RC \ - lt_cv_prog_compiler_c_o_RC \ - fix_srcfile_path_RC \ - exclude_expsyms_RC \ - include_expsyms_RC; do - - case $var in - old_archive_cmds_RC | \ - old_archive_from_new_cmds_RC | \ - archive_cmds_RC | \ - archive_expsym_cmds_RC | \ - module_cmds_RC | \ - module_expsym_cmds_RC | \ - old_archive_from_expsyms_cmds_RC | \ - export_symbols_cmds_RC | \ - extract_expsyms_cmds | reload_cmds | finish_cmds | \ - postinstall_cmds | postuninstall_cmds | \ - old_postinstall_cmds | old_postuninstall_cmds | \ - sys_lib_search_path_spec | sys_lib_dlsearch_path_spec) - # Double-quote double-evaled strings. - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\"" - ;; - *) - eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\"" - ;; - esac - done +######### +# Check to see if the --with-hints=FILE option is used. If there is none, +# then check for a files named "$host.hints" and ../$hosts.hints where +# $host is the hostname of the build system. If still no hints are +# found, try looking in $system.hints and ../$system.hints where +# $system is the result of uname -s. +# - case $lt_echo in - *'\$0 --fallback-echo"') - lt_echo=`$echo "X$lt_echo" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'` - ;; - esac +# Check whether --with-hints was given. +if test "${with_hints+set}" = set; then + withval=$with_hints; hints=$withval +fi -cfgfile="$ofile" +if test "$hints" = ""; then + host=`hostname | sed 's/\..*//'` + if test -r $host.hints; then + hints=$host.hints + else + if test -r ../$host.hints; then + hints=../$host.hints + fi + fi +fi +if test "$hints" = ""; then + sys=`uname -s` + if test -r $sys.hints; then + hints=$sys.hints + else + if test -r ../$sys.hints; then + hints=../$sys.hints + fi + fi +fi +if test "$hints" != ""; then + { $as_echo "$as_me:$LINENO: result: reading hints from $hints" >&5 +$as_echo "reading hints from $hints" >&6; } + . $hints +fi - cat <<__EOF__ >> "$cfgfile" -# ### BEGIN LIBTOOL TAG CONFIG: $tagname +######### +# Locate a compiler for the build machine. 
This compiler should +# generate command-line programs that run on the build machine. +# +if test x"$cross_compiling" = xno; then + BUILD_CC=$CC + BUILD_CFLAGS=$CFLAGS +else + if test "${BUILD_CC+set}" != set; then + for ac_prog in gcc cc cl +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_BUILD_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$BUILD_CC"; then + ac_cv_prog_BUILD_CC="$BUILD_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_BUILD_CC="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +fi +fi +BUILD_CC=$ac_cv_prog_BUILD_CC +if test -n "$BUILD_CC"; then + { $as_echo "$as_me:$LINENO: result: $BUILD_CC" >&5 +$as_echo "$BUILD_CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared + test -n "$BUILD_CC" && break +done -# Whether or not to build static libraries. -build_old_libs=$enable_static + fi + if test "${BUILD_CFLAGS+set}" != set; then + BUILD_CFLAGS="-g" + fi +fi -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc_RC -# Whether or not to disallow shared libs when runtime libs are static -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_RC +########## +# Do we want to support multithreaded use of sqlite +# +# Check whether --enable-threadsafe was given. +if test "${enable_threadsafe+set}" = set; then + enableval=$enable_threadsafe; +else + enable_threadsafe=yes +fi -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install +{ $as_echo "$as_me:$LINENO: checking whether to support threadsafe operation" >&5 +$as_echo_n "checking whether to support threadsafe operation... " >&6; } +if test "$enable_threadsafe" = "no"; then + SQLITE_THREADSAFE=0 + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +else + SQLITE_THREADSAFE=1 + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +fi -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os +if test "$SQLITE_THREADSAFE" = "1"; then + { $as_echo "$as_me:$LINENO: checking for library containing pthread_create" >&5 +$as_echo_n "checking for library containing pthread_create... " >&6; } +if test "${ac_cv_search_pthread_create+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -# An echo program that does not interpret backslashes. -echo=$lt_echo +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +for ac_lib in '' pthread; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_search_pthread_create=$ac_res +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS -# A C compiler. -LTCC=$lt_LTCC +fi -# LTCC compiler flags. -LTCFLAGS=$lt_LTCFLAGS +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_pthread_create+set}" = set; then + break +fi +done +if test "${ac_cv_search_pthread_create+set}" = set; then + : +else + ac_cv_search_pthread_create=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_search_pthread_create" >&5 +$as_echo "$ac_cv_search_pthread_create" >&6; } +ac_res=$ac_cv_search_pthread_create +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -# A language-specific compiler. -CC=$lt_compiler_RC +fi -# Is the compiler the GNU C compiler? -with_gcc=$GCC_RC +fi -# An ERE matcher. -EGREP=$lt_EGREP +########## +# Do we want to allow a connection created in one thread to be used +# in another thread. This does not work on many Linux systems (ex: RedHat 9) +# due to bugs in the threading implementations. This is thus off by default. +# +# Check whether --enable-cross-thread-connections was given. +if test "${enable_cross_thread_connections+set}" = set; then + enableval=$enable_cross_thread_connections; +else + enable_xthreadconnect=no +fi -# The linker used to build libraries. -LD=$lt_LD_RC +{ $as_echo "$as_me:$LINENO: checking whether to allow connections to be shared across threads" >&5 +$as_echo_n "checking whether to allow connections to be shared across threads... " >&6; } +if test "$enable_xthreadconnect" = "no"; then + XTHREADCONNECT='' + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +else + XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +fi -# Whether we need hard or soft links. -LN_S=$lt_LN_S -# A BSD-compatible nm program. -NM=$lt_NM +########## +# Do we want to set threadsOverrideEachOthersLocks variable to be 1 (true) by +# default. Normally, a test at runtime is performed to determine the +# appropriate value of this variable. Use this option only if you're sure that +# threads can safely override each others locks in all runtime situations. +# +# Check whether --enable-threads-override-locks was given. 
+if test "${enable_threads_override_locks+set}" = set; then + enableval=$enable_threads_override_locks; +else + enable_threads_override_locks=no +fi -# A symbol stripping program -STRIP=$lt_STRIP +{ $as_echo "$as_me:$LINENO: checking whether threads can override each others locks" >&5 +$as_echo_n "checking whether threads can override each others locks... " >&6; } +if test "$enable_threads_override_locks" = "no"; then + THREADSOVERRIDELOCKS='-1' + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +else + THREADSOVERRIDELOCKS='1' + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +fi -# Used to examine libraries when file_magic_cmd begins "file" -MAGIC_CMD=$MAGIC_CMD -# Used on cygwin: DLL creation program. -DLLTOOL="$DLLTOOL" +########## +# Do we want to support release +# +# Check whether --enable-releasemode was given. +if test "${enable_releasemode+set}" = set; then + enableval=$enable_releasemode; +else + enable_releasemode=no +fi -# Used on cygwin: object dumper. -OBJDUMP="$OBJDUMP" +{ $as_echo "$as_me:$LINENO: checking whether to support shared library linked as release mode or not" >&5 +$as_echo_n "checking whether to support shared library linked as release mode or not... " >&6; } +if test "$enable_releasemode" = "no"; then + ALLOWRELEASE="" + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +else + ALLOWRELEASE="-release `cat $srcdir/VERSION`" + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +fi -# Used on cygwin: assembler. -AS="$AS" -# The name of the directory that contains temporary libtool files. -objdir=$objdir +########## +# Do we want temporary databases in memory +# +# Check whether --enable-tempstore was given. +if test "${enable_tempstore+set}" = set; then + enableval=$enable_tempstore; +else + enable_tempstore=no +fi -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds +{ $as_echo "$as_me:$LINENO: checking whether to use an in-ram database for temporary tables" >&5 +$as_echo_n "checking whether to use an in-ram database for temporary tables... " >&6; } +case "$enable_tempstore" in + never ) + TEMP_STORE=0 + { $as_echo "$as_me:$LINENO: result: never" >&5 +$as_echo "never" >&6; } + ;; + no ) + TEMP_STORE=1 + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } + ;; + yes ) + TEMP_STORE=2 + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } + ;; + always ) + TEMP_STORE=3 + { $as_echo "$as_me:$LINENO: result: always" >&5 +$as_echo "always" >&6; } + ;; + * ) + TEMP_STORE=1 + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } + ;; +esac -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl_RC -# Object file suffix (normally "o"). -objext="$ac_objext" -# Old archive suffix (normally "a"). -libext="$libext" +########### +# Lots of things are different if we are compiling for Windows using +# the CYGWIN environment. So check for that special case and handle +# things accordingly. +# +{ $as_echo "$as_me:$LINENO: checking if executables have the .exe suffix" >&5 +$as_echo_n "checking if executables have the .exe suffix... " >&6; } +if test "$config_BUILD_EXEEXT" = ".exe"; then + CYGWIN=yes + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:$LINENO: result: unknown" >&5 +$as_echo "unknown" >&6; } +fi +if test "$CYGWIN" != "yes"; then + { $as_echo "$as_me:$LINENO: checking host system type" >&5 +$as_echo_n "checking host system type... 
" >&6; } +if test "${ac_cv_host+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { (exit 1); exit 1; }; } +fi -# Shared library suffix (normally ".so"). -shrext_cmds='$shrext_cmds' +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +$as_echo "$as_me: error: invalid value of canonical host" >&2;} + { (exit 1); exit 1; }; };; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac -# Executable file suffix (normally ""). -exeext="$exeext" -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic_RC -pic_mode=$pic_mode +case $host_os in + *cygwin* ) CYGWIN=yes;; + * ) CYGWIN=no;; +esac -# What is the maximum length of a command? -max_cmd_len=$lt_cv_sys_max_cmd_len +fi +if test "$CYGWIN" = "yes"; then + BUILD_EXEEXT=.exe +else + BUILD_EXEEXT=$EXEEXT +fi +if test x"$cross_compiling" = xno; then + TARGET_EXEEXT=$BUILD_EXEEXT +else + TARGET_EXEEXT=$config_TARGET_EXEEXT +fi +if test "$TARGET_EXEEXT" = ".exe"; then + if test $OS2_SHELL ; then + SQLITE_OS_UNIX=0 + SQLITE_OS_WIN=0 + SQLITE_OS_OS2=1 + CFLAGS="$CFLAGS -DSQLITE_OS_OS2=1" + else + SQLITE_OS_UNIX=0 + SQLITE_OS_WIN=1 + SQLITE_OS_OS2=0 + CFLAGS="$CFLAGS -DSQLITE_OS_WIN=1" + fi +else + SQLITE_OS_UNIX=1 + SQLITE_OS_WIN=0 + SQLITE_OS_OS2=0 + CFLAGS="$CFLAGS -DSQLITE_OS_UNIX=1" +fi -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o_RC -# Must we lock files when doing compilation? -need_locks=$lt_need_locks -# Do we need the lib prefix for modules? -need_lib_prefix=$need_lib_prefix -# Do we need a version for libraries? -need_version=$need_version -# Whether dlopen is supported. -dlopen_support=$enable_dlopen -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static +########## +# Figure out all the parameters needed to compile against Tcl. +# +# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG +# macros in the in the tcl.m4 file of the standard TCL distribution. +# Those macros could not be used directly since we have to make some +# minor changes to accomodate systems that do not have TCL installed. +# +# Check whether --enable-tcl was given. +if test "${enable_tcl+set}" = set; then + enableval=$enable_tcl; use_tcl=$enableval +else + use_tcl=yes +fi -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static_RC +if test "${use_tcl}" = "yes" ; then -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_RC +# Check whether --with-tcl was given. +if test "${with_tcl+set}" = set; then + withval=$with_tcl; with_tclconfig=${withval} +fi -# Compiler flag to allow reflexive dlopens. 
-export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_RC + { $as_echo "$as_me:$LINENO: checking for Tcl configuration" >&5 +$as_echo_n "checking for Tcl configuration... " >&6; } + if test "${ac_cv_c_tclconfig+set}" = set; then + $as_echo_n "(cached) " >&6 +else -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec_RC + # First check to see if --with-tcl was specified. + if test x"${with_tclconfig}" != x ; then + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` + else + { { $as_echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5 +$as_echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;} + { (exit 1); exit 1; }; } + fi + fi -# Compiler flag to generate thread-safe objects. -thread_safe_flag_spec=$lt_thread_safe_flag_spec_RC + # Start autosearch by asking tclsh + if test x"$cross_compiling" = xno; then + for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` + do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="$i" + break + fi + done + fi -# Library versioning type. -version_type=$version_type + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi -# Format of library name prefix. -libname_spec=$lt_libname_spec + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + `ls -d ${libdir} 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls -d /usr/lib 2>/dev/null` + do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i; pwd)` + break + fi + done + fi -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME. -library_names_spec=$lt_library_names_spec + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9]* 2>/dev/null` + do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec +fi -# Commands used to build and install an old-style archive. -RANLIB=$lt_RANLIB -old_archive_cmds=$lt_old_archive_cmds_RC -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds -# Create an old-style archive from a shared archive. 
-old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_RC + if test x"${ac_cv_c_tclconfig}" = x ; then + use_tcl=no + { $as_echo "$as_me:$LINENO: WARNING: Can't find Tcl configuration definitions" >&5 +$as_echo "$as_me: WARNING: Can't find Tcl configuration definitions" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&5 +$as_echo "$as_me: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&5 +$as_echo "$as_me: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&2;} + else + TCL_BIN_DIR=${ac_cv_c_tclconfig} + { $as_echo "$as_me:$LINENO: result: found $TCL_BIN_DIR/tclConfig.sh" >&5 +$as_echo "found $TCL_BIN_DIR/tclConfig.sh" >&6; } -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_RC + { $as_echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 +$as_echo_n "checking for existence of $TCL_BIN_DIR/tclConfig.sh... " >&6; } + if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then + { $as_echo "$as_me:$LINENO: result: loading" >&5 +$as_echo "loading" >&6; } + . $TCL_BIN_DIR/tclConfig.sh + else + { $as_echo "$as_me:$LINENO: result: file not found" >&5 +$as_echo "file not found" >&6; } + fi -# Commands used to build and install a shared archive. -archive_cmds=$lt_archive_cmds_RC -archive_expsym_cmds=$lt_archive_expsym_cmds_RC -postinstall_cmds=$lt_postinstall_cmds -postuninstall_cmds=$lt_postuninstall_cmds + # + # If the TCL_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TCL_LIB_SPEC will be set to the value + # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC + # instead of TCL_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. + # -# Commands used to build a loadable module (assumed same as above if empty) -module_cmds=$lt_module_cmds_RC -module_expsym_cmds=$lt_module_expsym_cmds_RC + if test -f $TCL_BIN_DIR/Makefile ; then + TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} + TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} + TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} + fi -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib + # + # eval is required to do the TCL_DBGX substitution + # -# Dependencies to place before the objects being linked to create a -# shared library. -predep_objects=$lt_predep_objects_RC - -# Dependencies to place after the objects being linked to create a -# shared library. -postdep_objects=$lt_postdep_objects_RC - -# Dependencies to place before the objects being linked to create a -# shared library. -predeps=$lt_predeps_RC - -# Dependencies to place after the objects being linked to create a -# shared library. -postdeps=$lt_postdeps_RC - -# The library search path used internally by the compiler when linking -# a shared library. -compiler_lib_search_path=$lt_compiler_lib_search_path_RC + eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" + eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" -# Method to check whether dependent libraries are shared objects. 
-deplibs_check_method=$lt_deplibs_check_method + eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" + eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" + eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" -# Command to use when deplibs_check_method == file_magic. -file_magic_cmd=$lt_file_magic_cmd -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag_RC -# Flag that forces no undefined symbols. -no_undefined_flag=$lt_no_undefined_flag_RC -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds -# Same as above, but a single script fragment to be evaled but not shown. -finish_eval=$lt_finish_eval -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe -# Transform the output of nm in a proper C declaration -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl -# Transform the output of nm in a C name address pair -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address -# This is the shared library runtime path variable. -runpath_var=$runpath_var -# This is the shared library path variable. -shlibpath_var=$shlibpath_var -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action_RC -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_RC + fi +fi +if test "${use_tcl}" = "no" ; then + HAVE_TCL="" +else + HAVE_TCL=1 +fi -# If ld is used when linking, flag to hardcode \$libdir into -# a binary during linking. This must work even if \$libdir does -# not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_RC - -# Whether we need a single -rpath flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator_RC - -# Set to yes if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the -# resulting binary. -hardcode_direct=$hardcode_direct_RC - -# Set to yes if using the -LDIR flag during linking hardcodes DIR into the -# resulting binary. -hardcode_minus_L=$hardcode_minus_L_RC - -# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into -# the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var_RC - -# Set to yes if building a shared library automatically hardcodes DIR into the library -# and all subsequent libraries and executables linked against it. -hardcode_automatic=$hardcode_automatic_RC -# Variables whose values should be saved in libtool wrapper scripts and -# restored at relink time. -variables_saved_for_relink="$variables_saved_for_relink" +########## +# Figure out what C libraries are required to compile programs +# that use "readline()" library. +# +TARGET_READLINE_LIBS="" +TARGET_READLINE_INC="" +TARGET_HAVE_READLINE=0 +# Check whether --enable-readline was given. +if test "${enable_readline+set}" = set; then + enableval=$enable_readline; with_readline=$enableval +else + with_readline=auto +fi -# Whether libtool must link a program against all its dependency libraries. 
-link_all_deplibs=$link_all_deplibs_RC -# Compile-time system search path for libraries -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec +if test x"$with_readline" != xno; then + found="yes" -# Run-time system search path for libraries -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path +# Check whether --with-readline-lib was given. +if test "${with_readline_lib+set}" = set; then + withval=$with_readline_lib; with_readline_lib=$withval +else + with_readline_lib="auto" +fi -# Set to yes if exported symbols are required. -always_export_symbols=$always_export_symbols_RC + if test "x$with_readline_lib" = xauto; then + save_LIBS="$LIBS" + LIBS="" + { $as_echo "$as_me:$LINENO: checking for library containing tgetent" >&5 +$as_echo_n "checking for library containing tgetent... " >&6; } +if test "${ac_cv_search_tgetent+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds_RC +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char tgetent (); +int +main () +{ +return tgetent (); + ; + return 0; +} +_ACEOF +for ac_lib in '' readline ncurses curses termcap; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_search_tgetent=$ac_res +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms_RC +fi -# Symbols that must always be exported. 
-include_expsyms=$lt_include_expsyms_RC +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_tgetent+set}" = set; then + break +fi +done +if test "${ac_cv_search_tgetent+set}" = set; then + : +else + ac_cv_search_tgetent=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_search_tgetent" >&5 +$as_echo "$ac_cv_search_tgetent" >&6; } +ac_res=$ac_cv_search_tgetent +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + term_LIBS="$LIBS" +else + term_LIBS="" +fi -# ### END LIBTOOL TAG CONFIG: $tagname + { $as_echo "$as_me:$LINENO: checking for readline in -lreadline" >&5 +$as_echo_n "checking for readline in -lreadline... " >&6; } +if test "${ac_cv_lib_readline_readline+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lreadline $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ -__EOF__ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char readline (); +int +main () +{ +return readline (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_readline_readline=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_cv_lib_readline_readline=no +fi +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_readline_readline" >&5 +$as_echo "$ac_cv_lib_readline_readline" >&6; } +if test $ac_cv_lib_readline_readline = yes; then + TARGET_READLINE_LIBS="-lreadline" else - # If there is no Makefile yet, we rely on a make rule to execute - # `config.status --recheck' to rerun these tests and create the - # libtool script then. - ltmain_in=`echo $ltmain | sed -e 's/\.sh$/.in/'` - if test -f "$ltmain_in"; then - test -f Makefile && make "$ltmain" - fi + found="no" fi + TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" + LIBS="$save_LIBS" + else + TARGET_READLINE_LIBS="$with_readline_lib" + fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -CC="$lt_save_CC" - - ;; - - *) - { { echo "$as_me:$LINENO: error: Unsupported tag name: $tagname" >&5 -echo "$as_me: error: Unsupported tag name: $tagname" >&2;} - { (exit 1); exit 1; }; } - ;; - esac - - # Append the new tag name to the list of available tags. 
- if test -n "$tagname" ; then - available_tags="$available_tags $tagname" - fi - fi - done - IFS="$lt_save_ifs" - # Now substitute the updated list of available tags. - if eval "sed -e 's/^available_tags=.*\$/available_tags=\"$available_tags\"/' \"$ofile\" > \"${ofile}T\""; then - mv "${ofile}T" "$ofile" - chmod +x "$ofile" - else - rm -f "${ofile}T" - { { echo "$as_me:$LINENO: error: unable to update list of available tagged configurations." >&5 -echo "$as_me: error: unable to update list of available tagged configurations." >&2;} - { (exit 1); exit 1; }; } - fi +# Check whether --with-readline-inc was given. +if test "${with_readline_inc+set}" = set; then + withval=$with_readline_inc; with_readline_inc=$withval +else + with_readline_inc="auto" fi + if test "x$with_readline_inc" = xauto; then + if test "${ac_cv_header_readline_h+set}" = set; then + { $as_echo "$as_me:$LINENO: checking for readline.h" >&5 +$as_echo_n "checking for readline.h... " >&6; } +if test "${ac_cv_header_readline_h+set}" = set; then + $as_echo_n "(cached) " >&6 +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 +$as_echo "$ac_cv_header_readline_h" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:$LINENO: checking readline.h usability" >&5 +$as_echo_n "checking readline.h usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_header_compiler=no +fi -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh" - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' - -# Prevent multiple expansion - - - +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking readline.h presence" >&5 +$as_echo_n "checking readline.h presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! 
-s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + ac_header_preproc=no +fi +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: readline.h: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: readline.h: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: readline.h: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: readline.h: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: readline.h: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: readline.h: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: readline.h: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: readline.h: in the future, the compiler will take precedence" >&2;} + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for readline.h" >&5 +$as_echo_n "checking for readline.h... " >&6; } +if test "${ac_cv_header_readline_h+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_header_readline_h=$ac_header_preproc +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 +$as_echo "$ac_cv_header_readline_h" >&6; } +fi +if test $ac_cv_header_readline_h = yes; then + found="yes" +else + found="no" + if test "$cross_compiling" != yes; then + for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do + for subdir in include include/readline; do + as_ac_File=`$as_echo "ac_cv_file_$dir/$subdir/readline.h" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $dir/$subdir/readline.h" >&5 +$as_echo_n "checking for $dir/$subdir/readline.h... 
" >&6; } +if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + test "$cross_compiling" = yes && + { { $as_echo "$as_me:$LINENO: error: cannot check for file existence when cross compiling" >&5 +$as_echo "$as_me: error: cannot check for file existence when cross compiling" >&2;} + { (exit 1); exit 1; }; } +if test -r "$dir/$subdir/readline.h"; then + eval "$as_ac_File=yes" +else + eval "$as_ac_File=no" +fi +fi +ac_res=`eval 'as_val=${'$as_ac_File'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if test `eval 'as_val=${'$as_ac_File'} + $as_echo "$as_val"'` = yes; then + found=yes +fi + if test "$found" = "yes"; then + TARGET_READLINE_INC="-I$dir/$subdir" + break + fi + done + test "$found" = "yes" && break + done + fi +fi + else + TARGET_READLINE_INC="$with_readline_inc" + fi + if test x"$found" = xno; then + TARGET_READLINE_LIBS="" + TARGET_READLINE_INC="" + TARGET_HAVE_READLINE=0 + else + TARGET_HAVE_READLINE=1 + fi +fi -# Find a good install program. We prefer a C program (faster), -# so one script is as good as another. But avoid the broken or -# incompatible versions: -# SysV /etc/install, /usr/sbin/install -# SunOS /usr/etc/install -# IRIX /sbin/install -# AIX /bin/install -# AmigaOS /C/install, which installs bootblocks on floppy discs -# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag -# AFS /usr/afsws/bin/install, which mishandles nonexistent args -# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" -# OS/2's system install, which has a completely different semantic -# ./install, which can be erroneously created by make from ./install.sh. -{ echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 -echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; } -if test -z "$INSTALL"; then -if test "${ac_cv_path_install+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +########## +# Figure out what C libraries are required to compile programs +# that use "fdatasync()" function. +# +{ $as_echo "$as_me:$LINENO: checking for library containing fdatasync" >&5 +$as_echo_n "checking for library containing fdatasync... " >&6; } +if test "${ac_cv_search_fdatasync+set}" = set; then + $as_echo_n "(cached) " >&6 else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in - ./ | .// | /cC/* | \ - /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ - /usr/ucb/* ) ;; - *) - # OSF1 and SCO ODT 3.0 have their own names for install. - # Don't use installbsd from OSF since it installs stuff as root - # by default. - for ac_prog in ginstall scoinst install; do - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. - : - else - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi - done - done - ;; + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char fdatasync (); +int +main () +{ +return fdatasync (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; esac -done -IFS=$as_save_IFS +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_search_fdatasync=$ac_res +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 fi - if test "${ac_cv_path_install+set}" = set; then - INSTALL=$ac_cv_path_install - else - # As a last resort, use the slow shell script. Don't cache a - # value for INSTALL within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - INSTALL=$ac_install_sh - fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_fdatasync+set}" = set; then + break +fi +done +if test "${ac_cv_search_fdatasync+set}" = set; then + : +else + ac_cv_search_fdatasync=no fi -{ echo "$as_me:$LINENO: result: $INSTALL" >&5 -echo "${ECHO_T}$INSTALL" >&6; } +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_search_fdatasync" >&5 +$as_echo "$ac_cv_search_fdatasync" >&6; } +ac_res=$ac_cv_search_fdatasync +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -# Use test -z because SunOS4 sh mishandles braces in ${var-val}. -# It thinks the first close brace ends the variable substitution. -test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' +fi -test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' -test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' +######### +# check for debug enabled +# Check whether --enable-debug was given. +if test "${enable_debug+set}" = set; then + enableval=$enable_debug; use_debug=$enableval +else + use_debug=no +fi -for ac_prog in gawk mawk nawk awk -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_AWK+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +if test "${use_debug}" = "yes" ; then + TARGET_DEBUG="-DSQLITE_DEBUG=1" else - if test -n "$AWK"; then - ac_cv_prog_AWK="$AWK" # Let the user override the test. + TARGET_DEBUG="-DNDEBUG" +fi + + +######### +# See whether we should use the amalgamation to build +# Check whether --enable-amalgamation was given. 
+if test "${enable_amalgamation+set}" = set; then + enableval=$enable_amalgamation; use_amalgamation=$enableval else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_AWK="$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done -IFS=$as_save_IFS + use_amalgamation=yes +fi +if test "${use_amalgamation}" != "yes" ; then + USE_AMALGAMATION=0 fi + + +######### +# See whether we should allow loadable extensions +# Check whether --enable-load-extension was given. +if test "${enable_load_extension+set}" = set; then + enableval=$enable_load_extension; use_loadextension=$enableval +else + use_loadextension=no fi -AWK=$ac_cv_prog_AWK -if test -n "$AWK"; then - { echo "$as_me:$LINENO: result: $AWK" >&5 -echo "${ECHO_T}$AWK" >&6; } + +if test "${use_loadextension}" = "yes" ; then + OPT_FEATURE_FLAGS="" else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } + OPT_FEATURE_FLAGS="-DSQLITE_OMIT_LOAD_EXTENSION=1" fi +######### +# attempt to duplicate any OMITS and ENABLES into the $(OPT_FEATURE_FLAGS) parameter +for option in $CFLAGS $CPPFLAGS +do + case $option in + -DSQLITE_OMIT*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; + -DSQLITE_ENABLE*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; + esac +done + - test -n "$AWK" && break + +# attempt to remove any OMITS and ENABLES from the $(CFLAGS) parameter +ac_temp_CFLAGS="" +for option in $CFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_CFLAGS="$ac_temp_CFLAGS $option";; + esac +done +CFLAGS=$ac_temp_CFLAGS + + +# attempt to remove any OMITS and ENABLES from the $(CPPFLAGS) parameter +ac_temp_CPPFLAGS="" +for option in $CPPFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_CPPFLAGS="$ac_temp_CPPFLAGS $option";; + esac +done +CPPFLAGS=$ac_temp_CPPFLAGS + + +# attempt to remove any OMITS and ENABLES from the $(BUILD_CFLAGS) parameter +ac_temp_BUILD_CFLAGS="" +for option in $BUILD_CFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_BUILD_CFLAGS="$ac_temp_BUILD_CFLAGS $option";; + esac done +BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS ######### -# Set up an appropriate program prefix -# -if test "$program_prefix" = "NONE"; then - program_prefix="" +# See whether we should use GCOV +# Check whether --enable-gcov was given. +if test "${enable_gcov+set}" = set; then + enableval=$enable_gcov; use_gcov=$enableval +else + use_gcov=no fi +if test "${use_gcov}" = "yes" ; then + USE_GCOV=1 +else + USE_GCOV=0 +fi -VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` -echo "Version set to $VERSION" -RELEASE=`cat $srcdir/VERSION` -echo "Release set to $RELEASE" -VERSION_NUMBER=`cat $srcdir/VERSION \ - | sed 's/[^0-9]/ /g' \ - | awk '{printf "%d%03d%03d",$1,$2,$3}'` -echo "Version number set to $VERSION_NUMBER" +######### +# Output the config header +ac_config_headers="$ac_config_headers config.h" ######### -# Check to see if the --with-hints=FILE option is used. If there is none, -# then check for a files named "$host.hints" and ../$hosts.hints where -# $host is the hostname of the build system. If still no hints are -# found, try looking in $system.hints and ../$system.hints where -# $system is the result of uname -s. +# Generate the output files. 
# -# Check whether --with-hints was given. -if test "${with_hints+set}" = set; then - withval=$with_hints; hints=$withval -fi - -if test "$hints" = ""; then - host=`hostname | sed 's/\..*//'` - if test -r $host.hints; then - hints=$host.hints - else - if test -r ../$host.hints; then - hints=../$host.hints - fi - fi -fi -if test "$hints" = ""; then - sys=`uname -s` - if test -r $sys.hints; then - hints=$sys.hints - else - if test -r ../$sys.hints; then - hints=../$sys.hints - fi - fi -fi -if test "$hints" != ""; then - { echo "$as_me:$LINENO: result: reading hints from $hints" >&5 -echo "${ECHO_T}reading hints from $hints" >&6; } - . $hints -fi +ac_config_files="$ac_config_files Makefile sqlite3.pc" -######### -# Locate a compiler for the build machine. This compiler should -# generate command-line programs that run on the build machine. +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. # -if test x"$cross_compiling" = xno; then - BUILD_CC=$CC - BUILD_CFLAGS=$CFLAGS -else - if test "${BUILD_CC+set}" != set; then - for ac_prog in gcc cc cl -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ echo "$as_me:$LINENO: checking for $ac_word" >&5 -echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } -if test "${ac_cv_prog_BUILD_CC+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test -n "$BUILD_CC"; then - ac_cv_prog_BUILD_CC="$BUILD_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_BUILD_CC="$ac_prog" - echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done -done -IFS=$as_save_IFS +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +$as_echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). 
+ sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + test "x$cache_file" != "x/dev/null" && + { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + cat confcache >$cache_file + else + { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi fi -fi -BUILD_CC=$ac_cv_prog_BUILD_CC -if test -n "$BUILD_CC"; then - { echo "$as_me:$LINENO: result: $BUILD_CC" >&5 -echo "${ECHO_T}$BUILD_CC" >&6; } -else - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -fi +rm -f confcache +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - test -n "$BUILD_CC" && break -done +DEFS=-DHAVE_CONFIG_H - fi - if test "${BUILD_CFLAGS+set}" != set; then - BUILD_CFLAGS="-g" - fi -fi +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs +LTLIBOBJS=$ac_ltlibobjs -########## -# Do we want to support multithreaded use of sqlite -# -# Check whether --enable-threadsafe was given. -if test "${enable_threadsafe+set}" = set; then - enableval=$enable_threadsafe; -else - enable_threadsafe=no -fi -{ echo "$as_me:$LINENO: checking whether to support threadsafe operation" >&5 -echo $ECHO_N "checking whether to support threadsafe operation... $ECHO_C" >&6; } -if test "$enable_threadsafe" = "no"; then - THREADSAFE=0 - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -else - THREADSAFE=1 - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } -fi +: ${CONFIG_STATUS=./config.status} +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF -if test "$THREADSAFE" = "1"; then - LIBS="" +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## -{ echo "$as_me:$LINENO: checking for pthread_create in -lpthread" >&5 -echo $ECHO_N "checking for pthread_create in -lpthread... 
$ECHO_C" >&6; } -if test "${ac_cv_lib_pthread_pthread_create+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lpthread $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char pthread_create (); -int -main () -{ -return pthread_create (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_pthread_pthread_create=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cv_lib_pthread_pthread_create=no fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_pthread_create" >&5 -echo "${ECHO_T}$ac_cv_lib_pthread_pthread_create" >&6; } -if test $ac_cv_lib_pthread_pthread_create = yes; then - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBPTHREAD 1 -_ACEOF - LIBS="-lpthread $LIBS" -fi - TARGET_THREAD_LIB="$LIBS" - LIBS="" -else - TARGET_THREAD_LIB="" -fi +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. 
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi -########## -# Do we want to allow a connection created in one thread to be used -# in another thread. This does not work on many Linux systems (ex: RedHat 9) -# due to bugs in the threading implementations. This is thus off by default. -# -# Check whether --enable-cross-thread-connections was given. -if test "${enable_cross_thread_connections+set}" = set; then - enableval=$enable_cross_thread_connections; -else - enable_xthreadconnect=no +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } fi -{ echo "$as_me:$LINENO: checking whether to allow connections to be shared across threads" >&5 -echo $ECHO_N "checking whether to allow connections to be shared across threads... $ECHO_C" >&6; } -if test "$enable_xthreadconnect" = "no"; then - XTHREADCONNECT='' - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset else - XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } + as_unset=false fi -########## -# Do we want to set threadsOverrideEachOthersLocks variable to be 1 (true) by -# default. Normally, a test at runtime is performed to determine the -# appropriate value of this variable. Use this option only if you're sure that -# threads can safely override each others locks in all runtime situations. -# -# Check whether --enable-threads-override-locks was given. -if test "${enable_threads_override_locks+set}" = set; then - enableval=$enable_threads_override_locks; -else - enable_threads_override_locks=no -fi +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" -{ echo "$as_me:$LINENO: checking whether threads can override each others locks" >&5 -echo $ECHO_N "checking whether threads can override each others locks... $ECHO_C" >&6; } -if test "$enable_threads_override_locks" = "no"; then - THREADSOVERRIDELOCKS='-1' - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } -else - THREADSOVERRIDELOCKS='1' - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } +# Find who we are. Look in the path if we contain no directory separator. 
+case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } fi +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' -########## -# Do we want to support release -# -# Check whether --enable-releasemode was given. -if test "${enable_releasemode+set}" = set; then - enableval=$enable_releasemode; +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr else - enable_releasemode=no + as_expr=false fi -{ echo "$as_me:$LINENO: checking whether to support shared library linked as release mode or not" >&5 -echo $ECHO_N "checking whether to support shared library linked as release mode or not... $ECHO_C" >&6; } -if test "$enable_releasemode" = "no"; then - ALLOWRELEASE="" - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename else - ALLOWRELEASE="-release `cat VERSION`" - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } + as_basename=false fi -########## -# Do we want temporary databases in memory -# -# Check whether --enable-tempstore was given. -if test "${enable_tempstore+set}" = set; then - enableval=$enable_tempstore; -else - enable_tempstore=no -fi +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` -{ echo "$as_me:$LINENO: checking whether to use an in-ram database for temporary tables" >&5 -echo $ECHO_N "checking whether to use an in-ram database for temporary tables... $ECHO_C" >&6; } -case "$enable_tempstore" in - never ) - TEMP_STORE=0 - { echo "$as_me:$LINENO: result: never" >&5 -echo "${ECHO_T}never" >&6; } - ;; - no ) - TEMP_STORE=1 - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } - ;; - always ) - TEMP_STORE=3 - { echo "$as_me:$LINENO: result: always" >&5 -echo "${ECHO_T}always" >&6; } - ;; - yes ) - TEMP_STORE=3 - { echo "$as_me:$LINENO: result: always" >&5 -echo "${ECHO_T}always" >&6; } - ;; - * ) - TEMP_STORE=1 - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } - ;; -esac +# CDPATH. +$as_unset CDPATH -########### -# Lots of things are different if we are compiling for Windows using -# the CYGWIN environment. So check for that special case and handle -# things accordingly. -# -{ echo "$as_me:$LINENO: checking if executables have the .exe suffix" >&5 -echo $ECHO_N "checking if executables have the .exe suffix... 
$ECHO_C" >&6; } -if test "$config_BUILD_EXEEXT" = ".exe"; then - CYGWIN=yes - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } -else - { echo "$as_me:$LINENO: result: unknown" >&5 -echo "${ECHO_T}unknown" >&6; } -fi -if test "$CYGWIN" != "yes"; then - { echo "$as_me:$LINENO: checking host system type" >&5 -echo $ECHO_N "checking host system type... $ECHO_C" >&6; } -if test "${ac_cv_host+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 -echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} - { (exit 1); exit 1; }; } -fi + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { -fi -{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5 -echo "${ECHO_T}$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 -echo "$as_me: error: invalid value of canonical host" >&2;} - { (exit 1); exit 1; }; };; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} -case $host_os in - *cygwin* ) CYGWIN=yes;; - * ) CYGWIN=no;; -esac +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false fi -if test "$CYGWIN" = "yes"; then - BUILD_EXEEXT=.exe + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr else - BUILD_EXEEXT=$EXEEXT + as_expr=false fi -if test x"$cross_compiling" = xno; then - TARGET_EXEEXT=$BUILD_EXEEXT + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file else - TARGET_EXEEXT=$config_TARGET_EXEEXT + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null fi -if test "$TARGET_EXEEXT" = ".exe"; then - if test $OS2_SHELL ; then - OS_UNIX=0 - OS_WIN=0 - OS_OS2=1 - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_OS2=1" - if test "$ac_compiler_gnu" == "yes" ; then - TARGET_CFLAGS="$TARGET_CFLAGS -Zomf -Zexe -Zmap" - BUILD_CFLAGS="$BUILD_CFLAGS -Zomf -Zexe" - fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln else - OS_UNIX=0 - OS_WIN=1 - OS_OS2=0 - tclsubdir=win - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_WIN=1" + as_ln_s='cp -p' fi else - OS_UNIX=1 - OS_WIN=0 - OS_OS2=0 - tclsubdir=unix - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_UNIX=1" + as_ln_s='cp -p' fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null - - - - - - -########## -# Figure out all the parameters needed to compile against Tcl. -# -# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG -# macros in the in the tcl.m4 file of the standard TCL distribution. -# Those macros could not be used directly since we have to make some -# minor changes to accomodate systems that do not have TCL installed. -# -# Check whether --enable-tcl was given. -if test "${enable_tcl+set}" = set; then - enableval=$enable_tcl; use_tcl=$enableval +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: else - use_tcl=yes + test -d ./-p && rmdir ./-p + as_mkdir_p=false fi -if test "${use_tcl}" = "yes" ; then - -# Check whether --with-tcl was given. -if test "${with_tcl+set}" = set; then - withval=$with_tcl; with_tclconfig=${withval} +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' fi +as_executable_p=$as_test_x - { echo "$as_me:$LINENO: checking for Tcl configuration" >&5 -echo $ECHO_N "checking for Tcl configuration... $ECHO_C" >&6; } - if test "${ac_cv_c_tclconfig+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - # First check to see if --with-tcl was specified. 
- if test x"${with_tclconfig}" != x ; then - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` - else - { { echo "$as_me:$LINENO: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&5 -echo "$as_me: error: ${with_tclconfig} directory doesn't contain tclConfig.sh" >&2;} - { (exit 1); exit 1; }; } - fi - fi - # then check for a private Tcl installation - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ../tcl \ - `ls -dr ../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../tcl[8-9].[0-9]* 2>/dev/null` \ - ../../tcl \ - `ls -dr ../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../../tcl[8-9].[0-9]* 2>/dev/null` \ - ../../../tcl \ - `ls -dr ../../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../../../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../../../tcl[8-9].[0-9]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - `ls -d ${libdir} 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i; pwd)` - break - fi - done - fi - # check in a few other private locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ${srcdir}/../tcl \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi +exec 6>&1 -fi +# Save the log message, to keep $[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by sqlite $as_me 3.6.16, which was +generated by GNU Autoconf 2.62. Invocation command line was + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ - if test x"${ac_cv_c_tclconfig}" = x ; then - use_tcl=no - { echo "$as_me:$LINENO: WARNING: Can't find Tcl configuration definitions" >&5 -echo "$as_me: WARNING: Can't find Tcl configuration definitions" >&2;} - { echo "$as_me:$LINENO: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&5 -echo "$as_me: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&2;} - { echo "$as_me:$LINENO: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&5 -echo "$as_me: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&2;} - else - TCL_BIN_DIR=${ac_cv_c_tclconfig} - { echo "$as_me:$LINENO: result: found $TCL_BIN_DIR/tclConfig.sh" >&5 -echo "${ECHO_T}found $TCL_BIN_DIR/tclConfig.sh" >&6; } +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" - { echo "$as_me:$LINENO: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 -echo $ECHO_N "checking for existence of $TCL_BIN_DIR/tclConfig.sh... $ECHO_C" >&6; } - if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then - { echo "$as_me:$LINENO: result: loading" >&5 -echo "${ECHO_T}loading" >&6; } - . 
$TCL_BIN_DIR/tclConfig.sh - else - { echo "$as_me:$LINENO: result: file not found" >&5 -echo "${ECHO_T}file not found" >&6; } - fi +_ACEOF - # - # If the TCL_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. - # For example, the variable TCL_LIB_SPEC will be set to the value - # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC - # instead of TCL_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. - # +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" - if test -f $TCL_BIN_DIR/Makefile ; then - TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} - TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} - TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} - fi +_ACEOF - # - # eval is required to do the TCL_DBGX substitution - # +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. - eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" - eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" +Usage: $0 [OPTIONS] [FILE]... - eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" - eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" - eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE +Configuration files: +$config_files +Configuration headers: +$config_headers +Configuration commands: +$config_commands +Report bugs to ." +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_version="\\ +sqlite config.status 3.6.16 +configured by $0, generated by GNU Autoconf 2.62, + with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" +Copyright (C) 2008 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + case $ac_option in + # Handling of the options. 
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + CONFIG_FILES="$CONFIG_FILES '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + { $as_echo "$as_me: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; };; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + # This is an error. + -*) { $as_echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } ;; + *) ac_config_targets="$ac_config_targets $1" + ac_need_defaults=false ;; + esac + shift +done +ac_configure_extra_args= - fi +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" fi -if test "${use_tcl}" = "no" ; then - HAVE_TCL="" -else - HAVE_TCL=1 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" fi +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 -########## -# Figure out what C libraries are required to compile programs -# that use "readline()" library. +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS # -TARGET_READLINE_LIBS="" -TARGET_READLINE_INC="" -TARGET_HAVE_READLINE=0 -# Check whether --enable-readline was given. -if test "${enable_readline+set}" = set; then - enableval=$enable_readline; with_readline=$enableval -else - with_readline=auto -fi - -if test x"$with_readline" != xno; then - found="yes" +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH -# Check whether --with-readline-lib was given. 
-if test "${with_readline_lib+set}" = set; then - withval=$with_readline_lib; with_readline_lib=$withval -else - with_readline_lib="auto" -fi +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "X$macro_version" | $Xsed -e "$delay_single_quote_subst"`' +macro_revision='`$ECHO "X$macro_revision" | $Xsed -e "$delay_single_quote_subst"`' +enable_shared='`$ECHO "X$enable_shared" | $Xsed -e "$delay_single_quote_subst"`' +enable_static='`$ECHO "X$enable_static" | $Xsed -e "$delay_single_quote_subst"`' +pic_mode='`$ECHO "X$pic_mode" | $Xsed -e "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "X$enable_fast_install" | $Xsed -e "$delay_single_quote_subst"`' +host_alias='`$ECHO "X$host_alias" | $Xsed -e "$delay_single_quote_subst"`' +host='`$ECHO "X$host" | $Xsed -e "$delay_single_quote_subst"`' +host_os='`$ECHO "X$host_os" | $Xsed -e "$delay_single_quote_subst"`' +build_alias='`$ECHO "X$build_alias" | $Xsed -e "$delay_single_quote_subst"`' +build='`$ECHO "X$build" | $Xsed -e "$delay_single_quote_subst"`' +build_os='`$ECHO "X$build_os" | $Xsed -e "$delay_single_quote_subst"`' +SED='`$ECHO "X$SED" | $Xsed -e "$delay_single_quote_subst"`' +Xsed='`$ECHO "X$Xsed" | $Xsed -e "$delay_single_quote_subst"`' +GREP='`$ECHO "X$GREP" | $Xsed -e "$delay_single_quote_subst"`' +EGREP='`$ECHO "X$EGREP" | $Xsed -e "$delay_single_quote_subst"`' +FGREP='`$ECHO "X$FGREP" | $Xsed -e "$delay_single_quote_subst"`' +LD='`$ECHO "X$LD" | $Xsed -e "$delay_single_quote_subst"`' +NM='`$ECHO "X$NM" | $Xsed -e "$delay_single_quote_subst"`' +LN_S='`$ECHO "X$LN_S" | $Xsed -e "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "X$max_cmd_len" | $Xsed -e "$delay_single_quote_subst"`' +ac_objext='`$ECHO "X$ac_objext" | $Xsed -e "$delay_single_quote_subst"`' +exeext='`$ECHO "X$exeext" | $Xsed -e "$delay_single_quote_subst"`' +lt_unset='`$ECHO "X$lt_unset" | $Xsed -e "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "X$lt_SP2NL" | $Xsed -e "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "X$lt_NL2SP" | $Xsed -e "$delay_single_quote_subst"`' +reload_flag='`$ECHO "X$reload_flag" | $Xsed -e "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "X$reload_cmds" | $Xsed -e "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "X$OBJDUMP" | $Xsed -e "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "X$deplibs_check_method" | $Xsed -e "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "X$file_magic_cmd" | $Xsed -e "$delay_single_quote_subst"`' +AR='`$ECHO "X$AR" | $Xsed -e "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "X$AR_FLAGS" | $Xsed -e "$delay_single_quote_subst"`' +STRIP='`$ECHO "X$STRIP" | $Xsed -e "$delay_single_quote_subst"`' +RANLIB='`$ECHO "X$RANLIB" | $Xsed -e "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "X$old_postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "X$old_postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "X$old_archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' +CC='`$ECHO "X$CC" | $Xsed -e "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "X$CFLAGS" | $Xsed -e "$delay_single_quote_subst"`' +compiler='`$ECHO "X$compiler" | $Xsed -e "$delay_single_quote_subst"`' +GCC='`$ECHO "X$GCC" | $Xsed -e "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "X$lt_cv_sys_global_symbol_pipe" | $Xsed -e "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "X$lt_cv_sys_global_symbol_to_cdecl" | $Xsed -e 
"$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address" | $Xsed -e "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' +objdir='`$ECHO "X$objdir" | $Xsed -e "$delay_single_quote_subst"`' +SHELL='`$ECHO "X$SHELL" | $Xsed -e "$delay_single_quote_subst"`' +ECHO='`$ECHO "X$ECHO" | $Xsed -e "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "X$MAGIC_CMD" | $Xsed -e "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "X$lt_prog_compiler_no_builtin_flag" | $Xsed -e "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "X$lt_prog_compiler_wl" | $Xsed -e "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "X$lt_prog_compiler_pic" | $Xsed -e "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "X$lt_prog_compiler_static" | $Xsed -e "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "X$lt_cv_prog_compiler_c_o" | $Xsed -e "$delay_single_quote_subst"`' +need_locks='`$ECHO "X$need_locks" | $Xsed -e "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "X$DSYMUTIL" | $Xsed -e "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "X$NMEDIT" | $Xsed -e "$delay_single_quote_subst"`' +LIPO='`$ECHO "X$LIPO" | $Xsed -e "$delay_single_quote_subst"`' +OTOOL='`$ECHO "X$OTOOL" | $Xsed -e "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "X$OTOOL64" | $Xsed -e "$delay_single_quote_subst"`' +libext='`$ECHO "X$libext" | $Xsed -e "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "X$shrext_cmds" | $Xsed -e "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "X$extract_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "X$archive_cmds_need_lc" | $Xsed -e "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "X$enable_shared_with_static_runtimes" | $Xsed -e "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "X$export_dynamic_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "X$whole_archive_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "X$compiler_needs_object" | $Xsed -e "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "X$old_archive_from_new_cmds" | $Xsed -e "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "X$old_archive_from_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "X$archive_cmds" | $Xsed -e "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "X$archive_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' +module_cmds='`$ECHO "X$module_cmds" | $Xsed -e "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "X$module_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "X$with_gnu_ld" | $Xsed -e "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "X$allow_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "X$no_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "X$hardcode_libdir_flag_spec" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_ld='`$ECHO "X$hardcode_libdir_flag_spec_ld" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "X$hardcode_libdir_separator" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "X$hardcode_direct" | $Xsed -e "$delay_single_quote_subst"`' 
+hardcode_direct_absolute='`$ECHO "X$hardcode_direct_absolute" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "X$hardcode_minus_L" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "X$hardcode_shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "X$hardcode_automatic" | $Xsed -e "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "X$inherit_rpath" | $Xsed -e "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "X$link_all_deplibs" | $Xsed -e "$delay_single_quote_subst"`' +fix_srcfile_path='`$ECHO "X$fix_srcfile_path" | $Xsed -e "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "X$always_export_symbols" | $Xsed -e "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "X$export_symbols_cmds" | $Xsed -e "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "X$exclude_expsyms" | $Xsed -e "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "X$include_expsyms" | $Xsed -e "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "X$prelink_cmds" | $Xsed -e "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "X$file_list_spec" | $Xsed -e "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "X$variables_saved_for_relink" | $Xsed -e "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "X$need_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`' +need_version='`$ECHO "X$need_version" | $Xsed -e "$delay_single_quote_subst"`' +version_type='`$ECHO "X$version_type" | $Xsed -e "$delay_single_quote_subst"`' +runpath_var='`$ECHO "X$runpath_var" | $Xsed -e "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "X$shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "X$shlibpath_overrides_runpath" | $Xsed -e "$delay_single_quote_subst"`' +libname_spec='`$ECHO "X$libname_spec" | $Xsed -e "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "X$library_names_spec" | $Xsed -e "$delay_single_quote_subst"`' +soname_spec='`$ECHO "X$soname_spec" | $Xsed -e "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "X$postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "X$postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "X$finish_cmds" | $Xsed -e "$delay_single_quote_subst"`' +finish_eval='`$ECHO "X$finish_eval" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "X$hardcode_into_libs" | $Xsed -e "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "X$sys_lib_search_path_spec" | $Xsed -e "$delay_single_quote_subst"`' +sys_lib_dlsearch_path_spec='`$ECHO "X$sys_lib_dlsearch_path_spec" | $Xsed -e "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "X$hardcode_action" | $Xsed -e "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "X$enable_dlopen" | $Xsed -e "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "X$enable_dlopen_self" | $Xsed -e "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "X$enable_dlopen_self_static" | $Xsed -e "$delay_single_quote_subst"`' +old_striplib='`$ECHO "X$old_striplib" | $Xsed -e "$delay_single_quote_subst"`' +striplib='`$ECHO "X$striplib" | $Xsed -e "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# Quote evaled strings. 
+for var in SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +AR \ +AR_FLAGS \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +SHELL \ +ECHO \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_wl \ +lt_prog_compiler_pic \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_flag_spec_ld \ +hardcode_libdir_separator \ +fix_srcfile_path \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +finish_eval \ +old_striplib \ +striplib; do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done - if test "x$with_readline_lib" = xauto; then - save_LIBS="$LIBS" - LIBS="" - { echo "$as_me:$LINENO: checking for library containing tgetent" >&5 -echo $ECHO_N "checking for library containing tgetent... $ECHO_C" >&6; } -if test "${ac_cv_search_tgetent+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_func_search_save_LIBS=$LIBS -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ +# Double-quote double-evaled strings. +for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +sys_lib_dlsearch_path_spec; do + case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char tgetent (); -int -main () -{ -return tgetent (); - ; - return 0; -} -_ACEOF -for ac_lib in '' readline ncurses curses termcap; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; +# Fix-up fallback echo if it was mangled by the above quoting rules. +case \$lt_ECHO in +*'\\\$0 --fallback-echo"') lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\$0 --fallback-echo"\$/\$0 --fallback-echo"/'\` + ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? 
- grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_search_tgetent=$ac_res -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +ac_aux_dir='$ac_aux_dir' +xsi_shell='$xsi_shell' +lt_shell_append='$lt_shell_append' +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST fi -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext - if test "${ac_cv_search_tgetent+set}" = set; then - break -fi -done -if test "${ac_cv_search_tgetent+set}" = set; then - : -else - ac_cv_search_tgetent=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_search_tgetent" >&5 -echo "${ECHO_T}$ac_cv_search_tgetent" >&6; } -ac_res=$ac_cv_search_tgetent -if test "$ac_res" != no; then - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - term_LIBS="$LIBS" -else - term_LIBS="" -fi - { echo "$as_me:$LINENO: checking for readline in -lreadline" >&5 -echo $ECHO_N "checking for readline in -lreadline... $ECHO_C" >&6; } -if test "${ac_cv_lib_readline_readline+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lreadline $LIBS" -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + + + -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char readline (); -int -main () -{ -return readline (); - ; - return 0; -} _ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_lib_readline_readline=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_cv_lib_readline_readline=no -fi +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; + + *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_lib_readline_readline" >&5 -echo "${ECHO_T}$ac_cv_lib_readline_readline" >&6; } -if test $ac_cv_lib_readline_readline = yes; then - TARGET_READLINE_LIBS="-lreadline" -else - found="no" + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi - TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" - LIBS="$save_LIBS" - else - TARGET_READLINE_LIBS="$with_readline_lib" - fi +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= + trap 'exit_status=$? + { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status +' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || +{ + $as_echo "$as_me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then -# Check whether --with-readline-inc was given. -if test "${with_readline_inc+set}" = set; then - withval=$with_readline_inc; with_readline_inc=$withval +ac_cr=' ' +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' else - with_readline_inc="auto" + ac_cs_awk_cr=$ac_cr fi - if test "x$with_readline_inc" = xauto; then - if test "${ac_cv_header_readline_h+set}" = set; then - { echo "$as_me:$LINENO: checking for readline.h" >&5 -echo $ECHO_N "checking for readline.h... $ECHO_C" >&6; } -if test "${ac_cv_header_readline_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -fi -{ echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 -echo "${ECHO_T}$ac_cv_header_readline_h" >&6; } -else - # Is the header compilable? -{ echo "$as_me:$LINENO: checking readline.h usability" >&5 -echo $ECHO_N "checking readline.h usability... 
$ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$ac_includes_default -#include +echo 'BEGIN {' >"$tmp/subs1.awk" && _ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then - ac_header_compiler=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 -echo "${ECHO_T}$ac_header_compiler" >&6; } +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } +ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } -# Is the header present? -{ echo "$as_me:$LINENO: checking readline.h presence" >&5 -echo $ECHO_N "checking readline.h presence... $ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` = $ac_delim_num; then + break + elif $ac_last_try; then + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$tmp/subs1.awk" <<\\_ACAWK && _ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. 
*/ -#include +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\).*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\).*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK _ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then - ac_header_preproc=yes +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 + cat +fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ + || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5 +$as_echo "$as_me: error: could not setup config files machinery" >&2;} + { (exit 1); exit 1; }; } +_ACEOF - ac_header_preproc=no +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/ +s/:*\${srcdir}:*/:/ +s/:*@srcdir@:*/:/ +s/^\([^=]*=[ ]*\):*/\1/ +s/:*$// +s/^[^=]*=[ ]*$// +}' fi -rm -f conftest.err conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 -echo "${ECHO_T}$ac_header_preproc" >&6; } +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in - yes:no: ) - { echo "$as_me:$LINENO: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&5 -echo "$as_me: WARNING: readline.h: accepted by the compiler, rejected by the preprocessor!" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the compiler's result" >&5 -echo "$as_me: WARNING: readline.h: proceeding with the compiler's result" >&2;} - ac_header_preproc=yes - ;; - no:yes:* ) - { echo "$as_me:$LINENO: WARNING: readline.h: present but cannot be compiled" >&5 -echo "$as_me: WARNING: readline.h: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: check for missing prerequisite headers?" 
>&5 -echo "$as_me: WARNING: readline.h: check for missing prerequisite headers?" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: see the Autoconf documentation" >&5 -echo "$as_me: WARNING: readline.h: see the Autoconf documentation" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&5 -echo "$as_me: WARNING: readline.h: section \"Present But Cannot Be Compiled\"" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: readline.h: proceeding with the preprocessor's result" >&2;} - { echo "$as_me:$LINENO: WARNING: readline.h: in the future, the compiler will take precedence" >&5 -echo "$as_me: WARNING: readline.h: in the future, the compiler will take precedence" >&2;} +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF - ;; -esac -{ echo "$as_me:$LINENO: checking for readline.h" >&5 -echo $ECHO_N "checking for readline.h... $ECHO_C" >&6; } -if test "${ac_cv_header_readline_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_cv_header_readline_h=$ac_header_preproc -fi -{ echo "$as_me:$LINENO: result: $ac_cv_header_readline_h" >&5 -echo "${ECHO_T}$ac_cv_header_readline_h" >&6; } +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. -fi -if test $ac_cv_header_readline_h = yes; then - found="yes" -else +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_t=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_t"; then + break + elif $ac_last_try; then + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done - found="no" - if test "$cross_compiling" != yes; then - for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do - for subdir in include include/readline; do - as_ac_File=`echo "ac_cv_file_$dir/$subdir/readline.h" | $as_tr_sh` -{ echo "$as_me:$LINENO: checking for $dir/$subdir/readline.h" >&5 -echo $ECHO_N "checking for $dir/$subdir/readline.h... $ECHO_C" >&6; } -if { as_var=$as_ac_File; eval "test \"\${$as_var+set}\" = set"; }; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - test "$cross_compiling" = yes && - { { echo "$as_me:$LINENO: error: cannot check for file existence when cross compiling" >&5 -echo "$as_me: error: cannot check for file existence when cross compiling" >&2;} +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + prefix = substr(line, 1, index(line, defundef) - 1) + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", line, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5 +$as_echo "$as_me: error: could not setup config headers machinery" >&2;} { (exit 1); exit 1; }; } -if test -r "$dir/$subdir/readline.h"; then - eval "$as_ac_File=yes" -else - eval "$as_ac_File=no" -fi -fi -ac_res=`eval echo '${'$as_ac_File'}'` - { echo "$as_me:$LINENO: result: $ac_res" >&5 -echo "${ECHO_T}$ac_res" >&6; } -if test `eval echo '${'$as_ac_File'}'` = yes; then - found=yes -fi +fi # test -n "$CONFIG_HEADERS" - if test "$found" = "yes"; then - TARGET_READLINE_INC="-I$dir/$subdir" - break - fi - done - test "$found" = "yes" && break - done - fi -fi +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 +$as_echo "$as_me: error: Invalid tag $ac_tag." >&2;} + { (exit 1); exit 1; }; };; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 +$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;} + { (exit 1); exit 1; }; };; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + ac_file_inputs="$ac_file_inputs '$ac_f'" + done - else - TARGET_READLINE_INC="$with_readline_inc" - fi + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:$LINENO: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac - if test x"$found" = xno; then - TARGET_READLINE_LIBS="" - TARGET_READLINE_INC="" - TARGET_HAVE_READLINE=0 - else - TARGET_HAVE_READLINE=1 - fi -fi + case $ac_tag in + *:-:* | *:-) cat >"$tmp/stdin" \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { as_dir="$ac_dir" + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 +$as_echo "$as_me: error: cannot create directory $as_dir" >&2;} + { (exit 1); exit 1; }; }; } + ac_builddir=. +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + case $ac_mode in + :F) + # + # CONFIG_FILE + # -########## -# Figure out what C libraries are required to compile programs -# that use "fdatasync()" function. -# -{ echo "$as_me:$LINENO: checking for library containing fdatasync" >&5 -echo $ECHO_N "checking for library containing fdatasync... $ECHO_C" >&6; } -if test "${ac_cv_search_fdatasync+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_func_search_save_LIBS=$LIBS -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac _ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char fdatasync (); -int -main () -{ -return fdatasync (); - ; - return 0; +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= + +ac_sed_dataroot=' +/datarootdir/ { + p + q } +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p +' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_search_fdatasync=$ac_res -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +_ACEOF +# Neutralize VPATH when `$srcdir' = `.'. 
+# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } -fi +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&2;} -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext - if test "${ac_cv_search_fdatasync+set}" = set; then - break -fi -done -if test "${ac_cv_search_fdatasync+set}" = set; then - : -else - ac_cv_search_fdatasync=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_search_fdatasync" >&5 -echo "${ECHO_T}$ac_cv_search_fdatasync" >&6; } -ac_res=$ac_cv_search_fdatasync -if test "$ac_res" != no; then - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + rm -f "$tmp/stdin" + case $ac_file in + -) cat "$tmp/out" && rm -f "$tmp/out";; + *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; + esac \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" + } >"$tmp/config.h" \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } + if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$tmp/config.h" "$ac_file" \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ + || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5 +$as_echo "$as_me: error: could not create -" >&2;} + { (exit 1); exit 1; }; } + fi + ;; -fi + :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac -######### -# check for debug enabled 
-# Check whether --enable-debug was given.
-if test "${enable_debug+set}" = set; then
- enableval=$enable_debug; use_debug=$enableval
-else
- use_debug=no
-fi
+ case $ac_file$ac_mode in
+ "libtool":C)
-if test "${use_debug}" = "yes" ; then
- TARGET_DEBUG="-DSQLITE_DEBUG=1"
-else
- TARGET_DEBUG="-DNDEBUG"
-fi
+ # See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+ fi
+ cfgfile="${ofile}T"
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
-#########
-# Figure out whether or not we have a "usleep()" function.
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
#
-{ echo "$as_me:$LINENO: checking for usleep" >&5
-echo $ECHO_N "checking for usleep... $ECHO_C" >&6; }
-if test "${ac_cv_func_usleep+set}" = set; then
- echo $ECHO_N "(cached) $ECHO_C" >&6
-else
- cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h. */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h. */
-/* Define usleep to an innocuous variant, in case <limits.h> declares usleep.
- For example, HP-UX 11i <limits.h> declares gettimeofday. */
-#define usleep innocuous_usleep
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-/* System header to define __stub macros and hopefully few prototypes,
- which can conflict with char usleep (); below.
- Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
- <limits.h> exists even on freestanding compilers. */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
+# The names of the tagged configurations supported by this script.
+available_tags=""
-#undef usleep
+# ### BEGIN LIBTOOL CONFIG
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char usleep ();
-/* The GNU C library defines this for functions which it implements
- to always fail with ENOSYS. Some functions are actually named
- something starting with __ and the normal name is an alias. */
-#if defined __stub_usleep || defined __stub___usleep
-choke me
-#endif
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
-int
-main ()
-{
-return usleep ();
- ;
- return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
- (eval "$ac_link") 2>conftest.er1
- ac_status=$?
- grep -v '^ *+' conftest.er1 >conftest.err
- rm -f conftest.er1
- cat conftest.err >&5
- echo "$as_me:$LINENO: \$? = $ac_status" >&5
- (exit $ac_status); } && {
- test -z "$ac_c_werror_flag" ||
- test ! -s conftest.err
- } && test -s conftest$ac_exeext &&
- $as_test_x conftest$ac_exeext; then
- ac_cv_func_usleep=yes
-else
- echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
- ac_cv_func_usleep=no
-fi
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_func_usleep" >&5
-echo "${ECHO_T}$ac_cv_func_usleep" >&6; }
-if test $ac_cv_func_usleep = yes; then
- TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_USLEEP=1"
-fi
+# What type of objects to build.
+pic_mode=$pic_mode
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
-#--------------------------------------------------------------------
-# Redefine fdatasync as fsync on systems that lack fdatasync
-#--------------------------------------------------------------------
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
-{ echo "$as_me:$LINENO: checking for fdatasync" >&5
-echo $ECHO_N "checking for fdatasync... $ECHO_C" >&6; }
-if test "${ac_cv_func_fdatasync+set}" = set; then
- echo $ECHO_N "(cached) $ECHO_C" >&6
-else
- cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h. */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h. */
-/* Define fdatasync to an innocuous variant, in case <limits.h> declares fdatasync.
- For example, HP-UX 11i <limits.h> declares gettimeofday. */
-#define fdatasync innocuous_fdatasync
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
-/* System header to define __stub macros and hopefully few prototypes,
- which can conflict with char fdatasync (); below.
- Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
- <limits.h> exists even on freestanding compilers. */
+# A sed program that does not truncate output.
+SED=$lt_SED
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="\$SED -e 1s/^X//"
-#undef fdatasync
+# A grep program that handles long lines.
+GREP=$lt_GREP
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char fdatasync ();
-/* The GNU C library defines this for functions which it implements
- to always fail with ENOSYS. Some functions are actually named
- something starting with __ and the normal name is an alias.
*/ -#if defined __stub_fdatasync || defined __stub___fdatasync -choke me -#endif +# An ERE matcher. +EGREP=$lt_EGREP -int -main () -{ -return fdatasync (); - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && - $as_test_x conftest$ac_exeext; then - ac_cv_func_fdatasync=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +# A literal string matcher. +FGREP=$lt_FGREP - ac_cv_func_fdatasync=no -fi +# A BSD- or MS-compatible name lister. +NM=$lt_NM -rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - conftest$ac_exeext conftest.$ac_ext -fi -{ echo "$as_me:$LINENO: result: $ac_cv_func_fdatasync" >&5 -echo "${ECHO_T}$ac_cv_func_fdatasync" >&6; } -if test $ac_cv_func_fdatasync = yes; then - TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_FDATASYNC=1" -fi +# Whether we need soft or hard links. +LN_S=$lt_LN_S +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len -######### -# Generate the output files. -# -ac_config_files="$ac_config_files Makefile sqlite3.pc" +# Object file suffix (normally "o"). +objext=$ac_objext -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. +# Executable file suffix (normally ""). +exeext=$exeext -_ACEOF +# whether the shell understands "unset". +lt_unset=$lt_unset -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 -echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - *) $as_unset $ac_var ;; - esac ;; - esac - done +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes (double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \). - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. 
- sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - test "x$cache_file" != "x/dev/null" && - { echo "$as_me:$LINENO: updating cache $cache_file" >&5 -echo "$as_me: updating cache $cache_file" >&6;} - cat confcache >$cache_file - else - { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 -echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds -# Transform confdefs.h into DEFS. -# Protect against shell expansion while executing Makefile rules. -# Protect against Makefile macro expansion. -# -# If the first sed substitution is executed (which looks for macros that -# take arguments), then branch to the quote section. Otherwise, -# look for a macro that doesn't take arguments. -ac_script=' -t clear -:clear -s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g -t quote -s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g -t quote -b any -:quote -s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g -s/\[/\\&/g -s/\]/\\&/g -s/\$/$$/g -H -:any -${ - g - s/^\n// - s/\n/ /g - p -} -' -DEFS=`sed -n "$ac_script" confdefs.h` +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method -ac_libobjs= -ac_ltlibobjs= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" - ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs +# Command to use when deplibs_check_method == "file_magic". +file_magic_cmd=$lt_file_magic_cmd -LTLIBOBJS=$ac_ltlibobjs +# The archiver. +AR=$lt_AR +AR_FLAGS=$lt_AR_FLAGS +# A symbol stripping program. +STRIP=$lt_STRIP +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds -: ${CONFIG_STATUS=./config.status} -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 -echo "$as_me: creating $CONFIG_STATUS" >&6;} -cat >$CONFIG_STATUS <<_ACEOF -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. +# A C compiler. +LTCC=$lt_CC -debug=false -ac_cs_recheck=false -ac_cs_silent=false -SHELL=\${CONFIG_SHELL-$SHELL} -_ACEOF +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS -cat >>$CONFIG_STATUS <<\_ACEOF -## --------------------- ## -## M4sh Initialization. ## -## --------------------- ## +# Take the output of nm and produce a listing of raw symbols and C names. 
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then - emulate sh - NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in - *posix*) set -o posix ;; -esac +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl -fi +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix +# The name of the directory that contains temporary libtool files. +objdir=$objdir +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL -# PATH needs CR -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits +# An echo program that does not interpret backslashes. +ECHO=$lt_ECHO -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - echo "#! /bin/sh" >conf$$.sh - echo "exit 0" >>conf$$.sh - chmod +x conf$$.sh - if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then - PATH_SEPARATOR=';' - else - PATH_SEPARATOR=: - fi - rm -f conf$$.sh -fi +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD -# Support unset when possible. -if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - as_unset=unset -else - as_unset=false -fi +# Must we lock files when doing compilation? +need_locks=$lt_need_locks +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -as_nl=' -' -IFS=" "" $as_nl" +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT -# Find who we are. Look in the path if we contain no directory separator. -case $0 in - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break -done -IFS=$as_save_IFS +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - { (exit 1); exit 1; } -fi +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL -# Work around bugs in pre-3.0 UWIN ksh. -for as_var in ENV MAIL MAILPATH -do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var -done -PS1='$ ' -PS2='> ' -PS4='+ ' +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 -# NLS nuisances. 
-for as_var in \ - LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ - LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ - LC_TELEPHONE LC_TIME -do - if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then - eval $as_var=C; export $as_var - else - ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var - fi -done +# Old archive suffix (normally "a"). +libext=$libext -# Required to use basename. -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink -# Name of the executable. -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix -# CDPATH. -$as_unset CDPATH +# Do we need a version for libraries? +need_version=$need_version +# Library versioning type. +version_type=$version_type +# Shared library runtime path variable. +runpath_var=$runpath_var - as_lineno_1=$LINENO - as_lineno_2=$LINENO - test "x$as_lineno_1" != "x$as_lineno_2" && - test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { +# Shared library path variable. +shlibpath_var=$shlibpath_var - # Create $as_me.lineno as a copy of $as_myself, but with $LINENO - # uniformly replaced by the line number. The first 'sed' inserts a - # line-number line after each line using $LINENO; the second 'sed' - # does the real work. The second script uses 'N' to pair each - # line-number line with the line containing $LINENO, and appends - # trailing '-' during substitution so that $LINENO is not a special - # case at line end. - # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the - # scripts with optimization help from Paolo Bonzini. Blame Lee - # E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 - { (exit 1); exit 1; }; } +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} +# Format of library name prefix. +libname_spec=$lt_libname_spec +# List of archive names. First name is the real one, the rest are links. 
+# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in --n*) - case `echo 'x\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - *) ECHO_C='\c';; - esac;; -*) - ECHO_N='-n';; -esac +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir -fi -echo >conf$$.file -if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' -elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds -if mkdir -p . 2>/dev/null; then - as_mkdir_p=: -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static -exec 6>&1 +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib -# Save the log message, to keep $[0] and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by $as_me, which was -generated by GNU Autoconf 2.61. 
Invocation command line was - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ +# The linker used to build libraries. +LD=$lt_LD -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds -_ACEOF +# A language specific compiler. +CC=$lt_compiler -cat >>$CONFIG_STATUS <<_ACEOF -# Files that config.status was made for. -config_files="$ac_config_files" +# Is the compiler the GNU compiler? +with_gcc=$GCC -_ACEOF +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag -cat >>$CONFIG_STATUS <<\_ACEOF -ac_cs_usage="\ -\`$as_me' instantiates files from templates according to the -current configuration. +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl -Usage: $0 [OPTIONS] [FILE]... +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - -q, --quiet do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static -Configuration files: -$config_files +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o -Report bugs to ." +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF -ac_cs_version="\\ -config.status -configured by $0, generated by GNU Autoconf 2.61, - with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes -Copyright (C) 2006 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec -ac_pwd='$ac_pwd' -srcdir='$srcdir' -INSTALL='$INSTALL' -_ACEOF +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec -cat >>$CONFIG_STATUS <<\_ACEOF -# If no file are specified by the user, then we need to provide default -# value. By we need to know if files were specified by the user. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object - case $ac_option in - # Handling of the options. 
- -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - echo "$ac_cs_version"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - CONFIG_FILES="$CONFIG_FILES $ac_optarg" - ac_need_defaults=false;; - --he | --h | --help | --hel | -h ) - echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds - # This is an error. - -*) { echo "$as_me: error: unrecognized option: $1 -Try \`$0 --help' for more information." >&2 - { (exit 1); exit 1; }; } ;; +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds - *) ac_config_targets="$ac_config_targets $1" - ac_need_defaults=false ;; +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds - esac - shift -done +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds -ac_configure_extra_args= +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF -if \$ac_cs_recheck; then - echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 - CONFIG_SHELL=$SHELL - export CONFIG_SHELL - exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion -fi +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - echo "$ac_log" -} >&5 +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF -_ACEOF +# If ld is used when linking, flag to hardcode \$libdir into a binary +# during linking. This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld -cat >>$CONFIG_STATUS <<\_ACEOF +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; - "sqlite3.pc") CONFIG_FILES="$CONFIG_FILES sqlite3.pc" ;; +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. 
+hardcode_direct=$hardcode_direct - *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 -echo "$as_me: error: invalid argument: $ac_config_target" >&2;} - { (exit 1); exit 1; }; };; - esac -done +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files -fi +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= - trap 'exit_status=$? - { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status -' 0 - trap '{ (exit 1); exit 1; }' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -n "$tmp" && test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || -{ - echo "$me: cannot create a temporary directory in ." >&2 - { (exit 1); exit 1; } -} +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath -# -# Set up the sed scripts for CONFIG_FILES section. -# +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs -# No need to generate the scripts if there are no CONFIG_FILES. -# This happens for instance when ./config.status config.h -if test -n "$CONFIG_FILES"; then +# Fix the shell variable \$srcfile for the compiler. +fix_srcfile_path=$lt_fix_srcfile_path -_ACEOF +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds +# Symbols that should not be listed in the preloaded symbols. 
+exclude_expsyms=$lt_exclude_expsyms -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - cat >conf$$subs.sed <<_ACEOF -SHELL!$SHELL$ac_delim -PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim -PACKAGE_NAME!$PACKAGE_NAME$ac_delim -PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim -PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim -PACKAGE_STRING!$PACKAGE_STRING$ac_delim -PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim -exec_prefix!$exec_prefix$ac_delim -prefix!$prefix$ac_delim -program_transform_name!$program_transform_name$ac_delim -bindir!$bindir$ac_delim -sbindir!$sbindir$ac_delim -libexecdir!$libexecdir$ac_delim -datarootdir!$datarootdir$ac_delim -datadir!$datadir$ac_delim -sysconfdir!$sysconfdir$ac_delim -sharedstatedir!$sharedstatedir$ac_delim -localstatedir!$localstatedir$ac_delim -includedir!$includedir$ac_delim -oldincludedir!$oldincludedir$ac_delim -docdir!$docdir$ac_delim -infodir!$infodir$ac_delim -htmldir!$htmldir$ac_delim -dvidir!$dvidir$ac_delim -pdfdir!$pdfdir$ac_delim -psdir!$psdir$ac_delim -libdir!$libdir$ac_delim -localedir!$localedir$ac_delim -mandir!$mandir$ac_delim -DEFS!$DEFS$ac_delim -ECHO_C!$ECHO_C$ac_delim -ECHO_N!$ECHO_N$ac_delim -ECHO_T!$ECHO_T$ac_delim -LIBS!$LIBS$ac_delim -build_alias!$build_alias$ac_delim -host_alias!$host_alias$ac_delim -target_alias!$target_alias$ac_delim -build!$build$ac_delim -build_cpu!$build_cpu$ac_delim -build_vendor!$build_vendor$ac_delim -build_os!$build_os$ac_delim -host!$host$ac_delim -host_cpu!$host_cpu$ac_delim -host_vendor!$host_vendor$ac_delim -host_os!$host_os$ac_delim -CC!$CC$ac_delim -CFLAGS!$CFLAGS$ac_delim -LDFLAGS!$LDFLAGS$ac_delim -CPPFLAGS!$CPPFLAGS$ac_delim -ac_ct_CC!$ac_ct_CC$ac_delim -EXEEXT!$EXEEXT$ac_delim -OBJEXT!$OBJEXT$ac_delim -SED!$SED$ac_delim -GREP!$GREP$ac_delim -EGREP!$EGREP$ac_delim -LN_S!$LN_S$ac_delim -ECHO!$ECHO$ac_delim -AR!$AR$ac_delim -RANLIB!$RANLIB$ac_delim -STRIP!$STRIP$ac_delim -CPP!$CPP$ac_delim -CXX!$CXX$ac_delim -CXXFLAGS!$CXXFLAGS$ac_delim -ac_ct_CXX!$ac_ct_CXX$ac_delim -CXXCPP!$CXXCPP$ac_delim -F77!$F77$ac_delim -FFLAGS!$FFLAGS$ac_delim -ac_ct_F77!$ac_ct_F77$ac_delim -LIBTOOL!$LIBTOOL$ac_delim -INSTALL_PROGRAM!$INSTALL_PROGRAM$ac_delim -INSTALL_SCRIPT!$INSTALL_SCRIPT$ac_delim -INSTALL_DATA!$INSTALL_DATA$ac_delim -AWK!$AWK$ac_delim -program_prefix!$program_prefix$ac_delim -VERSION!$VERSION$ac_delim -RELEASE!$RELEASE$ac_delim -VERSION_NUMBER!$VERSION_NUMBER$ac_delim -BUILD_CC!$BUILD_CC$ac_delim -BUILD_CFLAGS!$BUILD_CFLAGS$ac_delim -THREADSAFE!$THREADSAFE$ac_delim -TARGET_THREAD_LIB!$TARGET_THREAD_LIB$ac_delim -XTHREADCONNECT!$XTHREADCONNECT$ac_delim -THREADSOVERRIDELOCKS!$THREADSOVERRIDELOCKS$ac_delim -ALLOWRELEASE!$ALLOWRELEASE$ac_delim -TEMP_STORE!$TEMP_STORE$ac_delim -BUILD_EXEEXT!$BUILD_EXEEXT$ac_delim -OS_UNIX!$OS_UNIX$ac_delim -OS_WIN!$OS_WIN$ac_delim -OS_OS2!$OS_OS2$ac_delim -TARGET_EXEEXT!$TARGET_EXEEXT$ac_delim -TCL_VERSION!$TCL_VERSION$ac_delim -TCL_BIN_DIR!$TCL_BIN_DIR$ac_delim -TCL_SRC_DIR!$TCL_SRC_DIR$ac_delim -TCL_LIBS!$TCL_LIBS$ac_delim -TCL_INCLUDE_SPEC!$TCL_INCLUDE_SPEC$ac_delim -TCL_LIB_FILE!$TCL_LIB_FILE$ac_delim -TCL_LIB_FLAG!$TCL_LIB_FLAG$ac_delim -_ACEOF +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then - break - elif $ac_last_try; then - { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 -echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} - { (exit 1); exit 1; }; } - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds -ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` -if test -n "$ac_eof"; then - ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` - ac_eof=`expr $ac_eof + 1` -fi +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec -cat >>$CONFIG_STATUS <<_ACEOF -cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -_ACEOF -sed ' -s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g -s/^/s,@/; s/!/@,|#_!!_#|/ -:n -t n -s/'"$ac_delim"'$/,g/; t -s/$/\\/; p -N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n -' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF -CEOF$ac_eof -_ACEOF +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action +# ### END LIBTOOL CONFIG -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - cat >conf$$subs.sed <<_ACEOF -TCL_LIB_SPEC!$TCL_LIB_SPEC$ac_delim -TCL_STUB_LIB_FILE!$TCL_STUB_LIB_FILE$ac_delim -TCL_STUB_LIB_FLAG!$TCL_STUB_LIB_FLAG$ac_delim -TCL_STUB_LIB_SPEC!$TCL_STUB_LIB_SPEC$ac_delim -HAVE_TCL!$HAVE_TCL$ac_delim -TARGET_READLINE_LIBS!$TARGET_READLINE_LIBS$ac_delim -TARGET_READLINE_INC!$TARGET_READLINE_INC$ac_delim -TARGET_HAVE_READLINE!$TARGET_HAVE_READLINE$ac_delim -TARGET_DEBUG!$TARGET_DEBUG$ac_delim -LIBOBJS!$LIBOBJS$ac_delim -LTLIBOBJS!$LTLIBOBJS$ac_delim -_ACEOF +_LT_EOF - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 11; then - break - elif $ac_last_try; then - { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 -echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} - { (exit 1); exit 1; }; } - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac -ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` -if test -n "$ac_eof"; then - ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` - ac_eof=`expr $ac_eof + 1` -fi - -cat >>$CONFIG_STATUS <<_ACEOF -cat >"\$tmp/subs-2.sed" <<\CEOF$ac_eof -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end -_ACEOF -sed ' -s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g -s/^/s,@/; s/!/@,|#_!!_#|/ -:n -t n -s/'"$ac_delim"'$/,g/; t -s/$/\\/; p -N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n -' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF -:end -s/|#_!!_#|//g -CEOF$ac_eof -_ACEOF +ltmain="$ac_aux_dir/ltmain.sh" -# VPATH may cause trouble with some makes, so we remove $(srcdir), -# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=/{ -s/:*\$(srcdir):*/:/ -s/:*\${srcdir}:*/:/ -s/:*@srcdir@:*/:/ -s/^\([^=]*=[ ]*\):*/\1/ -s/:*$// -s/^[^=]*=[ ]*$// -}' -fi -cat >>$CONFIG_STATUS <<\_ACEOF -fi # test -n "$CONFIG_FILES" + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + case $xsi_shell in + yes) + cat << \_LT_EOF >> "$cfgfile" -for ac_tag in :F $CONFIG_FILES -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 -echo "$as_me: error: Invalid tag $ac_tag." >&2;} - { (exit 1); exit 1; }; };; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; +} + +# func_basename file +func_basename () +{ + func_basename_result="${1##*/}" +} + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift + func_basename_result="${1##*/}" +} - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 -echo "$as_me: error: cannot find input file: $ac_f" >&2;} - { (exit 1); exit 1; }; };; - esac - ac_file_inputs="$ac_file_inputs $ac_f" - done +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +func_stripname () +{ + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. + func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"} +} - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input="Generated from "`IFS=: - echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure." - if test x"$ac_file" != x-; then - configure_input="$ac_file. 
$configure_input" - { echo "$as_me:$LINENO: creating $ac_file" >&5 -echo "$as_me: creating $ac_file" >&6;} - fi +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=${1%%=*} + func_opt_split_arg=${1#*=} +} - case $ac_tag in - *:-:* | *:-) cat >"$tmp/stdin";; - esac - ;; +# func_lo2o object +func_lo2o () +{ + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; esac +} - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - { as_dir="$ac_dir" - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 -echo "$as_me: error: cannot create directory $as_dir" >&2;} - { (exit 1); exit 1; }; }; } - ac_builddir=. +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=${1%.*}.lo +} -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=$(( $* )) +} -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=${#1} +} +_LT_EOF + ;; + *) # Bourne compatible functions. + cat << \_LT_EOF >> "$cfgfile" - case $ac_mode in - :F) - # - # CONFIG_FILE - # +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + # Extract subdirectory from the argument. 
+ func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} - case $INSTALL in - [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; - *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` +} + + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "X${3}" \ + | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; esac -_ACEOF +} -cat >>$CONFIG_STATUS <<\_ACEOF -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= +# sed scripts: +my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +my_sed_long_arg='1s/^-[^=]*=//' -case `sed -n '/datarootdir/ { - p - q +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` + func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` } -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p -' $ac_file_inputs` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` +} + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[^.]*$/.lo/'` +} + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "$@"` +} + +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +} + +_LT_EOF esac -_ACEOF -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? -cat >>$CONFIG_STATUS <<_ACEOF - sed "$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s&@configure_input@&$configure_input&;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -s&@INSTALL@&$ac_INSTALL&;t t -$ac_datarootdir_hack -" $ac_file_inputs | sed -f "$tmp/subs-1.sed" | sed -f "$tmp/subs-2.sed" >$tmp/out +case $lt_shell_append in + yes) + cat << \_LT_EOF >> "$cfgfile" + +# func_append var value +# Append VALUE to the end of shell variable VAR. 
+func_append () +{ + eval "$1+=\$2" +} +_LT_EOF + ;; + *) + cat << \_LT_EOF >> "$cfgfile" -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && - { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." >&5 -echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." >&2;} +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "$1=\$$1\$2" +} - rm -f "$tmp/stdin" - case $ac_file in - -) cat "$tmp/out"; rm -f "$tmp/out";; - *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;; +_LT_EOF + ;; esac - ;; + sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) - esac + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + ;; + esac done # for ac_tag @@ -21131,6 +15663,11 @@ chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save +test $ac_write_fail = 0 || + { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. @@ -21152,4 +15689,8 @@ # would make configure fail if this is the last instruction. $ac_cs_success || { (exit 1); exit 1; } fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:$LINENO: WARNING: Unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: Unrecognized options: $ac_unrecognized_opts" >&2;} +fi diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/configure.ac /tmp/3ARg2Grji7/sqlite3-3.6.16/configure.ac --- sqlite3-3.4.2/configure.ac 2007-06-12 13:18:00.000000000 +0100 +++ sqlite3-3.6.16/configure.ac 2009-06-25 12:24:37.000000000 +0100 @@ -87,12 +87,21 @@ # you don't need (for example BLT) by erasing or commenting out # the corresponding code. # -AC_INIT(src/sqlite.h.in) +AC_INIT(sqlite, m4_esyscmd([cat VERSION | tr -d '\n'])) + +dnl Make sure the local VERSION file matches this configure script +sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'` +if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then +AC_MSG_ERROR([configure script is out of date: + configure \$PACKAGE_VERSION = $PACKAGE_VERSION + top level VERSION file = $sqlite_version_sanity_check +please regen with autoconf]) +fi dnl Put the RCS revision string after AC_INIT so that it will also dnl show in in configure. 
# The following RCS revision string applies to configure.in -# $Revision: 1.29 $ +# $Revision: 1.56 $ ######### # Programs needed @@ -102,6 +111,56 @@ AC_PROG_AWK ######### +# Enable large file support (if special flags are necessary) +# +AC_SYS_LARGEFILE + +######### +# Check for needed/wanted data types +AC_CHECK_TYPES([int8_t, int16_t, int32_t, int64_t, intptr_t, uint8_t, + uint16_t, uint32_t, uint64_t, uintptr_t]) + +######### +# Check for needed/wanted headers +AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h]) + +######### +# Figure out whether or not we have these functions +# +AC_CHECK_FUNCS([usleep fdatasync localtime_r gmtime_r localtime_s]) + +######### +# By default, we use the amalgamation (this may be changed below...) +# +USE_AMALGAMATION=1 + +######### +# See whether we can run specific tclsh versions known to work well; +# if not, then we fall back to plain tclsh. +# TODO: try other versions before falling back? +# +AC_CHECK_PROGS(TCLSH_CMD, [tclsh8.4 tclsh], none) +if test "$TCLSH_CMD" = "none"; then + # If we can't find a local tclsh, then building the amalgamation will fail. + # We act as though --disable-amalgamation has been used. + echo "Warning: can't find tclsh - defaulting to non-amalgamation build." + USE_AMALGAMATION=0 + TCLSH_CMD="tclsh" +fi +AC_SUBST(TCLSH_CMD) + +AC_ARG_VAR([TCLLIBDIR], [Where to install tcl plugin]) +if test "x${TCLLIBDIR+set}" != "xset" ; then + TCLLIBDIR='$(libdir)' + for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` ; do + TCLLIBDIR=$i + break + done + TCLLIBDIR="${TCLLIBDIR}/sqlite3" +fi + + +######### # Set up an appropriate program prefix # if test "$program_prefix" = "NONE"; then @@ -110,15 +169,15 @@ AC_SUBST(program_prefix) VERSION=[`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'`] -echo "Version set to $VERSION" +AC_MSG_NOTICE(Version set to $VERSION) AC_SUBST(VERSION) RELEASE=`cat $srcdir/VERSION` -echo "Release set to $RELEASE" +AC_MSG_NOTICE(Release set to $RELEASE) AC_SUBST(RELEASE) -VERSION_NUMBER=[`cat $srcdir/VERSION \ +VERSION_NUMBER=[`cat $srcdir/VERSION \ | sed 's/[^0-9]/ /g' \ | awk '{printf "%d%03d%03d",$1,$2,$3}'`] -echo "Version number set to $VERSION_NUMBER" +AC_MSG_NOTICE(Version number set to $VERSION_NUMBER) AC_SUBST(VERSION_NUMBER) ######### @@ -172,32 +231,25 @@ fi fi AC_SUBST(BUILD_CC) -AC_SUBST(BUILD_CFLAGS) ########## # Do we want to support multithreaded use of sqlite # AC_ARG_ENABLE(threadsafe, -AC_HELP_STRING([--enable-threadsafe],[Support threadsafe operation]),,enable_threadsafe=no) +AC_HELP_STRING([--enable-threadsafe],[Support threadsafe operation]),,enable_threadsafe=yes) AC_MSG_CHECKING([whether to support threadsafe operation]) if test "$enable_threadsafe" = "no"; then - THREADSAFE=0 + SQLITE_THREADSAFE=0 AC_MSG_RESULT([no]) else - THREADSAFE=1 + SQLITE_THREADSAFE=1 AC_MSG_RESULT([yes]) fi -AC_SUBST(THREADSAFE) +AC_SUBST(SQLITE_THREADSAFE) -if test "$THREADSAFE" = "1"; then - LIBS="" - AC_CHECK_LIB(pthread, pthread_create) - TARGET_THREAD_LIB="$LIBS" - LIBS="" -else - TARGET_THREAD_LIB="" +if test "$SQLITE_THREADSAFE" = "1"; then + AC_SEARCH_LIBS(pthread_create, pthread) fi -AC_SUBST(TARGET_THREAD_LIB) ########## # Do we want to allow a connection created in one thread to be used @@ -244,7 +296,7 @@ ALLOWRELEASE="" AC_MSG_RESULT([no]) else - ALLOWRELEASE="-release `cat VERSION`" + ALLOWRELEASE="-release `cat $srcdir/VERSION`" AC_MSG_RESULT([yes]) fi AC_SUBST(ALLOWRELEASE) @@ -264,17 +316,17 @@ TEMP_STORE=1 AC_MSG_RESULT([no]) ;; - always ) - TEMP_STORE=3 - 
AC_MSG_RESULT([always]) - ;; yes ) + TEMP_STORE=2 + AC_MSG_RESULT([yes]) + ;; + always ) TEMP_STORE=3 AC_MSG_RESULT([always]) ;; * ) TEMP_STORE=1 - AC_MSG_RESULT([yes]) + AC_MSG_RESULT([no]) ;; esac @@ -307,33 +359,27 @@ fi if test "$TARGET_EXEEXT" = ".exe"; then if test $OS2_SHELL ; then - OS_UNIX=0 - OS_WIN=0 - OS_OS2=1 - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_OS2=1" - if test "$ac_compiler_gnu" == "yes" ; then - TARGET_CFLAGS="$TARGET_CFLAGS -Zomf -Zexe -Zmap" - BUILD_CFLAGS="$BUILD_CFLAGS -Zomf -Zexe" - fi + SQLITE_OS_UNIX=0 + SQLITE_OS_WIN=0 + SQLITE_OS_OS2=1 + CFLAGS="$CFLAGS -DSQLITE_OS_OS2=1" else - OS_UNIX=0 - OS_WIN=1 - OS_OS2=0 - tclsubdir=win - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_WIN=1" + SQLITE_OS_UNIX=0 + SQLITE_OS_WIN=1 + SQLITE_OS_OS2=0 + CFLAGS="$CFLAGS -DSQLITE_OS_WIN=1" fi else - OS_UNIX=1 - OS_WIN=0 - OS_OS2=0 - tclsubdir=unix - TARGET_CFLAGS="$TARGET_CFLAGS -DOS_UNIX=1" + SQLITE_OS_UNIX=1 + SQLITE_OS_WIN=0 + SQLITE_OS_OS2=0 + CFLAGS="$CFLAGS -DSQLITE_OS_UNIX=1" fi AC_SUBST(BUILD_EXEEXT) -AC_SUBST(OS_UNIX) -AC_SUBST(OS_WIN) -AC_SUBST(OS_OS2) +AC_SUBST(SQLITE_OS_UNIX) +AC_SUBST(SQLITE_OS_WIN) +AC_SUBST(SQLITE_OS_OS2) AC_SUBST(TARGET_EXEEXT) ########## @@ -358,6 +404,18 @@ AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) fi fi + + # Start autosearch by asking tclsh + if test x"$cross_compiling" = xno; then + for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` + do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="$i" + break + fi + done + fi + # then check for a private Tcl installation if test x"${ac_cv_c_tclconfig}" = x ; then for i in \ @@ -438,7 +496,7 @@ # installed and uninstalled version of Tcl. # - if test -f $TCL_BIN_DIR/Makefile ; then + if test -f $TCL_BIN_DIR/Makefile ; then TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} @@ -563,19 +621,98 @@ AC_SUBST(TARGET_DEBUG) ######### -# Figure out whether or not we have a "usleep()" function. 
-# -AC_CHECK_FUNC(usleep, [TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_USLEEP=1"]) +# See whether we should use the amalgamation to build +AC_ARG_ENABLE(amalgamation, AC_HELP_STRING([--disable-amalgamation], + [Disable the amalgamation and instead build all files separately]), + [use_amalgamation=$enableval],[use_amalgamation=yes]) +if test "${use_amalgamation}" != "yes" ; then + USE_AMALGAMATION=0 +fi +AC_SUBST(USE_AMALGAMATION) -#-------------------------------------------------------------------- -# Redefine fdatasync as fsync on systems that lack fdatasync -#-------------------------------------------------------------------- +######### +# See whether we should allow loadable extensions +AC_ARG_ENABLE(load-extension, AC_HELP_STRING([--enable-load-extension], + [Enable loading of external extensions]), + [use_loadextension=$enableval],[use_loadextension=no]) +if test "${use_loadextension}" = "yes" ; then + OPT_FEATURE_FLAGS="" +else + OPT_FEATURE_FLAGS="-DSQLITE_OMIT_LOAD_EXTENSION=1" +fi + +######### +# attempt to duplicate any OMITS and ENABLES into the $(OPT_FEATURE_FLAGS) parameter +for option in $CFLAGS $CPPFLAGS +do + case $option in + -DSQLITE_OMIT*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; + -DSQLITE_ENABLE*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; + esac +done +AC_SUBST(OPT_FEATURE_FLAGS) + + +# attempt to remove any OMITS and ENABLES from the $(CFLAGS) parameter +ac_temp_CFLAGS="" +for option in $CFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_CFLAGS="$ac_temp_CFLAGS $option";; + esac +done +CFLAGS=$ac_temp_CFLAGS + + +# attempt to remove any OMITS and ENABLES from the $(CPPFLAGS) parameter +ac_temp_CPPFLAGS="" +for option in $CPPFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_CPPFLAGS="$ac_temp_CPPFLAGS $option";; + esac +done +CPPFLAGS=$ac_temp_CPPFLAGS + + +# attempt to remove any OMITS and ENABLES from the $(BUILD_CFLAGS) parameter +ac_temp_BUILD_CFLAGS="" +for option in $BUILD_CFLAGS +do + case $option in + -DSQLITE_OMIT*) ;; + -DSQLITE_ENABLE*) ;; + *) ac_temp_BUILD_CFLAGS="$ac_temp_BUILD_CFLAGS $option";; + esac +done +BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS -AC_CHECK_FUNC(fdatasync, [TARGET_CFLAGS="$TARGET_CFLAGS -DHAVE_FDATASYNC=1"]) + +######### +# See whether we should use GCOV +AC_ARG_ENABLE(gcov, AC_HELP_STRING([--enable-gcov], + [Enable coverage testing using gcov]), + [use_gcov=$enableval],[use_gcov=no]) +if test "${use_gcov}" = "yes" ; then + USE_GCOV=1 +else + USE_GCOV=0 +fi +AC_SUBST(USE_GCOV) + + +######### +# Output the config header +AC_CONFIG_HEADERS(config.h) ######### # Generate the output files. # +AC_SUBST(BUILD_CFLAGS) AC_OUTPUT([ Makefile sqlite3.pc diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/contrib/sqlitecon.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/contrib/sqlitecon.tcl --- sqlite3-3.4.2/contrib/sqlitecon.tcl 2005-09-24 12:01:12.000000000 +0100 +++ sqlite3-3.6.16/contrib/sqlitecon.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,679 +0,0 @@ -# A Tk console widget for SQLite. Invoke sqlitecon::create with a window name, -# a prompt string, a title to set a new top-level window, and the SQLite -# database handle. For example: -# -# sqlitecon::create .sqlcon {sql:- } {SQL Console} db -# -# A toplevel window is created that allows you to type in SQL commands to -# be processed on the spot. -# -# A limited set of dot-commands are supported: -# -# .table -# .schema ?TABLE? -# .mode list|column|multicolumn|line -# .exit -# -# In addition, a new SQL function named "edit()" is created. 
This function -# takes a single text argument and returns a text result. Whenever the -# the function is called, it pops up a new toplevel window containing a -# text editor screen initialized to the argument. When the "OK" button -# is pressed, whatever revised text is in the text editor is returned as -# the result of the edit() function. This allows text fields of SQL tables -# to be edited quickly and easily as follows: -# -# UPDATE table1 SET dscr = edit(dscr) WHERE rowid=15; -# - - -# Create a namespace to work in -# -namespace eval ::sqlitecon { - # do nothing -} - -# Create a console widget named $w. The prompt string is $prompt. -# The title at the top of the window is $title. The database connection -# object is $db -# -proc sqlitecon::create {w prompt title db} { - upvar #0 $w.t v - if {[winfo exists $w]} {destroy $w} - if {[info exists v]} {unset v} - toplevel $w - wm title $w $title - wm iconname $w $title - frame $w.mb -bd 2 -relief raised - pack $w.mb -side top -fill x - menubutton $w.mb.file -text File -menu $w.mb.file.m - menubutton $w.mb.edit -text Edit -menu $w.mb.edit.m - pack $w.mb.file $w.mb.edit -side left -padx 8 -pady 1 - set m [menu $w.mb.file.m -tearoff 0] - $m add command -label {Close} -command "destroy $w" - sqlitecon::create_child $w $prompt $w.mb.edit.m - set v(db) $db - $db function edit ::sqlitecon::_edit -} - -# This routine creates a console as a child window within a larger -# window. It also creates an edit menu named "$editmenu" if $editmenu!="". -# The calling function is responsible for posting the edit menu. -# -proc sqlitecon::create_child {w prompt editmenu} { - upvar #0 $w.t v - if {$editmenu!=""} { - set m [menu $editmenu -tearoff 0] - $m add command -label Cut -command "sqlitecon::Cut $w.t" - $m add command -label Copy -command "sqlitecon::Copy $w.t" - $m add command -label Paste -command "sqlitecon::Paste $w.t" - $m add command -label {Clear Screen} -command "sqlitecon::Clear $w.t" - $m add separator - $m add command -label {Save As...} -command "sqlitecon::SaveFile $w.t" - catch {$editmenu config -postcommand "sqlitecon::EnableEditMenu $w"} - } - scrollbar $w.sb -orient vertical -command "$w.t yview" - pack $w.sb -side right -fill y - text $w.t -font fixed -yscrollcommand "$w.sb set" - pack $w.t -side right -fill both -expand 1 - bindtags $w.t Sqlitecon - set v(editmenu) $editmenu - set v(history) 0 - set v(historycnt) 0 - set v(current) -1 - set v(prompt) $prompt - set v(prior) {} - set v(plength) [string length $v(prompt)] - set v(x) 0 - set v(y) 0 - set v(mode) column - set v(header) on - $w.t mark set insert end - $w.t tag config ok -foreground blue - $w.t tag config err -foreground red - $w.t insert end $v(prompt) - $w.t mark set out 1.0 - after idle "focus $w.t" -} - -bind Sqlitecon <1> {sqlitecon::Button1 %W %x %y} -bind Sqlitecon {sqlitecon::B1Motion %W %x %y} -bind Sqlitecon {sqlitecon::B1Leave %W %x %y} -bind Sqlitecon {sqlitecon::cancelMotor %W} -bind Sqlitecon {sqlitecon::cancelMotor %W} -bind Sqlitecon {sqlitecon::Insert %W %A} -bind Sqlitecon {sqlitecon::Left %W} -bind Sqlitecon {sqlitecon::Left %W} -bind Sqlitecon {sqlitecon::Right %W} -bind Sqlitecon {sqlitecon::Right %W} -bind Sqlitecon {sqlitecon::Backspace %W} -bind Sqlitecon {sqlitecon::Backspace %W} -bind Sqlitecon {sqlitecon::Delete %W} -bind Sqlitecon {sqlitecon::Delete %W} -bind Sqlitecon {sqlitecon::Home %W} -bind Sqlitecon {sqlitecon::Home %W} -bind Sqlitecon {sqlitecon::End %W} -bind Sqlitecon {sqlitecon::End %W} -bind Sqlitecon {sqlitecon::Enter %W} -bind 
Sqlitecon {sqlitecon::Enter %W} -bind Sqlitecon {sqlitecon::Prior %W} -bind Sqlitecon {sqlitecon::Prior %W} -bind Sqlitecon {sqlitecon::Next %W} -bind Sqlitecon {sqlitecon::Next %W} -bind Sqlitecon {sqlitecon::EraseEOL %W} -bind Sqlitecon <> {sqlitecon::Cut %W} -bind Sqlitecon <> {sqlitecon::Copy %W} -bind Sqlitecon <> {sqlitecon::Paste %W} -bind Sqlitecon <> {sqlitecon::Clear %W} - -# Insert a single character at the insertion cursor -# -proc sqlitecon::Insert {w a} { - $w insert insert $a - $w yview insert -} - -# Move the cursor one character to the left -# -proc sqlitecon::Left {w} { - upvar #0 $w v - scan [$w index insert] %d.%d row col - if {$col>$v(plength)} { - $w mark set insert "insert -1c" - } -} - -# Erase the character to the left of the cursor -# -proc sqlitecon::Backspace {w} { - upvar #0 $w v - scan [$w index insert] %d.%d row col - if {$col>$v(plength)} { - $w delete {insert -1c} - } -} - -# Erase to the end of the line -# -proc sqlitecon::EraseEOL {w} { - upvar #0 $w v - scan [$w index insert] %d.%d row col - if {$col>=$v(plength)} { - $w delete insert {insert lineend} - } -} - -# Move the cursor one character to the right -# -proc sqlitecon::Right {w} { - $w mark set insert "insert +1c" -} - -# Erase the character to the right of the cursor -# -proc sqlitecon::Delete w { - $w delete insert -} - -# Move the cursor to the beginning of the current line -# -proc sqlitecon::Home w { - upvar #0 $w v - scan [$w index insert] %d.%d row col - $w mark set insert $row.$v(plength) -} - -# Move the cursor to the end of the current line -# -proc sqlitecon::End w { - $w mark set insert {insert lineend} -} - -# Add a line to the history -# -proc sqlitecon::addHistory {w line} { - upvar #0 $w v - if {$v(historycnt)>0} { - set last [lindex $v(history) [expr $v(historycnt)-1]] - if {[string compare $last $line]} { - lappend v(history) $line - incr v(historycnt) - } - } else { - set v(history) [list $line] - set v(historycnt) 1 - } - set v(current) $v(historycnt) -} - -# Called when "Enter" is pressed. Do something with the line -# of text that was entered. -# -proc sqlitecon::Enter w { - upvar #0 $w v - scan [$w index insert] %d.%d row col - set start $row.$v(plength) - set line [$w get $start "$start lineend"] - $w insert end \n - $w mark set out end - if {$v(prior)==""} { - set cmd $line - } else { - set cmd $v(prior)\n$line - } - if {[string index $cmd 0]=="." || [$v(db) complete $cmd]} { - regsub -all {\n} [string trim $cmd] { } cmd2 - addHistory $w $cmd2 - set rc [catch {DoCommand $w $cmd} res] - if {![winfo exists $w]} return - if {$rc} { - $w insert end $res\n err - } elseif {[string length $res]>0} { - $w insert end $res\n ok - } - set v(prior) {} - $w insert end $v(prompt) - } else { - set v(prior) $cmd - regsub -all {[^ ]} $v(prompt) . x - $w insert end $x - } - $w mark set insert end - $w mark set out {insert linestart} - $w yview insert -} - -# Execute a single SQL command. Pay special attention to control -# directives that begin with "." -# -# The return value is the text output from the command, properly -# formatted. 
-# -proc sqlitecon::DoCommand {w cmd} { - upvar #0 $w v - set mode $v(mode) - set header $v(header) - if {[regexp {^(\.[a-z]+)} $cmd all word]} { - if {$word==".mode"} { - regexp {^.[a-z]+ +([a-z]+)} $cmd all v(mode) - return {} - } elseif {$word==".exit"} { - destroy [winfo toplevel $w] - return {} - } elseif {$word==".header"} { - regexp {^.[a-z]+ +([a-z]+)} $cmd all v(header) - return {} - } elseif {$word==".tables"} { - set mode multicolumn - set cmd {SELECT name FROM sqlite_master WHERE type='table' - UNION ALL - SELECT name FROM sqlite_temp_master WHERE type='table'} - $v(db) eval {PRAGMA database_list} { - if {$name!="temp" && $name!="main"} { - append cmd "UNION ALL SELECT name FROM $name.sqlite_master\ - WHERE type='table'" - } - } - append cmd { ORDER BY 1} - } elseif {$word==".fullschema"} { - set pattern % - regexp {^.[a-z]+ +([^ ]+)} $cmd all pattern - set mode list - set header 0 - set cmd "SELECT sql FROM sqlite_master WHERE tbl_name LIKE '$pattern' - AND sql NOT NULL UNION ALL SELECT sql FROM sqlite_temp_master - WHERE tbl_name LIKE '$pattern' AND sql NOT NULL" - $v(db) eval {PRAGMA database_list} { - if {$name!="temp" && $name!="main"} { - append cmd " UNION ALL SELECT sql FROM $name.sqlite_master\ - WHERE tbl_name LIKE '$pattern' AND sql NOT NULL" - } - } - } elseif {$word==".schema"} { - set pattern % - regexp {^.[a-z]+ +([^ ]+)} $cmd all pattern - set mode list - set header 0 - set cmd "SELECT sql FROM sqlite_master WHERE name LIKE '$pattern' - AND sql NOT NULL UNION ALL SELECT sql FROM sqlite_temp_master - WHERE name LIKE '$pattern' AND sql NOT NULL" - $v(db) eval {PRAGMA database_list} { - if {$name!="temp" && $name!="main"} { - append cmd " UNION ALL SELECT sql FROM $name.sqlite_master\ - WHERE name LIKE '$pattern' AND sql NOT NULL" - } - } - } else { - return \ - ".exit\n.mode line|list|column\n.schema ?TABLENAME?\n.tables" - } - } - set res {} - if {$mode=="list"} { - $v(db) eval $cmd x { - set sep {} - foreach col $x(*) { - append res $sep$x($col) - set sep | - } - append res \n - } - if {[info exists x(*)] && $header} { - set sep {} - set hdr {} - foreach col $x(*) { - append hdr $sep$col - set sep | - } - set res $hdr\n$res - } - } elseif {[string range $mode 0 2]=="col"} { - set y {} - $v(db) eval $cmd x { - foreach col $x(*) { - if {![info exists cw($col)] || $cw($col)<[string length $x($col)]} { - set cw($col) [string length $x($col)] - } - lappend y $x($col) - } - } - if {[info exists x(*)] && $header} { - set hdr {} - set ln {} - set dash --------------------------------------------------------------- - append dash ------------------------------------------------------------ - foreach col $x(*) { - if {![info exists cw($col)] || $cw($col)<[string length $col]} { - set cw($col) [string length $col] - } - lappend hdr $col - lappend ln [string range $dash 1 $cw($col)] - } - set y [concat $hdr $ln $y] - } - if {[info exists x(*)]} { - set format {} - set arglist {} - set arglist2 {} - set i 0 - foreach col $x(*) { - lappend arglist x$i - append arglist2 " \$x$i" - incr i - append format " %-$cw($col)s" - } - set format [string trimleft $format]\n - if {[llength $arglist]>0} { - foreach $arglist $y "append res \[format [list $format] $arglist2\]" - } - } - } elseif {$mode=="multicolumn"} { - set y [$v(db) eval $cmd] - set max 0 - foreach e $y { - if {$max<[string length $e]} {set max [string length $e]} - } - set ncol [expr {int(80/($max+2))}] - if {$ncol<1} {set ncol 1} - set nelem [llength $y] - set nrow [expr {($nelem+$ncol-1)/$ncol}] - set format "%-${max}s" 
- for {set i 0} {$i<$nrow} {incr i} { - set j $i - while 1 { - append res [format $format [lindex $y $j]] - incr j $nrow - if {$j>=$nelem} break - append res { } - } - append res \n - } - } else { - $v(db) eval $cmd x { - foreach col $x(*) {append res "$col = $x($col)\n"} - append res \n - } - } - return [string trimright $res] -} - -# Change the line to the previous line -# -proc sqlitecon::Prior w { - upvar #0 $w v - if {$v(current)<=0} return - incr v(current) -1 - set line [lindex $v(history) $v(current)] - sqlitecon::SetLine $w $line -} - -# Change the line to the next line -# -proc sqlitecon::Next w { - upvar #0 $w v - if {$v(current)>=$v(historycnt)} return - incr v(current) 1 - set line [lindex $v(history) $v(current)] - sqlitecon::SetLine $w $line -} - -# Change the contents of the entry line -# -proc sqlitecon::SetLine {w line} { - upvar #0 $w v - scan [$w index insert] %d.%d row col - set start $row.$v(plength) - $w delete $start end - $w insert end $line - $w mark set insert end - $w yview insert -} - -# Called when the mouse button is pressed at position $x,$y on -# the console widget. -# -proc sqlitecon::Button1 {w x y} { - global tkPriv - upvar #0 $w v - set v(mouseMoved) 0 - set v(pressX) $x - set p [sqlitecon::nearestBoundry $w $x $y] - scan [$w index insert] %d.%d ix iy - scan $p %d.%d px py - if {$px==$ix} { - $w mark set insert $p - } - $w mark set anchor $p - focus $w -} - -# Find the boundry between characters that is nearest -# to $x,$y -# -proc sqlitecon::nearestBoundry {w x y} { - set p [$w index @$x,$y] - set bb [$w bbox $p] - if {![string compare $bb ""]} {return $p} - if {($x-[lindex $bb 0])<([lindex $bb 2]/2)} {return $p} - $w index "$p + 1 char" -} - -# This routine extends the selection to the point specified by $x,$y -# -proc sqlitecon::SelectTo {w x y} { - upvar #0 $w v - set cur [sqlitecon::nearestBoundry $w $x $y] - if {[catch {$w index anchor}]} { - $w mark set anchor $cur - } - set anchor [$w index anchor] - if {[$w compare $cur != $anchor] || (abs($v(pressX) - $x) >= 3)} { - if {$v(mouseMoved)==0} { - $w tag remove sel 0.0 end - } - set v(mouseMoved) 1 - } - if {[$w compare $cur < anchor]} { - set first $cur - set last anchor - } else { - set first anchor - set last $cur - } - if {$v(mouseMoved)} { - $w tag remove sel 0.0 $first - $w tag add sel $first $last - $w tag remove sel $last end - update idletasks - } -} - -# Called whenever the mouse moves while button-1 is held down. -# -proc sqlitecon::B1Motion {w x y} { - upvar #0 $w v - set v(y) $y - set v(x) $x - sqlitecon::SelectTo $w $x $y -} - -# Called whenever the mouse leaves the boundries of the widget -# while button 1 is held down. -# -proc sqlitecon::B1Leave {w x y} { - upvar #0 $w v - set v(y) $y - set v(x) $x - sqlitecon::motor $w -} - -# This routine is called to automatically scroll the window when -# the mouse drags offscreen. -# -proc sqlitecon::motor w { - upvar #0 $w v - if {![winfo exists $w]} return - if {$v(y)>=[winfo height $w]} { - $w yview scroll 1 units - } elseif {$v(y)<0} { - $w yview scroll -1 units - } else { - return - } - sqlitecon::SelectTo $w $v(x) $v(y) - set v(timer) [after 50 sqlitecon::motor $w] -} - -# This routine cancels the scrolling motor if it is active -# -proc sqlitecon::cancelMotor w { - upvar #0 $w v - catch {after cancel $v(timer)} - catch {unset v(timer)} -} - -# Do a Copy operation on the stuff currently selected. 
-# -proc sqlitecon::Copy w { - if {![catch {set text [$w get sel.first sel.last]}]} { - clipboard clear -displayof $w - clipboard append -displayof $w $text - } -} - -# Return 1 if the selection exists and is contained -# entirely on the input line. Return 2 if the selection -# exists but is not entirely on the input line. Return 0 -# if the selection does not exist. -# -proc sqlitecon::canCut w { - set r [catch { - scan [$w index sel.first] %d.%d s1x s1y - scan [$w index sel.last] %d.%d s2x s2y - scan [$w index insert] %d.%d ix iy - }] - if {$r==1} {return 0} - if {$s1x==$ix && $s2x==$ix} {return 1} - return 2 -} - -# Do a Cut operation if possible. Cuts are only allowed -# if the current selection is entirely contained on the -# current input line. -# -proc sqlitecon::Cut w { - if {[sqlitecon::canCut $w]==1} { - sqlitecon::Copy $w - $w delete sel.first sel.last - } -} - -# Do a paste opeation. -# -proc sqlitecon::Paste w { - if {[sqlitecon::canCut $w]==1} { - $w delete sel.first sel.last - } - if {[catch {selection get -displayof $w -selection CLIPBOARD} topaste] - && [catch {selection get -displayof $w -selection PRIMARY} topaste]} { - return - } - if {[info exists ::$w]} { - set prior 0 - foreach line [split $topaste \n] { - if {$prior} { - sqlitecon::Enter $w - update - } - set prior 1 - $w insert insert $line - } - } else { - $w insert insert $topaste - } -} - -# Enable or disable entries in the Edit menu -# -proc sqlitecon::EnableEditMenu w { - upvar #0 $w.t v - set m $v(editmenu) - if {$m=="" || ![winfo exists $m]} return - switch [sqlitecon::canCut $w.t] { - 0 { - $m entryconf Copy -state disabled - $m entryconf Cut -state disabled - } - 1 { - $m entryconf Copy -state normal - $m entryconf Cut -state normal - } - 2 { - $m entryconf Copy -state normal - $m entryconf Cut -state disabled - } - } -} - -# Prompt the user for the name of a writable file. Then write the -# entire contents of the console screen to that file. -# -proc sqlitecon::SaveFile w { - set types { - {{Text Files} {.txt}} - {{All Files} *} - } - set f [tk_getSaveFile -filetypes $types -title "Write Screen To..."] - if {$f!=""} { - if {[catch {open $f w} fd]} { - tk_messageBox -type ok -icon error -message $fd - } else { - puts $fd [string trimright [$w get 1.0 end] \n] - close $fd - } - } -} - -# Erase everything from the console above the insertion line. 
-# -proc sqlitecon::Clear w { - $w delete 1.0 {insert linestart} -} - -# An in-line editor for SQL -# -proc sqlitecon::_edit {origtxt {title {}}} { - for {set i 0} {[winfo exists .ed$i]} {incr i} continue - set w .ed$i - toplevel $w - wm protocol $w WM_DELETE_WINDOW "$w.b.can invoke" - wm title $w {Inline SQL Editor} - frame $w.b - pack $w.b -side bottom -fill x - button $w.b.can -text Cancel -width 6 -command [list set ::$w 0] - button $w.b.ok -text OK -width 6 -command [list set ::$w 1] - button $w.b.cut -text Cut -width 6 -command [list ::sqlitecon::Cut $w.t] - button $w.b.copy -text Copy -width 6 -command [list ::sqlitecon::Copy $w.t] - button $w.b.paste -text Paste -width 6 -command [list ::sqlitecon::Paste $w.t] - set ::$w {} - pack $w.b.cut $w.b.copy $w.b.paste $w.b.can $w.b.ok\ - -side left -padx 5 -pady 5 -expand 1 - if {$title!=""} { - label $w.title -text $title - pack $w.title -side top -padx 5 -pady 5 - } - text $w.t -bg white -fg black -yscrollcommand [list $w.sb set] - pack $w.t -side left -fill both -expand 1 - scrollbar $w.sb -orient vertical -command [list $w.t yview] - pack $w.sb -side left -fill y - $w.t insert end $origtxt - - vwait ::$w - - if {[set ::$w]} { - set txt [string trimright [$w.t get 1.0 end]] - } else { - set txt $origtxt - } - destroy $w - return $txt -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/.cvsignore /tmp/3ARg2Grji7/sqlite3-3.6.16/.cvsignore --- sqlite3-3.4.2/.cvsignore 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/.cvsignore 2009-01-29 02:54:57.000000000 +0000 @@ -0,0 +1,27 @@ +autom4te.cache +Makefile +config.h +config.log +config.status +libtool +sqlite3.pc + +.deps +.libs + +*.lo +*.la + +lemon +mkkeywordhash +sqlite3 +tsrc + +.target_source +keywordhash.h +lempar.c +opcodes.[ch] +parse.[chy] +parse.h.temp +parse.out +sqlite3.[ch] diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/changelog /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/changelog --- sqlite3-3.4.2/debian/changelog 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/changelog 2009-08-19 23:00:31.000000000 +0100 @@ -1,3 +1,206 @@ +sqlite3 (3.6.16-1~hardy~pgquiles1) hardy; urgency=low + + * Backport to Hardy + + -- Pau Garcia i Quiles Wed, 19 Aug 2009 23:43:02 +0200 + +sqlite3 (3.6.16-1) unstable; urgency=low + + * New upstream release (closes: #536661). + + -- Laszlo Boszormenyi (GCS) Fri, 10 Jul 2009 22:44:19 +0000 + +sqlite3 (3.6.14.2-1) unstable; urgency=low + + * New upstream release. Disable ICU support, it causes more trouble than + good. + * Add 20-hurd-locking-style.patch for proper locking on Hurd + (closes: #529734). + + -- Laszlo Boszormenyi (GCS) Sun, 31 May 2009 16:28:06 +0000 + +sqlite3 (3.6.13-1) unstable; urgency=low + + * New upstream release (closes: #524617), fixing segfault when table + contains default value (closes: 524166). + + -- Laszlo Boszormenyi (GCS) Sun, 19 Apr 2009 07:18:50 +0000 + +sqlite3 (3.6.12-1) unstable; urgency=low + + * New upstream release, remove 10-install-libsqlite3-first.patch as fixed + upstream in a different way. + * Remove generated files in clean target. + + -- Laszlo Boszormenyi (GCS) Tue, 31 Mar 2009 18:33:15 +0000 + +sqlite3 (3.6.11-4) unstable; urgency=low + + * Fix that .la file (closes: #520478) (really). + + -- Laszlo Boszormenyi (GCS) Sun, 29 Mar 2009 23:53:53 +0000 + +sqlite3 (3.6.11-3) unstable; urgency=low + + * Really remove transitive dependencies from .la files (closes: #520478). + * Set section and priority fields matching override. 
+ + -- Laszlo Boszormenyi (GCS) Fri, 27 Mar 2009 20:11:06 +0000 + +sqlite3 (3.6.11-2) unstable; urgency=low + + [ Zack Weinberg ] + * Fix ordering in "make install" (closes: #520153). + * Link libsqlite3.so with libdl (closes: #520466). + * Prune transitive dependencies from .la files (closes: #520478). + * Tweak ICU linkage to eliminate dpkg-shlibdeps complaints (closes: #521082). + + -- Laszlo Boszormenyi (GCS) Thu, 26 Mar 2009 20:12:02 +0000 + +sqlite3 (3.6.11-1) unstable; urgency=low + + * New upstream release, upload to unstable. + * Enable ICU support (closes: #494987). + * Update debhelper compatibility level to 5 as 4 is deprecated now. + + -- Laszlo Boszormenyi (GCS) Sat, 14 Mar 2009 21:18:18 +0000 + +sqlite3 (3.6.10-1) experimental; urgency=low + + * New upstream release, enable the R*Tree module (closes: #501099). + * Remove 01-sqlite3.pc-version-to-release.patch , applied upstream. + * Rework 02-lemon-snprintf.patch to match this release. + * Remove 03-restore-documentation.patch as documentation can not be built + anymore, but merged into .orig.tar.gz . + * Rework 04-loadextension-links-dl.patch to still link with libdl if load + extension is enabled. + * Remove 05-improve-nan-testing-on-x86.patch and + 06-fix-distinct-on-indexes.patch, this release contains these fixes. + * Remove own pkgIndex.tcl , use the original after correcting its path. + * Add ${misc:Depends} to packages. + + -- Laszlo Boszormenyi (GCS) Sat, 17 Jan 2009 08:06:05 +0000 + +sqlite3 (3.5.9-6) unstable; urgency=low + + * Make sqlite3 depends on the exact Debian version of libsqlite3-0 + (closes: #502370). + + -- Laszlo Boszormenyi (GCS) Thu, 16 Oct 2008 15:44:09 +0000 + +sqlite3 (3.5.9-5) unstable; urgency=low + + * Backport fix for distinct on indexes (closes: #500792). + + -- Laszlo Boszormenyi (GCS) Wed, 01 Oct 2008 20:16:18 +0000 + +sqlite3 (3.5.9-4) unstable; urgency=low + + * Backport improved NaN testing for highly optimized GCC on x86 + (closes: #488864). + * Remove rpath from sqlite3 binary. + + -- Laszlo Boszormenyi (GCS) Sun, 24 Aug 2008 11:03:56 +0000 + +sqlite3 (3.5.9-3) unstable; urgency=low + + * Enable full text search (closes: #487914). + * Update Standards-Version to 3.8.0 , no changes needed. + + -- Laszlo Boszormenyi (GCS) Wed, 25 Jun 2008 11:41:54 +0200 + +sqlite3 (3.5.9-2) unstable; urgency=low + + * Set correct version for TCL (closes: #483990). + + -- Laszlo Boszormenyi (GCS) Mon, 02 Jun 2008 16:33:24 +0000 + +sqlite3 (3.5.9-1) unstable; urgency=medium + + * New upstream release, contains the backported fix for 3.5.8-4 thus + drop 05-busy-handler-update-retry-fix.patch . + * Urgency set to medium due to fixing a buffer-overrun problem in + sqlite3_mprintf() and fixing a big performance regression on LEFT JOIN + (closes: #479184). + + -- Laszlo Boszormenyi (GCS) Sat, 17 May 2008 16:42:51 +0000 + +sqlite3 (3.5.8-4) unstable; urgency=low + + * Backport busy handler fix from upstream CVS (closes: #480007). + + -- Laszlo Boszormenyi (GCS) Thu, 08 May 2008 20:36:40 +0200 + +sqlite3 (3.5.8-3) unstable; urgency=low + + * Add 04-loadextension-links-dl.patch to link against dl when + loadextension enabled (closes: #478980). + + -- Laszlo Boszormenyi (GCS) Sat, 03 May 2008 09:19:31 +0200 + +sqlite3 (3.5.8-2) unstable; urgency=low + + * Re-enable extension mechanism (closes: #478337, #475084). + * Create and install a more complete documentation (closes: #478492). 
+ + -- Laszlo Boszormenyi (GCS) Tue, 29 Apr 2008 01:12:34 +0200 + +sqlite3 (3.5.8-1) unstable; urgency=low + + * New upstream release, re-merge source and documentation. + * Remove 04-fix-tcl-interface.patch , this release contains it. + + -- Laszlo Boszormenyi (GCS) Sun, 27 Apr 2008 21:52:30 +0200 + +sqlite3 (3.5.7-2) unstable; urgency=low + + * Backport TCL interface fix from upstream CVS (closes: #473988). + + -- Laszlo Boszormenyi (GCS) Sun, 06 Apr 2008 20:20:07 +0200 + +sqlite3 (3.5.7-1) unstable; urgency=low + + * New upstream release. + * Redo 03-restore-documentation.patch as upstream Makefile.in changed big. + * Fix Makefile.in for rebuild. + + -- Laszlo Boszormenyi (GCS) Sat, 22 Mar 2008 10:32:05 +0000 + +sqlite3 (3.5.6-3) unstable; urgency=low + + * Add debug library package, thanks to Luis Rodrigo Gallardo Cruz + (closes: #447829). + * Correct snprintf use in 02-lemon-snprintf.patch thanks to Thorsten Glaser. + + -- Laszlo Boszormenyi (GCS) Thu, 28 Feb 2008 18:50:44 +0000 + +sqlite3 (3.5.6-2) unstable; urgency=low + + * Upload to unstable. + * Add Makefile.in snippets to generate C interface documentation again + (closes: #466938). + + -- Laszlo Boszormenyi (GCS) Thu, 28 Feb 2008 14:44:43 +0100 + +sqlite3 (3.5.6-1) experimental; urgency=low + + * New upstream release. + + -- Laszlo Boszormenyi (GCS) Thu, 14 Feb 2008 16:30:39 +0100 + +sqlite3 (3.5.4-1) experimental; urgency=low + + * New upstream release, re-merge source and documentation. + * Register with doc-base (closes: #452391). + + -- Laszlo Boszormenyi (GCS) Fri, 21 Dec 2007 22:40:30 +0200 + +sqlite3 (3.5.1-1) experimental; urgency=low + + * New upstream release. + + -- Laszlo Boszormenyi (GCS) Thu, 4 Oct 2007 09:27:05 +0200 + sqlite3 (3.4.2-2) unstable; urgency=low * Fixed upstream Makefile.in not to lose doc/lemon.html and @@ -196,3 +399,8 @@ - Adapted manpage for 3.0 (thanks to Laszlo 'GCS' Boszormeny). -- Andreas Rottmann Sun, 7 Nov 2004 13:49:52 +0100 +sqlite3 (3.5.8-3) unstable; urgency=low + + * Link. + + -- root Sat, 03 May 2008 09:33:37 +0200 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/compat /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/compat --- sqlite3-3.4.2/debian/compat 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/compat 2009-08-19 23:00:31.000000000 +0100 @@ -1 +1 @@ -4 +5 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/control /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/control --- sqlite3-3.4.2/debian/control 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/control 2009-08-19 23:00:31.000000000 +0100 @@ -2,12 +2,12 @@ Section: devel Priority: optional Maintainer: Laszlo Boszormenyi (GCS) -Build-Depends: cdbs (>= 0.4.15), debhelper (>= 4.1.16), autoconf (>= 2.59), libtool (>= 1.5.2), automake1.9, autotools-dev, libreadline5-dev, tcl8.4-dev -Standards-Version: 3.7.2 +Build-Depends: cdbs (>= 0.4.15), debhelper (>= 5), autoconf (>= 2.59), libtool (>= 1.5.2), automake, autotools-dev, chrpath, libreadline5-dev, tcl8.4-dev +Standards-Version: 3.8.2 Package: lemon Architecture: any -Depends: ${shlibs:Depends} +Depends: ${shlibs:Depends}, ${misc:Depends} Description: The Lemon Parser Generator Lemon is an LALR(1) parser generator for C or C++. It does the same job as bison and yacc. But lemon is not another bison or yacc @@ -20,9 +20,9 @@ embedded controllers. 
Package: sqlite3 -Section: misc +Section: database Architecture: any -Depends: ${shlibs:Depends} +Depends: ${shlibs:Depends}, ${misc:Depends} Suggests: sqlite3-doc Description: A command line interface for SQLite 3 SQLite is a C library that implements an SQL database engine. @@ -32,6 +32,7 @@ Package: sqlite3-doc Section: doc Architecture: all +Depends: ${misc:Depends} Recommends: sqlite3 Description: SQLite 3 documentation SQLite is a C library that implements an SQL database engine. @@ -41,10 +42,23 @@ This package contains the documentation that is also available on the SQLite homepage. +Package: libsqlite3-0-dbg +Section: debug +Architecture: any +Priority: extra +Depends: libsqlite3-0 (= ${binary:Version}), ${misc:Depends} +Description: SQLite 3 debugging symbols + SQLite is a C library that implements an SQL database engine. + Programs that link with the SQLite library can have SQL database + access without running a separate RDBMS process. + . + This package contains the debugging symbols for the libraries. + Package: libsqlite3-0 Section: libs Architecture: any -Depends: ${shlibs:Depends} +Priority: standard +Depends: ${shlibs:Depends}, ${misc:Depends} Description: SQLite 3 shared library SQLite is a C library that implements an SQL database engine. Programs that link with the SQLite library can have SQL database @@ -54,7 +68,7 @@ Suggests: sqlite3-doc Section: libdevel Architecture: any -Depends: libsqlite3-0 (= ${binary:Version}), libc6-dev +Depends: libsqlite3-0 (= ${binary:Version}), ${misc:Depends}, libc6-dev Description: SQLite 3 development files SQLite is a C library that implements an SQL database engine. Programs that link with the SQLite library can have SQL database @@ -66,10 +80,10 @@ Suggests: sqlite3-doc Section: interpreters Architecture: any -Depends: ${shlibs:Depends} -Description: SQLite 3 TCL bindings +Depends: ${shlibs:Depends}, ${misc:Depends} +Description: SQLite 3 Tcl bindings SQLite is a C library that implements an SQL database engine. Programs that link with the SQLite library can have SQL database access without running a separate RDBMS process. . - This package contains the TCL bindings. + This package contains the Tcl bindings. 
diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/libsqlite3-0-dbg.dirs /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/libsqlite3-0-dbg.dirs --- sqlite3-3.4.2/debian/libsqlite3-0-dbg.dirs 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/debian/libsqlite3-0-dbg.dirs 2009-08-19 23:00:31.000000000 +0100 @@ -0,0 +1 @@ +usr/lib/debug diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/libsqlite3-tcl.install /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/libsqlite3-tcl.install --- sqlite3-3.4.2/debian/libsqlite3-tcl.install 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/libsqlite3-tcl.install 2009-08-19 23:00:31.000000000 +0100 @@ -1,2 +1 @@ -usr/lib/sqlite3/libtclsqlite3.so.* -usr/lib/sqlite3/pkgIndex.tcl +usr/share/tcltk/tcl8.4/sqlite3/libtclsqlite3.so usr/lib/sqlite3/ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/patches/01-sqlite3.pc-version-to-release.patch /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/patches/01-sqlite3.pc-version-to-release.patch --- sqlite3-3.4.2/debian/patches/01-sqlite3.pc-version-to-release.patch 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/patches/01-sqlite3.pc-version-to-release.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -diff -Nur sqlite3-3.3.17.orig/sqlite3.pc.in sqlite3-3.3.17/sqlite3.pc.in ---- sqlite3-3.3.17.orig/sqlite3.pc.in 2004-07-19 07:25:47.000000000 +0300 -+++ sqlite3-3.3.17/sqlite3.pc.in 2007-05-17 02:30:40.000000000 +0300 -@@ -7,6 +7,6 @@ - - Name: SQLite - Description: SQL database engine --Version: @VERSION@ -+Version: @RELEASE@ - Libs: -L${libdir} -lsqlite3 - Cflags: -I${includedir} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/patches/02-lemon-snprintf.patch /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/patches/02-lemon-snprintf.patch --- sqlite3-3.4.2/debian/patches/02-lemon-snprintf.patch 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/patches/02-lemon-snprintf.patch 2009-08-19 23:00:31.000000000 +0100 @@ -1,7 +1,6 @@ -diff -ruN sqlite/tool/lemon.c sqlite-new/tool/lemon.c ---- sqlite/tool/lemon.c 2004-04-24 14:59:13.000000000 +0200 -+++ sqlite-new/tool/lemon.c 2004-07-27 15:31:40.000000000 +0200 -@@ -1272,15 +1272,15 @@ +--- sqlite3-3.6.10.orig/tool/lemon.c 2009-01-15 14:01:58.000000000 +0000 ++++ sqlite3-3.6.10/tool/lemon.c 2009-01-18 18:21:18.319124796 +0000 +@@ -1325,15 +1325,15 @@ va_start(ap, format); /* Prepare a prefix to be prepended to every output line */ if( lineno>0 ){ @@ -11,16 +10,25 @@ - sprintf(prefix,"%.*s: ",PREFIXLIMIT-10,filename); + snprintf(prefix,sizeof prefix,"%.*s: ",PREFIXLIMIT-10,filename); } - prefixsize = strlen(prefix); + prefixsize = lemonStrlen(prefix); availablewidth = LINEWIDTH - prefixsize; /* Generate the error message */ - vsprintf(errmsg,format,ap); + vsnprintf(errmsg,sizeof errmsg,format,ap); va_end(ap); - errmsgsize = strlen(errmsg); + errmsgsize = lemonStrlen(errmsg); /* Remove trailing '\n's from the error message. 
*/ -@@ -2675,7 +2675,7 @@ +@@ -2350,7 +2350,7 @@ + for(z=psp->filename, nBack=0; *z; z++){ + if( *z=='\\' ) nBack++; + } +- sprintf(zLine, "#line %d ", psp->tokenlineno); ++ snprintf(zLine,sizeof zLine, "#line %d ", psp->tokenlineno); + nLine = lemonStrlen(zLine); + n += nLine + lemonStrlen(psp->filename) + nBack; + } +@@ -2915,7 +2915,7 @@ while( cfp ){ char buf[20]; if( cfp->dot==cfp->rp->nrhs ){ @@ -29,25 +37,41 @@ fprintf(fp," %5s ",buf); }else{ fprintf(fp," "); -@@ -2721,7 +2721,7 @@ +@@ -2970,6 +2970,7 @@ + { + char *pathlist; + char *path,*cp; ++ size_t pathsz; + char c; + + #ifdef __WIN32__ +@@ -2980,21 +2981,21 @@ + if( cp ){ c = *cp; *cp = 0; - path = (char *)malloc( strlen(argv0) + strlen(name) + 2 ); +- path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 ); - if( path ) sprintf(path,"%s/%s",argv0,name); -+ if( path ) snprintf(path,sizeof path,"%s/%s",argv0,name); ++ path = (char *)malloc( (pathsz = lemonStrlen(argv0) + lemonStrlen(name) + 2) ); ++ if( path ) snprintf(path,pathsz,"%s/%s",argv0,name); *cp = c; }else{ extern char *getenv(); -@@ -2734,7 +2734,7 @@ - if( cp==0 ) cp = &pathlist[strlen(pathlist)]; + pathlist = getenv("PATH"); + if( pathlist==0 ) pathlist = ".:/bin:/usr/bin"; +- path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 ); ++ path = (char *)malloc( (pathsz = lemonStrlen(pathlist)+lemonStrlen(name)+2) ); + if( path!=0 ){ + while( *pathlist ){ + cp = strchr(pathlist,':'); + if( cp==0 ) cp = &pathlist[lemonStrlen(pathlist)]; c = *cp; *cp = 0; - sprintf(path,"%s/%s",pathlist,name); -+ snprintf(path,sizeof path,"%s/%s",pathlist,name); ++ snprintf(path,pathsz,"%s/%s",pathlist,name); *cp = c; if( c==0 ) pathlist = ""; else pathlist = &cp[1]; -@@ -2814,14 +2814,16 @@ +@@ -3074,14 +3075,16 @@ cp = strrchr(lemp->filename,'.'); if( cp ){ @@ -66,16 +90,25 @@ }else{ tpltname = pathsearch(lemp->argv0,templatename,0); } -@@ -2833,7 +2835,7 @@ +@@ -3093,7 +3096,7 @@ } - in = fopen(tpltname,"r"); + in = fopen(tpltname,"rb"); if( in==0 ){ - fprintf(stderr,"Can't open the template file \"%s\".\n",templatename); + fprintf(stderr,"Can't open the template file \"%s\".\n",tpltname); lemp->errorcnt++; return 0; } -@@ -3447,7 +3449,7 @@ +@@ -3236,7 +3239,7 @@ + while( n-- > 0 ){ + c = *(zText++); + if( c=='%' && n>0 && zText[0]=='d' ){ +- sprintf(zInt, "%d", p1); ++ snprintf(zInt,sizeof zInt, "%d", p1); + p1 = p2; + strcpy(&z[used], zInt); + used += lemonStrlen(&z[used]); +@@ -3830,7 +3833,7 @@ /* Generate a table containing the symbolic name of every symbol */ for(i=0; insymbol; i++){ @@ -84,8 +117,8 @@ fprintf(out," %-15s",line); if( (i&3)==3 ){ fprintf(out,"\n"); lineno++; } } -@@ -3562,7 +3564,7 @@ - in = file_open(lemp,".h","r"); +@@ -3986,7 +3989,7 @@ + in = file_open(lemp,".h","rb"); if( in ){ for(i=1; interminal && fgets(line,LINESIZE,in); i++){ - sprintf(pattern,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/patches/10-520466-libsqlite3-depends-on-libdl.patch /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/patches/10-520466-libsqlite3-depends-on-libdl.patch --- sqlite3-3.4.2/debian/patches/10-520466-libsqlite3-depends-on-libdl.patch 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/debian/patches/10-520466-libsqlite3-depends-on-libdl.patch 2009-08-19 23:00:31.000000000 +0100 @@ -0,0 +1,41 @@ +--- sqlite3-3.6.11.orig/configure.ac 2009-03-20 16:15:10.000000000 -0700 ++++ sqlite3-3.6.11/configure.ac 2009-03-20 16:37:59.000000000 -0700 +@@ -233,6 +233,38 @@ + AC_SUBST(BUILD_CC) + + ########## ++# Do 
we want to support load_extension()? ++# ++AC_ARG_ENABLE(load-extension, ++AC_HELP_STRING([--enable-load-extension], ++ [Include SQL functions for loading extension libraries]),, ++ enable_load_extension=auto) ++ ++if test "x$enable_load_extension" = xyes || ++ test "x$enable_load_extension" = xauto; then ++ ++ can_load_extension=yes ++ # libtool will already have looked for ++ if test $ac_cv_header_dlfcn_h = no; then ++ can_load_extension=no ++ else ++ AC_SEARCH_LIBS(dlopen, dl, , [can_load_extension=no]) ++ fi ++elif test "x$enable_load_extension" = xno; then ++ can_load_extension=no ++else ++ AC_MSG_ERROR([invalid argument to --enable-load-extension]) ++fi ++ ++if test $can_load_extension = no; then ++ if test "x$enable_load_extension" = xyes; then ++ AC_MSG_ERROR([ routines missing, load_extension() not supported]) ++ fi ++ AC_DEFINE(SQLITE_OMIT_LOAD_EXTENSION, 1, ++ [Define if the load_extension() sql function should be omitted.]) ++fi ++ ++########## + # Do we want to support multithreaded use of sqlite + # + AC_ARG_ENABLE(threadsafe, diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/patches/10-520478-squash-bad-deps.patch /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/patches/10-520478-squash-bad-deps.patch --- sqlite3-3.4.2/debian/patches/10-520478-squash-bad-deps.patch 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/debian/patches/10-520478-squash-bad-deps.patch 2009-08-19 23:00:31.000000000 +0100 @@ -0,0 +1,25 @@ +--- sqlite3-3.6.11.orig/Makefile.in 2009-03-24 11:40:55.000000000 -0700 ++++ sqlite3-3.6.11/Makefile.in 2009-03-24 11:42:36.000000000 -0700 +@@ -464,18 +464,20 @@ + libsqlite3.la: $(LIBOBJ) + $(LTLINK) -o $@ $(LIBOBJ) $(TLIBS) \ + ${ALLOWRELEASE} -rpath "$(libdir)" -version-info "8:6:8" ++ sed -i "/dependency_libs/s/'.*'/''/" $@ + + libtclsqlite3.la: tclsqlite.lo libsqlite3.la + $(LTLINK) -o $@ tclsqlite.lo \ +- libsqlite3.la @TCL_STUB_LIB_SPEC@ $(TLIBS) \ ++ libsqlite3.la @TCL_STUB_LIB_SPEC@ \ + -rpath "$(TCLLIBDIR)" \ + -version-info "8:6:8" \ + -avoid-version ++ sed -i "/dependency_libs/s/'.*'/''/" $@ + + sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h + $(LTLINK) $(READLINE_FLAGS) \ + -o $@ $(TOP)/src/shell.c libsqlite3.la \ +- $(LIBREADLINE) $(TLIBS) -rpath "$(libdir)" ++ $(LIBREADLINE) -rpath "$(libdir)" + + # This target creates a directory named "tsrc" and fills it with + # copies of all of the C source code and header files needed to diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/patches/20-hurd-locking-style.patch /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/patches/20-hurd-locking-style.patch --- sqlite3-3.4.2/debian/patches/20-hurd-locking-style.patch 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/debian/patches/20-hurd-locking-style.patch 2009-08-19 23:00:31.000000000 +0100 @@ -0,0 +1,31 @@ +--- z/src/os_unix.c 2009-04-09 20:41:18.000000000 +0200 ++++ z/src/os_unix.c 2009-05-21 01:24:01.000000000 +0200 +@@ -66,7 +66,7 @@ + ** where the database is located. + */ + #if !defined(SQLITE_ENABLE_LOCKING_STYLE) +-# if defined(__APPLE__) ++# if defined(__APPLE__) || defined(__GNU__) + # define SQLITE_ENABLE_LOCKING_STYLE 1 + # else + # define SQLITE_ENABLE_LOCKING_STYLE 0 +@@ -130,7 +130,9 @@ + # else + # include + # include +-# include ++# if ! 
defined(__GNU__) ++# include ++# endif + # endif + #endif /* SQLITE_ENABLE_LOCKING_STYLE */ + +@@ -5101,6 +5103,8 @@ + static sqlite3_vfs aVfs[] = { + #if SQLITE_ENABLE_LOCKING_STYLE && (OS_VXWORKS || defined(__APPLE__)) + UNIXVFS("unix", autolockIoFinder ), ++#elif SQLITE_ENABLE_LOCKING_STYLE && defined(__GNU__) ++ UNIXVFS("unix", flockIoFinder ), + #else + UNIXVFS("unix", posixIoFinder ), + #endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/pkgIndex.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/pkgIndex.tcl --- sqlite3-3.4.2/debian/pkgIndex.tcl 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/pkgIndex.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -package ifneeded sqlite3 3.4 [list load [file join $dir libtclsqlite3.so.0] sqlite3] diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/rules /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/rules --- sqlite3-3.4.2/debian/rules 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/rules 2009-08-19 23:00:31.000000000 +0100 @@ -10,26 +10,40 @@ # Make sure libsqlite0 is built before packages depending on it binary/sqlite3 binary/libsqlite3-dev binary/libsqlite3-tcl:: binary/libsqlite3-$(so_version) +clean:: + rm -f config.status config.log libtool .target_source \ + Makefile config.h sqlite3.c \ + sqlite3.pc pkgIndex.tcl common-install-arch:: - install -d debian/tmp/usr/lib/sqlite3 - install -m 0664 debian/pkgIndex.tcl debian/tmp/usr/lib/sqlite3 - ./libtool --mode=install install libtclsqlite3.la `pwd`/debian/tmp/usr/lib/sqlite3 + install -d debian/tmp/usr/lib/sqlite3/ + install -m 0664 libtclsqlite3.la `pwd`/debian/tmp/usr/lib/sqlite3 + chrpath -d debian/tmp/usr/bin/sqlite3 + chrpath -d ./debian/tmp/usr/share/tcltk/tcl8.4/sqlite3/libtclsqlite3.so install -d debian/tmp/usr/share/lemon - install -m 664 tool/lempar.c debian/tmp/usr/share/lemon - install -m 775 lemon debian/tmp/usr/bin + install -m 0664 tool/lempar.c debian/tmp/usr/share/lemon + install -m 0775 lemon debian/tmp/usr/bin install -d debian/tmp/usr/share/man/man1/ - install -m 644 sqlite3.1 debian/tmp/usr/share/man/man1 - -install/sqlite3-doc:: - tclsh www/lang.tcl doc/ >doc/lang.html + install -m 0644 sqlite3.1 debian/tmp/usr/share/man/man1 +#install/libsqlite3-dev:: +# sed -e 's/-licui18n -licuuc //' \ + ./debian/tmp/usr/lib/libsqlite3.la \ + >./debian/tmp/usr/lib/libsqlite3.la.tmp +# mv ./debian/tmp/usr/lib/libsqlite3.la.tmp \ + ./debian/tmp/usr/lib/libsqlite3.la \ + +install/libsqlite3-tcl:: + sed -e 's/share/lib/' -e 's/tcl[^/]*\///g' \ + ./debian/tmp/usr/share/tcltk/tcl8.4/sqlite3/pkgIndex.tcl \ + >./debian/libsqlite3-tcl/usr/lib/sqlite3/pkgIndex.tcl DEB_AUTO_UPDATE_LIBTOOL = pre DEB_AUTO_UPDATE_AUTOCONF = 2.50 -DEB_AUTO_UPDATE_AUTOMAKE = 1.9 # We don't use automake, but aclocal +DEB_AUTO_UPDATE_AUTOMAKE = 1.10 # We don't use automake, but aclocal -DEB_CONFIGURE_EXTRA_FLAGS = --with-tcl=/usr/lib/tcl8.4 --enable-threadsafe +#DEB_CONFIGURE_SCRIPT_ENV += LDFLAGS="-licui18n -licuuc" +DEB_CONFIGURE_EXTRA_FLAGS = --with-tcl=/usr/lib/tcl8.4 --enable-threadsafe --enable-load-extension -DEB_BUILD_MAKE_TARGET = all libtclsqlite3.la doc +DEB_MAKE_BUILD_TARGET = all libtclsqlite3.la doc #DEB_MAKE_CHECK_TARGET = test DEB_DH_INSTALL_ARGS := --sourcedir=debian/tmp @@ -41,4 +55,7 @@ DEB_SHLIBDEPS_LIBRARY_sqlite := libsqlite3-$(so_version) DEB_SHLIBDEPS_INCLUDE_sqlite := debian/libsqlite3-$(so_version)/usr/lib/ -DEB_OPT_FLAG := -O2 -fno-strict-aliasing -DSQLITE_ENABLE_COLUMN_METADATA +#DEB_OPT_FLAG := -O2 -fno-strict-aliasing -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS3 
-DSQLITE_ENABLE_RTREE=1 -DSQLITE_ENABLE_ICU=1 +DEB_OPT_FLAG := -O2 -fno-strict-aliasing -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE=1 + +DEB_DH_STRIP_ARGS := --dbg-package=libsqlite3-$(so_version)-dbg diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/debian/sqlite3-doc.docs /tmp/3ARg2Grji7/sqlite3-3.6.16/debian/sqlite3-doc.docs --- sqlite3-3.4.2/debian/sqlite3-doc.docs 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/debian/sqlite3-doc.docs 2009-08-19 23:00:31.000000000 +0100 @@ -1,5 +1 @@ -art/2005osaward.gif -doc/*.html -doc/*.png -doc/*.gif -www/*.gif +www/* diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/doc/lemon.html /tmp/3ARg2Grji7/sqlite3-3.6.16/doc/lemon.html --- sqlite3-3.4.2/doc/lemon.html 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/doc/lemon.html 1970-01-01 01:00:00.000000000 +0100 @@ -1,892 +0,0 @@ - - -The Lemon Parser Generator - - -

The Lemon Parser Generator

- -

Lemon is an LALR(1) parser generator for C or C++. -It does the same job as ``bison'' and ``yacc''. -But lemon is not another bison or yacc clone. It -uses a different grammar syntax which is designed to -reduce the number of coding errors. Lemon also uses a more -sophisticated parsing engine that is faster than yacc and -bison and which is both reentrant and thread-safe. -Furthermore, Lemon implements features that can be used -to eliminate resource leaks, making it suitable for use -in long-running programs such as graphical user interfaces -or embedded controllers.

- -

This document is an introduction to the Lemon -parser generator.

- -

Theory of Operation

- -

The main goal of Lemon is to translate a context free grammar (CFG) -for a particular language into C code that implements a parser for -that language. -The program has two inputs: -

    -
  • The grammar specification. -
  • A parser template file. -
-Typically, only the grammar specification is supplied by the programmer. -Lemon comes with a default parser template which works fine for most -applications. But the user is free to substitute a different parser -template if desired.

- -

Depending on command-line options, Lemon will generate between -one and three files of outputs. -

    -
  • C code to implement the parser. -
  • A header file defining an integer ID for each terminal symbol. -
  • An information file that describes the states of the generated parser - automaton. -
-By default, all three of these output files are generated. -The header file is suppressed if the ``-m'' command-line option is -used and the report file is omitted when ``-q'' is selected.

- -

The grammar specification file uses a ``.y'' suffix, by convention. -In the examples used in this document, we'll assume the name of the -grammar file is ``gram.y''. A typical use of Lemon would be the -following command: -

-   lemon gram.y
-
-This command will generate three output files named ``gram.c'', -``gram.h'' and ``gram.out''. -The first is C code to implement the parser. The second -is the header file that defines numerical values for all -terminal symbols, and the last is the report that explains -the states used by the parser automaton.

- -

Command Line Options

- -

The behavior of Lemon can be modified using command-line options. -You can obtain a list of the available command-line options together -with a brief explanation of what each does by typing -

-   lemon -?
-
-As of this writing, the following command-line options are supported: -
    -
  • -b -
  • -c -
  • -g -
  • -m -
  • -q -
  • -s -
  • -x -
-The ``-b'' option reduces the amount of text in the report file by -printing only the basis of each parser state, rather than the full -configuration. -The ``-c'' option suppresses action table compression. Using -c -will make the parser a little larger and slower but it will detect -syntax errors sooner. -The ``-g'' option causes no output files to be generated at all. -Instead, the input grammar file is printed on standard output but -with all comments, actions and other extraneous text deleted. This -is a useful way to get a quick summary of a grammar. -The ``-m'' option causes the output C source file to be compatible -with the ``makeheaders'' program. -Makeheaders is a program that automatically generates header files -from C source code. When the ``-m'' option is used, the header -file is not output since the makeheaders program will take care -of generating all header files automatically. -The ``-q'' option suppresses the report file. -Using ``-s'' causes a brief summary of parser statistics to be -printed. Like this: -
-   Parser statistics: 74 terminals, 70 nonterminals, 179 rules
-                      340 states, 2026 parser table entries, 0 conflicts
-
-Finally, the ``-x'' option causes Lemon to print its version number -and then stops without attempting to read the grammar or generate a parser.

- -

The Parser Interface

- -

Lemon doesn't generate a complete, working program. It only generates -a few subroutines that implement a parser. This section describes -the interface to those subroutines. It is up to the programmer to -call these subroutines in an appropriate way in order to produce a -complete system.

- -

Before a program begins using a Lemon-generated parser, the program -must first create the parser. -A new parser is created as follows: -

-   void *pParser = ParseAlloc( malloc );
-
-The ParseAlloc() routine allocates and initializes a new parser and -returns a pointer to it. -The actual data structure used to represent a parser is opaque -- -its internal structure is not visible or usable by the calling routine. -For this reason, the ParseAlloc() routine returns a pointer to void -rather than a pointer to some particular structure. -The sole argument to the ParseAlloc() routine is a pointer to the -subroutine used to allocate memory. Typically this means ``malloc()''.

- -

After a program is finished using a parser, it can reclaim all -memory allocated by that parser by calling -

-   ParseFree(pParser, free);
-
-The first argument is the same pointer returned by ParseAlloc(). The -second argument is a pointer to the function used to release bulk -memory back to the system.

- -

After a parser has been allocated using ParseAlloc(), the programmer -must supply the parser with a sequence of tokens (terminal symbols) to -be parsed. This is accomplished by calling the following function -once for each token: -

-   Parse(pParser, hTokenID, sTokenData, pArg);
-
-The first argument to the Parse() routine is the pointer returned by -ParseAlloc(). -The second argument is a small positive integer that tells the parser the -type of the next token in the data stream. -There is one token type for each terminal symbol in the grammar. -The gram.h file generated by Lemon contains #define statements that -map symbolic terminal symbol names into appropriate integer values. -(A value of 0 for the second argument is a special flag to the -parser to indicate that the end of input has been reached.) -The third argument is the value of the given token. By default, -the type of the third argument is integer, but the grammar will -usually redefine this type to be some kind of structure. -Typically the second argument will be a broad category of tokens -such as ``identifier'' or ``number'' and the third argument will -be the name of the identifier or the value of the number.

- -

The Parse() function may have either three or four arguments, -depending on the grammar. If the grammar specification file requests -it, the Parse() function will have a fourth parameter that can be -of any type chosen by the programmer. The parser doesn't do anything -with this argument except to pass it through to action routines. -This is a convenient mechanism for passing state information down -to the action routines without having to use global variables.

- -

A typical use of a Lemon parser might look something like the -following: -

-   01 ParseTree *ParseFile(const char *zFilename){
-   02    Tokenizer *pTokenizer;
-   03    void *pParser;
-   04    Token sToken;
-   05    int hTokenId;
-   06    ParserState sState;
-   07
-   08    pTokenizer = TokenizerCreate(zFilename);
-   09    pParser = ParseAlloc( malloc );
-   10    InitParserState(&sState);
-   11    while( GetNextToken(pTokenizer, &hTokenId, &sToken) ){
-   12       Parse(pParser, hTokenId, sToken, &sState);
-   13    }
-   14    Parse(pParser, 0, sToken, &sState);
-   15    ParseFree(pParser, free );
-   16    TokenizerFree(pTokenizer);
-   17    return sState.treeRoot;
-   18 }
-
-This example shows a user-written routine that parses a file of -text and returns a pointer to the parse tree. -(We've omitted all error-handling from this example to keep it -simple.) -We assume the existence of some kind of tokenizer which is created -using TokenizerCreate() on line 8 and deleted by TokenizerFree() -on line 16. The GetNextToken() function on line 11 retrieves the -next token from the input file and puts its type in the -integer variable hTokenId. The sToken variable is assumed to be -some kind of structure that contains details about each token, -such as its complete text, what line it occurs on, etc.

- -

This example also assumes the existence of structure of type -ParserState that holds state information about a particular parse. -An instance of such a structure is created on line 6 and initialized -on line 10. A pointer to this structure is passed into the Parse() -routine as the optional 4th argument. -The action routine specified by the grammar for the parser can use -the ParserState structure to hold whatever information is useful and -appropriate. In the example, we note that the treeRoot field of -the ParserState structure is left pointing to the root of the parse -tree.

- -

The core of this example as it relates to Lemon is as follows: -

-   ParseFile(){
-      pParser = ParseAlloc( malloc );
-      while( GetNextToken(pTokenizer,&hTokenId, &sToken) ){
-         Parse(pParser, hTokenId, sToken);
-      }
-      Parse(pParser, 0, sToken);
-      ParseFree(pParser, free );
-   }
-
-Basically, what a program has to do to use a Lemon-generated parser -is first create the parser, then send it lots of tokens obtained by -tokenizing an input source. When the end of input is reached, the -Parse() routine should be called one last time with a token type -of 0. This step is necessary to inform the parser that the end of -input has been reached. Finally, we reclaim memory used by the -parser by calling ParseFree().

- -

There is one other interface routine that should be mentioned -before we move on. -The ParseTrace() function can be used to generate debugging output -from the parser. A prototype for this routine is as follows: -

-   ParseTrace(FILE *stream, char *zPrefix);
-
-After this routine is called, a short (one-line) message is written -to the designated output stream every time the parser changes states -or calls an action routine. Each such message is prefaced using -the text given by zPrefix. This debugging output can be turned off -by calling ParseTrace() again with a first argument of NULL (0).
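For instance, tracing might be switched on while a suspect input is parsed and silenced again afterwards; a sketch of a fragment, assuming the surrounding tokenizer loop from the earlier ParseFile() example:

   ParseTrace(stderr, "parser: ");   /* each shift, reduce and state change is logged */
   /* ... feed tokens to Parse() as in the ParseFile() example ... */
   ParseTrace(NULL, 0);              /* turn the debugging output back off */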

- -

Differences With YACC and BISON

- -

Programmers who have previously used the yacc or bison parser -generator will notice several important differences between yacc and/or -bison and Lemon. -

    -
  • In yacc and bison, the parser calls the tokenizer. In Lemon, - the tokenizer calls the parser. -
  • Lemon uses no global variables. Yacc and bison use global variables - to pass information between the tokenizer and parser. -
  • Lemon allows multiple parsers to be running simultaneously. Yacc - and bison do not. -
-These differences may cause some initial confusion for programmers -with prior yacc and bison experience. -But after years of experience using Lemon, I firmly -believe that the Lemon way of doing things is better.

- -

Input File Syntax

- -

The main purpose of the grammar specification file for Lemon is -to define the grammar for the parser. But the input file also -specifies additional information Lemon requires to do its job. -Most of the work in using Lemon is in writing an appropriate -grammar file.

- -

The grammar file for lemon is, for the most part, free format. -It does not have sections or divisions like yacc or bison. Any -declaration can occur at any point in the file. -Lemon ignores whitespace (except where it is needed to separate -tokens) and it honors the same commenting conventions as C and C++.

- -

Terminals and Nonterminals

- -

A terminal symbol (token) is any string of alphanumeric -and underscore characters -that begins with an upper case letter. -A terminal can contain lower case letters after the first character, -but the usual convention is to make terminals all upper case. -A nonterminal, on the other hand, is any string of alphanumeric -and underscore characters that begins with a lower case letter. -Again, the usual convention is to make nonterminals use all lower -case letters.

- -

In Lemon, terminal and nonterminal symbols do not need to -be declared or identified in a separate section of the grammar file. -Lemon is able to generate a list of all terminals and nonterminals -by examining the grammar rules, and it can always distinguish a -terminal from a nonterminal by checking the case of the first -character of the name.

- -

Yacc and bison allow terminal symbols to have either alphanumeric -names or to be individual characters included in single quotes, like -this: ')' or '$'. Lemon does not allow this alternative form for -terminal symbols. With Lemon, all symbols, terminals and nonterminals, -must have alphanumeric names.

- -

Grammar Rules

- -

The main component of a Lemon grammar file is a sequence of grammar -rules. -Each grammar rule consists of a nonterminal symbol followed by -the special symbol ``::='' and then a list of terminals and/or nonterminals. -The rule is terminated by a period. -The list of terminals and nonterminals on the right-hand side of the -rule can be empty. -Rules can occur in any order, except that the left-hand side of the -first rule is assumed to be the start symbol for the grammar (unless -specified otherwise using the %start directive described below.) -A typical sequence of grammar rules might look something like this: -

-  expr ::= expr PLUS expr.
-  expr ::= expr TIMES expr.
-  expr ::= LPAREN expr RPAREN.
-  expr ::= VALUE.
-
-

- -

There is one non-terminal in this example, ``expr'', and five -terminal symbols or tokens: ``PLUS'', ``TIMES'', ``LPAREN'', -``RPAREN'' and ``VALUE''.

- -

Like yacc and bison, Lemon allows the grammar to specify a block -of C code that will be executed whenever a grammar rule is reduced -by the parser. -In Lemon, this action is specified by putting the C code (contained -within curly braces {...}) immediately after the -period that closes the rule. -For example: -

-  expr ::= expr PLUS expr.   { printf("Doing an addition...\n"); }
-
-

- -

In order to be useful, grammar actions must normally be linked to -their associated grammar rules. -In yacc and bison, this is accomplished by embedding a ``$$'' in the -action to stand for the value of the left-hand side of the rule and -symbols ``$1'', ``$2'', and so forth to stand for the value of -the terminal or nonterminal at position 1, 2 and so forth on the -right-hand side of the rule. -This idea is very powerful, but it is also very error-prone. The -single most common source of errors in a yacc or bison grammar is -to miscount the number of symbols on the right-hand side of a grammar -rule and say ``$7'' when you really mean ``$8''.

- -

Lemon avoids the need to count grammar symbols by assigning symbolic -names to each symbol in a grammar rule and then using those symbolic -names in the action. -In yacc or bison, one would write this: -

-  expr -> expr PLUS expr  { $$ = $1 + $3; };
-
-But in Lemon, the same rule becomes the following: -
-  expr(A) ::= expr(B) PLUS expr(C).  { A = B+C; }
-
-In the Lemon rule, any symbol in parentheses after a grammar rule -symbol becomes a place holder for that symbol in the grammar rule. -This place holder can then be used in the associated C action to -stand for the value of that symbol.

- -

The Lemon notation for linking a grammar rule with its reduce -action is superior to yacc/bison on several counts. -First, as mentioned above, the Lemon method avoids the need to -count grammar symbols. -Secondly, if a terminal or nonterminal in a Lemon grammar rule -includes a linking symbol in parentheses but that linking symbol -is not actually used in the reduce action, then an error message -is generated. -For example, the rule -

-  expr(A) ::= expr(B) PLUS expr(C).  { A = B; }
-
-will generate an error because the linking symbol ``C'' is used -in the grammar rule but not in the reduce action.

- -

The Lemon notation for linking grammar rules to reduce actions -also facilitates the use of destructors for reclaiming memory -allocated by the values of terminals and nonterminals on the -right-hand side of a rule.

- -

Precedence Rules

- -

Lemon resolves parsing ambiguities in exactly the same way as -yacc and bison. A shift-reduce conflict is resolved in favor -of the shift, and a reduce-reduce conflict is resolved by reducing -whichever rule comes first in the grammar file.

- -

Just like in -yacc and bison, Lemon allows a measure of control -over the resolution of parsing conflicts using precedence rules. -A precedence value can be assigned to any terminal symbol -using the %left, %right or %nonassoc directives. Terminal symbols -mentioned in earlier directives have a lower precedence than -terminal symbols mentioned in later directives. For example:

- -

-   %left AND.
-   %left OR.
-   %nonassoc EQ NE GT GE LT LE.
-   %left PLUS MINUS.
-   %left TIMES DIVIDE MOD.
-   %right EXP NOT.
-

- -

In the preceding sequence of directives, the AND operator is -defined to have the lowest precedence. The OR operator is one -precedence level higher. And so forth. Hence, the grammar would -attempt to group the ambiguous expression -

-     a AND b OR c
-
-like this -
-     a AND (b OR c).
-
-The associativity (left, right or nonassoc) is used to determine -the grouping when the precedence is the same. AND is left-associative -in our example, so -
-     a AND b AND c
-
-is parsed like this -
-     (a AND b) AND c.
-
-The EXP operator is right-associative, though, so -
-     a EXP b EXP c
-
-is parsed like this -
-     a EXP (b EXP c).
-
-The nonassoc precedence is used for non-associative operators. -So -
-     a EQ b EQ c
-
-is an error.

- -

The precedence of non-terminals is transferred to rules as follows: -The precedence of a grammar rule is equal to the precedence of the -left-most terminal symbol in the rule for which a precedence is -defined. This is normally what you want, but in those cases where -you want the precedence of a grammar rule to be something different, -you can specify an alternative precedence symbol by putting the -symbol in square braces after the period at the end of the rule and -before any C-code. For example:

- -

-   expr ::= MINUS expr.  [NOT]
-

- -

This rule has a precedence equal to that of the NOT symbol, not the -MINUS symbol as would have been the case by default.

- -

With the knowledge of how precedence is assigned to terminal -symbols and individual -grammar rules, we can now explain precisely how parsing conflicts -are resolved in Lemon. Shift-reduce conflicts are resolved -as follows: -

    -
  • If either the token to be shifted or the rule to be reduced - lacks precedence information, then resolve in favor of the - shift, but report a parsing conflict. -
  • If the precedence of the token to be shifted is greater than - the precedence of the rule to reduce, then resolve in favor - of the shift. No parsing conflict is reported. -
  • If the precedence of the token to be shifted is less than the - precedence of the rule to reduce, then resolve in favor of the - reduce action. No parsing conflict is reported. -
  • If the precedences are the same and the shift token is - right-associative, then resolve in favor of the shift. - No parsing conflict is reported. -
  • If the precedences are the same and the shift token is - left-associative, then resolve in favor of the reduce. - No parsing conflict is reported. -
  • Otherwise, resolve the conflict by doing the shift and - report the parsing conflict. -
-Reduce-reduce conflicts are resolved this way: -
    -
  • If either reduce rule - lacks precedence information, then resolve in favor of the - rule that appears first in the grammar and report a parsing - conflict. -
  • If both rules have precedence and the precedence is different - then resolve the dispute in favor of the rule with the highest - precedence and do not report a conflict. -
  • Otherwise, resolve the conflict by reducing by the rule that - appears first in the grammar and report a parsing conflict. -
- -

Special Directives

- -

The input grammar to Lemon consists of grammar rules and special -directives. We've described all the grammar rules, so now we'll -talk about the special directives.

- -

Directives in lemon can occur in any order. You can put them before -the grammar rules, or after the grammar rules, or in the midst of the -grammar rules. It doesn't matter. The relative order of -directives used to assign precedence to terminals is important, but -other than that, the order of directives in Lemon is arbitrary.

- -

Lemon supports the following special directives: -

    -
  • %code -
  • %default_destructor -
  • %default_type -
  • %destructor -
  • %extra_argument -
  • %include -
  • %left -
  • %name -
  • %nonassoc -
  • %parse_accept -
  • %parse_failure -
  • %right -
  • %stack_overflow -
  • %stack_size -
  • %start_symbol -
  • %syntax_error -
  • %token_destructor -
  • %token_prefix -
  • %token_type -
  • %type -
-Each of these directives will be described separately in the -following sections:

- -

The %code directive

- -

The %code directive is used to specify additional C/C++ code that -is added to the end of the main output file. This is similar to -the %include directive except that %include is inserted at the -beginning of the main output file.

- -

%code is typically used to include some action routines or perhaps -a tokenizer as part of the output file.
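A minimal sketch of how this is typically written; the wrapper functions here are illustrative and not part of Lemon itself, but ParseAlloc() and ParseFree() are the routines described earlier:

   %include { #include <stdlib.h> }
   %code {
     /* Convenience wrappers appended after the generated parser code. */
     void *NewParser(void){ return ParseAlloc( malloc ); }
     void DeleteParser(void *pParser){ ParseFree(pParser, free); }
   }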

- -

The %default_destructor directive

- -

The %default_destructor directive specifies a destructor to -use for non-terminals that do not have their own destructor -specified by a separate %destructor directive. See the documentation -on the %destructor directive below for additional information.

- -

In some grammars, many different non-terminal symbols have the -same datatype and hence the same destructor. This directive is -a convenient way to specify the same destructor for all those -non-terminals using a single statement.
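For example, a grammar whose non-terminals mostly carry parse-tree pointers might write the following sketch; DeleteTree() and DeleteIdList() are assumed application routines, not part of Lemon:

   %default_destructor { DeleteTree($$); }
   %destructor id_list { DeleteIdList($$); }  /* this one symbol keeps its own destructor */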

- -

The %default_type directive

- -

The %default_type directive specifies the datatype of non-terminal -symbols that do not have their own datatype defined using a separate -%type directive. See the documentation on %type below for additional -information.
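As with %default_destructor, this is purely a convenience; a sketch, assuming most non-terminals hold a pointer to an application-defined Tree structure:

   %default_type { Tree* }
   %type id_list { IdList* }   /* a symbol that needs a different type */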

- -

The %destructor directive

- -

The %destructor directive is used to specify a destructor for -a non-terminal symbol. -(See also the %token_destructor directive which is used to -specify a destructor for terminal symbols.)

- -

A non-terminal's destructor is called to dispose of the -non-terminal's value whenever the non-terminal is popped from -the stack. This includes all of the following circumstances: -

    -
  • When a rule reduces and the value of a non-terminal on - the right-hand side is not linked to C code. -
  • When the stack is popped during error processing. -
  • When the ParseFree() function runs. -
-The destructor can do whatever it wants with the value of -the non-terminal, but its design is to deallocate memory -or other resources held by that non-terminal.

- -

Consider an example: -

-   %type nt {void*}
-   %destructor nt { free($$); }
-   nt(A) ::= ID NUM.   { A = malloc( 100 ); }
-
-This example is a bit contrived but it serves to illustrate how -destructors work. The example shows a non-terminal named -``nt'' that holds values of type ``void*''. When the rule for -an ``nt'' reduces, it sets the value of the non-terminal to -space obtained from malloc(). Later, when the nt non-terminal -is popped from the stack, the destructor will fire and call -free() on this malloced space, thus avoiding a memory leak. -(Note that the symbol ``$$'' in the destructor code is replaced -by the value of the non-terminal.)

- -

It is important to note that the value of a non-terminal is passed -to the destructor whenever the non-terminal is removed from the -stack, unless the non-terminal is used in a C-code action. If -the non-terminal is used by C-code, then it is assumed that the -C-code will take care of destroying it if it should really -be destroyed. More commonly, the value is used to build some -larger structure and we don't want to destroy it, which is why -the destructor is not called in this circumstance.
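Continuing the contrived example above, the difference might look like this sketch; the SEMI and IGNORE tokens and the consume() helper are illustrative only:

   stmt ::= nt(A) SEMI.      { consume(A); }   /* A is linked to C code: no destructor call */
   stmt ::= IGNORE nt SEMI.                    /* nt is not linked: the destructor frees it */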

- -

By appropriate use of destructors, it is possible to -build a parser using Lemon that can be used within a long-running -program, such as a GUI, that will not leak memory or other resources. -To do the same using yacc or bison is much more difficult.

- -

The %extra_argument directive

- -The %extra_argument directive instructs Lemon to add a 4th parameter -to the parameter list of the Parse() function it generates. Lemon -doesn't do anything itself with this extra argument, but it does -make the argument available to C-code action routines, destructors, -and so forth. For example, if the grammar file contains:

- -

-    %extra_argument { MyStruct *pAbc }
-

- -

Then the Parse() function generated will have a 4th parameter -of type ``MyStruct*'' and all action routines will have access to -a variable named ``pAbc'' that is the value of the 4th parameter -in the most recent call to Parse().
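An action routine can then refer to that name directly; a sketch building on the earlier expression grammar, where the nAdditions field is purely illustrative:

   %extra_argument { MyStruct *pAbc }
   expr(A) ::= expr(B) PLUS expr(C).  { A = B + C;  pAbc->nAdditions++; }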

- -

The %include directive

- -

The %include directive specifies C code that is included at the -top of the generated parser. You can include any text you want -- -the Lemon parser generator copies it blindly. If you have multiple -%include directives in your grammar file, the value of the last -%include directive overwrites all the others. The %include directive is very handy for getting some extra #include -preprocessor statements at the beginning of the generated parser. -For example:

- -

-   %include {#include <unistd.h>}
-

- -

This might be needed, for example, if some of the C actions in the -grammar call functions that are prototyped in unistd.h.

- -

The %left directive

- -The %left directive is used (along with the %right and -%nonassoc directives) to declare precedences of terminal -symbols. Every terminal symbol whose name appears after -a %left directive but before the next period (``.'') is -given the same left-associative precedence value. Subsequent -%left directives have higher precedence. For example:

- -

-   %left AND.
-   %left OR.
-   %nonassoc EQ NE GT GE LT LE.
-   %left PLUS MINUS.
-   %left TIMES DIVIDE MOD.
-   %right EXP NOT.
-

- -

Note the period that terminates each %left, %right or %nonassoc -directive.

- -

LALR(1) grammars can get into a situation where they require -a large amount of stack space if you make heavy use of right-associative -operators. For this reason, it is recommended that you use %left -rather than %right whenever possible.

- -

The %name directive

- -

By default, the functions generated by Lemon all begin with the -five-character string ``Parse''. You can change this string to something -different using the %name directive. For instance:

- -

-   %name Abcde
-

- -

Putting this directive in the grammar file will cause Lemon to generate -functions named -

    -
  • AbcdeAlloc(), -
  • AbcdeFree(), -
  • AbcdeTrace(), and -
  • Abcde(). -
-The %name directive allows you to generate two or more different -parsers and link them all into the same executable. -

- -

The %nonassoc directive

- -

This directive is used to assign non-associative precedence to -one or more terminal symbols. See the section on precedence rules -or on the %left directive for additional information.

- -

The %parse_accept directive

- -

The %parse_accept directive specifies a block of C code that is -executed whenever the parser accepts its input string. To ``accept'' -an input string means that the parser was able to process all tokens -without error.

- -

For example:

- -

-   %parse_accept {
-      printf("parsing complete!\n");
-   }
-

- - -

The %parse_failure directive

- -

The %parse_failure directive specifies a block of C code that -is executed whenever the parser fails to complete. This code is not -executed until the parser has tried and failed to resolve an input -error using its usual error recovery strategy. The routine is -only invoked when parsing is unable to continue.

- -

-   %parse_failure {
-     fprintf(stderr,"Giving up.  Parser is hopelessly lost...\n");
-   }
-

- -

The %right directive

- -

This directive is used to assign right-associative precedence to -one or more terminal symbols. See the section on precedence rules -or on the %left directive for additional information.

- -

The %stack_overflow directive

- -

The %stack_overflow directive specifies a block of C code that -is executed if the parser's internal stack ever overflows. Typically -this just prints an error message. After a stack overflow, the parser -will be unable to continue and must be reset.

- -

-   %stack_overflow {
-     fprintf(stderr,"Giving up.  Parser stack overflow\n");
-   }
-

- -

You can help prevent parser stack overflows by avoiding the use -of right recursion and right-precedence operators in your grammar. -Use left recursion and left-precedence operators instead, to -encourage rules to reduce sooner and keep the stack size down. -For example, do rules like this: -

-   list ::= list element.      // left-recursion.  Good!
-   list ::= .
-
-Not like this: -
-   list ::= element list.      // right-recursion.  Bad!
-   list ::= .
-
- -

The %stack_size directive

- -

If stack overflow is a problem and you can't resolve the trouble -by using left-recursion, then you might want to increase the size -of the parser's stack using this directive. Put a positive integer -after the %stack_size directive and Lemon will generate a parser -with a stack of the requested size. The default value is 100.

- -

-   %stack_size 2000
-

- -

The %start_symbol directive

- -

By default, the start-symbol for the grammar that Lemon generates -is the first non-terminal that appears in the grammar file. But you -can choose a different start-symbol using the %start_symbol directive.

- -

-   %start_symbol  prog
-

- -

The %token_destructor directive

- -

The %destructor directive assigns a destructor to a non-terminal -symbol. (See the description of the %destructor directive above.) -This directive does the same thing for all terminal symbols.

- -

Unlike non-terminal symbols which may each have a different data type -for their values, terminals all use the same data type (defined by -the %token_type directive) and so they use a common destructor. Other -than that, the token destructor works just like the non-terminal -destructors.
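A sketch, assuming tokens are heap-allocated Token structures released by an application routine named TokenFree():

   %token_type       { Token* }
   %token_destructor { TokenFree($$); }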

- -

The %token_prefix directive

- -

Lemon generates #defines that assign small integer constants -to each terminal symbol in the grammar. If desired, Lemon will -add a prefix specified by this directive -to each of the #defines it generates. -So if the default output of Lemon looked like this: -

-    #define AND              1
-    #define MINUS            2
-    #define OR               3
-    #define PLUS             4
-
-You can insert a statement into the grammar like this: -
-    %token_prefix    TOKEN_
-
-to cause Lemon to produce these symbols instead: -
-    #define TOKEN_AND        1
-    #define TOKEN_MINUS      2
-    #define TOKEN_OR         3
-    #define TOKEN_PLUS       4
-
- -

The %token_type and %type directives

- -

These directives are used to specify the data types for values -on the parser's stack associated with terminal and non-terminal -symbols. The values of all terminal symbols must be of the same -type. This turns out to be the same data type as the 3rd parameter -to the Parse() function generated by Lemon. Typically, you will -make the value of a terminal symbol be a pointer to some kind of -token structure. Like this:

- -

-   %token_type    {Token*}
-

- -

If the data type of terminals is not specified, the default value -is ``int''.

- -

Non-terminal symbols can each have their own data types. Typically -the data type of a non-terminal is a pointer to the root of a parse-tree -structure that contains all information about that non-terminal. -For example:

- -

-   %type   expr  {Expr*}
-

- -

Each entry on the parser's stack is actually a union containing -instances of all data types for every non-terminal and terminal symbol. -Lemon will automatically use the correct element of this union depending -on what the corresponding non-terminal or terminal symbol is. But -the grammar designer should keep in mind that the size of the union -will be the size of its largest element. So if you have a single -non-terminal whose data type requires 1K of storage, then your 100 -entry parser stack will require 100K of heap space. If you are willing -and able to pay that price, fine. You just need to know.

- -

Error Processing

- -

After extensive experimentation over several years, it has been -discovered that the error recovery strategy used by yacc is about -as good as it gets. And so that is what Lemon uses.

- -

When a Lemon-generated parser encounters a syntax error, it -first invokes the code specified by the %syntax_error directive, if -any. It then enters its error recovery strategy. The error recovery -strategy is to begin popping the parser's stack until it enters a -state where it is permitted to shift a special non-terminal symbol -named ``error''. It then shifts this non-terminal and continues -parsing. But the %syntax_error routine will not be called again -until at least three new tokens have been successfully shifted.
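Although the %syntax_error directive has no section of its own above, it takes a block of C code just like %parse_failure; a minimal sketch:

   %syntax_error {
     fprintf(stderr,"Syntax error -- attempting to recover\n");
   }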

- -

If the parser pops its stack until the stack is empty, and it still -is unable to shift the error symbol, then the %parse_failure routine -is invoked and the parser resets itself to its start state, ready -to begin parsing a new file. This is what will happen at the very -first syntax error, of course, if there are no instances of the -``error'' non-terminal in your grammar.

- - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/doc/report1.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/doc/report1.txt --- sqlite3-3.4.2/doc/report1.txt 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/doc/report1.txt 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -An SQLite (version 1.0) database was used in a large military application -where the database contained 105 tables and indices. The following is -a breakdown on the sizes of keys and data within these tables and indices: - -Entries: 967089 -Size: 45896104 -Avg Size: 48 -Key Size: 11112265 -Avg Key Size: 12 -Max Key Size: 99 - - 0..8 263 0% - 9..12 5560 0% - 13..16 71394 7% - 17..24 180717 26% - 25..32 215442 48% - 33..40 151118 64% - 41..48 77479 72% - 49..56 13983 74% - 57..64 14481 75% - 65..80 41342 79% - 81..96 127098 92% - 97..112 38054 96% - 113..128 14197 98% - 129..144 8208 99% - 145..160 3326 99% - 161..176 1242 99% - 177..192 604 99% - 193..208 222 99% - 209..224 213 99% - 225..240 132 99% - 241..256 58 99% - 257..288 515 99% - 289..320 64 99% - 321..352 39 99% - 353..384 44 99% - 385..416 25 99% - 417..448 24 99% - 449..480 26 99% - 481..512 27 99% - 513..1024 470 99% - 1025..2048 396 99% - 2049..4096 187 99% - 4097..8192 78 99% - 8193..16384 35 99% -16385..32768 17 99% -32769..65536 6 99% -65537..65541 3 100% - -If the indices are omitted, the statistics for the 49 tables -become the following: - -Entries: 451103 -Size: 30930282 -Avg Size: 69 -Key Size: 1804412 -Avg Key Size: 4 -Max Key Size: 4 - - 0..24 89 0% - 25..32 9417 2% - 33..40 119162 28% - 41..48 68710 43% - 49..56 9539 45% - 57..64 12435 48% - 65..80 38650 57% - 81..96 126877 85% - 97..112 38030 93% - 113..128 14183 96% - 129..144 7668 98% - 145..160 3302 99% - 161..176 1238 99% - 177..192 597 99% - 193..208 217 99% - 209..224 211 99% - 225..240 130 99% - 241..256 57 99% - 257..288 100 99% - 289..320 62 99% - 321..352 34 99% - 353..384 43 99% - 385..416 24 99% - 417..448 24 99% - 449..480 25 99% - 481..512 27 99% - 513..1024 153 99% - 1025..2048 92 99% - 2049..4096 7 100% - -The 56 indices have these statistics: - -Entries: 512422 -Size: 14879828 -Avg Size: 30 -Key Size: 9253204 -Avg Key Size: 19 -Max Key Size: 99 - - 0..8 246 0% - 9..12 5486 1% - 13..16 70717 14% - 17..24 178246 49% - 25..32 205722 89% - 33..40 31951 96% - 41..48 8768 97% - 49..56 4444 98% - 57..64 2046 99% - 65..80 2691 99% - 81..96 202 99% - 97..112 11 99% - 113..144 527 99% - 145..160 20 99% - 161..288 406 99% - 289..1024 316 99% - 1025..2048 304 99% - 2049..4096 180 99% - 4097..8192 78 99% - 8193..16384 35 99% -16385..32768 17 99% -32769..65536 6 99% -65537..65541 3 100% diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/async/README.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/async/README.txt --- sqlite3-3.4.2/ext/async/README.txt 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/async/README.txt 2009-04-24 10:27:16.000000000 +0100 @@ -0,0 +1,164 @@ + +Normally, when SQLite writes to a database file, it waits until the write +operation is finished before returning control to the calling application. +Since writing to the file-system is usually very slow compared with CPU +bound operations, this can be a performance bottleneck. This directory +contains an extension that causes SQLite to perform all write requests +using a separate thread running in the background. Although this does not +reduce the overall system resources (CPU, disk bandwidth etc.) at all, it +allows SQLite to return control to the caller quickly even when writing to +the database, eliminating the bottleneck. + + 1. 
Functionality + + 1.1 How it Works + 1.2 Limitations + 1.3 Locking and Concurrency + + 2. Compilation and Usage + + 3. Porting + + + +1. FUNCTIONALITY + + With asynchronous I/O, write requests are handled by a separate thread + running in the background. This means that the thread that initiates + a database write does not have to wait for (sometimes slow) disk I/O + to occur. The write seems to happen very quickly, though in reality + it is happening at its usual slow pace in the background. + + Asynchronous I/O appears to give better responsiveness, but at a price. + You lose the Durable property. With the default I/O backend of SQLite, + once a write completes, you know that the information you wrote is + safely on disk. With the asynchronous I/O, this is not the case. If + your program crashes or if a power loss occurs after the database + write but before the asynchronous write thread has completed, then the + database change might never make it to disk and the next user of the + database might not see your change. + + You lose Durability with asynchronous I/O, but you still retain the + other parts of ACID: Atomic, Consistent, and Isolated. Many + appliations get along fine without the Durablity. + + 1.1 How it Works + + Asynchronous I/O works by creating a special SQLite "vfs" structure + and registering it with sqlite3_vfs_register(). When files opened via + this vfs are written to (using the vfs xWrite() method), the data is not + written directly to disk, but is placed in the "write-queue" to be + handled by the background thread. + + When files opened with the asynchronous vfs are read from + (using the vfs xRead() method), the data is read from the file on + disk and the write-queue, so that from the point of view of + the vfs reader the xWrite() appears to have already completed. + + The special vfs is registered (and unregistered) by calls to the + API functions sqlite3async_initialize() and sqlite3async_shutdown(). + See section "Compilation and Usage" below for details. + + 1.2 Limitations + + In order to gain experience with the main ideas surrounding asynchronous + IO, this implementation is deliberately kept simple. Additional + capabilities may be added in the future. + + For example, as currently implemented, if writes are happening at a + steady stream that exceeds the I/O capability of the background writer + thread, the queue of pending write operations will grow without bound. + If this goes on for long enough, the host system could run out of memory. + A more sophisticated module could to keep track of the quantity of + pending writes and stop accepting new write requests when the queue of + pending writes grows too large. + + 1.3 Locking and Concurrency + + Multiple connections from within a single process that use this + implementation of asynchronous IO may access a single database + file concurrently. From the point of view of the user, if all + connections are from within a single process, there is no difference + between the concurrency offered by "normal" SQLite and SQLite + using the asynchronous backend. + + If file-locking is enabled (it is enabled by default), then connections + from multiple processes may also read and write the database file. + However concurrency is reduced as follows: + + * When a connection using asynchronous IO begins a database + transaction, the database is locked immediately. However the + lock is not released until after all relevant operations + in the write-queue have been flushed to disk. 
This means + (for example) that the database may remain locked for some + time after a "COMMIT" or "ROLLBACK" is issued. + + * If an application using asynchronous IO executes transactions + in quick succession, other database users may be effectively + locked out of the database. This is because when a BEGIN + is executed, a database lock is established immediately. But + when the corresponding COMMIT or ROLLBACK occurs, the lock + is not released until the relevant part of the write-queue + has been flushed through. As a result, if a COMMIT is followed + by a BEGIN before the write-queue is flushed through, the database + is never unlocked,preventing other processes from accessing + the database. + + File-locking may be disabled at runtime using the sqlite3async_control() + API (see below). This may improve performance when an NFS or other + network file-system, as the synchronous round-trips to the server be + required to establish file locks are avoided. However, if multiple + connections attempt to access the same database file when file-locking + is disabled, application crashes and database corruption is a likely + outcome. + + +2. COMPILATION AND USAGE + + The asynchronous IO extension consists of a single file of C code + (sqlite3async.c), and a header file (sqlite3async.h) that defines the + C API used by applications to activate and control the modules + functionality. + + To use the asynchronous IO extension, compile sqlite3async.c as + part of the application that uses SQLite. Then use the API defined + in sqlite3async.h to initialize and configure the module. + + The asynchronous IO VFS API is described in detail in comments in + sqlite3async.h. Using the API usually consists of the following steps: + + 1. Register the asynchronous IO VFS with SQLite by calling the + sqlite3async_initialize() function. + + 2. Create a background thread to perform write operations and call + sqlite3async_run(). + + 3. Use the normal SQLite API to read and write to databases via + the asynchronous IO VFS. + + Refer to sqlite3async.h for details. + + +3. PORTING + + Currently the asynchronous IO extension is compatible with win32 systems + and systems that support the pthreads interface, including Mac OSX, Linux, + and other varieties of Unix. + + To port the asynchronous IO extension to another platform, the user must + implement mutex and condition variable primitives for the new platform. + Currently there is no externally available interface to allow this, but + modifying the code within sqlite3async.c to include the new platforms + concurrency primitives is relatively easy. Search within sqlite3async.c + for the comment string "PORTING FUNCTIONS" for details. Then implement + new versions of each of the following: + + static void async_mutex_enter(int eMutex); + static void async_mutex_leave(int eMutex); + static void async_cond_wait(int eCond, int eMutex); + static void async_cond_signal(int eCond); + static void async_sched_yield(void); + + The functionality required of each of the above functions is described + in comments in sqlite3async.c. + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/async/sqlite3async.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/async/sqlite3async.c --- sqlite3-3.4.2/ext/async/sqlite3async.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/async/sqlite3async.c 2009-04-30 18:45:34.000000000 +0100 @@ -0,0 +1,1680 @@ +/* +** 2005 December 14 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** $Id: sqlite3async.c,v 1.6 2009/04/30 17:45:34 shane Exp $ +** +** This file contains the implementation of an asynchronous IO backend +** for SQLite. +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ASYNCIO) + +#include "sqlite3async.h" +#include "sqlite3.h" +#include +#include +#include + +/* Useful macros used in several places */ +#define MIN(x,y) ((x)<(y)?(x):(y)) +#define MAX(x,y) ((x)>(y)?(x):(y)) + +#ifndef SQLITE_AMALGAMATION +/* Macro to mark parameters as unused and silence compiler warnings. */ +#define UNUSED_PARAMETER(x) (void)(x) +#endif + +/* Forward references */ +typedef struct AsyncWrite AsyncWrite; +typedef struct AsyncFile AsyncFile; +typedef struct AsyncFileData AsyncFileData; +typedef struct AsyncFileLock AsyncFileLock; +typedef struct AsyncLock AsyncLock; + +/* Enable for debugging */ +#ifndef NDEBUG +#include +static int sqlite3async_trace = 0; +# define ASYNC_TRACE(X) if( sqlite3async_trace ) asyncTrace X +static void asyncTrace(const char *zFormat, ...){ + char *z; + va_list ap; + va_start(ap, zFormat); + z = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + fprintf(stderr, "[%d] %s", 0 /* (int)pthread_self() */, z); + sqlite3_free(z); +} +#else +# define ASYNC_TRACE(X) +#endif + +/* +** THREAD SAFETY NOTES +** +** Basic rules: +** +** * Both read and write access to the global write-op queue must be +** protected by the async.queueMutex. As are the async.ioError and +** async.nFile variables. +** +** * The async.pLock list and all AsyncLock and AsyncFileLock +** structures must be protected by the async.lockMutex mutex. +** +** * The file handles from the underlying system are not assumed to +** be thread safe. +** +** * See the last two paragraphs under "The Writer Thread" for +** an assumption to do with file-handle synchronization by the Os. +** +** Deadlock prevention: +** +** There are three mutex used by the system: the "writer" mutex, +** the "queue" mutex and the "lock" mutex. Rules are: +** +** * It is illegal to block on the writer mutex when any other mutex +** are held, and +** +** * It is illegal to block on the queue mutex when the lock mutex +** is held. +** +** i.e. mutex's must be grabbed in the order "writer", "queue", "lock". +** +** File system operations (invoked by SQLite thread): +** +** xOpen +** xDelete +** xFileExists +** +** File handle operations (invoked by SQLite thread): +** +** asyncWrite, asyncClose, asyncTruncate, asyncSync +** +** The operations above add an entry to the global write-op list. They +** prepare the entry, acquire the async.queueMutex momentarily while +** list pointers are manipulated to insert the new entry, then release +** the mutex and signal the writer thread to wake up in case it happens +** to be asleep. +** +** +** asyncRead, asyncFileSize. +** +** Read operations. Both of these read from both the underlying file +** first then adjust their result based on pending writes in the +** write-op queue. So async.queueMutex is held for the duration +** of these operations to prevent other threads from changing the +** queue in mid operation. +** +** +** asyncLock, asyncUnlock, asyncCheckReservedLock +** +** These primitives implement in-process locking using a hash table +** on the file name. 
Files are locked correctly for connections coming +** from the same process. But other processes cannot see these locks +** and will therefore not honor them. +** +** +** The writer thread: +** +** The async.writerMutex is used to make sure only there is only +** a single writer thread running at a time. +** +** Inside the writer thread is a loop that works like this: +** +** WHILE (write-op list is not empty) +** Do IO operation at head of write-op list +** Remove entry from head of write-op list +** END WHILE +** +** The async.queueMutex is always held during the test, and when the entry is removed from the head +** of the write-op list. Sometimes it is held for the interim +** period (while the IO is performed), and sometimes it is +** relinquished. It is relinquished if (a) the IO op is an +** ASYNC_CLOSE or (b) when the file handle was opened, two of +** the underlying systems handles were opened on the same +** file-system entry. +** +** If condition (b) above is true, then one file-handle +** (AsyncFile.pBaseRead) is used exclusively by sqlite threads to read the +** file, the other (AsyncFile.pBaseWrite) by sqlite3_async_flush() +** threads to perform write() operations. This means that read +** operations are not blocked by asynchronous writes (although +** asynchronous writes may still be blocked by reads). +** +** This assumes that the OS keeps two handles open on the same file +** properly in sync. That is, any read operation that starts after a +** write operation on the same file system entry has completed returns +** data consistent with the write. We also assume that if one thread +** reads a file while another is writing it all bytes other than the +** ones actually being written contain valid data. +** +** If the above assumptions are not true, set the preprocessor symbol +** SQLITE_ASYNC_TWO_FILEHANDLES to 0. +*/ + + +#ifndef NDEBUG +# define TESTONLY( X ) X +#else +# define TESTONLY( X ) +#endif + +/* +** PORTING FUNCTIONS +** +** There are two definitions of the following functions. One for pthreads +** compatible systems and one for Win32. These functions isolate the OS +** specific code required by each platform. +** +** The system uses three mutexes and a single condition variable. To +** block on a mutex, async_mutex_enter() is called. The parameter passed +** to async_mutex_enter(), which must be one of ASYNC_MUTEX_LOCK, +** ASYNC_MUTEX_QUEUE or ASYNC_MUTEX_WRITER, identifies which of the three +** mutexes to lock. Similarly, to unlock a mutex, async_mutex_leave() is +** called with a parameter identifying the mutex being unlocked. Mutexes +** are not recursive - it is an error to call async_mutex_enter() to +** lock a mutex that is already locked, or to call async_mutex_leave() +** to unlock a mutex that is not currently locked. +** +** The async_cond_wait() and async_cond_signal() functions are modelled +** on the pthreads functions with similar names. The first parameter to +** both functions is always ASYNC_COND_QUEUE. When async_cond_wait() +** is called the mutex identified by the second parameter must be held. +** The mutex is unlocked, and the calling thread simultaneously begins +** waiting for the condition variable to be signalled by another thread. +** After another thread signals the condition variable, the calling +** thread stops waiting, locks mutex eMutex and returns. The +** async_cond_signal() function is used to signal the condition variable. 
+** It is assumed that the mutex used by the thread calling async_cond_wait() +** is held by the caller of async_cond_signal() (otherwise there would be +** a race condition). +** +** It is guaranteed that no other thread will call async_cond_wait() when +** there is already a thread waiting on the condition variable. +** +** The async_sched_yield() function is called to suggest to the operating +** system that it would be a good time to shift the current thread off the +** CPU. The system will still work if this function is not implemented +** (it is not currently implemented for win32), but it might be marginally +** more efficient if it is. +*/ +static void async_mutex_enter(int eMutex); +static void async_mutex_leave(int eMutex); +static void async_cond_wait(int eCond, int eMutex); +static void async_cond_signal(int eCond); +static void async_sched_yield(void); + +/* +** There are also two definitions of the following. async_os_initialize() +** is called when the asynchronous VFS is first installed, and os_shutdown() +** is called when it is uninstalled (from within sqlite3async_shutdown()). +** +** For pthreads builds, both of these functions are no-ops. For win32, +** they provide an opportunity to initialize and finalize the required +** mutex and condition variables. +** +** If async_os_initialize() returns other than zero, then the initialization +** fails and SQLITE_ERROR is returned to the user. +*/ +static int async_os_initialize(void); +static void async_os_shutdown(void); + +/* Values for use as the 'eMutex' argument of the above functions. The +** integer values assigned to these constants are important for assert() +** statements that verify that mutexes are locked in the correct order. +** Specifically, it is unsafe to try to lock mutex N while holding a lock +** on mutex M if (M<=N). +*/ +#define ASYNC_MUTEX_LOCK 0 +#define ASYNC_MUTEX_QUEUE 1 +#define ASYNC_MUTEX_WRITER 2 + +/* Values for use as the 'eCond' argument of the above functions. */ +#define ASYNC_COND_QUEUE 0 + +/************************************************************************* +** Start of OS specific code. +*/ +#if SQLITE_OS_WIN || defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__) + +#include + +/* The following block contains the win32 specific code. */ + +#define mutex_held(X) (GetCurrentThreadId()==primitives.aHolder[X]) + +static struct AsyncPrimitives { + int isInit; + DWORD aHolder[3]; + CRITICAL_SECTION aMutex[3]; + HANDLE aCond[1]; +} primitives = { 0 }; + +static int async_os_initialize(void){ + if( !primitives.isInit ){ + primitives.aCond[0] = CreateEvent(NULL, TRUE, FALSE, 0); + if( primitives.aCond[0]==NULL ){ + return 1; + } + InitializeCriticalSection(&primitives.aMutex[0]); + InitializeCriticalSection(&primitives.aMutex[1]); + InitializeCriticalSection(&primitives.aMutex[2]); + primitives.isInit = 1; + } + return 0; +} +static void async_os_shutdown(void){ + if( primitives.isInit ){ + DeleteCriticalSection(&primitives.aMutex[0]); + DeleteCriticalSection(&primitives.aMutex[1]); + DeleteCriticalSection(&primitives.aMutex[2]); + CloseHandle(primitives.aCond[0]); + primitives.isInit = 0; + } +} + +/* The following block contains the Win32 specific code. 
*/ +static void async_mutex_enter(int eMutex){ + assert( eMutex==0 || eMutex==1 || eMutex==2 ); + assert( eMutex!=2 || (!mutex_held(0) && !mutex_held(1) && !mutex_held(2)) ); + assert( eMutex!=1 || (!mutex_held(0) && !mutex_held(1)) ); + assert( eMutex!=0 || (!mutex_held(0)) ); + EnterCriticalSection(&primitives.aMutex[eMutex]); + TESTONLY( primitives.aHolder[eMutex] = GetCurrentThreadId(); ) +} +static void async_mutex_leave(int eMutex){ + assert( eMutex==0 || eMutex==1 || eMutex==2 ); + assert( mutex_held(eMutex) ); + TESTONLY( primitives.aHolder[eMutex] = 0; ) + LeaveCriticalSection(&primitives.aMutex[eMutex]); +} +static void async_cond_wait(int eCond, int eMutex){ + ResetEvent(primitives.aCond[eCond]); + async_mutex_leave(eMutex); + WaitForSingleObject(primitives.aCond[eCond], INFINITE); + async_mutex_enter(eMutex); +} +static void async_cond_signal(int eCond){ + assert( mutex_held(ASYNC_MUTEX_QUEUE) ); + SetEvent(primitives.aCond[eCond]); +} +static void async_sched_yield(void){ + Sleep(0); +} +#else + +/* The following block contains the pthreads specific code. */ +#include +#include + +#define mutex_held(X) pthread_equal(primitives.aHolder[X], pthread_self()) + +static int async_os_initialize(void) {return 0;} +static void async_os_shutdown(void) {} + +static struct AsyncPrimitives { + pthread_mutex_t aMutex[3]; + pthread_cond_t aCond[1]; + pthread_t aHolder[3]; +} primitives = { + { PTHREAD_MUTEX_INITIALIZER, + PTHREAD_MUTEX_INITIALIZER, + PTHREAD_MUTEX_INITIALIZER + } , { + PTHREAD_COND_INITIALIZER + } , { 0, 0, 0 } +}; + +static void async_mutex_enter(int eMutex){ + assert( eMutex==0 || eMutex==1 || eMutex==2 ); + assert( eMutex!=2 || (!mutex_held(0) && !mutex_held(1) && !mutex_held(2)) ); + assert( eMutex!=1 || (!mutex_held(0) && !mutex_held(1)) ); + assert( eMutex!=0 || (!mutex_held(0)) ); + pthread_mutex_lock(&primitives.aMutex[eMutex]); + TESTONLY( primitives.aHolder[eMutex] = pthread_self(); ) +} +static void async_mutex_leave(int eMutex){ + assert( eMutex==0 || eMutex==1 || eMutex==2 ); + assert( mutex_held(eMutex) ); + TESTONLY( primitives.aHolder[eMutex] = 0; ) + pthread_mutex_unlock(&primitives.aMutex[eMutex]); +} +static void async_cond_wait(int eCond, int eMutex){ + assert( eMutex==0 || eMutex==1 || eMutex==2 ); + assert( mutex_held(eMutex) ); + TESTONLY( primitives.aHolder[eMutex] = 0; ) + pthread_cond_wait(&primitives.aCond[eCond], &primitives.aMutex[eMutex]); + TESTONLY( primitives.aHolder[eMutex] = pthread_self(); ) +} +static void async_cond_signal(int eCond){ + assert( mutex_held(ASYNC_MUTEX_QUEUE) ); + pthread_cond_signal(&primitives.aCond[eCond]); +} +static void async_sched_yield(void){ + sched_yield(); +} +#endif +/* +** End of OS specific code. +*************************************************************************/ + +#define assert_mutex_is_held(X) assert( mutex_held(X) ) + + +#ifndef SQLITE_ASYNC_TWO_FILEHANDLES +/* #define SQLITE_ASYNC_TWO_FILEHANDLES 0 */ +#define SQLITE_ASYNC_TWO_FILEHANDLES 1 +#endif + +/* +** State information is held in the static variable "async" defined +** as the following structure. +** +** Both async.ioError and async.nFile are protected by async.queueMutex. 
+*/ +static struct TestAsyncStaticData { + AsyncWrite *pQueueFirst; /* Next write operation to be processed */ + AsyncWrite *pQueueLast; /* Last write operation on the list */ + AsyncLock *pLock; /* Linked list of all AsyncLock structures */ + volatile int ioDelay; /* Extra delay between write operations */ + volatile int eHalt; /* One of the SQLITEASYNC_HALT_XXX values */ + volatile int bLockFiles; /* Current value of "lockfiles" parameter */ + int ioError; /* True if an IO error has occurred */ + int nFile; /* Number of open files (from sqlite pov) */ +} async = { 0,0,0,0,0,1,0,0 }; + +/* Possible values of AsyncWrite.op */ +#define ASYNC_NOOP 0 +#define ASYNC_WRITE 1 +#define ASYNC_SYNC 2 +#define ASYNC_TRUNCATE 3 +#define ASYNC_CLOSE 4 +#define ASYNC_DELETE 5 +#define ASYNC_OPENEXCLUSIVE 6 +#define ASYNC_UNLOCK 7 + +/* Names of opcodes. Used for debugging only. +** Make sure these stay in sync with the macros above! +*/ +static const char *azOpcodeName[] = { + "NOOP", "WRITE", "SYNC", "TRUNCATE", "CLOSE", "DELETE", "OPENEX", "UNLOCK" +}; + +/* +** Entries on the write-op queue are instances of the AsyncWrite +** structure, defined here. +** +** The interpretation of the iOffset and nByte variables varies depending +** on the value of AsyncWrite.op: +** +** ASYNC_NOOP: +** No values used. +** +** ASYNC_WRITE: +** iOffset -> Offset in file to write to. +** nByte -> Number of bytes of data to write (pointed to by zBuf). +** +** ASYNC_SYNC: +** nByte -> flags to pass to sqlite3OsSync(). +** +** ASYNC_TRUNCATE: +** iOffset -> Size to truncate file to. +** nByte -> Unused. +** +** ASYNC_CLOSE: +** iOffset -> Unused. +** nByte -> Unused. +** +** ASYNC_DELETE: +** iOffset -> Contains the "syncDir" flag. +** nByte -> Number of bytes of zBuf points to (file name). +** +** ASYNC_OPENEXCLUSIVE: +** iOffset -> Value of "delflag". +** nByte -> Number of bytes of zBuf points to (file name). +** +** ASYNC_UNLOCK: +** nByte -> Argument to sqlite3OsUnlock(). +** +** +** For an ASYNC_WRITE operation, zBuf points to the data to write to the file. +** This space is sqlite3_malloc()d along with the AsyncWrite structure in a +** single blob, so is deleted when sqlite3_free() is called on the parent +** structure. +*/ +struct AsyncWrite { + AsyncFileData *pFileData; /* File to write data to or sync */ + int op; /* One of ASYNC_xxx etc. */ + sqlite_int64 iOffset; /* See above */ + int nByte; /* See above */ + char *zBuf; /* Data to write to file (or NULL if op!=ASYNC_WRITE) */ + AsyncWrite *pNext; /* Next write operation (to any file) */ +}; + +/* +** An instance of this structure is created for each distinct open file +** (i.e. if two handles are opened on the one file, only one of these +** structures is allocated) and stored in the async.aLock hash table. The +** keys for async.aLock are the full pathnames of the opened files. +** +** AsyncLock.pList points to the head of a linked list of AsyncFileLock +** structures, one for each handle currently open on the file. +** +** If the opened file is not a main-database (the SQLITE_OPEN_MAIN_DB is +** not passed to the sqlite3OsOpen() call), or if async.bLockFiles is +** false, variables AsyncLock.pFile and AsyncLock.eLock are never used. +** Otherwise, pFile is a file handle opened on the file in question and +** used to obtain the file-system locks required by database connections +** within this process. +** +** See comments above the asyncLock() function for more details on +** the implementation of database locking used by this backend. 
+*/ +struct AsyncLock { + char *zFile; + int nFile; + sqlite3_file *pFile; + int eLock; + AsyncFileLock *pList; + AsyncLock *pNext; /* Next in linked list headed by async.pLock */ +}; + +/* +** An instance of the following structure is allocated along with each +** AsyncFileData structure (see AsyncFileData.lock), but is only used if the +** file was opened with the SQLITE_OPEN_MAIN_DB. +*/ +struct AsyncFileLock { + int eLock; /* Internally visible lock state (sqlite pov) */ + int eAsyncLock; /* Lock-state with write-queue unlock */ + AsyncFileLock *pNext; +}; + +/* +** The AsyncFile structure is a subclass of sqlite3_file used for +** asynchronous IO. +** +** All of the actual data for the structure is stored in the structure +** pointed to by AsyncFile.pData, which is allocated as part of the +** sqlite3OsOpen() using sqlite3_malloc(). The reason for this is that the +** lifetime of the AsyncFile structure is ended by the caller after OsClose() +** is called, but the data in AsyncFileData may be required by the +** writer thread after that point. +*/ +struct AsyncFile { + sqlite3_io_methods *pMethod; + AsyncFileData *pData; +}; +struct AsyncFileData { + char *zName; /* Underlying OS filename - used for debugging */ + int nName; /* Number of characters in zName */ + sqlite3_file *pBaseRead; /* Read handle to the underlying Os file */ + sqlite3_file *pBaseWrite; /* Write handle to the underlying Os file */ + AsyncFileLock lock; /* Lock state for this handle */ + AsyncLock *pLock; /* AsyncLock object for this file system entry */ + AsyncWrite closeOp; /* Preallocated close operation */ +}; + +/* +** Add an entry to the end of the global write-op list. pWrite should point +** to an AsyncWrite structure allocated using sqlite3_malloc(). The writer +** thread will call sqlite3_free() to free the structure after the specified +** operation has been completed. +** +** Once an AsyncWrite structure has been added to the list, it becomes the +** property of the writer thread and must not be read or modified by the +** caller. +*/ +static void addAsyncWrite(AsyncWrite *pWrite){ + /* We must hold the queue mutex in order to modify the queue pointers */ + if( pWrite->op!=ASYNC_UNLOCK ){ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + } + + /* Add the record to the end of the write-op queue */ + assert( !pWrite->pNext ); + if( async.pQueueLast ){ + assert( async.pQueueFirst ); + async.pQueueLast->pNext = pWrite; + }else{ + async.pQueueFirst = pWrite; + } + async.pQueueLast = pWrite; + ASYNC_TRACE(("PUSH %p (%s %s %d)\n", pWrite, azOpcodeName[pWrite->op], + pWrite->pFileData ? pWrite->pFileData->zName : "-", pWrite->iOffset)); + + if( pWrite->op==ASYNC_CLOSE ){ + async.nFile--; + } + + /* The writer thread might have been idle because there was nothing + ** on the write-op queue for it to do. So wake it up. */ + async_cond_signal(ASYNC_COND_QUEUE); + + /* Drop the queue mutex */ + if( pWrite->op!=ASYNC_UNLOCK ){ + async_mutex_leave(ASYNC_MUTEX_QUEUE); + } +} + +/* +** Increment async.nFile in a thread-safe manner. +*/ +static void incrOpenFileCount(void){ + /* We must hold the queue mutex in order to modify async.nFile */ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + if( async.nFile==0 ){ + async.ioError = SQLITE_OK; + } + async.nFile++; + async_mutex_leave(ASYNC_MUTEX_QUEUE); +} + +/* +** This is a utility function to allocate and populate a new AsyncWrite +** structure and insert it (via addAsyncWrite() ) into the global list. 
+*/ +static int addNewAsyncWrite( + AsyncFileData *pFileData, + int op, + sqlite3_int64 iOffset, + int nByte, + const char *zByte +){ + AsyncWrite *p; + if( op!=ASYNC_CLOSE && async.ioError ){ + return async.ioError; + } + p = sqlite3_malloc(sizeof(AsyncWrite) + (zByte?nByte:0)); + if( !p ){ + /* The upper layer does not expect operations like OsWrite() to + ** return SQLITE_NOMEM. This is partly because under normal conditions + ** SQLite is required to do rollback without calling malloc(). So + ** if malloc() fails here, treat it as an I/O error. The above + ** layer knows how to handle that. + */ + return SQLITE_IOERR; + } + p->op = op; + p->iOffset = iOffset; + p->nByte = nByte; + p->pFileData = pFileData; + p->pNext = 0; + if( zByte ){ + p->zBuf = (char *)&p[1]; + memcpy(p->zBuf, zByte, nByte); + }else{ + p->zBuf = 0; + } + addAsyncWrite(p); + return SQLITE_OK; +} + +/* +** Close the file. This just adds an entry to the write-op list, the file is +** not actually closed. +*/ +static int asyncClose(sqlite3_file *pFile){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + + /* Unlock the file, if it is locked */ + async_mutex_enter(ASYNC_MUTEX_LOCK); + p->lock.eLock = 0; + async_mutex_leave(ASYNC_MUTEX_LOCK); + + addAsyncWrite(&p->closeOp); + return SQLITE_OK; +} + +/* +** Implementation of sqlite3OsWrite() for asynchronous files. Instead of +** writing to the underlying file, this function adds an entry to the end of +** the global AsyncWrite list. Either SQLITE_OK or SQLITE_NOMEM may be +** returned. +*/ +static int asyncWrite( + sqlite3_file *pFile, + const void *pBuf, + int amt, + sqlite3_int64 iOff +){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + return addNewAsyncWrite(p, ASYNC_WRITE, iOff, amt, pBuf); +} + +/* +** Read data from the file. First we read from the filesystem, then adjust +** the contents of the buffer based on ASYNC_WRITE operations in the +** write-op queue. +** +** This method holds the mutex from start to finish. +*/ +static int asyncRead( + sqlite3_file *pFile, + void *zOut, + int iAmt, + sqlite3_int64 iOffset +){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + int rc = SQLITE_OK; + sqlite3_int64 filesize; + int nRead; + sqlite3_file *pBase = p->pBaseRead; + + /* Grab the write queue mutex for the duration of the call */ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + + /* If an I/O error has previously occurred in this virtual file + ** system, then all subsequent operations fail. 
+ */ + if( async.ioError!=SQLITE_OK ){ + rc = async.ioError; + goto asyncread_out; + } + + if( pBase->pMethods ){ + rc = pBase->pMethods->xFileSize(pBase, &filesize); + if( rc!=SQLITE_OK ){ + goto asyncread_out; + } + nRead = (int)MIN(filesize - iOffset, iAmt); + if( nRead>0 ){ + rc = pBase->pMethods->xRead(pBase, zOut, nRead, iOffset); + ASYNC_TRACE(("READ %s %d bytes at %d\n", p->zName, nRead, iOffset)); + } + } + + if( rc==SQLITE_OK ){ + AsyncWrite *pWrite; + char *zName = p->zName; + + for(pWrite=async.pQueueFirst; pWrite; pWrite = pWrite->pNext){ + if( pWrite->op==ASYNC_WRITE && ( + (pWrite->pFileData==p) || + (zName && pWrite->pFileData->zName==zName) + )){ + sqlite3_int64 iBeginOut = (pWrite->iOffset-iOffset); + sqlite3_int64 iBeginIn = -iBeginOut; + int nCopy; + + if( iBeginIn<0 ) iBeginIn = 0; + if( iBeginOut<0 ) iBeginOut = 0; + nCopy = (int)MIN(pWrite->nByte-iBeginIn, iAmt-iBeginOut); + + if( nCopy>0 ){ + memcpy(&((char *)zOut)[iBeginOut], &pWrite->zBuf[iBeginIn], nCopy); + ASYNC_TRACE(("OVERREAD %d bytes at %d\n", nCopy, iBeginOut+iOffset)); + } + } + } + } + +asyncread_out: + async_mutex_leave(ASYNC_MUTEX_QUEUE); + return rc; +} + +/* +** Truncate the file to nByte bytes in length. This just adds an entry to +** the write-op list, no IO actually takes place. +*/ +static int asyncTruncate(sqlite3_file *pFile, sqlite3_int64 nByte){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + return addNewAsyncWrite(p, ASYNC_TRUNCATE, nByte, 0, 0); +} + +/* +** Sync the file. This just adds an entry to the write-op list, the +** sync() is done later by sqlite3_async_flush(). +*/ +static int asyncSync(sqlite3_file *pFile, int flags){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + return addNewAsyncWrite(p, ASYNC_SYNC, 0, flags, 0); +} + +/* +** Read the size of the file. First we read the size of the file system +** entry, then adjust for any ASYNC_WRITE or ASYNC_TRUNCATE operations +** currently in the write-op list. +** +** This method holds the mutex from start to finish. +*/ +int asyncFileSize(sqlite3_file *pFile, sqlite3_int64 *piSize){ + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + int rc = SQLITE_OK; + sqlite3_int64 s = 0; + sqlite3_file *pBase; + + async_mutex_enter(ASYNC_MUTEX_QUEUE); + + /* Read the filesystem size from the base file. If pMethods is NULL, this + ** means the file hasn't been opened yet. In this case all relevant data + ** must be in the write-op queue anyway, so we can omit reading from the + ** file-system. + */ + pBase = p->pBaseRead; + if( pBase->pMethods ){ + rc = pBase->pMethods->xFileSize(pBase, &s); + } + + if( rc==SQLITE_OK ){ + AsyncWrite *pWrite; + for(pWrite=async.pQueueFirst; pWrite; pWrite = pWrite->pNext){ + if( pWrite->op==ASYNC_DELETE + && p->zName + && strcmp(p->zName, pWrite->zBuf)==0 + ){ + s = 0; + }else if( pWrite->pFileData && ( + (pWrite->pFileData==p) + || (p->zName && pWrite->pFileData->zName==p->zName) + )){ + switch( pWrite->op ){ + case ASYNC_WRITE: + s = MAX(pWrite->iOffset + (sqlite3_int64)(pWrite->nByte), s); + break; + case ASYNC_TRUNCATE: + s = MIN(s, pWrite->iOffset); + break; + } + } + } + *piSize = s; + } + async_mutex_leave(ASYNC_MUTEX_QUEUE); + return rc; +} + +/* +** Lock or unlock the actual file-system entry. 
+*/ +static int getFileLock(AsyncLock *pLock){ + int rc = SQLITE_OK; + AsyncFileLock *pIter; + int eRequired = 0; + + if( pLock->pFile ){ + for(pIter=pLock->pList; pIter; pIter=pIter->pNext){ + assert(pIter->eAsyncLock>=pIter->eLock); + if( pIter->eAsyncLock>eRequired ){ + eRequired = pIter->eAsyncLock; + assert(eRequired>=0 && eRequired<=SQLITE_LOCK_EXCLUSIVE); + } + } + + if( eRequired>pLock->eLock ){ + rc = pLock->pFile->pMethods->xLock(pLock->pFile, eRequired); + if( rc==SQLITE_OK ){ + pLock->eLock = eRequired; + } + } + else if( eRequiredeLock && eRequired<=SQLITE_LOCK_SHARED ){ + rc = pLock->pFile->pMethods->xUnlock(pLock->pFile, eRequired); + if( rc==SQLITE_OK ){ + pLock->eLock = eRequired; + } + } + } + + return rc; +} + +/* +** Return the AsyncLock structure from the global async.pLock list +** associated with the file-system entry identified by path zName +** (a string of nName bytes). If no such structure exists, return 0. +*/ +static AsyncLock *findLock(const char *zName, int nName){ + AsyncLock *p = async.pLock; + while( p && (p->nFile!=nName || memcmp(p->zFile, zName, nName)) ){ + p = p->pNext; + } + return p; +} + +/* +** The following two methods - asyncLock() and asyncUnlock() - are used +** to obtain and release locks on database files opened with the +** asynchronous backend. +*/ +static int asyncLock(sqlite3_file *pFile, int eLock){ + int rc = SQLITE_OK; + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + + if( p->zName ){ + async_mutex_enter(ASYNC_MUTEX_LOCK); + if( p->lock.eLockpLock; + AsyncFileLock *pIter; + assert(pLock && pLock->pList); + for(pIter=pLock->pList; pIter; pIter=pIter->pNext){ + if( pIter!=&p->lock && ( + (eLock==SQLITE_LOCK_EXCLUSIVE && pIter->eLock>=SQLITE_LOCK_SHARED) || + (eLock==SQLITE_LOCK_PENDING && pIter->eLock>=SQLITE_LOCK_RESERVED) || + (eLock==SQLITE_LOCK_RESERVED && pIter->eLock>=SQLITE_LOCK_RESERVED) || + (eLock==SQLITE_LOCK_SHARED && pIter->eLock>=SQLITE_LOCK_PENDING) + )){ + rc = SQLITE_BUSY; + } + } + if( rc==SQLITE_OK ){ + p->lock.eLock = eLock; + p->lock.eAsyncLock = MAX(p->lock.eAsyncLock, eLock); + } + assert(p->lock.eAsyncLock>=p->lock.eLock); + if( rc==SQLITE_OK ){ + rc = getFileLock(pLock); + } + } + async_mutex_leave(ASYNC_MUTEX_LOCK); + } + + ASYNC_TRACE(("LOCK %d (%s) rc=%d\n", eLock, p->zName, rc)); + return rc; +} +static int asyncUnlock(sqlite3_file *pFile, int eLock){ + int rc = SQLITE_OK; + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + if( p->zName ){ + AsyncFileLock *pLock = &p->lock; + async_mutex_enter(ASYNC_MUTEX_QUEUE); + async_mutex_enter(ASYNC_MUTEX_LOCK); + pLock->eLock = MIN(pLock->eLock, eLock); + rc = addNewAsyncWrite(p, ASYNC_UNLOCK, 0, eLock, 0); + async_mutex_leave(ASYNC_MUTEX_LOCK); + async_mutex_leave(ASYNC_MUTEX_QUEUE); + } + return rc; +} + +/* +** This function is called when the pager layer first opens a database file +** and is checking for a hot-journal. +*/ +static int asyncCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + int ret = 0; + AsyncFileLock *pIter; + AsyncFileData *p = ((AsyncFile *)pFile)->pData; + + async_mutex_enter(ASYNC_MUTEX_LOCK); + for(pIter=p->pLock->pList; pIter; pIter=pIter->pNext){ + if( pIter->eLock>=SQLITE_LOCK_RESERVED ){ + ret = 1; + break; + } + } + async_mutex_leave(ASYNC_MUTEX_LOCK); + + ASYNC_TRACE(("CHECK-LOCK %d (%s)\n", ret, p->zName)); + *pResOut = ret; + return SQLITE_OK; +} + +/* +** sqlite3_file_control() implementation. 
+*/ +static int asyncFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + async_mutex_enter(ASYNC_MUTEX_LOCK); + *(int*)pArg = ((AsyncFile*)id)->pData->lock.eLock; + async_mutex_leave(ASYNC_MUTEX_LOCK); + return SQLITE_OK; + } + } + return SQLITE_ERROR; +} + +/* +** Return the device characteristics and sector-size of the device. It +** is tricky to implement these correctly, as this backend might +** not have an open file handle at this point. +*/ +static int asyncSectorSize(sqlite3_file *pFile){ + UNUSED_PARAMETER(pFile); + return 512; +} +static int asyncDeviceCharacteristics(sqlite3_file *pFile){ + UNUSED_PARAMETER(pFile); + return 0; +} + +static int unlinkAsyncFile(AsyncFileData *pData){ + AsyncFileLock **ppIter; + int rc = SQLITE_OK; + + if( pData->zName ){ + AsyncLock *pLock = pData->pLock; + for(ppIter=&pLock->pList; *ppIter; ppIter=&((*ppIter)->pNext)){ + if( (*ppIter)==&pData->lock ){ + *ppIter = pData->lock.pNext; + break; + } + } + if( !pLock->pList ){ + AsyncLock **pp; + if( pLock->pFile ){ + pLock->pFile->pMethods->xClose(pLock->pFile); + } + for(pp=&async.pLock; *pp!=pLock; pp=&((*pp)->pNext)); + *pp = pLock->pNext; + sqlite3_free(pLock); + }else{ + rc = getFileLock(pLock); + } + } + + return rc; +} + +/* +** The parameter passed to this function is a copy of a 'flags' parameter +** passed to this modules xOpen() method. This function returns true +** if the file should be opened asynchronously, or false if it should +** be opened immediately. +** +** If the file is to be opened asynchronously, then asyncOpen() will add +** an entry to the event queue and the file will not actually be opened +** until the event is processed. Otherwise, the file is opened directly +** by the caller. +*/ +static int doAsynchronousOpen(int flags){ + return (flags&SQLITE_OPEN_CREATE) && ( + (flags&SQLITE_OPEN_MAIN_JOURNAL) || + (flags&SQLITE_OPEN_TEMP_JOURNAL) || + (flags&SQLITE_OPEN_DELETEONCLOSE) + ); +} + +/* +** Open a file. 
+*/ +static int asyncOpen( + sqlite3_vfs *pAsyncVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + static sqlite3_io_methods async_methods = { + 1, /* iVersion */ + asyncClose, /* xClose */ + asyncRead, /* xRead */ + asyncWrite, /* xWrite */ + asyncTruncate, /* xTruncate */ + asyncSync, /* xSync */ + asyncFileSize, /* xFileSize */ + asyncLock, /* xLock */ + asyncUnlock, /* xUnlock */ + asyncCheckReservedLock, /* xCheckReservedLock */ + asyncFileControl, /* xFileControl */ + asyncSectorSize, /* xSectorSize */ + asyncDeviceCharacteristics /* xDeviceCharacteristics */ + }; + + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + AsyncFile *p = (AsyncFile *)pFile; + int nName = 0; + int rc = SQLITE_OK; + int nByte; + AsyncFileData *pData; + AsyncLock *pLock = 0; + char *z; + int isAsyncOpen = doAsynchronousOpen(flags); + + /* If zName is NULL, then the upper layer is requesting an anonymous file */ + if( zName ){ + nName = (int)strlen(zName)+1; + } + + nByte = ( + sizeof(AsyncFileData) + /* AsyncFileData structure */ + 2 * pVfs->szOsFile + /* AsyncFileData.pBaseRead and pBaseWrite */ + nName /* AsyncFileData.zName */ + ); + z = sqlite3_malloc(nByte); + if( !z ){ + return SQLITE_NOMEM; + } + memset(z, 0, nByte); + pData = (AsyncFileData*)z; + z += sizeof(pData[0]); + pData->pBaseRead = (sqlite3_file*)z; + z += pVfs->szOsFile; + pData->pBaseWrite = (sqlite3_file*)z; + pData->closeOp.pFileData = pData; + pData->closeOp.op = ASYNC_CLOSE; + + if( zName ){ + z += pVfs->szOsFile; + pData->zName = z; + pData->nName = nName; + memcpy(pData->zName, zName, nName); + } + + if( !isAsyncOpen ){ + int flagsout; + rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseRead, flags, &flagsout); + if( rc==SQLITE_OK && (flagsout&SQLITE_OPEN_READWRITE) ){ + rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseWrite, flags, 0); + } + if( pOutFlags ){ + *pOutFlags = flagsout; + } + } + + async_mutex_enter(ASYNC_MUTEX_LOCK); + + if( zName && rc==SQLITE_OK ){ + pLock = findLock(pData->zName, pData->nName); + if( !pLock ){ + int nByte = pVfs->szOsFile + sizeof(AsyncLock) + pData->nName + 1; + pLock = (AsyncLock *)sqlite3_malloc(nByte); + if( pLock ){ + memset(pLock, 0, nByte); + if( async.bLockFiles && (flags&SQLITE_OPEN_MAIN_DB) ){ + pLock->pFile = (sqlite3_file *)&pLock[1]; + rc = pVfs->xOpen(pVfs, pData->zName, pLock->pFile, flags, 0); + if( rc!=SQLITE_OK ){ + sqlite3_free(pLock); + pLock = 0; + } + } + if( pLock ){ + pLock->nFile = pData->nName; + pLock->zFile = &((char *)(&pLock[1]))[pVfs->szOsFile]; + memcpy(pLock->zFile, pData->zName, pLock->nFile); + pLock->pNext = async.pLock; + async.pLock = pLock; + } + }else{ + rc = SQLITE_NOMEM; + } + } + } + + if( rc==SQLITE_OK ){ + p->pMethod = &async_methods; + p->pData = pData; + + /* Link AsyncFileData.lock into the linked list of + ** AsyncFileLock structures for this file. 
+ */ + if( zName ){ + pData->lock.pNext = pLock->pList; + pLock->pList = &pData->lock; + pData->zName = pLock->zFile; + } + }else{ + if( pData->pBaseRead->pMethods ){ + pData->pBaseRead->pMethods->xClose(pData->pBaseRead); + } + if( pData->pBaseWrite->pMethods ){ + pData->pBaseWrite->pMethods->xClose(pData->pBaseWrite); + } + sqlite3_free(pData); + } + + async_mutex_leave(ASYNC_MUTEX_LOCK); + + if( rc==SQLITE_OK ){ + incrOpenFileCount(); + pData->pLock = pLock; + } + + if( rc==SQLITE_OK && isAsyncOpen ){ + rc = addNewAsyncWrite(pData, ASYNC_OPENEXCLUSIVE, (sqlite3_int64)flags,0,0); + if( rc==SQLITE_OK ){ + if( pOutFlags ) *pOutFlags = flags; + }else{ + async_mutex_enter(ASYNC_MUTEX_LOCK); + unlinkAsyncFile(pData); + async_mutex_leave(ASYNC_MUTEX_LOCK); + sqlite3_free(pData); + } + } + if( rc!=SQLITE_OK ){ + p->pMethod = 0; + } + return rc; +} + +/* +** Implementation of sqlite3OsDelete. Add an entry to the end of the +** write-op queue to perform the delete. +*/ +static int asyncDelete(sqlite3_vfs *pAsyncVfs, const char *z, int syncDir){ + UNUSED_PARAMETER(pAsyncVfs); + return addNewAsyncWrite(0, ASYNC_DELETE, syncDir, (int)strlen(z)+1, z); +} + +/* +** Implementation of sqlite3OsAccess. This method holds the mutex from +** start to finish. +*/ +static int asyncAccess( + sqlite3_vfs *pAsyncVfs, + const char *zName, + int flags, + int *pResOut +){ + int rc; + int ret; + AsyncWrite *p; + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + + assert(flags==SQLITE_ACCESS_READWRITE + || flags==SQLITE_ACCESS_READ + || flags==SQLITE_ACCESS_EXISTS + ); + + async_mutex_enter(ASYNC_MUTEX_QUEUE); + rc = pVfs->xAccess(pVfs, zName, flags, &ret); + if( rc==SQLITE_OK && flags==SQLITE_ACCESS_EXISTS ){ + for(p=async.pQueueFirst; p; p = p->pNext){ + if( p->op==ASYNC_DELETE && 0==strcmp(p->zBuf, zName) ){ + ret = 0; + }else if( p->op==ASYNC_OPENEXCLUSIVE + && p->pFileData->zName + && 0==strcmp(p->pFileData->zName, zName) + ){ + ret = 1; + } + } + } + ASYNC_TRACE(("ACCESS(%s): %s = %d\n", + flags==SQLITE_ACCESS_READWRITE?"read-write": + flags==SQLITE_ACCESS_READ?"read":"exists" + , zName, ret) + ); + async_mutex_leave(ASYNC_MUTEX_QUEUE); + *pResOut = ret; + return rc; +} + +/* +** Fill in zPathOut with the full path to the file identified by zPath. +*/ +static int asyncFullPathname( + sqlite3_vfs *pAsyncVfs, + const char *zPath, + int nPathOut, + char *zPathOut +){ + int rc; + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + rc = pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); + + /* Because of the way intra-process file locking works, this backend + ** needs to return a canonical path. The following block assumes the + ** file-system uses unix style paths. 
+ */ + if( rc==SQLITE_OK ){ + int i, j; + int n = nPathOut; + char *z = zPathOut; + while( n>1 && z[n-1]=='/' ){ n--; } + for(i=j=0; i0 && z[j-1]!='/' ){ j--; } + if( j>0 ){ j--; } + i += 2; + continue; + } + } + z[j++] = z[i]; + } + z[j] = 0; + } + + return rc; +} +static void *asyncDlOpen(sqlite3_vfs *pAsyncVfs, const char *zPath){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + return pVfs->xDlOpen(pVfs, zPath); +} +static void asyncDlError(sqlite3_vfs *pAsyncVfs, int nByte, char *zErrMsg){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + pVfs->xDlError(pVfs, nByte, zErrMsg); +} +static void (*asyncDlSym( + sqlite3_vfs *pAsyncVfs, + void *pHandle, + const char *zSymbol +))(void){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + return pVfs->xDlSym(pVfs, pHandle, zSymbol); +} +static void asyncDlClose(sqlite3_vfs *pAsyncVfs, void *pHandle){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + pVfs->xDlClose(pVfs, pHandle); +} +static int asyncRandomness(sqlite3_vfs *pAsyncVfs, int nByte, char *zBufOut){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + return pVfs->xRandomness(pVfs, nByte, zBufOut); +} +static int asyncSleep(sqlite3_vfs *pAsyncVfs, int nMicro){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + return pVfs->xSleep(pVfs, nMicro); +} +static int asyncCurrentTime(sqlite3_vfs *pAsyncVfs, double *pTimeOut){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; + return pVfs->xCurrentTime(pVfs, pTimeOut); +} + +static sqlite3_vfs async_vfs = { + 1, /* iVersion */ + sizeof(AsyncFile), /* szOsFile */ + 0, /* mxPathname */ + 0, /* pNext */ + SQLITEASYNC_VFSNAME, /* zName */ + 0, /* pAppData */ + asyncOpen, /* xOpen */ + asyncDelete, /* xDelete */ + asyncAccess, /* xAccess */ + asyncFullPathname, /* xFullPathname */ + asyncDlOpen, /* xDlOpen */ + asyncDlError, /* xDlError */ + asyncDlSym, /* xDlSym */ + asyncDlClose, /* xDlClose */ + asyncRandomness, /* xDlError */ + asyncSleep, /* xDlSym */ + asyncCurrentTime /* xDlClose */ +}; + +/* +** This procedure runs in a separate thread, reading messages off of the +** write queue and processing them one by one. +** +** If async.writerHaltNow is true, then this procedure exits +** after processing a single message. +** +** If async.writerHaltWhenIdle is true, then this procedure exits when +** the write queue is empty. +** +** If both of the above variables are false, this procedure runs +** indefinately, waiting for operations to be added to the write queue +** and processing them in the order in which they arrive. +** +** An artifical delay of async.ioDelay milliseconds is inserted before +** each write operation in order to simulate the effect of a slow disk. +** +** Only one instance of this procedure may be running at a time. +*/ +static void asyncWriterThread(void){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)(async_vfs.pAppData); + AsyncWrite *p = 0; + int rc = SQLITE_OK; + int holdingMutex = 0; + + async_mutex_enter(ASYNC_MUTEX_WRITER); + + while( async.eHalt!=SQLITEASYNC_HALT_NOW ){ + int doNotFree = 0; + sqlite3_file *pBase = 0; + + if( !holdingMutex ){ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + } + while( (p = async.pQueueFirst)==0 ){ + if( async.eHalt!=SQLITEASYNC_HALT_NEVER ){ + async_mutex_leave(ASYNC_MUTEX_QUEUE); + break; + }else{ + ASYNC_TRACE(("IDLE\n")); + async_cond_wait(ASYNC_COND_QUEUE, ASYNC_MUTEX_QUEUE); + ASYNC_TRACE(("WAKEUP\n")); + } + } + if( p==0 ) break; + holdingMutex = 1; + + /* Right now this thread is holding the mutex on the write-op queue. 
+ ** Variable 'p' points to the first entry in the write-op queue. In + ** the general case, we hold on to the mutex for the entire body of + ** the loop. + ** + ** However in the cases enumerated below, we relinquish the mutex, + ** perform the IO, and then re-request the mutex before removing 'p' from + ** the head of the write-op queue. The idea is to increase concurrency with + ** sqlite threads. + ** + ** * An ASYNC_CLOSE operation. + ** * An ASYNC_OPENEXCLUSIVE operation. For this one, we relinquish + ** the mutex, call the underlying xOpenExclusive() function, then + ** re-aquire the mutex before seting the AsyncFile.pBaseRead + ** variable. + ** * ASYNC_SYNC and ASYNC_WRITE operations, if + ** SQLITE_ASYNC_TWO_FILEHANDLES was set at compile time and two + ** file-handles are open for the particular file being "synced". + */ + if( async.ioError!=SQLITE_OK && p->op!=ASYNC_CLOSE ){ + p->op = ASYNC_NOOP; + } + if( p->pFileData ){ + pBase = p->pFileData->pBaseWrite; + if( + p->op==ASYNC_CLOSE || + p->op==ASYNC_OPENEXCLUSIVE || + (pBase->pMethods && (p->op==ASYNC_SYNC || p->op==ASYNC_WRITE) ) + ){ + async_mutex_leave(ASYNC_MUTEX_QUEUE); + holdingMutex = 0; + } + if( !pBase->pMethods ){ + pBase = p->pFileData->pBaseRead; + } + } + + switch( p->op ){ + case ASYNC_NOOP: + break; + + case ASYNC_WRITE: + assert( pBase ); + ASYNC_TRACE(("WRITE %s %d bytes at %d\n", + p->pFileData->zName, p->nByte, p->iOffset)); + rc = pBase->pMethods->xWrite(pBase, (void *)(p->zBuf), p->nByte, p->iOffset); + break; + + case ASYNC_SYNC: + assert( pBase ); + ASYNC_TRACE(("SYNC %s\n", p->pFileData->zName)); + rc = pBase->pMethods->xSync(pBase, p->nByte); + break; + + case ASYNC_TRUNCATE: + assert( pBase ); + ASYNC_TRACE(("TRUNCATE %s to %d bytes\n", + p->pFileData->zName, p->iOffset)); + rc = pBase->pMethods->xTruncate(pBase, p->iOffset); + break; + + case ASYNC_CLOSE: { + AsyncFileData *pData = p->pFileData; + ASYNC_TRACE(("CLOSE %s\n", p->pFileData->zName)); + if( pData->pBaseWrite->pMethods ){ + pData->pBaseWrite->pMethods->xClose(pData->pBaseWrite); + } + if( pData->pBaseRead->pMethods ){ + pData->pBaseRead->pMethods->xClose(pData->pBaseRead); + } + + /* Unlink AsyncFileData.lock from the linked list of AsyncFileLock + ** structures for this file. Obtain the async.lockMutex mutex + ** before doing so. + */ + async_mutex_enter(ASYNC_MUTEX_LOCK); + rc = unlinkAsyncFile(pData); + async_mutex_leave(ASYNC_MUTEX_LOCK); + + if( !holdingMutex ){ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + holdingMutex = 1; + } + assert_mutex_is_held(ASYNC_MUTEX_QUEUE); + async.pQueueFirst = p->pNext; + sqlite3_free(pData); + doNotFree = 1; + break; + } + + case ASYNC_UNLOCK: { + AsyncWrite *pIter; + AsyncFileData *pData = p->pFileData; + int eLock = p->nByte; + + /* When a file is locked by SQLite using the async backend, it is + ** locked within the 'real' file-system synchronously. When it is + ** unlocked, an ASYNC_UNLOCK event is added to the write-queue to + ** unlock the file asynchronously. The design of the async backend + ** requires that the 'real' file-system file be locked from the + ** time that SQLite first locks it (and probably reads from it) + ** until all asynchronous write events that were scheduled before + ** SQLite unlocked the file have been processed. + ** + ** This is more complex if SQLite locks and unlocks the file multiple + ** times in quick succession. For example, if SQLite does: + ** + ** lock, write, unlock, lock, write, unlock + ** + ** Each "lock" operation locks the file immediately. 
Each "write" + ** and "unlock" operation adds an event to the event queue. If the + ** second "lock" operation is performed before the first "unlock" + ** operation has been processed asynchronously, then the first + ** "unlock" cannot be safely processed as is, since this would mean + ** the file was unlocked when the second "write" operation is + ** processed. To work around this, when processing an ASYNC_UNLOCK + ** operation, SQLite: + ** + ** 1) Unlocks the file to the minimum of the argument passed to + ** the xUnlock() call and the current lock from SQLite's point + ** of view, and + ** + ** 2) Only unlocks the file at all if this event is the last + ** ASYNC_UNLOCK event on this file in the write-queue. + */ + assert( holdingMutex==1 ); + assert( async.pQueueFirst==p ); + for(pIter=async.pQueueFirst->pNext; pIter; pIter=pIter->pNext){ + if( pIter->pFileData==pData && pIter->op==ASYNC_UNLOCK ) break; + } + if( !pIter ){ + async_mutex_enter(ASYNC_MUTEX_LOCK); + pData->lock.eAsyncLock = MIN( + pData->lock.eAsyncLock, MAX(pData->lock.eLock, eLock) + ); + assert(pData->lock.eAsyncLock>=pData->lock.eLock); + rc = getFileLock(pData->pLock); + async_mutex_leave(ASYNC_MUTEX_LOCK); + } + break; + } + + case ASYNC_DELETE: + ASYNC_TRACE(("DELETE %s\n", p->zBuf)); + rc = pVfs->xDelete(pVfs, p->zBuf, (int)p->iOffset); + break; + + case ASYNC_OPENEXCLUSIVE: { + int flags = (int)p->iOffset; + AsyncFileData *pData = p->pFileData; + ASYNC_TRACE(("OPEN %s flags=%d\n", p->zBuf, (int)p->iOffset)); + assert(pData->pBaseRead->pMethods==0 && pData->pBaseWrite->pMethods==0); + rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseRead, flags, 0); + assert( holdingMutex==0 ); + async_mutex_enter(ASYNC_MUTEX_QUEUE); + holdingMutex = 1; + break; + } + + default: assert(!"Illegal value for AsyncWrite.op"); + } + + /* If we didn't hang on to the mutex during the IO op, obtain it now + ** so that the AsyncWrite structure can be safely removed from the + ** global write-op queue. + */ + if( !holdingMutex ){ + async_mutex_enter(ASYNC_MUTEX_QUEUE); + holdingMutex = 1; + } + /* ASYNC_TRACE(("UNLINK %p\n", p)); */ + if( p==async.pQueueLast ){ + async.pQueueLast = 0; + } + if( !doNotFree ){ + assert_mutex_is_held(ASYNC_MUTEX_QUEUE); + async.pQueueFirst = p->pNext; + sqlite3_free(p); + } + assert( holdingMutex ); + + /* An IO error has occurred. We cannot report the error back to the + ** connection that requested the I/O since the error happened + ** asynchronously. The connection has already moved on. There + ** really is nobody to report the error to. + ** + ** The file for which the error occurred may have been a database or + ** journal file. Regardless, none of the currently queued operations + ** associated with the same database should now be performed. Nor should + ** any subsequently requested IO on either a database or journal file + ** handle for the same database be accepted until the main database + ** file handle has been closed and reopened. + ** + ** Furthermore, no further IO should be queued or performed on any file + ** handle associated with a database that may have been part of a + ** multi-file transaction that included the database associated with + ** the IO error (i.e. a database ATTACHed to the same handle at some + ** point in time). 
+ */ + if( rc!=SQLITE_OK ){ + async.ioError = rc; + } + + if( async.ioError && !async.pQueueFirst ){ + async_mutex_enter(ASYNC_MUTEX_LOCK); + if( 0==async.pLock ){ + async.ioError = SQLITE_OK; + } + async_mutex_leave(ASYNC_MUTEX_LOCK); + } + + /* Drop the queue mutex before continuing to the next write operation + ** in order to give other threads a chance to work with the write queue. + */ + if( !async.pQueueFirst || !async.ioError ){ + async_mutex_leave(ASYNC_MUTEX_QUEUE); + holdingMutex = 0; + if( async.ioDelay>0 ){ + pVfs->xSleep(pVfs, async.ioDelay*1000); + }else{ + async_sched_yield(); + } + } + } + + async_mutex_leave(ASYNC_MUTEX_WRITER); + return; +} + +/* +** Install the asynchronous VFS. +*/ +int sqlite3async_initialize(const char *zParent, int isDefault){ + int rc = SQLITE_OK; + if( async_vfs.pAppData==0 ){ + sqlite3_vfs *pParent = sqlite3_vfs_find(zParent); + if( !pParent || async_os_initialize() ){ + rc = SQLITE_ERROR; + }else if( SQLITE_OK!=(rc = sqlite3_vfs_register(&async_vfs, isDefault)) ){ + async_os_shutdown(); + }else{ + async_vfs.pAppData = (void *)pParent; + async_vfs.mxPathname = ((sqlite3_vfs *)async_vfs.pAppData)->mxPathname; + } + } + return rc; +} + +/* +** Uninstall the asynchronous VFS. +*/ +void sqlite3async_shutdown(void){ + if( async_vfs.pAppData ){ + async_os_shutdown(); + sqlite3_vfs_unregister((sqlite3_vfs *)&async_vfs); + async_vfs.pAppData = 0; + } +} + +/* +** Process events on the write-queue. +*/ +void sqlite3async_run(void){ + asyncWriterThread(); +} + +/* +** Control/configure the asynchronous IO system. +*/ +int sqlite3async_control(int op, ...){ + va_list ap; + va_start(ap, op); + switch( op ){ + case SQLITEASYNC_HALT: { + int eWhen = va_arg(ap, int); + if( eWhen!=SQLITEASYNC_HALT_NEVER + && eWhen!=SQLITEASYNC_HALT_NOW + && eWhen!=SQLITEASYNC_HALT_IDLE + ){ + return SQLITE_MISUSE; + } + async.eHalt = eWhen; + async_mutex_enter(ASYNC_MUTEX_QUEUE); + async_cond_signal(ASYNC_COND_QUEUE); + async_mutex_leave(ASYNC_MUTEX_QUEUE); + break; + } + + case SQLITEASYNC_DELAY: { + int iDelay = va_arg(ap, int); + if( iDelay<0 ){ + return SQLITE_MISUSE; + } + async.ioDelay = iDelay; + break; + } + + case SQLITEASYNC_LOCKFILES: { + int bLock = va_arg(ap, int); + async_mutex_enter(ASYNC_MUTEX_QUEUE); + if( async.nFile || async.pQueueFirst ){ + async_mutex_leave(ASYNC_MUTEX_QUEUE); + return SQLITE_MISUSE; + } + async.bLockFiles = bLock; + async_mutex_leave(ASYNC_MUTEX_QUEUE); + break; + } + + case SQLITEASYNC_GET_HALT: { + int *peWhen = va_arg(ap, int *); + *peWhen = async.eHalt; + break; + } + case SQLITEASYNC_GET_DELAY: { + int *piDelay = va_arg(ap, int *); + *piDelay = async.ioDelay; + break; + } + case SQLITEASYNC_GET_LOCKFILES: { + int *piDelay = va_arg(ap, int *); + *piDelay = async.bLockFiles; + break; + } + + default: + return SQLITE_ERROR; + } + return SQLITE_OK; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ASYNCIO) */ + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/async/sqlite3async.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/async/sqlite3async.h --- sqlite3-3.4.2/ext/async/sqlite3async.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/async/sqlite3async.h 2009-06-25 12:35:50.000000000 +0100 @@ -0,0 +1,223 @@ + +#ifndef __SQLITEASYNC_H_ +#define __SQLITEASYNC_H_ 1 + +/* +** Make sure we can call this stuff from C++. 
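The four entry points implemented above are normally combined in a fixed pattern: install the VFS once, service the write-queue from a dedicated thread, and halt that thread before unregistering. The sketch below is illustrative only; the pthreads usage and the demo() wrapper are assumptions, but the sqlite3async_*() calls and SQLITEASYNC_* constants are the ones defined by this extension.

#include <pthread.h>
#include "sqlite3.h"
#include "sqlite3async.h"

static void *async_io_thread(void *pUnused){
  (void)pUnused;
  sqlite3async_run();    /* with the default HALT=NEVER this blocks until halted */
  return 0;
}

static int demo(void){
  pthread_t tid;
  int rc;

  /* Wrap the default VFS and make the async wrapper the process default. */
  rc = sqlite3async_initialize(0, 1);
  if( rc!=SQLITE_OK ) return rc;
  pthread_create(&tid, 0, async_io_thread, 0);

  /* ... open connections and use SQLite normally on other threads ... */

  /* Shut down: let the queue drain, wait for the thread, unregister the VFS. */
  sqlite3async_control(SQLITEASYNC_HALT, SQLITEASYNC_HALT_IDLE);
  pthread_join(tid, 0);
  sqlite3async_shutdown();
  return SQLITE_OK;
}

SQLITEASYNC_HALT_IDLE lets the queued writes finish before sqlite3async_run() returns; SQLITEASYNC_HALT_NOW would make it return without processing the remaining requests.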
+*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SQLITEASYNC_VFSNAME "sqlite3async"
+
+/*
+** THREAD SAFETY NOTES:
+**
+** Of the four API functions in this file, the following are not threadsafe:
+**
+**   sqlite3async_initialize()
+**   sqlite3async_shutdown()
+**
+** Care must be taken that neither of these functions is called while
+** another thread may be calling any sqlite3async_XXX() function, or any
+** sqlite3_XXX() API function related to a database handle that is using
+** the asynchronous IO VFS.
+**
+** These functions:
+**
+**   sqlite3async_run()
+**   sqlite3async_control()
+**
+** are threadsafe. It is quite safe to call either of these functions even
+** if another thread may also be calling one of them or an sqlite3_XXX()
+** function related to a database handle that uses the asynchronous IO VFS.
+*/
+
+/*
+** Initialize the asynchronous IO VFS and register it with SQLite using
+** sqlite3_vfs_register(). If the asynchronous VFS is already initialized
+** and registered, this function is a no-op. The asynchronous IO VFS
+** is registered as "sqlite3async".
+**
+** The asynchronous IO VFS does not make operating system IO requests
+** directly. Instead, it uses an existing VFS implementation for all
+** required file-system operations. If the first parameter to this function
+** is NULL, then the current default VFS is used for IO. If it is not
+** NULL, then it must be the name of an existing VFS. In other words, the
+** first argument to this function is passed to sqlite3_vfs_find() to
+** locate the VFS to use for all real IO operations. This VFS is known
+** as the "parent VFS".
+**
+** If the second parameter to this function is non-zero, then the
+** asynchronous IO VFS is registered as the default VFS for all SQLite
+** database connections within the process. Otherwise, the asynchronous IO
+** VFS is only used by connections opened using sqlite3_open_v2() that
+** specifically request VFS "sqlite3async".
+**
+** If a parent VFS cannot be located, then SQLITE_ERROR is returned.
+** In the unlikely event that operating system specific initialization
+** fails (win32 systems create the required critical section and event
+** objects within this function), then SQLITE_ERROR is also returned.
+** Finally, if the call to sqlite3_vfs_register() returns an error, then
+** the error code is returned to the user by this function. In all three
+** of these cases, initialization has failed and the asynchronous IO VFS
+** is not registered with SQLite.
+**
+** Otherwise, if no error occurs, SQLITE_OK is returned.
+*/
+int sqlite3async_initialize(const char *zParent, int isDefault);
+
+/*
+** This function unregisters the asynchronous IO VFS using
+** sqlite3_vfs_unregister().
+**
+** On win32 platforms, this function also releases the small number of
+** critical section and event objects created by sqlite3async_initialize().
+*/
+void sqlite3async_shutdown();
+
+/*
+** This function may only be called when the asynchronous IO VFS is
+** installed (after a call to sqlite3async_initialize()). It processes
+** zero or more queued write operations before returning. It is expected
+** (but not required) that this function will be called by a different
+** thread than those threads that use SQLite - a "background thread"
+** dedicated to performing the IO.
+**
+** How many queued write operations are performed before returning
+** depends on the global setting configured by passing the SQLITEASYNC_HALT
+** verb to sqlite3async_control() (see below for details). By default
+** this function never returns - it processes all pending operations and
+** then blocks waiting for new ones.
+**
+** If multiple simultaneous calls are made to sqlite3async_run() from two
+** or more threads, then the calls are serialized internally.
+*/
+void sqlite3async_run();
+
+/*
+** This function may only be called when the asynchronous IO VFS is
+** installed (after a call to sqlite3async_initialize()). It is used
+** to query or configure various parameters that affect the operation
+** of the asynchronous IO VFS. At present three parameters are supported:
+**
+**   * The "halt" parameter, which configures the circumstances under
+**     which calls to sqlite3async_run() return.
+**
+**   * The "delay" parameter. Setting the delay parameter to a non-zero
+**     value causes the sqlite3async_run() function to sleep for the
+**     configured number of milliseconds between each queued write
+**     operation.
+**
+**   * The "lockfiles" parameter. This parameter determines whether or
+**     not the asynchronous IO VFS locks the database files it operates
+**     on. Disabling file locking can improve throughput.
+**
+** This function is always passed two arguments. When setting the value
+** of a parameter, the first argument must be one of SQLITEASYNC_HALT,
+** SQLITEASYNC_DELAY or SQLITEASYNC_LOCKFILES. The second argument must
+** be the new value for the parameter, passed as type "int".
+**
+** When querying the current value of a parameter, the first argument must
+** be one of SQLITEASYNC_GET_HALT, GET_DELAY or GET_LOCKFILES. The second
+** argument to this function must be of type (int *). The current value
+** of the queried parameter is copied to the memory pointed to by the
+** second argument. For example:
+**
+**   int eCurrentHalt;
+**   int eNewHalt = SQLITEASYNC_HALT_IDLE;
+**
+**   sqlite3async_control(SQLITEASYNC_HALT, eNewHalt);
+**   sqlite3async_control(SQLITEASYNC_GET_HALT, &eCurrentHalt);
+**   assert( eNewHalt==eCurrentHalt );
+**
+** See below for more detail on each configuration parameter.
+**
+** SQLITEASYNC_HALT:
+**
+** This is used to set the value of the "halt" parameter. The second
+** argument must be one of the SQLITEASYNC_HALT_XXX symbols defined
+** below (NEVER, IDLE or NOW).
+**
+** If the parameter is set to NEVER, then calls to sqlite3async_run()
+** never return. This is the default setting. If the parameter is set
+** to IDLE, then calls to sqlite3async_run() return as soon as the
+** queue of pending write operations is empty. If the parameter is set
+** to NOW, then calls to sqlite3async_run() return as quickly as
+** possible, without processing any pending write requests.
+**
+** If an attempt is made to set this parameter to an integer value other
+** than SQLITEASYNC_HALT_NEVER, IDLE or NOW, then sqlite3async_control()
+** returns SQLITE_MISUSE and the current value of the parameter is not
+** modified.
+**
+** Modifying the "halt" parameter affects calls to sqlite3async_run()
+** made by other threads that are currently in progress.
+**
+** SQLITEASYNC_DELAY:
+**
+** This is used to set the value of the "delay" parameter. If set to
+** a non-zero value, then after completing a pending write request, the
+** sqlite3async_run() function sleeps for the configured number of
+** milliseconds.
+**
+** If an attempt is made to set this parameter to a negative value,
+** sqlite3async_control() returns SQLITE_MISUSE and the current value
+** of the parameter is not modified.
+** +** Modifying the "delay" parameter affects calls to sqlite3async_run() +** made by other threads that are currently in progress. +** +** SQLITEASYNC_LOCKFILES: +** +** This is used to set the value of the "lockfiles" parameter. This +** parameter must be set to either 0 or 1. If set to 1, then the +** asynchronous IO VFS uses the xLock() and xUnlock() methods of the +** parent VFS to lock database files being read and/or written. If +** the parameter is set to 0, then these locks are omitted. +** +** This parameter may only be set when there are no open database +** connections using the VFS and the queue of pending write requests +** is empty. Attempting to set it when this is not true, or to set it +** to a value other than 0 or 1 causes sqlite3async_control() to return +** SQLITE_MISUSE and the value of the parameter to remain unchanged. +** +** If this parameter is set to zero, then it is only safe to access the +** database via the asynchronous IO VFS from within a single process. If +** while writing to the database via the asynchronous IO VFS the database +** is also read or written from within another process, or via another +** connection that does not use the asynchronous IO VFS within the same +** process, the results are undefined (and may include crashes or database +** corruption). +** +** Alternatively, if this parameter is set to 1, then it is safe to access +** the database from multiple connections within multiple processes using +** either the asynchronous IO VFS or the parent VFS directly. +*/ +int sqlite3async_control(int op, ...); + +/* +** Values that can be used as the first argument to sqlite3async_control(). +*/ +#define SQLITEASYNC_HALT 1 +#define SQLITEASYNC_GET_HALT 2 +#define SQLITEASYNC_DELAY 3 +#define SQLITEASYNC_GET_DELAY 4 +#define SQLITEASYNC_LOCKFILES 5 +#define SQLITEASYNC_GET_LOCKFILES 6 + +/* +** If the first argument to sqlite3async_control() is SQLITEASYNC_HALT, +** the second argument should be one of the following. +*/ +#define SQLITEASYNC_HALT_NEVER 0 /* Never halt (default value) */ +#define SQLITEASYNC_HALT_NOW 1 /* Halt as soon as possible */ +#define SQLITEASYNC_HALT_IDLE 2 /* Halt when write-queue is empty */ + +#ifdef __cplusplus +} /* End of the 'extern "C"' block */ +#endif +#endif /* ifndef __SQLITEASYNC_H_ */ + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/ft_hash.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/ft_hash.c --- sqlite3-3.4.2/ext/fts1/ft_hash.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/ft_hash.c 2006-08-24 00:58:50.000000000 +0100 @@ -0,0 +1,404 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ +#include +#include +#include + +#include "ft_hash.h" + +void *malloc_and_zero(int n){ + void *p = malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. 
+** keyClass is one of the constants HASH_INT, HASH_POINTER, +** HASH_BINARY, or HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. CopyKey only makes +** sense for HASH_STRING and HASH_BINARY and is ignored +** for other key classes. +*/ +void HashInit(Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=HASH_STRING && keyClass<=HASH_BINARY ); + pNew->keyClass = keyClass; +#if 0 + if( keyClass==HASH_POINTER || keyClass==HASH_INT ) copyKey = 0; +#endif + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; + pNew->xMalloc = malloc_and_zero; + pNew->xFree = free; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. +*/ +void HashClear(Hash *pH){ + HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +#if 0 /* NOT USED */ +/* +** Hash and comparison functions when the mode is HASH_INT +*/ +static int intHash(const void *pKey, int nKey){ + return nKey ^ (nKey<<8) ^ (nKey>>8); +} +static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + return n2 - n1; +} +#endif + +#if 0 /* NOT USED */ +/* +** Hash and comparison functions when the mode is HASH_POINTER +*/ +static int ptrHash(const void *pKey, int nKey){ + uptr x = Addr(pKey); + return x ^ (x<<8) ^ (x>>8); +} +static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( pKey1==pKey2 ) return 0; + if( pKey1 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is HASH_BINARY +*/ +static int binHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". 
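The same declaration is usually easier to read with a typedef for the returned pointer type. A small equivalent sketch (the hash_fn name is invented for illustration):

/* A pointer to a hash function: takes (const void*, int) and returns int. */
typedef int (*hash_fn)(const void*, int);

/* With the typedef, the declaration below reads as "a function taking a
** key class and returning the matching hash function": */
static hash_fn hashFunction(int keyClass);

/* ...which declares exactly the same type as the original spelling: */
static int (*hashFunction(int keyClass))(const void*, int);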
+*/ +static int (*hashFunction(int keyClass))(const void*,int){ +#if 0 /* HASH_INT and HASH_POINTER are never used */ + switch( keyClass ){ + case HASH_INT: return &intHash; + case HASH_POINTER: return &ptrHash; + case HASH_STRING: return &strHash; + case HASH_BINARY: return &binHash;; + default: break; + } + return 0; +#else + if( keyClass==HASH_STRING ){ + return &strHash; + }else{ + assert( keyClass==HASH_BINARY ); + return &binHash; + } +#endif +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. +*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ +#if 0 /* HASH_INT and HASH_POINTER are never used */ + switch( keyClass ){ + case HASH_INT: return &intCompare; + case HASH_POINTER: return &ptrCompare; + case HASH_STRING: return &strCompare; + case HASH_BINARY: return &binCompare; + default: break; + } + return 0; +#else + if( keyClass==HASH_STRING ){ + return &strCompare; + }else{ + assert( keyClass==HASH_BINARY ); + return &binCompare; + } +#endif +} + +/* Link an element into the hash table +*/ +static void insertElement( + Hash *pH, /* The complete hash table */ + struct _ht *pEntry, /* The entry into which pNew is inserted */ + HashElem *pNew /* The element to be inserted */ +){ + HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +*/ +static void rehash(Hash *pH, int new_size){ + struct _ht *new_ht; /* The new hash table */ + HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _ht *)pH->xMalloc( new_size*sizeof(struct _ht) ); + if( new_ht==0 ) return; + if( pH->ht ) pH->xFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static HashElem *findElementGivenHash( + const Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. 
*/ +){ + HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. +*/ +static void removeElementGivenHash( + Hash *pH, /* The pH containing "elem" */ + HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *HashFind(const Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
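A short usage sketch of the insert, replace and delete behaviour documented above. It is illustrative only; it assumes ft_hash.h/ft_hash.c from this patch are compiled into the program, and per ft_hash.h the nKey passed for HASH_STRING keys here includes the terminating NUL.

#include <assert.h>
#include "ft_hash.h"

static void hash_demo(void){
  Hash h;
  int one = 1, two = 2;
  void *pOld;

  HashInit(&h, HASH_STRING, 1);             /* copyKey=1: keys are duplicated */

  pOld = HashInsert(&h, "alpha", 6, &one);  /* new key: returns NULL */
  assert( pOld==0 );
  assert( HashFind(&h, "alpha", 6)==&one );

  pOld = HashInsert(&h, "alpha", 6, &two);  /* same key: old data returned */
  assert( pOld==&one );
  assert( HashFind(&h, "alpha", 6)==&two );

  pOld = HashInsert(&h, "alpha", 6, 0);     /* NULL data: entry is removed */
  assert( pOld==&two );
  assert( HashFind(&h, "alpha", 6)==0 );

  HashClear(&h);                            /* release all remaining memory */
}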
+*/ +void *HashInsert(Hash *pH, const void *pKey, int nKey, void *data){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + HashElem *elem; /* Used to loop thru the element list */ + HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = findElementGivenHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (HashElem*)pH->xMalloc( sizeof(HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = pH->xMalloc( nKey ); + if( new_elem->pKey==0 ){ + pH->xFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + pH->xFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + insertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/ft_hash.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/ft_hash.h --- sqlite3-3.4.2/ext/fts1/ft_hash.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/ft_hash.h 2006-08-24 00:58:50.000000000 +0100 @@ -0,0 +1,111 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _HASH_H_ +#define _HASH_H_ + +/* Forward declarations of structures. */ +typedef struct Hash Hash; +typedef struct HashElem HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
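HashInsert() above sizes the bucket array lazily: the first insert allocates 8 buckets, and the table doubles whenever the element count exceeds the bucket count, so (malloc failures aside, which simply leave the old size in place) the load factor stays at or below one. A small sketch of that growth rule in isolation; buckets_after is an invented helper, not part of the hash code.

/* Number of buckets ft_hash.c would be using after n successful inserts of
** distinct keys, following the "start at 8, double when count exceeds
** htsize" rule above. */
static int buckets_after(int n){
  int htsize = 0;
  int count;
  for(count=1; count<=n; count++){
    if( htsize==0 ) htsize = 8;
    if( count>htsize ) htsize *= 2;
  }
  return htsize;  /* buckets_after(8)==8, buckets_after(9)==16, buckets_after(100)==128 */
}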
+*/ +struct Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + HashElem *first; /* The first element of the array */ + void *(*xMalloc)(int); /* malloc() function to use */ + void (*xFree)(void *); /* free() function to use */ + int htsize; /* Number of buckets in the hash table */ + struct _ht { /* the hash table */ + int count; /* Number of entries with this hash */ + HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct HashElem { + HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 4 different modes of operation for a hash table: +** +** HASH_INT nKey is used as the key and pKey is ignored. +** +** HASH_POINTER pKey is used as the key and nKey is ignored. +** +** HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made for HASH_STRING and HASH_BINARY +** if the copyKey parameter to HashInit is 1. +*/ +/* #define HASH_INT 1 // NOT USED */ +/* #define HASH_POINTER 2 // NOT USED */ +#define HASH_STRING 3 +#define HASH_BINARY 4 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void HashInit(Hash*, int keytype, int copyKey); +void *HashInsert(Hash*, const void *pKey, int nKey, void *pData); +void *HashFind(const Hash*, const void *pKey, int nKey); +void HashClear(Hash*); + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** Hash h; +** HashElem *p; +** ... +** for(p=HashFirst(&h); p; p=HashNext(p)){ +** SomeStructure *pData = HashData(p); +** // do something with pData +** } +*/ +#define HashFirst(H) ((H)->first) +#define HashNext(E) ((E)->next) +#define HashData(E) ((E)->data) +#define HashKey(E) ((E)->pKey) +#define HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define HashCount(H) ((H)->count) + +#endif /* _HASH_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/fts1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/fts1.c --- sqlite3-3.4.2/ext/fts1/fts1.c 2007-07-30 21:36:32.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/fts1.c 2009-06-12 03:37:46.000000000 +0100 @@ -1,3 +1,25 @@ +/* fts1 has a design flaw which can lead to database corruption (see +** below). It is recommended not to use it any longer, instead use +** fts3 (or higher). If you believe that your use of fts1 is safe, +** add -DSQLITE_ENABLE_BROKEN_FTS1=1 to your CFLAGS. +*/ +#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1)) \ + && !defined(SQLITE_ENABLE_BROKEN_FTS1) +#error fts1 has a design flaw and has been deprecated. +#endif +/* The flaw is that fts1 uses the content table's unaliased rowid as +** the unique docid. fts1 embeds the rowid in the index it builds, +** and expects the rowid to not change. The SQLite VACUUM operation +** will renumber such rowids, thereby breaking fts1. 
If you are using +** fts1 in a system which has disabled VACUUM, then you can continue +** to use it safely. Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** fts1 should be safe even across VACUUM if you only insert documents +** and never delete. +*/ + /* The author disclaims copyright to this source code. * * This is an SQLite module implementing full-text search. @@ -855,7 +877,7 @@ } /* Duplicate a string; the caller must free() the returned string. - * (We don't use strdup() since it's not part of the standard C library and + * (We don't use strdup() since it is not part of the standard C library and * may not be available everywhere.) */ static char *string_dup(const char *s){ return string_dup_n(s, strlen(s)); @@ -1192,7 +1214,7 @@ if( rc==SQLITE_BUSY ) continue; if( rc!=SQLITE_ERROR ) return rc; - /* If an SQLITE_SCHEMA error has occured, then finalizing this + /* If an SQLITE_SCHEMA error has occurred, then finalizing this * statement is going to delete the fulltext_vtab structure. If * the statement just executed is in the pFulltextStatements[] * array, it will be finalized twice. So remove it before @@ -2300,8 +2322,8 @@ for(i=0; inMatch; i++){ struct snippetMatch *pMatch = &p->aMatch[i]; zBuf[0] = ' '; - sprintf(&zBuf[cnt>0], "%d %d %d %d", pMatch->iCol, - pMatch->iTerm, pMatch->iStart, pMatch->nByte); + sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", + pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); append(&sb, zBuf); cnt++; } @@ -2573,7 +2595,10 @@ pLeft = docListNew(DL_POSITIONS); rc = term_select_all(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pLeft); - if( rc ) return rc; + if( rc ){ + docListDelete(pLeft); + return rc; + } for(i=1; i<=pQTerm->nPhrase; i++){ pRight = docListNew(DL_POSITIONS); rc = term_select_all(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, pRight); @@ -3121,7 +3146,7 @@ return insertTerms(v, pTerms, iRow, pValues); } -/* This function implements the xUpdate callback; it's the top-level entry +/* This function implements the xUpdate callback; it is the top-level entry * point for inserting, deleting or updating a row in a full-text table. */ static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, sqlite_int64 *pRowid){ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/fulltext.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/fulltext.c --- sqlite3-3.4.2/ext/fts1/fulltext.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/fulltext.c 2006-08-29 00:46:02.000000000 +0100 @@ -0,0 +1,1496 @@ +/* The author disclaims copyright to this source code. + * + * This is an SQLite module implementing full-text search. + */ + +#include +#if !defined(__APPLE__) +#include +#else +#include +#endif +#include +#include +#include + +#include "fulltext.h" +#include "ft_hash.h" +#include "tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + +/* utility functions */ + +/* We encode variable-length integers in little-endian order using seven bits + * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +*/ + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. 
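As a worked example of the format described above (illustrative, not part of the patch): 300 = 2*128 + 44, so it is written low-order seven bits first as 0xAC (44 with the 0x80 continuation bit set) followed by 0x02, while any value below 128 occupies a single byte. A minimal standalone encoder following the same scheme:

#include <assert.h>

typedef long long demo_i64;
typedef unsigned long long demo_u64;

/* Encode v as putVarint() below does: 7 data bits per byte, low-order bits
** first, continuation bit 0x80 on every byte except the last. */
static int demo_put_varint(unsigned char *p, demo_i64 v){
  unsigned char *q = p;
  demo_u64 vu = (demo_u64)v;
  do{
    *q++ = (unsigned char)((vu & 0x7f) | 0x80);
    vu >>= 7;
  }while( vu!=0 );
  q[-1] &= 0x7f;                 /* clear the continuation bit on the last byte */
  return (int)(q - p);
}

int main(void){
  unsigned char buf[10];
  assert( demo_put_varint(buf, 300)==2 && buf[0]==0xAC && buf[1]==0x02 );
  assert( demo_put_varint(buf, 127)==1 && buf[0]==0x7F );
  return 0;
}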
+ * The number of bytes written is returned. */ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*** Document lists *** + * + * A document list holds a sorted list of varint-encoded document IDs. + * + * A doclist with type DL_POSITIONS_OFFSETS is stored like this: + * + * array { + * varint docid; + * array { + * varint position; (delta from previous position plus 1, or 0 for end) + * varint startOffset; (delta from previous startOffset) + * varint endOffset; (delta from startOffset) + * } + * } + * + * Here, array { X } means zero or more occurrences of X, adjacent in memory. + * + * A doclist with type DL_POSITIONS is like the above, but holds only docids + * and positions without offset information. + * + * A doclist with type DL_DOCIDS is like the above, but holds only docids + * without positions or offset information. + * + * On disk, every document list has positions and offsets, so we don't bother + * to serialize a doclist's type. + * + * We don't yet delta-encode document IDs; doing so will probably be a + * modest win. + * + * NOTE(shess) I've thought of a slightly (1%) better offset encoding. + * After the first offset, estimate the next offset by using the + * current token position and the previous token position and offset, + * offset to handle some variance. So the estimate would be + * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded + * as normal. Offsets more than 64 chars from the estimate are + * encoded as the delta to the previous start offset + 128. An + * additional tiny increment can be gained by using the end offset of + * the previous token to make the estimate a tiny bit more precise. +*/ + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +typedef struct DocList { + char *pData; + int nData; + DocListType iType; + int iLastPos; /* the last position written */ + int iLastOffset; /* the last start offset written */ +} DocList; + +/* Initialize a new DocList to hold the given data. */ +static void docListInit(DocList *d, DocListType iType, + const char *pData, int nData){ + d->nData = nData; + if( nData>0 ){ + d->pData = malloc(nData); + memcpy(d->pData, pData, nData); + } else { + d->pData = NULL; + } + d->iType = iType; + d->iLastPos = 0; + d->iLastOffset = 0; +} + +/* Create a new dynamically-allocated DocList. 
*/ +static DocList *docListNew(DocListType iType){ + DocList *d = (DocList *) malloc(sizeof(DocList)); + docListInit(d, iType, 0, 0); + return d; +} + +static void docListDestroy(DocList *d){ + free(d->pData); +#ifndef NDEBUG + memset(d, 0x55, sizeof(*d)); +#endif +} + +static void docListDelete(DocList *d){ + docListDestroy(d); + free(d); +} + +static char *docListEnd(DocList *d){ + return d->pData + d->nData; +} + +/* Append a varint to a DocList's data. */ +static void appendVarint(DocList *d, sqlite_int64 i){ + char c[VARINT_MAX]; + int n = putVarint(c, i); + d->pData = realloc(d->pData, d->nData + n); + memcpy(d->pData + d->nData, c, n); + d->nData += n; +} + +static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ + appendVarint(d, iDocid); + d->iLastPos = 0; +} + +/* Add a position to the last position list in a doclist. */ +static void docListAddPos(DocList *d, int iPos){ + assert( d->iType>=DL_POSITIONS ); + appendVarint(d, iPos-d->iLastPos+1); + d->iLastPos = iPos; +} + +static void docListAddPosOffset(DocList *d, int iPos, + int iStartOffset, int iEndOffset){ + assert( d->iType==DL_POSITIONS_OFFSETS ); + docListAddPos(d, iPos); + appendVarint(d, iStartOffset-d->iLastOffset); + d->iLastOffset = iStartOffset; + appendVarint(d, iEndOffset-iStartOffset); +} + +/* Terminate the last position list in the given doclist. */ +static void docListAddEndPos(DocList *d){ + appendVarint(d, 0); +} + +typedef struct DocListReader { + DocList *pDoclist; + char *p; + int iLastPos; /* the last position read */ +} DocListReader; + +static void readerInit(DocListReader *r, DocList *pDoclist){ + r->pDoclist = pDoclist; + if( pDoclist!=NULL ){ + r->p = pDoclist->pData; + } + r->iLastPos = 0; +} + +static int readerAtEnd(DocListReader *pReader){ + return pReader->p >= docListEnd(pReader->pDoclist); +} + +/* Peek at the next docid without advancing the read pointer. */ +static sqlite_int64 peekDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !readerAtEnd(pReader) ); + getVarint(pReader->p, &ret); + return ret; +} + +/* Read the next docid. */ +static sqlite_int64 readDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !readerAtEnd(pReader) ); + pReader->p += getVarint(pReader->p, &ret); + pReader->iLastPos = 0; + return ret; +} + +/* Read the next position from a position list. + * Returns the position, or -1 at the end of the list. */ +static int readPosition(DocListReader *pReader){ + int i; + int iType = pReader->pDoclist->iType; + assert( iType>=DL_POSITIONS ); + assert( !readerAtEnd(pReader) ); + + pReader->p += getVarint32(pReader->p, &i); + if( i==0 ){ + pReader->iLastPos = -1; + return -1; + } + pReader->iLastPos += ((int) i)-1; + if( iType>=DL_POSITIONS_OFFSETS ){ + /* Skip over offsets, ignoring them for now. */ + int iStart, iEnd; + pReader->p += getVarint32(pReader->p, &iStart); + pReader->p += getVarint32(pReader->p, &iEnd); + } + return pReader->iLastPos; +} + +/* Skip past the end of a position list. */ +static void skipPositionList(DocListReader *pReader){ + while( readPosition(pReader)!=-1 ) + ; +} + +/* Skip over a docid, including its position list if the doclist has + * positions. */ +static void skipDocument(DocListReader *pReader){ + readDocid(pReader); + if( pReader->pDoclist->iType >= DL_POSITIONS ){ + skipPositionList(pReader); + } +} + +static sqlite_int64 firstDocid(DocList *d){ + DocListReader r; + readerInit(&r, d); + return readDocid(&r); +} + +/* Doclist multi-tool. 
Pass pUpdate==NULL to delete the indicated docid; + * otherwise pUpdate, which must contain only the single docid [iDocid], is + * inserted (if not present) or updated (if already present). */ +static int docListUpdate(DocList *d, sqlite_int64 iDocid, DocList *pUpdate){ + int modified = 0; + DocListReader reader; + char *p; + + if( pUpdate!=NULL ){ + assert( d->iType==pUpdate->iType); + assert( iDocid==firstDocid(pUpdate) ); + } + + readerInit(&reader, d); + while( !readerAtEnd(&reader) && peekDocid(&reader)nData -= (reader.p - p); + modified = 1; + } + + /* Insert if indicated. */ + if( pUpdate!=NULL ){ + int iDoclist = p-d->pData; + docListAddEndPos(pUpdate); + + d->pData = realloc(d->pData, d->nData+pUpdate->nData); + p = d->pData + iDoclist; + + memmove(p+pUpdate->nData, p, docListEnd(d) - p); + memcpy(p, pUpdate->pData, pUpdate->nData); + d->nData += pUpdate->nData; + modified = 1; + } + + return modified; +} + +/* Split the second half of doclist d into a separate doclist d2. Returns 1 + * if successful, or 0 if d contains a single document and hence can't be + * split. */ +static int docListSplit(DocList *d, DocList *d2){ + const char *pSplitPoint = d->pData + d->nData / 2; + DocListReader reader; + + readerInit(&reader, d); + while( reader.piType, reader.p, docListEnd(d) - reader.p); + d->nData = reader.p - d->pData; + d->pData = realloc(d->pData, d->nData); + return 1; +} + +/* A DocListMerge computes the AND of an in-memory DocList [in] and a chunked + * on-disk doclist, resulting in another in-memory DocList [out]. [in] + * and [out] may or may not store position information according to the + * caller's wishes. The on-disk doclist always comes with positions. + * + * The caller must read each chunk of the on-disk doclist in succession and + * pass it to mergeBlock(). + * + * If [in] has positions, then the merge output contains only documents with + * matching positions in the two input doclists. If [in] does not have + * positions, then the merge output contains all documents common to the two + * input doclists. + * + * If [in] is NULL, then the on-disk doclist is copied to [out] directly. + * + * A merge is performed using an integer [iOffset] provided by the caller. + * [iOffset] is subtracted from each position in the on-disk doclist for the + * purpose of position comparison; this is helpful in implementing phrase + * searches. + * + * A DocListMerge is not yet able to propagate offsets through query + * processing; we should add that capability soon. +*/ +typedef struct DocListMerge { + DocListReader in; + DocList *pOut; + int iOffset; +} DocListMerge; + +static void mergeInit(DocListMerge *m, + DocList *pIn, int iOffset, DocList *pOut){ + readerInit(&m->in, pIn); + m->pOut = pOut; + m->iOffset = iOffset; + + /* can't handle offsets yet */ + assert( pIn==NULL || pIn->iType <= DL_POSITIONS ); + assert( pOut->iType <= DL_POSITIONS ); +} + +/* A helper function for mergeBlock(), below. Merge the position lists + * pointed to by m->in and pBlockReader. + * If the merge matches, write [iDocid] to m->pOut; if m->pOut + * has positions then write all matching positions as well. 
*/ +static void mergePosList(DocListMerge *m, sqlite_int64 iDocid, + DocListReader *pBlockReader){ + int block_pos = readPosition(pBlockReader); + int in_pos = readPosition(&m->in); + int match = 0; + while( block_pos!=-1 || in_pos!=-1 ){ + if( block_pos-m->iOffset==in_pos ){ + if( !match ){ + docListAddDocid(m->pOut, iDocid); + match = 1; + } + if( m->pOut->iType >= DL_POSITIONS ){ + docListAddPos(m->pOut, in_pos); + } + block_pos = readPosition(pBlockReader); + in_pos = readPosition(&m->in); + } else if( in_pos==-1 || (block_pos!=-1 && block_pos-m->iOffsetin); + } + } + if( m->pOut->iType >= DL_POSITIONS && match ){ + docListAddEndPos(m->pOut); + } +} + +/* Merge one block of an on-disk doclist into a DocListMerge. */ +static void mergeBlock(DocListMerge *m, DocList *pBlock){ + DocListReader blockReader; + assert( pBlock->iType >= DL_POSITIONS ); + readerInit(&blockReader, pBlock); + while( !readerAtEnd(&blockReader) ){ + sqlite_int64 iDocid = readDocid(&blockReader); + if( m->in.pDoclist!=NULL ){ + while( 1 ){ + if( readerAtEnd(&m->in) ) return; /* nothing more to merge */ + if( peekDocid(&m->in)>=iDocid ) break; + skipDocument(&m->in); + } + if( peekDocid(&m->in)>iDocid ){ /* [pIn] has no match with iDocid */ + skipPositionList(&blockReader); /* skip this docid in the block */ + continue; + } + readDocid(&m->in); + } + /* We have a document match. */ + if( m->in.pDoclist==NULL || m->in.pDoclist->iType < DL_POSITIONS ){ + /* We don't need to do a poslist merge. */ + docListAddDocid(m->pOut, iDocid); + if( m->pOut->iType >= DL_POSITIONS ){ + /* Copy all positions to the output doclist. */ + while( 1 ){ + int pos = readPosition(&blockReader); + if( pos==-1 ) break; + docListAddPos(m->pOut, pos); + } + docListAddEndPos(m->pOut); + } else skipPositionList(&blockReader); + continue; + } + mergePosList(m, iDocid, &blockReader); + } +} + +static char *string_dup_n(const char *s, int n){ + char *str = malloc(n + 1); + memcpy(str, s, n); + str[n] = '\0'; + return str; +} + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it's not part of the standard C library and + * may not be available everywhere.) */ +static char *string_dup(const char *s){ + return string_dup_n(s, strlen(s)); +} + +/* Format a string, replacing each occurrence of the % character with + * zName. This may be more convenient than sqlite_mprintf() + * when one string is used repeatedly in a format string. + * The caller must free() the returned string. */ +static char *string_format(const char *zFormat, const char *zName){ + const char *p; + size_t len = 0; + size_t nName = strlen(zName); + char *result; + char *r; + + /* first compute length needed */ + for(p = zFormat ; *p ; ++p){ + len += (*p=='%' ? 
nName : 1); + } + len += 1; /* for null terminator */ + + r = result = malloc(len); + for(p = zFormat; *p; ++p){ + if( *p=='%' ){ + memcpy(r, zName, nName); + r += nName; + } else { + *r++ = *p; + } + } + *r++ = '\0'; + assert( r == result + len ); + return result; +} + +static int sql_exec(sqlite3 *db, const char *zName, const char *zFormat){ + char *zCommand = string_format(zFormat, zName); + int rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); + free(zCommand); + return rc; +} + +static int sql_prepare(sqlite3 *db, const char *zName, sqlite3_stmt **ppStmt, + const char *zFormat){ + char *zCommand = string_format(zFormat, zName); + int rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); + free(zCommand); + return rc; +} + +/* end utility functions */ + +#define QUERY_GENERIC 0 +#define QUERY_FULLTEXT 1 + +#define CHUNK_MAX 1024 + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_DELETE_STMT, + + TERM_SELECT_STMT, + TERM_CHUNK_SELECT_STMT, + TERM_INSERT_STMT, + TERM_UPDATE_STMT, + TERM_DELETE_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(adam): Is there some risk that a statement (in particular, +** pTermSelectStmt) will be used in two cursors at once, e.g. if a +** query joins a virtual table to itself? If so perhaps we should +** move some of these to the cursor object. +*/ +static const char *fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ "insert into %_content (rowid, content) values (?, ?)", + /* CONTENT_SELECT */ "select content from %_content where rowid = ?", + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + + /* TERM_SELECT */ + "select rowid, doclist from %_term where term = ? and first = ?", + /* TERM_CHUNK_SELECT */ + "select max(first) from %_term where term = ? and first <= ?", + /* TERM_INSERT */ + "insert into %_term (term, first, doclist) values (?, ?, ?)", + /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", + /* TERM_DELETE */ "delete from %_term where rowid = ?", +}; + +typedef struct fulltext_vtab { + sqlite3_vtab base; + sqlite3 *db; + const char *zName; /* virtual table name */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; +} fulltext_vtab; + +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; + int iCursorType; /* QUERY_GENERIC or QUERY_FULLTEXT */ + + sqlite3_stmt *pStmt; + + int eof; + + /* The following is used only when iCursorType == QUERY_FULLTEXT. */ + DocListReader result; +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static sqlite3_module fulltextModule; /* forward declaration */ + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. 
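The prepare-once-then-reset behaviour described here is the usual way of caching statements against a connection. A self-contained sketch of the same idea follows; the get_cached_stmt name is invented, and it uses sqlite3_prepare_v2() rather than the sqlite3_prepare() used by this module.

#include "sqlite3.h"

/* A one-slot statement cache: prepare on first use, reset on later uses.
** Illustrative only; fulltext.c keeps an array of such slots, one per
** fulltext_statement value. */
static int get_cached_stmt(
  sqlite3 *db,
  sqlite3_stmt **ppCache,      /* in/out: the cached handle (NULL before first use) */
  const char *zSql,
  sqlite3_stmt **ppStmt
){
  int rc;
  if( *ppCache==0 ){
    rc = sqlite3_prepare_v2(db, zSql, -1, ppCache, 0);
    if( rc!=SQLITE_OK ) return rc;
  }else{
    rc = sqlite3_reset(*ppCache);   /* clears previous rows, keeps bindings */
    if( rc!=SQLITE_OK ) return rc;
  }
  *ppStmt = *ppCache;
  return SQLITE_OK;
}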
+*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmtpFulltextStatements[iStmt]==NULL ){ + int rc = sql_prepare(v->db, v->zName, &v->pFulltextStatements[iStmt], + fulltext_zStatement[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Step the indicated statement, handling errors SQLITE_BUSY (by +** retrying) and SQLITE_SCHEMA (by re-preparing and transferring +** bindings to the new statement). +** TODO(adam): We should extend this function so that it can work with +** statements declared locally, not only globally cached statements. +*/ +static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc; + sqlite3_stmt *s = *ppStmt; + assert( iStmtpFulltextStatements[iStmt] ); + + while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ + sqlite3_stmt *pNewStmt; + + if( rc==SQLITE_BUSY ) continue; + if( rc!=SQLITE_ERROR ) return rc; + + rc = sqlite3_reset(s); + if( rc!=SQLITE_SCHEMA ) return SQLITE_ERROR; + + v->pFulltextStatements[iStmt] = NULL; /* Still in s */ + rc = sql_get_statement(v, iStmt, &pNewStmt); + if( rc!=SQLITE_OK ) goto err; + *ppStmt = pNewStmt; + + rc = sqlite3_transfer_bindings(s, pNewStmt); + if( rc!=SQLITE_OK ) goto err; + + rc = sqlite3_finalize(s); + if( rc!=SQLITE_OK ) return rc; + s = pNewStmt; + } + return rc; + + err: + sqlite3_finalize(s); + return rc; +} + +/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. +** Useful for statements like UPDATE, where we expect no results. +*/ +static int sql_single_step_statement(fulltext_vtab *v, + fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc = sql_step_statement(v, iStmt, ppStmt); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* insert into %_content (rowid, content) values ([rowid], [zContent]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + const char *zContent, int nContent){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 2, zContent, nContent, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); +} + +/* select content from %_content where rowid = [iRow] + * The caller must delete the returned string. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + char **pzContent){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + *pzContent = string_dup((const char *)sqlite3_column_text(s, 0)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + + free(*pzContent); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); +} + +/* select rowid, doclist from %_term where term = [zTerm] and first = [iFirst] + * If found, returns SQLITE_OK; the caller must free the returned doclist. + * If no rows found, returns SQLITE_ERROR. */ +static int term_select(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, + sqlite_int64 *rowid, + DocList *out){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_TRANSIENT); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + + *rowid = sqlite3_column_int64(s, 0); + docListInit(out, DL_POSITIONS_OFFSETS, + sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + return rc==SQLITE_DONE ? SQLITE_OK : rc; +} + +/* select max(first) from %_term where term = [zTerm] and first <= [iFirst] + * If found, returns SQLITE_ROW and result in *piResult; if the query returns + * NULL (meaning no row found) returns SQLITE_DONE. + */ +static int term_chunk_select(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, sqlite_int64 *piResult){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_CHUNK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_CHUNK_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + + switch( sqlite3_column_type(s, 0) ){ + case SQLITE_NULL: + rc = SQLITE_DONE; + break; + case SQLITE_INTEGER: + *piResult = sqlite3_column_int64(s, 0); + break; + default: + return SQLITE_ERROR; + } + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + if( sqlite3_step(s) != SQLITE_DONE ) return SQLITE_ERROR; + return rc; +} + +/* insert into %_term (term, first, doclist) + values ([zTerm], [iFirst], [doclist]) */ +static int term_insert(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 3, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_INSERT_STMT, &s); +} + +/* update %_term set doclist = [doclist] where rowid = [rowid] */ +static int term_update(fulltext_vtab *v, sqlite_int64 rowid, + DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, + SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); +} + +static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_DELETE_STMT, &s); +} + +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt; + + for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + free((void *) v->zName); + free(v); +} + +/* Current interface: +** argv[0] - module name +** argv[1] - database name +** argv[2] - table name +** argv[3] - tokenizer name (optional, a sensible default is provided) +** argv[4..] - passed to tokenizer (optional based on tokenizer) +**/ +static int fulltextConnect(sqlite3 *db, void *pAux, int argc, char **argv, + sqlite3_vtab **ppVTab){ + int rc; + fulltext_vtab *v; + sqlite3_tokenizer_module *m = NULL; + + assert( argc>=3 ); + v = (fulltext_vtab *) malloc(sizeof(fulltext_vtab)); + /* sqlite will initialize v->base */ + v->db = db; + v->zName = string_dup(argv[2]); + v->pTokenizer = NULL; + + if( argc==3 ){ + get_simple_tokenizer_module(&m); + } else { + /* TODO(shess) For now, add new tokenizers as else if clauses. */ + if( !strcmp(argv[3], "simple") ){ + get_simple_tokenizer_module(&m); + } else { + assert( "unrecognized tokenizer"==NULL ); + } + } + + /* TODO(shess) Since tokenization impacts the index, the parameters + ** to the tokenizer need to be identical when a persistent virtual + ** table is re-created. One solution would be a meta-table to track + ** such information in the database. Then we could verify that the + ** information is identical on subsequent creates. + */ + /* TODO(shess) Why isn't argv already (const char **)? 
*/ + rc = m->xCreate(argc-3, (const char **) (argv+3), &v->pTokenizer); + if( rc!=SQLITE_OK ) return rc; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + rc = sqlite3_declare_vtab(db, "create table x(content text)"); + if( rc!=SQLITE_OK ) return rc; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + *ppVTab = &v->base; + return SQLITE_OK; +} + +static int fulltextCreate(sqlite3 *db, void *pAux, int argc, char **argv, + sqlite3_vtab **ppVTab){ + int rc; + assert( argc>=3 ); + + /* The %_content table holds the text of each full-text item, with + ** the rowid used as the docid. + ** + ** The %_term table maps each term to a document list blob + ** containing elements sorted by ascending docid, each element + ** encoded as: + ** + ** docid varint-encoded + ** token count varint-encoded + ** "count" token elements (poslist): + ** position varint-encoded as delta from previous position + ** start offset varint-encoded as delta from previous start offset + ** end offset varint-encoded as delta from start offset + ** + ** Additionally, doclist blobs can be chunked into multiple rows, + ** using "first" to order the blobs. "first" is simply the first + ** docid in the blob. + */ + /* + ** NOTE(shess) That last sentence is incorrect in the face of + ** deletion, which can leave a doclist that doesn't contain the + ** first from that row. I _believe_ this does not matter to the + ** operation of the system, but it might be reasonable to update + ** appropriately in case this assumption becomes more important. + */ + rc = sql_exec(db, argv[2], + "create table %_content(content text);" + "create table %_term(term text, first integer, doclist blob);" + "create index %_index on %_term(term, first)"); + if( rc!=SQLITE_OK ) return rc; + + return fulltextConnect(db, pAux, argc, argv, ppVTab); +} + +/* Decide how to handle an SQL query. + * At the moment, MATCH queries can include implicit boolean ANDs; we + * haven't implemented phrase searches or OR yet. 
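+ * For example (table name ft is hypothetical, created through this module): + * SELECT rowid, content FROM ft WHERE content MATCH 'four score' + * is planned as QUERY_FULLTEXT below, while any other constraint pattern + * falls back to QUERY_GENERIC, a plain scan of the %_content table.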
*/ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + + for(i=0; i<pInfo->nConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->iColumn==0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH && + pConstraint->usable ){ /* a full-text search */ + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + pInfo->idxNum = QUERY_FULLTEXT; + pInfo->estimatedCost = 1.0; /* an arbitrary value for now */ + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + + int rc = sql_exec(v->db, v->zName, + "drop table %_content; drop table %_term"); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + + return SQLITE_OK; +} + +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite3_finalize(c->pStmt); + if( c->result.pDoclist!=NULL ){ + docListDelete(c->result.pDoclist); + } + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite_int64 iDocid; + int rc; + + switch( c->iCursorType ){ + case QUERY_GENERIC: + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + case QUERY_FULLTEXT: + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( readerAtEnd(&c->result)){ + c->eof = 1; + return SQLITE_OK; + } + iDocid = readDocid(&c->result); + rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + default: + assert( 0 ); + return SQLITE_ERROR; /* not reached */ + } +} + +static int term_select_doclist(fulltext_vtab *v, const char *pTerm, int nTerm, + sqlite3_stmt **ppStmt){ + int rc; + if( *ppStmt ){ + rc = sqlite3_reset(*ppStmt); + } else { + rc = sql_prepare(v->db, v->zName, ppStmt, + "select doclist from %_term where term = ? order by first"); + } + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(*ppStmt, 1, pTerm, nTerm, SQLITE_TRANSIENT); + if( rc!=SQLITE_OK ) return rc; + + return sqlite3_step(*ppStmt); /* TODO(adamd): handle schema error */ +} + +/* Read the posting list for [zTerm]; AND it with the doclist [in] to + * produce the doclist [out], using the given offset [iOffset] for phrase + * matching. + * (*pSelect) is used to hold an SQLite statement used inside this function; + * the caller should initialize *pSelect to NULL before the first call.
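+ * The statement is then reused (reset and re-bound) across calls for the + * remaining terms of the query; the caller is expected to pass the same + * pointer each time and to sqlite3_finalize() it once the whole query has + * been merged.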
+ */ +static int query_merge(fulltext_vtab *v, sqlite3_stmt **pSelect, + const char *zTerm, + DocList *pIn, int iOffset, DocList *out){ + int rc; + DocListMerge merge; + + if( pIn!=NULL && !pIn->nData ){ + /* If [pIn] is already empty, there's no point in reading the + * posting list to AND it in; return immediately. */ + return SQLITE_OK; + } + + rc = term_select_doclist(v, zTerm, -1, pSelect); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + mergeInit(&merge, pIn, iOffset, out); + while( rc==SQLITE_ROW ){ + DocList block; + docListInit(&block, DL_POSITIONS_OFFSETS, + sqlite3_column_blob(*pSelect, 0), + sqlite3_column_bytes(*pSelect, 0)); + mergeBlock(&merge, &block); + docListDestroy(&block); + + rc = sqlite3_step(*pSelect); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ + return rc; + } + } + + return SQLITE_OK; +} + +typedef struct QueryTerm { + int is_phrase; /* true if this term begins a new phrase */ + const char *zTerm; +} QueryTerm; + +/* A parsed query. + * + * As an example, parsing the query ["four score" years "new nation"] will + * yield a Query with 5 terms: + * "four", is_phrase = 1 + * "score", is_phrase = 0 + * "years", is_phrase = 1 + * "new", is_phrase = 1 + * "nation", is_phrase = 0 + */ +typedef struct Query { + int nTerms; + QueryTerm *pTerm; +} Query; + +static void query_add(Query *q, int is_phrase, const char *zTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerm = realloc(q->pTerm, q->nTerms * sizeof(q->pTerm[0])); + t = &q->pTerm[q->nTerms - 1]; + t->is_phrase = is_phrase; + t->zTerm = zTerm; +} + +static void query_free(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free((void *) q->pTerm[i].zTerm); + } + free(q->pTerm); +} + +static int tokenize_segment(sqlite3_tokenizer *pTokenizer, + const char *zQuery, int in_phrase, + Query *pQuery){ + sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int is_first = 1; + + int rc = pModule->xOpen(pTokenizer, zQuery, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *zToken; + int nToken, iStartOffset, iEndOffset, dummy_pos; + + rc = pModule->xNext(pCursor, + &zToken, &nToken, + &iStartOffset, &iEndOffset, + &dummy_pos); + if( rc!=SQLITE_OK ) break; + query_add(pQuery, !in_phrase || is_first, string_dup_n(zToken, nToken)); + is_first = 0; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object. */ +static int parse_query(fulltext_vtab *v, const char *zQuery, Query *pQuery){ + char *zQuery1 = string_dup(zQuery); + int in_phrase = 0; + char *s = zQuery1; + pQuery->nTerms = 0; + pQuery->pTerm = NULL; + + while( *s ){ + char *t = s; + while( *t ){ + if( *t=='"' ){ + *t++ = '\0'; + break; + } + ++t; + } + if( *s ){ + tokenize_segment(v->pTokenizer, s, in_phrase, pQuery); + } + s = t; + in_phrase = !in_phrase; + } + + free(zQuery1); + return SQLITE_OK; +} + +/* Perform a full-text query; return a list of documents in [pResult]. */ +static int fulltext_query(fulltext_vtab *v, const char *zQuery, + DocList **pResult){ + Query q; + int phrase_start = -1; + int i; + sqlite3_stmt *pSelect = NULL; + DocList *d = NULL; + + int rc = parse_query(v, zQuery, &q); + if( rc!=SQLITE_OK ) return rc; + + /* Merge terms. */ + for(i = 0 ; i < q.nTerms ; ++i){ + /* In each merge step, we need to generate positions whenever we're + * processing a phrase which hasn't ended yet. 
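+ * For the ["new nation"] example above, positions are kept while merging + * "new" so that "nation" can be required at the following position; for + * the last term of a phrase, or a term outside any phrase, docids alone + * are enough.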
*/ + int need_positions = iiCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + zStatement = "select rowid, content from %_content"; + break; + + case QUERY_FULLTEXT: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + DocList *pResult; + assert( argc==1 ); + rc = fulltext_query(v, zQuery, &pResult); + if( rc!=SQLITE_OK ) return rc; + readerInit(&c->result, pResult); + zStatement = "select rowid, content from %_content where rowid = ?"; + break; + } + + default: + assert( 0 ); + } + + rc = sql_prepare(v->db, v->zName, &c->pStmt, zStatement); + if( rc!=SQLITE_OK ) return rc; + + return fulltextNext(pCursor); +} + +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + const char *s; + + assert( idxCol==0 ); + s = (const char *) sqlite3_column_text(c->pStmt, 1); + sqlite3_result_text(pContext, s, -1, SQLITE_TRANSIENT); + + return SQLITE_OK; +} + +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Build a hash table containing all terms in zText. */ +static int build_terms(Hash *terms, sqlite3_tokenizer *pTokenizer, + const char *zText, sqlite_int64 iDocid){ + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + + int rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + HashInit(terms, HASH_STRING, 1); + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DocList *p; + + /* Positions can't be negative; we use -1 as a terminator internally. */ + if( iPosition<0 ) { + rc = SQLITE_ERROR; + goto err; + } + + p = HashFind(terms, pToken, nTokenBytes); + if( p==NULL ){ + p = docListNew(DL_POSITIONS_OFFSETS); + docListAddDocid(p, iDocid); + HashInsert(terms, pToken, nTokenBytes, p); + } + docListAddPosOffset(p, iPosition, iStartOffset, iEndOffset); + } + +err: + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} +/* Update the %_terms table to map the term [zTerm] to the given rowid. */ +static int index_insert_term(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iDocid, DocList *p){ + sqlite_int64 iFirst; + sqlite_int64 iIndexRow; + DocList doclist; + + int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); + if( rc==SQLITE_DONE ){ + docListInit(&doclist, DL_POSITIONS_OFFSETS, 0, 0); + if( docListUpdate(&doclist, iDocid, p) ){ + rc = term_insert(v, zTerm, nTerm, iDocid, &doclist); + docListDestroy(&doclist); + return rc; + } + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + /* This word is in the index; add this document ID to its blob. */ + + rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); + if( rc!=SQLITE_OK ) return rc; + + if( docListUpdate(&doclist, iDocid, p) ){ + /* If the blob is too big, split it in half. 
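+ * The new chunk is inserted under its own first docid, so the + * (term, first) index on %_term still locates the right rows.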
*/ + if( doclist.nData>CHUNK_MAX ){ + DocList half; + if( docListSplit(&doclist, &half) ){ + rc = term_insert(v, zTerm, nTerm, firstDocid(&half), &half); + docListDestroy(&half); + if( rc!=SQLITE_OK ) goto err; + } + } + rc = term_update(v, iIndexRow, &doclist); + } + +err: + docListDestroy(&doclist); + return rc; +} + +/* Insert a row into the full-text index; set *piRowid to be the ID of the + * new row. */ +static int index_insert(fulltext_vtab *v, + sqlite3_value *pRequestRowid, const char *zText, + sqlite_int64 *piRowid){ + Hash terms; /* maps term string -> PosList */ + HashElem *e; + + int rc = content_insert(v, pRequestRowid, zText, -1); + if( rc!=SQLITE_OK ) return rc; + *piRowid = sqlite3_last_insert_rowid(v->db); + + if( !zText ) return SQLITE_OK; /* nothing to index */ + + rc = build_terms(&terms, v->pTokenizer, zText, *piRowid); + if( rc!=SQLITE_OK ) return rc; + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + rc = index_insert_term(v, HashKey(e), HashKeysize(e), *piRowid, p); + if( rc!=SQLITE_OK ) break; + } + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + docListDelete(p); + } + HashClear(&terms); + return rc; +} + +static int index_delete_term(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iDocid){ + sqlite_int64 iFirst; + sqlite_int64 iIndexRow; + DocList doclist; + + int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); + if( rc!=SQLITE_OK ) return rc; + + if( docListUpdate(&doclist, iDocid, NULL) ){ + if( doclist.nData>0 ){ + rc = term_update(v, iIndexRow, &doclist); + } else { /* empty posting list */ + rc = term_delete(v, iIndexRow); + } + } + docListDestroy(&doclist); + return rc; +} + +/* Delete a row from the full-text index. 
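+ * The row's stored content is re-tokenized to find every term that + * references it; each term's doclist is then updated, and a term's row is + * dropped once its doclist becomes empty.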
*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + char *zText; + Hash terms; + HashElem *e; + + int rc = content_select(v, iRow, &zText); + if( rc!=SQLITE_OK ) return rc; + + rc = build_terms(&terms, v->pTokenizer, zText, iRow); + free(zText); + if( rc!=SQLITE_OK ) return rc; + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + rc = index_delete_term(v, HashKey(e), HashKeysize(e), iRow); + if( rc!=SQLITE_OK ) break; + } + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + docListDelete(p); + } + HashClear(&terms); + + return content_delete(v, iRow); +} + +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + + if( nArg<2 ){ + return index_delete(v, sqlite3_value_int64(ppArg[0])); + } + + if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + return SQLITE_ERROR; /* an update; not yet supported */ + } + + assert( nArg==3 ); /* ppArg[1] = rowid, ppArg[2] = content */ + return index_insert(v, ppArg[1], + (const char *)sqlite3_value_text(ppArg[2]), pRowid); +} + +static sqlite3_module fulltextModule = { + 0, + fulltextCreate, + fulltextConnect, + fulltextBestIndex, + fulltextDisconnect, + fulltextDestroy, + fulltextOpen, + fulltextClose, + fulltextFilter, + fulltextNext, + fulltextEof, + fulltextColumn, + fulltextRowid, + fulltextUpdate +}; + +int fulltext_init(sqlite3 *db){ + return sqlite3_create_module(db, "fulltext", &fulltextModule, 0); +} + +#if !SQLITE_CORE +int sqlite3_extension_init(sqlite3 *db, char **pzErrMsg, + const sqlite3_api_routines *pApi){ + SQLITE_EXTENSION_INIT2(pApi) + return fulltext_init(db); +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/fulltext.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/fulltext.h --- sqlite3-3.4.2/ext/fts1/fulltext.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/fulltext.h 2006-08-24 00:58:50.000000000 +0100 @@ -0,0 +1,11 @@ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int fulltext_init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/simple_tokenizer.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/simple_tokenizer.c --- sqlite3-3.4.2/ext/fts1/simple_tokenizer.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/simple_tokenizer.c 2006-08-30 22:40:30.000000000 +0100 @@ -0,0 +1,174 @@ +/* +** The author disclaims copyright to this source code. +** +************************************************************************* +** Implementation of the "simple" full-text-search tokenizer. +*/ + +#include <assert.h> +#if !defined(__APPLE__) +#include <malloc.h> +#else +#include <malloc/malloc.h> +#endif +#include <stdlib.h> +#include <string.h> +#include <ctype.h> + +#include "tokenizer.h" + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it's not part of the standard C library and + * may not be available everywhere.) */ +/* TODO(shess) Copied from fulltext.c, consider util.c for such +** things.
*/ +static char *string_dup(const char *s){ + char *str = malloc(strlen(s) + 1); + strcpy(str, s); + return str; +} + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + const char *zDelim; /* token delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + const char *pCurrent; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nTokenBytes; /* actual size of current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + +static sqlite3_tokenizer_module simpleTokenizerModule;/* forward declaration */ + +static int simpleCreate( + int argc, const char **argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) malloc(sizeof(simple_tokenizer)); + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + t->zDelim = string_dup(argv[1]); + } else { + /* Build a string excluding alphanumeric ASCII characters */ + char zDelim[0x80]; /* nul-terminated, so nul not a member */ + int i, j; + for(i=1, j=0; i<0x80; i++){ + if( !isalnum(i) ){ + zDelim[j++] = i; + } + } + zDelim[j++] = '\0'; + assert( j<=sizeof(zDelim) ); + t->zDelim = string_dup(zDelim); + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + simple_tokenizer *t = (simple_tokenizer *) pTokenizer; + + free((void *) t->zDelim); + free(t); + + return SQLITE_OK; +} + +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(simple_tokenizer_cursor)); + c->pInput = pInput; + c->nBytes = nBytes<0 ? (int) strlen(pInput) : nBytes; + c->pCurrent = c->pInput; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nTokenBytes = 0; + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + + if( NULL!=c->zToken ){ + free(c->zToken); + } + free(c); + + return SQLITE_OK; +} + +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + int ii; + + while( c->pCurrent-c->pInput<c->nBytes ){ + int n = (int) strcspn(c->pCurrent, t->zDelim); + if( n>0 ){ + if( n+1>c->nTokenAllocated ){ + c->zToken = realloc(c->zToken, n+1); + } + for(ii=0; ii<n; ii++){ + char ch = c->pCurrent[ii]; + c->zToken[ii] = (unsigned char)ch<0x80 ? tolower(ch) : ch; + } + c->zToken[n] = '\0'; + *ppToken = c->zToken; + *pnBytes = n; + *piStartOffset = (int) (c->pCurrent-c->pInput); + *piEndOffset = *piStartOffset+n; + *piPosition = c->iToken++; + c->pCurrent += n + 1; + + return SQLITE_OK; + } + c->pCurrent += n + 1; + /* TODO(shess) could strspn() to skip delimiters en masse.
Needs + ** to happen in two places, though, which is annoying. + */ + } + return SQLITE_DONE; +} + +static sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +void get_simple_tokenizer_module( + sqlite3_tokenizer_module **ppModule +){ + *ppModule = &simpleTokenizerModule; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts1/tokenizer.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts1/tokenizer.h --- sqlite3-3.4.2/ext/fts1/tokenizer.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts1/tokenizer.h 2006-08-24 00:58:50.000000000 +0100 @@ -0,0 +1,89 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _TOKENIZER_H_ +#define _TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. +*/ +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; + +struct sqlite3_tokenizer_module { + int iVersion; /* currently 0 */ + + /* + ** Create and destroy a tokenizer. argc/argv are passed down from + ** the fulltext virtual table creation to allow customization. + */ + int (*xCreate)(int argc, const char **argv, + sqlite3_tokenizer **ppTokenizer); + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Tokenize a particular input. Call xOpen() to prepare to + ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then + ** xClose() to free any internal state. The pInput passed to + ** xOpen() must exist until the cursor is closed. The ppToken + ** result from xNext() is only valid until the next call to xNext() + ** or until xClose() is called. + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xOpen)(sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor); + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + int (*xNext)(sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition); +}; + +struct sqlite3_tokenizer { + sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +/* +** Get the module for a tokenizer which generates tokens based on a +** set of non-token characters. 
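+** For example, the simple tokenizer turns the input "Right now!" into the
+** tokens "right" and "now" (ASCII characters are folded to lower case).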
The default is to break tokens at any +** non-alnum character, though the set of delimiters can also be +** specified by the first argv argument to xCreate(). +*/ +/* TODO(shess) This doesn't belong here. Need some sort of +** registration process. +*/ +void get_simple_tokenizer_module(sqlite3_tokenizer_module **ppModule); + +#endif /* _TOKENIZER_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2.c --- sqlite3-3.4.2/ext/fts2/fts2.c 2007-08-11 00:51:50.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2.c 2009-06-12 03:37:46.000000000 +0100 @@ -1,3 +1,27 @@ +/* fts2 has a design flaw which can lead to database corruption (see +** below). It is recommended not to use it any longer, instead use +** fts3 (or higher). If you believe that your use of fts2 is safe, +** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS. +*/ +#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \ + && !defined(SQLITE_ENABLE_BROKEN_FTS2) +#error fts2 has a design flaw and has been deprecated. +#endif +/* The flaw is that fts2 uses the content table's unaliased rowid as +** the unique docid. fts2 embeds the rowid in the index it builds, +** and expects the rowid to not change. The SQLite VACUUM operation +** will renumber such rowids, thereby breaking fts2. If you are using +** fts2 in a system which has disabled VACUUM, then you can continue +** to use it safely. Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** Unlike fts1, which is safe across VACUUM if you never delete +** documents, fts2 has a second exposure to this flaw, in the segments +** table. So fts2 should be considered unsafe across VACUUM in all +** cases. +*/ + /* ** 2006 Oct 10 ** @@ -250,7 +274,7 @@ ** even with many segments. ** ** TODO(shess) That said, it would be nice to have a better query-side -** argument for MERGE_COUNT of 16. Also, it's possible/likely that +** argument for MERGE_COUNT of 16. Also, it is possible/likely that ** optimizations to things like doclist merging will swing the sweet ** spot around. ** @@ -431,6 +455,7 @@ ** dataBufferInit - create a buffer with given initial capacity. ** dataBufferReset - forget buffer's data, retaining capacity. ** dataBufferDestroy - free buffer's data. +** dataBufferSwap - swap contents of two buffers. ** dataBufferExpand - expand capacity without adding data. ** dataBufferAppend - append data. ** dataBufferAppend2 - append two pieces of data at once. @@ -446,15 +471,20 @@ assert( nCapacity>=0 ); pBuffer->nData = 0; pBuffer->nCapacity = nCapacity; - pBuffer->pData = nCapacity==0 ? NULL : malloc(nCapacity); + pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); } static void dataBufferReset(DataBuffer *pBuffer){ pBuffer->nData = 0; } static void dataBufferDestroy(DataBuffer *pBuffer){ - if( pBuffer->pData!=NULL ) free(pBuffer->pData); + if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); SCRAMBLE(pBuffer); } +static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ + DataBuffer tmp = *pBuffer1; + *pBuffer1 = *pBuffer2; + *pBuffer2 = tmp; +} static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ assert( nAddCapacity>0 ); /* TODO(shess) Consider expanding more aggressively. 
Note that the @@ -463,7 +493,7 @@ */ if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ pBuffer->nCapacity = pBuffer->nData+nAddCapacity; - pBuffer->pData = realloc(pBuffer->pData, pBuffer->nCapacity); + pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); } } static void dataBufferAppend(DataBuffer *pBuffer, @@ -670,7 +700,7 @@ #ifndef NDEBUG /* Verify that the doclist can be validly decoded. Also returns the -** last docid found because it's convenient in other assertions for +** last docid found because it is convenient in other assertions for ** DLWriter. */ static void docListValidate(DocListType iType, const char *pData, int nData, @@ -1033,7 +1063,7 @@ /* TODO(shess) This could also be done by calling plwTerminate() and ** dataBufferAppend(). I tried that, expecting nominal performance ** differences, but it seemed to pretty reliably be worth 1% to code -** it this way. I suspect it's the incremental malloc overhead (some +** it this way. I suspect it is the incremental malloc overhead (some ** percentage of the plwTerminate() calls will cause a realloc), so ** this might be worth revisiting if the DataBuffer implementation ** changes. @@ -1058,7 +1088,7 @@ } static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ - DLCollector *pCollector = malloc(sizeof(DLCollector)); + DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); dataBufferInit(&pCollector->b, 0); dlwInit(&pCollector->dlw, iType, &pCollector->b); plwInit(&pCollector->plw, &pCollector->dlw, iDocid); @@ -1069,7 +1099,7 @@ dlwDestroy(&pCollector->dlw); dataBufferDestroy(&pCollector->b); SCRAMBLE(pCollector); - free(pCollector); + sqlite3_free(pCollector); } @@ -1328,7 +1358,7 @@ DLWriter writer; if( nLeft==0 ){ - dataBufferAppend(pOut, pRight, nRight); + if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); return; } if( nRight==0 ){ @@ -1509,7 +1539,7 @@ DLWriter writer; if( nLeft==0 ){ - dataBufferAppend(pOut, pRight, nRight); + if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight); return; } if( nRight==0 ){ @@ -1584,14 +1614,14 @@ } static char *string_dup_n(const char *s, int n){ - char *str = malloc(n + 1); + char *str = sqlite3_malloc(n + 1); memcpy(str, s, n); str[n] = '\0'; return str; } /* Duplicate a string; the caller must free() the returned string. - * (We don't use strdup() since it's not part of the standard C library and + * (We don't use strdup() since it is not part of the standard C library and * may not be available everywhere.) 
*/ static char *string_dup(const char *s){ return string_dup_n(s, strlen(s)); @@ -1617,7 +1647,7 @@ } len += 1; /* for null terminator */ - r = result = malloc(len); + r = result = sqlite3_malloc(len); for(p = zFormat; *p; ++p){ if( *p=='%' ){ memcpy(r, zDb, nDb); @@ -1640,7 +1670,7 @@ int rc; TRACE(("FTS2 sql: %s\n", zCommand)); rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); - free(zCommand); + sqlite3_free(zCommand); return rc; } @@ -1650,7 +1680,7 @@ int rc; TRACE(("FTS2 prepare: %s\n", zCommand)); rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL); - free(zCommand); + sqlite3_free(zCommand); return rc; } @@ -1744,17 +1774,22 @@ CONTENT_SELECT_STMT, CONTENT_UPDATE_STMT, CONTENT_DELETE_STMT, + CONTENT_EXISTS_STMT, BLOCK_INSERT_STMT, BLOCK_SELECT_STMT, BLOCK_DELETE_STMT, + BLOCK_DELETE_ALL_STMT, SEGDIR_MAX_INDEX_STMT, SEGDIR_SET_STMT, - SEGDIR_SELECT_STMT, + SEGDIR_SELECT_LEVEL_STMT, SEGDIR_SPAN_STMT, SEGDIR_DELETE_STMT, + SEGDIR_SELECT_SEGMENT_STMT, SEGDIR_SELECT_ALL_STMT, + SEGDIR_DELETE_ALL_STMT, + SEGDIR_COUNT_STMT, MAX_STMT /* Always at end! */ } fulltext_statement; @@ -1769,22 +1804,34 @@ /* CONTENT_SELECT */ "select * from %_content where rowid = ?", /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + /* CONTENT_EXISTS */ "select rowid from %_content limit 1", /* BLOCK_INSERT */ "insert into %_segments values (?)", /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", + /* BLOCK_DELETE_ALL */ "delete from %_segments", /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", - /* SEGDIR_SELECT */ + /* SEGDIR_SELECT_LEVEL */ "select start_block, leaves_end_block, root from %_segdir " " where level = ? order by idx", /* SEGDIR_SPAN */ "select min(start_block), max(end_block) from %_segdir " " where level = ? and start_block <> 0", /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + + /* NOTE(shess): The first three results of the following two + ** statements must match. + */ + /* SEGDIR_SELECT_SEGMENT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? and idx = ?", /* SEGDIR_SELECT_ALL */ - "select root, leaves_end_block from %_segdir order by level desc, idx", + "select start_block, leaves_end_block, root from %_segdir " + " order by level desc, idx asc", + /* SEGDIR_DELETE_ALL */ "delete from %_segdir", + /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", }; /* @@ -1914,7 +1961,7 @@ } rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], zStmt); - if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); + if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); if( rc!=SQLITE_OK ) return rc; } else { int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); @@ -1935,15 +1982,18 @@ } /* Like sql_get_statement(), but for special replicated LEAF_SELECT -** statements. +** statements. idx -1 is a special case for an uncached version of +** the statement (used in the optimize implementation). */ /* TODO(shess) Write version for generic statements and then share ** that between the cached-statement functions. 
*/ static int sql_get_leaf_statement(fulltext_vtab *v, int idx, sqlite3_stmt **ppStmt){ - assert( idx>=0 && idx<MERGE_COUNT ); - if( v->pLeafSelectStmts[idx]==NULL ){ + assert( idx>=-1 && idx<MERGE_COUNT ); + if( idx==-1 ){ + return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); + }else if( v->pLeafSelectStmts[idx]==NULL ){ int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], LEAF_SELECT); if( rc!=SQLITE_OK ) return rc; @@ -1999,9 +2049,9 @@ int i; for (i=0 ; i < nString ; ++i) { - if( pString[i]!=NULL ) free((void *) pString[i]); + if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); } - free((void *) pString); + sqlite3_free((void *) pString); } /* select * from %_content where rowid = [iRow] @@ -2028,7 +2078,7 @@ rc = sqlite3_step(s); if( rc!=SQLITE_ROW ) return rc; - values = (const char **) malloc(v->nColumn * sizeof(const char *)); + values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); for(i=0; i<v->nColumn; ++i){ if( sqlite3_column_type(s, i)==SQLITE_NULL ){ values[i] = NULL; @@ -2061,6 +2111,25 @@ return sql_single_step(s); } +/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if +** no rows exist, and any error in case of failure. +*/ +static int content_exists(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ROW; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + /* insert into %_segments values ([pData]) ** returns assigned rowid in *piBlockid */ @@ -2234,6 +2303,54 @@ return sql_single_step(s); } +/* Delete entire fts index, SQLITE_OK on success, relevant error on +** failure. +*/ +static int segdir_delete_all(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_single_step(s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_OK with *pnSegments set to the number of entries in +** %_segdir and *piMaxLevel set to the highest level which has a +** segment. Otherwise returns the SQLite error which caused failure. +*/ +static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* TODO(shess): This case should not be possible? Should stronger + ** measures be taken if it happens? + */ + if( rc==SQLITE_DONE ){ + *pnSegments = 0; + *piMaxLevel = 0; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + *pnSegments = sqlite3_column_int(s, 0); + *piMaxLevel = sqlite3_column_int(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + /* TODO(shess) clearPendingTerms() is far down the file because ** writeZeroSegment() is far down the file because LeafWriter is far ** down the file.
Consider refactoring the code to move the non-vtab @@ -2270,12 +2387,12 @@ clearPendingTerms(v); - free(v->azColumn); + sqlite3_free(v->azColumn); for(i = 0; i < v->nColumn; ++i) { sqlite3_free(v->azContentColumn[i]); } - free(v->azContentColumn); - free(v); + sqlite3_free(v->azContentColumn); + sqlite3_free(v); } /* @@ -2386,7 +2503,7 @@ */ static char **tokenizeString(const char *z, int *pnToken){ int nToken = 0; - Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); + Token *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); int n = 1; int e, i; int totalSize = 0; @@ -2402,7 +2519,7 @@ } z += n; } - azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); + azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); zCopy = (char*)&azToken[nToken]; nToken--; for(i=0; iazColumn); - free(p->azContentColumn); - free(p->azTokenizer); + sqlite3_free(p->azColumn); + sqlite3_free(p->azContentColumn); + sqlite3_free(p->azTokenizer); } /* Parse a CREATE VIRTUAL TABLE statement, which looks like this: @@ -2588,7 +2705,7 @@ for(i=n=0; iazContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); + pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); if( pSpec->azContentColumn==0 ){ clearTableSpec(pSpec); return SQLITE_NOMEM; @@ -2702,7 +2819,7 @@ char const *zTok; /* Name of tokenizer to use for this fts table */ int nTok; /* Length of zTok, including nul terminator */ - v = (fulltext_vtab *) malloc(sizeof(fulltext_vtab)); + v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); if( v==0 ) return SQLITE_NOMEM; CLEAR(v); /* sqlite will initialize v->base */ @@ -2889,12 +3006,16 @@ static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ fulltext_cursor *c; - c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); - /* sqlite will initialize c->base */ - *ppCursor = &c->base; - TRACE(("FTS2 Open %p: %p\n", pVTab, c)); - - return SQLITE_OK; + c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); + if( c ){ + memset(c, 0, sizeof(fulltext_cursor)); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS2 Open %p: %p\n", pVTab, c)); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } } @@ -2903,9 +3024,9 @@ static void queryClear(Query *q){ int i; for(i = 0; i < q->nTerms; ++i){ - free(q->pTerms[i].pTerm); + sqlite3_free(q->pTerms[i].pTerm); } - free(q->pTerms); + sqlite3_free(q->pTerms); CLEAR(q); } @@ -2913,9 +3034,9 @@ ** Snippet */ static void snippetClear(Snippet *p){ - free(p->aMatch); - free(p->zOffset); - free(p->zSnippet); + sqlite3_free(p->aMatch); + sqlite3_free(p->zOffset); + sqlite3_free(p->zSnippet); CLEAR(p); } /* @@ -2930,7 +3051,7 @@ struct snippetMatch *pMatch; if( p->nMatch+1>=p->nAlloc ){ p->nAlloc = p->nAlloc*2 + 10; - p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); if( p->aMatch==0 ){ p->nMatch = 0; p->nAlloc = 0; @@ -3071,8 +3192,8 @@ for(i=0; inMatch; i++){ struct snippetMatch *pMatch = &p->aMatch[i]; zBuf[0] = ' '; - sprintf(&zBuf[cnt>0], "%d %d %d %d", pMatch->iCol, - pMatch->iTerm, pMatch->iStart, pMatch->nByte); + sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", + pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); append(&sb, zBuf); cnt++; } @@ -3157,7 +3278,7 @@ int iMatch; - free(pCursor->snippet.zSnippet); + sqlite3_free(pCursor->snippet.zSnippet); pCursor->snippet.zSnippet = 0; aMatch = pCursor->snippet.aMatch; nMatch = pCursor->snippet.nMatch; 
@@ -3259,7 +3380,7 @@ snippetClear(&c->snippet); if( c->result.nData!=0 ) dlrDestroy(&c->reader); dataBufferDestroy(&c->result); - free(c); + sqlite3_free(c); return SQLITE_OK; } @@ -3364,14 +3485,14 @@ static void queryAdd(Query *q, const char *pTerm, int nTerm){ QueryTerm *t; ++q->nTerms; - q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + q->pTerms = sqlite3_realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); if( q->pTerms==0 ){ q->nTerms = 0; return; } t = &q->pTerms[q->nTerms - 1]; CLEAR(t); - t->pTerm = malloc(nTerm+1); + t->pTerm = sqlite3_malloc(nTerm+1); memcpy(t->pTerm, pTerm, nTerm); t->pTerm[nTerm] = 0; t->nTerm = nTerm; @@ -3656,18 +3777,38 @@ fulltext_cursor *c = (fulltext_cursor *) pCursor; fulltext_vtab *v = cursor_vtab(c); int rc; - char *zSql; TRACE(("FTS2 Filter %p\n",pCursor)); - zSql = sqlite3_mprintf("select rowid, * from %%_content %s", - idxNum==QUERY_GENERIC ? "" : "where rowid=?"); - sqlite3_finalize(c->pStmt); - rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); - sqlite3_free(zSql); - if( rc!=SQLITE_OK ) return rc; + /* If the cursor has a statement that was not prepared according to + ** idxNum, clear it. I believe all calls to fulltextFilter with a + ** given cursor will have the same idxNum , but in this case it's + ** easy to be safe. + */ + if( c->pStmt && c->iCursorType!=idxNum ){ + sqlite3_finalize(c->pStmt); + c->pStmt = NULL; + } + + /* Get a fresh statement appropriate to idxNum. */ + /* TODO(shess): Add a prepared-statement cache in the vt structure. + ** The cache must handle multiple open cursors. Easier to cache the + ** statement variants at the vt to reduce malloc/realloc/free here. + ** Or we could have a StringBuffer variant which allowed stack + ** construction for small values. + */ + if( !c->pStmt ){ + char *zSql = sqlite3_mprintf("select rowid, * from %%_content %s", + idxNum==QUERY_GENERIC ? "" : "where rowid=?"); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) return rc; + c->iCursorType = idxNum; + }else{ + sqlite3_reset(c->pStmt); + assert( c->iCursorType==idxNum ); + } - c->iCursorType = idxNum; switch( idxNum ){ case QUERY_GENERIC: break; @@ -3762,17 +3903,18 @@ if( rc!=SQLITE_OK ) return rc; pCursor->pTokenizer = pTokenizer; - while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, - &pToken, &nTokenBytes, - &iStartOffset, &iEndOffset, - &iPosition) ){ + while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition)) ){ DLCollector *p; int nData; /* Size of doclist before our update. */ - /* Positions can't be negative; we use -1 as a terminator internally. */ - if( iPosition<0 ){ - pTokenizer->pModule->xClose(pCursor); - return SQLITE_ERROR; + /* Positions can't be negative; we use -1 as a terminator + * internally. Token can't be NULL or empty. */ + if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ + rc = SQLITE_ERROR; + break; } p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes); @@ -3801,6 +3943,7 @@ ** not durable. 
*ponder* */ pTokenizer->pModule->xClose(pCursor); + if( SQLITE_DONE == rc ) return SQLITE_OK; return rc; } @@ -3935,18 +4078,20 @@ static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock, const char *pTerm, int nTerm){ - InteriorBlock *block = calloc(1, sizeof(InteriorBlock)); + InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock)); char c[VARINT_MAX+VARINT_MAX]; int n; - dataBufferInit(&block->term, 0); - dataBufferReplace(&block->term, pTerm, nTerm); - - n = putVarint(c, iHeight); - n += putVarint(c+n, iChildBlock); - dataBufferInit(&block->data, INTERIOR_MAX); - dataBufferReplace(&block->data, c, n); - + if( block ){ + memset(block, 0, sizeof(*block)); + dataBufferInit(&block->term, 0); + dataBufferReplace(&block->term, pTerm, nTerm); + + n = putVarint(c, iHeight); + n += putVarint(c+n, iChildBlock); + dataBufferInit(&block->data, INTERIOR_MAX); + dataBufferReplace(&block->data, c, n); + } return block; } @@ -4111,11 +4256,11 @@ block = block->next; dataBufferDestroy(&b->term); dataBufferDestroy(&b->data); - free(b); + sqlite3_free(b); } if( pWriter->parentWriter!=NULL ){ interiorWriterDestroy(pWriter->parentWriter); - free(pWriter->parentWriter); + sqlite3_free(pWriter->parentWriter); } dataBufferDestroy(&pWriter->term); SCRAMBLE(pWriter); @@ -4149,7 +4294,7 @@ if( rc!=SQLITE_OK ) return rc; *piEndBlockid = iBlockid; - pWriter->parentWriter = malloc(sizeof(*pWriter->parentWriter)); + pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter)); interiorWriterInit(pWriter->iHeight+1, block->term.pData, block->term.nData, iBlockid, pWriter->parentWriter); @@ -4186,6 +4331,7 @@ } InteriorReader; static void interiorReaderDestroy(InteriorReader *pReader){ + dataBufferDestroy(&pReader->term); SCRAMBLE(pReader); } @@ -4916,6 +5062,12 @@ } static void leavesReaderDestroy(LeavesReader *pReader){ + /* If idx is -1, that means we're using a non-cached statement + ** handle in the optimize() case, so we need to release it. + */ + if( pReader->pStmt!=NULL && pReader->idx==-1 ){ + sqlite3_finalize(pReader->pStmt); + } leafReaderDestroy(&pReader->leafReader); dataBufferDestroy(&pReader->rootData); SCRAMBLE(pReader); @@ -5036,7 +5188,7 @@ static int leavesReadersInit(fulltext_vtab *v, int iLevel, LeavesReader *pReaders, int *piReaders){ sqlite3_stmt *s; - int i, rc = sql_get_statement(v, SEGDIR_SELECT_STMT, &s); + int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); if( rc!=SQLITE_OK ) return rc; rc = sqlite3_bind_int(s, 1, iLevel); @@ -5188,6 +5340,26 @@ return rc; } +/* Accumulate the union of *acc and *pData into *acc. */ +static void docListAccumulateUnion(DataBuffer *acc, + const char *pData, int nData) { + DataBuffer tmp = *acc; + dataBufferInit(acc, tmp.nData+nData); + docListUnion(tmp.pData, tmp.nData, pData, nData, acc); + dataBufferDestroy(&tmp); +} + +/* TODO(shess) It might be interesting to explore different merge +** strategies, here. For instance, since this is a sorted merge, we +** could easily merge many doclists in parallel. With some +** comprehension of the storage format, we could merge all of the +** doclists within a leaf node directly from the leaf node's storage. +** It may be worthwhile to merge smaller doclists before larger +** doclists, since they can be traversed more quickly - but the +** results may have less overlap, making them more expensive in a +** different way. +*/ + /* Scan pReader for pTerm/nTerm, and merge the term's doclist over ** *out (any doclists with duplicate docids overwrite those in *out). 
** Internal function for loadSegmentLeaf(). @@ -5195,39 +5367,116 @@ static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader, const char *pTerm, int nTerm, int isPrefix, DataBuffer *out){ + /* doclist data is accumulated into pBuffers similar to how one does + ** increment in binary arithmetic. If index 0 is empty, the data is + ** stored there. If there is data there, it is merged and the + ** results carried into position 1, with further merge-and-carry + ** until an empty position is found. + */ + DataBuffer *pBuffers = NULL; + int nBuffers = 0, nMaxBuffers = 0, rc; + assert( nTerm>0 ); - /* Process while the prefix matches. */ - while( !leavesReaderAtEnd(pReader) ){ + for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); + rc=leavesReaderStep(v, pReader)){ /* TODO(shess) Really want leavesReaderTermCmp(), but that name is ** already taken to compare the terms of two LeavesReaders. Think ** on a better name. [Meanwhile, break encapsulation rather than ** use a confusing name.] */ - int rc; int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c>0 ) break; /* Past any possible matches. */ if( c==0 ){ const char *pData = leavesReaderData(pReader); - int nData = leavesReaderDataBytes(pReader); - if( out->nData==0 ){ - dataBufferReplace(out, pData, nData); + int iBuffer, nData = leavesReaderDataBytes(pReader); + + /* Find the first empty buffer. */ + for(iBuffer=0; iBuffer0 ){ + assert(pBuffers!=NULL); + memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); + sqlite3_free(pBuffers); + } + pBuffers = p; + } + dataBufferInit(&(pBuffers[nBuffers]), 0); + nBuffers++; + } + + /* At this point, must have an empty at iBuffer. */ + assert(iBuffernData+nData); - docListUnion(out->pData, out->nData, pData, nData, &result); - dataBufferDestroy(out); - *out = result; - /* TODO(shess) Rather than destroy out, we could retain it for - ** later reuse. + /* pAcc is the empty buffer the merged data will end up in. */ + DataBuffer *pAcc = &(pBuffers[iBuffer]); + DataBuffer *p = &(pBuffers[0]); + + /* Handle position 0 specially to avoid need to prime pAcc + ** with pData/nData. */ + dataBufferSwap(p, pAcc); + docListAccumulateUnion(pAcc, pData, nData); + + /* Accumulate remaining doclists into pAcc. */ + for(++p; ppData, p->nData); + + /* dataBufferReset() could allow a large doclist to blow up + ** our memory requirements. + */ + if( p->nCapacity<1024 ){ + dataBufferReset(p); + }else{ + dataBufferDestroy(p); + dataBufferInit(p, 0); + } + } } } - if( c>0 ) break; /* Past any possible matches. */ + } - rc = leavesReaderStep(v, pReader); - if( rc!=SQLITE_OK ) return rc; + /* Union all the doclists together into *out. */ + /* TODO(shess) What if *out is big? Sigh. */ + if( rc==SQLITE_OK && nBuffers>0 ){ + int iBuffer; + for(iBuffer=0; iBuffer0 ){ + if( out->nData==0 ){ + dataBufferSwap(out, &(pBuffers[iBuffer])); + }else{ + docListAccumulateUnion(out, pBuffers[iBuffer].pData, + pBuffers[iBuffer].nData); + } + } + } } - return SQLITE_OK; + + while( nBuffers-- ){ + dataBufferDestroy(&(pBuffers[nBuffers])); + } + if( pBuffers!=NULL ) sqlite3_free(pBuffers); + + return rc; } /* Call loadSegmentLeavesInt() with pData/nData as input. */ @@ -5278,7 +5527,7 @@ /* TODO(shess) The calling code may already know that the end child is ** not worth calculating, because the end may be in a later sibling ** node. Consider whether breaking symmetry is worthwhile. I suspect -** it's not worthwhile. +** it is not worthwhile. 
*/ static void getChildrenContaining(const char *pData, int nData, const char *pTerm, int nTerm, int isPrefix, @@ -5477,8 +5726,8 @@ ** elements for given docids overwrite older elements. */ while( (rc = sqlite3_step(s))==SQLITE_ROW ){ - const char *pData = sqlite3_column_blob(s, 0); - const int nData = sqlite3_column_bytes(s, 0); + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, &doclist); @@ -5538,7 +5787,7 @@ if( rc!=SQLITE_OK ) return rc; n = fts2HashCount(pTerms); - pData = malloc(n*sizeof(TermData)); + pData = sqlite3_malloc(n*sizeof(TermData)); for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){ assert( iMERGE_COUNT segments, and would +** also need to be able to optionally optimize away deletes. +*/ +typedef struct OptLeavesReader { + /* Segment number, to order readers by age. */ + int segment; + LeavesReader reader; +} OptLeavesReader; + +static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ + return leavesReaderAtEnd(&pReader->reader); +} +static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ + return leavesReaderTermBytes(&pReader->reader); +} +static const char *optLeavesReaderData(OptLeavesReader *pReader){ + return leavesReaderData(&pReader->reader); +} +static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ + return leavesReaderDataBytes(&pReader->reader); +} +static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ + return leavesReaderTerm(&pReader->reader); +} +static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ + return leavesReaderStep(v, &pReader->reader); +} +static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + return leavesReaderTermCmp(&lr1->reader, &lr2->reader); +} +/* Order by term ascending, segment ascending (oldest to newest), with +** exhausted readers to the end. +*/ +static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + int c = optLeavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->segment-lr2->segment; +} +/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that +** pLr[1..nLr-1] is already sorted. +*/ +static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ + while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ + OptLeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* optimize() helper function. Put the readers in order and iterate +** through them, merging doclists for matching terms into pWriter. +** Returns SQLITE_OK on success, or the SQLite error code which +** prevented success. +*/ +static int optimizeInternal(fulltext_vtab *v, + OptLeavesReader *readers, int nReaders, + LeafWriter *pWriter){ + int i, rc = SQLITE_OK; + DataBuffer doclist, merged, tmp; + + /* Order the readers. */ + i = nReaders; + while( i-- > 0 ){ + optLeavesReaderReorder(&readers[i], nReaders-i); + } + + dataBufferInit(&doclist, LEAF_MAX); + dataBufferInit(&merged, LEAF_MAX); + + /* Exhausted readers bubble to the end, so when the first reader is + ** at eof, all are at eof. + */ + while( !optLeavesReaderAtEnd(&readers[0]) ){ + + /* Figure out how many readers share the next term. */ + for(i=1; i 0 ){ + dlrDestroy(&dlReaders[nReaders]); + } + + /* Accumulated doclist to reader 0 for next pass. 
*/ + dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); + } + + /* Destroy reader that was left in the pipeline. */ + dlrDestroy(&dlReaders[0]); + + /* Trim deletions from the doclist. */ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + -1, DL_DEFAULT, &merged); + } + + /* Only pass doclists with hits (skip if all hits deleted). */ + if( merged.nData>0 ){ + rc = leafWriterStep(v, pWriter, + optLeavesReaderTerm(&readers[0]), + optLeavesReaderTermBytes(&readers[0]), + merged.pData, merged.nData); + if( rc!=SQLITE_OK ) goto err; + } + + /* Step merged readers to next term and reorder. */ + while( i-- > 0 ){ + rc = optLeavesReaderStep(v, &readers[i]); + if( rc!=SQLITE_OK ) goto err; + + optLeavesReaderReorder(&readers[i], nReaders-i); + } + } + + err: + dataBufferDestroy(&doclist); + dataBufferDestroy(&merged); + return rc; +} + +/* Implement optimize() function for FTS3. optimize(t) merges all +** segments in the fts index into a single segment. 't' is the magic +** table-named column. +*/ +static void optimizeFunc(sqlite3_context *pContext, + int argc, sqlite3_value **argv){ + fulltext_cursor *pCursor; + if( argc>1 ){ + sqlite3_result_error(pContext, "excess arguments to optimize()",-1); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to optimize",-1); + }else{ + fulltext_vtab *v; + int i, rc, iMaxLevel; + OptLeavesReader *readers; + int nReaders; + LeafWriter writer; + sqlite3_stmt *s; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* Flush any buffered updates before optimizing. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) goto err; + + rc = segdir_count(v, &nReaders, &iMaxLevel); + if( rc!=SQLITE_OK ) goto err; + if( nReaders==0 || nReaders==1 ){ + sqlite3_result_text(pContext, "Index already optimal", -1, + SQLITE_STATIC); + return; + } + + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) goto err; + + readers = sqlite3_malloc(nReaders*sizeof(readers[0])); + if( readers==NULL ) goto err; + + /* Note that there will already be a segment at this position + ** until we call segdir_delete() on iMaxLevel. + */ + leafWriterInit(iMaxLevel, 0, &writer); + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i 0 ){ + leavesReaderDestroy(&readers[i].reader); + } + sqlite3_free(readers); + + /* If we've successfully gotten to here, delete the old segments + ** and flush the interior structure of the new segment. + */ + if( rc==SQLITE_OK ){ + for( i=0; i<=iMaxLevel; i++ ){ + rc = segdir_delete(v, i); + if( rc!=SQLITE_OK ) break; + } + + if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); + } + + leafWriterDestroy(&writer); + + if( rc!=SQLITE_OK ) goto err; + + sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); + return; + + /* TODO(shess): Error-handling needs to be improved along the + ** lines of the dump_ functions. + */ + err: + { + char buf[512]; + sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", + sqlite3_errmsg(sqlite3_context_db_handle(pContext))); + sqlite3_result_error(pContext, buf, -1); + } + } +} + +#ifdef SQLITE_TEST +/* Generate an error of the form ": ". 
If msg is NULL, +** pull the error from the context's db handle. +*/ +static void generateError(sqlite3_context *pContext, + const char *prefix, const char *msg){ + char buf[512]; + if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); + sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); + sqlite3_result_error(pContext, buf, -1); +} + +/* Helper function to collect the set of terms in the segment into +** pTerms. The segment is defined by the leaf nodes between +** iStartBlockid and iEndBlockid, inclusive, or by the contents of +** pRootData if iStartBlockid is 0 (in which case the entire segment +** fit in a leaf). +*/ +static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, + fts2Hash *pTerms){ + const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); + const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + const int nRootData = sqlite3_column_bytes(s, 2); + LeavesReader reader; + int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, + pRootData, nRootData, &reader); + if( rc!=SQLITE_OK ) return rc; + + while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ + const char *pTerm = leavesReaderTerm(&reader); + const int nTerm = leavesReaderTermBytes(&reader); + void *oldValue = sqlite3Fts2HashFind(pTerms, pTerm, nTerm); + void *newValue = (void *)((char *)oldValue+1); + + /* From the comment before sqlite3Fts2HashInsert in fts2_hash.c, + ** the data value passed is returned in case of malloc failure. + */ + if( newValue==sqlite3Fts2HashInsert(pTerms, pTerm, nTerm, newValue) ){ + rc = SQLITE_NOMEM; + }else{ + rc = leavesReaderStep(v, &reader); + } + } + + leavesReaderDestroy(&reader); + return rc; +} + +/* Helper function to build the result string for dump_terms(). */ +static int generateTermsResult(sqlite3_context *pContext, fts2Hash *pTerms){ + int iTerm, nTerms, nResultBytes, iByte; + char *result; + TermData *pData; + fts2HashElem *e; + + /* Iterate pTerms to generate an array of terms in pData for + ** sorting. + */ + nTerms = fts2HashCount(pTerms); + assert( nTerms>0 ); + pData = sqlite3_malloc(nTerms*sizeof(TermData)); + if( pData==NULL ) return SQLITE_NOMEM; + + nResultBytes = 0; + for(iTerm = 0, e = fts2HashFirst(pTerms); e; iTerm++, e = fts2HashNext(e)){ + nResultBytes += fts2HashKeysize(e)+1; /* Term plus trailing space */ + assert( iTerm0 ); /* nTerms>0, nResultsBytes must be, too. */ + result = sqlite3_malloc(nResultBytes); + if( result==NULL ){ + sqlite3_free(pData); + return SQLITE_NOMEM; + } + + if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); + + /* Read the terms in order to build the result. */ + iByte = 0; + for(iTerm=0; iTerm0 ){ + rc = generateTermsResult(pContext, &terms); + if( rc==SQLITE_NOMEM ){ + generateError(pContext, "dump_terms", "out of memory"); + }else{ + assert( rc==SQLITE_OK ); + } + }else if( argc==3 ){ + /* The specific segment asked for could not be found. */ + generateError(pContext, "dump_terms", "segment not found"); + }else{ + /* No segments found. */ + /* TODO(shess): It should be impossible to reach this. This + ** case can only happen for an empty table, in which case + ** SQLite has no rows to call this function on. + */ + sqlite3_result_null(pContext); + } + } + sqlite3Fts2HashClear(&terms); + } +} + +/* Expand the DL_DEFAULT doclist in pData into a text result in +** pContext. 
+*/ +static void createDoclistResult(sqlite3_context *pContext, + const char *pData, int nData){ + DataBuffer dump; + DLReader dlReader; + + assert( pData!=NULL && nData>0 ); + + dataBufferInit(&dump, 0); + dlrInit(&dlReader, DL_DEFAULT, pData, nData); + for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ + char buf[256]; + PLReader plReader; + + plrInit(&plReader, &dlReader); + if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ + sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); + dataBufferAppend(&dump, buf, strlen(buf)); + }else{ + int iColumn = plrColumn(&plReader); + + sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", + dlrDocid(&dlReader), iColumn); + dataBufferAppend(&dump, buf, strlen(buf)); + + for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ + if( plrColumn(&plReader)!=iColumn ){ + iColumn = plrColumn(&plReader); + sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, buf, strlen(buf)); + } + if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", + plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + }else if( DL_DEFAULT==DL_POSITIONS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); + }else{ + assert( NULL=="Unhandled DL_DEFAULT value"); + } + dataBufferAppend(&dump, buf, strlen(buf)); + } + plrDestroy(&plReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, "]] ", 3); + } + } + dlrDestroy(&dlReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dump.pData[dump.nData] = '\0'; + assert( dump.nData>0 ); + + /* Passes ownership of dump's buffer to pContext. */ + sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); + dump.pData = NULL; + dump.nData = dump.nCapacity = 0; +} + +/* Implements dump_doclist() for use in inspecting the fts2 index from +** tests. TEXT result containing a string representation of the +** doclist for the indicated term. dump_doclist(t, term, level, idx) +** dumps the doclist for term from the segment specified by level, idx +** (in %_segdir), while dump_doclist(t, term) dumps the logical +** doclist for the term across all segments. The per-segment doclist +** can contain deletions, while the full-index doclist will not +** (deletions are omitted). +** +** Result formats differ with the setting of DL_DEFAULTS. Examples: +** +** DL_DOCIDS: [1] [3] [7] +** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] +** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] +** +** In each case the number after the outer '[' is the docid. In the +** latter two cases, the number before the inner '[' is the column +** associated with the values within. For DL_POSITIONS the numbers +** within are the positions, for DL_POSITIONS_OFFSETS they are the +** position, the start offset, and the end offset. 
+*/ +static void dumpDoclistFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=2 && argc!=4 ){ + generateError(pContext, "dump_doclist", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_doclist", "illegal first argument"); + }else if( sqlite3_value_text(argv[1])==NULL || + sqlite3_value_text(argv[1])[0]=='\0' ){ + generateError(pContext, "dump_doclist", "empty second argument"); + }else{ + const char *pTerm = (const char *)sqlite3_value_text(argv[1]); + const int nTerm = strlen(pTerm); + fulltext_vtab *v; + int rc; + DataBuffer doclist; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + dataBufferInit(&doclist, 0); + + /* termSelect() yields the same logical doclist that queries are + ** run against. + */ + if( argc==2 ){ + rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); + }else{ + sqlite3_stmt *s = NULL; + + /* Get our specific segment's information. */ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); + } + } + + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + if( rc==SQLITE_DONE ){ + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "segment not found"); + return; + } + + /* Found a segment, load it into doclist. */ + if( rc==SQLITE_ROW ){ + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + + /* loadSegment() is used by termSelect() to load each + ** segment's data. + */ + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, + &doclist); + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + /* Should not have more than one matching segment. */ + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "invalid segdir"); + return; + } + rc = SQLITE_OK; + } + } + } + + sqlite3_reset(s); + } + + if( rc==SQLITE_OK ){ + if( doclist.nData>0 ){ + createDoclistResult(pContext, doclist.pData, doclist.nData); + }else{ + /* TODO(shess): This can happen if the term is not present, or + ** if all instances of the term have been deleted and this is + ** an all-index dump. It may be interesting to distinguish + ** these cases. + */ + sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); + } + }else if( rc==SQLITE_NOMEM ){ + /* Handle out-of-memory cases specially because if they are + ** generated in fts2 code they may not be reflected in the db + ** handle. + */ + /* TODO(shess): Handle this more comprehensively. + ** sqlite3ErrStr() has what I need, but is internal. + */ + generateError(pContext, "dump_doclist", "out of memory"); + }else{ + generateError(pContext, "dump_doclist", NULL); + } + + dataBufferDestroy(&doclist); + } +} +#endif + /* ** This routine implements the xFindFunction method for the FTS2 ** virtual table. @@ -5764,6 +6689,23 @@ }else if( strcmp(zName,"offsets")==0 ){ *pxFunc = snippetOffsetsFunc; return 1; + }else if( strcmp(zName,"optimize")==0 ){ + *pxFunc = optimizeFunc; + return 1; +#ifdef SQLITE_TEST + /* NOTE(shess): These functions are present only for testing + ** purposes. No particular effort is made to optimize their + ** execution or how they build their results. 
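/* A hedged usage sketch, not taken from the patch: with the overloads
** registered above, optimize() (and, in SQLITE_TEST builds, dump_doclist())
** are invoked through SQL against the fts2 table's magic table-named
** column.  The table name "t" and the FROM/LIMIT framing are assumptions
** made for illustration only.
*/
#include <stdio.h>
#include "sqlite3.h"

static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
  (void)pArg; (void)nCol; (void)azCol;
  printf("%s\n", azVal[0] ? azVal[0] : "(null)");
  return 0;
}

static int optimizeAndDump(sqlite3 *db, const char *zTerm){
  char *zSql;
  int rc = sqlite3_exec(db, "SELECT optimize(t) FROM t LIMIT 1",
                        printRow, 0, 0);
  if( rc!=SQLITE_OK ) return rc;
  zSql = sqlite3_mprintf("SELECT dump_doclist(t, %Q) FROM t LIMIT 1", zTerm);
  if( zSql==0 ) return SQLITE_NOMEM;
  rc = sqlite3_exec(db, zSql, printRow, 0, 0);
  sqlite3_free(zSql);
  return rc;
}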
+ */ + }else if( strcmp(zName,"dump_terms")==0 ){ + /* fprintf(stderr, "Found dump_terms\n"); */ + *pxFunc = dumpTermsFunc; + return 1; + }else if( strcmp(zName,"dump_doclist")==0 ){ + /* fprintf(stderr, "Found dump_doclist\n"); */ + *pxFunc = dumpDoclistFunc; + return 1; +#endif } return 0; } @@ -5883,13 +6825,18 @@ && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer")) && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) +#ifdef SQLITE_TEST + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) +#endif ){ return sqlite3_create_module_v2( db, "fts2", &fts2Module, (void *)pHash, hashDestroy ); } - /* An error has occured. Delete the hash table and return the error code. */ + /* An error has occurred. Delete the hash table and return the error code. */ assert( rc!=SQLITE_OK ); if( pHash ){ sqlite3Fts2HashClear(pHash); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2_hash.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2_hash.c --- sqlite3-3.4.2/ext/fts2/fts2_hash.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2_hash.c 2009-05-05 04:39:51.000000000 +0100 @@ -29,15 +29,22 @@ #include #include +#include "sqlite3.h" #include "fts2_hash.h" -static void *malloc_and_zero(int n){ - void *p = malloc(n); +/* +** Malloc and Free functions +*/ +static void *fts2HashMalloc(int n){ + void *p = sqlite3_malloc(n); if( p ){ memset(p, 0, n); } return p; } +static void fts2HashFree(void *p){ + sqlite3_free(p); +} /* Turn bulk memory into a hash table object by initializing the ** fields of the Hash structure. @@ -58,8 +65,6 @@ pNew->count = 0; pNew->htsize = 0; pNew->ht = 0; - pNew->xMalloc = malloc_and_zero; - pNew->xFree = free; } /* Remove all entries from a hash table. Reclaim all memory. 
@@ -72,15 +77,15 @@ assert( pH!=0 ); elem = pH->first; pH->first = 0; - if( pH->ht ) pH->xFree(pH->ht); + fts2HashFree(pH->ht); pH->ht = 0; pH->htsize = 0; while( elem ){ fts2HashElem *next_elem = elem->next; if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); + fts2HashFree(elem->pKey); } - pH->xFree(elem); + fts2HashFree(elem); elem = next_elem; } pH->count = 0; @@ -192,9 +197,9 @@ int (*xHash)(const void*,int); /* The hash function */ assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _fts2ht *)pH->xMalloc( new_size*sizeof(struct _fts2ht) ); + new_ht = (struct _fts2ht *)fts2HashMalloc( new_size*sizeof(struct _fts2ht) ); if( new_ht==0 ) return; - if( pH->ht ) pH->xFree(pH->ht); + fts2HashFree(pH->ht); pH->ht = new_ht; pH->htsize = new_size; xHash = hashFunction(pH->keyClass); @@ -260,9 +265,9 @@ pEntry->chain = 0; } if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); + fts2HashFree(elem->pKey); } - pH->xFree( elem ); + fts2HashFree( elem ); pH->count--; if( pH->count<=0 ){ assert( pH->first==0 ); @@ -333,12 +338,12 @@ return old_data; } if( data==0 ) return 0; - new_elem = (fts2HashElem*)pH->xMalloc( sizeof(fts2HashElem) ); + new_elem = (fts2HashElem*)fts2HashMalloc( sizeof(fts2HashElem) ); if( new_elem==0 ) return data; if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = pH->xMalloc( nKey ); + new_elem->pKey = fts2HashMalloc( nKey ); if( new_elem->pKey==0 ){ - pH->xFree(new_elem); + fts2HashFree(new_elem); return data; } memcpy((void*)new_elem->pKey, pKey, nKey); @@ -351,7 +356,7 @@ rehash(pH,8); if( pH->htsize==0 ){ pH->count = 0; - pH->xFree(new_elem); + fts2HashFree(new_elem); return data; } } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2_hash.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2_hash.h --- sqlite3-3.4.2/ext/fts2/fts2_hash.h 2006-10-10 18:37:14.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2_hash.h 2009-05-05 04:39:51.000000000 +0100 @@ -34,8 +34,6 @@ char copyKey; /* True if copy of key made on insert */ int count; /* Number of entries in this table */ fts2HashElem *first; /* The first element of the array */ - void *(*xMalloc)(int); /* malloc() function to use */ - void (*xFree)(void *); /* free() function to use */ int htsize; /* Number of buckets in the hash table */ struct _fts2ht { /* the hash table */ int count; /* Number of entries with this hash */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2_icu.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2_icu.c --- sqlite3-3.4.2/ext/fts2/fts2_icu.c 2007-06-22 16:21:16.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2_icu.c 2009-06-12 03:37:46.000000000 +0100 @@ -11,7 +11,7 @@ ************************************************************************* ** This file implements a tokenizer for fts2 based on the ICU library. 
** -** $Id: fts2_icu.c,v 1.1 2007/06/22 15:21:16 danielk1977 Exp $ +** $Id: fts2_icu.c,v 1.3 2008/12/18 05:30:26 danielk1977 Exp $ */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) @@ -112,6 +112,9 @@ *ppCursor = 0; + if( nInput<0 ){ + nInput = strlen(zInput); + } nChar = nInput+1; pCsr = (IcuCursor *)sqlite3_malloc( sizeof(IcuCursor) + /* IcuCursor */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2_porter.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2_porter.c --- sqlite3-3.4.2/ext/fts2/fts2_porter.c 2007-08-06 00:47:54.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2_porter.c 2009-05-05 04:39:51.000000000 +0100 @@ -66,9 +66,9 @@ sqlite3_tokenizer **ppTokenizer ){ porter_tokenizer *t; - t = (porter_tokenizer *) calloc(sizeof(*t), 1); + t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); if( t==NULL ) return SQLITE_NOMEM; - + memset(t, 0, sizeof(*t)); *ppTokenizer = &t->base; return SQLITE_OK; } @@ -77,7 +77,7 @@ ** Destroy a tokenizer */ static int porterDestroy(sqlite3_tokenizer *pTokenizer){ - free(pTokenizer); + sqlite3_free(pTokenizer); return SQLITE_OK; } @@ -94,7 +94,7 @@ ){ porter_tokenizer_cursor *c; - c = (porter_tokenizer_cursor *) malloc(sizeof(*c)); + c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); if( c==NULL ) return SQLITE_NOMEM; c->zInput = zInput; @@ -120,8 +120,8 @@ */ static int porterClose(sqlite3_tokenizer_cursor *pCursor){ porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - free(c->zToken); - free(c); + sqlite3_free(c->zToken); + sqlite3_free(c); return SQLITE_OK; } /* @@ -603,7 +603,7 @@ int n = c->iOffset-iStartOffset; if( n>c->nAllocated ){ c->nAllocated = n+20; - c->zToken = realloc(c->zToken, c->nAllocated); + c->zToken = sqlite3_realloc(c->zToken, c->nAllocated); if( c->zToken==NULL ) return SQLITE_NOMEM; } porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts2/fts2_tokenizer1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts2/fts2_tokenizer1.c --- sqlite3-3.4.2/ext/fts2/fts2_tokenizer1.c 2007-07-30 21:36:32.000000000 +0100 +++ sqlite3-3.6.16/ext/fts2/fts2_tokenizer1.c 2009-05-05 04:39:51.000000000 +0100 @@ -65,8 +65,9 @@ ){ simple_tokenizer *t; - t = (simple_tokenizer *) calloc(sizeof(*t), 1); + t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); /* TODO(shess) Delimiters need to remain the same from run to run, ** else we need to reindex. One solution would be a meta-table to @@ -79,7 +80,7 @@ unsigned char ch = argv[1][i]; /* We explicitly don't support UTF-8 delimiters for now. 
*/ if( ch>=0x80 ){ - free(t); + sqlite3_free(t); return SQLITE_ERROR; } t->delim[ch] = 1; @@ -100,7 +101,7 @@ ** Destroy a tokenizer */ static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ - free(pTokenizer); + sqlite3_free(pTokenizer); return SQLITE_OK; } @@ -117,7 +118,7 @@ ){ simple_tokenizer_cursor *c; - c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); + c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); if( c==NULL ) return SQLITE_NOMEM; c->pInput = pInput; @@ -143,8 +144,8 @@ */ static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - free(c->pToken); - free(c); + sqlite3_free(c->pToken); + sqlite3_free(c); return SQLITE_OK; } @@ -182,7 +183,7 @@ int i, n = c->iOffset-iStartOffset; if( n>c->nTokenAllocated ){ c->nTokenAllocated = n+20; - c->pToken = realloc(c->pToken, c->nTokenAllocated); + c->pToken = sqlite3_realloc(c->pToken, c->nTokenAllocated); if( c->pToken==NULL ) return SQLITE_NOMEM; } for(i=0; i0) +** varint iBlockid; (block id of node's leftmost subtree) +** optional { +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of shared prefix with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix]; (unshared suffix of next term) +** } +** } +** +** Here, optional { X } means an optional element, while array { X } +** means zero or more occurrences of X, adjacent in memory. +** +** An interior node encodes n terms separating n+1 subtrees. The +** subtree blocks are contiguous, so only the first subtree's blockid +** is encoded. The subtree at iBlockid will contain all terms less +** than the first term encoded (or all terms if no term is encoded). +** Otherwise, for terms greater than or equal to pTerm[i] but less +** than pTerm[i+1], the subtree for that term will be rooted at +** iBlockid+i. Interior nodes only store enough term data to +** distinguish adjacent children (if the rightmost term of the left +** child is "something", and the leftmost term of the right child is +** "wicked", only "w" is stored). +** +** New data is spilled to a new interior node at the same height when +** the current node exceeds INTERIOR_MAX bytes (default 2048). +** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing +** interior nodes and making the tree too skinny. The interior nodes +** at a given height are naturally tracked by interior nodes at +** height+1, and so on. +** +** +**** Segment directory **** +** The segment directory in table %_segdir stores meta-information for +** merging and deleting segments, and also the root node of the +** segment's tree. +** +** The root node is the top node of the segment's tree after encoding +** the entire segment, restricted to ROOT_MAX bytes (default 1024). +** This could be either a leaf node or an interior node. If the top +** node requires more than ROOT_MAX bytes, it is flushed to %_segments +** and a new root interior node is generated (which should always fit +** within ROOT_MAX because it only needs space for 2 varints, the +** height and the blockid of the previous root). 
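/* A small sketch of the prefix/suffix split used when terms are
** delta-encoded against their predecessor, as described in the node
** format above.  The helper name is illustrative; the real encoder also
** writes the nPrefix/nSuffix varints and the suffix bytes.
*/
#include <string.h>

static void termDeltaSplit(const char *zPrev, int nPrev,
                           const char *zTerm, int nTerm,
                           int *pnPrefix, int *pnSuffix){
  int n = 0;
  while( n<nPrev && n<nTerm && zPrev[n]==zTerm[n] ) n++;
  *pnPrefix = n;            /* bytes shared with the previous term */
  *pnSuffix = nTerm-n;      /* unshared tail that is actually stored */
}
/* Example: after "perform", the term "performance" is stored as
** nPrefix=7, nSuffix=4, pTermSuffix="ance". */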
+** +** The meta-information in the segment directory is: +** level - segment level (see below) +** idx - index within level +** - (level,idx uniquely identify a segment) +** start_block - first leaf node +** leaves_end_block - last leaf node +** end_block - last block (including interior nodes) +** root - contents of root node +** +** If the root node is a leaf node, then start_block, +** leaves_end_block, and end_block are all 0. +** +** +**** Segment merging **** +** To amortize update costs, segments are grouped into levels and +** merged in batches. Each increase in level represents exponentially +** more documents. +** +** New documents (actually, document updates) are tokenized and +** written individually (using LeafWriter) to a level 0 segment, with +** incrementing idx. When idx reaches MERGE_COUNT (default 16), all +** level 0 segments are merged into a single level 1 segment. Level 1 +** is populated like level 0, and eventually MERGE_COUNT level 1 +** segments are merged to a single level 2 segment (representing +** MERGE_COUNT^2 updates), and so on. +** +** A segment merge traverses all segments at a given level in +** parallel, performing a straightforward sorted merge. Since segment +** leaf nodes are written in to the %_segments table in order, this +** merge traverses the underlying sqlite disk structures efficiently. +** After the merge, all segment blocks from the merged level are +** deleted. +** +** MERGE_COUNT controls how often we merge segments. 16 seems to be +** somewhat of a sweet spot for insertion performance. 32 and 64 show +** very similar performance numbers to 16 on insertion, though they're +** a tiny bit slower (perhaps due to more overhead in merge-time +** sorting). 8 is about 20% slower than 16, 4 about 50% slower than +** 16, 2 about 66% slower than 16. +** +** At query time, high MERGE_COUNT increases the number of segments +** which need to be scanned and merged. For instance, with 100k docs +** inserted: +** +** MERGE_COUNT segments +** 16 25 +** 8 12 +** 4 10 +** 2 6 +** +** This appears to have only a moderate impact on queries for very +** frequent terms (which are somewhat dominated by segment merge +** costs), and infrequent and non-existent terms still seem to be fast +** even with many segments. +** +** TODO(shess) That said, it would be nice to have a better query-side +** argument for MERGE_COUNT of 16. Also, it is possible/likely that +** optimizations to things like doclist merging will swing the sweet +** spot around. +** +** +** +**** Handling of deletions and updates **** +** Since we're using a segmented structure, with no docid-oriented +** index into the term index, we clearly cannot simply update the term +** index when a document is deleted or updated. For deletions, we +** write an empty doclist (varint(docid) varint(POS_END)), for updates +** we simply write the new doclist. Segment merges overwrite older +** data for a particular docid with newer data, so deletes or updates +** will eventually overtake the earlier data and knock it out. The +** query logic likewise merges doclists so that newer data knocks out +** older data. +** +** TODO(shess) Provide a VACUUM type operation to clear out all +** deletions and duplications. This would basically be a forced merge +** into a single segment. 
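/* A sketch of the deletion marker described above: an "empty" doclist is
** just varint(docid) followed by varint(POS_END).  fts3PutVarint() and
** POS_END are the helper and constant defined further down in this file;
** the buffer handling here is illustrative only (the caller must supply
** at least 2*VARINT_MAX bytes).
*/
static int encodeDeleteMarker(char *pBuf, sqlite_int64 iDocid){
  /* docid: absolute here, delta-encoded when embedded in a larger doclist */
  int n = fts3PutVarint(pBuf, iDocid);
  /* POS_END with no preceding positions marks the docid as deleted */
  n += fts3PutVarint(pBuf+n, POS_END);
  return n;    /* number of bytes written */
}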
+*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include +#include +#include +#include +#include + +#include "fts3.h" +#include "fts3_expr.h" +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#ifndef SQLITE_CORE +# include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 +#endif + + +/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it +** would be nice to order the file better, perhaps something along the +** lines of: +** +** - utility functions +** - table setup functions +** - table update functions +** - table query functions +** +** Put the query functions last because they're likely to reference +** typedefs or functions from the table update section. +*/ + +#if 0 +# define FTSTRACE(A) printf A; fflush(stdout) +#else +# define FTSTRACE(A) +#endif + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. +** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted +** into (no deletes or updates). +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* MERGE_COUNT controls how often we merge segments (see comment at +** top of file). +*/ +#define MERGE_COUNT 16 + +/* utility functions */ + +/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single +** record to prevent errors of the form: +** +** my_function(SomeType *b){ +** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) +** } +*/ +/* TODO(shess) Obvious candidates for a header file. */ +#define CLEAR(b) memset(b, '\0', sizeof(*(b))) + +#ifndef NDEBUG +# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) +#else +# define SCRAMBLE(b) +#endif + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. */ +static int fts3PutVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. 
*/ +static int fts3GetVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int fts3GetVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = fts3GetVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*******************************************************************/ +/* DataBuffer is used to collect data into a buffer in piecemeal +** fashion. It implements the usual distinction between amount of +** data currently stored (nData) and buffer capacity (nCapacity). +** +** dataBufferInit - create a buffer with given initial capacity. +** dataBufferReset - forget buffer's data, retaining capacity. +** dataBufferDestroy - free buffer's data. +** dataBufferSwap - swap contents of two buffers. +** dataBufferExpand - expand capacity without adding data. +** dataBufferAppend - append data. +** dataBufferAppend2 - append two pieces of data at once. +** dataBufferReplace - replace buffer's data. +*/ +typedef struct DataBuffer { + char *pData; /* Pointer to malloc'ed buffer. */ + int nCapacity; /* Size of pData buffer. */ + int nData; /* End of data loaded into pData. */ +} DataBuffer; + +static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ + assert( nCapacity>=0 ); + pBuffer->nData = 0; + pBuffer->nCapacity = nCapacity; + pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); +} +static void dataBufferReset(DataBuffer *pBuffer){ + pBuffer->nData = 0; +} +static void dataBufferDestroy(DataBuffer *pBuffer){ + if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); + SCRAMBLE(pBuffer); +} +static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ + DataBuffer tmp = *pBuffer1; + *pBuffer1 = *pBuffer2; + *pBuffer2 = tmp; +} +static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ + assert( nAddCapacity>0 ); + /* TODO(shess) Consider expanding more aggressively. Note that the + ** underlying malloc implementation may take care of such things for + ** us already. + */ + if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ + pBuffer->nCapacity = pBuffer->nData+nAddCapacity; + pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); + } +} +static void dataBufferAppend(DataBuffer *pBuffer, + const char *pSource, int nSource){ + assert( nSource>0 && pSource!=NULL ); + dataBufferExpand(pBuffer, nSource); + memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); + pBuffer->nData += nSource; +} +static void dataBufferAppend2(DataBuffer *pBuffer, + const char *pSource1, int nSource1, + const char *pSource2, int nSource2){ + assert( nSource1>0 && pSource1!=NULL ); + assert( nSource2>0 && pSource2!=NULL ); + dataBufferExpand(pBuffer, nSource1+nSource2); + memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); + memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); + pBuffer->nData += nSource1+nSource2; +} +static void dataBufferReplace(DataBuffer *pBuffer, + const char *pSource, int nSource){ + dataBufferReset(pBuffer); + dataBufferAppend(pBuffer, pSource, nSource); +} + +/* StringBuffer is a null-terminated version of DataBuffer. */ +typedef struct StringBuffer { + DataBuffer b; /* Includes null terminator. 
*/ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + dataBufferInit(&sb->b, 100); + dataBufferReplace(&sb->b, "", 1); +} +static int stringBufferLength(StringBuffer *sb){ + return sb->b.nData-1; +} +static char *stringBufferData(StringBuffer *sb){ + return sb->b.pData; +} +static void stringBufferDestroy(StringBuffer *sb){ + dataBufferDestroy(&sb->b); +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + assert( sb->b.nData>0 ); + if( nFrom>0 ){ + sb->b.nData--; + dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); + } +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* Append a list of strings separated by commas. */ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +static int endsInWhiteSpace(StringBuffer *p){ + return stringBufferLength(p)>0 && + safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); +} + +/* If the StringBuffer ends in something other than white space, add a +** single space character to the end. +*/ +static void appendWhiteSpace(StringBuffer *p){ + if( stringBufferLength(p)==0 ) return; + if( !endsInWhiteSpace(p) ) append(p, " "); +} + +/* Remove white space from the end of the StringBuffer */ +static void trimWhiteSpace(StringBuffer *p){ + while( endsInWhiteSpace(p) ){ + p->b.pData[--p->b.nData-1] = '\0'; + } +} + +/*******************************************************************/ +/* DLReader is used to read document elements from a doclist. The +** current docid is cached, so dlrDocid() is fast. DLReader does not +** own the doclist buffer. +** +** dlrAtEnd - true if there's no more data to read. +** dlrDocid - docid of current document. +** dlrDocData - doclist data for current document (including docid). +** dlrDocDataBytes - length of same. +** dlrAllDataBytes - length of all remaining data. +** dlrPosData - position data for current document. +** dlrPosDataLen - length of pos data for current document (incl POS_END). +** dlrStep - step to current document. +** dlrInit - initial for doclist of given type against given data. +** dlrDestroy - clean up. +** +** Expected usage is something like: +** +** DLReader reader; +** dlrInit(&reader, pData, nData); +** while( !dlrAtEnd(&reader) ){ +** // calls to dlrDocid() and kin. +** dlrStep(&reader); +** } +** dlrDestroy(&reader); +*/ +typedef struct DLReader { + DocListType iType; + const char *pData; + int nData; + + sqlite_int64 iDocid; + int nElement; +} DLReader; + +static int dlrAtEnd(DLReader *pReader){ + assert( pReader->nData>=0 ); + return pReader->nData==0; +} +static sqlite_int64 dlrDocid(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->iDocid; +} +static const char *dlrDocData(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->pData; +} +static int dlrDocDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nElement; +} +static int dlrAllDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nData; +} +/* TODO(shess) Consider adding a field to track iDocid varint length +** to make these two functions faster. This might matter (a tiny bit) +** for queries. 
+*/ +static const char *dlrPosData(DLReader *pReader){ + sqlite_int64 iDummy; + int n = fts3GetVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->pData+n; +} +static int dlrPosDataLen(DLReader *pReader){ + sqlite_int64 iDummy; + int n = fts3GetVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->nElement-n; +} +static void dlrStep(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + + /* Skip past current doclist element. */ + assert( pReader->nElement<=pReader->nData ); + pReader->pData += pReader->nElement; + pReader->nData -= pReader->nElement; + + /* If there is more data, read the next doclist element. */ + if( pReader->nData!=0 ){ + sqlite_int64 iDocidDelta; + int iDummy, n = fts3GetVarint(pReader->pData, &iDocidDelta); + pReader->iDocid += iDocidDelta; + if( pReader->iType>=DL_POSITIONS ){ + assert( nnData ); + while( 1 ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( n<=pReader->nData ); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( nnData ); + } + } + } + pReader->nElement = n; + assert( pReader->nElement<=pReader->nData ); + } +} +static void dlrInit(DLReader *pReader, DocListType iType, + const char *pData, int nData){ + assert( pData!=NULL && nData!=0 ); + pReader->iType = iType; + pReader->pData = pData; + pReader->nData = nData; + pReader->nElement = 0; + pReader->iDocid = 0; + + /* Load the first element's data. There must be a first element. */ + dlrStep(pReader); +} +static void dlrDestroy(DLReader *pReader){ + SCRAMBLE(pReader); +} + +#ifndef NDEBUG +/* Verify that the doclist can be validly decoded. Also returns the +** last docid found because it is convenient in other assertions for +** DLWriter. +*/ +static void docListValidate(DocListType iType, const char *pData, int nData, + sqlite_int64 *pLastDocid){ + sqlite_int64 iPrevDocid = 0; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + while( nData!=0 ){ + sqlite_int64 iDocidDelta; + int n = fts3GetVarint(pData, &iDocidDelta); + iPrevDocid += iDocidDelta; + if( iType>DL_DOCIDS ){ + int iDummy; + while( 1 ){ + n += fts3GetVarint32(pData+n, &iDummy); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += fts3GetVarint32(pData+n, &iDummy); + }else if( iType>DL_POSITIONS ){ + n += fts3GetVarint32(pData+n, &iDummy); + n += fts3GetVarint32(pData+n, &iDummy); + } + assert( n<=nData ); + } + } + assert( n<=nData ); + pData += n; + nData -= n; + } + if( pLastDocid ) *pLastDocid = iPrevDocid; +} +#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) +#else +#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) +#endif + +/*******************************************************************/ +/* DLWriter is used to write doclist data to a DataBuffer. DLWriter +** always appends to the buffer and does not own it. +** +** dlwInit - initialize to write a given type doclistto a buffer. +** dlwDestroy - clear the writer's memory. Does not free buffer. +** dlwAppend - append raw doclist data to buffer. +** dlwCopy - copy next doclist from reader to writer. +** dlwAdd - construct doclist element and append to buffer. +** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). 
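/* A usage sketch of the reader/writer pair described above, using the
** DataBuffer, DLReader and DLWriter helpers from this file (the DLWriter
** definition follows just below).  The docids are illustrative.
*/
static void doclistRoundTripSketch(void){
  DataBuffer buf;
  DLWriter w;
  DLReader r;

  dataBufferInit(&buf, 0);
  dlwInit(&w, DL_DOCIDS, &buf);
  dlwAdd(&w, 10);                 /* docids must be added in ascending order */
  dlwAdd(&w, 17);
  dlwDestroy(&w);

  dlrInit(&r, DL_DOCIDS, buf.pData, buf.nData);
  while( !dlrAtEnd(&r) ){
    /* dlrDocid(&r) yields 10, then 17 */
    dlrStep(&r);
  }
  dlrDestroy(&r);
  dataBufferDestroy(&buf);
}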
+*/ +typedef struct DLWriter { + DocListType iType; + DataBuffer *b; + sqlite_int64 iPrevDocid; +#ifndef NDEBUG + int has_iPrevDocid; +#endif +} DLWriter; + +static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ + pWriter->b = b; + pWriter->iType = iType; + pWriter->iPrevDocid = 0; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 0; +#endif +} +static void dlwDestroy(DLWriter *pWriter){ + SCRAMBLE(pWriter); +} +/* iFirstDocid is the first docid in the doclist in pData. It is +** needed because pData may point within a larger doclist, in which +** case the first item would be delta-encoded. +** +** iLastDocid is the final docid in the doclist in pData. It is +** needed to create the new iPrevDocid for future delta-encoding. The +** code could decode the passed doclist to recreate iLastDocid, but +** the only current user (docListMerge) already has decoded this +** information. +*/ +/* TODO(shess) This has become just a helper for docListMerge. +** Consider a refactor to make this cleaner. +*/ +static void dlwAppend(DLWriter *pWriter, + const char *pData, int nData, + sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ + sqlite_int64 iDocid = 0; + char c[VARINT_MAX]; + int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ +#ifndef NDEBUG + sqlite_int64 iLastDocidDelta; +#endif + + /* Recode the initial docid as delta from iPrevDocid. */ + nFirstOld = fts3GetVarint(pData, &iDocid); + assert( nFirstOldiType==DL_DOCIDS) ); + nFirstNew = fts3PutVarint(c, iFirstDocid-pWriter->iPrevDocid); + + /* Verify that the incoming doclist is valid AND that it ends with + ** the expected docid. This is essential because we'll trust this + ** docid in future delta-encoding. + */ + ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); + assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); + + /* Append recoded initial docid and everything else. Rest of docids + ** should have been delta-encoded from previous initial docid. + */ + if( nFirstOldb, c, nFirstNew, + pData+nFirstOld, nData-nFirstOld); + }else{ + dataBufferAppend(pWriter->b, c, nFirstNew); + } + pWriter->iPrevDocid = iLastDocid; +} +static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ + dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), + dlrDocid(pReader), dlrDocid(pReader)); +} +static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, iDocid-pWriter->iPrevDocid); + + /* Docids must ascend. */ + assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); + assert( pWriter->iType==DL_DOCIDS ); + + dataBufferAppend(pWriter->b, c, n); + pWriter->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 1; +#endif +} + +/*******************************************************************/ +/* PLReader is used to read data from a document's position list. As +** the caller steps through the list, data is cached so that varints +** only need to be decoded once. +** +** plrInit, plrDestroy - create/destroy a reader. +** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors +** plrAtEnd - at end of stream, only call plrDestroy once true. +** plrStep - step to the next element. +*/ +typedef struct PLReader { + /* These refer to the next position's data. nData will reach 0 when + ** reading the last position, so plrStep() signals EOF by setting + ** pData to NULL. 
+ */ + const char *pData; + int nData; + + DocListType iType; + int iColumn; /* the last column read */ + int iPosition; /* the last position read */ + int iStartOffset; /* the last start offset read */ + int iEndOffset; /* the last end offset read */ +} PLReader; + +static int plrAtEnd(PLReader *pReader){ + return pReader->pData==NULL; +} +static int plrColumn(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iColumn; +} +static int plrPosition(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iPosition; +} +static int plrStartOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iStartOffset; +} +static int plrEndOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iEndOffset; +} +static void plrStep(PLReader *pReader){ + int i, n; + + assert( !plrAtEnd(pReader) ); + + if( pReader->nData==0 ){ + pReader->pData = NULL; + return; + } + + n = fts3GetVarint32(pReader->pData, &i); + if( i==POS_COLUMN ){ + n += fts3GetVarint32(pReader->pData+n, &pReader->iColumn); + pReader->iPosition = 0; + pReader->iStartOffset = 0; + n += fts3GetVarint32(pReader->pData+n, &i); + } + /* Should never see adjacent column changes. */ + assert( i!=POS_COLUMN ); + + if( i==POS_END ){ + pReader->nData = 0; + pReader->pData = NULL; + return; + } + + pReader->iPosition += i-POS_BASE; + if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += fts3GetVarint32(pReader->pData+n, &i); + pReader->iStartOffset += i; + n += fts3GetVarint32(pReader->pData+n, &i); + pReader->iEndOffset = pReader->iStartOffset+i; + } + assert( n<=pReader->nData ); + pReader->pData += n; + pReader->nData -= n; +} + +static void plrInit(PLReader *pReader, DLReader *pDLReader){ + pReader->pData = dlrPosData(pDLReader); + pReader->nData = dlrPosDataLen(pDLReader); + pReader->iType = pDLReader->iType; + pReader->iColumn = 0; + pReader->iPosition = 0; + pReader->iStartOffset = 0; + pReader->iEndOffset = 0; + plrStep(pReader); +} +static void plrDestroy(PLReader *pReader){ + SCRAMBLE(pReader); +} + +/*******************************************************************/ +/* PLWriter is used in constructing a document's position list. As a +** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. +** PLWriter writes to the associated DLWriter's buffer. +** +** plwInit - init for writing a document's poslist. +** plwDestroy - clear a writer. +** plwAdd - append position and offset information. +** plwCopy - copy next position's data from reader to writer. +** plwTerminate - add any necessary doclist terminator. +** +** Calling plwAdd() after plwTerminate() may result in a corrupt +** doclist. +*/ +/* TODO(shess) Until we've written the second item, we can cache the +** first item's information. Then we'd have three states: +** +** - initialized with docid, no positions. +** - docid and one position. +** - docid and multiple positions. +** +** Only the last state needs to actually write to dlw->b, which would +** be an improvement in the DLCollector case. +*/ +typedef struct PLWriter { + DLWriter *dlw; + + int iColumn; /* the last column written */ + int iPos; /* the last position written */ + int iOffset; /* the last start offset written */ +} PLWriter; + +/* TODO(shess) In the case where the parent is reading these values +** from a PLReader, we could optimize to a copy if that PLReader has +** the same type as pWriter. 
+*/ +static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, + ** iStartOffsetDelta, and iEndOffsetDelta. + */ + char c[5*VARINT_MAX]; + int n = 0; + + /* Ban plwAdd() after plwTerminate(). */ + assert( pWriter->iPos!=-1 ); + + if( pWriter->dlw->iType==DL_DOCIDS ) return; + + if( iColumn!=pWriter->iColumn ){ + n += fts3PutVarint(c+n, POS_COLUMN); + n += fts3PutVarint(c+n, iColumn); + pWriter->iColumn = iColumn; + pWriter->iPos = 0; + pWriter->iOffset = 0; + } + assert( iPos>=pWriter->iPos ); + n += fts3PutVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); + pWriter->iPos = iPos; + if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=pWriter->iOffset ); + n += fts3PutVarint(c+n, iStartOffset-pWriter->iOffset); + pWriter->iOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + n += fts3PutVarint(c+n, iEndOffset-iStartOffset); + } + dataBufferAppend(pWriter->dlw->b, c, n); +} +static void plwCopy(PLWriter *pWriter, PLReader *pReader){ + plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), + plrStartOffset(pReader), plrEndOffset(pReader)); +} +static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n; + + pWriter->dlw = dlw; + + /* Docids must ascend. */ + assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); + n = fts3PutVarint(c, iDocid-pWriter->dlw->iPrevDocid); + dataBufferAppend(pWriter->dlw->b, c, n); + pWriter->dlw->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->dlw->has_iPrevDocid = 1; +#endif + + pWriter->iColumn = 0; + pWriter->iPos = 0; + pWriter->iOffset = 0; +} +/* TODO(shess) Should plwDestroy() also terminate the doclist? But +** then plwDestroy() would no longer be just a destructor, it would +** also be doing work, which isn't consistent with the overall idiom. +** Another option would be for plwAdd() to always append any necessary +** terminator, so that the output is always correct. But that would +** add incremental work to the common case with the only benefit being +** API elegance. Punt for now. +*/ +static void plwTerminate(PLWriter *pWriter){ + if( pWriter->dlw->iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, POS_END); + dataBufferAppend(pWriter->dlw->b, c, n); + } +#ifndef NDEBUG + /* Mark as terminated for assert in plwAdd(). */ + pWriter->iPos = -1; +#endif +} +static void plwDestroy(PLWriter *pWriter){ + SCRAMBLE(pWriter); +} + +/*******************************************************************/ +/* DLCollector wraps PLWriter and DLWriter to provide a +** dynamically-allocated doclist area to use during tokenization. +** +** dlcNew - malloc up and initialize a collector. +** dlcDelete - destroy a collector and all contained items. +** dlcAddPos - append position and offset information. +** dlcAddDoclist - add the collected doclist to the given buffer. +** dlcNext - terminate the current document and open another. +*/ +typedef struct DLCollector { + DataBuffer b; + DLWriter dlw; + PLWriter plw; +} DLCollector; + +/* TODO(shess) This could also be done by calling plwTerminate() and +** dataBufferAppend(). I tried that, expecting nominal performance +** differences, but it seemed to pretty reliably be worth 1% to code +** it this way. I suspect it is the incremental malloc overhead (some +** percentage of the plwTerminate() calls will cause a realloc), so +** this might be worth revisiting if the DataBuffer implementation +** changes. 
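/* A usage sketch of the PLWriter API described above: it builds a
** one-document DL_POSITIONS doclist for docid 42 with hits in two columns.
** The docid, column and position values are illustrative only.
*/
static void poslistWriteSketch(DataBuffer *pOut){
  DLWriter dlw;
  PLWriter plw;

  dlwInit(&dlw, DL_POSITIONS, pOut);
  plwInit(&plw, &dlw, 42);        /* starts the doclist element for docid 42 */
  plwAdd(&plw, 0, 3, 0, 0);       /* column 0, position 3 (offsets unused) */
  plwAdd(&plw, 1, 0, 0, 0);       /* column 1, position 0 */
  plwTerminate(&plw);             /* appends the POS_END terminator */
  plwDestroy(&plw);
  dlwDestroy(&dlw);
}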
+*/ +static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ + if( pCollector->dlw.iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, POS_END); + dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); + }else{ + dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); + } +} +static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ + plwTerminate(&pCollector->plw); + plwDestroy(&pCollector->plw); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); +} +static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); +} + +static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ + DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); + dataBufferInit(&pCollector->b, 0); + dlwInit(&pCollector->dlw, iType, &pCollector->b); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); + return pCollector; +} +static void dlcDelete(DLCollector *pCollector){ + plwDestroy(&pCollector->plw); + dlwDestroy(&pCollector->dlw); + dataBufferDestroy(&pCollector->b); + SCRAMBLE(pCollector); + sqlite3_free(pCollector); +} + + +/* Copy the doclist data of iType in pData/nData into *out, trimming +** unnecessary data as we go. Only columns matching iColumn are +** copied, all columns copied if iColumn is -1. Elements with no +** matching columns are dropped. The output is an iOutType doclist. +*/ +/* NOTE(shess) This code is only valid after all doclists are merged. +** If this is run before merges, then doclist items which represent +** deletion will be trimmed, and will thus not effect a deletion +** during the merge. +*/ +static void docListTrim(DocListType iType, const char *pData, int nData, + int iColumn, DocListType iOutType, DataBuffer *out){ + DLReader dlReader; + DLWriter dlWriter; + + assert( iOutType<=iType ); + + dlrInit(&dlReader, iType, pData, nData); + dlwInit(&dlWriter, iOutType, out); + + while( !dlrAtEnd(&dlReader) ){ + PLReader plReader; + PLWriter plWriter; + int match = 0; + + plrInit(&plReader, &dlReader); + + while( !plrAtEnd(&plReader) ){ + if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ + if( !match ){ + plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); + match = 1; + } + plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + } + plrStep(&plReader); + } + if( match ){ + plwTerminate(&plWriter); + plwDestroy(&plWriter); + } + + plrDestroy(&plReader); + dlrStep(&dlReader); + } + dlwDestroy(&dlWriter); + dlrDestroy(&dlReader); +} + +/* Used by docListMerge() to keep doclists in the ascending order by +** docid, then ascending order by age (so the newest comes first). +*/ +typedef struct OrderedDLReader { + DLReader *pReader; + + /* TODO(shess) If we assume that docListMerge pReaders is ordered by + ** age (which we do), then we could use pReader comparisons to break + ** ties. + */ + int idx; +} OrderedDLReader; + +/* Order eof to end, then by docid asc, idx desc. */ +static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ + if( dlrAtEnd(r1->pReader) ){ + if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ + return 1; /* Only r1 atEnd(). */ + } + if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ + + if( dlrDocid(r1->pReader)pReader) ) return -1; + if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; + + /* Descending on idx. 
*/ + return r2->idx-r1->idx; +} + +/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that +** p[1..n-1] is already sorted. +*/ +/* TODO(shess) Is this frequent enough to warrant a binary search? +** Before implementing that, instrument the code to check. In most +** current usage, I expect that p[0] will be less than p[1] a very +** high proportion of the time. +*/ +static void orderedDLReaderReorder(OrderedDLReader *p, int n){ + while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ + OrderedDLReader tmp = p[0]; + p[0] = p[1]; + p[1] = tmp; + n--; + p++; + } +} + +/* Given an array of doclist readers, merge their doclist elements +** into out in sorted order (by docid), dropping elements from older +** readers when there is a duplicate docid. pReaders is assumed to be +** ordered by age, oldest first. +*/ +/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably +** be fixed. +*/ +static void docListMerge(DataBuffer *out, + DLReader *pReaders, int nReaders){ + OrderedDLReader readers[MERGE_COUNT]; + DLWriter writer; + int i, n; + const char *pStart = 0; + int nStart = 0; + sqlite_int64 iFirstDocid = 0, iLastDocid = 0; + + assert( nReaders>0 ); + if( nReaders==1 ){ + dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); + return; + } + + assert( nReaders<=MERGE_COUNT ); + n = 0; + for(i=0; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + + dlwInit(&writer, pReaders[0].iType, out); + while( !dlrAtEnd(readers[0].pReader) ){ + sqlite_int64 iDocid = dlrDocid(readers[0].pReader); + + /* If this is a continuation of the current buffer to copy, extend + ** that buffer. memcpy() seems to be more efficient if it has a + ** lots of data to copy. + */ + if( dlrDocData(readers[0].pReader)==pStart+nStart ){ + nStart += dlrDocDataBytes(readers[0].pReader); + }else{ + if( pStart!=0 ){ + dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + } + pStart = dlrDocData(readers[0].pReader); + nStart = dlrDocDataBytes(readers[0].pReader); + iFirstDocid = iDocid; + } + iLastDocid = iDocid; + dlrStep(readers[0].pReader); + + /* Drop all of the older elements with the same docid. */ + for(i=1; i0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + } + + /* Copy over any remaining elements. */ + if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + dlwDestroy(&writer); +} + +/* Helper function for posListUnion(). Compares the current position +** between left and right, returning as standard C idiom of <0 if +** left0 if left>right, and 0 if left==right. "End" always +** compares greater. +*/ +static int posListCmp(PLReader *pLeft, PLReader *pRight){ + assert( pLeft->iType==pRight->iType ); + if( pLeft->iType==DL_DOCIDS ) return 0; + + if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; + if( plrAtEnd(pRight) ) return -1; + + if( plrColumn(pLeft)plrColumn(pRight) ) return 1; + + if( plrPosition(pLeft)plrPosition(pRight) ) return 1; + if( pLeft->iType==DL_POSITIONS ) return 0; + + if( plrStartOffset(pLeft)plrStartOffset(pRight) ) return 1; + + if( plrEndOffset(pLeft)plrEndOffset(pRight) ) return 1; + + return 0; +} + +/* Write the union of position lists in pLeft and pRight to pOut. +** "Union" in this case meaning "All unique position tuples". Should +** work with any doclist type, though both inputs and the output +** should be the same type. 
+*/ +static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pLeft->iType==pRight->iType ); + assert( pLeft->iType==pOut->iType ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + plwInit(&writer, pOut, dlrDocid(pLeft)); + + while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ + int c = posListCmp(&left, &right); + if( c<0 ){ + plwCopy(&writer, &left); + plrStep(&left); + }else if( c>0 ){ + plwCopy(&writer, &right); + plrStep(&right); + }else{ + plwCopy(&writer, &left); + plrStep(&left); + plrStep(&right); + } + } + + plwTerminate(&writer); + plwDestroy(&writer); + plrDestroy(&left); + plrDestroy(&right); +} + +/* Write the union of doclists in pLeft and pRight to pOut. For +** docids in common between the inputs, the union of the position +** lists is written. Inputs and outputs are always type DL_DEFAULT. +*/ +static void docListUnion( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DEFAULT, pLeft, nLeft); + dlrInit(&right, DL_DEFAULT, pRight, nRight); + dlwInit(&writer, DL_DEFAULT, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else if( dlrDocid(&left)dlrDocid(&right) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else{ + posListUnion(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* +** This function is used as part of the implementation of phrase and +** NEAR matching. +** +** pLeft and pRight are DLReaders positioned to the same docid in +** lists of type DL_POSITION. This function writes an entry to the +** DLWriter pOut for each position in pRight that is less than +** (nNear+1) greater (but not equal to or smaller) than a position +** in pLeft. For example, if nNear is 0, and the positions contained +** by pLeft and pRight are: +** +** pLeft: 5 10 15 20 +** pRight: 6 9 17 21 +** +** then the docid is added to pOut. If pOut is of type DL_POSITIONS, +** then a positionids "6" and "21" are also added to pOut. +** +** If boolean argument isSaveLeft is true, then positionids are copied +** from pLeft instead of pRight. In the example above, the positions "5" +** and "20" would be added instead of "6" and "21". 
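/* A standalone sketch of the position test applied by posListPhraseMerge()
** below: a right-hand position matches when it is strictly greater than a
** left-hand position and at most nNear+1 greater.  Plain int arrays stand
** in for the PLReaders; with the example positions from the comment above
** (left 5 10 15 20, right 6 9 17 21) and nNear==0, the matches are 6 and 21.
*/
static int nearMatches(const int *aLeft, int nLeft,
                       const int *aRight, int nRight,
                       int nNear, int *aOut){
  int i, j, nOut = 0;
  for(j=0; j<nRight; j++){
    for(i=0; i<nLeft; i++){
      int d = aRight[j]-aLeft[i];
      if( d>0 && d<=nNear+1 ){
        aOut[nOut++] = aRight[j];
        break;
      }
    }
  }
  return nOut;   /* number of matching right-hand positions written to aOut */
}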
+*/ +static void posListPhraseMerge( + DLReader *pLeft, + DLReader *pRight, + int nNear, + int isSaveLeft, + DLWriter *pOut +){ + PLReader left, right; + PLWriter writer; + int match = 0; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pOut->iType!=DL_POSITIONS_OFFSETS ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + + while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ + if( plrColumn(&left)plrColumn(&right) ){ + plrStep(&right); + }else if( plrPosition(&left)>=plrPosition(&right) ){ + plrStep(&right); + }else{ + if( (plrPosition(&right)-plrPosition(&left))<=(nNear+1) ){ + if( !match ){ + plwInit(&writer, pOut, dlrDocid(pLeft)); + match = 1; + } + if( !isSaveLeft ){ + plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); + }else{ + plwAdd(&writer, plrColumn(&left), plrPosition(&left), 0, 0); + } + plrStep(&right); + }else{ + plrStep(&left); + } + } + } + + if( match ){ + plwTerminate(&writer); + plwDestroy(&writer); + } + + plrDestroy(&left); + plrDestroy(&right); +} + +/* +** Compare the values pointed to by the PLReaders passed as arguments. +** Return -1 if the value pointed to by pLeft is considered less than +** the value pointed to by pRight, +1 if it is considered greater +** than it, or 0 if it is equal. i.e. +** +** (*pLeft - *pRight) +** +** A PLReader that is in the EOF condition is considered greater than +** any other. If neither argument is in EOF state, the return value of +** plrColumn() is used. If the plrColumn() values are equal, the +** comparison is on the basis of plrPosition(). +*/ +static int plrCompare(PLReader *pLeft, PLReader *pRight){ + assert(!plrAtEnd(pLeft) || !plrAtEnd(pRight)); + + if( plrAtEnd(pRight) || plrAtEnd(pLeft) ){ + return (plrAtEnd(pRight) ? -1 : 1); + } + if( plrColumn(pLeft)!=plrColumn(pRight) ){ + return ((plrColumn(pLeft)0) +** and write the results into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** A NEAR intersection means that two documents only match if +** (abs(pLeft.iPos-pRight.iPos) 0", + /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + + /* NOTE(shess): The first three results of the following two + ** statements must match. + */ + /* SEGDIR_SELECT_SEGMENT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? and idx = ?", + /* SEGDIR_SELECT_ALL */ + "select start_block, leaves_end_block, root from %_segdir " + " order by level desc, idx asc", + /* SEGDIR_DELETE_ALL */ "delete from %_segdir", + /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. +*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. 
+ */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; + + /* Precompiled statements used for segment merges. We run a + ** separate select across the leaf level of each tree being merged. + */ + sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; + /* The statement used to prepare pLeafSelectStmts. */ +#define LEAF_SELECT \ + "select block from %_segments where blockid between ? and ? order by blockid" + + /* These buffer pending index updates during transactions. + ** nPendingData estimates the memory size of the pending data. It + ** doesn't include the hash-bucket overhead, nor any malloc + ** overhead. When nPendingData exceeds kPendingThreshold, the + ** buffer is flushed even before the transaction closes. + ** pendingTerms stores the data, and is only valid when nPendingData + ** is >=0 (nPendingData<0 means pendingTerms has not been + ** initialized). iPrevDocid is the last docid written, used to make + ** certain we're inserting in sorted order. + */ + int nPendingData; +#define kPendingThreshold (1*1024*1024) + sqlite_int64 iPrevDocid; + fts3Hash pendingTerms; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. +*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Fts3Expr *pExpr; /* Parsed MATCH query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DataBuffer result; /* Doclist results from fulltextQuery */ + DLReader reader; /* Result reader if result not empty */ +} fulltext_cursor; + +static fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fts3Module; /* forward declaration */ + +/* Return a dynamically generated statement of the form + * insert into %_content (docid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; inColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * select from %_content where docid = ? + */ +static const char *contentSelectStatement(fulltext_vtab *v){ + StringBuffer sb; + initStringBuffer(&sb); + append(&sb, "SELECT "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content WHERE docid = ?"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where docid = ? + */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; inColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where docid = ?"); + return stringBufferData(&sb); +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. 
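+**
+** Typical caller pattern (a sketch of the idiom used by
+** content_delete() and the other helpers below):
+**
+**   sqlite3_stmt *s;
+**   int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s);
+**   if( rc!=SQLITE_OK ) return rc;
+**   rc = sqlite3_bind_int64(s, 1, iDocid);
+**   if( rc!=SQLITE_OK ) return rc;
+**   return sql_single_step(s);
+**
+** The cached statement is owned by the vtab and finalized in
+** fulltext_vtab_destroy(), so callers never finalize it themselves.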
+*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmtpFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_SELECT_STMT: + zStmt = contentSelectStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and +** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, +** where we expect no results. +*/ +static int sql_single_step(sqlite3_stmt *s){ + int rc = sqlite3_step(s); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* Like sql_get_statement(), but for special replicated LEAF_SELECT +** statements. idx -1 is a special case for an uncached version of +** the statement (used in the optimize implementation). +*/ +/* TODO(shess) Write version for generic statements and then share +** that between the cached-statement functions. +*/ +static int sql_get_leaf_statement(fulltext_vtab *v, int idx, + sqlite3_stmt **ppStmt){ + assert( idx>=-1 && idxdb, v->zDb, v->zName, ppStmt, LEAF_SELECT); + }else if( v->pLeafSelectStmts[idx]==NULL ){ + int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], + LEAF_SELECT); + if( rc!=SQLITE_OK ) return rc; + }else{ + int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pLeafSelectStmts[idx]; + return SQLITE_OK; +} + +/* insert into %_content (docid, ...) values ([docid], [pValues]) +** If the docid contains SQL NULL, then a unique docid will be +** generated. +*/ +static int content_insert(fulltext_vtab *v, sqlite3_value *docid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, docid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step(s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... + * where docid = [iDocid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iDocid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; inColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); + } + sqlite3_free((void *) pString); +} + +/* select * from %_content where docid = [iDocid] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. 
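+ *
+ * Sketch of the intended ownership pattern (deleteTerms() below does
+ * exactly this; illustrative only):
+ *
+ *   const char **pValues;
+ *   rc = content_select(v, iDocid, &pValues);
+ *   if( rc!=SQLITE_OK ) return rc;
+ *   ... read pValues[0..nColumn-1], checking each entry for NULL ...
+ *   freeStringArray(v->nColumn, pValues);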
+ * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iDocid, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); + for(i=0; inColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where docid = [iDocid ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iDocid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if +** no rows exist, and any error in case of failure. +*/ +static int content_exists(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ROW; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* insert into %_segments values ([pData]) +** returns assigned blockid in *piBlockid +*/ +static int block_insert(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 *piBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + /* blockid column is an alias for rowid. */ + *piBlockid = sqlite3_last_insert_rowid(v->db); + return SQLITE_OK; +} + +/* delete from %_segments +** where blockid between [iStartBlockid] and [iEndBlockid] +** +** Deletes the range of blocks, inclusive, used to delete the blocks +** which form a segment. +*/ +static int block_delete(fulltext_vtab *v, + sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found +** at iLevel. Returns SQLITE_DONE if there are no segments at +** iLevel. Otherwise returns an error. 
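+**
+** Sketch of how a caller can turn this into the idx for a new segment
+** at iLevel (the real merge logic lives further down the file; this is
+** only illustrative):
+**
+**   int idx;
+**   rc = segdir_max_index(v, iLevel, &idx);
+**   if( rc==SQLITE_ROW ){
+**     idx++;                  /* append after the existing segments */
+**   }else if( rc==SQLITE_DONE ){
+**     idx = 0;                /* first segment at this level */
+**   }else{
+**     return rc;
+**   }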
+*/ +static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Should always get at least one row due to how max() works. */ + if( rc==SQLITE_DONE ) return SQLITE_DONE; + if( rc!=SQLITE_ROW ) return rc; + + /* NULL means that there were no inputs to max(). */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; + } + + *pidx = sqlite3_column_int(s, 0); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* insert into %_segdir values ( +** [iLevel], [idx], +** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], +** [pRootData] +** ) +*/ +static int segdir_set(fulltext_vtab *v, int iLevel, int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iLeavesEndBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, idx); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 3, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 5, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Queries %_segdir for the block span of the segments in level +** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, +** SQLITE_ROW if there are blocks, else an error. +*/ +static int segdir_span(fulltext_vtab *v, int iLevel, + sqlite_int64 *piStartBlockid, + sqlite_int64 *piEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ + if( rc!=SQLITE_ROW ) return rc; + + /* This happens if all segments at this level are entirely inline. */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + int rc2 = sqlite3_step(s); + if( rc2==SQLITE_ROW ) return SQLITE_ERROR; + return rc2; + } + + *piStartBlockid = sqlite3_column_int64(s, 0); + *piEndBlockid = sqlite3_column_int64(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* Delete the segment blocks and segment directory records for all +** segments at iLevel. 
+*/ +static int segdir_delete(fulltext_vtab *v, int iLevel){ + sqlite3_stmt *s; + sqlite_int64 iStartBlockid, iEndBlockid; + int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + if( rc==SQLITE_ROW ){ + rc = block_delete(v, iStartBlockid, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + } + + /* Delete the segment directory itself. */ + rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Delete entire fts index, SQLITE_OK on success, relevant error on +** failure. +*/ +static int segdir_delete_all(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_single_step(s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_OK with *pnSegments set to the number of entries in +** %_segdir and *piMaxLevel set to the highest level which has a +** segment. Otherwise returns the SQLite error which caused failure. +*/ +static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* TODO(shess): This case should not be possible? Should stronger + ** measures be taken if it happens? + */ + if( rc==SQLITE_DONE ){ + *pnSegments = 0; + *piMaxLevel = 0; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + *pnSegments = sqlite3_column_int(s, 0); + *piMaxLevel = sqlite3_column_int(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* TODO(shess) clearPendingTerms() is far down the file because +** writeZeroSegment() is far down the file because LeafWriter is far +** down the file. Consider refactoring the code to move the non-vtab +** code above the vtab code so that we don't need this forward +** reference. +*/ +static int clearPendingTerms(fulltext_vtab *v); + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + FTSTRACE(("FTS3 Destroy %p\n", v)); + for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + for( i=0; ipLeafSelectStmts[i]!=NULL ){ + sqlite3_finalize(v->pLeafSelectStmts[i]); + v->pLeafSelectStmts[i] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + clearPendingTerms(v); + + sqlite3_free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + sqlite3_free(v->azContentColumn); + sqlite3_free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. 
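+**
+** For example (illustrative), the default specification string
+** "tokenize simple" scans as TOKEN_ID "tokenize", TOKEN_SPACE,
+** TOKEN_ID "simple", then TOKEN_EOF, while a quoted argument such as
+** 'my tokenizer' comes back as a single TOKEN_STRING.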
+*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** ftsIdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** isFtsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. +*/ +static const char isFtsIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define ftsIdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isFtsIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +static int ftsGetToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !ftsIdChar(*z) ){ + break; + } + for(i=1; ftsIdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct FtsToken { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} FtsToken; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. 
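+**
+** Usage sketch (mirrors how parseSpec() below consumes the tokenizer
+** specification; illustrative only):
+**
+**   int nTok;
+**   char **azTok = tokenizeString("tokenize simple", &nTok);
+**   /* nTok==2, azTok[0]=="tokenize", azTok[1]=="simple",
+**   ** azTok[nTok]==NULL */
+**   ...
+**   sqlite3_free(azTok);  /* one call frees the array and all strings */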
+*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + FtsToken *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = ftsGetToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = ftsGetToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + sqlite3_free(p->azColumn); + sqlite3_free(p->azContentColumn); + sqlite3_free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts3(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. 
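+  **
+  ** For the example statement shown above the arguments arrive roughly
+  ** as (database name illustrative):
+  **
+  **   argv[0] = "fts3"       argv[3] = "subject"
+  **   argv[1] = "main"       argv[4] = "body"
+  **   argv[2] = "email"      argv[5] = "tokenize mytokenizer(myarg)"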
+ */ + CLEAR(pSpec); + for(i=n=0; izDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; inColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; inColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; ibase */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + + zTok = spec->azTokenizer[0]; + if( !zTok ){ + zTok = "simple"; + } + nTok = strlen(zTok)+1; + + m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zTok, nTok); + if( !m ){ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + /* Indicate that the buffer is not live. 
*/ + v->nPendingData = -1; + + *ppVTab = &v->base; + FTSTRACE(("FTS3 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + +/* The %_content table holds the text of each document, with +** the docid column exposed as the SQLite rowid for the table. +*/ +/* TODO(shess) This comment needs elaboration to match the updated +** code. Work it into the top-of-file comment at that time. +*/ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + FTSTRACE(("FTS3 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + append(&schema, " docid INTEGER PRIMARY KEY,"); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); + stringBufferDestroy(&schema); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segments(" + " blockid INTEGER PRIMARY KEY," + " block blob" + ");" + ); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segdir(" + " level integer," + " idx integer," + " start_block integer," + " leaves_end_block integer," + " end_block integer," + " root blob," + " primary key(level, idx)" + ");"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int i; + FTSTRACE(("FTS3 BestIndex\n")); + + for(i=0; inConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( (pConstraint->iColumn==-1 || pConstraint->iColumn==v->nColumn+1) && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_DOCID; /* lookup by docid */ + FTSTRACE(("FTS3 QUERY_DOCID\n")); + } else if( pConstraint->iColumn>=0 && pConstraint->iColumn<=v->nColumn && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + FTSTRACE(("FTS3 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps docid matches should be considered cheaper than + * full-text searches. 
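+       *
+       * One possible shape for that TODO (a sketch, not applied here):
+       *
+       *   pInfo->estimatedCost = (pInfo->idxNum==QUERY_DOCID) ? 1.0 : 10.0;
+       *
+       * i.e. keep docid lookups noticeably cheaper so the planner
+       * prefers them when both kinds of constraint are usable.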
*/ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + FTSTRACE(("FTS3 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + FTSTRACE(("FTS3 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_segments;" + "drop table if exists %_segdir;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); + if( c ){ + memset(c, 0, sizeof(fulltext_cursor)); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + FTSTRACE(("FTS3 Open %p: %p\n", pVTab, c)); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + sqlite3_free(p->aMatch); + sqlite3_free(p->zOffset); + sqlite3_free(p->zSnippet); + CLEAR(p); +} + +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iToken, /* Matching token in document */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iToken = iToken; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS3_ROTOR_SZ (32) +#define FTS3_ROTOR_MASK (FTS3_ROTOR_SZ-1) + +/* +** Function to iterate through the tokens of a compiled expression. +** +** Except, skip all tokens on the right-hand side of a NOT operator. +** This function is used to find tokens as part of snippet and offset +** generation and we do nt want snippets and offsets to report matches +** for tokens on the RHS of a NOT. +*/ +static int fts3NextExprToken(Fts3Expr **ppExpr, int *piToken){ + Fts3Expr *p = *ppExpr; + int iToken = *piToken; + if( iToken<0 ){ + /* In this case the expression p is the root of an expression tree. + ** Move to the first token in the expression tree. + */ + while( p->pLeft ){ + p = p->pLeft; + } + iToken = 0; + }else{ + assert(p && p->eType==FTSQUERY_PHRASE ); + if( iToken<(p->pPhrase->nToken-1) ){ + iToken++; + }else{ + iToken = 0; + while( p->pParent && p->pParent->pLeft!=p ){ + assert( p->pParent->pRight==p ); + p = p->pParent; + } + p = p->pParent; + if( p ){ + assert( p->pRight!=0 ); + p = p->pRight; + while( p->pLeft ){ + p = p->pLeft; + } + } + } + } + + *ppExpr = p; + *piToken = iToken; + return p?1:0; +} + +/* +** Return TRUE if the expression node pExpr is located beneath the +** RHS of a NOT operator. 
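+**
+** For example, given the query 'sqlite NOT fts3', the phrase node for
+** "fts3" hangs off the right-hand side of the FTSQUERY_NOT node, so
+** this returns true for it and snippetOffsetsOfColumn() skips its
+** matches; the node for "sqlite" is on the left and is reported as
+** usual.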
+*/ +static int fts3ExprBeneathNot(Fts3Expr *p){ + Fts3Expr *pParent; + while( p ){ + pParent = p->pParent; + if( pParent && pParent->eType==FTSQUERY_NOT && pParent->pRight==p ){ + return 1; + } + p = pParent; + } + return 0; +} + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. +*/ +static void snippetOffsetsOfColumn( + fulltext_cursor *pCur, /* The fulltest search cursor */ + Snippet *pSnippet, /* The Snippet object to be filled in */ + int iColumn, /* Index of fulltext table column */ + const char *zDoc, /* Text of the fulltext table column */ + int nDoc /* Length of zDoc in bytes */ +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS3_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS3_ROTOR_SZ]; /* Length of token */ + + pVtab = cursor_vtab(pCur); + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + + prevMatch = 0; + while( !pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos) ){ + Fts3Expr *pIter = pCur->pExpr; + int iIter = -1; + iRotorBegin[iRotor&FTS3_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS3_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i<(FTS3_ROTOR_SZ-1) && fts3NextExprToken(&pIter, &iIter); i++){ + int nPhrase; /* Number of tokens in current phrase */ + struct PhraseToken *pToken; /* Current token */ + int iCol; /* Column index */ + + if( fts3ExprBeneathNot(pIter) ) continue; + nPhrase = pIter->pPhrase->nToken; + pToken = &pIter->pPhrase->aToken[iIter]; + iCol = pIter->pPhrase->iColumn; + if( iCol>=0 && iColn>nToken ) continue; + if( !pToken->isPrefix && pToken->nn<=nToken ); + if( memcmp(pToken->z, zToken, pToken->n) ) continue; + if( iIter>0 && (prevMatch & (1<=0; j--){ + int k = (iRotor-j) & FTS3_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, iPos-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + +/* +** Remove entries from the pSnippet structure to account for the NEAR +** operator. When this is called, pSnippet contains the list of token +** offsets produced by treating all NEAR operators as AND operators. +** This function removes any entries that should not be present after +** accounting for the NEAR restriction. For example, if the queried +** document is: +** +** "A B C D E A" +** +** and the query is: +** +** A NEAR/0 E +** +** then when this function is called the Snippet contains token offsets +** 0, 4 and 5. This function removes the "0" entry (because the first A +** is not near enough to an E). 
+** +** When this function is called, the value pointed to by parameter piLeft is +** the integer id of the left-most token in the expression tree headed by +** pExpr. This function increments *piLeft by the total number of tokens +** in the expression tree headed by pExpr. +** +** Return 1 if any trimming occurs. Return 0 if no trimming is required. +*/ +static int trimSnippetOffsets( + Fts3Expr *pExpr, /* The search expression */ + Snippet *pSnippet, /* The set of snippet offsets to be trimmed */ + int *piLeft /* Index of left-most token in pExpr */ +){ + if( pExpr ){ + if( trimSnippetOffsets(pExpr->pLeft, pSnippet, piLeft) ){ + return 1; + } + + switch( pExpr->eType ){ + case FTSQUERY_PHRASE: + *piLeft += pExpr->pPhrase->nToken; + break; + case FTSQUERY_NEAR: { + /* The right-hand-side of a NEAR operator is always a phrase. The + ** left-hand-side is either a phrase or an expression tree that is + ** itself headed by a NEAR operator. The following initializations + ** set local variable iLeft to the token number of the left-most + ** token in the right-hand phrase, and iRight to the right most + ** token in the same phrase. For example, if we had: + ** + ** MATCH '"abc def" NEAR/2 "ghi jkl"' + ** + ** then iLeft will be set to 2 (token number of ghi) and nToken will + ** be set to 4. + */ + Fts3Expr *pLeft = pExpr->pLeft; + Fts3Expr *pRight = pExpr->pRight; + int iLeft = *piLeft; + int nNear = pExpr->nNear; + int nToken = pRight->pPhrase->nToken; + int jj, ii; + if( pLeft->eType==FTSQUERY_NEAR ){ + pLeft = pLeft->pRight; + } + assert( pRight->eType==FTSQUERY_PHRASE ); + assert( pLeft->eType==FTSQUERY_PHRASE ); + nToken += pLeft->pPhrase->nToken; + + for(ii=0; iinMatch; ii++){ + struct snippetMatch *p = &pSnippet->aMatch[ii]; + if( p->iTerm==iLeft ){ + int isOk = 0; + /* Snippet ii is an occurence of query term iLeft in the document. + ** It occurs at position (p->iToken) of the document. We now + ** search for an instance of token (iLeft-1) somewhere in the + ** range (p->iToken - nNear)...(p->iToken + nNear + nToken) within + ** the set of snippetMatch structures. If one is found, proceed. + ** If one cannot be found, then remove snippets ii..(ii+N-1) + ** from the matching snippets, where N is the number of tokens + ** in phrase pRight->pPhrase. + */ + for(jj=0; isOk==0 && jjnMatch; jj++){ + struct snippetMatch *p2 = &pSnippet->aMatch[jj]; + if( p2->iTerm==(iLeft-1) ){ + if( p2->iToken>=(p->iToken-nNear-1) + && p2->iToken<(p->iToken+nNear+nToken) + ){ + isOk = 1; + } + } + } + if( !isOk ){ + int kk; + for(kk=0; kkpPhrase->nToken; kk++){ + pSnippet->aMatch[kk+ii].iTerm = -2; + } + return 1; + } + } + if( p->iTerm==(iLeft-1) ){ + int isOk = 0; + for(jj=0; isOk==0 && jjnMatch; jj++){ + struct snippetMatch *p2 = &pSnippet->aMatch[jj]; + if( p2->iTerm==iLeft ){ + if( p2->iToken<=(p->iToken+nNear+1) + && p2->iToken>(p->iToken-nNear-nToken) + ){ + isOk = 1; + } + } + } + if( !isOk ){ + int kk; + for(kk=0; kkpPhrase->nToken; kk++){ + pSnippet->aMatch[ii-kk].iTerm = -2; + } + return 1; + } + } + } + break; + } + } + + if( trimSnippetOffsets(pExpr->pRight, pSnippet, piLeft) ){ + return 1; + } + } + return 0; +} + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. 
+*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + int iTerm = 0; + fulltext_vtab *pFts = cursor_vtab(p); + + if( p->snippet.nMatch || p->pExpr==0 ){ + return; + } + nColumn = pFts->nColumn; + iColumn = (p->iCursorType - QUERY_FULLTEXT); + if( iColumn<0 || iColumn>=nColumn ){ + /* Look for matches over all columns of the full-text index */ + iFirst = 0; + iLast = nColumn-1; + }else{ + /* Look for matches in the iColumn-th column of the index only */ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(p, &p->snippet, i, zDoc, nDoc); + } + + while( trimSnippetOffsets(p->pExpr, &p->snippet, &iTerm) ){ + iTerm = 0; + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. This string is used as +** the return of the SQL offsets() function. +*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; inMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + if( pMatch->iTerm>=0 ){ + /* If snippetMatch.iTerm is less than 0, then the match was + ** discarded as part of processing the NEAR operator (see the + ** trimSnippetOffsetsForNear() function for details). Ignore + ** it in this case + */ + zBuf[0] = ' '; + sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", + pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + } + p->zOffset = stringBufferData(&sb); + p->nOffset = stringBufferLength(&sb); +} + +/* +** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set +** of matching words some of which might be in zDoc. zDoc is column +** number iCol. +** +** iBreak is suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet. 
+*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + sqlite3_free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; i0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatchsnippet.zSnippet = stringBufferData(&sb); + pCursor->snippet.nSnippet = stringBufferLength(&sb); +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. +*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + FTSTRACE(("FTS3 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + sqlite3Fts3ExprFree(c->pExpr); + snippetClear(&c->snippet); + if( c->result.nData!=0 ){ + dlrDestroy(&c->reader); + } + dataBufferDestroy(&c->result); + sqlite3_free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + int rc; + + FTSTRACE(("FTS3 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); + dlrStep(&c->reader); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* TODO(shess) If we pushed LeafReader to the top of the file, or to +** another file, term_select() could be pushed above +** docListOfTerm(). +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out); + +/* +** Return a DocList corresponding to the phrase *pPhrase. 
+** +** The resulting DL_DOCIDS doclist is stored in pResult, which is +** overwritten. +*/ +static int docListOfPhrase( + fulltext_vtab *pTab, /* The full text index */ + Fts3Phrase *pPhrase, /* Phrase to return a doclist corresponding to */ + DocListType eListType, /* Either DL_DOCIDS or DL_POSITIONS */ + DataBuffer *pResult /* Write the result here */ +){ + int ii; + int rc = SQLITE_OK; + int iCol = pPhrase->iColumn; + DocListType eType = eListType; + assert( eType==DL_POSITIONS || eType==DL_DOCIDS ); + if( pPhrase->nToken>1 ){ + eType = DL_POSITIONS; + } + + /* This code should never be called with buffered updates. */ + assert( pTab->nPendingData<0 ); + + for(ii=0; rc==SQLITE_OK && iinToken; ii++){ + DataBuffer tmp; + struct PhraseToken *p = &pPhrase->aToken[ii]; + rc = termSelect(pTab, iCol, p->z, p->n, p->isPrefix, eType, &tmp); + if( rc==SQLITE_OK ){ + if( ii==0 ){ + *pResult = tmp; + }else{ + DataBuffer res = *pResult; + dataBufferInit(pResult, 0); + if( ii==(pPhrase->nToken-1) ){ + eType = eListType; + } + docListPhraseMerge( + res.pData, res.nData, tmp.pData, tmp.nData, 0, 0, eType, pResult + ); + dataBufferDestroy(&res); + dataBufferDestroy(&tmp); + } + } + } + + return rc; +} + +/* +** Evaluate the full-text expression pExpr against fts3 table pTab. Write +** the results into pRes. +*/ +static int evalFts3Expr( + fulltext_vtab *pTab, /* Fts3 Virtual table object */ + Fts3Expr *pExpr, /* Parsed fts3 expression */ + DataBuffer *pRes /* OUT: Write results of the expression here */ +){ + int rc = SQLITE_OK; + + /* Initialize the output buffer. If this is an empty query (pExpr==0), + ** this is all that needs to be done. Empty queries produce empty + ** result sets. + */ + dataBufferInit(pRes, 0); + + if( pExpr ){ + if( pExpr->eType==FTSQUERY_PHRASE ){ + DocListType eType = DL_DOCIDS; + if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ + eType = DL_POSITIONS; + } + rc = docListOfPhrase(pTab, pExpr->pPhrase, eType, pRes); + }else{ + DataBuffer lhs; + DataBuffer rhs; + + dataBufferInit(&rhs, 0); + if( SQLITE_OK==(rc = evalFts3Expr(pTab, pExpr->pLeft, &lhs)) + && SQLITE_OK==(rc = evalFts3Expr(pTab, pExpr->pRight, &rhs)) + ){ + switch( pExpr->eType ){ + case FTSQUERY_NEAR: { + int nToken; + Fts3Expr *pLeft; + DocListType eType = DL_DOCIDS; + if( pExpr->pParent && pExpr->pParent->eType==FTSQUERY_NEAR ){ + eType = DL_POSITIONS; + } + pLeft = pExpr->pLeft; + while( pLeft->eType==FTSQUERY_NEAR ){ + pLeft=pLeft->pRight; + } + assert( pExpr->pRight->eType==FTSQUERY_PHRASE ); + assert( pLeft->eType==FTSQUERY_PHRASE ); + nToken = pLeft->pPhrase->nToken + pExpr->pRight->pPhrase->nToken; + docListPhraseMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, + pExpr->nNear+1, nToken, eType, pRes + ); + break; + } + case FTSQUERY_NOT: { + docListExceptMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData,pRes); + break; + } + case FTSQUERY_AND: { + docListAndMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, pRes); + break; + } + case FTSQUERY_OR: { + docListOrMerge(lhs.pData, lhs.nData, rhs.pData, rhs.nData, pRes); + break; + } + } + } + dataBufferDestroy(&lhs); + dataBufferDestroy(&rhs); + } + } + + return rc; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int flushPendingTerms(fulltext_vtab *v); + +/* Perform a full-text query using the search expression in +** zInput[0..nInput-1]. Return a list of matching documents +** in pResult. +** +** Queries must match column iColumn. Or if iColumn>=nColumn +** they are allowed to match against any column. 
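+**
+** In SQL terms (illustrative, using the example table from parseSpec()
+** above):
+**
+**   ... WHERE subject MATCH 'sqlite'    matches the subject column only
+**   ... WHERE email   MATCH 'sqlite'    iColumn==nColumn, any column
+**
+** The column number travels in idxNum as QUERY_FULLTEXT+iColumn, set
+** by fulltextBestIndex() and unpacked again in fulltextFilter().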
+*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DataBuffer *pResult, /* Write the result doclist here */ + Fts3Expr **ppExpr /* Put parsed query string here */ +){ + int rc; + + /* TODO(shess) Instead of flushing pendingTerms, we could query for + ** the relevant term and merge the doclist into what we receive from + ** the database. Wait and see if this is a common issue, first. + ** + ** A good reason not to flush is to not generate update-related + ** error codes from here. + */ + + /* Flush any buffered updates before executing the query. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Parse the query passed to the MATCH operator. */ + rc = sqlite3Fts3ExprParse(v->pTokenizer, + v->azColumn, v->nColumn, iColumn, zInput, nInput, ppExpr + ); + if( rc!=SQLITE_OK ){ + assert( 0==(*ppExpr) ); + return rc; + } + + return evalFts3Expr(v, *ppExpr, pResult); +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_DOCID then do a docid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts3 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + + FTSTRACE(("FTS3 Filter %p\n",pCursor)); + + /* If the cursor has a statement that was not prepared according to + ** idxNum, clear it. I believe all calls to fulltextFilter with a + ** given cursor will have the same idxNum , but in this case it's + ** easy to be safe. + */ + if( c->pStmt && c->iCursorType!=idxNum ){ + sqlite3_finalize(c->pStmt); + c->pStmt = NULL; + } + + /* Get a fresh statement appropriate to idxNum. */ + /* TODO(shess): Add a prepared-statement cache in the vt structure. + ** The cache must handle multiple open cursors. Easier to cache the + ** statement variants at the vt to reduce malloc/realloc/free here. + ** Or we could have a StringBuffer variant which allowed stack + ** construction for small values. 
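+  **
+  ** For reference, the statement assembled below comes out roughly as
+  ** (assuming the two-column example table used earlier):
+  **
+  **   SELECT docid, c0subject, c1body FROM %_content WHERE docid = ?
+  **
+  ** with the WHERE clause omitted for QUERY_GENERIC scans; %_content
+  ** is expanded to the actual (database-qualified) content table name
+  ** by sql_prepare().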
+ */ + if( !c->pStmt ){ + StringBuffer sb; + initStringBuffer(&sb); + append(&sb, "SELECT docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content"); + if( idxNum!=QUERY_GENERIC ) append(&sb, " WHERE docid = ?"); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, + stringBufferData(&sb)); + stringBufferDestroy(&sb); + if( rc!=SQLITE_OK ) return rc; + c->iCursorType = idxNum; + }else{ + sqlite3_reset(c->pStmt); + assert( c->iCursorType==idxNum ); + } + + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_DOCID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + int iCol = idxNum-QUERY_FULLTEXT; + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + if( c->result.nData!=0 ){ + /* This case happens if the same cursor is used repeatedly. */ + dlrDestroy(&c->reader); + dataBufferReset(&c->result); + }else{ + dataBufferInit(&c->result, 0); + } + rc = fulltextQuery(v, iCol, zQuery, -1, &c->result, &c->pExpr); + if( rc!=SQLITE_OK ) return rc; + if( c->result.nData!=0 ){ + dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); + } + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxColnColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + }else if( idxCol==v->nColumn+1 ){ + /* The docid column, which is an alias for rowid. */ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, 0); + sqlite3_result_value(pContext, pVal); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrieve the rowid for the current row of the result set. fts3 +** exposes %_content.docid as the rowid for the virtual table. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, +** we also store positions and offsets in the hash table using that +** column number. 
+*/ +static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition)) ){ + DLCollector *p; + int nData; /* Size of doclist before our update. */ + + /* Positions can't be negative; we use -1 as a terminator + * internally. Token can't be NULL or empty. */ + if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ + rc = SQLITE_ERROR; + break; + } + + p = fts3HashFind(&v->pendingTerms, pToken, nTokenBytes); + if( p==NULL ){ + nData = 0; + p = dlcNew(iDocid, DL_DEFAULT); + fts3HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); + + /* Overhead for our hash table entry, the key, and the value. */ + v->nPendingData += sizeof(struct fts3HashElem)+sizeof(*p)+nTokenBytes; + }else{ + nData = p->b.nData; + if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); + } + if( iColumn>=0 ){ + dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + + /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ + v->nPendingData += p->b.nData-nData; + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + if( SQLITE_DONE == rc ) return SQLITE_OK; + return rc; +} + +/* Add doclists for all terms in [pValues] to pendingTerms table. */ +static int insertTerms(fulltext_vtab *v, sqlite_int64 iDocid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, iDocid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to +** pendingTerms. +*/ +static int deleteTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + const char **pValues; + int i, rc; + + /* TODO(shess) Should we allow such tables at all? */ + if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; + + rc = content_select(v, iDocid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, iDocid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); + +/* Insert a row into the %_content table; set *piDocid to be the ID of the +** new row. Add doclists for terms to pendingTerms. +*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestDocid, + sqlite3_value **pValues, sqlite_int64 *piDocid){ + int rc; + + rc = content_insert(v, pRequestDocid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + + /* docid column is an alias for rowid. */ + *piDocid = sqlite3_last_insert_rowid(v->db); + rc = initPendingTerms(v, *piDocid); + if( rc!=SQLITE_OK ) return rc; + + return insertTerms(v, *piDocid, pValues); +} + +/* Delete a row from the %_content table; add empty doclists for terms +** to pendingTerms. 
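+**
+** For example, deleting docid 7 whose content contained "fox" leaves,
+** under the term "fox", a pending doclist entry for docid 7 with no
+** position data; when segments are later merged, this newer empty
+** entry overrides the older positional one, and the docid is dropped
+** when deletions are trimmed (see the docListTrim() usage in
+** optimizeInternal()).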
+*/
+static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){
+  int rc = initPendingTerms(v, iRow);
+  if( rc!=SQLITE_OK ) return rc;
+
+  rc = deleteTerms(v, iRow);
+  if( rc!=SQLITE_OK ) return rc;
+
+  return content_delete(v, iRow);  /* execute an SQL DELETE */
+}
+
+/* Update a row in the %_content table; add delete doclists to
+** pendingTerms for old terms not in the new data, add insert doclists
+** to pendingTerms for terms in the new data.
+*/
+static int index_update(fulltext_vtab *v, sqlite_int64 iRow,
+                        sqlite3_value **pValues){
+  int rc = initPendingTerms(v, iRow);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Generate an empty doclist for each term that previously appeared in this
+   * row. */
+  rc = deleteTerms(v, iRow);
+  if( rc!=SQLITE_OK ) return rc;
+
+  rc = content_update(v, pValues, iRow);  /* execute an SQL UPDATE */
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Now add positions for terms which appear in the updated row. */
+  return insertTerms(v, iRow, pValues);
+}
+
+/*******************************************************************/
+/* InteriorWriter is used to collect terms and block references into
+** interior nodes in %_segments.  See commentary at top of file for
+** format.
+*/
+
+/* How large interior nodes can grow. */
+#define INTERIOR_MAX 2048
+
+/* Minimum number of terms per interior node (except the root). This
+** prevents large terms from making the tree too skinny - must be >0
+** so that the tree always makes progress.  Note that the min tree
+** fanout will be INTERIOR_MIN_TERMS+1.
+*/
+#define INTERIOR_MIN_TERMS 7
+#if INTERIOR_MIN_TERMS<1
+# error INTERIOR_MIN_TERMS must be greater than 0.
+#endif
+
+/* ROOT_MAX controls how much data is stored inline in the segment
+** directory.
+*/
+/* TODO(shess) Push ROOT_MAX down to whoever is writing things.  It's
+** only here so that interiorWriterRootInfo() and leafWriterRootInfo()
+** can both see it, but if the caller passed it in, we wouldn't even
+** need a define.
+*/
+#define ROOT_MAX 1024
+#if ROOT_MAX<VARINT_MAX*2
+# error ROOT_MAX must have room for a header.
+#endif
+
+/* InteriorBlock stores a linked-list of interior blocks while a lower
+** layer is being constructed.
+*/
+typedef struct InteriorBlock {
+  DataBuffer term;           /* Leading term in block's leaf nodes. */
+  DataBuffer data;           /* Accumulated data for the block. */
+  struct InteriorBlock *next;
+} InteriorBlock;
+
+static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock,
+                                       const char *pTerm, int nTerm){
+  InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock));
+  char c[VARINT_MAX+VARINT_MAX];
+  int n;
+
+  if( block ){
+    memset(block, 0, sizeof(*block));
+    dataBufferInit(&block->term, 0);
+    dataBufferReplace(&block->term, pTerm, nTerm);
+
+    n = fts3PutVarint(c, iHeight);
+    n += fts3PutVarint(c+n, iChildBlock);
+    dataBufferInit(&block->data, INTERIOR_MAX);
+    dataBufferReplace(&block->data, c, n);
+  }
+  return block;
+}
+
+#ifndef NDEBUG
+/* Verify that the data is readable as an interior node. */
+static void interiorBlockValidate(InteriorBlock *pBlock){
+  const char *pData = pBlock->data.pData;
+  int nData = pBlock->data.nData;
+  int n, iDummy;
+  sqlite_int64 iBlockid;
+
+  assert( nData>0 );
+  assert( pData!=0 );
+  assert( pData+nData>pData );
+
+  /* Must lead with height of node as a varint(n), n>0 */
+  n = fts3GetVarint32(pData, &iDummy);
+  assert( n>0 );
+  assert( iDummy>0 );
+  assert( n<nData );
+  pData += n;
+  nData -= n;
+
+  /* Must contain iBlockid. */
+  n = fts3GetVarint(pData, &iBlockid);
+  assert( n>0 );
+  assert( n<=nData );
+  pData += n;
+  nData -= n;
+
+  /* Zero or more terms of positive length */
+  if( nData!=0 ){
+    /* First term is not delta-encoded. */
+    n = fts3GetVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>0 );
+    assert( n+iDummy>0);
+    assert( n+iDummy<=nData );
+    pData += n+iDummy;
+    nData -= n+iDummy;
+
+    /* Following terms delta-encoded. */
+    while( nData!=0 ){
+      /* Length of shared prefix.
*/ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + } + } +} +#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) +#else +#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) +#endif + +typedef struct InteriorWriter { + int iHeight; /* from 0 at leaves. */ + InteriorBlock *first, *last; + struct InteriorWriter *parentWriter; + + DataBuffer term; /* Last term written to block "last". */ + sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ +#ifndef NDEBUG + sqlite_int64 iLastChildBlock; /* for consistency checks. */ +#endif +} InteriorWriter; + +/* Initialize an interior node where pTerm[nTerm] marks the leftmost +** term in the tree. iChildBlock is the leftmost child block at the +** next level down the tree. +*/ +static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, + sqlite_int64 iChildBlock, + InteriorWriter *pWriter){ + InteriorBlock *block; + assert( iHeight>0 ); + CLEAR(pWriter); + + pWriter->iHeight = iHeight; + pWriter->iOpeningChildBlock = iChildBlock; +#ifndef NDEBUG + pWriter->iLastChildBlock = iChildBlock; +#endif + block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); + pWriter->last = pWriter->first = block; + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + dataBufferInit(&pWriter->term, 0); +} + +/* Append the child node rooted at iChildBlock to the interior node, +** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. +*/ +static void interiorWriterAppend(InteriorWriter *pWriter, + const char *pTerm, int nTerm, + sqlite_int64 iChildBlock){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + + /* The first term written into an interior node is actually + ** associated with the second child added (the first child was added + ** in interiorWriterInit, or in the if clause at the bottom of this + ** function). That term gets encoded straight up, with nPrefix left + ** at 0. + */ + if( pWriter->term.nData==0 ){ + n = fts3PutVarint(c, nTerm); + }else{ + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + } + + n = fts3PutVarint(c, nPrefix); + n += fts3PutVarint(c+n, nTerm-nPrefix); + } + +#ifndef NDEBUG + pWriter->iLastChildBlock++; +#endif + assert( pWriter->iLastChildBlock==iChildBlock ); + + /* Overflow to a new block if the new term makes the current block + ** too big, and the current block already has enough terms. + */ + if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && + iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ + pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, + pTerm, nTerm); + pWriter->last = pWriter->last->next; + pWriter->iOpeningChildBlock = iChildBlock; + dataBufferReset(&pWriter->term); + }else{ + dataBufferAppend2(&pWriter->last->data, c, n, + pTerm+nPrefix, nTerm-nPrefix); + dataBufferReplace(&pWriter->term, pTerm, nTerm); + } + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); +} + +/* Free the space used by pWriter, including the linked-list of +** InteriorBlocks, and parentWriter, if present. 
+*/
+static int interiorWriterDestroy(InteriorWriter *pWriter){
+  InteriorBlock *block = pWriter->first;
+
+  while( block!=NULL ){
+    InteriorBlock *b = block;
+    block = block->next;
+    dataBufferDestroy(&b->term);
+    dataBufferDestroy(&b->data);
+    sqlite3_free(b);
+  }
+  if( pWriter->parentWriter!=NULL ){
+    interiorWriterDestroy(pWriter->parentWriter);
+    sqlite3_free(pWriter->parentWriter);
+  }
+  dataBufferDestroy(&pWriter->term);
+  SCRAMBLE(pWriter);
+  return SQLITE_OK;
+}
+
+/* If pWriter can fit entirely in ROOT_MAX, return it as the root info
+** directly, leaving *piEndBlockid unchanged.  Otherwise, flush
+** pWriter to %_segments, building a new layer of interior nodes, and
+** recursively ask for their root info.
+*/
+static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter,
+                                  char **ppRootInfo, int *pnRootInfo,
+                                  sqlite_int64 *piEndBlockid){
+  InteriorBlock *block = pWriter->first;
+  sqlite_int64 iBlockid = 0;
+  int rc;
+
+  /* If we can fit the segment inline */
+  if( block==pWriter->last && block->data.nData<ROOT_MAX ){
+    *ppRootInfo = block->data.pData;
+    *pnRootInfo = block->data.nData;
+    return SQLITE_OK;
+  }
+
+  /* Flush the first block to %_segments, and create a new level of
+  ** interior node.
+  */
+  ASSERT_VALID_INTERIOR_BLOCK(block);
+  rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+  *piEndBlockid = iBlockid;
+
+  pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter));
+  interiorWriterInit(pWriter->iHeight+1,
+                     block->term.pData, block->term.nData,
+                     iBlockid, pWriter->parentWriter);
+
+  /* Flush additional blocks and append to the higher interior
+  ** node.
+  */
+  for(block=block->next; block!=NULL; block=block->next){
+    ASSERT_VALID_INTERIOR_BLOCK(block);
+    rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
+    if( rc!=SQLITE_OK ) return rc;
+    *piEndBlockid = iBlockid;
+
+    interiorWriterAppend(pWriter->parentWriter,
+                         block->term.pData, block->term.nData, iBlockid);
+  }
+
+  /* Parent node gets the chance to be the root. */
+  return interiorWriterRootInfo(v, pWriter->parentWriter,
+                                ppRootInfo, pnRootInfo, piEndBlockid);
+}
+
+/****************************************************************/
+/* InteriorReader is used to read off the data from an interior node
+** (see comment at top of file for the format).
+*/
+typedef struct InteriorReader {
+  const char *pData;
+  int nData;
+
+  DataBuffer term;          /* previous term, for decoding term delta. */
+
+  sqlite_int64 iBlockid;
+} InteriorReader;
+
+static void interiorReaderDestroy(InteriorReader *pReader){
+  dataBufferDestroy(&pReader->term);
+  SCRAMBLE(pReader);
+}
+
+/* TODO(shess) The assertions are great, but what if we're in NDEBUG
+** and the blob is empty or otherwise contains suspect data?
+*/
+static void interiorReaderInit(const char *pData, int nData,
+                               InteriorReader *pReader){
+  int n, nTerm;
+
+  /* Require at least the leading flag byte */
+  assert( nData>0 );
+  assert( pData[0]!='\0' );
+
+  CLEAR(pReader);
+
+  /* Decode the base blockid, and set the cursor to the first term. */
+  n = fts3GetVarint(pData+1, &pReader->iBlockid);
+  assert( 1+n<=nData );
+  pReader->pData = pData+1+n;
+  pReader->nData = nData-(1+n);
+
+  /* A single-child interior node (such as when a leaf node was too
+  ** large for the segment directory) won't have any terms.
+  ** Otherwise, decode the first term.
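+  **
+  ** Illustrative layout (invented values): a height-1 node whose
+  ** leftmost child is block 20 and which carries the terms "apple"
+  ** and "apricot" is encoded roughly as
+  **
+  **   varint(1) varint(20) varint(5) "apple" varint(2) varint(5) "ricot"
+  **
+  ** so the reader starts at blockid 20 and steps the blockid by one
+  ** for each term it decodes.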
+ */ + if( pReader->nData==0 ){ + dataBufferInit(&pReader->term, 0); + }else{ + n = fts3GetVarint32(pReader->pData, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); + assert( n+nTerm<=pReader->nData ); + pReader->pData += n+nTerm; + pReader->nData -= n+nTerm; + } +} + +static int interiorReaderAtEnd(InteriorReader *pReader){ + return pReader->term.nData==0; +} + +static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ + return pReader->iBlockid; +} + +static int interiorReaderTermBytes(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.nData; +} +static const char *interiorReaderTerm(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.pData; +} + +/* Step forward to the next term in the node. */ +static void interiorReaderStep(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + + /* If the last term has been read, signal eof, else construct the + ** next term. + */ + if( pReader->nData==0 ){ + dataBufferReset(&pReader->term); + }else{ + int n, nPrefix, nSuffix; + + n = fts3GetVarint32(pReader->pData, &nPrefix); + n += fts3GetVarint32(pReader->pData+n, &nSuffix); + + /* Truncate the current term and append suffix data. */ + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + assert( n+nSuffix<=pReader->nData ); + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } + pReader->iBlockid++; +} + +/* Compare the current term to pTerm[nTerm], returning strcmp-style +** results. If isPrefix, equality means equal through nTerm bytes. +*/ +static int interiorReaderTermCmp(InteriorReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + const char *pReaderTerm = interiorReaderTerm(pReader); + int nReaderTerm = interiorReaderTermBytes(pReader); + int c, n = nReaderTerm0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReaderTerm, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return nReaderTerm - nTerm; +} + +/****************************************************************/ +/* LeafWriter is used to collect terms and associated doclist data +** into leaf blocks in %_segments (see top of file for format info). +** Expected usage is: +** +** LeafWriter writer; +** leafWriterInit(0, 0, &writer); +** while( sorted_terms_left_to_process ){ +** // data is doclist data for that term. +** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); +** if( rc!=SQLITE_OK ) goto err; +** } +** rc = leafWriterFinalize(v, &writer); +**err: +** leafWriterDestroy(&writer); +** return rc; +** +** leafWriterStep() may write a collected leaf out to %_segments. +** leafWriterFinalize() finishes writing any buffered data and stores +** a root node in %_segdir. leafWriterDestroy() frees all buffers and +** InteriorWriters allocated as part of writing this segment. +** +** TODO(shess) Document leafWriterStepMerge(). +*/ + +/* Put terms with data this big in their own block. */ +#define STANDALONE_MIN 1024 + +/* Keep leaf blocks below this size. */ +#define LEAF_MAX 2048 + +typedef struct LeafWriter { + int iLevel; + int idx; + sqlite_int64 iStartBlockid; /* needed to create the root info */ + sqlite_int64 iEndBlockid; /* when we're done writing. 
*/ + + DataBuffer term; /* previous encoded term */ + DataBuffer data; /* encoding buffer */ + + /* bytes of first term in the current node which distinguishes that + ** term from the last term of the previous node. + */ + int nTermDistinct; + + InteriorWriter parentWriter; /* if we overflow */ + int has_parent; +} LeafWriter; + +static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ + CLEAR(pWriter); + pWriter->iLevel = iLevel; + pWriter->idx = idx; + + dataBufferInit(&pWriter->term, 32); + + /* Start out with a reasonably sized block, though it can grow. */ + dataBufferInit(&pWriter->data, LEAF_MAX); +} + +#ifndef NDEBUG +/* Verify that the data is readable as a leaf node. */ +static void leafNodeValidate(const char *pData, int nData){ + int n, iDummy; + + if( nData==0 ) return; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with a varint(0) */ + n = fts3GetVarint32(pData, &iDummy); + assert( iDummy==0 ); + assert( n>0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + + /* Verify that trailing terms and doclists also are readable. */ + while( nData!=0 ){ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy0 ); + assert( iDummy>0 ); + assert( n+iDummy>0 ); + assert( n+iDummy<=nData ); + ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); + pData += n+iDummy; + nData -= n+iDummy; + } +} +#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) +#else +#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) +#endif + +/* Flush the current leaf node to %_segments, and adding the resulting +** blockid and the starting term to the interior node which will +** contain it. +*/ +static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, + int iData, int nData){ + sqlite_int64 iBlockid = 0; + const char *pStartingTerm; + int nStartingTerm, rc, n; + + /* Must have the leading varint(0) flag, plus at least some + ** valid-looking data. + */ + assert( nData>2 ); + assert( iData>=0 ); + assert( iData+nData<=pWriter->data.nData ); + ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); + + rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + assert( iBlockid!=0 ); + + /* Reconstruct the first term in the leaf for purposes of building + ** the interior node. + */ + n = fts3GetVarint32(pWriter->data.pData+iData+1, &nStartingTerm); + pStartingTerm = pWriter->data.pData+iData+1+n; + assert( pWriter->data.nData>iData+1+n+nStartingTerm ); + assert( pWriter->nTermDistinct>0 ); + assert( pWriter->nTermDistinct<=nStartingTerm ); + nStartingTerm = pWriter->nTermDistinct; + + if( pWriter->has_parent ){ + interiorWriterAppend(&pWriter->parentWriter, + pStartingTerm, nStartingTerm, iBlockid); + }else{ + interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, + &pWriter->parentWriter); + pWriter->has_parent = 1; + } + + /* Track the span of this segment's leaf nodes. 
*/ + if( pWriter->iEndBlockid==0 ){ + pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; + }else{ + pWriter->iEndBlockid++; + assert( iBlockid==pWriter->iEndBlockid ); + } + + return SQLITE_OK; +} +static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ + int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); + if( rc!=SQLITE_OK ) return rc; + + /* Re-initialize the output buffer. */ + dataBufferReset(&pWriter->data); + + return SQLITE_OK; +} + +/* Fetch the root info for the segment. If the entire leaf fits +** within ROOT_MAX, then it will be returned directly, otherwise it +** will be flushed and the root info will be returned from the +** interior node. *piEndBlockid is set to the blockid of the last +** interior or leaf node written to disk (0 if none are written at +** all). +*/ +static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + /* we can fit the segment entirely inline */ + if( !pWriter->has_parent && pWriter->data.nDatadata.pData; + *pnRootInfo = pWriter->data.nData; + *piEndBlockid = 0; + return SQLITE_OK; + } + + /* Flush remaining leaf data. */ + if( pWriter->data.nData>0 ){ + int rc = leafWriterFlush(v, pWriter); + if( rc!=SQLITE_OK ) return rc; + } + + /* We must have flushed a leaf at some point. */ + assert( pWriter->has_parent ); + + /* Tenatively set the end leaf blockid as the end blockid. If the + ** interior node can be returned inline, this will be the final + ** blockid, otherwise it will be overwritten by + ** interiorWriterRootInfo(). + */ + *piEndBlockid = pWriter->iEndBlockid; + + return interiorWriterRootInfo(v, &pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/* Collect the rootInfo data and store it into the segment directory. +** This has the effect of flushing the segment's leaf data to +** %_segments, and also flushing any interior nodes to %_segments. +*/ +static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){ + sqlite_int64 iEndBlockid; + char *pRootInfo; + int rc, nRootInfo; + + rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + /* Don't bother storing an entirely empty segment. */ + if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; + + return segdir_set(v, pWriter->iLevel, pWriter->idx, + pWriter->iStartBlockid, pWriter->iEndBlockid, + iEndBlockid, pRootInfo, nRootInfo); +} + +static void leafWriterDestroy(LeafWriter *pWriter){ + if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); + dataBufferDestroy(&pWriter->term); + dataBufferDestroy(&pWriter->data); +} + +/* Encode a term into the leafWriter, delta-encoding as appropriate. +** Returns the length of the new term which distinguishes it from the +** previous term, which can be used to set nTermDistinct when a node +** boundary is crossed. +*/ +static int leafWriterEncodeTerm(LeafWriter *pWriter, + const char *pTerm, int nTerm){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + assert( nTerm>0 ); + while( nPrefixterm.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + /* Failing this implies that the terms weren't in order. 
*/ + assert( nPrefixdata.nData==0 ){ + /* Encode the node header and leading term as: + ** varint(0) + ** varint(nTerm) + ** char pTerm[nTerm] + */ + n = fts3PutVarint(c, '\0'); + n += fts3PutVarint(c+n, nTerm); + dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); + }else{ + /* Delta-encode the term as: + ** varint(nPrefix) + ** varint(nSuffix) + ** char pTermSuffix[nSuffix] + */ + n = fts3PutVarint(c, nPrefix); + n += fts3PutVarint(c+n, nTerm-nPrefix); + dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); + } + dataBufferReplace(&pWriter->term, pTerm, nTerm); + + return nPrefix+1; +} + +/* Used to avoid a memmove when a large amount of doclist data is in +** the buffer. This constructs a node and term header before +** iDoclistData and flushes the resulting complete node using +** leafWriterInternalFlush(). +*/ +static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + int iDoclistData){ + char c[VARINT_MAX+VARINT_MAX]; + int iData, n = fts3PutVarint(c, 0); + n += fts3PutVarint(c+n, nTerm); + + /* There should always be room for the header. Even if pTerm shared + ** a substantial prefix with the previous term, the entire prefix + ** could be constructed from earlier data in the doclist, so there + ** should be room. + */ + assert( iDoclistData>=n+nTerm ); + + iData = iDoclistData-(n+nTerm); + memcpy(pWriter->data.pData+iData, c, n); + memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); + + return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + DLReader *pReaders, int nReaders){ + char c[VARINT_MAX+VARINT_MAX]; + int iTermData = pWriter->data.nData, iDoclistData; + int i, nData, n, nActualData, nActual, rc, nTermDistinct; + + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); + + /* Remember nTermDistinct if opening a new node. */ + if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; + + iDoclistData = pWriter->data.nData; + + /* Estimate the length of the merged doclist so we can leave space + ** to encode it. + */ + for(i=0, nData=0; idata, c, n); + + docListMerge(&pWriter->data, pReaders, nReaders); + ASSERT_VALID_DOCLIST(DL_DEFAULT, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-iDoclistData-n, NULL); + + /* The actual amount of doclist data at this point could be smaller + ** than the length we encoded. Additionally, the space required to + ** encode this length could be smaller. For small doclists, this is + ** not a big deal, we can just use memmove() to adjust things. + */ + nActualData = pWriter->data.nData-(iDoclistData+n); + nActual = fts3PutVarint(c, nActualData); + assert( nActualData<=nData ); + assert( nActual<=n ); + + /* If the new doclist is big enough for force a standalone leaf + ** node, we can immediately flush it inline without doing the + ** memmove(). + */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData-iTermData>STANDALONE_MIN. + */ + if( nTerm+nActualData>STANDALONE_MIN ){ + /* Push leaf node from before this term. 
*/ + if( iTermData>0 ){ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + } + + /* Fix the encoded doclist length. */ + iDoclistData += n - nActual; + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* Push the standalone leaf node. */ + rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData); + if( rc!=SQLITE_OK ) return rc; + + /* Leave the node empty. */ + dataBufferReset(&pWriter->data); + + return rc; + } + + /* At this point, we know that the doclist was small, so do the + ** memmove if indicated. + */ + if( nActualdata.pData+iDoclistData+nActual, + pWriter->data.pData+iDoclistData+n, + pWriter->data.nData-(iDoclistData+n)); + pWriter->data.nData -= n-nActual; + } + + /* Replace written length with actual length. */ + memcpy(pWriter->data.pData+iDoclistData, c, nActual); + + /* If the node is too large, break things up. */ + /* TODO(shess) This test matches leafWriterStep(), which does this + ** test before it knows the cost to varint-encode the term and + ** doclist lengths. At some point, change to + ** pWriter->data.nData>LEAF_MAX. + */ + if( iTermData+nTerm+nActualData>LEAF_MAX ){ + /* Flush out the leading data as a node */ + rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); + if( rc!=SQLITE_OK ) return rc; + + pWriter->nTermDistinct = nTermDistinct; + + /* Rebuild header using the current term */ + n = fts3PutVarint(pWriter->data.pData, 0); + n += fts3PutVarint(pWriter->data.pData+n, nTerm); + memcpy(pWriter->data.pData+n, pTerm, nTerm); + n += nTerm; + + /* There should always be room, because the previous encoding + ** included all data necessary to construct the term. + */ + assert( ndata.nData-iDoclistDatadata.pData+n, + pWriter->data.pData+iDoclistData, + pWriter->data.nData-iDoclistData); + pWriter->data.nData -= iDoclistData-n; + } + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + + return SQLITE_OK; +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +/* TODO(shess) Revise writeZeroSegment() so that doclists are +** constructed directly in pWriter->data. +*/ +static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + const char *pData, int nData){ + int rc; + DLReader reader; + + dlrInit(&reader, DL_DEFAULT, pData, nData); + rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); + dlrDestroy(&reader); + + return rc; +} + + +/****************************************************************/ +/* LeafReader is used to iterate over an individual leaf node. */ +typedef struct LeafReader { + DataBuffer term; /* copy of current term. */ + + const char *pData; /* data for current term. */ + int nData; +} LeafReader; + +static void leafReaderDestroy(LeafReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +static int leafReaderAtEnd(LeafReader *pReader){ + return pReader->nData<=0; +} + +/* Access the current term. */ +static int leafReaderTermBytes(LeafReader *pReader){ + return pReader->term.nData; +} +static const char *leafReaderTerm(LeafReader *pReader){ + assert( pReader->term.nData>0 ); + return pReader->term.pData; +} + +/* Access the doclist data for the current term. 
*/ +static int leafReaderDataBytes(LeafReader *pReader){ + int nData; + assert( pReader->term.nData>0 ); + fts3GetVarint32(pReader->pData, &nData); + return nData; +} +static const char *leafReaderData(LeafReader *pReader){ + int n, nData; + assert( pReader->term.nData>0 ); + n = fts3GetVarint32(pReader->pData, &nData); + return pReader->pData+n; +} + +static void leafReaderInit(const char *pData, int nData, + LeafReader *pReader){ + int nTerm, n; + + assert( nData>0 ); + assert( pData[0]=='\0' ); + + CLEAR(pReader); + + /* Read the first term, skipping the header byte. */ + n = fts3GetVarint32(pData+1, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pData+1+n, nTerm); + + /* Position after the first term. */ + assert( 1+n+nTermpData = pData+1+n+nTerm; + pReader->nData = nData-1-n-nTerm; +} + +/* Step the reader forward to the next term. */ +static void leafReaderStep(LeafReader *pReader){ + int n, nData, nPrefix, nSuffix; + assert( !leafReaderAtEnd(pReader) ); + + /* Skip previous entry's data block. */ + n = fts3GetVarint32(pReader->pData, &nData); + assert( n+nData<=pReader->nData ); + pReader->pData += n+nData; + pReader->nData -= n+nData; + + if( !leafReaderAtEnd(pReader) ){ + /* Construct the new term using a prefix from the old term plus a + ** suffix from the leaf data. + */ + n = fts3GetVarint32(pReader->pData, &nPrefix); + n += fts3GetVarint32(pReader->pData+n, &nSuffix); + assert( n+nSuffixnData ); + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } +} + +/* strcmp-style comparison of pReader's current term against pTerm. +** If isPrefix, equality means equal through nTerm bytes. +*/ +static int leafReaderTermCmp(LeafReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + int c, n = pReader->term.nDataterm.nData : nTerm; + if( n==0 ){ + if( pReader->term.nData>0 ) return -1; + if(nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReader->term.pData, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return pReader->term.nData - nTerm; +} + + +/****************************************************************/ +/* LeavesReader wraps LeafReader to allow iterating over the entire +** leaf layer of the tree. +*/ +typedef struct LeavesReader { + int idx; /* Index within the segment. */ + + sqlite3_stmt *pStmt; /* Statement we're streaming leaves from. */ + int eof; /* we've seen SQLITE_DONE from pStmt. */ + + LeafReader leafReader; /* reader for the current leaf. */ + DataBuffer rootData; /* root data for inline. */ +} LeavesReader; + +/* Access the current term. */ +static int leavesReaderTermBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTermBytes(&pReader->leafReader); +} +static const char *leavesReaderTerm(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderTerm(&pReader->leafReader); +} + +/* Access the doclist data for the current term. */ +static int leavesReaderDataBytes(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderDataBytes(&pReader->leafReader); +} +static const char *leavesReaderData(LeavesReader *pReader){ + assert( !pReader->eof ); + return leafReaderData(&pReader->leafReader); +} + +static int leavesReaderAtEnd(LeavesReader *pReader){ + return pReader->eof; +} + +/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus +** leaving the statement handle open, which locks the table. 
+*/ +/* TODO(shess) This "solution" is not satisfactory. Really, there +** should be check-in function for all statement handles which +** arranges to call sqlite3_reset(). This most likely will require +** modification to control flow all over the place, though, so for now +** just punt. +** +** Note the the current system assumes that segment merges will run to +** completion, which is why this particular probably hasn't arisen in +** this case. Probably a brittle assumption. +*/ +static int leavesReaderReset(LeavesReader *pReader){ + return sqlite3_reset(pReader->pStmt); +} + +static void leavesReaderDestroy(LeavesReader *pReader){ + /* If idx is -1, that means we're using a non-cached statement + ** handle in the optimize() case, so we need to release it. + */ + if( pReader->pStmt!=NULL && pReader->idx==-1 ){ + sqlite3_finalize(pReader->pStmt); + } + leafReaderDestroy(&pReader->leafReader); + dataBufferDestroy(&pReader->rootData); + SCRAMBLE(pReader); +} + +/* Initialize pReader with the given root data (if iStartBlockid==0 +** the leaf data was entirely contained in the root), or from the +** stream of blocks between iStartBlockid and iEndBlockid, inclusive. +*/ +static int leavesReaderInit(fulltext_vtab *v, + int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData, + LeavesReader *pReader){ + CLEAR(pReader); + pReader->idx = idx; + + dataBufferInit(&pReader->rootData, 0); + if( iStartBlockid==0 ){ + /* Entire leaf level fit in root data. */ + dataBufferReplace(&pReader->rootData, pRootData, nRootData); + leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, + &pReader->leafReader); + }else{ + sqlite3_stmt *s; + int rc = sql_get_leaf_statement(v, idx, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + pReader->eof = 1; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + pReader->pStmt = s; + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Step the current leaf forward to the next term. If we reach the +** end of the current leaf, step forward to the next leaf block. +*/ +static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ + assert( !leavesReaderAtEnd(pReader) ); + leafReaderStep(&pReader->leafReader); + + if( leafReaderAtEnd(&pReader->leafReader) ){ + int rc; + if( pReader->rootData.pData ){ + pReader->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->eof = 1; + return rc==SQLITE_DONE ? SQLITE_OK : rc; + } + leafReaderDestroy(&pReader->leafReader); + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Order LeavesReaders by their term, ignoring idx. Readers at eof +** always sort to the end. +*/ +static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ + if( leavesReaderAtEnd(lr1) ){ + if( leavesReaderAtEnd(lr2) ) return 0; + return 1; + } + if( leavesReaderAtEnd(lr2) ) return -1; + + return leafReaderTermCmp(&lr1->leafReader, + leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), + 0); +} + +/* Similar to leavesReaderTermCmp(), with additional ordering by idx +** so that older segments sort before newer segments. 
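+**
+** For example (hypothetical values), readers positioned at
+** ("cat", idx 2), ("cat", idx 0) and ("dog", idx 1) sort as
+** ("cat", 0), ("cat", 2), ("dog", 1); a reader at eof always sorts
+** after all non-eof readers.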
+*/ +static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ + int c = leavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->idx-lr2->idx; +} + +/* Assume that pLr[1]..pLr[nLr] are sorted. Bubble pLr[0] into its +** sorted position. +*/ +static void leavesReaderReorder(LeavesReader *pLr, int nLr){ + while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ + LeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* Initializes pReaders with the segments from level iLevel, returning +** the number of segments in *piReaders. Leaves pReaders in sorted +** order. +*/ +static int leavesReadersInit(fulltext_vtab *v, int iLevel, + LeavesReader *pReaders, int *piReaders){ + sqlite3_stmt *s; + int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i0 ){ + leavesReaderDestroy(&pReaders[i]); + } + return rc; + } + + *piReaders = i; + + /* Leave our results sorted by term, then age. */ + while( i-- ){ + leavesReaderReorder(pReaders+i, *piReaders-i); + } + return SQLITE_OK; +} + +/* Merge doclists from pReaders[nReaders] into a single doclist, which +** is written to pWriter. Assumes pReaders is ordered oldest to +** newest. +*/ +/* TODO(shess) Consider putting this inline in segmentMerge(). */ +static int leavesReadersMerge(fulltext_vtab *v, + LeavesReader *pReaders, int nReaders, + LeafWriter *pWriter){ + DLReader dlReaders[MERGE_COUNT]; + const char *pTerm = leavesReaderTerm(pReaders); + int i, nTerm = leavesReaderTermBytes(pReaders); + + assert( nReaders<=MERGE_COUNT ); + + for(i=0; i0 ){ + rc = leavesReaderStep(v, lrs+i); + if( rc!=SQLITE_OK ) goto err; + + /* Reorder by term, then by age. */ + leavesReaderReorder(lrs+i, MERGE_COUNT-i); + } + } + + for(i=0; i0 ); + + for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); + rc=leavesReaderStep(v, pReader)){ + /* TODO(shess) Really want leavesReaderTermCmp(), but that name is + ** already taken to compare the terms of two LeavesReaders. Think + ** on a better name. [Meanwhile, break encapsulation rather than + ** use a confusing name.] + */ + int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c>0 ) break; /* Past any possible matches. */ + if( c==0 ){ + const char *pData = leavesReaderData(pReader); + int iBuffer, nData = leavesReaderDataBytes(pReader); + + /* Find the first empty buffer. */ + for(iBuffer=0; iBuffer0 ){ + assert(pBuffers!=NULL); + memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); + sqlite3_free(pBuffers); + } + pBuffers = p; + } + dataBufferInit(&(pBuffers[nBuffers]), 0); + nBuffers++; + } + + /* At this point, must have an empty at iBuffer. */ + assert(iBufferpData, p->nData); + + /* dataBufferReset() could allow a large doclist to blow up + ** our memory requirements. + */ + if( p->nCapacity<1024 ){ + dataBufferReset(p); + }else{ + dataBufferDestroy(p); + dataBufferInit(p, 0); + } + } + } + } + } + + /* Union all the doclists together into *out. */ + /* TODO(shess) What if *out is big? Sigh. 
*/ + if( rc==SQLITE_OK && nBuffers>0 ){ + int iBuffer; + for(iBuffer=0; iBuffer0 ){ + if( out->nData==0 ){ + dataBufferSwap(out, &(pBuffers[iBuffer])); + }else{ + docListAccumulateUnion(out, pBuffers[iBuffer].pData, + pBuffers[iBuffer].nData); + } + } + } + } + + while( nBuffers-- ){ + dataBufferDestroy(&(pBuffers[nBuffers])); + } + if( pBuffers!=NULL ) sqlite3_free(pBuffers); + + return rc; +} + +/* Call loadSegmentLeavesInt() with pData/nData as input. */ +static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + LeavesReader reader; + int rc; + + assert( nData>1 ); + assert( *pData=='\0' ); + rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to +** iEndLeaf (inclusive) as input, and merge the resulting doclist into +** out. +*/ +static int loadSegmentLeaves(fulltext_vtab *v, + sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + int rc; + LeavesReader reader; + + assert( iStartLeaf<=iEndLeaf ); + rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Taking pData/nData as an interior node, find the sequence of child +** nodes which could include pTerm/nTerm/isPrefix. Note that the +** interior node terms logically come between the blocks, so there is +** one more blockid than there are terms (that block contains terms >= +** the last interior-node term). +*/ +/* TODO(shess) The calling code may already know that the end child is +** not worth calculating, because the end may be in a later sibling +** node. Consider whether breaking symmetry is worthwhile. I suspect +** it is not worthwhile. +*/ +static void getChildrenContaining(const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, + sqlite_int64 *piEndChild){ + InteriorReader reader; + + assert( nData>1 ); + assert( *pData!='\0' ); + interiorReaderInit(pData, nData, &reader); + + /* Scan for the first child which could contain pTerm/nTerm. */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; + interiorReaderStep(&reader); + } + *piStartChild = interiorReaderCurrentBlockid(&reader); + + /* Keep scanning to find a term greater than our term, using prefix + ** comparison if indicated. If isPrefix is false, this will be the + ** same blockid as the starting block. + */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; + interiorReaderStep(&reader); + } + *piEndChild = interiorReaderCurrentBlockid(&reader); + + interiorReaderDestroy(&reader); + + /* Children must ascend, and if !prefix, both must be the same. */ + assert( *piEndChild>=*piStartChild ); + assert( isPrefix || *piStartChild==*piEndChild ); +} + +/* Read block at iBlockid and pass it with other params to +** getChildrenContaining(). 
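+**
+** As an invented example: if the block read holds the terms "cat" and
+** "dog" and its leftmost child is block 100 (so children 100..102
+** exist), a lookup for "cow" sets *piStartChild==*piEndChild==101,
+** while a prefix lookup for "c" yields the range 100..101.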
+*/ +static int loadAndGetChildrenContaining( + fulltext_vtab *v, + sqlite_int64 iBlockid, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, sqlite_int64 *piEndChild +){ + sqlite3_stmt *s = NULL; + int rc; + + assert( iBlockid!=0 ); + assert( pTerm!=NULL ); + assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ + assert( piStartChild!=NULL ); + assert( piEndChild!=NULL ); + + rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ERROR; + if( rc!=SQLITE_ROW ) return rc; + + getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), + pTerm, nTerm, isPrefix, piStartChild, piEndChild); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain + * locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + return SQLITE_OK; +} + +/* Traverse the tree represented by pData[nData] looking for +** pTerm[nTerm], placing its doclist into *out. This is internal to +** loadSegment() to make error-handling cleaner. +*/ +static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* Special case where root is a leaf. */ + if( *pData=='\0' ){ + return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); + }else{ + int rc; + sqlite_int64 iStartChild, iEndChild; + + /* Process pData as an interior node, then loop down the tree + ** until we find the set of leaf nodes to scan for the term. + */ + getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, + &iStartChild, &iEndChild); + while( iStartChild>iLeavesEnd ){ + sqlite_int64 iNextStart, iNextEnd; + rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, + &iNextStart, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + + /* If we've branched, follow the end branch, too. */ + if( iStartChild!=iEndChild ){ + sqlite_int64 iDummy; + rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, + &iDummy, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + } + + assert( iNextStart<=iNextEnd ); + iStartChild = iNextStart; + iEndChild = iNextEnd; + } + assert( iStartChild<=iLeavesEnd ); + assert( iEndChild<=iLeavesEnd ); + + /* Scan through the leaf segments for doclists. */ + return loadSegmentLeaves(v, iStartChild, iEndChild, + pTerm, nTerm, isPrefix, out); + } +} + +/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then +** merge its doclist over *out (any duplicate doclists read from the +** segment rooted at pData will overwrite those in *out). +*/ +/* TODO(shess) Consider changing this to determine the depth of the +** leaves using either the first characters of interior nodes (when +** ==1, we're one level above the leaves), or the first character of +** the root (which will describe the height of the tree directly). +** Either feels somewhat tricky to me. +*/ +/* TODO(shess) The current merge is likely to be slow for large +** doclists (though it should process from newest/smallest to +** oldest/largest, so it may not be that bad). It might be useful to +** modify things to allow for N-way merging. This could either be +** within a segment, with pairwise merges across segments, or across +** all segments at once. 
+*/ +static int loadSegment(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + DataBuffer result; + int rc; + + assert( nData>1 ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&result, 0); + rc = loadSegmentInt(v, pData, nData, iLeavesEnd, + pTerm, nTerm, isPrefix, &result); + if( rc==SQLITE_OK && result.nData>0 ){ + if( out->nData==0 ){ + DataBuffer tmp = *out; + *out = result; + result = tmp; + }else{ + DataBuffer merged; + DLReader readers[2]; + + dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); + dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); + dataBufferInit(&merged, out->nData+result.nData); + docListMerge(&merged, readers, 2); + dataBufferDestroy(out); + *out = merged; + dlrDestroy(&readers[0]); + dlrDestroy(&readers[1]); + } + } + dataBufferDestroy(&result); + return rc; +} + +/* Scan the database and merge together the posting lists for the term +** into *out. +*/ +static int termSelect( + fulltext_vtab *v, + int iColumn, + const char *pTerm, int nTerm, /* Term to query for */ + int isPrefix, /* True for a prefix search */ + DocListType iType, + DataBuffer *out /* Write results here */ +){ + DataBuffer doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&doclist, 0); + dataBufferInit(out, 0); + + /* Traverse the segments from oldest to newest so that newer doclist + ** elements for given docids overwrite older elements. + */ + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, + &doclist); + if( rc!=SQLITE_OK ) goto err; + } + if( rc==SQLITE_DONE ){ + if( doclist.nData!=0 ){ + /* TODO(shess) The old term_select_all() code applied the column + ** restrict as we merged segments, leading to smaller buffers. + ** This is probably worthwhile to bring back, once the new storage + ** system is checked in. + */ + if( iColumn==v->nColumn) iColumn = -1; + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + iColumn, iType, out); + } + rc = SQLITE_OK; + } + + err: + dataBufferDestroy(&doclist); + return rc; +} + +/****************************************************************/ +/* Used to hold hashtable data for sorting. */ +typedef struct TermData { + const char *pTerm; + int nTerm; + DLCollector *pCollector; +} TermData; + +/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 +** for equal, >0 for greater-than). +*/ +static int termDataCmp(const void *av, const void *bv){ + const TermData *a = (const TermData *)av; + const TermData *b = (const TermData *)bv; + int n = a->nTermnTerm ? a->nTerm : b->nTerm; + int c = memcmp(a->pTerm, b->pTerm, n); + if( c!=0 ) return c; + return a->nTerm-b->nTerm; +} + +/* Order pTerms data by term, then write a new level 0 segment using +** LeafWriter. +*/ +static int writeZeroSegment(fulltext_vtab *v, fts3Hash *pTerms){ + fts3HashElem *e; + int idx, rc, i, n; + TermData *pData; + LeafWriter writer; + DataBuffer dl; + + /* Determine the next index at level 0, merging as necessary. 
*/ + rc = segdirNextIndex(v, 0, &idx); + if( rc!=SQLITE_OK ) return rc; + + n = fts3HashCount(pTerms); + pData = sqlite3_malloc(n*sizeof(TermData)); + + for(i = 0, e = fts3HashFirst(pTerms); e; i++, e = fts3HashNext(e)){ + assert( i1 ) qsort(pData, n, sizeof(*pData), termDataCmp); + + /* TODO(shess) Refactor so that we can write directly to the segment + ** DataBuffer, as happens for segment merges. + */ + leafWriterInit(0, idx, &writer); + dataBufferInit(&dl, 0); + for(i=0; inPendingData>=0 ){ + fts3HashElem *e; + for(e=fts3HashFirst(&v->pendingTerms); e; e=fts3HashNext(e)){ + dlcDelete(fts3HashData(e)); + } + fts3HashClear(&v->pendingTerms); + v->nPendingData = -1; + } + return SQLITE_OK; +} + +/* If pendingTerms has data, flush it to a level-zero segment, and +** free it. +*/ +static int flushPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + int rc = writeZeroSegment(v, &v->pendingTerms); + if( rc==SQLITE_OK ) clearPendingTerms(v); + return rc; + } + return SQLITE_OK; +} + +/* If pendingTerms is "too big", or docid is out of order, flush it. +** Regardless, be certain that pendingTerms is initialized for use. +*/ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + /* TODO(shess) Explore whether partially flushing the buffer on + ** forced-flush would provide better performance. I suspect that if + ** we ordered the doclists by size and flushed the largest until the + ** buffer was half empty, that would let the less frequent terms + ** generate longer doclists. + */ + if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ + int rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + } + if( v->nPendingData<0 ){ + fts3HashInit(&v->pendingTerms, FTS3_HASH_STRING, 1); + v->nPendingData = 0; + } + v->iPrevDocid = iDocid; + return SQLITE_OK; +} + +/* This function implements the xUpdate callback; it is the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + int rc; + + FTSTRACE(("FTS3 Update %p\n", pVtab)); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0])); + if( rc==SQLITE_OK ){ + /* If we just deleted the last row in the table, clear out the + ** index data. + */ + rc = content_exists(v); + if( rc==SQLITE_ROW ){ + rc = SQLITE_OK; + }else if( rc==SQLITE_DONE ){ + /* Clear the pending terms so we don't flush a useless level-0 + ** segment when the transaction closes. 
+ */ + rc = clearPendingTerms(v); + if( rc==SQLITE_OK ){ + rc = segdir_delete_all(v); + } + } + } + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + }else if( sqlite3_value_type(ppArg[2+v->nColumn+1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[2+v->nColumn+1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the docid */ + }else{ + assert( nArg==2+v->nColumn+2); + rc = index_update(v, rowid, &ppArg[2]); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite3_value *pRequestDocid = ppArg[2+v->nColumn+1]; + assert( nArg==2+v->nColumn+2); + if( SQLITE_NULL != sqlite3_value_type(pRequestDocid) && + SQLITE_NULL != sqlite3_value_type(ppArg[1]) ){ + /* TODO(shess) Consider allowing this to work if the values are + ** identical. I'm inclined to discourage that usage, though, + ** given that both rowid and docid are special columns. Better + ** would be to define one or the other as the default winner, + ** but should it be fts3-centric (docid) or SQLite-centric + ** (rowid)? + */ + rc = SQLITE_ERROR; + }else{ + if( SQLITE_NULL == sqlite3_value_type(pRequestDocid) ){ + pRequestDocid = ppArg[1]; + } + rc = index_insert(v, pRequestDocid, &ppArg[2], pRowid); + } + } + + return rc; +} + +static int fulltextSync(sqlite3_vtab *pVtab){ + FTSTRACE(("FTS3 xSync()\n")); + return flushPendingTerms((fulltext_vtab *)pVtab); +} + +static int fulltextBegin(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + FTSTRACE(("FTS3 xBegin()\n")); + + /* Any buffered updates should have been cleared by the previous + ** transaction. + */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextCommit(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + FTSTRACE(("FTS3 xCommit()\n")); + + /* Buffered updates should have been cleared by fulltextSync(). 
*/ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextRollback(sqlite3_vtab *pVtab){ + FTSTRACE(("FTS3 xRollback()\n")); + return clearPendingTerms((fulltext_vtab *)pVtab); +} + +/* +** Implementation of the snippet() function for FTS3 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = ""; + const char *zEnd = ""; + const char *zEllipsis = "..."; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS3 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* OptLeavesReader is nearly identical to LeavesReader, except that +** where LeavesReader is geared towards the merging of complete +** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader +** is geared towards implementation of the optimize() function, and +** can merge all segments simultaneously. This version may be +** somewhat less efficient than LeavesReader because it merges into an +** accumulator rather than doing an N-way merge, but since segment +** size grows exponentially (so segment count logrithmically) this is +** probably not an immediate problem. +*/ +/* TODO(shess): Prove that assertion, or extend the merge code to +** merge tree fashion (like the prefix-searching code does). +*/ +/* TODO(shess): OptLeavesReader and LeavesReader could probably be +** merged with little or no loss of performance for LeavesReader. The +** merged code would need to handle >MERGE_COUNT segments, and would +** also need to be able to optionally optimize away deletes. +*/ +typedef struct OptLeavesReader { + /* Segment number, to order readers by age. 
*/ + int segment; + LeavesReader reader; +} OptLeavesReader; + +static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ + return leavesReaderAtEnd(&pReader->reader); +} +static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ + return leavesReaderTermBytes(&pReader->reader); +} +static const char *optLeavesReaderData(OptLeavesReader *pReader){ + return leavesReaderData(&pReader->reader); +} +static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ + return leavesReaderDataBytes(&pReader->reader); +} +static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ + return leavesReaderTerm(&pReader->reader); +} +static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ + return leavesReaderStep(v, &pReader->reader); +} +static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + return leavesReaderTermCmp(&lr1->reader, &lr2->reader); +} +/* Order by term ascending, segment ascending (oldest to newest), with +** exhausted readers to the end. +*/ +static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + int c = optLeavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->segment-lr2->segment; +} +/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that +** pLr[1..nLr-1] is already sorted. +*/ +static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ + while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ + OptLeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* optimize() helper function. Put the readers in order and iterate +** through them, merging doclists for matching terms into pWriter. +** Returns SQLITE_OK on success, or the SQLite error code which +** prevented success. +*/ +static int optimizeInternal(fulltext_vtab *v, + OptLeavesReader *readers, int nReaders, + LeafWriter *pWriter){ + int i, rc = SQLITE_OK; + DataBuffer doclist, merged, tmp; + + /* Order the readers. */ + i = nReaders; + while( i-- > 0 ){ + optLeavesReaderReorder(&readers[i], nReaders-i); + } + + dataBufferInit(&doclist, LEAF_MAX); + dataBufferInit(&merged, LEAF_MAX); + + /* Exhausted readers bubble to the end, so when the first reader is + ** at eof, all are at eof. + */ + while( !optLeavesReaderAtEnd(&readers[0]) ){ + + /* Figure out how many readers share the next term. */ + for(i=1; i 0 ){ + dlrDestroy(&dlReaders[nReaders]); + } + + /* Accumulated doclist to reader 0 for next pass. */ + dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); + } + + /* Destroy reader that was left in the pipeline. */ + dlrDestroy(&dlReaders[0]); + + /* Trim deletions from the doclist. */ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + -1, DL_DEFAULT, &merged); + } + + /* Only pass doclists with hits (skip if all hits deleted). */ + if( merged.nData>0 ){ + rc = leafWriterStep(v, pWriter, + optLeavesReaderTerm(&readers[0]), + optLeavesReaderTermBytes(&readers[0]), + merged.pData, merged.nData); + if( rc!=SQLITE_OK ) goto err; + } + + /* Step merged readers to next term and reorder. */ + while( i-- > 0 ){ + rc = optLeavesReaderStep(v, &readers[i]); + if( rc!=SQLITE_OK ) goto err; + + optLeavesReaderReorder(&readers[i], nReaders-i); + } + } + + err: + dataBufferDestroy(&doclist); + dataBufferDestroy(&merged); + return rc; +} + +/* Implement optimize() function for FTS3. optimize(t) merges all +** segments in the fts index into a single segment. 't' is the magic +** table-named column. 
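+** For example, assuming a hypothetical fts3 table named mail, a full merge
+** can be requested from SQL once the connection has the function overloaded
+** (see sqlite3Fts3Init() below):
+**
+**   SELECT optimize(mail) FROM mail LIMIT 1;
+**
+** The LIMIT 1 only avoids running the merge once per row of the table.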
+*/ +static void optimizeFunc(sqlite3_context *pContext, + int argc, sqlite3_value **argv){ + fulltext_cursor *pCursor; + if( argc>1 ){ + sqlite3_result_error(pContext, "excess arguments to optimize()",-1); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to optimize",-1); + }else{ + fulltext_vtab *v; + int i, rc, iMaxLevel; + OptLeavesReader *readers; + int nReaders; + LeafWriter writer; + sqlite3_stmt *s; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* Flush any buffered updates before optimizing. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) goto err; + + rc = segdir_count(v, &nReaders, &iMaxLevel); + if( rc!=SQLITE_OK ) goto err; + if( nReaders==0 || nReaders==1 ){ + sqlite3_result_text(pContext, "Index already optimal", -1, + SQLITE_STATIC); + return; + } + + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) goto err; + + readers = sqlite3_malloc(nReaders*sizeof(readers[0])); + if( readers==NULL ) goto err; + + /* Note that there will already be a segment at this position + ** until we call segdir_delete() on iMaxLevel. + */ + leafWriterInit(iMaxLevel, 0, &writer); + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i 0 ){ + leavesReaderDestroy(&readers[i].reader); + } + sqlite3_free(readers); + + /* If we've successfully gotten to here, delete the old segments + ** and flush the interior structure of the new segment. + */ + if( rc==SQLITE_OK ){ + for( i=0; i<=iMaxLevel; i++ ){ + rc = segdir_delete(v, i); + if( rc!=SQLITE_OK ) break; + } + + if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); + } + + leafWriterDestroy(&writer); + + if( rc!=SQLITE_OK ) goto err; + + sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); + return; + + /* TODO(shess): Error-handling needs to be improved along the + ** lines of the dump_ functions. + */ + err: + { + char buf[512]; + sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", + sqlite3_errmsg(sqlite3_context_db_handle(pContext))); + sqlite3_result_error(pContext, buf, -1); + } + } +} + +#ifdef SQLITE_TEST +/* Generate an error of the form ": ". If msg is NULL, +** pull the error from the context's db handle. +*/ +static void generateError(sqlite3_context *pContext, + const char *prefix, const char *msg){ + char buf[512]; + if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); + sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); + sqlite3_result_error(pContext, buf, -1); +} + +/* Helper function to collect the set of terms in the segment into +** pTerms. The segment is defined by the leaf nodes between +** iStartBlockid and iEndBlockid, inclusive, or by the contents of +** pRootData if iStartBlockid is 0 (in which case the entire segment +** fit in a leaf). 
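+** Within this helper the hash is used as a counter rather than a map to
+** real data: each time a term is seen, its stored "value" becomes
+** oldValue+1, i.e. the occurrence count is smuggled into the void*.  So,
+** illustratively,
+**
+**   void *pCount = sqlite3Fts3HashFind(pTerms, zTerm, nTerm);
+**
+** returns a pointer whose integer value is the number of times zTerm has
+** been collected so far (NULL, i.e. zero, if it has not been seen).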
+*/ +static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, + fts3Hash *pTerms){ + const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); + const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + const int nRootData = sqlite3_column_bytes(s, 2); + LeavesReader reader; + int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, + pRootData, nRootData, &reader); + if( rc!=SQLITE_OK ) return rc; + + while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ + const char *pTerm = leavesReaderTerm(&reader); + const int nTerm = leavesReaderTermBytes(&reader); + void *oldValue = sqlite3Fts3HashFind(pTerms, pTerm, nTerm); + void *newValue = (void *)((char *)oldValue+1); + + /* From the comment before sqlite3Fts3HashInsert in fts3_hash.c, + ** the data value passed is returned in case of malloc failure. + */ + if( newValue==sqlite3Fts3HashInsert(pTerms, pTerm, nTerm, newValue) ){ + rc = SQLITE_NOMEM; + }else{ + rc = leavesReaderStep(v, &reader); + } + } + + leavesReaderDestroy(&reader); + return rc; +} + +/* Helper function to build the result string for dump_terms(). */ +static int generateTermsResult(sqlite3_context *pContext, fts3Hash *pTerms){ + int iTerm, nTerms, nResultBytes, iByte; + char *result; + TermData *pData; + fts3HashElem *e; + + /* Iterate pTerms to generate an array of terms in pData for + ** sorting. + */ + nTerms = fts3HashCount(pTerms); + assert( nTerms>0 ); + pData = sqlite3_malloc(nTerms*sizeof(TermData)); + if( pData==NULL ) return SQLITE_NOMEM; + + nResultBytes = 0; + for(iTerm = 0, e = fts3HashFirst(pTerms); e; iTerm++, e = fts3HashNext(e)){ + nResultBytes += fts3HashKeysize(e)+1; /* Term plus trailing space */ + assert( iTerm0 ); /* nTerms>0, nResultsBytes must be, too. */ + result = sqlite3_malloc(nResultBytes); + if( result==NULL ){ + sqlite3_free(pData); + return SQLITE_NOMEM; + } + + if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); + + /* Read the terms in order to build the result. */ + iByte = 0; + for(iTerm=0; iTerm0 ){ + rc = generateTermsResult(pContext, &terms); + if( rc==SQLITE_NOMEM ){ + generateError(pContext, "dump_terms", "out of memory"); + }else{ + assert( rc==SQLITE_OK ); + } + }else if( argc==3 ){ + /* The specific segment asked for could not be found. */ + generateError(pContext, "dump_terms", "segment not found"); + }else{ + /* No segments found. */ + /* TODO(shess): It should be impossible to reach this. This + ** case can only happen for an empty table, in which case + ** SQLite has no rows to call this function on. + */ + sqlite3_result_null(pContext); + } + } + sqlite3Fts3HashClear(&terms); + } +} + +/* Expand the DL_DEFAULT doclist in pData into a text result in +** pContext. 
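+** For instance, with DL_DEFAULT set to DL_POSITIONS, a doclist recording
+** hits for docid 42 at position 3 of column 0 and position 9 of column 2
+** is rendered as:
+**
+**   [42 0[3] 2[9]]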
+*/ +static void createDoclistResult(sqlite3_context *pContext, + const char *pData, int nData){ + DataBuffer dump; + DLReader dlReader; + + assert( pData!=NULL && nData>0 ); + + dataBufferInit(&dump, 0); + dlrInit(&dlReader, DL_DEFAULT, pData, nData); + for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ + char buf[256]; + PLReader plReader; + + plrInit(&plReader, &dlReader); + if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ + sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); + dataBufferAppend(&dump, buf, strlen(buf)); + }else{ + int iColumn = plrColumn(&plReader); + + sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", + dlrDocid(&dlReader), iColumn); + dataBufferAppend(&dump, buf, strlen(buf)); + + for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ + if( plrColumn(&plReader)!=iColumn ){ + iColumn = plrColumn(&plReader); + sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, buf, strlen(buf)); + } + if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", + plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + }else if( DL_DEFAULT==DL_POSITIONS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); + }else{ + assert( NULL=="Unhandled DL_DEFAULT value"); + } + dataBufferAppend(&dump, buf, strlen(buf)); + } + plrDestroy(&plReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, "]] ", 3); + } + } + dlrDestroy(&dlReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dump.pData[dump.nData] = '\0'; + assert( dump.nData>0 ); + + /* Passes ownership of dump's buffer to pContext. */ + sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); + dump.pData = NULL; + dump.nData = dump.nCapacity = 0; +} + +/* Implements dump_doclist() for use in inspecting the fts3 index from +** tests. TEXT result containing a string representation of the +** doclist for the indicated term. dump_doclist(t, term, level, idx) +** dumps the doclist for term from the segment specified by level, idx +** (in %_segdir), while dump_doclist(t, term) dumps the logical +** doclist for the term across all segments. The per-segment doclist +** can contain deletions, while the full-index doclist will not +** (deletions are omitted). +** +** Result formats differ with the setting of DL_DEFAULTS. Examples: +** +** DL_DOCIDS: [1] [3] [7] +** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] +** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] +** +** In each case the number after the outer '[' is the docid. In the +** latter two cases, the number before the inner '[' is the column +** associated with the values within. For DL_POSITIONS the numbers +** within are the positions, for DL_POSITIONS_OFFSETS they are the +** position, the start offset, and the end offset. 
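+** Typical test invocations, assuming a hypothetical fts3 table t, are:
+**
+**   SELECT dump_doclist(t, 'sqlite') FROM t LIMIT 1;        -- whole index
+**   SELECT dump_doclist(t, 'sqlite', 0, 0) FROM t LIMIT 1;  -- one segment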
+*/ +static void dumpDoclistFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=2 && argc!=4 ){ + generateError(pContext, "dump_doclist", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_doclist", "illegal first argument"); + }else if( sqlite3_value_text(argv[1])==NULL || + sqlite3_value_text(argv[1])[0]=='\0' ){ + generateError(pContext, "dump_doclist", "empty second argument"); + }else{ + const char *pTerm = (const char *)sqlite3_value_text(argv[1]); + const int nTerm = strlen(pTerm); + fulltext_vtab *v; + int rc; + DataBuffer doclist; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + dataBufferInit(&doclist, 0); + + /* termSelect() yields the same logical doclist that queries are + ** run against. + */ + if( argc==2 ){ + rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); + }else{ + sqlite3_stmt *s = NULL; + + /* Get our specific segment's information. */ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); + } + } + + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + if( rc==SQLITE_DONE ){ + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "segment not found"); + return; + } + + /* Found a segment, load it into doclist. */ + if( rc==SQLITE_ROW ){ + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + + /* loadSegment() is used by termSelect() to load each + ** segment's data. + */ + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, + &doclist); + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + /* Should not have more than one matching segment. */ + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "invalid segdir"); + return; + } + rc = SQLITE_OK; + } + } + } + + sqlite3_reset(s); + } + + if( rc==SQLITE_OK ){ + if( doclist.nData>0 ){ + createDoclistResult(pContext, doclist.pData, doclist.nData); + }else{ + /* TODO(shess): This can happen if the term is not present, or + ** if all instances of the term have been deleted and this is + ** an all-index dump. It may be interesting to distinguish + ** these cases. + */ + sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); + } + }else if( rc==SQLITE_NOMEM ){ + /* Handle out-of-memory cases specially because if they are + ** generated in fts3 code they may not be reflected in the db + ** handle. + */ + /* TODO(shess): Handle this more comprehensively. + ** sqlite3ErrStr() has what I need, but is internal. + */ + generateError(pContext, "dump_doclist", "out of memory"); + }else{ + generateError(pContext, "dump_doclist", NULL); + } + + dataBufferDestroy(&doclist); + } +} +#endif + +/* +** This routine implements the xFindFunction method for the FTS3 +** virtual table. 
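+** When one of the overloaded functions is applied to the table's magic
+** table-named column, SQLite calls this routine to look up the C
+** implementation registered above.  A typical query, for a hypothetical
+** table t, is:
+**
+**   SELECT snippet(t, '<b>', '</b>', '...') FROM t WHERE t MATCH 'sqlite';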
+*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + }else if( strcmp(zName,"optimize")==0 ){ + *pxFunc = optimizeFunc; + return 1; +#ifdef SQLITE_TEST + /* NOTE(shess): These functions are present only for testing + ** purposes. No particular effort is made to optimize their + ** execution or how they build their results. + */ + }else if( strcmp(zName,"dump_terms")==0 ){ + /* fprintf(stderr, "Found dump_terms\n"); */ + *pxFunc = dumpTermsFunc; + return 1; + }else if( strcmp(zName,"dump_doclist")==0 ){ + /* fprintf(stderr, "Found dump_doclist\n"); */ + *pxFunc = dumpDoclistFunc; + return 1; +#endif + } + return 0; +} + +/* +** Rename an fts3 table. +*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" + "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fts3Module = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ fulltextBegin, + /* xSync */ fulltextSync, + /* xCommit */ fulltextCommit, + /* xRollback */ fulltextRollback, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +static void hashDestroy(void *p){ + fts3Hash *pHash = (fts3Hash *)p; + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); +} + +/* +** The fts3 built-in tokenizers - "simple" and "porter" - are implemented +** in files fts3_tokenizer1.c and fts3_porter.c respectively. The following +** two forward declarations are for functions declared in these files +** used to retrieve the respective implementations. +** +** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed +** to by the argument to point a the "simple" tokenizer implementation. +** Function ...PorterTokenizerModule() sets *pModule to point to the +** porter tokenizer/stemmer implementation. +*/ +void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +int sqlite3Fts3InitHashTable(sqlite3 *, fts3Hash *, const char *); + +/* +** Initialise the fts3 extension. If this extension is built as part +** of the sqlite library, then this function is called directly by +** SQLite. If fts3 is built as a dynamically loadable extension, this +** function is called by the sqlite3_extension_init() entry point. 
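+** As a sketch, an application that links the FTS3 sources directly (rather
+** than relying on an SQLITE_ENABLE_FTS3 build of the core) can enable the
+** module on a connection with:
+**
+**   sqlite3 *db;
+**   if( sqlite3_open(":memory:", &db)==SQLITE_OK
+**    && sqlite3Fts3Init(db)==SQLITE_OK ){
+**     /* fts3 virtual tables may now be created on db */
+**   }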
+*/ +int sqlite3Fts3Init(sqlite3 *db){ + int rc = SQLITE_OK; + fts3Hash *pHash = 0; + const sqlite3_tokenizer_module *pSimple = 0; + const sqlite3_tokenizer_module *pPorter = 0; + const sqlite3_tokenizer_module *pIcu = 0; + + sqlite3Fts3SimpleTokenizerModule(&pSimple); + sqlite3Fts3PorterTokenizerModule(&pPorter); +#ifdef SQLITE_ENABLE_ICU + sqlite3Fts3IcuTokenizerModule(&pIcu); +#endif + + /* Allocate and initialise the hash-table used to store tokenizers. */ + pHash = sqlite3_malloc(sizeof(fts3Hash)); + if( !pHash ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); + } + + /* Load the built-in tokenizers into the hash table */ + if( rc==SQLITE_OK ){ + if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) + || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) + || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) + ){ + rc = SQLITE_NOMEM; + } + } + +#ifdef SQLITE_TEST + sqlite3Fts3ExprInitTestInterface(db); +#endif + + /* Create the virtual table wrapper around the hash-table and overload + ** the two scalar functions. If this is successful, register the + ** module with sqlite. + */ + if( SQLITE_OK==rc + && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) +#ifdef SQLITE_TEST + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) +#endif + ){ + return sqlite3_create_module_v2( + db, "fts3", &fts3Module, (void *)pHash, hashDestroy + ); + } + + /* An error has occurred. Delete the hash table and return the error code. */ + assert( rc!=SQLITE_OK ); + if( pHash ){ + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); + } + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts3Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_expr.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_expr.c --- sqlite3-3.4.2/ext/fts3/fts3_expr.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_expr.c 2009-03-12 15:43:48.000000000 +0000 @@ -0,0 +1,894 @@ +/* +** 2008 Nov 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This module contains code that implements a parser for fts3 query strings +** (the right-hand argument to the MATCH operator). Because the supported +** syntax is relatively simple, the whole tokenizer/parser system is +** hand-coded. The public interface to this module is declared in source +** code file "fts3_expr.h". +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* +** By default, this module parses the legacy syntax that has been +** traditionally used by fts3. Or, if SQLITE_ENABLE_FTS3_PARENTHESIS +** is defined, then it uses the new syntax. The differences between +** the new and the old syntaxes are: +** +** a) The new syntax supports parenthesis. 
 The old does not.
+**
+** b) The new syntax supports the AND and NOT operators. The old does not.
+**
+** c) The old syntax supports the "-" token qualifier. This is not
+** supported by the new syntax (it is replaced by the NOT operator).
+**
+** d) When using the old syntax, the OR operator has a greater precedence
+** than an implicit AND. When using the new, both implicit and explicit
+** AND operators have a higher precedence than OR.
+**
+** If compiled with SQLITE_TEST defined, then this module exports the
+** symbol "int sqlite3_fts3_enable_parentheses". Setting this variable
+** to zero causes the module to use the old syntax. If it is set to
+** non-zero the new syntax is activated. This is so both syntaxes can
+** be tested using a single build of testfixture.
+*/
+#ifdef SQLITE_TEST
+int sqlite3_fts3_enable_parentheses = 0;
+#else
+# ifdef SQLITE_ENABLE_FTS3_PARENTHESIS
+#  define sqlite3_fts3_enable_parentheses 1
+# else
+#  define sqlite3_fts3_enable_parentheses 0
+# endif
+#endif
+
+/*
+** Default span for NEAR operators.
+*/
+#define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10
+
+#include "fts3_expr.h"
+#include "sqlite3.h"
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+typedef struct ParseContext ParseContext;
+struct ParseContext {
+  sqlite3_tokenizer *pTokenizer;      /* Tokenizer module */
+  const char **azCol;                 /* Array of column names for fts3 table */
+  int nCol;                           /* Number of entries in azCol[] */
+  int iDefaultCol;                    /* Default column to query */
+  sqlite3_context *pCtx;              /* Write error message here */
+  int nNest;                          /* Number of nested brackets */
+};
+
+/*
+** This function is equivalent to the standard isspace() function.
+**
+** The standard isspace() can be awkward to use safely, because although it
+** is defined to accept an argument of type int, its behaviour when passed
+** an integer that falls outside of the range of the unsigned char type
+** is undefined (and sometimes, "undefined" means segfault). This wrapper
+** is defined to accept an argument of type char, and always returns 0 for
+** any values that fall outside of the range of the unsigned char type (i.e.
+** negative values).
+*/
+static int fts3isspace(char c){
+  return (c&0x80)==0 ? isspace(c) : 0;
+}
+
+/*
+** Extract the next token from buffer z (length n) using the tokenizer
+** and other information (column names etc.) in pParse. Create an Fts3Expr
+** structure of type FTSQUERY_PHRASE containing a phrase consisting of this
+** single token and set *ppExpr to point to it. If the end of the buffer is
+** reached before a token is found, set *ppExpr to zero. It is the
+** responsibility of the caller to eventually deallocate the allocated
+** Fts3Expr structure (if any) by passing it to sqlite3_free().
+**
+** Return SQLITE_OK if successful, or SQLITE_NOMEM if a memory allocation
+** fails.
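+** The token text is whatever the tokenizer reports (already normalized by
+** it), a trailing '*' marks the phrase as a prefix match, and in the legacy
+** syntax a leading '-' marks it as an exclusion.  For example, against the
+** default column:
+**
+**   data*    ->  FTSQUERY_PHRASE, token "data", isPrefix set
+**   -data    ->  FTSQUERY_PHRASE, token "data", isNot set (legacy only)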
+*/ +static int getNextToken( + ParseContext *pParse, /* fts3 query parse context */ + int iCol, /* Value for Fts3Phrase.iColumn */ + const char *z, int n, /* Input string */ + Fts3Expr **ppExpr, /* OUT: expression */ + int *pnConsumed /* OUT: Number of bytes consumed */ +){ + sqlite3_tokenizer *pTokenizer = pParse->pTokenizer; + sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; + int rc; + sqlite3_tokenizer_cursor *pCursor; + Fts3Expr *pRet = 0; + int nConsumed = 0; + + rc = pModule->xOpen(pTokenizer, z, n, &pCursor); + if( rc==SQLITE_OK ){ + const char *zToken; + int nToken, iStart, iEnd, iPosition; + int nByte; /* total space to allocate */ + + pCursor->pTokenizer = pTokenizer; + rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); + + if( rc==SQLITE_OK ){ + nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; + pRet = (Fts3Expr *)sqlite3_malloc(nByte); + if( !pRet ){ + rc = SQLITE_NOMEM; + }else{ + memset(pRet, 0, nByte); + pRet->eType = FTSQUERY_PHRASE; + pRet->pPhrase = (Fts3Phrase *)&pRet[1]; + pRet->pPhrase->nToken = 1; + pRet->pPhrase->iColumn = iCol; + pRet->pPhrase->aToken[0].n = nToken; + pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; + memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); + + if( iEndpPhrase->aToken[0].isPrefix = 1; + iEnd++; + } + if( !sqlite3_fts3_enable_parentheses && iStart>0 && z[iStart-1]=='-' ){ + pRet->pPhrase->isNot = 1; + } + } + nConsumed = iEnd; + } + + pModule->xClose(pCursor); + } + + *pnConsumed = nConsumed; + *ppExpr = pRet; + return rc; +} + + +/* +** Enlarge a memory allocation. If an out-of-memory allocation occurs, +** then free the old allocation. +*/ +void *fts3ReallocOrFree(void *pOrig, int nNew){ + void *pRet = sqlite3_realloc(pOrig, nNew); + if( !pRet ){ + sqlite3_free(pOrig); + } + return pRet; +} + +/* +** Buffer zInput, length nInput, contains the contents of a quoted string +** that appeared as part of an fts3 query expression. Neither quote character +** is included in the buffer. This function attempts to tokenize the entire +** input buffer and create an Fts3Expr structure of type FTSQUERY_PHRASE +** containing the results. +** +** If successful, SQLITE_OK is returned and *ppExpr set to point at the +** allocated Fts3Expr structure. Otherwise, either SQLITE_NOMEM (out of memory +** error) or SQLITE_ERROR (tokenization error) is returned and *ppExpr set +** to 0. 
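+** For example the quoted phrase "cold fusion" (the caller strips the
+** quotes) produces one FTSQUERY_PHRASE node with nToken==2 and aToken[]
+** entries "cold" and "fusion", which must later match in sequence.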
+*/ +static int getNextString( + ParseContext *pParse, /* fts3 query parse context */ + const char *zInput, int nInput, /* Input string */ + Fts3Expr **ppExpr /* OUT: expression */ +){ + sqlite3_tokenizer *pTokenizer = pParse->pTokenizer; + sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; + int rc; + Fts3Expr *p = 0; + sqlite3_tokenizer_cursor *pCursor = 0; + char *zTemp = 0; + int nTemp = 0; + + rc = pModule->xOpen(pTokenizer, zInput, nInput, &pCursor); + if( rc==SQLITE_OK ){ + int ii; + pCursor->pTokenizer = pTokenizer; + for(ii=0; rc==SQLITE_OK; ii++){ + const char *zToken; + int nToken, iBegin, iEnd, iPos; + rc = pModule->xNext(pCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc==SQLITE_OK ){ + int nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + p = fts3ReallocOrFree(p, nByte+ii*sizeof(struct PhraseToken)); + zTemp = fts3ReallocOrFree(zTemp, nTemp + nToken); + if( !p || !zTemp ){ + goto no_mem; + } + if( ii==0 ){ + memset(p, 0, nByte); + p->pPhrase = (Fts3Phrase *)&p[1]; + } + p->pPhrase = (Fts3Phrase *)&p[1]; + p->pPhrase->nToken = ii+1; + p->pPhrase->aToken[ii].n = nToken; + memcpy(&zTemp[nTemp], zToken, nToken); + nTemp += nToken; + if( iEndpPhrase->aToken[ii].isPrefix = 1; + }else{ + p->pPhrase->aToken[ii].isPrefix = 0; + } + } + } + + pModule->xClose(pCursor); + pCursor = 0; + } + + if( rc==SQLITE_DONE ){ + int jj; + char *zNew; + int nNew = 0; + int nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + nByte += (p?(p->pPhrase->nToken-1):0) * sizeof(struct PhraseToken); + p = fts3ReallocOrFree(p, nByte + nTemp); + if( !p ){ + goto no_mem; + } + if( zTemp ){ + zNew = &(((char *)p)[nByte]); + memcpy(zNew, zTemp, nTemp); + }else{ + memset(p, 0, nByte+nTemp); + } + p->pPhrase = (Fts3Phrase *)&p[1]; + for(jj=0; jjpPhrase->nToken; jj++){ + p->pPhrase->aToken[jj].z = &zNew[nNew]; + nNew += p->pPhrase->aToken[jj].n; + } + sqlite3_free(zTemp); + p->eType = FTSQUERY_PHRASE; + p->pPhrase->iColumn = pParse->iDefaultCol; + rc = SQLITE_OK; + } + + *ppExpr = p; + return rc; +no_mem: + + if( pCursor ){ + pModule->xClose(pCursor); + } + sqlite3_free(zTemp); + sqlite3_free(p); + *ppExpr = 0; + return SQLITE_NOMEM; +} + +/* +** Function getNextNode(), which is called by fts3ExprParse(), may itself +** call fts3ExprParse(). So this forward declaration is required. +*/ +static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); + +/* +** The output variable *ppExpr is populated with an allocated Fts3Expr +** structure, or set to 0 if the end of the input buffer is reached. +** +** Returns an SQLite error code. SQLITE_OK if everything works, SQLITE_NOMEM +** if a malloc failure occurs, or SQLITE_ERROR if a parse error is encountered. +** If SQLITE_ERROR is returned, pContext is populated with an error message. 
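+** A keyword is only recognised when followed by whitespace, a parenthesis,
+** a double-quote or end of input, so for example (with the default
+** tokenizer):
+**
+**   NEAR     ->  FTSQUERY_NEAR, nNear = 10 (SQLITE_FTS3_DEFAULT_NEAR_PARAM)
+**   NEAR/5   ->  FTSQUERY_NEAR, nNear = 5
+**   ORacle   ->  ordinary FTSQUERY_PHRASE token, not the OR operator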
+*/ +static int getNextNode( + ParseContext *pParse, /* fts3 query parse context */ + const char *z, int n, /* Input string */ + Fts3Expr **ppExpr, /* OUT: expression */ + int *pnConsumed /* OUT: Number of bytes consumed */ +){ + static const struct Fts3Keyword { + char z[4]; /* Keyword text */ + unsigned char n; /* Length of the keyword */ + unsigned char parenOnly; /* Only valid in paren mode */ + unsigned char eType; /* Keyword code */ + } aKeyword[] = { + { "OR" , 2, 0, FTSQUERY_OR }, + { "AND", 3, 1, FTSQUERY_AND }, + { "NOT", 3, 1, FTSQUERY_NOT }, + { "NEAR", 4, 0, FTSQUERY_NEAR } + }; + int ii; + int iCol; + int iColLen; + int rc; + Fts3Expr *pRet = 0; + + const char *zInput = z; + int nInput = n; + + /* Skip over any whitespace before checking for a keyword, an open or + ** close bracket, or a quoted string. + */ + while( nInput>0 && fts3isspace(*zInput) ){ + nInput--; + zInput++; + } + if( nInput==0 ){ + return SQLITE_DONE; + } + + /* See if we are dealing with a keyword. */ + for(ii=0; ii<(int)(sizeof(aKeyword)/sizeof(struct Fts3Keyword)); ii++){ + const struct Fts3Keyword *pKey = &aKeyword[ii]; + + if( (pKey->parenOnly & ~sqlite3_fts3_enable_parentheses)!=0 ){ + continue; + } + + if( nInput>=pKey->n && 0==memcmp(zInput, pKey->z, pKey->n) ){ + int nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM; + int nKey = pKey->n; + char cNext; + + /* If this is a "NEAR" keyword, check for an explicit nearness. */ + if( pKey->eType==FTSQUERY_NEAR ){ + assert( nKey==4 ); + if( zInput[4]=='/' && zInput[5]>='0' && zInput[5]<='9' ){ + nNear = 0; + for(nKey=5; zInput[nKey]>='0' && zInput[nKey]<='9'; nKey++){ + nNear = nNear * 10 + (zInput[nKey] - '0'); + } + } + } + + /* At this point this is probably a keyword. But for that to be true, + ** the next byte must contain either whitespace, an open or close + ** parenthesis, a quote character, or EOF. + */ + cNext = zInput[nKey]; + if( fts3isspace(cNext) + || cNext=='"' || cNext=='(' || cNext==')' || cNext==0 + ){ + pRet = (Fts3Expr *)sqlite3_malloc(sizeof(Fts3Expr)); + memset(pRet, 0, sizeof(Fts3Expr)); + pRet->eType = pKey->eType; + pRet->nNear = nNear; + *ppExpr = pRet; + *pnConsumed = (zInput - z) + nKey; + return SQLITE_OK; + } + + /* Turns out that wasn't a keyword after all. This happens if the + ** user has supplied a token such as "ORacle". Continue. + */ + } + } + + /* Check for an open bracket. */ + if( sqlite3_fts3_enable_parentheses ){ + if( *zInput=='(' ){ + int nConsumed; + int rc; + pParse->nNest++; + rc = fts3ExprParse(pParse, &zInput[1], nInput-1, ppExpr, &nConsumed); + if( rc==SQLITE_OK && !*ppExpr ){ + rc = SQLITE_DONE; + } + *pnConsumed = (zInput - z) + 1 + nConsumed; + return rc; + } + + /* Check for a close bracket. */ + if( *zInput==')' ){ + pParse->nNest--; + *pnConsumed = (zInput - z) + 1; + return SQLITE_DONE; + } + } + + /* See if we are dealing with a quoted phrase. If this is the case, then + ** search for the closing quote and pass the whole string to getNextString() + ** for processing. This is easy to do, as fts3 has no syntax for escaping + ** a quote character embedded in a string. 
+ */ + if( *zInput=='"' ){ + for(ii=1; iiiDefaultCol; + iColLen = 0; + for(ii=0; iinCol; ii++){ + const char *zStr = pParse->azCol[ii]; + int nStr = strlen(zStr); + if( nInput>nStr && zInput[nStr]==':' && memcmp(zStr, zInput, nStr)==0 ){ + iCol = ii; + iColLen = ((zInput - z) + nStr + 1); + break; + } + } + rc = getNextToken(pParse, iCol, &z[iColLen], n-iColLen, ppExpr, pnConsumed); + *pnConsumed += iColLen; + return rc; +} + +/* +** The argument is an Fts3Expr structure for a binary operator (any type +** except an FTSQUERY_PHRASE). Return an integer value representing the +** precedence of the operator. Lower values have a higher precedence (i.e. +** group more tightly). For example, in the C language, the == operator +** groups more tightly than ||, and would therefore have a higher precedence. +** +** When using the new fts3 query syntax (when SQLITE_ENABLE_FTS3_PARENTHESIS +** is defined), the order of the operators in precedence from highest to +** lowest is: +** +** NEAR +** NOT +** AND (including implicit ANDs) +** OR +** +** Note that when using the old query syntax, the OR operator has a higher +** precedence than the AND operator. +*/ +static int opPrecedence(Fts3Expr *p){ + assert( p->eType!=FTSQUERY_PHRASE ); + if( sqlite3_fts3_enable_parentheses ){ + return p->eType; + }else if( p->eType==FTSQUERY_NEAR ){ + return 1; + }else if( p->eType==FTSQUERY_OR ){ + return 2; + } + assert( p->eType==FTSQUERY_AND ); + return 3; +} + +/* +** Argument ppHead contains a pointer to the current head of a query +** expression tree being parsed. pPrev is the expression node most recently +** inserted into the tree. This function adds pNew, which is always a binary +** operator node, into the expression tree based on the relative precedence +** of pNew and the existing nodes of the tree. This may result in the head +** of the tree changing, in which case *ppHead is set to the new root node. +*/ +static void insertBinaryOperator( + Fts3Expr **ppHead, /* Pointer to the root node of a tree */ + Fts3Expr *pPrev, /* Node most recently inserted into the tree */ + Fts3Expr *pNew /* New binary node to insert into expression tree */ +){ + Fts3Expr *pSplit = pPrev; + while( pSplit->pParent && opPrecedence(pSplit->pParent)<=opPrecedence(pNew) ){ + pSplit = pSplit->pParent; + } + + if( pSplit->pParent ){ + assert( pSplit->pParent->pRight==pSplit ); + pSplit->pParent->pRight = pNew; + pNew->pParent = pSplit->pParent; + }else{ + *ppHead = pNew; + } + pNew->pLeft = pSplit; + pSplit->pParent = pNew; +} + +/* +** Parse the fts3 query expression found in buffer z, length n. This function +** returns either when the end of the buffer is reached or an unmatched +** closing bracket - ')' - is encountered. +** +** If successful, SQLITE_OK is returned, *ppExpr is set to point to the +** parsed form of the expression and *pnConsumed is set to the number of +** bytes read from buffer z. Otherwise, *ppExpr is set to 0 and SQLITE_NOMEM +** (out of memory error) or SQLITE_ERROR (parse error) is returned. 
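+** Operator precedence determines the shape of the returned tree.  For
+** example the input "one OR two three", where "two three" is an implicit
+** AND, parses as:
+**
+**   new syntax:    one OR (two AND three)   (AND binds tighter than OR)
+**   legacy syntax: (one OR two) AND three   (OR binds tighter than AND)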
+*/ +static int fts3ExprParse( + ParseContext *pParse, /* fts3 query parse context */ + const char *z, int n, /* Text of MATCH query */ + Fts3Expr **ppExpr, /* OUT: Parsed query structure */ + int *pnConsumed /* OUT: Number of bytes consumed */ +){ + Fts3Expr *pRet = 0; + Fts3Expr *pPrev = 0; + Fts3Expr *pNotBranch = 0; /* Only used in legacy parse mode */ + int nIn = n; + const char *zIn = z; + int rc = SQLITE_OK; + int isRequirePhrase = 1; + + while( rc==SQLITE_OK ){ + Fts3Expr *p = 0; + int nByte = 0; + rc = getNextNode(pParse, zIn, nIn, &p, &nByte); + if( rc==SQLITE_OK ){ + int isPhrase; + + if( !sqlite3_fts3_enable_parentheses + && p->eType==FTSQUERY_PHRASE && p->pPhrase->isNot + ){ + /* Create an implicit NOT operator. */ + Fts3Expr *pNot = sqlite3_malloc(sizeof(Fts3Expr)); + if( !pNot ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_NOMEM; + goto exprparse_out; + } + memset(pNot, 0, sizeof(Fts3Expr)); + pNot->eType = FTSQUERY_NOT; + pNot->pRight = p; + if( pNotBranch ){ + pNotBranch->pLeft = p; + pNot->pRight = pNotBranch; + } + pNotBranch = pNot; + }else{ + int eType = p->eType; + assert( eType!=FTSQUERY_PHRASE || !p->pPhrase->isNot ); + isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft); + + /* The isRequirePhrase variable is set to true if a phrase or + ** an expression contained in parenthesis is required. If a + ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** isRequirePhrase is set, this is a syntax error. + */ + if( !isPhrase && isRequirePhrase ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_ERROR; + goto exprparse_out; + } + + if( isPhrase && !isRequirePhrase ){ + /* Insert an implicit AND operator. */ + Fts3Expr *pAnd; + assert( pRet && pPrev ); + pAnd = sqlite3_malloc(sizeof(Fts3Expr)); + if( !pAnd ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_NOMEM; + goto exprparse_out; + } + memset(pAnd, 0, sizeof(Fts3Expr)); + pAnd->eType = FTSQUERY_AND; + insertBinaryOperator(&pRet, pPrev, pAnd); + pPrev = pAnd; + } + + /* This test catches attempts to make either operand of a NEAR + ** operator something other than a phrase. For example, either of + ** the following: + ** + ** (bracketed expression) NEAR phrase + ** phrase NEAR (bracketed expression) + ** + ** Return an error in either case. + */ + if( pPrev && ( + (eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE) + || (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR) + )){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_ERROR; + goto exprparse_out; + } + + if( isPhrase ){ + if( pRet ){ + assert( pPrev && pPrev->pLeft && pPrev->pRight==0 ); + pPrev->pRight = p; + p->pParent = pPrev; + }else{ + pRet = p; + } + }else{ + insertBinaryOperator(&pRet, pPrev, p); + } + isRequirePhrase = !isPhrase; + } + assert( nByte>0 ); + } + assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) ); + nIn -= nByte; + zIn += nByte; + pPrev = p; + } + + if( rc==SQLITE_DONE && pRet && isRequirePhrase ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + if( !sqlite3_fts3_enable_parentheses && pNotBranch ){ + if( !pRet ){ + rc = SQLITE_ERROR; + }else{ + pNotBranch->pLeft = pRet; + pRet = pNotBranch; + } + } + } + *pnConsumed = n - nIn; + +exprparse_out: + if( rc!=SQLITE_OK ){ + sqlite3Fts3ExprFree(pRet); + sqlite3Fts3ExprFree(pNotBranch); + pRet = 0; + } + *ppExpr = pRet; + return rc; +} + +/* +** Parameters z and n contain a pointer to and length of a buffer containing +** an fts3 query expression, respectively. 
This function attempts to parse the +** query expression and create a tree of Fts3Expr structures representing the +** parsed expression. If successful, *ppExpr is set to point to the head +** of the parsed expression tree and SQLITE_OK is returned. If an error +** occurs, either SQLITE_NOMEM (out-of-memory error) or SQLITE_ERROR (parse +** error) is returned and *ppExpr is set to 0. +** +** If parameter n is a negative number, then z is assumed to point to a +** nul-terminated string and the length is determined using strlen(). +** +** The first parameter, pTokenizer, is passed the fts3 tokenizer module to +** use to normalize query tokens while parsing the expression. The azCol[] +** array, which is assumed to contain nCol entries, should contain the names +** of each column in the target fts3 table, in order from left to right. +** Column names must be nul-terminated strings. +** +** The iDefaultCol parameter should be passed the index of the table column +** that appears on the left-hand-side of the MATCH operator (the default +** column to match against for tokens for which a column name is not explicitly +** specified as part of the query string), or -1 if tokens may by default +** match any table column. +*/ +int sqlite3Fts3ExprParse( + sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ + char **azCol, /* Array of column names for fts3 table */ + int nCol, /* Number of entries in azCol[] */ + int iDefaultCol, /* Default column to query */ + const char *z, int n, /* Text of MATCH query */ + Fts3Expr **ppExpr /* OUT: Parsed query structure */ +){ + int nParsed; + int rc; + ParseContext sParse; + sParse.pTokenizer = pTokenizer; + sParse.azCol = (const char **)azCol; + sParse.nCol = nCol; + sParse.iDefaultCol = iDefaultCol; + sParse.nNest = 0; + if( z==0 ){ + *ppExpr = 0; + return SQLITE_OK; + } + if( n<0 ){ + n = strlen(z); + } + rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed); + + /* Check for mismatched parenthesis */ + if( rc==SQLITE_OK && sParse.nNest ){ + rc = SQLITE_ERROR; + sqlite3Fts3ExprFree(*ppExpr); + *ppExpr = 0; + } + + return rc; +} + +/* +** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse(). +*/ +void sqlite3Fts3ExprFree(Fts3Expr *p){ + if( p ){ + sqlite3Fts3ExprFree(p->pLeft); + sqlite3Fts3ExprFree(p->pRight); + sqlite3_free(p); + } +} + +/**************************************************************************** +***************************************************************************** +** Everything after this point is just test code. +*/ + +#ifdef SQLITE_TEST + +#include + +/* +** Function to query the hash-table of tokenizers (see README.tokenizers). +*/ +static int queryTestTokenizer( + sqlite3 *db, + const char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +/* +** This function is part of the test interface for the query parser. It +** writes a text representation of the query expression pExpr into the +** buffer pointed to by argument zBuf. It is assumed that zBuf is large +** enough to store the required text representation. 
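+** The output is the operator name (NEAR carries its span, e.g. "NEAR/5")
+** followed by its two operands in braces, and "PHRASE <column> <isNot>"
+** followed by the tokens for leaf nodes.  A query such as "ab AND cd" is
+** therefore expected to come out roughly as
+**
+**   AND {PHRASE 0 0 ab} {PHRASE 0 0 cd}
+**
+** (the column index and isNot flag are shown as 0 purely for illustration).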
+*/ +static void exprToString(Fts3Expr *pExpr, char *zBuf){ + switch( pExpr->eType ){ + case FTSQUERY_PHRASE: { + Fts3Phrase *pPhrase = pExpr->pPhrase; + int i; + zBuf += sprintf(zBuf, "PHRASE %d %d", pPhrase->iColumn, pPhrase->isNot); + for(i=0; inToken; i++){ + zBuf += sprintf(zBuf," %.*s",pPhrase->aToken[i].n,pPhrase->aToken[i].z); + zBuf += sprintf(zBuf,"%s", (pPhrase->aToken[i].isPrefix?"+":"")); + } + return; + } + + case FTSQUERY_NEAR: + zBuf += sprintf(zBuf, "NEAR/%d ", pExpr->nNear); + break; + case FTSQUERY_NOT: + zBuf += sprintf(zBuf, "NOT "); + break; + case FTSQUERY_AND: + zBuf += sprintf(zBuf, "AND "); + break; + case FTSQUERY_OR: + zBuf += sprintf(zBuf, "OR "); + break; + } + + zBuf += sprintf(zBuf, "{"); + exprToString(pExpr->pLeft, zBuf); + zBuf += strlen(zBuf); + zBuf += sprintf(zBuf, "} "); + + zBuf += sprintf(zBuf, "{"); + exprToString(pExpr->pRight, zBuf); + zBuf += strlen(zBuf); + zBuf += sprintf(zBuf, "}"); +} + +/* +** This is the implementation of a scalar SQL function used to test the +** expression parser. It should be called as follows: +** +** fts3_exprtest(, , , ...); +** +** The first argument, , is the name of the fts3 tokenizer used +** to parse the query expression (see README.tokenizers). The second argument +** is the query expression to parse. Each subsequent argument is the name +** of a column of the fts3 table that the query expression may refer to. +** For example: +** +** SELECT fts3_exprtest('simple', 'Bill col2:Bloggs', 'col1', 'col2'); +*/ +static void fts3ExprTest( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3_tokenizer_module const *pModule = 0; + sqlite3_tokenizer *pTokenizer = 0; + int rc; + char **azCol = 0; + const char *zExpr; + int nExpr; + int nCol; + int ii; + Fts3Expr *pExpr; + sqlite3 *db = sqlite3_context_db_handle(context); + + if( argc<3 ){ + sqlite3_result_error(context, + "Usage: fts3_exprtest(tokenizer, expr, col1, ...", -1 + ); + return; + } + + rc = queryTestTokenizer(db, + (const char *)sqlite3_value_text(argv[0]), &pModule); + if( rc==SQLITE_NOMEM ){ + sqlite3_result_error_nomem(context); + goto exprtest_out; + }else if( !pModule ){ + sqlite3_result_error(context, "No such tokenizer module", -1); + goto exprtest_out; + } + + rc = pModule->xCreate(0, 0, &pTokenizer); + assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); + if( rc==SQLITE_NOMEM ){ + sqlite3_result_error_nomem(context); + goto exprtest_out; + } + pTokenizer->pModule = pModule; + + zExpr = (const char *)sqlite3_value_text(argv[1]); + nExpr = sqlite3_value_bytes(argv[1]); + nCol = argc-2; + azCol = (char **)sqlite3_malloc(nCol*sizeof(char *)); + if( !azCol ){ + sqlite3_result_error_nomem(context); + goto exprtest_out; + } + for(ii=0; iixDestroy(pTokenizer); + } + sqlite3_free(azCol); +} + +/* +** Register the query expression parser test function fts3_exprtest() +** with database connection db. +*/ +void sqlite3Fts3ExprInitTestInterface(sqlite3* db){ + sqlite3_create_function( + db, "fts3_exprtest", -1, SQLITE_UTF8, 0, fts3ExprTest, 0, 0 + ); +} + +#endif +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_expr.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_expr.h --- sqlite3-3.4.2/ext/fts3/fts3_expr.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_expr.h 2009-01-01 12:34:46.000000000 +0000 @@ -0,0 +1,96 @@ +/* +** 2008 Nov 28 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +*/ + +#include "fts3_tokenizer.h" +#include "sqlite3.h" + +/* +** The following describes the syntax supported by the fts3 MATCH +** operator in a similar format to that used by the lemon parser +** generator. This module does not use actually lemon, it uses a +** custom parser. +** +** query ::= andexpr (OR andexpr)*. +** +** andexpr ::= notexpr (AND? notexpr)*. +** +** notexpr ::= nearexpr (NOT nearexpr|-TOKEN)*. +** notexpr ::= LP query RP. +** +** nearexpr ::= phrase (NEAR distance_opt nearexpr)*. +** +** distance_opt ::= . +** distance_opt ::= / INTEGER. +** +** phrase ::= TOKEN. +** phrase ::= COLUMN:TOKEN. +** phrase ::= "TOKEN TOKEN TOKEN...". +*/ + +typedef struct Fts3Expr Fts3Expr; +typedef struct Fts3Phrase Fts3Phrase; + +/* +** A "phrase" is a sequence of one or more tokens that must match in +** sequence. A single token is the base case and the most common case. +** For a sequence of tokens contained in "...", nToken will be the number +** of tokens in the string. +*/ +struct Fts3Phrase { + int nToken; /* Number of tokens in the phrase */ + int iColumn; /* Index of column this phrase must match */ + int isNot; /* Phrase prefixed by unary not (-) operator */ + struct PhraseToken { + char *z; /* Text of the token */ + int n; /* Number of bytes in buffer pointed to by z */ + int isPrefix; /* True if token ends in with a "*" character */ + } aToken[1]; /* One entry for each token in the phrase */ +}; + +/* +** A tree of these objects forms the RHS of a MATCH operator. +*/ +struct Fts3Expr { + int eType; /* One of the FTSQUERY_XXX values defined below */ + int nNear; /* Valid if eType==FTSQUERY_NEAR */ + Fts3Expr *pParent; /* pParent->pLeft==this or pParent->pRight==this */ + Fts3Expr *pLeft; /* Left operand */ + Fts3Expr *pRight; /* Right operand */ + Fts3Phrase *pPhrase; /* Valid if eType==FTSQUERY_PHRASE */ +}; + +int sqlite3Fts3ExprParse(sqlite3_tokenizer *, char **, int, int, + const char *, int, Fts3Expr **); +void sqlite3Fts3ExprFree(Fts3Expr *); + +/* +** Candidate values for Fts3Query.eType. Note that the order of the first +** four values is in order of precedence when parsing expressions. For +** example, the following: +** +** "a OR b AND c NOT d NEAR e" +** +** is equivalent to: +** +** "a OR (b AND (c NOT (d NEAR e)))" +*/ +#define FTSQUERY_NEAR 1 +#define FTSQUERY_NOT 2 +#define FTSQUERY_AND 3 +#define FTSQUERY_OR 4 +#define FTSQUERY_PHRASE 5 + +#ifdef SQLITE_TEST +void sqlite3Fts3ExprInitTestInterface(sqlite3 *db); +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3.h --- sqlite3-3.4.2/ext/fts3/fts3.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3.h 2007-08-20 18:37:04.000000000 +0100 @@ -0,0 +1,26 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** FTS3 library. 
All it does is declare the sqlite3Fts3Init() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts3Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_hash.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_hash.c --- sqlite3-3.4.2/ext/fts3/fts3_hash.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_hash.c 2009-06-12 03:37:46.000000000 +0100 @@ -0,0 +1,373 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#include +#include +#include + +#include "sqlite3.h" +#include "fts3_hash.h" + +/* +** Malloc and Free functions +*/ +static void *fts3HashMalloc(int n){ + void *p = sqlite3_malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} +static void fts3HashFree(void *p){ + sqlite3_free(p); +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS3_HASH_BINARY or FTS3_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +void sqlite3Fts3HashInit(fts3Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. 
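+** A typical lifecycle, using the shorthand names from fts3_hash.h and an
+** illustrative caller-owned pointer pData:
+**
+**   fts3Hash h;
+**   sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);   /* copy keys */
+**   sqlite3Fts3HashInsert(&h, "key", 4, pData);     /* 4 includes the NUL */
+**   pData = sqlite3Fts3HashFind(&h, "key", 4);
+**   sqlite3Fts3HashClear(&h);                       /* frees all elements */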
+*/ +void sqlite3Fts3HashClear(fts3Hash *pH){ + fts3HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + fts3HashFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + fts3HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_STRING +*/ +static int fts3StrHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + int h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_BINARY +*/ +static int fts3BinHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "ftsHashFunction". The function takes a +** single parameter "keyClass". The return value of ftsHashFunction() +** is a pointer to another function. Specifically, the return value +** of ftsHashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*ftsHashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrHash; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. +*/ +static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrCompare; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinCompare; + } +} + +/* Link an element into the hash table +*/ +static void fts3HashInsertElement( + fts3Hash *pH, /* The complete hash table */ + struct _fts3ht *pEntry, /* The entry into which pNew is inserted */ + fts3HashElem *pNew /* The element to be inserted */ +){ + fts3HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. 
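+** The power-of-two requirement lets a bucket be selected with a mask
+** instead of a modulo, which is what the lookup and insert paths below do:
+**
+**   h = (*xHash)(pKey, nKey) & (pH->htsize - 1);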
+*/ +static void fts3Rehash(fts3Hash *pH, int new_size){ + struct _fts3ht *new_ht; /* The new hash table */ + fts3HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) ); + if( new_ht==0 ) return; + fts3HashFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = ftsHashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + fts3HashInsertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static fts3HashElem *fts3FindElementByHash( + const fts3Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + fts3HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts3ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = ftsCompareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. +*/ +static void fts3RemoveElementByHash( + fts3Hash *pH, /* The pH containing "elem" */ + fts3HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts3ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts3HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts3HashFind(const fts3Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts3HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. 
+** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. +*/ +void *sqlite3Fts3HashInsert( + fts3Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts3HashElem *elem; /* Used to loop thru the element list */ + fts3HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = fts3FindElementByHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + fts3RemoveElementByHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + if( pH->htsize==0 ){ + fts3Rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + return data; + } + } + new_elem = (fts3HashElem*)fts3HashMalloc( sizeof(fts3HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = fts3HashMalloc( nKey ); + if( new_elem->pKey==0 ){ + fts3HashFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->count > pH->htsize ){ + fts3Rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + fts3HashInsertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_hash.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_hash.h --- sqlite3-3.4.2/ext/fts3/fts3_hash.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_hash.h 2007-09-20 13:53:28.000000000 +0100 @@ -0,0 +1,110 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS3_HASH_H_ +#define _FTS3_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts3Hash fts3Hash; +typedef struct fts3HashElem fts3HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
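+**
+** A typical usage sketch (illustrative only; the short names are the
+** convenience macros defined later in this header, and pMyData stands for
+** any caller-supplied pointer):
+**
+**     fts3Hash h;
+**     fts3HashInit(&h, FTS3_HASH_STRING, 1);
+**     fts3HashInsert(&h, "abc", 4, pMyData);
+**     ... fts3HashFind(&h, "abc", 4) now returns pMyData ...
+**     fts3HashClear(&h);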
+*/ +struct fts3Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts3HashElem *first; /* The first element of the array */ + int htsize; /* Number of buckets in the hash table */ + struct _fts3ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts3HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts3HashElem { + fts3HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS3_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. +*/ +#define FTS3_HASH_STRING 1 +#define FTS3_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts3HashInit(fts3Hash*, int keytype, int copyKey); +void *sqlite3Fts3HashInsert(fts3Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts3HashFind(const fts3Hash*, const void *pKey, int nKey); +void sqlite3Fts3HashClear(fts3Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts3HashInit sqlite3Fts3HashInit +#define fts3HashInsert sqlite3Fts3HashInsert +#define fts3HashFind sqlite3Fts3HashFind +#define fts3HashClear sqlite3Fts3HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts3Hash h; +** fts3HashElem *p; +** ... +** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ +** SomeStructure *pData = fts3HashData(p); +** // do something with pData +** } +*/ +#define fts3HashFirst(H) ((H)->first) +#define fts3HashNext(E) ((E)->next) +#define fts3HashData(E) ((E)->data) +#define fts3HashKey(E) ((E)->pKey) +#define fts3HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts3HashCount(H) ((H)->count) + +#endif /* _FTS3_HASH_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_icu.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_icu.c --- sqlite3-3.4.2/ext/fts3/fts3_icu.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_icu.c 2008-09-01 19:34:20.000000000 +0100 @@ -0,0 +1,260 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts3 based on the ICU library. 
+** +** $Id: fts3_icu.c,v 1.3 2008/09/01 18:34:20 danielk1977 Exp $ +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) +#ifdef SQLITE_ENABLE_ICU + +#include +#include +#include "fts3_tokenizer.h" + +#include +#include +#include +#include + +typedef struct IcuTokenizer IcuTokenizer; +typedef struct IcuCursor IcuCursor; + +struct IcuTokenizer { + sqlite3_tokenizer base; + char *zLocale; +}; + +struct IcuCursor { + sqlite3_tokenizer_cursor base; + + UBreakIterator *pIter; /* ICU break-iterator object */ + int nChar; /* Number of UChar elements in pInput */ + UChar *aChar; /* Copy of input using utf-16 encoding */ + int *aOffset; /* Offsets of each character in utf-8 input */ + + int nBuffer; + char *zBuffer; + + int iToken; +}; + +/* +** Create a new tokenizer instance. +*/ +static int icuCreate( + int argc, /* Number of entries in argv[] */ + const char * const *argv, /* Tokenizer creation arguments */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ +){ + IcuTokenizer *p; + int n = 0; + + if( argc>0 ){ + n = strlen(argv[0])+1; + } + p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); + if( !p ){ + return SQLITE_NOMEM; + } + memset(p, 0, sizeof(IcuTokenizer)); + + if( n ){ + p->zLocale = (char *)&p[1]; + memcpy(p->zLocale, argv[0], n); + } + + *ppTokenizer = (sqlite3_tokenizer *)p; + + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int icuDestroy(sqlite3_tokenizer *pTokenizer){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int icuOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, /* Input string */ + int nInput, /* Length of zInput in bytes */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + IcuCursor *pCsr; + + const int32_t opt = U_FOLD_CASE_DEFAULT; + UErrorCode status = U_ZERO_ERROR; + int nChar; + + UChar32 c; + int iInput = 0; + int iOut = 0; + + *ppCursor = 0; + + if( nInput<0 ){ + nInput = strlen(zInput); + } + nChar = nInput+1; + pCsr = (IcuCursor *)sqlite3_malloc( + sizeof(IcuCursor) + /* IcuCursor */ + nChar * sizeof(UChar) + /* IcuCursor.aChar[] */ + (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ + ); + if( !pCsr ){ + return SQLITE_NOMEM; + } + memset(pCsr, 0, sizeof(IcuCursor)); + pCsr->aChar = (UChar *)&pCsr[1]; + pCsr->aOffset = (int *)&pCsr->aChar[nChar]; + + pCsr->aOffset[iOut] = iInput; + U8_NEXT(zInput, iInput, nInput, c); + while( c>0 ){ + int isError = 0; + c = u_foldCase(c, opt); + U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); + if( isError ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->aOffset[iOut] = iInput; + + if( iInputpIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); + if( !U_SUCCESS(status) ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->nChar = iOut; + + ubrk_first(pCsr->pIter); + *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to icuOpen(). +*/ +static int icuClose(sqlite3_tokenizer_cursor *pCursor){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + ubrk_close(pCsr->pIter); + sqlite3_free(pCsr->zBuffer); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. 
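+**
+** (Illustrative note.)  The ICU break iterator walks the UTF-16 copy of
+** the input held in IcuCursor.aChar[], so the UChar indices it reports
+** are translated back into byte offsets within the original UTF-8 input
+** through the IcuCursor.aOffset[] map that icuOpen() filled in, roughly:
+**
+**     *piStartOffset = pCsr->aOffset[iStart];
+**     *piEndOffset   = pCsr->aOffset[iEnd];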
+*/ +static int icuNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + + int iStart = 0; + int iEnd = 0; + int nByte = 0; + + while( iStart==iEnd ){ + UChar32 c; + + iStart = ubrk_current(pCsr->pIter); + iEnd = ubrk_next(pCsr->pIter); + if( iEnd==UBRK_DONE ){ + return SQLITE_DONE; + } + + while( iStartaChar, iWhite, pCsr->nChar, c); + if( u_isspace(c) ){ + iStart = iWhite; + }else{ + break; + } + } + assert(iStart<=iEnd); + } + + do { + UErrorCode status = U_ZERO_ERROR; + if( nByte ){ + char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); + if( !zNew ){ + return SQLITE_NOMEM; + } + pCsr->zBuffer = zNew; + pCsr->nBuffer = nByte; + } + + u_strToUTF8( + pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ + &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ + &status /* Output success/failure */ + ); + } while( nByte>pCsr->nBuffer ); + + *ppToken = pCsr->zBuffer; + *pnBytes = nByte; + *piStartOffset = pCsr->aOffset[iStart]; + *piEndOffset = pCsr->aOffset[iEnd]; + *piPosition = pCsr->iToken++; + + return SQLITE_OK; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module icuTokenizerModule = { + 0, /* iVersion */ + icuCreate, /* xCreate */ + icuDestroy, /* xCreate */ + icuOpen, /* xOpen */ + icuClose, /* xClose */ + icuNext, /* xNext */ +}; + +/* +** Set *ppModule to point at the implementation of the ICU tokenizer. +*/ +void sqlite3Fts3IcuTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &icuTokenizerModule; +} + +#endif /* defined(SQLITE_ENABLE_ICU) */ +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_porter.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_porter.c --- sqlite3-3.4.2/ext/fts3/fts3_porter.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_porter.c 2007-11-23 17:31:18.000000000 +0000 @@ -0,0 +1,642 @@ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
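+**
+** (Illustrative note; the exact compiler invocation is an assumption.)
+** The second case corresponds to compiling the amalgamation with the
+** pre-processor symbol defined, for example:
+**
+**     gcc -DSQLITE_ENABLE_FTS3 -c sqlite3.c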
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include +#include +#include +#include +#include + +#include "fts3_tokenizer.h" + +/* +** Class derived from sqlite3_tokenizer +*/ +typedef struct porter_tokenizer { + sqlite3_tokenizer base; /* Base class */ +} porter_tokenizer; + +/* +** Class derived from sqlit3_tokenizer_cursor +*/ +typedef struct porter_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *zInput; /* input we are tokenizing */ + int nInput; /* size of the input */ + int iOffset; /* current position in zInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAllocated; /* space allocated to zToken buffer */ +} porter_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module porterTokenizerModule; + + +/* +** Create a new tokenizer instance. +*/ +static int porterCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + porter_tokenizer *t; + t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); + if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int porterDestroy(sqlite3_tokenizer *pTokenizer){ + sqlite3_free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is zInput[0..nInput-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int porterOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, int nInput, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + porter_tokenizer_cursor *c; + + c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->zInput = zInput; + if( zInput==0 ){ + c->nInput = 0; + }else if( nInput<0 ){ + c->nInput = (int)strlen(zInput); + }else{ + c->nInput = nInput; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** porterOpen() above. +*/ +static int porterClose(sqlite3_tokenizer_cursor *pCursor){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + sqlite3_free(c->zToken); + sqlite3_free(c); + return SQLITE_OK; +} +/* +** Vowel or consonant +*/ +static const char cType[] = { + 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 1 +}; + +/* +** isConsonant() and isVowel() determine if their first character in +** the string they point to is a consonant or a vowel, according +** to Porter ruls. +** +** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. +** 'Y' is a consonant unless it follows another consonant, +** in which case it is a vowel. +** +** In these routine, the letters are in reverse order. So the 'y' rule +** is that 'y' is a consonant unless it is followed by another +** consonent. 
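+**
+** For example (illustrative): the word "toy" is stored here in reverse as
+** "yot"; its 'y' is followed by the vowel 'o', so it is treated as a
+** consonant.  The word "sky" is stored as "yks"; its 'y' is followed by
+** the consonant 'k', so it is treated as a vowel.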
+*/ +static int isVowel(const char*); +static int isConsonant(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return j; + return z[1]==0 || isVowel(z + 1); +} +static int isVowel(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return 1-j; + return isConsonant(z + 1); +} + +/* +** Let any sequence of one or more vowels be represented by V and let +** C be sequence of one or more consonants. Then every word can be +** represented as: +** +** [C] (VC){m} [V] +** +** In prose: A word is an optional consonant followed by zero or +** vowel-consonant pairs followed by an optional vowel. "m" is the +** number of vowel consonant pairs. This routine computes the value +** of m for the first i bytes of a word. +** +** Return true if the m-value for z is 1 or more. In other words, +** return true if z contains at least one vowel that is followed +** by a consonant. +** +** In this routine z[] is in reverse order. So we are really looking +** for an instance of of a consonant followed by a vowel. +*/ +static int m_gt_0(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* Like mgt0 above except we are looking for a value of m which is +** exactly 1 +*/ +static int m_eq_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 1; + while( isConsonant(z) ){ z++; } + return *z==0; +} + +/* Like mgt0 above except we are looking for a value of m>1 instead +** or m>0 +*/ +static int m_gt_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if there is a vowel anywhere within z[0..n-1] +*/ +static int hasVowel(const char *z){ + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if the word ends in a double consonant. +** +** The text is reversed here. So we are really looking at +** the first two characters of z[]. +*/ +static int doubleConsonant(const char *z){ + return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); +} + +/* +** Return TRUE if the word ends with three letters which +** are consonant-vowel-consonent and where the final consonant +** is not 'w', 'x', or 'y'. +** +** The word is reversed here. So we are really checking the +** first three letters and the first one cannot be in [wxy]. +*/ +static int star_oh(const char *z){ + return + z[0]!=0 && isConsonant(z) && + z[0]!='w' && z[0]!='x' && z[0]!='y' && + z[1]!=0 && isVowel(z+1) && + z[2]!=0 && isConsonant(z+2); +} + +/* +** If the word ends with zFrom and xCond() is true for the stem +** of the word that preceeds the zFrom ending, then change the +** ending to zTo. +** +** The input word *pz and zFrom are both in reverse order. zTo +** is in normal order. +** +** Return TRUE if zFrom matches. Return FALSE if zFrom does not +** match. Not that TRUE is returned even if xCond() fails and +** no substitution occurs. +*/ +static int stem( + char **pz, /* The word being stemmed (Reversed) */ + const char *zFrom, /* If the ending matches this... (Reversed) */ + const char *zTo, /* ... 
change the ending to this (not reversed) */ + int (*xCond)(const char*) /* Condition that must be true */ +){ + char *z = *pz; + while( *zFrom && *zFrom==*z ){ z++; zFrom++; } + if( *zFrom!=0 ) return 0; + if( xCond && !xCond(z) ) return 1; + while( *zTo ){ + *(--z) = *(zTo++); + } + *pz = z; + return 1; +} + +/* +** This is the fallback stemmer used when the porter stemmer is +** inappropriate. The input word is copied into the output with +** US-ASCII case folding. If the input word is too long (more +** than 20 bytes if it contains no digits or more than 6 bytes if +** it contains digits) then word is truncated to 20 or 6 bytes +** by taking 10 or 3 bytes from the beginning and end. +*/ +static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, mx, j; + int hasDigit = 0; + for(i=0; i='A' && c<='Z' ){ + zOut[i] = c - 'A' + 'a'; + }else{ + if( c>='0' && c<='9' ) hasDigit = 1; + zOut[i] = c; + } + } + mx = hasDigit ? 3 : 10; + if( nIn>mx*2 ){ + for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ + /* The word is too big or too small for the porter stemmer. + ** Fallback to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ + zReverse[j] = c + 'a' - 'A'; + }else if( c>='a' && c<='z' ){ + zReverse[j] = c; + }else{ + /* The use of a character not in [a-zA-Z] means that we fallback + ** to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + } + memset(&zReverse[sizeof(zReverse)-5], 0, 5); + z = &zReverse[j+1]; + + + /* Step 1a */ + if( z[0]=='s' ){ + if( + !stem(&z, "sess", "ss", 0) && + !stem(&z, "sei", "i", 0) && + !stem(&z, "ss", "ss", 0) + ){ + z++; + } + } + + /* Step 1b */ + z2 = z; + if( stem(&z, "dee", "ee", m_gt_0) ){ + /* Do nothing. The work was all in the test */ + }else if( + (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) + && z!=z2 + ){ + if( stem(&z, "ta", "ate", 0) || + stem(&z, "lb", "ble", 0) || + stem(&z, "zi", "ize", 0) ){ + /* Do nothing. 
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
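+**
+** (Illustrative note.)  The table below is indexed from 0x30 ('0'); any
+** ASCII code below 0x30 is always a delimiter and any byte with the high
+** bit set never is, as the isDelim() macro that follows encodes.  For
+** example:
+**
+**     isDelim('a')  evaluates to 0
+**     isDelim(' ')  evaluates to 1
+**     isDelim(0xC3) evaluates to 0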
+*/ +static const char porterIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffsetnInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = sqlite3_realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_tokenizer1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_tokenizer1.c --- sqlite3-3.4.2/ext/fts3/fts3_tokenizer1.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_tokenizer1.c 2007-11-23 17:31:18.000000000 +0000 @@ -0,0 +1,230 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include +#include +#include +#include +#include + +#include "fts3_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int simpleDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); + if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i=0x80 ){ + sqlite3_free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + sqlite3_free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + sqlite3_free(c->pToken); + sqlite3_free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). 
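+**
+** (Illustrative example.)  With the default delimiter set, tokenizing the
+** input "Right now!" produces two tokens:
+**
+**     position 0: "right"   (startOffset 0, endOffset 5)
+**     position 1: "now"     (startOffset 6, endOffset 9)
+**
+** ASCII characters are folded to lower case; bytes with the high bit set
+** are copied through unchanged.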
+*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffsetnBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = sqlite3_realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; ipToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_tokenizer.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_tokenizer.c --- sqlite3-3.4.2/ext/fts3/fts3_tokenizer.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_tokenizer.c 2008-02-01 15:34:10.000000000 +0000 @@ -0,0 +1,371 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is part of an SQLite module implementing full-text search. +** This particular file implements the generic tokenizer interface. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#include "sqlite3ext.h" +#ifndef SQLITE_CORE + SQLITE_EXTENSION_INIT1 +#endif + +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#include + +/* +** Implementation of the SQL scalar function for accessing the underlying +** hash table. This function may be called as follows: +** +** SELECT (); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). 
+** +** If the argument is specified, it must be a blob value +** containing a pointer to be stored as the hash data corresponding +** to the string . If is not specified, then +** the string must already exist in the has table. Otherwise, +** an error is returned. +** +** Whether or not the argument is specified, the value returned +** is a blob containing the pointer stored as the hash data corresponding +** to string (after the hash-table is updated, if applicable). +*/ +static void scalarFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + void *pPtr = 0; + const unsigned char *zName; + int nName; + + assert( argc==1 || argc==2 ); + + pHash = (fts3Hash *)sqlite3_user_data(context); + + zName = sqlite3_value_text(argv[0]); + nName = sqlite3_value_bytes(argv[0])+1; + + if( argc==2 ){ + void *pOld; + int n = sqlite3_value_bytes(argv[1]); + if( n!=sizeof(pPtr) ){ + sqlite3_result_error(context, "argument type mismatch", -1); + return; + } + pPtr = *(void **)sqlite3_value_blob(argv[1]); + pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); + if( pOld==pPtr ){ + sqlite3_result_error(context, "out of memory", -1); + return; + } + }else{ + pPtr = sqlite3Fts3HashFind(pHash, zName, nName); + if( !pPtr ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + } + + sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); +} + +#ifdef SQLITE_TEST + +#include +#include + +/* +** Implementation of a special SQL scalar function for testing tokenizers +** designed to be used in concert with the Tcl testing framework. This +** function must be called with two arguments: +** +** SELECT (, ); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer') +** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test'). +** +** The return value is a string that may be interpreted as a Tcl +** list. For each token in the , three elements are +** added to the returned list. The first is the token position, the +** second is the token text (folded, stemmed, etc.) and the third is the +** substring of associated with the token. 
For example, +** using the built-in "simple" tokenizer: +** +** SELECT fts_tokenizer_test('simple', 'I don't see how'); +** +** will return the string: +** +** "{0 i I 1 dont don't 2 see see 3 how how}" +** +*/ +static void testFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + sqlite3_tokenizer_module *p; + sqlite3_tokenizer *pTokenizer = 0; + sqlite3_tokenizer_cursor *pCsr = 0; + + const char *zErr = 0; + + const char *zName; + int nName; + const char *zInput; + int nInput; + + const char *zArg = 0; + + const char *zToken; + int nToken; + int iStart; + int iEnd; + int iPos; + + Tcl_Obj *pRet; + + assert( argc==2 || argc==3 ); + + nName = sqlite3_value_bytes(argv[0]); + zName = (const char *)sqlite3_value_text(argv[0]); + nInput = sqlite3_value_bytes(argv[argc-1]); + zInput = (const char *)sqlite3_value_text(argv[argc-1]); + + if( argc==3 ){ + zArg = (const char *)sqlite3_value_text(argv[1]); + } + + pHash = (fts3Hash *)sqlite3_user_data(context); + p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); + + if( !p ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + + if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){ + zErr = "error in xCreate()"; + goto finish; + } + pTokenizer->pModule = p; + if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){ + zErr = "error in xOpen()"; + goto finish; + } + pCsr->pTokenizer = pTokenizer; + + while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ + Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + zToken = &zInput[iStart]; + nToken = iEnd-iStart; + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + } + + if( SQLITE_OK!=p->xClose(pCsr) ){ + zErr = "error in xClose()"; + goto finish; + } + if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ + zErr = "error in xDestroy()"; + goto finish; + } + +finish: + if( zErr ){ + sqlite3_result_error(context, zErr, -1); + }else{ + sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); + } + Tcl_DecrRefCount(pRet); +} + +static +int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); +} + +static +int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +/* +** Implementation of the scalar function fts3_tokenizer_internal_test(). 
+** This function is used for testing only, it is not included in the +** build unless SQLITE_TEST is defined. +** +** The purpose of this is to test that the fts3_tokenizer() function +** can be used as designed by the C-code in the queryTokenizer and +** registerTokenizer() functions above. These two functions are repeated +** in the README.tokenizer file as an example, so it is important to +** test them. +** +** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar +** function with no arguments. An assert() will fail if a problem is +** detected. i.e.: +** +** SELECT fts3_tokenizer_internal_test(); +** +*/ +static void intTestFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int rc; + const sqlite3_tokenizer_module *p1; + const sqlite3_tokenizer_module *p2; + sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); + + /* Test the query function */ + sqlite3Fts3SimpleTokenizerModule(&p1); + rc = queryTokenizer(db, "simple", &p2); + assert( rc==SQLITE_OK ); + assert( p1==p2 ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_ERROR ); + assert( p2==0 ); + assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); + + /* Test the storage function */ + rc = registerTokenizer(db, "nosuchtokenizer", p1); + assert( rc==SQLITE_OK ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_OK ); + assert( p2==p1 ); + + sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); +} + +#endif + +/* +** Set up SQL objects in database db used to access the contents of +** the hash table pointed to by argument pHash. The hash table must +** been initialised to use string keys, and to take a private copy +** of the key when a value is inserted. i.e. by a call similar to: +** +** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); +** +** This function adds a scalar function (see header comment above +** scalarFunc() in this file for details) and, if ENABLE_TABLE is +** defined at compilation time, a temporary virtual table (see header +** comment above struct HashTableVtab) to the database schema. Both +** provide read/write access to the contents of *pHash. +** +** The third argument to this function, zName, is used as the name +** of both the scalar and, if created, the virtual table. 
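+**
+** (Illustrative example; the tokenizer name "mytok" is hypothetical.)
+** With zName set to "fts3_tokenizer", a tokenizer implementation can be
+** registered and later queried from SQL as:
+**
+**     SELECT fts3_tokenizer('mytok', ?);   -- bind a pointer blob to register
+**     SELECT fts3_tokenizer('mytok');      -- query the registered pointer
+**
+** where the bound blob holds a pointer to an sqlite3_tokenizer_module, as
+** the registerTokenizer() and queryTokenizer() routines above demonstrate.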
+*/ +int sqlite3Fts3InitHashTable( + sqlite3 *db, + fts3Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + char *zTest = 0; + char *zTest2 = 0; + +#ifdef SQLITE_TEST + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( rc!=SQLITE_OK + || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) +#ifdef SQLITE_TEST + || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) +#endif + ); + + sqlite3_free(zTest); + sqlite3_free(zTest2); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/fts3_tokenizer.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/fts3_tokenizer.h --- sqlite3-3.4.2/ext/fts3/fts3_tokenizer.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/fts3_tokenizer.h 2009-06-12 03:37:46.000000000 +0100 @@ -0,0 +1,148 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS3_TOKENIZER_H_ +#define _FTS3_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts3 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts3 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0. 
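+  **
+  ** (Illustrative note.)  The modules in this tree follow that rule; for
+  ** example fts3_tokenizer1.c defines:
+  **
+  **     static const sqlite3_tokenizer_module simpleTokenizerModule = {
+  **       0, simpleCreate, simpleDestroy, simpleOpen, simpleClose, simpleNext,
+  **     };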
+ */ + int iVersion; + + /* + ** Create a new tokenizer. The values in the argv[] array are the + ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL + ** TABLE statement that created the fts3 table. For example, if + ** the following SQL is executed: + ** + ** CREATE .. USING fts3( ... , tokenizer arg1 arg2) + ** + ** then argc is set to 2, and the argv[] array contains pointers + ** to the strings "arg1" and "arg2". + ** + ** This method should return either SQLITE_OK (0), or an SQLite error + ** code. If SQLITE_OK is returned, then *ppTokenizer should be set + ** to point at the newly created tokenizer structure. The generic + ** sqlite3_tokenizer.pModule variable should not be initialised by + ** this callback. The caller will do so. + */ + int (*xCreate)( + int argc, /* Size of argv array */ + const char *const*argv, /* Tokenizer argument strings */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ + ); + + /* + ** Destroy an existing tokenizer. The fts3 module calls this method + ** exactly once for each successful call to xCreate(). + */ + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Create a tokenizer cursor to tokenize an input buffer. The caller + ** is responsible for ensuring that the input buffer remains valid + ** until the cursor is closed (using the xClose() method). + */ + int (*xOpen)( + sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ + const char *pInput, int nBytes, /* Input buffer */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ + ); + + /* + ** Destroy an existing tokenizer cursor. The fts3 module calls this + ** method exactly once for each successful call to xOpen(). + */ + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + + /* + ** Retrieve the next token from the tokenizer cursor pCursor. This + ** method should either return SQLITE_OK and set the values of the + ** "OUT" variables identified below, or SQLITE_DONE to indicate that + ** the end of the buffer has been reached, or an SQLite error code. + ** + ** *ppToken should be set to point at a buffer containing the + ** normalized version of the token (i.e. after any case-folding and/or + ** stemming has been performed). *pnBytes should be set to the length + ** of this buffer in bytes. The input text that generated the token is + ** identified by the byte offsets returned in *piStartOffset and + ** *piEndOffset. *piStartOffset should be set to the index of the first + ** byte of the token in the input buffer. *piEndOffset should be set + ** to the index of the first byte just past the end of the token in + ** the input buffer. + ** + ** The buffer *ppToken is set to point at is managed by the tokenizer + ** implementation. It is only required to be valid until the next call + ** to xNext() or xClose(). + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. 
+ */ + int (*xNext)( + sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ + const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ + int *piStartOffset, /* OUT: Byte offset of token in input buffer */ + int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ + int *piPosition /* OUT: Number of tokens returned before this one */ + ); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +#endif /* _FTS3_TOKENIZER_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/mkfts3amal.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/mkfts3amal.tcl --- sqlite3-3.4.2/ext/fts3/mkfts3amal.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/mkfts3amal.tcl 2008-01-31 13:35:49.000000000 +0000 @@ -0,0 +1,115 @@ +#!/usr/bin/tclsh +# +# This script builds a single C code file holding all of FTS3 code. +# The name of the output file is fts3amal.c. To build this file, +# first do: +# +# make target_source +# +# The make target above moves all of the source code files into +# a subdirectory named "tsrc". (This script expects to find the files +# there and will not work if they are not found.) +# +# After the "tsrc" directory has been created and populated, run +# this script: +# +# tclsh mkfts3amal.tcl +# +# The amalgamated FTS3 code will be written into fts3amal.c +# + +# Open the output file and write a header comment at the beginning +# of the file. +# +set out [open fts3amal.c w] +set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] +puts $out [subst \ +{/****************************************************************************** +** This file is an amalgamation of separate C source files from the SQLite +** Full Text Search extension 2 (fts3). By combining all the individual C +** code files into this single large file, the entire code can be compiled +** as a one translation unit. This allows many compilers to do optimizations +** that would not be possible if the files were compiled separately. It also +** makes the code easier to import into other projects. +** +** This amalgamation was generated on $today. +*/}] + +# These are the header files used by FTS3. The first time any of these +# files are seen in a #include statement in the C code, include the complete +# text of the file in-line. The file only needs to be included once. +# +foreach hdr { + fts3.h + fts3_hash.h + fts3_tokenizer.h + sqlite3.h + sqlite3ext.h +} { + set available_hdr($hdr) 1 +} + +# 78 stars used for comment formatting. +set s78 \ +{*****************************************************************************} + +# Insert a comment into the code +# +proc section_comment {text} { + global out s78 + set n [string length $text] + set nstar [expr {60 - $n}] + set stars [string range $s78 0 $nstar] + puts $out "/************** $text $stars/" +} + +# Read the source file named $filename and write it into the +# sqlite3.c output file. If any #include statements are seen, +# process them approprately. 
+# +proc copy_file {filename} { + global seen_hdr available_hdr out + set tail [file tail $filename] + section_comment "Begin file $tail" + set in [open $filename r] + while {![eof $in]} { + set line [gets $in] + if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { + if {[info exists available_hdr($hdr)]} { + if {$available_hdr($hdr)} { + section_comment "Include $hdr in the middle of $tail" + copy_file tsrc/$hdr + section_comment "Continuing where we left off in $tail" + } + } elseif {![info exists seen_hdr($hdr)]} { + set seen_hdr($hdr) 1 + puts $out $line + } + } elseif {[regexp {^#ifdef __cplusplus} $line]} { + puts $out "#if 0" + } elseif {[regexp {^#line} $line]} { + # Skip #line directives. + } else { + puts $out $line + } + } + close $in + section_comment "End of $tail" +} + + +# Process the source files. Process files containing commonly +# used subroutines first in order to help the compiler find +# inlining opportunities. +# +foreach file { + fts3.c + fts3_hash.c + fts3_porter.c + fts3_tokenizer.c + fts3_tokenizer1.c +} { + copy_file tsrc/$file +} + +close $out diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/README.syntax /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/README.syntax --- sqlite3-3.4.2/ext/fts3/README.syntax 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/README.syntax 2008-12-31 19:27:53.000000000 +0000 @@ -0,0 +1,209 @@ + +1. OVERVIEW + + This README file describes the syntax of the arguments that may be passed to + the FTS3 MATCH operator used for full-text queries. For example, if table + "t1" is an Fts3 virtual table, the following SQL query: + + SELECT * FROM t1 WHERE MATCH + + may be used to retrieve all rows that match a specified for full-text query. + The text "" should be replaced by either the name of the fts3 table + (in this case "t1"), or by the name of one of the columns of the fts3 + table. should be replaced by an SQL expression that + computes to a string containing an Fts3 query. + + If the left-hand-side of the MATCH operator is set to the name of the + fts3 table, then by default the query may be matched against any column + of the table. If it is set to a column name, then by default the query + may only match the specified column. In both cases this may be overriden + as part of the query text (see sections 2 and 3 below). + + As of SQLite version 3.6.8, Fts3 supports two slightly different query + formats; the standard syntax, which is used by default, and the enhanced + query syntax which can be selected by compiling with the pre-processor + symbol SQLITE_ENABLE_FTS3_PARENTHESIS defined. + + -DSQLITE_ENABLE_FTS3_PARENTHESIS + +2. STANDARD QUERY SYNTAX + + When using the standard Fts3 query syntax, a query usually consists of a + list of terms (words) separated by white-space characters. To match a + query, a row (or column) of an Fts3 table must contain each of the specified + terms. For example, the following query: + + MATCH 'hello world' + + matches rows (or columns, if is the name of a column name) that + contain at least one instance of the token "hello", and at least one + instance of the token "world". Tokens may be grouped into phrases using + quotation marks. In this case, a matching row or column must contain each + of the tokens in the phrase in the order specified, with no intervening + tokens. For example, the query: + + MATCH '"hello world" joe" + + matches the first of the following two documents, but not the second or + third: + + "'Hello world', said Joe." 
+ "One should always greet the world with a cheery hello, thought Joe." + "How many hello world programs could their be?" + + As well as grouping tokens together by phrase, the binary NEAR operator + may be used to search for rows that contain two or more specified tokens + or phrases within a specified proximity of each other. The NEAR operator + must always be specified in upper case. The word "near" in lower or mixed + case is treated as an ordinary token. For example, the following query: + + MATCH 'engineering NEAR consultancy' + + matches rows that contain both the "engineering" and "consultancy" tokens + in the same column with not more than 10 other words between them. It does + not matter which of the two terms occurs first in the document, only that + they be seperated by only 10 tokens or less. The user may also specify + a different required proximity by adding "/N" immediately after the NEAR + operator, where N is an integer. For example: + + MATCH 'engineering NEAR/5 consultancy' + + searches for a row containing an instance of each specified token seperated + by not more than 5 other tokens. More than one NEAR operator can be used + in as sequence. For example this query: + + MATCH 'reliable NEAR/2 engineering NEAR/5 consultancy' + + searches for a row that contains an instance of the token "reliable" + seperated by not more than two tokens from an instance of "engineering", + which is in turn separated by not more than 5 other tokens from an + instance of the term "consultancy". Phrases enclosed in quotes may + also be used as arguments to the NEAR operator. + + Similar to the NEAR operator, one or more tokens or phrases may be + separated by OR operators. In this case, only one of the specified tokens + or phrases must appear in the document. For example, the query: + + MATCH 'hello OR world' + + matches rows that contain either the term "hello", or the term "world", + or both. Note that unlike in many programming languages, the OR operator + has a higher precedence than the AND operators implied between white-space + separated tokens. The following query matches documents that contain the + term 'sqlite' and at least one of the terms 'fantastic' or 'impressive', + not those that contain both 'sqlite' and 'fantastic' or 'impressive': + + MATCH 'sqlite fantastic OR impressive' + + Any token that is part of an Fts3 query expression, whether or not it is + part of a phrase enclosed in quotes, may have a '*' character appended to + it. In this case, the token matches all terms that begin with the characters + of the token, not just those that exactly match it. For example, the + following query: + + MATCH 'sql*' + + matches all rows that contain the term "SQLite", as well as those that + contain "SQL". + + A token that is not part of a quoted phrase may be preceded by a '-' + character, which indicates that matching rows must not contain the + specified term. For example, the following: + + MATCH '"database engine" -sqlite' + + matches rows that contain the phrase "database engine" but do not contain + the term "sqlite". If the '-' character occurs inside a quoted phrase, + it is ignored. It is possible to use both the '-' prefix and the '*' postfix + on a single term. At this time, all Fts3 queries must contain at least + one term or phrase that is not preceded by the '-' prefix. 
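+
+  For illustration (using a hypothetical fts3 table named "docs"), the two
+  affixes may be combined in a single query as follows:
+
+    SELECT * FROM docs WHERE docs MATCH 'datab* -sqlite';
+
+  This matches rows that contain at least one term beginning with "datab"
+  (for example "database" or "databases") but that do not contain the
+  term "sqlite".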
+ + Regardless of whether or not a table name or column name is used on the + left hand side of the MATCH operator, a specific column of the fts3 table + may be associated with each token in a query by preceding a token with + a column name followed by a ':' character. For example, regardless of what + is specified for , the following query requires that column "col1" + of the table contains the term "hello", and that column "col2" of the + table contains the term "world". If the table does not contain columns + named "col1" and "col2", then an error is returned and the query is + not run. + + MATCH 'col1:hello col2:world' + + It is not possible to associate a specific table column with a quoted + phrase or a term preceded by a '-' operator. A '*' character may be + appended to a term associated with a specific column for prefix matching. + +3. ENHANCED QUERY SYNTAX + + The enhanced query syntax is quite similar to the standard query syntax, + with the following four differences: + + 1) Parenthesis are supported. When using the enhanced query syntax, + parenthesis may be used to overcome the built-in precedence of the + supplied binary operators. For example, the following query: + + MATCH '(hello world) OR (simple example)' + + matches documents that contain both "hello" and "world", and documents + that contain both "simple" and "example". It is not possible to forumlate + such a query using the standard syntax. + + 2) Instead of separating tokens and phrases by whitespace, an AND operator + may be explicitly specified. This does not change query processing at + all, but may be used to improve readability. For example, the following + query is handled identically to the one above: + + MATCH '(hello AND world) OR (simple AND example)' + + As with the OR and NEAR operators, the AND operator must be specified + in upper case. The word "and" specified in lower or mixed case is + handled as a regular token. + + 3) The '-' token prefix is not supported. Instead, a new binary operator, + NOT, is included. The NOT operator requires that the query specified + as its left-hand operator matches, but that the query specified as the + right-hand operator does not. For example, to query for all rows that + contain the term "example" but not the term "simple", the following + query could be used: + + MATCH 'example NOT simple' + + As for all other operators, the NOT operator must be specified in + upper case. Otherwise it will be treated as a regular token. + + 4) Unlike in the standard syntax, where the OR operator has a higher + precedence than the implicit AND operator, when using the enhanced + syntax implicit and explict AND operators have a higher precedence + than OR operators. Using the enhanced syntax, the following two + queries are equivalent: + + MATCH 'sqlite fantastic OR impressive' + MATCH '(sqlite AND fantastic) OR impressive' + + however, when using the standard syntax, the query: + + MATCH 'sqlite fantastic OR impressive' + + is equivalent to the enhanced syntax query: + + MATCH 'sqlite AND (fantastic OR impressive)' + + The precedence of all enhanced syntax operators, in order from highest + to lowest, is: + + NEAR (highest precedence, tightest grouping) + NOT + AND + OR (lowest precedence, loosest grouping) + + Using the advanced syntax, it is possible to specify expressions enclosed + in parenthesis as operands to the NOT, AND and OR operators. However both + the left and right hand side operands of NEAR operators must be either + tokens or phrases. 
Attempting the following query will return an error: + + MATCH 'sqlite NEAR (fantastic OR impressive)' + + Queries of this form must be re-written as: + + MATCH 'sqlite NEAR fantastic OR sqlite NEAR impressive' diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/README.tokenizers /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/README.tokenizers --- sqlite3-3.4.2/ext/fts3/README.tokenizers 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/README.tokenizers 2009-06-12 03:37:46.000000000 +0100 @@ -0,0 +1,133 @@ + +1. FTS3 Tokenizers + + When creating a new full-text table, FTS3 allows the user to select + the text tokenizer implementation to be used when indexing text + by specifying a "tokenize" clause as part of the CREATE VIRTUAL TABLE + statement: + + CREATE VIRTUAL TABLE USING fts3( + [, tokenize []] + ); + + The built-in tokenizers (valid values to pass as ) are + "simple" and "porter". + + should consist of zero or more white-space separated + arguments to pass to the selected tokenizer implementation. The + interpretation of the arguments, if any, depends on the individual + tokenizer. + +2. Custom Tokenizers + + FTS3 allows users to provide custom tokenizer implementations. The + interface used to create a new tokenizer is defined and described in + the fts3_tokenizer.h source file. + + Registering a new FTS3 tokenizer is similar to registering a new + virtual table module with SQLite. The user passes a pointer to a + structure containing pointers to various callback functions that + make up the implementation of the new tokenizer type. For tokenizers, + the structure (defined in fts3_tokenizer.h) is called + "sqlite3_tokenizer_module". + + FTS3 does not expose a C-function that users call to register new + tokenizer types with a database handle. Instead, the pointer must + be encoded as an SQL blob value and passed to FTS3 through the SQL + engine by evaluating a special scalar function, "fts3_tokenizer()". + The fts3_tokenizer() function may be called with one or two arguments, + as follows: + + SELECT fts3_tokenizer(); + SELECT fts3_tokenizer(, ); + + Where is a string identifying the tokenizer and + is a pointer to an sqlite3_tokenizer_module + structure encoded as an SQL blob. If the second argument is present, + it is registered as tokenizer and a copy of it + returned. If only one argument is passed, a pointer to the tokenizer + implementation currently registered as is returned, + encoded as a blob. Or, if no such tokenizer exists, an SQL exception + (error) is raised. + + SECURITY: If the fts3 extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they should be + prevented from invoking the fts3_tokenizer() function, possibly using the + authorisation callback. + + See "Sample code" below for an example of calling the fts3_tokenizer() + function from C code. + +3. ICU Library Tokenizers + + If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor + symbol defined, then there exists a built-in tokenizer named "icu" + implemented using the ICU library. The first argument passed to the + xCreate() method (see fts3_tokenizer.h) of this tokenizer may be + an ICU locale identifier. For example "tr_TR" for Turkish as used + in Turkey, or "en_AU" for English as used in Australia. For example: + + "CREATE VIRTUAL TABLE thai_text USING fts3(text, tokenizer icu th_TH)" + + The ICU tokenizer implementation is very simple. 
It splits the input + text according to the ICU rules for finding word boundaries and discards + any tokens that consist entirely of white-space. This may be suitable + for some applications in some locales, but not all. If more complex + processing is required, for example to implement stemming or + discard punctuation, this can be done by creating a tokenizer + implementation that uses the ICU tokenizer as part of its implementation. + + When using the ICU tokenizer this way, it is safe to overwrite the + contents of the strings returned by the xNext() method (see + fts3_tokenizer.h). + +4. Sample code. + + The following two code samples illustrate the way C code should invoke + the fts3_tokenizer() scalar function: + + int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); + } + + int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); + } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/fts3/README.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/fts3/README.txt --- sqlite3-3.4.2/ext/fts3/README.txt 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/fts3/README.txt 2007-08-20 18:37:03.000000000 +0100 @@ -0,0 +1,4 @@ +This folder contains source code to the second full-text search +extension for SQLite. While the API is the same, this version uses a +substantially different storage schema from fts1, so tables will need +to be rebuilt. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/icu/icu.c /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/icu/icu.c --- sqlite3-3.4.2/ext/icu/icu.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/ext/icu/icu.c 2009-05-05 04:39:52.000000000 +0100 @@ -9,7 +9,7 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* -** $Id: icu.c,v 1.6 2007/06/22 15:21:16 danielk1977 Exp $ +** $Id: icu.c,v 1.7 2007/12/13 21:54:11 drh Exp $ ** ** This file implements an integration between the ICU library ** ("International Components for Unicode", an open-source library @@ -298,7 +298,7 @@ /* ** Implementations of scalar functions for case mapping - upper() and -** lower(). Function upper() converts it's input to upper-case (ABC). +** lower(). Function upper() converts its input to upper-case (ABC). ** Function lower() converts to lower-case (abc). 
** ** ICU provides two types of case mapping, "general" case mapping and diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/icu/sqliteicu.h /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/icu/sqliteicu.h --- sqlite3-3.4.2/ext/icu/sqliteicu.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/icu/sqliteicu.h 2008-09-08 09:08:09.000000000 +0100 @@ -0,0 +1,27 @@ +/* +** 2008 May 26 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** ICU extension. All it does is declare the sqlite3IcuInit() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3IcuInit(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/README /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/README --- sqlite3-3.4.2/ext/rtree/README 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/README 2008-06-04 16:09:17.000000000 +0100 @@ -0,0 +1,120 @@ + +This directory contains an SQLite extension that implements a virtual +table type that allows users to create, query and manipulate r-tree[1] +data structures inside of SQLite databases. Users create, populate +and query r-tree structures using ordinary SQL statements. + + 1. SQL Interface + + 1.1 Table Creation + 1.2 Data Manipulation + 1.3 Data Querying + 1.4 Introspection and Analysis + + 2. Compilation and Deployment + + 3. References + + +1. SQL INTERFACE + + 1.1 Table Creation. + + All r-tree virtual tables have an odd number of columns between + 3 and 11. Unlike regular SQLite tables, r-tree tables are strongly + typed. + + The leftmost column is always the pimary key and contains 64-bit + integer values. Each subsequent column contains a 32-bit real + value. For each pair of real values, the first (leftmost) must be + less than or equal to the second. R-tree tables may be + constructed using the following syntax: + + CREATE VIRTUAL TABLE USING rtree() + + For example: + + CREATE VIRTUAL TABLE boxes USING rtree(boxno, xmin, xmax, ymin, ymax); + INSERT INTO boxes VALUES(1, 1.0, 3.0, 2.0, 4.0); + + Constructing a virtual r-tree table creates the following three + real tables in the database to store the data structure: + + _node + _rowid + _parent + + Dropping or modifying the contents of these tables directly will + corrupt the r-tree structure. To delete an r-tree from a database, + use a regular DROP TABLE statement: + + DROP TABLE ; + + Dropping the main r-tree table automatically drops the automatically + created tables. + + 1.2 Data Manipulation (INSERT, UPDATE, DELETE). + + The usual INSERT, UPDATE or DELETE syntax is used to manipulate data + stored in an r-tree table. Please note the following: + + * Inserting a NULL value into the primary key column has the + same effect as inserting a NULL into an INTEGER PRIMARY KEY + column of a regular table. The system automatically assigns + an unused integer key value to the new record. Usually, this + is one greater than the largest primary key value currently + present in the table. + + * Attempting to insert a duplicate primary key value fails with + an SQLITE_CONSTRAINT error. 
+ + * Attempting to insert or modify a record such that the value + stored in the (N*2)th column is greater than that stored in + the (N*2+1)th column fails with an SQLITE_CONSTRAINT error. + + * When a record is inserted, values are always converted to + the required type (64-bit integer or 32-bit real) as if they + were part of an SQL CAST expression. Non-numeric strings are + converted to zero. + + 1.3 Queries. + + R-tree tables may be queried using all of the same SQL syntax supported + by regular tables. However, some query patterns are more efficient + than others. + + R-trees support fast lookup by primary key value (O(logN), like + regular tables). + + Any combination of equality and range (<, <=, >, >=) constraints + on spatial data columns may be used to optimize other queries. This + is the key advantage to using r-tree tables instead of creating + indices on regular tables. + + 1.4 Introspection and Analysis. + + TODO: Describe rtreenode() and rtreedepth() functions. + + +2. COMPILATION AND USAGE + + The easiest way to compile and use the RTREE extension is to build + and use it as a dynamically loadable SQLite extension. To do this + using gcc on *nix: + + gcc -shared rtree.c -o libSqliteRtree.so + + You may need to add "-I" flags so that gcc can find sqlite3ext.h + and sqlite3.h. The resulting shared lib, libSqliteRtree.so, may be + loaded into sqlite in the same way as any other dynamicly loadable + extension. + + +3. REFERENCES + + [1] Atonin Guttman, "R-trees - A Dynamic Index Structure For Spatial + Searching", University of California Berkeley, 1984. + + [2] Norbert Beckmann, Hans-Peter Kriegel, Ralf Schneider, Bernhard Seeger, + "The R*-tree: An Efficient and Robust Access Method for Points and + Rectangles", Universitaet Bremen, 1990. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree1.test --- sqlite3-3.4.2/ext/rtree/rtree1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree1.test 2009-06-12 03:37:46.000000000 +0100 @@ -0,0 +1,396 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the r-tree extension. +# +# $Id: rtree1.test,v 1.6 2008/12/22 15:04:32 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source [file join [file dirname [info script]] rtree_util.tcl] +source $testdir/tester.tcl + +# Test plan: +# +# rtree-1.*: Creating/destroying r-tree tables. +# rtree-2.*: Test the implicit constraints - unique rowid and +# (coord[N]<=coord[N+1]) for even values of N. Also +# automatic assigning of rowid values. +# rtree-3.*: Linear scans of r-tree data. +# rtree-4.*: Test INSERT +# rtree-5.*: Test DELETE +# rtree-6.*: Test UPDATE +# rtree-7.*: Test renaming an r-tree table. +# rtree-8.*: Test constrained scans of r-tree data. +# + +ifcapable !rtree { + finish_test + return +} + +#---------------------------------------------------------------------------- +# Test cases rtree-1.* test CREATE and DROP table statements. +# + +# Test creating and dropping an rtree table. 
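+# Creating the virtual table also creates the three shadow tables
+# (t1_node, t1_parent and t1_rowid); dropping the virtual table is
+# expected to remove all four entries from sqlite_master.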
+# +do_test rtree-1.1.1 { + execsql { CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2) } +} {} +do_test rtree-1.1.2 { + execsql { SELECT name FROM sqlite_master ORDER BY name } +} {t1 t1_node t1_parent t1_rowid} +do_test rtree-1.1.3 { + execsql { + DROP TABLE t1; + SELECT name FROM sqlite_master ORDER BY name; + } +} {} + +# Test creating and dropping an rtree table with an odd name in +# an attached database. +# +do_test rtree-1.2.1 { + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.'a" "b' USING rtree(ii, x1, x2, y1, y2); + } +} {} +do_test rtree-1.2.2 { + execsql { SELECT name FROM sqlite_master ORDER BY name } +} {} +do_test rtree-1.2.3 { + execsql { SELECT name FROM aux.sqlite_master ORDER BY name } +} {{a" "b} {a" "b_node} {a" "b_parent} {a" "b_rowid}} +do_test rtree-1.2.4 { + execsql { + DROP TABLE aux.'a" "b'; + SELECT name FROM aux.sqlite_master ORDER BY name; + } +} {} + +# Test that the logic for checking the number of columns specified +# for an rtree table. Acceptable values are odd numbers between 3 and +# 11, inclusive. +# +set cols [list i1 i2 i3 i4 i5 i6 i7 i8 i9 iA iB iC iD iE iF iG iH iI iJ iK] +for {set nCol 1} {$nCol<[llength $cols]} {incr nCol} { + + set columns [join [lrange $cols 0 [expr {$nCol-1}]] ,] + + set X {0 {}} + if {$nCol%2 == 0} { set X {1 {Wrong number of columns for an rtree table}} } + if {$nCol < 3} { set X {1 {Too few columns for an rtree table}} } + if {$nCol > 11} { set X {1 {Too many columns for an rtree table}} } + + do_test rtree-1.3.$nCol { + catchsql " + CREATE VIRTUAL TABLE t1 USING rtree($columns); + " + } $X + + catchsql { DROP TABLE t1 } +} + +# Test that it is possible to open an existing database that contains +# r-tree tables. +# +do_test rtree-1.4.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2); + INSERT INTO t1 VALUES(1, 5.0, 10.0); + INSERT INTO t1 VALUES(2, 15.0, 20.0); + } +} {} +do_test rtree-1.4.2 { + db close + sqlite3 db test.db + execsql { SELECT * FROM t1 ORDER BY ii } +} {1 5.0 10.0 2 15.0 20.0} +do_test rtree-1.4.3 { + execsql { DROP TABLE t1 } +} {} + +# Test that it is possible to create an r-tree table with ridiculous +# column names. +# +do_test rtree-1.5.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING rtree("the key", "x dim.", "x2'dim"); + INSERT INTO t1 VALUES(1, 2, 3); + SELECT "the key", "x dim.", "x2'dim" FROM t1; + } +} {1 2.0 3.0} +do_test rtree-1.5.1 { + execsql { DROP TABLE t1 } +} {} + +# Force the r-tree constructor to fail. 
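+# An ordinary table that clashes with one of the shadow table names
+# (here t1_rowid) prevents the module from creating its own shadow
+# tables, so the CREATE VIRTUAL TABLE statement should fail.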
+# +do_test rtree-1.6.1 { + execsql { CREATE TABLE t1_rowid(a); } + catchsql { + CREATE VIRTUAL TABLE t1 USING rtree("the key", "x dim.", "x2'dim"); + } +} {1 {table "t1_rowid" already exists}} +do_test rtree-1.6.1 { + execsql { DROP TABLE t1_rowid } +} {} + +#---------------------------------------------------------------------------- +# Test cases rtree-2.* +# +do_test rtree-2.1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2); + SELECT * FROM t1; + } +} {} + +do_test rtree-2.1.2 { + execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) } + execsql { SELECT * FROM t1 } +} {1 1.0 3.0 2.0 4.0} +do_test rtree-2.1.3 { + execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) } + execsql { SELECT rowid FROM t1 ORDER BY rowid } +} {1 2} +do_test rtree-2.1.3 { + execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) } + execsql { SELECT ii FROM t1 ORDER BY ii } +} {1 2 3} + +do_test rtree-2.2.1 { + catchsql { INSERT INTO t1 VALUES(2, 1, 3, 2, 4) } +} {1 {constraint failed}} +do_test rtree-2.2.2 { + catchsql { INSERT INTO t1 VALUES(4, 1, 3, 4, 2) } +} {1 {constraint failed}} +do_test rtree-2.2.3 { + catchsql { INSERT INTO t1 VALUES(4, 3, 1, 2, 4) } +} {1 {constraint failed}} +do_test rtree-2.2.4 { + execsql { SELECT ii FROM t1 ORDER BY ii } +} {1 2 3} + +do_test rtree-2.X { + execsql { DROP TABLE t1 } +} {} + +#---------------------------------------------------------------------------- +# Test cases rtree-3.* test linear scans of r-tree table data. To test +# this we have to insert some data into an r-tree, but that is not the +# focus of these tests. +# +do_test rtree-3.1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2); + SELECT * FROM t1; + } +} {} +do_test rtree-3.1.2 { + execsql { + INSERT INTO t1 VALUES(5, 1, 3, 2, 4); + SELECT * FROM t1; + } +} {5 1.0 3.0 2.0 4.0} +do_test rtree-3.1.3 { + execsql { + INSERT INTO t1 VALUES(6, 2, 6, 4, 8); + SELECT * FROM t1; + } +} {5 1.0 3.0 2.0 4.0 6 2.0 6.0 4.0 8.0} + +# Test the constraint on the coordinates (c[i]<=c[i+1] where (i%2==0)): +do_test rtree-3.2.1 { + catchsql { INSERT INTO t1 VALUES(7, 2, 6, 4, 3) } +} {1 {constraint failed}} +do_test rtree-3.2.2 { + catchsql { INSERT INTO t1 VALUES(8, 2, 6, 3, 3) } +} {0 {}} + +#---------------------------------------------------------------------------- +# Test cases rtree-5.* test DELETE operations. +# +do_test rtree-5.1.1 { + execsql { CREATE VIRTUAL TABLE t2 USING rtree(ii, x1, x2) } +} {} +do_test rtree-5.1.2 { + execsql { + INSERT INTO t2 VALUES(1, 10, 20); + INSERT INTO t2 VALUES(2, 30, 40); + INSERT INTO t2 VALUES(3, 50, 60); + SELECT * FROM t2 ORDER BY ii; + } +} {1 10.0 20.0 2 30.0 40.0 3 50.0 60.0} +do_test rtree-5.1.3 { + execsql { + DELETE FROM t2 WHERE ii=2; + SELECT * FROM t2 ORDER BY ii; + } +} {1 10.0 20.0 3 50.0 60.0} +do_test rtree-5.1.4 { + execsql { + DELETE FROM t2 WHERE ii=1; + SELECT * FROM t2 ORDER BY ii; + } +} {3 50.0 60.0} +do_test rtree-5.1.5 { + execsql { + DELETE FROM t2 WHERE ii=3; + SELECT * FROM t2 ORDER BY ii; + } +} {} +do_test rtree-5.1.6 { + execsql { SELECT * FROM t2_rowid } +} {} + +#---------------------------------------------------------------------------- +# Test cases rtree-5.* test UPDATE operations. 
+# +do_test rtree-6.1.1 { + execsql { CREATE VIRTUAL TABLE t3 USING rtree(ii, x1, x2, y1, y2) } +} {} +do_test rtree-6.1.2 { + execsql { + INSERT INTO t3 VALUES(1, 2, 3, 4, 5); + UPDATE t3 SET x2=5; + SELECT * FROM t3; + } +} {1 2.0 5.0 4.0 5.0} +do_test rtree-6.1.3 { + execsql { UPDATE t3 SET ii = 2 } + execsql { SELECT * FROM t3 } +} {2 2.0 5.0 4.0 5.0} + +#---------------------------------------------------------------------------- +# Test cases rtree-7.* test rename operations. +# +do_test rtree-7.1.1 { + execsql { + CREATE VIRTUAL TABLE t4 USING rtree(ii, x1, x2, y1, y2, z1, z2); + INSERT INTO t4 VALUES(1, 2, 3, 4, 5, 6, 7); + } +} {} +do_test rtree-7.1.2 { + execsql { ALTER TABLE t4 RENAME TO t5 } + execsql { SELECT * FROM t5 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.3 { + db close + sqlite3 db test.db + execsql { SELECT * FROM t5 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.4 { + execsql { ALTER TABLE t5 RENAME TO 'raisara "one"'''} + execsql { SELECT * FROM "raisara ""one""'" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.5 { + execsql { SELECT * FROM 'raisara "one"''' } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.6 { + execsql { ALTER TABLE "raisara ""one""'" RENAME TO "abc 123" } + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.7 { + db close + sqlite3 db test.db + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} + +# An error midway through a rename operation. +do_test rtree-7.2.1 { + execsql { + CREATE TABLE t4_node(a); + } + catchsql { ALTER TABLE "abc 123" RENAME TO t4 } +} {1 {SQL logic error or missing database}} +do_test rtree-7.2.2 { + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.2.3 { + execsql { + DROP TABLE t4_node; + CREATE TABLE t4_rowid(a); + } + catchsql { ALTER TABLE "abc 123" RENAME TO t4 } +} {1 {SQL logic error or missing database}} +do_test rtree-7.2.4 { + db close + sqlite3 db test.db + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.2.5 { + execsql { DROP TABLE t4_rowid } + execsql { ALTER TABLE "abc 123" RENAME TO t4 } + execsql { SELECT * FROM t4 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} + + +#---------------------------------------------------------------------------- +# Test cases rtree-8.* +# + +# Test that the function to determine if a leaf cell is part of the +# result set works. +do_test rtree-8.1.1 { + execsql { + CREATE VIRTUAL TABLE t6 USING rtree(ii, x1, x2); + INSERT INTO t6 VALUES(1, 3, 7); + INSERT INTO t6 VALUES(2, 4, 6); + } +} {} +do_test rtree-8.1.2 { execsql { SELECT ii FROM t6 WHERE x1>2 } } {1 2} +do_test rtree-8.1.3 { execsql { SELECT ii FROM t6 WHERE x1>3 } } {2} +do_test rtree-8.1.4 { execsql { SELECT ii FROM t6 WHERE x1>4 } } {} +do_test rtree-8.1.5 { execsql { SELECT ii FROM t6 WHERE x1>5 } } {} +do_test rtree-8.1.6 { execsql { SELECT ii FROM t6 WHERE x1<3 } } {} +do_test rtree-8.1.7 { execsql { SELECT ii FROM t6 WHERE x1<4 } } {1} +do_test rtree-8.1.8 { execsql { SELECT ii FROM t6 WHERE x1<5 } } {1 2} + +#---------------------------------------------------------------------------- +# Test cases rtree-9.* +# +# Test that ticket #3549 is fixed. 
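+#
+# The statements below build a 40-row ordinary table and a matching
+# 40-row r-tree, then run self-join queries (40*40 = 1600 result rows)
+# that previously caused a crash.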
+do_test rtree-9.1 { + execsql { + CREATE TABLE foo (id INTEGER PRIMARY KEY); + CREATE VIRTUAL TABLE bar USING rtree (id, minX, maxX, minY, maxY); + INSERT INTO foo VALUES (null); + INSERT INTO foo SELECT null FROM foo; + INSERT INTO foo SELECT null FROM foo; + INSERT INTO foo SELECT null FROM foo; + INSERT INTO foo SELECT null FROM foo; + INSERT INTO foo SELECT null FROM foo; + INSERT INTO foo SELECT null FROM foo; + DELETE FROM foo WHERE id > 40; + INSERT INTO bar SELECT NULL, 0, 0, 0, 0 FROM foo; + } +} {} + +# This used to crash. +do_test rtree-9.2 { + execsql { + SELECT count(*) FROM bar b1, bar b2, foo s1 WHERE s1.id = b1.id; + } +} {1600} +do_test rtree-9.3 { + execsql { + SELECT count(*) FROM bar b1, bar b2, foo s1 + WHERE b1.minX <= b2.maxX AND s1.id = b1.id; + } +} {1600} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree2.test --- sqlite3-3.4.2/ext/rtree/rtree2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree2.test 2008-07-14 16:37:01.000000000 +0100 @@ -0,0 +1,152 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the r-tree extension. +# +# $Id: rtree2.test,v 1.4 2008/07/14 15:37:01 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source [file join [file dirname [info script]] rtree_util.tcl] +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +set ::NROW 1000 +set ::NDEL 10 +set ::NSELECT 100 + +if {[info exists ISQUICK] && $ISQUICK} { + set ::NROW 100 + set ::NSELECT 10 +} + +foreach module {rtree_i32 rtree} { + for {set nDim 1} {$nDim <= 5} {incr nDim} { + + do_test rtree2-$module.$nDim.1 { + set cols [list] + foreach c [list c0 c1 c2 c3 c4 c5 c6 c7 c8 c9] { + lappend cols "$c REAL" + } + set cols [join [lrange $cols 0 [expr {$nDim*2-1}]] ", "] + execsql " + CREATE VIRTUAL TABLE t1 USING ${module}(ii, $cols); + CREATE TABLE t2 (ii, $cols); + " + } {} + + do_test rtree2-$module.$nDim.2 { + db transaction { + for {set ii 0} {$ii < $::NROW} {incr ii} { + #puts "Row $ii" + set values [list] + for {set jj 0} {$jj<$nDim*2} {incr jj} { + lappend values [expr int(rand()*1000)] + } + set values [join $values ,] + #puts [rtree_treedump db t1] + #puts "INSERT INTO t2 VALUES($ii, $values)" + set rc [catch {db eval "INSERT INTO t1 VALUES($ii, $values)"}] + if {$rc} { + incr ii -1 + } else { + db eval "INSERT INTO t2 VALUES($ii, $values)" + } + #if {[rtree_check db t1]} { + #puts [rtree_treedump db t1] + #exit + #} + } + } + + set t1 [execsql {SELECT * FROM t1 ORDER BY ii}] + set t2 [execsql {SELECT * FROM t2 ORDER BY ii}] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + puts $t1 + puts $t2 + } + set rc + } {1} + + do_test rtree2-$module.$nDim.3 { + rtree_check db t1 + } 0 + + set OPS [list < > <= >= =] + for {set ii 0} {$ii < $::NSELECT} {incr ii} { + do_test rtree2-$module.$nDim.4.$ii.1 { + set where [list] + foreach look_three_dots! {. . 
.} { + set colidx [expr int(rand()*($nDim*2+1))-1] + if {$colidx<0} { + set col ii + } else { + set col "c$colidx" + } + set op [lindex $OPS [expr int(rand()*[llength $OPS])]] + set val [expr int(rand()*1000)] + lappend where "$col $op $val" + } + set where [join $where " AND "] + + set t1 [execsql "SELECT * FROM t1 WHERE $where ORDER BY ii"] + set t2 [execsql "SELECT * FROM t2 WHERE $where ORDER BY ii"] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + #puts $where + puts $t1 + puts $t2 + #puts [rtree_treedump db t1] + #breakpoint + #set t1 [execsql "SELECT * FROM t1 WHERE $where ORDER BY ii"] + #exit + } + set rc + } {1} + } + + for {set ii 0} {$ii < $::NROW} {incr ii $::NDEL} { + #puts [rtree_treedump db t1] + do_test rtree2-$module.$nDim.5.$ii.1 { + execsql "DELETE FROM t2 WHERE ii <= $::ii" + execsql "DELETE FROM t1 WHERE ii <= $::ii" + + set t1 [execsql {SELECT * FROM t1 ORDER BY ii}] + set t2 [execsql {SELECT * FROM t2 ORDER BY ii}] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + puts $t1 + puts $t2 + } + set rc + } {1} + do_test rtree2-$module.$nDim.5.$ii.2 { + rtree_check db t1 + } {0} + } + + do_test rtree2-$module.$nDim.6 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + } + } {} + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree3.test --- sqlite3-3.4.2/ext/rtree/rtree3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree3.test 2008-06-23 16:55:52.000000000 +0100 @@ -0,0 +1,74 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing that the r-tree correctly handles +# out-of-memory conditions. +# +# $Id: rtree3.test,v 1.2 2008/06/23 15:55:52 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +# Only run these tests if memory debugging is turned on. +# +source $testdir/malloc_common.tcl +if {!$MEMDEBUG} { + puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." 
+ finish_test + return +} + +do_malloc_test rtree3-1 -sqlbody { + BEGIN TRANSACTION; + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); + INSERT INTO rt VALUES(NULL, 13, 15, 17, 19); + DELETE FROM rt WHERE ii = 1; + SELECT * FROM rt; + SELECT ii FROM rt WHERE ii = 2; + COMMIT; +} +do_malloc_test rtree3-2 -sqlprep { + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); +} -sqlbody { + DROP TABLE rt; +} + + +do_malloc_test rtree3-3 -sqlprep { + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); +} -tclbody { + db eval BEGIN + for {set ii 0} {$ii < 100} {incr ii} { + set f [expr rand()] + db eval {INSERT INTO rt VALUES(NULL, $f*10.0, $f*10.0, $f*15.0, $f*15.0)} + } + db eval COMMIT + db eval BEGIN + for {set ii 0} {$ii < 100} {incr ii} { + set f [expr rand()] + db eval { DELETE FROM rt WHERE x1<($f*10.0) AND x1>($f*10.5) } + } + db eval COMMIT +} + +finish_test + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree4.test --- sqlite3-3.4.2/ext/rtree/rtree4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree4.test 2008-06-23 16:55:52.000000000 +0100 @@ -0,0 +1,236 @@ +# 2008 May 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Randomized test cases for the rtree extension. +# +# $Id: rtree4.test,v 1.3 2008/06/23 15:55:52 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +set ::NROW 2500 +if {[info exists ISQUICK] && $ISQUICK} { + set ::NROW 250 +} + +# Return a floating point number between -X and X. +# +proc rand {X} { + return [expr {int((rand()-0.5)*1024.0*$X)/512.0}] +} + +# Return a positive floating point number less than or equal to X +# +proc randincr {X} { + while 1 { + set r [expr {int(rand()*$X*32.0)/32.0}] + if {$r>0.0} {return $r} + } +} + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# Always use the same random seed so that the sequence of tests +# is repeatable. +# +expr {srand(1234)} + +# Run these tests for all number of dimensions between 1 and 5. +# +for {set nDim 1} {$nDim<=5} {incr nDim} { + + # Construct an rtree virtual table and an ordinary btree table + # to mirror it. The ordinary table should be much slower (since + # it has to do a full table scan) but should give the exact same + # answers. 
+ # + do_test rtree4-$nDim.1 { + set clist {} + set cklist {} + for {set i 0} {$i<$nDim} {incr i} { + lappend clist mn$i mx$i + lappend cklist "mn$i=$mn mx$j<=$mx + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.2 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do an overlaps query on all dimensions + # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + set mn [rand 10000] + set mx [expr {$mn+[randincr 500]}] + lappend where mx$j>=$mn mn$j<=$mx + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.3 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do a contained-in query with surplus contraints at the beginning. + # This should force a full-table scan on the rtree. + # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + lappend where mn$j>-10000 mx$j<10000 + } + for {set j 0} {$j<$nDim} {incr j} { + set mn [rand 10000] + set mx [expr {$mn+[randincr 500]}] + lappend where mn$j>=$mn mx$j<=$mx + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.3 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do an overlaps query with surplus contraints at the beginning. + # This should force a full-table scan on the rtree. + # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + lappend where mn$j>=-10000 mx$j<=10000 + } + for {set j 0} {$j<$nDim} {incr j} { + set mn [rand 10000] + set mx [expr {$mn+[randincr 500]}] + lappend where mx$j>$mn mn$j<$mx + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.4 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do a contained-in query with surplus contraints at the end + # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + set mn [rand 10000] + set mx [expr {$mn+[randincr 500]}] + lappend where mn$j>=$mn mx$j<$mx + } + for {set j [expr {$nDim-1}]} {$j>=0} {incr j -1} { + lappend where mn$j>=-10000 mx$j<10000 + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.5 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do an overlaps query with surplus contraints at the end + # + set where {} + for {set j [expr {$nDim-1}]} {$j>=0} {incr j -1} { + set mn [rand 10000] + set mx [expr {$mn+[randincr 500]}] + lappend where mx$j>$mn mn$j<=$mx + } + for {set j 0} {$j<$nDim} {incr j} { + lappend where mx$j>-10000 mn$j<=10000 + } + set where "WHERE [join $where { AND }]" + do_test rtree-$nDim.2.$i.6 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do a contained-in query with surplus contraints where the + # constraints appear in a random order. 
+ # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + set mn1 [rand 10000] + set mn2 [expr {$mn1+[randincr 100]}] + set mx1 [expr {$mn2+[randincr 400]}] + set mx2 [expr {$mx1+[randincr 100]}] + lappend where mn$j>=$mn1 mn$j>$mn2 mx$j<$mx1 mx$j<=$mx2 + } + set where "WHERE [join [scramble $where] { AND }]" + do_test rtree-$nDim.2.$i.7 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + + # Do an overlaps query with surplus contraints where the + # constraints appear in a random order. + # + set where {} + for {set j 0} {$j<$nDim} {incr j} { + set mn1 [rand 10000] + set mn2 [expr {$mn1+[randincr 100]}] + set mx1 [expr {$mn2+[randincr 400]}] + set mx2 [expr {$mx1+[randincr 100]}] + lappend where mx$j>=$mn1 mx$j>$mn2 mn$j<$mx1 mn$j<=$mx2 + } + set where "WHERE [join [scramble $where] { AND }]" + do_test rtree-$nDim.2.$i.8 { + list $where [db eval "SELECT id FROM rx $where ORDER BY id"] + } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]] + } + +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree5.test --- sqlite3-3.4.2/ext/rtree/rtree5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree5.test 2008-07-14 16:37:01.000000000 +0100 @@ -0,0 +1,80 @@ +# 2008 Jul 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the r-tree extension when it is +# configured to store values as 32 bit integers. +# +# $Id: rtree5.test,v 1.1 2008/07/14 15:37:01 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. 
test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +do_test rtree5-1.0 { + execsql { CREATE VIRTUAL TABLE t1 USING rtree_i32(id, x1, x2, y1, y2) } +} {} +do_test rtree5-1.1 { + execsql { INSERT INTO t1 VALUES(1, 5, 10, 4, 11.2) } +} {} +do_test rtree5-1.2 { + execsql { SELECT * FROM t1 } +} {1 5 10 4 11} +do_test rtree5-1.3 { + execsql { SELECT typeof(x1) FROM t1 } +} {integer} + +do_test rtree5-1.4 { + execsql { SELECT x1==5 FROM t1 } +} {1} +do_test rtree5-1.5 { + execsql { SELECT x1==5.2 FROM t1 } +} {0} +do_test rtree5-1.6 { + execsql { SELECT x1==5.0 FROM t1 } +} {1} + +do_test rtree5-1.7 { + execsql { SELECT count(*) FROM t1 WHERE x1==5 } +} {1} +do_test rtree5-1.8 { + execsql { SELECT count(*) FROM t1 WHERE x1==5.2 } +} {0} +do_test rtree5-1.9 { + execsql { SELECT count(*) FROM t1 WHERE x1==5.0 } +} {1} + +do_test rtree5-1.10 { + execsql { SELECT (1<<31)-5, (1<<31)-1, -1*(1<<31), -1*(1<<31)+5 } +} {2147483643 2147483647 -2147483648 -2147483643} +do_test rtree5-1.10 { + execsql { + INSERT INTO t1 VALUES(2, (1<<31)-5, (1<<31)-1, -1*(1<<31), -1*(1<<31)+5) + } +} {} +do_test rtree5-1.12 { + execsql { SELECT * FROM t1 WHERE id=2 } +} {2 2147483643 2147483647 -2147483648 -2147483643} +do_test rtree5-1.13 { + execsql { + SELECT * FROM t1 WHERE + x1=2147483643 AND x2=2147483647 AND + y1=-2147483648 AND y2=-2147483643 + } +} {2 2147483643 2147483647 -2147483648 -2147483643} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree6.test --- sqlite3-3.4.2/ext/rtree/rtree6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree6.test 2008-09-01 13:47:00.000000000 +0100 @@ -0,0 +1,111 @@ +# 2008 Sep 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: rtree6.test,v 1.1 2008/09/01 12:47:00 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. 
test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +# Operator Byte Value +# ---------------------- +# = 0x41 ('A') +# <= 0x42 ('B') +# < 0x43 ('C') +# >= 0x44 ('D') +# > 0x45 ('E') +# ---------------------- + +proc rtree_strategy {sql} { + set ret [list] + db eval "explain $sql" a { + if {$a(opcode) eq "VFilter"} { + lappend ret $a(p4) + } + } + set ret +} + +proc query_plan {sql} { + set ret [list] + db eval "explain query plan $sql" a { + lappend ret $a(detail) + } + set ret +} + +do_test rtree6-1.1 { + execsql { + CREATE TABLE t2(k INTEGER PRIMARY KEY, v); + CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2); + } +} {} + +do_test rtree6-1.2 { + rtree_strategy {SELECT * FROM t1 WHERE x1>10} +} {Ea} + +do_test rtree6-1.3 { + rtree_strategy {SELECT * FROM t1 WHERE x1<10} +} {Ca} + +do_test rtree6-1.4 { + rtree_strategy {SELECT * FROM t1,t2 WHERE k=ii AND x1<10} +} {Ca} + +do_test rtree6-1.5 { + rtree_strategy {SELECT * FROM t1,t2 WHERE k=+ii AND x1<10} +} {Ca} + +do_test rtree6.2.1 { + query_plan {SELECT * FROM t1,t2 WHERE k=+ii AND x1<10} +} [list \ + {TABLE t1 VIRTUAL TABLE INDEX 2:Ca} \ + {TABLE t2 USING PRIMARY KEY} \ +] + +do_test rtree6.2.2 { + query_plan {SELECT * FROM t1,t2 WHERE k=ii AND x1<10} +} [list \ + {TABLE t1 VIRTUAL TABLE INDEX 2:Ca} \ + {TABLE t2 USING PRIMARY KEY} \ +] + +do_test rtree6.2.3 { + query_plan {SELECT * FROM t1,t2 WHERE k=ii} +} [list \ + {TABLE t2} \ + {TABLE t1 VIRTUAL TABLE INDEX 1:} \ +] + +do_test rtree6.2.4 { + query_plan {SELECT * FROM t1,t2 WHERE v=10 and x1<10 and x2>10} +} [list \ + {TABLE t2} \ + {TABLE t1 VIRTUAL TABLE INDEX 2:CaEb} \ +] + +do_test rtree6.2.5 { + query_plan {SELECT * FROM t1,t2 WHERE k=ii AND x1 +#include + +#ifndef SQLITE_AMALGAMATION +typedef sqlite3_int64 i64; +typedef unsigned char u8; +typedef unsigned int u32; +#endif + +typedef struct Rtree Rtree; +typedef struct RtreeCursor RtreeCursor; +typedef struct RtreeNode RtreeNode; +typedef struct RtreeCell RtreeCell; +typedef struct RtreeConstraint RtreeConstraint; +typedef union RtreeCoord RtreeCoord; + +/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */ +#define RTREE_MAX_DIMENSIONS 5 + +/* Size of hash table Rtree.aHash. This hash table is not expected to +** ever contain very many entries, so a fixed number of buckets is +** used. +*/ +#define HASHSIZE 128 + +/* +** An rtree virtual-table object. +*/ +struct Rtree { + sqlite3_vtab base; + sqlite3 *db; /* Host database connection */ + int iNodeSize; /* Size in bytes of each node in the node table */ + int nDim; /* Number of dimensions */ + int nBytesPerCell; /* Bytes consumed per cell */ + int iDepth; /* Current depth of the r-tree structure */ + char *zDb; /* Name of database containing r-tree table */ + char *zName; /* Name of r-tree table */ + RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */ + int nBusy; /* Current number of users of this structure */ + + /* List of nodes removed during a CondenseTree operation. List is + ** linked together via the pointer normally used for hash chains - + ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree + ** headed by the node (leaf nodes have RtreeNode.iNode==0). 
+ */ + RtreeNode *pDeleted; + int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ + + /* Statements to read/write/delete a record from xxx_node */ + sqlite3_stmt *pReadNode; + sqlite3_stmt *pWriteNode; + sqlite3_stmt *pDeleteNode; + + /* Statements to read/write/delete a record from xxx_rowid */ + sqlite3_stmt *pReadRowid; + sqlite3_stmt *pWriteRowid; + sqlite3_stmt *pDeleteRowid; + + /* Statements to read/write/delete a record from xxx_parent */ + sqlite3_stmt *pReadParent; + sqlite3_stmt *pWriteParent; + sqlite3_stmt *pDeleteParent; + + int eCoordType; +}; + +/* Possible values for eCoordType: */ +#define RTREE_COORD_REAL32 0 +#define RTREE_COORD_INT32 1 + +/* +** The minimum number of cells allowed for a node is a third of the +** maximum. In Gutman's notation: +** +** m = M/3 +** +** If an R*-tree "Reinsert" operation is required, the same number of +** cells are removed from the overfull node and reinserted into the tree. +*/ +#define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3) +#define RTREE_REINSERT(p) RTREE_MINCELLS(p) +#define RTREE_MAXCELLS 51 + +/* +** An rtree cursor object. +*/ +struct RtreeCursor { + sqlite3_vtab_cursor base; + RtreeNode *pNode; /* Node cursor is currently pointing at */ + int iCell; /* Index of current cell in pNode */ + int iStrategy; /* Copy of idxNum search parameter */ + int nConstraint; /* Number of entries in aConstraint */ + RtreeConstraint *aConstraint; /* Search constraints. */ +}; + +union RtreeCoord { + float f; + int i; +}; + +/* +** The argument is an RtreeCoord. Return the value stored within the RtreeCoord +** formatted as a double. This macro assumes that local variable pRtree points +** to the Rtree structure associated with the RtreeCoord. +*/ +#define DCOORD(coord) ( \ + (pRtree->eCoordType==RTREE_COORD_REAL32) ? \ + ((double)coord.f) : \ + ((double)coord.i) \ +) + +/* +** A search constraint. +*/ +struct RtreeConstraint { + int iCoord; /* Index of constrained coordinate */ + int op; /* Constraining operation */ + double rValue; /* Constraint value. */ +}; + +/* Possible values for RtreeConstraint.op */ +#define RTREE_EQ 0x41 +#define RTREE_LE 0x42 +#define RTREE_LT 0x43 +#define RTREE_GE 0x44 +#define RTREE_GT 0x45 + +/* +** An rtree structure node. +** +** Data format (RtreeNode.zData): +** +** 1. If the node is the root node (node 1), then the first 2 bytes +** of the node contain the tree depth as a big-endian integer. +** For non-root nodes, the first 2 bytes are left unused. +** +** 2. The next 2 bytes contain the number of entries currently +** stored in the node. +** +** 3. The remainder of the node contains the node entries. Each entry +** consists of a single 8-byte integer followed by an even number +** of 4-byte coordinates. For leaf nodes the integer is the rowid +** of a record. For internal nodes it is the node number of a +** child page. +*/ +struct RtreeNode { + RtreeNode *pParent; /* Parent node */ + i64 iNode; + int nRef; + int isDirty; + u8 *zData; + RtreeNode *pNext; /* Next node in this hash chain */ +}; +#define NCELL(pNode) readInt16(&(pNode)->zData[2]) + +/* +** Structure to store a deserialized rtree record. +*/ +struct RtreeCell { + i64 iRowid; + RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; +}; + +#ifndef MAX +# define MAX(x,y) ((x) < (y) ? (y) : (x)) +#endif +#ifndef MIN +# define MIN(x,y) ((x) > (y) ? (y) : (x)) +#endif + +/* +** Functions to deserialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The deserialized value is returned. 
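+**
+** All values are stored big-endian (most significant byte first). For
+** example, readInt16() applied to the two bytes 0x01 0x03 returns
+** (0x01<<8) + 0x03 = 259.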
+*/ +static int readInt16(u8 *p){ + return (p[0]<<8) + p[1]; +} +static void readCoord(u8 *p, RtreeCoord *pCoord){ + u32 i = ( + (((u32)p[0]) << 24) + + (((u32)p[1]) << 16) + + (((u32)p[2]) << 8) + + (((u32)p[3]) << 0) + ); + *(u32 *)pCoord = i; +} +static i64 readInt64(u8 *p){ + return ( + (((i64)p[0]) << 56) + + (((i64)p[1]) << 48) + + (((i64)p[2]) << 40) + + (((i64)p[3]) << 32) + + (((i64)p[4]) << 24) + + (((i64)p[5]) << 16) + + (((i64)p[6]) << 8) + + (((i64)p[7]) << 0) + ); +} + +/* +** Functions to serialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The value returned is the number of bytes written +** to the argument buffer (always 2, 4 and 8 respectively). +*/ +static int writeInt16(u8 *p, int i){ + p[0] = (i>> 8)&0xFF; + p[1] = (i>> 0)&0xFF; + return 2; +} +static int writeCoord(u8 *p, RtreeCoord *pCoord){ + u32 i; + assert( sizeof(RtreeCoord)==4 ); + assert( sizeof(u32)==4 ); + i = *(u32 *)pCoord; + p[0] = (i>>24)&0xFF; + p[1] = (i>>16)&0xFF; + p[2] = (i>> 8)&0xFF; + p[3] = (i>> 0)&0xFF; + return 4; +} +static int writeInt64(u8 *p, i64 i){ + p[0] = (i>>56)&0xFF; + p[1] = (i>>48)&0xFF; + p[2] = (i>>40)&0xFF; + p[3] = (i>>32)&0xFF; + p[4] = (i>>24)&0xFF; + p[5] = (i>>16)&0xFF; + p[6] = (i>> 8)&0xFF; + p[7] = (i>> 0)&0xFF; + return 8; +} + +/* +** Increment the reference count of node p. +*/ +static void nodeReference(RtreeNode *p){ + if( p ){ + p->nRef++; + } +} + +/* +** Clear the content of node p (set all bytes to 0x00). +*/ +static void nodeZero(Rtree *pRtree, RtreeNode *p){ + if( p ){ + memset(&p->zData[2], 0, pRtree->iNodeSize-2); + p->isDirty = 1; + } +} + +/* +** Given a node number iNode, return the corresponding key to use +** in the Rtree.aHash table. +*/ +static int nodeHash(i64 iNode){ + return ( + (iNode>>56) ^ (iNode>>48) ^ (iNode>>40) ^ (iNode>>32) ^ + (iNode>>24) ^ (iNode>>16) ^ (iNode>> 8) ^ (iNode>> 0) + ) % HASHSIZE; +} + +/* +** Search the node hash table for node iNode. If found, return a pointer +** to it. Otherwise, return 0. +*/ +static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){ + RtreeNode *p; + assert( iNode!=0 ); + for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext); + return p; +} + +/* +** Add node pNode to the node hash table. +*/ +static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){ + if( pNode ){ + int iHash; + assert( pNode->pNext==0 ); + iHash = nodeHash(pNode->iNode); + pNode->pNext = pRtree->aHash[iHash]; + pRtree->aHash[iHash] = pNode; + } +} + +/* +** Remove node pNode from the node hash table. +*/ +static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode **pp; + if( pNode->iNode!=0 ){ + pp = &pRtree->aHash[nodeHash(pNode->iNode)]; + for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); } + *pp = pNode->pNext; + pNode->pNext = 0; + } +} + +/* +** Allocate and return new r-tree node. Initially, (RtreeNode.iNode==0), +** indicating that node has not yet been assigned a node number. It is +** assigned a node number when nodeWrite() is called to write the +** node contents out to the database. +*/ +static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent, int zero){ + RtreeNode *pNode; + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); + if( pNode ){ + memset(pNode, 0, sizeof(RtreeNode) + (zero?pRtree->iNodeSize:0)); + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->pParent = pParent; + pNode->isDirty = 1; + nodeReference(pParent); + } + return pNode; +} + +/* +** Obtain a reference to an r-tree node. 
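+**
+** The in-memory hash table (Rtree.aHash) is consulted first; if the node
+** is already loaded, its reference count is incremented and it is
+** returned directly. Otherwise the node blob is read from the xxx_node
+** table using the pReadNode statement and the new node is added to the
+** hash table.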
+*/ +static int +nodeAcquire( + Rtree *pRtree, /* R-tree structure */ + i64 iNode, /* Node number to load */ + RtreeNode *pParent, /* Either the parent node or NULL */ + RtreeNode **ppNode /* OUT: Acquired node */ +){ + int rc; + RtreeNode *pNode; + + /* Check if the requested node is already in the hash table. If so, + ** increase its reference count and return it. + */ + if( (pNode = nodeHashLookup(pRtree, iNode)) ){ + assert( !pParent || !pNode->pParent || pNode->pParent==pParent ); + if( pParent && !pNode->pParent ){ + nodeReference(pParent); + pNode->pParent = pParent; + } + pNode->nRef++; + *ppNode = pNode; + return SQLITE_OK; + } + + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); + if( !pNode ){ + *ppNode = 0; + return SQLITE_NOMEM; + } + pNode->pParent = pParent; + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->iNode = iNode; + pNode->isDirty = 0; + pNode->pNext = 0; + + sqlite3_bind_int64(pRtree->pReadNode, 1, iNode); + rc = sqlite3_step(pRtree->pReadNode); + if( rc==SQLITE_ROW ){ + const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0); + memcpy(pNode->zData, zBlob, pRtree->iNodeSize); + nodeReference(pParent); + }else{ + sqlite3_free(pNode); + pNode = 0; + } + + *ppNode = pNode; + rc = sqlite3_reset(pRtree->pReadNode); + + if( rc==SQLITE_OK && iNode==1 ){ + pRtree->iDepth = readInt16(pNode->zData); + } + + assert( (rc==SQLITE_OK && pNode) || (pNode==0 && rc!=SQLITE_OK) ); + nodeHashInsert(pRtree, pNode); + + return rc; +} + +/* +** Overwrite cell iCell of node pNode with the contents of pCell. +*/ +static void nodeOverwriteCell( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iCell +){ + int ii; + u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; + p += writeInt64(p, pCell->iRowid); + for(ii=0; ii<(pRtree->nDim*2); ii++){ + p += writeCoord(p, &pCell->aCoord[ii]); + } + pNode->isDirty = 1; +} + +/* +** Remove cell the cell with index iCell from node pNode. +*/ +static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){ + u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; + u8 *pSrc = &pDst[pRtree->nBytesPerCell]; + int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell; + memmove(pDst, pSrc, nByte); + writeInt16(&pNode->zData[2], NCELL(pNode)-1); + pNode->isDirty = 1; +} + +/* +** Insert the contents of cell pCell into node pNode. If the insert +** is successful, return SQLITE_OK. +** +** If there is not enough free space in pNode, return SQLITE_FULL. +*/ +static int +nodeInsertCell( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell +){ + int nCell; /* Current number of cells in pNode */ + int nMaxCell; /* Maximum number of cells for pNode */ + + nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell; + nCell = NCELL(pNode); + + assert(nCell<=nMaxCell); + + if( nCellzData[2], nCell+1); + pNode->isDirty = 1; + } + + return (nCell==nMaxCell); +} + +/* +** If the node is dirty, write it out to the database. 
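+**
+** A node that has not yet been assigned a node number (iNode==0) is
+** written with a NULL key; the rowid that SQLite assigns to the new
+** xxx_node row then becomes the node number and the node is entered
+** into the hash table.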
+*/ +static int +nodeWrite(Rtree *pRtree, RtreeNode *pNode){ + int rc = SQLITE_OK; + if( pNode->isDirty ){ + sqlite3_stmt *p = pRtree->pWriteNode; + if( pNode->iNode ){ + sqlite3_bind_int64(p, 1, pNode->iNode); + }else{ + sqlite3_bind_null(p, 1); + } + sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC); + sqlite3_step(p); + pNode->isDirty = 0; + rc = sqlite3_reset(p); + if( pNode->iNode==0 && rc==SQLITE_OK ){ + pNode->iNode = sqlite3_last_insert_rowid(pRtree->db); + nodeHashInsert(pRtree, pNode); + } + } + return rc; +} + +/* +** Release a reference to a node. If the node is dirty and the reference +** count drops to zero, the node data is written to the database. +*/ +static int +nodeRelease(Rtree *pRtree, RtreeNode *pNode){ + int rc = SQLITE_OK; + if( pNode ){ + assert( pNode->nRef>0 ); + pNode->nRef--; + if( pNode->nRef==0 ){ + if( pNode->iNode==1 ){ + pRtree->iDepth = -1; + } + if( pNode->pParent ){ + rc = nodeRelease(pRtree, pNode->pParent); + } + if( rc==SQLITE_OK ){ + rc = nodeWrite(pRtree, pNode); + } + nodeHashDelete(pRtree, pNode); + sqlite3_free(pNode); + } + } + return rc; +} + +/* +** Return the 64-bit integer value associated with cell iCell of +** node pNode. If pNode is a leaf node, this is a rowid. If it is +** an internal node, then the 64-bit integer is a child page number. +*/ +static i64 nodeGetRowid( + Rtree *pRtree, + RtreeNode *pNode, + int iCell +){ + assert( iCellzData[4 + pRtree->nBytesPerCell*iCell]); +} + +/* +** Return coordinate iCoord from cell iCell in node pNode. +*/ +static void nodeGetCoord( + Rtree *pRtree, + RtreeNode *pNode, + int iCell, + int iCoord, + RtreeCoord *pCoord /* Space to write result to */ +){ + readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); +} + +/* +** Deserialize cell iCell of node pNode. Populate the structure pointed +** to by pCell with the results. +*/ +static void nodeGetCell( + Rtree *pRtree, + RtreeNode *pNode, + int iCell, + RtreeCell *pCell +){ + int ii; + pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell); + for(ii=0; iinDim*2; ii++){ + nodeGetCoord(pRtree, pNode, iCell, ii, &pCell->aCoord[ii]); + } +} + + +/* Forward declaration for the function that does the work of +** the virtual table module xCreate() and xConnect() methods. +*/ +static int rtreeInit( + sqlite3 *, void *, int, const char *const*, sqlite3_vtab **, char **, int +); + +/* +** Rtree virtual table module xCreate method. +*/ +static int rtreeCreate( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 1); +} + +/* +** Rtree virtual table module xConnect method. +*/ +static int rtreeConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 0); +} + +/* +** Increment the r-tree reference count. +*/ +static void rtreeReference(Rtree *pRtree){ + pRtree->nBusy++; +} + +/* +** Decrement the r-tree reference count. When the reference count reaches +** zero the structure is deleted. 
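**
** For context (editor's note, not part of the original patch), the methods
** that do real work bracket themselves with this pair:
**
**   rtreeReference(pRtree);
**   ... use pRtree->pReadNode and the other prepared statements ...
**   rtreeRelease(pRtree);
**
** as rtreeFilter() and rtreeUpdate() below do, presumably so that the Rtree
** structure and its prepared statements remain valid for the duration of the
** call even if the table is disconnected while the call is in progress.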
+*/ +static void rtreeRelease(Rtree *pRtree){ + pRtree->nBusy--; + if( pRtree->nBusy==0 ){ + sqlite3_finalize(pRtree->pReadNode); + sqlite3_finalize(pRtree->pWriteNode); + sqlite3_finalize(pRtree->pDeleteNode); + sqlite3_finalize(pRtree->pReadRowid); + sqlite3_finalize(pRtree->pWriteRowid); + sqlite3_finalize(pRtree->pDeleteRowid); + sqlite3_finalize(pRtree->pReadParent); + sqlite3_finalize(pRtree->pWriteParent); + sqlite3_finalize(pRtree->pDeleteParent); + sqlite3_free(pRtree); + } +} + +/* +** Rtree virtual table module xDisconnect method. +*/ +static int rtreeDisconnect(sqlite3_vtab *pVtab){ + rtreeRelease((Rtree *)pVtab); + return SQLITE_OK; +} + +/* +** Rtree virtual table module xDestroy method. +*/ +static int rtreeDestroy(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + int rc; + char *zCreate = sqlite3_mprintf( + "DROP TABLE '%q'.'%q_node';" + "DROP TABLE '%q'.'%q_rowid';" + "DROP TABLE '%q'.'%q_parent';", + pRtree->zDb, pRtree->zName, + pRtree->zDb, pRtree->zName, + pRtree->zDb, pRtree->zName + ); + if( !zCreate ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0); + sqlite3_free(zCreate); + } + if( rc==SQLITE_OK ){ + rtreeRelease(pRtree); + } + + return rc; +} + +/* +** Rtree virtual table module xOpen method. +*/ +static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + int rc = SQLITE_NOMEM; + RtreeCursor *pCsr; + + pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor)); + if( pCsr ){ + memset(pCsr, 0, sizeof(RtreeCursor)); + pCsr->base.pVtab = pVTab; + rc = SQLITE_OK; + } + *ppCursor = (sqlite3_vtab_cursor *)pCsr; + + return rc; +} + +/* +** Rtree virtual table module xClose method. +*/ +static int rtreeClose(sqlite3_vtab_cursor *cur){ + Rtree *pRtree = (Rtree *)(cur->pVtab); + int rc; + RtreeCursor *pCsr = (RtreeCursor *)cur; + sqlite3_free(pCsr->aConstraint); + rc = nodeRelease(pRtree, pCsr->pNode); + sqlite3_free(pCsr); + return rc; +} + +/* +** Rtree virtual table module xEof method. +** +** Return non-zero if the cursor does not currently point to a valid +** record (i.e if the scan has finished), or zero otherwise. +*/ +static int rtreeEof(sqlite3_vtab_cursor *cur){ + RtreeCursor *pCsr = (RtreeCursor *)cur; + return (pCsr->pNode==0); +} + +/* +** Cursor pCursor currently points to a cell in a non-leaf page. +** Return true if the sub-tree headed by the cell is filtered +** (excluded) by the constraints in the pCursor->aConstraint[] +** array, or false otherwise. +*/ +static int testRtreeCell(Rtree *pRtree, RtreeCursor *pCursor){ + RtreeCell cell; + int ii; + int bRes = 0; + + nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell); + for(ii=0; bRes==0 && iinConstraint; ii++){ + RtreeConstraint *p = &pCursor->aConstraint[ii]; + double cell_min = DCOORD(cell.aCoord[(p->iCoord>>1)*2]); + double cell_max = DCOORD(cell.aCoord[(p->iCoord>>1)*2+1]); + + assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE + || p->op==RTREE_GT || p->op==RTREE_EQ + ); + + switch( p->op ){ + case RTREE_LE: case RTREE_LT: bRes = p->rValuerValue>cell_max; break; + case RTREE_EQ: + bRes = (p->rValue>cell_max || p->rValueaConstraint[] array, or false otherwise. +** +** This function assumes that the cell is part of a leaf node. 
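**
** A small example contrasting the two tests (editor's illustration, not part
** of the original patch), assuming the usual column layout (id, x1, x2, y1,
** y2) and the single constraint "x1 < 3.0", i.e. op RTREE_LT, iCoord 0,
** rValue 3.0:
**
**   In testRtreeCell() above, an interior cell whose first dimension spans
**   [5.0, 10.0] is filtered out, because the smallest x1 anywhere in that
**   sub-tree (cell_min = 5.0) is already too large to be < 3.0.
**
**   Here, the stored value is tested exactly: a leaf entry with x1 = 2.5
**   satisfies the constraint, while one with x1 = 3.0 does not.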
+*/ +static int testRtreeEntry(Rtree *pRtree, RtreeCursor *pCursor){ + RtreeCell cell; + int ii; + + nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell); + for(ii=0; iinConstraint; ii++){ + RtreeConstraint *p = &pCursor->aConstraint[ii]; + double coord = DCOORD(cell.aCoord[p->iCoord]); + int res; + assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE + || p->op==RTREE_GT || p->op==RTREE_EQ + ); + switch( p->op ){ + case RTREE_LE: res = (coord<=p->rValue); break; + case RTREE_LT: res = (coordrValue); break; + case RTREE_GE: res = (coord>=p->rValue); break; + case RTREE_GT: res = (coord>p->rValue); break; + case RTREE_EQ: res = (coord==p->rValue); break; + } + + if( !res ) return 1; + } + + return 0; +} + +/* +** Cursor pCursor currently points at a node that heads a sub-tree of +** height iHeight (if iHeight==0, then the node is a leaf). Descend +** to point to the left-most cell of the sub-tree that matches the +** configured constraints. +*/ +static int descendToCell( + Rtree *pRtree, + RtreeCursor *pCursor, + int iHeight, + int *pEof /* OUT: Set to true if cannot descend */ +){ + int isEof; + int rc; + int ii; + RtreeNode *pChild; + sqlite3_int64 iRowid; + + RtreeNode *pSavedNode = pCursor->pNode; + int iSavedCell = pCursor->iCell; + + assert( iHeight>=0 ); + + if( iHeight==0 ){ + isEof = testRtreeEntry(pRtree, pCursor); + }else{ + isEof = testRtreeCell(pRtree, pCursor); + } + if( isEof || iHeight==0 ){ + *pEof = isEof; + return SQLITE_OK; + } + + iRowid = nodeGetRowid(pRtree, pCursor->pNode, pCursor->iCell); + rc = nodeAcquire(pRtree, iRowid, pCursor->pNode, &pChild); + if( rc!=SQLITE_OK ){ + return rc; + } + + nodeRelease(pRtree, pCursor->pNode); + pCursor->pNode = pChild; + isEof = 1; + for(ii=0; isEof && iiiCell = ii; + rc = descendToCell(pRtree, pCursor, iHeight-1, &isEof); + if( rc!=SQLITE_OK ){ + return rc; + } + } + + if( isEof ){ + assert( pCursor->pNode==pChild ); + nodeReference(pSavedNode); + nodeRelease(pRtree, pChild); + pCursor->pNode = pSavedNode; + pCursor->iCell = iSavedCell; + } + + *pEof = isEof; + return SQLITE_OK; +} + +/* +** One of the cells in node pNode is guaranteed to have a 64-bit +** integer value equal to iRowid. Return the index of this cell. +*/ +static int nodeRowidIndex(Rtree *pRtree, RtreeNode *pNode, i64 iRowid){ + int ii; + for(ii=0; nodeGetRowid(pRtree, pNode, ii)!=iRowid; ii++){ + assert( ii<(NCELL(pNode)-1) ); + } + return ii; +} + +/* +** Return the index of the cell containing a pointer to node pNode +** in its parent. If pNode is the root node, return -1. +*/ +static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode *pParent = pNode->pParent; + if( pParent ){ + return nodeRowidIndex(pRtree, pParent, pNode->iNode); + } + return -1; +} + +/* +** Rtree virtual table module xNext method. +*/ +static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){ + Rtree *pRtree = (Rtree *)(pVtabCursor->pVtab); + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + int rc = SQLITE_OK; + + if( pCsr->iStrategy==1 ){ + /* This "scan" is a direct lookup by rowid. There is no next entry. */ + nodeRelease(pRtree, pCsr->pNode); + pCsr->pNode = 0; + } + + else if( pCsr->pNode ){ + /* Move to the next entry that matches the configured constraints. 
*/ + int iHeight = 0; + while( pCsr->pNode ){ + RtreeNode *pNode = pCsr->pNode; + int nCell = NCELL(pNode); + for(pCsr->iCell++; pCsr->iCelliCell++){ + int isEof; + rc = descendToCell(pRtree, pCsr, iHeight, &isEof); + if( rc!=SQLITE_OK || !isEof ){ + return rc; + } + } + pCsr->pNode = pNode->pParent; + pCsr->iCell = nodeParentIndex(pRtree, pNode); + nodeReference(pCsr->pNode); + nodeRelease(pRtree, pNode); + iHeight++; + } + } + + return rc; +} + +/* +** Rtree virtual table module xRowid method. +*/ +static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ + Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + + assert(pCsr->pNode); + *pRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell); + + return SQLITE_OK; +} + +/* +** Rtree virtual table module xColumn method. +*/ +static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ + Rtree *pRtree = (Rtree *)cur->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)cur; + + if( i==0 ){ + i64 iRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell); + sqlite3_result_int64(ctx, iRowid); + }else{ + RtreeCoord c; + nodeGetCoord(pRtree, pCsr->pNode, pCsr->iCell, i-1, &c); + if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ + sqlite3_result_double(ctx, c.f); + }else{ + assert( pRtree->eCoordType==RTREE_COORD_INT32 ); + sqlite3_result_int(ctx, c.i); + } + } + + return SQLITE_OK; +} + +/* +** Use nodeAcquire() to obtain the leaf node containing the record with +** rowid iRowid. If successful, set *ppLeaf to point to the node and +** return SQLITE_OK. If there is no such record in the table, set +** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf +** to zero and return an SQLite error code. +*/ +static int findLeafNode(Rtree *pRtree, i64 iRowid, RtreeNode **ppLeaf){ + int rc; + *ppLeaf = 0; + sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid); + if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){ + i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0); + rc = nodeAcquire(pRtree, iNode, 0, ppLeaf); + sqlite3_reset(pRtree->pReadRowid); + }else{ + rc = sqlite3_reset(pRtree->pReadRowid); + } + return rc; +} + + +/* +** Rtree virtual table module xFilter method. +*/ +static int rtreeFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + + RtreeNode *pRoot = 0; + int ii; + int rc = SQLITE_OK; + + rtreeReference(pRtree); + + sqlite3_free(pCsr->aConstraint); + pCsr->aConstraint = 0; + pCsr->iStrategy = idxNum; + + if( idxNum==1 ){ + /* Special case - lookup by rowid. */ + RtreeNode *pLeaf; /* Leaf on which the required cell resides */ + i64 iRowid = sqlite3_value_int64(argv[0]); + rc = findLeafNode(pRtree, iRowid, &pLeaf); + pCsr->pNode = pLeaf; + if( pLeaf && rc==SQLITE_OK ){ + pCsr->iCell = nodeRowidIndex(pRtree, pLeaf, iRowid); + } + }else{ + /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array + ** with the configured constraints. 
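    **
    ** As an illustration (editor's note, not part of the original patch),
    ** assume a two-dimensional table created as rt(id, x1, x2, y1, y2) and
    ** the query:
    **
    **   SELECT * FROM rt WHERE x1 < 10.0 AND y2 > 2.0;
    **
    ** rtreeBestIndex() below reports idxNum==2 and idxStr "CaEd", and the
    ** two constraint values arrive as argv[0]==10.0 and argv[1]==2.0. The
    ** loop that follows decodes this into:
    **
    **   aConstraint[0] = { op 'C' (RTREE_LT), iCoord 0 ('a'), rValue 10.0 }
    **   aConstraint[1] = { op 'E' (RTREE_GT), iCoord 3 ('d'), rValue  2.0 }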
+ */ + if( argc>0 ){ + pCsr->aConstraint = sqlite3_malloc(sizeof(RtreeConstraint)*argc); + pCsr->nConstraint = argc; + if( !pCsr->aConstraint ){ + rc = SQLITE_NOMEM; + }else{ + assert( (idxStr==0 && argc==0) || strlen(idxStr)==argc*2 ); + for(ii=0; iiaConstraint[ii]; + p->op = idxStr[ii*2]; + p->iCoord = idxStr[ii*2+1]-'a'; + p->rValue = sqlite3_value_double(argv[ii]); + } + } + } + + if( rc==SQLITE_OK ){ + pCsr->pNode = 0; + rc = nodeAcquire(pRtree, 1, 0, &pRoot); + } + if( rc==SQLITE_OK ){ + int isEof = 1; + int nCell = NCELL(pRoot); + pCsr->pNode = pRoot; + for(pCsr->iCell=0; rc==SQLITE_OK && pCsr->iCelliCell++){ + assert( pCsr->pNode==pRoot ); + rc = descendToCell(pRtree, pCsr, pRtree->iDepth, &isEof); + if( !isEof ){ + break; + } + } + if( rc==SQLITE_OK && isEof ){ + assert( pCsr->pNode==pRoot ); + nodeRelease(pRtree, pRoot); + pCsr->pNode = 0; + } + assert( rc!=SQLITE_OK || !pCsr->pNode || pCsr->iCellpNode) ); + } + } + + rtreeRelease(pRtree); + return rc; +} + +/* +** Rtree virtual table module xBestIndex method. There are three +** table scan strategies to choose from (in order from most to +** least desirable): +** +** idxNum idxStr Strategy +** ------------------------------------------------ +** 1 Unused Direct lookup by rowid. +** 2 See below R-tree query. +** 3 Unused Full table scan. +** ------------------------------------------------ +** +** If strategy 1 or 3 is used, then idxStr is not meaningful. If strategy +** 2 is used, idxStr is formatted to contain 2 bytes for each +** constraint used. The first two bytes of idxStr correspond to +** the constraint in sqlite3_index_info.aConstraintUsage[] with +** (argvIndex==1) etc. +** +** The first of each pair of bytes in idxStr identifies the constraint +** operator as follows: +** +** Operator Byte Value +** ---------------------- +** = 0x41 ('A') +** <= 0x42 ('B') +** < 0x43 ('C') +** >= 0x44 ('D') +** > 0x45 ('E') +** ---------------------- +** +** The second of each pair of bytes identifies the coordinate column +** to which the constraint applies. The leftmost coordinate column +** is 'a', the second from the left 'b' etc. +*/ +static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + int rc = SQLITE_OK; + int ii, cCol; + + int iIdx = 0; + char zIdxStr[RTREE_MAX_DIMENSIONS*8+1]; + memset(zIdxStr, 0, sizeof(zIdxStr)); + + assert( pIdxInfo->idxStr==0 ); + for(ii=0; iinConstraint; ii++){ + struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii]; + + if( p->usable && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + /* We have an equality constraint on the rowid. Use strategy 1. */ + int jj; + for(jj=0; jjaConstraintUsage[jj].argvIndex = 0; + pIdxInfo->aConstraintUsage[jj].omit = 0; + } + pIdxInfo->idxNum = 1; + pIdxInfo->aConstraintUsage[ii].argvIndex = 1; + pIdxInfo->aConstraintUsage[jj].omit = 1; + + /* This strategy involves a two rowid lookups on an B-Tree structures + ** and then a linear search of an R-Tree node. This should be + ** considered almost as quick as a direct rowid lookup (for which + ** sqlite uses an internal cost of 0.0). 
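      **
      ** For comparison (editor's note, not part of the original patch): the
      ** constant 10.0 assigned just below is far cheaper than the strategy-2
      ** estimate computed at the end of this function, 2000000/(iIdx+1).
      ** With two usable coordinate constraints idxStr holds 4 bytes, so that
      ** estimate is 2000000/5 = 400000, and the planner will prefer the
      ** direct rowid lookup whenever a rowid equality is available.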
+ */ + pIdxInfo->estimatedCost = 10.0; + return SQLITE_OK; + } + + if( p->usable && p->iColumn>0 ){ + u8 op = 0; + switch( p->op ){ + case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break; + case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break; + case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break; + case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break; + case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break; + } + if( op ){ + /* Make sure this particular constraint has not been used before. + ** If it has been used before, ignore it. + ** + ** A <= or < can be used if there is a prior >= or >. + ** A >= or > can be used if there is a prior < or <=. + ** A <= or < is disqualified if there is a prior <=, <, or ==. + ** A >= or > is disqualified if there is a prior >=, >, or ==. + ** A == is disqualifed if there is any prior constraint. + */ + int j, opmsk; + static const unsigned char compatible[] = { 0, 0, 1, 1, 2, 2 }; + assert( compatible[RTREE_EQ & 7]==0 ); + assert( compatible[RTREE_LT & 7]==1 ); + assert( compatible[RTREE_LE & 7]==1 ); + assert( compatible[RTREE_GT & 7]==2 ); + assert( compatible[RTREE_GE & 7]==2 ); + cCol = p->iColumn - 1 + 'a'; + opmsk = compatible[op & 7]; + for(j=0; jaConstraintUsage[ii].argvIndex = (iIdx/2); + pIdxInfo->aConstraintUsage[ii].omit = 1; + } + } + } + + pIdxInfo->idxNum = 2; + pIdxInfo->needToFreeIdxStr = 1; + if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){ + return SQLITE_NOMEM; + } + assert( iIdx>=0 ); + pIdxInfo->estimatedCost = (2000000.0 / (double)(iIdx + 1)); + return rc; +} + +/* +** Return the N-dimensional volumn of the cell stored in *p. +*/ +static float cellArea(Rtree *pRtree, RtreeCell *p){ + float area = 1.0; + int ii; + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + area = area * (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii])); + } + return area; +} + +/* +** Return the margin length of cell p. The margin length is the sum +** of the objects size in each dimension. +*/ +static float cellMargin(Rtree *pRtree, RtreeCell *p){ + float margin = 0.0; + int ii; + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii])); + } + return margin; +} + +/* +** Store the union of cells p1 and p2 in p1. +*/ +static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ + int ii; + if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f); + p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f); + } + }else{ + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i); + p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i); + } + } +} + +/* +** Return true if the area covered by p2 is a subset of the area covered +** by p1. False otherwise. +*/ +static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ + int ii; + int isInt = (pRtree->eCoordType==RTREE_COORD_INT32); + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + RtreeCoord *a1 = &p1->aCoord[ii]; + RtreeCoord *a2 = &p2->aCoord[ii]; + if( (!isInt && (a2[0].fa1[1].f)) + || ( isInt && (a2[0].ia1[1].i)) + ){ + return 0; + } + } + return 1; +} + +/* +** Return the amount cell p would grow by if it were unioned with pCell. 
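**
** A worked 2-dimensional example (editor's illustration, not part of the
** original patch): let p cover x in [0,4], y in [0,2] and pCell cover
** x in [3,6], y in [1,5]. Then:
**
**   cellArea(p)          = (4-0)*(2-0)             =  8
**   cellMargin(p)        = (4-0)+(2-0)             =  6
**   union of p and pCell = x in [0,6], y in [0,5]  (area 30)
**   cellGrowth(p, pCell) = 30 - 8                  = 22
**
** ChooseLeaf() below compares these growth figures to pick the child whose
** bounding box would be enlarged the least by the new entry.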
+*/ +static float cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){ + float area; + RtreeCell cell; + memcpy(&cell, p, sizeof(RtreeCell)); + area = cellArea(pRtree, &cell); + cellUnion(pRtree, &cell, pCell); + return (cellArea(pRtree, &cell)-area); +} + +#if VARIANT_RSTARTREE_CHOOSESUBTREE || VARIANT_RSTARTREE_SPLIT +static float cellOverlap( + Rtree *pRtree, + RtreeCell *p, + RtreeCell *aCell, + int nCell, + int iExclude +){ + int ii; + float overlap = 0.0; + for(ii=0; iinDim*2); jj+=2){ + double x1; + double x2; + + x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj])); + x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1])); + + if( x2iDepth-iHeight); ii++){ + int iCell; + sqlite3_int64 iBest; + + float fMinGrowth; + float fMinArea; + float fMinOverlap; + + int nCell = NCELL(pNode); + RtreeCell cell; + RtreeNode *pChild; + + RtreeCell *aCell = 0; + +#if VARIANT_RSTARTREE_CHOOSESUBTREE + if( ii==(pRtree->iDepth-1) ){ + int jj; + aCell = sqlite3_malloc(sizeof(RtreeCell)*nCell); + if( !aCell ){ + rc = SQLITE_NOMEM; + nodeRelease(pRtree, pNode); + pNode = 0; + continue; + } + for(jj=0; jjiDepth-1) ){ + overlap = cellOverlapEnlargement(pRtree,&cell,pCell,aCell,nCell,iCell); + } +#endif + if( (iCell==0) + || (overlappParent ){ + RtreeCell cell; + RtreeNode *pParent = p->pParent; + int iCell = nodeParentIndex(pRtree, p); + + nodeGetCell(pRtree, pParent, iCell, &cell); + if( !cellContains(pRtree, &cell, pCell) ){ + cellUnion(pRtree, &cell, pCell); + nodeOverwriteCell(pRtree, pParent, &cell, iCell); + } + + p = pParent; + } +} + +/* +** Write mapping (iRowid->iNode) to the _rowid table. +*/ +static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){ + sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid); + sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode); + sqlite3_step(pRtree->pWriteRowid); + return sqlite3_reset(pRtree->pWriteRowid); +} + +/* +** Write mapping (iNode->iPar) to the _parent table. +*/ +static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){ + sqlite3_bind_int64(pRtree->pWriteParent, 1, iNode); + sqlite3_bind_int64(pRtree->pWriteParent, 2, iPar); + sqlite3_step(pRtree->pWriteParent); + return sqlite3_reset(pRtree->pWriteParent); +} + +static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int); + +#if VARIANT_GUTTMAN_LINEAR_SPLIT +/* +** Implementation of the linear variant of the PickNext() function from +** Guttman[84]. +*/ +static RtreeCell *LinearPickNext( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + RtreeCell *pLeftBox, + RtreeCell *pRightBox, + int *aiUsed +){ + int ii; + for(ii=0; aiUsed[ii]; ii++); + aiUsed[ii] = 1; + return &aCell[ii]; +} + +/* +** Implementation of the linear variant of the PickSeeds() function from +** Guttman[84]. +*/ +static void LinearPickSeeds( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + int *piLeftSeed, + int *piRightSeed +){ + int i; + int iLeftSeed = 0; + int iRightSeed = 1; + float maxNormalInnerWidth = 0.0; + + /* Pick two "seed" cells from the array of cells. The algorithm used + ** here is the LinearPickSeeds algorithm from Gutman[1984]. The + ** indices of the two seed cells in the array are stored in local + ** variables iLeftSeek and iRightSeed. 
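  **
  ** A one-dimensional illustration (editor's note, not part of the original
  ** patch): suppose three cells span [0,2], [1,4] and [7,9] on dimension i.
  ** The lowest upper edge is 2 (cell 0, so iCellLeft==0) and the highest
  ** lower edge is 7 (cell 2, so iCellRight==2), while the whole set spans
  ** [0,9]. The separation normalized by the total width is (7-2)/(9-0),
  ** about 0.56; the dimension with the largest such value wins and its two
  ** extreme cells become the seeds of the two groups.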
+ */ + for(i=0; inDim; i++){ + float x1 = aCell[0].aCoord[i*2]; + float x2 = aCell[0].aCoord[i*2+1]; + float x3 = x1; + float x4 = x2; + int jj; + + int iCellLeft = 0; + int iCellRight = 0; + + for(jj=1; jjx4 ) x4 = right; + if( left>x3 ){ + x3 = left; + iCellRight = jj; + } + if( rightmaxNormalInnerWidth ){ + iLeftSeed = iCellLeft; + iRightSeed = iCellRight; + } + } + } + + *piLeftSeed = iLeftSeed; + *piRightSeed = iRightSeed; +} +#endif /* VARIANT_GUTTMAN_LINEAR_SPLIT */ + +#if VARIANT_GUTTMAN_QUADRATIC_SPLIT +/* +** Implementation of the quadratic variant of the PickNext() function from +** Guttman[84]. +*/ +static RtreeCell *QuadraticPickNext( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + RtreeCell *pLeftBox, + RtreeCell *pRightBox, + int *aiUsed +){ + #define FABS(a) ((a)<0.0?-1.0*(a):(a)) + + int iSelect = -1; + float fDiff; + int ii; + for(ii=0; iifDiff ){ + fDiff = diff; + iSelect = ii; + } + } + } + aiUsed[iSelect] = 1; + return &aCell[iSelect]; +} + +/* +** Implementation of the quadratic variant of the PickSeeds() function from +** Guttman[84]. +*/ +static void QuadraticPickSeeds( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + int *piLeftSeed, + int *piRightSeed +){ + int ii; + int jj; + + int iLeftSeed = 0; + int iRightSeed = 1; + float fWaste = 0.0; + + for(ii=0; iifWaste ){ + iLeftSeed = ii; + iRightSeed = jj; + fWaste = waste; + } + } + } + + *piLeftSeed = iLeftSeed; + *piRightSeed = iRightSeed; +} +#endif /* VARIANT_GUTTMAN_QUADRATIC_SPLIT */ + +/* +** Arguments aIdx, aDistance and aSpare all point to arrays of size +** nIdx. The aIdx array contains the set of integers from 0 to +** (nIdx-1) in no particular order. This function sorts the values +** in aIdx according to the indexed values in aDistance. For +** example, assuming the inputs: +** +** aIdx = { 0, 1, 2, 3 } +** aDistance = { 5.0, 2.0, 7.0, 6.0 } +** +** this function sets the aIdx array to contain: +** +** aIdx = { 0, 1, 2, 3 } +** +** The aSpare array is used as temporary working space by the +** sorting algorithm. 
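**
** (Editor's note, not part of the original patch.) Sorted in ascending order
** of the distances shown, the example above should come out as
**
**   aIdx      = { 1,   0,   3,   2 }
**
** since entry 1 has the smallest distance (2.0) and entry 2 the largest
** (7.0); the unchanged { 0, 1, 2, 3 } quoted above appears to repeat the
** input rather than show the sorted result.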
+*/ +static void SortByDistance( + int *aIdx, + int nIdx, + float *aDistance, + int *aSpare +){ + if( nIdx>1 ){ + int iLeft = 0; + int iRight = 0; + + int nLeft = nIdx/2; + int nRight = nIdx-nLeft; + int *aLeft = aIdx; + int *aRight = &aIdx[nLeft]; + + SortByDistance(aLeft, nLeft, aDistance, aSpare); + SortByDistance(aRight, nRight, aDistance, aSpare); + + memcpy(aSpare, aLeft, sizeof(int)*nLeft); + aLeft = aSpare; + + while( iLeft1 ){ + + int iLeft = 0; + int iRight = 0; + + int nLeft = nIdx/2; + int nRight = nIdx-nLeft; + int *aLeft = aIdx; + int *aRight = &aIdx[nLeft]; + + SortByDimension(pRtree, aLeft, nLeft, iDim, aCell, aSpare); + SortByDimension(pRtree, aRight, nRight, iDim, aCell, aSpare); + + memcpy(aSpare, aLeft, sizeof(int)*nLeft); + aLeft = aSpare; + while( iLeftnDim+1)*(sizeof(int*)+nCell*sizeof(int)); + + aaSorted = (int **)sqlite3_malloc(nByte); + if( !aaSorted ){ + return SQLITE_NOMEM; + } + + aSpare = &((int *)&aaSorted[pRtree->nDim])[pRtree->nDim*nCell]; + memset(aaSorted, 0, nByte); + for(ii=0; iinDim; ii++){ + int jj; + aaSorted[ii] = &((int *)&aaSorted[pRtree->nDim])[ii*nCell]; + for(jj=0; jjnDim; ii++){ + float margin = 0.0; + float fBestOverlap; + float fBestArea; + int iBestLeft; + int nLeft; + + for( + nLeft=RTREE_MINCELLS(pRtree); + nLeft<=(nCell-RTREE_MINCELLS(pRtree)); + nLeft++ + ){ + RtreeCell left; + RtreeCell right; + int kk; + float overlap; + float area; + + memcpy(&left, &aCell[aaSorted[ii][0]], sizeof(RtreeCell)); + memcpy(&right, &aCell[aaSorted[ii][nCell-1]], sizeof(RtreeCell)); + for(kk=1; kk<(nCell-1); kk++){ + if( kk0; i--){ + RtreeCell *pNext; + pNext = PickNext(pRtree, aCell, nCell, pBboxLeft, pBboxRight, aiUsed); + float diff = + cellGrowth(pRtree, pBboxLeft, pNext) - + cellGrowth(pRtree, pBboxRight, pNext) + ; + if( (RTREE_MINCELLS(pRtree)-NCELL(pRight)==i) + || (diff>0.0 && (RTREE_MINCELLS(pRtree)-NCELL(pLeft)!=i)) + ){ + nodeInsertCell(pRtree, pRight, pNext); + cellUnion(pRtree, pBboxRight, pNext); + }else{ + nodeInsertCell(pRtree, pLeft, pNext); + cellUnion(pRtree, pBboxLeft, pNext); + } + } + + sqlite3_free(aiUsed); + return SQLITE_OK; +} +#endif + +static int updateMapping( + Rtree *pRtree, + i64 iRowid, + RtreeNode *pNode, + int iHeight +){ + int (*xSetMapping)(Rtree *, sqlite3_int64, sqlite3_int64); + xSetMapping = ((iHeight==0)?rowidWrite:parentWrite); + if( iHeight>0 ){ + RtreeNode *pChild = nodeHashLookup(pRtree, iRowid); + if( pChild ){ + nodeRelease(pRtree, pChild->pParent); + nodeReference(pNode); + pChild->pParent = pNode; + } + } + return xSetMapping(pRtree, iRowid, pNode->iNode); +} + +static int SplitNode( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iHeight +){ + int i; + int newCellIsRight = 0; + + int rc = SQLITE_OK; + int nCell = NCELL(pNode); + RtreeCell *aCell; + int *aiUsed; + + RtreeNode *pLeft = 0; + RtreeNode *pRight = 0; + + RtreeCell leftbbox; + RtreeCell rightbbox; + + /* Allocate an array and populate it with a copy of pCell and + ** all cells from node pLeft. Then zero the original node. 
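  **
  ** (Editor's sketch of what follows, not part of the original patch.) The
  ** array receives the nCell existing cells plus the new cell pCell, nCell+1
  ** entries in all. If pNode is the root (iNode==1), two fresh children are
  ** allocated, the cells are divided between them and the tree grows one
  ** level, the new depth being written into the root header with
  ** writeInt16(). Otherwise pNode itself keeps the left half and a single
  ** new sibling is allocated for the right half.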
+ */ + aCell = sqlite3_malloc((sizeof(RtreeCell)+sizeof(int))*(nCell+1)); + if( !aCell ){ + rc = SQLITE_NOMEM; + goto splitnode_out; + } + aiUsed = (int *)&aCell[nCell+1]; + memset(aiUsed, 0, sizeof(int)*(nCell+1)); + for(i=0; iiNode==1 ){ + pRight = nodeNew(pRtree, pNode, 1); + pLeft = nodeNew(pRtree, pNode, 1); + pRtree->iDepth++; + pNode->isDirty = 1; + writeInt16(pNode->zData, pRtree->iDepth); + }else{ + pLeft = pNode; + pRight = nodeNew(pRtree, pLeft->pParent, 1); + nodeReference(pLeft); + } + + if( !pLeft || !pRight ){ + rc = SQLITE_NOMEM; + goto splitnode_out; + } + + memset(pLeft->zData, 0, pRtree->iNodeSize); + memset(pRight->zData, 0, pRtree->iNodeSize); + + rc = AssignCells(pRtree, aCell, nCell, pLeft, pRight, &leftbbox, &rightbbox); + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + + /* Ensure both child nodes have node numbers assigned to them. */ + if( (0==pRight->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pRight))) + || (0==pLeft->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pLeft))) + ){ + goto splitnode_out; + } + + rightbbox.iRowid = pRight->iNode; + leftbbox.iRowid = pLeft->iNode; + + if( pNode->iNode==1 ){ + rc = rtreeInsertCell(pRtree, pLeft->pParent, &leftbbox, iHeight+1); + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + }else{ + RtreeNode *pParent = pLeft->pParent; + int iCell = nodeParentIndex(pRtree, pLeft); + nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell); + AdjustTree(pRtree, pParent, &leftbbox); + } + if( (rc = rtreeInsertCell(pRtree, pRight->pParent, &rightbbox, iHeight+1)) ){ + goto splitnode_out; + } + + for(i=0; iiRowid ){ + newCellIsRight = 1; + } + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + } + if( pNode->iNode==1 ){ + for(i=0; iiRowid, pLeft, iHeight); + } + + if( rc==SQLITE_OK ){ + rc = nodeRelease(pRtree, pRight); + pRight = 0; + } + if( rc==SQLITE_OK ){ + rc = nodeRelease(pRtree, pLeft); + pLeft = 0; + } + +splitnode_out: + nodeRelease(pRtree, pRight); + nodeRelease(pRtree, pLeft); + sqlite3_free(aCell); + return rc; +} + +static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){ + int rc = SQLITE_OK; + if( pLeaf->iNode!=1 && pLeaf->pParent==0 ){ + sqlite3_bind_int64(pRtree->pReadParent, 1, pLeaf->iNode); + if( sqlite3_step(pRtree->pReadParent)==SQLITE_ROW ){ + i64 iNode = sqlite3_column_int64(pRtree->pReadParent, 0); + rc = nodeAcquire(pRtree, iNode, 0, &pLeaf->pParent); + }else{ + rc = SQLITE_ERROR; + } + sqlite3_reset(pRtree->pReadParent); + if( rc==SQLITE_OK ){ + rc = fixLeafParent(pRtree, pLeaf->pParent); + } + } + return rc; +} + +static int deleteCell(Rtree *, RtreeNode *, int, int); + +static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){ + int rc; + RtreeNode *pParent; + int iCell; + + assert( pNode->nRef==1 ); + + /* Remove the entry in the parent cell. */ + iCell = nodeParentIndex(pRtree, pNode); + pParent = pNode->pParent; + pNode->pParent = 0; + if( SQLITE_OK!=(rc = deleteCell(pRtree, pParent, iCell, iHeight+1)) + || SQLITE_OK!=(rc = nodeRelease(pRtree, pParent)) + ){ + return rc; + } + + /* Remove the xxx_node entry. */ + sqlite3_bind_int64(pRtree->pDeleteNode, 1, pNode->iNode); + sqlite3_step(pRtree->pDeleteNode); + if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteNode)) ){ + return rc; + } + + /* Remove the xxx_parent entry. */ + sqlite3_bind_int64(pRtree->pDeleteParent, 1, pNode->iNode); + sqlite3_step(pRtree->pDeleteParent); + if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteParent)) ){ + return rc; + } + + /* Remove the node from the in-memory hash table and link it into + ** the Rtree.pDeleted list. 
Its contents will be re-inserted later on. + */ + nodeHashDelete(pRtree, pNode); + pNode->iNode = iHeight; + pNode->pNext = pRtree->pDeleted; + pNode->nRef++; + pRtree->pDeleted = pNode; + + return SQLITE_OK; +} + +static void fixBoundingBox(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode *pParent = pNode->pParent; + if( pParent ){ + int ii; + int nCell = NCELL(pNode); + RtreeCell box; /* Bounding box for pNode */ + nodeGetCell(pRtree, pNode, 0, &box); + for(ii=1; iiiNode; + ii = nodeParentIndex(pRtree, pNode); + nodeOverwriteCell(pRtree, pParent, &box, ii); + fixBoundingBox(pRtree, pParent); + } +} + +/* +** Delete the cell at index iCell of node pNode. After removing the +** cell, adjust the r-tree data structure if required. +*/ +static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){ + int rc; + + if( SQLITE_OK!=(rc = fixLeafParent(pRtree, pNode)) ){ + return rc; + } + + /* Remove the cell from the node. This call just moves bytes around + ** the in-memory node image, so it cannot fail. + */ + nodeDeleteCell(pRtree, pNode, iCell); + + /* If the node is not the tree root and now has less than the minimum + ** number of cells, remove it from the tree. Otherwise, update the + ** cell in the parent node so that it tightly contains the updated + ** node. + */ + if( pNode->iNode!=1 ){ + RtreeNode *pParent = pNode->pParent; + if( (pParent->iNode!=1 || NCELL(pParent)!=1) + && (NCELL(pNode)nDim; iDim++){ + aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]); + aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]); + } + } + for(iDim=0; iDimnDim; iDim++){ + aCenterCoord[iDim] = aCenterCoord[iDim]/((float)nCell*2.0); + } + + for(ii=0; iinDim; iDim++){ + float coord = DCOORD(aCell[ii].aCoord[iDim*2+1]) - + DCOORD(aCell[ii].aCoord[iDim*2]); + aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]); + } + } + + SortByDistance(aOrder, nCell, aDistance, aSpare); + nodeZero(pRtree, pNode); + + for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){ + RtreeCell *p = &aCell[aOrder[ii]]; + nodeInsertCell(pRtree, pNode, p); + if( p->iRowid==pCell->iRowid ){ + if( iHeight==0 ){ + rc = rowidWrite(pRtree, p->iRowid, pNode->iNode); + }else{ + rc = parentWrite(pRtree, p->iRowid, pNode->iNode); + } + } + } + if( rc==SQLITE_OK ){ + fixBoundingBox(pRtree, pNode); + } + for(; rc==SQLITE_OK && iiiNode currently contains + ** the height of the sub-tree headed by the cell. + */ + RtreeNode *pInsert; + RtreeCell *p = &aCell[aOrder[ii]]; + rc = ChooseLeaf(pRtree, p, iHeight, &pInsert); + if( rc==SQLITE_OK ){ + int rc2; + rc = rtreeInsertCell(pRtree, pInsert, p, iHeight); + rc2 = nodeRelease(pRtree, pInsert); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + + sqlite3_free(aCell); + return rc; +} + +/* +** Insert cell pCell into node pNode. Node pNode is the head of a +** subtree iHeight high (leaf nodes have iHeight==0). 
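**
** For context (editor's condensed sketch, not part of the original patch),
** an ordinary row insert reaches this function from the xUpdate method
** further down roughly like this:
**
**   RtreeNode *pLeaf;
**   rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);
**   if( rc==SQLITE_OK ){
**     int rc2;
**     pRtree->iReinsertHeight = -1;
**     rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
**     rc2 = nodeRelease(pRtree, pLeaf);
**     if( rc==SQLITE_OK ) rc = rc2;
**   }
**
** If the chosen node is already full, the R*-tree variant compiled in below
** either reinserts a portion of its entries (Reinsert()) or splits it
** (SplitNode()).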
+*/ +static int rtreeInsertCell( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iHeight +){ + int rc = SQLITE_OK; + if( iHeight>0 ){ + RtreeNode *pChild = nodeHashLookup(pRtree, pCell->iRowid); + if( pChild ){ + nodeRelease(pRtree, pChild->pParent); + nodeReference(pNode); + pChild->pParent = pNode; + } + } + if( nodeInsertCell(pRtree, pNode, pCell) ){ +#if VARIANT_RSTARTREE_REINSERT + if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){ + rc = SplitNode(pRtree, pNode, pCell, iHeight); + }else{ + pRtree->iReinsertHeight = iHeight; + rc = Reinsert(pRtree, pNode, pCell, iHeight); + } +#else + rc = SplitNode(pRtree, pNode, pCell, iHeight); +#endif + }else{ + AdjustTree(pRtree, pNode, pCell); + if( iHeight==0 ){ + rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode); + }else{ + rc = parentWrite(pRtree, pCell->iRowid, pNode->iNode); + } + } + return rc; +} + +static int reinsertNodeContent(Rtree *pRtree, RtreeNode *pNode){ + int ii; + int rc = SQLITE_OK; + int nCell = NCELL(pNode); + + for(ii=0; rc==SQLITE_OK && iiiNode currently contains + ** the height of the sub-tree headed by the cell. + */ + rc = ChooseLeaf(pRtree, &cell, pNode->iNode, &pInsert); + if( rc==SQLITE_OK ){ + int rc2; + rc = rtreeInsertCell(pRtree, pInsert, &cell, pNode->iNode); + rc2 = nodeRelease(pRtree, pInsert); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + return rc; +} + +/* +** Select a currently unused rowid for a new r-tree record. +*/ +static int newRowid(Rtree *pRtree, i64 *piRowid){ + int rc; + sqlite3_bind_null(pRtree->pWriteRowid, 1); + sqlite3_bind_null(pRtree->pWriteRowid, 2); + sqlite3_step(pRtree->pWriteRowid); + rc = sqlite3_reset(pRtree->pWriteRowid); + *piRowid = sqlite3_last_insert_rowid(pRtree->db); + return rc; +} + +#ifndef NDEBUG +static int hashIsEmpty(Rtree *pRtree){ + int ii; + for(ii=0; iiaHash[ii] ); + } + return 1; +} +#endif + +/* +** The xUpdate method for rtree module virtual tables. +*/ +int rtreeUpdate( + sqlite3_vtab *pVtab, + int nData, + sqlite3_value **azData, + sqlite_int64 *pRowid +){ + Rtree *pRtree = (Rtree *)pVtab; + int rc = SQLITE_OK; + + rtreeReference(pRtree); + + assert(nData>=1); + assert(hashIsEmpty(pRtree)); + + /* If azData[0] is not an SQL NULL value, it is the rowid of a + ** record to delete from the r-tree table. The following block does + ** just that. + */ + if( sqlite3_value_type(azData[0])!=SQLITE_NULL ){ + i64 iDelete; /* The rowid to delete */ + RtreeNode *pLeaf; /* Leaf node containing record iDelete */ + int iCell; /* Index of iDelete cell in pLeaf */ + RtreeNode *pRoot; + + /* Obtain a reference to the root node to initialise Rtree.iDepth */ + rc = nodeAcquire(pRtree, 1, 0, &pRoot); + + /* Obtain a reference to the leaf node that contains the entry + ** about to be deleted. + */ + if( rc==SQLITE_OK ){ + iDelete = sqlite3_value_int64(azData[0]); + rc = findLeafNode(pRtree, iDelete, &pLeaf); + } + + /* Delete the cell in question from the leaf node. */ + if( rc==SQLITE_OK ){ + int rc2; + iCell = nodeRowidIndex(pRtree, pLeaf, iDelete); + rc = deleteCell(pRtree, pLeaf, iCell, 0); + rc2 = nodeRelease(pRtree, pLeaf); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + + /* Delete the corresponding entry in the _rowid table. */ + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pRtree->pDeleteRowid, 1, iDelete); + sqlite3_step(pRtree->pDeleteRowid); + rc = sqlite3_reset(pRtree->pDeleteRowid); + } + + /* Check if the root node now has exactly one child. 
If so, remove + ** it, schedule the contents of the child for reinsertion and + ** reduce the tree height by one. + ** + ** This is equivalent to copying the contents of the child into + ** the root node (the operation that Gutman's paper says to perform + ** in this scenario). + */ + if( rc==SQLITE_OK && pRtree->iDepth>0 ){ + if( rc==SQLITE_OK && NCELL(pRoot)==1 ){ + RtreeNode *pChild; + i64 iChild = nodeGetRowid(pRtree, pRoot, 0); + rc = nodeAcquire(pRtree, iChild, pRoot, &pChild); + if( rc==SQLITE_OK ){ + rc = removeNode(pRtree, pChild, pRtree->iDepth-1); + } + if( rc==SQLITE_OK ){ + pRtree->iDepth--; + writeInt16(pRoot->zData, pRtree->iDepth); + pRoot->isDirty = 1; + } + } + } + + /* Re-insert the contents of any underfull nodes removed from the tree. */ + for(pLeaf=pRtree->pDeleted; pLeaf; pLeaf=pRtree->pDeleted){ + if( rc==SQLITE_OK ){ + rc = reinsertNodeContent(pRtree, pLeaf); + } + pRtree->pDeleted = pLeaf->pNext; + sqlite3_free(pLeaf); + } + + /* Release the reference to the root node. */ + if( rc==SQLITE_OK ){ + rc = nodeRelease(pRtree, pRoot); + }else{ + nodeRelease(pRtree, pRoot); + } + } + + /* If the azData[] array contains more than one element, elements + ** (azData[2]..azData[argc-1]) contain a new record to insert into + ** the r-tree structure. + */ + if( rc==SQLITE_OK && nData>1 ){ + /* Insert a new record into the r-tree */ + RtreeCell cell; + int ii; + RtreeNode *pLeaf; + + /* Populate the cell.aCoord[] array. The first coordinate is azData[3]. */ + assert( nData==(pRtree->nDim*2 + 3) ); + if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + cell.aCoord[ii].f = (float)sqlite3_value_double(azData[ii+3]); + cell.aCoord[ii+1].f = (float)sqlite3_value_double(azData[ii+4]); + if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){ + rc = SQLITE_CONSTRAINT; + goto constraint; + } + } + }else{ + for(ii=0; ii<(pRtree->nDim*2); ii+=2){ + cell.aCoord[ii].i = sqlite3_value_int(azData[ii+3]); + cell.aCoord[ii+1].i = sqlite3_value_int(azData[ii+4]); + if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){ + rc = SQLITE_CONSTRAINT; + goto constraint; + } + } + } + + /* Figure out the rowid of the new row. */ + if( sqlite3_value_type(azData[2])==SQLITE_NULL ){ + rc = newRowid(pRtree, &cell.iRowid); + }else{ + cell.iRowid = sqlite3_value_int64(azData[2]); + sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid); + if( SQLITE_ROW==sqlite3_step(pRtree->pReadRowid) ){ + sqlite3_reset(pRtree->pReadRowid); + rc = SQLITE_CONSTRAINT; + goto constraint; + } + rc = sqlite3_reset(pRtree->pReadRowid); + } + + if( rc==SQLITE_OK ){ + rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf); + } + if( rc==SQLITE_OK ){ + int rc2; + pRtree->iReinsertHeight = -1; + rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); + rc2 = nodeRelease(pRtree, pLeaf); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + +constraint: + rtreeRelease(pRtree); + return rc; +} + +/* +** The xRename method for rtree module virtual tables. 
+*/ +static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){ + Rtree *pRtree = (Rtree *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";" + "ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";" + "ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";" + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + ); + if( zSql ){ + rc = sqlite3_exec(pRtree->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static sqlite3_module rtreeModule = { + 0, /* iVersion */ + rtreeCreate, /* xCreate - create a table */ + rtreeConnect, /* xConnect - connect to an existing table */ + rtreeBestIndex, /* xBestIndex - Determine search strategy */ + rtreeDisconnect, /* xDisconnect - Disconnect from a table */ + rtreeDestroy, /* xDestroy - Drop a table */ + rtreeOpen, /* xOpen - open a cursor */ + rtreeClose, /* xClose - close a cursor */ + rtreeFilter, /* xFilter - configure scan constraints */ + rtreeNext, /* xNext - advance a cursor */ + rtreeEof, /* xEof */ + rtreeColumn, /* xColumn - read data */ + rtreeRowid, /* xRowid - read data */ + rtreeUpdate, /* xUpdate - write data */ + 0, /* xBegin - begin transaction */ + 0, /* xSync - sync transaction */ + 0, /* xCommit - commit transaction */ + 0, /* xRollback - rollback transaction */ + 0, /* xFindFunction - function overloading */ + rtreeRename /* xRename - rename the table */ +}; + +static int rtreeSqlInit( + Rtree *pRtree, + sqlite3 *db, + const char *zDb, + const char *zPrefix, + int isCreate +){ + int rc = SQLITE_OK; + + #define N_STATEMENT 9 + static const char *azSql[N_STATEMENT] = { + /* Read and write the xxx_node table */ + "SELECT data FROM '%q'.'%q_node' WHERE nodeno = :1", + "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_node' WHERE nodeno = :1", + + /* Read and write the xxx_rowid table */ + "SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = :1", + "INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_rowid' WHERE rowid = :1", + + /* Read and write the xxx_parent table */ + "SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = :1", + "INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_parent' WHERE nodeno = :1" + }; + sqlite3_stmt **appStmt[N_STATEMENT]; + int i; + + pRtree->db = db; + + if( isCreate ){ + char *zCreate = sqlite3_mprintf( +"CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY, data BLOB);" +"CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY, nodeno INTEGER);" +"CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY, parentnode INTEGER);" +"INSERT INTO '%q'.'%q_node' VALUES(1, zeroblob(%d))", + zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, pRtree->iNodeSize + ); + if( !zCreate ){ + return SQLITE_NOMEM; + } + rc = sqlite3_exec(db, zCreate, 0, 0, 0); + sqlite3_free(zCreate); + if( rc!=SQLITE_OK ){ + return rc; + } + } + + appStmt[0] = &pRtree->pReadNode; + appStmt[1] = &pRtree->pWriteNode; + appStmt[2] = &pRtree->pDeleteNode; + appStmt[3] = &pRtree->pReadRowid; + appStmt[4] = &pRtree->pWriteRowid; + appStmt[5] = &pRtree->pDeleteRowid; + appStmt[6] = &pRtree->pReadParent; + appStmt[7] = &pRtree->pWriteParent; + appStmt[8] = &pRtree->pDeleteParent; + + for(i=0; i module name +** argv[1] -> database name +** argv[2] -> table name +** argv[...] -> column names... 
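**
** A concrete example (editor's note, not part of the original patch; the
** table and column names are made up):
**
**   CREATE VIRTUAL TABLE demo_index USING rtree(id, minX, maxX, minY, maxY);
**
** reaches this function with argc==8 and
**
**   argv[0] = "rtree"          argv[3] = "id"
**   argv[1] = "main"           argv[4] = "minX"    argv[5] = "maxX"
**   argv[2] = "demo_index"     argv[6] = "minY"    argv[7] = "maxY"
**
** giving nDim = (argc-4)/2 = 2 and nBytesPerCell = 8 + 2*2*4 = 24 bytes.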
+*/ +static int rtreeInit( + sqlite3 *db, /* Database connection */ + void *pAux, /* One of the RTREE_COORD_* constants */ + int argc, const char *const*argv, /* Parameters to CREATE TABLE statement */ + sqlite3_vtab **ppVtab, /* OUT: New virtual table */ + char **pzErr, /* OUT: Error message, if any */ + int isCreate /* True for xCreate, false for xConnect */ +){ + int rc = SQLITE_OK; + int iPageSize = 0; + Rtree *pRtree; + int nDb; /* Length of string argv[1] */ + int nName; /* Length of string argv[2] */ + int eCoordType = (int)pAux; + + const char *aErrMsg[] = { + 0, /* 0 */ + "Wrong number of columns for an rtree table", /* 1 */ + "Too few columns for an rtree table", /* 2 */ + "Too many columns for an rtree table" /* 3 */ + }; + + int iErr = (argc<6) ? 2 : argc>(RTREE_MAX_DIMENSIONS*2+4) ? 3 : argc%2; + if( aErrMsg[iErr] ){ + *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); + return SQLITE_ERROR; + } + + rc = getPageSize(db, argv[1], &iPageSize); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Allocate the sqlite3_vtab structure */ + nDb = strlen(argv[1]); + nName = strlen(argv[2]); + pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2); + if( !pRtree ){ + return SQLITE_NOMEM; + } + memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); + pRtree->nBusy = 1; + pRtree->base.pModule = &rtreeModule; + pRtree->zDb = (char *)&pRtree[1]; + pRtree->zName = &pRtree->zDb[nDb+1]; + pRtree->nDim = (argc-4)/2; + pRtree->nBytesPerCell = 8 + pRtree->nDim*4*2; + pRtree->eCoordType = eCoordType; + memcpy(pRtree->zDb, argv[1], nDb); + memcpy(pRtree->zName, argv[2], nName); + + /* Figure out the node size to use. By default, use 64 bytes less than + ** the database page-size. This ensures that each node is stored on + ** a single database page. + ** + ** If the databasd page-size is so large that more than RTREE_MAXCELLS + ** entries would fit in a single node, use a smaller node-size. + */ + pRtree->iNodeSize = iPageSize-64; + if( (4+pRtree->nBytesPerCell*RTREE_MAXCELLS)iNodeSize ){ + pRtree->iNodeSize = 4+pRtree->nBytesPerCell*RTREE_MAXCELLS; + } + + /* Create/Connect to the underlying relational database schema. If + ** that is successful, call sqlite3_declare_vtab() to configure + ** the r-tree table schema. + */ + if( (rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate)) ){ + *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + }else{ + char *zSql = sqlite3_mprintf("CREATE TABLE x(%s", argv[3]); + char *zTmp; + int ii; + for(ii=4; zSql && ii*2 coordinates. +*/ +static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ + char *zText = 0; + RtreeNode node; + Rtree tree; + int ii; + + memset(&node, 0, sizeof(RtreeNode)); + memset(&tree, 0, sizeof(Rtree)); + tree.nDim = sqlite3_value_int(apArg[0]); + tree.nBytesPerCell = 8 + 8 * tree.nDim; + node.zData = (u8 *)sqlite3_value_blob(apArg[1]); + + for(ii=0; ii$x2 AND y1<$y1 AND y2>$y2} + } +}] +puts "$btree_select_time" + +puts -nonewline "Selecting from rtree... 
" +flush stdout +set rtree_select_time [time { + foreach {x1 x2 y1 y2} [lrange $data 0 [expr $NQUERY*4-1]] { + db eval {SELECT * FROM rtree WHERE x1<$x1 AND x2>$x2 AND y1<$y1 AND y2>$y2} + } +}] +puts "$rtree_select_time" + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/rtree_util.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/rtree_util.tcl --- sqlite3-3.4.2/ext/rtree/rtree_util.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/rtree_util.tcl 2008-05-26 19:41:54.000000000 +0100 @@ -0,0 +1,195 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains Tcl code that may be useful for testing or +# analyzing r-tree structures created with this module. It is +# used by both test procedures and the r-tree viewer application. +# +# $Id: rtree_util.tcl,v 1.1 2008/05/26 18:41:54 danielk1977 Exp $ +# + + +#-------------------------------------------------------------------------- +# PUBLIC API: +# +# rtree_depth +# rtree_ndim +# rtree_node +# rtree_mincells +# rtree_check +# rtree_dump +# rtree_treedump +# + +proc rtree_depth {db zTab} { + $db one "SELECT rtreedepth(data) FROM ${zTab}_node WHERE nodeno=1" +} + +proc rtree_nodedepth {db zTab iNode} { + set iDepth [rtree_depth $db $zTab] + + set ii $iNode + while {$ii != 1} { + set sql "SELECT parentnode FROM ${zTab}_parent WHERE nodeno = $ii" + set ii [db one $sql] + incr iDepth -1 + } + + return $iDepth +} + +# Return the number of dimensions of the rtree. +# +proc rtree_ndim {db zTab} { + set nDim [expr {(([llength [$db eval "pragma table_info($zTab)"]]/6)-1)/2}] +} + +# Return the contents of rtree node $iNode. +# +proc rtree_node {db zTab iNode {iPrec 6}} { + set nDim [rtree_ndim $db $zTab] + set sql " + SELECT rtreenode($nDim, data) FROM ${zTab}_node WHERE nodeno = $iNode + " + set node [db one $sql] + + set nCell [llength $node] + set nCoord [expr $nDim*2] + for {set ii 0} {$ii < $nCell} {incr ii} { + for {set jj 1} {$jj <= $nCoord} {incr jj} { + set newval [format "%.${iPrec}f" [lindex $node $ii $jj]] + lset node $ii $jj $newval + } + } + set node +} + +proc rtree_mincells {db zTab} { + set n [$db one "select length(data) FROM ${zTab}_node LIMIT 1"] + set nMax [expr {int(($n-4)/(8+[rtree_ndim $db $zTab]*2*4))}] + return [expr {int($nMax/3)}] +} + +# An integrity check for the rtree $zTab accessible via database +# connection $db. +# +proc rtree_check {db zTab} { + array unset ::checked + + # Check each r-tree node. + set rc [catch { + rtree_node_check $db $zTab 1 [rtree_depth $db $zTab] + } msg] + if {$rc && $msg ne ""} { error $msg } + + # Check that the _rowid and _parent tables have the right + # number of entries. 
+ set nNode [$db one "SELECT count(*) FROM ${zTab}_node"] + set nRow [$db one "SELECT count(*) FROM ${zTab}"] + set nRowid [$db one "SELECT count(*) FROM ${zTab}_rowid"] + set nParent [$db one "SELECT count(*) FROM ${zTab}_parent"] + + if {$nNode != ($nParent+1)} { + error "Wrong number of entries in ${zTab}_parent" + } + if {$nRow != $nRowid} { + error "Wrong number of entries in ${zTab}_rowid" + } + + return $rc +} + +proc rtree_node_check {db zTab iNode iDepth} { + if {[info exists ::checked($iNode)]} { error "Second ref to $iNode" } + set ::checked($iNode) 1 + + set node [rtree_node $db $zTab $iNode] + if {$iNode!=1 && [llength $node]==0} { error "No such node: $iNode" } + + if {$iNode != 1 && [llength $node]<[rtree_mincells $db $zTab]} { + puts "Node $iNode: Has only [llength $node] cells" + error "" + } + if {$iNode == 1 && [llength $node]==1 && [rtree_depth $db $zTab]>0} { + set depth [rtree_depth $db $zTab] + puts "Node $iNode: Has only 1 child (tree depth is $depth)" + error "" + } + + set nDim [expr {([llength [lindex $node 0]]-1)/2}] + + if {$iDepth > 0} { + set d [expr $iDepth-1] + foreach cell $node { + set shouldbe [rtree_node_check $db $zTab [lindex $cell 0] $d] + if {$cell ne $shouldbe} { + puts "Node $iNode: Cell is: {$cell}, should be {$shouldbe}" + error "" + } + } + } + + set mapping_table "${zTab}_parent" + set mapping_sql "SELECT parentnode FROM $mapping_table WHERE rowid = \$rowid" + if {$iDepth==0} { + set mapping_table "${zTab}_rowid" + set mapping_sql "SELECT nodeno FROM $mapping_table WHERE rowid = \$rowid" + } + foreach cell $node { + set rowid [lindex $cell 0] + set mapping [db one $mapping_sql] + if {$mapping != $iNode} { + puts "Node $iNode: $mapping_table entry for cell $rowid is $mapping" + error "" + } + } + + set ret [list $iNode] + for {set ii 1} {$ii <= $nDim*2} {incr ii} { + set f [lindex $node 0 $ii] + foreach cell $node { + set f2 [lindex $cell $ii] + if {($ii%2)==1 && $f2<$f} {set f $f2} + if {($ii%2)==0 && $f2>$f} {set f $f2} + } + lappend ret $f + } + return $ret +} + +proc rtree_dump {db zTab} { + set zRet "" + set nDim [expr {(([llength [$db eval "pragma table_info($zTab)"]]/6)-1)/2}] + set sql "SELECT nodeno, rtreenode($nDim, data) AS node FROM ${zTab}_node" + $db eval $sql { + append zRet [format "% -10s %s\n" $nodeno $node] + } + set zRet +} + +proc rtree_nodetreedump {db zTab zIndent iDepth iNode} { + set ret "" + set node [rtree_node $db $zTab $iNode 1] + append ret [format "%-3d %s%s\n" $iNode $zIndent $node] + if {$iDepth>0} { + foreach cell $node { + set i [lindex $cell 0] + append ret [rtree_nodetreedump $db $zTab "$zIndent " [expr $iDepth-1] $i] + } + } + set ret +} + +proc rtree_treedump {db zTab} { + set d [rtree_depth $db $zTab] + rtree_nodetreedump $db $zTab "" $d 1 +} + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/tkt3363.test /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/tkt3363.test --- sqlite3-3.4.2/ext/rtree/tkt3363.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/tkt3363.test 2008-09-08 12:07:03.000000000 +0100 @@ -0,0 +1,54 @@ +# 2008 Sep 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing that ticket #3363 is fixed. 
+# +# $Id: tkt3363.test,v 1.1 2008/09/08 11:07:03 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source [file join [file dirname [info script]] rtree_util.tcl] +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +do_test tkt3363.1.1 { + execsql { CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2) } +} {} + +do_test tkt3363.1.2 { + for {set ii 1} {$ii < 50} {incr ii} { + set x 1000000 + set y [expr 4000000 + $ii*10] + execsql { INSERT INTO t1 VALUES($ii, $x, $x, $y, $y) } + } +} {} + +do_test tkt3363.1.3 { + execsql { + SELECT count(*) FROM t1 WHERE +y2>4000425.0; + } +} {7} + +do_test tkt3363.1.4 { + execsql { + SELECT count(*) FROM t1 WHERE y2>4000425.0; + } +} {7} + +finish_test + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ext/rtree/viewrtree.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/ext/rtree/viewrtree.tcl --- sqlite3-3.4.2/ext/rtree/viewrtree.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/ext/rtree/viewrtree.tcl 2008-05-26 19:41:54.000000000 +0100 @@ -0,0 +1,189 @@ + +load ./libsqlite3.dylib +#package require sqlite3 +source [file join [file dirname $argv0] rtree_util.tcl] + +wm title . "SQLite r-tree viewer" + +if {[llength $argv]!=1} { + puts stderr "Usage: $argv0 " + puts stderr "" + exit +} +sqlite3 db [lindex $argv 0] + +canvas .c -background white -width 400 -height 300 -highlightthickness 0 + +button .b -text "Parent Node" -command { + set sql "SELECT parentnode FROM $::O(zTab)_parent WHERE nodeno = $::O(iNode)" + set ::O(iNode) [db one $sql] + if {$::O(iNode) eq ""} {set ::O(iNode) 1} + view_node +} + +set O(iNode) 1 +set O(zTab) "" +set O(listbox_captions) [list] +set O(listbox_itemmap) [list] +set O(listbox_highlight) -1 + +listbox .l -listvariable ::O(listbox_captions) -yscrollcommand {.ls set} +scrollbar .ls -command {.l yview} +label .status -font courier -anchor w +label .title -anchor w -text "Node 1:" -background white -borderwidth 0 + + +set rtree_tables [list] +db eval { + SELECT name + FROM sqlite_master + WHERE type='table' AND sql LIKE '%virtual%table%using%rtree%' +} { + set nCol [expr [llength [db eval "pragma table_info($name)"]]/6] + if {$nCol != 5} { + puts stderr "Not viewing $name - is not 2-dimensional" + } else { + lappend rtree_tables [list Table $name] + } +} +if {$rtree_tables eq ""} { + puts stderr "Cannot find an r-tree table in database [lindex $argv 0]" + puts stderr "" + exit +} +eval tk_optionMenu .select option_var $rtree_tables +trace add variable option_var write set_option_var +proc set_option_var {args} { + set ::O(zTab) [lindex $::option_var 1] + set ::O(iNode) 1 + view_node +} +set ::O(zTab) [lindex $::rtree_tables 0 1] + +bind .l <1> {listbox_click [.l nearest %y]} +bind .l {listbox_mouseover [.l nearest %y]} +bind .l {listbox_mouseover -1} + +proc listbox_click {sel} { + if {$sel ne ""} { + set ::O(iNode) [lindex $::O(listbox_captions) $sel 1] + view_node + } +} +proc listbox_mouseover {i} { + set oldid [lindex $::O(listbox_itemmap) $::O(listbox_highlight)] + .c itemconfigure $oldid -fill "" + + .l selection clear 0 end + .status configure -text "" + if {$i>=0} { + set id [lindex $::O(listbox_itemmap) $i] + .c itemconfigure $id -fill grey + .c lower $id + set ::O(listbox_highlight) $i + .l selection set $i + .status configure -text [cell_report db $::O(zTab) $::O(iNode) $i] + } +} + +grid configure .select -row 0 -column 0 -columnspan 2 -sticky nsew +grid configure .b -row 1 -column 0 -columnspan 2 -sticky nsew +grid configure .l -row 
2 -column 0 -sticky nsew +grid configure .status -row 3 -column 0 -columnspan 3 -sticky nsew + +grid configure .title -row 0 -column 2 -sticky nsew +grid configure .c -row 1 -column 2 -rowspan 2 -sticky nsew +grid configure .ls -row 2 -column 1 -sticky nsew + +grid columnconfigure . 2 -weight 1 +grid rowconfigure . 2 -weight 1 + +proc node_bbox {data} { + set xmin 0 + set xmax 0 + set ymin 0 + set ymax 0 + foreach {rowid xmin xmax ymin ymax} [lindex $data 0] break + foreach cell [lrange $data 1 end] { + foreach {rowid x1 x2 y1 y2} $cell break + if {$x1 < $xmin} {set xmin $x1} + if {$x2 > $xmax} {set xmax $x2} + if {$y1 < $ymin} {set ymin $y1} + if {$y2 > $ymax} {set ymax $y2} + } + list $xmin $xmax $ymin $ymax +} + +proc view_node {} { + set iNode $::O(iNode) + set zTab $::O(zTab) + + set data [rtree_node db $zTab $iNode 12] + set depth [rtree_nodedepth db $zTab $iNode] + + .c delete all + set ::O(listbox_captions) [list] + set ::O(listbox_itemmap) [list] + set $::O(listbox_highlight) -1 + + .b configure -state normal + if {$iNode == 1} {.b configure -state disabled} + .title configure -text "Node $iNode: [cell_report db $zTab $iNode -1]" + + foreach {xmin xmax ymin ymax} [node_bbox $data] break + set total_area 0.0 + + set xscale [expr {double([winfo width .c]-20)/($xmax-$xmin)}] + set yscale [expr {double([winfo height .c]-20)/($ymax-$ymin)}] + + set xoff [expr {10.0 - $xmin*$xscale}] + set yoff [expr {10.0 - $ymin*$yscale}] + + foreach cell $data { + foreach {rowid x1 x2 y1 y2} $cell break + set total_area [expr {$total_area + ($x2-$x1)*($y2-$y1)}] + set x1 [expr {$x1*$xscale + $xoff}] + set x2 [expr {$x2*$xscale + $xoff}] + set y1 [expr {$y1*$yscale + $yoff}] + set y2 [expr {$y2*$yscale + $yoff}] + + set id [.c create rectangle $x1 $y1 $x2 $y2] + if {$depth>0} { + lappend ::O(listbox_captions) "Node $rowid" + lappend ::O(listbox_itemmap) $id + } + } +} + +proc cell_report {db zTab iParent iCell} { + set data [rtree_node db $zTab $iParent 12] + set cell [lindex $data $iCell] + + foreach {xmin xmax ymin ymax} [node_bbox $data] break + set total_area [expr ($xmax-$xmin)*($ymax-$ymin)] + + if {$cell eq ""} { + set cell_area 0.0 + foreach cell $data { + foreach {rowid x1 x2 y1 y2} $cell break + set cell_area [expr $cell_area+($x2-$x1)*($y2-$y1)] + } + set cell_area [expr $cell_area/[llength $data]] + set zReport [format "Size = %.1f x %.1f Average child area = %.1f%%" \ + [expr $xmax-$xmin] [expr $ymax-$ymin] [expr 100.0*$cell_area/$total_area]\ + ] + append zReport " Sub-tree height: [rtree_nodedepth db $zTab $iParent]" + } else { + foreach {rowid x1 x2 y1 y2} $cell break + set cell_area [expr ($x2-$x1)*($y2-$y1)] + set zReport [format "Size = %.1f x %.1f Area = %.1f%%" \ + [expr $x2-$x1] [expr $y2-$y1] [expr 100.0*$cell_area/$total_area] + ] + } + + return $zReport +} + +view_node +bind .c view_node + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/ltmain.sh /tmp/3ARg2Grji7/sqlite3-3.6.16/ltmain.sh --- sqlite3-3.4.2/ltmain.sh 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/ltmain.sh 2009-06-12 03:37:46.000000000 +0100 @@ -1,52 +1,83 @@ -# ltmain.sh - Provide generalized library-building support services. -# NOTE: Changing this file will not affect anything until you rerun configure. -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, -# 2007 Free Software Foundation, Inc. -# Originally by Gordon Matzigkeit , 1996 -# -# This program is free software; you can redistribute it and/or modify +# Generated from ltmain.m4sh. 
+ +# ltmain.sh (GNU libtool) 2.2.6 +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007 2008 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # -# This program is distributed in the hope that it will be useful, but +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -basename="s,^.*/,,g" - -# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh -# is ksh but when the shell is invoked as "sh" and the current value of -# the _XPG environment variable is not equal to 1 (one), the special -# positional parameter $0, within a function call, is the name of the -# function. -progpath="$0" - -# The name of this program: -progname=`echo "$progpath" | $SED $basename` -modename="$progname" +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# Global variables: -EXIT_SUCCESS=0 -EXIT_FAILURE=1 +# Usage: $progname [OPTION]... [MODE-ARG]... +# +# Provide generalized library-building support services. 
+# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print informational messages (default) +# --version print version information +# -h, --help print short or long help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE. +# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.2.6 +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to . PROGRAM=ltmain.sh PACKAGE=libtool -VERSION="1.5.24 Debian 1.5.24-1" -TIMESTAMP=" (1.1220.2.456 2007/06/24 02:25:32)" +VERSION=2.2.6 +TIMESTAMP="" +package_revision=1.3012 -# Be Bourne compatible (taken from Autoconf:_AS_BOURNE_COMPATIBLE). +# Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: @@ -60,98 +91,261 @@ BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh -# Check that we have a working $echo. -if test "X$1" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift -elif test "X$1" = X--fallback-echo; then - # Avoid inline document here, it may be left over - : -elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then - # Yippee, $echo works! - : -else - # Restart under the correct shell, and then maybe $echo will work. - exec $SHELL "$progpath" --no-reexec ${1+"$@"} -fi - -if test "X$1" = X--fallback-echo; then - # used as fallback echo - shift - cat <&2 - $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit $EXIT_FAILURE -fi +dirname="s,/[^/]*$,," +basename="s,^.*/,," -# Global variables. -mode=$default_mode -nonopt= -prev= -prevopt= -run= -show="$echo" -show_help= -execute_dlfiles= -duplicate_deps=no -preserve_args= -lo2o="s/\\.lo\$/.${objext}/" -o2lo="s/\\.${objext}\$/.lo/" -extracted_archives= -extracted_serial=0 +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + # Extract subdirectory from the argument. 
+ func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi + func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` +} + +# Generated shell functions inserted here. + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# The name of this program: +# In the unlikely event $progname began with a '-', it would play havoc with +# func_echo (imagine progname=-n), so we prepend ./ in that case: +func_dirname_and_basename "$progpath" +progname=$func_basename_result +case $progname in + -*) progname=./$progname ;; +esac + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=: + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. +bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname${mode+: }$mode: $*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_error arg... +# Echo program name prefixed message to standard error. +func_error () +{ + $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 +} + +# func_warning arg... +# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. 
+func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." ## default + + +# func_grep expression filename +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_mkdir_p directory-path +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + my_directory_path="$1" + my_dir_list= + + if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then + + # Protect directory names starting with `-' + case $my_directory_path in + -*) my_directory_path="./$my_directory_path" ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$my_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + my_dir_list="$my_directory_path:$my_dir_list" + + # If the last portion added has no slash in it, the list is done + case $my_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"` + done + my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'` + + save_mkdir_p_IFS="$IFS"; IFS=':' + for my_dir in $my_dir_list; do + IFS="$save_mkdir_p_IFS" + # mkdir can fail with a `File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$my_dir" 2>/dev/null || : + done + IFS="$save_mkdir_p_IFS" + + # Bail out if we (or some other process) failed to create a directory. + test -d "$my_directory_path" || \ + func_fatal_error "Failed to create \`$1'" + fi +} -##################################### -# Shell function definitions: -# This seems to be the best place for them # func_mktempdir [string] # Make a temporary directory that won't clash with other running @@ -161,7 +355,7 @@ { my_template="${TMPDIR-/tmp}/${1-$progname}" - if test "$run" = ":"; then + if test "$opt_dry_run" = ":"; then # Return a directory name, but don't create it in dry-run mode my_tmpdir="${my_template}-$$" else @@ -170,524 +364,792 @@ my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` if test ! -d "$my_tmpdir"; then - # Failing that, at least try and use $RANDOM to avoid a race - my_tmpdir="${my_template}-${RANDOM-0}$$" + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" - save_mktempdir_umask=`umask` - umask 0077 - $mkdir "$my_tmpdir" - umask $save_mktempdir_umask + save_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$my_tmpdir" + umask $save_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure - test -d "$my_tmpdir" || { - $echo "cannot create temporary directory \`$my_tmpdir'" 1>&2 - exit $EXIT_FAILURE - } + test -d "$my_tmpdir" || \ + func_fatal_error "cannot create temporary directory \`$my_tmpdir'" fi - $echo "X$my_tmpdir" | $Xsed + $ECHO "X$my_tmpdir" | $Xsed } -# func_win32_libid arg -# return the library type of file 'arg' -# -# Need a lot of goo to handle *both* DLLs and import libs -# Has to be a shell function in order to 'eat' the argument -# that is supplied when $file_magic_command is called. -func_win32_libid () +# func_quote_for_eval arg +# Aesthetically quote ARG to be evaled later. 
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT +# is double-quoted, suitable for a subsequent eval, whereas +# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters +# which are still active within double quotes backslashified. +func_quote_for_eval () { - win32_libid_type="unknown" - win32_fileres=`file -L $1 2>/dev/null` - case $win32_fileres in - *ar\ archive\ import\ library*) # definitely import - win32_libid_type="x86 archive import" - ;; - *ar\ archive*) # could be an import, or static - if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \ - $EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then - win32_nmres=`eval $NM -f posix -A $1 | \ - $SED -n -e '1,100{ - / I /{ - s,.*,import, - p - q - } - }'` - case $win32_nmres in - import*) win32_libid_type="x86 archive import";; - *) win32_libid_type="x86 archive static";; - esac - fi - ;; - *DLL*) - win32_libid_type="x86 DLL" - ;; - *executable*) # but shell scripts are "executable" too... - case $win32_fileres in - *MS\ Windows\ PE\ Intel*) - win32_libid_type="x86 DLL" - ;; + case $1 in + *[\\\`\"\$]*) + func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;; + *) + func_quote_for_eval_unquoted_result="$1" ;; + esac + + case $func_quote_for_eval_unquoted_result in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and and variable + # expansion for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" esac - ;; - esac - $echo $win32_libid_type } -# func_infer_tag arg -# Infer tagged configuration to use if any are available and -# if one wasn't chosen via the "--tag" command line option. -# Only attempt this if the compiler in the base compile -# command doesn't match the default compiler. -# arg is usually of the form 'gcc ...' -func_infer_tag () +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () { - if test -n "$available_tags" && test -z "$tagname"; then - CC_quoted= - for arg in $CC; do - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - CC_quoted="$CC_quoted $arg" - done - case $@ in - # Blanks in the command may have been stripped by the calling shell, - # but not from the CC environment variable when configure was run. - " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;; - # Blanks at the start of $base_compile will cause this to fail - # if we don't check for them as well. + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "X$1" | $Xsed \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; *) - for z in $available_tags; do - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" - CC_quoted= - for arg in $CC; do - # Double-quote args containing other shell metacharacters. 
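func_quote_for_eval and func_quote_for_expand above replace the repeated inline case/sed quoting that the old script carried around (the removed copy is still visible in the old func_infer_tag below). The following is only a standalone sketch of the same idea, not the patch's code: the function and variable names (quote_for_eval, _q) are invented here, and the metacharacter test is simplified to a whitelist rather than the exact pattern ltmain.sh uses.

#!/bin/sh
# Escape the characters that stay active inside double quotes, then wrap the
# result in double quotes whenever it still contains anything the shell could
# split or expand, so that a later `eval` sees the argument as a single word.
quote_for_eval () {
  case $1 in
    *[\\\`\"\$]*) _q=$(printf '%s\n' "$1" | sed 's/\([`"$\\]\)/\\\1/g') ;;
    *)            _q=$1 ;;
  esac
  case $_q in
    *[!A-Za-z0-9_./=+:@,-]*|"") _q="\"$_q\"" ;;
  esac
  printf '%s\n' "$_q"
}
quote_for_eval 'two words'      # prints "two words"
quote_for_eval 'say "$HOME"'    # prints "say \"\$HOME\""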
- case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - CC_quoted="$CC_quoted $arg" - done - case "$@ " in - " $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) - # The compiler in the base compile command matches - # the one in the tagged configuration. - # Assume this is the tagged configuration we want. - tagname=$z - break - ;; - esac - fi - done - # If $tagname still isn't set, then no tagged configuration - # was found and let the user know that the "--tag" command - # line option must be used. - if test -z "$tagname"; then - $echo "$modename: unable to infer tagged configuration" - $echo "$modename: specify a tag with \`--tag'" 1>&2 - exit $EXIT_FAILURE -# else -# $echo "$modename: using $tagname tagged configuration" - fi - ;; - esac - fi + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" } -# func_extract_an_archive dir oldlib -func_extract_an_archive () +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () { - f_ex_an_ar_dir="$1"; shift - f_ex_an_ar_oldlib="$1" + my_cmd="$1" + my_fail_exp="${2-:}" - $show "(cd $f_ex_an_ar_dir && $AR x $f_ex_an_ar_oldlib)" - $run eval "(cd \$f_ex_an_ar_dir && $AR x \$f_ex_an_ar_oldlib)" || exit $? - if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "$modename: ERROR: object name conflicts: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" 1>&2 - exit $EXIT_FAILURE + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi fi } -# func_extract_archives gentop oldlib ... -func_extract_archives () + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () { - my_gentop="$1"; shift - my_oldlibs=${1+"$@"} - my_oldobjs="" - my_xlib="" - my_xabs="" - my_xdir="" - my_status="" + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } - $show "${rm}r $my_gentop" - $run ${rm}r "$my_gentop" - $show "$mkdir $my_gentop" - $run $mkdir "$my_gentop" - my_status=$? - if test "$my_status" -ne 0 && test ! -d "$my_gentop"; then - exit $my_status + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi fi +} - for my_xlib in $my_oldlibs; do - # Extract the objects. 
- case $my_xlib in - [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; - *) my_xabs=`pwd`"/$my_xlib" ;; - esac - my_xlib=`$echo "X$my_xlib" | $Xsed -e 's%^.*/%%'` - my_xlib_u=$my_xlib - while :; do - case " $extracted_archives " in - *" $my_xlib_u "*) - extracted_serial=`expr $extracted_serial + 1` - my_xlib_u=lt$extracted_serial-$my_xlib ;; - *) break ;; - esac - done - extracted_archives="$extracted_archives $my_xlib_u" - my_xdir="$my_gentop/$my_xlib_u" - $show "${rm}r $my_xdir" - $run ${rm}r "$my_xdir" - $show "$mkdir $my_xdir" - $run $mkdir "$my_xdir" - exit_status=$? - if test "$exit_status" -ne 0 && test ! -d "$my_xdir"; then - exit $exit_status - fi - case $host in - *-darwin*) - $show "Extracting $my_xabs" - # Do not bother doing anything if just a dry run - if test -z "$run"; then - darwin_orig_dir=`pwd` - cd $my_xdir || exit $? - darwin_archive=$my_xabs - darwin_curdir=`pwd` - darwin_base_archive=`$echo "X$darwin_archive" | $Xsed -e 's%^.*/%%'` - darwin_arches=`lipo -info "$darwin_archive" 2>/dev/null | $EGREP Architectures 2>/dev/null` - if test -n "$darwin_arches"; then - darwin_arches=`echo "$darwin_arches" | $SED -e 's/.*are://'` - darwin_arch= - $show "$darwin_base_archive has multiple architectures $darwin_arches" - for darwin_arch in $darwin_arches ; do - mkdir -p "unfat-$$/${darwin_base_archive}-${darwin_arch}" - lipo -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" - cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" - func_extract_an_archive "`pwd`" "${darwin_base_archive}" - cd "$darwin_curdir" - $rm "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" - done # $darwin_arches - ## Okay now we have a bunch of thin objects, gotta fatten them up :) - darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print| xargs basename | sort -u | $NL2SP` - darwin_file= - darwin_files= - for darwin_file in $darwin_filelist; do - darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` - lipo -create -output "$darwin_file" $darwin_files - done # $darwin_filelist - ${rm}r unfat-$$ - cd "$darwin_orig_dir" - else - cd "$darwin_orig_dir" - func_extract_an_archive "$my_xdir" "$my_xabs" - fi # $darwin_arches - fi # $run - ;; - *) - func_extract_an_archive "$my_xdir" "$my_xabs" - ;; - esac - my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` - done - func_extract_archives_result="$my_oldobjs" + + + +# func_version +# Echo version message to standard output and exit. +func_version () +{ + $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $SED -n '/^# Usage:/,/# -h/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + $ECHO + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help +# Echo long help message to standard output and exit. +func_help () +{ + $SED -n '/^# Usage:/,/# Report bugs to/ { + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ + p + }' < "$progpath" + exit $? 
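func_version, func_usage and func_help above are all self-documenting in the same way: they re-read "$progpath" and let sed strip the leading comment markers from the header block at the top of the script, substituting a few configure-time variables on the way out. A minimal standalone illustration of that trick is sketched below; the file name, its options and the version string are invented for the example.

#!/bin/sh
# Usage: selfdoc.sh [OPTION]
#
# --help     print this comment block and exit
# --version  print a made-up version string and exit
case $1 in
--help)
  # Same idea as func_usage: the help text lives in the script's own comments.
  sed -n '/^# Usage:/,/^# --version/ {
    s/^# \{0,1\}//
    p
  }' "$0"
  exit 0
  ;;
--version)
  echo "selfdoc 0.1"
  exit 0
  ;;
esac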
+} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + func_error "missing argument for $1" + exit_cmd=exit } -# End of Shell function definitions -##################################### -# Darwin sucks -eval std_shrext=\"$shrext_cmds\" +exit_cmd=: -disable_libs=no -# Parse our command line options once, thoroughly. -while test "$#" -gt 0 -do - arg="$1" + + + +# Check that we have a working $ECHO. +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then + # Yippee, $ECHO works! + : +else + # Restart under the correct shell, and then maybe $ECHO will work. + exec $SHELL "$progpath" --no-reexec ${1+"$@"} +fi +# Same for EGREP, and just to be sure, do LTCC as well +if test "x$EGREP" = x ; then + EGREP=egrep +fi +if test "x$LTCC" = x ; then + LTCC=${CC-gcc} +fi - case $arg in - -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;; - *) optarg= ;; - esac +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat <&2 - exit $EXIT_FAILURE - ;; - esac +magic="%%%MAGIC variable%%%" +magic_exe="%%%MAGIC EXE variable%%%" - case $tagname in - CC) - # Don't test for the "default" C tag, as we know, it's there, but - # not specially marked. - ;; - *) - if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then - taglist="$taglist $tagname" - # Evaluate the configuration. - eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`" - else - $echo "$progname: ignoring unknown tag $tagname" 1>&2 - fi - ;; - esac - ;; - *) - eval "$prev=\$arg" - ;; - esac +# Global variables. +# $mode is unset +nonopt= +execute_dlfiles= +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" +extracted_archives= +extracted_serial=0 - prev= - prevopt= - continue - fi +opt_dry_run=false +opt_duplicate_deps=false +opt_silent=false +opt_debug=: - # Have we seen a non-optional argument yet? - case $arg in - --help) - show_help=yes - ;; +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +# func_fatal_configuration arg... +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func_error ${1+"$@"} + func_error "See the $PACKAGE documentation for more information." + func_fatal_error "Fatal configuration error." +} - --version) - echo "\ -$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP - -Copyright (C) 2007 Free Software Foundation, Inc. -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - exit $? - ;; - --config) - ${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath +# func_config +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + # Now print the configurations for the tags. 
for tagname in $taglist; do - ${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath" + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" done - exit $? - ;; - - --debug) - $echo "$progname: enabling shell trace mode" - set -x - preserve_args="$preserve_args $arg" - ;; - --dry-run | -n) - run=: - ;; + exit $? +} - --features) - $echo "host: $host" +# func_features +# Display the features supported by this script. +func_features () +{ + $ECHO "host: $host" if test "$build_libtool_libs" = yes; then - $echo "enable shared libraries" + $ECHO "enable shared libraries" else - $echo "disable shared libraries" + $ECHO "disable shared libraries" fi if test "$build_old_libs" = yes; then - $echo "enable static libraries" + $ECHO "enable static libraries" else - $echo "disable static libraries" + $ECHO "disable static libraries" fi + exit $? - ;; +} - --finish) mode="finish" ;; +# func_enable_tag tagname +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname="$1" - --mode) prevopt="--mode" prev=mode ;; - --mode=*) mode="$optarg" ;; + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf="/$re_begincf/,/$re_endcf/p" + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac - --preserve-dup-deps) duplicate_deps="yes" ;; + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" - --quiet | --silent) - show=: - preserve_args="$preserve_args $arg" - ;; + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} - --tag) - prevopt="--tag" - prev=tag - preserve_args="$preserve_args --tag" +# Parse options once, thoroughly. This comes as soon as possible in +# the script to make things like `libtool --version' happen quickly. 
+{ + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift ;; - --tag=*) - set tag "$optarg" ${1+"$@"} - shift - prev=tag - preserve_args="$preserve_args --tag" + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift ;; - - -dlopen) - prevopt="-dlopen" - prev=execute_dlfiles + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift ;; - - -*) - $echo "$modename: unrecognized option \`$arg'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift ;; - - *) - nonopt="$arg" - break + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac -done -if test -n "$prevopt"; then - $echo "$modename: option \`$prevopt' requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE -fi + # Parse non-mode specific arguments: + while test "$#" -gt 0; do + opt="$1" + shift -case $disable_libs in -no) - ;; -shared) - build_libtool_libs=no - build_old_libs=yes - ;; -static) - build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` - ;; -esac + case $opt in + --config) func_config ;; -# If this variable is set in any of the actions, the command in it -# will be execed at the end. This prevents here-documents from being -# left over by shells. -exec_cmd= + --debug) preserve_args="$preserve_args $opt" + func_echo "enabling shell trace mode" + opt_debug='set -x' + $opt_debug + ;; -if test -z "$show_help"; then + -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break + execute_dlfiles="$execute_dlfiles $1" + shift + ;; - # Infer the operation mode. - if test -z "$mode"; then - $echo "*** Warning: inferring the mode of operation is deprecated." 1>&2 - $echo "*** Future versions of Libtool will require --mode=MODE be specified." 
1>&2 - case $nonopt in - *cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*) - mode=link - for arg - do - case $arg in - -c) - mode=compile - break - ;; - esac - done - ;; - *db | *dbx | *strace | *truss) - mode=execute - ;; - *install*|cp|mv) - mode=install - ;; - *rm) - mode=uninstall + --dry-run | -n) opt_dry_run=: ;; + --features) func_features ;; + --finish) mode="finish" ;; + + --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break + case $1 in + # Valid mode arguments: + clean) ;; + compile) ;; + execute) ;; + finish) ;; + install) ;; + link) ;; + relink) ;; + uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; + esac + + mode="$1" + shift + ;; + + --preserve-dup-deps) + opt_duplicate_deps=: ;; + + --quiet|--silent) preserve_args="$preserve_args $opt" + opt_silent=: + ;; + + --verbose| -v) preserve_args="$preserve_args $opt" + opt_silent=false + ;; + + --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break + preserve_args="$preserve_args $opt $1" + func_enable_tag "$1" # tagname is set here + shift + ;; + + # Separate optargs to long options: + -dlopen=*|--mode=*|--tag=*) + func_opt_split "$opt" + set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} + shift + ;; + + -\?|-h) func_usage ;; + --help) opt_help=: ;; + --version) func_version ;; + + -*) func_fatal_help "unrecognized option \`$opt'" ;; + + *) nonopt="$opt" + break + ;; + esac + done + + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: ;; *) - # If we have no mode, but dlfiles were specified, then do execute mode. - test -n "$execute_dlfiles" && mode=execute + opt_duplicate_compiler_generated_deps=$opt_duplicate_deps + ;; + esac - # Just use the default operation mode. - if test -z "$mode"; then - if test -n "$nonopt"; then - $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2 - else - $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2 - fi + # Having warned about all mis-specified options, bail out if + # anything was wrong. + $exit_cmd $EXIT_FAILURE +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF fi - ;; - esac + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +## ----------- ## +## Main. 
## +## ----------- ## + +$opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" fi + test -z "$mode" && func_fatal_error "error: you must specify a MODE." + + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. if test -n "$execute_dlfiles" && test "$mode" != execute; then - $echo "$modename: unrecognized option \`-dlopen'" 1>&2 - $echo "$help" 1>&2 + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help="$help" - help="Try \`$modename --help --mode=$mode' for more information." + help="Try \`$progname --help --mode=$mode' for more information." +} - # These modes are in order of execution frequency so that they run quickly. - case $mode in - # libtool compile mode - compile) - modename="$modename: compile" - # Get the compilation command and the source file. - base_compile= - srcfile="$nonopt" # always keep a non-empty value in "srcfile" - suppress_opt=yes - suppress_output= - arg_mode=normal - libobj= - later= - for arg - do - case $arg_mode in - arg ) - # do not "continue". Instead, add this to base_compile - lastarg="$arg" - arg_mode=normal - ;; +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} - target ) - libobj="$arg" - arg_mode=normal - continue - ;; +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} - normal ) - # Accept any command-line options. - case $arg in - -o) - if test -n "$libobj" ; then - $echo "$modename: you cannot specify \`-o' more than once" 1>&2 - exit $EXIT_FAILURE - fi - arg_mode=target - continue - ;; +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} - -static | -prefer-pic | -prefer-non-pic) - later="$later $arg" +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. 
+func_ltwrapper_scriptname () +{ + func_ltwrapper_scriptname_result="" + if func_ltwrapper_executable_p "$1"; then + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" + fi +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_quote_for_eval "$arg" + CC_quoted="$CC_quoted $func_quote_for_eval_result" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_quote_for_eval "$arg" + CC_quoted="$CC_quoted $func_quote_for_eval_result" + done + case "$@ " in + " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T <\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - lastarg="$lastarg $arg" + func_quote_for_eval "$arg" + lastarg="$lastarg $func_quote_for_eval_result" done IFS="$save_ifs" - lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"` + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result # Add the arguments to base_compile. base_compile="$base_compile $lastarg" continue ;; - * ) + *) # Accept the current argument as the source file. # The previous "srcfile" becomes the current argument. # @@ -738,65 +1194,42 @@ esac # case $arg_mode # Aesthetically quote the previous argument. - lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"` - - case $lastarg in - # Double-quote args containing other shell metacharacters. - # Many Bourne shells cannot handle close brackets correctly - # in scan sets, and some SunOS ksh mistreat backslash-escaping - # in scan sets (worked around with variable expansion), - # and furthermore cannot handle '|' '&' '(' ')' in scan sets - # at all, so we specify them separately. - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - lastarg="\"$lastarg\"" - ;; - esac - - base_compile="$base_compile $lastarg" + func_quote_for_eval "$lastarg" + base_compile="$base_compile $func_quote_for_eval_result" done # for arg case $arg_mode in arg) - $echo "$modename: you must specify an argument for -Xcompile" - exit $EXIT_FAILURE + func_fatal_error "you must specify an argument for -Xcompile" ;; target) - $echo "$modename: you must specify a target with \`-o'" 1>&2 - exit $EXIT_FAILURE + func_fatal_error "you must specify a target with \`-o'" ;; *) # Get the name of the library object. - [ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'` + test -z "$libobj" && { + func_basename "$srcfile" + libobj="$func_basename_result" + } ;; esac # Recognize several different file suffixes. # If the user specifies -o file.o, it is replaced with file.lo - xform='[cCFSifmso]' case $libobj in - *.ada) xform=ada ;; - *.adb) xform=adb ;; - *.ads) xform=ads ;; - *.asm) xform=asm ;; - *.c++) xform=c++ ;; - *.cc) xform=cc ;; - *.ii) xform=ii ;; - *.class) xform=class ;; - *.cpp) xform=cpp ;; - *.cxx) xform=cxx ;; - *.[fF][09]?) xform=[fF][09]. ;; - *.for) xform=for ;; - *.java) xform=java ;; - *.obj) xform=obj ;; + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? 
| *.for | *.java | *.obj | *.sx) + func_xform "$libobj" + libobj=$func_xform_result + ;; esac - libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` - case $libobj in - *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;; + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; *) - $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2 - exit $EXIT_FAILURE + func_fatal_error "cannot determine name of library object from \`$libobj'" ;; esac @@ -804,7 +1237,15 @@ for arg in $later; do case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + continue + ;; + -static) + build_libtool_libs=no build_old_libs=yes continue ;; @@ -821,28 +1262,17 @@ esac done - qlibobj=`$echo "X$libobj" | $Xsed -e "$sed_quote_subst"` - case $qlibobj in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qlibobj="\"$qlibobj\"" ;; - esac - test "X$libobj" != "X$qlibobj" \ - && $echo "X$libobj" | grep '[]~#^*{};<>?"'"'"' &()|`$[]' \ - && $echo "$modename: libobj name \`$libobj' may not contain shell special characters." - objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` - xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$obj"; then - xdir= - else - xdir=$xdir/ - fi + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name \`$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname="$func_basename_result" + xdir="$func_dirname_result" lobj=${xdir}$objdir/$objname - if test -z "$base_compile"; then - $echo "$modename: you must specify a compilation command" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" # Delete any leftover library objects. if test "$build_old_libs" = yes; then @@ -851,12 +1281,9 @@ removelist="$lobj $libobj ${libobj}T" fi - $run $rm $removelist - trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 - # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in - cygwin* | mingw* | pw32* | os2*) + cygwin* | mingw* | pw32* | os2* | cegcc*) pic_mode=default ;; esac @@ -868,10 +1295,8 @@ # Calculate the filename of the output object if compiler does # not support -o with -c if test "$compiler_c_o" = no; then - output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} + output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} lockfile="$output_obj.lock" - removelist="$removelist $output_obj $lockfile" - trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15 else output_obj= need_locks=no @@ -881,13 +1306,13 @@ # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test "$need_locks" = yes; then - until $run ln "$progpath" "$lockfile" 2>/dev/null; do - $show "Waiting for $lockfile to be removed" + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" sleep 2 done elif test "$need_locks" = warn; then if test -f "$lockfile"; then - $echo "\ + $ECHO "\ *** ERROR, $lockfile exists and contains: `cat $lockfile 2>/dev/null` @@ -898,34 +1323,22 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." 
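The rewritten func_infer_tag above still refuses to guess when the compiler on the command line does not match the $CC recorded in any tagged configuration, so mixed-language packages have to pass --tag explicitly. Illustrative invocations follow; the compiler, file and library names are made up, and the generated libtool script is assumed to have a CXX tag configured.

./libtool --tag=CXX --mode=compile g++ -c foo.cpp
./libtool --tag=CXX --mode=link g++ -o libfoo.la foo.lo -rpath /usr/local/lib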
- $run $rm $removelist + $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi - $echo "$srcfile" > "$lockfile" + removelist="$removelist $output_obj" + $ECHO "$srcfile" > "$lockfile" fi + $opt_dry_run || $RM $removelist + removelist="$removelist $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + if test -n "$fix_srcfile_path"; then eval srcfile=\"$fix_srcfile_path\" fi - qsrcfile=`$echo "X$srcfile" | $Xsed -e "$sed_quote_subst"` - case $qsrcfile in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qsrcfile="\"$qsrcfile\"" ;; - esac - - $run $rm "$libobj" "${libobj}T" - - # Create a libtool object file (analogous to a ".la" file), - # but don't create it if we're doing a dry run. - test -z "$run" && cat > ${libobj}T </dev/null`" != "X$srcfile"; then - $echo "\ + $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` @@ -978,45 +1378,27 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." - $run $rm $removelist + $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then - $show "$mv $output_obj $lobj" - if $run $mv $output_obj $lobj; then : - else - error=$? - $run $rm $removelist - exit $error - fi + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi - # Append the name of the PIC object to the libtool object file. - test -z "$run" && cat >> ${libobj}T <> ${libobj}T </dev/null`" != "X$srcfile"; then - $echo "\ + $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` @@ -1050,5466 +1427,6825 @@ avoid parallel builds (make -j) in this platform, or get a better compiler." - $run $rm $removelist + $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then - $show "$mv $output_obj $obj" - if $run $mv $output_obj $obj; then : - else - error=$? - $run $rm $removelist - exit $error - fi + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi - - # Append the name of the non-PIC object the libtool object file. - # Only append if the libtool object file exists. - test -z "$run" && cat >> ${libobj}T <> ${libobj}T <&2 - fi - if test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=yes - ;; - -static) - if test -z "$pic_flag" && test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=built - ;; - -static-libtool-libs) - if test -z "$pic_flag" && test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=yes - ;; - esac - build_libtool_libs=no - build_old_libs=yes - break - ;; - esac - done +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. - # See if our shared archives depend on static archives. - test -n "$old_archive_from_new_cmds" && build_old_libs=yes +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; - # Go through the arguments, transforming them on the way. 
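Compile mode above now finishes by having func_write_libtool_object (defined earlier in this hunk) write a small ${libobj}T control file and rename it into place, instead of building it with ad-hoc here-documents. For orientation, a generated .lo file looks roughly like the sketch below; the object names are invented and the exact header comment depends on the build, but the file is plain shell assignments precisely so that func_source and func_lalib_p can read it back.

# foo.lo - a libtool object file
# Generated by ltmain.sh (GNU libtool) 2.2.6
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# Name of the PIC object.
pic_object='.libs/foo.o'

# Name of the non-PIC object.
non_pic_object='foo.o'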
- while test "$#" -gt 0; do - arg="$1" - shift - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test - ;; - *) qarg=$arg ;; - esac - libtool_args="$libtool_args $qarg" + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE - # If the previous option needs an argument, assign it. - if test -n "$prev"; then - case $prev in - output) - compile_command="$compile_command @OUTPUT@" - finalize_command="$finalize_command @OUTPUT@" - ;; - esac +Compile a source file into a libtool library object. - case $prev in - dlfiles|dlprefiles) - if test "$preload" = no; then - # Add the symbol object into the linking commands. - compile_command="$compile_command @SYMFILE@" - finalize_command="$finalize_command @SYMFILE@" - preload=yes - fi - case $arg in - *.la | *.lo) ;; # We handle these cases below. - force) - if test "$dlself" = no; then - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - self) - if test "$prev" = dlprefiles; then - dlself=yes - elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then - dlself=yes - else - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - *) - if test "$prev" = dlfiles; then - dlfiles="$dlfiles $arg" - else - dlprefiles="$dlprefiles $arg" - fi - prev= - continue - ;; - esac - ;; - expsyms) - export_symbols="$arg" - if test ! -f "$arg"; then - $echo "$modename: symbol file \`$arg' does not exist" - exit $EXIT_FAILURE - fi - prev= - continue - ;; - expsyms_regex) - export_symbols_regex="$arg" - prev= - continue - ;; - inst_prefix) - inst_prefix_dir="$arg" - prev= - continue - ;; - precious_regex) - precious_files_regex="$arg" - prev= - continue - ;; - release) - release="-$arg" - prev= - continue - ;; - objectlist) - if test -f "$arg"; then - save_arg=$arg - moreargs= - for fil in `cat $save_arg` - do -# moreargs="$moreargs $fil" - arg=$fil - # A libtool-controlled object. +This mode accepts the following additional options: - # Check to see that this really is a libtool object. - if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - pic_object= - non_pic_object= + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to building PIC objects only + -prefer-non-pic try to building non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking - # Read the .lo file - # If there is no directory component, then add one. - case $arg in - */* | *\\*) . $arg ;; - *) . ./$arg ;; - esac +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. - if test -z "$pic_object" || \ - test -z "$non_pic_object" || - test "$pic_object" = none && \ - test "$non_pic_object" = none; then - $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit $EXIT_FAILURE - fi +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... 
- if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" +Automatically set library path, then run a program. - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue - else - # If libtool objects are unsupported, then we need to preload. - prev=dlprefiles - fi - fi +This mode accepts the following additional options: - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi + -dlopen FILE add the directory containing FILE to the library path - # A PIC object. - libobjs="$libobjs $pic_object" - arg="$pic_object" - fi +This mode sets the library path environment variable according to \`-dlopen' +flags. - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. - # A standard non-PIC object - non_pic_objects="$non_pic_objects $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - else - # If the PIC object exists, use it instead. - # $xdir was prepended to $pic_object above. - non_pic_object="$pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi - else - # Only an error if not doing a dry-run. - if test -z "$run"; then - $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit $EXIT_FAILURE - else - # Dry-run case. +Then, COMMAND is executed, with ARGS as arguments." + ;; - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... - pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` - non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` - libobjs="$libobjs $pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi - fi - done - else - $echo "$modename: link input file \`$save_arg' does not exist" - exit $EXIT_FAILURE - fi - arg=$save_arg - prev= - continue - ;; - rpath | xrpath) - # We need an absolute path. 
- case $arg in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit $EXIT_FAILURE - ;; - esac - if test "$prev" = rpath; then - case "$rpath " in - *" $arg "*) ;; - *) rpath="$rpath $arg" ;; - esac - else - case "$xrpath " in - *" $arg "*) ;; - *) xrpath="$xrpath $arg" ;; - esac - fi - prev= - continue - ;; - xcompiler) - compiler_flags="$compiler_flags $qarg" - prev= - compile_command="$compile_command $qarg" - finalize_command="$finalize_command $qarg" - continue - ;; - xlinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $wl$qarg" - prev= - compile_command="$compile_command $wl$qarg" - finalize_command="$finalize_command $wl$qarg" - continue - ;; - xcclinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $qarg" - prev= - compile_command="$compile_command $qarg" - finalize_command="$finalize_command $qarg" - continue - ;; - shrext) - shrext_cmds="$arg" - prev= - continue - ;; - darwin_framework|darwin_framework_skip) - test "$prev" = "darwin_framework" && compiler_flags="$compiler_flags $arg" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - prev= - continue - ;; - *) - eval "$prev=\"\$arg\"" - prev= - continue - ;; - esac - fi # test -n "$prev" +Complete the installation of libtool libraries. - prevarg="$arg" +Each LIBDIR is a directory that contains libtool libraries. - case $arg in - -all-static) - if test -n "$link_static_flag"; then - compile_command="$compile_command $link_static_flag" - finalize_command="$finalize_command $link_static_flag" - fi - continue - ;; +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; - -allow-undefined) - # FIXME: remove this flag sometime in the future. - $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2 - continue - ;; + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... - -avoid-version) - avoid_version=yes - continue - ;; +Install executables or libraries. - -dlopen) - prev=dlfiles - continue - ;; +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. - -dlpreopen) - prev=dlprefiles - continue - ;; +The following components of INSTALL-COMMAND are treated specially: - -export-dynamic) - export_dynamic=yes - continue - ;; + -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation - -export-symbols | -export-symbols-regex) - if test -n "$export_symbols" || test -n "$export_symbols_regex"; then - $echo "$modename: more than one -exported-symbols argument is not allowed" - exit $EXIT_FAILURE - fi - if test "X$arg" = "X-export-symbols"; then - prev=expsyms - else - prev=expsyms_regex - fi - continue - ;; +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; - -framework|-arch|-isysroot) - case " $CC " in - *" ${arg} ${1} "* | *" ${arg} ${1} "*) - prev=darwin_framework_skip ;; - *) compiler_flags="$compiler_flags $arg" - prev=darwin_framework ;; - esac - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - continue - ;; + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... - -inst-prefix-dir) - prev=inst_prefix - continue - ;; +Link object files or libraries together to form another library, or to +create an executable program. 
- # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* - # so, if we see these flags be careful not to treat them like -L - -L[A-Z][A-Z]*:*) - case $with_gcc/$host in - no/*-*-irix* | /*-*-irix*) - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - ;; - esac - continue - ;; +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. - -L*) - dir=`$echo "X$arg" | $Xsed -e 's/^-L//'` - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - absdir=`cd "$dir" && pwd` - if test -z "$absdir"; then - $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2 - absdir="$dir" - notinst_path="$notinst_path $dir" - fi - dir="$absdir" - ;; - esac - case "$deplibs " in - *" -L$dir "*) ;; - *) - deplibs="$deplibs -L$dir" - lib_search_path="$lib_search_path $dir" - ;; - esac - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) - testbindir=`$echo "X$dir" | $Xsed -e 's*/lib$*/bin*'` - case :$dllsearchpath: in - *":$dir:"*) ;; - *) dllsearchpath="$dllsearchpath:$dir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - *) dllsearchpath="$dllsearchpath:$testbindir";; - esac - ;; - esac - continue - ;; +The following components of LINK-COMMAND are treated specially: - -l*) - if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos*) - # These systems don't actually have a C or math library (as such) - continue - ;; - *-*-os2*) - # These systems don't actually have a C library (as such) - test "X$arg" = "X-lc" && continue - ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc due to us having libc/libc_r. - test "X$arg" = "X-lc" && continue - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C and math libraries are in the System framework - deplibs="$deplibs -framework System" - continue - ;; - *-*-sco3.2v5* | *-*-sco5v6*) - # Causes problems with __ctype - test "X$arg" = "X-lc" && continue - ;; - *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) - # Compiler inserts libc in the correct place for threads to work - test "X$arg" = "X-lc" && continue - ;; - esac - elif test "X$arg" = "X-lc_r"; then - case $host in - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc_r directly, use -pthread flag. 
- continue - ;; - esac - fi - deplibs="$deplibs $arg" - continue - ;; + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface - # Tru64 UNIX uses -model [arg] to determine the layout of C++ - # classes, name mangling, and exception handling. - -model) - compile_command="$compile_command $arg" - compiler_flags="$compiler_flags $arg" - finalize_command="$finalize_command $arg" - prev=xcompiler - continue - ;; +All other options (arguments beginning with \`-') are ignored. - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) - compiler_flags="$compiler_flags $arg" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - continue - ;; +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. - -module) - module=yes - continue - ;; +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. 
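As a minimal sketch of the usage that the link-mode help above describes (the compiler, source files, library name, and install prefix here are hypothetical, not taken from the patch):

    # Compile sources into libtool objects (.lo), then link them into a
    # libtool library (.la).  -rpath is required because the output ends in
    # .la, and -version-info supplies CURRENT:REVISION:AGE as listed above.
    libtool --mode=compile gcc -c foo.c -o foo.lo
    libtool --mode=compile gcc -c bar.c -o bar.lo
    libtool --mode=link gcc -o libfoo.la foo.lo bar.lo \
            -rpath /usr/local/lib -version-info 1:0:0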
- # -64, -mips[0-9] enable 64-bit mode on the SGI compiler - # -r[0-9][0-9]* specifies the processor on the SGI compiler - # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler - # +DA*, +DD* enable 64-bit mode on the HP compiler - # -q* pass through compiler args for the IBM compiler - # -m* pass through architecture-specific compiler args for GCC - # -m*, -t[45]*, -txscale* pass through architecture-specific - # compiler args for GCC - # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC - # -F/path gives path to uninstalled frameworks, gcc on darwin - # @file GCC response files - -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ - -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*) +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - compiler_flags="$compiler_flags $arg" - continue +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." ;; - -shrext) - prev=shrext - continue - ;; + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... - -no-fast-install) - fast_install=no - continue - ;; +Remove libraries from an installation directory. - -no-install) - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin*) - # The PATH hackery in wrapper scripts is required on Windows - # and Darwin in order for the loader to find any dlls it needs. - $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2 - $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2 - fast_install=no - ;; - *) no_install=yes ;; - esac - continue - ;; +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. - -no-undefined) - allow_undefined=no - continue - ;; +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; - -objectlist) - prev=objectlist - continue - ;; + *) + func_fatal_help "invalid operation mode \`$mode'" + ;; + esac - -o) prev=output ;; + $ECHO + $ECHO "Try \`$progname --help' for more information about other modes." - -precious-files-regex) - prev=precious_regex - continue - ;; + exit $? +} - -release) - prev=release - continue - ;; - - -rpath) - prev=rpath - continue - ;; + # Now that we've collected a possible --mode arg, show help if necessary + $opt_help && func_mode_help - -R) - prev=xrpath - continue - ;; - -R*) - dir=`$echo "X$arg" | $Xsed -e 's/^-R//'` - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - $echo "$modename: only absolute run-paths are allowed" 1>&2 - exit $EXIT_FAILURE - ;; - esac - case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - continue - ;; +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. 
+ cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" - -static | -static-libtool-libs) - # The effects of -static are defined in a previous loop. - # We used to do the same as -all-static on platforms that - # didn't have a PIC flag, but the assumption that the effects - # would be equivalent was wrong. It would break on at least - # Digital Unix and AIX. - continue - ;; + # Handle -dlopen flags immediately. + for file in $execute_dlfiles; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" - -thread-safe) - thread_safe=yes - continue - ;; + dir= + case $file in + *.la) + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" - -version-info) - prev=vinfo - continue - ;; - -version-number) - prev=vinfo - vinfo_number=yes - continue - ;; + # Read the libtool library. + dlname= + library_names= + func_source "$file" - -Wc,*) - args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'` - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - case $flag in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - flag="\"$flag\"" - ;; - esac - arg="$arg $wl$flag" - compiler_flags="$compiler_flags $flag" - done - IFS="$save_ifs" - arg=`$echo "X$arg" | $Xsed -e "s/^ //"` - ;; + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi - -Wl,*) - args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'` - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - case $flag in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - flag="\"$flag\"" - ;; - esac - arg="$arg $wl$flag" - compiler_flags="$compiler_flags $wl$flag" - linker_flags="$linker_flags $flag" - done - IFS="$save_ifs" - arg=`$echo "X$arg" | $Xsed -e "s/^ //"` - ;; + func_dirname "$file" "" "." + dir="$func_dirname_result" - -Xcompiler) - prev=xcompiler - continue + if test -f "$dir/$objdir/$dlname"; then + dir="$dir/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi ;; - -Xlinker) - prev=xlinker - continue + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir="$func_dirname_result" ;; - -XCClinker) - prev=xcclinker + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" continue ;; + esac - # Some other compiler flag. - -* | +*) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - ;; - - *.$objext) - # A standard object. - objs="$objs $arg" - ;; - - *.lo) - # A libtool-controlled object. - - # Check to see that this really is a libtool object. - if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - pic_object= - non_pic_object= + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" - # Read the .lo file - # If there is no directory component, then add one. - case $arg in - */* | *\\*) . $arg ;; - *) . ./$arg ;; - esac + # Now add the directory to shlibpath_var. 
+ if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done - if test -z "$pic_object" || \ - test -z "$non_pic_object" || - test "$pic_object" = none && \ - test "$non_pic_object" = none; then - $echo "$modename: cannot find name of object for \`$arg'" 1>&2 - exit $EXIT_FAILURE - fi + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" - # Extract subdirectory from the argument. - xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -*) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_quote_for_eval "$file" + args="$args $func_quote_for_eval_result" + done - if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var else - # If libtool objects are unsupported, then we need to preload. - prev=dlprefiles - fi - fi + $lt_unset $lt_var + fi" + done - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + $ECHO "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} - # A PIC object. - libobjs="$libobjs $pic_object" - arg="$pic_object" - fi +test "$mode" = execute && func_mode_execute ${1+"$@"} - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" - # A standard non-PIC object - non_pic_objects="$non_pic_objects $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - else - # If the PIC object exists, use it instead. - # $xdir was prepended to $pic_object above. - non_pic_object="$pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi - else - # Only an error if not doing a dry-run. - if test -z "$run"; then - $echo "$modename: \`$arg' is not a valid libtool object" 1>&2 - exit $EXIT_FAILURE - else - # Dry-run case. +# func_mode_finish arg... +func_mode_finish () +{ + $opt_debug + libdirs="$nonopt" + admincmds= - # Extract subdirectory from the argument. 
- xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'` - if test "X$xdir" = "X$arg"; then - xdir= - else - xdir="$xdir/" - fi + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for dir + do + libdirs="$libdirs $dir" + done - pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"` - non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"` - libobjs="$libobjs $pic_object" - non_pic_objects="$non_pic_objects $non_pic_object" - fi + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' fi - ;; + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || admincmds="$admincmds + $cmds" + fi + done + fi - *.$libext) - # An archive. - deplibs="$deplibs $arg" - old_deplibs="$old_deplibs $arg" - continue - ;; + # Exit here if they wanted silent mode. + $opt_silent && exit $EXIT_SUCCESS - *.la) - # A libtool-controlled library. + $ECHO "X----------------------------------------------------------------------" | $Xsed + $ECHO "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + $ECHO + $ECHO "If you ever happen to want to link against installed libraries" + $ECHO "in a given directory, LIBDIR, you must either use libtool, and" + $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'" + $ECHO "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable" + $ECHO " during execution" + fi + if test -n "$runpath_var"; then + $ECHO " - add LIBDIR to the \`$runpath_var' environment variable" + $ECHO " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" - if test "$prev" = dlfiles; then - # This library was specified with -dlopen. - dlfiles="$dlfiles $arg" - prev= - elif test "$prev" = dlprefiles; then - # The library was specified with -dlpreopen. - dlprefiles="$dlprefiles $arg" + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + $ECHO + + $ECHO "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual" + $ECHO "pages." + ;; + *) + $ECHO "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + $ECHO "X----------------------------------------------------------------------" | $Xsed + exit $EXIT_SUCCESS +} + +test "$mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + $ECHO "X$nonopt" | $GREP shtool >/dev/null; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. 
+ func_quote_for_eval "$arg" + install_prog="$install_prog$func_quote_for_eval_result" + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + for arg + do + if test -n "$dest"; then + files="$files $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + case " $install_prog " in + *[\\\ /]cp\ *) ;; + *) prev=$arg ;; + esac + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then prev= else - deplibs="$deplibs $arg" + dest=$arg + continue fi - continue ;; + esac - # Some other compiler argument. - *) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - ;; - esac # arg + # Aesthetically quote the argument. + func_quote_for_eval "$arg" + install_prog="$install_prog $func_quote_for_eval_result" + done - # Now actually substitute the argument into the commands. - if test -n "$arg"; then - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" - fi - done # argument parsing loop + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" - if test -n "$prev"; then - $echo "$modename: the \`$prevarg' option requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" - if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then - eval arg=\"$export_dynamic_flag_spec\" - compile_command="$compile_command $arg" - finalize_command="$finalize_command $arg" + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi fi - oldlibs= - # calculate the name of the file, without its directory - outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'` - libobjs_save="$libobjs" + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result - if test -n "$shlibpath_var"; then - # get the directories listed in $shlibpath_var - eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= else - shlib_search_path= - fi - eval sys_lib_search_path=\"$sys_lib_search_path_spec\" - eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" - output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'` - if test "X$output_objdir" = "X$output"; then - output_objdir="$objdir" - else - output_objdir="$output_objdir/$objdir" - fi - # Create the object directory. - if test ! -d "$output_objdir"; then - $show "$mkdir $output_objdir" - $run $mkdir $output_objdir - exit_status=$? - if test "$exit_status" -ne 0 && test ! -d "$output_objdir"; then - exit $exit_status - fi + # Not a directory, so check to see that there is only one file specified. 
+ set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" fi - - # Determine the type of output - case $output in - "") - $echo "$modename: you must specify an output file" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - *.$libext) linkmode=oldlib ;; - *.lo | *.$objext) linkmode=obj ;; - *.la) linkmode=lib ;; - *) linkmode=prog ;; # Anything else should be a program. - esac - - case $host in - *cygwin* | *mingw* | *pw32*) - # don't eliminate duplications in $postdeps and $predeps - duplicate_compiler_generated_deps=yes - ;; + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; *) - duplicate_compiler_generated_deps=$duplicate_deps + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done ;; esac - specialdeplibs= - libs= - # Find all interdependent deplibs by searching for libraries - # that are linked more than once (e.g. -la -lb -la) - for deplib in $deplibs; do - if test "X$duplicate_deps" = "Xyes" ; then - case "$libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - libs="$libs $deplib" - done + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" - if test "$linkmode" = lib; then - libs="$predeps $libs $compiler_lib_search_path $postdeps" + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do - # Compute libraries that are listed more than once in $predeps - # $postdeps and mark them as special (i.e., whose duplicates are - # not to be eliminated). - pre_post_deps= - if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then - for pre_post_dep in $predeps $postdeps; do - case "$pre_post_deps " in - *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + staticlibs="$staticlibs $file" + ;; + + *.la) + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) current_libdirs="$current_libdirs $libdir" ;; esac - pre_post_deps="$pre_post_deps $pre_post_dep" - done - fi - pre_post_deps= - fi + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) future_libdirs="$future_libdirs $libdir" ;; + esac + fi - deplibs= - newdependency_libs= - newlib_search_path= - need_relink=no # whether we're linking any uninstalled libtool libraries - notinst_deplibs= # not-installed libtool libraries - case $linkmode in - lib) - passes="conv link" - for file in $dlfiles $dlprefiles; do - case $file in - *.la) ;; - *) - $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2 - exit $EXIT_FAILURE + func_dirname "$file" "/" "" + dir="$func_dirname_result" + dir="$dir$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. 
+ # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac ;; esac - done - ;; - prog) - compile_deplibs= - finalize_deplibs= - alldeplibs=no - newdlfiles= - newdlprefiles= - passes="conv scan dlopen dlpreopen link" - ;; - *) passes="conv" - ;; - esac - for pass in $passes; do - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan"; then - libs="$deplibs" - deplibs= - fi - if test "$linkmode" = prog; then - case $pass in - dlopen) libs="$dlfiles" ;; - dlpreopen) libs="$dlprefiles" ;; - link) - libs="$deplibs %DEPLIBS%" - test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" - ;; - esac - fi - if test "$pass" = dlopen; then - # Collect dlpreopened libraries - save_deplibs="$deplibs" - deplibs= - fi - for deplib in $libs; do - lib= - found=no - case $deplib in - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - compiler_flags="$compiler_flags $deplib" - fi - continue - ;; - -l*) - if test "$linkmode" != lib && test "$linkmode" != prog; then - $echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2 - continue + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' fi - name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` - for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do - for search_ext in .la $std_shrext .so .a; do - # Search the libtool library - lib="$searchdir/lib${name}${search_ext}" - if test -f "$lib"; then - if test "$search_ext" = ".la"; then - found=yes - else - found=no - fi - break 2 - fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. 
+ for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done - done - if test "$found" != yes; then - # deplib doesn't seem to be a libtool library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - else # deplib is a libtool library - # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, - # We need to do some special things here, and not later. - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $deplib "*) - if (${SED} -e '2q' $lib | - grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - library_names= - old_library= - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac - for l in $old_library $library_names; do - ll="$l" - done - if test "X$ll" = "X$old_library" ; then # only static version available - found=no - ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` - test "X$ladir" = "X$lib" && ladir="." - lib=$ladir/$old_library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - fi - fi - ;; - *) ;; - esac - fi - fi - ;; # -l - -L*) - case $linkmode in - lib) - deplibs="$deplib $deplibs" - test "$pass" = conv && continue - newdependency_libs="$deplib $newdependency_libs" - newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` - ;; - prog) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - if test "$pass" = scan; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'` - ;; - *) - $echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2 - ;; - esac # linkmode - continue - ;; # -L - -R*) - if test "$pass" = link; then - dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'` - # Make sure the xrpath contains only unique directories. - case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - fi - deplibs="$deplib $deplibs" - continue - ;; - *.la) lib="$deplib" ;; - *.$libext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - case $linkmode in - lib) - valid_a_lib=no - case $deplibs_check_method in - match_pattern*) - set dummy $deplibs_check_method - match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` - if eval $echo \"$deplib\" 2>/dev/null \ - | $SED 10q \ - | $EGREP "$match_pattern_regex" > /dev/null; then - valid_a_lib=yes - fi - ;; - pass_all) - valid_a_lib=yes - ;; - esac - if test "$valid_a_lib" != yes; then - $echo - $echo "*** Warning: Trying to link with static lib archive $deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. 
But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because the file extensions .$libext of this argument makes me believe" - $echo "*** that it is just a static archive that I should not used here." - else - $echo - $echo "*** Warning: Linking the shared library $output against the" - $echo "*** static library $deplib is not portable!" - deplibs="$deplib $deplibs" - fi - continue - ;; - prog) - if test "$pass" != link; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - continue - ;; - esac # linkmode - ;; # *.$libext - *.lo | *.$objext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - elif test "$linkmode" = prog; then - if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then - # If there is no dlopen support or we're linking statically, - # we need to preload. - newdlprefiles="$newdlprefiles $deplib" - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - newdlfiles="$newdlfiles $deplib" - fi fi - continue - ;; - %DEPLIBS%) - alldeplibs=yes - continue - ;; - esac # case $deplib - if test "$found" = yes || test -f "$lib"; then : - else - $echo "$modename: cannot find the library \`$lib' or unhandled argument \`$deplib'" 1>&2 - exit $EXIT_FAILURE - fi - # Check to see that this really is a libtool archive. - if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : - else - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' fi - ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'` - test "X$ladir" = "X$lib" && ladir="." - - dlname= - dlopen= - dlpreopen= - libdir= - library_names= - old_library= - # If the library was installed with an old release of libtool, - # it will not redefine variables installed, or shouldnotlink - installed=yes - shouldnotlink=no - avoidtemprpath= + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + # Maybe install the static library, too. + test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" + ;; - # Read the .la file - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac + *.lo) + # Install (i.e. copy) a libtool object. - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan" || - { test "$linkmode" != prog && test "$linkmode" != lib; }; then - test -n "$dlopen" && dlfiles="$dlfiles $dlopen" - test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" fi - if test "$pass" = conv; then - # Only check for convenience libraries - deplibs="$lib $deplibs" - if test -z "$libdir"; then - if test -z "$old_library"; then - $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - # It is a libtool convenience library, so add in its objects. 
- convenience="$convenience $ladir/$objdir/$old_library" - old_convenience="$old_convenience $ladir/$objdir/$old_library" - tmp_libs= - for deplib in $dependency_libs; do - deplibs="$deplib $deplibs" - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - elif test "$linkmode" != prog && test "$linkmode" != lib; then - $echo "$modename: \`$lib' is not a convenience library" 1>&2 - exit $EXIT_FAILURE - fi - continue - fi # $pass = conv + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' - # Get the name of the library we link against. - linklib= - for l in $old_library $library_names; do - linklib="$l" - done - if test -z "$linklib"; then - $echo "$modename: cannot find name of link library for \`$lib'" 1>&2 - exit $EXIT_FAILURE + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' fi + exit $EXIT_SUCCESS + ;; - # This library was specified with -dlopen. - if test "$pass" = dlopen; then - if test -z "$libdir"; then - $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2 - exit $EXIT_FAILURE - fi - if test -z "$dlname" || - test "$dlopen_support" != yes || - test "$build_libtool_libs" = no; then - # If there is no dlname, no dlopen support or we're linking - # statically, we need to preload. We also need to preload any - # dependent libraries so libltdl's deplib preloader doesn't - # bomb out in the load deplibs phase. - dlprefiles="$dlprefiles $lib $dependency_libs" - else - newdlfiles="$newdlfiles $lib" - fi - continue - fi # $pass = dlopen + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi - # We need an absolute path. - case $ladir in - [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; *) - abs_ladir=`cd "$ladir" && pwd` - if test -z "$abs_ladir"; then - $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2 - $echo "$modename: passing it literally to the linker, although it might fail" 1>&2 - abs_ladir="$ladir" + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac ;; esac - laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done - # Find the relevant object directory and library name. - if test "X$installed" = Xyes; then - if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then - $echo "$modename: warning: library \`$lib' was moved." 1>&2 - dir="$ladir" - absdir="$abs_ladir" - libdir="$abs_ladir" - else - dir="$libdir" - absdir="$libdir" + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' 
+ done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $opt_debug + my_outputname="$1" + my_originator="$2" + my_pic_p="${3-no}" + my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms="${my_outputname}S.c" + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${my_outputname}.nm" + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + func_verbose "generating symbol list for \`$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. + progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_verbose "extracting global C symbols from \`$progfile'" + $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } fi - test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes - else - if test ! 
-f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then - dir="$ladir" - absdir="$abs_ladir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } else - dir="$ladir/$objdir" - absdir="$abs_ladir/$objdir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } fi - fi # $installed = yes - name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` + fi - # This library was specified with -dlpreopen. - if test "$pass" = dlpreopen; then - if test -z "$libdir"; then - $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2 - exit $EXIT_FAILURE + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" fi - # Prefer using a static library (so that no silly _DYNAMIC symbols - # are required to link). - if test -n "$old_library"; then - newdlprefiles="$newdlprefiles $dir/$old_library" - # Otherwise, use the dlname, so that lt_dlopen finds it. - elif test -n "$dlname"; then - newdlprefiles="$newdlprefiles $dir/$dlname" + + # Try sorting and uniquifying the output. 
+ if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : else - newdlprefiles="$newdlprefiles $dir/$linklib" + $GREP -v "^: " < "$nlist" > "$nlist"S fi - fi # $pass = dlpreopen - if test -z "$libdir"; then - # Link the convenience library - if test "$linkmode" = lib; then - deplibs="$dir/$old_library $deplibs" - elif test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$dir/$old_library $compile_deplibs" - finalize_deplibs="$dir/$old_library $finalize_deplibs" + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else - deplibs="$lib $deplibs" # used for prog,scan pass + $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms" fi - continue - fi + $ECHO >> "$output_objdir/$my_dlsyms" "\ - if test "$linkmode" = prog && test "$pass" != link; then - newlib_search_path="$newlib_search_path $ladir" - deplibs="$lib $deplibs" +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +" + case $host in + *cygwin* | *mingw* | *cegcc* ) + $ECHO >> "$output_objdir/$my_dlsyms" "\ +/* DATA imports from DLLs on WIN32 con't be const, because + runtime relocations are performed -- see ld's documentation + on pseudo-relocs. */" + lt_dlsym_const= ;; + *osf5*) + echo >> "$output_objdir/$my_dlsyms" "\ +/* This system does not cope well with relocations in const data */" + lt_dlsym_const= ;; + *) + lt_dlsym_const=const ;; + esac - linkalldeplibs=no - if test "$link_all_deplibs" != no || test -z "$library_names" || - test "$build_libtool_libs" = no; then - linkalldeplibs=yes - fi + $ECHO >> "$output_objdir/$my_dlsyms" "\ +extern $lt_dlsym_const lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +$lt_dlsym_const lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," - tmp_libs= - for deplib in $dependency_libs; do - case $deplib in - -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test - esac - # Need to link against all dependency_libs? - if test "$linkalldeplibs" = yes; then - deplibs="$deplib $deplibs" - else - # Need to hardcode shared library paths - # or/and link against static libraries - newdependency_libs="$deplib $newdependency_libs" - fi - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done # for deplib - continue - fi # $linkmode = prog... + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + $ECHO >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; - if test "$linkmode,$pass" = "prog,link"; then - if test -n "$library_names" && - { { test "$prefer_static_libs" = no || - test "$prefer_static_libs,$installed" = "built,yes"; } || - test -z "$old_library"; }; then - # We need to hardcode the library path - if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then - # Make sure the rpath contains only unique directories. 
- case "$temp_rpath " in - *" $dir "*) ;; - *" $absdir "*) ;; - *) temp_rpath="$temp_rpath $absdir" ;; - esac - fi +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac - fi # $linkmode,$pass = prog,link... +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run - if test "$alldeplibs" = yes && - { test "$deplibs_check_method" = pass_all || - { test "$build_libtool_libs" = yes && - test -n "$library_names"; }; }; then - # We only need to search for static libraries - continue - fi - fi + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) symtab_cflags="$symtab_cflags $arg" ;; + esac + done - link_static=no # Whether the deplib will be linked statically - use_static_libs=$prefer_static_libs - if test "$use_static_libs" = built && test "$installed" = yes ; then - use_static_libs=no - fi - if test -n "$library_names" && - { test "$use_static_libs" = no || test -z "$old_library"; }; then - if test "$installed" = no; then - notinst_deplibs="$notinst_deplibs $lib" - need_relink=yes - fi - # This is a shared library + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' - # Warn about portability, can't link against -module's on - # some systems (darwin) - if test "$shouldnotlink" = yes && test "$pass" = link ; then - $echo - if test "$linkmode" = prog; then - $echo "*** Warning: Linking the executable $output against the loadable module" - else - $echo "*** Warning: Linking the shared library $output against the loadable module" - fi - $echo "*** $linklib is not portable!" - fi - if test "$linkmode" = lib && - test "$hardcode_into_libs" = yes; then - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac + # Clean up the generated files. 
+ func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` fi + ;; + *) + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` + fi +} - if test -n "$old_archive_from_expsyms_cmds"; then - # figure out the soname - set dummy $library_names - realname="$2" - shift; shift - libname=`eval \\$echo \"$libname_spec\"` - # use dlname if we got it. it's perfectly good, no? - if test -n "$dlname"; then - soname="$dlname" - elif test -n "$soname_spec"; then - # bleh windows - case $host in - *cygwin* | mingw*) - major=`expr $current - $age` - versuffix="-$major" - ;; - esac - eval soname=\"$soname_spec\" - else - soname="$realname" - fi +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then + win32_nmres=`eval $NM -f posix -A $1 | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} - # Make a new name for the extract_expsyms_cmds to use - soroot="$soname" - soname=`$echo $soroot | ${SED} -e 's/^.*\///'` - newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a" - # If the library has no export list, then create one now - if test -f "$output_objdir/$soname-def"; then : - else - $show "extracting exported symbol list from \`$soname'" - save_ifs="$IFS"; IFS='~' - cmds=$extract_expsyms_cmds - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? 
- done - IFS="$save_ifs" - fi - # Create $newlib - if test -f "$output_objdir/$newlib"; then :; else - $show "generating import library for \`$soname'" - save_ifs="$IFS"; IFS='~' - cmds=$old_archive_from_expsyms_cmds - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - fi - # make sure the library variables are pointing to the new library - dir=$output_objdir - linklib=$newlib - fi # test -n "$old_archive_from_expsyms_cmds" +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?' + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} - if test "$linkmode" = prog || test "$mode" != relink; then - add_shlibpath= - add_dir= - add= - lib_linked=yes - case $hardcode_action in - immediate | unsupported) - if test "$hardcode_direct" = no; then - add="$dir/$linklib" - case $host in - *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; - *-*-sysv4*uw2*) add_dir="-L$dir" ;; - *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ - *-*-unixware7*) add_dir="-L$dir" ;; - *-*-darwin* ) - # if the lib is a module then we can not link against - # it, someone is ignoring the new warnings I added - if /usr/bin/file -L $add 2> /dev/null | - $EGREP ": [^:]* bundle" >/dev/null ; then - $echo "** Warning, lib $linklib is a module, not a shared library" - if test -z "$old_library" ; then - $echo - $echo "** And there doesn't seem to be a static archive available" - $echo "** The link will probably fail, sorry" - else - add="$dir/$old_library" - fi - fi - esac - elif test "$hardcode_minus_L" = no; then - case $host in - *-*-sunos*) add_shlibpath="$dir" ;; - esac - add_dir="-L$dir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = no; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - relink) - if test "$hardcode_direct" = yes; then - add="$dir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$dir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - *) lib_linked=no ;; - esac - if test "$lib_linked" != yes; then - $echo "$modename: configuration error: unsupported hardcode properties" - exit $EXIT_FAILURE - fi +# func_extract_archives gentop oldlib ... 
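A further aside, also outside the patch: the `$AR t ... | sort | sort -uc` test in func_extract_an_archive above relies on `sort -u -c` exiting non-zero when the already-sorted member list contains duplicates, i.e. when two archive members share a name and would overwrite each other on extraction. A minimal standalone check, assuming plain binutils ar:

check_archive_members () {
  if ar t "$1" | sort | sort -uc >/dev/null 2>&1; then
    : # all member names are unique
  else
    echo "duplicate member names in $1" >&2
    return 1
  fi
}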
+func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" - if test -n "$add_shlibpath"; then - case :$compile_shlibpath: in - *":$add_shlibpath:"*) ;; - *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; - esac - fi - if test "$linkmode" = prog; then - test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" - test -n "$add" && compile_deplibs="$add $compile_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - if test "$hardcode_direct" != yes && \ - test "$hardcode_minus_L" != yes && \ - test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - fi - fi - fi + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" - if test "$linkmode" = prog || test "$mode" = relink; then - add_shlibpath= - add_dir= - add= - # Finalize command for both is simple: just hardcode it. - if test "$hardcode_direct" = yes; then - add="$libdir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$libdir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - add="-l$name" - elif test "$hardcode_automatic" = yes; then - if test -n "$inst_prefix_dir" && - test -f "$inst_prefix_dir$libdir/$linklib" ; then - add="$inst_prefix_dir$libdir/$linklib" - else - add="$libdir/$linklib" - fi - else - # We cannot seem to hardcode it, guess we'll fake it. - add_dir="-L$libdir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - fi + func_mkdir_p "$my_xdir" - if test "$linkmode" = prog; then - test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" - test -n "$add" && finalize_deplibs="$add $finalize_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - fi - fi - elif test "$linkmode" = prog; then - # Here we assume that one of hardcode_direct or hardcode_minus_L - # is not unsupported. This is valid on all known static and - # shared platforms. - if test "$hardcode_direct" != unsupported; then - test -n "$old_library" && linklib="$old_library" - compile_deplibs="$dir/$linklib $compile_deplibs" - finalize_deplibs="$dir/$linklib $finalize_deplibs" + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" else - compile_deplibs="-l$name -L$dir $compile_deplibs" - finalize_deplibs="-l$name -L$dir $finalize_deplibs" - fi - elif test "$build_libtool_libs" = yes; then - # Not a shared library - if test "$deplibs_check_method" != pass_all; then - # We're trying link a shared library against a static one - # but the system doesn't support it. + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done - # Just print a warning and add the library to dependency_libs so - # that the program can be linked against the static library. - $echo - $echo "*** Warning: This system can not link to static lib archive $lib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have." - if test "$module" = yes; then - $echo "*** But as you try to build a module library, libtool will still create " - $echo "*** a static module, that should work as long as the dlopening application" - $echo "*** is linked with the -dlopen flag to resolve symbols at runtime." - if test -z "$global_symbol_pipe"; then - $echo - $echo "*** However, this would only work if libtool was able to extract symbol" - $echo "*** lists from a program, using \`nm' or equivalent, but libtool could" - $echo "*** not find such a program. So, this module is probably useless." - $echo "*** \`nm' from GNU binutils and a full rebuild may help." - fi - if test "$build_old_libs" = no; then - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - fi - else - deplibs="$dir/$old_library $deplibs" - link_static=yes - fi - fi # link shared/static library? 
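The Darwin branch above handles "fat" (multi-architecture) archives: it slices the archive into one thin copy per architecture with lipo, extracts each thin copy, then glues each object's per-architecture copies back together with `lipo -create`. A standalone sketch of that round trip, assuming a macOS toolchain and a hypothetical archive name libfoo.a:

archive=libfoo.a
arches=`lipo -info "$archive" | sed -e 's/.*are: //'`
for arch in $arches; do
  mkdir -p "unfat/$arch"
  # Slice out one architecture, then unpack its members.
  lipo -thin "$arch" -output "unfat/$arch/$archive" "$archive"
  (cd "unfat/$arch" && ar x "$archive")
done
# Recombine the per-arch copies of each object into one fat object.
for obj in `find unfat -name '*.o' | sed -e 's|.*/||' | sort -u`; do
  lipo -create -output "$obj" `find unfat -name "$obj"`
done
rm -rf unfat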
+ func_extract_archives_result="$my_oldobjs" +} - if test "$linkmode" = lib; then - if test -n "$dependency_libs" && - { test "$hardcode_into_libs" != yes || - test "$build_old_libs" = yes || - test "$link_static" = yes; }; then - # Extract -R from dependency_libs - temp_deplibs= - for libdir in $dependency_libs; do - case $libdir in - -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'` - case " $xrpath " in - *" $temp_xrpath "*) ;; - *) xrpath="$xrpath $temp_xrpath";; - esac;; - *) temp_deplibs="$temp_deplibs $libdir";; - esac - done - dependency_libs="$temp_deplibs" - fi - newlib_search_path="$newlib_search_path $absdir" - # Link against this library - test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" - # ... and its dependency_libs - tmp_libs= - for deplib in $dependency_libs; do - newdependency_libs="$deplib $newdependency_libs" - if test "X$duplicate_deps" = "Xyes" ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - if test "$link_all_deplibs" != no; then - # Add the search paths of all dependency libraries - for deplib in $dependency_libs; do - case $deplib in - -L*) path="$deplib" ;; - *.la) - dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$deplib" && dir="." - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; - *) - absdir=`cd "$dir" && pwd` - if test -z "$absdir"; then - $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2 - absdir="$dir" - fi - ;; - esac - if grep "^installed=no" $deplib > /dev/null; then - path="$absdir/$objdir" - else - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - if test -z "$libdir"; then - $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - if test "$absdir" != "$libdir"; then - $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2 - fi - path="$absdir" - fi - depdepl= - case $host in - *-*-darwin*) - # we do not want to link against static libs, - # but need to link against shared - eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` - if test -n "$deplibrary_names" ; then - for tmp in $deplibrary_names ; do - depdepl=$tmp - done - if test -f "$path/$depdepl" ; then - depdepl="$path/$depdepl" - fi - # do not add paths which are already there - case " $newlib_search_path " in - *" $path "*) ;; - *) newlib_search_path="$newlib_search_path $path";; - esac - fi - path="" - ;; - *) - path="-L$path" - ;; - esac - ;; - -l*) - case $host in - *-*-darwin*) - # Again, we only want to link against shared libraries - eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"` - for tmp in $newlib_search_path ; do - if test -f "$tmp/lib$tmp_libs.dylib" ; then - eval depdepl="$tmp/lib$tmp_libs.dylib" - break - fi - done - path="" - ;; - *) continue ;; - esac - ;; - *) continue ;; - esac - case " $deplibs " in - *" $path "*) ;; - *) deplibs="$path $deplibs" ;; - esac - case " $deplibs " in - *" $depdepl "*) ;; - *) deplibs="$depdepl $deplibs" ;; - esac - done - fi # link_all_deplibs != no - fi # linkmode = lib - done # for deplib in $libs - dependency_libs="$newdependency_libs" - if test "$pass" = dlpreopen; then - # Link the dlpreopened libraries before other libraries - for deplib in $save_deplibs; do - deplibs="$deplib $deplibs" - done - fi - if test "$pass" != dlopen; then - if test "$pass" != conv; then - # Make sure lib_search_path contains only unique 
directories. - lib_search_path= - for dir in $newlib_search_path; do - case "$lib_search_path " in - *" $dir "*) ;; - *) lib_search_path="$lib_search_path $dir" ;; - esac - done - newlib_search_path= +# func_emit_wrapper_part1 [arg=no] +# +# Emit the first part of a libtool wrapper script on stdout. +# For more information, see the description associated with +# func_emit_wrapper(), below. +func_emit_wrapper_part1 () +{ + func_emit_wrapper_part1_arg1=no + if test -n "$1" ; then + func_emit_wrapper_part1_arg1=$1 fi - if test "$linkmode,$pass" != "prog,link"; then - vars="deplibs" - else - vars="compile_deplibs finalize_deplibs" - fi - for var in $vars dependency_libs; do - # Add libraries to $var in reverse order - eval tmp_libs=\"\$$var\" - new_libs= - for deplib in $tmp_libs; do - # FIXME: Pedantically, this is the right thing to do, so - # that some nasty dependency loop isn't accidentally - # broken: - #new_libs="$deplib $new_libs" - # Pragmatically, this seems to cause very few problems in - # practice: - case $deplib in - -L*) new_libs="$deplib $new_libs" ;; - -R*) ;; - *) - # And here is the reason: when a library appears more - # than once as an explicit dependence of a library, or - # is implicitly linked in more than once by the - # compiler, it is considered special, and multiple - # occurrences thereof are not removed. Compare this - # with having the same library being listed as a - # dependency of multiple other libraries: in this case, - # we know (pedantically, we assume) the library does not - # need to be listed more than once, so we keep only the - # last copy. This is not always right, but it is rare - # enough that we require users that really mean to play - # such unportable linking tricks to link the library - # using -Wl,-lname, so that libtool does not consider it - # for duplicate removal. - case " $specialdeplibs " in - *" $deplib "*) new_libs="$deplib $new_libs" ;; - *) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$deplib $new_libs" ;; - esac - ;; - esac - ;; - esac - done - tmp_libs= - for deplib in $new_libs; do - case $deplib in - -L*) - case " $tmp_libs " in - *" $deplib "*) ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - done - eval $var=\"$tmp_libs\" - done # for var - fi - # Last step: remove runtime libs from dependency_libs - # (they stay in deplibs) - tmp_libs= - for i in $dependency_libs ; do - case " $predeps $postdeps $compiler_lib_search_path " in - *" $i "*) - i="" - ;; - esac - if test -n "$i" ; then - tmp_libs="$tmp_libs $i" - fi - done - dependency_libs=$tmp_libs - done # for pass - if test "$linkmode" = prog; then - dlfiles="$newdlfiles" - dlprefiles="$newdlprefiles" - fi + $ECHO "\ +#! $SHELL - case $linkmode in - oldlib) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 - fi +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 - fi +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. 
+Xsed='${SED} -e 1s/^X//' +sed_quote_subst='$sed_quote_subst' - if test -n "$rpath"; then - $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2 - fi +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh - if test -n "$xrpath"; then - $echo "$modename: warning: \`-R' is ignored for archives" 1>&2 - fi +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2 - fi +relink_command=\"$relink_command\" - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for archives" 1>&2 - fi +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + ECHO=\"$qecho\" + file=\"\$0\" + # Make sure echo works. + if test \"X\$1\" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift + elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then + # Yippee, \$ECHO works! + : + else + # Restart under the correct shell, and then maybe \$ECHO will work. + exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + fi + fi\ +" + $ECHO "\ - if test -n "$export_symbols" || test -n "$export_symbols_regex"; then - $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2 - fi + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. - # Now set the variables for building old libraries. - build_libtool_libs=no - oldlibs="$output" - objs="$objs$old_deplibs" - ;; + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` - lib) - # Make sure we only generate libraries of the form `libNAME.la'. - case $outputname in - lib*) - name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'` - eval shared_ext=\"$shrext_cmds\" - eval libname=\"$libname_spec\" - ;; - *) - if test "$module" = no; then - $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - if test "$need_lib_prefix" != no; then - # Add the "lib" prefix for modules if required - name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` - eval shared_ext=\"$shrext_cmds\" - eval libname=\"$libname_spec\" - else - libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'` - fi - ;; + # If there was a directory component, then change thisdir. 
+ if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; esac + fi - if test -n "$objs"; then - if test "$deplibs_check_method" != pass_all; then - $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1 - exit $EXIT_FAILURE - else - $echo - $echo "*** Warning: Linking the shared library $output against the non-libtool" - $echo "*** objects $objs is not portable!" - libobjs="$libobjs $objs" - fi - fi + file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` + done +" +} +# end: func_emit_wrapper_part1 - if test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2 - fi +# func_emit_wrapper_part2 [arg=no] +# +# Emit the second part of a libtool wrapper script on stdout. +# For more information, see the description associated with +# func_emit_wrapper(), below. +func_emit_wrapper_part2 () +{ + func_emit_wrapper_part2_arg1=no + if test -n "$1" ; then + func_emit_wrapper_part2_arg1=$1 + fi + + $ECHO "\ + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi - set dummy $rpath - if test "$#" -gt 2; then - $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2 + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 fi - install_libdir="$2" + fi - oldlibs= - if test -z "$rpath"; then - if test "$build_libtool_libs" = yes; then - # Building a libtool convenience library. - # Some compilers have problems with a `.al' extension so - # convenience libraries should have the same extension an - # archive normally would. 
- oldlibs="$output_objdir/$libname.$libext $oldlibs" - build_libtool_libs=convenience - build_old_libs=yes + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" fi - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2 - fi + $ECHO "\ - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2 - fi - else + if test -f \"\$progdir/\$program\"; then" - # Parse the version information argument. - save_ifs="$IFS"; IFS=':' - set dummy $vinfo 0 0 0 - IFS="$save_ifs" + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` - if test -n "$8"; then - $echo "$modename: too many parameters to \`-version-info'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE + export $shlibpath_var +" fi - # convert absolute version numbers to libtool ages - # this retains compatibility with .la files and attempts - # to make the code below a bit more comprehensible + # fixup the dll searchpath if we need to. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi - case $vinfo_number in - yes) - number_major="$2" - number_minor="$3" - number_revision="$4" - # - # There are really only two kinds -- those that - # use the current revision as the major version - # and those that subtract age and use age as - # a minor version. But, then there is irix - # which has an extra 1 added just for fun - # - case $version_type in - darwin|linux|osf|windows|none) - current=`expr $number_major + $number_minor` - age="$number_minor" - revision="$number_revision" - ;; - freebsd-aout|freebsd-elf|sunos) - current="$number_major" - revision="$number_minor" - age="0" - ;; - irix|nonstopux) - current=`expr $number_major + $number_minor` - age="$number_minor" - revision="$number_minor" - lt_irix_increment=no - ;; - *) - $echo "$modename: unknown library version type \`$version_type'" 1>&2 - $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit $EXIT_FAILURE - ;; - esac - ;; - no) - current="$2" - revision="$3" - age="$4" + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" ;; - esac - # Check that each of the things are valid numbers. 
- case $current in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - $echo "$modename: CURRENT \`$current' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - case $revision in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) - $echo "$modename: REVISION \`$revision' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE + $ECHO "\ + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" ;; esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} +# end: func_emit_wrapper_part2 - case $age in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - $echo "$modename: AGE \`$age' must be a nonnegative integer" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE - ;; - esac - if test "$age" -gt "$current"; then - $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2 - $echo "$modename: \`$vinfo' is not valid version information" 1>&2 - exit $EXIT_FAILURE +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=no + if test -n "$1" ; then + func_emit_wrapper_arg1=$1 fi - # Calculate the version variables. - major= - versuffix= - verstring= - case $version_type in - none) ;; - - darwin) - # Like Linux, but with the current version available in - # verstring for coding it into the library header - major=.`expr $current - $age` - versuffix="$major.$age.$revision" - # Darwin ld doesn't like 0 for these options... - minor_current=`expr $current + 1` - xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" - verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" - ;; - - freebsd-aout) - major=".$current" - versuffix=".$current.$revision"; - ;; + # split this up so that func_emit_cwrapperexe_src + # can call each part independently. + func_emit_wrapper_part1 "${func_emit_wrapper_arg1}" + func_emit_wrapper_part2 "${func_emit_wrapper_arg1}" +} - freebsd-elf) - major=".$current" - versuffix=".$current"; - ;; - irix | nonstopux) - if test "X$lt_irix_increment" = "Xno"; then - major=`expr $current - $age` - else - major=`expr $current - $age + 1` - fi - case $version_type in - nonstopux) verstring_prefix=nonstopux ;; - *) verstring_prefix=sgi ;; - esac - verstring="$verstring_prefix$major.$revision" +# func_to_host_path arg +# +# Convert paths to host format when used with build tools. 
+# Intended for use with "native" mingw (where libtool itself +# is running under the msys shell), or in the following cross- +# build environments: +# $build $host +# mingw (msys) mingw [e.g. native] +# cygwin mingw +# *nix + wine mingw +# where wine is equipped with the `winepath' executable. +# In the native mingw case, the (msys) shell automatically +# converts paths for any non-msys applications it launches, +# but that facility isn't available from inside the cwrapper. +# Similar accommodations are necessary for $host mingw and +# $build cygwin. Calling this function does no harm for other +# $host/$build combinations not listed above. +# +# ARG is the path (on $build) that should be converted to +# the proper representation for $host. The result is stored +# in $func_to_host_path_result. +func_to_host_path () +{ + func_to_host_path_result="$1" + if test -n "$1" ; then + case $host in + *mingw* ) + lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + case $build in + *mingw* ) # actually, msys + # awkward: cmd appends spaces to result + lt_sed_strip_trailing_spaces="s/[ ]*\$//" + func_to_host_path_tmp1=`( cmd //c echo "$1" |\ + $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + *cygwin* ) + func_to_host_path_tmp1=`cygpath -w "$1"` + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + * ) + # Unfortunately, winepath does not exit with a non-zero + # error code, so we are forced to check the contents of + # stdout. On the other hand, if the command is not + # found, the shell will set an exit code of 127 and print + # *an error message* to stdout. So we must check for both + # error code of zero AND non-empty stdout, which explains + # the odd construction: + func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` + if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + else + # Allow warning below. + func_to_host_path_result="" + fi + ;; + esac + if test -z "$func_to_host_path_result" ; then + func_error "Could not determine host path corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_path_result="$1" + fi + ;; + esac + fi +} +# end: func_to_host_path - # Add in all the interfaces that we are compatible with. - loop=$revision - while test "$loop" -ne 0; do - iface=`expr $revision - $loop` - loop=`expr $loop - 1` - verstring="$verstring_prefix$major.$iface:$verstring" - done +# func_to_host_pathlist arg +# +# Convert pathlists to host format when used with build tools. +# See func_to_host_path(), above. This function supports the +# following $build/$host combinations (but does no harm for +# combinations not listed here): +# $build $host +# mingw (msys) mingw [e.g. native] +# cygwin mingw +# *nix + wine mingw +# +# Path separators are also converted from $build format to +# $host format. If ARG begins or ends with a path separator +# character, it is preserved (but converted to $host format) +# on output. +# +# ARG is a pathlist (on $build) that should be converted to +# the proper representation on $host. The result is stored +# in $func_to_host_pathlist_result. 
+func_to_host_pathlist () +{ + func_to_host_pathlist_result="$1" + if test -n "$1" ; then + case $host in + *mingw* ) + lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_to_host_pathlist_tmp2="$1" + # Once set for this call, this variable should not be + # reassigned. It is used in tha fallback case. + func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e 's|^:*||' -e 's|:*$||'` + case $build in + *mingw* ) # Actually, msys. + # Awkward: cmd appends spaces to result. + lt_sed_strip_trailing_spaces="s/[ ]*\$//" + func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\ + $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + *cygwin* ) + func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"` + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + * ) + # unfortunately, winepath doesn't convert pathlists + func_to_host_pathlist_result="" + func_to_host_pathlist_oldIFS=$IFS + IFS=: + for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do + IFS=$func_to_host_pathlist_oldIFS + if test -n "$func_to_host_pathlist_f" ; then + func_to_host_path "$func_to_host_pathlist_f" + if test -n "$func_to_host_path_result" ; then + if test -z "$func_to_host_pathlist_result" ; then + func_to_host_pathlist_result="$func_to_host_path_result" + else + func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result" + fi + fi + fi + IFS=: + done + IFS=$func_to_host_pathlist_oldIFS + ;; + esac + if test -z "$func_to_host_pathlist_result" ; then + func_error "Could not determine the host path(s) corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This may break if $1 contains DOS-style drive + # specifications. The fix is not to complicate the expression + # below, but for the user to provide a working wine installation + # with winepath so that path translation in the cross-to-mingw + # case works properly. + lt_replace_pathsep_nix_to_dos="s|:|;|g" + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ + $SED -e "$lt_replace_pathsep_nix_to_dos"` + fi + # Now, add the leading and trailing path separators back + case "$1" in + :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" + ;; + esac + case "$1" in + *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;" + ;; + esac + ;; + esac + fi +} +# end: func_to_host_pathlist - # Before this point, $major must not contain `.'. - major=.$major - versuffix="$major.$revision" - ;; +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. 
+func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +# define setmode _setmode +#else +# include +# include +# ifdef __CYGWIN__ +# include +# define HAVE_SETENV +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include - sunos) - major=".$current" - versuffix=".$current.$revision" - ;; +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif - windows) - # Use '-' rather than '.', since we only want one - # extension on DOS 8.3 filesystems. - major=`expr $current - $age` - versuffix="-$major" - ;; +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif - *) - $echo "$modename: unknown library version type \`$version_type'" 1>&2 - $echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2 - exit $EXIT_FAILURE - ;; - esac +#ifdef _MSC_VER +# define S_IXUSR _S_IEXEC +# define stat _stat +# ifndef _INTPTR_T_DEFINED +# define intptr_t int +# endif +#endif - # Clear the version info if we defaulted, and they specified a release. - if test -z "$vinfo" && test -n "$release"; then - major= - case $version_type in - darwin) - # we can't check for "0.0" in archive_cmds due to quoting - # problems, so we reset it completely - verstring= - ;; - *) - verstring="0.0" - ;; - esac - if test "$need_version" = no; then - versuffix= - else - versuffix=".0.0" - fi - fi +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif - # Remove version info from name if versioning should be avoided - if test "$avoid_version" = yes && test "$need_version" = no; then - major= - versuffix= - verstring="" - fi +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif - # Check to see if the archive will have undefined symbols. - if test "$allow_undefined" = yes; then - if test "$allow_undefined_flag" = unsupported; then - $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2 - build_libtool_libs=no - build_old_libs=yes - fi - else - # Don't allow undefined symbols. - allow_undefined_flag="$no_undefined_flag" - fi - fi +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ - if test "$mode" != relink; then - # Remove our outputs, but don't remove object files since they - # may have been created when compiling PIC objects. 
- removelist= - tempremovelist=`$echo "$output_objdir/*"` - for p in $tempremovelist; do - case $p in - *.$objext) - ;; - $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) - if test "X$precious_files_regex" != "X"; then - if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 - then - continue - fi - fi - removelist="$removelist $p" - ;; - *) ;; - esac - done - if test -n "$removelist"; then - $show "${rm}r $removelist" - $run ${rm}r $removelist - fi - fi +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ - # Now set the variables for building old libraries. - if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then - oldlibs="$oldlibs $output_objdir/$libname.$libext" +#ifdef __CYGWIN__ +# define FOPEN_WB "wb" +#endif - # Transform .lo files to .o files. - oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` - fi +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif - # Eliminate all temporary directories. - #for path in $notinst_path; do - # lib_search_path=`$echo "$lib_search_path " | ${SED} -e "s% $path % %g"` - # deplibs=`$echo "$deplibs " | ${SED} -e "s% -L$path % %g"` - # dependency_libs=`$echo "$dependency_libs " | ${SED} -e "s% -L$path % %g"` - #done +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) - if test -n "$xrpath"; then - # If the user specified any rpath flags, then add them. - temp_xrpath= - for libdir in $xrpath; do - temp_xrpath="$temp_xrpath -R$libdir" - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then - dependency_libs="$temp_xrpath $dependency_libs" - fi - fi +#undef LTWRAPPER_DEBUGPRINTF +#if defined DEBUGWRAPPER +# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args +static void +ltwrapper_debugprintf (const char *fmt, ...) 
+{ + va_list args; + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); +} +#else +# define LTWRAPPER_DEBUGPRINTF(args) +#endif - # Make sure dlfiles contains only unique files that won't be dlpreopened - old_dlfiles="$dlfiles" - dlfiles= - for lib in $old_dlfiles; do - case " $dlprefiles $dlfiles " in - *" $lib "*) ;; - *) dlfiles="$dlfiles $lib" ;; - esac - done +const char *program_name = NULL; - # Make sure dlprefiles contains only unique files - old_dlprefiles="$dlprefiles" - dlprefiles= - for lib in $old_dlprefiles; do - case "$dlprefiles " in - *" $lib "*) ;; - *) dlprefiles="$dlprefiles $lib" ;; - esac - done +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_fatal (const char *message, ...); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_opt_process_env_set (const char *arg); +void lt_opt_process_env_prepend (const char *arg); +void lt_opt_process_env_append (const char *arg); +int lt_split_name_value (const char *arg, char** name, char** value); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); - if test "$build_libtool_libs" = yes; then - if test -n "$rpath"; then - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*) - # these systems don't actually have a c library (as such)! - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C library is in the System framework - deplibs="$deplibs -framework System" - ;; - *-*-netbsd*) - # Don't link with libc until the a.out ld.so is fixed. - ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc due to us having libc/libc_r. - ;; - *-*-sco3.2v5* | *-*-sco5v6*) - # Causes problems with __ctype - ;; - *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) - # Compiler inserts libc in the correct place for threads to work - ;; - *) - # Add libc to deplibs on all other systems if necessary. - if test "$build_libtool_need_lc" = "yes"; then - deplibs="$deplibs -lc" - fi - ;; - esac - fi +static const char *script_text_part1 = +EOF - # Transform deplibs into only deplibs that can be linked in shared. - name_save=$name - libname_save=$libname - release_save=$release - versuffix_save=$versuffix - major_save=$major - # I'm not sure if I'm treating the release correctly. I think - # release should show up in the -l (ie -lgmp5) so we don't want to - # add it in twice. Is that correct? - release="" - versuffix="" - major="" - newdeplibs= - droppeddeps=no - case $deplibs_check_method in - pass_all) - # Don't check for shared/static. Everything works. - # This might be a little naive. We might want to check - # whether the library exists or not. But this is on - # osf3 & osf4 and I'm not really sure... Just - # implementing what was already the behavior. - newdeplibs=$deplibs - ;; - test_compile) - # This code stresses the "libraries are programs" paradigm to its - # limits. Maybe even breaks it. We compile a program, linking it - # against the deplibs as a proxy for the library. Then we can check - # whether they linked in statically or dynamically with ldd. 
- $rm conftest.c - cat > conftest.c </dev/null` - for potent_lib in $potential_libs; do - # Follow soft links. - if ls -lLd "$potent_lib" 2>/dev/null \ - | grep " -> " >/dev/null; then - continue - fi - # The statement above tries to avoid entering an - # endless loop below, in case of cyclic links. - # We might still enter an endless loop, since a link - # loop can be closed while we follow links, - # but so what? - potlib="$potent_lib" - while test -h "$potlib" 2>/dev/null; do - potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` - case $potliblink in - [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; - *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; - esac - done - if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \ - | ${SED} 10q \ - | $EGREP "$file_magic_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - $echo - $echo "*** Warning: linker path does not have real file for library $a_deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $echo "*** with $libname but no candidates were found. (...for file magic test)" - else - $echo "*** with $libname and none of the candidates passed a file format test" - $echo "*** using a file magic. Last file checked: $potlib" - fi - fi else - # Add a -L argument. - newdeplibs="$newdeplibs $a_deplib" + cat <<"EOF" +const char * LIB_PATH_VALUE = ""; +EOF fi - done # Gone through all deplibs. - ;; - match_pattern*) - set dummy $deplibs_check_method - match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"` - for a_deplib in $deplibs; do - name=`expr $a_deplib : '-l\(.*\)'` - # If $name is empty we are operating on a -L argument. - if test -n "$name" && test "$name" != "0"; then - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $a_deplib "*) - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - ;; - esac - fi - if test -n "$a_deplib" ; then - libname=`eval \\$echo \"$libname_spec\"` - for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do - potential_libs=`ls $i/$libname[.-]* 2>/dev/null` - for potent_lib in $potential_libs; do - potlib="$potent_lib" # see symlink-check above in file_magic test - if eval $echo \"$potent_lib\" 2>/dev/null \ - | ${SED} 10q \ - | $EGREP "$match_pattern_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - $echo - $echo "*** Warning: linker path does not have real file for library $a_deplib." - $echo "*** I have the capability to make that library automatically link in when" - $echo "*** you link to this library. But I can only do this if you have a" - $echo "*** shared version of the library, which you do not appear to have" - $echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $echo "*** with $libname but no candidates were found. (...for regex pattern test)" - else - $echo "*** with $libname and none of the candidates passed a file format test" - $echo "*** using a regex pattern. 
Last file checked: $potlib" - fi - fi + + if test -n "$dllsearchpath"; then + func_to_host_pathlist "$dllsearchpath:" + cat </dev/null; then - $echo - if test "X$deplibs_check_method" = "Xnone"; then - $echo "*** Warning: inter-library dependencies are not supported in this platform." + + if test "$fast_install" = yes; then + cat < \"${export_symbols}T\"" - $run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' - $show "$mv \"${export_symbols}T\" \"$export_symbols\"" - $run eval '$mv "${export_symbols}T" "$export_symbols"' - fi - fi - fi + newargz = XMALLOC (char *, argc + 1); + tmp_pathspec = find_executable (argv[0]); + if (tmp_pathspec == NULL) + lt_fatal ("Couldn't find %s", argv[0]); + LTWRAPPER_DEBUGPRINTF (("(main) found exe (before symlink chase) at : %s\n", + tmp_pathspec)); + + actual_cwrapper_path = chase_symlinks (tmp_pathspec); + LTWRAPPER_DEBUGPRINTF (("(main) found exe (after symlink chase) at : %s\n", + actual_cwrapper_path)); + XFREE (tmp_pathspec); + + actual_cwrapper_name = xstrdup( base_name (actual_cwrapper_path)); + strendzap (actual_cwrapper_path, actual_cwrapper_name); + + /* wrapper name transforms */ + strendzap (actual_cwrapper_name, ".exe"); + tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1); + XFREE (actual_cwrapper_name); + actual_cwrapper_name = tmp_pathspec; + tmp_pathspec = 0; + + /* target_name transforms -- use actual target program name; might have lt- prefix */ + target_name = xstrdup (base_name (TARGET_PROGRAM_NAME)); + strendzap (target_name, ".exe"); + tmp_pathspec = lt_extend_str (target_name, ".exe", 1); + XFREE (target_name); + target_name = tmp_pathspec; + tmp_pathspec = 0; - if test -n "$export_symbols" && test -n "$include_expsyms"; then - $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"' - fi + LTWRAPPER_DEBUGPRINTF (("(main) libtool target name: %s\n", + target_name)); +EOF - tmp_deplibs= - for test_deplib in $deplibs; do - case " $convenience " in - *" $test_deplib "*) ;; - *) - tmp_deplibs="$tmp_deplibs $test_deplib" - ;; - esac - done - deplibs="$tmp_deplibs" + cat </dev/null` && - test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - : - else - # The command line is too long to link in one step, link piecewise. - $echo "creating reloadable object files..." + LTWRAPPER_DEBUGPRINTF (("(main) lt_argv_zero : %s\n", (lt_argv_zero ? lt_argv_zero : ""))); + for (i = 0; i < newargc; i++) + { + LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); + } - # Save the value of $output and $libobjs because we want to - # use them later. If we have whole_archive_flag_spec, we - # want to use save_libobjs as it was before - # whole_archive_flag_spec was expanded, because we can't - # assume the linker understands whole_archive_flag_spec. - # This may have to be revisited, in case too many - # convenience libraries get linked in and end up exceeding - # the spec. - if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then - save_libobjs=$libobjs - fi - save_output=$output - output_la=`$echo "X$output" | $Xsed -e "$basename"` +EOF - # Clear the reloadable object creation command queue and - # initialize k to one. - test_cmds= - concat_cmds= - objlist= - delfiles= - last_robj= - k=1 - output=$output_objdir/$output_la-${k}.$objext - # Loop over the list of objects to be linked. 
- for obj in $save_libobjs - do - eval test_cmds=\"$reload_cmds $objlist $last_robj\" - if test "X$objlist" = X || - { len=`expr "X$test_cmds" : ".*" 2>/dev/null` && - test "$len" -le "$max_cmd_len"; }; then - objlist="$objlist $obj" - else - # The command $test_cmds is almost too long, add a - # command to the queue. - if test "$k" -eq 1 ; then - # The first file doesn't have a previous command to add. - eval concat_cmds=\"$reload_cmds $objlist $last_robj\" - else - # All subsequent reloadable object files will link in - # the last one created. - eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\" - fi - last_robj=$output_objdir/$output_la-${k}.$objext - k=`expr $k + 1` - output=$output_objdir/$output_la-${k}.$objext - objlist=$obj - len=1 - fi - done - # Handle the remaining objects by creating one last - # reloadable object file. All subsequent reloadable object - # files will link in the last one created. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" - - if ${skipped_export-false}; then - $show "generating symbol list for \`$libname.la'" - export_symbols="$output_objdir/$libname.exp" - $run $rm $export_symbols - libobjs=$output - # Append the command to create the export file. - eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\" - fi - - # Set up a command to remove the reloadable object files - # after they are used. - i=0 - while test "$i" -lt "$k" - do - i=`expr $i + 1` - delfiles="$delfiles $output_objdir/$output_la-${i}.$objext" - done + case $host_os in + mingw*) + cat <<"EOF" + /* execv doesn't actually work on mingw as expected on unix */ + rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz); + if (rval == -1) + { + /* failed to start process */ + LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); + return 127; + } + return rval; +EOF + ;; + *) + cat <<"EOF" + execv (lt_argv_zero, newargz); + return rval; /* =127, but avoids unused variable warning */ +EOF + ;; + esac - $echo "creating a temporary reloadable object file: $output" + cat <<"EOF" +} - # Loop through the commands generated above and execute them. - save_ifs="$IFS"; IFS='~' - for cmd in $concat_cmds; do - IFS="$save_ifs" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" +void * +xmalloc (size_t num) +{ + void *p = (void *) malloc (num); + if (!p) + lt_fatal ("Memory exhausted"); - libobjs=$output - # Restore the value of output. - output=$save_output + return p; +} - if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then - eval libobjs=\"\$libobjs $whole_archive_flag_spec\" - fi - # Expand the library linking commands again to reset the - # value of $libobjs for piecewise linking. +char * +xstrdup (const char *string) +{ + return string ? strcpy ((char *) xmalloc (strlen (string) + 1), + string) : NULL; +} - # Do each of the archive commands. - if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - cmds=$module_expsym_cmds - else - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - cmds=$archive_expsym_cmds - else - cmds=$archive_cmds - fi - fi +const char * +base_name (const char *name) +{ + const char *base; - # Append the command to remove the reloadable object files - # to the just-reset $cmds. 
- eval cmds=\"\$cmds~\$rm $delfiles\" - fi - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || { - lt_exit=$? +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + /* Skip over the disk name in MSDOS pathnames. */ + if (isalpha ((unsigned char) name[0]) && name[1] == ':') + name += 2; +#endif - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' - fi + for (base = name; *name; name++) + if (IS_DIR_SEPARATOR (*name)) + base = name + 1; + return base; +} - exit $lt_exit - } - done - IFS="$save_ifs" +int +check_executable (const char *path) +{ + struct stat st; - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $? + LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", + path ? (*path ? path : "EMPTY!") : "NULL!")); + if ((!path) || (!*path)) + return 0; - if test -n "$convenience"; then - if test -z "$whole_archive_flag_spec"; then - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - fi - fi + if ((stat (path, &st) >= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} - exit $EXIT_SUCCESS - fi +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; - # Create links to the real library. - for linkname in $linknames; do - if test "$realname" != "$linkname"; then - $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)" - $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $? - fi - done + LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", + path ? (*path ? path : "EMPTY!") : "NULL!")); + if ((!path) || (!*path)) + return 0; - # If -module or -export-dynamic was specified, set the dlname. - if test "$module" = yes || test "$export_dynamic" = yes; then - # On all known operating systems, these are identical. - dlname="$soname" - fi - fi - ;; + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} - obj) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 - fi +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 - fi + LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", + wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!")); - if test -n "$rpath"; then - $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2 - fi + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; - if test -n "$xrpath"; then - $echo "$modename: warning: \`-R' is ignored for objects" 1>&2 - fi + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2 - fi + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for objects" 1>&2 - fi + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} - case $output in - *.lo) - if test -n "$objs$old_deplibs"; then - $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2 - exit $EXIT_FAILURE - fi - libobj="$output" - obj=`$echo "X$output" | $Xsed -e "$lo2o"` - ;; - *) - libobj= - obj="$output" - ;; - esac +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", + tmp_pathspec)); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } - # Delete the old objects. - $run $rm $obj $libobj + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + char *errstr = strerror (errno); + lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); + } + } + XFREE (tmp_pathspec); - # Objects from convenience libraries. This assumes - # single-version convenience libraries. 
Whenever we create - # different ones for PIC/non-PIC, this we'll have to duplicate - # the extraction. - reload_conv_objs= - gentop= - # reload_cmds runs $LD directly, so let us get rid of - # -Wl from whole_archive_flag_spec and hope we can get by with - # turning comma into space.. - wl= + if (!has_symlinks) + { + return xstrdup (pathspec); + } - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec"; then - eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" - reload_conv_objs=$reload_objs\ `$echo "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'` - else - gentop="$output_objdir/${obj}x" - generated="$generated $gentop" + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal ("Could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} - func_extract_archives $gentop $convenience - reload_conv_objs="$reload_objs $func_extract_archives_result" - fi - fi +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; - # Create the old-style object. - reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + assert (str != NULL); + assert (pat != NULL); - output="$obj" - cmds=$reload_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" + len = strlen (str); + patlen = strlen (pat); - # Exit if we aren't doing a library object file. - if test -z "$libobj"; then - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return str; +} - exit $EXIT_SUCCESS - fi +static void +lt_error_core (int exit_status, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s: %s: ", program_name, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); - if test "$build_libtool_libs" != yes; then - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi + if (exit_status >= 0) + exit (exit_status); +} - # Create an invalid libtool object if no PIC, so that we don't - # accidentally link it into a program. - # $show "echo timestamp > $libobj" - # $run eval "echo timestamp > $libobj" || exit $? - exit $EXIT_SUCCESS - fi +void +lt_fatal (const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, "FATAL", message, ap); + va_end (ap); +} - if test -n "$pic_flag" || test "$pic_mode" != default; then - # Only do commands if we really have different PIC objects. - reload_objs="$libobjs $reload_conv_objs" - output="$libobj" - cmds=$reload_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - fi - - if test -n "$gentop"; then - $show "${rm}r $gentop" - $run ${rm}r $gentop - fi - - exit $EXIT_SUCCESS - ;; +void +lt_setenv (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", + (name ? name : ""), + (value ? 
value : ""))); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} - prog) - case $host in - *cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;; - esac - if test -n "$vinfo"; then - $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2 - fi +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} - if test -n "$release"; then - $echo "$modename: warning: \`-release' is ignored for programs" 1>&2 - fi +int +lt_split_name_value (const char *arg, char** name, char** value) +{ + const char *p; + int len; + if (!arg || !*arg) + return 1; - if test "$preload" = yes; then - if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown && - test "$dlopen_self_static" = unknown; then - $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support." - fi - fi + p = strchr (arg, (int)'='); - case $host in - *-*-rhapsody* | *-*-darwin1.[012]) - # On Rhapsody replace the C library is the System framework - compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'` - finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'` - ;; - esac + if (!p) + return 1; - case $host in - *darwin*) - # Don't allow lazy linking, it breaks C++ global constructors - if test "$tagname" = CXX ; then - compile_command="$compile_command ${wl}-bind_at_load" - finalize_command="$finalize_command ${wl}-bind_at_load" - fi - ;; - esac + *value = xstrdup (++p); + len = strlen (arg) - strlen (*value); + *name = XMALLOC (char, len); + strncpy (*name, arg, len-1); + (*name)[len - 1] = '\0'; - # move library search paths that coincide with paths to not yet - # installed libraries to the beginning of the library search list - new_libs= - for path in $notinst_path; do - case " $new_libs " in - *" -L$path/$objdir "*) ;; - *) - case " $compile_deplibs " in - *" -L$path/$objdir "*) - new_libs="$new_libs -L$path/$objdir" ;; - esac - ;; - esac - done - for deplib in $compile_deplibs; do - case $deplib in - -L*) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$new_libs $deplib" ;; - esac - ;; - *) new_libs="$new_libs $deplib" ;; - esac - done - compile_deplibs="$new_libs" + return 0; +} +void +lt_opt_process_env_set (const char *arg) +{ + char *name = NULL; + char *value = NULL; - compile_command="$compile_command $compile_deplibs" - finalize_command="$finalize_command $finalize_deplibs" + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg); + } - if test -n "$rpath$xrpath"; then - # If the user specified any rpath flags, then add them. - for libdir in $rpath $xrpath; do - # This is the magic to use -rpath. 
- case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - fi + lt_setenv (name, value); + XFREE (name); + XFREE (value); +} - # Now hardcode the library paths - rpath= - hardcode_libdirs= - for libdir in $compile_rpath $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval flag=\"$hardcode_libdir_flag_spec\" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; - *) perm_rpath="$perm_rpath $libdir" ;; - esac - fi - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*) - testbindir=`$echo "X$libdir" | $Xsed -e 's*/lib$*/bin*'` - case :$dllsearchpath: in - *":$libdir:"*) ;; - *) dllsearchpath="$dllsearchpath:$libdir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - *) dllsearchpath="$dllsearchpath:$testbindir";; - esac - ;; - esac - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval rpath=\" $hardcode_libdir_flag_spec\" - fi - compile_rpath="$rpath" +void +lt_opt_process_env_prepend (const char *arg) +{ + char *name = NULL; + char *value = NULL; + char *new_value = NULL; - rpath= - hardcode_libdirs= - for libdir in $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval flag=\"$hardcode_libdir_flag_spec\" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$finalize_perm_rpath " in - *" $libdir "*) ;; - *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; - esac - fi - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval rpath=\" $hardcode_libdir_flag_spec\" - fi - finalize_rpath="$rpath" + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg); + } - if test -n "$libobjs" && test "$build_old_libs" = yes; then - # Transform all the library objects into standard objects. 
- compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - fi + new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + XFREE (name); + XFREE (value); +} - dlsyms= - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - if test -n "$NM" && test -n "$global_symbol_pipe"; then - dlsyms="${outputname}S.c" - else - $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2 - fi - fi +void +lt_opt_process_env_append (const char *arg) +{ + char *name = NULL; + char *value = NULL; + char *new_value = NULL; - if test -n "$dlsyms"; then - case $dlsyms in - "") ;; - *.c) - # Discover the nlist of each of the dlfiles. - nlist="$output_objdir/${outputname}.nm" + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg); + } - $show "$rm $nlist ${nlist}S ${nlist}T" - $run $rm "$nlist" "${nlist}S" "${nlist}T" + new_value = lt_extend_str (getenv (name), value, 1); + lt_setenv (name, new_value); + XFREE (new_value); + XFREE (name); + XFREE (value); +} - # Parse the name list into a source file. - $show "creating $output_objdir/$dlsyms" +void +lt_update_exe_path (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + (name ? name : ""), + (value ? value : ""))); - test -z "$run" && $echo > "$output_objdir/$dlsyms" "\ -/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */ -/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */ + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} -#ifdef __cplusplus -extern \"C\" { -#endif +void +lt_update_lib_path (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + (name ? name : ""), + (value ? value : ""))); -/* Prevent the only kind of declaration conflicts we can make. */ -#define lt_preloaded_symbols some_other_symbol + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} -/* External symbol declarations for the compiler. */\ -" - if test "$dlself" = yes; then - $show "generating symbol list for \`$output'" +EOF +} +# end: func_emit_cwrapperexe_src - test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist" +# func_mode_link arg... +func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no - # Add our own program objects to the symbol list. 
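
lt_opt_process_env_prepend and lt_opt_process_env_append above both build the new value with lt_extend_str (getenv (name), value, to_end) and then call lt_setenv. A small self-contained sketch of the prepend case, assuming illustrative names only (prepend_str and the sample path are not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of prepending to an environment variable's current value,
   as the cwrapper does for its env-prepend option. */
static char *prepend_str (const char *orig, const char *add)
{
  size_t orig_len = orig ? strlen (orig) : 0;
  size_t add_len = strlen (add);
  char *result = malloc (add_len + orig_len + 1);

  memcpy (result, add, add_len);
  if (orig)
    memcpy (result + add_len, orig, orig_len + 1);  /* copies the trailing NUL too */
  else
    result[add_len] = '\0';
  return result;
}

int main (void)
{
  char *newpath = prepend_str (getenv ("PATH"), "/opt/build/.libs:");

  printf ("PATH would become: %s\n", newpath);
  free (newpath);
  return 0;
}
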
- progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` - for arg in $progfiles; do - $show "extracting global C symbols from \`$arg'" - $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" - done + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt - if test -n "$exclude_expsyms"; then - $run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' - $run eval '$mv "$nlist"T "$nlist"' - fi - - if test -n "$export_symbols_regex"; then - $run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' - $run eval '$mv "$nlist"T "$nlist"' - fi - - # Prepare the list of exported symbols - if test -z "$export_symbols"; then - export_symbols="$output_objdir/$outputname.exp" - $run $rm $export_symbols - $run eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' - case $host in - *cygwin* | *mingw* ) - $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' - $run eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' - ;; - esac - else - $run eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' - $run eval 'grep -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' - $run eval 'mv "$nlist"T "$nlist"' - case $host in - *cygwin* | *mingw* ) - $run eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' - $run eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' - ;; - esac - fi - fi + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= - for arg in $dlprefiles; do - $show "extracting global C symbols from \`$arg'" - name=`$echo "$arg" | ${SED} -e 's%^.*/%%'` - $run eval '$echo ": $name " >> "$nlist"' - $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'" - done + avoid_version=no + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile - if test -z "$run"; then - # Make sure we have at least an empty file. - test -f "$nlist" || : > "$nlist" - - if test -n "$exclude_expsyms"; then - $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T - $mv "$nlist"T "$nlist" - fi + # We need to know -static, to get the right output filenames. 
+ for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done - # Try sorting and uniquifying the output. - if grep -v "^: " < "$nlist" | - if sort -k 3 /dev/null 2>&1; then - sort -k 3 - else - sort +2 - fi | - uniq > "$nlist"S; then - : + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes else - grep -v "^: " < "$nlist" > "$nlist"S + dlself=needless + export_dynamic=yes fi - - if test -f "$nlist"S; then - eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"' + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + dlfiles="$dlfiles $arg" else - $echo '/* NONE */' >> "$output_objdir/$dlsyms" + dlprefiles="$dlprefiles $arg" fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) deplibs="$deplibs $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# moreargs="$moreargs $fil" + arg=$fil + # A libtool-controlled object. - $echo >> "$output_objdir/$dlsyms" "\ + # Check to see that this really is a libtool object. 
+ if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= -#undef lt_preloaded_symbols + # Read the .lo file + func_source "$arg" -#if defined (__STDC__) && __STDC__ -# define lt_ptr void * -#else -# define lt_ptr char * -# define const -#endif + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi -/* The mapping between symbol names and symbols. */ -" + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" - case $host in - *cygwin* | *mingw* ) - $echo >> "$output_objdir/$dlsyms" "\ -/* DATA imports from DLLs on WIN32 can't be const, because - runtime relocations are performed -- see ld's documentation - on pseudo-relocs */ -struct { -" - ;; - * ) - $echo >> "$output_objdir/$dlsyms" "\ -const struct { -" - ;; - esac + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi - $echo >> "$output_objdir/$dlsyms" "\ - const char *name; - lt_ptr address; -} -lt_preloaded_symbols[] = -{\ -" + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi - eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms" + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi - $echo >> "$output_objdir/$dlsyms" "\ - {0, (lt_ptr) 0} -}; + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt_preloaded_symbols; -} -#endif + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" -#ifdef __cplusplus -} -#endif\ -" + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" fi - - pic_flag_for_symtable= - case $host in - # compiling the symbol table file with pic_flag works around - # a FreeBSD bug that causes programs to crash when -lm is - # linked before any other PIC object. But we must not use - # pic_flag when linking with -static. The problem exists in - # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. 
- *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) - case "$compile_command " in - *" -static "*) ;; - *) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";; - esac;; - *-*-hpux*) - case "$compile_command " in - *" -static "*) ;; - *) pic_flag_for_symtable=" $pic_flag";; - esac - esac - - # Now compile the dynamic symbol file. - $show "(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")" - $run eval '(cd $output_objdir && $LTCC $LTCFLAGS -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $? - - # Clean up the generated files. - $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T" - $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T" - - # Transform the symbol file into the correct name. - case $host in - *cygwin* | *mingw* ) - if test -f "$output_objdir/${outputname}.def" ; then - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}.def $output_objdir/${outputname}S.${objext}%" | $NL2SP` - else - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%" | $NL2SP` - fi - ;; - * ) - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%" | $NL2SP` - ;; - esac + arg=$save_arg + prev= + continue ;; - *) - $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2 - exit $EXIT_FAILURE + precious_regex) + precious_files_regex="$arg" + prev= + continue ;; - esac - else - # We keep going just in case the user didn't refer to - # lt_preloaded_symbols. The linker will fail if global_symbol_pipe - # really was required. - - # Nullify the symbol file. - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "s% @SYMFILE@%%" | $NL2SP` - finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "s% @SYMFILE@%%" | $NL2SP` - fi - - if test "$need_relink" = no || test "$build_libtool_libs" != yes; then - # Replace the output file specification. - compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e 's%@OUTPUT@%'"$output"'%g' | $NL2SP` - link_command="$compile_command$compile_rpath" - - # We have no uninstalled library dependencies, so finalize right now. - $show "$link_command" - $run eval "$link_command" - exit_status=$? - - # Delete the generated files. - if test -n "$dlsyms"; then - $show "$rm $output_objdir/${outputname}S.${objext}" - $run $rm "$output_objdir/${outputname}S.${objext}" - fi - - exit $exit_status - fi - - if test -n "$shlibpath_var"; then - # We should set the shlibpath_var - rpath= - for dir in $temp_rpath; do - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) - # Absolute path. - rpath="$rpath$dir:" - ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; *) - # Relative path: add a thisdir entry. 
- rpath="$rpath\$thisdir/$dir:" + func_fatal_error "only absolute run-paths are allowed" ;; esac - done - temp_rpath="$rpath" - fi + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) rpath="$rpath $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) xrpath="$xrpath $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + weak_libs="$weak_libs $arg" + prev= + continue + ;; + xcclinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + compiler_flags="$compiler_flags $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" - if test -n "$compile_shlibpath$finalize_shlibpath"; then - compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" - fi - if test -n "$finalize_shlibpath"; then - finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" - fi + prevarg="$arg" - compile_var= - finalize_var= - if test -n "$runpath_var"; then - if test -n "$perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do - rpath="$rpath$dir:" - done - compile_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi - if test -n "$finalize_perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $finalize_perm_rpath; do - rpath="$rpath$dir:" - done - finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" fi - fi + continue + ;; - if test "$no_install" = yes; then - # We don't need to create a wrapper script. - link_command="$compile_var$compile_command$compile_rpath" - # Replace the output file specification. - link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` - # Delete the old output file. - $run $rm $output - # Link the executable and exit - $show "$link_command" - $run eval "$link_command" || exit $? - exit $EXIT_SUCCESS - fi + -allow-undefined) + # FIXME: remove this flag sometime in the future. 
+ func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; - if test "$hardcode_action" = relink; then - # Fast installation is not supported - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" + -avoid-version) + avoid_version=yes + continue + ;; - $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2 - $echo "$modename: \`$output' will be relinked during installation" 1>&2 - else - if test "$fast_install" != no; then - link_command="$finalize_var$compile_command$finalize_rpath" - if test "$fast_install" = yes; then - relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $SP2NL | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g' | $NL2SP` - else - # fast_install is set to needless - relink_command= - fi + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms else - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" + prev=expsyms_regex fi - fi - - # Replace the output file specification. - link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + continue + ;; - # Delete the old output files. - $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname + -framework) + prev=framework + continue + ;; - $show "$link_command" - $run eval "$link_command" || exit $? + -inst-prefix-dir) + prev=inst_prefix + continue + ;; - # Now create the wrapper script. - $show "creating $output" + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; - # Quote the relink command for shipping. - if test -n "$relink_command"; then - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" + -L*) + func_stripname '-L' '' "$arg" + dir=$func_stripname_result + if test -z "$dir"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" else - var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` - relink_command="$var=\"$var_value\"; export $var; $relink_command" + func_fatal_error "need path for \`-L' option" fi - done - relink_command="(cd `pwd`; $relink_command)" - relink_command=`$echo "X$relink_command" | $SP2NL | $Xsed -e "$sed_quote_subst" | $NL2SP` - fi - - # Quote $echo for shipping. - if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then - case $progpath in - [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; - *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; + fi + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; esac - qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"` - else - qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"` - fi - - # Only actually do things if our run command is non-null. - if test -z "$run"; then - # win32 will think the script is a binary if it has - # a .exe suffix, so we strip it off here. - case $output in - *.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;; + case "$deplibs " in + *" -L$dir "*) ;; + *) + deplibs="$deplibs -L$dir" + lib_search_path="$lib_search_path $dir" + ;; esac - # test for cygwin because mv fails w/o .exe extensions case $host in - *cygwin*) - exeext=.exe - outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;; - *) exeext= ;; + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) dllsearchpath="$dllsearchpath:$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; esac - case $host in - *cygwin* | *mingw* ) - output_name=`basename $output` - output_path=`dirname $output` - cwrappersource="$output_path/$objdir/lt-$output_name.c" - cwrapper="$output_path/$output_name.exe" - $rm $cwrappersource $cwrapper - trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 - - cat > $cwrappersource <> $cwrappersource<<"EOF" -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(PATH_MAX) -# define LT_PATHMAX PATH_MAX -#elif defined(MAXPATHLEN) -# define LT_PATHMAX MAXPATHLEN -#else -# define LT_PATHMAX 1024 -#endif - -#ifndef DIR_SEPARATOR -# define DIR_SEPARATOR '/' -# define PATH_SEPARATOR ':' -#endif + continue + ;; -#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ - defined (__OS2__) -# define HAVE_DOS_BASED_FILE_SYSTEM -# ifndef DIR_SEPARATOR_2 -# define DIR_SEPARATOR_2 '\\' -# endif -# ifndef PATH_SEPARATOR_2 -# define PATH_SEPARATOR_2 ';' -# endif -#endif + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + deplibs="$deplibs System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. 
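
The -L handling above accumulates dllsearchpath with the pattern case :$dllsearchpath: in *":$dir:"*) so each directory is added at most once. The same whole-entry membership test can be sketched in C as follows (append_unique, the buffer sizes, and the sample paths are illustrative, not from the patch):

#include <stdio.h>
#include <string.h>

/* Append a directory to a colon-separated list only when it is not
   already present; both strings are wrapped in ':' so that only whole
   entries can match. */
static void append_unique (char *list, size_t list_size, const char *dir)
{
  char wrapped[4096];
  char probe[1024];

  snprintf (wrapped, sizeof wrapped, ":%s:", list);
  snprintf (probe, sizeof probe, ":%s:", dir);
  if (strstr (wrapped, probe) != NULL)
    return;                                   /* already present */

  if (*list != '\0')
    strncat (list, ":", list_size - strlen (list) - 1);
  strncat (list, dir, list_size - strlen (list) - 1);
}

int main (void)
{
  char dllsearchpath[4096] = "";

  append_unique (dllsearchpath, sizeof dllsearchpath, "/usr/local/lib");
  append_unique (dllsearchpath, sizeof dllsearchpath, "/usr/local/bin");
  append_unique (dllsearchpath, sizeof dllsearchpath, "/usr/local/lib");
  printf ("%s\n", dllsearchpath);             /* /usr/local/lib:/usr/local/bin */
  return 0;
}
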
+ continue + ;; + esac + fi + deplibs="$deplibs $arg" + continue + ;; -#ifndef DIR_SEPARATOR_2 -# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) -#else /* DIR_SEPARATOR_2 */ -# define IS_DIR_SEPARATOR(ch) \ - (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) -#endif /* DIR_SEPARATOR_2 */ + -module) + module=yes + continue + ;; -#ifndef PATH_SEPARATOR_2 -# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) -#else /* PATH_SEPARATOR_2 */ -# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) -#endif /* PATH_SEPARATOR_2 */ + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot) + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; -#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) -#define XFREE(stale) do { \ - if (stale) { free ((void *) stale); stale = 0; } \ -} while (0) + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; + esac + continue + ;; -/* -DDEBUG is fairly common in CFLAGS. */ -#undef DEBUG -#if defined DEBUGWRAPPER -# define DEBUG(format, ...) fprintf(stderr, format, __VA_ARGS__) -#else -# define DEBUG(format, ...) -#endif + -multi_module) + single_module="${wl}-multi_module" + continue + ;; -const char *program_name = NULL; + -no-fast-install) + fast_install=no + continue + ;; -void * xmalloc (size_t num); -char * xstrdup (const char *string); -const char * base_name (const char *name); -char * find_executable(const char *wrapper); -int check_executable(const char *path); -char * strendzap(char *str, const char *pat); -void lt_fatal (const char *message, ...); + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. + func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; -int -main (int argc, char *argv[]) -{ - char **newargz; - int i; + -no-undefined) + allow_undefined=no + continue + ;; - program_name = (char *) xstrdup (base_name (argv[0])); - DEBUG("(main) argv[0] : %s\n",argv[0]); - DEBUG("(main) program_name : %s\n",program_name); - newargz = XMALLOC(char *, argc+2); -EOF + -objectlist) + prev=objectlist + continue + ;; - cat >> $cwrappersource <> $cwrappersource <<"EOF" - newargz[1] = find_executable(argv[0]); - if (newargz[1] == NULL) - lt_fatal("Couldn't find %s", argv[0]); - DEBUG("(main) found exe at : %s\n",newargz[1]); - /* we know the script has the same name, without the .exe */ - /* so make sure newargz[1] doesn't end in .exe */ - strendzap(newargz[1],".exe"); - for (i = 1; i < argc; i++) - newargz[i+1] = xstrdup(argv[i]); - newargz[argc+1] = NULL; + -precious-files-regex) + prev=precious_regex + continue + ;; - for (i=0; i> $cwrappersource <> $cwrappersource <> $cwrappersource <<"EOF" - return 127; -} + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + continue + ;; -void * -xmalloc (size_t num) -{ - void * p = (void *) malloc (num); - if (!p) - lt_fatal ("Memory exhausted"); + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; - return p; -} + -shrext) + prev=shrext + continue + ;; -char * -xstrdup (const char *string) -{ - return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL -; -} + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; -const char * -base_name (const char *name) -{ - const char *base; + -thread-safe) + thread_safe=yes + continue + ;; -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - /* Skip over the disk name in MSDOS pathnames. */ - if (isalpha ((unsigned char)name[0]) && name[1] == ':') - name += 2; -#endif + -version-info) + prev=vinfo + continue + ;; - for (base = name; *name; name++) - if (IS_DIR_SEPARATOR (*name)) - base = name + 1; - return base; -} + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; -int -check_executable(const char * path) -{ - struct stat st; + -weak) + prev=weak + continue + ;; - DEBUG("(check_executable) : %s\n", path ? (*path ? path : "EMPTY!") : "NULL!"); - if ((!path) || (!*path)) - return 0; + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + arg="$arg $wl$func_quote_for_eval_result" + compiler_flags="$compiler_flags $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; - if ((stat (path, &st) >= 0) && - ( - /* MinGW & native WIN32 do not support S_IXOTH or S_IXGRP */ -#if defined (S_IXOTH) - ((st.st_mode & S_IXOTH) == S_IXOTH) || -#endif -#if defined (S_IXGRP) - ((st.st_mode & S_IXGRP) == S_IXGRP) || -#endif - ((st.st_mode & S_IXUSR) == S_IXUSR)) - ) - return 1; - else - return 0; -} + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + arg="$arg $wl$func_quote_for_eval_result" + compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" + linker_flags="$linker_flags $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; -/* Searches for the full path of the wrapper. Returns - newly allocated full path name if found, NULL otherwise */ -char * -find_executable (const char* wrapper) -{ - int has_slash = 0; - const char* p; - const char* p_next; - /* static buffer for getcwd */ - char tmp[LT_PATHMAX + 1]; - int tmp_len; - char* concat_name; + -Xcompiler) + prev=xcompiler + continue + ;; - DEBUG("(find_executable) : %s\n", wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!"); + -Xlinker) + prev=xlinker + continue + ;; - if ((wrapper == NULL) || (*wrapper == '\0')) - return NULL; + -XCClinker) + prev=xcclinker + continue + ;; - /* Absolute path? 
*/ -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - if (isalpha ((unsigned char)wrapper[0]) && wrapper[1] == ':') - { - concat_name = xstrdup (wrapper); - if (check_executable(concat_name)) - return concat_name; - XFREE(concat_name); - } - else - { -#endif - if (IS_DIR_SEPARATOR (wrapper[0])) - { - concat_name = xstrdup (wrapper); - if (check_executable(concat_name)) - return concat_name; - XFREE(concat_name); - } -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - } -#endif + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; - for (p = wrapper; *p; p++) - if (*p == '/') - { - has_slash = 1; - break; - } - if (!has_slash) - { - /* no slashes; search PATH */ - const char* path = getenv ("PATH"); - if (path != NULL) - { - for (p = path; *p; p = p_next) - { - const char* q; - size_t p_len; - for (q = p; *q; q++) - if (IS_PATH_SEPARATOR(*q)) - break; - p_len = q - p; - p_next = (*q == '\0' ? q : q + 1); - if (p_len == 0) - { - /* empty path: current directory */ - if (getcwd (tmp, LT_PATHMAX) == NULL) - lt_fatal ("getcwd failed"); - tmp_len = strlen(tmp); - concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); - memcpy (concat_name, tmp, tmp_len); - concat_name[tmp_len] = '/'; - strcpy (concat_name + tmp_len + 1, wrapper); - } - else - { - concat_name = XMALLOC(char, p_len + 1 + strlen(wrapper) + 1); - memcpy (concat_name, p, p_len); - concat_name[p_len] = '/'; - strcpy (concat_name + p_len + 1, wrapper); - } - if (check_executable(concat_name)) - return concat_name; - XFREE(concat_name); - } - } - /* not found in PATH; assume curdir */ - } - /* Relative path | not found in path: prepend cwd */ - if (getcwd (tmp, LT_PATHMAX) == NULL) - lt_fatal ("getcwd failed"); - tmp_len = strlen(tmp); - concat_name = XMALLOC(char, tmp_len + 1 + strlen(wrapper) + 1); - memcpy (concat_name, tmp, tmp_len); - concat_name[tmp_len] = '/'; - strcpy (concat_name + tmp_len + 1, wrapper); + # -64, -mips[0-9] enable 64-bit mode on the SGI compiler + # -r[0-9][0-9]* specifies the processor on the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler + # +DA*, +DD* enable 64-bit mode on the HP compiler + # -q* pass through compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* pass through architecture-specific + # compiler args for GCC + # -F/path gives path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC + # @file GCC response files + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + compiler_flags="$compiler_flags $arg" + continue + ;; - if (check_executable(concat_name)) - return concat_name; - XFREE(concat_name); - return NULL; -} + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; -char * -strendzap(char *str, const char *pat) -{ - size_t len, patlen; + *.$objext) + # A standard object. + objs="$objs $arg" + ;; - assert(str != NULL); - assert(pat != NULL); + *.lo) + # A libtool-controlled object. - len = strlen(str); - patlen = strlen(pat); + # Check to see that this really is a libtool object. 
+ if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= - if (patlen <= len) - { - str += len - patlen; - if (strcmp(str, pat) == 0) - *str = '\0'; - } - return str; -} + # Read the .lo file + func_source "$arg" -static void -lt_error_core (int exit_status, const char * mode, - const char * message, va_list ap) -{ - fprintf (stderr, "%s: %s: ", program_name, mode); - vfprintf (stderr, message, ap); - fprintf (stderr, ".\n"); + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi - if (exit_status >= 0) - exit (exit_status); -} + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" -void -lt_fatal (const char *message, ...) -{ - va_list ap; - va_start (ap, message); - lt_error_core (EXIT_FAILURE, "FATAL", message, ap); - va_end (ap); -} -EOF - # we should really use a build-platform specific compiler - # here, but OTOH, the wrappers (shell script and this C one) - # are only useful if you want to execute the "real" binary. - # Since the "real" binary is built for $host, then this - # wrapper might as well be built for $host, too. - $run $LTCC $LTCFLAGS -s -o $cwrapper $cwrappersource - ;; - esac - $rm $output - trap "$rm $output; exit $EXIT_FAILURE" 1 2 15 + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" - $echo > $output "\ -#! $SHELL + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi -# $output - temporary wrapper script for $objdir/$outputname -# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP -# -# The $output program cannot be directly executed until all the libtool -# libraries that it depends on are installed. -# -# This wrapper script should never be moved out of the build directory. -# If it is, it will not operate correctly. + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -Xsed='${SED} -e 1s/^X//' -sed_quote_subst='$sed_quote_subst' + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi -# Be Bourne compatible (taken from Autoconf:_AS_BOURNE_COMPATIBLE). -if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then - emulate sh - NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac -fi -BIN_SH=xpg4; export BIN_SH # for Tru64 -DUALCASE=1; export DUALCASE # for MKS sh + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. 
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" -relink_command=\"$relink_command\" + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; -# This environment variable determines our operation mode. -if test \"\$libtool_install_magic\" = \"$magic\"; then - # install mode needs the following variable: - notinst_deplibs='$notinst_deplibs' -else - # When we are sourced in execute mode, \$file and \$echo are already set. - if test \"\$libtool_execute_magic\" != \"$magic\"; then - echo=\"$qecho\" - file=\"\$0\" - # Make sure echo works. - if test \"X\$1\" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift - elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then - # Yippee, \$echo works! - : + *.$libext) + # An archive. + deplibs="$deplibs $arg" + old_deplibs="$old_deplibs $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + dlfiles="$dlfiles $arg" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + dlprefiles="$dlprefiles $arg" + prev= + else + deplibs="$deplibs $arg" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` else - # Restart under the correct shell, and then maybe \$echo will work. - exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + shlib_search_path= fi - fi\ -" - $echo >> $output "\ + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" - # Find the directory that this script lives in. - thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` - test \"x\$thisdir\" = \"x\$file\" && thisdir=. 
+ func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + # Create the object directory. + func_mkdir_p "$output_objdir" - # Follow symbolic links until we get to the real thisdir. - file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` - while test -n \"\$file\"; do - destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac - # If there was a directory component, then change thisdir. - if test \"x\$destdir\" != \"x\$file\"; then - case \"\$destdir\" in - [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; - *) thisdir=\"\$thisdir/\$destdir\" ;; - esac - fi + specialdeplibs= - file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\` - file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` - done + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_duplicate_deps ; then + case "$libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + libs="$libs $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + esac + pre_post_deps="$pre_post_deps $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + case $lib in + *.la) func_source "$lib" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"` + case " $weak_libs " in + *" $deplib_base "*) ;; + *) deplibs="$deplibs $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + compiler_flags="$compiler_flags $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + dir=$func_stripname_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) lib="$deplib" ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + $ECHO + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because the file extensions .$libext of this argument makes me believe" + $ECHO "*** that it is just a static archive that I should not use here." + else + $ECHO + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" 
+ deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + newdlprefiles="$newdlprefiles $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + newdlfiles="$newdlfiles $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && dlfiles="$dlfiles $dlopen" + test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. + convenience="$convenience $ladir/$objdir/$old_library" + old_convenience="$old_convenience $ladir/$objdir/$old_library" + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + for l in $old_library $library_names; do + linklib="$l" + done + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. 
+ if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + dlprefiles="$dlprefiles $lib $dependency_libs" + else + newdlfiles="$newdlfiles $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$libdir" + absdir="$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + newdlprefiles="$newdlprefiles $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + newdlprefiles="$newdlprefiles $dir/$dlname" + else + newdlprefiles="$newdlprefiles $dir/$linklib" + fi + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + newlib_search_path="$newlib_search_path $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath:" in + *"$absdir:"*) ;; + *) temp_rpath="$temp_rpath$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + notinst_deplibs="$notinst_deplibs $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + notinst_deplibs="$notinst_deplibs $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + $ECHO + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + $ECHO + $ECHO "*** And there doesn't seem to be a static archive available" + $ECHO "*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + $ECHO + $ECHO "*** Warning: This system can not link to static lib archive $lib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. 
But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + $ECHO "*** But as you try to build a module library, libtool will still create " + $ECHO "*** a static module, that should work as long as the dlopening application" + $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + $ECHO + $ECHO "*** However, this would only work if libtool was able to extract symbol" + $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" + $ECHO "*** not find such a program. So, this module is probably useless." + $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) xrpath="$xrpath $temp_xrpath";; + esac;; + *) temp_deplibs="$temp_deplibs $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + newlib_search_path="$newlib_search_path $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_dirname "$deplib" "" "." + dir="$func_dirname_result" + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) lib_search_path="$lib_search_path $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + tmp_libs="$tmp_libs $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + objs="$objs$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. + case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + $ECHO + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + libobjs="$libobjs $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. 
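Per the lib* naming rule enforced earlier in this hunk, the bare library name is obtained by peeling the "lib" prefix and ".la" suffix off the output name (what func_stripname does); a plain-sed stand-in, using this package's own library as the example:

    outputname=libsqlite3.la
    name=`printf '%s\n' "$outputname" | sed -e 's%^lib%%' -e 's%\.la$%%'`
    echo "$name"          # sqlite3
    # libname_spec is typically 'lib$name', so the real file names
    # (libsqlite3.a, libsqlite3.so.*) are rebuilt from $name afterwards.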
+ # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. + case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... 
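The -version-info argument is split on colons exactly as shown above (the vinfo_number=no branch takes the fields as CURRENT:REVISION:AGE), followed by the AGE-vs-CURRENT sanity check. A self-contained rerun with an illustrative value:

    vinfo=8:6:8                       # illustrative CURRENT:REVISION:AGE
    save_ifs=$IFS; IFS=':'
    set dummy $vinfo 0 0 0
    shift
    IFS=$save_ifs
    current=$1 revision=$2 age=$3
    if test "$age" -gt "$current"; then
      echo "AGE \`$age' is greater than the current interface number \`$current'" >&2
    fi
    echo "current=$current revision=$revision age=$age"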
+ func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + verstring="$verstring:${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + libobjs="$libobjs $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. 
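For the common "linux" version_type above, those numbers become the shared-object suffix as (current - age).age.revision; with the same illustrative values and shell arithmetic standing in for func_arith:

    current=8 age=8 revision=6
    major=.$((current - age))
    versuffix="$major.$age.$revision"
    echo "libsqlite3.so$versuffix"    # libsqlite3.so.0.8.6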
+ removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + removelist="$removelist $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + oldlibs="$oldlibs $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"` + # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"` + # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + temp_xrpath="$temp_xrpath -R$libdir" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) dlfiles="$dlfiles $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) dlprefiles="$dlprefiles $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + deplibs="$deplibs System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + deplibs="$deplibs -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. 
We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $ECHO + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $ECHO + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. 
But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \ + -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"` + done + fi + if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' | + $GREP . >/dev/null; then + $ECHO + if test "X$deplibs_check_method" = "Xnone"; then + $ECHO "*** Warning: inter-library dependencies are not supported in this platform." + else + $ECHO "*** Warning: inter-library dependencies are not known to be supported." + fi + $ECHO "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + fi + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + $ECHO + $ECHO "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + $ECHO "*** a static module, that should work as long as the dlopening" + $ECHO "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + $ECHO + $ECHO "*** However, this would only work if libtool was able to extract symbol" + $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" + $ECHO "*** not find such a program. So, this module is probably useless." + $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + $ECHO "*** The inter-library dependencies that have been dropped here will be" + $ECHO "*** automatically added whenever a program is linked with this library" + $ECHO "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + $ECHO + $ECHO "*** Since this library must not contain undefined symbols," + $ECHO "*** because either the platform does not support them or" + $ECHO "*** it was explicitly requested with -no-undefined," + $ECHO "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
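Nearly every string transformation in this hunk goes through the "X" prefix plus $Xsed pattern: the leading X keeps values that start with a dash from being taken as echo options, and the first sed expression strips it back off. In isolation, with a local stand-in for $Xsed:

    Xsed="sed -e 1s/^X//"
    deplib="-lsqlite3"
    echo "X$deplib" | $Xsed           # prints: -lsqlite3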
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + dep_rpath="$dep_rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi - # Try to get the absolute directory name. - absdir=\`cd \"\$thisdir\" && pwd\` - test -n \"\$absdir\" && thisdir=\"\$absdir\" -" + shlibpath="$finalize_shlibpath" + test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi - if test "$fast_install" = yes; then - $echo >> $output "\ - program=lt-'$outputname'$exeext - progdir=\"\$thisdir/$objdir\" + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift - if test ! 
-f \"\$progdir/\$program\" || \\ - { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ - test \"X\$file\" != \"X\$progdir/\$program\"; }; then + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi - file=\"\$\$-\$program\" + lib="$output_objdir/$realname" + linknames= + for link + do + linknames="$linknames $link" + done - if test ! -d \"\$progdir\"; then - $mkdir \"\$progdir\" - else - $rm \"\$progdir/\$file\" - fi" + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= - $echo >> $output "\ + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + delfiles="$delfiles $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac - # relink executable if necessary - if test -n \"\$relink_command\"; then - if relink_command_output=\`eval \$relink_command 2>&1\`; then : - else - $echo \"\$relink_command_output\" >&2 - $rm \"\$progdir/\$file\" - exit $EXIT_FAILURE - fi - fi + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + func_len " $cmd" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi - $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || - { $rm \"\$progdir/\$program\"; - $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; } - $rm \"\$progdir/\$file\" - fi" - else - $echo >> $output "\ - program='$outputname' - progdir=\"\$thisdir/$objdir\" -" + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi - $echo >> $output "\ + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + tmp_deplibs="$tmp_deplibs $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" - if test -f \"\$progdir/\$program\"; then" + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" - # Export our shlibpath_var if we have one. - if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then - $echo >> $output "\ - # Add our own library path to $shlibpath_var - $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi - # Some systems cannot cope with colon-terminated $shlibpath_var - # The second colon is a workaround for a bug in BeOS R4 sed - $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + linker_flags="$linker_flags $flag" + fi - export $shlibpath_var -" + # Make a backup of the uninstalled library when relinking + if test "$mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi - # fixup the dll searchpath if we need to. 
- if test -n "$dllsearchpath"; then - $echo >> $output "\ - # Add the dll search path components to the executable PATH - PATH=$dllsearchpath:\$PATH -" + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi fi - $echo >> $output "\ - if test \"\$libtool_execute_magic\" != \"$magic\"; then - # Run the actual program with our arguments. -" - case $host in - # Backslashes separate directories on plain windows - *-*-mingw | *-*-os2*) - $echo >> $output "\ - exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} -" - ;; + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. - *) - $echo >> $output "\ - exec \"\$progdir/\$program\" \${1+\"\$@\"} -" - ;; - esac - $echo >> $output "\ - \$echo \"\$0: cannot exec \$program \$*\" - exit $EXIT_FAILURE - fi - else - # The program doesn't exist. - \$echo \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 - \$echo \"This script is just a wrapper for \$program.\" 1>&2 - $echo \"See the $PACKAGE documentation for more information.\" 1>&2 - exit $EXIT_FAILURE - fi -fi\ -" - chmod +x $output - fi - exit $EXIT_SUCCESS - ;; - esac + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + output_la=`$ECHO "X$output" | $Xsed -e "$basename"` - # See if we need to build an old-fashioned archive. - for oldlib in $oldlibs; do + # Clear the reloadable object creation command queue and + # initialize k to one. 
+ test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 - if test "$build_libtool_libs" = convenience; then - oldobjs="$libobjs_save" - addlibs="$convenience" - build_libtool_libs=no - else - if test "$build_libtool_libs" = module; then - oldobjs="$libobjs_save" - build_libtool_libs=no - else - oldobjs="$old_deplibs $non_pic_objects" - fi - addlibs="$old_convenience" - fi + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + $ECHO 'INPUT (' > $output + for obj in $save_libobjs + do + $ECHO "$obj" >> $output + done + $ECHO ')' >> $output + delfiles="$delfiles $output" + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + $ECHO "$obj" >> $output + done + delfiles="$delfiles $output" + output=$firstobj\"$file_list_spec$output\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + eval concat_cmds=\"$reload_cmds $objlist $last_robj\" + else + # All subsequent reloadable object files will link in + # the last one created. + eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=$obj + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + delfiles="$delfiles $output" - if test -n "$addlibs"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" + else + output= + fi - func_extract_archives $gentop $addlibs - oldobjs="$oldobjs $func_extract_archives_result" - fi + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi - # Do each command in the archive commands. 
- if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then - cmds=$old_archive_from_new_cmds - else - # POSIX demands no paths to be encoded in archives. We have - # to avoid creating archives with duplicate basenames if we - # might have to extract them afterwards, e.g., when creating a - # static archive out of a convenience library, or when linking - # the entirety of a libtool archive into another (currently - # not supported by libtool). - if (for obj in $oldobjs - do - $echo "X$obj" | $Xsed -e 's%^.*/%%' - done | sort | sort -uc >/dev/null 2>&1); then - : - else - $echo "copying selected object files to avoid basename conflicts..." + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" - if test -z "$gentop"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" - $show "${rm}r $gentop" - $run ${rm}r "$gentop" - $show "$mkdir $gentop" - $run $mkdir "$gentop" - exit_status=$? - if test "$exit_status" -ne 0 && test ! -d "$gentop"; then - exit $exit_status + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi - save_oldobjs=$oldobjs - oldobjs= - counter=1 - for obj in $save_oldobjs - do - objbase=`$echo "X$obj" | $Xsed -e 's%^.*/%%'` - case " $oldobjs " in - " ") oldobjs=$obj ;; - *[\ /]"$objbase "*) - while :; do - # Make sure we don't pick an alternate name that also - # overlaps. - newobj=lt$counter-$objbase - counter=`expr $counter + 1` - case " $oldobjs " in - *[\ /]"$newobj "*) ;; - *) if test ! -f "$gentop/$newobj"; then break; fi ;; - esac - done - $show "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" - $run ln "$obj" "$gentop/$newobj" || - $run cp "$obj" "$gentop/$newobj" - oldobjs="$oldobjs $gentop/$newobj" - ;; - *) oldobjs="$oldobjs $obj" ;; - esac - done - fi + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. 
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi - eval cmds=\"$old_archive_cmds\" + libobjs=$output + # Restore the value of output. + output=$save_output - if len=`expr "X$cmds" : ".*"` && - test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then - cmds=$old_archive_cmds - else - # the command line is too long to link in one step, link in parts - $echo "using piecewise archive linking..." - save_RANLIB=$RANLIB - RANLIB=: - objlist= - concat_cmds= - save_oldobjs=$oldobjs + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. - # Is there a better way of finding the last object in the list? - for obj in $save_oldobjs - do - last_oldobj=$obj - done - for obj in $save_oldobjs - do - oldobjs="$objlist $obj" - objlist="$objlist $obj" - eval test_cmds=\"$old_archive_cmds\" - if len=`expr "X$test_cmds" : ".*" 2>/dev/null` && - test "$len" -le "$max_cmd_len"; then - : + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds else - # the above command should be used before it gets too long - oldobjs=$objlist - if test "$obj" = "$last_oldobj" ; then - RANLIB=$save_RANLIB - fi - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" - objlist= + cmds=$module_cmds fi - done - RANLIB=$save_RANLIB - oldobjs=$objlist - if test "X$oldobjs" = "X" ; then - eval cmds=\"\$concat_cmds\" else - eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi fi fi - fi - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - eval cmd=\"$cmd\" - IFS="$save_ifs" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - done - if test -n "$generated"; then - $show "${rm}r$generated" - $run ${rm}r$generated - fi + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi - # Now create the libtool archive. 
- case $output in - *.la) - old_library= - test "$build_old_libs" = yes && old_library="$libname.$libext" - $show "creating $output" + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" - else - var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"` - relink_command="$var=\"$var_value\"; export $var; $relink_command" + func_extract_archives $gentop $dlprefiles + libobjs="$libobjs $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= fi - done - # Quote the link command for shipping. - relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" - relink_command=`$echo "X$relink_command" | $SP2NL | $Xsed -e "$sed_quote_subst" | $NL2SP` - if test "$hardcode_automatic" = yes ; then - relink_command= - fi + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? - # Only create the output if not a dry run. - if test -z "$run"; then - for installed in no yes; do - if test "$installed" = yes; then - if test -z "$install_libdir"; then - break + # Restore the uninstalled library and exit + if test "$mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) fi - output="$output_objdir/$outputname"i - # Replace all uninstalled libtool libraries with the installed ones - newdependency_libs= - for deplib in $dependency_libs; do - case $deplib in - *.la) - name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - if test -z "$libdir"; then - $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdependency_libs="$newdependency_libs $libdir/$name" - ;; - *) newdependency_libs="$newdependency_libs $deplib" ;; - esac - done - dependency_libs="$newdependency_libs" - newdlfiles= - for lib in $dlfiles; do - name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - if test -z "$libdir"; then - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdlfiles="$newdlfiles $libdir/$name" - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'` - eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - if test -z "$libdir"; then - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - exit $EXIT_FAILURE - fi - newdlprefiles="$newdlprefiles $libdir/$name" - done - dlprefiles="$newdlprefiles" - else - newdlfiles= - for lib in $dlfiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlfiles="$newdlfiles $abs" - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlprefiles="$newdlprefiles $abs" - done - dlprefiles="$newdlprefiles" - fi - 
$rm $output - # place dlname in correct position for cygwin - tdlname=$dlname - case $host,$output,$installed,$module,$dlname in - *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; - esac - $echo > $output "\ -# $outputname - a libtool library file -# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP -# -# Please DO NOT delete this file! -# It is necessary for linking the library. - -# The name that we can dlopen(3). -dlname='$tdlname' - -# Names of this library. -library_names='$library_names' - -# The name of the static archive. -old_library='$old_library' - -# Libraries that this one depends upon. -dependency_libs='$dependency_libs' -# Version information for $libname. -current=$current -age=$age -revision=$revision + exit $lt_exit + } + done + IFS="$save_ifs" -# Is this an already installed library? -installed=$installed + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? -# Should we warn about portability when linking against -modules? -shouldnotlink=$module + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi -# Files to dlopen/dlpreopen -dlopen='$dlfiles' -dlpreopen='$dlprefiles' + exit $EXIT_SUCCESS + fi -# Directory that this library needs to be installed in: -libdir='$install_libdir'" - if test "$installed" = no && test "$need_relink" = yes; then - $echo >> $output "\ -relink_command=\"$relink_command\"" + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done - fi - - # Do a symbolic link so that the libtool archive can be found in - # LD_LIBRARY_PATH before the program is installed. - $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" - $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $? - ;; - esac - exit $EXIT_SUCCESS - ;; - - # libtool install mode - install) - modename="$modename: install" - - # There may be an optional sh(1) argument at the beginning of - # install_prog (especially on Windows NT). - if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || - # Allow the use of GNU shtool's install command. - $echo "X$nonopt" | grep shtool > /dev/null; then - # Aesthetically quote it. - arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - install_prog="$arg " - arg="$1" - shift - else - install_prog= - arg=$nonopt - fi - # The real first argument should be the name of the installation program. - # Aesthetically quote it. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi ;; - esac - install_prog="$install_prog$arg" - # We need to accept at least all the BSD install flags. 
- dest= - files= - opts= - prev= - install_type= - isdir=no - stripme= - for arg - do - if test -n "$dest"; then - files="$files $dest" - dest=$arg - continue + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" fi - case $arg in - -d) isdir=yes ;; - -f) - case " $install_prog " in - *[\\\ /]cp\ *) ;; - *) prev=$arg ;; - esac - ;; - -g | -m | -o) prev=$arg ;; - -s) - stripme=" -s" - continue - ;; - -*) + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result ;; *) - # If the previous option needed an argument, then skip it. - if test -n "$prev"; then - prev= - else - dest=$arg - continue - fi + libobj= + obj="$output" ;; esac - # Aesthetically quote the argument. - arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"` - case $arg in - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - arg="\"$arg\"" - ;; - esac - install_prog="$install_prog $arg" - done + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj - if test -z "$install_prog"; then - $echo "$modename: you must specify an install program" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. + wl= - if test -n "$prev"; then - $echo "$modename: the \`$prev' option requires an argument" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + generated="$generated $gentop" - if test -z "$files"; then - if test -z "$dest"; then - $echo "$modename: no file or destination specified" 1>&2 - else - $echo "$modename: you must specify a destination" 1>&2 + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi fi - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi - # Strip any trailing slash from the destination. - dest=`$echo "X$dest" | $Xsed -e 's%/$%%'` + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test - # Check to see that the destination is a directory. - test -d "$dest" && isdir=yes - if test "$isdir" = yes; then - destdir="$dest" - destname= - else - destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'` - test "X$destdir" = "X$dest" && destdir=. 
- destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'` + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' - # Not a directory, so check to see that there is only one file specified. - set dummy $files - if test "$#" -gt 2; then - $echo "$modename: \`$dest' is not a directory" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS fi - fi - case $destdir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - for file in $files; do - case $file in - *.lo) ;; - *) - $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - esac - done + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS ;; - esac - # This variable tells wrapper scripts just to set variables rather - # than running their programs. - libtool_install_magic="$magic" + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" - staticlibs= - future_libdirs= - current_libdirs= - for file in $files; do + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" - # Do each installation. - case $file in - *.$libext) - # Do the static libraries later. - staticlibs="$staticlibs $file" + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` ;; + esac - *.la) - # Check to see that this really is a libtool archive. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : - else - $echo "$modename: \`$file' is not a valid libtool archive" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + compile_command="$compile_command ${wl}-bind_at_load" + finalize_command="$finalize_command ${wl}-bind_at_load" + ;; + esac fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac - library_names= - old_library= - relink_command= - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac - # Add the libdir to current_libdirs if it is the destination. - if test "X$destdir" = "X$libdir"; then - case "$current_libdirs " in - *" $libdir "*) ;; - *) current_libdirs="$current_libdirs $libdir" ;; + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; esac - else - # Note the libdir as a future libdir. - case "$future_libdirs " in - *" $libdir "*) ;; - *) future_libdirs="$future_libdirs $libdir" ;; + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; esac - fi + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + compile_deplibs="$new_libs" - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/ - test "X$dir" = "X$file/" && dir= - dir="$dir$objdir" - if test -n "$relink_command"; then - # Determine the prefix the user has applied to our future dir. - inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"` + compile_command="$compile_command $compile_deplibs" + finalize_command="$finalize_command $finalize_deplibs" - # Don't allow the user to place us outside of our expected - # location b/c this prevents finding dependent libraries that - # are installed to the same prefix. - # At present, this check doesn't affect windows .dll's that - # are installed into $libdir/../bin (currently, that works fine) - # but it's something to keep an eye on. - if test "$inst_prefix_dir" = "$destdir"; then - $echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2 - exit $EXIT_FAILURE - fi + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + fi - if test -n "$inst_prefix_dir"; then - # Stick the inst_prefix_dir data into the link command. - relink_command=`$echo "$relink_command" | $SP2NL | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%" | $NL2SP` + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi else - relink_command=`$echo "$relink_command" | $SP2NL | $SED "s%@inst_prefix_dir@%%" | $NL2SP` + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) dllsearchpath="$dllsearchpath:$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" - $echo "$modename: warning: relinking \`$file'" 1>&2 - $show "$relink_command" - if $run eval "$relink_command"; then : + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi else - $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 - exit $EXIT_FAILURE + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; + esac fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" - # See the names of the shared library. - set dummy $library_names - if test -n "$2"; then - realname="$2" - shift - shift + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + fi - srcname="$realname" - test -n "$relink_command" && srcname="$realname"T + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" - # Install the shared library and build the symlinks. - $show "$install_prog $dir/$srcname $destdir/$realname" - $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $? - if test -n "$stripme" && test -n "$striplib"; then - $show "$striplib $destdir/$realname" - $run eval "$striplib $destdir/$realname" || exit $? - fi + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi - if test "$#" -gt 0; then - # Delete the old symlinks, and create new ones. 
- # Try `ln -sf' first, because the `ln' binary might depend on - # the symlink we replace! Solaris /bin/ln does not understand -f, - # so we also need to try rm && ln -s. - for linkname - do - if test "$linkname" != "$realname"; then - $show "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" - $run eval "(cd $destdir && { $LN_S -f $realname $linkname || { $rm $linkname && $LN_S $realname $linkname; }; })" - fi - done - fi + wrappers_required=yes + case $host in + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *cegcc) + # Disable wrappers for cegcc, we are cross compiling anyway. + wrappers_required=no + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" - # Do each command in the postinstall commands. - lib="$destdir/$realname" - cmds=$postinstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || { - lt_exit=$? - - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $run eval '(cd $output_objdir && $rm ${realname}T && $mv ${realname}U $realname)' - fi + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' - exit $lt_exit - } - done - IFS="$save_ifs" + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' fi - # Install the pseudo-library for information purposes. - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - instname="$dir/$name"i - $show "$install_prog $instname $destdir/$name" - $run eval "$install_prog $instname $destdir/$name" || exit $? - - # Maybe install the static library, too. - test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" - ;; + exit $exit_status + fi - *.lo) - # Install (i.e. copy) a libtool object. + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" - else - destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - destfile="$destdir/$destfile" + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi - - # Deduce the name of the destination old-style object file. - case $destfile in - *.lo) - staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"` - ;; - *.$objext) - staticdest="$destfile" - destfile= - ;; - *) - $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; - esac - - # Install the libtool object if requested. - if test -n "$destfile"; then - $show "$install_prog $file $destfile" - $run eval "$install_prog $file $destfile" || exit $? 
+ if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + rpath="$rpath$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi + fi - # Install the old object if enabled. - if test "$build_old_libs" = yes; then - # Deduce the name of the old-style object file. - staticobj=`$echo "X$file" | $Xsed -e "$lo2o"` - - $show "$install_prog $staticobj $staticdest" - $run eval "$install_prog \$staticobj \$staticdest" || exit $? - fi + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' exit $EXIT_SUCCESS - ;; + fi - *) - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi else - destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'` - destfile="$destdir/$destfile" + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" fi + fi - # If the file is missing, and there is a .exe on the end, strip it - # because it is most likely a libtool script we actually want to - # install - stripped_ext="" - case $file in - *.exe) - if test ! -f "$file"; then - file=`$echo $file|${SED} 's,.exe$,,'` - stripped_ext=".exe" - fi - ;; - esac + # Replace the output file specification. + link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` - # Do a test to see if this is really a libtool program. - case $host in - *cygwin*|*mingw*) - wrapper=`$echo $file | ${SED} -e 's,.exe$,,'` - ;; - *) - wrapper=$file - ;; - esac - if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then - notinst_deplibs= - relink_command= + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname - # Note that it is not necessary on cygwin/mingw to append a dot to - # foo even if both foo and FILE.exe exist: automatic-append-.exe - # behavior happens only for exec(3), not for open(2)! Also, sourcing - # `FILE.' does not work on cygwin managed mounts. - # - # If there is no directory component, then add one. - case $wrapper in - */* | *\\*) . ${wrapper} ;; - *) . ./${wrapper} ;; - esac + func_show_eval "$link_command" 'exit $?' - # Check the variables that should have been set. - if test -z "$notinst_deplibs"; then - $echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2 - exit $EXIT_FAILURE - fi + # Now create the wrapper script. 
+ func_verbose "creating $output" - finalize=yes - for lib in $notinst_deplibs; do - # Check to see that each library is installed. - libdir= - if test -f "$lib"; then - # If there is no directory component, then add one. - case $lib in - */* | *\\*) . $lib ;; - *) . ./$lib ;; - esac - fi - libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test - if test -n "$libdir" && test ! -f "$libfile"; then - $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2 - finalize=no - fi - done + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` + fi - relink_command= - # Note that it is not necessary on cygwin/mingw to append a dot to - # foo even if both foo and FILE.exe exist: automatic-append-.exe - # behavior happens only for exec(3), not for open(2)! Also, sourcing - # `FILE.' does not work on cygwin managed mounts. - # - # If there is no directory component, then add one. - case $wrapper in - */* | *\\*) . ${wrapper} ;; - *) . ./${wrapper} ;; - esac + # Quote $ECHO for shipping. + if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then + case $progpath in + [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; + *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; + esac + qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"` + else + qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"` + fi - outputname= - if test "$fast_install" = no && test -n "$relink_command"; then - if test "$finalize" = yes && test -z "$run"; then - tmpdir=`func_mktempdir` - file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'` - outputname="$tmpdir/$file" - # Replace the output file specification. - relink_command=`$echo "X$relink_command" | $SP2NL | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g' | $NL2SP` + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. 
+ $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } - $show "$relink_command" - if $run eval "$relink_command"; then : + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else - $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2 - ${rm}r "$tmpdir" - continue + func_emit_wrapper no > $func_ltwrapper_scriptname_result fi - file="$outputname" - else - $echo "$modename: warning: cannot relink \`$file'" 1>&2 - fi - else - # Install the binary that we compiled earlier. - file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` - fi - fi - - # remove .exe since cygwin /usr/bin/install will append another - # one anyway - case $install_prog,$host in - */usr/bin/install*,*cygwin*) - case $file:$destfile in - *.exe:*.exe) - # this is ok - ;; - *.exe:*) - destfile=$destfile.exe - ;; - *:*.exe) - destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'` - ;; - esac + } ;; - esac - $show "$install_prog$stripme $file $destfile" - $run eval "$install_prog\$stripme \$file \$destfile" || exit $? - test -n "$outputname" && ${rm}r "$tmpdir" - ;; - esac - done - - for file in $staticlibs; do - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 - # Set up the ranlib parameters. - oldlib="$destdir/$name" + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac - $show "$install_prog $file $oldlib" - $run eval "$install_prog \$file \$oldlib" || exit $? + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do - if test -n "$stripme" && test -n "$old_striplib"; then - $show "$old_striplib $oldlib" - $run eval "$old_striplib $oldlib" || exit $? + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + oldobjs="$oldobjs $symfileobj" + fi + fi + addlibs="$old_convenience" fi - # Do each command in the postinstall commands. - cmds=$old_postinstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || exit $? - done - IFS="$save_ifs" - done + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" - if test -n "$future_libdirs"; then - $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2 - fi + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" + fi - if test -n "$current_libdirs"; then - # Maybe just do a dry run. - test -n "$run" && current_libdirs=" -n$current_libdirs" - exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' - else - exit $EXIT_SUCCESS - fi - ;; + # Do each command in the archive commands. 
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else - # libtool finish mode - finish) - modename="$modename: finish" - libdirs="$nonopt" - admincmds= + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" - if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then - for dir - do - libdirs="$libdirs $dir" - done + func_extract_archives $gentop $dlprefiles + oldobjs="$oldobjs $func_extract_archives_result" + fi - for libdir in $libdirs; do - if test -n "$finish_cmds"; then - # Do each command in the finish commands. - cmds=$finish_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" || admincmds="$admincmds - $cmd" + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + $ECHO "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + oldobjs="$oldobjs $gentop/$newobj" + ;; + *) oldobjs="$oldobjs $obj" ;; + esac done - IFS="$save_ifs" - fi - if test -n "$finish_eval"; then - # Do the single finish_eval. - eval cmds=\"$finish_eval\" - $run eval "$cmds" || admincmds="$admincmds - $cmds" fi - done - fi - - # Exit here if they wanted silent mode. - test "$show" = : && exit $EXIT_SUCCESS + eval cmds=\"$old_archive_cmds\" - $echo "X----------------------------------------------------------------------" | $Xsed - $echo "Libraries have been installed in:" - for libdir in $libdirs; do - $echo " $libdir" + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? 
+ for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' done - $echo - $echo "If you ever happen to want to link against installed libraries" - $echo "in a given directory, LIBDIR, you must either use libtool, and" - $echo "specify the full pathname of the library, or use the \`-LLIBDIR'" - $echo "flag during linking and do at least one of the following:" - if test -n "$shlibpath_var"; then - $echo " - add LIBDIR to the \`$shlibpath_var' environment variable" - $echo " during execution" - fi - if test -n "$runpath_var"; then - $echo " - add LIBDIR to the \`$runpath_var' environment variable" - $echo " during linking" - fi - if test -n "$hardcode_libdir_flag_spec"; then - libdir=LIBDIR - eval flag=\"$hardcode_libdir_flag_spec\" - - $echo " - use the \`$flag' linker flag" - fi - if test -n "$admincmds"; then - $echo " - have your system administrator run these commands:$admincmds" - fi - if test -f /etc/ld.so.conf; then - $echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" - fi - $echo - $echo "See any operating system documentation about shared libraries for" - $echo "more information, such as the ld(1) and ld.so(8) manual pages." - $echo "X----------------------------------------------------------------------" | $Xsed - exit $EXIT_SUCCESS - ;; - - # libtool execute mode - execute) - modename="$modename: execute" - # The first argument is the command name. - cmd="$nonopt" - if test -z "$cmd"; then - $echo "$modename: you must specify a COMMAND" 1>&2 - $echo "$help" - exit $EXIT_FAILURE - fi + test -n "$generated" && \ + func_show_eval "${RM}r$generated" - # Handle -dlopen flags immediately. - for file in $execute_dlfiles; do - if test ! -f "$file"; then - $echo "$modename: \`$file' is not a file" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" - dir= - case $file in - *.la) - # Check to see that this really is a libtool archive. 
- if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then : + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" else - $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi - # Read the libtool library. - dlname= - library_names= + # Only create the output if not a dry run. + $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + if test "x$EGREP" = x ; then + EGREP=egrep + fi + # We do not want portage's install root ($D) present. Check only for + # this if the .la is being installed. + if test "$installed" = yes && test "$D"; then + eval mynewdependency_lib=`echo "$libdir/$name" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$libdir/$name" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_1=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_1"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + *) + if test "$installed" = yes; then + # Rather use S=WORKDIR if our version of portage supports it. + # This is because some ebuild (gcc) do not use $S as buildroot. + if test "$PWORKDIR"; then + S="$PWORKDIR" + fi + # We do not want portage's build root ($S) present. + my_little_ninja_foo_2=`echo $deplib |$EGREP -e "$S"` + # We do not want portage's install root ($D) present. 
+ my_little_ninja_foo_3=`echo $deplib |$EGREP -e "$D"` + if test -n "$my_little_ninja_foo_2" && test "$S"; then + mynewdependency_lib="" + elif test -n "$my_little_ninja_foo_3" && test "$D"; then + eval mynewdependency_lib=`echo "$deplib" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + else + mynewdependency_lib="$deplib" + fi + else + mynewdependency_lib="$deplib" + fi + # Do not add duplicates + if test "$mynewdependency_lib"; then + my_little_ninja_foo_4=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"` + if test -z "$my_little_ninja_foo_4"; then + newdependency_libs="$newdependency_libs $mynewdependency_lib" + fi + fi + ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + newdlfiles="$newdlfiles $libdir/$name" + ;; + *) newdlfiles="$newdlfiles $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + newdlprefiles="$newdlprefiles $libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; + esac + # Do not add duplicates + if test "$installed" = yes && test "$D"; then + install_libdir=`echo "$install_libdir" |sed -e "s:$D:/:g" -e 's:/\+:/:g'` + fi + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac +# The name that we can dlopen(3). +dlname='$tdlname' - # Skip this library if it cannot be dlopened. - if test -z "$dlname"; then - # Warn if it was a shared library. - test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'" - continue - fi +# Names of this library. +library_names='$library_names' - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$file" && dir=. +# The name of the static archive. +old_library='$old_library' - if test -f "$dir/$objdir/$dlname"; then - dir="$dir/$objdir" - else - if test ! -f "$dir/$dlname"; then - $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2 - exit $EXIT_FAILURE - fi - fi - ;; +# Linker flags that can not go in dependency_libs. 
+inherited_linker_flags='$new_inherited_linker_flags' - *.lo) - # Just add the directory containing the .lo file. - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - test "X$dir" = "X$file" && dir=. - ;; +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' - *) - $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2 - continue - ;; - esac +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' - # Get the absolute pathname. - absdir=`cd "$dir" && pwd` - test -n "$absdir" && dir="$absdir" +# Version information for $libname. +current=$current +age=$age +revision=$revision - # Now add the directory to shlibpath_var. - if eval "test -z \"\$$shlibpath_var\""; then - eval "$shlibpath_var=\"\$dir\"" - else - eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" - fi - done +# Is this an already installed library? +installed=$installed - # This variable tells wrapper scripts just to set shlibpath_var - # rather than running their programs. - libtool_execute_magic="$magic" +# Should we warn about portability when linking against -modules? +shouldnotlink=$module - # Check if any of the arguments is a wrapper script. - args= - for file - do - case $file in - -*) ;; - *) - # Do a test to see if this is really a libtool program. - if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - # If there is no directory component, then add one. - case $file in - */* | *\\*) . $file ;; - *) . ./$file ;; - esac +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' - # Transform arg to wrapped name. - file="$progdir/$program" - fi - ;; - esac - # Quote arguments (to preserve shell metacharacters). - file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"` - args="$args \"$file\"" - done +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } - if test -z "$run"; then - if test -n "$shlibpath_var"; then - # Export the shlibpath_var. - eval "export $shlibpath_var" - fi + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} - # Restore saved environment variables - for lt_var in LANG LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES - do - eval "if test \"\${save_$lt_var+set}\" = set; then - $lt_var=\$save_$lt_var; export $lt_var - fi" - done +{ test "$mode" = link || test "$mode" = relink; } && + func_mode_link ${1+"$@"} - # Now prepare to actually exec the command. - exec_cmd="\$cmd$args" - else - # Display what would be done. - if test -n "$shlibpath_var"; then - eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\"" - $echo "export $shlibpath_var" - fi - $echo "$cmd$args" - exit $EXIT_SUCCESS - fi - ;; - # libtool clean and uninstall mode - clean | uninstall) - modename="$modename: $mode" - rm="$nonopt" +# func_mode_uninstall arg... 
+func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" files= rmforce= exit_status=0 @@ -6521,30 +8257,28 @@ for arg do case $arg in - -f) rm="$rm $arg"; rmforce=yes ;; - -*) rm="$rm $arg" ;; + -f) RM="$RM $arg"; rmforce=yes ;; + -*) RM="$RM $arg" ;; *) files="$files $arg" ;; esac done - if test -z "$rm"; then - $echo "$modename: you must specify an RM program" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - fi + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" rmdirs= origobjdir="$objdir" for file in $files; do - dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'` - if test "X$dir" = "X$file"; then - dir=. + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then objdir="$origobjdir" else objdir="$dir/$origobjdir" fi - name=`$echo "X$file" | $Xsed -e 's%^.*/%%'` + func_basename "$file" + name="$func_basename_result" test "$mode" = uninstall && objdir="$dir" # Remember objdir for removal later, being careful to avoid duplicates @@ -6556,9 +8290,9 @@ fi # Don't error if the file doesn't exist and rm -f was used. - if (test -L "$file") >/dev/null 2>&1 \ - || (test -h "$file") >/dev/null 2>&1 \ - || test -f "$file"; then + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then : elif test -d "$file"; then exit_status=1 @@ -6572,8 +8306,8 @@ case $name in *.la) # Possibly a libtool archive, so verify it. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - . $dir/$name + if func_lalib_p "$file"; then + func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do @@ -6588,39 +8322,17 @@ *" $dlname "*) ;; *) rmfiles="$rmfiles $objdir/$dlname" ;; esac - test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" + test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. - cmds=$postuninstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" - if test "$?" -ne 0 && test "$rmforce" != yes; then - exit_status=1 - fi - done - IFS="$save_ifs" + func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. - cmds=$old_postuninstall_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval cmd=\"$cmd\" - $show "$cmd" - $run eval "$cmd" - if test "$?" -ne 0 && test "$rmforce" != yes; then - exit_status=1 - fi - done - IFS="$save_ifs" + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; @@ -6630,20 +8342,20 @@ *.lo) # Possibly a libtool object, so verify it. - if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then + if func_lalib_p "$file"; then # Read the .lo file - . $dir/$name + func_source $dir/$name # Add PIC object to the list of files to remove. - if test -n "$pic_object" \ - && test "$pic_object" != none; then + if test -n "$pic_object" && + test "$pic_object" != none; then rmfiles="$rmfiles $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. 
- if test -n "$non_pic_object" \ - && test "$non_pic_object" != none; then + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then rmfiles="$rmfiles $dir/$non_pic_object" fi fi @@ -6654,17 +8366,26 @@ noexename=$name case $file in *.exe) - file=`$echo $file|${SED} 's,.exe$,,'` - noexename=`$echo $name|${SED} 's,.exe$,,'` + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe rmfiles="$rmfiles $file" ;; esac # Do a test to see if this is a libtool program. - if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then - relink_command= - . $dir/$noexename + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + rmfiles="$rmfiles $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles @@ -6679,239 +8400,38 @@ fi ;; esac - $show "$rm $rmfiles" - $run $rm $rmfiles || exit_status=1 + func_show_eval "$RM $rmfiles" 'exit_status=1' done objdir="$origobjdir" # Try to remove the ${objdir}s in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then - $show "rmdir $dir" - $run rmdir $dir >/dev/null 2>&1 + func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status - ;; +} - "") - $echo "$modename: you must specify a MODE" 1>&2 - $echo "$generic_help" 1>&2 - exit $EXIT_FAILURE - ;; - esac +{ test "$mode" = uninstall || test "$mode" = clean; } && + func_mode_uninstall ${1+"$@"} - if test -z "$exec_cmd"; then - $echo "$modename: invalid operation mode \`$mode'" 1>&2 - $echo "$generic_help" 1>&2 - exit $EXIT_FAILURE - fi -fi # test -z "$show_help" +test -z "$mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$mode'" if test -n "$exec_cmd"; then - eval exec $exec_cmd + eval exec "$exec_cmd" exit $EXIT_FAILURE fi -# We need to display help for each of the modes. -case $mode in -"") $echo \ -"Usage: $modename [OPTION]... [MODE-ARG]... - -Provide generalized library-building support services. - - --config show all configuration variables - --debug enable verbose shell tracing --n, --dry-run display commands without modifying any files - --features display basic configuration information and exit - --finish same as \`--mode=finish' - --help display this help message and exit - --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS] - --quiet same as \`--silent' - --silent don't print informational messages - --tag=TAG use configuration variables from tag TAG - --version print version information - -MODE must be one of the following: - - clean remove files from the build directory - compile compile a source file into a libtool object - execute automatically set library path, then run a program - finish complete the installation of libtool libraries - install install libraries or executables - link create a library or an executable - uninstall remove libraries from an installed directory - -MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for -a more detailed description of MODE. - -Report bugs to ." 
- exit $EXIT_SUCCESS - ;; - -clean) - $echo \ -"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE... - -Remove files from the build directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, object or program, all the files associated -with it are deleted. Otherwise, only FILE itself is deleted using RM." - ;; - -compile) - $echo \ -"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE - -Compile a source file into a libtool library object. - -This mode accepts the following additional options: - - -o OUTPUT-FILE set the output file name to OUTPUT-FILE - -prefer-pic try to building PIC objects only - -prefer-non-pic try to building non-PIC objects only - -static always build a \`.o' file suitable for static linking - -COMPILE-COMMAND is a command to be used in creating a \`standard' object file -from the given SOURCEFILE. - -The output file name is determined by removing the directory component from -SOURCEFILE, then substituting the C source code suffix \`.c' with the -library object suffix, \`.lo'." - ;; - -execute) - $echo \ -"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]... - -Automatically set library path, then run a program. - -This mode accepts the following additional options: - - -dlopen FILE add the directory containing FILE to the library path - -This mode sets the library path environment variable according to \`-dlopen' -flags. - -If any of the ARGS are libtool executable wrappers, then they are translated -into their corresponding uninstalled binary, and any of their required library -directories are added to the library path. - -Then, COMMAND is executed, with ARGS as arguments." - ;; - -finish) - $echo \ -"Usage: $modename [OPTION]... --mode=finish [LIBDIR]... - -Complete the installation of libtool libraries. - -Each LIBDIR is a directory that contains libtool libraries. - -The commands that this mode executes may require superuser privileges. Use -the \`--dry-run' option if you just want to see what would be executed." - ;; - -install) - $echo \ -"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND... - -Install executables or libraries. - -INSTALL-COMMAND is the installation command. The first component should be -either the \`install' or \`cp' program. - -The rest of the components are interpreted as arguments to that command (only -BSD-compatible install options are recognized)." - ;; - -link) - $echo \ -"Usage: $modename [OPTION]... --mode=link LINK-COMMAND... - -Link object files or libraries together to form another library, or to -create an executable program. +exit $exit_status -LINK-COMMAND is a command using the C compiler that you would use to create -a program from several object files. 
- -The following components of LINK-COMMAND are treated specially: - - -all-static do not do any dynamic linking at all - -avoid-version do not add a version suffix if possible - -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime - -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols - -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) - -export-symbols SYMFILE - try to export only the symbols listed in SYMFILE - -export-symbols-regex REGEX - try to export only the symbols matching REGEX - -LLIBDIR search LIBDIR for required installed libraries - -lNAME OUTPUT-FILE requires the installed library libNAME - -module build a library that can dlopened - -no-fast-install disable the fast-install mode - -no-install link a not-installable executable - -no-undefined declare that a library does not refer to external symbols - -o OUTPUT-FILE create OUTPUT-FILE from the specified objects - -objectlist FILE Use a list of object files found in FILE to specify objects - -precious-files-regex REGEX - don't remove output files matching REGEX - -release RELEASE specify package release information - -rpath LIBDIR the created library will eventually be installed in LIBDIR - -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries - -static do not do any dynamic linking of uninstalled libtool libraries - -static-libtool-libs - do not do any dynamic linking of libtool libraries - -version-info CURRENT[:REVISION[:AGE]] - specify library version info [each variable defaults to 0] - -All other options (arguments beginning with \`-') are ignored. - -Every other argument is treated as a filename. Files ending in \`.la' are -treated as uninstalled libtool libraries, other files are standard or library -object files. - -If the OUTPUT-FILE ends in \`.la', then a libtool library is created, -only library objects (\`.lo' files) may be specified, and \`-rpath' is -required, except when creating a convenience library. - -If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created -using \`ar' and \`ranlib', or on Windows using \`lib'. - -If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file -is created, otherwise an executable program is created." - ;; - -uninstall) - $echo \ -"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... - -Remove libraries from an installation directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, all the files associated with it are deleted. -Otherwise, only FILE itself is deleted using RM." - ;; - -*) - $echo "$modename: invalid operation mode \`$mode'" 1>&2 - $echo "$help" 1>&2 - exit $EXIT_FAILURE - ;; -esac - -$echo -$echo "Try \`$modename --help' for more information about other modes." - -exit $? # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. Given conflicting @@ -6925,14 +8445,17 @@ # configuration. But we'll never go from static-only to shared-only. 
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared -disable_libs=shared +build_libtool_libs=no +build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static -disable_libs=static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: +# vi:sw=2 + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/main.mk /tmp/3ARg2Grji7/sqlite3-3.6.16/main.mk --- sqlite3-3.4.2/main.mk 2007-07-20 14:07:40.000000000 +0100 +++ sqlite3-3.6.16/main.mk 2009-06-25 12:35:50.000000000 +0100 @@ -9,13 +9,6 @@ # BCC C Compiler and options for use in building executables that # will run on the platform that is doing the build. # -# USLEEP If the target operating system supports the "usleep()" system -# call, then define the HAVE_USLEEP macro for all C modules. -# -# THREADSAFE If you want the SQLite library to be safe for use within a -# multi-threaded program, then define the following macro -# appropriately: -# # THREADLIB Specify any extra linker options needed to make the library # thread safe # @@ -51,22 +44,31 @@ # This is how we compile # -TCCX = $(TCC) $(OPTS) $(THREADSAFE) $(USLEEP) -I. -I$(TOP)/src +TCCX = $(TCC) $(OPTS) -I. -I$(TOP)/src -I$(TOP) +TCCX += -I$(TOP)/ext/rtree -I$(TOP)/ext/icu -I$(TOP)/ext/fts3 +TCCX += -I$(TOP)/ext/async # Object files for the SQLite library. # -LIBOBJ+= alter.o analyze.o attach.o auth.o btree.o build.o \ - callback.o complete.o date.o delete.o \ - expr.o func.o hash.o insert.o loadext.o \ - main.o malloc.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ - pager.o parse.o pragma.o prepare.o printf.o random.o \ - select.o table.o tclsqlite.o tokenize.o trigger.o \ +LIBOBJ+= alter.o analyze.o attach.o auth.o \ + backup.o bitvec.o btmutex.o btree.o build.o \ + callback.o complete.o date.o delete.o expr.o fault.o \ + fts3.o fts3_expr.o fts3_hash.o fts3_icu.o fts3_porter.o \ + fts3_tokenizer.o fts3_tokenizer1.o \ + func.o global.o hash.o \ + icu.o insert.o journal.o legacy.o loadext.o \ + main.o malloc.o mem0.o mem1.o mem2.o mem3.o mem5.o \ + memjournal.o \ + mutex.o mutex_noop.o mutex_os2.o mutex_unix.o mutex_w32.o \ + notify.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ + pager.o parse.o pcache.o pcache1.o pragma.o prepare.o printf.o \ + random.o resolve.o rowset.o rtree.o select.o status.o \ + table.o tokenize.o trigger.o \ update.o util.o vacuum.o \ - vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbefifo.o vdbemem.o \ - where.o utf.o legacy.o vtab.o + vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbemem.o \ + walker.o where.o utf.o vtab.o + -EXTOBJ = icu.o fts2.o fts2_hash.o fts2_icu.o fts2_porter.o \ - fts2_tokenizer.o fts2_tokenizer1.o # All of the source code files. 
# @@ -75,38 +77,68 @@ $(TOP)/src/analyze.c \ $(TOP)/src/attach.c \ $(TOP)/src/auth.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/bitvec.c \ + $(TOP)/src/btmutex.c \ $(TOP)/src/btree.c \ $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ $(TOP)/src/build.c \ $(TOP)/src/callback.c \ $(TOP)/src/complete.c \ $(TOP)/src/date.c \ $(TOP)/src/delete.c \ $(TOP)/src/expr.c \ + $(TOP)/src/fault.c \ $(TOP)/src/func.c \ + $(TOP)/src/global.c \ $(TOP)/src/hash.c \ $(TOP)/src/hash.h \ + $(TOP)/src/hwtime.h \ $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ $(TOP)/src/legacy.c \ $(TOP)/src/loadext.c \ $(TOP)/src/main.c \ $(TOP)/src/malloc.c \ + $(TOP)/src/mem0.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mem3.c \ + $(TOP)/src/mem5.c \ + $(TOP)/src/memjournal.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex.h \ + $(TOP)/src/mutex_noop.c \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/notify.c \ $(TOP)/src/os.c \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ $(TOP)/src/os_os2.c \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/pager.c \ $(TOP)/src/pager.h \ $(TOP)/src/parse.y \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache.h \ + $(TOP)/src/pcache1.c \ $(TOP)/src/pragma.c \ $(TOP)/src/prepare.c \ $(TOP)/src/printf.c \ $(TOP)/src/random.c \ + $(TOP)/src/resolve.c \ + $(TOP)/src/rowset.c \ $(TOP)/src/select.c \ + $(TOP)/src/status.c \ $(TOP)/src/shell.c \ $(TOP)/src/sqlite.h.in \ $(TOP)/src/sqlite3ext.h \ $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ $(TOP)/src/table.c \ $(TOP)/src/tclsqlite.c \ $(TOP)/src/tokenize.c \ @@ -120,10 +152,10 @@ $(TOP)/src/vdbeapi.c \ $(TOP)/src/vdbeaux.c \ $(TOP)/src/vdbeblob.c \ - $(TOP)/src/vdbefifo.c \ $(TOP)/src/vdbemem.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ + $(TOP)/src/walker.c \ $(TOP)/src/where.c # Source code for extensions @@ -147,7 +179,23 @@ $(TOP)/ext/fts2/fts2_tokenizer.c \ $(TOP)/ext/fts2/fts2_tokenizer1.c SRC += \ - $(TOP)/ext/icu/icu.c + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.c \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_icu.c \ + $(TOP)/ext/fts3/fts3_porter.c \ + $(TOP)/ext/fts3/fts3_tokenizer.h \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_tokenizer1.c +SRC += \ + $(TOP)/ext/icu/sqliteicu.h \ + $(TOP)/ext/icu/icu.c +SRC += \ + $(TOP)/ext/rtree/rtree.h \ + $(TOP)/ext/rtree/rtree.c # Generated source code files @@ -164,19 +212,6 @@ # Source code to the test files. 
# TESTSRC = \ - $(TOP)/src/btree.c \ - $(TOP)/src/date.c \ - $(TOP)/src/func.c \ - $(TOP)/src/insert.c \ - $(TOP)/src/main.c \ - $(TOP)/src/malloc.c \ - $(TOP)/src/os.c \ - $(TOP)/src/os_os2.c \ - $(TOP)/src/os_unix.c \ - $(TOP)/src/os_win.c \ - $(TOP)/src/pager.c \ - $(TOP)/src/pragma.c \ - $(TOP)/src/printf.c \ $(TOP)/src/test1.c \ $(TOP)/src/test2.c \ $(TOP)/src/test3.c \ @@ -188,35 +223,63 @@ $(TOP)/src/test9.c \ $(TOP)/src/test_autoext.c \ $(TOP)/src/test_async.c \ + $(TOP)/src/test_backup.c \ $(TOP)/src/test_btree.c \ $(TOP)/src/test_config.c \ + $(TOP)/src/test_devsym.c \ + $(TOP)/src/test_func.c \ $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_journal.c \ + $(TOP)/src/test_malloc.c \ $(TOP)/src/test_md5.c \ + $(TOP)/src/test_mutex.c \ + $(TOP)/src/test_onefile.c \ + $(TOP)/src/test_osinst.c \ + $(TOP)/src/test_pcache.c \ $(TOP)/src/test_schema.c \ $(TOP)/src/test_server.c \ $(TOP)/src/test_tclvar.c \ - $(TOP)/src/utf.c \ - $(TOP)/src/util.c \ - $(TOP)/src/vdbe.c \ - $(TOP)/src/vdbeaux.c \ - $(TOP)/src/where.c \ - $(TOP)/ext/fts2/fts2_tokenizer.c + $(TOP)/src/test_thread.c \ + $(TOP)/src/test_wsd.c \ + +#TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c +#TESTSRC += $(TOP)/ext/fts3/fts3_tokenizer.c + +TESTSRC2 = \ + $(TOP)/src/attach.c $(TOP)/src/backup.c $(TOP)/src/btree.c \ + $(TOP)/src/build.c $(TOP)/src/date.c \ + $(TOP)/src/expr.c $(TOP)/src/func.c $(TOP)/src/insert.c $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c $(TOP)/src/os_unix.c $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c $(TOP)/src/pragma.c $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c $(TOP)/src/random.c $(TOP)/src/pcache.c \ + $(TOP)/src/pcache1.c $(TOP)/src/select.c $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c $(TOP)/src/util.c $(TOP)/src/vdbeapi.c $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbe.c $(TOP)/src/vdbemem.c $(TOP)/src/where.c parse.c \ + $(TOP)/ext/fts3/fts3.c $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/async/sqlite3async.c # Header files used by all library source files. # HDR = \ - sqlite3.h \ $(TOP)/src/btree.h \ $(TOP)/src/btreeInt.h \ $(TOP)/src/hash.h \ - $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/hwtime.h \ + keywordhash.h \ + $(TOP)/src/mutex.h \ opcodes.h \ $(TOP)/src/os.h \ $(TOP)/src/os_common.h \ + $(TOP)/src/pager.h \ + $(TOP)/src/pcache.h \ + parse.h \ + sqlite3.h \ $(TOP)/src/sqlite3ext.h \ $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ $(TOP)/src/vdbe.h \ - parse.h + $(TOP)/src/vdbeInt.h # Header files used by extensions # @@ -228,27 +291,23 @@ $(TOP)/ext/fts2/fts2.h \ $(TOP)/ext/fts2/fts2_hash.h \ $(TOP)/ext/fts2/fts2_tokenizer.h - - -# Header files used by the VDBE submodule -# -VDBEHDR = \ - $(TOP)/src/vdbeInt.h +EXTHDR += \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_tokenizer.h +EXTHDR += \ + $(TOP)/ext/rtree/rtree.h +EXTHDR += \ + $(TOP)/ext/icu/sqliteicu.h # This is the default Makefile target. The objects listed here # are what get build when you type just "make" with no arguments. 
# all: sqlite3.h libsqlite3.a sqlite3$(EXE) -# Generate the file "last_change" which contains the date of change -# of the most recently modified source code file -# -last_change: $(SRC) - cat $(SRC) | grep '$$Id: ' | sort -k 5 | tail -1 \ - | $(NAWK) '{print $$5,$$6}' >last_change - -libsqlite3.a: $(LIBOBJ) $(EXTOBJ) - $(AR) libsqlite3.a $(LIBOBJ) $(EXTOBJ) +libsqlite3.a: $(LIBOBJ) + $(AR) libsqlite3.a $(LIBOBJ) $(RANLIB) libsqlite3.a sqlite3$(EXE): $(TOP)/src/shell.c libsqlite3.a sqlite3.h @@ -264,189 +323,81 @@ # files are automatically generated. This target takes care of # all that automatic generation. # -target_source: $(SRC) +target_source: $(SRC) $(TOP)/tool/vdbe-compress.tcl rm -rf tsrc mkdir tsrc - cp -f $(SRC) $(TOP)/src/*.h tsrc 2>/dev/null + cp -f $(SRC) tsrc rm tsrc/sqlite.h.in tsrc/parse.y + tclsh $(TOP)/tool/vdbe-compress.tcl vdbe.new + mv vdbe.new tsrc/vdbe.c + touch target_source sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl tclsh $(TOP)/tool/mksqlite3c.tcl cp sqlite3.c tclsqlite3.c cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c - tclsh $(TOP)/tool/mksqlite3internalh.tcl fts2amal.c: target_source $(TOP)/ext/fts2/mkfts2amal.tcl tclsh $(TOP)/ext/fts2/mkfts2amal.tcl +fts3amal.c: target_source $(TOP)/ext/fts3/mkfts3amal.tcl + tclsh $(TOP)/ext/fts3/mkfts3amal.tcl + # Rules to build the LEMON compiler generator # lemon: $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c $(BCC) -o lemon $(TOP)/tool/lemon.c cp $(TOP)/tool/lempar.c . -# Rules to build individual files +# Rules to build individual *.o files from generated *.c files. This +# applies to: # -alter.o: $(TOP)/src/alter.c $(HDR) - $(TCCX) -c $(TOP)/src/alter.c - -analyze.o: $(TOP)/src/analyze.c $(HDR) - $(TCCX) -c $(TOP)/src/analyze.c - -attach.o: $(TOP)/src/attach.c $(HDR) - $(TCCX) -c $(TOP)/src/attach.c - -auth.o: $(TOP)/src/auth.c $(HDR) - $(TCCX) -c $(TOP)/src/auth.c - -btree.o: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h - $(TCCX) -c $(TOP)/src/btree.c - -build.o: $(TOP)/src/build.c $(HDR) - $(TCCX) -c $(TOP)/src/build.c - -callback.o: $(TOP)/src/callback.c $(HDR) - $(TCCX) -c $(TOP)/src/callback.c - -complete.o: $(TOP)/src/complete.c $(HDR) - $(TCCX) -c $(TOP)/src/complete.c - -date.o: $(TOP)/src/date.c $(HDR) - $(TCCX) -c $(TOP)/src/date.c - -delete.o: $(TOP)/src/delete.c $(HDR) - $(TCCX) -c $(TOP)/src/delete.c - -expr.o: $(TOP)/src/expr.c $(HDR) - $(TCCX) -c $(TOP)/src/expr.c - -func.o: $(TOP)/src/func.c $(HDR) - $(TCCX) -c $(TOP)/src/func.c - -hash.o: $(TOP)/src/hash.c $(HDR) - $(TCCX) -c $(TOP)/src/hash.c - -insert.o: $(TOP)/src/insert.c $(HDR) - $(TCCX) -c $(TOP)/src/insert.c - -legacy.o: $(TOP)/src/legacy.c $(HDR) - $(TCCX) -c $(TOP)/src/legacy.c - -loadext.o: $(TOP)/src/loadext.c $(HDR) - $(TCCX) -c $(TOP)/src/loadext.c +# parse.o +# opcodes.o +# +%.o: %.c $(HDR) + $(TCCX) -c $< -main.o: $(TOP)/src/main.c $(HDR) - $(TCCX) -c $(TOP)/src/main.c +# Rules to build individual *.o files from files in the src directory. 
+# +%.o: $(TOP)/src/%.c $(HDR) + $(TCCX) -c $< -malloc.o: $(TOP)/src/malloc.c $(HDR) - $(TCCX) -c $(TOP)/src/malloc.c +tclsqlite.o: $(TOP)/src/tclsqlite.c $(HDR) + $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c -pager.o: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h - $(TCCX) -c $(TOP)/src/pager.c -opcodes.o: opcodes.c - $(TCCX) -c opcodes.c +# Rules to build opcodes.c and opcodes.h +# opcodes.c: opcodes.h $(TOP)/mkopcodec.awk sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk - cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h - -os.o: $(TOP)/src/os.c $(HDR) - $(TCCX) -c $(TOP)/src/os.c - -os_os2.o: $(TOP)/src/os_os2.c $(HDR) - $(TCCX) -c $(TOP)/src/os_os2.c - -os_unix.o: $(TOP)/src/os_unix.c $(HDR) - $(TCCX) -c $(TOP)/src/os_unix.c - -os_win.o: $(TOP)/src/os_win.c $(HDR) - $(TCCX) -c $(TOP)/src/os_win.c - -parse.o: parse.c $(HDR) - $(TCCX) -c parse.c + cat parse.h $(TOP)/src/vdbe.c | \ + $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h +# Rules to build parse.c and parse.h - the outputs of lemon. +# parse.h: parse.c parse.c: $(TOP)/src/parse.y lemon $(TOP)/addopcodes.awk cp $(TOP)/src/parse.y . + rm -f parse.h ./lemon $(OPTS) parse.y mv parse.h parse.h.temp awk -f $(TOP)/addopcodes.awk parse.h.temp >parse.h -pragma.o: $(TOP)/src/pragma.c $(HDR) - $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/pragma.c - -prepare.o: $(TOP)/src/prepare.c $(HDR) - $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/prepare.c - -printf.o: $(TOP)/src/printf.c $(HDR) - $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/printf.c - -random.o: $(TOP)/src/random.c $(HDR) - $(TCCX) -c $(TOP)/src/random.c - -select.o: $(TOP)/src/select.c $(HDR) - $(TCCX) -c $(TOP)/src/select.c - sqlite3.h: $(TOP)/src/sqlite.h.in sed -e s/--VERS--/`cat ${TOP}/VERSION`/ \ -e s/--VERSION-NUMBER--/`cat ${TOP}/VERSION | sed 's/[^0-9]/ /g' | $(NAWK) '{printf "%d%03d%03d",$$1,$$2,$$3}'`/ \ $(TOP)/src/sqlite.h.in >sqlite3.h -table.o: $(TOP)/src/table.c $(HDR) - $(TCCX) -c $(TOP)/src/table.c - -tclsqlite.o: $(TOP)/src/tclsqlite.c $(HDR) - $(TCCX) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c - -tokenize.o: $(TOP)/src/tokenize.c keywordhash.h $(HDR) - $(TCCX) -c $(TOP)/src/tokenize.c - keywordhash.h: $(TOP)/tool/mkkeywordhash.c $(BCC) -o mkkeywordhash $(OPTS) $(TOP)/tool/mkkeywordhash.c ./mkkeywordhash >keywordhash.h -trigger.o: $(TOP)/src/trigger.c $(HDR) - $(TCCX) -c $(TOP)/src/trigger.c - -update.o: $(TOP)/src/update.c $(HDR) - $(TCCX) -c $(TOP)/src/update.c - -utf.o: $(TOP)/src/utf.c $(HDR) - $(TCCX) -c $(TOP)/src/utf.c - -util.o: $(TOP)/src/util.c $(HDR) - $(TCCX) -c $(TOP)/src/util.c - -vacuum.o: $(TOP)/src/vacuum.c $(HDR) - $(TCCX) -c $(TOP)/src/vacuum.c - -vdbe.o: $(TOP)/src/vdbe.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbe.c - -vdbeapi.o: $(TOP)/src/vdbeapi.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbeapi.c -vdbeaux.o: $(TOP)/src/vdbeaux.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbeaux.c - -vdbeblob.o: $(TOP)/src/vdbeblob.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbeblob.c - -vdbefifo.o: $(TOP)/src/vdbefifo.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbefifo.c - -vdbemem.o: $(TOP)/src/vdbemem.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vdbemem.c - -vtab.o: $(TOP)/src/vtab.c $(VDBEHDR) $(HDR) - $(TCCX) -c $(TOP)/src/vtab.c - -where.o: $(TOP)/src/where.c $(HDR) - $(TCCX) -c $(TOP)/src/where.c # Rules to build the extension objects. 
# @@ -471,6 +422,30 @@ fts2_tokenizer1.o: $(TOP)/ext/fts2/fts2_tokenizer1.c $(HDR) $(EXTHDR) $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer1.c +fts3.o: $(TOP)/ext/fts3/fts3.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c + +fts3_expr.o: $(TOP)/ext/fts3/fts3_expr.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_expr.c + +fts3_hash.o: $(TOP)/ext/fts3/fts3_hash.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_hash.c + +fts3_icu.o: $(TOP)/ext/fts3/fts3_icu.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_icu.c + +fts3_porter.o: $(TOP)/ext/fts3/fts3_porter.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_porter.c + +fts3_tokenizer.o: $(TOP)/ext/fts3/fts3_tokenizer.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer.c + +fts3_tokenizer1.o: $(TOP)/ext/fts3/fts3_tokenizer1.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer1.c + +rtree.o: $(TOP)/ext/rtree/rtree.c $(HDR) $(EXTHDR) + $(TCCX) -DSQLITE_CORE -c $(TOP)/ext/rtree/rtree.c + # Rules for building test programs and for running tests # @@ -478,11 +453,27 @@ $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite3 \ $(TOP)/src/tclsqlite.c libsqlite3.a $(LIBTCL) $(THREADLIB) -testfixture$(EXE): $(TOP)/src/tclsqlite.c libsqlite3.a $(TESTSRC) - $(TCCX) $(TCL_FLAGS) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ - -DSQLITE_SERVER=1 -o testfixture$(EXE) \ - -DSQLITE_CORE $(TESTSRC) $(TOP)/src/tclsqlite.c \ - libsqlite3.a $(LIBTCL) $(THREADLIB) + +# Rules to build the 'testfixture' application. +# +TESTFIXTURE_FLAGS = -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 +TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE + +testfixture$(EXE): $(TESTSRC2) libsqlite3.a $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TESTSRC2) $(TOP)/src/tclsqlite.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) libsqlite3.a + +amalgamation-testfixture$(EXE): sqlite3.c $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) + +fts3-testfixture$(EXE): sqlite3.c fts3amal.c $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + -DSQLITE_ENABLE_FTS3=1 \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c fts3amal.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) fulltest: testfixture$(EXE) sqlite3$(EXE) ./testfixture$(EXE) $(TOP)/test/all.test @@ -491,9 +482,9 @@ ./testfixture$(EXE) $(TOP)/test/all.test -soak 1 test: testfixture$(EXE) sqlite3$(EXE) - ./testfixture$(EXE) $(TOP)/test/quick.test + ./testfixture$(EXE) $(TOP)/test/veryquick.test -sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c libsqlite3.a $(TESTSRC) \ +sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c sqlite3.c $(TESTSRC) \ $(TOP)/tool/spaceanal.tcl sed \ -e '/^#/d' \ @@ -502,9 +493,11 @@ -e 's,^,",' \ -e 's,$$,\\n",' \ $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h - $(TCCX) $(TCL_FLAGS) -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 -o \ - sqlite3_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ - libsqlite3.a $(LIBTCL) $(THREADLIB) + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 -DSQLITE_PRIVATE="" \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o sqlite3_analyzer$(EXE) \ + $(LIBTCL) $(THREADLIB) TEST_EXTENSION = $(SHPREFIX)testloadext.$(SO) $(TEST_EXTENSION): $(TOP)/src/test_loadext.c @@ -513,181 +506,6 @@ extensiontest: 
testfixture$(EXE) $(TEST_EXTENSION) ./testfixture$(EXE) $(TOP)/test/loadext.test -# Rules used to build documentation -# -arch.html: $(TOP)/www/arch.tcl - tclsh $(TOP)/www/arch.tcl >arch.html - -autoinc.html: $(TOP)/www/autoinc.tcl - tclsh $(TOP)/www/autoinc.tcl >autoinc.html - -c_interface.html: $(TOP)/www/c_interface.tcl - tclsh $(TOP)/www/c_interface.tcl >c_interface.html - -capi3.html: $(TOP)/www/capi3.tcl - tclsh $(TOP)/www/capi3.tcl >capi3.html - -capi3ref.html: $(TOP)/www/mkapidoc.tcl sqlite3.h - tclsh $(TOP)/www/mkapidoc.tcl capi3ref.html - -changes.html: $(TOP)/www/changes.tcl - tclsh $(TOP)/www/changes.tcl >changes.html - -compile.html: $(TOP)/www/compile.tcl - tclsh $(TOP)/www/compile.tcl >compile.html - -copyright.html: $(TOP)/www/copyright.tcl - tclsh $(TOP)/www/copyright.tcl >copyright.html - -copyright-release.html: $(TOP)/www/copyright-release.html - cp $(TOP)/www/copyright-release.html . - -copyright-release.pdf: $(TOP)/www/copyright-release.pdf - cp $(TOP)/www/copyright-release.pdf . - -common.tcl: $(TOP)/www/common.tcl - cp $(TOP)/www/common.tcl . - -conflict.html: $(TOP)/www/conflict.tcl - tclsh $(TOP)/www/conflict.tcl >conflict.html - -datatypes.html: $(TOP)/www/datatypes.tcl - tclsh $(TOP)/www/datatypes.tcl >datatypes.html - -datatype3.html: $(TOP)/www/datatype3.tcl - tclsh $(TOP)/www/datatype3.tcl >datatype3.html - -different.html: $(TOP)/www/different.tcl - tclsh $(TOP)/www/different.tcl >different.html - -docs.html: $(TOP)/www/docs.tcl - tclsh $(TOP)/www/docs.tcl >docs.html - -download.html: $(TOP)/www/download.tcl - mkdir -p doc - tclsh $(TOP)/www/download.tcl >download.html - -faq.html: $(TOP)/www/faq.tcl - tclsh $(TOP)/www/faq.tcl >faq.html - -fileformat.html: $(TOP)/www/fileformat.tcl - tclsh $(TOP)/www/fileformat.tcl >fileformat.html - -formatchng.html: $(TOP)/www/formatchng.tcl - tclsh $(TOP)/www/formatchng.tcl >formatchng.html - -index.html: $(TOP)/www/index.tcl last_change - tclsh $(TOP)/www/index.tcl >index.html - -limits.html: $(TOP)/www/limits.tcl last_change - tclsh $(TOP)/www/limits.tcl >limits.html - -lang.html: $(TOP)/www/lang.tcl - tclsh $(TOP)/www/lang.tcl doc >lang.html - -pragma.html: $(TOP)/www/pragma.tcl - tclsh $(TOP)/www/pragma.tcl >pragma.html - -lockingv3.html: $(TOP)/www/lockingv3.tcl - tclsh $(TOP)/www/lockingv3.tcl >lockingv3.html - -sharedcache.html: $(TOP)/www/sharedcache.tcl - tclsh $(TOP)/www/sharedcache.tcl >sharedcache.html - -mingw.html: $(TOP)/www/mingw.tcl - tclsh $(TOP)/www/mingw.tcl >mingw.html - -nulls.html: $(TOP)/www/nulls.tcl - tclsh $(TOP)/www/nulls.tcl >nulls.html - -oldnews.html: $(TOP)/www/oldnews.tcl - tclsh $(TOP)/www/oldnews.tcl >oldnews.html - -omitted.html: $(TOP)/www/omitted.tcl - tclsh $(TOP)/www/omitted.tcl >omitted.html - -opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c - tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html - -optimizer.html: $(TOP)/www/optimizer.tcl - tclsh $(TOP)/www/optimizer.tcl >optimizer.html - -optoverview.html: $(TOP)/www/optoverview.tcl - tclsh $(TOP)/www/optoverview.tcl >optoverview.html - -quickstart.html: $(TOP)/www/quickstart.tcl - tclsh $(TOP)/www/quickstart.tcl >quickstart.html - -speed.html: $(TOP)/www/speed.tcl - tclsh $(TOP)/www/speed.tcl >speed.html - -sqlite.html: $(TOP)/www/sqlite.tcl - tclsh $(TOP)/www/sqlite.tcl >sqlite.html - -support.html: $(TOP)/www/support.tcl - tclsh $(TOP)/www/support.tcl >support.html - -tclsqlite.html: $(TOP)/www/tclsqlite.tcl - tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html - -vdbe.html: $(TOP)/www/vdbe.tcl - tclsh $(TOP)/www/vdbe.tcl 
>vdbe.html - -version3.html: $(TOP)/www/version3.tcl - tclsh $(TOP)/www/version3.tcl >version3.html - -whentouse.html: $(TOP)/www/whentouse.tcl - tclsh $(TOP)/www/whentouse.tcl >whentouse.html - - -# Files to be published on the website. -# -DOC = \ - arch.html \ - autoinc.html \ - c_interface.html \ - capi3.html \ - capi3ref.html \ - changes.html \ - compile.html \ - copyright.html \ - copyright-release.html \ - copyright-release.pdf \ - conflict.html \ - datatypes.html \ - datatype3.html \ - different.html \ - docs.html \ - download.html \ - faq.html \ - fileformat.html \ - formatchng.html \ - index.html \ - limits.html \ - lang.html \ - lockingv3.html \ - mingw.html \ - nulls.html \ - oldnews.html \ - omitted.html \ - opcode.html \ - optimizer.html \ - optoverview.html \ - pragma.html \ - quickstart.html \ - sharedcache.html \ - speed.html \ - sqlite.html \ - support.html \ - tclsqlite.html \ - vdbe.html \ - version3.html \ - whentouse.html - -doc: common.tcl $(DOC) - mkdir -p doc - mv $(DOC) doc - cp $(TOP)/www/*.gif $(TOP)/art/*.gif doc # Standard install and cleanup targets # @@ -701,5 +519,6 @@ rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out - rm -rf tsrc + rm -rf tsrc target_source rm -f testloadext.dll libtestloadext.so + rm -f sqlite3.c fts?amal.c tclsqlite3.c diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/Makefile.arm-wince-mingw32ce-gcc /tmp/3ARg2Grji7/sqlite3-3.6.16/Makefile.arm-wince-mingw32ce-gcc --- sqlite3-3.4.2/Makefile.arm-wince-mingw32ce-gcc 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/Makefile.arm-wince-mingw32ce-gcc 2008-06-26 11:41:19.000000000 +0100 @@ -0,0 +1,138 @@ +#!/usr/make +# +# Makefile for SQLITE +# +# This is a template makefile for SQLite. Most people prefer to +# use the autoconf generated "configure" script to generate the +# makefile automatically. But that does not work for everybody +# and in every situation. If you are having problems with the +# "configure" script, you might want to try this makefile as an +# alternative. Create a copy of this file, edit the parameters +# below and type "make". +# + +#### The directory where to find the mingw32ce tools +MINGW32CE = /opt/mingw32ce/bin + +#### The target prefix of the mingw32ce tools +TARGET = arm-wince-mingw32ce + +#### The toplevel directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure.in" script. +# +TOP = ../sqlite + +#### C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +BCC = gcc -g -O2 +#BCC = /opt/ancic/bin/c89 -0 + +#### If the target operating system supports the "usleep()" system +# call, then define the HAVE_USLEEP macro for all C modules. +# +USLEEP = +#USLEEP = -DHAVE_USLEEP=1 + +#### If you want the SQLite library to be safe for use within a +# multi-threaded program, then define the following macro +# appropriately: +# +THREADSAFE = -DTHREADSAFE=1 +#THREADSAFE = -DTHREADSAFE=0 + +#### Specify any extra linker options needed to make the library +# thread safe +# +#THREADLIB = -lpthread +THREADLIB = + +#### Specify any extra libraries needed to access required functions. +# +#TLIBS = -lrt # fdatasync on Solaris 8 +TLIBS = + +#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 +# to check for memory leaks. Use SQLITE_DEBUG=2 to print a log of all +# malloc()s and free()s in order to track down memory leaks. +# +# SQLite uses some expensive assert() statements in the inner loop. 
+# You can make the library go almost twice as fast if you compile +# with -DNDEBUG=1 +# +#OPTS = -DSQLITE_DEBUG=2 +#OPTS = -DSQLITE_DEBUG=1 +#OPTS = +OPTS = -DNDEBUG=1 -DSQLITE_OS_WIN=1 -D_WIN32_WCE=1 +#OPTS += -DHAVE_FDATASYNC=1 + +#### The suffix to add to executable files. ".exe" for windows. +# Nothing for unix. +# +EXE = .exe +#EXE = + +#### C Compile and options for use in building executables that +# will run on the target platform. This is usually the same +# as BCC, unless you are cross-compiling. +# +#TCC = gcc -O6 +#TCC = gcc -g -O0 -Wall +#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage +#TCC = /opt/mingw/bin/i386-mingw32-gcc -O6 +TCC = $(MINGW32CE)/$(TARGET)-gcc -O2 +#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive + +#### Tools used to build a static library. +# +#AR = ar cr +#AR = /opt/mingw/bin/i386-mingw32-ar cr +AR = $(MINGW32CE)/$(TARGET)-ar cr +#RANLIB = ranlib +#RANLIB = /opt/mingw/bin/i386-mingw32-ranlib +RANLIB = $(MINGW32CE)/$(TARGET)-ranlib + +#MKSHLIB = gcc -shared +#SO = so +#SHPREFIX = lib +MKSHLIB = $(MINGW32CE)/$(TARGET)-gcc -shared +SO = dll +SHPREFIX = + +#### Extra compiler options needed for programs that use the TCL library. +# +#TCL_FLAGS = +#TCL_FLAGS = -DSTATIC_BUILD=1 +TCL_FLAGS = -I/home/drh/tcltk/8.4linux +#TCL_FLAGS = -I/home/drh/tcltk/8.4win -DSTATIC_BUILD=1 +#TCL_FLAGS = -I/home/drh/tcltk/8.3hpux + +#### Linker options needed to link against the TCL library. +# +#LIBTCL = -ltcl -lm -ldl +LIBTCL = /home/drh/tcltk/8.4linux/libtcl8.4g.a -lm -ldl +#LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt +#LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc + +#### Additional objects for SQLite library when TCL support is enabled. +TCLOBJ = +#TCLOBJ = tclsqlite.o + +#### Compiler options needed for programs that use the readline() library. +# +READLINE_FLAGS = +#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline + +#### Linker options needed by programs using readline() must link against. +# +LIBREADLINE = +#LIBREADLINE = -static -lreadline -ltermcap + +#### Which "awk" program provides nawk compatibilty +# +# NAWK = nawk +NAWK = awk + +# You should not have to change anything below this line +############################################################################### +include $(TOP)/main.mk diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/Makefile.in /tmp/3ARg2Grji7/sqlite3-3.6.16/Makefile.in --- sqlite3-3.4.2/Makefile.in 2009-08-19 23:00:31.000000000 +0100 +++ sqlite3-3.6.16/Makefile.in 2009-08-19 23:00:31.000000000 +0100 @@ -26,7 +26,12 @@ # will run on the target platform. (BCC and TCC are usually the # same unless your are cross-compiling.) # -TCC = @CC@ @CFLAGS@ -I. -I${TOP}/src +TCC = @CC@ @CPPFLAGS@ @CFLAGS@ -I. 
-I${TOP}/src + +# Define this for the autoconf-based build, so that the code knows it can +# include the generated config.h +# +TCC += -D_HAVE_SQLITE_CONFIG_H # Define -DNDEBUG to compile without debugging (i.e., for production usage) # Omitting the define will cause extra debugging code to be inserted and @@ -52,26 +57,35 @@ # Should the database engine be compiled threadsafe # -TCC += -DTHREADSAFE=@THREADSAFE@ - -# The pthreads library if needed -# -LIBPTHREAD=@TARGET_THREAD_LIB@ +TCC += -DSQLITE_THREADSAFE=@SQLITE_THREADSAFE@ # Do threads override each others locks by default (1), or do we test (-1) # TCC += -DSQLITE_THREAD_OVERRIDE_LOCK=@THREADSOVERRIDELOCKS@ -# The fdatasync library +# Any target libraries which libsqlite must be linked against +# TLIBS = @LIBS@ # Flags controlling use of the in memory btree implementation # -# TEMP_STORE is 0 to force temporary tables to be in a file, 1 to +# SQLITE_TEMP_STORE is 0 to force temporary tables to be in a file, 1 to # default to file, 2 to default to memory, and 3 to force temporary # tables to always be in memory. # -TEMP_STORE = -DTEMP_STORE=@TEMP_STORE@ +TEMP_STORE = -DSQLITE_TEMP_STORE=@TEMP_STORE@ + +# Enable/disable loadable extensions, and other optional features +# based on configuration. (-DSQLITE_OMIT*, -DSQLITE_ENABLE*). +# The same set of OMIT and ENABLE flags should be passed to the +# LEMON parser generator and the mkkeywordhash tool as well. +OPT_FEATURE_FLAGS = @OPT_FEATURE_FLAGS@ + +TCC += $(OPT_FEATURE_FLAGS) + +# Add in any optional parameters specified on the make commane line +# ie. make "OPTS=-DSQLITE_ENABLE_FOO=1 -DSQLITE_OMIT_FOO=1". +TCC += $(OPTS) # Version numbers and release number for the SQLite being compiled. # @@ -91,10 +105,37 @@ # HAVE_TCL = @HAVE_TCL@ +# This is the command to use for tclsh - normally just "tclsh", but we may +# know the specific version we want to use +# +TCLSH_CMD = @TCLSH_CMD@ + +# Where do we want to install the tcl plugin +# +TCLLIBDIR = @TCLLIBDIR@ + # The suffix used on shared libraries. Ex: ".dll", ".so", ".dylib" # SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ +# If gcov support was enabled by the configure script, add the appropriate +# flags here. It's not always as easy as just having the user add the right +# CFLAGS / LDFLAGS, because libtool wants to use CFLAGS when linking, which +# causes build errors with -fprofile-arcs -ftest-coverage with some GCCs. +# Supposedly GCC does the right thing if you use --coverage, but in +# practice it still fails. See: +# +# http://www.mail-archive.com/debian-gcc@lists.debian.org/msg26197.html +# +# for more info. +# +GCOV_CFLAGS1 = -DSQLITE_COVERAGE_TEST=1 -fprofile-arcs -ftest-coverage +GCOV_LDFLAGS1 = -lgcov +USE_GCOV = @USE_GCOV@ +LTCOMPILE_EXTRAS += $(GCOV_CFLAGS$(USE_GCOV)) +LTLINK_EXTRAS += $(GCOV_LDFLAGS$(USE_GCOV)) + + # The directory into which to store package information for # Some standard variables and programs @@ -102,13 +143,16 @@ prefix = @prefix@ exec_prefix = @exec_prefix@ libdir = @libdir@ +pkgconfigdir = $(libdir)/pkgconfig +bindir = @bindir@ +includedir = @includedir@ INSTALL = @INSTALL@ LIBTOOL = ./libtool ALLOWRELEASE = @ALLOWRELEASE@ # libtool compile/link/install -LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(TCC) -LTLINK = $(LIBTOOL) --mode=link $(TCC) @LDFLAGS@ +LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(TCC) $(LTCOMPILE_EXTRAS) +LTLINK = $(LIBTOOL) --mode=link $(TCC) $(LTCOMPILE_EXTRAS) @LDFLAGS@ $(LTLINK_EXTRAS) LTINSTALL = $(LIBTOOL) --mode=install $(INSTALL) # nawk compatible awk. 
@@ -116,20 +160,33 @@ # You should not have to change anything below this line ############################################################################### -#TCC += -DSQLITE_OMIT_LOAD_EXTENSION=1 -TCC += -ldl -# Object files for the SQLite library. +# Object files for the SQLite library (non-amalgamation). +# +OBJS0 = alter.lo analyze.lo attach.lo auth.lo backup.lo bitvec.lo btmutex.lo \ + btree.lo build.lo callback.lo complete.lo date.lo \ + delete.lo expr.lo fault.lo func.lo global.lo \ + hash.lo journal.lo insert.lo legacy.lo loadext.lo \ + main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ + memjournal.lo \ + mutex.lo mutex_noop.lo mutex_os2.lo mutex_unix.lo mutex_w32.lo \ + notify.lo opcodes.lo os.lo os_unix.lo os_win.lo os_os2.lo \ + pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ + random.lo resolve.lo rowset.lo select.lo status.lo \ + table.lo tokenize.lo trigger.lo update.lo \ + util.lo vacuum.lo \ + vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo \ + walker.lo where.lo utf.lo vtab.lo + +# Object files for the amalgamation. # -LIBOBJ = alter.lo analyze.lo attach.lo auth.lo btree.lo build.lo \ - callback.lo complete.lo date.lo \ - delete.lo expr.lo func.lo hash.lo insert.lo loadext.lo \ - main.lo malloc.lo opcodes.lo os.lo os_unix.lo os_win.lo os_os2.lo \ - pager.lo parse.lo pragma.lo prepare.lo printf.lo random.lo \ - select.lo table.lo tokenize.lo trigger.lo update.lo \ - util.lo vacuum.lo \ - vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbefifo.lo vdbemem.lo \ - where.lo utf.lo legacy.lo vtab.lo +OBJS1 = sqlite3.lo + +# Determine the real value of LIBOBJ based on the 'configure' script +# +USE_AMALGAMATION = @USE_AMALGAMATION@ +LIBOBJ = $(OBJS$(USE_AMALGAMATION)) + # All of the source code files. # @@ -138,37 +195,68 @@ $(TOP)/src/analyze.c \ $(TOP)/src/attach.c \ $(TOP)/src/auth.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/bitvec.c \ + $(TOP)/src/btmutex.c \ $(TOP)/src/btree.c \ $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ $(TOP)/src/build.c \ $(TOP)/src/callback.c \ $(TOP)/src/complete.c \ $(TOP)/src/date.c \ $(TOP)/src/delete.c \ $(TOP)/src/expr.c \ + $(TOP)/src/fault.c \ $(TOP)/src/func.c \ + $(TOP)/src/global.c \ $(TOP)/src/hash.c \ $(TOP)/src/hash.h \ + $(TOP)/src/hwtime.h \ $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ $(TOP)/src/legacy.c \ $(TOP)/src/loadext.c \ $(TOP)/src/main.c \ $(TOP)/src/malloc.c \ + $(TOP)/src/mem0.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mem3.c \ + $(TOP)/src/mem5.c \ + $(TOP)/src/memjournal.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex.h \ + $(TOP)/src/mutex_noop.c \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/notify.c \ $(TOP)/src/os.c \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/os_os2.c \ $(TOP)/src/pager.c \ $(TOP)/src/pager.h \ $(TOP)/src/parse.y \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache.h \ + $(TOP)/src/pcache1.c \ $(TOP)/src/pragma.c \ $(TOP)/src/prepare.c \ $(TOP)/src/printf.c \ $(TOP)/src/random.c \ + $(TOP)/src/resolve.c \ + $(TOP)/src/rowset.c \ $(TOP)/src/select.c \ + $(TOP)/src/status.c \ $(TOP)/src/shell.c \ $(TOP)/src/sqlite.h.in \ + $(TOP)/src/sqlite3ext.h \ $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ $(TOP)/src/table.c \ $(TOP)/src/tclsqlite.c \ $(TOP)/src/tokenize.c \ @@ -182,12 +270,23 @@ $(TOP)/src/vdbeapi.c \ $(TOP)/src/vdbeaux.c \ $(TOP)/src/vdbeblob.c \ - $(TOP)/src/vdbefifo.c \ $(TOP)/src/vdbemem.c \ $(TOP)/src/vdbeInt.h \ $(TOP)/src/vtab.c \ 
+ $(TOP)/src/walker.c \ $(TOP)/src/where.c +# Generated source code files +# +SRC += \ + keywordhash.h \ + opcodes.c \ + opcodes.h \ + parse.c \ + parse.h \ + config.h \ + sqlite3.h + # Source code for extensions # SRC += \ @@ -198,13 +297,45 @@ $(TOP)/ext/fts1/fts1_porter.c \ $(TOP)/ext/fts1/fts1_tokenizer.h \ $(TOP)/ext/fts1/fts1_tokenizer1.c +SRC += \ + $(TOP)/ext/fts2/fts2.c \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.c \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_icu.c \ + $(TOP)/ext/fts2/fts2_porter.c \ + $(TOP)/ext/fts2/fts2_tokenizer.h \ + $(TOP)/ext/fts2/fts2_tokenizer.c \ + $(TOP)/ext/fts2/fts2_tokenizer1.c +SRC += \ + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.c \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_icu.c \ + $(TOP)/ext/fts3/fts3_porter.c \ + $(TOP)/ext/fts3/fts3_tokenizer.h \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_tokenizer1.c +SRC += \ + $(TOP)/ext/icu/sqliteicu.h \ + $(TOP)/ext/icu/icu.c +SRC += \ + $(TOP)/ext/rtree/rtree.h \ + $(TOP)/ext/rtree/rtree.c - -# Source code to the test files. +# Source code to the library files needed by the test fixture # -TESTSRC = \ +TESTSRC2 = \ + $(TOP)/src/attach.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/bitvec.c \ $(TOP)/src/btree.c \ + $(TOP)/src/build.c \ $(TOP)/src/date.c \ + $(TOP)/src/expr.c \ $(TOP)/src/func.c \ $(TOP)/src/insert.c \ $(TOP)/src/malloc.c \ @@ -213,8 +344,26 @@ $(TOP)/src/os_unix.c \ $(TOP)/src/os_win.c \ $(TOP)/src/pager.c \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache1.c \ $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/select.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbeapi.c \ + $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/where.c \ + parse.c + +# Source code to the actual test files. +# +TESTSRC = \ $(TOP)/src/test1.c \ $(TOP)/src/test2.c \ $(TOP)/src/test3.c \ @@ -226,18 +375,23 @@ $(TOP)/src/test9.c \ $(TOP)/src/test_autoext.c \ $(TOP)/src/test_async.c \ + $(TOP)/src/test_backup.c \ $(TOP)/src/test_btree.c \ $(TOP)/src/test_config.c \ + $(TOP)/src/test_devsym.c \ + $(TOP)/src/test_func.c \ $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_journal.c \ + $(TOP)/src/test_malloc.c \ $(TOP)/src/test_md5.c \ + $(TOP)/src/test_mutex.c \ + $(TOP)/src/test_onefile.c \ + $(TOP)/src/test_osinst.c \ + $(TOP)/src/test_pcache.c \ $(TOP)/src/test_schema.c \ $(TOP)/src/test_server.c \ $(TOP)/src/test_tclvar.c \ - $(TOP)/src/utf.c \ - $(TOP)/src/util.c \ - $(TOP)/src/vdbe.c \ - $(TOP)/src/vdbeaux.c \ - $(TOP)/src/where.c + $(TOP)/src/test_thread.c # Header files used by all library source files. 
# @@ -246,14 +400,18 @@ $(TOP)/src/btree.h \ $(TOP)/src/btreeInt.h \ $(TOP)/src/hash.h \ + $(TOP)/src/hwtime.h \ $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/mutex.h \ opcodes.h \ $(TOP)/src/os.h \ $(TOP)/src/os_common.h \ $(TOP)/src/sqlite3ext.h \ $(TOP)/src/sqliteInt.h \ $(TOP)/src/vdbe.h \ - parse.h + $(TOP)/src/vdbeInt.h \ + parse.h \ + config.h # Header files used by extensions # @@ -261,12 +419,29 @@ $(TOP)/ext/fts1/fts1.h \ $(TOP)/ext/fts1/fts1_hash.h \ $(TOP)/ext/fts1/fts1_tokenizer.h +HDR += \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_tokenizer.h +HDR += \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_tokenizer.h +HDR += \ + $(TOP)/ext/rtree/rtree.h +HDR += \ + $(TOP)/ext/icu/sqliteicu.h + +# If using the amalgamation, use sqlite3.c directly to build the test +# fixture. Otherwise link against libsqlite3.la. (This distinction is +# necessary because the test fixture requires non-API symbols which are +# hidden when the library is built via the amalgamation). +# +TESTFIXTURE_SRC0 = $(TESTSRC2) libsqlite3.la +TESTFIXTURE_SRC1 = sqlite3.c +TESTFIXTURE_SRC = $(TESTSRC) $(TOP)/src/tclsqlite.c $(TESTFIXTURE_SRC$(USE_AMALGAMATION)) -# Header files used by the VDBE submodule -# -VDBEHDR = \ - $(HDR) \ - $(TOP)/src/vdbeInt.h # This is the default Makefile target. The objects listed here # are what get build when you type just "make" with no arguments. @@ -276,6 +451,9 @@ Makefile: $(TOP)/Makefile.in ./config.status +sqlite3.pc: $(TOP)/sqlite3.pc.in + ./config.status + # Generate the file "last_change" which contains the date of change # of the most recently modified source code file # @@ -284,19 +462,20 @@ | $(NAWK) '{print $$5,$$6}' >last_change libsqlite3.la: $(LIBOBJ) - $(LTLINK) -o libsqlite3.la $(LIBOBJ) $(LIBPTHREAD) \ - ${ALLOWRELEASE} -rpath $(libdir) -version-info "8:6:8" + $(LTLINK) -o $@ $(LIBOBJ) $(TLIBS) \ + ${ALLOWRELEASE} -rpath "$(libdir)" -version-info "8:6:8" libtclsqlite3.la: tclsqlite.lo libsqlite3.la - $(LTLINK) -o libtclsqlite3.la tclsqlite.lo \ - $(LIBOBJ) @TCL_STUB_LIB_SPEC@ $(LIBPTHREAD) \ - -rpath $(libdir)/sqlite \ - -version-info "8:6:8" + $(LTLINK) -o $@ tclsqlite.lo \ + libsqlite3.la @TCL_STUB_LIB_SPEC@ $(TLIBS) \ + -rpath "$(TCLLIBDIR)" \ + -version-info "8:6:8" \ + -avoid-version sqlite3$(TEXE): $(TOP)/src/shell.c libsqlite3.la sqlite3.h - $(LTLINK) $(READLINE_FLAGS) $(LIBPTHREAD) \ + $(LTLINK) $(READLINE_FLAGS) \ -o $@ $(TOP)/src/shell.c libsqlite3.la \ - $(LIBREADLINE) $(TLIBS) + $(LIBREADLINE) $(TLIBS) -rpath "$(libdir)" # This target creates a directory named "tsrc" and fills it with # copies of all of the C source code and header files needed to @@ -304,72 +483,97 @@ # files are automatically generated. This target takes care of # all that automatic generation. 
# -target_source: $(SRC) parse.c opcodes.c keywordhash.h $(VDBEHDR) +.target_source: $(SRC) rm -rf tsrc mkdir -p tsrc - cp $(SRC) $(VDBEHDR) tsrc + cp $(SRC) tsrc rm tsrc/sqlite.h.in tsrc/parse.y - cp parse.c opcodes.c keywordhash.h tsrc + $(TCLSH_CMD) $(TOP)/tool/vdbe-compress.tcl vdbe.new + mv vdbe.new tsrc/vdbe.c + touch .target_source -sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl - tclsh $(TOP)/tool/mksqlite3c.tcl +sqlite3.c: .target_source $(TOP)/tool/mksqlite3c.tcl + $(TCLSH_CMD) $(TOP)/tool/mksqlite3c.tcl # Rules to build the LEMON compiler generator # lemon$(BEXE): $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c - $(BCC) -o lemon$(BEXE) $(TOP)/tool/lemon.c + $(BCC) -o $@ $(TOP)/tool/lemon.c cp $(TOP)/tool/lempar.c . +# Rule to build the amalgamation +# +sqlite3.lo: sqlite3.c + $(LTCOMPILE) $(TEMP_STORE) -c sqlite3.c + # Rules to build individual files # alter.lo: $(TOP)/src/alter.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/alter.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/alter.c analyze.lo: $(TOP)/src/analyze.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/analyze.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/analyze.c attach.lo: $(TOP)/src/attach.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/attach.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/attach.c auth.lo: $(TOP)/src/auth.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/auth.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/auth.c + +backup.lo: $(TOP)/src/backup.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/backup.c + +bitvec.lo: $(TOP)/src/bitvec.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/bitvec.c + +btmutex.lo: $(TOP)/src/btmutex.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/btmutex.c btree.lo: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h - $(LTCOMPILE) -c $(TOP)/src/btree.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/btree.c build.lo: $(TOP)/src/build.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/build.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/build.c callback.lo: $(TOP)/src/callback.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/callback.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/callback.c complete.lo: $(TOP)/src/complete.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/complete.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/complete.c date.lo: $(TOP)/src/date.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/date.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/date.c delete.lo: $(TOP)/src/delete.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/delete.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/delete.c expr.lo: $(TOP)/src/expr.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/expr.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/expr.c + +fault.lo: $(TOP)/src/fault.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/fault.c func.lo: $(TOP)/src/func.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/func.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/func.c + +global.lo: $(TOP)/src/global.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/global.c hash.lo: $(TOP)/src/hash.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/hash.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/hash.c insert.lo: $(TOP)/src/insert.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/insert.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/insert.c + +journal.lo: $(TOP)/src/journal.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/journal.c legacy.lo: $(TOP)/src/legacy.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/legacy.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/legacy.c loadext.lo: $(TOP)/src/loadext.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/loadext.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/loadext.c main.lo: $(TOP)/src/main.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c 
$(TOP)/src/main.c @@ -377,11 +581,53 @@ malloc.lo: $(TOP)/src/malloc.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/malloc.c +mem0.lo: $(TOP)/src/mem0.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem0.c + +mem1.lo: $(TOP)/src/mem1.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem1.c + +mem2.lo: $(TOP)/src/mem2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem2.c + +mem3.lo: $(TOP)/src/mem3.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem3.c + +mem5.lo: $(TOP)/src/mem5.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem5.c + +memjournal.lo: $(TOP)/src/memjournal.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/memjournal.c + +mutex.lo: $(TOP)/src/mutex.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex.c + +mutex_noop.lo: $(TOP)/src/mutex_noop.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_noop.c + +mutex_os2.lo: $(TOP)/src/mutex_os2.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_os2.c + +mutex_unix.lo: $(TOP)/src/mutex_unix.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_unix.c + +mutex_w32.lo: $(TOP)/src/mutex_w32.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_w32.c + +notify.lo: $(TOP)/src/notify.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/notify.c + pager.lo: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h - $(LTCOMPILE) -c $(TOP)/src/pager.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pager.c + +pcache.lo: $(TOP)/src/pcache.c $(HDR) $(TOP)/src/pcache.h + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache.c + +pcache1.lo: $(TOP)/src/pcache1.c $(HDR) $(TOP)/src/pcache.h + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache1.c opcodes.lo: opcodes.c - $(LTCOMPILE) -c opcodes.c + $(LTCOMPILE) $(TEMP_STORE) -c opcodes.c opcodes.c: opcodes.h $(TOP)/mkopcodec.awk sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c @@ -390,98 +636,107 @@ cat parse.h $(TOP)/src/vdbe.c | $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h os.lo: $(TOP)/src/os.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/os.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os.c os_unix.lo: $(TOP)/src/os_unix.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/os_unix.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_unix.c os_win.lo: $(TOP)/src/os_win.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/os_win.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_win.c os_os2.lo: $(TOP)/src/os_os2.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/os_os2.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_os2.c parse.lo: parse.c $(HDR) - $(LTCOMPILE) -c parse.c + $(LTCOMPILE) $(TEMP_STORE) -c parse.c parse.h: parse.c parse.c: $(TOP)/src/parse.y lemon$(BEXE) $(TOP)/addopcodes.awk cp $(TOP)/src/parse.y . 
- ./lemon$(BEXE) $(OPTS) parse.y + ./lemon$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) parse.y mv parse.h parse.h.temp $(NAWK) -f $(TOP)/addopcodes.awk parse.h.temp >parse.h pragma.lo: $(TOP)/src/pragma.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/pragma.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pragma.c prepare.lo: $(TOP)/src/prepare.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/prepare.c printf.lo: $(TOP)/src/printf.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/printf.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/printf.c random.lo: $(TOP)/src/random.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/random.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/random.c + +resolve.lo: $(TOP)/src/resolve.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/resolve.c + +rowset.lo: $(TOP)/src/rowset.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/rowset.c select.lo: $(TOP)/src/select.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/select.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/select.c + +status.lo: $(TOP)/src/status.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/status.c sqlite3.h: $(TOP)/src/sqlite.h.in sed -e s/--VERS--/$(RELEASE)/ $(TOP)/src/sqlite.h.in | \ sed -e s/--VERSION-NUMBER--/$(VERSION_NUMBER)/ >sqlite3.h table.lo: $(TOP)/src/table.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/table.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/table.c tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/tclsqlite.c + $(LTCOMPILE) -DUSE_TCL_STUBS=1 -c $(TOP)/src/tclsqlite.c tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) - $(LTCOMPILE) -c $(TOP)/src/tokenize.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/tokenize.c keywordhash.h: $(TOP)/tool/mkkeywordhash.c - $(BCC) -o mkkeywordhash$(BEXE) $(OPTS) $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) $(TOP)/tool/mkkeywordhash.c ./mkkeywordhash$(BEXE) >keywordhash.h trigger.lo: $(TOP)/src/trigger.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/trigger.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/trigger.c update.lo: $(TOP)/src/update.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/update.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/update.c utf.lo: $(TOP)/src/utf.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/utf.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/utf.c util.lo: $(TOP)/src/util.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/util.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/util.c vacuum.lo: $(TOP)/src/vacuum.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/vacuum.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vacuum.c -vdbe.lo: $(TOP)/src/vdbe.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbe.c +vdbe.lo: $(TOP)/src/vdbe.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbe.c -vdbeapi.lo: $(TOP)/src/vdbeapi.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbeapi.c +vdbeapi.lo: $(TOP)/src/vdbeapi.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeapi.c -vdbeaux.lo: $(TOP)/src/vdbeaux.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbeaux.c +vdbeaux.lo: $(TOP)/src/vdbeaux.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeaux.c -vdbeblob.lo: $(TOP)/src/vdbeblob.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbeblob.c +vdbeblob.lo: $(TOP)/src/vdbeblob.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeblob.c -vdbefifo.lo: $(TOP)/src/vdbefifo.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbefifo.c +vdbemem.lo: $(TOP)/src/vdbemem.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbemem.c -vdbemem.lo: $(TOP)/src/vdbemem.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vdbemem.c +vtab.lo: $(TOP)/src/vtab.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vtab.c -vtab.lo: 
$(TOP)/src/vtab.c $(VDBEHDR) - $(LTCOMPILE) -c $(TOP)/src/vtab.c +walker.lo: $(TOP)/src/walker.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/walker.c where.lo: $(TOP)/src/where.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/where.c + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/where.c tclsqlite-shell.lo: $(TOP)/src/tclsqlite.c $(HDR) $(LTCOMPILE) -DTCLSH=1 -o $@ -c $(TOP)/src/tclsqlite.c @@ -489,25 +744,24 @@ tclsqlite-stubs.lo: $(TOP)/src/tclsqlite.c $(HDR) $(LTCOMPILE) -DTCL_USE_STUBS=1 -o $@ -c $(TOP)/src/tclsqlite.c -tclsqlite3: tclsqlite-shell.lo libsqlite3.la - $(LTLINK) -o tclsqlite3 tclsqlite-shell.lo \ +tclsqlite3$(TEXE): tclsqlite-shell.lo libsqlite3.la + $(LTLINK) -o $@ tclsqlite-shell.lo \ libsqlite3.la $(LIBTCL) -testfixture$(TEXE): $(TOP)/src/tclsqlite.c libsqlite3.la $(TESTSRC) - $(LTLINK) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ - -DSQLITE_NO_SYNC=1 $(TEMP_STORE) \ - -o testfixture $(TESTSRC) $(TOP)/src/tclsqlite.c \ - libsqlite3.la $(LIBTCL) +testfixture$(TEXE): $(TESTFIXTURE_SRC) + $(LTLINK) -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_NO_SYNC=1\ + -DSQLITE_CRASH_TEST=1 \ + -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE $(TEMP_STORE) \ + -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) $(TLIBS) fulltest: testfixture$(TEXE) sqlite3$(TEXE) - ./testfixture $(TOP)/test/all.test + ./testfixture$(TEXE) $(TOP)/test/all.test test: testfixture$(TEXE) sqlite3$(TEXE) - ./testfixture $(TOP)/test/quick.test + ./testfixture$(TEXE) $(TOP)/test/veryquick.test -sqlite3_analyzer$(TEXE): $(TOP)/src/tclsqlite.c libtclsqlite3.la \ - $(TESTSRC) $(TOP)/tool/spaceanal.tcl +sqlite3_analyzer$(TEXE): $(TESTFIXTURE_SRC) $(TOP)/tool/spaceanal.tcl sed \ -e '/^#/d' \ -e 's,\\,\\\\,g' \ @@ -515,205 +769,51 @@ -e 's,^,",' \ -e 's,$$,\\n",' \ $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h - $(LTLINK) -DTCLSH=2 -DSQLITE_TEST=1 $(TEMP_STORE)\ - -o sqlite3_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ - libtclsqlite3.la $(LIBTCL) - -# Rules used to build documentation -# -arch.html: $(TOP)/www/arch.tcl - tclsh $(TOP)/www/arch.tcl >arch.html - -arch2.gif: $(TOP)/www/arch2.gif - cp $(TOP)/www/arch2.gif . - -autoinc.html: $(TOP)/www/autoinc.tcl - tclsh $(TOP)/www/autoinc.tcl >autoinc.html - -c_interface.html: $(TOP)/www/c_interface.tcl - tclsh $(TOP)/www/c_interface.tcl >c_interface.html - -capi3.html: $(TOP)/www/capi3.tcl - tclsh $(TOP)/www/capi3.tcl >capi3.html - -capi3ref.html: $(TOP)/www/mkapidoc.tcl sqlite3.h - tclsh $(TOP)/www/mkapidoc.tcl capi3ref.html - -changes.html: $(TOP)/www/changes.tcl - tclsh $(TOP)/www/changes.tcl >changes.html - -compile.html: $(TOP)/www/compile.tcl - tclsh $(TOP)/www/compile.tcl >compile.html - -copyright.html: $(TOP)/www/copyright.tcl - tclsh $(TOP)/www/copyright.tcl >copyright.html - -copyright-release.html: $(TOP)/www/copyright-release.html - cp $(TOP)/www/copyright-release.html . - -copyright-release.pdf: $(TOP)/www/copyright-release.pdf - cp $(TOP)/www/copyright-release.pdf . - -common.tcl: $(TOP)/www/common.tcl - cp $(TOP)/www/common.tcl . 
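The sed pipeline in the sqlite3_analyzer rule above wraps every line of spaceanal.tcl in double quotes and appends \n before writing spaceanal_tcl.h, so the generated header can be dropped into a C initializer and the compiler's string-literal concatenation glues the whole Tcl script back into one constant. A minimal stand-alone sketch of that trick follows; the script lines and the variable name are purely illustrative, not SQLite's actual code.

    #include <stdio.h>

    /* Each generated line has the form "original text\n"; adjacent string
    ** literals are concatenated at compile time into one embedded script.
    ** In the real build these literals would come from spaceanal_tcl.h. */
    static const char zScript[] =
      "set doc {space analysis}\n"
      "puts $doc\n"
    ;

    int main(void){
      fputs(zScript, stdout);   /* prints the embedded script, one line per input line */
      return 0;
    }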
+ $(LTLINK) -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 \ + -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE \ + $(TEMP_STORE) -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) -conflict.html: $(TOP)/www/conflict.tcl - tclsh $(TOP)/www/conflict.tcl >conflict.html -datatypes.html: $(TOP)/www/datatypes.tcl - tclsh $(TOP)/www/datatypes.tcl >datatypes.html - -datatype3.html: $(TOP)/www/datatype3.tcl - tclsh $(TOP)/www/datatype3.tcl >datatype3.html - -docs.html: $(TOP)/www/docs.tcl - tclsh $(TOP)/www/docs.tcl >docs.html - -download.html: $(TOP)/www/download.tcl - mkdir -p doc - tclsh $(TOP)/www/download.tcl >download.html - -faq.html: $(TOP)/www/faq.tcl - tclsh $(TOP)/www/faq.tcl >faq.html - -fileformat.html: $(TOP)/www/fileformat.tcl - tclsh $(TOP)/www/fileformat.tcl >fileformat.html - -formatchng.html: $(TOP)/www/formatchng.tcl - tclsh $(TOP)/www/formatchng.tcl >formatchng.html - -index.html: $(TOP)/www/index.tcl last_change - tclsh $(TOP)/www/index.tcl >index.html - -limits.html: $(TOP)/www/limits.tcl last_change - tclsh $(TOP)/www/limits.tcl >limits.html - -lang.html: $(TOP)/www/lang.tcl - tclsh $(TOP)/www/lang.tcl >lang.html - -pragma.html: $(TOP)/www/pragma.tcl - tclsh $(TOP)/www/pragma.tcl >pragma.html - -lockingv3.html: $(TOP)/www/lockingv3.tcl - tclsh $(TOP)/www/lockingv3.tcl >lockingv3.html - -oldnews.html: $(TOP)/www/oldnews.tcl - tclsh $(TOP)/www/oldnews.tcl >oldnews.html - -omitted.html: $(TOP)/www/omitted.tcl - tclsh $(TOP)/www/omitted.tcl >omitted.html - -opcode.html: $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c - tclsh $(TOP)/www/opcode.tcl $(TOP)/src/vdbe.c >opcode.html - -mingw.html: $(TOP)/www/mingw.tcl - tclsh $(TOP)/www/mingw.tcl >mingw.html - -nulls.html: $(TOP)/www/nulls.tcl - tclsh $(TOP)/www/nulls.tcl >nulls.html - -quickstart.html: $(TOP)/www/quickstart.tcl - tclsh $(TOP)/www/quickstart.tcl >quickstart.html - -speed.html: $(TOP)/www/speed.tcl - tclsh $(TOP)/www/speed.tcl >speed.html - -sqlite.gif: $(TOP)/art/SQLite.gif - cp $(TOP)/art/SQLite.gif sqlite.gif - -sqlite.html: $(TOP)/www/sqlite.tcl - tclsh $(TOP)/www/sqlite.tcl >sqlite.html - -support.html: $(TOP)/www/support.tcl - tclsh $(TOP)/www/support.tcl >support.html - -tclsqlite.html: $(TOP)/www/tclsqlite.tcl - tclsh $(TOP)/www/tclsqlite.tcl >tclsqlite.html - -vdbe.html: $(TOP)/www/vdbe.tcl - tclsh $(TOP)/www/vdbe.tcl >vdbe.html - -version3.html: $(TOP)/www/version3.tcl - tclsh $(TOP)/www/version3.tcl >version3.html - - -# Files to be published on the website. 
-# -DOC = \ - arch.html \ - arch2.gif \ - autoinc.html \ - c_interface.html \ - capi3.html \ - capi3ref.html \ - changes.html \ - compile.html \ - copyright.html \ - copyright-release.html \ - copyright-release.pdf \ - conflict.html \ - datatypes.html \ - datatype3.html \ - docs.html \ - download.html \ - faq.html \ - fileformat.html \ - formatchng.html \ - index.html \ - lang.html \ - limits.html \ - lockingv3.html \ - mingw.html \ - nulls.html \ - oldnews.html \ - omitted.html \ - opcode.html \ - pragma.html \ - quickstart.html \ - speed.html \ - sqlite.gif \ - sqlite.html \ - support.html \ - tclsqlite.html \ - vdbe.html \ - version3.html - -doc: common.tcl $(DOC) - mkdir -p doc - mv $(DOC) doc - -install: sqlite3 libsqlite3.la sqlite3.h ${HAVE_TCL:1=tcl_install} +lib_install: libsqlite3.la $(INSTALL) -d $(DESTDIR)$(libdir) $(LTINSTALL) libsqlite3.la $(DESTDIR)$(libdir) - $(INSTALL) -d $(DESTDIR)$(exec_prefix)/bin - $(LTINSTALL) sqlite3 $(DESTDIR)$(exec_prefix)/bin - $(INSTALL) -d $(DESTDIR)$(prefix)/include - $(INSTALL) -m 0644 sqlite3.h $(DESTDIR)$(prefix)/include - $(INSTALL) -m 0644 $(TOP)/src/sqlite3ext.h $(DESTDIR)$(prefix)/include - $(INSTALL) -d $(DESTDIR)$(libdir)/pkgconfig; - $(INSTALL) -m 0644 sqlite3.pc $(DESTDIR)$(libdir)/pkgconfig; - -tcl_install: libtclsqlite3.la - tclsh $(TOP)/tclinstaller.tcl $(VERSION) + +install: sqlite3$(BEXE) lib_install sqlite3.h sqlite3.pc ${HAVE_TCL:1=tcl_install} + $(INSTALL) -d $(DESTDIR)$(bindir) + $(LTINSTALL) sqlite3$(BEXE) $(DESTDIR)$(bindir) + $(INSTALL) -d $(DESTDIR)$(includedir) + $(INSTALL) -m 0644 sqlite3.h $(DESTDIR)$(includedir) + $(INSTALL) -m 0644 $(TOP)/src/sqlite3ext.h $(DESTDIR)$(includedir) + $(INSTALL) -d $(DESTDIR)$(pkgconfigdir) + $(INSTALL) -m 0644 sqlite3.pc $(DESTDIR)$(pkgconfigdir) + +pkgIndex.tcl: + echo 'package ifneeded sqlite3 $(RELEASE) [list load $(TCLLIBDIR)/libtclsqlite3.so sqlite3]' > $@ +tcl_install: lib_install libtclsqlite3.la pkgIndex.tcl + $(INSTALL) -d $(DESTDIR)$(TCLLIBDIR) + $(LTINSTALL) libtclsqlite3.la $(DESTDIR)$(TCLLIBDIR) + rm -f $(DESTDIR)$(TCLLIBDIR)/libtclsqlite3.la $(DESTDIR)$(TCLLIBDIR)/libtclsqlite3.a + $(INSTALL) -m 0644 pkgIndex.tcl $(DESTDIR)$(TCLLIBDIR) clean: rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la rm -f sqlite3.h opcodes.* - rm -rf .libs .deps + rm -rf .libs .deps tsrc rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash$(BEXE) keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -f testfixture$(TEXE) test.db - [ -f doc/lemon.html ] && mv doc/lemon.html . && mv doc/report1.txt . + [ -f doc/lemon.html ] && mv doc/lemon.html doc/report1.txt . 
rm -rf doc/* - [ -f lemon.html ] && mv lemon.html doc/ && mv report1.txt doc/ + [ -f lemon.html ] && mv lemon.html report1.txt doc/ rm -f common.tcl rm -f sqlite3.dll sqlite3.lib sqlite3.def + rm -f sqlite3.c .target_source rm -f last_change distclean: clean - rm -f config.log config.status libtool Makefile config.h + rm -f config.log config.status libtool Makefile sqlite3.pc # # Windows section @@ -730,5 +830,5 @@ | sed 's/^.* _//' >>sqlite3.def sqlite3.dll: $(REAL_LIBOBJ) sqlite3.def - $(TCC) -shared -o sqlite3.dll sqlite3.def \ + $(TCC) -shared -o $@ sqlite3.def \ -Wl,"--strip-all" $(REAL_LIBOBJ) diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/Makefile.linux-gcc /tmp/3ARg2Grji7/sqlite3-3.6.16/Makefile.linux-gcc --- sqlite3-3.4.2/Makefile.linux-gcc 2007-06-28 13:46:18.000000000 +0100 +++ sqlite3-3.6.16/Makefile.linux-gcc 2009-05-05 04:39:48.000000000 +0100 @@ -104,6 +104,10 @@ #LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt #LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc +#### Additional objects for SQLite library when TCL support is enabled. +#TCLOBJ = +TCLOBJ = tclsqlite.o + #### Compiler options needed for programs that use the readline() library. # READLINE_FLAGS = diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/Makefile.vxworks /tmp/3ARg2Grji7/sqlite3-3.6.16/Makefile.vxworks --- sqlite3-3.4.2/Makefile.vxworks 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/Makefile.vxworks 2009-06-25 12:24:37.000000000 +0100 @@ -0,0 +1,661 @@ +#!/usr/make +# +# Makefile for SQLITE on VxWorks + +ifeq ($(FORCPU),) + FORCPU = SH32gnule +endif + +TOOL_FAMILY = gnu + +include $(WIND_USR)/tool/gnu/make.$(FORCPU) + +#### The toplevel directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure.in" script. +# +TOP = . + +#### C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +BCC = gcc -g -O2 +#BCC = /opt/ancic/bin/c89 -0 + +#### If the target operating system supports the "usleep()" system +# call, then define the HAVE_USLEEP macro for all C modules. +# +USLEEP = +#USLEEP = -DHAVE_USLEEP=1 + +#### If you want the SQLite library to be safe for use within a +# multi-threaded program, then define the following macro +# appropriately: +# +THREADSAFE = -DSQLITE_THREADSAFE=1 +#THREADSAFE = -DSQLITE_THREADSAFE=0 + +#### Specify any extra linker options needed to make the library +# thread safe +# +#THREADLIB = -lpthread +THREADLIB = + +#### Specify any extra libraries needed to access required functions. +# +ifeq ($(CPU),SH32) + # for SH4 shared library + TLIBS_SHARED += -L$(WIND_USR)/lib/sh/SH32/commonle/PIC +else + # for all other CPUs shared library + TLIBS_SHARED += $(LD_LINK_PATH_ATEND) $(LD_PARTIAL_LAST_FLAGS) +endif +# for static library +TLIBS += $(LD_LINK_PATH_ATEND) $(LD_PARTIAL_LAST_FLAGS) + +#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 +# to check for memory leaks. Use SQLITE_DEBUG=2 to print a log of all +# malloc()s and free()s in order to track down memory leaks. +# +# SQLite uses some expensive assert() statements in the inner loop. 
+# You can make the library go almost twice as fast if you compile +# with -DNDEBUG=1 +# +#OPTS = -DSQLITE_DEBUG=2 +#OPTS = -DSQLITE_DEBUG=1 +#OPTS = +OPTS = -DNDEBUG=1 -DSQLITE_OS_UNIX=1 $(THREADSAFE) +OPTS += -DSQLITE_OMIT_LOAD_EXTENSION=1 +OPTS += -DSQLITE_ENABLE_LOCKING_STYLE=1 +OPTS += -DSQLITE_THREAD_OVERRIDE_LOCK=0 +OPTS += -DSQLITE_ENABLE_COLUMN_METADATA=1 +OPTS += -DHAVE_FDATASYNC=1 + +#### The suffix to add to executable files. ".exe" for windows. +# Nothing for unix. +# +EXE = .vxe +#EXE = + +#### C Compile and options for use in building executables that +# will run on the target platform. This is usually the same +# as BCC, unless you are cross-compiling. +# +#TCC = gcc -O6 +#TCC = gcc -g -O0 -Wall +#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage +#TCC = /opt/mingw/bin/i386-mingw32-gcc -O6 +TCC = $(CC) $(DEFINE_CC) -O2 -g -mrtp $(CC_ARCH_SPEC) -D_REENTRANT=1 -D_VX_CPU=_VX_$(CPU) -D_VX_TOOL_FAMILY=$(TOOL_FAMILY) -D_VX_TOOL=$(TOOL) +TCC += -I$(WIND_USR)/h -I$(WIND_USR)/h/wrn/coreip +#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive + +#TCC_SHARED = $(TCC) -fPIC +TCC_SHARED = $(TCC) + +#### Tools used to build a static library. +# +#ARX = ar cr +#ARX = /opt/mingw/bin/i386-mingw32-ar cr +AR += cr +#RANLIB = ranlib +#RANLIB = /opt/mingw/bin/i386-mingw32-ranlib + +#MKSHLIB = gcc -shared +#SO = so +#SHPREFIX = lib +MKSHLIB = $(CC) $(DEFINE_CC) -mrtp -shared $(CC_ARCH_SPEC) -D_VX_CPU=_VX_$(CPU) -D_VX_TOOL_FAMILY=$(TOOL_FAMILY) -D_VX_TOOL=$(TOOL) +SO = so +SHPREFIX = lib + +#### Extra compiler options needed for programs that use the TCL library. +# +#TCL_FLAGS = +#TCL_FLAGS = -DSTATIC_BUILD=1 +TCL_FLAGS = -I/home/drh/tcltk/8.4linux +#TCL_FLAGS = -I/home/drh/tcltk/8.4win -DSTATIC_BUILD=1 +#TCL_FLAGS = -I/home/drh/tcltk/8.3hpux + +#### Linker options needed to link against the TCL library. +# +#LIBTCL = -ltcl -lm -ldl +LIBTCL = /home/drh/tcltk/8.4linux/libtcl8.4g.a -lm -ldl +#LIBTCL = /home/drh/tcltk/8.4win/libtcl84s.a -lmsvcrt +#LIBTCL = /home/drh/tcltk/8.3hpux/libtcl8.3.a -ldld -lm -lc + +#### Additional objects for SQLite library when TCL support is enabled. +TCLOBJ = +#TCLOBJ = tclsqlite.o + +#### Compiler options needed for programs that use the readline() library. +# +READLINE_FLAGS = +#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline + +#### Linker options needed by programs using readline() must link against. +# +LIBREADLINE = +#LIBREADLINE = -static -lreadline -ltermcap + +#### Which "awk" program provides nawk compatibilty +# +# NAWK = nawk +NAWK = awk + + +#### Pasted and adapted main.mk file +############################################################################### +# The following macros should be defined before this script is +# invoked: +# +# TOP The toplevel directory of the source tree. This is the +# directory that contains this "Makefile.in" and the +# "configure.in" script. +# +# BCC C Compiler and options for use in building executables that +# will run on the platform that is doing the build. +# +# THREADLIB Specify any extra linker options needed to make the library +# thread safe +# +# OPTS Extra compiler command-line options. +# +# EXE The suffix to add to executable files. ".exe" for windows +# and "" for Unix. +# +# TCC C Compiler and options for use in building executables that +# will run on the target platform. This is usually the same +# as BCC, unless you are cross-compiling. +# +# AR Tools used to build a static library. +# RANLIB +# +# TCL_FLAGS Extra compiler options needed for programs that use the +# TCL library. 
+# +# LIBTCL Linker options needed to link against the TCL library. +# +# READLINE_FLAGS Compiler options needed for programs that use the +# readline() library. +# +# LIBREADLINE Linker options needed by programs using readline() must +# link against. +# +# NAWK Nawk compatible awk program. Older (obsolete?) solaris +# systems need this to avoid using the original AT&T AWK. +# +# Once the macros above are defined, the rest of this make script will +# build the SQLite library and testing tools. +################################################################################ + +# This is how we compile +# +TCCX = $(TCC) $(OPTS) -I. -I$(TOP)/src -I$(TOP) +TCCX_SHARED = $(TCC_SHARED) $(OPTS) -I. -I$(TOP)/src -I$(TOP) \ + -I$(TOP)/ext/rtree -I$(TOP)/ext/icu -I$(TOP)/ext/fts3 \ + -I$(TOP)/ext/async + +# Object files for the SQLite library. +# +LIBOBJ+= alter.o analyze.o attach.o auth.o \ + backup.o bitvec.o btmutex.o btree.o build.o \ + callback.o complete.o date.o delete.o expr.o fault.o \ + fts3.o fts3_expr.o fts3_hash.o fts3_icu.o fts3_porter.o \ + fts3_tokenizer.o fts3_tokenizer1.o \ + func.o global.o hash.o \ + icu.o insert.o journal.o legacy.o loadext.o \ + main.o malloc.o mem0.o mem1.o mem2.o mem3.o mem5.o \ + memjournal.o \ + mutex.o mutex_noop.o mutex_os2.o mutex_unix.o mutex_w32.o \ + notify.o opcodes.o os.o os_os2.o os_unix.o os_win.o \ + pager.o parse.o pcache.o pcache1.o pragma.o prepare.o printf.o \ + random.o resolve.o rowset.o rtree.o select.o status.o \ + table.o tokenize.o trigger.o \ + update.o util.o vacuum.o \ + vdbe.o vdbeapi.o vdbeaux.o vdbeblob.o vdbemem.o \ + walker.o where.o utf.o vtab.o + + + +# All of the source code files. +# +SRC = \ + $(TOP)/src/alter.c \ + $(TOP)/src/analyze.c \ + $(TOP)/src/attach.c \ + $(TOP)/src/auth.c \ + $(TOP)/src/backup.c \ + $(TOP)/src/bitvec.c \ + $(TOP)/src/btmutex.c \ + $(TOP)/src/btree.c \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/build.c \ + $(TOP)/src/callback.c \ + $(TOP)/src/complete.c \ + $(TOP)/src/date.c \ + $(TOP)/src/delete.c \ + $(TOP)/src/expr.c \ + $(TOP)/src/fault.c \ + $(TOP)/src/func.c \ + $(TOP)/src/global.c \ + $(TOP)/src/hash.c \ + $(TOP)/src/hash.h \ + $(TOP)/src/hwtime.h \ + $(TOP)/src/insert.c \ + $(TOP)/src/journal.c \ + $(TOP)/src/legacy.c \ + $(TOP)/src/loadext.c \ + $(TOP)/src/main.c \ + $(TOP)/src/malloc.c \ + $(TOP)/src/mem0.c \ + $(TOP)/src/mem1.c \ + $(TOP)/src/mem2.c \ + $(TOP)/src/mem3.c \ + $(TOP)/src/mem5.c \ + $(TOP)/src/memjournal.c \ + $(TOP)/src/mutex.c \ + $(TOP)/src/mutex.h \ + $(TOP)/src/mutex_noop.c \ + $(TOP)/src/mutex_os2.c \ + $(TOP)/src/mutex_unix.c \ + $(TOP)/src/mutex_w32.c \ + $(TOP)/src/notify.c \ + $(TOP)/src/os.c \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/os_os2.c \ + $(TOP)/src/os_unix.c \ + $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c \ + $(TOP)/src/pager.h \ + $(TOP)/src/parse.y \ + $(TOP)/src/pcache.c \ + $(TOP)/src/pcache.h \ + $(TOP)/src/pcache1.c \ + $(TOP)/src/pragma.c \ + $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c \ + $(TOP)/src/random.c \ + $(TOP)/src/resolve.c \ + $(TOP)/src/rowset.c \ + $(TOP)/src/select.c \ + $(TOP)/src/status.c \ + $(TOP)/src/shell.c \ + $(TOP)/src/sqlite.h.in \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/table.c \ + $(TOP)/src/tclsqlite.c \ + $(TOP)/src/tokenize.c \ + $(TOP)/src/trigger.c \ + $(TOP)/src/utf.c \ + $(TOP)/src/update.c \ + $(TOP)/src/util.c \ + $(TOP)/src/vacuum.c \ + $(TOP)/src/vdbe.c \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeapi.c \ + 
$(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbeblob.c \ + $(TOP)/src/vdbemem.c \ + $(TOP)/src/vdbeInt.h \ + $(TOP)/src/vtab.c \ + $(TOP)/src/walker.c \ + $(TOP)/src/where.c + +# Source code for extensions +# +SRC += \ + $(TOP)/ext/fts1/fts1.c \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.c \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_porter.c \ + $(TOP)/ext/fts1/fts1_tokenizer.h \ + $(TOP)/ext/fts1/fts1_tokenizer1.c +SRC += \ + $(TOP)/ext/fts2/fts2.c \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.c \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_icu.c \ + $(TOP)/ext/fts2/fts2_porter.c \ + $(TOP)/ext/fts2/fts2_tokenizer.h \ + $(TOP)/ext/fts2/fts2_tokenizer.c \ + $(TOP)/ext/fts2/fts2_tokenizer1.c +SRC += \ + $(TOP)/ext/fts3/fts3.c \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.c \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_icu.c \ + $(TOP)/ext/fts3/fts3_porter.c \ + $(TOP)/ext/fts3/fts3_tokenizer.h \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/fts3/fts3_tokenizer1.c +SRC += \ + $(TOP)/ext/icu/sqliteicu.h \ + $(TOP)/ext/icu/icu.c +SRC += \ + $(TOP)/ext/rtree/rtree.h \ + $(TOP)/ext/rtree/rtree.c + + +# Generated source code files +# +SRC += \ + keywordhash.h \ + opcodes.c \ + opcodes.h \ + parse.c \ + parse.h \ + sqlite3.h + + +# Source code to the test files. +# +TESTSRC = \ + $(TOP)/src/test1.c \ + $(TOP)/src/test2.c \ + $(TOP)/src/test3.c \ + $(TOP)/src/test4.c \ + $(TOP)/src/test5.c \ + $(TOP)/src/test6.c \ + $(TOP)/src/test7.c \ + $(TOP)/src/test8.c \ + $(TOP)/src/test9.c \ + $(TOP)/src/test_autoext.c \ + $(TOP)/src/test_async.c \ + $(TOP)/src/test_backup.c \ + $(TOP)/src/test_btree.c \ + $(TOP)/src/test_config.c \ + $(TOP)/src/test_devsym.c \ + $(TOP)/src/test_func.c \ + $(TOP)/src/test_hexio.c \ + $(TOP)/src/test_journal.c \ + $(TOP)/src/test_malloc.c \ + $(TOP)/src/test_md5.c \ + $(TOP)/src/test_mutex.c \ + $(TOP)/src/test_onefile.c \ + $(TOP)/src/test_osinst.c \ + $(TOP)/src/test_pcache.c \ + $(TOP)/src/test_schema.c \ + $(TOP)/src/test_server.c \ + $(TOP)/src/test_tclvar.c \ + $(TOP)/src/test_thread.c \ + $(TOP)/src/test_wsd.c \ + +#TESTSRC += $(TOP)/ext/fts2/fts2_tokenizer.c +#TESTSRC += $(TOP)/ext/fts3/fts3_tokenizer.c + +TESTSRC2 = \ + $(TOP)/src/attach.c $(TOP)/src/backup.c $(TOP)/src/btree.c \ + $(TOP)/src/build.c $(TOP)/src/date.c \ + $(TOP)/src/expr.c $(TOP)/src/func.c $(TOP)/src/insert.c $(TOP)/src/os.c \ + $(TOP)/src/os_os2.c $(TOP)/src/os_unix.c $(TOP)/src/os_win.c \ + $(TOP)/src/pager.c $(TOP)/src/pragma.c $(TOP)/src/prepare.c \ + $(TOP)/src/printf.c $(TOP)/src/random.c $(TOP)/src/pcache.c \ + $(TOP)/src/pcache1.c $(TOP)/src/select.c $(TOP)/src/tokenize.c \ + $(TOP)/src/utf.c $(TOP)/src/util.c $(TOP)/src/vdbeapi.c $(TOP)/src/vdbeaux.c \ + $(TOP)/src/vdbe.c $(TOP)/src/vdbemem.c $(TOP)/src/where.c parse.c \ + $(TOP)/ext/fts3/fts3.c $(TOP)/ext/fts3/fts3_expr.c \ + $(TOP)/ext/fts3/fts3_tokenizer.c \ + $(TOP)/ext/async/sqlite3async.c + +# Header files used by all library source files. 
+# +HDR = \ + $(TOP)/src/btree.h \ + $(TOP)/src/btreeInt.h \ + $(TOP)/src/hash.h \ + $(TOP)/src/hwtime.h \ + keywordhash.h \ + $(TOP)/src/mutex.h \ + opcodes.h \ + $(TOP)/src/os.h \ + $(TOP)/src/os_common.h \ + $(TOP)/src/pager.h \ + $(TOP)/src/pcache.h \ + parse.h \ + sqlite3.h \ + $(TOP)/src/sqlite3ext.h \ + $(TOP)/src/sqliteInt.h \ + $(TOP)/src/sqliteLimit.h \ + $(TOP)/src/vdbe.h \ + $(TOP)/src/vdbeInt.h + +# Header files used by extensions +# +EXTHDR += \ + $(TOP)/ext/fts1/fts1.h \ + $(TOP)/ext/fts1/fts1_hash.h \ + $(TOP)/ext/fts1/fts1_tokenizer.h +EXTHDR += \ + $(TOP)/ext/fts2/fts2.h \ + $(TOP)/ext/fts2/fts2_hash.h \ + $(TOP)/ext/fts2/fts2_tokenizer.h +EXTHDR += \ + $(TOP)/ext/fts3/fts3.h \ + $(TOP)/ext/fts3/fts3_expr.h \ + $(TOP)/ext/fts3/fts3_hash.h \ + $(TOP)/ext/fts3/fts3_tokenizer.h +EXTHDR += \ + $(TOP)/ext/rtree/rtree.h +EXTHDR += \ + $(TOP)/ext/icu/sqliteicu.h + +# This is the default Makefile target. The objects listed here +# are what get build when you type just "make" with no arguments. +# +all: sqlite3.h libsqlite3.a sqlite3$(EXE) + +libsqlite3.a: $(LIBOBJ) + $(AR) libsqlite3.a $(LIBOBJ) + $(RANLIB) libsqlite3.a + +$(SHPREFIX)sqlite3.$(SO): $(LIBOBJ) + $(MKSHLIB) -o $(SHPREFIX)sqlite3.$(SO) $(LIBOBJ) $(TLIBS_SHARED) + +sqlite3$(EXE): $(TOP)/src/shell.c libsqlite3.a sqlite3.h + $(TCCX) $(READLINE_FLAGS) -o sqlite3$(EXE) \ + $(TOP)/src/shell.c \ + $(LIBREADLINE) $(TLIBS) $(THREADLIB) -L. -lsqlite3 + +# This target creates a directory named "tsrc" and fills it with +# copies of all of the C source code and header files needed to +# build on the target system. Some of the C source code and header +# files are automatically generated. This target takes care of +# all that automatic generation. +# +target_source: $(SRC) + rm -rf tsrc + mkdir tsrc + cp -f $(SRC) tsrc + rm tsrc/sqlite.h.in tsrc/parse.y + touch target_source + +sqlite3.c: target_source $(TOP)/tool/mksqlite3c.tcl + tclsh $(TOP)/tool/mksqlite3c.tcl + cp sqlite3.c tclsqlite3.c + cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c + +fts2amal.c: target_source $(TOP)/ext/fts2/mkfts2amal.tcl + tclsh $(TOP)/ext/fts2/mkfts2amal.tcl + +fts3amal.c: target_source $(TOP)/ext/fts3/mkfts3amal.tcl + tclsh $(TOP)/ext/fts3/mkfts3amal.tcl + +# Rules to build the LEMON compiler generator +# +lemon: $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c + $(BCC) -o lemon $(TOP)/tool/lemon.c + cp $(TOP)/tool/lempar.c . + +# Rules to build individual *.o files from generated *.c files. This +# applies to: +# +# parse.o +# opcodes.o +# +%.o: %.c $(HDR) + $(TCCX_SHARED) -c $< + +# Rules to build individual *.o files from files in the src directory. +# +%.o: $(TOP)/src/%.c $(HDR) + $(TCCX_SHARED) -c $< + +tclsqlite.o: $(TOP)/src/tclsqlite.c $(HDR) + $(TCCX_SHARED) $(TCL_FLAGS) -c $(TOP)/src/tclsqlite.c + + + +# Rules to build opcodes.c and opcodes.h +# +opcodes.c: opcodes.h $(TOP)/mkopcodec.awk + sort -n -b -k 3 opcodes.h | $(NAWK) -f $(TOP)/mkopcodec.awk >opcodes.c + +opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/mkopcodeh.awk + cat parse.h $(TOP)/src/vdbe.c | \ + $(NAWK) -f $(TOP)/mkopcodeh.awk >opcodes.h + +# Rules to build parse.c and parse.h - the outputs of lemon. +# +parse.h: parse.c + +parse.c: $(TOP)/src/parse.y lemon $(TOP)/addopcodes.awk + cp $(TOP)/src/parse.y . 
+ rm -f parse.h + ./lemon $(OPTS) parse.y + mv parse.h parse.h.temp + awk -f $(TOP)/addopcodes.awk parse.h.temp >parse.h + +sqlite3.h: $(TOP)/src/sqlite.h.in + sed -e s/--VERS--/`cat ${TOP}/VERSION`/ \ + -e s/--VERSION-NUMBER--/`cat ${TOP}/VERSION | sed 's/[^0-9]/ /g' | $(NAWK) '{printf "%d%03d%03d",$$1,$$2,$$3}'`/ \ + $(TOP)/src/sqlite.h.in >sqlite3.h + +keywordhash.h: $(TOP)/tool/mkkeywordhash.c + $(BCC) -o mkkeywordhash $(OPTS) $(TOP)/tool/mkkeywordhash.c + ./mkkeywordhash >keywordhash.h + + + +# Rules to build the extension objects. +# +icu.o: $(TOP)/ext/icu/icu.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/icu/icu.c + +fts2.o: $(TOP)/ext/fts2/fts2.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2.c + +fts2_hash.o: $(TOP)/ext/fts2/fts2_hash.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_hash.c + +fts2_icu.o: $(TOP)/ext/fts2/fts2_icu.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_icu.c + +fts2_porter.o: $(TOP)/ext/fts2/fts2_porter.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_porter.c + +fts2_tokenizer.o: $(TOP)/ext/fts2/fts2_tokenizer.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer.c + +fts2_tokenizer1.o: $(TOP)/ext/fts2/fts2_tokenizer1.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer1.c + +fts3.o: $(TOP)/ext/fts3/fts3.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c + +fts3_expr.o: $(TOP)/ext/fts3/fts3_expr.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_expr.c + +fts3_hash.o: $(TOP)/ext/fts3/fts3_hash.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_hash.c + +fts3_icu.o: $(TOP)/ext/fts3/fts3_icu.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_icu.c + +fts3_porter.o: $(TOP)/ext/fts3/fts3_porter.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_porter.c + +fts3_tokenizer.o: $(TOP)/ext/fts3/fts3_tokenizer.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer.c + +fts3_tokenizer1.o: $(TOP)/ext/fts3/fts3_tokenizer1.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer1.c + +rtree.o: $(TOP)/ext/rtree/rtree.c $(HDR) $(EXTHDR) + $(TCCX_SHARED) -DSQLITE_CORE -c $(TOP)/ext/rtree/rtree.c + + +# Rules for building test programs and for running tests +# +tclsqlite3: $(TOP)/src/tclsqlite.c libsqlite3.a + $(TCCX_SHARED) $(TCL_FLAGS) -DTCLSH=1 -o tclsqlite3 \ + $(TOP)/src/tclsqlite.c libsqlite3.a $(LIBTCL) $(THREADLIB) + + +# Rules to build the 'testfixture' application. 
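The TESTFIXTURE_FLAGS lines that follow (like the testfixture rules in Makefile.in above) pass -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE. The SQLITE_PRIVATE override matters when linking the test files against the amalgamation, where SQLITE_PRIVATE is normally defined as static and internal helpers would otherwise be invisible to the separately compiled test*.c files; SQLITE_CORE likewise tells the bundled extensions they are compiled into the library rather than as loadable modules. A rough sketch of the linkage effect, with a made-up helper name standing in for a real internal routine:

    #include <stdio.h>

    /* Inside the amalgamation this macro defaults to "static"; building
    ** with -DSQLITE_PRIVATE="" keeps the symbol at external linkage so
    ** test code in another translation unit could declare it with
    ** "extern int internalHelper(int);" and call it directly. */
    #ifndef SQLITE_PRIVATE
    # define SQLITE_PRIVATE static
    #endif

    SQLITE_PRIVATE int internalHelper(int x){   /* hypothetical internal routine */
      return x+1;
    }

    int main(void){
      printf("%d\n", internalHelper(41));   /* prints 42 either way */
      return 0;
    }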
+# +TESTFIXTURE_FLAGS = -DTCLSH=1 -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 +TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE + +testfixture$(EXE): $(TESTSRC2) libsqlite3.a $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TESTSRC2) $(TOP)/src/tclsqlite.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) libsqlite3.a + +amalgamation-testfixture$(EXE): sqlite3.c $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) + +fts3-testfixture$(EXE): sqlite3.c fts3amal.c $(TESTSRC) $(TOP)/src/tclsqlite.c + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + -DSQLITE_ENABLE_FTS3=1 \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c fts3amal.c \ + -o testfixture$(EXE) $(LIBTCL) $(THREADLIB) + +fulltest: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/all.test + +soaktest: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/all.test -soak 1 + +test: testfixture$(EXE) sqlite3$(EXE) + ./testfixture$(EXE) $(TOP)/test/veryquick.test + +sqlite3_analyzer$(EXE): $(TOP)/src/tclsqlite.c sqlite3.c $(TESTSRC) \ + $(TOP)/tool/spaceanal.tcl + sed \ + -e '/^#/d' \ + -e 's,\\,\\\\,g' \ + -e 's,",\\",g' \ + -e 's,^,",' \ + -e 's,$$,\\n",' \ + $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h + $(TCCX) $(TCL_FLAGS) $(TESTFIXTURE_FLAGS) \ + -DTCLSH=2 -DSQLITE_TEST=1 -DSQLITE_DEBUG=1 -DSQLITE_PRIVATE="" \ + $(TESTSRC) $(TOP)/src/tclsqlite.c sqlite3.c \ + -o sqlite3_analyzer$(EXE) \ + $(LIBTCL) $(THREADLIB) + +TEST_EXTENSION = $(SHPREFIX)testloadext.$(SO) +$(TEST_EXTENSION): $(TOP)/src/test_loadext.c + $(MKSHLIB) $(TOP)/src/test_loadext.c -o $(TEST_EXTENSION) + +extensiontest: testfixture$(EXE) $(TEST_EXTENSION) + ./testfixture$(EXE) $(TOP)/test/loadext.test + +clean: + rm -f *.o sqlite3$(EXE) libsqlite3.a sqlite3.h opcodes.* + rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h + rm -f $(PUBLISH) + rm -f *.da *.bb *.bbg gmon.out + rm -rf tsrc target_source + rm -f testloadext.dll libtestloadext.so + rm -f sqlite3.c fts?amal.c tclsqlite3.c + rm -f $(SHPREFIX)sqlite3.$(SO) diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/mkdll.sh /tmp/3ARg2Grji7/sqlite3-3.6.16/mkdll.sh --- sqlite3-3.4.2/mkdll.sh 2007-06-28 13:46:18.000000000 +0100 +++ sqlite3-3.6.16/mkdll.sh 2009-06-12 03:37:46.000000000 +0100 @@ -10,8 +10,12 @@ PATH=$PATH:/opt/mingw/bin TCLDIR=/home/drh/tcltk/846/win/846win TCLSTUBLIB=$TCLDIR/libtcl84stub.a -OPTS='-DUSE_TCL_STUBS=1 -DTHREADSAFE=1 -DBUILD_sqlite=1 -DOS_WIN=1' -CC="i386-mingw32msvc-gcc -O2 $OPTS -Itsrc -I$TCLDIR" +OPTS='-DUSE_TCL_STUBS=1 -DBUILD_sqlite=1 -DSQLITE_OS_WIN=1' +OPTS="$OPTS -DSQLITE_THREADSAFE=1" +OPTS="$OPTS -DSQLITE_ENABLE_FTS3=1" +OPTS="$OPTS -DSQLITE_ENABLE_RTREE=1" +OPTS="$OPTS -DSQLITE_ENABLE_COLUMN_METADATA=1" +CC="i386-mingw32msvc-gcc -Os $OPTS -Itsrc -I$TCLDIR" NM="i386-mingw32msvc-nm" CMD="$CC -c sqlite3.c" echo $CMD diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/mkextw.sh /tmp/3ARg2Grji7/sqlite3-3.6.16/mkextw.sh --- sqlite3-3.4.2/mkextw.sh 2007-07-20 14:51:36.000000000 +0100 +++ sqlite3-3.6.16/mkextw.sh 2009-05-05 04:39:51.000000000 +0100 @@ -4,7 +4,7 @@ # make fts2amal.c PATH=$PATH:/opt/mingw/bin -OPTS='-DTHREADSAFE=1 -DBUILD_sqlite=1 -DOS_WIN=1' +OPTS='-DTHREADSAFE=1 -DBUILD_sqlite=1 -DSQLITE_OS_WIN=1' CC="i386-mingw32msvc-gcc -O2 $OPTS -Itsrc" NM="i386-mingw32msvc-nm" CMD="$CC -c fts2amal.c" diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/mkopcodec.awk 
/tmp/3ARg2Grji7/sqlite3-3.6.16/mkopcodec.awk --- sqlite3-3.4.2/mkopcodec.awk 2005-01-20 23:23:29.000000000 +0000 +++ sqlite3-3.6.16/mkopcodec.awk 2009-05-05 04:39:51.000000000 +0100 @@ -15,14 +15,17 @@ printf " || !defined(NDEBUG)" printf " || defined(VDBE_PROFILE)" print " || defined(SQLITE_DEBUG)" - print "const char *const sqlite3OpcodeNames[] = { \"?\"," + print "const char *sqlite3OpcodeName(int i){" + print " static const char *const azName[] = { \"?\"," } /define OP_/ { sub("OP_","",$2) i++ - printf " /* %3d */ \"%s\",\n", $3, $2 + printf " /* %3d */ \"%s\",\n", $3, $2 } END { - print "};" + print " };" + print " return azName[i];" + print "}" print "#endif" } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/mkopcodeh.awk /tmp/3ARg2Grji7/sqlite3-3.6.16/mkopcodeh.awk --- sqlite3-3.4.2/mkopcodeh.awk 2007-03-29 19:39:30.000000000 +0100 +++ sqlite3-3.6.16/mkopcodeh.awk 2009-05-05 04:39:51.000000000 +0100 @@ -41,7 +41,7 @@ # Remember the TK_ values from the parse.h file /^#define TK_/ { - tk[$2] = $3 + tk[$2] = 0+$3 } # Scan for "case OP_aaaa:" lines in the vdbe.c file @@ -50,6 +50,12 @@ sub(/:/,"",name) sub("\r","",name) op[name] = -1 + jump[name] = 0 + out2_prerelease[name] = 0 + in1[name] = 0 + in2[name] = 0 + in3[name] = 0 + out3[name] = 0 for(i=3; i /* ** The code in this file only exists if we are not omitting the @@ -39,7 +38,7 @@ */ static void renameTableFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ unsigned char const *zSql = sqlite3_value_text(argv[0]); @@ -51,9 +50,13 @@ int len = 0; char *zRet; + sqlite3 *db = sqlite3_context_db_handle(context); + + UNUSED_PARAMETER(NotUsed); + /* The principle used to locate the table name in the CREATE TABLE - ** statement is that the table name is the first token that is immediatedly - ** followed by a left parenthesis - TK_LP - or "USING" TK_USING. + ** statement is that the table name is the first non-space token that + ** is immediately followed by a TK_LP or TK_USING token. */ if( zSql ){ do { @@ -63,11 +66,11 @@ } /* Store the token that zCsr points to in tname. */ - tname.z = zCsr; + tname.z = (char*)zCsr; tname.n = len; /* Advance zCsr to the next token. Store that token type in 'token', - ** and it's length in 'len' (to be used next iteration of this loop). + ** and its length in 'len' (to be used next iteration of this loop). */ do { zCsr += len; @@ -76,9 +79,9 @@ assert( len>0 ); } while( token!=TK_LP && token!=TK_USING ); - zRet = sqlite3MPrintf("%.*s%Q%s", tname.z - zSql, zSql, + zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", ((u8*)tname.z) - zSql, zSql, zTableName, tname.z+tname.n); - sqlite3_result_text(context, zRet, -1, sqlite3FreeX); + sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); } } @@ -92,7 +95,7 @@ */ static void renameTriggerFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ unsigned char const *zSql = sqlite3_value_text(argv[0]); @@ -104,6 +107,9 @@ unsigned char const *zCsr = zSql; int len = 0; char *zRet; + sqlite3 *db = sqlite3_context_db_handle(context); + + UNUSED_PARAMETER(NotUsed); /* The principle used to locate the table name in the CREATE TRIGGER ** statement is that the table name is the first token that is immediatedly @@ -119,11 +125,11 @@ } /* Store the token that zCsr points to in tname. */ - tname.z = zCsr; + tname.z = (char*)zCsr; tname.n = len; /* Advance zCsr to the next token. Store that token type in 'token', - ** and it's length in 'len' (to be used next iteration of this loop). 
+ ** and its length in 'len' (to be used next iteration of this loop). */ do { zCsr += len; @@ -149,9 +155,9 @@ /* Variable tname now contains the token that is the old table-name ** in the CREATE TRIGGER statement. */ - zRet = sqlite3MPrintf("%.*s%Q%s", tname.z - zSql, zSql, + zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", ((u8*)tname.z) - zSql, zSql, zTableName, tname.z+tname.n); - sqlite3_result_text(context, zRet, -1, sqlite3FreeX); + sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); } } #endif /* !SQLITE_OMIT_TRIGGER */ @@ -160,22 +166,12 @@ ** Register built-in functions used to help implement ALTER TABLE */ void sqlite3AlterFunctions(sqlite3 *db){ - static const struct { - char *zName; - signed char nArg; - void (*xFunc)(sqlite3_context*,int,sqlite3_value **); - } aFuncs[] = { - { "sqlite_rename_table", 2, renameTableFunc}, + sqlite3CreateFunc(db, "sqlite_rename_table", 2, SQLITE_UTF8, 0, + renameTableFunc, 0, 0); #ifndef SQLITE_OMIT_TRIGGER - { "sqlite_rename_trigger", 2, renameTriggerFunc}, + sqlite3CreateFunc(db, "sqlite_rename_trigger", 2, SQLITE_UTF8, 0, + renameTriggerFunc, 0, 0); #endif - }; - int i; - - for(i=0; ipSchema!=pTempSchema ){ - for( pTrig=pTab->pTrigger; pTrig; pTrig=pTrig->pNext ){ + sqlite3 *db = pParse->db; + for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ if( pTrig->pSchema==pTempSchema ){ if( !zWhere ){ - zWhere = sqlite3MPrintf("name=%Q", pTrig->name); + zWhere = sqlite3MPrintf(db, "name=%Q", pTrig->name); }else{ tmp = zWhere; - zWhere = sqlite3MPrintf("%s OR name=%Q", zWhere, pTrig->name); - sqliteFree(tmp); + zWhere = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, pTrig->name); + sqlite3DbFree(db, tmp); } } } @@ -228,33 +225,34 @@ #endif v = sqlite3GetVdbe(pParse); - if( !v ) return; + if( NEVER(v==0) ) return; + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); assert( iDb>=0 ); #ifndef SQLITE_OMIT_TRIGGER /* Drop any table triggers from the internal schema. */ - for(pTrig=pTab->pTrigger; pTrig; pTrig=pTrig->pNext){ + for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); assert( iTrigDb==iDb || iTrigDb==1 ); - sqlite3VdbeOp3(v, OP_DropTrigger, iTrigDb, 0, pTrig->name, 0); + sqlite3VdbeAddOp4(v, OP_DropTrigger, iTrigDb, 0, 0, pTrig->name, 0); } #endif /* Drop the table and index from the internal schema */ - sqlite3VdbeOp3(v, OP_DropTable, iDb, 0, pTab->zName, 0); + sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); /* Reload the table, index and permanent trigger schemas. */ - zWhere = sqlite3MPrintf("tbl_name=%Q", zName); + zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName); if( !zWhere ) return; - sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, zWhere, P3_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC); #ifndef SQLITE_OMIT_TRIGGER /* Now, if the table is not stored in the temp database, reload any temp ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined. 
*/ if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ - sqlite3VdbeOp3(v, OP_ParseSchema, 1, 0, zWhere, P3_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_ParseSchema, 1, 0, 0, zWhere, P4_DYNAMIC); } #endif } @@ -281,16 +279,17 @@ #endif int isVirtualRename = 0; /* True if this is a v-table with an xRename() */ - if( sqlite3MallocFailed() ) goto exit_rename_table; + if( NEVER(db->mallocFailed) ) goto exit_rename_table; assert( pSrc->nSrc==1 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); - pTab = sqlite3LocateTable(pParse, pSrc->a[0].zName, pSrc->a[0].zDatabase); + pTab = sqlite3LocateTable(pParse, 0, pSrc->a[0].zName, pSrc->a[0].zDatabase); if( !pTab ) goto exit_rename_table; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); zDb = db->aDb[iDb].zName; /* Get a NULL terminated version of the new table name. */ - zName = sqlite3NameFromToken(pName); + zName = sqlite3NameFromToken(db, pName); if( !zName ) goto exit_rename_table; /* Check that a table or index named 'zName' does not already exist @@ -305,7 +304,9 @@ /* Make sure it is not a system table being altered, or a reserved name ** that the table is being renamed to. */ - if( strlen(pTab->zName)>6 && 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) ){ + if( sqlite3Strlen30(pTab->zName)>6 + && 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) + ){ sqlite3ErrorMsg(pParse, "table %s may not be altered", pTab->zName); goto exit_rename_table; } @@ -313,6 +314,13 @@ goto exit_rename_table; } +#ifndef SQLITE_OMIT_VIEW + if( pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName); + goto exit_rename_table; + } +#endif + #ifndef SQLITE_OMIT_AUTHORIZATION /* Invoke the authorization callback. */ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ @@ -339,7 +347,7 @@ goto exit_rename_table; } sqlite3BeginWriteOperation(pParse, isVirtualRename, iDb); - sqlite3ChangeCookie(db, v, iDb); + sqlite3ChangeCookie(pParse, iDb); /* If this is a virtual table, invoke the xRename() function if ** one is defined. The xRename() callback will modify the names @@ -348,8 +356,9 @@ */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( isVirtualRename ){ - sqlite3VdbeOp3(v, OP_String8, 0, 0, zName, 0); - sqlite3VdbeOp3(v, OP_VRename, 0, 0, (const char*)pTab->pVtab, P3_VTAB); + int i = ++pParse->nMem; + sqlite3VdbeAddOp4(v, OP_String8, 0, i, 0, zName, 0); + sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pTab->pVtab, P4_VTAB); } #endif @@ -371,7 +380,7 @@ "name = CASE " "WHEN type='table' THEN %Q " "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN " - "'sqlite_autoindex_' || %Q || substr(name,%d+18,10) " + "'sqlite_autoindex_' || %Q || substr(name,%d+18) " "ELSE name END " "WHERE tbl_name=%Q AND " "(type='table' OR type='index' OR type='trigger');", @@ -388,7 +397,7 @@ */ if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){ sqlite3NestedParse(pParse, - "UPDATE %Q.sqlite_sequence set name = %Q WHERE name = %Q", + "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q", zDb, zName, pTab->zName); } #endif @@ -404,7 +413,7 @@ "sql = sqlite_rename_trigger(sql, %Q), " "tbl_name = %Q " "WHERE %s;", zName, zName, zWhere); - sqliteFree(zWhere); + sqlite3DbFree(db, zWhere); } #endif @@ -412,12 +421,37 @@ reloadTableSchema(pParse, pTab, zName); exit_rename_table: - sqlite3SrcListDelete(pSrc); - sqliteFree(zName); + sqlite3SrcListDelete(db, pSrc); + sqlite3DbFree(db, zName); } /* +** Generate code to make sure the file format number is at least minFormat. +** The generated code will increase the file format number if necessary. 
+*/ +void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minFormat){ + Vdbe *v; + v = sqlite3GetVdbe(pParse); + /* The VDBE should have been allocated before this routine is called. + ** If that allocation failed, we would have quit before reaching this + ** point */ + if( ALWAYS(v) ){ + int r1 = sqlite3GetTempReg(pParse); + int r2 = sqlite3GetTempReg(pParse); + int j1; + sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT); + sqlite3VdbeUsesBtree(v, iDb); + sqlite3VdbeAddOp2(v, OP_Integer, minFormat, r2); + j1 = sqlite3VdbeAddOp3(v, OP_Ge, r2, 0, r1); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, r2); + sqlite3VdbeJumpHere(v, j1); + sqlite3ReleaseTempReg(pParse, r1); + sqlite3ReleaseTempReg(pParse, r2); + } +} + +/* ** This function is called after an "ALTER TABLE ... ADD" statement ** has been parsed. Argument pColDef contains the text of the new ** column definition. @@ -434,17 +468,20 @@ char *zCol; /* Null-terminated column definition */ Column *pCol; /* The new column */ Expr *pDflt; /* Default value for the new column */ + sqlite3 *db; /* The database connection; */ - if( pParse->nErr ) return; + db = pParse->db; + if( pParse->nErr || db->mallocFailed ) return; pNew = pParse->pNewTable; assert( pNew ); - iDb = sqlite3SchemaToIndex(pParse->db, pNew->pSchema); - zDb = pParse->db->aDb[iDb].zName; - zTab = pNew->zName; + assert( sqlite3BtreeHoldsAllMutexes(db) ); + iDb = sqlite3SchemaToIndex(db, pNew->pSchema); + zDb = db->aDb[iDb].zName; + zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */ pCol = &pNew->aCol[pNew->nCol-1]; pDflt = pCol->pDflt; - pTab = sqlite3FindTable(pParse->db, zTab, zDb); + pTab = sqlite3FindTable(db, zTab, zDb); assert( pTab ); #ifndef SQLITE_OMIT_AUTHORIZATION @@ -485,8 +522,8 @@ */ if( pDflt ){ sqlite3_value *pVal; - if( sqlite3ValueFromExpr(pDflt, SQLITE_UTF8, SQLITE_AFF_NONE, &pVal) ){ - /* malloc() has failed */ + if( sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_NONE, &pVal) ){ + db->mallocFailed = 1; return; } if( !pVal ){ @@ -497,20 +534,20 @@ } /* Modify the CREATE TABLE statement. */ - zCol = sqliteStrNDup((char*)pColDef->z, pColDef->n); + zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); if( zCol ){ char *zEnd = &zCol[pColDef->n-1]; - while( (zEnd>zCol && *zEnd==';') || isspace(*(unsigned char *)zEnd) ){ + while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ *zEnd-- = '\0'; } sqlite3NestedParse(pParse, - "UPDATE %Q.%s SET " - "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d,length(sql)) " + "UPDATE \"%w\".%s SET " + "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " "WHERE type = 'table' AND name = %Q", zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1, zTab ); - sqliteFree(zCol); + sqlite3DbFree(db, zCol); } /* If the default value of the new column is NULL, then set the file @@ -545,11 +582,13 @@ int iDb; int i; int nAlloc; + sqlite3 *db = pParse->db; /* Look up the table being altered. 
*/ assert( pParse->pNewTable==0 ); - if( sqlite3MallocFailed() ) goto exit_begin_add_column; - pTab = sqlite3LocateTable(pParse, pSrc->a[0].zName, pSrc->a[0].zDatabase); + assert( sqlite3BtreeHoldsAllMutexes(db) ); + if( db->mallocFailed ) goto exit_begin_add_column; + pTab = sqlite3LocateTable(pParse, 0, pSrc->a[0].zName, pSrc->a[0].zDatabase); if( !pTab ) goto exit_begin_add_column; #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -566,33 +605,40 @@ } assert( pTab->addColOffset>0 ); - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); /* Put a copy of the Table struct in Parse.pNewTable for the - ** sqlite3AddColumn() function and friends to modify. + ** sqlite3AddColumn() function and friends to modify. But modify + ** the name by adding an "sqlite_altertab_" prefix. By adding this + ** prefix, we insure that the name will not collide with an existing + ** table because user table are not allowed to have the "sqlite_" + ** prefix on their name. */ - pNew = (Table *)sqliteMalloc(sizeof(Table)); + pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table)); if( !pNew ) goto exit_begin_add_column; pParse->pNewTable = pNew; pNew->nRef = 1; + pNew->dbMem = pTab->dbMem; pNew->nCol = pTab->nCol; assert( pNew->nCol>0 ); nAlloc = (((pNew->nCol-1)/8)*8)+8; assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 ); - pNew->aCol = (Column *)sqliteMalloc(sizeof(Column)*nAlloc); - pNew->zName = sqliteStrDup(pTab->zName); + pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc); + pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName); if( !pNew->aCol || !pNew->zName ){ + db->mallocFailed = 1; goto exit_begin_add_column; } memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); for(i=0; inCol; i++){ Column *pCol = &pNew->aCol[i]; - pCol->zName = sqliteStrDup(pCol->zName); + pCol->zName = sqlite3DbStrDup(db, pCol->zName); pCol->zColl = 0; pCol->zType = 0; pCol->pDflt = 0; + pCol->zDflt = 0; } - pNew->pSchema = pParse->db->aDb[iDb].pSchema; + pNew->pSchema = db->aDb[iDb].pSchema; pNew->addColOffset = pTab->addColOffset; pNew->nRef = 1; @@ -600,10 +646,10 @@ sqlite3BeginWriteOperation(pParse, 0, iDb); v = sqlite3GetVdbe(pParse); if( !v ) goto exit_begin_add_column; - sqlite3ChangeCookie(pParse->db, v, iDb); + sqlite3ChangeCookie(pParse, iDb); exit_begin_add_column: - sqlite3SrcListDelete(pSrc); + sqlite3SrcListDelete(db, pSrc); return; } #endif /* SQLITE_ALTER_TABLE */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/analyze.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/analyze.c --- sqlite3-3.4.2/src/analyze.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/analyze.c 2009-06-25 12:23:18.000000000 +0100 @@ -11,7 +11,7 @@ ************************************************************************* ** This file contains code associated with the ANALYZE command. ** -** @(#) $Id: analyze.c,v 1.19 2007/06/20 13:37:31 drh Exp $ +** @(#) $Id: analyze.c,v 1.52 2009/04/16 17:45:48 drh Exp $ */ #ifndef SQLITE_OMIT_ANALYZE #include "sqliteInt.h" @@ -33,21 +33,25 @@ sqlite3 *db = pParse->db; Db *pDb; int iRootPage; + u8 createStat1 = 0; Table *pStat; Vdbe *v = sqlite3GetVdbe(pParse); if( v==0 ) return; + assert( sqlite3BtreeHoldsAllMutexes(db) ); + assert( sqlite3VdbeDb(v)==db ); pDb = &db->aDb[iDb]; if( (pStat = sqlite3FindTable(db, "sqlite_stat1", pDb->zName))==0 ){ /* The sqlite_stat1 tables does not exist. Create it. ** Note that a side-effect of the CREATE TABLE statement is to leave - ** the rootpage of the new table on the top of the stack. 
This is + ** the rootpage of the new table in register pParse->regRoot. This is ** important because the OpenWrite opcode below will be needing it. */ sqlite3NestedParse(pParse, "CREATE TABLE %Q.sqlite_stat1(tbl,idx,stat)", pDb->zName ); - iRootPage = 0; /* Cause rootpage to be taken from top of stack */ + iRootPage = pParse->regRoot; + createStat1 = 1; /* Cause rootpage to be taken from top of stack */ }else if( zWhere ){ /* The sqlite_stat1 table exists. Delete all entries associated with ** the table zWhere. */ @@ -59,7 +63,7 @@ }else{ /* The sqlite_stat1 table already exists. Delete all rows. */ iRootPage = pStat->tnum; - sqlite3VdbeAddOp(v, OP_Clear, pStat->tnum, iDb); + sqlite3VdbeAddOp2(v, OP_Clear, pStat->tnum, iDb); } /* Open the sqlite_stat1 table for writing. Unless it was created @@ -67,12 +71,12 @@ ** If this vdbe did create the sqlite_stat1 table, then it must have ** already obtained a schema-lock, making the write-lock redundant. */ - if( iRootPage>0 ){ + if( !createStat1 ){ sqlite3TableLock(pParse, iDb, iRootPage, 1, "sqlite_stat1"); } - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - sqlite3VdbeAddOp(v, OP_OpenWrite, iStatCur, iRootPage); - sqlite3VdbeAddOp(v, OP_SetNumColumns, iStatCur, 3); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iStatCur, iRootPage, iDb); + sqlite3VdbeChangeP4(v, -1, (char *)3, P4_INT32); + sqlite3VdbeChangeP5(v, createStat1); } /* @@ -82,11 +86,11 @@ static void analyzeOneTable( Parse *pParse, /* Parser context */ Table *pTab, /* Table whose indices are to be analyzed */ - int iStatCur, /* Cursor that writes to the sqlite_stat1 table */ + int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ int iMem /* Available memory locations begin here */ ){ Index *pIdx; /* An index to being analyzed */ - int iIdxCur; /* Cursor number for index being analyzed */ + int iIdxCur; /* Index of VdbeCursor for index being analyzed */ int nCol; /* Number of columns in the index */ Vdbe *v; /* The virtual machine being built up */ int i; /* Loop counter */ @@ -96,11 +100,11 @@ int iDb; /* Index of database containing pTab */ v = sqlite3GetVdbe(pParse); - if( v==0 || pTab==0 || pTab->pIndex==0 ){ + if( v==0 || NEVER(pTab==0) || pTab->pIndex==0 ){ /* Do no analysis for tables that have no indices */ return; } - + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); assert( iDb>=0 ); #ifndef SQLITE_OMIT_AUTHORIZATION @@ -113,22 +117,29 @@ /* Establish a read-lock on the table at the shared-cache level. 
*/ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - iIdxCur = pParse->nTab; + iIdxCur = pParse->nTab++; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); + int regFields; /* Register block for building records */ + int regRec; /* Register holding completed record */ + int regTemp; /* Temporary use register */ + int regCol; /* Content of a column from the table being analyzed */ + int regRowid; /* Rowid for the inserted record */ + int regF2; /* Open a cursor to the index to be analyzed */ assert( iDb==sqlite3SchemaToIndex(pParse->db, pIdx->pSchema) ); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - VdbeComment((v, "# %s", pIdx->zName)); - sqlite3VdbeOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, - (char *)pKey, P3_KEYINFO_HANDOFF); nCol = pIdx->nColumn; - if( iMem+nCol*2>=pParse->nMem ){ - pParse->nMem = iMem+nCol*2+1; + sqlite3VdbeAddOp4(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb, + (char *)pKey, P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pIdx->zName)); + regFields = iMem+nCol*2; + regTemp = regRowid = regCol = regFields+3; + regRec = regCol+1; + if( regRec>pParse->nMem ){ + pParse->nMem = regRec; } - sqlite3VdbeAddOp(v, OP_SetNumColumns, iIdxCur, nCol+1); /* Memory cells are used as follows: ** @@ -144,40 +155,40 @@ ** are initialized to NULL. */ for(i=0; i<=nCol; i++){ - sqlite3VdbeAddOp(v, OP_MemInt, 0, iMem+i); + sqlite3VdbeAddOp2(v, OP_Integer, 0, iMem+i); } for(i=0; i0 then it is always the case the D>0 so division by zero ** is never possible. */ - sqlite3VdbeAddOp(v, OP_MemLoad, iMem, 0); - addr = sqlite3VdbeAddOp(v, OP_IfNot, 0, 0); - sqlite3VdbeAddOp(v, OP_NewRowid, iStatCur, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pIdx->zName, 0); - sqlite3VdbeAddOp(v, OP_MemLoad, iMem, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, " ", 0); + addr = sqlite3VdbeAddOp1(v, OP_IfNot, iMem); + sqlite3VdbeAddOp4(v, OP_String8, 0, regFields, 0, pTab->zName, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, regFields+1, 0, pIdx->zName, 0); + regF2 = regFields+2; + sqlite3VdbeAddOp2(v, OP_SCopy, iMem, regF2); for(i=0; inTab++; openStatTable(pParse, iDb, iStatCur, 0); - iMem = pParse->nMem; + iMem = pParse->nMem+1; for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ Table *pTab = (Table*)sqliteHashData(k); analyzeOneTable(pParse, pTab, iStatCur, iMem); @@ -258,11 +264,12 @@ int iStatCur; assert( pTab!=0 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); sqlite3BeginWriteOperation(pParse, 0, iDb); iStatCur = pParse->nTab++; openStatTable(pParse, iDb, iStatCur, pTab->zName); - analyzeOneTable(pParse, pTab, iStatCur, pParse->nMem); + analyzeOneTable(pParse, pTab, iStatCur, pParse->nMem+1); loadAnalysis(pParse, iDb); } @@ -288,27 +295,31 @@ /* Read the database schema. If an error occurs, leave an error message ** and code in pParse and return NULL. 
*/ + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ return; } + assert( pName2!=0 || pName1==0 ); if( pName1==0 ){ /* Form 1: Analyze everything */ for(i=0; inDb; i++){ if( i==1 ) continue; /* Do not analyze the TEMP database */ analyzeDatabase(pParse, i); } - }else if( pName2==0 || pName2->n==0 ){ + }else if( pName2->n==0 ){ /* Form 2: Analyze the database or table named */ iDb = sqlite3FindDb(db, pName1); if( iDb>=0 ){ analyzeDatabase(pParse, iDb); }else{ - z = sqlite3NameFromToken(pName1); - pTab = sqlite3LocateTable(pParse, z, 0); - sqliteFree(z); - if( pTab ){ - analyzeTable(pParse, pTab); + z = sqlite3NameFromToken(db, pName1); + if( z ){ + pTab = sqlite3LocateTable(pParse, 0, z, 0); + sqlite3DbFree(db, z); + if( pTab ){ + analyzeTable(pParse, pTab); + } } } }else{ @@ -316,10 +327,10 @@ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName); if( iDb>=0 ){ zDb = db->aDb[iDb].zName; - z = sqlite3NameFromToken(pTableName); + z = sqlite3NameFromToken(db, pTableName); if( z ){ - pTab = sqlite3LocateTable(pParse, z, zDb); - sqliteFree(z); + pTab = sqlite3LocateTable(pParse, 0, z, zDb); + sqlite3DbFree(db, z); if( pTab ){ analyzeTable(pParse, pTab); } @@ -345,7 +356,7 @@ ** argv[0] = name of the index ** argv[1] = results of analysis - on integer for each column */ -static int analysisLoader(void *pData, int argc, char **argv, char **azNotUsed){ +static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){ analysisInfo *pInfo = (analysisInfo*)pData; Index *pIndex; int i, c; @@ -353,6 +364,8 @@ const char *z; assert( argc==2 ); + UNUSED_PARAMETER2(NotUsed, argc); + if( argv==0 || argv[0]==0 || argv[1]==0 ){ return 0; } @@ -382,6 +395,10 @@ char *zSql; int rc; + assert( iDb>=0 && iDbnDb ); + assert( db->aDb[iDb].pBt!=0 ); + assert( sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + /* Clear any prior statistics */ for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ Index *pIdx = sqliteHashData(i); @@ -397,12 +414,17 @@ /* Load new statistics out of the sqlite_stat1 table */ - zSql = sqlite3MPrintf("SELECT idx, stat FROM %Q.sqlite_stat1", + zSql = sqlite3MPrintf(db, "SELECT idx, stat FROM %Q.sqlite_stat1", sInfo.zDatabase); - sqlite3SafetyOff(db); - rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); - sqlite3SafetyOn(db); - sqliteFree(zSql); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + }else{ + (void)sqlite3SafetyOff(db); + rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); + (void)sqlite3SafetyOn(db); + sqlite3DbFree(db, zSql); + if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; + } return rc; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/attach.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/attach.c --- sqlite3-3.4.2/src/attach.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/attach.c 2009-06-25 12:35:50.000000000 +0100 @@ -11,7 +11,7 @@ ************************************************************************* ** This file contains code used to implement the ATTACH and DETACH commands. 
** -** $Id: attach.c,v 1.60 2007/05/09 20:31:30 drh Exp $ +** $Id: attach.c,v 1.93 2009/05/31 21:21:41 drh Exp $ */ #include "sqliteInt.h" @@ -39,9 +39,9 @@ int rc = SQLITE_OK; if( pExpr ){ if( pExpr->op!=TK_ID ){ - rc = sqlite3ExprResolveNames(pName, pExpr); + rc = sqlite3ResolveExprNames(pName, pExpr); if( rc==SQLITE_OK && !sqlite3ExprIsConstant(pExpr) ){ - sqlite3ErrorMsg(pName->pParse, "invalid name: \"%T\"", &pExpr->span); + sqlite3ErrorMsg(pName->pParse, "invalid name: \"%s\"", pExpr->u.zToken); return SQLITE_ERROR; } }else{ @@ -64,18 +64,19 @@ */ static void attachFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ int i; int rc = 0; - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); const char *zName; const char *zFile; Db *aNew; - char zErr[128]; char *zErrDyn = 0; + UNUSED_PARAMETER(NotUsed); + zFile = (const char *)sqlite3_value_text(argv[0]); zName = (const char *)sqlite3_value_text(argv[1]); if( zFile==0 ) zFile = ""; @@ -87,22 +88,21 @@ ** * Transaction currently open ** * Specified database name already being used. */ - if( db->nDb>=SQLITE_MAX_ATTACHED+2 ){ - sqlite3_snprintf( - sizeof(zErr), zErr, "too many attached databases - max %d", - SQLITE_MAX_ATTACHED + if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ + zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", + db->aLimit[SQLITE_LIMIT_ATTACHED] ); goto attach_error; } if( !db->autoCommit ){ - sqlite3_snprintf(sizeof(zErr), zErr, - "cannot ATTACH database within transaction"); + zErrDyn = sqlite3MPrintf(db, "cannot ATTACH database within transaction"); goto attach_error; } for(i=0; inDb; i++){ char *z = db->aDb[i].zName; - if( z && zName && sqlite3StrICmp(z, zName)==0 ){ - sqlite3_snprintf(sizeof(zErr), zErr, "database %s is already in use", zName); + assert( z && zName ); + if( sqlite3StrICmp(z, zName)==0 ){ + zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); goto attach_error; } } @@ -111,38 +111,43 @@ ** hash tables. */ if( db->aDb==db->aDbStatic ){ - aNew = sqliteMalloc( sizeof(db->aDb[0])*3 ); - if( aNew==0 ){ - return; - } + aNew = sqlite3DbMallocRaw(db, sizeof(db->aDb[0])*3 ); + if( aNew==0 ) return; memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); }else{ - aNew = sqliteRealloc(db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); - if( aNew==0 ){ - return; - } + aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + if( aNew==0 ) return; } db->aDb = aNew; - aNew = &db->aDb[db->nDb++]; + aNew = &db->aDb[db->nDb]; memset(aNew, 0, sizeof(*aNew)); /* Open the database file. If the btree is successfully opened, use ** it to obtain the database schema. At this point the schema may ** or may not be initialised. 
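For context, attachFunc() is the internal implementation reached by the SQL ATTACH command, and the limit it checks is the one exposed through sqlite3_limit(). A minimal usage sketch with an illustrative file name (not part of the patch):

#include <sqlite3.h>

/* Usage sketch: read the attached-database limit, then attach a file. */
static int attachExample(sqlite3 *db, char **pzErr){
  int mxAttached = sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1); /* query only */
  (void)mxAttached;
  return sqlite3_exec(db, "ATTACH DATABASE 'aux.db' AS aux", 0, 0, pzErr);
}

A later "DETACH DATABASE aux" is routed through detachFunc() in the same way.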
*/ - rc = sqlite3BtreeFactory(db, zFile, 0, SQLITE_DEFAULT_CACHE_SIZE, &aNew->pBt); - if( rc==SQLITE_OK ){ - aNew->pSchema = sqlite3SchemaGet(aNew->pBt); + rc = sqlite3BtreeFactory(db, zFile, 0, SQLITE_DEFAULT_CACHE_SIZE, + db->openFlags | SQLITE_OPEN_MAIN_DB, + &aNew->pBt); + db->nDb++; + if( rc==SQLITE_CONSTRAINT ){ + rc = SQLITE_ERROR; + zErrDyn = sqlite3MPrintf(db, "database is already attached"); + }else if( rc==SQLITE_OK ){ + Pager *pPager; + aNew->pSchema = sqlite3SchemaGet(db, aNew->pBt); if( !aNew->pSchema ){ rc = SQLITE_NOMEM; }else if( aNew->pSchema->file_format && aNew->pSchema->enc!=ENC(db) ){ - sqlite3_snprintf(sizeof(zErr), zErr, + zErrDyn = sqlite3MPrintf(db, "attached databases must use the same text encoding as main database"); - goto attach_error; + rc = SQLITE_ERROR; } - sqlite3PagerLockingMode(sqlite3BtreePager(aNew->pBt), db->dfltLockMode); + pPager = sqlite3BtreePager(aNew->pBt); + sqlite3PagerLockingMode(pPager, db->dfltLockMode); + sqlite3PagerJournalMode(pPager, db->dfltJournalMode); } - aNew->zName = sqliteStrDup(zName); + aNew->zName = sqlite3DbStrDup(db, zName); aNew->safety_level = 3; #if SQLITE_HAS_CODEC @@ -155,7 +160,7 @@ switch( t ){ case SQLITE_INTEGER: case SQLITE_FLOAT: - zErrDyn = sqliteStrDup("Invalid key value"); + zErrDyn = sqlite3DbStrDup(db, "Invalid key value"); rc = SQLITE_ERROR; break; @@ -181,9 +186,11 @@ ** we found it. */ if( rc==SQLITE_OK ){ - sqlite3SafetyOn(db); + (void)sqlite3SafetyOn(db); + sqlite3BtreeEnterAll(db); rc = sqlite3Init(db, &zErrDyn); - sqlite3SafetyOff(db); + sqlite3BtreeLeaveAll(db); + (void)sqlite3SafetyOff(db); } if( rc ){ int iDb = db->nDb - 1; @@ -195,11 +202,12 @@ } sqlite3ResetInternalSchema(db, 0); db->nDb = iDb; - if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); - sqlite3_snprintf(sizeof(zErr),zErr, "out of memory"); - }else{ - sqlite3_snprintf(sizeof(zErr),zErr, "unable to open database: %s", zFile); + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; + sqlite3DbFree(db, zErrDyn); + zErrDyn = sqlite3MPrintf(db, "out of memory"); + }else if( zErrDyn==0 ){ + zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); } goto attach_error; } @@ -210,11 +218,9 @@ /* Return an error if we get here */ if( zErrDyn ){ sqlite3_result_error(context, zErrDyn, -1); - sqliteFree(zErrDyn); - }else{ - zErr[sizeof(zErr)-1] = 0; - sqlite3_result_error(context, zErr, -1); + sqlite3DbFree(db, zErrDyn); } + if( rc ) sqlite3_result_error_code(context, rc); } /* @@ -227,15 +233,17 @@ */ static void detachFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ const char *zName = (const char *)sqlite3_value_text(argv[0]); - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); int i; Db *pDb = 0; char zErr[128]; + UNUSED_PARAMETER(NotUsed); + if( zName==0 ) zName = ""; for(i=0; inDb; i++){ pDb = &db->aDb[i]; @@ -256,7 +264,7 @@ "cannot DETACH database within transaction"); goto detach_error; } - if( sqlite3BtreeIsInReadTrans(pDb->pBt) ){ + if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){ sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); goto detach_error; } @@ -278,8 +286,7 @@ static void codeAttach( Parse *pParse, /* The parser context */ int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ - const char *zFunc, /* Either "sqlite_attach" or "sqlite_detach */ - int nFunc, /* Number of args to pass to zFunc */ + FuncDef *pFunc, /* FuncDef wrapper for detachFunc() or attachFunc() */ Expr 
*pAuthArg, /* Expression to pass to authorization callback */ Expr *pFilename, /* Name of database file */ Expr *pDbname, /* Name of the database to use internally */ @@ -288,23 +295,8 @@ int rc; NameContext sName; Vdbe *v; - FuncDef *pFunc; sqlite3* db = pParse->db; - -#ifndef SQLITE_OMIT_AUTHORIZATION - assert( sqlite3MallocFailed() || pAuthArg ); - if( pAuthArg ){ - char *zAuthArg = sqlite3NameFromToken(&pAuthArg->span); - if( !zAuthArg ){ - goto attach_end; - } - rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); - sqliteFree(zAuthArg); - if(rc!=SQLITE_OK ){ - goto attach_end; - } - } -#endif /* SQLITE_OMIT_AUTHORIZATION */ + int regArgs; memset(&sName, 0, sizeof(NameContext)); sName.pParse = pParse; @@ -318,28 +310,44 @@ goto attach_end; } +#ifndef SQLITE_OMIT_AUTHORIZATION + if( pAuthArg ){ + char *zAuthArg = pAuthArg->u.zToken; + if( NEVER(zAuthArg==0) ){ + goto attach_end; + } + rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); + if(rc!=SQLITE_OK ){ + goto attach_end; + } + } +#endif /* SQLITE_OMIT_AUTHORIZATION */ + + v = sqlite3GetVdbe(pParse); - sqlite3ExprCode(pParse, pFilename); - sqlite3ExprCode(pParse, pDbname); - sqlite3ExprCode(pParse, pKey); + regArgs = sqlite3GetTempRange(pParse, 4); + sqlite3ExprCode(pParse, pFilename, regArgs); + sqlite3ExprCode(pParse, pDbname, regArgs+1); + sqlite3ExprCode(pParse, pKey, regArgs+2); - assert( v || sqlite3MallocFailed() ); + assert( v || db->mallocFailed ); if( v ){ - sqlite3VdbeAddOp(v, OP_Function, 0, nFunc); - pFunc = sqlite3FindFunction(db, zFunc, strlen(zFunc), nFunc, SQLITE_UTF8,0); - sqlite3VdbeChangeP3(v, -1, (char *)pFunc, P3_FUNCDEF); + sqlite3VdbeAddOp3(v, OP_Function, 0, regArgs+3-pFunc->nArg, regArgs+3); + assert( pFunc->nArg==-1 || (pFunc->nArg&0xff)==pFunc->nArg ); + sqlite3VdbeChangeP5(v, (u8)(pFunc->nArg)); + sqlite3VdbeChangeP4(v, -1, (char *)pFunc, P4_FUNCDEF); /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this ** statement only). For DETACH, set it to false (expire all existing ** statements). */ - sqlite3VdbeAddOp(v, OP_Expire, (type==SQLITE_ATTACH), 0); + sqlite3VdbeAddOp1(v, OP_Expire, (type==SQLITE_ATTACH)); } attach_end: - sqlite3ExprDelete(pFilename); - sqlite3ExprDelete(pDbname); - sqlite3ExprDelete(pKey); + sqlite3ExprDelete(db, pFilename); + sqlite3ExprDelete(db, pDbname); + sqlite3ExprDelete(db, pKey); } /* @@ -348,7 +356,19 @@ ** DETACH pDbname */ void sqlite3Detach(Parse *pParse, Expr *pDbname){ - codeAttach(pParse, SQLITE_DETACH, "sqlite_detach", 1, pDbname, 0, 0, pDbname); + static FuncDef detach_func = { + 1, /* nArg */ + SQLITE_UTF8, /* iPrefEnc */ + 0, /* flags */ + 0, /* pUserData */ + 0, /* pNext */ + detachFunc, /* xFunc */ + 0, /* xStep */ + 0, /* xFinalize */ + "sqlite_detach", /* zName */ + 0 /* pHash */ + }; + codeAttach(pParse, SQLITE_DETACH, &detach_func, pDbname, 0, 0, pDbname); } /* @@ -357,22 +377,23 @@ ** ATTACH p AS pDbname KEY pKey */ void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ - codeAttach(pParse, SQLITE_ATTACH, "sqlite_attach", 3, p, p, pDbname, pKey); + static FuncDef attach_func = { + 3, /* nArg */ + SQLITE_UTF8, /* iPrefEnc */ + 0, /* flags */ + 0, /* pUserData */ + 0, /* pNext */ + attachFunc, /* xFunc */ + 0, /* xStep */ + 0, /* xFinalize */ + "sqlite_attach", /* zName */ + 0 /* pHash */ + }; + codeAttach(pParse, SQLITE_ATTACH, &attach_func, p, p, pDbname, pKey); } #endif /* SQLITE_OMIT_ATTACH */ /* -** Register the functions sqlite_attach and sqlite_detach. 
-*/ -void sqlite3AttachFunctions(sqlite3 *db){ -#ifndef SQLITE_OMIT_ATTACH - static const int enc = SQLITE_UTF8; - sqlite3CreateFunc(db, "sqlite_attach", 3, enc, db, attachFunc, 0, 0); - sqlite3CreateFunc(db, "sqlite_detach", 1, enc, db, detachFunc, 0, 0); -#endif -} - -/* ** Initialize a DbFixer structure. This routine must be called prior ** to passing the structure to one of the sqliteFixAAAA() routines below. ** @@ -388,7 +409,7 @@ ){ sqlite3 *db; - if( iDb<0 || iDb==1 ) return 0; + if( NEVER(iDb<0) || iDb==1 ) return 0; db = pParse->db; assert( db->nDb>iDb ); pFix->pParse = pParse; @@ -420,11 +441,11 @@ const char *zDb; struct SrcList_item *pItem; - if( pList==0 ) return 0; + if( NEVER(pList==0) ) return 0; zDb = pFix->zDb; for(i=0, pItem=pList->a; inSrc; i++, pItem++){ if( pItem->zDatabase==0 ){ - pItem->zDatabase = sqliteStrDup(zDb); + pItem->zDatabase = sqlite3DbStrDup(pFix->pParse->db, zDb); }else if( sqlite3StrICmp(pItem->zDatabase,zDb)!=0 ){ sqlite3ErrorMsg(pFix->pParse, "%s %T cannot reference objects in database %s", @@ -465,11 +486,11 @@ Expr *pExpr /* The expression to be fixed to one database */ ){ while( pExpr ){ - if( sqlite3FixSelect(pFix, pExpr->pSelect) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pExpr->pList) ){ - return 1; + if( ExprHasAnyProperty(pExpr, EP_TokenOnly) ) break; + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; + }else{ + if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; } if( sqlite3FixExpr(pFix, pExpr->pRight) ){ return 1; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/auth.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/auth.c --- sqlite3-3.4.2/src/auth.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/auth.c 2009-06-25 12:24:38.000000000 +0100 @@ -14,7 +14,7 @@ ** systems that do not need this facility may omit it by recompiling ** the library with -DSQLITE_OMIT_AUTHORIZATION=1 ** -** $Id: auth.c,v 1.26 2007/05/14 11:34:47 drh Exp $ +** $Id: auth.c,v 1.31 2009/05/04 18:01:40 drh Exp $ */ #include "sqliteInt.h" @@ -74,9 +74,11 @@ int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), void *pArg ){ + sqlite3_mutex_enter(db->mutex); db->xAuth = xAuth; db->pAuthArg = pArg; sqlite3ExpirePreparedStatements(db); + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } @@ -84,10 +86,8 @@ ** Write an error message into pParse->zErrMsg that explains that the ** user-supplied authorization function returned an illegal value. 
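The mutex now taken inside sqlite3_set_authorizer() protects installation of callbacks like the sketch below (the column-name policy is purely illustrative). Returning anything other than SQLITE_OK, SQLITE_DENY or SQLITE_IGNORE is what triggers the simplified "authorizer malfunction" message in the next hunk:

#include <sqlite3.h>
#include <string.h>

/* Sketch of an authorizer callback: hide a column named "secret". */
static int denySecretColumn(
  void *pArg,              /* 3rd argument to sqlite3_set_authorizer() */
  int code,                /* e.g. SQLITE_READ, SQLITE_ATTACH, ... */
  const char *zArg1,       /* table name for SQLITE_READ */
  const char *zArg2,       /* column name for SQLITE_READ */
  const char *zArg3,       /* database name */
  const char *zArg4        /* trigger or view name, if any */
){
  if( code==SQLITE_READ && zArg2 && strcmp(zArg2, "secret")==0 ){
    return SQLITE_IGNORE;  /* the column reads back as NULL */
  }
  return SQLITE_OK;
}
/* installed with: sqlite3_set_authorizer(db, denySecretColumn, 0); */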
*/ -static void sqliteAuthBadReturnCode(Parse *pParse, int rc){ - sqlite3ErrorMsg(pParse, "illegal return value (%d) from the " - "authorization function - should be SQLITE_OK, SQLITE_IGNORE, " - "or SQLITE_DENY", rc); +static void sqliteAuthBadReturnCode(Parse *pParse){ + sqlite3ErrorMsg(pParse, "authorizer malfunction"); pParse->rc = SQLITE_ERROR; } @@ -103,11 +103,12 @@ void sqlite3AuthRead( Parse *pParse, /* The parser context */ Expr *pExpr, /* The expression to check authorization on */ + Schema *pSchema, /* The schema of the expression */ SrcList *pTabList /* All table that pExpr might refer to */ ){ sqlite3 *db = pParse->db; int rc; - Table *pTab; /* The table being read */ + Table *pTab = 0; /* The table being read */ const char *zCol; /* Name of the column of the table */ int iSrc; /* Index in pTabList->a[] of table being read */ const char *zDBase; /* Name of database being accessed */ @@ -115,28 +116,30 @@ int iDb; /* The index of the database the expression refers to */ if( db->xAuth==0 ) return; - if( pExpr->op!=TK_COLUMN ) return; - iDb = sqlite3SchemaToIndex(pParse->db, pExpr->pSchema); + assert( pExpr->op==TK_COLUMN ); + iDb = sqlite3SchemaToIndex(pParse->db, pSchema); if( iDb<0 ){ /* An attempt to read a column out of a subquery or other ** temporary table. */ return; } - for(iSrc=0; pTabList && iSrcnSrc; iSrc++){ - if( pExpr->iTable==pTabList->a[iSrc].iCursor ) break; - } - if( iSrc>=0 && pTabList && iSrcnSrc ){ + if( pTabList ){ + for(iSrc=0; ALWAYS(iSrcnSrc); iSrc++){ + if( pExpr->iTable==pTabList->a[iSrc].iCursor ) break; + } + assert( iSrcnSrc ); pTab = pTabList->a[iSrc].pTab; - }else if( (pStack = pParse->trigStack)!=0 ){ - /* This must be an attempt to read the NEW or OLD pseudo-tables - ** of a trigger. - */ - assert( pExpr->iTable==pStack->newIdx || pExpr->iTable==pStack->oldIdx ); - pTab = pStack->pTab; }else{ - return; + pStack = pParse->trigStack; + if( ALWAYS(pStack) ){ + /* This must be an attempt to read the NEW or OLD pseudo-tables + ** of a trigger. + */ + assert( pExpr->iTable==pStack->newIdx || pExpr->iTable==pStack->oldIdx ); + pTab = pStack->pTab; + } } - if( pTab==0 ) return; + if( NEVER(pTab==0) ) return; if( pExpr->iColumn>=0 ){ assert( pExpr->iColumnnCol ); zCol = pTab->aCol[pExpr->iColumn].zName; @@ -161,7 +164,7 @@ } pParse->rc = SQLITE_AUTH; }else if( rc!=SQLITE_OK ){ - sqliteAuthBadReturnCode(pParse, rc); + sqliteAuthBadReturnCode(pParse); } } @@ -197,7 +200,7 @@ pParse->rc = SQLITE_AUTH; }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){ rc = SQLITE_DENY; - sqliteAuthBadReturnCode(pParse, rc); + sqliteAuthBadReturnCode(pParse); } return rc; } @@ -212,11 +215,10 @@ AuthContext *pContext, const char *zContext ){ + assert( pParse ); pContext->pParse = pParse; - if( pParse ){ - pContext->zAuthContext = pParse->zAuthContext; - pParse->zAuthContext = zContext; - } + pContext->zAuthContext = pParse->zAuthContext; + pParse->zAuthContext = zContext; } /* diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/backup.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/backup.c --- sqlite3-3.4.2/src/backup.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/backup.c 2009-06-25 12:35:50.000000000 +0100 @@ -0,0 +1,629 @@ +/* +** 2009 January 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** This file contains the implementation of the sqlite3_backup_XXX() +** API functions and the related features. +** +** $Id: backup.c,v 1.17 2009/06/03 11:25:07 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include "btreeInt.h" + +/* Macro to find the minimum of two numeric values. +*/ +#ifndef MIN +# define MIN(x,y) ((x)<(y)?(x):(y)) +#endif + +/* +** Structure allocated for each backup operation. +*/ +struct sqlite3_backup { + sqlite3* pDestDb; /* Destination database handle */ + Btree *pDest; /* Destination b-tree file */ + u32 iDestSchema; /* Original schema cookie in destination */ + int bDestLocked; /* True once a write-transaction is open on pDest */ + + Pgno iNext; /* Page number of the next source page to copy */ + sqlite3* pSrcDb; /* Source database handle */ + Btree *pSrc; /* Source b-tree file */ + + int rc; /* Backup process error code */ + + /* These two variables are set by every call to backup_step(). They are + ** read by calls to backup_remaining() and backup_pagecount(). + */ + Pgno nRemaining; /* Number of pages left to copy */ + Pgno nPagecount; /* Total number of pages to copy */ + + int isAttached; /* True once backup has been registered with pager */ + sqlite3_backup *pNext; /* Next backup associated with source pager */ +}; + +/* +** THREAD SAFETY NOTES: +** +** Once it has been created using backup_init(), a single sqlite3_backup +** structure may be accessed via two groups of thread-safe entry points: +** +** * Via the sqlite3_backup_XXX() API function backup_step() and +** backup_finish(). Both these functions obtain the source database +** handle mutex and the mutex associated with the source BtShared +** structure, in that order. +** +** * Via the BackupUpdate() and BackupRestart() functions, which are +** invoked by the pager layer to report various state changes in +** the page cache associated with the source database. The mutex +** associated with the source database BtShared structure will always +** be held when either of these functions are invoked. +** +** The other sqlite3_backup_XXX() API functions, backup_remaining() and +** backup_pagecount() are not thread-safe functions. If they are called +** while some other thread is calling backup_step() or backup_finish(), +** the values returned may be invalid. There is no way for a call to +** BackupUpdate() or BackupRestart() to interfere with backup_remaining() +** or backup_pagecount(). +** +** Depending on the SQLite configuration, the database handles and/or +** the Btree objects may have their own mutexes that require locking. +** Non-sharable Btrees (in-memory databases for example), do not have +** associated mutexes. +*/ + +/* +** Return a pointer corresponding to database zDb (i.e. "main", "temp") +** in connection handle pDb. If such a database cannot be found, return +** a NULL pointer and write an error message to pErrorDb. +** +** If the "temp" database is requested, it may need to be opened by this +** function. If an error occurs while doing so, return 0 and write an +** error message to pErrorDb. 
+*/ +static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){ + int i = sqlite3FindDbName(pDb, zDb); + + if( i==1 ){ + Parse *pParse; + int rc = 0; + pParse = sqlite3StackAllocZero(pErrorDb, sizeof(*pParse)); + if( pParse==0 ){ + sqlite3Error(pErrorDb, SQLITE_NOMEM, "out of memory"); + rc = SQLITE_NOMEM; + }else{ + pParse->db = pDb; + if( sqlite3OpenTempDatabase(pParse) ){ + sqlite3ErrorClear(pParse); + sqlite3Error(pErrorDb, pParse->rc, "%s", pParse->zErrMsg); + rc = SQLITE_ERROR; + } + sqlite3StackFree(pErrorDb, pParse); + } + if( rc ){ + return 0; + } + } + + if( i<0 ){ + sqlite3Error(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb); + return 0; + } + + return pDb->aDb[i].pBt; +} + +/* +** Create an sqlite3_backup process to copy the contents of zSrcDb from +** connection handle pSrcDb to zDestDb in pDestDb. If successful, return +** a pointer to the new sqlite3_backup object. +** +** If an error occurs, NULL is returned and an error code and error message +** stored in database handle pDestDb. +*/ +sqlite3_backup *sqlite3_backup_init( + sqlite3* pDestDb, /* Database to write to */ + const char *zDestDb, /* Name of database within pDestDb */ + sqlite3* pSrcDb, /* Database connection to read from */ + const char *zSrcDb /* Name of database within pSrcDb */ +){ + sqlite3_backup *p; /* Value to return */ + + /* Lock the source database handle. The destination database + ** handle is not locked in this routine, but it is locked in + ** sqlite3_backup_step(). The user is required to ensure that no + ** other thread accesses the destination handle for the duration + ** of the backup operation. Any attempt to use the destination + ** database connection while a backup is in progress may cause + ** a malfunction or a deadlock. + */ + sqlite3_mutex_enter(pSrcDb->mutex); + sqlite3_mutex_enter(pDestDb->mutex); + + if( pSrcDb==pDestDb ){ + sqlite3Error( + pDestDb, SQLITE_ERROR, "source and destination must be distinct" + ); + p = 0; + }else { + /* Allocate space for a new sqlite3_backup object */ + p = (sqlite3_backup *)sqlite3_malloc(sizeof(sqlite3_backup)); + if( !p ){ + sqlite3Error(pDestDb, SQLITE_NOMEM, 0); + } + } + + /* If the allocation succeeded, populate the new object. */ + if( p ){ + memset(p, 0, sizeof(sqlite3_backup)); + p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb); + p->pDest = findBtree(pDestDb, pDestDb, zDestDb); + p->pDestDb = pDestDb; + p->pSrcDb = pSrcDb; + p->iNext = 1; + p->isAttached = 0; + + if( 0==p->pSrc || 0==p->pDest ){ + /* One (or both) of the named databases did not exist. An error has + ** already been written into the pDestDb handle. All that is left + ** to do here is free the sqlite3_backup structure. + */ + sqlite3_free(p); + p = 0; + } + } + if( p ){ + p->pSrc->nBackup++; + } + + sqlite3_mutex_leave(pDestDb->mutex); + sqlite3_mutex_leave(pSrcDb->mutex); + return p; +} + +/* +** Argument rc is an SQLite error code. Return true if this error is +** considered fatal if encountered during a backup operation. All errors +** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED. +*/ +static int isFatalError(int rc){ + return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED)); +} + +/* +** Parameter zSrcData points to a buffer containing the data for +** page iSrcPg from the source database. Copy this data into the +** destination database. 
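For orientation, this is the documented calling pattern for the API implemented in this file, sketched with an illustrative pages-per-step count; a production caller would usually sqlite3_sleep() between SQLITE_BUSY/SQLITE_LOCKED retries:

#include <sqlite3.h>

/* Usage sketch: copy the "main" database of pFrom into pTo. */
static int backupDb(sqlite3 *pTo, sqlite3 *pFrom){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pTo, "main", pFrom, "main");
  if( p==0 ) return sqlite3_errcode(pTo);
  do{
    rc = sqlite3_backup_step(p, 64);     /* copy up to 64 pages per call */
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  return sqlite3_backup_finish(p);       /* SQLITE_OK if the copy completed */
}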
+*/ +static int backupOnePage(sqlite3_backup *p, Pgno iSrcPg, const u8 *zSrcData){ + Pager * const pDestPager = sqlite3BtreePager(p->pDest); + const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc); + int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest); + const int nCopy = MIN(nSrcPgsz, nDestPgsz); + const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz; + + int rc = SQLITE_OK; + i64 iOff; + + assert( p->bDestLocked ); + assert( !isFatalError(p->rc) ); + assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); + assert( zSrcData ); + + /* Catch the case where the destination is an in-memory database and the + ** page sizes of the source and destination differ. + */ + if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(sqlite3BtreePager(p->pDest)) ){ + rc = SQLITE_READONLY; + } + + /* This loop runs once for each destination page spanned by the source + ** page. For each iteration, variable iOff is set to the byte offset + ** of the destination page. + */ + for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOffpDest->pBt) ) continue; + if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg)) + && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg)) + ){ + const u8 *zIn = &zSrcData[iOff%nSrcPgsz]; + u8 *zDestData = sqlite3PagerGetData(pDestPg); + u8 *zOut = &zDestData[iOff%nDestPgsz]; + + /* Copy the data from the source page into the destination page. + ** Then clear the Btree layer MemPage.isInit flag. Both this module + ** and the pager code use this trick (clearing the first byte + ** of the page 'extra' space to invalidate the Btree layers + ** cached parse of the page). MemPage.isInit is marked + ** "MUST BE FIRST" for this purpose. + */ + memcpy(zOut, zIn, nCopy); + ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0; + } + sqlite3PagerUnref(pDestPg); + } + + return rc; +} + +/* +** If pFile is currently larger than iSize bytes, then truncate it to +** exactly iSize bytes. If pFile is not larger than iSize bytes, then +** this function is a no-op. +** +** Return SQLITE_OK if everything is successful, or an SQLite error +** code if an error occurs. +*/ +static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){ + i64 iCurrent; + int rc = sqlite3OsFileSize(pFile, &iCurrent); + if( rc==SQLITE_OK && iCurrent>iSize ){ + rc = sqlite3OsTruncate(pFile, iSize); + } + return rc; +} + +/* +** Register this backup object with the associated source pager for +** callbacks when pages are changed or the cache invalidated. +*/ +static void attachBackupObject(sqlite3_backup *p){ + sqlite3_backup **pp; + assert( sqlite3BtreeHoldsMutex(p->pSrc) ); + pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); + p->pNext = *pp; + *pp = p; + p->isAttached = 1; +} + +/* +** Copy nPage pages from the source b-tree to the destination. +*/ +int sqlite3_backup_step(sqlite3_backup *p, int nPage){ + int rc; + + sqlite3_mutex_enter(p->pSrcDb->mutex); + sqlite3BtreeEnter(p->pSrc); + if( p->pDestDb ){ + sqlite3_mutex_enter(p->pDestDb->mutex); + } + + rc = p->rc; + if( !isFatalError(rc) ){ + Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ + Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ + int ii; /* Iterator variable */ + int nSrcPage = -1; /* Size of source db in pages */ + int bCloseTrans = 0; /* True if src db requires unlocking */ + + /* If the source pager is currently in a write-transaction, return + ** SQLITE_BUSY immediately. 
+ */ + if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ + rc = SQLITE_BUSY; + }else{ + rc = SQLITE_OK; + } + + /* Lock the destination database, if it is not locked already. */ + if( SQLITE_OK==rc && p->bDestLocked==0 + && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) + ){ + p->bDestLocked = 1; + rc = sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema); + } + + /* If there is no open read-transaction on the source database, open + ** one now. If a transaction is opened here, then it will be closed + ** before this function exits. + */ + if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ + rc = sqlite3BtreeBeginTrans(p->pSrc, 0); + bCloseTrans = 1; + } + + /* Now that there is a read-lock on the source database, query the + ** source pager for the number of pages in the database. + */ + if( rc==SQLITE_OK ){ + rc = sqlite3PagerPagecount(pSrcPager, &nSrcPage); + } + for(ii=0; (nPage<0 || iiiNext<=(Pgno)nSrcPage && !rc; ii++){ + const Pgno iSrcPg = p->iNext; /* Source page number */ + if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){ + DbPage *pSrcPg; /* Source page object */ + rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg); + if( rc==SQLITE_OK ){ + rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg)); + sqlite3PagerUnref(pSrcPg); + } + } + p->iNext++; + } + if( rc==SQLITE_OK ){ + p->nPagecount = nSrcPage; + p->nRemaining = nSrcPage+1-p->iNext; + if( p->iNext>(Pgno)nSrcPage ){ + rc = SQLITE_DONE; + }else if( !p->isAttached ){ + attachBackupObject(p); + } + } + + if( rc==SQLITE_DONE ){ + const int nSrcPagesize = sqlite3BtreeGetPageSize(p->pSrc); + const int nDestPagesize = sqlite3BtreeGetPageSize(p->pDest); + int nDestTruncate; + + /* Update the schema version field in the destination database. This + ** is to make sure that the schema-version really does change in + ** the case where the source and destination databases have the + ** same schema version. + */ + sqlite3BtreeUpdateMeta(p->pDest, 1, p->iDestSchema+1); + if( p->pDestDb ){ + sqlite3ResetInternalSchema(p->pDestDb, 0); + } + + /* Set nDestTruncate to the final number of pages in the destination + ** database. The complication here is that the destination page + ** size may be different to the source page size. + ** + ** If the source page size is smaller than the destination page size, + ** round up. In this case the call to sqlite3OsTruncate() below will + ** fix the size of the file. However it is important to call + ** sqlite3PagerTruncateImage() here so that any pages in the + ** destination file that lie beyond the nDestTruncate page mark are + ** journalled by PagerCommitPhaseOne() before they are destroyed + ** by the file truncation. + */ + if( nSrcPagesizepDest->pBt) ){ + nDestTruncate--; + } + }else{ + nDestTruncate = nSrcPage * (nSrcPagesize/nDestPagesize); + } + sqlite3PagerTruncateImage(pDestPager, nDestTruncate); + + if( nSrcPagesize= iSize || ( + nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) + && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+nDestPagesize + )); + if( SQLITE_OK==(rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1)) + && SQLITE_OK==(rc = backupTruncateFile(pFile, iSize)) + && SQLITE_OK==(rc = sqlite3PagerSync(pDestPager)) + ){ + i64 iOff; + i64 iEnd = MIN(PENDING_BYTE + nDestPagesize, iSize); + for( + iOff=PENDING_BYTE+nSrcPagesize; + rc==SQLITE_OK && iOffpDest)) + ){ + rc = SQLITE_DONE; + } + } + + /* If bCloseTrans is true, then this function opened a read transaction + ** on the source database. Close the read transaction here. 
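A worked example of the truncation arithmetic in this hunk, with illustrative sizes (my reading of the intent, not figures from the patch):

/* Illustrative numbers only: with a 1024-byte source page size, a
** 4096-byte destination page size and a 10-page source database, the
** destination image is truncated to ceil(10*1024/4096) = 3 pages, and
** backupTruncateFile() then cuts the file back to the exact source
** size of 10*1024 = 10240 bytes. */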
There is + ** no need to check the return values of the btree methods here, as + ** "committing" a read-only transaction cannot fail. + */ + if( bCloseTrans ){ + TESTONLY( int rc2 ); + TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0); + TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc); + assert( rc2==SQLITE_OK ); + } + + p->rc = rc; + } + if( p->pDestDb ){ + sqlite3_mutex_leave(p->pDestDb->mutex); + } + sqlite3BtreeLeave(p->pSrc); + sqlite3_mutex_leave(p->pSrcDb->mutex); + return rc; +} + +/* +** Release all resources associated with an sqlite3_backup* handle. +*/ +int sqlite3_backup_finish(sqlite3_backup *p){ + sqlite3_backup **pp; /* Ptr to head of pagers backup list */ + sqlite3_mutex *mutex; /* Mutex to protect source database */ + int rc; /* Value to return */ + + /* Enter the mutexes */ + if( p==0 ) return SQLITE_OK; + sqlite3_mutex_enter(p->pSrcDb->mutex); + sqlite3BtreeEnter(p->pSrc); + mutex = p->pSrcDb->mutex; + if( p->pDestDb ){ + sqlite3_mutex_enter(p->pDestDb->mutex); + } + + /* Detach this backup from the source pager. */ + if( p->pDestDb ){ + p->pSrc->nBackup--; + } + if( p->isAttached ){ + pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); + while( *pp!=p ){ + pp = &(*pp)->pNext; + } + *pp = p->pNext; + } + + /* If a transaction is still open on the Btree, roll it back. */ + sqlite3BtreeRollback(p->pDest); + + /* Set the error code of the destination database handle. */ + rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc; + sqlite3Error(p->pDestDb, rc, 0); + + /* Exit the mutexes and free the backup context structure. */ + if( p->pDestDb ){ + sqlite3_mutex_leave(p->pDestDb->mutex); + } + sqlite3BtreeLeave(p->pSrc); + if( p->pDestDb ){ + sqlite3_free(p); + } + sqlite3_mutex_leave(mutex); + return rc; +} + +/* +** Return the number of pages still to be backed up as of the most recent +** call to sqlite3_backup_step(). +*/ +int sqlite3_backup_remaining(sqlite3_backup *p){ + return p->nRemaining; +} + +/* +** Return the total number of pages in the source database as of the most +** recent call to sqlite3_backup_step(). +*/ +int sqlite3_backup_pagecount(sqlite3_backup *p){ + return p->nPagecount; +} + +/* +** This function is called after the contents of page iPage of the +** source database have been modified. If page iPage has already been +** copied into the destination database, then the data written to the +** destination is now invalidated. The destination copy of iPage needs +** to be updated with the new data before the backup operation is +** complete. +** +** It is assumed that the mutex associated with the BtShared object +** corresponding to the source database is held when this function is +** called. +*/ +void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){ + sqlite3_backup *p; /* Iterator variable */ + for(p=pBackup; p; p=p->pNext){ + assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); + if( !isFatalError(p->rc) && iPageiNext ){ + /* The backup process p has already copied page iPage. But now it + ** has been modified by a transaction on the source pager. Copy + ** the new data into the backup. + */ + int rc = backupOnePage(p, iPage, aData); + assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED ); + if( rc!=SQLITE_OK ){ + p->rc = rc; + } + } + } +} + +/* +** Restart the backup process. This is called when the pager layer +** detects that the database has been modified by an external database +** connection. 
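The two accessors defined in this hunk are typically combined to report progress between calls to sqlite3_backup_step(); a small sketch:

/* Sketch: percentage of the source database copied so far, based on the
** values captured by the most recent sqlite3_backup_step() call. */
static int backupProgressPct(sqlite3_backup *p){
  int nTotal = sqlite3_backup_pagecount(p);
  int nLeft  = sqlite3_backup_remaining(p);
  return nTotal>0 ? (100*(nTotal-nLeft))/nTotal : 0;
}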
In this case there is no way of knowing which of the +** pages that have been copied into the destination database are still +** valid and which are not, so the entire process needs to be restarted. +** +** It is assumed that the mutex associated with the BtShared object +** corresponding to the source database is held when this function is +** called. +*/ +void sqlite3BackupRestart(sqlite3_backup *pBackup){ + sqlite3_backup *p; /* Iterator variable */ + for(p=pBackup; p; p=p->pNext){ + assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); + p->iNext = 1; + } +} + +#ifndef SQLITE_OMIT_VACUUM +/* +** Copy the complete content of pBtFrom into pBtTo. A transaction +** must be active for both files. +** +** The size of file pTo may be reduced by this operation. If anything +** goes wrong, the transaction on pTo is rolled back. If successful, the +** transaction is committed before returning. +*/ +int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ + int rc; + sqlite3_backup b; + sqlite3BtreeEnter(pTo); + sqlite3BtreeEnter(pFrom); + + /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set + ** to 0. This is used by the implementations of sqlite3_backup_step() + ** and sqlite3_backup_finish() to detect that they are being called + ** from this function, not directly by the user. + */ + memset(&b, 0, sizeof(b)); + b.pSrcDb = pFrom->db; + b.pSrc = pFrom; + b.pDest = pTo; + b.iNext = 1; + + /* 0x7FFFFFFF is the hard limit for the number of pages in a database + ** file. By passing this as the number of pages to copy to + ** sqlite3_backup_step(), we can guarantee that the copy finishes + ** within a single call (unless an error occurs). The assert() statement + ** checks this assumption - (p->rc) should be set to either SQLITE_DONE + ** or an error code. + */ + sqlite3_backup_step(&b, 0x7FFFFFFF); + assert( b.rc!=SQLITE_OK ); + rc = sqlite3_backup_finish(&b); + if( rc==SQLITE_OK ){ + pTo->pBt->pageSizeFixed = 0; + } + + sqlite3BtreeLeave(pFrom); + sqlite3BtreeLeave(pTo); + return rc; +} +#endif /* SQLITE_OMIT_VACUUM */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/bitvec.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/bitvec.c --- sqlite3-3.4.2/src/bitvec.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/bitvec.c 2009-06-25 12:35:50.000000000 +0100 @@ -0,0 +1,407 @@ +/* +** 2008 February 16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements an object that represents a fixed-length +** bitmap. Bits are numbered starting with 1. +** +** A bitmap is used to record which pages of a database file have been +** journalled during a transaction, or which pages have the "dont-write" +** property. Usually only a few pages are meet either condition. +** So the bitmap is usually sparse and has low cardinality. +** But sometimes (for example when during a DROP of a large table) most +** or all of the pages in a database can get journalled. In those cases, +** the bitmap becomes dense with high cardinality. The algorithm needs +** to handle both cases well. +** +** The size of the bitmap is fixed when the object is created. +** +** All bits are clear when the bitmap is created. Individual bits +** may be set or cleared one at a time. 
+** +** Test operations are about 100 times more common that set operations. +** Clear operations are exceedingly rare. There are usually between +** 5 and 500 set operations per Bitvec object, though the number of sets can +** sometimes grow into tens of thousands or larger. The size of the +** Bitvec object is the number of pages in the database file at the +** start of a transaction, and is thus usually less than a few thousand, +** but can be as large as 2 billion for a really big database. +** +** @(#) $Id: bitvec.c,v 1.15 2009/06/02 21:31:39 drh Exp $ +*/ +#include "sqliteInt.h" + +/* Size of the Bitvec structure in bytes. */ +#define BITVEC_SZ 512 + +/* Round the union size down to the nearest pointer boundary, since that's how +** it will be aligned within the Bitvec struct. */ +#define BITVEC_USIZE (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*)) + +/* Type of the array "element" for the bitmap representation. +** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE. +** Setting this to the "natural word" size of your CPU may improve +** performance. */ +#define BITVEC_TELEM u8 +/* Size, in bits, of the bitmap element. */ +#define BITVEC_SZELEM 8 +/* Number of elements in a bitmap array. */ +#define BITVEC_NELEM (BITVEC_USIZE/sizeof(BITVEC_TELEM)) +/* Number of bits in the bitmap array. */ +#define BITVEC_NBIT (BITVEC_NELEM*BITVEC_SZELEM) + +/* Number of u32 values in hash table. */ +#define BITVEC_NINT (BITVEC_USIZE/sizeof(u32)) +/* Maximum number of entries in hash table before +** sub-dividing and re-hashing. */ +#define BITVEC_MXHASH (BITVEC_NINT/2) +/* Hashing function for the aHash representation. +** Empirical testing showed that the *37 multiplier +** (an arbitrary prime)in the hash function provided +** no fewer collisions than the no-op *1. */ +#define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT) + +#define BITVEC_NPTR (BITVEC_USIZE/sizeof(Bitvec *)) + + +/* +** A bitmap is an instance of the following structure. +** +** This bitmap records the existance of zero or more bits +** with values between 1 and iSize, inclusive. +** +** There are three possible representations of the bitmap. +** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight +** bitmap. The least significant bit is bit 1. +** +** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is +** a hash table that will hold up to BITVEC_MXHASH distinct values. +** +** Otherwise, the value i is redirected into one of BITVEC_NPTR +** sub-bitmaps pointed to by Bitvec.u.apSub[]. Each subbitmap +** handles up to iDivisor separate values of i. apSub[0] holds +** values between 1 and iDivisor. apSub[1] holds values between +** iDivisor+1 and 2*iDivisor. apSub[N] holds values between +** N*iDivisor+1 and (N+1)*iDivisor. Each subbitmap is normalized +** to hold deal with values between 1 and iDivisor. +*/ +struct Bitvec { + u32 iSize; /* Maximum bit index. Max iSize is 4,294,967,296. */ + u32 nSet; /* Number of bits that are set - only valid for aHash + ** element. Max is BITVEC_NINT. For BITVEC_SZ of 512, + ** this would be 125. */ + u32 iDivisor; /* Number of bits handled by each apSub[] entry. */ + /* Should >=0 for apSub element. */ + /* Max iDivisor is max(u32) / BITVEC_NPTR + 1. */ + /* For a BITVEC_SZ of 512, this would be 34,359,739. 
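Spelling out the figures quoted in these comments (assuming BITVEC_SZ of 512 and 4-byte pointers; 64-bit builds use the same formulas with 8-byte pointers):

/* Worked numbers, illustrative for a 32-bit build:
**   BITVEC_USIZE  = ((512 - 3*4)/4)*4   = 500 bytes of payload
**   BITVEC_NBIT   = 500 * 8             = 4000 bits (direct bitmap limit)
**   BITVEC_NINT   = 500 / 4             = 125 hash slots
**   BITVEC_MXHASH = 125 / 2             = 62 entries before re-hashing
**   BITVEC_NPTR   = 500 / 4             = 125 sub-bitmap pointers
**   max iDivisor  = max(u32)/125 + 1    = 34,359,739
*/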
*/ + union { + BITVEC_TELEM aBitmap[BITVEC_NELEM]; /* Bitmap representation */ + u32 aHash[BITVEC_NINT]; /* Hash table representation */ + Bitvec *apSub[BITVEC_NPTR]; /* Recursive representation */ + } u; +}; + +/* +** Create a new bitmap object able to handle bits between 0 and iSize, +** inclusive. Return a pointer to the new object. Return NULL if +** malloc fails. +*/ +Bitvec *sqlite3BitvecCreate(u32 iSize){ + Bitvec *p; + assert( sizeof(*p)==BITVEC_SZ ); + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->iSize = iSize; + } + return p; +} + +/* +** Check to see if the i-th bit is set. Return true or false. +** If p is NULL (if the bitmap has not been created) or if +** i is out of range, then return false. +*/ +int sqlite3BitvecTest(Bitvec *p, u32 i){ + if( p==0 ) return 0; + if( i>p->iSize || i==0 ) return 0; + i--; + while( p->iDivisor ){ + u32 bin = i/p->iDivisor; + i = i%p->iDivisor; + p = p->u.apSub[bin]; + if (!p) { + return 0; + } + } + if( p->iSize<=BITVEC_NBIT ){ + return (p->u.aBitmap[i/BITVEC_SZELEM] & (1<<(i&(BITVEC_SZELEM-1))))!=0; + } else{ + u32 h = BITVEC_HASH(i++); + while( p->u.aHash[h] ){ + if( p->u.aHash[h]==i ) return 1; + h++; + if( h>=BITVEC_NINT ) h = 0; + } + return 0; + } +} + +/* +** Set the i-th bit. Return 0 on success and an error code if +** anything goes wrong. +** +** This routine might cause sub-bitmaps to be allocated. Failing +** to get the memory needed to hold the sub-bitmap is the only +** that can go wrong with an insert, assuming p and i are valid. +** +** The calling function must ensure that p is a valid Bitvec object +** and that the value for "i" is within range of the Bitvec object. +** Otherwise the behavior is undefined. +*/ +int sqlite3BitvecSet(Bitvec *p, u32 i){ + u32 h; + assert( p!=0 ); + assert( i>0 ); + assert( i<=p->iSize ); + i--; + while((p->iSize > BITVEC_NBIT) && p->iDivisor) { + u32 bin = i/p->iDivisor; + i = i%p->iDivisor; + if( p->u.apSub[bin]==0 ){ + p->u.apSub[bin] = sqlite3BitvecCreate( p->iDivisor ); + if( p->u.apSub[bin]==0 ) return SQLITE_NOMEM; + } + p = p->u.apSub[bin]; + } + if( p->iSize<=BITVEC_NBIT ){ + p->u.aBitmap[i/BITVEC_SZELEM] |= 1 << (i&(BITVEC_SZELEM-1)); + return SQLITE_OK; + } + h = BITVEC_HASH(i++); + /* if there wasn't a hash collision, and this doesn't */ + /* completely fill the hash, then just add it without */ + /* worring about sub-dividing and re-hashing. */ + if( !p->u.aHash[h] ){ + if (p->nSet<(BITVEC_NINT-1)) { + goto bitvec_set_end; + } else { + goto bitvec_set_rehash; + } + } + /* there was a collision, check to see if it's already */ + /* in hash, if not, try to find a spot for it */ + do { + if( p->u.aHash[h]==i ) return SQLITE_OK; + h++; + if( h>=BITVEC_NINT ) h = 0; + } while( p->u.aHash[h] ); + /* we didn't find it in the hash. h points to the first */ + /* available free spot. check to see if this is going to */ + /* make our hash too "full". */ +bitvec_set_rehash: + if( p->nSet>=BITVEC_MXHASH ){ + unsigned int j; + int rc; + u32 *aiValues = sqlite3StackAllocRaw(0, sizeof(p->u.aHash)); + if( aiValues==0 ){ + return SQLITE_NOMEM; + }else{ + memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); + memset(p->u.apSub, 0, sizeof(p->u.apSub)); + p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR; + rc = sqlite3BitvecSet(p, i); + for(j=0; jnSet++; + p->u.aHash[h] = i; + return SQLITE_OK; +} + +/* +** Clear the i-th bit. +** +** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage +** that BitvecClear can use to rebuilt its hash table. 
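A short sketch of how the internal API defined above fits together (sizes illustrative; it mirrors the way the pager tracks journalled pages, but is not code from the patch and assumes the sqliteInt.h declarations):

/* Usage sketch: test-then-set, the common pattern for this object. */
static int bitvecSketch(void){
  Bitvec *p = sqlite3BitvecCreate(1000);   /* bits 1..1000, initially clear */
  int rc = SQLITE_OK;
  if( p==0 ) return SQLITE_NOMEM;
  if( sqlite3BitvecTest(p, 7)==0 ){        /* cheap test, the common case */
    rc = sqlite3BitvecSet(p, 7);           /* may allocate sub-bitmaps */
  }
  sqlite3BitvecDestroy(p);
  return rc;
}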
+*/ +void sqlite3BitvecClear(Bitvec *p, u32 i, void *pBuf){ + assert( p!=0 ); + assert( i>0 ); + i--; + while( p->iDivisor ){ + u32 bin = i/p->iDivisor; + i = i%p->iDivisor; + p = p->u.apSub[bin]; + if (!p) { + return; + } + } + if( p->iSize<=BITVEC_NBIT ){ + p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1))); + }else{ + unsigned int j; + u32 *aiValues = pBuf; + memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); + memset(p->u.aHash, 0, sizeof(p->u.aHash)); + p->nSet = 0; + for(j=0; jnSet++; + while( p->u.aHash[h] ){ + h++; + if( h>=BITVEC_NINT ) h = 0; + } + p->u.aHash[h] = aiValues[j]; + } + } + } +} + +/* +** Destroy a bitmap object. Reclaim all memory used. +*/ +void sqlite3BitvecDestroy(Bitvec *p){ + if( p==0 ) return; + if( p->iDivisor ){ + unsigned int i; + for(i=0; iu.apSub[i]); + } + } + sqlite3_free(p); +} + +/* +** Return the value of the iSize parameter specified when Bitvec *p +** was created. +*/ +u32 sqlite3BitvecSize(Bitvec *p){ + return p->iSize; +} + +#ifndef SQLITE_OMIT_BUILTIN_TEST +/* +** Let V[] be an array of unsigned characters sufficient to hold +** up to N bits. Let I be an integer between 0 and N. 0<=I>3] |= (1<<(I&7)) +#define CLEARBIT(V,I) V[I>>3] &= ~(1<<(I&7)) +#define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0 + +/* +** This routine runs an extensive test of the Bitvec code. +** +** The input is an array of integers that acts as a program +** to test the Bitvec. The integers are opcodes followed +** by 0, 1, or 3 operands, depending on the opcode. Another +** opcode follows immediately after the last operand. +** +** There are 6 opcodes numbered from 0 through 5. 0 is the +** "halt" opcode and causes the test to end. +** +** 0 Halt and return the number of errors +** 1 N S X Set N bits beginning with S and incrementing by X +** 2 N S X Clear N bits beginning with S and incrementing by X +** 3 N Set N randomly chosen bits +** 4 N Clear N randomly chosen bits +** 5 N S X Set N bits from S increment X in array only, not in bitvec +** +** The opcodes 1 through 4 perform set and clear operations are performed +** on both a Bitvec object and on a linear array of bits obtained from malloc. +** Opcode 5 works on the linear array only, not on the Bitvec. +** Opcode 5 is used to deliberately induce a fault in order to +** confirm that error detection works. +** +** At the conclusion of the test the linear array is compared +** against the Bitvec object. If there are any differences, +** an error is returned. If they are the same, zero is returned. +** +** If a memory allocation error occurs, return -1. 
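An example program for the test harness described above (values illustrative): per the opcode table, it sets 100 bits starting at bit 1 with stride 7 and then halts; a zero return means the Bitvec and the reference bit array agree.

/* Sketch: one set-loop followed by the halt opcode. */
static int bitvecSelfCheck(void){
  int aOp[] = { 1, 100, 1, 7,   /* opcode 1: N=100 bits, S=1, X=7 */
                0 };            /* opcode 0: halt                 */
  return sqlite3BitvecBuiltinTest(4000, aOp);
}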
+*/ +int sqlite3BitvecBuiltinTest(int sz, int *aOp){ + Bitvec *pBitvec = 0; + unsigned char *pV = 0; + int rc = -1; + int i, nx, pc, op; + void *pTmpSpace; + + /* Allocate the Bitvec to be tested and a linear array of + ** bits to act as the reference */ + pBitvec = sqlite3BitvecCreate( sz ); + pV = sqlite3_malloc( (sz+7)/8 + 1 ); + pTmpSpace = sqlite3_malloc(BITVEC_SZ); + if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; + memset(pV, 0, (sz+7)/8 + 1); + + /* Run the program */ + pc = 0; + while( (op = aOp[pc])!=0 ){ + switch( op ){ + case 1: + case 2: + case 5: { + nx = 4; + i = aOp[pc+2] - 1; + aOp[pc+2] += aOp[pc+3]; + break; + } + case 3: + case 4: + default: { + nx = 2; + sqlite3_randomness(sizeof(i), &i); + break; + } + } + if( (--aOp[pc+1]) > 0 ) nx = 0; + pc += nx; + i = (i & 0x7fffffff)%sz; + if( (op & 1)!=0 ){ + SETBIT(pV, (i+1)); + if( op!=5 ){ + if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end; + } + }else{ + CLEARBIT(pV, (i+1)); + sqlite3BitvecClear(pBitvec, i+1, pTmpSpace); + } + } + + /* Test to make sure the linear array exactly matches the + ** Bitvec object. Start with the assumption that they do + ** match (rc==0). Change rc to non-zero if a discrepancy + ** is found. + */ + rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) + + sqlite3BitvecTest(pBitvec, 0) + + (sqlite3BitvecSize(pBitvec) - sz); + for(i=1; i<=sz; i++){ + if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ + rc = i; + break; + } + } + + /* Free allocated structure */ +bitvec_end: + sqlite3_free(pTmpSpace); + sqlite3_free(pV); + sqlite3BitvecDestroy(pBitvec); + return rc; +} +#endif /* SQLITE_OMIT_BUILTIN_TEST */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/btmutex.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/btmutex.c --- sqlite3-3.4.2/src/btmutex.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/btmutex.c 2009-06-25 12:23:18.000000000 +0100 @@ -0,0 +1,354 @@ +/* +** 2007 August 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** $Id: btmutex.c,v 1.15 2009/04/10 12:55:17 danielk1977 Exp $ +** +** This file contains code used to implement mutexes on Btree objects. +** This code really belongs in btree.c. But btree.c is getting too +** big and we want to break it down some. This packaged seemed like +** a good breakout. +*/ +#include "btreeInt.h" +#ifndef SQLITE_OMIT_SHARED_CACHE +#if SQLITE_THREADSAFE + +/* +** Obtain the BtShared mutex associated with B-Tree handle p. Also, +** set BtShared.db to the database handle associated with p and the +** p->locked boolean to true. +*/ +static void lockBtreeMutex(Btree *p){ + assert( p->locked==0 ); + assert( sqlite3_mutex_notheld(p->pBt->mutex) ); + assert( sqlite3_mutex_held(p->db->mutex) ); + + sqlite3_mutex_enter(p->pBt->mutex); + p->pBt->db = p->db; + p->locked = 1; +} + +/* +** Release the BtShared mutex associated with B-Tree handle p and +** clear the p->locked boolean. +*/ +static void unlockBtreeMutex(Btree *p){ + assert( p->locked==1 ); + assert( sqlite3_mutex_held(p->pBt->mutex) ); + assert( sqlite3_mutex_held(p->db->mutex) ); + assert( p->db==p->pBt->db ); + + sqlite3_mutex_leave(p->pBt->mutex); + p->locked = 0; +} + +/* +** Enter a mutex on the given BTree object. 
+** +** If the object is not sharable, then no mutex is ever required +** and this routine is a no-op. The underlying mutex is non-recursive. +** But we keep a reference count in Btree.wantToLock so the behavior +** of this interface is recursive. +** +** To avoid deadlocks, multiple Btrees are locked in the same order +** by all database connections. The p->pNext is a list of other +** Btrees belonging to the same database connection as the p Btree +** which need to be locked after p. If we cannot get a lock on +** p, then first unlock all of the others on p->pNext, then wait +** for the lock to become available on p, then relock all of the +** subsequent Btrees that desire a lock. +*/ +void sqlite3BtreeEnter(Btree *p){ + Btree *pLater; + + /* Some basic sanity checking on the Btree. The list of Btrees + ** connected by pNext and pPrev should be in sorted order by + ** Btree.pBt value. All elements of the list should belong to + ** the same connection. Only shared Btrees are on the list. */ + assert( p->pNext==0 || p->pNext->pBt>p->pBt ); + assert( p->pPrev==0 || p->pPrev->pBtpBt ); + assert( p->pNext==0 || p->pNext->db==p->db ); + assert( p->pPrev==0 || p->pPrev->db==p->db ); + assert( p->sharable || (p->pNext==0 && p->pPrev==0) ); + + /* Check for locking consistency */ + assert( !p->locked || p->wantToLock>0 ); + assert( p->sharable || p->wantToLock==0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->db->mutex) ); + + /* Unless the database is sharable and unlocked, then BtShared.db + ** should already be set correctly. */ + assert( (p->locked==0 && p->sharable) || p->pBt->db==p->db ); + + if( !p->sharable ) return; + p->wantToLock++; + if( p->locked ) return; + + /* In most cases, we should be able to acquire the lock we + ** want without having to go throught the ascending lock + ** procedure that follows. Just be sure not to block. + */ + if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){ + p->pBt->db = p->db; + p->locked = 1; + return; + } + + /* To avoid deadlock, first release all locks with a larger + ** BtShared address. Then acquire our lock. Then reacquire + ** the other BtShared locks that we used to hold in ascending + ** order. + */ + for(pLater=p->pNext; pLater; pLater=pLater->pNext){ + assert( pLater->sharable ); + assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt ); + assert( !pLater->locked || pLater->wantToLock>0 ); + if( pLater->locked ){ + unlockBtreeMutex(pLater); + } + } + lockBtreeMutex(p); + for(pLater=p->pNext; pLater; pLater=pLater->pNext){ + if( pLater->wantToLock ){ + lockBtreeMutex(pLater); + } + } +} + +/* +** Exit the recursive mutex on a Btree. +*/ +void sqlite3BtreeLeave(Btree *p){ + if( p->sharable ){ + assert( p->wantToLock>0 ); + p->wantToLock--; + if( p->wantToLock==0 ){ + unlockBtreeMutex(p); + } + } +} + +#ifndef NDEBUG +/* +** Return true if the BtShared mutex is held on the btree, or if the +** B-Tree is not marked as sharable. +** +** This routine is used only from within assert() statements. 
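
The release-and-reacquire dance described above is easier to see with the btree bookkeeping stripped away. The sketch below is plain pthreads with illustrative names, not btree code; it assumes aLock[] is already sorted by address and aHeld[] records which mutexes this thread currently owns (and therefore which ones to retake):

#include <pthread.h>

static void enterWithOrdering(pthread_mutex_t **aLock, int nLock,
                              int iWant, unsigned char *aHeld){
  int i;
  if( pthread_mutex_trylock(aLock[iWant])==0 ){
    aHeld[iWant] = 1;                  /* fast path: no contention */
    return;
  }
  for(i=iWant+1; i<nLock; i++){        /* drop every later (larger) lock;
                                       ** aHeld[] stays set as a reminder */
    if( aHeld[i] ) pthread_mutex_unlock(aLock[i]);
  }
  pthread_mutex_lock(aLock[iWant]);    /* now it is safe to block */
  aHeld[iWant] = 1;
  for(i=iWant+1; i<nLock; i++){        /* retake the others in ascending order */
    if( aHeld[i] ) pthread_mutex_lock(aLock[i]);
  }
}
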
+*/ +int sqlite3BtreeHoldsMutex(Btree *p){ + assert( p->sharable==0 || p->locked==0 || p->wantToLock>0 ); + assert( p->sharable==0 || p->locked==0 || p->db==p->pBt->db ); + assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->pBt->mutex) ); + assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->db->mutex) ); + + return (p->sharable==0 || p->locked); +} +#endif + + +#ifndef SQLITE_OMIT_INCRBLOB +/* +** Enter and leave a mutex on a Btree given a cursor owned by that +** Btree. These entry points are used by incremental I/O and can be +** omitted if that module is not used. +*/ +void sqlite3BtreeEnterCursor(BtCursor *pCur){ + sqlite3BtreeEnter(pCur->pBtree); +} +void sqlite3BtreeLeaveCursor(BtCursor *pCur){ + sqlite3BtreeLeave(pCur->pBtree); +} +#endif /* SQLITE_OMIT_INCRBLOB */ + + +/* +** Enter the mutex on every Btree associated with a database +** connection. This is needed (for example) prior to parsing +** a statement since we will be comparing table and column names +** against all schemas and we do not want those schemas being +** reset out from under us. +** +** There is a corresponding leave-all procedures. +** +** Enter the mutexes in accending order by BtShared pointer address +** to avoid the possibility of deadlock when two threads with +** two or more btrees in common both try to lock all their btrees +** at the same instant. +*/ +void sqlite3BtreeEnterAll(sqlite3 *db){ + int i; + Btree *p, *pLater; + assert( sqlite3_mutex_held(db->mutex) ); + for(i=0; inDb; i++){ + p = db->aDb[i].pBt; + assert( !p || (p->locked==0 && p->sharable) || p->pBt->db==p->db ); + if( p && p->sharable ){ + p->wantToLock++; + if( !p->locked ){ + assert( p->wantToLock==1 ); + while( p->pPrev ) p = p->pPrev; + while( p->locked && p->pNext ) p = p->pNext; + for(pLater = p->pNext; pLater; pLater=pLater->pNext){ + if( pLater->locked ){ + unlockBtreeMutex(pLater); + } + } + while( p ){ + lockBtreeMutex(p); + p = p->pNext; + } + } + } + } +} +void sqlite3BtreeLeaveAll(sqlite3 *db){ + int i; + Btree *p; + assert( sqlite3_mutex_held(db->mutex) ); + for(i=0; inDb; i++){ + p = db->aDb[i].pBt; + if( p && p->sharable ){ + assert( p->wantToLock>0 ); + p->wantToLock--; + if( p->wantToLock==0 ){ + unlockBtreeMutex(p); + } + } + } +} + +#ifndef NDEBUG +/* +** Return true if the current thread holds the database connection +** mutex and all required BtShared mutexes. +** +** This routine is used inside assert() statements only. +*/ +int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){ + int i; + if( !sqlite3_mutex_held(db->mutex) ){ + return 0; + } + for(i=0; inDb; i++){ + Btree *p; + p = db->aDb[i].pBt; + if( p && p->sharable && + (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){ + return 0; + } + } + return 1; +} +#endif /* NDEBUG */ + +/* +** Add a new Btree pointer to a BtreeMutexArray. +** if the pointer can possibly be shared with +** another database connection. +** +** The pointers are kept in sorted order by pBtree->pBt. That +** way when we go to enter all the mutexes, we can enter them +** in order without every having to backup and retry and without +** worrying about deadlock. +** +** The number of shared btrees will always be small (usually 0 or 1) +** so an insertion sort is an adequate algorithm here. 
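
Since the routine that follows depends on keeping the array sorted by BtShared address, here is the insertion step on its own as a generic sketch (illustrative names; addresses are compared through uintptr_t to keep the comparison well defined):

#include <stdint.h>

#define MAX_SLOTS 16

typedef struct PtrArray PtrArray;
struct PtrArray {
  int n;                 /* number of pointers currently stored */
  void *a[MAX_SLOTS];    /* kept sorted by address, ascending */
};

static void ptrArrayInsert(PtrArray *p, void *pNew){
  int i, j;
  for(i=0; i<p->n; i++){
    if( p->a[i]==pNew ) return;                      /* already present */
    if( (uintptr_t)p->a[i] > (uintptr_t)pNew ){      /* insertion point found */
      for(j=p->n; j>i; j--) p->a[j] = p->a[j-1];     /* shift the tail up */
      p->a[i] = pNew;
      p->n++;
      return;
    }
  }
  p->a[p->n++] = pNew;                               /* largest address so far */
}
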
+*/ +void sqlite3BtreeMutexArrayInsert(BtreeMutexArray *pArray, Btree *pBtree){ + int i, j; + BtShared *pBt; + if( pBtree==0 || pBtree->sharable==0 ) return; +#ifndef NDEBUG + { + for(i=0; inMutex; i++){ + assert( pArray->aBtree[i]!=pBtree ); + } + } +#endif + assert( pArray->nMutex>=0 ); + assert( pArray->nMutexaBtree)-1 ); + pBt = pBtree->pBt; + for(i=0; inMutex; i++){ + assert( pArray->aBtree[i]!=pBtree ); + if( pArray->aBtree[i]->pBt>pBt ){ + for(j=pArray->nMutex; j>i; j--){ + pArray->aBtree[j] = pArray->aBtree[j-1]; + } + pArray->aBtree[i] = pBtree; + pArray->nMutex++; + return; + } + } + pArray->aBtree[pArray->nMutex++] = pBtree; +} + +/* +** Enter the mutex of every btree in the array. This routine is +** called at the beginning of sqlite3VdbeExec(). The mutexes are +** exited at the end of the same function. +*/ +void sqlite3BtreeMutexArrayEnter(BtreeMutexArray *pArray){ + int i; + for(i=0; inMutex; i++){ + Btree *p = pArray->aBtree[i]; + /* Some basic sanity checking */ + assert( i==0 || pArray->aBtree[i-1]->pBtpBt ); + assert( !p->locked || p->wantToLock>0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->db->mutex) ); + + p->wantToLock++; + if( !p->locked && p->sharable ){ + lockBtreeMutex(p); + } + } +} + +/* +** Leave the mutex of every btree in the group. +*/ +void sqlite3BtreeMutexArrayLeave(BtreeMutexArray *pArray){ + int i; + for(i=0; inMutex; i++){ + Btree *p = pArray->aBtree[i]; + /* Some basic sanity checking */ + assert( i==0 || pArray->aBtree[i-1]->pBtpBt ); + assert( p->locked || !p->sharable ); + assert( p->wantToLock>0 ); + + /* We should already hold a lock on the database connection */ + assert( sqlite3_mutex_held(p->db->mutex) ); + + p->wantToLock--; + if( p->wantToLock==0 && p->locked ){ + unlockBtreeMutex(p); + } + } +} + +#else +void sqlite3BtreeEnter(Btree *p){ + p->pBt->db = p->db; +} +void sqlite3BtreeEnterAll(sqlite3 *db){ + int i; + for(i=0; inDb; i++){ + Btree *p = db->aDb[i].pBt; + if( p ){ + p->pBt->db = p->db; + } + } +} +#endif /* if SQLITE_THREADSAFE */ +#endif /* ifndef SQLITE_OMIT_SHARED_CACHE */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/btree.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/btree.c --- sqlite3-3.4.2/src/btree.c 2007-08-13 15:56:44.000000000 +0100 +++ sqlite3-3.6.16/src/btree.c 2009-06-26 19:17:20.000000000 +0100 @@ -9,7 +9,7 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* -** $Id: btree.c,v 1.396 2007/08/13 14:56:44 drh Exp $ +** $Id: btree.c,v 1.645 2009/06/26 16:32:13 shane Exp $ ** ** This file implements a external (disk-based) database using BTrees. ** See the header comment on "btreeInt.h" for additional information. @@ -23,80 +23,154 @@ */ static const char zMagicHeader[] = SQLITE_FILE_HEADER; - /* ** Set this global variable to 1 to enable tracing using the TRACE ** macro. */ -#if SQLITE_TEST -int sqlite3_btree_trace=0; /* True to enable tracing */ +#if 0 +int sqlite3BtreeTrace=1; /* True to enable tracing */ +# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);} +#else +# define TRACE(X) +#endif + + + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** A list of BtShared objects that are eligible for participation +** in shared cache. This variable has file scope during normal builds, +** but the test harness needs to access it so we make it global for +** test builds. +** +** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER. 
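
The TRACE macro introduced at the top of btree.c uses the classic double-parenthesis idiom: the macro takes a single argument, so the caller wraps the whole printf argument list in an extra pair of parentheses. A stand-alone illustration:

#include <stdio.h>

static int sqlite3BtreeTrace = 1;    /* set to 0 to silence tracing */
#define TRACE(X)  if( sqlite3BtreeTrace ){ printf X; fflush(stdout); }

int main(void){
  int pgno = 3;
  TRACE(("ALLOCATE: page %d from the freelist\n", pgno));
  return 0;
}
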
+*/ +#ifdef SQLITE_TEST +BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; +#else +static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; +#endif +#endif /* SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Enable or disable the shared pager and schema features. +** +** This routine has no effect on existing database connections. +** The shared cache setting effects only future calls to +** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(). +*/ +int sqlite3_enable_shared_cache(int enable){ + sqlite3GlobalConfig.sharedCacheEnabled = enable; + return SQLITE_OK; +} #endif + /* ** Forward declaration */ -static int checkReadLocks(Btree*,Pgno,BtCursor*); +static int checkForReadConflicts(Btree*, Pgno, BtCursor*, i64); #ifdef SQLITE_OMIT_SHARED_CACHE /* - ** The functions queryTableLock(), lockTable() and unlockAllTables() + ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(), + ** and clearAllSharedCacheTableLocks() ** manipulate entries in the BtShared.pLock linked list used to store ** shared-cache table level locks. If the library is compiled with the ** shared-cache feature disabled, then there is only ever one user ** of each BtShared structure and so this locking is not necessary. ** So define the lock related functions as no-ops. */ - #define queryTableLock(a,b,c) SQLITE_OK - #define lockTable(a,b,c) SQLITE_OK - #define unlockAllTables(a) -#else + #define querySharedCacheTableLock(a,b,c) SQLITE_OK + #define setSharedCacheTableLock(a,b,c) SQLITE_OK + #define clearAllSharedCacheTableLocks(a) +#endif +#ifndef SQLITE_OMIT_SHARED_CACHE /* ** Query to see if btree handle p may obtain a lock of type eLock ** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return -** SQLITE_OK if the lock may be obtained (by calling lockTable()), or -** SQLITE_LOCKED if not. +** SQLITE_OK if the lock may be obtained (by calling +** setSharedCacheTableLock()), or SQLITE_LOCKED if not. */ -static int queryTableLock(Btree *p, Pgno iTab, u8 eLock){ +static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pIter; + assert( sqlite3BtreeHoldsMutex(p) ); + assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); + assert( p->db!=0 ); + + /* If requesting a write-lock, then the Btree must have an open write + ** transaction on this file. And, obviously, for this to be so there + ** must be an open write transaction on the file itself. + */ + assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) ); + assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE ); + /* This is a no-op if the shared-cache is not enabled */ - if( 0==sqlite3ThreadDataReadOnly()->useSharedData ){ + if( !p->sharable ){ return SQLITE_OK; } - /* This (along with lockTable()) is where the ReadUncommitted flag is - ** dealt with. If the caller is querying for a read-lock and the flag is - ** set, it is unconditionally granted - even if there are write-locks + /* If some other connection is holding an exclusive lock, the + ** requested lock may not be obtained. + */ + if( pBt->pWriter!=p && pBt->isExclusive ){ + sqlite3ConnectionBlocked(p->db, pBt->pWriter->db); + return SQLITE_LOCKED_SHAREDCACHE; + } + + /* This (along with setSharedCacheTableLock()) is where + ** the ReadUncommitted flag is dealt with. + ** If the caller is querying for a read-lock on any table + ** other than the sqlite_master table (table 1) and if the ReadUncommitted + ** flag is set, then the lock granted even if there are write-locks ** on the table. 
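
Boiled down, the rule querySharedCacheTableLock() enforces is that a request conflicts with a lock held by a different connection on the same table unless both are read-locks. A simplified sketch of just that test (illustrative struct and lock values; the ReadUncommitted and pending-writer refinements are omitted):

#define READ_LOCK  1      /* illustrative values only */
#define WRITE_LOCK 2

typedef struct Lock Lock;
struct Lock {
  void *pOwner;           /* connection holding the lock */
  int iTable;             /* root page of the locked table */
  int eLock;              /* READ_LOCK or WRITE_LOCK */
  Lock *pNext;
};

static int lockCompatible(Lock *pList, void *pReq, int iTable, int eLock){
  Lock *p;
  for(p=pList; p; p=p->pNext){
    if( p->pOwner!=pReq && p->iTable==iTable
     && (eLock==WRITE_LOCK || p->eLock==WRITE_LOCK) ){
      return 0;           /* would conflict */
    }
  }
  return 1;               /* may be granted */
}
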
If a write-lock is requested, the ReadUncommitted flag ** is not considered. ** - ** In function lockTable(), if a read-lock is demanded and the + ** In function setSharedCacheTableLock(), if a read-lock is demanded and the ** ReadUncommitted flag is set, no entry is added to the locks list ** (BtShared.pLock). ** - ** To summarize: If the ReadUncommitted flag is set, then read cursors do - ** not create or respect table locks. The locking procedure for a - ** write-cursor does not change. + ** To summarize: If the ReadUncommitted flag is set, then read cursors + ** on non-schema tables do not create or respect table locks. The locking + ** procedure for a write-cursor does not change. */ if( - !p->pSqlite || - 0==(p->pSqlite->flags&SQLITE_ReadUncommitted) || + 0==(p->db->flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK || iTab==MASTER_ROOT ){ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ - if( pIter->pBtree!=p && pIter->iTable==iTab && - (pIter->eLock!=eLock || eLock!=READ_LOCK) ){ - return SQLITE_LOCKED; + /* The condition (pIter->eLock!=eLock) in the following if(...) + ** statement is a simplification of: + ** + ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK) + ** + ** since we know that if eLock==WRITE_LOCK, then no other connection + ** may hold a WRITE_LOCK on any table in this file (since there can + ** only be a single writer). + */ + assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK ); + assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK); + if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){ + sqlite3ConnectionBlocked(p->db, pIter->pBtree->db); + if( eLock==WRITE_LOCK ){ + assert( p==pBt->pWriter ); + pBt->isPending = 1; + } + return SQLITE_LOCKED_SHAREDCACHE; } } } return SQLITE_OK; } +#endif /* !SQLITE_OMIT_SHARED_CACHE */ +#ifndef SQLITE_OMIT_SHARED_CACHE /* ** Add a lock on the table with root-page iTable to the shared-btree used ** by Btree handle p. Parameter eLock must be either READ_LOCK or @@ -105,26 +179,30 @@ ** SQLITE_OK is returned if the lock is added successfully. SQLITE_BUSY and ** SQLITE_NOMEM may also be returned. */ -static int lockTable(Btree *p, Pgno iTable, u8 eLock){ +static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pLock = 0; BtLock *pIter; + assert( sqlite3BtreeHoldsMutex(p) ); + assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); + assert( p->db!=0 ); + /* This is a no-op if the shared-cache is not enabled */ - if( 0==sqlite3ThreadDataReadOnly()->useSharedData ){ + if( !p->sharable ){ return SQLITE_OK; } - assert( SQLITE_OK==queryTableLock(p, iTable, eLock) ); + assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) ); - /* If the read-uncommitted flag is set and a read-lock is requested, - ** return early without adding an entry to the BtShared.pLock list. See - ** comment in function queryTableLock() for more info on handling - ** the ReadUncommitted flag. + /* If the read-uncommitted flag is set and a read-lock is requested on + ** a non-schema table, then the lock is always granted. Return early + ** without adding an entry to the BtShared.pLock list. See + ** comment in function querySharedCacheTableLock() for more info + ** on handling the ReadUncommitted flag. */ if( - (p->pSqlite) && - (p->pSqlite->flags&SQLITE_ReadUncommitted) && + (p->db->flags&SQLITE_ReadUncommitted) && (eLock==READ_LOCK) && iTable!=MASTER_ROOT ){ @@ -143,7 +221,7 @@ ** with table iTable, allocate one and link it into the list. 
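
The comment above argues that, because at most one connection can hold write-locks, the test (pIter->eLock!=eLock) is equivalent to (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK). That claim is small enough to check exhaustively (lock values are illustrative):

#include <assert.h>

#define READ_LOCK  1
#define WRITE_LOCK 2

int main(void){
  int a, b;
  for(a=READ_LOCK; a<=WRITE_LOCK; a++){
    for(b=READ_LOCK; b<=WRITE_LOCK; b++){
      if( a==WRITE_LOCK && b==WRITE_LOCK ) continue;  /* single-writer rule */
      assert( (a!=b) == (a==WRITE_LOCK || b==WRITE_LOCK) );
    }
  }
  return 0;
}
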
*/ if( !pLock ){ - pLock = (BtLock *)sqliteMalloc(sizeof(BtLock)); + pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); if( !pLock ){ return SQLITE_NOMEM; } @@ -164,40 +242,76 @@ return SQLITE_OK; } +#endif /* !SQLITE_OMIT_SHARED_CACHE */ +#ifndef SQLITE_OMIT_SHARED_CACHE /* -** Release all the table locks (locks obtained via calls to the lockTable() -** procedure) held by Btree handle p. +** Release all the table locks (locks obtained via calls to +** the setSharedCacheTableLock() procedure) held by Btree handle p. +** +** This function assumes that handle p has an open read or write +** transaction. If it does not, then the BtShared.isPending variable +** may be incorrectly cleared. */ -static void unlockAllTables(Btree *p){ - BtLock **ppIter = &p->pBt->pLock; +static void clearAllSharedCacheTableLocks(Btree *p){ + BtShared *pBt = p->pBt; + BtLock **ppIter = &pBt->pLock; - /* If the shared-cache extension is not enabled, there should be no - ** locks in the BtShared.pLock list, making this procedure a no-op. Assert - ** that this is the case. - */ - assert( sqlite3ThreadDataReadOnly()->useSharedData || 0==*ppIter ); + assert( sqlite3BtreeHoldsMutex(p) ); + assert( p->sharable || 0==*ppIter ); + assert( p->inTrans>0 ); while( *ppIter ){ BtLock *pLock = *ppIter; + assert( pBt->isExclusive==0 || pBt->pWriter==pLock->pBtree ); + assert( pLock->pBtree->inTrans>=pLock->eLock ); if( pLock->pBtree==p ){ *ppIter = pLock->pNext; - sqliteFree(pLock); + sqlite3_free(pLock); }else{ ppIter = &pLock->pNext; } } + + assert( pBt->isPending==0 || pBt->pWriter ); + if( pBt->pWriter==p ){ + pBt->pWriter = 0; + pBt->isExclusive = 0; + pBt->isPending = 0; + }else if( pBt->nTransaction==2 ){ + /* This function is called when connection p is concluding its + ** transaction. If there currently exists a writer, and p is not + ** that writer, then the number of locks held by connections other + ** than the writer must be about to drop to zero. In this case + ** set the isPending flag to 0. + ** + ** If there is not currently a writer, then BtShared.isPending must + ** be zero already. So this next line is harmless in that case. + */ + pBt->isPending = 0; + } } #endif /* SQLITE_OMIT_SHARED_CACHE */ static void releasePage(MemPage *pPage); /* Forward reference */ +/* +** Verify that the cursor holds a mutex on the BtShared +*/ +#ifndef NDEBUG +static int cursorHoldsMutex(BtCursor *p){ + return sqlite3_mutex_held(p->pBt->mutex); +} +#endif + + #ifndef SQLITE_OMIT_INCRBLOB /* ** Invalidate the overflow page-list cache for cursor pCur, if any. */ static void invalidateOverflowCache(BtCursor *pCur){ - sqliteFree(pCur->aOverflow); + assert( cursorHoldsMutex(pCur) ); + sqlite3_free(pCur->aOverflow); pCur->aOverflow = 0; } @@ -207,6 +321,7 @@ */ static void invalidateAllOverflowCache(BtShared *pBt){ BtCursor *p; + assert( sqlite3_mutex_held(pBt->mutex) ); for(p=pBt->pCursor; p; p=p->pNext){ invalidateOverflowCache(p); } @@ -217,6 +332,80 @@ #endif /* +** Set bit pgno of the BtShared.pHasContent bitvec. This is called +** when a page that previously contained data becomes a free-list leaf +** page. +** +** The BtShared.pHasContent bitvec exists to work around an obscure +** bug caused by the interaction of two useful IO optimizations surrounding +** free-list leaf pages: +** +** 1) When all data is deleted from a page and the page becomes +** a free-list leaf page, the page is not written to the database +** (as free-list leaf pages contain no meaningful data). 
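
clearAllSharedCacheTableLocks() walks the lock list through a pointer-to-pointer, the standard way to unlink matching entries from a singly linked list without carrying a separate "previous" pointer. A generic version of the idiom:

#include <stdlib.h>

typedef struct Node Node;
struct Node {
  void *pOwner;
  Node *pNext;
};

static void removeOwned(Node **ppHead, void *pOwner){
  Node **ppIter = ppHead;
  while( *ppIter ){
    Node *p = *ppIter;
    if( p->pOwner==pOwner ){
      *ppIter = p->pNext;     /* splice the node out of the chain */
      free(p);
    }else{
      ppIter = &p->pNext;     /* advance to the next link field */
    }
  }
}
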
Sometimes +** such a page is not even journalled (as it will not be modified, +** why bother journalling it?). +** +** 2) When a free-list leaf page is reused, its content is not read +** from the database or written to the journal file (why should it +** be, if it is not at all meaningful?). +** +** By themselves, these optimizations work fine and provide a handy +** performance boost to bulk delete or insert operations. However, if +** a page is moved to the free-list and then reused within the same +** transaction, a problem comes up. If the page is not journalled when +** it is moved to the free-list and it is also not journalled when it +** is extracted from the free-list and reused, then the original data +** may be lost. In the event of a rollback, it may not be possible +** to restore the database to its original configuration. +** +** The solution is the BtShared.pHasContent bitvec. Whenever a page is +** moved to become a free-list leaf page, the corresponding bit is +** set in the bitvec. Whenever a leaf page is extracted from the free-list, +** optimization 2 above is ommitted if the corresponding bit is already +** set in BtShared.pHasContent. The contents of the bitvec are cleared +** at the end of every transaction. +*/ +static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ + int rc = SQLITE_OK; + if( !pBt->pHasContent ){ + int nPage; + rc = sqlite3PagerPagecount(pBt->pPager, &nPage); + if( rc==SQLITE_OK ){ + pBt->pHasContent = sqlite3BitvecCreate((u32)nPage); + if( !pBt->pHasContent ){ + rc = SQLITE_NOMEM; + } + } + } + if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){ + rc = sqlite3BitvecSet(pBt->pHasContent, pgno); + } + return rc; +} + +/* +** Query the BtShared.pHasContent vector. +** +** This function is called when a free-list leaf page is removed from the +** free-list for reuse. It returns false if it is safe to retrieve the +** page from the pager layer with the 'no-content' flag set. True otherwise. +*/ +static int btreeGetHasContent(BtShared *pBt, Pgno pgno){ + Bitvec *p = pBt->pHasContent; + return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno))); +} + +/* +** Clear (destroy) the BtShared.pHasContent bitvec. This should be +** invoked at the conclusion of each write-transaction. +*/ +static void btreeClearHasContent(BtShared *pBt){ + sqlite3BitvecDestroy(pBt->pHasContent); + pBt->pHasContent = 0; +} + +/* ** Save the current cursor position in the variables BtCursor.nKey ** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. */ @@ -225,6 +414,7 @@ assert( CURSOR_VALID==pCur->eState ); assert( 0==pCur->pKey ); + assert( cursorHoldsMutex(pCur) ); rc = sqlite3BtreeKeySize(pCur, &pCur->nKey); @@ -234,24 +424,28 @@ ** table, then malloc space for and store the pCur->nKey bytes of key ** data. 
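
Condensed to its essence, the consumer side of the pHasContent bitvec looks like the hypothetical helper below. The function name is illustrative only; btreeGetHasContent() and sqlite3BtreeGetPage() are the routines defined in this patch. The point is that the "no-content" read optimization is taken only when the page has not carried data earlier in the same transaction:

/* Hypothetical reuse path for a free-list leaf page */
static int reuseFreelistLeaf(BtShared *pBt, Pgno pgno, MemPage **ppPage){
  int noContent = !btreeGetHasContent(pBt, pgno);  /* safe to skip the read? */
  return sqlite3BtreeGetPage(pBt, pgno, ppPage, noContent);
}
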
*/ - if( rc==SQLITE_OK && 0==pCur->pPage->intKey){ - void *pKey = sqliteMalloc(pCur->nKey); + if( rc==SQLITE_OK && 0==pCur->apPage[0]->intKey){ + void *pKey = sqlite3Malloc( (int)pCur->nKey ); if( pKey ){ - rc = sqlite3BtreeKey(pCur, 0, pCur->nKey, pKey); + rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ pCur->pKey = pKey; }else{ - sqliteFree(pKey); + sqlite3_free(pKey); } }else{ rc = SQLITE_NOMEM; } } - assert( !pCur->pPage->intKey || !pCur->pKey ); + assert( !pCur->apPage[0]->intKey || !pCur->pKey ); if( rc==SQLITE_OK ){ - releasePage(pCur->pPage); - pCur->pPage = 0; + int i; + for(i=0; i<=pCur->iPage; i++){ + releasePage(pCur->apPage[i]); + pCur->apPage[i] = 0; + } + pCur->iPage = -1; pCur->eState = CURSOR_REQUIRESEEK; } @@ -266,6 +460,8 @@ */ static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){ BtCursor *p; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pExcept==0 || pExcept->pBt==pBt ); for(p=pBt->pCursor; p; p=p->pNext){ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) && p->eState==CURSOR_VALID ){ @@ -281,8 +477,9 @@ /* ** Clear the current cursor position. */ -static void clearCursorPosition(BtCursor *pCur){ - sqliteFree(pCur->pKey); +void sqlite3BtreeClearCursor(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + sqlite3_free(pCur->pKey); pCur->pKey = 0; pCur->eState = CURSOR_INVALID; } @@ -291,36 +488,55 @@ ** Restore the cursor to the position it was in (or as close to as possible) ** when saveCursorPosition() was called. Note that this call deletes the ** saved position info stored by saveCursorPosition(), so there can be -** at most one effective restoreOrClearCursorPosition() call after each +** at most one effective restoreCursorPosition() call after each ** saveCursorPosition(). -** -** If the second argument argument - doSeek - is false, then instead of -** returning the cursor to it's saved position, any saved position is deleted -** and the cursor state set to CURSOR_INVALID. */ -int sqlite3BtreeRestoreOrClearCursorPosition(BtCursor *pCur){ +int sqlite3BtreeRestoreCursorPosition(BtCursor *pCur){ int rc; - assert( pCur->eState==CURSOR_REQUIRESEEK ); -#ifndef SQLITE_OMIT_INCRBLOB - if( pCur->isIncrblobHandle ){ - return SQLITE_ABORT; + assert( cursorHoldsMutex(pCur) ); + assert( pCur->eState>=CURSOR_REQUIRESEEK ); + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; } -#endif pCur->eState = CURSOR_INVALID; rc = sqlite3BtreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &pCur->skip); if( rc==SQLITE_OK ){ - sqliteFree(pCur->pKey); + sqlite3_free(pCur->pKey); pCur->pKey = 0; assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID ); } return rc; } -#define restoreOrClearCursorPosition(p) \ - (p->eState==CURSOR_REQUIRESEEK ? \ - sqlite3BtreeRestoreOrClearCursorPosition(p) : \ +#define restoreCursorPosition(p) \ + (p->eState>=CURSOR_REQUIRESEEK ? \ + sqlite3BtreeRestoreCursorPosition(p) : \ SQLITE_OK) +/* +** Determine whether or not a cursor has moved from the position it +** was last placed at. Cursors can move when the row they are pointing +** at is deleted out from under them. +** +** This routine returns an error code if something goes wrong. The +** integer *pHasMoved is set to one if the cursor has moved and 0 if not. 
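
The save/restore scheme above boils down to: copy the key the cursor points at, drop the page references, and later re-seek by that key on demand. A self-contained miniature of the pattern (illustrative names, no btree types):

#include <stdlib.h>
#include <string.h>

enum { CUR_VALID, CUR_REQUIRESEEK };

typedef struct MiniCursor MiniCursor;
struct MiniCursor {
  int eState;                /* CUR_VALID or CUR_REQUIRESEEK */
  void *pKey;                /* malloc'ed key copy while REQUIRESEEK */
  int nKey;
};

static int saveCursor(MiniCursor *p, const void *pKey, int nKey){
  p->pKey = malloc(nKey);
  if( p->pKey==0 ) return 1;           /* out of memory */
  memcpy(p->pKey, pKey, nKey);
  p->nKey = nKey;
  p->eState = CUR_REQUIRESEEK;         /* page references would be dropped here */
  return 0;
}

static int restoreCursor(MiniCursor *p, int (*xSeek)(const void*, int)){
  int rc = 0;
  if( p->eState==CUR_REQUIRESEEK ){
    rc = xSeek(p->pKey, p->nKey);      /* re-position using the saved key */
    free(p->pKey);
    p->pKey = 0;
    p->eState = CUR_VALID;
  }
  return rc;
}
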
+*/ +int sqlite3BtreeCursorHasMoved(BtCursor *pCur, int *pHasMoved){ + int rc; + + rc = restoreCursorPosition(pCur); + if( rc ){ + *pHasMoved = 1; + return rc; + } + if( pCur->eState!=CURSOR_VALID || pCur->skip!=0 ){ + *pHasMoved = 1; + }else{ + *pHasMoved = 0; + } + return SQLITE_OK; +} + #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Given a page number of a regular database page, return the page @@ -328,9 +544,12 @@ ** input page number. */ static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ - int nPagesPerMapPage = (pBt->usableSize/5)+1; - int iPtrMap = (pgno-2)/nPagesPerMapPage; - int ret = (iPtrMap*nPagesPerMapPage) + 2; + int nPagesPerMapPage; + Pgno iPtrMap, ret; + assert( sqlite3_mutex_held(pBt->mutex) ); + nPagesPerMapPage = (pBt->usableSize/5)+1; + iPtrMap = (pgno-2)/nPagesPerMapPage; + ret = (iPtrMap*nPagesPerMapPage) + 2; if( ret==PENDING_BYTE_PAGE(pBt) ){ ret++; } @@ -351,6 +570,7 @@ int offset; /* Offset in pointer map page */ int rc; + assert( sqlite3_mutex_held(pBt->mutex) ); /* The master-journal page number must never be used as a pointer map page */ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); @@ -363,7 +583,10 @@ if( rc!=SQLITE_OK ){ return rc; } - offset = PTRMAP_PTROFFSET(pBt, key); + offset = PTRMAP_PTROFFSET(iPtrmap, key); + if( offset<0 ){ + return SQLITE_CORRUPT_BKPT; + } pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ @@ -393,6 +616,8 @@ int offset; /* Offset of entry in pointer map */ int rc; + assert( sqlite3_mutex_held(pBt->mutex) ); + iPtrmap = PTRMAP_PAGENO(pBt, key); rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); if( rc!=0 ){ @@ -400,7 +625,7 @@ } pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); - offset = PTRMAP_PTROFFSET(pBt, key); + offset = PTRMAP_PTROFFSET(iPtrmap, key); assert( pEType!=0 ); *pEType = pPtrmap[offset]; if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]); @@ -410,7 +635,10 @@ return SQLITE_OK; } -#endif /* SQLITE_OMIT_AUTOVACUUM */ +#else /* if defined SQLITE_OMIT_AUTOVACUUM */ + #define ptrmapPut(w,x,y,z) SQLITE_OK + #define ptrmapGet(w,x,y,z) SQLITE_OK +#endif /* ** Given a btree page and a cell index (0 means the first cell on @@ -419,22 +647,16 @@ ** ** This routine works only for pages that do not contain overflow cells. */ -#define findCell(pPage, iCell) \ - ((pPage)->aData + get2byte(&(pPage)->aData[(pPage)->cellOffset+2*(iCell)])) -#ifdef SQLITE_TEST -u8 *sqlite3BtreeFindCell(MemPage *pPage, int iCell){ - assert( iCell>=0 ); - assert( iCellaData[pPage->hdrOffset+3]) ); - return findCell(pPage, iCell); -} -#endif +#define findCell(P,I) \ + ((P)->aData + ((P)->maskPage & get2byte(&(P)->aData[(P)->cellOffset+2*(I)]))) /* -** This a more complex version of sqlite3BtreeFindCell() that works for +** This a more complex version of findCell() that works for ** pages that do contain overflow cells. See insert */ static u8 *findOverflowCell(MemPage *pPage, int iCell){ int i; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); for(i=pPage->nOverflow-1; i>=0; i--){ int k; struct _OvflCell *pOvfl; @@ -464,41 +686,42 @@ u8 *pCell, /* Pointer to the cell text. 
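
The pointer-map arithmetic above is compact enough to replay stand-alone. Each ptrmap entry takes five bytes, so one map page describes usableSize/5 pages and also accounts for itself, which is where the usableSize/5 + 1 group size comes from; the PENDING_BYTE adjustment is left out of this sketch:

#include <stdio.h>

static unsigned ptrmapPageno(unsigned usableSize, unsigned pgno){
  unsigned nPagesPerMapPage = usableSize/5 + 1;
  unsigned iPtrMap = (pgno-2)/nPagesPerMapPage;
  return iPtrMap*nPagesPerMapPage + 2;
}

int main(void){
  /* With 1024 usable bytes one map page covers 205 pages, so pages 2..206
  ** map to ptrmap page 2 and page 207 starts the next group;
  ** prints "2 2 207". */
  printf("%u %u %u\n",
         ptrmapPageno(1024, 3),
         ptrmapPageno(1024, 206),
         ptrmapPageno(1024, 207));
  return 0;
}
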
*/ CellInfo *pInfo /* Fill in this structure */ ){ - int n; /* Number bytes in cell content header */ + u16 n; /* Number bytes in cell content header */ u32 nPayload; /* Number of bytes of cell payload */ + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pInfo->pCell = pCell; assert( pPage->leaf==0 || pPage->leaf==1 ); n = pPage->childPtrSize; assert( n==4-4*pPage->leaf ); - if( pPage->hasData ){ - n += getVarint32(&pCell[n], &nPayload); - }else{ - nPayload = 0; - } - pInfo->nData = nPayload; if( pPage->intKey ){ - n += getVarint(&pCell[n], (u64 *)&pInfo->nKey); + if( pPage->hasData ){ + n += getVarint32(&pCell[n], nPayload); + }else{ + nPayload = 0; + } + n += getVarint(&pCell[n], (u64*)&pInfo->nKey); + pInfo->nData = nPayload; }else{ - u32 x; - n += getVarint32(&pCell[n], &x); - pInfo->nKey = x; - nPayload += x; + pInfo->nData = 0; + n += getVarint32(&pCell[n], nPayload); + pInfo->nKey = nPayload; } pInfo->nPayload = nPayload; pInfo->nHeader = n; - if( nPayload<=pPage->maxLocal ){ + if( likely(nPayload<=pPage->maxLocal) ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ int nSize; /* Total size of cell content in bytes */ - pInfo->nLocal = nPayload; - pInfo->iOverflow = 0; nSize = nPayload + n; - if( nSize<4 ){ + pInfo->nLocal = (u16)nPayload; + pInfo->iOverflow = 0; + if( (nSize & ~3)==0 ){ nSize = 4; /* Minimum cell size is 4 */ } - pInfo->nSize = nSize; + pInfo->nSize = (u16)nSize; }else{ /* If the payload will not fit completely on the local page, we have ** to decide how much to store locally and how much to spill onto @@ -517,11 +740,11 @@ maxLocal = pPage->maxLocal; surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize - 4); if( surplus <= maxLocal ){ - pInfo->nLocal = surplus; + pInfo->nLocal = (u16)surplus; }else{ - pInfo->nLocal = minLocal; + pInfo->nLocal = (u16)minLocal; } - pInfo->iOverflow = pInfo->nLocal + n; + pInfo->iOverflow = (u16)(pInfo->nLocal + n); pInfo->nSize = pInfo->iOverflow + 4; } } @@ -541,18 +764,59 @@ ** data header and the local payload, but not any overflow page or ** the space used by the cell pointer. */ +static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ + u8 *pIter = &pCell[pPage->childPtrSize]; + u32 nSize; + +#ifdef SQLITE_DEBUG + /* The value returned by this function should always be the same as + ** the (CellInfo.nSize) value found by doing a full parse of the + ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of + ** this function verifies that this invariant is not violated. */ + CellInfo debuginfo; + sqlite3BtreeParseCellPtr(pPage, pCell, &debuginfo); +#endif + + if( pPage->intKey ){ + u8 *pEnd; + if( pPage->hasData ){ + pIter += getVarint32(pIter, nSize); + }else{ + nSize = 0; + } + + /* pIter now points at the 64-bit integer key value, a variable length + ** integer. The following block moves pIter to point at the first byte + ** past the end of the key value. */ + pEnd = &pIter[9]; + while( (*pIter++)&0x80 && pIterpPage->maxLocal ){ + int minLocal = pPage->minLocal; + nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); + if( nSize>pPage->maxLocal ){ + nSize = minLocal; + } + nSize += 4; + } + nSize += (u32)(pIter - pCell); + + /* The minimum size of any cell is 4 bytes. 
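
The local/overflow split computed above is easy to replay with concrete numbers. The helper below reuses the exact formula from the patch; the minLocal/maxLocal values in main() are made up for the example rather than derived from a real page layout:

#include <stdio.h>

static unsigned localPayload(unsigned nPayload, unsigned minLocal,
                             unsigned maxLocal, unsigned usableSize){
  /* Fill whole overflow pages (usableSize-4 content bytes each) and keep
  ** the remainder local, unless that remainder exceeds maxLocal. */
  unsigned surplus = minLocal + (nPayload - minLocal) % (usableSize - 4);
  return surplus<=maxLocal ? surplus : minLocal;
}

int main(void){
  unsigned usableSize = 1024, minLocal = 104, maxLocal = 231;
  /* 3000-byte payload: the surplus (960) exceeds maxLocal, so only
  ** minLocal bytes stay on the page; prints "104". */
  printf("%u\n", localPayload(3000, minLocal, maxLocal, usableSize));
  return 0;
}
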
*/ + if( nSize<4 ){ + nSize = 4; + } + + assert( nSize==debuginfo.nSize ); + return (u16)nSize; +} #ifndef NDEBUG -static int cellSize(MemPage *pPage, int iCell){ - CellInfo info; - sqlite3BtreeParseCell(pPage, iCell, &info); - return info.nSize; +static u16 cellSize(MemPage *pPage, int iCell){ + return cellSizePtr(pPage, findCell(pPage, iCell)); } #endif -static int cellSizePtr(MemPage *pPage, u8 *pCell){ - CellInfo info; - sqlite3BtreeParseCellPtr(pPage, pCell, &info); - return info.nSize; -} #ifndef SQLITE_OMIT_AUTOVACUUM /* @@ -561,27 +825,16 @@ ** for the overflow page. */ static int ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell){ - if( pCell ){ - CellInfo info; - sqlite3BtreeParseCellPtr(pPage, pCell, &info); - assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); - if( (info.nData+(pPage->intKey?0:info.nKey))>info.nLocal ){ - Pgno ovfl = get4byte(&pCell[info.iOverflow]); - return ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno); - } + CellInfo info; + assert( pCell!=0 ); + sqlite3BtreeParseCellPtr(pPage, pCell, &info); + assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); + if( info.iOverflow ){ + Pgno ovfl = get4byte(&pCell[info.iOverflow]); + return ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno); } return SQLITE_OK; } -/* -** If the cell with index iCell on page pPage contains a pointer -** to an overflow page, insert an entry into the pointer-map -** for the overflow page. -*/ -static int ptrmapPutOvfl(MemPage *pPage, int iCell){ - u8 *pCell; - pCell = findOverflowCell(pPage, iCell); - return ptrmapPutOvflPtr(pPage, pCell); -} #endif @@ -599,7 +852,7 @@ int size; /* Size of a cell */ int usableSize; /* Number of usable bytes on a page */ int cellOffset; /* Offset to the cell pointer array */ - int brk; /* Offset to the cell content area */ + int cbrk; /* Offset to the cell content area */ int nCell; /* Number of cells on the page */ unsigned char *data; /* The page data */ unsigned char *temp; /* Temp area for cell content */ @@ -608,100 +861,114 @@ assert( pPage->pBt!=0 ); assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE ); assert( pPage->nOverflow==0 ); - temp = sqliteMalloc( pPage->pBt->pageSize ); - if( temp==0 ) return SQLITE_NOMEM; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + temp = sqlite3PagerTempSpace(pPage->pBt->pPager); data = pPage->aData; hdr = pPage->hdrOffset; cellOffset = pPage->cellOffset; nCell = pPage->nCell; assert( nCell==get2byte(&data[hdr+3]) ); usableSize = pPage->pBt->usableSize; - brk = get2byte(&data[hdr+5]); - memcpy(&temp[brk], &data[brk], usableSize - brk); - brk = usableSize; + cbrk = get2byte(&data[hdr+5]); + memcpy(&temp[cbrk], &data[cbrk], usableSize - cbrk); + cbrk = usableSize; for(i=0; ipBt->usableSize ); + if( pc>=usableSize ){ + return SQLITE_CORRUPT_BKPT; + } size = cellSizePtr(pPage, &temp[pc]); - brk -= size; - memcpy(&data[brk], &temp[pc], size); - put2byte(pAddr, brk); + cbrk -= size; + if( cbrkusableSize ){ + return SQLITE_CORRUPT_BKPT; + } + assert( cbrk+size<=usableSize && cbrk>=0 ); + memcpy(&data[cbrk], &temp[pc], size); + put2byte(pAddr, cbrk); } - assert( brk>=cellOffset+2*nCell ); - put2byte(&data[hdr+5], brk); + assert( cbrk>=cellOffset+2*nCell ); + put2byte(&data[hdr+5], cbrk); data[hdr+1] = 0; data[hdr+2] = 0; data[hdr+7] = 0; addr = cellOffset+2*nCell; - memset(&data[addr], 0, brk-addr); - sqliteFree(temp); + memset(&data[addr], 0, cbrk-addr); + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + if( cbrk-addr!=pPage->nFree ){ + return SQLITE_CORRUPT_BKPT; + } 
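
cellSizePtr() above skips the rowid with a tight loop because SQLite varints are one to nine bytes long, the high bit of each of the first eight bytes meaning "another byte follows". The sketch below shows the skip loop plus a deliberately simplified decoder for small values (the nine-byte case, where the final byte carries eight payload bits, is ignored here):

#include <stdio.h>

static const unsigned char *skipVarint(const unsigned char *p){
  const unsigned char *pEnd = &p[9];           /* a varint is at most 9 bytes */
  while( (*p++)&0x80 && p<pEnd ){}
  return p;
}

static const unsigned char *getVarint32ish(const unsigned char *p,
                                           unsigned int *pVal){
  unsigned int v = 0;
  int i;
  for(i=0; i<4 && (p[i] & 0x80); i++){
    v = (v<<7) | (p[i] & 0x7f);                /* accumulate 7 bits at a time */
  }
  *pVal = (v<<7) | p[i];                       /* final byte has no high bit */
  return &p[i+1];
}

int main(void){
  unsigned char buf[] = { 0x81, 0x00 };        /* encodes the value 128 */
  unsigned int v;
  getVarint32ish(buf, &v);
  printf("%u %d\n", v, (int)(skipVarint(buf)-buf));   /* prints "128 2" */
  return 0;
}
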
return SQLITE_OK; } /* -** Allocate nByte bytes of space on a page. -** -** Return the index into pPage->aData[] of the first byte of -** the new allocation. Or return 0 if there is not enough free -** space on the page to satisfy the allocation request. -** -** If the page contains nBytes of free space but does not contain -** nBytes of contiguous free space, then this routine automatically -** calls defragementPage() to consolidate all free space before -** allocating the new chunk. +** Allocate nByte bytes of space from within the B-Tree page passed +** as the first argument. Return the index into pPage->aData[] of the +** first byte of allocated space. +** +** The caller guarantees that the space between the end of the cell-offset +** array and the start of the cell-content area is at least nByte bytes +** in size. So this routine can never fail. +** +** If there are already 60 or more bytes of fragments within the page, +** the page is defragmented before returning. If this were not done there +** is a chance that the number of fragmented bytes could eventually +** overflow the single-byte field of the page-header in which this value +** is stored. */ static int allocateSpace(MemPage *pPage, int nByte){ - int addr, pc, hdr; - int size; - int nFrag; + const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ + u8 * const data = pPage->aData; /* Local cache of pPage->aData */ + int nFrag; /* Number of fragmented bytes on pPage */ int top; - int nCell; - int cellOffset; - unsigned char *data; - data = pPage->aData; assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt ); - if( nByte<4 ) nByte = 4; - if( pPage->nFreenOverflow>0 ) return 0; - pPage->nFree -= nByte; - hdr = pPage->hdrOffset; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( nByte>=0 ); /* Minimum cell size is 4 */ + assert( pPage->nFree>=nByte ); + assert( pPage->nOverflow==0 ); + + /* Assert that the space between the cell-offset array and the + ** cell-content area is greater than nByte bytes. + */ + assert( nByte <= ( + get2byte(&data[hdr+5])-(hdr+8+(pPage->leaf?0:4)+2*get2byte(&data[hdr+3])) + )); nFrag = data[hdr+7]; - if( nFrag<60 ){ - /* Search the freelist looking for a slot big enough to satisfy the - ** space request. */ - addr = hdr+1; - while( (pc = get2byte(&data[addr]))>0 ){ - size = get2byte(&data[pc+2]); + if( nFrag>=60 ){ + defragmentPage(pPage); + }else{ + /* Search the freelist looking for a free slot big enough to satisfy + ** the request. The allocation is made from the first free slot in + ** the list that is large enough to accomadate it. + */ + int pc, addr; + for(addr=hdr+1; (pc = get2byte(&data[addr]))>0; addr=pc){ + int size = get2byte(&data[pc+2]); /* Size of free slot */ if( size>=nByte ){ - if( sizecellOffset; - if( nFrag>=60 || cellOffset + 2*nCell > top - nByte ){ - if( defragmentPage(pPage) ) return 0; - top = get2byte(&data[hdr+5]); - } - top -= nByte; - assert( cellOffset + 2*nCell <= top ); + top = get2byte(&data[hdr+5]) - nByte; put2byte(&data[hdr+5], top); return top; } @@ -714,7 +981,7 @@ ** Most of the effort here is involved in coalesing adjacent ** free blocks into a single big free block. 
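
The freelist scan in allocateSpace() reads a chain of two-byte big-endian offsets: each free slot stores the offset of the next slot in its first two bytes and its own size in the next two. Below is a simplified sketch of that first-fit scan without the corruption checks; the fragment counter lives at data[hdr+7], and a leftover smaller than four bytes cannot hold a slot header, so it is counted as fragment space instead:

#include <string.h>

#define get2byte(p)    ( ((p)[0]<<8) | (p)[1] )
#define put2byte(p,v)  ( (p)[0] = (unsigned char)((v)>>8), \
                         (p)[1] = (unsigned char)(v) )

static int allocFromFreelist(unsigned char *data, int hdr, int nByte){
  int addr, pc;
  for(addr=hdr+1; (pc = get2byte(&data[addr]))>0; addr=pc){
    int size = get2byte(&data[pc+2]);        /* size of this free slot */
    if( size>=nByte ){
      int x = size - nByte;                  /* leftover after allocation */
      if( x<4 ){
        memcpy(&data[addr], &data[pc], 2);   /* unlink the whole slot */
        data[hdr+7] += (unsigned char)x;     /* count leftover as a fragment */
        return pc;
      }else{
        put2byte(&data[pc+2], x);            /* shrink the slot in place */
        return pc + x;                       /* hand out its tail */
      }
    }
  }
  return 0;                                  /* no slot is large enough */
}
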
*/ -static void freeSpace(MemPage *pPage, int start, int size){ +static int freeSpace(MemPage *pPage, int start, int size){ int addr, pbegin, hdr; unsigned char *data = pPage->aData; @@ -722,7 +989,8 @@ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( start>=pPage->hdrOffset+6+(pPage->leaf?0:4) ); assert( (start + size)<=pPage->pBt->usableSize ); - if( size<4 ) size = 4; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( size>=0 ); /* Minimum cell size is 4 */ #ifdef SQLITE_SECURE_DELETE /* Overwrite deleted information with zeros when the SECURE_DELETE @@ -735,30 +1003,38 @@ addr = hdr + 1; while( (pbegin = get2byte(&data[addr]))0 ){ assert( pbegin<=pPage->pBt->usableSize-4 ); - assert( pbegin>addr ); + if( pbegin<=addr ) { + return SQLITE_CORRUPT_BKPT; + } addr = pbegin; } - assert( pbegin<=pPage->pBt->usableSize-4 ); + if ( pbegin>pPage->pBt->usableSize-4 ) { + return SQLITE_CORRUPT_BKPT; + } assert( pbegin>addr || pbegin==0 ); put2byte(&data[addr], start); put2byte(&data[start], pbegin); put2byte(&data[start+2], size); - pPage->nFree += size; + pPage->nFree = pPage->nFree + (u16)size; /* Coalesce adjacent free blocks */ addr = pPage->hdrOffset + 1; while( (pbegin = get2byte(&data[addr]))>0 ){ - int pnext, psize; + int pnext, psize, x; assert( pbegin>addr ); assert( pbegin<=pPage->pBt->usableSize-4 ); pnext = get2byte(&data[pbegin]); psize = get2byte(&data[pbegin+2]); if( pbegin + psize + 3 >= pnext && pnext>0 ){ int frag = pnext - (pbegin+psize); - assert( frag<=data[pPage->hdrOffset+7] ); - data[pPage->hdrOffset+7] -= frag; - put2byte(&data[pbegin], get2byte(&data[pnext])); - put2byte(&data[pbegin+2], pnext+get2byte(&data[pnext+2])-pbegin); + if( (frag<0) || (frag>(int)data[pPage->hdrOffset+7]) ){ + return SQLITE_CORRUPT_BKPT; + } + data[pPage->hdrOffset+7] -= (u8)frag; + x = get2byte(&data[pnext]); + put2byte(&data[pbegin], x); + x = pnext + get2byte(&data[pnext+2]) - pbegin; + put2byte(&data[pbegin+2], x); }else{ addr = pbegin; } @@ -769,119 +1045,177 @@ int top; pbegin = get2byte(&data[hdr+1]); memcpy(&data[hdr+1], &data[pbegin], 2); - top = get2byte(&data[hdr+5]); - put2byte(&data[hdr+5], top + get2byte(&data[pbegin+2])); + top = get2byte(&data[hdr+5]) + get2byte(&data[pbegin+2]); + put2byte(&data[hdr+5], top); } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + return SQLITE_OK; } /* ** Decode the flags byte (the first byte of the header) for a page ** and initialize fields of the MemPage structure accordingly. +** +** Only the following combinations are supported. Anything different +** indicates a corrupt database files: +** +** PTF_ZERODATA +** PTF_ZERODATA | PTF_LEAF +** PTF_LEAFDATA | PTF_INTKEY +** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF */ -static void decodeFlags(MemPage *pPage, int flagByte){ +static int decodeFlags(MemPage *pPage, int flagByte){ BtShared *pBt; /* A copy of pPage->pBt */ assert( pPage->hdrOffset==(pPage->pgno==1 ? 
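
The coalescing pass in freeSpace() merges two neighbouring free blocks whenever the gap between them is smaller than the four bytes a separate slot header would need; the gap was fragment space, so it is subtracted from the fragment counter. The same loop isolated as a sketch (it reuses the get2byte()/put2byte() helpers from the previous sketch):

static void coalesceFreeBlocks(unsigned char *data, int hdr){
  int addr = hdr + 1;
  int pbegin;
  while( (pbegin = get2byte(&data[addr]))>0 ){
    int pnext = get2byte(&data[pbegin]);     /* following free block */
    int psize = get2byte(&data[pbegin+2]);   /* size of this block */
    if( pnext>0 && pbegin+psize+3 >= pnext ){
      int frag = pnext - (pbegin+psize);     /* bytes between the two blocks */
      data[hdr+7] -= (unsigned char)frag;
      put2byte(&data[pbegin], get2byte(&data[pnext]));
      put2byte(&data[pbegin+2], pnext + get2byte(&data[pnext+2]) - pbegin);
      /* do not advance: re-test the merged block against its new neighbour */
    }else{
      addr = pbegin;
    }
  }
}
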
100 : 0) ); - pPage->intKey = (flagByte & (PTF_INTKEY|PTF_LEAFDATA))!=0; - pPage->zeroData = (flagByte & PTF_ZERODATA)!=0; - pPage->leaf = (flagByte & PTF_LEAF)!=0; - pPage->childPtrSize = 4*(pPage->leaf==0); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); + flagByte &= ~PTF_LEAF; + pPage->childPtrSize = 4-4*pPage->leaf; pBt = pPage->pBt; - if( flagByte & PTF_LEAFDATA ){ - pPage->leafData = 1; + if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ + pPage->intKey = 1; + pPage->hasData = pPage->leaf; pPage->maxLocal = pBt->maxLeaf; pPage->minLocal = pBt->minLeaf; - }else{ - pPage->leafData = 0; + }else if( flagByte==PTF_ZERODATA ){ + pPage->intKey = 0; + pPage->hasData = 0; pPage->maxLocal = pBt->maxLocal; pPage->minLocal = pBt->minLocal; + }else{ + return SQLITE_CORRUPT_BKPT; } - pPage->hasData = !(pPage->zeroData || (!pPage->leaf && pPage->leafData)); + return SQLITE_OK; } /* ** Initialize the auxiliary information for a disk block. ** -** The pParent parameter must be a pointer to the MemPage which -** is the parent of the page being initialized. The root of a -** BTree has no parent and so for that page, pParent==NULL. -** ** Return SQLITE_OK on success. If we see that the page does ** not contain a well-formed database page, then return ** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not ** guarantee that the page is well-formed. It only shows that ** we failed to detect any corruption. */ -int sqlite3BtreeInitPage( - MemPage *pPage, /* The page to be initialized */ - MemPage *pParent /* The parent. Might be NULL */ -){ - int pc; /* Address of a freeblock within pPage->aData[] */ - int hdr; /* Offset to beginning of page header */ - u8 *data; /* Equal to pPage->aData */ - BtShared *pBt; /* The main btree structure */ - int usableSize; /* Amount of usable space on each page */ - int cellOffset; /* Offset from start of page to first cell pointer */ - int nFree; /* Number of unused bytes on the page */ - int top; /* First byte of the cell content area */ +int sqlite3BtreeInitPage(MemPage *pPage){ - pBt = pPage->pBt; - assert( pBt!=0 ); - assert( pParent==0 || pParent->pBt==pBt ); + assert( pPage->pBt!=0 ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); - assert( pPage->aData == &((unsigned char*)pPage)[-pBt->pageSize] ); - if( pPage->pParent!=pParent && (pPage->pParent!=0 || pPage->isInit) ){ - /* The parent page should never change unless the file is corrupt */ - return SQLITE_CORRUPT_BKPT; - } - if( pPage->isInit ) return SQLITE_OK; - if( pPage->pParent==0 && pParent!=0 ){ - pPage->pParent = pParent; - sqlite3PagerRef(pParent->pDbPage); - } - hdr = pPage->hdrOffset; - data = pPage->aData; - decodeFlags(pPage, data[hdr]); - pPage->nOverflow = 0; - pPage->idxShift = 0; - usableSize = pBt->usableSize; - pPage->cellOffset = cellOffset = hdr + 12 - 4*pPage->leaf; - top = get2byte(&data[hdr+5]); - pPage->nCell = get2byte(&data[hdr+3]); - if( pPage->nCell>MX_CELL(pBt) ){ - /* To many cells for a single page. 
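
For reference while reading sqlite3BtreeInitPage(), these are the header fields it consults. hdr is 100 on page 1 (right after the database file header) and 0 on every other page, and all multi-byte fields are big-endian; the struct and reader below are purely illustrative:

typedef struct PageHdr PageHdr;
struct PageHdr {
  unsigned char flags;     /* data[hdr+0]  page-type flag byte                  */
  int firstFreeblock;      /* data[hdr+1]  offset of first free block (0 = none)*/
  int nCell;               /* data[hdr+3]  number of cells on the page          */
  int cellContent;         /* data[hdr+5]  start of the cell-content area       */
  int nFrag;               /* data[hdr+7]  count of fragmented free bytes       */
};

static void readPageHdr(const unsigned char *data, int hdr, PageHdr *p){
  p->flags          = data[hdr];
  p->firstFreeblock = (data[hdr+1]<<8) | data[hdr+2];
  p->nCell          = (data[hdr+3]<<8) | data[hdr+4];
  p->cellContent    = (data[hdr+5]<<8) | data[hdr+6];
  p->nFrag          = data[hdr+7];
  /* interior pages also keep a 4-byte right-child page number at data[hdr+8];
  ** the cell pointer array starts at hdr+12 (hdr+8 on leaf pages) */
}
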
The page must be corrupt */ - return SQLITE_CORRUPT_BKPT; - } - if( pPage->nCell==0 && pParent!=0 && pParent->pgno!=1 ){ - /* All pages must have at least one cell, except for root pages */ - return SQLITE_CORRUPT_BKPT; - } + assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); + assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); - /* Compute the total free space on the page */ - pc = get2byte(&data[hdr+1]); - nFree = data[hdr+7] + top - (cellOffset + 2*pPage->nCell); - while( pc>0 ){ - int next, size; - if( pc>usableSize-4 ){ - /* Free block is off the page */ - return SQLITE_CORRUPT_BKPT; + if( !pPage->isInit ){ + u16 pc; /* Address of a freeblock within pPage->aData[] */ + u8 hdr; /* Offset to beginning of page header */ + u8 *data; /* Equal to pPage->aData */ + BtShared *pBt; /* The main btree structure */ + u16 usableSize; /* Amount of usable space on each page */ + u16 cellOffset; /* Offset from start of page to first cell pointer */ + u16 nFree; /* Number of unused bytes on the page */ + u16 top; /* First byte of the cell content area */ + + pBt = pPage->pBt; + + hdr = pPage->hdrOffset; + data = pPage->aData; + if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT; + assert( pBt->pageSize>=512 && pBt->pageSize<=32768 ); + pPage->maskPage = pBt->pageSize - 1; + pPage->nOverflow = 0; + usableSize = pBt->usableSize; + pPage->cellOffset = cellOffset = hdr + 12 - 4*pPage->leaf; + top = get2byte(&data[hdr+5]); + pPage->nCell = get2byte(&data[hdr+3]); + if( pPage->nCell>MX_CELL(pBt) ){ + /* To many cells for a single page. The page must be corrupt */ + return SQLITE_CORRUPT_BKPT; + } + + /* A malformed database page might cause use to read past the end + ** of page when parsing a cell. + ** + ** The following block of code checks early to see if a cell extends + ** past the end of a page boundary and causes SQLITE_CORRUPT to be + ** returned if it does. + */ +#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK) + { + int iCellFirst; /* First allowable cell index */ + int iCellLast; /* Last possible cell index */ + int i; /* Index into the cell pointer array */ + int sz; /* Size of a cell */ + + iCellFirst = cellOffset + 2*pPage->nCell; + iCellLast = usableSize - 4; + if( !pPage->leaf ) iCellLast--; + for(i=0; inCell; i++){ + pc = get2byte(&data[cellOffset+i*2]); + if( pciCellLast ){ + return SQLITE_CORRUPT_BKPT; + } + sz = cellSizePtr(pPage, &data[pc]); + if( pc+sz>usableSize ){ + return SQLITE_CORRUPT_BKPT; + } + } + } +#endif + + /* Compute the total free space on the page */ + pc = get2byte(&data[hdr+1]); + nFree = data[hdr+7] + top; + while( pc>0 ){ + u16 next, size; + if( pc>usableSize-4 ){ + /* Free block is off the page */ + return SQLITE_CORRUPT_BKPT; + } + next = get2byte(&data[pc]); + size = get2byte(&data[pc+2]); + if( next>0 && next<=pc+size+3 ){ + /* Free blocks must be in accending order */ + return SQLITE_CORRUPT_BKPT; + } + nFree = nFree + size; + pc = next; } - next = get2byte(&data[pc]); - size = get2byte(&data[pc+2]); - if( next>0 && next<=pc+size+3 ){ - /* Free blocks must be in accending order */ + + /* At this point, nFree contains the sum of the offset to the start + ** of the cell-content area plus the number of free bytes within + ** the cell-content area. If this is greater than the usable-size + ** of the page, then the page must be corrupted. This check also + ** serves to verify that the offset to the start of the cell-content + ** area, according to the page header, lies within the page. 
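
The free-space walk above doubles as a corruption check: the chain must stay on the page, must ascend, and must not claim more free space than the page can hold. A stand-alone rework of that walk, returning -1 where the original returns SQLITE_CORRUPT_BKPT:

static int computeFreeSpace(const unsigned char *data, int hdr,
                            int usableSize, int cellOffset, int nCell){
  int top  = (data[hdr+5]<<8) | data[hdr+6];   /* start of cell content */
  int pc   = (data[hdr+1]<<8) | data[hdr+2];   /* first free block */
  int nFree = data[hdr+7] + top;               /* fragments + content offset */
  while( pc>0 ){
    int next, size;
    if( pc>usableSize-4 ) return -1;           /* block is off the page */
    next = (data[pc]<<8)   | data[pc+1];
    size = (data[pc+2]<<8) | data[pc+3];
    if( next>0 && next<=pc+size+3 ) return -1; /* chain must ascend */
    nFree += size;
    pc = next;
  }
  if( nFree>usableSize ) return -1;            /* more free space than page */
  return nFree - (cellOffset + 2*nCell);       /* truly unused bytes */
}
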
+ */ + if( nFree>usableSize ){ return SQLITE_CORRUPT_BKPT; } - nFree += size; - pc = next; - } - pPage->nFree = nFree; - if( nFree>=usableSize ){ - /* Free space cannot exceed total page size */ - return SQLITE_CORRUPT_BKPT; + pPage->nFree = nFree - (cellOffset + 2*pPage->nCell); + +#if 0 + /* Check that all the offsets in the cell offset array are within range. + ** + ** Omitting this consistency check and using the pPage->maskPage mask + ** to prevent overrunning the page buffer in findCell() results in a + ** 2.5% performance gain. + */ + { + u8 *pOff; /* Iterator used to check all cell offsets are in range */ + u8 *pEnd; /* Pointer to end of cell offset array */ + u8 mask; /* Mask of bits that must be zero in MSB of cell offsets */ + mask = ~(((u8)(pBt->pageSize>>8))-1); + pEnd = &data[cellOffset + pPage->nCell*2]; + for(pOff=&data[cellOffset]; pOff!=pEnd && !((*pOff)&mask); pOff+=2); + if( pOff!=pEnd ){ + return SQLITE_CORRUPT_BKPT; + } } +#endif - pPage->isInit = 1; + pPage->isInit = 1; + } return SQLITE_OK; } @@ -892,15 +1226,17 @@ static void zeroPage(MemPage *pPage, int flags){ unsigned char *data = pPage->aData; BtShared *pBt = pPage->pBt; - int hdr = pPage->hdrOffset; - int first; + u8 hdr = pPage->hdrOffset; + u16 first; assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); - assert( &data[pBt->pageSize] == (unsigned char*)pPage ); + assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); + assert( sqlite3PagerGetData(pPage->pDbPage) == data ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - memset(&data[hdr], 0, pBt->usableSize - hdr); - data[hdr] = flags; - first = hdr + 8 + 4*((flags&PTF_LEAF)==0); + assert( sqlite3_mutex_held(pBt->mutex) ); + /*memset(&data[hdr], 0, pBt->usableSize - hdr);*/ + data[hdr] = (char)flags; + first = hdr + 8 + 4*((flags&PTF_LEAF)==0 ?1:0); memset(&data[hdr+1], 0, 4); data[hdr+7] = 0; put2byte(&data[hdr+5], pBt->usableSize); @@ -909,11 +1245,27 @@ pPage->hdrOffset = hdr; pPage->cellOffset = first; pPage->nOverflow = 0; - pPage->idxShift = 0; + assert( pBt->pageSize>=512 && pBt->pageSize<=32768 ); + pPage->maskPage = pBt->pageSize - 1; pPage->nCell = 0; pPage->isInit = 1; } + +/* +** Convert a DbPage obtained from the pager into a MemPage used by +** the btree layer. +*/ +static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){ + MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); + pPage->aData = sqlite3PagerGetData(pDbPage); + pPage->pDbPage = pDbPage; + pPage->pBt = pBt; + pPage->pgno = pgno; + pPage->hdrOffset = pPage->pgno==1 ? 100 : 0; + return pPage; +} + /* ** Get a page from the pager. Initialize the MemPage.pBt and ** MemPage.aData elements if needed. @@ -932,22 +1284,44 @@ int noContent /* Do not load page content if true */ ){ int rc; - MemPage *pPage; DbPage *pDbPage; + assert( sqlite3_mutex_held(pBt->mutex) ); rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, noContent); if( rc ) return rc; - pPage = (MemPage *)sqlite3PagerGetExtra(pDbPage); - pPage->aData = sqlite3PagerGetData(pDbPage); - pPage->pDbPage = pDbPage; - pPage->pBt = pBt; - pPage->pgno = pgno; - pPage->hdrOffset = pPage->pgno==1 ? 100 : 0; - *ppPage = pPage; + *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); return SQLITE_OK; } /* +** Retrieve a page from the pager cache. If the requested page is not +** already in the pager cache return NULL. Initialize the MemPage.pBt and +** MemPage.aData elements if needed. 
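
The maskPage field set above relies on the page size being a power of two: pageSize-1 is then an all-ones mask, and ANDing a cell offset with it keeps the offset inside the page buffer even when the stored value is garbage. A tiny stand-alone demonstration:

#include <stdio.h>

int main(void){
  unsigned pageSize = 1024;                       /* must be a power of two */
  unsigned maskPage;
  if( pageSize<512 || ((pageSize-1)&pageSize)!=0 ){
    return 1;                                     /* reject invalid sizes */
  }
  maskPage = pageSize - 1;
  /* prints "700 904": in-range offsets pass through, out-of-range ones wrap */
  printf("%u %u\n", 700 & maskPage, 5000 & maskPage);
  return 0;
}
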
+*/ +static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ + DbPage *pDbPage; + assert( sqlite3_mutex_held(pBt->mutex) ); + pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); + if( pDbPage ){ + return btreePageFromDbPage(pDbPage, pgno, pBt); + } + return 0; +} + +/* +** Return the size of the database file in pages. If there is any kind of +** error, return ((unsigned int)-1). +*/ +static Pgno pagerPagecount(BtShared *pBt){ + int nPage = -1; + int rc; + assert( pBt->pPage1 ); + rc = sqlite3PagerPagecount(pBt->pPager, &nPage); + assert( rc==SQLITE_OK || nPage==-1 ); + return (Pgno)nPage; +} + +/* ** Get a page from the pager and initialize it. This routine ** is just a convenience wrapper around separate calls to ** sqlite3BtreeGetPage() and sqlite3BtreeInitPage(). @@ -955,51 +1329,61 @@ static int getAndInitPage( BtShared *pBt, /* The database file */ Pgno pgno, /* Number of the page to get */ - MemPage **ppPage, /* Write the page pointer here */ - MemPage *pParent /* Parent of the page */ + MemPage **ppPage /* Write the page pointer here */ ){ int rc; + MemPage *pPage; + + assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno==0 ){ return SQLITE_CORRUPT_BKPT; } - rc = sqlite3BtreeGetPage(pBt, pgno, ppPage, 0); - if( rc==SQLITE_OK && (*ppPage)->isInit==0 ){ - rc = sqlite3BtreeInitPage(*ppPage, pParent); - } - return rc; -} -/* -** Release a MemPage. This should be called once for each prior + /* It is often the case that the page we want is already in cache. + ** If so, get it directly. This saves us from having to call + ** pagerPagecount() to make sure pgno is within limits, which results + ** in a measureable performance improvements. + */ + *ppPage = pPage = btreePageLookup(pBt, pgno); + if( pPage ){ + /* Page is already in cache */ + rc = SQLITE_OK; + }else{ + /* Page not in cache. Acquire it. */ + if( pgno>pagerPagecount(pBt) ){ + return SQLITE_CORRUPT_BKPT; + } + rc = sqlite3BtreeGetPage(pBt, pgno, ppPage, 0); + if( rc ) return rc; + pPage = *ppPage; + } + if( !pPage->isInit ){ + rc = sqlite3BtreeInitPage(pPage); + } + if( rc!=SQLITE_OK ){ + releasePage(pPage); + *ppPage = 0; + } + return rc; +} + +/* +** Release a MemPage. This should be called once for each prior ** call to sqlite3BtreeGetPage. */ static void releasePage(MemPage *pPage){ if( pPage ){ + assert( pPage->nOverflow==0 || sqlite3PagerPageRefcount(pPage->pDbPage)>1 ); assert( pPage->aData ); assert( pPage->pBt ); - assert( &pPage->aData[pPage->pBt->pageSize]==(unsigned char*)pPage ); + assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); + assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); sqlite3PagerUnref(pPage->pDbPage); } } /* -** This routine is called when the reference count for a page -** reaches zero. We need to unref the pParent pointer when that -** happens. -*/ -static void pageDestructor(DbPage *pData, int pageSize){ - MemPage *pPage; - assert( (pageSize & 7)==0 ); - pPage = (MemPage *)sqlite3PagerGetExtra(pData); - if( pPage->pParent ){ - MemPage *pParent = pPage->pParent; - pPage->pParent = 0; - releasePage(pParent); - } - pPage->isInit = 0; -} - -/* ** During a rollback, when the pager reloads information into the cache ** so that the cache is restored to its original state at the start of ** the transaction, for each page restored this routine is called. @@ -1007,37 +1391,64 @@ ** This routine needs to reset the extra data section at the end of the ** page to agree with the restored data. 
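
getAndInitPage() probes the page cache before validating the page number because a cache hit is already known to refer to a legal page; the comparatively expensive page-count check is only paid on a miss. A toy model of that ordering (everything below is illustrative, not pager code):

typedef struct MiniStore MiniStore;
struct MiniStore {
  int nCached;     /* keys 1..nCached are in the cache */
  int nTotal;      /* keys 1..nTotal exist in the backing store */
};

/* Returns 1 for a cache hit, 0 for "read it from the store",
** -1 for an out-of-range key (the analogue of SQLITE_CORRUPT). */
static int miniFetch(MiniStore *p, int key){
  if( key>=1 && key<=p->nCached ) return 1;   /* hit: no validation needed */
  if( key<1 || key>p->nTotal ) return -1;     /* miss: validate before reading */
  return 0;                                   /* miss: safe to read */
}
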
*/ -static void pageReinit(DbPage *pData, int pageSize){ +static void pageReinit(DbPage *pData){ MemPage *pPage; - assert( (pageSize & 7)==0 ); pPage = (MemPage *)sqlite3PagerGetExtra(pData); + assert( sqlite3PagerPageRefcount(pData)>0 ); if( pPage->isInit ){ + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->isInit = 0; - sqlite3BtreeInitPage(pPage, pPage->pParent); + if( sqlite3PagerPageRefcount(pData)>1 ){ + /* pPage might not be a btree page; it might be an overflow page + ** or ptrmap page or a free page. In those cases, the following + ** call to sqlite3BtreeInitPage() will likely return SQLITE_CORRUPT. + ** But no harm is done by this. And it is very important that + ** sqlite3BtreeInitPage() be called on every btree page so we make + ** the call for every page that comes in for re-initing. */ + sqlite3BtreeInitPage(pPage); + } } } /* +** Invoke the busy handler for a btree. +*/ +static int btreeInvokeBusyHandler(void *pArg){ + BtShared *pBt = (BtShared*)pArg; + assert( pBt->db ); + assert( sqlite3_mutex_held(pBt->db->mutex) ); + return sqlite3InvokeBusyHandler(&pBt->db->busyHandler); +} + +/* ** Open a database file. ** ** zFilename is the name of the database file. If zFilename is NULL ** a new database with a random name is created. This randomly named ** database file will be deleted when sqlite3BtreeClose() is called. +** If zFilename is ":memory:" then an in-memory database is created +** that is automatically destroyed when it is closed. +** +** If the database is already opened in the same database connection +** and we are in shared cache mode, then the open will fail with an +** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared +** objects in the same database connection since doing so will lead +** to problems with locking. */ int sqlite3BtreeOpen( const char *zFilename, /* Name of the file containing the BTree database */ - sqlite3 *pSqlite, /* Associated database handle */ + sqlite3 *db, /* Associated database handle */ Btree **ppBtree, /* Pointer to new Btree object written here */ - int flags /* Options */ + int flags, /* Options */ + int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ ){ - BtShared *pBt; /* Shared part of btree structure */ - Btree *p; /* Handle to return */ - int rc = SQLITE_OK; - int nReserve; - unsigned char zDbHeader[100]; -#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - const ThreadData *pTsdro; -#endif + sqlite3_vfs *pVfs; /* The VFS to use for this btree */ + BtShared *pBt = 0; /* Shared part of btree structure */ + Btree *p; /* Handle to return */ + sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */ + int rc = SQLITE_OK; /* Result code from this function */ + u8 nReserve; /* Byte of unused space on each page */ + unsigned char zDbHeader[100]; /* Database header content */ /* Set the variable isMemdb to true for an in-memory database, or ** false for a file-based database. This symbol is only required if @@ -1052,113 +1463,190 @@ #endif #endif - p = sqliteMalloc(sizeof(Btree)); + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + + pVfs = db->pVfs; + p = sqlite3MallocZero(sizeof(Btree)); if( !p ){ return SQLITE_NOMEM; } p->inTrans = TRANS_NONE; - p->pSqlite = pSqlite; + p->db = db; - /* Try to find an existing Btree structure opened on zFilename. 
*/ #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - pTsdro = sqlite3ThreadDataReadOnly(); - if( pTsdro->useSharedData && zFilename && !isMemdb ){ - char *zFullPathname = sqlite3OsFullPathname(zFilename); - if( !zFullPathname ){ - sqliteFree(p); - return SQLITE_NOMEM; - } - for(pBt=pTsdro->pBtree; pBt; pBt=pBt->pNext){ - assert( pBt->nRef>0 ); - if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager)) ){ - p->pBt = pBt; - *ppBtree = p; - pBt->nRef++; - sqliteFree(zFullPathname); - return SQLITE_OK; + /* + ** If this Btree is a candidate for shared cache, try to find an + ** existing BtShared object that we can share with + */ + if( isMemdb==0 && zFilename && zFilename[0] ){ + if( sqlite3GlobalConfig.sharedCacheEnabled ){ + int nFullPathname = pVfs->mxPathname+1; + char *zFullPathname = sqlite3Malloc(nFullPathname); + sqlite3_mutex *mutexShared; + p->sharable = 1; + db->flags |= SQLITE_SharedCache; + if( !zFullPathname ){ + sqlite3_free(p); + return SQLITE_NOMEM; + } + sqlite3OsFullPathname(pVfs, zFilename, nFullPathname, zFullPathname); + mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN); + sqlite3_mutex_enter(mutexOpen); + mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutexShared); + for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ + assert( pBt->nRef>0 ); + if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager)) + && sqlite3PagerVfs(pBt->pPager)==pVfs ){ + int iDb; + for(iDb=db->nDb-1; iDb>=0; iDb--){ + Btree *pExisting = db->aDb[iDb].pBt; + if( pExisting && pExisting->pBt==pBt ){ + sqlite3_mutex_leave(mutexShared); + sqlite3_mutex_leave(mutexOpen); + sqlite3_free(zFullPathname); + sqlite3_free(p); + return SQLITE_CONSTRAINT; + } + } + p->pBt = pBt; + pBt->nRef++; + break; + } } + sqlite3_mutex_leave(mutexShared); + sqlite3_free(zFullPathname); + } +#ifdef SQLITE_DEBUG + else{ + /* In debug mode, we mark all persistent databases as sharable + ** even when they are not. This exercises the locking code and + ** gives more opportunity for asserts(sqlite3_mutex_held()) + ** statements to find locking problems. + */ + p->sharable = 1; } - sqliteFree(zFullPathname); +#endif } #endif - - /* - ** The following asserts make sure that structures used by the btree are - ** the right size. This is to guard against size changes that result - ** when compiling on a different architecture. 
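The shared-cache branch above reuses an existing BtShared keyed by the file's canonical pathname, and refuses a second Btree on the same BtShared within one connection. From the public API this path is reached by switching shared cache on before opening; a small illustrative sketch, not taken from the patch:

/* Illustrative sketch, not part of the patch: with shared cache enabled,
** two connections opened on the same file end up sharing one BtShared. */
#include <sqlite3.h>

int openSharedPair(const char *zDbFile, sqlite3 **pp1, sqlite3 **pp2){
  int rc = sqlite3_enable_shared_cache(1);   /* affects subsequent opens in this process */
  if( rc==SQLITE_OK ) rc = sqlite3_open(zDbFile, pp1);
  if( rc==SQLITE_OK ) rc = sqlite3_open(zDbFile, pp2);
  return rc;
}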
- */ - assert( sizeof(i64)==8 || sizeof(i64)==4 ); - assert( sizeof(u64)==8 || sizeof(u64)==4 ); - assert( sizeof(u32)==4 ); - assert( sizeof(u16)==2 ); - assert( sizeof(Pgno)==4 ); - - pBt = sqliteMalloc( sizeof(*pBt) ); if( pBt==0 ){ - rc = SQLITE_NOMEM; - goto btree_open_out; - } - rc = sqlite3PagerOpen(&pBt->pPager, zFilename, EXTRA_SIZE, flags); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); - } - if( rc!=SQLITE_OK ){ - goto btree_open_out; - } - p->pBt = pBt; - - sqlite3PagerSetDestructor(pBt->pPager, pageDestructor); - sqlite3PagerSetReiniter(pBt->pPager, pageReinit); - pBt->pCursor = 0; - pBt->pPage1 = 0; - pBt->readOnly = sqlite3PagerIsreadonly(pBt->pPager); - pBt->pageSize = get2byte(&zDbHeader[16]); - if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE - || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ - pBt->pageSize = SQLITE_DEFAULT_PAGE_SIZE; - pBt->maxEmbedFrac = 64; /* 25% */ - pBt->minEmbedFrac = 32; /* 12.5% */ - pBt->minLeafFrac = 32; /* 12.5% */ -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If the magic name ":memory:" will create an in-memory database, then - ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if - ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if - ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a - ** regular file-name. In this case the auto-vacuum applies as per normal. + /* + ** The following asserts make sure that structures used by the btree are + ** the right size. This is to guard against size changes that result + ** when compiling on a different architecture. */ - if( zFilename && !isMemdb ){ - pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); - pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); + assert( sizeof(i64)==8 || sizeof(i64)==4 ); + assert( sizeof(u64)==8 || sizeof(u64)==4 ); + assert( sizeof(u32)==4 ); + assert( sizeof(u16)==2 ); + assert( sizeof(Pgno)==4 ); + + pBt = sqlite3MallocZero( sizeof(*pBt) ); + if( pBt==0 ){ + rc = SQLITE_NOMEM; + goto btree_open_out; + } + rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, + EXTRA_SIZE, flags, vfsFlags); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); } + if( rc!=SQLITE_OK ){ + goto btree_open_out; + } + pBt->db = db; + sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt); + p->pBt = pBt; + + sqlite3PagerSetReiniter(pBt->pPager, pageReinit); + pBt->pCursor = 0; + pBt->pPage1 = 0; + pBt->readOnly = sqlite3PagerIsreadonly(pBt->pPager); + pBt->pageSize = get2byte(&zDbHeader[16]); + if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE + || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ + pBt->pageSize = 0; +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If the magic name ":memory:" will create an in-memory database, then + ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if + ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if + ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a + ** regular file-name. In this case the auto-vacuum applies as per normal. + */ + if( zFilename && !isMemdb ){ + pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); + pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 
1 : 0); + } #endif - nReserve = 0; - }else{ - nReserve = zDbHeader[20]; - pBt->maxEmbedFrac = zDbHeader[21]; - pBt->minEmbedFrac = zDbHeader[22]; - pBt->minLeafFrac = zDbHeader[23]; - pBt->pageSizeFixed = 1; + nReserve = 0; + }else{ + nReserve = zDbHeader[20]; + pBt->pageSizeFixed = 1; #ifndef SQLITE_OMIT_AUTOVACUUM - pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); - pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); + pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); + pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); +#endif + } + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); + if( rc ) goto btree_open_out; + pBt->usableSize = pBt->pageSize - nReserve; + assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* Add the new BtShared object to the linked list sharable BtShareds. + */ + if( p->sharable ){ + sqlite3_mutex *mutexShared; + pBt->nRef = 1; + mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){ + pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST); + if( pBt->mutex==0 ){ + rc = SQLITE_NOMEM; + db->mallocFailed = 0; + goto btree_open_out; + } + } + sqlite3_mutex_enter(mutexShared); + pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList); + GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt; + sqlite3_mutex_leave(mutexShared); + } #endif } - pBt->usableSize = pBt->pageSize - nReserve; - assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ - sqlite3PagerSetPagesize(pBt->pPager, pBt->pageSize); #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - /* Add the new btree to the linked list starting at ThreadData.pBtree. - ** There is no chance that a malloc() may fail inside of the - ** sqlite3ThreadData() call, as the ThreadData structure must have already - ** been allocated for pTsdro->useSharedData to be non-zero. + /* If the new Btree uses a sharable pBtShared, then link the new + ** Btree into the list of all sharable Btrees for the same connection. + ** The list is kept in ascending order by pBt address. */ - if( pTsdro->useSharedData && zFilename && !isMemdb ){ - pBt->pNext = pTsdro->pBtree; - sqlite3ThreadData()->pBtree = pBt; + if( p->sharable ){ + int i; + Btree *pSib; + for(i=0; inDb; i++){ + if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){ + while( pSib->pPrev ){ pSib = pSib->pPrev; } + if( p->pBtpBt ){ + p->pNext = pSib; + p->pPrev = 0; + pSib->pPrev = p; + }else{ + while( pSib->pNext && pSib->pNext->pBtpBt ){ + pSib = pSib->pNext; + } + p->pNext = pSib->pNext; + p->pPrev = pSib; + if( p->pNext ){ + p->pNext->pPrev = p; + } + pSib->pNext = p; + } + break; + } + } } #endif - pBt->nRef = 1; *ppBtree = p; btree_open_out: @@ -1166,25 +1654,85 @@ if( pBt && pBt->pPager ){ sqlite3PagerClose(pBt->pPager); } - sqliteFree(pBt); - sqliteFree(p); + sqlite3_free(pBt); + sqlite3_free(p); *ppBtree = 0; } + if( mutexOpen ){ + assert( sqlite3_mutex_held(mutexOpen) ); + sqlite3_mutex_leave(mutexOpen); + } return rc; } /* +** Decrement the BtShared.nRef counter. When it reaches zero, +** remove the BtShared structure from the sharing list. Return +** true if the BtShared.nRef counter reaches zero and return +** false if it is still positive. 
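The per-connection list of sharable Btrees above is kept in ascending order of BtShared address, which gives one fixed order in which the underlying mutexes can always be acquired. A standalone sketch of the same ordered insert, not part of the patch, using the hypothetical names Node and listInsert:

/* Illustrative sketch, not part of the patch: insert into a doubly linked
** list kept in ascending address order, the same shape as the sharable
** Btree list built above. */
#include <stddef.h>

typedef struct Node Node;
struct Node { Node *pPrev; Node *pNext; };

void listInsert(Node **ppHead, Node *pNew){
  Node *p = *ppHead;
  pNew->pPrev = NULL;
  pNew->pNext = NULL;
  if( p==NULL || pNew < p ){               /* empty list or new head */
    pNew->pNext = p;
    if( p ) p->pPrev = pNew;
    *ppHead = pNew;
    return;
  }
  while( p->pNext && p->pNext < pNew ) p = p->pNext;
  pNew->pNext = p->pNext;                  /* splice in after p */
  pNew->pPrev = p;
  if( p->pNext ) p->pNext->pPrev = pNew;
  p->pNext = pNew;
}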
+*/ +static int removeFromSharingList(BtShared *pBt){ +#ifndef SQLITE_OMIT_SHARED_CACHE + sqlite3_mutex *pMaster; + BtShared *pList; + int removed = 0; + + assert( sqlite3_mutex_notheld(pBt->mutex) ); + pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(pMaster); + pBt->nRef--; + if( pBt->nRef<=0 ){ + if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){ + GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext; + }else{ + pList = GLOBAL(BtShared*,sqlite3SharedCacheList); + while( ALWAYS(pList) && pList->pNext!=pBt ){ + pList=pList->pNext; + } + if( ALWAYS(pList) ){ + pList->pNext = pBt->pNext; + } + } + if( SQLITE_THREADSAFE ){ + sqlite3_mutex_free(pBt->mutex); + } + removed = 1; + } + sqlite3_mutex_leave(pMaster); + return removed; +#else + return 1; +#endif +} + +/* +** Make sure pBt->pTmpSpace points to an allocation of +** MX_CELL_SIZE(pBt) bytes. +*/ +static void allocateTempSpace(BtShared *pBt){ + if( !pBt->pTmpSpace ){ + pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); + } +} + +/* +** Free the pBt->pTmpSpace allocation +*/ +static void freeTempSpace(BtShared *pBt){ + sqlite3PageFree( pBt->pTmpSpace); + pBt->pTmpSpace = 0; +} + +/* ** Close an open database and invalidate all cursors. */ int sqlite3BtreeClose(Btree *p){ BtShared *pBt = p->pBt; BtCursor *pCur; -#ifndef SQLITE_OMIT_SHARED_CACHE - ThreadData *pTsd; -#endif - /* Close all cursors opened via this handle. */ + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); pCur = pBt->pCursor; while( pCur ){ BtCursor *pTmp = pCur; @@ -1199,55 +1747,37 @@ ** this handle. */ sqlite3BtreeRollback(p); - sqliteFree(p); + sqlite3BtreeLeave(p); -#ifndef SQLITE_OMIT_SHARED_CACHE /* If there are still other outstanding references to the shared-btree ** structure, return now. The remainder of this procedure cleans ** up the shared-btree. */ - assert( pBt->nRef>0 ); - pBt->nRef--; - if( pBt->nRef ){ - return SQLITE_OK; + assert( p->wantToLock==0 && p->locked==0 ); + if( !p->sharable || removeFromSharingList(pBt) ){ + /* The pBt is no longer on the sharing list, so we can access + ** it without having to hold the mutex. + ** + ** Clean out and delete the BtShared object. + */ + assert( !pBt->pCursor ); + sqlite3PagerClose(pBt->pPager); + if( pBt->xFreeSchema && pBt->pSchema ){ + pBt->xFreeSchema(pBt->pSchema); + } + sqlite3_free(pBt->pSchema); + freeTempSpace(pBt); + sqlite3_free(pBt); } - /* Remove the shared-btree from the thread wide list. Call - ** ThreadDataReadOnly() and then cast away the const property of the - ** pointer to avoid allocating thread data if it is not really required. - */ - pTsd = (ThreadData *)sqlite3ThreadDataReadOnly(); - if( pTsd->pBtree==pBt ){ - assert( pTsd==sqlite3ThreadData() ); - pTsd->pBtree = pBt->pNext; - }else{ - BtShared *pPrev; - for(pPrev=pTsd->pBtree; pPrev && pPrev->pNext!=pBt; pPrev=pPrev->pNext){} - if( pPrev ){ - assert( pTsd==sqlite3ThreadData() ); - pPrev->pNext = pBt->pNext; - } - } +#ifndef SQLITE_OMIT_SHARED_CACHE + assert( p->wantToLock==0 ); + assert( p->locked==0 ); + if( p->pPrev ) p->pPrev->pNext = p->pNext; + if( p->pNext ) p->pNext->pPrev = p->pPrev; #endif - /* Close the pager and free the shared-btree structure */ - assert( !pBt->pCursor ); - sqlite3PagerClose(pBt->pPager); - if( pBt->xFreeSchema && pBt->pSchema ){ - pBt->xFreeSchema(pBt->pSchema); - } - sqliteFree(pBt->pSchema); - sqliteFree(pBt); - return SQLITE_OK; -} - -/* -** Change the busy handler callback function. 
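removeFromSharingList() above is a drop-a-reference-and-unlink helper: the last reference removes the BtShared from the global list and tells the caller it may free the object. A simplified, single-threaded sketch of that shape, not from the patch, with hypothetical names:

/* Illustrative sketch, not part of the patch: refcount decrement plus
** unlink from a global list, mutexes omitted. */
#include <stddef.h>

typedef struct Shared Shared;
struct Shared { int nRef; Shared *pNext; };

static Shared *sharedList = NULL;        /* global list of shared objects */

/* Drop one reference. Returns 1 if the caller now owns the object and may
** free it, 0 if other users remain. */
int sharedRelease(Shared *p){
  if( --p->nRef > 0 ) return 0;
  if( sharedList==p ){
    sharedList = p->pNext;
  }else{
    Shared *pIter = sharedList;
    while( pIter && pIter->pNext!=p ) pIter = pIter->pNext;
    if( pIter ) pIter->pNext = p->pNext;
  }
  return 1;
}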
-*/ -int sqlite3BtreeSetBusyHandler(Btree *p, BusyHandler *pHandler){ - BtShared *pBt = p->pBt; - pBt->pBusyHandler = pHandler; - sqlite3PagerSetBusyhandler(pBt->pPager, pHandler); + sqlite3_free(p); return SQLITE_OK; } @@ -1268,7 +1798,10 @@ */ int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){ BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); sqlite3PagerSetCachesize(pBt->pPager, mxPage); + sqlite3BtreeLeave(p); return SQLITE_OK; } @@ -1283,7 +1816,10 @@ #ifndef SQLITE_OMIT_PAGER_PRAGMAS int sqlite3BtreeSetSafetyLevel(Btree *p, int level, int fullSync){ BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); sqlite3PagerSetSafetyLevel(pBt->pPager, level, fullSync); + sqlite3BtreeLeave(p); return SQLITE_OK; } #endif @@ -1294,13 +1830,20 @@ */ int sqlite3BtreeSyncDisabled(Btree *p){ BtShared *pBt = p->pBt; + int rc; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); assert( pBt && pBt->pPager ); - return sqlite3PagerNosync(pBt->pPager); + rc = sqlite3PagerNosync(pBt->pPager); + sqlite3BtreeLeave(p); + return rc; } #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) /* ** Change the default pages size and the number of reserved bytes per page. +** Or, if the page size has already been fixed, return SQLITE_READONLY +** without changing anything. ** ** The page size must be a power of 2 between 512 and 65536. If the page ** size supplied does not meet this constraint then the page size is not @@ -1313,23 +1856,35 @@ ** ** If parameter nReserve is less than zero, then the number of reserved ** bytes per page is left unchanged. +** +** If the iFix!=0 then the pageSizeFixed flag is set so that the page size +** and autovacuum mode can no longer be changed. */ -int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve){ +int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){ + int rc = SQLITE_OK; BtShared *pBt = p->pBt; + assert( nReserve>=-1 && nReserve<=255 ); + sqlite3BtreeEnter(p); if( pBt->pageSizeFixed ){ + sqlite3BtreeLeave(p); return SQLITE_READONLY; } if( nReserve<0 ){ nReserve = pBt->pageSize - pBt->usableSize; } + assert( nReserve>=0 && nReserve<=255 ); if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE && ((pageSize-1)&pageSize)==0 ){ assert( (pageSize & 7)==0 ); assert( !pBt->pPage1 && !pBt->pCursor ); - pBt->pageSize = sqlite3PagerSetPagesize(pBt->pPager, pageSize); + pBt->pageSize = (u16)pageSize; + freeTempSpace(pBt); } - pBt->usableSize = pBt->pageSize - nReserve; - return SQLITE_OK; + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); + pBt->usableSize = pBt->pageSize - (u16)nReserve; + if( iFix ) pBt->pageSizeFixed = 1; + sqlite3BtreeLeave(p); + return rc; } /* @@ -1338,8 +1893,18 @@ int sqlite3BtreeGetPageSize(Btree *p){ return p->pBt->pageSize; } + +/* +** Return the number of bytes of space at the end of every page that +** are intentually left unused. This is the "reserved" space that is +** sometimes used by extensions. +*/ int sqlite3BtreeGetReserve(Btree *p){ - return p->pBt->pageSize - p->pBt->usableSize; + int n; + sqlite3BtreeEnter(p); + n = p->pBt->pageSize - p->pBt->usableSize; + sqlite3BtreeLeave(p); + return n; } /* @@ -1348,7 +1913,11 @@ ** Regardless of the value of mxPage, return the maximum page count. 
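The setters above (cache size, page size and reserve, maximum page count) are normally reached through pragmas; once pageSizeFixed is set, sqlite3BtreeSetPageSize() returns SQLITE_READONLY, which is why page_size only sticks while the database file is still empty. An illustrative public-API sketch, not part of the patch:

/* Illustrative sketch, not part of the patch: the pragma layer is the usual
** route into the btree configuration setters changed above. */
#include <sqlite3.h>

int configureDb(sqlite3 *db){
  int rc = sqlite3_exec(db, "PRAGMA page_size=4096", 0, 0, 0);       /* power of 2, 512..SQLITE_MAX_PAGE_SIZE */
  if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "PRAGMA cache_size=2000", 0, 0, 0);        /* sqlite3BtreeSetCacheSize() */
  if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "PRAGMA max_page_count=1048576", 0, 0, 0); /* sqlite3BtreeMaxPageCount() */
  return rc;
}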
*/ int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){ - return sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); + int n; + sqlite3BtreeEnter(p); + n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); + sqlite3BtreeLeave(p); + return n; } #endif /* !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) */ @@ -1363,12 +1932,18 @@ return SQLITE_READONLY; #else BtShared *pBt = p->pBt; - int av = (autoVacuum?1:0); - if( pBt->pageSizeFixed && av!=pBt->autoVacuum ){ - return SQLITE_READONLY; + int rc = SQLITE_OK; + u8 av = (u8)autoVacuum; + + sqlite3BtreeEnter(p); + if( pBt->pageSizeFixed && (av ?1:0)!=pBt->autoVacuum ){ + rc = SQLITE_READONLY; + }else{ + pBt->autoVacuum = av ?1:0; + pBt->incrVacuum = av==2 ?1:0; } - pBt->autoVacuum = av; - return SQLITE_OK; + sqlite3BtreeLeave(p); + return rc; #endif } @@ -1380,11 +1955,15 @@ #ifdef SQLITE_OMIT_AUTOVACUUM return BTREE_AUTOVACUUM_NONE; #else - return ( + int rc; + sqlite3BtreeEnter(p); + rc = ( (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: BTREE_AUTOVACUUM_INCR ); + sqlite3BtreeLeave(p); + return rc; #endif } @@ -1399,19 +1978,26 @@ ** is returned if we run out of memory. */ static int lockBtree(BtShared *pBt){ - int rc, pageSize; + int rc; MemPage *pPage1; - if( pBt->pPage1 ) return SQLITE_OK; + int nPage; + + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pBt->pPage1==0 ); rc = sqlite3BtreeGetPage(pBt, 1, &pPage1, 0); if( rc!=SQLITE_OK ) return rc; - /* Do some checking to help insure the file we opened really is ** a valid database file. */ - rc = SQLITE_NOTADB; - if( sqlite3PagerPagecount(pBt->pPager)>0 ){ + rc = sqlite3PagerPagecount(pBt->pPager, &nPage); + if( rc!=SQLITE_OK ){ + goto page1_init_failed; + }else if( nPage>0 ){ + int pageSize; + int usableSize; u8 *page1 = pPage1->aData; + rc = SQLITE_NOTADB; if( memcmp(page1, zMagicHeader, 16)!=0 ){ goto page1_init_failed; } @@ -1421,19 +2007,44 @@ if( page1[19]>1 ){ goto page1_init_failed; } + + /* The maximum embedded fraction must be exactly 25%. And the minimum + ** embedded fraction must be 12.5% for both leaf-data and non-leaf-data. + ** The original design allowed these amounts to vary, but as of + ** version 3.6.0, we require them to be fixed. + */ + if( memcmp(&page1[21], "\100\040\040",3)!=0 ){ + goto page1_init_failed; + } pageSize = get2byte(&page1[16]); - if( ((pageSize-1)&pageSize)!=0 || pageSize<512 ){ + if( ((pageSize-1)&pageSize)!=0 || pageSize<512 || + (SQLITE_MAX_PAGE_SIZE<32768 && pageSize>SQLITE_MAX_PAGE_SIZE) + ){ goto page1_init_failed; } assert( (pageSize & 7)==0 ); - pBt->pageSize = pageSize; - pBt->usableSize = pageSize - page1[20]; - if( pBt->usableSize<500 ){ + usableSize = pageSize - page1[20]; + if( pageSize!=pBt->pageSize ){ + /* After reading the first page of the database assuming a page size + ** of BtShared.pageSize, we have discovered that the page-size is + ** actually pageSize. Unlock the database, leave pBt->pPage1 at + ** zero and return SQLITE_OK. The caller will call this function + ** again with the correct page-size. 
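lockBtree() above validates the 100-byte header on page 1: the 16-byte magic string, a power-of-two page size of at least 512, and the now-fixed payload fractions 64/32/32. The same checks as a standalone sketch over a header buffer, for illustration only and not taken from the patch:

/* Illustrative sketch, not part of the patch: sanity checks mirroring the
** page-1 header validation in lockBtree(). */
#include <string.h>

int looksLikeDbHeader(const unsigned char aHdr[100]){
  unsigned pageSize;
  if( memcmp(aHdr, "SQLite format 3", 16)!=0 ) return 0;       /* 16-byte magic, including the NUL */
  pageSize = (aHdr[16]<<8) | aHdr[17];                         /* big-endian u16 at offset 16 */
  if( pageSize<512 || (pageSize & (pageSize-1))!=0 ) return 0; /* power of two, at least 512 */
  if( aHdr[21]!=64 || aHdr[22]!=32 || aHdr[23]!=32 ) return 0; /* fixed payload fractions (25%, 12.5%, 12.5%) */
  return 1;
}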
+ */ + releasePage(pPage1); + pBt->usableSize = (u16)usableSize; + pBt->pageSize = (u16)pageSize; + freeTempSpace(pBt); + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, + pageSize-usableSize); + if( rc ) goto page1_init_failed; + return SQLITE_OK; + } + if( usableSize<480 ){ goto page1_init_failed; } - pBt->maxEmbedFrac = page1[21]; - pBt->minEmbedFrac = page1[22]; - pBt->minLeafFrac = page1[23]; + pBt->pageSize = (u16)pageSize; + pBt->usableSize = (u16)usableSize; #ifndef SQLITE_OMIT_AUTOVACUUM pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); @@ -1453,13 +2064,10 @@ ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow ** page pointer. */ - pBt->maxLocal = (pBt->usableSize-12)*pBt->maxEmbedFrac/255 - 23; - pBt->minLocal = (pBt->usableSize-12)*pBt->minEmbedFrac/255 - 23; + pBt->maxLocal = (pBt->usableSize-12)*64/255 - 23; + pBt->minLocal = (pBt->usableSize-12)*32/255 - 23; pBt->maxLeaf = pBt->usableSize - 35; - pBt->minLeaf = (pBt->usableSize-12)*pBt->minLeafFrac/255 - 23; - if( pBt->minLocal>pBt->maxLocal || pBt->maxLocal<0 ){ - goto page1_init_failed; - } + pBt->minLeaf = (pBt->usableSize-12)*32/255 - 23; assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); pBt->pPage1 = pPage1; return SQLITE_OK; @@ -1476,6 +2084,8 @@ */ static int lockBtreeWithRetry(Btree *pRef){ int rc = SQLITE_OK; + + assert( sqlite3BtreeHoldsMutex(pRef) ); if( pRef->inTrans==TRANS_NONE ){ u8 inTransaction = pRef->pBt->inTransaction; btreeIntegrity(pRef); @@ -1502,18 +2112,13 @@ ** If there is a transaction in progress, this routine is a no-op. */ static void unlockBtreeIfUnused(BtShared *pBt){ + assert( sqlite3_mutex_held(pBt->mutex) ); if( pBt->inTransaction==TRANS_NONE && pBt->pCursor==0 && pBt->pPage1!=0 ){ - if( sqlite3PagerRefcount(pBt->pPager)>=1 ){ - if( pBt->pPage1->aData==0 ){ - MemPage *pPage = pBt->pPage1; - pPage->aData = &((u8*)pPage)[-pBt->pageSize]; - pPage->pBt = pBt; - pPage->pgno = 1; - } - releasePage(pBt->pPage1); - } + assert( pBt->pPage1->aData ); + assert( sqlite3PagerRefcount(pBt->pPager)==1 ); + assert( pBt->pPage1->aData ); + releasePage(pBt->pPage1); pBt->pPage1 = 0; - pBt->inStmt = 0; } } @@ -1525,7 +2130,13 @@ MemPage *pP1; unsigned char *data; int rc; - if( sqlite3PagerPagecount(pBt->pPager)>0 ) return SQLITE_OK; + int nPage; + + assert( sqlite3_mutex_held(pBt->mutex) ); + rc = sqlite3PagerPagecount(pBt->pPager, &nPage); + if( rc!=SQLITE_OK || nPage>0 ){ + return rc; + } pP1 = pBt->pPage1; assert( pP1!=0 ); data = pP1->aData; @@ -1536,10 +2147,11 @@ put2byte(&data[16], pBt->pageSize); data[18] = 1; data[19] = 1; - data[20] = pBt->pageSize - pBt->usableSize; - data[21] = pBt->maxEmbedFrac; - data[22] = pBt->minEmbedFrac; - data[23] = pBt->minLeafFrac; + assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize); + data[20] = (u8)(pBt->pageSize - pBt->usableSize); + data[21] = 64; + data[22] = 32; + data[23] = 32; memset(&data[24], 0, 100-24); zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); pBt->pageSizeFixed = 1; @@ -1588,9 +2200,11 @@ ** proceed. */ int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ + sqlite3 *pBlock = 0; BtShared *pBt = p->pBt; int rc = SQLITE_OK; + sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the btree is already in a write-transaction, or it @@ -1598,45 +2212,64 @@ ** is requested, this is a no-op. 
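sqlite3BtreeBeginTrans() above keeps retrying lockBtree() and sqlite3PagerBegin() for as long as the busy handler asks it to. Seen from the public API, that loop is what makes a plain busy timeout enough to wait out a competing writer; a short sketch, not part of the patch:

/* Illustrative sketch, not part of the patch: the busy-handler retry loop
** in sqlite3BtreeBeginTrans() is what services this timeout. */
#include <sqlite3.h>

int beginWrite(sqlite3 *db){
  sqlite3_busy_timeout(db, 2000);                       /* keep retrying for up to 2 seconds */
  return sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, 0);  /* SQLITE_BUSY if a writer persists */
}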
*/ if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ - return SQLITE_OK; + goto trans_begun; } /* Write transactions are not possible on a read-only database */ if( pBt->readOnly && wrflag ){ - return SQLITE_READONLY; + rc = SQLITE_READONLY; + goto trans_begun; } +#ifndef SQLITE_OMIT_SHARED_CACHE /* If another database handle has already opened a write transaction ** on this shared-btree structure and a second write transaction is - ** requested, return SQLITE_BUSY. + ** requested, return SQLITE_LOCKED. */ - if( pBt->inTransaction==TRANS_WRITE && wrflag ){ - return SQLITE_BUSY; + if( (wrflag && pBt->inTransaction==TRANS_WRITE) || pBt->isPending ){ + pBlock = pBt->pWriter->db; + }else if( wrflag>1 ){ + BtLock *pIter; + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + if( pIter->pBtree!=p ){ + pBlock = pIter->pBtree->db; + break; + } + } + } + if( pBlock ){ + sqlite3ConnectionBlocked(p->db, pBlock); + rc = SQLITE_LOCKED_SHAREDCACHE; + goto trans_begun; } +#endif do { - if( pBt->pPage1==0 ){ - rc = lockBtree(pBt); - } + /* Call lockBtree() until either pBt->pPage1 is populated or + ** lockBtree() returns something other than SQLITE_OK. lockBtree() + ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after + ** reading page 1 it discovers that the page-size of the database + ** file is not pBt->pageSize. In this case lockBtree() will update + ** pBt->pageSize to the page-size of the file on disk. + */ + while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); if( rc==SQLITE_OK && wrflag ){ if( pBt->readOnly ){ rc = SQLITE_READONLY; }else{ - rc = sqlite3PagerBegin(pBt->pPage1->pDbPage, wrflag>1); + rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db)); if( rc==SQLITE_OK ){ rc = newDatabase(pBt); } } } - if( rc==SQLITE_OK ){ - if( wrflag ) pBt->inStmt = 0; - }else{ + if( rc!=SQLITE_OK ){ unlockBtreeIfUnused(pBt); } }while( rc==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && - sqlite3InvokeBusyHandler(pBt->pBusyHandler) ); + btreeInvokeBusyHandler(pBt) ); if( rc==SQLITE_OK ){ if( p->inTrans==TRANS_NONE ){ @@ -1646,9 +2279,27 @@ if( p->inTrans>pBt->inTransaction ){ pBt->inTransaction = p->inTrans; } +#ifndef SQLITE_OMIT_SHARED_CACHE + if( wrflag ){ + assert( !pBt->pWriter ); + pBt->pWriter = p; + pBt->isExclusive = (u8)(wrflag>1); + } +#endif + } + + +trans_begun: + if( rc==SQLITE_OK && wrflag ){ + /* This call makes sure that the pager has the correct number of + ** open savepoints. If the second parameter is greater than 0 and + ** the sub-journal is not already open, then it will be opened here. + */ + rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); } btreeIntegrity(p); + sqlite3BtreeLeave(p); return rc; } @@ -1664,10 +2315,11 @@ int nCell; /* Number of cells in page pPage */ int rc; /* Return code */ BtShared *pBt = pPage->pBt; - int isInitOrig = pPage->isInit; + u8 isInitOrig = pPage->isInit; Pgno pgno = pPage->pgno; - rc = sqlite3BtreeInitPage(pPage, pPage->pParent); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + rc = sqlite3BtreeInitPage(pPage); if( rc!=SQLITE_OK ){ goto set_child_ptrmaps_out; } @@ -1699,7 +2351,7 @@ } /* -** Somewhere on pPage, which is guarenteed to be a btree page, not an overflow +** Somewhere on pPage, which is guaranteed to be a btree page, not an overflow ** page, is a pointer to page iFrom. Modify this pointer so that it points to ** iTo. Parameter eType describes the type of pointer to be modified, as ** follows: @@ -1714,6 +2366,8 @@ ** overflow page in the list. 
*/ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); if( eType==PTRMAP_OVERFLOW2 ){ /* The pointer is always the first 4 bytes of the page in this case. */ if( get4byte(pPage->aData)!=iFrom ){ @@ -1721,11 +2375,11 @@ } put4byte(pPage->aData, iTo); }else{ - int isInitOrig = pPage->isInit; + u8 isInitOrig = pPage->isInit; int i; int nCell; - sqlite3BtreeInitPage(pPage, 0); + sqlite3BtreeInitPage(pPage); nCell = pPage->nCell; for(i=0; ipgno; @@ -1779,11 +2434,13 @@ assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pDbPage->pBt==pBt ); - /* Move page iDbPage from it's current location to page number iFreePage */ + /* Move page iDbPage from its current location to page number iFreePage */ TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", iDbPage, iFreePage, iPtrPage, eType)); - rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage); + rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){ return rc; } @@ -1853,14 +2510,11 @@ ** number of pages the database file will contain after this ** process is complete. */ -static int incrVacuumStep(BtShared *pBt, Pgno nFin){ - Pgno iLastPg; /* Last page in the database */ +static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg){ Pgno nFreeList; /* Number of pages still on the free-list */ - iLastPg = pBt->nTrunc; - if( iLastPg==0 ){ - iLastPg = sqlite3PagerPagecount(pBt->pPager); - } + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( iLastPg>nFin ); if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){ int rc; @@ -1868,7 +2522,7 @@ Pgno iPtrPage; nFreeList = get4byte(&pBt->pPage1->aData[36]); - if( nFreeList==0 || nFin==iLastPg ){ + if( nFreeList==0 ){ return SQLITE_DONE; } @@ -1924,20 +2578,34 @@ assert( iFreePgpDbPage); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg); + if( rc==SQLITE_OK ){ + rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, nFin!=0); + } releasePage(pLastPg); if( rc!=SQLITE_OK ){ return rc; - } + } } } - pBt->nTrunc = iLastPg - 1; - while( pBt->nTrunc==PENDING_BYTE_PAGE(pBt)||PTRMAP_ISPAGE(pBt, pBt->nTrunc) ){ - pBt->nTrunc--; + if( nFin==0 ){ + iLastPg--; + while( iLastPg==PENDING_BYTE_PAGE(pBt)||PTRMAP_ISPAGE(pBt, iLastPg) ){ + if( PTRMAP_ISPAGE(pBt, iLastPg) ){ + MemPage *pPg; + int rc = sqlite3BtreeGetPage(pBt, iLastPg, &pPg, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3PagerWrite(pPg->pDbPage); + releasePage(pPg); + if( rc!=SQLITE_OK ){ + return rc; + } + } + iLastPg--; + } + sqlite3PagerTruncateImage(pBt->pPager, iLastPg); } return SQLITE_OK; } @@ -1947,17 +2615,23 @@ ** It performs a single unit of work towards an incremental vacuum. ** ** If the incremental vacuum is finished after this function has run, -** SQLITE_DONE is returned. If it is not finished, but no error occured, +** SQLITE_DONE is returned. If it is not finished, but no error occurred, ** SQLITE_OK is returned. Otherwise an SQLite error code. 
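incrVacuumStep() above relocates pages from the end of the file into free slots and then truncates the image, one unit of work per call. Driving it from SQL looks like the sketch below, which is an illustration and not part of the patch; auto_vacuum has to be chosen while the file is still empty or be followed by a VACUUM.

/* Illustrative sketch, not part of the patch: incremental vacuum from the
** public API. */
#include <sqlite3.h>

int reclaimSomePages(sqlite3 *db){
  int rc = sqlite3_exec(db, "PRAGMA auto_vacuum=incremental", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* Move up to 100 pages from the end of the file into free slots,
    ** then truncate the file image. */
    rc = sqlite3_exec(db, "PRAGMA incremental_vacuum(100)", 0, 0, 0);
  }
  return rc;
}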
*/ int sqlite3BtreeIncrVacuum(Btree *p){ + int rc; BtShared *pBt = p->pBt; + + sqlite3BtreeEnter(p); assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE ); if( !pBt->autoVacuum ){ - return SQLITE_DONE; + rc = SQLITE_DONE; + }else{ + invalidateAllOverflowCache(pBt); + rc = incrVacuumStep(pBt, 0, pagerPagecount(pBt)); } - invalidateAllOverflowCache(pBt); - return incrVacuumStep(pBt, 0); + sqlite3BtreeLeave(p); + return rc; } /* @@ -1969,67 +2643,62 @@ ** i.e. the database has been reorganized so that only the first *pnTrunc ** pages are in use. */ -static int autoVacuumCommit(BtShared *pBt, Pgno *pnTrunc){ +static int autoVacuumCommit(BtShared *pBt){ int rc = SQLITE_OK; Pager *pPager = pBt->pPager; -#ifndef NDEBUG - int nRef = sqlite3PagerRefcount(pPager); -#endif + VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager) ); + assert( sqlite3_mutex_held(pBt->mutex) ); invalidateAllOverflowCache(pBt); assert(pBt->autoVacuum); if( !pBt->incrVacuum ){ - Pgno nFin = 0; - - if( pBt->nTrunc==0 ){ - Pgno nFree; - Pgno nPtrmap; - const int pgsz = pBt->pageSize; - Pgno nOrig = sqlite3PagerPagecount(pBt->pPager); + Pgno nFin; + Pgno nFree; + Pgno nPtrmap; + Pgno iFree; + const int pgsz = pBt->pageSize; + Pgno nOrig = pagerPagecount(pBt); + + if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){ + /* It is not possible to create a database for which the final page + ** is either a pointer-map page or the pending-byte page. If one + ** is encountered, this indicates corruption. + */ + return SQLITE_CORRUPT_BKPT; + } - if( PTRMAP_ISPAGE(pBt, nOrig) ){ - return SQLITE_CORRUPT_BKPT; - } - if( nOrig==PENDING_BYTE_PAGE(pBt) ){ - nOrig--; - } - nFree = get4byte(&pBt->pPage1->aData[36]); - nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+pgsz/5)/(pgsz/5); - nFin = nOrig - nFree - nPtrmap; - if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<=PENDING_BYTE_PAGE(pBt) ){ - nFin--; - } - while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){ - nFin--; - } + nFree = get4byte(&pBt->pPage1->aData[36]); + nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+pgsz/5)/(pgsz/5); + nFin = nOrig - nFree - nPtrmap; + if( nOrig>PENDING_BYTE_PAGE(pBt) && nFinnOrig ) return SQLITE_CORRUPT_BKPT; - while( rc==SQLITE_OK ){ - rc = incrVacuumStep(pBt, nFin); + for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){ + rc = incrVacuumStep(pBt, nFin, iFree); } - if( rc==SQLITE_DONE ){ - assert(nFin==0 || pBt->nTrunc==0 || nFin<=pBt->nTrunc); + if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ rc = SQLITE_OK; - if( pBt->nTrunc ){ - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - put4byte(&pBt->pPage1->aData[32], 0); - put4byte(&pBt->pPage1->aData[36], 0); - pBt->nTrunc = nFin; - } + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + put4byte(&pBt->pPage1->aData[32], 0); + put4byte(&pBt->pPage1->aData[36], 0); + sqlite3PagerTruncateImage(pBt->pPager, nFin); } if( rc!=SQLITE_OK ){ sqlite3PagerRollback(pPager); } } - if( rc==SQLITE_OK ){ - *pnTrunc = pBt->nTrunc; - pBt->nTrunc = 0; - } assert( nRef==sqlite3PagerRefcount(pPager) ); return rc; } +#else /* ifndef SQLITE_OMIT_AUTOVACUUM */ +# define setChildPtrmaps(x) SQLITE_OK #endif /* @@ -2042,7 +2711,7 @@ ** database are written into the database file and flushed to oxide. ** At the end of this call, the rollback journal still exists on the ** disk and we are still holding all locks, so the transaction has not -** committed. See sqlite3BtreeCommit() for the second phase of the +** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the ** commit process. 
** ** This call is a no-op if no write-transaction is currently active on pBt. @@ -2062,16 +2731,18 @@ int rc = SQLITE_OK; if( p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; - Pgno nTrunc = 0; + sqlite3BtreeEnter(p); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - rc = autoVacuumCommit(pBt, &nTrunc); + rc = autoVacuumCommit(pBt); if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); return rc; } } #endif - rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, nTrunc); + rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0); + sqlite3BtreeLeave(p); } return rc; } @@ -2080,12 +2751,13 @@ ** Commit the transaction currently in progress. ** ** This routine implements the second phase of a 2-phase commit. The -** sqlite3BtreeSync() routine does the first phase and should be invoked -** prior to calling this routine. The sqlite3BtreeSync() routine did -** all the work of writing information out to disk and flushing the +** sqlite3BtreeCommitPhaseOne() routine does the first phase and should +** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne() +** routine did all the work of writing information out to disk and flushing the ** contents so that they are written onto the disk platter. All this -** routine has to do is delete or truncate the rollback journal -** (which causes the transaction to commit) and drop locks. +** routine has to do is delete or truncate or zero the header in the +** the rollback journal (which causes the transaction to commit) and +** drop locks. ** ** This will release the write lock on the database file. If there ** are no active cursors, it also releases the read lock. @@ -2093,6 +2765,7 @@ int sqlite3BtreeCommitPhaseTwo(Btree *p){ BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the handle has a write-transaction open, commit the shared-btrees @@ -2104,12 +2777,11 @@ assert( pBt->nTransaction>0 ); rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); return rc; } pBt->inTransaction = TRANS_READ; - pBt->inStmt = 0; } - unlockAllTables(p); /* If the handle has any kind of transaction open, decrement the transaction ** count of the shared btree. If the transaction count reaches 0, set @@ -2117,19 +2789,22 @@ ** will unlock the pager. */ if( p->inTrans!=TRANS_NONE ){ + clearAllSharedCacheTableLocks(p); pBt->nTransaction--; if( 0==pBt->nTransaction ){ pBt->inTransaction = TRANS_NONE; } } - /* Set the handles current transaction state to TRANS_NONE and unlock + /* Set the current transaction state to TRANS_NONE and unlock ** the pager if this call closed the only read or write transaction. */ + btreeClearHasContent(pBt); p->inTrans = TRANS_NONE; unlockBtreeIfUnused(pBt); btreeIntegrity(p); + sqlite3BtreeLeave(p); return SQLITE_OK; } @@ -2138,10 +2813,12 @@ */ int sqlite3BtreeCommit(Btree *p){ int rc; + sqlite3BtreeEnter(p); rc = sqlite3BtreeCommitPhaseOne(p, 0); if( rc==SQLITE_OK ){ rc = sqlite3BtreeCommitPhaseTwo(p); } + sqlite3BtreeLeave(p); return rc; } @@ -2150,18 +2827,56 @@ ** Return the number of write-cursors open on this handle. This is for use ** in assert() expressions, so it is only compiled if NDEBUG is not ** defined. +** +** For the purposes of this routine, a write-cursor is any cursor that +** is capable of writing to the databse. That means the cursor was +** originally opened for writing and the cursor has not be disabled +** by having its state changed to CURSOR_FAULT. 
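The two commit phases above matter most when a transaction spans more than one database file: phase one syncs each file and the master journal, phase two deletes or zeroes the journals. A public-API sketch that exercises that path, not part of the patch; second.db and the log tables are hypothetical.

/* Illustrative sketch, not part of the patch: a transaction that touches an
** ATTACHed database goes through both commit phases with a master journal. */
#include <sqlite3.h>

int crossDbMove(sqlite3 *db){
  int rc = sqlite3_exec(db, "ATTACH 'second.db' AS aux", 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db,
        "BEGIN;"
        "INSERT INTO aux.log SELECT * FROM main.log;"
        "DELETE FROM main.log;"
        "COMMIT;", 0, 0, 0);
  }
  return rc;
}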
*/ static int countWriteCursors(BtShared *pBt){ BtCursor *pCur; int r = 0; for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ - if( pCur->wrFlag ) r++; + if( pCur->wrFlag && pCur->eState!=CURSOR_FAULT ) r++; } return r; } #endif /* +** This routine sets the state to CURSOR_FAULT and the error +** code to errCode for every cursor on BtShared that pBtree +** references. +** +** Every cursor is tripped, including cursors that belong +** to other database connections that happen to be sharing +** the cache with pBtree. +** +** This routine gets called when a rollback occurs. +** All cursors using the same cache must be tripped +** to prevent them from trying to use the btree after +** the rollback. The rollback may have deleted tables +** or moved root pages, so it is not sufficient to +** save the state of the cursor. The cursor must be +** invalidated. +*/ +void sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode){ + BtCursor *p; + sqlite3BtreeEnter(pBtree); + for(p=pBtree->pBt->pCursor; p; p=p->pNext){ + int i; + sqlite3BtreeClearCursor(p); + p->eState = CURSOR_FAULT; + p->skip = errCode; + for(i=0; i<=p->iPage; i++){ + releasePage(p->apPage[i]); + p->apPage[i] = 0; + } + } + sqlite3BtreeLeave(pBtree); +} + +/* ** Rollback the transaction in progress. All cursors will be ** invalided by this operation. Any attempt to use a cursor ** that was open at the beginning of this operation will result @@ -2175,34 +2890,25 @@ BtShared *pBt = p->pBt; MemPage *pPage1; + sqlite3BtreeEnter(p); rc = saveAllCursors(pBt, 0, 0); #ifndef SQLITE_OMIT_SHARED_CACHE if( rc!=SQLITE_OK ){ - /* This is a horrible situation. An IO or malloc() error occured whilst + /* This is a horrible situation. An IO or malloc() error occurred whilst ** trying to save cursor positions. If this is an automatic rollback (as ** the result of a constraint, malloc() failure or IO error) then ** the cache may be internally inconsistent (not contain valid trees) so ** we cannot simply return the error to the caller. Instead, abort ** all queries that may be using any of the cursors that failed to save. */ - while( pBt->pCursor ){ - sqlite3 *db = pBt->pCursor->pBtree->pSqlite; - if( db ){ - sqlite3AbortOtherActiveVdbes(db, 0); - } - } + sqlite3BtreeTripAllCursors(p, rc); } #endif btreeIntegrity(p); - unlockAllTables(p); if( p->inTrans==TRANS_WRITE ){ int rc2; -#ifndef SQLITE_OMIT_AUTOVACUUM - pBt->nTrunc = 0; -#endif - assert( TRANS_WRITE==pBt->inTransaction ); rc2 = sqlite3PagerRollback(pBt->pPager); if( rc2!=SQLITE_OK ){ @@ -2220,6 +2926,7 @@ } if( p->inTrans!=TRANS_NONE ){ + clearAllSharedCacheTableLocks(p); assert( pBt->nTransaction>0 ); pBt->nTransaction--; if( 0==pBt->nTransaction ){ @@ -2227,97 +2934,85 @@ } } + btreeClearHasContent(pBt); p->inTrans = TRANS_NONE; - pBt->inStmt = 0; unlockBtreeIfUnused(pBt); btreeIntegrity(p); + sqlite3BtreeLeave(p); return rc; } /* -** Start a statement subtransaction. The subtransaction can -** can be rolled back independently of the main transaction. -** You must start a transaction before starting a subtransaction. -** The subtransaction is ended automatically if the main transaction -** commits or rolls back. -** -** Only one subtransaction may be active at a time. It is an error to try -** to start a new subtransaction if another subtransaction is already active. +** Start a statement subtransaction. The subtransaction can can be rolled +** back independently of the main transaction. You must start a transaction +** before starting a subtransaction. 
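sqlite3BtreeTripAllCursors() above is what leaves every open cursor unusable after a rollback. One place this shows through the public API is an open incremental-blob handle, whose reads fail after ROLLBACK (typically with SQLITE_ABORT); a sketch, not part of the patch, with a hypothetical docs/body table and column.

/* Illustrative sketch, not part of the patch: a blob handle opened inside a
** transaction stops working once the transaction is rolled back. */
#include <sqlite3.h>

int readAfterRollback(sqlite3 *db, sqlite3_int64 rowid, char *buf, int n){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
  if( rc==SQLITE_OK ) rc = sqlite3_blob_open(db, "main", "docs", "body", rowid, 0, &pBlob);
  if( rc==SQLITE_OK ){
    sqlite3_exec(db, "ROLLBACK", 0, 0, 0);        /* trips every open cursor */
    rc = sqlite3_blob_read(pBlob, buf, n, 0);     /* now fails; the handle is invalid */
    sqlite3_blob_close(pBlob);
  }
  return rc;
}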
The subtransaction is ended automatically +** if the main transaction commits or rolls back. ** ** Statement subtransactions are used around individual SQL statements ** that are contained within a BEGIN...COMMIT block. If a constraint ** error occurs within the statement, the effect of that one statement ** can be rolled back without having to rollback the entire transaction. +** +** A statement sub-transaction is implemented as an anonymous savepoint. The +** value passed as the second parameter is the total number of savepoints, +** including the new anonymous savepoint, open on the B-Tree. i.e. if there +** are no active savepoints and no other statement-transactions open, +** iStatement is 1. This anonymous savepoint can be released or rolled back +** using the sqlite3BtreeSavepoint() function. */ -int sqlite3BtreeBeginStmt(Btree *p){ - int rc; - BtShared *pBt = p->pBt; - if( (p->inTrans!=TRANS_WRITE) || pBt->inStmt ){ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } - assert( pBt->inTransaction==TRANS_WRITE ); - rc = pBt->readOnly ? SQLITE_OK : sqlite3PagerStmtBegin(pBt->pPager); - pBt->inStmt = 1; - return rc; -} - - -/* -** Commit the statment subtransaction currently in progress. If no -** subtransaction is active, this is a no-op. -*/ -int sqlite3BtreeCommitStmt(Btree *p){ +int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ int rc; BtShared *pBt = p->pBt; - if( pBt->inStmt && !pBt->readOnly ){ - rc = sqlite3PagerStmtCommit(pBt->pPager); + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); + assert( pBt->readOnly==0 ); + assert( iStatement>0 ); + assert( iStatement>p->db->nSavepoint ); + if( NEVER(p->inTrans!=TRANS_WRITE || pBt->readOnly) ){ + rc = SQLITE_INTERNAL; }else{ - rc = SQLITE_OK; + assert( pBt->inTransaction==TRANS_WRITE ); + /* At the pager level, a statement transaction is a savepoint with + ** an index greater than all savepoints created explicitly using + ** SQL statements. It is illegal to open, release or rollback any + ** such savepoints while the statement transaction savepoint is active. + */ + rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); } - pBt->inStmt = 0; + sqlite3BtreeLeave(p); return rc; } /* -** Rollback the active statement subtransaction. If no subtransaction -** is active this routine is a no-op. +** The second argument to this function, op, is always SAVEPOINT_ROLLBACK +** or SAVEPOINT_RELEASE. This function either releases or rolls back the +** savepoint identified by parameter iSavepoint, depending on the value +** of op. ** -** All cursors will be invalidated by this operation. Any attempt -** to use a cursor that was open at the beginning of this operation -** will result in an error. +** Normally, iSavepoint is greater than or equal to zero. However, if op is +** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the +** contents of the entire transaction are rolled back. This is different +** from a normal transaction rollback, as no locks are released and the +** transaction remains open. 
*/ -int sqlite3BtreeRollbackStmt(Btree *p){ +int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ int rc = SQLITE_OK; - BtShared *pBt = p->pBt; - sqlite3MallocDisallow(); - if( pBt->inStmt && !pBt->readOnly ){ - rc = sqlite3PagerStmtRollback(pBt->pPager); - assert( countWriteCursors(pBt)==0 ); - pBt->inStmt = 0; + if( p && p->inTrans==TRANS_WRITE ){ + BtShared *pBt = p->pBt; + assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); + assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); + sqlite3BtreeEnter(p); + rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); + if( rc==SQLITE_OK ){ + rc = newDatabase(pBt); + } + sqlite3BtreeLeave(p); } - sqlite3MallocAllow(); return rc; } /* -** Default key comparison function to be used if no comparison function -** is specified on the sqlite3BtreeCursor() call. -*/ -static int dfltCompare( - void *NotUsed, /* User data is not used */ - int n1, const void *p1, /* First key to compare */ - int n2, const void *p2 /* Second key to compare */ -){ - int c; - c = memcmp(p1, p2, n1pBt; - *ppCur = 0; + assert( sqlite3BtreeHoldsMutex(p) ); + assert( wrFlag==0 || wrFlag==1 ); if( wrFlag ){ - if( pBt->readOnly ){ + assert( !pBt->readOnly ); + if( NEVER(pBt->readOnly) ){ return SQLITE_READONLY; } - if( checkReadLocks(p, iTable, 0) ){ - return SQLITE_LOCKED; + rc = checkForReadConflicts(p, iTable, 0, 0); + if( rc!=SQLITE_OK ){ + assert( rc==SQLITE_LOCKED_SHAREDCACHE ); + return rc; } } @@ -2377,21 +3072,17 @@ if( rc!=SQLITE_OK ){ return rc; } - if( pBt->readOnly && wrFlag ){ - return SQLITE_READONLY; - } - } - pCur = sqliteMalloc( sizeof(*pCur) ); - if( pCur==0 ){ - rc = SQLITE_NOMEM; - goto create_cursor_exception; } pCur->pgnoRoot = (Pgno)iTable; - if( iTable==1 && sqlite3PagerPagecount(pBt->pPager)==0 ){ + rc = sqlite3PagerPagecount(pBt->pPager, (int *)&nPage); + if( rc!=SQLITE_OK ){ + return rc; + } + if( iTable==1 && nPage==0 ){ rc = SQLITE_EMPTY; goto create_cursor_exception; } - rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->pPage, 0); + rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->apPage[0]); if( rc!=SQLITE_OK ){ goto create_cursor_exception; } @@ -2400,72 +3091,143 @@ ** variables, link the cursor into the BtShared list and set *ppCur (the ** output argument to this function). */ - pCur->xCompare = xCmp ? xCmp : dfltCompare; - pCur->pArg = pArg; + pCur->pKeyInfo = pKeyInfo; pCur->pBtree = p; - pCur->wrFlag = wrFlag; + pCur->pBt = pBt; + pCur->wrFlag = (u8)wrFlag; pCur->pNext = pBt->pCursor; if( pCur->pNext ){ pCur->pNext->pPrev = pCur; } pBt->pCursor = pCur; pCur->eState = CURSOR_INVALID; - *ppCur = pCur; + pCur->cachedRowid = 0; return SQLITE_OK; + create_cursor_exception: - if( pCur ){ - releasePage(pCur->pPage); - sqliteFree(pCur); - } + releasePage(pCur->apPage[0]); unlockBtreeIfUnused(pBt); return rc; } +int sqlite3BtreeCursor( + Btree *p, /* The btree */ + int iTable, /* Root page of table to open */ + int wrFlag, /* 1 to write. 0 read-only */ + struct KeyInfo *pKeyInfo, /* First arg to xCompare() */ + BtCursor *pCur /* Write new cursor here */ +){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Return the size of a BtCursor object in bytes. +** +** This interfaces is needed so that users of cursors can preallocate +** sufficient storage to hold a cursor. The BtCursor object is opaque +** to users so they cannot do the sizeof() themselves - they must call +** this routine. 
+*/ +int sqlite3BtreeCursorSize(void){ + return sizeof(BtCursor); +} + +/* +** Set the cached rowid value of every cursor in the same database file +** as pCur and having the same root page number as pCur. The value is +** set to iRowid. +** +** Only positive rowid values are considered valid for this cache. +** The cache is initialized to zero, indicating an invalid cache. +** A btree will work fine with zero or negative rowids. We just cannot +** cache zero or negative rowids, which means tables that use zero or +** negative rowids might run a little slower. But in practice, zero +** or negative rowids are very uncommon so this should not be a problem. +*/ +void sqlite3BtreeSetCachedRowid(BtCursor *pCur, sqlite3_int64 iRowid){ + BtCursor *p; + for(p=pCur->pBt->pCursor; p; p=p->pNext){ + if( p->pgnoRoot==pCur->pgnoRoot ) p->cachedRowid = iRowid; + } + assert( pCur->cachedRowid==iRowid ); +} + +/* +** Return the cached rowid for the given cursor. A negative or zero +** return value indicates that the rowid cache is invalid and should be +** ignored. If the rowid cache has never before been set, then a +** zero is returned. +*/ +sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor *pCur){ + return pCur->cachedRowid; +} /* ** Close a cursor. The read lock on the database file is released ** when the last cursor is closed. */ int sqlite3BtreeCloseCursor(BtCursor *pCur){ - BtShared *pBt = pCur->pBtree->pBt; - clearCursorPosition(pCur); - if( pCur->pPrev ){ - pCur->pPrev->pNext = pCur->pNext; - }else{ - pBt->pCursor = pCur->pNext; - } - if( pCur->pNext ){ - pCur->pNext->pPrev = pCur->pPrev; + Btree *pBtree = pCur->pBtree; + if( pBtree ){ + int i; + BtShared *pBt = pCur->pBt; + sqlite3BtreeEnter(pBtree); + sqlite3BtreeClearCursor(pCur); + if( pCur->pPrev ){ + pCur->pPrev->pNext = pCur->pNext; + }else{ + pBt->pCursor = pCur->pNext; + } + if( pCur->pNext ){ + pCur->pNext->pPrev = pCur->pPrev; + } + for(i=0; i<=pCur->iPage; i++){ + releasePage(pCur->apPage[i]); + } + unlockBtreeIfUnused(pBt); + invalidateOverflowCache(pCur); + /* sqlite3_free(pCur); */ + sqlite3BtreeLeave(pBtree); } - releasePage(pCur->pPage); - unlockBtreeIfUnused(pBt); - invalidateOverflowCache(pCur); - sqliteFree(pCur); return SQLITE_OK; } +#ifdef SQLITE_TEST /* ** Make a temporary cursor by filling in the fields of pTempCur. ** The temporary cursor is not on the cursor list for the Btree. */ void sqlite3BtreeGetTempCursor(BtCursor *pCur, BtCursor *pTempCur){ - memcpy(pTempCur, pCur, sizeof(*pCur)); + int i; + assert( cursorHoldsMutex(pCur) ); + memcpy(pTempCur, pCur, sizeof(BtCursor)); pTempCur->pNext = 0; pTempCur->pPrev = 0; - if( pTempCur->pPage ){ - sqlite3PagerRef(pTempCur->pPage->pDbPage); + for(i=0; i<=pTempCur->iPage; i++){ + sqlite3PagerRef(pTempCur->apPage[i]->pDbPage); } + assert( pTempCur->pKey==0 ); } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** Delete a temporary cursor such as was made by the CreateTemporaryCursor() ** function above. 
*/ void sqlite3BtreeReleaseTempCursor(BtCursor *pCur){ - if( pCur->pPage ){ - sqlite3PagerUnref(pCur->pPage->pDbPage); + int i; + assert( cursorHoldsMutex(pCur) ); + for(i=0; i<=pCur->iPage; i++){ + sqlite3PagerUnref(pCur->apPage[i]->pDbPage); } + sqlite3_free(pCur->pKey); } +#endif /* SQLITE_TEST */ /* ** Make sure the BtCursor* given in the argument has a valid @@ -2485,8 +3247,9 @@ #ifndef NDEBUG static void assertCellInfo(BtCursor *pCur){ CellInfo info; + int iPage = pCur->iPage; memset(&info, 0, sizeof(info)); - sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &info); + sqlite3BtreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info); assert( memcmp(&info, &pCur->info, sizeof(info))==0 ); } #else @@ -2496,18 +3259,22 @@ /* Use a real function in MSVC to work around bugs in that compiler. */ static void getCellInfo(BtCursor *pCur){ if( pCur->info.nSize==0 ){ - sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &pCur->info); + int iPage = pCur->iPage; + sqlite3BtreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); + pCur->validNKey = 1; }else{ assertCellInfo(pCur); } } #else /* if not _MSC_VER */ /* Use a macro in all other compilers so that the function is inlined */ -#define getCellInfo(pCur) \ - if( pCur->info.nSize==0 ){ \ - sqlite3BtreeParseCell(pCur->pPage, pCur->idx, &pCur->info); \ - }else{ \ - assertCellInfo(pCur); \ +#define getCellInfo(pCur) \ + if( pCur->info.nSize==0 ){ \ + int iPage = pCur->iPage; \ + sqlite3BtreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); \ + pCur->validNKey = 1; \ + }else{ \ + assertCellInfo(pCur); \ } #endif /* _MSC_VER */ @@ -2520,7 +3287,10 @@ ** itself, not the number of bytes in the key. */ int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){ - int rc = restoreOrClearCursorPosition(pCur); + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID ); if( pCur->eState==CURSOR_INVALID ){ @@ -2541,7 +3311,10 @@ ** the database is empty) then *pSize is set to 0. */ int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){ - int rc = restoreOrClearCursorPosition(pCur); + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID ); if( pCur->eState==CURSOR_INVALID ){ @@ -2563,33 +3336,29 @@ ** ** If an error occurs an SQLite error code is returned. Otherwise: ** -** Unless pPgnoNext is NULL, the page number of the next overflow -** page in the linked list is written to *pPgnoNext. If page ovfl -** is the last page in it's linked list, *pPgnoNext is set to zero. -** -** If ppPage is not NULL, *ppPage is set to the MemPage* handle -** for page ovfl. The underlying pager page may have been requested -** with the noContent flag set, so the page data accessable via -** this handle may not be trusted. +** The page number of the next overflow page in the linked list is +** written to *pPgnoNext. If page ovfl is the last page in its linked +** list, *pPgnoNext is set to zero. +** +** If ppPage is not NULL, and a reference to the MemPage object corresponding +** to page number pOvfl was obtained, then *ppPage is set to point to that +** reference. It is the responsibility of the caller to call releasePage() +** on *ppPage to free the reference. In no reference was obtained (because +** the pointer-map was used to obtain the value for *pPgnoNext), then +** *ppPage is set to zero. 
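getCellInfo() above parses a cell once, caches the result in BtCursor.info, and in debug builds re-parses to assert the cache is still accurate. A stripped-down standalone sketch of that pattern, not from the patch, with hypothetical CellView/viewCell names and a stand-in parser:

/* Illustrative sketch, not part of the patch: parse lazily, cache, and
** cross-check the cache in debug builds. */
#include <assert.h>
#include <string.h>

typedef struct CellView { int nSize; int nKey; } CellView;

static void parseCell(const unsigned char *pCell, CellView *pOut){
  pOut->nKey  = pCell[0];          /* stand-in for the real varint decoding */
  pOut->nSize = pCell[0] + 2;
}

void viewCell(const unsigned char *pCell, CellView *pCache){
  if( pCache->nSize==0 ){
    parseCell(pCell, pCache);      /* first use: parse and remember */
  }else{
#ifndef NDEBUG
    CellView chk;
    memset(&chk, 0, sizeof(chk));
    parseCell(pCell, &chk);
    assert( memcmp(&chk, pCache, sizeof(chk))==0 );  /* cached view still matches the cell */
#endif
  }
}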
*/ static int getOverflowPage( BtShared *pBt, Pgno ovfl, /* Overflow page */ - MemPage **ppPage, /* OUT: MemPage handle */ + MemPage **ppPage, /* OUT: MemPage handle (may be NULL) */ Pgno *pPgnoNext /* OUT: Next overflow page number */ ){ Pgno next = 0; - int rc; - - /* One of these must not be NULL. Otherwise, why call this function? */ - assert(ppPage || pPgnoNext); + MemPage *pPage = 0; + int rc = SQLITE_OK; - /* If pPgnoNext is NULL, then this function is being called to obtain - ** a MemPage* reference only. No page-data is required in this case. - */ - if( !pPgnoNext ){ - return sqlite3BtreeGetPage(pBt, ovfl, ppPage, 1); - } + assert( sqlite3_mutex_held(pBt->mutex) ); + assert(pPgnoNext); #ifndef SQLITE_OMIT_AUTOVACUUM /* Try to find the next page in the overflow list using the @@ -2607,36 +3376,31 @@ iGuess++; } - if( iGuess<=sqlite3PagerPagecount(pBt->pPager) ){ + if( iGuess<=pagerPagecount(pBt) ){ rc = ptrmapGet(pBt, iGuess, &eType, &pgno); - if( rc!=SQLITE_OK ){ - return rc; - } - if( eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ + if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ next = iGuess; + rc = SQLITE_DONE; } } } #endif - if( next==0 || ppPage ){ - MemPage *pPage = 0; - - rc = sqlite3BtreeGetPage(pBt, ovfl, &pPage, next!=0); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeGetPage(pBt, ovfl, &pPage, 0); assert(rc==SQLITE_OK || pPage==0); if( next==0 && rc==SQLITE_OK ){ next = get4byte(pPage->aData); } - - if( ppPage ){ - *ppPage = pPage; - }else{ - releasePage(pPage); - } } - *pPgnoNext = next; - return rc; + *pPgnoNext = next; + if( ppPage ){ + *ppPage = pPage; + }else{ + releasePage(pPage); + } + return (rc==SQLITE_DONE ? SQLITE_OK : rc); } /* @@ -2703,8 +3467,8 @@ */ static int accessPayload( BtCursor *pCur, /* Cursor pointing to entry to read from */ - int offset, /* Begin reading this far into payload */ - int amt, /* Read this many bytes */ + u32 offset, /* Begin reading this far into payload */ + u32 amt, /* Read this many bytes */ unsigned char *pBuf, /* Write the bytes into this buffer */ int skipKey, /* offset begins at data if this is true */ int eOp /* zero to read. non-zero to write. */ @@ -2713,24 +3477,26 @@ int rc = SQLITE_OK; u32 nKey; int iIdx = 0; - MemPage *pPage = pCur->pPage; /* Btree page of current cursor entry */ - BtShared *pBt = pCur->pBtree->pBt; /* Btree this cursor belongs to */ + MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */ + BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ assert( pPage ); assert( pCur->eState==CURSOR_VALID ); - assert( pCur->idx>=0 && pCur->idxnCell ); - assert( offset>=0 ); + assert( pCur->aiIdx[pCur->iPage]nCell ); + assert( cursorHoldsMutex(pCur) ); getCellInfo(pCur); aPayload = pCur->info.pCell + pCur->info.nHeader; - nKey = (pPage->intKey ? 0 : pCur->info.nKey); + nKey = (pPage->intKey ? 0 : (int)pCur->info.nKey); if( skipKey ){ offset += nKey; } - if( offset+amt > nKey+pCur->info.nData ){ + if( offset+amt > nKey+pCur->info.nData + || &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize] + ){ /* Trying to read or write past the end of the data is an error */ - return SQLITE_ERROR; + return SQLITE_CORRUPT_BKPT; } /* Check if data must be read/written to/from the btree page itself. 
*/ @@ -2748,7 +3514,7 @@ } if( rc==SQLITE_OK && amt>0 ){ - const int ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ + const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ Pgno nextPage; nextPage = get4byte(&aPayload[pCur->info.nLocal]); @@ -2763,7 +3529,7 @@ */ if( pCur->isIncrblobHandle && !pCur->aOverflow ){ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; - pCur->aOverflow = (Pgno *)sqliteMalloc(sizeof(Pgno)*nOvfl); + pCur->aOverflow = (Pgno *)sqlite3MallocZero(sizeof(Pgno)*nOvfl); if( nOvfl && !pCur->aOverflow ){ rc = SQLITE_NOMEM; } @@ -2843,15 +3609,17 @@ ** the available payload. */ int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ - int rc = restoreOrClearCursorPosition(pCur); + int rc; + + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_VALID ); - assert( pCur->pPage!=0 ); - if( pCur->pPage->intKey ){ + assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); + if( pCur->apPage[0]->intKey ){ return SQLITE_CORRUPT_BKPT; } - assert( pCur->pPage->intKey==0 ); - assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); rc = accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0, 0); } return rc; @@ -2867,11 +3635,20 @@ ** the available payload. */ int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ - int rc = restoreOrClearCursorPosition(pCur); + int rc; + +#ifndef SQLITE_OMIT_INCRBLOB + if ( pCur->eState==CURSOR_INVALID ){ + return SQLITE_ABORT; + } +#endif + + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_VALID ); - assert( pCur->pPage!=0 ); - assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); + assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); rc = accessPayload(pCur, offset, amt, pBuf, 1, 0); } return rc; @@ -2889,7 +3666,7 @@ ** and data to fit on the local page and for there to be no overflow ** pages. When that is so, this routine can be used to access the ** key and data without making a copy. If the key and/or data spills -** onto overflow pages, then accessPayload() must be used to reassembly +** onto overflow pages, then accessPayload() must be used to reassemble ** the key/data and copy it into a preallocated buffer. ** ** The pointer returned by this routine looks directly into the cached @@ -2904,19 +3681,20 @@ unsigned char *aPayload; MemPage *pPage; u32 nKey; - int nLocal; + u32 nLocal; - assert( pCur!=0 && pCur->pPage!=0 ); + assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]); assert( pCur->eState==CURSOR_VALID ); - pPage = pCur->pPage; - assert( pCur->idx>=0 && pCur->idxnCell ); + assert( cursorHoldsMutex(pCur) ); + pPage = pCur->apPage[pCur->iPage]; + assert( pCur->aiIdx[pCur->iPage]nCell ); getCellInfo(pCur); aPayload = pCur->info.pCell; aPayload += pCur->info.nHeader; if( pPage->intKey ){ nKey = 0; }else{ - nKey = pCur->info.nKey; + nKey = (int)pCur->info.nKey; } if( skipKey ){ aPayload += nKey; @@ -2938,18 +3716,25 @@ ** b-tree page. Write the number of available bytes into *pAmt. ** ** The pointer returned is ephemeral. The key/data may move -** or be destroyed on the next call to any Btree routine. +** or be destroyed on the next call to any Btree routine, +** including calls from other threads against the same cache. 
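accessPayload() above walks the overflow chain to serve reads and writes at arbitrary offsets, and builds the aOverflow page-number cache when the cursor is an incremental-blob handle. The public route into that code is sqlite3_blob_read() and sqlite3_blob_write(); a sketch, not part of the patch, with a hypothetical docs/body table and column:

/* Illustrative sketch, not part of the patch: read the last n bytes of a
** blob, which lands deep in the overflow chain for large rows. */
#include <sqlite3.h>

int readBlobTail(sqlite3 *db, sqlite3_int64 rowid, char *buf, int n){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "docs", "body", rowid, 0, &pBlob);
  if( rc==SQLITE_OK ){
    int sz = sqlite3_blob_bytes(pBlob);
    int amt = sz<n ? sz : n;
    rc = sqlite3_blob_read(pBlob, buf, amt, sz-amt);   /* offset maps to accessPayload() */
    sqlite3_blob_close(pBlob);
  }
  return rc;
}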
+** Hence, a mutex on the BtShared should be held prior to calling +** this routine. ** ** These routines is used to get quick access to key and data ** in the common case where no overflow pages are used. */ const void *sqlite3BtreeKeyFetch(BtCursor *pCur, int *pAmt){ + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + assert( cursorHoldsMutex(pCur) ); if( pCur->eState==CURSOR_VALID ){ return (const void*)fetchPayload(pCur, pAmt, 0); } return 0; } const void *sqlite3BtreeDataFetch(BtCursor *pCur, int *pAmt){ + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + assert( cursorHoldsMutex(pCur) ); if( pCur->eState==CURSOR_VALID ){ return (const void*)fetchPayload(pCur, pAmt, 1); } @@ -2963,42 +3748,49 @@ */ static int moveToChild(BtCursor *pCur, u32 newPgno){ int rc; + int i = pCur->iPage; MemPage *pNewPage; - MemPage *pOldPage; - BtShared *pBt = pCur->pBtree->pBt; + BtShared *pBt = pCur->pBt; + assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - rc = getAndInitPage(pBt, newPgno, &pNewPage, pCur->pPage); + assert( pCur->iPageiPage>=(BTCURSOR_MAX_DEPTH-1) ){ + return SQLITE_CORRUPT_BKPT; + } + rc = getAndInitPage(pBt, newPgno, &pNewPage); if( rc ) return rc; - pNewPage->idxParent = pCur->idx; - pOldPage = pCur->pPage; - pOldPage->idxShift = 0; - releasePage(pOldPage); - pCur->pPage = pNewPage; - pCur->idx = 0; + pCur->apPage[i+1] = pNewPage; + pCur->aiIdx[i+1] = 0; + pCur->iPage++; + pCur->info.nSize = 0; + pCur->validNKey = 0; if( pNewPage->nCell<1 ){ return SQLITE_CORRUPT_BKPT; } return SQLITE_OK; } +#ifndef NDEBUG /* -** Return true if the page is the virtual root of its table. -** -** The virtual root page is the root page for most tables. But -** for the table rooted on page 1, sometime the real root page -** is empty except for the right-pointer. In such cases the -** virtual root page is the page that the right-pointer of page -** 1 is pointing to. -*/ -int sqlite3BtreeIsRootPage(MemPage *pPage){ - MemPage *pParent = pPage->pParent; - if( pParent==0 ) return 1; - if( pParent->pgno>1 ) return 0; - if( get2byte(&pParent->aData[pParent->hdrOffset+3])==0 ) return 1; - return 0; +** Page pParent is an internal (non-leaf) tree page. This function +** asserts that page number iChild is the left-child if the iIdx'th +** cell in page pParent. Or, if iIdx is equal to the total number of +** cells in pParent, that page number iChild is the right-child of +** the page. +*/ +static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){ + assert( iIdx<=pParent->nCell ); + if( iIdx==pParent->nCell ){ + assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild ); + }else{ + assert( get4byte(findCell(pParent, iIdx))==iChild ); + } } +#else +# define assertParentIndex(x,y,z) +#endif /* ** Move the cursor up to the parent page. @@ -3009,23 +3801,19 @@ ** the largest cell index. 
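moveToChild() above now records the descent in a fixed-size stack held on the cursor (apPage[]/aiIdx[], bounded by BTCURSOR_MAX_DEPTH) instead of the old per-page parent pointers. A rough sketch of such a descent stack, assuming stand-in types and a made-up depth limit:

#include <stdio.h>

#define CURSOR_MAX_DEPTH 20          /* illustrative bound, like BTCURSOR_MAX_DEPTH */

typedef struct Page Page;
struct Page { unsigned pgno; };      /* stand-in for MemPage */

typedef struct Cursor {
  int iPage;                         /* index of current level, -1 when unset */
  Page *apPage[CURSOR_MAX_DEPTH];    /* pages from the root down to the current page */
  int  aiIdx[CURSOR_MAX_DEPTH];      /* cell index on each level */
} Cursor;

/* Push a child page: fails (like the SQLITE_CORRUPT_BKPT return in the
** hunk above) if the tree claims to be deeper than the fixed stack allows. */
static int cursorPushChild(Cursor *p, Page *pChild){
  if( p->iPage>=CURSOR_MAX_DEPTH-1 ) return 1;   /* corrupt / too deep */
  p->iPage++;
  p->apPage[p->iPage] = pChild;
  p->aiIdx[p->iPage] = 0;
  return 0;
}

/* Pop back to the parent, mirroring sqlite3BtreeMoveToParent(). */
static void cursorPopToParent(Cursor *p){
  if( p->iPage>0 ) p->iPage--;
}

int main(void){
  Page root = {1}, child = {7};
  Cursor c = { -1, {0}, {0} };
  c.iPage = 0; c.apPage[0] = &root;              /* moveToRoot() equivalent */
  cursorPushChild(&c, &child);
  printf("depth=%d current pgno=%u\n", c.iPage, c.apPage[c.iPage]->pgno);
  cursorPopToParent(&c);
  printf("depth=%d current pgno=%u\n", c.iPage, c.apPage[c.iPage]->pgno);
  return 0;
}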
*/ void sqlite3BtreeMoveToParent(BtCursor *pCur){ - MemPage *pParent; - MemPage *pPage; - int idxParent; - + assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - pPage = pCur->pPage; - assert( pPage!=0 ); - assert( !sqlite3BtreeIsRootPage(pPage) ); - pParent = pPage->pParent; - assert( pParent!=0 ); - idxParent = pPage->idxParent; - sqlite3PagerRef(pParent->pDbPage); - releasePage(pPage); - pCur->pPage = pParent; + assert( pCur->iPage>0 ); + assert( pCur->apPage[pCur->iPage] ); + assertParentIndex( + pCur->apPage[pCur->iPage-1], + pCur->aiIdx[pCur->iPage-1], + pCur->apPage[pCur->iPage]->pgno + ); + releasePage(pCur->apPage[pCur->iPage]); + pCur->iPage--; pCur->info.nSize = 0; - assert( pParent->idxShift==0 ); - pCur->idx = idxParent; + pCur->validNKey = 0; } /* @@ -3034,35 +3822,53 @@ static int moveToRoot(BtCursor *pCur){ MemPage *pRoot; int rc = SQLITE_OK; - BtShared *pBt = pCur->pBtree->pBt; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; - if( pCur->eState==CURSOR_REQUIRESEEK ){ - clearCursorPosition(pCur); + assert( cursorHoldsMutex(pCur) ); + assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); + if( pCur->eState>=CURSOR_REQUIRESEEK ){ + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; + } + sqlite3BtreeClearCursor(pCur); } - pRoot = pCur->pPage; - if( pRoot && pRoot->pgno==pCur->pgnoRoot ){ - assert( pRoot->isInit ); + + if( pCur->iPage>=0 ){ + int i; + for(i=1; i<=pCur->iPage; i++){ + releasePage(pCur->apPage[i]); + } }else{ if( - SQLITE_OK!=(rc = getAndInitPage(pBt, pCur->pgnoRoot, &pRoot, 0)) + SQLITE_OK!=(rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->apPage[0])) ){ pCur->eState = CURSOR_INVALID; return rc; } - releasePage(pCur->pPage); - pCur->pPage = pRoot; } - pCur->idx = 0; + + pRoot = pCur->apPage[0]; + assert( pRoot->pgno==pCur->pgnoRoot ); + pCur->iPage = 0; + pCur->aiIdx[0] = 0; pCur->info.nSize = 0; + pCur->atLast = 0; + pCur->validNKey = 0; + if( pRoot->nCell==0 && !pRoot->leaf ){ Pgno subpage; + if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT; assert( pRoot->pgno==1 ); subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]); assert( subpage>0 ); pCur->eState = CURSOR_VALID; rc = moveToChild(pCur, subpage); + }else{ + pCur->eState = ((pRoot->nCell>0)?CURSOR_VALID:CURSOR_INVALID); } - pCur->eState = ((pCur->pPage->nCell>0)?CURSOR_VALID:CURSOR_INVALID); return rc; } @@ -3075,17 +3881,17 @@ */ static int moveToLeftmost(BtCursor *pCur){ Pgno pgno; - int rc; + int rc = SQLITE_OK; MemPage *pPage; + assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - while( !(pPage = pCur->pPage)->leaf ){ - assert( pCur->idx>=0 && pCur->idxnCell ); - pgno = get4byte(findCell(pPage, pCur->idx)); + while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ + assert( pCur->aiIdx[pCur->iPage]nCell ); + pgno = get4byte(findCell(pPage, pCur->aiIdx[pCur->iPage])); rc = moveToChild(pCur, pgno); - if( rc ) return rc; } - return SQLITE_OK; + return rc; } /* @@ -3100,19 +3906,22 @@ */ static int moveToRightmost(BtCursor *pCur){ Pgno pgno; - int rc; - MemPage *pPage; + int rc = SQLITE_OK; + MemPage *pPage = 0; + assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - while( !(pPage = pCur->pPage)->leaf ){ + while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); - pCur->idx = pPage->nCell; + pCur->aiIdx[pCur->iPage] = pPage->nCell; rc = moveToChild(pCur, pgno); - if( rc ) 
return rc; } - pCur->idx = pPage->nCell - 1; - pCur->info.nSize = 0; - return SQLITE_OK; + if( rc==SQLITE_OK ){ + pCur->aiIdx[pCur->iPage] = pPage->nCell-1; + pCur->info.nSize = 0; + pCur->validNKey = 0; + } + return rc; } /* Move the cursor to the first entry in the table. Return SQLITE_OK @@ -3121,16 +3930,21 @@ */ int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ int rc; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); rc = moveToRoot(pCur); - if( rc ) return rc; - if( pCur->eState==CURSOR_INVALID ){ - assert( pCur->pPage->nCell==0 ); - *pRes = 1; - return SQLITE_OK; + if( rc==SQLITE_OK ){ + if( pCur->eState==CURSOR_INVALID ){ + assert( pCur->apPage[pCur->iPage]->nCell==0 ); + *pRes = 1; + rc = SQLITE_OK; + }else{ + assert( pCur->apPage[pCur->iPage]->nCell>0 ); + *pRes = 0; + rc = moveToLeftmost(pCur); + } } - assert( pCur->pPage->nCell>0 ); - *pRes = 0; - rc = moveToLeftmost(pCur); return rc; } @@ -3140,131 +3954,207 @@ */ int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ int rc; - rc = moveToRoot(pCur); - if( rc ) return rc; - if( CURSOR_INVALID==pCur->eState ){ - assert( pCur->pPage->nCell==0 ); - *pRes = 1; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + + /* If the cursor already points to the last entry, this is a no-op. */ + if( CURSOR_VALID==pCur->eState && pCur->atLast ){ +#ifdef SQLITE_DEBUG + /* This block serves to assert() that the cursor really does point + ** to the last entry in the b-tree. */ + int ii; + for(ii=0; iiiPage; ii++){ + assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); + } + assert( pCur->aiIdx[pCur->iPage]==pCur->apPage[pCur->iPage]->nCell-1 ); + assert( pCur->apPage[pCur->iPage]->leaf ); +#endif return SQLITE_OK; } - assert( pCur->eState==CURSOR_VALID ); - *pRes = 0; - rc = moveToRightmost(pCur); + + rc = moveToRoot(pCur); + if( rc==SQLITE_OK ){ + if( CURSOR_INVALID==pCur->eState ){ + assert( pCur->apPage[pCur->iPage]->nCell==0 ); + *pRes = 1; + }else{ + assert( pCur->eState==CURSOR_VALID ); + *pRes = 0; + rc = moveToRightmost(pCur); + pCur->atLast = rc==SQLITE_OK ?1:0; + } + } return rc; } -/* Move the cursor so that it points to an entry near pKey/nKey. -** Return a success code. +/* Move the cursor so that it points to an entry near the key +** specified by pIdxKey or intKey. Return a success code. ** -** For INTKEY tables, only the nKey parameter is used. pKey is -** ignored. For other tables, nKey is the number of bytes of data -** in pKey. The comparison function specified when the cursor was -** created is used to compare keys. +** For INTKEY tables, the intKey parameter is used. pIdxKey +** must be NULL. For index tables, pIdxKey is used and intKey +** is ignored. ** ** If an exact match is not found, then the cursor is always ** left pointing at a leaf page which would hold the entry if it ** were present. The cursor might point to an entry that comes ** before or after the key. ** -** The result of comparing the key with the entry to which the -** cursor is written to *pRes if pRes!=NULL. The meaning of -** this value is as follows: +** An integer is written into *pRes which is the result of +** comparing the key with the entry to which the cursor is +** pointing. The meaning of the integer written into +** *pRes is as follows: ** ** *pRes<0 The cursor is left pointing at an entry that -** is smaller than pKey or if the table is empty +** is smaller than intKey/pIdxKey or if the table is empty ** and the cursor is therefore left point to nothing. 
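moveToRoot() earlier in this hunk leans on the numeric ordering of the cursor states (the new asserts spell it out: CURSOR_INVALID and CURSOR_VALID sort below CURSOR_REQUIRESEEK, and CURSOR_FAULT above it), so a single >= comparison covers both the needs-reseek case and the fault case. A toy illustration of that enum-ordering idiom, with hypothetical state names:

#include <stdio.h>

/* Hypothetical cursor states ordered so one comparison covers two cases,
** like CURSOR_INVALID < CURSOR_VALID < CURSOR_REQUIRESEEK < CURSOR_FAULT. */
enum CurState { ST_INVALID, ST_VALID, ST_REQUIRESEEK, ST_FAULT };

static const char *classify(enum CurState e){
  if( e>=ST_REQUIRESEEK ){
    /* Either a saved position must be restored or the cursor is faulted;
    ** both need special handling before the cursor can be used. */
    return (e==ST_FAULT) ? "fault: report the saved error" : "restore saved position";
  }
  return "usable as-is";
}

int main(void){
  enum CurState all[] = { ST_INVALID, ST_VALID, ST_REQUIRESEEK, ST_FAULT };
  for(int i=0; i<4; i++) printf("%d -> %s\n", all[i], classify(all[i]));
  return 0;
}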
** ** *pRes==0 The cursor is left pointing at an entry that -** exactly matches pKey. +** exactly matches intKey/pIdxKey. ** ** *pRes>0 The cursor is left pointing at an entry that -** is larger than pKey. +** is larger than intKey/pIdxKey. +** */ -int sqlite3BtreeMoveto( - BtCursor *pCur, /* The cursor to be moved */ - const void *pKey, /* The key content for indices. Not used by tables */ - i64 nKey, /* Size of pKey. Or the key for tables */ - int biasRight, /* If true, bias the search to the high end */ - int *pRes /* Search result flag */ +int sqlite3BtreeMovetoUnpacked( + BtCursor *pCur, /* The cursor to be moved */ + UnpackedRecord *pIdxKey, /* Unpacked index key */ + i64 intKey, /* The table key */ + int biasRight, /* If true, bias the search to the high end */ + int *pRes /* Write search results here */ ){ int rc; + + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + + /* If the cursor is already positioned at the point we are trying + ** to move to, then just return without doing any work */ + if( pCur->eState==CURSOR_VALID && pCur->validNKey + && pCur->apPage[0]->intKey + ){ + if( pCur->info.nKey==intKey ){ + *pRes = 0; + return SQLITE_OK; + } + if( pCur->atLast && pCur->info.nKeypPage ); - assert( pCur->pPage->isInit ); + if( rc ){ + return rc; + } + assert( pCur->apPage[pCur->iPage] ); + assert( pCur->apPage[pCur->iPage]->isInit ); if( pCur->eState==CURSOR_INVALID ){ *pRes = -1; - assert( pCur->pPage->nCell==0 ); + assert( pCur->apPage[pCur->iPage]->nCell==0 ); return SQLITE_OK; } + assert( pCur->apPage[0]->intKey || pIdxKey ); for(;;){ int lwr, upr; Pgno chldPg; - MemPage *pPage = pCur->pPage; + MemPage *pPage = pCur->apPage[pCur->iPage]; int c = -1; /* pRes return if table is empty must be -1 */ lwr = 0; upr = pPage->nCell-1; - if( !pPage->intKey && pKey==0 ){ - return SQLITE_CORRUPT_BKPT; + if( (!pPage->intKey && pIdxKey==0) || upr<0 ){ + rc = SQLITE_CORRUPT_BKPT; + goto moveto_finish; } if( biasRight ){ - pCur->idx = upr; + pCur->aiIdx[pCur->iPage] = (u16)upr; }else{ - pCur->idx = (upr+lwr)/2; + pCur->aiIdx[pCur->iPage] = (u16)((upr+lwr)/2); } - if( lwr<=upr ) for(;;){ - void *pCellKey; - i64 nCellKey; + for(;;){ + int idx = pCur->aiIdx[pCur->iPage]; /* Index of current cell in pPage */ + u8 *pCell; /* Pointer to current cell in pPage */ + pCur->info.nSize = 0; + pCell = findCell(pPage, idx) + pPage->childPtrSize; if( pPage->intKey ){ - u8 *pCell; - pCell = findCell(pPage, pCur->idx) + pPage->childPtrSize; + i64 nCellKey; if( pPage->hasData ){ u32 dummy; - pCell += getVarint32(pCell, &dummy); + pCell += getVarint32(pCell, dummy); } - getVarint(pCell, (u64 *)&nCellKey); - if( nCellKeynKey ){ - c = +1; }else{ - c = 0; + assert( nCellKey>intKey ); + c = +1; } + pCur->validNKey = 1; + pCur->info.nKey = nCellKey; }else{ - int available; - pCellKey = (void *)fetchPayload(pCur, &available, 0); - nCellKey = pCur->info.nKey; - if( available>=nCellKey ){ - c = pCur->xCompare(pCur->pArg, nCellKey, pCellKey, nKey, pKey); + /* The maximum supported page-size is 32768 bytes. This means that + ** the maximum number of record bytes stored on an index B-Tree + ** page is at most 8198 bytes, which may be stored as a 2-byte + ** varint. This information is used to attempt to avoid parsing + ** the entire cell by checking for the cases where the record is + ** stored entirely within the b-tree page by inspecting the first + ** 2 bytes of the cell. 
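The search loop that follows is a binary search over the cell index range [lwr, upr], optionally biased toward the high end (biasRight) because appends are the common case. A small self-contained version of the same loop over a plain sorted integer array, reporting a comparison result the way *pRes does (names here are illustrative):

#include <stdio.h>

/* Binary search over sorted a[0..n-1] for key. On return *pRes is
**   <0 if the final probe was smaller than key,
**    0 if an exact match was found,
**   >0 if the final probe was larger than key,
** mirroring the meaning of *pRes in sqlite3BtreeMovetoUnpacked(). */
static int biasedSearch(const int *a, int n, int key, int biasRight, int *pRes){
  int lwr = 0, upr = n-1;
  int c = -1;                     /* result if the array is empty */
  int idx = biasRight ? upr : (lwr+upr)/2;
  while( lwr<=upr ){
    if( a[idx]<key )      { c = -1; lwr = idx+1; }
    else if( a[idx]>key ) { c = +1; upr = idx-1; }
    else                  { c = 0; break; }
    idx = (lwr+upr)/2;
  }
  *pRes = c;
  return idx;
}

int main(void){
  int a[] = {3, 8, 15, 20, 42};
  int res;
  int i = biasedSearch(a, 5, 20, 1, &res);
  printf("idx=%d res=%d\n", i, res);   /* exact hit: res==0 */
  i = biasedSearch(a, 5, 21, 0, &res);
  printf("idx=%d res=%d\n", i, res);   /* no hit: res!=0 */
  return 0;
}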
+ */ + int nCell = pCell[0]; + if( !(nCell & 0x80) && nCell<=pPage->maxLocal ){ + /* This branch runs if the record-size field of the cell is a + ** single byte varint and the record fits entirely on the main + ** b-tree page. */ + c = sqlite3VdbeRecordCompare(nCell, (void*)&pCell[1], pIdxKey); + }else if( !(pCell[1] & 0x80) + && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal + ){ + /* The record-size field is a 2 byte varint and the record + ** fits entirely on the main b-tree page. */ + c = sqlite3VdbeRecordCompare(nCell, (void*)&pCell[2], pIdxKey); }else{ - pCellKey = sqliteMallocRaw( nCellKey ); - if( pCellKey==0 ) return SQLITE_NOMEM; - rc = sqlite3BtreeKey(pCur, 0, nCellKey, (void *)pCellKey); - c = pCur->xCompare(pCur->pArg, nCellKey, pCellKey, nKey, pKey); - sqliteFree(pCellKey); - if( rc ) return rc; + /* The record flows over onto one or more overflow pages. In + ** this case the whole cell needs to be parsed, a buffer allocated + ** and accessPayload() used to retrieve the record into the + ** buffer before VdbeRecordCompare() can be called. */ + void *pCellKey; + u8 * const pCellBody = pCell - pPage->childPtrSize; + sqlite3BtreeParseCellPtr(pPage, pCellBody, &pCur->info); + nCell = (int)pCur->info.nKey; + pCellKey = sqlite3Malloc( nCell ); + if( pCellKey==0 ){ + rc = SQLITE_NOMEM; + goto moveto_finish; + } + rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0, 0); + c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey); + sqlite3_free(pCellKey); + if( rc ) goto moveto_finish; } } if( c==0 ){ - if( pPage->leafData && !pPage->leaf ){ - lwr = pCur->idx; + if( pPage->intKey && !pPage->leaf ){ + lwr = idx; upr = lwr - 1; break; }else{ - if( pRes ) *pRes = 0; - return SQLITE_OK; + *pRes = 0; + rc = SQLITE_OK; + goto moveto_finish; } } if( c<0 ){ - lwr = pCur->idx+1; + lwr = idx+1; }else{ - upr = pCur->idx-1; + upr = idx-1; } if( lwr>upr ){ break; } - pCur->idx = (lwr+upr)/2; + pCur->aiIdx[pCur->iPage] = (u16)((lwr+upr)/2); } assert( lwr==upr+1 ); assert( pPage->isInit ); @@ -3276,20 +4166,54 @@ chldPg = get4byte(findCell(pPage, lwr)); } if( chldPg==0 ){ - assert( pCur->idx>=0 && pCur->idxpPage->nCell ); + assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); if( pRes ) *pRes = c; - return SQLITE_OK; + rc = SQLITE_OK; + goto moveto_finish; } - pCur->idx = lwr; + pCur->aiIdx[pCur->iPage] = (u16)lwr; pCur->info.nSize = 0; + pCur->validNKey = 0; rc = moveToChild(pCur, chldPg); - if( rc ){ - return rc; - } + if( rc ) goto moveto_finish; + } +moveto_finish: + return rc; +} + +/* +** In this version of BtreeMoveto, pKey is a packed index record +** such as is generated by the OP_MakeRecord opcode. Unpack the +** record and then call BtreeMovetoUnpacked() to do the work. +*/ +int sqlite3BtreeMoveto( + BtCursor *pCur, /* Cursor open on the btree to be searched */ + const void *pKey, /* Packed key if the btree is an index */ + i64 nKey, /* Integer key for tables. 
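The fast path above decodes the record-size field by hand: SQLite varints are big-endian base-128, so a byte with the high bit clear is a complete one-byte value and a two-byte value is ((b0&0x7f)<<7)+b1; anything longer falls through to full cell parsing. A sketch of just those two cases (shortVarint is an illustrative name, not an SQLite function):

#include <stdio.h>

/* Decode a 1- or 2-byte SQLite-style varint (big-endian, 7 bits per byte,
** high bit set means "more bytes follow"). Returns the number of bytes
** consumed, or 0 if the value needs more than two bytes and the caller
** should fall back to a full varint parser. */
static int shortVarint(const unsigned char *p, unsigned *pVal){
  if( (p[0] & 0x80)==0 ){
    *pVal = p[0];                          /* one byte: 0..127 */
    return 1;
  }
  if( (p[1] & 0x80)==0 ){
    *pVal = ((p[0] & 0x7f)<<7) | p[1];     /* two bytes: 0..16383 */
    return 2;
  }
  return 0;                                /* longer varint: not handled here */
}

int main(void){
  unsigned char a[] = { 0x45 };            /* 69 */
  unsigned char b[] = { 0x81, 0x00 };      /* (1<<7)+0 = 128 */
  unsigned v;
  shortVarint(a, &v); printf("%u\n", v);
  shortVarint(b, &v); printf("%u\n", v);
  return 0;
}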
Size of pKey for indices */ + int bias, /* Bias search to the high end */ + int *pRes /* Write search results here */ +){ + int rc; /* Status code */ + UnpackedRecord *pIdxKey; /* Unpacked index key */ + char aSpace[150]; /* Temp space for pIdxKey - to avoid a malloc */ + + + if( pKey ){ + assert( nKey==(i64)(int)nKey ); + pIdxKey = sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, + aSpace, sizeof(aSpace)); + if( pIdxKey==0 ) return SQLITE_NOMEM; + }else{ + pIdxKey = 0; + } + rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); + if( pKey ){ + sqlite3VdbeDeleteUnpackedRecord(pIdxKey); } - /* NOT REACHED */ + return rc; } + /* ** Return TRUE if the cursor is not pointing at an entry of the table. ** @@ -3313,14 +4237,15 @@ */ int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ int rc; + int idx; MemPage *pPage; - rc = restoreOrClearCursorPosition(pCur); + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } assert( pRes!=0 ); - pPage = pCur->pPage; if( CURSOR_INVALID==pCur->eState ){ *pRes = 1; return SQLITE_OK; @@ -3332,12 +4257,14 @@ } pCur->skip = 0; + pPage = pCur->apPage[pCur->iPage]; + idx = ++pCur->aiIdx[pCur->iPage]; assert( pPage->isInit ); - assert( pCur->idxnCell ); + assert( idx<=pPage->nCell ); - pCur->idx++; pCur->info.nSize = 0; - if( pCur->idx>=pPage->nCell ){ + pCur->validNKey = 0; + if( idx>=pPage->nCell ){ if( !pPage->leaf ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); if( rc ) return rc; @@ -3346,16 +4273,16 @@ return rc; } do{ - if( sqlite3BtreeIsRootPage(pPage) ){ + if( pCur->iPage==0 ){ *pRes = 1; pCur->eState = CURSOR_INVALID; return SQLITE_OK; } sqlite3BtreeMoveToParent(pCur); - pPage = pCur->pPage; - }while( pCur->idx>=pPage->nCell ); + pPage = pCur->apPage[pCur->iPage]; + }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell ); *pRes = 0; - if( pPage->leafData ){ + if( pPage->intKey ){ rc = sqlite3BtreeNext(pCur, pRes); }else{ rc = SQLITE_OK; @@ -3370,6 +4297,7 @@ return rc; } + /* ** Step the cursor to the back to the previous entry in the database. If ** successful then set *pRes=0. 
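sqlite3BtreeMoveto() above hands sqlite3VdbeRecordUnpack() a small on-stack buffer (aSpace[150]) so that typical index keys are unpacked without a heap allocation. The same stack-buffer-with-heap-fallback pattern, sketched with hypothetical helpers instead of the real record decoder:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy nIn bytes into a scratch area, preferring the caller-supplied
** stack buffer and falling back to malloc() only when it is too small.
** Returns the buffer actually used; the caller releases it with
** scratchFree(). This mirrors the aSpace[150] trick in BtreeMoveto(). */
static void *scratchAlloc(const void *pIn, size_t nIn,
                          void *pStack, size_t nStack, int *pUsedHeap){
  void *p;
  if( nIn<=nStack ){
    p = pStack;
    *pUsedHeap = 0;
  }else{
    p = malloc(nIn);
    *pUsedHeap = 1;
    if( p==0 ) return 0;                  /* caller maps this to SQLITE_NOMEM */
  }
  memcpy(p, pIn, nIn);
  return p;
}

static void scratchFree(void *p, int usedHeap){
  if( usedHeap ) free(p);
}

int main(void){
  char stackBuf[150];
  int usedHeap;
  const char *key = "short key";
  void *p = scratchAlloc(key, strlen(key)+1, stackBuf, sizeof(stackBuf), &usedHeap);
  printf("used heap: %d, key: %s\n", usedHeap, (char*)p);
  scratchFree(p, usedHeap);
  return 0;
}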
If the cursor @@ -3378,13 +4306,14 @@ */ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ int rc; - Pgno pgno; MemPage *pPage; - rc = restoreOrClearCursorPosition(pCur); + assert( cursorHoldsMutex(pCur) ); + rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } + pCur->atLast = 0; if( CURSOR_INVALID==pCur->eState ){ *pRes = 1; return SQLITE_OK; @@ -3396,27 +4325,30 @@ } pCur->skip = 0; - pPage = pCur->pPage; + pPage = pCur->apPage[pCur->iPage]; assert( pPage->isInit ); - assert( pCur->idx>=0 ); if( !pPage->leaf ){ - pgno = get4byte( findCell(pPage, pCur->idx) ); - rc = moveToChild(pCur, pgno); - if( rc ) return rc; + int idx = pCur->aiIdx[pCur->iPage]; + rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); + if( rc ){ + return rc; + } rc = moveToRightmost(pCur); }else{ - while( pCur->idx==0 ){ - if( sqlite3BtreeIsRootPage(pPage) ){ + while( pCur->aiIdx[pCur->iPage]==0 ){ + if( pCur->iPage==0 ){ pCur->eState = CURSOR_INVALID; *pRes = 1; return SQLITE_OK; } sqlite3BtreeMoveToParent(pCur); - pPage = pCur->pPage; } - pCur->idx--; pCur->info.nSize = 0; - if( pPage->leafData && !pPage->leaf ){ + pCur->validNKey = 0; + + pCur->aiIdx[pCur->iPage]--; + pPage = pCur->apPage[pCur->iPage]; + if( pPage->intKey && !pPage->leaf ){ rc = sqlite3BtreePrevious(pCur, pRes); }else{ rc = SQLITE_OK; @@ -3456,13 +4388,19 @@ ){ MemPage *pPage1; int rc; - int n; /* Number of pages on the freelist */ - int k; /* Number of leaves on the trunk of the freelist */ + u32 n; /* Number of pages on the freelist */ + u32 k; /* Number of leaves on the trunk of the freelist */ MemPage *pTrunk = 0; MemPage *pPrevTrunk = 0; + Pgno mxPage; /* Total size of the database file */ + assert( sqlite3_mutex_held(pBt->mutex) ); pPage1 = pBt->pPage1; + mxPage = pagerPagecount(pBt); n = get4byte(&pPage1->aData[36]); + if( n>mxPage ){ + return SQLITE_CORRUPT_BKPT; + } if( n>0 ){ /* There are pages on the freelist. Reuse one of those pages. */ Pgno iTrunk; @@ -3473,7 +4411,7 @@ ** the entire-list will be searched for that page. */ #ifndef SQLITE_OMIT_AUTOVACUUM - if( exact && nearby<=sqlite3PagerPagecount(pBt->pPager) ){ + if( exact && nearby<=mxPage ){ u8 eType; assert( nearby>0 ); assert( pBt->autoVacuum ); @@ -3504,7 +4442,11 @@ }else{ iTrunk = get4byte(&pPage1->aData[32]); } - rc = sqlite3BtreeGetPage(pBt, iTrunk, &pTrunk, 0); + if( iTrunk>mxPage ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = sqlite3BtreeGetPage(pBt, iTrunk, &pTrunk, 0); + } if( rc ){ pTrunk = 0; goto end_allocate_page; @@ -3525,7 +4467,7 @@ *ppPage = pTrunk; pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); - }else if( k>pBt->usableSize/4 - 8 ){ + }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. 
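allocateBtreePage() above reads two big-endian fields out of page 1 — the first freelist trunk page number at byte offset 32 and the freelist page count at offset 36 — and now treats values larger than the database size as corruption. A minimal sketch of those header reads over an in-memory page image:

#include <stdio.h>

/* Read a big-endian 32-bit value, like SQLite's get4byte(). */
static unsigned get4(const unsigned char *p){
  return ((unsigned)p[0]<<24) | ((unsigned)p[1]<<16)
       | ((unsigned)p[2]<<8)  |  (unsigned)p[3];
}

int main(void){
  unsigned char page1[100] = {0};   /* just the header area of page 1 */
  unsigned mxPage = 50;             /* pretend the file is 50 pages long */
  unsigned iTrunk, nFree;

  /* Fake header: first freelist trunk is page 7, freelist holds 3 pages. */
  page1[32+3] = 7;
  page1[36+3] = 3;

  iTrunk = get4(&page1[32]);
  nFree  = get4(&page1[36]);
  if( nFree>mxPage || iTrunk>mxPage ){
    printf("corrupt: freelist fields exceed database size\n");
  }else{
    printf("first trunk page %u, %u free pages\n", iTrunk, nFree);
  }
  return 0;
}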
Database corruption */ rc = SQLITE_CORRUPT_BKPT; goto end_allocate_page; @@ -3554,6 +4496,10 @@ */ MemPage *pNewTrunk; Pgno iNewTrunk = get4byte(&pTrunk->aData[8]); + if( iNewTrunk>mxPage ){ + rc = SQLITE_CORRUPT_BKPT; + goto end_allocate_page; + } rc = sqlite3BtreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0); if( rc!=SQLITE_OK ){ goto end_allocate_page; @@ -3568,6 +4514,7 @@ memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4); releasePage(pNewTrunk); if( !pPrevTrunk ){ + assert( sqlite3PagerIswriteable(pPage1->pDbPage) ); put4byte(&pPage1->aData[32], iNewTrunk); }else{ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); @@ -3580,9 +4527,9 @@ pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); #endif - }else{ + }else if( k>0 ){ /* Extract a leaf from the trunk */ - int closest; + u32 closest; Pgno iPage; unsigned char *aData = pTrunk->aData; rc = sqlite3PagerWrite(pTrunk->pDbPage); @@ -3590,7 +4537,8 @@ goto end_allocate_page; } if( nearby>0 ){ - int i, dist; + u32 i; + int dist; closest = 0; dist = get4byte(&aData[8]) - nearby; if( dist<0 ) dist = -dist; @@ -3607,11 +4555,19 @@ } iPage = get4byte(&aData[8+closest*4]); + if( iPage>mxPage ){ + rc = SQLITE_CORRUPT_BKPT; + goto end_allocate_page; + } if( !searchList || iPage==nearby ){ + int noContent; + Pgno nPage; *pPgno = iPage; - if( *pPgno>sqlite3PagerPagecount(pBt->pPager) ){ + nPage = pagerPagecount(pBt); + if( iPage>nPage ){ /* Free page off the end of the file */ - return SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_BKPT; + goto end_allocate_page; } TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d" ": %d more free pages\n", @@ -3620,9 +4576,10 @@ memcpy(&aData[8+closest*4], &aData[4+k*4], 4); } put4byte(&aData[4], k-1); - rc = sqlite3BtreeGetPage(pBt, *pPgno, ppPage, 1); + assert( sqlite3PagerIswriteable(pTrunk->pDbPage) ); + noContent = !btreeGetHasContent(pBt, *pPgno); + rc = sqlite3BtreeGetPage(pBt, *pPgno, ppPage, noContent); if( rc==SQLITE_OK ){ - sqlite3PagerDontRollback((*ppPage)->pDbPage); rc = sqlite3PagerWrite((*ppPage)->pDbPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); @@ -3637,30 +4594,30 @@ }else{ /* There are no pages on the freelist, so create a new page at the ** end of the file */ - *pPgno = sqlite3PagerPagecount(pBt->pPager) + 1; + int nPage = pagerPagecount(pBt); + *pPgno = nPage + 1; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->nTrunc ){ - /* An incr-vacuum has already run within this transaction. So the - ** page to allocate is not from the physical end of the file, but - ** at pBt->nTrunc. - */ - *pPgno = pBt->nTrunc+1; - if( *pPgno==PENDING_BYTE_PAGE(pBt) ){ - (*pPgno)++; - } + if( *pPgno==PENDING_BYTE_PAGE(pBt) ){ + (*pPgno)++; } + +#ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, *pPgno) ){ /* If *pPgno refers to a pointer-map page, allocate two new pages ** at the end of the file instead of one. The first allocated page ** becomes a new pointer-map page, the second is used by the caller. 
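A freelist trunk page lays out a next-trunk pointer in bytes 0-3, a leaf count k in bytes 4-7, and then k four-byte leaf page numbers. The hunk above takes a leaf out by copying the last leaf slot over the chosen one and decrementing k. The same swap-remove over a plain array of page numbers (helper names are illustrative):

#include <stdio.h>

/* Remove slot "closest" from a trunk-style leaf array of k entries
** (k assumed >= 1) by moving the last entry into the vacated slot, as
** allocateBtreePage() does with memcpy(&aData[8+closest*4], &aData[4+k*4], 4). */
static unsigned takeLeaf(unsigned *aLeaf, unsigned *pk, unsigned closest){
  unsigned k = *pk;
  unsigned iPage = aLeaf[closest];
  if( closest<k-1 ){
    aLeaf[closest] = aLeaf[k-1];
  }
  *pk = k-1;
  return iPage;
}

int main(void){
  unsigned aLeaf[] = { 11, 19, 23, 30 };   /* free leaf page numbers */
  unsigned k = 4;
  unsigned got = takeLeaf(aLeaf, &k, 1);   /* allocate page 19 */
  printf("allocated %u, %u leaves left:", got, k);
  for(unsigned i=0; i<k; i++) printf(" %u", aLeaf[i]);
  printf("\n");                            /* 11 30 23 */
  return 0;
}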
*/ + MemPage *pPg = 0; TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", *pPgno)); assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); + rc = sqlite3BtreeGetPage(pBt, *pPgno, &pPg, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerWrite(pPg->pDbPage); + releasePage(pPg); + } + if( rc ) return rc; (*pPgno)++; - } - if( pBt->nTrunc ){ - pBt->nTrunc = *pPgno; + if( *pPgno==PENDING_BYTE_PAGE(pBt) ){ (*pPgno)++; } } #endif @@ -3679,90 +4636,154 @@ end_allocate_page: releasePage(pTrunk); releasePage(pPrevTrunk); + if( rc==SQLITE_OK ){ + if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){ + releasePage(*ppPage); + return SQLITE_CORRUPT_BKPT; + } + (*ppPage)->isInit = 0; + }else{ + *ppPage = 0; + } return rc; } /* -** Add a page of the database file to the freelist. +** This function is used to add page iPage to the database file free-list. +** It is assumed that the page is not already a part of the free-list. +** +** The value passed as the second argument to this function is optional. +** If the caller happens to have a pointer to the MemPage object +** corresponding to page iPage handy, it may pass it as the second value. +** Otherwise, it may pass NULL. ** -** sqlite3PagerUnref() is NOT called for pPage. +** If a pointer to a MemPage object is passed as the second argument, +** its reference count is not altered by this function. */ -static int freePage(MemPage *pPage){ - BtShared *pBt = pPage->pBt; - MemPage *pPage1 = pBt->pPage1; - int rc, n, k; +static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ + MemPage *pTrunk = 0; /* Free-list trunk page */ + Pgno iTrunk = 0; /* Page number of free-list trunk page */ + MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */ + MemPage *pPage; /* Page being freed. May be NULL. */ + int rc; /* Return Code */ + int nFree; /* Initial number of pages on free-list */ - /* Prepare the page for freeing */ - assert( pPage->pgno>1 ); - pPage->isInit = 0; - releasePage(pPage->pParent); - pPage->pParent = 0; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( iPage>1 ); + assert( !pMemPage || pMemPage->pgno==iPage ); + + if( pMemPage ){ + pPage = pMemPage; + sqlite3PagerRef(pPage->pDbPage); + }else{ + pPage = btreePageLookup(pBt, iPage); + } /* Increment the free page count on pPage1 */ rc = sqlite3PagerWrite(pPage1->pDbPage); - if( rc ) return rc; - n = get4byte(&pPage1->aData[36]); - put4byte(&pPage1->aData[36], n+1); + if( rc ) goto freepage_out; + nFree = get4byte(&pPage1->aData[36]); + put4byte(&pPage1->aData[36], nFree+1); #ifdef SQLITE_SECURE_DELETE /* If the SQLITE_SECURE_DELETE compile-time option is enabled, then ** always fully overwrite deleted information with zeros. */ - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ) return rc; + if( (!pPage && (rc = sqlite3BtreeGetPage(pBt, iPage, &pPage, 0))) + || (rc = sqlite3PagerWrite(pPage->pDbPage)) + ){ + goto freepage_out; + } memset(pPage->aData, 0, pPage->pBt->pageSize); #endif -#ifndef SQLITE_OMIT_AUTOVACUUM /* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. */ - if( pBt->autoVacuum ){ - rc = ptrmapPut(pBt, pPage->pgno, PTRMAP_FREEPAGE, 0); - if( rc ) return rc; - } -#endif + if( ISAUTOVACUUM ){ + rc = ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0); + if( rc ) goto freepage_out; + } + + /* Now manipulate the actual database free-list structure. There are two + ** possibilities. 
If the free-list is currently empty, or if the first + ** trunk page in the free-list is full, then this page will become a + ** new free-list trunk page. Otherwise, it will become a leaf of the + ** first trunk page in the current free-list. This block tests if it + ** is possible to add the page as a new free-list leaf. + */ + if( nFree!=0 ){ + int nLeaf; /* Initial number of leaf cells on trunk page */ - if( n==0 ){ - /* This is the first free page */ - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ) return rc; - memset(pPage->aData, 0, 8); - put4byte(&pPage1->aData[32], pPage->pgno); - TRACE(("FREE-PAGE: %d first\n", pPage->pgno)); - }else{ - /* Other free pages already exist. Retrive the first trunk page - ** of the freelist and find out how many leaves it has. */ - MemPage *pTrunk; - rc = sqlite3BtreeGetPage(pBt, get4byte(&pPage1->aData[32]), &pTrunk, 0); - if( rc ) return rc; - k = get4byte(&pTrunk->aData[4]); - if( k>=pBt->usableSize/4 - 8 ){ - /* The trunk is full. Turn the page being freed into a new - ** trunk page with no leaves. */ - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ) return rc; - put4byte(pPage->aData, pTrunk->pgno); - put4byte(&pPage->aData[4], 0); - put4byte(&pPage1->aData[32], pPage->pgno); - TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", - pPage->pgno, pTrunk->pgno)); - }else{ - /* Add the newly freed page as a leaf on the current trunk */ + iTrunk = get4byte(&pPage1->aData[32]); + rc = sqlite3BtreeGetPage(pBt, iTrunk, &pTrunk, 0); + if( rc!=SQLITE_OK ){ + goto freepage_out; + } + + nLeaf = get4byte(&pTrunk->aData[4]); + if( nLeaf<0 ){ + rc = SQLITE_CORRUPT_BKPT; + goto freepage_out; + } + if( nLeafusableSize/4 - 8 ){ + /* In this case there is room on the trunk page to insert the page + ** being freed as a new leaf. + ** + ** Note that the trunk page is not really full until it contains + ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have + ** coded. But due to a coding error in versions of SQLite prior to + ** 3.6.0, databases with freelist trunk pages holding more than + ** usableSize/4 - 8 entries will be reported as corrupt. In order + ** to maintain backwards compatibility with older versions of SQLite, + ** we will contain to restrict the number of entries to usableSize/4 - 8 + ** for now. At some point in the future (once everyone has upgraded + ** to 3.6.0 or later) we should consider fixing the conditional above + ** to read "usableSize/4-2" instead of "usableSize/4-8". + */ rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc==SQLITE_OK ){ - put4byte(&pTrunk->aData[4], k+1); - put4byte(&pTrunk->aData[8+k*4], pPage->pgno); + put4byte(&pTrunk->aData[4], nLeaf+1); + put4byte(&pTrunk->aData[8+nLeaf*4], iPage); #ifndef SQLITE_SECURE_DELETE - sqlite3PagerDontWrite(pPage->pDbPage); + if( pPage ){ + sqlite3PagerDontWrite(pPage->pDbPage); + } #endif + rc = btreeSetHasContent(pBt, iPage); } TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); + goto freepage_out; } - releasePage(pTrunk); } + + /* If control flows to this point, then it was not possible to add the + ** the page being freed as a leaf page of the first trunk in the free-list. + ** Possibly because the free-list is empty, or possibly because the + ** first trunk in the free-list is full. Either way, the page being freed + ** will become the new first trunk page in the free-list. 
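freePage2() above appends the freed page as a leaf of the first trunk whenever that trunk has room, and otherwise makes the freed page the new first trunk. As its comment explains, the room check uses usableSize/4 - 8 leaves even though the format could hold usableSize/4 - 2, for compatibility with pre-3.6.0 corruption checks. The capacity arithmetic, spelled out for two common page sizes:

#include <stdio.h>

/* Leaf capacity of a freelist trunk page. The trunk spends 8 header
** bytes (next-trunk pointer plus leaf count), then 4 bytes per leaf, so
** the format allows usableSize/4 - 2 leaves; the code deliberately stops
** six leaves short of that (usableSize/4 - 8) for backwards compatibility. */
static void trunkCapacity(unsigned usableSize){
  unsigned theoretical = usableSize/4 - 2;
  unsigned used        = usableSize/4 - 8;
  printf("usableSize=%u: format allows %u leaves, code uses %u\n",
         usableSize, theoretical, used);
}

int main(void){
  trunkCapacity(1024);    /* 254 vs 248 */
  trunkCapacity(4096);    /* 1022 vs 1016 */
  return 0;
}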
+ */ + if( ((!pPage) && (0 != (rc = sqlite3BtreeGetPage(pBt, iPage, &pPage, 0)))) + || (0 != (rc = sqlite3PagerWrite(pPage->pDbPage))) + ){ + goto freepage_out; + } + put4byte(pPage->aData, iTrunk); + put4byte(&pPage->aData[4], 0); + put4byte(&pPage1->aData[32], iPage); + TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); + +freepage_out: + if( pPage ){ + pPage->isInit = 0; + } + releasePage(pPage); + releasePage(pTrunk); return rc; } +static int freePage(MemPage *pPage){ + return freePage2(pPage->pBt, pPage, pPage->pgno); +} /* ** Free any overflow pages associated with the given Cell. @@ -3773,27 +4794,37 @@ Pgno ovflPgno; int rc; int nOvfl; - int ovflPageSize; + u16 ovflPageSize; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); sqlite3BtreeParseCellPtr(pPage, pCell, &info); if( info.iOverflow==0 ){ return SQLITE_OK; /* No overflow pages. Return without doing anything */ } ovflPgno = get4byte(&pCell[info.iOverflow]); + assert( pBt->usableSize > 4 ); ovflPageSize = pBt->usableSize - 4; nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize; assert( ovflPgno==0 || nOvfl>0 ); while( nOvfl-- ){ - MemPage *pOvfl; - if( ovflPgno==0 || ovflPgno>sqlite3PagerPagecount(pBt->pPager) ){ + Pgno iNext = 0; + MemPage *pOvfl = 0; + if( ovflPgno<2 || ovflPgno>pagerPagecount(pBt) ){ + /* 0 is not a legal page number and page 1 cannot be an + ** overflow page. Therefore if ovflPgno<2 or past the end of the + ** file the database must be corrupt. */ return SQLITE_CORRUPT_BKPT; } - - rc = getOverflowPage(pBt, ovflPgno, &pOvfl, (nOvfl==0)?0:&ovflPgno); - if( rc ) return rc; - rc = freePage(pOvfl); - sqlite3PagerUnref(pOvfl->pDbPage); + if( nOvfl ){ + rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext); + if( rc ) return rc; + } + rc = freePage2(pBt, pOvfl, ovflPgno); + if( pOvfl ){ + sqlite3PagerUnref(pOvfl->pDbPage); + } if( rc ) return rc; + ovflPgno = iNext; } return SQLITE_OK; } @@ -3831,6 +4862,13 @@ int nHeader; CellInfo info; + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + + /* pPage is not necessarily writeable since pCell might be auxiliary + ** buffer space that is separate from the pPage buffer area */ + assert( pCellaData || pCell>=&pPage->aData[pBt->pageSize] + || sqlite3PagerIswriteable(pPage->pDbPage) ); + /* Fill in the header. 
*/ nHeader = 0; if( !pPage->leaf ){ @@ -3845,7 +4883,7 @@ sqlite3BtreeParseCellPtr(pPage, pCell, &info); assert( info.nHeader==nHeader ); assert( info.nKey==nKey ); - assert( info.nData==nData+nZero ); + assert( info.nData==(u32)(nData+nZero) ); /* Fill in the payload */ nPayload = nData + nZero; @@ -3853,10 +4891,13 @@ pSrc = pData; nSrc = nData; nData = 0; - }else{ - nPayload += nKey; + }else{ + if( nKey>0x7fffffff || pKey==0 ){ + return SQLITE_CORRUPT; + } + nPayload += (int)nKey; pSrc = pKey; - nSrc = nKey; + nSrc = (int)nKey; } *pnSize = info.nSize; spaceLeft = info.nLocal; @@ -3865,7 +4906,6 @@ while( nPayload>0 ){ if( spaceLeft==0 ){ - int isExact = 0; #ifndef SQLITE_OMIT_AUTOVACUUM Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */ if( pBt->autoVacuum ){ @@ -3874,12 +4914,9 @@ } while( PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) ); - if( pgnoOvfl>1 ){ - /* isExact = 1; */ - } } #endif - rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, isExact); + rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0); #ifndef SQLITE_OMIT_AUTOVACUUM /* If the database supports auto-vacuum, and the second or subsequent ** overflow page is being allocated, add an entry to the pointer-map @@ -3903,6 +4940,16 @@ releasePage(pToRelease); return rc; } + + /* If pToRelease is not zero than pPrior points into the data area + ** of pToRelease. Make sure pToRelease is still writeable. */ + assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); + + /* If pPrior is part of the data area of pPage, then make sure pPage + ** is still writeable */ + assert( pPrioraData || pPrior>=&pPage->aData[pBt->pageSize] + || sqlite3PagerIswriteable(pPage->pDbPage) ); + put4byte(pPrior, pgnoOvfl); releasePage(pToRelease); pToRelease = pOvfl; @@ -3913,6 +4960,16 @@ } n = nPayload; if( n>spaceLeft ) n = spaceLeft; + + /* If pToRelease is not zero than pPayload points into the data area + ** of pToRelease. Make sure pToRelease is still writeable. */ + assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); + + /* If pPayload is part of the data area of pPage, then make sure pPage + ** is still writeable */ + assert( pPayloadaData || pPayload>=&pPage->aData[pBt->pageSize] + || sqlite3PagerIswriteable(pPage->pDbPage) ); + if( nSrc>0 ){ if( n>nSrc ) n = nSrc; assert( pSrc ); @@ -3935,75 +4992,6 @@ } /* -** Change the MemPage.pParent pointer on the page whose number is -** given in the second argument so that MemPage.pParent holds the -** pointer in the third argument. -*/ -static int reparentPage(BtShared *pBt, Pgno pgno, MemPage *pNewParent, int idx){ - MemPage *pThis; - DbPage *pDbPage; - - assert( pNewParent!=0 ); - if( pgno==0 ) return SQLITE_OK; - assert( pBt->pPager!=0 ); - pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); - if( pDbPage ){ - pThis = (MemPage *)sqlite3PagerGetExtra(pDbPage); - if( pThis->isInit ){ - assert( pThis->aData==(sqlite3PagerGetData(pDbPage)) ); - if( pThis->pParent!=pNewParent ){ - if( pThis->pParent ) sqlite3PagerUnref(pThis->pParent->pDbPage); - pThis->pParent = pNewParent; - sqlite3PagerRef(pNewParent->pDbPage); - } - pThis->idxParent = idx; - } - sqlite3PagerUnref(pDbPage); - } - -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - return ptrmapPut(pBt, pgno, PTRMAP_BTREE, pNewParent->pgno); - } -#endif - return SQLITE_OK; -} - - - -/* -** Change the pParent pointer of all children of pPage to point back -** to pPage. 
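fillInCell() above stores the first info.nLocal bytes of the payload on the b-tree page itself (spaceLeft starts at info.nLocal) and chains the remainder through freshly allocated overflow pages. A sketch of how the local portion is sized for a table leaf cell, following the rule given in the SQLite file-format documentation; treat the exact constants as version-dependent assumptions:

#include <stdio.h>

/* How many payload bytes stay on a table b-tree leaf page, with the rest
** spilling to overflow pages. U is the usable page size, P the payload
** size. Constants (35, 23, the 32/255 scaling, U-4) are assumptions taken
** from the file-format description and may differ between versions. */
static unsigned localPayload(unsigned U, unsigned P){
  unsigned X = U - 35;                      /* max local payload, table leaf */
  unsigned M = ((U-12)*32)/255 - 23;        /* min local payload */
  unsigned K;
  if( P<=X ) return P;                      /* fits entirely on the page */
  K = M + (P - M) % (U - 4);
  return (K<=X) ? K : M;
}

int main(void){
  printf("U=1024 P=500  -> local %u\n", localPayload(1024, 500));
  printf("U=1024 P=5000 -> local %u\n", localPayload(1024, 5000));
  return 0;
}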
-** -** In other words, for every child of pPage, invoke reparentPage() -** to make sure that each child knows that pPage is its parent. -** -** This routine gets called after you memcpy() one page into -** another. -*/ -static int reparentChildPages(MemPage *pPage){ - int i; - BtShared *pBt = pPage->pBt; - int rc = SQLITE_OK; - - if( pPage->leaf ) return SQLITE_OK; - - for(i=0; inCell; i++){ - u8 *pCell = findCell(pPage, i); - if( !pPage->leaf ){ - rc = reparentPage(pBt, get4byte(pCell), pPage, i); - if( rc!=SQLITE_OK ) return rc; - } - } - if( !pPage->leaf ){ - rc = reparentPage(pBt, get4byte(&pPage->aData[pPage->hdrOffset+8]), - pPage, i); - pPage->idxShift = 0; - } - return rc; -} - -/* ** Remove the i-th cell from pPage. This routine effects pPage only. ** The cell content is not freed or deallocated. It is assumed that ** the cell content has been copied someplace else. This routine just @@ -4011,20 +4999,28 @@ ** ** "sz" must be the number of bytes in the cell. */ -static void dropCell(MemPage *pPage, int idx, int sz){ +static int dropCell(MemPage *pPage, int idx, int sz){ int i; /* Loop counter */ int pc; /* Offset to cell content of cell being deleted */ u8 *data; /* pPage->aData */ u8 *ptr; /* Used to move bytes around within data[] */ + int rc; /* The return code */ assert( idx>=0 && idxnCell ); assert( sz==cellSize(pPage, idx) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); data = pPage->aData; ptr = &data[pPage->cellOffset + 2*idx]; pc = get2byte(ptr); - assert( pc>10 && pc+sz<=pPage->pBt->usableSize ); - freeSpace(pPage, pc, sz); + if( (pchdrOffset+6+(pPage->leaf?0:4)) + || (pc+sz>pPage->pBt->usableSize) ){ + return SQLITE_CORRUPT_BKPT; + } + rc = freeSpace(pPage, pc, sz); + if( rc!=SQLITE_OK ){ + return rc; + } for(i=idx+1; inCell; i++, ptr+=2){ ptr[0] = ptr[2]; ptr[1] = ptr[3]; @@ -4032,7 +5028,7 @@ pPage->nCell--; put2byte(&data[pPage->hdrOffset+3], pPage->nCell); pPage->nFree += 2; - pPage->idxShift = 1; + return SQLITE_OK; } /* @@ -4058,7 +5054,7 @@ u8 *pCell, /* Content of the new cell */ int sz, /* Bytes of content in pCell */ u8 *pTemp, /* Temp storage space for pCell, if needed */ - u8 nSkip /* Do not write the first nSkip bytes of the cell */ + Pgno iChild /* If non-zero, replace first 4 bytes with this value */ ){ int idx; /* Where to write new cell content in data[] */ int j; /* Loop counter */ @@ -4070,20 +5066,31 @@ u8 *data; /* The content of the whole page */ u8 *ptr; /* Used for moving information around in data[] */ + int nSkip = (iChild ? 
4 : 0); + assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); + assert( pPage->nCell<=MX_CELL(pPage->pBt) && MX_CELL(pPage->pBt)<=5460 ); + assert( pPage->nOverflow<=ArraySize(pPage->aOvfl) ); assert( sz==cellSizePtr(pPage, pCell) ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp+nSkip, pCell+nSkip, sz-nSkip); pCell = pTemp; } + if( iChild ){ + put4byte(pCell, iChild); + } j = pPage->nOverflow++; - assert( jaOvfl)/sizeof(pPage->aOvfl[0]) ); + assert( j<(int)(sizeof(pPage->aOvfl)/sizeof(pPage->aOvfl[0])) ); pPage->aOvfl[j].pCell = pCell; - pPage->aOvfl[j].idx = i; - pPage->nFree = 0; + pPage->aOvfl[j].idx = (u16)i; }else{ + int rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); data = pPage->aData; hdr = pPage->hdrOffset; top = get2byte(&data[hdr+5]); @@ -4091,37 +5098,37 @@ end = cellOffset + 2*pPage->nCell + 2; ins = cellOffset + 2*i; if( end > top - sz ){ - int rc = defragmentPage(pPage); - if( rc!=SQLITE_OK ) return rc; + rc = defragmentPage(pPage); + if( rc!=SQLITE_OK ){ + return rc; + } top = get2byte(&data[hdr+5]); assert( end + sz <= top ); } idx = allocateSpace(pPage, sz); assert( idx>0 ); assert( end <= get2byte(&data[hdr+5]) ); + if (idx+sz > pPage->pBt->usableSize) { + return SQLITE_CORRUPT_BKPT; + } pPage->nCell++; - pPage->nFree -= 2; + pPage->nFree = pPage->nFree - (u16)(2 + sz); memcpy(&data[idx+nSkip], pCell+nSkip, sz-nSkip); + if( iChild ){ + put4byte(&data[idx], iChild); + } for(j=end-2, ptr=&data[j]; j>ins; j-=2, ptr-=2){ ptr[0] = ptr[-2]; ptr[1] = ptr[-1]; } put2byte(&data[ins], idx); put2byte(&data[hdr+3], pPage->nCell); - pPage->idxShift = 1; #ifndef SQLITE_OMIT_AUTOVACUUM if( pPage->pBt->autoVacuum ){ /* The cell may contain a pointer to an overflow page. If so, write ** the entry for the overflow page into the pointer map. 
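When the new cell does fit on the page, insertCell() above copies the body into space obtained from allocateSpace() and then opens a hole in the cell-pointer array — an array of 2-byte big-endian offsets — by sliding the later entries up one slot. That pointer-array insertion in isolation (helper names are illustrative):

#include <stdio.h>

static void put2(unsigned char *p, unsigned v){ p[0]=(unsigned char)(v>>8); p[1]=(unsigned char)v; }
static unsigned get2(const unsigned char *p){ return (p[0]<<8) | p[1]; }

/* Insert offset "idx" at position i of a cell-pointer array holding
** nCell 2-byte entries, shifting the tail up by one slot, the way the
** for(j=end-2,...) loop in insertCell() does. */
static void cellPtrInsert(unsigned char *aPtr, int nCell, int i, unsigned idx){
  int j;
  for(j=nCell*2; j>i*2; j-=2){
    aPtr[j]   = aPtr[j-2];
    aPtr[j+1] = aPtr[j-1];
  }
  put2(&aPtr[i*2], idx);
}

int main(void){
  unsigned char aPtr[10];
  int i;
  put2(&aPtr[0], 900);  put2(&aPtr[2], 700);  put2(&aPtr[4], 500);
  cellPtrInsert(aPtr, 3, 1, 800);         /* new cell body written at offset 800 */
  for(i=0; i<4; i++) printf("%u ", get2(&aPtr[i*2]));
  printf("\n");                           /* 900 800 700 500 */
  return 0;
}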
*/ - CellInfo info; - sqlite3BtreeParseCellPtr(pPage, pCell, &info); - assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); - if( (info.nData+(pPage->intKey?0:info.nKey))>info.nLocal ){ - Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]); - int rc = ptrmapPut(pPage->pBt, pgnoOvfl, PTRMAP_OVERFLOW1, pPage->pgno); - if( rc!=SQLITE_OK ) return rc; - } + return ptrmapPutOvflPtr(pPage, pCell); } #endif } @@ -4137,40 +5144,36 @@ MemPage *pPage, /* The page to be assemblied */ int nCell, /* The number of cells to add to this page */ u8 **apCell, /* Pointers to cell bodies */ - int *aSize /* Sizes of the cells */ + u16 *aSize /* Sizes of the cells */ ){ int i; /* Loop counter */ - int totalSize; /* Total size of all cells */ - int hdr; /* Index of page header */ - int cellptr; /* Address of next cell pointer */ + u8 *pCellptr; /* Address of next cell pointer */ int cellbody; /* Address of next cell body */ - u8 *data; /* Data for the page */ + u8 * const data = pPage->aData; /* Pointer to data for pPage */ + const int hdr = pPage->hdrOffset; /* Offset of header on pPage */ + const int nUsable = pPage->pBt->usableSize; /* Usable size of page */ assert( pPage->nOverflow==0 ); - totalSize = 0; - for(i=0; inFree ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( nCell>=0 && nCell<=MX_CELL(pPage->pBt) && MX_CELL(pPage->pBt)<=5460 ); + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + + /* Check that the page has just been zeroed by zeroPage() */ assert( pPage->nCell==0 ); - cellptr = pPage->cellOffset; - data = pPage->aData; - hdr = pPage->hdrOffset; - put2byte(&data[hdr+3], nCell); - if( nCell ){ - cellbody = allocateSpace(pPage, totalSize); - assert( cellbody>0 ); - assert( pPage->nFree >= 2*nCell ); - pPage->nFree -= 2*nCell; - for(i=0; ipBt->usableSize ); + assert( get2byte(&data[hdr+5])==nUsable ); + + pCellptr = &data[pPage->cellOffset + nCell*2]; + cellbody = nUsable; + for(i=nCell-1; i>=0; i--){ + pCellptr -= 2; + cellbody -= aSize[i]; + put2byte(pCellptr, cellbody); + memcpy(&data[cellbody], apCell[i], aSize[i]); } - pPage->nCell = nCell; + put2byte(&data[hdr+3], nCell); + put2byte(&data[hdr+5], cellbody); + pPage->nFree -= (nCell*2 + nUsable - cellbody); + pPage->nCell = (u16)nCell; } /* @@ -4188,8 +5191,6 @@ #define NN 1 /* Number of neighbors on either side of pPage */ #define NB (NN*2+1) /* Total pages involved in the balance */ -/* Forward reference */ -static int balance(MemPage*, int); #ifndef SQLITE_OMIT_QUICKBALANCE /* @@ -4208,300 +5209,378 @@ ** pPage is the leaf page which is the right-most page in the tree. ** pParent is its parent. pPage must have a single overflow entry ** which is also the right-most entry on the page. -*/ -static int balance_quick(MemPage *pPage, MemPage *pParent){ - int rc; - MemPage *pNew; - Pgno pgnoNew; - u8 *pCell; - int szCell; - CellInfo info; - BtShared *pBt = pPage->pBt; - int parentIdx = pParent->nCell; /* pParent new divider cell index */ - int parentSize; /* Size of new divider cell */ - u8 parentCell[64]; /* Space for the new divider cell */ - - /* Allocate a new page. Insert the overflow cell from pPage - ** into it. Then remove the overflow cell from pPage. +** +** The pSpace buffer is used to store a temporary copy of the divider +** cell that will be inserted into pParent. Such a cell consists of a 4 +** byte page number followed by a variable length integer. In other +** words, at most 13 bytes. Hence the pSpace buffer must be at +** least 13 bytes in size. 
+*/ +static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ + BtShared *const pBt = pPage->pBt; /* B-Tree Database */ + MemPage *pNew; /* Newly allocated page */ + int rc; /* Return Code */ + Pgno pgnoNew; /* Page number of pNew */ + + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + assert( pPage->nOverflow==1 ); + + if( pPage->nCell<=0 ) return SQLITE_CORRUPT_BKPT; + + /* Allocate a new page. This page will become the right-sibling of + ** pPage. Make the parent page writable, so that the new divider cell + ** may be inserted. If both these operations are successful, proceed. */ rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - pCell = pPage->aOvfl[0].pCell; - szCell = cellSizePtr(pPage, pCell); - zeroPage(pNew, pPage->aData[0]); - assemblePage(pNew, 1, &pCell, &szCell); - pPage->nOverflow = 0; - /* Set the parent of the newly allocated page to pParent. */ - pNew->pParent = pParent; - sqlite3PagerRef(pParent->pDbPage); - - /* pPage is currently the right-child of pParent. Change this - ** so that the right-child is the new page allocated above and - ** pPage is the next-to-right child. - */ - assert( pPage->nCell>0 ); - pCell = findCell(pPage, pPage->nCell-1); - sqlite3BtreeParseCellPtr(pPage, pCell, &info); - rc = fillInCell(pParent, parentCell, 0, info.nKey, 0, 0, 0, &parentSize); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( parentSize<64 ); - rc = insertCell(pParent, parentIdx, parentCell, parentSize, 0, 4); - if( rc!=SQLITE_OK ){ - return rc; + if( rc==SQLITE_OK ){ + + u8 *pOut = &pSpace[4]; + u8 *pCell = pPage->aOvfl[0].pCell; + u16 szCell = cellSizePtr(pPage, pCell); + u8 *pStop; + + assert( sqlite3PagerIswriteable(pNew->pDbPage) ); + assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) ); + zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF); + assemblePage(pNew, 1, &pCell, &szCell); + + /* If this is an auto-vacuum database, update the pointer map + ** with entries for the new page, and any pointer from the + ** cell on the page to an overflow page. If either of these + ** operations fails, the return code is set, but the contents + ** of the parent page are still manipulated by thh code below. + ** That is Ok, at this point the parent page is guaranteed to + ** be marked as dirty. Returning an error code will cause a + ** rollback, undoing any changes made to the parent page. + */ + if( ISAUTOVACUUM ){ + rc = ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno); + if( szCell>pNew->minLocal && rc==SQLITE_OK ){ + rc = ptrmapPutOvflPtr(pNew, pCell); + } + } + + /* Create a divider cell to insert into pParent. The divider cell + ** consists of a 4-byte page number (the page number of pPage) and + ** a variable length key value (which must be the same value as the + ** largest key on pPage). + ** + ** To find the largest key value on pPage, first find the right-most + ** cell on pPage. The first two fields of this cell are the + ** record-length (a variable length integer at most 32-bits in size) + ** and the key value (a variable length integer, may have any value). + ** The first of the while(...) loops below skips over the record-length + ** field. The second while(...) loop copies the key value from the + ** cell on pPage into the pSpace buffer. 
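The two while() loops described in the comment above walk varints a byte at a time: the first skips the record-length field, the second copies the key value, stopping at a byte with the high bit clear or after 9 bytes (the largest SQLite varint). A small sketch of that skip/copy idiom with made-up data:

#include <stdio.h>

/* Advance past one SQLite-style varint (at most 9 bytes), optionally
** copying its bytes to pOut. Returns a pointer to the byte after it. */
static const unsigned char *varintSkipCopy(const unsigned char *p,
                                           unsigned char *pOut){
  const unsigned char *pStop = &p[9];
  unsigned char c;
  do{
    c = *(p++);
    if( pOut ) *(pOut++) = c;
  }while( (c & 0x80) && p<pStop );
  return p;
}

int main(void){
  /* A fake cell body: record-length varint 0x83 0x10, then key varint 0x2a. */
  unsigned char cell[] = { 0x83, 0x10, 0x2a, 0xff, 0xff };
  unsigned char key[9];
  const unsigned char *p = varintSkipCopy(cell, 0);   /* skip record length */
  const unsigned char *q = varintSkipCopy(p, key);    /* copy the key */
  printf("skipped %d bytes, key is %d byte(s), first byte 0x%02x\n",
         (int)(p-cell), (int)(q-p), key[0]);
  return 0;
}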
+ */ + pCell = findCell(pPage, pPage->nCell-1); + pStop = &pCell[9]; + while( (*(pCell++)&0x80) && pCellnCell,pSpace,(int)(pOut-pSpace),0,pPage->pgno); + + /* Set the right-child pointer of pParent to point to the new page. */ + put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); + + /* Release the reference to the new page. */ + releasePage(pNew); } - put4byte(findOverflowCell(pParent,parentIdx), pPage->pgno); - put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If this is an auto-vacuum database, update the pointer map - ** with entries for the new page, and any pointer from the - ** cell on the page to an overflow page. - */ - if( pBt->autoVacuum ){ - rc = ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno); - if( rc==SQLITE_OK ){ - rc = ptrmapPutOvfl(pNew, 0); + return rc; +} +#endif /* SQLITE_OMIT_QUICKBALANCE */ + +#if 0 +/* +** This function does not contribute anything to the operation of SQLite. +** it is sometimes activated temporarily while debugging code responsible +** for setting pointer-map entries. +*/ +static int ptrmapCheckPages(MemPage **apPage, int nPage){ + int i, j; + for(i=0; ipBt; + assert( pPage->isInit ); + + for(j=0; jnCell; j++){ + CellInfo info; + u8 *z; + + z = findCell(pPage, j); + sqlite3BtreeParseCellPtr(pPage, z, &info); + if( info.iOverflow ){ + Pgno ovfl = get4byte(&z[info.iOverflow]); + ptrmapGet(pBt, ovfl, &e, &n); + assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 ); + } + if( !pPage->leaf ){ + Pgno child = get4byte(z); + ptrmapGet(pBt, child, &e, &n); + assert( n==pPage->pgno && e==PTRMAP_BTREE ); + } } - if( rc!=SQLITE_OK ){ - releasePage(pNew); - return rc; + if( !pPage->leaf ){ + Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]); + ptrmapGet(pBt, child, &e, &n); + assert( n==pPage->pgno && e==PTRMAP_BTREE ); } } + return 1; +} #endif - /* Release the reference to the new page and balance the parent page, - ** in case the divider cell inserted caused it to become overfull. - */ - releasePage(pNew); - return balance(pParent, 0); +/* +** This function is used to copy the contents of the b-tree node stored +** on page pFrom to page pTo. If page pFrom was not a leaf page, then +** the pointer-map entries for each child page are updated so that the +** parent page stored in the pointer map is page pTo. If pFrom contained +** any cells with overflow page pointers, then the corresponding pointer +** map entries are also updated so that the parent page is page pTo. +** +** If pFrom is currently carrying any overflow cells (entries in the +** MemPage.aOvfl[] array), they are not copied to pTo. +** +** Before returning, page pTo is reinitialized using sqlite3BtreeInitPage(). +** +** The performance of this function is not critical. It is only used by +** the balance_shallower() and balance_deeper() procedures, neither of +** which are called often under normal circumstances. +*/ +static int copyNodeContent(MemPage *pFrom, MemPage *pTo){ + BtShared * const pBt = pFrom->pBt; + u8 * const aFrom = pFrom->aData; + u8 * const aTo = pTo->aData; + int const iFromHdr = pFrom->hdrOffset; + int const iToHdr = ((pTo->pgno==1) ? 100 : 0); + int rc = SQLITE_OK; + int iData; + + assert( pFrom->isInit ); + assert( pFrom->nFree>=iToHdr ); + assert( get2byte(&aFrom[iFromHdr+5])<=pBt->usableSize ); + + /* Copy the b-tree node content from page pFrom to page pTo. 
*/ + iData = get2byte(&aFrom[iFromHdr+5]); + memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData); + memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell); + + /* Reinitialize page pTo so that the contents of the MemPage structure + ** match the new data. The initialization of pTo "cannot" fail, as the + ** data copied from pFrom is known to be valid. */ + pTo->isInit = 0; + TESTONLY(rc = ) sqlite3BtreeInitPage(pTo); + assert( rc==SQLITE_OK ); + + /* If this is an auto-vacuum database, update the pointer-map entries + ** for any b-tree or overflow pages that pTo now contains the pointers to. */ + if( ISAUTOVACUUM ){ + rc = setChildPtrmaps(pTo); + } + return rc; } -#endif /* SQLITE_OMIT_QUICKBALANCE */ /* -** This routine redistributes Cells on pPage and up to NN*2 siblings -** of pPage so that all pages have about the same amount of free space. -** Usually NN siblings on either side of pPage is used in the balancing, -** though more siblings might come from one side if pPage is the first -** or last child of its parent. If pPage has fewer than 2*NN siblings -** (something which can only happen if pPage is the root page or a -** child of root) then all available siblings participate in the balancing. -** -** The number of siblings of pPage might be increased or decreased by one or -** two in an effort to keep pages nearly full but not over full. The root page -** is special and is allowed to be nearly empty. If pPage is -** the root page, then the depth of the tree might be increased -** or decreased by one, as necessary, to keep the root page from being -** overfull or completely empty. -** -** Note that when this routine is called, some of the Cells on pPage -** might not actually be stored in pPage->aData[]. This can happen -** if the page is overfull. Part of the job of this routine is to -** make sure all Cells for pPage once again fit in pPage->aData[]. -** -** In the course of balancing the siblings of pPage, the parent of pPage -** might become overfull or underfull. If that happens, then this routine -** is called recursively on the parent. +** This routine redistributes cells on the iParentIdx'th child of pParent +** (hereafter "the page") and up to 2 siblings so that all pages have about the +** same amount of free space. Usually a single sibling on either side of the +** page are used in the balancing, though both siblings might come from one +** side if the page is the first or last child of its parent. If the page +** has fewer than 2 siblings (something which can only happen if the page +** is a root page or a child of a root page) then all available siblings +** participate in the balancing. +** +** The number of siblings of the page might be increased or decreased by +** one or two in an effort to keep pages nearly full but not over full. +** +** Note that when this routine is called, some of the cells on the page +** might not actually be stored in MemPage.aData[]. This can happen +** if the page is overfull. This routine ensures that all cells allocated +** to the page and its siblings fit into MemPage.aData[] before returning. +** +** In the course of balancing the page and its siblings, cells may be +** inserted into or removed from the parent page (pParent). Doing so +** may cause the parent page to become overfull or underfull. If this +** happens, it is the responsibility of the caller to invoke the correct +** balancing routine to fix this problem (see the balance() routine). 
** ** If this routine fails for any reason, it might leave the database -** in a corrupted state. So if this routine fails, the database should +** in a corrupted state. So if this routine fails, the database should ** be rolled back. -*/ -static int balance_nonroot(MemPage *pPage){ - MemPage *pParent; /* The parent of pPage */ +** +** The third argument to this function, aOvflSpace, is a pointer to a +** buffer page-size bytes in size. If, in inserting cells into the parent +** page (pParent), the parent page becomes overfull, this buffer is +** used to store the parents overflow cells. Because this function inserts +** a maximum of four divider cells into the parent page, and the maximum +** size of a cell stored within an internal node is always less than 1/4 +** of the page-size, the aOvflSpace[] buffer is guaranteed to be large +** enough for all overflow cells. +** +** If aOvflSpace is set to a null pointer, this function returns +** SQLITE_NOMEM. +*/ +static int balance_nonroot( + MemPage *pParent, /* Parent page of siblings being balanced */ + int iParentIdx, /* Index of "the page" in pParent */ + u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */ + int isRoot /* True if pParent is a root-page */ +){ BtShared *pBt; /* The whole database */ int nCell = 0; /* Number of cells in apCell[] */ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ + int nNew = 0; /* Number of pages in apNew[] */ int nOld; /* Number of pages in apOld[] */ - int nNew; /* Number of pages in apNew[] */ - int nDiv; /* Number of cells in apDiv[] */ int i, j, k; /* Loop counters */ - int idx; /* Index of pPage in pParent->aCell[] */ int nxDiv; /* Next divider slot in pParent->aCell[] */ - int rc; /* The return code */ - int leafCorrection; /* 4 if pPage is a leaf. 0 if not */ + int rc = SQLITE_OK; /* The return code */ + u16 leafCorrection; /* 4 if pPage is a leaf. 
0 if not */ int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ int usableSpace; /* Bytes in pPage beyond the header */ int pageFlags; /* Value of pPage->aData[0] */ int subtotal; /* Subtotal of bytes in cells on one page */ - int iSpace = 0; /* First unused byte of aSpace[] */ + int iSpace1 = 0; /* First unused byte of aSpace1[] */ + int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ + int szScratch; /* Size of scratch memory requested */ MemPage *apOld[NB]; /* pPage and up to two siblings */ - Pgno pgnoOld[NB]; /* Page numbers for each page in apOld[] */ MemPage *apCopy[NB]; /* Private copies of apOld[] pages */ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ - Pgno pgnoNew[NB+2]; /* Page numbers for each page in apNew[] */ - u8 *apDiv[NB]; /* Divider cells in pParent */ + u8 *pRight; /* Location in parent of right-sibling pointer */ + u8 *apDiv[NB-1]; /* Divider cells in pParent */ int cntNew[NB+2]; /* Index in aCell[] of cell after i-th page */ int szNew[NB+2]; /* Combined size of cells place on i-th page */ u8 **apCell = 0; /* All cells begin balanced */ - int *szCell; /* Local size of all cells in apCell[] */ - u8 *aCopy[NB]; /* Space for holding data of apCopy[] */ - u8 *aSpace; /* Space to hold copies of dividers cells */ -#ifndef SQLITE_OMIT_AUTOVACUUM - u8 *aFrom = 0; -#endif + u16 *szCell; /* Local size of all cells in apCell[] */ + u8 *aSpace1; /* Space for copies of dividers cells */ + Pgno pgno; /* Temp var to store a page number in */ + + pBt = pParent->pBt; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); - /* - ** Find the parent page. - */ - assert( pPage->isInit ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - pBt = pPage->pBt; - pParent = pPage->pParent; - assert( pParent ); - if( SQLITE_OK!=(rc = sqlite3PagerWrite(pParent->pDbPage)) ){ - return rc; - } +#if 0 TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno)); +#endif -#ifndef SQLITE_OMIT_QUICKBALANCE - /* - ** A special case: If a new entry has just been inserted into a - ** table (that is, a btree with integer keys and all data at the leaves) - ** and the new entry is the right-most entry in the tree (it has the - ** largest key) then use the special balance_quick() routine for - ** balancing. balance_quick() is much faster and results in a tighter - ** packing of data in the common case. - */ - if( pPage->leaf && - pPage->intKey && - pPage->leafData && - pPage->nOverflow==1 && - pPage->aOvfl[0].idx==pPage->nCell && - pPage->pParent->pgno!=1 && - get4byte(&pParent->aData[pParent->hdrOffset+8])==pPage->pgno - ){ - /* - ** TODO: Check the siblings to the left of pPage. It may be that - ** they are not full and no new page is required. - */ - return balance_quick(pPage, pParent); + /* At this point pParent may have at most one overflow cell. And if + ** this overflow cell is present, it must be the cell with + ** index iParentIdx. This scenario comes about when this function + ** is called (indirectly) from sqlite3BtreeDelete(). */ + assert( pParent->nOverflow==0 || pParent->nOverflow==1 ); + assert( pParent->nOverflow==0 || pParent->aOvfl[0].idx==iParentIdx ); + + if( !aOvflSpace ){ + return SQLITE_NOMEM; } -#endif - /* - ** Find the cell in the parent page whose left child points back - ** to pPage. The "idx" variable is the index of that cell. 
If pPage - ** is the rightmost child of pParent then set idx to pParent->nCell - */ - if( pParent->idxShift ){ - Pgno pgno; - pgno = pPage->pgno; - assert( pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); - for(idx=0; idxnCell; idx++){ - if( get4byte(findCell(pParent, idx))==pgno ){ - break; - } + /* Find the sibling pages to balance. Also locate the cells in pParent + ** that divide the siblings. An attempt is made to find NN siblings on + ** either side of pPage. More siblings are taken from one side, however, + ** if there are fewer than NN siblings on the other side. If pParent + ** has NB or fewer children then all children of pParent are taken. + ** + ** This loop also drops the divider cells from the parent page. This + ** way, the remainder of the function does not have to deal with any + ** overflow cells in the parent page, as if one existed it has already + ** been removed. */ + i = pParent->nOverflow + pParent->nCell; + if( i<2 ){ + nxDiv = 0; + nOld = i+1; + }else{ + nOld = 3; + if( iParentIdx==0 ){ + nxDiv = 0; + }else if( iParentIdx==i ){ + nxDiv = i-2; + }else{ + nxDiv = iParentIdx-1; } - assert( idxnCell - || get4byte(&pParent->aData[pParent->hdrOffset+8])==pgno ); + i = 2; + } + if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){ + pRight = &pParent->aData[pParent->hdrOffset+8]; }else{ - idx = pPage->idxParent; + pRight = findCell(pParent, i+nxDiv-pParent->nOverflow); } + pgno = get4byte(pRight); + while( 1 ){ + rc = getAndInitPage(pBt, pgno, &apOld[i]); + if( rc ){ + memset(apOld, 0, i*sizeof(MemPage*)); + goto balance_cleanup; + } + nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; + if( (i--)==0 ) break; - /* - ** Initialize variables so that it will be safe to jump - ** directly to balance_cleanup at any moment. - */ - nOld = nNew = 0; - sqlite3PagerRef(pParent->pDbPage); - - /* - ** Find sibling pages to pPage and the cells in pParent that divide - ** the siblings. An attempt is made to find NN siblings on either - ** side of pPage. More siblings are taken from one side, however, if - ** pPage there are fewer than NN siblings on the other side. If pParent - ** has NB or fewer children then all children of pParent are taken. - */ - nxDiv = idx - NN; - if( nxDiv + NB > pParent->nCell ){ - nxDiv = pParent->nCell - NB + 1; - } - if( nxDiv<0 ){ - nxDiv = 0; - } - nDiv = 0; - for(i=0, k=nxDiv; inCell ){ - apDiv[i] = findCell(pParent, k); - nDiv++; - assert( !pParent->leaf ); - pgnoOld[i] = get4byte(apDiv[i]); - }else if( k==pParent->nCell ){ - pgnoOld[i] = get4byte(&pParent->aData[pParent->hdrOffset+8]); + if( pParent->nOverflow && i+nxDiv==pParent->aOvfl[0].idx ){ + apDiv[i] = pParent->aOvfl[0].pCell; + pgno = get4byte(apDiv[i]); + szNew[i] = cellSizePtr(pParent, apDiv[i]); + pParent->nOverflow = 0; }else{ - break; + apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow); + pgno = get4byte(apDiv[i]); + szNew[i] = cellSizePtr(pParent, apDiv[i]); + + /* Drop the cell from the parent page. apDiv[i] still points to + ** the cell within the parent, even though it has been dropped. + ** This is safe because dropping a cell only overwrites the first + ** four bytes of it, and this function does not need the first + ** four bytes of the divider cell. So the pointer is safe to use + ** later on. + ** + ** Unless SQLite is compiled in secure-delete mode. In this case, + ** the dropCell() routine will overwrite the entire cell with zeroes. + ** In this case, temporarily copy the cell into the aOvflSpace[] + ** buffer. 
It will be copied out again as soon as the aSpace[] buffer + ** is allocated. */ +#ifdef SQLITE_SECURE_DELETE + memcpy(&aOvflSpace[apDiv[i]-pParent->aData], apDiv[i], szNew[i]); + apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; +#endif + dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i]); } - rc = getAndInitPage(pBt, pgnoOld[i], &apOld[i], pParent); - if( rc ) goto balance_cleanup; - apOld[i]->idxParent = k; - apCopy[i] = 0; - assert( i==nOld ); - nOld++; - nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; } - /* Make nMaxCells a multiple of 2 in order to preserve 8-byte + /* Make nMaxCells a multiple of 4 in order to preserve 8-byte ** alignment */ - nMaxCells = (nMaxCells + 1)&~1; + nMaxCells = (nMaxCells + 3)&~3; /* ** Allocate space for memory structures */ - apCell = sqliteMallocRaw( - nMaxCells*sizeof(u8*) /* apCell */ - + nMaxCells*sizeof(int) /* szCell */ - + ROUND8(sizeof(MemPage))*NB /* aCopy */ - + pBt->pageSize*(5+NB) /* aSpace */ - + (ISAUTOVACUUM ? nMaxCells : 0) /* aFrom */ - ); + k = pBt->pageSize + ROUND8(sizeof(MemPage)); + szScratch = + nMaxCells*sizeof(u8*) /* apCell */ + + nMaxCells*sizeof(u16) /* szCell */ + + pBt->pageSize /* aSpace1 */ + + k*nOld; /* Page copies (apCopy) */ + apCell = sqlite3ScratchMalloc( szScratch ); if( apCell==0 ){ rc = SQLITE_NOMEM; goto balance_cleanup; } - szCell = (int*)&apCell[nMaxCells]; - aCopy[0] = (u8*)&szCell[nMaxCells]; - assert( ((aCopy[0] - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ - for(i=1; ipageSize+ROUND8(sizeof(MemPage))]; - assert( ((aCopy[i] - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ - } - aSpace = &aCopy[NB-1][pBt->pageSize+ROUND8(sizeof(MemPage))]; - assert( ((aSpace - (u8*)apCell) & 7)==0 ); /* 8-byte alignment required */ -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - aFrom = &aSpace[5*pBt->pageSize]; - } -#endif - - /* - ** Make copies of the content of pPage and its siblings into aOld[]. - ** The rest of this function will use data from the copies rather - ** that the original pages since the original pages will be in the - ** process of being overwritten. - */ - for(i=0; ipageSize]; - p->aData = &((u8*)p)[-pBt->pageSize]; - memcpy(p->aData, apOld[i]->aData, pBt->pageSize + sizeof(MemPage)); - /* The memcpy() above changes the value of p->aData so we have to - ** set it again. */ - p->aData = &((u8*)p)[-pBt->pageSize]; - } + szCell = (u16*)&apCell[nMaxCells]; + aSpace1 = (u8*)&szCell[nMaxCells]; + assert( EIGHT_BYTE_ALIGNMENT(aSpace1) ); /* ** Load pointers to all cells on sibling pages and the divider cells ** into the local apCell[] array. Make copies of the divider cells - ** into space obtained form aSpace[] and remove the the divider Cells + ** into space obtained from aSpace1[] and remove the the divider Cells ** from pParent. ** ** If the siblings are on leaf pages, then the child pointers of the ** divider cells are stripped from the cells before they are copied - ** into aSpace[]. In this way, all cells in apCell[] are without + ** into aSpace1[]. In this way, all cells in apCell[] are without ** child pointers. If siblings are not leaves, then all cell in ** apCell[] include child pointers. Either way, all cells in apCell[] ** are alike. @@ -4509,70 +5588,54 @@ ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. ** leafData: 1 if pPage holds key+data and pParent holds only keys. 
*/ - nCell = 0; - leafCorrection = pPage->leaf*4; - leafData = pPage->leafData && pPage->leaf; + leafCorrection = apOld[0]->leaf*4; + leafData = apOld[0]->hasData; for(i=0; inCell+pOld->nOverflow; + int limit; + + /* Before doing anything else, take a copy of the i'th original sibling + ** The rest of this function will use data from the copies rather + ** that the original pages since the original pages will be in the + ** process of being overwritten. */ + MemPage *pOld = apCopy[i] = (MemPage*)&aSpace1[pBt->pageSize + k*i]; + memcpy(pOld, apOld[i], sizeof(MemPage)); + pOld->aData = (void*)&pOld[1]; + memcpy(pOld->aData, apOld[i]->aData, pBt->pageSize); + + limit = pOld->nCell+pOld->nOverflow; for(j=0; jautoVacuum ){ - int a; - aFrom[nCell] = i; - for(a=0; anOverflow; a++){ - if( pOld->aOvfl[a].pCell==apCell[nCell] ){ - aFrom[nCell] = 0xFF; - break; - } - } - } -#endif nCell++; } - if( ipageSize/4 ); + assert( iSpace1<=pBt->pageSize ); + memcpy(pTemp, apDiv[i], sz); + apCell[nCell] = pTemp+leafCorrection; + assert( leafCorrection==0 || leafCorrection==4 ); + szCell[nCell] = szCell[nCell] - leafCorrection; + if( !pOld->leaf ){ + assert( leafCorrection==0 ); + assert( pOld->hdrOffset==0 ); + /* The right pointer of the child page pOld becomes the left + ** pointer of the divider cell */ + memcpy(apCell[nCell], &pOld->aData[8], 4); }else{ - u8 *pTemp; - assert( nCellpageSize*5 ); - memcpy(pTemp, apDiv[i], sz); - apCell[nCell] = pTemp+leafCorrection; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - aFrom[nCell] = 0xFF; + assert( leafCorrection==4 ); + if( szCell[nCell]<4 ){ + /* Do not allow any cells smaller than 4 bytes. */ + szCell[nCell] = 4; } -#endif - dropCell(pParent, nxDiv, sz); - szCell[nCell] -= leafCorrection; - assert( get4byte(pTemp)==pgnoOld[i] ); - if( !pOld->leaf ){ - assert( leafCorrection==0 ); - /* The right pointer of the child page pOld becomes the left - ** pointer of the divider cell */ - memcpy(apCell[nCell], &pOld->aData[pOld->hdrOffset+8], 4); - }else{ - assert( leafCorrection==4 ); - if( szCell[nCell]<4 ){ - /* Do not allow any cells smaller than 4 bytes. */ - szCell[nCell] = 4; - } - } - nCell++; } + nCell++; } } @@ -4602,6 +5665,7 @@ if( leafData ){ i--; } subtotal = 0; k++; + if( k>NB+1 ){ rc = SQLITE_CORRUPT; goto balance_cleanup; } } } szNew[k] = subtotal; @@ -4639,34 +5703,49 @@ szNew[i-1] = szLeft; } - /* Either we found one or more cells (cntnew[0])>0) or we are the + /* Either we found one or more cells (cntnew[0])>0) or pPage is ** a virtual root page. A virtual root page is when the real root ** page is page 1 and we are the only child of that page. */ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) ); + TRACE(("BALANCE: old: %d %d %d ", + apOld[0]->pgno, + nOld>=2 ? apOld[1]->pgno : 0, + nOld>=3 ? apOld[2]->pgno : 0 + )); + /* ** Allocate k new pages. Reuse old pages where possible. */ - assert( pPage->pgno>1 ); - pageFlags = pPage->aData[0]; + if( apOld[0]->pgno<=1 ){ + rc = SQLITE_CORRUPT; + goto balance_cleanup; + } + pageFlags = apOld[0]->aData[0]; for(i=0; ipDbPage); nNew++; if( rc ) goto balance_cleanup; }else{ assert( i>0 ); - rc = allocateBtreePage(pBt, &pNew, &pgnoNew[i], pgnoNew[i-1], 0); + rc = allocateBtreePage(pBt, &pNew, &pgno, pgno, 0); if( rc ) goto balance_cleanup; apNew[i] = pNew; nNew++; + + /* Set the pointer-map entry for the new sibling page. 
*/ + if( ISAUTOVACUUM ){ + rc = ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno); + if( rc!=SQLITE_OK ){ + goto balance_cleanup; + } + } } - zeroPage(pNew, pageFlags); } /* Free any old pages that were not reused as new pages. @@ -4694,34 +5773,32 @@ ** about 25% faster for large insertions and deletions. */ for(i=0; ipgno; int minI = i; for(j=i+1; jpgno<(unsigned)minV ){ minI = j; - minV = pgnoNew[j]; + minV = apNew[j]->pgno; } } if( minI>i ){ int t; MemPage *pT; - t = pgnoNew[i]; + t = apNew[i]->pgno; pT = apNew[i]; - pgnoNew[i] = pgnoNew[minI]; apNew[i] = apNew[minI]; - pgnoNew[minI] = t; apNew[minI] = pT; } } - TRACE(("BALANCE: old: %d %d %d new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n", - pgnoOld[0], - nOld>=2 ? pgnoOld[1] : 0, - nOld>=3 ? pgnoOld[2] : 0, - pgnoNew[0], szNew[0], - nNew>=2 ? pgnoNew[1] : 0, nNew>=2 ? szNew[1] : 0, - nNew>=3 ? pgnoNew[2] : 0, nNew>=3 ? szNew[2] : 0, - nNew>=4 ? pgnoNew[3] : 0, nNew>=4 ? szNew[3] : 0, - nNew>=5 ? pgnoNew[4] : 0, nNew>=5 ? szNew[4] : 0)); + TRACE(("new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n", + apNew[0]->pgno, szNew[0], + nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, + nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0, + nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0, + nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0)); + + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + put4byte(pRight, apNew[nNew-1]->pgno); /* ** Evenly distribute the data in apCell[] across the new pages. @@ -4732,36 +5809,18 @@ /* Assemble the new sibling page. */ MemPage *pNew = apNew[i]; assert( jpgno==pgnoNew[i] ); + zeroPage(pNew, pageFlags); assemblePage(pNew, cntNew[i]-j, &apCell[j], &szCell[j]); assert( pNew->nCell>0 || (nNew==1 && cntNew[0]==0) ); assert( pNew->nOverflow==0 ); -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If this is an auto-vacuum database, update the pointer map entries - ** that point to the siblings that were rearranged. These can be: left - ** children of cells, the right-child of the page, or overflow pages - ** pointed to by cells. - */ - if( pBt->autoVacuum ){ - for(k=j; kpgno!=pNew->pgno ){ - rc = ptrmapPutOvfl(pNew, k-j); - if( rc!=SQLITE_OK ){ - goto balance_cleanup; - } - } - } - } -#endif - j = cntNew[i]; /* If the sibling page assembled above was not the right-most sibling, ** insert a divider cell into the parent page. */ - if( ileaf ){ memcpy(&pNew->aData[8], pCell, 4); - pTemp = 0; }else if( leafData ){ /* If the tree is a leaf-data tree, and the siblings are leaves, ** then there is no divider cell in apCell[]. Instead, the divider @@ -4781,21 +5840,16 @@ CellInfo info; j--; sqlite3BtreeParseCellPtr(pNew, apCell[j], &info); - pCell = &aSpace[iSpace]; - fillInCell(pParent, pCell, 0, info.nKey, 0, 0, 0, &sz); - iSpace += sz; - assert( iSpace<=pBt->pageSize*5 ); + pCell = pTemp; + sz = 4 + putVarint(&pCell[4], info.nKey); pTemp = 0; }else{ pCell -= 4; - pTemp = &aSpace[iSpace]; - iSpace += sz; - assert( iSpace<=pBt->pageSize*5 ); /* Obscure case for non-leaf-data trees: If the cell at pCell was - ** previously stored on a leaf node, and it's reported size was 4 + ** previously stored on a leaf node, and its reported size was 4 ** bytes, then it may actually be smaller than this ** (see sqlite3BtreeParseCellPtr(), 4 bytes is the minimum size of - ** any cell). But it's important to pass the correct size to + ** any cell). But it is important to pass the correct size to ** insertCell(), so reparse the cell now. 
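The leaf-data branch just above builds the replacement divider cell in place: a 4-byte big-endian pointer to the left child followed by the rowid encoded as a varint, which is why its size is computed as 4 + putVarint(&pCell[4], info.nKey). The sketch below shows that layout in isolation, assuming the documented SQLite varint encoding (big-endian groups of 7 bits, with a 9-byte form whose last byte carries a full 8 bits); put4byteBE() and putVarint64() here are local stand-ins, not the helpers used in btree.c.

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned long long u64;

    /* Store a 32-bit value big-endian, as child-page pointers are stored. */
    static void put4byteBE(u8 *p, unsigned v){
      p[0] = (u8)(v>>24); p[1] = (u8)(v>>16); p[2] = (u8)(v>>8); p[3] = (u8)v;
    }

    /* SQLite-style varint: most-significant 7-bit group first, high bit set
    ** on every byte except the last; values needing more than 56 bits use a
    ** 9-byte form whose final byte holds a full 8 bits. */
    static int putVarint64(u8 *p, u64 v){
      u8 buf[10];
      int i, n = 0;
      if( v >= (1ULL<<56) ){
        p[8] = (u8)v;
        v >>= 8;
        for(i=7; i>=0; i--){ p[i] = (u8)((v & 0x7f) | 0x80); v >>= 7; }
        return 9;
      }
      do{ buf[n++] = (u8)((v & 0x7f) | 0x80); v >>= 7; }while( v );
      buf[0] &= 0x7f;                      /* the last byte ends the varint */
      for(i=0; i<n; i++) p[i] = buf[n-1-i];
      return n;
    }

    int main(void){
      u8 cell[13];                /* 4-byte child pointer + up to 9-byte key */
      int sz;
      put4byteBE(cell, 37);                        /* left-child page number */
      sz = 4 + putVarint64(&cell[4], 1000000);     /* rowid used as the key  */
      printf("divider cell is %d bytes\n", sz);    /* prints 7               */
      return 0;
    }
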
** ** Note that this can never happen in an SQLite data file, as all @@ -4807,21 +5861,13 @@ sz = cellSizePtr(pParent, pCell); } } - rc = insertCell(pParent, nxDiv, pCell, sz, pTemp, 4); + iOvflSpace += sz; + assert( sz<=pBt->pageSize/4 ); + assert( iOvflSpace<=pBt->pageSize ); + rc = insertCell(pParent, nxDiv, pCell, sz, pTemp, pNew->pgno); if( rc!=SQLITE_OK ) goto balance_cleanup; - put4byte(findOverflowCell(pParent,nxDiv), pNew->pgno); -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If this is an auto-vacuum database, and not a leaf-data tree, - ** then update the pointer map with an entry for the overflow page - ** that the cell just inserted points to (if any). - */ - if( pBt->autoVacuum && !leafData ){ - rc = ptrmapPutOvfl(pParent, nxDiv); - if( rc!=SQLITE_OK ){ - goto balance_cleanup; - } - } -#endif + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + j++; nxDiv++; } @@ -4830,232 +5876,336 @@ assert( nOld>0 ); assert( nNew>0 ); if( (pageFlags & PTF_LEAF)==0 ){ - memcpy(&apNew[nNew-1]->aData[8], &apCopy[nOld-1]->aData[8], 4); - } - if( nxDiv==pParent->nCell+pParent->nOverflow ){ - /* Right-most sibling is the right-most child of pParent */ - put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew[nNew-1]); - }else{ - /* Right-most sibling is the left child of the first entry in pParent - ** past the right-most divider entry */ - put4byte(findOverflowCell(pParent, nxDiv), pgnoNew[nNew-1]); + u8 *zChild = &apCopy[nOld-1]->aData[8]; + memcpy(&apNew[nNew-1]->aData[8], zChild, 4); } - /* - ** Reparent children of all cells. - */ - for(i=0; inCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){ + /* The root page of the b-tree now contains no cells. The only sibling + ** page is the right-child of the parent. Copy the contents of the + ** child page into the parent, decreasing the overall height of the + ** b-tree structure by one. This is described as the "balance-shallower" + ** sub-algorithm in some documentation. + ** + ** If this is an auto-vacuum database, the call to copyNodeContent() + ** sets all pointer-map entries corresponding to database image pages + ** for which the pointer is stored within the content being copied. + ** + ** The second assert below verifies that the child page is defragmented + ** (it must be, as it was just reconstructed using assemblePage()). This + ** is important if the parent page happens to be page 1 of the database + ** image. */ + assert( nNew==1 ); + assert( apNew[0]->nFree == + (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2) + ); + if( SQLITE_OK==(rc = copyNodeContent(apNew[0], pParent)) ){ + rc = freePage(apNew[0]); + } + }else if( ISAUTOVACUUM ){ + /* Fix the pointer-map entries for all the cells that were shifted around. + ** There are several different types of pointer-map entries that need to + ** be dealt with by this routine. Some of these have been set already, but + ** many have not. The following is a summary: + ** + ** 1) The entries associated with new sibling pages that were not + ** siblings when this function was called. These have already + ** been set. We don't need to worry about old siblings that were + ** moved to the free-list - the freePage() code has taken care + ** of those. + ** + ** 2) The pointer-map entries associated with the first overflow + ** page in any overflow chains used by new divider cells. These + ** have also already been taken care of by the insertCell() code. 
+ ** + ** 3) If the sibling pages are not leaves, then the child pages of + ** cells stored on the sibling pages may need to be updated. + ** + ** 4) If the sibling pages are not internal intkey nodes, then any + ** overflow pages used by these cells may need to be updated + ** (internal intkey nodes never contain pointers to overflow pages). + ** + ** 5) If the sibling pages are not leaves, then the pointer-map + ** entries for the right-child pages of each sibling may need + ** to be updated. + ** + ** Cases 1 and 2 are dealt with above by other code. The next + ** block deals with cases 3 and 4 and the one after that, case 5. Since + ** setting a pointer map entry is a relatively expensive operation, this + ** code only sets pointer map entries for child or overflow pages that have + ** actually moved between pages. */ + MemPage *pNew = apNew[0]; + MemPage *pOld = apCopy[0]; + int nOverflow = pOld->nOverflow; + int iNextOld = pOld->nCell + nOverflow; + int iOverflow = (nOverflow ? pOld->aOvfl[0].idx : -1); + j = 0; /* Current 'old' sibling page */ + k = 0; /* Current 'new' sibling page */ + for(i=0; inCell + pOld->nOverflow; + if( pOld->nOverflow ){ + nOverflow = pOld->nOverflow; + iOverflow = i + !leafData + pOld->aOvfl[0].idx; + } + isDivider = !leafData; + } + + assert(nOverflow>0 || iOverflowaOvfl[0].idx==pOld->aOvfl[1].idx-1); + assert(nOverflow<3 || pOld->aOvfl[1].idx==pOld->aOvfl[2].idx-1); + if( i==iOverflow ){ + isDivider = 1; + if( (--nOverflow)>0 ){ + iOverflow++; + } + } + + if( i==cntNew[k] ){ + /* Cell i is the cell immediately following the last cell on new + ** sibling page k. If the siblings are not leaf pages of an + ** intkey b-tree, then cell i is a divider cell. */ + pNew = apNew[++k]; + if( !leafData ) continue; + } + assert( rc==SQLITE_OK ); + assert( jpgno!=pNew->pgno ){ + if( !leafCorrection ){ + rc = ptrmapPut(pBt, get4byte(apCell[i]), PTRMAP_BTREE, pNew->pgno); + } + if( szCell[i]>pNew->minLocal && rc==SQLITE_OK ){ + rc = ptrmapPutOvflPtr(pNew, apCell[i]); + } + } + } + + if( !leafCorrection ){ + for(i=0; rc==SQLITE_OK && iaData[8]), PTRMAP_BTREE, apNew[i]->pgno); + } + } + +#if 0 + /* The ptrmapCheckPages() contains assert() statements that verify that + ** all pointer map pages are set correctly. This is helpful while + ** debugging. This is usually disabled because a corrupt database may + ** cause an assert() statement to fail. */ + ptrmapCheckPages(apNew, nNew); + ptrmapCheckPages(&pParent, 1); +#endif } - rc = reparentChildPages(pParent); - if( rc!=SQLITE_OK ) goto balance_cleanup; - /* - ** Balance the parent page. Note that the current page (pPage) might - ** have been added to the freelist so it might no longer be initialized. - ** But the parent page will always be initialized. - */ assert( pParent->isInit ); - rc = balance(pParent, 0); - + TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", + nOld, nNew, nCell)); + /* ** Cleanup before returning. */ balance_cleanup: - sqliteFree(apCell); + sqlite3ScratchFree(apCell); for(i=0; ipgno, nOld, nNew, nCell)); + return rc; } -/* -** This routine is called for the root page of a btree when the root -** page contains no cells. This is an opportunity to make the tree -** shallower by one level. 
-*/ -static int balance_shallower(MemPage *pPage){ - MemPage *pChild; /* The only child page of pPage */ - Pgno pgnoChild; /* Page number for pChild */ - int rc = SQLITE_OK; /* Return code from subprocedures */ - BtShared *pBt; /* The main BTree structure */ - int mxCellPerPage; /* Maximum number of cells per page */ - u8 **apCell; /* All cells from pages being balanced */ - int *szCell; /* Local size of all cells */ - assert( pPage->pParent==0 ); - assert( pPage->nCell==0 ); - pBt = pPage->pBt; - mxCellPerPage = MX_CELL(pBt); - apCell = sqliteMallocRaw( mxCellPerPage*(sizeof(u8*)+sizeof(int)) ); - if( apCell==0 ) return SQLITE_NOMEM; - szCell = (int*)&apCell[mxCellPerPage]; - if( pPage->leaf ){ - /* The table is completely empty */ - TRACE(("BALANCE: empty table %d\n", pPage->pgno)); - }else{ - /* The root page is empty but has one child. Transfer the - ** information from that one child into the root page if it - ** will fit. This reduces the depth of the tree by one. - ** - ** If the root page is page 1, it has less space available than - ** its child (due to the 100 byte header that occurs at the beginning - ** of the database fle), so it might not be able to hold all of the - ** information currently contained in the child. If this is the - ** case, then do not do the transfer. Leave page 1 empty except - ** for the right-pointer to the child page. The child page becomes - ** the virtual root of the tree. - */ - pgnoChild = get4byte(&pPage->aData[pPage->hdrOffset+8]); - assert( pgnoChild>0 ); - assert( pgnoChild<=sqlite3PagerPagecount(pPage->pBt->pPager) ); - rc = sqlite3BtreeGetPage(pPage->pBt, pgnoChild, &pChild, 0); - if( rc ) goto end_shallow_balance; - if( pPage->pgno==1 ){ - rc = sqlite3BtreeInitPage(pChild, pPage); - if( rc ) goto end_shallow_balance; - assert( pChild->nOverflow==0 ); - if( pChild->nFree>=100 ){ - /* The child information will fit on the root page, so do the - ** copy */ - int i; - zeroPage(pPage, pChild->aData[0]); - for(i=0; inCell; i++){ - apCell[i] = findCell(pChild,i); - szCell[i] = cellSizePtr(pChild, apCell[i]); - } - assemblePage(pPage, pChild->nCell, apCell, szCell); - /* Copy the right-pointer of the child to the parent. */ - put4byte(&pPage->aData[pPage->hdrOffset+8], - get4byte(&pChild->aData[pChild->hdrOffset+8])); - freePage(pChild); - TRACE(("BALANCE: child %d transfer to page 1\n", pChild->pgno)); - }else{ - /* The child has more information that will fit on the root. - ** The tree is already balanced. Do nothing. */ - TRACE(("BALANCE: child %d will not fit on page 1\n", pChild->pgno)); - } - }else{ - memcpy(pPage->aData, pChild->aData, pPage->pBt->usableSize); - pPage->isInit = 0; - pPage->pParent = 0; - rc = sqlite3BtreeInitPage(pPage, 0); - assert( rc==SQLITE_OK ); - freePage(pChild); - TRACE(("BALANCE: transfer child %d into root %d\n", - pChild->pgno, pPage->pgno)); - } - rc = reparentChildPages(pPage); - assert( pPage->nOverflow==0 ); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - int i; - for(i=0; inCell; i++){ - rc = ptrmapPutOvfl(pPage, i); - if( rc!=SQLITE_OK ){ - goto end_shallow_balance; - } - } - } -#endif +/* +** This function is called when the root page of a b-tree structure is +** overfull (has one or more overflow pages). +** +** A new child page is allocated and the contents of the current root +** page, including overflow cells, are copied into the child. The root +** page is then overwritten to make it an empty page with the right-child +** pointer pointing to the new page. 
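Both the old and the new code in this hunk work on raw page images through fixed header offsets: copyNodeContent() and balance_deeper() read the cell count at byte 3 and the start of the cell-content area at byte 5 of the page header, and the right-child pointer mentioned above lives at bytes 8 to 11 of an interior page's header (the header itself starts at offset 100 on page 1, because of the database file header, and at offset 0 on every other page). The reader below is a small self-contained sketch of those accesses, following the documented file-format layout; dumpPageHeader() and the big-endian helpers are illustrative, not part of btree.c.

    #include <stdio.h>

    typedef unsigned char u8;

    static unsigned get2byteBE(const u8 *p){ return (p[0]<<8) | p[1]; }
    static unsigned get4byteBE(const u8 *p){
      return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
    }

    /* Read the page-header fields that the balancing code touches.  hdr is
    ** 100 for page 1 and 0 for all other pages.  Offsets per the file format:
    **   hdr+0   page type flag (0x08 bit clear means interior page)
    **   hdr+3   number of cells on the page             (2 bytes, big-endian)
    **   hdr+5   start of the cell-content area          (2 bytes, big-endian)
    **   hdr+8   right-child page number, interior only  (4 bytes, big-endian)
    */
    static void dumpPageHeader(const u8 *aData, int hdr){
      unsigned flags   = aData[hdr];
      unsigned nCell   = get2byteBE(&aData[hdr+3]);
      unsigned content = get2byteBE(&aData[hdr+5]);
      printf("flags=0x%02x nCell=%u cell content starts at %u\n",
             flags, nCell, content);
      if( (flags & 0x08)==0 ){           /* leaf flag clear: interior page */
        printf("right child = page %u\n", get4byteBE(&aData[hdr+8]));
      }
    }

    int main(void){
      u8 page[1024] = {0};
      page[0] = 0x05;                    /* interior table b-tree page     */
      page[3] = 0; page[4] = 2;          /* two cells                      */
      page[5] = 0x03; page[6] = 0xF0;    /* content area starts at 0x03F0  */
      page[8] = 0; page[9] = 0; page[10] = 0; page[11] = 9; /* right child */
      dumpPageHeader(page, 0);
      return 0;
    }
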
+** +** Before returning, all pointer-map entries corresponding to pages +** that the new child-page now contains pointers to are updated. The +** entry corresponding to the new right-child pointer of the root +** page is also updated. +** +** If successful, *ppChild is set to contain a reference to the child +** page and SQLITE_OK is returned. In this case the caller is required +** to call releasePage() on *ppChild exactly once. If an error occurs, +** an error code is returned and *ppChild is set to 0. +*/ +static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ + int rc; /* Return value from subprocedures */ + MemPage *pChild = 0; /* Pointer to a new child page */ + Pgno pgnoChild; /* Page number of the new child page */ + BtShared *pBt = pRoot->pBt; /* The BTree */ + + assert( pRoot->nOverflow>0 ); + assert( sqlite3_mutex_held(pBt->mutex) ); + + /* Make pRoot, the root page of the b-tree, writable. Allocate a new + ** page that will become the new right-child of pPage. Copy the contents + ** of the node stored on pRoot into the new child page. + */ + if( SQLITE_OK!=(rc = sqlite3PagerWrite(pRoot->pDbPage)) + || SQLITE_OK!=(rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0)) + || SQLITE_OK!=(rc = copyNodeContent(pRoot, pChild)) + || (ISAUTOVACUUM && + SQLITE_OK!=(rc = ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno))) + ){ + *ppChild = 0; releasePage(pChild); + return rc; } -end_shallow_balance: - sqliteFree(apCell); - return rc; -} + assert( sqlite3PagerIswriteable(pChild->pDbPage) ); + assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); + assert( pChild->nCell==pRoot->nCell ); + TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); -/* -** The root page is overfull -** -** When this happens, Create a new child page and copy the -** contents of the root into the child. Then make the root -** page an empty page with rightChild pointing to the new -** child. Finally, call balance_internal() on the new child -** to cause it to split. 
-*/ -static int balance_deeper(MemPage *pPage){ - int rc; /* Return value from subprocedures */ - MemPage *pChild; /* Pointer to a new child page */ - Pgno pgnoChild; /* Page number of the new child page */ - BtShared *pBt; /* The BTree */ - int usableSize; /* Total usable size of a page */ - u8 *data; /* Content of the parent page */ - u8 *cdata; /* Content of the child page */ - int hdr; /* Offset to page header in parent */ - int brk; /* Offset to content of first cell in parent */ + /* Copy the overflow cells from pRoot to pChild */ + memcpy(pChild->aOvfl, pRoot->aOvfl, pRoot->nOverflow*sizeof(pRoot->aOvfl[0])); + pChild->nOverflow = pRoot->nOverflow; - assert( pPage->pParent==0 ); - assert( pPage->nOverflow>0 ); - pBt = pPage->pBt; - rc = allocateBtreePage(pBt, &pChild, &pgnoChild, pPage->pgno, 0); - if( rc ) return rc; - assert( sqlite3PagerIswriteable(pChild->pDbPage) ); - usableSize = pBt->usableSize; - data = pPage->aData; - hdr = pPage->hdrOffset; - brk = get2byte(&data[hdr+5]); - cdata = pChild->aData; - memcpy(cdata, &data[hdr], pPage->cellOffset+2*pPage->nCell-hdr); - memcpy(&cdata[brk], &data[brk], usableSize-brk); - assert( pChild->isInit==0 ); - rc = sqlite3BtreeInitPage(pChild, pPage); - if( rc ) goto balancedeeper_out; - memcpy(pChild->aOvfl, pPage->aOvfl, pPage->nOverflow*sizeof(pPage->aOvfl[0])); - pChild->nOverflow = pPage->nOverflow; - if( pChild->nOverflow ){ - pChild->nFree = 0; - } - assert( pChild->nCell==pPage->nCell ); - zeroPage(pPage, pChild->aData[0] & ~PTF_LEAF); - put4byte(&pPage->aData[pPage->hdrOffset+8], pgnoChild); - TRACE(("BALANCE: copy root %d into %d\n", pPage->pgno, pChild->pgno)); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - int i; - rc = ptrmapPut(pBt, pChild->pgno, PTRMAP_BTREE, pPage->pgno); - if( rc ) goto balancedeeper_out; - for(i=0; inCell; i++){ - rc = ptrmapPutOvfl(pChild, i); - if( rc!=SQLITE_OK ){ - return rc; - } - } - } -#endif - rc = balance_nonroot(pChild); + /* Zero the contents of pRoot. Then install pChild as the right-child. */ + zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF); + put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild); -balancedeeper_out: - releasePage(pChild); - return rc; + *ppChild = pChild; + return SQLITE_OK; } /* -** Decide if the page pPage needs to be balanced. If balancing is -** required, call the appropriate balancing routine. +** The page that pCur currently points to has just been modified in +** some way. This function figures out if this modification means the +** tree needs to be balanced, and if so calls the appropriate balancing +** routine. Balancing routines are: +** +** balance_quick() +** balance_deeper() +** balance_nonroot() */ -static int balance(MemPage *pPage, int insert){ +static int balance(BtCursor *pCur){ int rc = SQLITE_OK; - if( pPage->pParent==0 ){ - if( pPage->nOverflow>0 ){ - rc = balance_deeper(pPage); - } - if( rc==SQLITE_OK && pPage->nCell==0 ){ - rc = balance_shallower(pPage); - } - }else{ - if( pPage->nOverflow>0 || - (!insert && pPage->nFree>pPage->pBt->usableSize*2/3) ){ - rc = balance_nonroot(pPage); + const int nMin = pCur->pBt->usableSize * 2 / 3; + u8 aBalanceQuickSpace[13]; + u8 *pFree = 0; + + TESTONLY( int balance_quick_called = 0 ); + TESTONLY( int balance_deeper_called = 0 ); + + do { + int iPage = pCur->iPage; + MemPage *pPage = pCur->apPage[iPage]; + + if( iPage==0 ){ + if( pPage->nOverflow ){ + /* The root page of the b-tree is overfull. 
In this case call the + ** balance_deeper() function to create a new child for the root-page + ** and copy the current contents of the root-page to it. The + ** next iteration of the do-loop will balance the child page. + */ + assert( (balance_deeper_called++)==0 ); + rc = balance_deeper(pPage, &pCur->apPage[1]); + if( rc==SQLITE_OK ){ + pCur->iPage = 1; + pCur->aiIdx[0] = 0; + pCur->aiIdx[1] = 0; + assert( pCur->apPage[1]->nOverflow ); + } + }else{ + break; + } + }else if( pPage->nOverflow==0 && pPage->nFree<=nMin ){ + break; + }else{ + MemPage * const pParent = pCur->apPage[iPage-1]; + int const iIdx = pCur->aiIdx[iPage-1]; + + rc = sqlite3PagerWrite(pParent->pDbPage); + if( rc==SQLITE_OK ){ +#ifndef SQLITE_OMIT_QUICKBALANCE + if( pPage->hasData + && pPage->nOverflow==1 + && pPage->aOvfl[0].idx==pPage->nCell + && pParent->pgno!=1 + && pParent->nCell==iIdx + ){ + /* Call balance_quick() to create a new sibling of pPage on which + ** to store the overflow cell. balance_quick() inserts a new cell + ** into pParent, which may cause pParent overflow. If this + ** happens, the next interation of the do-loop will balance pParent + ** use either balance_nonroot() or balance_deeper(). Until this + ** happens, the overflow cell is stored in the aBalanceQuickSpace[] + ** buffer. + ** + ** The purpose of the following assert() is to check that only a + ** single call to balance_quick() is made for each call to this + ** function. If this were not verified, a subtle bug involving reuse + ** of the aBalanceQuickSpace[] might sneak in. + */ + assert( (balance_quick_called++)==0 ); + rc = balance_quick(pParent, pPage, aBalanceQuickSpace); + }else +#endif + { + /* In this case, call balance_nonroot() to redistribute cells + ** between pPage and up to 2 of its sibling pages. This involves + ** modifying the contents of pParent, which may cause pParent to + ** become overfull or underfull. The next iteration of the do-loop + ** will balance the parent page to correct this. + ** + ** If the parent page becomes overfull, the overflow cell or cells + ** are stored in the pSpace buffer allocated immediately below. + ** A subsequent iteration of the do-loop will deal with this by + ** calling balance_nonroot() (balance_deeper() may be called first, + ** but it doesn't deal with overflow cells - just moves them to a + ** different page). Once this subsequent call to balance_nonroot() + ** has completed, it is safe to release the pSpace buffer used by + ** the previous call, as the overflow cell data will have been + ** copied either into the body of a database page or into the new + ** pSpace buffer passed to the latter call to balance_nonroot(). + */ + u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); + rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1); + if( pFree ){ + /* If pFree is not NULL, it points to the pSpace buffer used + ** by a previous call to balance_nonroot(). Its contents are + ** now stored either on real database pages or within the + ** new pSpace buffer, so it may be safely freed here. */ + sqlite3PageFree(pFree); + } + + /* The pSpace buffer will be freed after the next call to + ** balance_nonroot(), or just before this function returns, whichever + ** comes first. */ + pFree = pSpace; + } + } + + pPage->nOverflow = 0; + + /* The next iteration of the do-loop balances the parent page. 
*/ + releasePage(pPage); + pCur->iPage--; } + }while( rc==SQLITE_OK ); + + if( pFree ){ + sqlite3PageFree(pFree); } return rc; } @@ -5068,30 +6218,63 @@ ** is not in the ReadUncommmitted state, then this routine returns ** SQLITE_LOCKED. ** -** In addition to checking for read-locks (where a read-lock -** means a cursor opened with wrFlag==0) this routine also moves -** all write cursors so that they are pointing to the -** first Cell on the root page. This is necessary because an insert -** or delete might change the number of cells on a page or delete -** a page entirely and we do not want to leave any cursors -** pointing to non-existant pages or cells. -*/ -static int checkReadLocks(Btree *pBtree, Pgno pgnoRoot, BtCursor *pExclude){ +** As well as cursors with wrFlag==0, cursors with +** isIncrblobHandle==1 are also considered 'read' cursors because +** incremental blob cursors are used for both reading and writing. +** +** When pgnoRoot is the root page of an intkey table, this function is also +** responsible for invalidating incremental blob cursors when the table row +** on which they are opened is deleted or modified. Cursors are invalidated +** according to the following rules: +** +** 1) When BtreeClearTable() is called to completely delete the contents +** of a B-Tree table, pExclude is set to zero and parameter iRow is +** set to non-zero. In this case all incremental blob cursors open +** on the table rooted at pgnoRoot are invalidated. +** +** 2) When BtreeInsert(), BtreeDelete() or BtreePutData() is called to +** modify a table row via an SQL statement, pExclude is set to the +** write cursor used to do the modification and parameter iRow is set +** to the integer row id of the B-Tree entry being modified. Unless +** pExclude is itself an incremental blob cursor, then all incremental +** blob cursors open on row iRow of the B-Tree are invalidated. +** +** 3) If both pExclude and iRow are set to zero, no incremental blob +** cursors are invalidated. +*/ +static int checkForReadConflicts( + Btree *pBtree, /* The database file to check */ + Pgno pgnoRoot, /* Look for read cursors on this btree */ + BtCursor *pExclude, /* Ignore this cursor */ + i64 iRow /* The rowid that might be changing */ +){ BtCursor *p; BtShared *pBt = pBtree->pBt; - sqlite3 *db = pBtree->pSqlite; + sqlite3 *db = pBtree->db; + assert( sqlite3BtreeHoldsMutex(pBtree) ); for(p=pBt->pCursor; p; p=p->pNext){ if( p==pExclude ) continue; - if( p->eState!=CURSOR_VALID ) continue; if( p->pgnoRoot!=pgnoRoot ) continue; - if( p->wrFlag==0 ){ - sqlite3 *dbOther = p->pBtree->pSqlite; - if( dbOther==0 || - (dbOther!=db && (dbOther->flags & SQLITE_ReadUncommitted)==0) ){ - return SQLITE_LOCKED; +#ifndef SQLITE_OMIT_INCRBLOB + if( p->isIncrblobHandle && ( + (!pExclude && iRow) + || (pExclude && !pExclude->isIncrblobHandle && p->info.nKey==iRow) + )){ + p->eState = CURSOR_INVALID; + } +#endif + if( p->eState!=CURSOR_VALID ) continue; + if( p->wrFlag==0 +#ifndef SQLITE_OMIT_INCRBLOB + || p->isIncrblobHandle +#endif + ){ + sqlite3 *dbOther = p->pBtree->db; + assert(dbOther); + if( dbOther!=db && (dbOther->flags & SQLITE_ReadUncommitted)==0 ){ + sqlite3ConnectionBlocked(db, dbOther); + return SQLITE_LOCKED_SHAREDCACHE; } - }else if( p->pPage->pgno!=p->pgnoRoot ){ - moveToRoot(p); } } return SQLITE_OK; @@ -5105,192 +6288,258 @@ ** ** For an INTKEY table, only the nKey value of the key is used. pKey is ** ignored. For a ZERODATA table, the pData and nData are both ignored. 
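As a quick restatement of the key conventions described above: for a rowid (INTKEY) table the key is carried entirely in nKey and the record in pData/nData, while for an index (ZERODATA) tree the whole key travels in pKey/nKey and no separate data is stored. The toy model below only encodes that convention with local types for illustration; it is not the btree.c interface itself.

    #include <assert.h>
    #include <stddef.h>

    typedef long long i64;

    /* Toy restatement of the two conventions: a rowid (intkey) entry is keyed
    ** by nKey alone and carries a record in pData/nData; an index (zerodata)
    ** entry carries its whole key in pKey/nKey and stores no separate data. */
    typedef struct ToyEntry {
      int isIntKey;        /* nonzero for a rowid-table entry      */
      const void *pKey;    /* serialized key, index entries only   */
      i64 nKey;            /* rowid, or length of pKey in bytes    */
      const void *pData;   /* record payload, rowid entries only   */
      int nData;           /* length of pData in bytes             */
    } ToyEntry;

    static int toyEntryOk(const ToyEntry *p){
      if( p->isIntKey ) return p->pKey==NULL;           /* pKey is ignored */
      return p->pData==NULL && p->nData==0;             /* data is ignored */
    }

    int main(void){
      ToyEntry row = { 1, NULL, 42, "record", 6 };      /* rowid-table entry */
      ToyEntry idx = { 0, "key-image", 9, NULL, 0 };    /* index entry       */
      assert( toyEntryOk(&row) );
      assert( toyEntryOk(&idx) );
      return 0;
    }
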
+** +** If the seekResult parameter is non-zero, then a successful call to +** sqlite3BtreeMoveto() to seek cursor pCur to (pKey, nKey) has already +** been performed. seekResult is the search result returned (a negative +** number if pCur points at an entry that is smaller than (pKey, nKey), or +** a positive value if pCur points at an etry that is larger than +** (pKey, nKey)). +** +** If the seekResult parameter is 0, then cursor pCur may point to any +** entry or to no entry at all. In this case this function has to seek +** the cursor before the new key can be inserted. */ int sqlite3BtreeInsert( BtCursor *pCur, /* Insert data into the table of this cursor */ const void *pKey, i64 nKey, /* The key of the new record */ const void *pData, int nData, /* The data of the new record */ int nZero, /* Number of extra 0 bytes to append to data */ - int appendBias /* True if this is likely an append */ + int appendBias, /* True if this is likely an append */ + int seekResult /* Result of prior sqlite3BtreeMoveto() call */ ){ int rc; - int loc; + int loc = seekResult; int szNew; + int idx; MemPage *pPage; - BtShared *pBt = pCur->pBtree->pBt; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; unsigned char *oldCell; unsigned char *newCell = 0; - if( pBt->inTransaction!=TRANS_WRITE ){ - /* Must start a transaction before doing an insert */ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } + assert( cursorHoldsMutex(pCur) ); + assert( pBt->inTransaction==TRANS_WRITE ); assert( !pBt->readOnly ); - if( !pCur->wrFlag ){ - return SQLITE_PERM; /* Cursor not open for writing */ + assert( pCur->wrFlag ); + rc = checkForReadConflicts(pCur->pBtree, pCur->pgnoRoot, pCur, nKey); + if( rc ){ + /* The table pCur points to has a read lock */ + assert( rc==SQLITE_LOCKED_SHAREDCACHE ); + return rc; } - if( checkReadLocks(pCur->pBtree, pCur->pgnoRoot, pCur) ){ - return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + if( pCur->eState==CURSOR_FAULT ){ + return pCur->skip; } - /* Save the positions of any other cursors open on this table */ - clearCursorPosition(pCur); - if( - SQLITE_OK!=(rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) || + /* Save the positions of any other cursors open on this table. + ** + ** In some cases, the call to sqlite3BtreeMoveto() below is a no-op. For + ** example, when inserting data into a table with auto-generated integer + ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the + ** integer key to use. It then calls this function to actually insert the + ** data into the intkey B-Tree. In this case sqlite3BtreeMoveto() recognizes + ** that the cursor is already where it needs to be and returns without + ** doing any work. To avoid thwarting these optimizations, it is important + ** not to clear the cursor here. + */ + if( + SQLITE_OK!=(rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) || (!loc && SQLITE_OK!=(rc = sqlite3BtreeMoveto(pCur, pKey, nKey, appendBias, &loc)) - ){ + )){ return rc; } + assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); - pPage = pCur->pPage; + pPage = pCur->apPage[pCur->iPage]; assert( pPage->intKey || nKey>=0 ); - assert( pPage->leaf || !pPage->leafData ); + assert( pPage->leaf || !pPage->intKey ); TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", pCur->pgnoRoot, nKey, nData, pPage->pgno, loc==0 ? 
"overwrite" : "new entry")); assert( pPage->isInit ); - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ) return rc; - newCell = sqliteMallocRaw( MX_CELL_SIZE(pBt) ); + allocateTempSpace(pBt); + newCell = pBt->pTmpSpace; if( newCell==0 ) return SQLITE_NOMEM; rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew); if( rc ) goto end_insert; assert( szNew==cellSizePtr(pPage, newCell) ); assert( szNew<=MX_CELL_SIZE(pBt) ); - if( loc==0 && CURSOR_VALID==pCur->eState ){ - int szOld; - assert( pCur->idx>=0 && pCur->idxnCell ); - oldCell = findCell(pPage, pCur->idx); + idx = pCur->aiIdx[pCur->iPage]; + if( loc==0 ){ + u16 szOld; + assert( idxnCell ); + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ){ + goto end_insert; + } + oldCell = findCell(pPage, idx); if( !pPage->leaf ){ memcpy(newCell, oldCell, 4); } szOld = cellSizePtr(pPage, oldCell); rc = clearCell(pPage, oldCell); if( rc ) goto end_insert; - dropCell(pPage, pCur->idx, szOld); + rc = dropCell(pPage, idx, szOld); + if( rc!=SQLITE_OK ) { + goto end_insert; + } }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); - pCur->idx++; - pCur->info.nSize = 0; + idx = ++pCur->aiIdx[pCur->iPage]; }else{ assert( pPage->leaf ); } - rc = insertCell(pPage, pCur->idx, newCell, szNew, 0, 0); - if( rc!=SQLITE_OK ) goto end_insert; - rc = balance(pPage, 1); - /* sqlite3BtreePageDump(pCur->pBt, pCur->pgnoRoot, 1); */ - /* fflush(stdout); */ - if( rc==SQLITE_OK ){ - moveToRoot(pCur); + rc = insertCell(pPage, idx, newCell, szNew, 0, 0); + assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); + + /* If no error has occured and pPage has an overflow cell, call balance() + ** to redistribute the cells within the tree. Since balance() may move + ** the cursor, zero the BtCursor.info.nSize and BtCursor.validNKey + ** variables. + ** + ** Previous versions of SQLite called moveToRoot() to move the cursor + ** back to the root page as balance() used to invalidate the contents + ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that, + ** set the cursor state to "invalid". This makes common insert operations + ** slightly faster. + ** + ** There is a subtle but important optimization here too. When inserting + ** multiple records into an intkey b-tree using a single cursor (as can + ** happen while processing an "INSERT INTO ... SELECT" statement), it + ** is advantageous to leave the cursor pointing to the last entry in + ** the b-tree if possible. If the cursor is left pointing to the last + ** entry in the table, and the next row inserted has an integer key + ** larger than the largest existing key, it is possible to insert the + ** row without seeking the cursor. This can be a big performance boost. + */ + pCur->info.nSize = 0; + pCur->validNKey = 0; + if( rc==SQLITE_OK && pPage->nOverflow ){ + rc = balance(pCur); + + /* Must make sure nOverflow is reset to zero even if the balance() + ** fails. Internal data structure corruption will result otherwise. + ** Also, set the cursor state to invalid. This stops saveCursorPosition() + ** from trying to save the current position of the cursor. */ + pCur->apPage[pCur->iPage]->nOverflow = 0; + pCur->eState = CURSOR_INVALID; } + assert( pCur->apPage[pCur->iPage]->nOverflow==0 ); + end_insert: - sqliteFree(newCell); return rc; } /* ** Delete the entry that the cursor is pointing to. The cursor -** is left pointing at a random location. +** is left pointing at a arbitrary location. 
*/ int sqlite3BtreeDelete(BtCursor *pCur){ - MemPage *pPage = pCur->pPage; - unsigned char *pCell; - int rc; - Pgno pgnoChild = 0; - BtShared *pBt = pCur->pBtree->pBt; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + int rc; /* Return code */ + MemPage *pPage; /* Page to delete cell from */ + unsigned char *pCell; /* Pointer to cell to delete */ + int iCellIdx; /* Index of cell to delete */ + int iCellDepth; /* Depth of node containing pCell */ - assert( pPage->isInit ); - if( pBt->inTransaction!=TRANS_WRITE ){ - /* Must start a transaction before doing a delete */ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } + assert( cursorHoldsMutex(pCur) ); + assert( pBt->inTransaction==TRANS_WRITE ); assert( !pBt->readOnly ); - if( pCur->idx >= pPage->nCell ){ - return SQLITE_ERROR; /* The cursor is not pointing to anything */ + assert( pCur->wrFlag ); + if( NEVER(pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell) + || NEVER(pCur->eState!=CURSOR_VALID) + ){ + return SQLITE_ERROR; /* Something has gone awry. */ } - if( !pCur->wrFlag ){ - return SQLITE_PERM; /* Did not open this cursor for writing */ + + rc = checkForReadConflicts(p, pCur->pgnoRoot, pCur, pCur->info.nKey); + if( rc!=SQLITE_OK ){ + assert( rc==SQLITE_LOCKED_SHAREDCACHE ); + return rc; /* The table pCur points to has a read lock */ } - if( checkReadLocks(pCur->pBtree, pCur->pgnoRoot, pCur) ){ - return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + + iCellDepth = pCur->iPage; + iCellIdx = pCur->aiIdx[iCellDepth]; + pPage = pCur->apPage[iCellDepth]; + pCell = findCell(pPage, iCellIdx); + + /* If the page containing the entry to delete is not a leaf page, move + ** the cursor to the largest entry in the tree that is smaller than + ** the entry being deleted. This cell will replace the cell being deleted + ** from the internal node. The 'previous' entry is used for this instead + ** of the 'next' entry, as the previous entry is always a part of the + ** sub-tree headed by the child page of the cell being deleted. This makes + ** balancing the tree following the delete operation easier. */ + if( !pPage->leaf ){ + int notUsed; + if( SQLITE_OK!=(rc = sqlite3BtreePrevious(pCur, ¬Used)) ){ + return rc; + } } - /* Restore the current cursor position (a no-op if the cursor is not in - ** CURSOR_REQUIRESEEK state) and save the positions of any other cursors - ** open on the same table. Then call sqlite3PagerWrite() on the page - ** that the entry will be deleted from. - */ - if( - (rc = restoreOrClearCursorPosition(pCur))!=0 || - (rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur))!=0 || - (rc = sqlite3PagerWrite(pPage->pDbPage))!=0 + /* Save the positions of any other cursors open on this table before + ** making any modifications. Make the page containing the entry to be + ** deleted writable. Then free any overflow pages associated with the + ** entry and finally remove the cell itself from within the page. */ + if( SQLITE_OK!=(rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur)) + || SQLITE_OK!=(rc = sqlite3PagerWrite(pPage->pDbPage)) + || SQLITE_OK!=(rc = clearCell(pPage, pCell)) + || SQLITE_OK!=(rc = dropCell(pPage, iCellIdx, cellSizePtr(pPage, pCell))) ){ return rc; } - /* Locate the cell within it's page and leave pCell pointing to the - ** data. The clearCell() call frees any overflow pages associated with the - ** cell. The cell itself is still intact. 
- */ - pCell = findCell(pPage, pCur->idx); + /* If the cell deleted was not located on a leaf page, then the cursor + ** is currently pointing to the largest entry in the sub-tree headed + ** by the child-page of the cell that was just deleted from an internal + ** node. The cell from the leaf node needs to be moved to the internal + ** node to replace the deleted cell. */ if( !pPage->leaf ){ - pgnoChild = get4byte(pCell); - } - rc = clearCell(pPage, pCell); - if( rc ) return rc; + MemPage *pLeaf = pCur->apPage[pCur->iPage]; + int nCell; + Pgno n = pCur->apPage[iCellDepth+1]->pgno; + unsigned char *pTmp; - if( !pPage->leaf ){ - /* - ** The entry we are about to delete is not a leaf so if we do not - ** do something we will leave a hole on an internal page. - ** We have to fill the hole by moving in a cell from a leaf. The - ** next Cell after the one to be deleted is guaranteed to exist and - ** to be a leaf so we can use it. - */ - BtCursor leafCur; - unsigned char *pNext; - int szNext; /* The compiler warning is wrong: szNext is always - ** initialized before use. Adding an extra initialization - ** to silence the compiler slows down the code. */ - int notUsed; - unsigned char *tempCell = 0; - assert( !pPage->leafData ); - sqlite3BtreeGetTempCursor(pCur, &leafCur); - rc = sqlite3BtreeNext(&leafCur, ¬Used); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(leafCur.pPage->pDbPage); - } - if( rc==SQLITE_OK ){ - TRACE(("DELETE: table=%d delete internal from %d replace from leaf %d\n", - pCur->pgnoRoot, pPage->pgno, leafCur.pPage->pgno)); - dropCell(pPage, pCur->idx, cellSizePtr(pPage, pCell)); - pNext = findCell(leafCur.pPage, leafCur.idx); - szNext = cellSizePtr(leafCur.pPage, pNext); - assert( MX_CELL_SIZE(pBt)>=szNext+4 ); - tempCell = sqliteMallocRaw( MX_CELL_SIZE(pBt) ); - if( tempCell==0 ){ - rc = SQLITE_NOMEM; - } - } - if( rc==SQLITE_OK ){ - rc = insertCell(pPage, pCur->idx, pNext-4, szNext+4, tempCell, 0); - } - if( rc==SQLITE_OK ){ - put4byte(findOverflowCell(pPage, pCur->idx), pgnoChild); - rc = balance(pPage, 0); + pCell = findCell(pLeaf, pLeaf->nCell-1); + nCell = cellSizePtr(pLeaf, pCell); + assert( MX_CELL_SIZE(pBt)>=nCell ); + + allocateTempSpace(pBt); + pTmp = pBt->pTmpSpace; + + if( SQLITE_OK!=(rc = sqlite3PagerWrite(pLeaf->pDbPage)) + || SQLITE_OK!=(rc = insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n)) + || SQLITE_OK!=(rc = dropCell(pLeaf, pLeaf->nCell-1, nCell)) + ){ + return rc; } - if( rc==SQLITE_OK ){ - dropCell(leafCur.pPage, leafCur.idx, szNext); - rc = balance(leafCur.pPage, 0); + } + + /* Balance the tree. If the entry deleted was located on a leaf page, + ** then the cursor still points to that page. In this case the first + ** call to balance() repairs the tree, and the if(...) condition is + ** never true. + ** + ** Otherwise, if the entry deleted was on an internal node page, then + ** pCur is pointing to the leaf page from which a cell was removed to + ** replace the cell deleted from the internal node. This is slightly + ** tricky as the leaf node may be underfull, and the internal node may + ** be either under or overfull. In this case run the balancing algorithm + ** on the leaf node first. If the balance proceeds far enough up the + ** tree that we can be sure that any problem in the internal node has + ** been corrected, so be it. Otherwise, after balancing the leaf node, + ** walk the cursor up the tree to the internal node and balance it as + ** well. 
*/ + rc = balance(pCur); + if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){ + while( pCur->iPage>iCellDepth ){ + releasePage(pCur->apPage[pCur->iPage--]); } - sqliteFree(tempCell); - sqlite3BtreeReleaseTempCursor(&leafCur); - }else{ - TRACE(("DELETE: table=%d delete from leaf %d\n", - pCur->pgnoRoot, pPage->pgno)); - dropCell(pPage, pCur->idx, cellSizePtr(pPage, pCell)); - rc = balance(pPage, 0); + rc = balance(pCur); } + if( rc==SQLITE_OK ){ moveToRoot(pCur); } @@ -5308,20 +6557,21 @@ ** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys ** BTREE_ZERODATA Used for SQL indices */ -int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ +static int btreeCreateTable(Btree *p, int *piTable, int flags){ BtShared *pBt = p->pBt; MemPage *pRoot; Pgno pgnoRoot; int rc; - if( pBt->inTransaction!=TRANS_WRITE ){ - /* Must start a transaction first */ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( pBt->inTransaction==TRANS_WRITE ); assert( !pBt->readOnly ); #ifdef SQLITE_OMIT_AUTOVACUUM rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); - if( rc ) return rc; + if( rc ){ + return rc; + } #else if( pBt->autoVacuum ){ Pgno pgnoMove; /* Move a page here to make room for the root-page */ @@ -5338,14 +6588,16 @@ ** root page of the new table should go. meta[3] is the largest root-page ** created so far, so the new root-page is (meta[3]+1). */ - rc = sqlite3BtreeGetMeta(p, 4, &pgnoRoot); - if( rc!=SQLITE_OK ) return rc; + rc = sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); + if( rc!=SQLITE_OK ){ + return rc; + } pgnoRoot++; /* The new root-page may not be allocated on a pointer-map page, or the ** PENDING_BYTE page. */ - if( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || + while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ pgnoRoot++; } @@ -5378,18 +6630,16 @@ return rc; } rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); - if( rc!=SQLITE_OK || eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ - releasePage(pRoot); - return rc; + if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ + rc = SQLITE_CORRUPT_BKPT; } - assert( eType!=PTRMAP_ROOTPAGE ); - assert( eType!=PTRMAP_FREEPAGE ); - rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pRoot); return rc; } - rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove); + assert( eType!=PTRMAP_ROOTPAGE ); + assert( eType!=PTRMAP_FREEPAGE ); + rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0); releasePage(pRoot); /* Obtain the page at pgnoRoot */ @@ -5432,6 +6682,13 @@ *piTable = (int)pgnoRoot; return SQLITE_OK; } +int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeCreateTable(p, piTable, flags); + sqlite3BtreeLeave(p); + return rc; +} /* ** Erase the given database page and all its children. Return @@ -5440,32 +6697,36 @@ static int clearDatabasePage( BtShared *pBt, /* The BTree that contains the table */ Pgno pgno, /* Page number to clear */ - MemPage *pParent, /* Parent page. 
NULL for the root */ - int freePageFlag /* Deallocate page if true */ + int freePageFlag, /* Deallocate page if true */ + int *pnChange ){ MemPage *pPage = 0; int rc; unsigned char *pCell; int i; - if( pgno>sqlite3PagerPagecount(pBt->pPager) ){ + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pgno>pagerPagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } - rc = getAndInitPage(pBt, pgno, &pPage, pParent); + rc = getAndInitPage(pBt, pgno, &pPage); if( rc ) goto cleardatabasepage_out; for(i=0; inCell; i++){ pCell = findCell(pPage, i); if( !pPage->leaf ){ - rc = clearDatabasePage(pBt, get4byte(pCell), pPage->pParent, 1); + rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } rc = clearCell(pPage, pCell); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ - rc = clearDatabasePage(pBt, get4byte(&pPage->aData[8]), pPage->pParent, 1); + rc = clearDatabasePage(pBt, get4byte(&pPage->aData[8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; + }else if( pnChange ){ + assert( pPage->intKey ); + *pnChange += pPage->nCell; } if( freePageFlag ){ rc = freePage(pPage); @@ -5486,24 +6747,25 @@ ** This routine will fail with SQLITE_LOCKED if there are any open ** read cursors on the table. Open write cursors are moved to the ** root of the table. +** +** If pnChange is not NULL, then table iTable must be an intkey table. The +** integer value pointed to by pnChange is incremented by the number of +** entries in the table. */ -int sqlite3BtreeClearTable(Btree *p, int iTable){ +int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ int rc; BtShared *pBt = p->pBt; - if( p->inTrans!=TRANS_WRITE ){ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } - rc = checkReadLocks(p, iTable, 0); - if( rc ){ - return rc; - } - - /* Save the position of all cursors open on this table */ - if( SQLITE_OK!=(rc = saveAllCursors(pBt, iTable, 0)) ){ - return rc; + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); + if( (rc = checkForReadConflicts(p, iTable, 0, 1))!=SQLITE_OK ){ + /* nothing to do */ + }else if( SQLITE_OK!=(rc = saveAllCursors(pBt, iTable, 0)) ){ + /* nothing to do */ + }else{ + rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); } - - return clearDatabasePage(pBt, (Pgno)iTable, 0, 0); + sqlite3BtreeLeave(p); + return rc; } /* @@ -5526,14 +6788,13 @@ ** The last root page is recorded in meta[3] and the value of ** meta[3] is updated by this procedure. */ -int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ +static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ int rc; MemPage *pPage = 0; BtShared *pBt = p->pBt; - if( p->inTrans!=TRANS_WRITE ){ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } + assert( sqlite3BtreeHoldsMutex(p) ); + assert( p->inTrans==TRANS_WRITE ); /* It is illegal to drop a table if any cursors are open on the ** database. This is because in auto-vacuum mode the backend may @@ -5542,12 +6803,13 @@ ** occur. 
*/ if( pBt->pCursor ){ - return SQLITE_LOCKED; + sqlite3ConnectionBlocked(p->db, pBt->pCursor->pBtree->db); + return SQLITE_LOCKED_SHAREDCACHE; } rc = sqlite3BtreeGetPage(pBt, (Pgno)iTable, &pPage, 0); if( rc ) return rc; - rc = sqlite3BtreeClearTable(p, iTable); + rc = sqlite3BtreeClearTable(p, iTable, 0); if( rc ){ releasePage(pPage); return rc; @@ -5562,7 +6824,7 @@ #else if( pBt->autoVacuum ){ Pgno maxRootPgno; - rc = sqlite3BtreeGetMeta(p, 4, &maxRootPgno); + rc = sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno); if( rc!=SQLITE_OK ){ releasePage(pPage); return rc; @@ -5588,7 +6850,7 @@ if( rc!=SQLITE_OK ){ return rc; } - rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable); + rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0); releasePage(pMove); if( rc!=SQLITE_OK ){ return rc; @@ -5632,6 +6894,13 @@ } return rc; } +int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeDropTable(p, iTable, piMoved); + sqlite3BtreeLeave(p); + return rc; +} /* @@ -5645,37 +6914,69 @@ ** free pages is not visible. So Cookie[0] is the same as Meta[1]. */ int sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ - DbPage *pDbPage; + DbPage *pDbPage = 0; int rc; unsigned char *pP1; BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + /* Reading a meta-data value requires a read-lock on page 1 (and hence ** the sqlite_master table. We grab this lock regardless of whether or ** not the SQLITE_ReadUncommitted flag is set (the table rooted at page - ** 1 is treated as a special case by queryTableLock() and lockTable()). + ** 1 is treated as a special case by querySharedCacheTableLock() + ** and setSharedCacheTableLock()). */ - rc = queryTableLock(p, 1, READ_LOCK); + rc = querySharedCacheTableLock(p, 1, READ_LOCK); if( rc!=SQLITE_OK ){ + sqlite3BtreeLeave(p); return rc; } assert( idx>=0 && idx<=15 ); - rc = sqlite3PagerGet(pBt->pPager, 1, &pDbPage); - if( rc ) return rc; - pP1 = (unsigned char *)sqlite3PagerGetData(pDbPage); + if( pBt->pPage1 ){ + /* The b-tree is already holding a reference to page 1 of the database + ** file. In this case the required meta-data value can be read directly + ** from the page data of this reference. This is slightly faster than + ** requesting a new reference from the pager layer. + */ + pP1 = (unsigned char *)pBt->pPage1->aData; + }else{ + /* The b-tree does not have a reference to page 1 of the database file. + ** Obtain one from the pager layer. + */ + rc = sqlite3PagerGet(pBt->pPager, 1, &pDbPage); + if( rc ){ + sqlite3BtreeLeave(p); + return rc; + } + pP1 = (unsigned char *)sqlite3PagerGetData(pDbPage); + } *pMeta = get4byte(&pP1[36 + idx*4]); - sqlite3PagerUnref(pDbPage); + + /* If the b-tree is not holding a reference to page 1, then one was + ** requested from the pager layer in the above block. Release it now. + */ + if( !pBt->pPage1 ){ + sqlite3PagerUnref(pDbPage); + } /* If autovacuumed is disabled in this build but we are trying to ** access an autovacuumed database, then make the database readonly. */ #ifdef SQLITE_OMIT_AUTOVACUUM - if( idx==4 && *pMeta>0 ) pBt->readOnly = 1; + if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ) pBt->readOnly = 1; #endif - /* Grab the read-lock on page 1. */ - rc = lockTable(p, 1, READ_LOCK); + /* If there is currently an open transaction, grab a read-lock + ** on page 1 of the database file. This is done to make sure that + ** no other connection can modify the meta value just read from + ** the database until the transaction is concluded. 
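+  **
+  ** For instance (a minimal sketch, assuming pBtree is a Btree* obtained
+  ** from sqlite3BtreeOpen() with a transaction open on it):
+  **
+  **     u32 iCookie;
+  **     int rc = sqlite3BtreeGetMeta(pBtree, BTREE_SCHEMA_VERSION, &iCookie);
+  **
+  ** reads the schema cookie stored at byte offset 36 + 1*4 == 40 of page 1,
+  ** and the shared-cache read-lock taken below keeps any other connection
+  ** from changing that value before the caller's transaction ends.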
+ */ + if( p->inTrans>0 ){ + rc = setSharedCacheTableLock(p, 1, READ_LOCK); + } + sqlite3BtreeLeave(p); return rc; } @@ -5688,20 +6989,23 @@ unsigned char *pP1; int rc; assert( idx>=1 && idx<=15 ); - if( p->inTrans!=TRANS_WRITE ){ - return pBt->readOnly ? SQLITE_READONLY : SQLITE_ERROR; - } + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); assert( pBt->pPage1!=0 ); pP1 = pBt->pPage1->aData; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - if( rc ) return rc; - put4byte(&pP1[36 + idx*4], iMeta); - if( idx==7 ){ - assert( pBt->autoVacuum || iMeta==0 ); - assert( iMeta==0 || iMeta==1 ); - pBt->incrVacuum = iMeta; + if( rc==SQLITE_OK ){ + put4byte(&pP1[36 + idx*4], iMeta); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( idx==BTREE_INCR_VACUUM ){ + assert( pBt->autoVacuum || iMeta==0 ); + assert( iMeta==0 || iMeta==1 ); + pBt->incrVacuum = (u8)iMeta; + } +#endif } - return SQLITE_OK; + sqlite3BtreeLeave(p); + return rc; } /* @@ -5710,12 +7014,86 @@ */ int sqlite3BtreeFlags(BtCursor *pCur){ /* TODO: What about CURSOR_REQUIRESEEK state? Probably need to call - ** restoreOrClearCursorPosition() here. + ** restoreCursorPosition() here. */ - MemPage *pPage = pCur->pPage; - return pPage ? pPage->aData[pPage->hdrOffset] : 0; + MemPage *pPage; + restoreCursorPosition(pCur); + pPage = pCur->apPage[pCur->iPage]; + assert( cursorHoldsMutex(pCur) ); + assert( pPage!=0 ); + assert( pPage->pBt==pCur->pBt ); + return pPage->aData[pPage->hdrOffset]; } +#ifndef SQLITE_OMIT_BTREECOUNT +/* +** The first argument, pCur, is a cursor opened on some b-tree. Count the +** number of entries in the b-tree and write the result to *pnEntry. +** +** SQLITE_OK is returned if the operation is successfully executed. +** Otherwise, if an error is encountered (i.e. an IO error or database +** corruption) an SQLite error code is returned. +*/ +int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){ + i64 nEntry = 0; /* Value to return in *pnEntry */ + int rc; /* Return code */ + rc = moveToRoot(pCur); + + /* Unless an error occurs, the following loop runs one iteration for each + ** page in the B-Tree structure (not including overflow pages). + */ + while( rc==SQLITE_OK ){ + int iIdx; /* Index of child node in parent */ + MemPage *pPage; /* Current page of the b-tree */ + + /* If this is a leaf page or the tree is not an int-key tree, then + ** this page contains countable entries. Increment the entry counter + ** accordingly. + */ + pPage = pCur->apPage[pCur->iPage]; + if( pPage->leaf || !pPage->intKey ){ + nEntry += pPage->nCell; + } + + /* pPage is a leaf node. This loop navigates the cursor so that it + ** points to the first interior cell that it points to the parent of + ** the next page in the tree that has not yet been visited. The + ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell + ** of the page, or to the number of cells in the page if the next page + ** to visit is the right-child of its parent. + ** + ** If all pages in the tree have been visited, return SQLITE_OK to the + ** caller. + */ + if( pPage->leaf ){ + do { + if( pCur->iPage==0 ){ + /* All pages of the b-tree have been visited. Return successfully. */ + *pnEntry = nEntry; + return SQLITE_OK; + } + sqlite3BtreeMoveToParent(pCur); + }while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell ); + + pCur->aiIdx[pCur->iPage]++; + pPage = pCur->apPage[pCur->iPage]; + } + + /* Descend to the child node of the cell that the cursor currently + ** points at. This is the right-child if (iIdx==pPage->nCell). 
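+    **
+    ** For example, on a page with nCell==3, aiIdx values 0..2 descend into
+    ** the left-child pointers of cells 0..2 (read with findCell()), while
+    ** aiIdx==3 descends into the right-most child whose page number is
+    ** stored in the page header at offset hdrOffset+8.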
+ */ + iIdx = pCur->aiIdx[pCur->iPage]; + if( iIdx==pPage->nCell ){ + rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); + }else{ + rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx))); + } + } + + /* An error has occurred. Return an error code. */ + return rc; +} +#endif /* ** Return the pager associated with a BTree. This routine is used for @@ -5736,23 +7114,21 @@ ... ){ va_list ap; - char *zMsg2; if( !pCheck->mxErr ) return; pCheck->mxErr--; pCheck->nErr++; va_start(ap, zFormat); - zMsg2 = sqlite3VMPrintf(zFormat, ap); + if( pCheck->errMsg.nChar ){ + sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1); + } + if( zMsg1 ){ + sqlite3StrAccumAppend(&pCheck->errMsg, zMsg1, -1); + } + sqlite3VXPrintf(&pCheck->errMsg, 1, zFormat, ap); va_end(ap); - if( zMsg1==0 ) zMsg1 = ""; - if( pCheck->zErrMsg ){ - char *zOld = pCheck->zErrMsg; - pCheck->zErrMsg = 0; - sqlite3SetString(&pCheck->zErrMsg, zOld, "\n", zMsg1, zMsg2, (char*)0); - sqliteFree(zOld); - }else{ - sqlite3SetString(&pCheck->zErrMsg, zMsg1, zMsg2, (char*)0); + if( pCheck->errMsg.mallocFailed ){ + pCheck->mallocFailed = 1; } - sqliteFree(zMsg2); } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ @@ -5765,9 +7141,9 @@ ** ** Also check that the page number is in bounds. */ -static int checkRef(IntegrityCk *pCheck, int iPage, char *zContext){ +static int checkRef(IntegrityCk *pCheck, Pgno iPage, char *zContext){ if( iPage==0 ) return 1; - if( iPage>pCheck->nPage || iPage<0 ){ + if( iPage>pCheck->nPage ){ checkAppendMsg(pCheck, zContext, "invalid page number %d", iPage); return 1; } @@ -5797,6 +7173,7 @@ rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1; checkAppendMsg(pCheck, zContext, "Failed to read ptrmap key=%d", iChild); return; } @@ -5845,7 +7222,7 @@ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0, zContext); } #endif - if( n>pCheck->pBt->usableSize/4-8 ){ + if( n>pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, zContext, "freelist leaf count too big on page %d", iPage); N--; @@ -5902,7 +7279,6 @@ static int checkTreePage( IntegrityCk *pCheck, /* Context for the sanity check */ int iPage, /* Page number of the page to check */ - MemPage *pParent, /* Parent page */ char *zParentContext /* Parent context */ ){ MemPage *pPage; @@ -5913,7 +7289,7 @@ BtShared *pBt; int usableSize; char zContext[100]; - char *hit; + char *hit = 0; sqlite3_snprintf(sizeof(zContext), zContext, "Page %d: ", iPage); @@ -5924,11 +7300,13 @@ if( iPage==0 ) return 0; if( checkRef(pCheck, iPage, zParentContext) ) return 0; if( (rc = sqlite3BtreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){ + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1; checkAppendMsg(pCheck, zContext, "unable to get the page. 
error code=%d", rc); return 0; } - if( (rc = sqlite3BtreeInitPage(pPage, pParent))!=0 ){ + if( (rc = sqlite3BtreeInitPage(pPage))!=0 ){ + assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */ checkAppendMsg(pCheck, zContext, "sqlite3BtreeInitPage() returns error code %d", rc); releasePage(pPage); @@ -5940,7 +7318,7 @@ depth = 0; for(i=0; inCell && pCheck->mxErr; i++){ u8 *pCell; - int sz; + u32 sz; CellInfo info; /* Check payload overflow pages @@ -5950,9 +7328,11 @@ pCell = findCell(pPage,i); sqlite3BtreeParseCellPtr(pPage, pCell, &info); sz = info.nData; - if( !pPage->intKey ) sz += info.nKey; + if( !pPage->intKey ) sz += (int)info.nKey; assert( sz==info.nPayload ); - if( sz>info.nLocal ){ + if( (sz>info.nLocal) + && (&pCell[info.iOverflow]<=&pPage->aData[pBt->usableSize]) + ){ int nPage = (sz - info.nLocal + usableSize - 5)/(usableSize - 4); Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]); #ifndef SQLITE_OMIT_AUTOVACUUM @@ -5972,7 +7352,7 @@ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext); } #endif - d2 = checkTreePage(pCheck,pgno,pPage,zContext); + d2 = checkTreePage(pCheck, pgno, zContext); if( i>0 && d2!=depth ){ checkAppendMsg(pCheck, zContext, "Child page depth differs"); } @@ -5988,22 +7368,34 @@ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, 0); } #endif - checkTreePage(pCheck, pgno, pPage, zContext); + checkTreePage(pCheck, pgno, zContext); } /* Check for complete coverage of the page */ data = pPage->aData; hdr = pPage->hdrOffset; - hit = sqliteMalloc( usableSize ); - if( hit ){ - memset(hit, 1, get2byte(&data[hdr+5])); + hit = sqlite3PageMalloc( pBt->pageSize ); + if( hit==0 ){ + pCheck->mallocFailed = 1; + }else{ + u16 contentOffset = get2byte(&data[hdr+5]); + if (contentOffset > usableSize) { + checkAppendMsg(pCheck, 0, + "Corruption detected in header on page %d",iPage,0); + goto check_page_abort; + } + memset(hit+contentOffset, 0, usableSize-contentOffset); + memset(hit, 1, contentOffset); nCell = get2byte(&data[hdr+3]); cellStart = hdr + 12 - 4*pPage->leaf; for(i=0; i=usableSize || pc<0 ){ checkAppendMsg(pCheck, 0, "Corruption detected in cell %d on page %d",i,iPage,0); @@ -6038,7 +7430,8 @@ cnt, data[hdr+7], iPage); } } - sqliteFree(hit); +check_page_abort: + if (hit) sqlite3PageFree(hit); releasePage(pPage); return depth+1; @@ -6051,10 +7444,10 @@ ** an array of pages numbers were each page number is the root page of ** a table. nRoot is the number of entries in aRoot. ** -** If everything checks out, this routine returns NULL. If something is -** amiss, an error message is written into memory obtained from malloc() -** and a pointer to that error message is returned. The calling function -** is responsible for freeing the error message when it is done. +** Write the number of error seen in *pnErr. Except for some memory +** allocation errors, an error message held in memory obtained from +** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is +** returned. If a memory allocation error occurs, NULL is returned. 
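+**
+** A rough calling sketch (assuming p is an open Btree* and aRoot[] holds
+** the nRoot root pages to verify):
+**
+**     int nErr = 0;
+**     char *zReport = sqlite3BtreeIntegrityCheck(p, aRoot, nRoot, 100, &nErr);
+**     if( nErr && zReport ){
+**       .. log zReport, then release it with sqlite3_free() ..
+**     }else if( nErr ){
+**       .. a memory allocation failed while building the report ..
+**     }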
*/ char *sqlite3BtreeIntegrityCheck( Btree *p, /* The btree to be checked */ @@ -6063,43 +7456,44 @@ int mxErr, /* Stop reporting errors after this many */ int *pnErr /* Write number of errors seen to this variable */ ){ - int i; + Pgno i; int nRef; IntegrityCk sCheck; BtShared *pBt = p->pBt; + char zErr[100]; + sqlite3BtreeEnter(p); nRef = sqlite3PagerRefcount(pBt->pPager); if( lockBtreeWithRetry(p)!=SQLITE_OK ){ - return sqliteStrDup("Unable to acquire a read lock on the database"); + *pnErr = 1; + sqlite3BtreeLeave(p); + return sqlite3DbStrDup(0, "cannot acquire a read lock on the database"); } sCheck.pBt = pBt; sCheck.pPager = pBt->pPager; - sCheck.nPage = sqlite3PagerPagecount(sCheck.pPager); + sCheck.nPage = pagerPagecount(sCheck.pBt); sCheck.mxErr = mxErr; sCheck.nErr = 0; + sCheck.mallocFailed = 0; *pnErr = 0; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->nTrunc!=0 ){ - sCheck.nPage = pBt->nTrunc; - } -#endif if( sCheck.nPage==0 ){ unlockBtreeIfUnused(pBt); + sqlite3BtreeLeave(p); return 0; } - sCheck.anRef = sqliteMallocRaw( (sCheck.nPage+1)*sizeof(sCheck.anRef[0]) ); + sCheck.anRef = sqlite3Malloc( (sCheck.nPage+1)*sizeof(sCheck.anRef[0]) ); if( !sCheck.anRef ){ unlockBtreeIfUnused(pBt); *pnErr = 1; - return sqlite3MPrintf("Unable to malloc %d bytes", - (sCheck.nPage+1)*sizeof(sCheck.anRef[0])); + sqlite3BtreeLeave(p); + return 0; } for(i=0; i<=sCheck.nPage; i++){ sCheck.anRef[i] = 0; } i = PENDING_BYTE_PAGE(pBt); if( i<=sCheck.nPage ){ sCheck.anRef[i] = 1; } - sCheck.zErrMsg = 0; + sqlite3StrAccumInit(&sCheck.errMsg, zErr, sizeof(zErr), 20000); /* Check the integrity of the freelist */ @@ -6108,14 +7502,14 @@ /* Check all the tables. */ - for(i=0; iautoVacuum && aRoot[i]>1 ){ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0, 0); } #endif - checkTreePage(&sCheck, aRoot[i], 0, "List of tree roots: "); + checkTreePage(&sCheck, aRoot[i], "List of tree roots: "); } /* Make sure every page in the file is referenced @@ -6140,10 +7534,12 @@ #endif } - /* Make sure this analysis did not leave any unref() pages + /* Make sure this analysis did not leave any unref() pages. + ** This is an internal consistency check; an integrity check + ** of the integrity check. */ unlockBtreeIfUnused(pBt); - if( nRef != sqlite3PagerRefcount(pBt->pPager) ){ + if( NEVER(nRef != sqlite3PagerRefcount(pBt->pPager)) ){ checkAppendMsg(&sCheck, 0, "Outstanding page count goes from %d to %d during this analysis", nRef, sqlite3PagerRefcount(pBt->pPager) @@ -6152,14 +7548,24 @@ /* Clean up and report errors. */ - sqliteFree(sCheck.anRef); + sqlite3BtreeLeave(p); + sqlite3_free(sCheck.anRef); + if( sCheck.mallocFailed ){ + sqlite3StrAccumReset(&sCheck.errMsg); + *pnErr = sCheck.nErr+1; + return 0; + } *pnErr = sCheck.nErr; - return sCheck.zErrMsg; + if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg); + return sqlite3StrAccumFinish(&sCheck.errMsg); } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* ** Return the full pathname of the underlying database file. +** +** The pager filename is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. */ const char *sqlite3BtreeGetFilename(Btree *p){ assert( p->pBt->pPager!=0 ); @@ -6167,109 +7573,44 @@ } /* -** Return the pathname of the directory that contains the database file. -*/ -const char *sqlite3BtreeGetDirname(Btree *p){ - assert( p->pBt->pPager!=0 ); - return sqlite3PagerDirname(p->pBt->pPager); -} - -/* ** Return the pathname of the journal file for this database. 
The return ** value of this routine is the same regardless of whether the journal file ** has been created or not. +** +** The pager journal filename is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. */ const char *sqlite3BtreeGetJournalname(Btree *p){ assert( p->pBt->pPager!=0 ); return sqlite3PagerJournalname(p->pBt->pPager); } -#ifndef SQLITE_OMIT_VACUUM -/* -** Copy the complete content of pBtFrom into pBtTo. A transaction -** must be active for both files. -** -** The size of file pBtFrom may be reduced by this operation. -** If anything goes wrong, the transaction on pBtFrom is rolled back. -*/ -int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ - int rc = SQLITE_OK; - Pgno i, nPage, nToPage, iSkip; - - BtShared *pBtTo = pTo->pBt; - BtShared *pBtFrom = pFrom->pBt; - - if( pTo->inTrans!=TRANS_WRITE || pFrom->inTrans!=TRANS_WRITE ){ - return SQLITE_ERROR; - } - if( pBtTo->pCursor ) return SQLITE_BUSY; - nToPage = sqlite3PagerPagecount(pBtTo->pPager); - nPage = sqlite3PagerPagecount(pBtFrom->pPager); - iSkip = PENDING_BYTE_PAGE(pBtTo); - for(i=1; rc==SQLITE_OK && i<=nPage; i++){ - DbPage *pDbPage; - if( i==iSkip ) continue; - rc = sqlite3PagerGet(pBtFrom->pPager, i, &pDbPage); - if( rc ) break; - rc = sqlite3PagerOverwrite(pBtTo->pPager, i, sqlite3PagerGetData(pDbPage)); - sqlite3PagerUnref(pDbPage); - } - - /* If the file is shrinking, journal the pages that are being truncated - ** so that they can be rolled back if the commit fails. - */ - for(i=nPage+1; rc==SQLITE_OK && i<=nToPage; i++){ - DbPage *pDbPage; - if( i==iSkip ) continue; - rc = sqlite3PagerGet(pBtTo->pPager, i, &pDbPage); - if( rc ) break; - rc = sqlite3PagerWrite(pDbPage); - sqlite3PagerDontWrite(pDbPage); - /* Yeah. It seems wierd to call DontWrite() right after Write(). But - ** that is because the names of those procedures do not exactly - ** represent what they do. Write() really means "put this page in the - ** rollback journal and mark it as dirty so that it will be written - ** to the database file later." DontWrite() undoes the second part of - ** that and prevents the page from being written to the database. The - ** page is still on the rollback journal, though. And that is the whole - ** point of this loop: to put pages on the rollback journal. */ - sqlite3PagerUnref(pDbPage); - } - if( !rc && nPagepPager, nPage); - } - - if( rc ){ - sqlite3BtreeRollback(pTo); - } - return rc; -} -#endif /* SQLITE_OMIT_VACUUM */ - /* ** Return non-zero if a transaction is active. */ int sqlite3BtreeIsInTrans(Btree *p){ + assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); return (p && (p->inTrans==TRANS_WRITE)); } /* -** Return non-zero if a statement transaction is active. -*/ -int sqlite3BtreeIsInStmt(Btree *p){ - return (p->pBt && p->pBt->inStmt); -} - -/* ** Return non-zero if a read (or write) transaction is active. */ int sqlite3BtreeIsInReadTrans(Btree *p){ - return (p && (p->inTrans!=TRANS_NONE)); + assert( p ); + assert( sqlite3_mutex_held(p->db->mutex) ); + return p->inTrans!=TRANS_NONE; +} + +int sqlite3BtreeIsInBackup(Btree *p){ + assert( p ); + assert( sqlite3_mutex_held(p->db->mutex) ); + return p->nBackup!=0; } /* ** This function returns a pointer to a blob of memory associated with -** a single shared-btree. The memory is used by client code for it's own +** a single shared-btree. The memory is used by client code for its own ** purposes (for example, to store a high-level schema associated with ** the shared-btree). 
The btree layer manages reference counting issues. ** @@ -6278,26 +7619,39 @@ ** call the nBytes parameter is ignored and a pointer to the same blob ** of memory returned. ** +** If the nBytes parameter is 0 and the blob of memory has not yet been +** allocated, a null pointer is returned. If the blob has already been +** allocated, it is returned as normal. +** ** Just before the shared-btree is closed, the function passed as the ** xFree argument when the memory allocation was made is invoked on the -** blob of allocated memory. This function should not call sqliteFree() +** blob of allocated memory. This function should not call sqlite3_free() ** on the memory, the btree layer does that. */ void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ BtShared *pBt = p->pBt; - if( !pBt->pSchema ){ - pBt->pSchema = sqliteMalloc(nBytes); + sqlite3BtreeEnter(p); + if( !pBt->pSchema && nBytes ){ + pBt->pSchema = sqlite3MallocZero(nBytes); pBt->xFreeSchema = xFree; } + sqlite3BtreeLeave(p); return pBt->pSchema; } /* -** Return true if another user of the same shared btree as the argument -** handle holds an exclusive lock on the sqlite_master table. +** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared +** btree as the argument handle holds an exclusive lock on the +** sqlite_master table. Otherwise SQLITE_OK. */ int sqlite3BtreeSchemaLocked(Btree *p){ - return (queryTableLock(p, MASTER_ROOT, READ_LOCK)!=SQLITE_OK); + int rc; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); + rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); + assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE ); + sqlite3BtreeLeave(p); + return rc; } @@ -6309,10 +7663,16 @@ */ int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){ int rc = SQLITE_OK; - u8 lockType = (isWriteLock?WRITE_LOCK:READ_LOCK); - rc = queryTableLock(p, iTab, lockType); - if( rc==SQLITE_OK ){ - rc = lockTable(p, iTab, lockType); + if( p->sharable ){ + u8 lockType = READ_LOCK + isWriteLock; + assert( READ_LOCK+1==WRITE_LOCK ); + assert( isWriteLock==0 || isWriteLock==1 ); + sqlite3BtreeEnter(p); + rc = querySharedCacheTableLock(p, iTab, lockType); + if( rc==SQLITE_OK ){ + rc = setSharedCacheTableLock(p, iTab, lockType); + } + sqlite3BtreeLeave(p); } return rc; } @@ -6327,9 +7687,15 @@ ** to change the length of the data stored. */ int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){ + int rc; + assert( cursorHoldsMutex(pCsr) ); + assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) ); assert(pCsr->isIncrblobHandle); - if( pCsr->eState==CURSOR_REQUIRESEEK ){ + + restoreCursorPosition(pCsr); + assert( pCsr->eState!=CURSOR_REQUIRESEEK ); + if( pCsr->eState!=CURSOR_VALID ){ return SQLITE_ABORT; } @@ -6341,12 +7707,15 @@ if( !pCsr->wrFlag ){ return SQLITE_READONLY; } - assert( !pCsr->pBtree->pBt->readOnly - && pCsr->pBtree->pBt->inTransaction==TRANS_WRITE ); - if( checkReadLocks(pCsr->pBtree, pCsr->pgnoRoot, pCsr) ){ - return SQLITE_LOCKED; /* The table pCur points to has a read lock */ + assert( !pCsr->pBt->readOnly + && pCsr->pBt->inTransaction==TRANS_WRITE ); + rc = checkForReadConflicts(pCsr->pBtree, pCsr->pgnoRoot, pCsr, 0); + if( rc!=SQLITE_OK ){ + /* The table pCur points to has a read lock */ + assert( rc==SQLITE_LOCKED_SHAREDCACHE ); + return rc; } - if( pCsr->eState==CURSOR_INVALID || !pCsr->pPage->intKey ){ + if( pCsr->eState==CURSOR_INVALID || !pCsr->apPage[pCsr->iPage]->intKey ){ return SQLITE_ERROR; } @@ -6364,6 +7733,8 @@ ** sqlite3BtreePutData()). 
*/ void sqlite3BtreeCacheOverflow(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert(!pCur->isIncrblobHandle); assert(!pCur->aOverflow); pCur->isIncrblobHandle = 1; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/btree.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/btree.h --- sqlite3-3.4.2/src/btree.h 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/btree.h 2009-06-25 12:35:51.000000000 +0100 @@ -13,7 +13,7 @@ ** subsystem. See comments in the source code for a detailed description ** of what each interface routine does. ** -** @(#) $Id: btree.h,v 1.82 2007/05/08 21:45:27 drh Exp $ +** @(#) $Id: btree.h,v 1.116 2009/06/03 11:25:07 danielk1977 Exp $ */ #ifndef _BTREE_H_ #define _BTREE_H_ @@ -41,13 +41,26 @@ typedef struct Btree Btree; typedef struct BtCursor BtCursor; typedef struct BtShared BtShared; +typedef struct BtreeMutexArray BtreeMutexArray; + +/* +** This structure records all of the Btrees that need to hold +** a mutex before we enter sqlite3VdbeExec(). The Btrees are +** are placed in aBtree[] in order of aBtree[]->pBt. That way, +** we can always lock and unlock them all quickly. +*/ +struct BtreeMutexArray { + int nMutex; + Btree *aBtree[SQLITE_MAX_ATTACHED+1]; +}; int sqlite3BtreeOpen( const char *zFilename, /* Name of database file to open */ sqlite3 *db, /* Associated database connection */ - Btree **, /* Return open Btree* here */ - int flags /* Flags */ + Btree **ppBtree, /* Return open Btree* here */ + int flags, /* Flags */ + int vfsFlags /* Flags passed through to VFS open */ ); /* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the @@ -59,13 +72,15 @@ #define BTREE_OMIT_JOURNAL 1 /* Do not use journal. No argument */ #define BTREE_NO_READLOCK 2 /* Omit readlocks on readonly files */ #define BTREE_MEMORY 4 /* In-memory DB. No argument */ +#define BTREE_READONLY 8 /* Open the database in read-only mode */ +#define BTREE_READWRITE 16 /* Open for both reading and writing */ +#define BTREE_CREATE 32 /* Create the database if it does not exist */ int sqlite3BtreeClose(Btree*); -int sqlite3BtreeSetBusyHandler(Btree*,BusyHandler*); int sqlite3BtreeSetCacheSize(Btree*,int); int sqlite3BtreeSetSafetyLevel(Btree*,int,int); int sqlite3BtreeSyncDisabled(Btree*); -int sqlite3BtreeSetPageSize(Btree*,int,int); +int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); int sqlite3BtreeGetPageSize(Btree*); int sqlite3BtreeMaxPageCount(Btree*,int); int sqlite3BtreeGetReserve(Btree*); @@ -76,19 +91,17 @@ int sqlite3BtreeCommitPhaseTwo(Btree*); int sqlite3BtreeCommit(Btree*); int sqlite3BtreeRollback(Btree*); -int sqlite3BtreeBeginStmt(Btree*); -int sqlite3BtreeCommitStmt(Btree*); -int sqlite3BtreeRollbackStmt(Btree*); +int sqlite3BtreeBeginStmt(Btree*,int); int sqlite3BtreeCreateTable(Btree*, int*, int flags); int sqlite3BtreeIsInTrans(Btree*); -int sqlite3BtreeIsInStmt(Btree*); int sqlite3BtreeIsInReadTrans(Btree*); +int sqlite3BtreeIsInBackup(Btree*); void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); int sqlite3BtreeSchemaLocked(Btree *); int sqlite3BtreeLockTable(Btree *, int, u8); +int sqlite3BtreeSavepoint(Btree *, int, int); const char *sqlite3BtreeGetFilename(Btree *); -const char *sqlite3BtreeGetDirname(Btree *); const char *sqlite3BtreeGetJournalname(Btree *); int sqlite3BtreeCopyFile(Btree *, Btree *); @@ -102,25 +115,62 @@ #define BTREE_LEAFDATA 4 /* Data stored in leaves only. 
Implies INTKEY */ int sqlite3BtreeDropTable(Btree*, int, int*); -int sqlite3BtreeClearTable(Btree*, int); +int sqlite3BtreeClearTable(Btree*, int, int*); +void sqlite3BtreeTripAllCursors(Btree*, int); + int sqlite3BtreeGetMeta(Btree*, int idx, u32 *pValue); int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); +/* +** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta +** should be one of the following values. The integer values are assigned +** to constants so that the offset of the corresponding field in an +** SQLite database header may be found using the following formula: +** +** offset = 36 + (idx * 4) +** +** For example, the free-page-count field is located at byte offset 36 of +** the database file header. The incr-vacuum-flag field is located at +** byte offset 64 (== 36+4*7). +*/ +#define BTREE_FREE_PAGE_COUNT 0 +#define BTREE_SCHEMA_VERSION 1 +#define BTREE_FILE_FORMAT 2 +#define BTREE_DEFAULT_CACHE_SIZE 3 +#define BTREE_LARGEST_ROOT_PAGE 4 +#define BTREE_TEXT_ENCODING 5 +#define BTREE_USER_VERSION 6 +#define BTREE_INCR_VACUUM 7 + int sqlite3BtreeCursor( Btree*, /* BTree containing table to open */ int iTable, /* Index of root page */ int wrFlag, /* 1 for writing. 0 for read-only */ - int(*)(void*,int,const void*,int,const void*), /* Key comparison function */ - void*, /* First argument to compare function */ - BtCursor **ppCursor /* Returned cursor */ + struct KeyInfo*, /* First argument to compare function */ + BtCursor *pCursor /* Space to write cursor structure */ ); +int sqlite3BtreeCursorSize(void); int sqlite3BtreeCloseCursor(BtCursor*); -int sqlite3BtreeMoveto(BtCursor*,const void *pKey,i64 nKey,int bias,int *pRes); +int sqlite3BtreeMoveto( + BtCursor*, + const void *pKey, + i64 nKey, + int bias, + int *pRes +); +int sqlite3BtreeMovetoUnpacked( + BtCursor*, + UnpackedRecord *pUnKey, + i64 intKey, + int bias, + int *pRes +); +int sqlite3BtreeCursorHasMoved(BtCursor*, int*); int sqlite3BtreeDelete(BtCursor*); int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, const void *pData, int nData, - int nZero, int bias); + int nZero, int bias, int seekResult); int sqlite3BtreeFirst(BtCursor*, int *pRes); int sqlite3BtreeLast(BtCursor*, int *pRes); int sqlite3BtreeNext(BtCursor*, int *pRes); @@ -133,17 +183,64 @@ const void *sqlite3BtreeDataFetch(BtCursor*, int *pAmt); int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); +void sqlite3BtreeSetCachedRowid(BtCursor*, sqlite3_int64); +sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor*); char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); struct Pager *sqlite3BtreePager(Btree*); int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); void sqlite3BtreeCacheOverflow(BtCursor *); +void sqlite3BtreeClearCursor(BtCursor *); + +#ifndef SQLITE_OMIT_BTREECOUNT +int sqlite3BtreeCount(BtCursor *, i64 *); +#endif #ifdef SQLITE_TEST int sqlite3BtreeCursorInfo(BtCursor*, int*, int); void sqlite3BtreeCursorList(Btree*); -int sqlite3BtreePageDump(Btree*, int, int recursive); #endif +/* +** If we are not using shared cache, then there is no need to +** use mutexes to access the BtShared structures. So make the +** Enter and Leave procedures no-ops. 
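+**
+** As a sketch of the calling pattern used by the public entry points
+** (mirroring, for example, the sqlite3BtreeCreateTable() wrapper added
+** in btree.c):
+**
+**     sqlite3BtreeEnter(p);
+**     rc = btreeCreateTable(p, &iTable, flags);
+**     sqlite3BtreeLeave(p);
+**
+** When SQLITE_OMIT_SHARED_CACHE is defined, the Enter and Leave calls
+** expand to nothing and the wrappers add no overhead.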
+*/ +#ifndef SQLITE_OMIT_SHARED_CACHE + void sqlite3BtreeEnter(Btree*); + void sqlite3BtreeEnterAll(sqlite3*); +#else +# define sqlite3BtreeEnter(X) +# define sqlite3BtreeEnterAll(X) +#endif + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE + void sqlite3BtreeLeave(Btree*); + void sqlite3BtreeEnterCursor(BtCursor*); + void sqlite3BtreeLeaveCursor(BtCursor*); + void sqlite3BtreeLeaveAll(sqlite3*); + void sqlite3BtreeMutexArrayEnter(BtreeMutexArray*); + void sqlite3BtreeMutexArrayLeave(BtreeMutexArray*); + void sqlite3BtreeMutexArrayInsert(BtreeMutexArray*, Btree*); +#ifndef NDEBUG + /* These routines are used inside assert() statements only. */ + int sqlite3BtreeHoldsMutex(Btree*); + int sqlite3BtreeHoldsAllMutexes(sqlite3*); +#endif +#else + +# define sqlite3BtreeLeave(X) +# define sqlite3BtreeEnterCursor(X) +# define sqlite3BtreeLeaveCursor(X) +# define sqlite3BtreeLeaveAll(X) +# define sqlite3BtreeMutexArrayEnter(X) +# define sqlite3BtreeMutexArrayLeave(X) +# define sqlite3BtreeMutexArrayInsert(X,Y) + +# define sqlite3BtreeHoldsMutex(X) 1 +# define sqlite3BtreeHoldsAllMutexes(X) 1 +#endif + + #endif /* _BTREE_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/btreeInt.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/btreeInt.h --- sqlite3-3.4.2/src/btreeInt.h 2007-06-15 13:06:59.000000000 +0100 +++ sqlite3-3.6.16/src/btreeInt.h 2009-06-25 12:45:57.000000000 +0100 @@ -9,7 +9,7 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* -** $Id: btreeInt.h,v 1.5 2007/06/15 12:06:59 drh Exp $ +** $Id: btreeInt.h,v 1.49 2009/06/24 05:40:34 danielk1977 Exp $ ** ** This file implements a external (disk-based) database using BTrees. ** For a detailed discussion of BTrees, refer to @@ -71,6 +71,17 @@ ** 36 4 Number of freelist pages in the file ** 40 60 15 4-byte meta values passed to higher layers ** +** 40 4 Schema cookie +** 44 4 File format of schema layer +** 48 4 Size of page cache +** 52 4 Largest root-page (auto/incr_vacuum) +** 56 4 1=UTF-8 2=UTF16le 3=UTF16be +** 60 4 User version +** 64 4 Incremental vacuum mode +** 68 4 unused +** 72 4 unused +** 76 4 unused +** ** All of the integer values are big-endian (most significant byte first). ** ** The file change counter is incremented when the database is changed @@ -204,15 +215,6 @@ ** * zero or more pages numbers of leaves */ #include "sqliteInt.h" -#include "pager.h" -#include "btree.h" -#include "os.h" -#include - -/* Round up a number to the next larger multiple of 8. This is used -** to force 8-byte alignment on 64-bit architectures. -*/ -#define ROUND8(x) ((x+7)&~7) /* The following value is the maximum cell size assuming a maximum page @@ -221,10 +223,11 @@ #define MX_CELL_SIZE(pBt) (pBt->pageSize-8) /* The maximum number of cells on a single page of the database. This -** assumes a minimum cell size of 3 bytes. Such small cells will be -** exceedingly rare, but they are possible. +** assumes a minimum cell size of 6 bytes (4 bytes for the cell itself +** plus 2 bytes for the index to the cell in the page header). Such +** small cells will be rare, but they are possible. */ -#define MX_CELL(pBt) ((pBt->pageSize-8)/3) +#define MX_CELL(pBt) ((pBt->pageSize-8)/6) /* Forward declarations */ typedef struct MemPage MemPage; @@ -248,7 +251,7 @@ /* ** Page type flags. An ORed combination of these flags appear as the -** first byte of every BTree page. +** first byte of on-disk image of every BTree page. 
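+**
+** For example, an interior page of a rowid table carries
+** PTF_INTKEY|PTF_LEAFDATA (0x05) and a leaf page of the same table
+** carries PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF (0x0d), while index b-tree
+** pages carry PTF_ZERODATA (0x02) or PTF_ZERODATA|PTF_LEAF (0x0a).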
*/ #define PTF_INTKEY 0x01 #define PTF_ZERODATA 0x02 @@ -264,33 +267,32 @@ ** walk up the BTree from any leaf to the root. Care must be taken to ** unref() the parent page pointer when this page is no longer referenced. ** The pageDestructor() routine handles that chore. +** +** Access to all fields of this structure is controlled by the mutex +** stored in MemPage.pBt->mutex. */ struct MemPage { u8 isInit; /* True if previously initialized. MUST BE FIRST! */ - u8 idxShift; /* True if Cell indices have changed */ u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ u8 intKey; /* True if intkey flag is set */ u8 leaf; /* True if leaf flag is set */ - u8 zeroData; /* True if table stores keys only */ - u8 leafData; /* True if tables stores data on leaves only */ u8 hasData; /* True if this page stores data */ u8 hdrOffset; /* 100 for page 1. 0 otherwise */ u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */ - u16 maxLocal; /* Copy of Btree.maxLocal or Btree.maxLeaf */ - u16 minLocal; /* Copy of Btree.minLocal or Btree.minLeaf */ + u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */ + u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */ u16 cellOffset; /* Index in aData of first cell pointer */ - u16 idxParent; /* Index in parent of this node */ u16 nFree; /* Number of free bytes on the page */ u16 nCell; /* Number of cells on this page, local and ovfl */ + u16 maskPage; /* Mask for page offset */ struct _OvflCell { /* Cells that will not fit on aData[] */ u8 *pCell; /* Pointers to the body of the overflow cell */ u16 idx; /* Insert this cell before idx-th non-overflow cell */ } aOvfl[5]; - BtShared *pBt; /* Pointer back to BTree structure */ - u8 *aData; /* Pointer back to the start of the page */ + BtShared *pBt; /* Pointer to BtShared that this page is part of */ + u8 *aData; /* Pointer to disk image of the page data */ DbPage *pDbPage; /* Pager page handle */ Pgno pgno; /* Page number for this page */ - MemPage *pParent; /* The parent of this page. NULL for root */ }; /* @@ -300,11 +302,37 @@ */ #define EXTRA_SIZE sizeof(MemPage) -/* Btree handle */ +/* A Btree handle +** +** A database connection contains a pointer to an instance of +** this object for every database file that it has open. This structure +** is opaque to the database connection. The database connection cannot +** see the internals of this structure and only deals with pointers to +** this structure. +** +** For some database files, the same underlying database cache might be +** shared between multiple connections. In that case, each contection +** has it own pointer to this object. But each instance of this object +** points to the same BtShared object. The database cache and the +** schema associated with the database file are all contained within +** the BtShared object. +** +** All fields in this structure are accessed under sqlite3.mutex. +** The pBt pointer itself may not be changed while there exists cursors +** in the referenced BtShared that point back to this Btree since those +** cursors have to do go through this Btree to find their BtShared and +** they often do so without holding sqlite3.mutex. 
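+**
+** For example, if two connections open the same database file with
+** shared-cache enabled, each connection owns its own Btree object
+** (with its own inTrans state), but both Btree.pBt pointers refer to
+** a single BtShared object, whose nRef is then 2.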
+*/ struct Btree { - sqlite3 *pSqlite; - BtShared *pBt; - u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */ + sqlite3 *db; /* The database connection holding this btree */ + BtShared *pBt; /* Sharable content of this btree */ + u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */ + u8 sharable; /* True if we can share pBt with another db */ + u8 locked; /* True if db currently has pBt locked */ + int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ + int nBackup; /* Number of backup operations reading this btree */ + Btree *pNext; /* List of other sharable Btrees from the same db */ + Btree *pPrev; /* Back pointer of the same list */ }; /* @@ -312,47 +340,79 @@ ** ** If the shared-data extension is enabled, there may be multiple users ** of the Btree structure. At most one of these may open a write transaction, -** but any number may have active read transactions. Variable Btree.pDb -** points to the handle that owns any current write-transaction. +** but any number may have active read transactions. */ #define TRANS_NONE 0 #define TRANS_READ 1 #define TRANS_WRITE 2 /* -** Everything we need to know about an open database +** An instance of this object represents a single database file. +** +** A single database file can be in use as the same time by two +** or more database connections. When two or more connections are +** sharing the same database file, each connection has it own +** private Btree object for the file and each of those Btrees points +** to this one BtShared object. BtShared.nRef is the number of +** connections currently sharing this database file. +** +** Fields in this structure are accessed under the BtShared.mutex +** mutex, except for nRef and pNext which are accessed under the +** global SQLITE_MUTEX_STATIC_MASTER mutex. The pPager field +** may not be modified once it is initially set as long as nRef>0. +** The pSchema field may be set once under BtShared.mutex and +** thereafter is unchanged as long as nRef>0. +** +** isPending: +** +** If a BtShared client fails to obtain a write-lock on a database +** table (because there exists one or more read-locks on the table), +** the shared-cache enters 'pending-lock' state and isPending is +** set to true. +** +** The shared-cache leaves the 'pending lock' state when either of +** the following occur: +** +** 1) The current writer (BtShared.pWriter) concludes its transaction, OR +** 2) The number of locks held by other connections drops to zero. +** +** while in the 'pending-lock' state, no connection may start a new +** transaction. +** +** This feature is included to help prevent writer-starvation. 
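+**
+** As a concrete scenario: connection A holds a read-lock on a table and
+** the current writer B fails to obtain a write-lock on that table, so
+** isPending is set. A third connection C may not begin a new transaction
+** until either B concludes its write transaction or the read-locks held
+** by the other connections (here, A) are released, which keeps a steady
+** stream of new readers from starving the writer.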
*/ struct BtShared { Pager *pPager; /* The page cache */ + sqlite3 *db; /* Database connection currently using this Btree */ BtCursor *pCursor; /* A list of all open cursors */ MemPage *pPage1; /* First page of the database */ - u8 inStmt; /* True if we are in a statement subtransaction */ u8 readOnly; /* True if the underlying file is readonly */ - u8 maxEmbedFrac; /* Maximum payload as % of total page size */ - u8 minEmbedFrac; /* Minimum payload as % of total page size */ - u8 minLeafFrac; /* Minimum leaf payload as % of total page size */ u8 pageSizeFixed; /* True if the page size can no longer be changed */ #ifndef SQLITE_OMIT_AUTOVACUUM u8 autoVacuum; /* True if auto-vacuum is enabled */ u8 incrVacuum; /* True if incr-vacuum is enabled */ - Pgno nTrunc; /* Non-zero if the db will be truncated (incr vacuum) */ #endif u16 pageSize; /* Total number of bytes on a page */ u16 usableSize; /* Number of usable bytes on each page */ - int maxLocal; /* Maximum local payload in non-LEAFDATA tables */ - int minLocal; /* Minimum local payload in non-LEAFDATA tables */ - int maxLeaf; /* Maximum local payload in a LEAFDATA table */ - int minLeaf; /* Minimum local payload in a LEAFDATA table */ - BusyHandler *pBusyHandler; /* Callback for when there is lock contention */ + u16 maxLocal; /* Maximum local payload in non-LEAFDATA tables */ + u16 minLocal; /* Minimum local payload in non-LEAFDATA tables */ + u16 maxLeaf; /* Maximum local payload in a LEAFDATA table */ + u16 minLeaf; /* Minimum local payload in a LEAFDATA table */ u8 inTransaction; /* Transaction state */ - int nRef; /* Number of references to this structure */ int nTransaction; /* Number of open transactions (read + write) */ void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */ void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */ + sqlite3_mutex *mutex; /* Non-recursive mutex required to access this struct */ + Bitvec *pHasContent; /* Set of pages moved to free-list this transaction */ #ifndef SQLITE_OMIT_SHARED_CACHE + int nRef; /* Number of references to this structure */ + BtShared *pNext; /* Next on a list of sharable BtShared structs */ BtLock *pLock; /* List of locks held on this shared-btree struct */ - BtShared *pNext; /* Next in ThreadData.pBtree linked list */ + Btree *pWriter; /* Btree with currently open write transaction */ + u8 isExclusive; /* True if pWriter has an EXCLUSIVE lock on the db */ + u8 isPending; /* If waiting for read-locks to clear */ #endif + u8 *pTmpSpace; /* BtShared.pageSize bytes of space for tmp use */ }; /* @@ -373,20 +433,41 @@ }; /* -** A cursor is a pointer to a particular entry in the BTree. +** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than +** this will be declared corrupt. This value is calculated based on a +** maximum database size of 2^31 pages a minimum fanout of 2 for a +** root-node and 3 for all other internal nodes. +** +** If a tree that appears to be taller than this is encountered, it is +** assumed that the database is corrupt. +*/ +#define BTCURSOR_MAX_DEPTH 20 + +/* +** A cursor is a pointer to a particular entry within a particular +** b-tree within a database file. +** ** The entry is identified by its MemPage and the index in ** MemPage.aCell[] of the entry. +** +** When a single database file can shared by two more database connections, +** but cursors cannot be shared. Each cursor is associated with a +** particular database connection identified BtCursor.pBtree.db. 
+** +** Fields in this structure are accessed under the BtShared.mutex +** found at self->pBt->mutex. */ struct BtCursor { Btree *pBtree; /* The Btree to which this cursor belongs */ + BtShared *pBt; /* The BtShared this cursor points to */ BtCursor *pNext, *pPrev; /* Forms a linked list of all cursors */ - int (*xCompare)(void*,int,const void*,int,const void*); /* Key comp func */ - void *pArg; /* First arg to xCompare() */ + struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */ Pgno pgnoRoot; /* The root page of this tree */ - MemPage *pPage; /* Page that contains the entry */ - int idx; /* Index of the entry in pPage->aCell[] */ + sqlite3_int64 cachedRowid; /* Next rowid cache. 0 means not valid */ CellInfo info; /* A parse of the cell we are pointing at */ u8 wrFlag; /* True if writable */ + u8 atLast; /* Cursor pointing to the last entry */ + u8 validNKey; /* True if info.nKey is valid */ u8 eState; /* One of the CURSOR_XXX constants (see below) */ void *pKey; /* Saved key that was cursor's last known position */ i64 nKey; /* Size of pKey, or last integer key */ @@ -395,6 +476,9 @@ u8 isIncrblobHandle; /* True if this cursor is an incr. io handle */ Pgno *aOverflow; /* Cache of overflow page locations */ #endif + i16 iPage; /* Index of current page in apPage */ + MemPage *apPage[BTCURSOR_MAX_DEPTH]; /* Pages from root to current page */ + u16 aiIdx[BTCURSOR_MAX_DEPTH]; /* Current index in apPage[i] */ }; /* @@ -412,45 +496,25 @@ ** The table that this cursor was opened on still exists, but has been ** modified since the cursor was last used. The cursor position is saved ** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in -** this state, restoreOrClearCursorPosition() can be called to attempt to +** this state, restoreCursorPosition() can be called to attempt to ** seek the cursor to the saved position. +** +** CURSOR_FAULT: +** A unrecoverable error (an I/O error or a malloc failure) has occurred +** on a different connection that shares the BtShared cache with this +** cursor. The error has left the cache in an inconsistent state. +** Do nothing else with this cursor. Any attempt to use the cursor +** should return the error code stored in BtCursor.skip */ #define CURSOR_INVALID 0 #define CURSOR_VALID 1 #define CURSOR_REQUIRESEEK 2 +#define CURSOR_FAULT 3 -/* -** The TRACE macro will print high-level status information about the -** btree operation when the global variable sqlite3_btree_trace is -** enabled. -*/ -#if SQLITE_TEST -# define TRACE(X) if( sqlite3_btree_trace ){ printf X; fflush(stdout); } -#else -# define TRACE(X) -#endif - -/* -** Routines to read and write variable-length integers. These used to -** be defined locally, but now we use the varint routines in the util.c -** file. -*/ -#define getVarint sqlite3GetVarint -#define getVarint32(A,B) ((*B=*(A))<=0x7f?1:sqlite3GetVarint32(A,B)) -#define putVarint sqlite3PutVarint - -/* The database page the PENDING_BYTE occupies. This page is never used. -** TODO: This macro is very similary to PAGER_MJ_PGNO() in pager.c. They -** should possibly be consolidated (presumably in pager.h). -** -** If disk I/O is omitted (meaning that the database is stored purely -** in memory) then there is no pending byte. +/* +** The database page the PENDING_BYTE occupies. This page is never used. 
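+**
+** PAGER_MJ_PGNO() computes the same (PENDING_BYTE/pageSize)+1 value as
+** the definition it replaces. With the default PENDING_BYTE of 0x40000000
+** and a 1024 byte page size this is (0x40000000/1024)+1 == 1048577, so
+** any database file larger than 1 GiB simply skips that page.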
*/ -#ifdef SQLITE_OMIT_DISKIO -# define PENDING_BYTE_PAGE(pBt) 0x7fffffff -#else -# define PENDING_BYTE_PAGE(pBt) ((PENDING_BYTE/(pBt)->pageSize)+1) -#endif +# define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt) /* ** A linked list of the following structures is stored at BtShared.pLock. @@ -486,7 +550,7 @@ ** this test. */ #define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno) -#define PTRMAP_PTROFFSET(pBt, pgno) (5*(pgno-ptrmapPageno(pBt, pgno)-1)) +#define PTRMAP_PTROFFSET(pgptrmap, pgno) (5*(pgno-pgptrmap-1)) #define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno)) /* @@ -530,8 +594,6 @@ ** of handle p (type Btree*) are internally consistent. */ #define btreeIntegrity(p) \ - assert( p->inTrans!=TRANS_NONE || p->pBt->nTransactionpBt->nRef ); \ - assert( p->pBt->nTransaction<=p->pBt->nRef ); \ assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \ assert( p->pBt->inTransaction>=p->inTrans ); @@ -558,18 +620,19 @@ struct IntegrityCk { BtShared *pBt; /* The tree being checked out */ Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */ - int nPage; /* Number of pages in the database */ + Pgno nPage; /* Number of pages in the database */ int *anRef; /* Number of times each page is referenced */ int mxErr; /* Stop accumulating errors when this reaches zero */ - char *zErrMsg; /* An error message. NULL if no errors seen. */ int nErr; /* Number of messages written to zErrMsg so far */ + int mallocFailed; /* A memory allocation error has occurred */ + StrAccum errMsg; /* Accumulate the error message text here */ }; /* ** Read or write a two- and four-byte big-endian integer values. */ #define get2byte(x) ((x)[0]<<8 | (x)[1]) -#define put2byte(p,v) ((p)[0] = (v)>>8, (p)[1] = (v)) +#define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v)) #define get4byte sqlite3Get4byte #define put4byte sqlite3Put4byte @@ -577,12 +640,13 @@ ** Internal routines that should be accessed by the btree layer only. */ int sqlite3BtreeGetPage(BtShared*, Pgno, MemPage**, int); -int sqlite3BtreeInitPage(MemPage *pPage, MemPage *pParent); +int sqlite3BtreeInitPage(MemPage *pPage); void sqlite3BtreeParseCellPtr(MemPage*, u8*, CellInfo*); void sqlite3BtreeParseCell(MemPage*, int, CellInfo*); -u8 *sqlite3BtreeFindCell(MemPage *pPage, int iCell); -int sqlite3BtreeRestoreOrClearCursorPosition(BtCursor *pCur); +int sqlite3BtreeRestoreCursorPosition(BtCursor *pCur); +void sqlite3BtreeMoveToParent(BtCursor *pCur); + +#ifdef SQLITE_TEST void sqlite3BtreeGetTempCursor(BtCursor *pCur, BtCursor *pTempCur); void sqlite3BtreeReleaseTempCursor(BtCursor *pCur); -int sqlite3BtreeIsRootPage(MemPage *pPage); -void sqlite3BtreeMoveToParent(BtCursor *pCur); +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/build.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/build.c --- sqlite3-3.4.2/src/build.c 2007-07-02 20:31:27.000000000 +0100 +++ sqlite3-3.6.16/src/build.c 2009-06-25 12:50:21.000000000 +0100 @@ -22,17 +22,16 @@ ** COMMIT ** ROLLBACK ** -** $Id: build.c,v 1.433 2007/07/02 19:31:27 drh Exp $ +** $Id: build.c,v 1.554 2009/06/25 11:50:21 drh Exp $ */ #include "sqliteInt.h" -#include /* ** This routine is called when a new SQL statement is beginning to ** be parsed. Initialize the pParse structure as needed. 
*/ void sqlite3BeginParse(Parse *pParse, int explainFlag){ - pParse->explain = explainFlag; + pParse->explain = (u8)explainFlag; pParse->nVar = 0; } @@ -69,10 +68,7 @@ int nBytes; TableLock *p; - if( 0==sqlite3ThreadDataReadOnly()->useSharedData || iDb<0 ){ - return; - } - + assert( iDb>=0 ); for(i=0; inTableLock; i++){ p = &pParse->aTableLock[i]; if( p->iDb==iDb && p->iTab==iTab ){ @@ -82,13 +78,17 @@ } nBytes = sizeof(TableLock) * (pParse->nTableLock+1); - pParse->aTableLock = sqliteReallocOrFree(pParse->aTableLock, nBytes); + pParse->aTableLock = + sqlite3DbReallocOrFree(pParse->db, pParse->aTableLock, nBytes); if( pParse->aTableLock ){ p = &pParse->aTableLock[pParse->nTableLock++]; p->iDb = iDb; p->iTab = iTab; p->isWriteLock = isWriteLock; p->zName = zName; + }else{ + pParse->nTableLock = 0; + pParse->db->mallocFailed = 1; } } @@ -99,19 +99,15 @@ static void codeTableLocks(Parse *pParse){ int i; Vdbe *pVdbe; - assert( sqlite3ThreadDataReadOnly()->useSharedData || pParse->nTableLock==0 ); - if( 0==(pVdbe = sqlite3GetVdbe(pParse)) ){ - return; - } + pVdbe = sqlite3GetVdbe(pParse); + assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */ for(i=0; inTableLock; i++){ TableLock *p = &pParse->aTableLock[i]; int p1 = p->iDb; - if( p->isWriteLock ){ - p1 = -1*(p1+1); - } - sqlite3VdbeOp3(pVdbe, OP_TableLock, p1, p->iTab, p->zName, P3_STATIC); + sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock, + p->zName, P4_STATIC); } } #else @@ -132,22 +128,17 @@ sqlite3 *db; Vdbe *v; - if( sqlite3MallocFailed() ) return; + db = pParse->db; + if( db->mallocFailed ) return; if( pParse->nested ) return; - if( !pParse->pVdbe ){ - if( pParse->rc==SQLITE_OK && pParse->nErr ){ - pParse->rc = SQLITE_ERROR; - return; - } - } + if( pParse->nErr ) return; /* Begin by generating some termination code at the end of the ** vdbe program */ - db = pParse->db; v = sqlite3GetVdbe(pParse); if( v ){ - sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + sqlite3VdbeAddOp0(v, OP_Halt); /* The cookie mask contains one bit for each database file open. ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are @@ -161,13 +152,20 @@ sqlite3VdbeJumpHere(v, pParse->cookieGoto-1); for(iDb=0, mask=1; iDbnDb; mask<<=1, iDb++){ if( (mask & pParse->cookieMask)==0 ) continue; - sqlite3VdbeAddOp(v, OP_Transaction, iDb, (mask & pParse->writeMask)!=0); - sqlite3VdbeAddOp(v, OP_VerifyCookie, iDb, pParse->cookieValue[iDb]); + sqlite3VdbeUsesBtree(v, iDb); + sqlite3VdbeAddOp2(v,OP_Transaction, iDb, (mask & pParse->writeMask)!=0); + if( db->init.busy==0 ){ + sqlite3VdbeAddOp2(v,OP_VerifyCookie, iDb, pParse->cookieValue[iDb]); + } } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( pParse->pVirtualLock ){ - char *vtab = (char *)pParse->pVirtualLock->pVtab; - sqlite3VdbeOp3(v, OP_VBegin, 0, 0, vtab, P3_VTAB); + { + int i; + for(i=0; inVtabLock; i++){ + char *vtab = (char *)pParse->apVtabLock[i]->pVtab; + sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); + } + pParse->nVtabLock = 0; } #endif @@ -176,30 +174,27 @@ ** shared-cache feature is enabled. */ codeTableLocks(pParse); - sqlite3VdbeAddOp(v, OP_Goto, 0, pParse->cookieGoto); - } -#ifndef SQLITE_OMIT_TRACE - /* Add a No-op that contains the complete text of the compiled SQL - ** statement as its P3 argument. This does not change the functionality - ** of the program. - ** - ** This is used to implement sqlite3_trace(). 
- */ - sqlite3VdbeOp3(v, OP_Noop, 0, 0, pParse->zSql, pParse->zTail-pParse->zSql); -#endif /* SQLITE_OMIT_TRACE */ + /* Initialize any AUTOINCREMENT data structures required. + */ + sqlite3AutoincrementBegin(pParse); + + /* Finally, jump back to the beginning of the executable code. */ + sqlite3VdbeAddOp2(v, OP_Goto, 0, pParse->cookieGoto); + } } /* Get the VDBE program ready for execution */ - if( v && pParse->nErr==0 && !sqlite3MallocFailed() ){ + if( v && ALWAYS(pParse->nErr==0) && !db->mallocFailed ){ #ifdef SQLITE_DEBUG FILE *trace = (db->flags & SQLITE_VdbeTrace)!=0 ? stdout : 0; sqlite3VdbeTrace(v, trace); #endif - sqlite3VdbeMakeReady(v, pParse->nVar, pParse->nMem+3, - pParse->nTab+3, pParse->explain); + assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */ + sqlite3VdbeMakeReady(v, pParse->nVar, pParse->nMem, + pParse->nTab, pParse->explain); pParse->rc = SQLITE_DONE; pParse->colNamesSet = 0; }else if( pParse->rc==SQLITE_OK ){ @@ -228,13 +223,15 @@ void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ va_list ap; char *zSql; + char *zErrMsg = 0; + sqlite3 *db = pParse->db; # define SAVE_SZ (sizeof(Parse) - offsetof(Parse,nVar)) char saveBuf[SAVE_SZ]; if( pParse->nErr ) return; assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ va_start(ap, zFormat); - zSql = sqlite3VMPrintf(zFormat, ap); + zSql = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); if( zSql==0 ){ return; /* A malloc must have failed */ @@ -242,8 +239,9 @@ pParse->nested++; memcpy(saveBuf, &pParse->nVar, SAVE_SZ); memset(&pParse->nVar, 0, SAVE_SZ); - sqlite3RunParser(pParse, zSql, 0); - sqliteFree(zSql); + sqlite3RunParser(pParse, zSql, &zErrMsg); + sqlite3DbFree(db, zErrMsg); + sqlite3DbFree(db, zSql); memcpy(&pParse->nVar, saveBuf, SAVE_SZ); pParse->nested--; } @@ -263,11 +261,13 @@ Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){ Table *p = 0; int i; + int nName; assert( zName!=0 ); + nName = sqlite3Strlen30(zName); for(i=OMIT_TEMPDB; inDb; i++){ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue; - p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName, strlen(zName)+1); + p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName, nName); if( p ) break; } return p; @@ -283,7 +283,12 @@ ** routine leaves an error message in pParse->zErrMsg where ** sqlite3FindTable() does not. */ -Table *sqlite3LocateTable(Parse *pParse, const char *zName, const char *zDbase){ +Table *sqlite3LocateTable( + Parse *pParse, /* context in which to report errors */ + int isView, /* True if looking for a VIEW rather than a TABLE */ + const char *zName, /* Name of the table we are looking for */ + const char *zDbase /* Name of the database. Might be NULL */ +){ Table *p; /* Read the database schema. If an error occurs, leave an error message @@ -294,10 +299,11 @@ p = sqlite3FindTable(pParse->db, zName, zDbase); if( p==0 ){ + const char *zMsg = isView ? "no such view" : "no such table"; if( zDbase ){ - sqlite3ErrorMsg(pParse, "no such table: %s.%s", zDbase, zName); + sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); }else{ - sqlite3ErrorMsg(pParse, "no such table: %s", zName); + sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); } pParse->checkSchema = 1; } @@ -319,14 +325,13 @@ Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){ Index *p = 0; int i; + int nName = sqlite3Strlen30(zName); for(i=OMIT_TEMPDB; inDb; i++){ int j = (i<2) ? 
i^1 : i; /* Search TEMP before MAIN */ Schema *pSchema = db->aDb[j].pSchema; + assert( pSchema ); if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue; - assert( pSchema || (j==1 && !db->aDb[1].pBt) ); - if( pSchema ){ - p = sqlite3HashFind(&pSchema->idxHash, zName, strlen(zName)+1); - } + p = sqlite3HashFind(&pSchema->idxHash, zName, nName); if( p ) break; } return p; @@ -336,8 +341,10 @@ ** Reclaim the memory used by an index */ static void freeIndex(Index *p){ - sqliteFree(p->zColAff); - sqliteFree(p); + sqlite3 *db = p->pTable->dbMem; + /* testcase( db==0 ); */ + sqlite3DbFree(db, p->zColAff); + sqlite3DbFree(db, p); } /* @@ -348,11 +355,12 @@ ** it is not unlinked from the Table that it indexes. ** Unlinking from the Table must be done by the calling function. */ -static void sqliteDeleteIndex(Index *p){ +static void sqlite3DeleteIndex(Index *p){ Index *pOld; const char *zName = p->zName; - pOld = sqlite3HashInsert(&p->pSchema->idxHash, zName, strlen( zName)+1, 0); + pOld = sqlite3HashInsert(&p->pSchema->idxHash, zName, + sqlite3Strlen30(zName), 0); assert( pOld==0 || pOld==p ); freeIndex(p); } @@ -368,15 +376,18 @@ int len; Hash *pHash = &db->aDb[iDb].pSchema->idxHash; - len = strlen(zIdxName); - pIndex = sqlite3HashInsert(pHash, zIdxName, len+1, 0); + len = sqlite3Strlen30(zIdxName); + pIndex = sqlite3HashInsert(pHash, zIdxName, len, 0); if( pIndex ){ if( pIndex->pTable->pIndex==pIndex ){ pIndex->pTable->pIndex = pIndex->pNext; }else{ Index *p; - for(p=pIndex->pTable->pIndex; p && p->pNext!=pIndex; p=p->pNext){} - if( p && p->pNext==pIndex ){ + /* Justification of ALWAYS(); The index must be on the list of + ** indices. */ + p = pIndex->pTable->pIndex; + while( ALWAYS(p) && p->pNext!=pIndex ){ p = p->pNext; } + if( ALWAYS(p && p->pNext==pIndex) ){ p->pNext = pIndex->pNext; } } @@ -392,23 +403,28 @@ ** if there were schema changes during the transaction or if a ** schema-cookie mismatch occurs. ** -** If iDb<=0 then reset the internal schema tables for all database -** files. If iDb>=2 then reset the internal schema for only the +** If iDb==0 then reset the internal schema tables for all database +** files. If iDb>=1 then reset the internal schema for only the ** single file indicated. */ void sqlite3ResetInternalSchema(sqlite3 *db, int iDb){ int i, j; - assert( iDb>=0 && iDbnDb ); + + if( iDb==0 ){ + sqlite3BtreeEnterAll(db); + } for(i=iDb; inDb; i++){ Db *pDb = &db->aDb[i]; if( pDb->pSchema ){ + assert(i==1 || (pDb->pBt && sqlite3BtreeHoldsMutex(pDb->pBt))); sqlite3SchemaFree(pDb->pSchema); } if( iDb>0 ) return; } assert( iDb==0 ); db->flags &= ~SQLITE_InternChanges; + sqlite3BtreeLeaveAll(db); /* If one or more of the auxiliary database files has been closed, ** then remove them from the auxiliary database list. We take the @@ -416,17 +432,10 @@ ** schema hash tables and therefore do not have to make any changes ** to any of those tables. 
*/ - for(i=0; inDb; i++){ - struct Db *pDb = &db->aDb[i]; - if( pDb->pBt==0 ){ - if( pDb->pAux && pDb->xFreeAux ) pDb->xFreeAux(pDb->pAux); - pDb->pAux = 0; - } - } for(i=j=2; inDb; i++){ struct Db *pDb = &db->aDb[i]; if( pDb->pBt==0 ){ - sqliteFree(pDb->zName); + sqlite3DbFree(db, pDb->zName); pDb->zName = 0; continue; } @@ -439,7 +448,7 @@ db->nDb = j; if( db->nDb<=2 && db->aDb!=db->aDbStatic ){ memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0])); - sqliteFree(db->aDb); + sqlite3DbFree(db, db->aDb); db->aDb = db->aDbStatic; } } @@ -457,15 +466,18 @@ static void sqliteResetColumnNames(Table *pTable){ int i; Column *pCol; + sqlite3 *db = pTable->dbMem; + testcase( db==0 ); assert( pTable!=0 ); if( (pCol = pTable->aCol)!=0 ){ for(i=0; inCol; i++, pCol++){ - sqliteFree(pCol->zName); - sqlite3ExprDelete(pCol->pDflt); - sqliteFree(pCol->zType); - sqliteFree(pCol->zColl); + sqlite3DbFree(db, pCol->zName); + sqlite3ExprDelete(db, pCol->pDflt); + sqlite3DbFree(db, pCol->zDflt); + sqlite3DbFree(db, pCol->zType); + sqlite3DbFree(db, pCol->zColl); } - sqliteFree(pTable->aCol); + sqlite3DbFree(db, pTable->aCol); } pTable->aCol = 0; pTable->nCol = 0; @@ -476,16 +488,18 @@ ** Table. No changes are made to disk by this routine. ** ** This routine just deletes the data structure. It does not unlink -** the table data structure from the hash table. Nor does it remove -** foreign keys from the sqlite.aFKey hash table. But it does destroy +** the table data structure from the hash table. But it does destroy ** memory structures of the indices and foreign keys associated with ** the table. */ void sqlite3DeleteTable(Table *pTable){ Index *pIndex, *pNext; FKey *pFKey, *pNextFKey; + sqlite3 *db; if( pTable==0 ) return; + db = pTable->dbMem; + testcase( db==0 ); /* Do not delete the table until the reference count reaches zero. */ pTable->nRef--; @@ -499,32 +513,28 @@ for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){ pNext = pIndex->pNext; assert( pIndex->pSchema==pTable->pSchema ); - sqliteDeleteIndex(pIndex); + sqlite3DeleteIndex(pIndex); } #ifndef SQLITE_OMIT_FOREIGN_KEY - /* Delete all foreign keys associated with this table. The keys - ** should have already been unlinked from the pSchema->aFKey hash table - */ + /* Delete all foreign keys associated with this table. */ for(pFKey=pTable->pFKey; pFKey; pFKey=pNextFKey){ pNextFKey = pFKey->pNextFrom; - assert( sqlite3HashFind(&pTable->pSchema->aFKey, - pFKey->zTo, strlen(pFKey->zTo)+1)!=pFKey ); - sqliteFree(pFKey); + sqlite3DbFree(db, pFKey); } #endif /* Delete the Table structure itself. 
*/ sqliteResetColumnNames(pTable); - sqliteFree(pTable->zName); - sqliteFree(pTable->zColAff); - sqlite3SelectDelete(pTable->pSelect); + sqlite3DbFree(db, pTable->zName); + sqlite3DbFree(db, pTable->zColAff); + sqlite3SelectDelete(db, pTable->pSelect); #ifndef SQLITE_OMIT_CHECK - sqlite3ExprDelete(pTable->pCheck); + sqlite3ExprDelete(db, pTable->pCheck); #endif sqlite3VtabClear(pTable); - sqliteFree(pTable); + sqlite3DbFree(db, pTable); } /* @@ -533,48 +543,35 @@ */ void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){ Table *p; - FKey *pF1, *pF2; Db *pDb; assert( db!=0 ); assert( iDb>=0 && iDbnDb ); assert( zTabName && zTabName[0] ); pDb = &db->aDb[iDb]; - p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, strlen(zTabName)+1,0); - if( p ){ -#ifndef SQLITE_OMIT_FOREIGN_KEY - for(pF1=p->pFKey; pF1; pF1=pF1->pNextFrom){ - int nTo = strlen(pF1->zTo) + 1; - pF2 = sqlite3HashFind(&pDb->pSchema->aFKey, pF1->zTo, nTo); - if( pF2==pF1 ){ - sqlite3HashInsert(&pDb->pSchema->aFKey, pF1->zTo, nTo, pF1->pNextTo); - }else{ - while( pF2 && pF2->pNextTo!=pF1 ){ pF2=pF2->pNextTo; } - if( pF2 ){ - pF2->pNextTo = pF1->pNextTo; - } - } - } -#endif - sqlite3DeleteTable(p); - } + p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, + sqlite3Strlen30(zTabName),0); + sqlite3DeleteTable(p); db->flags |= SQLITE_InternChanges; } /* ** Given a token, return a string that consists of the text of that -** token with any quotations removed. Space to hold the returned string +** token. Space to hold the returned string ** is obtained from sqliteMalloc() and must be freed by the calling ** function. ** +** Any quotation marks (ex: "name", 'name', [name], or `name`) that +** surround the body of the token are removed. +** ** Tokens are often just pointers into the original SQL text and so ** are not \000 terminated and are not persistent. The returned string ** is \000 terminated and is persistent. */ -char *sqlite3NameFromToken(Token *pName){ +char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ char *zName; if( pName ){ - zName = sqliteStrNDup((char*)pName->z, pName->n); + zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n); sqlite3Dequote(zName); }else{ zName = 0; @@ -589,37 +586,49 @@ void sqlite3OpenMasterTable(Parse *p, int iDb){ Vdbe *v = sqlite3GetVdbe(p); sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb)); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - sqlite3VdbeAddOp(v, OP_OpenWrite, 0, MASTER_ROOT); - sqlite3VdbeAddOp(v, OP_SetNumColumns, 0, 5); /* sqlite_master has 5 columns */ + sqlite3VdbeAddOp3(v, OP_OpenWrite, 0, MASTER_ROOT, iDb); + sqlite3VdbeChangeP4(v, -1, (char *)5, P4_INT32); /* 5 column table */ + if( p->nTab==0 ){ + p->nTab = 1; + } } /* -** The token *pName contains the name of a database (either "main" or -** "temp" or the name of an attached db). This routine returns the -** index of the named database in db->aDb[], or -1 if the named db -** does not exist. +** Parameter zName points to a nul-terminated buffer containing the name +** of a database ("main", "temp" or the name of an attached db). This +** function returns the index of the named database in db->aDb[], or +** -1 if the named db cannot be found. 
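The rewritten comment above describes the new sqlite3FindDbName() helper defined just below: given a NUL-terminated name, report the index of the matching entry in db->aDb[] or -1. A standalone sketch of that kind of reverse, case-insensitive scan (using POSIX strcasecmp() as a stand-in for SQLite's own case-folding compare, and ignoring the OMIT_TEMPDB special case) could read:

    #include <strings.h>

    /* Sketch: search an array of nDb database names from the end, so the
    ** most recently attached database wins, and return its index or -1. */
    static int find_db_name(char **azName, int nDb, const char *zName){
      int i;
      if( zName==0 ) return -1;
      for(i=nDb-1; i>=0; i--){
        if( strcasecmp(azName[i], zName)==0 ) break;
      }
      return i;  /* the loop leaves i at -1 when nothing matched */
    }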
*/ -int sqlite3FindDb(sqlite3 *db, Token *pName){ - int i = -1; /* Database number */ - int n; /* Number of characters in the name */ - Db *pDb; /* A database whose name space is being searched */ - char *zName; /* Name we are searching for */ - - zName = sqlite3NameFromToken(pName); +int sqlite3FindDbName(sqlite3 *db, const char *zName){ + int i = -1; /* Database number */ if( zName ){ - n = strlen(zName); + Db *pDb; + int n = sqlite3Strlen30(zName); for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){ - if( (!OMIT_TEMPDB || i!=1 ) && n==strlen(pDb->zName) && + if( (!OMIT_TEMPDB || i!=1 ) && n==sqlite3Strlen30(pDb->zName) && 0==sqlite3StrICmp(pDb->zName, zName) ){ break; } } - sqliteFree(zName); } return i; } +/* +** The token *pName contains the name of a database (either "main" or +** "temp" or the name of an attached db). This routine returns the +** index of the named database in db->aDb[], or -1 if the named db +** does not exist. +*/ +int sqlite3FindDb(sqlite3 *db, Token *pName){ + int i; /* Database number */ + char *zName; /* Name we are searching for */ + zName = sqlite3NameFromToken(db, pName); + i = sqlite3FindDbName(db, zName); + sqlite3DbFree(db, zName); + return i; +} + /* The table or view or trigger name is passed to this routine via tokens ** pName1 and pName2. If the table name was fully qualified, for example: ** @@ -645,8 +654,12 @@ int iDb; /* Database holding the object */ sqlite3 *db = pParse->db; - if( pName2 && pName2->n>0 ){ - assert( !db->init.busy ); + if( ALWAYS(pName2!=0) && pName2->n>0 ){ + if( db->init.busy ) { + sqlite3ErrorMsg(pParse, "corrupt database"); + pParse->nErr++; + return -1; + } *pUnqual = pName2; iDb = sqlite3FindDb(db, pName1); if( iDb<0 ){ @@ -738,7 +751,7 @@ if( !OMIT_TEMPDB && isTemp ) iDb = 1; pParse->sNameToken = *pName; - zName = sqlite3NameFromToken(pName); + zName = sqlite3NameFromToken(db, pName); if( zName==0 ) return; if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto begin_table_error; @@ -795,8 +808,9 @@ } } - pTable = sqliteMalloc( sizeof(Table) ); + pTable = sqlite3DbMallocZero(db, sizeof(Table)); if( pTable==0 ){ + db->mallocFailed = 1; pParse->rc = SQLITE_NOMEM; pParse->nErr++; goto begin_table_error; @@ -805,7 +819,8 @@ pTable->iPKey = -1; pTable->pSchema = db->aDb[iDb].pSchema; pTable->nRef = 1; - if( pParse->pNewTable ) sqlite3DeleteTable(pParse->pNewTable); + pTable->dbMem = 0; + assert( pParse->pNewTable==0 ); pParse->pNewTable = pTable; /* If this is the magic sqlite_sequence table used by autoincrement, @@ -827,53 +842,57 @@ ** now. */ if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){ - int lbl; + int j1; int fileFormat; + int reg1, reg2, reg3; sqlite3BeginWriteOperation(pParse, 0, iDb); #ifndef SQLITE_OMIT_VIRTUALTABLE if( isVirtual ){ - sqlite3VdbeAddOp(v, OP_VBegin, 0, 0); + sqlite3VdbeAddOp0(v, OP_VBegin); } #endif /* If the file format and encoding in the database have not been set, ** set them now. */ - sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 1); /* file_format */ - lbl = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp(v, OP_If, 0, lbl); + reg1 = pParse->regRowid = ++pParse->nMem; + reg2 = pParse->regRoot = ++pParse->nMem; + reg3 = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT); + sqlite3VdbeUsesBtree(v, iDb); + j1 = sqlite3VdbeAddOp1(v, OP_If, reg3); fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ? 
1 : SQLITE_MAX_FILE_FORMAT; - sqlite3VdbeAddOp(v, OP_Integer, fileFormat, 0); - sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 1); - sqlite3VdbeAddOp(v, OP_Integer, ENC(db), 0); - sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 4); - sqlite3VdbeResolveLabel(v, lbl); + sqlite3VdbeAddOp2(v, OP_Integer, fileFormat, reg3); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, reg3); + sqlite3VdbeAddOp2(v, OP_Integer, ENC(db), reg3); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, reg3); + sqlite3VdbeJumpHere(v, j1); /* This just creates a place-holder record in the sqlite_master table. ** The record created does not contain anything yet. It will be replaced ** by the real entry in code generated at sqlite3EndTable(). ** - ** The rowid for the new entry is left on the top of the stack. - ** The rowid value is needed by the code that sqlite3EndTable will - ** generate. + ** The rowid for the new entry is left in register pParse->regRowid. + ** The root page number of the new table is left in reg pParse->regRoot. + ** The rowid and root page number values are needed by the code that + ** sqlite3EndTable will generate. */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) if( isView || isVirtual ){ - sqlite3VdbeAddOp(v, OP_Integer, 0, 0); + sqlite3VdbeAddOp2(v, OP_Integer, 0, reg2); }else #endif { - sqlite3VdbeAddOp(v, OP_CreateTable, iDb, 0); + sqlite3VdbeAddOp2(v, OP_CreateTable, iDb, reg2); } sqlite3OpenMasterTable(pParse, iDb); - sqlite3VdbeAddOp(v, OP_NewRowid, 0, 0); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3VdbeAddOp(v, OP_Null, 0, 0); - sqlite3VdbeAddOp(v, OP_Insert, 0, OPFLAG_APPEND); - sqlite3VdbeAddOp(v, OP_Close, 0, 0); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1); + sqlite3VdbeAddOp2(v, OP_Null, 0, reg3); + sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3VdbeAddOp0(v, OP_Close); } /* Normal (non-error) return. */ @@ -881,7 +900,7 @@ /* If an error occurs, we jump here */ begin_table_error: - sqliteFree(zName); + sqlite3DbFree(db, zName); return; } @@ -911,25 +930,28 @@ int i; char *z; Column *pCol; + sqlite3 *db = pParse->db; if( (p = pParse->pNewTable)==0 ) return; - if( p->nCol+1>SQLITE_MAX_COLUMN ){ +#if SQLITE_MAX_COLUMN + if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); return; } - z = sqlite3NameFromToken(pName); +#endif + z = sqlite3NameFromToken(db, pName); if( z==0 ) return; for(i=0; inCol; i++){ if( STRICMP(z, p->aCol[i].zName) ){ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); - sqliteFree(z); + sqlite3DbFree(db, z); return; } } if( (p->nCol & 0x7)==0 ){ Column *aNew; - aNew = sqliteRealloc( p->aCol, (p->nCol+8)*sizeof(p->aCol[0])); + aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0])); if( aNew==0 ){ - sqliteFree(z); + sqlite3DbFree(db, z); return; } p->aCol = aNew; @@ -954,10 +976,9 @@ */ void sqlite3AddNotNull(Parse *pParse, int onError){ Table *p; - int i; - if( (p = pParse->pNewTable)==0 ) return; - i = p->nCol-1; - if( i>=0 ) p->aCol[i].notNull = onError; + p = pParse->pNewTable; + if( p==0 || NEVER(p->nCol<1) ) return; + p->aCol[p->nCol-1].notNull = (u8)onError; } /* @@ -985,14 +1006,12 @@ ** If none of the substrings in the above table are found, ** SQLITE_AFF_NUMERIC is returned. 
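The column-affinity rules restated above are implemented, in the replacement body of sqlite3AffinityType() just below, by folding each byte of the declared type into a rolling 32-bit value and comparing it against packed four-byte constants. A self-contained sketch of that detection trick, reduced to just the CHAR and TEXT keywords, is:

    #include <ctype.h>

    /* Sketch: report whether zType contains "char" or "text", using the
    ** shift-and-compare accumulator from the patched sqlite3AffinityType().
    ** The real routine checks more keywords and maps them to affinities. */
    static int looks_like_text_affinity(const char *zType){
      unsigned int h = 0;
      while( *zType ){
        h = (h<<8) + (unsigned char)tolower((unsigned char)*zType);
        zType++;
        if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ) return 1;  /* ...CHAR... */
        if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ) return 1;  /* ...TEXT... */
      }
      return 0;  /* caller falls back to NUMERIC affinity */
    }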
*/ -char sqlite3AffinityType(const Token *pType){ +char sqlite3AffinityType(const char *zIn){ u32 h = 0; char aff = SQLITE_AFF_NUMERIC; - const unsigned char *zIn = pType->z; - const unsigned char *zEnd = &pType->z[pType->n]; - while( zIn!=zEnd ){ - h = (h<<8) + sqlite3UpperToLower[*zIn]; + if( zIn ) while( zIn[0] ){ + h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; zIn++; if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ aff = SQLITE_AFF_TEXT; @@ -1034,16 +1053,14 @@ */ void sqlite3AddColumnType(Parse *pParse, Token *pType){ Table *p; - int i; Column *pCol; - if( (p = pParse->pNewTable)==0 ) return; - i = p->nCol-1; - if( i<0 ) return; - pCol = &p->aCol[i]; - sqliteFree(pCol->zType); - pCol->zType = sqlite3NameFromToken(pType); - pCol->affinity = sqlite3AffinityType(pType); + p = pParse->pNewTable; + if( p==0 || NEVER(p->nCol<1) ) return; + pCol = &p->aCol[p->nCol-1]; + assert( pCol->zType==0 ); + pCol->zType = sqlite3NameFromToken(pParse->db, pType); + pCol->affinity = sqlite3AffinityType(pCol->zType); } /* @@ -1056,24 +1073,29 @@ ** This routine is called by the parser while in the middle of ** parsing a CREATE TABLE statement. */ -void sqlite3AddDefaultValue(Parse *pParse, Expr *pExpr){ +void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){ Table *p; Column *pCol; - if( (p = pParse->pNewTable)!=0 ){ + sqlite3 *db = pParse->db; + p = pParse->pNewTable; + if( p!=0 ){ pCol = &(p->aCol[p->nCol-1]); - if( !sqlite3ExprIsConstantOrFunction(pExpr) ){ + if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr) ){ sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant", pCol->zName); }else{ - Expr *pCopy; - sqlite3ExprDelete(pCol->pDflt); - pCol->pDflt = pCopy = sqlite3ExprDup(pExpr); - if( pCopy ){ - sqlite3TokenCopy(&pCopy->span, &pExpr->span); - } + /* A copy of pExpr is used instead of the original, as pExpr contains + ** tokens that point to volatile memory. The 'span' of the expression + ** is required by pragma table_info. 
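sqlite3AddDefaultValue() now receives an ExprSpan and keeps two things: a reduced copy of the expression tree (the original points into the volatile SQL text) and the raw text of the span itself, stored for pragma table_info. Copying a half-open [zStart, zEnd) span into its own NUL-terminated buffer, as the sqlite3DbStrNDup() call just below does, amounts to the following sketch (plain malloc() stands in for the connection-charged allocator):

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: duplicate the span [zStart, zEnd) of a larger SQL string
    ** into an independent NUL-terminated buffer owned by the caller. */
    static char *copy_span(const char *zStart, const char *zEnd){
      size_t n = (size_t)(zEnd - zStart);
      char *z = malloc(n + 1);
      if( z ){
        memcpy(z, zStart, n);
        z[n] = 0;
      }
      return z;  /* free() when no longer needed */
    }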
+ */ + sqlite3ExprDelete(db, pCol->pDflt); + pCol->pDflt = sqlite3ExprDup(db, pSpan->pExpr, EXPRDUP_REDUCE); + sqlite3DbFree(db, pCol->zDflt); + pCol->zDflt = sqlite3DbStrNDup(db, (char*)pSpan->zStart, + (int)(pSpan->zEnd - pSpan->zStart)); } } - sqlite3ExprDelete(pExpr); + sqlite3ExprDelete(db, pSpan->pExpr); } /* @@ -1105,12 +1127,12 @@ char *zType = 0; int iCol = -1, i; if( pTab==0 || IN_DECLARE_VTAB ) goto primary_key_exit; - if( pTab->hasPrimKey ){ + if( pTab->tabFlags & TF_HasPrimaryKey ){ sqlite3ErrorMsg(pParse, "table \"%s\" has more than one primary key", pTab->zName); goto primary_key_exit; } - pTab->hasPrimKey = 1; + pTab->tabFlags |= TF_HasPrimaryKey; if( pList==0 ){ iCol = pTab->nCol - 1; pTab->aCol[iCol].isPrimKey = 1; @@ -1133,8 +1155,9 @@ if( zType && sqlite3StrICmp(zType, "INTEGER")==0 && sortOrder==SQLITE_SO_ASC ){ pTab->iPKey = iCol; - pTab->keyConf = onError; - pTab->autoInc = autoInc; + pTab->keyConf = (u8)onError; + assert( autoInc==0 || autoInc==1 ); + pTab->tabFlags |= autoInc*TF_Autoincrement; }else if( autoInc ){ #ifndef SQLITE_OMIT_AUTOINCREMENT sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an " @@ -1146,7 +1169,7 @@ } primary_key_exit: - sqlite3ExprListDelete(pList); + sqlite3ExprListDelete(pParse->db, pList); return; } @@ -1157,32 +1180,37 @@ Parse *pParse, /* Parsing context */ Expr *pCheckExpr /* The check expression */ ){ + sqlite3 *db = pParse->db; #ifndef SQLITE_OMIT_CHECK Table *pTab = pParse->pNewTable; if( pTab && !IN_DECLARE_VTAB ){ - /* The CHECK expression must be duplicated so that tokens refer - ** to malloced space and not the (ephemeral) text of the CREATE TABLE - ** statement */ - pTab->pCheck = sqlite3ExprAnd(pTab->pCheck, sqlite3ExprDup(pCheckExpr)); - } + pTab->pCheck = sqlite3ExprAnd(db, pTab->pCheck, pCheckExpr); + }else #endif - sqlite3ExprDelete(pCheckExpr); + { + sqlite3ExprDelete(db, pCheckExpr); + } } /* ** Set the collation function of the most recently parsed table column ** to the CollSeq given. */ -void sqlite3AddCollateType(Parse *pParse, const char *zType, int nType){ +void sqlite3AddCollateType(Parse *pParse, Token *pToken){ Table *p; int i; + char *zColl; /* Dequoted name of collation sequence */ + sqlite3 *db; if( (p = pParse->pNewTable)==0 ) return; i = p->nCol-1; + db = pParse->db; + zColl = sqlite3NameFromToken(db, pToken); + if( !zColl ) return; - if( sqlite3LocateCollSeq(pParse, zType, nType) ){ + if( sqlite3LocateCollSeq(pParse, zColl) ){ Index *pIdx; - p->aCol[i].zColl = sqliteStrNDup(zType, nType); + p->aCol[i].zColl = zColl; /* If the column is declared as " PRIMARY KEY COLLATE ", ** then an index may have been created on this column before the @@ -1194,6 +1222,8 @@ pIdx->azColl[0] = p->aCol[i].zColl; } } + }else{ + sqlite3DbFree(db, zColl); } } @@ -1214,22 +1244,20 @@ ** This routine is a wrapper around sqlite3FindCollSeq(). This routine ** invokes the collation factory if the named collation cannot be found ** and generates an error message. 
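sqlite3LocateCollSeq(), whose revised signature drops the separate name-length argument, wraps sqlite3FindCollSeq() with one extra step: when the collation is unknown or has no comparison function, the application's collation factory gets a chance to supply it before the error is reported. A rough standalone sketch of that locate-or-create pattern, with stand-in types and callbacks rather than the real SQLite ones:

    #include <stdio.h>

    typedef struct CollSketch CollSketch;
    struct CollSketch {
      const char *zName;                              /* collation name */
      int (*xCmp)(const void*,int,const void*,int);   /* comparison func */
    };

    /* Sketch: look up a collation; if missing, let a "collation needed"
    ** factory callback try to register it, then look again and complain
    ** if it is still unavailable. */
    static CollSketch *locate_coll(
      const char *zName,
      CollSketch *(*xFind)(const char*),   /* stand-in for the find step */
      void (*xCollNeeded)(const char*)     /* application's factory hook */
    ){
      CollSketch *p = xFind(zName);
      if( p==0 || p->xCmp==0 ){
        if( xCollNeeded ) xCollNeeded(zName);
        p = xFind(zName);
      }
      if( p==0 || p->xCmp==0 ){
        fprintf(stderr, "no such collation sequence: %s\n", zName);
        p = 0;
      }
      return p;
    }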
+** +** See also: sqlite3FindCollSeq(), sqlite3GetCollSeq() */ -CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName, int nName){ +CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){ sqlite3 *db = pParse->db; u8 enc = ENC(db); u8 initbusy = db->init.busy; CollSeq *pColl; - pColl = sqlite3FindCollSeq(db, enc, zName, nName, initbusy); + pColl = sqlite3FindCollSeq(db, enc, zName, initbusy); if( !initbusy && (!pColl || !pColl->xCmp) ){ - pColl = sqlite3GetCollSeq(db, pColl, zName, nName); + pColl = sqlite3GetCollSeq(db, pColl, zName); if( !pColl ){ - if( nName<0 ){ - nName = strlen(zName); - } - sqlite3ErrorMsg(pParse, "no such collation sequence: %.*s", nName, zName); - pColl = 0; + sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); } } @@ -1253,9 +1281,13 @@ ** and the probability of hitting the same cookie value is only ** 1 chance in 2^32. So we're safe enough. */ -void sqlite3ChangeCookie(sqlite3 *db, Vdbe *v, int iDb){ - sqlite3VdbeAddOp(v, OP_Integer, db->aDb[iDb].pSchema->schema_cookie+1, 0); - sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 0); +void sqlite3ChangeCookie(Parse *pParse, int iDb){ + int r1 = sqlite3GetTempReg(pParse); + sqlite3 *db = pParse->db; + Vdbe *v = pParse->pVdbe; + sqlite3VdbeAddOp2(v, OP_Integer, db->aDb[iDb].pSchema->schema_cookie+1, r1); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_SCHEMA_VERSION, r1); + sqlite3ReleaseTempReg(pParse, r1); } /* @@ -1275,18 +1307,31 @@ } /* -** Write an identifier onto the end of the given string. Add -** quote characters as needed. +** The first parameter is a pointer to an output buffer. The second +** parameter is a pointer to an integer that contains the offset at +** which to write into the output buffer. This function copies the +** nul-terminated string pointed to by the third parameter, zSignedIdent, +** to the specified offset in the buffer and updates *pIdx to refer +** to the first byte after the last byte written before returning. +** +** If the string zSignedIdent consists entirely of alpha-numeric +** characters, does not begin with a digit and is not an SQL keyword, +** then it is copied to the output buffer exactly as it is. Otherwise, +** it is quoted using double-quotes. */ static void identPut(char *z, int *pIdx, char *zSignedIdent){ unsigned char *zIdent = (unsigned char*)zSignedIdent; int i, j, needQuote; i = *pIdx; + for(j=0; zIdent[j]; j++){ - if( !isalnum(zIdent[j]) && zIdent[j]!='_' ) break; + if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break; + } + needQuote = sqlite3Isdigit(zIdent[0]) || sqlite3KeywordCode(zIdent, j)!=TK_ID; + if( !needQuote ){ + needQuote = zIdent[j]; } - needQuote = zIdent[j]!=0 || isdigit(zIdent[0]) - || sqlite3KeywordCode(zIdent, j)!=TK_ID; + if( needQuote ) z[i++] = '"'; for(j=0; zIdent[j]; j++){ z[i++] = zIdent[j]; @@ -1302,21 +1347,17 @@ ** table. Memory to hold the text of the statement is obtained ** from sqliteMalloc() and must be freed by the calling function. 
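The identPut() rule restated a few hunks up (copy an identifier bare only when it is all alphanumerics or underscores, does not start with a digit, and is not a keyword; otherwise wrap it in double quotes) reduces to a small predicate. A sketch, with is_keyword() standing in for the sqlite3KeywordCode()!=TK_ID test:

    #include <ctype.h>

    /* Sketch: decide whether an SQL identifier must be double-quoted when
    ** it is written back out, per the rules described for identPut(). */
    static int needs_quoting(const char *zIdent, int (*is_keyword)(const char*)){
      int j;
      if( isdigit((unsigned char)zIdent[0]) ) return 1;
      for(j=0; zIdent[j]; j++){
        if( !isalnum((unsigned char)zIdent[j]) && zIdent[j]!='_' ) return 1;
      }
      return is_keyword(zIdent);
    }

When the predicate is true the identifier is wrapped in double quotes; otherwise it is copied through unchanged.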
*/ -static char *createTableStmt(Table *p, int isTemp){ +static char *createTableStmt(sqlite3 *db, Table *p){ int i, k, n; char *zStmt; - char *zSep, *zSep2, *zEnd, *z; + char *zSep, *zSep2, *zEnd; Column *pCol; n = 0; for(pCol = p->aCol, i=0; inCol; i++, pCol++){ - n += identLength(pCol->zName); - z = pCol->zType; - if( z ){ - n += (strlen(z) + 1); - } + n += identLength(pCol->zName) + 5; } n += identLength(p->zName); - if( n<50 ){ + if( n<50 ){ zSep = ""; zSep2 = ","; zEnd = ")"; @@ -1326,24 +1367,45 @@ zEnd = "\n)"; } n += 35 + 6*p->nCol; - zStmt = sqliteMallocRaw( n ); - if( zStmt==0 ) return 0; - sqlite3_snprintf(n, zStmt, - !OMIT_TEMPDB&&isTemp ? "CREATE TEMP TABLE ":"CREATE TABLE "); - k = strlen(zStmt); + zStmt = sqlite3Malloc( n ); + if( zStmt==0 ){ + db->mallocFailed = 1; + return 0; + } + sqlite3_snprintf(n, zStmt, "CREATE TABLE "); + k = sqlite3Strlen30(zStmt); identPut(zStmt, &k, p->zName); zStmt[k++] = '('; for(pCol=p->aCol, i=0; inCol; i++, pCol++){ + static const char * const azType[] = { + /* SQLITE_AFF_TEXT */ " TEXT", + /* SQLITE_AFF_NONE */ "", + /* SQLITE_AFF_NUMERIC */ " NUM", + /* SQLITE_AFF_INTEGER */ " INT", + /* SQLITE_AFF_REAL */ " REAL" + }; + int len; + const char *zType; + sqlite3_snprintf(n-k, &zStmt[k], zSep); - k += strlen(&zStmt[k]); + k += sqlite3Strlen30(&zStmt[k]); zSep = zSep2; identPut(zStmt, &k, pCol->zName); - if( (z = pCol->zType)!=0 ){ - zStmt[k++] = ' '; - assert( strlen(z)+k+1<=n ); - sqlite3_snprintf(n-k, &zStmt[k], "%s", z); - k += strlen(z); - } + assert( pCol->affinity-SQLITE_AFF_TEXT >= 0 ); + assert( pCol->affinity-SQLITE_AFF_TEXT < sizeof(azType)/sizeof(azType[0]) ); + testcase( pCol->affinity==SQLITE_AFF_TEXT ); + testcase( pCol->affinity==SQLITE_AFF_NONE ); + testcase( pCol->affinity==SQLITE_AFF_NUMERIC ); + testcase( pCol->affinity==SQLITE_AFF_INTEGER ); + testcase( pCol->affinity==SQLITE_AFF_REAL ); + + zType = azType[pCol->affinity - SQLITE_AFF_TEXT]; + len = sqlite3Strlen30(zType); + assert( pCol->affinity==SQLITE_AFF_NONE + || pCol->affinity==sqlite3AffinityType(zType) ); + memcpy(&zStmt[k], zType, len); + k += len; + assert( k<=n ); } sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd); return zStmt; @@ -1379,7 +1441,7 @@ sqlite3 *db = pParse->db; int iDb; - if( (pEnd==0 && pSelect==0) || pParse->nErr || sqlite3MallocFailed() ) { + if( (pEnd==0 && pSelect==0) || db->mallocFailed ){ return; } p = pParse->pNewTable; @@ -1405,7 +1467,7 @@ sNC.pParse = pParse; sNC.pSrcList = &sSrc; sNC.isCheck = 1; - if( sqlite3ExprResolveNames(&sNC, p->pCheck) ){ + if( sqlite3ResolveExprNames(&sNC, p->pCheck) ){ return; } } @@ -1422,8 +1484,7 @@ } /* If not initializing, then create a record for the new table - ** in the SQLITE_MASTER table of the database. The record number - ** for the new table entry should already be on the stack. + ** in the SQLITE_MASTER table of the database. ** ** If this is a TEMPORARY table, write the entry into the auxiliary ** file instead of into the main database file. @@ -1436,13 +1497,12 @@ char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */ v = sqlite3GetVdbe(pParse); - if( v==0 ) return; + if( NEVER(v==0) ) return; - sqlite3VdbeAddOp(v, OP_Close, 0, 0); + sqlite3VdbeAddOp1(v, OP_Close, 0); - /* Create the rootpage for the new table and push it onto the stack. - ** A view has no rootpage, so just push a zero onto the stack for - ** views. Initialize zType at the same time. + /* + ** Initialize zType for the new view or table. 
*/ if( p->pSelect==0 ){ /* A regular table */ @@ -1458,7 +1518,7 @@ /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT ** statement to populate the new table. The root-page number for the - ** new table is on the top of the vdbe stack. + ** new table is in register pParse->regRoot. ** ** Once the SELECT has been coded by sqlite3Select(), it is in a ** suitable state to query for the column names and types to be used @@ -1470,15 +1530,18 @@ ** be redundant. */ if( pSelect ){ + SelectDest dest; Table *pSelTab; - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - sqlite3VdbeAddOp(v, OP_OpenWrite, 1, 0); + + assert(pParse->nTab==1); + sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); + sqlite3VdbeChangeP5(v, 1); pParse->nTab = 2; - sqlite3Select(pParse, pSelect, SRT_Table, 1, 0, 0, 0, 0); - sqlite3VdbeAddOp(v, OP_Close, 1, 0); + sqlite3SelectDestInit(&dest, SRT_Table, 1); + sqlite3Select(pParse, pSelect, &dest); + sqlite3VdbeAddOp1(v, OP_Close, 1); if( pParse->nErr==0 ){ - pSelTab = sqlite3ResultSetOfSelect(pParse, 0, pSelect); + pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect); if( pSelTab==0 ) return; assert( p->aCol==0 ); p->nCol = pSelTab->nCol; @@ -1491,36 +1554,38 @@ /* Compute the complete text of the CREATE statement */ if( pSelect ){ - zStmt = createTableStmt(p, p->pSchema==pParse->db->aDb[1].pSchema); + zStmt = createTableStmt(db, p); }else{ - n = pEnd->z - pParse->sNameToken.z + 1; - zStmt = sqlite3MPrintf("CREATE %s %.*s", zType2, n, pParse->sNameToken.z); + n = (int)(pEnd->z - pParse->sNameToken.z) + 1; + zStmt = sqlite3MPrintf(db, + "CREATE %s %.*s", zType2, n, pParse->sNameToken.z + ); } /* A slot for the record has already been allocated in the ** SQLITE_MASTER table. We just need to update that slot with all - ** the information we've collected. The rowid for the preallocated - ** slot is the 2nd item on the stack. The top of the stack is the - ** root page for the new table (or a 0 if this is a view). + ** the information we've collected. */ sqlite3NestedParse(pParse, "UPDATE %Q.%s " - "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#0, sql=%Q " - "WHERE rowid=#1", + "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q " + "WHERE rowid=#%d", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zType, p->zName, p->zName, - zStmt + pParse->regRoot, + zStmt, + pParse->regRowid ); - sqliteFree(zStmt); - sqlite3ChangeCookie(db, v, iDb); + sqlite3DbFree(db, zStmt); + sqlite3ChangeCookie(pParse, iDb); #ifndef SQLITE_OMIT_AUTOINCREMENT /* Check to see if we need to create an sqlite_sequence table for ** keeping track of autoincrement keys. */ - if( p->autoInc ){ + if( p->tabFlags & TF_Autoincrement ){ Db *pDb = &db->aDb[iDb]; if( pDb->pSchema->pSeqTab==0 ){ sqlite3NestedParse(pParse, @@ -1532,29 +1597,23 @@ #endif /* Reparse everything to update our internal data structures */ - sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, - sqlite3MPrintf("tbl_name='%q'",p->zName), P3_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_ParseSchema, iDb, 0, 0, + sqlite3MPrintf(db, "tbl_name='%q'",p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database. 
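Both the place-holder row created by sqlite3StartTable() and the UPDATE generated here are assembled with sqlite3NestedParse(), whose %q and %Q conversions quote SQL text safely (the "#%d" is the new register reference). The same conversions are exposed through the public sqlite3_mprintf() interface, so a small illustration compiles against an ordinary SQLite build:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Sketch: %q escapes embedded single quotes; %Q additionally wraps the
    ** value in quotes and renders a NULL pointer as the SQL keyword NULL. */
    int main(void){
      char *zSql = sqlite3_mprintf(
          "UPDATE t SET name=%Q WHERE note=%Q", "it's", (char*)0);
      if( zSql ){
        printf("%s\n", zSql);   /* UPDATE t SET name='it''s' WHERE note=NULL */
        sqlite3_free(zSql);
      }
      return 0;
    }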
*/ - if( db->init.busy && pParse->nErr==0 ){ + if( db->init.busy ){ Table *pOld; - FKey *pFKey; Schema *pSchema = p->pSchema; - pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, strlen(p->zName)+1,p); + pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, + sqlite3Strlen30(p->zName),p); if( pOld ){ assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ + db->mallocFailed = 1; return; } -#ifndef SQLITE_OMIT_FOREIGN_KEY - for(pFKey=p->pFKey; pFKey; pFKey=pFKey->pNextFrom){ - int nTo = strlen(pFKey->zTo) + 1; - pFKey->pNextTo = sqlite3HashFind(&pSchema->aFKey, pFKey->zTo, nTo); - sqlite3HashInsert(&pSchema->aFKey, pFKey->zTo, nTo, pFKey); - } -#endif pParse->pNewTable = 0; db->nTable++; db->flags |= SQLITE_InternChanges; @@ -1567,7 +1626,7 @@ if( pCons->z==0 ){ pCons = pEnd; } - nName = (const char *)pCons->z - zName; + nName = (int)((const char *)pCons->z - zName); p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); } #endif @@ -1589,29 +1648,32 @@ ){ Table *p; int n; - const unsigned char *z; + const char *z; Token sEnd; DbFixer sFix; Token *pName; int iDb; + sqlite3 *db = pParse->db; if( pParse->nVar>0 ){ sqlite3ErrorMsg(pParse, "parameters are not allowed in views"); - sqlite3SelectDelete(pSelect); + sqlite3SelectDelete(db, pSelect); return; } sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); p = pParse->pNewTable; - if( p==0 || pParse->nErr ){ - sqlite3SelectDelete(pSelect); + if( p==0 ){ + sqlite3SelectDelete(db, pSelect); return; } + assert( pParse->nErr==0 ); /* If sqlite3StartTable return non-NULL then + ** there could not have been an error */ sqlite3TwoPartName(pParse, pName1, pName2, &pName); - iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema); + iDb = sqlite3SchemaToIndex(db, p->pSchema); if( sqlite3FixInit(&sFix, pParse, iDb, "view", pName) && sqlite3FixSelect(&sFix, pSelect) ){ - sqlite3SelectDelete(pSelect); + sqlite3SelectDelete(db, pSelect); return; } @@ -1620,12 +1682,12 @@ ** allocated rather than point to the input string - which means that ** they will persist after the current sqlite3_exec() call returns. */ - p->pSelect = sqlite3SelectDup(pSelect); - sqlite3SelectDelete(pSelect); - if( sqlite3MallocFailed() ){ + p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); + sqlite3SelectDelete(db, pSelect); + if( db->mallocFailed ){ return; } - if( !pParse->db->init.busy ){ + if( !db->init.busy ){ sqlite3ViewGetColumnNames(pParse, p); } @@ -1633,13 +1695,13 @@ ** the end. */ sEnd = pParse->sLastToken; - if( sEnd.z[0]!=0 && sEnd.z[0]!=';' ){ + if( ALWAYS(sEnd.z[0]!=0) && sEnd.z[0]!=';' ){ sEnd.z += sEnd.n; } sEnd.n = 0; - n = sEnd.z - pBegin->z; - z = (const unsigned char*)pBegin->z; - while( n>0 && (z[n-1]==';' || isspace(z[n-1])) ){ n--; } + n = (int)(sEnd.z - pBegin->z); + z = pBegin->z; + while( ALWAYS(n>0) && sqlite3Isspace(z[n-1]) ){ n--; } sEnd.z = &z[n-1]; sEnd.n = 1; @@ -1660,6 +1722,8 @@ Select *pSel; /* Copy of the SELECT that implements the view */ int nErr = 0; /* Number of errors encountered */ int n; /* Temporarily holds the number of cursors assigned */ + sqlite3 *db = pParse->db; /* Database connection for malloc errors */ + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); assert( pTable ); @@ -1683,8 +1747,13 @@ ** CREATE VIEW one AS SELECT * FROM two; ** CREATE VIEW two AS SELECT * FROM one; ** - ** Actually, this error is caught previously and so the following test - ** should always fail. But we will leave it in place just to be safe. 
+ ** Actually, the error above is now caught prior to reaching this point. + ** But the following test is still important as it does come up + ** in the following: + ** + ** CREATE TABLE main.ex1(a); + ** CREATE TEMP VIEW ex1 AS SELECT a FROM ex1; + ** SELECT * FROM temp.ex1; */ if( pTable->nCol<0 ){ sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName); @@ -1700,12 +1769,22 @@ ** statement that defines the view. */ assert( pTable->pSelect ); - pSel = sqlite3SelectDup(pTable->pSelect); + pSel = sqlite3SelectDup(db, pTable->pSelect, 0); if( pSel ){ + u8 enableLookaside = db->lookaside.bEnabled; n = pParse->nTab; sqlite3SrcListAssignCursors(pParse, pSel->pSrc); pTable->nCol = -1; - pSelTab = sqlite3ResultSetOfSelect(pParse, 0, pSel); + db->lookaside.bEnabled = 0; +#ifndef SQLITE_OMIT_AUTHORIZATION + xAuth = db->xAuth; + db->xAuth = 0; + pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); + db->xAuth = xAuth; +#else + pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); +#endif + db->lookaside.bEnabled = enableLookaside; pParse->nTab = n; if( pSelTab ){ assert( pTable->aCol==0 ); @@ -1719,7 +1798,7 @@ pTable->nCol = 0; nErr++; } - sqlite3SelectDelete(pSel); + sqlite3SelectDelete(db, pSel); } else { nErr++; } @@ -1794,20 +1873,23 @@ */ static void destroyRootPage(Parse *pParse, int iTable, int iDb){ Vdbe *v = sqlite3GetVdbe(pParse); - sqlite3VdbeAddOp(v, OP_Destroy, iTable, iDb); + int r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_Destroy, iTable, r1, iDb); #ifndef SQLITE_OMIT_AUTOVACUUM - /* OP_Destroy pushes an integer onto the stack. If this integer + /* OP_Destroy stores an in integer r1. If this integer ** is non-zero, then it is the root page number of a table moved to ** location iTable. The following code modifies the sqlite_master table to ** reflect this. ** - ** The "#0" in the SQL is a special constant that means whatever value - ** is on the top of the stack. See sqlite3RegisterExpr(). + ** The "#NNN" in the SQL is a special constant that means whatever value + ** is in register NNN. See grammar rules associated with the TK_REGISTER + ** token for additional information. */ sqlite3NestedParse(pParse, - "UPDATE %Q.%s SET rootpage=%d WHERE #0 AND rootpage=#0", - pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable); + "UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d", + pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable, r1, r1); #endif + sqlite3ReleaseTempReg(pParse, r1); } /* @@ -1879,11 +1961,13 @@ sqlite3 *db = pParse->db; int iDb; - if( pParse->nErr || sqlite3MallocFailed() ){ + if( db->mallocFailed ){ goto exit_drop_table; } + assert( pParse->nErr==0 ); assert( pName->nSrc==1 ); - pTab = sqlite3LocateTable(pParse, pName->a[0].zName, pName->a[0].zDatabase); + pTab = sqlite3LocateTable(pParse, isView, + pName->a[0].zName, pName->a[0].zDatabase); if( pTab==0 ){ if( noErr ){ @@ -1893,6 +1977,13 @@ } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 && iDbnDb ); + + /* If pTab is a virtual table, call ViewGetColumnNames() to ensure + ** it is initialized. 
+ */ + if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto exit_drop_table; + } #ifndef SQLITE_OMIT_AUTHORIZATION { int code; @@ -1910,9 +2001,6 @@ } #ifndef SQLITE_OMIT_VIRTUALTABLE }else if( IsVirtual(pTab) ){ - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto exit_drop_table; - } code = SQLITE_DROP_VTABLE; zArg2 = pTab->pMod->zName; #endif @@ -1931,7 +2019,7 @@ } } #endif - if( pTab->readOnly || pTab==db->aDb[iDb].pSchema->pSeqTab ){ + if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){ sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName); goto exit_drop_table; } @@ -1957,14 +2045,11 @@ if( v ){ Trigger *pTrigger; Db *pDb = &db->aDb[iDb]; - sqlite3BeginWriteOperation(pParse, 0, iDb); + sqlite3BeginWriteOperation(pParse, 1, iDb); #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ - Vdbe *v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp(v, OP_VBegin, 0, 0); - } + sqlite3VdbeAddOp0(v, OP_VBegin); } #endif @@ -1972,7 +2057,7 @@ ** is generated to remove entries from sqlite_master and/or ** sqlite_temp_master if required. */ - pTrigger = pTab->pTrigger; + pTrigger = sqlite3TriggerList(pParse, pTab); while( pTrigger ){ assert( pTrigger->pSchema==pTab->pSchema || pTrigger->pSchema==db->aDb[1].pSchema ); @@ -1986,7 +2071,7 @@ ** at the btree level, in case the sqlite_sequence table needs to ** move as a result of the drop (can happen in auto-vacuum mode). */ - if( pTab->autoInc ){ + if( pTab->tabFlags & TF_Autoincrement ){ sqlite3NestedParse(pParse, "DELETE FROM %s.sqlite_sequence WHERE name=%Q", pDb->zName, pTab->zName @@ -2004,6 +2089,14 @@ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); + + /* Drop any statistics from the sqlite_stat1 table, if it exists */ + if( sqlite3FindTable(db, "sqlite_stat1", db->aDb[iDb].zName) ){ + sqlite3NestedParse(pParse, + "DELETE FROM %Q.sqlite_stat1 WHERE tbl=%Q", pDb->zName, pTab->zName + ); + } + if( !isView && !IsVirtual(pTab) ){ destroyTable(pParse, pTab); } @@ -2012,15 +2105,15 @@ ** the schema cookie. */ if( IsVirtual(pTab) ){ - sqlite3VdbeOp3(v, OP_VDestroy, iDb, 0, pTab->zName, 0); + sqlite3VdbeAddOp4(v, OP_VDestroy, iDb, 0, 0, pTab->zName, 0); } - sqlite3VdbeOp3(v, OP_DropTable, iDb, 0, pTab->zName, 0); - sqlite3ChangeCookie(db, v, iDb); + sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); + sqlite3ChangeCookie(pParse, iDb); } sqliteViewResetAll(db, iDb); exit_drop_table: - sqlite3SrcListDelete(pName); + sqlite3SrcListDelete(db, pName); } /* @@ -2034,9 +2127,7 @@ ** in the ON DELETE, ON UPDATE and ON INSERT clauses. ** ** An FKey structure is created and added to the table currently -** under construction in the pParse->pNewTable field. The new FKey -** is not linked into db->aFKey at this point - that does not happen -** until sqlite3EndTable(). +** under construction in the pParse->pNewTable field. ** ** The foreign key is set for IMMEDIATE processing. A subsequent call ** to sqlite3DeferForeignKey() might change this to DEFERRED. @@ -2048,6 +2139,7 @@ ExprList *pToCol, /* Columns in the other table */ int flags /* Conflict resolution algorithms. 
*/ ){ + sqlite3 *db = pParse->db; #ifndef SQLITE_OMIT_FOREIGN_KEY FKey *pFKey = 0; Table *p = pParse->pNewTable; @@ -2057,10 +2149,10 @@ char *z; assert( pTo!=0 ); - if( p==0 || pParse->nErr || IN_DECLARE_VTAB ) goto fk_end; + if( p==0 || IN_DECLARE_VTAB ) goto fk_end; if( pFromCol==0 ){ int iCol = p->nCol-1; - if( iCol<0 ) goto fk_end; + if( NEVER(iCol<0) ) goto fk_end; if( pToCol && pToCol->nExpr!=1 ){ sqlite3ErrorMsg(pParse, "foreign key on %s" " should reference only one column of table %T", @@ -2076,24 +2168,24 @@ }else{ nCol = pFromCol->nExpr; } - nByte = sizeof(*pFKey) + nCol*sizeof(pFKey->aCol[0]) + pTo->n + 1; + nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1; if( pToCol ){ for(i=0; inExpr; i++){ - nByte += strlen(pToCol->a[i].zName) + 1; + nByte += sqlite3Strlen30(pToCol->a[i].zName) + 1; } } - pFKey = sqliteMalloc( nByte ); - if( pFKey==0 ) goto fk_end; + pFKey = sqlite3DbMallocZero(db, nByte ); + if( pFKey==0 ){ + goto fk_end; + } pFKey->pFrom = p; pFKey->pNextFrom = p->pFKey; - z = (char*)&pFKey[1]; - pFKey->aCol = (struct sColMap*)z; - z += sizeof(struct sColMap)*nCol; + z = (char*)&pFKey->aCol[nCol]; pFKey->zTo = z; memcpy(z, pTo->z, pTo->n); z[pTo->n] = 0; + sqlite3Dequote(z); z += pTo->n+1; - pFKey->pNextTo = 0; pFKey->nCol = nCol; if( pFromCol==0 ){ pFKey->aCol[0].iFrom = p->nCol-1; @@ -2116,7 +2208,7 @@ } if( pToCol ){ for(i=0; ia[i].zName); + int n = sqlite3Strlen30(pToCol->a[i].zName); pFKey->aCol[i].zCol = z; memcpy(z, pToCol->a[i].zName, n); z[n] = 0; @@ -2124,9 +2216,9 @@ } } pFKey->isDeferred = 0; - pFKey->deleteConf = flags & 0xff; - pFKey->updateConf = (flags >> 8 ) & 0xff; - pFKey->insertConf = (flags >> 16 ) & 0xff; + pFKey->deleteConf = (u8)(flags & 0xff); + pFKey->updateConf = (u8)((flags >> 8 ) & 0xff); + pFKey->insertConf = (u8)((flags >> 16 ) & 0xff); /* Link the foreign key to the table as the last step. */ @@ -2134,10 +2226,10 @@ pFKey = 0; fk_end: - sqliteFree(pFKey); + sqlite3DbFree(db, pFKey); #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ - sqlite3ExprListDelete(pFromCol); - sqlite3ExprListDelete(pToCol); + sqlite3ExprListDelete(db, pFromCol); + sqlite3ExprListDelete(db, pToCol); } /* @@ -2152,7 +2244,8 @@ Table *pTab; FKey *pFKey; if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return; - pFKey->isDeferred = isDeferred; + assert( isDeferred==0 || isDeferred==1 ); + pFKey->isDeferred = (u8)isDeferred; #endif } @@ -2162,24 +2255,27 @@ ** content of an index in response to a REINDEX command. ** ** if memRootPage is not negative, it means that the index is newly -** created. The memory cell specified by memRootPage contains the +** created. The register specified by memRootPage contains the ** root page number of the index. If memRootPage is negative, then ** the index already exists and must be cleared before being refilled and ** the root page number of the index is taken from pIndex->tnum. 
*/ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ Table *pTab = pIndex->pTable; /* The table that is indexed */ - int iTab = pParse->nTab; /* Btree cursor used for pTab */ - int iIdx = pParse->nTab+1; /* Btree cursor used for pIndex */ + int iTab = pParse->nTab++; /* Btree cursor used for pTab */ + int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ int addr1; /* Address of top of loop */ int tnum; /* Root page of index */ Vdbe *v; /* Generate code into this virtual machine */ KeyInfo *pKey; /* KeyInfo for index */ - int iDb = sqlite3SchemaToIndex(pParse->db, pIndex->pSchema); + int regIdxKey; /* Registers containing the index key */ + int regRecord; /* Register holding assemblied index record */ + sqlite3 *db = pParse->db; /* The database connection */ + int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, - pParse->db->aDb[iDb].zName ) ){ + db->aDb[iDb].zName ) ){ return; } #endif @@ -2190,34 +2286,46 @@ v = sqlite3GetVdbe(pParse); if( v==0 ) return; if( memRootPage>=0 ){ - sqlite3VdbeAddOp(v, OP_MemLoad, memRootPage, 0); - tnum = 0; + tnum = memRootPage; }else{ tnum = pIndex->tnum; - sqlite3VdbeAddOp(v, OP_Clear, tnum, iDb); + sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb); } - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); pKey = sqlite3IndexKeyinfo(pParse, pIndex); - sqlite3VdbeOp3(v, OP_OpenWrite, iIdx, tnum, (char *)pKey, P3_KEYINFO_HANDOFF); + sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, + (char *)pKey, P4_KEYINFO_HANDOFF); + if( memRootPage>=0 ){ + sqlite3VdbeChangeP5(v, 1); + } sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); - addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iTab, 0); - sqlite3GenerateIndexKey(v, pIndex, iTab); + addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); + regRecord = sqlite3GetTempReg(pParse); + regIdxKey = sqlite3GenerateIndexKey(pParse, pIndex, iTab, regRecord, 1); if( pIndex->onError!=OE_None ){ - int curaddr = sqlite3VdbeCurrentAddr(v); - int addr2 = curaddr+4; - sqlite3VdbeChangeP2(v, curaddr-1, addr2); - sqlite3VdbeAddOp(v, OP_Rowid, iTab, 0); - sqlite3VdbeAddOp(v, OP_AddImm, 1, 0); - sqlite3VdbeAddOp(v, OP_IsUnique, iIdx, addr2); - sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, OE_Abort, - "indexed columns are not unique", P3_STATIC); - assert( sqlite3MallocFailed() || addr2==sqlite3VdbeCurrentAddr(v) ); - } - sqlite3VdbeAddOp(v, OP_IdxInsert, iIdx, 0); - sqlite3VdbeAddOp(v, OP_Next, iTab, addr1+1); + const int regRowid = regIdxKey + pIndex->nColumn; + const int j2 = sqlite3VdbeCurrentAddr(v) + 2; + void * const pRegKey = SQLITE_INT_TO_PTR(regIdxKey); + + /* The registers accessed by the OP_IsUnique opcode were allocated + ** using sqlite3GetTempRange() inside of the sqlite3GenerateIndexKey() + ** call above. Just before that function was freed they were released + ** (made available to the compiler for reuse) using + ** sqlite3ReleaseTempRange(). So in some ways having the OP_IsUnique + ** opcode use the values stored within seems dangerous. However, since + ** we can be sure that no other temp registers have been allocated + ** since sqlite3ReleaseTempRange() was called, it is safe to do so. 
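The justification above for reusing registers freed by sqlite3ReleaseTempRange() relies on the parser's scratch registers coming from a tiny allocate/release pool: a released register is parked and handed back by the next sqlite3GetTempReg() call. A standalone sketch of that pattern (the structure and array size below are assumptions, not the actual Parse fields):

    /* Sketch: scratch-register pool in the spirit of sqlite3GetTempReg()
    ** and sqlite3ReleaseTempReg().  Registers are numbered from 1 and a
    ** small stack of released registers is kept for reuse. */
    typedef struct RegPool {
      int nMem;       /* highest register number handed out so far */
      int nFree;      /* entries currently in aFree[] */
      int aFree[8];   /* recently released registers */
    } RegPool;

    static int get_temp_reg(RegPool *p){
      if( p->nFree>0 ) return p->aFree[--p->nFree];
      return ++p->nMem;
    }

    static void release_temp_reg(RegPool *p, int iReg){
      if( iReg && p->nFree<(int)(sizeof(p->aFree)/sizeof(p->aFree[0])) ){
        p->aFree[p->nFree++] = iReg;
      }
    }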
+ */ + sqlite3VdbeAddOp4(v, OP_IsUnique, iIdx, j2, regRowid, pRegKey, P4_INT32); + sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT, OE_Abort, 0, + "indexed columns are not unique", P4_STATIC); + } + sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdx, regRecord); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + sqlite3ReleaseTempReg(pParse, regRecord); + sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp(v, OP_Close, iTab, 0); - sqlite3VdbeAddOp(v, OP_Close, iIdx, 0); + sqlite3VdbeAddOp1(v, OP_Close, iTab); + sqlite3VdbeAddOp1(v, OP_Close, iIdx); } /* @@ -2261,7 +2369,12 @@ int nExtra = 0; char *zExtra; - if( pParse->nErr || sqlite3MallocFailed() || IN_DECLARE_VTAB ){ + assert( pStart==0 || pEnd!=0 ); /* pEnd must be non-NULL if pStart is */ + assert( pParse->nErr==0 ); /* Never called with prior errors */ + if( db->mallocFailed || IN_DECLARE_VTAB ){ + goto exit_create_index; + } + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_create_index; } @@ -2280,11 +2393,14 @@ #ifndef SQLITE_OMIT_TEMPDB /* If the index name was unqualified, check if the the table - ** is a temp table. If so, set the database to 1. + ** is a temp table. If so, set the database to 1. Do not do this + ** if initialising a database schema. */ - pTab = sqlite3SrcListLookup(pParse, pTblName); - if( pName2 && pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ - iDb = 1; + if( !db->init.busy ){ + pTab = sqlite3SrcListLookup(pParse, pTblName); + if( pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ + iDb = 1; + } } #endif @@ -2295,9 +2411,9 @@ ** sqlite3FixSrcList can never fail. */ assert(0); } - pTab = sqlite3LocateTable(pParse, pTblName->a[0].zName, + pTab = sqlite3LocateTable(pParse, 0, pTblName->a[0].zName, pTblName->a[0].zDatabase); - if( !pTab ) goto exit_create_index; + if( !pTab || db->mallocFailed ) goto exit_create_index; assert( db->aDb[iDb].pSchema==pTab->pSchema ); }else{ assert( pName==0 ); @@ -2307,8 +2423,10 @@ } pDb = &db->aDb[iDb]; - if( pTab==0 || pParse->nErr ) goto exit_create_index; - if( pTab->readOnly ){ + assert( pTab!=0 ); + assert( pParse->nErr==0 ); + if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 + && memcmp(&pTab->zName[7],"altertab_",9)!=0 ){ sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); goto exit_create_index; } @@ -2339,14 +2457,12 @@ ** own name. */ if( pName ){ - zName = sqlite3NameFromToken(pName); - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto exit_create_index; + zName = sqlite3NameFromToken(db, pName); if( zName==0 ) goto exit_create_index; if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto exit_create_index; } if( !db->init.busy ){ - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto exit_create_index; if( sqlite3FindTable(db, zName, 0)!=0 ){ sqlite3ErrorMsg(pParse, "there is already a table named %s", zName); goto exit_create_index; @@ -2359,14 +2475,13 @@ goto exit_create_index; } }else{ - char zBuf[30]; int n; Index *pLoop; for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){} - sqlite3_snprintf(sizeof(zBuf),zBuf,"_%d",n); - zName = 0; - sqlite3SetString(&zName, "sqlite_autoindex_", pTab->zName, zBuf, (char*)0); - if( zName==0 ) goto exit_create_index; + zName = sqlite3MPrintf(db, "sqlite_autoindex_%s_%d", pTab->zName, n); + if( zName==0 ){ + goto exit_create_index; + } } /* Check for authorization to create an index. @@ -2390,11 +2505,12 @@ ** So create a fake list to simulate this. 
*/ if( pList==0 ){ - nullId.z = (u8*)pTab->aCol[pTab->nCol-1].zName; - nullId.n = strlen((char*)nullId.z); - pList = sqlite3ExprListAppend(0, 0, &nullId); + nullId.z = pTab->aCol[pTab->nCol-1].zName; + nullId.n = sqlite3Strlen30((char*)nullId.z); + pList = sqlite3ExprListAppend(pParse, 0, 0); if( pList==0 ) goto exit_create_index; - pList->a[0].sortOrder = sortOrder; + sqlite3ExprListSetName(pParse, pList, &nullId, 0); + pList->a[0].sortOrder = (u8)sortOrder; } /* Figure out how many bytes of space are required to store explicitly @@ -2403,16 +2519,21 @@ for(i=0; inExpr; i++){ Expr *pExpr = pList->a[i].pExpr; if( pExpr ){ - nExtra += (1 + strlen(pExpr->pColl->zName)); + CollSeq *pColl = pExpr->pColl; + /* Either pColl!=0 or there was an OOM failure. But if an OOM + ** failure we have quit before reaching this point. */ + if( ALWAYS(pColl) ){ + nExtra += (1 + sqlite3Strlen30(pColl->zName)); + } } } /* ** Allocate the index structure. */ - nName = strlen(zName); + nName = sqlite3Strlen30(zName); nCol = pList->nExpr; - pIndex = sqliteMalloc( + pIndex = sqlite3DbMallocZero(db, sizeof(Index) + /* Index structure */ sizeof(int)*nCol + /* Index.aiColumn */ sizeof(int)*(nCol+1) + /* Index.aiRowEst */ @@ -2421,7 +2542,9 @@ nName + 1 + /* Index.zName */ nExtra /* Collation sequence names */ ); - if( sqlite3MallocFailed() ) goto exit_create_index; + if( db->mallocFailed ){ + goto exit_create_index; + } pIndex->azColl = (char**)(&pIndex[1]); pIndex->aiColumn = (int *)(&pIndex->azColl[nCol]); pIndex->aiRowEst = (unsigned *)(&pIndex->aiColumn[nCol]); @@ -2431,8 +2554,8 @@ memcpy(pIndex->zName, zName, nName+1); pIndex->pTable = pTab; pIndex->nColumn = pList->nExpr; - pIndex->onError = onError; - pIndex->autoIndex = pName==0; + pIndex->onError = (u8)onError; + pIndex->autoIndex = (u8)(pName==0); pIndex->pSchema = db->aDb[iDb].pSchema; /* Check to see if we should honor DESC requests on index columns @@ -2446,6 +2569,12 @@ /* Scan the names of the columns of the table to be indexed and ** load the column indices into the Index structure. Report an error ** if any column is not found. + ** + ** TODO: Add a test to make sure that the same column is not named + ** more than once within the same index. Only the first instance of + ** the column will ever be used by the optimizer. Note that using the + ** same column more than once cannot be an error because that would + ** break backwards compatibility - it needs to be a warning. */ for(i=0, pListItem=pList->a; inExpr; i++, pListItem++){ const char *zColName = pListItem->zName; @@ -2461,30 +2590,33 @@ pTab->zName, zColName); goto exit_create_index; } - /* TODO: Add a test to make sure that the same column is not named - ** more than once within the same index. Only the first instance of - ** the column will ever be used by the optimizer. Note that using the - ** same column more than once cannot be an error because that would - ** break backwards compatibility - it needs to be a warning. - */ pIndex->aiColumn[i] = j; - if( pListItem->pExpr ){ - assert( pListItem->pExpr->pColl ); + /* Justification of the ALWAYS(pListItem->pExpr->pColl): Because of + ** the way the "idxlist" non-terminal is constructed by the parser, + ** if pListItem->pExpr is not null then either pListItem->pExpr->pColl + ** must exist or else there must have been an OOM error. But if there + ** was an OOM error, we would never reach this point. 
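The Index allocated a few hunks up packs the structure, its azColl, aiColumn, aiRowEst and aSortOrder arrays, the index name and the extra collation-name bytes into one sqlite3DbMallocZero() block, then carves typed pointers out of the tail. A reduced standalone sketch of the same single-allocation layout:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: header plus two trailing arrays and a name string obtained
    ** with one calloc(); a single free() releases everything. */
    typedef struct IdxSketch {
      char **azColl;     /* nCol collation-name slots */
      int   *aiColumn;   /* nCol column numbers */
      char  *zName;      /* index name */
    } IdxSketch;

    static IdxSketch *alloc_idx(const char *zName, int nCol){
      size_t nName = strlen(zName);
      IdxSketch *p = calloc(1, sizeof(*p)
                               + nCol*sizeof(char*)   /* azColl */
                               + nCol*sizeof(int)     /* aiColumn */
                               + nName + 1);          /* zName */
      if( p==0 ) return 0;
      p->azColl   = (char**)&p[1];
      p->aiColumn = (int*)&p->azColl[nCol];
      p->zName    = (char*)&p->aiColumn[nCol];
      memcpy(p->zName, zName, nName+1);
      return p;
    }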
*/ + if( pListItem->pExpr && ALWAYS(pListItem->pExpr->pColl) ){ + int nColl; + zColl = pListItem->pExpr->pColl->zName; + nColl = sqlite3Strlen30(zColl) + 1; + assert( nExtra>=nColl ); + memcpy(zExtra, zColl, nColl); zColl = zExtra; - sqlite3_snprintf(nExtra, zExtra, "%s", pListItem->pExpr->pColl->zName); - zExtra += (strlen(zColl) + 1); + zExtra += nColl; + nExtra -= nColl; }else{ zColl = pTab->aCol[j].zColl; if( !zColl ){ zColl = db->pDfltColl->zName; } } - if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl, -1) ){ + if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){ goto exit_create_index; } pIndex->azColl[i] = zColl; requestedSortOrder = pListItem->sortOrder & sortOrderMask; - pIndex->aSortOrder[i] = requestedSortOrder; + pIndex->aSortOrder[i] = (u8)requestedSortOrder; } sqlite3DefaultRowEst(pIndex); @@ -2501,6 +2633,14 @@ ** so, don't bother creating this one. This only applies to ** automatically created indices. Users can do as they wish with ** explicit indices. + ** + ** Two UNIQUE or PRIMARY KEY constraints are considered equivalent + ** (and thus suppressing the second one) even if they have different + ** sort orders. + ** + ** If there are different collating sequences or if the columns of + ** the constraint occur in different orders, then the constraints are + ** considered distinct and both result in separate indices. */ Index *pIdx; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ @@ -2511,10 +2651,11 @@ if( pIdx->nColumn!=pIndex->nColumn ) continue; for(k=0; knColumn; k++){ - const char *z1 = pIdx->azColl[k]; - const char *z2 = pIndex->azColl[k]; + const char *z1; + const char *z2; if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break; - if( pIdx->aSortOrder[k]!=pIndex->aSortOrder[k] ) break; + z1 = pIdx->azColl[k]; + z2 = pIndex->azColl[k]; if( z1!=z2 && sqlite3StrICmp(z1, z2) ) break; } if( k==pIdx->nColumn ){ @@ -2545,9 +2686,11 @@ if( db->init.busy ){ Index *p; p = sqlite3HashInsert(&pIndex->pSchema->idxHash, - pIndex->zName, strlen(pIndex->zName)+1, pIndex); + pIndex->zName, sqlite3Strlen30(pIndex->zName), + pIndex); if( p ){ assert( p==pIndex ); /* Malloc must have failed */ + db->mallocFailed = 1; goto exit_create_index; } db->flags |= SQLITE_InternChanges; @@ -2571,10 +2714,10 @@ ** has just been created, it contains no data and the index initialization ** step can be skipped. */ - else if( db->init.busy==0 ){ + else{ /* if( db->init.busy==0 ) */ Vdbe *v; char *zStmt; - int iMem = pParse->nMem++; + int iMem = ++pParse->nMem; v = sqlite3GetVdbe(pParse); if( v==0 ) goto exit_create_index; @@ -2583,15 +2726,15 @@ /* Create the rootpage for the index */ sqlite3BeginWriteOperation(pParse, 1, iDb); - sqlite3VdbeAddOp(v, OP_CreateIndex, iDb, 0); - sqlite3VdbeAddOp(v, OP_MemStore, iMem, 0); + sqlite3VdbeAddOp2(v, OP_CreateIndex, iDb, iMem); /* Gather the complete text of the CREATE INDEX statement into ** the zStmt variable */ - if( pStart && pEnd ){ + if( pStart ){ + assert( pEnd!=0 ); /* A named index with an explicit CREATE INDEX statement */ - zStmt = sqlite3MPrintf("CREATE%s INDEX %.*s", + zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s", onError==OE_None ? 
"" : " UNIQUE", pEnd->z - pName->z + 1, pName->z); @@ -2604,31 +2747,32 @@ /* Add an entry in sqlite_master for this index */ sqlite3NestedParse(pParse, - "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#0,%Q);", + "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName, pTab->zName, + iMem, zStmt ); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqliteFree(zStmt); + sqlite3DbFree(db, zStmt); /* Fill the index with data and reparse the schema. Code an OP_Expire ** to invalidate all pre-compiled statements. */ if( pTblName ){ sqlite3RefillIndex(pParse, pIndex, iMem); - sqlite3ChangeCookie(db, v, iDb); - sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, - sqlite3MPrintf("name='%q'", pIndex->zName), P3_DYNAMIC); - sqlite3VdbeAddOp(v, OP_Expire, 0, 0); + sqlite3ChangeCookie(pParse, iDb); + sqlite3VdbeAddOp4(v, OP_ParseSchema, iDb, 0, 0, + sqlite3MPrintf(db, "name='%q'", pIndex->zName), P4_DYNAMIC); + sqlite3VdbeAddOp1(v, OP_Expire, 0); } } /* When adding an index to the list of indices for a table, make ** sure all indices labeled OE_Replace come after all those labeled - ** OE_Ignore. This is necessary for the correct operation of UPDATE - ** and INSERT. + ** OE_Ignore. This is necessary for the correct constraint check + ** processing (in sqlite3GenerateConstraintChecks()) as part of + ** UPDATE and INSERT statements. */ if( db->init.busy || pTblName==0 ){ if( onError!=OE_Replace || pTab->pIndex==0 @@ -2649,31 +2793,16 @@ /* Clean up before exiting */ exit_create_index: if( pIndex ){ - freeIndex(pIndex); + sqlite3_free(pIndex->zColAff); + sqlite3DbFree(db, pIndex); } - sqlite3ExprListDelete(pList); - sqlite3SrcListDelete(pTblName); - sqliteFree(zName); + sqlite3ExprListDelete(db, pList); + sqlite3SrcListDelete(db, pTblName); + sqlite3DbFree(db, zName); return; } /* -** Generate code to make sure the file format number is at least minFormat. -** The generated code will increase the file format number if necessary. -*/ -void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minFormat){ - Vdbe *v; - v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 1); - sqlite3VdbeAddOp(v, OP_Integer, minFormat, 0); - sqlite3VdbeAddOp(v, OP_Ge, 0, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeAddOp(v, OP_Integer, minFormat, 0); - sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 1); - } -} - -/* ** Fill the Index.aiRowEst[] array with default information - information ** to be used when we have not run the ANALYZE command. 
** @@ -2718,7 +2847,8 @@ sqlite3 *db = pParse->db; int iDb; - if( pParse->nErr || sqlite3MallocFailed() ){ + assert( pParse->nErr==0 ); /* Never called with prior errors */ + if( db->mallocFailed ){ goto exit_drop_index; } assert( pName->nSrc==1 ); @@ -2758,18 +2888,25 @@ /* Generate code to remove the index and from the master table */ v = sqlite3GetVdbe(pParse); if( v ){ + sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName ); - sqlite3ChangeCookie(db, v, iDb); + if( sqlite3FindTable(db, "sqlite_stat1", db->aDb[iDb].zName) ){ + sqlite3NestedParse(pParse, + "DELETE FROM %Q.sqlite_stat1 WHERE idx=%Q", + db->aDb[iDb].zName, pIndex->zName + ); + } + sqlite3ChangeCookie(pParse, iDb); destroyRootPage(pParse, pIndex->tnum, iDb); - sqlite3VdbeOp3(v, OP_DropIndex, iDb, 0, pIndex->zName, 0); + sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0); } exit_drop_index: - sqlite3SrcListDelete(pName); + sqlite3SrcListDelete(db, pName); } /* @@ -2788,6 +2925,7 @@ ** pointer if the array was resized. */ void *sqlite3ArrayAllocate( + sqlite3 *db, /* Connection to notify of malloc failures */ void *pArray, /* Array of objects. Might be reallocated */ int szEntry, /* Size of each object in the array */ int initSize, /* Suggested initial allocation, in elements */ @@ -2800,12 +2938,12 @@ void *pNew; int newSize; newSize = (*pnAlloc)*2 + initSize; - pNew = sqliteRealloc(pArray, newSize*szEntry); + pNew = sqlite3DbRealloc(db, pArray, newSize*szEntry); if( pNew==0 ){ *pIdx = -1; return pArray; } - *pnAlloc = newSize; + *pnAlloc = sqlite3DbMallocSize(db, pNew)/szEntry; pArray = pNew; } z = (char*)pArray; @@ -2821,14 +2959,15 @@ ** ** A new IdList is returned, or NULL if malloc() fails. */ -IdList *sqlite3IdListAppend(IdList *pList, Token *pToken){ +IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){ int i; if( pList==0 ){ - pList = sqliteMalloc( sizeof(IdList) ); + pList = sqlite3DbMallocZero(db, sizeof(IdList) ); if( pList==0 ) return 0; pList->nAlloc = 0; } pList->a = sqlite3ArrayAllocate( + db, pList->a, sizeof(pList->a[0]), 5, @@ -2837,24 +2976,24 @@ &i ); if( i<0 ){ - sqlite3IdListDelete(pList); + sqlite3IdListDelete(db, pList); return 0; } - pList->a[i].zName = sqlite3NameFromToken(pToken); + pList->a[i].zName = sqlite3NameFromToken(db, pToken); return pList; } /* ** Delete an IdList. */ -void sqlite3IdListDelete(IdList *pList){ +void sqlite3IdListDelete(sqlite3 *db, IdList *pList){ int i; if( pList==0 ) return; for(i=0; inId; i++){ - sqliteFree(pList->a[i].zName); + sqlite3DbFree(db, pList->a[i].zName); } - sqliteFree(pList->a); - sqliteFree(pList); + sqlite3DbFree(db, pList->a); + sqlite3DbFree(db, pList); } /* @@ -2871,10 +3010,80 @@ } /* +** Expand the space allocated for the given SrcList object by +** creating nExtra new slots beginning at iStart. iStart is zero based. +** New slots are zeroed. +** +** For example, suppose a SrcList initially contains two entries: A,B. +** To append 3 new entries onto the end, do this: +** +** sqlite3SrcListEnlarge(db, pSrclist, 3, 2); +** +** After the call above it would contain: A, B, nil, nil, nil. +** If the iStart argument had been 1 instead of 2, then the result +** would have been: A, nil, nil, nil, B. To prepend the new slots, +** the iStart value would be 0. The result then would +** be: nil, nil, nil, A, B. +** +** If a memory allocation fails the SrcList is unchanged. The +** db->mallocFailed flag will be set to true. 
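The behaviour documented above for sqlite3SrcListEnlarge() (grow the array if needed, slide the entries at and after iStart up by nExtra, zero the new slots, and leave the list untouched on OOM) is the classic insert-a-gap operation. A standalone sketch over a plain int array:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch: open up nExtra zeroed slots at index iStart of an array with
    ** *pnUsed entries, preserving the order of the existing entries.  On
    ** OOM the original array is returned unchanged, as the comment above
    ** specifies for the real routine. */
    static int *insert_gap(int *a, int *pnUsed, int nExtra, int iStart){
      int n = *pnUsed;
      int *aNew = realloc(a, (size_t)(n+nExtra)*sizeof(int));
      int i;
      if( aNew==0 ) return a;
      memmove(&aNew[iStart+nExtra], &aNew[iStart], (size_t)(n-iStart)*sizeof(int));
      for(i=iStart; i<iStart+nExtra; i++) aNew[i] = 0;
      *pnUsed = n + nExtra;
      return aNew;
    }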
+*/ +SrcList *sqlite3SrcListEnlarge( + sqlite3 *db, /* Database connection to notify of OOM errors */ + SrcList *pSrc, /* The SrcList to be enlarged */ + int nExtra, /* Number of new slots to add to pSrc->a[] */ + int iStart /* Index in pSrc->a[] of first new slot */ +){ + int i; + + /* Sanity checking on calling parameters */ + assert( iStart>=0 ); + assert( nExtra>=1 ); + assert( pSrc!=0 ); + assert( iStart<=pSrc->nSrc ); + + /* Allocate additional space if needed */ + if( pSrc->nSrc+nExtra>pSrc->nAlloc ){ + SrcList *pNew; + int nAlloc = pSrc->nSrc+nExtra; + int nGot; + pNew = sqlite3DbRealloc(db, pSrc, + sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); + if( pNew==0 ){ + assert( db->mallocFailed ); + return pSrc; + } + pSrc = pNew; + nGot = (sqlite3DbMallocSize(db, pNew) - sizeof(*pSrc))/sizeof(pSrc->a[0])+1; + pSrc->nAlloc = (u16)nGot; + } + + /* Move existing slots that come after the newly inserted slots + ** out of the way */ + for(i=pSrc->nSrc-1; i>=iStart; i--){ + pSrc->a[i+nExtra] = pSrc->a[i]; + } + pSrc->nSrc += (i16)nExtra; + + /* Zero the newly allocated slots */ + memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra); + for(i=iStart; ia[i].iCursor = -1; + } + + /* Return a pointer to the enlarged SrcList */ + return pSrc; +} + + +/* ** Append a new table name to the given SrcList. Create a new SrcList if -** need be. A new entry is created in the SrcList even if pToken is NULL. +** need be. A new entry is created in the SrcList even if pTable is NULL. ** -** A new SrcList is returned, or NULL if malloc() fails. +** A SrcList is returned, or NULL if there is an OOM error. The returned +** SrcList might be the same as the SrcList that was input or it might be +** a new one. If an OOM error does occurs, then the prior value of pList +** that is input to this routine is automatically freed. ** ** If pDatabase is not null, it means that the table has an optional ** database name prefix. Like this: "database.table". The pDatabase @@ -2886,58 +3095,60 @@ ** ** In other words, if call like this: ** -** sqlite3SrcListAppend(A,B,0); +** sqlite3SrcListAppend(D,A,B,0); ** ** Then B is a table name and the database name is unspecified. If called ** like this: ** -** sqlite3SrcListAppend(A,B,C); +** sqlite3SrcListAppend(D,A,B,C); +** +** Then C is the table name and B is the database name. If C is defined +** then so is B. In other words, we never have a case where: ** -** Then C is the table name and B is the database name. +** sqlite3SrcListAppend(D,A,0,C); +** +** Both pTable and pDatabase are assumed to be quoted. They are dequoted +** before being added to the SrcList. */ -SrcList *sqlite3SrcListAppend(SrcList *pList, Token *pTable, Token *pDatabase){ +SrcList *sqlite3SrcListAppend( + sqlite3 *db, /* Connection to notify of malloc failures */ + SrcList *pList, /* Append to this SrcList. 
NULL creates a new SrcList */ + Token *pTable, /* Table to append */ + Token *pDatabase /* Database of the table */ +){ struct SrcList_item *pItem; + assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */ if( pList==0 ){ - pList = sqliteMalloc( sizeof(SrcList) ); + pList = sqlite3DbMallocZero(db, sizeof(SrcList) ); if( pList==0 ) return 0; pList->nAlloc = 1; } - if( pList->nSrc>=pList->nAlloc ){ - SrcList *pNew; - pList->nAlloc *= 2; - pNew = sqliteRealloc(pList, - sizeof(*pList) + (pList->nAlloc-1)*sizeof(pList->a[0]) ); - if( pNew==0 ){ - sqlite3SrcListDelete(pList); - return 0; - } - pList = pNew; + pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc); + if( db->mallocFailed ){ + sqlite3SrcListDelete(db, pList); + return 0; } - pItem = &pList->a[pList->nSrc]; - memset(pItem, 0, sizeof(pList->a[0])); + pItem = &pList->a[pList->nSrc-1]; if( pDatabase && pDatabase->z==0 ){ pDatabase = 0; } - if( pDatabase && pTable ){ + if( pDatabase ){ Token *pTemp = pDatabase; pDatabase = pTable; pTable = pTemp; } - pItem->zName = sqlite3NameFromToken(pTable); - pItem->zDatabase = sqlite3NameFromToken(pDatabase); - pItem->iCursor = -1; - pItem->isPopulated = 0; - pList->nSrc++; + pItem->zName = sqlite3NameFromToken(db, pTable); + pItem->zDatabase = sqlite3NameFromToken(db, pDatabase); return pList; } /* -** Assign cursors to all tables in a SrcList +** Assign VdbeCursor index numbers to all tables in a SrcList */ void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ int i; struct SrcList_item *pItem; - assert(pList || sqlite3MallocFailed() ); + assert(pList || pParse->db->mallocFailed ); if( pList ){ for(i=0, pItem=pList->a; inSrc; i++, pItem++){ if( pItem->iCursor>=0 ) break; @@ -2952,20 +3163,21 @@ /* ** Delete an entire SrcList including all its substructure. */ -void sqlite3SrcListDelete(SrcList *pList){ +void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ int i; struct SrcList_item *pItem; if( pList==0 ) return; for(pItem=pList->a, i=0; inSrc; i++, pItem++){ - sqliteFree(pItem->zDatabase); - sqliteFree(pItem->zName); - sqliteFree(pItem->zAlias); + sqlite3DbFree(db, pItem->zDatabase); + sqlite3DbFree(db, pItem->zName); + sqlite3DbFree(db, pItem->zAlias); + sqlite3DbFree(db, pItem->zIndex); sqlite3DeleteTable(pItem->pTab); - sqlite3SelectDelete(pItem->pSelect); - sqlite3ExprDelete(pItem->pOn); - sqlite3IdListDelete(pItem->pUsing); + sqlite3SelectDelete(db, pItem->pSelect); + sqlite3ExprDelete(db, pItem->pOn); + sqlite3IdListDelete(db, pItem->pUsing); } - sqliteFree(pList); + sqlite3DbFree(db, pList); } /* @@ -2985,6 +3197,7 @@ ** term added. 
*/ SrcList *sqlite3SrcListAppendFromTerm( + Parse *pParse, /* Parsing context */ SrcList *p, /* The left part of the FROM clause already seen */ Token *pTable, /* Name of the table to add to the FROM clause */ Token *pDatabase, /* Name of the database containing pTable */ @@ -2994,16 +3207,18 @@ IdList *pUsing /* The USING clause of a join */ ){ struct SrcList_item *pItem; - p = sqlite3SrcListAppend(p, pTable, pDatabase); - if( p==0 || p->nSrc==0 ){ - sqlite3ExprDelete(pOn); - sqlite3IdListDelete(pUsing); - sqlite3SelectDelete(pSubquery); + sqlite3 *db = pParse->db; + p = sqlite3SrcListAppend(db, p, pTable, pDatabase); + if( p==0 || NEVER(p->nSrc==0) ){ + sqlite3ExprDelete(db, pOn); + sqlite3IdListDelete(db, pUsing); + sqlite3SelectDelete(db, pSubquery); return p; } pItem = &p->a[p->nSrc-1]; - if( pAlias && pAlias->n ){ - pItem->zAlias = sqlite3NameFromToken(pAlias); + assert( pAlias!=0 ); + if( pAlias->n ){ + pItem->zAlias = sqlite3NameFromToken(db, pAlias); } pItem->pSelect = pSubquery; pItem->pOn = pOn; @@ -3012,6 +3227,25 @@ } /* +** Add an INDEXED BY or NOT INDEXED clause to the most recently added +** element of the source-list passed as the second argument. +*/ +void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ + assert( pIndexedBy!=0 ); + if( p && ALWAYS(p->nSrc>0) ){ + struct SrcList_item *pItem = &p->a[p->nSrc-1]; + assert( pItem->notIndexed==0 && pItem->zIndex==0 ); + if( pIndexedBy->n==1 && !pIndexedBy->z ){ + /* A "NOT INDEXED" clause was supplied. See parse.y + ** construct "indexed_opt" for details. */ + pItem->notIndexed = 1; + }else{ + pItem->zIndex = sqlite3NameFromToken(pParse->db, pIndexedBy); + } + } +} + +/* ** When building up a FROM clause in the parser, the join operator ** is initially attached to the left operand. But the code generator ** expects the join operator to be on the right operand. 
This routine @@ -3044,18 +3278,22 @@ Vdbe *v; int i; - if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; - if( pParse->nErr || sqlite3MallocFailed() ) return; - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ) return; - + assert( pParse!=0 ); + db = pParse->db; + assert( db!=0 ); +/* if( db->aDb[0].pBt==0 ) return; */ + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){ + return; + } v = sqlite3GetVdbe(pParse); if( !v ) return; if( type!=TK_DEFERRED ){ for(i=0; inDb; i++){ - sqlite3VdbeAddOp(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); + sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); + sqlite3VdbeUsesBtree(v, i); } } - sqlite3VdbeAddOp(v, OP_AutoCommit, 0, 0); + sqlite3VdbeAddOp2(v, OP_AutoCommit, 0, 0); } /* @@ -3065,13 +3303,16 @@ sqlite3 *db; Vdbe *v; - if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; - if( pParse->nErr || sqlite3MallocFailed() ) return; - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ) return; - + assert( pParse!=0 ); + db = pParse->db; + assert( db!=0 ); +/* if( db->aDb[0].pBt==0 ) return; */ + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ){ + return; + } v = sqlite3GetVdbe(pParse); if( v ){ - sqlite3VdbeAddOp(v, OP_AutoCommit, 1, 0); + sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 0); } } @@ -3082,13 +3323,36 @@ sqlite3 *db; Vdbe *v; - if( pParse==0 || (db=pParse->db)==0 || db->aDb[0].pBt==0 ) return; - if( pParse->nErr || sqlite3MallocFailed() ) return; - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ) return; - + assert( pParse!=0 ); + db = pParse->db; + assert( db!=0 ); +/* if( db->aDb[0].pBt==0 ) return; */ + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ){ + return; + } v = sqlite3GetVdbe(pParse); if( v ){ - sqlite3VdbeAddOp(v, OP_AutoCommit, 1, 1); + sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 1); + } +} + +/* +** This function is called by the parser when it parses a command to create, +** release or rollback an SQL savepoint. 
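/* A minimal sketch of the three SQL forms that reach this hook, driven
** through the public API.  sqlite3_open()/sqlite3_exec()/sqlite3_close()
** are documented calls; the savepoint name "sp1" and the in-memory
** database are illustrative, and error handling is elided. */
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x);", 0, 0, 0);
  sqlite3_exec(db, "SAVEPOINT sp1;", 0, 0, 0);            /* op==SAVEPOINT_BEGIN    */
  sqlite3_exec(db, "INSERT INTO t VALUES(1);", 0, 0, 0);
  sqlite3_exec(db, "ROLLBACK TO sp1;", 0, 0, 0);          /* op==SAVEPOINT_ROLLBACK */
  sqlite3_exec(db, "RELEASE sp1;", 0, 0, 0);              /* op==SAVEPOINT_RELEASE  */
  sqlite3_close(db);
  return 0;
}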
+*/ +void sqlite3Savepoint(Parse *pParse, int op, Token *pName){ + char *zName = sqlite3NameFromToken(pParse->db, pName); + if( zName ){ + Vdbe *v = sqlite3GetVdbe(pParse); +#ifndef SQLITE_OMIT_AUTHORIZATION + static const char *az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; + assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 ); +#endif + if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){ + sqlite3DbFree(pParse->db, zName); + return; + } + sqlite3VdbeAddOp4(v, OP_Savepoint, op, 0, 0, zName, P4_DYNAMIC); } } @@ -3099,7 +3363,15 @@ int sqlite3OpenTempDatabase(Parse *pParse){ sqlite3 *db = pParse->db; if( db->aDb[1].pBt==0 && !pParse->explain ){ - int rc = sqlite3BtreeFactory(db, 0, 0, SQLITE_DEFAULT_CACHE_SIZE, + int rc; + static const int flags = + SQLITE_OPEN_READWRITE | + SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | + SQLITE_OPEN_DELETEONCLOSE | + SQLITE_OPEN_TEMP_DB; + + rc = sqlite3BtreeFactory(db, 0, 0, SQLITE_DEFAULT_CACHE_SIZE, flags, &db->aDb[1].pBt); if( rc!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "unable to open a temporary database " @@ -3107,16 +3379,10 @@ pParse->rc = rc; return 1; } - if( db->flags & !db->autoCommit ){ - rc = sqlite3BtreeBeginTrans(db->aDb[1].pBt, 1); - if( rc!=SQLITE_OK ){ - sqlite3ErrorMsg(pParse, "unable to get a write lock on " - "the temporary database file"); - pParse->rc = rc; - return 1; - } - } + assert( (db->flags & SQLITE_InTrans)==0 || db->autoCommit ); assert( db->aDb[1].pSchema ); + sqlite3PagerJournalMode(sqlite3BtreePager(db->aDb[1].pBt), + db->dfltJournalMode); } return 0; } @@ -3152,7 +3418,7 @@ if( v==0 ) return; /* This only happens if there was a prior error */ db = pParse->db; if( pParse->cookieGoto==0 ){ - pParse->cookieGoto = sqlite3VdbeAddOp(v, OP_Goto, 0, 0)+1; + pParse->cookieGoto = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0)+1; } if( iDb>=0 ){ assert( iDbnDb ); @@ -3181,22 +3447,15 @@ ** rollback the whole transaction. For operations where all constraints ** can be checked before any changes are made to the database, it is never ** necessary to undo a write and the checkpoint should not be set. -** -** Only database iDb and the temp database are made writable by this call. -** If iDb==0, then the main and temp databases are made writable. If -** iDb==1 then only the temp database is made writable. If iDb>1 then the -** specified auxiliary database and the temp database are made writable. */ void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){ - Vdbe *v = sqlite3GetVdbe(pParse); - if( v==0 ) return; sqlite3CodeVerifySchema(pParse, iDb); pParse->writeMask |= 1<nested==0 ){ - sqlite3VdbeAddOp(v, OP_Statement, iDb, 0); - } - if( (OMIT_TEMPDB || iDb!=1) && pParse->db->aDb[1].pBt!=0 ){ - sqlite3BeginWriteOperation(pParse, setStatement, 1); + /* Every place where this routine is called with setStatement!=0 has + ** already successfully created a VDBE. 
*/ + assert( pParse->pVdbe ); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Statement, iDb); } } @@ -3207,9 +3466,11 @@ #ifndef SQLITE_OMIT_REINDEX static int collationMatch(const char *zColl, Index *pIndex){ int i; + assert( zColl!=0 ); for(i=0; inColumn; i++){ const char *z = pIndex->azColl[i]; - if( z==zColl || (z && zColl && 0==sqlite3StrICmp(z, zColl)) ){ + assert( z!=0 ); + if( 0==sqlite3StrICmp(z, zColl) ){ return 1; } } @@ -3288,34 +3549,35 @@ return; } - if( pName1==0 || pName1->z==0 ){ + if( pName1==0 ){ reindexDatabases(pParse, 0); return; - }else if( pName2==0 || pName2->z==0 ){ + }else if( NEVER(pName2==0) || pName2->z==0 ){ + char *zColl; assert( pName1->z ); - pColl = sqlite3FindCollSeq(db, ENC(db), (char*)pName1->z, pName1->n, 0); + zColl = sqlite3NameFromToken(pParse->db, pName1); + if( !zColl ) return; + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); if( pColl ){ - char *zColl = sqliteStrNDup((const char *)pName1->z, pName1->n); - if( zColl ){ - reindexDatabases(pParse, zColl); - sqliteFree(zColl); - } + reindexDatabases(pParse, zColl); + sqlite3DbFree(db, zColl); return; } + sqlite3DbFree(db, zColl); } iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName); if( iDb<0 ) return; - z = sqlite3NameFromToken(pObjName); + z = sqlite3NameFromToken(db, pObjName); if( z==0 ) return; zDb = db->aDb[iDb].zName; pTab = sqlite3FindTable(db, z, zDb); if( pTab ){ reindexTable(pParse, pTab, 0); - sqliteFree(z); + sqlite3DbFree(db, z); return; } pIndex = sqlite3FindIndex(db, z, zDb); - sqliteFree(z); + sqlite3DbFree(db, z); if( pIndex ){ sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); @@ -3330,7 +3592,7 @@ ** with OP_OpenRead or OP_OpenWrite to access database index pIdx. ** ** If successful, a pointer to the new structure is returned. In this case -** the caller is responsible for calling sqliteFree() on the returned +** the caller is responsible for calling sqlite3DbFree(db, ) on the returned ** pointer. If an error occurs (out of memory or missing collation ** sequence), NULL is returned and the state of pParse updated to reflect ** the error. @@ -3339,22 +3601,24 @@ int i; int nCol = pIdx->nColumn; int nBytes = sizeof(KeyInfo) + (nCol-1)*sizeof(CollSeq*) + nCol; - KeyInfo *pKey = (KeyInfo *)sqliteMalloc(nBytes); + sqlite3 *db = pParse->db; + KeyInfo *pKey = (KeyInfo *)sqlite3DbMallocZero(db, nBytes); if( pKey ){ + pKey->db = pParse->db; pKey->aSortOrder = (u8 *)&(pKey->aColl[nCol]); assert( &pKey->aSortOrder[nCol]==&(((u8 *)pKey)[nBytes]) ); for(i=0; iazColl[i]; assert( zColl ); - pKey->aColl[i] = sqlite3LocateCollSeq(pParse, zColl, -1); + pKey->aColl[i] = sqlite3LocateCollSeq(pParse, zColl); pKey->aSortOrder[i] = pIdx->aSortOrder[i]; } - pKey->nField = nCol; + pKey->nField = (u16)nCol; } if( pParse->nErr ){ - sqliteFree(pKey); + sqlite3DbFree(db, pKey); pKey = 0; } return pKey; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/callback.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/callback.c --- sqlite3-3.4.2/src/callback.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/callback.c 2009-06-25 12:45:57.000000000 +0100 @@ -13,7 +13,7 @@ ** This file contains functions used to access the internal hash tables ** of user defined functions and collation sequences. ** -** $Id: callback.c,v 1.18 2007/05/07 09:32:45 danielk1977 Exp $ +** $Id: callback.c,v 1.42 2009/06/17 00:35:31 drh Exp $ */ #include "sqliteInt.h" @@ -23,20 +23,19 @@ ** in the database text encoding of name zName, length nName. 
** If the collation sequence */ -static void callCollNeeded(sqlite3 *db, const char *zName, int nName){ +static void callCollNeeded(sqlite3 *db, const char *zName){ assert( !db->xCollNeeded || !db->xCollNeeded16 ); - if( nName<0 ) nName = strlen(zName); if( db->xCollNeeded ){ - char *zExternal = sqliteStrNDup(zName, nName); + char *zExternal = sqlite3DbStrDup(db, zName); if( !zExternal ) return; db->xCollNeeded(db->pCollNeededArg, db, (int)ENC(db), zExternal); - sqliteFree(zExternal); + sqlite3DbFree(db, zExternal); } #ifndef SQLITE_OMIT_UTF16 if( db->xCollNeeded16 ){ char const *zExternal; - sqlite3_value *pTmp = sqlite3ValueNew(); - sqlite3ValueSetStr(pTmp, nName, zName, SQLITE_UTF8, SQLITE_STATIC); + sqlite3_value *pTmp = sqlite3ValueNew(db); + sqlite3ValueSetStr(pTmp, -1, zName, SQLITE_UTF8, SQLITE_STATIC); zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE); if( zExternal ){ db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal); @@ -56,11 +55,10 @@ static int synthCollSeq(sqlite3 *db, CollSeq *pColl){ CollSeq *pColl2; char *z = pColl->zName; - int n = strlen(z); int i; static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 }; for(i=0; i<3; i++){ - pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, n, 0); + pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, 0); if( pColl2->xCmp!=0 ){ memcpy(pColl, pColl2, sizeof(CollSeq)); pColl->xDel = 0; /* Do not copy the destructor */ @@ -82,25 +80,26 @@ ** The return value is either the collation sequence to be used in database ** db for collation type name zName, length nName, or NULL, if no collation ** sequence can be found. +** +** See also: sqlite3LocateCollSeq(), sqlite3FindCollSeq() */ CollSeq *sqlite3GetCollSeq( - sqlite3* db, - CollSeq *pColl, - const char *zName, - int nName + sqlite3* db, /* The database connection */ + CollSeq *pColl, /* Collating sequence with native encoding, or NULL */ + const char *zName /* Collating sequence name */ ){ CollSeq *p; p = pColl; if( !p ){ - p = sqlite3FindCollSeq(db, ENC(db), zName, nName, 0); + p = sqlite3FindCollSeq(db, ENC(db), zName, 0); } if( !p || !p->xCmp ){ /* No collation sequence of this type for this encoding is registered. ** Call the collation factory to see if it can supply us with one. */ - callCollNeeded(db, zName, nName); - p = sqlite3FindCollSeq(db, ENC(db), zName, nName, 0); + callCollNeeded(db, zName); + p = sqlite3FindCollSeq(db, ENC(db), zName, 0); } if( p && !p->xCmp && synthCollSeq(db, p) ){ p = 0; @@ -123,11 +122,9 @@ int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){ if( pColl ){ const char *zName = pColl->zName; - CollSeq *p = sqlite3GetCollSeq(pParse->db, pColl, zName, -1); + CollSeq *p = sqlite3GetCollSeq(pParse->db, pColl, zName); if( !p ){ - if( pParse->nErr==0 ){ - sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); - } + sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); pParse->nErr++; return SQLITE_ERROR; } @@ -152,17 +149,16 @@ ** each collation sequence structure. 
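/* A sketch of the "collation factory" path described above, seen from the
** public API: sqlite3_collation_needed() registers a callback that is run
** (via callCollNeeded) the first time a statement names a collation that
** has not been registered.  The collation name "icase" and the helper
** functions are hypothetical; error handling is elided. */
#include <ctype.h>
#include <string.h>
#include <sqlite3.h>

static int icaseCmp(void *pArg, int n1, const void *z1, int n2, const void *z2){
  const unsigned char *a = z1, *b = z2;
  int i, c, n = n1<n2 ? n1 : n2;
  (void)pArg;
  for(i=0; i<n; i++){
    c = tolower(a[i]) - tolower(b[i]);
    if( c ) return c;
  }
  return n1 - n2;
}

static void collFactory(void *pArg, sqlite3 *db, int enc, const char *zName){
  (void)pArg; (void)enc;
  if( strcmp(zName, "icase")==0 ){
    sqlite3_create_collation(db, "icase", SQLITE_UTF8, 0, icaseCmp);
  }
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_collation_needed(db, 0, collFactory);
  sqlite3_exec(db, "CREATE TABLE t(a TEXT COLLATE icase);", 0, 0, 0);  /* first use triggers the factory */
  sqlite3_close(db);
  return 0;
}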
*/ static CollSeq *findCollSeqEntry( - sqlite3 *db, - const char *zName, - int nName, - int create + sqlite3 *db, /* Database connection */ + const char *zName, /* Name of the collating sequence */ + int create /* Create a new entry if true */ ){ CollSeq *pColl; - if( nName<0 ) nName = strlen(zName); + int nName = sqlite3Strlen30(zName); pColl = sqlite3HashFind(&db->aCollSeq, zName, nName); if( 0==pColl && create ){ - pColl = sqliteMalloc( 3*sizeof(*pColl) + nName + 1 ); + pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1 ); if( pColl ){ CollSeq *pDel = 0; pColl[0].zName = (char*)&pColl[3]; @@ -175,13 +171,14 @@ pColl[0].zName[nName] = 0; pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, nName, pColl); - /* If a malloc() failure occured in sqlite3HashInsert(), it will + /* If a malloc() failure occurred in sqlite3HashInsert(), it will ** return the pColl pointer to be deleted (because it wasn't added ** to the hash table). */ - assert( !pDel || (sqlite3MallocFailed() && pDel==pColl) ); - if( pDel ){ - sqliteFree(pDel); + assert( pDel==0 || pDel==pColl ); + if( pDel!=0 ){ + db->mallocFailed = 1; + sqlite3DbFree(db, pDel); pColl = 0; } } @@ -201,17 +198,18 @@ ** this routine. sqlite3LocateCollSeq() invokes the collation factory ** if necessary and generates an error message if the collating sequence ** cannot be found. +** +** See also: sqlite3LocateCollSeq(), sqlite3GetCollSeq() */ CollSeq *sqlite3FindCollSeq( sqlite3 *db, u8 enc, const char *zName, - int nName, int create ){ CollSeq *pColl; if( zName ){ - pColl = findCollSeqEntry(db, zName, nName, create); + pColl = findCollSeqEntry(db, zName, create); }else{ pColl = db->pDfltColl; } @@ -221,6 +219,91 @@ return pColl; } +/* During the search for the best function definition, this procedure +** is called to test how well the function passed as the first argument +** matches the request for a function with nArg arguments in a system +** that uses encoding enc. The value returned indicates how well the +** request is matched. A higher value indicates a better match. +** +** The returned value is always between 0 and 6, as follows: +** +** 0: Not a match, or if nArg<0 and the function is has no implementation. +** 1: A variable arguments function that prefers UTF-8 when a UTF-16 +** encoding is requested, or vice versa. +** 2: A variable arguments function that uses UTF-16BE when UTF-16LE is +** requested, or vice versa. +** 3: A variable arguments function using the same text encoding. +** 4: A function with the exact number of arguments requested that +** prefers UTF-8 when a UTF-16 encoding is requested, or vice versa. +** 5: A function with the exact number of arguments requested that +** prefers UTF-16LE when UTF-16BE is requested, or vice versa. +** 6: An exact match. +** +*/ +static int matchQuality(FuncDef *p, int nArg, u8 enc){ + int match = 0; + if( p->nArg==-1 || p->nArg==nArg + || (nArg==-1 && (p->xFunc!=0 || p->xStep!=0)) + ){ + match = 1; + if( p->nArg==nArg || nArg==-1 ){ + match = 4; + } + if( enc==p->iPrefEnc ){ + match += 2; + } + else if( (enc==SQLITE_UTF16LE && p->iPrefEnc==SQLITE_UTF16BE) || + (enc==SQLITE_UTF16BE && p->iPrefEnc==SQLITE_UTF16LE) ){ + match += 1; + } + } + return match; +} + +/* +** Search a FuncDefHash for a function with the given name. Return +** a pointer to the matching FuncDef if found, or 0 if there is no match. 
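/* A compilable model of the 0..6 scoring rubric above.  score() mirrors
** matchQuality() but drops the FuncDef details (it assumes the candidate
** has an implementation); the encoding constants and example calls are
** illustrative only. */
#include <stdio.h>

#define ENC_UTF8    1
#define ENC_UTF16LE 2
#define ENC_UTF16BE 3

static int score(int defArg, int defEnc, int reqArg, int reqEnc){
  int match;
  if( defArg!=-1 && reqArg!=-1 && defArg!=reqArg ) return 0;   /* 0: not a match   */
  match = (defArg==reqArg || reqArg==-1) ? 4 : 1;              /* exact vs varargs */
  if( reqEnc==defEnc ){
    match += 2;                                                /* same encoding    */
  }else if( (reqEnc==ENC_UTF16LE && defEnc==ENC_UTF16BE)
         || (reqEnc==ENC_UTF16BE && defEnc==ENC_UTF16LE) ){
    match += 1;                                                /* UTF-16, other byte order */
  }
  return match;
}

int main(void){
  printf("%d\n", score( 2, ENC_UTF8,    2, ENC_UTF8   ));  /* 6: exact match          */
  printf("%d\n", score( 2, ENC_UTF16BE, 2, ENC_UTF16LE));  /* 5: byte-swapped UTF-16  */
  printf("%d\n", score(-1, ENC_UTF8,    2, ENC_UTF8   ));  /* 3: varargs, same enc    */
  printf("%d\n", score( 3, ENC_UTF8,    2, ENC_UTF8   ));  /* 0: wrong argument count */
  return 0;
}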
+*/ +static FuncDef *functionSearch( + FuncDefHash *pHash, /* Hash table to search */ + int h, /* Hash of the name */ + const char *zFunc, /* Name of function */ + int nFunc /* Number of bytes in zFunc */ +){ + FuncDef *p; + for(p=pHash->a[h]; p; p=p->pHash){ + if( sqlite3StrNICmp(p->zName, zFunc, nFunc)==0 && p->zName[nFunc]==0 ){ + return p; + } + } + return 0; +} + +/* +** Insert a new FuncDef into a FuncDefHash hash table. +*/ +void sqlite3FuncDefInsert( + FuncDefHash *pHash, /* The hash table into which to insert */ + FuncDef *pDef /* The function definition to insert */ +){ + FuncDef *pOther; + int nName = sqlite3Strlen30(pDef->zName); + u8 c1 = (u8)pDef->zName[0]; + int h = (sqlite3UpperToLower[c1] + nName) % ArraySize(pHash->a); + pOther = functionSearch(pHash, h, pDef->zName, nName); + if( pOther ){ + assert( pOther!=pDef && pOther->pNext!=pDef ); + pDef->pNext = pOther->pNext; + pOther->pNext = pDef; + }else{ + pDef->pNext = 0; + pDef->pHash = pHash->a[h]; + pHash->a[h] = pDef; + } +} + + + /* ** Locate a user function given a name, a number of arguments and a flag ** indicating whether the function prefers UTF-16 over UTF-8. Return a @@ -250,69 +333,59 @@ int createFlag /* Create new entry if true and does not otherwise exist */ ){ FuncDef *p; /* Iterator variable */ - FuncDef *pFirst; /* First function with this name */ FuncDef *pBest = 0; /* Best match found so far */ - int bestmatch = 0; + int bestScore = 0; /* Score of best match */ + int h; /* Hash value */ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); - if( nArg<-1 ) nArg = -1; + h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % ArraySize(db->aFunc.a); - pFirst = (FuncDef*)sqlite3HashFind(&db->aFunc, zName, nName); - for(p=pFirst; p; p=p->pNext){ - /* During the search for the best function definition, bestmatch is set - ** as follows to indicate the quality of the match with the definition - ** pointed to by pBest: - ** - ** 0: pBest is NULL. No match has been found. - ** 1: A variable arguments function that prefers UTF-8 when a UTF-16 - ** encoding is requested, or vice versa. - ** 2: A variable arguments function that uses UTF-16BE when UTF-16LE is - ** requested, or vice versa. - ** 3: A variable arguments function using the same text encoding. - ** 4: A function with the exact number of arguments requested that - ** prefers UTF-8 when a UTF-16 encoding is requested, or vice versa. - ** 5: A function with the exact number of arguments requested that - ** prefers UTF-16LE when UTF-16BE is requested, or vice versa. - ** 6: An exact match. - ** - ** A larger value of 'matchqual' indicates a more desirable match. - */ - if( p->nArg==-1 || p->nArg==nArg || nArg==-1 ){ - int match = 1; /* Quality of this match */ - if( p->nArg==nArg || nArg==-1 ){ - match = 4; - } - if( enc==p->iPrefEnc ){ - match += 2; - } - else if( (enc==SQLITE_UTF16LE && p->iPrefEnc==SQLITE_UTF16BE) || - (enc==SQLITE_UTF16BE && p->iPrefEnc==SQLITE_UTF16LE) ){ - match += 1; - } + /* First search for a match amongst the application-defined functions. + */ + p = functionSearch(&db->aFunc, h, zName, nName); + while( p ){ + int score = matchQuality(p, nArg, enc); + if( score>bestScore ){ + pBest = p; + bestScore = score; + } + p = p->pNext; + } - if( match>bestmatch ){ + /* If no match is found, search the built-in functions. + ** + ** Except, if createFlag is true, that means that we are trying to + ** install a new function. 
Whatever FuncDef structure is returned will + ** have fields overwritten with new information appropriate for the + ** new function. But the FuncDefs for built-in functions are read-only. + ** So we must not search for built-ins when creating a new function. + */ + if( !createFlag && !pBest ){ + FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); + p = functionSearch(pHash, h, zName, nName); + while( p ){ + int score = matchQuality(p, nArg, enc); + if( score>bestScore ){ pBest = p; - bestmatch = match; + bestScore = score; } + p = p->pNext; } } - /* If the createFlag parameter is true, and the seach did not reveal an + /* If the createFlag parameter is true and the search did not reveal an ** exact match for the name, number of arguments and encoding, then add a ** new entry to the hash table and return it. */ - if( createFlag && bestmatch<6 && - (pBest = sqliteMalloc(sizeof(*pBest)+nName))!=0 ){ - pBest->nArg = nArg; - pBest->pNext = pFirst; + if( createFlag && (bestScore<6 || pBest->nArg!=nArg) && + (pBest = sqlite3DbMallocZero(db, sizeof(*pBest)+nName+1))!=0 ){ + pBest->zName = (char *)&pBest[1]; + pBest->nArg = (u16)nArg; pBest->iPrefEnc = enc; memcpy(pBest->zName, zName, nName); pBest->zName[nName] = 0; - if( pBest==sqlite3HashInsert(&db->aFunc,pBest->zName,nName,(void*)pBest) ){ - sqliteFree(pBest); - return 0; - } + sqlite3FuncDefInsert(&db->aFunc, pBest); } if( pBest && (pBest->xStep || pBest->xFunc || createFlag) ){ @@ -323,9 +396,11 @@ /* ** Free all resources held by the schema structure. The void* argument points -** at a Schema struct. This function does not call sqliteFree() on the +** at a Schema struct. This function does not call sqlite3DbFree(db, ) on the ** pointer itself, it just cleans up subsiduary resources (i.e. the contents ** of the schema hash tables). +** +** The Schema.cache_size variable is not cleared. */ void sqlite3SchemaFree(void *p){ Hash temp1; @@ -335,16 +410,16 @@ temp1 = pSchema->tblHash; temp2 = pSchema->trigHash; - sqlite3HashInit(&pSchema->trigHash, SQLITE_HASH_STRING, 0); - sqlite3HashClear(&pSchema->aFKey); + sqlite3HashInit(&pSchema->trigHash); sqlite3HashClear(&pSchema->idxHash); for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ - sqlite3DeleteTrigger((Trigger*)sqliteHashData(pElem)); + sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem)); } sqlite3HashClear(&temp2); - sqlite3HashInit(&pSchema->tblHash, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&pSchema->tblHash); for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ Table *pTab = sqliteHashData(pElem); + assert( pTab->dbMem==0 ); sqlite3DeleteTable(pTab); } sqlite3HashClear(&temp1); @@ -356,18 +431,19 @@ ** Find and return the schema associated with a BTree. Create ** a new one if necessary. 
*/ -Schema *sqlite3SchemaGet(Btree *pBt){ +Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ Schema * p; if( pBt ){ - p = (Schema *)sqlite3BtreeSchema(pBt,sizeof(Schema),sqlite3SchemaFree); + p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaFree); }else{ - p = (Schema *)sqliteMalloc(sizeof(Schema)); + p = (Schema *)sqlite3MallocZero(sizeof(Schema)); } - if( p && 0==p->file_format ){ - sqlite3HashInit(&p->tblHash, SQLITE_HASH_STRING, 0); - sqlite3HashInit(&p->idxHash, SQLITE_HASH_STRING, 0); - sqlite3HashInit(&p->trigHash, SQLITE_HASH_STRING, 0); - sqlite3HashInit(&p->aFKey, SQLITE_HASH_STRING, 1); + if( !p ){ + db->mallocFailed = 1; + }else if ( 0==p->file_format ){ + sqlite3HashInit(&p->tblHash); + sqlite3HashInit(&p->idxHash); + sqlite3HashInit(&p->trigHash); p->enc = SQLITE_UTF8; } return p; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/complete.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/complete.c --- sqlite3-3.4.2/src/complete.c 2007-03-29 19:39:32.000000000 +0100 +++ sqlite3-3.6.16/src/complete.c 2009-06-25 12:24:38.000000000 +0100 @@ -16,7 +16,7 @@ ** separating it out, the code will be automatically omitted from ** static links that do not use it. ** -** $Id: complete.c,v 1.3 2006/01/18 15:25:17 danielk1977 Exp $ +** $Id: complete.c,v 1.8 2009/04/28 04:46:42 drh Exp $ */ #include "sqliteInt.h" #ifndef SQLITE_OMIT_COMPLETE @@ -24,8 +24,16 @@ /* ** This is defined in tokenize.c. We just have to import the definition. */ -extern const char sqlite3IsIdChar[]; -#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && sqlite3IsIdChar[c-0x20])) +#ifndef SQLITE_AMALGAMATION +#ifdef SQLITE_ASCII +extern const char sqlite3IsAsciiIdChar[]; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && sqlite3IsAsciiIdChar[c-0x20])) +#endif +#ifdef SQLITE_EBCDIC +extern const char sqlite3IsEbcdicIdChar[]; +#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) +#endif +#endif /* SQLITE_AMALGAMATION */ /* @@ -104,7 +112,7 @@ /* State: ** SEMI WS OTHER EXPLAIN CREATE TEMP TRIGGER END */ /* 0 START: */ { 0, 0, 1, 2, 3, 1, 1, 1, }, /* 1 NORMAL: */ { 0, 1, 1, 1, 1, 1, 1, 1, }, - /* 2 EXPLAIN: */ { 0, 2, 1, 1, 3, 1, 1, 1, }, + /* 2 EXPLAIN: */ { 0, 2, 2, 1, 3, 1, 1, 1, }, /* 3 CREATE: */ { 0, 3, 1, 1, 1, 3, 4, 1, }, /* 4 TRIGGER: */ { 5, 4, 4, 4, 4, 4, 4, 4, }, /* 5 SEMI: */ { 5, 5, 4, 4, 4, 4, 4, 6, }, @@ -248,13 +256,19 @@ int sqlite3_complete16(const void *zSql){ sqlite3_value *pVal; char const *zSql8; - int rc = 0; + int rc = SQLITE_NOMEM; - pVal = sqlite3ValueNew(); +#ifndef SQLITE_OMIT_AUTOINIT + rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC); zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8); if( zSql8 ){ rc = sqlite3_complete(zSql8); + }else{ + rc = SQLITE_NOMEM; } sqlite3ValueFree(pVal); return sqlite3ApiExit(0, rc); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/date.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/date.c --- sqlite3-3.4.2/src/date.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/date.c 2009-06-25 12:24:38.000000000 +0100 @@ -16,7 +16,7 @@ ** sqlite3RegisterDateTimeFunctions() found at the bottom of the file. ** All other code has file scope. ** -** $Id: date.c,v 1.66 2007/05/08 21:56:00 drh Exp $ +** $Id: date.c,v 1.107 2009/05/03 20:23:53 drh Exp $ ** ** SQLite processes all times and dates as Julian Day numbers. 
The ** dates and times are stored as the number of days since noon @@ -46,8 +46,6 @@ ** Richmond, Virginia (USA) */ #include "sqliteInt.h" -#include "os.h" -#include #include #include #include @@ -55,19 +53,36 @@ #ifndef SQLITE_OMIT_DATETIME_FUNCS /* +** On recent Windows platforms, the localtime_s() function is available +** as part of the "Secure CRT". It is essentially equivalent to +** localtime_r() available under most POSIX platforms, except that the +** order of the parameters is reversed. +** +** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx. +** +** If the user has not indicated to use localtime_r() or localtime_s() +** already, check for an MSVC build environment that provides +** localtime_s(). +*/ +#if !defined(HAVE_LOCALTIME_R) && !defined(HAVE_LOCALTIME_S) && \ + defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE) +#define HAVE_LOCALTIME_S 1 +#endif + +/* ** A structure for holding a single date and time. */ typedef struct DateTime DateTime; struct DateTime { - double rJD; /* The julian day number */ - int Y, M, D; /* Year, month, and day */ - int h, m; /* Hour and minutes */ - int tz; /* Timezone offset in minutes */ - double s; /* Seconds */ - char validYMD; /* True if Y,M,D are valid */ - char validHMS; /* True if h,m,s are valid */ - char validJD; /* True if rJD is valid */ - char validTZ; /* True if tz is valid */ + sqlite3_int64 iJD; /* The julian day number times 86400000 */ + int Y, M, D; /* Year, month, and day */ + int h, m; /* Hour and minutes */ + int tz; /* Timezone offset in minutes */ + double s; /* Seconds */ + char validYMD; /* True (1) if Y,M,D are valid */ + char validHMS; /* True (1) if h,m,s are valid */ + char validJD; /* True (1) if iJD is valid */ + char validTZ; /* True (1) if tz is valid */ }; @@ -102,7 +117,7 @@ pVal = va_arg(ap, int*); val = 0; while( N-- ){ - if( !isdigit(*(u8*)zDate) ){ + if( !sqlite3Isdigit(*zDate) ){ goto end_getDigits; } val = val*10 + *zDate - '0'; @@ -132,23 +147,32 @@ ** ** (+/-)HH:MM ** +** Or the "zulu" notation: +** +** Z +** ** If the parse is successful, write the number of minutes -** of change in *pnMin and return 0. If a parser error occurs, -** return 0. +** of change in p->tz and return 0. If a parser error occurs, +** return non-zero. ** ** A missing specifier is not considered an error. */ static int parseTimezone(const char *zDate, DateTime *p){ int sgn = 0; int nHr, nMn; - while( isspace(*(u8*)zDate) ){ zDate++; } + int c; + while( sqlite3Isspace(*zDate) ){ zDate++; } p->tz = 0; - if( *zDate=='-' ){ + c = *zDate; + if( c=='-' ){ sgn = -1; - }else if( *zDate=='+' ){ + }else if( c=='+' ){ sgn = +1; + }else if( c=='Z' || c=='z' ){ + zDate++; + goto zulu_time; }else{ - return *zDate!=0; + return c!=0; } zDate++; if( getDigits(zDate, 2, 0, 14, ':', &nHr, 2, 0, 59, 0, &nMn)!=2 ){ @@ -156,7 +180,8 @@ } zDate += 5; p->tz = sgn*(nMn + nHr*60); - while( isspace(*(u8*)zDate) ){ zDate++; } +zulu_time: + while( sqlite3Isspace(*zDate) ){ zDate++; } return *zDate!=0; } @@ -180,10 +205,10 @@ return 1; } zDate += 2; - if( *zDate=='.' && isdigit((u8)zDate[1]) ){ + if( *zDate=='.' 
&& sqlite3Isdigit(zDate[1]) ){ double rScale = 1.0; zDate++; - while( isdigit(*(u8*)zDate) ){ + while( sqlite3Isdigit(*zDate) ){ ms = ms*10.0 + *zDate - '0'; rScale *= 10.0; zDate++; @@ -199,7 +224,7 @@ p->m = m; p->s = s + ms; if( parseTimezone(zDate, p) ) return 1; - p->validTZ = p->tz!=0; + p->validTZ = (p->tz!=0)?1:0; return 0; } @@ -228,14 +253,14 @@ } A = Y/100; B = 2 - A + (A/4); - X1 = 365.25*(Y+4716); - X2 = 30.6001*(M+1); - p->rJD = X1 + X2 + D + B - 1524.5; + X1 = 36525*(Y+4716)/100; + X2 = 306001*(M+1)/10000; + p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); p->validJD = 1; if( p->validHMS ){ - p->rJD += (p->h*3600.0 + p->m*60.0 + p->s)/86400.0; + p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000); if( p->validTZ ){ - p->rJD -= p->tz*60/86400.0; + p->iJD -= p->tz*60000; p->validYMD = 0; p->validHMS = 0; p->validTZ = 0; @@ -268,7 +293,7 @@ return 1; } zDate += 10; - while( isspace(*(u8*)zDate) || 'T'==*(u8*)zDate ){ zDate++; } + while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; } if( parseHhMmSs(zDate, p)==0 ){ /* We got the time */ }else if( *zDate==0 ){ @@ -288,6 +313,17 @@ } /* +** Set the time to the current time reported by the VFS +*/ +static void setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ + double r; + sqlite3 *db = sqlite3_context_db_handle(context); + sqlite3OsCurrentTime(db->pVfs, &r); + p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); + p->validJD = 1; +} + +/* ** Attempt to parse the given string into a Julian Day Number. Return ** the number of errors. ** @@ -303,20 +339,23 @@ ** as there is a time string. The time string can be omitted as long ** as there is a year and date. */ -static int parseDateOrTime(const char *zDate, DateTime *p){ - memset(p, 0, sizeof(*p)); +static int parseDateOrTime( + sqlite3_context *context, + const char *zDate, + DateTime *p +){ + int isRealNum; /* Return from sqlite3IsNumber(). Not used */ if( parseYyyyMmDd(zDate,p)==0 ){ return 0; }else if( parseHhMmSs(zDate, p)==0 ){ return 0; }else if( sqlite3StrICmp(zDate,"now")==0){ - double r; - sqlite3OsCurrentTime(&r); - p->rJD = r; - p->validJD = 1; + setDateTimeToCurrent(context, p); return 0; - }else if( sqlite3IsNumber(zDate, 0, SQLITE_UTF8) ){ - getValue(zDate, &p->rJD); + }else if( sqlite3IsNumber(zDate, &isRealNum, SQLITE_UTF8) ){ + double r; + getValue(zDate, &r); + p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); p->validJD = 1; return 0; } @@ -334,14 +373,14 @@ p->M = 1; p->D = 1; }else{ - Z = p->rJD + 0.5; - A = (Z - 1867216.25)/36524.25; + Z = (int)((p->iJD + 43200000)/86400000); + A = (int)((Z - 1867216.25)/36524.25); A = Z + 1 + A - (A/4); B = A + 1524; - C = (B - 122.1)/365.25; - D = 365.25*C; - E = (B-D)/30.6001; - X1 = 30.6001*E; + C = (int)((B - 122.1)/365.25); + D = (36525*C)/100; + E = (int)((B-D)/30.6001); + X1 = (int)(30.6001*E); p->D = B - D - X1; p->M = E<14 ? E-1 : E-13; p->Y = p->M>2 ? C - 4716 : C - 4715; @@ -353,13 +392,12 @@ ** Compute the Hour, Minute, and Seconds from the julian day number. */ static void computeHMS(DateTime *p){ - int Z, s; + int s; if( p->validHMS ) return; computeJD(p); - Z = p->rJD + 0.5; - s = (p->rJD + 0.5 - Z)*86400000.0 + 0.5; - p->s = 0.001*s; - s = p->s; + s = (int)((p->iJD + 43200000) % 86400000); + p->s = s/1000.0; + s = (int)p->s; p->s -= s; p->h = s/3600; s -= p->h*3600; @@ -385,11 +423,13 @@ p->validTZ = 0; } +#ifndef SQLITE_OMIT_LOCALTIME /* -** Compute the difference (in days) between localtime and UTC (a.k.a. 
GMT) +** Compute the difference (in milliseconds) +** between localtime and UTC (a.k.a. GMT) ** for the time value p where p is in UTC. */ -static double localtimeOffset(DateTime *p){ +static sqlite3_int64 localtimeOffset(DateTime *p){ DateTime x, y; time_t t; x = *p; @@ -402,13 +442,13 @@ x.m = 0; x.s = 0.0; } else { - int s = x.s + 0.5; + int s = (int)(x.s + 0.5); x.s = s; } x.tz = 0; x.validJD = 0; computeJD(&x); - t = (x.rJD-2440587.5)*86400.0 + 0.5; + t = x.iJD/1000 - 21086676*(i64)10000; #ifdef HAVE_LOCALTIME_R { struct tm sLocal; @@ -420,10 +460,21 @@ y.m = sLocal.tm_min; y.s = sLocal.tm_sec; } +#elif defined(HAVE_LOCALTIME_S) + { + struct tm sLocal; + localtime_s(&sLocal, &t); + y.Y = sLocal.tm_year + 1900; + y.M = sLocal.tm_mon + 1; + y.D = sLocal.tm_mday; + y.h = sLocal.tm_hour; + y.m = sLocal.tm_min; + y.s = sLocal.tm_sec; + } #else { struct tm *pTm; - sqlite3OsEnterMutex(); + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); pTm = localtime(&t); y.Y = pTm->tm_year + 1900; y.M = pTm->tm_mon + 1; @@ -431,7 +482,7 @@ y.h = pTm->tm_hour; y.m = pTm->tm_min; y.s = pTm->tm_sec; - sqlite3OsLeaveMutex(); + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); } #endif y.validYMD = 1; @@ -439,8 +490,9 @@ y.validJD = 0; y.validTZ = 0; computeJD(&y); - return y.rJD - x.rJD; + return y.iJD - x.iJD; } +#endif /* SQLITE_OMIT_LOCALTIME */ /* ** Process a modifier to a date-time stamp. The modifiers are @@ -469,11 +521,12 @@ double r; char *z, zBuf[30]; z = zBuf; - for(n=0; nrJD += localtimeOffset(p); + p->iJD += localtimeOffset(p); clearYMD_HMS_TZ(p); rc = 0; } break; } +#endif case 'u': { /* ** unixepoch ** - ** Treat the current value of p->rJD as the number of + ** Treat the current value of p->iJD as the number of ** seconds since 1970. Convert to a real julian day number. */ if( strcmp(z, "unixepoch")==0 && p->validJD ){ - p->rJD = p->rJD/86400.0 + 2440587.5; + p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000; clearYMD_HMS_TZ(p); rc = 0; - }else if( strcmp(z, "utc")==0 ){ - double c1; + } +#ifndef SQLITE_OMIT_LOCALTIME + else if( strcmp(z, "utc")==0 ){ + sqlite3_int64 c1; computeJD(p); c1 = localtimeOffset(p); - p->rJD -= c1; + p->iJD -= c1; clearYMD_HMS_TZ(p); - p->rJD += c1 - localtimeOffset(p); + p->iJD += c1 - localtimeOffset(p); rc = 0; } +#endif break; } case 'w': { @@ -519,16 +576,15 @@ ** date is already on the appropriate weekday, this is a no-op. 
*/ if( strncmp(z, "weekday ", 8)==0 && getValue(&z[8],&r)>0 - && (n=r)==r && n>=0 && r<7 ){ - int Z; + && (n=(int)r)==r && n>=0 && r<7 ){ + sqlite3_int64 Z; computeYMD_HMS(p); p->validTZ = 0; p->validJD = 0; computeJD(p); - Z = p->rJD + 1.5; - Z %= 7; + Z = ((p->iJD + 129600000)/86400000) % 7; if( Z>n ) Z -= 7; - p->rJD += n - Z; + p->iJD += (n - Z)*86400000; clearYMD_HMS_TZ(p); rc = 0; } @@ -574,6 +630,7 @@ case '7': case '8': case '9': { + double rRounder; n = getValue(z, &r); assert( n>=1 ); if( z[n]==':' ){ @@ -584,54 +641,59 @@ */ const char *z2 = z; DateTime tx; - int day; - if( !isdigit(*(u8*)z2) ) z2++; + sqlite3_int64 day; + if( !sqlite3Isdigit(*z2) ) z2++; memset(&tx, 0, sizeof(tx)); if( parseHhMmSs(z2, &tx) ) break; computeJD(&tx); - tx.rJD -= 0.5; - day = (int)tx.rJD; - tx.rJD -= day; - if( z[0]=='-' ) tx.rJD = -tx.rJD; + tx.iJD -= 43200000; + day = tx.iJD/86400000; + tx.iJD -= day*86400000; + if( z[0]=='-' ) tx.iJD = -tx.iJD; computeJD(p); clearYMD_HMS_TZ(p); - p->rJD += tx.rJD; + p->iJD += tx.iJD; rc = 0; break; } z += n; - while( isspace(*(u8*)z) ) z++; - n = strlen(z); + while( sqlite3Isspace(*z) ) z++; + n = sqlite3Strlen30(z); if( n>10 || n<3 ) break; if( z[n-1]=='s' ){ z[n-1] = 0; n--; } computeJD(p); rc = 0; + rRounder = r<0 ? -0.5 : +0.5; if( n==3 && strcmp(z,"day")==0 ){ - p->rJD += r; + p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder); }else if( n==4 && strcmp(z,"hour")==0 ){ - p->rJD += r/24.0; + p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder); }else if( n==6 && strcmp(z,"minute")==0 ){ - p->rJD += r/(24.0*60.0); + p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder); }else if( n==6 && strcmp(z,"second")==0 ){ - p->rJD += r/(24.0*60.0*60.0); + p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder); }else if( n==5 && strcmp(z,"month")==0 ){ int x, y; computeYMD_HMS(p); - p->M += r; + p->M += (int)r; x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; p->validJD = 0; computeJD(p); - y = r; + y = (int)r; if( y!=r ){ - p->rJD += (r - y)*30.0; + p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder); } }else if( n==4 && strcmp(z,"year")==0 ){ + int y = (int)r; computeYMD_HMS(p); - p->Y += r; + p->Y += y; p->validJD = 0; computeJD(p); + if( y!=r ){ + p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder); + } }else{ rc = 1; } @@ -650,13 +712,31 @@ ** argv[1] and following are modifiers. Parse them all and write ** the resulting time into the DateTime structure p. Return 0 ** on success and 1 if there are any errors. +** +** If there are zero parameters (if even argv[0] is undefined) +** then assume a default value of "now" for argv[0]. 
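/* The modifiers handled above can be exercised through the public API; a
** small sketch assuming a build that includes the date/time functions,
** with error handling elided.  show() is a throwaway callback used only
** for this demo. */
#include <stdio.h>
#include <sqlite3.h>

static int show(void *p, int nCol, char **azVal, char **azCol){
  (void)p; (void)azCol;
  printf("%s\n", (nCol>0 && azVal[0]) ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Zero arguments defaults to 'now'. */
  sqlite3_exec(db, "SELECT datetime();", show, 0, 0);
  /* Amount modifiers are applied left to right. */
  sqlite3_exec(db, "SELECT date('2009-06-25', '+3 months', '-2 days');", show, 0, 0);  /* 2009-09-23 */
  /* A numeric value with 'unixepoch' is read as seconds since 1970. */
  sqlite3_exec(db, "SELECT datetime(0, 'unixepoch');", show, 0, 0);  /* 1970-01-01 00:00:00 */
  sqlite3_close(db);
  return 0;
}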
*/ -static int isDate(int argc, sqlite3_value **argv, DateTime *p){ +static int isDate( + sqlite3_context *context, + int argc, + sqlite3_value **argv, + DateTime *p +){ int i; const unsigned char *z; - if( argc==0 ) return 1; - if( (z = sqlite3_value_text(argv[0]))==0 || parseDateOrTime((char*)z, p) ){ - return 1; + int eType; + memset(p, 0, sizeof(*p)); + if( argc==0 ){ + setDateTimeToCurrent(context, p); + }else if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT + || eType==SQLITE_INTEGER ){ + p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5); + p->validJD = 1; + }else{ + z = sqlite3_value_text(argv[0]); + if( !z || parseDateOrTime(context, (char*)z, p) ){ + return 1; + } } for(i=1; iaLimit[SQLITE_LIMIT_LENGTH]+1 ); + testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ); if( nSQLITE_MAX_LENGTH ){ + }else if( n>(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); return; }else{ - z = sqliteMalloc( n ); - if( z==0 ) return; + z = sqlite3DbMallocRaw(db, (int)n); + if( z==0 ){ + sqlite3_result_error_nomem(context); + return; + } } computeJD(&x); computeYMD_HMS(&x); @@ -833,7 +922,7 @@ double s = x.s; if( s>59.999 ) s = 59.999; sqlite3_snprintf(7, &z[j],"%06.3f", s); - j += strlen(&z[j]); + j += sqlite3Strlen30(&z[j]); break; } case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break; @@ -845,10 +934,10 @@ y.M = 1; y.D = 1; computeJD(&y); - nDay = x.rJD - y.rJD + 0.5; + nDay = (int)((x.iJD-y.iJD+43200000)/86400000); if( zFmt[i]=='W' ){ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = ((int)(x.rJD+0.5)) % 7; + wd = (int)(((x.iJD+43200000)/86400000)%7); sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); j += 2; }else{ @@ -858,30 +947,34 @@ break; } case 'J': { - sqlite3_snprintf(20, &z[j],"%.16g",x.rJD); - j+=strlen(&z[j]); + sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0); + j+=sqlite3Strlen30(&z[j]); break; } case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; case 's': { - sqlite3_snprintf(30,&z[j],"%d", - (int)((x.rJD-2440587.5)*86400.0 + 0.5)); - j += strlen(&z[j]); + sqlite3_snprintf(30,&z[j],"%lld", + (i64)(x.iJD/1000 - 21086676*(i64)10000)); + j += sqlite3Strlen30(&z[j]); break; } case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; - case 'w': z[j++] = (((int)(x.rJD+1.5)) % 7) + '0'; break; - case 'Y': sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=strlen(&z[j]);break; - case '%': z[j++] = '%'; break; + case 'w': { + z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + break; + } + case 'Y': { + sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]); + break; + } + default: z[j++] = '%'; break; } } } z[j] = 0; - sqlite3_result_text(context, z, -1, SQLITE_TRANSIENT); - if( z!=zBuf ){ - sqliteFree(z); - } + sqlite3_result_text(context, z, -1, + z==zBuf ? 
SQLITE_TRANSIENT : SQLITE_DYNAMIC); } /* @@ -891,15 +984,11 @@ */ static void ctimeFunc( sqlite3_context *context, - int argc, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3_value *pVal = sqlite3ValueNew(); - if( pVal ){ - sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); - timeFunc(context, 1, &pVal); - sqlite3ValueFree(pVal); - } + UNUSED_PARAMETER2(NotUsed, NotUsed2); + timeFunc(context, 0, 0); } /* @@ -909,15 +998,11 @@ */ static void cdateFunc( sqlite3_context *context, - int argc, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3_value *pVal = sqlite3ValueNew(); - if( pVal ){ - sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); - dateFunc(context, 1, &pVal); - sqlite3ValueFree(pVal); - } + UNUSED_PARAMETER2(NotUsed, NotUsed2); + dateFunc(context, 0, 0); } /* @@ -927,15 +1012,11 @@ */ static void ctimestampFunc( sqlite3_context *context, - int argc, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3_value *pVal = sqlite3ValueNew(); - if( pVal ){ - sqlite3ValueSetStr(pVal, -1, "now", SQLITE_UTF8, SQLITE_STATIC); - datetimeFunc(context, 1, &pVal); - sqlite3ValueFree(pVal); - } + UNUSED_PARAMETER2(NotUsed, NotUsed2); + datetimeFunc(context, 0, 0); } #endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */ @@ -958,18 +1039,23 @@ ){ time_t t; char *zFormat = (char *)sqlite3_user_data(context); + sqlite3 *db; + double rT; char zBuf[20]; - time(&t); -#ifdef SQLITE_TEST - { - extern int sqlite3_current_time; /* See os_XXX.c */ - if( sqlite3_current_time ){ - t = sqlite3_current_time; - } - } -#endif + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + db = sqlite3_context_db_handle(context); + sqlite3OsCurrentTime(db->pVfs, &rT); +#ifndef SQLITE_OMIT_FLOATING_POINT + t = 86400.0*(rT - 2440587.5) + 0.5; +#else + /* without floating point support, rT will have + ** already lost fractional day precision. + */ + t = 86400 * (rT - 2440587) - 43200; +#endif #ifdef HAVE_GMTIME_R { struct tm sNow; @@ -979,10 +1065,10 @@ #else { struct tm *pTm; - sqlite3OsEnterMutex(); + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); pTm = gmtime(&t); strftime(zBuf, 20, zFormat, pTm); - sqlite3OsLeaveMutex(); + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); } #endif @@ -995,42 +1081,28 @@ ** functions. This should be the only routine in this file with ** external linkage. 
*/ -void sqlite3RegisterDateTimeFunctions(sqlite3 *db){ +void sqlite3RegisterDateTimeFunctions(void){ + static SQLITE_WSD FuncDef aDateTimeFuncs[] = { #ifndef SQLITE_OMIT_DATETIME_FUNCS - static const struct { - char *zName; - int nArg; - void (*xFunc)(sqlite3_context*,int,sqlite3_value**); - } aFuncs[] = { - { "julianday", -1, juliandayFunc }, - { "date", -1, dateFunc }, - { "time", -1, timeFunc }, - { "datetime", -1, datetimeFunc }, - { "strftime", -1, strftimeFunc }, - { "current_time", 0, ctimeFunc }, - { "current_timestamp", 0, ctimestampFunc }, - { "current_date", 0, cdateFunc }, - }; - int i; - - for(i=0; ia; inSrc; i++, pItem++){ - pTab = sqlite3LocateTable(pParse, pItem->zName, pItem->zDatabase); - sqlite3DeleteTable(pItem->pTab); - pItem->pTab = pTab; - if( pTab ){ - pTab->nRef++; - } + struct SrcList_item *pItem = pSrc->a; + Table *pTab; + assert( pItem && pSrc->nSrc==1 ); + pTab = sqlite3LocateTable(pParse, 0, pItem->zName, pItem->zDatabase); + sqlite3DeleteTable(pItem->pTab); + pItem->pTab = pTab; + if( pTab ){ + pTab->nRef++; + } + if( sqlite3IndexedByLookup(pParse, pItem) ){ + pTab = 0; } return pTab; } @@ -42,7 +43,8 @@ ** writable return 0; */ int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ - if( (pTab->readOnly && (pParse->db->flags & SQLITE_WriteSchema)==0 + if( ((pTab->tabFlags & TF_Readonly)!=0 + && (pParse->db->flags & SQLITE_WriteSchema)==0 && pParse->nested==0) #ifndef SQLITE_OMIT_VIRTUALTABLE || (pTab->pMod && pTab->pMod->pModule->xUpdate==0) @@ -60,27 +62,139 @@ return 0; } + +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) /* -** Generate code that will open a table for reading. +** Evaluate a view and store its result in an ephemeral table. The +** pWhere argument is an optional WHERE clause that restricts the +** set of rows in the view that are to be added to the ephemeral table. 
*/ -void sqlite3OpenTable( - Parse *p, /* Generate code into this VDBE */ - int iCur, /* The cursor number of the table */ - int iDb, /* The database index in sqlite3.aDb[] */ - Table *pTab, /* The table to be opened */ - int opcode /* OP_OpenRead or OP_OpenWrite */ +void sqlite3MaterializeView( + Parse *pParse, /* Parsing context */ + Table *pView, /* View definition */ + Expr *pWhere, /* Optional WHERE clause to be added */ + int iCur /* Cursor number for ephemerial table */ ){ - Vdbe *v; - if( IsVirtual(pTab) ) return; - v = sqlite3GetVdbe(p); - assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); - sqlite3TableLock(p, iDb, pTab->tnum, (opcode==OP_OpenWrite), pTab->zName); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - VdbeComment((v, "# %s", pTab->zName)); - sqlite3VdbeAddOp(v, opcode, iCur, pTab->tnum); - sqlite3VdbeAddOp(v, OP_SetNumColumns, iCur, pTab->nCol); + SelectDest dest; + Select *pDup; + sqlite3 *db = pParse->db; + + pDup = sqlite3SelectDup(db, pView->pSelect, 0); + if( pWhere ){ + SrcList *pFrom; + + pWhere = sqlite3ExprDup(db, pWhere, 0); + pFrom = sqlite3SrcListAppend(db, 0, 0, 0); + if( pFrom ){ + assert( pFrom->nSrc==1 ); + pFrom->a[0].zAlias = sqlite3DbStrDup(db, pView->zName); + pFrom->a[0].pSelect = pDup; + assert( pFrom->a[0].pOn==0 ); + assert( pFrom->a[0].pUsing==0 ); + }else{ + sqlite3SelectDelete(db, pDup); + } + pDup = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, 0, 0, 0, 0); + } + sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur); + sqlite3Select(pParse, pDup, &dest); + sqlite3SelectDelete(db, pDup); } +#endif /* !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) */ + +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) +/* +** Generate an expression tree to implement the WHERE, ORDER BY, +** and LIMIT/OFFSET portion of DELETE and UPDATE statements. +** +** DELETE FROM table_wxyz WHERE a<5 ORDER BY a LIMIT 1; +** \__________________________/ +** pLimitWhere (pInClause) +*/ +Expr *sqlite3LimitWhere( + Parse *pParse, /* The parser context */ + SrcList *pSrc, /* the FROM clause -- which tables to scan */ + Expr *pWhere, /* The WHERE clause. May be null */ + ExprList *pOrderBy, /* The ORDER BY clause. May be null */ + Expr *pLimit, /* The LIMIT clause. May be null */ + Expr *pOffset, /* The OFFSET clause. May be null */ + char *zStmtType /* Either DELETE or UPDATE. For error messages. */ +){ + Expr *pWhereRowid = NULL; /* WHERE rowid .. */ + Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ + Expr *pSelectRowid = NULL; /* SELECT rowid ... */ + ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ + SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */ + Select *pSelect = NULL; /* Complete SELECT tree */ + + /* Check that there isn't an ORDER BY without a LIMIT clause. + */ + if( pOrderBy && (pLimit == 0) ) { + sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType); + pParse->parseError = 1; + goto limit_where_cleanup_2; + } + + /* We only need to generate a select expression if there + ** is a limit/offset term to enforce. + */ + if( pLimit == 0 ) { + /* if pLimit is null, pOffset will always be null as well. */ + assert( pOffset == 0 ); + return pWhere; + } + + /* Generate a select expression tree to enforce the limit/offset + ** term for the DELETE or UPDATE statement. 
For example: + ** DELETE FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 + ** becomes: + ** DELETE FROM table_a WHERE rowid IN ( + ** SELECT rowid FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 + ** ); + */ + + pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); + if( pSelectRowid == 0 ) goto limit_where_cleanup_2; + pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); + if( pEList == 0 ) goto limit_where_cleanup_2; + + /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree + ** and the SELECT subtree. */ + pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); + if( pSelectSrc == 0 ) { + sqlite3ExprListDelete(pParse->db, pEList); + goto limit_where_cleanup_2; + } + + /* generate the SELECT expression tree. */ + pSelect = sqlite3SelectNew(pParse,pEList,pSelectSrc,pWhere,0,0, + pOrderBy,0,pLimit,pOffset); + if( pSelect == 0 ) return 0; + + /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ + pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); + if( pWhereRowid == 0 ) goto limit_where_cleanup_1; + pInClause = sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0); + if( pInClause == 0 ) goto limit_where_cleanup_1; + + pInClause->x.pSelect = pSelect; + pInClause->flags |= EP_xIsSelect; + sqlite3ExprSetHeight(pParse, pInClause); + return pInClause; + + /* something went wrong. clean up anything allocated. */ +limit_where_cleanup_1: + sqlite3SelectDelete(pParse->db, pSelect); + return 0; +limit_where_cleanup_2: + sqlite3ExprDelete(pParse->db, pWhere); + sqlite3ExprListDelete(pParse->db, pOrderBy); + sqlite3ExprDelete(pParse->db, pLimit); + sqlite3ExprDelete(pParse->db, pOffset); + return 0; +} +#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */ /* ** Generate code for a DELETE FROM statement. @@ -107,18 +221,24 @@ int oldIdx = -1; /* Cursor for the OLD table of AFTER triggers */ NameContext sNC; /* Name context to resolve expressions in */ int iDb; /* Database number */ - int memCnt = 0; /* Memory cell used for change counting */ + int memCnt = -1; /* Memory cell used for change counting */ + int rcauth; /* Value returned by authorization callback */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to delete from a view */ - int triggers_exist = 0; /* True if any triggers exist */ + Trigger *pTrigger; /* List of table triggers, if required */ #endif + int iBeginAfterTrigger = 0; /* Address of after trigger program */ + int iEndAfterTrigger = 0; /* Exit of after trigger program */ + int iBeginBeforeTrigger = 0; /* Address of before trigger program */ + int iEndBeforeTrigger = 0; /* Exit of before trigger program */ + u32 old_col_mask = 0; /* Mask of OLD.* columns in use */ sContext.pParse = 0; - if( pParse->nErr || sqlite3MallocFailed() ){ + db = pParse->db; + if( pParse->nErr || db->mallocFailed ){ goto delete_from_cleanup; } - db = pParse->db; assert( pTabList->nSrc==1 ); /* Locate the table which we want to delete. 
This table has to be @@ -133,10 +253,10 @@ ** deleted from is a view */ #ifndef SQLITE_OMIT_TRIGGER - triggers_exist = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0); + pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); isView = pTab->pSelect!=0; #else -# define triggers_exist 0 +# define pTrigger 0 # define isView 0 #endif #ifdef SQLITE_OMIT_VIEW @@ -144,15 +264,18 @@ # define isView 0 #endif - if( sqlite3IsReadOnly(pParse, pTab, triggers_exist) ){ + if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ goto delete_from_cleanup; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDbnDb ); zDb = db->aDb[iDb].zName; - if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ + rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb); + assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE ); + if( rcauth==SQLITE_DENY ){ goto delete_from_cleanup; } + assert(!isView || pTrigger); /* If pTab is really a view, make sure it has been initialized. */ @@ -162,19 +285,16 @@ /* Allocate a cursor used to store the old.* data for a trigger. */ - if( triggers_exist ){ + if( pTrigger ){ oldIdx = pParse->nTab++; } - /* Resolve the column names in the WHERE clause. + /* Assign cursor number to the table and all its indices. */ assert( pTabList->nSrc==1 ); iCur = pTabList->a[0].iCursor = pParse->nTab++; - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - sNC.pSrcList = pTabList; - if( sqlite3ExprResolveNames(&sNC, pWhere) ){ - goto delete_from_cleanup; + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + pParse->nTab++; } /* Start the view context @@ -190,81 +310,91 @@ goto delete_from_cleanup; } if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); - sqlite3BeginWriteOperation(pParse, triggers_exist, iDb); + sqlite3BeginWriteOperation(pParse, (pTrigger?1:0), iDb); + + if( pTrigger ){ + int orconf = ((pParse->trigStack)?pParse->trigStack->orconf:OE_Default); + int iGoto = sqlite3VdbeAddOp0(v, OP_Goto); + addr = sqlite3VdbeMakeLabel(v); + + iBeginBeforeTrigger = sqlite3VdbeCurrentAddr(v); + (void)sqlite3CodeRowTrigger(pParse, pTrigger, TK_DELETE, 0, + TRIGGER_BEFORE, pTab, -1, oldIdx, orconf, addr, &old_col_mask, 0); + iEndBeforeTrigger = sqlite3VdbeAddOp0(v, OP_Goto); + + iBeginAfterTrigger = sqlite3VdbeCurrentAddr(v); + (void)sqlite3CodeRowTrigger(pParse, pTrigger, TK_DELETE, 0, + TRIGGER_AFTER, pTab, -1, oldIdx, orconf, addr, &old_col_mask, 0); + iEndAfterTrigger = sqlite3VdbeAddOp0(v, OP_Goto); + + sqlite3VdbeJumpHere(v, iGoto); + } /* If we are trying to delete from a view, realize that view into ** a ephemeral table. */ +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) if( isView ){ - Select *pView = sqlite3SelectDup(pTab->pSelect); - sqlite3Select(pParse, pView, SRT_EphemTab, iCur, 0, 0, 0, 0); - sqlite3SelectDelete(pView); + sqlite3MaterializeView(pParse, pTab, pWhere, iCur); + } +#endif + + /* Resolve the column names in the WHERE clause. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + sNC.pSrcList = pTabList; + if( sqlite3ResolveExprNames(&sNC, pWhere) ){ + goto delete_from_cleanup; } /* Initialize the counter of the number of rows deleted, if ** we are counting rows. */ if( db->flags & SQLITE_CountRows ){ - memCnt = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemInt, 0, memCnt); + memCnt = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); } +#ifndef SQLITE_OMIT_TRUNCATE_OPTIMIZATION /* Special case: A DELETE without a WHERE clause deletes everything. 
** It is easier just to erase the whole table. Note, however, that ** this means that the row change count will be incorrect. */ - if( pWhere==0 && !triggers_exist && !IsVirtual(pTab) ){ - if( db->flags & SQLITE_CountRows ){ - /* If counting rows deleted, just count the total number of - ** entries in the table. */ - int endOfLoop = sqlite3VdbeMakeLabel(v); - int addr2; - if( !isView ){ - sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); - } - sqlite3VdbeAddOp(v, OP_Rewind, iCur, sqlite3VdbeCurrentAddr(v)+2); - addr2 = sqlite3VdbeAddOp(v, OP_MemIncr, 1, memCnt); - sqlite3VdbeAddOp(v, OP_Next, iCur, addr2); - sqlite3VdbeResolveLabel(v, endOfLoop); - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + if( rcauth==SQLITE_OK && pWhere==0 && !pTrigger && !IsVirtual(pTab) ){ + assert( !isView ); + sqlite3VdbeAddOp4(v, OP_Clear, pTab->tnum, iDb, memCnt, + pTab->zName, P4_STATIC); + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->pSchema==pTab->pSchema ); + sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb); } - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Clear, pTab->tnum, iDb); - if( !pParse->nested ){ - sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); - } - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - assert( pIdx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp(v, OP_Clear, pIdx->tnum, iDb); - } - } - } + }else +#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ /* The usual case: There is a WHERE clause so we have to scan through ** the table and pick which records to delete. */ - else{ - /* Begin the database scan - */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0); - if( pWInfo==0 ) goto delete_from_cleanup; + { + int iRowid = ++pParse->nMem; /* Used for storing rowid values. */ + int iRowSet = ++pParse->nMem; /* Register for rowset of rows to delete */ + int regRowid; /* Actual register containing rowids */ - /* Remember the rowid of every item to be deleted. + /* Collect rowids of every row to be deleted. */ - sqlite3VdbeAddOp(v, IsVirtual(pTab) ? OP_VRowid : OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_FifoWrite, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0,WHERE_DUPLICATES_OK); + if( pWInfo==0 ) goto delete_from_cleanup; + regRowid = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, iRowid, 0); + sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, regRowid); if( db->flags & SQLITE_CountRows ){ - sqlite3VdbeAddOp(v, OP_MemIncr, 1, memCnt); + sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1); } - - /* End the database scan loop. - */ sqlite3WhereEnd(pWInfo); /* Open the pseudo-table used to store OLD if there are triggers. */ - if( triggers_exist ){ - sqlite3VdbeAddOp(v, OP_OpenPseudo, oldIdx, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, oldIdx, pTab->nCol); + if( pTrigger ){ + sqlite3VdbeAddOp3(v, OP_OpenPseudo, oldIdx, 0, pTab->nCol); } /* Delete every item whose key was written to the list during the @@ -273,99 +403,101 @@ */ end = sqlite3VdbeMakeLabel(v); - /* This is the beginning of the delete loop when there are - ** row triggers. 
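The generated code above collects the rowid of every matching row while the WHERE scan runs (OP_RowSetAdd) and only afterwards loops over the collected set to perform the actual deletes (OP_RowSetRead), so the table is never modified while it is still being scanned. Below is a minimal stand-alone sketch of that two-pass "collect keys, then delete" pattern; the list type and helper names are invented for illustration and are not SQLite's RowSet.

    /* Illustrative only: the two-pass pattern the VDBE code above implements
    ** with OP_RowSetAdd/OP_RowSetRead.  The array is a stand-in for the
    ** RowSet; nothing here is SQLite API. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      long long *a;    /* collected rowids */
      int n;           /* number collected */
      int nAlloc;      /* allocated slots */
    } RowidList;

    static int rowidListAdd(RowidList *p, long long iRowid){
      if( p->n==p->nAlloc ){
        int nNew = p->nAlloc ? p->nAlloc*2 : 8;
        long long *aNew = realloc(p->a, nNew*sizeof(*aNew));
        if( aNew==0 ) return 1;        /* out of memory */
        p->a = aNew;
        p->nAlloc = nNew;
      }
      p->a[p->n++] = iRowid;
      return 0;
    }

    int main(void){
      /* Pass 1: scan and remember which rows match (here: even rowids). */
      RowidList keep = {0,0,0};
      for(long long r=1; r<=10; r++){
        if( r%2==0 ) rowidListAdd(&keep, r);
      }
      /* Pass 2: delete by rowid; the "table" is no longer being scanned,
      ** so the deletions cannot disturb the cursor used in pass 1. */
      for(int i=0; i<keep.n; i++){
        printf("DELETE row %lld\n", keep.a[i]);
      }
      free(keep.a);
      return 0;
    }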
- */ - if( triggers_exist ){ - addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, end); - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); - } - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_RowData, iCur, 0); - sqlite3VdbeAddOp(v, OP_Insert, oldIdx, 0); - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); - } + if( !isView ){ + /* Open cursors for the table we are deleting from and + ** all its indices. + */ + sqlite3OpenTableAndIndices(pParse, pTab, iCur, OP_OpenWrite); + } - (void)sqlite3CodeRowTrigger(pParse, TK_DELETE, 0, TRIGGER_BEFORE, pTab, - -1, oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default, - addr); + /* This is the beginning of the delete loop. If a trigger encounters + ** an IGNORE constraint, it jumps back to here. + */ + if( pTrigger ){ + sqlite3VdbeResolveLabel(v, addr); } + addr = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, end, iRowid); - if( !isView ){ - /* Open cursors for the table we are deleting from and all its - ** indices. If there are row triggers, this happens inside the - ** OP_FifoRead loop because the cursor have to all be closed - ** before the trigger fires. If there are no row triggers, the - ** cursors are opened only once on the outside the loop. + if( pTrigger ){ + int iData = ++pParse->nMem; /* For storing row data of OLD table */ + + /* If the record is no longer present in the table, jump to the + ** next iteration of the loop through the contents of the fifo. */ - sqlite3OpenTableAndIndices(pParse, pTab, iCur, OP_OpenWrite); + sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addr, iRowid); - /* This is the beginning of the delete loop when there are no - ** row triggers */ - if( !triggers_exist ){ - addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, end); + /* Populate the OLD.* pseudo-table */ + if( old_col_mask ){ + sqlite3VdbeAddOp2(v, OP_RowData, iCur, iData); + }else{ + sqlite3VdbeAddOp2(v, OP_Null, 0, iData); } + sqlite3VdbeAddOp3(v, OP_Insert, oldIdx, iData, iRowid); + /* Jump back and run the BEFORE triggers */ + sqlite3VdbeAddOp2(v, OP_Goto, 0, iBeginBeforeTrigger); + sqlite3VdbeJumpHere(v, iEndBeforeTrigger); + } + + if( !isView ){ /* Delete the row */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ - pParse->pVirtualLock = pTab; - sqlite3VdbeOp3(v, OP_VUpdate, 0, 1, (const char*)pTab->pVtab, P3_VTAB); + const char *pVtab = (const char *)pTab->pVtab; + sqlite3VtabMakeWritable(pParse, pTab); + sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iRowid, pVtab, P4_VTAB); }else #endif { - sqlite3GenerateRowDelete(db, v, pTab, iCur, pParse->nested==0); + sqlite3GenerateRowDelete(pParse, pTab, iCur, iRowid, pParse->nested==0); } } /* If there are row triggers, close all cursors then invoke ** the AFTER triggers */ - if( triggers_exist ){ - if( !isView ){ - for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - sqlite3VdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum); - } - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); - } - (void)sqlite3CodeRowTrigger(pParse, TK_DELETE, 0, TRIGGER_AFTER, pTab, -1, - oldIdx, (pParse->trigStack)?pParse->trigStack->orconf:OE_Default, - addr); + if( pTrigger ){ + /* Jump back and run the AFTER triggers */ + sqlite3VdbeAddOp2(v, OP_Goto, 0, iBeginAfterTrigger); + sqlite3VdbeJumpHere(v, iEndAfterTrigger); } /* End of the delete loop */ - sqlite3VdbeAddOp(v, OP_Goto, 0, addr); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addr); sqlite3VdbeResolveLabel(v, end); /* Close the cursors after the loop if there are no row 
triggers */ - if( !triggers_exist && !IsVirtual(pTab) ){ + if( !isView && !IsVirtual(pTab) ){ for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - sqlite3VdbeAddOp(v, OP_Close, iCur + i, pIdx->tnum); + sqlite3VdbeAddOp2(v, OP_Close, iCur + i, pIdx->tnum); } - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + sqlite3VdbeAddOp1(v, OP_Close, iCur); } } + /* Update the sqlite_sequence table by storing the content of the + ** maximum rowid counter values recorded while inserting into + ** autoincrement tables. + */ + if( pParse->nested==0 && pParse->trigStack==0 ){ + sqlite3AutoincrementEnd(pParse); + } + /* ** Return the number of rows that were deleted. If this routine is ** generating code because of a call to sqlite3NestedParse(), do not ** invoke the callback function. */ if( db->flags & SQLITE_CountRows && pParse->nested==0 && !pParse->trigStack ){ - sqlite3VdbeAddOp(v, OP_MemLoad, memCnt, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", P3_STATIC); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); } delete_from_cleanup: sqlite3AuthContextPop(&sContext); - sqlite3SrcListDelete(pTabList); - sqlite3ExprDelete(pWhere); + sqlite3SrcListDelete(db, pTabList); + sqlite3ExprDelete(db, pWhere); return; } @@ -382,26 +514,29 @@ ** 2. Read/write cursors for all indices of pTab must be open as ** cursor number base+i for the i-th index. ** -** 3. The record number of the row to be deleted must be on the top -** of the stack. +** 3. The record number of the row to be deleted must be stored in +** memory cell iRowid. ** ** This routine pops the top of the stack to remove the record number ** and then generates code to remove both the table record and all index ** entries that point to that record. */ void sqlite3GenerateRowDelete( - sqlite3 *db, /* The database containing the index */ - Vdbe *v, /* Generate code into this VDBE */ + Parse *pParse, /* Parsing context */ Table *pTab, /* Table containing the row to be deleted */ int iCur, /* Cursor number for the table */ + int iRowid, /* Memory cell that contains the rowid to delete */ int count /* Increment the row change counter */ ){ int addr; - addr = sqlite3VdbeAddOp(v, OP_NotExists, iCur, 0); - sqlite3GenerateRowIndexDelete(v, pTab, iCur, 0); - sqlite3VdbeAddOp(v, OP_Delete, iCur, (count?OPFLAG_NCHANGE:0)); + Vdbe *v; + + v = pParse->pVdbe; + addr = sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowid); + sqlite3GenerateRowIndexDelete(pParse, pTab, iCur, 0); + sqlite3VdbeAddOp2(v, OP_Delete, iCur, (count?OPFLAG_NCHANGE:0)); if( count ){ - sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); + sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_STATIC); } sqlite3VdbeJumpHere(v, addr); } @@ -423,45 +558,68 @@ ** deleted. 
*/ void sqlite3GenerateRowIndexDelete( - Vdbe *v, /* Generate code into this VDBE */ + Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* Table containing the row to be deleted */ int iCur, /* Cursor number for the table */ - char *aIdxUsed /* Only delete if aIdxUsed!=0 && aIdxUsed[i]!=0 */ + int *aRegIdx /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */ ){ int i; Index *pIdx; + int r1; for(i=1, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - if( aIdxUsed!=0 && aIdxUsed[i-1]==0 ) continue; - sqlite3GenerateIndexKey(v, pIdx, iCur); - sqlite3VdbeAddOp(v, OP_IdxDelete, iCur+i, 0); + if( aRegIdx!=0 && aRegIdx[i-1]==0 ) continue; + r1 = sqlite3GenerateIndexKey(pParse, pIdx, iCur, 0, 0); + sqlite3VdbeAddOp3(pParse->pVdbe, OP_IdxDelete, iCur+i, r1,pIdx->nColumn+1); } } /* -** Generate code that will assemble an index key and put it on the top -** of the tack. The key with be for index pIdx which is an index on pTab. +** Generate code that will assemble an index key and put it in register +** regOut. The key with be for index pIdx which is an index on pTab. ** iCur is the index of a cursor open on the pTab table and pointing to ** the entry that needs indexing. +** +** Return a register number which is the first in a block of +** registers that holds the elements of the index key. The +** block of registers has already been deallocated by the time +** this routine returns. */ -void sqlite3GenerateIndexKey( - Vdbe *v, /* Generate code into this VDBE */ +int sqlite3GenerateIndexKey( + Parse *pParse, /* Parsing context */ Index *pIdx, /* The index for which to generate a key */ - int iCur /* Cursor number for the pIdx->pTable table */ + int iCur, /* Cursor number for the pIdx->pTable table */ + int regOut, /* Write the new index key to this register */ + int doMakeRec /* Run the OP_MakeRecord instruction if true */ ){ + Vdbe *v = pParse->pVdbe; int j; Table *pTab = pIdx->pTable; + int regBase; + int nCol; - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - for(j=0; jnColumn; j++){ + nCol = pIdx->nColumn; + regBase = sqlite3GetTempRange(pParse, nCol+1); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, regBase+nCol); + for(j=0; jaiColumn[j]; if( idx==pTab->iPKey ){ - sqlite3VdbeAddOp(v, OP_Dup, j, 0); + sqlite3VdbeAddOp2(v, OP_SCopy, regBase+nCol, regBase+j); }else{ - sqlite3VdbeAddOp(v, OP_Column, iCur, idx); + sqlite3VdbeAddOp3(v, OP_Column, iCur, idx, regBase+j); sqlite3ColumnDefault(v, pTab, idx); } } - sqlite3VdbeAddOp(v, OP_MakeIdxRec, pIdx->nColumn, 0); - sqlite3IndexAffinityStr(v, pIdx); + if( doMakeRec ){ + sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol+1, regOut); + sqlite3IndexAffinityStr(v, pIdx); + sqlite3ExprCacheAffinityChange(pParse, regBase, nCol+1); + } + sqlite3ReleaseTempRange(pParse, regBase, nCol+1); + return regBase; } + +/* Make sure "isView" gets undefined in case this file becomes part of +** the amalgamation - so that subsequent files do not see isView as a +** macro. */ +#undef isView diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/dump.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/src/dump.txt --- sqlite3-3.4.2/src/dump.txt 2007-08-10 15:01:04.000000000 +0100 +++ sqlite3-3.6.16/src/dump.txt 1970-01-01 01:00:00.000000000 +0100 @@ -1,4469 +0,0 @@ -1.1 (drh 11-Apr-01): /* -1.20 (drh 16-Sep-01): ** 2001 September 15 -1.1 (drh 11-Apr-01): ** -1.20 (drh 16-Sep-01): ** The author disclaims copyright to this source code. In place of -1.20 (drh 16-Sep-01): ** a legal notice, here is a blessing: -1.20 (drh 16-Sep-01): ** -1.20 (drh 16-Sep-01): ** May you do good and not evil. 
-1.20 (drh 16-Sep-01): ** May you find forgiveness for yourself and forgive others. -1.20 (drh 16-Sep-01): ** May you share freely, never taking more than you give. -1.1 (drh 11-Apr-01): ** -1.1 (drh 11-Apr-01): ************************************************************************* -1.20 (drh 16-Sep-01): ** This is the implementation of the page cache subsystem or "pager". -1.1 (drh 11-Apr-01): ** -1.20 (drh 16-Sep-01): ** The pager is used to access a database disk file. It implements -1.20 (drh 16-Sep-01): ** atomic commit and rollback through the use of a journal file that -1.20 (drh 16-Sep-01): ** is separate from the database file. The pager also implements file -1.20 (drh 16-Sep-01): ** locking to prevent two processes from writing the same database -1.20 (drh 16-Sep-01): ** file simultaneously, or one process from reading the database while -1.20 (drh 16-Sep-01): ** another is writing. -1.1 (drh 11-Apr-01): ** -1.352 (drh 07-Aug-07): ** @(#) $Id: pager.c,v 1.351 2007/07/20 00:33:36 drh Exp $ -1.1 (drh 11-Apr-01): */ -1.202 (drh 28-Apr-05): #ifndef SQLITE_OMIT_DISKIO -1.3 (drh 15-Apr-01): #include "sqliteInt.h" -1.165 (drh 01-Oct-04): #include "os.h" -1.1 (drh 11-Apr-01): #include "pager.h" -1.1 (drh 11-Apr-01): #include -1.3 (drh 15-Apr-01): #include -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.68 (drh 16-Jan-03): ** Macros for troubleshooting. Normally turned off -1.68 (drh 16-Jan-03): */ -1.120 (danielk1 10-Jun-04): #if 0 -1.277 (drh 18-Dec-06): #define sqlite3DebugPrintf printf -1.300 (drh 26-Mar-07): #define PAGERTRACE1(X) sqlite3DebugPrintf(X) -1.300 (drh 26-Mar-07): #define PAGERTRACE2(X,Y) sqlite3DebugPrintf(X,Y) -1.300 (drh 26-Mar-07): #define PAGERTRACE3(X,Y,Z) sqlite3DebugPrintf(X,Y,Z) -1.300 (drh 26-Mar-07): #define PAGERTRACE4(X,Y,Z,W) sqlite3DebugPrintf(X,Y,Z,W) -1.300 (drh 26-Mar-07): #define PAGERTRACE5(X,Y,Z,W,V) sqlite3DebugPrintf(X,Y,Z,W,V) -1.68 (drh 16-Jan-03): #else -1.300 (drh 26-Mar-07): #define PAGERTRACE1(X) -1.300 (drh 26-Mar-07): #define PAGERTRACE2(X,Y) -1.300 (drh 26-Mar-07): #define PAGERTRACE3(X,Y,Z) -1.300 (drh 26-Mar-07): #define PAGERTRACE4(X,Y,Z,W) -1.300 (drh 26-Mar-07): #define PAGERTRACE5(X,Y,Z,W,V) -1.68 (drh 16-Jan-03): #endif -1.68 (drh 16-Jan-03): -1.175 (danielk1 08-Nov-04): /* -1.300 (drh 26-Mar-07): ** The following two macros are used within the PAGERTRACEX() macros above -1.220 (drh 26-Nov-05): ** to print out file-descriptors. -1.175 (danielk1 08-Nov-04): ** -1.175 (danielk1 08-Nov-04): ** PAGERID() takes a pointer to a Pager struct as it's argument. The -1.175 (danielk1 08-Nov-04): ** associated file-descriptor is returned. FILEHANDLEID() takes an OsFile -1.175 (danielk1 08-Nov-04): ** struct as it's argument. -1.175 (danielk1 08-Nov-04): */ -1.261 (drh 06-Mar-06): #define PAGERID(p) ((int)(p->fd)) -1.261 (drh 06-Mar-06): #define FILEHANDLEID(fd) ((int)fd) -1.68 (drh 16-Jan-03): -1.68 (drh 16-Jan-03): /* -1.1 (drh 11-Apr-01): ** The page cache as a whole is always in one of the following -1.1 (drh 11-Apr-01): ** states: -1.1 (drh 11-Apr-01): ** -1.115 (drh 09-Jun-04): ** PAGER_UNLOCK The page cache is not currently reading or -1.1 (drh 11-Apr-01): ** writing the database file. There is no -1.1 (drh 11-Apr-01): ** data held in memory. This is the initial -1.1 (drh 11-Apr-01): ** state. -1.1 (drh 11-Apr-01): ** -1.115 (drh 09-Jun-04): ** PAGER_SHARED The page cache is reading the database. -1.1 (drh 11-Apr-01): ** Writing is not permitted. 
There can be -1.1 (drh 11-Apr-01): ** multiple readers accessing the same database -1.2 (drh 14-Apr-01): ** file at the same time. -1.1 (drh 11-Apr-01): ** -1.123 (drh 10-Jun-04): ** PAGER_RESERVED This process has reserved the database for writing -1.123 (drh 10-Jun-04): ** but has not yet made any changes. Only one process -1.123 (drh 10-Jun-04): ** at a time can reserve the database. The original -1.123 (drh 10-Jun-04): ** database file has not been modified so other -1.123 (drh 10-Jun-04): ** processes may still be reading the on-disk -1.115 (drh 09-Jun-04): ** database file. -1.115 (drh 09-Jun-04): ** -1.115 (drh 09-Jun-04): ** PAGER_EXCLUSIVE The page cache is writing the database. -1.1 (drh 11-Apr-01): ** Access is exclusive. No other processes or -1.1 (drh 11-Apr-01): ** threads can be reading or writing while one -1.1 (drh 11-Apr-01): ** process is writing. -1.1 (drh 11-Apr-01): ** -1.125 (danielk1 14-Jun-04): ** PAGER_SYNCED The pager moves to this state from PAGER_EXCLUSIVE -1.125 (danielk1 14-Jun-04): ** after all dirty pages have been written to the -1.125 (danielk1 14-Jun-04): ** database file and the file has been synced to -1.309 (drh 30-Mar-07): ** disk. All that remains to do is to remove or -1.309 (drh 30-Mar-07): ** truncate the journal file and the transaction -1.309 (drh 30-Mar-07): ** will be committed. -1.125 (danielk1 14-Jun-04): ** -1.115 (drh 09-Jun-04): ** The page cache comes up in PAGER_UNLOCK. The first time a -1.292 (danielk1 19-Mar-07): ** sqlite3PagerGet() occurs, the state transitions to PAGER_SHARED. -1.1 (drh 11-Apr-01): ** After all pages have been released using sqlite_page_unref(), -1.115 (drh 09-Jun-04): ** the state transitions back to PAGER_UNLOCK. The first time -1.292 (danielk1 19-Mar-07): ** that sqlite3PagerWrite() is called, the state transitions to -1.309 (drh 30-Mar-07): ** PAGER_RESERVED. (Note that sqlite3PagerWrite() can only be -1.6 (drh 21-May-01): ** called on an outstanding page which means that the pager must -1.115 (drh 09-Jun-04): ** be in PAGER_SHARED before it transitions to PAGER_RESERVED.) -1.309 (drh 30-Mar-07): ** PAGER_RESERVED means that there is an open rollback journal. -1.309 (drh 30-Mar-07): ** The transition to PAGER_EXCLUSIVE occurs before any changes -1.309 (drh 30-Mar-07): ** are made to the database file, though writes to the rollback -1.309 (drh 30-Mar-07): ** journal occurs with just PAGER_RESERVED. After an sqlite3PagerRollback() -1.309 (drh 30-Mar-07): ** or sqlite3PagerCommitPhaseTwo(), the state can go back to PAGER_SHARED, -1.309 (drh 30-Mar-07): ** or it can stay at PAGER_EXCLUSIVE if we are in exclusive access mode. -1.1 (drh 11-Apr-01): */ -1.115 (drh 09-Jun-04): #define PAGER_UNLOCK 0 -1.167 (drh 05-Oct-04): #define PAGER_SHARED 1 /* same as SHARED_LOCK */ -1.167 (drh 05-Oct-04): #define PAGER_RESERVED 2 /* same as RESERVED_LOCK */ -1.167 (drh 05-Oct-04): #define PAGER_EXCLUSIVE 4 /* same as EXCLUSIVE_LOCK */ -1.167 (drh 05-Oct-04): #define PAGER_SYNCED 5 -1.1 (drh 11-Apr-01): -1.167 (drh 05-Oct-04): /* -1.167 (drh 05-Oct-04): ** If the SQLITE_BUSY_RESERVED_LOCK macro is set to true at compile-time, -1.167 (drh 05-Oct-04): ** then failed attempts to get a reserved lock will invoke the busy callback. -1.167 (drh 05-Oct-04): ** This is off by default. To see why, consider the following scenario: -1.167 (drh 05-Oct-04): ** -1.167 (drh 05-Oct-04): ** Suppose thread A already has a shared lock and wants a reserved lock. 
-1.167 (drh 05-Oct-04): ** Thread B already has a reserved lock and wants an exclusive lock. If -1.167 (drh 05-Oct-04): ** both threads are using their busy callbacks, it might be a long time -1.167 (drh 05-Oct-04): ** be for one of the threads give up and allows the other to proceed. -1.167 (drh 05-Oct-04): ** But if the thread trying to get the reserved lock gives up quickly -1.167 (drh 05-Oct-04): ** (if it never invokes its busy callback) then the contention will be -1.167 (drh 05-Oct-04): ** resolved quickly. -1.167 (drh 05-Oct-04): */ -1.167 (drh 05-Oct-04): #ifndef SQLITE_BUSY_RESERVED_LOCK -1.167 (drh 05-Oct-04): # define SQLITE_BUSY_RESERVED_LOCK 0 -1.167 (drh 05-Oct-04): #endif -1.3 (drh 15-Apr-01): -1.1 (drh 11-Apr-01): /* -1.168 (drh 22-Oct-04): ** This macro rounds values up so that if the value is an address it -1.168 (drh 22-Oct-04): ** is guaranteed to be an address that is aligned to an 8-byte boundary. -1.168 (drh 22-Oct-04): */ -1.168 (drh 22-Oct-04): #define FORCE_ALIGNMENT(X) (((X)+7)&~7) -1.168 (drh 22-Oct-04): -1.168 (drh 22-Oct-04): /* -1.1 (drh 11-Apr-01): ** Each in-memory image of a page begins with the following header. -1.8 (drh 02-Jun-01): ** This header is only visible to this pager module. The client -1.8 (drh 02-Jun-01): ** code that calls pager sees only the data that follows the header. -1.95 (drh 08-Feb-04): ** -1.292 (danielk1 19-Mar-07): ** Client code should call sqlite3PagerWrite() on a page prior to making -1.292 (danielk1 19-Mar-07): ** any modifications to that page. The first time sqlite3PagerWrite() -1.95 (drh 08-Feb-04): ** is called, the original page contents are written into the rollback -1.95 (drh 08-Feb-04): ** journal and PgHdr.inJournal and PgHdr.needSync are set. Later, once -1.95 (drh 08-Feb-04): ** the journal page has made it onto the disk surface, PgHdr.needSync -1.95 (drh 08-Feb-04): ** is cleared. The modified page cannot be written back into the original -1.95 (drh 08-Feb-04): ** database file until the journal pages has been synced to disk and the -1.95 (drh 08-Feb-04): ** PgHdr.needSync has been cleared. -1.95 (drh 08-Feb-04): ** -1.292 (danielk1 19-Mar-07): ** The PgHdr.dirty flag is set when sqlite3PagerWrite() is called and -1.95 (drh 08-Feb-04): ** is cleared again when the page content is written back to the original -1.95 (drh 08-Feb-04): ** database file. -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** Details of important structure elements: -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** needSync -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** If this is true, this means that it is not safe to write the page -1.346 (drh 16-Jun-07): ** content to the database because the original content needed -1.346 (drh 16-Jun-07): ** for rollback has not by synced to the main rollback journal. -1.346 (drh 16-Jun-07): ** The original content may have been written to the rollback journal -1.346 (drh 16-Jun-07): ** but it has not yet been synced. So we cannot write to the database -1.346 (drh 16-Jun-07): ** file because power failure might cause the page in the journal file -1.346 (drh 16-Jun-07): ** to never reach the disk. It is as if the write to the journal file -1.346 (drh 16-Jun-07): ** does not occur until the journal file is synced. -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** This flag is false if the page content exactly matches what -1.346 (drh 16-Jun-07): ** currently exists in the database file. 
The needSync flag is also -1.346 (drh 16-Jun-07): ** false if the original content has been written to the main rollback -1.346 (drh 16-Jun-07): ** journal and synced. If the page represents a new page that has -1.346 (drh 16-Jun-07): ** been added onto the end of the database during the current -1.346 (drh 16-Jun-07): ** transaction, the needSync flag is true until the original database -1.346 (drh 16-Jun-07): ** size in the journal header has been synced to disk. -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** inJournal -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** This is true if the original page has been written into the main -1.346 (drh 16-Jun-07): ** rollback journal. This is always false for new pages added to -1.346 (drh 16-Jun-07): ** the end of the database file during the current transaction. -1.346 (drh 16-Jun-07): ** And this flag says nothing about whether or not the journal -1.346 (drh 16-Jun-07): ** has been synced to disk. For pages that are in the original -1.346 (drh 16-Jun-07): ** database file, the following expression should always be true: -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** inJournal = (pPager->aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8))!=0 -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** The pPager->aInJournal[] array is only valid for the original -1.346 (drh 16-Jun-07): ** pages of the database, not new pages that are added to the end -1.346 (drh 16-Jun-07): ** of the database, so obviously the above expression cannot be -1.346 (drh 16-Jun-07): ** valid for new pages. For new pages inJournal is always 0. -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** dirty -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** When true, this means that the content of the page has been -1.346 (drh 16-Jun-07): ** modified and needs to be written back to the database file. -1.346 (drh 16-Jun-07): ** If false, it means that either the content of the page is -1.346 (drh 16-Jun-07): ** unchanged or else the content is unimportant and we do not -1.346 (drh 16-Jun-07): ** care whether or not it is preserved. -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** alwaysRollback -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** This means that the sqlite3PagerDontRollback() API should be -1.346 (drh 16-Jun-07): ** ignored for this page. The DontRollback() API attempts to say -1.346 (drh 16-Jun-07): ** that the content of the page on disk is unimportant (it is an -1.346 (drh 16-Jun-07): ** unused page on the freelist) so that it is unnecessary to -1.346 (drh 16-Jun-07): ** rollback changes to this page because the content of the page -1.346 (drh 16-Jun-07): ** can change without changing the meaning of the database. This -1.346 (drh 16-Jun-07): ** flag overrides any DontRollback() attempt. This flag is set -1.346 (drh 16-Jun-07): ** when a page that originally contained valid data is added to -1.346 (drh 16-Jun-07): ** the freelist. Later in the same transaction, this page might -1.346 (drh 16-Jun-07): ** be pulled from the freelist and reused for something different -1.346 (drh 16-Jun-07): ** and at that point the DontRollback() API will be called because -1.346 (drh 16-Jun-07): ** pages taken from the freelist do not need to be protected by -1.346 (drh 16-Jun-07): ** the rollback journal. But this flag says that the page was -1.346 (drh 16-Jun-07): ** not originally part of the freelist so that it still needs to -1.346 (drh 16-Jun-07): ** be rolled back in spite of any subsequent DontRollback() calls. 
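The inJournal expression quoted above assumes one bit per original database page in Pager.aInJournal, with 1-based page numbers mapped to bit (pgno-1). A minimal self-contained sketch of that bitmap arithmetic follows; the array size and helper names are invented for illustration.

    /* One-bit-per-page bitmap in the style of Pager.aInJournal.  Page
    ** numbers are 1-based, so page pgno uses bit (pgno-1).  Helper names
    ** are hypothetical; only the bit arithmetic mirrors the comment above. */
    #include <stdio.h>
    #include <string.h>

    #define N_PAGE 100                       /* pages tracked in this example */
    static unsigned char aInJournal[(N_PAGE+7)/8];

    static void setInJournal(unsigned pgno){
      aInJournal[(pgno-1)/8] |= (unsigned char)(1<<((pgno-1)%8));
    }
    static int isInJournal(unsigned pgno){
      return (aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8)))!=0;
    }

    int main(void){
      memset(aInJournal, 0, sizeof(aInJournal));
      setInJournal(1);
      setInJournal(9);
      printf("page 1: %d  page 2: %d  page 9: %d\n",
             isInJournal(1), isInJournal(2), isInJournal(9));   /* prints 1 0 1 */
      return 0;
    }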
-1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** needRead -1.346 (drh 16-Jun-07): ** -1.346 (drh 16-Jun-07): ** This flag means (when true) that the content of the page has -1.346 (drh 16-Jun-07): ** not yet been loaded from disk. The in-memory content is just -1.346 (drh 16-Jun-07): ** garbage. (Actually, we zero the content, but you should not -1.346 (drh 16-Jun-07): ** make any assumptions about the content nevertheless.) If the -1.346 (drh 16-Jun-07): ** content is needed in the future, it should be read from the -1.346 (drh 16-Jun-07): ** original database file. -1.1 (drh 11-Apr-01): */ -1.3 (drh 15-Apr-01): typedef struct PgHdr PgHdr; -1.1 (drh 11-Apr-01): struct PgHdr { -1.1 (drh 11-Apr-01): Pager *pPager; /* The pager to which this page belongs */ -1.1 (drh 11-Apr-01): Pgno pgno; /* The page number for this page */ -1.2 (drh 14-Apr-01): PgHdr *pNextHash, *pPrevHash; /* Hash collision chain for PgHdr.pgno */ -1.3 (drh 15-Apr-01): PgHdr *pNextFree, *pPrevFree; /* Freelist of pages where nRef==0 */ -1.107 (drh 12-May-04): PgHdr *pNextAll; /* A list of all pages */ -1.49 (drh 07-Jul-02): u8 inJournal; /* TRUE if has been written to journal */ -1.49 (drh 07-Jul-02): u8 dirty; /* TRUE if we need to write back changes */ -1.68 (drh 16-Jan-03): u8 needSync; /* Sync journal before writing this page */ -1.307 (drh 30-Mar-07): u8 alwaysRollback; /* Disable DontRollback() for this page */ -1.327 (drh 13-Apr-07): u8 needRead; /* Read content if PagerWrite() is called */ -1.107 (drh 12-May-04): short int nRef; /* Number of users of this page */ -1.269 (drh 15-Jun-06): PgHdr *pDirty, *pPrevDirty; /* Dirty pages */ -1.267 (drh 03-May-06): u32 notUsed; /* Buffer space */ -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): u32 pageHash; -1.189 (danielk1 15-Feb-05): #endif -1.203 (drh 20-May-05): /* pPager->pageSize bytes of page data follow this header */ -1.75 (drh 12-Feb-03): /* Pager.nExtra bytes of local data follow the page data */ -1.1 (drh 11-Apr-01): }; -1.1 (drh 11-Apr-01): -1.107 (drh 12-May-04): /* -1.107 (drh 12-May-04): ** For an in-memory only database, some extra information is recorded about -1.107 (drh 12-May-04): ** each page so that changes can be rolled back. (Journal files are not -1.107 (drh 12-May-04): ** used for in-memory databases.) The following information is added to -1.107 (drh 12-May-04): ** the end of every EXTRA block for in-memory databases. -1.107 (drh 12-May-04): ** -1.107 (drh 12-May-04): ** This information could have been added directly to the PgHdr structure. -1.107 (drh 12-May-04): ** But then it would take up an extra 8 bytes of storage on every PgHdr -1.107 (drh 12-May-04): ** even for disk-based databases. Splitting it out saves 8 bytes. This -1.107 (drh 12-May-04): ** is only a savings of 0.8% but those percentages add up. -1.107 (drh 12-May-04): */ -1.107 (drh 12-May-04): typedef struct PgHistory PgHistory; -1.107 (drh 12-May-04): struct PgHistory { -1.107 (drh 12-May-04): u8 *pOrig; /* Original page text. 
Restore to this on a full rollback */ -1.107 (drh 12-May-04): u8 *pStmt; /* Text as it was at the beginning of the current statement */ -1.325 (danielk1 07-Apr-07): PgHdr *pNextStmt, *pPrevStmt; /* List of pages in the statement journal */ -1.325 (danielk1 07-Apr-07): u8 inStmt; /* TRUE if in the statement subjournal */ -1.107 (drh 12-May-04): }; -1.99 (drh 11-Feb-04): -1.99 (drh 11-Feb-04): /* -1.99 (drh 11-Feb-04): ** A macro used for invoking the codec if there is one -1.99 (drh 11-Feb-04): */ -1.99 (drh 11-Feb-04): #ifdef SQLITE_HAS_CODEC -1.261 (drh 06-Mar-06): # define CODEC1(P,D,N,X) if( P->xCodec!=0 ){ P->xCodec(P->pCodecArg,D,N,X); } -1.261 (drh 06-Mar-06): # define CODEC2(P,D,N,X) ((char*)(P->xCodec!=0?P->xCodec(P->pCodecArg,D,N,X):D)) -1.99 (drh 11-Feb-04): #else -1.261 (drh 06-Mar-06): # define CODEC1(P,D,N,X) /* NO-OP */ -1.261 (drh 06-Mar-06): # define CODEC2(P,D,N,X) ((char*)D) -1.99 (drh 11-Feb-04): #endif -1.99 (drh 11-Feb-04): -1.1 (drh 11-Apr-01): /* -1.2 (drh 14-Apr-01): ** Convert a pointer to a PgHdr into a pointer to its data -1.2 (drh 14-Apr-01): ** and back again. -1.1 (drh 11-Apr-01): */ -1.1 (drh 11-Apr-01): #define PGHDR_TO_DATA(P) ((void*)(&(P)[1])) -1.1 (drh 11-Apr-01): #define DATA_TO_PGHDR(D) (&((PgHdr*)(D))[-1]) -1.203 (drh 20-May-05): #define PGHDR_TO_EXTRA(G,P) ((void*)&((char*)(&(G)[1]))[(P)->pageSize]) -1.107 (drh 12-May-04): #define PGHDR_TO_HIST(P,PGR) \ -1.203 (drh 20-May-05): ((PgHistory*)&((char*)(&(P)[1]))[(PGR)->pageSize+(PGR)->nExtra]) -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.1 (drh 11-Apr-01): ** A open page cache is an instance of the following structure. -1.238 (danielk1 16-Jan-06): ** -1.311 (drh 30-Mar-07): ** Pager.errCode may be set to SQLITE_IOERR, SQLITE_CORRUPT, or -1.238 (danielk1 16-Jan-06): ** or SQLITE_FULL. Once one of the first three errors occurs, it persists -1.238 (danielk1 16-Jan-06): ** and is returned as the result of every major pager API call. The -1.238 (danielk1 16-Jan-06): ** SQLITE_FULL return code is slightly different. It persists only until the -1.238 (danielk1 16-Jan-06): ** next successful rollback is performed on the pager cache. Also, -1.292 (danielk1 19-Mar-07): ** SQLITE_FULL does not affect the sqlite3PagerGet() and sqlite3PagerLookup() -1.238 (danielk1 16-Jan-06): ** APIs, they may still be used successfully. -1.1 (drh 11-Apr-01): */ -1.1 (drh 11-Apr-01): struct Pager { -1.42 (drh 05-Mar-02): u8 journalOpen; /* True if journal file descriptors is valid */ -1.94 (drh 08-Feb-04): u8 journalStarted; /* True if header of journal is synced */ -1.94 (drh 08-Feb-04): u8 useJournal; /* Use a rollback journal on this file */ -1.188 (drh 06-Feb-05): u8 noReadlock; /* Do not bother to obtain readlocks */ -1.107 (drh 12-May-04): u8 stmtOpen; /* True if the statement subjournal is open */ -1.107 (drh 12-May-04): u8 stmtInUse; /* True we are in a statement subtransaction */ -1.107 (drh 12-May-04): u8 stmtAutoopen; /* Open stmt journal when main journal is opened*/ -1.42 (drh 05-Mar-02): u8 noSync; /* Do not sync the journal if true */ -1.73 (drh 11-Feb-03): u8 fullSync; /* Do extra syncs of the journal for robustness */ -1.258 (drh 11-Feb-06): u8 full_fsync; /* Use F_FULLFSYNC when available */ -1.115 (drh 09-Jun-04): u8 state; /* PAGER_UNLOCK, _SHARED, _RESERVED, etc. 
*/ -1.42 (drh 05-Mar-02): u8 tempFile; /* zFilename is a temporary file */ -1.42 (drh 05-Mar-02): u8 readOnly; /* True for a read-only database */ -1.42 (drh 05-Mar-02): u8 needSync; /* True if an fsync() is needed on the journal */ -1.115 (drh 09-Jun-04): u8 dirtyCache; /* True if cached pages have changed */ -1.307 (drh 30-Mar-07): u8 alwaysRollback; /* Disable DontRollback() for all pages */ -1.107 (drh 12-May-04): u8 memDb; /* True to inhibit all file I/O */ -1.204 (drh 20-May-05): u8 setMaster; /* True if a m-j name has been written to jrnl */ -1.307 (drh 30-Mar-07): u8 doNotSync; /* Boolean. While true, do not spill the cache */ -1.307 (drh 30-Mar-07): u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ -1.307 (drh 30-Mar-07): u8 changeCountDone; /* Set after incrementing the change-counter */ -1.273 (drh 15-Sep-06): int errCode; /* One of several kinds of errors */ -1.205 (drh 21-May-05): int dbSize; /* Number of pages in the file */ -1.205 (drh 21-May-05): int origDbSize; /* dbSize before the current change */ -1.205 (drh 21-May-05): int stmtSize; /* Size of database (in pages) at stmt_begin() */ -1.205 (drh 21-May-05): int nRec; /* Number of pages written to the journal */ -1.205 (drh 21-May-05): u32 cksumInit; /* Quasi-random value added to every checksum */ -1.205 (drh 21-May-05): int stmtNRec; /* Number of records in stmt subjournal */ -1.205 (drh 21-May-05): int nExtra; /* Add this many bytes to each in-memory page */ -1.205 (drh 21-May-05): int pageSize; /* Number of bytes in a page */ -1.205 (drh 21-May-05): int nPage; /* Total number of in-memory pages */ -1.205 (drh 21-May-05): int nRef; /* Number of in-memory pages with PgHdr.nRef>0 */ -1.205 (drh 21-May-05): int mxPage; /* Maximum number of pages to hold in cache */ -1.337 (drh 08-May-07): Pgno mxPgno; /* Maximum allowed size of the database */ -1.42 (drh 05-Mar-02): u8 *aInJournal; /* One bit for each page in the database file */ -1.107 (drh 12-May-04): u8 *aInStmt; /* One bit for each page in the database */ -1.205 (drh 21-May-05): char *zFilename; /* Name of the database file */ -1.205 (drh 21-May-05): char *zJournal; /* Name of the journal file */ -1.205 (drh 21-May-05): char *zDirectory; /* Directory hold database and journal files */ -1.221 (drh 29-Nov-05): OsFile *fd, *jfd; /* File descriptors for database and journal */ -1.221 (drh 29-Nov-05): OsFile *stfd; /* File descriptor for the statement subjournal*/ -1.123 (drh 10-Jun-04): BusyHandler *pBusyHandler; /* Pointer to sqlite.busyHandler */ -1.1 (drh 11-Apr-01): PgHdr *pFirst, *pLast; /* List of free pages */ -1.69 (drh 21-Jan-03): PgHdr *pFirstSynced; /* First free page with PgHdr.needSync==0 */ -1.3 (drh 15-Apr-01): PgHdr *pAll; /* List of all pages */ -1.107 (drh 12-May-04): PgHdr *pStmt; /* List of pages in the statement subjournal */ -1.267 (drh 03-May-06): PgHdr *pDirty; /* List of all dirty pages */ -1.165 (drh 01-Oct-04): i64 journalOff; /* Current byte offset in the journal file */ -1.165 (drh 01-Oct-04): i64 journalHdr; /* Byte offset to previous journal header */ -1.165 (drh 01-Oct-04): i64 stmtHdrOff; /* First journal header written this statement */ -1.165 (drh 01-Oct-04): i64 stmtCksum; /* cksumInit when statement was started */ -1.204 (drh 20-May-05): i64 stmtJSize; /* Size of journal at stmt_begin() */ -1.138 (danielk1 25-Jun-04): int sectorSize; /* Assumed sector size during rollback */ -1.205 (drh 21-May-05): #ifdef SQLITE_TEST -1.319 (drh 05-Apr-07): int nHit, nMiss; /* Cache hits and missing */ -1.319 (drh 05-Apr-07): int nRead, 
nWrite; /* Database pages read/written */ -1.205 (drh 21-May-05): #endif -1.292 (danielk1 19-Mar-07): void (*xDestructor)(DbPage*,int); /* Call this routine when freeing pages */ -1.292 (danielk1 19-Mar-07): void (*xReiniter)(DbPage*,int); /* Call this routine when reloading pages */ -1.319 (drh 05-Apr-07): #ifdef SQLITE_HAS_CODEC -1.261 (drh 06-Mar-06): void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ -1.204 (drh 20-May-05): void *pCodecArg; /* First argument to xCodec() */ -1.319 (drh 05-Apr-07): #endif -1.268 (drh 07-May-06): int nHash; /* Size of the pager hash table */ -1.268 (drh 07-May-06): PgHdr **aHash; /* Hash table to map page number to PgHdr */ -1.236 (drh 11-Jan-06): #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -1.225 (danielk1 18-Dec-05): Pager *pNext; /* Linked list of pagers in this thread */ -1.225 (danielk1 18-Dec-05): #endif -1.286 (danielk1 06-Mar-07): char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ -1.329 (drh 16-Apr-07): char dbFileVers[16]; /* Changes whenever database file changes */ -1.3 (drh 15-Apr-01): }; -1.3 (drh 15-Apr-01): -1.3 (drh 15-Apr-01): /* -1.327 (drh 13-Apr-07): ** The following global variables hold counters used for -1.327 (drh 13-Apr-07): ** testing purposes only. These variables do not exist in -1.327 (drh 13-Apr-07): ** a non-testing build. These variables are not thread-safe. -1.205 (drh 21-May-05): */ -1.205 (drh 21-May-05): #ifdef SQLITE_TEST -1.327 (drh 13-Apr-07): int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ -1.327 (drh 13-Apr-07): int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ -1.327 (drh 13-Apr-07): int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ -1.327 (drh 13-Apr-07): int sqlite3_pager_pgfree_count = 0; /* Number of cache pages freed */ -1.327 (drh 13-Apr-07): # define PAGER_INCR(v) v++ -1.205 (drh 21-May-05): #else -1.327 (drh 13-Apr-07): # define PAGER_INCR(v) -1.205 (drh 21-May-05): #endif -1.205 (drh 21-May-05): -1.327 (drh 13-Apr-07): -1.327 (drh 13-Apr-07): -1.205 (drh 21-May-05): /* -1.14 (drh 13-Sep-01): ** Journal files begin with the following magic string. The data -1.14 (drh 13-Sep-01): ** was obtained from /dev/random. It is used only as a sanity check. -1.50 (drh 12-Aug-02): ** -1.116 (drh 09-Jun-04): ** Since version 2.8.0, the journal format contains additional sanity -1.116 (drh 09-Jun-04): ** checking information. If the power fails while the journal is begin -1.116 (drh 09-Jun-04): ** written, semi-random garbage data might appear in the journal -1.116 (drh 09-Jun-04): ** file after power is restored. If an attempt is then made -1.73 (drh 11-Feb-03): ** to roll the journal back, the database could be corrupted. The additional -1.73 (drh 11-Feb-03): ** sanity checking data is an attempt to discover the garbage in the -1.73 (drh 11-Feb-03): ** journal and ignore it. -1.73 (drh 11-Feb-03): ** -1.116 (drh 09-Jun-04): ** The sanity checking information for the new journal format consists -1.73 (drh 11-Feb-03): ** of a 32-bit checksum on each page of data. The checksum covers both -1.152 (drh 22-Jul-04): ** the page number and the pPager->pageSize bytes of data for the page. -1.73 (drh 11-Feb-03): ** This cksum is initialized to a 32-bit random value that appears in the -1.73 (drh 11-Feb-03): ** journal file right after the header. 
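As a rough illustration of the point being made here, the sketch below computes a toy per-page checksum that folds a per-journal random seed into every page record; it is deliberately simplified and is not the checksum algorithm the journal format actually uses.

    /* Simplified illustration of a seeded per-page checksum.  NOT SQLite's
    ** algorithm; it only shows why seeding each journal with a fresh random
    ** value (cksumInit) makes leftover data from old journals unlikely to
    ** produce a matching checksum. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t pageCksum(uint32_t cksumInit, uint32_t pgno,
                              const unsigned char *aData, int nData){
      uint32_t s = cksumInit + pgno;      /* seed differs for every journal */
      for(int i=0; i<nData; i++){
        s += aData[i];                    /* fold in the page content */
      }
      return s;
    }

    int main(void){
      unsigned char page[32];
      memset(page, 0xab, sizeof(page));
      /* Same page content, two different journals (different cksumInit values):
      ** the stored checksums differ, so a stale journal record will not verify. */
      printf("journal A: %u\n", pageCksum(0x12345678u, 5, page, sizeof(page)));
      printf("journal B: %u\n", pageCksum(0x9abcdef0u, 5, page, sizeof(page)));
      return 0;
    }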
The random initializer is important, -1.73 (drh 11-Feb-03): ** because garbage data that appears at the end of a journal is likely -1.73 (drh 11-Feb-03): ** data that was once in other files that have now been deleted. If the -1.73 (drh 11-Feb-03): ** garbage data came from an obsolete journal file, the checksums might -1.73 (drh 11-Feb-03): ** be correct. But by initializing the checksum to random value which -1.73 (drh 11-Feb-03): ** is different for every journal, we minimize that risk. -1.3 (drh 15-Apr-01): */ -1.116 (drh 09-Jun-04): static const unsigned char aJournalMagic[] = { -1.116 (drh 09-Jun-04): 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7, -1.1 (drh 11-Apr-01): }; -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.123 (drh 10-Jun-04): ** The size of the header and of each page in the journal is determined -1.123 (drh 10-Jun-04): ** by the following macros. -1.73 (drh 11-Feb-03): */ -1.116 (drh 09-Jun-04): #define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) -1.73 (drh 11-Feb-03): -1.138 (danielk1 25-Jun-04): /* -1.138 (danielk1 25-Jun-04): ** The journal header size for this pager. In the future, this could be -1.138 (danielk1 25-Jun-04): ** set to some value read from the disk controller. The important -1.138 (danielk1 25-Jun-04): ** characteristic is that it is the same size as a disk sector. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): #define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) -1.138 (danielk1 25-Jun-04): -1.169 (drh 31-Oct-04): /* -1.169 (drh 31-Oct-04): ** The macro MEMDB is true if we are dealing with an in-memory database. -1.169 (drh 31-Oct-04): ** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set, -1.169 (drh 31-Oct-04): ** the value of MEMDB will be a constant and the compiler will optimize -1.169 (drh 31-Oct-04): ** out code that would never execute. -1.169 (drh 31-Oct-04): */ -1.169 (drh 31-Oct-04): #ifdef SQLITE_OMIT_MEMORYDB -1.169 (drh 31-Oct-04): # define MEMDB 0 -1.169 (drh 31-Oct-04): #else -1.169 (drh 31-Oct-04): # define MEMDB pPager->memDb -1.169 (drh 31-Oct-04): #endif -1.169 (drh 31-Oct-04): -1.169 (drh 31-Oct-04): /* -1.138 (danielk1 25-Jun-04): ** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is -1.138 (danielk1 25-Jun-04): ** reserved for working around a windows/posix incompatibility). It is -1.138 (danielk1 25-Jun-04): ** used in the journal to signify that the remainder of the journal file -1.138 (danielk1 25-Jun-04): ** is devoted to storing a master journal name - there are no more pages to -1.138 (danielk1 25-Jun-04): ** roll back. See comments for function writeMasterJournal() for details. -1.138 (danielk1 25-Jun-04): */ -1.175 (danielk1 08-Nov-04): /* #define PAGER_MJ_PGNO(x) (PENDING_BYTE/((x)->pageSize)) */ -1.175 (danielk1 08-Nov-04): #define PAGER_MJ_PGNO(x) ((PENDING_BYTE/((x)->pageSize))+1) -1.110 (danielk1 03-Jun-04): -1.73 (drh 11-Feb-03): /* -1.183 (danielk1 17-Jan-05): ** The maximum legal page number is (2^31 - 1). 
-1.183 (danielk1 17-Jan-05): */ -1.183 (danielk1 17-Jan-05): #define PAGER_MAX_PGNO 2147483647 -1.183 (danielk1 17-Jan-05): -1.183 (danielk1 17-Jan-05): /* -1.123 (drh 10-Jun-04): ** Enable reference count tracking (for debugging) here: -1.12 (drh 28-Jun-01): */ -1.319 (drh 05-Apr-07): #ifdef SQLITE_DEBUG -1.102 (drh 26-Apr-04): int pager3_refinfo_enable = 0; -1.12 (drh 28-Jun-01): static void pager_refinfo(PgHdr *p){ -1.12 (drh 28-Jun-01): static int cnt = 0; -1.102 (drh 26-Apr-04): if( !pager3_refinfo_enable ) return; -1.163 (drh 08-Sep-04): sqlite3DebugPrintf( -1.282 (drh 05-Jan-07): "REFCNT: %4d addr=%p nRef=%-3d total=%d\n", -1.282 (drh 05-Jan-07): p->pgno, PGHDR_TO_DATA(p), p->nRef, p->pPager->nRef -1.12 (drh 28-Jun-01): ); -1.12 (drh 28-Jun-01): cnt++; /* Something to set a breakpoint on */ -1.12 (drh 28-Jun-01): } -1.12 (drh 28-Jun-01): # define REFINFO(X) pager_refinfo(X) -1.12 (drh 28-Jun-01): #else -1.12 (drh 28-Jun-01): # define REFINFO(X) -1.12 (drh 28-Jun-01): #endif -1.12 (drh 28-Jun-01): -1.325 (danielk1 07-Apr-07): /* -1.325 (danielk1 07-Apr-07): ** Return true if page *pPg has already been written to the statement -1.325 (danielk1 07-Apr-07): ** journal (or statement snapshot has been created, if *pPg is part -1.325 (danielk1 07-Apr-07): ** of an in-memory database). -1.325 (danielk1 07-Apr-07): */ -1.325 (danielk1 07-Apr-07): static int pageInStatement(PgHdr *pPg){ -1.325 (danielk1 07-Apr-07): Pager *pPager = pPg->pPager; -1.325 (danielk1 07-Apr-07): if( MEMDB ){ -1.325 (danielk1 07-Apr-07): return PGHDR_TO_HIST(pPg, pPager)->inStmt; -1.325 (danielk1 07-Apr-07): }else{ -1.325 (danielk1 07-Apr-07): Pgno pgno = pPg->pgno; -1.325 (danielk1 07-Apr-07): u8 *a = pPager->aInStmt; -1.325 (danielk1 07-Apr-07): return (a && (int)pgno<=pPager->stmtSize && (a[pgno/8] & (1<<(pgno&7)))); -1.325 (danielk1 07-Apr-07): } -1.325 (danielk1 07-Apr-07): } -1.268 (drh 07-May-06): -1.268 (drh 07-May-06): /* -1.268 (drh 07-May-06): ** Change the size of the pager hash table to N. N must be a power -1.268 (drh 07-May-06): ** of two. -1.268 (drh 07-May-06): */ -1.268 (drh 07-May-06): static void pager_resize_hash_table(Pager *pPager, int N){ -1.268 (drh 07-May-06): PgHdr **aHash, *pPg; -1.268 (drh 07-May-06): assert( N>0 && (N&(N-1))==0 ); -1.268 (drh 07-May-06): aHash = sqliteMalloc( sizeof(aHash[0])*N ); -1.268 (drh 07-May-06): if( aHash==0 ){ -1.268 (drh 07-May-06): /* Failure to rehash is not an error. It is only a performance hit. */ -1.268 (drh 07-May-06): return; -1.268 (drh 07-May-06): } -1.268 (drh 07-May-06): sqliteFree(pPager->aHash); -1.268 (drh 07-May-06): pPager->nHash = N; -1.268 (drh 07-May-06): pPager->aHash = aHash; -1.268 (drh 07-May-06): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.270 (drh 28-Jun-06): int h; -1.270 (drh 28-Jun-06): if( pPg->pgno==0 ){ -1.270 (drh 28-Jun-06): assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); -1.270 (drh 28-Jun-06): continue; -1.270 (drh 28-Jun-06): } -1.270 (drh 28-Jun-06): h = pPg->pgno & (N-1); -1.268 (drh 07-May-06): pPg->pNextHash = aHash[h]; -1.268 (drh 07-May-06): if( aHash[h] ){ -1.268 (drh 07-May-06): aHash[h]->pPrevHash = pPg; -1.268 (drh 07-May-06): } -1.268 (drh 07-May-06): aHash[h] = pPg; -1.268 (drh 07-May-06): pPg->pPrevHash = 0; -1.268 (drh 07-May-06): } -1.268 (drh 07-May-06): } -1.268 (drh 07-May-06): -1.12 (drh 28-Jun-01): /* -1.94 (drh 08-Feb-04): ** Read a 32-bit integer from the given file descriptor. Store the integer -1.94 (drh 08-Feb-04): ** that is read in *pRes. 
Return SQLITE_OK if everything worked, or an -1.94 (drh 08-Feb-04): ** error code is something goes wrong. -1.123 (drh 10-Jun-04): ** -1.123 (drh 10-Jun-04): ** All values are stored on disk as big-endian. -1.50 (drh 12-Aug-02): */ -1.116 (drh 09-Jun-04): static int read32bits(OsFile *fd, u32 *pRes){ -1.237 (drh 15-Jan-06): unsigned char ac[4]; -1.237 (drh 15-Jan-06): int rc = sqlite3OsRead(fd, ac, sizeof(ac)); -1.116 (drh 09-Jun-04): if( rc==SQLITE_OK ){ -1.336 (drh 05-May-07): *pRes = sqlite3Get4byte(ac); -1.50 (drh 12-Aug-02): } -1.50 (drh 12-Aug-02): return rc; -1.50 (drh 12-Aug-02): } -1.50 (drh 12-Aug-02): -1.50 (drh 12-Aug-02): /* -1.235 (drh 10-Jan-06): ** Write a 32-bit integer into a string buffer in big-endian byte order. -1.235 (drh 10-Jan-06): */ -1.336 (drh 05-May-07): #define put32bits(A,B) sqlite3Put4byte((u8*)A,B) -1.235 (drh 10-Jan-06): -1.235 (drh 10-Jan-06): /* -1.94 (drh 08-Feb-04): ** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK -1.94 (drh 08-Feb-04): ** on success or an error code is something goes wrong. -1.50 (drh 12-Aug-02): */ -1.50 (drh 12-Aug-02): static int write32bits(OsFile *fd, u32 val){ -1.240 (danielk1 16-Jan-06): char ac[4]; -1.235 (drh 10-Jan-06): put32bits(ac, val); -1.222 (drh 30-Nov-05): return sqlite3OsWrite(fd, ac, 4); -1.50 (drh 12-Aug-02): } -1.50 (drh 12-Aug-02): -1.70 (drh 22-Jan-03): /* -1.229 (danielk1 30-Dec-05): ** This function should be called when an error occurs within the pager -1.239 (danielk1 16-Jan-06): ** code. The first argument is a pointer to the pager structure, the -1.239 (danielk1 16-Jan-06): ** second the error-code about to be returned by a pager API function. -1.239 (danielk1 16-Jan-06): ** The value returned is a copy of the second argument to this function. -1.239 (danielk1 16-Jan-06): ** -1.311 (drh 30-Mar-07): ** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL -1.239 (danielk1 16-Jan-06): ** the error becomes persistent. All subsequent API calls on this Pager -1.239 (danielk1 16-Jan-06): ** will immediately return the same error code. -1.229 (danielk1 30-Dec-05): */ -1.229 (danielk1 30-Dec-05): static int pager_error(Pager *pPager, int rc){ -1.272 (drh 15-Sep-06): int rc2 = rc & 0xff; -1.238 (danielk1 16-Jan-06): assert( pPager->errCode==SQLITE_FULL || pPager->errCode==SQLITE_OK ); -1.302 (danielk1 27-Mar-07): if( -1.272 (drh 15-Sep-06): rc2==SQLITE_FULL || -1.272 (drh 15-Sep-06): rc2==SQLITE_IOERR || -1.311 (drh 30-Mar-07): rc2==SQLITE_CORRUPT -1.238 (danielk1 16-Jan-06): ){ -1.238 (danielk1 16-Jan-06): pPager->errCode = rc; -1.229 (danielk1 30-Dec-05): } -1.229 (danielk1 30-Dec-05): return rc; -1.229 (danielk1 30-Dec-05): } -1.229 (danielk1 30-Dec-05): -1.344 (drh 16-Jun-07): /* -1.344 (drh 16-Jun-07): ** If SQLITE_CHECK_PAGES is defined then we do some sanity checking -1.344 (drh 16-Jun-07): ** on the cache using a hash function. This is used for testing -1.344 (drh 16-Jun-07): ** and debugging only. -1.344 (drh 16-Jun-07): */ -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): /* -1.189 (danielk1 15-Feb-05): ** Return a 32-bit hash of the page data for pPage. 
-1.189 (danielk1 15-Feb-05): */ -1.344 (drh 16-Jun-07): static u32 pager_datahash(int nByte, unsigned char *pData){ -1.189 (danielk1 15-Feb-05): u32 hash = 0; -1.189 (danielk1 15-Feb-05): int i; -1.344 (drh 16-Jun-07): for(i=0; ipPager->pageSize, -1.344 (drh 16-Jun-07): (unsigned char *)PGHDR_TO_DATA(pPage)); -1.344 (drh 16-Jun-07): } -1.189 (danielk1 15-Feb-05): -1.189 (danielk1 15-Feb-05): /* -1.189 (danielk1 15-Feb-05): ** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): ** is defined, and NDEBUG is not defined, an assert() statement checks -1.189 (danielk1 15-Feb-05): ** that the page is either dirty or still matches the calculated page-hash. -1.189 (danielk1 15-Feb-05): */ -1.189 (danielk1 15-Feb-05): #define CHECK_PAGE(x) checkPage(x) -1.189 (danielk1 15-Feb-05): static void checkPage(PgHdr *pPg){ -1.189 (danielk1 15-Feb-05): Pager *pPager = pPg->pPager; -1.238 (danielk1 16-Jan-06): assert( !pPg->pageHash || pPager->errCode || MEMDB || pPg->dirty || -1.189 (danielk1 15-Feb-05): pPg->pageHash==pager_pagehash(pPg) ); -1.189 (danielk1 15-Feb-05): } -1.189 (danielk1 15-Feb-05): -1.189 (danielk1 15-Feb-05): #else -1.348 (drh 18-Jun-07): #define pager_datahash(X,Y) 0 -1.344 (drh 16-Jun-07): #define pager_pagehash(X) 0 -1.189 (danielk1 15-Feb-05): #define CHECK_PAGE(x) -1.189 (danielk1 15-Feb-05): #endif -1.189 (danielk1 15-Feb-05): -1.1 (drh 11-Apr-01): /* -1.138 (danielk1 25-Jun-04): ** When this is called the journal file for pager pPager must be open. -1.138 (danielk1 25-Jun-04): ** The master journal file name is read from the end of the file and -1.138 (danielk1 25-Jun-04): ** written into memory obtained from sqliteMalloc(). *pzMaster is -1.138 (danielk1 25-Jun-04): ** set to point at the memory and SQLITE_OK returned. The caller must -1.138 (danielk1 25-Jun-04): ** sqliteFree() *pzMaster. -1.138 (danielk1 25-Jun-04): ** -1.138 (danielk1 25-Jun-04): ** If no master journal file name is present *pzMaster is set to 0 and -1.138 (danielk1 25-Jun-04): ** SQLITE_OK returned. 
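For orientation while reading readMasterJournal() below: the routine reads a 16-byte trailer at the very end of the journal (a 4-byte name length, a 4-byte checksum, and the 8-byte magic), with the master journal name stored immediately before that trailer. The sketch below walks the same layout in an ordinary memory buffer; it reads the checksum but does not verify it, since the verification rule is not restated in this excerpt, and the helper names are invented.

    /* Illustrative parse of the master-journal trailer, applied to an
    ** in-memory buffer instead of a file.  Layout at the end of the journal,
    ** as implied by readMasterJournal() below:
    **   [ name, len bytes ][ len, 4 bytes ][ cksum, 4 bytes ][ magic, 8 bytes ]
    ** All integers are big-endian, per the comments above. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint32_t get32(const unsigned char *p){
      return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
    }

    int main(void){
      static const unsigned char magic[8] =
          { 0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7 };
      /* Build a fake journal tail: name + len + cksum + magic. */
      unsigned char buf[64];
      const char *zName = "main.db-mj1";
      size_t nName = strlen(zName);
      size_t szJ = 0;
      memcpy(&buf[szJ], zName, nName);            szJ += nName;
      buf[szJ++]=0; buf[szJ++]=0; buf[szJ++]=0; buf[szJ++]=(unsigned char)nName;
      buf[szJ++]=0; buf[szJ++]=0; buf[szJ++]=0; buf[szJ++]=0;  /* cksum (ignored) */
      memcpy(&buf[szJ], magic, 8);                szJ += 8;

      /* Parse it back the way readMasterJournal() does. */
      if( szJ>=16 && memcmp(&buf[szJ-8], magic, 8)==0 ){
        uint32_t len   = get32(&buf[szJ-16]);
        uint32_t cksum = get32(&buf[szJ-12]);
        printf("name = %.*s (len=%u, cksum=%u)\n",
               (int)len, (const char*)&buf[szJ-16-len],
               (unsigned)len, (unsigned)cksum);
      }
      return 0;
    }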
-1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): static int readMasterJournal(OsFile *pJrnl, char **pzMaster){ -1.138 (danielk1 25-Jun-04): int rc; -1.138 (danielk1 25-Jun-04): u32 len; -1.165 (drh 01-Oct-04): i64 szJ; -1.146 (danielk1 28-Jun-04): u32 cksum; -1.142 (danielk1 25-Jun-04): int i; -1.138 (danielk1 25-Jun-04): unsigned char aMagic[8]; /* A buffer to hold the magic header */ -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): *pzMaster = 0; -1.138 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(pJrnl, &szJ); -1.142 (danielk1 25-Jun-04): if( rc!=SQLITE_OK || szJ<16 ) return rc; -1.138 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pJrnl, szJ-16); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): rc = read32bits(pJrnl, &len); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): -1.142 (danielk1 25-Jun-04): rc = read32bits(pJrnl, &cksum); -1.142 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.142 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsRead(pJrnl, aMagic, 8); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK || memcmp(aMagic, aJournalMagic, 8) ) return rc; -1.138 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pJrnl, szJ-16-len); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): -1.147 (danielk1 28-Jun-04): *pzMaster = (char *)sqliteMalloc(len+1); -1.138 (danielk1 25-Jun-04): if( !*pzMaster ){ -1.138 (danielk1 25-Jun-04): return SQLITE_NOMEM; -1.138 (danielk1 25-Jun-04): } -1.222 (drh 30-Nov-05): rc = sqlite3OsRead(pJrnl, *pzMaster, len); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ){ -1.138 (danielk1 25-Jun-04): sqliteFree(*pzMaster); -1.138 (danielk1 25-Jun-04): *pzMaster = 0; -1.138 (danielk1 25-Jun-04): return rc; -1.138 (danielk1 25-Jun-04): } -1.142 (danielk1 25-Jun-04): -1.142 (danielk1 25-Jun-04): /* See if the checksum matches the master journal name */ -1.142 (danielk1 25-Jun-04): for(i=0; ijournalOff; -1.138 (danielk1 25-Jun-04): if( c ){ -1.138 (danielk1 25-Jun-04): offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager); -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): assert( offset%JOURNAL_HDR_SZ(pPager)==0 ); -1.138 (danielk1 25-Jun-04): assert( offset>=c ); -1.138 (danielk1 25-Jun-04): assert( (offset-c)journalOff = offset; -1.222 (drh 30-Nov-05): return sqlite3OsSeek(pPager->jfd, pPager->journalOff); -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* -1.138 (danielk1 25-Jun-04): ** The journal file must be open when this routine is called. A journal -1.138 (danielk1 25-Jun-04): ** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the -1.138 (danielk1 25-Jun-04): ** current location. -1.138 (danielk1 25-Jun-04): ** -1.138 (danielk1 25-Jun-04): ** The format for the journal header is as follows: -1.138 (danielk1 25-Jun-04): ** - 8 bytes: Magic identifying journal format. -1.138 (danielk1 25-Jun-04): ** - 4 bytes: Number of records in journal, or -1 no-sync mode is on. -1.138 (danielk1 25-Jun-04): ** - 4 bytes: Random number used for page hash. -1.138 (danielk1 25-Jun-04): ** - 4 bytes: Initial database page count. -1.138 (danielk1 25-Jun-04): ** - 4 bytes: Sector size used by the process that wrote this journal. -1.138 (danielk1 25-Jun-04): ** -1.140 (danielk1 25-Jun-04): ** Followed by (JOURNAL_HDR_SZ - 24) bytes of unused space. 
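The five fields listed above occupy the first 24 bytes of the header sector: 8 bytes of magic followed by four big-endian 32-bit values, with the remainder of the JOURNAL_HDR_SZ sector left as padding. A small stand-alone sketch that lays out those 24 bytes follows; the helper names and example values are illustrative only.

    /* Lay out the 24 journal-header bytes described above:
    **   8 bytes magic, then nRec, cksumInit, initial dbSize and sectorSize,
    ** each as a big-endian 32-bit value.  Helper names are not SQLite's. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static const unsigned char aJournalMagic[8] =
        { 0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7 };

    static void put32(unsigned char *p, uint32_t v){
      p[0]=(unsigned char)(v>>24); p[1]=(unsigned char)(v>>16);
      p[2]=(unsigned char)(v>>8);  p[3]=(unsigned char)(v);
    }

    static void buildJournalHdr(unsigned char *z, uint32_t nRec,
                                uint32_t cksumInit, uint32_t dbSize,
                                uint32_t sectorSize){
      memcpy(z, aJournalMagic, 8);
      put32(&z[8],  nRec);        /* record count, or 0xffffffff in no-sync mode */
      put32(&z[12], cksumInit);   /* random seed for the page checksums */
      put32(&z[16], dbSize);      /* database size, in pages, before the transaction */
      put32(&z[20], sectorSize);  /* assumed sector size of the writing process */
    }

    int main(void){
      unsigned char hdr[24];
      buildJournalHdr(hdr, 3, 0xdeadbeefu, 42, 512);
      for(int i=0; i<24; i++) printf("%02x%s", hdr[i], (i%8==7) ? "\n" : " ");
      return 0;
    }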
-1.138 (danielk1 25-Jun-04): static int writeJournalHdr(Pager *pPager){
-1.235 (drh 10-Jan-06): char zHeader[sizeof(aJournalMagic)+16];
-1.290 (danielk1 19-Mar-07): int rc;
-1.138 (danielk1 25-Jun-04):
-1.290 (danielk1 19-Mar-07): if( pPager->stmtHdrOff==0 ){
-1.290 (danielk1 19-Mar-07): pPager->stmtHdrOff = pPager->journalOff;
-1.290 (danielk1 19-Mar-07): }
-1.290 (danielk1 19-Mar-07):
-1.290 (danielk1 19-Mar-07): rc = seekJournalHdr(pPager);
-1.138 (danielk1 25-Jun-04): if( rc ) return rc;
-1.138 (danielk1 25-Jun-04):
-1.138 (danielk1 25-Jun-04): pPager->journalHdr = pPager->journalOff;
-1.138 (danielk1 25-Jun-04): pPager->journalOff += JOURNAL_HDR_SZ(pPager);
-1.138 (danielk1 25-Jun-04):
-1.138 (danielk1 25-Jun-04): /* FIX ME:
-1.138 (danielk1 25-Jun-04): **
-1.138 (danielk1 25-Jun-04): ** Possibly for a pager not in no-sync mode, the journal magic should not
-1.138 (danielk1 25-Jun-04): ** be written until nRec is filled in as part of next syncJournal().
-1.138 (danielk1 25-Jun-04): **
-1.138 (danielk1 25-Jun-04): ** Actually maybe the whole journal header should be delayed until that
-1.138 (danielk1 25-Jun-04): ** point. Think about this.
-1.138 (danielk1 25-Jun-04): */
-1.235 (drh 10-Jan-06): memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
-1.235 (drh 10-Jan-06): /* The nRec Field. 0xFFFFFFFF for no-sync journals. */
-1.235 (drh 10-Jan-06): put32bits(&zHeader[sizeof(aJournalMagic)], pPager->noSync ? 0xffffffff : 0);
-1.235 (drh 10-Jan-06): /* The random check-hash initialiser */
-1.235 (drh 10-Jan-06): sqlite3Randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
-1.235 (drh 10-Jan-06): put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
-1.235 (drh 10-Jan-06): /* The initial database size */
-1.235 (drh 10-Jan-06): put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbSize);
-1.235 (drh 10-Jan-06): /* The assumed sector size for this process */
-1.235 (drh 10-Jan-06): put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize);
-1.283 (drh 28-Feb-07): IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, sizeof(zHeader)))
-1.235 (drh 10-Jan-06): rc = sqlite3OsWrite(pPager->jfd, zHeader, sizeof(zHeader));
-1.138 (danielk1 25-Jun-04):
-1.138 (danielk1 25-Jun-04): /* The journal header has been written successfully. Seek the journal
-1.138 (danielk1 25-Jun-04): ** file descriptor to the end of the journal header sector.
-1.138 (danielk1 25-Jun-04): */
-1.138 (danielk1 25-Jun-04): if( rc==SQLITE_OK ){
-1.283 (drh 28-Feb-07): IOTRACE(("JTAIL %p %lld\n", pPager, pPager->journalOff-1))
-1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff-1);
-1.212 (drh 09-Sep-05): if( rc==SQLITE_OK ){
-1.222 (drh 30-Nov-05): rc = sqlite3OsWrite(pPager->jfd, "\000", 1);
-1.212 (drh 09-Sep-05): }
-1.138 (danielk1 25-Jun-04): }
-1.138 (danielk1 25-Jun-04): return rc;
-1.138 (danielk1 25-Jun-04): }
-1.138 (danielk1 25-Jun-04):
-1.138 (danielk1 25-Jun-04): /*
-1.138 (danielk1 25-Jun-04): ** The journal file must be open when this is called. A journal header file
-1.138 (danielk1 25-Jun-04): ** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal
-1.138 (danielk1 25-Jun-04): ** file. See comments above function writeJournalHdr() for a description of
-1.138 (danielk1 25-Jun-04): ** the journal header format.
-1.138 (danielk1 25-Jun-04): ** -1.138 (danielk1 25-Jun-04): ** If the header is read successfully, *nRec is set to the number of -1.138 (danielk1 25-Jun-04): ** page records following this header and *dbSize is set to the size of the -1.138 (danielk1 25-Jun-04): ** database before the transaction began, in pages. Also, pPager->cksumInit -1.138 (danielk1 25-Jun-04): ** is set to the value read from the journal header. SQLITE_OK is returned -1.138 (danielk1 25-Jun-04): ** in this case. -1.138 (danielk1 25-Jun-04): ** -1.138 (danielk1 25-Jun-04): ** If the journal header file appears to be corrupted, SQLITE_DONE is -1.138 (danielk1 25-Jun-04): ** returned and *nRec and *dbSize are not set. If JOURNAL_HDR_SZ bytes -1.138 (danielk1 25-Jun-04): ** cannot be read from the journal file an error code is returned. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): static int readJournalHdr( -1.138 (danielk1 25-Jun-04): Pager *pPager, -1.165 (drh 01-Oct-04): i64 journalSize, -1.138 (danielk1 25-Jun-04): u32 *pNRec, -1.138 (danielk1 25-Jun-04): u32 *pDbSize -1.138 (danielk1 25-Jun-04): ){ -1.138 (danielk1 25-Jun-04): int rc; -1.138 (danielk1 25-Jun-04): unsigned char aMagic[8]; /* A buffer to hold the magic header */ -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): rc = seekJournalHdr(pPager); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ -1.138 (danielk1 25-Jun-04): return SQLITE_DONE; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic)); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ -1.138 (danielk1 25-Jun-04): return SQLITE_DONE; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.221 (drh 29-Nov-05): rc = read32bits(pPager->jfd, pNRec); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.221 (drh 29-Nov-05): rc = read32bits(pPager->jfd, &pPager->cksumInit); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.221 (drh 29-Nov-05): rc = read32bits(pPager->jfd, pDbSize); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* Update the assumed sector-size to match the value used by -1.138 (danielk1 25-Jun-04): ** the process that created this journal. If this journal was -1.138 (danielk1 25-Jun-04): ** created by a process other than this one, then this routine -1.138 (danielk1 25-Jun-04): ** is being called from within pager_playback(). The local value -1.138 (danielk1 25-Jun-04): ** of Pager.sectorSize is restored at the end of that routine. -1.138 (danielk1 25-Jun-04): */ -1.221 (drh 29-Nov-05): rc = read32bits(pPager->jfd, (u32 *)&pPager->sectorSize); -1.138 (danielk1 25-Jun-04): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): pPager->journalOff += JOURNAL_HDR_SZ(pPager); -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); -1.138 (danielk1 25-Jun-04): return rc; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* -1.138 (danielk1 25-Jun-04): ** Write the supplied master journal name into the journal file for pager -1.142 (danielk1 25-Jun-04): ** pPager at the current location. 
The master journal name must be the last -1.142 (danielk1 25-Jun-04): ** thing written to a journal file. If the pager is in full-sync mode, the -1.142 (danielk1 25-Jun-04): ** journal file descriptor is advanced to the next sector boundary before -1.142 (danielk1 25-Jun-04): ** anything is written. The format is: -1.142 (danielk1 25-Jun-04): ** -1.142 (danielk1 25-Jun-04): ** + 4 bytes: PAGER_MJ_PGNO. -1.142 (danielk1 25-Jun-04): ** + N bytes: length of master journal name. -1.142 (danielk1 25-Jun-04): ** + 4 bytes: N -1.142 (danielk1 25-Jun-04): ** + 4 bytes: Master journal name checksum. -1.142 (danielk1 25-Jun-04): ** + 8 bytes: aJournalMagic[]. -1.142 (danielk1 25-Jun-04): ** -1.142 (danielk1 25-Jun-04): ** The master journal page checksum is the sum of the bytes in the master -1.142 (danielk1 25-Jun-04): ** journal name. -1.229 (danielk1 30-Dec-05): ** -1.229 (danielk1 30-Dec-05): ** If zMaster is a NULL pointer (occurs for a single database transaction), -1.229 (danielk1 30-Dec-05): ** this call is a no-op. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): static int writeMasterJournal(Pager *pPager, const char *zMaster){ -1.138 (danielk1 25-Jun-04): int rc; -1.138 (danielk1 25-Jun-04): int len; -1.142 (danielk1 25-Jun-04): int i; -1.235 (drh 10-Jan-06): u32 cksum = 0; -1.235 (drh 10-Jan-06): char zBuf[sizeof(aJournalMagic)+2*4]; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): if( !zMaster || pPager->setMaster) return SQLITE_OK; -1.138 (danielk1 25-Jun-04): pPager->setMaster = 1; -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): len = strlen(zMaster); -1.142 (danielk1 25-Jun-04): for(i=0; ifullSync ){ -1.138 (danielk1 25-Jun-04): rc = seekJournalHdr(pPager); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): } -1.142 (danielk1 25-Jun-04): pPager->journalOff += (len+20); -1.138 (danielk1 25-Jun-04): -1.221 (drh 29-Nov-05): rc = write32bits(pPager->jfd, PAGER_MJ_PGNO(pPager)); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsWrite(pPager->jfd, zMaster, len); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): -1.235 (drh 10-Jan-06): put32bits(zBuf, len); -1.235 (drh 10-Jan-06): put32bits(&zBuf[4], cksum); -1.235 (drh 10-Jan-06): memcpy(&zBuf[8], aJournalMagic, sizeof(aJournalMagic)); -1.235 (drh 10-Jan-06): rc = sqlite3OsWrite(pPager->jfd, zBuf, 8+sizeof(aJournalMagic)); -1.210 (drh 27-Aug-05): pPager->needSync = !pPager->noSync; -1.138 (danielk1 25-Jun-04): return rc; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* -1.57 (drh 10-Nov-02): ** Add or remove a page from the list of all pages that are in the -1.107 (drh 12-May-04): ** statement journal. -1.57 (drh 10-Nov-02): ** -1.57 (drh 10-Nov-02): ** The Pager keeps a separate list of pages that are currently in -1.292 (danielk1 19-Mar-07): ** the statement journal. This helps the sqlite3PagerStmtCommit() -1.57 (drh 10-Nov-02): ** routine run MUCH faster for the common case where there are many -1.107 (drh 12-May-04): ** pages in memory but only a few are in the statement journal. 
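Editor's note: the "master journal name checksum" mentioned in the record layout above is nothing more than the byte-sum of the name. The following is a minimal standalone restatement of that calculation; the helper name is hypothetical, and the real writeMasterJournal() accumulates the same sum inline.

/* Illustrative sketch only: the master-journal name checksum described
** above is simply the sum of the bytes in the name. */
#include <string.h>
#include <stdint.h>

static uint32_t sketchMasterJournalCksum(const char *zMaster){
  uint32_t cksum = 0;
  size_t i, n = strlen(zMaster);
  for(i=0; i<n; i++){
    cksum += (unsigned char)zMaster[i];   /* Plain byte sum, no mixing */
  }
  return cksum;
}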
-1.57 (drh 10-Nov-02): */ -1.102 (drh 26-Apr-04): static void page_add_to_stmt_list(PgHdr *pPg){ -1.57 (drh 10-Nov-02): Pager *pPager = pPg->pPager; -1.325 (danielk1 07-Apr-07): PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); -1.325 (danielk1 07-Apr-07): assert( MEMDB ); -1.325 (danielk1 07-Apr-07): if( !pHist->inStmt ){ -1.325 (danielk1 07-Apr-07): assert( pHist->pPrevStmt==0 && pHist->pNextStmt==0 ); -1.325 (danielk1 07-Apr-07): if( pPager->pStmt ){ -1.325 (danielk1 07-Apr-07): PGHDR_TO_HIST(pPager->pStmt, pPager)->pPrevStmt = pPg; -1.325 (danielk1 07-Apr-07): } -1.325 (danielk1 07-Apr-07): pHist->pNextStmt = pPager->pStmt; -1.325 (danielk1 07-Apr-07): pPager->pStmt = pPg; -1.325 (danielk1 07-Apr-07): pHist->inStmt = 1; -1.57 (drh 10-Nov-02): } -1.57 (drh 10-Nov-02): } -1.57 (drh 10-Nov-02): -1.57 (drh 10-Nov-02): /* -1.1 (drh 11-Apr-01): ** Find a page in the hash table given its page number. Return -1.1 (drh 11-Apr-01): ** a pointer to the page or NULL if not found. -1.1 (drh 11-Apr-01): */ -1.3 (drh 15-Apr-01): static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ -1.268 (drh 07-May-06): PgHdr *p; -1.268 (drh 07-May-06): if( pPager->aHash==0 ) return 0; -1.268 (drh 07-May-06): p = pPager->aHash[pgno & (pPager->nHash-1)]; -1.1 (drh 11-Apr-01): while( p && p->pgno!=pgno ){ -1.1 (drh 11-Apr-01): p = p->pNextHash; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): return p; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.279 (drh 03-Jan-07): ** Unlock the database file. -1.279 (drh 03-Jan-07): */ -1.279 (drh 03-Jan-07): static void pager_unlock(Pager *pPager){ -1.294 (danielk1 24-Mar-07): if( !pPager->exclusiveMode ){ -1.294 (danielk1 24-Mar-07): if( !MEMDB ){ -1.294 (danielk1 24-Mar-07): sqlite3OsUnlock(pPager->fd, NO_LOCK); -1.294 (danielk1 24-Mar-07): pPager->dbSize = -1; -1.294 (danielk1 24-Mar-07): IOTRACE(("UNLOCK %p\n", pPager)) -1.294 (danielk1 24-Mar-07): } -1.294 (danielk1 24-Mar-07): pPager->state = PAGER_UNLOCK; -1.294 (danielk1 24-Mar-07): pPager->changeCountDone = 0; -1.279 (drh 03-Jan-07): } -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* -1.293 (danielk1 23-Mar-07): ** Execute a rollback if a transaction is active and unlock the -1.293 (danielk1 23-Mar-07): ** database file. This is a no-op if the pager has already entered -1.293 (danielk1 23-Mar-07): ** the error-state. -1.293 (danielk1 23-Mar-07): */ -1.295 (danielk1 26-Mar-07): static void pagerUnlockAndRollback(Pager *p){ -1.295 (danielk1 26-Mar-07): if( p->errCode ) return; -1.297 (danielk1 26-Mar-07): assert( p->state>=PAGER_RESERVED || p->journalOpen==0 ); -1.295 (danielk1 26-Mar-07): if( p->state>=PAGER_RESERVED ){ -1.295 (danielk1 26-Mar-07): sqlite3PagerRollback(p); -1.295 (danielk1 26-Mar-07): } -1.295 (danielk1 26-Mar-07): pager_unlock(p); -1.295 (danielk1 26-Mar-07): assert( p->errCode || !p->journalOpen || (p->exclusiveMode&&!p->journalOff) ); -1.295 (danielk1 26-Mar-07): assert( p->errCode || !p->stmtOpen || p->exclusiveMode ); -1.279 (drh 03-Jan-07): } -1.279 (drh 03-Jan-07): -1.279 (drh 03-Jan-07): -1.279 (drh 03-Jan-07): /* -1.323 (danielk1 05-Apr-07): ** Clear the in-memory cache. This routine -1.1 (drh 11-Apr-01): ** sets the state of the pager back to what it was when it was first -1.1 (drh 11-Apr-01): ** opened. Any outstanding pages are invalidated and subsequent attempts -1.1 (drh 11-Apr-01): ** to access those pages will likely result in a coredump. 
-1.1 (drh 11-Apr-01): */ -1.3 (drh 15-Apr-01): static void pager_reset(Pager *pPager){ -1.1 (drh 11-Apr-01): PgHdr *pPg, *pNext; -1.238 (danielk1 16-Jan-06): if( pPager->errCode ) return; -1.3 (drh 15-Apr-01): for(pPg=pPager->pAll; pPg; pPg=pNext){ -1.327 (drh 13-Apr-07): IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_pgfree_count); -1.3 (drh 15-Apr-01): pNext = pPg->pNextAll; -1.3 (drh 15-Apr-01): sqliteFree(pPg); -1.1 (drh 11-Apr-01): } -1.291 (danielk1 19-Mar-07): pPager->pStmt = 0; -1.1 (drh 11-Apr-01): pPager->pFirst = 0; -1.69 (drh 21-Jan-03): pPager->pFirstSynced = 0; -1.3 (drh 15-Apr-01): pPager->pLast = 0; -1.3 (drh 15-Apr-01): pPager->pAll = 0; -1.268 (drh 07-May-06): pPager->nHash = 0; -1.268 (drh 07-May-06): sqliteFree(pPager->aHash); -1.1 (drh 11-Apr-01): pPager->nPage = 0; -1.268 (drh 07-May-06): pPager->aHash = 0; -1.293 (danielk1 23-Mar-07): pPager->nRef = 0; -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* -1.307 (drh 30-Mar-07): ** This routine ends a transaction. A transaction is ended by either -1.307 (drh 30-Mar-07): ** a COMMIT or a ROLLBACK. -1.307 (drh 30-Mar-07): ** -1.1 (drh 11-Apr-01): ** When this routine is called, the pager has the journal file open and -1.307 (drh 30-Mar-07): ** a RESERVED or EXCLUSIVE lock on the database. This routine will release -1.307 (drh 30-Mar-07): ** the database lock and acquires a SHARED lock in its place if that is -1.307 (drh 30-Mar-07): ** the appropriate thing to do. Release locks usually is appropriate, -1.307 (drh 30-Mar-07): ** unless we are in exclusive access mode or unless this is a -1.307 (drh 30-Mar-07): ** COMMIT AND BEGIN or ROLLBACK AND BEGIN operation. -1.307 (drh 30-Mar-07): ** -1.307 (drh 30-Mar-07): ** The journal file is either deleted or truncated. -1.90 (drh 06-Sep-03): ** -1.90 (drh 06-Sep-03): ** TODO: Consider keeping the journal file open for temporary databases. -1.90 (drh 06-Sep-03): ** This might give a performance improvement on windows where opening -1.90 (drh 06-Sep-03): ** a file is an expensive operation. 
-1.1 (drh 11-Apr-01): */ -1.307 (drh 30-Mar-07): static int pager_end_transaction(Pager *pPager){ -1.3 (drh 15-Apr-01): PgHdr *pPg; -1.294 (danielk1 24-Mar-07): int rc = SQLITE_OK; -1.302 (danielk1 27-Mar-07): int rc2 = SQLITE_OK; -1.169 (drh 31-Oct-04): assert( !MEMDB ); -1.115 (drh 09-Jun-04): if( pPager->statestmtOpen && !pPager->exclusiveMode ){ -1.302 (danielk1 27-Mar-07): sqlite3OsClose(&pPager->stfd); -1.302 (danielk1 27-Mar-07): pPager->stmtOpen = 0; -1.46 (drh 30-May-02): } -1.60 (drh 02-Dec-02): if( pPager->journalOpen ){ -1.316 (drh 02-Apr-07): if( pPager->exclusiveMode -1.316 (drh 02-Apr-07): && (rc = sqlite3OsTruncate(pPager->jfd, 0))==SQLITE_OK ){; -1.294 (danielk1 24-Mar-07): sqlite3OsSeek(pPager->jfd, 0); -1.294 (danielk1 24-Mar-07): pPager->journalOff = 0; -1.295 (danielk1 26-Mar-07): pPager->journalStarted = 0; -1.294 (danielk1 24-Mar-07): }else{ -1.294 (danielk1 24-Mar-07): sqlite3OsClose(&pPager->jfd); -1.294 (danielk1 24-Mar-07): pPager->journalOpen = 0; -1.316 (drh 02-Apr-07): if( rc==SQLITE_OK ){ -1.305 (danielk1 29-Mar-07): rc = sqlite3OsDelete(pPager->zJournal); -1.305 (danielk1 29-Mar-07): } -1.294 (danielk1 24-Mar-07): } -1.60 (drh 02-Dec-02): sqliteFree( pPager->aInJournal ); -1.60 (drh 02-Dec-02): pPager->aInJournal = 0; -1.60 (drh 02-Dec-02): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.60 (drh 02-Dec-02): pPg->inJournal = 0; -1.60 (drh 02-Dec-02): pPg->dirty = 0; -1.68 (drh 16-Jan-03): pPg->needSync = 0; -1.294 (danielk1 24-Mar-07): pPg->alwaysRollback = 0; -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): pPg->pageHash = pager_pagehash(pPg); -1.189 (danielk1 15-Feb-05): #endif -1.60 (drh 02-Dec-02): } -1.267 (drh 03-May-06): pPager->pDirty = 0; -1.137 (danielk1 23-Jun-04): pPager->dirtyCache = 0; -1.137 (danielk1 23-Jun-04): pPager->nRec = 0; -1.60 (drh 02-Dec-02): }else{ -1.201 (drh 28-Mar-05): assert( pPager->aInJournal==0 ); -1.115 (drh 09-Jun-04): assert( pPager->dirtyCache==0 || pPager->useJournal==0 ); -1.60 (drh 02-Dec-02): } -1.302 (danielk1 27-Mar-07): -1.294 (danielk1 24-Mar-07): if( !pPager->exclusiveMode ){ -1.302 (danielk1 27-Mar-07): rc2 = sqlite3OsUnlock(pPager->fd, SHARED_LOCK); -1.294 (danielk1 24-Mar-07): pPager->state = PAGER_SHARED; -1.295 (danielk1 26-Mar-07): }else if( pPager->state==PAGER_SYNCED ){ -1.295 (danielk1 26-Mar-07): pPager->state = PAGER_EXCLUSIVE; -1.294 (danielk1 24-Mar-07): } -1.295 (danielk1 26-Mar-07): pPager->origDbSize = 0; -1.138 (danielk1 25-Jun-04): pPager->setMaster = 0; -1.249 (danielk1 21-Jan-06): pPager->needSync = 0; -1.249 (danielk1 21-Jan-06): pPager->pFirstSynced = pPager->pFirst; -1.278 (drh 02-Jan-07): pPager->dbSize = -1; -1.302 (danielk1 27-Mar-07): -1.302 (danielk1 27-Mar-07): return (rc==SQLITE_OK?rc2:rc); -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.73 (drh 11-Feb-03): ** Compute and return a checksum for the page of data. -1.94 (drh 08-Feb-04): ** -1.94 (drh 08-Feb-04): ** This is not a real checksum. It is really just the sum of the -1.123 (drh 10-Jun-04): ** random initial value and the page number. We experimented with -1.123 (drh 10-Jun-04): ** a checksum of the entire data, but that was found to be too slow. -1.123 (drh 10-Jun-04): ** -1.123 (drh 10-Jun-04): ** Note that the page number is stored at the beginning of data and -1.123 (drh 10-Jun-04): ** the checksum is stored at the end. This is important. 
If journal -1.123 (drh 10-Jun-04): ** corruption occurs due to a power failure, the most likely scenario -1.123 (drh 10-Jun-04): ** is that one end or the other of the record will be changed. It is -1.123 (drh 10-Jun-04): ** much less likely that the two ends of the journal record will be -1.123 (drh 10-Jun-04): ** correct and the middle be corrupt. Thus, this "checksum" scheme, -1.123 (drh 10-Jun-04): ** though fast and simple, catches the mostly likely kind of corruption. -1.123 (drh 10-Jun-04): ** -1.123 (drh 10-Jun-04): ** FIX ME: Consider adding every 200th (or so) byte of the data to the -1.123 (drh 10-Jun-04): ** checksum. That way if a single page spans 3 or more disk sectors and -1.123 (drh 10-Jun-04): ** only the middle sector is corrupt, we will still have a reasonable -1.123 (drh 10-Jun-04): ** chance of failing the checksum and thus detecting the problem. -1.73 (drh 11-Feb-03): */ -1.259 (drh 24-Feb-06): static u32 pager_cksum(Pager *pPager, const u8 *aData){ -1.137 (danielk1 23-Jun-04): u32 cksum = pPager->cksumInit; -1.137 (danielk1 23-Jun-04): int i = pPager->pageSize-200; -1.137 (danielk1 23-Jun-04): while( i>0 ){ -1.137 (danielk1 23-Jun-04): cksum += aData[i]; -1.137 (danielk1 23-Jun-04): i -= 200; -1.137 (danielk1 23-Jun-04): } -1.73 (drh 11-Feb-03): return cksum; -1.73 (drh 11-Feb-03): } -1.73 (drh 11-Feb-03): -1.267 (drh 03-May-06): /* Forward declaration */ -1.267 (drh 03-May-06): static void makeClean(PgHdr*); -1.267 (drh 03-May-06): -1.73 (drh 11-Feb-03): /* -1.37 (drh 02-Feb-02): ** Read a single page from the journal file opened on file descriptor -1.37 (drh 02-Feb-02): ** jfd. Playback this one page. -1.73 (drh 11-Feb-03): ** -1.123 (drh 10-Jun-04): ** If useCksum==0 it means this journal does not use checksums. Checksums -1.123 (drh 10-Jun-04): ** are not used in statement journals because statement journals do not -1.123 (drh 10-Jun-04): ** need to survive power failures. -1.37 (drh 02-Feb-02): */ -1.116 (drh 09-Jun-04): static int pager_playback_one_page(Pager *pPager, OsFile *jfd, int useCksum){ -1.37 (drh 02-Feb-02): int rc; -1.116 (drh 09-Jun-04): PgHdr *pPg; /* An existing page in the cache */ -1.116 (drh 09-Jun-04): Pgno pgno; /* The page number of a page in journal */ -1.116 (drh 09-Jun-04): u32 cksum; /* Checksum used for sanity checking */ -1.286 (danielk1 06-Mar-07): u8 *aData = (u8 *)pPager->pTmpSpace; /* Temp storage for a page */ -1.37 (drh 02-Feb-02): -1.196 (drh 20-Mar-05): /* useCksum should be true for the main journal and false for -1.196 (drh 20-Mar-05): ** statement journals. Verify that this is always the case -1.196 (drh 20-Mar-05): */ -1.221 (drh 29-Nov-05): assert( jfd == (useCksum ? pPager->jfd : pPager->stfd) ); -1.286 (danielk1 06-Mar-07): assert( aData ); -1.196 (drh 20-Mar-05): -1.116 (drh 09-Jun-04): rc = read32bits(jfd, &pgno); -1.78 (drh 16-Feb-03): if( rc!=SQLITE_OK ) return rc; -1.286 (danielk1 06-Mar-07): rc = sqlite3OsRead(jfd, aData, pPager->pageSize); -1.78 (drh 16-Feb-03): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): pPager->journalOff += pPager->pageSize + 4; -1.37 (drh 02-Feb-02): -1.73 (drh 11-Feb-03): /* Sanity checking on the page. This is more important that I originally -1.73 (drh 11-Feb-03): ** thought. If a power failure occurs while the journal is being written, -1.73 (drh 11-Feb-03): ** it could cause invalid data to be written into the journal. We need to -1.73 (drh 11-Feb-03): ** detect this invalid data (with high probability) and ignore it. 
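Editor's note: the journal-record checksum discussed above is deliberately cheap. The in-code comment slightly lags the implementation of pager_cksum() shown above, which sums the header's random initialiser plus every 200th byte of page data counting back from the end, rather than the page number. A minimal standalone restatement of that scheme (hypothetical helper name):

/* Illustrative sketch only: the cheap record checksum used during journal
** playback.  Fast, and still likely to catch power-failure corruption at
** either end of a record. */
#include <stdint.h>

static uint32_t sketchRecordCksum(
  uint32_t cksumInit,          /* Random initialiser from the journal header */
  const unsigned char *aData,  /* Page data from the journal record */
  int pageSize                 /* Size of the page in bytes */
){
  uint32_t cksum = cksumInit;
  int i;
  for(i=pageSize-200; i>0; i-=200){
    cksum += aData[i];         /* Sample every 200th byte, back to front */
  }
  return cksum;
}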
-1.73 (drh 11-Feb-03): */ -1.143 (danielk1 26-Jun-04): if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ -1.73 (drh 11-Feb-03): return SQLITE_DONE; -1.73 (drh 11-Feb-03): } -1.116 (drh 09-Jun-04): if( pgno>(unsigned)pPager->dbSize ){ -1.73 (drh 11-Feb-03): return SQLITE_OK; -1.73 (drh 11-Feb-03): } -1.116 (drh 09-Jun-04): if( useCksum ){ -1.116 (drh 09-Jun-04): rc = read32bits(jfd, &cksum); -1.78 (drh 16-Feb-03): if( rc ) return rc; -1.138 (danielk1 25-Jun-04): pPager->journalOff += 4; -1.259 (drh 24-Feb-06): if( pager_cksum(pPager, aData)!=cksum ){ -1.73 (drh 11-Feb-03): return SQLITE_DONE; -1.73 (drh 11-Feb-03): } -1.73 (drh 11-Feb-03): } -1.37 (drh 02-Feb-02): -1.125 (danielk1 14-Jun-04): assert( pPager->state==PAGER_RESERVED || pPager->state>=PAGER_EXCLUSIVE ); -1.121 (danielk1 10-Jun-04): -1.121 (danielk1 10-Jun-04): /* If the pager is in RESERVED state, then there must be a copy of this -1.121 (danielk1 10-Jun-04): ** page in the pager cache. In this case just update the pager cache, -1.122 (danielk1 10-Jun-04): ** not the database file. The page is left marked dirty in this case. -1.122 (danielk1 10-Jun-04): ** -1.341 (danielk1 24-May-07): ** An exception to the above rule: If the database is in no-sync mode -1.341 (danielk1 24-May-07): ** and a page is moved during an incremental vacuum then the page may -1.342 (danielk1 24-May-07): ** not be in the pager cache. Later: if a malloc() or IO error occurs -1.342 (danielk1 24-May-07): ** during a Movepage() call, then the page may not be in the cache -1.342 (danielk1 24-May-07): ** either. So the condition described in the above paragraph is not -1.342 (danielk1 24-May-07): ** assert()able. -1.341 (danielk1 24-May-07): ** -1.121 (danielk1 10-Jun-04): ** If in EXCLUSIVE state, then we update the pager cache if it exists -1.121 (danielk1 10-Jun-04): ** and the main file. The page is then marked not dirty. -1.196 (drh 20-Mar-05): ** -1.196 (drh 20-Mar-05): ** Ticket #1171: The statement journal might contain page content that is -1.196 (drh 20-Mar-05): ** different from the page content at the start of the transaction. -1.196 (drh 20-Mar-05): ** This occurs when a page is changed prior to the start of a statement -1.196 (drh 20-Mar-05): ** then changed again within the statement. When rolling back such a -1.196 (drh 20-Mar-05): ** statement we must not write to the original database unless we know -1.345 (drh 16-Jun-07): ** for certain that original page contents are synced into the main rollback -1.345 (drh 16-Jun-07): ** journal. Otherwise, a power loss might leave modified data in the -1.345 (drh 16-Jun-07): ** database file without an entry in the rollback journal that can -1.345 (drh 16-Jun-07): ** restore the database to its original form. Two conditions must be -1.345 (drh 16-Jun-07): ** met before writing to the database files. (1) the database must be -1.345 (drh 16-Jun-07): ** locked. (2) we know that the original page content is fully synced -1.345 (drh 16-Jun-07): ** in the main journal either because the page is not in cache or else -1.345 (drh 16-Jun-07): ** the page is marked as needSync==0. 
-1.37 (drh 02-Feb-02): */ -1.116 (drh 09-Jun-04): pPg = pager_lookup(pPager, pgno); -1.344 (drh 16-Jun-07): PAGERTRACE4("PLAYBACK %d page %d hash(%08x)\n", -1.344 (drh 16-Jun-07): PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, aData)); -1.196 (drh 20-Mar-05): if( pPager->state>=PAGER_EXCLUSIVE && (pPg==0 || pPg->needSync==0) ){ -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); -1.212 (drh 09-Sep-05): if( rc==SQLITE_OK ){ -1.222 (drh 30-Nov-05): rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize); -1.212 (drh 09-Sep-05): } -1.267 (drh 03-May-06): if( pPg ){ -1.267 (drh 03-May-06): makeClean(pPg); -1.267 (drh 03-May-06): } -1.121 (danielk1 10-Jun-04): } -1.37 (drh 02-Feb-02): if( pPg ){ -1.181 (danielk1 11-Jan-05): /* No page should ever be explicitly rolled back that is in use, except -1.181 (danielk1 11-Jan-05): ** for page 1 which is held in use in order to keep the lock on the -1.181 (danielk1 11-Jan-05): ** database active. However such a page may be rolled back as a result -1.181 (danielk1 11-Jan-05): ** of an internal error resulting in an automatic call to -1.292 (danielk1 19-Mar-07): ** sqlite3PagerRollback(). -1.91 (drh 17-Dec-03): */ -1.108 (drh 14-May-04): void *pData; -1.181 (danielk1 11-Jan-05): /* assert( pPg->nRef==0 || pPg->pgno==1 ); */ -1.108 (drh 14-May-04): pData = PGHDR_TO_DATA(pPg); -1.116 (drh 09-Jun-04): memcpy(pData, aData, pPager->pageSize); -1.326 (danielk1 09-Apr-07): if( pPager->xReiniter ){ -1.326 (danielk1 09-Apr-07): pPager->xReiniter(pPg, pPager->pageSize); -1.103 (drh 07-May-04): } -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.196 (drh 20-Mar-05): pPg->pageHash = pager_pagehash(pPg); -1.189 (danielk1 15-Feb-05): #endif -1.329 (drh 16-Apr-07): /* If this was page 1, then restore the value of Pager.dbFileVers. -1.329 (drh 16-Apr-07): ** Do this before any decoding. */ -1.294 (danielk1 24-Mar-07): if( pgno==1 ){ -1.329 (drh 16-Apr-07): memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); -1.294 (danielk1 24-Mar-07): } -1.329 (drh 16-Apr-07): -1.329 (drh 16-Apr-07): /* Decode the page just read from disk */ -1.329 (drh 16-Apr-07): CODEC1(pPager, pData, pPg->pgno, 3); -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): return rc; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.110 (danielk1 03-Jun-04): ** Parameter zMaster is the name of a master journal file. A single journal -1.110 (danielk1 03-Jun-04): ** file that referred to the master journal file has just been rolled back. -1.110 (danielk1 03-Jun-04): ** This routine checks if it is possible to delete the master journal file, -1.110 (danielk1 03-Jun-04): ** and does so if it is. -1.123 (drh 10-Jun-04): ** -1.123 (drh 10-Jun-04): ** The master journal file contains the names of all child journals. -1.123 (drh 10-Jun-04): ** To tell if a master journal can be deleted, check to each of the -1.123 (drh 10-Jun-04): ** children. If all children are either missing or do not refer to -1.123 (drh 10-Jun-04): ** a different master journal, then this master journal can be deleted. 
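Editor's note: the deletion rule described above (a master journal may be removed once every child journal is either missing or no longer names it) can be sketched as a simple scan. This is an illustrative sketch only, assuming the child names are stored as consecutive nul-terminated strings; sketchCanDeleteMaster(), childExists() and childRefersTo() are hypothetical names standing in for the checks pager_delmaster() performs with the OS layer and readMasterJournal().

/* Illustrative sketch only: decide whether a master journal is still needed. */
#include <string.h>

static int sketchCanDeleteMaster(
  const char *zMaster,                          /* Name of the master journal */
  const char *zBuf,                             /* Contents of the master journal */
  long nBuf,                                    /* Size of zBuf in bytes */
  int (*childExists)(const char *zChild),       /* Does this child journal exist? */
  int (*childRefersTo)(const char *zChild, const char *zMaster) /* Still points here? */
){
  const char *zChild = zBuf;
  while( zChild < zBuf+nBuf ){
    if( childExists(zChild) && childRefersTo(zChild, zMaster) ){
      return 0;                     /* A live child still points here: keep it */
    }
    zChild += strlen(zChild) + 1;   /* Advance to the next nul-terminated name */
  }
  return 1;                         /* Every child is missing or detached */
}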
-1.110 (danielk1 03-Jun-04): */ -1.110 (danielk1 03-Jun-04): static int pager_delmaster(const char *zMaster){ -1.110 (danielk1 03-Jun-04): int rc; -1.110 (danielk1 03-Jun-04): int master_open = 0; -1.221 (drh 29-Nov-05): OsFile *master = 0; -1.110 (danielk1 03-Jun-04): char *zMasterJournal = 0; /* Contents of master journal file */ -1.165 (drh 01-Oct-04): i64 nMasterJournal; /* Size of master journal file */ -1.110 (danielk1 03-Jun-04): -1.110 (danielk1 03-Jun-04): /* Open the master journal file exclusively in case some other process -1.110 (danielk1 03-Jun-04): ** is running this routine also. Not that it makes too much difference. -1.110 (danielk1 03-Jun-04): */ -1.231 (drh 06-Jan-06): rc = sqlite3OsOpenReadOnly(zMaster, &master); -1.320 (danielk1 05-Apr-07): assert( rc!=SQLITE_OK || master ); -1.110 (danielk1 03-Jun-04): if( rc!=SQLITE_OK ) goto delmaster_out; -1.110 (danielk1 03-Jun-04): master_open = 1; -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(master, &nMasterJournal); -1.110 (danielk1 03-Jun-04): if( rc!=SQLITE_OK ) goto delmaster_out; -1.110 (danielk1 03-Jun-04): -1.110 (danielk1 03-Jun-04): if( nMasterJournal>0 ){ -1.126 (danielk1 14-Jun-04): char *zJournal; -1.138 (danielk1 25-Jun-04): char *zMasterPtr = 0; -1.126 (danielk1 14-Jun-04): -1.126 (danielk1 14-Jun-04): /* Load the entire master journal file into space obtained from -1.126 (danielk1 14-Jun-04): ** sqliteMalloc() and pointed to by zMasterJournal. -1.126 (danielk1 14-Jun-04): */ -1.138 (danielk1 25-Jun-04): zMasterJournal = (char *)sqliteMalloc(nMasterJournal); -1.110 (danielk1 03-Jun-04): if( !zMasterJournal ){ -1.110 (danielk1 03-Jun-04): rc = SQLITE_NOMEM; -1.110 (danielk1 03-Jun-04): goto delmaster_out; -1.110 (danielk1 03-Jun-04): } -1.222 (drh 30-Nov-05): rc = sqlite3OsRead(master, zMasterJournal, nMasterJournal); -1.110 (danielk1 03-Jun-04): if( rc!=SQLITE_OK ) goto delmaster_out; -1.110 (danielk1 03-Jun-04): -1.126 (danielk1 14-Jun-04): zJournal = zMasterJournal; -1.126 (danielk1 14-Jun-04): while( (zJournal-zMasterJournal)state>=PAGER_EXCLUSIVE ){ -1.323 (danielk1 05-Apr-07): rc = sqlite3OsTruncate(pPager->fd, pPager->pageSize*(i64)nPage); -1.323 (danielk1 05-Apr-07): } -1.323 (danielk1 05-Apr-07): if( rc==SQLITE_OK ){ -1.323 (danielk1 05-Apr-07): pPager->dbSize = nPage; -1.323 (danielk1 05-Apr-07): pager_truncate_cache(pPager); -1.323 (danielk1 05-Apr-07): } -1.323 (danielk1 05-Apr-07): return rc; -1.158 (drh 18-Aug-04): } -1.158 (drh 18-Aug-04): -1.158 (drh 18-Aug-04): /* -1.332 (drh 01-May-07): ** Set the sectorSize for the given pager. -1.332 (drh 01-May-07): ** -1.332 (drh 01-May-07): ** The sector size is the larger of the sector size reported -1.332 (drh 01-May-07): ** by sqlite3OsSectorSize() and the pageSize. -1.332 (drh 01-May-07): */ -1.332 (drh 01-May-07): static void setSectorSize(Pager *pPager){ -1.332 (drh 01-May-07): pPager->sectorSize = sqlite3OsSectorSize(pPager->fd); -1.332 (drh 01-May-07): if( pPager->sectorSizepageSize ){ -1.332 (drh 01-May-07): pPager->sectorSize = pPager->pageSize; -1.332 (drh 01-May-07): } -1.332 (drh 01-May-07): } -1.332 (drh 01-May-07): -1.332 (drh 01-May-07): /* -1.1 (drh 11-Apr-01): ** Playback the journal and thus restore the database file to -1.1 (drh 11-Apr-01): ** the state it was in before we started making changes. -1.1 (drh 11-Apr-01): ** -1.94 (drh 08-Feb-04): ** The journal file format is as follows: -1.94 (drh 08-Feb-04): ** -1.116 (drh 09-Jun-04): ** (1) 8 byte prefix. A copy of aJournalMagic[]. 
-1.116 (drh 09-Jun-04): ** (2) 4 byte big-endian integer which is the number of valid page records -1.94 (drh 08-Feb-04): ** in the journal. If this value is 0xffffffff, then compute the -1.116 (drh 09-Jun-04): ** number of page records from the journal size. -1.116 (drh 09-Jun-04): ** (3) 4 byte big-endian integer which is the initial value for the -1.116 (drh 09-Jun-04): ** sanity checksum. -1.116 (drh 09-Jun-04): ** (4) 4 byte integer which is the number of pages to truncate the -1.94 (drh 08-Feb-04): ** database to during a rollback. -1.116 (drh 09-Jun-04): ** (5) 4 byte integer which is the number of bytes in the master journal -1.116 (drh 09-Jun-04): ** name. The value may be zero (indicate that there is no master -1.116 (drh 09-Jun-04): ** journal.) -1.116 (drh 09-Jun-04): ** (6) N bytes of the master journal name. The name will be nul-terminated -1.116 (drh 09-Jun-04): ** and might be shorter than the value read from (5). If the first byte -1.116 (drh 09-Jun-04): ** of the name is \000 then there is no master journal. The master -1.116 (drh 09-Jun-04): ** journal name is stored in UTF-8. -1.116 (drh 09-Jun-04): ** (7) Zero or more pages instances, each as follows: -1.94 (drh 08-Feb-04): ** + 4 byte page number. -1.116 (drh 09-Jun-04): ** + pPager->pageSize bytes of data. -1.116 (drh 09-Jun-04): ** + 4 byte checksum -1.94 (drh 08-Feb-04): ** -1.116 (drh 09-Jun-04): ** When we speak of the journal header, we mean the first 6 items above. -1.116 (drh 09-Jun-04): ** Each entry in the journal is an instance of the 7th item. -1.94 (drh 08-Feb-04): ** -1.94 (drh 08-Feb-04): ** Call the value from the second bullet "nRec". nRec is the number of -1.94 (drh 08-Feb-04): ** valid page entries in the journal. In most cases, you can compute the -1.94 (drh 08-Feb-04): ** value of nRec from the size of the journal file. But if a power -1.94 (drh 08-Feb-04): ** failure occurred while the journal was being written, it could be the -1.94 (drh 08-Feb-04): ** case that the size of the journal file had already been increased but -1.94 (drh 08-Feb-04): ** the extra entries had not yet made it safely to disk. In such a case, -1.94 (drh 08-Feb-04): ** the value of nRec computed from the file size would be too large. For -1.94 (drh 08-Feb-04): ** that reason, we always use the nRec value in the header. -1.94 (drh 08-Feb-04): ** -1.94 (drh 08-Feb-04): ** If the nRec value is 0xffffffff it means that nRec should be computed -1.94 (drh 08-Feb-04): ** from the file size. This value is used when the user selects the -1.94 (drh 08-Feb-04): ** no-sync option for the journal. A power failure could lead to corruption -1.94 (drh 08-Feb-04): ** in this case. But for things like temporary table (which will be -1.94 (drh 08-Feb-04): ** deleted when the power is restored) we don't care. -1.94 (drh 08-Feb-04): ** -1.3 (drh 15-Apr-01): ** If the file opened as the journal file is not a well-formed -1.136 (danielk1 23-Jun-04): ** journal file then all pages up to the first corrupted page are rolled -1.136 (danielk1 23-Jun-04): ** back (or no pages if the journal header is corrupted). The journal file -1.136 (danielk1 23-Jun-04): ** is then deleted and SQLITE_OK returned, just as if no corruption had -1.136 (danielk1 23-Jun-04): ** been encountered. -1.136 (danielk1 23-Jun-04): ** -1.136 (danielk1 23-Jun-04): ** If an I/O or malloc() error occurs, the journal-file is not deleted -1.136 (danielk1 23-Jun-04): ** and an error code is returned. 
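Editor's note: the nRec rules described above (trust the header value, except when it is 0xffffffff, plus the in-process nRec==0 case handled by the playback loop further down) can be condensed into one small decision. This is an illustrative sketch only; the names are hypothetical, and JOURNAL_HDR_SZ/JOURNAL_PG_SZ stand in for the macros of the same names.

/* Illustrative sketch only: choose the number of records to replay from
** one journal segment during playback. */
#include <stdint.h>

static uint32_t sketchRecordCount(
  uint32_t nRecInHeader,   /* Value read from the journal header */
  int64_t szJ,             /* Total size of the journal file */
  int64_t journalOff,      /* Offset just past the header being processed */
  int64_t pgRecSize,       /* JOURNAL_PG_SZ: page size plus 8 bytes of overhead */
  int isHot                /* True when rolling back another process's journal */
){
  if( nRecInHeader==0xffffffff ){
    /* No-sync journal: everything after the header is page records. */
    return (uint32_t)((szJ - journalOff)/pgRecSize);
  }
  if( nRecInHeader==0 && !isHot ){
    /* Journal written by this process with nRec not yet filled in. */
    return (uint32_t)((szJ - journalOff)/pgRecSize);
  }
  return nRecInHeader;     /* Otherwise the header value is authoritative */
}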
-1.3 (drh 15-Apr-01): */ -1.293 (danielk1 23-Mar-07): static int pager_playback(Pager *pPager, int isHot){ -1.165 (drh 01-Oct-04): i64 szJ; /* Size of the journal file in bytes */ -1.146 (danielk1 28-Jun-04): u32 nRec; /* Number of Records in the journal */ -1.3 (drh 15-Apr-01): int i; /* Loop counter */ -1.3 (drh 15-Apr-01): Pgno mxPg = 0; /* Size of the original file in pages */ -1.116 (drh 09-Jun-04): int rc; /* Result code of a subroutine */ -1.110 (danielk1 03-Jun-04): char *zMaster = 0; /* Name of master journal file if any */ -1.1 (drh 11-Apr-01): -1.31 (drh 22-Nov-01): /* Figure out how many records are in the journal. Abort early if -1.31 (drh 22-Nov-01): ** the journal is empty. -1.31 (drh 22-Nov-01): */ -1.31 (drh 22-Nov-01): assert( pPager->journalOpen ); -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(pPager->jfd, &szJ); -1.295 (danielk1 26-Mar-07): if( rc!=SQLITE_OK || szJ==0 ){ -1.31 (drh 22-Nov-01): goto end_playback; -1.31 (drh 22-Nov-01): } -1.93 (drh 08-Feb-04): -1.138 (danielk1 25-Jun-04): /* Read the master journal name from the journal, if it is present. -1.138 (danielk1 25-Jun-04): ** If a master journal file name is specified, but the file is not -1.138 (danielk1 25-Jun-04): ** present on disk, then the journal is not hot and does not need to be -1.138 (danielk1 25-Jun-04): ** played back. -1.138 (danielk1 25-Jun-04): */ -1.221 (drh 29-Nov-05): rc = readMasterJournal(pPager->jfd, &zMaster); -1.138 (danielk1 25-Jun-04): assert( rc!=SQLITE_DONE ); -1.231 (drh 06-Jan-06): if( rc!=SQLITE_OK || (zMaster && !sqlite3OsFileExists(zMaster)) ){ -1.138 (danielk1 25-Jun-04): sqliteFree(zMaster); -1.138 (danielk1 25-Jun-04): zMaster = 0; -1.138 (danielk1 25-Jun-04): if( rc==SQLITE_DONE ) rc = SQLITE_OK; -1.31 (drh 22-Nov-01): goto end_playback; -1.31 (drh 22-Nov-01): } -1.222 (drh 30-Nov-05): sqlite3OsSeek(pPager->jfd, 0); -1.138 (danielk1 25-Jun-04): pPager->journalOff = 0; -1.31 (drh 22-Nov-01): -1.138 (danielk1 25-Jun-04): /* This loop terminates either when the readJournalHdr() call returns -1.138 (danielk1 25-Jun-04): ** SQLITE_DONE or an IO error occurs. */ -1.138 (danielk1 25-Jun-04): while( 1 ){ -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* Read the next journal header from the journal file. If there are -1.138 (danielk1 25-Jun-04): ** not enough bytes left in the journal file for a complete header, or -1.138 (danielk1 25-Jun-04): ** it is corrupted, then a process must of failed while writing it. -1.138 (danielk1 25-Jun-04): ** This indicates nothing more needs to be rolled back. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): rc = readJournalHdr(pPager, szJ, &nRec, &mxPg); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ){ -1.138 (danielk1 25-Jun-04): if( rc==SQLITE_DONE ){ -1.138 (danielk1 25-Jun-04): rc = SQLITE_OK; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): goto end_playback; -1.138 (danielk1 25-Jun-04): } -1.116 (drh 09-Jun-04): -1.138 (danielk1 25-Jun-04): /* If nRec is 0xffffffff, then this journal was created by a process -1.138 (danielk1 25-Jun-04): ** working in no-sync mode. This means that the rest of the journal -1.138 (danielk1 25-Jun-04): ** file consists of pages, there are no more journal headers. Compute -1.138 (danielk1 25-Jun-04): ** the value of nRec based on this assumption. 
-1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): if( nRec==0xffffffff ){ -1.138 (danielk1 25-Jun-04): assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); -1.138 (danielk1 25-Jun-04): nRec = (szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager); -1.138 (danielk1 25-Jun-04): } -1.116 (drh 09-Jun-04): -1.293 (danielk1 23-Mar-07): /* If nRec is 0 and this rollback is of a transaction created by this -1.293 (danielk1 23-Mar-07): ** process. In this case the rest of the journal file consists of -1.293 (danielk1 23-Mar-07): ** journalled copies of pages that need to be read back into the cache. -1.293 (danielk1 23-Mar-07): */ -1.293 (danielk1 23-Mar-07): if( nRec==0 && !isHot ){ -1.293 (danielk1 23-Mar-07): nRec = (szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.138 (danielk1 25-Jun-04): /* If this is the first header read from the journal, truncate the -1.138 (danielk1 25-Jun-04): ** database file back to it's original size. -1.138 (danielk1 25-Jun-04): */ -1.323 (danielk1 05-Apr-07): if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ -1.158 (drh 18-Aug-04): rc = pager_truncate(pPager, mxPg); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ){ -1.138 (danielk1 25-Jun-04): goto end_playback; -1.138 (danielk1 25-Jun-04): } -1.93 (drh 08-Feb-04): } -1.110 (danielk1 03-Jun-04): -1.138 (danielk1 25-Jun-04): /* Copy original pages out of the journal and back into the database file. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): for(i=0; ijfd, 1); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ){ -1.138 (danielk1 25-Jun-04): if( rc==SQLITE_DONE ){ -1.138 (danielk1 25-Jun-04): rc = SQLITE_OK; -1.138 (danielk1 25-Jun-04): pPager->journalOff = szJ; -1.138 (danielk1 25-Jun-04): break; -1.138 (danielk1 25-Jun-04): }else{ -1.138 (danielk1 25-Jun-04): goto end_playback; -1.138 (danielk1 25-Jun-04): } -1.73 (drh 11-Feb-03): } -1.73 (drh 11-Feb-03): } -1.37 (drh 02-Feb-02): } -1.260 (drh 24-Feb-06): /*NOTREACHED*/ -1.260 (drh 24-Feb-06): assert( 0 ); -1.76 (drh 13-Feb-03): -1.76 (drh 13-Feb-03): end_playback: -1.147 (danielk1 28-Jun-04): if( rc==SQLITE_OK ){ -1.307 (drh 30-Mar-07): rc = pager_end_transaction(pPager); -1.147 (danielk1 28-Jun-04): } -1.110 (danielk1 03-Jun-04): if( zMaster ){ -1.302 (danielk1 27-Mar-07): /* If there was a master journal and this routine will return success, -1.186 (danielk1 22-Jan-05): ** see if it is possible to delete the master journal. -1.110 (danielk1 03-Jun-04): */ -1.110 (danielk1 03-Jun-04): if( rc==SQLITE_OK ){ -1.186 (danielk1 22-Jan-05): rc = pager_delmaster(zMaster); -1.110 (danielk1 03-Jun-04): } -1.110 (danielk1 03-Jun-04): sqliteFree(zMaster); -1.110 (danielk1 03-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): /* The Pager.sectorSize variable may have been updated while rolling -1.306 (drh 29-Mar-07): ** back a journal created by a process with a different sector size -1.138 (danielk1 25-Jun-04): ** value. Reset it to the correct value for this process. -1.138 (danielk1 25-Jun-04): */ -1.332 (drh 01-May-07): setSectorSize(pPager); -1.37 (drh 02-Feb-02): return rc; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.107 (drh 12-May-04): ** Playback the statement journal. -1.37 (drh 02-Feb-02): ** -1.37 (drh 02-Feb-02): ** This is similar to playing back the transaction journal but with -1.37 (drh 02-Feb-02): ** a few extra twists. 
-1.37 (drh 02-Feb-02): ** -1.38 (drh 02-Feb-02): ** (1) The number of pages in the database file at the start of -1.107 (drh 12-May-04): ** the statement is stored in pPager->stmtSize, not in the -1.38 (drh 02-Feb-02): ** journal file itself. -1.37 (drh 02-Feb-02): ** -1.107 (drh 12-May-04): ** (2) In addition to playing back the statement journal, also -1.37 (drh 02-Feb-02): ** playback all pages of the transaction journal beginning -1.107 (drh 12-May-04): ** at offset pPager->stmtJSize. -1.37 (drh 02-Feb-02): */ -1.102 (drh 26-Apr-04): static int pager_stmt_playback(Pager *pPager){ -1.165 (drh 01-Oct-04): i64 szJ; /* Size of the full journal */ -1.165 (drh 01-Oct-04): i64 hdrOff; -1.73 (drh 11-Feb-03): int nRec; /* Number of Records */ -1.37 (drh 02-Feb-02): int i; /* Loop counter */ -1.37 (drh 02-Feb-02): int rc; -1.37 (drh 02-Feb-02): -1.138 (danielk1 25-Jun-04): szJ = pPager->journalOff; -1.138 (danielk1 25-Jun-04): #ifndef NDEBUG -1.138 (danielk1 25-Jun-04): { -1.165 (drh 01-Oct-04): i64 os_szJ; -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(pPager->jfd, &os_szJ); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) return rc; -1.138 (danielk1 25-Jun-04): assert( szJ==os_szJ ); -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): #endif -1.138 (danielk1 25-Jun-04): -1.290 (danielk1 19-Mar-07): /* Set hdrOff to be the offset just after the end of the last journal -1.290 (danielk1 19-Mar-07): ** page written before the first journal-header for this statement -1.290 (danielk1 19-Mar-07): ** transaction was written, or the end of the file if no journal -1.138 (danielk1 25-Jun-04): ** header was written. -1.138 (danielk1 25-Jun-04): */ -1.138 (danielk1 25-Jun-04): hdrOff = pPager->stmtHdrOff; -1.138 (danielk1 25-Jun-04): assert( pPager->fullSync || !hdrOff ); -1.138 (danielk1 25-Jun-04): if( !hdrOff ){ -1.138 (danielk1 25-Jun-04): hdrOff = szJ; -1.138 (danielk1 25-Jun-04): } -1.138 (danielk1 25-Jun-04): -1.37 (drh 02-Feb-02): /* Truncate the database back to its original size. -1.37 (drh 02-Feb-02): */ -1.323 (danielk1 05-Apr-07): rc = pager_truncate(pPager, pPager->stmtSize); -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED ); -1.3 (drh 15-Apr-01): -1.107 (drh 12-May-04): /* Figure out how many records are in the statement journal. -1.37 (drh 02-Feb-02): */ -1.107 (drh 12-May-04): assert( pPager->stmtInUse && pPager->journalOpen ); -1.222 (drh 30-Nov-05): sqlite3OsSeek(pPager->stfd, 0); -1.107 (drh 12-May-04): nRec = pPager->stmtNRec; -1.37 (drh 02-Feb-02): -1.107 (drh 12-May-04): /* Copy original pages out of the statement journal and back into the -1.116 (drh 09-Jun-04): ** database file. Note that the statement journal omits checksums from -1.116 (drh 09-Jun-04): ** each record since power-failure recovery is not important to statement -1.116 (drh 09-Jun-04): ** journals. -1.37 (drh 02-Feb-02): */ -1.37 (drh 02-Feb-02): for(i=nRec-1; i>=0; i--){ -1.221 (drh 29-Nov-05): rc = pager_playback_one_page(pPager, pPager->stfd, 0); -1.73 (drh 11-Feb-03): assert( rc!=SQLITE_DONE ); -1.102 (drh 26-Apr-04): if( rc!=SQLITE_OK ) goto end_stmt_playback; -1.37 (drh 02-Feb-02): } -1.1 (drh 11-Apr-01): -1.138 (danielk1 25-Jun-04): /* Now roll some pages back from the transaction journal. Pager.stmtJSize -1.138 (danielk1 25-Jun-04): ** was the size of the journal file when this statement was started, so -1.138 (danielk1 25-Jun-04): ** everything after that needs to be rolled back, either into the -1.138 (danielk1 25-Jun-04): ** database, the memory cache, or both. 
-1.138 (danielk1 25-Jun-04): ** -1.138 (danielk1 25-Jun-04): ** If it is not zero, then Pager.stmtHdrOff is the offset to the start -1.138 (danielk1 25-Jun-04): ** of the first journal header written during this statement transaction. -1.37 (drh 02-Feb-02): */ -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->jfd, pPager->stmtJSize); -1.37 (drh 02-Feb-02): if( rc!=SQLITE_OK ){ -1.102 (drh 26-Apr-04): goto end_stmt_playback; -1.37 (drh 02-Feb-02): } -1.138 (danielk1 25-Jun-04): pPager->journalOff = pPager->stmtJSize; -1.143 (danielk1 26-Jun-04): pPager->cksumInit = pPager->stmtCksum; -1.290 (danielk1 19-Mar-07): while( pPager->journalOff < hdrOff ){ -1.221 (drh 29-Nov-05): rc = pager_playback_one_page(pPager, pPager->jfd, 1); -1.138 (danielk1 25-Jun-04): assert( rc!=SQLITE_DONE ); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) goto end_stmt_playback; -1.37 (drh 02-Feb-02): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): while( pPager->journalOff < szJ ){ -1.255 (danielk1 24-Jan-06): u32 nJRec; /* Number of Journal Records */ -1.138 (danielk1 25-Jun-04): u32 dummy; -1.255 (danielk1 24-Jan-06): rc = readJournalHdr(pPager, szJ, &nJRec, &dummy); -1.73 (drh 11-Feb-03): if( rc!=SQLITE_OK ){ -1.73 (drh 11-Feb-03): assert( rc!=SQLITE_DONE ); -1.102 (drh 26-Apr-04): goto end_stmt_playback; -1.73 (drh 11-Feb-03): } -1.255 (danielk1 24-Jan-06): if( nJRec==0 ){ -1.255 (danielk1 24-Jan-06): nJRec = (szJ - pPager->journalOff) / (pPager->pageSize+8); -1.143 (danielk1 26-Jun-04): } -1.255 (danielk1 24-Jan-06): for(i=nJRec-1; i>=0 && pPager->journalOff < szJ; i--){ -1.221 (drh 29-Nov-05): rc = pager_playback_one_page(pPager, pPager->jfd, 1); -1.138 (danielk1 25-Jun-04): assert( rc!=SQLITE_DONE ); -1.138 (danielk1 25-Jun-04): if( rc!=SQLITE_OK ) goto end_stmt_playback; -1.138 (danielk1 25-Jun-04): } -1.1 (drh 11-Apr-01): } -1.138 (danielk1 25-Jun-04): -1.138 (danielk1 25-Jun-04): pPager->journalOff = szJ; -1.37 (drh 02-Feb-02): -1.102 (drh 26-Apr-04): end_stmt_playback: -1.252 (danielk1 23-Jan-06): if( rc==SQLITE_OK) { -1.143 (danielk1 26-Jun-04): pPager->journalOff = szJ; -1.143 (danielk1 26-Jun-04): /* pager_reload_cache(pPager); */ -1.1 (drh 11-Apr-01): } -1.3 (drh 15-Apr-01): return rc; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.18 (drh 14-Sep-01): ** Change the maximum number of in-memory pages that are allowed. -1.18 (drh 14-Sep-01): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ -1.18 (drh 14-Sep-01): if( mxPage>10 ){ -1.18 (drh 14-Sep-01): pPager->mxPage = mxPage; -1.137 (danielk1 23-Jun-04): }else{ -1.137 (danielk1 23-Jun-04): pPager->mxPage = 10; -1.18 (drh 14-Sep-01): } -1.75 (drh 12-Feb-03): } -1.75 (drh 12-Feb-03): -1.75 (drh 12-Feb-03): /* -1.75 (drh 12-Feb-03): ** Adjust the robustness of the database to damage due to OS crashes -1.75 (drh 12-Feb-03): ** or power failures by changing the number of syncs()s when writing -1.75 (drh 12-Feb-03): ** the rollback journal. There are three levels: -1.75 (drh 12-Feb-03): ** -1.222 (drh 30-Nov-05): ** OFF sqlite3OsSync() is never called. This is the default -1.75 (drh 12-Feb-03): ** for temporary and transient files. -1.75 (drh 12-Feb-03): ** -1.75 (drh 12-Feb-03): ** NORMAL The journal is synced once before writes begin on the -1.75 (drh 12-Feb-03): ** database. 
This is normally adequate protection, but -1.75 (drh 12-Feb-03): ** it is theoretically possible, though very unlikely, -1.75 (drh 12-Feb-03): ** that an inopertune power failure could leave the journal -1.75 (drh 12-Feb-03): ** in a state which would cause damage to the database -1.75 (drh 12-Feb-03): ** when it is rolled back. -1.75 (drh 12-Feb-03): ** -1.75 (drh 12-Feb-03): ** FULL The journal is synced twice before writes begin on the -1.94 (drh 08-Feb-04): ** database (with some additional information - the nRec field -1.94 (drh 08-Feb-04): ** of the journal header - being written in between the two -1.94 (drh 08-Feb-04): ** syncs). If we assume that writing a -1.75 (drh 12-Feb-03): ** single disk sector is atomic, then this mode provides -1.75 (drh 12-Feb-03): ** assurance that the journal will not be corrupted to the -1.75 (drh 12-Feb-03): ** point of causing damage to the database during rollback. -1.75 (drh 12-Feb-03): ** -1.75 (drh 12-Feb-03): ** Numeric values associated with these states are OFF==1, NORMAL=2, -1.75 (drh 12-Feb-03): ** and FULL=3. -1.75 (drh 12-Feb-03): */ -1.185 (danielk1 21-Jan-05): #ifndef SQLITE_OMIT_PAGER_PRAGMAS -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetSafetyLevel(Pager *pPager, int level, int full_fsync){ -1.75 (drh 12-Feb-03): pPager->noSync = level==1 || pPager->tempFile; -1.75 (drh 12-Feb-03): pPager->fullSync = level==3 && !pPager->tempFile; -1.258 (drh 11-Feb-06): pPager->full_fsync = full_fsync; -1.109 (danielk1 31-May-04): if( pPager->noSync ) pPager->needSync = 0; -1.18 (drh 14-Sep-01): } -1.185 (danielk1 21-Jan-05): #endif -1.18 (drh 14-Sep-01): -1.18 (drh 14-Sep-01): /* -1.207 (drh 07-Jun-05): ** The following global variable is incremented whenever the library -1.207 (drh 07-Jun-05): ** attempts to open a temporary file. This information is used for -1.207 (drh 07-Jun-05): ** testing and analysis only. -1.207 (drh 07-Jun-05): */ -1.271 (drh 08-Aug-06): #ifdef SQLITE_TEST -1.207 (drh 07-Jun-05): int sqlite3_opentemp_count = 0; -1.271 (drh 08-Aug-06): #endif -1.207 (drh 07-Jun-05): -1.207 (drh 07-Jun-05): /* -1.287 (drh 15-Mar-07): ** Open a temporary file. -1.287 (drh 15-Mar-07): ** -1.287 (drh 15-Mar-07): ** Write the file descriptor into *fd. Return SQLITE_OK on success or some -1.37 (drh 02-Feb-02): ** other error code if we fail. -1.37 (drh 02-Feb-02): ** -1.37 (drh 02-Feb-02): ** The OS will automatically delete the temporary file when it is -1.37 (drh 02-Feb-02): ** closed. -1.37 (drh 02-Feb-02): */ -1.292 (danielk1 19-Mar-07): static int sqlite3PagerOpentemp(OsFile **pFd){ -1.37 (drh 02-Feb-02): int cnt = 8; -1.37 (drh 02-Feb-02): int rc; -1.287 (drh 15-Mar-07): char zFile[SQLITE_TEMPNAME_SIZE]; -1.287 (drh 15-Mar-07): -1.271 (drh 08-Aug-06): #ifdef SQLITE_TEST -1.207 (drh 07-Jun-05): sqlite3_opentemp_count++; /* Used for testing and analysis only */ -1.271 (drh 08-Aug-06): #endif -1.37 (drh 02-Feb-02): do{ -1.37 (drh 02-Feb-02): cnt--; -1.231 (drh 06-Jan-06): sqlite3OsTempFileName(zFile); -1.231 (drh 06-Jan-06): rc = sqlite3OsOpenExclusive(zFile, pFd, 1); -1.320 (danielk1 05-Apr-07): assert( rc!=SQLITE_OK || *pFd ); -1.149 (danielk1 30-Jun-04): }while( cnt>0 && rc!=SQLITE_OK && rc!=SQLITE_NOMEM ); -1.37 (drh 02-Feb-02): return rc; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.1 (drh 11-Apr-01): ** Create a new page cache and put a pointer to the page cache in *ppPager. -1.14 (drh 13-Sep-01): ** The file to be cached need not exist. 
The file is not locked until -1.292 (danielk1 19-Mar-07): ** the first call to sqlite3PagerGet() and is only held open until the -1.292 (danielk1 19-Mar-07): ** last page is released using sqlite3PagerUnref(). -1.25 (drh 06-Oct-01): ** -1.34 (drh 15-Dec-01): ** If zFilename is NULL then a randomly-named temporary file is created -1.34 (drh 15-Dec-01): ** and used as the file to be cached. The file will be deleted -1.34 (drh 15-Dec-01): ** automatically when it is closed. -1.152 (drh 22-Jul-04): ** -1.152 (drh 22-Jul-04): ** If zFilename is ":memory:" then all information is held in cache. -1.152 (drh 22-Jul-04): ** It is never written to disk. This can be used to implement an -1.152 (drh 22-Jul-04): ** in-memory database. -1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerOpen( -1.5 (drh 28-Apr-01): Pager **ppPager, /* Return the Pager structure here */ -1.5 (drh 28-Apr-01): const char *zFilename, /* Name of the database file to open */ -1.60 (drh 02-Dec-02): int nExtra, /* Extra bytes append to each in-memory page */ -1.188 (drh 06-Feb-05): int flags /* flags controlling this file */ -1.5 (drh 28-Apr-01): ){ -1.229 (danielk1 30-Dec-05): Pager *pPager = 0; -1.130 (danielk1 16-Jun-04): char *zFullPathname = 0; -1.248 (drh 20-Jan-06): int nameLen; /* Compiler is wrong. This is always initialized before use */ -1.312 (danielk1 31-Mar-07): OsFile *fd = 0; -1.131 (danielk1 16-Jun-04): int rc = SQLITE_OK; -1.131 (danielk1 16-Jun-04): int i; -1.130 (danielk1 16-Jun-04): int tempFile = 0; -1.107 (drh 12-May-04): int memDb = 0; -1.14 (drh 13-Sep-01): int readOnly = 0; -1.188 (drh 06-Feb-05): int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; -1.188 (drh 06-Feb-05): int noReadlock = (flags & PAGER_NO_READLOCK)!=0; -1.22 (drh 19-Sep-01): char zTemp[SQLITE_TEMPNAME_SIZE]; -1.236 (drh 11-Jan-06): #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -1.242 (danielk1 18-Jan-06): /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to -1.242 (danielk1 18-Jan-06): ** malloc() must have already been made by this thread before it gets -1.242 (danielk1 18-Jan-06): ** to this point. This means the ThreadData must have been allocated already -1.242 (danielk1 18-Jan-06): ** so that ThreadData.nAlloc can be set. It would be nice to assert -1.242 (danielk1 18-Jan-06): ** that ThreadData.nAlloc is non-zero, but alas this breaks test cases -1.242 (danielk1 18-Jan-06): ** written to invoke the pager directly. -1.242 (danielk1 18-Jan-06): */ -1.241 (danielk1 16-Jan-06): ThreadData *pTsd = sqlite3ThreadData(); -1.242 (danielk1 18-Jan-06): assert( pTsd ); -1.233 (danielk1 09-Jan-06): #endif -1.1 (drh 11-Apr-01): -1.312 (danielk1 31-Mar-07): /* We used to test if malloc() had already failed before proceeding. -1.312 (danielk1 31-Mar-07): ** But the way this function is used in SQLite means that can never -1.312 (danielk1 31-Mar-07): ** happen. Furthermore, if the malloc-failed flag is already set, -1.312 (danielk1 31-Mar-07): ** either the call to sqliteStrDup() or sqliteMalloc() below will -1.312 (danielk1 31-Mar-07): ** fail shortly and SQLITE_NOMEM returned anyway. -1.229 (danielk1 30-Dec-05): */ -1.3 (drh 15-Apr-01): *ppPager = 0; -1.229 (danielk1 30-Dec-05): -1.229 (danielk1 30-Dec-05): /* Open the pager file and set zFullPathname to point at malloc()ed -1.229 (danielk1 30-Dec-05): ** memory containing the complete filename (i.e. including the directory). 
-1.229 (danielk1 30-Dec-05): */ -1.88 (drh 26-Aug-03): if( zFilename && zFilename[0] ){ -1.169 (drh 31-Oct-04): #ifndef SQLITE_OMIT_MEMORYDB -1.107 (drh 12-May-04): if( strcmp(zFilename,":memory:")==0 ){ -1.107 (drh 12-May-04): memDb = 1; -1.133 (drh 21-Jun-04): zFullPathname = sqliteStrDup(""); -1.169 (drh 31-Oct-04): }else -1.169 (drh 31-Oct-04): #endif -1.169 (drh 31-Oct-04): { -1.231 (drh 06-Jan-06): zFullPathname = sqlite3OsFullPathname(zFilename); -1.130 (danielk1 16-Jun-04): if( zFullPathname ){ -1.231 (drh 06-Jan-06): rc = sqlite3OsOpenReadWrite(zFullPathname, &fd, &readOnly); -1.320 (danielk1 05-Apr-07): assert( rc!=SQLITE_OK || fd ); -1.130 (danielk1 16-Jun-04): } -1.107 (drh 12-May-04): } -1.14 (drh 13-Sep-01): }else{ -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerOpentemp(&fd); -1.287 (drh 15-Mar-07): sqlite3OsTempFileName(zTemp); -1.14 (drh 13-Sep-01): zFilename = zTemp; -1.231 (drh 06-Jan-06): zFullPathname = sqlite3OsFullPathname(zFilename); -1.130 (danielk1 16-Jun-04): if( rc==SQLITE_OK ){ -1.130 (danielk1 16-Jun-04): tempFile = 1; -1.130 (danielk1 16-Jun-04): } -1.14 (drh 13-Sep-01): } -1.229 (danielk1 30-Dec-05): -1.229 (danielk1 30-Dec-05): /* Allocate the Pager structure. As part of the same allocation, allocate -1.229 (danielk1 30-Dec-05): ** space for the full paths of the file, directory and journal -1.229 (danielk1 30-Dec-05): ** (Pager.zFilename, Pager.zDirectory and Pager.zJournal). -1.229 (danielk1 30-Dec-05): */ -1.229 (danielk1 30-Dec-05): if( zFullPathname ){ -1.229 (danielk1 30-Dec-05): nameLen = strlen(zFullPathname); -1.229 (danielk1 30-Dec-05): pPager = sqliteMalloc( sizeof(*pPager) + nameLen*3 + 30 ); -1.286 (danielk1 06-Mar-07): if( pPager && rc==SQLITE_OK ){ -1.286 (danielk1 06-Mar-07): pPager->pTmpSpace = (char *)sqliteMallocRaw(SQLITE_DEFAULT_PAGE_SIZE); -1.286 (danielk1 06-Mar-07): } -1.61 (drh 07-Dec-02): } -1.229 (danielk1 30-Dec-05): -1.286 (danielk1 06-Mar-07): -1.229 (danielk1 30-Dec-05): /* If an error occured in either of the blocks above, free the memory -1.229 (danielk1 30-Dec-05): ** pointed to by zFullPathname, free the Pager structure and close the -1.229 (danielk1 30-Dec-05): ** file. Since the pager is not allocated there is no need to set -1.229 (danielk1 30-Dec-05): ** any Pager.errMask variables. 
-1.229 (danielk1 30-Dec-05): */ -1.286 (danielk1 06-Mar-07): if( !pPager || !zFullPathname || !pPager->pTmpSpace || rc!=SQLITE_OK ){ -1.222 (drh 30-Nov-05): sqlite3OsClose(&fd); -1.133 (drh 21-Jun-04): sqliteFree(zFullPathname); -1.229 (danielk1 30-Dec-05): sqliteFree(pPager); -1.229 (danielk1 30-Dec-05): return ((rc==SQLITE_OK)?SQLITE_NOMEM:rc); -1.3 (drh 15-Apr-01): } -1.229 (danielk1 30-Dec-05): -1.300 (drh 26-Mar-07): PAGERTRACE3("OPEN %d %s\n", FILEHANDLEID(fd), zFullPathname); -1.283 (drh 28-Feb-07): IOTRACE(("OPEN %p %s\n", pPager, zFullPathname)) -1.1 (drh 11-Apr-01): pPager->zFilename = (char*)&pPager[1]; -1.87 (drh 27-Jul-03): pPager->zDirectory = &pPager->zFilename[nameLen+1]; -1.87 (drh 27-Jul-03): pPager->zJournal = &pPager->zDirectory[nameLen+1]; -1.335 (drh 04-May-07): memcpy(pPager->zFilename, zFullPathname, nameLen+1); -1.335 (drh 04-May-07): memcpy(pPager->zDirectory, zFullPathname, nameLen+1); -1.229 (danielk1 30-Dec-05): -1.87 (drh 27-Jul-03): for(i=nameLen; i>0 && pPager->zDirectory[i-1]!='/'; i--){} -1.87 (drh 27-Jul-03): if( i>0 ) pPager->zDirectory[i-1] = 0; -1.335 (drh 04-May-07): memcpy(pPager->zJournal, zFullPathname,nameLen); -1.61 (drh 07-Dec-02): sqliteFree(zFullPathname); -1.335 (drh 04-May-07): memcpy(&pPager->zJournal[nameLen], "-journal",sizeof("-journal")); -1.221 (drh 29-Nov-05): pPager->fd = fd; -1.237 (drh 15-Jan-06): /* pPager->journalOpen = 0; */ -1.107 (drh 12-May-04): pPager->useJournal = useJournal && !memDb; -1.188 (drh 06-Feb-05): pPager->noReadlock = noReadlock && readOnly; -1.237 (drh 15-Jan-06): /* pPager->stmtOpen = 0; */ -1.237 (drh 15-Jan-06): /* pPager->stmtInUse = 0; */ -1.237 (drh 15-Jan-06): /* pPager->nRef = 0; */ -1.107 (drh 12-May-04): pPager->dbSize = memDb-1; -1.152 (drh 22-Jul-04): pPager->pageSize = SQLITE_DEFAULT_PAGE_SIZE; -1.237 (drh 15-Jan-06): /* pPager->stmtSize = 0; */ -1.237 (drh 15-Jan-06): /* pPager->stmtJSize = 0; */ -1.237 (drh 15-Jan-06): /* pPager->nPage = 0; */ -1.152 (drh 22-Jul-04): pPager->mxPage = 100; -1.337 (drh 08-May-07): pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; -1.237 (drh 15-Jan-06): assert( PAGER_UNLOCK==0 ); -1.237 (drh 15-Jan-06): /* pPager->state = PAGER_UNLOCK; */ -1.237 (drh 15-Jan-06): /* pPager->errMask = 0; */ -1.14 (drh 13-Sep-01): pPager->tempFile = tempFile; -1.309 (drh 30-Mar-07): assert( tempFile==PAGER_LOCKINGMODE_NORMAL -1.309 (drh 30-Mar-07): || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); -1.309 (drh 30-Mar-07): assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); -1.309 (drh 30-Mar-07): pPager->exclusiveMode = tempFile; -1.107 (drh 12-May-04): pPager->memDb = memDb; -1.14 (drh 13-Sep-01): pPager->readOnly = readOnly; -1.237 (drh 15-Jan-06): /* pPager->needSync = 0; */ -1.60 (drh 02-Dec-02): pPager->noSync = pPager->tempFile || !useJournal; -1.143 (danielk1 26-Jun-04): pPager->fullSync = (pPager->noSync?0:1); -1.237 (drh 15-Jan-06): /* pPager->pFirst = 0; */ -1.237 (drh 15-Jan-06): /* pPager->pFirstSynced = 0; */ -1.237 (drh 15-Jan-06): /* pPager->pLast = 0; */ -1.168 (drh 22-Oct-04): pPager->nExtra = FORCE_ALIGNMENT(nExtra); -1.289 (danielk1 19-Mar-07): assert(fd||memDb); -1.289 (danielk1 19-Mar-07): if( !memDb ){ -1.332 (drh 01-May-07): setSectorSize(pPager); -1.289 (danielk1 19-Mar-07): } -1.237 (drh 15-Jan-06): /* pPager->pBusyHandler = 0; */ -1.237 (drh 15-Jan-06): /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ -1.1 (drh 11-Apr-01): *ppPager = pPager; -1.236 (drh 11-Jan-06): #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -1.241 (danielk1 16-Jan-06): pPager->pNext = pTsd->pPager; -1.241 (danielk1 
16-Jan-06): pTsd->pPager = pPager; -1.233 (danielk1 09-Jan-06): #endif -1.1 (drh 11-Apr-01): return SQLITE_OK; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.152 (drh 22-Jul-04): ** Set the busy handler function. -1.152 (drh 22-Jul-04): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetBusyhandler(Pager *pPager, BusyHandler *pBusyHandler){ -1.152 (drh 22-Jul-04): pPager->pBusyHandler = pBusyHandler; -1.152 (drh 22-Jul-04): } -1.152 (drh 22-Jul-04): -1.152 (drh 22-Jul-04): /* -1.7 (drh 24-May-01): ** Set the destructor for this pager. If not NULL, the destructor is called -1.14 (drh 13-Sep-01): ** when the reference count on each page reaches zero. The destructor can -1.14 (drh 13-Sep-01): ** be used to clean up information in the extra segment appended to each page. -1.7 (drh 24-May-01): ** -1.292 (danielk1 19-Mar-07): ** The destructor is not called as a result sqlite3PagerClose(). -1.292 (danielk1 19-Mar-07): ** Destructors are only called by sqlite3PagerUnref(). -1.7 (drh 24-May-01): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetDestructor(Pager *pPager, void (*xDesc)(DbPage*,int)){ -1.7 (drh 24-May-01): pPager->xDestructor = xDesc; -1.7 (drh 24-May-01): } -1.7 (drh 24-May-01): -1.7 (drh 24-May-01): /* -1.115 (drh 09-Jun-04): ** Set the reinitializer for this pager. If not NULL, the reinitializer -1.115 (drh 09-Jun-04): ** is called when the content of a page in cache is restored to its original -1.115 (drh 09-Jun-04): ** value as a result of a rollback. The callback gives higher-level code -1.115 (drh 09-Jun-04): ** an opportunity to restore the EXTRA section to agree with the restored -1.115 (drh 09-Jun-04): ** page data. -1.115 (drh 09-Jun-04): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetReiniter(Pager *pPager, void (*xReinit)(DbPage*,int)){ -1.115 (drh 09-Jun-04): pPager->xReiniter = xReinit; -1.115 (drh 09-Jun-04): } -1.115 (drh 09-Jun-04): -1.115 (drh 09-Jun-04): /* -1.203 (drh 20-May-05): ** Set the page size. Return the new size. If the suggest new page -1.203 (drh 20-May-05): ** size is inappropriate, then an alternative page size is selected -1.203 (drh 20-May-05): ** and returned. -1.152 (drh 22-Jul-04): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerSetPagesize(Pager *pPager, int pageSize){ -1.152 (drh 22-Jul-04): assert( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE ); -1.299 (danielk1 26-Mar-07): if( !pPager->memDb && pPager->nRef==0 ){ -1.299 (danielk1 26-Mar-07): pager_reset(pPager); -1.203 (drh 20-May-05): pPager->pageSize = pageSize; -1.301 (drh 27-Mar-07): pPager->pTmpSpace = sqlite3ReallocOrFree(pPager->pTmpSpace, pageSize); -1.203 (drh 20-May-05): } -1.203 (drh 20-May-05): return pPager->pageSize; -1.152 (drh 22-Jul-04): } -1.152 (drh 22-Jul-04): -1.152 (drh 22-Jul-04): /* -1.337 (drh 08-May-07): ** Attempt to set the maximum database page count if mxPage is positive. -1.337 (drh 08-May-07): ** Make no changes if mxPage is zero or negative. And never reduce the -1.337 (drh 08-May-07): ** maximum page count below the current size of the database. -1.337 (drh 08-May-07): ** -1.337 (drh 08-May-07): ** Regardless of mxPage, return the current maximum page count. 
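sqlite3PagerMaxPageCount above only stores mxPage when it is positive, and the page-count call it then makes bumps the limit back up if it ever falls below the current database size. A small sketch of that same clamp, assuming a simplified stand-in struct rather than the real Pager:

/* Illustrative only: a limit that can be raised or lowered but never
** below the current size, and that always reports its effective value. */
typedef struct {
  int currentPages;   /* pages currently in the database */
  int maxPages;       /* configured ceiling              */
} PageLimit;

static int set_max_page_count(PageLimit *p, int mxPage){
  if( mxPage>0 ){
    p->maxPages = mxPage;
  }
  if( p->maxPages < p->currentPages ){
    p->maxPages = p->currentPages;   /* never reduce below current size */
  }
  return p->maxPages;
}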
-1.337 (drh 08-May-07): */ -1.337 (drh 08-May-07): int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ -1.337 (drh 08-May-07): if( mxPage>0 ){ -1.337 (drh 08-May-07): pPager->mxPgno = mxPage; -1.337 (drh 08-May-07): } -1.337 (drh 08-May-07): sqlite3PagerPagecount(pPager); -1.337 (drh 08-May-07): return pPager->mxPgno; -1.337 (drh 08-May-07): } -1.337 (drh 08-May-07): -1.337 (drh 08-May-07): /* -1.216 (drh 04-Nov-05): ** The following set of routines are used to disable the simulated -1.216 (drh 04-Nov-05): ** I/O error mechanism. These routines are used to avoid simulated -1.216 (drh 04-Nov-05): ** errors in places where we do not care about errors. -1.216 (drh 04-Nov-05): ** -1.216 (drh 04-Nov-05): ** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops -1.216 (drh 04-Nov-05): ** and generate no code. -1.216 (drh 04-Nov-05): */ -1.216 (drh 04-Nov-05): #ifdef SQLITE_TEST -1.216 (drh 04-Nov-05): extern int sqlite3_io_error_pending; -1.216 (drh 04-Nov-05): extern int sqlite3_io_error_hit; -1.216 (drh 04-Nov-05): static int saved_cnt; -1.216 (drh 04-Nov-05): void disable_simulated_io_errors(void){ -1.216 (drh 04-Nov-05): saved_cnt = sqlite3_io_error_pending; -1.216 (drh 04-Nov-05): sqlite3_io_error_pending = -1; -1.216 (drh 04-Nov-05): } -1.216 (drh 04-Nov-05): void enable_simulated_io_errors(void){ -1.216 (drh 04-Nov-05): sqlite3_io_error_pending = saved_cnt; -1.216 (drh 04-Nov-05): } -1.216 (drh 04-Nov-05): #else -1.217 (drh 05-Nov-05): # define disable_simulated_io_errors() -1.217 (drh 05-Nov-05): # define enable_simulated_io_errors() -1.216 (drh 04-Nov-05): #endif -1.216 (drh 04-Nov-05): -1.216 (drh 04-Nov-05): /* -1.152 (drh 22-Jul-04): ** Read the first N bytes from the beginning of the file into memory -1.229 (danielk1 30-Dec-05): ** that pDest points to. -1.229 (danielk1 30-Dec-05): ** -1.229 (danielk1 30-Dec-05): ** No error checking is done. The rational for this is that this function -1.229 (danielk1 30-Dec-05): ** may be called even if the file does not exist or contain a header. In -1.229 (danielk1 30-Dec-05): ** these cases sqlite3OsRead() will return an error, to which the correct -1.229 (danielk1 30-Dec-05): ** response is to zero the memory at pDest and continue. A real IO error -1.229 (danielk1 30-Dec-05): ** will presumably recur and be picked up later (Todo: Think about this). -1.152 (drh 22-Jul-04): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ -1.275 (drh 06-Nov-06): int rc = SQLITE_OK; -1.152 (drh 22-Jul-04): memset(pDest, 0, N); -1.169 (drh 31-Oct-04): if( MEMDB==0 ){ -1.256 (danielk1 24-Jan-06): disable_simulated_io_errors(); -1.222 (drh 30-Nov-05): sqlite3OsSeek(pPager->fd, 0); -1.256 (danielk1 24-Jan-06): enable_simulated_io_errors(); -1.283 (drh 28-Feb-07): IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) -1.275 (drh 06-Nov-06): rc = sqlite3OsRead(pPager->fd, pDest, N); -1.275 (drh 06-Nov-06): if( rc==SQLITE_IOERR_SHORT_READ ){ -1.275 (drh 06-Nov-06): rc = SQLITE_OK; -1.275 (drh 06-Nov-06): } -1.152 (drh 22-Jul-04): } -1.275 (drh 06-Nov-06): return rc; -1.152 (drh 22-Jul-04): } -1.152 (drh 22-Jul-04): -1.152 (drh 22-Jul-04): /* -1.14 (drh 13-Sep-01): ** Return the total number of pages in the disk file associated with -1.213 (danielk1 16-Sep-05): ** pPager. -1.213 (danielk1 16-Sep-05): ** -1.213 (danielk1 16-Sep-05): ** If the PENDING_BYTE lies on the page directly after the end of the -1.213 (danielk1 16-Sep-05): ** file, then consider this page part of the file too. 
For example, if
-1.213 (danielk1 16-Sep-05): ** PENDING_BYTE is byte 4096 (the first byte of page 5) and the size of the
-1.213 (danielk1 16-Sep-05): ** file is 4096 bytes, 5 is returned instead of 4.
-1.1 (drh 11-Apr-01): */
-1.292 (danielk1 19-Mar-07): int sqlite3PagerPagecount(Pager *pPager){
-1.165 (drh 01-Oct-04):   i64 n;
-1.273 (drh 15-Sep-06):   int rc;
-1.3 (drh 15-Apr-01):   assert( pPager!=0 );
-1.288 (drh 15-Mar-07):   if( pPager->errCode ){
-1.288 (drh 15-Mar-07):     return 0;
-1.288 (drh 15-Mar-07):   }
-1.1 (drh 11-Apr-01):   if( pPager->dbSize>=0 ){
-1.213 (danielk1 16-Sep-05):     n = pPager->dbSize;
-1.213 (danielk1 16-Sep-05):   } else {
-1.273 (drh 15-Sep-06):     if( (rc = sqlite3OsFileSize(pPager->fd, &n))!=SQLITE_OK ){
-1.273 (drh 15-Sep-06):       pager_error(pPager, rc);
-1.213 (danielk1 16-Sep-05):       return 0;
-1.213 (danielk1 16-Sep-05):     }
-1.213 (danielk1 16-Sep-05):     if( n>0 && n<pPager->pageSize ){
-1.213 (danielk1 16-Sep-05):       n = 1;
-1.213 (danielk1 16-Sep-05):     }else{
-1.213 (danielk1 16-Sep-05):       n /= pPager->pageSize;
-1.213 (danielk1 16-Sep-05):     }
-1.213 (danielk1 16-Sep-05):     if( pPager->state!=PAGER_UNLOCK ){
-1.213 (danielk1 16-Sep-05):       pPager->dbSize = n;
-1.213 (danielk1 16-Sep-05):     }
-1.1 (drh 11-Apr-01):   }
-1.213 (danielk1 16-Sep-05):   if( n==(PENDING_BYTE/pPager->pageSize) ){
-1.127 (drh 15-Jun-04):     n++;
-1.127 (drh 15-Jun-04):   }
-1.337 (drh 08-May-07):   if( n>pPager->mxPgno ){
-1.337 (drh 08-May-07):     pPager->mxPgno = n;
-1.337 (drh 08-May-07):   }
-1.1 (drh 11-Apr-01):   return n;
-1.1 (drh 11-Apr-01): }
-1.1 (drh 11-Apr-01): 
-1.266 (drh 07-Apr-06): 
-1.266 (drh 07-Apr-06): #ifndef SQLITE_OMIT_MEMORYDB
-1.266 (drh 07-Apr-06): /*
-1.266 (drh 07-Apr-06): ** Clear a PgHistory block
-1.266 (drh 07-Apr-06): */
-1.266 (drh 07-Apr-06): static void clearHistory(PgHistory *pHist){
-1.266 (drh 07-Apr-06):   sqliteFree(pHist->pOrig);
-1.266 (drh 07-Apr-06):   sqliteFree(pHist->pStmt);
-1.266 (drh 07-Apr-06):   pHist->pOrig = 0;
-1.266 (drh 07-Apr-06):   pHist->pStmt = 0;
-1.266 (drh 07-Apr-06): }
-1.266 (drh 07-Apr-06): #else
-1.266 (drh 07-Apr-06): #define clearHistory(x)
-1.266 (drh 07-Apr-06): #endif
-1.266 (drh 07-Apr-06): 
-1.1 (drh 11-Apr-01): /*
-1.82 (drh 25-Apr-03): ** Forward declaration
-1.82 (drh 25-Apr-03): */
-1.138 (danielk1 25-Jun-04): static int syncJournal(Pager*);
-1.107 (drh 12-May-04): 
-1.107 (drh 12-May-04): /*
-1.171 (danielk1 03-Nov-04): ** Unlink pPg from its hash chain. Also set the page number to 0 to indicate
-1.171 (danielk1 03-Nov-04): ** that the page is not part of any hash chain. This is required because the
-1.292 (danielk1 19-Mar-07): ** sqlite3PagerMovepage() routine can leave a page in the
-1.171 (danielk1 03-Nov-04): ** pNextFree/pPrevFree list that is not a part of any hash-chain.
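The page-count computation above rounds the file size down to whole pages, treats a non-empty file shorter than one page as a single page, and counts one extra page when the lock-byte page (the page beginning at PENDING_BYTE) lies immediately past the end of the file. A self-contained sketch of just that arithmetic, with the sizes and the pending-byte offset passed in rather than read from a Pager:

/* Compute a page count the way the routine above does: round the byte
** size down to whole pages, treat a short non-empty file as one page,
** and include the lock-byte page when it falls just past the end. */
static long long page_count(long long nByte, int pageSize, long long pendingByte){
  long long n;
  if( nByte>0 && nByte<pageSize ){
    n = 1;
  }else{
    n = nByte / pageSize;
  }
  if( n==(pendingByte/pageSize) ){
    n++;   /* the PENDING_BYTE page is considered part of the file */
  }
  return n;
}

With 1024-byte pages, a 4096-byte file whose pending byte is at offset 4096 reports 5 pages, matching the example in the comment above.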
-1.171 (danielk1 03-Nov-04): */ -1.171 (danielk1 03-Nov-04): static void unlinkHashChain(Pager *pPager, PgHdr *pPg){ -1.171 (danielk1 03-Nov-04): if( pPg->pgno==0 ){ -1.270 (drh 28-Jun-06): assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); -1.171 (danielk1 03-Nov-04): return; -1.171 (danielk1 03-Nov-04): } -1.171 (danielk1 03-Nov-04): if( pPg->pNextHash ){ -1.171 (danielk1 03-Nov-04): pPg->pNextHash->pPrevHash = pPg->pPrevHash; -1.171 (danielk1 03-Nov-04): } -1.171 (danielk1 03-Nov-04): if( pPg->pPrevHash ){ -1.268 (drh 07-May-06): assert( pPager->aHash[pPg->pgno & (pPager->nHash-1)]!=pPg ); -1.171 (danielk1 03-Nov-04): pPg->pPrevHash->pNextHash = pPg->pNextHash; -1.171 (danielk1 03-Nov-04): }else{ -1.268 (drh 07-May-06): int h = pPg->pgno & (pPager->nHash-1); -1.171 (danielk1 03-Nov-04): pPager->aHash[h] = pPg->pNextHash; -1.171 (danielk1 03-Nov-04): } -1.264 (drh 23-Mar-06): if( MEMDB ){ -1.264 (drh 23-Mar-06): clearHistory(PGHDR_TO_HIST(pPg, pPager)); -1.264 (drh 23-Mar-06): } -1.171 (danielk1 03-Nov-04): pPg->pgno = 0; -1.171 (danielk1 03-Nov-04): pPg->pNextHash = pPg->pPrevHash = 0; -1.171 (danielk1 03-Nov-04): } -1.171 (danielk1 03-Nov-04): -1.171 (danielk1 03-Nov-04): /* -1.107 (drh 12-May-04): ** Unlink a page from the free list (the list of all pages where nRef==0) -1.107 (drh 12-May-04): ** and from its hash collision chain. -1.107 (drh 12-May-04): */ -1.107 (drh 12-May-04): static void unlinkPage(PgHdr *pPg){ -1.107 (drh 12-May-04): Pager *pPager = pPg->pPager; -1.107 (drh 12-May-04): -1.107 (drh 12-May-04): /* Keep the pFirstSynced pointer pointing at the first synchronized page */ -1.107 (drh 12-May-04): if( pPg==pPager->pFirstSynced ){ -1.107 (drh 12-May-04): PgHdr *p = pPg->pNextFree; -1.107 (drh 12-May-04): while( p && p->needSync ){ p = p->pNextFree; } -1.107 (drh 12-May-04): pPager->pFirstSynced = p; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): -1.107 (drh 12-May-04): /* Unlink from the freelist */ -1.107 (drh 12-May-04): if( pPg->pPrevFree ){ -1.107 (drh 12-May-04): pPg->pPrevFree->pNextFree = pPg->pNextFree; -1.107 (drh 12-May-04): }else{ -1.107 (drh 12-May-04): assert( pPager->pFirst==pPg ); -1.107 (drh 12-May-04): pPager->pFirst = pPg->pNextFree; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): if( pPg->pNextFree ){ -1.107 (drh 12-May-04): pPg->pNextFree->pPrevFree = pPg->pPrevFree; -1.107 (drh 12-May-04): }else{ -1.107 (drh 12-May-04): assert( pPager->pLast==pPg ); -1.107 (drh 12-May-04): pPager->pLast = pPg->pPrevFree; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): pPg->pNextFree = pPg->pPrevFree = 0; -1.107 (drh 12-May-04): -1.107 (drh 12-May-04): /* Unlink from the pgno hash table */ -1.171 (danielk1 03-Nov-04): unlinkHashChain(pPager, pPg); -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): -1.107 (drh 12-May-04): /* -1.323 (danielk1 05-Apr-07): ** This routine is used to truncate the cache when a database -1.323 (danielk1 05-Apr-07): ** is truncated. Drop from the cache all pages whose pgno is -1.323 (danielk1 05-Apr-07): ** larger than pPager->dbSize and is unreferenced. -1.323 (danielk1 05-Apr-07): ** -1.107 (drh 12-May-04): ** Referenced pages larger than pPager->dbSize are zeroed. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** Actually, at the point this routine is called, it would be -1.323 (danielk1 05-Apr-07): ** an error to have a referenced page. 
But rather than delete -1.323 (danielk1 05-Apr-07): ** that page and guarantee a subsequent segfault, it seems better -1.323 (danielk1 05-Apr-07): ** to zero it and hope that we error out sanely. -1.107 (drh 12-May-04): */ -1.323 (danielk1 05-Apr-07): static void pager_truncate_cache(Pager *pPager){ -1.107 (drh 12-May-04): PgHdr *pPg; -1.107 (drh 12-May-04): PgHdr **ppPg; -1.107 (drh 12-May-04): int dbSize = pPager->dbSize; -1.107 (drh 12-May-04): -1.107 (drh 12-May-04): ppPg = &pPager->pAll; -1.107 (drh 12-May-04): while( (pPg = *ppPg)!=0 ){ -1.107 (drh 12-May-04): if( pPg->pgno<=dbSize ){ -1.107 (drh 12-May-04): ppPg = &pPg->pNextAll; -1.107 (drh 12-May-04): }else if( pPg->nRef>0 ){ -1.107 (drh 12-May-04): memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); -1.107 (drh 12-May-04): ppPg = &pPg->pNextAll; -1.107 (drh 12-May-04): }else{ -1.107 (drh 12-May-04): *ppPg = pPg->pNextAll; -1.327 (drh 13-Apr-07): IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_pgfree_count); -1.107 (drh 12-May-04): unlinkPage(pPg); -1.267 (drh 03-May-06): makeClean(pPg); -1.107 (drh 12-May-04): sqliteFree(pPg); -1.107 (drh 12-May-04): pPager->nPage--; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): -1.82 (drh 25-Apr-03): /* -1.190 (danielk1 15-Feb-05): ** Try to obtain a lock on a file. Invoke the busy callback if the lock -1.208 (drh 09-Jul-05): ** is currently not available. Repeat until the busy callback returns -1.190 (danielk1 15-Feb-05): ** false or until the lock succeeds. -1.190 (danielk1 15-Feb-05): ** -1.190 (danielk1 15-Feb-05): ** Return SQLITE_OK on success and an error code if we cannot obtain -1.190 (danielk1 15-Feb-05): ** the lock. -1.190 (danielk1 15-Feb-05): */ -1.190 (danielk1 15-Feb-05): static int pager_wait_on_lock(Pager *pPager, int locktype){ -1.190 (danielk1 15-Feb-05): int rc; -1.279 (drh 03-Jan-07): -1.279 (drh 03-Jan-07): /* The OS lock values must be the same as the Pager lock values */ -1.190 (danielk1 15-Feb-05): assert( PAGER_SHARED==SHARED_LOCK ); -1.190 (danielk1 15-Feb-05): assert( PAGER_RESERVED==RESERVED_LOCK ); -1.190 (danielk1 15-Feb-05): assert( PAGER_EXCLUSIVE==EXCLUSIVE_LOCK ); -1.279 (drh 03-Jan-07): -1.279 (drh 03-Jan-07): /* If the file is currently unlocked then the size must be unknown */ -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED || pPager->dbSize<0 || MEMDB ); -1.279 (drh 03-Jan-07): -1.190 (danielk1 15-Feb-05): if( pPager->state>=locktype ){ -1.190 (danielk1 15-Feb-05): rc = SQLITE_OK; -1.190 (danielk1 15-Feb-05): }else{ -1.190 (danielk1 15-Feb-05): do { -1.222 (drh 30-Nov-05): rc = sqlite3OsLock(pPager->fd, locktype); -1.208 (drh 09-Jul-05): }while( rc==SQLITE_BUSY && sqlite3InvokeBusyHandler(pPager->pBusyHandler) ); -1.190 (danielk1 15-Feb-05): if( rc==SQLITE_OK ){ -1.190 (danielk1 15-Feb-05): pPager->state = locktype; -1.283 (drh 28-Feb-07): IOTRACE(("LOCK %p %d\n", pPager, locktype)) -1.190 (danielk1 15-Feb-05): } -1.190 (danielk1 15-Feb-05): } -1.190 (danielk1 15-Feb-05): return rc; -1.190 (danielk1 15-Feb-05): } -1.190 (danielk1 15-Feb-05): -1.190 (danielk1 15-Feb-05): /* -1.82 (drh 25-Apr-03): ** Truncate the file to the number of pages specified. 
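pager_wait_on_lock above keeps retrying the OS-level lock for as long as it reports busy and the registered busy handler asks for another attempt, then records the lock level it reached. A generic sketch of that retry shape; the callback pair merely stands in for sqlite3OsLock and sqlite3InvokeBusyHandler and is not SQLite's API:

#define LOCK_BUSY 5   /* illustrative stand-in for SQLITE_BUSY */

/* Retry xTryLock() until it stops reporting "busy" or the busy handler
** gives up.  xBusy returns nonzero to request another attempt. */
static int wait_on_lock(int (*xTryLock)(void *pLockCtx), void *pLockCtx,
                        int (*xBusy)(void *pBusyCtx), void *pBusyCtx){
  int rc;
  do{
    rc = xTryLock(pLockCtx);
  }while( rc==LOCK_BUSY && xBusy(pBusyCtx) );
  return rc;
}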
-1.82 (drh 25-Apr-03): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerTruncate(Pager *pPager, Pgno nPage){ -1.82 (drh 25-Apr-03): int rc; -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED || MEMDB ); -1.292 (danielk1 19-Mar-07): sqlite3PagerPagecount(pPager); -1.238 (danielk1 16-Jan-06): if( pPager->errCode ){ -1.238 (danielk1 16-Jan-06): rc = pPager->errCode; -1.83 (drh 25-Apr-03): return rc; -1.83 (drh 25-Apr-03): } -1.84 (drh 04-Jun-03): if( nPage>=(unsigned)pPager->dbSize ){ -1.82 (drh 25-Apr-03): return SQLITE_OK; -1.82 (drh 25-Apr-03): } -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.107 (drh 12-May-04): pPager->dbSize = nPage; -1.323 (danielk1 05-Apr-07): pager_truncate_cache(pPager); -1.107 (drh 12-May-04): return SQLITE_OK; -1.107 (drh 12-May-04): } -1.138 (danielk1 25-Jun-04): rc = syncJournal(pPager); -1.128 (danielk1 15-Jun-04): if( rc!=SQLITE_OK ){ -1.128 (danielk1 15-Jun-04): return rc; -1.128 (danielk1 15-Jun-04): } -1.190 (danielk1 15-Feb-05): -1.190 (danielk1 15-Feb-05): /* Get an exclusive lock on the database before truncating. */ -1.190 (danielk1 15-Feb-05): rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); -1.190 (danielk1 15-Feb-05): if( rc!=SQLITE_OK ){ -1.190 (danielk1 15-Feb-05): return rc; -1.190 (danielk1 15-Feb-05): } -1.190 (danielk1 15-Feb-05): -1.158 (drh 18-Aug-04): rc = pager_truncate(pPager, nPage); -1.82 (drh 25-Apr-03): return rc; -1.82 (drh 25-Apr-03): } -1.82 (drh 25-Apr-03): -1.82 (drh 25-Apr-03): /* -1.1 (drh 11-Apr-01): ** Shutdown the page cache. Free all memory and close all files. -1.1 (drh 11-Apr-01): ** -1.1 (drh 11-Apr-01): ** If a transaction was in progress when this routine is called, that -1.1 (drh 11-Apr-01): ** transaction is rolled back. All outstanding pages are invalidated -1.1 (drh 11-Apr-01): ** and their memory is freed. Any attempt to use a page associated -1.1 (drh 11-Apr-01): ** with this page cache after this function returns will likely -1.1 (drh 11-Apr-01): ** result in a coredump. -1.229 (danielk1 30-Dec-05): ** -1.229 (danielk1 30-Dec-05): ** This function always succeeds. If a transaction is active an attempt -1.229 (danielk1 30-Dec-05): ** is made to roll it back. If an error occurs during the rollback -1.229 (danielk1 30-Dec-05): ** a hot journal may be left in the filesystem but no error is returned -1.229 (danielk1 30-Dec-05): ** to the caller. -1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerClose(Pager *pPager){ -1.236 (drh 11-Jan-06): #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -1.242 (danielk1 18-Jan-06): /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to -1.242 (danielk1 18-Jan-06): ** malloc() must have already been made by this thread before it gets -1.242 (danielk1 18-Jan-06): ** to this point. This means the ThreadData must have been allocated already -1.242 (danielk1 18-Jan-06): ** so that ThreadData.nAlloc can be set. 
-1.242 (danielk1 18-Jan-06): */ -1.241 (danielk1 16-Jan-06): ThreadData *pTsd = sqlite3ThreadData(); -1.245 (danielk1 18-Jan-06): assert( pPager ); -1.242 (danielk1 18-Jan-06): assert( pTsd && pTsd->nAlloc ); -1.225 (danielk1 18-Dec-05): #endif -1.225 (danielk1 18-Dec-05): -1.280 (drh 03-Jan-07): disable_simulated_io_errors(); -1.281 (drh 04-Jan-07): pPager->errCode = 0; -1.294 (danielk1 24-Mar-07): pPager->exclusiveMode = 0; -1.280 (drh 03-Jan-07): pager_reset(pPager); -1.293 (danielk1 23-Mar-07): pagerUnlockAndRollback(pPager); -1.280 (drh 03-Jan-07): enable_simulated_io_errors(); -1.300 (drh 26-Mar-07): PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); -1.283 (drh 28-Feb-07): IOTRACE(("CLOSE %p\n", pPager)) -1.238 (danielk1 16-Jan-06): assert( pPager->errCode || (pPager->journalOpen==0 && pPager->stmtOpen==0) ); -1.197 (danielk1 21-Mar-05): if( pPager->journalOpen ){ -1.222 (drh 30-Nov-05): sqlite3OsClose(&pPager->jfd); -1.197 (danielk1 21-Mar-05): } -1.201 (drh 28-Mar-05): sqliteFree(pPager->aInJournal); -1.197 (danielk1 21-Mar-05): if( pPager->stmtOpen ){ -1.222 (drh 30-Nov-05): sqlite3OsClose(&pPager->stfd); -1.197 (danielk1 21-Mar-05): } -1.222 (drh 30-Nov-05): sqlite3OsClose(&pPager->fd); -1.46 (drh 30-May-02): /* Temp files are automatically deleted by the OS -1.46 (drh 30-May-02): ** if( pPager->tempFile ){ -1.231 (drh 06-Jan-06): ** sqlite3OsDelete(pPager->zFilename); -1.46 (drh 30-May-02): ** } -1.46 (drh 30-May-02): */ -1.182 (danielk1 13-Jan-05): -1.236 (drh 11-Jan-06): #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -1.229 (danielk1 30-Dec-05): /* Remove the pager from the linked list of pagers starting at -1.233 (danielk1 09-Jan-06): ** ThreadData.pPager if memory-management is enabled. -1.229 (danielk1 30-Dec-05): */ -1.241 (danielk1 16-Jan-06): if( pPager==pTsd->pPager ){ -1.241 (danielk1 16-Jan-06): pTsd->pPager = pPager->pNext; -1.241 (danielk1 16-Jan-06): }else{ -1.241 (danielk1 16-Jan-06): Pager *pTmp; -1.259 (drh 24-Feb-06): for(pTmp = pTsd->pPager; pTmp->pNext!=pPager; pTmp=pTmp->pNext){} -1.241 (danielk1 16-Jan-06): pTmp->pNext = pPager->pNext; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): #endif -1.268 (drh 07-May-06): sqliteFree(pPager->aHash); -1.286 (danielk1 06-Mar-07): sqliteFree(pPager->pTmpSpace); -1.1 (drh 11-Apr-01): sqliteFree(pPager); -1.1 (drh 11-Apr-01): return SQLITE_OK; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.338 (drh 08-May-07): #if !defined(NDEBUG) || defined(SQLITE_TEST) -1.1 (drh 11-Apr-01): /* -1.14 (drh 13-Sep-01): ** Return the page number for the given page data. -1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): Pgno sqlite3PagerPagenumber(DbPage *p){ -1.1 (drh 11-Apr-01): return p->pgno; -1.1 (drh 11-Apr-01): } -1.338 (drh 08-May-07): #endif -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.105 (drh 08-May-04): ** The page_ref() function increments the reference count for a page. -1.105 (drh 08-May-04): ** If the page is currently on the freelist (the reference count is zero) then -1.5 (drh 28-Apr-01): ** remove it from the freelist. -1.105 (drh 08-May-04): ** -1.105 (drh 08-May-04): ** For non-test systems, page_ref() is a macro that calls _page_ref() -1.105 (drh 08-May-04): ** online of the reference count is zero. For test systems, page_ref() -1.105 (drh 08-May-04): ** is a real function so that we can set breakpoints and trace it. -1.5 (drh 28-Apr-01): */ -1.66 (drh 11-Jan-03): static void _page_ref(PgHdr *pPg){ -1.5 (drh 28-Apr-01): if( pPg->nRef==0 ){ -1.5 (drh 28-Apr-01): /* The page is currently on the freelist. 
Remove it. */ -1.69 (drh 21-Jan-03): if( pPg==pPg->pPager->pFirstSynced ){ -1.69 (drh 21-Jan-03): PgHdr *p = pPg->pNextFree; -1.69 (drh 21-Jan-03): while( p && p->needSync ){ p = p->pNextFree; } -1.69 (drh 21-Jan-03): pPg->pPager->pFirstSynced = p; -1.69 (drh 21-Jan-03): } -1.5 (drh 28-Apr-01): if( pPg->pPrevFree ){ -1.5 (drh 28-Apr-01): pPg->pPrevFree->pNextFree = pPg->pNextFree; -1.5 (drh 28-Apr-01): }else{ -1.5 (drh 28-Apr-01): pPg->pPager->pFirst = pPg->pNextFree; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): if( pPg->pNextFree ){ -1.5 (drh 28-Apr-01): pPg->pNextFree->pPrevFree = pPg->pPrevFree; -1.5 (drh 28-Apr-01): }else{ -1.5 (drh 28-Apr-01): pPg->pPager->pLast = pPg->pPrevFree; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): pPg->pPager->nRef++; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): pPg->nRef++; -1.12 (drh 28-Jun-01): REFINFO(pPg); -1.10 (drh 23-Jun-01): } -1.182 (danielk1 13-Jan-05): #ifdef SQLITE_DEBUG -1.105 (drh 08-May-04): static void page_ref(PgHdr *pPg){ -1.105 (drh 08-May-04): if( pPg->nRef==0 ){ -1.105 (drh 08-May-04): _page_ref(pPg); -1.105 (drh 08-May-04): }else{ -1.105 (drh 08-May-04): pPg->nRef++; -1.105 (drh 08-May-04): REFINFO(pPg); -1.105 (drh 08-May-04): } -1.105 (drh 08-May-04): } -1.105 (drh 08-May-04): #else -1.105 (drh 08-May-04): # define page_ref(P) ((P)->nRef==0?_page_ref(P):(void)(P)->nRef++) -1.105 (drh 08-May-04): #endif -1.10 (drh 23-Jun-01): -1.10 (drh 23-Jun-01): /* -1.10 (drh 23-Jun-01): ** Increment the reference count for a page. The input pointer is -1.10 (drh 23-Jun-01): ** a reference to the page data. -1.10 (drh 23-Jun-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerRef(DbPage *pPg){ -1.10 (drh 23-Jun-01): page_ref(pPg); -1.9 (drh 22-Jun-01): return SQLITE_OK; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): -1.5 (drh 28-Apr-01): /* -1.94 (drh 08-Feb-04): ** Sync the journal. In other words, make sure all the pages that have -1.94 (drh 08-Feb-04): ** been written to the journal have actually reached the surface of the -1.94 (drh 08-Feb-04): ** disk. It is not safe to modify the original database file until after -1.94 (drh 08-Feb-04): ** the journal has been synced. If the original database is modified before -1.94 (drh 08-Feb-04): ** the journal is synced and a power failure occurs, the unsynced journal -1.94 (drh 08-Feb-04): ** data would be lost and we would be unable to completely rollback the -1.94 (drh 08-Feb-04): ** database changes. Database corruption would occur. -1.94 (drh 08-Feb-04): ** -1.94 (drh 08-Feb-04): ** This routine also updates the nRec field in the header of the journal. -1.94 (drh 08-Feb-04): ** (See comments on the pager_playback() routine for additional information.) -1.94 (drh 08-Feb-04): ** If the sync mode is FULL, two syncs will occur. First the whole journal -1.94 (drh 08-Feb-04): ** is synced, then the nRec field is updated, then a second sync occurs. -1.94 (drh 08-Feb-04): ** -1.94 (drh 08-Feb-04): ** For temporary databases, we do not care if we are able to rollback -1.94 (drh 08-Feb-04): ** after a power failure, so sync occurs. -1.20 (drh 16-Sep-01): ** -1.94 (drh 08-Feb-04): ** This routine clears the needSync field of every page current held in -1.94 (drh 08-Feb-04): ** memory. 
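page_ref/_page_ref above only do real work on the 0-to-1 transition: the page is spliced out of the free list (and pFirstSynced is advanced past it) so it can no longer be recycled, while every later reference is a plain increment. A sketch of that first-reference unlink over a simplified doubly linked free list; the pFirstSynced bookkeeping is omitted here:

typedef struct Node Node;
struct Node {
  int nRef;
  Node *pPrevFree, *pNextFree;
};
typedef struct {
  Node *pFirst, *pLast;
} FreeList;

/* Bump a reference count; on the 0->1 transition unlink the node from
** the free list so it can no longer be recycled. */
static void ref_node(FreeList *pList, Node *p){
  if( p->nRef==0 ){
    if( p->pPrevFree ){
      p->pPrevFree->pNextFree = p->pNextFree;
    }else{
      pList->pFirst = p->pNextFree;
    }
    if( p->pNextFree ){
      p->pNextFree->pPrevFree = p->pPrevFree;
    }else{
      pList->pLast = p->pPrevFree;
    }
    p->pPrevFree = p->pNextFree = 0;
  }
  p->nRef++;
}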
-1.19 (drh 15-Sep-01): */ -1.138 (danielk1 25-Jun-04): static int syncJournal(Pager *pPager){ -1.19 (drh 15-Sep-01): PgHdr *pPg; -1.19 (drh 15-Sep-01): int rc = SQLITE_OK; -1.57 (drh 10-Nov-02): -1.57 (drh 10-Nov-02): /* Sync the journal before modifying the main database -1.57 (drh 10-Nov-02): ** (assuming there is a journal and it needs to be synced.) -1.57 (drh 10-Nov-02): */ -1.138 (danielk1 25-Jun-04): if( pPager->needSync ){ -1.37 (drh 02-Feb-02): if( !pPager->tempFile ){ -1.68 (drh 16-Jan-03): assert( pPager->journalOpen ); -1.101 (drh 25-Feb-04): /* assert( !pPager->noSync ); // noSync might be set if synchronous -1.101 (drh 25-Feb-04): ** was turned off after the transaction was started. Ticket #615 */ -1.73 (drh 11-Feb-03): #ifndef NDEBUG -1.73 (drh 11-Feb-03): { -1.94 (drh 08-Feb-04): /* Make sure the pPager->nRec counter we are keeping agrees -1.94 (drh 08-Feb-04): ** with the nRec computed from the size of the journal file. -1.94 (drh 08-Feb-04): */ -1.165 (drh 01-Oct-04): i64 jSz; -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(pPager->jfd, &jSz); -1.73 (drh 11-Feb-03): if( rc!=0 ) return rc; -1.138 (danielk1 25-Jun-04): assert( pPager->journalOff==jSz ); -1.73 (drh 11-Feb-03): } -1.73 (drh 11-Feb-03): #endif -1.116 (drh 09-Jun-04): { -1.138 (danielk1 25-Jun-04): /* Write the nRec value into the journal file header. If in -1.138 (danielk1 25-Jun-04): ** full-synchronous mode, sync the journal first. This ensures that -1.138 (danielk1 25-Jun-04): ** all data has really hit the disk before nRec is updated to mark -1.138 (danielk1 25-Jun-04): ** it as a candidate for rollback. -1.138 (danielk1 25-Jun-04): */ -1.74 (drh 12-Feb-03): if( pPager->fullSync ){ -1.300 (drh 26-Mar-07): PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); -1.283 (drh 28-Feb-07): IOTRACE(("JSYNC %p\n", pPager)) -1.222 (drh 30-Nov-05): rc = sqlite3OsSync(pPager->jfd, 0); -1.74 (drh 12-Feb-03): if( rc!=0 ) return rc; -1.74 (drh 12-Feb-03): } -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->jfd, -1.212 (drh 09-Sep-05): pPager->journalHdr + sizeof(aJournalMagic)); -1.212 (drh 09-Sep-05): if( rc ) return rc; -1.283 (drh 28-Feb-07): IOTRACE(("JHDR %p %lld %d\n", pPager, -1.283 (drh 28-Feb-07): pPager->journalHdr + sizeof(aJournalMagic), 4)) -1.221 (drh 29-Nov-05): rc = write32bits(pPager->jfd, pPager->nRec); -1.78 (drh 16-Feb-03): if( rc ) return rc; -1.110 (danielk1 03-Jun-04): -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); -1.212 (drh 09-Sep-05): if( rc ) return rc; -1.73 (drh 11-Feb-03): } -1.300 (drh 26-Mar-07): PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); -1.334 (drh 04-May-07): IOTRACE(("JSYNC %p\n", pPager)) -1.258 (drh 11-Feb-06): rc = sqlite3OsSync(pPager->jfd, pPager->full_fsync); -1.37 (drh 02-Feb-02): if( rc!=0 ) return rc; -1.68 (drh 16-Jan-03): pPager->journalStarted = 1; -1.37 (drh 02-Feb-02): } -1.19 (drh 15-Sep-01): pPager->needSync = 0; -1.69 (drh 21-Jan-03): -1.69 (drh 21-Jan-03): /* Erase the needSync flag from every page. -1.69 (drh 21-Jan-03): */ -1.69 (drh 21-Jan-03): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.69 (drh 21-Jan-03): pPg->needSync = 0; -1.69 (drh 21-Jan-03): } -1.69 (drh 21-Jan-03): pPager->pFirstSynced = pPager->pFirst; -1.19 (drh 15-Sep-01): } -1.57 (drh 10-Nov-02): -1.69 (drh 21-Jan-03): #ifndef NDEBUG -1.69 (drh 21-Jan-03): /* If the Pager.needSync flag is clear then the PgHdr.needSync -1.69 (drh 21-Jan-03): ** flag must also be clear for all pages. Verify that this -1.69 (drh 21-Jan-03): ** invariant is true. 
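The full-sync path of syncJournal above is careful about ordering: the journal body is synced first, only then is the nRec count written into the journal header, and a second sync makes the header durable before the database file itself may be modified, so a crash can never leave a header that advertises records that never reached disk. A condensed sketch of that ordering using stdio; fflush() merely stands in for a real fsync(), and the offset parameter plays the role of journalHdr plus the magic-string size:

#include <stdio.h>

/* Ordering sketch only.  fflush() flushes stdio buffers; an actual pager
** must also ask the OS to flush its caches (fsync or equivalent). */
static int sync_journal(FILE *jrnl, long nRecOffset, unsigned nRec, int fullSync){
  unsigned char a[4];
  a[0] = (unsigned char)(nRec>>24);
  a[1] = (unsigned char)(nRec>>16);
  a[2] = (unsigned char)(nRec>>8);
  a[3] = (unsigned char)(nRec);
  if( fullSync && fflush(jrnl)!=0 ) return -1;   /* 1: record data reaches disk */
  if( fseek(jrnl, nRecOffset, SEEK_SET)!=0 ) return -1;
  if( fwrite(a, 1, 4, jrnl)!=4 ) return -1;      /* 2: only now set nRec        */
  return fflush(jrnl)!=0 ? -1 : 0;               /* 3: header durable before    */
                                                 /*    the database is written  */
}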
-1.68 (drh 16-Jan-03): */ -1.69 (drh 21-Jan-03): else{ -1.69 (drh 21-Jan-03): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.69 (drh 21-Jan-03): assert( pPg->needSync==0 ); -1.69 (drh 21-Jan-03): } -1.69 (drh 21-Jan-03): assert( pPager->pFirstSynced==pPager->pFirst ); -1.19 (drh 15-Sep-01): } -1.69 (drh 21-Jan-03): #endif -1.69 (drh 21-Jan-03): -1.70 (drh 22-Jan-03): return rc; -1.70 (drh 22-Jan-03): } -1.70 (drh 22-Jan-03): -1.70 (drh 22-Jan-03): /* -1.269 (drh 15-Jun-06): ** Merge two lists of pages connected by pDirty and in pgno order. -1.269 (drh 15-Jun-06): ** Do not both fixing the pPrevDirty pointers. -1.269 (drh 15-Jun-06): */ -1.269 (drh 15-Jun-06): static PgHdr *merge_pagelist(PgHdr *pA, PgHdr *pB){ -1.269 (drh 15-Jun-06): PgHdr result, *pTail; -1.269 (drh 15-Jun-06): pTail = &result; -1.269 (drh 15-Jun-06): while( pA && pB ){ -1.269 (drh 15-Jun-06): if( pA->pgnopgno ){ -1.269 (drh 15-Jun-06): pTail->pDirty = pA; -1.269 (drh 15-Jun-06): pTail = pA; -1.269 (drh 15-Jun-06): pA = pA->pDirty; -1.269 (drh 15-Jun-06): }else{ -1.269 (drh 15-Jun-06): pTail->pDirty = pB; -1.269 (drh 15-Jun-06): pTail = pB; -1.269 (drh 15-Jun-06): pB = pB->pDirty; -1.269 (drh 15-Jun-06): } -1.269 (drh 15-Jun-06): } -1.269 (drh 15-Jun-06): if( pA ){ -1.269 (drh 15-Jun-06): pTail->pDirty = pA; -1.269 (drh 15-Jun-06): }else if( pB ){ -1.269 (drh 15-Jun-06): pTail->pDirty = pB; -1.269 (drh 15-Jun-06): }else{ -1.269 (drh 15-Jun-06): pTail->pDirty = 0; -1.269 (drh 15-Jun-06): } -1.269 (drh 15-Jun-06): return result.pDirty; -1.269 (drh 15-Jun-06): } -1.269 (drh 15-Jun-06): -1.269 (drh 15-Jun-06): /* -1.269 (drh 15-Jun-06): ** Sort the list of pages in accending order by pgno. Pages are -1.269 (drh 15-Jun-06): ** connected by pDirty pointers. The pPrevDirty pointers are -1.269 (drh 15-Jun-06): ** corrupted by this sort. -1.269 (drh 15-Jun-06): */ -1.314 (danielk1 02-Apr-07): #define N_SORT_BUCKET_ALLOC 25 -1.314 (danielk1 02-Apr-07): #define N_SORT_BUCKET 25 -1.314 (danielk1 02-Apr-07): #ifdef SQLITE_TEST -1.314 (danielk1 02-Apr-07): int sqlite3_pager_n_sort_bucket = 0; -1.314 (danielk1 02-Apr-07): #undef N_SORT_BUCKET -1.314 (danielk1 02-Apr-07): #define N_SORT_BUCKET \ -1.314 (danielk1 02-Apr-07): (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC) -1.314 (danielk1 02-Apr-07): #endif -1.269 (drh 15-Jun-06): static PgHdr *sort_pagelist(PgHdr *pIn){ -1.314 (danielk1 02-Apr-07): PgHdr *a[N_SORT_BUCKET_ALLOC], *p; -1.269 (drh 15-Jun-06): int i; -1.269 (drh 15-Jun-06): memset(a, 0, sizeof(a)); -1.269 (drh 15-Jun-06): while( pIn ){ -1.269 (drh 15-Jun-06): p = pIn; -1.269 (drh 15-Jun-06): pIn = p->pDirty; -1.269 (drh 15-Jun-06): p->pDirty = 0; -1.269 (drh 15-Jun-06): for(i=0; ipPager; -1.112 (danielk1 04-Jun-04): -1.112 (danielk1 04-Jun-04): /* At this point there may be either a RESERVED or EXCLUSIVE lock on the -1.112 (danielk1 04-Jun-04): ** database file. If there is already an EXCLUSIVE lock, the following -1.222 (drh 30-Nov-05): ** calls to sqlite3OsLock() are no-ops. -1.112 (danielk1 04-Jun-04): ** -1.115 (drh 09-Jun-04): ** Moving the lock from RESERVED to EXCLUSIVE actually involves going -1.115 (drh 09-Jun-04): ** through an intermediate state PENDING. A PENDING lock prevents new -1.115 (drh 09-Jun-04): ** readers from attaching to the database but is unsufficient for us to -1.115 (drh 09-Jun-04): ** write. The idea of a PENDING lock is to prevent new readers from -1.115 (drh 09-Jun-04): ** coming in while we wait for existing readers to clear. 
-1.112 (danielk1 04-Jun-04): ** -1.115 (drh 09-Jun-04): ** While the pager is in the RESERVED state, the original database file -1.115 (drh 09-Jun-04): ** is unchanged and we can rollback without having to playback the -1.115 (drh 09-Jun-04): ** journal into the original database file. Once we transition to -1.115 (drh 09-Jun-04): ** EXCLUSIVE, it means the database file has been changed and any rollback -1.115 (drh 09-Jun-04): ** will require a journal playback. -1.112 (danielk1 04-Jun-04): */ -1.167 (drh 05-Oct-04): rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); -1.112 (danielk1 04-Jun-04): if( rc!=SQLITE_OK ){ -1.112 (danielk1 04-Jun-04): return rc; -1.112 (danielk1 04-Jun-04): } -1.112 (danielk1 04-Jun-04): -1.269 (drh 15-Jun-06): pList = sort_pagelist(pList); -1.70 (drh 22-Jan-03): while( pList ){ -1.70 (drh 22-Jan-03): assert( pList->dirty ); -1.222 (drh 30-Nov-05): rc = sqlite3OsSeek(pPager->fd, (pList->pgno-1)*(i64)pPager->pageSize); -1.212 (drh 09-Sep-05): if( rc ) return rc; -1.170 (danielk1 02-Nov-04): /* If there are dirty pages in the page cache with page numbers greater -1.292 (danielk1 19-Mar-07): ** than Pager.dbSize, this means sqlite3PagerTruncate() was called to -1.170 (danielk1 02-Nov-04): ** make the file smaller (presumably by auto-vacuum code). Do not write -1.170 (danielk1 02-Nov-04): ** any such pages to the file. -1.170 (danielk1 02-Nov-04): */ -1.170 (danielk1 02-Nov-04): if( pList->pgno<=pPager->dbSize ){ -1.261 (drh 06-Mar-06): char *pData = CODEC2(pPager, PGHDR_TO_DATA(pList), pList->pgno, 6); -1.344 (drh 16-Jun-07): PAGERTRACE4("STORE %d page %d hash(%08x)\n", -1.344 (drh 16-Jun-07): PAGERID(pPager), pList->pgno, pager_pagehash(pList)); -1.327 (drh 13-Apr-07): IOTRACE(("PGOUT %p %d\n", pPager, pList->pgno)); -1.261 (drh 06-Mar-06): rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize); -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_writedb_count); -1.327 (drh 13-Apr-07): PAGER_INCR(pPager->nWrite); -1.329 (drh 16-Apr-07): if( pList->pgno==1 ){ -1.329 (drh 16-Apr-07): memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); -1.329 (drh 16-Apr-07): } -1.170 (danielk1 02-Nov-04): } -1.170 (danielk1 02-Nov-04): #ifndef NDEBUG -1.170 (danielk1 02-Nov-04): else{ -1.300 (drh 26-Mar-07): PAGERTRACE3("NOSTORE %d page %d\n", PAGERID(pPager), pList->pgno); -1.170 (danielk1 02-Nov-04): } -1.170 (danielk1 02-Nov-04): #endif -1.70 (drh 22-Jan-03): if( rc ) return rc; -1.70 (drh 22-Jan-03): pList->dirty = 0; -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): pList->pageHash = pager_pagehash(pList); -1.189 (danielk1 15-Feb-05): #endif -1.70 (drh 22-Jan-03): pList = pList->pDirty; -1.70 (drh 22-Jan-03): } -1.70 (drh 22-Jan-03): return SQLITE_OK; -1.70 (drh 22-Jan-03): } -1.68 (drh 16-Jan-03): -1.70 (drh 22-Jan-03): /* -1.70 (drh 22-Jan-03): ** Collect every dirty page into a dirty list and -1.70 (drh 22-Jan-03): ** return a pointer to the head of that list. All pages are -1.70 (drh 22-Jan-03): ** collected even if they are still in use. -1.70 (drh 22-Jan-03): */ -1.70 (drh 22-Jan-03): static PgHdr *pager_get_all_dirty_pages(Pager *pPager){ -1.267 (drh 03-May-06): return pPager->pDirty; -1.19 (drh 15-Sep-01): } -1.19 (drh 15-Sep-01): -1.19 (drh 15-Sep-01): /* -1.194 (drh 15-Mar-05): ** Return TRUE if there is a hot journal on the given pager. -1.194 (drh 15-Mar-05): ** A hot journal is one that needs to be played back. 
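merge_pagelist and sort_pagelist above are a linked-list merge sort over the pDirty pointers, so that pager_write_pagelist can emit dirty pages in ascending page-number order and keep the writes roughly sequential. A standalone sketch of the merge step on a simplified node type; sort_pagelist drives repeated merges of this kind through its bucket array, doubling the merged list length at each level:

typedef struct DirtyPg DirtyPg;
struct DirtyPg {
  unsigned pgno;
  DirtyPg *pDirty;     /* next page in the list */
};

/* Merge two lists already sorted by pgno into one sorted list. */
static DirtyPg *merge_by_pgno(DirtyPg *pA, DirtyPg *pB){
  DirtyPg head, *pTail = &head;
  while( pA && pB ){
    if( pA->pgno < pB->pgno ){
      pTail->pDirty = pA;  pTail = pA;  pA = pA->pDirty;
    }else{
      pTail->pDirty = pB;  pTail = pB;  pB = pB->pDirty;
    }
  }
  pTail->pDirty = pA ? pA : pB;
  return head.pDirty;
}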
-1.194 (drh 15-Mar-05): ** -1.194 (drh 15-Mar-05): ** If the current size of the database file is 0 but a journal file -1.194 (drh 15-Mar-05): ** exists, that is probably an old journal left over from a prior -1.194 (drh 15-Mar-05): ** database with the same name. Just delete the journal. -1.194 (drh 15-Mar-05): */ -1.194 (drh 15-Mar-05): static int hasHotJournal(Pager *pPager){ -1.194 (drh 15-Mar-05): if( !pPager->useJournal ) return 0; -1.324 (drh 06-Apr-07): if( !sqlite3OsFileExists(pPager->zJournal) ){ -1.324 (drh 06-Apr-07): return 0; -1.324 (drh 06-Apr-07): } -1.324 (drh 06-Apr-07): if( sqlite3OsCheckReservedLock(pPager->fd) ){ -1.324 (drh 06-Apr-07): return 0; -1.324 (drh 06-Apr-07): } -1.292 (danielk1 19-Mar-07): if( sqlite3PagerPagecount(pPager)==0 ){ -1.231 (drh 06-Jan-06): sqlite3OsDelete(pPager->zJournal); -1.194 (drh 15-Mar-05): return 0; -1.194 (drh 15-Mar-05): }else{ -1.194 (drh 15-Mar-05): return 1; -1.194 (drh 15-Mar-05): } -1.194 (drh 15-Mar-05): } -1.194 (drh 15-Mar-05): -1.228 (danielk1 20-Dec-05): /* -1.229 (danielk1 30-Dec-05): ** Try to find a page in the cache that can be recycled. -1.229 (danielk1 30-Dec-05): ** -1.229 (danielk1 30-Dec-05): ** This routine may return SQLITE_IOERR, SQLITE_FULL or SQLITE_OK. It -1.238 (danielk1 16-Jan-06): ** does not set the pPager->errCode variable. -1.228 (danielk1 20-Dec-05): */ -1.225 (danielk1 18-Dec-05): static int pager_recycle(Pager *pPager, int syncOk, PgHdr **ppPg){ -1.225 (danielk1 18-Dec-05): PgHdr *pPg; -1.225 (danielk1 18-Dec-05): *ppPg = 0; -1.225 (danielk1 18-Dec-05): -1.321 (danielk1 05-Apr-07): assert(!MEMDB); -1.321 (danielk1 05-Apr-07): -1.225 (danielk1 18-Dec-05): /* Find a page to recycle. Try to locate a page that does not -1.225 (danielk1 18-Dec-05): ** require us to do an fsync() on the journal. -1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): pPg = pPager->pFirstSynced; -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): /* If we could not find a page that does not require an fsync() -1.225 (danielk1 18-Dec-05): ** on the journal file then fsync the journal file. This is a -1.225 (danielk1 18-Dec-05): ** very slow operation, so we work hard to avoid it. But sometimes -1.225 (danielk1 18-Dec-05): ** it can't be helped. -1.225 (danielk1 18-Dec-05): */ -1.228 (danielk1 20-Dec-05): if( pPg==0 && pPager->pFirst && syncOk && !MEMDB){ -1.225 (danielk1 18-Dec-05): int rc = syncJournal(pPager); -1.225 (danielk1 18-Dec-05): if( rc!=0 ){ -1.229 (danielk1 30-Dec-05): return rc; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): if( pPager->fullSync ){ -1.225 (danielk1 18-Dec-05): /* If in full-sync mode, write a new journal header into the -1.225 (danielk1 18-Dec-05): ** journal file. This is done to avoid ever modifying a journal -1.225 (danielk1 18-Dec-05): ** header that is involved in the rollback of pages that have -1.225 (danielk1 18-Dec-05): ** already been written to the database (in case the header is -1.225 (danielk1 18-Dec-05): ** trashed when the nRec field is updated). 
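hasHotJournal above only reports a journal as hot when journalling is enabled, a journal file exists while no process holds a RESERVED lock, and the database is non-empty; a journal sitting next to a zero-page database is treated as leftovers and deleted rather than played back. A sketch of that predicate with the individual facts passed in as plain flags instead of being probed through the OS layer:

/* Decide whether a rollback journal must be played back.  The inputs
** mirror the checks above: is journalling in use, does a journal file
** exist, does some other process hold a RESERVED (write-intent) lock,
** and how many pages does the database currently contain. */
static int is_hot_journal(int useJournal, int journalExists,
                          int reservedLockHeld, int nDbPage){
  if( !useJournal )      return 0;  /* this pager never journals         */
  if( !journalExists )   return 0;  /* nothing to play back              */
  if( reservedLockHeld ) return 0;  /* a live writer owns that journal   */
  if( nDbPage==0 )       return 0;  /* stale journal; caller deletes it  */
  return 1;
}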
-1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): pPager->nRec = 0; -1.225 (danielk1 18-Dec-05): assert( pPager->journalOff > 0 ); -1.290 (danielk1 19-Mar-07): assert( pPager->doNotSync==0 ); -1.225 (danielk1 18-Dec-05): rc = writeJournalHdr(pPager); -1.225 (danielk1 18-Dec-05): if( rc!=0 ){ -1.229 (danielk1 30-Dec-05): return rc; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): pPg = pPager->pFirst; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): if( pPg==0 ){ -1.225 (danielk1 18-Dec-05): return SQLITE_OK; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): assert( pPg->nRef==0 ); -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): /* Write the page to the database file if it is dirty. -1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): if( pPg->dirty ){ -1.225 (danielk1 18-Dec-05): int rc; -1.225 (danielk1 18-Dec-05): assert( pPg->needSync==0 ); -1.267 (drh 03-May-06): makeClean(pPg); -1.267 (drh 03-May-06): pPg->dirty = 1; -1.225 (danielk1 18-Dec-05): pPg->pDirty = 0; -1.225 (danielk1 18-Dec-05): rc = pager_write_pagelist( pPg ); -1.225 (danielk1 18-Dec-05): if( rc!=SQLITE_OK ){ -1.229 (danielk1 30-Dec-05): return rc; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): assert( pPg->dirty==0 ); -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): /* If the page we are recycling is marked as alwaysRollback, then -1.225 (danielk1 18-Dec-05): ** set the global alwaysRollback flag, thus disabling the -1.307 (drh 30-Mar-07): ** sqlite3PagerDontRollback() optimization for the rest of this transaction. -1.225 (danielk1 18-Dec-05): ** It is necessary to do this because the page marked alwaysRollback -1.225 (danielk1 18-Dec-05): ** might be reloaded at a later time but at that point we won't remember -1.225 (danielk1 18-Dec-05): ** that is was marked alwaysRollback. This means that all pages must -1.225 (danielk1 18-Dec-05): ** be marked as alwaysRollback from here on out. -1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): if( pPg->alwaysRollback ){ -1.284 (drh 01-Mar-07): IOTRACE(("ALWAYS_ROLLBACK %p\n", pPager)) -1.225 (danielk1 18-Dec-05): pPager->alwaysRollback = 1; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): /* Unlink the old page from the free list and the hash table -1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): unlinkPage(pPg); -1.319 (drh 05-Apr-07): assert( pPg->pgno==0 ); -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): *ppPg = pPg; -1.225 (danielk1 18-Dec-05): return SQLITE_OK; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): /* -1.225 (danielk1 18-Dec-05): ** This function is called to free superfluous dynamically allocated memory -1.225 (danielk1 18-Dec-05): ** held by the pager system. Memory in use by any SQLite pager allocated -1.225 (danielk1 18-Dec-05): ** by the current thread may be sqliteFree()ed. -1.225 (danielk1 18-Dec-05): ** -1.225 (danielk1 18-Dec-05): ** nReq is the number of bytes of memory required. Once this much has -1.226 (danielk1 19-Dec-05): ** been released, the function returns. A negative value for nReq means -1.226 (danielk1 19-Dec-05): ** free as much memory as possible. The return value is the total number -1.225 (danielk1 18-Dec-05): ** of bytes of memory released. 
-1.225 (danielk1 18-Dec-05): */
-1.338 (drh 08-May-07): #if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO)
-1.292 (danielk1 19-Mar-07): int sqlite3PagerReleaseMemory(int nReq){
-1.236 (drh 11-Jan-06):   const ThreadData *pTsdro = sqlite3ThreadDataReadOnly();
-1.225 (danielk1 18-Dec-05):   int nReleased = 0;
-1.225 (danielk1 18-Dec-05):   int i;
-1.225 (danielk1 18-Dec-05): 
-1.234 (drh 09-Jan-06):   /* If the global mutex is held, this subroutine becomes a
-1.234 (drh 09-Jan-06):   ** no-op; zero bytes of memory are freed. This is because
-1.234 (drh 09-Jan-06):   ** some of the code invoked by this function may also
-1.234 (drh 09-Jan-06):   ** try to obtain the mutex, resulting in a deadlock.
-1.230 (danielk1 05-Jan-06):   */
-1.244 (drh 18-Jan-06):   if( sqlite3OsInMutex(0) ){
-1.230 (danielk1 05-Jan-06):     return 0;
-1.230 (danielk1 05-Jan-06):   }
-1.230 (danielk1 05-Jan-06): 
-1.225 (danielk1 18-Dec-05):   /* Outermost loop runs for at most two iterations. First iteration we
-1.225 (danielk1 18-Dec-05):   ** try to find memory that can be released without calling fsync(). Second
-1.225 (danielk1 18-Dec-05):   ** iteration (which only runs if the first failed to free nReq bytes of
-1.225 (danielk1 18-Dec-05):   ** memory) is permitted to call fsync(). This is of course much more
-1.225 (danielk1 18-Dec-05):   ** expensive.
-1.225 (danielk1 18-Dec-05):   */
-1.234 (drh 09-Jan-06):   for(i=0; i<=1; i++){
-1.225 (danielk1 18-Dec-05): 
-1.225 (danielk1 18-Dec-05):     /* Loop through all the SQLite pagers opened by the current thread. */
-1.322 (danielk1 05-Apr-07):     Pager *pPager = pTsdro->pPager;
-1.322 (danielk1 05-Apr-07):     for( ; pPager && (nReq<0 || nReleased<nReq); pPager=pPager->pNext){
-1.225 (danielk1 18-Dec-05):       PgHdr *pPg;
-1.225 (danielk1 18-Dec-05):       int rc;
-1.225 (danielk1 18-Dec-05): 
-1.322 (danielk1 05-Apr-07):       if( MEMDB ){
-1.321 (danielk1 05-Apr-07):         continue;
-1.321 (danielk1 05-Apr-07):       }
-1.321 (danielk1 05-Apr-07): 
-1.225 (danielk1 18-Dec-05):       /* For each pager, try to free as many pages as possible (without
-1.225 (danielk1 18-Dec-05):       ** calling fsync() if this is the first iteration of the outermost
-1.225 (danielk1 18-Dec-05):       ** loop).
-1.225 (danielk1 18-Dec-05):       */
-1.322 (danielk1 05-Apr-07):       while( SQLITE_OK==(rc = pager_recycle(pPager, i, &pPg)) && pPg) {
-1.229 (danielk1 30-Dec-05):         /* We've found a page to free. At this point the page has been
-1.225 (danielk1 18-Dec-05):         ** removed from the page hash-table, free-list and synced-list
-1.229 (danielk1 30-Dec-05):         ** (pFirstSynced). It is still in the all pages (pAll) list.
-1.225 (danielk1 18-Dec-05):         ** Remove it from this list before freeing.
-1.225 (danielk1 18-Dec-05):         **
-1.225 (danielk1 18-Dec-05):         ** Todo: Check the Pager.pStmt list to make sure this is Ok. It
-1.225 (danielk1 18-Dec-05):         ** probably is though.
-1.225 (danielk1 18-Dec-05): */ -1.225 (danielk1 18-Dec-05): PgHdr *pTmp; -1.226 (danielk1 19-Dec-05): assert( pPg ); -1.322 (danielk1 05-Apr-07): if( pPg==pPager->pAll ){ -1.322 (danielk1 05-Apr-07): pPager->pAll = pPg->pNextAll; -1.225 (danielk1 18-Dec-05): }else{ -1.322 (danielk1 05-Apr-07): for( pTmp=pPager->pAll; pTmp->pNextAll!=pPg; pTmp=pTmp->pNextAll ){} -1.225 (danielk1 18-Dec-05): pTmp->pNextAll = pPg->pNextAll; -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): nReleased += sqliteAllocSize(pPg); -1.327 (drh 13-Apr-07): IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_pgfree_count); -1.225 (danielk1 18-Dec-05): sqliteFree(pPg); -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): -1.225 (danielk1 18-Dec-05): if( rc!=SQLITE_OK ){ -1.229 (danielk1 30-Dec-05): /* An error occured whilst writing to the database file or -1.229 (danielk1 30-Dec-05): ** journal in pager_recycle(). The error is not returned to the -1.238 (danielk1 16-Jan-06): ** caller of this function. Instead, set the Pager.errCode variable. -1.229 (danielk1 30-Dec-05): ** The error will be returned to the user (or users, in the case -1.229 (danielk1 30-Dec-05): ** of a shared pager cache) of the pager for which the error occured. -1.225 (danielk1 18-Dec-05): */ -1.272 (drh 15-Sep-06): assert( (rc&0xff)==SQLITE_IOERR || rc==SQLITE_FULL ); -1.322 (danielk1 05-Apr-07): assert( pPager->state>=PAGER_RESERVED ); -1.322 (danielk1 05-Apr-07): pager_error(pPager, rc); -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): } -1.225 (danielk1 18-Dec-05): } -1.229 (danielk1 30-Dec-05): -1.225 (danielk1 18-Dec-05): return nReleased; -1.225 (danielk1 18-Dec-05): } -1.338 (drh 08-May-07): #endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT && !SQLITE_OMIT_DISKIO */ -1.225 (danielk1 18-Dec-05): -1.194 (drh 15-Mar-05): /* -1.323 (danielk1 05-Apr-07): ** Read the content of page pPg out of the database file. -1.323 (danielk1 05-Apr-07): */ -1.323 (danielk1 05-Apr-07): static int readDbPage(Pager *pPager, PgHdr *pPg, Pgno pgno){ -1.323 (danielk1 05-Apr-07): int rc; -1.323 (danielk1 05-Apr-07): assert( MEMDB==0 ); -1.323 (danielk1 05-Apr-07): rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); -1.323 (danielk1 05-Apr-07): if( rc==SQLITE_OK ){ -1.323 (danielk1 05-Apr-07): rc = sqlite3OsRead(pPager->fd, PGHDR_TO_DATA(pPg), -1.323 (danielk1 05-Apr-07): pPager->pageSize); -1.323 (danielk1 05-Apr-07): } -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_readdb_count); -1.327 (drh 13-Apr-07): PAGER_INCR(pPager->nRead); -1.327 (drh 13-Apr-07): IOTRACE(("PGIN %p %d\n", pPager, pgno)); -1.329 (drh 16-Apr-07): if( pgno==1 ){ -1.329 (drh 16-Apr-07): memcpy(&pPager->dbFileVers, &((u8*)PGHDR_TO_DATA(pPg))[24], -1.329 (drh 16-Apr-07): sizeof(pPager->dbFileVers)); -1.329 (drh 16-Apr-07): } -1.323 (danielk1 05-Apr-07): CODEC1(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3); -1.344 (drh 16-Jun-07): PAGERTRACE4("FETCH %d page %d hash(%08x)\n", -1.344 (drh 16-Jun-07): PAGERID(pPager), pPg->pgno, pager_pagehash(pPg)); -1.323 (danielk1 05-Apr-07): return rc; -1.323 (danielk1 05-Apr-07): } -1.323 (danielk1 05-Apr-07): -1.323 (danielk1 05-Apr-07): -1.323 (danielk1 05-Apr-07): /* -1.293 (danielk1 23-Mar-07): ** This function is called to obtain the shared lock required before -1.293 (danielk1 23-Mar-07): ** data may be read from the pager cache. If the shared lock has already -1.293 (danielk1 23-Mar-07): ** been obtained, this function is a no-op. 
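sqlite3PagerReleaseMemory above works in at most two passes: the first recycles only pages that can be freed without an fsync(), and the second, which allows syncing, only matters when the first pass did not reach the requested amount (a negative request means free everything possible). A sketch of that control flow with a hypothetical per-pass reclaim callback standing in for the walk over the thread's pager list:

/* Two-pass reclaim: pass 0 forbids fsync(), pass 1 allows it and only
** runs if pass 0 fell short of the target.  xReclaim stands in for
** recycling pages from every pager owned by the thread and returns the
** number of bytes it freed.  A negative nReq means "free everything". */
static int release_memory(int nReq,
                          int (*xReclaim)(int syncAllowed, void *ctx),
                          void *ctx){
  int nReleased = 0;
  int i;
  for(i=0; i<=1 && (nReq<0 || nReleased<nReq); i++){
    nReleased += xReclaim(i, ctx);
  }
  return nReleased;
}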
-1.312 (danielk1 31-Mar-07): ** -1.312 (danielk1 31-Mar-07): ** Immediately after obtaining the shared lock (if required), this function -1.312 (danielk1 31-Mar-07): ** checks for a hot-journal file. If one is found, an emergency rollback -1.312 (danielk1 31-Mar-07): ** is performed immediately. -1.293 (danielk1 23-Mar-07): */ -1.293 (danielk1 23-Mar-07): static int pagerSharedLock(Pager *pPager){ -1.293 (danielk1 23-Mar-07): int rc = SQLITE_OK; -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): if( pPager->state==PAGER_UNLOCK ){ -1.293 (danielk1 23-Mar-07): if( !MEMDB ){ -1.293 (danielk1 23-Mar-07): assert( pPager->nRef==0 ); -1.293 (danielk1 23-Mar-07): if( !pPager->noReadlock ){ -1.293 (danielk1 23-Mar-07): rc = pager_wait_on_lock(pPager, SHARED_LOCK); -1.293 (danielk1 23-Mar-07): if( rc!=SQLITE_OK ){ -1.293 (danielk1 23-Mar-07): return pager_error(pPager, rc); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): assert( pPager->state>=SHARED_LOCK ); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* If a journal file exists, and there is no RESERVED lock on the -1.293 (danielk1 23-Mar-07): ** database file, then it either needs to be played back or deleted. -1.293 (danielk1 23-Mar-07): */ -1.293 (danielk1 23-Mar-07): if( hasHotJournal(pPager) ){ -1.293 (danielk1 23-Mar-07): /* Get an EXCLUSIVE lock on the database file. At this point it is -1.293 (danielk1 23-Mar-07): ** important that a RESERVED lock is not obtained on the way to the -1.293 (danielk1 23-Mar-07): ** EXCLUSIVE lock. If it were, another process might open the -1.293 (danielk1 23-Mar-07): ** database file, detect the RESERVED lock, and conclude that the -1.293 (danielk1 23-Mar-07): ** database is safe to read while this process is still rolling it -1.293 (danielk1 23-Mar-07): ** back. -1.293 (danielk1 23-Mar-07): ** -1.293 (danielk1 23-Mar-07): ** Because the intermediate RESERVED lock is not requested, the -1.293 (danielk1 23-Mar-07): ** second process will get to this point in the code and fail to -1.293 (danielk1 23-Mar-07): ** obtain it's own EXCLUSIVE lock on the database file. -1.293 (danielk1 23-Mar-07): */ -1.293 (danielk1 23-Mar-07): rc = sqlite3OsLock(pPager->fd, EXCLUSIVE_LOCK); -1.293 (danielk1 23-Mar-07): if( rc!=SQLITE_OK ){ -1.293 (danielk1 23-Mar-07): pager_unlock(pPager); -1.293 (danielk1 23-Mar-07): return pager_error(pPager, rc); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): pPager->state = PAGER_EXCLUSIVE; -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* Open the journal for reading only. Return SQLITE_BUSY if -1.293 (danielk1 23-Mar-07): ** we are unable to open the journal file. -1.293 (danielk1 23-Mar-07): ** -1.293 (danielk1 23-Mar-07): ** The journal file does not need to be locked itself. The -1.293 (danielk1 23-Mar-07): ** journal file is never open unless the main database file holds -1.293 (danielk1 23-Mar-07): ** a write lock, so there is never any chance of two or more -1.293 (danielk1 23-Mar-07): ** processes opening the journal at the same time. -1.302 (danielk1 27-Mar-07): ** -1.352 (drh 07-Aug-07): ** Open the journal for read/write access. This is because in -1.352 (drh 07-Aug-07): ** exclusive-access mode the file descriptor will be kept open and -1.302 (danielk1 27-Mar-07): ** possibly used for a transaction later on. On some systems, the -1.302 (danielk1 27-Mar-07): ** OsTruncate() call used in exclusive-access mode also requires -1.302 (danielk1 27-Mar-07): ** a read/write file handle. 
-1.293 (danielk1 23-Mar-07): */ -1.302 (danielk1 27-Mar-07): rc = SQLITE_BUSY; -1.302 (danielk1 27-Mar-07): if( sqlite3OsFileExists(pPager->zJournal) ){ -1.302 (danielk1 27-Mar-07): int ro; -1.305 (danielk1 29-Mar-07): assert( !pPager->tempFile ); -1.302 (danielk1 27-Mar-07): rc = sqlite3OsOpenReadWrite(pPager->zJournal, &pPager->jfd, &ro); -1.320 (danielk1 05-Apr-07): assert( rc!=SQLITE_OK || pPager->jfd ); -1.302 (danielk1 27-Mar-07): if( ro ){ -1.302 (danielk1 27-Mar-07): rc = SQLITE_BUSY; -1.315 (danielk1 02-Apr-07): sqlite3OsClose(&pPager->jfd); -1.302 (danielk1 27-Mar-07): } -1.302 (danielk1 27-Mar-07): } -1.293 (danielk1 23-Mar-07): if( rc!=SQLITE_OK ){ -1.293 (danielk1 23-Mar-07): pager_unlock(pPager); -1.293 (danielk1 23-Mar-07): return SQLITE_BUSY; -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): pPager->journalOpen = 1; -1.293 (danielk1 23-Mar-07): pPager->journalStarted = 0; -1.293 (danielk1 23-Mar-07): pPager->journalOff = 0; -1.293 (danielk1 23-Mar-07): pPager->setMaster = 0; -1.293 (danielk1 23-Mar-07): pPager->journalHdr = 0; -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* Playback and delete the journal. Drop the database write -1.293 (danielk1 23-Mar-07): ** lock and reacquire the read lock. -1.293 (danielk1 23-Mar-07): */ -1.293 (danielk1 23-Mar-07): rc = pager_playback(pPager, 1); -1.293 (danielk1 23-Mar-07): if( rc!=SQLITE_OK ){ -1.293 (danielk1 23-Mar-07): return pager_error(pPager, rc); -1.293 (danielk1 23-Mar-07): } -1.297 (danielk1 26-Mar-07): assert(pPager->state==PAGER_SHARED || -1.297 (danielk1 26-Mar-07): (pPager->exclusiveMode && pPager->state>PAGER_SHARED) -1.297 (danielk1 26-Mar-07): ); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): if( pPager->pAll ){ -1.314 (danielk1 02-Apr-07): /* The shared-lock has just been acquired on the database file -1.314 (danielk1 02-Apr-07): ** and there are already pages in the cache (from a previous -1.329 (drh 16-Apr-07): ** read or write transaction). Check to see if the database -1.329 (drh 16-Apr-07): ** has been modified. If the database has changed, flush the -1.329 (drh 16-Apr-07): ** cache. -1.329 (drh 16-Apr-07): ** -1.329 (drh 16-Apr-07): ** Database changes is detected by looking at 15 bytes beginning -1.329 (drh 16-Apr-07): ** at offset 24 into the file. The first 4 of these 16 bytes are -1.329 (drh 16-Apr-07): ** a 32-bit counter that is incremented with each change. The -1.329 (drh 16-Apr-07): ** other bytes change randomly with each file change when -1.329 (drh 16-Apr-07): ** a codec is in use. -1.329 (drh 16-Apr-07): ** -1.329 (drh 16-Apr-07): ** There is a vanishingly small chance that a change will not be -1.340 (drh 09-May-07): ** detected. The chance of an undetected change is so small that -1.329 (drh 16-Apr-07): ** it can be neglected. 
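The cache-validation step above compares the bytes starting at offset 24 of the database file (the 4-byte change counter plus the rest of the dbFileVers window; the comment above says 15 in one place and 16 in another, while the code simply uses sizeof(dbFileVers)) against the copy remembered from the last time page 1 passed through the pager, and flushes the page cache on any mismatch. A self-contained sketch of that check against an ordinary file, assuming a 16-byte cached copy:

#include <stdio.h>
#include <string.h>

/* Return 1 if the "file version" bytes at offset 24 differ from the
** cached copy or cannot be read, meaning the page cache must be reset. */
static int db_changed(FILE *db, const unsigned char aCachedVers[16]){
  unsigned char aNow[16];
  if( fseek(db, 24, SEEK_SET)!=0 ) return 1;
  if( fread(aNow, 1, sizeof(aNow), db)!=sizeof(aNow) ) return 1;
  return memcmp(aCachedVers, aNow, sizeof(aNow))!=0;
}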
-1.314 (danielk1 02-Apr-07): */ -1.329 (drh 16-Apr-07): char dbFileVers[sizeof(pPager->dbFileVers)]; -1.323 (danielk1 05-Apr-07): sqlite3PagerPagecount(pPager); -1.314 (danielk1 02-Apr-07): -1.323 (danielk1 05-Apr-07): if( pPager->errCode ){ -1.323 (danielk1 05-Apr-07): return pPager->errCode; -1.323 (danielk1 05-Apr-07): } -1.323 (danielk1 05-Apr-07): -1.323 (danielk1 05-Apr-07): if( pPager->dbSize>0 ){ -1.333 (drh 03-May-07): IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); -1.323 (danielk1 05-Apr-07): rc = sqlite3OsSeek(pPager->fd, 24); -1.323 (danielk1 05-Apr-07): if( rc!=SQLITE_OK ){ -1.323 (danielk1 05-Apr-07): return rc; -1.323 (danielk1 05-Apr-07): } -1.329 (drh 16-Apr-07): rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers)); -1.323 (danielk1 05-Apr-07): if( rc!=SQLITE_OK ){ -1.323 (danielk1 05-Apr-07): return rc; -1.314 (danielk1 02-Apr-07): } -1.329 (drh 16-Apr-07): }else{ -1.329 (drh 16-Apr-07): memset(dbFileVers, 0, sizeof(dbFileVers)); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.329 (drh 16-Apr-07): if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ -1.323 (danielk1 05-Apr-07): pager_reset(pPager); -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): } -1.297 (danielk1 26-Mar-07): assert( pPager->exclusiveMode || pPager->state<=PAGER_SHARED ); -1.297 (danielk1 26-Mar-07): if( pPager->state==PAGER_UNLOCK ){ -1.297 (danielk1 26-Mar-07): pPager->state = PAGER_SHARED; -1.297 (danielk1 26-Mar-07): } -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): return rc; -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): /* -1.323 (danielk1 05-Apr-07): ** Allocate a PgHdr object. Either create a new one or reuse -1.323 (danielk1 05-Apr-07): ** an existing one that is not otherwise in use. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** A new PgHdr structure is created if any of the following are -1.323 (danielk1 05-Apr-07): ** true: -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (1) We have not exceeded our maximum allocated cache size -1.323 (danielk1 05-Apr-07): ** as set by the "PRAGMA cache_size" command. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (2) There are no unused PgHdr objects available at this time. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (3) This is an in-memory database. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (4) There are no PgHdr objects that do not require a journal -1.323 (danielk1 05-Apr-07): ** file sync and a sync of the journal file is currently -1.323 (danielk1 05-Apr-07): ** prohibited. -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** Otherwise, reuse an existing PgHdr. In other words, reuse an -1.323 (danielk1 05-Apr-07): ** existing PgHdr if all of the following are true: -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (1) We have reached or exceeded the maximum cache size -1.323 (danielk1 05-Apr-07): ** allowed by "PRAGMA cache_size". 
-1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (2) There is a PgHdr available with PgHdr->nRef==0 -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (3) We are not in an in-memory database -1.323 (danielk1 05-Apr-07): ** -1.323 (danielk1 05-Apr-07): ** (4) Either there is an available PgHdr that does not need -1.323 (danielk1 05-Apr-07): ** to be synced to disk or else disk syncing is currently -1.323 (danielk1 05-Apr-07): ** allowed. -1.314 (danielk1 02-Apr-07): */ -1.314 (danielk1 02-Apr-07): static int pagerAllocatePage(Pager *pPager, PgHdr **ppPg){ -1.314 (danielk1 02-Apr-07): int rc = SQLITE_OK; -1.314 (danielk1 02-Apr-07): PgHdr *pPg; -1.314 (danielk1 02-Apr-07): -1.323 (danielk1 05-Apr-07): /* Create a new PgHdr if any of the four conditions defined -1.323 (danielk1 05-Apr-07): ** above is met: */ -1.323 (danielk1 05-Apr-07): if( pPager->nPage<pPager->mxPage -1.323 (danielk1 05-Apr-07): || pPager->pFirst==0 -1.323 (danielk1 05-Apr-07): || MEMDB -1.323 (danielk1 05-Apr-07): || (pPager->pFirstSynced==0 && pPager->doNotSync) -1.323 (danielk1 05-Apr-07): ){ -1.314 (danielk1 02-Apr-07): if( pPager->nPage>=pPager->nHash ){ -1.314 (danielk1 02-Apr-07): pager_resize_hash_table(pPager, -1.314 (danielk1 02-Apr-07): pPager->nHash<256 ? 256 : pPager->nHash*2); -1.314 (danielk1 02-Apr-07): if( pPager->nHash==0 ){ -1.314 (danielk1 02-Apr-07): rc = SQLITE_NOMEM; -1.314 (danielk1 02-Apr-07): goto pager_allocate_out; -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): pPg = sqliteMallocRaw( sizeof(*pPg) + pPager->pageSize -1.314 (danielk1 02-Apr-07): + sizeof(u32) + pPager->nExtra -1.314 (danielk1 02-Apr-07): + MEMDB*sizeof(PgHistory) ); -1.314 (danielk1 02-Apr-07): if( pPg==0 ){ -1.314 (danielk1 02-Apr-07): rc = SQLITE_NOMEM; -1.314 (danielk1 02-Apr-07): goto pager_allocate_out; -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): memset(pPg, 0, sizeof(*pPg)); -1.314 (danielk1 02-Apr-07): if( MEMDB ){ -1.314 (danielk1 02-Apr-07): memset(PGHDR_TO_HIST(pPg, pPager), 0, sizeof(PgHistory)); -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): pPg->pPager = pPager; -1.314 (danielk1 02-Apr-07): pPg->pNextAll = pPager->pAll; -1.314 (danielk1 02-Apr-07): pPager->pAll = pPg; -1.314 (danielk1 02-Apr-07): pPager->nPage++; -1.314 (danielk1 02-Apr-07): }else{ -1.314 (danielk1 02-Apr-07): /* Recycle an existing page with a zero ref-count. */ -1.314 (danielk1 02-Apr-07): rc = pager_recycle(pPager, 1, &pPg); -1.343 (danielk1 13-Jun-07): if( rc==SQLITE_BUSY ){ -1.343 (danielk1 13-Jun-07): rc = SQLITE_IOERR_BLOCKED; -1.343 (danielk1 13-Jun-07): } -1.314 (danielk1 02-Apr-07): if( rc!=SQLITE_OK ){ -1.314 (danielk1 02-Apr-07): goto pager_allocate_out; -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): assert( pPager->state>=SHARED_LOCK ); -1.314 (danielk1 02-Apr-07): assert(pPg); -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): *ppPg = pPg; -1.314 (danielk1 02-Apr-07): -1.314 (danielk1 02-Apr-07): pager_allocate_out: -1.314 (danielk1 02-Apr-07): return rc; -1.314 (danielk1 02-Apr-07): } -1.314 (danielk1 02-Apr-07): -1.314 (danielk1 02-Apr-07): /* -1.330 (drh 26-Apr-07): ** Make sure we have the content for a page. If the page was -1.330 (drh 26-Apr-07): ** previously acquired with noContent==1, then the content was -1.330 (drh 26-Apr-07): ** just initialized to zeros instead of being read from disk. -1.330 (drh 26-Apr-07): ** But now we need the real data off of disk. So make sure we -1.330 (drh 26-Apr-07): ** have it. 
Read it in if we do not have it already. -1.330 (drh 26-Apr-07): */ -1.330 (drh 26-Apr-07): static int pager_get_content(PgHdr *pPg){ -1.330 (drh 26-Apr-07): if( pPg->needRead ){ -1.330 (drh 26-Apr-07): int rc = readDbPage(pPg->pPager, pPg, pPg->pgno); -1.330 (drh 26-Apr-07): if( rc==SQLITE_OK ){ -1.330 (drh 26-Apr-07): pPg->needRead = 0; -1.330 (drh 26-Apr-07): }else{ -1.330 (drh 26-Apr-07): return rc; -1.330 (drh 26-Apr-07): } -1.330 (drh 26-Apr-07): } -1.330 (drh 26-Apr-07): return SQLITE_OK; -1.330 (drh 26-Apr-07): } -1.330 (drh 26-Apr-07): -1.330 (drh 26-Apr-07): /* -1.3 (drh 15-Apr-01): ** Acquire a page. -1.3 (drh 15-Apr-01): ** -1.30 (drh 10-Nov-01): ** A read lock on the disk file is obtained when the first page is acquired. -1.14 (drh 13-Sep-01): ** This read lock is dropped when the last page is released. -1.3 (drh 15-Apr-01): ** -1.330 (drh 26-Apr-07): ** This routine works for any page number greater than 0. If the database -1.6 (drh 21-May-01): ** file is smaller than the requested page, then no actual disk -1.6 (drh 21-May-01): ** read occurs and the memory image of the page is initialized to -1.6 (drh 21-May-01): ** all zeros. The extra data appended to a page is always initialized -1.6 (drh 21-May-01): ** to zeros the first time a page is loaded into memory. -1.6 (drh 21-May-01): ** -1.3 (drh 15-Apr-01): ** The acquisition might fail for several reasons. In all cases, -1.3 (drh 15-Apr-01): ** an appropriate error code is returned and *ppPage is set to NULL. -1.5 (drh 28-Apr-01): ** -1.330 (drh 26-Apr-07): ** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt -1.5 (drh 28-Apr-01): ** to find a page in the in-memory cache first. If the page is not already -1.330 (drh 26-Apr-07): ** in memory, this routine goes to disk to read it in whereas Lookup() -1.5 (drh 28-Apr-01): ** just returns 0. This routine acquires a read-lock the first time it -1.5 (drh 28-Apr-01): ** has to go to disk, and could also playback an old journal if necessary. -1.330 (drh 26-Apr-07): ** Since Lookup() never goes to disk, it never has to deal with locks -1.5 (drh 28-Apr-01): ** or journal files. -1.285 (drh 04-Mar-07): ** -1.327 (drh 13-Apr-07): ** If noContent is false, the page contents are actually read from disk. -1.327 (drh 13-Apr-07): ** If noContent is true, it means that we do not care about the contents -1.327 (drh 13-Apr-07): ** of the page at this time, so do not do a disk read. Just fill in the -1.327 (drh 13-Apr-07): ** page content with zeros. But mark the fact that we have not read the -1.327 (drh 13-Apr-07): ** content by setting the PgHdr.needRead flag. Later on, if -1.330 (drh 26-Apr-07): ** sqlite3PagerWrite() is called on this page or if this routine is -1.330 (drh 26-Apr-07): ** called again with noContent==0, that means that the content is needed -1.330 (drh 26-Apr-07): ** and the disk read should occur at that point. 
-1.327 (drh 13-Apr-07): */ -1.327 (drh 13-Apr-07): int sqlite3PagerAcquire( -1.327 (drh 13-Apr-07): Pager *pPager, /* The pager open on the database file */ -1.327 (drh 13-Apr-07): Pgno pgno, /* Page number to fetch */ -1.327 (drh 13-Apr-07): DbPage **ppPage, /* Write a pointer to the page here */ -1.327 (drh 13-Apr-07): int noContent /* Do not bother reading content from disk if true */ -1.327 (drh 13-Apr-07): ){ -1.1 (drh 11-Apr-01): PgHdr *pPg; -1.194 (drh 15-Mar-05): int rc; -1.1 (drh 11-Apr-01): -1.293 (danielk1 23-Mar-07): assert( pPager->state==PAGER_UNLOCK || pPager->nRef>0 || pgno==1 ); -1.293 (danielk1 23-Mar-07): -1.183 (danielk1 17-Jan-05): /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page -1.183 (danielk1 17-Jan-05): ** number greater than this, or zero, is requested. -1.183 (danielk1 17-Jan-05): */ -1.214 (drh 16-Sep-05): if( pgno>PAGER_MAX_PGNO || pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ -1.215 (drh 17-Sep-05): return SQLITE_CORRUPT_BKPT; -1.183 (danielk1 17-Jan-05): } -1.183 (danielk1 17-Jan-05): -1.3 (drh 15-Apr-01): /* Make sure we have not hit any critical errors. -1.3 (drh 15-Apr-01): */ -1.66 (drh 11-Jan-03): assert( pPager!=0 ); -1.83 (drh 25-Apr-03): *ppPage = 0; -1.238 (danielk1 16-Jan-06): if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ -1.238 (danielk1 16-Jan-06): return pPager->errCode; -1.3 (drh 15-Apr-01): } -1.3 (drh 15-Apr-01): -1.110 (danielk1 03-Jun-04): /* If this is the first page accessed, then get a SHARED lock -1.295 (danielk1 26-Mar-07): ** on the database file. pagerSharedLock() is a no-op if -1.295 (danielk1 26-Mar-07): ** a database lock is already held. -1.1 (drh 11-Apr-01): */ -1.293 (danielk1 23-Mar-07): rc = pagerSharedLock(pPager); -1.293 (danielk1 23-Mar-07): if( rc!=SQLITE_OK ){ -1.293 (danielk1 23-Mar-07): return rc; -1.293 (danielk1 23-Mar-07): } -1.293 (danielk1 23-Mar-07): assert( pPager->state!=PAGER_UNLOCK ); -1.1 (drh 11-Apr-01): -1.293 (danielk1 23-Mar-07): pPg = pager_lookup(pPager, pgno); -1.1 (drh 11-Apr-01): if( pPg==0 ){ -1.3 (drh 15-Apr-01): /* The requested page is not in the page cache. 
*/ -1.302 (danielk1 27-Mar-07): int nMax; -1.1 (drh 11-Apr-01): int h; -1.327 (drh 13-Apr-07): PAGER_INCR(pPager->nMiss); -1.314 (danielk1 02-Apr-07): rc = pagerAllocatePage(pPager, &pPg); -1.314 (danielk1 02-Apr-07): if( rc!=SQLITE_OK ){ -1.314 (danielk1 02-Apr-07): return rc; -1.1 (drh 11-Apr-01): } -1.314 (danielk1 02-Apr-07): -1.1 (drh 11-Apr-01): pPg->pgno = pgno; -1.325 (danielk1 07-Apr-07): assert( !MEMDB || pgno>pPager->stmtSize ); -1.36 (drh 14-Jan-02): if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ -1.104 (danielk1 08-May-04): sqlite3CheckMemory(pPager->aInJournal, pgno/8); -1.68 (drh 16-Jan-03): assert( pPager->journalOpen ); -1.13 (drh 02-Jul-01): pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; -1.68 (drh 16-Jan-03): pPg->needSync = 0; -1.13 (drh 02-Jul-01): }else{ -1.13 (drh 02-Jul-01): pPg->inJournal = 0; -1.68 (drh 16-Jan-03): pPg->needSync = 0; -1.13 (drh 02-Jul-01): } -1.325 (danielk1 07-Apr-07): -1.267 (drh 03-May-06): makeClean(pPg); -1.1 (drh 11-Apr-01): pPg->nRef = 1; -1.12 (drh 28-Jun-01): REFINFO(pPg); -1.250 (danielk1 23-Jan-06): -1.3 (drh 15-Apr-01): pPager->nRef++; -1.83 (drh 25-Apr-03): if( pPager->nExtra>0 ){ -1.152 (drh 22-Jul-04): memset(PGHDR_TO_EXTRA(pPg, pPager), 0, pPager->nExtra); -1.83 (drh 25-Apr-03): } -1.302 (danielk1 27-Mar-07): nMax = sqlite3PagerPagecount(pPager); -1.238 (danielk1 16-Jan-06): if( pPager->errCode ){ -1.292 (danielk1 19-Mar-07): sqlite3PagerUnref(pPg); -1.238 (danielk1 16-Jan-06): rc = pPager->errCode; -1.83 (drh 25-Apr-03): return rc; -1.83 (drh 25-Apr-03): } -1.250 (danielk1 23-Jan-06): -1.250 (danielk1 23-Jan-06): /* Populate the page with data, either by reading from the database -1.250 (danielk1 23-Jan-06): ** file, or by setting the entire page to zero. -1.250 (danielk1 23-Jan-06): */ -1.328 (drh 13-Apr-07): if( nMax<(int)pgno || MEMDB || (noContent && !pPager->alwaysRollback) ){ -1.337 (drh 08-May-07): if( pgno>pPager->mxPgno ){ -1.339 (danielk1 09-May-07): sqlite3PagerUnref(pPg); -1.337 (drh 08-May-07): return SQLITE_FULL; -1.337 (drh 08-May-07): } -1.152 (drh 22-Jul-04): memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); -1.328 (drh 13-Apr-07): pPg->needRead = noContent && !pPager->alwaysRollback; -1.327 (drh 13-Apr-07): IOTRACE(("ZERO %p %d\n", pPager, pgno)); -1.6 (drh 21-May-01): }else{ -1.323 (danielk1 05-Apr-07): rc = readDbPage(pPager, pPg, pgno); -1.275 (drh 06-Nov-06): if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ -1.275 (drh 06-Nov-06): pPg->pgno = 0; -1.292 (danielk1 19-Mar-07): sqlite3PagerUnref(pPg); -1.275 (drh 06-Nov-06): return rc; -1.27 (drh 12-Oct-01): } -1.331 (danielk1 28-Apr-07): pPg->needRead = 0; -1.6 (drh 21-May-01): } -1.250 (danielk1 23-Jan-06): -1.250 (danielk1 23-Jan-06): /* Link the page into the page hash table */ -1.268 (drh 07-May-06): h = pgno & (pPager->nHash-1); -1.270 (drh 28-Jun-06): assert( pgno!=0 ); -1.250 (danielk1 23-Jan-06): pPg->pNextHash = pPager->aHash[h]; -1.250 (danielk1 23-Jan-06): pPager->aHash[h] = pPg; -1.250 (danielk1 23-Jan-06): if( pPg->pNextHash ){ -1.250 (danielk1 23-Jan-06): assert( pPg->pNextHash->pPrevHash==0 ); -1.250 (danielk1 23-Jan-06): pPg->pNextHash->pPrevHash = pPg; -1.250 (danielk1 23-Jan-06): } -1.250 (danielk1 23-Jan-06): -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): pPg->pageHash = pager_pagehash(pPg); -1.189 (danielk1 15-Feb-05): #endif -1.1 (drh 11-Apr-01): }else{ -1.3 (drh 15-Apr-01): /* The requested page is in the page cache. 
*/ -1.293 (danielk1 23-Mar-07): assert(pPager->nRef>0 || pgno==1); -1.327 (drh 13-Apr-07): PAGER_INCR(pPager->nHit); -1.330 (drh 26-Apr-07): if( !noContent ){ -1.330 (drh 26-Apr-07): rc = pager_get_content(pPg); -1.330 (drh 26-Apr-07): if( rc ){ -1.330 (drh 26-Apr-07): return rc; -1.330 (drh 26-Apr-07): } -1.330 (drh 26-Apr-07): } -1.10 (drh 23-Jun-01): page_ref(pPg); -1.1 (drh 11-Apr-01): } -1.292 (danielk1 19-Mar-07): *ppPage = pPg; -1.1 (drh 11-Apr-01): return SQLITE_OK; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): -1.5 (drh 28-Apr-01): /* -1.5 (drh 28-Apr-01): ** Acquire a page if it is already in the in-memory cache. Do -1.5 (drh 28-Apr-01): ** not read the page from disk. Return a pointer to the page, -1.5 (drh 28-Apr-01): ** or 0 if the page is not in cache. -1.5 (drh 28-Apr-01): ** -1.292 (danielk1 19-Mar-07): ** See also sqlite3PagerGet(). The difference between this routine -1.292 (danielk1 19-Mar-07): ** and sqlite3PagerGet() is that _get() will go to the disk and read -1.5 (drh 28-Apr-01): ** in the page if the page is not already in cache. This routine -1.14 (drh 13-Sep-01): ** returns NULL if the page is not in cache or if a disk I/O error -1.14 (drh 13-Sep-01): ** has ever happened. -1.5 (drh 28-Apr-01): */ -1.292 (danielk1 19-Mar-07): DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ -1.5 (drh 28-Apr-01): PgHdr *pPg; -1.5 (drh 28-Apr-01): -1.66 (drh 11-Jan-03): assert( pPager!=0 ); -1.66 (drh 11-Jan-03): assert( pgno!=0 ); -1.293 (danielk1 23-Mar-07): -1.293 (danielk1 23-Mar-07): if( pPager->state==PAGER_UNLOCK ){ -1.295 (danielk1 26-Mar-07): assert( !pPager->pAll || pPager->exclusiveMode ); -1.293 (danielk1 23-Mar-07): return 0; -1.293 (danielk1 23-Mar-07): } -1.295 (danielk1 26-Mar-07): if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ -1.5 (drh 28-Apr-01): return 0; -1.5 (drh 28-Apr-01): } -1.5 (drh 28-Apr-01): pPg = pager_lookup(pPager, pgno); -1.5 (drh 28-Apr-01): if( pPg==0 ) return 0; -1.10 (drh 23-Jun-01): page_ref(pPg); -1.292 (danielk1 19-Mar-07): return pPg; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.1 (drh 11-Apr-01): ** Release a page. -1.1 (drh 11-Apr-01): ** -1.1 (drh 11-Apr-01): ** If the number of references to the page drop to zero, then the -1.1 (drh 11-Apr-01): ** page is added to the LRU list. When all references to all pages -1.3 (drh 15-Apr-01): ** are released, a rollback occurs and the lock on the database is -1.1 (drh 11-Apr-01): ** removed. -1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerUnref(DbPage *pPg){ -1.3 (drh 15-Apr-01): -1.3 (drh 15-Apr-01): /* Decrement the reference count for this page -1.3 (drh 15-Apr-01): */ -1.1 (drh 11-Apr-01): assert( pPg->nRef>0 ); -1.1 (drh 11-Apr-01): pPg->nRef--; -1.12 (drh 28-Jun-01): REFINFO(pPg); -1.3 (drh 15-Apr-01): -1.189 (danielk1 15-Feb-05): CHECK_PAGE(pPg); -1.189 (danielk1 15-Feb-05): -1.7 (drh 24-May-01): /* When the number of references to a page reach 0, call the -1.7 (drh 24-May-01): ** destructor and add the page to the freelist. 
-1.3 (drh 15-Apr-01): */ -1.1 (drh 11-Apr-01): if( pPg->nRef==0 ){ -1.21 (drh 18-Sep-01): Pager *pPager; -1.21 (drh 18-Sep-01): pPager = pPg->pPager; -1.3 (drh 15-Apr-01): pPg->pNextFree = 0; -1.3 (drh 15-Apr-01): pPg->pPrevFree = pPager->pLast; -1.1 (drh 11-Apr-01): pPager->pLast = pPg; -1.3 (drh 15-Apr-01): if( pPg->pPrevFree ){ -1.3 (drh 15-Apr-01): pPg->pPrevFree->pNextFree = pPg; -1.1 (drh 11-Apr-01): }else{ -1.1 (drh 11-Apr-01): pPager->pFirst = pPg; -1.7 (drh 24-May-01): } -1.69 (drh 21-Jan-03): if( pPg->needSync==0 && pPager->pFirstSynced==0 ){ -1.69 (drh 21-Jan-03): pPager->pFirstSynced = pPg; -1.69 (drh 21-Jan-03): } -1.7 (drh 24-May-01): if( pPager->xDestructor ){ -1.292 (danielk1 19-Mar-07): pPager->xDestructor(pPg, pPager->pageSize); -1.1 (drh 11-Apr-01): } -1.3 (drh 15-Apr-01): -1.3 (drh 15-Apr-01): /* When all pages reach the freelist, drop the read lock from -1.3 (drh 15-Apr-01): ** the database file. -1.3 (drh 15-Apr-01): */ -1.3 (drh 15-Apr-01): pPager->nRef--; -1.3 (drh 15-Apr-01): assert( pPager->nRef>=0 ); -1.303 (danielk1 27-Mar-07): if( pPager->nRef==0 && (!pPager->exclusiveMode || pPager->journalOff>0) ){ -1.293 (danielk1 23-Mar-07): pagerUnlockAndRollback(pPager); -1.3 (drh 15-Apr-01): } -1.1 (drh 11-Apr-01): } -1.3 (drh 15-Apr-01): return SQLITE_OK; -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.115 (drh 09-Jun-04): ** Create a journal file for pPager. There should already be a RESERVED -1.115 (drh 09-Jun-04): ** or EXCLUSIVE lock on the database file when this routine is called. -1.60 (drh 02-Dec-02): ** -1.60 (drh 02-Dec-02): ** Return SQLITE_OK if everything. Return an error code and release the -1.60 (drh 02-Dec-02): ** write lock if anything goes wrong. -1.60 (drh 02-Dec-02): */ -1.60 (drh 02-Dec-02): static int pager_open_journal(Pager *pPager){ -1.60 (drh 02-Dec-02): int rc; -1.169 (drh 31-Oct-04): assert( !MEMDB ); -1.115 (drh 09-Jun-04): assert( pPager->state>=PAGER_RESERVED ); -1.60 (drh 02-Dec-02): assert( pPager->journalOpen==0 ); -1.60 (drh 02-Dec-02): assert( pPager->useJournal ); -1.201 (drh 28-Mar-05): assert( pPager->aInJournal==0 ); -1.292 (danielk1 19-Mar-07): sqlite3PagerPagecount(pPager); -1.60 (drh 02-Dec-02): pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); -1.60 (drh 02-Dec-02): if( pPager->aInJournal==0 ){ -1.166 (drh 02-Oct-04): rc = SQLITE_NOMEM; -1.166 (drh 02-Oct-04): goto failed_to_open_journal; -1.60 (drh 02-Dec-02): } -1.231 (drh 06-Jan-06): rc = sqlite3OsOpenExclusive(pPager->zJournal, &pPager->jfd, -1.218 (drh 26-Nov-05): pPager->tempFile); -1.320 (danielk1 05-Apr-07): assert( rc!=SQLITE_OK || pPager->jfd ); -1.138 (danielk1 25-Jun-04): pPager->journalOff = 0; -1.138 (danielk1 25-Jun-04): pPager->setMaster = 0; -1.138 (danielk1 25-Jun-04): pPager->journalHdr = 0; -1.60 (drh 02-Dec-02): if( rc!=SQLITE_OK ){ -1.304 (drh 28-Mar-07): if( rc==SQLITE_NOMEM ){ -1.304 (drh 28-Mar-07): sqlite3OsDelete(pPager->zJournal); -1.304 (drh 28-Mar-07): } -1.166 (drh 02-Oct-04): goto failed_to_open_journal; -1.60 (drh 02-Dec-02): } -1.258 (drh 11-Feb-06): sqlite3OsSetFullSync(pPager->jfd, pPager->full_fsync); -1.258 (drh 11-Feb-06): sqlite3OsSetFullSync(pPager->fd, pPager->full_fsync); -1.222 (drh 30-Nov-05): sqlite3OsOpenDirectory(pPager->jfd, pPager->zDirectory); -1.60 (drh 02-Dec-02): pPager->journalOpen = 1; -1.68 (drh 16-Jan-03): pPager->journalStarted = 0; -1.60 (drh 02-Dec-02): pPager->needSync = 0; -1.60 (drh 02-Dec-02): pPager->alwaysRollback = 0; -1.73 (drh 11-Feb-03): pPager->nRec = 0; -1.238 (danielk1 
16-Jan-06): if( pPager->errCode ){ -1.238 (danielk1 16-Jan-06): rc = pPager->errCode; -1.199 (drh 28-Mar-05): goto failed_to_open_journal; -1.83 (drh 25-Apr-03): } -1.60 (drh 02-Dec-02): pPager->origDbSize = pPager->dbSize; -1.116 (drh 09-Jun-04): -1.138 (danielk1 25-Jun-04): rc = writeJournalHdr(pPager); -1.138 (danielk1 25-Jun-04): -1.107 (drh 12-May-04): if( pPager->stmtAutoopen && rc==SQLITE_OK ){ -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerStmtBegin(pPager); -1.60 (drh 02-Dec-02): } -1.223 (danielk1 06-Dec-05): if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ -1.307 (drh 30-Mar-07): rc = pager_end_transaction(pPager); -1.60 (drh 02-Dec-02): if( rc==SQLITE_OK ){ -1.60 (drh 02-Dec-02): rc = SQLITE_FULL; -1.60 (drh 02-Dec-02): } -1.60 (drh 02-Dec-02): } -1.166 (drh 02-Oct-04): return rc; -1.166 (drh 02-Oct-04): -1.166 (drh 02-Oct-04): failed_to_open_journal: -1.166 (drh 02-Oct-04): sqliteFree(pPager->aInJournal); -1.166 (drh 02-Oct-04): pPager->aInJournal = 0; -1.166 (drh 02-Oct-04): return rc; -1.60 (drh 02-Dec-02): } -1.60 (drh 02-Dec-02): -1.60 (drh 02-Dec-02): /* -1.43 (drh 05-Mar-02): ** Acquire a write-lock on the database. The lock is removed when -1.43 (drh 05-Mar-02): ** the any of the following happen: -1.43 (drh 05-Mar-02): ** -1.307 (drh 30-Mar-07): ** * sqlite3PagerCommitPhaseTwo() is called. -1.292 (danielk1 19-Mar-07): ** * sqlite3PagerRollback() is called. -1.292 (danielk1 19-Mar-07): ** * sqlite3PagerClose() is called. -1.292 (danielk1 19-Mar-07): ** * sqlite3PagerUnref() is called to on every outstanding page. -1.43 (drh 05-Mar-02): ** -1.110 (danielk1 03-Jun-04): ** The first parameter to this routine is a pointer to any open page of the -1.110 (danielk1 03-Jun-04): ** database file. Nothing changes about the page - it is used merely to -1.110 (danielk1 03-Jun-04): ** acquire a pointer to the Pager structure and as proof that there is -1.110 (danielk1 03-Jun-04): ** already a read-lock on the database. -1.110 (danielk1 03-Jun-04): ** -1.110 (danielk1 03-Jun-04): ** The second parameter indicates how much space in bytes to reserve for a -1.110 (danielk1 03-Jun-04): ** master journal file-name at the start of the journal when it is created. -1.110 (danielk1 03-Jun-04): ** -1.110 (danielk1 03-Jun-04): ** A journal file is opened if this is not a temporary file. For temporary -1.110 (danielk1 03-Jun-04): ** files, the opening of the journal file is deferred until there is an -1.110 (danielk1 03-Jun-04): ** actual need to write to the journal. -1.60 (drh 02-Dec-02): ** -1.115 (drh 09-Jun-04): ** If the database is already reserved for writing, this routine is a no-op. -1.167 (drh 05-Oct-04): ** -1.167 (drh 05-Oct-04): ** If exFlag is true, go ahead and get an EXCLUSIVE lock on the file -1.167 (drh 05-Oct-04): ** immediately instead of waiting until we try to flush the cache. The -1.167 (drh 05-Oct-04): ** exFlag is ignored if a transaction is already active. 
-1.43 (drh 05-Mar-02): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerBegin(DbPage *pPg, int exFlag){ -1.43 (drh 05-Mar-02): Pager *pPager = pPg->pPager; -1.43 (drh 05-Mar-02): int rc = SQLITE_OK; -1.43 (drh 05-Mar-02): assert( pPg->nRef>0 ); -1.115 (drh 09-Jun-04): assert( pPager->state!=PAGER_UNLOCK ); -1.115 (drh 09-Jun-04): if( pPager->state==PAGER_SHARED ){ -1.43 (drh 05-Mar-02): assert( pPager->aInJournal==0 ); -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.115 (drh 09-Jun-04): pPager->state = PAGER_EXCLUSIVE; -1.107 (drh 12-May-04): pPager->origDbSize = pPager->dbSize; -1.107 (drh 12-May-04): }else{ -1.222 (drh 30-Nov-05): rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); -1.167 (drh 05-Oct-04): if( rc==SQLITE_OK ){ -1.167 (drh 05-Oct-04): pPager->state = PAGER_RESERVED; -1.167 (drh 05-Oct-04): if( exFlag ){ -1.167 (drh 05-Oct-04): rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); -1.167 (drh 05-Oct-04): } -1.167 (drh 05-Oct-04): } -1.112 (danielk1 04-Jun-04): if( rc!=SQLITE_OK ){ -1.112 (danielk1 04-Jun-04): return rc; -1.107 (drh 12-May-04): } -1.115 (drh 09-Jun-04): pPager->dirtyCache = 0; -1.300 (drh 26-Mar-07): PAGERTRACE2("TRANSACTION %d\n", PAGERID(pPager)); -1.107 (drh 12-May-04): if( pPager->useJournal && !pPager->tempFile ){ -1.107 (drh 12-May-04): rc = pager_open_journal(pPager); -1.107 (drh 12-May-04): } -1.43 (drh 05-Mar-02): } -1.295 (danielk1 26-Mar-07): }else if( pPager->journalOpen && pPager->journalOff==0 ){ -1.295 (danielk1 26-Mar-07): /* This happens when the pager was in exclusive-access mode last -1.295 (danielk1 26-Mar-07): ** time a (read or write) transaction was successfully concluded -1.295 (danielk1 26-Mar-07): ** by this connection. Instead of deleting the journal file it was -1.295 (danielk1 26-Mar-07): ** kept open and truncated to 0 bytes. -1.295 (danielk1 26-Mar-07): */ -1.295 (danielk1 26-Mar-07): assert( pPager->nRec==0 ); -1.295 (danielk1 26-Mar-07): assert( pPager->origDbSize==0 ); -1.309 (drh 30-Mar-07): assert( pPager->aInJournal==0 ); -1.295 (danielk1 26-Mar-07): sqlite3PagerPagecount(pPager); -1.295 (danielk1 26-Mar-07): pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); -1.295 (danielk1 26-Mar-07): if( !pPager->aInJournal ){ -1.295 (danielk1 26-Mar-07): rc = SQLITE_NOMEM; -1.295 (danielk1 26-Mar-07): }else{ -1.309 (drh 30-Mar-07): pPager->origDbSize = pPager->dbSize; -1.295 (danielk1 26-Mar-07): rc = writeJournalHdr(pPager); -1.295 (danielk1 26-Mar-07): } -1.43 (drh 05-Mar-02): } -1.295 (danielk1 26-Mar-07): assert( !pPager->journalOpen || pPager->journalOff>0 || rc!=SQLITE_OK ); -1.43 (drh 05-Mar-02): return rc; -1.43 (drh 05-Mar-02): } -1.43 (drh 05-Mar-02): -1.43 (drh 05-Mar-02): /* -1.267 (drh 03-May-06): ** Make a page dirty. Set its dirty flag and add it to the dirty -1.267 (drh 03-May-06): ** page list. -1.267 (drh 03-May-06): */ -1.267 (drh 03-May-06): static void makeDirty(PgHdr *pPg){ -1.267 (drh 03-May-06): if( pPg->dirty==0 ){ -1.267 (drh 03-May-06): Pager *pPager = pPg->pPager; -1.267 (drh 03-May-06): pPg->dirty = 1; -1.267 (drh 03-May-06): pPg->pDirty = pPager->pDirty; -1.267 (drh 03-May-06): if( pPager->pDirty ){ -1.267 (drh 03-May-06): pPager->pDirty->pPrevDirty = pPg; -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): pPg->pPrevDirty = 0; -1.267 (drh 03-May-06): pPager->pDirty = pPg; -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): -1.267 (drh 03-May-06): /* -1.267 (drh 03-May-06): ** Make a page clean. Clear its dirty bit and remove it from the -1.267 (drh 03-May-06): ** dirty page list. 
-1.267 (drh 03-May-06): */ -1.267 (drh 03-May-06): static void makeClean(PgHdr *pPg){ -1.267 (drh 03-May-06): if( pPg->dirty ){ -1.267 (drh 03-May-06): pPg->dirty = 0; -1.267 (drh 03-May-06): if( pPg->pDirty ){ -1.267 (drh 03-May-06): pPg->pDirty->pPrevDirty = pPg->pPrevDirty; -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): if( pPg->pPrevDirty ){ -1.267 (drh 03-May-06): pPg->pPrevDirty->pDirty = pPg->pDirty; -1.267 (drh 03-May-06): }else{ -1.267 (drh 03-May-06): pPg->pPager->pDirty = pPg->pDirty; -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): } -1.267 (drh 03-May-06): -1.267 (drh 03-May-06): -1.267 (drh 03-May-06): /* -1.1 (drh 11-Apr-01): ** Mark a data page as writeable. The page is written into the journal -1.1 (drh 11-Apr-01): ** if it is not there already. This routine must be called before making -1.1 (drh 11-Apr-01): ** changes to a page. -1.1 (drh 11-Apr-01): ** -1.1 (drh 11-Apr-01): ** The first time this routine is called, the pager creates a new -1.115 (drh 09-Jun-04): ** journal and acquires a RESERVED lock on the database. If the RESERVED -1.1 (drh 11-Apr-01): ** lock could not be acquired, this routine returns SQLITE_BUSY. The -1.6 (drh 21-May-01): ** calling routine must check for that return value and be careful not to -1.1 (drh 11-Apr-01): ** change any page data until this routine returns SQLITE_OK. -1.3 (drh 15-Apr-01): ** -1.3 (drh 15-Apr-01): ** If the journal file could not be written because the disk is full, -1.3 (drh 15-Apr-01): ** then this routine returns SQLITE_FULL and does an immediate rollback. -1.3 (drh 15-Apr-01): ** All subsequent write attempts also return SQLITE_FULL until there -1.292 (danielk1 19-Mar-07): ** is a call to sqlite3PagerCommit() or sqlite3PagerRollback() to -1.3 (drh 15-Apr-01): ** reset. -1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): static int pager_write(PgHdr *pPg){ -1.292 (danielk1 19-Mar-07): void *pData = PGHDR_TO_DATA(pPg); -1.2 (drh 14-Apr-01): Pager *pPager = pPg->pPager; -1.4 (drh 15-Apr-01): int rc = SQLITE_OK; -1.2 (drh 14-Apr-01): -1.34 (drh 15-Dec-01): /* Check for errors -1.34 (drh 15-Dec-01): */ -1.238 (danielk1 16-Jan-06): if( pPager->errCode ){ -1.238 (danielk1 16-Jan-06): return pPager->errCode; -1.3 (drh 15-Apr-01): } -1.14 (drh 13-Sep-01): if( pPager->readOnly ){ -1.14 (drh 13-Sep-01): return SQLITE_PERM; -1.14 (drh 13-Sep-01): } -1.34 (drh 15-Dec-01): -1.138 (danielk1 25-Jun-04): assert( !pPager->setMaster ); -1.138 (danielk1 25-Jun-04): -1.189 (danielk1 15-Feb-05): CHECK_PAGE(pPg); -1.189 (danielk1 15-Feb-05): -1.327 (drh 13-Apr-07): /* If this page was previously acquired with noContent==1, that means -1.327 (drh 13-Apr-07): ** we didn't really read in the content of the page. This can happen -1.327 (drh 13-Apr-07): ** (for example) when the page is being moved to the freelist. But -1.327 (drh 13-Apr-07): ** now we are (perhaps) moving the page off of the freelist for -1.327 (drh 13-Apr-07): ** reuse and we need to know its original content so that content -1.327 (drh 13-Apr-07): ** can be stored in the rollback journal. So do the read at this -1.327 (drh 13-Apr-07): ** time. -1.327 (drh 13-Apr-07): */ -1.330 (drh 26-Apr-07): rc = pager_get_content(pPg); -1.330 (drh 26-Apr-07): if( rc ){ -1.330 (drh 26-Apr-07): return rc; -1.327 (drh 13-Apr-07): } -1.327 (drh 13-Apr-07): -1.34 (drh 15-Dec-01): /* Mark the page as dirty. If the page has already been written -1.34 (drh 15-Dec-01): ** to the journal then we can return right away. 
-1.34 (drh 15-Dec-01): */ -1.267 (drh 03-May-06): makeDirty(pPg); -1.325 (danielk1 07-Apr-07): if( pPg->inJournal && (pageInStatement(pPg) || pPager->stmtInUse==0) ){ -1.115 (drh 09-Jun-04): pPager->dirtyCache = 1; -1.172 (danielk1 04-Nov-04): }else{ -1.34 (drh 15-Dec-01): -1.172 (danielk1 04-Nov-04): /* If we get this far, it means that the page needs to be -1.172 (danielk1 04-Nov-04): ** written to the transaction journal or the ckeckpoint journal -1.172 (danielk1 04-Nov-04): ** or both. -1.172 (danielk1 04-Nov-04): ** -1.172 (danielk1 04-Nov-04): ** First check to see that the transaction journal exists and -1.172 (danielk1 04-Nov-04): ** create it if it does not. -1.172 (danielk1 04-Nov-04): */ -1.172 (danielk1 04-Nov-04): assert( pPager->state!=PAGER_UNLOCK ); -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerBegin(pPg, 0); -1.172 (danielk1 04-Nov-04): if( rc!=SQLITE_OK ){ -1.172 (danielk1 04-Nov-04): return rc; -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): assert( pPager->state>=PAGER_RESERVED ); -1.172 (danielk1 04-Nov-04): if( !pPager->journalOpen && pPager->useJournal ){ -1.172 (danielk1 04-Nov-04): rc = pager_open_journal(pPager); -1.172 (danielk1 04-Nov-04): if( rc!=SQLITE_OK ) return rc; -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): assert( pPager->journalOpen || !pPager->useJournal ); -1.172 (danielk1 04-Nov-04): pPager->dirtyCache = 1; -1.172 (danielk1 04-Nov-04): -1.172 (danielk1 04-Nov-04): /* The transaction journal now exists and we have a RESERVED or an -1.172 (danielk1 04-Nov-04): ** EXCLUSIVE lock on the main database file. Write the current page to -1.172 (danielk1 04-Nov-04): ** the transaction journal if it is not there already. -1.172 (danielk1 04-Nov-04): */ -1.172 (danielk1 04-Nov-04): if( !pPg->inJournal && (pPager->useJournal || MEMDB) ){ -1.172 (danielk1 04-Nov-04): if( (int)pPg->pgno <= pPager->origDbSize ){ -1.172 (danielk1 04-Nov-04): int szPg; -1.172 (danielk1 04-Nov-04): if( MEMDB ){ -1.172 (danielk1 04-Nov-04): PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); -1.300 (drh 26-Mar-07): PAGERTRACE3("JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); -1.172 (danielk1 04-Nov-04): assert( pHist->pOrig==0 ); -1.172 (danielk1 04-Nov-04): pHist->pOrig = sqliteMallocRaw( pPager->pageSize ); -1.172 (danielk1 04-Nov-04): if( pHist->pOrig ){ -1.172 (danielk1 04-Nov-04): memcpy(pHist->pOrig, PGHDR_TO_DATA(pPg), pPager->pageSize); -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): }else{ -1.261 (drh 06-Mar-06): u32 cksum, saved; -1.261 (drh 06-Mar-06): char *pData2, *pEnd; -1.214 (drh 16-Sep-05): /* We should never write to the journal file the page that -1.214 (drh 16-Sep-05): ** contains the database locks. The following assert verifies -1.214 (drh 16-Sep-05): ** that we do not. 
*/ -1.214 (drh 16-Sep-05): assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); -1.261 (drh 06-Mar-06): pData2 = CODEC2(pPager, pData, pPg->pgno, 7); -1.263 (drh 16-Mar-06): cksum = pager_cksum(pPager, (u8*)pData2); -1.261 (drh 06-Mar-06): pEnd = pData2 + pPager->pageSize; -1.261 (drh 06-Mar-06): pData2 -= 4; -1.261 (drh 06-Mar-06): saved = *(u32*)pEnd; -1.261 (drh 06-Mar-06): put32bits(pEnd, cksum); -1.172 (danielk1 04-Nov-04): szPg = pPager->pageSize+8; -1.261 (drh 06-Mar-06): put32bits(pData2, pPg->pgno); -1.261 (drh 06-Mar-06): rc = sqlite3OsWrite(pPager->jfd, pData2, szPg); -1.283 (drh 28-Feb-07): IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, -1.327 (drh 13-Apr-07): pPager->journalOff, szPg)); -1.327 (drh 13-Apr-07): PAGER_INCR(sqlite3_pager_writej_count); -1.172 (danielk1 04-Nov-04): pPager->journalOff += szPg; -1.344 (drh 16-Jun-07): PAGERTRACE5("JOURNAL %d page %d needSync=%d hash(%08x)\n", -1.344 (drh 16-Jun-07): PAGERID(pPager), pPg->pgno, pPg->needSync, pager_pagehash(pPg)); -1.261 (drh 06-Mar-06): *(u32*)pEnd = saved; -1.246 (danielk1 20-Jan-06): -1.352 (drh 07-Aug-07): /* An error has occured writing to the journal file. The -1.246 (danielk1 20-Jan-06): ** transaction will be rolled back by the layer above. -1.246 (danielk1 20-Jan-06): */ -1.172 (danielk1 04-Nov-04): if( rc!=SQLITE_OK ){ -1.172 (danielk1 04-Nov-04): return rc; -1.172 (danielk1 04-Nov-04): } -1.246 (danielk1 20-Jan-06): -1.172 (danielk1 04-Nov-04): pPager->nRec++; -1.172 (danielk1 04-Nov-04): assert( pPager->aInJournal!=0 ); -1.172 (danielk1 04-Nov-04): pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.172 (danielk1 04-Nov-04): pPg->needSync = !pPager->noSync; -1.172 (danielk1 04-Nov-04): if( pPager->stmtInUse ){ -1.172 (danielk1 04-Nov-04): pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): }else{ -1.172 (danielk1 04-Nov-04): pPg->needSync = !pPager->journalStarted && !pPager->noSync; -1.300 (drh 26-Mar-07): PAGERTRACE4("APPEND %d page %d needSync=%d\n", -1.174 (danielk1 06-Nov-04): PAGERID(pPager), pPg->pgno, pPg->needSync); -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): if( pPg->needSync ){ -1.172 (danielk1 04-Nov-04): pPager->needSync = 1; -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): pPg->inJournal = 1; -1.172 (danielk1 04-Nov-04): } -1.172 (danielk1 04-Nov-04): -1.172 (danielk1 04-Nov-04): /* If the statement journal is open and the page is not in it, -1.172 (danielk1 04-Nov-04): ** then write the current page to the statement journal. Note that -1.172 (danielk1 04-Nov-04): ** the statement journal format differs from the standard journal format -1.172 (danielk1 04-Nov-04): ** in that it omits the checksums and the header. 
-1.172 (danielk1 04-Nov-04): */ -1.325 (danielk1 07-Apr-07): if( pPager->stmtInUse -1.325 (danielk1 07-Apr-07): && !pageInStatement(pPg) -1.325 (danielk1 07-Apr-07): && (int)pPg->pgno<=pPager->stmtSize -1.325 (danielk1 07-Apr-07): ){ -1.172 (danielk1 04-Nov-04): assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.107 (drh 12-May-04): PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); -1.172 (danielk1 04-Nov-04): assert( pHist->pStmt==0 ); -1.172 (danielk1 04-Nov-04): pHist->pStmt = sqliteMallocRaw( pPager->pageSize ); -1.172 (danielk1 04-Nov-04): if( pHist->pStmt ){ -1.172 (danielk1 04-Nov-04): memcpy(pHist->pStmt, PGHDR_TO_DATA(pPg), pPager->pageSize); -1.107 (drh 12-May-04): } -1.300 (drh 26-Mar-07): PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); -1.325 (danielk1 07-Apr-07): page_add_to_stmt_list(pPg); -1.110 (danielk1 03-Jun-04): }else{ -1.261 (drh 06-Mar-06): char *pData2 = CODEC2(pPager, pData, pPg->pgno, 7)-4; -1.261 (drh 06-Mar-06): put32bits(pData2, pPg->pgno); -1.261 (drh 06-Mar-06): rc = sqlite3OsWrite(pPager->stfd, pData2, pPager->pageSize+4); -1.300 (drh 26-Mar-07): PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); -1.107 (drh 12-May-04): if( rc!=SQLITE_OK ){ -1.107 (drh 12-May-04): return rc; -1.107 (drh 12-May-04): } -1.172 (danielk1 04-Nov-04): pPager->stmtNRec++; -1.172 (danielk1 04-Nov-04): assert( pPager->aInStmt!=0 ); -1.172 (danielk1 04-Nov-04): pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.68 (drh 16-Jan-03): } -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* Update the database size and return. -1.34 (drh 15-Dec-01): */ -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED ); -1.36 (drh 14-Jan-02): if( pPager->dbSize<(int)pPg->pgno ){ -1.6 (drh 21-May-01): pPager->dbSize = pPg->pgno; -1.169 (drh 31-Oct-04): if( !MEMDB && pPager->dbSize==PENDING_BYTE/pPager->pageSize ){ -1.127 (drh 15-Jun-04): pPager->dbSize++; -1.127 (drh 15-Jun-04): } -1.6 (drh 21-May-01): } -1.2 (drh 14-Apr-01): return rc; -1.13 (drh 02-Jul-01): } -1.13 (drh 02-Jul-01): -1.13 (drh 02-Jul-01): /* -1.290 (danielk1 19-Mar-07): ** This function is used to mark a data-page as writable. It uses -1.290 (danielk1 19-Mar-07): ** pager_write() to open a journal file (if it is not already open) -1.290 (danielk1 19-Mar-07): ** and write the page *pData to the journal. -1.290 (danielk1 19-Mar-07): ** -1.290 (danielk1 19-Mar-07): ** The difference between this function and pager_write() is that this -1.290 (danielk1 19-Mar-07): ** function also deals with the special case where 2 or more pages -1.290 (danielk1 19-Mar-07): ** fit on a single disk sector. In this case all co-resident pages -1.290 (danielk1 19-Mar-07): ** must have been written to the journal file before returning. -1.290 (danielk1 19-Mar-07): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerWrite(DbPage *pDbPage){ -1.290 (danielk1 19-Mar-07): int rc = SQLITE_OK; -1.290 (danielk1 19-Mar-07): -1.292 (danielk1 19-Mar-07): PgHdr *pPg = pDbPage; -1.290 (danielk1 19-Mar-07): Pager *pPager = pPg->pPager; -1.290 (danielk1 19-Mar-07): Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize); -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): if( !MEMDB && nPagePerSector>1 ){ -1.290 (danielk1 19-Mar-07): Pgno nPageCount; /* Total number of pages in database file */ -1.290 (danielk1 19-Mar-07): Pgno pg1; /* First page of the sector pPg is located on. 
*/ -1.290 (danielk1 19-Mar-07): int nPage; /* Number of pages starting at pg1 to journal */ -1.290 (danielk1 19-Mar-07): int ii; -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): /* Set the doNotSync flag to 1. This is because we cannot allow a journal -1.290 (danielk1 19-Mar-07): ** header to be written between the pages journaled by this function. -1.290 (danielk1 19-Mar-07): */ -1.290 (danielk1 19-Mar-07): assert( pPager->doNotSync==0 ); -1.290 (danielk1 19-Mar-07): pPager->doNotSync = 1; -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): /* This trick assumes that both the page-size and sector-size are -1.290 (danielk1 19-Mar-07): ** an integer power of 2. It sets variable pg1 to the identifier -1.290 (danielk1 19-Mar-07): ** of the first page of the sector pPg is located on. -1.290 (danielk1 19-Mar-07): */ -1.290 (danielk1 19-Mar-07): pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1; -1.290 (danielk1 19-Mar-07): -1.292 (danielk1 19-Mar-07): nPageCount = sqlite3PagerPagecount(pPager); -1.290 (danielk1 19-Mar-07): if( pPg->pgno>nPageCount ){ -1.290 (danielk1 19-Mar-07): nPage = (pPg->pgno - pg1)+1; -1.290 (danielk1 19-Mar-07): }else if( (pg1+nPagePerSector-1)>nPageCount ){ -1.290 (danielk1 19-Mar-07): nPage = nPageCount+1-pg1; -1.290 (danielk1 19-Mar-07): }else{ -1.290 (danielk1 19-Mar-07): nPage = nPagePerSector; -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): assert(nPage>0); -1.290 (danielk1 19-Mar-07): assert(pg1<=pPg->pgno); -1.290 (danielk1 19-Mar-07): assert((pg1+nPage)>pPg->pgno); -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): for(ii=0; ii<nPage; ii++){ -1.290 (danielk1 19-Mar-07): Pgno pg = pg1+ii; -1.290 (danielk1 19-Mar-07): if( !pPager->aInJournal || pg==pPg->pgno || -1.290 (danielk1 19-Mar-07): pg>pPager->origDbSize || !(pPager->aInJournal[pg/8]&(1<<(pg&7))) -1.290 (danielk1 19-Mar-07): ) { -1.290 (danielk1 19-Mar-07): if( pg!=PAGER_MJ_PGNO(pPager) ){ -1.292 (danielk1 19-Mar-07): PgHdr *pPage; -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerGet(pPager, pg, &pPage); -1.290 (danielk1 19-Mar-07): if( rc==SQLITE_OK ){ -1.290 (danielk1 19-Mar-07): rc = pager_write(pPage); -1.292 (danielk1 19-Mar-07): sqlite3PagerUnref(pPage); -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): assert( pPager->doNotSync==1 ); -1.290 (danielk1 19-Mar-07): pPager->doNotSync = 0; -1.290 (danielk1 19-Mar-07): }else{ -1.292 (danielk1 19-Mar-07): rc = pager_write(pDbPage); -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): return rc; -1.290 (danielk1 19-Mar-07): } -1.290 (danielk1 19-Mar-07): -1.290 (danielk1 19-Mar-07): /* -1.35 (drh 06-Jan-02): ** Return TRUE if the page given in the argument was previously passed -1.292 (danielk1 19-Mar-07): ** to sqlite3PagerWrite(). In other words, return TRUE if it is ok -1.13 (drh 02-Jul-01): ** to change the content of the page. -1.13 (drh 02-Jul-01): */ -1.254 (danielk1 23-Jan-06): #ifndef NDEBUG -1.292 (danielk1 19-Mar-07): int sqlite3PagerIswriteable(DbPage *pPg){ -1.13 (drh 02-Jul-01): return pPg->dirty; -1.79 (drh 19-Mar-03): } -1.254 (danielk1 23-Jan-06): #endif -1.79 (drh 19-Mar-03): -1.184 (danielk1 20-Jan-05): #ifndef SQLITE_OMIT_VACUUM -1.79 (drh 19-Mar-03): /* -1.79 (drh 19-Mar-03): ** Replace the content of a single page with the information in the third -1.79 (drh 19-Mar-03): ** argument. 
-1.79 (drh 19-Mar-03): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void *pData){ -1.292 (danielk1 19-Mar-07): PgHdr *pPg; -1.79 (drh 19-Mar-03): int rc; -1.79 (drh 19-Mar-03): -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerGet(pPager, pgno, &pPg); -1.79 (drh 19-Mar-03): if( rc==SQLITE_OK ){ -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerWrite(pPg); -1.79 (drh 19-Mar-03): if( rc==SQLITE_OK ){ -1.292 (danielk1 19-Mar-07): memcpy(sqlite3PagerGetData(pPg), pData, pPager->pageSize); -1.79 (drh 19-Mar-03): } -1.292 (danielk1 19-Mar-07): sqlite3PagerUnref(pPg); -1.79 (drh 19-Mar-03): } -1.79 (drh 19-Mar-03): return rc; -1.41 (drh 02-Mar-02): } -1.184 (danielk1 20-Jan-05): #endif -1.41 (drh 02-Mar-02): -1.41 (drh 02-Mar-02): /* -1.41 (drh 02-Mar-02): ** A call to this routine tells the pager that it is not necessary to -1.327 (drh 13-Apr-07): ** write the information on page pPg back to the disk, even though -1.41 (drh 02-Mar-02): ** that page might be marked as dirty. -1.41 (drh 02-Mar-02): ** -1.41 (drh 02-Mar-02): ** The overlying software layer calls this routine when all of the data -1.41 (drh 02-Mar-02): ** on the given page is unused. The pager marks the page as clean so -1.41 (drh 02-Mar-02): ** that it does not get written to disk. -1.41 (drh 02-Mar-02): ** -1.41 (drh 02-Mar-02): ** Tests show that this optimization, together with the -1.292 (danielk1 19-Mar-07): ** sqlite3PagerDontRollback() below, more than double the speed -1.41 (drh 02-Mar-02): ** of large INSERT operations and quadruple the speed of large DELETEs. -1.48 (drh 06-Jul-02): ** -1.48 (drh 06-Jul-02): ** When this routine is called, set the alwaysRollback flag to true. -1.292 (danielk1 19-Mar-07): ** Subsequent calls to sqlite3PagerDontRollback() for the same page -1.48 (drh 06-Jul-02): ** will thereafter be ignored. This is necessary to avoid a problem -1.48 (drh 06-Jul-02): ** where a page with data is added to the freelist during one part of -1.48 (drh 06-Jul-02): ** a transaction then removed from the freelist during a later part -1.48 (drh 06-Jul-02): ** of the same transaction and reused for some other purpose. When it -1.48 (drh 06-Jul-02): ** is first added to the freelist, this routine is called. When reused, -1.307 (drh 30-Mar-07): ** the sqlite3PagerDontRollback() routine is called. But because the -1.307 (drh 30-Mar-07): ** page contains critical data, we still need to be sure it gets -1.307 (drh 30-Mar-07): ** rolled back in spite of the sqlite3PagerDontRollback() call. -1.41 (drh 02-Mar-02): */ -1.327 (drh 13-Apr-07): void sqlite3PagerDontWrite(DbPage *pDbPage){ -1.327 (drh 13-Apr-07): PgHdr *pPg = pDbPage; -1.327 (drh 13-Apr-07): Pager *pPager = pPg->pPager; -1.48 (drh 06-Jul-02): -1.169 (drh 31-Oct-04): if( MEMDB ) return; -1.48 (drh 06-Jul-02): pPg->alwaysRollback = 1; -1.262 (drh 06-Mar-06): if( pPg->dirty && !pPager->stmtInUse ){ -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED ); -1.47 (drh 25-Jun-02): if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSizedbSize ){ -1.47 (drh 25-Jun-02): /* If this pages is the last page in the file and the file has grown -1.47 (drh 25-Jun-02): ** during the current transaction, then do NOT mark the page as clean. -1.47 (drh 25-Jun-02): ** When the database file grows, we must make sure that the last page -1.47 (drh 25-Jun-02): ** gets written at least once so that the disk file will be the correct -1.47 (drh 25-Jun-02): ** size. 
If you do not write this page and the size of the file -1.47 (drh 25-Jun-02): ** on the disk ends up being too small, that can lead to database -1.47 (drh 25-Jun-02): ** corruption during the next transaction. -1.47 (drh 25-Jun-02): */ -1.47 (drh 25-Jun-02): }else{ -1.327 (drh 13-Apr-07): PAGERTRACE3("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)); -1.327 (drh 13-Apr-07): IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) -1.267 (drh 03-May-06): makeClean(pPg); -1.189 (danielk1 15-Feb-05): #ifdef SQLITE_CHECK_PAGES -1.189 (danielk1 15-Feb-05): pPg->pageHash = pager_pagehash(pPg); -1.189 (danielk1 15-Feb-05): #endif -1.47 (drh 25-Jun-02): } -1.41 (drh 02-Mar-02): } -1.41 (drh 02-Mar-02): } -1.41 (drh 02-Mar-02): -1.41 (drh 02-Mar-02): /* -1.41 (drh 02-Mar-02): ** A call to this routine tells the pager that if a rollback occurs, -1.41 (drh 02-Mar-02): ** it is not necessary to restore the data on the given page. This -1.41 (drh 02-Mar-02): ** means that the pager does not have to record the given page in the -1.41 (drh 02-Mar-02): ** rollback journal. -1.327 (drh 13-Apr-07): ** -1.327 (drh 13-Apr-07): ** If we have not yet actually read the content of this page (if -1.327 (drh 13-Apr-07): ** the PgHdr.needRead flag is set) then this routine acts as a promise -1.327 (drh 13-Apr-07): ** that we will never need to read the page content in the future. -1.327 (drh 13-Apr-07): ** so the needRead flag can be cleared at this point. -1.41 (drh 02-Mar-02): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerDontRollback(DbPage *pPg){ -1.41 (drh 02-Mar-02): Pager *pPager = pPg->pPager; -1.41 (drh 02-Mar-02): -1.277 (drh 18-Dec-06): assert( pPager->state>=PAGER_RESERVED ); -1.277 (drh 18-Dec-06): if( pPager->journalOpen==0 ) return; -1.169 (drh 31-Oct-04): if( pPg->alwaysRollback || pPager->alwaysRollback || MEMDB ) return; -1.41 (drh 02-Mar-02): if( !pPg->inJournal && (int)pPg->pgno <= pPager->origDbSize ){ -1.41 (drh 02-Mar-02): assert( pPager->aInJournal!=0 ); -1.41 (drh 02-Mar-02): pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.41 (drh 02-Mar-02): pPg->inJournal = 1; -1.327 (drh 13-Apr-07): pPg->needRead = 0; -1.107 (drh 12-May-04): if( pPager->stmtInUse ){ -1.107 (drh 12-May-04): pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.41 (drh 02-Mar-02): } -1.300 (drh 26-Mar-07): PAGERTRACE3("DONT_ROLLBACK page %d of %d\n", pPg->pgno, PAGERID(pPager)); -1.283 (drh 28-Feb-07): IOTRACE(("GARBAGE %p %d\n", pPager, pPg->pgno)) -1.41 (drh 02-Mar-02): } -1.325 (danielk1 07-Apr-07): if( pPager->stmtInUse -1.325 (danielk1 07-Apr-07): && !pageInStatement(pPg) -1.325 (danielk1 07-Apr-07): && (int)pPg->pgno<=pPager->stmtSize -1.325 (danielk1 07-Apr-07): ){ -1.41 (drh 02-Mar-02): assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); -1.107 (drh 12-May-04): assert( pPager->aInStmt!=0 ); -1.107 (drh 12-May-04): pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); -1.41 (drh 02-Mar-02): } -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.107 (drh 12-May-04): -1.1 (drh 11-Apr-01): /* -1.307 (drh 30-Mar-07): ** This routine is called to increment the database file change-counter, -1.307 (drh 30-Mar-07): ** stored at byte 24 of the pager file. -1.307 (drh 30-Mar-07): */ -1.307 (drh 30-Mar-07): static int pager_incr_changecounter(Pager *pPager){ -1.307 (drh 30-Mar-07): PgHdr *pPgHdr; -1.307 (drh 30-Mar-07): u32 change_counter; -1.307 (drh 30-Mar-07): int rc; -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): if( !pPager->changeCountDone ){ -1.307 (drh 30-Mar-07): /* Open page 1 of the file for writing. 
*/ -1.307 (drh 30-Mar-07): rc = sqlite3PagerGet(pPager, 1, &pPgHdr); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) return rc; -1.307 (drh 30-Mar-07): rc = sqlite3PagerWrite(pPgHdr); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) return rc; -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* Increment the value just read and write it back to byte 24. */ -1.351 (drh 20-Jul-07): change_counter = sqlite3Get4byte((u8*)pPager->dbFileVers); -1.307 (drh 30-Mar-07): change_counter++; -1.307 (drh 30-Mar-07): put32bits(((char*)PGHDR_TO_DATA(pPgHdr))+24, change_counter); -1.307 (drh 30-Mar-07): /* Release the page reference. */ -1.307 (drh 30-Mar-07): sqlite3PagerUnref(pPgHdr); -1.307 (drh 30-Mar-07): pPager->changeCountDone = 1; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): return SQLITE_OK; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* -1.307 (drh 30-Mar-07): ** Sync the database file for the pager pPager. zMaster points to the name -1.307 (drh 30-Mar-07): ** of a master journal file that should be written into the individual -1.307 (drh 30-Mar-07): ** journal file. zMaster may be NULL, which is interpreted as no master -1.307 (drh 30-Mar-07): ** journal (a single database transaction). -1.307 (drh 30-Mar-07): ** -1.307 (drh 30-Mar-07): ** This routine ensures that the journal is synced, all dirty pages written -1.307 (drh 30-Mar-07): ** to the database file and the database file synced. The only thing that -1.307 (drh 30-Mar-07): ** remains to commit the transaction is to delete the journal file (or -1.307 (drh 30-Mar-07): ** master journal file if specified). -1.307 (drh 30-Mar-07): ** -1.307 (drh 30-Mar-07): ** Note that if zMaster==NULL, this does not overwrite a previous value -1.307 (drh 30-Mar-07): ** passed to an sqlite3PagerCommitPhaseOne() call. -1.307 (drh 30-Mar-07): ** -1.307 (drh 30-Mar-07): ** If parameter nTrunc is non-zero, then the pager file is truncated to -1.307 (drh 30-Mar-07): ** nTrunc pages (this is used by auto-vacuum databases). -1.307 (drh 30-Mar-07): */ -1.307 (drh 30-Mar-07): int sqlite3PagerCommitPhaseOne(Pager *pPager, const char *zMaster, Pgno nTrunc){ -1.307 (drh 30-Mar-07): int rc = SQLITE_OK; -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): PAGERTRACE4("DATABASE SYNC: File=%s zMaster=%s nTrunc=%d\n", -1.307 (drh 30-Mar-07): pPager->zFilename, zMaster, nTrunc); -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* If this is an in-memory db, or no pages have been written to, or this -1.307 (drh 30-Mar-07): ** function has already been called, it is a no-op. -1.307 (drh 30-Mar-07): */ -1.307 (drh 30-Mar-07): if( pPager->state!=PAGER_SYNCED && !MEMDB && pPager->dirtyCache ){ -1.307 (drh 30-Mar-07): PgHdr *pPg; -1.307 (drh 30-Mar-07): assert( pPager->journalOpen ); -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* If a master journal file name has already been written to the -1.307 (drh 30-Mar-07): ** journal file, then no sync is required. This happens when it is -1.307 (drh 30-Mar-07): ** written, then the process fails to upgrade from a RESERVED to an -1.307 (drh 30-Mar-07): ** EXCLUSIVE lock. The next time the process tries to commit the -1.307 (drh 30-Mar-07): ** transaction the m-j name will have already been written. 
-1.307 (drh 30-Mar-07): */ -1.307 (drh 30-Mar-07): if( !pPager->setMaster ){ -1.307 (drh 30-Mar-07): rc = pager_incr_changecounter(pPager); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): #ifndef SQLITE_OMIT_AUTOVACUUM -1.307 (drh 30-Mar-07): if( nTrunc!=0 ){ -1.307 (drh 30-Mar-07): /* If this transaction has made the database smaller, then all pages -1.307 (drh 30-Mar-07): ** being discarded by the truncation must be written to the journal -1.307 (drh 30-Mar-07): ** file. -1.307 (drh 30-Mar-07): */ -1.307 (drh 30-Mar-07): Pgno i; -1.307 (drh 30-Mar-07): int iSkip = PAGER_MJ_PGNO(pPager); -1.307 (drh 30-Mar-07): for( i=nTrunc+1; i<=pPager->origDbSize; i++ ){ -1.307 (drh 30-Mar-07): if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=iSkip ){ -1.307 (drh 30-Mar-07): rc = sqlite3PagerGet(pPager, i, &pPg); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): rc = sqlite3PagerWrite(pPg); -1.307 (drh 30-Mar-07): sqlite3PagerUnref(pPg); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): #endif -1.307 (drh 30-Mar-07): rc = writeMasterJournal(pPager, zMaster); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): rc = syncJournal(pPager); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): #ifndef SQLITE_OMIT_AUTOVACUUM -1.307 (drh 30-Mar-07): if( nTrunc!=0 ){ -1.307 (drh 30-Mar-07): rc = sqlite3PagerTruncate(pPager, nTrunc); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): #endif -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* Write all dirty pages to the database file */ -1.307 (drh 30-Mar-07): pPg = pager_get_all_dirty_pages(pPager); -1.307 (drh 30-Mar-07): rc = pager_write_pagelist(pPg); -1.307 (drh 30-Mar-07): if( rc!=SQLITE_OK ) goto sync_exit; -1.308 (drh 30-Mar-07): pPager->pDirty = 0; -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* Sync the database file. */ -1.307 (drh 30-Mar-07): if( !pPager->noSync ){ -1.307 (drh 30-Mar-07): rc = sqlite3OsSync(pPager->fd, 0); -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): IOTRACE(("DBSYNC %p\n", pPager)) -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): pPager->state = PAGER_SYNCED; -1.307 (drh 30-Mar-07): }else if( MEMDB && nTrunc!=0 ){ -1.307 (drh 30-Mar-07): rc = sqlite3PagerTruncate(pPager, nTrunc); -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): sync_exit: -1.343 (danielk1 13-Jun-07): if( rc==SQLITE_IOERR_BLOCKED ){ -1.343 (danielk1 13-Jun-07): /* pager_incr_changecounter() may attempt to obtain an exclusive -1.343 (danielk1 13-Jun-07): * lock to spill the cache and return IOERR_BLOCKED. But since -1.343 (danielk1 13-Jun-07): * there is no chance the cache is inconsistent, it's -1.343 (danielk1 13-Jun-07): * better to return SQLITE_BUSY. -1.343 (danielk1 13-Jun-07): */ -1.343 (danielk1 13-Jun-07): rc = SQLITE_BUSY; -1.343 (danielk1 13-Jun-07): } -1.307 (drh 30-Mar-07): return rc; -1.307 (drh 30-Mar-07): } -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): -1.307 (drh 30-Mar-07): /* -1.1 (drh 11-Apr-01): ** Commit all changes to the database and release the write lock. -1.3 (drh 15-Apr-01): ** -1.3 (drh 15-Apr-01): ** If the commit fails for any reason, a rollback attempt is made -1.3 (drh 15-Apr-01): ** and an error code is returned. 
If the commit worked, SQLITE_OK -1.3 (drh 15-Apr-01): ** is returned. -1.1 (drh 11-Apr-01): */ -1.307 (drh 30-Mar-07): int sqlite3PagerCommitPhaseTwo(Pager *pPager){ -1.17 (drh 14-Sep-01): int rc; -1.1 (drh 11-Apr-01): PgHdr *pPg; -1.3 (drh 15-Apr-01): -1.238 (danielk1 16-Jan-06): if( pPager->errCode ){ -1.251 (danielk1 23-Jan-06): return pPager->errCode; -1.3 (drh 15-Apr-01): } -1.115 (drh 09-Jun-04): if( pPager->statedirty = 0; -1.107 (drh 12-May-04): pPg->inJournal = 0; -1.325 (danielk1 07-Apr-07): pHist->inStmt = 0; -1.264 (drh 23-Mar-06): pPg->needSync = 0; -1.325 (danielk1 07-Apr-07): pHist->pPrevStmt = pHist->pNextStmt = 0; -1.107 (drh 12-May-04): pPg = pPg->pDirty; -1.107 (drh 12-May-04): } -1.267 (drh 03-May-06): pPager->pDirty = 0; -1.129 (danielk1 16-Jun-04): #ifndef NDEBUG -1.129 (danielk1 16-Jun-04): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.129 (danielk1 16-Jun-04): PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); -1.129 (danielk1 16-Jun-04): assert( !pPg->alwaysRollback ); -1.129 (danielk1 16-Jun-04): assert( !pHist->pOrig ); -1.129 (danielk1 16-Jun-04): assert( !pHist->pStmt ); -1.129 (danielk1 16-Jun-04): } -1.129 (danielk1 16-Jun-04): #endif -1.107 (drh 12-May-04): pPager->pStmt = 0; -1.115 (drh 09-Jun-04): pPager->state = PAGER_SHARED; -1.107 (drh 12-May-04): return SQLITE_OK; -1.107 (drh 12-May-04): } -1.308 (drh 30-Mar-07): assert( pPager->journalOpen || !pPager->dirtyCache ); -1.308 (drh 30-Mar-07): assert( pPager->state==PAGER_SYNCED || !pPager->dirtyCache ); -1.308 (drh 30-Mar-07): rc = pager_end_transaction(pPager); -1.302 (danielk1 27-Mar-07): return pager_error(pPager, rc); -1.1 (drh 11-Apr-01): } -1.1 (drh 11-Apr-01): -1.1 (drh 11-Apr-01): /* -1.115 (drh 09-Jun-04): ** Rollback all changes. The database falls back to PAGER_SHARED mode. -1.1 (drh 11-Apr-01): ** All in-memory cache pages revert to their original data contents. -1.1 (drh 11-Apr-01): ** The journal is deleted. -1.3 (drh 15-Apr-01): ** -1.3 (drh 15-Apr-01): ** This routine cannot fail unless some other process is not following -1.311 (drh 30-Mar-07): ** the correct locking protocol or unless some other -1.3 (drh 15-Apr-01): ** process is writing trash into the journal file (SQLITE_CORRUPT) or -1.3 (drh 15-Apr-01): ** unless a prior malloc() failed (SQLITE_NOMEM). Appropriate error -1.3 (drh 15-Apr-01): ** codes are returned for all these occasions. Otherwise, -1.3 (drh 15-Apr-01): ** SQLITE_OK is returned. 
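The two routines above split a commit into a phase that makes everything durable (journal synced, dirty pages written, database synced) and a phase that only finalizes by deleting the journal. A loose sketch of that ordering discipline with plain POSIX calls (the demo-journal/demo-db file names are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Phase one: journal first, then the database, each followed by a sync.
** Phase two: removing the journal is the atomic commit point.         */
int main(void){
  int jfd = open("demo-journal", O_CREAT|O_WRONLY|O_TRUNC, 0644);
  int dfd = open("demo-db",      O_CREAT|O_WRONLY, 0644);
  if( jfd<0 || dfd<0 ) return 1;

  if( write(jfd, "original page image\n", 20)<0 ) return 1;
  if( fsync(jfd)<0 ) return 1;     /* journal content must be durable first */
  if( write(dfd, "new page image\n", 15)<0 ) return 1;
  if( fsync(dfd)<0 ) return 1;     /* database content durable              */

  close(jfd);
  close(dfd);
  unlink("demo-journal");          /* "phase two": commit the transaction   */
  return 0;
}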
-1.1 (drh 11-Apr-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerRollback(Pager *pPager){ -1.1 (drh 11-Apr-01): int rc; -1.300 (drh 26-Mar-07): PAGERTRACE2("ROLLBACK %d\n", PAGERID(pPager)); -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.107 (drh 12-May-04): PgHdr *p; -1.107 (drh 12-May-04): for(p=pPager->pAll; p; p=p->pNextAll){ -1.107 (drh 12-May-04): PgHistory *pHist; -1.129 (danielk1 16-Jun-04): assert( !p->alwaysRollback ); -1.129 (danielk1 16-Jun-04): if( !p->dirty ){ -1.129 (danielk1 16-Jun-04): assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pOrig ); -1.129 (danielk1 16-Jun-04): assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pStmt ); -1.129 (danielk1 16-Jun-04): continue; -1.129 (danielk1 16-Jun-04): } -1.129 (danielk1 16-Jun-04): -1.107 (drh 12-May-04): pHist = PGHDR_TO_HIST(p, pPager); -1.107 (drh 12-May-04): if( pHist->pOrig ){ -1.107 (drh 12-May-04): memcpy(PGHDR_TO_DATA(p), pHist->pOrig, pPager->pageSize); -1.300 (drh 26-Mar-07): PAGERTRACE3("ROLLBACK-PAGE %d of %d\n", p->pgno, PAGERID(pPager)); -1.107 (drh 12-May-04): }else{ -1.300 (drh 26-Mar-07): PAGERTRACE3("PAGE %d is clean on %d\n", p->pgno, PAGERID(pPager)); -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): clearHistory(pHist); -1.107 (drh 12-May-04): p->dirty = 0; -1.107 (drh 12-May-04): p->inJournal = 0; -1.325 (danielk1 07-Apr-07): pHist->inStmt = 0; -1.325 (danielk1 07-Apr-07): pHist->pPrevStmt = pHist->pNextStmt = 0; -1.128 (danielk1 15-Jun-04): if( pPager->xReiniter ){ -1.292 (danielk1 19-Mar-07): pPager->xReiniter(p, pPager->pageSize); -1.128 (danielk1 15-Jun-04): } -1.107 (drh 12-May-04): } -1.267 (drh 03-May-06): pPager->pDirty = 0; -1.107 (drh 12-May-04): pPager->pStmt = 0; -1.107 (drh 12-May-04): pPager->dbSize = pPager->origDbSize; -1.323 (danielk1 05-Apr-07): pager_truncate_cache(pPager); -1.107 (drh 12-May-04): pPager->stmtInUse = 0; -1.115 (drh 09-Jun-04): pPager->state = PAGER_SHARED; -1.107 (drh 12-May-04): return SQLITE_OK; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): -1.115 (drh 09-Jun-04): if( !pPager->dirtyCache || !pPager->journalOpen ){ -1.307 (drh 30-Mar-07): rc = pager_end_transaction(pPager); -1.60 (drh 02-Dec-02): return rc; -1.60 (drh 02-Dec-02): } -1.68 (drh 16-Jan-03): -1.238 (danielk1 16-Jan-06): if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ -1.115 (drh 09-Jun-04): if( pPager->state>=PAGER_EXCLUSIVE ){ -1.293 (danielk1 23-Mar-07): pager_playback(pPager, 0); -1.43 (drh 05-Mar-02): } -1.238 (danielk1 16-Jan-06): return pPager->errCode; -1.3 (drh 15-Apr-01): } -1.115 (drh 09-Jun-04): if( pPager->state==PAGER_RESERVED ){ -1.190 (danielk1 15-Feb-05): int rc2; -1.293 (danielk1 23-Mar-07): rc = pager_playback(pPager, 0); -1.307 (drh 30-Mar-07): rc2 = pager_end_transaction(pPager); -1.115 (drh 09-Jun-04): if( rc==SQLITE_OK ){ -1.115 (drh 09-Jun-04): rc = rc2; -1.115 (drh 09-Jun-04): } -1.115 (drh 09-Jun-04): }else{ -1.293 (danielk1 23-Mar-07): rc = pager_playback(pPager, 0); -1.3 (drh 15-Apr-01): } -1.323 (danielk1 05-Apr-07): /* pager_reset(pPager); */ -1.3 (drh 15-Apr-01): pPager->dbSize = -1; -1.246 (danielk1 20-Jan-06): -1.246 (danielk1 20-Jan-06): /* If an error occurs during a ROLLBACK, we can no longer trust the pager -1.246 (danielk1 20-Jan-06): ** cache. So call pager_error() on the way out to make any error -1.246 (danielk1 20-Jan-06): ** persistent. 
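For an in-memory database the rollback above replays no journal file at all; each dirty page is memcpy()ed back from the original image saved in its PgHistory and the copy is discarded. A sketch of that save-then-restore pattern, with a hypothetical MemPage structure standing in for PgHdr/PgHistory:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 1024

typedef struct MemPage MemPage;
struct MemPage {
  unsigned char aData[PAGE_SIZE]; /* current page content                     */
  unsigned char *pOrig;           /* copy taken before the first write, or 0  */
};

/* Take the "before" image the first time a page is written. */
static int memPageWrite(MemPage *p, const void *z, size_t n){
  if( p->pOrig==0 ){
    p->pOrig = malloc(PAGE_SIZE);
    if( p->pOrig==0 ) return -1;
    memcpy(p->pOrig, p->aData, PAGE_SIZE);
  }
  memcpy(p->aData, z, n<PAGE_SIZE ? n : PAGE_SIZE);
  return 0;
}

/* Rollback: restore the original image and drop it, as in the MEMDB branch. */
static void memPageRollback(MemPage *p){
  if( p->pOrig ){
    memcpy(p->aData, p->pOrig, PAGE_SIZE);
    free(p->pOrig);
    p->pOrig = 0;
  }
}

int main(void){
  MemPage pg;
  memset(&pg, 0, sizeof(pg));
  memcpy(pg.aData, "committed", 10);
  memPageWrite(&pg, "uncommitted", 12);
  memPageRollback(&pg);
  printf("%s\n", pg.aData);   /* prints "committed" again */
  return 0;
}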
-1.246 (danielk1 20-Jan-06): */ -1.246 (danielk1 20-Jan-06): return pager_error(pPager, rc); -1.28 (drh 18-Oct-01): } -1.14 (drh 13-Sep-01): -1.14 (drh 13-Sep-01): /* -1.14 (drh 13-Sep-01): ** Return TRUE if the database file is opened read-only. Return FALSE -1.14 (drh 13-Sep-01): ** if the database is (in theory) writable. -1.14 (drh 13-Sep-01): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerIsreadonly(Pager *pPager){ -1.15 (drh 13-Sep-01): return pPager->readOnly; -1.14 (drh 13-Sep-01): } -1.3 (drh 15-Apr-01): -1.3 (drh 15-Apr-01): /* -1.271 (drh 08-Aug-06): ** Return the number of references to the pager. -1.271 (drh 08-Aug-06): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerRefcount(Pager *pPager){ -1.271 (drh 08-Aug-06): return pPager->nRef; -1.271 (drh 08-Aug-06): } -1.271 (drh 08-Aug-06): -1.271 (drh 08-Aug-06): #ifdef SQLITE_TEST -1.271 (drh 08-Aug-06): /* -1.3 (drh 15-Apr-01): ** This routine is used for testing and analysis only. -1.3 (drh 15-Apr-01): */ -1.292 (danielk1 19-Mar-07): int *sqlite3PagerStats(Pager *pPager){ -1.180 (danielk1 08-Jan-05): static int a[11]; -1.3 (drh 15-Apr-01): a[0] = pPager->nRef; -1.3 (drh 15-Apr-01): a[1] = pPager->nPage; -1.3 (drh 15-Apr-01): a[2] = pPager->mxPage; -1.3 (drh 15-Apr-01): a[3] = pPager->dbSize; -1.3 (drh 15-Apr-01): a[4] = pPager->state; -1.238 (danielk1 16-Jan-06): a[5] = pPager->errCode; -1.3 (drh 15-Apr-01): a[6] = pPager->nHit; -1.3 (drh 15-Apr-01): a[7] = pPager->nMiss; -1.319 (drh 05-Apr-07): a[8] = 0; /* Used to be pPager->nOvfl */ -1.180 (danielk1 08-Jan-05): a[9] = pPager->nRead; -1.180 (danielk1 08-Jan-05): a[10] = pPager->nWrite; -1.3 (drh 15-Apr-01): return a; -1.37 (drh 02-Feb-02): } -1.271 (drh 08-Aug-06): #endif -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.107 (drh 12-May-04): ** Set the statement rollback point. -1.37 (drh 02-Feb-02): ** -1.37 (drh 02-Feb-02): ** This routine should be called with the transaction journal already -1.107 (drh 12-May-04): ** open. A new statement journal is created that can be used to rollback -1.40 (drh 19-Feb-02): ** changes of a single SQL command within a larger transaction. 
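Both the main journal and the statement journal above track which pages they already contain with a one-bit-per-page array sized dbSize/8 + 1, tested with expressions like aInJournal[i/8] & (1<<(i&7)). A small self-contained sketch of that bookkeeping (PageBitvec and its helpers are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* One bit per page, as in Pager.aInJournal / Pager.aInStmt. */
typedef struct {
  unsigned char *a;
  int nPage;
} PageBitvec;

static int bitvecInit(PageBitvec *p, int nPage){
  p->nPage = nPage;
  p->a = calloc(nPage/8 + 1, 1);
  return p->a ? 0 : -1;
}
static void bitvecSet(PageBitvec *p, int pgno){
  p->a[pgno/8] |= (unsigned char)(1 << (pgno & 7));
}
static int bitvecTest(const PageBitvec *p, int pgno){
  return (p->a[pgno/8] & (1 << (pgno & 7))) != 0;
}

int main(void){
  PageBitvec bv;
  if( bitvecInit(&bv, 100) ) return 1;
  bitvecSet(&bv, 5);               /* page 5 has been written to the journal */
  printf("page 5 journaled: %d\n", bitvecTest(&bv, 5));
  printf("page 6 journaled: %d\n", bitvecTest(&bv, 6));
  free(bv.a);
  return 0;
}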
-1.37 (drh 02-Feb-02): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerStmtBegin(Pager *pPager){ -1.37 (drh 02-Feb-02): int rc; -1.107 (drh 12-May-04): assert( !pPager->stmtInUse ); -1.279 (drh 03-Jan-07): assert( pPager->state>=PAGER_SHARED ); -1.155 (drh 18-Aug-04): assert( pPager->dbSize>=0 ); -1.300 (drh 26-Mar-07): PAGERTRACE2("STMT-BEGIN %d\n", PAGERID(pPager)); -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.107 (drh 12-May-04): pPager->stmtInUse = 1; -1.107 (drh 12-May-04): pPager->stmtSize = pPager->dbSize; -1.107 (drh 12-May-04): return SQLITE_OK; -1.107 (drh 12-May-04): } -1.60 (drh 02-Dec-02): if( !pPager->journalOpen ){ -1.107 (drh 12-May-04): pPager->stmtAutoopen = 1; -1.60 (drh 02-Dec-02): return SQLITE_OK; -1.60 (drh 02-Dec-02): } -1.37 (drh 02-Feb-02): assert( pPager->journalOpen ); -1.107 (drh 12-May-04): pPager->aInStmt = sqliteMalloc( pPager->dbSize/8 + 1 ); -1.107 (drh 12-May-04): if( pPager->aInStmt==0 ){ -1.223 (danielk1 06-Dec-05): /* sqlite3OsLock(pPager->fd, SHARED_LOCK); */ -1.37 (drh 02-Feb-02): return SQLITE_NOMEM; -1.37 (drh 02-Feb-02): } -1.73 (drh 11-Feb-03): #ifndef NDEBUG -1.222 (drh 30-Nov-05): rc = sqlite3OsFileSize(pPager->jfd, &pPager->stmtJSize); -1.107 (drh 12-May-04): if( rc ) goto stmt_begin_failed; -1.138 (danielk1 25-Jun-04): assert( pPager->stmtJSize == pPager->journalOff ); -1.73 (drh 11-Feb-03): #endif -1.138 (danielk1 25-Jun-04): pPager->stmtJSize = pPager->journalOff; -1.107 (drh 12-May-04): pPager->stmtSize = pPager->dbSize; -1.138 (danielk1 25-Jun-04): pPager->stmtHdrOff = 0; -1.143 (danielk1 26-Jun-04): pPager->stmtCksum = pPager->cksumInit; -1.107 (drh 12-May-04): if( !pPager->stmtOpen ){ -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerOpentemp(&pPager->stfd); -1.107 (drh 12-May-04): if( rc ) goto stmt_begin_failed; -1.107 (drh 12-May-04): pPager->stmtOpen = 1; -1.107 (drh 12-May-04): pPager->stmtNRec = 0; -1.46 (drh 30-May-02): } -1.107 (drh 12-May-04): pPager->stmtInUse = 1; -1.37 (drh 02-Feb-02): return SQLITE_OK; -1.37 (drh 02-Feb-02): -1.107 (drh 12-May-04): stmt_begin_failed: -1.107 (drh 12-May-04): if( pPager->aInStmt ){ -1.107 (drh 12-May-04): sqliteFree(pPager->aInStmt); -1.107 (drh 12-May-04): pPager->aInStmt = 0; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): return rc; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.107 (drh 12-May-04): ** Commit a statement. 
-1.37 (drh 02-Feb-02): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerStmtCommit(Pager *pPager){ -1.107 (drh 12-May-04): if( pPager->stmtInUse ){ -1.57 (drh 10-Nov-02): PgHdr *pPg, *pNext; -1.300 (drh 26-Mar-07): PAGERTRACE2("STMT-COMMIT %d\n", PAGERID(pPager)); -1.169 (drh 31-Oct-04): if( !MEMDB ){ -1.222 (drh 30-Nov-05): sqlite3OsSeek(pPager->stfd, 0); -1.222 (drh 30-Nov-05): /* sqlite3OsTruncate(pPager->stfd, 0); */ -1.107 (drh 12-May-04): sqliteFree( pPager->aInStmt ); -1.107 (drh 12-May-04): pPager->aInStmt = 0; -1.325 (danielk1 07-Apr-07): }else{ -1.325 (danielk1 07-Apr-07): for(pPg=pPager->pStmt; pPg; pPg=pNext){ -1.107 (drh 12-May-04): PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); -1.325 (danielk1 07-Apr-07): pNext = pHist->pNextStmt; -1.325 (danielk1 07-Apr-07): assert( pHist->inStmt ); -1.325 (danielk1 07-Apr-07): pHist->inStmt = 0; -1.325 (danielk1 07-Apr-07): pHist->pPrevStmt = pHist->pNextStmt = 0; -1.107 (drh 12-May-04): sqliteFree(pHist->pStmt); -1.107 (drh 12-May-04): pHist->pStmt = 0; -1.107 (drh 12-May-04): } -1.38 (drh 02-Feb-02): } -1.107 (drh 12-May-04): pPager->stmtNRec = 0; -1.107 (drh 12-May-04): pPager->stmtInUse = 0; -1.107 (drh 12-May-04): pPager->pStmt = 0; -1.38 (drh 02-Feb-02): } -1.107 (drh 12-May-04): pPager->stmtAutoopen = 0; -1.37 (drh 02-Feb-02): return SQLITE_OK; -1.37 (drh 02-Feb-02): } -1.37 (drh 02-Feb-02): -1.37 (drh 02-Feb-02): /* -1.107 (drh 12-May-04): ** Rollback a statement. -1.37 (drh 02-Feb-02): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerStmtRollback(Pager *pPager){ -1.37 (drh 02-Feb-02): int rc; -1.107 (drh 12-May-04): if( pPager->stmtInUse ){ -1.300 (drh 26-Mar-07): PAGERTRACE2("STMT-ROLLBACK %d\n", PAGERID(pPager)); -1.169 (drh 31-Oct-04): if( MEMDB ){ -1.107 (drh 12-May-04): PgHdr *pPg; -1.325 (danielk1 07-Apr-07): PgHistory *pHist; -1.325 (danielk1 07-Apr-07): for(pPg=pPager->pStmt; pPg; pPg=pHist->pNextStmt){ -1.325 (danielk1 07-Apr-07): pHist = PGHDR_TO_HIST(pPg, pPager); -1.107 (drh 12-May-04): if( pHist->pStmt ){ -1.107 (drh 12-May-04): memcpy(PGHDR_TO_DATA(pPg), pHist->pStmt, pPager->pageSize); -1.107 (drh 12-May-04): sqliteFree(pHist->pStmt); -1.107 (drh 12-May-04): pHist->pStmt = 0; -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): } -1.107 (drh 12-May-04): pPager->dbSize = pPager->stmtSize; -1.323 (danielk1 05-Apr-07): pager_truncate_cache(pPager); -1.107 (drh 12-May-04): rc = SQLITE_OK; -1.107 (drh 12-May-04): }else{ -1.107 (drh 12-May-04): rc = pager_stmt_playback(pPager); -1.107 (drh 12-May-04): } -1.292 (danielk1 19-Mar-07): sqlite3PagerStmtCommit(pPager); -1.38 (drh 02-Feb-02): }else{ -1.38 (drh 02-Feb-02): rc = SQLITE_OK; -1.38 (drh 02-Feb-02): } -1.107 (drh 12-May-04): pPager->stmtAutoopen = 0; -1.37 (drh 02-Feb-02): return rc; -1.80 (drh 06-Apr-03): } -1.80 (drh 06-Apr-03): -1.80 (drh 06-Apr-03): /* -1.80 (drh 06-Apr-03): ** Return the full pathname of the database file. -1.80 (drh 06-Apr-03): */ -1.292 (danielk1 19-Mar-07): const char *sqlite3PagerFilename(Pager *pPager){ -1.80 (drh 06-Apr-03): return pPager->zFilename; -1.126 (danielk1 14-Jun-04): } -1.126 (danielk1 14-Jun-04): -1.126 (danielk1 14-Jun-04): /* -1.126 (danielk1 14-Jun-04): ** Return the directory of the database file. -1.126 (danielk1 14-Jun-04): */ -1.292 (danielk1 19-Mar-07): const char *sqlite3PagerDirname(Pager *pPager){ -1.126 (danielk1 14-Jun-04): return pPager->zDirectory; -1.126 (danielk1 14-Jun-04): } -1.126 (danielk1 14-Jun-04): -1.126 (danielk1 14-Jun-04): /* -1.126 (danielk1 14-Jun-04): ** Return the full pathname of the journal file. 
-1.126 (danielk1 14-Jun-04): */ -1.292 (danielk1 19-Mar-07): const char *sqlite3PagerJournalname(Pager *pPager){ -1.126 (danielk1 14-Jun-04): return pPager->zJournal; -1.96 (drh 09-Feb-04): } -1.96 (drh 09-Feb-04): -1.96 (drh 09-Feb-04): /* -1.210 (drh 27-Aug-05): ** Return true if fsync() calls are disabled for this pager. Return FALSE -1.210 (drh 27-Aug-05): ** if fsync()s are executed normally. -1.210 (drh 27-Aug-05): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerNosync(Pager *pPager){ -1.210 (drh 27-Aug-05): return pPager->noSync; -1.210 (drh 27-Aug-05): } -1.210 (drh 27-Aug-05): -1.319 (drh 05-Apr-07): #ifdef SQLITE_HAS_CODEC -1.210 (drh 27-Aug-05): /* -1.96 (drh 09-Feb-04): ** Set the codec for this pager -1.96 (drh 09-Feb-04): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerSetCodec( -1.96 (drh 09-Feb-04): Pager *pPager, -1.261 (drh 06-Mar-06): void *(*xCodec)(void*,void*,Pgno,int), -1.96 (drh 09-Feb-04): void *pCodecArg -1.96 (drh 09-Feb-04): ){ -1.96 (drh 09-Feb-04): pPager->xCodec = xCodec; -1.96 (drh 09-Feb-04): pPager->pCodecArg = pCodecArg; -1.110 (danielk1 03-Jun-04): } -1.319 (drh 05-Apr-07): #endif -1.110 (danielk1 03-Jun-04): -1.170 (danielk1 02-Nov-04): #ifndef SQLITE_OMIT_AUTOVACUUM -1.170 (danielk1 02-Nov-04): /* -1.345 (drh 16-Jun-07): ** Move the page pPg to location pgno in the file. -1.170 (danielk1 02-Nov-04): ** -1.345 (drh 16-Jun-07): ** There must be no references to the page previously located at -1.345 (drh 16-Jun-07): ** pgno (which we call pPgOld) though that page is allowed to be -1.345 (drh 16-Jun-07): ** in cache. If the page previous located at pgno is not already -1.345 (drh 16-Jun-07): ** in the rollback journal, it is not put there by by this routine. -1.170 (danielk1 02-Nov-04): ** -1.345 (drh 16-Jun-07): ** References to the page pPg remain valid. Updating any -1.345 (drh 16-Jun-07): ** meta-data associated with pPg (i.e. data stored in the nExtra bytes -1.170 (danielk1 02-Nov-04): ** allocated along with the page) is the responsibility of the caller. -1.170 (danielk1 02-Nov-04): ** -1.191 (danielk1 09-Mar-05): ** A transaction must be active when this routine is called. It used to be -1.191 (danielk1 09-Mar-05): ** required that a statement transaction was not active, but this restriction -1.191 (danielk1 09-Mar-05): ** has been removed (CREATE INDEX needs to move a page when a statement -1.191 (danielk1 09-Mar-05): ** transaction is active). -1.170 (danielk1 02-Nov-04): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno){ -1.345 (drh 16-Jun-07): PgHdr *pPgOld; /* The page being overwritten. 
*/ -1.171 (danielk1 03-Nov-04): int h; -1.176 (danielk1 08-Nov-04): Pgno needSyncPgno = 0; -1.170 (danielk1 02-Nov-04): -1.170 (danielk1 02-Nov-04): assert( pPg->nRef>0 ); -1.170 (danielk1 02-Nov-04): -1.300 (drh 26-Mar-07): PAGERTRACE5("MOVE %d page %d (needSync=%d) moves to %d\n", -1.175 (danielk1 08-Nov-04): PAGERID(pPager), pPg->pgno, pPg->needSync, pgno); -1.283 (drh 28-Feb-07): IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) -1.174 (danielk1 06-Nov-04): -1.331 (danielk1 28-Apr-07): pager_get_content(pPg); -1.176 (danielk1 08-Nov-04): if( pPg->needSync ){ -1.176 (danielk1 08-Nov-04): needSyncPgno = pPg->pgno; -1.347 (drh 16-Jun-07): assert( pPg->inJournal || (int)pgno>pPager->origDbSize ); -1.176 (danielk1 08-Nov-04): assert( pPg->dirty ); -1.178 (danielk1 23-Nov-04): assert( pPager->needSync ); -1.176 (danielk1 08-Nov-04): } -1.176 (danielk1 08-Nov-04): -1.170 (danielk1 02-Nov-04): /* Unlink pPg from it's hash-chain */ -1.171 (danielk1 03-Nov-04): unlinkHashChain(pPager, pPg); -1.170 (danielk1 02-Nov-04): -1.174 (danielk1 06-Nov-04): /* If the cache contains a page with page-number pgno, remove it -1.175 (danielk1 08-Nov-04): ** from it's hash chain. Also, if the PgHdr.needSync was set for -1.175 (danielk1 08-Nov-04): ** page pgno before the 'move' operation, it needs to be retained -1.175 (danielk1 08-Nov-04): ** for the page moved there. -1.171 (danielk1 03-Nov-04): */ -1.345 (drh 16-Jun-07): pPg->needSync = 0; -1.170 (danielk1 02-Nov-04): pPgOld = pager_lookup(pPager, pgno); -1.170 (danielk1 02-Nov-04): if( pPgOld ){ -1.171 (danielk1 03-Nov-04): assert( pPgOld->nRef==0 ); -1.171 (danielk1 03-Nov-04): unlinkHashChain(pPager, pPgOld); -1.267 (drh 03-May-06): makeClean(pPgOld); -1.345 (drh 16-Jun-07): pPg->needSync = pPgOld->needSync; -1.345 (drh 16-Jun-07): }else{ -1.345 (drh 16-Jun-07): pPg->needSync = 0; -1.345 (drh 16-Jun-07): } -1.345 (drh 16-Jun-07): if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ -1.345 (drh 16-Jun-07): pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; -1.345 (drh 16-Jun-07): }else{ -1.345 (drh 16-Jun-07): pPg->inJournal = 0; -1.346 (drh 16-Jun-07): assert( pPg->needSync==0 || (int)pgno>pPager->origDbSize ); -1.170 (danielk1 02-Nov-04): } -1.170 (danielk1 02-Nov-04): -1.171 (danielk1 03-Nov-04): /* Change the page number for pPg and insert it into the new hash-chain. */ -1.270 (drh 28-Jun-06): assert( pgno!=0 ); -1.171 (danielk1 03-Nov-04): pPg->pgno = pgno; -1.268 (drh 07-May-06): h = pgno & (pPager->nHash-1); -1.171 (danielk1 03-Nov-04): if( pPager->aHash[h] ){ -1.171 (danielk1 03-Nov-04): assert( pPager->aHash[h]->pPrevHash==0 ); -1.171 (danielk1 03-Nov-04): pPager->aHash[h]->pPrevHash = pPg; -1.171 (danielk1 03-Nov-04): } -1.171 (danielk1 03-Nov-04): pPg->pNextHash = pPager->aHash[h]; -1.171 (danielk1 03-Nov-04): pPager->aHash[h] = pPg; -1.171 (danielk1 03-Nov-04): pPg->pPrevHash = 0; -1.171 (danielk1 03-Nov-04): -1.267 (drh 03-May-06): makeDirty(pPg); -1.170 (danielk1 02-Nov-04): pPager->dirtyCache = 1; -1.170 (danielk1 02-Nov-04): -1.176 (danielk1 08-Nov-04): if( needSyncPgno ){ -1.176 (danielk1 08-Nov-04): /* If needSyncPgno is non-zero, then the journal file needs to be -1.176 (danielk1 08-Nov-04): ** sync()ed before any data is written to database file page needSyncPgno. -1.176 (danielk1 08-Nov-04): ** Currently, no such page exists in the page-cache and the -1.176 (danielk1 08-Nov-04): ** Pager.aInJournal bit has been set. 
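sqlite3PagerMovepage above relinks the page header into a different bucket of the pager's hash table; the bucket is chosen with pgno & (nHash-1), which relies on the table size being a power of two. A sketch of unlinking from one doubly-linked chain and pushing onto another, with hypothetical Pg/aHash names:

#include <stdio.h>

#define N_HASH 8   /* must be a power of two for the & (N_HASH-1) trick */

typedef struct Pg Pg;
struct Pg {
  unsigned pgno;
  Pg *pNextHash, *pPrevHash;
};

static Pg *aHash[N_HASH];

/* Remove a page from whatever chain it is currently on. */
static void unlinkPage(Pg *p){
  if( p->pPrevHash ){
    p->pPrevHash->pNextHash = p->pNextHash;
  }else{
    aHash[p->pgno & (N_HASH-1)] = p->pNextHash;
  }
  if( p->pNextHash ) p->pNextHash->pPrevHash = p->pPrevHash;
  p->pNextHash = p->pPrevHash = 0;
}

/* Give the page a new number and push it onto the front of its new chain. */
static void movePage(Pg *p, unsigned newPgno){
  unlinkPage(p);
  p->pgno = newPgno;
  unsigned h = newPgno & (N_HASH-1);
  p->pNextHash = aHash[h];
  if( aHash[h] ) aHash[h]->pPrevHash = p;
  aHash[h] = p;
}

int main(void){
  Pg pg = {3, 0, 0};
  aHash[3 & (N_HASH-1)] = &pg;    /* page starts out on bucket 3          */
  movePage(&pg, 12);              /* 12 & 7 == 4, so it moves to bucket 4 */
  printf("bucket 4 head pgno = %u\n", aHash[4]->pgno);
  return 0;
}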
This needs to be remedied by loading -1.176 (danielk1 08-Nov-04): ** the page into the pager-cache and setting the PgHdr.needSync flag. -1.178 (danielk1 23-Nov-04): ** -1.292 (danielk1 19-Mar-07): ** The sqlite3PagerGet() call may cause the journal to sync. So make -1.178 (danielk1 23-Nov-04): ** sure the Pager.needSync flag is set too. -1.176 (danielk1 08-Nov-04): */ -1.176 (danielk1 08-Nov-04): int rc; -1.292 (danielk1 19-Mar-07): PgHdr *pPgHdr; -1.178 (danielk1 23-Nov-04): assert( pPager->needSync ); -1.292 (danielk1 19-Mar-07): rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); -1.176 (danielk1 08-Nov-04): if( rc!=SQLITE_OK ) return rc; -1.178 (danielk1 23-Nov-04): pPager->needSync = 1; -1.292 (danielk1 19-Mar-07): pPgHdr->needSync = 1; -1.292 (danielk1 19-Mar-07): pPgHdr->inJournal = 1; -1.292 (danielk1 19-Mar-07): makeDirty(pPgHdr); -1.292 (danielk1 19-Mar-07): sqlite3PagerUnref(pPgHdr); -1.176 (danielk1 08-Nov-04): } -1.176 (danielk1 08-Nov-04): -1.170 (danielk1 02-Nov-04): return SQLITE_OK; -1.170 (danielk1 02-Nov-04): } -1.170 (danielk1 02-Nov-04): #endif -1.170 (danielk1 02-Nov-04): -1.292 (danielk1 19-Mar-07): /* -1.292 (danielk1 19-Mar-07): ** Return a pointer to the data for the specified page. -1.292 (danielk1 19-Mar-07): */ -1.292 (danielk1 19-Mar-07): void *sqlite3PagerGetData(DbPage *pPg){ -1.292 (danielk1 19-Mar-07): return PGHDR_TO_DATA(pPg); -1.292 (danielk1 19-Mar-07): } -1.292 (danielk1 19-Mar-07): -1.292 (danielk1 19-Mar-07): /* -1.292 (danielk1 19-Mar-07): ** Return a pointer to the Pager.nExtra bytes of "extra" space -1.292 (danielk1 19-Mar-07): ** allocated along with the specified page. -1.292 (danielk1 19-Mar-07): */ -1.292 (danielk1 19-Mar-07): void *sqlite3PagerGetExtra(DbPage *pPg){ -1.292 (danielk1 19-Mar-07): Pager *pPager = pPg->pPager; -1.292 (danielk1 19-Mar-07): return (pPager?PGHDR_TO_EXTRA(pPg, pPager):0); -1.292 (danielk1 19-Mar-07): } -1.292 (danielk1 19-Mar-07): -1.294 (danielk1 24-Mar-07): /* -1.294 (danielk1 24-Mar-07): ** Get/set the locking-mode for this pager. Parameter eMode must be one -1.294 (danielk1 24-Mar-07): ** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or -1.294 (danielk1 24-Mar-07): ** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then -1.294 (danielk1 24-Mar-07): ** the locking-mode is set to the value specified. -1.294 (danielk1 24-Mar-07): ** -1.294 (danielk1 24-Mar-07): ** The returned value is either PAGER_LOCKINGMODE_NORMAL or -1.294 (danielk1 24-Mar-07): ** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) -1.294 (danielk1 24-Mar-07): ** locking-mode. -1.294 (danielk1 24-Mar-07): */ -1.294 (danielk1 24-Mar-07): int sqlite3PagerLockingMode(Pager *pPager, int eMode){ -1.309 (drh 30-Mar-07): assert( eMode==PAGER_LOCKINGMODE_QUERY -1.309 (drh 30-Mar-07): || eMode==PAGER_LOCKINGMODE_NORMAL -1.309 (drh 30-Mar-07): || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); -1.309 (drh 30-Mar-07): assert( PAGER_LOCKINGMODE_QUERY<0 ); -1.309 (drh 30-Mar-07): assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); -1.309 (drh 30-Mar-07): if( eMode>=0 && !pPager->tempFile ){ -1.294 (danielk1 24-Mar-07): pPager->exclusiveMode = eMode; -1.294 (danielk1 24-Mar-07): } -1.294 (danielk1 24-Mar-07): return (int)pPager->exclusiveMode; -1.294 (danielk1 24-Mar-07): } -1.294 (danielk1 24-Mar-07): -1.132 (dougcurr 18-Jun-04): #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) -1.114 (drh 09-Jun-04): /* -1.114 (drh 09-Jun-04): ** Return the current state of the file lock for the given pager. 
-1.114 (drh 09-Jun-04): ** The return value is one of NO_LOCK, SHARED_LOCK, RESERVED_LOCK, -1.114 (drh 09-Jun-04): ** PENDING_LOCK, or EXCLUSIVE_LOCK. -1.114 (drh 09-Jun-04): */ -1.292 (danielk1 19-Mar-07): int sqlite3PagerLockstate(Pager *pPager){ -1.222 (drh 30-Nov-05): return sqlite3OsLockState(pPager->fd); -1.114 (drh 09-Jun-04): } -1.114 (drh 09-Jun-04): #endif -1.12 (drh 28-Jun-01): -1.185 (danielk1 21-Jan-05): #ifdef SQLITE_DEBUG -1.12 (drh 28-Jun-01): /* -1.12 (drh 28-Jun-01): ** Print a listing of all referenced pages and their ref count. -1.12 (drh 28-Jun-01): */ -1.292 (danielk1 19-Mar-07): void sqlite3PagerRefdump(Pager *pPager){ -1.12 (drh 28-Jun-01): PgHdr *pPg; -1.12 (drh 28-Jun-01): for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ -1.12 (drh 28-Jun-01): if( pPg->nRef<=0 ) continue; -1.163 (drh 08-Sep-04): sqlite3DebugPrintf("PAGE %3d addr=%p nRef=%d\n", -1.163 (drh 08-Sep-04): pPg->pgno, PGHDR_TO_DATA(pPg), pPg->nRef); -1.12 (drh 28-Jun-01): } -1.12 (drh 28-Jun-01): } -1.12 (drh 28-Jun-01): #endif -1.202 (drh 28-Apr-05): -1.202 (drh 28-Apr-05): #endif /* SQLITE_OMIT_DISKIO */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/expr.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/expr.c --- sqlite3-3.4.2/src/expr.c 2007-08-07 18:13:04.000000000 +0100 +++ sqlite3-3.6.16/src/expr.c 2009-06-25 12:45:57.000000000 +0100 @@ -12,10 +12,9 @@ ** This file contains routines used for analyzing expressions and ** for generating VDBE code that evaluates expressions in SQLite. ** -** $Id: expr.c,v 1.303 2007/08/07 17:13:04 drh Exp $ +** $Id: expr.c,v 1.446 2009/06/19 18:32:55 drh Exp $ */ #include "sqliteInt.h" -#include /* ** Return the 'affinity' of the expression pExpr if any. @@ -36,13 +35,25 @@ char sqlite3ExprAffinity(Expr *pExpr){ int op = pExpr->op; if( op==TK_SELECT ){ - return sqlite3ExprAffinity(pExpr->pSelect->pEList->a[0].pExpr); + assert( pExpr->flags&EP_xIsSelect ); + return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); } #ifndef SQLITE_OMIT_CAST if( op==TK_CAST ){ - return sqlite3AffinityType(&pExpr->token); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + return sqlite3AffinityType(pExpr->u.zToken); } #endif + if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER) + && pExpr->pTab!=0 + ){ + /* op==TK_REGISTER && pExpr->pTab!=0 happens when pExpr was originally + ** a TK_COLUMN but was previously evaluated and cached in a register */ + int j = pExpr->iColumn; + if( j<0 ) return SQLITE_AFF_INTEGER; + assert( pExpr->pTab && jpTab->nCol ); + return pExpr->pTab->aCol[j].affinity; + } return pExpr->affinity; } @@ -53,14 +64,19 @@ ** flag. An explicit collating sequence will override implicit ** collating sequences. 
*/ -Expr *sqlite3ExprSetColl(Parse *pParse, Expr *pExpr, Token *pName){ +Expr *sqlite3ExprSetColl(Parse *pParse, Expr *pExpr, Token *pCollName){ + char *zColl = 0; /* Dequoted name of collation sequence */ CollSeq *pColl; - if( pExpr==0 ) return 0; - pColl = sqlite3LocateCollSeq(pParse, (char*)pName->z, pName->n); - if( pColl ){ - pExpr->pColl = pColl; - pExpr->flags |= EP_ExpCollate; + sqlite3 *db = pParse->db; + zColl = sqlite3NameFromToken(db, pCollName); + if( pExpr && zColl ){ + pColl = sqlite3LocateCollSeq(pParse, zColl); + if( pColl ){ + pExpr->pColl = pColl; + pExpr->flags |= EP_ExpCollate; + } } + sqlite3DbFree(db, zColl); return pExpr; } @@ -70,13 +86,29 @@ */ CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){ CollSeq *pColl = 0; - if( pExpr ){ + Expr *p = pExpr; + while( ALWAYS(p) ){ int op; - pColl = pExpr->pColl; - op = pExpr->op; - if( (op==TK_CAST || op==TK_UPLUS) && !pColl ){ - return sqlite3ExprCollSeq(pParse, pExpr->pLeft); + pColl = p->pColl; + if( pColl ) break; + op = p->op; + if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER) && p->pTab!=0 ){ + /* op==TK_REGISTER && p->pTab!=0 happens when pExpr was originally + ** a TK_COLUMN but was previously evaluated and cached in a register */ + const char *zColl; + int j = p->iColumn; + if( j>=0 ){ + sqlite3 *db = pParse->db; + zColl = p->pTab->aCol[j].zColl; + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); + pExpr->pColl = pColl; + } + break; + } + if( op!=TK_CAST && op!=TK_UPLUS ){ + break; } + p = p->pLeft; } if( sqlite3CheckCollSeq(pParse, pColl) ){ pColl = 0; @@ -125,11 +157,9 @@ aff = sqlite3ExprAffinity(pExpr->pLeft); if( pExpr->pRight ){ aff = sqlite3CompareAffinity(pExpr->pRight, aff); - } - else if( pExpr->pSelect ){ - aff = sqlite3CompareAffinity(pExpr->pSelect->pEList->a[0].pExpr, aff); - } - else if( !aff ){ + }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); + }else if( !aff ){ aff = SQLITE_AFF_NONE; } return aff; @@ -154,15 +184,13 @@ } /* -** Return the P1 value that should be used for a binary comparison +** Return the P5 value that should be used for a binary comparison ** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2. -** If jumpIfNull is true, then set the low byte of the returned -** P1 value to tell the opcode to jump if either expression -** evaluates to NULL. -*/ -static int binaryCompareP1(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ - char aff = sqlite3ExprAffinity(pExpr2); - return ((int)sqlite3CompareAffinity(pExpr1, aff))+(jumpIfNull?0x100:0); +*/ +static u8 binaryCompareP5(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ + u8 aff = (char)sqlite3ExprAffinity(pExpr2); + aff = (u8)sqlite3CompareAffinity(pExpr1, aff) | (u8)jumpIfNull; + return aff; } /* @@ -200,6 +228,30 @@ } /* +** Generate the operands for a comparison operation. Before +** generating the code for each operand, set the EP_AnyAff +** flag on the expression so that it will be able to used a +** cached column value that has previously undergone an +** affinity change. 
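binaryCompareP5 above folds the comparison affinity and the jump-if-null behaviour into a single u8 that later code splits apart again with SQLITE_AFF_MASK. A trivial sketch of that flag-packing idea; AFF_MASK and JUMPIFNULL_FLAG here are hypothetical stand-ins, not the real SQLite constants:

#include <stdio.h>

#define AFF_MASK        0x0F   /* low bits: affinity code              */
#define JUMPIFNULL_FLAG 0x10   /* high bit: jump when an operand is NULL */

typedef unsigned char u8;

static u8 packP5(u8 affinity, int jumpIfNull){
  return (u8)((affinity & AFF_MASK) | (jumpIfNull ? JUMPIFNULL_FLAG : 0));
}

int main(void){
  u8 p5 = packP5(0x04 /* e.g. a numeric affinity code */, 1);
  printf("affinity   = 0x%02x\n", p5 & AFF_MASK);
  printf("jumpIfNull = %d\n", (p5 & JUMPIFNULL_FLAG) != 0);
  return 0;
}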
+*/ +static void codeCompareOperands( + Parse *pParse, /* Parsing and code generating context */ + Expr *pLeft, /* The left operand */ + int *pRegLeft, /* Register where left operand is stored */ + int *pFreeLeft, /* Free this register when done */ + Expr *pRight, /* The right operand */ + int *pRegRight, /* Register where right operand is stored */ + int *pFreeRight /* Write temp register for right operand there */ +){ + while( pLeft->op==TK_UPLUS ) pLeft = pLeft->pLeft; + pLeft->flags |= EP_AnyAff; + *pRegLeft = sqlite3ExprCodeTemp(pParse, pLeft, pFreeLeft); + while( pRight->op==TK_UPLUS ) pRight = pRight->pLeft; + pRight->flags |= EP_AnyAff; + *pRegRight = sqlite3ExprCodeTemp(pParse, pRight, pFreeRight); +} + +/* ** Generate code for a comparison operator. */ static int codeCompare( @@ -207,97 +259,252 @@ Expr *pLeft, /* The left operand */ Expr *pRight, /* The right operand */ int opcode, /* The comparison opcode */ + int in1, int in2, /* Register holding operands */ int dest, /* Jump here if true. */ int jumpIfNull /* If true, jump if either operand is NULL */ ){ - int p1 = binaryCompareP1(pLeft, pRight, jumpIfNull); - CollSeq *p3 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight); - return sqlite3VdbeOp3(pParse->pVdbe, opcode, p1, dest, (void*)p3, P3_COLLSEQ); + int p5; + int addr; + CollSeq *p4; + + p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight); + p5 = binaryCompareP5(pLeft, pRight, jumpIfNull); + addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1, + (void*)p4, P4_COLLSEQ); + sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5); + if( (p5 & SQLITE_AFF_MASK)!=SQLITE_AFF_NONE ){ + sqlite3ExprCacheAffinityChange(pParse, in1, 1); + sqlite3ExprCacheAffinityChange(pParse, in2, 1); + } + return addr; +} + +#if SQLITE_MAX_EXPR_DEPTH>0 +/* +** Check that argument nHeight is less than or equal to the maximum +** expression depth allowed. If it is not, leave an error message in +** pParse. +*/ +int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){ + int rc = SQLITE_OK; + int mxHeight = pParse->db->aLimit[SQLITE_LIMIT_EXPR_DEPTH]; + if( nHeight>mxHeight ){ + sqlite3ErrorMsg(pParse, + "Expression tree is too large (maximum depth %d)", mxHeight + ); + rc = SQLITE_ERROR; + } + return rc; +} + +/* The following three functions, heightOfExpr(), heightOfExprList() +** and heightOfSelect(), are used to determine the maximum height +** of any expression tree referenced by the structure passed as the +** first argument. +** +** If this maximum height is greater than the current value pointed +** to by pnHeight, the second parameter, then set *pnHeight to that +** value. +*/ +static void heightOfExpr(Expr *p, int *pnHeight){ + if( p ){ + if( p->nHeight>*pnHeight ){ + *pnHeight = p->nHeight; + } + } +} +static void heightOfExprList(ExprList *p, int *pnHeight){ + if( p ){ + int i; + for(i=0; inExpr; i++){ + heightOfExpr(p->a[i].pExpr, pnHeight); + } + } +} +static void heightOfSelect(Select *p, int *pnHeight){ + if( p ){ + heightOfExpr(p->pWhere, pnHeight); + heightOfExpr(p->pHaving, pnHeight); + heightOfExpr(p->pLimit, pnHeight); + heightOfExpr(p->pOffset, pnHeight); + heightOfExprList(p->pEList, pnHeight); + heightOfExprList(p->pGroupBy, pnHeight); + heightOfExprList(p->pOrderBy, pnHeight); + heightOfSelect(p->pPrior, pnHeight); + } +} + +/* +** Set the Expr.nHeight variable in the structure passed as an +** argument. An expression with no children, Expr.pList or +** Expr.pSelect member has a height of 1. 
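exprSetHeight and sqlite3ExprCheckHeight above compute each node's height as one more than its tallest child and reject any tree deeper than the configured limit. A standalone sketch of the same bottom-up height computation over a binary tree (Node and the limit value are illustrative):

#include <stdio.h>

typedef struct Node Node;
struct Node {
  Node *pLeft, *pRight;
  int nHeight;
};

/* Height of a leaf is 1; otherwise 1 plus the tallest subtree. */
static void setHeight(Node *p){
  int h = 0;
  if( p->pLeft  && p->pLeft->nHeight  > h ) h = p->pLeft->nHeight;
  if( p->pRight && p->pRight->nHeight > h ) h = p->pRight->nHeight;
  p->nHeight = h + 1;
}

/* Mimic sqlite3ExprCheckHeight: fail if the tree is too deep. */
static int checkHeight(int nHeight, int mxHeight){
  return nHeight > mxHeight ? -1 : 0;
}

int main(void){
  Node a = {0, 0, 0}, b = {0, 0, 0}, root = {&a, &b, 0};
  setHeight(&a);
  setHeight(&b);
  setHeight(&root);                       /* children first, then the parent */
  printf("height = %d\n", root.nHeight);  /* prints 2 */
  printf("within limit of 1000: %s\n",
         checkHeight(root.nHeight, 1000)==0 ? "yes" : "no");
  return 0;
}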
Any other expression +** has a height equal to the maximum height of any other +** referenced Expr plus one. +*/ +static void exprSetHeight(Expr *p){ + int nHeight = 0; + heightOfExpr(p->pLeft, &nHeight); + heightOfExpr(p->pRight, &nHeight); + if( ExprHasProperty(p, EP_xIsSelect) ){ + heightOfSelect(p->x.pSelect, &nHeight); + }else{ + heightOfExprList(p->x.pList, &nHeight); + } + p->nHeight = nHeight + 1; +} + +/* +** Set the Expr.nHeight variable using the exprSetHeight() function. If +** the height is greater than the maximum allowed expression depth, +** leave an error in pParse. +*/ +void sqlite3ExprSetHeight(Parse *pParse, Expr *p){ + exprSetHeight(p); + sqlite3ExprCheckHeight(pParse, p->nHeight); } /* +** Return the maximum height of any expression tree referenced +** by the select statement passed as an argument. +*/ +int sqlite3SelectExprHeight(Select *p){ + int nHeight = 0; + heightOfSelect(p, &nHeight); + return nHeight; +} +#else + #define exprSetHeight(y) +#endif /* SQLITE_MAX_EXPR_DEPTH>0 */ + +/* +** This routine is the core allocator for Expr nodes. +** ** Construct a new expression node and return a pointer to it. Memory -** for this node is obtained from sqliteMalloc(). The calling function +** for this node and for the pToken argument is a single allocation +** obtained from sqlite3DbMalloc(). The calling function ** is responsible for making sure the node eventually gets freed. -*/ -Expr *sqlite3Expr(int op, Expr *pLeft, Expr *pRight, const Token *pToken){ +** +** If dequote is true, then the token (if it exists) is dequoted. +** If dequote is false, no dequoting is performance. The deQuote +** parameter is ignored if pToken is NULL or if the token does not +** appear to be quoted. If the quotes were of the form "..." (double-quotes) +** then the EP_DblQuoted flag is set on the expression node. +** +** Special case: If op==TK_INTEGER and pToken points to a string that +** can be translated into a 32-bit integer, then the token is not +** stored in u.zToken. Instead, the integer values is written +** into u.iValue and the EP_IntValue flag is set. No extra storage +** is allocated to hold the integer text and the dequote flag is ignored. +*/ +Expr *sqlite3ExprAlloc( + sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ + int op, /* Expression opcode */ + const Token *pToken, /* Token argument. Might be NULL */ + int dequote /* True to dequote */ +){ Expr *pNew; - pNew = sqliteMalloc( sizeof(Expr) ); - if( pNew==0 ){ - /* When malloc fails, delete pLeft and pRight. Expressions passed to - ** this function must always be allocated with sqlite3Expr() for this - ** reason. 
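The new sqlite3ExprAlloc above puts the node and its token text into a single allocation, and skips the text entirely when a TK_INTEGER token fits in 32 bits, storing the value inline and setting EP_IntValue. A sketch of the same two tricks using plain malloc and a hypothetical MiniExpr structure:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EP_IntValue 0x01   /* stand-in for the real flag */

typedef struct MiniExpr MiniExpr;
struct MiniExpr {
  int op;
  unsigned flags;
  union {
    char *zToken;  /* token text, stored right after the struct */
    int iValue;    /* used instead when EP_IntValue is set      */
  } u;
};

/* One allocation holds the node and (when needed) a copy of the token.
** Tokens that are plain 32-bit integers are stored inline instead.    */
static MiniExpr *miniExprAlloc(int op, const char *zToken){
  size_t nExtra = 0;
  long v = 0;
  int isInt = 0;
  if( zToken ){
    char *zEnd;
    v = strtol(zToken, &zEnd, 10);
    isInt = (*zToken!=0 && *zEnd==0 && v>=INT_MIN && v<=INT_MAX);
    if( !isInt ) nExtra = strlen(zToken) + 1;
  }
  MiniExpr *p = calloc(1, sizeof(*p) + nExtra);
  if( p==0 ) return 0;
  p->op = op;
  if( zToken ){
    if( isInt ){
      p->flags |= EP_IntValue;
      p->u.iValue = (int)v;
    }else{
      p->u.zToken = (char*)&p[1];
      memcpy(p->u.zToken, zToken, nExtra);
    }
  }
  return p;
}

int main(void){
  MiniExpr *pInt = miniExprAlloc(1, "42");
  MiniExpr *pStr = miniExprAlloc(2, "hello");
  if( pInt==0 || pStr==0 ) return 1;
  printf("integer token stored inline: %d\n", pInt->u.iValue);
  printf("string token after the node: %s\n", pStr->u.zToken);
  free(pInt);
  free(pStr);
  return 0;
}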
- */ - sqlite3ExprDelete(pLeft); - sqlite3ExprDelete(pRight); - return 0; - } - pNew->op = op; - pNew->pLeft = pLeft; - pNew->pRight = pRight; - pNew->iAgg = -1; + int nExtra = 0; + int iValue = 0; + if( pToken ){ - assert( pToken->dyn==0 ); - pNew->span = pNew->token = *pToken; - }else if( pLeft ){ - if( pRight ){ - sqlite3ExprSpan(pNew, &pLeft->span, &pRight->span); - if( pRight->flags & EP_ExpCollate ){ - pNew->flags |= EP_ExpCollate; - pNew->pColl = pRight->pColl; - } + if( op!=TK_INTEGER || pToken->z==0 + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + nExtra = pToken->n+1; } - if( pLeft->flags & EP_ExpCollate ){ - pNew->flags |= EP_ExpCollate; - pNew->pColl = pLeft->pColl; + } + pNew = sqlite3DbMallocZero(db, sizeof(Expr)+nExtra); + if( pNew ){ + pNew->op = (u8)op; + pNew->iAgg = -1; + if( pToken ){ + if( nExtra==0 ){ + pNew->flags |= EP_IntValue; + pNew->u.iValue = iValue; + }else{ + int c; + pNew->u.zToken = (char*)&pNew[1]; + memcpy(pNew->u.zToken, pToken->z, pToken->n); + pNew->u.zToken[pToken->n] = 0; + if( dequote && nExtra>=3 + && ((c = pToken->z[0])=='\'' || c=='"' || c=='[' || c=='`') ){ + sqlite3Dequote(pNew->u.zToken); + if( c=='"' ) pNew->flags |= EP_DblQuoted; + } + } } +#if SQLITE_MAX_EXPR_DEPTH>0 + pNew->nHeight = 1; +#endif } - - sqlite3ExprSetHeight(pNew); return pNew; } /* -** Works like sqlite3Expr() but frees its pLeft and pRight arguments -** if it fails due to a malloc problem. +** Allocate a new expression node from a zero-terminated token that has +** already been dequoted. */ -Expr *sqlite3ExprOrFree(int op, Expr *pLeft, Expr *pRight, const Token *pToken){ - Expr *pNew = sqlite3Expr(op, pLeft, pRight, pToken); - if( pNew==0 ){ - sqlite3ExprDelete(pLeft); - sqlite3ExprDelete(pRight); +Expr *sqlite3Expr( + sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ + int op, /* Expression opcode */ + const char *zToken /* Token argument. Might be NULL */ +){ + Token x; + x.z = zToken; + x.n = zToken ? sqlite3Strlen30(zToken) : 0; + return sqlite3ExprAlloc(db, op, &x, 0); +} + +/* +** Attach subtrees pLeft and pRight to the Expr node pRoot. +** +** If pRoot==NULL that means that a memory allocation error has occurred. +** In that case, delete the subtrees pLeft and pRight. +*/ +void sqlite3ExprAttachSubtrees( + sqlite3 *db, + Expr *pRoot, + Expr *pLeft, + Expr *pRight +){ + if( pRoot==0 ){ + assert( db->mallocFailed ); + sqlite3ExprDelete(db, pLeft); + sqlite3ExprDelete(db, pRight); + }else{ + if( pRight ){ + pRoot->pRight = pRight; + if( pRight->flags & EP_ExpCollate ){ + pRoot->flags |= EP_ExpCollate; + pRoot->pColl = pRight->pColl; + } + } + if( pLeft ){ + pRoot->pLeft = pLeft; + if( pLeft->flags & EP_ExpCollate ){ + pRoot->flags |= EP_ExpCollate; + pRoot->pColl = pLeft->pColl; + } + } + exprSetHeight(pRoot); } - return pNew; } /* -** When doing a nested parse, you can include terms in an expression -** that look like this: #0 #1 #2 ... These terms refer to elements -** on the stack. "#0" means the top of the stack. -** "#1" means the next down on the stack. And so forth. -** -** This routine is called by the parser to deal with on of those terms. -** It immediately generates code to store the value in a memory location. -** The returns an expression that will code to extract the value from -** that memory location as needed. +** Allocate a Expr node which joins as many as two subtrees. +** +** One or both of the subtrees can be NULL. Return a pointer to the new +** Expr node. 
Or, if an OOM error occurs, set pParse->db->mallocFailed, +** free the subtrees and return NULL. */ -Expr *sqlite3RegisterExpr(Parse *pParse, Token *pToken){ - Vdbe *v = pParse->pVdbe; - Expr *p; - int depth; - if( pParse->nested==0 ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", pToken); - return sqlite3Expr(TK_NULL, 0, 0, 0); - } - if( v==0 ) return 0; - p = sqlite3Expr(TK_REGISTER, 0, 0, pToken); - if( p==0 ){ - return 0; /* Malloc failed */ - } - depth = atoi((char*)&pToken->z[1]); - p->iTable = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_Dup, depth, 0); - sqlite3VdbeAddOp(v, OP_MemStore, p->iTable, 1); +Expr *sqlite3PExpr( + Parse *pParse, /* Parsing context */ + int op, /* Expression opcode */ + Expr *pLeft, /* Left operand */ + Expr *pRight, /* Right operand */ + const Token *pToken /* Argument token */ +){ + Expr *p = sqlite3ExprAlloc(pParse->db, op, pToken, 1); + sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); return p; } @@ -305,31 +512,15 @@ ** Join two expressions using an AND operator. If either expression is ** NULL, then just return the other expression. */ -Expr *sqlite3ExprAnd(Expr *pLeft, Expr *pRight){ +Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){ if( pLeft==0 ){ return pRight; }else if( pRight==0 ){ return pLeft; }else{ - return sqlite3Expr(TK_AND, pLeft, pRight, 0); - } -} - -/* -** Set the Expr.span field of the given expression to span all -** text between the two given tokens. -*/ -void sqlite3ExprSpan(Expr *pExpr, Token *pLeft, Token *pRight){ - assert( pRight!=0 ); - assert( pLeft!=0 ); - if( !sqlite3MallocFailed() && pRight->z && pLeft->z ){ - assert( pLeft->dyn==0 || pLeft->z[pLeft->n]==0 ); - if( pLeft->dyn==0 && pRight->dyn==0 ){ - pExpr->span.z = pLeft->z; - pExpr->span.n = pRight->n + (pRight->z - pLeft->z); - }else{ - pExpr->span.z = 0; - } + Expr *pNew = sqlite3ExprAlloc(db, TK_AND, 0, 0); + sqlite3ExprAttachSubtrees(db, pNew, pLeft, pRight); + return pNew; } } @@ -337,21 +528,18 @@ ** Construct a new expression node for a function with multiple ** arguments. */ -Expr *sqlite3ExprFunction(ExprList *pList, Token *pToken){ +Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){ Expr *pNew; + sqlite3 *db = pParse->db; assert( pToken ); - pNew = sqliteMalloc( sizeof(Expr) ); + pNew = sqlite3ExprAlloc(db, TK_FUNCTION, pToken, 1); if( pNew==0 ){ - sqlite3ExprListDelete(pList); /* Avoid leaking memory when malloc fails */ + sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */ return 0; } - pNew->op = TK_FUNCTION; - pNew->pList = pList; - assert( pToken->dyn==0 ); - pNew->token = *pToken; - pNew->span = pNew->token; - - sqlite3ExprSetHeight(pNew); + pNew->x.pList = pList; + assert( !ExprHasProperty(pNew, EP_xIsSelect) ); + sqlite3ExprSetHeight(pParse, pNew); return pNew; } @@ -366,45 +554,52 @@ ** sure "nnn" is not too be to avoid a denial of service attack when ** the SQL statement comes from an external source. ** -** Wildcards of the form ":aaa" or "$aaa" are assigned the same number +** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number ** as the previous instance of the same wildcard. Or if this is the first ** instance of the wildcard, the next sequenial variable number is ** assigned. 
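The numbering policy described above gives "?" the next sequential number, takes the number directly from "?nnn", and reuses a previously assigned number when a ":aaa", "$aaa" or "@aaa" name repeats. A sketch of that policy over a small lookup table (the table and function names are hypothetical, not the Parse object):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NAMED 16

static const char *azName[MAX_NAMED]; /* previously seen named wildcards */
static int aiNum[MAX_NAMED];
static int nNamed = 0;
static int nVar = 0;                  /* highest variable number so far  */

/* Return the variable number for one wildcard token. */
static int assignVarNumber(const char *z){
  if( z[0]=='?' && z[1]==0 ) return ++nVar;   /* plain "?"                  */
  if( z[0]=='?' ){                            /* "?nnn": use nnn directly   */
    int i = atoi(&z[1]);
    if( i>nVar ) nVar = i;
    return i;
  }
  /* ":aaa", "$aaa" or "@aaa": reuse the number of an earlier occurrence. */
  for(int i=0; i<nNamed; i++){
    if( strcmp(azName[i], z)==0 ) return aiNum[i];
  }
  if( nNamed<MAX_NAMED ){
    azName[nNamed] = z;
    aiNum[nNamed] = ++nVar;
    return aiNum[nNamed++];
  }
  return ++nVar;
}

int main(void){
  printf("?      -> %d\n", assignVarNumber("?"));      /* 1       */
  printf("?5     -> %d\n", assignVarNumber("?5"));     /* 5       */
  printf(":name  -> %d\n", assignVarNumber(":name"));  /* 6       */
  printf(":name  -> %d\n", assignVarNumber(":name"));  /* 6 again */
  return 0;
}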
*/ void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){ - Token *pToken; + sqlite3 *db = pParse->db; + const char *z; + if( pExpr==0 ) return; - pToken = &pExpr->token; - assert( pToken->n>=1 ); - assert( pToken->z!=0 ); - assert( pToken->z[0]!=0 ); - if( pToken->n==1 ){ + assert( !ExprHasAnyProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) ); + z = pExpr->u.zToken; + assert( z!=0 ); + assert( z[0]!=0 ); + if( z[1]==0 ){ /* Wildcard of the form "?". Assign the next variable number */ + assert( z[0]=='?' ); pExpr->iTable = ++pParse->nVar; - }else if( pToken->z[0]=='?' ){ + }else if( z[0]=='?' ){ /* Wildcard of the form "?nnn". Convert "nnn" to an integer and ** use it as the variable number */ int i; - pExpr->iTable = i = atoi((char*)&pToken->z[1]); - if( i<1 || i>SQLITE_MAX_VARIABLE_NUMBER ){ + pExpr->iTable = i = atoi((char*)&z[1]); + testcase( i==0 ); + testcase( i==1 ); + testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 ); + testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ); + if( i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", - SQLITE_MAX_VARIABLE_NUMBER); + db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); } if( i>pParse->nVar ){ pParse->nVar = i; } }else{ - /* Wildcards of the form ":aaa" or "$aaa". Reuse the same variable + /* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable ** number as the prior appearance of the same name, or if the name ** has never appeared before, reuse the same variable number */ - int i, n; - n = pToken->n; + int i; + u32 n; + n = sqlite3Strlen30(z); for(i=0; inVarExpr; i++){ - Expr *pE; - if( (pE = pParse->apVarExpr[i])!=0 - && pE->token.n==n - && memcmp(pE->token.z, pToken->z, n)==0 ){ + Expr *pE = pParse->apVarExpr[i]; + assert( pE!=0 ); + if( memcmp(pE->u.zToken, z, n)==0 && pE->u.zToken[n]==0 ){ pExpr->iTable = pE->iTable; break; } @@ -413,49 +608,247 @@ pExpr->iTable = ++pParse->nVar; if( pParse->nVarExpr>=pParse->nVarExprAlloc-1 ){ pParse->nVarExprAlloc += pParse->nVarExprAlloc + 10; - pParse->apVarExpr = sqliteReallocOrFree(pParse->apVarExpr, - pParse->nVarExprAlloc*sizeof(pParse->apVarExpr[0]) ); + pParse->apVarExpr = + sqlite3DbReallocOrFree( + db, + pParse->apVarExpr, + pParse->nVarExprAlloc*sizeof(pParse->apVarExpr[0]) + ); } - if( !sqlite3MallocFailed() ){ + if( !db->mallocFailed ){ assert( pParse->apVarExpr!=0 ); pParse->apVarExpr[pParse->nVarExpr++] = pExpr; } } } - if( !pParse->nErr && pParse->nVar>SQLITE_MAX_VARIABLE_NUMBER ){ + if( !pParse->nErr && pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "too many SQL variables"); } } /* +** Clear an expression structure without deleting the structure itself. +** Substructure is deleted. +*/ +void sqlite3ExprClear(sqlite3 *db, Expr *p){ + assert( p!=0 ); + if( !ExprHasAnyProperty(p, EP_TokenOnly) ){ + sqlite3ExprDelete(db, p->pLeft); + sqlite3ExprDelete(db, p->pRight); + if( !ExprHasProperty(p, EP_Reduced) && (p->flags2 & EP2_MallocedToken)!=0 ){ + sqlite3DbFree(db, p->u.zToken); + } + if( ExprHasProperty(p, EP_xIsSelect) ){ + sqlite3SelectDelete(db, p->x.pSelect); + }else{ + sqlite3ExprListDelete(db, p->x.pList); + } + } +} + +/* ** Recursively delete an expression tree. 
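The rewritten deletion path above splits cleanup in two: sqlite3ExprClear releases the substructure, and the node itself is freed only when EP_Static is not set, because static nodes live inside some larger buffer. A sketch of that recursive delete with the same guard (TNode and the flag value are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define EP_Static 0x01

typedef struct TNode TNode;
struct TNode {
  unsigned flags;
  TNode *pLeft, *pRight;
};

/* Free the subtrees of p, and p itself unless it lives inside a larger
** allocation (EP_Static), mirroring sqlite3ExprClear/sqlite3ExprDelete. */
static void nodeDelete(TNode *p){
  if( p==0 ) return;
  nodeDelete(p->pLeft);
  nodeDelete(p->pRight);
  if( (p->flags & EP_Static)==0 ) free(p);
}

int main(void){
  TNode *root = calloc(1, sizeof(TNode));
  if( root==0 ) return 1;
  root->pLeft = calloc(1, sizeof(TNode));
  root->pRight = calloc(1, sizeof(TNode));
  nodeDelete(root);   /* all heap nodes are released */
  puts("tree deleted");
  return 0;
}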
*/ -void sqlite3ExprDelete(Expr *p){ +void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p==0 ) return; - if( p->span.dyn ) sqliteFree((char*)p->span.z); - if( p->token.dyn ) sqliteFree((char*)p->token.z); - sqlite3ExprDelete(p->pLeft); - sqlite3ExprDelete(p->pRight); - sqlite3ExprListDelete(p->pList); - sqlite3SelectDelete(p->pSelect); - sqliteFree(p); + sqlite3ExprClear(db, p); + if( !ExprHasProperty(p, EP_Static) ){ + sqlite3DbFree(db, p); + } } /* -** The Expr.token field might be a string literal that is quoted. -** If so, remove the quotation marks. +** Return the number of bytes allocated for the expression structure +** passed as the first argument. This is always one of EXPR_FULLSIZE, +** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE. */ -void sqlite3DequoteExpr(Expr *p){ - if( ExprHasAnyProperty(p, EP_Dequoted) ){ - return; +static int exprStructSize(Expr *p){ + if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE; + if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE; + return EXPR_FULLSIZE; +} + +/* +** The dupedExpr*Size() routines each return the number of bytes required +** to store a copy of an expression or expression tree. They differ in +** how much of the tree is measured. +** +** dupedExprStructSize() Size of only the Expr structure +** dupedExprNodeSize() Size of Expr + space for token +** dupedExprSize() Expr + token + subtree components +** +*************************************************************************** +** +** The dupedExprStructSize() function returns two values OR-ed together: +** (1) the space required for a copy of the Expr structure only and +** (2) the EP_xxx flags that indicate what the structure size should be. +** The return values is always one of: +** +** EXPR_FULLSIZE +** EXPR_REDUCEDSIZE | EP_Reduced +** EXPR_TOKENONLYSIZE | EP_TokenOnly +** +** The size of the structure can be found by masking the return value +** of this routine with 0xfff. The flags can be found by masking the +** return value with EP_Reduced|EP_TokenOnly. +** +** Note that with flags==EXPRDUP_REDUCE, this routines works on full-size +** (unreduced) Expr objects as they or originally constructed by the parser. +** During expression analysis, extra information is computed and moved into +** later parts of teh Expr object and that extra information might get chopped +** off if the expression is reduced. Note also that it does not work to +** make a EXPRDUP_REDUCE copy of a reduced expression. It is only legal +** to reduce a pristine expression tree from the parser. The implementation +** of dupedExprStructSize() contain multiple assert() statements that attempt +** to enforce this constraint. +*/ +static int dupedExprStructSize(Expr *p, int flags){ + int nSize; + assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ + if( 0==(flags&EXPRDUP_REDUCE) ){ + nSize = EXPR_FULLSIZE; + }else{ + assert( !ExprHasAnyProperty(p, EP_TokenOnly|EP_Reduced) ); + assert( !ExprHasProperty(p, EP_FromJoin) ); + assert( (p->flags2 & EP2_MallocedToken)==0 ); + assert( (p->flags2 & EP2_Irreducible)==0 ); + if( p->pLeft || p->pRight || p->pColl || p->x.pList ){ + nSize = EXPR_REDUCEDSIZE | EP_Reduced; + }else{ + nSize = EXPR_TOKENONLYSIZE | EP_TokenOnly; + } + } + return nSize; +} + +/* +** This function returns the space in bytes required to store the copy +** of the Expr structure and a copy of the Expr.u.zToken string (if that +** string is defined.) 
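The dupedExpr*Size() routines described above measure how many bytes a copy of a node, its token text, and optionally its whole subtree will need, so that exprDup can make one allocation and carve every copied node out of it by advancing a buffer pointer. A sketch of that measure, allocate once, then carve pattern, with hypothetical SrcNode/DupNode types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROUND8(x) (((x)+7)&~7)

typedef struct SrcNode SrcNode;
struct SrcNode {
  const char *zToken;
  SrcNode *pLeft, *pRight;
};

typedef struct DupNode DupNode;
struct DupNode {
  char *zToken;          /* stored immediately after the node */
  DupNode *pLeft, *pRight;
};

/* Pass 1: how many bytes will the whole duplicated tree need? */
static size_t dupSize(const SrcNode *p){
  if( p==0 ) return 0;
  size_t n = ROUND8(sizeof(DupNode) + strlen(p->zToken) + 1);
  return n + dupSize(p->pLeft) + dupSize(p->pRight);
}

/* Pass 2: carve each copy out of the shared buffer, advancing *pz. */
static DupNode *dupInto(const SrcNode *p, unsigned char **pz){
  if( p==0 ) return 0;
  DupNode *pNew = (DupNode*)*pz;
  size_t nTok = strlen(p->zToken) + 1;
  pNew->zToken = (char*)&pNew[1];
  memcpy(pNew->zToken, p->zToken, nTok);
  *pz += ROUND8(sizeof(DupNode) + nTok);
  pNew->pLeft  = dupInto(p->pLeft,  pz);
  pNew->pRight = dupInto(p->pRight, pz);
  return pNew;
}

int main(void){
  SrcNode leaf1 = {"x", 0, 0}, leaf2 = {"y", 0, 0};
  SrcNode root  = {"+", &leaf1, &leaf2};
  unsigned char *zBuf = malloc(dupSize(&root));
  if( zBuf==0 ) return 1;
  unsigned char *z = zBuf;
  DupNode *pCopy = dupInto(&root, &z);
  printf("%s %s %s\n", pCopy->pLeft->zToken, pCopy->zToken,
         pCopy->pRight->zToken);           /* prints "x + y"             */
  free(zBuf);                              /* one free releases the tree */
  return 0;
}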
+*/ +static int dupedExprNodeSize(Expr *p, int flags){ + int nByte = dupedExprStructSize(p, flags) & 0xfff; + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nByte += sqlite3Strlen30(p->u.zToken)+1; } - ExprSetProperty(p, EP_Dequoted); - if( p->token.dyn==0 ){ - sqlite3TokenCopy(&p->token, &p->token); + return ROUND8(nByte); +} + +/* +** Return the number of bytes required to create a duplicate of the +** expression passed as the first argument. The second argument is a +** mask containing EXPRDUP_XXX flags. +** +** The value returned includes space to create a copy of the Expr struct +** itself and the buffer referred to by Expr.u.zToken, if any. +** +** If the EXPRDUP_REDUCE flag is set, then the return value includes +** space to duplicate all Expr nodes in the tree formed by Expr.pLeft +** and Expr.pRight variables (but not for any structures pointed to or +** descended from the Expr.x.pList or Expr.x.pSelect variables). +*/ +static int dupedExprSize(Expr *p, int flags){ + int nByte = 0; + if( p ){ + nByte = dupedExprNodeSize(p, flags); + if( flags&EXPRDUP_REDUCE ){ + nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); + } } - sqlite3Dequote((char*)p->token.z); + return nByte; } +/* +** This function is similar to sqlite3ExprDup(), except that if pzBuffer +** is not NULL then *pzBuffer is assumed to point to a buffer large enough +** to store the copy of expression p, the copies of p->u.zToken +** (if applicable), and the copies of the p->pLeft and p->pRight expressions, +** if any. Before returning, *pzBuffer is set to the first byte passed the +** portion of the buffer copied into by this function. +*/ +static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){ + Expr *pNew = 0; /* Value to return */ + if( p ){ + const int isReduced = (flags&EXPRDUP_REDUCE); + u8 *zAlloc; + u32 staticFlag = 0; + + assert( pzBuffer==0 || isReduced ); + + /* Figure out where to write the new Expr structure. */ + if( pzBuffer ){ + zAlloc = *pzBuffer; + staticFlag = EP_Static; + }else{ + zAlloc = sqlite3DbMallocRaw(db, dupedExprSize(p, flags)); + } + pNew = (Expr *)zAlloc; + + if( pNew ){ + /* Set nNewSize to the size allocated for the structure pointed to + ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or + ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed + ** by the copy of the p->u.zToken string (if any). + */ + const unsigned nStructSize = dupedExprStructSize(p, flags); + const int nNewSize = nStructSize & 0xfff; + int nToken; + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30(p->u.zToken) + 1; + }else{ + nToken = 0; + } + if( isReduced ){ + assert( ExprHasProperty(p, EP_Reduced)==0 ); + memcpy(zAlloc, p, nNewSize); + }else{ + int nSize = exprStructSize(p); + memcpy(zAlloc, p, nSize); + memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize); + } + + /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */ + pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static); + pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); + pNew->flags |= staticFlag; + + /* Copy the p->u.zToken string, if any. */ + if( nToken ){ + char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; + memcpy(zToken, p->u.zToken, nToken); + } + + if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){ + /* Fill in the pNew->x.pSelect or pNew->x.pList member. 
*/ + if( ExprHasProperty(p, EP_xIsSelect) ){ + pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, isReduced); + }else{ + pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, isReduced); + } + } + + /* Fill in pNew->pLeft and pNew->pRight. */ + if( ExprHasAnyProperty(pNew, EP_Reduced|EP_TokenOnly) ){ + zAlloc += dupedExprNodeSize(p, flags); + if( ExprHasProperty(pNew, EP_Reduced) ){ + pNew->pLeft = exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc); + pNew->pRight = exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc); + } + if( pzBuffer ){ + *pzBuffer = zAlloc; + } + }else{ + pNew->flags2 = 0; + if( !ExprHasAnyProperty(p, EP_TokenOnly) ){ + pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); + pNew->pRight = sqlite3ExprDup(db, p->pRight, 0); + } + } + + } + } + return pNew; +} /* ** The following group of routines make deep copies of expressions, @@ -468,66 +861,40 @@ ** by subsequent calls to sqlite*ListAppend() routines. ** ** Any tables that the SrcList might point to are not duplicated. +** +** The flags parameter contains a combination of the EXPRDUP_XXX flags. +** If the EXPRDUP_REDUCE flag is set, then the structure returned is a +** truncated version of the usual Expr structure that will be stored as +** part of the in-memory representation of the database schema. */ -Expr *sqlite3ExprDup(Expr *p){ - Expr *pNew; - if( p==0 ) return 0; - pNew = sqliteMallocRaw( sizeof(*p) ); - if( pNew==0 ) return 0; - memcpy(pNew, p, sizeof(*pNew)); - if( p->token.z!=0 ){ - pNew->token.z = (u8*)sqliteStrNDup((char*)p->token.z, p->token.n); - pNew->token.dyn = 1; - }else{ - assert( pNew->token.z==0 ); - } - pNew->span.z = 0; - pNew->pLeft = sqlite3ExprDup(p->pLeft); - pNew->pRight = sqlite3ExprDup(p->pRight); - pNew->pList = sqlite3ExprListDup(p->pList); - pNew->pSelect = sqlite3SelectDup(p->pSelect); - return pNew; -} -void sqlite3TokenCopy(Token *pTo, Token *pFrom){ - if( pTo->dyn ) sqliteFree((char*)pTo->z); - if( pFrom->z ){ - pTo->n = pFrom->n; - pTo->z = (u8*)sqliteStrNDup((char*)pFrom->z, pFrom->n); - pTo->dyn = 1; - }else{ - pTo->z = 0; - } +Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){ + return exprDup(db, p, flags, 0); } -ExprList *sqlite3ExprListDup(ExprList *p){ +ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){ ExprList *pNew; struct ExprList_item *pItem, *pOldItem; int i; if( p==0 ) return 0; - pNew = sqliteMalloc( sizeof(*pNew) ); + pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); if( pNew==0 ) return 0; + pNew->iECursor = 0; pNew->nExpr = pNew->nAlloc = p->nExpr; - pNew->a = pItem = sqliteMalloc( p->nExpr*sizeof(p->a[0]) ); + pNew->a = pItem = sqlite3DbMallocRaw(db, p->nExpr*sizeof(p->a[0]) ); if( pItem==0 ){ - sqliteFree(pNew); + sqlite3DbFree(db, pNew); return 0; } pOldItem = p->a; for(i=0; inExpr; i++, pItem++, pOldItem++){ - Expr *pNewExpr, *pOldExpr; - pItem->pExpr = pNewExpr = sqlite3ExprDup(pOldExpr = pOldItem->pExpr); - if( pOldExpr->span.z!=0 && pNewExpr ){ - /* Always make a copy of the span for top-level expressions in the - ** expression list. 
The logic in SELECT processing that determines - ** the names of columns in the result set needs this information */ - sqlite3TokenCopy(&pNewExpr->span, &pOldExpr->span); - } - assert( pNewExpr==0 || pNewExpr->span.z!=0 - || pOldExpr->span.z==0 - || sqlite3MallocFailed() ); - pItem->zName = sqliteStrDup(pOldItem->zName); + Expr *pNewExpr; + Expr *pOldExpr = pOldItem->pExpr; + pItem->pExpr = pNewExpr = sqlite3ExprDup(db, pOldExpr, flags); + pItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan); pItem->sortOrder = pOldItem->sortOrder; - pItem->isAgg = pOldItem->isAgg; pItem->done = 0; + pItem->iCol = pOldItem->iCol; + pItem->iAlias = pOldItem->iAlias; } return pNew; } @@ -540,78 +907,77 @@ */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \ || !defined(SQLITE_OMIT_SUBQUERY) -SrcList *sqlite3SrcListDup(SrcList *p){ +SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ SrcList *pNew; int i; int nByte; if( p==0 ) return 0; nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0); - pNew = sqliteMallocRaw( nByte ); + pNew = sqlite3DbMallocRaw(db, nByte ); if( pNew==0 ) return 0; pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; inSrc; i++){ struct SrcList_item *pNewItem = &pNew->a[i]; struct SrcList_item *pOldItem = &p->a[i]; Table *pTab; - pNewItem->zDatabase = sqliteStrDup(pOldItem->zDatabase); - pNewItem->zName = sqliteStrDup(pOldItem->zName); - pNewItem->zAlias = sqliteStrDup(pOldItem->zAlias); + pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); pNewItem->jointype = pOldItem->jointype; pNewItem->iCursor = pOldItem->iCursor; pNewItem->isPopulated = pOldItem->isPopulated; + pNewItem->zIndex = sqlite3DbStrDup(db, pOldItem->zIndex); + pNewItem->notIndexed = pOldItem->notIndexed; + pNewItem->pIndex = pOldItem->pIndex; pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ pTab->nRef++; } - pNewItem->pSelect = sqlite3SelectDup(pOldItem->pSelect); - pNewItem->pOn = sqlite3ExprDup(pOldItem->pOn); - pNewItem->pUsing = sqlite3IdListDup(pOldItem->pUsing); + pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); + pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags); + pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing); pNewItem->colUsed = pOldItem->colUsed; } return pNew; } -IdList *sqlite3IdListDup(IdList *p){ +IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ IdList *pNew; int i; if( p==0 ) return 0; - pNew = sqliteMallocRaw( sizeof(*pNew) ); + pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); if( pNew==0 ) return 0; pNew->nId = pNew->nAlloc = p->nId; - pNew->a = sqliteMallocRaw( p->nId*sizeof(p->a[0]) ); + pNew->a = sqlite3DbMallocRaw(db, p->nId*sizeof(p->a[0]) ); if( pNew->a==0 ){ - sqliteFree(pNew); + sqlite3DbFree(db, pNew); return 0; } for(i=0; inId; i++){ struct IdList_item *pNewItem = &pNew->a[i]; struct IdList_item *pOldItem = &p->a[i]; - pNewItem->zName = sqliteStrDup(pOldItem->zName); + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pNewItem->idx = pOldItem->idx; } return pNew; } -Select *sqlite3SelectDup(Select *p){ +Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ Select *pNew; if( p==0 ) return 0; - pNew = sqliteMallocRaw( sizeof(*p) ); + pNew = sqlite3DbMallocRaw(db, sizeof(*p) ); if( pNew==0 ) return 0; - pNew->isDistinct = p->isDistinct; - pNew->pEList = sqlite3ExprListDup(p->pEList); - pNew->pSrc = sqlite3SrcListDup(p->pSrc); - 
pNew->pWhere = sqlite3ExprDup(p->pWhere); - pNew->pGroupBy = sqlite3ExprListDup(p->pGroupBy); - pNew->pHaving = sqlite3ExprDup(p->pHaving); - pNew->pOrderBy = sqlite3ExprListDup(p->pOrderBy); + pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); + pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); + pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); + pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); + pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); + pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); pNew->op = p->op; - pNew->pPrior = sqlite3SelectDup(p->pPrior); - pNew->pLimit = sqlite3ExprDup(p->pLimit); - pNew->pOffset = sqlite3ExprDup(p->pOffset); - pNew->iLimit = -1; - pNew->iOffset = -1; - pNew->isResolved = p->isResolved; - pNew->isAgg = p->isAgg; - pNew->usesEphm = 0; - pNew->disallowOrderBy = 0; + pNew->pPrior = sqlite3SelectDup(db, p->pPrior, flags); + pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); + pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); + pNew->iLimit = 0; + pNew->iOffset = 0; + pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; pNew->pRightmost = 0; pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; @@ -619,7 +985,7 @@ return pNew; } #else -Select *sqlite3SelectDup(Select *p){ +Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ assert( p==0 ); return 0; } @@ -629,10 +995,19 @@ /* ** Add a new element to the end of an expression list. If pList is ** initially NULL, then create a new expression list. -*/ -ExprList *sqlite3ExprListAppend(ExprList *pList, Expr *pExpr, Token *pName){ +** +** If a memory allocation error occurs, the entire list is freed and +** NULL is returned. If non-NULL is returned, then it is guaranteed +** that the new entry was successfully appended. +*/ +ExprList *sqlite3ExprListAppend( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to append. Might be NULL */ + Expr *pExpr /* Expression to be appended. Might be NULL */ +){ + sqlite3 *db = pParse->db; if( pList==0 ){ - pList = sqliteMalloc( sizeof(ExprList) ); + pList = sqlite3DbMallocZero(db, sizeof(ExprList) ); if( pList==0 ){ goto no_mem; } @@ -641,233 +1016,170 @@ if( pList->nAlloc<=pList->nExpr ){ struct ExprList_item *a; int n = pList->nAlloc*2 + 4; - a = sqliteRealloc(pList->a, n*sizeof(pList->a[0])); + a = sqlite3DbRealloc(db, pList->a, n*sizeof(pList->a[0])); if( a==0 ){ goto no_mem; } pList->a = a; - pList->nAlloc = n; + pList->nAlloc = sqlite3DbMallocSize(db, a)/sizeof(a[0]); } assert( pList->a!=0 ); - if( pExpr || pName ){ + if( 1 ){ struct ExprList_item *pItem = &pList->a[pList->nExpr++]; memset(pItem, 0, sizeof(*pItem)); - pItem->zName = sqlite3NameFromToken(pName); pItem->pExpr = pExpr; } return pList; no_mem: /* Avoid leaking memory if malloc has failed. */ - sqlite3ExprDelete(pExpr); - sqlite3ExprListDelete(pList); + sqlite3ExprDelete(db, pExpr); + sqlite3ExprListDelete(db, pList); return 0; } /* +** Set the ExprList.a[].zName element of the most recently added item +** on the expression list. +** +** pList might be NULL following an OOM error. But pName should never be +** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag +** is set. +*/ +void sqlite3ExprListSetName( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to add the span. 
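
Aside: sqlite3ExprListAppend() above grows its item array geometrically (nAlloc*2 + 4) and, in the new code, recomputes the usable capacity from sqlite3DbMallocSize() after the realloc. A self-contained sketch of the basic append-with-doubling pattern; the vector type and names are made up, and the usable-size refinement is omitted because plain malloc has no portable equivalent:

    #include <stdlib.h>

    typedef struct IntVec { int *a; int n; int nAlloc; } IntVec;

    /* Append one value, growing the array geometrically.  Returns 0 on OOM,
    ** in which case the existing array is left intact. */
    static int intVecAppend(IntVec *p, int v){
      if( p->n>=p->nAlloc ){
        int nNew = p->nAlloc*2 + 4;                  /* same growth rule as above */
        int *aNew = (int*)realloc(p->a, nNew*sizeof(int));
        if( aNew==0 ) return 0;
        p->a = aNew;
        p->nAlloc = nNew;
      }
      p->a[p->n++] = v;
      return 1;
    }
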
*/ + Token *pName, /* Name to be added */ + int dequote /* True to cause the name to be dequoted */ +){ + assert( pList!=0 || pParse->db->mallocFailed!=0 ); + if( pList ){ + struct ExprList_item *pItem; + assert( pList->nExpr>0 ); + pItem = &pList->a[pList->nExpr-1]; + assert( pItem->zName==0 ); + pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n); + if( dequote && pItem->zName ) sqlite3Dequote(pItem->zName); + } +} + +/* +** Set the ExprList.a[].zSpan element of the most recently added item +** on the expression list. +** +** pList might be NULL following an OOM error. But pSpan should never be +** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag +** is set. +*/ +void sqlite3ExprListSetSpan( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to add the span. */ + ExprSpan *pSpan /* The span to be added */ +){ + sqlite3 *db = pParse->db; + assert( pList!=0 || db->mallocFailed!=0 ); + if( pList ){ + struct ExprList_item *pItem = &pList->a[pList->nExpr-1]; + assert( pList->nExpr>0 ); + assert( db->mallocFailed || pItem->pExpr==pSpan->pExpr ); + sqlite3DbFree(db, pItem->zSpan); + pItem->zSpan = sqlite3DbStrNDup(db, (char*)pSpan->zStart, + (int)(pSpan->zEnd - pSpan->zStart)); + } +} + +/* ** If the expression list pEList contains more than iLimit elements, ** leave an error message in pParse. */ void sqlite3ExprListCheckLength( Parse *pParse, ExprList *pEList, - int iLimit, const char *zObject ){ - if( pEList && pEList->nExpr>iLimit ){ + int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; + testcase( pEList && pEList->nExpr==mx ); + testcase( pEList && pEList->nExpr==mx+1 ); + if( pEList && pEList->nExpr>mx ){ sqlite3ErrorMsg(pParse, "too many columns in %s", zObject); } } - -#if SQLITE_MAX_EXPR_DEPTH>0 -/* The following three functions, heightOfExpr(), heightOfExprList() -** and heightOfSelect(), are used to determine the maximum height -** of any expression tree referenced by the structure passed as the -** first argument. -** -** If this maximum height is greater than the current value pointed -** to by pnHeight, the second parameter, then set *pnHeight to that -** value. +/* +** Delete an entire expression list. */ -static void heightOfExpr(Expr *p, int *pnHeight){ - if( p ){ - if( p->nHeight>*pnHeight ){ - *pnHeight = p->nHeight; - } +void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ + int i; + struct ExprList_item *pItem; + if( pList==0 ) return; + assert( pList->a!=0 || (pList->nExpr==0 && pList->nAlloc==0) ); + assert( pList->nExpr<=pList->nAlloc ); + for(pItem=pList->a, i=0; inExpr; i++, pItem++){ + sqlite3ExprDelete(db, pItem->pExpr); + sqlite3DbFree(db, pItem->zName); + sqlite3DbFree(db, pItem->zSpan); } -} -static void heightOfExprList(ExprList *p, int *pnHeight){ - if( p ){ - int i; - for(i=0; inExpr; i++){ - heightOfExpr(p->a[i].pExpr, pnHeight); - } - } -} -static void heightOfSelect(Select *p, int *pnHeight){ - if( p ){ - heightOfExpr(p->pWhere, pnHeight); - heightOfExpr(p->pHaving, pnHeight); - heightOfExpr(p->pLimit, pnHeight); - heightOfExpr(p->pOffset, pnHeight); - heightOfExprList(p->pEList, pnHeight); - heightOfExprList(p->pGroupBy, pnHeight); - heightOfExprList(p->pOrderBy, pnHeight); - heightOfSelect(p->pPrior, pnHeight); - } -} - -/* -** Set the Expr.nHeight variable in the structure passed as an -** argument. An expression with no children, Expr.pList or -** Expr.pSelect member has a height of 1. Any other expression -** has a height equal to the maximum height of any other -** referenced Expr plus one. 
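
Aside: the reworked sqlite3ExprListCheckLength() reads the per-connection limit aLimit[SQLITE_LIMIT_COLUMN] rather than a fixed compile-time maximum. From application code that limit is the one adjusted with the public sqlite3_limit() interface; a minimal usage example:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      int oldLimit, prior;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      oldLimit = sqlite3_limit(db, SQLITE_LIMIT_COLUMN, -1);   /* negative: just query */
      prior    = sqlite3_limit(db, SQLITE_LIMIT_COLUMN, 100);  /* lower it; returns old value */
      printf("column limit was %d (set call returned %d)\n", oldLimit, prior);
      sqlite3_close(db);
      return 0;
    }
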
-*/ -void sqlite3ExprSetHeight(Expr *p){ - int nHeight = 0; - heightOfExpr(p->pLeft, &nHeight); - heightOfExpr(p->pRight, &nHeight); - heightOfExprList(p->pList, &nHeight); - heightOfSelect(p->pSelect, &nHeight); - p->nHeight = nHeight + 1; + sqlite3DbFree(db, pList->a); + sqlite3DbFree(db, pList); } /* -** Return the maximum height of any expression tree referenced -** by the select statement passed as an argument. -*/ -int sqlite3SelectExprHeight(Select *p){ - int nHeight = 0; - heightOfSelect(p, &nHeight); - return nHeight; -} -#endif - -/* -** Delete an entire expression list. -*/ -void sqlite3ExprListDelete(ExprList *pList){ - int i; - struct ExprList_item *pItem; - if( pList==0 ) return; - assert( pList->a!=0 || (pList->nExpr==0 && pList->nAlloc==0) ); - assert( pList->nExpr<=pList->nAlloc ); - for(pItem=pList->a, i=0; inExpr; i++, pItem++){ - sqlite3ExprDelete(pItem->pExpr); - sqliteFree(pItem->zName); - } - sqliteFree(pList->a); - sqliteFree(pList); -} - -/* -** Walk an expression tree. Call xFunc for each node visited. +** These routines are Walker callbacks. Walker.u.pi is a pointer +** to an integer. These routines are checking an expression to see +** if it is a constant. Set *Walker.u.pi to 0 if the expression is +** not constant. ** -** The return value from xFunc determines whether the tree walk continues. -** 0 means continue walking the tree. 1 means do not walk children -** of the current node but continue with siblings. 2 means abandon -** the tree walk completely. +** These callback routines are used to implement the following: ** -** The return value from this routine is 1 to abandon the tree walk -** and 0 to continue. +** sqlite3ExprIsConstant() +** sqlite3ExprIsConstantNotJoin() +** sqlite3ExprIsConstantOrFunction() ** -** NOTICE: This routine does *not* descend into subqueries. */ -static int walkExprList(ExprList *, int (*)(void *, Expr*), void *); -static int walkExprTree(Expr *pExpr, int (*xFunc)(void*,Expr*), void *pArg){ - int rc; - if( pExpr==0 ) return 0; - rc = (*xFunc)(pArg, pExpr); - if( rc==0 ){ - if( walkExprTree(pExpr->pLeft, xFunc, pArg) ) return 1; - if( walkExprTree(pExpr->pRight, xFunc, pArg) ) return 1; - if( walkExprList(pExpr->pList, xFunc, pArg) ) return 1; - } - return rc>1; -} +static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ -/* -** Call walkExprTree() for every expression in list p. -*/ -static int walkExprList(ExprList *p, int (*xFunc)(void *, Expr*), void *pArg){ - int i; - struct ExprList_item *pItem; - if( !p ) return 0; - for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ - if( walkExprTree(pItem->pExpr, xFunc, pArg) ) return 1; - } - return 0; -} - -/* -** Call walkExprTree() for every expression in Select p, not including -** expressions that are part of sub-selects in any FROM clause or the LIMIT -** or OFFSET expressions.. -*/ -static int walkSelectExpr(Select *p, int (*xFunc)(void *, Expr*), void *pArg){ - walkExprList(p->pEList, xFunc, pArg); - walkExprTree(p->pWhere, xFunc, pArg); - walkExprList(p->pGroupBy, xFunc, pArg); - walkExprTree(p->pHaving, xFunc, pArg); - walkExprList(p->pOrderBy, xFunc, pArg); - if( p->pPrior ){ - walkSelectExpr(p->pPrior, xFunc, pArg); - } - return 0; -} - - -/* -** This routine is designed as an xFunc for walkExprTree(). -** -** pArg is really a pointer to an integer. If we can tell by looking -** at pExpr that the expression that contains pExpr is not a constant -** expression, then set *pArg to 0 and return 2 to abandon the tree walk. 
-** If pExpr does does not disqualify the expression from being a constant -** then do nothing. -** -** After walking the whole tree, if no nodes are found that disqualify -** the expression as constant, then we assume the whole expression -** is constant. See sqlite3ExprIsConstant() for additional information. -*/ -static int exprNodeIsConstant(void *pArg, Expr *pExpr){ - int *pN = (int*)pArg; - - /* If *pArg is 3 then any term of the expression that comes from + /* If pWalker->u.i is 3 then any term of the expression that comes from ** the ON or USING clauses of a join disqualifies the expression ** from being considered constant. */ - if( (*pN)==3 && ExprHasAnyProperty(pExpr, EP_FromJoin) ){ - *pN = 0; - return 2; + if( pWalker->u.i==3 && ExprHasAnyProperty(pExpr, EP_FromJoin) ){ + pWalker->u.i = 0; + return WRC_Abort; } switch( pExpr->op ){ /* Consider functions to be constant if all their arguments are constant - ** and *pArg==2 */ + ** and pWalker->u.i==2 */ case TK_FUNCTION: - if( (*pN)==2 ) return 0; + if( pWalker->u.i==2 ) return 0; /* Fall through */ case TK_ID: case TK_COLUMN: - case TK_DOT: case TK_AGG_FUNCTION: case TK_AGG_COLUMN: -#ifndef SQLITE_OMIT_SUBQUERY - case TK_SELECT: - case TK_EXISTS: -#endif - *pN = 0; - return 2; - case TK_IN: - if( pExpr->pSelect ){ - *pN = 0; - return 2; - } + testcase( pExpr->op==TK_ID ); + testcase( pExpr->op==TK_COLUMN ); + testcase( pExpr->op==TK_AGG_FUNCTION ); + testcase( pExpr->op==TK_AGG_COLUMN ); + pWalker->u.i = 0; + return WRC_Abort; default: - return 0; - } + testcase( pExpr->op==TK_SELECT ); /* selectNodeIsConstant will disallow */ + testcase( pExpr->op==TK_EXISTS ); /* selectNodeIsConstant will disallow */ + return WRC_Continue; + } +} +static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){ + UNUSED_PARAMETER(NotUsed); + pWalker->u.i = 0; + return WRC_Abort; +} +static int exprIsConst(Expr *p, int initFlag){ + Walker w; + w.u.i = initFlag; + w.xExprCallback = exprNodeIsConstant; + w.xSelectCallback = selectNodeIsConstant; + sqlite3WalkExpr(&w, p); + return w.u.i; } /* @@ -879,9 +1191,7 @@ ** a constant. */ int sqlite3ExprIsConstant(Expr *p){ - int isConst = 1; - walkExprTree(p, exprNodeIsConstant, &isConst); - return isConst; + return exprIsConst(p, 1); } /* @@ -891,9 +1201,7 @@ ** an ON or USING clause. */ int sqlite3ExprIsConstantNotJoin(Expr *p){ - int isConst = 3; - walkExprTree(p, exprNodeIsConstant, &isConst); - return isConst!=0; + return exprIsConst(p, 3); } /* @@ -906,9 +1214,7 @@ ** a constant. */ int sqlite3ExprIsConstantOrFunction(Expr *p){ - int isConst = 2; - walkExprTree(p, exprNodeIsConstant, &isConst); - return isConst!=0; + return exprIsConst(p, 2); } /* @@ -918,27 +1224,39 @@ ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. 
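
Aside: the constant-expression tests are now expressed as Walker callbacks that return continue/abort codes instead of the bespoke walkExprTree() being removed in this hunk. A toy standalone version of that callback style; the node, walker, and return-code names are invented:

    #include <stdio.h>

    enum { WALK_CONTINUE = 0, WALK_ABORT = 1 };

    typedef struct Node { int isConstant; struct Node *pLeft, *pRight; } Node;
    typedef struct Walk { int answer; int (*xNode)(struct Walk*, Node*); } Walk;

    static int walkTree(Walk *w, Node *p){
      if( p==0 ) return WALK_CONTINUE;
      if( w->xNode(w, p)==WALK_ABORT ) return WALK_ABORT;
      if( walkTree(w, p->pLeft)==WALK_ABORT ) return WALK_ABORT;
      return walkTree(w, p->pRight);
    }

    /* Callback: clear the answer and stop as soon as a non-constant node is seen. */
    static int nodeIsConstant(Walk *w, Node *p){
      if( !p->isConstant ){ w->answer = 0; return WALK_ABORT; }
      return WALK_CONTINUE;
    }

    static int treeIsConstant(Node *p){
      Walk w = { 1, nodeIsConstant };
      walkTree(&w, p);
      return w.answer;
    }

    int main(void){
      Node leaf = {1,0,0}, var = {0,0,0}, root = {1,&leaf,&var};
      printf("%d\n", treeIsConstant(&root));   /* prints 0: one node is non-constant */
      return 0;
    }
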
*/ int sqlite3ExprIsInteger(Expr *p, int *pValue){ + int rc = 0; + if( p->flags & EP_IntValue ){ + *pValue = p->u.iValue; + return 1; + } switch( p->op ){ case TK_INTEGER: { - if( sqlite3GetInt32((char*)p->token.z, pValue) ){ - return 1; - } + rc = sqlite3GetInt32(p->u.zToken, pValue); + assert( rc==0 ); break; } case TK_UPLUS: { - return sqlite3ExprIsInteger(p->pLeft, pValue); + rc = sqlite3ExprIsInteger(p->pLeft, pValue); + break; } case TK_UMINUS: { int v; if( sqlite3ExprIsInteger(p->pLeft, &v) ){ *pValue = -v; - return 1; + rc = 1; } break; } default: break; } - return 0; + if( rc ){ + assert( ExprHasAnyProperty(p, EP_Reduced|EP_TokenOnly) + || (p->flags2 & EP2_MallocedToken)==0 ); + p->op = TK_INTEGER; + p->flags |= EP_IntValue; + p->u.iValue = *pValue; + } + return rc; } /* @@ -952,547 +1270,207 @@ } /* -** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up -** that name in the set of source tables in pSrcList and make the pExpr -** expression node refer back to that source column. The following changes -** are made to pExpr: -** -** pExpr->iDb Set the index in db->aDb[] of the database holding -** the table. -** pExpr->iTable Set to the cursor number for the table obtained -** from pSrcList. -** pExpr->iColumn Set to the column number within the table. -** pExpr->op Set to TK_COLUMN. -** pExpr->pLeft Any expression this points to is deleted -** pExpr->pRight Any expression this points to is deleted. -** -** The pDbToken is the name of the database (the "X"). This value may be -** NULL meaning that name is of the form Y.Z or Z. Any available database -** can be used. The pTableToken is the name of the table (the "Y"). This -** value can be NULL if pDbToken is also NULL. If pTableToken is NULL it -** means that the form of the name is Z and that columns from any table -** can be used. -** -** If the name cannot be resolved unambiguously, leave an error message -** in pParse and return non-zero. Return zero on success. -*/ -static int lookupName( - Parse *pParse, /* The parsing context */ - Token *pDbToken, /* Name of the database containing table, or NULL */ - Token *pTableToken, /* Name of table containing column, or NULL */ - Token *pColumnToken, /* Name of the column. */ - NameContext *pNC, /* The name context used to resolve the name */ - Expr *pExpr /* Make this EXPR node point to the selected column */ -){ - char *zDb = 0; /* Name of the database. The "X" in X.Y.Z */ - char *zTab = 0; /* Name of the table. The "Y" in X.Y.Z or Y.Z */ - char *zCol = 0; /* Name of the column. 
The "Z" */ - int i, j; /* Loop counters */ - int cnt = 0; /* Number of matching column names */ - int cntTab = 0; /* Number of matching table names */ - sqlite3 *db = pParse->db; /* The database */ - struct SrcList_item *pItem; /* Use for looping over pSrcList items */ - struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ - NameContext *pTopNC = pNC; /* First namecontext in the list */ - - assert( pColumnToken && pColumnToken->z ); /* The Z in X.Y.Z cannot be NULL */ - zDb = sqlite3NameFromToken(pDbToken); - zTab = sqlite3NameFromToken(pTableToken); - zCol = sqlite3NameFromToken(pColumnToken); - if( sqlite3MallocFailed() ){ - goto lookupname_end; - } - - pExpr->iTable = -1; - while( pNC && cnt==0 ){ - ExprList *pEList; - SrcList *pSrcList = pNC->pSrcList; - - if( pSrcList ){ - for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ - Table *pTab; - int iDb; - Column *pCol; - - pTab = pItem->pTab; - assert( pTab!=0 ); - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( pTab->nCol>0 ); - if( zTab ){ - if( pItem->zAlias ){ - char *zTabName = pItem->zAlias; - if( sqlite3StrICmp(zTabName, zTab)!=0 ) continue; - }else{ - char *zTabName = pTab->zName; - if( zTabName==0 || sqlite3StrICmp(zTabName, zTab)!=0 ) continue; - if( zDb!=0 && sqlite3StrICmp(db->aDb[iDb].zName, zDb)!=0 ){ - continue; - } - } - } - if( 0==(cntTab++) ){ - pExpr->iTable = pItem->iCursor; - pExpr->pSchema = pTab->pSchema; - pMatch = pItem; - } - for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ - if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ - const char *zColl = pTab->aCol[j].zColl; - IdList *pUsing; - cnt++; - pExpr->iTable = pItem->iCursor; - pMatch = pItem; - pExpr->pSchema = pTab->pSchema; - /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ - pExpr->iColumn = j==pTab->iPKey ? -1 : j; - pExpr->affinity = pTab->aCol[j].affinity; - if( (pExpr->flags & EP_ExpCollate)==0 ){ - pExpr->pColl = sqlite3FindCollSeq(db, ENC(db), zColl,-1, 0); - } - if( inSrc-1 ){ - if( pItem[1].jointype & JT_NATURAL ){ - /* If this match occurred in the left table of a natural join, - ** then skip the right table to avoid a duplicate match */ - pItem++; - i++; - }else if( (pUsing = pItem[1].pUsing)!=0 ){ - /* If this match occurs on a column that is in the USING clause - ** of a join, skip the search of the right table of the join - ** to avoid a duplicate match there. */ - int k; - for(k=0; knId; k++){ - if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ){ - pItem++; - i++; - break; - } - } - } - } - break; - } - } - } - } - -#ifndef SQLITE_OMIT_TRIGGER - /* If we have not already resolved the name, then maybe - ** it is a new.* or old.* trigger argument reference - */ - if( zDb==0 && zTab!=0 && cnt==0 && pParse->trigStack!=0 ){ - TriggerStack *pTriggerStack = pParse->trigStack; - Table *pTab = 0; - if( pTriggerStack->newIdx != -1 && sqlite3StrICmp("new", zTab) == 0 ){ - pExpr->iTable = pTriggerStack->newIdx; - assert( pTriggerStack->pTab ); - pTab = pTriggerStack->pTab; - }else if( pTriggerStack->oldIdx != -1 && sqlite3StrICmp("old", zTab)==0 ){ - pExpr->iTable = pTriggerStack->oldIdx; - assert( pTriggerStack->pTab ); - pTab = pTriggerStack->pTab; - } - - if( pTab ){ - int iCol; - Column *pCol = pTab->aCol; - - pExpr->pSchema = pTab->pSchema; - cntTab++; - for(iCol=0; iCol < pTab->nCol; iCol++, pCol++) { - if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ - const char *zColl = pTab->aCol[iCol].zColl; - cnt++; - pExpr->iColumn = iCol==pTab->iPKey ? 
-1 : iCol; - pExpr->affinity = pTab->aCol[iCol].affinity; - if( (pExpr->flags & EP_ExpCollate)==0 ){ - pExpr->pColl = sqlite3FindCollSeq(db, ENC(db), zColl,-1, 0); - } - pExpr->pTab = pTab; - break; - } - } - } - } -#endif /* !defined(SQLITE_OMIT_TRIGGER) */ - - /* - ** Perhaps the name is a reference to the ROWID - */ - if( cnt==0 && cntTab==1 && sqlite3IsRowid(zCol) ){ - cnt = 1; - pExpr->iColumn = -1; - pExpr->affinity = SQLITE_AFF_INTEGER; - } +** Return true if we are able to the IN operator optimization on a +** query of the form +** +** x IN (SELECT ...) +** +** Where the SELECT... clause is as specified by the parameter to this +** routine. +** +** The Select object passed in has already been preprocessed and no +** errors have been found. +*/ +#ifndef SQLITE_OMIT_SUBQUERY +static int isCandidateForInOpt(Select *p){ + SrcList *pSrc; + ExprList *pEList; + Table *pTab; + if( p==0 ) return 0; /* right-hand side of IN is SELECT */ + if( p->pPrior ) return 0; /* Not a compound SELECT */ + if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ + testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); + testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); + return 0; /* No DISTINCT keyword and no aggregate functions */ + } + assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */ + if( p->pLimit ) return 0; /* Has no LIMIT clause */ + assert( p->pOffset==0 ); /* No LIMIT means no OFFSET */ + if( p->pWhere ) return 0; /* Has no WHERE clause */ + pSrc = p->pSrc; + assert( pSrc!=0 ); + if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ + if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ + pTab = pSrc->a[0].pTab; + if( NEVER(pTab==0) ) return 0; + assert( pTab->pSelect==0 ); /* FROM clause is not a view */ + if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ + pEList = p->pEList; + if( pEList->nExpr!=1 ) return 0; /* One column in the result set */ + if( pEList->a[0].pExpr->op!=TK_COLUMN ) return 0; /* Result is a column */ + return 1; +} +#endif /* SQLITE_OMIT_SUBQUERY */ - /* - ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z - ** might refer to an result-set alias. This happens, for example, when - ** we are resolving names in the WHERE clause of the following command: - ** - ** SELECT a+b AS x FROM table WHERE x<10; - ** - ** In cases like this, replace pExpr with a copy of the expression that - ** forms the result set entry ("a+b" in the example) and return immediately. - ** Note that the expression in the result set should have already been - ** resolved by the time the WHERE clause is resolved. +/* +** This function is used by the implementation of the IN (...) operator. +** It's job is to find or create a b-tree structure that may be used +** either to test for membership of the (...) set or to iterate through +** its members, skipping duplicates. +** +** The index of the cursor opened on the b-tree (database table, database index +** or ephermal table) is stored in pX->iTable before this function returns. +** The returned value of this function indicates the b-tree type, as follows: +** +** IN_INDEX_ROWID - The cursor was opened on a database table. +** IN_INDEX_INDEX - The cursor was opened on a database index. +** IN_INDEX_EPH - The cursor was opened on a specially created and +** populated epheremal table. 
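
Aside: a rough standalone restatement of the strategy choice this comment goes on to describe — rowid lookup, an existing index, or a transient b-tree — for evaluating "x IN (SELECT y FROM t)". All type and function names here are invented; the real decision also checks collation and affinity compatibility, as shown further down in this hunk:

    enum InStrategy { USE_ROWID_TABLE, USE_EXISTING_INDEX, USE_EPHEMERAL_TABLE };

    typedef struct ColInfo {
      int isIntegerPrimaryKey;    /* y is an alias for the rowid */
      int hasUsableUniqueIndex;   /* single-column UNIQUE index on y, right collation */
      int hasUsableIndex;         /* any single-column index on y, right collation */
    } ColInfo;

    /* mustBeUnique is true when the caller wants to iterate the set without duplicates. */
    static enum InStrategy chooseInStrategy(const ColInfo *p, int mustBeUnique){
      if( p->isIntegerPrimaryKey )  return USE_ROWID_TABLE;
      if( p->hasUsableUniqueIndex ) return USE_EXISTING_INDEX;
      if( p->hasUsableIndex && !mustBeUnique ) return USE_EXISTING_INDEX;
      return USE_EPHEMERAL_TABLE;   /* fall back to building a transient table */
    }
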
+** +** An existing b-tree may only be used if the SELECT is of the simple +** form: +** +** SELECT FROM +** +** If the prNotFound parameter is 0, then the b-tree will be used to iterate +** through the set members, skipping any duplicates. In this case an +** epheremal table must be used unless the selected is guaranteed +** to be unique - either because it is an INTEGER PRIMARY KEY or it +** has a UNIQUE constraint or UNIQUE index. +** +** If the prNotFound parameter is not 0, then the b-tree will be used +** for fast set membership tests. In this case an epheremal table must +** be used unless is an INTEGER PRIMARY KEY or an index can +** be found with as its left-most column. +** +** When the b-tree is being used for membership tests, the calling function +** needs to know whether or not the structure contains an SQL NULL +** value in order to correctly evaluate expressions like "X IN (Y, Z)". +** If there is a chance that the b-tree might contain a NULL value at +** runtime, then a register is allocated and the register number written +** to *prNotFound. If there is no chance that the b-tree contains a +** NULL value, then *prNotFound is left unchanged. +** +** If a register is allocated and its location stored in *prNotFound, then +** its initial value is NULL. If the b-tree does not remain constant +** for the duration of the query (i.e. the SELECT that generates the b-tree +** is a correlated subquery) then the value of the allocated register is +** reset to NULL each time the b-tree is repopulated. This allows the +** caller to use vdbe code equivalent to the following: +** +** if( register==NULL ){ +** has_null = +** register = 1 +** } +** +** in order to avoid running the +** test more often than is necessary. +*/ +#ifndef SQLITE_OMIT_SUBQUERY +int sqlite3FindInIndex(Parse *pParse, Expr *pX, int *prNotFound){ + Select *p; /* SELECT to the right of IN operator */ + int eType = 0; /* Type of RHS table. IN_INDEX_* */ + int iTab = pParse->nTab++; /* Cursor of the RHS table */ + int mustBeUnique = (prNotFound==0); /* True if RHS must be unique */ + + /* Check to see if an existing table or index can be used to + ** satisfy the query. This is preferable to generating a new + ** ephemeral table. + */ + p = (ExprHasProperty(pX, EP_xIsSelect) ? pX->x.pSelect : 0); + if( ALWAYS(pParse->nErr==0) && isCandidateForInOpt(p) ){ + sqlite3 *db = pParse->db; /* Database connection */ + Expr *pExpr = p->pEList->a[0].pExpr; /* Expression */ + int iCol = pExpr->iColumn; /* Index of column */ + Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */ + Table *pTab = p->pSrc->a[0].pTab; /* Table
<table>. */ + int iDb; /* Database idx for pTab */ + + /* Code an OP_VerifyCookie and OP_TableLock for <table>
. */ + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + sqlite3CodeVerifySchema(pParse, iDb); + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + + /* This function is only called from two places. In both cases the vdbe + ** has already been allocated. So assume sqlite3GetVdbe() is always + ** successful here. */ - if( cnt==0 && (pEList = pNC->pEList)!=0 && zTab==0 ){ - for(j=0; jnExpr; j++){ - char *zAs = pEList->a[j].zName; - if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ - Expr *pDup, *pOrig; - assert( pExpr->pLeft==0 && pExpr->pRight==0 ); - assert( pExpr->pList==0 ); - assert( pExpr->pSelect==0 ); - pOrig = pEList->a[j].pExpr; - if( !pNC->allowAgg && ExprHasProperty(pOrig, EP_Agg) ){ - sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); - sqliteFree(zCol); - return 2; - } - pDup = sqlite3ExprDup(pOrig); - if( pExpr->flags & EP_ExpCollate ){ - pDup->pColl = pExpr->pColl; - pDup->flags |= EP_ExpCollate; - } - if( pExpr->span.dyn ) sqliteFree((char*)pExpr->span.z); - if( pExpr->token.dyn ) sqliteFree((char*)pExpr->token.z); - memcpy(pExpr, pDup, sizeof(*pExpr)); - sqliteFree(pDup); - cnt = 1; - pMatch = 0; - assert( zTab==0 && zDb==0 ); - goto lookupname_end_2; - } - } - } + assert(v); + if( iCol<0 ){ + int iMem = ++pParse->nMem; + int iAddr; + sqlite3VdbeUsesBtree(v, iDb); - /* Advance to the next name context. The loop will exit when either - ** we have a match (cnt>0) or when we run out of name contexts. - */ - if( cnt==0 ){ - pNC = pNC->pNext; - } - } + iAddr = sqlite3VdbeAddOp1(v, OP_If, iMem); + sqlite3VdbeAddOp2(v, OP_Integer, 1, iMem); - /* - ** If X and Y are NULL (in other words if only the column name Z is - ** supplied) and the value of Z is enclosed in double-quotes, then - ** Z is a string literal if it doesn't match any column names. In that - ** case, we need to return right away and not make any changes to - ** pExpr. - ** - ** Because no reference was made to outer contexts, the pNC->nRef - ** fields are not changed in any context. - */ - if( cnt==0 && zTab==0 && pColumnToken->z[0]=='"' ){ - sqliteFree(zCol); - return 0; - } + sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); + eType = IN_INDEX_ROWID; - /* - ** cnt==0 means there was not match. cnt>1 means there were two or - ** more matches. Either way, we have an error. - */ - if( cnt!=1 ){ - char *z = 0; - char *zErr; - zErr = cnt==0 ? "no such column: %s" : "ambiguous column name: %s"; - if( zDb ){ - sqlite3SetString(&z, zDb, ".", zTab, ".", zCol, (char*)0); - }else if( zTab ){ - sqlite3SetString(&z, zTab, ".", zCol, (char*)0); + sqlite3VdbeJumpHere(v, iAddr); }else{ - z = sqliteStrDup(zCol); - } - sqlite3ErrorMsg(pParse, zErr, z); - sqliteFree(z); - pTopNC->nErr++; - } + Index *pIdx; /* Iterator variable */ - /* If a column from a table in pSrcList is referenced, then record - ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes - ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the - ** column number is greater than the number of bits in the bitmask - ** then set the high-order bit of the bitmask. 
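
Aside: the colUsed bookkeeping referenced here (and in the assignment that follows) records each referenced column as one bit of a Bitmask, saturating at the most significant bit when the table has more columns than the mask has bits. A tiny illustration, assuming a 64-bit unsigned Bitmask as SQLite uses:

    #include <stdint.h>

    typedef uint64_t Bitmask;

    /* Record that column iCol is used; columns beyond the mask width all
    ** collapse onto the high-order bit. */
    static Bitmask markColumnUsed(Bitmask colUsed, int iCol){
      int n = iCol;
      if( n >= (int)(sizeof(Bitmask)*8) ) n = (int)(sizeof(Bitmask)*8) - 1;
      return colUsed | (((Bitmask)1) << n);
    }
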
- */ - if( pExpr->iColumn>=0 && pMatch!=0 ){ - int n = pExpr->iColumn; - if( n>=sizeof(Bitmask)*8 ){ - n = sizeof(Bitmask)*8-1; - } - assert( pMatch->iCursor==pExpr->iTable ); - pMatch->colUsed |= ((Bitmask)1)<pLeft); - pExpr->pLeft = 0; - sqlite3ExprDelete(pExpr->pRight); - pExpr->pRight = 0; - pExpr->op = TK_COLUMN; -lookupname_end_2: - sqliteFree(zCol); - if( cnt==1 ){ - assert( pNC!=0 ); - sqlite3AuthRead(pParse, pExpr, pNC->pSrcList); - if( pMatch && !pMatch->pSelect ){ - pExpr->pTab = pMatch->pTab; - } - /* Increment the nRef value on all name contexts from TopNC up to - ** the point where the name matched. */ - for(;;){ - assert( pTopNC!=0 ); - pTopNC->nRef++; - if( pTopNC==pNC ) break; - pTopNC = pTopNC->pNext; - } - return 0; - } else { - return 1; - } -} + /* The collation sequence used by the comparison. If an index is to + ** be used in place of a temp-table, it must be ordered according + ** to this collation sequence. */ + CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pExpr); + + /* Check that the affinity that will be used to perform the + ** comparison is the same as the affinity of the column. If + ** it is not, it is not possible to use any index. + */ + char aff = comparisonAffinity(pX); + int affinity_ok = (pTab->aCol[iCol].affinity==aff||aff==SQLITE_AFF_NONE); -/* -** This routine is designed as an xFunc for walkExprTree(). -** -** Resolve symbolic names into TK_COLUMN operators for the current -** node in the expression tree. Return 0 to continue the search down -** the tree or 2 to abort the tree walk. -** -** This routine also does error checking and name resolution for -** function names. The operator for aggregate functions is changed -** to TK_AGG_FUNCTION. -*/ -static int nameResolverStep(void *pArg, Expr *pExpr){ - NameContext *pNC = (NameContext*)pArg; - Parse *pParse; - - if( pExpr==0 ) return 1; - assert( pNC!=0 ); - pParse = pNC->pParse; - - if( ExprHasAnyProperty(pExpr, EP_Resolved) ) return 1; - ExprSetProperty(pExpr, EP_Resolved); -#ifndef NDEBUG - if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ - SrcList *pSrcList = pNC->pSrcList; - int i; - for(i=0; ipSrcList->nSrc; i++){ - assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursornTab); - } - } -#endif - switch( pExpr->op ){ - /* Double-quoted strings (ex: "abc") are used as identifiers if - ** possible. Otherwise they remain as strings. Single-quoted - ** strings (ex: 'abc') are always string literals. - */ - case TK_STRING: { - if( pExpr->token.z[0]=='\'' ) break; - /* Fall thru into the TK_ID case if this is a double-quoted string */ - } - /* A lone identifier is the name of a column. 
- */ - case TK_ID: { - lookupName(pParse, 0, 0, &pExpr->token, pNC, pExpr); - return 1; - } + for(pIdx=pTab->pIndex; pIdx && eType==0 && affinity_ok; pIdx=pIdx->pNext){ + if( (pIdx->aiColumn[0]==iCol) + && sqlite3FindCollSeq(db, ENC(db), pIdx->azColl[0], 0)==pReq + && (!mustBeUnique || (pIdx->nColumn==1 && pIdx->onError!=OE_None)) + ){ + int iMem = ++pParse->nMem; + int iAddr; + char *pKey; - /* A table name and column name: ID.ID - ** Or a database, table and column: ID.ID.ID - */ - case TK_DOT: { - Token *pColumn; - Token *pTable; - Token *pDb; - Expr *pRight; - - /* if( pSrcList==0 ) break; */ - pRight = pExpr->pRight; - if( pRight->op==TK_ID ){ - pDb = 0; - pTable = &pExpr->pLeft->token; - pColumn = &pRight->token; - }else{ - assert( pRight->op==TK_DOT ); - pDb = &pExpr->pLeft->token; - pTable = &pRight->pLeft->token; - pColumn = &pRight->pRight->token; - } - lookupName(pParse, pDb, pTable, pColumn, pNC, pExpr); - return 1; - } + pKey = (char *)sqlite3IndexKeyinfo(pParse, pIdx); + iDb = sqlite3SchemaToIndex(db, pIdx->pSchema); + sqlite3VdbeUsesBtree(v, iDb); - /* Resolve function names - */ - case TK_CONST_FUNC: - case TK_FUNCTION: { - ExprList *pList = pExpr->pList; /* The argument list */ - int n = pList ? pList->nExpr : 0; /* Number of arguments */ - int no_such_func = 0; /* True if no such function exists */ - int wrong_num_args = 0; /* True if wrong number of arguments */ - int is_agg = 0; /* True if is an aggregate function */ - int i; - int auth; /* Authorization to use the function */ - int nId; /* Number of characters in function name */ - const char *zId; /* The function name. */ - FuncDef *pDef; /* Information about the function */ - int enc = ENC(pParse->db); /* The database encoding */ - - zId = (char*)pExpr->token.z; - nId = pExpr->token.n; - pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0); - if( pDef==0 ){ - pDef = sqlite3FindFunction(pParse->db, zId, nId, -1, enc, 0); - if( pDef==0 ){ - no_such_func = 1; - }else{ - wrong_num_args = 1; - } - }else{ - is_agg = pDef->xFunc==0; - } -#ifndef SQLITE_OMIT_AUTHORIZATION - if( pDef ){ - auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0); - if( auth!=SQLITE_OK ){ - if( auth==SQLITE_DENY ){ - sqlite3ErrorMsg(pParse, "not authorized to use function: %s", - pDef->zName); - pNC->nErr++; + iAddr = sqlite3VdbeAddOp1(v, OP_If, iMem); + sqlite3VdbeAddOp2(v, OP_Integer, 1, iMem); + + sqlite3VdbeAddOp4(v, OP_OpenRead, iTab, pIdx->tnum, iDb, + pKey,P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pIdx->zName)); + eType = IN_INDEX_INDEX; + + sqlite3VdbeJumpHere(v, iAddr); + if( prNotFound && !pTab->aCol[iCol].notNull ){ + *prNotFound = ++pParse->nMem; } - pExpr->op = TK_NULL; - return 1; - } - } -#endif - if( is_agg && !pNC->allowAgg ){ - sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); - pNC->nErr++; - is_agg = 0; - }else if( no_such_func ){ - sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); - pNC->nErr++; - }else if( wrong_num_args ){ - sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", - nId, zId); - pNC->nErr++; - } - if( is_agg ){ - pExpr->op = TK_AGG_FUNCTION; - pNC->hasAgg = 1; - } - if( is_agg ) pNC->allowAgg = 0; - for(i=0; pNC->nErr==0 && ia[i].pExpr, nameResolverStep, pNC); - } - if( is_agg ) pNC->allowAgg = 1; - /* FIX ME: Compute pExpr->affinity based on the expected return - ** type of the function - */ - return is_agg; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_SELECT: - case TK_EXISTS: -#endif - case TK_IN: { - if( pExpr->pSelect ){ - int nRef = 
pNC->nRef; -#ifndef SQLITE_OMIT_CHECK - if( pNC->isCheck ){ - sqlite3ErrorMsg(pParse,"subqueries prohibited in CHECK constraints"); - } -#endif - sqlite3SelectResolve(pParse, pExpr->pSelect, pNC); - assert( pNC->nRef>=nRef ); - if( nRef!=pNC->nRef ){ - ExprSetProperty(pExpr, EP_VarSelect); } } - break; } -#ifndef SQLITE_OMIT_CHECK - case TK_VARIABLE: { - if( pNC->isCheck ){ - sqlite3ErrorMsg(pParse,"parameters prohibited in CHECK constraints"); - } - break; - } -#endif } - return 0; -} -/* -** This routine walks an expression tree and resolves references to -** table columns. Nodes of the form ID.ID or ID resolve into an -** index to the table in the table list and a column offset. The -** Expr.opcode for such nodes is changed to TK_COLUMN. The Expr.iTable -** value is changed to the index of the referenced table in pTabList -** plus the "base" value. The base value will ultimately become the -** VDBE cursor number for a cursor that is pointing into the referenced -** table. The Expr.iColumn value is changed to the index of the column -** of the referenced table. The Expr.iColumn value for the special -** ROWID column is -1. Any INTEGER PRIMARY KEY column is tried as an -** alias for ROWID. -** -** Also resolve function names and check the functions for proper -** usage. Make sure all function names are recognized and all functions -** have the correct number of arguments. Leave an error message -** in pParse->zErrMsg if anything is amiss. Return the number of errors. -** -** If the expression contains aggregate functions then set the EP_Agg -** property on the expression. -*/ -int sqlite3ExprResolveNames( - NameContext *pNC, /* Namespace to resolve expressions in. */ - Expr *pExpr /* The expression to be analyzed. */ -){ - int savedHasAgg; - if( pExpr==0 ) return 0; -#if SQLITE_MAX_EXPR_DEPTH>0 - if( (pExpr->nHeight+pNC->pParse->nHeight)>SQLITE_MAX_EXPR_DEPTH ){ - sqlite3ErrorMsg(pNC->pParse, - "Expression tree is too large (maximum depth %d)", - SQLITE_MAX_EXPR_DEPTH - ); - return 1; - } - pNC->pParse->nHeight += pExpr->nHeight; -#endif - savedHasAgg = pNC->hasAgg; - pNC->hasAgg = 0; - walkExprTree(pExpr, nameResolverStep, pNC); -#if SQLITE_MAX_EXPR_DEPTH>0 - pNC->pParse->nHeight -= pExpr->nHeight; -#endif - if( pNC->nErr>0 ){ - ExprSetProperty(pExpr, EP_Error); - } - if( pNC->hasAgg ){ - ExprSetProperty(pExpr, EP_Agg); - }else if( savedHasAgg ){ - pNC->hasAgg = 1; + if( eType==0 ){ + /* Could not found an existing able or index to use as the RHS b-tree. + ** We will have to generate an ephemeral table to do the job. + */ + int rMayHaveNull = 0; + eType = IN_INDEX_EPH; + if( prNotFound ){ + *prNotFound = rMayHaveNull = ++pParse->nMem; + }else if( pX->pLeft->iColumn<0 && !ExprHasAnyProperty(pX, EP_xIsSelect) ){ + eType = IN_INDEX_ROWID; + } + sqlite3CodeSubselect(pParse, pX, rMayHaveNull, eType==IN_INDEX_ROWID); + }else{ + pX->iTable = iTab; } - return ExprHasProperty(pExpr, EP_Error); + return eType; } - -/* -** A pointer instance of this structure is used to pass information -** through walkExprTree into codeSubqueryStep(). -*/ -typedef struct QueryCoder QueryCoder; -struct QueryCoder { - Parse *pParse; /* The parsing context */ - NameContext *pNC; /* Namespace of first enclosing query */ -}; - +#endif /* ** Generate code for scalar subqueries used as an expression @@ -1505,13 +1483,36 @@ ** ** The pExpr parameter describes the expression that contains the IN ** operator or subquery. 
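
Aside: the SQLITE_MAX_EXPR_DEPTH check shown just above rejects parse trees that are too deep before any recursive processing can overflow the stack; SQLite maintains Expr.nHeight incrementally as nodes are built. The sketch below instead recomputes the height after the fact, purely to illustrate the rule (a childless node has height 1, a parent is one more than its tallest child); the cap value and names are illustrative:

    #include <stdio.h>

    #define MAX_DEPTH 1000   /* stand-in for SQLITE_MAX_EXPR_DEPTH */

    typedef struct TNode { struct TNode *pLeft, *pRight; } TNode;

    static int treeHeight(const TNode *p){
      int hL, hR;
      if( p==0 ) return 0;
      hL = treeHeight(p->pLeft);
      hR = treeHeight(p->pRight);
      return 1 + (hL>hR ? hL : hR);
    }

    /* Returns non-zero (and reports an error) if the tree is too deep. */
    static int checkDepth(const TNode *p){
      if( treeHeight(p)>MAX_DEPTH ){
        fprintf(stderr, "Expression tree is too large (maximum depth %d)\n", MAX_DEPTH);
        return 1;
      }
      return 0;
    }

    int main(void){
      TNode a = {0,0}, b = {&a,0}, c = {&b,0};
      return checkDepth(&c);   /* height 3: well under the cap, returns 0 */
    }
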
+** +** If parameter isRowid is non-zero, then expression pExpr is guaranteed +** to be of the form " IN (?, ?, ?)", where is a reference +** to some integer key column of a table B-Tree. In this case, use an +** intkey B-Tree to store the set of IN(...) values instead of the usual +** (slower) variable length keys B-Tree. +** +** If rMayHaveNull is non-zero, that means that the operation is an IN +** (not a SELECT or EXISTS) and that the RHS might contains NULLs. +** Furthermore, the IN is in a WHERE clause and that we really want +** to iterate over the RHS of the IN operator in order to quickly locate +** all corresponding LHS elements. All this routine does is initialize +** the register given by rMayHaveNull to NULL. Calling routines will take +** care of changing this register value to non-NULL if the RHS is NULL-free. +** +** If rMayHaveNull is zero, that means that the subquery is being used +** for membership testing only. There is no need to initialize any +** registers to indicate the presense or absence of NULLs on the RHS. */ #ifndef SQLITE_OMIT_SUBQUERY -void sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ +void sqlite3CodeSubselect( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* The IN, SELECT, or EXISTS operator */ + int rMayHaveNull, /* Register that records whether NULLs exist in RHS */ + int isRowid /* If true, LHS of IN operator is a rowid */ +){ int testAddr = 0; /* One-time test address */ Vdbe *v = sqlite3GetVdbe(pParse); - if( v==0 ) return; - + if( NEVER(v==0) ) return; + sqlite3ExprCachePush(pParse); /* This code must be run in its entirety every time it is encountered ** if any of the following is true: @@ -1524,11 +1525,10 @@ ** save the results, and reuse the same result on subsequent invocations. */ if( !ExprHasAnyProperty(pExpr, EP_VarSelect) && !pParse->trigStack ){ - int mem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemLoad, mem, 0); - testAddr = sqlite3VdbeAddOp(v, OP_If, 0, 0); - assert( testAddr>0 || sqlite3MallocFailed() ); - sqlite3VdbeAddOp(v, OP_MemInt, 1, mem); + int mem = ++pParse->nMem; + sqlite3VdbeAddOp1(v, OP_If, mem); + testAddr = sqlite3VdbeAddOp2(v, OP_Integer, 1, mem); + assert( testAddr>0 || pParse->db->mallocFailed ); } switch( pExpr->op ){ @@ -1536,8 +1536,13 @@ char affinity; KeyInfo keyInfo; int addr; /* Address of OP_OpenEphemeral instruction */ + Expr *pLeft = pExpr->pLeft; + + if( rMayHaveNull ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, rMayHaveNull); + } - affinity = sqlite3ExprAffinity(pExpr->pLeft); + affinity = sqlite3ExprAffinity(pLeft); /* Whether this is an 'x IN(SELECT...)' or an 'x IN()' ** expression it is handled the same way. A virtual table is @@ -1553,29 +1558,32 @@ ** is used. */ pExpr->iTable = pParse->nTab++; - addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, pExpr->iTable, 0); + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, !isRowid); memset(&keyInfo, 0, sizeof(keyInfo)); keyInfo.nField = 1; - sqlite3VdbeAddOp(v, OP_SetNumColumns, pExpr->iTable, 1); - if( pExpr->pSelect ){ + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ /* Case 1: expr IN (SELECT ...) ** ** Generate code to write the results of the select into the temporary ** table allocated and opened above. 
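
Aside: the discussion of rMayHaveNull above exists because IN is three-valued in SQL: when no match is found but the right-hand side contains a NULL, the result is NULL rather than false, and the generated code must be able to detect that case. The behaviour is easy to see from the public API:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Run one scalar SELECT and print its result, distinguishing NULL. */
    static void show(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
       && sqlite3_step(pStmt)==SQLITE_ROW ){
        if( sqlite3_column_type(pStmt, 0)==SQLITE_NULL ){
          printf("%s -> NULL\n", zSql);
        }else{
          printf("%s -> %d\n", zSql, sqlite3_column_int(pStmt, 0));
        }
      }
      sqlite3_finalize(pStmt);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      show(db, "SELECT 1 IN (1, NULL)");   /* 1: a match is found               */
      show(db, "SELECT 2 IN (1, NULL)");   /* NULL: no match, but a NULL exists */
      show(db, "SELECT 2 IN (1, 3)");      /* 0: no match and no NULL           */
      sqlite3_close(db);
      return 0;
    }
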
*/ - int iParm = pExpr->iTable + (((int)affinity)<<16); + SelectDest dest; ExprList *pEList; + + assert( !isRowid ); + sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable); + dest.affinity = (u8)affinity; assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable ); - if( sqlite3Select(pParse, pExpr->pSelect, SRT_Set, iParm, 0, 0, 0, 0) ){ + if( sqlite3Select(pParse, pExpr->x.pSelect, &dest) ){ return; } - pEList = pExpr->pSelect->pEList; - if( pEList && pEList->nExpr>0 ){ + pEList = pExpr->x.pSelect->pEList; + if( ALWAYS(pEList!=0 && pEList->nExpr>0) ){ keyInfo.aColl[0] = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pEList->a[0].pExpr); } - }else if( pExpr->pList ){ + }else if( pExpr->x.pList!=0 ){ /* Case 2: expr IN (exprlist) ** ** For each expression, build an index key from the evaluation and @@ -1584,15 +1592,19 @@ ** a column, use numeric affinity. */ int i; - ExprList *pList = pExpr->pList; + ExprList *pList = pExpr->x.pList; struct ExprList_item *pItem; + int r1, r2, r3; if( !affinity ){ affinity = SQLITE_AFF_NONE; } - keyInfo.aColl[0] = pExpr->pLeft->pColl; + keyInfo.aColl[0] = sqlite3ExprCollSeq(pParse, pExpr->pLeft); /* Loop through each expression in . */ + r1 = sqlite3GetTempReg(pParse); + r2 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Null, 0, r2); for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){ Expr *pE2 = pItem->pExpr; @@ -1601,134 +1613,512 @@ ** this code only executes once. Because for a non-constant ** expression we need to rerun this code each time. */ - if( testAddr>0 && !sqlite3ExprIsConstant(pE2) ){ - sqlite3VdbeChangeToNoop(v, testAddr-1, 3); + if( testAddr && !sqlite3ExprIsConstant(pE2) ){ + sqlite3VdbeChangeToNoop(v, testAddr-1, 2); testAddr = 0; } /* Evaluate the expression and insert it into the temp table */ - sqlite3ExprCode(pParse, pE2); - sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &affinity, 1); - sqlite3VdbeAddOp(v, OP_IdxInsert, pExpr->iTable, 0); + r3 = sqlite3ExprCodeTarget(pParse, pE2, r1); + if( isRowid ){ + sqlite3VdbeAddOp2(v, OP_MustBeInt, r3, sqlite3VdbeCurrentAddr(v)+2); + sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3); + }else{ + sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1); + sqlite3ExprCacheAffinityChange(pParse, r3, 1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, pExpr->iTable, r2); + } } + sqlite3ReleaseTempReg(pParse, r1); + sqlite3ReleaseTempReg(pParse, r2); + } + if( !isRowid ){ + sqlite3VdbeChangeP4(v, addr, (void *)&keyInfo, P4_KEYINFO); } - sqlite3VdbeChangeP3(v, addr, (void *)&keyInfo, P3_KEYINFO); break; } case TK_EXISTS: - case TK_SELECT: { - /* This has to be a scalar SELECT. Generate code to put the + case TK_SELECT: + default: { + /* If this has to be a scalar SELECT. Generate code to put the ** value of this select in a memory cell and record the number - ** of the memory cell in iColumn. + ** of the memory cell in iColumn. If this is an EXISTS, write + ** an integer 0 (not exists) or 1 (exists) into a memory cell + ** and record that memory cell in iColumn. 
*/ - static const Token one = { (u8*)"1", 0, 1 }; - Select *pSel; - int iMem; - int sop; - - pExpr->iColumn = iMem = pParse->nMem++; - pSel = pExpr->pSelect; + static const Token one = { "1", 1 }; /* Token for literal value 1 */ + Select *pSel; /* SELECT statement to encode */ + SelectDest dest; /* How to deal with SELECt result */ + + testcase( pExpr->op==TK_EXISTS ); + testcase( pExpr->op==TK_SELECT ); + assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); + + assert( ExprHasProperty(pExpr, EP_xIsSelect) ); + pSel = pExpr->x.pSelect; + sqlite3SelectDestInit(&dest, 0, ++pParse->nMem); if( pExpr->op==TK_SELECT ){ - sop = SRT_Mem; - sqlite3VdbeAddOp(v, OP_MemNull, iMem, 0); - VdbeComment((v, "# Init subquery result")); + dest.eDest = SRT_Mem; + sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iParm); + VdbeComment((v, "Init subquery result")); }else{ - sop = SRT_Exists; - sqlite3VdbeAddOp(v, OP_MemInt, 0, iMem); - VdbeComment((v, "# Init EXISTS result")); - } - sqlite3ExprDelete(pSel->pLimit); - pSel->pLimit = sqlite3Expr(TK_INTEGER, 0, 0, &one); - if( sqlite3Select(pParse, pSel, sop, iMem, 0, 0, 0, 0) ){ + dest.eDest = SRT_Exists; + sqlite3VdbeAddOp2(v, OP_Integer, 0, dest.iParm); + VdbeComment((v, "Init EXISTS result")); + } + sqlite3ExprDelete(pParse->db, pSel->pLimit); + pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &one); + if( sqlite3Select(pParse, pSel, &dest) ){ return; } + pExpr->iColumn = (i16)dest.iParm; + ExprSetIrreducible(pExpr); break; } } - if( testAddr ){ - sqlite3VdbeJumpHere(v, testAddr); + if( testAddr ){ + sqlite3VdbeJumpHere(v, testAddr-1); + } + sqlite3ExprCachePop(pParse, 1); + + return; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + +/* +** Duplicate an 8-byte value +*/ +static char *dup8bytes(Vdbe *v, const char *in){ + char *out = sqlite3DbMallocRaw(sqlite3VdbeDb(v), 8); + if( out ){ + memcpy(out, in, 8); + } + return out; +} + +/* +** Generate an instruction that will put the floating point +** value described by z[0..n-1] into register iMem. +** +** The z[] string will probably not be zero-terminated. But the +** z[n] character is guaranteed to be something that does not look +** like the continuation of the number. +*/ +static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){ + if( ALWAYS(z!=0) ){ + double value; + char *zV; + sqlite3AtoF(z, &value); + if( sqlite3IsNaN(value) ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, iMem); + }else{ + if( negateFlag ) value = -value; + zV = dup8bytes(v, (char*)&value); + sqlite3VdbeAddOp4(v, OP_Real, 0, iMem, 0, zV, P4_REAL); + } + } +} + + +/* +** Generate an instruction that will put the integer describe by +** text z[0..n-1] into register iMem. +** +** The z[] string will probably not be zero-terminated. But the +** z[n] character is guaranteed to be something that does not look +** like the continuation of the number. +*/ +static void codeInteger(Vdbe *v, Expr *pExpr, int negFlag, int iMem){ + if( pExpr->flags & EP_IntValue ){ + int i = pExpr->u.iValue; + if( negFlag ) i = -i; + sqlite3VdbeAddOp2(v, OP_Integer, i, iMem); + }else{ + const char *z = pExpr->u.zToken; + assert( z!=0 ); + if( sqlite3FitsIn64Bits(z, negFlag) ){ + i64 value; + char *zV; + sqlite3Atoi64(z, &value); + if( negFlag ) value = -value; + zV = dup8bytes(v, (char*)&value); + sqlite3VdbeAddOp4(v, OP_Int64, 0, iMem, 0, zV, P4_INT64); + }else{ + codeReal(v, z, negFlag, iMem); + } + } +} + +/* +** Clear a cache entry. 
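
Aside: codeInteger() above prefers an exact 64-bit integer when the literal fits and falls back to codeReal() otherwise. The same fits-or-fall-back idea in portable standalone C; sqlite3Atoi64/sqlite3FitsIn64Bits are replaced here by strtoll's overflow reporting:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Parse a numeric literal: prefer a 64-bit integer, fall back to a double.
    ** Returns 1 when the integer form was used. */
    static int parseNumeric(const char *z, long long *pInt, double *pReal){
      char *zEnd;
      long long v;
      errno = 0;
      v = strtoll(z, &zEnd, 10);
      if( errno==0 && *zEnd=='\0' ){
        *pInt = v;
        return 1;
      }
      *pReal = strtod(z, 0);
      return 0;
    }

    int main(void){
      long long i; double r;
      if( parseNumeric("9223372036854775808", &i, &r) ){   /* one past INT64_MAX */
        printf("int %lld\n", i);
      }else{
        printf("real %g\n", r);                            /* takes the real path */
      }
      return 0;
    }
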
+*/ +static void cacheEntryClear(Parse *pParse, struct yColCache *p){ + if( p->tempReg ){ + if( pParse->nTempRegaTempReg) ){ + pParse->aTempReg[pParse->nTempReg++] = p->iReg; + } + p->tempReg = 0; + } +} + + +/* +** Record in the column cache that a particular column from a +** particular table is stored in a particular register. +*/ +void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int iReg){ + int i; + int minLru; + int idxLru; + struct yColCache *p; + + assert( iReg>0 ); /* Register numbers are always positive */ + assert( iCol>=-1 && iCol<32768 ); /* Finite column numbers */ + + /* First replace any existing entry */ + for(i=0, p=pParse->aColCache; iiReg && p->iTable==iTab && p->iColumn==iCol ){ + cacheEntryClear(pParse, p); + p->iLevel = pParse->iCacheLevel; + p->iReg = iReg; + p->affChange = 0; + p->lru = pParse->iCacheCnt++; + return; + } + } + + /* Find an empty slot and replace it */ + for(i=0, p=pParse->aColCache; iiReg==0 ){ + p->iLevel = pParse->iCacheLevel; + p->iTable = iTab; + p->iColumn = iCol; + p->iReg = iReg; + p->affChange = 0; + p->tempReg = 0; + p->lru = pParse->iCacheCnt++; + return; + } + } + + /* Replace the last recently used */ + minLru = 0x7fffffff; + idxLru = -1; + for(i=0, p=pParse->aColCache; ilrulru; + } + } + if( ALWAYS(idxLru>=0) ){ + p = &pParse->aColCache[idxLru]; + p->iLevel = pParse->iCacheLevel; + p->iTable = iTab; + p->iColumn = iCol; + p->iReg = iReg; + p->affChange = 0; + p->tempReg = 0; + p->lru = pParse->iCacheCnt++; + return; + } +} + +/* +** Indicate that a register is being overwritten. Purge the register +** from the column cache. +*/ +void sqlite3ExprCacheRemove(Parse *pParse, int iReg){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; iiReg==iReg ){ + cacheEntryClear(pParse, p); + p->iReg = 0; + } } +} - return; +/* +** Remember the current column cache context. Any new entries added +** added to the column cache after this call are removed when the +** corresponding pop occurs. +*/ +void sqlite3ExprCachePush(Parse *pParse){ + pParse->iCacheLevel++; } -#endif /* SQLITE_OMIT_SUBQUERY */ /* -** Generate an instruction that will put the integer describe by -** text z[0..n-1] on the stack. +** Remove from the column cache any entries that were added since the +** the previous N Push operations. In other words, restore the cache +** to the state it was in N Pushes ago. */ -static void codeInteger(Vdbe *v, const char *z, int n){ - assert( z || sqlite3MallocFailed() ); - if( z ){ - int i; - if( sqlite3GetInt32(z, &i) ){ - sqlite3VdbeAddOp(v, OP_Integer, i, 0); - }else if( sqlite3FitsIn64Bits(z) ){ - sqlite3VdbeOp3(v, OP_Int64, 0, 0, z, n); - }else{ - sqlite3VdbeOp3(v, OP_Real, 0, 0, z, n); +void sqlite3ExprCachePop(Parse *pParse, int N){ + int i; + struct yColCache *p; + assert( N>0 ); + assert( pParse->iCacheLevel>=N ); + pParse->iCacheLevel -= N; + for(i=0, p=pParse->aColCache; iiReg && p->iLevel>pParse->iCacheLevel ){ + cacheEntryClear(pParse, p); + p->iReg = 0; } } } +/* +** When a cached column is reused, make sure that its register is +** no longer available as a temp register. ticket #3879: that same +** register might be in the cache in multiple places, so be sure to +** get them all. +*/ +static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; iiReg==iReg ){ + p->tempReg = 0; + } + } +} /* ** Generate code that will extract the iColumn-th column from -** table pTab and push that column value on the stack. 
There -** is an open cursor to pTab in iTable. If iColumn<0 then -** code is generated that extracts the rowid. -*/ -void sqlite3ExprCodeGetColumn(Vdbe *v, Table *pTab, int iColumn, int iTable){ +** table pTab and store the column value in a register. An effort +** is made to store the column value in register iReg, but this is +** not guaranteed. The location of the column value is returned. +** +** There must be an open cursor to pTab in iTable when this routine +** is called. If iColumn<0 then code is generated that extracts the rowid. +** +** This routine might attempt to reuse the value of the column that +** has already been loaded into a register. The value will always +** be used if it has not undergone any affinity changes. But if +** an affinity change has occurred, then the cached value will only be +** used if allowAffChng is true. +*/ +int sqlite3ExprCodeGetColumn( + Parse *pParse, /* Parsing and code generating context */ + Table *pTab, /* Description of the table we are reading from */ + int iColumn, /* Index of the table column */ + int iTable, /* The cursor pointing to the table */ + int iReg, /* Store results here */ + int allowAffChng /* True if prior affinity changes are OK */ +){ + Vdbe *v = pParse->pVdbe; + int i; + struct yColCache *p; + + for(i=0, p=pParse->aColCache; iiReg>0 && p->iTable==iTable && p->iColumn==iColumn + && (!p->affChange || allowAffChng) ){ + p->lru = pParse->iCacheCnt++; + sqlite3ExprCachePinRegister(pParse, p->iReg); + return p->iReg; + } + } + assert( v!=0 ); if( iColumn<0 ){ - int op = (pTab && IsVirtual(pTab)) ? OP_VRowid : OP_Rowid; - sqlite3VdbeAddOp(v, op, iTable, 0); - }else if( pTab==0 ){ - sqlite3VdbeAddOp(v, OP_Column, iTable, iColumn); - }else{ + sqlite3VdbeAddOp2(v, OP_Rowid, iTable, iReg); + }else if( ALWAYS(pTab!=0) ){ int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; - sqlite3VdbeAddOp(v, op, iTable, iColumn); + sqlite3VdbeAddOp3(v, op, iTable, iColumn, iReg); sqlite3ColumnDefault(v, pTab, iColumn); #ifndef SQLITE_OMIT_FLOATING_POINT if( pTab->aCol[iColumn].affinity==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp(v, OP_RealAffinity, 0, 0); + sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); } #endif } + sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); + return iReg; +} + +/* +** Clear all column cache entries. +*/ +void sqlite3ExprCacheClear(Parse *pParse){ + int i; + struct yColCache *p; + + for(i=0, p=pParse->aColCache; iiReg ){ + cacheEntryClear(pParse, p); + p->iReg = 0; + } + } +} + +/* +** Record the fact that an affinity change has occurred on iCount +** registers starting with iStart. +*/ +void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, int iCount){ + int iEnd = iStart + iCount - 1; + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; iiReg; + if( r>=iStart && r<=iEnd ){ + p->affChange = 1; + } + } +} + +/* +** Generate code to move content from registers iFrom...iFrom+nReg-1 +** over to iTo..iTo+nReg-1. Keep the column cache up-to-date. +*/ +void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){ + int i; + struct yColCache *p; + if( NEVER(iFrom==iTo) ) return; + sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg); + for(i=0, p=pParse->aColCache; iiReg; + if( x>=iFrom && xiReg += iTo-iFrom; + } + } +} + +/* +** Generate code to copy content from registers iFrom...iFrom+nReg-1 +** over to iTo..iTo+nReg-1. 
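
Aside: the column cache maintained by sqlite3ExprCacheStore() earlier in this hunk keeps a small fixed table of (cursor, column) -> register mappings and evicts the least recently used entry when full. A stripped-down standalone sketch of that replacement policy; sizes and names are illustrative, and level tracking, temp-register handling and pinning are omitted:

    #define N_CACHE 8

    typedef struct CacheEntry {
      int iTable, iColumn;   /* which column the register currently holds */
      int iReg;              /* register number; 0 means the slot is free */
      int lru;               /* last-use tick, for eviction */
    } CacheEntry;

    typedef struct Cache { CacheEntry a[N_CACHE]; int tick; } Cache;

    /* Remember that (iTable,iColumn) now lives in register iReg, reusing a
    ** free slot if possible and otherwise evicting the oldest entry. */
    static void cacheStore(Cache *p, int iTable, int iColumn, int iReg){
      int i, iVictim = 0;
      for(i=0; i<N_CACHE; i++){
        if( p->a[i].iReg==0 ){ iVictim = i; break; }          /* free slot */
        if( p->a[i].lru < p->a[iVictim].lru ) iVictim = i;    /* older entry */
      }
      p->a[iVictim].iTable  = iTable;
      p->a[iVictim].iColumn = iColumn;
      p->a[iVictim].iReg    = iReg;
      p->a[iVictim].lru     = p->tick++;
    }
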
+*/ +void sqlite3ExprCodeCopy(Parse *pParse, int iFrom, int iTo, int nReg){ + int i; + if( NEVER(iFrom==iTo) ) return; + for(i=0; ipVdbe, OP_Copy, iFrom+i, iTo+i); + } +} + +/* +** Return true if any register in the range iFrom..iTo (inclusive) +** is used as part of the column cache. +*/ +static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; iiReg; + if( r>=iFrom && r<=iTo ) return 1; + } + return 0; +} + +/* +** If the last instruction coded is an ephemeral copy of any of +** the registers in the nReg registers beginning with iReg, then +** convert the last instruction from OP_SCopy to OP_Copy. +*/ +void sqlite3ExprHardCopy(Parse *pParse, int iReg, int nReg){ + VdbeOp *pOp; + Vdbe *v; + + assert( pParse->db->mallocFailed==0 ); + v = pParse->pVdbe; + assert( v!=0 ); + pOp = sqlite3VdbeGetOp(v, -1); + assert( pOp!=0 ); + if( pOp->opcode==OP_SCopy && pOp->p1>=iReg && pOp->p1opcode = OP_Copy; + } +} + +/* +** Generate code to store the value of the iAlias-th alias in register +** target. The first time this is called, pExpr is evaluated to compute +** the value of the alias. The value is stored in an auxiliary register +** and the number of that register is returned. On subsequent calls, +** the register number is returned without generating any code. +** +** Note that in order for this to work, code must be generated in the +** same order that it is executed. +** +** Aliases are numbered starting with 1. So iAlias is in the range +** of 1 to pParse->nAlias inclusive. +** +** pParse->aAlias[iAlias-1] records the register number where the value +** of the iAlias-th alias is stored. If zero, that means that the +** alias has not yet been computed. +*/ +static int codeAlias(Parse *pParse, int iAlias, Expr *pExpr, int target){ +#if 0 + sqlite3 *db = pParse->db; + int iReg; + if( pParse->nAliasAllocnAlias ){ + pParse->aAlias = sqlite3DbReallocOrFree(db, pParse->aAlias, + sizeof(pParse->aAlias[0])*pParse->nAlias ); + testcase( db->mallocFailed && pParse->nAliasAlloc>0 ); + if( db->mallocFailed ) return 0; + memset(&pParse->aAlias[pParse->nAliasAlloc], 0, + (pParse->nAlias-pParse->nAliasAlloc)*sizeof(pParse->aAlias[0])); + pParse->nAliasAlloc = pParse->nAlias; + } + assert( iAlias>0 && iAlias<=pParse->nAlias ); + iReg = pParse->aAlias[iAlias-1]; + if( iReg==0 ){ + if( pParse->iCacheLevel>0 ){ + iReg = sqlite3ExprCodeTarget(pParse, pExpr, target); + }else{ + iReg = ++pParse->nMem; + sqlite3ExprCode(pParse, pExpr, iReg); + pParse->aAlias[iAlias-1] = iReg; + } + } + return iReg; +#else + UNUSED_PARAMETER(iAlias); + return sqlite3ExprCodeTarget(pParse, pExpr, target); +#endif } /* ** Generate code into the current Vdbe to evaluate the given -** expression and leave the result on the top of stack. +** expression. Attempt to store the results in register "target". +** Return the register where results are stored. ** -** This code depends on the fact that certain token values (ex: TK_EQ) -** are the same as opcode values (ex: OP_Eq) that implement the corresponding -** operation. Special comments in vdbe.c and the mkopcodeh.awk script in -** the make process cause these values to align. Assert()s in the code -** below verify that the numbers are aligned correctly. -*/ -void sqlite3ExprCode(Parse *pParse, Expr *pExpr){ - Vdbe *v = pParse->pVdbe; - int op; - int stackChng = 1; /* Amount of change to stack depth */ +** With this routine, there is no guarantee that results will +** be stored in target. 
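
Aside: this comment (continued below) describes a calling convention where the code generator is given a preferred target register but may report that the value actually landed somewhere else, and the caller copies only when the two differ. A toy standalone illustration of that convention; registers, values and helper names are invented, and the real code emits OP_SCopy/OP_Copy instructions rather than moving C values:

    #include <stdio.h>

    static int aReg[16];               /* toy register file */

    /* "Generate" a value into register target, unless it is already cached in
    ** some other register, in which case return that register untouched. */
    static int codeValueTarget(int cachedReg, int value, int target){
      if( cachedReg>0 ) return cachedReg;   /* reuse the cached copy, no work done */
      aReg[target] = value;
      return target;
    }

    /* Wrapper that guarantees the value ends up in the requested register. */
    static void codeValueInto(int cachedReg, int value, int target){
      int inReg = codeValueTarget(cachedReg, value, target);
      if( inReg!=target ) aReg[target] = aReg[inReg];
    }

    int main(void){
      aReg[3] = 42;                    /* pretend the value is cached in register 3 */
      codeValueInto(3, 42, 5);
      printf("%d\n", aReg[5]);         /* 42 */
      return 0;
    }
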
The result might be stored in some other +** register if it is convenient to do so. The calling function +** must check the return code and move the results to the desired +** register. +*/ +int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){ + Vdbe *v = pParse->pVdbe; /* The VM under construction */ + int op; /* The opcode being coded */ + int inReg = target; /* Results stored in register inReg */ + int regFree1 = 0; /* If non-zero free this temporary register */ + int regFree2 = 0; /* If non-zero free this temporary register */ + int r1, r2, r3, r4; /* Various register numbers */ + sqlite3 *db = pParse->db; /* The database connection */ + + assert( target>0 && target<=pParse->nMem ); + if( v==0 ){ + assert( pParse->db->mallocFailed ); + return 0; + } - if( v==0 ) return; if( pExpr==0 ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); - return; + op = TK_NULL; + }else{ + op = pExpr->op; } - op = pExpr->op; switch( op ){ case TK_AGG_COLUMN: { AggInfo *pAggInfo = pExpr->pAggInfo; struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg]; if( !pAggInfo->directMode ){ - sqlite3VdbeAddOp(v, OP_MemLoad, pCol->iMem, 0); + assert( pCol->iMem>0 ); + inReg = pCol->iMem; break; }else if( pAggInfo->useSortingIdx ){ - sqlite3VdbeAddOp(v, OP_Column, pAggInfo->sortingIdx, - pCol->iSorterColumn); + sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdx, + pCol->iSorterColumn, target); break; } /* Otherwise, fall thru into the TK_COLUMN case */ @@ -1736,69 +2126,108 @@ case TK_COLUMN: { if( pExpr->iTable<0 ){ /* This only happens when coding check constraints */ - assert( pParse->ckOffset>0 ); - sqlite3VdbeAddOp(v, OP_Dup, pParse->ckOffset-pExpr->iColumn-1, 1); + assert( pParse->ckBase>0 ); + inReg = pExpr->iColumn + pParse->ckBase; }else{ - sqlite3ExprCodeGetColumn(v, pExpr->pTab, pExpr->iColumn, pExpr->iTable); + testcase( (pExpr->flags & EP_AnyAff)!=0 ); + inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab, + pExpr->iColumn, pExpr->iTable, target, + pExpr->flags & EP_AnyAff); } break; } case TK_INTEGER: { - codeInteger(v, (char*)pExpr->token.z, pExpr->token.n); + codeInteger(v, pExpr, 0, target); + break; + } + case TK_FLOAT: { + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + codeReal(v, pExpr->u.zToken, 0, target); break; } - case TK_FLOAT: case TK_STRING: { - assert( TK_FLOAT==OP_Real ); - assert( TK_STRING==OP_String8 ); - sqlite3DequoteExpr(pExpr); - sqlite3VdbeOp3(v, op, 0, 0, (char*)pExpr->token.z, pExpr->token.n); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + sqlite3VdbeAddOp4(v, OP_String8, 0, target, 0, pExpr->u.zToken, 0); break; } case TK_NULL: { - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, target); break; } #ifndef SQLITE_OMIT_BLOB_LITERAL case TK_BLOB: { int n; const char *z; - assert( TK_BLOB==OP_HexBlob ); - n = pExpr->token.n - 3; - z = (char*)pExpr->token.z + 2; - assert( n>=0 ); - if( n==0 ){ - z = ""; - } - sqlite3VdbeOp3(v, op, 0, 0, z, n); + char *zBlob; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); + assert( pExpr->u.zToken[1]=='\'' ); + z = &pExpr->u.zToken[2]; + n = sqlite3Strlen30(z) - 1; + assert( z[n]=='\'' ); + zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n); + sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC); break; } #endif case TK_VARIABLE: { - sqlite3VdbeAddOp(v, OP_Variable, pExpr->iTable, 0); - if( pExpr->token.n>1 ){ - sqlite3VdbeChangeP3(v, -1, (char*)pExpr->token.z, pExpr->token.n); + VdbeOp *pOp; + assert( !ExprHasProperty(pExpr, EP_IntValue) 
); + assert( pExpr->u.zToken!=0 ); + assert( pExpr->u.zToken[0]!=0 ); + if( pExpr->u.zToken[1]==0 + && (pOp = sqlite3VdbeGetOp(v, -1))->opcode==OP_Variable + && pOp->p1+pOp->p3==pExpr->iTable + && pOp->p2+pOp->p3==target + && pOp->p4.z==0 + ){ + /* If the previous instruction was a copy of the previous unnamed + ** parameter into the previous register, then simply increment the + ** repeat count on the prior instruction rather than making a new + ** instruction. + */ + pOp->p3++; + }else{ + sqlite3VdbeAddOp3(v, OP_Variable, pExpr->iTable, target, 1); + if( pExpr->u.zToken[1]!=0 ){ + sqlite3VdbeChangeP4(v, -1, pExpr->u.zToken, 0); + } } break; } case TK_REGISTER: { - sqlite3VdbeAddOp(v, OP_MemLoad, pExpr->iTable, 0); + inReg = pExpr->iTable; + break; + } + case TK_AS: { + inReg = codeAlias(pParse, pExpr->iTable, pExpr->pLeft, target); break; } #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ int aff, to_op; - sqlite3ExprCode(pParse, pExpr->pLeft); - aff = sqlite3AffinityType(&pExpr->token); + inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + aff = sqlite3AffinityType(pExpr->u.zToken); to_op = aff - SQLITE_AFF_TEXT + OP_ToText; assert( to_op==OP_ToText || aff!=SQLITE_AFF_TEXT ); assert( to_op==OP_ToBlob || aff!=SQLITE_AFF_NONE ); assert( to_op==OP_ToNumeric || aff!=SQLITE_AFF_NUMERIC ); assert( to_op==OP_ToInt || aff!=SQLITE_AFF_INTEGER ); assert( to_op==OP_ToReal || aff!=SQLITE_AFF_REAL ); - sqlite3VdbeAddOp(v, to_op, 0, 0); - stackChng = 0; + testcase( to_op==OP_ToText ); + testcase( to_op==OP_ToBlob ); + testcase( to_op==OP_ToNumeric ); + testcase( to_op==OP_ToInt ); + testcase( to_op==OP_ToReal ); + if( inReg!=target ){ + sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); + inReg = target; + } + sqlite3VdbeAddOp1(v, to_op, inReg); + testcase( usedAsColumnCache(pParse, inReg, inReg) ); + sqlite3ExprCacheAffinityChange(pParse, inReg, 1); break; } #endif /* SQLITE_OMIT_CAST */ @@ -1814,10 +2243,18 @@ assert( TK_GE==OP_Ge ); assert( TK_EQ==OP_Eq ); assert( TK_NE==OP_Ne ); - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3ExprCode(pParse, pExpr->pRight); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, 0, 0); - stackChng = -1; + testcase( op==TK_LT ); + testcase( op==TK_LE ); + testcase( op==TK_GT ); + testcase( op==TK_GE ); + testcase( op==TK_EQ ); + testcase( op==TK_NE ); + codeCompareOperands(pParse, pExpr->pLeft, &r1, ®Free1, + pExpr->pRight, &r2, ®Free2); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, + r1, r2, inReg, SQLITE_STOREP2); + testcase( regFree1==0 ); + testcase( regFree2==0 ); break; } case TK_AND: @@ -1843,76 +2280,111 @@ assert( TK_LSHIFT==OP_ShiftLeft ); assert( TK_RSHIFT==OP_ShiftRight ); assert( TK_CONCAT==OP_Concat ); - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3ExprCode(pParse, pExpr->pRight); - sqlite3VdbeAddOp(v, op, 0, 0); - stackChng = -1; + testcase( op==TK_AND ); + testcase( op==TK_OR ); + testcase( op==TK_PLUS ); + testcase( op==TK_MINUS ); + testcase( op==TK_REM ); + testcase( op==TK_BITAND ); + testcase( op==TK_BITOR ); + testcase( op==TK_SLASH ); + testcase( op==TK_LSHIFT ); + testcase( op==TK_RSHIFT ); + testcase( op==TK_CONCAT ); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + sqlite3VdbeAddOp3(v, op, r2, r1, target); + testcase( regFree1==0 ); + testcase( regFree2==0 ); break; } case TK_UMINUS: { Expr *pLeft = pExpr->pLeft; assert( pLeft ); - if( pLeft->op==TK_FLOAT 
|| pLeft->op==TK_INTEGER ){ - Token *p = &pLeft->token; - char *z = sqlite3MPrintf("-%.*s", p->n, p->z); - if( pLeft->op==TK_FLOAT ){ - sqlite3VdbeOp3(v, OP_Real, 0, 0, z, p->n+1); - }else{ - codeInteger(v, z, p->n+1); - } - sqliteFree(z); - break; + if( pLeft->op==TK_FLOAT ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + codeReal(v, pLeft->u.zToken, 1, target); + }else if( pLeft->op==TK_INTEGER ){ + codeInteger(v, pLeft, 1, target); + }else{ + regFree1 = r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Integer, 0, r1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free2); + sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target); + testcase( regFree2==0 ); } - /* Fall through into TK_NOT */ + inReg = target; + break; } case TK_BITNOT: case TK_NOT: { assert( TK_BITNOT==OP_BitNot ); assert( TK_NOT==OP_Not ); - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3VdbeAddOp(v, op, 0, 0); - stackChng = 0; + testcase( op==TK_BITNOT ); + testcase( op==TK_NOT ); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + testcase( regFree1==0 ); + inReg = target; + sqlite3VdbeAddOp2(v, op, r1, inReg); break; } case TK_ISNULL: case TK_NOTNULL: { - int dest; + int addr; assert( TK_ISNULL==OP_IsNull ); assert( TK_NOTNULL==OP_NotNull ); - sqlite3VdbeAddOp(v, OP_Integer, 1, 0); - sqlite3ExprCode(pParse, pExpr->pLeft); - dest = sqlite3VdbeCurrentAddr(v) + 2; - sqlite3VdbeAddOp(v, op, 1, dest); - sqlite3VdbeAddOp(v, OP_AddImm, -1, 0); - stackChng = 0; + testcase( op==TK_ISNULL ); + testcase( op==TK_NOTNULL ); + sqlite3VdbeAddOp2(v, OP_Integer, 1, target); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + testcase( regFree1==0 ); + addr = sqlite3VdbeAddOp1(v, op, r1); + sqlite3VdbeAddOp2(v, OP_AddImm, target, -1); + sqlite3VdbeJumpHere(v, addr); break; } case TK_AGG_FUNCTION: { AggInfo *pInfo = pExpr->pAggInfo; if( pInfo==0 ){ - sqlite3ErrorMsg(pParse, "misuse of aggregate: %T", - &pExpr->span); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken); }else{ - sqlite3VdbeAddOp(v, OP_MemLoad, pInfo->aFunc[pExpr->iAgg].iMem, 0); + inReg = pInfo->aFunc[pExpr->iAgg].iMem; } break; } case TK_CONST_FUNC: case TK_FUNCTION: { - ExprList *pList = pExpr->pList; - int nExpr = pList ? pList->nExpr : 0; - FuncDef *pDef; - int nId; - const char *zId; - int constMask = 0; - int i; - u8 enc = ENC(pParse->db); - CollSeq *pColl = 0; - zId = (char*)pExpr->token.z; - nId = pExpr->token.n; - pDef = sqlite3FindFunction(pParse->db, zId, nId, nExpr, enc, 0); + ExprList *pFarg; /* List of function arguments */ + int nFarg; /* Number of function arguments */ + FuncDef *pDef; /* The function definition object */ + int nId; /* Length of the function name in bytes */ + const char *zId; /* The function name */ + int constMask = 0; /* Mask of function arguments that are constant */ + int i; /* Loop counter */ + u8 enc = ENC(db); /* The text encoding used by this database */ + CollSeq *pColl = 0; /* A collating sequence */ + + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + testcase( op==TK_CONST_FUNC ); + testcase( op==TK_FUNCTION ); + if( ExprHasAnyProperty(pExpr, EP_TokenOnly) ){ + pFarg = 0; + }else{ + pFarg = pExpr->x.pList; + } + nFarg = pFarg ? 
pFarg->nExpr : 0; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + zId = pExpr->u.zToken; + nId = sqlite3Strlen30(zId); + pDef = sqlite3FindFunction(db, zId, nId, nFarg, enc, 0); assert( pDef!=0 ); - nExpr = sqlite3ExprCodeExprList(pParse, pList); + if( pFarg ){ + r1 = sqlite3GetTempRange(pParse, nFarg); + sqlite3ExprCodeExprList(pParse, pFarg, r1, 1); + }else{ + r1 = 0; + } #ifndef SQLITE_OMIT_VIRTUALTABLE /* Possibly overload the function if the first argument is ** a virtual table column. @@ -1926,131 +2398,252 @@ ** "glob(B,A). We want to use the A in "A glob B" to test ** for function overloading. But we use the B term in "glob(B,A)". */ - if( nExpr>=2 && (pExpr->flags & EP_InfixFunc) ){ - pDef = sqlite3VtabOverloadFunction(pDef, nExpr, pList->a[1].pExpr); - }else if( nExpr>0 ){ - pDef = sqlite3VtabOverloadFunction(pDef, nExpr, pList->a[0].pExpr); + if( nFarg>=2 && (pExpr->flags & EP_InfixFunc) ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr); + }else if( nFarg>0 ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr); } #endif - for(i=0; ia[i].pExpr) ){ + for(i=0; ia[i].pExpr) ){ constMask |= (1<needCollSeq && !pColl ){ - pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr); + if( (pDef->flags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){ + pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr); } } - if( pDef->needCollSeq ){ - if( !pColl ) pColl = pParse->db->pDfltColl; - sqlite3VdbeOp3(v, OP_CollSeq, 0, 0, (char *)pColl, P3_COLLSEQ); + if( pDef->flags & SQLITE_FUNC_NEEDCOLL ){ + if( !pColl ) pColl = db->pDfltColl; + sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); + } + sqlite3VdbeAddOp4(v, OP_Function, constMask, r1, target, + (char*)pDef, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nFarg); + if( nFarg ){ + sqlite3ReleaseTempRange(pParse, r1, nFarg); } - sqlite3VdbeOp3(v, OP_Function, constMask, nExpr, (char*)pDef, P3_FUNCDEF); - stackChng = 1-nExpr; + sqlite3ExprCacheAffinityChange(pParse, r1, nFarg); break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_EXISTS: case TK_SELECT: { - if( pExpr->iColumn==0 ){ - sqlite3CodeSubselect(pParse, pExpr); - } - sqlite3VdbeAddOp(v, OP_MemLoad, pExpr->iColumn, 0); - VdbeComment((v, "# load subquery result")); + testcase( op==TK_EXISTS ); + testcase( op==TK_SELECT ); + sqlite3CodeSubselect(pParse, pExpr, 0, 0); + inReg = pExpr->iColumn; break; } case TK_IN: { - int addr; + int rNotFound = 0; + int rMayHaveNull = 0; + int j2, j3, j4, j5; char affinity; - int ckOffset = pParse->ckOffset; - sqlite3CodeSubselect(pParse, pExpr); + int eType; + + VdbeNoopComment((v, "begin IN expr r%d", target)); + eType = sqlite3FindInIndex(pParse, pExpr, &rMayHaveNull); + if( rMayHaveNull ){ + rNotFound = ++pParse->nMem; + } /* Figure out the affinity to use to create a key from the results ** of the expression. affinityStr stores a static string suitable for - ** P3 of OP_MakeRecord. + ** P4 of OP_MakeRecord. */ affinity = comparisonAffinity(pExpr); - sqlite3VdbeAddOp(v, OP_Integer, 1, 0); - pParse->ckOffset = (ckOffset ? (ckOffset+1) : 0); /* Code the from " IN (...)". The temporary table ** pExpr->iTable contains the values that make up the (...) set. 
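The code generated below implements SQL's three-valued result for "x IN (...)": true when a match is found, false only when the set is known to contain no NULLs, and NULL otherwise (or when x itself is NULL). A compact sketch of that decision table, with -1 standing in for SQL NULL purely for illustration:

#define TV_NULL  (-1)   /* stand-in for SQL NULL in this sketch only */
#define TV_FALSE 0
#define TV_TRUE  1

/* Outcome of "x IN (set)" given the facts the generated code checks at
** run time: whether x is NULL, whether the index lookup found a match,
** and whether the set contains any NULL entries. */
static int inOperator(int xIsNull, int foundMatch, int setHasNull){
  if( xIsNull )    return TV_NULL;   /* comparison against NULL yields NULL */
  if( foundMatch ) return TV_TRUE;   /* definite hit                        */
  if( setHasNull ) return TV_NULL;   /* miss, but a NULL might have matched */
  return TV_FALSE;                   /* miss against a NULL-free set        */
}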
*/ - sqlite3ExprCode(pParse, pExpr->pLeft); - addr = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_NotNull, -1, addr+4); /* addr + 0 */ - sqlite3VdbeAddOp(v, OP_Pop, 2, 0); - sqlite3VdbeAddOp(v, OP_Null, 0, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, addr+7); - sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &affinity, 1); /* addr + 4 */ - sqlite3VdbeAddOp(v, OP_Found, pExpr->iTable, addr+7); - sqlite3VdbeAddOp(v, OP_AddImm, -1, 0); /* addr + 6 */ + sqlite3ExprCachePush(pParse); + sqlite3ExprCode(pParse, pExpr->pLeft, target); + j2 = sqlite3VdbeAddOp1(v, OP_IsNull, target); + if( eType==IN_INDEX_ROWID ){ + j3 = sqlite3VdbeAddOp1(v, OP_MustBeInt, target); + j4 = sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, 0, target); + sqlite3VdbeAddOp2(v, OP_Integer, 1, target); + j5 = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeJumpHere(v, j3); + sqlite3VdbeJumpHere(v, j4); + sqlite3VdbeAddOp2(v, OP_Integer, 0, target); + }else{ + r2 = regFree2 = sqlite3GetTempReg(pParse); + /* Create a record and test for set membership. If the set contains + ** the value, then jump to the end of the test code. The target + ** register still contains the true (1) value written to it earlier. + */ + sqlite3VdbeAddOp4(v, OP_MakeRecord, target, 1, r2, &affinity, 1); + sqlite3VdbeAddOp2(v, OP_Integer, 1, target); + j5 = sqlite3VdbeAddOp3(v, OP_Found, pExpr->iTable, 0, r2); + + /* If the set membership test fails, then the result of the + ** "x IN (...)" expression must be either 0 or NULL. If the set + ** contains no NULL values, then the result is 0. If the set + ** contains one or more NULL values, then the result of the + ** expression is also NULL. + */ + if( rNotFound==0 ){ + /* This branch runs if it is known at compile time (now) that + ** the set contains no NULL values. This happens as the result + ** of a "NOT NULL" constraint in the database schema. No need + ** to test the data structure at runtime in this case. + */ + sqlite3VdbeAddOp2(v, OP_Integer, 0, target); + }else{ + /* This block populates the rNotFound register with either NULL + ** or 0 (an integer value). If the data structure contains one + ** or more NULLs, then set rNotFound to NULL. Otherwise, set it + ** to 0. If register rMayHaveNull is already set to some value + ** other than NULL, then the test has already been run and + ** rNotFound is already populated. + */ + static const char nullRecord[] = { 0x02, 0x00 }; + j3 = sqlite3VdbeAddOp1(v, OP_NotNull, rMayHaveNull); + sqlite3VdbeAddOp2(v, OP_Null, 0, rNotFound); + sqlite3VdbeAddOp4(v, OP_Blob, 2, rMayHaveNull, 0, + nullRecord, P4_STATIC); + j4 = sqlite3VdbeAddOp3(v, OP_Found, pExpr->iTable, 0, rMayHaveNull); + sqlite3VdbeAddOp2(v, OP_Integer, 0, rNotFound); + sqlite3VdbeJumpHere(v, j4); + sqlite3VdbeJumpHere(v, j3); + + /* Copy the value of register rNotFound (which is either NULL or 0) + ** into the target register. This will be the result of the + ** expression. + */ + sqlite3VdbeAddOp2(v, OP_Copy, rNotFound, target); + } + } + sqlite3VdbeJumpHere(v, j2); + sqlite3VdbeJumpHere(v, j5); + sqlite3ExprCachePop(pParse, 1); + VdbeComment((v, "end IN expr r%d", target)); break; } #endif + /* + ** x BETWEEN y AND z + ** + ** This is equivalent to + ** + ** x>=y AND x<=z + ** + ** X is stored in pExpr->pLeft. + ** Y is stored in pExpr->pList->a[0].pExpr. + ** Z is stored in pExpr->pList->a[1].pExpr. 
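The comment above spells out the rewrite used for range tests: x BETWEEN y AND z behaves like x>=y AND x<=z, with the X operand evaluated only once and its register reused by both comparisons. The same idea in plain C (hypothetical helper, integers only, NULL handling omitted):

/* evalX() stands in for coding the X operand into a register; the point
** is that it runs exactly once even though X feeds two comparisons. */
static int evalX(void){
  return 7;                          /* imagine an expensive subexpression */
}

static int betweenYandZ(int y, int z){
  int x = evalX();                   /* evaluated once, like the cached register */
  return (x>=y) && (x<=z);           /* x BETWEEN y AND z  ==  x>=y AND x<=z */
}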
+ */ case TK_BETWEEN: { Expr *pLeft = pExpr->pLeft; - struct ExprList_item *pLItem = pExpr->pList->a; + struct ExprList_item *pLItem = pExpr->x.pList->a; Expr *pRight = pLItem->pExpr; - sqlite3ExprCode(pParse, pLeft); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3ExprCode(pParse, pRight); - codeCompare(pParse, pLeft, pRight, OP_Ge, 0, 0); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); + + codeCompareOperands(pParse, pLeft, &r1, ®Free1, + pRight, &r2, ®Free2); + testcase( regFree1==0 ); + testcase( regFree2==0 ); + r3 = sqlite3GetTempReg(pParse); + r4 = sqlite3GetTempReg(pParse); + codeCompare(pParse, pLeft, pRight, OP_Ge, + r1, r2, r3, SQLITE_STOREP2); pLItem++; pRight = pLItem->pExpr; - sqlite3ExprCode(pParse, pRight); - codeCompare(pParse, pLeft, pRight, OP_Le, 0, 0); - sqlite3VdbeAddOp(v, OP_And, 0, 0); + sqlite3ReleaseTempReg(pParse, regFree2); + r2 = sqlite3ExprCodeTemp(pParse, pRight, ®Free2); + testcase( regFree2==0 ); + codeCompare(pParse, pLeft, pRight, OP_Le, r1, r2, r4, SQLITE_STOREP2); + sqlite3VdbeAddOp3(v, OP_And, r3, r4, target); + sqlite3ReleaseTempReg(pParse, r3); + sqlite3ReleaseTempReg(pParse, r4); break; } case TK_UPLUS: { - sqlite3ExprCode(pParse, pExpr->pLeft); - stackChng = 0; + inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); break; } - case TK_CASE: { - int expr_end_label; - int jumpInst; - int nExpr; - int i; - ExprList *pEList; - struct ExprList_item *aListelem; - - assert(pExpr->pList); - assert((pExpr->pList->nExpr % 2) == 0); - assert(pExpr->pList->nExpr > 0); - pEList = pExpr->pList; + + /* + ** Form A: + ** CASE x WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END + ** + ** Form B: + ** CASE WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END + ** + ** Form A is can be transformed into the equivalent form B as follows: + ** CASE WHEN x=e1 THEN r1 WHEN x=e2 THEN r2 ... + ** WHEN x=eN THEN rN ELSE y END + ** + ** X (if it exists) is in pExpr->pLeft. + ** Y is in pExpr->pRight. The Y is also optional. If there is no + ** ELSE clause and no other term matches, then the result of the + ** exprssion is NULL. + ** Ei is in pExpr->pList->a[i*2] and Ri is pExpr->pList->a[i*2+1]. + ** + ** The result of the expression is the Ri for the first matching Ei, + ** or if there is no matching Ei, the ELSE term Y, or if there is + ** no ELSE term, NULL. 
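As described above, a Form A CASE is treated as Form B by comparing the cached X value against each Ei in turn; the first matching WHEN supplies the result, and with no match the ELSE term (or NULL) is used. A hedged sketch of that evaluation order for integer operands, ignoring NULL comparisons and using invented names:

/* Evaluate "CASE x WHEN e[i] THEN r[i] ... ELSE y END".  nPair is the
** number of WHEN/THEN pairs and hasElse says whether an ELSE was given. */
static int caseExpr(int x, int nPair, const int *e, const int *r,
                    int hasElse, int y, int *pIsNull){
  int i;
  *pIsNull = 0;
  for(i=0; i<nPair; i++){
    if( x==e[i] ) return r[i];       /* first matching WHEN wins */
  }
  if( hasElse ) return y;            /* no match: use the ELSE term */
  *pIsNull = 1;                      /* no match and no ELSE: result is NULL */
  return 0;
}

Note how x is supplied once; in the generated code the analogue is coding X into a temporary register and wrapping it in a TK_REGISTER node before the WHEN comparisons are emitted.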
+ */ + default: assert( op==TK_CASE ); { + int endLabel; /* GOTO label for end of CASE stmt */ + int nextCase; /* GOTO label for next WHEN clause */ + int nExpr; /* 2x number of WHEN terms */ + int i; /* Loop counter */ + ExprList *pEList; /* List of WHEN terms */ + struct ExprList_item *aListelem; /* Array of WHEN terms */ + Expr opCompare; /* The X==Ei expression */ + Expr cacheX; /* Cached expression X */ + Expr *pX; /* The X expression */ + Expr *pTest = 0; /* X==Ei (form A) or just Ei (form B) */ + VVA_ONLY( int iCacheLevel = pParse->iCacheLevel; ) + + assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList ); + assert((pExpr->x.pList->nExpr % 2) == 0); + assert(pExpr->x.pList->nExpr > 0); + pEList = pExpr->x.pList; aListelem = pEList->a; nExpr = pEList->nExpr; - expr_end_label = sqlite3VdbeMakeLabel(v); - if( pExpr->pLeft ){ - sqlite3ExprCode(pParse, pExpr->pLeft); + endLabel = sqlite3VdbeMakeLabel(v); + if( (pX = pExpr->pLeft)!=0 ){ + cacheX = *pX; + testcase( pX->op==TK_COLUMN ); + testcase( pX->op==TK_REGISTER ); + cacheX.iTable = sqlite3ExprCodeTemp(pParse, pX, ®Free1); + testcase( regFree1==0 ); + cacheX.op = TK_REGISTER; + opCompare.op = TK_EQ; + opCompare.pLeft = &cacheX; + pTest = &opCompare; } for(i=0; ipLeft ){ - sqlite3VdbeAddOp(v, OP_Dup, 1, 1); - jumpInst = codeCompare(pParse, pExpr->pLeft, aListelem[i].pExpr, - OP_Ne, 0, 1); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3ExprCachePush(pParse); + if( pX ){ + assert( pTest!=0 ); + opCompare.pRight = aListelem[i].pExpr; }else{ - jumpInst = sqlite3VdbeAddOp(v, OP_IfNot, 1, 0); + pTest = aListelem[i].pExpr; } - sqlite3ExprCode(pParse, aListelem[i+1].pExpr); - sqlite3VdbeAddOp(v, OP_Goto, 0, expr_end_label); - sqlite3VdbeJumpHere(v, jumpInst); - } - if( pExpr->pLeft ){ - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + nextCase = sqlite3VdbeMakeLabel(v); + testcase( pTest->op==TK_COLUMN ); + sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL); + testcase( aListelem[i+1].pExpr->op==TK_COLUMN ); + testcase( aListelem[i+1].pExpr->op==TK_REGISTER ); + sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target); + sqlite3VdbeAddOp2(v, OP_Goto, 0, endLabel); + sqlite3ExprCachePop(pParse, 1); + sqlite3VdbeResolveLabel(v, nextCase); } if( pExpr->pRight ){ - sqlite3ExprCode(pParse, pExpr->pRight); + sqlite3ExprCachePush(pParse); + sqlite3ExprCode(pParse, pExpr->pRight, target); + sqlite3ExprCachePop(pParse, 1); }else{ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, target); } - sqlite3VdbeResolveLabel(v, expr_end_label); + assert( db->mallocFailed || pParse->nErr>0 + || pParse->iCacheLevel==iCacheLevel ); + sqlite3VdbeResolveLabel(v, endLabel); break; } #ifndef SQLITE_OMIT_TRIGGER @@ -2058,76 +2651,247 @@ if( !pParse->trigStack ){ sqlite3ErrorMsg(pParse, "RAISE() may only be used within a trigger-program"); - return; + return 0; } - if( pExpr->iColumn!=OE_Ignore ){ - assert( pExpr->iColumn==OE_Rollback || - pExpr->iColumn == OE_Abort || - pExpr->iColumn == OE_Fail ); - sqlite3DequoteExpr(pExpr); - sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, pExpr->iColumn, - (char*)pExpr->token.z, pExpr->token.n); + if( pExpr->affinity!=OE_Ignore ){ + assert( pExpr->affinity==OE_Rollback || + pExpr->affinity == OE_Abort || + pExpr->affinity == OE_Fail ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT, pExpr->affinity, 0, + pExpr->u.zToken, 0); } else { - assert( pExpr->iColumn == OE_Ignore ); - sqlite3VdbeAddOp(v, OP_ContextPop, 0, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, 
pParse->trigStack->ignoreJump); - VdbeComment((v, "# raise(IGNORE)")); + assert( pExpr->affinity == OE_Ignore ); + sqlite3VdbeAddOp2(v, OP_ContextPop, 0, 0); + sqlite3VdbeAddOp2(v, OP_Goto, 0, pParse->trigStack->ignoreJump); + VdbeComment((v, "raise(IGNORE)")); } - stackChng = 0; break; } #endif } + sqlite3ReleaseTempReg(pParse, regFree1); + sqlite3ReleaseTempReg(pParse, regFree2); + return inReg; +} - if( pParse->ckOffset ){ - pParse->ckOffset += stackChng; - assert( pParse->ckOffset ); +/* +** Generate code to evaluate an expression and store the results +** into a register. Return the register number where the results +** are stored. +** +** If the register is a temporary register that can be deallocated, +** then write its number into *pReg. If the result register is not +** a temporary, then set *pReg to zero. +*/ +int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ + int r1 = sqlite3GetTempReg(pParse); + int r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); + if( r2==r1 ){ + *pReg = r1; + }else{ + sqlite3ReleaseTempReg(pParse, r1); + *pReg = 0; } + return r2; } -#ifndef SQLITE_OMIT_TRIGGER /* -** Generate code that evalutes the given expression and leaves the result -** on the stack. See also sqlite3ExprCode(). +** Generate code that will evaluate expression pExpr and store the +** results in register target. The results are guaranteed to appear +** in register target. +*/ +int sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ + int inReg; + + assert( target>0 && target<=pParse->nMem ); + inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); + assert( pParse->pVdbe || pParse->db->mallocFailed ); + if( inReg!=target && pParse->pVdbe ){ + sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target); + } + return target; +} + +/* +** Generate code that evalutes the given expression and puts the result +** in register target. ** -** This routine might also cache the result and modify the pExpr tree -** so that it will make use of the cached result on subsequent evaluations -** rather than evaluate the whole expression again. Trivial expressions are -** not cached. If the expression is cached, its result is stored in a -** memory location. +** Also make a copy of the expression results into another "cache" register +** and modify the expression so that the next time it is evaluated, +** the result is a copy of the cache register. +** +** This routine is used for expressions that are used multiple +** times. They are evaluated once and the results of the expression +** are reused. */ -void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr){ +int sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){ Vdbe *v = pParse->pVdbe; - int iMem; - int addr1, addr2; - if( v==0 ) return; - addr1 = sqlite3VdbeCurrentAddr(v); - sqlite3ExprCode(pParse, pExpr); - addr2 = sqlite3VdbeCurrentAddr(v); - if( addr2>addr1+1 || sqlite3VdbeGetOp(v, addr1)->opcode==OP_Function ){ - iMem = pExpr->iTable = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemStore, iMem, 0); + int inReg; + inReg = sqlite3ExprCode(pParse, pExpr, target); + assert( target>0 ); + /* This routine is called for terms to INSERT or UPDATE. And the only + ** other place where expressions can be converted into TK_REGISTER is + ** in WHERE clause processing. So as currently implemented, there is + ** no way for a TK_REGISTER to exist here. But it seems prudent to + ** keep the ALWAYS() in case the conditions above change with future + ** modifications or enhancements. 
*/ + if( ALWAYS(pExpr->op!=TK_REGISTER) ){ + int iMem; + iMem = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Copy, inReg, iMem); + pExpr->iTable = iMem; pExpr->op = TK_REGISTER; } + return inReg; } + +/* +** Return TRUE if pExpr is an constant expression that is appropriate +** for factoring out of a loop. Appropriate expressions are: +** +** * Any expression that evaluates to two or more opcodes. +** +** * Any OP_Integer, OP_Real, OP_String, OP_Blob, OP_Null, +** or OP_Variable that does not need to be placed in a +** specific register. +** +** There is no point in factoring out single-instruction constant +** expressions that need to be placed in a particular register. +** We could factor them out, but then we would end up adding an +** OP_SCopy instruction to move the value into the correct register +** later. We might as well just use the original instruction and +** avoid the OP_SCopy. +*/ +static int isAppropriateForFactoring(Expr *p){ + if( !sqlite3ExprIsConstantNotJoin(p) ){ + return 0; /* Only constant expressions are appropriate for factoring */ + } + if( (p->flags & EP_FixedDest)==0 ){ + return 1; /* Any constant without a fixed destination is appropriate */ + } + while( p->op==TK_UPLUS ) p = p->pLeft; + switch( p->op ){ +#ifndef SQLITE_OMIT_BLOB_LITERAL + case TK_BLOB: #endif + case TK_VARIABLE: + case TK_INTEGER: + case TK_FLOAT: + case TK_NULL: + case TK_STRING: { + testcase( p->op==TK_BLOB ); + testcase( p->op==TK_VARIABLE ); + testcase( p->op==TK_INTEGER ); + testcase( p->op==TK_FLOAT ); + testcase( p->op==TK_NULL ); + testcase( p->op==TK_STRING ); + /* Single-instruction constants with a fixed destination are + ** better done in-line. If we factor them, they will just end + ** up generating an OP_SCopy to move the value to the destination + ** register. */ + return 0; + } + case TK_UMINUS: { + if( p->pLeft->op==TK_FLOAT || p->pLeft->op==TK_INTEGER ){ + return 0; + } + break; + } + default: { + break; + } + } + return 1; +} + +/* +** If pExpr is a constant expression that is appropriate for +** factoring out of a loop, then evaluate the expression +** into a register and convert the expression into a TK_REGISTER +** expression. +*/ +static int evalConstExpr(Walker *pWalker, Expr *pExpr){ + Parse *pParse = pWalker->pParse; + switch( pExpr->op ){ + case TK_REGISTER: { + return WRC_Prune; + } + case TK_FUNCTION: + case TK_AGG_FUNCTION: + case TK_CONST_FUNC: { + /* The arguments to a function have a fixed destination. + ** Mark them this way to avoid generated unneeded OP_SCopy + ** instructions. + */ + ExprList *pList = pExpr->x.pList; + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + if( pList ){ + int i = pList->nExpr; + struct ExprList_item *pItem = pList->a; + for(; i>0; i--, pItem++){ + if( ALWAYS(pItem->pExpr) ) pItem->pExpr->flags |= EP_FixedDest; + } + } + break; + } + } + if( isAppropriateForFactoring(pExpr) ){ + int r1 = ++pParse->nMem; + int r2; + r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); + if( NEVER(r1!=r2) ) sqlite3ReleaseTempReg(pParse, r1); + pExpr->op = TK_REGISTER; + pExpr->iTable = r2; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Preevaluate constant subexpressions within pExpr and store the +** results in registers. Modify pExpr so that the constant subexpresions +** are TK_REGISTER opcodes that refer to the precomputed values. 
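isAppropriateForFactoring() and evalConstExpr() above hoist constant subexpressions out of a loop body: the constant is coded once into a register and the expression node is rewritten as TK_REGISTER so later references just reuse that register. The ordinary-C analogue puts the "register" in a local computed before the loop (a C compiler would fold this particular constant by itself; the VDBE has no such pass, which is why the factoring is done explicitly):

/* Naive form: the constant subexpression is conceptually evaluated on
** every iteration. */
static double sumSlow(const double *a, int n){
  double s = 0.0;
  int i;
  for(i=0; i<n; i++){
    s += a[i] * (3600.0*24.0);
  }
  return s;
}

/* Factored form: compute the constant once and keep it in a "register". */
static double sumFast(const double *a, int n){
  const double k = 3600.0*24.0;      /* analogous to coding into TK_REGISTER */
  double s = 0.0;
  int i;
  for(i=0; i<n; i++){
    s += a[i] * k;
  }
  return s;
}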
+*/ +void sqlite3ExprCodeConstants(Parse *pParse, Expr *pExpr){ + Walker w; + w.xExprCallback = evalConstExpr; + w.xSelectCallback = 0; + w.pParse = pParse; + sqlite3WalkExpr(&w, pExpr); +} + /* ** Generate code that pushes the value of every element of the given -** expression list onto the stack. +** expression list into a sequence of registers beginning at target. ** -** Return the number of elements pushed onto the stack. +** Return the number of elements evaluated. */ int sqlite3ExprCodeExprList( Parse *pParse, /* Parsing context */ - ExprList *pList /* The expression list to be coded */ + ExprList *pList, /* The expression list to be coded */ + int target, /* Where to write results */ + int doHardCopy /* Make a hard copy of every element */ ){ struct ExprList_item *pItem; int i, n; - if( pList==0 ) return 0; + assert( pList!=0 ); + assert( target>0 ); n = pList->nExpr; - for(pItem=pList->a, i=n; i>0; i--, pItem++){ - sqlite3ExprCode(pParse, pItem->pExpr); + for(pItem=pList->a, i=0; iiAlias ){ + int iReg = codeAlias(pParse, pItem->iAlias, pItem->pExpr, target+i); + Vdbe *v = sqlite3GetVdbe(pParse); + if( iReg!=target+i ){ + sqlite3VdbeAddOp2(v, OP_SCopy, iReg, target+i); + } + }else{ + sqlite3ExprCode(pParse, pItem->pExpr, target+i); + } + if( doHardCopy && !pParse->db->mallocFailed ){ + sqlite3ExprHardCopy(pParse, target, n); + } } return n; } @@ -2138,7 +2902,7 @@ ** continues straight thru if the expression is false. ** ** If the expression evaluates to NULL (neither true nor false), then -** take the jump if the jumpIfNull flag is true. +** take the jump if the jumpIfNull flag is SQLITE_JUMPIFNULL. ** ** This code depends on the fact that certain token values (ex: TK_EQ) ** are the same as opcode values (ex: OP_Eq) that implement the corresponding @@ -2149,23 +2913,33 @@ void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ Vdbe *v = pParse->pVdbe; int op = 0; - int ckOffset = pParse->ckOffset; - if( v==0 || pExpr==0 ) return; + int regFree1 = 0; + int regFree2 = 0; + int r1, r2; + + assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); + if( NEVER(v==0) ) return; /* Existance of VDBE checked by caller */ + if( NEVER(pExpr==0) ) return; /* No way this can happen */ op = pExpr->op; switch( op ){ case TK_AND: { int d2 = sqlite3VdbeMakeLabel(v); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2, !jumpIfNull); + testcase( jumpIfNull==0 ); + sqlite3ExprCachePush(pParse); + sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL); sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3VdbeResolveLabel(v, d2); + sqlite3ExprCachePop(pParse, 1); break; } case TK_OR: { + testcase( jumpIfNull==0 ); sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); break; } case TK_NOT: { + testcase( jumpIfNull==0 ); sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); break; } @@ -2181,50 +2955,75 @@ assert( TK_GE==OP_Ge ); assert( TK_EQ==OP_Eq ); assert( TK_NE==OP_Ne ); - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3ExprCode(pParse, pExpr->pRight); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, dest, jumpIfNull); + testcase( op==TK_LT ); + testcase( op==TK_LE ); + testcase( op==TK_GT ); + testcase( op==TK_GE ); + testcase( op==TK_EQ ); + testcase( op==TK_NE ); + testcase( jumpIfNull==0 ); + codeCompareOperands(pParse, pExpr->pLeft, &r1, ®Free1, + pExpr->pRight, &r2, ®Free2); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, + r1, r2, dest, jumpIfNull); + testcase( 
regFree1==0 ); + testcase( regFree2==0 ); break; } case TK_ISNULL: case TK_NOTNULL: { assert( TK_ISNULL==OP_IsNull ); assert( TK_NOTNULL==OP_NotNull ); - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3VdbeAddOp(v, op, 1, dest); + testcase( op==TK_ISNULL ); + testcase( op==TK_NOTNULL ); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeAddOp2(v, op, r1, dest); + testcase( regFree1==0 ); break; } case TK_BETWEEN: { - /* The expression "x BETWEEN y AND z" is implemented as: + /* x BETWEEN y AND z + ** + ** Is equivalent to + ** + ** x>=y AND x<=z ** - ** 1 IF (x < y) GOTO 3 - ** 2 IF (x <= z) GOTO - ** 3 ... + ** Code it as such, taking care to do the common subexpression + ** elementation of x. */ - int addr; - Expr *pLeft = pExpr->pLeft; - Expr *pRight = pExpr->pList->a[0].pExpr; - sqlite3ExprCode(pParse, pLeft); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3ExprCode(pParse, pRight); - addr = codeCompare(pParse, pLeft, pRight, OP_Lt, 0, !jumpIfNull); - - pRight = pExpr->pList->a[1].pExpr; - sqlite3ExprCode(pParse, pRight); - codeCompare(pParse, pLeft, pRight, OP_Le, dest, jumpIfNull); - - sqlite3VdbeAddOp(v, OP_Integer, 0, 0); - sqlite3VdbeJumpHere(v, addr); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + Expr exprAnd; + Expr compLeft; + Expr compRight; + Expr exprX; + + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + exprX = *pExpr->pLeft; + exprAnd.op = TK_AND; + exprAnd.pLeft = &compLeft; + exprAnd.pRight = &compRight; + compLeft.op = TK_GE; + compLeft.pLeft = &exprX; + compLeft.pRight = pExpr->x.pList->a[0].pExpr; + compRight.op = TK_LE; + compRight.pLeft = &exprX; + compRight.pRight = pExpr->x.pList->a[1].pExpr; + exprX.iTable = sqlite3ExprCodeTemp(pParse, &exprX, ®Free1); + testcase( regFree1==0 ); + exprX.op = TK_REGISTER; + testcase( jumpIfNull==0 ); + sqlite3ExprIfTrue(pParse, &exprAnd, dest, jumpIfNull); break; } default: { - sqlite3ExprCode(pParse, pExpr); - sqlite3VdbeAddOp(v, OP_If, jumpIfNull, dest); + r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); + sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0); + testcase( regFree1==0 ); + testcase( jumpIfNull==0 ); break; } } - pParse->ckOffset = ckOffset; + sqlite3ReleaseTempReg(pParse, regFree1); + sqlite3ReleaseTempReg(pParse, regFree2); } /* @@ -2233,13 +3032,19 @@ ** continues straight thru if the expression is true. ** ** If the expression evaluates to NULL (neither true nor false) then -** jump if jumpIfNull is true or fall through if jumpIfNull is false. +** jump if jumpIfNull is SQLITE_JUMPIFNULL or fall through if jumpIfNull +** is 0. 
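Both jump generators in this hunk share the jumpIfNull convention restated above: a NULL outcome takes the jump only when SQLITE_JUMPIFNULL was passed, otherwise control falls through. A small model of that decision, with -1 standing in for an SQL NULL result (illustrative only):

#define RES_NULL (-1)    /* stand-in for a NULL test result in this sketch */

/* Decide whether the "jump if false" branch is taken for a WHERE-style
** test given the three-valued result of the expression. */
static int takesFalseJump(int result, int jumpIfNull){
  if( result==RES_NULL ) return jumpIfNull!=0;
  return result==0;
}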
*/ void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ Vdbe *v = pParse->pVdbe; int op = 0; - int ckOffset = pParse->ckOffset; - if( v==0 || pExpr==0 ) return; + int regFree1 = 0; + int regFree2 = 0; + int r1, r2; + + assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); + if( NEVER(v==0) ) return; /* Existance of VDBE checked by caller */ + if( pExpr==0 ) return; /* The value of pExpr->op and op are related as follows: ** @@ -2274,15 +3079,19 @@ switch( pExpr->op ){ case TK_AND: { + testcase( jumpIfNull==0 ); sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); break; } case TK_OR: { int d2 = sqlite3VdbeMakeLabel(v); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, !jumpIfNull); + testcase( jumpIfNull==0 ); + sqlite3ExprCachePush(pParse); + sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL); sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3VdbeResolveLabel(v, d2); + sqlite3ExprCachePop(pParse, 1); break; } case TK_NOT: { @@ -2295,47 +3104,73 @@ case TK_GE: case TK_NE: case TK_EQ: { - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3ExprCode(pParse, pExpr->pRight); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, dest, jumpIfNull); + testcase( op==TK_LT ); + testcase( op==TK_LE ); + testcase( op==TK_GT ); + testcase( op==TK_GE ); + testcase( op==TK_EQ ); + testcase( op==TK_NE ); + testcase( jumpIfNull==0 ); + codeCompareOperands(pParse, pExpr->pLeft, &r1, ®Free1, + pExpr->pRight, &r2, ®Free2); + codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, + r1, r2, dest, jumpIfNull); + testcase( regFree1==0 ); + testcase( regFree2==0 ); break; } case TK_ISNULL: case TK_NOTNULL: { - sqlite3ExprCode(pParse, pExpr->pLeft); - sqlite3VdbeAddOp(v, op, 1, dest); + testcase( op==TK_ISNULL ); + testcase( op==TK_NOTNULL ); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeAddOp2(v, op, r1, dest); + testcase( regFree1==0 ); break; } case TK_BETWEEN: { - /* The expression is "x BETWEEN y AND z". It is implemented as: + /* x BETWEEN y AND z ** - ** 1 IF (x >= y) GOTO 3 - ** 2 GOTO - ** 3 IF (x > z) GOTO + ** Is equivalent to + ** + ** x>=y AND x<=z + ** + ** Code it as such, taking care to do the common subexpression + ** elementation of x. 
*/ - int addr; - Expr *pLeft = pExpr->pLeft; - Expr *pRight = pExpr->pList->a[0].pExpr; - sqlite3ExprCode(pParse, pLeft); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3ExprCode(pParse, pRight); - addr = sqlite3VdbeCurrentAddr(v); - codeCompare(pParse, pLeft, pRight, OP_Ge, addr+3, !jumpIfNull); - - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, dest); - pRight = pExpr->pList->a[1].pExpr; - sqlite3ExprCode(pParse, pRight); - codeCompare(pParse, pLeft, pRight, OP_Gt, dest, jumpIfNull); + Expr exprAnd; + Expr compLeft; + Expr compRight; + Expr exprX; + + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + exprX = *pExpr->pLeft; + exprAnd.op = TK_AND; + exprAnd.pLeft = &compLeft; + exprAnd.pRight = &compRight; + compLeft.op = TK_GE; + compLeft.pLeft = &exprX; + compLeft.pRight = pExpr->x.pList->a[0].pExpr; + compRight.op = TK_LE; + compRight.pLeft = &exprX; + compRight.pRight = pExpr->x.pList->a[1].pExpr; + exprX.iTable = sqlite3ExprCodeTemp(pParse, &exprX, ®Free1); + testcase( regFree1==0 ); + exprX.op = TK_REGISTER; + testcase( jumpIfNull==0 ); + sqlite3ExprIfFalse(pParse, &exprAnd, dest, jumpIfNull); break; } default: { - sqlite3ExprCode(pParse, pExpr); - sqlite3VdbeAddOp(v, OP_IfNot, jumpIfNull, dest); + r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); + sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0); + testcase( regFree1==0 ); + testcase( jumpIfNull==0 ); break; } } - pParse->ckOffset = ckOffset; + sqlite3ReleaseTempReg(pParse, regFree1); + sqlite3ReleaseTempReg(pParse, regFree2); } /* @@ -2357,27 +3192,35 @@ if( pA==0||pB==0 ){ return pB==pA; } - if( pA->op!=pB->op ) return 0; + assert( !ExprHasAnyProperty(pA, EP_TokenOnly|EP_Reduced) ); + assert( !ExprHasAnyProperty(pB, EP_TokenOnly|EP_Reduced) ); + if( ExprHasProperty(pA, EP_xIsSelect) || ExprHasProperty(pB, EP_xIsSelect) ){ + return 0; + } if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 0; + if( pA->op!=pB->op ) return 0; if( !sqlite3ExprCompare(pA->pLeft, pB->pLeft) ) return 0; if( !sqlite3ExprCompare(pA->pRight, pB->pRight) ) return 0; - if( pA->pList ){ - if( pB->pList==0 ) return 0; - if( pA->pList->nExpr!=pB->pList->nExpr ) return 0; - for(i=0; ipList->nExpr; i++){ - if( !sqlite3ExprCompare(pA->pList->a[i].pExpr, pB->pList->a[i].pExpr) ){ - return 0; - } + + if( pA->x.pList && pB->x.pList ){ + if( pA->x.pList->nExpr!=pB->x.pList->nExpr ) return 0; + for(i=0; ix.pList->nExpr; i++){ + Expr *pExprA = pA->x.pList->a[i].pExpr; + Expr *pExprB = pB->x.pList->a[i].pExpr; + if( !sqlite3ExprCompare(pExprA, pExprB) ) return 0; } - }else if( pB->pList ){ + }else if( pA->x.pList || pB->x.pList ){ return 0; } - if( pA->pSelect || pB->pSelect ) return 0; + if( pA->iTable!=pB->iTable || pA->iColumn!=pB->iColumn ) return 0; - if( pA->op!=TK_COLUMN && pA->token.z ){ - if( pB->token.z==0 ) return 0; - if( pB->token.n!=pA->token.n ) return 0; - if( sqlite3StrNICmp((char*)pA->token.z,(char*)pB->token.z,pB->token.n)!=0 ){ + if( ExprHasProperty(pA, EP_IntValue) ){ + if( !ExprHasProperty(pB, EP_IntValue) || pA->u.iValue!=pB->u.iValue ){ + return 0; + } + }else if( pA->op!=TK_COLUMN && pA->u.zToken ){ + if( ExprHasProperty(pB, EP_IntValue) || NEVER(pB->u.zToken==0) ) return 0; + if( sqlite3StrICmp(pA->u.zToken,pB->u.zToken)!=0 ){ return 0; } } @@ -2389,9 +3232,10 @@ ** Add a new element to the pAggInfo->aCol[] array. Return the index of ** the new element. Return a negative number if malloc fails. 
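sqlite3ExprCompare() above decides whether two parse trees denote the same expression by walking them structurally: the operators must agree, the left and right subtrees must compare equal, the argument lists must match pairwise, and any attached token must match. A minimal model of that shape of comparison over a toy node type, not the SQLite Expr structure:

#include <string.h>

typedef struct Node {
  int op;                       /* operator code                        */
  struct Node *pLeft, *pRight;  /* operand subtrees, possibly NULL      */
  const char *zToken;           /* identifier or literal text, or NULL  */
} Node;

/* Return 1 if the two trees are structurally identical, else 0.
** Two NULL pointers compare equal; NULL against a node does not. */
static int nodeCompare(const Node *a, const Node *b){
  if( a==0 || b==0 ) return a==b;
  if( a->op!=b->op ) return 0;
  if( !nodeCompare(a->pLeft,  b->pLeft)  ) return 0;
  if( !nodeCompare(a->pRight, b->pRight) ) return 0;
  if( (a->zToken==0)!=(b->zToken==0) ) return 0;
  if( a->zToken && strcmp(a->zToken, b->zToken)!=0 ) return 0;
  return 1;
}

The sketch uses a case-sensitive strcmp() for brevity; the patch switches the real token comparison to sqlite3StrICmp().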
*/ -static int addAggInfoColumn(AggInfo *pInfo){ +static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){ int i; pInfo->aCol = sqlite3ArrayAllocate( + db, pInfo->aCol, sizeof(pInfo->aCol[0]), 3, @@ -2406,9 +3250,10 @@ ** Add a new element to the pAggInfo->aFunc[] array. Return the index of ** the new element. Return a negative number if malloc fails. */ -static int addAggInfoFunc(AggInfo *pInfo){ +static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){ int i; pInfo->aFunc = sqlite3ArrayAllocate( + db, pInfo->aFunc, sizeof(pInfo->aFunc[0]), 3, @@ -2420,29 +3265,29 @@ } /* -** This is an xFunc for walkExprTree() used to implement -** sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates +** This is the xExprCallback for a tree walker. It is used to +** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates ** for additional information. -** -** This routine analyzes the aggregate function at pExpr. */ -static int analyzeAggregate(void *pArg, Expr *pExpr){ +static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ int i; - NameContext *pNC = (NameContext *)pArg; + NameContext *pNC = pWalker->u.pNC; Parse *pParse = pNC->pParse; SrcList *pSrcList = pNC->pSrcList; AggInfo *pAggInfo = pNC->pAggInfo; - switch( pExpr->op ){ case TK_AGG_COLUMN: case TK_COLUMN: { + testcase( pExpr->op==TK_AGG_COLUMN ); + testcase( pExpr->op==TK_COLUMN ); /* Check to see if the column is in one of the tables in the FROM ** clause of the aggregate query */ - if( pSrcList ){ + if( ALWAYS(pSrcList!=0) ){ struct SrcList_item *pItem = pSrcList->a; for(i=0; inSrc; i++, pItem++){ struct AggInfo_col *pCol; + assert( !ExprHasAnyProperty(pExpr, EP_TokenOnly|EP_Reduced) ); if( pExpr->iTable==pItem->iCursor ){ /* If we reach this point, it means that pExpr refers to a table ** that is in the FROM clause of the aggregate query. @@ -2458,12 +3303,14 @@ break; } } - if( k>=pAggInfo->nColumn && (k = addAggInfoColumn(pAggInfo))>=0 ){ + if( (k>=pAggInfo->nColumn) + && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 + ){ pCol = &pAggInfo->aCol[k]; pCol->pTab = pExpr->pTab; pCol->iTable = pExpr->iTable; pCol->iColumn = pExpr->iColumn; - pCol->iMem = pParse->nMem++; + pCol->iMem = ++pParse->nMem; pCol->iSorterColumn = -1; pCol->pExpr = pExpr; if( pAggInfo->pGroupBy ){ @@ -2489,14 +3336,15 @@ ** Convert the pExpr to be a TK_AGG_COLUMN referring to that ** pAggInfo->aCol[] entry. */ + ExprSetIrreducible(pExpr); pExpr->pAggInfo = pAggInfo; pExpr->op = TK_AGG_COLUMN; - pExpr->iAgg = k; + pExpr->iAgg = (i16)k; break; } /* endif pExpr->iTable==pItem->iCursor */ } /* end loop over pSrcList */ } - return 1; + return WRC_Prune; } case TK_AGG_FUNCTION: { /* The pNC->nDepth==0 test causes aggregate functions in subqueries @@ -2515,14 +3363,16 @@ /* pExpr is original. Make a new entry in pAggInfo->aFunc[] */ u8 enc = ENC(pParse->db); - i = addAggInfoFunc(pAggInfo); + i = addAggInfoFunc(pParse->db, pAggInfo); if( i>=0 ){ + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); pItem = &pAggInfo->aFunc[i]; pItem->pExpr = pExpr; - pItem->iMem = pParse->nMem++; + pItem->iMem = ++pParse->nMem; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); pItem->pFunc = sqlite3FindFunction(pParse->db, - (char*)pExpr->token.z, pExpr->token.n, - pExpr->pList ? pExpr->pList->nExpr : 0, enc, 0); + pExpr->u.zToken, sqlite3Strlen30(pExpr->u.zToken), + pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0, enc, 0); if( pExpr->flags & EP_Distinct ){ pItem->iDistinct = pParse->nTab++; }else{ @@ -2532,23 +3382,26 @@ } /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry */ - pExpr->iAgg = i; + assert( !ExprHasAnyProperty(pExpr, EP_TokenOnly|EP_Reduced) ); + ExprSetIrreducible(pExpr); + pExpr->iAgg = (i16)i; pExpr->pAggInfo = pAggInfo; - return 1; + return WRC_Prune; } } } - - /* Recursively walk subqueries looking for TK_COLUMN nodes that need - ** to be changed to TK_AGG_COLUMN. But increment nDepth so that - ** TK_AGG_FUNCTION nodes in subqueries will be unchanged. - */ - if( pExpr->pSelect ){ + return WRC_Continue; +} +static int analyzeAggregatesInSelect(Walker *pWalker, Select *pSelect){ + NameContext *pNC = pWalker->u.pNC; + if( pNC->nDepth==0 ){ pNC->nDepth++; - walkSelectExpr(pExpr->pSelect, analyzeAggregate, pNC); + sqlite3WalkSelect(pWalker, pSelect); pNC->nDepth--; + return WRC_Prune; + }else{ + return WRC_Continue; } - return 0; } /* @@ -2557,15 +3410,15 @@ ** Make additional entries to the pParse->aAgg[] array as necessary. ** ** This routine should only be called after the expression has been -** analyzed by sqlite3ExprResolveNames(). -** -** If errors are seen, leave an error message in zErrMsg and return -** the number of errors. +** analyzed by sqlite3ResolveExprNames(). */ -int sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){ - int nErr = pNC->pParse->nErr; - walkExprTree(pExpr, analyzeAggregate, pNC); - return pNC->pParse->nErr - nErr; +void sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){ + Walker w; + w.xExprCallback = analyzeAggregate; + w.xSelectCallback = analyzeAggregatesInSelect; + w.u.pNC = pNC; + assert( pNC->pSrcList!=0 ); + sqlite3WalkExpr(&w, pExpr); } /* @@ -2574,14 +3427,67 @@ ** ** If an error is found, the analysis is cut short. */ -int sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){ +void sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){ struct ExprList_item *pItem; int i; - int nErr = 0; if( pList ){ - for(pItem=pList->a, i=0; nErr==0 && inExpr; i++, pItem++){ - nErr += sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr); + for(pItem=pList->a, i=0; inExpr; i++, pItem++){ + sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr); + } + } +} + +/* +** Allocate a single new register for use to hold some intermediate result. +*/ +int sqlite3GetTempReg(Parse *pParse){ + if( pParse->nTempReg==0 ){ + return ++pParse->nMem; + } + return pParse->aTempReg[--pParse->nTempReg]; +} + +/* +** Deallocate a register, making available for reuse for some other +** purpose. +** +** If a register is currently being used by the column cache, then +** the dallocation is deferred until the column cache line that uses +** the register becomes stale. 
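sqlite3GetTempReg() and sqlite3ReleaseTempReg(), shown here, keep a small free list of scratch registers: allocation pops a previously released register when one is available and otherwise grows pParse->nMem, and release pushes the register back (deferred, as the comment says, while a column cache line still refers to it). A standalone sketch of that allocate/recycle pattern with invented globals:

#define N_TEMP 8

static int aTempReg[N_TEMP];   /* stack of registers available for reuse */
static int nTempReg = 0;       /* number of entries on that stack        */
static int nMem = 0;           /* highest register number handed out     */

/* Get a scratch register, reusing a released one when possible. */
static int getTempReg(void){
  if( nTempReg>0 ) return aTempReg[--nTempReg];
  return ++nMem;
}

/* Return a scratch register to the pool (quietly dropped if full). */
static void releaseTempReg(int iReg){
  if( iReg && nTempReg<N_TEMP ){
    aTempReg[nTempReg++] = iReg;
  }
}

Typical use mirrors the calls throughout this file: r = getTempReg(); emit code that writes r; releaseTempReg(r).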
+*/ +void sqlite3ReleaseTempReg(Parse *pParse, int iReg){ + if( iReg && pParse->nTempRegaTempReg) ){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; iiReg==iReg ){ + p->tempReg = 1; + return; + } } + pParse->aTempReg[pParse->nTempReg++] = iReg; + } +} + +/* +** Allocate or deallocate a block of nReg consecutive registers +*/ +int sqlite3GetTempRange(Parse *pParse, int nReg){ + int i, n; + i = pParse->iRangeReg; + n = pParse->nRangeReg; + if( nReg<=n && !usedAsColumnCache(pParse, i, i+n-1) ){ + pParse->iRangeReg += nReg; + pParse->nRangeReg -= nReg; + }else{ + i = pParse->nMem+1; + pParse->nMem += nReg; + } + return i; +} +void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){ + if( nReg>pParse->nRangeReg ){ + pParse->nRangeReg = nReg; + pParse->iRangeReg = iReg; } - return nErr; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/fault.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/fault.c --- sqlite3-3.4.2/src/fault.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/fault.c 2008-09-02 01:52:52.000000000 +0100 @@ -0,0 +1,91 @@ +/* +** 2008 Jan 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** $Id: fault.c,v 1.11 2008/09/02 00:52:52 drh Exp $ +*/ + +/* +** This file contains code to support the concept of "benign" +** malloc failures (when the xMalloc() or xRealloc() method of the +** sqlite3_mem_methods structure fails to allocate a block of memory +** and returns 0). +** +** Most malloc failures are non-benign. After they occur, SQLite +** abandons the current operation and returns an error code (usually +** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily +** fatal. For example, if a malloc fails while resizing a hash table, this +** is completely recoverable simply by not carrying out the resize. The +** hash table will continue to function normally. So a malloc failure +** during a hash table resize is a benign fault. +*/ + +#include "sqliteInt.h" + +#ifndef SQLITE_OMIT_BUILTIN_TEST + +/* +** Global variables. +*/ +typedef struct BenignMallocHooks BenignMallocHooks; +static SQLITE_WSD struct BenignMallocHooks { + void (*xBenignBegin)(void); + void (*xBenignEnd)(void); +} sqlite3Hooks = { 0, 0 }; + +/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks +** structure. If writable static data is unsupported on the target, +** we have to locate the state vector at run-time. In the more common +** case where writable static data is supported, wsdHooks can refer directly +** to the "sqlite3Hooks" state vector declared above. +*/ +#ifdef SQLITE_OMIT_WSD +# define wsdHooksInit \ + BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks) +# define wsdHooks x[0] +#else +# define wsdHooksInit +# define wsdHooks sqlite3Hooks +#endif + + +/* +** Register hooks to call when sqlite3BeginBenignMalloc() and +** sqlite3EndBenignMalloc() are called, respectively. +*/ +void sqlite3BenignMallocHooks( + void (*xBenignBegin)(void), + void (*xBenignEnd)(void) +){ + wsdHooksInit; + wsdHooks.xBenignBegin = xBenignBegin; + wsdHooks.xBenignEnd = xBenignEnd; +} + +/* +** This (sqlite3EndBenignMalloc()) is called by SQLite code to indicate that +** subsequent malloc failures are benign. 
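The new fault.c above introduces hooks that bracket allocations whose failure is recoverable, the example given being a hash-table resize that can simply be skipped. The caller-side pattern looks roughly like the sketch below; beginBenign() and endBenign() stand in for sqlite3BeginBenignMalloc() and sqlite3EndBenignMalloc(), and the table type and resize logic are invented.

#include <stdlib.h>

static void beginBenign(void){ /* e.g. tell a test harness not to inject a fatal fault */ }
static void endBenign(void){ }

typedef struct Table { void **aSlot; int nSlot; } Table;

/* Grow the table if memory allows; if the allocation fails we keep the
** old, smaller table, so the failure is "benign". */
static void maybeResize(Table *p, int nNew){
  void **aNew;
  beginBenign();
  aNew = calloc((size_t)nNew, sizeof(void*));
  endBenign();
  if( aNew==0 ) return;            /* recoverable: stay at the old size */
  /* ... rehash p->aSlot[0..p->nSlot-1] into aNew here ... */
  free(p->aSlot);
  p->aSlot = aNew;
  p->nSlot = nNew;
}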
A call to sqlite3EndBenignMalloc() +** indicates that subsequent malloc failures are non-benign. +*/ +void sqlite3BeginBenignMalloc(void){ + wsdHooksInit; + if( wsdHooks.xBenignBegin ){ + wsdHooks.xBenignBegin(); + } +} +void sqlite3EndBenignMalloc(void){ + wsdHooksInit; + if( wsdHooks.xBenignEnd ){ + wsdHooks.xBenignEnd(); + } +} + +#endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/func.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/func.c --- sqlite3-3.4.2/src/func.c 2007-07-30 21:36:28.000000000 +0100 +++ sqlite3-3.6.16/src/func.c 2009-06-25 12:45:57.000000000 +0100 @@ -16,16 +16,12 @@ ** sqliteRegisterBuildinFunctions() found at the bottom of the file. ** All other code has file scope. ** -** $Id: func.c,v 1.163 2007/07/26 06:50:06 danielk1977 Exp $ +** $Id: func.c,v 1.239 2009/06/19 16:44:41 drh Exp $ */ #include "sqliteInt.h" -#include -/* #include */ #include #include #include "vdbeInt.h" -#include "os.h" - /* ** Return the collating function associated with a function. @@ -47,7 +43,7 @@ int iBest; CollSeq *pColl; - if( argc==0 ) return; + assert( argc>1 ); mask = sqlite3_user_data(context)==0 ? 0 : -1; pColl = sqlite3GetFuncCollSeq(context); assert( pColl ); @@ -57,6 +53,7 @@ for(i=1; i=0 ){ + testcase( mask==0 ); iBest = i; } } @@ -68,16 +65,17 @@ */ static void typeofFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ const char *z = 0; + UNUSED_PARAMETER(NotUsed); switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_NULL: z = "null"; break; case SQLITE_INTEGER: z = "integer"; break; case SQLITE_TEXT: z = "text"; break; case SQLITE_FLOAT: z = "real"; break; case SQLITE_BLOB: z = "blob"; break; + default: z = "null"; break; } sqlite3_result_text(context, z, -1, SQLITE_STATIC); } @@ -94,6 +92,7 @@ int len; assert( argc==1 ); + UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_BLOB: case SQLITE_INTEGER: @@ -124,6 +123,7 @@ */ static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ assert( argc==1 ); + UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_INTEGER: { i64 iVal = sqlite3_value_int64(argv[0]); @@ -170,8 +170,14 @@ int len; int p0type; i64 p1, p2; + int negP2 = 0; - assert( argc==3 ); + assert( argc==3 || argc==2 ); + if( sqlite3_value_type(argv[1])==SQLITE_NULL + || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) + ){ + return; + } p0type = sqlite3_value_type(argv[0]); if( p0type==SQLITE_BLOB ){ len = sqlite3_value_bytes(argv[0]); @@ -187,18 +193,38 @@ } } p1 = sqlite3_value_int(argv[1]); - p2 = sqlite3_value_int(argv[2]); + if( argc==3 ){ + p2 = sqlite3_value_int(argv[2]); + if( p2<0 ){ + p2 = -p2; + negP2 = 1; + } + }else{ + p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; + } if( p1<0 ){ p1 += len; if( p1<0 ){ p2 += p1; + if( p2<0 ) p2 = 0; p1 = 0; } }else if( p1>0 ){ p1--; + }else if( p2>0 ){ + p2--; } + if( negP2 ){ + p1 -= p2; + if( p1<0 ){ + p2 += p1; + p1 = 0; + } + } + assert( p1>=0 && p2>=0 ); if( p1+p2>len ){ p2 = len-p1; + if( p2<0 ) p2 = 0; } if( p0type!=SQLITE_BLOB ){ while( *z && p1 ){ @@ -208,20 +234,20 @@ for(z2=z; *z2 && p2; p2--){ SQLITE_SKIP_UTF8(z2); } - sqlite3_result_text(context, (char*)z, z2-z, SQLITE_TRANSIENT); + sqlite3_result_text(context, (char*)z, (int)(z2-z), SQLITE_TRANSIENT); }else{ - if( p2<0 ) p2 = 0; - sqlite3_result_blob(context, (char*)&z[p1], p2, SQLITE_TRANSIENT); + sqlite3_result_blob(context, (char*)&z[p1], (int)p2, SQLITE_TRANSIENT); } } /* ** Implementation of the round() 
function */ +#ifndef SQLITE_OMIT_FLOATING_POINT static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ int n = 0; double r; - char zBuf[500]; /* larger than the %f representation of the largest double */ + char *zBuf; assert( argc==1 || argc==2 ); if( argc==2 ){ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; @@ -231,9 +257,40 @@ } if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; r = sqlite3_value_double(argv[0]); - sqlite3_snprintf(sizeof(zBuf),zBuf,"%.*f",n,r); - sqlite3AtoF(zBuf, &r); - sqlite3_result_double(context, r); + zBuf = sqlite3_mprintf("%.*f",n,r); + if( zBuf==0 ){ + sqlite3_result_error_nomem(context); + }else{ + sqlite3AtoF(zBuf, &r); + sqlite3_free(zBuf); + sqlite3_result_double(context, r); + } +} +#endif + +/* +** Allocate nByte bytes of space using sqlite3_malloc(). If the +** allocation fails, call sqlite3_result_error_nomem() to notify +** the database handle that malloc() has failed and return NULL. +** If nByte is larger than the maximum string or blob length, then +** raise an SQLITE_TOOBIG exception and return NULL. +*/ +static void *contextMalloc(sqlite3_context *context, i64 nByte){ + char *z; + sqlite3 *db = sqlite3_context_db_handle(context); + assert( nByte>0 ); + testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] ); + testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); + if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ + sqlite3_result_error_toobig(context); + z = 0; + }else{ + z = sqlite3Malloc((int)nByte); + if( !z ){ + sqlite3_result_error_nomem(context); + } + } + return z; } /* @@ -243,39 +300,39 @@ char *z1; const char *z2; int i, n; - if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; + UNUSED_PARAMETER(argc); z2 = (char*)sqlite3_value_text(argv[0]); n = sqlite3_value_bytes(argv[0]); /* Verify that the call to _bytes() does not invalidate the _text() pointer */ assert( z2==(char*)sqlite3_value_text(argv[0]) ); if( z2 ){ - z1 = sqlite3_malloc(n+1); + z1 = contextMalloc(context, ((i64)n)+1); if( z1 ){ memcpy(z1, z2, n+1); for(i=0; z1[i]; i++){ - z1[i] = toupper(z1[i]); + z1[i] = (char)sqlite3Toupper(z1[i]); } sqlite3_result_text(context, z1, -1, sqlite3_free); } } } static void lowerFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - char *z1; + u8 *z1; const char *z2; int i, n; - if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; + UNUSED_PARAMETER(argc); z2 = (char*)sqlite3_value_text(argv[0]); n = sqlite3_value_bytes(argv[0]); /* Verify that the call to _bytes() does not invalidate the _text() pointer */ assert( z2==(char*)sqlite3_value_text(argv[0]) ); if( z2 ){ - z1 = sqlite3_malloc(n+1); + z1 = contextMalloc(context, ((i64)n)+1); if( z1 ){ memcpy(z1, z2, n+1); for(i=0; z1[i]; i++){ - z1[i] = tolower(z1[i]); + z1[i] = sqlite3Tolower(z1[i]); } - sqlite3_result_text(context, z1, -1, sqlite3_free); + sqlite3_result_text(context, (char *)z1, -1, sqlite3_free); } } } @@ -304,13 +361,23 @@ */ static void randomFunc( sqlite3_context *context, - int argc, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ sqlite_int64 r; - sqlite3Randomness(sizeof(r), &r); - if( (r<<1)==0 ) r = 0; /* Prevent 0x8000.... as the result so that we */ - /* can always do abs() of the result */ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + sqlite3_randomness(sizeof(r), &r); + if( r<0 ){ + /* We need to prevent a random number of 0x8000000000000000 + ** (or -9223372036854775808) since when you do abs() of that + ** number of you get the same value back again. 
To do this + ** in a way that is testable, mask the sign bit off of negative + ** values, resulting in a positive value. Then take the + ** 2s complement of that positive value. The end result can + ** therefore be no less than -9223372036854775807. + */ + r = -(r ^ (((sqlite3_int64)1)<<63)); + } sqlite3_result_int64(context, r); } @@ -326,18 +393,15 @@ int n; unsigned char *p; assert( argc==1 ); + UNUSED_PARAMETER(argc); n = sqlite3_value_int(argv[0]); if( n<1 ){ n = 1; } - if( n>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - p = sqliteMalloc(n); + p = contextMalloc(context, n); if( p ){ - sqlite3Randomness(n, p); - sqlite3_result_blob(context, (char*)p, n, sqlite3FreeX); + sqlite3_randomness(n, p); + sqlite3_result_blob(context, (char*)p, n, sqlite3_free); } } @@ -347,10 +411,11 @@ */ static void last_insert_rowid( sqlite3_context *context, - int arg, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); + UNUSED_PARAMETER2(NotUsed, NotUsed2); sqlite3_result_int64(context, sqlite3_last_insert_rowid(db)); } @@ -360,10 +425,11 @@ */ static void changes( sqlite3_context *context, - int arg, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); + UNUSED_PARAMETER2(NotUsed, NotUsed2); sqlite3_result_int(context, sqlite3_changes(db)); } @@ -373,10 +439,11 @@ */ static void total_changes( sqlite3_context *context, - int arg, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); + UNUSED_PARAMETER2(NotUsed, NotUsed2); sqlite3_result_int(context, sqlite3_total_changes(db)); } @@ -390,6 +457,19 @@ u8 noCase; }; +/* +** For LIKE and GLOB matching on EBCDIC machines, assume that every +** character is exactly one byte in size. Also, all characters are +** able to participate in upper-case-to-lower-case mappings in EBCDIC +** whereas only characters less than 0x80 do in ASCII. +*/ +#if defined(SQLITE_EBCDIC) +# define sqlite3Utf8Read(A,C) (*(A++)) +# define GlogUpperToLower(A) A = sqlite3UpperToLower[A] +#else +# define GlogUpperToLower(A) if( A<0x80 ){ A = sqlite3UpperToLower[A]; } +#endif + static const struct compareInfo globInfo = { '*', '?', '[', 0 }; /* The correct SQL-92 behavior is for the LIKE operator to ignore ** case. Thus 'a' LIKE 'A' would be true. */ @@ -441,18 +521,18 @@ u8 noCase = pInfo->noCase; int prevEscape = 0; /* True if the previous character was 'escape' */ - while( (c = sqlite3Utf8Read(zPattern,0,&zPattern))!=0 ){ + while( (c = sqlite3Utf8Read(zPattern,&zPattern))!=0 ){ if( !prevEscape && c==matchAll ){ - while( (c=sqlite3Utf8Read(zPattern,0,&zPattern)) == matchAll + while( (c=sqlite3Utf8Read(zPattern,&zPattern)) == matchAll || c == matchOne ){ - if( c==matchOne && sqlite3Utf8Read(zString, 0, &zString)==0 ){ + if( c==matchOne && sqlite3Utf8Read(zString, &zString)==0 ){ return 0; } } if( c==0 ){ return 1; }else if( c==esc ){ - c = sqlite3Utf8Read(zPattern, 0, &zPattern); + c = sqlite3Utf8Read(zPattern, &zPattern); if( c==0 ){ return 0; } @@ -464,17 +544,17 @@ } return *zString!=0; } - while( (c2 = sqlite3Utf8Read(zString,0,&zString))!=0 ){ + while( (c2 = sqlite3Utf8Read(zString,&zString))!=0 ){ if( noCase ){ - c2 = c2<0x80 ? sqlite3UpperToLower[c2] : c2; - c = c<0x80 ? 
sqlite3UpperToLower[c] : c; + GlogUpperToLower(c2); + GlogUpperToLower(c); while( c2 != 0 && c2 != c ){ - c2 = sqlite3Utf8Read(zString, 0, &zString); - if( c2<0x80 ) c2 = sqlite3UpperToLower[c2]; + c2 = sqlite3Utf8Read(zString, &zString); + GlogUpperToLower(c2); } }else{ while( c2 != 0 && c2 != c ){ - c2 = sqlite3Utf8Read(zString, 0, &zString); + c2 = sqlite3Utf8Read(zString, &zString); } } if( c2==0 ) return 0; @@ -482,7 +562,7 @@ } return 0; }else if( !prevEscape && c==matchOne ){ - if( sqlite3Utf8Read(zString, 0, &zString)==0 ){ + if( sqlite3Utf8Read(zString, &zString)==0 ){ return 0; } }else if( c==matchSet ){ @@ -490,20 +570,20 @@ assert( esc==0 ); /* This only occurs for GLOB, not LIKE */ seen = 0; invert = 0; - c = sqlite3Utf8Read(zString, 0, &zString); + c = sqlite3Utf8Read(zString, &zString); if( c==0 ) return 0; - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + c2 = sqlite3Utf8Read(zPattern, &zPattern); if( c2=='^' ){ invert = 1; - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + c2 = sqlite3Utf8Read(zPattern, &zPattern); } if( c2==']' ){ if( c==']' ) seen = 1; - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + c2 = sqlite3Utf8Read(zPattern, &zPattern); } while( c2 && c2!=']' ){ if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + c2 = sqlite3Utf8Read(zPattern, &zPattern); if( c>=prior_c && c<=c2 ) seen = 1; prior_c = 0; }else{ @@ -512,7 +592,7 @@ } prior_c = c2; } - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); + c2 = sqlite3Utf8Read(zPattern, &zPattern); } if( c2==0 || (seen ^ invert)==0 ){ return 0; @@ -520,10 +600,10 @@ }else if( esc==c && !prevEscape ){ prevEscape = 1; }else{ - c2 = sqlite3Utf8Read(zString, 0, &zString); + c2 = sqlite3Utf8Read(zString, &zString); if( noCase ){ - c = c<0x80 ? sqlite3UpperToLower[c] : c; - c2 = c2<0x80 ? sqlite3UpperToLower[c2] : c2; + GlogUpperToLower(c); + GlogUpperToLower(c2); } if( c!=c2 ){ return 0; @@ -563,6 +643,8 @@ ){ const unsigned char *zA, *zB; int escape = 0; + int nPat; + sqlite3 *db = sqlite3_context_db_handle(context); zB = sqlite3_value_text(argv[0]); zA = sqlite3_value_text(argv[1]); @@ -570,7 +652,10 @@ /* Limit the length of the LIKE or GLOB pattern to avoid problems ** of deep recursion and N*N behavior in patternCompare(). */ - if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ + nPat = sqlite3_value_bytes(argv[0]); + testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ); + testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]+1 ); + if( nPat > db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ){ sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); return; } @@ -587,7 +672,7 @@ "ESCAPE expression must be a single character", -1); return; } - escape = sqlite3Utf8Read(zEsc, 0, &zEsc); + escape = sqlite3Utf8Read(zEsc, &zEsc); } if( zA && zB ){ struct compareInfo *pInfo = sqlite3_user_data(context); @@ -606,10 +691,11 @@ */ static void nullifFunc( sqlite3_context *context, - int argc, + int NotUsed, sqlite3_value **argv ){ CollSeq *pColl = sqlite3GetFuncCollSeq(context); + UNUSED_PARAMETER(NotUsed); if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ sqlite3_result_value(context, argv[0]); } @@ -621,9 +707,10 @@ */ static void versionFunc( sqlite3_context *context, - int argc, - sqlite3_value **argv + int NotUsed, + sqlite3_value **NotUsed2 ){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); sqlite3_result_text(context, sqlite3_version, -1, SQLITE_STATIC); } @@ -646,12 +733,9 @@ ** single-quote escapes. 
*/ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - if( argc<1 ) return; + assert( argc==1 ); + UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_NULL: { - sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); - break; - } case SQLITE_INTEGER: case SQLITE_FLOAT: { sqlite3_result_value(context, argv[0]); @@ -662,15 +746,8 @@ char const *zBlob = sqlite3_value_blob(argv[0]); int nBlob = sqlite3_value_bytes(argv[0]); assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - - if( 2*nBlob+4>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - zText = (char *)sqliteMalloc((2*nBlob)+4); - if( !zText ){ - sqlite3_result_error(context, "out of memory", -1); - }else{ + zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4); + if( zText ){ int i; for(i=0; i>4)&0x0F]; @@ -681,7 +758,7 @@ zText[0] = 'X'; zText[1] = '\''; sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); - sqliteFree(zText); + sqlite3_free(zText); } break; } @@ -693,23 +770,25 @@ if( zArg==0 ) return; for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } - if( i+n+3>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - z = sqliteMalloc( i+n+3 ); - if( z==0 ) return; - z[0] = '\''; - for(i=0, j=1; zArg[i]; i++){ - z[j++] = zArg[i]; - if( zArg[i]=='\'' ){ - z[j++] = '\''; + z = contextMalloc(context, ((i64)i)+((i64)n)+3); + if( z ){ + z[0] = '\''; + for(i=0, j=1; zArg[i]; i++){ + z[j++] = zArg[i]; + if( zArg[i]=='\'' ){ + z[j++] = '\''; + } } + z[j++] = '\''; + z[j] = 0; + sqlite3_result_text(context, z, j, sqlite3_free); } - z[j++] = '\''; - z[j] = 0; - sqlite3_result_text(context, z, j, SQLITE_TRANSIENT); - sqliteFree(z); + break; + } + default: { + assert( sqlite3_value_type(argv[0])==SQLITE_NULL ); + sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); + break; } } } @@ -727,22 +806,20 @@ const unsigned char *pBlob; char *zHex, *z; assert( argc==1 ); + UNUSED_PARAMETER(argc); pBlob = sqlite3_value_blob(argv[0]); n = sqlite3_value_bytes(argv[0]); - if( n*2+1>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - z = zHex = sqlite3_malloc(n*2 + 1); - if( zHex==0 ) return; - for(i=0; i>4)&0xf]; - *(z++) = hexdigits[c&0xf]; + z = zHex = contextMalloc(context, ((i64)n)*2 + 1); + if( zHex ){ + for(i=0; i>4)&0xf]; + *(z++) = hexdigits[c&0xf]; + } + *z = 0; + sqlite3_result_text(context, zHex, n*2, sqlite3_free); } - *z = 0; - sqlite3_result_text(context, zHex, n*2, sqlite3_free); } /* @@ -754,12 +831,16 @@ sqlite3_value **argv ){ i64 n; + sqlite3 *db = sqlite3_context_db_handle(context); assert( argc==1 ); + UNUSED_PARAMETER(argc); n = sqlite3_value_int64(argv[0]); - if( n>SQLITE_MAX_LENGTH ){ + testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH] ); + testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); + if( n>db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); }else{ - sqlite3_result_zeroblob(context, n); + sqlite3_result_zeroblob(context, (int)n); } } @@ -786,12 +867,22 @@ int i, j; /* Loop counters */ assert( argc==3 ); + UNUSED_PARAMETER(argc); zStr = sqlite3_value_text(argv[0]); if( zStr==0 ) return; nStr = sqlite3_value_bytes(argv[0]); assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */ zPattern = sqlite3_value_text(argv[1]); - if( zPattern==0 || zPattern[0]==0 ) return; + if( zPattern==0 ){ + assert( sqlite3_value_type(argv[1])==SQLITE_NULL + || 
sqlite3_context_db_handle(context)->mallocFailed ); + return; + } + if( zPattern[0]==0 ){ + assert( sqlite3_value_type(argv[1])!=SQLITE_NULL ); + sqlite3_result_value(context, argv[0]); + return; + } nPattern = sqlite3_value_bytes(argv[1]); assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */ zRep = sqlite3_value_text(argv[2]); @@ -800,7 +891,7 @@ assert( zRep==sqlite3_value_text(argv[2]) ); nOut = nStr + 1; assert( nOut=SQLITE_MAX_LENGTH ){ + testcase( nOut-1==db->aLimit[SQLITE_LIMIT_LENGTH] ); + testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] ); + if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); - sqlite3_free(zOut); + sqlite3DbFree(db, zOut); return; } + zOld = zOut; zOut = sqlite3_realloc(zOut, (int)nOut); if( zOut==0 ){ + sqlite3_result_error_nomem(context); + sqlite3DbFree(db, zOld); return; } memcpy(&zOut[j], zRep, nRep); @@ -846,8 +944,8 @@ int nIn; /* Number of bytes in input */ int flags; /* 1: trimleft 2: trimright 3: trim */ int i; /* Loop counter */ - unsigned char *aLen; /* Length of each character in zCharSet */ - const unsigned char **azChar; /* Individual characters in zCharSet */ + unsigned char *aLen = 0; /* Length of each character in zCharSet */ + unsigned char **azChar = 0; /* Individual characters in zCharSet */ int nChar; /* Number of characters in zCharSet */ if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ @@ -859,10 +957,10 @@ assert( zIn==sqlite3_value_text(argv[0]) ); if( argc==1 ){ static const unsigned char lenOne[] = { 1 }; - static const unsigned char *azOne[] = { (u8*)" " }; + static unsigned char * const azOne[] = { (u8*)" " }; nChar = 1; aLen = (u8*)lenOne; - azChar = azOne; + azChar = (unsigned char **)azOne; zCharSet = 0; }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ return; @@ -872,26 +970,26 @@ SQLITE_SKIP_UTF8(z); } if( nChar>0 ){ - azChar = sqlite3_malloc( nChar*(sizeof(char*)+1) ); + azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1)); if( azChar==0 ){ return; } aLen = (unsigned char*)&azChar[nChar]; for(z=zCharSet, nChar=0; *z; nChar++){ - azChar[nChar] = z; + azChar[nChar] = (unsigned char *)z; SQLITE_SKIP_UTF8(z); - aLen[nChar] = z - azChar[nChar]; + aLen[nChar] = (u8)(z - azChar[nChar]); } } } if( nChar>0 ){ - flags = (int)sqlite3_user_data(context); + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context)); if( flags & 1 ){ while( nIn>0 ){ - int len; + int len = 0; for(i=0; i=nChar ) break; zIn += len; @@ -900,7 +998,7 @@ } if( flags & 2 ){ while( nIn>0 ){ - int len; + int len = 0; for(i=0; i0 ){ @@ -974,7 +1073,7 @@ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ const char *zFile = (const char *)sqlite3_value_text(argv[0]); const char *zProc; - sqlite3 *db = sqlite3_user_data(context); + sqlite3 *db = sqlite3_context_db_handle(context); char *zErrMsg = 0; if( argc==2 ){ @@ -989,164 +1088,6 @@ } #endif -#ifdef SQLITE_TEST -/* -** This function generates a string of random characters. Used for -** generating test data. 
-*/ -static void randStr(sqlite3_context *context, int argc, sqlite3_value **argv){ - static const unsigned char zSrc[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789" - ".-!,:*^+=_|?/<> "; - int iMin, iMax, n, r, i; - unsigned char zBuf[1000]; - if( argc>=1 ){ - iMin = sqlite3_value_int(argv[0]); - if( iMin<0 ) iMin = 0; - if( iMin>=sizeof(zBuf) ) iMin = sizeof(zBuf)-1; - }else{ - iMin = 1; - } - if( argc>=2 ){ - iMax = sqlite3_value_int(argv[1]); - if( iMax=sizeof(zBuf) ) iMax = sizeof(zBuf)-1; - }else{ - iMax = 50; - } - n = iMin; - if( iMax>iMin ){ - sqlite3Randomness(sizeof(r), &r); - r &= 0x7fffffff; - n += r%(iMax + 1 - iMin); - } - assert( nrSum += v; if( (p->approx|p->overflow)==0 ){ i64 iNewSum = p->iSum + v; - int s1 = p->iSum >> (sizeof(i64)*8-1); - int s2 = v >> (sizeof(i64)*8-1); - int s3 = iNewSum >> (sizeof(i64)*8-1); - p->overflow = (s1&s2&~s3) | (~s1&~s2&s3); + int s1 = (int)(p->iSum >> (sizeof(i64)*8-1)); + int s2 = (int)(v >> (sizeof(i64)*8-1)); + int s3 = (int)(iNewSum >> (sizeof(i64)*8-1)); + p->overflow = ((s1&s2&~s3) | (~s1&~s2&s3))?1:0; p->iSum = iNewSum; } }else{ @@ -1219,7 +1161,8 @@ static void totalFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); - sqlite3_result_double(context, p ? p->rSum : 0.0); + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + sqlite3_result_double(context, p ? p->rSum : (double)0); } /* @@ -1240,6 +1183,15 @@ if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ p->n++; } + +#ifndef SQLITE_OMIT_DEPRECATED + /* The sqlite3_aggregate_count() function is deprecated. But just to make + ** sure it still operates correctly, verify that its count agrees with our + ** internal count when using count(*) and when the total count can be + ** expressed as a 32-bit integer. */ + assert( argc==1 || p==0 || p->n>0x7fffffff + || p->n==sqlite3_aggregate_count(context) ); +#endif } static void countFinalize(sqlite3_context *context){ CountCtx *p; @@ -1250,9 +1202,14 @@ /* ** Routines to implement min() and max() aggregate functions. */ -static void minmaxStep(sqlite3_context *context, int argc, sqlite3_value **argv){ +static void minmaxStep( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ Mem *pArg = (Mem *)argv[0]; Mem *pBest; + UNUSED_PARAMETER(NotUsed); if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); @@ -1283,13 +1240,63 @@ sqlite3_value *pRes; pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); if( pRes ){ - if( pRes->flags ){ + if( ALWAYS(pRes->flags) ){ sqlite3_result_value(context, pRes); } sqlite3VdbeMemRelease(pRes); } } +/* +** group_concat(EXPR, ?SEPARATOR?) 
+*/ +static void groupConcatStep( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zVal; + StrAccum *pAccum; + const char *zSep; + int nVal, nSep; + assert( argc==1 || argc==2 ); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); + + if( pAccum ){ + sqlite3 *db = sqlite3_context_db_handle(context); + int firstTerm = pAccum->useMalloc==0; + pAccum->useMalloc = 1; + pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; + if( !firstTerm ){ + if( argc==2 ){ + zSep = (char*)sqlite3_value_text(argv[1]); + nSep = sqlite3_value_bytes(argv[1]); + }else{ + zSep = ","; + nSep = 1; + } + sqlite3StrAccumAppend(pAccum, zSep, nSep); + } + zVal = (char*)sqlite3_value_text(argv[0]); + nVal = sqlite3_value_bytes(argv[0]); + sqlite3StrAccumAppend(pAccum, zVal, nVal); + } +} +static void groupConcatFinalize(sqlite3_context *context){ + StrAccum *pAccum; + pAccum = sqlite3_aggregate_context(context, 0); + if( pAccum ){ + if( pAccum->tooBig ){ + sqlite3_result_error_toobig(context); + }else if( pAccum->mallocFailed ){ + sqlite3_result_error_nomem(context); + }else{ + sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1, + sqlite3_free); + } + } +} /* ** This function registered all of the above C functions as SQL @@ -1297,141 +1304,26 @@ ** external linkage. */ void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ - static const struct { - char *zName; - signed char nArg; - u8 argType; /* ff: db 1: 0, 2: 1, 3: 2,... N: N-1. */ - u8 eTextRep; /* 1: UTF-16. 0: UTF-8 */ - u8 needCollSeq; - void (*xFunc)(sqlite3_context*,int,sqlite3_value **); - } aFuncs[] = { - { "min", -1, 0, SQLITE_UTF8, 1, minmaxFunc }, - { "min", 0, 0, SQLITE_UTF8, 1, 0 }, - { "max", -1, 1, SQLITE_UTF8, 1, minmaxFunc }, - { "max", 0, 1, SQLITE_UTF8, 1, 0 }, - { "typeof", 1, 0, SQLITE_UTF8, 0, typeofFunc }, - { "length", 1, 0, SQLITE_UTF8, 0, lengthFunc }, - { "substr", 3, 0, SQLITE_UTF8, 0, substrFunc }, - { "abs", 1, 0, SQLITE_UTF8, 0, absFunc }, - { "round", 1, 0, SQLITE_UTF8, 0, roundFunc }, - { "round", 2, 0, SQLITE_UTF8, 0, roundFunc }, - { "upper", 1, 0, SQLITE_UTF8, 0, upperFunc }, - { "lower", 1, 0, SQLITE_UTF8, 0, lowerFunc }, - { "coalesce", -1, 0, SQLITE_UTF8, 0, ifnullFunc }, - { "coalesce", 0, 0, SQLITE_UTF8, 0, 0 }, - { "coalesce", 1, 0, SQLITE_UTF8, 0, 0 }, - { "hex", 1, 0, SQLITE_UTF8, 0, hexFunc }, - { "ifnull", 2, 0, SQLITE_UTF8, 1, ifnullFunc }, - { "random", -1, 0, SQLITE_UTF8, 0, randomFunc }, - { "randomblob", 1, 0, SQLITE_UTF8, 0, randomBlob }, - { "nullif", 2, 0, SQLITE_UTF8, 1, nullifFunc }, - { "sqlite_version", 0, 0, SQLITE_UTF8, 0, versionFunc}, - { "quote", 1, 0, SQLITE_UTF8, 0, quoteFunc }, - { "last_insert_rowid", 0, 0xff, SQLITE_UTF8, 0, last_insert_rowid }, - { "changes", 0, 0xff, SQLITE_UTF8, 0, changes }, - { "total_changes", 0, 0xff, SQLITE_UTF8, 0, total_changes }, - { "replace", 3, 0, SQLITE_UTF8, 0, replaceFunc }, - { "ltrim", 1, 1, SQLITE_UTF8, 0, trimFunc }, - { "ltrim", 2, 1, SQLITE_UTF8, 0, trimFunc }, - { "rtrim", 1, 2, SQLITE_UTF8, 0, trimFunc }, - { "rtrim", 2, 2, SQLITE_UTF8, 0, trimFunc }, - { "trim", 1, 3, SQLITE_UTF8, 0, trimFunc }, - { "trim", 2, 3, SQLITE_UTF8, 0, trimFunc }, - { "zeroblob", 1, 0, SQLITE_UTF8, 0, zeroblobFunc }, -#ifdef SQLITE_SOUNDEX - { "soundex", 1, 0, SQLITE_UTF8, 0, soundexFunc}, -#endif -#ifndef SQLITE_OMIT_LOAD_EXTENSION - { "load_extension", 1, 0xff, SQLITE_UTF8, 0, loadExt }, - { "load_extension", 2, 0xff, SQLITE_UTF8, 0, loadExt }, -#endif -#ifdef 
SQLITE_TEST - { "randstr", 2, 0, SQLITE_UTF8, 0, randStr }, - { "test_destructor", 1, 0xff, SQLITE_UTF8, 0, test_destructor}, - { "test_destructor_count", 0, 0, SQLITE_UTF8, 0, test_destructor_count}, - { "test_auxdata", -1, 0, SQLITE_UTF8, 0, test_auxdata}, - { "test_error", 1, 0, SQLITE_UTF8, 0, test_error}, -#endif - }; - static const struct { - char *zName; - signed char nArg; - u8 argType; - u8 needCollSeq; - void (*xStep)(sqlite3_context*,int,sqlite3_value**); - void (*xFinalize)(sqlite3_context*); - } aAggs[] = { - { "min", 1, 0, 1, minmaxStep, minMaxFinalize }, - { "max", 1, 1, 1, minmaxStep, minMaxFinalize }, - { "sum", 1, 0, 0, sumStep, sumFinalize }, - { "total", 1, 0, 0, sumStep, totalFinalize }, - { "avg", 1, 0, 0, sumStep, avgFinalize }, - { "count", 0, 0, 0, countStep, countFinalize }, - { "count", 1, 0, 0, countStep, countFinalize }, - }; - int i; - - for(i=0; ineedCollSeq = 1; - } - } - } #ifndef SQLITE_OMIT_ALTERTABLE sqlite3AlterFunctions(db); #endif -#ifndef SQLITE_OMIT_PARSER - sqlite3AttachFunctions(db); -#endif - for(i=0; ineedCollSeq = 1; - } - } - } - sqlite3RegisterDateTimeFunctions(db); - if( !sqlite3MallocFailed() ){ + if( !db->mallocFailed ){ int rc = sqlite3_overload_function(db, "MATCH", 2); assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); + db->mallocFailed = 1; } } -#ifdef SQLITE_SSE - (void)sqlite3SseFunctions(db); -#endif -#ifdef SQLITE_CASE_SENSITIVE_LIKE - sqlite3RegisterLikeFunctions(db, 1); -#else - sqlite3RegisterLikeFunctions(db, 0); -#endif } /* ** Set the LIKEOPT flag on the 2-argument function with the given name. */ -static void setLikeOptFlag(sqlite3 *db, const char *zName, int flagVal){ +static void setLikeOptFlag(sqlite3 *db, const char *zName, u8 flagVal){ FuncDef *pDef; - pDef = sqlite3FindFunction(db, zName, strlen(zName), 2, SQLITE_UTF8, 0); - if( pDef ){ + pDef = sqlite3FindFunction(db, zName, sqlite3Strlen30(zName), + 2, SQLITE_UTF8, 0); + if( ALWAYS(pDef) ){ pDef->flags = flagVal; } } @@ -1448,9 +1340,9 @@ }else{ pInfo = (struct compareInfo*)&likeInfoNorm; } - sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0); - sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0); - sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, + sqlite3CreateFunc(db, "like", 2, SQLITE_ANY, pInfo, likeFunc, 0, 0); + sqlite3CreateFunc(db, "like", 3, SQLITE_ANY, pInfo, likeFunc, 0, 0); + sqlite3CreateFunc(db, "glob", 2, SQLITE_ANY, (struct compareInfo*)&globInfo, likeFunc, 0,0); setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); setLikeOptFlag(db, "like", @@ -1466,15 +1358,17 @@ */ int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ FuncDef *pDef; - if( pExpr->op!=TK_FUNCTION || !pExpr->pList ){ + if( pExpr->op!=TK_FUNCTION + || !pExpr->x.pList + || pExpr->x.pList->nExpr!=2 + ){ return 0; } - if( pExpr->pList->nExpr!=2 ){ - return 0; - } - pDef = sqlite3FindFunction(db, (char*)pExpr->token.z, pExpr->token.n, 2, - SQLITE_UTF8, 0); - if( pDef==0 || (pDef->flags & SQLITE_FUNC_LIKE)==0 ){ + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + pDef = sqlite3FindFunction(db, pExpr->u.zToken, + sqlite3Strlen30(pExpr->u.zToken), + 2, SQLITE_UTF8, 0); + if( NEVER(pDef==0) || (pDef->flags & SQLITE_FUNC_LIKE)==0 ){ return 0; } @@ -1489,3 +1383,94 @@ *pIsNocase = (pDef->flags & SQLITE_FUNC_CASE)==0; return 1; } + +/* +** All all of the FuncDef structures in the aBuiltinFunc[] array above +** to the global function hash table. 
This occurs at start-time (as +** a consequence of calling sqlite3_initialize()). +** +** After this routine runs +*/ +void sqlite3RegisterGlobalFunctions(void){ + /* + ** The following array holds FuncDef structures for all of the functions + ** defined in this file. + ** + ** The array cannot be constant since changes are made to the + ** FuncDef.pHash elements at start-time. The elements of this array + ** are read-only after initialization is complete. + */ + static SQLITE_WSD FuncDef aBuiltinFunc[] = { + FUNCTION(ltrim, 1, 1, 0, trimFunc ), + FUNCTION(ltrim, 2, 1, 0, trimFunc ), + FUNCTION(rtrim, 1, 2, 0, trimFunc ), + FUNCTION(rtrim, 2, 2, 0, trimFunc ), + FUNCTION(trim, 1, 3, 0, trimFunc ), + FUNCTION(trim, 2, 3, 0, trimFunc ), + FUNCTION(min, -1, 0, 1, minmaxFunc ), + FUNCTION(min, 0, 0, 1, 0 ), + AGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize ), + FUNCTION(max, -1, 1, 1, minmaxFunc ), + FUNCTION(max, 0, 1, 1, 0 ), + AGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize ), + FUNCTION(typeof, 1, 0, 0, typeofFunc ), + FUNCTION(length, 1, 0, 0, lengthFunc ), + FUNCTION(substr, 2, 0, 0, substrFunc ), + FUNCTION(substr, 3, 0, 0, substrFunc ), + FUNCTION(abs, 1, 0, 0, absFunc ), +#ifndef SQLITE_OMIT_FLOATING_POINT + FUNCTION(round, 1, 0, 0, roundFunc ), + FUNCTION(round, 2, 0, 0, roundFunc ), +#endif + FUNCTION(upper, 1, 0, 0, upperFunc ), + FUNCTION(lower, 1, 0, 0, lowerFunc ), + FUNCTION(coalesce, 1, 0, 0, 0 ), + FUNCTION(coalesce, -1, 0, 0, ifnullFunc ), + FUNCTION(coalesce, 0, 0, 0, 0 ), + FUNCTION(hex, 1, 0, 0, hexFunc ), + FUNCTION(ifnull, 2, 0, 1, ifnullFunc ), + FUNCTION(random, 0, 0, 0, randomFunc ), + FUNCTION(randomblob, 1, 0, 0, randomBlob ), + FUNCTION(nullif, 2, 0, 1, nullifFunc ), + FUNCTION(sqlite_version, 0, 0, 0, versionFunc ), + FUNCTION(quote, 1, 0, 0, quoteFunc ), + FUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid), + FUNCTION(changes, 0, 0, 0, changes ), + FUNCTION(total_changes, 0, 0, 0, total_changes ), + FUNCTION(replace, 3, 0, 0, replaceFunc ), + FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ), + #ifdef SQLITE_SOUNDEX + FUNCTION(soundex, 1, 0, 0, soundexFunc ), + #endif + #ifndef SQLITE_OMIT_LOAD_EXTENSION + FUNCTION(load_extension, 1, 0, 0, loadExt ), + FUNCTION(load_extension, 2, 0, 0, loadExt ), + #endif + AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ), + AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ), + AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ), + /* AGGREGATE(count, 0, 0, 0, countStep, countFinalize ), */ + {0,SQLITE_UTF8,SQLITE_FUNC_COUNT,0,0,0,countStep,countFinalize,"count",0}, + AGGREGATE(count, 1, 0, 0, countStep, countFinalize ), + AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize), + AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize), + + LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), + #ifdef SQLITE_CASE_SENSITIVE_LIKE + LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), + LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), + #else + LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE), + LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE), + #endif + }; + + int i; + FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); + FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aBuiltinFunc); + + for(i=0; i -/* #include */ -#include -#include -#include "vdbeInt.h" -#include "os.h" - -/* -** Return the collating function associated with a function. 
-*/ -static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){ - return context->pColl; -} - -/* -** Implementation of the non-aggregate min() and max() functions -*/ -static void minmaxFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i; - int mask; /* 0 for min() or 0xffffffff for max() */ - int iBest; - CollSeq *pColl; - - if( argc==0 ) return; - mask = sqlite3_user_data(context)==0 ? 0 : -1; - pColl = sqlite3GetFuncCollSeq(context); - assert( pColl ); - assert( mask==-1 || mask==0 ); - iBest = 0; - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - for(i=1; i=0 ){ - iBest = i; - } - } - sqlite3_result_value(context, argv[iBest]); -} - -/* -** Return the type of the argument. -*/ -static void typeofFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const char *z = 0; - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_NULL: z = "null"; break; - case SQLITE_INTEGER: z = "integer"; break; - case SQLITE_TEXT: z = "text"; break; - case SQLITE_FLOAT: z = "real"; break; - case SQLITE_BLOB: z = "blob"; break; - } - sqlite3_result_text(context, z, -1, SQLITE_STATIC); -} - - -/* -** Implementation of the length() function -*/ -static void lengthFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int len; - - assert( argc==1 ); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_BLOB: - case SQLITE_INTEGER: - case SQLITE_FLOAT: { - sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); - break; - } - case SQLITE_TEXT: { - const unsigned char *z = sqlite3_value_text(argv[0]); - if( z==0 ) return; - len = 0; - while( *z ){ - len++; - SQLITE_SKIP_UTF8(z); - } - sqlite3_result_int(context, len); - break; - } - default: { - sqlite3_result_null(context); - break; - } - } -} - -/* -** Implementation of the abs() function -*/ -static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - assert( argc==1 ); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_INTEGER: { - i64 iVal = sqlite3_value_int64(argv[0]); - if( iVal<0 ){ - if( (iVal<<1)==0 ){ - sqlite3_result_error(context, "integer overflow", -1); - return; - } - iVal = -iVal; - } - sqlite3_result_int64(context, iVal); - break; - } - case SQLITE_NULL: { - sqlite3_result_null(context); - break; - } - default: { - double rVal = sqlite3_value_double(argv[0]); - if( rVal<0 ) rVal = -rVal; - sqlite3_result_double(context, rVal); - break; - } - } -} - -/* -** Implementation of the substr() function. -** -** substr(x,p1,p2) returns p2 characters of x[] beginning with p1. -** p1 is 1-indexed. So substr(x,1,1) returns the first character -** of x. If x is text, then we actually count UTF-8 characters. -** If x is a blob, then we count bytes. -** -** If p1 is negative, then we begin abs(p1) from the end of x[]. 
-*/ -static void substrFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *z; - const unsigned char *z2; - int len; - int p0type; - i64 p1, p2; - - assert( argc==3 ); - p0type = sqlite3_value_type(argv[0]); - if( p0type==SQLITE_BLOB ){ - len = sqlite3_value_bytes(argv[0]); - z = sqlite3_value_blob(argv[0]); - if( z==0 ) return; - assert( len==sqlite3_value_bytes(argv[0]) ); - }else{ - z = sqlite3_value_text(argv[0]); - if( z==0 ) return; - len = 0; - for(z2=z; *z2; len++){ - SQLITE_SKIP_UTF8(z2); - } - } - p1 = sqlite3_value_int(argv[1]); - p2 = sqlite3_value_int(argv[2]); - if( p1<0 ){ - p1 += len; - if( p1<0 ){ - p2 += p1; - p1 = 0; - } - }else if( p1>0 ){ - p1--; - } - if( p1+p2>len ){ - p2 = len-p1; - } - if( p0type!=SQLITE_BLOB ){ - while( *z && p1 ){ - SQLITE_SKIP_UTF8(z); - p1--; - } - for(z2=z; *z2 && p2; p2--){ - SQLITE_SKIP_UTF8(z2); - } - sqlite3_result_text(context, (char*)z, z2-z, SQLITE_TRANSIENT); - }else{ - if( p2<0 ) p2 = 0; - sqlite3_result_blob(context, (char*)&z[p1], p2, SQLITE_TRANSIENT); - } -} - -/* -** Implementation of the round() function -*/ -static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - int n = 0; - double r; - char zBuf[500]; /* larger than the %f representation of the largest double */ - assert( argc==1 || argc==2 ); - if( argc==2 ){ - if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; - n = sqlite3_value_int(argv[1]); - if( n>30 ) n = 30; - if( n<0 ) n = 0; - } - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - r = sqlite3_value_double(argv[0]); - sqlite3_snprintf(sizeof(zBuf),zBuf,"%.*f",n,r); - sqlite3AtoF(zBuf, &r); - sqlite3_result_double(context, r); -} - -/* -** Implementation of the upper() and lower() SQL functions. -*/ -static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - char *z1; - const char *z2; - int i, n; - if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; - z2 = (char*)sqlite3_value_text(argv[0]); - n = sqlite3_value_bytes(argv[0]); - /* Verify that the call to _bytes() does not invalidate the _text() pointer */ - assert( z2==(char*)sqlite3_value_text(argv[0]) ); - if( z2 ){ - z1 = sqlite3_malloc(n+1); - if( z1 ){ - memcpy(z1, z2, n+1); - for(i=0; z1[i]; i++){ - z1[i] = toupper(z1[i]); - } - sqlite3_result_text(context, z1, -1, sqlite3_free); - } - } -} -static void lowerFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - char *z1; - const char *z2; - int i, n; - if( argc<1 || SQLITE_NULL==sqlite3_value_type(argv[0]) ) return; - z2 = (char*)sqlite3_value_text(argv[0]); - n = sqlite3_value_bytes(argv[0]); - /* Verify that the call to _bytes() does not invalidate the _text() pointer */ - assert( z2==(char*)sqlite3_value_text(argv[0]) ); - if( z2 ){ - z1 = sqlite3_malloc(n+1); - if( z1 ){ - memcpy(z1, z2, n+1); - for(i=0; z1[i]; i++){ - z1[i] = tolower(z1[i]); - } - sqlite3_result_text(context, z1, -1, sqlite3_free); - } - } -} - -/* -** Implementation of the IFNULL(), NVL(), and COALESCE() functions. -** All three do the same thing. They return the first non-NULL -** argument. -*/ -static void ifnullFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i; - for(i=0; iSQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - p = sqliteMalloc(n); - if( p ){ - sqlite3Randomness(n, p); - sqlite3_result_blob(context, (char*)p, n, sqlite3FreeX); - } -} - -/* -** Implementation of the last_insert_rowid() SQL function. 
The return -** value is the same as the sqlite3_last_insert_rowid() API function. -*/ -static void last_insert_rowid( - sqlite3_context *context, - int arg, - sqlite3_value **argv -){ - sqlite3 *db = sqlite3_user_data(context); - sqlite3_result_int64(context, sqlite3_last_insert_rowid(db)); -} - -/* -** Implementation of the changes() SQL function. The return value is the -** same as the sqlite3_changes() API function. -*/ -static void changes( - sqlite3_context *context, - int arg, - sqlite3_value **argv -){ - sqlite3 *db = sqlite3_user_data(context); - sqlite3_result_int(context, sqlite3_changes(db)); -} - -/* -** Implementation of the total_changes() SQL function. The return value is -** the same as the sqlite3_total_changes() API function. -*/ -static void total_changes( - sqlite3_context *context, - int arg, - sqlite3_value **argv -){ - sqlite3 *db = sqlite3_user_data(context); - sqlite3_result_int(context, sqlite3_total_changes(db)); -} - -/* -** A structure defining how to do GLOB-style comparisons. -*/ -struct compareInfo { - u8 matchAll; - u8 matchOne; - u8 matchSet; - u8 noCase; -}; - -static const struct compareInfo globInfo = { '*', '?', '[', 0 }; -/* The correct SQL-92 behavior is for the LIKE operator to ignore -** case. Thus 'a' LIKE 'A' would be true. */ -static const struct compareInfo likeInfoNorm = { '%', '_', 0, 1 }; -/* If SQLITE_CASE_SENSITIVE_LIKE is defined, then the LIKE operator -** is case sensitive causing 'a' LIKE 'A' to be false */ -static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 }; - -/* -** Compare two UTF-8 strings for equality where the first string can -** potentially be a "glob" expression. Return true (1) if they -** are the same and false (0) if they are different. -** -** Globbing rules: -** -** '*' Matches any sequence of zero or more characters. -** -** '?' Matches exactly one character. -** -** [...] Matches one character from the enclosed list of -** characters. -** -** [^...] Matches one character not in the enclosed list. -** -** With the [...] and [^...] matching, a ']' character can be included -** in the list by making it the first character after '[' or '^'. A -** range of characters can be specified using '-'. Example: -** "[a-z]" matches any single lower-case letter. To match a '-', make -** it the last character in the list. -** -** This routine is usually quick, but can be N**2 in the worst case. -** -** Hints: to match '*' or '?', put them in "[]". 
Like this: -** -** abc[*]xyz Matches "abc*xyz" only -*/ -static int patternCompare( - const u8 *zPattern, /* The glob pattern */ - const u8 *zString, /* The string to compare against the glob */ - const struct compareInfo *pInfo, /* Information about how to do the compare */ - const int esc /* The escape character */ -){ - int c, c2; - int invert; - int seen; - u8 matchOne = pInfo->matchOne; - u8 matchAll = pInfo->matchAll; - u8 matchSet = pInfo->matchSet; - u8 noCase = pInfo->noCase; - const u8 *zNext; - int prevEscape = 0; /* True if the previous character was 'escape' */ - - while( (c = sqlite3Utf8Read(zPattern, 0, &zPattern))!=0 ){ - if( !prevEscape && c==matchAll ){ - while( (c=zPattern[0]) == matchAll || c == matchOne ){ - if( c==matchOne ){ - c2 = sqlite3Utf8Read(zString, 0, &zString); - if( c2==0 ) return 0; - } - zPattern++; - } - if( c && esc && sqlite3Utf8Read(zPattern, 0, &zNext)==esc ){ - c = *zNext; - } - if( c==0 ) return 1; - if( c==matchSet ){ - assert( esc==0 ); /* This is GLOB, not LIKE */ - while( *zString && patternCompare(&zPattern[1],zString,pInfo,esc)==0 ){ - SQLITE_SKIP_UTF8(zString); - } - return *zString!=0; - }else{ - while( (c2 = *zString)!=0 ){ - if( noCase ){ - c2 = sqlite3UpperToLower[c2]; - c = sqlite3UpperToLower[c]; - while( c2 != 0 && c2 != c ){ c2 = sqlite3UpperToLower[*++zString]; } - }else{ - while( c2 != 0 && c2 != c ){ c2 = *++zString; } - } - if( c2==0 ) return 0; - if( patternCompare(&zPattern[1],zString,pInfo,esc) ) return 1; - SQLITE_SKIP_UTF8(zString); - } - return 0; - } - }else if( !prevEscape && c==matchOne ){ - if( *zString==0 ) return 0; - SQLITE_SKIP_UTF8(zString); - zPattern++; - }else if( c==matchSet ){ - int prior_c = 0; - assert( esc==0 ); /* This only occurs for GLOB, not LIKE */ - seen = 0; - invert = 0; - c = sqlite3Utf8Read(zString, 0, &zString); - if( c==0 ) return 0; - c2 = *++zPattern; - if( c2=='^' ){ invert = 1; c2 = *++zPattern; } - if( c2==']' ){ - if( c==']' ) seen = 1; - c2 = *++zPattern; - } - while( (c2 = sqlite3Utf8(zPattern,0,&zPattern))!=0 && c2!=']' ){ - if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ - c2 = sqlite3Utf8Read(zPattern, 0, &zPattern); - if( c>=prior_c && c<=c2 ) seen = 1; - prior_c = 0; - }else if( c==c2 ){ - seen = 1; - prior_c = c2; - }else{ - prior_c = c2; - } - } - if( c2==0 || (seen ^ invert)==0 ) return 0; - }else if( esc && !prevEscape && sqlite3Utf8Read(zPattern, 0, &zNext)==esc){ - prevEscape = 1; - zPattern = zNext; - }else{ - if( noCase ){ - if( sqlite3UpperToLower[c] != sqlite3UpperToLower[*zString] ) return 0; - }else{ - if( c != *zString ) return 0; - } - zPattern++; - zString++; - prevEscape = 0; - } - } - return *zString==0; -} - -/* -** Count the number of times that the LIKE operator (or GLOB which is -** just a variation of LIKE) gets called. This is used for testing -** only. -*/ -#ifdef SQLITE_TEST -int sqlite3_like_count = 0; -#endif - - -/* -** Implementation of the like() SQL function. This function implements -** the build-in LIKE operator. The first argument to the function is the -** pattern and the second argument is the string. So, the SQL statements: -** -** A LIKE B -** -** is implemented as like(B,A). -** -** This same function (with a different compareInfo structure) computes -** the GLOB operator. 
-*/ -static void likeFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zA, *zB; - int escape = 0; - - zB = sqlite3_value_text(argv[0]); - zA = sqlite3_value_text(argv[1]); - - /* Limit the length of the LIKE or GLOB pattern to avoid problems - ** of deep recursion and N*N behavior in patternCompare(). - */ - if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ - sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); - return; - } - assert( zB==sqlite3_value_text(argv[0]) ); /* Encoding did not change */ - - if( argc==3 ){ - /* The escape character string must consist of a single UTF-8 character. - ** Otherwise, return an error. - */ - const unsigned char *zEsc = sqlite3_value_text(argv[2]); - if( zEsc==0 ) return; - if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){ - sqlite3_result_error(context, - "ESCAPE expression must be a single character", -1); - return; - } - escape = sqlite3ReadUtf8(zEsc); - } - if( zA && zB ){ - struct compareInfo *pInfo = sqlite3_user_data(context); -#ifdef SQLITE_TEST - sqlite3_like_count++; -#endif - - sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)); - } -} - -/* -** Implementation of the NULLIF(x,y) function. The result is the first -** argument if the arguments are different. The result is NULL if the -** arguments are equal to each other. -*/ -static void nullifFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - CollSeq *pColl = sqlite3GetFuncCollSeq(context); - if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ - sqlite3_result_value(context, argv[0]); - } -} - -/* -** Implementation of the VERSION(*) function. The result is the version -** of the SQLite library that is running. -*/ -static void versionFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - sqlite3_result_text(context, sqlite3_version, -1, SQLITE_STATIC); -} - -/* Array for converting from half-bytes (nybbles) into ASCII hex -** digits. */ -static const char hexdigits[] = { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' -}; - -/* -** EXPERIMENTAL - This is not an official function. The interface may -** change. This function may disappear. Do not write code that depends -** on this function. -** -** Implementation of the QUOTE() function. This function takes a single -** argument. If the argument is numeric, the return value is the same as -** the argument. If the argument is NULL, the return value is the string -** "NULL". Otherwise, the argument is enclosed in single quotes with -** single-quote escapes. 
-*/ -static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - if( argc<1 ) return; - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_NULL: { - sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); - break; - } - case SQLITE_INTEGER: - case SQLITE_FLOAT: { - sqlite3_result_value(context, argv[0]); - break; - } - case SQLITE_BLOB: { - char *zText = 0; - char const *zBlob = sqlite3_value_blob(argv[0]); - int nBlob = sqlite3_value_bytes(argv[0]); - assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - - if( 2*nBlob+4>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - zText = (char *)sqliteMalloc((2*nBlob)+4); - if( !zText ){ - sqlite3_result_error(context, "out of memory", -1); - }else{ - int i; - for(i=0; i>4)&0x0F]; - zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F]; - } - zText[(nBlob*2)+2] = '\''; - zText[(nBlob*2)+3] = '\0'; - zText[0] = 'X'; - zText[1] = '\''; - sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); - sqliteFree(zText); - } - break; - } - case SQLITE_TEXT: { - int i,j; - u64 n; - const unsigned char *zArg = sqlite3_value_text(argv[0]); - char *z; - - if( zArg==0 ) return; - for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } - if( i+n+3>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - z = sqliteMalloc( i+n+3 ); - if( z==0 ) return; - z[0] = '\''; - for(i=0, j=1; zArg[i]; i++){ - z[j++] = zArg[i]; - if( zArg[i]=='\'' ){ - z[j++] = '\''; - } - } - z[j++] = '\''; - z[j] = 0; - sqlite3_result_text(context, z, j, SQLITE_TRANSIENT); - sqliteFree(z); - } - } -} - -/* -** The hex() function. Interpret the argument as a blob. Return -** a hexadecimal rendering as text. -*/ -static void hexFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i, n; - const unsigned char *pBlob; - char *zHex, *z; - assert( argc==1 ); - pBlob = sqlite3_value_blob(argv[0]); - n = sqlite3_value_bytes(argv[0]); - if( n*2+1>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - return; - } - assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - z = zHex = sqlite3_malloc(n*2 + 1); - if( zHex==0 ) return; - for(i=0; i>4)&0xf]; - *(z++) = hexdigits[c&0xf]; - } - *z = 0; - sqlite3_result_text(context, zHex, n*2, sqlite3_free); -} - -/* -** The zeroblob(N) function returns a zero-filled blob of size N bytes. -*/ -static void zeroblobFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - i64 n; - assert( argc==1 ); - n = sqlite3_value_int64(argv[0]); - if( n>SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - }else{ - sqlite3_result_zeroblob(context, n); - } -} - -/* -** The replace() function. Three arguments are all strings: call -** them A, B, and C. The result is also a string which is derived -** from A by replacing every occurance of B with C. The match -** must be exact. Collating sequences are not used. 
-*/ -static void replaceFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zStr; /* The input string A */ - const unsigned char *zPattern; /* The pattern string B */ - const unsigned char *zRep; /* The replacement string C */ - unsigned char *zOut; /* The output */ - int nStr; /* Size of zStr */ - int nPattern; /* Size of zPattern */ - int nRep; /* Size of zRep */ - i64 nOut; /* Maximum size of zOut */ - int loopLimit; /* Last zStr[] that might match zPattern[] */ - int i, j; /* Loop counters */ - - assert( argc==3 ); - zStr = sqlite3_value_text(argv[0]); - if( zStr==0 ) return; - nStr = sqlite3_value_bytes(argv[0]); - assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */ - zPattern = sqlite3_value_text(argv[1]); - if( zPattern==0 || zPattern[0]==0 ) return; - nPattern = sqlite3_value_bytes(argv[1]); - assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */ - zRep = sqlite3_value_text(argv[2]); - if( zRep==0 ) return; - nRep = sqlite3_value_bytes(argv[2]); - assert( zRep==sqlite3_value_text(argv[2]) ); - nOut = nStr + 1; - assert( nOut=SQLITE_MAX_LENGTH ){ - sqlite3_result_error_toobig(context); - sqlite3_free(zOut); - return; - } - zOut = sqlite3_realloc(zOut, (int)nOut); - if( zOut==0 ){ - return; - } - memcpy(&zOut[j], zRep, nRep); - j += nRep; - i += nPattern-1; - } - } - assert( j+nStr-i+1==nOut ); - memcpy(&zOut[j], &zStr[i], nStr-i); - j += nStr - i; - assert( j<=nOut ); - zOut[j] = 0; - sqlite3_result_text(context, (char*)zOut, j, sqlite3_free); -} - -/* -** Implementation of the TRIM(), LTRIM(), and RTRIM() functions. -** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both. -*/ -static void trimFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zIn; /* Input string */ - const unsigned char *zCharSet; /* Set of characters to trim */ - int nIn; /* Number of bytes in input */ - int flags; /* 1: trimleft 2: trimright 3: trim */ - int i; /* Loop counter */ - unsigned char *aLen; /* Length of each character in zCharSet */ - const unsigned char **azChar; /* Individual characters in zCharSet */ - int nChar; /* Number of characters in zCharSet */ - - if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ - return; - } - zIn = sqlite3_value_text(argv[0]); - if( zIn==0 ) return; - nIn = sqlite3_value_bytes(argv[0]); - assert( zIn==sqlite3_value_text(argv[0]) ); - if( argc==1 ){ - static const unsigned char lenOne[] = { 1 }; - static const unsigned char *azOne[] = { (u8*)" " }; - nChar = 1; - aLen = (u8*)lenOne; - azChar = azOne; - zCharSet = 0; - }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ - return; - }else{ - const unsigned char *z; - for(z=zCharSet, nChar=0; *z; nChar++){ - SQLITE_SKIP_UTF8(z); - } - if( nChar>0 ){ - azChar = sqlite3_malloc( nChar*(sizeof(char*)+1) ); - if( azChar==0 ){ - return; - } - aLen = (unsigned char*)&azChar[nChar]; - for(z=zCharSet, nChar=0; *z; nChar++){ - azChar[nChar] = z; - SQLITE_SKIP_UTF8(z); - aLen[nChar] = z - azChar[nChar]; - } - } - } - if( nChar>0 ){ - flags = (int)sqlite3_user_data(context); - if( flags & 1 ){ - while( nIn>0 ){ - int len; - for(i=0; i=nChar ) break; - zIn += len; - nIn -= len; - } - } - if( flags & 2 ){ - while( nIn>0 ){ - int len; - for(i=0; i=nChar ) break; - nIn -= len; - } - } - if( zCharSet ){ - sqlite3_free(azChar); - } - } - sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT); -} - -#ifdef SQLITE_SOUNDEX -/* -** Compute the soundex encoding of a word. 
-*/ -static void soundexFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - char zResult[8]; - const u8 *zIn; - int i, j; - static const unsigned char iCode[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, - 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, - 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, - 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, - }; - assert( argc==1 ); - zIn = (u8*)sqlite3_value_text(argv[0]); - if( zIn==0 ) zIn = (u8*)""; - for(i=0; zIn[i] && !isalpha(zIn[i]); i++){} - if( zIn[i] ){ - u8 prevcode = iCode[zIn[i]&0x7f]; - zResult[0] = toupper(zIn[i]); - for(j=1; j<4 && zIn[i]; i++){ - int code = iCode[zIn[i]&0x7f]; - if( code>0 ){ - if( code!=prevcode ){ - prevcode = code; - zResult[j++] = code + '0'; - } - }else{ - prevcode = 0; - } - } - while( j<4 ){ - zResult[j++] = '0'; - } - zResult[j] = 0; - sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT); - }else{ - sqlite3_result_text(context, "?000", 4, SQLITE_STATIC); - } -} -#endif - -#ifndef SQLITE_OMIT_LOAD_EXTENSION -/* -** A function that loads a shared-library extension then returns NULL. -*/ -static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ - const char *zFile = (const char *)sqlite3_value_text(argv[0]); - const char *zProc; - sqlite3 *db = sqlite3_user_data(context); - char *zErrMsg = 0; - - if( argc==2 ){ - zProc = (const char *)sqlite3_value_text(argv[1]); - }else{ - zProc = 0; - } - if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){ - sqlite3_result_error(context, zErrMsg, -1); - sqlite3_free(zErrMsg); - } -} -#endif - -#ifdef SQLITE_TEST -/* -** This function generates a string of random characters. Used for -** generating test data. 
-*/ -static void randStr(sqlite3_context *context, int argc, sqlite3_value **argv){ - static const unsigned char zSrc[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789" - ".-!,:*^+=_|?/<> "; - int iMin, iMax, n, r, i; - unsigned char zBuf[1000]; - if( argc>=1 ){ - iMin = sqlite3_value_int(argv[0]); - if( iMin<0 ) iMin = 0; - if( iMin>=sizeof(zBuf) ) iMin = sizeof(zBuf)-1; - }else{ - iMin = 1; - } - if( argc>=2 ){ - iMax = sqlite3_value_int(argv[1]); - if( iMax=sizeof(zBuf) ) iMax = sizeof(zBuf)-1; - }else{ - iMax = 50; - } - n = iMin; - if( iMax>iMin ){ - sqlite3Randomness(sizeof(r), &r); - r &= 0x7fffffff; - n += r%(iMax + 1 - iMin); - } - assert( ncnt++; - if( type==SQLITE_INTEGER ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum += v; - if( (p->approx|p->overflow)==0 ){ - i64 iNewSum = p->iSum + v; - int s1 = p->iSum >> (sizeof(i64)*8-1); - int s2 = v >> (sizeof(i64)*8-1); - int s3 = iNewSum >> (sizeof(i64)*8-1); - p->overflow = (s1&s2&~s3) | (~s1&~s2&s3); - p->iSum = iNewSum; - } - }else{ - p->rSum += sqlite3_value_double(argv[0]); - p->approx = 1; - } - } -} -static void sumFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - if( p && p->cnt>0 ){ - if( p->overflow ){ - sqlite3_result_error(context,"integer overflow",-1); - }else if( p->approx ){ - sqlite3_result_double(context, p->rSum); - }else{ - sqlite3_result_int64(context, p->iSum); - } - } -} -static void avgFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - if( p && p->cnt>0 ){ - sqlite3_result_double(context, p->rSum/(double)p->cnt); - } -} -static void totalFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - sqlite3_result_double(context, p ? p->rSum : 0.0); -} - -/* -** The following structure keeps track of state information for the -** count() aggregate function. -*/ -typedef struct CountCtx CountCtx; -struct CountCtx { - i64 n; -}; - -/* -** Routines to implement the count() aggregate function. -*/ -static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){ - CountCtx *p; - p = sqlite3_aggregate_context(context, sizeof(*p)); - if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ - p->n++; - } -} -static void countFinalize(sqlite3_context *context){ - CountCtx *p; - p = sqlite3_aggregate_context(context, 0); - sqlite3_result_int64(context, p ? p->n : 0); -} - -/* -** Routines to implement min() and max() aggregate functions. -*/ -static void minmaxStep(sqlite3_context *context, int argc, sqlite3_value **argv){ - Mem *pArg = (Mem *)argv[0]; - Mem *pBest; - - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); - if( !pBest ) return; - - if( pBest->flags ){ - int max; - int cmp; - CollSeq *pColl = sqlite3GetFuncCollSeq(context); - /* This step function is used for both the min() and max() aggregates, - ** the only difference between the two being that the sense of the - ** comparison is inverted. For the max() aggregate, the - ** sqlite3_user_data() function returns (void *)-1. For min() it - ** returns (void *)db, where db is the sqlite3* database pointer. - ** Therefore the next statement sets variable 'max' to 1 for the max() - ** aggregate, or 0 for min(). 
- */ - max = sqlite3_user_data(context)!=0; - cmp = sqlite3MemCompare(pBest, pArg, pColl); - if( (max && cmp<0) || (!max && cmp>0) ){ - sqlite3VdbeMemCopy(pBest, pArg); - } - }else{ - sqlite3VdbeMemCopy(pBest, pArg); - } -} -static void minMaxFinalize(sqlite3_context *context){ - sqlite3_value *pRes; - pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); - if( pRes ){ - if( pRes->flags ){ - sqlite3_result_value(context, pRes); - } - sqlite3VdbeMemRelease(pRes); - } -} - - -/* -** This function registered all of the above C functions as SQL -** functions. This should be the only routine in this file with -** external linkage. -*/ -void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ - static const struct { - char *zName; - signed char nArg; - u8 argType; /* ff: db 1: 0, 2: 1, 3: 2,... N: N-1. */ - u8 eTextRep; /* 1: UTF-16. 0: UTF-8 */ - u8 needCollSeq; - void (*xFunc)(sqlite3_context*,int,sqlite3_value **); - } aFuncs[] = { - { "min", -1, 0, SQLITE_UTF8, 1, minmaxFunc }, - { "min", 0, 0, SQLITE_UTF8, 1, 0 }, - { "max", -1, 1, SQLITE_UTF8, 1, minmaxFunc }, - { "max", 0, 1, SQLITE_UTF8, 1, 0 }, - { "typeof", 1, 0, SQLITE_UTF8, 0, typeofFunc }, - { "length", 1, 0, SQLITE_UTF8, 0, lengthFunc }, - { "substr", 3, 0, SQLITE_UTF8, 0, substrFunc }, - { "abs", 1, 0, SQLITE_UTF8, 0, absFunc }, - { "round", 1, 0, SQLITE_UTF8, 0, roundFunc }, - { "round", 2, 0, SQLITE_UTF8, 0, roundFunc }, - { "upper", 1, 0, SQLITE_UTF8, 0, upperFunc }, - { "lower", 1, 0, SQLITE_UTF8, 0, lowerFunc }, - { "coalesce", -1, 0, SQLITE_UTF8, 0, ifnullFunc }, - { "coalesce", 0, 0, SQLITE_UTF8, 0, 0 }, - { "coalesce", 1, 0, SQLITE_UTF8, 0, 0 }, - { "hex", 1, 0, SQLITE_UTF8, 0, hexFunc }, - { "ifnull", 2, 0, SQLITE_UTF8, 1, ifnullFunc }, - { "random", -1, 0, SQLITE_UTF8, 0, randomFunc }, - { "randomblob", 1, 0, SQLITE_UTF8, 0, randomBlob }, - { "nullif", 2, 0, SQLITE_UTF8, 1, nullifFunc }, - { "sqlite_version", 0, 0, SQLITE_UTF8, 0, versionFunc}, - { "quote", 1, 0, SQLITE_UTF8, 0, quoteFunc }, - { "last_insert_rowid", 0, 0xff, SQLITE_UTF8, 0, last_insert_rowid }, - { "changes", 0, 0xff, SQLITE_UTF8, 0, changes }, - { "total_changes", 0, 0xff, SQLITE_UTF8, 0, total_changes }, - { "replace", 3, 0, SQLITE_UTF8, 0, replaceFunc }, - { "ltrim", 1, 1, SQLITE_UTF8, 0, trimFunc }, - { "ltrim", 2, 1, SQLITE_UTF8, 0, trimFunc }, - { "rtrim", 1, 2, SQLITE_UTF8, 0, trimFunc }, - { "rtrim", 2, 2, SQLITE_UTF8, 0, trimFunc }, - { "trim", 1, 3, SQLITE_UTF8, 0, trimFunc }, - { "trim", 2, 3, SQLITE_UTF8, 0, trimFunc }, - { "zeroblob", 1, 0, SQLITE_UTF8, 0, zeroblobFunc }, -#ifdef SQLITE_SOUNDEX - { "soundex", 1, 0, SQLITE_UTF8, 0, soundexFunc}, -#endif -#ifndef SQLITE_OMIT_LOAD_EXTENSION - { "load_extension", 1, 0xff, SQLITE_UTF8, 0, loadExt }, - { "load_extension", 2, 0xff, SQLITE_UTF8, 0, loadExt }, -#endif -#ifdef SQLITE_TEST - { "randstr", 2, 0, SQLITE_UTF8, 0, randStr }, - { "test_destructor", 1, 0xff, SQLITE_UTF8, 0, test_destructor}, - { "test_destructor_count", 0, 0, SQLITE_UTF8, 0, test_destructor_count}, - { "test_auxdata", -1, 0, SQLITE_UTF8, 0, test_auxdata}, - { "test_error", 1, 0, SQLITE_UTF8, 0, test_error}, -#endif - }; - static const struct { - char *zName; - signed char nArg; - u8 argType; - u8 needCollSeq; - void (*xStep)(sqlite3_context*,int,sqlite3_value**); - void (*xFinalize)(sqlite3_context*); - } aAggs[] = { - { "min", 1, 0, 1, minmaxStep, minMaxFinalize }, - { "max", 1, 1, 1, minmaxStep, minMaxFinalize }, - { "sum", 1, 0, 0, sumStep, sumFinalize }, - { "total", 1, 0, 0, sumStep, totalFinalize }, - { "avg", 1, 0, 
0, sumStep, avgFinalize }, - { "count", 0, 0, 0, countStep, countFinalize }, - { "count", 1, 0, 0, countStep, countFinalize }, - }; - int i; - - for(i=0; ineedCollSeq = 1; - } - } - } -#ifndef SQLITE_OMIT_ALTERTABLE - sqlite3AlterFunctions(db); -#endif -#ifndef SQLITE_OMIT_PARSER - sqlite3AttachFunctions(db); -#endif - for(i=0; ineedCollSeq = 1; - } - } - } - sqlite3RegisterDateTimeFunctions(db); - if( !sqlite3MallocFailed() ){ - int rc = sqlite3_overload_function(db, "MATCH", 2); - assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); - if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); - } - } -#ifdef SQLITE_SSE - (void)sqlite3SseFunctions(db); -#endif -#ifdef SQLITE_CASE_SENSITIVE_LIKE - sqlite3RegisterLikeFunctions(db, 1); -#else - sqlite3RegisterLikeFunctions(db, 0); -#endif -} - -/* -** Set the LIKEOPT flag on the 2-argument function with the given name. -*/ -static void setLikeOptFlag(sqlite3 *db, const char *zName, int flagVal){ - FuncDef *pDef; - pDef = sqlite3FindFunction(db, zName, strlen(zName), 2, SQLITE_UTF8, 0); - if( pDef ){ - pDef->flags = flagVal; - } -} - -/* -** Register the built-in LIKE and GLOB functions. The caseSensitive -** parameter determines whether or not the LIKE operator is case -** sensitive. GLOB is always case sensitive. -*/ -void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ - struct compareInfo *pInfo; - if( caseSensitive ){ - pInfo = (struct compareInfo*)&likeInfoAlt; - }else{ - pInfo = (struct compareInfo*)&likeInfoNorm; - } - sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0); - sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0); - sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, - (struct compareInfo*)&globInfo, likeFunc, 0,0); - setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); - setLikeOptFlag(db, "like", - caseSensitive ? (SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE); -} - -/* -** pExpr points to an expression which implements a function. If -** it is appropriate to apply the LIKE optimization to that function -** then set aWc[0] through aWc[2] to the wildcard characters and -** return TRUE. If the function is not a LIKE-style function then -** return FALSE. -*/ -int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ - FuncDef *pDef; - if( pExpr->op!=TK_FUNCTION || !pExpr->pList ){ - return 0; - } - if( pExpr->pList->nExpr!=2 ){ - return 0; - } - pDef = sqlite3FindFunction(db, (char*)pExpr->token.z, pExpr->token.n, 2, - SQLITE_UTF8, 0); - if( pDef==0 || (pDef->flags & SQLITE_FUNC_LIKE)==0 ){ - return 0; - } - - /* The memcpy() statement assumes that the wildcard characters are - ** the first three statements in the compareInfo structure. The - ** asserts() that follow verify that assumption - */ - memcpy(aWc, pDef->pUserData, 3); - assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll ); - assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne ); - assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet ); - *pIsNocase = (pDef->flags & SQLITE_FUNC_CASE)==0; - return 1; -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/global.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/global.c --- sqlite3-3.4.2/src/global.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/global.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,190 @@ +/* +** 2008 June 13 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. 
+** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains definitions of global variables and contants. +** +** $Id: global.c,v 1.12 2009/02/05 16:31:46 drh Exp $ +*/ +#include "sqliteInt.h" + + +/* An array to map all upper-case characters into their corresponding +** lower-case character. +** +** SQLite only considers US-ASCII (or EBCDIC) characters. We do not +** handle case conversions for the UTF character set since the tables +** involved are nearly as big or bigger than SQLite itself. +*/ +const unsigned char sqlite3UpperToLower[] = { +#ifdef SQLITE_ASCII + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103, + 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, + 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107, + 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, + 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, + 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, + 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, + 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, + 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, + 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, + 252,253,254,255 +#endif +#ifdef SQLITE_EBCDIC + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */ + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */ + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */ + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */ + 96, 97, 66, 67, 68, 69, 70, 71, 72, 73,106,107,108,109,110,111, /* 6x */ + 112, 81, 82, 83, 84, 85, 86, 87, 88, 89,122,123,124,125,126,127, /* 7x */ + 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */ + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,156,159, /* 9x */ + 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */ + 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */ + 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */ + 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */ + 224,225,162,163,164,165,166,167,168,169,232,203,204,205,206,207, /* Ex */ + 239,240,241,242,243,244,245,246,247,248,249,219,220,221,222,255, /* Fx */ +#endif +}; + +/* +** The following 256 byte lookup table is used to support SQLites built-in +** equivalents to the following standard library functions: +** +** isspace() 0x01 +** isalpha() 0x02 +** isdigit() 0x04 +** isalnum() 0x06 +** isxdigit() 0x08 +** toupper() 0x20 +** +** Bit 0x20 is set if the mapped character requires translation to upper +** case. i.e. if the character is a lower-case ASCII character. +** If x is a lower-case ASCII character, then its upper-case equivalent +** is (x - 0x20). 
Therefore toupper() can be implemented as: +** +** (x & ~(map[x]&0x20)) +** +** Standard function tolower() is implemented using the sqlite3UpperToLower[] +** array. tolower() is used more often than toupper() by SQLite. +** +** SQLite's versions are identical to the standard versions assuming a +** locale of "C". They are implemented as macros in sqliteInt.h. +*/ +#ifdef SQLITE_ASCII +const unsigned char sqlite3CtypeMap[256] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */ + 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20..27 !"#$%&' */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */ + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */ + 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */ + + 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */ + 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, /* 58..5f XYZ[\]^_ */ + 0x00, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */ + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */ + 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */ + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80..87 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 88..8f ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 90..97 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 98..9f ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* a0..a7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* a8..af ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0..b7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b8..bf ........ */ + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0..c7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c8..cf ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* d0..d7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* d8..df ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* e0..e7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* e8..ef ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* f0..f7 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* f8..ff ........ */ +}; +#endif + + + +/* +** The following singleton contains the global configuration for +** the SQLite library. 
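The bitmask classification above replaces per-character calls into the C library with a single table probe. The following standalone sketch (an editorial illustration; the table and macro names are hypothetical and only the bit assignments documented above are assumed) shows the same technique with a table built at run time:

    #include <stdio.h>

    /* Bit assignments mirror the ones documented above (assumed for this sketch). */
    #define M_SPACE  0x01
    #define M_ALPHA  0x02
    #define M_DIGIT  0x04
    #define M_XDIGIT 0x08
    #define M_UPPER  0x20   /* set on lower-case letters: clearing it upper-cases */

    static unsigned char ctype_map[256];

    static void build_map(void){
      int c;
      for(c='0'; c<='9'; c++) ctype_map[c] |= M_DIGIT | M_XDIGIT;
      for(c='a'; c<='f'; c++) ctype_map[c] |= M_XDIGIT;
      for(c='A'; c<='F'; c++) ctype_map[c] |= M_XDIGIT;
      for(c='a'; c<='z'; c++) ctype_map[c] |= M_ALPHA | M_UPPER;
      for(c='A'; c<='Z'; c++) ctype_map[c] |= M_ALPHA;
      ctype_map[' '] |= M_SPACE;  ctype_map['\t'] |= M_SPACE;
      ctype_map['\n'] |= M_SPACE; ctype_map['\r'] |= M_SPACE;
      ctype_map['\f'] |= M_SPACE; ctype_map['\v'] |= M_SPACE;
    }

    /* Classification and upper-casing become single table probes. */
    #define IsDigit(x) (ctype_map[(unsigned char)(x)] & M_DIGIT)
    #define IsAlnum(x) (ctype_map[(unsigned char)(x)] & (M_ALPHA|M_DIGIT))
    #define ToUpper(x) ((unsigned char)(x) & ~(ctype_map[(unsigned char)(x)] & M_UPPER))

    int main(void){
      build_map();
      printf("%d %d %c\n", !!IsDigit('7'), !!IsAlnum('_'), ToUpper('q')); /* 1 0 Q */
      return 0;
    }

SQLite instead bakes the full 256-entry table into the library as the constant array shown above, so no run-time initialization is needed.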
+*/ +SQLITE_WSD struct Sqlite3Config sqlite3Config = { + SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */ + 1, /* bCoreMutex */ + SQLITE_THREADSAFE==1, /* bFullMutex */ + 0x7ffffffe, /* mxStrlen */ + 100, /* szLookaside */ + 500, /* nLookaside */ + {0,0,0,0,0,0,0,0}, /* m */ + {0,0,0,0,0,0,0,0,0}, /* mutex */ + {0,0,0,0,0,0,0,0,0,0,0}, /* pcache */ + (void*)0, /* pHeap */ + 0, /* nHeap */ + 0, 0, /* mnHeap, mxHeap */ + (void*)0, /* pScratch */ + 0, /* szScratch */ + 0, /* nScratch */ + (void*)0, /* pPage */ + 0, /* szPage */ + 0, /* nPage */ + 0, /* mxParserStack */ + 0, /* sharedCacheEnabled */ + /* All the rest need to always be zero */ + 0, /* isInit */ + 0, /* inProgress */ + 0, /* isMallocInit */ + 0, /* pInitMutex */ + 0, /* nRefInitMutex */ +}; + + +/* +** Hash table for global functions - functions common to all +** database connections. After initialization, this table is +** read-only. +*/ +SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; + +/* +** The value of the "pending" byte must be 0x40000000 (1 byte past the +** 1-gibabyte boundary) in a compatible database. SQLite never uses +** the database page that contains the pending byte. It never attempts +** to read or write that page. The pending byte page is set assign +** for use by the VFS layers as space for managing file locks. +** +** During testing, it is often desirable to move the pending byte to +** a different position in the file. This allows code that has to +** deal with the pending byte to run on files that are much smaller +** than 1 GiB. The sqlite3_test_control() interface can be used to +** move the pending byte. +** +** IMPORTANT: Changing the pending byte to any value other than +** 0x40000000 results in an incompatible database file format! +** Changing the pending byte during operating results in undefined +** and dileterious behavior. +*/ +int sqlite3PendingByte = 0x40000000; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/hash.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/hash.c --- sqlite3-3.4.2/src/hash.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/hash.c 2009-06-25 12:31:29.000000000 +0100 @@ -12,7 +12,7 @@ ** This is the implementation of generic hash-tables ** used in SQLite. ** -** $Id: hash.c,v 1.19 2007/03/31 03:59:24 drh Exp $ +** $Id: hash.c,v 1.38 2009/05/09 23:29:12 drh Exp $ */ #include "sqliteInt.h" #include @@ -21,28 +21,13 @@ ** fields of the Hash structure. ** ** "pNew" is a pointer to the hash table that is to be initialized. -** keyClass is one of the constants SQLITE_HASH_INT, SQLITE_HASH_POINTER, -** SQLITE_HASH_BINARY, or SQLITE_HASH_STRING. The value of keyClass -** determines what kind of key the hash table will use. "copyKey" is -** true if the hash table should make its own private copy of keys and -** false if it should just use the supplied pointer. CopyKey only makes -** sense for SQLITE_HASH_STRING and SQLITE_HASH_BINARY and is ignored -** for other key classes. */ -void sqlite3HashInit(Hash *pNew, int keyClass, int copyKey){ +void sqlite3HashInit(Hash *pNew){ assert( pNew!=0 ); - assert( keyClass>=SQLITE_HASH_STRING && keyClass<=SQLITE_HASH_BINARY ); - pNew->keyClass = keyClass; -#if 0 - if( keyClass==SQLITE_HASH_POINTER || keyClass==SQLITE_HASH_INT ) copyKey = 0; -#endif - pNew->copyKey = copyKey; pNew->first = 0; pNew->count = 0; pNew->htsize = 0; pNew->ht = 0; - pNew->xMalloc = sqlite3MallocX; - pNew->xFree = sqlite3FreeX; } /* Remove all entries from a hash table. Reclaim all memory. 
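As the comment notes, sqlite3_test_control() can relocate the pending byte so that lock-management code can be exercised on small test files. A minimal sketch, assuming the SQLITE_TESTCTRL_PENDING_BYTE verb is available in this build (hence the guard) and keeping in mind that every connection to a given file must agree on the value:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
    #ifdef SQLITE_TESTCTRL_PENDING_BYTE
      /* Must run before any database file is opened; 64 KiB instead of 1 GiB. */
      sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, 0x10000);
    #endif
      if( sqlite3_open("test.db", &db)!=SQLITE_OK ){
        fprintf(stderr, "open failed\n");
        return 1;
      }
      /* ... run tests against the small file ... */
      sqlite3_close(db);
      return 0;
    }

Databases written this way are not compatible with stock builds, exactly as the warning above says.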
@@ -55,140 +40,33 @@ assert( pH!=0 ); elem = pH->first; pH->first = 0; - if( pH->ht ) pH->xFree(pH->ht); + sqlite3_free(pH->ht); pH->ht = 0; pH->htsize = 0; while( elem ){ HashElem *next_elem = elem->next; - if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); - } - pH->xFree(elem); + sqlite3_free(elem); elem = next_elem; } pH->count = 0; } -#if 0 /* NOT USED */ -/* -** Hash and comparison functions when the mode is SQLITE_HASH_INT -*/ -static int intHash(const void *pKey, int nKey){ - return nKey ^ (nKey<<8) ^ (nKey>>8); -} -static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - return n2 - n1; -} -#endif - -#if 0 /* NOT USED */ -/* -** Hash and comparison functions when the mode is SQLITE_HASH_POINTER -*/ -static int ptrHash(const void *pKey, int nKey){ - uptr x = Addr(pKey); - return x ^ (x<<8) ^ (x>>8); -} -static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( pKey1==pKey2 ) return 0; - if( pKey1=0 ); while( nKey > 0 ){ h = (h<<3) ^ h ^ sqlite3UpperToLower[(unsigned char)*z++]; nKey--; } - return h & 0x7fffffff; -} -static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return sqlite3StrNICmp((const char*)pKey1,(const char*)pKey2,n1); + return h; } -/* -** Hash and comparison functions when the mode is SQLITE_HASH_BINARY -*/ -static int binHash(const void *pKey, int nKey){ - int h = 0; - const char *z = (const char *)pKey; - while( nKey-- > 0 ){ - h = (h<<3) ^ h ^ *(z++); - } - return h & 0x7fffffff; -} -static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return memcmp(pKey1,pKey2,n1); -} -/* -** Return a pointer to the appropriate hash function given the key class. -** -** The C syntax in this function definition may be unfamilar to some -** programmers, so we provide the following additional explanation: -** -** The name of the function is "hashFunction". The function takes a -** single parameter "keyClass". The return value of hashFunction() -** is a pointer to another function. Specifically, the return value -** of hashFunction() is a pointer to a function that takes two parameters -** with types "const void*" and "int" and returns an "int". -*/ -static int (*hashFunction(int keyClass))(const void*,int){ -#if 0 /* HASH_INT and HASH_POINTER are never used */ - switch( keyClass ){ - case SQLITE_HASH_INT: return &intHash; - case SQLITE_HASH_POINTER: return &ptrHash; - case SQLITE_HASH_STRING: return &strHash; - case SQLITE_HASH_BINARY: return &binHash;; - default: break; - } - return 0; -#else - if( keyClass==SQLITE_HASH_STRING ){ - return &strHash; - }else{ - assert( keyClass==SQLITE_HASH_BINARY ); - return &binHash; - } -#endif -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** For help in interpreted the obscure C code in the function definition, -** see the header comment on the previous function. 
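The replacement strHash() above folds every byte through sqlite3UpperToLower[] so the hash is case-insensitive, and it now returns the full unsigned value for the caller to reduce modulo the bucket count instead of masking against a power-of-two table size. A self-contained sketch of the same technique (tolower() stands in for the lookup table, which matches it only for ASCII in the "C" locale):

    #include <stdio.h>
    #include <ctype.h>

    static unsigned int str_hash(const char *z, int n){
      unsigned int h = 0;
      while( n-- > 0 ){
        h = (h<<3) ^ h ^ (unsigned char)tolower((unsigned char)*z++);
      }
      return h;
    }

    int main(void){
      unsigned int nBucket = 128;   /* any size works; no power-of-two requirement */
      printf("%u %u\n", str_hash("MAIN",4) % nBucket, str_hash("main",4) % nBucket);
      return 0;                     /* both keys land in the same bucket */
    }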
-*/ -static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ -#if 0 /* HASH_INT and HASH_POINTER are never used */ - switch( keyClass ){ - case SQLITE_HASH_INT: return &intCompare; - case SQLITE_HASH_POINTER: return &ptrCompare; - case SQLITE_HASH_STRING: return &strCompare; - case SQLITE_HASH_BINARY: return &binCompare; - default: break; - } - return 0; -#else - if( keyClass==SQLITE_HASH_STRING ){ - return &strCompare; - }else{ - assert( keyClass==SQLITE_HASH_BINARY ); - return &binCompare; - } -#endif -} - -/* Link an element into the hash table +/* Link pNew element into the hash table pH. If pEntry!=0 then also +** insert pNew into the pEntry hash bucket. */ static void insertElement( Hash *pH, /* The complete hash table */ @@ -196,7 +74,13 @@ HashElem *pNew /* The element to be inserted */ ){ HashElem *pHead; /* First element already in pEntry */ - pHead = pEntry->chain; + if( pEntry ){ + pHead = pEntry->count ? pEntry->chain : 0; + pEntry->count++; + pEntry->chain = pNew; + }else{ + pHead = 0; + } if( pHead ){ pNew->next = pHead; pNew->prev = pHead->prev; @@ -209,32 +93,45 @@ pNew->prev = 0; pH->first = pNew; } - pEntry->count++; - pEntry->chain = pNew; } /* Resize the hash table so that it cantains "new_size" buckets. -** "new_size" must be a power of 2. The hash table might fail -** to resize if sqliteMalloc() fails. +** +** The hash table might fail to resize if sqlite3_malloc() fails or +** if the new size is the same as the prior size. +** Return TRUE if the resize occurs and false if not. */ -static void rehash(Hash *pH, int new_size){ +static int rehash(Hash *pH, unsigned int new_size){ struct _ht *new_ht; /* The new hash table */ HashElem *elem, *next_elem; /* For looping over existing elements */ - int (*xHash)(const void*,int); /* The hash function */ - assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _ht *)pH->xMalloc( new_size*sizeof(struct _ht) ); - if( new_ht==0 ) return; - if( pH->ht ) pH->xFree(pH->ht); +#if SQLITE_MALLOC_SOFT_LIMIT>0 + if( new_size*sizeof(struct _ht)>SQLITE_MALLOC_SOFT_LIMIT ){ + new_size = SQLITE_MALLOC_SOFT_LIMIT/sizeof(struct _ht); + } + if( new_size==pH->htsize ) return 0; +#endif + + /* The inability to allocates space for a larger hash table is + ** a performance hit but it is not a fatal error. So mark the + ** allocation as a benign. + */ + sqlite3BeginBenignMalloc(); + new_ht = (struct _ht *)sqlite3Malloc( new_size*sizeof(struct _ht) ); + sqlite3EndBenignMalloc(); + + if( new_ht==0 ) return 0; + sqlite3_free(pH->ht); pH->ht = new_ht; - pH->htsize = new_size; - xHash = hashFunction(pH->keyClass); + pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht); + memset(new_ht, 0, new_size*sizeof(struct _ht)); for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + unsigned int h = strHash(elem->pKey, elem->nKey) % new_size; next_elem = elem->next; insertElement(pH, &new_ht[h], elem); } + return 1; } /* This function (for internal use only) locates an element in an @@ -243,25 +140,26 @@ */ static HashElem *findElementGivenHash( const Hash *pH, /* The pH to be searched */ - const void *pKey, /* The key we are searching for */ - int nKey, - int h /* The hash for this key. */ + const char *pKey, /* The key we are searching for */ + int nKey, /* Bytes in key (not counting zero terminator) */ + unsigned int h /* The hash for this key. 
*/ ){ HashElem *elem; /* Used to loop thru the element list */ int count; /* Number of elements left to test */ - int (*xCompare)(const void*,int,const void*,int); /* comparison function */ if( pH->ht ){ struct _ht *pEntry = &pH->ht[h]; elem = pEntry->chain; count = pEntry->count; - xCompare = compareFunction(pH->keyClass); - while( count-- && elem ){ - if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; + }else{ + elem = pH->first; + count = pH->count; + } + while( count-- && ALWAYS(elem) ){ + if( elem->nKey==nKey && sqlite3StrNICmp(elem->pKey,pKey,nKey)==0 ){ + return elem; } + elem = elem->next; } return 0; } @@ -272,7 +170,7 @@ static void removeElementGivenHash( Hash *pH, /* The pH containing "elem" */ HashElem* elem, /* The element to be removed from the pH */ - int h /* Hash value for the element */ + unsigned int h /* Hash value for the element */ ){ struct _ht *pEntry; if( elem->prev ){ @@ -283,18 +181,15 @@ if( elem->next ){ elem->next->prev = elem->prev; } - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - if( pEntry->count<=0 ){ - pEntry->chain = 0; - } - if( pH->copyKey ){ - pH->xFree(elem->pKey); + if( pH->ht ){ + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + assert( pEntry->count>=0 ); } - pH->xFree( elem ); + sqlite3_free( elem ); pH->count--; if( pH->count<=0 ){ assert( pH->first==0 ); @@ -307,17 +202,19 @@ ** that matches pKey,nKey. Return the data for this element if it is ** found, or NULL if there is no match. */ -void *sqlite3HashFind(const Hash *pH, const void *pKey, int nKey){ - int h; /* A hash on key */ +void *sqlite3HashFind(const Hash *pH, const char *pKey, int nKey){ HashElem *elem; /* The element that matches key */ - int (*xHash)(const void*,int); /* The hash function */ + unsigned int h; /* A hash on key */ - if( pH==0 || pH->ht==0 ) return 0; - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - h = (*xHash)(pKey,nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + assert( pH!=0 ); + assert( pKey!=0 ); + assert( nKey>=0 ); + if( pH->ht ){ + h = strHash(pKey, nKey) % pH->htsize; + }else{ + h = 0; + } + elem = findElementGivenHash(pH, pKey, nKey, h); return elem ? elem->data : 0; } @@ -325,8 +222,7 @@ ** and the data is "data". ** ** If no element exists with a matching key, then a new -** element is created. A copy of the key is made if the copyKey -** flag is set. NULL is returned. +** element is created and NULL is returned. ** ** If another element already exists with the same key, then the ** new data replaces the old data and the old data is returned. @@ -336,19 +232,19 @@ ** If the "data" parameter to this function is NULL, then the ** element corresponding to "key" is removed from the hash table. 
*/ -void *sqlite3HashInsert(Hash *pH, const void *pKey, int nKey, void *data){ - int hraw; /* Raw hash value of the key */ - int h; /* the hash of the key modulo hash table size */ +void *sqlite3HashInsert(Hash *pH, const char *pKey, int nKey, void *data){ + unsigned int h; /* the hash of the key modulo hash table size */ HashElem *elem; /* Used to loop thru the element list */ HashElem *new_elem; /* New element added to the pH */ - int (*xHash)(const void*,int); /* The hash function */ assert( pH!=0 ); - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - hraw = (*xHash)(pKey, nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); + assert( pKey!=0 ); + assert( nKey>=0 ); + if( pH->htsize ){ + h = strHash(pKey, nKey) % pH->htsize; + }else{ + h = 0; + } elem = findElementGivenHash(pH,pKey,nKey,h); if( elem ){ void *old_data = elem->data; @@ -356,42 +252,28 @@ removeElementGivenHash(pH,elem,h); }else{ elem->data = data; + elem->pKey = pKey; + assert(nKey==elem->nKey); } return old_data; } if( data==0 ) return 0; - new_elem = (HashElem*)pH->xMalloc( sizeof(HashElem) ); + new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) ); if( new_elem==0 ) return data; - if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = pH->xMalloc( nKey ); - if( new_elem->pKey==0 ){ - pH->xFree(new_elem); - return data; - } - memcpy((void*)new_elem->pKey, pKey, nKey); - }else{ - new_elem->pKey = (void*)pKey; - } + new_elem->pKey = pKey; new_elem->nKey = nKey; + new_elem->data = data; pH->count++; - if( pH->htsize==0 ){ - rehash(pH,8); - if( pH->htsize==0 ){ - pH->count = 0; - if( pH->copyKey ){ - pH->xFree(new_elem->pKey); - } - pH->xFree(new_elem); - return data; + if( pH->count>=10 && pH->count > 2*pH->htsize ){ + if( rehash(pH, pH->count*2) ){ + assert( pH->htsize>0 ); + h = strHash(pKey, nKey) % pH->htsize; } } - if( pH->count > pH->htsize ){ - rehash(pH,pH->htsize*2); + if( pH->ht ){ + insertElement(pH, &pH->ht[h], new_elem); + }else{ + insertElement(pH, 0, new_elem); } - assert( pH->htsize>0 ); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - insertElement(pH, &pH->ht[h], new_elem); - new_elem->data = data; return 0; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/hash.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/hash.h --- sqlite3-3.4.2/src/hash.h 2007-03-29 19:39:32.000000000 +0100 +++ sqlite3-3.6.16/src/hash.h 2009-06-25 12:24:38.000000000 +0100 @@ -12,7 +12,7 @@ ** This is the header file for the generic hash-table implemenation ** used in SQLite. ** -** $Id: hash.h,v 1.9 2006/02/14 10:48:39 danielk1977 Exp $ +** $Id: hash.h,v 1.15 2009/05/02 13:29:38 drh Exp $ */ #ifndef _SQLITE_HASH_H_ #define _SQLITE_HASH_H_ @@ -25,21 +25,30 @@ ** The internals of this structure are intended to be opaque -- client ** code should not attempt to access or modify the fields of this structure ** directly. Change this structure only by using the routines below. -** However, many of the "procedures" and "functions" for modifying and +** However, some of the "procedures" and "functions" for modifying and ** accessing this structure are really macros, so we can't really make ** this structure opaque. +** +** All elements of the hash table are on a single doubly-linked list. +** Hash.first points to the head of this list. +** +** There are Hash.htsize buckets. Each bucket points to a spot in +** the global doubly-linked list. The contents of the bucket are the +** element pointed to plus the next _ht.count-1 elements in the list. +** +** Hash.htsize and Hash.ht may be zero. 
In that case lookup is done +** by a linear search of the global list. For small tables, the +** Hash.ht table is never allocated because if there are few elements +** in the table, it is faster to do a linear search than to manage +** the hash table. */ struct Hash { - char keyClass; /* SQLITE_HASH_INT, _POINTER, _STRING, _BINARY */ - char copyKey; /* True if copy of key made on insert */ - int count; /* Number of entries in this table */ - HashElem *first; /* The first element of the array */ - void *(*xMalloc)(int); /* malloc() function to use */ - void (*xFree)(void *); /* free() function to use */ - int htsize; /* Number of buckets in the hash table */ - struct _ht { /* the hash table */ - int count; /* Number of entries with this hash */ - HashElem *chain; /* Pointer to first entry with this hash */ + unsigned int htsize; /* Number of buckets in the hash table */ + unsigned int count; /* Number of entries in this table */ + HashElem *first; /* The first element of the array */ + struct _ht { /* the hash table */ + int count; /* Number of entries with this hash */ + HashElem *chain; /* Pointer to first entry with this hash */ } *ht; }; @@ -50,39 +59,17 @@ ** be opaque because it is used by macros. */ struct HashElem { - HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - void *pKey; int nKey; /* Key associated with this element */ + HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + const char *pKey; int nKey; /* Key associated with this element */ }; /* -** There are 4 different modes of operation for a hash table: -** -** SQLITE_HASH_INT nKey is used as the key and pKey is ignored. -** -** SQLITE_HASH_POINTER pKey is used as the key and nKey is ignored. -** -** SQLITE_HASH_STRING pKey points to a string that is nKey bytes long -** (including the null-terminator, if any). Case -** is ignored in comparisons. -** -** SQLITE_HASH_BINARY pKey points to binary data nKey bytes long. -** memcmp() is used to compare keys. -** -** A copy of the key is made for SQLITE_HASH_STRING and SQLITE_HASH_BINARY -** if the copyKey parameter to HashInit is 1. -*/ -/* #define SQLITE_HASH_INT 1 // NOT USED */ -/* #define SQLITE_HASH_POINTER 2 // NOT USED */ -#define SQLITE_HASH_STRING 3 -#define SQLITE_HASH_BINARY 4 - -/* ** Access routines. To delete, insert a NULL pointer. 
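A short usage sketch of the simplified interface declared below (this is SQLite-internal, shown only to make the new semantics concrete): keys are no longer copied on insert, so the key bytes must outlive the entry, and lookups stay case-insensitive:

    #include "sqliteInt.h"   /* internal header; not a public interface */

    static void hash_demo(void){
      Hash h;
      const char *v;
      sqlite3HashInit(&h);
      sqlite3HashInsert(&h, "alpha", 5, (void*)"first");
      sqlite3HashInsert(&h, "beta",  4, (void*)"second");
      v = (const char*)sqlite3HashFind(&h, "ALPHA", 5); /* case-insensitive: "first" */
      sqlite3HashInsert(&h, "beta", 4, 0);              /* NULL data deletes "beta" */
      sqlite3HashClear(&h);
      (void)v;
    }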
*/ -void sqlite3HashInit(Hash*, int keytype, int copyKey); -void *sqlite3HashInsert(Hash*, const void *pKey, int nKey, void *pData); -void *sqlite3HashFind(const Hash*, const void *pKey, int nKey); +void sqlite3HashInit(Hash*); +void *sqlite3HashInsert(Hash*, const char *pKey, int nKey, void *pData); +void *sqlite3HashFind(const Hash*, const char *pKey, int nKey); void sqlite3HashClear(Hash*); /* @@ -100,12 +87,12 @@ #define sqliteHashFirst(H) ((H)->first) #define sqliteHashNext(E) ((E)->next) #define sqliteHashData(E) ((E)->data) -#define sqliteHashKey(E) ((E)->pKey) -#define sqliteHashKeysize(E) ((E)->nKey) +/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */ +/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */ /* ** Number of entries in a hash table */ -#define sqliteHashCount(H) ((H)->count) +/* #define sqliteHashCount(H) ((H)->count) // NOT USED */ #endif /* _SQLITE_HASH_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/hwtime.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/hwtime.h --- sqlite3-3.4.2/src/hwtime.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/hwtime.h 2008-08-01 15:33:15.000000000 +0100 @@ -0,0 +1,87 @@ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 class CPUs. +** +** $Id: hwtime.h,v 1.3 2008/08/01 14:33:15 shane Exp $ +*/ +#ifndef _HWTIME_H_ +#define _HWTIME_H_ + +/* +** The following routine only works on pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + #error Need implementation of sqlite3Hwtime() for your platform. + + /* + ** To compile without implementing sqlite3Hwtime() for your platform, + ** you can remove the above #error and use the following + ** stub function. You will lose timing support for many + ** of the debugging and testing utilities, but it should at + ** least compile and run. 
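The same RDTSC idea can be used to bracket any region of interest. A minimal sketch (GCC inline assembly, x86/x86-64 only; the result is a raw cycle count, not wall-clock time, and is affected by frequency scaling and core migration):

    #include <stdio.h>

    static unsigned long long cycle_count(void){
      unsigned int lo, hi;
      __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
      return ((unsigned long long)hi << 32) | lo;
    }

    int main(void){
      unsigned long long t0, t1;
      volatile double x = 1.0;
      int i;
      t0 = cycle_count();
      for(i=0; i<1000000; i++) x = x*1.0000001;   /* code being measured */
      t1 = cycle_count();
      printf("elapsed cycles: %llu\n", t1 - t0);
      return 0;
    }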
+ */ + sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(_HWTIME_H_) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/insert.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/insert.c --- sqlite3-3.4.2/src/insert.c 2007-07-23 20:39:47.000000000 +0100 +++ sqlite3-3.6.16/src/insert.c 2009-06-25 12:45:58.000000000 +0100 @@ -12,12 +12,32 @@ ** This file contains C code routines that are called by the parser ** to handle INSERT statements in SQLite. ** -** $Id: insert.c,v 1.188 2007/07/23 19:39:47 drh Exp $ +** $Id: insert.c,v 1.269 2009/06/23 20:28:54 drh Exp $ */ #include "sqliteInt.h" /* -** Set P3 of the most recently inserted opcode to a column affinity +** Generate code that will open a table for reading. +*/ +void sqlite3OpenTable( + Parse *p, /* Generate code into this VDBE */ + int iCur, /* The cursor number of the table */ + int iDb, /* The database index in sqlite3.aDb[] */ + Table *pTab, /* The table to be opened */ + int opcode /* OP_OpenRead or OP_OpenWrite */ +){ + Vdbe *v; + if( IsVirtual(pTab) ) return; + v = sqlite3GetVdbe(p); + assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); + sqlite3TableLock(p, iDb, pTab->tnum, (opcode==OP_OpenWrite)?1:0, pTab->zName); + sqlite3VdbeAddOp3(v, opcode, iCur, pTab->tnum, iDb); + sqlite3VdbeChangeP4(v, -1, SQLITE_INT_TO_PTR(pTab->nCol), P4_INT32); + VdbeComment((v, "%s", pTab->zName)); +} + +/* +** Set P4 of the most recently inserted opcode to a column affinity ** string for index pIdx. A column affinity string has one character ** for each column in the table, according to the affinity of the column: ** @@ -28,6 +48,9 @@ ** 'c' NUMERIC ** 'd' INTEGER ** 'e' REAL +** +** An extra 'b' is appended to the end of the string to cover the +** rowid that appears as the last column in every index. */ void sqlite3IndexAffinityStr(Vdbe *v, Index *pIdx){ if( !pIdx->zColAff ){ @@ -41,21 +64,24 @@ */ int n; Table *pTab = pIdx->pTable; - pIdx->zColAff = (char *)sqliteMalloc(pIdx->nColumn+1); + sqlite3 *db = sqlite3VdbeDb(v); + pIdx->zColAff = (char *)sqlite3Malloc(pIdx->nColumn+2); if( !pIdx->zColAff ){ + db->mallocFailed = 1; return; } for(n=0; nnColumn; n++){ pIdx->zColAff[n] = pTab->aCol[pIdx->aiColumn[n]].affinity; } - pIdx->zColAff[pIdx->nColumn] = '\0'; + pIdx->zColAff[n++] = SQLITE_AFF_NONE; + pIdx->zColAff[n] = 0; } - sqlite3VdbeChangeP3(v, -1, pIdx->zColAff, 0); + sqlite3VdbeChangeP4(v, -1, pIdx->zColAff, 0); } /* -** Set P3 of the most recently inserted opcode to a column affinity +** Set P4 of the most recently inserted opcode to a column affinity ** string for table pTab. A column affinity string has one character ** for each column indexed by the index, according to the affinity of the ** column: @@ -79,9 +105,11 @@ if( !pTab->zColAff ){ char *zColAff; int i; + sqlite3 *db = sqlite3VdbeDb(v); - zColAff = (char *)sqliteMalloc(pTab->nCol+1); + zColAff = (char *)sqlite3Malloc(pTab->nCol+1); if( !zColAff ){ + db->mallocFailed = 1; return; } @@ -93,79 +121,126 @@ pTab->zColAff = zColAff; } - sqlite3VdbeChangeP3(v, -1, pTab->zColAff, 0); + sqlite3VdbeChangeP4(v, -1, pTab->zColAff, 0); } /* -** Return non-zero if SELECT statement p opens the table with rootpage -** iTab in database iDb. This is used to see if a statement of the form -** "INSERT INTO SELECT ..." can run without using temporary -** table for the results of the SELECT. -** -** No checking is done for sub-selects that are part of expressions. 
+** Return non-zero if the table pTab in database iDb or any of its indices +** have been opened at any point in the VDBE program beginning at location +** iStartAddr throught the end of the program. This is used to see if +** a statement of the form "INSERT INTO SELECT ..." can +** run without using temporary table for the results of the SELECT. */ -static int selectReadsTable(Select *p, Schema *pSchema, int iTab){ +static int readsTable(Vdbe *v, int iStartAddr, int iDb, Table *pTab){ int i; - struct SrcList_item *pItem; - if( p->pSrc==0 ) return 0; - for(i=0, pItem=p->pSrc->a; ipSrc->nSrc; i++, pItem++){ - if( pItem->pSelect ){ - if( selectReadsTable(pItem->pSelect, pSchema, iTab) ) return 1; - }else{ - if( pItem->pTab->pSchema==pSchema && pItem->pTab->tnum==iTab ) return 1; + int iEnd = sqlite3VdbeCurrentAddr(v); + for(i=iStartAddr; iopcode==OP_OpenRead && pOp->p3==iDb ){ + Index *pIndex; + int tnum = pOp->p2; + if( tnum==pTab->tnum ){ + return 1; + } + for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ + if( tnum==pIndex->tnum ){ + return 1; + } + } } +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pOp->opcode==OP_VOpen && pOp->p4.pVtab==pTab->pVtab ){ + assert( pOp->p4.pVtab!=0 ); + assert( pOp->p4type==P4_VTAB ); + return 1; + } +#endif } return 0; } #ifndef SQLITE_OMIT_AUTOINCREMENT /* -** Write out code to initialize the autoincrement logic. This code -** looks up the current autoincrement value in the sqlite_sequence -** table and stores that value in a memory cell. Code generated by -** autoIncStep() will keep that memory cell holding the largest -** rowid value. Code generated by autoIncEnd() will write the new -** largest value of the counter back into the sqlite_sequence table. -** -** This routine returns the index of the mem[] cell that contains -** the maximum rowid counter. -** -** Two memory cells are allocated. The next memory cell after the -** one returned holds the rowid in sqlite_sequence where we will -** write back the revised maximum rowid. +** Locate or create an AutoincInfo structure associated with table pTab +** which is in database iDb. Return the register number for the register +** that holds the maximum rowid. +** +** There is at most one AutoincInfo structure per table even if the +** same table is autoincremented multiple times due to inserts within +** triggers. A new AutoincInfo structure is created if this is the +** first use of table pTab. On 2nd and subsequent uses, the original +** AutoincInfo structure is used. +** +** Three memory locations are allocated: +** +** (1) Register to hold the name of the pTab table. +** (2) Register to hold the maximum ROWID of pTab. +** (3) Register to hold the rowid in sqlite_sequence of pTab +** +** The 2nd register is the one that is returned. That is all the +** insert routine needs to know about. 
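The observable effect of this bookkeeping is that the largest ROWID ever handed out for an AUTOINCREMENT table is persisted in sqlite_sequence under the table's name. A small sketch against the public API (error handling omitted; it prints "t1 -> 2"):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE t1(id INTEGER PRIMARY KEY AUTOINCREMENT, x);"
        "INSERT INTO t1(x) VALUES(1); INSERT INTO t1(x) VALUES(2);", 0, 0, 0);
      sqlite3_prepare_v2(db, "SELECT name, seq FROM sqlite_sequence", -1, &pStmt, 0);
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s -> %d\n", (const char*)sqlite3_column_text(pStmt, 0),
               sqlite3_column_int(pStmt, 1));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }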
*/ static int autoIncBegin( Parse *pParse, /* Parsing context */ int iDb, /* Index of the database holding pTab */ Table *pTab /* The table we are writing to */ ){ - int memId = 0; - if( pTab->autoInc ){ - Vdbe *v = pParse->pVdbe; - Db *pDb = &pParse->db->aDb[iDb]; - int iCur = pParse->nTab; - int addr; - assert( v ); - addr = sqlite3VdbeCurrentAddr(v); - memId = pParse->nMem+1; - pParse->nMem += 2; - sqlite3OpenTable(pParse, iCur, iDb, pDb->pSchema->pSeqTab, OP_OpenRead); - sqlite3VdbeAddOp(v, OP_Rewind, iCur, addr+13); - sqlite3VdbeAddOp(v, OP_Column, iCur, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); - sqlite3VdbeAddOp(v, OP_Ne, 0x100, addr+12); - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_MemStore, memId-1, 1); - sqlite3VdbeAddOp(v, OP_Column, iCur, 1); - sqlite3VdbeAddOp(v, OP_MemStore, memId, 1); - sqlite3VdbeAddOp(v, OP_Goto, 0, addr+13); - sqlite3VdbeAddOp(v, OP_Next, iCur, addr+4); - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); + int memId = 0; /* Register holding maximum rowid */ + if( pTab->tabFlags & TF_Autoincrement ){ + AutoincInfo *pInfo; + + pInfo = pParse->pAinc; + while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } + if( pInfo==0 ){ + pInfo = sqlite3DbMallocRaw(pParse->db, sizeof(*pInfo)); + if( pInfo==0 ) return 0; + pInfo->pNext = pParse->pAinc; + pParse->pAinc = pInfo; + pInfo->pTab = pTab; + pInfo->iDb = iDb; + pParse->nMem++; /* Register to hold name of table */ + pInfo->regCtr = ++pParse->nMem; /* Max rowid register */ + pParse->nMem++; /* Rowid in sqlite_sequence */ + } + memId = pInfo->regCtr; } return memId; } /* +** This routine generates code that will initialize all of the +** register used by the autoincrement tracker. +*/ +void sqlite3AutoincrementBegin(Parse *pParse){ + AutoincInfo *p; /* Information about an AUTOINCREMENT */ + sqlite3 *db = pParse->db; /* The database connection */ + Db *pDb; /* Database only autoinc table */ + int memId; /* Register holding max rowid */ + int addr; /* A VDBE address */ + Vdbe *v = pParse->pVdbe; /* VDBE under construction */ + + assert( v ); /* We failed long ago if this is not so */ + for(p = pParse->pAinc; p; p = p->pNext){ + pDb = &db->aDb[p->iDb]; + memId = p->regCtr; + sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead); + addr = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp4(v, OP_String8, 0, memId-1, 0, p->pTab->zName, 0); + sqlite3VdbeAddOp2(v, OP_Rewind, 0, addr+9); + sqlite3VdbeAddOp3(v, OP_Column, 0, 0, memId); + sqlite3VdbeAddOp3(v, OP_Ne, memId-1, addr+7, memId); + sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL); + sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1); + sqlite3VdbeAddOp3(v, OP_Column, 0, 1, memId); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addr+9); + sqlite3VdbeAddOp2(v, OP_Next, 0, addr+2); + sqlite3VdbeAddOp2(v, OP_Integer, 0, memId); + sqlite3VdbeAddOp0(v, OP_Close); + } +} + +/* ** Update the maximum rowid for an autoincrement calculation. ** ** This routine should be called when the top of the stack holds a @@ -173,40 +248,50 @@ ** larger than the maximum rowid in the memId memory cell, then the ** memory cell is updated. The stack is unchanged. */ -static void autoIncStep(Parse *pParse, int memId){ +static void autoIncStep(Parse *pParse, int memId, int regRowid){ if( memId>0 ){ - sqlite3VdbeAddOp(pParse->pVdbe, OP_MemMax, memId, 0); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_MemMax, memId, regRowid); } } /* -** After doing one or more inserts, the maximum rowid is stored -** in mem[memId]. 
Generate code to write this value back into the -** the sqlite_sequence table. +** This routine generates the code needed to write autoincrement +** maximum rowid values back into the sqlite_sequence register. +** Every statement that might do an INSERT into an autoincrement +** table (either directly or through triggers) needs to call this +** routine just before the "exit" code. */ -static void autoIncEnd( - Parse *pParse, /* The parsing context */ - int iDb, /* Index of the database holding pTab */ - Table *pTab, /* Table we are inserting into */ - int memId /* Memory cell holding the maximum rowid */ -){ - if( pTab->autoInc ){ - int iCur = pParse->nTab; - Vdbe *v = pParse->pVdbe; - Db *pDb = &pParse->db->aDb[iDb]; - int addr; - assert( v ); - addr = sqlite3VdbeCurrentAddr(v); - sqlite3OpenTable(pParse, iCur, iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); - sqlite3VdbeAddOp(v, OP_MemLoad, memId-1, 0); - sqlite3VdbeAddOp(v, OP_NotNull, -1, addr+7); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_NewRowid, iCur, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->zName, 0); - sqlite3VdbeAddOp(v, OP_MemLoad, memId, 0); - sqlite3VdbeAddOp(v, OP_MakeRecord, 2, 0); - sqlite3VdbeAddOp(v, OP_Insert, iCur, OPFLAG_APPEND); - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); +void sqlite3AutoincrementEnd(Parse *pParse){ + AutoincInfo *p; + Vdbe *v = pParse->pVdbe; + sqlite3 *db = pParse->db; + + assert( v ); + for(p = pParse->pAinc; p; p = p->pNext){ + Db *pDb = &db->aDb[p->iDb]; + int j1, j2, j3, j4, j5; + int iRec; + int memId = p->regCtr; + + iRec = sqlite3GetTempReg(pParse); + sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); + j1 = sqlite3VdbeAddOp1(v, OP_NotNull, memId+1); + j2 = sqlite3VdbeAddOp0(v, OP_Rewind); + j3 = sqlite3VdbeAddOp3(v, OP_Column, 0, 0, iRec); + j4 = sqlite3VdbeAddOp3(v, OP_Eq, memId-1, 0, iRec); + sqlite3VdbeAddOp2(v, OP_Next, 0, j3); + sqlite3VdbeJumpHere(v, j2); + sqlite3VdbeAddOp2(v, OP_NewRowid, 0, memId+1); + j5 = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeJumpHere(v, j4); + sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1); + sqlite3VdbeJumpHere(v, j1); + sqlite3VdbeJumpHere(v, j5); + sqlite3VdbeAddOp3(v, OP_MakeRecord, memId-1, 2, iRec); + sqlite3VdbeAddOp3(v, OP_Insert, 0, iRec, memId+1); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3VdbeAddOp0(v, OP_Close); + sqlite3ReleaseTempReg(pParse, iRec); } } #else @@ -215,8 +300,7 @@ ** above are all no-ops */ # define autoIncBegin(A,B,C) (0) -# define autoIncStep(A,B) -# define autoIncEnd(A,B,C,D) +# define autoIncStep(A,B,C) #endif /* SQLITE_OMIT_AUTOINCREMENT */ @@ -246,7 +330,8 @@ ** ** The code generated follows one of four templates. For a simple ** select with data coming from a VALUES clause, the code executes -** once straight down through. The template looks like this: +** once straight down through. Pseudo-code follows (we call this +** the "1st template"): ** ** open write cursor to
<table> and its indices ** puts VALUES clause expressions onto the stack @@ -264,7 +349,7 @@ ** schemas, including all the same indices, then a special optimization ** is invoked that copies raw records from <table2> over to <table1>. ** See the xferOptimization() function for the implementation of this -** template. This is the second template. +** template. This is the 2nd template. ** ** open a write cursor to
<table1> ** open read cursor on <table2> @@ -277,45 +362,58 @@ ** close cursors ** end foreach ** -** The third template is for when the second template does not apply +** The 3rd template is for when the second template does not apply ** and the SELECT clause does not read from <table>
at any time. ** The generated code follows this template: ** +** EOF <- 0 +** X <- A ** goto B ** A: setup for the SELECT ** loop over the rows in the SELECT -** gosub C +** load values into registers R..R+n +** yield X ** end loop ** cleanup after the SELECT -** goto D -** B: open write cursor to
<table> and its indices +** EOF <- 1 +** yield X ** goto A -** C: insert the select result into <table>
-** return +** B: open write cursor to
<table> and its indices +** C: yield X +** if EOF goto D +** insert the select result into <table>
from R..R+n +** goto C ** D: cleanup ** -** The fourth template is used if the insert statement takes its +** The 4th template is used if the insert statement takes its ** values from a SELECT but the data is being inserted into a table ** that is also read as part of the SELECT. In the third form, ** we have to use a intermediate table to store the results of ** the select. The template is like this: ** +** EOF <- 0 +** X <- A ** goto B ** A: setup for the SELECT ** loop over the tables in the SELECT -** gosub C +** load value into register R..R+n +** yield X ** end loop ** cleanup after the SELECT -** goto D -** C: insert the select result into the intermediate table -** return -** B: open a cursor to an intermediate table -** goto A -** D: open write cursor to
<table> and its indices -** loop over the intermediate table +** EOF <- 1 +** yield X +** halt-error +** B: open temp table +** L: yield X +** if EOF goto M +** insert row from R..R+n into temp table +** goto L +** M: open write cursor to
<table> and its indices +** rewind temp table +** C: loop over rows of intermediate table ** transfer values from intermediate table into <table>
-** end the loop -** cleanup +** end loop +** D: cleanup */ void sqlite3Insert( Parse *pParse, /* Parser context */ @@ -325,47 +423,58 @@ IdList *pColumn, /* Column names corresponding to IDLIST. */ int onError /* How to handle constraint errors */ ){ - Table *pTab; /* The table to insert into */ + sqlite3 *db; /* The main database structure */ + Table *pTab; /* The table to insert into. aka TABLE */ char *zTab; /* Name of the table into which we are inserting */ const char *zDb; /* Name of the database holding this table */ int i, j, idx; /* Loop counters */ Vdbe *v; /* Generate code into this virtual machine */ Index *pIdx; /* For looping over indices of the table */ int nColumn; /* Number of columns in the data */ - int base = 0; /* VDBE Cursor number for pTab */ - int iCont=0,iBreak=0; /* Beginning and end of the loop over srcTab */ - sqlite3 *db; /* The main database structure */ + int nHidden = 0; /* Number of hidden columns if TABLE is virtual */ + int baseCur = 0; /* VDBE Cursor number for pTab */ int keyColumn = -1; /* Column that is the INTEGER PRIMARY KEY */ int endOfLoop; /* Label for the end of the insertion loop */ int useTempTable = 0; /* Store SELECT results in intermediate table */ int srcTab = 0; /* Data comes from this temporary cursor if >=0 */ - int iSelectLoop = 0; /* Address of code that implements the SELECT */ - int iCleanup = 0; /* Address of the cleanup code */ - int iInsertBlock = 0; /* Address of the subroutine used to insert data */ - int iCntMem = 0; /* Memory cell used for the row counter */ - int newIdx = -1; /* Cursor for the NEW table */ + int addrInsTop = 0; /* Jump to label "D" */ + int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */ + int addrSelect = 0; /* Address of coroutine that implements the SELECT */ + SelectDest dest; /* Destination for SELECT on rhs of INSERT */ + int newIdx = -1; /* Cursor for the NEW pseudo-table */ + int iDb; /* Index of database holding TABLE */ Db *pDb; /* The database containing table being inserted into */ - int counterMem = 0; /* Memory cell holding AUTOINCREMENT counter */ int appendFlag = 0; /* True if the insert is likely to be an append */ - int iDb; - int nHidden = 0; + /* Register allocations */ + int regFromSelect = 0;/* Base register for data coming from SELECT */ + int regAutoinc = 0; /* Register holding the AUTOINCREMENT counter */ + int regRowCount = 0; /* Memory cell used for the row counter */ + int regIns; /* Block of regs holding rowid+data being inserted */ + int regRowid; /* registers holding insert rowid */ + int regData; /* register holding first column to insert */ + int regRecord; /* Holds the assemblied row record */ + int regEof = 0; /* Register recording end of SELECT data */ + int *aRegIdx = 0; /* One register allocated to each index */ + #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to insert into a view */ - int triggers_exist = 0; /* True if there are FOR EACH ROW triggers */ + Trigger *pTrigger; /* List of triggers on pTab, if required */ + int tmask; /* Mask of trigger times */ #endif - if( pParse->nErr || sqlite3MallocFailed() ){ + db = pParse->db; + memset(&dest, 0, sizeof(dest)); + if( pParse->nErr || db->mallocFailed ){ goto insert_cleanup; } - db = pParse->db; /* Locate the table into which we will be inserting new information. 
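For reference, these are the statement shapes that steer sqlite3Insert() toward each template; whether the raw-record copy of the 2nd template actually fires is decided at run time by xferOptimization(). A sketch, assuming tables t1, t1_copy and t2 already exist, each with two compatible columns (a, b):

    #include <sqlite3.h>

    static void insert_forms(sqlite3 *db){
      /* 1st template: VALUES clause, code runs straight through once. */
      sqlite3_exec(db, "INSERT INTO t1 VALUES(1, 'a')", 0, 0, 0);

      /* Candidate for the 2nd template: SELECT * from an identically
      ** declared table, so raw records may be transferred wholesale. */
      sqlite3_exec(db, "INSERT INTO t1 SELECT * FROM t1_copy", 0, 0, 0);

      /* 3rd template: the SELECT feeds the insert loop as a co-routine. */
      sqlite3_exec(db, "INSERT INTO t1 SELECT a+1, b FROM t2", 0, 0, 0);

      /* 4th template: the SELECT also reads the target table, so rows are
      ** staged in an intermediate temp table first. */
      sqlite3_exec(db, "INSERT INTO t1 SELECT a, b FROM t1", 0, 0, 0);
    }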
*/ assert( pTabList->nSrc==1 ); zTab = pTabList->a[0].zName; - if( zTab==0 ) goto insert_cleanup; + if( NEVER(zTab==0) ) goto insert_cleanup; pTab = sqlite3SrcListLookup(pParse, pTabList); if( pTab==0 ){ goto insert_cleanup; @@ -382,22 +491,24 @@ ** inserted into is a view */ #ifndef SQLITE_OMIT_TRIGGER - triggers_exist = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0); + pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask); isView = pTab->pSelect!=0; #else -# define triggers_exist 0 +# define pTrigger 0 +# define tmask 0 # define isView 0 #endif #ifdef SQLITE_OMIT_VIEW # undef isView # define isView 0 #endif + assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) ); /* Ensure that: * (a) the table is not read-only, * (b) that if it is a view then ON INSERT triggers exist */ - if( sqlite3IsReadOnly(pParse, pTab, triggers_exist) ){ + if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ goto insert_cleanup; } assert( pTab!=0 ); @@ -415,10 +526,10 @@ v = sqlite3GetVdbe(pParse); if( v==0 ) goto insert_cleanup; if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); - sqlite3BeginWriteOperation(pParse, pSelect || triggers_exist, iDb); + sqlite3BeginWriteOperation(pParse, pSelect || pTrigger, iDb); /* if there are row triggers, allocate a temp table for new.* references. */ - if( triggers_exist ){ + if( pTrigger ){ newIdx = pParse->nTab++; } @@ -429,83 +540,120 @@ ** ** Then special optimizations can be applied that make the transfer ** very fast and which reduce fragmentation of indices. + ** + ** This is the 2nd template. */ if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){ - assert( !triggers_exist ); + assert( !pTrigger ); assert( pList==0 ); - goto insert_cleanup; + goto insert_end; } #endif /* SQLITE_OMIT_XFER_OPT */ /* If this is an AUTOINCREMENT table, look up the sequence number in the - ** sqlite_sequence table and store it in memory cell counterMem. Also - ** remember the rowid of the sqlite_sequence table entry in memory cell - ** counterRowid. + ** sqlite_sequence table and store it in memory cell regAutoinc. */ - counterMem = autoIncBegin(pParse, iDb, pTab); + regAutoinc = autoIncBegin(pParse, iDb, pTab); /* Figure out how many columns of data are supplied. If the data - ** is coming from a SELECT statement, then this step also generates - ** all the code to implement the SELECT statement and invoke a subroutine - ** to process each row of the result. (Template 2.) If the SELECT - ** statement uses the the table that is being inserted into, then the - ** subroutine is also coded here. That subroutine stores the SELECT - ** results in a temporary table. (Template 3.) + ** is coming from a SELECT statement, then generate a co-routine that + ** produces a single row of the SELECT on each invocation. The + ** co-routine is the common header to the 3rd and 4th templates. */ if( pSelect ){ /* Data is coming from a SELECT. Generate code to implement that SELECT + ** as a co-routine. The code is common to both the 3rd and 4th + ** templates: + ** + ** EOF <- 0 + ** X <- A + ** goto B + ** A: setup for the SELECT + ** loop over the tables in the SELECT + ** load value into register R..R+n + ** yield X + ** end loop + ** cleanup after the SELECT + ** EOF <- 1 + ** yield X + ** halt-error + ** + ** On each invocation of the co-routine, it puts a single row of the + ** SELECT result into registers dest.iMem...dest.iMem+dest.nMem-1. + ** (These output registers are allocated by sqlite3Select().) 
When + ** the SELECT completes, it sets the EOF flag stored in regEof. */ - int rc, iInitCode; - iInitCode = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); - iSelectLoop = sqlite3VdbeCurrentAddr(v); - iInsertBlock = sqlite3VdbeMakeLabel(v); + int rc, j1; + + regEof = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regEof); /* EOF <- 0 */ + VdbeComment((v, "SELECT eof flag")); + sqlite3SelectDestInit(&dest, SRT_Coroutine, ++pParse->nMem); + addrSelect = sqlite3VdbeCurrentAddr(v)+2; + sqlite3VdbeAddOp2(v, OP_Integer, addrSelect-1, dest.iParm); + j1 = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); + VdbeComment((v, "Jump over SELECT coroutine")); /* Resolve the expressions in the SELECT statement and execute it. */ - rc = sqlite3Select(pParse, pSelect, SRT_Subroutine, iInsertBlock,0,0,0,0); - if( rc || pParse->nErr || sqlite3MallocFailed() ){ + rc = sqlite3Select(pParse, pSelect, &dest); + assert( pParse->nErr==0 || rc ); + if( rc || NEVER(pParse->nErr) || db->mallocFailed ){ goto insert_cleanup; } + sqlite3VdbeAddOp2(v, OP_Integer, 1, regEof); /* EOF <- 1 */ + sqlite3VdbeAddOp1(v, OP_Yield, dest.iParm); /* yield X */ + sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_INTERNAL, OE_Abort); + VdbeComment((v, "End of SELECT coroutine")); + sqlite3VdbeJumpHere(v, j1); /* label B: */ - iCleanup = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp(v, OP_Goto, 0, iCleanup); + regFromSelect = dest.iMem; assert( pSelect->pEList ); nColumn = pSelect->pEList->nExpr; + assert( dest.nMem==nColumn ); /* Set useTempTable to TRUE if the result of the SELECT statement - ** should be written into a temporary table. Set to FALSE if each - ** row of the SELECT can be written directly into the result table. + ** should be written into a temporary table (template 4). Set to + ** FALSE if each* row of the SELECT can be written directly into + ** the destination table (template 3). ** ** A temp table must be used if the table being updated is also one ** of the tables being read by the SELECT statement. Also use a ** temp table in the case of row triggers. */ - if( triggers_exist || selectReadsTable(pSelect,pTab->pSchema,pTab->tnum) ){ + if( pTrigger || readsTable(v, addrSelect, iDb, pTab) ){ useTempTable = 1; } if( useTempTable ){ - /* Generate the subroutine that SELECT calls to process each row of - ** the result. Store the result in a temporary table + /* Invoke the coroutine to extract information from the SELECT + ** and add it to a transient table srcTab. The code generated + ** here is from the 4th template: + ** + ** B: open temp table + ** L: yield X + ** if EOF goto M + ** insert row from R..R+n into temp table + ** goto L + ** M: ... */ + int regRec; /* Register to hold packed record */ + int regTempRowid; /* Register to hold temp table ROWID */ + int addrTop; /* Label "L" */ + int addrIf; /* Address of jump to M */ + srcTab = pParse->nTab++; - sqlite3VdbeResolveLabel(v, iInsertBlock); - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - sqlite3VdbeAddOp(v, OP_NewRowid, srcTab, 0); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); - sqlite3VdbeAddOp(v, OP_Insert, srcTab, OPFLAG_APPEND); - sqlite3VdbeAddOp(v, OP_Return, 0, 0); - - /* The following code runs first because the GOTO at the very top - ** of the program jumps to it. Create the temporary table, then jump - ** back up and execute the SELECT code above. 
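The co-routine protocol described above reduces to this: the producer loads one row into a block of registers, yields, and raises an EOF flag when it runs dry; the insert loop yields back into it and stops on EOF. A plain-C sketch of that hand-off (hypothetical names; the generated VDBE code does this with OP_Yield and the regEof register):

    #include <stdio.h>

    static int eof = 0;                  /* plays the role of regEof */
    static int row;                      /* plays the role of the dest registers */

    static void select_coroutine(void){  /* produces rows 1..3, then signals EOF */
      static int next = 1;
      if( next>3 ){ eof = 1; return; }
      row = next++;
    }

    int main(void){
      for(;;){
        select_coroutine();              /* "yield X" back into the SELECT */
        if( eof ) break;                 /* "if EOF goto D" */
        printf("insert row %d\n", row);  /* "insert the select result" */
      }
      return 0;
    }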
- */ - sqlite3VdbeJumpHere(v, iInitCode); - sqlite3VdbeAddOp(v, OP_OpenEphemeral, srcTab, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, srcTab, nColumn); - sqlite3VdbeAddOp(v, OP_Goto, 0, iSelectLoop); - sqlite3VdbeResolveLabel(v, iCleanup); - }else{ - sqlite3VdbeJumpHere(v, iInitCode); + regRec = sqlite3GetTempReg(pParse); + regTempRowid = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn); + addrTop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iParm); + addrIf = sqlite3VdbeAddOp1(v, OP_If, regEof); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec); + sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid); + sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop); + sqlite3VdbeJumpHere(v, addrIf); + sqlite3ReleaseTempReg(pParse, regRec); + sqlite3ReleaseTempReg(pParse, regTempRowid); } }else{ /* This is the case if the data for the INSERT is coming from a VALUES @@ -515,10 +663,10 @@ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; srcTab = -1; - useTempTable = 0; + assert( useTempTable==0 ); nColumn = pList ? pList->nExpr : 0; for(i=0; ia[i].pExpr) ){ + if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){ goto insert_cleanup; } } @@ -535,7 +683,7 @@ if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ sqlite3ErrorMsg(pParse, "table %S has %d columns but %d values were supplied", - pTabList, 0, pTab->nCol, nColumn); + pTabList, 0, pTab->nCol-nHidden, nColumn); goto insert_cleanup; } if( pColumn!=0 && nColumn!=pColumn->nId ){ @@ -591,42 +739,78 @@ /* Open the temp table for FOR EACH ROW triggers */ - if( triggers_exist ){ - sqlite3VdbeAddOp(v, OP_OpenPseudo, newIdx, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, newIdx, pTab->nCol); + if( pTrigger ){ + sqlite3VdbeAddOp3(v, OP_OpenPseudo, newIdx, 0, pTab->nCol); } /* Initialize the count of rows to be inserted */ if( db->flags & SQLITE_CountRows ){ - iCntMem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemInt, 0, iCntMem); + regRowCount = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); } - /* Open tables and indices if there are no row triggers */ - if( !triggers_exist ){ - base = pParse->nTab; - sqlite3OpenTableAndIndices(pParse, pTab, base, OP_OpenWrite); + /* If this is not a view, open the table and and all indices */ + if( !isView ){ + int nIdx; + + baseCur = pParse->nTab; + nIdx = sqlite3OpenTableAndIndices(pParse, pTab, baseCur, OP_OpenWrite); + aRegIdx = sqlite3DbMallocRaw(db, sizeof(int)*(nIdx+1)); + if( aRegIdx==0 ){ + goto insert_cleanup; + } + for(i=0; inMem; + } } - /* If the data source is a temporary table, then we have to create - ** a loop because there might be multiple rows of data. If the data - ** source is a subroutine call from the SELECT statement, then we need - ** to launch the SELECT statement processing. - */ + /* This is the top of the main insertion loop */ if( useTempTable ){ - iBreak = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp(v, OP_Rewind, srcTab, iBreak); - iCont = sqlite3VdbeCurrentAddr(v); + /* This block codes the top of loop only. The complete loop is the + ** following pseudocode (template 4): + ** + ** rewind temp table + ** C: loop over rows of intermediate table + ** transfer values form intermediate table into
+ ** end loop + ** D: ... + */ + addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); + addrCont = sqlite3VdbeCurrentAddr(v); }else if( pSelect ){ - sqlite3VdbeAddOp(v, OP_Goto, 0, iSelectLoop); - sqlite3VdbeResolveLabel(v, iInsertBlock); + /* This block codes the top of loop only. The complete loop is the + ** following pseudocode (template 3): + ** + ** C: yield X + ** if EOF goto D + ** insert the select result into
from R..R+n + ** goto C + ** D: ... + */ + addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iParm); + addrInsTop = sqlite3VdbeAddOp1(v, OP_If, regEof); } + /* Allocate registers for holding the rowid of the new row, + ** the content of the new row, and the assemblied row record. + */ + regRecord = ++pParse->nMem; + regRowid = regIns = pParse->nMem+1; + pParse->nMem += pTab->nCol + 1; + if( IsVirtual(pTab) ){ + regRowid++; + pParse->nMem++; + } + regData = regRowid+1; + /* Run the BEFORE and INSTEAD OF triggers, if there are any */ endOfLoop = sqlite3VdbeMakeLabel(v); - if( triggers_exist & TRIGGER_BEFORE ){ + if( tmask & TRIGGER_BEFORE ){ + int regTrigRowid; + int regCols; + int regRec; /* build the NEW.* reference row. Note that if there is an INTEGER ** PRIMARY KEY into which a NULL is being inserted, that NULL will be @@ -634,17 +818,21 @@ ** we do not know what the unique ID will be (because the insert has ** not happened yet) so we substitute a rowid of -1 */ + regTrigRowid = sqlite3GetTempReg(pParse); if( keyColumn<0 ){ - sqlite3VdbeAddOp(v, OP_Integer, -1, 0); - }else if( useTempTable ){ - sqlite3VdbeAddOp(v, OP_Column, srcTab, keyColumn); + sqlite3VdbeAddOp2(v, OP_Integer, -1, regTrigRowid); }else{ - assert( pSelect==0 ); /* Otherwise useTempTable is true */ - sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr); - sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_Integer, -1, 0); - sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + int j1; + if( useTempTable ){ + sqlite3VdbeAddOp3(v, OP_Column, srcTab, keyColumn, regTrigRowid); + }else{ + assert( pSelect==0 ); /* Otherwise useTempTable is true */ + sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr, regTrigRowid); + } + j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regTrigRowid); + sqlite3VdbeAddOp2(v, OP_Integer, -1, regTrigRowid); + sqlite3VdbeJumpHere(v, j1); + sqlite3VdbeAddOp1(v, OP_MustBeInt, regTrigRowid); } /* Cannot have triggers on a virtual table. If it were possible, @@ -654,6 +842,7 @@ /* Create the new column data */ + regCols = sqlite3GetTempRange(pParse, pTab->nCol); for(i=0; inCol; i++){ if( pColumn==0 ){ j = i; @@ -663,15 +852,16 @@ } } if( pColumn && j>=pColumn->nId ){ - sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regCols+i); }else if( useTempTable ){ - sqlite3VdbeAddOp(v, OP_Column, srcTab, j); + sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, regCols+i); }else{ assert( pSelect==0 ); /* Otherwise useTempTable is true */ - sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr); + sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr, regCols+i); } } - sqlite3VdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0); + regRec = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regCols, pTab->nCol, regRec); /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger, ** do not attempt any conversions before assembling the record. 
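
Editor's note on the recurring pattern in these insert.c hunks: the 3.4-era stack operations (OP_Dup, OP_Pop, sqlite3VdbeAddOp) are systematically replaced by explicit register numbers obtained from sqlite3GetTempReg()/sqlite3GetTempRange() and handed back with sqlite3ReleaseTempReg()/sqlite3ReleaseTempRange(), with OP_MakeRecord and OP_Insert now taking register operands. As a rough mental model only, the sketch below is a standalone analogue with invented names, not SQLite's allocator: a temp-register pool reuses released registers before growing the register file.

#include <assert.h>

typedef struct RegPool RegPool;
struct RegPool {
  int nMem;        /* Highest register number handed out so far */
  int nFree;       /* Number of entries currently in aFree[] */
  int aFree[8];    /* Released registers available for reuse */
};

static int poolGetTempReg(RegPool *p){
  if( p->nFree>0 ) return p->aFree[--p->nFree];  /* Reuse a released register */
  return ++p->nMem;                              /* Otherwise grow the file */
}

static void poolReleaseTempReg(RegPool *p, int iReg){
  if( iReg>0 && p->nFree<8 ) p->aFree[p->nFree++] = iReg;
}

int main(void){
  RegPool pool = {0, 0, {0}};
  int regRec   = poolGetTempReg(&pool);       /* e.g. a record register */
  int regRowid = poolGetTempReg(&pool);       /* e.g. a rowid register  */
  poolReleaseTempReg(&pool, regRec);
  poolReleaseTempReg(&pool, regRowid);
  assert( poolGetTempReg(&pool)==regRowid );  /* released registers come back */
  return 0;
}

Last-in-first-out reuse keeps register numbers dense, which matters because the VDBE sizes its register array from the highest register number mentioned (pParse->nMem in the real code above).
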
@@ -681,23 +871,18 @@ if( !isView ){ sqlite3TableAffinityStr(v, pTab); } - sqlite3VdbeAddOp(v, OP_Insert, newIdx, 0); + sqlite3VdbeAddOp3(v, OP_Insert, newIdx, regRec, regTrigRowid); + sqlite3ReleaseTempReg(pParse, regRec); + sqlite3ReleaseTempReg(pParse, regTrigRowid); + sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol); /* Fire BEFORE or INSTEAD OF triggers */ - if( sqlite3CodeRowTrigger(pParse, TK_INSERT, 0, TRIGGER_BEFORE, pTab, - newIdx, -1, onError, endOfLoop) ){ + if( sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_BEFORE, + pTab, newIdx, -1, onError, endOfLoop, 0, 0) ){ goto insert_cleanup; } } - /* If any triggers exists, the opening of tables and indices is deferred - ** until now. - */ - if( triggers_exist && !isView ){ - base = pParse->nTab; - sqlite3OpenTableAndIndices(pParse, pTab, base, OP_OpenWrite); - } - /* Push the record number for the new entry onto the stack. The ** record number is a randomly generate integer created by NewRowid ** except when the table has an INTEGER PRIMARY KEY column, in which @@ -705,53 +890,61 @@ */ if( !isView ){ if( IsVirtual(pTab) ){ - /* The row that the VUpdate opcode will delete: none */ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + /* The row that the VUpdate opcode will delete: none */ + sqlite3VdbeAddOp2(v, OP_Null, 0, regIns); } if( keyColumn>=0 ){ if( useTempTable ){ - sqlite3VdbeAddOp(v, OP_Column, srcTab, keyColumn); + sqlite3VdbeAddOp3(v, OP_Column, srcTab, keyColumn, regRowid); }else if( pSelect ){ - sqlite3VdbeAddOp(v, OP_Dup, nColumn - keyColumn - 1, 1); + sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+keyColumn, regRowid); }else{ VdbeOp *pOp; - sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr); - pOp = sqlite3VdbeGetOp(v, sqlite3VdbeCurrentAddr(v) - 1); - if( pOp && pOp->opcode==OP_Null ){ + sqlite3ExprCode(pParse, pList->a[keyColumn].pExpr, regRowid); + pOp = sqlite3VdbeGetOp(v, -1); + if( ALWAYS(pOp) && pOp->opcode==OP_Null && !IsVirtual(pTab) ){ appendFlag = 1; pOp->opcode = OP_NewRowid; - pOp->p1 = base; - pOp->p2 = counterMem; + pOp->p1 = baseCur; + pOp->p2 = regRowid; + pOp->p3 = regAutoinc; } } /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid ** to generate a unique primary key value. */ if( !appendFlag ){ - sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_NewRowid, base, counterMem); - sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + int j1; + if( !IsVirtual(pTab) ){ + j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); + sqlite3VdbeAddOp3(v, OP_NewRowid, baseCur, regRowid, regAutoinc); + sqlite3VdbeJumpHere(v, j1); + }else{ + j1 = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, j1+2); + } + sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); } }else if( IsVirtual(pTab) ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid); }else{ - sqlite3VdbeAddOp(v, OP_NewRowid, base, counterMem); + sqlite3VdbeAddOp3(v, OP_NewRowid, baseCur, regRowid, regAutoinc); appendFlag = 1; } - autoIncStep(pParse, counterMem); + autoIncStep(pParse, regAutoinc, regRowid); /* Push onto the stack, data for all columns of the new entry, beginning ** with the first column. */ nHidden = 0; for(i=0; inCol; i++){ + int iRegStore = regRowid+1+i; if( i==pTab->iPKey ){ /* The value of the INTEGER PRIMARY KEY column is always a NULL. ** Whenever this column is read, the record number will be substituted ** in its place. 
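
Editorial aside, not part of the diff: the rowid aliasing described in the surrounding comment can be observed from the public API. An INTEGER PRIMARY KEY column and the rowid are the same value, which is why the generated code never stores real data in that column position. A minimal standalone check:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(id INTEGER PRIMARY KEY, x);"
                   "INSERT INTO t(x) VALUES('hello');", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT id, rowid FROM t", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Both columns print the same value: id is the rowid. */
    printf("id=%d rowid=%d\n",
           sqlite3_column_int(pStmt, 0), sqlite3_column_int(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
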
So will fill this column with a NULL to avoid ** taking up data space with information that will never be used. */ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, iRegStore); continue; } if( pColumn==0 ){ @@ -768,13 +961,13 @@ } } if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){ - sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, iRegStore); }else if( useTempTable ){ - sqlite3VdbeAddOp(v, OP_Column, srcTab, j); + sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, iRegStore); }else if( pSelect ){ - sqlite3VdbeAddOp(v, OP_Dup, i+nColumn-j+IsVirtual(pTab), 1); + sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore); }else{ - sqlite3ExprCode(pParse, pList->a[j].pExpr); + sqlite3ExprCode(pParse, pList->a[j].pExpr, iRegStore); } } @@ -783,68 +976,66 @@ */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ - pParse->pVirtualLock = pTab; - sqlite3VdbeOp3(v, OP_VUpdate, 1, pTab->nCol+2, - (const char*)pTab->pVtab, P3_VTAB); + sqlite3VtabMakeWritable(pParse, pTab); + sqlite3VdbeAddOp4(v, OP_VUpdate, 1, pTab->nCol+2, regIns, + (const char*)pTab->pVtab, P4_VTAB); }else #endif { - sqlite3GenerateConstraintChecks(pParse, pTab, base, 0, keyColumn>=0, - 0, onError, endOfLoop); - sqlite3CompleteInsertion(pParse, pTab, base, 0,0,0, - (triggers_exist & TRIGGER_AFTER)!=0 ? newIdx : -1, - appendFlag); + int isReplace; /* Set to true if constraints may cause a replace */ + sqlite3GenerateConstraintChecks(pParse, pTab, baseCur, regIns, aRegIdx, + keyColumn>=0, 0, onError, endOfLoop, &isReplace + ); + sqlite3CompleteInsertion( + pParse, pTab, baseCur, regIns, aRegIdx, 0, + (tmask&TRIGGER_AFTER) ? newIdx : -1, appendFlag, isReplace==0 + ); } } /* Update the count of rows that are inserted */ if( (db->flags & SQLITE_CountRows)!=0 ){ - sqlite3VdbeAddOp(v, OP_MemIncr, 1, iCntMem); + sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } - if( triggers_exist ){ - /* Close all tables opened */ - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Close, base, 0); - for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){ - sqlite3VdbeAddOp(v, OP_Close, idx+base, 0); - } - } - + if( pTrigger ){ /* Code AFTER triggers */ - if( sqlite3CodeRowTrigger(pParse, TK_INSERT, 0, TRIGGER_AFTER, pTab, - newIdx, -1, onError, endOfLoop) ){ + if( sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_AFTER, + pTab, newIdx, -1, onError, endOfLoop, 0, 0) ){ goto insert_cleanup; } } - /* The bottom of the loop, if the data source is a SELECT statement + /* The bottom of the main insertion loop, if the data source + ** is a SELECT statement. 
*/ sqlite3VdbeResolveLabel(v, endOfLoop); if( useTempTable ){ - sqlite3VdbeAddOp(v, OP_Next, srcTab, iCont); - sqlite3VdbeResolveLabel(v, iBreak); - sqlite3VdbeAddOp(v, OP_Close, srcTab, 0); + sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); + sqlite3VdbeJumpHere(v, addrInsTop); + sqlite3VdbeAddOp1(v, OP_Close, srcTab); }else if( pSelect ){ - sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); - sqlite3VdbeAddOp(v, OP_Return, 0, 0); - sqlite3VdbeResolveLabel(v, iCleanup); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrCont); + sqlite3VdbeJumpHere(v, addrInsTop); } - if( !triggers_exist && !IsVirtual(pTab) ){ + if( !IsVirtual(pTab) && !isView ){ /* Close all tables opened */ - sqlite3VdbeAddOp(v, OP_Close, base, 0); + sqlite3VdbeAddOp1(v, OP_Close, baseCur); for(idx=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){ - sqlite3VdbeAddOp(v, OP_Close, idx+base, 0); + sqlite3VdbeAddOp1(v, OP_Close, idx+baseCur); } } +insert_end: /* Update the sqlite_sequence table by storing the content of the - ** counter value in memory counterMem back into the sqlite_sequence - ** table. + ** maximum rowid counter values recorded while inserting into + ** autoincrement tables. */ - autoIncEnd(pParse, iDb, pTab, counterMem); + if( pParse->nested==0 && pParse->trigStack==0 ){ + sqlite3AutoincrementEnd(pParse); + } /* ** Return the number of rows inserted. If this routine is @@ -852,28 +1043,27 @@ ** invoke the callback function. */ if( db->flags & SQLITE_CountRows && pParse->nested==0 && !pParse->trigStack ){ - sqlite3VdbeAddOp(v, OP_MemLoad, iCntMem, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", P3_STATIC); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); } insert_cleanup: - sqlite3SrcListDelete(pTabList); - sqlite3ExprListDelete(pList); - sqlite3SelectDelete(pSelect); - sqlite3IdListDelete(pColumn); + sqlite3SrcListDelete(db, pTabList); + sqlite3ExprListDelete(db, pList); + sqlite3SelectDelete(db, pSelect); + sqlite3IdListDelete(db, pColumn); + sqlite3DbFree(db, aRegIdx); } /* -** Generate code to do a constraint check prior to an INSERT or an UPDATE. +** Generate code to do constraint checks prior to an INSERT or an UPDATE. ** -** When this routine is called, the stack contains (from bottom to top) -** the following values: +** The input is a range of consecutive registers as follows: ** ** 1. The rowid of the row to be updated before the update. This ** value is omitted unless we are doing an UPDATE that involves a -** change to the record number. +** change to the record number or writing to a virtual table. ** ** 2. The rowid of the row after the update. ** @@ -883,15 +1073,20 @@ ** ** N. The data in the last column of the entry after the update. ** +** The regRowid parameter is the index of the register containing (2). +** ** The old rowid shown as entry (1) above is omitted unless both isUpdate ** and rowidChng are 1. isUpdate is true for UPDATEs and false for -** INSERTs and rowidChng is true if the record number is being changed. -** -** The code generated by this routine pushes additional entries onto -** the stack which are the keys for new index entries for the new record. -** The order of index keys is the same as the order of the indices on -** the pTable->pIndex list. A key is only created for index i if -** aIdxUsed!=0 and aIdxUsed[i]!=0. +** INSERTs. 
RowidChng means that the new rowid is explicitly specified by +** the update or insert statement. If rowidChng is false, it means that +** the rowid is computed automatically in an insert or that the rowid value +** is not modified by the update. +** +** The code generated by this routine store new index entries into +** registers identified by aRegIdx[]. No index entry is created for +** indices where aRegIdx[i]==0. The order of indices in aRegIdx[] is +** the same as the order of indices on the linked list of indices +** attached to the table. ** ** This routine also generates code to check constraints. NOT NULL, ** CHECK, and UNIQUE constraints are all checked. If a constraint fails, @@ -933,43 +1128,41 @@ ** for the constraint is used. ** ** The calling routine must open a read/write cursor for pTab with -** cursor number "base". All indices of pTab must also have open -** read/write cursors with cursor number base+i for the i-th cursor. +** cursor number "baseCur". All indices of pTab must also have open +** read/write cursors with cursor number baseCur+i for the i-th cursor. ** Except, if there is no possibility of a REPLACE action then -** cursors do not need to be open for indices where aIdxUsed[i]==0. -** -** If the isUpdate flag is true, it means that the "base" cursor is -** initially pointing to an entry that is being updated. The isUpdate -** flag causes extra code to be generated so that the "base" cursor -** is still pointing at the same entry after the routine returns. -** Without the isUpdate flag, the "base" cursor might be moved. +** cursors do not need to be open for indices where aRegIdx[i]==0. */ void sqlite3GenerateConstraintChecks( Parse *pParse, /* The parser context */ Table *pTab, /* the table into which we are inserting */ - int base, /* Index of a read/write cursor pointing at pTab */ - char *aIdxUsed, /* Which indices are used. NULL means all are used */ - int rowidChng, /* True if the record number will change */ + int baseCur, /* Index of a read/write cursor pointing at pTab */ + int regRowid, /* Index of the range of input registers */ + int *aRegIdx, /* Register used by each index. 0 for unused indices */ + int rowidChng, /* True if the rowid might collide with existing entry */ int isUpdate, /* True for UPDATE, False for INSERT */ int overrideError, /* Override onError to this if not OE_Default */ - int ignoreDest /* Jump to this label on an OE_Ignore resolution */ + int ignoreDest, /* Jump to this label on an OE_Ignore resolution */ + int *pbMayReplace /* OUT: Set to true if constraint may cause a replace */ ){ - int i; - Vdbe *v; - int nCol; - int onError; - int addr; - int extra; - int iCur; - Index *pIdx; - int seenReplace = 0; - int jumpInst1=0, jumpInst2; + int i; /* loop counter */ + Vdbe *v; /* VDBE under constrution */ + int nCol; /* Number of columns */ + int onError; /* Conflict resolution strategy */ + int j1; /* Addresss of jump instruction */ + int j2 = 0, j3; /* Addresses of jump instructions */ + int regData; /* Register containing first data column */ + int iCur; /* Table cursor number */ + Index *pIdx; /* Pointer to one of the indices */ + int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int hasTwoRowids = (isUpdate && rowidChng); v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ nCol = pTab->nCol; + regData = regRowid + 1; + /* Test all NOT NULL constraints. 
*/ @@ -987,33 +1180,32 @@ if( onError==OE_Replace && pTab->aCol[i].pDflt==0 ){ onError = OE_Abort; } - sqlite3VdbeAddOp(v, OP_Dup, nCol-1-i, 1); - addr = sqlite3VdbeAddOp(v, OP_NotNull, 1, 0); assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail || onError==OE_Ignore || onError==OE_Replace ); switch( onError ){ case OE_Rollback: case OE_Abort: case OE_Fail: { - char *zMsg = 0; - sqlite3VdbeAddOp(v, OP_Halt, SQLITE_CONSTRAINT, onError); - sqlite3SetString(&zMsg, pTab->zName, ".", pTab->aCol[i].zName, - " may not be NULL", (char*)0); - sqlite3VdbeChangeP3(v, -1, zMsg, P3_DYNAMIC); + char *zMsg; + j1 = sqlite3VdbeAddOp3(v, OP_HaltIfNull, + SQLITE_CONSTRAINT, onError, regData+i); + zMsg = sqlite3MPrintf(pParse->db, "%s.%s may not be NULL", + pTab->zName, pTab->aCol[i].zName); + sqlite3VdbeChangeP4(v, -1, zMsg, P4_DYNAMIC); break; } case OE_Ignore: { - sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + sqlite3VdbeAddOp2(v, OP_IsNull, regData+i, ignoreDest); break; } - case OE_Replace: { - sqlite3ExprCode(pParse, pTab->aCol[i].pDflt); - sqlite3VdbeAddOp(v, OP_Push, nCol-i, 0); + default: { + assert( onError==OE_Replace ); + j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regData+i); + sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regData+i); + sqlite3VdbeJumpHere(v, j1); break; } } - sqlite3VdbeJumpHere(v, addr); } /* Test all CHECK constraints @@ -1021,17 +1213,13 @@ #ifndef SQLITE_OMIT_CHECK if( pTab->pCheck && (pParse->db->flags & SQLITE_IgnoreChecks)==0 ){ int allOk = sqlite3VdbeMakeLabel(v); - assert( pParse->ckOffset==0 ); - pParse->ckOffset = nCol; - sqlite3ExprIfTrue(pParse, pTab->pCheck, allOk, 1); - assert( pParse->ckOffset==nCol ); - pParse->ckOffset = 0; + pParse->ckBase = regData; + sqlite3ExprIfTrue(pParse, pTab->pCheck, allOk, SQLITE_JUMPIFNULL); onError = overrideError!=OE_Default ? 
overrideError : OE_Abort; if( onError==OE_Ignore ){ - sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); }else{ - sqlite3VdbeAddOp(v, OP_Halt, SQLITE_CONSTRAINT, onError); + sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_CONSTRAINT, onError); } sqlite3VdbeResolveLabel(v, allOk); } @@ -1049,74 +1237,72 @@ onError = OE_Abort; } - if( isUpdate ){ - sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); - sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); - jumpInst1 = sqlite3VdbeAddOp(v, OP_Eq, 0, 0); - } - sqlite3VdbeAddOp(v, OP_Dup, nCol, 1); - jumpInst2 = sqlite3VdbeAddOp(v, OP_NotExists, base, 0); - switch( onError ){ - default: { - onError = OE_Abort; - /* Fall thru into the next case */ - } - case OE_Rollback: - case OE_Abort: - case OE_Fail: { - sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, - "PRIMARY KEY must be unique", P3_STATIC); - break; - } - case OE_Replace: { - sqlite3GenerateRowIndexDelete(v, pTab, base, 0); - if( isUpdate ){ - sqlite3VdbeAddOp(v, OP_Dup, nCol+hasTwoRowids, 1); - sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); + if( onError!=OE_Replace || pTab->pIndex ){ + if( isUpdate ){ + j2 = sqlite3VdbeAddOp3(v, OP_Eq, regRowid, 0, regRowid-1); + } + j3 = sqlite3VdbeAddOp3(v, OP_NotExists, baseCur, 0, regRowid); + switch( onError ){ + default: { + onError = OE_Abort; + /* Fall thru into the next case */ + } + case OE_Rollback: + case OE_Abort: + case OE_Fail: { + sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT, onError, 0, + "PRIMARY KEY must be unique", P4_STATIC); + break; + } + case OE_Replace: { + sqlite3GenerateRowIndexDelete(pParse, pTab, baseCur, 0); + seenReplace = 1; + break; + } + case OE_Ignore: { + assert( seenReplace==0 ); + sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); + break; } - seenReplace = 1; - break; } - case OE_Ignore: { - assert( seenReplace==0 ); - sqlite3VdbeAddOp(v, OP_Pop, nCol+1+hasTwoRowids, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); - break; + sqlite3VdbeJumpHere(v, j3); + if( isUpdate ){ + sqlite3VdbeJumpHere(v, j2); } } - sqlite3VdbeJumpHere(v, jumpInst2); - if( isUpdate ){ - sqlite3VdbeJumpHere(v, jumpInst1); - sqlite3VdbeAddOp(v, OP_Dup, nCol+1, 1); - sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); - } } /* Test all UNIQUE constraints by creating entries for each UNIQUE ** index and making sure that duplicate entries do not already exist. ** Add the new records to the indices as we go. 
*/ - extra = -1; for(iCur=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, iCur++){ - if( aIdxUsed && aIdxUsed[iCur]==0 ) continue; /* Skip unused indices */ - extra++; + int regIdx; + int regR; + + if( aRegIdx[iCur]==0 ) continue; /* Skip unused indices */ /* Create a key for accessing the index entry */ - sqlite3VdbeAddOp(v, OP_Dup, nCol+extra, 1); + regIdx = sqlite3GetTempRange(pParse, pIdx->nColumn+1); for(i=0; inColumn; i++){ int idx = pIdx->aiColumn[i]; if( idx==pTab->iPKey ){ - sqlite3VdbeAddOp(v, OP_Dup, i+extra+nCol+1, 1); + sqlite3VdbeAddOp2(v, OP_SCopy, regRowid, regIdx+i); }else{ - sqlite3VdbeAddOp(v, OP_Dup, i+extra+nCol-idx, 1); + sqlite3VdbeAddOp2(v, OP_SCopy, regData+idx, regIdx+i); } } - jumpInst1 = sqlite3VdbeAddOp(v, OP_MakeIdxRec, pIdx->nColumn, 0); + sqlite3VdbeAddOp2(v, OP_SCopy, regRowid, regIdx+i); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn+1, aRegIdx[iCur]); sqlite3IndexAffinityStr(v, pIdx); + sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn+1); /* Find out what action to take in case there is an indexing conflict */ onError = pIdx->onError; - if( onError==OE_None ) continue; /* pIdx is not a UNIQUE index */ + if( onError==OE_None ){ + sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn+1); + continue; /* pIdx is not a UNIQUE index */ + } if( overrideError!=OE_Default ){ onError = overrideError; }else if( onError==OE_Default ){ @@ -1129,8 +1315,12 @@ /* Check to see if the new index entry will be unique */ - sqlite3VdbeAddOp(v, OP_Dup, extra+nCol+1+hasTwoRowids, 1); - jumpInst2 = sqlite3VdbeAddOp(v, OP_IsUnique, base+iCur+1, 0); + regR = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_SCopy, regRowid-hasTwoRowids, regR); + j3 = sqlite3VdbeAddOp4(v, OP_IsUnique, baseCur+iCur+1, 0, + regR, SQLITE_INT_TO_PTR(regIdx), + P4_INT32); + sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn+1); /* Generate code that executes if the new index entry is not unique */ assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail @@ -1139,61 +1329,53 @@ case OE_Rollback: case OE_Abort: case OE_Fail: { - int j, n1, n2; - char zErrMsg[200]; - sqlite3_snprintf(sizeof(zErrMsg), zErrMsg, - pIdx->nColumn>1 ? "columns " : "column "); - n1 = strlen(zErrMsg); - for(j=0; jnColumn && n1db; + zSep = pIdx->nColumn>1 ? "columns " : "column "; + for(j=0; jnColumn; j++){ char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName; - n2 = strlen(zCol); - if( j>0 ){ - sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], ", "); - n1 += 2; - } - if( n1+n2>sizeof(zErrMsg)-30 ){ - sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], "..."); - n1 += 3; - break; - }else{ - sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], "%s", zCol); - n1 += n2; - } + sqlite3StrAccumAppend(&errMsg, zSep, -1); + zSep = ", "; + sqlite3StrAccumAppend(&errMsg, zCol, -1); } - sqlite3_snprintf(sizeof(zErrMsg)-n1, &zErrMsg[n1], - pIdx->nColumn>1 ? " are not unique" : " is not unique"); - sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, zErrMsg, 0); + sqlite3StrAccumAppend(&errMsg, + pIdx->nColumn>1 ? 
" are not unique" : " is not unique", -1); + zErr = sqlite3StrAccumFinish(&errMsg); + sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT, onError, 0, zErr, 0); + sqlite3DbFree(errMsg.db, zErr); break; } case OE_Ignore: { assert( seenReplace==0 ); - sqlite3VdbeAddOp(v, OP_Pop, nCol+extra+3+hasTwoRowids, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, ignoreDest); + sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); break; } - case OE_Replace: { - sqlite3GenerateRowDelete(pParse->db, v, pTab, base, 0); - if( isUpdate ){ - sqlite3VdbeAddOp(v, OP_Dup, nCol+extra+1+hasTwoRowids, 1); - sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); - } + default: { + assert( onError==OE_Replace ); + sqlite3GenerateRowDelete(pParse, pTab, baseCur, regR, 0); seenReplace = 1; break; } } -#if NULL_DISTINCT_FOR_UNIQUE - sqlite3VdbeJumpHere(v, jumpInst1); -#endif - sqlite3VdbeJumpHere(v, jumpInst2); + sqlite3VdbeJumpHere(v, j3); + sqlite3ReleaseTempReg(pParse, regR); + } + + if( pbMayReplace ){ + *pbMayReplace = seenReplace; } } /* ** This routine generates code to finish the INSERT or UPDATE operation ** that was started by a prior call to sqlite3GenerateConstraintChecks. -** The stack must contain keys for all active indices followed by data -** and the rowid for the new entry. This routine creates the new -** entries in all indices and in the main table. +** A consecutive range of registers starting at regRowid contains the +** rowid and the content to be inserted. ** ** The arguments to this routine should be the same as the first six ** arguments to sqlite3GenerateConstraintChecks. @@ -1201,34 +1383,41 @@ void sqlite3CompleteInsertion( Parse *pParse, /* The parser context */ Table *pTab, /* the table into which we are inserting */ - int base, /* Index of a read/write cursor pointing at pTab */ - char *aIdxUsed, /* Which indices are used. NULL means all are used */ - int rowidChng, /* True if the record number will change */ + int baseCur, /* Index of a read/write cursor pointing at pTab */ + int regRowid, /* Range of content */ + int *aRegIdx, /* Register used by each index. 0 for unused indices */ int isUpdate, /* True for UPDATE, False for INSERT */ int newIdx, /* Index of NEW table for triggers. 
-1 if none */ - int appendBias /* True if this is likely to be an append */ + int appendBias, /* True if this is likely to be an append */ + int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */ ){ int i; Vdbe *v; int nIdx; Index *pIdx; - int pik_flags; + u8 pik_flags; + int regData; + int regRec; v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){} for(i=nIdx-1; i>=0; i--){ - if( aIdxUsed && aIdxUsed[i]==0 ) continue; - sqlite3VdbeAddOp(v, OP_IdxInsert, base+i+1, 0); + if( aRegIdx[i]==0 ) continue; + sqlite3VdbeAddOp2(v, OP_IdxInsert, baseCur+i+1, aRegIdx[i]); + if( useSeekResult ){ + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + } } - sqlite3VdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0); + regData = regRowid + 1; + regRec = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec); sqlite3TableAffinityStr(v, pTab); + sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); #ifndef SQLITE_OMIT_TRIGGER if( newIdx>=0 ){ - sqlite3VdbeAddOp(v, OP_Dup, 1, 0); - sqlite3VdbeAddOp(v, OP_Dup, 1, 0); - sqlite3VdbeAddOp(v, OP_Insert, newIdx, 0); + sqlite3VdbeAddOp3(v, OP_Insert, newIdx, regRec, regRowid); } #endif if( pParse->nested ){ @@ -1240,25 +1429,27 @@ if( appendBias ){ pik_flags |= OPFLAG_APPEND; } - sqlite3VdbeAddOp(v, OP_Insert, base, pik_flags); - if( !pParse->nested ){ - sqlite3VdbeChangeP3(v, -1, pTab->zName, P3_STATIC); + if( useSeekResult ){ + pik_flags |= OPFLAG_USESEEKRESULT; } - - if( isUpdate && rowidChng ){ - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); + sqlite3VdbeAddOp3(v, OP_Insert, baseCur, regRec, regRowid); + if( !pParse->nested ){ + sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_STATIC); } + sqlite3VdbeChangeP5(v, pik_flags); } /* ** Generate code that will open cursors for a table and for all -** indices of that table. The "base" parameter is the cursor number used +** indices of that table. The "baseCur" parameter is the cursor number used ** for the table. Indices are opened on subsequent cursors. +** +** Return the number of indices on the table. 
*/ -void sqlite3OpenTableAndIndices( +int sqlite3OpenTableAndIndices( Parse *pParse, /* Parsing context */ Table *pTab, /* Table to be opened */ - int base, /* Cursor number assigned to the table */ + int baseCur, /* Cursor number assigned to the table */ int op /* OP_OpenRead or OP_OpenWrite */ ){ int i; @@ -1266,21 +1457,22 @@ Index *pIdx; Vdbe *v; - if( IsVirtual(pTab) ) return; + if( IsVirtual(pTab) ) return 0; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); v = sqlite3GetVdbe(pParse); assert( v!=0 ); - sqlite3OpenTable(pParse, base, iDb, pTab, op); + sqlite3OpenTable(pParse, baseCur, iDb, pTab, op); for(i=1, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); assert( pIdx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - VdbeComment((v, "# %s", pIdx->zName)); - sqlite3VdbeOp3(v, op, i+base, pIdx->tnum, (char*)pKey, P3_KEYINFO_HANDOFF); + sqlite3VdbeAddOp4(v, op, i+baseCur, pIdx->tnum, iDb, + (char*)pKey, P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pIdx->zName)); } - if( pParse->nTab<=base+i ){ - pParse->nTab = base+i; + if( pParse->nTabnTab = baseCur+i; } + return i-1; } @@ -1337,8 +1529,8 @@ if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){ return 0; /* Different sort orders */ } - if( pSrc->azColl[i]!=pDest->azColl[i] ){ - return 0; /* Different sort orders */ + if( !xferCompatibleCollation(pSrc->azColl[i],pDest->azColl[i]) ){ + return 0; /* Different collating sequences */ } } @@ -1396,17 +1588,18 @@ int emptySrcTest; /* Address of test for empty pSrc */ Vdbe *v; /* The VDBE we are building */ KeyInfo *pKey; /* Key information for an index */ - int counterMem; /* Memory register used by AUTOINC */ + int regAutoinc; /* Memory register used by AUTOINC */ int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ + int regData, regRowid; /* Registers holding data and rowid */ if( pSelect==0 ){ return 0; /* Must be of the form INSERT INTO ... SELECT ... */ } - if( pDest->pTrigger ){ + if( sqlite3TriggerList(pParse, pDest) ){ return 0; /* tab1 must not have triggers */ } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( pDest->isVirtual ){ + if( pDest->tabFlags & TF_Virtual ){ return 0; /* tab1 must not be a virtual table */ } #endif @@ -1416,9 +1609,7 @@ if( onError!=OE_Abort && onError!=OE_Rollback ){ return 0; /* Cannot do OR REPLACE or OR IGNORE or OR FAIL */ } - if( pSelect->pSrc==0 ){ - return 0; /* SELECT must have a FROM clause */ - } + assert(pSelect->pSrc); /* allocated even if there is no FROM clause */ if( pSelect->pSrc->nSrc!=1 ){ return 0; /* FROM clause must have exactly one term */ } @@ -1443,7 +1634,7 @@ if( pSelect->pPrior ){ return 0; /* SELECT may not be a compound query */ } - if( pSelect->isDistinct ){ + if( pSelect->selFlags & SF_Distinct ){ return 0; /* SELECT may not be DISTINCT */ } pEList = pSelect->pEList; @@ -1461,7 +1652,7 @@ ** we have to check the semantics. 
*/ pItem = pSelect->pSrc->a; - pSrc = sqlite3LocateTable(pParse, pItem->zName, pItem->zDatabase); + pSrc = sqlite3LocateTable(pParse, 0, pItem->zName, pItem->zDatabase); if( pSrc==0 ){ return 0; /* FROM clause does not contain a real table */ } @@ -1469,7 +1660,7 @@ return 0; /* tab1 and tab2 may not be the same table */ } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( pSrc->isVirtual ){ + if( pSrc->tabFlags & TF_Virtual ){ return 0; /* tab2 must not be a virtual table */ } #endif @@ -1523,9 +1714,10 @@ #endif iDbSrc = sqlite3SchemaToIndex(pParse->db, pSrc->pSchema); v = sqlite3GetVdbe(pParse); + sqlite3CodeVerifySchema(pParse, iDbSrc); iSrc = pParse->nTab++; iDest = pParse->nTab++; - counterMem = autoIncBegin(pParse, iDbDest, pDest); + regAutoinc = autoIncBegin(pParse, iDbDest, pDest); sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); if( (pDest->iPKey<0 && pDest->pIndex!=0) || destHasUniqueIdx ){ /* If tables do not have an INTEGER PRIMARY KEY and there @@ -1538,67 +1730,72 @@ ** insure that all entries in the union of DEST and SRC will be ** unique. */ - addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iDest, 0); - emptyDestTest = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); + emptyDestTest = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); sqlite3VdbeJumpHere(v, addr1); }else{ emptyDestTest = 0; } sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); - emptySrcTest = sqlite3VdbeAddOp(v, OP_Rewind, iSrc, 0); + emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); + regData = sqlite3GetTempReg(pParse); + regRowid = sqlite3GetTempReg(pParse); if( pDest->iPKey>=0 ){ - addr1 = sqlite3VdbeAddOp(v, OP_Rowid, iSrc, 0); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - addr2 = sqlite3VdbeAddOp(v, OP_NotExists, iDest, 0); - sqlite3VdbeOp3(v, OP_Halt, SQLITE_CONSTRAINT, onError, - "PRIMARY KEY must be unique", P3_STATIC); + addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); + addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); + sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT, onError, 0, + "PRIMARY KEY must be unique", P4_STATIC); sqlite3VdbeJumpHere(v, addr2); - autoIncStep(pParse, counterMem); + autoIncStep(pParse, regAutoinc, regRowid); }else if( pDest->pIndex==0 ){ - addr1 = sqlite3VdbeAddOp(v, OP_NewRowid, iDest, 0); + addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); }else{ - addr1 = sqlite3VdbeAddOp(v, OP_Rowid, iSrc, 0); - assert( pDest->autoInc==0 ); + addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); + assert( (pDest->tabFlags & TF_Autoincrement)==0 ); } - sqlite3VdbeAddOp(v, OP_RowData, iSrc, 0); - sqlite3VdbeOp3(v, OP_Insert, iDest, - OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND, - pDest->zName, 0); - sqlite3VdbeAddOp(v, OP_Next, iSrc, addr1); - autoIncEnd(pParse, iDbDest, pDest, counterMem); + sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData); + sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid); + sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND); + sqlite3VdbeChangeP4(v, -1, pDest->zName, 0); + sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ - for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){ + for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){ if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; } assert( pSrcIdx ); - sqlite3VdbeAddOp(v, OP_Close, iSrc, 0); - sqlite3VdbeAddOp(v, OP_Close, iDest, 0); - sqlite3VdbeAddOp(v, OP_Integer, iDbSrc, 0); + sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); + sqlite3VdbeAddOp2(v, 
OP_Close, iDest, 0); pKey = sqlite3IndexKeyinfo(pParse, pSrcIdx); - VdbeComment((v, "# %s", pSrcIdx->zName)); - sqlite3VdbeOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - sqlite3VdbeAddOp(v, OP_Integer, iDbDest, 0); + sqlite3VdbeAddOp4(v, OP_OpenRead, iSrc, pSrcIdx->tnum, iDbSrc, + (char*)pKey, P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pSrcIdx->zName)); pKey = sqlite3IndexKeyinfo(pParse, pDestIdx); - VdbeComment((v, "# %s", pDestIdx->zName)); - sqlite3VdbeOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - addr1 = sqlite3VdbeAddOp(v, OP_Rewind, iSrc, 0); - sqlite3VdbeAddOp(v, OP_RowKey, iSrc, 0); - sqlite3VdbeAddOp(v, OP_IdxInsert, iDest, 1); - sqlite3VdbeAddOp(v, OP_Next, iSrc, addr1+1); + sqlite3VdbeAddOp4(v, OP_OpenWrite, iDest, pDestIdx->tnum, iDbDest, + (char*)pKey, P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pDestIdx->zName)); + addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); + sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData); + sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1); + sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); sqlite3VdbeJumpHere(v, addr1); } sqlite3VdbeJumpHere(v, emptySrcTest); - sqlite3VdbeAddOp(v, OP_Close, iSrc, 0); - sqlite3VdbeAddOp(v, OP_Close, iDest, 0); + sqlite3ReleaseTempReg(pParse, regRowid); + sqlite3ReleaseTempReg(pParse, regData); + sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); + sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); if( emptyDestTest ){ - sqlite3VdbeAddOp(v, OP_Halt, SQLITE_OK, 0); + sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0); sqlite3VdbeJumpHere(v, emptyDestTest); - sqlite3VdbeAddOp(v, OP_Close, iDest, 0); + sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); return 0; }else{ return 1; } } #endif /* SQLITE_OMIT_XFER_OPT */ + +/* Make sure "isView" gets undefined in case this file becomes part of +** the amalgamation - so that subsequent files do not see isView as a +** macro. */ +#undef isView diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/journal.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/journal.c --- sqlite3-3.4.2/src/journal.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/journal.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,240 @@ +/* +** 2007 August 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** @(#) $Id: journal.c,v 1.9 2009/01/20 17:06:27 danielk1977 Exp $ +*/ + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + +/* +** This file implements a special kind of sqlite3_file object used +** by SQLite to create journal files if the atomic-write optimization +** is enabled. +** +** The distinctive characteristic of this sqlite3_file is that the +** actual on disk file is created lazily. When the file is created, +** the caller specifies a buffer size for an in-memory buffer to +** be used to service read() and write() requests. The actual file +** on disk is not created or populated until either: +** +** 1) The in-memory representation grows too large for the allocated +** buffer, or +** 2) The sqlite3JournalCreate() function is called. +*/ + +#include "sqliteInt.h" + + +/* +** A JournalFile object is a subclass of sqlite3_file used by +** as an open file handle for journal files. 
+*/ +struct JournalFile { + sqlite3_io_methods *pMethod; /* I/O methods on journal files */ + int nBuf; /* Size of zBuf[] in bytes */ + char *zBuf; /* Space to buffer journal writes */ + int iSize; /* Amount of zBuf[] currently used */ + int flags; /* xOpen flags */ + sqlite3_vfs *pVfs; /* The "real" underlying VFS */ + sqlite3_file *pReal; /* The "real" underlying file descriptor */ + const char *zJournal; /* Name of the journal file */ +}; +typedef struct JournalFile JournalFile; + +/* +** If it does not already exists, create and populate the on-disk file +** for JournalFile p. +*/ +static int createFile(JournalFile *p){ + int rc = SQLITE_OK; + if( !p->pReal ){ + sqlite3_file *pReal = (sqlite3_file *)&p[1]; + rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0); + if( rc==SQLITE_OK ){ + p->pReal = pReal; + if( p->iSize>0 ){ + assert(p->iSize<=p->nBuf); + rc = sqlite3OsWrite(p->pReal, p->zBuf, p->iSize, 0); + } + } + } + return rc; +} + +/* +** Close the file. +*/ +static int jrnlClose(sqlite3_file *pJfd){ + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + sqlite3OsClose(p->pReal); + } + sqlite3_free(p->zBuf); + return SQLITE_OK; +} + +/* +** Read data from the file. +*/ +static int jrnlRead( + sqlite3_file *pJfd, /* The journal file from which to read */ + void *zBuf, /* Put the results here */ + int iAmt, /* Number of bytes to read */ + sqlite_int64 iOfst /* Begin reading at this offset */ +){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); + }else if( (iAmt+iOfst)>p->iSize ){ + rc = SQLITE_IOERR_SHORT_READ; + }else{ + memcpy(zBuf, &p->zBuf[iOfst], iAmt); + } + return rc; +} + +/* +** Write data to the file. +*/ +static int jrnlWrite( + sqlite3_file *pJfd, /* The journal file into which to write */ + const void *zBuf, /* Take data to be written from here */ + int iAmt, /* Number of bytes to write */ + sqlite_int64 iOfst /* Begin writing at this offset into the file */ +){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( !p->pReal && (iOfst+iAmt)>p->nBuf ){ + rc = createFile(p); + } + if( rc==SQLITE_OK ){ + if( p->pReal ){ + rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); + }else{ + memcpy(&p->zBuf[iOfst], zBuf, iAmt); + if( p->iSize<(iOfst+iAmt) ){ + p->iSize = (iOfst+iAmt); + } + } + } + return rc; +} + +/* +** Truncate the file. +*/ +static int jrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsTruncate(p->pReal, size); + }else if( sizeiSize ){ + p->iSize = size; + } + return rc; +} + +/* +** Sync the file. +*/ +static int jrnlSync(sqlite3_file *pJfd, int flags){ + int rc; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsSync(p->pReal, flags); + }else{ + rc = SQLITE_OK; + } + return rc; +} + +/* +** Query the size of the file in bytes. +*/ +static int jrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ + int rc = SQLITE_OK; + JournalFile *p = (JournalFile *)pJfd; + if( p->pReal ){ + rc = sqlite3OsFileSize(p->pReal, pSize); + }else{ + *pSize = (sqlite_int64) p->iSize; + } + return rc; +} + +/* +** Table of methods for JournalFile sqlite3_file object. 
+*/ +static struct sqlite3_io_methods JournalFileMethods = { + 1, /* iVersion */ + jrnlClose, /* xClose */ + jrnlRead, /* xRead */ + jrnlWrite, /* xWrite */ + jrnlTruncate, /* xTruncate */ + jrnlSync, /* xSync */ + jrnlFileSize, /* xFileSize */ + 0, /* xLock */ + 0, /* xUnlock */ + 0, /* xCheckReservedLock */ + 0, /* xFileControl */ + 0, /* xSectorSize */ + 0 /* xDeviceCharacteristics */ +}; + +/* +** Open a journal file. +*/ +int sqlite3JournalOpen( + sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */ + const char *zName, /* Name of the journal file */ + sqlite3_file *pJfd, /* Preallocated, blank file handle */ + int flags, /* Opening flags */ + int nBuf /* Bytes buffered before opening the file */ +){ + JournalFile *p = (JournalFile *)pJfd; + memset(p, 0, sqlite3JournalSize(pVfs)); + if( nBuf>0 ){ + p->zBuf = sqlite3MallocZero(nBuf); + if( !p->zBuf ){ + return SQLITE_NOMEM; + } + }else{ + return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); + } + p->pMethod = &JournalFileMethods; + p->nBuf = nBuf; + p->flags = flags; + p->zJournal = zName; + p->pVfs = pVfs; + return SQLITE_OK; +} + +/* +** If the argument p points to a JournalFile structure, and the underlying +** file has not yet been created, create it now. +*/ +int sqlite3JournalCreate(sqlite3_file *p){ + if( p->pMethods!=&JournalFileMethods ){ + return SQLITE_OK; + } + return createFile((JournalFile *)p); +} + +/* +** Return the number of bytes required to store a JournalFile that uses vfs +** pVfs to create the underlying on-disk files. +*/ +int sqlite3JournalSize(sqlite3_vfs *pVfs){ + return (pVfs->szOsFile+sizeof(JournalFile)); +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/legacy.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/legacy.c --- sqlite3-3.4.2/src/legacy.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/legacy.c 2009-06-25 12:24:38.000000000 +0100 @@ -14,12 +14,10 @@ ** other files are for internal use by SQLite and should not be ** accessed by users of the library. ** -** $Id: legacy.c,v 1.18 2007/05/04 13:15:56 drh Exp $ +** $Id: legacy.c,v 1.33 2009/05/05 20:02:48 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include /* ** Execute SQL code. 
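
Editorial aside before the legacy.c hunks: sqlite3_exec() is the one-shot wrapper whose internals are reworked below (lazy column-name allocation, retry on SQLITE_SCHEMA, per-connection mutexing). A minimal caller using only the public API looks like this; note that a non-zero return from the callback makes sqlite3_exec() stop and return SQLITE_ABORT, which is the path the new code handles by finalizing the statement first.

#include <stdio.h>
#include <sqlite3.h>

static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
  int i;
  (void)pArg;
  for(i=0; i<nCol; i++){
    printf("%s=%s ", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  printf("\n");
  return 0;               /* Non-zero would abort with SQLITE_ABORT */
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,'one');"
                   "SELECT * FROM t;", printRow, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}
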
Return one of the SQLITE_ success/failure @@ -38,15 +36,17 @@ void *pArg, /* First argument to xCallback() */ char **pzErrMsg /* Write error messages here */ ){ - int rc = SQLITE_OK; - const char *zLeftover; - sqlite3_stmt *pStmt = 0; - char **azCols = 0; + int rc = SQLITE_OK; /* Return code */ + const char *zLeftover; /* Tail of unprocessed SQL */ + sqlite3_stmt *pStmt = 0; /* The current SQL statement */ + char **azCols = 0; /* Names of result columns */ + int nRetry = 0; /* Number of retry attempts */ + int callbackIsInit; /* True if callback data is initialized */ - int nRetry = 0; - int nCallback; + if( zSql==0 ) zSql = ""; - if( zSql==0 ) return SQLITE_OK; + sqlite3_mutex_enter(db->mutex); + sqlite3Error(db, SQLITE_OK, 0); while( (rc==SQLITE_OK || (rc==SQLITE_SCHEMA && (++nRetry)<2)) && zSql[0] ){ int nCol; char **azVals = 0; @@ -63,13 +63,8 @@ continue; } - nCallback = 0; - + callbackIsInit = 0; nCol = sqlite3_column_count(pStmt); - azCols = sqliteMalloc(2*nCol*sizeof(const char *) + 1); - if( azCols==0 ){ - goto exec_out; - } while( 1 ){ int i; @@ -77,49 +72,64 @@ /* Invoke the callback function if required */ if( xCallback && (SQLITE_ROW==rc || - (SQLITE_DONE==rc && !nCallback && db->flags&SQLITE_NullCallback)) ){ - if( 0==nCallback ){ + (SQLITE_DONE==rc && !callbackIsInit + && db->flags&SQLITE_NullCallback)) ){ + if( !callbackIsInit ){ + azCols = sqlite3DbMallocZero(db, 2*nCol*sizeof(const char*) + 1); + if( azCols==0 ){ + goto exec_out; + } for(i=0; imallocFailed = 1; + goto exec_out; + } } } if( xCallback(pArg, nCol, azVals, azCols) ){ rc = SQLITE_ABORT; + sqlite3VdbeFinalize((Vdbe *)pStmt); + pStmt = 0; + sqlite3Error(db, SQLITE_ABORT, 0); goto exec_out; } } if( rc!=SQLITE_ROW ){ - rc = sqlite3_finalize(pStmt); + rc = sqlite3VdbeFinalize((Vdbe *)pStmt); pStmt = 0; if( rc!=SQLITE_SCHEMA ){ nRetry = 0; zSql = zLeftover; - while( isspace((unsigned char)zSql[0]) ) zSql++; + while( sqlite3Isspace(zSql[0]) ) zSql++; } break; } } - sqliteFree(azCols); + sqlite3DbFree(db, azCols); azCols = 0; } exec_out: - if( pStmt ) sqlite3_finalize(pStmt); - if( azCols ) sqliteFree(azCols); + if( pStmt ) sqlite3VdbeFinalize((Vdbe *)pStmt); + sqlite3DbFree(db, azCols); - rc = sqlite3ApiExit(0, rc); - if( rc!=SQLITE_OK && rc==sqlite3_errcode(db) && pzErrMsg ){ - int nErrMsg = 1 + strlen(sqlite3_errmsg(db)); - *pzErrMsg = sqlite3_malloc(nErrMsg); + rc = sqlite3ApiExit(db, rc); + if( rc!=SQLITE_OK && ALWAYS(rc==sqlite3_errcode(db)) && pzErrMsg ){ + int nErrMsg = 1 + sqlite3Strlen30(sqlite3_errmsg(db)); + *pzErrMsg = sqlite3Malloc(nErrMsg); if( *pzErrMsg ){ memcpy(*pzErrMsg, sqlite3_errmsg(db), nErrMsg); } @@ -128,5 +138,6 @@ } assert( (rc&db->errMask)==rc ); + sqlite3_mutex_leave(db->mutex); return rc; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/loadext.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/loadext.c --- sqlite3-3.4.2/src/loadext.c 2007-07-20 11:32:01.000000000 +0100 +++ sqlite3-3.6.16/src/loadext.c 2009-06-25 12:35:51.000000000 +0100 @@ -11,15 +11,18 @@ ************************************************************************* ** This file contains code used to dynamically load extensions into ** the SQLite library. 
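
Editorial aside: from the application side, the loadable-extension machinery below is driven by two public calls. The shared-library name in this sketch is hypothetical; passing 0 for the entry point selects the default "sqlite3_extension_init", as the code below documents.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Loading is disabled by default (see the security note in the hunks
  ** below) and must be switched on per connection. */
  sqlite3_enable_load_extension(db, 1);
  if( sqlite3_load_extension(db, "./myext.so", 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "load failed: %s\n", zErr ? zErr : "unknown error");
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}
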
+** +** $Id: loadext.c,v 1.60 2009/06/03 01:24:54 drh Exp $ */ -#ifndef SQLITE_OMIT_LOAD_EXTENSION -#define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */ +#ifndef SQLITE_CORE + #define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */ +#endif #include "sqlite3ext.h" #include "sqliteInt.h" -#include "os.h" #include -#include + +#ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** Some API routines are omitted when various features are @@ -94,6 +97,15 @@ # define sqlite3_get_table 0 #endif +#ifdef SQLITE_OMIT_INCRBLOB +#define sqlite3_bind_zeroblob 0 +#define sqlite3_blob_bytes 0 +#define sqlite3_blob_close 0 +#define sqlite3_blob_open 0 +#define sqlite3_blob_read 0 +#define sqlite3_blob_write 0 +#endif + /* ** The following structure contains pointers to all SQLite API routines. ** A pointer to this structure is passed into extensions when they are @@ -109,9 +121,13 @@ ** also check to make sure that the pointer to the function is ** not NULL before calling it. */ -const sqlite3_api_routines sqlite3_apis = { +static const sqlite3_api_routines sqlite3Apis = { sqlite3_aggregate_context, +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_aggregate_count, +#else + 0, +#endif sqlite3_bind_blob, sqlite3_bind_double, sqlite3_bind_int, @@ -166,7 +182,11 @@ sqlite3_errmsg, sqlite3_errmsg16, sqlite3_exec, +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_expired, +#else + 0, +#endif sqlite3_finalize, sqlite3_free, sqlite3_free_table, @@ -206,10 +226,18 @@ sqlite3_snprintf, sqlite3_step, sqlite3_table_column_metadata, +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_thread_cleanup, +#else + 0, +#endif sqlite3_total_changes, sqlite3_trace, +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_transfer_bindings, +#else + 0, +#endif sqlite3_update_hook, sqlite3_user_data, sqlite3_value_blob, @@ -247,6 +275,60 @@ */ sqlite3_create_module_v2, + /* + ** Added for 3.5.0 + */ + sqlite3_bind_zeroblob, + sqlite3_blob_bytes, + sqlite3_blob_close, + sqlite3_blob_open, + sqlite3_blob_read, + sqlite3_blob_write, + sqlite3_create_collation_v2, + sqlite3_file_control, + sqlite3_memory_highwater, + sqlite3_memory_used, +#ifdef SQLITE_MUTEX_OMIT + 0, + 0, + 0, + 0, + 0, +#else + sqlite3_mutex_alloc, + sqlite3_mutex_enter, + sqlite3_mutex_free, + sqlite3_mutex_leave, + sqlite3_mutex_try, +#endif + sqlite3_open_v2, + sqlite3_release_memory, + sqlite3_result_error_nomem, + sqlite3_result_error_toobig, + sqlite3_sleep, + sqlite3_soft_heap_limit, + sqlite3_vfs_find, + sqlite3_vfs_register, + sqlite3_vfs_unregister, + + /* + ** Added for 3.5.8 + */ + sqlite3_threadsafe, + sqlite3_result_zeroblob, + sqlite3_result_error_code, + sqlite3_test_control, + sqlite3_randomness, + sqlite3_context_db_handle, + + /* + ** Added for 3.6.0 + */ + sqlite3_extended_result_codes, + sqlite3_limit, + sqlite3_next_stmt, + sqlite3_sql, + sqlite3_status, }; /* @@ -259,18 +341,22 @@ ** ** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with ** error message text. The calling function should free this memory -** by calling sqlite3_free(). +** by calling sqlite3DbFree(db, ). */ -int sqlite3_load_extension( +static int sqlite3LoadExtension( sqlite3 *db, /* Load the extension into this database connection */ const char *zFile, /* Name of the shared library containing extension */ const char *zProc, /* Entry point. 
Use "sqlite3_extension_init" if 0 */ char **pzErrMsg /* Put error message here if not 0 */ ){ + sqlite3_vfs *pVfs = db->pVfs; void *handle; int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); char *zErrmsg = 0; void **aHandle; + const int nMsg = 300; + + if( pzErrMsg ) *pzErrMsg = 0; /* Ticket #1863. To avoid a creating security problems for older ** applications that relink against newer versions of SQLite, the @@ -289,46 +375,71 @@ zProc = "sqlite3_extension_init"; } - handle = sqlite3OsDlopen(zFile); + handle = sqlite3OsDlOpen(pVfs, zFile); if( handle==0 ){ if( pzErrMsg ){ - *pzErrMsg = sqlite3_mprintf("unable to open shared library [%s]", zFile); + zErrmsg = sqlite3StackAllocZero(db, nMsg); + if( zErrmsg ){ + sqlite3_snprintf(nMsg, zErrmsg, + "unable to open shared library [%s]", zFile); + sqlite3OsDlError(pVfs, nMsg-1, zErrmsg); + *pzErrMsg = sqlite3DbStrDup(0, zErrmsg); + sqlite3StackFree(db, zErrmsg); + } } return SQLITE_ERROR; } xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - sqlite3OsDlsym(handle, zProc); + sqlite3OsDlSym(pVfs, handle, zProc); if( xInit==0 ){ if( pzErrMsg ){ - *pzErrMsg = sqlite3_mprintf("no entry point [%s] in shared library [%s]", - zProc, zFile); + zErrmsg = sqlite3StackAllocZero(db, nMsg); + if( zErrmsg ){ + sqlite3_snprintf(nMsg, zErrmsg, + "no entry point [%s] in shared library [%s]", zProc,zFile); + sqlite3OsDlError(pVfs, nMsg-1, zErrmsg); + *pzErrMsg = sqlite3DbStrDup(0, zErrmsg); + sqlite3StackFree(db, zErrmsg); + } + sqlite3OsDlClose(pVfs, handle); } - sqlite3OsDlclose(handle); return SQLITE_ERROR; - }else if( xInit(db, &zErrmsg, &sqlite3_apis) ){ + }else if( xInit(db, &zErrmsg, &sqlite3Apis) ){ if( pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg); } sqlite3_free(zErrmsg); - sqlite3OsDlclose(handle); + sqlite3OsDlClose(pVfs, handle); return SQLITE_ERROR; } /* Append the new shared library handle to the db->aExtension array. */ - db->nExtension++; - aHandle = sqliteMalloc(sizeof(handle)*db->nExtension); + aHandle = sqlite3DbMallocZero(db, sizeof(handle)*(db->nExtension+1)); if( aHandle==0 ){ return SQLITE_NOMEM; } if( db->nExtension>0 ){ - memcpy(aHandle, db->aExtension, sizeof(handle)*(db->nExtension-1)); + memcpy(aHandle, db->aExtension, sizeof(handle)*db->nExtension); } - sqliteFree(db->aExtension); + sqlite3DbFree(db, db->aExtension); db->aExtension = aHandle; - db->aExtension[db->nExtension-1] = handle; + db->aExtension[db->nExtension++] = handle; return SQLITE_OK; } +int sqlite3_load_extension( + sqlite3 *db, /* Load the extension into this database connection */ + const char *zFile, /* Name of the shared library containing extension */ + const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */ + char **pzErrMsg /* Put error message here if not 0 */ +){ + int rc; + sqlite3_mutex_enter(db->mutex); + rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} /* ** Call this routine when the database connection is closing in order @@ -336,10 +447,11 @@ */ void sqlite3CloseExtensions(sqlite3 *db){ int i; + assert( sqlite3_mutex_held(db->mutex) ); for(i=0; inExtension; i++){ - sqlite3OsDlclose(db->aExtension[i]); + sqlite3OsDlClose(db->pVfs, db->aExtension[i]); } - sqliteFree(db->aExtension); + sqlite3DbFree(db, db->aExtension); } /* @@ -347,94 +459,153 @@ ** default so as not to open security holes in older applications. 
*/ int sqlite3_enable_load_extension(sqlite3 *db, int onoff){ + sqlite3_mutex_enter(db->mutex); if( onoff ){ db->flags |= SQLITE_LoadExtension; }else{ db->flags &= ~SQLITE_LoadExtension; } + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + +/* +** The auto-extension code added regardless of whether or not extension +** loading is supported. We need a dummy sqlite3Apis pointer for that +** code if regular extension loading is not available. This is that +** dummy pointer. +*/ +#ifdef SQLITE_OMIT_LOAD_EXTENSION +static const sqlite3_api_routines sqlite3Apis = { 0 }; +#endif + + /* -** A list of automatically loaded extensions. +** The following object holds the list of automatically loaded +** extensions. ** -** This list is shared across threads, so be sure to hold the -** mutex while accessing or changing it. +** This list is shared across threads. The SQLITE_MUTEX_STATIC_MASTER +** mutex must be held while accessing this list. */ -static int nAutoExtension = 0; -static void **aAutoExtension = 0; +typedef struct sqlite3AutoExtList sqlite3AutoExtList; +static SQLITE_WSD struct sqlite3AutoExtList { + int nExt; /* Number of entries in aExt[] */ + void (**aExt)(void); /* Pointers to the extension init functions */ +} sqlite3Autoext = { 0, 0 }; + +/* The "wsdAutoext" macro will resolve to the autoextension +** state vector. If writable static data is unsupported on the target, +** we have to locate the state vector at run-time. In the more common +** case where writable static data is supported, wsdStat can refer directly +** to the "sqlite3Autoext" state vector declared above. +*/ +#ifdef SQLITE_OMIT_WSD +# define wsdAutoextInit \ + sqlite3AutoExtList *x = &GLOBAL(sqlite3AutoExtList,sqlite3Autoext) +# define wsdAutoext x[0] +#else +# define wsdAutoextInit +# define wsdAutoext sqlite3Autoext +#endif /* ** Register a statically linked extension that is automatically ** loaded by every new database connection. */ -int sqlite3_auto_extension(void *xInit){ - int i; +int sqlite3_auto_extension(void (*xInit)(void)){ int rc = SQLITE_OK; - sqlite3OsEnterMutex(); - for(i=0; i=nAutoExtension ){ + char *zErrmsg; +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); +#endif + sqlite3_mutex_enter(mutex); + if( i>=wsdAutoext.nExt ){ xInit = 0; go = 0; }else{ xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - aAutoExtension[i]; + wsdAutoext.aExt[i]; } - sqlite3OsLeaveMutex(); - if( xInit && xInit(db, &zErrmsg, &sqlite3_apis) ){ + sqlite3_mutex_leave(mutex); + zErrmsg = 0; + if( xInit && xInit(db, &zErrmsg, &sqlite3Apis) ){ sqlite3Error(db, SQLITE_ERROR, "automatic extension loading failed: %s", zErrmsg); go = 0; - rc = SQLITE_ERROR; } + sqlite3_free(zErrmsg); } - return rc; } - -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/main.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/main.c --- sqlite3-3.4.2/src/main.c 2007-08-13 18:46:00.000000000 +0100 +++ sqlite3-3.6.16/src/main.c 2009-06-26 16:14:55.000000000 +0100 @@ -14,26 +14,39 @@ ** other files are for internal use by SQLite and should not be ** accessed by users of the library. 
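
Editorial aside on the auto-extension list implemented just above: sqlite3_auto_extension() records an init function that is invoked for every subsequently opened connection. The sketch below is a minimal standalone use with a stub init function invented for illustration; the third parameter is really a const sqlite3_api_routines*, received loosely here to avoid pulling in sqlite3ext.h.

#include <sqlite3.h>

/* Stub extension entry point; a real one would register user functions,
** collations, or virtual table modules on the new connection. */
static int myAutoInit(sqlite3 *db, char **pzErrMsg, const void *pApi){
  (void)db; (void)pzErrMsg; (void)pApi;
  return SQLITE_OK;
}

int main(void){
  sqlite3 *db;
  /* Register once; runs for every connection opened afterwards. */
  sqlite3_auto_extension((void(*)(void))myAutoInit);
  if( sqlite3_open(":memory:", &db)==SQLITE_OK ){  /* myAutoInit runs here */
    sqlite3_close(db);
  }
  sqlite3_reset_auto_extension();   /* Clear the list again */
  return 0;
}
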
** -** $Id: main.c,v 1.378 2007/08/13 15:28:34 danielk1977 Exp $ +** $Id: main.c,v 1.560 2009/06/26 15:14:55 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include + +#ifdef SQLITE_ENABLE_FTS3 +# include "fts3.h" +#endif +#ifdef SQLITE_ENABLE_RTREE +# include "rtree.h" +#endif +#ifdef SQLITE_ENABLE_ICU +# include "sqliteicu.h" +#endif /* ** The version of the library */ +#ifndef SQLITE_AMALGAMATION const char sqlite3_version[] = SQLITE_VERSION; +#endif const char *sqlite3_libversion(void){ return sqlite3_version; } int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; } +int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; } +#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) /* ** If the following function pointer is not NULL and if ** SQLITE_ENABLE_IOTRACE is enabled, then messages describing ** I/O active are written using this function. These messages ** are intended for debugging activity only. */ -void (*sqlite3_io_trace)(const char*, ...) = 0; +void (*sqlite3IoTrace)(const char*, ...) = 0; +#endif /* ** If the following global variable points to a string which is the @@ -44,13 +57,438 @@ */ char *sqlite3_temp_directory = 0; +/* +** Initialize SQLite. +** +** This routine must be called to initialize the memory allocation, +** VFS, and mutex subsystems prior to doing any serious work with +** SQLite. But as long as you do not compile with SQLITE_OMIT_AUTOINIT +** this routine will be called automatically by key routines such as +** sqlite3_open(). +** +** This routine is a no-op except on its very first call for the process, +** or for the first call after a call to sqlite3_shutdown. +** +** The first thread to call this routine runs the initialization to +** completion. If subsequent threads call this routine before the first +** thread has finished the initialization process, then the subsequent +** threads must block until the first thread finishes with the initialization. +** +** The first thread might call this routine recursively. Recursive +** calls to this routine should not block, of course. Otherwise the +** initialization process would never complete. +** +** Let X be the first thread to enter this routine. Let Y be some other +** thread. Then while the initial invocation of this routine by X is +** incomplete, it is required that: +** +** * Calls to this routine from Y must block until the outer-most +** call by X completes. +** +** * Recursive calls to this routine from thread X return immediately +** without blocking. +*/ +int sqlite3_initialize(void){ + sqlite3_mutex *pMaster; /* The main static mutex */ + int rc; /* Result code */ + +#ifdef SQLITE_OMIT_WSD + rc = sqlite3_wsd_init(4096, 24); + if( rc!=SQLITE_OK ){ + return rc; + } +#endif + + /* If SQLite is already completely initialized, then this call + ** to sqlite3_initialize() should be a no-op. But the initialization + ** must be complete. So isInit must not be set until the very end + ** of this routine. + */ + if( sqlite3GlobalConfig.isInit ) return SQLITE_OK; + + /* Make sure the mutex subsystem is initialized. If unable to + ** initialize the mutex subsystem, return early with the error. + ** If the system is so sick that we are unable to allocate a mutex, + ** there is not much SQLite is going to be able to do. + ** + ** The mutex subsystem must take care of serializing its own + ** initialization. + */ + rc = sqlite3MutexInit(); + if( rc ) return rc; + + /* Initialize the malloc() system and the recursive pInitMutex mutex. 
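
Editorial aside, not part of the diff and assuming a threadsafe (default) build: the practical consequence of this initialization design is an ordering contract visible through the public API. sqlite3_config() is accepted only while the library is uninitialized, and sqlite3_shutdown() reopens that window.

#include <sqlite3.h>

int main(void){
  int rc;
  rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);   /* OK: not yet initialized */
  if( rc!=SQLITE_OK ) return 1;
  rc = sqlite3_initialize();
  if( rc!=SQLITE_OK ) return 1;
  /* Once initialized, further configuration is refused. */
  if( sqlite3_config(SQLITE_CONFIG_MULTITHREAD)!=SQLITE_MISUSE ) return 1;
  /* After shutdown the library may be reconfigured and reinitialized. */
  sqlite3_shutdown();
  if( sqlite3_config(SQLITE_CONFIG_MULTITHREAD)!=SQLITE_OK ) return 1;
  return 0;
}
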
+ ** This operation is protected by the STATIC_MASTER mutex. Note that + ** MutexAlloc() is called for a static mutex prior to initializing the + ** malloc subsystem - this implies that the allocation of a static + ** mutex must not require support from the malloc subsystem. + */ + pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(pMaster); + if( !sqlite3GlobalConfig.isMallocInit ){ + rc = sqlite3MallocInit(); + } + if( rc==SQLITE_OK ){ + sqlite3GlobalConfig.isMallocInit = 1; + if( !sqlite3GlobalConfig.pInitMutex ){ + sqlite3GlobalConfig.pInitMutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); + if( sqlite3GlobalConfig.bCoreMutex && !sqlite3GlobalConfig.pInitMutex ){ + rc = SQLITE_NOMEM; + } + } + } + if( rc==SQLITE_OK ){ + sqlite3GlobalConfig.nRefInitMutex++; + } + sqlite3_mutex_leave(pMaster); + + /* If unable to initialize the malloc subsystem, then return early. + ** There is little hope of getting SQLite to run if the malloc + ** subsystem cannot be initialized. + */ + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Do the rest of the initialization under the recursive mutex so + ** that we will be able to handle recursive calls into + ** sqlite3_initialize(). The recursive calls normally come through + ** sqlite3_os_init() when it invokes sqlite3_vfs_register(), but other + ** recursive calls might also be possible. + */ + sqlite3_mutex_enter(sqlite3GlobalConfig.pInitMutex); + if( sqlite3GlobalConfig.isInit==0 && sqlite3GlobalConfig.inProgress==0 ){ + FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); + sqlite3GlobalConfig.inProgress = 1; + memset(pHash, 0, sizeof(sqlite3GlobalFunctions)); + sqlite3RegisterGlobalFunctions(); + rc = sqlite3PcacheInitialize(); + if( rc==SQLITE_OK ){ + rc = sqlite3_os_init(); + } + if( rc==SQLITE_OK ){ + sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, + sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); + sqlite3GlobalConfig.isInit = 1; + } + sqlite3GlobalConfig.inProgress = 0; + } + sqlite3_mutex_leave(sqlite3GlobalConfig.pInitMutex); + + /* Go back under the static mutex and clean up the recursive + ** mutex to prevent a resource leak. + */ + sqlite3_mutex_enter(pMaster); + sqlite3GlobalConfig.nRefInitMutex--; + if( sqlite3GlobalConfig.nRefInitMutex<=0 ){ + assert( sqlite3GlobalConfig.nRefInitMutex==0 ); + sqlite3_mutex_free(sqlite3GlobalConfig.pInitMutex); + sqlite3GlobalConfig.pInitMutex = 0; + } + sqlite3_mutex_leave(pMaster); + + /* The following is just a sanity check to make sure SQLite has + ** been compiled correctly. It is important to run this code, but + ** we don't want to run it too often and soak up CPU cycles for no + ** reason. So we run it once during initialization. + */ +#ifndef NDEBUG +#ifndef SQLITE_OMIT_FLOATING_POINT + /* This section of code's only "output" is via assert() statements. */ + if ( rc==SQLITE_OK ){ + u64 x = (((u64)1)<<63)-1; + double y; + assert(sizeof(x)==8); + assert(sizeof(x)==sizeof(y)); + memcpy(&y, &x, 8); + assert( sqlite3IsNaN(y) ); + } +#endif +#endif + + return rc; +} + +/* +** Undo the effects of sqlite3_initialize(). Must not be called while +** there are outstanding database connections or memory allocations or +** while any part of SQLite is otherwise in use in any thread. This +** routine is not threadsafe. But it is safe to invoke this routine +** on when SQLite is already shut down. If SQLite is already shut down +** when this routine is invoked, then this routine is a harmless no-op. 
+*/ +int sqlite3_shutdown(void){ + if( sqlite3GlobalConfig.isInit ){ + sqlite3GlobalConfig.isMallocInit = 0; + sqlite3PcacheShutdown(); + sqlite3_os_end(); + sqlite3_reset_auto_extension(); + sqlite3MallocEnd(); + sqlite3MutexEnd(); + sqlite3GlobalConfig.isInit = 0; + } + return SQLITE_OK; +} + +/* +** This API allows applications to modify the global configuration of +** the SQLite library at run-time. +** +** This routine should only be called when there are no outstanding +** database connections or memory allocations. This routine is not +** threadsafe. Failure to heed these warnings can lead to unpredictable +** behavior. +*/ +int sqlite3_config(int op, ...){ + va_list ap; + int rc = SQLITE_OK; + + /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while + ** the SQLite library is in use. */ + if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE; + + va_start(ap, op); + switch( op ){ + + /* Mutex configuration options are only available in a threadsafe + ** compile. + */ +#if SQLITE_THREADSAFE + case SQLITE_CONFIG_SINGLETHREAD: { + /* Disable all mutexing */ + sqlite3GlobalConfig.bCoreMutex = 0; + sqlite3GlobalConfig.bFullMutex = 0; + break; + } + case SQLITE_CONFIG_MULTITHREAD: { + /* Disable mutexing of database connections */ + /* Enable mutexing of core data structures */ + sqlite3GlobalConfig.bCoreMutex = 1; + sqlite3GlobalConfig.bFullMutex = 0; + break; + } + case SQLITE_CONFIG_SERIALIZED: { + /* Enable all mutexing */ + sqlite3GlobalConfig.bCoreMutex = 1; + sqlite3GlobalConfig.bFullMutex = 1; + break; + } + case SQLITE_CONFIG_MUTEX: { + /* Specify an alternative mutex implementation */ + sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*); + break; + } + case SQLITE_CONFIG_GETMUTEX: { + /* Retrieve the current mutex implementation */ + *va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex; + break; + } +#endif + + + case SQLITE_CONFIG_MALLOC: { + /* Specify an alternative malloc implementation */ + sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*); + break; + } + case SQLITE_CONFIG_GETMALLOC: { + /* Retrieve the current malloc() implementation */ + if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault(); + *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m; + break; + } + case SQLITE_CONFIG_MEMSTATUS: { + /* Enable or disable the malloc status collection */ + sqlite3GlobalConfig.bMemstat = va_arg(ap, int); + break; + } + case SQLITE_CONFIG_SCRATCH: { + /* Designate a buffer for scratch memory space */ + sqlite3GlobalConfig.pScratch = va_arg(ap, void*); + sqlite3GlobalConfig.szScratch = va_arg(ap, int); + sqlite3GlobalConfig.nScratch = va_arg(ap, int); + break; + } + case SQLITE_CONFIG_PAGECACHE: { + /* Designate a buffer for page cache memory space */ + sqlite3GlobalConfig.pPage = va_arg(ap, void*); + sqlite3GlobalConfig.szPage = va_arg(ap, int); + sqlite3GlobalConfig.nPage = va_arg(ap, int); + break; + } + + case SQLITE_CONFIG_PCACHE: { + /* Specify an alternative page cache implementation */ + sqlite3GlobalConfig.pcache = *va_arg(ap, sqlite3_pcache_methods*); + break; + } + + case SQLITE_CONFIG_GETPCACHE: { + if( sqlite3GlobalConfig.pcache.xInit==0 ){ + sqlite3PCacheSetDefault(); + } + *va_arg(ap, sqlite3_pcache_methods*) = sqlite3GlobalConfig.pcache; + break; + } + +#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5) + case SQLITE_CONFIG_HEAP: { + /* Designate a buffer for heap memory space */ + sqlite3GlobalConfig.pHeap = va_arg(ap, void*); + sqlite3GlobalConfig.nHeap = va_arg(ap, int); + 
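/*
** [Editor's note] Illustrative sketch, not part of the patch: because
** sqlite3_config() returns SQLITE_MISUSE once isInit is set, all global
** configuration must happen before the first sqlite3_initialize() (or
** sqlite3_open()).  configureLibrary is a hypothetical name.
*/
#include <sqlite3.h>

static int configureLibrary(void){
  int rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);   /* core mutexes only */
  if( rc==SQLITE_OK ){
    rc = sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);    /* skip malloc stats */
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_initialize();
  }
  return rc;
}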
sqlite3GlobalConfig.mnReq = va_arg(ap, int); + + if( sqlite3GlobalConfig.pHeap==0 ){ + /* If the heap pointer is NULL, then restore the malloc implementation + ** back to NULL pointers too. This will cause the malloc to go + ** back to its default implementation when sqlite3_initialize() is + ** run. + */ + memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m)); + }else{ + /* The heap pointer is not NULL, then install one of the + ** mem5.c/mem3.c methods. If neither ENABLE_MEMSYS3 nor + ** ENABLE_MEMSYS5 is defined, return an error. + */ +#ifdef SQLITE_ENABLE_MEMSYS3 + sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3(); +#endif +#ifdef SQLITE_ENABLE_MEMSYS5 + sqlite3GlobalConfig.m = *sqlite3MemGetMemsys5(); +#endif + } + break; + } +#endif + + case SQLITE_CONFIG_LOOKASIDE: { + sqlite3GlobalConfig.szLookaside = va_arg(ap, int); + sqlite3GlobalConfig.nLookaside = va_arg(ap, int); + break; + } + + default: { + rc = SQLITE_ERROR; + break; + } + } + va_end(ap); + return rc; +} + +/* +** Set up the lookaside buffers for a database connection. +** Return SQLITE_OK on success. +** If lookaside is already active, return SQLITE_BUSY. +** +** The sz parameter is the number of bytes in each lookaside slot. +** The cnt parameter is the number of slots. If pStart is NULL the +** space for the lookaside memory is obtained from sqlite3_malloc(). +** If pStart is not NULL then it is sz*cnt bytes of memory to use for +** the lookaside memory. +*/ +static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ + void *pStart; + if( db->lookaside.nOut ){ + return SQLITE_BUSY; + } + /* Free any existing lookaside buffer for this handle before + ** allocating a new one so we don't have to have space for + ** both at the same time. + */ + if( db->lookaside.bMalloced ){ + sqlite3_free(db->lookaside.pStart); + } + /* The size of a lookaside slot needs to be larger than a pointer + ** to be useful. + */ + if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; + if( cnt<0 ) cnt = 0; + if( sz==0 || cnt==0 ){ + sz = 0; + pStart = 0; + }else if( pBuf==0 ){ + sz = ROUND8(sz); + sqlite3BeginBenignMalloc(); + pStart = sqlite3Malloc( sz*cnt ); + sqlite3EndBenignMalloc(); + }else{ + sz = ROUNDDOWN8(sz); + pStart = pBuf; + } + db->lookaside.pStart = pStart; + db->lookaside.pFree = 0; + db->lookaside.sz = (u16)sz; + if( pStart ){ + int i; + LookasideSlot *p; + assert( sz > (int)sizeof(LookasideSlot*) ); + p = (LookasideSlot*)pStart; + for(i=cnt-1; i>=0; i--){ + p->pNext = db->lookaside.pFree; + db->lookaside.pFree = p; + p = (LookasideSlot*)&((u8*)p)[sz]; + } + db->lookaside.pEnd = p; + db->lookaside.bEnabled = 1; + db->lookaside.bMalloced = pBuf==0 ?1:0; + }else{ + db->lookaside.pEnd = 0; + db->lookaside.bEnabled = 0; + db->lookaside.bMalloced = 0; + } + return SQLITE_OK; +} + +/* +** Return the mutex associated with a database connection. +*/ +sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){ + return db->mutex; +} + +/* +** Configuration settings for an individual database connection +*/ +int sqlite3_db_config(sqlite3 *db, int op, ...){ + va_list ap; + int rc; + va_start(ap, op); + switch( op ){ + case SQLITE_DBCONFIG_LOOKASIDE: { + void *pBuf = va_arg(ap, void*); + int sz = va_arg(ap, int); + int cnt = va_arg(ap, int); + rc = setupLookaside(db, pBuf, sz, cnt); + break; + } + default: { + rc = SQLITE_ERROR; + break; + } + } + va_end(ap); + return rc; +} + + +/* +** Return true if the buffer z[0..n-1] contains all spaces. 
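/*
** [Editor's note] Illustrative sketch, not part of the patch: the lookaside
** allocator configured by setupLookaside() above can be resized per
** connection through sqlite3_db_config().  Passing a NULL buffer lets SQLite
** allocate the space itself; useBiggerLookaside is a hypothetical name.
*/
#include <sqlite3.h>

static int useBiggerLookaside(sqlite3 *db){
  /* 512-byte slots, 250 slots; returns SQLITE_BUSY if lookaside memory
  ** is currently checked out (db->lookaside.nOut!=0). */
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 512, 250);
}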
+*/ +static int allSpaces(const char *z, int n){ + while( n>0 && z[n-1]==' ' ){ n--; } + return n==0; +} /* ** This is the default collating function named "BINARY" which is always ** available. +** +** If the padFlag argument is not NULL then space padding at the end +** of strings is ignored. This implements the RTRIM collation. */ static int binCollFunc( - void *NotUsed, + void *padFlag, int nKey1, const void *pKey1, int nKey2, const void *pKey2 ){ @@ -58,7 +496,14 @@ n = nKey1pSavepoint ){ + Savepoint *pTmp = db->pSavepoint; + db->pSavepoint = pTmp->pNext; + sqlite3DbFree(db, pTmp); + } + db->nSavepoint = 0; + db->nStatement = 0; + db->isTransactionSavepoint = 0; +} + +/* ** Close an existing SQLite database */ int sqlite3_close(sqlite3 *db){ @@ -116,16 +578,10 @@ if( !db ){ return SQLITE_OK; } - if( sqlite3SafetyCheck(db) ){ + if( !sqlite3SafetyCheckSickOrOk(db) ){ return SQLITE_MISUSE; } - -#ifdef SQLITE_SSE - { - extern void sqlite3SseCleanup(sqlite3*); - sqlite3SseCleanup(db); - } -#endif + sqlite3_mutex_enter(db->mutex); sqlite3ResetInternalSchema(db, 0); @@ -141,24 +597,25 @@ /* If there are any outstanding VMs, return SQLITE_BUSY. */ if( db->pVdbe ){ sqlite3Error(db, SQLITE_BUSY, - "Unable to close due to unfinalised statements"); + "unable to close due to unfinalised statements"); + sqlite3_mutex_leave(db->mutex); return SQLITE_BUSY; } - assert( !sqlite3SafetyCheck(db) ); + assert( sqlite3SafetyCheckSickOrOk(db) ); - /* FIX ME: db->magic may be set to SQLITE_MAGIC_CLOSED if the database - ** cannot be opened for some reason. So this routine needs to run in - ** that case. But maybe there should be an extra magic value for the - ** "failed to open" state. - ** - ** TODO: Coverage tests do not test the case where this condition is - ** true. It's hard to see how to cause it without messing with threads. - */ - if( db->magic!=SQLITE_MAGIC_CLOSED && sqlite3SafetyOn(db) ){ - /* printf("DID NOT CLOSE\n"); fflush(stdout); */ - return SQLITE_ERROR; + for(j=0; jnDb; j++){ + Btree *pBt = db->aDb[j].pBt; + if( pBt && sqlite3BtreeIsInBackup(pBt) ){ + sqlite3Error(db, SQLITE_BUSY, + "unable to close due to unfinished backup operation"); + sqlite3_mutex_leave(db->mutex); + return SQLITE_BUSY; + } } + /* Free any outstanding Savepoint structures. */ + sqlite3CloseSavepoints(db); + for(j=0; jnDb; j++){ struct Db *pDb = &db->aDb[j]; if( pDb->pBt ){ @@ -170,16 +627,25 @@ } } sqlite3ResetInternalSchema(db, 0); + + /* Tell the code in notify.c that the connection no longer holds any + ** locks and does not require any further unlock-notify callbacks. + */ + sqlite3ConnectionClosed(db); + assert( db->nDb<=2 ); assert( db->aDb==db->aDbStatic ); - for(i=sqliteHashFirst(&db->aFunc); i; i=sqliteHashNext(i)){ - FuncDef *pFunc, *pNext; - for(pFunc = (FuncDef*)sqliteHashData(i); pFunc; pFunc=pNext){ - pNext = pFunc->pNext; - sqliteFree(pFunc); + for(j=0; jaFunc.a); j++){ + FuncDef *pNext, *pHash, *p; + for(p=db->aFunc.a[j]; p; p=pHash){ + pHash = p->pHash; + while( p ){ + pNext = p->pNext; + sqlite3DbFree(db, p); + p = pNext; + } } } - for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){ CollSeq *pColl = (CollSeq *)sqliteHashData(i); /* Invoke any destructors registered for collation sequence user data. 
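/*
** [Editor's note] Illustrative sketch, not part of the patch: the padFlag
** path through binCollFunc() implements the new built-in RTRIM collation,
** which ignores trailing spaces when comparing TEXT values.  demoRtrim is a
** hypothetical name.
*/
#include <sqlite3.h>

static int demoRtrim(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE TABLE t(x TEXT COLLATE RTRIM);"
      "INSERT INTO t VALUES('abc   ');"
      "SELECT count(*) FROM t WHERE x='abc';",  /* matches despite padding */
      0, 0, 0);
}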
*/ @@ -188,7 +654,7 @@ pColl[j].xDel(pColl[j].pUser); } } - sqliteFree(pColl); + sqlite3DbFree(db, pColl); } sqlite3HashClear(&db->aCollSeq); #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -197,12 +663,11 @@ if( pMod->xDestroy ){ pMod->xDestroy(pMod->pAux); } - sqliteFree(pMod); + sqlite3DbFree(db, pMod); } sqlite3HashClear(&db->aModule); #endif - sqlite3HashClear(&db->aFunc); sqlite3Error(db, SQLITE_OK, 0); /* Deallocates any cached error strings. */ if( db->pErr ){ sqlite3ValueFree(db->pErr); @@ -217,9 +682,15 @@ ** the same sqliteMalloc() as the one that allocates the database ** structure? */ - sqliteFree(db->aDb[1].pSchema); - sqliteFree(db); - sqlite3ReleaseThreadData(); + sqlite3DbFree(db, db->aDb[1].pSchema); + sqlite3_mutex_leave(db->mutex); + db->magic = SQLITE_MAGIC_CLOSED; + sqlite3_mutex_free(db->mutex); + assert( db->lookaside.nOut==0 ); /* Fails on a lookaside memory leak */ + if( db->lookaside.bMalloced ){ + sqlite3_free(db->lookaside.pStart); + } + sqlite3_free(db); return SQLITE_OK; } @@ -229,6 +700,8 @@ void sqlite3RollbackAll(sqlite3 *db){ int i; int inTrans = 0; + assert( sqlite3_mutex_held(db->mutex) ); + sqlite3BeginBenignMalloc(); for(i=0; inDb; i++){ if( db->aDb[i].pBt ){ if( sqlite3BtreeIsInTrans(db->aDb[i].pBt) ){ @@ -239,6 +712,8 @@ } } sqlite3VtabRollback(db); + sqlite3EndBenignMalloc(); + if( db->flags&SQLITE_InternChanges ){ sqlite3ExpirePreparedStatements(db); sqlite3ResetInternalSchema(db, 0); @@ -255,37 +730,41 @@ ** argument. */ const char *sqlite3ErrStr(int rc){ - const char *z; - switch( rc & 0xff ){ - case SQLITE_ROW: - case SQLITE_DONE: - case SQLITE_OK: z = "not an error"; break; - case SQLITE_ERROR: z = "SQL logic error or missing database"; break; - case SQLITE_PERM: z = "access permission denied"; break; - case SQLITE_ABORT: z = "callback requested query abort"; break; - case SQLITE_BUSY: z = "database is locked"; break; - case SQLITE_LOCKED: z = "database table is locked"; break; - case SQLITE_NOMEM: z = "out of memory"; break; - case SQLITE_READONLY: z = "attempt to write a readonly database"; break; - case SQLITE_INTERRUPT: z = "interrupted"; break; - case SQLITE_IOERR: z = "disk I/O error"; break; - case SQLITE_CORRUPT: z = "database disk image is malformed"; break; - case SQLITE_FULL: z = "database or disk is full"; break; - case SQLITE_CANTOPEN: z = "unable to open database file"; break; - case SQLITE_EMPTY: z = "table contains no data"; break; - case SQLITE_SCHEMA: z = "database schema has changed"; break; - case SQLITE_TOOBIG: z = "String or BLOB exceeded size limit"; break; - case SQLITE_CONSTRAINT: z = "constraint failed"; break; - case SQLITE_MISMATCH: z = "datatype mismatch"; break; - case SQLITE_MISUSE: z = "library routine called out of sequence";break; - case SQLITE_NOLFS: z = "kernel lacks large file support"; break; - case SQLITE_AUTH: z = "authorization denied"; break; - case SQLITE_FORMAT: z = "auxiliary database format error"; break; - case SQLITE_RANGE: z = "bind or column index out of range"; break; - case SQLITE_NOTADB: z = "file is encrypted or is not a database";break; - default: z = "unknown error"; break; + static const char* const aMsg[] = { + /* SQLITE_OK */ "not an error", + /* SQLITE_ERROR */ "SQL logic error or missing database", + /* SQLITE_INTERNAL */ 0, + /* SQLITE_PERM */ "access permission denied", + /* SQLITE_ABORT */ "callback requested query abort", + /* SQLITE_BUSY */ "database is locked", + /* SQLITE_LOCKED */ "database table is locked", + /* SQLITE_NOMEM */ "out of memory", + /* SQLITE_READONLY */ "attempt to write 
a readonly database", + /* SQLITE_INTERRUPT */ "interrupted", + /* SQLITE_IOERR */ "disk I/O error", + /* SQLITE_CORRUPT */ "database disk image is malformed", + /* SQLITE_NOTFOUND */ 0, + /* SQLITE_FULL */ "database or disk is full", + /* SQLITE_CANTOPEN */ "unable to open database file", + /* SQLITE_PROTOCOL */ 0, + /* SQLITE_EMPTY */ "table contains no data", + /* SQLITE_SCHEMA */ "database schema has changed", + /* SQLITE_TOOBIG */ "string or blob too big", + /* SQLITE_CONSTRAINT */ "constraint failed", + /* SQLITE_MISMATCH */ "datatype mismatch", + /* SQLITE_MISUSE */ "library routine called out of sequence", + /* SQLITE_NOLFS */ "large file support is disabled", + /* SQLITE_AUTH */ "authorization denied", + /* SQLITE_FORMAT */ "auxiliary database format error", + /* SQLITE_RANGE */ "bind or column index out of range", + /* SQLITE_NOTADB */ "file is encrypted or is not a database", + }; + rc &= 0xff; + if( ALWAYS(rc>=0) && rc<(int)(sizeof(aMsg)/sizeof(aMsg[0])) && aMsg[rc]!=0 ){ + return aMsg[rc]; + }else{ + return "unknown error"; } - return z; } /* @@ -298,13 +777,14 @@ void *ptr, /* Database connection */ int count /* Number of times table has been busy */ ){ -#if OS_WIN || (defined(HAVE_USLEEP) && HAVE_USLEEP) +#if SQLITE_OS_WIN || (defined(HAVE_USLEEP) && HAVE_USLEEP) static const u8 delays[] = { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 }; static const u8 totals[] = { 0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228 }; # define NDELAY (sizeof(delays)/sizeof(delays[0])) - int timeout = ((sqlite3 *)ptr)->busyTimeout; + sqlite3 *db = (sqlite3 *)ptr; + int timeout = db->busyTimeout; int delay, prior; assert( count>=0 ); @@ -319,14 +799,15 @@ delay = timeout - prior; if( delay<=0 ) return 0; } - sqlite3OsSleep(delay); + sqlite3OsSleep(db->pVfs, delay*1000); return 1; #else + sqlite3 *db = (sqlite3 *)ptr; int timeout = ((sqlite3 *)ptr)->busyTimeout; if( (count+1)*1000 > timeout ){ return 0; } - sqlite3OsSleep(1000); + sqlite3OsSleep(db->pVfs, 1000000); return 1; #endif } @@ -340,7 +821,7 @@ */ int sqlite3InvokeBusyHandler(BusyHandler *p){ int rc; - if( p==0 || p->xFunc==0 || p->nBusy<0 ) return 0; + if( NEVER(p==0) || p->xFunc==0 || p->nBusy<0 ) return 0; rc = p->xFunc(p->pArg, p->nBusy); if( rc==0 ){ p->nBusy = -1; @@ -359,12 +840,11 @@ int (*xBusy)(void*,int), void *pArg ){ - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } + sqlite3_mutex_enter(db->mutex); db->busyHandler.xFunc = xBusy; db->busyHandler.pArg = pArg; db->busyHandler.nBusy = 0; + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } @@ -380,17 +860,17 @@ int (*xProgress)(void*), void *pArg ){ - if( !sqlite3SafetyCheck(db) ){ - if( nOps>0 ){ - db->xProgress = xProgress; - db->nProgressOps = nOps; - db->pProgressArg = pArg; - }else{ - db->xProgress = 0; - db->nProgressOps = 0; - db->pProgressArg = 0; - } + sqlite3_mutex_enter(db->mutex); + if( nOps>0 ){ + db->xProgress = xProgress; + db->nProgressOps = nOps; + db->pProgressArg = pArg; + }else{ + db->xProgress = 0; + db->nProgressOps = 0; + db->pProgressArg = 0; } + sqlite3_mutex_leave(db->mutex); } #endif @@ -400,9 +880,6 @@ ** specified number of milliseconds before returning 0. */ int sqlite3_busy_timeout(sqlite3 *db, int ms){ - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } if( ms>0 ){ db->busyTimeout = ms; sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); @@ -416,35 +893,9 @@ ** Cause any pending operation to stop at its earliest opportunity. 
*/ void sqlite3_interrupt(sqlite3 *db){ - if( db && (db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_BUSY) ){ - db->u1.isInterrupted = 1; - } + db->u1.isInterrupted = 1; } -/* -** Memory allocation routines that use SQLites internal memory -** memory allocator. Depending on how SQLite is compiled, the -** internal memory allocator might be just an alias for the -** system default malloc/realloc/free. Or the built-in allocator -** might do extra stuff like put sentinals around buffers to -** check for overruns or look for memory leaks. -** -** Use sqlite3_free() to free memory returned by sqlite3_mprintf(). -*/ -void sqlite3_free(void *p){ if( p ) sqlite3OsFree(p); } -void *sqlite3_malloc(int nByte){ return nByte>0 ? sqlite3OsMalloc(nByte) : 0; } -void *sqlite3_realloc(void *pOld, int nByte){ - if( pOld ){ - if( nByte>0 ){ - return sqlite3OsRealloc(pOld, nByte); - }else{ - sqlite3OsFree(pOld); - return 0; - } - }else{ - return sqlite3_malloc(nByte); - } -} /* ** This function is exactly the same as sqlite3_create_function(), except @@ -465,17 +916,14 @@ FuncDef *p; int nName; - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } + assert( sqlite3_mutex_held(db->mutex) ); if( zFunctionName==0 || (xFunc && (xFinal || xStep)) || (!xFunc && (xFinal && !xStep)) || (!xFunc && (!xFinal && xStep)) || - (nArg<-1 || nArg>127) || - (255<(nName = strlen(zFunctionName))) ){ - sqlite3Error(db, SQLITE_ERROR, "bad parameters"); - return SQLITE_ERROR; + (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) || + (255<(nName = sqlite3Strlen30( zFunctionName))) ){ + return SQLITE_MISUSE; } #ifndef SQLITE_OMIT_UTF16 @@ -492,10 +940,13 @@ int rc; rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8, pUserData, xFunc, xStep, xFinal); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE, - pUserData, xFunc, xStep, xFinal); - if( rc!=SQLITE_OK ) return rc; + if( rc==SQLITE_OK ){ + rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE, + pUserData, xFunc, xStep, xFinal); + } + if( rc!=SQLITE_OK ){ + return rc; + } enc = SQLITE_UTF16BE; } #else @@ -507,27 +958,29 @@ ** is being overridden/deleted but there are no active VMs, allow the ** operation to continue but invalidate all precompiled statements. 
*/ - p = sqlite3FindFunction(db, zFunctionName, nName, nArg, enc, 0); + p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 0); if( p && p->iPrefEnc==enc && p->nArg==nArg ){ if( db->activeVdbeCnt ){ sqlite3Error(db, SQLITE_BUSY, - "Unable to delete/modify user-function due to active statements"); - assert( !sqlite3MallocFailed() ); + "unable to delete/modify user-function due to active statements"); + assert( !db->mallocFailed ); return SQLITE_BUSY; }else{ sqlite3ExpirePreparedStatements(db); } } - p = sqlite3FindFunction(db, zFunctionName, nName, nArg, enc, 1); - if( p ){ - p->flags = 0; - p->xFunc = xFunc; - p->xStep = xStep; - p->xFinalize = xFinal; - p->pUserData = pUserData; - p->nArg = nArg; + p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 1); + assert(p || db->mallocFailed); + if( !p ){ + return SQLITE_NOMEM; } + p->flags = 0; + p->xFunc = xFunc; + p->xStep = xStep; + p->xFinalize = xFinal; + p->pUserData = pUserData; + p->nArg = (u16)nArg; return SQLITE_OK; } @@ -545,10 +998,11 @@ void (*xFinal)(sqlite3_context*) ){ int rc; - assert( !sqlite3MallocFailed() ); + sqlite3_mutex_enter(db->mutex); rc = sqlite3CreateFunc(db, zFunctionName, nArg, enc, p, xFunc, xStep, xFinal); - - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } #ifndef SQLITE_OMIT_UTF16 @@ -564,13 +1018,14 @@ ){ int rc; char *zFunc8; - assert( !sqlite3MallocFailed() ); - - zFunc8 = sqlite3Utf16to8(zFunctionName, -1); + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1); rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xFunc, xStep, xFinal); - sqliteFree(zFunc8); - - return sqlite3ApiExit(db, rc); + sqlite3DbFree(db, zFunc8); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } #endif @@ -592,12 +1047,16 @@ const char *zName, int nArg ){ - int nName = strlen(zName); + int nName = sqlite3Strlen30(zName); + int rc; + sqlite3_mutex_enter(db->mutex); if( sqlite3FindFunction(db, zName, nName, nArg, SQLITE_UTF8, 0)==0 ){ sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8, 0, sqlite3InvalidFunction, 0, 0); } - return sqlite3ApiExit(db, SQLITE_OK); + rc = sqlite3ApiExit(db, SQLITE_OK); + sqlite3_mutex_leave(db->mutex); + return rc; } #ifndef SQLITE_OMIT_TRACE @@ -610,9 +1069,12 @@ ** SQL statement. 
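/*
** [Editor's note] Illustrative sketch, not part of the patch: a minimal
** scalar function registered through sqlite3_create_function(), the public
** wrapper around the sqlite3CreateFunc() routine shown above.  The half()
** function and registerHalf are hypothetical.
*/
#include <sqlite3.h>

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
}

static int registerHalf(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 halfFunc, 0, 0);
}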
*/ void *sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){ - void *pOld = db->pTraceArg; + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pTraceArg; db->xTrace = xTrace; db->pTraceArg = pArg; + sqlite3_mutex_leave(db->mutex); return pOld; } /* @@ -628,9 +1090,12 @@ void (*xProfile)(void*,const char*,sqlite_uint64), void *pArg ){ - void *pOld = db->pProfileArg; + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pProfileArg; db->xProfile = xProfile; db->pProfileArg = pArg; + sqlite3_mutex_leave(db->mutex); return pOld; } #endif /* SQLITE_OMIT_TRACE */ @@ -646,9 +1111,12 @@ int (*xCallback)(void*), /* Function to invoke on each commit */ void *pArg /* Argument to the function */ ){ - void *pOld = db->pCommitArg; + void *pOld; + sqlite3_mutex_enter(db->mutex); + pOld = db->pCommitArg; db->xCommitCallback = xCallback; db->pCommitArg = pArg; + sqlite3_mutex_leave(db->mutex); return pOld; } @@ -661,9 +1129,12 @@ void (*xCallback)(void*,int,char const *,char const *,sqlite_int64), void *pArg /* Argument to the function */ ){ - void *pRet = db->pUpdateArg; + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pUpdateArg; db->xUpdateCallback = xCallback; db->pUpdateArg = pArg; + sqlite3_mutex_leave(db->mutex); return pRet; } @@ -676,13 +1147,50 @@ void (*xCallback)(void*), /* Callback function */ void *pArg /* Argument to the function */ ){ - void *pRet = db->pRollbackArg; + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pRollbackArg; db->xRollbackCallback = xCallback; db->pRollbackArg = pArg; + sqlite3_mutex_leave(db->mutex); return pRet; } /* +** This function returns true if main-memory should be used instead of +** a temporary file for transient pager files and statement journals. +** The value returned depends on the value of db->temp_store (runtime +** parameter) and the compile time value of SQLITE_TEMP_STORE. The +** following table describes the relationship between these two values +** and this functions return value. +** +** SQLITE_TEMP_STORE db->temp_store Location of temporary database +** ----------------- -------------- ------------------------------ +** 0 any file (return 0) +** 1 1 file (return 0) +** 1 2 memory (return 1) +** 1 0 file (return 0) +** 2 1 file (return 0) +** 2 2 memory (return 1) +** 2 0 memory (return 1) +** 3 any memory (return 1) +*/ +int sqlite3TempInMemory(const sqlite3 *db){ +#if SQLITE_TEMP_STORE==1 + return ( db->temp_store==2 ); +#endif +#if SQLITE_TEMP_STORE==2 + return ( db->temp_store!=1 ); +#endif +#if SQLITE_TEMP_STORE==3 + return 1; +#endif +#if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3 + return 0; +#endif +} + +/* ** This routine is called to create a connection to a database BTree ** driver. If zFilename is the name of a file, then that file is ** opened and used. If zFilename is the magic name ":memory:" then @@ -692,58 +1200,45 @@ ** soon as the connection is closed. ** ** A virtual database can be either a disk file (that is automatically -** deleted when the file is closed) or it an be held entirely in memory, -** depending on the values of the TEMP_STORE compile-time macro and the -** db->temp_store variable, according to the following chart: -** -** TEMP_STORE db->temp_store Location of temporary database -** ---------- -------------- ------------------------------ -** 0 any file -** 1 1 file -** 1 2 memory -** 1 0 file -** 2 1 file -** 2 2 memory -** 2 0 memory -** 3 any memory +** deleted when the file is closed) or it an be held entirely in memory. 
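/*
** [Editor's note] Illustrative sketch, not part of the patch: each hook
** setter above swaps a callback/argument pair under the connection mutex
** and returns the previous argument.  Example with sqlite3_update_hook();
** onUpdate and watchChanges are hypothetical names.
*/
#include <stdio.h>
#include <sqlite3.h>

static void onUpdate(void *pArg, int op, const char *zDb,
                     const char *zTbl, sqlite3_int64 rowid){
  (void)pArg; (void)op;
  printf("change in %s.%s rowid=%lld\n", zDb, zTbl, (long long)rowid);
}

static void watchChanges(sqlite3 *db){
  sqlite3_update_hook(db, onUpdate, 0);
}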
+** The sqlite3TempInMemory() function is used to determine which. */ int sqlite3BtreeFactory( const sqlite3 *db, /* Main database when opening aux otherwise 0 */ const char *zFilename, /* Name of the file containing the BTree database */ int omitJournal, /* if TRUE then do not journal this file */ int nCache, /* How many pages in the page cache */ + int vfsFlags, /* Flags passed through to vfsOpen */ Btree **ppBtree /* Pointer to new Btree object written here */ ){ - int btree_flags = 0; + int btFlags = 0; int rc; + assert( sqlite3_mutex_held(db->mutex) ); assert( ppBtree != 0); if( omitJournal ){ - btree_flags |= BTREE_OMIT_JOURNAL; + btFlags |= BTREE_OMIT_JOURNAL; } if( db->flags & SQLITE_NoReadlock ){ - btree_flags |= BTREE_NO_READLOCK; + btFlags |= BTREE_NO_READLOCK; } - if( zFilename==0 ){ -#if TEMP_STORE==0 - /* Do nothing */ -#endif #ifndef SQLITE_OMIT_MEMORYDB -#if TEMP_STORE==1 - if( db->temp_store==2 ) zFilename = ":memory:"; -#endif -#if TEMP_STORE==2 - if( db->temp_store!=1 ) zFilename = ":memory:"; -#endif -#if TEMP_STORE==3 + if( zFilename==0 && sqlite3TempInMemory(db) ){ zFilename = ":memory:"; + } #endif -#endif /* SQLITE_OMIT_MEMORYDB */ + + if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (zFilename==0 || *zFilename==0) ){ + vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; } + rc = sqlite3BtreeOpen(zFilename, (sqlite3 *)db, ppBtree, btFlags, vfsFlags); - rc = sqlite3BtreeOpen(zFilename, (sqlite3 *)db, ppBtree, btree_flags); - if( rc==SQLITE_OK ){ - sqlite3BtreeSetBusyHandler(*ppBtree, (void*)&db->busyHandler); + /* If the B-Tree was successfully opened, set the pager-cache size to the + ** default value. Except, if the call to BtreeOpen() returned a handle + ** open on an existing shared pager-cache, do not change the pager-cache + ** size. + */ + if( rc==SQLITE_OK && 0==sqlite3BtreeSchema(*ppBtree, 0, 0) ){ sqlite3BtreeSetCacheSize(*ppBtree, nCache); } return rc; @@ -755,17 +1250,23 @@ */ const char *sqlite3_errmsg(sqlite3 *db){ const char *z; - assert( !sqlite3MallocFailed() ); if( !db ){ return sqlite3ErrStr(SQLITE_NOMEM); } - if( sqlite3SafetyCheck(db) || db->errCode==SQLITE_MISUSE ){ + if( !sqlite3SafetyCheckSickOrOk(db) ){ return sqlite3ErrStr(SQLITE_MISUSE); } - z = (char*)sqlite3_value_text(db->pErr); - if( z==0 ){ - z = sqlite3ErrStr(db->errCode); + sqlite3_mutex_enter(db->mutex); + if( db->mallocFailed ){ + z = sqlite3ErrStr(SQLITE_NOMEM); + }else{ + z = (char*)sqlite3_value_text(db->pErr); + assert( !db->mallocFailed ); + if( z==0 ){ + z = sqlite3ErrStr(db->errCode); + } } + sqlite3_mutex_leave(db->mutex); return z; } @@ -775,40 +1276,43 @@ ** error. */ const void *sqlite3_errmsg16(sqlite3 *db){ - /* Because all the characters in the string are in the unicode - ** range 0x00-0xFF, if we pad the big-endian string with a - ** zero byte, we can obtain the little-endian string with - ** &big_endian[1]. 
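/*
** [Editor's note] Illustrative sketch, not part of the patch: the
** db->temp_store value consulted by sqlite3TempInMemory() is set with
** PRAGMA temp_store.  With the default SQLITE_TEMP_STORE=1 compile-time
** setting, temp_store=2 moves temporary tables and statement journals into
** memory.  keepTempInMemory is a hypothetical name.
*/
#include <sqlite3.h>

static int keepTempInMemory(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA temp_store=2;", 0, 0, 0);
}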
- */ - static const char outOfMemBe[] = { - 0, 'o', 0, 'u', 0, 't', 0, ' ', - 0, 'o', 0, 'f', 0, ' ', - 0, 'm', 0, 'e', 0, 'm', 0, 'o', 0, 'r', 0, 'y', 0, 0, 0 + static const u16 outOfMem[] = { + 'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0 }; - static const char misuseBe [] = { - 0, 'l', 0, 'i', 0, 'b', 0, 'r', 0, 'a', 0, 'r', 0, 'y', 0, ' ', - 0, 'r', 0, 'o', 0, 'u', 0, 't', 0, 'i', 0, 'n', 0, 'e', 0, ' ', - 0, 'c', 0, 'a', 0, 'l', 0, 'l', 0, 'e', 0, 'd', 0, ' ', - 0, 'o', 0, 'u', 0, 't', 0, ' ', - 0, 'o', 0, 'f', 0, ' ', - 0, 's', 0, 'e', 0, 'q', 0, 'u', 0, 'e', 0, 'n', 0, 'c', 0, 'e', 0, 0, 0 + static const u16 misuse[] = { + 'l', 'i', 'b', 'r', 'a', 'r', 'y', ' ', + 'r', 'o', 'u', 't', 'i', 'n', 'e', ' ', + 'c', 'a', 'l', 'l', 'e', 'd', ' ', + 'o', 'u', 't', ' ', + 'o', 'f', ' ', + 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', 0 }; const void *z; - assert( !sqlite3MallocFailed() ); if( !db ){ - return (void *)(&outOfMemBe[SQLITE_UTF16NATIVE==SQLITE_UTF16LE?1:0]); + return (void *)outOfMem; } - if( sqlite3SafetyCheck(db) || db->errCode==SQLITE_MISUSE ){ - return (void *)(&misuseBe[SQLITE_UTF16NATIVE==SQLITE_UTF16LE?1:0]); + if( !sqlite3SafetyCheckSickOrOk(db) ){ + return (void *)misuse; } - z = sqlite3_value_text16(db->pErr); - if( z==0 ){ - sqlite3ValueSetStr(db->pErr, -1, sqlite3ErrStr(db->errCode), - SQLITE_UTF8, SQLITE_STATIC); + sqlite3_mutex_enter(db->mutex); + if( db->mallocFailed ){ + z = (void *)outOfMem; + }else{ z = sqlite3_value_text16(db->pErr); + if( z==0 ){ + sqlite3ValueSetStr(db->pErr, -1, sqlite3ErrStr(db->errCode), + SQLITE_UTF8, SQLITE_STATIC); + z = sqlite3_value_text16(db->pErr); + } + /* A malloc() may have failed within the call to sqlite3_value_text16() + ** above. If this is the case, then the db->mallocFailed flag needs to + ** be cleared before returning. Do this directly, instead of via + ** sqlite3ApiExit(), to avoid setting the database handle error message. + */ + db->mallocFailed = 0; } - sqlite3ApiExit(0, 0); + sqlite3_mutex_leave(db->mutex); return z; } #endif /* SQLITE_OMIT_UTF16 */ @@ -818,13 +1322,22 @@ ** passed to this function, we assume a malloc() failed during sqlite3_open(). */ int sqlite3_errcode(sqlite3 *db){ - if( !db || sqlite3MallocFailed() ){ + if( db && !sqlite3SafetyCheckSickOrOk(db) ){ + return SQLITE_MISUSE; + } + if( !db || db->mallocFailed ){ return SQLITE_NOMEM; } - if( sqlite3SafetyCheck(db) ){ + return db->errCode & db->errMask; +} +int sqlite3_extended_errcode(sqlite3 *db){ + if( db && !sqlite3SafetyCheckSickOrOk(db) ){ return SQLITE_MISUSE; } - return db->errCode & db->errMask; + if( !db || db->mallocFailed ){ + return SQLITE_NOMEM; + } + return db->errCode; } /* @@ -841,34 +1354,33 @@ ){ CollSeq *pColl; int enc2; + int nName = sqlite3Strlen30(zName); - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } + assert( sqlite3_mutex_held(db->mutex) ); /* If SQLITE_UTF16 is specified as the encoding type, transform this ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. 
*/ - enc2 = enc & ~SQLITE_UTF16_ALIGNED; - if( enc2==SQLITE_UTF16 ){ + enc2 = enc; + testcase( enc2==SQLITE_UTF16 ); + testcase( enc2==SQLITE_UTF16_ALIGNED ); + if( enc2==SQLITE_UTF16 || enc2==SQLITE_UTF16_ALIGNED ){ enc2 = SQLITE_UTF16NATIVE; } - - if( (enc2&~3)!=0 ){ - sqlite3Error(db, SQLITE_ERROR, "unknown encoding"); - return SQLITE_ERROR; + if( enc2SQLITE_UTF16BE ){ + return SQLITE_MISUSE; } /* Check if this call is removing or replacing an existing collation ** sequence. If so, and there are active VMs, return busy. If there ** are no active VMs, invalidate any pre-compiled statements. */ - pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, strlen(zName), 0); + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0); if( pColl && pColl->xCmp ){ if( db->activeVdbeCnt ){ sqlite3Error(db, SQLITE_BUSY, - "Unable to delete/modify collation sequence due to active statements"); + "unable to delete/modify collation sequence due to active statements"); return SQLITE_BUSY; } sqlite3ExpirePreparedStatements(db); @@ -880,7 +1392,7 @@ ** to be called. */ if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){ - CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName, strlen(zName)); + CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName, nName); int j; for(j=0; j<3; j++){ CollSeq *p = &aColl[j]; @@ -894,12 +1406,12 @@ } } - pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, strlen(zName), 1); + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1); if( pColl ){ pColl->xCmp = xCompare; pColl->pUser = pCtx; pColl->xDel = xDel; - pColl->enc = enc2 | (enc & SQLITE_UTF16_ALIGNED); + pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED)); } sqlite3Error(db, SQLITE_OK, 0); return SQLITE_OK; @@ -907,29 +1419,158 @@ /* +** This array defines hard upper bounds on limit values. The +** initializer must be kept in sync with the SQLITE_LIMIT_* +** #defines in sqlite3.h. +*/ +static const int aHardLimit[] = { + SQLITE_MAX_LENGTH, + SQLITE_MAX_SQL_LENGTH, + SQLITE_MAX_COLUMN, + SQLITE_MAX_EXPR_DEPTH, + SQLITE_MAX_COMPOUND_SELECT, + SQLITE_MAX_VDBE_OP, + SQLITE_MAX_FUNCTION_ARG, + SQLITE_MAX_ATTACHED, + SQLITE_MAX_LIKE_PATTERN_LENGTH, + SQLITE_MAX_VARIABLE_NUMBER, +}; + +/* +** Make sure the hard limits are set to reasonable values +*/ +#if SQLITE_MAX_LENGTH<100 +# error SQLITE_MAX_LENGTH must be at least 100 +#endif +#if SQLITE_MAX_SQL_LENGTH<100 +# error SQLITE_MAX_SQL_LENGTH must be at least 100 +#endif +#if SQLITE_MAX_SQL_LENGTH>SQLITE_MAX_LENGTH +# error SQLITE_MAX_SQL_LENGTH must not be greater than SQLITE_MAX_LENGTH +#endif +#if SQLITE_MAX_COMPOUND_SELECT<2 +# error SQLITE_MAX_COMPOUND_SELECT must be at least 2 +#endif +#if SQLITE_MAX_VDBE_OP<40 +# error SQLITE_MAX_VDBE_OP must be at least 40 +#endif +#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>1000 +# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 1000 +#endif +#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>30 +# error SQLITE_MAX_ATTACHED must be between 0 and 30 +#endif +#if SQLITE_MAX_LIKE_PATTERN_LENGTH<1 +# error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1 +#endif +#if SQLITE_MAX_VARIABLE_NUMBER<1 +# error SQLITE_MAX_VARIABLE_NUMBER must be at least 1 +#endif +#if SQLITE_MAX_COLUMN>32767 +# error SQLITE_MAX_COLUMN must not exceed 32767 +#endif + + +/* +** Change the value of a limit. Report the old value. +** If an invalid limit index is supplied, report -1. +** Make no changes but still report the old value if the +** new limit is negative. +** +** A new lower limit does not shrink existing constructs. 
+** It merely prevents new constructs that exceed the limit +** from forming. +*/ +int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ + int oldLimit; + if( limitId<0 || limitId>=SQLITE_N_LIMIT ){ + return -1; + } + oldLimit = db->aLimit[limitId]; + if( newLimit>=0 ){ + if( newLimit>aHardLimit[limitId] ){ + newLimit = aHardLimit[limitId]; + } + db->aLimit[limitId] = newLimit; + } + return oldLimit; +} + +/* ** This routine does the work of opening a database on behalf of ** sqlite3_open() and sqlite3_open16(). The database filename "zFilename" ** is UTF-8 encoded. */ static int openDatabase( const char *zFilename, /* Database filename UTF-8 encoded */ - sqlite3 **ppDb /* OUT: Returned database handle */ + sqlite3 **ppDb, /* OUT: Returned database handle */ + unsigned flags, /* Operational flags */ + const char *zVfs /* Name of the VFS to use */ ){ sqlite3 *db; int rc; CollSeq *pColl; + int isThreadsafe; + + *ppDb = 0; +#ifndef SQLITE_OMIT_AUTOINIT + rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + + if( sqlite3GlobalConfig.bCoreMutex==0 ){ + isThreadsafe = 0; + }else if( flags & SQLITE_OPEN_NOMUTEX ){ + isThreadsafe = 0; + }else if( flags & SQLITE_OPEN_FULLMUTEX ){ + isThreadsafe = 1; + }else{ + isThreadsafe = sqlite3GlobalConfig.bFullMutex; + } - assert( !sqlite3MallocFailed() ); + /* Remove harmful bits from the flags parameter + ** + ** The SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX flags were + ** dealt with in the previous code block. Besides these, the only + ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY, + ** SQLITE_OPEN_READWRITE, and SQLITE_OPEN_CREATE. Silently mask + ** off all other flags. + */ + flags &= ~( SQLITE_OPEN_DELETEONCLOSE | + SQLITE_OPEN_EXCLUSIVE | + SQLITE_OPEN_MAIN_DB | + SQLITE_OPEN_TEMP_DB | + SQLITE_OPEN_TRANSIENT_DB | + SQLITE_OPEN_MAIN_JOURNAL | + SQLITE_OPEN_TEMP_JOURNAL | + SQLITE_OPEN_SUBJOURNAL | + SQLITE_OPEN_MASTER_JOURNAL | + SQLITE_OPEN_NOMUTEX | + SQLITE_OPEN_FULLMUTEX + ); /* Allocate the sqlite data structure */ - db = sqliteMalloc( sizeof(sqlite3) ); + db = sqlite3MallocZero( sizeof(sqlite3) ); if( db==0 ) goto opendb_out; + if( isThreadsafe ){ + db->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); + if( db->mutex==0 ){ + sqlite3_free(db); + db = 0; + goto opendb_out; + } + } + sqlite3_mutex_enter(db->mutex); db->errMask = 0xff; - db->priorNewRowid = 0; - db->magic = SQLITE_MAGIC_BUSY; db->nDb = 2; + db->magic = SQLITE_MAGIC_BUSY; db->aDb = db->aDbStatic; + + assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); + memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); db->autoCommit = 1; + db->nextAutovac = -1; + db->nextPagesize = 0; db->flags |= SQLITE_ShortColNames #if SQLITE_DEFAULT_FILE_FORMAT<4 | SQLITE_LegacyFileFmt @@ -938,46 +1579,56 @@ | SQLITE_LoadExtension #endif ; - sqlite3HashInit(&db->aFunc, SQLITE_HASH_STRING, 0); - sqlite3HashInit(&db->aCollSeq, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&db->aCollSeq); #ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3HashInit(&db->aModule, SQLITE_HASH_STRING, 0); + sqlite3HashInit(&db->aModule); #endif + db->pVfs = sqlite3_vfs_find(zVfs); + if( !db->pVfs ){ + rc = SQLITE_ERROR; + sqlite3Error(db, rc, "no such vfs: %s", zVfs); + goto opendb_out; + } + /* Add the default collation sequence BINARY. BINARY works for both UTF-8 ** and UTF-16, so add a version for each to avoid any unnecessary ** conversions. The only error that can occur here is a malloc() failure. 
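/*
** [Editor's note] Illustrative sketch, not part of the patch: because
** sqlite3_limit() reports the prior value, a limit can be lowered
** temporarily and restored later.  capAttached is a hypothetical name.
*/
#include <sqlite3.h>

static int capAttached(sqlite3 *db){
  int old = sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 2);  /* allow 2 ATTACHes */
  return old;                                             /* previous limit */
}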
*/ - if( createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0) || - createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0) || - createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0) || - (db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 6, 0))==0 - ){ - assert( sqlite3MallocFailed() ); - db->magic = SQLITE_MAGIC_CLOSED; + createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0); + createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0); + createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0); + createCollation(db, "RTRIM", SQLITE_UTF8, (void*)1, binCollFunc, 0); + if( db->mallocFailed ){ goto opendb_out; } + db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0); + assert( db->pDfltColl!=0 ); /* Also add a UTF-8 case-insensitive collation sequence. */ createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0); /* Set flags on the built-in collating sequences */ db->pDfltColl->type = SQLITE_COLL_BINARY; - pColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "NOCASE", 6, 0); + pColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "NOCASE", 0); if( pColl ){ pColl->type = SQLITE_COLL_NOCASE; } /* Open the backend database driver */ - rc = sqlite3BtreeFactory(db, zFilename, 0, SQLITE_DEFAULT_CACHE_SIZE, + db->openFlags = flags; + rc = sqlite3BtreeFactory(db, zFilename, 0, SQLITE_DEFAULT_CACHE_SIZE, + flags | SQLITE_OPEN_MAIN_DB, &db->aDb[0].pBt); if( rc!=SQLITE_OK ){ + if( rc==SQLITE_IOERR_NOMEM ){ + rc = SQLITE_NOMEM; + } sqlite3Error(db, rc, 0); - db->magic = SQLITE_MAGIC_CLOSED; goto opendb_out; } - db->aDb[0].pSchema = sqlite3SchemaGet(db->aDb[0].pBt); - db->aDb[1].pSchema = sqlite3SchemaGet(0); + db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt); + db->aDb[1].pSchema = sqlite3SchemaGet(db, 0); /* The default safety_level for the main database is 'full'; for the temp @@ -991,7 +1642,7 @@ #endif db->magic = SQLITE_MAGIC_OPEN; - if( sqlite3MallocFailed() ){ + if( db->mallocFailed ){ goto opendb_out; } @@ -1005,31 +1656,44 @@ /* Load automatic extensions - extensions that have been registered ** using the sqlite3_automatic_extension() API. 
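/*
** [Editor's note] Illustrative sketch, not part of the patch: the flags and
** VFS name accepted by openDatabase() are exposed through the new
** sqlite3_open_v2() entry point.  openReadOnly is a hypothetical name.
*/
#include <sqlite3.h>

static int openReadOnly(const char *zFile, sqlite3 **ppDb){
  return sqlite3_open_v2(zFile, ppDb,
                         SQLITE_OPEN_READONLY | SQLITE_OPEN_FULLMUTEX,
                         0 /* default VFS */);
}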
*/ - (void)sqlite3AutoLoadExtensions(db); - if( sqlite3_errcode(db)!=SQLITE_OK ){ + sqlite3AutoLoadExtensions(db); + rc = sqlite3_errcode(db); + if( rc!=SQLITE_OK ){ goto opendb_out; } #ifdef SQLITE_ENABLE_FTS1 - if( !sqlite3MallocFailed() ){ + if( !db->mallocFailed ){ extern int sqlite3Fts1Init(sqlite3*); rc = sqlite3Fts1Init(db); } #endif #ifdef SQLITE_ENABLE_FTS2 - if( !sqlite3MallocFailed() && rc==SQLITE_OK ){ + if( !db->mallocFailed && rc==SQLITE_OK ){ extern int sqlite3Fts2Init(sqlite3*); rc = sqlite3Fts2Init(db); } #endif +#ifdef SQLITE_ENABLE_FTS3 + if( !db->mallocFailed && rc==SQLITE_OK ){ + rc = sqlite3Fts3Init(db); + } +#endif + #ifdef SQLITE_ENABLE_ICU - if( !sqlite3MallocFailed() && rc==SQLITE_OK ){ - extern int sqlite3IcuInit(sqlite3*); + if( !db->mallocFailed && rc==SQLITE_OK ){ rc = sqlite3IcuInit(db); } #endif + +#ifdef SQLITE_ENABLE_RTREE + if( !db->mallocFailed && rc==SQLITE_OK){ + rc = sqlite3RtreeInit(db); + } +#endif + sqlite3Error(db, rc, 0); /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking @@ -1042,10 +1706,21 @@ SQLITE_DEFAULT_LOCKING_MODE); #endif + /* Enable the lookaside-malloc subsystem */ + setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside, + sqlite3GlobalConfig.nLookaside); + opendb_out: - if( SQLITE_NOMEM==(rc = sqlite3_errcode(db)) ){ + if( db ){ + assert( db->mutex!=0 || isThreadsafe==0 || sqlite3GlobalConfig.bFullMutex==0 ); + sqlite3_mutex_leave(db->mutex); + } + rc = sqlite3_errcode(db); + if( rc==SQLITE_NOMEM ){ sqlite3_close(db); db = 0; + }else if( rc!=SQLITE_OK ){ + db->magic = SQLITE_MAGIC_SICK; } *ppDb = db; return sqlite3ApiExit(0, rc); @@ -1058,7 +1733,16 @@ const char *zFilename, sqlite3 **ppDb ){ - return openDatabase(zFilename, ppDb); + return openDatabase(zFilename, ppDb, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); +} +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +){ + return openDatabase(filename, ppDb, flags, zVfs); } #ifndef SQLITE_OMIT_UTF16 @@ -1070,24 +1754,28 @@ sqlite3 **ppDb ){ char const *zFilename8; /* zFilename encoded in UTF-8 instead of UTF-16 */ - int rc = SQLITE_OK; sqlite3_value *pVal; + int rc; assert( zFilename ); assert( ppDb ); *ppDb = 0; - pVal = sqlite3ValueNew(); +#ifndef SQLITE_OMIT_AUTOINIT + rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC); zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8); if( zFilename8 ){ - rc = openDatabase(zFilename8, ppDb); - if( rc==SQLITE_OK && *ppDb ){ - rc = sqlite3_exec(*ppDb, "PRAGMA encoding = 'UTF-16'", 0, 0, 0); - if( rc!=SQLITE_OK ){ - sqlite3_close(*ppDb); - *ppDb = 0; - } + rc = openDatabase(zFilename8, ppDb, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); + assert( *ppDb || rc==SQLITE_NOMEM ); + if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){ + ENC(*ppDb) = SQLITE_UTF16NATIVE; } + }else{ + rc = SQLITE_NOMEM; } sqlite3ValueFree(pVal); @@ -1096,45 +1784,6 @@ #endif /* SQLITE_OMIT_UTF16 */ /* -** The following routine destroys a virtual machine that is created by -** the sqlite3_compile() routine. The integer returned is an SQLITE_ -** success/failure code that describes the result of executing the virtual -** machine. -** -** This routine sets the error code and string returned by -** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). 
-*/ -int sqlite3_finalize(sqlite3_stmt *pStmt){ - int rc; - if( pStmt==0 ){ - rc = SQLITE_OK; - }else{ - rc = sqlite3VdbeFinalize((Vdbe*)pStmt); - } - return rc; -} - -/* -** Terminate the current execution of an SQL statement and reset it -** back to its starting state so that it can be reused. A success code from -** the prior execution is returned. -** -** This routine sets the error code and string returned by -** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). -*/ -int sqlite3_reset(sqlite3_stmt *pStmt){ - int rc; - if( pStmt==0 ){ - rc = SQLITE_OK; - }else{ - rc = sqlite3VdbeReset((Vdbe*)pStmt); - sqlite3VdbeMakeReady((Vdbe*)pStmt, -1, 0, 0, 0); - assert( (rc & (sqlite3_db_handle(pStmt)->errMask))==rc ); - } - return rc; -} - -/* ** Register a new collation sequence with the database handle db. */ int sqlite3_create_collation( @@ -1145,9 +1794,12 @@ int(*xCompare)(void*,int,const void*,int,const void*) ){ int rc; - assert( !sqlite3MallocFailed() ); + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); rc = createCollation(db, zName, enc, pCtx, xCompare, 0); - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } /* @@ -1162,9 +1814,12 @@ void(*xDel)(void*) ){ int rc; - assert( !sqlite3MallocFailed() ); + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); rc = createCollation(db, zName, enc, pCtx, xCompare, xDel); - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } #ifndef SQLITE_OMIT_UTF16 @@ -1173,20 +1828,23 @@ */ int sqlite3_create_collation16( sqlite3* db, - const char *zName, + const void *zName, int enc, void* pCtx, int(*xCompare)(void*,int,const void*,int,const void*) ){ int rc = SQLITE_OK; - char *zName8; - assert( !sqlite3MallocFailed() ); - zName8 = sqlite3Utf16to8(zName, -1); + char *zName8; + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zName8 = sqlite3Utf16to8(db, zName, -1); if( zName8 ){ rc = createCollation(db, zName8, enc, pCtx, xCompare, 0); - sqliteFree(zName8); + sqlite3DbFree(db, zName8); } - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } #endif /* SQLITE_OMIT_UTF16 */ @@ -1199,12 +1857,11 @@ void *pCollNeededArg, void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*) ){ - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } + sqlite3_mutex_enter(db->mutex); db->xCollNeeded = xCollNeeded; db->xCollNeeded16 = 0; db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } @@ -1218,25 +1875,26 @@ void *pCollNeededArg, void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*) ){ - if( sqlite3SafetyCheck(db) ){ - return SQLITE_MISUSE; - } + sqlite3_mutex_enter(db->mutex); db->xCollNeeded = 0; db->xCollNeeded16 = xCollNeeded16; db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } #endif /* SQLITE_OMIT_UTF16 */ #ifndef SQLITE_OMIT_GLOBALRECOVER +#ifndef SQLITE_OMIT_DEPRECATED /* ** This function is now an anachronism. It used to be used to recover from a ** malloc() failure, but SQLite now does this automatically. */ -int sqlite3_global_recover(){ +int sqlite3_global_recover(void){ return SQLITE_OK; } #endif +#endif /* ** Test to see whether or not the database connection is in autocommit @@ -1261,46 +1919,17 @@ } #endif - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** Enable or disable the shared pager and schema features for the -** current thread. 
-** -** This routine should only be called when there are no open -** database connections. -*/ -int sqlite3_enable_shared_cache(int enable){ - ThreadData *pTd = sqlite3ThreadData(); - if( pTd ){ - /* It is only legal to call sqlite3_enable_shared_cache() when there - ** are no currently open b-trees that were opened by the calling thread. - ** This condition is only easy to detect if the shared-cache were - ** previously enabled (and is being disabled). - */ - if( pTd->pBtree && !enable ){ - assert( pTd->useSharedData ); - return SQLITE_MISUSE; - } - - pTd->useSharedData = enable; - sqlite3ReleaseThreadData(); - } - return sqlite3ApiExit(0, SQLITE_OK); -} -#endif - +#ifndef SQLITE_OMIT_DEPRECATED /* ** This is a convenience routine that makes sure that all thread-specific ** data for this thread has been deallocated. +** +** SQLite no longer uses thread-specific data so this routine is now a +** no-op. It is retained for historical compatibility. */ void sqlite3_thread_cleanup(void){ - ThreadData *pTd = sqlite3OsThreadSpecificData(0); - if( pTd ){ - memset(pTd, 0, sizeof(*pTd)); - sqlite3OsThreadSpecificData(-1); - } } +#endif /* ** Return meta information about a specific column of a database table. @@ -1316,7 +1945,7 @@ char const **pzCollSeq, /* OUTPUT: Collation sequence name */ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if colums is auto-increment */ + int *pAutoinc /* OUTPUT: True if column is auto-increment */ ){ int rc; char *zErrMsg = 0; @@ -1331,9 +1960,9 @@ int autoinc = 0; /* Ensure the database schema has been loaded */ - if( sqlite3SafetyOn(db) ){ - return SQLITE_MISUSE; - } + sqlite3_mutex_enter(db->mutex); + (void)sqlite3SafetyOn(db); + sqlite3BtreeEnterAll(db); rc = sqlite3Init(db, &zErrMsg); if( SQLITE_OK!=rc ){ goto error_out; @@ -1378,9 +2007,9 @@ if( pCol ){ zDataType = pCol->zType; zCollSeq = pCol->zColl; - notnull = (pCol->notNull?1:0); - primarykey = (pCol->isPrimKey?1:0); - autoinc = ((pTab->iPKey==iCol && pTab->autoInc)?1:0); + notnull = pCol->notNull!=0; + primarykey = pCol->isPrimKey!=0; + autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0; }else{ zDataType = "INTEGER"; primarykey = 1; @@ -1390,9 +2019,8 @@ } error_out: - if( sqlite3SafetyOff(db) ){ - rc = SQLITE_MISUSE; - } + sqlite3BtreeLeaveAll(db); + (void)sqlite3SafetyOff(db); /* Whether the function call succeeded or failed, set the output parameters ** to whatever their local counterparts contain. If an error did occur, @@ -1405,39 +2033,220 @@ if( pAutoinc ) *pAutoinc = autoinc; if( SQLITE_OK==rc && !pTab ){ - sqlite3SetString(&zErrMsg, "no such table column: ", zTableName, ".", - zColumnName, 0); + sqlite3DbFree(db, zErrMsg); + zErrMsg = sqlite3MPrintf(db, "no such table column: %s.%s", zTableName, + zColumnName); rc = SQLITE_ERROR; } sqlite3Error(db, rc, (zErrMsg?"%s":0), zErrMsg); - sqliteFree(zErrMsg); - return sqlite3ApiExit(db, rc); -} -#endif - -/* -** Set all the parameters in the compiled SQL statement to NULL. -*/ -int sqlite3_clear_bindings(sqlite3_stmt *pStmt){ - int i; - int rc = SQLITE_OK; - for(i=1; rc==SQLITE_OK && i<=sqlite3_bind_parameter_count(pStmt); i++){ - rc = sqlite3_bind_null(pStmt, i); - } + sqlite3DbFree(db, zErrMsg); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); return rc; } +#endif /* ** Sleep for a little while. Return the amount of time slept. 
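/*
** [Editor's note] Illustrative sketch, not part of the patch: calling the
** column-metadata API shown above (available only when the library is built
** with SQLITE_ENABLE_COLUMN_METADATA).  Output pointers that are not needed
** may be passed as NULL.  isAutoIncPk is a hypothetical name.
*/
#include <sqlite3.h>

static int isAutoIncPk(sqlite3 *db, const char *zTbl, const char *zCol){
  int notNull = 0, primaryKey = 0, autoInc = 0;
  int rc = sqlite3_table_column_metadata(db, "main", zTbl, zCol,
                                         0, 0, &notNull, &primaryKey, &autoInc);
  return rc==SQLITE_OK && primaryKey && autoInc;
}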
*/ int sqlite3_sleep(int ms){ - return sqlite3OsSleep(ms); + sqlite3_vfs *pVfs; + int rc; + pVfs = sqlite3_vfs_find(0); + if( pVfs==0 ) return 0; + + /* This function works in milliseconds, but the underlying OsSleep() + ** API uses microseconds. Hence the 1000's. + */ + rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + return rc; } /* ** Enable or disable the extended result codes. */ int sqlite3_extended_result_codes(sqlite3 *db, int onoff){ + sqlite3_mutex_enter(db->mutex); db->errMask = onoff ? 0xffffffff : 0xff; + sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } + +/* +** Invoke the xFileControl method on a particular database. +*/ +int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ + int rc = SQLITE_ERROR; + int iDb; + sqlite3_mutex_enter(db->mutex); + if( zDbName==0 ){ + iDb = 0; + }else{ + for(iDb=0; iDbnDb; iDb++){ + if( strcmp(db->aDb[iDb].zName, zDbName)==0 ) break; + } + } + if( iDbnDb ){ + Btree *pBtree = db->aDb[iDb].pBt; + if( pBtree ){ + Pager *pPager; + sqlite3_file *fd; + sqlite3BtreeEnter(pBtree); + pPager = sqlite3BtreePager(pBtree); + assert( pPager!=0 ); + fd = sqlite3PagerFile(pPager); + assert( fd!=0 ); + if( fd->pMethods ){ + rc = sqlite3OsFileControl(fd, op, pArg); + } + sqlite3BtreeLeave(pBtree); + } + } + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Interface to the testing logic. +*/ +int sqlite3_test_control(int op, ...){ + int rc = 0; +#ifndef SQLITE_OMIT_BUILTIN_TEST + va_list ap; + va_start(ap, op); + switch( op ){ + + /* + ** Save the current state of the PRNG. + */ + case SQLITE_TESTCTRL_PRNG_SAVE: { + sqlite3PrngSaveState(); + break; + } + + /* + ** Restore the state of the PRNG to the last state saved using + ** PRNG_SAVE. If PRNG_SAVE has never before been called, then + ** this verb acts like PRNG_RESET. + */ + case SQLITE_TESTCTRL_PRNG_RESTORE: { + sqlite3PrngRestoreState(); + break; + } + + /* + ** Reset the PRNG back to its uninitialized state. The next call + ** to sqlite3_randomness() will reseed the PRNG using a single call + ** to the xRandomness method of the default VFS. + */ + case SQLITE_TESTCTRL_PRNG_RESET: { + sqlite3PrngResetState(); + break; + } + + /* + ** sqlite3_test_control(BITVEC_TEST, size, program) + ** + ** Run a test against a Bitvec object of size. The program argument + ** is an array of integers that defines the test. Return -1 on a + ** memory allocation error, 0 on success, or non-zero for an error. + ** See the sqlite3BitvecBuiltinTest() for additional information. + */ + case SQLITE_TESTCTRL_BITVEC_TEST: { + int sz = va_arg(ap, int); + int *aProg = va_arg(ap, int*); + rc = sqlite3BitvecBuiltinTest(sz, aProg); + break; + } + + /* + ** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd) + ** + ** Register hooks to call to indicate which malloc() failures + ** are benign. + */ + case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: { + typedef void (*void_function)(void); + void_function xBenignBegin; + void_function xBenignEnd; + xBenignBegin = va_arg(ap, void_function); + xBenignEnd = va_arg(ap, void_function); + sqlite3BenignMallocHooks(xBenignBegin, xBenignEnd); + break; + } + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, unsigned int X) + ** + ** Set the PENDING byte to the value in the argument, if X>0. + ** Make no changes if X==0. Return the value of the pending byte + ** as it existing before this routine was called. + ** + ** IMPORTANT: Changing the PENDING byte from 0x40000000 results in + ** an incompatible database file format. 
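/*
** [Editor's note] Illustrative sketch, not part of the patch:
** sqlite3_file_control() routes an opcode straight to the xFileControl
** method of the file underlying the named database.  SQLITE_FCNTL_LOCKSTATE
** (used mostly by the test harness) reports the current lock level into an
** int.  queryLockState is a hypothetical name.
*/
#include <sqlite3.h>

static int queryLockState(sqlite3 *db, int *pLockLevel){
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCKSTATE, pLockLevel);
}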
Changing the PENDING byte + ** while any database connection is open results in undefined and + ** dileterious behavior. + */ + case SQLITE_TESTCTRL_PENDING_BYTE: { + unsigned int newVal = va_arg(ap, unsigned int); + rc = sqlite3PendingByte; + if( newVal ) sqlite3PendingByte = newVal; + break; + } + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, int X) + ** + ** This action provides a run-time test to see whether or not + ** assert() was enabled at compile-time. If X is true and assert() + ** is enabled, then the return value is true. If X is true and + ** assert() is disabled, then the return value is zero. If X is + ** false and assert() is enabled, then the assertion fires and the + ** process aborts. If X is false and assert() is disabled, then the + ** return value is zero. + */ + case SQLITE_TESTCTRL_ASSERT: { + volatile int x = 0; + assert( (x = va_arg(ap,int))!=0 ); + rc = x; + break; + } + + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, int X) + ** + ** This action provides a run-time test to see how the ALWAYS and + ** NEVER macros were defined at compile-time. + ** + ** The return value is ALWAYS(X). + ** + ** The recommended test is X==2. If the return value is 2, that means + ** ALWAYS() and NEVER() are both no-op pass-through macros, which is the + ** default setting. If the return value is 1, then ALWAYS() is either + ** hard-coded to true or else it asserts if its argument is false. + ** The first behavior (hard-coded to true) is the case if + ** SQLITE_TESTCTRL_ASSERT shows that assert() is disabled and the second + ** behavior (assert if the argument to ALWAYS() is false) is the case if + ** SQLITE_TESTCTRL_ASSERT shows that assert() is enabled. + ** + ** The run-time test procedure might look something like this: + ** + ** if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){ + ** // ALWAYS() and NEVER() are no-op pass-through macros + ** }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){ + ** // ALWAYS(x) asserts that x is true. NEVER(x) asserts x is false. + ** }else{ + ** // ALWAYS(x) is a constant 1. NEVER(x) is a constant 0. + ** } + */ + case SQLITE_TESTCTRL_ALWAYS: { + int x = va_arg(ap,int); + rc = ALWAYS(x); + break; + } + } + va_end(ap); +#endif /* SQLITE_OMIT_BUILTIN_TEST */ + return rc; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/malloc.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/malloc.c --- sqlite3-3.4.2/src/malloc.c 2007-08-08 02:04:52.000000000 +0100 +++ sqlite3-3.6.16/src/malloc.c 2009-06-27 01:48:33.000000000 +0100 @@ -9,711 +9,634 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* -** Memory allocation functions used throughout sqlite. ** +** Memory allocation functions used throughout sqlite. ** -** $Id: malloc.c,v 1.4 2007/08/08 01:04:52 drh Exp $ +** $Id: malloc.c,v 1.64 2009/06/27 00:48:33 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" #include -#include /* -** MALLOC WRAPPER ARCHITECTURE -** -** The sqlite code accesses dynamic memory allocation/deallocation by invoking -** the following six APIs (which may be implemented as macros). -** -** sqlite3Malloc() -** sqlite3MallocRaw() -** sqlite3Realloc() -** sqlite3ReallocOrFree() -** sqlite3Free() -** sqlite3AllocSize() -** -** The function sqlite3FreeX performs the same task as sqlite3Free and is -** guaranteed to be a real function. The same holds for sqlite3MallocX -** -** The above APIs are implemented in terms of the functions provided in the -** operating-system interface. 
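The SQLITE_TESTCTRL_ALWAYS comment above already sketches the recommended probe; spelled out as a complete program (assuming the SQLITE_TESTCTRL_ALWAYS and SQLITE_TESTCTRL_ASSERT opcodes are available from sqlite3.h) it looks like this:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){
    printf("ALWAYS() and NEVER() are no-op pass-through macros\n");
  }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){
    printf("ALWAYS(x) asserts that x is true; NEVER(x) asserts x is false\n");
  }else{
    printf("ALWAYS(x) is a constant 1; NEVER(x) is a constant 0\n");
  }
  return 0;
}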
The OS interface is never accessed directly -** by code outside of this file. -** -** sqlite3OsMalloc() -** sqlite3OsRealloc() -** sqlite3OsFree() -** sqlite3OsAllocationSize() -** -** Functions sqlite3MallocRaw() and sqlite3Realloc() may invoke -** sqlite3_release_memory() if a call to sqlite3OsMalloc() or -** sqlite3OsRealloc() fails (or if the soft-heap-limit for the thread is -** exceeded). Function sqlite3Malloc() usually invokes -** sqlite3MallocRaw(). -** -** MALLOC TEST WRAPPER ARCHITECTURE -** -** The test wrapper provides extra test facilities to ensure the library -** does not leak memory and handles the failure of the underlying OS level -** allocation system correctly. It is only present if the library is -** compiled with the SQLITE_MEMDEBUG macro set. -** -** * Guardposts to detect overwrites. -** * Ability to cause a specific Malloc() or Realloc() to fail. -** * Audit outstanding memory allocations (i.e check for leaks). -*/ - -#define MAX(x,y) ((x)>(y)?(x):(y)) +** This routine runs when the memory allocator sees that the +** total memory allocation is about to exceed the soft heap +** limit. +*/ +static void softHeapLimitEnforcer( + void *NotUsed, + sqlite3_int64 NotUsed2, + int allocSize +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + sqlite3_release_memory(allocSize); +} -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO) /* -** Set the soft heap-size limit for the current thread. Passing a negative -** value indicates no limit. +** Set the soft heap-size limit for the library. Passing a zero or +** negative value indicates no limit. */ void sqlite3_soft_heap_limit(int n){ - ThreadData *pTd = sqlite3ThreadData(); - if( pTd ){ - pTd->nSoftHeapLimit = n; + sqlite3_uint64 iLimit; + int overage; + if( n<0 ){ + iLimit = 0; + }else{ + iLimit = n; + } + sqlite3_initialize(); + if( iLimit>0 ){ + sqlite3MemoryAlarm(softHeapLimitEnforcer, 0, iLimit); + }else{ + sqlite3MemoryAlarm(0, 0, 0); + } + overage = (int)(sqlite3_memory_used() - (i64)n); + if( overage>0 ){ + sqlite3_release_memory(overage); } - sqlite3ReleaseThreadData(); } /* -** Release memory held by SQLite instances created by the current thread. +** Attempt to release up to n bytes of non-essential memory currently +** held by SQLite. An example of non-essential memory is memory used to +** cache database pages that are not currently in use. */ int sqlite3_release_memory(int n){ - return sqlite3PagerReleaseMemory(n); -} -#else -/* If SQLITE_ENABLE_MEMORY_MANAGEMENT is not defined, then define a version -** of sqlite3_release_memory() to be used by other code in this file. -** This is done for no better reason than to reduce the number of -** pre-processor #ifndef statements. -*/ -#define sqlite3_release_memory(x) 0 /* 0 == no memory freed */ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + int nRet = 0; +#if 0 + nRet += sqlite3VdbeReleaseMemory(n); #endif - -#ifdef SQLITE_MEMDEBUG -/*-------------------------------------------------------------------------- -** Begin code for memory allocation system test layer. -** -** Memory debugging is turned on by defining the SQLITE_MEMDEBUG macro. -** -** SQLITE_MEMDEBUG==1 -> Fence-posting only (thread safe) -** SQLITE_MEMDEBUG==2 -> Fence-posting + linked list of allocations (not ts) -** SQLITE_MEMDEBUG==3 -> Above + backtraces (not thread safe, req. glibc) -*/ - -/* Figure out whether or not to store backtrace() information for each malloc. -** The backtrace() function is only used if SQLITE_MEMDEBUG is set to 2 or -** greater and glibc is in use. 
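A sketch of driving the routines above from an application: sqlite3_soft_heap_limit() installs softHeapLimitEnforcer() as the memory alarm, and sqlite3_release_memory() only frees anything when the library was compiled with SQLITE_ENABLE_MEMORY_MANAGEMENT. The limit, table and blob sizes are illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_soft_heap_limit(1024*1024);        /* registers the enforcer callback */
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t(x); INSERT INTO t VALUES(zeroblob(200000));", 0, 0, 0);
  printf("memory in use:  %lld bytes\n", (long long)sqlite3_memory_used());
  printf("bytes released: %d\n", sqlite3_release_memory(100000));
  sqlite3_close(db);
  return 0;
}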
If we don't want to use backtrace(), then just -** define it as an empty macro and set the amount of space reserved to 0. -*/ -#if defined(__GLIBC__) && SQLITE_MEMDEBUG>2 - extern int backtrace(void **, int); - #define TESTALLOC_STACKSIZE 128 - #define TESTALLOC_STACKFRAMES ((TESTALLOC_STACKSIZE-8)/sizeof(void*)) + nRet += sqlite3PcacheReleaseMemory(n-nRet); + return nRet; #else - #define backtrace(x, y) - #define TESTALLOC_STACKSIZE 0 - #define TESTALLOC_STACKFRAMES 0 + UNUSED_PARAMETER(n); + return SQLITE_OK; #endif +} /* -** Number of 32-bit guard words. This should probably be a multiple of -** 2 since on 64-bit machines we want the value returned by sqliteMalloc() -** to be 8-byte aligned. +** State information local to the memory allocation subsystem. */ -#ifndef TESTALLOC_NGUARD -# define TESTALLOC_NGUARD 2 -#endif +static SQLITE_WSD struct Mem0Global { + /* Number of free pages for scratch and page-cache memory */ + u32 nScratchFree; + u32 nPageFree; -/* -** Size reserved for storing file-name along with each malloc()ed blob. -*/ -#define TESTALLOC_FILESIZE 64 + sqlite3_mutex *mutex; /* Mutex to serialize access */ -/* -** Size reserved for storing the user string. Each time a Malloc() or Realloc() -** call succeeds, up to TESTALLOC_USERSIZE bytes of the string pointed to by -** sqlite3_malloc_id are stored along with the other test system metadata. -*/ -#define TESTALLOC_USERSIZE 64 -const char *sqlite3_malloc_id = 0; + /* + ** The alarm callback and its arguments. The mem0.mutex lock will + ** be held while the callback is running. Recursive calls into + ** the memory subsystem are allowed, but no new callbacks will be + ** issued. The alarmBusy variable is set to prevent recursive + ** callbacks. + */ + sqlite3_int64 alarmThreshold; + void (*alarmCallback)(void*, sqlite3_int64,int); + void *alarmArg; + int alarmBusy; -/* -** Blocks used by the test layer have the following format: -** -** -** -** -** -** -** <32-bit line number> -** -** -*/ + /* + ** Pointers to the end of sqlite3GlobalConfig.pScratch and + ** sqlite3GlobalConfig.pPage to a block of memory that records + ** which pages are available. + */ + u32 *aScratchFree; + u32 *aPageFree; +} mem0 = { 62560955, 0, 0, 0, 0, 0, 0, 0, 0 }; -#define TESTALLOC_OFFSET_GUARD1(p) (sizeof(void *) * 2) -#define TESTALLOC_OFFSET_DATA(p) ( \ - TESTALLOC_OFFSET_GUARD1(p) + sizeof(u32) * TESTALLOC_NGUARD \ -) -#define TESTALLOC_OFFSET_GUARD2(p) ( \ - TESTALLOC_OFFSET_DATA(p) + sqlite3OsAllocationSize(p) - TESTALLOC_OVERHEAD \ -) -#define TESTALLOC_OFFSET_LINENUMBER(p) ( \ - TESTALLOC_OFFSET_GUARD2(p) + sizeof(u32) * TESTALLOC_NGUARD \ -) -#define TESTALLOC_OFFSET_FILENAME(p) ( \ - TESTALLOC_OFFSET_LINENUMBER(p) + sizeof(u32) \ -) -#define TESTALLOC_OFFSET_USER(p) ( \ - TESTALLOC_OFFSET_FILENAME(p) + TESTALLOC_FILESIZE \ -) -#define TESTALLOC_OFFSET_STACK(p) ( \ - TESTALLOC_OFFSET_USER(p) + TESTALLOC_USERSIZE + 8 - \ - (TESTALLOC_OFFSET_USER(p) % 8) \ -) - -#define TESTALLOC_OVERHEAD ( \ - sizeof(void *)*2 + /* pPrev and pNext pointers */ \ - TESTALLOC_NGUARD*sizeof(u32)*2 + /* Guard words */ \ - sizeof(u32) + TESTALLOC_FILESIZE + /* File and line number */ \ - TESTALLOC_USERSIZE + /* User string */ \ - TESTALLOC_STACKSIZE /* backtrace() stack */ \ -) - - -/* -** For keeping track of the number of mallocs and frees. This -** is used to check for memory leaks. 
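The initialization code above only carves up buffers that the application has supplied; those buffers normally arrive through sqlite3_config() with SQLITE_CONFIG_SCRATCH and SQLITE_CONFIG_PAGECACHE, which must be issued before the library is initialized. A sketch follows; the slot sizes and counts are illustrative, not recommendations.

#include <stdio.h>
#include <sqlite3.h>

static char aScratch[6*25000];        /* 6 scratch slots of 25000 bytes each */
static char aPageCache[100*1100];     /* 100 page-cache slots of 1100 bytes each */

int main(void){
  int rc1 = sqlite3_config(SQLITE_CONFIG_SCRATCH,   aScratch,   25000, 6);
  int rc2 = sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageCache, 1100, 100);
  printf("scratch rc=%d, pagecache rc=%d\n", rc1, rc2);
  sqlite3_initialize();
  /* ... normal database work ... */
  sqlite3_shutdown();
  return 0;
}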
The iMallocFail and iMallocReset -** values are used to simulate malloc() failures during testing in -** order to verify that the library correctly handles an out-of-memory -** condition. -*/ -int sqlite3_nMalloc; /* Number of sqliteMalloc() calls */ -int sqlite3_nFree; /* Number of sqliteFree() calls */ -int sqlite3_memUsed; /* TODO Total memory obtained from malloc */ -int sqlite3_memMax; /* TODO Mem usage high-water mark */ -int sqlite3_iMallocFail; /* Fail sqliteMalloc() after this many calls */ -int sqlite3_iMallocReset = -1; /* When iMallocFail reaches 0, set to this */ - -void *sqlite3_pFirst = 0; /* Pointer to linked list of allocations */ -int sqlite3_nMaxAlloc = 0; /* High water mark of ThreadData.nAlloc */ -int sqlite3_mallocDisallowed = 0; /* assert() in sqlite3Malloc() if set */ -int sqlite3_isFail = 0; /* True if all malloc calls should fail */ -const char *sqlite3_zFile = 0; /* Filename to associate debug info with */ -int sqlite3_iLine = 0; /* Line number for debug info */ -int sqlite3_mallocfail_trace = 0; /* Print a msg on malloc fail if true */ - -/* -** Check for a simulated memory allocation failure. Return true if -** the failure should be simulated. Return false to proceed as normal. -*/ -int sqlite3TestMallocFail(){ - if( sqlite3_isFail ){ - return 1; - } - if( sqlite3_iMallocFail>=0 ){ - sqlite3_iMallocFail--; - if( sqlite3_iMallocFail==0 ){ - sqlite3_iMallocFail = sqlite3_iMallocReset; - sqlite3_isFail = 1; - if( sqlite3_mallocfail_trace ){ - sqlite3DebugPrintf("###_malloc_fails_###\n"); - } - return 1; - } - } - return 0; -} +#define mem0 GLOBAL(struct Mem0Global, mem0) /* -** The argument is a pointer returned by sqlite3OsMalloc() or xRealloc(). -** assert() that the first and last (TESTALLOC_NGUARD*4) bytes are set to the -** values set by the applyGuards() function. +** Initialize the memory allocation subsystem. */ -static void checkGuards(u32 *p) -{ - int i; - char *zAlloc = (char *)p; - char *z; - - /* First set of guard words */ - z = &zAlloc[TESTALLOC_OFFSET_GUARD1(p)]; - for(i=0; i=100 + && sqlite3GlobalConfig.nScratch>=0 ){ + int i; + sqlite3GlobalConfig.szScratch = ROUNDDOWN8(sqlite3GlobalConfig.szScratch-4); + mem0.aScratchFree = (u32*)&((char*)sqlite3GlobalConfig.pScratch) + [sqlite3GlobalConfig.szScratch*sqlite3GlobalConfig.nScratch]; + for(i=0; i=512 + && sqlite3GlobalConfig.nPage>=1 ){ + int i; + int overhead; + int sz = ROUNDDOWN8(sqlite3GlobalConfig.szPage); + int n = sqlite3GlobalConfig.nPage; + overhead = (4*n + sz - 1)/sz; + sqlite3GlobalConfig.nPage -= overhead; + mem0.aPageFree = (u32*)&((char*)sqlite3GlobalConfig.pPage) + [sqlite3GlobalConfig.szPage*sqlite3GlobalConfig.nPage]; + for(i=0; i1 /* -** The argument points to an Os level allocation. Link it into the threads list -** of allocations. +** Trigger the alarm */ -static void linkAlloc(void *p){ - void **pp = (void **)p; - pp[0] = 0; - pp[1] = sqlite3_pFirst; - if( sqlite3_pFirst ){ - ((void **)sqlite3_pFirst)[0] = p; - } - sqlite3_pFirst = p; +static void sqlite3MallocAlarm(int nByte){ + void (*xCallback)(void*,sqlite3_int64,int); + sqlite3_int64 nowUsed; + void *pArg; + if( mem0.alarmCallback==0 || mem0.alarmBusy ) return; + mem0.alarmBusy = 1; + xCallback = mem0.alarmCallback; + nowUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); + pArg = mem0.alarmArg; + sqlite3_mutex_leave(mem0.mutex); + xCallback(pArg, nowUsed, nByte); + sqlite3_mutex_enter(mem0.mutex); + mem0.alarmBusy = 0; } /* -** The argument points to an Os level allocation. 
Unlinke it from the threads -** list of allocations. +** Do a memory allocation with statistics and alarms. Assume the +** lock is already held. */ -static void unlinkAlloc(void *p) -{ - void **pp = (void **)p; - if( p==sqlite3_pFirst ){ - assert(!pp[0]); - assert(!pp[1] || ((void **)(pp[1]))[0]==p); - sqlite3_pFirst = pp[1]; - if( sqlite3_pFirst ){ - ((void **)sqlite3_pFirst)[0] = 0; - } - }else{ - void **pprev = pp[0]; - void **pnext = pp[1]; - assert(pprev); - assert(pprev[1]==p); - pprev[1] = (void *)pnext; - if( pnext ){ - assert(pnext[0]==p); - pnext[0] = (void *)pprev; +static int mallocWithAlarm(int n, void **pp){ + int nFull; + void *p; + assert( sqlite3_mutex_held(mem0.mutex) ); + nFull = sqlite3GlobalConfig.m.xRoundup(n); + sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, n); + if( mem0.alarmCallback!=0 ){ + int nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); + if( nUsed+nFull >= mem0.alarmThreshold ){ + sqlite3MallocAlarm(nFull); } } + p = sqlite3GlobalConfig.m.xMalloc(nFull); + if( p==0 && mem0.alarmCallback ){ + sqlite3MallocAlarm(nFull); + p = sqlite3GlobalConfig.m.xMalloc(nFull); + } + if( p ){ + nFull = sqlite3MallocSize(p); + sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nFull); + } + *pp = p; + return nFull; } /* -** Pointer p is a pointer to an OS level allocation that has just been -** realloc()ed. Set the list pointers that point to this entry to it's new -** location. -*/ -static void relinkAlloc(void *p) -{ - void **pp = (void **)p; - if( pp[0] ){ - ((void **)(pp[0]))[1] = p; +** Allocate memory. This routine is like sqlite3_malloc() except that it +** assumes the memory subsystem has already been initialized. +*/ +void *sqlite3Malloc(int n){ + void *p; + if( n<=0 || n>=0x7fffff00 ){ + /* A memory allocation of a number of bytes which is near the maximum + ** signed integer value might cause an integer overflow inside of the + ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving + ** 255 bytes of overhead. SQLite itself will never use anything near + ** this amount. The only way to reach the limit is with sqlite3_malloc() */ + p = 0; + }else if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + mallocWithAlarm(n, &p); + sqlite3_mutex_leave(mem0.mutex); }else{ - sqlite3_pFirst = p; - } - if( pp[1] ){ - ((void **)(pp[1]))[0] = p; + p = sqlite3GlobalConfig.m.xMalloc(n); } + return p; } -#else -#define linkAlloc(x) -#define relinkAlloc(x) -#define unlinkAlloc(x) -#endif /* -** This function sets the result of the Tcl interpreter passed as an argument -** to a list containing an entry for each currently outstanding call made to -** sqliteMalloc and friends by the current thread. Each list entry is itself a -** list, consisting of the following (in order): -** -** * The number of bytes allocated -** * The __FILE__ macro at the time of the sqliteMalloc() call. -** * The __LINE__ macro ... -** * The value of the sqlite3_malloc_id variable ... -** * The output of backtrace() (if available) ... -** -** Todo: We could have a version of this function that outputs to stdout, -** to debug memory leaks when Tcl is not available. +** This version of the memory allocation is for use by the application. +** First make sure the memory subsystem is initialized, then do the +** allocation. 
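mallocWithAlarm() above is where the SQLITE_STATUS_MEMORY_USED and SQLITE_STATUS_MALLOC_SIZE counters are maintained (only while bMemstat is on). They can be read back through the public status interface, as in this small sketch:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  int cur = 0, hi = 0;
  void *p;
  sqlite3_initialize();
  p = sqlite3_malloc(5000);
  sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0);
  printf("outstanding: %d bytes (highwater %d)\n", cur, hi);
  sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &cur, &hi, 0);
  printf("largest single request so far: %d bytes\n", hi);
  sqlite3_free(p);
  return 0;
}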
*/ -#if defined(TCLSH) && defined(SQLITE_DEBUG) && SQLITE_MEMDEBUG>1 -#include -int sqlite3OutstandingMallocs(Tcl_Interp *interp){ - void *p; - Tcl_Obj *pRes = Tcl_NewObj(); - Tcl_IncrRefCount(pRes); - +void *sqlite3_malloc(int n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return sqlite3Malloc(n); +} - for(p=sqlite3_pFirst; p; p=((void **)p)[1]){ - Tcl_Obj *pEntry = Tcl_NewObj(); - Tcl_Obj *pStack = Tcl_NewObj(); - char *z; - u32 iLine; - int nBytes = sqlite3OsAllocationSize(p) - TESTALLOC_OVERHEAD; - char *zAlloc = (char *)p; - int i; +/* +** Each thread may only have a single outstanding allocation from +** xScratchMalloc(). We verify this constraint in the single-threaded +** case by setting scratchAllocOut to 1 when an allocation +** is outstanding clearing it when the allocation is freed. +*/ +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) +static int scratchAllocOut = 0; +#endif - Tcl_ListObjAppendElement(0, pEntry, Tcl_NewIntObj(nBytes)); - z = &zAlloc[TESTALLOC_OFFSET_FILENAME(p)]; - Tcl_ListObjAppendElement(0, pEntry, Tcl_NewStringObj(z, -1)); +/* +** Allocate memory that is to be used and released right away. +** This routine is similar to alloca() in that it is not intended +** for situations where the memory might be held long-term. This +** routine is intended to get memory to old large transient data +** structures that would not normally fit on the stack of an +** embedded processor. +*/ +void *sqlite3ScratchMalloc(int n){ + void *p; + assert( n>0 ); - z = &zAlloc[TESTALLOC_OFFSET_LINENUMBER(p)]; - memcpy(&iLine, z, sizeof(u32)); - Tcl_ListObjAppendElement(0, pEntry, Tcl_NewIntObj(iLine)); - - z = &zAlloc[TESTALLOC_OFFSET_USER(p)]; - Tcl_ListObjAppendElement(0, pEntry, Tcl_NewStringObj(z, -1)); - - z = &zAlloc[TESTALLOC_OFFSET_STACK(p)]; - for(i=0; inAlloc); -#endif - assert( !sqlite3_mallocDisallowed ); - if( !sqlite3TestMallocFail() ){ - u32 *p; - p = (u32 *)sqlite3OsMalloc(n + TESTALLOC_OVERHEAD); - assert(p); - sqlite3_nMalloc++; - applyGuards(p); - linkAlloc(p); - sqlite3OsLeaveMutex(); - return (void *)(&p[TESTALLOC_NGUARD + 2*sizeof(void *)/sizeof(u32)]); + return p; + +scratch_overflow: + if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n); + n = mallocWithAlarm(n, &p); + if( p ) sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, n); + sqlite3_mutex_leave(mem0.mutex); + }else{ + p = sqlite3GlobalConfig.m.xMalloc(n); } - sqlite3OsLeaveMutex(); - return 0; +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) + scratchAllocOut = p!=0; +#endif + return p; } - -static int OSSIZEOF(void *p){ +void sqlite3ScratchFree(void *p){ if( p ){ - u32 *pOs = (u32 *)getOsPointer(p); - return sqlite3OsAllocationSize(pOs) - TESTALLOC_OVERHEAD; + +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) + /* Verify that no more than one scratch allocation per thread + ** is outstanding at one time. (This is only checked in the + ** single-threaded case since checking in the multi-threaded case + ** would be much more complicated.) 
*/ + assert( scratchAllocOut==1 ); + scratchAllocOut = 0; +#endif + + if( sqlite3GlobalConfig.pScratch==0 + || p=(void*)mem0.aScratchFree ){ + if( sqlite3GlobalConfig.bMemstat ){ + int iSize = sqlite3MallocSize(p); + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, -iSize); + sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -iSize); + sqlite3GlobalConfig.m.xFree(p); + sqlite3_mutex_leave(mem0.mutex); + }else{ + sqlite3GlobalConfig.m.xFree(p); + } + }else{ + int i; + i = (int)((u8*)p - (u8*)sqlite3GlobalConfig.pScratch); + i /= sqlite3GlobalConfig.szScratch; + assert( i>=0 && i=db->lookaside.pStart && plookaside.pEnd; } +#else +#define isLookaside(A,B) 0 +#endif /* -** This is the test layer's wrapper around sqlite3OsRealloc(). +** Return the size of a memory allocation previously obtained from +** sqlite3Malloc() or sqlite3_malloc(). */ -static void * OSREALLOC(void *pRealloc, int n){ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - sqlite3_nMaxAlloc = - MAX(sqlite3_nMaxAlloc, sqlite3ThreadDataReadOnly()->nAlloc); -#endif - assert( !sqlite3_mallocDisallowed ); - if( !sqlite3TestMallocFail() ){ - u32 *p = (u32 *)getOsPointer(pRealloc); - checkGuards(p); - p = sqlite3OsRealloc(p, n + TESTALLOC_OVERHEAD); - applyGuards(p); - relinkAlloc(p); - return (void *)(&p[TESTALLOC_NGUARD + 2*sizeof(void *)/sizeof(u32)]); - } - return 0; -} - -static void OSMALLOC_FAILED(){ - sqlite3_isFail = 0; +int sqlite3MallocSize(void *p){ + return sqlite3GlobalConfig.m.xSize(p); } - -#else -/* Define macros to call the sqlite3OsXXX interface directly if -** the SQLITE_MEMDEBUG macro is not defined. -*/ -#define OSMALLOC(x) sqlite3OsMalloc(x) -#define OSREALLOC(x,y) sqlite3OsRealloc(x,y) -#define OSFREE(x) sqlite3OsFree(x) -#define OSSIZEOF(x) sqlite3OsAllocationSize(x) -#define OSMALLOC_FAILED() - -#endif /* SQLITE_MEMDEBUG */ -/* -** End code for memory allocation system test layer. -**--------------------------------------------------------------------------*/ - -/* -** This routine is called when we are about to allocate n additional bytes -** of memory. If the new allocation will put is over the soft allocation -** limit, then invoke sqlite3_release_memory() to try to release some -** memory before continuing with the allocation. -** -** This routine also makes sure that the thread-specific-data (TSD) has -** be allocated. If it has not and can not be allocated, then return -** false. The updateMemoryUsedCount() routine below will deallocate -** the TSD if it ought to be. -** -** If SQLITE_ENABLE_MEMORY_MANAGEMENT is not defined, this routine is -** a no-op -*/ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -static int enforceSoftLimit(int n){ - ThreadData *pTsd = sqlite3ThreadData(); - if( pTsd==0 ){ +int sqlite3DbMallocSize(sqlite3 *db, void *p){ + assert( db==0 || sqlite3_mutex_held(db->mutex) ); + if( p==0 ){ return 0; + }else if( isLookaside(db, p) ){ + return db->lookaside.sz; + }else{ + return sqlite3GlobalConfig.m.xSize(p); } - assert( pTsd->nAlloc>=0 ); - if( n>0 && pTsd->nSoftHeapLimit>0 ){ - while( pTsd->nAlloc+n>pTsd->nSoftHeapLimit && sqlite3_release_memory(n) ){} - } - return 1; } -#else -# define enforceSoftLimit(X) 1 -#endif /* -** Update the count of total outstanding memory that is held in -** thread-specific-data (TSD). If after this update the TSD is -** no longer being used, then deallocate it. -** -** If SQLITE_ENABLE_MEMORY_MANAGEMENT is not defined, this routine is -** a no-op +** Free memory previously obtained from sqlite3Malloc(). 
*/ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -static void updateMemoryUsedCount(int n){ - ThreadData *pTsd = sqlite3ThreadData(); - if( pTsd ){ - pTsd->nAlloc += n; - assert( pTsd->nAlloc>=0 ); - if( pTsd->nAlloc==0 && pTsd->nSoftHeapLimit==0 ){ - sqlite3ReleaseThreadData(); - } +void sqlite3_free(void *p){ + if( p==0 ) return; + if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -sqlite3MallocSize(p)); + sqlite3GlobalConfig.m.xFree(p); + sqlite3_mutex_leave(mem0.mutex); + }else{ + sqlite3GlobalConfig.m.xFree(p); } } -#else -#define updateMemoryUsedCount(x) /* no-op */ -#endif /* -** Allocate and return N bytes of uninitialised memory by calling -** sqlite3OsMalloc(). If the Malloc() call fails, attempt to free memory -** by calling sqlite3_release_memory(). -*/ -void *sqlite3MallocRaw(int n, int doMemManage){ - void *p = 0; - if( n>0 && !sqlite3MallocFailed() && (!doMemManage || enforceSoftLimit(n)) ){ - while( (p = OSMALLOC(n))==0 && sqlite3_release_memory(n) ){} - if( !p ){ - sqlite3FailedMalloc(); - OSMALLOC_FAILED(); - }else if( doMemManage ){ - updateMemoryUsedCount(OSSIZEOF(p)); - } +** Free memory that might be associated with a particular database +** connection. +*/ +void sqlite3DbFree(sqlite3 *db, void *p){ + assert( db==0 || sqlite3_mutex_held(db->mutex) ); + if( isLookaside(db, p) ){ + LookasideSlot *pBuf = (LookasideSlot*)p; + pBuf->pNext = db->lookaside.pFree; + db->lookaside.pFree = pBuf; + db->lookaside.nOut--; + }else{ + sqlite3_free(p); } - return p; } /* -** Resize the allocation at p to n bytes by calling sqlite3OsRealloc(). The -** pointer to the new allocation is returned. If the Realloc() call fails, -** attempt to free memory by calling sqlite3_release_memory(). +** Change the size of an existing memory allocation */ -void *sqlite3Realloc(void *p, int n){ - if( sqlite3MallocFailed() ){ +void *sqlite3Realloc(void *pOld, int nBytes){ + int nOld, nNew; + void *pNew; + if( pOld==0 ){ + return sqlite3Malloc(nBytes); + } + if( nBytes<=0 ){ + sqlite3_free(pOld); return 0; } - - if( !p ){ - return sqlite3Malloc(n, 1); - }else{ - void *np = 0; -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - int origSize = OSSIZEOF(p); -#endif - if( enforceSoftLimit(n - origSize) ){ - while( (np = OSREALLOC(p, n))==0 && sqlite3_release_memory(n) ){} - if( !np ){ - sqlite3FailedMalloc(); - OSMALLOC_FAILED(); - }else{ - updateMemoryUsedCount(OSSIZEOF(np) - origSize); + if( nBytes>=0x7fffff00 ){ + /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */ + return 0; + } + nOld = sqlite3MallocSize(pOld); + if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, nBytes); + nNew = sqlite3GlobalConfig.m.xRoundup(nBytes); + if( nOld==nNew ){ + pNew = pOld; + }else{ + if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED)+nNew-nOld >= + mem0.alarmThreshold ){ + sqlite3MallocAlarm(nNew-nOld); + } + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); + if( pNew==0 && mem0.alarmCallback ){ + sqlite3MallocAlarm(nBytes); + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); + } + if( pNew ){ + nNew = sqlite3MallocSize(pNew); + sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nNew-nOld); } } - return np; + sqlite3_mutex_leave(mem0.mutex); + }else{ + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nBytes); } + return pNew; } /* -** Free the memory pointed to by p. p must be either a NULL pointer or a -** value returned by a previous call to sqlite3Malloc() or sqlite3Realloc(). 
+** The public interface to sqlite3Realloc. Make sure that the memory +** subsystem is initialized prior to invoking sqliteRealloc. */ -void sqlite3FreeX(void *p){ - if( p ){ - updateMemoryUsedCount(0 - OSSIZEOF(p)); - OSFREE(p); - } +void *sqlite3_realloc(void *pOld, int n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return sqlite3Realloc(pOld, n); } -/* -** A version of sqliteMalloc() that is always a function, not a macro. -** Currently, this is used only to alloc to allocate the parser engine. -*/ -void *sqlite3MallocX(int n){ - return sqliteMalloc(n); -} /* -** sqlite3Malloc -** sqlite3ReallocOrFree -** -** These two are implemented as wrappers around sqlite3MallocRaw(), -** sqlite3Realloc() and sqlite3Free(). +** Allocate and zero memory. */ -void *sqlite3Malloc(int n, int doMemManage){ - void *p = sqlite3MallocRaw(n, doMemManage); +void *sqlite3MallocZero(int n){ + void *p = sqlite3Malloc(n); if( p ){ memset(p, 0, n); } return p; } -void *sqlite3ReallocOrFree(void *p, int n){ - void *pNew; - pNew = sqlite3Realloc(p, n); - if( !pNew ){ - sqlite3FreeX(p); + +/* +** Allocate and zero memory. If the allocation fails, make +** the mallocFailed flag in the connection pointer. +*/ +void *sqlite3DbMallocZero(sqlite3 *db, int n){ + void *p = sqlite3DbMallocRaw(db, n); + if( p ){ + memset(p, 0, n); } - return pNew; + return p; } /* -** sqlite3ThreadSafeMalloc() and sqlite3ThreadSafeFree() are used in those -** rare scenarios where sqlite may allocate memory in one thread and free -** it in another. They are exactly the same as sqlite3Malloc() and -** sqlite3Free() except that: +** Allocate and zero memory. If the allocation fails, make +** the mallocFailed flag in the connection pointer. ** -** * The allocated memory is not included in any calculations with -** respect to the soft-heap-limit, and +** If db!=0 and db->mallocFailed is true (indicating a prior malloc +** failure on the same database connection) then always return 0. +** Hence for a particular database connection, once malloc starts +** failing, it fails consistently until mallocFailed is reset. +** This is an important assumption. There are many places in the +** code that do things like this: +** +** int *a = (int*)sqlite3DbMallocRaw(db, 100); +** int *b = (int*)sqlite3DbMallocRaw(db, 200); +** if( b ) a[10] = 9; ** -** * sqlite3ThreadSafeMalloc() must be matched with ThreadSafeFree(), -** not sqlite3Free(). Calling sqlite3Free() on memory obtained from -** ThreadSafeMalloc() will cause an error somewhere down the line. +** In other words, if a subsequent malloc (ex: "b") worked, it is assumed +** that all prior mallocs (ex: "a") worked too. 
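sqlite3_realloc() returns 0 and leaves the original allocation untouched when the resize fails, so a caller that overwrites its only pointer with the return value would leak. A minimal sketch of the usual pattern; the sqlite3DbReallocOrFree() routine a little further down packages the same idea for connection-scoped memory.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  char *p = sqlite3_malloc(64);
  char *pNew;
  if( p==0 ) return 1;
  pNew = sqlite3_realloc(p, 4096);     /* may fail and return 0 */
  if( pNew==0 ){
    sqlite3_free(p);                   /* the original block is still valid */
    return 1;
  }
  p = pNew;                            /* only overwrite p once the resize worked */
  printf("resized to 4096 bytes\n");
  sqlite3_free(p);
  return 0;
}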
*/ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -void *sqlite3ThreadSafeMalloc(int n){ - (void)ENTER_MALLOC; - return sqlite3Malloc(n, 0); -} -void sqlite3ThreadSafeFree(void *p){ - (void)ENTER_MALLOC; - if( p ){ - OSFREE(p); +void *sqlite3DbMallocRaw(sqlite3 *db, int n){ + void *p; + assert( db==0 || sqlite3_mutex_held(db->mutex) ); +#ifndef SQLITE_OMIT_LOOKASIDE + if( db ){ + LookasideSlot *pBuf; + if( db->mallocFailed ){ + return 0; + } + if( db->lookaside.bEnabled && n<=db->lookaside.sz + && (pBuf = db->lookaside.pFree)!=0 ){ + db->lookaside.pFree = pBuf->pNext; + db->lookaside.nOut++; + if( db->lookaside.nOut>db->lookaside.mxOut ){ + db->lookaside.mxOut = db->lookaside.nOut; + } + return (void*)pBuf; + } + } +#else + if( db && db->mallocFailed ){ + return 0; } -} #endif + p = sqlite3Malloc(n); + if( !p && db ){ + db->mallocFailed = 1; + } + return p; +} +/* +** Resize the block of memory pointed to by p to n bytes. If the +** resize fails, set the mallocFailed flag in the connection object. +*/ +void *sqlite3DbRealloc(sqlite3 *db, void *p, int n){ + void *pNew = 0; + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + if( db->mallocFailed==0 ){ + if( p==0 ){ + return sqlite3DbMallocRaw(db, n); + } + if( isLookaside(db, p) ){ + if( n<=db->lookaside.sz ){ + return p; + } + pNew = sqlite3DbMallocRaw(db, n); + if( pNew ){ + memcpy(pNew, p, db->lookaside.sz); + sqlite3DbFree(db, p); + } + }else{ + pNew = sqlite3_realloc(p, n); + if( !pNew ){ + db->mallocFailed = 1; + } + } + } + return pNew; +} /* -** Return the number of bytes allocated at location p. p must be either -** a NULL pointer (in which case 0 is returned) or a pointer returned by -** sqlite3Malloc(), sqlite3Realloc() or sqlite3ReallocOrFree(). -** -** The number of bytes allocated does not include any overhead inserted by -** any malloc() wrapper functions that may be called. So the value returned -** is the number of bytes that were available to SQLite using pointer p, -** regardless of how much memory was actually allocated. +** Attempt to reallocate p. If the reallocation fails, then free p +** and set the mallocFailed flag in the database connection. */ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -int sqlite3AllocSize(void *p){ - return OSSIZEOF(p); +void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, int n){ + void *pNew; + pNew = sqlite3DbRealloc(db, p, n); + if( !pNew ){ + sqlite3DbFree(db, p); + } + return pNew; } -#endif /* ** Make a copy of a string in memory obtained from sqliteMalloc(). These @@ -722,19 +645,27 @@ ** called via macros that record the current file and line number in the ** ThreadData structure. 
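The lookaside logic above amounts to a fixed-size free list: sqlite3DbFree() pushes small slots onto db->lookaside.pFree and sqlite3DbMallocRaw() pops them when a request fits. A standalone sketch of that mechanism, using a hypothetical Lookaside/Slot pair rather than the real sqlite3 structures:

#include <stdio.h>
#include <stdlib.h>

typedef struct Slot Slot;
struct Slot { Slot *pNext; };

typedef struct {
  int sz;          /* size of each slot */
  char *pStart;    /* first byte of the slot pool */
  char *pEnd;      /* one past the last byte of the pool */
  Slot *pFree;     /* head of the free list */
} Lookaside;

static void lookasideInit(Lookaside *p, void *pBuf, int sz, int n){
  int i;
  p->sz = sz;
  p->pStart = (char*)pBuf;
  p->pEnd = p->pStart + sz*n;
  p->pFree = 0;
  for(i=0; i<n; i++){                  /* thread every slot onto the free list */
    Slot *pSlot = (Slot*)&p->pStart[i*sz];
    pSlot->pNext = p->pFree;
    p->pFree = pSlot;
  }
}

static void *lookasideAlloc(Lookaside *p, int nByte){
  if( nByte<=p->sz && p->pFree ){      /* small request: pop a slot */
    Slot *pSlot = p->pFree;
    p->pFree = pSlot->pNext;
    return pSlot;
  }
  return malloc(nByte);                /* otherwise fall through to the heap */
}

static void lookasideFree(Lookaside *p, void *pOld){
  char *z = (char*)pOld;
  if( z>=p->pStart && z<p->pEnd ){     /* came from the pool: push it back */
    Slot *pSlot = (Slot*)pOld;
    pSlot->pNext = p->pFree;
    p->pFree = pSlot;
  }else{
    free(pOld);
  }
}

int main(void){
  static char aBuf[100*128];
  Lookaside look;
  void *p;
  lookasideInit(&look, aBuf, 128, 100);
  p = lookasideAlloc(&look, 48);       /* served from the pool */
  lookasideFree(&look, p);
  p = lookasideAlloc(&look, 4000);     /* too large, served by malloc() */
  lookasideFree(&look, p);
  printf("lookaside sketch ran\n");
  return 0;
}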
*/ -char *sqlite3StrDup(const char *z){ +char *sqlite3DbStrDup(sqlite3 *db, const char *z){ char *zNew; - int n; - if( z==0 ) return 0; - n = strlen(z)+1; - zNew = sqlite3MallocRaw(n, 1); - if( zNew ) memcpy(zNew, z, n); + size_t n; + if( z==0 ){ + return 0; + } + n = sqlite3Strlen30(z) + 1; + assert( (n&0x7fffffff)==n ); + zNew = sqlite3DbMallocRaw(db, (int)n); + if( zNew ){ + memcpy(zNew, z, n); + } return zNew; } -char *sqlite3StrNDup(const char *z, int n){ +char *sqlite3DbStrNDup(sqlite3 *db, const char *z, int n){ char *zNew; - if( z==0 ) return 0; - zNew = sqlite3MallocRaw(n+1, 1); + if( z==0 ){ + return 0; + } + assert( (n&0x7fffffff)==n ); + zNew = sqlite3DbMallocRaw(db, n+1); if( zNew ){ memcpy(zNew, z, n); zNew[n] = 0; @@ -743,93 +674,45 @@ } /* -** Create a string from the 2nd and subsequent arguments (up to the -** first NULL argument), store the string in memory obtained from -** sqliteMalloc() and make the pointer indicated by the 1st argument -** point to that string. The 1st argument must either be NULL or -** point to memory obtained from sqliteMalloc(). +** Create a string from the zFromat argument and the va_list that follows. +** Store the string in memory obtained from sqliteMalloc() and make *pz +** point to that string. */ -void sqlite3SetString(char **pz, ...){ +void sqlite3SetString(char **pz, sqlite3 *db, const char *zFormat, ...){ va_list ap; - int nByte; - const char *z; - char *zResult; - - assert( pz!=0 ); - nByte = 1; - va_start(ap, pz); - while( (z = va_arg(ap, const char*))!=0 ){ - nByte += strlen(z); - } - va_end(ap); - sqliteFree(*pz); - *pz = zResult = sqliteMallocRaw( nByte ); - if( zResult==0 ){ - return; - } - *zResult = 0; - va_start(ap, pz); - while( (z = va_arg(ap, const char*))!=0 ){ - int n = strlen(z); - memcpy(zResult, z, n); - zResult += n; - } - zResult[0] = 0; + char *z; + + va_start(ap, zFormat); + z = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); + sqlite3DbFree(db, *pz); + *pz = z; } /* ** This function must be called before exiting any API function (i.e. -** returning control to the user) that has called sqlite3Malloc or -** sqlite3Realloc. +** returning control to the user) that has called sqlite3_malloc or +** sqlite3_realloc. ** ** The returned value is normally a copy of the second argument to this -** function. However, if a malloc() failure has occured since the previous +** function. However, if a malloc() failure has occurred since the previous ** invocation SQLITE_NOMEM is returned instead. ** -** If the first argument, db, is not NULL and a malloc() error has occured, +** If the first argument, db, is not NULL and a malloc() error has occurred, ** then the connection error-code (the value returned by sqlite3_errcode()) ** is set to SQLITE_NOMEM. */ -int sqlite3MallocHasFailed = 0; int sqlite3ApiExit(sqlite3* db, int rc){ - if( sqlite3MallocFailed() ){ - sqlite3MallocHasFailed = 0; - sqlite3OsLeaveMutex(); + /* If the db handle is not NULL, then we must hold the connection handle + ** mutex here. Otherwise the read (and possible write) of db->mallocFailed + ** is unsafe, as is the call to sqlite3Error(). + */ + assert( !db || sqlite3_mutex_held(db->mutex) ); + if( db && (db->mallocFailed || rc==SQLITE_IOERR_NOMEM) ){ sqlite3Error(db, SQLITE_NOMEM, 0); + db->mallocFailed = 0; rc = SQLITE_NOMEM; } return rc & (db ? db->errMask : 0xff); } - -/* -** Set the "malloc has failed" condition to true for this thread. 
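sqlite3DbStrDup() above is the connection-scoped flavour of a plain string copy. The same pattern against the public allocator is shown below; strDup is a hypothetical helper, not an SQLite API.

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

/* Hypothetical helper: duplicate z into memory from sqlite3_malloc(). */
static char *strDup(const char *z){
  size_t n;
  char *zNew;
  if( z==0 ) return 0;
  n = strlen(z) + 1;
  zNew = sqlite3_malloc((int)n);
  if( zNew ) memcpy(zNew, z, n);
  return zNew;
}

int main(void){
  char *z = strDup("hello, sqlite");
  if( z ){
    printf("%s\n", z);
    sqlite3_free(z);
  }
  return 0;
}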
-*/ -void sqlite3FailedMalloc(){ - if( !sqlite3MallocFailed() ){ - sqlite3OsEnterMutex(); - assert( sqlite3MallocHasFailed==0 ); - sqlite3MallocHasFailed = 1; - } -} - -#ifdef SQLITE_MEMDEBUG -/* -** This function sets a flag in the thread-specific-data structure that will -** cause an assert to fail if sqliteMalloc() or sqliteRealloc() is called. -*/ -void sqlite3MallocDisallow(){ - assert( sqlite3_mallocDisallowed>=0 ); - sqlite3_mallocDisallowed++; -} - -/* -** This function clears the flag set in the thread-specific-data structure set -** by sqlite3MallocDisallow(). -*/ -void sqlite3MallocAllow(){ - assert( sqlite3_mallocDisallowed>0 ); - sqlite3_mallocDisallowed--; -} -#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mem0.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mem0.c --- sqlite3-3.4.2/src/mem0.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mem0.c 2008-10-28 18:58:20.000000000 +0000 @@ -0,0 +1,61 @@ +/* +** 2008 October 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains a no-op memory allocation drivers for use when +** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented +** here always fail. SQLite will not operate with these drivers. These +** are merely placeholders. Real drivers must be substituted using +** sqlite3_config() before SQLite will operate. +** +** $Id: mem0.c,v 1.1 2008/10/28 18:58:20 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** This version of the memory allocator is the default. It is +** used when no other memory allocator is specified using compile-time +** macros. +*/ +#ifdef SQLITE_ZERO_MALLOC + +/* +** No-op versions of all memory allocation routines +*/ +static void *sqlite3MemMalloc(int nByte){ return 0; } +static void sqlite3MemFree(void *pPrior){ return; } +static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; } +static int sqlite3MemSize(void *pPrior){ return 0; } +static int sqlite3MemRoundup(int n){ return n; } +static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; } +static void sqlite3MemShutdown(void *NotUsed){ return; } + +/* +** This routine is the only routine in this file with external linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. +*/ +void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +#endif /* SQLITE_ZERO_MALLOC */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mem1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mem1.c --- sqlite3-3.4.2/src/mem1.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mem1.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,145 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
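mem0.c above is explicit that its always-failing methods are placeholders and that real drivers must be installed with sqlite3_config() before the library will operate. The sketch below shows an application wrapping the C library allocator in its own sqlite3_mem_methods; the 8-byte size prefix mirrors the technique of the mem1.c driver that follows and is not required by the interface itself.

#include <stdio.h>
#include <stdlib.h>
#include <sqlite3.h>

/* Each block carries its size in an 8-byte prefix so xSize can answer
** without help from the underlying malloc(). */
static void *appMalloc(int nByte){
  sqlite3_int64 *p = malloc(nByte + 8);
  if( p ){ p[0] = nByte; p++; }
  return p;
}
static void appFree(void *pPrior){
  if( pPrior ) free(((sqlite3_int64*)pPrior) - 1);
}
static void *appRealloc(void *pPrior, int nByte){
  sqlite3_int64 *p = ((sqlite3_int64*)pPrior) - 1;
  p = realloc(p, nByte + 8);
  if( p ){ p[0] = nByte; p++; }
  return p;
}
static int appSize(void *pPrior){
  return pPrior ? (int)((sqlite3_int64*)pPrior)[-1] : 0;
}
static int appRoundup(int n){ return (n+7)&~7; }
static int appInit(void *pAppData){ (void)pAppData; return SQLITE_OK; }
static void appShutdown(void *pAppData){ (void)pAppData; }

int main(void){
  static const sqlite3_mem_methods appMethods = {
    appMalloc, appFree, appRealloc, appSize, appRoundup,
    appInit, appShutdown, 0
  };
  sqlite3_config(SQLITE_CONFIG_MALLOC, &appMethods);   /* before initialization */
  sqlite3_initialize();
  printf("custom allocator installed\n");
  sqlite3_shutdown();
  return 0;
}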
+** +************************************************************************* +** +** This file contains low-level memory allocation drivers for when +** SQLite will use the standard C-library malloc/realloc/free interface +** to obtain the memory it needs. +** +** This file contains implementations of the low-level memory allocation +** routines specified in the sqlite3_mem_methods object. +** +** $Id: mem1.c,v 1.30 2009/03/23 04:33:33 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +/* +** This version of the memory allocator is the default. It is +** used when no other memory allocator is specified using compile-time +** macros. +*/ +#ifdef SQLITE_SYSTEM_MALLOC + +/* +** Like malloc(), but remember the size of the allocation +** so that we can find it later using sqlite3MemSize(). +** +** For this low-level routine, we are guaranteed that nByte>0 because +** cases of nByte<=0 will be intercepted and dealt with by higher level +** routines. +*/ +static void *sqlite3MemMalloc(int nByte){ + sqlite3_int64 *p; + assert( nByte>0 ); + nByte = ROUND8(nByte); + p = malloc( nByte+8 ); + if( p ){ + p[0] = nByte; + p++; + } + return (void *)p; +} + +/* +** Like free() but works for allocations obtained from sqlite3MemMalloc() +** or sqlite3MemRealloc(). +** +** For this low-level routine, we already know that pPrior!=0 since +** cases where pPrior==0 will have been intecepted and dealt with +** by higher-level routines. +*/ +static void sqlite3MemFree(void *pPrior){ + sqlite3_int64 *p = (sqlite3_int64*)pPrior; + assert( pPrior!=0 ); + p--; + free(p); +} + +/* +** Like realloc(). Resize an allocation previously obtained from +** sqlite3MemMalloc(). +** +** For this low-level interface, we know that pPrior!=0. Cases where +** pPrior==0 while have been intercepted by higher-level routine and +** redirected to xMalloc. Similarly, we know that nByte>0 becauses +** cases where nByte<=0 will have been intercepted by higher-level +** routines and redirected to xFree. +*/ +static void *sqlite3MemRealloc(void *pPrior, int nByte){ + sqlite3_int64 *p = (sqlite3_int64*)pPrior; + assert( pPrior!=0 && nByte>0 ); + nByte = ROUND8(nByte); + p = (sqlite3_int64*)pPrior; + p--; + p = realloc(p, nByte+8 ); + if( p ){ + p[0] = nByte; + p++; + } + return (void*)p; +} + +/* +** Report the allocated size of a prior return from xMalloc() +** or xRealloc(). +*/ +static int sqlite3MemSize(void *pPrior){ + sqlite3_int64 *p; + if( pPrior==0 ) return 0; + p = (sqlite3_int64*)pPrior; + p--; + return (int)p[0]; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int sqlite3MemRoundup(int n){ + return ROUND8(n); +} + +/* +** Initialize this module. +*/ +static int sqlite3MemInit(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + return SQLITE_OK; +} + +/* +** Deinitialize this module. +*/ +static void sqlite3MemShutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + return; +} + +/* +** This routine is the only routine in this file with external linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. 
+*/ +void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +#endif /* SQLITE_SYSTEM_MALLOC */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mem2.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mem2.c --- sqlite3-3.4.2/src/mem2.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mem2.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,444 @@ +/* +** 2007 August 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains low-level memory allocation drivers for when +** SQLite will use the standard C-library malloc/realloc/free interface +** to obtain the memory it needs while adding lots of additional debugging +** information to each allocation in order to help detect and fix memory +** leaks and memory usage errors. +** +** This file contains implementations of the low-level memory allocation +** routines specified in the sqlite3_mem_methods object. +** +** $Id: mem2.c,v 1.45 2009/03/23 04:33:33 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +/* +** This version of the memory allocator is used only if the +** SQLITE_MEMDEBUG macro is defined +*/ +#ifdef SQLITE_MEMDEBUG + +/* +** The backtrace functionality is only available with GLIBC +*/ +#ifdef __GLIBC__ + extern int backtrace(void**,int); + extern void backtrace_symbols_fd(void*const*,int,int); +#else +# define backtrace(A,B) 1 +# define backtrace_symbols_fd(A,B,C) +#endif +#include + +/* +** Each memory allocation looks like this: +** +** ------------------------------------------------------------------------ +** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard | +** ------------------------------------------------------------------------ +** +** The application code sees only a pointer to the allocation. We have +** to back up from the allocation pointer to find the MemBlockHdr. The +** MemBlockHdr tells us the size of the allocation and the number of +** backtrace pointers. There is also a guard word at the end of the +** MemBlockHdr. +*/ +struct MemBlockHdr { + i64 iSize; /* Size of this allocation */ + struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ + char nBacktrace; /* Number of backtraces on this alloc */ + char nBacktraceSlots; /* Available backtrace slots */ + short nTitle; /* Bytes of title; includes '\0' */ + int iForeGuard; /* Guard word for sanity */ +}; + +/* +** Guard words +*/ +#define FOREGUARD 0x80F5E153 +#define REARGUARD 0xE4676B53 + +/* +** Number of malloc size increments to track. +*/ +#define NCSIZE 1000 + +/* +** All of the static variables used by this module are collected +** into a single structure named "mem". This is to keep the +** static variables organized and to reduce namespace pollution +** when this module is combined with other in the amalgamation. +*/ +static struct { + + /* + ** Mutex to control access to the memory allocation subsystem. 
+ */ + sqlite3_mutex *mutex; + + /* + ** Head and tail of a linked list of all outstanding allocations + */ + struct MemBlockHdr *pFirst; + struct MemBlockHdr *pLast; + + /* + ** The number of levels of backtrace to save in new allocations. + */ + int nBacktrace; + void (*xBacktrace)(int, int, void **); + + /* + ** Title text to insert in front of each block + */ + int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */ + char zTitle[100]; /* The title text */ + + /* + ** sqlite3MallocDisallow() increments the following counter. + ** sqlite3MallocAllow() decrements it. + */ + int disallow; /* Do not allow memory allocation */ + + /* + ** Gather statistics on the sizes of memory allocations. + ** nAlloc[i] is the number of allocation attempts of i*8 + ** bytes. i==NCSIZE is the number of allocation attempts for + ** sizes more than NCSIZE*8 bytes. + */ + int nAlloc[NCSIZE]; /* Total number of allocations */ + int nCurrent[NCSIZE]; /* Current number of allocations */ + int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */ + +} mem; + + +/* +** Adjust memory usage statistics +*/ +static void adjustStats(int iSize, int increment){ + int i = ROUND8(iSize)/8; + if( i>NCSIZE-1 ){ + i = NCSIZE - 1; + } + if( increment>0 ){ + mem.nAlloc[i]++; + mem.nCurrent[i]++; + if( mem.nCurrent[i]>mem.mxCurrent[i] ){ + mem.mxCurrent[i] = mem.nCurrent[i]; + } + }else{ + mem.nCurrent[i]--; + assert( mem.nCurrent[i]>=0 ); + } +} + +/* +** Given an allocation, find the MemBlockHdr for that allocation. +** +** This routine checks the guards at either end of the allocation and +** if they are incorrect it asserts. +*/ +static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ + struct MemBlockHdr *p; + int *pInt; + u8 *pU8; + int nReserve; + + p = (struct MemBlockHdr*)pAllocation; + p--; + assert( p->iForeGuard==(int)FOREGUARD ); + nReserve = ROUND8(p->iSize); + pInt = (int*)pAllocation; + pU8 = (u8*)pAllocation; + assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD ); + /* This checks any of the "extra" bytes allocated due + ** to rounding up to an 8 byte boundary to ensure + ** they haven't been overwritten. + */ + while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 ); + return p; +} + +/* +** Return the number of bytes currently allocated at address p. +*/ +static int sqlite3MemSize(void *p){ + struct MemBlockHdr *pHdr; + if( !p ){ + return 0; + } + pHdr = sqlite3MemsysGetHeader(p); + return pHdr->iSize; +} + +/* +** Initialize the memory allocation subsystem. +*/ +static int sqlite3MemInit(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + assert( (sizeof(struct MemBlockHdr)&7) == 0 ); + if( !sqlite3GlobalConfig.bMemstat ){ + /* If memory status is enabled, then the malloc.c wrapper will already + ** hold the STATIC_MEM mutex when the routines here are invoked. */ + mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + return SQLITE_OK; +} + +/* +** Deinitialize the memory allocation subsystem. +*/ +static void sqlite3MemShutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + mem.mutex = 0; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int sqlite3MemRoundup(int n){ + return ROUND8(n); +} + +/* +** Allocate nByte bytes of memory. 
+*/ +static void *sqlite3MemMalloc(int nByte){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + int *pInt; + void *p = 0; + int totalSize; + int nReserve; + sqlite3_mutex_enter(mem.mutex); + assert( mem.disallow==0 ); + nReserve = ROUND8(nByte); + totalSize = nReserve + sizeof(*pHdr) + sizeof(int) + + mem.nBacktrace*sizeof(void*) + mem.nTitle; + p = malloc(totalSize); + if( p ){ + z = p; + pBt = (void**)&z[mem.nTitle]; + pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace]; + pHdr->pNext = 0; + pHdr->pPrev = mem.pLast; + if( mem.pLast ){ + mem.pLast->pNext = pHdr; + }else{ + mem.pFirst = pHdr; + } + mem.pLast = pHdr; + pHdr->iForeGuard = FOREGUARD; + pHdr->nBacktraceSlots = mem.nBacktrace; + pHdr->nTitle = mem.nTitle; + if( mem.nBacktrace ){ + void *aAddr[40]; + pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1; + memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*)); + assert(pBt[0]); + if( mem.xBacktrace ){ + mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]); + } + }else{ + pHdr->nBacktrace = 0; + } + if( mem.nTitle ){ + memcpy(z, mem.zTitle, mem.nTitle); + } + pHdr->iSize = nByte; + adjustStats(nByte, +1); + pInt = (int*)&pHdr[1]; + pInt[nReserve/sizeof(int)] = REARGUARD; + memset(pInt, 0x65, nReserve); + p = (void*)pInt; + } + sqlite3_mutex_leave(mem.mutex); + return p; +} + +/* +** Free memory. +*/ +static void sqlite3MemFree(void *pPrior){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + assert( sqlite3GlobalConfig.bMemstat || mem.mutex!=0 ); + pHdr = sqlite3MemsysGetHeader(pPrior); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + sqlite3_mutex_enter(mem.mutex); + if( pHdr->pPrev ){ + assert( pHdr->pPrev->pNext==pHdr ); + pHdr->pPrev->pNext = pHdr->pNext; + }else{ + assert( mem.pFirst==pHdr ); + mem.pFirst = pHdr->pNext; + } + if( pHdr->pNext ){ + assert( pHdr->pNext->pPrev==pHdr ); + pHdr->pNext->pPrev = pHdr->pPrev; + }else{ + assert( mem.pLast==pHdr ); + mem.pLast = pHdr->pPrev; + } + z = (char*)pBt; + z -= pHdr->nTitle; + adjustStats(pHdr->iSize, -1); + memset(z, 0x2b, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) + + pHdr->iSize + sizeof(int) + pHdr->nTitle); + free(z); + sqlite3_mutex_leave(mem.mutex); +} + +/* +** Change the size of an existing memory allocation. +** +** For this debugging implementation, we *always* make a copy of the +** allocation into a new place in memory. In this way, if the +** higher level code is using pointer to the old allocation, it is +** much more likely to break and we are much more liking to find +** the error. +*/ +static void *sqlite3MemRealloc(void *pPrior, int nByte){ + struct MemBlockHdr *pOldHdr; + void *pNew; + assert( mem.disallow==0 ); + pOldHdr = sqlite3MemsysGetHeader(pPrior); + pNew = sqlite3MemMalloc(nByte); + if( pNew ){ + memcpy(pNew, pPrior, nByteiSize ? nByte : pOldHdr->iSize); + if( nByte>pOldHdr->iSize ){ + memset(&((char*)pNew)[pOldHdr->iSize], 0x2b, nByte - pOldHdr->iSize); + } + sqlite3MemFree(pPrior); + } + return pNew; +} + +/* +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. +*/ +void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +/* +** Set the number of backtrace levels kept for each allocation. +** A value of zero turns off backtracing. 
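mem2.c surrounds every allocation with FOREGUARD and REARGUARD words and verifies them in sqlite3MemsysGetHeader(). A much-reduced standalone sketch of the same fence-posting idea; the layout here is simplified (the caller supplies the size on free) and is not the real MemBlockHdr layout.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FOREGUARD 0x80F5E153u
#define REARGUARD 0xE4676B53u

static void *guardedMalloc(size_t n){
  size_t nWords = (n + sizeof(unsigned) - 1)/sizeof(unsigned);
  unsigned *p = malloc((nWords + 2)*sizeof(unsigned));
  if( p==0 ) return 0;
  p[0] = FOREGUARD;                    /* guard word before the user region */
  p[nWords+1] = REARGUARD;             /* guard word after the user region */
  return &p[1];
}

static void guardedFree(void *pUser, size_t n){
  size_t nWords = (n + sizeof(unsigned) - 1)/sizeof(unsigned);
  unsigned *p = (unsigned*)pUser - 1;
  assert( p[0]==FOREGUARD );           /* catches buffer underruns */
  assert( p[nWords+1]==REARGUARD );    /* catches buffer overruns */
  free(p);
}

int main(void){
  char *z = guardedMalloc(10);
  if( z ){
    memcpy(z, "0123456789", 10);
    guardedFree(z, 10);
    printf("guards intact\n");
  }
  return 0;
}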
The number is always rounded +** up to a multiple of 2. +*/ +void sqlite3MemdebugBacktrace(int depth){ + if( depth<0 ){ depth = 0; } + if( depth>20 ){ depth = 20; } + depth = (depth+1)&0xfe; + mem.nBacktrace = depth; +} + +void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){ + mem.xBacktrace = xBacktrace; +} + +/* +** Set the title string for subsequent allocations. +*/ +void sqlite3MemdebugSettitle(const char *zTitle){ + unsigned int n = sqlite3Strlen30(zTitle) + 1; + sqlite3_mutex_enter(mem.mutex); + if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1; + memcpy(mem.zTitle, zTitle, n); + mem.zTitle[n] = 0; + mem.nTitle = ROUND8(n); + sqlite3_mutex_leave(mem.mutex); +} + +void sqlite3MemdebugSync(){ + struct MemBlockHdr *pHdr; + for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ + void **pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + mem.xBacktrace(pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]); + } +} + +/* +** Open the file indicated and write a log of all unfreed memory +** allocations into that log. +*/ +void sqlite3MemdebugDump(const char *zFilename){ + FILE *out; + struct MemBlockHdr *pHdr; + void **pBt; + int i; + out = fopen(zFilename, "w"); + if( out==0 ){ + fprintf(stderr, "** Unable to output memory debug output log: %s **\n", + zFilename); + return; + } + for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ + char *z = (char*)pHdr; + z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle; + fprintf(out, "**** %lld bytes at %p from %s ****\n", + pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???"); + if( pHdr->nBacktrace ){ + fflush(out); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out)); + fprintf(out, "\n"); + } + } + fprintf(out, "COUNTS:\n"); + for(i=0; i=1 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); + assert( size>=2 ); + if( size <= MX_SMALL ){ + memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]); + }else{ + hash = size % N_HASH; + memsys3UnlinkFromList(i, &mem3.aiHash[hash]); + } +} + +/* +** Link the chunk at mem3.aPool[i] so that is on the list rooted +** at *pRoot. +*/ +static void memsys3LinkIntoList(u32 i, u32 *pRoot){ + assert( sqlite3_mutex_held(mem3.mutex) ); + mem3.aPool[i].u.list.next = *pRoot; + mem3.aPool[i].u.list.prev = 0; + if( *pRoot ){ + mem3.aPool[*pRoot].u.list.prev = i; + } + *pRoot = i; +} + +/* +** Link the chunk at index i into either the appropriate +** small chunk list, or into the large chunk hash table. +*/ +static void memsys3Link(u32 i){ + u32 size, hash; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( i>=1 ); + assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); + assert( size>=2 ); + if( size <= MX_SMALL ){ + memsys3LinkIntoList(i, &mem3.aiSmall[size-2]); + }else{ + hash = size % N_HASH; + memsys3LinkIntoList(i, &mem3.aiHash[hash]); + } +} + +/* +** If the STATIC_MEM mutex is not already held, obtain it now. The mutex +** will already be held (obtained by code in malloc.c) if +** sqlite3GlobalConfig.bMemStat is true. +*/ +static void memsys3Enter(void){ + if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){ + mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + sqlite3_mutex_enter(mem3.mutex); +} +static void memsys3Leave(void){ + sqlite3_mutex_leave(mem3.mutex); +} + +/* +** Called when we are unable to satisfy an allocation of nBytes. 
+*/ +static void memsys3OutOfMemory(int nByte){ + if( !mem3.alarmBusy ){ + mem3.alarmBusy = 1; + assert( sqlite3_mutex_held(mem3.mutex) ); + sqlite3_mutex_leave(mem3.mutex); + sqlite3_release_memory(nByte); + sqlite3_mutex_enter(mem3.mutex); + mem3.alarmBusy = 0; + } +} + + +/* +** Chunk i is a free chunk that has been unlinked. Adjust its +** size parameters for check-out and return a pointer to the +** user portion of the chunk. +*/ +static void *memsys3Checkout(u32 i, u32 nBlock){ + u32 x; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( i>=1 ); + assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ); + assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock ); + x = mem3.aPool[i-1].u.hdr.size4x; + mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2); + mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock; + mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2; + return &mem3.aPool[i]; +} + +/* +** Carve a piece off of the end of the mem3.iMaster free chunk. +** Return a pointer to the new allocation. Or, if the master chunk +** is not large enough, return 0. +*/ +static void *memsys3FromMaster(u32 nBlock){ + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( mem3.szMaster>=nBlock ); + if( nBlock>=mem3.szMaster-1 ){ + /* Use the entire master */ + void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster); + mem3.iMaster = 0; + mem3.szMaster = 0; + mem3.mnMaster = 0; + return p; + }else{ + /* Split the master block. Return the tail. */ + u32 newi, x; + newi = mem3.iMaster + mem3.szMaster - nBlock; + assert( newi > mem3.iMaster+1 ); + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2; + mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1; + mem3.szMaster -= nBlock; + mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster; + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + if( mem3.szMaster < mem3.mnMaster ){ + mem3.mnMaster = mem3.szMaster; + } + return (void*)&mem3.aPool[newi]; + } +} + +/* +** *pRoot is the head of a list of free chunks of the same size +** or same size hash. In other words, *pRoot is an entry in either +** mem3.aiSmall[] or mem3.aiHash[]. +** +** This routine examines all entries on the given list and tries +** to coalesce each entries with adjacent free chunks. +** +** If it sees a chunk that is larger than mem3.iMaster, it replaces +** the current mem3.iMaster with the new larger chunk. In order for +** this mem3.iMaster replacement to work, the master chunk must be +** linked into the hash tables. That is not the normal state of +** affairs, of course. The calling routine must link the master +** chunk before invoking this routine, then must unlink the (possibly +** changed) master chunk once this routine has finished. 
+*/ +static void memsys3Merge(u32 *pRoot){ + u32 iNext, prev, size, i, x; + + assert( sqlite3_mutex_held(mem3.mutex) ); + for(i=*pRoot; i>0; i=iNext){ + iNext = mem3.aPool[i].u.list.next; + size = mem3.aPool[i-1].u.hdr.size4x; + assert( (size&1)==0 ); + if( (size&2)==0 ){ + memsys3UnlinkFromList(i, pRoot); + assert( i > mem3.aPool[i-1].u.hdr.prevSize ); + prev = i - mem3.aPool[i-1].u.hdr.prevSize; + if( prev==iNext ){ + iNext = mem3.aPool[prev].u.list.next; + } + memsys3Unlink(prev); + size = i + size/4 - prev; + x = mem3.aPool[prev-1].u.hdr.size4x & 2; + mem3.aPool[prev-1].u.hdr.size4x = size*4 | x; + mem3.aPool[prev+size-1].u.hdr.prevSize = size; + memsys3Link(prev); + i = prev; + }else{ + size /= 4; + } + if( size>mem3.szMaster ){ + mem3.iMaster = i; + mem3.szMaster = size; + } + } +} + +/* +** Return a block of memory of at least nBytes in size. +** Return NULL if unable. +** +** This function assumes that the necessary mutexes, if any, are +** already held by the caller. Hence "Unsafe". +*/ +static void *memsys3MallocUnsafe(int nByte){ + u32 i; + u32 nBlock; + u32 toFree; + + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( sizeof(Mem3Block)==8 ); + if( nByte<=12 ){ + nBlock = 2; + }else{ + nBlock = (nByte + 11)/8; + } + assert( nBlock>=2 ); + + /* STEP 1: + ** Look for an entry of the correct size in either the small + ** chunk table or in the large chunk hash table. This is + ** successful most of the time (about 9 times out of 10). + */ + if( nBlock <= MX_SMALL ){ + i = mem3.aiSmall[nBlock-2]; + if( i>0 ){ + memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]); + return memsys3Checkout(i, nBlock); + } + }else{ + int hash = nBlock % N_HASH; + for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){ + if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){ + memsys3UnlinkFromList(i, &mem3.aiHash[hash]); + return memsys3Checkout(i, nBlock); + } + } + } + + /* STEP 2: + ** Try to satisfy the allocation by carving a piece off of the end + ** of the master chunk. This step usually works if step 1 fails. + */ + if( mem3.szMaster>=nBlock ){ + return memsys3FromMaster(nBlock); + } + + + /* STEP 3: + ** Loop through the entire memory pool. Coalesce adjacent free + ** chunks. Recompute the master chunk as the largest free chunk. + ** Then try again to satisfy the allocation by carving a piece off + ** of the end of the master chunk. This step happens very + ** rarely (we hope!) + */ + for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){ + memsys3OutOfMemory(toFree); + if( mem3.iMaster ){ + memsys3Link(mem3.iMaster); + mem3.iMaster = 0; + mem3.szMaster = 0; + } + for(i=0; i=nBlock ){ + return memsys3FromMaster(nBlock); + } + } + } + + /* If none of the above worked, then we fail. */ + return 0; +} + +/* +** Free an outstanding memory allocation. +** +** This function assumes that the necessary mutexes, if any, are +** already held by the caller. Hence "Unsafe". 
+*/ +void memsys3FreeUnsafe(void *pOld){ + Mem3Block *p = (Mem3Block*)pOld; + int i; + u32 size, x; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] ); + i = p - mem3.aPool; + assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( i+size<=mem3.nPool+1 ); + mem3.aPool[i-1].u.hdr.size4x &= ~1; + mem3.aPool[i+size-1].u.hdr.prevSize = size; + mem3.aPool[i+size-1].u.hdr.size4x &= ~2; + memsys3Link(i); + + /* Try to expand the master using the newly freed chunk */ + if( mem3.iMaster ){ + while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){ + size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize; + mem3.iMaster -= size; + mem3.szMaster += size; + memsys3Unlink(mem3.iMaster); + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; + } + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){ + memsys3Unlink(mem3.iMaster+mem3.szMaster); + mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; + } + } +} + +/* +** Return the size of an outstanding allocation, in bytes. The +** size returned omits the 8-byte header overhead. This only +** works for chunks that are currently checked out. +*/ +static int memsys3Size(void *p){ + Mem3Block *pBlock; + if( p==0 ) return 0; + pBlock = (Mem3Block*)p; + assert( (pBlock[-1].u.hdr.size4x&1)!=0 ); + return (pBlock[-1].u.hdr.size4x&~3)*2 - 4; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int memsys3Roundup(int n){ + if( n<=12 ){ + return 12; + }else{ + return ((n+11)&~7) - 4; + } +} + +/* +** Allocate nBytes of memory. +*/ +static void *memsys3Malloc(int nBytes){ + sqlite3_int64 *p; + assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */ + memsys3Enter(); + p = memsys3MallocUnsafe(nBytes); + memsys3Leave(); + return (void*)p; +} + +/* +** Free memory. +*/ +void memsys3Free(void *pPrior){ + assert( pPrior ); + memsys3Enter(); + memsys3FreeUnsafe(pPrior); + memsys3Leave(); +} + +/* +** Change the size of an existing memory allocation +*/ +void *memsys3Realloc(void *pPrior, int nBytes){ + int nOld; + void *p; + if( pPrior==0 ){ + return sqlite3_malloc(nBytes); + } + if( nBytes<=0 ){ + sqlite3_free(pPrior); + return 0; + } + nOld = memsys3Size(pPrior); + if( nBytes<=nOld && nBytes>=nOld-128 ){ + return pPrior; + } + memsys3Enter(); + p = memsys3MallocUnsafe(nBytes); + if( p ){ + if( nOld>1)!=(size&1) ){ + fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]); + assert( 0 ); + break; + } + if( size&1 ){ + fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8); + }else{ + fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8, + i==mem3.iMaster ? 
" **master**" : ""); + } + } + for(i=0; i0; j=mem3.aPool[j].u.list.next){ + fprintf(out, " %p(%d)", &mem3.aPool[j], + (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); + } + fprintf(out, "\n"); + } + for(i=0; i0; j=mem3.aPool[j].u.list.next){ + fprintf(out, " %p(%d)", &mem3.aPool[j], + (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); + } + fprintf(out, "\n"); + } + fprintf(out, "master=%d\n", mem3.iMaster); + fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8); + fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8); + sqlite3_mutex_leave(mem3.mutex); + if( out==stdout ){ + fflush(stdout); + }else{ + fclose(out); + } +#else + UNUSED_PARAMETER(zFilename); +#endif +} + +/* +** This routine is the only routine in this file with external +** linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. The +** arguments specify the block of memory to manage. +** +** This routine is only called by sqlite3_config(), and therefore +** is not required to be threadsafe (it is not). +*/ +const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){ + static const sqlite3_mem_methods mempoolMethods = { + memsys3Malloc, + memsys3Free, + memsys3Realloc, + memsys3Size, + memsys3Roundup, + memsys3Init, + memsys3Shutdown, + 0 + }; + return &mempoolMethods; +} + +#endif /* SQLITE_ENABLE_MEMSYS3 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mem5.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mem5.c --- sqlite3-3.4.2/src/mem5.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mem5.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,488 @@ +/* +** 2007 October 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement a memory +** allocation subsystem for use by SQLite. +** +** This version of the memory allocation subsystem omits all +** use of malloc(). The SQLite user supplies a block of memory +** before calling sqlite3_initialize() from which allocations +** are made and returned by the xMalloc() and xRealloc() +** implementations. Once sqlite3_initialize() has been called, +** the amount of memory available to SQLite is fixed and cannot +** be changed. +** +** This version of the memory allocation subsystem is included +** in the build only if SQLITE_ENABLE_MEMSYS5 is defined. +** +** $Id: mem5.c,v 1.19 2008/11/19 16:52:44 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +/* +** This version of the memory allocator is used only when +** SQLITE_ENABLE_MEMSYS5 is defined. +*/ +#ifdef SQLITE_ENABLE_MEMSYS5 + +/* +** A minimum allocation is an instance of the following structure. +** Larger allocations are an array of these structures where the +** size of the array is a power of 2. 
+*/ +typedef struct Mem5Link Mem5Link; +struct Mem5Link { + int next; /* Index of next free chunk */ + int prev; /* Index of previous free chunk */ +}; + +/* +** Maximum size of any allocation is ((1<=0 && i=0 && iLogsize<=LOGMAX ); + assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); + + next = MEM5LINK(i)->next; + prev = MEM5LINK(i)->prev; + if( prev<0 ){ + mem5.aiFreelist[iLogsize] = next; + }else{ + MEM5LINK(prev)->next = next; + } + if( next>=0 ){ + MEM5LINK(next)->prev = prev; + } +} + +/* +** Link the chunk at mem5.aPool[i] so that is on the iLogsize +** free list. +*/ +static void memsys5Link(int i, int iLogsize){ + int x; + assert( sqlite3_mutex_held(mem5.mutex) ); + assert( i>=0 && i=0 && iLogsize<=LOGMAX ); + assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); + + x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize]; + MEM5LINK(i)->prev = -1; + if( x>=0 ){ + assert( xprev = i; + } + mem5.aiFreelist[iLogsize] = i; +} + +/* +** If the STATIC_MEM mutex is not already held, obtain it now. The mutex +** will already be held (obtained by code in malloc.c) if +** sqlite3GlobalConfig.bMemStat is true. +*/ +static void memsys5Enter(void){ + if( sqlite3GlobalConfig.bMemstat==0 && mem5.mutex==0 ){ + mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + sqlite3_mutex_enter(mem5.mutex); +} +static void memsys5Leave(void){ + sqlite3_mutex_leave(mem5.mutex); +} + +/* +** Return the size of an outstanding allocation, in bytes. The +** size returned omits the 8-byte header overhead. This only +** works for chunks that are currently checked out. +*/ +static int memsys5Size(void *p){ + int iSize = 0; + if( p ){ + int i = ((u8 *)p-mem5.zPool)/mem5.nAtom; + assert( i>=0 && i=0 && iLogsize<=LOGMAX ); + i = iFirst = mem5.aiFreelist[iLogsize]; + assert( iFirst>=0 ); + while( i>0 ){ + if( inext; + } + memsys5Unlink(iFirst, iLogsize); + return iFirst; +} + +/* +** Return a block of memory of at least nBytes in size. +** Return NULL if unable. +*/ +static void *memsys5MallocUnsafe(int nByte){ + int i; /* Index of a mem5.aPool[] slot */ + int iBin; /* Index into mem5.aiFreelist[] */ + int iFullSz; /* Size of allocation rounded up to power of 2 */ + int iLogsize; /* Log2 of iFullSz/POW2_MIN */ + + /* Keep track of the maximum allocation request. Even unfulfilled + ** requests are counted */ + if( (u32)nByte>mem5.maxRequest ){ + mem5.maxRequest = nByte; + } + + /* Round nByte up to the next valid power of two */ + for(iFullSz=mem5.nAtom, iLogsize=0; iFullSzLOGMAX ) return 0; + i = memsys5UnlinkFirst(iBin); + while( iBin>iLogsize ){ + int newSize; + + iBin--; + newSize = 1 << iBin; + mem5.aCtrl[i+newSize] = CTRL_FREE | iBin; + memsys5Link(i+newSize, iBin); + } + mem5.aCtrl[i] = iLogsize; + + /* Update allocator performance statistics. 
*/ + mem5.nAlloc++; + mem5.totalAlloc += iFullSz; + mem5.totalExcess += iFullSz - nByte; + mem5.currentCount++; + mem5.currentOut += iFullSz; + if( mem5.maxCount=0 && iBlock0 ); + assert( mem5.currentOut>=(size*mem5.nAtom) ); + mem5.currentCount--; + mem5.currentOut -= size*mem5.nAtom; + assert( mem5.currentOut>0 || mem5.currentCount==0 ); + assert( mem5.currentCount>0 || mem5.currentOut==0 ); + + mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize; + while( iLogsize>iLogsize) & 1 ){ + iBuddy = iBlock - size; + }else{ + iBuddy = iBlock + size; + } + assert( iBuddy>=0 ); + if( (iBuddy+(1<mem5.nBlock ) break; + if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break; + memsys5Unlink(iBuddy, iLogsize); + iLogsize++; + if( iBuddy0 ){ + memsys5Enter(); + p = memsys5MallocUnsafe(nBytes); + memsys5Leave(); + } + return (void*)p; +} + +/* +** Free memory. +*/ +static void memsys5Free(void *pPrior){ + if( pPrior==0 ){ +assert(0); + return; + } + memsys5Enter(); + memsys5FreeUnsafe(pPrior); + memsys5Leave(); +} + +/* +** Change the size of an existing memory allocation +*/ +static void *memsys5Realloc(void *pPrior, int nBytes){ + int nOld; + void *p; + if( pPrior==0 ){ + return memsys5Malloc(nBytes); + } + if( nBytes<=0 ){ + memsys5Free(pPrior); + return 0; + } + nOld = memsys5Size(pPrior); + if( nBytes<=nOld ){ + return pPrior; + } + memsys5Enter(); + p = memsys5MallocUnsafe(nBytes); + if( p ){ + memcpy(p, pPrior, nOld); + memsys5FreeUnsafe(pPrior); + } + memsys5Leave(); + return p; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int memsys5Roundup(int n){ + int iFullSz; + for(iFullSz=mem5.nAtom; iFullSzmem5.nAtom ){ + mem5.nAtom = mem5.nAtom << 1; + } + + mem5.nBlock = (nByte / (mem5.nAtom+sizeof(u8))); + mem5.zPool = zByte; + mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.nAtom]; + + for(ii=0; ii<=LOGMAX; ii++){ + mem5.aiFreelist[ii] = -1; + } + + iOffset = 0; + for(ii=LOGMAX; ii>=0; ii--){ + int nAlloc = (1<mem5.nBlock); + } + + return SQLITE_OK; +} + +/* +** Deinitialize this module. +*/ +static void memsys5Shutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + return; +} + +/* +** Open the file indicated and write a log of all unfreed memory +** allocations into that log. +*/ +void sqlite3Memsys5Dump(const char *zFilename){ +#ifdef SQLITE_DEBUG + FILE *out; + int i, j, n; + int nMinLog; + + if( zFilename==0 || zFilename[0]==0 ){ + out = stdout; + }else{ + out = fopen(zFilename, "w"); + if( out==0 ){ + fprintf(stderr, "** Unable to output memory debug output log: %s **\n", + zFilename); + return; + } + } + memsys5Enter(); + nMinLog = memsys5Log(mem5.nAtom); + for(i=0; i<=LOGMAX && i+nMinLog<32; i++){ + for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){} + fprintf(out, "freelist items of size %d: %d\n", mem5.nAtom << i, n); + } + fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc); + fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc); + fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess); + fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut); + fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount); + fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut); + fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount); + fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest); + memsys5Leave(); + if( out==stdout ){ + fflush(stdout); + }else{ + fclose(out); + } +#else + UNUSED_PARAMETER(zFilename); +#endif +} + +/* +** This routine is the only routine in this file with external +** linkage. 
It returns a pointer to a static sqlite3_mem_methods +** struct populated with the memsys5 methods. +*/ +const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){ + static const sqlite3_mem_methods memsys5Methods = { + memsys5Malloc, + memsys5Free, + memsys5Realloc, + memsys5Size, + memsys5Roundup, + memsys5Init, + memsys5Shutdown, + 0 + }; + return &memsys5Methods; +} + +#endif /* SQLITE_ENABLE_MEMSYS5 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/memjournal.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/memjournal.c --- sqlite3-3.4.2/src/memjournal.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/memjournal.c 2009-06-25 12:24:38.000000000 +0100 @@ -0,0 +1,259 @@ +/* +** 2008 October 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains code use to implement an in-memory rollback journal. +** The in-memory rollback journal is used to journal transactions for +** ":memory:" databases and when the journal_mode=MEMORY pragma is used. +** +** @(#) $Id: memjournal.c,v 1.12 2009/05/04 11:42:30 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +/* Forward references to internal structures */ +typedef struct MemJournal MemJournal; +typedef struct FilePoint FilePoint; +typedef struct FileChunk FileChunk; + +/* Space to hold the rollback journal is allocated in increments of +** this many bytes. +** +** The size chosen is a little less than a power of two. That way, +** the FileChunk object will have a size that almost exactly fills +** a power-of-two allocation. This mimimizes wasted space in power-of-two +** memory allocators. +*/ +#define JOURNAL_CHUNKSIZE ((int)(1024-sizeof(FileChunk*))) + +/* Macro to find the minimum of two numeric values. +*/ +#ifndef MIN +# define MIN(x,y) ((x)<(y)?(x):(y)) +#endif + +/* +** The rollback journal is composed of a linked list of these structures. +*/ +struct FileChunk { + FileChunk *pNext; /* Next chunk in the journal */ + u8 zChunk[JOURNAL_CHUNKSIZE]; /* Content of this chunk */ +}; + +/* +** An instance of this object serves as a cursor into the rollback journal. +** The cursor can be either for reading or writing. +*/ +struct FilePoint { + sqlite3_int64 iOffset; /* Offset from the beginning of the file */ + FileChunk *pChunk; /* Specific chunk into which cursor points */ +}; + +/* +** This subclass is a subclass of sqlite3_file. Each open memory-journal +** is an instance of this class. +*/ +struct MemJournal { + sqlite3_io_methods *pMethod; /* Parent class. MUST BE FIRST */ + FileChunk *pFirst; /* Head of in-memory chunk-list */ + FilePoint endpoint; /* Pointer to the end of the file */ + FilePoint readpoint; /* Pointer to the end of the last xRead() */ +}; + +/* +** Read data from the in-memory journal file. This is the implementation +** of the sqlite3_vfs.xRead method. 
+*/ +static int memjrnlRead( + sqlite3_file *pJfd, /* The journal file from which to read */ + void *zBuf, /* Put the results here */ + int iAmt, /* Number of bytes to read */ + sqlite_int64 iOfst /* Begin reading at this offset */ +){ + MemJournal *p = (MemJournal *)pJfd; + u8 *zOut = zBuf; + int nRead = iAmt; + int iChunkOffset; + FileChunk *pChunk; + + /* SQLite never tries to read past the end of a rollback journal file */ + assert( iOfst+iAmt<=p->endpoint.iOffset ); + + if( p->readpoint.iOffset!=iOfst || iOfst==0 ){ + sqlite3_int64 iOff = 0; + for(pChunk=p->pFirst; + ALWAYS(pChunk) && (iOff+JOURNAL_CHUNKSIZE)<=iOfst; + pChunk=pChunk->pNext + ){ + iOff += JOURNAL_CHUNKSIZE; + } + }else{ + pChunk = p->readpoint.pChunk; + } + + iChunkOffset = (int)(iOfst%JOURNAL_CHUNKSIZE); + do { + int iSpace = JOURNAL_CHUNKSIZE - iChunkOffset; + int nCopy = MIN(nRead, (JOURNAL_CHUNKSIZE - iChunkOffset)); + memcpy(zOut, &pChunk->zChunk[iChunkOffset], nCopy); + zOut += nCopy; + nRead -= iSpace; + iChunkOffset = 0; + } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 ); + p->readpoint.iOffset = iOfst+iAmt; + p->readpoint.pChunk = pChunk; + + return SQLITE_OK; +} + +/* +** Write data to the file. +*/ +static int memjrnlWrite( + sqlite3_file *pJfd, /* The journal file into which to write */ + const void *zBuf, /* Take data to be written from here */ + int iAmt, /* Number of bytes to write */ + sqlite_int64 iOfst /* Begin writing at this offset into the file */ +){ + MemJournal *p = (MemJournal *)pJfd; + int nWrite = iAmt; + u8 *zWrite = (u8 *)zBuf; + + /* An in-memory journal file should only ever be appended to. Random + ** access writes are not required by sqlite. + */ + assert( iOfst==p->endpoint.iOffset ); + UNUSED_PARAMETER(iOfst); + + while( nWrite>0 ){ + FileChunk *pChunk = p->endpoint.pChunk; + int iChunkOffset = (int)(p->endpoint.iOffset%JOURNAL_CHUNKSIZE); + int iSpace = MIN(nWrite, JOURNAL_CHUNKSIZE - iChunkOffset); + + if( iChunkOffset==0 ){ + /* New chunk is required to extend the file. */ + FileChunk *pNew = sqlite3_malloc(sizeof(FileChunk)); + if( !pNew ){ + return SQLITE_IOERR_NOMEM; + } + pNew->pNext = 0; + if( pChunk ){ + assert( p->pFirst ); + pChunk->pNext = pNew; + }else{ + assert( !p->pFirst ); + p->pFirst = pNew; + } + p->endpoint.pChunk = pNew; + } + + memcpy(&p->endpoint.pChunk->zChunk[iChunkOffset], zWrite, iSpace); + zWrite += iSpace; + nWrite -= iSpace; + p->endpoint.iOffset += iSpace; + } + + return SQLITE_OK; +} + +/* +** Truncate the file. +*/ +static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ + MemJournal *p = (MemJournal *)pJfd; + FileChunk *pChunk; + assert(size==0); + UNUSED_PARAMETER(size); + pChunk = p->pFirst; + while( pChunk ){ + FileChunk *pTmp = pChunk; + pChunk = pChunk->pNext; + sqlite3_free(pTmp); + } + sqlite3MemJournalOpen(pJfd); + return SQLITE_OK; +} + +/* +** Close the file. +*/ +static int memjrnlClose(sqlite3_file *pJfd){ + memjrnlTruncate(pJfd, 0); + return SQLITE_OK; +} + + +/* +** Sync the file. +** +** Syncing an in-memory journal is a no-op. And, in fact, this routine +** is never called in a working implementation. This implementation +** exists purely as a contingency, in case some malfunction in some other +** part of SQLite causes Sync to be called by mistake. +*/ +static int memjrnlSync(sqlite3_file *NotUsed, int NotUsed2){ /*NO_TEST*/ + UNUSED_PARAMETER2(NotUsed, NotUsed2); /*NO_TEST*/ + assert( 0 ); /*NO_TEST*/ + return SQLITE_OK; /*NO_TEST*/ +} /*NO_TEST*/ + +/* +** Query the size of the file in bytes. 
+*/ +static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ + MemJournal *p = (MemJournal *)pJfd; + *pSize = (sqlite_int64) p->endpoint.iOffset; + return SQLITE_OK; +} + +/* +** Table of methods for MemJournal sqlite3_file object. +*/ +static struct sqlite3_io_methods MemJournalMethods = { + 1, /* iVersion */ + memjrnlClose, /* xClose */ + memjrnlRead, /* xRead */ + memjrnlWrite, /* xWrite */ + memjrnlTruncate, /* xTruncate */ + memjrnlSync, /* xSync */ + memjrnlFileSize, /* xFileSize */ + 0, /* xLock */ + 0, /* xUnlock */ + 0, /* xCheckReservedLock */ + 0, /* xFileControl */ + 0, /* xSectorSize */ + 0 /* xDeviceCharacteristics */ +}; + +/* +** Open a journal file. +*/ +void sqlite3MemJournalOpen(sqlite3_file *pJfd){ + MemJournal *p = (MemJournal *)pJfd; + assert( EIGHT_BYTE_ALIGNMENT(p) ); + memset(p, 0, sqlite3MemJournalSize()); + p->pMethod = &MemJournalMethods; +} + +/* +** Return true if the file-handle passed as an argument is +** an in-memory journal +*/ +int sqlite3IsMemJournal(sqlite3_file *pJfd){ + return pJfd->pMethods==&MemJournalMethods; +} + +/* +** Return the number of bytes required to store a MemJournal that uses vfs +** pVfs to create the underlying on-disk files. +*/ +int sqlite3MemJournalSize(void){ + return sizeof(MemJournal); +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex.c --- sqlite3-3.4.2/src/mutex.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,149 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes. +** +** This file contains code that is common across all mutex implementations. + +** +** $Id: mutex.c,v 1.30 2009/02/17 16:29:11 danielk1977 Exp $ +*/ +#include "sqliteInt.h" + +#ifndef SQLITE_MUTEX_OMIT +/* +** Initialize the mutex system. +*/ +int sqlite3MutexInit(void){ + int rc = SQLITE_OK; + if( sqlite3GlobalConfig.bCoreMutex ){ + if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ + /* If the xMutexAlloc method has not been set, then the user did not + ** install a mutex implementation via sqlite3_config() prior to + ** sqlite3_initialize() being called. This block copies pointers to + ** the default implementation into the sqlite3GlobalConfig structure. + ** + ** The danger is that although sqlite3_config() is not a threadsafe + ** API, sqlite3_initialize() is, and so multiple threads may be + ** attempting to run this function simultaneously. To guard write + ** access to the sqlite3GlobalConfig structure, the 'MASTER' static mutex + ** is obtained before modifying it. + */ + sqlite3_mutex_methods *p = sqlite3DefaultMutex(); + sqlite3_mutex *pMaster = 0; + + rc = p->xMutexInit(); + if( rc==SQLITE_OK ){ + pMaster = p->xMutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + assert(pMaster); + p->xMutexEnter(pMaster); + assert( sqlite3GlobalConfig.mutex.xMutexAlloc==0 + || sqlite3GlobalConfig.mutex.xMutexAlloc==p->xMutexAlloc + ); + if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ + sqlite3GlobalConfig.mutex = *p; + } + p->xMutexLeave(pMaster); + } + }else{ + rc = sqlite3GlobalConfig.mutex.xMutexInit(); + } + } + + return rc; +} + +/* +** Shutdown the mutex system. 
This call frees resources allocated by +** sqlite3MutexInit(). +*/ +int sqlite3MutexEnd(void){ + int rc = SQLITE_OK; + if( sqlite3GlobalConfig.mutex.xMutexEnd ){ + rc = sqlite3GlobalConfig.mutex.xMutexEnd(); + } + return rc; +} + +/* +** Retrieve a pointer to a static mutex or allocate a new dynamic one. +*/ +sqlite3_mutex *sqlite3_mutex_alloc(int id){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return sqlite3GlobalConfig.mutex.xMutexAlloc(id); +} + +sqlite3_mutex *sqlite3MutexAlloc(int id){ + if( !sqlite3GlobalConfig.bCoreMutex ){ + return 0; + } + return sqlite3GlobalConfig.mutex.xMutexAlloc(id); +} + +/* +** Free a dynamic mutex. +*/ +void sqlite3_mutex_free(sqlite3_mutex *p){ + if( p ){ + sqlite3GlobalConfig.mutex.xMutexFree(p); + } +} + +/* +** Obtain the mutex p. If some other thread already has the mutex, block +** until it can be obtained. +*/ +void sqlite3_mutex_enter(sqlite3_mutex *p){ + if( p ){ + sqlite3GlobalConfig.mutex.xMutexEnter(p); + } +} + +/* +** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another +** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY. +*/ +int sqlite3_mutex_try(sqlite3_mutex *p){ + int rc = SQLITE_OK; + if( p ){ + return sqlite3GlobalConfig.mutex.xMutexTry(p); + } + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was previously +** entered by the same thread. The behavior is undefined if the mutex +** is not currently entered. If a NULL pointer is passed as an argument +** this function is a no-op. +*/ +void sqlite3_mutex_leave(sqlite3_mutex *p){ + if( p ){ + sqlite3GlobalConfig.mutex.xMutexLeave(p); + } +} + +#ifndef NDEBUG +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +int sqlite3_mutex_held(sqlite3_mutex *p){ + return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p); +} +int sqlite3_mutex_notheld(sqlite3_mutex *p){ + return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p); +} +#endif + +#endif /* SQLITE_OMIT_MUTEX */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex.h --- sqlite3-3.4.2/src/mutex.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex.h 2009-06-12 03:28:38.000000000 +0100 @@ -0,0 +1,73 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the common header for all mutex implementations. +** The sqliteInt.h header #includes this file so that it is available +** to all source files. We break it out in an effort to keep the code +** better organized. +** +** NOTE: source files should *not* #include this header file directly. +** Source files should #include the sqliteInt.h file and let that file +** include this one indirectly. +** +** $Id: mutex.h,v 1.9 2008/10/07 15:25:48 drh Exp $ +*/ + + +/* +** Figure out what version of the code to use. The choices are +** +** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The +** mutexes implemention cannot be overridden +** at start-time. +** +** SQLITE_MUTEX_NOOP For single-threaded applications. No +** mutual exclusion is provided. But this +** implementation can be overridden at +** start-time. 
+** +** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix. +** +** SQLITE_MUTEX_W32 For multi-threaded applications on Win32. +** +** SQLITE_MUTEX_OS2 For multi-threaded applications on OS/2. +*/ +#if !SQLITE_THREADSAFE +# define SQLITE_MUTEX_OMIT +#endif +#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP) +# if SQLITE_OS_UNIX +# define SQLITE_MUTEX_PTHREADS +# elif SQLITE_OS_WIN +# define SQLITE_MUTEX_W32 +# elif SQLITE_OS_OS2 +# define SQLITE_MUTEX_OS2 +# else +# define SQLITE_MUTEX_NOOP +# endif +#endif + +#ifdef SQLITE_MUTEX_OMIT +/* +** If this is a no-op implementation, implement everything as macros. +*/ +#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8) +#define sqlite3_mutex_free(X) +#define sqlite3_mutex_enter(X) +#define sqlite3_mutex_try(X) SQLITE_OK +#define sqlite3_mutex_leave(X) +#define sqlite3_mutex_held(X) 1 +#define sqlite3_mutex_notheld(X) 1 +#define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8) +#define sqlite3MutexInit() SQLITE_OK +#define sqlite3MutexEnd() +#endif /* defined(SQLITE_OMIT_MUTEX) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex_noop.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex_noop.c --- sqlite3-3.4.2/src/mutex_noop.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex_noop.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,186 @@ +/* +** 2008 October 07 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes. +** +** This implementation in this file does not provide any mutual +** exclusion and is thus suitable for use only in applications +** that use SQLite in a single thread. The routines defined +** here are place-holders. Applications can substitute working +** mutex routines at start-time using the +** +** sqlite3_config(SQLITE_CONFIG_MUTEX,...) +** +** interface. +** +** If compiled with SQLITE_DEBUG, then additional logic is inserted +** that does error checking on mutexes to make sure they are being +** called correctly. +** +** $Id: mutex_noop.c,v 1.3 2008/12/05 17:17:08 drh Exp $ +*/ +#include "sqliteInt.h" + + +#if defined(SQLITE_MUTEX_NOOP) && !defined(SQLITE_DEBUG) +/* +** Stub routines for all mutex methods. +** +** This routines provide no mutual exclusion or error checking. 
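+**
+** As noted in the header comment above, an application that needs real
+** mutual exclusion can install its own implementation before
+** sqlite3_initialize() runs.  A sketch only; the "appMutexMethods"
+** structure and its nine method pointers are supplied by the application:
+**
+**     static sqlite3_mutex_methods appMutexMethods = { /* ... */ };
+**     sqlite3_config(SQLITE_CONFIG_MUTEX, &appMutexMethods);
+**     sqlite3_initialize();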
+*/ +static int noopMutexHeld(sqlite3_mutex *p){ return 1; } +static int noopMutexNotheld(sqlite3_mutex *p){ return 1; } +static int noopMutexInit(void){ return SQLITE_OK; } +static int noopMutexEnd(void){ return SQLITE_OK; } +static sqlite3_mutex *noopMutexAlloc(int id){ return (sqlite3_mutex*)8; } +static void noopMutexFree(sqlite3_mutex *p){ return; } +static void noopMutexEnter(sqlite3_mutex *p){ return; } +static int noopMutexTry(sqlite3_mutex *p){ return SQLITE_OK; } +static void noopMutexLeave(sqlite3_mutex *p){ return; } + +sqlite3_mutex_methods *sqlite3DefaultMutex(void){ + static sqlite3_mutex_methods sMutex = { + noopMutexInit, + noopMutexEnd, + noopMutexAlloc, + noopMutexFree, + noopMutexEnter, + noopMutexTry, + noopMutexLeave, + + noopMutexHeld, + noopMutexNotheld + }; + + return &sMutex; +} +#endif /* defined(SQLITE_MUTEX_NOOP) && !defined(SQLITE_DEBUG) */ + +#if defined(SQLITE_MUTEX_NOOP) && defined(SQLITE_DEBUG) +/* +** In this implementation, error checking is provided for testing +** and debugging purposes. The mutexes still do not provide any +** mutual exclusion. +*/ + +/* +** The mutex object +*/ +struct sqlite3_mutex { + int id; /* The mutex type */ + int cnt; /* Number of entries without a matching leave */ +}; + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +static int debugMutexHeld(sqlite3_mutex *p){ + return p==0 || p->cnt>0; +} +static int debugMutexNotheld(sqlite3_mutex *p){ + return p==0 || p->cnt==0; +} + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static int debugMutexInit(void){ return SQLITE_OK; } +static int debugMutexEnd(void){ return SQLITE_OK; } + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. +*/ +static sqlite3_mutex *debugMutexAlloc(int id){ + static sqlite3_mutex aStatic[6]; + sqlite3_mutex *pNew = 0; + switch( id ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + pNew = sqlite3Malloc(sizeof(*pNew)); + if( pNew ){ + pNew->id = id; + pNew->cnt = 0; + } + break; + } + default: { + assert( id-2 >= 0 ); + assert( id-2 < (int)(sizeof(aStatic)/sizeof(aStatic[0])) ); + pNew = &aStatic[id-2]; + pNew->id = id; + break; + } + } + return pNew; +} + +/* +** This routine deallocates a previously allocated mutex. +*/ +static void debugMutexFree(sqlite3_mutex *p){ + assert( p->cnt==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. 
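+**
+** For example, the following (illustrative) sequence is legal for a
+** recursive mutex but not for any other kind:
+**
+**     sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
+**     sqlite3_mutex_enter(p);
+**     sqlite3_mutex_enter(p);   /* same thread re-enters */
+**     sqlite3_mutex_leave(p);
+**     sqlite3_mutex_leave(p);   /* exited once per enter */
+**     sqlite3_mutex_free(p);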
+*/ +static void debugMutexEnter(sqlite3_mutex *p){ + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); + p->cnt++; +} +static int debugMutexTry(sqlite3_mutex *p){ + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); + p->cnt++; + return SQLITE_OK; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +static void debugMutexLeave(sqlite3_mutex *p){ + assert( debugMutexHeld(p) ); + p->cnt--; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(p) ); +} + +sqlite3_mutex_methods *sqlite3DefaultMutex(void){ + static sqlite3_mutex_methods sMutex = { + debugMutexInit, + debugMutexEnd, + debugMutexAlloc, + debugMutexFree, + debugMutexEnter, + debugMutexTry, + debugMutexLeave, + + debugMutexHeld, + debugMutexNotheld + }; + + return &sMutex; +} +#endif /* defined(SQLITE_MUTEX_NOOP) && defined(SQLITE_DEBUG) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex_os2.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex_os2.c --- sqlite3-3.4.2/src/mutex_os2.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex_os2.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,273 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for OS/2 +** +** $Id: mutex_os2.c,v 1.11 2008/11/22 19:50:54 pweilbacher Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if SQLITE_MUTEX_OS2 is defined. +** See the mutex.h file for details. +*/ +#ifdef SQLITE_MUTEX_OS2 + +/********************** OS/2 Mutex Implementation ********************** +** +** This implementation of mutexes is built using the OS/2 API. +*/ + +/* +** The mutex object +** Each recursive mutex is an instance of the following structure. +*/ +struct sqlite3_mutex { + HMTX mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of references */ + TID owner; /* Thread holding this mutex */ +}; + +#define OS2_MUTEX_INITIALIZER 0,0,0,0 + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static int os2MutexInit(void){ return SQLITE_OK; } +static int os2MutexEnd(void){ return SQLITE_OK; } + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. +** SQLite will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
+** <ul>
+** <li>  SQLITE_MUTEX_FAST               0
+** <li>  SQLITE_MUTEX_RECURSIVE          1
+** <li>  SQLITE_MUTEX_STATIC_MASTER      2
+** <li>  SQLITE_MUTEX_STATIC_MEM         3
+** <li>  SQLITE_MUTEX_STATIC_PRNG        4
+** </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +static sqlite3_mutex *os2MutexAlloc(int iType){ + sqlite3_mutex *p = NULL; + switch( iType ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->id = iType; + if( DosCreateMutexSem( 0, &p->mutex, 0, FALSE ) != NO_ERROR ){ + sqlite3_free( p ); + p = NULL; + } + } + break; + } + default: { + static volatile int isInit = 0; + static sqlite3_mutex staticMutexes[] = { + { OS2_MUTEX_INITIALIZER, }, + { OS2_MUTEX_INITIALIZER, }, + { OS2_MUTEX_INITIALIZER, }, + { OS2_MUTEX_INITIALIZER, }, + { OS2_MUTEX_INITIALIZER, }, + { OS2_MUTEX_INITIALIZER, }, + }; + if ( !isInit ){ + APIRET rc; + PTIB ptib; + PPIB ppib; + HMTX mutex; + char name[32]; + DosGetInfoBlocks( &ptib, &ppib ); + sqlite3_snprintf( sizeof(name), name, "\\SEM32\\SQLITE%04x", + ppib->pib_ulpid ); + while( !isInit ){ + mutex = 0; + rc = DosCreateMutexSem( name, &mutex, 0, FALSE); + if( rc == NO_ERROR ){ + unsigned int i; + if( !isInit ){ + for( i = 0; i < sizeof(staticMutexes)/sizeof(staticMutexes[0]); i++ ){ + DosCreateMutexSem( 0, &staticMutexes[i].mutex, 0, FALSE ); + } + isInit = 1; + } + DosCloseMutexSem( mutex ); + }else if( rc == ERROR_DUPLICATE_NAME ){ + DosSleep( 1 ); + }else{ + return p; + } + } + } + assert( iType-2 >= 0 ); + assert( iType-2 < sizeof(staticMutexes)/sizeof(staticMutexes[0]) ); + p = &staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously allocated mutex. +** SQLite is careful to deallocate every mutex that it allocates. +*/ +static void os2MutexFree(sqlite3_mutex *p){ + if( p==0 ) return; + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + DosCloseMutexSem( p->mutex ); + sqlite3_free( p ); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. 
In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +static void os2MutexEnter(sqlite3_mutex *p){ + TID tid; + PID holder1; + ULONG holder2; + if( p==0 ) return; + assert( p->id==SQLITE_MUTEX_RECURSIVE || os2MutexNotheld(p) ); + DosRequestMutexSem(p->mutex, SEM_INDEFINITE_WAIT); + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + p->owner = tid; + p->nRef++; +} +static int os2MutexTry(sqlite3_mutex *p){ + int rc; + TID tid; + PID holder1; + ULONG holder2; + if( p==0 ) return SQLITE_OK; + assert( p->id==SQLITE_MUTEX_RECURSIVE || os2MutexNotheld(p) ); + if( DosRequestMutexSem(p->mutex, SEM_IMMEDIATE_RETURN) == NO_ERROR) { + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + p->owner = tid; + p->nRef++; + rc = SQLITE_OK; + } else { + rc = SQLITE_BUSY; + } + + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +static void os2MutexLeave(sqlite3_mutex *p){ + TID tid; + PID holder1; + ULONG holder2; + if( p==0 ) return; + assert( p->nRef>0 ); + DosQueryMutexSem(p->mutex, &holder1, &tid, &holder2); + assert( p->owner==tid ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + DosReleaseMutexSem(p->mutex); +} + +#ifdef SQLITE_DEBUG +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +static int os2MutexHeld(sqlite3_mutex *p){ + TID tid; + PID pid; + ULONG ulCount; + PTIB ptib; + if( p!=0 ) { + DosQueryMutexSem(p->mutex, &pid, &tid, &ulCount); + } else { + DosGetInfoBlocks(&ptib, NULL); + tid = ptib->tib_ptib2->tib2_ultid; + } + return p==0 || (p->nRef!=0 && p->owner==tid); +} +static int os2MutexNotheld(sqlite3_mutex *p){ + TID tid; + PID pid; + ULONG ulCount; + PTIB ptib; + if( p!= 0 ) { + DosQueryMutexSem(p->mutex, &pid, &tid, &ulCount); + } else { + DosGetInfoBlocks(&ptib, NULL); + tid = ptib->tib_ptib2->tib2_ultid; + } + return p==0 || p->nRef==0 || p->owner!=tid; +} +#endif + +sqlite3_mutex_methods *sqlite3DefaultMutex(void){ + static sqlite3_mutex_methods sMutex = { + os2MutexInit, + os2MutexEnd, + os2MutexAlloc, + os2MutexFree, + os2MutexEnter, + os2MutexTry, + os2MutexLeave, +#ifdef SQLITE_DEBUG + os2MutexHeld, + os2MutexNotheld +#endif + }; + + return &sMutex; +} +#endif /* SQLITE_MUTEX_OS2 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex_unix.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex_unix.c --- sqlite3-3.4.2/src/mutex_unix.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex_unix.c 2009-06-12 03:37:47.000000000 +0100 @@ -0,0 +1,328 @@ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for pthreads +** +** $Id: mutex_unix.c,v 1.16 2008/12/08 18:19:18 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if we are compiling threadsafe +** under unix with pthreads. 
+** +** Note that this implementation requires a version of pthreads that +** supports recursive mutexes. +*/ +#ifdef SQLITE_MUTEX_PTHREADS + +#include + + +/* +** Each recursive mutex is an instance of the following structure. +*/ +struct sqlite3_mutex { + pthread_mutex_t mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of entrances */ + pthread_t owner; /* Thread that is within this mutex */ +#ifdef SQLITE_DEBUG + int trace; /* True to trace changes */ +#endif +}; +#ifdef SQLITE_DEBUG +#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0, 0 } +#else +#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0 } +#endif + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use only inside assert() statements. On some platforms, +** there might be race conditions that can cause these routines to +** deliver incorrect results. In particular, if pthread_equal() is +** not an atomic operation, then these routines might delivery +** incorrect results. On most platforms, pthread_equal() is a +** comparison of two integers and is therefore atomic. But we are +** told that HPUX is not such a platform. If so, then these routines +** will not always work correctly on HPUX. +** +** On those platforms where pthread_equal() is not atomic, SQLite +** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to +** make sure no assert() statements are evaluated and hence these +** routines are never called. +*/ +#if !defined(NDEBUG) || defined(SQLITE_DEBUG) +static int pthreadMutexHeld(sqlite3_mutex *p){ + return (p->nRef!=0 && pthread_equal(p->owner, pthread_self())); +} +static int pthreadMutexNotheld(sqlite3_mutex *p){ + return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0; +} +#endif + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static int pthreadMutexInit(void){ return SQLITE_OK; } +static int pthreadMutexEnd(void){ return SQLITE_OK; } + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. SQLite +** will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
+** <ul>
+** <li>  SQLITE_MUTEX_FAST
+** <li>  SQLITE_MUTEX_RECURSIVE
+** <li>  SQLITE_MUTEX_STATIC_MASTER
+** <li>  SQLITE_MUTEX_STATIC_MEM
+** <li>  SQLITE_MUTEX_STATIC_MEM2
+** <li>  SQLITE_MUTEX_STATIC_PRNG
+** <li>  SQLITE_MUTEX_STATIC_LRU
+** </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +static sqlite3_mutex *pthreadMutexAlloc(int iType){ + static sqlite3_mutex staticMutexes[] = { + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER + }; + sqlite3_mutex *p; + switch( iType ){ + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, we will have to + ** build our own. See below. */ + pthread_mutex_init(&p->mutex, 0); +#else + /* Use a recursive mutex if it is available */ + pthread_mutexattr_t recursiveAttr; + pthread_mutexattr_init(&recursiveAttr); + pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&p->mutex, &recursiveAttr); + pthread_mutexattr_destroy(&recursiveAttr); +#endif + p->id = iType; + } + break; + } + case SQLITE_MUTEX_FAST: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->id = iType; + pthread_mutex_init(&p->mutex, 0); + } + break; + } + default: { + assert( iType-2 >= 0 ); + assert( iType-2 < ArraySize(staticMutexes) ); + p = &staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously +** allocated mutex. SQLite is careful to deallocate every +** mutex that it allocates. +*/ +static void pthreadMutexFree(sqlite3_mutex *p){ + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + pthread_mutex_destroy(&p->mutex); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. 
If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +static void pthreadMutexEnter(sqlite3_mutex *p){ + assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, then we have to grow + ** our own. This implementation assumes that pthread_equal() + ** is atomic - that it cannot be deceived into thinking self + ** and p->owner are equal if p->owner changes between two values + ** that are not equal to self while the comparison is taking place. + ** This implementation also assumes a coherent cache - that + ** separate processes cannot read different values from the same + ** address at the same time. If either of these two conditions + ** are not met, then the mutexes will fail and problems will result. + */ + { + pthread_t self = pthread_self(); + if( p->nRef>0 && pthread_equal(p->owner, self) ){ + p->nRef++; + }else{ + pthread_mutex_lock(&p->mutex); + assert( p->nRef==0 ); + p->owner = self; + p->nRef = 1; + } + } +#else + /* Use the built-in recursive mutexes if they are available. + */ + pthread_mutex_lock(&p->mutex); + p->owner = pthread_self(); + p->nRef++; +#endif + +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif +} +static int pthreadMutexTry(sqlite3_mutex *p){ + int rc; + assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, then we have to grow + ** our own. This implementation assumes that pthread_equal() + ** is atomic - that it cannot be deceived into thinking self + ** and p->owner are equal if p->owner changes between two values + ** that are not equal to self while the comparison is taking place. + ** This implementation also assumes a coherent cache - that + ** separate processes cannot read different values from the same + ** address at the same time. If either of these two conditions + ** are not met, then the mutexes will fail and problems will result. + */ + { + pthread_t self = pthread_self(); + if( p->nRef>0 && pthread_equal(p->owner, self) ){ + p->nRef++; + rc = SQLITE_OK; + }else if( pthread_mutex_trylock(&p->mutex)==0 ){ + assert( p->nRef==0 ); + p->owner = self; + p->nRef = 1; + rc = SQLITE_OK; + }else{ + rc = SQLITE_BUSY; + } + } +#else + /* Use the built-in recursive mutexes if they are available. + */ + if( pthread_mutex_trylock(&p->mutex)==0 ){ + p->owner = pthread_self(); + p->nRef++; + rc = SQLITE_OK; + }else{ + rc = SQLITE_BUSY; + } +#endif + +#ifdef SQLITE_DEBUG + if( rc==SQLITE_OK && p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. 
+*/ +static void pthreadMutexLeave(sqlite3_mutex *p){ + assert( pthreadMutexHeld(p) ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + if( p->nRef==0 ){ + pthread_mutex_unlock(&p->mutex); + } +#else + pthread_mutex_unlock(&p->mutex); +#endif + +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif +} + +sqlite3_mutex_methods *sqlite3DefaultMutex(void){ + static sqlite3_mutex_methods sMutex = { + pthreadMutexInit, + pthreadMutexEnd, + pthreadMutexAlloc, + pthreadMutexFree, + pthreadMutexEnter, + pthreadMutexTry, + pthreadMutexLeave, +#ifdef SQLITE_DEBUG + pthreadMutexHeld, + pthreadMutexNotheld +#else + 0, + 0 +#endif + }; + + return &sMutex; +} + +#endif /* SQLITE_MUTEX_PTHREAD */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/mutex_w32.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/mutex_w32.c --- sqlite3-3.4.2/src/mutex_w32.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/mutex_w32.c 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,280 @@ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for win32 +** +** $Id: mutex_w32.c,v 1.17 2009/06/01 17:10:22 shane Exp $ +*/ +#include "sqliteInt.h" + +/* +** The code in this file is only used if we are compiling multithreaded +** on a win32 system. +*/ +#ifdef SQLITE_MUTEX_W32 + +/* +** Each recursive mutex is an instance of the following structure. +*/ +struct sqlite3_mutex { + CRITICAL_SECTION mutex; /* Mutex controlling the lock */ + int id; /* Mutex type */ + int nRef; /* Number of enterances */ + DWORD owner; /* Thread holding this mutex */ +}; + +/* +** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, +** or WinCE. Return false (zero) for Win95, Win98, or WinME. +** +** Here is an interesting observation: Win95, Win98, and WinME lack +** the LockFileEx() API. But we can still statically link against that +** API as long as we don't call it win running Win95/98/ME. A call to +** this routine is used to determine if the host is Win95/98/ME or +** WinNT/2K/XP so that we will know whether or not we can safely call +** the LockFileEx() API. +** +** mutexIsNT() is only used for the TryEnterCriticalSection() API call, +** which is only available if your application was compiled with +** _WIN32_WINNT defined to a value >= 0x0400. Currently, the only +** call to TryEnterCriticalSection() is #ifdef'ed out, so #ifdef +** this out as well. +*/ +#if 0 +#if SQLITE_OS_WINCE +# define mutexIsNT() (1) +#else + static int mutexIsNT(void){ + static int osType = 0; + if( osType==0 ){ + OSVERSIONINFO sInfo; + sInfo.dwOSVersionInfoSize = sizeof(sInfo); + GetVersionEx(&sInfo); + osType = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1; + } + return osType==2; + } +#endif /* SQLITE_OS_WINCE */ +#endif + +#ifdef SQLITE_DEBUG +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use only inside assert() statements. 
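+**
+** Typical (illustrative) use, with pMutex being whatever mutex the
+** caller is expected to hold:
+**
+**     assert( sqlite3_mutex_held(pMutex) );
+**     assert( sqlite3_mutex_notheld(pMutex) );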
+*/ +static int winMutexHeld(sqlite3_mutex *p){ + return p->nRef!=0 && p->owner==GetCurrentThreadId(); +} +static int winMutexNotheld(sqlite3_mutex *p){ + return p->nRef==0 || p->owner!=GetCurrentThreadId(); +} +#endif + + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static sqlite3_mutex winMutex_staticMutexes[6]; +static int winMutex_isInit = 0; +/* As winMutexInit() and winMutexEnd() are called as part +** of the sqlite3_initialize and sqlite3_shutdown() +** processing, the "interlocked" magic is probably not +** strictly necessary. +*/ +static long winMutex_lock = 0; + +static int winMutexInit(void){ + /* The first to increment to 1 does actual initialization */ + if( InterlockedIncrement(&winMutex_lock)==1 ){ + int i; + for(i=0; i +**
  • SQLITE_MUTEX_FAST 0 +**
  • SQLITE_MUTEX_RECURSIVE 1 +**
  • SQLITE_MUTEX_STATIC_MASTER 2 +**
  • SQLITE_MUTEX_STATIC_MEM 3 +**
  • SQLITE_MUTEX_STATIC_PRNG 4 +** +** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Three static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +static sqlite3_mutex *winMutexAlloc(int iType){ + sqlite3_mutex *p; + + switch( iType ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + p->id = iType; + InitializeCriticalSection(&p->mutex); + } + break; + } + default: { + assert( winMutex_isInit==1 ); + assert( iType-2 >= 0 ); + assert( iType-2 < sizeof(winMutex_staticMutexes)/sizeof(winMutex_staticMutexes[0]) ); + p = &winMutex_staticMutexes[iType-2]; + p->id = iType; + break; + } + } + return p; +} + + +/* +** This routine deallocates a previously +** allocated mutex. SQLite is careful to deallocate every +** mutex that it allocates. +*/ +static void winMutexFree(sqlite3_mutex *p){ + assert( p ); + assert( p->nRef==0 ); + assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); + DeleteCriticalSection(&p->mutex); + sqlite3_free(p); +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +static void winMutexEnter(sqlite3_mutex *p){ + assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld(p) ); + EnterCriticalSection(&p->mutex); + p->owner = GetCurrentThreadId(); + p->nRef++; +} +static int winMutexTry(sqlite3_mutex *p){ + int rc = SQLITE_BUSY; + assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld(p) ); + /* + ** The sqlite3_mutex_try() routine is very rarely used, and when it + ** is used it is merely an optimization. So it is OK for it to always + ** fail. + ** + ** The TryEnterCriticalSection() interface is only available on WinNT. 
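From the application side, the allocation rules described above reduce to a simple pattern: dynamic mutexes are created, used, and freed by the caller, while the static types hand back a shared object that must never be freed. A minimal usage sketch with the public sqlite3_mutex_* interfaces:

#include "sqlite3.h"

void mutexUsageExample(void){
  /* Dynamic: a fresh object on every call; the caller must free it. */
  sqlite3_mutex *m = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  if( m==0 ) return;              /* Allocation can fail */
  sqlite3_mutex_enter(m);
  sqlite3_mutex_enter(m);         /* Recursive mutexes may be re-entered */
  sqlite3_mutex_leave(m);
  sqlite3_mutex_leave(m);
  sqlite3_mutex_free(m);
  /* Static types (e.g. SQLITE_MUTEX_STATIC_MASTER) return the same shared
  ** object on every call, are never freed, and are reserved for SQLite's
  ** internal use. */
}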
+ ** And some windows compilers complain if you try to use it without + ** first doing some #defines that prevent SQLite from building on Win98. + ** For that reason, we will omit this optimization for now. See + ** ticket #2685. + */ +#if 0 + if( mutexIsNT() && TryEnterCriticalSection(&p->mutex) ){ + p->owner = GetCurrentThreadId(); + p->nRef++; + rc = SQLITE_OK; + } +#else + UNUSED_PARAMETER(p); +#endif + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +static void winMutexLeave(sqlite3_mutex *p){ + assert( p->nRef>0 ); + assert( p->owner==GetCurrentThreadId() ); + p->nRef--; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + LeaveCriticalSection(&p->mutex); +} + +sqlite3_mutex_methods *sqlite3DefaultMutex(void){ + static sqlite3_mutex_methods sMutex = { + winMutexInit, + winMutexEnd, + winMutexAlloc, + winMutexFree, + winMutexEnter, + winMutexTry, + winMutexLeave, +#ifdef SQLITE_DEBUG + winMutexHeld, + winMutexNotheld +#else + 0, + 0 +#endif + }; + + return &sMutex; +} +#endif /* SQLITE_MUTEX_W32 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/notify.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/notify.c --- sqlite3-3.4.2/src/notify.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/notify.c 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,333 @@ +/* +** 2009 March 3 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the implementation of the sqlite3_unlock_notify() +** API method and its associated functionality. +** +** $Id: notify.c,v 1.4 2009/04/07 22:06:57 drh Exp $ +*/ +#include "sqliteInt.h" +#include "btreeInt.h" + +/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. */ +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + +/* +** Public interfaces: +** +** sqlite3ConnectionBlocked() +** sqlite3ConnectionUnlocked() +** sqlite3ConnectionClosed() +** sqlite3_unlock_notify() +*/ + +#define assertMutexHeld() \ + assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ) + +/* +** Head of a linked list of all sqlite3 objects created by this process +** for which either sqlite3.pBlockingConnection or sqlite3.pUnlockConnection +** is not NULL. This variable may only accessed while the STATIC_MASTER +** mutex is held. +*/ +static sqlite3 *SQLITE_WSD sqlite3BlockedList = 0; + +#ifndef NDEBUG +/* +** This function is a complex assert() that verifies the following +** properties of the blocked connections list: +** +** 1) Each entry in the list has a non-NULL value for either +** pUnlockConnection or pBlockingConnection, or both. +** +** 2) All entries in the list that share a common value for +** xUnlockNotify are grouped together. +** +** 3) If the argument db is not NULL, then none of the entries in the +** blocked connections list have pUnlockConnection or pBlockingConnection +** set to db. This is used when closing connection db. 
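The public entry point listed above, sqlite3_unlock_notify(), is usually wrapped by applications in a small helper that blocks until the notification arrives. The sketch below shows the common condition-variable pattern; it assumes a build with SQLITE_ENABLE_UNLOCK_NOTIFY and pthreads, and the UnlockCtx/waitForUnlock names are illustrative only.

#include <pthread.h>
#include "sqlite3.h"

/* State shared between the waiting thread and the notify callback. */
typedef struct UnlockCtx {
  int fired;                      /* Set once the callback has run */
  pthread_mutex_t mu;
  pthread_cond_t cond;
} UnlockCtx;

/* Called by SQLite when the blocking connection releases its locks.
** aArg[] carries one context pointer per blocked waiter. */
static void unlockNotifyCb(void **aArg, int nArg){
  int i;
  for(i=0; i<nArg; i++){
    UnlockCtx *p = (UnlockCtx*)aArg[i];
    pthread_mutex_lock(&p->mu);
    p->fired = 1;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->mu);
  }
}

/* Block until the lock holder finishes, or return SQLITE_LOCKED at once
** if registering the callback would create a deadlock. */
static int waitForUnlock(sqlite3 *db){
  UnlockCtx ctx;
  int rc;
  ctx.fired = 0;
  pthread_mutex_init(&ctx.mu, 0);
  pthread_cond_init(&ctx.cond, 0);
  rc = sqlite3_unlock_notify(db, unlockNotifyCb, (void*)&ctx);
  if( rc==SQLITE_OK ){
    pthread_mutex_lock(&ctx.mu);
    while( !ctx.fired ) pthread_cond_wait(&ctx.cond, &ctx.mu);
    pthread_mutex_unlock(&ctx.mu);
  }
  pthread_cond_destroy(&ctx.cond);
  pthread_mutex_destroy(&ctx.mu);
  return rc;
}

A typical caller invokes such a helper after sqlite3_step() or sqlite3_prepare_v2() returns SQLITE_LOCKED, then resets and retries the statement.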
+*/ +static void checkListProperties(sqlite3 *db){ + sqlite3 *p; + for(p=sqlite3BlockedList; p; p=p->pNextBlocked){ + int seen = 0; + sqlite3 *p2; + + /* Verify property (1) */ + assert( p->pUnlockConnection || p->pBlockingConnection ); + + /* Verify property (2) */ + for(p2=sqlite3BlockedList; p2!=p; p2=p2->pNextBlocked){ + if( p2->xUnlockNotify==p->xUnlockNotify ) seen = 1; + assert( p2->xUnlockNotify==p->xUnlockNotify || !seen ); + assert( db==0 || p->pUnlockConnection!=db ); + assert( db==0 || p->pBlockingConnection!=db ); + } + } +} +#else +# define checkListProperties(x) +#endif + +/* +** Remove connection db from the blocked connections list. If connection +** db is not currently a part of the list, this function is a no-op. +*/ +static void removeFromBlockedList(sqlite3 *db){ + sqlite3 **pp; + assertMutexHeld(); + for(pp=&sqlite3BlockedList; *pp; pp = &(*pp)->pNextBlocked){ + if( *pp==db ){ + *pp = (*pp)->pNextBlocked; + break; + } + } +} + +/* +** Add connection db to the blocked connections list. It is assumed +** that it is not already a part of the list. +*/ +static void addToBlockedList(sqlite3 *db){ + sqlite3 **pp; + assertMutexHeld(); + for( + pp=&sqlite3BlockedList; + *pp && (*pp)->xUnlockNotify!=db->xUnlockNotify; + pp=&(*pp)->pNextBlocked + ); + db->pNextBlocked = *pp; + *pp = db; +} + +/* +** Obtain the STATIC_MASTER mutex. +*/ +static void enterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); + checkListProperties(0); +} + +/* +** Release the STATIC_MASTER mutex. +*/ +static void leaveMutex(void){ + assertMutexHeld(); + checkListProperties(0); + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} + +/* +** Register an unlock-notify callback. +** +** This is called after connection "db" has attempted some operation +** but has received an SQLITE_LOCKED error because another connection +** (call it pOther) in the same process was busy using the same shared +** cache. pOther is found by looking at db->pBlockingConnection. +** +** If there is no blocking connection, the callback is invoked immediately, +** before this routine returns. +** +** If pOther is already blocked on db, then report SQLITE_LOCKED, to indicate +** a deadlock. +** +** Otherwise, make arrangements to invoke xNotify when pOther drops +** its locks. +** +** Each call to this routine overrides any prior callbacks registered +** on the same "db". If xNotify==0 then any prior callbacks are immediately +** cancelled. +*/ +int sqlite3_unlock_notify( + sqlite3 *db, + void (*xNotify)(void **, int), + void *pArg +){ + int rc = SQLITE_OK; + + sqlite3_mutex_enter(db->mutex); + enterMutex(); + + if( xNotify==0 ){ + removeFromBlockedList(db); + db->pUnlockConnection = 0; + db->xUnlockNotify = 0; + db->pUnlockArg = 0; + }else if( 0==db->pBlockingConnection ){ + /* The blocking transaction has been concluded. Or there never was a + ** blocking transaction. In either case, invoke the notify callback + ** immediately. + */ + xNotify(&pArg, 1); + }else{ + sqlite3 *p; + + for(p=db->pBlockingConnection; p && p!=db; p=p->pUnlockConnection){} + if( p ){ + rc = SQLITE_LOCKED; /* Deadlock detected. 
*/ + }else{ + db->pUnlockConnection = db->pBlockingConnection; + db->xUnlockNotify = xNotify; + db->pUnlockArg = pArg; + removeFromBlockedList(db); + addToBlockedList(db); + } + } + + leaveMutex(); + assert( !db->mallocFailed ); + sqlite3Error(db, rc, (rc?"database is deadlocked":0)); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** This function is called while stepping or preparing a statement +** associated with connection db. The operation will return SQLITE_LOCKED +** to the user because it requires a lock that will not be available +** until connection pBlocker concludes its current transaction. +*/ +void sqlite3ConnectionBlocked(sqlite3 *db, sqlite3 *pBlocker){ + enterMutex(); + if( db->pBlockingConnection==0 && db->pUnlockConnection==0 ){ + addToBlockedList(db); + } + db->pBlockingConnection = pBlocker; + leaveMutex(); +} + +/* +** This function is called when +** the transaction opened by database db has just finished. Locks held +** by database connection db have been released. +** +** This function loops through each entry in the blocked connections +** list and does the following: +** +** 1) If the sqlite3.pBlockingConnection member of a list entry is +** set to db, then set pBlockingConnection=0. +** +** 2) If the sqlite3.pUnlockConnection member of a list entry is +** set to db, then invoke the configured unlock-notify callback and +** set pUnlockConnection=0. +** +** 3) If the two steps above mean that pBlockingConnection==0 and +** pUnlockConnection==0, remove the entry from the blocked connections +** list. +*/ +void sqlite3ConnectionUnlocked(sqlite3 *db){ + void (*xUnlockNotify)(void **, int) = 0; /* Unlock-notify cb to invoke */ + int nArg = 0; /* Number of entries in aArg[] */ + sqlite3 **pp; /* Iterator variable */ + void **aArg; /* Arguments to the unlock callback */ + void **aDyn = 0; /* Dynamically allocated space for aArg[] */ + void *aStatic[16]; /* Starter space for aArg[]. No malloc required */ + + aArg = aStatic; + enterMutex(); /* Enter STATIC_MASTER mutex */ + + /* This loop runs once for each entry in the blocked-connections list. */ + for(pp=&sqlite3BlockedList; *pp; /* no-op */ ){ + sqlite3 *p = *pp; + + /* Step 1. */ + if( p->pBlockingConnection==db ){ + p->pBlockingConnection = 0; + } + + /* Step 2. */ + if( p->pUnlockConnection==db ){ + assert( p->xUnlockNotify ); + if( p->xUnlockNotify!=xUnlockNotify && nArg!=0 ){ + xUnlockNotify(aArg, nArg); + nArg = 0; + } + + sqlite3BeginBenignMalloc(); + assert( aArg==aDyn || (aDyn==0 && aArg==aStatic) ); + assert( nArg<=(int)ArraySize(aStatic) || aArg==aDyn ); + if( (!aDyn && nArg==(int)ArraySize(aStatic)) + || (aDyn && nArg==(int)(sqlite3DbMallocSize(db, aDyn)/sizeof(void*))) + ){ + /* The aArg[] array needs to grow. */ + void **pNew = (void **)sqlite3Malloc(nArg*sizeof(void *)*2); + if( pNew ){ + memcpy(pNew, aArg, nArg*sizeof(void *)); + sqlite3_free(aDyn); + aDyn = aArg = pNew; + }else{ + /* This occurs when the array of context pointers that need to + ** be passed to the unlock-notify callback is larger than the + ** aStatic[] array allocated on the stack and the attempt to + ** allocate a larger array from the heap has failed. + ** + ** This is a difficult situation to handle. Returning an error + ** code to the caller is insufficient, as even if an error code + ** is returned the transaction on connection db will still be + ** closed and the unlock-notify callbacks on blocked connections + ** will go unissued. 
This might cause the application to wait + ** indefinitely for an unlock-notify callback that will never + ** arrive. + ** + ** Instead, invoke the unlock-notify callback with the context + ** array already accumulated. We can then clear the array and + ** begin accumulating any further context pointers without + ** requiring any dynamic allocation. This is sub-optimal because + ** it means that instead of one callback with a large array of + ** context pointers the application will receive two or more + ** callbacks with smaller arrays of context pointers, which will + ** reduce the applications ability to prioritize multiple + ** connections. But it is the best that can be done under the + ** circumstances. + */ + xUnlockNotify(aArg, nArg); + nArg = 0; + } + } + sqlite3EndBenignMalloc(); + + aArg[nArg++] = p->pUnlockArg; + xUnlockNotify = p->xUnlockNotify; + p->pUnlockConnection = 0; + p->xUnlockNotify = 0; + p->pUnlockArg = 0; + } + + /* Step 3. */ + if( p->pBlockingConnection==0 && p->pUnlockConnection==0 ){ + /* Remove connection p from the blocked connections list. */ + *pp = p->pNextBlocked; + p->pNextBlocked = 0; + }else{ + pp = &p->pNextBlocked; + } + } + + if( nArg!=0 ){ + xUnlockNotify(aArg, nArg); + } + sqlite3_free(aDyn); + leaveMutex(); /* Leave STATIC_MASTER mutex */ +} + +/* +** This is called when the database connection passed as an argument is +** being closed. The connection is removed from the blocked list. +*/ +void sqlite3ConnectionClosed(sqlite3 *db){ + sqlite3ConnectionUnlocked(db); + enterMutex(); + removeFromBlockedList(db); + checkListProperties(db); + leaveMutex(); +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os.c --- sqlite3-3.4.2/src/os.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/os.c 2009-06-25 12:22:33.000000000 +0100 @@ -12,85 +12,270 @@ ** ** This file contains OS interface code that is common to all ** architectures. +** +** $Id: os.c,v 1.126 2009/03/25 14:24:42 drh Exp $ */ #define _SQLITE_OS_C_ 1 #include "sqliteInt.h" -#include "os.h" #undef _SQLITE_OS_C_ /* +** The default SQLite sqlite3_vfs implementations do not allocate +** memory (actually, os_unix.c allocates a small amount of memory +** from within OsOpen()), but some third-party implementations may. +** So we test the effects of a malloc() failing and the sqlite3OsXXX() +** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro. +** +** The following functions are instrumented for malloc() failure +** testing: +** +** sqlite3OsOpen() +** sqlite3OsRead() +** sqlite3OsWrite() +** sqlite3OsSync() +** sqlite3OsLock() +** +*/ +#if defined(SQLITE_TEST) && (SQLITE_OS_WIN==0) + #define DO_OS_MALLOC_TEST if (1) { \ + void *pTstAlloc = sqlite3Malloc(10); \ + if (!pTstAlloc) return SQLITE_IOERR_NOMEM; \ + sqlite3_free(pTstAlloc); \ + } +#else + #define DO_OS_MALLOC_TEST +#endif + +/* ** The following routines are convenience wrappers around methods -** of the OsFile object. This is mostly just syntactic sugar. All +** of the sqlite3_file object. This is mostly just syntactic sugar. All ** of this would be completely automatic if SQLite were coded using ** C++ instead of plain old C. 
*/ -int sqlite3OsClose(OsFile **pId){ - OsFile *id; - if( pId!=0 && (id = *pId)!=0 ){ - return id->pMethod->xClose(pId); - }else{ - return SQLITE_OK; +int sqlite3OsClose(sqlite3_file *pId){ + int rc = SQLITE_OK; + if( pId->pMethods ){ + rc = pId->pMethods->xClose(pId); + pId->pMethods = 0; } + return rc; } -int sqlite3OsOpenDirectory(OsFile *id, const char *zName){ - return id->pMethod->xOpenDirectory(id, zName); +int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST; + return id->pMethods->xRead(id, pBuf, amt, offset); } -int sqlite3OsRead(OsFile *id, void *pBuf, int amt){ - return id->pMethod->xRead(id, pBuf, amt); +int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST; + return id->pMethods->xWrite(id, pBuf, amt, offset); } -int sqlite3OsWrite(OsFile *id, const void *pBuf, int amt){ - return id->pMethod->xWrite(id, pBuf, amt); +int sqlite3OsTruncate(sqlite3_file *id, i64 size){ + return id->pMethods->xTruncate(id, size); } -int sqlite3OsSeek(OsFile *id, i64 offset){ - return id->pMethod->xSeek(id, offset); +int sqlite3OsSync(sqlite3_file *id, int flags){ + DO_OS_MALLOC_TEST; + return id->pMethods->xSync(id, flags); } -int sqlite3OsTruncate(OsFile *id, i64 size){ - return id->pMethod->xTruncate(id, size); +int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ + DO_OS_MALLOC_TEST; + return id->pMethods->xFileSize(id, pSize); } -int sqlite3OsSync(OsFile *id, int fullsync){ - return id->pMethod->xSync(id, fullsync); +int sqlite3OsLock(sqlite3_file *id, int lockType){ + DO_OS_MALLOC_TEST; + return id->pMethods->xLock(id, lockType); } -void sqlite3OsSetFullSync(OsFile *id, int value){ - id->pMethod->xSetFullSync(id, value); +int sqlite3OsUnlock(sqlite3_file *id, int lockType){ + return id->pMethods->xUnlock(id, lockType); } -int sqlite3OsFileSize(OsFile *id, i64 *pSize){ - return id->pMethod->xFileSize(id, pSize); +int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ + DO_OS_MALLOC_TEST; + return id->pMethods->xCheckReservedLock(id, pResOut); } -int sqlite3OsLock(OsFile *id, int lockType){ - return id->pMethod->xLock(id, lockType); +int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ + return id->pMethods->xFileControl(id, op, pArg); } -int sqlite3OsUnlock(OsFile *id, int lockType){ - return id->pMethod->xUnlock(id, lockType); +int sqlite3OsSectorSize(sqlite3_file *id){ + int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; + return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); } -int sqlite3OsCheckReservedLock(OsFile *id){ - return id->pMethod->xCheckReservedLock(id); +int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ + return id->pMethods->xDeviceCharacteristics(id); } -int sqlite3OsSectorSize(OsFile *id){ - int (*xSectorSize)(OsFile*) = id->pMethod->xSectorSize; - return xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE; + +/* +** The next group of routines are convenience wrappers around the +** VFS methods. 
+*/ +int sqlite3OsOpen( + sqlite3_vfs *pVfs, + const char *zPath, + sqlite3_file *pFile, + int flags, + int *pFlagsOut +){ + int rc; + DO_OS_MALLOC_TEST; + rc = pVfs->xOpen(pVfs, zPath, pFile, flags, pFlagsOut); + assert( rc==SQLITE_OK || pFile->pMethods==0 ); + return rc; +} +int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + return pVfs->xDelete(pVfs, zPath, dirSync); +} +int sqlite3OsAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + DO_OS_MALLOC_TEST; + return pVfs->xAccess(pVfs, zPath, flags, pResOut); +} +int sqlite3OsFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nPathOut, + char *zPathOut +){ + return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); +} +#ifndef SQLITE_OMIT_LOAD_EXTENSION +void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return pVfs->xDlOpen(pVfs, zPath); +} +void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + pVfs->xDlError(pVfs, nByte, zBufOut); +} +void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){ + return pVfs->xDlSym(pVfs, pHdle, zSym); +} +void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){ + pVfs->xDlClose(pVfs, pHandle); +} +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return pVfs->xRandomness(pVfs, nByte, zBufOut); +} +int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ + return pVfs->xSleep(pVfs, nMicro); +} +int sqlite3OsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return pVfs->xCurrentTime(pVfs, pTimeOut); } -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) - /* These methods are currently only used for testing and debugging. */ - int sqlite3OsFileHandle(OsFile *id){ - return id->pMethod->xFileHandle(id); - } - int sqlite3OsLockState(OsFile *id){ - return id->pMethod->xLockState(id); +int sqlite3OsOpenMalloc( + sqlite3_vfs *pVfs, + const char *zFile, + sqlite3_file **ppFile, + int flags, + int *pOutFlags +){ + int rc = SQLITE_NOMEM; + sqlite3_file *pFile; + pFile = (sqlite3_file *)sqlite3Malloc(pVfs->szOsFile); + if( pFile ){ + rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); + if( rc!=SQLITE_OK ){ + sqlite3_free(pFile); + }else{ + *ppFile = pFile; + } } + return rc; +} +int sqlite3OsCloseFree(sqlite3_file *pFile){ + int rc = SQLITE_OK; + assert( pFile ); + rc = sqlite3OsClose(pFile); + sqlite3_free(pFile); + return rc; +} + +/* +** The list of all registered VFS implementations. +*/ +static sqlite3_vfs * SQLITE_WSD vfsList = 0; +#define vfsList GLOBAL(sqlite3_vfs *, vfsList) + +/* +** Locate a VFS by name. If no name is given, simply return the +** first VFS on the list. 
+*/ +sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){ + sqlite3_vfs *pVfs = 0; +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex; +#endif +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return 0; #endif +#if SQLITE_THREADSAFE + mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); +#endif + sqlite3_mutex_enter(mutex); + for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){ + if( zVfs==0 ) break; + if( strcmp(zVfs, pVfs->zName)==0 ) break; + } + sqlite3_mutex_leave(mutex); + return pVfs; +} + +/* +** Unlink a VFS from the linked list +*/ +static void vfsUnlink(sqlite3_vfs *pVfs){ + assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ); + if( pVfs==0 ){ + /* No-op */ + }else if( vfsList==pVfs ){ + vfsList = pVfs->pNext; + }else if( vfsList ){ + sqlite3_vfs *p = vfsList; + while( p->pNext && p->pNext!=pVfs ){ + p = p->pNext; + } + if( p->pNext==pVfs ){ + p->pNext = pVfs->pNext; + } + } +} -#ifdef SQLITE_ENABLE_REDEF_IO /* -** A function to return a pointer to the virtual function table. -** This routine really does not accomplish very much since the -** virtual function table is a global variable and anybody who -** can call this function can just as easily access the variable -** for themselves. Nevertheless, we include this routine for -** backwards compatibility with an earlier redefinable I/O -** interface design. +** Register a VFS with the system. It is harmless to register the same +** VFS multiple times. The new VFS becomes the default if makeDflt is +** true. */ -struct sqlite3OsVtbl *sqlite3_os_switch(void){ - return &sqlite3Os; +int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){ + sqlite3_mutex *mutex = 0; +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + if( makeDflt || vfsList==0 ){ + pVfs->pNext = vfsList; + vfsList = pVfs; + }else{ + pVfs->pNext = vfsList->pNext; + vfsList->pNext = pVfs; + } + assert(vfsList); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; } + +/* +** Unregister a VFS so that it is no longer accessible. +*/ +int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os_common.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os_common.h --- sqlite3-3.4.2/src/os_common.h 2007-08-08 13:01:59.000000000 +0100 +++ sqlite3-3.6.16/src/os_common.h 2009-06-12 03:37:47.000000000 +0100 @@ -16,7 +16,11 @@ ** ** This file should be #included by the os_*.c files only. It is not a ** general purpose header file. +** +** $Id: os_common.h,v 1.38 2009/02/24 18:40:50 danielk1977 Exp $ */ +#ifndef _OS_COMMON_H_ +#define _OS_COMMON_H_ /* ** At least two bugs have slipped in because we changed the MEMORY_DEBUG @@ -27,26 +31,17 @@ # error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." #endif - -/* - * When testing, this global variable stores the location of the - * pending-byte in the database file. 
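sqlite3_vfs_find() and sqlite3_vfs_register() above are the whole public surface of the VFS registry. A common pattern is to clone the default VFS under a new name and select it per-connection; the sketch below uses only documented sqlite3_vfs fields plus sqlite3_open_v2(), and the "logvfs" name and helper functions are invented for the example.

#include "sqlite3.h"

static sqlite3_vfs loggingVfs;       /* Storage for the clone */

/* Register a copy of the default VFS under a new name without making it
** the process-wide default (makeDflt==0). */
int registerLoggingVfs(void){
  sqlite3_vfs *pDefault = sqlite3_vfs_find(0);   /* First VFS on the list */
  if( pDefault==0 ) return SQLITE_ERROR;
  loggingVfs = *pDefault;                        /* Start as an exact copy */
  loggingVfs.zName = "logvfs";                   /* New lookup name */
  /* Individual methods (xOpen, xDelete, ...) could be overridden here. */
  return sqlite3_vfs_register(&loggingVfs, 0);
}

/* A connection opts in by naming the VFS in sqlite3_open_v2(). */
int openWithLoggingVfs(const char *zFile, sqlite3 **ppDb){
  return sqlite3_open_v2(zFile, ppDb,
                         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "logvfs");
}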
- */ -#ifdef SQLITE_TEST -unsigned int sqlite3_pending_byte = 0x40000000; -#endif - #ifdef SQLITE_DEBUG -int sqlite3_os_trace = 0; -#define OSTRACE1(X) if( sqlite3_os_trace ) sqlite3DebugPrintf(X) -#define OSTRACE2(X,Y) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y) -#define OSTRACE3(X,Y,Z) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z) -#define OSTRACE4(X,Y,Z,A) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z,A) -#define OSTRACE5(X,Y,Z,A,B) if( sqlite3_os_trace ) sqlite3DebugPrintf(X,Y,Z,A,B) +int sqlite3OSTrace = 0; +#define OSTRACE1(X) if( sqlite3OSTrace ) sqlite3DebugPrintf(X) +#define OSTRACE2(X,Y) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y) +#define OSTRACE3(X,Y,Z) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z) +#define OSTRACE4(X,Y,Z,A) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z,A) +#define OSTRACE5(X,Y,Z,A,B) if( sqlite3OSTrace ) sqlite3DebugPrintf(X,Y,Z,A,B) #define OSTRACE6(X,Y,Z,A,B,C) \ - if(sqlite3_os_trace) sqlite3DebugPrintf(X,Y,Z,A,B,C) + if(sqlite3OSTrace) sqlite3DebugPrintf(X,Y,Z,A,B,C) #define OSTRACE7(X,Y,Z,A,B,C,D) \ - if(sqlite3_os_trace) sqlite3DebugPrintf(X,Y,Z,A,B,C,D) + if(sqlite3OSTrace) sqlite3DebugPrintf(X,Y,Z,A,B,C,D) #else #define OSTRACE1(X) #define OSTRACE2(X,Y) @@ -62,22 +57,22 @@ ** on i486 hardware. */ #ifdef SQLITE_PERFORMANCE_TRACE -__inline__ unsigned long long int hwtime(void){ - unsigned long long int x; - __asm__("rdtsc\n\t" - "mov %%edx, %%ecx\n\t" - :"=A" (x)); - return x; -} -static unsigned long long int g_start; -static unsigned int elapse; -#define TIMER_START g_start=hwtime() -#define TIMER_END elapse=hwtime()-g_start -#define TIMER_ELAPSED elapse + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +#include "hwtime.h" + +static sqlite_uint64 g_start; +static sqlite_uint64 g_elapsed; +#define TIMER_START g_start=sqlite3Hwtime() +#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start +#define TIMER_ELAPSED g_elapsed #else #define TIMER_START #define TIMER_END -#define TIMER_ELAPSED 0 +#define TIMER_ELAPSED ((sqlite_uint64)0) #endif /* @@ -86,19 +81,22 @@ ** is used for testing the I/O recovery logic. 
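The SimulateIOError() macros that follow implement a countdown fault injector: a pending counter is decremented on every instrumented call and the error fires when it reaches one, or keeps firing while the persist flag is set. The same pattern, with invented names, can be reproduced in isolation:

/* Countdown fault injector in the style of the SimulateIOError() test hooks
** below; all names here are illustrative. */
static int faultPending = 0;    /* Inject a failure when this reaches 1 */
static int faultPersist = 0;    /* Once injected, keep failing */
static int faultHit = 0;        /* Number of failures injected so far */

#define FAULT_COUNTDOWN(CODE)                \
  if( (faultPersist && faultHit)             \
   || faultPending-- == 1 ){ faultHit++; CODE; }

/* Example instrumented wrapper: fails with -1 when the injector fires. */
static int instrumentedWrite(int fd, const void *pBuf, int n){
  FAULT_COUNTDOWN( return -1 );
  (void)fd; (void)pBuf; (void)n;  /* A real write would happen here */
  return 0;
}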
*/ #ifdef SQLITE_TEST -int sqlite3_io_error_hit = 0; -int sqlite3_io_error_pending = 0; -int sqlite3_io_error_persist = 0; +int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ +int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ +int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ +int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ +int sqlite3_io_error_benign = 0; /* True if errors are benign */ int sqlite3_diskfull_pending = 0; int sqlite3_diskfull = 0; +#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) #define SimulateIOError(CODE) \ - if( sqlite3_io_error_pending || sqlite3_io_error_hit ) \ - if( sqlite3_io_error_pending-- == 1 \ - || (sqlite3_io_error_persist && sqlite3_io_error_hit) ) \ - { local_ioerr(); CODE; } + if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ + || sqlite3_io_error_pending-- == 1 ) \ + { local_ioerr(); CODE; } static void local_ioerr(){ IOTRACE(("IOERR\n")); - sqlite3_io_error_hit = 1; + sqlite3_io_error_hit++; + if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; } #define SimulateDiskfullError(CODE) \ if( sqlite3_diskfull_pending ){ \ @@ -112,6 +110,7 @@ } \ } #else +#define SimulateIOErrorBenign(X) #define SimulateIOError(A) #define SimulateDiskfullError(A) #endif @@ -126,73 +125,4 @@ #define OpenCounter(X) #endif -/* -** sqlite3GenericMalloc -** sqlite3GenericRealloc -** sqlite3GenericOsFree -** sqlite3GenericAllocationSize -** -** Implementation of the os level dynamic memory allocation interface in terms -** of the standard malloc(), realloc() and free() found in many operating -** systems. No rocket science here. -** -** There are two versions of these four functions here. The version -** implemented here is only used if memory-management or memory-debugging is -** enabled. This version allocates an extra 8-bytes at the beginning of each -** block and stores the size of the allocation there. -** -** If neither memory-management or debugging is enabled, the second -** set of implementations is used instead. -*/ -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || defined (SQLITE_MEMDEBUG) -void *sqlite3GenericMalloc(int n){ - char *p = (char *)malloc(n+8); - assert(n>0); - assert(sizeof(int)<=8); - if( p ){ - *(int *)p = n; - p += 8; - } - return (void *)p; -} -void *sqlite3GenericRealloc(void *p, int n){ - char *p2 = ((char *)p - 8); - assert(n>0); - p2 = (char*)realloc(p2, n+8); - if( p2 ){ - *(int *)p2 = n; - p2 += 8; - } - return (void *)p2; -} -void sqlite3GenericFree(void *p){ - assert(p); - free((void *)((char *)p - 8)); -} -int sqlite3GenericAllocationSize(void *p){ - return p ? 
*(int *)((char *)p - 8) : 0; -} -#else -void *sqlite3GenericMalloc(int n){ - char *p = (char *)malloc(n); - return (void *)p; -} -void *sqlite3GenericRealloc(void *p, int n){ - assert(n>0); - p = realloc(p, n); - return p; -} -void sqlite3GenericFree(void *p){ - assert(p); - free(p); -} -/* Never actually used, but needed for the linker */ -int sqlite3GenericAllocationSize(void *p){ return 0; } -#endif - -/* -** The default size of a disk sector -*/ -#ifndef PAGER_SECTOR_SIZE -# define PAGER_SECTOR_SIZE 512 -#endif +#endif /* !defined(_OS_COMMON_H_) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os.h --- sqlite3-3.4.2/src/os.h 2007-06-30 19:28:15.000000000 +0100 +++ sqlite3-3.6.16/src/os.h 2009-06-12 03:37:47.000000000 +0100 @@ -13,60 +13,78 @@ ** This header file (together with is companion C source-code file ** "os.c") attempt to abstract the underlying operating system so that ** the SQLite library will work on both POSIX and windows systems. +** +** This header file is #include-ed by sqliteInt.h and thus ends up +** being included by every source file. +** +** $Id: os.h,v 1.108 2009/02/05 16:31:46 drh Exp $ */ #ifndef _SQLITE_OS_H_ #define _SQLITE_OS_H_ /* ** Figure out if we are dealing with Unix, Windows, or some other -** operating system. -*/ -#if defined(OS_OTHER) -# if OS_OTHER==1 -# undef OS_UNIX -# define OS_UNIX 0 -# undef OS_WIN -# define OS_WIN 0 -# undef OS_OS2 -# define OS_OS2 0 +** operating system. After the following block of preprocess macros, +** all of SQLITE_OS_UNIX, SQLITE_OS_WIN, SQLITE_OS_OS2, and SQLITE_OS_OTHER +** will defined to either 1 or 0. One of the four will be 1. The other +** three will be 0. +*/ +#if defined(SQLITE_OS_OTHER) +# if SQLITE_OS_OTHER==1 +# undef SQLITE_OS_UNIX +# define SQLITE_OS_UNIX 0 +# undef SQLITE_OS_WIN +# define SQLITE_OS_WIN 0 +# undef SQLITE_OS_OS2 +# define SQLITE_OS_OS2 0 # else -# undef OS_OTHER +# undef SQLITE_OS_OTHER # endif #endif -#if !defined(OS_UNIX) && !defined(OS_OTHER) -# define OS_OTHER 0 -# ifndef OS_WIN +#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER) +# define SQLITE_OS_OTHER 0 +# ifndef SQLITE_OS_WIN # if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__) -# define OS_WIN 1 -# define OS_UNIX 0 -# define OS_OS2 0 +# define SQLITE_OS_WIN 1 +# define SQLITE_OS_UNIX 0 +# define SQLITE_OS_OS2 0 # elif defined(__EMX__) || defined(_OS2) || defined(OS2) || defined(_OS2_) || defined(__OS2__) -# define OS_WIN 0 -# define OS_UNIX 0 -# define OS_OS2 1 +# define SQLITE_OS_WIN 0 +# define SQLITE_OS_UNIX 0 +# define SQLITE_OS_OS2 1 # else -# define OS_WIN 0 -# define OS_UNIX 1 -# define OS_OS2 0 +# define SQLITE_OS_WIN 0 +# define SQLITE_OS_UNIX 1 +# define SQLITE_OS_OS2 0 # endif # else -# define OS_UNIX 0 -# define OS_OS2 0 +# define SQLITE_OS_UNIX 0 +# define SQLITE_OS_OS2 0 # endif #else -# ifndef OS_WIN -# define OS_WIN 0 +# ifndef SQLITE_OS_WIN +# define SQLITE_OS_WIN 0 # endif #endif +/* +** Determine if we are dealing with WindowsCE - which has a much +** reduced API. 
+*/ +#if defined(_WIN32_WCE) +# define SQLITE_OS_WINCE 1 +#else +# define SQLITE_OS_WINCE 0 +#endif + /* ** Define the maximum size of a temporary filename */ -#if OS_WIN +#if SQLITE_OS_WIN # include # define SQLITE_TEMPNAME_SIZE (MAX_PATH+50) -#elif OS_OS2 +#elif SQLITE_OS_OS2 # if (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 3) && defined(OS2_HIGH_MEMORY) # include /* has to be included before os2.h for linking to work */ # endif @@ -76,7 +94,9 @@ # define INCL_DOSMISC # define INCL_DOSPROCESS # define INCL_DOSMODULEMGR +# define INCL_DOSSEMAPHORES # include +# include # define SQLITE_TEMPNAME_SIZE (CCHMAXPATHCOMP) #else # define SQLITE_TEMPNAME_SIZE 200 @@ -103,7 +123,7 @@ ** If sqlite is being embedded in another program, you may wish to change the ** prefix to reflect your program's name, so that if your program exits ** prematurely, old temporary files can be easily identified. This can be done -** using -DTEMP_FILE_PREFIX=myprefix_ on the compiler command line. +** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. ** ** 2006-10-31: The default prefix used to be "sqlite_". But then ** Mcafee started using SQLite in their anti-virus product and it @@ -117,143 +137,10 @@ ** enough to know that calling the developer will not help get rid ** of the file. */ -#ifndef TEMP_FILE_PREFIX -# define TEMP_FILE_PREFIX "etilqs_" -#endif - -/* -** Define the interfaces for Unix, Windows, and OS/2. -*/ -#if OS_UNIX -#define sqlite3OsOpenReadWrite sqlite3UnixOpenReadWrite -#define sqlite3OsOpenExclusive sqlite3UnixOpenExclusive -#define sqlite3OsOpenReadOnly sqlite3UnixOpenReadOnly -#define sqlite3OsDelete sqlite3UnixDelete -#define sqlite3OsFileExists sqlite3UnixFileExists -#define sqlite3OsFullPathname sqlite3UnixFullPathname -#define sqlite3OsIsDirWritable sqlite3UnixIsDirWritable -#define sqlite3OsSyncDirectory sqlite3UnixSyncDirectory -#define sqlite3OsTempFileName sqlite3UnixTempFileName -#define sqlite3OsRandomSeed sqlite3UnixRandomSeed -#define sqlite3OsSleep sqlite3UnixSleep -#define sqlite3OsCurrentTime sqlite3UnixCurrentTime -#define sqlite3OsEnterMutex sqlite3UnixEnterMutex -#define sqlite3OsLeaveMutex sqlite3UnixLeaveMutex -#define sqlite3OsInMutex sqlite3UnixInMutex -#define sqlite3OsThreadSpecificData sqlite3UnixThreadSpecificData -#define sqlite3OsMalloc sqlite3GenericMalloc -#define sqlite3OsRealloc sqlite3GenericRealloc -#define sqlite3OsFree sqlite3GenericFree -#define sqlite3OsAllocationSize sqlite3GenericAllocationSize -#define sqlite3OsDlopen sqlite3UnixDlopen -#define sqlite3OsDlsym sqlite3UnixDlsym -#define sqlite3OsDlclose sqlite3UnixDlclose -#endif -#if OS_WIN -#define sqlite3OsOpenReadWrite sqlite3WinOpenReadWrite -#define sqlite3OsOpenExclusive sqlite3WinOpenExclusive -#define sqlite3OsOpenReadOnly sqlite3WinOpenReadOnly -#define sqlite3OsDelete sqlite3WinDelete -#define sqlite3OsFileExists sqlite3WinFileExists -#define sqlite3OsFullPathname sqlite3WinFullPathname -#define sqlite3OsIsDirWritable sqlite3WinIsDirWritable -#define sqlite3OsSyncDirectory sqlite3WinSyncDirectory -#define sqlite3OsTempFileName sqlite3WinTempFileName -#define sqlite3OsRandomSeed sqlite3WinRandomSeed -#define sqlite3OsSleep sqlite3WinSleep -#define sqlite3OsCurrentTime sqlite3WinCurrentTime -#define sqlite3OsEnterMutex sqlite3WinEnterMutex -#define sqlite3OsLeaveMutex sqlite3WinLeaveMutex -#define sqlite3OsInMutex sqlite3WinInMutex -#define sqlite3OsThreadSpecificData sqlite3WinThreadSpecificData -#define sqlite3OsMalloc sqlite3GenericMalloc -#define sqlite3OsRealloc 
sqlite3GenericRealloc -#define sqlite3OsFree sqlite3GenericFree -#define sqlite3OsAllocationSize sqlite3GenericAllocationSize -#define sqlite3OsDlopen sqlite3WinDlopen -#define sqlite3OsDlsym sqlite3WinDlsym -#define sqlite3OsDlclose sqlite3WinDlclose -#endif -#if OS_OS2 -#define sqlite3OsOpenReadWrite sqlite3Os2OpenReadWrite -#define sqlite3OsOpenExclusive sqlite3Os2OpenExclusive -#define sqlite3OsOpenReadOnly sqlite3Os2OpenReadOnly -#define sqlite3OsDelete sqlite3Os2Delete -#define sqlite3OsFileExists sqlite3Os2FileExists -#define sqlite3OsFullPathname sqlite3Os2FullPathname -#define sqlite3OsIsDirWritable sqlite3Os2IsDirWritable -#define sqlite3OsSyncDirectory sqlite3Os2SyncDirectory -#define sqlite3OsTempFileName sqlite3Os2TempFileName -#define sqlite3OsRandomSeed sqlite3Os2RandomSeed -#define sqlite3OsSleep sqlite3Os2Sleep -#define sqlite3OsCurrentTime sqlite3Os2CurrentTime -#define sqlite3OsEnterMutex sqlite3Os2EnterMutex -#define sqlite3OsLeaveMutex sqlite3Os2LeaveMutex -#define sqlite3OsInMutex sqlite3Os2InMutex -#define sqlite3OsThreadSpecificData sqlite3Os2ThreadSpecificData -#define sqlite3OsMalloc sqlite3GenericMalloc -#define sqlite3OsRealloc sqlite3GenericRealloc -#define sqlite3OsFree sqlite3GenericFree -#define sqlite3OsAllocationSize sqlite3GenericAllocationSize -#define sqlite3OsDlopen sqlite3Os2Dlopen -#define sqlite3OsDlsym sqlite3Os2Dlsym -#define sqlite3OsDlclose sqlite3Os2Dlclose +#ifndef SQLITE_TEMP_FILE_PREFIX +# define SQLITE_TEMP_FILE_PREFIX "etilqs_" #endif - - - -/* -** If using an alternative OS interface, then we must have an "os_other.h" -** header file available for that interface. Presumably the "os_other.h" -** header file contains #defines similar to those above. -*/ -#if OS_OTHER -# include "os_other.h" -#endif - - - -/* -** Forward declarations -*/ -typedef struct OsFile OsFile; -typedef struct IoMethod IoMethod; - -/* -** An instance of the following structure contains pointers to all -** methods on an OsFile object. -*/ -struct IoMethod { - int (*xClose)(OsFile**); - int (*xOpenDirectory)(OsFile*, const char*); - int (*xRead)(OsFile*, void*, int amt); - int (*xWrite)(OsFile*, const void*, int amt); - int (*xSeek)(OsFile*, i64 offset); - int (*xTruncate)(OsFile*, i64 size); - int (*xSync)(OsFile*, int); - void (*xSetFullSync)(OsFile *id, int setting); - int (*xFileHandle)(OsFile *id); - int (*xFileSize)(OsFile*, i64 *pSize); - int (*xLock)(OsFile*, int); - int (*xUnlock)(OsFile*, int); - int (*xLockState)(OsFile *id); - int (*xCheckReservedLock)(OsFile *id); - int (*xSectorSize)(OsFile *id); -}; - -/* -** The OsFile object describes an open disk file in an OS-dependent way. -** The version of OsFile defined here is a generic version. Each OS -** implementation defines its own subclass of this structure that contains -** additional information needed to handle file I/O. But the pMethod -** entry (pointing to the virtual function table) always occurs first -** so that we can always find the appropriate methods. -*/ -struct OsFile { - IoMethod const *pMethod; -}; - /* ** The following values may be passed as the second argument to ** sqlite3OsLock(). The various locks exhibit the following semantics: @@ -308,9 +195,7 @@ ** a random byte is selected for a shared lock. The pool of bytes for ** shared locks begins at SHARED_FIRST. ** -** These #defines are available in sqlite_aux.h so that adaptors for -** connecting SQLite to other operating systems can use the same byte -** ranges for locking. 
In particular, the same locking strategy and +** The same locking strategy and ** byte ranges are used for Unix. This leaves open the possiblity of having ** clients on win95, winNT, and unix all talking to the same shared file ** and all locking correctly. To do so would require that samba (or whatever @@ -334,215 +219,50 @@ ** 1GB boundary. ** */ -#ifndef SQLITE_TEST -#define PENDING_BYTE 0x40000000 /* First byte past the 1GB boundary */ -#else -extern unsigned int sqlite3_pending_byte; -#define PENDING_BYTE sqlite3_pending_byte -#endif - +#define PENDING_BYTE sqlite3PendingByte #define RESERVED_BYTE (PENDING_BYTE+1) #define SHARED_FIRST (PENDING_BYTE+2) #define SHARED_SIZE 510 -/* -** Prototypes for operating system interface routines. +/* +** Functions for accessing sqlite3_file methods */ -int sqlite3OsClose(OsFile**); -int sqlite3OsOpenDirectory(OsFile*, const char*); -int sqlite3OsRead(OsFile*, void*, int amt); -int sqlite3OsWrite(OsFile*, const void*, int amt); -int sqlite3OsSeek(OsFile*, i64 offset); -int sqlite3OsTruncate(OsFile*, i64 size); -int sqlite3OsSync(OsFile*, int); -void sqlite3OsSetFullSync(OsFile *id, int setting); -int sqlite3OsFileSize(OsFile*, i64 *pSize); -int sqlite3OsLock(OsFile*, int); -int sqlite3OsUnlock(OsFile*, int); -int sqlite3OsCheckReservedLock(OsFile *id); -int sqlite3OsOpenReadWrite(const char*, OsFile**, int*); -int sqlite3OsOpenExclusive(const char*, OsFile**, int); -int sqlite3OsOpenReadOnly(const char*, OsFile**); -int sqlite3OsDelete(const char*); -int sqlite3OsFileExists(const char*); -char *sqlite3OsFullPathname(const char*); -int sqlite3OsIsDirWritable(char*); -int sqlite3OsSyncDirectory(const char*); -int sqlite3OsSectorSize(OsFile *id); -int sqlite3OsTempFileName(char*); -int sqlite3OsRandomSeed(char*); -int sqlite3OsSleep(int ms); -int sqlite3OsCurrentTime(double*); -void sqlite3OsEnterMutex(void); -void sqlite3OsLeaveMutex(void); -int sqlite3OsInMutex(int); -ThreadData *sqlite3OsThreadSpecificData(int); -void *sqlite3OsMalloc(int); -void *sqlite3OsRealloc(void *, int); -void sqlite3OsFree(void *); -int sqlite3OsAllocationSize(void *); -void *sqlite3OsDlopen(const char*); -void *sqlite3OsDlsym(void*, const char*); -int sqlite3OsDlclose(void*); - -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) - int sqlite3OsFileHandle(OsFile *id); - int sqlite3OsLockState(OsFile *id); -#endif +int sqlite3OsClose(sqlite3_file*); +int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); +int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); +int sqlite3OsTruncate(sqlite3_file*, i64 size); +int sqlite3OsSync(sqlite3_file*, int); +int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); +int sqlite3OsLock(sqlite3_file*, int); +int sqlite3OsUnlock(sqlite3_file*, int); +int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); +int sqlite3OsFileControl(sqlite3_file*,int,void*); +#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 +int sqlite3OsSectorSize(sqlite3_file *id); +int sqlite3OsDeviceCharacteristics(sqlite3_file *id); + +/* +** Functions for accessing sqlite3_vfs methods +*/ +int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); +int sqlite3OsDelete(sqlite3_vfs *, const char *, int); +int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); +int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); +#ifndef SQLITE_OMIT_LOAD_EXTENSION +void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); +void sqlite3OsDlError(sqlite3_vfs *, int, char *); +void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char 
*))(void); +void sqlite3OsDlClose(sqlite3_vfs *, void *); +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +int sqlite3OsRandomness(sqlite3_vfs *, int, char *); +int sqlite3OsSleep(sqlite3_vfs *, int); +int sqlite3OsCurrentTime(sqlite3_vfs *, double*); /* -** If the SQLITE_ENABLE_REDEF_IO macro is defined, then the OS-layer -** interface routines are not called directly but are invoked using -** pointers to functions. This allows the implementation of various -** OS-layer interface routines to be modified at run-time. There are -** obscure but legitimate reasons for wanting to do this. But for -** most users, a direct call to the underlying interface is preferable -** so the the redefinable I/O interface is turned off by default. +** Convenience functions for opening and closing files using +** sqlite3_malloc() to obtain space for the file-handle structure. */ -#ifdef SQLITE_ENABLE_REDEF_IO - -/* -** When redefinable I/O is enabled, a single global instance of the -** following structure holds pointers to the routines that SQLite -** uses to talk with the underlying operating system. Modify this -** structure (before using any SQLite API!) to accomodate perculiar -** operating system interfaces or behaviors. -*/ -struct sqlite3OsVtbl { - int (*xOpenReadWrite)(const char*, OsFile**, int*); - int (*xOpenExclusive)(const char*, OsFile**, int); - int (*xOpenReadOnly)(const char*, OsFile**); - - int (*xDelete)(const char*); - int (*xFileExists)(const char*); - char *(*xFullPathname)(const char*); - int (*xIsDirWritable)(char*); - int (*xSyncDirectory)(const char*); - int (*xTempFileName)(char*); - - int (*xRandomSeed)(char*); - int (*xSleep)(int ms); - int (*xCurrentTime)(double*); - - void (*xEnterMutex)(void); - void (*xLeaveMutex)(void); - int (*xInMutex)(int); - ThreadData *(*xThreadSpecificData)(int); - - void *(*xMalloc)(int); - void *(*xRealloc)(void *, int); - void (*xFree)(void *); - int (*xAllocationSize)(void *); - - void *(*xDlopen)(const char*); - void *(*xDlsym)(void*, const char*); - int (*xDlclose)(void*); -}; - -/* Macro used to comment out routines that do not exists when there is -** no disk I/O or extension loading -*/ -#ifdef SQLITE_OMIT_DISKIO -# define IF_DISKIO(X) 0 -#else -# define IF_DISKIO(X) X -#endif -#ifdef SQLITE_OMIT_LOAD_EXTENSION -# define IF_DLOPEN(X) 0 -#else -# define IF_DLOPEN(X) X -#endif - - -#if defined(_SQLITE_OS_C_) || defined(SQLITE_AMALGAMATION) - /* - ** The os.c file implements the global virtual function table. - ** We have to put this file here because the initializers - ** (ex: sqlite3OsRandomSeed) are macros that are about to be - ** redefined. - */ - struct sqlite3OsVtbl sqlite3Os = { - IF_DISKIO( sqlite3OsOpenReadWrite ), - IF_DISKIO( sqlite3OsOpenExclusive ), - IF_DISKIO( sqlite3OsOpenReadOnly ), - IF_DISKIO( sqlite3OsDelete ), - IF_DISKIO( sqlite3OsFileExists ), - IF_DISKIO( sqlite3OsFullPathname ), - IF_DISKIO( sqlite3OsIsDirWritable ), - IF_DISKIO( sqlite3OsSyncDirectory ), - IF_DISKIO( sqlite3OsTempFileName ), - sqlite3OsRandomSeed, - sqlite3OsSleep, - sqlite3OsCurrentTime, - sqlite3OsEnterMutex, - sqlite3OsLeaveMutex, - sqlite3OsInMutex, - sqlite3OsThreadSpecificData, - sqlite3OsMalloc, - sqlite3OsRealloc, - sqlite3OsFree, - sqlite3OsAllocationSize, - IF_DLOPEN( sqlite3OsDlopen ), - IF_DLOPEN( sqlite3OsDlsym ), - IF_DLOPEN( sqlite3OsDlclose ), - }; -#else - /* - ** Files other than os.c just reference the global virtual function table. 
- */ - extern struct sqlite3OsVtbl sqlite3Os; -#endif /* _SQLITE_OS_C_ */ - - -/* This additional API routine is available with redefinable I/O */ -struct sqlite3OsVtbl *sqlite3_os_switch(void); - - -/* -** Redefine the OS interface to go through the virtual function table -** rather than calling routines directly. -*/ -#undef sqlite3OsOpenReadWrite -#undef sqlite3OsOpenExclusive -#undef sqlite3OsOpenReadOnly -#undef sqlite3OsDelete -#undef sqlite3OsFileExists -#undef sqlite3OsFullPathname -#undef sqlite3OsIsDirWritable -#undef sqlite3OsSyncDirectory -#undef sqlite3OsTempFileName -#undef sqlite3OsRandomSeed -#undef sqlite3OsSleep -#undef sqlite3OsCurrentTime -#undef sqlite3OsEnterMutex -#undef sqlite3OsLeaveMutex -#undef sqlite3OsInMutex -#undef sqlite3OsThreadSpecificData -#undef sqlite3OsMalloc -#undef sqlite3OsRealloc -#undef sqlite3OsFree -#undef sqlite3OsAllocationSize -#define sqlite3OsOpenReadWrite sqlite3Os.xOpenReadWrite -#define sqlite3OsOpenExclusive sqlite3Os.xOpenExclusive -#define sqlite3OsOpenReadOnly sqlite3Os.xOpenReadOnly -#define sqlite3OsDelete sqlite3Os.xDelete -#define sqlite3OsFileExists sqlite3Os.xFileExists -#define sqlite3OsFullPathname sqlite3Os.xFullPathname -#define sqlite3OsIsDirWritable sqlite3Os.xIsDirWritable -#define sqlite3OsSyncDirectory sqlite3Os.xSyncDirectory -#define sqlite3OsTempFileName sqlite3Os.xTempFileName -#define sqlite3OsRandomSeed sqlite3Os.xRandomSeed -#define sqlite3OsSleep sqlite3Os.xSleep -#define sqlite3OsCurrentTime sqlite3Os.xCurrentTime -#define sqlite3OsEnterMutex sqlite3Os.xEnterMutex -#define sqlite3OsLeaveMutex sqlite3Os.xLeaveMutex -#define sqlite3OsInMutex sqlite3Os.xInMutex -#define sqlite3OsThreadSpecificData sqlite3Os.xThreadSpecificData -#define sqlite3OsMalloc sqlite3Os.xMalloc -#define sqlite3OsRealloc sqlite3Os.xRealloc -#define sqlite3OsFree sqlite3Os.xFree -#define sqlite3OsAllocationSize sqlite3Os.xAllocationSize - -#endif /* SQLITE_ENABLE_REDEF_IO */ +int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); +int sqlite3OsCloseFree(sqlite3_file *); #endif /* _SQLITE_OS_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os_os2.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os_os2.c --- sqlite3-3.4.2/src/os_os2.c 2007-07-13 11:23:10.000000000 +0100 +++ sqlite3-3.6.16/src/os_os2.c 2009-06-12 03:37:47.000000000 +0100 @@ -11,17 +11,44 @@ ****************************************************************************** ** ** This file contains code that is specific to OS/2. +** +** $Id: os_os2.c,v 1.63 2008/12/10 19:26:24 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#if OS_OS2 +#if SQLITE_OS_OS2 + +/* +** A Note About Memory Allocation: +** +** This driver uses malloc()/free() directly rather than going through +** the SQLite-wrappers sqlite3_malloc()/sqlite3_free(). Those wrappers +** are designed for use on embedded systems where memory is scarce and +** malloc failures happen frequently. OS/2 does not typically run on +** embedded systems, and when it does the developers normally have bigger +** problems to worry about than running out of memory. So there is not +** a compelling need to use the wrappers. +** +** But there is a good reason to not use the wrappers. If we use the +** wrappers then we will get simulated malloc() failures within this +** driver. And that causes all kinds of problems for our tests. 
We +** could enhance SQLite to deal with simulated malloc failures within +** the OS driver, but the code to deal with those failure would not +** be exercised on Linux (which does not need to malloc() in the driver) +** and so we would have difficulty writing coverage tests for that +** code. Better to leave the code out, we think. +** +** The point of this discussion is as follows: When creating a new +** OS layer for an embedded system, if you use this file as an example, +** avoid the use of malloc()/free(). Those routines work ok on OS/2 +** desktops but not so well in embedded systems. +*/ /* ** Macros used to determine whether or not to use threads. */ -#if defined(THREADSAFE) && THREADSAFE +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE # define SQLITE_OS2_THREADS 1 #endif @@ -31,242 +58,40 @@ #include "os_common.h" /* -** The os2File structure is subclass of OsFile specific for the OS/2 +** The os2File structure is subclass of sqlite3_file specific for the OS/2 ** protability layer. */ typedef struct os2File os2File; struct os2File { - IoMethod const *pMethod; /* Always the first entry */ + const sqlite3_io_methods *pMethod; /* Always the first entry */ HFILE h; /* Handle for accessing the file */ - int delOnClose; /* True if file is to be deleted on close */ - char* pathToDel; /* Name of file to delete on close */ + char* pathToDel; /* Name of file to delete on close, NULL if not */ unsigned char locktype; /* Type of lock currently held on this file */ }; -/* -** Do not include any of the File I/O interface procedures if the -** SQLITE_OMIT_DISKIO macro is defined (indicating that there database -** will be in-memory only) -*/ -#ifndef SQLITE_OMIT_DISKIO - -/* -** Delete the named file -*/ -int sqlite3Os2Delete( const char *zFilename ){ - APIRET rc = NO_ERROR; - - rc = DosDelete( (PSZ)zFilename ); - OSTRACE2( "DELETE \"%s\"\n", zFilename ); - return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; -} - -/* -** Return TRUE if the named file exists. -*/ -int sqlite3Os2FileExists( const char *zFilename ){ - FILESTATUS3 fsts3ConfigInfo; - memset(&fsts3ConfigInfo, 0, sizeof(fsts3ConfigInfo)); - return DosQueryPathInfo( (PSZ)zFilename, FIL_STANDARD, - &fsts3ConfigInfo, sizeof(FILESTATUS3) ) == NO_ERROR; -} - -/* Forward declaration */ -int allocateOs2File( os2File *pInit, OsFile **pld ); - -/* -** Attempt to open a file for both reading and writing. If that -** fails, try opening it read-only. If the file does not exist, -** try to create it. -** -** On success, a handle for the open file is written to *id -** and *pReadonly is set to 0 if the file was opened for reading and -** writing or 1 if the file was opened read-only. The function returns -** SQLITE_OK. -** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id and *pReadonly unchanged. 
-*/ -int sqlite3Os2OpenReadWrite( - const char *zFilename, - OsFile **pld, - int *pReadonly -){ - os2File f; - HFILE hf; - ULONG ulAction; - APIRET rc = NO_ERROR; - - assert( *pld == 0 ); - rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, - FILE_ARCHIVED | FILE_NORMAL, - OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_OPEN_IF_EXISTS, - OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | - OPEN_SHARE_DENYNONE | OPEN_ACCESS_READWRITE, (PEAOP2)NULL ); - if( rc != NO_ERROR ){ - rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, - FILE_ARCHIVED | FILE_NORMAL, - OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_OPEN_IF_EXISTS, - OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | - OPEN_SHARE_DENYWRITE | OPEN_ACCESS_READONLY, (PEAOP2)NULL ); - if( rc != NO_ERROR ){ - return SQLITE_CANTOPEN; - } - *pReadonly = 1; - } - else{ - *pReadonly = 0; - } - f.h = hf; - f.locktype = NO_LOCK; - f.delOnClose = 0; - f.pathToDel = NULL; - OpenCounter(+1); - OSTRACE3( "OPEN R/W %d \"%s\"\n", hf, zFilename ); - return allocateOs2File( &f, pld ); -} - - -/* -** Attempt to open a new file for exclusive access by this process. -** The file will be opened for both reading and writing. To avoid -** a potential security problem, we do not allow the file to have -** previously existed. Nor do we allow the file to be a symbolic -** link. -** -** If delFlag is true, then make arrangements to automatically delete -** the file when it is closed. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -*/ -int sqlite3Os2OpenExclusive( const char *zFilename, OsFile **pld, int delFlag ){ - os2File f; - HFILE hf; - ULONG ulAction; - APIRET rc = NO_ERROR; - - assert( *pld == 0 ); - rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, FILE_NORMAL, - OPEN_ACTION_CREATE_IF_NEW | OPEN_ACTION_REPLACE_IF_EXISTS, - OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | - OPEN_SHARE_DENYREADWRITE | OPEN_ACCESS_READWRITE, (PEAOP2)NULL ); - if( rc != NO_ERROR ){ - return SQLITE_CANTOPEN; - } - - f.h = hf; - f.locktype = NO_LOCK; - f.delOnClose = delFlag ? 1 : 0; - f.pathToDel = delFlag ? sqlite3OsFullPathname( zFilename ) : NULL; - OpenCounter( +1 ); - if( delFlag ) DosForceDelete( (PSZ)sqlite3OsFullPathname( zFilename ) ); - OSTRACE3( "OPEN EX %d \"%s\"\n", hf, sqlite3OsFullPathname ( zFilename ) ); - return allocateOs2File( &f, pld ); -} - -/* -** Attempt to open a new file for read-only access. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -*/ -int sqlite3Os2OpenReadOnly( const char *zFilename, OsFile **pld ){ - os2File f; - HFILE hf; - ULONG ulAction; - APIRET rc = NO_ERROR; - - assert( *pld == 0 ); - rc = DosOpen( (PSZ)zFilename, &hf, &ulAction, 0L, - FILE_NORMAL, OPEN_ACTION_OPEN_IF_EXISTS, - OPEN_FLAGS_FAIL_ON_ERROR | OPEN_FLAGS_RANDOM | - OPEN_SHARE_DENYWRITE | OPEN_ACCESS_READONLY, (PEAOP2)NULL ); - if( rc != NO_ERROR ){ - return SQLITE_CANTOPEN; - } - f.h = hf; - f.locktype = NO_LOCK; - f.delOnClose = 0; - f.pathToDel = NULL; - OpenCounter( +1 ); - OSTRACE3( "OPEN RO %d \"%s\"\n", hf, zFilename ); - return allocateOs2File( &f, pld ); -} - -/* -** Attempt to open a file descriptor for the directory that contains a -** file. This file descriptor can be used to fsync() the directory -** in order to make sure the creation of a new file is actually written -** to disk. -** -** This routine is only meaningful for Unix. It is a no-op under -** OS/2 since OS/2 does not support hard links. 
-** -** On success, a handle for a previously open file is at *id is -** updated with the new directory file descriptor and SQLITE_OK is -** returned. -** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id unchanged. -*/ -int os2OpenDirectory( - OsFile *id, - const char *zDirname -){ - return SQLITE_OK; -} +#define LOCK_TIMEOUT 10L /* the default locking timeout */ -/* -** Create a temporary file name in zBuf. zBuf must be big enough to -** hold at least SQLITE_TEMPNAME_SIZE characters. -*/ -int sqlite3Os2TempFileName( char *zBuf ){ - static const unsigned char zChars[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789"; - int i, j; - PSZ zTempPath = 0; - if( DosScanEnv( (PSZ)"TEMP", &zTempPath ) ){ - if( DosScanEnv( (PSZ)"TMP", &zTempPath ) ){ - if( DosScanEnv( (PSZ)"TMPDIR", &zTempPath ) ){ - ULONG ulDriveNum = 0, ulDriveMap = 0; - DosQueryCurrentDisk( &ulDriveNum, &ulDriveMap ); - sprintf( (char*)zTempPath, "%c:", (char)( 'A' + ulDriveNum - 1 ) ); - } - } - } - for(;;){ - sprintf( zBuf, "%s\\"TEMP_FILE_PREFIX, zTempPath ); - j = strlen( zBuf ); - sqlite3Randomness( 15, &zBuf[j] ); - for( i = 0; i < 15; i++, j++ ){ - zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; - } - zBuf[j] = 0; - if( !sqlite3OsFileExists( zBuf ) ) break; - } - OSTRACE2( "TEMP FILENAME: %s\n", zBuf ); - return SQLITE_OK; -} +/***************************************************************************** +** The next group of routines implement the I/O methods specified +** by the sqlite3_io_methods object. +******************************************************************************/ /* ** Close a file. */ -int os2Close( OsFile **pld ){ - os2File *pFile; +static int os2Close( sqlite3_file *id ){ APIRET rc = NO_ERROR; - if( pld && (pFile = (os2File*)*pld) != 0 ){ + os2File *pFile; + if( id && (pFile = (os2File*)id) != 0 ){ OSTRACE2( "CLOSE %d\n", pFile->h ); rc = DosClose( pFile->h ); pFile->locktype = NO_LOCK; - if( pFile->delOnClose != 0 ){ - rc = DosForceDelete( (PSZ)pFile->pathToDel ); + if( pFile->pathToDel != NULL ){ + rc = DosForceDelete( (PSZ)pFile->pathToDel ); + free( pFile->pathToDel ); + pFile->pathToDel = NULL; } - *pld = 0; + id = 0; OpenCounter( -1 ); } @@ -278,17 +103,28 @@ ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. */ -int os2Read( OsFile *id, void *pBuf, int amt ){ +static int os2Read( + sqlite3_file *id, /* File to read from */ + void *pBuf, /* Write content into this buffer */ + int amt, /* Number of bytes to read */ + sqlite3_int64 offset /* Begin reading at this offset */ +){ + ULONG fileLocation = 0L; ULONG got; + os2File *pFile = (os2File*)id; assert( id!=0 ); - SimulateIOError( return SQLITE_IOERR ); - OSTRACE3( "READ %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); - DosRead( ((os2File*)id)->h, pBuf, amt, &got ); - if (got == (ULONG)amt) - return SQLITE_OK; - else if (got == 0) + SimulateIOError( return SQLITE_IOERR_READ ); + OSTRACE3( "READ %d lock=%d\n", pFile->h, pFile->locktype ); + if( DosSetFilePtr(pFile->h, offset, FILE_BEGIN, &fileLocation) != NO_ERROR ){ + return SQLITE_IOERR; + } + if( DosRead( pFile->h, pBuf, amt, &got ) != NO_ERROR ){ return SQLITE_IOERR_READ; + } + if( got == (ULONG)amt ) + return SQLITE_OK; else { + /* Unread portions of the input buffer must be zero-filled */ memset(&((char*)pBuf)[got], 0, amt-got); return SQLITE_IOERR_SHORT_READ; } @@ -298,101 +134,113 @@ ** Write data from a buffer into a file. 
Return SQLITE_OK on success ** or some other error code on failure. */ -int os2Write( OsFile *id, const void *pBuf, int amt ){ +static int os2Write( + sqlite3_file *id, /* File to write into */ + const void *pBuf, /* The bytes to be written */ + int amt, /* Number of bytes to write */ + sqlite3_int64 offset /* Offset into the file to begin writing at */ +){ + ULONG fileLocation = 0L; APIRET rc = NO_ERROR; ULONG wrote; + os2File *pFile = (os2File*)id; assert( id!=0 ); - SimulateIOError( return SQLITE_IOERR ); + SimulateIOError( return SQLITE_IOERR_WRITE ); SimulateDiskfullError( return SQLITE_FULL ); - OSTRACE3( "WRITE %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + OSTRACE3( "WRITE %d lock=%d\n", pFile->h, pFile->locktype ); + if( DosSetFilePtr(pFile->h, offset, FILE_BEGIN, &fileLocation) != NO_ERROR ){ + return SQLITE_IOERR; + } + assert( amt>0 ); while( amt > 0 && - (rc = DosWrite( ((os2File*)id)->h, (PVOID)pBuf, amt, &wrote )) && wrote > 0 ){ - amt -= wrote; - pBuf = &((char*)pBuf)[wrote]; + ( rc = DosWrite( pFile->h, (PVOID)pBuf, amt, &wrote ) ) == NO_ERROR && + wrote > 0 + ){ + amt -= wrote; + pBuf = &((char*)pBuf)[wrote]; } return ( rc != NO_ERROR || amt > (int)wrote ) ? SQLITE_FULL : SQLITE_OK; } /* -** Move the read/write pointer in a file. +** Truncate an open file to a specified size */ -int os2Seek( OsFile *id, i64 offset ){ +static int os2Truncate( sqlite3_file *id, i64 nByte ){ APIRET rc = NO_ERROR; - ULONG filePointer = 0L; - assert( id!=0 ); - rc = DosSetFilePtr( ((os2File*)id)->h, offset, FILE_BEGIN, &filePointer ); - OSTRACE3( "SEEK %d %lld\n", ((os2File*)id)->h, offset ); - return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; -} - -/* -** Make sure all writes to a particular file are committed to disk. -*/ -int os2Sync( OsFile *id, int dataOnly ){ - assert( id!=0 ); - OSTRACE3( "SYNC %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); - return DosResetBuffer( ((os2File*)id)->h ) == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; + os2File *pFile = (os2File*)id; + OSTRACE3( "TRUNCATE %d %lld\n", pFile->h, nByte ); + SimulateIOError( return SQLITE_IOERR_TRUNCATE ); + rc = DosSetFileSize( pFile->h, nByte ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR_TRUNCATE; } +#ifdef SQLITE_TEST /* -** Sync the directory zDirname. This is a no-op on operating systems other -** than UNIX. +** Count the number of fullsyncs and normal syncs. This is used to test +** that syncs and fullsyncs are occuring at the right times. */ -int sqlite3Os2SyncDirectory( const char *zDirname ){ - SimulateIOError( return SQLITE_IOERR ); - return SQLITE_OK; -} +int sqlite3_sync_count = 0; +int sqlite3_fullsync_count = 0; +#endif /* -** Truncate an open file to a specified size +** Make sure all writes to a particular file are committed to disk. */ -int os2Truncate( OsFile *id, i64 nByte ){ - APIRET rc = NO_ERROR; - ULONG upperBits = nByte>>32; - assert( id!=0 ); - OSTRACE3( "TRUNCATE %d %lld\n", ((os2File*)id)->h, nByte ); - SimulateIOError( return SQLITE_IOERR ); - rc = DosSetFilePtr( ((os2File*)id)->h, nByte, FILE_BEGIN, &upperBits ); - if( rc != NO_ERROR ){ - return SQLITE_IOERR; +static int os2Sync( sqlite3_file *id, int flags ){ + os2File *pFile = (os2File*)id; + OSTRACE3( "SYNC %d lock=%d\n", pFile->h, pFile->locktype ); +#ifdef SQLITE_TEST + if( flags & SQLITE_SYNC_FULL){ + sqlite3_fullsync_count++; } - rc = DosSetFilePtr( ((os2File*)id)->h, 0L, FILE_END, &upperBits ); - return rc == NO_ERROR ? 
SQLITE_OK : SQLITE_IOERR; + sqlite3_sync_count++; +#endif + /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a + ** no-op + */ +#ifdef SQLITE_NO_SYNC + UNUSED_PARAMETER(pFile); + return SQLITE_OK; +#else + return DosResetBuffer( pFile->h ) == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +#endif } /* ** Determine the current size of a file in bytes */ -int os2FileSize( OsFile *id, i64 *pSize ){ +static int os2FileSize( sqlite3_file *id, sqlite3_int64 *pSize ){ APIRET rc = NO_ERROR; FILESTATUS3 fsts3FileInfo; memset(&fsts3FileInfo, 0, sizeof(fsts3FileInfo)); assert( id!=0 ); - SimulateIOError( return SQLITE_IOERR ); + SimulateIOError( return SQLITE_IOERR_FSTAT ); rc = DosQueryFileInfo( ((os2File*)id)->h, FIL_STANDARD, &fsts3FileInfo, sizeof(FILESTATUS3) ); if( rc == NO_ERROR ){ *pSize = fsts3FileInfo.cbFile; return SQLITE_OK; - } - else{ - return SQLITE_IOERR; + }else{ + return SQLITE_IOERR_FSTAT; } } /* ** Acquire a reader lock. */ -static int getReadLock( os2File *id ){ +static int getReadLock( os2File *pFile ){ FILELOCK LockArea, UnlockArea; + APIRET res; memset(&LockArea, 0, sizeof(LockArea)); memset(&UnlockArea, 0, sizeof(UnlockArea)); LockArea.lOffset = SHARED_FIRST; LockArea.lRange = SHARED_SIZE; UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; - return DosSetFileLocks( id->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 1L ); + OSTRACE3( "GETREADLOCK %d res=%d\n", pFile->h, res ); + return res; } /* @@ -401,33 +249,17 @@ static int unlockReadLock( os2File *id ){ FILELOCK LockArea, UnlockArea; + APIRET res; memset(&LockArea, 0, sizeof(LockArea)); memset(&UnlockArea, 0, sizeof(UnlockArea)); LockArea.lOffset = 0L; LockArea.lRange = 0L; UnlockArea.lOffset = SHARED_FIRST; UnlockArea.lRange = SHARED_SIZE; - return DosSetFileLocks( id->h, &UnlockArea, &LockArea, 2000L, 1L ); -} - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -/* -** Check that a given pathname is a directory and is writable -** -*/ -int sqlite3Os2IsDirWritable( char *zDirname ){ - FILESTATUS3 fsts3ConfigInfo; - APIRET rc = NO_ERROR; - memset(&fsts3ConfigInfo, 0, sizeof(fsts3ConfigInfo)); - if( zDirname==0 ) return 0; - if( strlen(zDirname)>CCHMAXPATH ) return 0; - rc = DosQueryPathInfo( (PSZ)zDirname, FIL_STANDARD, &fsts3ConfigInfo, sizeof(FILESTATUS3) ); - if( rc != NO_ERROR ) return 0; - if( (fsts3ConfigInfo.attrFile & FILE_DIRECTORY) != FILE_DIRECTORY ) return 0; - - return 1; + res = DosSetFileLocks( id->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 1L ); + OSTRACE3( "UNLOCK-READLOCK file handle=%d res=%d?\n", id->h, res ); + return res; } -#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ /* ** Lock the file with the lock specified by parameter locktype - one @@ -455,10 +287,10 @@ ** It is not possible to lower the locking level one step at a time. You ** must go straight to locking level 0. 
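The comment above spells out the lock-escalation ladder that the OS/2 port (like the unix port) follows. As a rough illustration only, and not code from this patch, the rule can be restated as a predicate over the usual SQLite lock levels, assuming the standard ordering NO_LOCK < SHARED_LOCK < RESERVED_LOCK < PENDING_LOCK < EXCLUSIVE_LOCK; the real os2Lock() below enforces the same constraints with assert()s.

/* Illustrative sketch only -- not part of the patch.  It restates the
** escalation rules described in the comment above. */
static int lockStepIsLegal(int have, int want){
  if( want==SHARED_LOCK )    return have==NO_LOCK;      /* UNLOCKED -> SHARED  */
  if( want==RESERVED_LOCK )  return have==SHARED_LOCK;  /* SHARED -> RESERVED  */
  if( want==EXCLUSIVE_LOCK ) return have>=SHARED_LOCK;  /* via a PENDING lock  */
  return 0;  /* PENDING is only taken internally on the way to EXCLUSIVE */
}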
*/ -int os2Lock( OsFile *id, int locktype ){ - APIRET rc = SQLITE_OK; /* Return code from subroutines */ +static int os2Lock( sqlite3_file *id, int locktype ){ + int rc = SQLITE_OK; /* Return code from subroutines */ APIRET res = NO_ERROR; /* Result of an OS/2 lock call */ - int newLocktype; /* Set id->locktype to this value before exiting */ + int newLocktype; /* Set pFile->locktype to this value before exiting */ int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ FILELOCK LockArea, UnlockArea; @@ -469,10 +301,11 @@ OSTRACE4( "LOCK %d %d was %d\n", pFile->h, locktype, pFile->locktype ); /* If there is already a lock of this type or more restrictive on the - ** OsFile, do nothing. Don't use the end_lock: exit path, as - ** sqlite3OsEnterMutex() hasn't been called yet. + ** os2File, do nothing. Don't use the end_lock: exit path, as + ** sqlite3_mutex_enter() hasn't been called yet. */ if( pFile->locktype>=locktype ){ + OSTRACE3( "LOCK %d %d ok (already held)\n", pFile->h, locktype ); return SQLITE_OK; } @@ -488,59 +321,58 @@ */ newLocktype = pFile->locktype; if( pFile->locktype==NO_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) + || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) ){ - int cnt = 3; - LockArea.lOffset = PENDING_BYTE; LockArea.lRange = 1L; UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; - while( cnt-->0 && (res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L) )!=NO_ERROR ){ - /* Try 3 times to get the pending lock. The pending lock might be - ** held by another reader process who will release it momentarily. - */ - OSTRACE2( "could not get a PENDING lock. cnt=%d\n", cnt ); - DosSleep(1); + /* wait longer than LOCK_TIMEOUT here not to have to try multiple times */ + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 100L, 0L ); + if( res == NO_ERROR ){ + gotPendingLock = 1; + OSTRACE3( "LOCK %d pending lock boolean set. res=%d\n", pFile->h, res ); } - gotPendingLock = res; } /* Acquire a shared lock */ - if( locktype==SHARED_LOCK && res ){ + if( locktype==SHARED_LOCK && res == NO_ERROR ){ assert( pFile->locktype==NO_LOCK ); res = getReadLock(pFile); if( res == NO_ERROR ){ newLocktype = SHARED_LOCK; } + OSTRACE3( "LOCK %d acquire shared lock. res=%d\n", pFile->h, res ); } /* Acquire a RESERVED lock */ - if( locktype==RESERVED_LOCK && res ){ + if( locktype==RESERVED_LOCK && res == NO_ERROR ){ assert( pFile->locktype==SHARED_LOCK ); LockArea.lOffset = RESERVED_BYTE; LockArea.lRange = 1L; UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; - res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); if( res == NO_ERROR ){ newLocktype = RESERVED_LOCK; } + OSTRACE3( "LOCK %d acquire reserved lock. res=%d\n", pFile->h, res ); } /* Acquire a PENDING lock */ - if( locktype==EXCLUSIVE_LOCK && res ){ + if( locktype==EXCLUSIVE_LOCK && res == NO_ERROR ){ newLocktype = PENDING_LOCK; gotPendingLock = 0; + OSTRACE2( "LOCK %d acquire pending lock. 
pending lock boolean unset.\n", pFile->h ); } /* Acquire an EXCLUSIVE lock */ - if( locktype==EXCLUSIVE_LOCK && res ){ + if( locktype==EXCLUSIVE_LOCK && res == NO_ERROR ){ assert( pFile->locktype>=SHARED_LOCK ); res = unlockReadLock(pFile); OSTRACE2( "unreadlock = %d\n", res ); @@ -548,23 +380,27 @@ LockArea.lRange = SHARED_SIZE; UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; - res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); if( res == NO_ERROR ){ newLocktype = EXCLUSIVE_LOCK; }else{ - OSTRACE2( "error-code = %d\n", res ); + OSTRACE2( "OS/2 error-code = %d\n", res ); + getReadLock(pFile); } + OSTRACE3( "LOCK %d acquire exclusive lock. res=%d\n", pFile->h, res ); } /* If we are holding a PENDING lock that ought to be released, then ** release it now. */ if( gotPendingLock && locktype==SHARED_LOCK ){ + int r; LockArea.lOffset = 0L; LockArea.lRange = 0L; UnlockArea.lOffset = PENDING_BYTE; UnlockArea.lRange = 1L; - DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + r = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "LOCK %d unlocking pending/is shared. r=%d\n", pFile->h, r ); } /* Update the state of the lock has held in the file descriptor then @@ -574,10 +410,11 @@ rc = SQLITE_OK; }else{ OSTRACE4( "LOCK FAILED %d trying for %d but got %d\n", pFile->h, - locktype, newLocktype ); + locktype, newLocktype ); rc = SQLITE_BUSY; } pFile->locktype = newLocktype; + OSTRACE3( "LOCK %d now %d\n", pFile->h, pFile->locktype ); return rc; } @@ -586,33 +423,39 @@ ** file by this or any other process. If such a lock is held, return ** non-zero, otherwise zero. */ -int os2CheckReservedLock( OsFile *id ){ - APIRET rc = NO_ERROR; +static int os2CheckReservedLock( sqlite3_file *id, int *pOut ){ + int r = 0; os2File *pFile = (os2File*)id; assert( pFile!=0 ); if( pFile->locktype>=RESERVED_LOCK ){ - rc = 1; - OSTRACE3( "TEST WR-LOCK %d %d (local)\n", pFile->h, rc ); + r = 1; + OSTRACE3( "TEST WR-LOCK %d %d (local)\n", pFile->h, r ); }else{ FILELOCK LockArea, UnlockArea; + APIRET rc = NO_ERROR; memset(&LockArea, 0, sizeof(LockArea)); memset(&UnlockArea, 0, sizeof(UnlockArea)); LockArea.lOffset = RESERVED_BYTE; LockArea.lRange = 1L; UnlockArea.lOffset = 0L; UnlockArea.lRange = 0L; - rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "TEST WR-LOCK %d lock reserved byte rc=%d\n", pFile->h, rc ); if( rc == NO_ERROR ){ + APIRET rcu = NO_ERROR; /* return code for unlocking */ LockArea.lOffset = 0L; LockArea.lRange = 0L; UnlockArea.lOffset = RESERVED_BYTE; UnlockArea.lRange = 1L; - rc = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + rcu = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "TEST WR-LOCK %d unlock reserved byte r=%d\n", pFile->h, rcu ); } - OSTRACE3( "TEST WR-LOCK %d %d (remote)\n", pFile->h, rc ); + r = !(rc == NO_ERROR); + OSTRACE3( "TEST WR-LOCK %d %d (remote)\n", pFile->h, r ); } - return rc; + *pOut = r; + return SQLITE_OK; } /* @@ -626,10 +469,11 @@ ** is NO_LOCK. 
If the second argument is SHARED_LOCK then this routine ** might return SQLITE_IOERR; */ -int os2Unlock( OsFile *id, int locktype ){ +static int os2Unlock( sqlite3_file *id, int locktype ){ int type; - APIRET rc = SQLITE_OK; os2File *pFile = (os2File*)id; + APIRET rc = SQLITE_OK; + APIRET res = NO_ERROR; FILELOCK LockArea, UnlockArea; memset(&LockArea, 0, sizeof(LockArea)); @@ -643,11 +487,13 @@ LockArea.lRange = 0L; UnlockArea.lOffset = SHARED_FIRST; UnlockArea.lRange = SHARED_SIZE; - DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "UNLOCK %d exclusive lock res=%d\n", pFile->h, res ); if( locktype==SHARED_LOCK && getReadLock(pFile) != NO_ERROR ){ /* This should never happen. We should always be able to ** reacquire the read lock */ - rc = SQLITE_IOERR; + OSTRACE3( "UNLOCK %d to %d getReadLock() failed\n", pFile->h, locktype ); + rc = SQLITE_IOERR_UNLOCK; } } if( type>=RESERVED_LOCK ){ @@ -655,159 +501,448 @@ LockArea.lRange = 0L; UnlockArea.lOffset = RESERVED_BYTE; UnlockArea.lRange = 1L; - DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "UNLOCK %d reserved res=%d\n", pFile->h, res ); } if( locktype==NO_LOCK && type>=SHARED_LOCK ){ - unlockReadLock(pFile); + res = unlockReadLock(pFile); + OSTRACE5( "UNLOCK %d is %d want %d res=%d\n", pFile->h, type, locktype, res ); } if( type>=PENDING_LOCK ){ LockArea.lOffset = 0L; LockArea.lRange = 0L; UnlockArea.lOffset = PENDING_BYTE; UnlockArea.lRange = 1L; - DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, 2000L, 1L ); + res = DosSetFileLocks( pFile->h, &UnlockArea, &LockArea, LOCK_TIMEOUT, 0L ); + OSTRACE3( "UNLOCK %d pending res=%d\n", pFile->h, res ); } pFile->locktype = locktype; + OSTRACE3( "UNLOCK %d now %d\n", pFile->h, pFile->locktype ); return rc; } /* -** Turn a relative pathname into a full pathname. Return a pointer -** to the full pathname stored in space obtained from sqliteMalloc(). -** The calling function is responsible for freeing this space once it -** is no longer needed. -*/ -char *sqlite3Os2FullPathname( const char *zRelative ){ - char *zFull = 0; - if( strchr(zRelative, ':') ){ - sqlite3SetString( &zFull, zRelative, (char*)0 ); - }else{ - ULONG ulDriveNum = 0; - ULONG ulDriveMap = 0; - ULONG cbzBufLen = SQLITE_TEMPNAME_SIZE; - char zDrive[2]; - char *zBuff; - - zBuff = sqliteMalloc( cbzBufLen ); - if( zBuff != 0 ){ - DosQueryCurrentDisk( &ulDriveNum, &ulDriveMap ); - if( DosQueryCurrentDir( ulDriveNum, (PBYTE)zBuff, &cbzBufLen ) == NO_ERROR ){ - sprintf( zDrive, "%c", (char)('A' + ulDriveNum - 1) ); - sqlite3SetString( &zFull, zDrive, ":\\", zBuff, - "\\", zRelative, (char*)0 ); - } - sqliteFree( zBuff ); +** Control and query of the open file handle. +*/ +static int os2FileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = ((os2File*)id)->locktype; + OSTRACE3( "FCNTL_LOCKSTATE %d lock=%d\n", ((os2File*)id)->h, ((os2File*)id)->locktype ); + return SQLITE_OK; } } - return zFull; + return SQLITE_ERROR; } /* -** The fullSync option is meaningless on os2, or correct me if I'm wrong. This is a no-op. -** From os_unix.c: Change the value of the fullsync flag in the given file descriptor. -** From os_unix.c: ((unixFile*)id)->fullSync = v; +** Return the sector size in bytes of the underlying block device for +** the specified file. 
This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and its journal file) that the sector size will be the +** same for both. */ -static void os2SetFullSync( OsFile *id, int v ){ - return; +static int os2SectorSize(sqlite3_file *id){ + return SQLITE_DEFAULT_SECTOR_SIZE; } /* -** Return the underlying file handle for an OsFile +** Return a vector of device characteristics. */ -static int os2FileHandle( OsFile *id ){ - return (int)((os2File*)id)->h; +static int os2DeviceCharacteristics(sqlite3_file *id){ + return 0; } + /* -** Return an integer that indices the type of lock currently held -** by this handle. (Used for testing and analysis only.) +** Character set conversion objects used by conversion routines. */ -static int os2LockState( OsFile *id ){ - return ((os2File*)id)->locktype; +static UconvObject ucUtf8 = NULL; /* convert between UTF-8 and UCS-2 */ +static UconvObject uclCp = NULL; /* convert between local codepage and UCS-2 */ + +/* +** Helper function to initialize the conversion objects from and to UTF-8. +*/ +static void initUconvObjects( void ){ + if( UniCreateUconvObject( UTF_8, &ucUtf8 ) != ULS_SUCCESS ) + ucUtf8 = NULL; + if ( UniCreateUconvObject( (UniChar *)L"@path=yes", &uclCp ) != ULS_SUCCESS ) + uclCp = NULL; } /* -** Return the sector size in bytes of the underlying block device for -** the specified file. This is almost always 512 bytes, but may be -** larger for some devices. +** Helper function to free the conversion objects from and to UTF-8. +*/ +static void freeUconvObjects( void ){ + if ( ucUtf8 ) + UniFreeUconvObject( ucUtf8 ); + if ( uclCp ) + UniFreeUconvObject( uclCp ); + ucUtf8 = NULL; + uclCp = NULL; +} + +/* +** Helper function to convert UTF-8 filenames to local OS/2 codepage. +** The two-step process: first convert the incoming UTF-8 string +** into UCS-2 and then from UCS-2 to the current codepage. +** The returned char pointer has to be freed. +*/ +static char *convertUtf8PathToCp( const char *in ){ + UniChar tempPath[CCHMAXPATH]; + char *out = (char *)calloc( CCHMAXPATH, 1 ); + + if( !out ) + return NULL; + + if( !ucUtf8 || !uclCp ) + initUconvObjects(); + + /* determine string for the conversion of UTF-8 which is CP1208 */ + if( UniStrToUcs( ucUtf8, tempPath, (char *)in, CCHMAXPATH ) != ULS_SUCCESS ) + return out; /* if conversion fails, return the empty string */ + + /* conversion for current codepage which can be used for paths */ + UniStrFromUcs( uclCp, out, tempPath, CCHMAXPATH ); + + return out; +} + +/* +** Helper function to convert filenames from local codepage to UTF-8. +** The two-step process: first convert the incoming codepage-specific +** string into UCS-2 and then from UCS-2 to the codepage of UTF-8. +** The returned char pointer has to be freed. ** -** SQLite code assumes this function cannot fail. It also assumes that -** if two files are created in the same file-system directory (i.e. -** a database and it's journal file) that the sector size will be the -** same for both. +** This function is non-static to be able to use this in shell.c and +** similar applications that take command line arguments. 
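Taken together, convertUtf8PathToCp() and convertCpPathToUtf8() bridge the UTF-8 names SQLite uses internally and the codepage-specific names the Dos* APIs expect. A hypothetical caller might look like the sketch below; the file name is made up, and the calloc()'d result must always be freed by the caller, as the comments above note.

/* Hypothetical usage sketch, mirroring the pattern used by os2Open()
** further down: convert the UTF-8 name, hand the result to a Dos* API,
** then free it.  The path "C:\data\test.db" is made up. */
HFILE h;
ULONG ulAction;
APIRET rc;
char *zCp = convertUtf8PathToCp( "C:\\data\\test.db" );
if( zCp ){
  rc = DosOpen( (PSZ)zCp, &h, &ulAction, 0L, FILE_NORMAL,
                OPEN_ACTION_OPEN_IF_EXISTS,
                OPEN_SHARE_DENYNONE | OPEN_ACCESS_READONLY,
                (PEAOP2)NULL );
  free( zCp );
}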
*/ -static int os2SectorSize(OsFile *id){ - return SQLITE_DEFAULT_SECTOR_SIZE; +char *convertCpPathToUtf8( const char *in ){ + UniChar tempPath[CCHMAXPATH]; + char *out = (char *)calloc( CCHMAXPATH, 1 ); + + if( !out ) + return NULL; + + if( !ucUtf8 || !uclCp ) + initUconvObjects(); + + /* conversion for current codepage which can be used for paths */ + if( UniStrToUcs( uclCp, tempPath, (char *)in, CCHMAXPATH ) != ULS_SUCCESS ) + return out; /* if conversion fails, return the empty string */ + + /* determine string for the conversion of UTF-8 which is CP1208 */ + UniStrFromUcs( ucUtf8, out, tempPath, CCHMAXPATH ); + + return out; } /* -** This vector defines all the methods that can operate on an OsFile -** for os2. +** This vector defines all the methods that can operate on an +** sqlite3_file for os2. */ -static const IoMethod sqlite3Os2IoMethod = { +static const sqlite3_io_methods os2IoMethod = { + 1, /* iVersion */ os2Close, - os2OpenDirectory, os2Read, os2Write, - os2Seek, os2Truncate, os2Sync, - os2SetFullSync, - os2FileHandle, os2FileSize, os2Lock, os2Unlock, - os2LockState, os2CheckReservedLock, + os2FileControl, os2SectorSize, + os2DeviceCharacteristics }; +/*************************************************************************** +** Here ends the I/O methods that form the sqlite3_io_methods object. +** +** The next block of code implements the VFS methods. +****************************************************************************/ + /* -** Allocate memory for an OsFile. Initialize the new OsFile -** to the value given in pInit and return a pointer to the new -** OsFile. If we run out of memory, close the file and return NULL. -*/ -int allocateOs2File( os2File *pInit, OsFile **pld ){ - os2File *pNew; - pNew = sqliteMalloc( sizeof(*pNew) ); - if( pNew==0 ){ - DosClose( pInit->h ); - *pld = 0; - return SQLITE_NOMEM; +** Create a temporary file name in zBuf. zBuf must be big enough to +** hold at pVfs->mxPathname characters. +*/ +static int getTempname(int nBuf, char *zBuf ){ + static const unsigned char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + int i, j; + char zTempPathBuf[3]; + PSZ zTempPath = (PSZ)&zTempPathBuf; + if( sqlite3_temp_directory ){ + zTempPath = sqlite3_temp_directory; }else{ - *pNew = *pInit; - pNew->pMethod = &sqlite3Os2IoMethod; - pNew->locktype = NO_LOCK; - *pld = (OsFile*)pNew; - OpenCounter(+1); - return SQLITE_OK; + if( DosScanEnv( (PSZ)"TEMP", &zTempPath ) ){ + if( DosScanEnv( (PSZ)"TMP", &zTempPath ) ){ + if( DosScanEnv( (PSZ)"TMPDIR", &zTempPath ) ){ + ULONG ulDriveNum = 0, ulDriveMap = 0; + DosQueryCurrentDisk( &ulDriveNum, &ulDriveMap ); + sprintf( (char*)zTempPath, "%c:", (char)( 'A' + ulDriveNum - 1 ) ); + } + } + } + } + /* Strip off a trailing slashes or backslashes, otherwise we would get * + * multiple (back)slashes which causes DosOpen() to fail. * + * Trailing spaces are not allowed, either. 
*/ + j = sqlite3Strlen30(zTempPath); + while( j > 0 && ( zTempPath[j-1] == '\\' || zTempPath[j-1] == '/' + || zTempPath[j-1] == ' ' ) ){ + j--; + } + zTempPath[j] = '\0'; + if( !sqlite3_temp_directory ){ + char *zTempPathUTF = convertCpPathToUtf8( zTempPath ); + sqlite3_snprintf( nBuf-30, zBuf, + "%s\\"SQLITE_TEMP_FILE_PREFIX, zTempPathUTF ); + free( zTempPathUTF ); + }else{ + sqlite3_snprintf( nBuf-30, zBuf, + "%s\\"SQLITE_TEMP_FILE_PREFIX, zTempPath ); + } + j = sqlite3Strlen30( zBuf ); + sqlite3_randomness( 20, &zBuf[j] ); + for( i = 0; i < 20; i++, j++ ){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } + zBuf[j] = 0; + OSTRACE2( "TEMP FILENAME: %s\n", zBuf ); + return SQLITE_OK; +} + + +/* +** Turn a relative pathname into a full pathname. Write the full +** pathname into zFull[]. zFull[] will be at least pVfs->mxPathname +** bytes in size. +*/ +static int os2FullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zRelative, /* Possibly relative input path */ + int nFull, /* Size of output buffer in bytes */ + char *zFull /* Output buffer */ +){ + char *zRelativeCp = convertUtf8PathToCp( zRelative ); + char zFullCp[CCHMAXPATH] = "\0"; + char *zFullUTF; + APIRET rc = DosQueryPathInfo( zRelativeCp, FIL_QUERYFULLNAME, zFullCp, + CCHMAXPATH ); + free( zRelativeCp ); + zFullUTF = convertCpPathToUtf8( zFullCp ); + sqlite3_snprintf( nFull, zFull, zFullUTF ); + free( zFullUTF ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR; +} + + +/* +** Open a file. +*/ +static int os2Open( + sqlite3_vfs *pVfs, /* Not used */ + const char *zName, /* Name of the file */ + sqlite3_file *id, /* Write the SQLite file handle here */ + int flags, /* Open mode flags */ + int *pOutFlags /* Status return flags */ +){ + HFILE h; + ULONG ulFileAttribute = FILE_NORMAL; + ULONG ulOpenFlags = 0; + ULONG ulOpenMode = 0; + os2File *pFile = (os2File*)id; + APIRET rc = NO_ERROR; + ULONG ulAction; + char *zNameCp; + char zTmpname[CCHMAXPATH+1]; /* Buffer to hold name of temp file */ + + /* If the second argument to this function is NULL, generate a + ** temporary file name to use + */ + if( !zName ){ + int rc = getTempname(CCHMAXPATH+1, zTmpname); + if( rc!=SQLITE_OK ){ + return rc; + } + zName = zTmpname; + } + + + memset( pFile, 0, sizeof(*pFile) ); + + OSTRACE2( "OPEN want %d\n", flags ); + + if( flags & SQLITE_OPEN_READWRITE ){ + ulOpenMode |= OPEN_ACCESS_READWRITE; + OSTRACE1( "OPEN read/write\n" ); + }else{ + ulOpenMode |= OPEN_ACCESS_READONLY; + OSTRACE1( "OPEN read only\n" ); + } + + if( flags & SQLITE_OPEN_CREATE ){ + ulOpenFlags |= OPEN_ACTION_OPEN_IF_EXISTS | OPEN_ACTION_CREATE_IF_NEW; + OSTRACE1( "OPEN open new/create\n" ); + }else{ + ulOpenFlags |= OPEN_ACTION_OPEN_IF_EXISTS | OPEN_ACTION_FAIL_IF_NEW; + OSTRACE1( "OPEN open existing\n" ); + } + + if( flags & SQLITE_OPEN_MAIN_DB ){ + ulOpenMode |= OPEN_SHARE_DENYNONE; + OSTRACE1( "OPEN share read/write\n" ); + }else{ + ulOpenMode |= OPEN_SHARE_DENYWRITE; + OSTRACE1( "OPEN share read only\n" ); + } + + if( flags & SQLITE_OPEN_DELETEONCLOSE ){ + char pathUtf8[CCHMAXPATH]; +#ifdef NDEBUG /* when debugging we want to make sure it is deleted */ + ulFileAttribute = FILE_HIDDEN; +#endif + os2FullPathname( pVfs, zName, CCHMAXPATH, pathUtf8 ); + pFile->pathToDel = convertUtf8PathToCp( pathUtf8 ); + OSTRACE1( "OPEN hidden/delete on close file attributes\n" ); + }else{ + pFile->pathToDel = NULL; + OSTRACE1( "OPEN normal file attribute\n" ); + } + + /* always open in random access mode for possibly better speed */ + 
ulOpenMode |= OPEN_FLAGS_RANDOM; + ulOpenMode |= OPEN_FLAGS_FAIL_ON_ERROR; + ulOpenMode |= OPEN_FLAGS_NOINHERIT; + + zNameCp = convertUtf8PathToCp( zName ); + rc = DosOpen( (PSZ)zNameCp, + &h, + &ulAction, + 0L, + ulFileAttribute, + ulOpenFlags, + ulOpenMode, + (PEAOP2)NULL ); + free( zNameCp ); + if( rc != NO_ERROR ){ + OSTRACE7( "OPEN Invalid handle rc=%d: zName=%s, ulAction=%#lx, ulAttr=%#lx, ulFlags=%#lx, ulMode=%#lx\n", + rc, zName, ulAction, ulFileAttribute, ulOpenFlags, ulOpenMode ); + if( pFile->pathToDel ) + free( pFile->pathToDel ); + pFile->pathToDel = NULL; + if( flags & SQLITE_OPEN_READWRITE ){ + OSTRACE2( "OPEN %d Invalid handle\n", ((flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE) ); + return os2Open( pVfs, zName, id, + ((flags | SQLITE_OPEN_READONLY) & ~SQLITE_OPEN_READWRITE), + pOutFlags ); + }else{ + return SQLITE_CANTOPEN; + } + } + + if( pOutFlags ){ + *pOutFlags = flags & SQLITE_OPEN_READWRITE ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY; + } + + pFile->pMethod = &os2IoMethod; + pFile->h = h; + OpenCounter(+1); + OSTRACE3( "OPEN %d pOutFlags=%d\n", pFile->h, pOutFlags ); + return SQLITE_OK; +} + +/* +** Delete the named file. +*/ +static int os2Delete( + sqlite3_vfs *pVfs, /* Not used on os2 */ + const char *zFilename, /* Name of file to delete */ + int syncDir /* Not used on os2 */ +){ + APIRET rc = NO_ERROR; + char *zFilenameCp = convertUtf8PathToCp( zFilename ); + SimulateIOError( return SQLITE_IOERR_DELETE ); + rc = DosDelete( (PSZ)zFilenameCp ); + free( zFilenameCp ); + OSTRACE2( "DELETE \"%s\"\n", zFilename ); + return rc == NO_ERROR ? SQLITE_OK : SQLITE_IOERR_DELETE; +} + +/* +** Check the existance and status of a file. +*/ +static int os2Access( + sqlite3_vfs *pVfs, /* Not used on os2 */ + const char *zFilename, /* Name of file to check */ + int flags, /* Type of test to make on this file */ + int *pOut /* Write results here */ +){ + FILESTATUS3 fsts3ConfigInfo; + APIRET rc = NO_ERROR; + char *zFilenameCp = convertUtf8PathToCp( zFilename ); + + memset( &fsts3ConfigInfo, 0, sizeof(fsts3ConfigInfo) ); + rc = DosQueryPathInfo( (PSZ)zFilenameCp, FIL_STANDARD, + &fsts3ConfigInfo, sizeof(FILESTATUS3) ); + free( zFilenameCp ); + OSTRACE4( "ACCESS fsts3ConfigInfo.attrFile=%d flags=%d rc=%d\n", + fsts3ConfigInfo.attrFile, flags, rc ); + switch( flags ){ + case SQLITE_ACCESS_READ: + case SQLITE_ACCESS_EXISTS: + rc = (rc == NO_ERROR); + OSTRACE3( "ACCESS %s access of read and exists rc=%d\n", zFilename, rc ); + break; + case SQLITE_ACCESS_READWRITE: + rc = (rc == NO_ERROR) && ( (fsts3ConfigInfo.attrFile & FILE_READONLY) == 0 ); + OSTRACE3( "ACCESS %s access of read/write rc=%d\n", zFilename, rc ); + break; + default: + assert( !"Invalid flags argument" ); + } + *pOut = rc; + return SQLITE_OK; } -#endif /* SQLITE_OMIT_DISKIO */ -/*************************************************************************** -** Everything above deals with file I/O. Everything that follows deals -** with other miscellanous aspects of the operating system interface -****************************************************************************/ #ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** Interfaces for opening a shared library, finding entry points ** within the shared library, and closing the shared library. */ -void *sqlite3Os2Dlopen(const char *zFilename){ +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. 
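The os2Dl* methods introduced below back SQLite's run-time loadable-extension machinery. A hedged sketch of how an application reaches them through the public API follows; the extension name is made up, and extension loading must be enabled explicitly because it is off by default.

/* Hypothetical application-side use; "myext" is a made-up module name. */
sqlite3 *db = 0;
char *zErrMsg = 0;
if( sqlite3_open( "test.db", &db )==SQLITE_OK ){
  sqlite3_enable_load_extension( db, 1 );
  if( sqlite3_load_extension( db, "myext", 0, &zErrMsg )!=SQLITE_OK ){
    sqlite3_free( zErrMsg );      /* error text is allocated by SQLite */
  }
  sqlite3_close( db );
}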
+*/ +static void *os2DlOpen(sqlite3_vfs *pVfs, const char *zFilename){ UCHAR loadErr[256]; HMODULE hmod; APIRET rc; - rc = DosLoadModule((PSZ)loadErr, sizeof(loadErr), zFilename, &hmod); - if (rc != NO_ERROR) return 0; - return (void*)hmod; + char *zFilenameCp = convertUtf8PathToCp(zFilename); + rc = DosLoadModule((PSZ)loadErr, sizeof(loadErr), zFilenameCp, &hmod); + free(zFilenameCp); + return rc != NO_ERROR ? 0 : (void*)hmod; } -void *sqlite3Os2Dlsym(void *pHandle, const char *zSymbol){ +/* +** A no-op since the error code is returned on the DosLoadModule call. +** os2Dlopen returns zero if DosLoadModule is not successful. +*/ +static void os2DlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ +/* no-op */ +} +static void *os2DlSym(sqlite3_vfs *pVfs, void *pHandle, const char *zSymbol){ PFN pfn; APIRET rc; rc = DosQueryProcAddr((HMODULE)pHandle, 0L, zSymbol, &pfn); - if (rc != NO_ERROR) { + if( rc != NO_ERROR ){ /* if the symbol itself was not found, search again for the same * symbol with an extra underscore, that might be needed depending * on the calling convention */ @@ -815,100 +950,92 @@ strncat(_zSymbol, zSymbol, 255); rc = DosQueryProcAddr((HMODULE)pHandle, 0L, _zSymbol, &pfn); } - if (rc != NO_ERROR) return 0; - return (void *)pfn; + return rc != NO_ERROR ? 0 : (void*)pfn; } -int sqlite3Os2Dlclose(void *pHandle){ - return DosFreeModule((HMODULE)pHandle); -} -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ - - -/* -** Get information to seed the random number generator. The seed -** is written into the buffer zBuf[256]. The calling function must -** supply a sufficiently large buffer. -*/ -int sqlite3Os2RandomSeed( char *zBuf ){ - /* We have to initialize zBuf to prevent valgrind from reporting - ** errors. The reports issued by valgrind are incorrect - we would - ** prefer that the randomness be increased by making use of the - ** uninitialized space in zBuf - but valgrind errors tend to worry - ** some users. Rather than argue, it seems easier just to initialize - ** the whole array and silence valgrind, even if that means less randomness - ** in the random seed. - ** - ** When testing, initializing zBuf[] to zero is all we do. That means - ** that we always use the same random number sequence. This makes the - ** tests repeatable. - */ - memset( zBuf, 0, 256 ); - DosGetDateTime( (PDATETIME)zBuf ); - return SQLITE_OK; +static void os2DlClose(sqlite3_vfs *pVfs, void *pHandle){ + DosFreeModule((HMODULE)pHandle); } +#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ + #define os2DlOpen 0 + #define os2DlError 0 + #define os2DlSym 0 + #define os2DlClose 0 +#endif -/* -** Sleep for a little while. Return the amount of time slept. -*/ -int sqlite3Os2Sleep( int ms ){ - DosSleep( ms ); - return ms; -} /* -** Static variables used for thread synchronization +** Write up to nBuf bytes of randomness into zBuf. 
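os2Randomness() below only feeds SQLite's internal pseudo-random number generator; application code would normally obtain random bytes through the public wrapper, as in this minimal sketch.

/* Minimal sketch: the public API that is seeded by the VFS xRandomness. */
unsigned char aSeed[32];
sqlite3_randomness( sizeof(aSeed), aSeed );  /* fills aSeed with pseudo-random bytes */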
*/ -static int inMutex = 0; -#ifdef SQLITE_OS2_THREADS -static ULONG mutexOwner; +static int os2Randomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf ){ + int n = 0; +#if defined(SQLITE_TEST) + n = nBuf; + memset(zBuf, 0, nBuf); +#else + int sizeofULong = sizeof(ULONG); + if( (int)sizeof(DATETIME) <= nBuf - n ){ + DATETIME x; + DosGetDateTime(&x); + memcpy(&zBuf[n], &x, sizeof(x)); + n += sizeof(x); + } + + if( sizeofULong <= nBuf - n ){ + PPIB ppib; + DosGetInfoBlocks(NULL, &ppib); + memcpy(&zBuf[n], &ppib->pib_ulpid, sizeofULong); + n += sizeofULong; + } + + if( sizeofULong <= nBuf - n ){ + PTIB ptib; + DosGetInfoBlocks(&ptib, NULL); + memcpy(&zBuf[n], &ptib->tib_ptib2->tib2_ultid, sizeofULong); + n += sizeofULong; + } + + /* if we still haven't filled the buffer yet the following will */ + /* grab everything once instead of making several calls for a single item */ + if( sizeofULong <= nBuf - n ){ + ULONG ulSysInfo[QSV_MAX]; + DosQuerySysInfo(1L, QSV_MAX, ulSysInfo, sizeofULong * QSV_MAX); + + memcpy(&zBuf[n], &ulSysInfo[QSV_MS_COUNT - 1], sizeofULong); + n += sizeofULong; + + if( sizeofULong <= nBuf - n ){ + memcpy(&zBuf[n], &ulSysInfo[QSV_TIMER_INTERVAL - 1], sizeofULong); + n += sizeofULong; + } + if( sizeofULong <= nBuf - n ){ + memcpy(&zBuf[n], &ulSysInfo[QSV_TIME_LOW - 1], sizeofULong); + n += sizeofULong; + } + if( sizeofULong <= nBuf - n ){ + memcpy(&zBuf[n], &ulSysInfo[QSV_TIME_HIGH - 1], sizeofULong); + n += sizeofULong; + } + if( sizeofULong <= nBuf - n ){ + memcpy(&zBuf[n], &ulSysInfo[QSV_TOTAVAILMEM - 1], sizeofULong); + n += sizeofULong; + } + } #endif -/* -** The following pair of routines implement mutual exclusion for -** multi-threaded processes. Only a single thread is allowed to -** executed code that is surrounded by EnterMutex() and LeaveMutex(). -** -** SQLite uses only a single Mutex. There is not much critical -** code and what little there is executes quickly and without blocking. -*/ -void sqlite3Os2EnterMutex(){ -#ifdef SQLITE_OS2_THREADS - PTIB ptib; - DosEnterCritSec(); - DosGetInfoBlocks( &ptib, NULL ); - mutexOwner = ptib->tib_ptib2->tib2_ultid; -#endif - assert( !inMutex ); - inMutex = 1; -} -void sqlite3Os2LeaveMutex(){ -#ifdef SQLITE_OS2_THREADS - PTIB ptib; -#endif - assert( inMutex ); - inMutex = 0; -#ifdef SQLITE_OS2_THREADS - DosGetInfoBlocks( &ptib, NULL ); - assert( mutexOwner == ptib->tib_ptib2->tib2_ultid ); - DosExitCritSec(); -#endif + return n; } /* -** Return TRUE if the mutex is currently held. -** -** If the thisThreadOnly parameter is true, return true if and only if the -** calling thread holds the mutex. If the parameter is false, return -** true if any thread holds the mutex. -*/ -int sqlite3Os2InMutex( int thisThreadOnly ){ -#ifdef SQLITE_OS2_THREADS - PTIB ptib; - DosGetInfoBlocks( &ptib, NULL ); - return inMutex>0 && (thisThreadOnly==0 || mutexOwner==ptib->tib_ptib2->tib2_ultid); -#else - return inMutex>0; -#endif +** Sleep for a little while. Return the amount of time slept. +** The argument is the number of microseconds we want to sleep. +** The return value is the number of microseconds of sleep actually +** requested from the underlying operating system, a number which +** might be greater than or equal to the argument, but not less +** than the argument. +*/ +static int os2Sleep( sqlite3_vfs *pVfs, int microsec ){ + DosSleep( (microsec/1000) ); + return microsec; } /* @@ -924,14 +1051,15 @@ ** current time and date as a Julian Day number into *prNow and ** return 0. Return 1 if the time and date cannot be found. 
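The routine below folds the OS/2 DATETIME into a Julian Day number. As a point of reference, the conversion from a calendar date to a Julian Day can be done with the Fliegel & Van Flandern integer formula sketched here; this is an assumption for illustration, not necessarily the exact arithmetic os2CurrentTime() uses.

/* Sketch only: Gregorian date/time -> Julian Day using the standard
** Fliegel & Van Flandern integer formula (valid for common-era dates).
** For example, Y=2000, M=1, D=1 at midnight gives 2451544.5. */
static double toJulianDay(int Y, int M, int D, int h, int m, int s){
  long jdn = (1461L*(Y + 4800 + (M - 14)/12))/4
           + (367L*(M - 2 - 12*((M - 14)/12)))/12
           - (3L*((Y + 4900 + (M - 14)/12)/100))/4
           + D - 32075;                                  /* day number at noon */
  return jdn - 0.5 + (h*3600.0 + m*60.0 + s)/86400.0;    /* midnight + time of day */
}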
*/ -int sqlite3Os2CurrentTime( double *prNow ){ +int os2CurrentTime( sqlite3_vfs *pVfs, double *prNow ){ double now; - USHORT second, minute, hour, + SHORT minute; /* needs to be able to cope with negative timezone offset */ + USHORT second, hour, day, month, year; DATETIME dt; DosGetDateTime( &dt ); second = (USHORT)dt.seconds; - minute = (USHORT)dt.minutes + dt.timezone; + minute = (SHORT)dt.minutes + dt.timezone; hour = (USHORT)dt.hours; day = (USHORT)dt.day; month = (USHORT)dt.month; @@ -958,69 +1086,42 @@ return 0; } -/* -** Remember the number of thread-specific-data blocks allocated. -** Use this to verify that we are not leaking thread-specific-data. -** Ticket #1601 -*/ -#ifdef SQLITE_TEST -int sqlite3_tsd_count = 0; -# define TSD_COUNTER_INCR InterlockedIncrement( &sqlite3_tsd_count ) -# define TSD_COUNTER_DECR InterlockedDecrement( &sqlite3_tsd_count ) -#else -# define TSD_COUNTER_INCR /* no-op */ -# define TSD_COUNTER_DECR /* no-op */ -#endif +static int os2GetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + return 0; +} /* -** If called with allocateFlag>1, then return a pointer to thread -** specific data for the current thread. Allocate and zero the -** thread-specific data if it does not already exist necessary. -** -** If called with allocateFlag==0, then check the current thread -** specific data. Return it if it exists. If it does not exist, -** then return NULL. -** -** If called with allocateFlag<0, check to see if the thread specific -** data is allocated and is all zero. If it is then deallocate it. -** Return a pointer to the thread specific data or NULL if it is -** unallocated or gets deallocated. -*/ -ThreadData *sqlite3Os2ThreadSpecificData( int allocateFlag ){ - static ThreadData **s_ppTsd = NULL; - static const ThreadData zeroData = {0, 0, 0}; - ThreadData *pTsd; - - if( !s_ppTsd ){ - sqlite3OsEnterMutex(); - if( !s_ppTsd ){ - PULONG pul; - APIRET rc = DosAllocThreadLocalMemory(1, &pul); - if( rc != NO_ERROR ){ - sqlite3OsLeaveMutex(); - return 0; - } - s_ppTsd = (ThreadData **)pul; - } - sqlite3OsLeaveMutex(); - } - pTsd = *s_ppTsd; - if( allocateFlag>0 ){ - if( !pTsd ){ - pTsd = sqlite3OsMalloc( sizeof(zeroData) ); - if( pTsd ){ - *pTsd = zeroData; - *s_ppTsd = pTsd; - TSD_COUNTER_INCR; - } - } - }else if( pTsd!=0 && allocateFlag<0 - && memcmp( pTsd, &zeroData, sizeof(ThreadData) )==0 ){ - sqlite3OsFree(pTsd); - *s_ppTsd = NULL; - TSD_COUNTER_DECR; - pTsd = 0; - } - return pTsd; +** Initialize and deinitialize the operating system interface. 
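The sqlite3_os_init() that follows registers the "os2" VFS as the default. Once registered, a database can also be opened through it by name, as in this hedged sketch; the file name is made up.

/* Hypothetical caller: open a database explicitly through the "os2" VFS. */
sqlite3 *db = 0;
int rc = sqlite3_open_v2( "test.db", &db,
                          SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                          "os2" );          /* zVfs: select the VFS by name */
if( rc!=SQLITE_OK ){
  sqlite3_close( db );                      /* handle may still be allocated */
}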
+*/ +int sqlite3_os_init(void){ + static sqlite3_vfs os2Vfs = { + 1, /* iVersion */ + sizeof(os2File), /* szOsFile */ + CCHMAXPATH, /* mxPathname */ + 0, /* pNext */ + "os2", /* zName */ + 0, /* pAppData */ + + os2Open, /* xOpen */ + os2Delete, /* xDelete */ + os2Access, /* xAccess */ + os2FullPathname, /* xFullPathname */ + os2DlOpen, /* xDlOpen */ + os2DlError, /* xDlError */ + os2DlSym, /* xDlSym */ + os2DlClose, /* xDlClose */ + os2Randomness, /* xRandomness */ + os2Sleep, /* xSleep */ + os2CurrentTime, /* xCurrentTime */ + os2GetLastError /* xGetLastError */ + }; + sqlite3_vfs_register(&os2Vfs, 1); + initUconvObjects(); + return SQLITE_OK; +} +int sqlite3_os_end(void){ + freeUconvObjects(); + return SQLITE_OK; } -#endif /* OS_OS2 */ + +#endif /* SQLITE_OS_OS2 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os_os2.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os_os2.h --- sqlite3-3.4.2/src/os_os2.h 2006-06-03 19:02:19.000000000 +0100 +++ sqlite3-3.6.16/src/os_os2.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file defined OS-specific features for OS/2. -*/ -#ifndef _SQLITE_OS_OS2_H_ -#define _SQLITE_OS_OS2_H_ - -/* -** standard include files. -*/ -#include -#include -#include -#include - -/* -** Macros used to determine whether or not to use threads. The -** SQLITE_UNIX_THREADS macro is defined if we are synchronizing for -** Posix threads and SQLITE_W32_THREADS is defined if we are -** synchronizing using Win32 threads. -*/ -/* this mutex implementation only available with EMX */ -#if defined(THREADSAFE) && THREADSAFE -# include -# include -# define SQLITE_OS2_THREADS 1 -#endif - -/* -** The OsFile structure is a operating-system independing representation -** of an open file handle. It is defined differently for each architecture. -** -** This is the definition for Unix. -** -** OsFile.locktype takes one of the values SHARED_LOCK, RESERVED_LOCK, -** PENDING_LOCK or EXCLUSIVE_LOCK. -*/ -typedef struct OsFile OsFile; -struct OsFile { - int h; /* The file descriptor (LHANDLE) */ - int locked; /* True if this user holds the lock */ - int delOnClose; /* True if file is to be deleted on close */ - char *pathToDel; /* Name of file to delete on close */ - unsigned char locktype; /* The type of lock held on this fd */ - unsigned char isOpen; /* True if needs to be closed */ - unsigned char fullSync; -}; - -/* -** Maximum number of characters in a temporary file name -*/ -#define SQLITE_TEMPNAME_SIZE 200 - -/* -** Minimum interval supported by sqlite3OsSleep(). -*/ -#define SQLITE_MIN_SLEEP_MS 1 - -#ifndef SQLITE_DEFAULT_FILE_PERMISSIONS -# define SQLITE_DEFAULT_FILE_PERMISSIONS 0600 -#endif - -#endif /* _SQLITE_OS_OS2_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os_unix.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os_unix.c --- sqlite3-3.4.2/src/os_unix.c 2007-08-07 18:12:05.000000000 +0100 +++ sqlite3-3.6.16/src/os_unix.c 2009-06-25 12:45:58.000000000 +0100 @@ -10,13 +10,80 @@ ** ****************************************************************************** ** -** This file contains code that is specific to Unix systems. 
+** This file contains the VFS implementation for unix-like operating systems +** include Linux, MacOSX, *BSD, QNX, VxWorks, AIX, HPUX, and others. +** +** There are actually several different VFS implementations in this file. +** The differences are in the way that file locking is done. The default +** implementation uses Posix Advisory Locks. Alternative implementations +** use flock(), dot-files, various proprietary locking schemas, or simply +** skip locking all together. +** +** This source file is organized into divisions where the logic for various +** subfunctions is contained within the appropriate division. PLEASE +** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed +** in the correct division and should be clearly labeled. +** +** The layout of divisions is as follows: +** +** * General-purpose declarations and utility functions. +** * Unique file ID logic used by VxWorks. +** * Various locking primitive implementations (all except proxy locking): +** + for Posix Advisory Locks +** + for no-op locks +** + for dot-file locks +** + for flock() locking +** + for named semaphore locks (VxWorks only) +** + for AFP filesystem locks (MacOSX only) +** * sqlite3_file methods not associated with locking. +** * Definitions of sqlite3_io_methods objects for all locking +** methods plus "finder" functions for each locking method. +** * sqlite3_vfs method implementations. +** * Locking primitives for the proxy uber-locking-method. (MacOSX only) +** * Definitions of sqlite3_vfs objects for all locking methods +** plus implementations of sqlite3_os_init() and sqlite3_os_end(). +** +** $Id: os_unix.c,v 1.253 2009/06/17 13:09:39 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#if OS_UNIX /* This file is used on unix only */ +#if SQLITE_OS_UNIX /* This file is used on unix only */ + +/* +** There are various methods for file locking used for concurrency +** control: +** +** 1. POSIX locking (the default), +** 2. No locking, +** 3. Dot-file locking, +** 4. flock() locking, +** 5. AFP locking (OSX only), +** 6. Named POSIX semaphores (VXWorks only), +** 7. proxy locking. (OSX only) +** +** Styles 4, 5, and 7 are only available of SQLITE_ENABLE_LOCKING_STYLE +** is defined to 1. The SQLITE_ENABLE_LOCKING_STYLE also enables automatic +** selection of the appropriate locking style based on the filesystem +** where the database is located. +*/ +#if !defined(SQLITE_ENABLE_LOCKING_STYLE) +# if defined(__APPLE__) +# define SQLITE_ENABLE_LOCKING_STYLE 1 +# else +# define SQLITE_ENABLE_LOCKING_STYLE 0 +# endif +#endif -/* #define SQLITE_ENABLE_LOCKING_STYLE 0 */ +/* +** Define the OS_VXWORKS pre-processor macro to 1 if building on +** vxworks, or 0 otherwise. +*/ +#ifndef OS_VXWORKS +# if defined(__RTP__) || defined(_WRS_KERNEL) +# define OS_VXWORKS 1 +# else +# define OS_VXWORKS 0 +# endif +#endif /* ** These #defines should enable >2GB file support on Posix if the @@ -30,6 +97,11 @@ ** without this option, LFS is enable. But LFS does not exist in the kernel ** in RedHat 6.0, so the code won't work. Hence, for maximum binary ** portability you should omit LFS. +** +** The previous paragraph was written in 2005. (This paragraph is written +** on 2008-11-28.) These days, all Linux kernels support large files, so +** you should probably leave LFS enabled. But some embedded platforms might +** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful. 
*/ #ifndef SQLITE_DISABLE_LFS # define _LARGE_FILE 1 @@ -49,20 +121,24 @@ #include #include #include -#ifdef SQLITE_ENABLE_LOCKING_STYLE -#include -#include -#include + +#if SQLITE_ENABLE_LOCKING_STYLE +# include +# if OS_VXWORKS +# include +# include +# else +# include +# include +# include +# endif #endif /* SQLITE_ENABLE_LOCKING_STYLE */ /* ** If we are to be thread-safe, include the pthreads header and define ** the SQLITE_UNIX_THREADS macro. */ -#ifndef THREADSAFE -# define THREADSAFE 1 -#endif -#if THREADSAFE +#if SQLITE_THREADSAFE # include # define SQLITE_UNIX_THREADS 1 #endif @@ -74,47 +150,73 @@ # define SQLITE_DEFAULT_FILE_PERMISSIONS 0644 #endif +/* + ** Default permissions when creating auto proxy dir + */ +#ifndef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS +# define SQLITE_DEFAULT_PROXYDIR_PERMISSIONS 0755 +#endif + +/* +** Maximum supported path-length. +*/ +#define MAX_PATHNAME 512 + +/* +** Only set the lastErrno if the error code is a real error and not +** a normal expected return code of SQLITE_BUSY or SQLITE_OK +*/ +#define IS_LOCK_ERROR(x) ((x != SQLITE_OK) && (x != SQLITE_BUSY)) /* -** The unixFile structure is subclass of OsFile specific for the unix -** protability layer. +** The unixFile structure is subclass of sqlite3_file specific to the unix +** VFS implementations. */ typedef struct unixFile unixFile; struct unixFile { - IoMethod const *pMethod; /* Always the first entry */ - struct openCnt *pOpen; /* Info about all open fd's on this inode */ - struct lockInfo *pLock; /* Info about locks on this inode */ -#ifdef SQLITE_ENABLE_LOCKING_STYLE - void *lockingContext; /* Locking style specific state */ -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ - int h; /* The file descriptor */ - unsigned char locktype; /* The type of lock held on this fd */ - unsigned char isOpen; /* True if needs to be closed */ - unsigned char fullSync; /* Use F_FULLSYNC if available */ - int dirfd; /* File descriptor for the directory */ - i64 offset; /* Seek offset */ -#ifdef SQLITE_UNIX_THREADS - pthread_t tid; /* The thread that "owns" this OsFile */ + sqlite3_io_methods const *pMethod; /* Always the first entry */ + struct unixOpenCnt *pOpen; /* Info about all open fd's on this inode */ + struct unixLockInfo *pLock; /* Info about locks on this inode */ + int h; /* The file descriptor */ + int dirfd; /* File descriptor for the directory */ + unsigned char locktype; /* The type of lock held on this fd */ + int lastErrno; /* The unix errno from the last I/O error */ + void *lockingContext; /* Locking style specific state */ +#if SQLITE_ENABLE_LOCKING_STYLE + int openFlags; /* The flags specified at open() */ +#endif +#if SQLITE_THREADSAFE && defined(__linux__) + pthread_t tid; /* The thread that "owns" this unixFile */ +#endif +#if OS_VXWORKS + int isDelete; /* Delete on close if true */ + struct vxworksFileId *pId; /* Unique file ID */ +#endif +#ifndef NDEBUG + /* The next group of variables are used to track whether or not the + ** transaction counter in bytes 24-27 of database files are updated + ** whenever any part of the database changes. An assertion fault will + ** occur if a file is updated without also updating the transaction + ** counter. This test is made to avoid new problems similar to the + ** one described by ticket #3584. 
+ */ + unsigned char transCntrChng; /* True if the transaction counter changed */ + unsigned char dbUpdate; /* True if any part of database file changed */ + unsigned char inNormalWrite; /* True if in a normal write operation */ + + /* If true, that means we are dealing with a database file that has + ** a range of locking bytes from PENDING_BYTE through PENDING_BYTE+511 + ** which should never be read or written. Asserts() will verify this */ + unsigned char isLockable; /* True if file might be locked */ #endif -}; - -/* -** Provide the ability to override some OS-layer functions during -** testing. This is used to simulate OS crashes to verify that -** commits are atomic even in the event of an OS crash. -*/ -#ifdef SQLITE_CRASH_TEST - extern int sqlite3CrashTestEnable; - extern int sqlite3CrashOpenReadWrite(const char*, OsFile**, int*); - extern int sqlite3CrashOpenExclusive(const char*, OsFile**, int); - extern int sqlite3CrashOpenReadOnly(const char*, OsFile**, int); -# define CRASH_TEST_OVERRIDE(X,A,B,C) \ - if(sqlite3CrashTestEnable){ return X(A,B,C); } -#else -# define CRASH_TEST_OVERRIDE(X,A,B,C) /* no-op */ +#ifdef SQLITE_TEST + /* In test mode, increase the size of this structure a bit so that + ** it is larger than the struct CrashFile defined in test6.c. + */ + char aPadding[32]; #endif - +}; /* ** Include code that is common to all os_*.c files @@ -122,14 +224,6 @@ #include "os_common.h" /* -** Do not include any of the File I/O interface procedures if the -** SQLITE_OMIT_DISKIO macro is defined (indicating that the database -** will be in-memory only) -*/ -#ifndef SQLITE_OMIT_DISKIO - - -/* ** Define various macros that are missing from some systems. */ #ifndef O_LARGEFILE @@ -150,7 +244,7 @@ ** The DJGPP compiler environment looks mostly like Unix, but it ** lacks the fcntl() system call. So redefine fcntl() to be something ** that always succeeds. This means that locking does not occur under -** DJGPP. But it's DOS - what did you expect? +** DJGPP. But it is DOS - what did you expect? */ #ifdef __DJGPP__ # define fcntl(A,B,C) 0 @@ -160,39 +254,312 @@ ** The threadid macro resolves to the thread-id or to 0. Used for ** testing and debugging only. */ -#ifdef SQLITE_UNIX_THREADS +#if SQLITE_THREADSAFE #define threadid pthread_self() #else #define threadid 0 #endif + /* -** Set or check the OsFile.tid field. This field is set when an OsFile -** is first opened. All subsequent uses of the OsFile verify that the -** same thread is operating on the OsFile. Some operating systems do -** not allow locks to be overridden by other threads and that restriction -** means that sqlite3* database handles cannot be moved from one thread -** to another. This logic makes sure a user does not try to do that -** by mistake. -** -** Version 3.3.1 (2006-01-15): OsFiles can be moved from one thread to -** another as long as we are running on a system that supports threads -** overriding each others locks (which now the most common behavior) -** or if no locks are held. But the OsFile.pLock field needs to be -** recomputed because its key includes the thread-id. See the -** transferOwnership() function below for additional information +** Helper functions to obtain and relinquish the global mutex. 
*/ -#if defined(SQLITE_UNIX_THREADS) -# define SET_THREADID(X) (X)->tid = pthread_self() -# define CHECK_THREADID(X) (threadsOverrideEachOthersLocks==0 && \ - !pthread_equal((X)->tid, pthread_self())) -#else -# define SET_THREADID(X) -# define CHECK_THREADID(X) 0 +static void unixEnterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} +static void unixLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} + + +#ifdef SQLITE_DEBUG +/* +** Helper function for printing out trace information from debugging +** binaries. This returns the string represetation of the supplied +** integer lock-type. +*/ +static const char *locktypeName(int locktype){ + switch( locktype ){ + case NO_LOCK: return "NONE"; + case SHARED_LOCK: return "SHARED"; + case RESERVED_LOCK: return "RESERVED"; + case PENDING_LOCK: return "PENDING"; + case EXCLUSIVE_LOCK: return "EXCLUSIVE"; + } + return "ERROR"; +} #endif +#ifdef SQLITE_LOCK_TRACE +/* +** Print out information about all locking operations. +** +** This routine is used for troubleshooting locks on multithreaded +** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE +** command-line option on the compiler. This code is normally +** turned off. +*/ +static int lockTrace(int fd, int op, struct flock *p){ + char *zOpName, *zType; + int s; + int savedErrno; + if( op==F_GETLK ){ + zOpName = "GETLK"; + }else if( op==F_SETLK ){ + zOpName = "SETLK"; + }else{ + s = fcntl(fd, op, p); + sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); + return s; + } + if( p->l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( p->l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( p->l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + assert( p->l_whence==SEEK_SET ); + s = fcntl(fd, op, p); + savedErrno = errno; + sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", + threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, + (int)p->l_pid, s); + if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ + struct flock l2; + l2 = *p; + fcntl(fd, F_GETLK, &l2); + if( l2.l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( l2.l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( l2.l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", + zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); + } + errno = savedErrno; + return s; +} +#define fcntl lockTrace +#endif /* SQLITE_LOCK_TRACE */ + + + +/* +** This routine translates a standard POSIX errno code into something +** useful to the clients of the sqlite3 functions. Specifically, it is +** intended to translate a variety of "try again" errors into SQLITE_BUSY +** and a variety of "please close the file descriptor NOW" errors into +** SQLITE_IOERR +** +** Errors during initialization of locks, or file system support for locks, +** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately. 
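sqliteErrorFromPosixError(), defined just below, is what the locking primitives use to turn an errno value into an SQLite result code. The following is a simplified sketch of such a caller, assuming the usual struct flock setup and the headers this file already includes; it is illustration only, not the real unixLock().

/* Simplified sketch of a caller of sqliteErrorFromPosixError().
** The byte range is made up for illustration. */
static int takeWriteLockSketch(int fd){
  struct flock lock;
  memset( &lock, 0, sizeof(lock) );
  lock.l_type   = F_WRLCK;
  lock.l_whence = SEEK_SET;
  lock.l_start  = 0;
  lock.l_len    = 1;
  if( fcntl(fd, F_SETLK, &lock)==(-1) ){
    return sqliteErrorFromPosixError(errno, SQLITE_IOERR_LOCK);
  }
  return SQLITE_OK;
}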
+*/ +static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) { + switch (posixError) { + case 0: + return SQLITE_OK; + + case EAGAIN: + case ETIMEDOUT: + case EBUSY: + case EINTR: + case ENOLCK: + /* random NFS retry error, unless during file system support + * introspection, in which it actually means what it says */ + return SQLITE_BUSY; + + case EACCES: + /* EACCES is like EAGAIN during locking operations, but not any other time*/ + if( (sqliteIOErr == SQLITE_IOERR_LOCK) || + (sqliteIOErr == SQLITE_IOERR_UNLOCK) || + (sqliteIOErr == SQLITE_IOERR_RDLOCK) || + (sqliteIOErr == SQLITE_IOERR_CHECKRESERVEDLOCK) ){ + return SQLITE_BUSY; + } + /* else fall through */ + case EPERM: + return SQLITE_PERM; + + case EDEADLK: + return SQLITE_IOERR_BLOCKED; + +#if EOPNOTSUPP!=ENOTSUP + case EOPNOTSUPP: + /* something went terribly awry, unless during file system support + * introspection, in which it actually means what it says */ +#endif +#ifdef ENOTSUP + case ENOTSUP: + /* invalid fd, unless during file system support introspection, in which + * it actually means what it says */ +#endif + case EIO: + case EBADF: + case EINVAL: + case ENOTCONN: + case ENODEV: + case ENXIO: + case ENOENT: + case ESTALE: + case ENOSYS: + /* these should force the client to close the file and reconnect */ + + default: + return sqliteIOErr; + } +} + + + +/****************************************************************************** +****************** Begin Unique File ID Utility Used By VxWorks *************** +** +** On most versions of unix, we can get a unique ID for a file by concatenating +** the device number and the inode number. But this does not work on VxWorks. +** On VxWorks, a unique file id must be based on the canonical filename. +** +** A pointer to an instance of the following structure can be used as a +** unique file ID in VxWorks. Each instance of this structure contains +** a copy of the canonical filename. There is also a reference count. +** The structure is reclaimed when the number of pointers to it drops to +** zero. +** +** There are never very many files open at one time and lookups are not +** a performance-critical path, so it is sufficient to put these +** structures on a linked list. +*/ +struct vxworksFileId { + struct vxworksFileId *pNext; /* Next in a list of them all */ + int nRef; /* Number of references to this one */ + int nName; /* Length of the zCanonicalName[] string */ + char *zCanonicalName; /* Canonical filename */ +}; + +#if OS_VXWORKS +/* +** All unique filenames are held on a linked list headed by this +** variable: +*/ +static struct vxworksFileId *vxworksFileList = 0; + +/* +** Simplify a filename into its canonical form +** by making the following changes: +** +** * removing any trailing and duplicate / +** * convert /./ into just / +** * convert /A/../ where A is any simple name into just / +** +** Changes are made in-place. Return the new name length. +** +** The original filename is in z[0..n-1]. Return the number of +** characters in the simplified name. +*/ +static int vxworksSimplifyName(char *z, int n){ + int i, j; + while( n>1 && z[n-1]=='/' ){ n--; } + for(i=j=0; i0 && z[j-1]!='/' ){ j--; } + if( j>0 ){ j--; } + i += 2; + continue; + } + } + z[j++] = z[i]; + } + z[j] = 0; + return j; +} + +/* +** Find a unique file ID for the given absolute pathname. Return +** a pointer to the vxworksFileId object. This pointer is the unique +** file ID. +** +** The nRef field of the vxworksFileId object is incremented before +** the object is returned. 
A new vxworksFileId object is created +** and added to the global list if necessary. +** +** If a memory allocation error occurs, return NULL. +*/ +static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ + struct vxworksFileId *pNew; /* search key and new file ID */ + struct vxworksFileId *pCandidate; /* For looping over existing file IDs */ + int n; /* Length of zAbsoluteName string */ + + assert( zAbsoluteName[0]=='/' ); + n = (int)strlen(zAbsoluteName); + pNew = sqlite3_malloc( sizeof(*pNew) + (n+1) ); + if( pNew==0 ) return 0; + pNew->zCanonicalName = (char*)&pNew[1]; + memcpy(pNew->zCanonicalName, zAbsoluteName, n+1); + n = vxworksSimplifyName(pNew->zCanonicalName, n); + + /* Search for an existing entry that matching the canonical name. + ** If found, increment the reference count and return a pointer to + ** the existing file ID. + */ + unixEnterMutex(); + for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){ + if( pCandidate->nName==n + && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0 + ){ + sqlite3_free(pNew); + pCandidate->nRef++; + unixLeaveMutex(); + return pCandidate; + } + } + + /* No match was found. We will make a new file ID */ + pNew->nRef = 1; + pNew->nName = n; + pNew->pNext = vxworksFileList; + vxworksFileList = pNew; + unixLeaveMutex(); + return pNew; +} + /* -** Here is the dirt on POSIX advisory locks: ANSI STD 1003.1 (1996) +** Decrement the reference count on a vxworksFileId object. Free +** the object when the reference count reaches zero. +*/ +static void vxworksReleaseFileId(struct vxworksFileId *pId){ + unixEnterMutex(); + assert( pId->nRef>0 ); + pId->nRef--; + if( pId->nRef==0 ){ + struct vxworksFileId **pp; + for(pp=&vxworksFileList; *pp && *pp!=pId; pp = &((*pp)->pNext)){} + assert( *pp==pId ); + *pp = pId->pNext; + sqlite3_free(pId); + } + unixLeaveMutex(); +} +#endif /* OS_VXWORKS */ +/*************** End of Unique File ID Utility Used By VxWorks **************** +******************************************************************************/ + + +/****************************************************************************** +*************************** Posix Advisory Locking **************************** +** +** POSIX advisory locks are broken by design. ANSI STD 1003.1 (1996) ** section 6.5.2.2 lines 483 through 490 specify that when a process ** sets or clears a lock, that operation overrides any prior locks set ** by the same process. It does not explicitly say so, but this implies @@ -211,9 +578,8 @@ ** second overrides the first, even though they were on different ** file descriptors opened on different file names. ** -** Bummer. If you ask me, this is broken. Badly broken. It means -** that we cannot use POSIX locks to synchronize file access among -** competing threads of the same process. POSIX locks will work fine +** This means that we cannot use POSIX locks to synchronize file access +** among competing threads of the same process. POSIX locks will work fine ** to synchronize access for threads in separate processes, but not ** threads within the same process. ** @@ -226,11 +592,15 @@ ** locks to see if another thread has previously set a lock on that same ** inode. ** -** The OsFile structure for POSIX is no longer just an integer file +** (Aside: The use of inode numbers as unique IDs does not work on VxWorks. +** For VxWorks, we have to use the alternative unique ID system based on +** canonical filename and implemented in the previous division.) 
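For reference, the simplification rules listed above (strip duplicate and trailing '/', collapse "/./", collapse "/name/../") can be restated as a small standalone program. This is an illustrative rewrite of the same algorithm, not the file's own vxworksSimplifyName(), and the two sample paths are made up:

    #include <stdio.h>
    #include <string.h>

    static void demoSimplify(char *z){
      int n = (int)strlen(z);
      int i, j;
      while( n>1 && z[n-1]=='/' ){ n--; }                  /* trailing slashes */
      for(i=j=0; i<n; i++){
        if( z[i]=='/' ){
          if( z[i+1]=='/' ) continue;                      /* duplicate slash */
          if( z[i+1]=='.' && i+2<n && z[i+2]=='/' ){       /* "/./"           */
            i += 1;
            continue;
          }
          if( z[i+1]=='.' && i+3<n && z[i+2]=='.' && z[i+3]=='/' ){  /* "/a/../" */
            while( j>0 && z[j-1]!='/' ){ j--; }
            if( j>0 ){ j--; }
            i += 2;
            continue;
          }
        }
        z[j++] = z[i];
      }
      z[j] = 0;
    }

    int main(void){
      char a[] = "/tmp//./db//";
      char b[] = "/tmp/sub/../test.db";
      demoSimplify(a); printf("%s\n", a);   /* expected: /tmp/db      */
      demoSimplify(b); printf("%s\n", b);   /* expected: /tmp/test.db */
      return 0;
    }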
+** +** The sqlite3_file structure for POSIX is no longer just an integer file ** descriptor. It is now a structure that holds the integer file ** descriptor and a pointer to a structure that describes the internal ** locks on the corresponding inode. There is one locking structure -** per inode, so if the same inode is opened twice, both OsFile structures +** per inode, so if the same inode is opened twice, both unixFile structures ** point to the same locking structure. The locking structure keeps ** a reference count (so we will know when to delete it) and a "cnt" ** field that tells us its internal lock status. cnt==0 means the @@ -242,96 +612,114 @@ ** POSIX lock if the internal lock structure transitions between ** a locked and an unlocked state. ** -** 2004-Jan-11: -** More recent discoveries about POSIX advisory locks. (The more -** I discover, the more I realize the a POSIX advisory locks are -** an abomination.) +** But wait: there are yet more problems with POSIX advisory locks. ** ** If you close a file descriptor that points to a file that has locks, ** all locks on that file that are owned by the current process are -** released. To work around this problem, each OsFile structure contains -** a pointer to an openCnt structure. There is one openCnt structure -** per open inode, which means that multiple OsFiles can point to a single -** openCnt. When an attempt is made to close an OsFile, if there are -** other OsFiles open on the same inode that are holding locks, the call +** released. To work around this problem, each unixFile structure contains +** a pointer to an unixOpenCnt structure. There is one unixOpenCnt structure +** per open inode, which means that multiple unixFile can point to a single +** unixOpenCnt. When an attempt is made to close an unixFile, if there are +** other unixFile open on the same inode that are holding locks, the call ** to close() the file descriptor is deferred until all of the locks clear. -** The openCnt structure keeps a list of file descriptors that need to +** The unixOpenCnt structure keeps a list of file descriptors that need to ** be closed and that list is walked (and cleared) when the last lock ** clears. ** -** First, under Linux threads, because each thread has a separate -** process ID, lock operations in one thread do not override locks -** to the same file in other threads. Linux threads behave like -** separate processes in this respect. But, if you close a file -** descriptor in linux threads, all locks are cleared, even locks -** on other threads and even though the other threads have different -** process IDs. Linux threads is inconsistent in this respect. -** (I'm beginning to think that linux threads is an abomination too.) -** The consequence of this all is that the hash table for the lockInfo -** structure has to include the process id as part of its key because -** locks in different threads are treated as distinct. But the -** openCnt structure should not include the process id in its -** key because close() clears lock on all threads, not just the current -** thread. Were it not for this goofiness in linux threads, we could -** combine the lockInfo and openCnt structures into a single structure. -** -** 2004-Jun-28: -** On some versions of linux, threads can override each others locks. -** On others not. Sometimes you can change the behavior on the same -** system by setting the LD_ASSUME_KERNEL environment variable. 
The -** POSIX standard is silent as to which behavior is correct, as far -** as I can tell, so other versions of unix might show the same -** inconsistency. There is no little doubt in my mind that posix -** advisory locks and linux threads are profoundly broken. -** -** To work around the inconsistencies, we have to test at runtime -** whether or not threads can override each others locks. This test -** is run once, the first time any lock is attempted. A static -** variable is set to record the results of this test for future -** use. +** Yet another problem: LinuxThreads do not play well with posix locks. +** +** Many older versions of linux use the LinuxThreads library which is +** not posix compliant. Under LinuxThreads, a lock created by thread +** A cannot be modified or overridden by a different thread B. +** Only thread A can modify the lock. Locking behavior is correct +** if the appliation uses the newer Native Posix Thread Library (NPTL) +** on linux - with NPTL a lock created by thread A can override locks +** in thread B. But there is no way to know at compile-time which +** threading library is being used. So there is no way to know at +** compile-time whether or not thread A can override locks on thread B. +** We have to do a run-time check to discover the behavior of the +** current process. +** +** On systems where thread A is unable to modify locks created by +** thread B, we have to keep track of which thread created each +** lock. Hence there is an extra field in the key to the unixLockInfo +** structure to record this information. And on those systems it +** is illegal to begin a transaction in one thread and finish it +** in another. For this latter restriction, there is no work-around. +** It is a limitation of LinuxThreads. */ /* -** An instance of the following structure serves as the key used -** to locate a particular lockInfo structure given its inode. +** Set or check the unixFile.tid field. This field is set when an unixFile +** is first opened. All subsequent uses of the unixFile verify that the +** same thread is operating on the unixFile. Some operating systems do +** not allow locks to be overridden by other threads and that restriction +** means that sqlite3* database handles cannot be moved from one thread +** to another while locks are held. ** -** If threads cannot override each others locks, then we set the -** lockKey.tid field to the thread ID. If threads can override -** each others locks then tid is always set to zero. tid is omitted -** if we compile without threading support. -*/ -struct lockKey { - dev_t dev; /* Device number */ - ino_t ino; /* Inode number */ -#ifdef SQLITE_UNIX_THREADS - pthread_t tid; /* Thread ID or zero if threads can override each other */ +** Version 3.3.1 (2006-01-15): unixFile can be moved from one thread to +** another as long as we are running on a system that supports threads +** overriding each others locks (which is now the most common behavior) +** or if no locks are held. But the unixFile.pLock field needs to be +** recomputed because its key includes the thread-id. 
See the +** transferOwnership() function below for additional information +*/ +#if SQLITE_THREADSAFE && defined(__linux__) +# define SET_THREADID(X) (X)->tid = pthread_self() +# define CHECK_THREADID(X) (threadsOverrideEachOthersLocks==0 && \ + !pthread_equal((X)->tid, pthread_self())) +#else +# define SET_THREADID(X) +# define CHECK_THREADID(X) 0 #endif -}; /* -** An instance of the following structure is allocated for each open -** inode on each thread with a different process ID. (Threads have -** different process IDs on linux, but not on most other unixes.) -** -** A single inode can have multiple file descriptors, so each OsFile -** structure contains a pointer to an instance of this object and this -** object keeps a count of the number of OsFiles pointing to it. +** An instance of the following structure serves as the key used +** to locate a particular unixOpenCnt structure given its inode. This +** is the same as the unixLockKey except that the thread ID is omitted. */ -struct lockInfo { - struct lockKey key; /* The lookup key */ - int cnt; /* Number of SHARED locks held */ - int locktype; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ - int nRef; /* Number of pointers to this structure */ +struct unixFileId { + dev_t dev; /* Device number */ +#if OS_VXWORKS + struct vxworksFileId *pId; /* Unique file ID for vxworks. */ +#else + ino_t ino; /* Inode number */ +#endif }; /* ** An instance of the following structure serves as the key used -** to locate a particular openCnt structure given its inode. This -** is the same as the lockKey except that the thread ID is omitted. +** to locate a particular unixLockInfo structure given its inode. +** +** If threads cannot override each others locks (LinuxThreads), then we +** set the unixLockKey.tid field to the thread ID. If threads can override +** each others locks (Posix and NPTL) then tid is always set to zero. +** tid is omitted if we compile without threading support or on an OS +** other than linux. +*/ +struct unixLockKey { + struct unixFileId fid; /* Unique identifier for the file */ +#if SQLITE_THREADSAFE && defined(__linux__) + pthread_t tid; /* Thread ID of lock owner. Zero if not using LinuxThreads */ +#endif +}; + +/* +** An instance of the following structure is allocated for each open +** inode. Or, on LinuxThreads, there is one of these structures for +** each inode opened by each thread. +** +** A single inode can have multiple file descriptors, so each unixFile +** structure contains a pointer to an instance of this object and this +** object keeps a count of the number of unixFile pointing to it. */ -struct openKey { - dev_t dev; /* Device number */ - ino_t ino; /* Inode number */ +struct unixLockInfo { + struct unixLockKey lockKey; /* The lookup key */ + int cnt; /* Number of SHARED locks held */ + int locktype; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ + int nRef; /* Number of pointers to this structure */ + struct unixLockInfo *pNext; /* List of all unixLockInfo objects */ + struct unixLockInfo *pPrev; /* .... doubly linked */ }; /* @@ -340,58 +728,40 @@ ** inode. If a close is attempted against an inode that is holding ** locks, the close is deferred until all locks clear by adding the ** file descriptor to be closed to the pending list. 
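A reduced sketch of the identification scheme that the unixFileId / unixLockKey structures above implement: a file is keyed by the (st_dev, st_ino) pair reported by fstat(), the key is zeroed before being filled in so that struct padding cannot defeat a whole-struct memcmp(), and equality is a memcmp() of the key. The demoFileId type and the use of file descriptor 0 are illustrative only:

    #include <sys/stat.h>
    #include <string.h>
    #include <stdio.h>

    struct demoFileId {
      dev_t dev;     /* device number */
      ino_t ino;     /* inode number */
    };

    static int demoGetFileId(int fd, struct demoFileId *pOut){
      struct stat st;
      if( fstat(fd, &st) ) return -1;
      memset(pOut, 0, sizeof(*pOut));    /* clear padding before memcmp use */
      pOut->dev = st.st_dev;
      pOut->ino = st.st_ino;
      return 0;
    }

    static int demoSameFile(const struct demoFileId *a, const struct demoFileId *b){
      return memcmp(a, b, sizeof(*a))==0;
    }

    int main(void){
      struct demoFileId a, b;
      if( demoGetFileId(0, &a)==0 && demoGetFileId(0, &b)==0 ){
        printf("same file: %d\n", demoSameFile(&a, &b));   /* expect 1 */
      }
      return 0;
    }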
-*/ -struct openCnt { - struct openKey key; /* The lookup key */ - int nRef; /* Number of pointers to this structure */ - int nLock; /* Number of outstanding locks */ - int nPending; /* Number of pending close() operations */ - int *aPending; /* Malloced space holding fd's awaiting a close() */ +** +** TODO: Consider changing this so that there is only a single file +** descriptor for each open file, even when it is opened multiple times. +** The close() system call would only occur when the last database +** using the file closes. +*/ +struct unixOpenCnt { + struct unixFileId fileId; /* The lookup key */ + int nRef; /* Number of pointers to this structure */ + int nLock; /* Number of outstanding locks */ + int nPending; /* Number of pending close() operations */ + int *aPending; /* Malloced space holding fd's awaiting a close() */ +#if OS_VXWORKS + sem_t *pSem; /* Named POSIX semaphore */ + char aSemName[MAX_PATHNAME+1]; /* Name of that semaphore */ +#endif + struct unixOpenCnt *pNext, *pPrev; /* List of all unixOpenCnt objects */ }; -/* -** These hash tables map inodes and file descriptors (really, lockKey and -** openKey structures) into lockInfo and openCnt structures. Access to -** these hash tables must be protected by a mutex. -*/ -static Hash lockHash = {SQLITE_HASH_BINARY, 0, 0, 0, - sqlite3ThreadSafeMalloc, sqlite3ThreadSafeFree, 0, 0}; -static Hash openHash = {SQLITE_HASH_BINARY, 0, 0, 0, - sqlite3ThreadSafeMalloc, sqlite3ThreadSafeFree, 0, 0}; - -#ifdef SQLITE_ENABLE_LOCKING_STYLE -/* -** The locking styles are associated with the different file locking -** capabilities supported by different file systems. -** -** POSIX locking style fully supports shared and exclusive byte-range locks -** ADP locking only supports exclusive byte-range locks -** FLOCK only supports a single file-global exclusive lock -** DOTLOCK isn't a true locking style, it refers to the use of a special -** file named the same as the database file with a '.lock' extension, this -** can be used on file systems that do not offer any reliable file locking -** NO locking means that no locking will be attempted, this is only used for -** read-only file systems currently -** UNSUPPORTED means that no locking will be attempted, this is only used for -** file systems that are known to be unsupported -*/ -typedef enum { - posixLockingStyle = 0, /* standard posix-advisory locks */ - afpLockingStyle, /* use afp locks */ - flockLockingStyle, /* use flock() */ - dotlockLockingStyle, /* use .lock files */ - noLockingStyle, /* useful for read-only file system */ - unsupportedLockingStyle /* indicates unsupported file system */ -} sqlite3LockingStyle; -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ +/* +** Lists of all unixLockInfo and unixOpenCnt objects. These used to be hash +** tables. But the number of objects is rarely more than a dozen and +** never exceeds a few thousand. And lookup is not on a critical +** path so a simple linked list will suffice. +*/ +static struct unixLockInfo *lockList = 0; +static struct unixOpenCnt *openList = 0; -#ifdef SQLITE_UNIX_THREADS /* -** This variable records whether or not threads can override each others +** This variable remembers whether or not threads can override each others ** locks. ** -** 0: No. Threads cannot override each others locks. -** 1: Yes. Threads can override each others locks. +** 0: No. Threads cannot override each others locks. (LinuxThreads) +** 1: Yes. Threads can override each others locks. (Posix & NLPT) ** -1: We don't know yet. 
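Stepping back to the lockList / openList declarations just above: the nRef / pNext / pPrev bookkeeping amounts to a reference-counted node on a doubly-linked global list. A compressed sketch with a hypothetical demoNode type (error handling and the mutex that the real code holds around these lists are omitted):

    #include <stdlib.h>

    struct demoNode {
      int nRef;                          /* number of users of this node */
      struct demoNode *pNext, *pPrev;    /* links on the global list */
    };

    static struct demoNode *demoList = 0;    /* head of the global list */

    static struct demoNode *demoNodeNew(void){
      struct demoNode *p = calloc(1, sizeof(*p));
      if( p==0 ) return 0;
      p->nRef = 1;
      p->pNext = demoList;
      if( demoList ) demoList->pPrev = p;
      demoList = p;
      return p;
    }

    static void demoNodeRelease(struct demoNode *p){
      if( p==0 || --p->nRef>0 ) return;
      if( p->pPrev ) p->pPrev->pNext = p->pNext; else demoList = p->pNext;
      if( p->pNext ) p->pNext->pPrev = p->pPrev;
      free(p);
    }

    int main(void){
      struct demoNode *p = demoNodeNew();
      if( p ){ p->nRef++; demoNodeRelease(p); demoNodeRelease(p); }
      return 0;
    }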
** ** On some systems, we know at compile-time if threads can override each @@ -404,13 +774,15 @@ ** it a global so that the test code can change its value in order to verify ** that the right stuff happens in either case. */ -#ifndef SQLITE_THREAD_OVERRIDE_LOCK -# define SQLITE_THREAD_OVERRIDE_LOCK -1 -#endif -#ifdef SQLITE_TEST +#if SQLITE_THREADSAFE && defined(__linux__) +# ifndef SQLITE_THREAD_OVERRIDE_LOCK +# define SQLITE_THREAD_OVERRIDE_LOCK -1 +# endif +# ifdef SQLITE_TEST int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; -#else +# else static int threadsOverrideEachOthersLocks = SQLITE_THREAD_OVERRIDE_LOCK; +# endif #endif /* @@ -423,287 +795,225 @@ int result; /* Result of the locking operation */ }; -#ifdef SQLITE_LOCK_TRACE +#if SQLITE_THREADSAFE && defined(__linux__) /* -** Print out information about all locking operations. -** -** This routine is used for troubleshooting locks on multithreaded -** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE -** command-line option on the compiler. This code is normally -** turned off. -*/ -static int lockTrace(int fd, int op, struct flock *p){ - char *zOpName, *zType; - int s; - int savedErrno; - if( op==F_GETLK ){ - zOpName = "GETLK"; - }else if( op==F_SETLK ){ - zOpName = "SETLK"; - }else{ - s = fcntl(fd, op, p); - sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); - return s; - } - if( p->l_type==F_RDLCK ){ - zType = "RDLCK"; - }else if( p->l_type==F_WRLCK ){ - zType = "WRLCK"; - }else if( p->l_type==F_UNLCK ){ - zType = "UNLCK"; - }else{ - assert( 0 ); - } - assert( p->l_whence==SEEK_SET ); - s = fcntl(fd, op, p); - savedErrno = errno; - sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", - threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, - (int)p->l_pid, s); - if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ - struct flock l2; - l2 = *p; - fcntl(fd, F_GETLK, &l2); - if( l2.l_type==F_RDLCK ){ - zType = "RDLCK"; - }else if( l2.l_type==F_WRLCK ){ - zType = "WRLCK"; - }else if( l2.l_type==F_UNLCK ){ - zType = "UNLCK"; - }else{ - assert( 0 ); - } - sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", - zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); - } - errno = savedErrno; - return s; -} -#define fcntl lockTrace -#endif /* SQLITE_LOCK_TRACE */ - -/* -** The testThreadLockingBehavior() routine launches two separate -** threads on this routine. This routine attempts to lock a file -** descriptor then returns. The success or failure of that attempt -** allows the testThreadLockingBehavior() procedure to determine -** whether or not threads can override each others locks. -*/ -static void *threadLockingTest(void *pArg){ - struct threadTestData *pData = (struct threadTestData*)pArg; - pData->result = fcntl(pData->fd, F_SETLK, &pData->lock); - return pArg; -} - -/* -** This procedure attempts to determine whether or not threads -** can override each others locks then sets the -** threadsOverrideEachOthersLocks variable appropriately. +** This function is used as the main routine for a thread launched by +** testThreadLockingBehavior(). It tests whether the shared-lock obtained +** by the main thread in testThreadLockingBehavior() conflicts with a +** hypothetical write-lock obtained by this thread on the same file. +** +** The write-lock is not actually acquired, as this is not possible if +** the file is open in read-only mode (see ticket #3472). 
+*/ +static void *threadLockingTest(void *pArg){ + struct threadTestData *pData = (struct threadTestData*)pArg; + pData->result = fcntl(pData->fd, F_GETLK, &pData->lock); + return pArg; +} +#endif /* SQLITE_THREADSAFE && defined(__linux__) */ + + +#if SQLITE_THREADSAFE && defined(__linux__) +/* +** This procedure attempts to determine whether or not threads +** can override each others locks then sets the +** threadsOverrideEachOthersLocks variable appropriately. */ static void testThreadLockingBehavior(int fd_orig){ int fd; - struct threadTestData d[2]; - pthread_t t[2]; + int rc; + struct threadTestData d; + struct flock l; + pthread_t t; fd = dup(fd_orig); if( fd<0 ) return; - memset(d, 0, sizeof(d)); - d[0].fd = fd; - d[0].lock.l_type = F_RDLCK; - d[0].lock.l_len = 1; - d[0].lock.l_start = 0; - d[0].lock.l_whence = SEEK_SET; - d[1] = d[0]; - d[1].lock.l_type = F_WRLCK; - pthread_create(&t[0], 0, threadLockingTest, &d[0]); - pthread_create(&t[1], 0, threadLockingTest, &d[1]); - pthread_join(t[0], 0); - pthread_join(t[1], 0); + memset(&l, 0, sizeof(l)); + l.l_type = F_RDLCK; + l.l_len = 1; + l.l_start = 0; + l.l_whence = SEEK_SET; + rc = fcntl(fd_orig, F_SETLK, &l); + if( rc!=0 ) return; + memset(&d, 0, sizeof(d)); + d.fd = fd; + d.lock = l; + d.lock.l_type = F_WRLCK; + pthread_create(&t, 0, threadLockingTest, &d); + pthread_join(t, 0); close(fd); - threadsOverrideEachOthersLocks = d[0].result==0 && d[1].result==0; + if( d.result!=0 ) return; + threadsOverrideEachOthersLocks = (d.lock.l_type==F_UNLCK); } -#endif /* SQLITE_UNIX_THREADS */ +#endif /* SQLITE_THERADSAFE && defined(__linux__) */ /* -** Release a lockInfo structure previously allocated by findLockInfo(). +** Release a unixLockInfo structure previously allocated by findLockInfo(). */ -static void releaseLockInfo(struct lockInfo *pLock){ - assert( sqlite3OsInMutex(1) ); - if (pLock == NULL) - return; - pLock->nRef--; - if( pLock->nRef==0 ){ - sqlite3HashInsert(&lockHash, &pLock->key, sizeof(pLock->key), 0); - sqlite3ThreadSafeFree(pLock); +static void releaseLockInfo(struct unixLockInfo *pLock){ + if( pLock ){ + pLock->nRef--; + if( pLock->nRef==0 ){ + if( pLock->pPrev ){ + assert( pLock->pPrev->pNext==pLock ); + pLock->pPrev->pNext = pLock->pNext; + }else{ + assert( lockList==pLock ); + lockList = pLock->pNext; + } + if( pLock->pNext ){ + assert( pLock->pNext->pPrev==pLock ); + pLock->pNext->pPrev = pLock->pPrev; + } + sqlite3_free(pLock); + } } } /* -** Release a openCnt structure previously allocated by findLockInfo(). +** Release a unixOpenCnt structure previously allocated by findLockInfo(). */ -static void releaseOpenCnt(struct openCnt *pOpen){ - assert( sqlite3OsInMutex(1) ); - if (pOpen == NULL) - return; - pOpen->nRef--; - if( pOpen->nRef==0 ){ - sqlite3HashInsert(&openHash, &pOpen->key, sizeof(pOpen->key), 0); - free(pOpen->aPending); - sqlite3ThreadSafeFree(pOpen); +static void releaseOpenCnt(struct unixOpenCnt *pOpen){ + if( pOpen ){ + pOpen->nRef--; + if( pOpen->nRef==0 ){ + if( pOpen->pPrev ){ + assert( pOpen->pPrev->pNext==pOpen ); + pOpen->pPrev->pNext = pOpen->pNext; + }else{ + assert( openList==pOpen ); + openList = pOpen->pNext; + } + if( pOpen->pNext ){ + assert( pOpen->pNext->pPrev==pOpen ); + pOpen->pNext->pPrev = pOpen->pPrev; + } + sqlite3_free(pOpen->aPending); + sqlite3_free(pOpen); + } } } -#ifdef SQLITE_ENABLE_LOCKING_STYLE -/* -** Tests a byte-range locking query to see if byte range locks are -** supported, if not we fall back to dotlockLockingStyle. 
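The probe above can also be run as a standalone program. A sketch, assuming a scratch file named probe.tmp and linking with -pthread: the main thread takes a read-lock, and a second thread asks with F_GETLK (so nothing is actually locked) whether a write-lock on the same byte would conflict. "No conflict" (F_UNLCK) means the two threads are treated as one lock owner (NPTL-style); a reported conflict means they behave like separate processes (LinuxThreads-style):

    #include <fcntl.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int fdShared;    /* duplicate descriptor probed from the second thread */

    static void *probeThread(void *pArg){
      struct flock *p = (struct flock*)pArg;
      p->l_type = F_WRLCK;
      fcntl(fdShared, F_GETLK, p);   /* sets l_type to F_UNLCK if no conflict */
      return 0;
    }

    int main(void){
      struct flock l;
      pthread_t t;
      int fd = open("probe.tmp", O_RDWR|O_CREAT, 0600);   /* hypothetical scratch file */
      if( fd<0 ) return 1;
      fdShared = dup(fd);

      memset(&l, 0, sizeof(l));
      l.l_type = F_RDLCK;
      l.l_whence = SEEK_SET;
      l.l_start = 0;
      l.l_len = 1;
      if( fcntl(fd, F_SETLK, &l)!=0 ) return 1;   /* main thread takes the read-lock */

      pthread_create(&t, 0, probeThread, &l);
      pthread_join(t, 0);
      printf("threads override each other's locks: %s\n",
             l.l_type==F_UNLCK ? "yes (NPTL-style)" : "no (LinuxThreads-style)");
      close(fdShared);
      close(fd);
      return 0;
    }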
-*/ -static sqlite3LockingStyle sqlite3TestLockingStyle(const char *filePath, - int fd) { - /* test byte-range lock using fcntl */ - struct flock lockInfo; - - lockInfo.l_len = 1; - lockInfo.l_start = 0; - lockInfo.l_whence = SEEK_SET; - lockInfo.l_type = F_RDLCK; - - if (fcntl(fd, F_GETLK, &lockInfo) != -1) { - return posixLockingStyle; - } - - /* testing for flock can give false positives. So if if the above test - ** fails, then we fall back to using dot-lock style locking. - */ - return dotlockLockingStyle; -} - -/* -** Examines the f_fstypename entry in the statfs structure as returned by -** stat() for the file system hosting the database file, assigns the -** appropriate locking style based on it's value. These values and -** assignments are based on Darwin/OSX behavior and have not been tested on -** other systems. -*/ -static sqlite3LockingStyle sqlite3DetectLockingStyle(const char *filePath, - int fd) { - -#ifdef SQLITE_FIXED_LOCKING_STYLE - return (sqlite3LockingStyle)SQLITE_FIXED_LOCKING_STYLE; -#else - struct statfs fsInfo; - - if (statfs(filePath, &fsInfo) == -1) - return sqlite3TestLockingStyle(filePath, fd); - - if (fsInfo.f_flags & MNT_RDONLY) - return noLockingStyle; - - if( (!strcmp(fsInfo.f_fstypename, "hfs")) || - (!strcmp(fsInfo.f_fstypename, "ufs")) ) - return posixLockingStyle; - - if(!strcmp(fsInfo.f_fstypename, "afpfs")) - return afpLockingStyle; - - if(!strcmp(fsInfo.f_fstypename, "nfs")) - return sqlite3TestLockingStyle(filePath, fd); - - if(!strcmp(fsInfo.f_fstypename, "smbfs")) - return flockLockingStyle; - - if(!strcmp(fsInfo.f_fstypename, "msdos")) - return dotlockLockingStyle; - - if(!strcmp(fsInfo.f_fstypename, "webdav")) - return unsupportedLockingStyle; - - return sqlite3TestLockingStyle(filePath, fd); -#endif /* SQLITE_FIXED_LOCKING_STYLE */ -} - -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ - /* -** Given a file descriptor, locate lockInfo and openCnt structures that +** Given a file descriptor, locate unixLockInfo and unixOpenCnt structures that ** describes that file descriptor. Create new ones if necessary. The ** return values might be uninitialized if an error occurs. ** -** Return the number of errors. +** Return an appropriate error code. */ static int findLockInfo( - int fd, /* The file descriptor used in the key */ - struct lockInfo **ppLock, /* Return the lockInfo structure here */ - struct openCnt **ppOpen /* Return the openCnt structure here */ + unixFile *pFile, /* Unix file with file desc used in the key */ + struct unixLockInfo **ppLock, /* Return the unixLockInfo structure here */ + struct unixOpenCnt **ppOpen /* Return the unixOpenCnt structure here */ ){ - int rc; - struct lockKey key1; - struct openKey key2; - struct stat statbuf; - struct lockInfo *pLock; - struct openCnt *pOpen; + int rc; /* System call return code */ + int fd; /* The file descriptor for pFile */ + struct unixLockKey lockKey; /* Lookup key for the unixLockInfo structure */ + struct unixFileId fileId; /* Lookup key for the unixOpenCnt struct */ + struct stat statbuf; /* Low-level file information */ + struct unixLockInfo *pLock = 0;/* Candidate unixLockInfo object */ + struct unixOpenCnt *pOpen; /* Candidate unixOpenCnt object */ + + /* Get low-level information about the file that we can used to + ** create a unique name for the file. 
+ */ + fd = pFile->h; rc = fstat(fd, &statbuf); - if( rc!=0 ) return 1; + if( rc!=0 ){ + pFile->lastErrno = errno; +#ifdef EOVERFLOW + if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS; +#endif + return SQLITE_IOERR; + } + +#ifdef __APPLE__ + /* On OS X on an msdos filesystem, the inode number is reported + ** incorrectly for zero-size files. See ticket #3260. To work + ** around this problem (we consider it a bug in OS X, not SQLite) + ** we always increase the file size to 1 by writing a single byte + ** prior to accessing the inode number. The one byte written is + ** an ASCII 'S' character which also happens to be the first byte + ** in the header of every SQLite database. In this way, if there + ** is a race condition such that another thread has already populated + ** the first page of the database, no damage is done. + */ + if( statbuf.st_size==0 ){ + rc = write(fd, "S", 1); + if( rc!=1 ){ + return SQLITE_IOERR; + } + rc = fstat(fd, &statbuf); + if( rc!=0 ){ + pFile->lastErrno = errno; + return SQLITE_IOERR; + } + } +#endif - assert( sqlite3OsInMutex(1) ); - memset(&key1, 0, sizeof(key1)); - key1.dev = statbuf.st_dev; - key1.ino = statbuf.st_ino; -#ifdef SQLITE_UNIX_THREADS + memset(&lockKey, 0, sizeof(lockKey)); + lockKey.fid.dev = statbuf.st_dev; +#if OS_VXWORKS + lockKey.fid.pId = pFile->pId; +#else + lockKey.fid.ino = statbuf.st_ino; +#endif +#if SQLITE_THREADSAFE && defined(__linux__) if( threadsOverrideEachOthersLocks<0 ){ testThreadLockingBehavior(fd); } - key1.tid = threadsOverrideEachOthersLocks ? 0 : pthread_self(); + lockKey.tid = threadsOverrideEachOthersLocks ? 0 : pthread_self(); #endif - memset(&key2, 0, sizeof(key2)); - key2.dev = statbuf.st_dev; - key2.ino = statbuf.st_ino; - pLock = (struct lockInfo*)sqlite3HashFind(&lockHash, &key1, sizeof(key1)); - if( pLock==0 ){ - struct lockInfo *pOld; - pLock = sqlite3ThreadSafeMalloc( sizeof(*pLock) ); - if( pLock==0 ){ - rc = 1; - goto exit_findlockinfo; + fileId = lockKey.fid; + if( ppLock!=0 ){ + pLock = lockList; + while( pLock && memcmp(&lockKey, &pLock->lockKey, sizeof(lockKey)) ){ + pLock = pLock->pNext; } - pLock->key = key1; - pLock->nRef = 1; - pLock->cnt = 0; - pLock->locktype = 0; - pOld = sqlite3HashInsert(&lockHash, &pLock->key, sizeof(key1), pLock); - if( pOld!=0 ){ - assert( pOld==pLock ); - sqlite3ThreadSafeFree(pLock); - rc = 1; - goto exit_findlockinfo; + if( pLock==0 ){ + pLock = sqlite3_malloc( sizeof(*pLock) ); + if( pLock==0 ){ + rc = SQLITE_NOMEM; + goto exit_findlockinfo; + } + pLock->lockKey = lockKey; + pLock->nRef = 1; + pLock->cnt = 0; + pLock->locktype = 0; + pLock->pNext = lockList; + pLock->pPrev = 0; + if( lockList ) lockList->pPrev = pLock; + lockList = pLock; + }else{ + pLock->nRef++; } - }else{ - pLock->nRef++; + *ppLock = pLock; } - *ppLock = pLock; if( ppOpen!=0 ){ - pOpen = (struct openCnt*)sqlite3HashFind(&openHash, &key2, sizeof(key2)); + pOpen = openList; + while( pOpen && memcmp(&fileId, &pOpen->fileId, sizeof(fileId)) ){ + pOpen = pOpen->pNext; + } if( pOpen==0 ){ - struct openCnt *pOld; - pOpen = sqlite3ThreadSafeMalloc( sizeof(*pOpen) ); + pOpen = sqlite3_malloc( sizeof(*pOpen) ); if( pOpen==0 ){ releaseLockInfo(pLock); - rc = 1; + rc = SQLITE_NOMEM; goto exit_findlockinfo; } - pOpen->key = key2; + pOpen->fileId = fileId; pOpen->nRef = 1; pOpen->nLock = 0; pOpen->nPending = 0; pOpen->aPending = 0; - pOld = sqlite3HashInsert(&openHash, &pOpen->key, sizeof(key2), pOpen); - if( pOld!=0 ){ - assert( pOld==pOpen ); - sqlite3ThreadSafeFree(pOpen); - releaseLockInfo(pLock); - rc = 
1; - goto exit_findlockinfo; - } + pOpen->pNext = openList; + pOpen->pPrev = 0; + if( openList ) openList->pPrev = pOpen; + openList = pOpen; +#if OS_VXWORKS + pOpen->pSem = NULL; + pOpen->aSemName[0] = '\0'; +#endif }else{ pOpen->nRef++; } @@ -714,38 +1024,18 @@ return rc; } -#ifdef SQLITE_DEBUG -/* -** Helper function for printing out trace information from debugging -** binaries. This returns the string represetation of the supplied -** integer lock-type. -*/ -static const char *locktypeName(int locktype){ - switch( locktype ){ - case NO_LOCK: return "NONE"; - case SHARED_LOCK: return "SHARED"; - case RESERVED_LOCK: return "RESERVED"; - case PENDING_LOCK: return "PENDING"; - case EXCLUSIVE_LOCK: return "EXCLUSIVE"; - } - return "ERROR"; -} -#endif - /* ** If we are currently in a different thread than the thread that the ** unixFile argument belongs to, then transfer ownership of the unixFile ** over to the current thread. ** -** A unixFile is only owned by a thread on systems where one thread is -** unable to override locks created by a different thread. RedHat9 is -** an example of such a system. +** A unixFile is only owned by a thread on systems that use LinuxThreads. ** ** Ownership transfer is only allowed if the unixFile is currently unlocked. ** If the unixFile is locked and an ownership is wrong, then return ** SQLITE_MISUSE. SQLITE_OK is returned if everything works. */ -#ifdef SQLITE_UNIX_THREADS +#if SQLITE_THREADSAFE && defined(__linux__) static int transferOwnership(unixFile *pFile){ int rc; pthread_t hSelf; @@ -768,7 +1058,7 @@ pFile->tid = hSelf; if (pFile->pLock != NULL) { releaseLockInfo(pFile->pLock); - rc = findLockInfo(pFile->h, &pFile->pLock, 0); + rc = findLockInfo(pFile, &pFile->pLock, 0); OSTRACE5("LOCK %d is now %s(%s,%d)\n", pFile->h, locktypeName(pFile->locktype), locktypeName(pFile->pLock->locktype), pFile->pLock->cnt); @@ -777,246 +1067,1622 @@ return SQLITE_OK; } } -#else +#else /* if not SQLITE_THREADSAFE */ /* On single-threaded builds, ownership transfer is a no-op */ # define transferOwnership(X) SQLITE_OK -#endif +#endif /* SQLITE_THREADSAFE */ -/* -** Delete the named file -*/ -int sqlite3UnixDelete(const char *zFilename){ - SimulateIOError(return SQLITE_IOERR_DELETE); - unlink(zFilename); - return SQLITE_OK; -} - -/* -** Return TRUE if the named file exists. -*/ -int sqlite3UnixFileExists(const char *zFilename){ - return access(zFilename, 0)==0; -} - -/* Forward declaration */ -static int allocateUnixFile( - int h, /* File descriptor of the open file */ - OsFile **pId, /* Write the real file descriptor here */ - const char *zFilename, /* Name of the file being opened */ - int delFlag /* If true, make sure the file deletes on close */ -); /* -** Attempt to open a file for both reading and writing. If that -** fails, try opening it read-only. If the file does not exist, -** try to create it. -** -** On success, a handle for the open file is written to *id -** and *pReadonly is set to 0 if the file was opened for reading and -** writing or 1 if the file was opened read-only. The function returns -** SQLITE_OK. -** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id and *pReadonly unchanged. +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. 
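The check below relies on F_GETLK, which asks the kernel whether a proposed lock would conflict with a lock held by another process. A standalone sketch of that probe against the reserved byte, assuming SQLite's customary PENDING_BYTE of 0x40000000 (so the reserved byte is 0x40000001) and a made-up database file name:

    #include <fcntl.h>
    #include <stdio.h>

    #define DEMO_PENDING_BYTE  0x40000000
    #define DEMO_RESERVED_BYTE (DEMO_PENDING_BYTE+1)

    /* Return 1 if another process write-locks the reserved byte of fd,
    ** 0 if not, or -1 if the query itself fails. */
    static int demoOtherProcessHoldsReserved(int fd){
      struct flock lock;
      lock.l_whence = SEEK_SET;
      lock.l_start  = DEMO_RESERVED_BYTE;
      lock.l_len    = 1;
      lock.l_type   = F_WRLCK;
      if( fcntl(fd, F_GETLK, &lock)==-1 ) return -1;
      return lock.l_type!=F_UNLCK;    /* a reported conflict means the lock is held */
    }

    int main(int argc, char **argv){
      int fd = open(argc>1 ? argv[1] : "test.db", O_RDWR);  /* hypothetical db file */
      if( fd<0 ) return 1;
      printf("reserved lock held elsewhere: %d\n", demoOtherProcessHoldsReserved(fd));
      return 0;
    }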
*/ -int sqlite3UnixOpenReadWrite( - const char *zFilename, - OsFile **pId, - int *pReadonly -){ - int h; - - CRASH_TEST_OVERRIDE(sqlite3CrashOpenReadWrite, zFilename, pId, pReadonly); - assert( 0==*pId ); - h = open(zFilename, O_RDWR|O_CREAT|O_LARGEFILE|O_BINARY, - SQLITE_DEFAULT_FILE_PERMISSIONS); - if( h<0 ){ -#ifdef EISDIR - if( errno==EISDIR ){ - return SQLITE_CANTOPEN; - } -#endif - h = open(zFilename, O_RDONLY|O_LARGEFILE|O_BINARY); - if( h<0 ){ - return SQLITE_CANTOPEN; - } - *pReadonly = 1; - }else{ - *pReadonly = 0; - } - return allocateUnixFile(h, pId, zFilename, 0); -} +static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); -/* -** Attempt to open a new file for exclusive access by this process. -** The file will be opened for both reading and writing. To avoid -** a potential security problem, we do not allow the file to have -** previously existed. Nor do we allow the file to be a symbolic -** link. -** -** If delFlag is true, then make arrangements to automatically delete -** the file when it is closed. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -*/ -int sqlite3UnixOpenExclusive(const char *zFilename, OsFile **pId, int delFlag){ - int h; + assert( pFile ); + unixEnterMutex(); /* Because pFile->pLock is shared across threads */ - CRASH_TEST_OVERRIDE(sqlite3CrashOpenExclusive, zFilename, pId, delFlag); - assert( 0==*pId ); - h = open(zFilename, - O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW|O_LARGEFILE|O_BINARY, - delFlag ? 0600 : SQLITE_DEFAULT_FILE_PERMISSIONS); - if( h<0 ){ - return SQLITE_CANTOPEN; + /* Check if a thread in this process holds such a lock */ + if( pFile->pLock->locktype>SHARED_LOCK ){ + reserved = 1; } - return allocateUnixFile(h, pId, zFilename, delFlag); -} -/* -** Attempt to open a new file for read-only access. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -*/ -int sqlite3UnixOpenReadOnly(const char *zFilename, OsFile **pId){ - int h; - - CRASH_TEST_OVERRIDE(sqlite3CrashOpenReadOnly, zFilename, pId, 0); - assert( 0==*pId ); - h = open(zFilename, O_RDONLY|O_LARGEFILE|O_BINARY); - if( h<0 ){ - return SQLITE_CANTOPEN; + /* Otherwise see if some other process holds it. + */ +#ifndef __DJGPP__ + if( !reserved ){ + struct flock lock; + lock.l_whence = SEEK_SET; + lock.l_start = RESERVED_BYTE; + lock.l_len = 1; + lock.l_type = F_WRLCK; + if (-1 == fcntl(pFile->h, F_GETLK, &lock)) { + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK); + pFile->lastErrno = tErrno; + } else if( lock.l_type!=F_UNLCK ){ + reserved = 1; + } } - return allocateUnixFile(h, pId, zFilename, 0); +#endif + + unixLeaveMutex(); + OSTRACE4("TEST WR-LOCK %d %d %d\n", pFile->h, rc, reserved); + + *pResOut = reserved; + return rc; } /* -** Attempt to open a file descriptor for the directory that contains a -** file. This file descriptor can be used to fsync() the directory -** in order to make sure the creation of a new file is actually written -** to disk. +** Lock the file with the lock specified by parameter locktype - one +** of the following: ** -** This routine is only meaningful for Unix. It is a no-op under -** windows since windows does not support hard links. 
+** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK ** -** If FULL_FSYNC is enabled, this function is not longer useful, -** a FULL_FSYNC sync applies to all pending disk operations. +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: ** -** On success, a handle for a previously open file at *id is -** updated with the new directory file descriptor and SQLITE_OK is -** returned. +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE ** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id unchanged. +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. */ -static int unixOpenDirectory( - OsFile *id, - const char *zDirname -){ - int h; +static int unixLock(sqlite3_file *id, int locktype){ + /* The following describes the implementation of the various locks and + ** lock transitions in terms of the POSIX advisory shared and exclusive + ** lock primitives (called read-locks and write-locks below, to avoid + ** confusion with SQLite lock names). The algorithms are complicated + ** slightly in order to be compatible with windows systems simultaneously + ** accessing the same database file, in case that is ever required. + ** + ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved + ** byte', each single bytes at well known offsets, and the 'shared byte + ** range', a range of 510 bytes at a well known offset. + ** + ** To obtain a SHARED lock, a read-lock is obtained on the 'pending + ** byte'. If this is successful, a random byte from the 'shared byte + ** range' is read-locked and the lock on the 'pending byte' released. + ** + ** A process may only obtain a RESERVED lock after it has a SHARED lock. + ** A RESERVED lock is implemented by grabbing a write-lock on the + ** 'reserved byte'. + ** + ** A process may only obtain a PENDING lock after it has obtained a + ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock + ** on the 'pending byte'. This ensures that no new SHARED locks can be + ** obtained, but existing SHARED locks are allowed to persist. A process + ** does not have to obtain a RESERVED lock on the way to a PENDING lock. + ** This property is used by the algorithm for rolling back a journal file + ** after a crash. + ** + ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is + ** implemented by obtaining a write-lock on the entire 'shared byte + ** range'. Since all other locks require a read-lock on one of the bytes + ** within this range, this ensures that no other locks are held on the + ** database. + ** + ** The reason a single byte cannot be used instead of the 'shared byte + ** range' is that some versions of windows do not support read-locks. By + ** locking a random byte from a range, concurrent SHARED locks may exist + ** even if the locking primitive used is always a write-lock. 
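A compact restatement of that byte map, using SQLite's customary default offsets (PENDING_BYTE 0x40000000, the reserved byte immediately after it, then a 510-byte shared range), and the POSIX lock type each level uses on unix. Note that the unix code below read-locks the whole shared range rather than a single random byte; the random-byte variant is a Windows compatibility concern. This is an illustrative sketch only; the real routine also handles the transient PENDING step and every error case:

    #include <fcntl.h>
    #include <stdio.h>

    #define DEMO_PENDING_BYTE  0x40000000
    #define DEMO_RESERVED_BYTE (DEMO_PENDING_BYTE+1)
    #define DEMO_SHARED_FIRST  (DEMO_PENDING_BYTE+2)
    #define DEMO_SHARED_SIZE   510

    enum demoLevel { DEMO_SHARED, DEMO_RESERVED, DEMO_PENDING, DEMO_EXCLUSIVE };

    /* Fill in the fcntl() request corresponding to one SQLite lock level. */
    static void demoLockFor(enum demoLevel eLevel, struct flock *p){
      p->l_whence = SEEK_SET;
      switch( eLevel ){
        case DEMO_SHARED:      /* read-lock the shared range */
          p->l_type = F_RDLCK; p->l_start = DEMO_SHARED_FIRST; p->l_len = DEMO_SHARED_SIZE;
          break;
        case DEMO_RESERVED:    /* write-lock the single reserved byte */
          p->l_type = F_WRLCK; p->l_start = DEMO_RESERVED_BYTE; p->l_len = 1;
          break;
        case DEMO_PENDING:     /* write-lock the pending byte: blocks new readers */
          p->l_type = F_WRLCK; p->l_start = DEMO_PENDING_BYTE; p->l_len = 1;
          break;
        case DEMO_EXCLUSIVE:   /* write-lock the whole shared range */
          p->l_type = F_WRLCK; p->l_start = DEMO_SHARED_FIRST; p->l_len = DEMO_SHARED_SIZE;
          break;
      }
    }

    int main(void){
      struct flock l;
      demoLockFor(DEMO_EXCLUSIVE, &l);
      printf("EXCLUSIVE locks %d bytes at offset %ld\n",
             DEMO_SHARED_SIZE, (long)l.l_start);
      return 0;
    }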
+ */ + int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; - assert( pFile!=0 ); - SET_THREADID(pFile); - assert( pFile->dirfd<0 ); - pFile->dirfd = h = open(zDirname, O_RDONLY|O_BINARY, 0); - if( h<0 ){ - return SQLITE_CANTOPEN; - } -#ifdef FD_CLOEXEC - fcntl(h, F_SETFD, fcntl(h, F_GETFD, 0) | FD_CLOEXEC); -#endif - OSTRACE3("OPENDIR %-3d %s\n", h, zDirname); - return SQLITE_OK; -} + struct unixLockInfo *pLock = pFile->pLock; + struct flock lock; + int s; -/* -** Create a temporary file name in zBuf. zBuf must be big enough to -** hold at least SQLITE_TEMPNAME_SIZE characters. -*/ -int sqlite3UnixTempFileName(char *zBuf){ - static const char *azDirs[] = { - 0, - "/var/tmp", - "/usr/tmp", - "/tmp", - ".", - }; - static const unsigned char zChars[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789"; - int i, j; - struct stat buf; - const char *zDir = "."; - azDirs[0] = sqlite3_temp_directory; - for(i=0; ih, + locktypeName(locktype), locktypeName(pFile->locktype), + locktypeName(pLock->locktype), pLock->cnt , getpid()); + + /* If there is already a lock of this type or more restrictive on the + ** unixFile, do nothing. Don't use the end_lock: exit path, as + ** unixEnterMutex() hasn't been called yet. + */ + if( pFile->locktype>=locktype ){ + OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h, + locktypeName(locktype)); + return SQLITE_OK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* This mutex is needed because pFile->pLock is shared across threads + */ + unixEnterMutex(); + + /* Make sure the current thread owns the pFile. + */ + rc = transferOwnership(pFile); + if( rc!=SQLITE_OK ){ + unixLeaveMutex(); + return rc; + } + pLock = pFile->pLock; + + /* If some thread using this PID has a lock via a different unixFile* + ** handle that precludes the requested lock, return BUSY. + */ + if( (pFile->locktype!=pLock->locktype && + (pLock->locktype>=PENDING_LOCK || locktype>SHARED_LOCK)) + ){ + rc = SQLITE_BUSY; + goto end_lock; + } + + /* If a SHARED lock is requested, and some thread using this PID already + ** has a SHARED or RESERVED lock, then increment reference counts and + ** return SQLITE_OK. + */ + if( locktype==SHARED_LOCK && + (pLock->locktype==SHARED_LOCK || pLock->locktype==RESERVED_LOCK) ){ + assert( locktype==SHARED_LOCK ); + assert( pFile->locktype==0 ); + assert( pLock->cnt>0 ); + pFile->locktype = SHARED_LOCK; + pLock->cnt++; + pFile->pOpen->nLock++; + goto end_lock; + } + + lock.l_len = 1L; + + lock.l_whence = SEEK_SET; + + /* A PENDING lock is needed before acquiring a SHARED lock and before + ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will + ** be released. + */ + if( locktype==SHARED_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktypeh, F_SETLK, &lock); + if( s==(-1) ){ + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + goto end_lock; + } + } + + + /* If control gets to this point, then actually go ahead and make + ** operating system calls for the specified lock. 
+ */ + if( locktype==SHARED_LOCK ){ + int tErrno = 0; + assert( pLock->cnt==0 ); + assert( pLock->locktype==0 ); + + /* Now get the read-lock */ + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + if( (s = fcntl(pFile->h, F_SETLK, &lock))==(-1) ){ + tErrno = errno; + } + /* Drop the temporary PENDING lock */ + lock.l_start = PENDING_BYTE; + lock.l_len = 1L; + lock.l_type = F_UNLCK; + if( fcntl(pFile->h, F_SETLK, &lock)!=0 ){ + if( s != -1 ){ + /* This could happen with a network mount */ + tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + goto end_lock; + } + } + if( s==(-1) ){ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + }else{ + pFile->locktype = SHARED_LOCK; + pFile->pOpen->nLock++; + pLock->cnt = 1; + } + }else if( locktype==EXCLUSIVE_LOCK && pLock->cnt>1 ){ + /* We are trying for an exclusive lock but another thread in this + ** same process is still holding a shared lock. */ + rc = SQLITE_BUSY; + }else{ + /* The request was for a RESERVED or EXCLUSIVE lock. It is + ** assumed that there is a SHARED or greater lock on the file + ** already. + */ + assert( 0!=pFile->locktype ); + lock.l_type = F_WRLCK; + switch( locktype ){ + case RESERVED_LOCK: + lock.l_start = RESERVED_BYTE; + break; + case EXCLUSIVE_LOCK: + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + break; + default: + assert(0); + } + s = fcntl(pFile->h, F_SETLK, &lock); + if( s==(-1) ){ + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + } + } + + +#ifndef NDEBUG + /* Set up the transaction-counter change checking flags when + ** transitioning from a SHARED to a RESERVED lock. The change + ** from SHARED to RESERVED marks the beginning of a normal + ** write operation (not a hot journal rollback). + */ + if( rc==SQLITE_OK + && pFile->locktype<=SHARED_LOCK + && locktype==RESERVED_LOCK + ){ + pFile->transCntrChng = 0; + pFile->dbUpdate = 0; + pFile->inNormalWrite = 1; + } +#endif + + + if( rc==SQLITE_OK ){ + pFile->locktype = locktype; + pLock->locktype = locktype; + }else if( locktype==EXCLUSIVE_LOCK ){ + pFile->locktype = PENDING_LOCK; + pLock->locktype = PENDING_LOCK; + } + +end_lock: + unixLeaveMutex(); + OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), + rc==SQLITE_OK ? "ok" : "failed"); + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. 
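In outline, the downgrade path that unixUnlock() implements below is: re-take a read-lock on the shared range, then drop the write-locks on the adjacent pending and reserved bytes; a full unlock simply clears every lock the process holds on the descriptor (l_len of zero means "to end of file"). A sketch using the same customary offsets as above, with error handling, reference counting, and the made-up file name demo.db standing in for the real state:

    #include <fcntl.h>
    #include <stdio.h>

    #define DEMO_PENDING_BYTE 0x40000000
    #define DEMO_SHARED_FIRST (DEMO_PENDING_BYTE+2)
    #define DEMO_SHARED_SIZE  510

    /* Downgrade an EXCLUSIVE byte-range lock to SHARED on descriptor fd. */
    static int demoDowngradeToShared(int fd){
      struct flock lock;
      lock.l_whence = SEEK_SET;

      /* 1. Re-establish a read-lock on the shared range. */
      lock.l_type  = F_RDLCK;
      lock.l_start = DEMO_SHARED_FIRST;
      lock.l_len   = DEMO_SHARED_SIZE;
      if( fcntl(fd, F_SETLK, &lock)==-1 ) return -1;

      /* 2. Drop the write-locks on the pending and reserved bytes (adjacent). */
      lock.l_type  = F_UNLCK;
      lock.l_start = DEMO_PENDING_BYTE;
      lock.l_len   = 2;
      return fcntl(fd, F_SETLK, &lock)==-1 ? -1 : 0;
    }

    /* Release every lock this process holds on fd. */
    static int demoUnlockAll(int fd){
      struct flock lock;
      lock.l_type   = F_UNLCK;
      lock.l_whence = SEEK_SET;
      lock.l_start  = 0;
      lock.l_len    = 0;
      return fcntl(fd, F_SETLK, &lock)==-1 ? -1 : 0;
    }

    int main(void){
      int fd = open("demo.db", O_RDWR|O_CREAT, 0644);   /* hypothetical file */
      if( fd<0 ) return 1;
      printf("downgrade rc=%d, unlock rc=%d\n",
             demoDowngradeToShared(fd), demoUnlockAll(fd));
      return 0;
    }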
+*/ +static int unixUnlock(sqlite3_file *id, int locktype){ + struct unixLockInfo *pLock; + struct flock lock; + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + int h; + + assert( pFile ); + OSTRACE7("UNLOCK %d %d was %d(%d,%d) pid=%d\n", pFile->h, locktype, + pFile->locktype, pFile->pLock->locktype, pFile->pLock->cnt, getpid()); + + assert( locktype<=SHARED_LOCK ); + if( pFile->locktype<=locktype ){ + return SQLITE_OK; + } + if( CHECK_THREADID(pFile) ){ + return SQLITE_MISUSE; + } + unixEnterMutex(); + h = pFile->h; + pLock = pFile->pLock; + assert( pLock->cnt!=0 ); + if( pFile->locktype>SHARED_LOCK ){ + assert( pLock->locktype==pFile->locktype ); + SimulateIOErrorBenign(1); + SimulateIOError( h=(-1) ) + SimulateIOErrorBenign(0); + +#ifndef NDEBUG + /* When reducing a lock such that other processes can start + ** reading the database file again, make sure that the + ** transaction counter was updated if any part of the database + ** file changed. If the transaction counter is not updated, + ** other connections to the same file might not realize that + ** the file has changed and hence might not know to flush their + ** cache. The use of a stale cache can lead to database corruption. + */ + assert( pFile->inNormalWrite==0 + || pFile->dbUpdate==0 + || pFile->transCntrChng==1 ); + pFile->inNormalWrite = 0; +#endif + + + if( locktype==SHARED_LOCK ){ + lock.l_type = F_RDLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + if( fcntl(h, F_SETLK, &lock)==(-1) ){ + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + goto end_unlock; + } + } + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = PENDING_BYTE; + lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); + if( fcntl(h, F_SETLK, &lock)!=(-1) ){ + pLock->locktype = SHARED_LOCK; + }else{ + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + goto end_unlock; + } + } + if( locktype==NO_LOCK ){ + struct unixOpenCnt *pOpen; + int rc2 = SQLITE_OK; + + /* Decrement the shared lock counter. Release the lock using an + ** OS call only when all threads in this same process have released + ** the lock. + */ + pLock->cnt--; + if( pLock->cnt==0 ){ + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = lock.l_len = 0L; + SimulateIOErrorBenign(1); + SimulateIOError( h=(-1) ) + SimulateIOErrorBenign(0); + if( fcntl(h, F_SETLK, &lock)!=(-1) ){ + pLock->locktype = NO_LOCK; + }else{ + int tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + pLock->locktype = NO_LOCK; + pFile->locktype = NO_LOCK; + } + } + + /* Decrement the count of locks against this same file. When the + ** count reaches zero, close any other file descriptors whose close + ** was deferred because of outstanding locks. + */ + pOpen = pFile->pOpen; + pOpen->nLock--; + assert( pOpen->nLock>=0 ); + if( pOpen->nLock==0 && pOpen->nPending>0 ){ + int i; + for(i=0; inPending; i++){ + /* close pending fds, but if closing fails don't free the array + ** assign -1 to the successfully closed descriptors and record the + ** error. The next attempt to unlock will try again. 
*/ + if( pOpen->aPending[i] < 0 ) continue; + if( close(pOpen->aPending[i]) ){ + pFile->lastErrno = errno; + rc2 = SQLITE_IOERR_CLOSE; + }else{ + pOpen->aPending[i] = -1; + } + } + if( rc2==SQLITE_OK ){ + sqlite3_free(pOpen->aPending); + pOpen->nPending = 0; + pOpen->aPending = 0; + } + } + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + +end_unlock: + unixLeaveMutex(); + if( rc==SQLITE_OK ) pFile->locktype = locktype; + return rc; +} + +/* +** This function performs the parts of the "close file" operation +** common to all locking schemes. It closes the directory and file +** handles, if they are valid, and sets all fields of the unixFile +** structure to 0. +** +** It is *not* necessary to hold the mutex when this routine is called, +** even on VxWorks. A mutex will be acquired on VxWorks by the +** vxworksReleaseFileId() routine. +*/ +static int closeUnixFile(sqlite3_file *id){ + unixFile *pFile = (unixFile*)id; + if( pFile ){ + if( pFile->dirfd>=0 ){ + int err = close(pFile->dirfd); + if( err ){ + pFile->lastErrno = errno; + return SQLITE_IOERR_DIR_CLOSE; + }else{ + pFile->dirfd=-1; + } + } + if( pFile->h>=0 ){ + int err = close(pFile->h); + if( err ){ + pFile->lastErrno = errno; + return SQLITE_IOERR_CLOSE; + } + } +#if OS_VXWORKS + if( pFile->pId ){ + if( pFile->isDelete ){ + unlink(pFile->pId->zCanonicalName); + } + vxworksReleaseFileId(pFile->pId); + pFile->pId = 0; + } +#endif + OSTRACE2("CLOSE %-3d\n", pFile->h); + OpenCounter(-1); + memset(pFile, 0, sizeof(unixFile)); + } + return SQLITE_OK; +} + +/* +** Close a file. +*/ +static int unixClose(sqlite3_file *id){ + int rc = SQLITE_OK; + if( id ){ + unixFile *pFile = (unixFile *)id; + unixUnlock(id, NO_LOCK); + unixEnterMutex(); + if( pFile->pOpen && pFile->pOpen->nLock ){ + /* If there are outstanding locks, do not actually close the file just + ** yet because that would clear those locks. Instead, add the file + ** descriptor to pOpen->aPending. It will be automatically closed when + ** the last lock is cleared. + */ + int *aNew; + struct unixOpenCnt *pOpen = pFile->pOpen; + aNew = sqlite3_realloc(pOpen->aPending, (pOpen->nPending+1)*sizeof(int) ); + if( aNew==0 ){ + /* If a malloc fails, just leak the file descriptor */ + }else{ + pOpen->aPending = aNew; + pOpen->aPending[pOpen->nPending] = pFile->h; + pOpen->nPending++; + pFile->h = -1; + } + } + releaseLockInfo(pFile->pLock); + releaseOpenCnt(pFile->pOpen); + rc = closeUnixFile(id); + unixLeaveMutex(); + } + return rc; +} + +/************** End of the posix advisory lock implementation ***************** +******************************************************************************/ + +/****************************************************************************** +****************************** No-op Locking ********************************** +** +** Of the various locking implementations available, this is by far the +** simplest: locking is ignored. No attempt is made to lock the database +** file for reading or writing. +** +** This locking mode is appropriate for use on read-only databases +** (ex: databases that are burned into CD-ROM, for example.) It can +** also be used if the application employs some external mechanism to +** prevent simultaneous access of the same database by two or more +** database connections. But there is a serious risk of database +** corruption if this locking mode is used in situations where multiple +** database connections are accessing the same database file at the same +** time and one or more of those connections are writing. 
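The deferred-close logic in unixUnlock() and unixClose() above reduces to a small amount of per-inode bookkeeping: if any handle on the inode still holds a lock, the descriptor is stashed rather than closed (a close() would drop every lock this process holds on the file), and the stash is drained once the lock count reaches zero. A sketch with a hypothetical demoInode type; the real code keys this off unixOpenCnt and holds the global mutex around it:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct demoInode {
      int nLock;        /* outstanding locks on this inode, all handles combined */
      int nPending;     /* number of descriptors waiting to be closed */
      int *aPending;    /* the waiting descriptors */
    };

    /* Close fd now if that is safe, otherwise queue it.  Returns 0 on success. */
    static int demoCloseOrDefer(struct demoInode *p, int fd){
      int *aNew;
      if( p->nLock==0 ) return close(fd);
      aNew = realloc(p->aPending, (p->nPending+1)*sizeof(int));
      if( aNew==0 ) return -1;
      p->aPending = aNew;
      p->aPending[p->nPending++] = fd;
      return 0;
    }

    /* Called when the last lock on the inode is released. */
    static void demoDrainPending(struct demoInode *p){
      int i;
      for(i=0; i<p->nPending; i++) close(p->aPending[i]);
      free(p->aPending);
      p->aPending = 0;
      p->nPending = 0;
    }

    int main(void){
      struct demoInode ino = {1, 0, 0};    /* pretend one lock is outstanding */
      int fd = dup(0);
      printf("deferred: %d\n", demoCloseOrDefer(&ino, fd)==0 && ino.nPending==1);
      ino.nLock = 0;
      demoDrainPending(&ino);
      return 0;
    }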
+*/ + +static int nolockCheckReservedLock(sqlite3_file *NotUsed, int *pResOut){ + UNUSED_PARAMETER(NotUsed); + *pResOut = 0; + return SQLITE_OK; +} +static int nolockLock(sqlite3_file *NotUsed, int NotUsed2){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + return SQLITE_OK; +} +static int nolockUnlock(sqlite3_file *NotUsed, int NotUsed2){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + return SQLITE_OK; +} + +/* +** Close the file. +*/ +static int nolockClose(sqlite3_file *id) { + return closeUnixFile(id); +} + +/******************* End of the no-op lock implementation ********************* +******************************************************************************/ + +/****************************************************************************** +************************* Begin dot-file Locking ****************************** +** +** The dotfile locking implementation uses the existing of separate lock +** files in order to control access to the database. This works on just +** about every filesystem imaginable. But there are serious downsides: +** +** (1) There is zero concurrency. A single reader blocks all other +** connections from reading or writing the database. +** +** (2) An application crash or power loss can leave stale lock files +** sitting around that need to be cleared manually. +** +** Nevertheless, a dotlock is an appropriate locking mode for use if no +** other locking strategy is available. +** +** Dotfile locking works by creating a file in the same directory as the +** database and with the same name but with a ".lock" extension added. +** The existance of a lock file implies an EXCLUSIVE lock. All other lock +** types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE. +*/ + +/* +** The file suffix added to the data base filename in order to create the +** lock file. +*/ +#define DOTLOCK_SUFFIX ".lock" + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +** +** In dotfile locking, either a lock exists or it does not. So in this +** variation of CheckReservedLock(), *pResOut is set to true if any lock +** is held on the file and false if the file is unlocked. +*/ +static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + + /* Check if a thread in this process holds such a lock */ + if( pFile->locktype>SHARED_LOCK ){ + /* Either this connection or some other connection in the same process + ** holds a lock on the file. No need to check further. */ + reserved = 1; + }else{ + /* The lock is held if and only if the lockfile exists */ + const char *zLockFile = (const char*)pFile->lockingContext; + reserved = access(zLockFile, 0)==0; + } + OSTRACE4("TEST WR-LOCK %d %d %d\n", pFile->h, rc, reserved); + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. 
The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +** +** With dotfile locking, we really only support state (4): EXCLUSIVE. +** But we track the other locking levels internally. +*/ +static int dotlockLock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + int fd; + char *zLockFile = (char *)pFile->lockingContext; + int rc = SQLITE_OK; + + + /* If we have any lock, then the lock file already exists. All we have + ** to do is adjust our internal record of the lock level. + */ + if( pFile->locktype > NO_LOCK ){ + pFile->locktype = locktype; +#if !OS_VXWORKS + /* Always update the timestamp on the old file */ + utimes(zLockFile, NULL); +#endif + return SQLITE_OK; + } + + /* grab an exclusive lock */ + fd = open(zLockFile,O_RDONLY|O_CREAT|O_EXCL,0600); + if( fd<0 ){ + /* failed to open/create the file, someone else may have stolen the lock */ + int tErrno = errno; + if( EEXIST == tErrno ){ + rc = SQLITE_BUSY; + } else { + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + } + return rc; + } + if( close(fd) ){ + pFile->lastErrno = errno; + rc = SQLITE_IOERR_CLOSE; + } + + /* got it, set the type and return ok */ + pFile->locktype = locktype; + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** When the locking level reaches NO_LOCK, delete the lock file. +*/ +static int dotlockUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + char *zLockFile = (char *)pFile->lockingContext; + + assert( pFile ); + OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, + pFile->locktype, getpid()); + assert( locktype<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->locktype==locktype ){ + return SQLITE_OK; + } + + /* To downgrade to shared, simply update our internal notion of the + ** lock state. No need to mess with the file on disk. + */ + if( locktype==SHARED_LOCK ){ + pFile->locktype = SHARED_LOCK; + return SQLITE_OK; + } + + /* To fully unlock the database, delete the lock file */ + assert( locktype==NO_LOCK ); + if( unlink(zLockFile) ){ + int rc = 0; + int tErrno = errno; + if( ENOENT != tErrno ){ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + } + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + return rc; + } + pFile->locktype = NO_LOCK; + return SQLITE_OK; +} + +/* +** Close a file. Make sure the lock has been released before closing. 
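+** (Releasing the lock here ultimately reduces to unlink()ing the lock file
+** that dotlockLock() created next to the database: for an illustrative
+** database name "test.db" the lock file would be "test.db.lock".)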
+*/ +static int dotlockClose(sqlite3_file *id) { + int rc; + if( id ){ + unixFile *pFile = (unixFile*)id; + dotlockUnlock(id, NO_LOCK); + sqlite3_free(pFile->lockingContext); + } + rc = closeUnixFile(id); + return rc; +} +/****************** End of the dot-file lock implementation ******************* +******************************************************************************/ + +/****************************************************************************** +************************** Begin flock Locking ******************************** +** +** Use the flock() system call to do file locking. +** +** flock() locking is like dot-file locking in that the various +** fine-grain locking levels supported by SQLite are collapsed into +** a single exclusive lock. In other words, SHARED, RESERVED, and +** PENDING locks are the same thing as an EXCLUSIVE lock. SQLite +** still works when you do this, but concurrency is reduced since +** only a single process can be reading the database at a time. +** +** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off or if +** compiling for VXWORKS. +*/ +#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + + /* Check if a thread in this process holds such a lock */ + if( pFile->locktype>SHARED_LOCK ){ + reserved = 1; + } + + /* Otherwise see if some other process holds it. */ + if( !reserved ){ + /* attempt to get the lock */ + int lrc = flock(pFile->h, LOCK_EX | LOCK_NB); + if( !lrc ){ + /* got the lock, unlock it */ + lrc = flock(pFile->h, LOCK_UN); + if ( lrc ) { + int tErrno = errno; + /* unlock failed with an error */ + lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(lrc) ){ + pFile->lastErrno = tErrno; + rc = lrc; + } + } + } else { + int tErrno = errno; + reserved = 1; + /* someone else might have it reserved */ + lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(lrc) ){ + pFile->lastErrno = tErrno; + rc = lrc; + } + } + } + OSTRACE4("TEST WR-LOCK %d %d %d\n", pFile->h, rc, reserved); + +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ + rc = SQLITE_OK; + reserved=1; + } +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** flock() only really support EXCLUSIVE locks. 
We track intermediate +** lock states in the sqlite3_file structure, but all locks SHARED or +** above are really EXCLUSIVE locks and exclude all other processes from +** access the file. +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int flockLock(sqlite3_file *id, int locktype) { + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. */ + if (pFile->locktype > NO_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* grab an exclusive lock */ + + if (flock(pFile->h, LOCK_EX | LOCK_NB)) { + int tErrno = errno; + /* didn't get, must be busy */ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + } else { + /* got it, set the type and return ok */ + pFile->locktype = locktype; + } + OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), + rc==SQLITE_OK ? "ok" : "failed"); +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ + rc = SQLITE_BUSY; + } +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + return rc; +} + + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int flockUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, + pFile->locktype, getpid()); + assert( locktype<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->locktype==locktype ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (locktype==SHARED_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* no, really, unlock. */ + int rc = flock(pFile->h, LOCK_UN); + if (rc) { + int r, tErrno = errno; + r = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(r) ){ + pFile->lastErrno = tErrno; + } +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + if( (r & SQLITE_IOERR) == SQLITE_IOERR ){ + r = SQLITE_BUSY; + } +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + + return r; + } else { + pFile->locktype = NO_LOCK; + return SQLITE_OK; + } +} + +/* +** Close a file. +*/ +static int flockClose(sqlite3_file *id) { + if( id ){ + flockUnlock(id, NO_LOCK); + } + return closeUnixFile(id); +} + +#endif /* SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORK */ + +/******************* End of the flock lock implementation ********************* +******************************************************************************/ + +/****************************************************************************** +************************ Begin Named Semaphore Locking ************************ +** +** Named semaphore locking is only supported on VxWorks. +** +** Semaphore locking is like dot-lock and flock in that it really only +** supports EXCLUSIVE locking. Only a single process can read or write +** the database file at a time. This reduces potential concurrency, but +** makes the lock implementation much easier. +*/ +#if OS_VXWORKS + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. 
The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int semCheckReservedLock(sqlite3_file *id, int *pResOut) { + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + + /* Check if a thread in this process holds such a lock */ + if( pFile->locktype>SHARED_LOCK ){ + reserved = 1; + } + + /* Otherwise see if some other process holds it. */ + if( !reserved ){ + sem_t *pSem = pFile->pOpen->pSem; + struct stat statBuf; + + if( sem_trywait(pSem)==-1 ){ + int tErrno = errno; + if( EAGAIN != tErrno ){ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK); + pFile->lastErrno = tErrno; + } else { + /* someone else has the lock when we are in NO_LOCK */ + reserved = (pFile->locktype < SHARED_LOCK); + } + }else{ + /* we could have it if we want it */ + sem_post(pSem); + } + } + OSTRACE4("TEST WR-LOCK %d %d %d\n", pFile->h, rc, reserved); + + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** Semaphore locks only really support EXCLUSIVE locks. We track intermediate +** lock states in the sqlite3_file structure, but all locks SHARED or +** above are really EXCLUSIVE locks and exclude all other processes from +** access the file. +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int semLock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + int fd; + sem_t *pSem = pFile->pOpen->pSem; + int rc = SQLITE_OK; + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. */ + if (pFile->locktype > NO_LOCK) { + pFile->locktype = locktype; + rc = SQLITE_OK; + goto sem_end_lock; + } + + /* lock semaphore now but bail out when already locked. */ + if( sem_trywait(pSem)==-1 ){ + rc = SQLITE_BUSY; + goto sem_end_lock; + } + + /* got it, set the type and return ok */ + pFile->locktype = locktype; + + sem_end_lock: + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int semUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + sem_t *pSem = pFile->pOpen->pSem; + + assert( pFile ); + assert( pSem ); + OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, + pFile->locktype, getpid()); + assert( locktype<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->locktype==locktype ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (locktype==SHARED_LOCK) { + pFile->locktype = locktype; + return SQLITE_OK; + } + + /* no, really unlock. 
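+  ** (sem_post() releases the named semaphore that semLock() acquired with
+  ** sem_trywait(); if it fails, the error is mapped to SQLITE_IOERR_UNLOCK
+  ** below and the in-memory lock level is left unchanged.)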
*/ + if ( sem_post(pSem)==-1 ) { + int rc, tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + return rc; + } + pFile->locktype = NO_LOCK; + return SQLITE_OK; +} + +/* + ** Close a file. + */ +static int semClose(sqlite3_file *id) { + if( id ){ + unixFile *pFile = (unixFile*)id; + semUnlock(id, NO_LOCK); + assert( pFile ); + unixEnterMutex(); + releaseLockInfo(pFile->pLock); + releaseOpenCnt(pFile->pOpen); + unixLeaveMutex(); + closeUnixFile(id); + } + return SQLITE_OK; +} + +#endif /* OS_VXWORKS */ +/* +** Named semaphore locking is only available on VxWorks. +** +*************** End of the named semaphore lock implementation **************** +******************************************************************************/ + + +/****************************************************************************** +*************************** Begin AFP Locking ********************************* +** +** AFP is the Apple Filing Protocol. AFP is a network filesystem found +** on Apple Macintosh computers - both OS9 and OSX. +** +** Third-party implementations of AFP are available. But this code here +** only works on OSX. +*/ + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +/* +** The afpLockingContext structure contains all afp lock specific state +*/ +typedef struct afpLockingContext afpLockingContext; +struct afpLockingContext { + unsigned long long sharedByte; + const char *dbPath; /* Name of the open file */ +}; + +struct ByteRangeLockPB2 +{ + unsigned long long offset; /* offset to first byte to lock */ + unsigned long long length; /* nbr of bytes to lock */ + unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ + unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ + unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ + int fd; /* file desc to assoc this lock with */ +}; + +#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) + +/* +** This is a utility for setting or clearing a bit-range lock on an +** AFP filesystem. +** +** Return SQLITE_OK on success, SQLITE_BUSY on failure. +*/ +static int afpSetLock( + const char *path, /* Name of the file to be locked or unlocked */ + unixFile *pFile, /* Open file descriptor on path */ + unsigned long long offset, /* First byte to be locked */ + unsigned long long length, /* Number of bytes to lock */ + int setLockFlag /* True to set lock. False to clear lock */ +){ + struct ByteRangeLockPB2 pb; + int err; + + pb.unLockFlag = setLockFlag ? 0 : 1; + pb.startEndFlag = 0; + pb.offset = offset; + pb.length = length; + pb.fd = pFile->h; + + OSTRACE6("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", + (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""), + offset, length); + err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); + if ( err==-1 ) { + int rc; + int tErrno = errno; + OSTRACE4("AFPSETLOCK failed to fsctl() '%s' %d %s\n", + path, tErrno, strerror(tErrno)); +#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS + rc = SQLITE_BUSY; +#else + rc = sqliteErrorFromPosixError(tErrno, + setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK); +#endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */ + if( IS_LOCK_ERROR(rc) ){ + pFile->lastErrno = tErrno; + } + return rc; + } else { + return SQLITE_OK; + } +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. 
The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){
+  int rc = SQLITE_OK;
+  int reserved = 0;
+  unixFile *pFile = (unixFile*)id;
+
+  SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+  assert( pFile );
+  afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
+
+  /* Check if a thread in this process holds such a lock */
+  if( pFile->locktype>SHARED_LOCK ){
+    reserved = 1;
+  }
+
+  /* Otherwise see if some other process holds it.
+  */
+  if( !reserved ){
+    /* lock the RESERVED byte */
+    int lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1);
+    if( SQLITE_OK==lrc ){
+      /* if we succeeded in taking the reserved lock, unlock it to restore
+      ** the original state */
+      lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0);
+    } else {
+      /* if we failed to get the lock then someone else must have it */
+      reserved = 1;
+    }
+    if( IS_LOCK_ERROR(lrc) ){
+      rc=lrc;
+    }
+  }
+
+  OSTRACE4("TEST WR-LOCK %d %d %d\n", pFile->h, rc, reserved);
+
+  *pResOut = reserved;
+  return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter locktype - one
+** of the following:
+**
+**     (1) SHARED_LOCK
+**     (2) RESERVED_LOCK
+**     (3) PENDING_LOCK
+**     (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between.  The locking might fail on one of the later
+** transitions leaving the lock state different from what it started but
+** still short of its goal.  The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+**    UNLOCKED -> SHARED
+**    SHARED -> RESERVED
+**    SHARED -> (PENDING) -> EXCLUSIVE
+**    RESERVED -> (PENDING) -> EXCLUSIVE
+**    PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock.  Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int afpLock(sqlite3_file *id, int locktype){
+  int rc = SQLITE_OK;
+  unixFile *pFile = (unixFile*)id;
+  afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
+
+  assert( pFile );
+  OSTRACE5("LOCK %d %s was %s pid=%d\n", pFile->h,
+         locktypeName(locktype), locktypeName(pFile->locktype), getpid());
+
+  /* If there is already a lock of this type or more restrictive on the
+  ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as
+  ** unixEnterMutex() hasn't been called yet.
+  */
+  if( pFile->locktype>=locktype ){
+    OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h,
+           locktypeName(locktype));
+    return SQLITE_OK;
+  }
+
+  /* Make sure the locking sequence is correct
+  */
+  assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK );
+  assert( locktype!=PENDING_LOCK );
+  assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK );
+
+  /* This mutex is needed because pFile->pLock is shared across threads
+  */
+  unixEnterMutex();
+
+  /* Make sure the current thread owns the pFile.
+  */
+  rc = transferOwnership(pFile);
+  if( rc!=SQLITE_OK ){
+    unixLeaveMutex();
+    return rc;
+  }
+
+  /* A PENDING lock is needed before acquiring a SHARED lock and before
+  ** acquiring an EXCLUSIVE lock.  For the SHARED lock, the PENDING will
+  ** be released.
+  */
+  if( locktype==SHARED_LOCK
+      || (locktype==EXCLUSIVE_LOCK && pFile->locktype<PENDING_LOCK)
+  ){
+    int failed;
+    failed = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 1);
+    if (failed) {
+      rc = failed;
+      goto afp_end_lock;
+    }
+  }
+
+  /* If control gets to this point, then actually go ahead and make
+  ** operating system calls for the specified lock.
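+  ** (For a SHARED lock this means taking a byte-range lock on one randomly
+  ** chosen byte out of the shared byte range via afpSetLock() and then
+  ** dropping the temporary PENDING-byte lock, analogous to the POSIX
+  ** implementation earlier in this file.)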
+ */ + if( locktype==SHARED_LOCK ){ + int lk, lrc1, lrc2, lrc1Errno; + + /* Now get the read-lock SHARED_LOCK */ + /* note that the quality of the randomness doesn't matter that much */ + lk = random(); + context->sharedByte = (lk & 0x7fffffff)%(SHARED_SIZE - 1); + lrc1 = afpSetLock(context->dbPath, pFile, + SHARED_FIRST+context->sharedByte, 1, 1); + if( IS_LOCK_ERROR(lrc1) ){ + lrc1Errno = pFile->lastErrno; + } + /* Drop the temporary PENDING lock */ + lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); + + if( IS_LOCK_ERROR(lrc1) ) { + pFile->lastErrno = lrc1Errno; + rc = lrc1; + goto afp_end_lock; + } else if( IS_LOCK_ERROR(lrc2) ){ + rc = lrc2; + goto afp_end_lock; + } else if( lrc1 != SQLITE_OK ) { + rc = lrc1; + } else { + pFile->locktype = SHARED_LOCK; + pFile->pOpen->nLock++; + } + }else{ + /* The request was for a RESERVED or EXCLUSIVE lock. It is + ** assumed that there is a SHARED or greater lock on the file + ** already. + */ + int failed = 0; + assert( 0!=pFile->locktype ); + if (locktype >= RESERVED_LOCK && pFile->locktype < RESERVED_LOCK) { + /* Acquire a RESERVED lock */ + failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); + } + if (!failed && locktype == EXCLUSIVE_LOCK) { + /* Acquire an EXCLUSIVE lock */ + + /* Remove the shared lock before trying the range. we'll need to + ** reestablish the shared lock if we can't get the afpUnlock + */ + if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + + context->sharedByte, 1, 0)) ){ + int failed2 = SQLITE_OK; + /* now attemmpt to get the exclusive lock range */ + failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, + SHARED_SIZE, 1); + if( failed && (failed2 = afpSetLock(context->dbPath, pFile, + SHARED_FIRST + context->sharedByte, 1, 1)) ){ + /* Can't reestablish the shared lock. Sqlite can't deal, this is + ** a critical I/O error + */ + rc = ((failed & SQLITE_IOERR) == SQLITE_IOERR) ? failed2 : + SQLITE_IOERR_LOCK; + goto afp_end_lock; + } + }else{ + rc = failed; + } + } + if( failed ){ + rc = failed; + } + } + + if( rc==SQLITE_OK ){ + pFile->locktype = locktype; + }else if( locktype==EXCLUSIVE_LOCK ){ + pFile->locktype = PENDING_LOCK; + } + +afp_end_lock: + unixLeaveMutex(); + OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), + rc==SQLITE_OK ? "ok" : "failed"); + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. 
+*/ +static int afpUnlock(sqlite3_file *id, int locktype) { + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + afpLockingContext *pCtx = (afpLockingContext *) pFile->lockingContext; + + assert( pFile ); + OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, + pFile->locktype, getpid()); + + assert( locktype<=SHARED_LOCK ); + if( pFile->locktype<=locktype ){ + return SQLITE_OK; } - do{ - sqlite3_snprintf(SQLITE_TEMPNAME_SIZE, zBuf, "%s/"TEMP_FILE_PREFIX, zDir); - j = strlen(zBuf); - sqlite3Randomness(15, &zBuf[j]); - for(i=0; i<15; i++, j++){ - zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + if( CHECK_THREADID(pFile) ){ + return SQLITE_MISUSE; + } + unixEnterMutex(); + if( pFile->locktype>SHARED_LOCK ){ + + if( pFile->locktype==EXCLUSIVE_LOCK ){ + rc = afpSetLock(pCtx->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0); + if( rc==SQLITE_OK && locktype==SHARED_LOCK ){ + /* only re-establish the shared lock if necessary */ + int sharedLockByte = SHARED_FIRST+pCtx->sharedByte; + rc = afpSetLock(pCtx->dbPath, pFile, sharedLockByte, 1, 1); + } } - zBuf[j] = 0; - }while( access(zBuf,0)==0 ); - return SQLITE_OK; + if( rc==SQLITE_OK && pFile->locktype>=PENDING_LOCK ){ + rc = afpSetLock(pCtx->dbPath, pFile, PENDING_BYTE, 1, 0); + } + if( rc==SQLITE_OK && pFile->locktype>=RESERVED_LOCK ){ + rc = afpSetLock(pCtx->dbPath, pFile, RESERVED_BYTE, 1, 0); + } + }else if( locktype==NO_LOCK ){ + /* clear the shared lock */ + int sharedLockByte = SHARED_FIRST+pCtx->sharedByte; + rc = afpSetLock(pCtx->dbPath, pFile, sharedLockByte, 1, 0); + } + + if( rc==SQLITE_OK ){ + if( locktype==NO_LOCK ){ + struct unixOpenCnt *pOpen = pFile->pOpen; + pOpen->nLock--; + assert( pOpen->nLock>=0 ); + if( pOpen->nLock==0 && pOpen->nPending>0 ){ + int i; + for(i=0; inPending; i++){ + if( pOpen->aPending[i] < 0 ) continue; + if( close(pOpen->aPending[i]) ){ + pFile->lastErrno = errno; + rc = SQLITE_IOERR_CLOSE; + }else{ + pOpen->aPending[i] = -1; + } + } + if( rc==SQLITE_OK ){ + sqlite3_free(pOpen->aPending); + pOpen->nPending = 0; + pOpen->aPending = 0; + } + } + } + } + unixLeaveMutex(); + if( rc==SQLITE_OK ) pFile->locktype = locktype; + return rc; } /* -** Check that a given pathname is a directory and is writable -** +** Close a file & cleanup AFP specific locking context */ -int sqlite3UnixIsDirWritable(char *zBuf){ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS - struct stat buf; - if( zBuf==0 ) return 0; - if( zBuf[0]==0 ) return 0; - if( stat(zBuf, &buf) ) return 0; - if( !S_ISDIR(buf.st_mode) ) return 0; - if( access(zBuf, 07) ) return 0; -#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ - return 1; +static int afpClose(sqlite3_file *id) { + if( id ){ + unixFile *pFile = (unixFile*)id; + afpUnlock(id, NO_LOCK); + unixEnterMutex(); + if( pFile->pOpen && pFile->pOpen->nLock ){ + /* If there are outstanding locks, do not actually close the file just + ** yet because that would clear those locks. Instead, add the file + ** descriptor to pOpen->aPending. It will be automatically closed when + ** the last lock is cleared. 
+ */ + int *aNew; + struct unixOpenCnt *pOpen = pFile->pOpen; + aNew = sqlite3_realloc(pOpen->aPending, (pOpen->nPending+1)*sizeof(int) ); + if( aNew==0 ){ + /* If a malloc fails, just leak the file descriptor */ + }else{ + pOpen->aPending = aNew; + pOpen->aPending[pOpen->nPending] = pFile->h; + pOpen->nPending++; + pFile->h = -1; + } + } + releaseOpenCnt(pFile->pOpen); + sqlite3_free(pFile->lockingContext); + closeUnixFile(id); + unixLeaveMutex(); + } + return SQLITE_OK; } +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ /* -** Seek to the offset in id->offset then read cnt bytes into pBuf. -** Return the number of bytes actually read. Update the offset. +** The code above is the AFP lock implementation. The code is specific +** to MacOSX and does not work on other unix platforms. No alternative +** is available. If you don't compile for a mac, then the "unix-afp" +** VFS is not available. +** +********************* End of the AFP lock implementation ********************** +******************************************************************************/ + + +/****************************************************************************** +**************** Non-locking sqlite3_file methods ***************************** +** +** The next division contains implementations for all methods of the +** sqlite3_file object other than the locking methods. The locking +** methods were defined in divisions above (one locking method per +** division). Those methods that are common to all locking modes +** are gather together into this division. +*/ + +/* +** Seek to the offset passed as the second argument, then read cnt +** bytes into pBuf. Return the number of bytes actually read. +** +** NB: If you define USE_PREAD or USE_PREAD64, then it might also +** be necessary to define _XOPEN_SOURCE to be 500. This varies from +** one system to another. Since SQLite does not define USE_PREAD +** any any form by default, we will not attempt to define _XOPEN_SOURCE. +** See tickets #2741 and #2681. +** +** To avoid stomping the errno value on a failed read the lastErrno value +** is set before returning. */ -static int seekAndRead(unixFile *id, void *pBuf, int cnt){ +static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){ int got; i64 newOffset; TIMER_START; #if defined(USE_PREAD) - got = pread(id->h, pBuf, cnt, id->offset); + got = pread(id->h, pBuf, cnt, offset); SimulateIOError( got = -1 ); #elif defined(USE_PREAD64) - got = pread64(id->h, pBuf, cnt, id->offset); + got = pread64(id->h, pBuf, cnt, offset); SimulateIOError( got = -1 ); #else - newOffset = lseek(id->h, id->offset, SEEK_SET); + newOffset = lseek(id->h, offset, SEEK_SET); SimulateIOError( newOffset-- ); - if( newOffset!=id->offset ){ + if( newOffset!=offset ){ + if( newOffset == -1 ){ + ((unixFile*)id)->lastErrno = errno; + }else{ + ((unixFile*)id)->lastErrno = 0; + } return -1; } got = read(id->h, pBuf, cnt); #endif TIMER_END; - OSTRACE5("READ %-3d %5d %7lld %d\n", id->h, got, id->offset, TIMER_ELAPSED); - if( got>0 ){ - id->offset += got; + if( got<0 ){ + ((unixFile*)id)->lastErrno = errno; } + OSTRACE5("READ %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED); return got; } @@ -1025,15 +2691,29 @@ ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. 
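** (A short read - fewer bytes available than requested - is reported as
** SQLITE_IOERR_SHORT_READ and the unread tail of the buffer is zero-filled,
** as the code below shows.)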
*/ -static int unixRead(OsFile *id, void *pBuf, int amt){ +static int unixRead( + sqlite3_file *id, + void *pBuf, + int amt, + sqlite3_int64 offset +){ int got; assert( id ); - got = seekAndRead((unixFile*)id, pBuf, amt); + + /* Never read or write any of the bytes in the locking range */ + assert( ((unixFile*)id)->isLockable==0 + || offset>=PENDING_BYTE+512 + || offset+amt<=PENDING_BYTE ); + + got = seekAndRead((unixFile*)id, offset, pBuf, amt); if( got==amt ){ return SQLITE_OK; }else if( got<0 ){ + /* lastErrno set by seekAndRead */ return SQLITE_IOERR_READ; }else{ + ((unixFile*)id)->lastErrno = 0; /* not a system error */ + /* Unread parts of the buffer must be zero-filled */ memset(&((char*)pBuf)[got], 0, amt-got); return SQLITE_IOERR_SHORT_READ; } @@ -1042,27 +2722,36 @@ /* ** Seek to the offset in id->offset then read cnt bytes into pBuf. ** Return the number of bytes actually read. Update the offset. +** +** To avoid stomping the errno value on a failed write the lastErrno value +** is set before returning. */ -static int seekAndWrite(unixFile *id, const void *pBuf, int cnt){ +static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){ int got; i64 newOffset; TIMER_START; #if defined(USE_PREAD) - got = pwrite(id->h, pBuf, cnt, id->offset); + got = pwrite(id->h, pBuf, cnt, offset); #elif defined(USE_PREAD64) - got = pwrite64(id->h, pBuf, cnt, id->offset); + got = pwrite64(id->h, pBuf, cnt, offset); #else - newOffset = lseek(id->h, id->offset, SEEK_SET); - if( newOffset!=id->offset ){ + newOffset = lseek(id->h, offset, SEEK_SET); + if( newOffset!=offset ){ + if( newOffset == -1 ){ + ((unixFile*)id)->lastErrno = errno; + }else{ + ((unixFile*)id)->lastErrno = 0; + } return -1; } got = write(id->h, pBuf, cnt); #endif TIMER_END; - OSTRACE5("WRITE %-3d %5d %7lld %d\n", id->h, got, id->offset, TIMER_ELAPSED); - if( got>0 ){ - id->offset += got; + if( got<0 ){ + ((unixFile*)id)->lastErrno = errno; } + + OSTRACE5("WRITE %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED); return got; } @@ -1071,1865 +2760,2389 @@ ** Write data from a buffer into a file. Return SQLITE_OK on success ** or some other error code on failure. */ -static int unixWrite(OsFile *id, const void *pBuf, int amt){ +static int unixWrite( + sqlite3_file *id, + const void *pBuf, + int amt, + sqlite3_int64 offset +){ int wrote = 0; assert( id ); assert( amt>0 ); - while( amt>0 && (wrote = seekAndWrite((unixFile*)id, pBuf, amt))>0 ){ + + /* Never read or write any of the bytes in the locking range */ + assert( ((unixFile*)id)->isLockable==0 + || offset>=PENDING_BYTE+512 + || offset+amt<=PENDING_BYTE ); + +#ifndef NDEBUG + /* If we are doing a normal write to a database file (as opposed to + ** doing a hot-journal rollback or a write to some file other than a + ** normal database file) then record the fact that the database + ** has changed. If the transaction counter is modified, record that + ** fact too. 
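+  ** (The 4-byte file change counter is stored at byte offset 24 of the
+  ** database header, which is why a write overlapping offsets 24..27
+  ** triggers the read-back and comparison against the old value below.)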
+ */ + if( ((unixFile*)id)->inNormalWrite ){ + unixFile *pFile = (unixFile*)id; + pFile->dbUpdate = 1; /* The database has been modified */ + if( offset<=24 && offset+amt>=27 ){ + int rc; + char oldCntr[4]; + SimulateIOErrorBenign(1); + rc = seekAndRead(pFile, 24, oldCntr, 4); + SimulateIOErrorBenign(0); + if( rc!=4 || memcmp(oldCntr, &((char*)pBuf)[24-offset], 4)!=0 ){ + pFile->transCntrChng = 1; /* The transaction counter has changed */ + } + } + } +#endif + + while( amt>0 && (wrote = seekAndWrite((unixFile*)id, offset, pBuf, amt))>0 ){ amt -= wrote; + offset += wrote; pBuf = &((char*)pBuf)[wrote]; } SimulateIOError(( wrote=(-1), amt=1 )); SimulateDiskfullError(( wrote=0, amt=1 )); if( amt>0 ){ if( wrote<0 ){ + /* lastErrno set by seekAndWrite */ return SQLITE_IOERR_WRITE; }else{ + ((unixFile*)id)->lastErrno = 0; /* not a system error */ return SQLITE_FULL; } } return SQLITE_OK; } -/* -** Move the read/write pointer in a file. -*/ -static int unixSeek(OsFile *id, i64 offset){ - assert( id ); -#ifdef SQLITE_TEST - if( offset ) SimulateDiskfullError(return SQLITE_FULL); -#endif - ((unixFile*)id)->offset = offset; - return SQLITE_OK; -} - #ifdef SQLITE_TEST /* ** Count the number of fullsyncs and normal syncs. This is used to test -** that syncs and fullsyncs are occuring at the right times. +** that syncs and fullsyncs are occurring at the right times. */ int sqlite3_sync_count = 0; int sqlite3_fullsync_count = 0; -#endif - -/* -** Use the fdatasync() API only if the HAVE_FDATASYNC macro is defined. -** Otherwise use fsync() in its place. -*/ -#ifndef HAVE_FDATASYNC -# define fdatasync fsync -#endif - -/* -** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not -** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently -** only available on Mac OS X. But that could change. -*/ -#ifdef F_FULLFSYNC -# define HAVE_FULLFSYNC 1 -#else -# define HAVE_FULLFSYNC 0 -#endif - - -/* -** The fsync() system call does not work as advertised on many -** unix systems. The following procedure is an attempt to make -** it work better. -** -** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful -** for testing when we want to run through the test suite quickly. -** You are strongly advised *not* to deploy with SQLITE_NO_SYNC -** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash -** or power failure will likely corrupt the database file. -*/ -static int full_fsync(int fd, int fullSync, int dataOnly){ - int rc; - - /* Record the number of times that we do a normal fsync() and - ** FULLSYNC. This is used during testing to verify that this procedure - ** gets called with the correct arguments. - */ -#ifdef SQLITE_TEST - if( fullSync ) sqlite3_fullsync_count++; - sqlite3_sync_count++; -#endif - - /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a - ** no-op - */ -#ifdef SQLITE_NO_SYNC - rc = SQLITE_OK; -#else - -#if HAVE_FULLFSYNC - if( fullSync ){ - rc = fcntl(fd, F_FULLFSYNC, 0); - }else{ - rc = 1; - } - /* If the FULLFSYNC failed, fall back to attempting an fsync(). - * It shouldn't be possible for fullfsync to fail on the local - * file system (on OSX), so failure indicates that FULLFSYNC - * isn't supported for this file system. So, attempt an fsync - * and (for now) ignore the overhead of a superfluous fcntl call. - * It'd be better to detect fullfsync support once and avoid - * the fcntl call every time sync is called. 
- */ - if( rc ) rc = fsync(fd); - -#else - if( dataOnly ){ - rc = fdatasync(fd); - }else{ - rc = fsync(fd); - } -#endif /* HAVE_FULLFSYNC */ -#endif /* defined(SQLITE_NO_SYNC) */ - - return rc; -} - -/* -** Make sure all writes to a particular file are committed to disk. -** -** If dataOnly==0 then both the file itself and its metadata (file -** size, access time, etc) are synced. If dataOnly!=0 then only the -** file data is synced. -** -** Under Unix, also make sure that the directory entry for the file -** has been created by fsync-ing the directory that contains the file. -** If we do not do this and we encounter a power failure, the directory -** entry for the journal might not exist after we reboot. The next -** SQLite to access the file will not know that the journal exists (because -** the directory entry for the journal was never created) and the transaction -** will not roll back - possibly leading to database corruption. -*/ -static int unixSync(OsFile *id, int dataOnly){ - int rc; - unixFile *pFile = (unixFile*)id; - assert( pFile ); - OSTRACE2("SYNC %-3d\n", pFile->h); - rc = full_fsync(pFile->h, pFile->fullSync, dataOnly); - SimulateIOError( rc=1 ); - if( rc ){ - return SQLITE_IOERR_FSYNC; - } - if( pFile->dirfd>=0 ){ - OSTRACE4("DIRSYNC %-3d (have_fullfsync=%d fullsync=%d)\n", pFile->dirfd, - HAVE_FULLFSYNC, pFile->fullSync); -#ifndef SQLITE_DISABLE_DIRSYNC - /* The directory sync is only attempted if full_fsync is - ** turned off or unavailable. If a full_fsync occurred above, - ** then the directory sync is superfluous. - */ - if( (!HAVE_FULLFSYNC || !pFile->fullSync) && full_fsync(pFile->dirfd,0,0) ){ - /* - ** We have received multiple reports of fsync() returning - ** errors when applied to directories on certain file systems. - ** A failed directory sync is not a big deal. So it seems - ** better to ignore the error. Ticket #1657 - */ - /* return SQLITE_IOERR; */ - } -#endif - close(pFile->dirfd); /* Only need to sync once, so close the directory */ - pFile->dirfd = -1; /* when we are done. */ - } - return SQLITE_OK; -} - -/* -** Sync the directory zDirname. This is a no-op on operating systems other -** than UNIX. -** -** This is used to make sure the master journal file has truely been deleted -** before making changes to individual journals on a multi-database commit. -** The F_FULLFSYNC option is not needed here. -*/ -int sqlite3UnixSyncDirectory(const char *zDirname){ -#ifdef SQLITE_DISABLE_DIRSYNC - return SQLITE_OK; -#else - int fd; - int r; - fd = open(zDirname, O_RDONLY|O_BINARY, 0); - OSTRACE3("DIRSYNC %-3d (%s)\n", fd, zDirname); - if( fd<0 ){ - return SQLITE_CANTOPEN; - } - r = fsync(fd); - close(fd); - SimulateIOError( r=1 ); - if( r ){ - return SQLITE_IOERR_DIR_FSYNC; - }else{ - return SQLITE_OK; - } -#endif -} - -/* -** Truncate an open file to a specified size -*/ -static int unixTruncate(OsFile *id, i64 nByte){ - int rc; - assert( id ); - rc = ftruncate(((unixFile*)id)->h, (off_t)nByte); - SimulateIOError( rc=1 ); - if( rc ){ - return SQLITE_IOERR_TRUNCATE; - }else{ - return SQLITE_OK; - } -} - -/* -** Determine the current size of a file in bytes -*/ -static int unixFileSize(OsFile *id, i64 *pSize){ - int rc; - struct stat buf; - assert( id ); - rc = fstat(((unixFile*)id)->h, &buf); - SimulateIOError( rc=1 ); - if( rc!=0 ){ - return SQLITE_IOERR_FSTAT; - } - *pSize = buf.st_size; - return SQLITE_OK; -} - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. 
If such a lock is held, return -** non-zero. If the file is unlocked or holds only SHARED locks, then -** return zero. -*/ -static int unixCheckReservedLock(OsFile *id){ - int r = 0; - unixFile *pFile = (unixFile*)id; - - assert( pFile ); - sqlite3OsEnterMutex(); /* Because pFile->pLock is shared across threads */ - - /* Check if a thread in this process holds such a lock */ - if( pFile->pLock->locktype>SHARED_LOCK ){ - r = 1; - } +#endif - /* Otherwise see if some other process holds it. - */ - if( !r ){ - struct flock lock; - lock.l_whence = SEEK_SET; - lock.l_start = RESERVED_BYTE; - lock.l_len = 1; - lock.l_type = F_WRLCK; - fcntl(pFile->h, F_GETLK, &lock); - if( lock.l_type!=F_UNLCK ){ - r = 1; - } - } - - sqlite3OsLeaveMutex(); - OSTRACE3("TEST WR-LOCK %d %d\n", pFile->h, r); +/* +** We do not trust systems to provide a working fdatasync(). Some do. +** Others do no. To be safe, we will stick with the (slower) fsync(). +** If you know that your system does support fdatasync() correctly, +** then simply compile with -Dfdatasync=fdatasync +*/ +#if !defined(fdatasync) && !defined(__linux__) +# define fdatasync fsync +#endif + +/* +** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not +** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently +** only available on Mac OS X. But that could change. +*/ +#ifdef F_FULLFSYNC +# define HAVE_FULLFSYNC 1 +#else +# define HAVE_FULLFSYNC 0 +#endif - return r; -} /* -** Lock the file with the lock specified by parameter locktype - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: +** The fsync() system call does not work as advertised on many +** unix systems. The following procedure is an attempt to make +** it work better. ** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE +** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful +** for testing when we want to run through the test suite quickly. +** You are strongly advised *not* to deploy with SQLITE_NO_SYNC +** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash +** or power failure will likely corrupt the database file. ** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. +** SQLite sets the dataOnly flag if the size of the file is unchanged. +** The idea behind dataOnly is that it should only write the file content +** to disk, not the inode. We only set dataOnly if the file size is +** unchanged since the file size is part of the inode. However, +** Ted Ts'o tells us that fdatasync() will also write the inode if the +** file size has changed. The only real difference between fdatasync() +** and fsync(), Ted tells us, is that fdatasync() will not flush the +** inode if the mtime or owner or other inode attributes have changed. +** We only care about the file size, not the other file attributes, so +** as far as SQLite is concerned, an fdatasync() is always adequate. +** So, we always use fdatasync() if it is available, regardless of +** the value of the dataOnly flag. 
*/ -static int unixLock(OsFile *id, int locktype){ - /* The following describes the implementation of the various locks and - ** lock transitions in terms of the POSIX advisory shared and exclusive - ** lock primitives (called read-locks and write-locks below, to avoid - ** confusion with SQLite lock names). The algorithms are complicated - ** slightly in order to be compatible with windows systems simultaneously - ** accessing the same database file, in case that is ever required. - ** - ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved - ** byte', each single bytes at well known offsets, and the 'shared byte - ** range', a range of 510 bytes at a well known offset. - ** - ** To obtain a SHARED lock, a read-lock is obtained on the 'pending - ** byte'. If this is successful, a random byte from the 'shared byte - ** range' is read-locked and the lock on the 'pending byte' released. - ** - ** A process may only obtain a RESERVED lock after it has a SHARED lock. - ** A RESERVED lock is implemented by grabbing a write-lock on the - ** 'reserved byte'. - ** - ** A process may only obtain a PENDING lock after it has obtained a - ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock - ** on the 'pending byte'. This ensures that no new SHARED locks can be - ** obtained, but existing SHARED locks are allowed to persist. A process - ** does not have to obtain a RESERVED lock on the way to a PENDING lock. - ** This property is used by the algorithm for rolling back a journal file - ** after a crash. - ** - ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is - ** implemented by obtaining a write-lock on the entire 'shared byte - ** range'. Since all other locks require a read-lock on one of the bytes - ** within this range, this ensures that no other locks are held on the - ** database. - ** - ** The reason a single byte cannot be used instead of the 'shared byte - ** range' is that some versions of windows do not support read-locks. By - ** locking a random byte from a range, concurrent SHARED locks may exist - ** even if the locking primitive used is always a write-lock. - */ - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - struct lockInfo *pLock = pFile->pLock; - struct flock lock; - int s; - - assert( pFile ); - OSTRACE7("LOCK %d %s was %s(%s,%d) pid=%d\n", pFile->h, - locktypeName(locktype), locktypeName(pFile->locktype), - locktypeName(pLock->locktype), pLock->cnt , getpid()); - - /* If there is already a lock of this type or more restrictive on the - ** OsFile, do nothing. Don't use the end_lock: exit path, as - ** sqlite3OsEnterMutex() hasn't been called yet. - */ - if( pFile->locktype>=locktype ){ - OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h, - locktypeName(locktype)); - return SQLITE_OK; - } +static int full_fsync(int fd, int fullSync, int dataOnly){ + int rc; - /* Make sure the locking sequence is correct + /* The following "ifdef/elif/else/" block has the same structure as + ** the one below. It is replicated here solely to avoid cluttering + ** up the real code with the UNUSED_PARAMETER() macros. 
*/ - assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); - assert( locktype!=PENDING_LOCK ); - assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); +#ifdef SQLITE_NO_SYNC + UNUSED_PARAMETER(fd); + UNUSED_PARAMETER(fullSync); + UNUSED_PARAMETER(dataOnly); +#elif HAVE_FULLFSYNC + UNUSED_PARAMETER(dataOnly); +#else + UNUSED_PARAMETER(fullSync); + UNUSED_PARAMETER(dataOnly); +#endif - /* This mutex is needed because pFile->pLock is shared across threads + /* Record the number of times that we do a normal fsync() and + ** FULLSYNC. This is used during testing to verify that this procedure + ** gets called with the correct arguments. */ - sqlite3OsEnterMutex(); +#ifdef SQLITE_TEST + if( fullSync ) sqlite3_fullsync_count++; + sqlite3_sync_count++; +#endif - /* Make sure the current thread owns the pFile. + /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a + ** no-op */ - rc = transferOwnership(pFile); - if( rc!=SQLITE_OK ){ - sqlite3OsLeaveMutex(); - return rc; +#ifdef SQLITE_NO_SYNC + rc = SQLITE_OK; +#elif HAVE_FULLFSYNC + if( fullSync ){ + rc = fcntl(fd, F_FULLFSYNC, 0); + }else{ + rc = 1; } - pLock = pFile->pLock; - - /* If some thread using this PID has a lock via a different OsFile* - ** handle that precludes the requested lock, return BUSY. + /* If the FULLFSYNC failed, fall back to attempting an fsync(). + ** It shouldn't be possible for fullfsync to fail on the local + ** file system (on OSX), so failure indicates that FULLFSYNC + ** isn't supported for this file system. So, attempt an fsync + ** and (for now) ignore the overhead of a superfluous fcntl call. + ** It'd be better to detect fullfsync support once and avoid + ** the fcntl call every time sync is called. */ - if( (pFile->locktype!=pLock->locktype && - (pLock->locktype>=PENDING_LOCK || locktype>SHARED_LOCK)) - ){ - rc = SQLITE_BUSY; - goto end_lock; - } + if( rc ) rc = fsync(fd); - /* If a SHARED lock is requested, and some thread using this PID already - ** has a SHARED or RESERVED lock, then increment reference counts and - ** return SQLITE_OK. - */ - if( locktype==SHARED_LOCK && - (pLock->locktype==SHARED_LOCK || pLock->locktype==RESERVED_LOCK) ){ - assert( locktype==SHARED_LOCK ); - assert( pFile->locktype==0 ); - assert( pLock->cnt>0 ); - pFile->locktype = SHARED_LOCK; - pLock->cnt++; - pFile->pOpen->nLock++; - goto end_lock; +#else + rc = fdatasync(fd); +#if OS_VXWORKS + if( rc==-1 && errno==ENOTSUP ){ + rc = fsync(fd); } +#endif /* OS_VXWORKS */ +#endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */ - lock.l_len = 1L; + if( OS_VXWORKS && rc!= -1 ){ + rc = 0; + } + return rc; +} - lock.l_whence = SEEK_SET; +/* +** Make sure all writes to a particular file are committed to disk. +** +** If dataOnly==0 then both the file itself and its metadata (file +** size, access time, etc) are synced. If dataOnly!=0 then only the +** file data is synced. +** +** Under Unix, also make sure that the directory entry for the file +** has been created by fsync-ing the directory that contains the file. +** If we do not do this and we encounter a power failure, the directory +** entry for the journal might not exist after we reboot. The next +** SQLite to access the file will not know that the journal exists (because +** the directory entry for the journal was never created) and the transaction +** will not roll back - possibly leading to database corruption. 
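+**
+** (The low four bits of the flags argument carry SQLITE_SYNC_NORMAL or
+** SQLITE_SYNC_FULL, and SQLITE_SYNC_DATAONLY may additionally be OR-ed in;
+** the asserts at the top of the function check exactly this.)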
+*/ +static int unixSync(sqlite3_file *id, int flags){ + int rc; + unixFile *pFile = (unixFile*)id; - /* A PENDING lock is needed before acquiring a SHARED lock and before - ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will - ** be released. - */ - if( locktype==SHARED_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktypeh, F_SETLK, &lock); - if( s==(-1) ){ - rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY; - goto end_lock; - } - } + int isDataOnly = (flags&SQLITE_SYNC_DATAONLY); + int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL; + /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ + assert((flags&0x0F)==SQLITE_SYNC_NORMAL + || (flags&0x0F)==SQLITE_SYNC_FULL + ); - /* If control gets to this point, then actually go ahead and make - ** operating system calls for the specified lock. + /* Unix cannot, but some systems may return SQLITE_FULL from here. This + ** line is to test that doing so does not cause any problems. */ - if( locktype==SHARED_LOCK ){ - assert( pLock->cnt==0 ); - assert( pLock->locktype==0 ); - - /* Now get the read-lock */ - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - s = fcntl(pFile->h, F_SETLK, &lock); + SimulateDiskfullError( return SQLITE_FULL ); - /* Drop the temporary PENDING lock */ - lock.l_start = PENDING_BYTE; - lock.l_len = 1L; - lock.l_type = F_UNLCK; - if( fcntl(pFile->h, F_SETLK, &lock)!=0 ){ - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - goto end_lock; - } - if( s==(-1) ){ - rc = (errno==EINVAL) ? SQLITE_NOLFS : SQLITE_BUSY; - }else{ - pFile->locktype = SHARED_LOCK; - pFile->pOpen->nLock++; - pLock->cnt = 1; - } - }else if( locktype==EXCLUSIVE_LOCK && pLock->cnt>1 ){ - /* We are trying for an exclusive lock but another thread in this - ** same process is still holding a shared lock. */ - rc = SQLITE_BUSY; - }else{ - /* The request was for a RESERVED or EXCLUSIVE lock. It is - ** assumed that there is a SHARED or greater lock on the file - ** already. - */ - assert( 0!=pFile->locktype ); - lock.l_type = F_WRLCK; - switch( locktype ){ - case RESERVED_LOCK: - lock.l_start = RESERVED_BYTE; - break; - case EXCLUSIVE_LOCK: - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - break; - default: - assert(0); + assert( pFile ); + OSTRACE2("SYNC %-3d\n", pFile->h); + rc = full_fsync(pFile->h, isFullsync, isDataOnly); + SimulateIOError( rc=1 ); + if( rc ){ + pFile->lastErrno = errno; + return SQLITE_IOERR_FSYNC; + } + if( pFile->dirfd>=0 ){ + int err; + OSTRACE4("DIRSYNC %-3d (have_fullfsync=%d fullsync=%d)\n", pFile->dirfd, + HAVE_FULLFSYNC, isFullsync); +#ifndef SQLITE_DISABLE_DIRSYNC + /* The directory sync is only attempted if full_fsync is + ** turned off or unavailable. If a full_fsync occurred above, + ** then the directory sync is superfluous. + */ + if( (!HAVE_FULLFSYNC || !isFullsync) && full_fsync(pFile->dirfd,0,0) ){ + /* + ** We have received multiple reports of fsync() returning + ** errors when applied to directories on certain file systems. + ** A failed directory sync is not a big deal. So it seems + ** better to ignore the error. Ticket #1657 + */ + /* pFile->lastErrno = errno; */ + /* return SQLITE_IOERR; */ } - s = fcntl(pFile->h, F_SETLK, &lock); - if( s==(-1) ){ - rc = (errno==EINVAL) ? 
SQLITE_NOLFS : SQLITE_BUSY; +#endif + err = close(pFile->dirfd); /* Only need to sync once, so close the */ + if( err==0 ){ /* directory when we are done */ + pFile->dirfd = -1; + }else{ + pFile->lastErrno = errno; + rc = SQLITE_IOERR_DIR_CLOSE; } } - - if( rc==SQLITE_OK ){ - pFile->locktype = locktype; - pLock->locktype = locktype; - }else if( locktype==EXCLUSIVE_LOCK ){ - pFile->locktype = PENDING_LOCK; - pLock->locktype = PENDING_LOCK; - } - -end_lock: - sqlite3OsLeaveMutex(); - OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), - rc==SQLITE_OK ? "ok" : "failed"); return rc; } /* -** Lower the locking level on file descriptor pFile to locktype. locktype -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. +** Truncate an open file to a specified size */ -static int unixUnlock(OsFile *id, int locktype){ - struct lockInfo *pLock; - struct flock lock; - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - - assert( pFile ); - OSTRACE7("UNLOCK %d %d was %d(%d,%d) pid=%d\n", pFile->h, locktype, - pFile->locktype, pFile->pLock->locktype, pFile->pLock->cnt, getpid()); - - assert( locktype<=SHARED_LOCK ); - if( pFile->locktype<=locktype ){ +static int unixTruncate(sqlite3_file *id, i64 nByte){ + int rc; + assert( id ); + SimulateIOError( return SQLITE_IOERR_TRUNCATE ); + rc = ftruncate(((unixFile*)id)->h, (off_t)nByte); + if( rc ){ + ((unixFile*)id)->lastErrno = errno; + return SQLITE_IOERR_TRUNCATE; + }else{ return SQLITE_OK; } - if( CHECK_THREADID(pFile) ){ - return SQLITE_MISUSE; - } - sqlite3OsEnterMutex(); - pLock = pFile->pLock; - assert( pLock->cnt!=0 ); - if( pFile->locktype>SHARED_LOCK ){ - assert( pLock->locktype==pFile->locktype ); - if( locktype==SHARED_LOCK ){ - lock.l_type = F_RDLCK; - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - if( fcntl(pFile->h, F_SETLK, &lock)==(-1) ){ - /* This should never happen */ - rc = SQLITE_IOERR_RDLOCK; - } - } - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = PENDING_BYTE; - lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); - if( fcntl(pFile->h, F_SETLK, &lock)!=(-1) ){ - pLock->locktype = SHARED_LOCK; - }else{ - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - } +} + +/* +** Determine the current size of a file in bytes +*/ +static int unixFileSize(sqlite3_file *id, i64 *pSize){ + int rc; + struct stat buf; + assert( id ); + rc = fstat(((unixFile*)id)->h, &buf); + SimulateIOError( rc=1 ); + if( rc!=0 ){ + ((unixFile*)id)->lastErrno = errno; + return SQLITE_IOERR_FSTAT; } - if( locktype==NO_LOCK ){ - struct openCnt *pOpen; + *pSize = buf.st_size; - /* Decrement the shared lock counter. Release the lock using an - ** OS call only when all threads in this same process have released - ** the lock. - */ - pLock->cnt--; - if( pLock->cnt==0 ){ - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = lock.l_len = 0L; - if( fcntl(pFile->h, F_SETLK, &lock)!=(-1) ){ - pLock->locktype = NO_LOCK; - }else{ - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - } - } + /* When opening a zero-size database, the findLockInfo() procedure + ** writes a single byte into that file in order to work around a bug + ** in the OS-X msdos filesystem. In order to avoid problems with upper + ** layers, we need to report this file size as zero even though it is + ** really 1. Ticket #3260. 
+ */ + if( *pSize==1 ) *pSize = 0; - /* Decrement the count of locks against this same file. When the - ** count reaches zero, close any other file descriptors whose close - ** was deferred because of outstanding locks. - */ - pOpen = pFile->pOpen; - pOpen->nLock--; - assert( pOpen->nLock>=0 ); - if( pOpen->nLock==0 && pOpen->nPending>0 ){ - int i; - for(i=0; inPending; i++){ - close(pOpen->aPending[i]); - } - free(pOpen->aPending); - pOpen->nPending = 0; - pOpen->aPending = 0; - } - } - sqlite3OsLeaveMutex(); - pFile->locktype = locktype; - return rc; + + return SQLITE_OK; } +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) /* -** Close a file. +** Handler for proxy-locking file-control verbs. Defined below in the +** proxying locking division. */ -static int unixClose(OsFile **pId){ - unixFile *id = (unixFile*)*pId; +static int proxyFileControl(sqlite3_file*,int,void*); +#endif + - if( !id ) return SQLITE_OK; - unixUnlock(*pId, NO_LOCK); - if( id->dirfd>=0 ) close(id->dirfd); - id->dirfd = -1; - sqlite3OsEnterMutex(); - - if( id->pOpen->nLock ){ - /* If there are outstanding locks, do not actually close the file just - ** yet because that would clear those locks. Instead, add the file - ** descriptor to pOpen->aPending. It will be automatically closed when - ** the last lock is cleared. +/* +** Information and control of an open file handle. +*/ +static int unixFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = ((unixFile*)id)->locktype; + return SQLITE_OK; + } + case SQLITE_LAST_ERRNO: { + *(int*)pArg = ((unixFile*)id)->lastErrno; + return SQLITE_OK; + } +#ifndef NDEBUG + /* The pager calls this method to signal that it has done + ** a rollback and that the database is therefore unchanged and + ** it hence it is OK for the transaction change counter to be + ** unchanged. */ - int *aNew; - struct openCnt *pOpen = id->pOpen; - aNew = realloc( pOpen->aPending, (pOpen->nPending+1)*sizeof(int) ); - if( aNew==0 ){ - /* If a malloc fails, just leak the file descriptor */ - }else{ - pOpen->aPending = aNew; - pOpen->aPending[pOpen->nPending] = id->h; - pOpen->nPending++; + case SQLITE_FCNTL_DB_UNCHANGED: { + ((unixFile*)id)->dbUpdate = 0; + return SQLITE_OK; } - }else{ - /* There are no outstanding locks so we can close the file immediately */ - close(id->h); +#endif +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + case SQLITE_SET_LOCKPROXYFILE: + case SQLITE_GET_LOCKPROXYFILE: { + return proxyFileControl(id,op,pArg); + } +#endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ } - releaseLockInfo(id->pLock); - releaseOpenCnt(id->pOpen); + return SQLITE_ERROR; +} - sqlite3OsLeaveMutex(); - id->isOpen = 0; - OSTRACE2("CLOSE %-3d\n", id->h); - OpenCounter(-1); - sqlite3ThreadSafeFree(id); - *pId = 0; - return SQLITE_OK; +/* +** Return the sector size in bytes of the underlying block device for +** the specified file. This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and its journal file) that the sector size will be the +** same for both. +*/ +static int unixSectorSize(sqlite3_file *NotUsed){ + UNUSED_PARAMETER(NotUsed); + return SQLITE_DEFAULT_SECTOR_SIZE; } +/* +** Return the device characteristics for the file. This is always 0 for unix. 
+*/
+static int unixDeviceCharacteristics(sqlite3_file *NotUsed){
+  UNUSED_PARAMETER(NotUsed);
+  return 0;
+}
-#ifdef SQLITE_ENABLE_LOCKING_STYLE
-#pragma mark AFP Support
+/*
+** Here ends the implementation of all sqlite3_file methods.
+**
+********************** End sqlite3_file Methods *******************************
+******************************************************************************/
 /*
- ** The afpLockingContext structure contains all afp lock specific state
- */
-typedef struct afpLockingContext afpLockingContext;
-struct afpLockingContext {
-  unsigned long long sharedLockByte;
-  char *filePath;
-};
+** This division contains definitions of sqlite3_io_methods objects that
+** implement various file locking strategies. It also contains definitions
+** of "finder" functions. A finder-function is used to locate the appropriate
+** sqlite3_io_methods object for a particular database file. The pAppData
+** fields of the sqlite3_vfs VFS objects are initialized to be pointers to
+** the correct finder-function for that VFS.
+**
+** Most finder functions return a pointer to a fixed sqlite3_io_methods
+** object. The only interesting finder-function is autolockIoFinder, which
+** looks at the filesystem type and tries to guess the best locking
+** strategy from that.
+**
+** For finder-function F, two objects are created:
+**
+**    (1) The real finder-function named "FImpl()".
+**
+**    (2) A constant pointer to this function named just "F".
+**
+**
+** A pointer to the F pointer is used as the pAppData value for VFS
+** objects. We have to do this instead of letting pAppData point
+** directly at the finder-function since C90 rules prevent a void*
+** from being cast into a function pointer.
+**
+**
+** Each instance of this macro generates two objects:
+**
+**   *  A constant sqlite3_io_methods object called METHOD that has locking
+**      methods CLOSE, LOCK, UNLOCK, CKLOCK.
+**
+**   *  An I/O method finder function called FINDER that returns a pointer
+**      to the METHOD object in the previous bullet.
+*/
+#define IOMETHODS(FINDER, METHOD, CLOSE, LOCK, UNLOCK, CKLOCK)       \
+static const sqlite3_io_methods METHOD = {                           \
+   1,                          /* iVersion */                        \
+   CLOSE,                      /* xClose */                          \
+   unixRead,                   /* xRead */                           \
+   unixWrite,                  /* xWrite */                          \
+   unixTruncate,               /* xTruncate */                       \
+   unixSync,                   /* xSync */                           \
+   unixFileSize,               /* xFileSize */                       \
+   LOCK,                       /* xLock */                           \
+   UNLOCK,                     /* xUnlock */                         \
+   CKLOCK,                     /* xCheckReservedLock */              \
+   unixFileControl,            /* xFileControl */                    \
+   unixSectorSize,             /* xSectorSize */                     \
+   unixDeviceCharacteristics   /* xDeviceCharacteristics */          \
+};                                                                   \
+static const sqlite3_io_methods *FINDER##Impl(const char *z, int h){ \
+  UNUSED_PARAMETER(z); UNUSED_PARAMETER(h);                          \
+  return &METHOD;                                                    \
+}                                                                    \
+static const sqlite3_io_methods *(*const FINDER)(const char*,int)    \
+    = FINDER##Impl;
+
+/*
+** Here are all of the sqlite3_io_methods objects for each of the
+** locking strategies. Functions that return pointers to these methods
+** are also created.
+*/ +IOMETHODS( + posixIoFinder, /* Finder function name */ + posixIoMethods, /* sqlite3_io_methods object name */ + unixClose, /* xClose method */ + unixLock, /* xLock method */ + unixUnlock, /* xUnlock method */ + unixCheckReservedLock /* xCheckReservedLock method */ +) +IOMETHODS( + nolockIoFinder, /* Finder function name */ + nolockIoMethods, /* sqlite3_io_methods object name */ + nolockClose, /* xClose method */ + nolockLock, /* xLock method */ + nolockUnlock, /* xUnlock method */ + nolockCheckReservedLock /* xCheckReservedLock method */ +) +IOMETHODS( + dotlockIoFinder, /* Finder function name */ + dotlockIoMethods, /* sqlite3_io_methods object name */ + dotlockClose, /* xClose method */ + dotlockLock, /* xLock method */ + dotlockUnlock, /* xUnlock method */ + dotlockCheckReservedLock /* xCheckReservedLock method */ +) + +#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS +IOMETHODS( + flockIoFinder, /* Finder function name */ + flockIoMethods, /* sqlite3_io_methods object name */ + flockClose, /* xClose method */ + flockLock, /* xLock method */ + flockUnlock, /* xUnlock method */ + flockCheckReservedLock /* xCheckReservedLock method */ +) +#endif + +#if OS_VXWORKS +IOMETHODS( + semIoFinder, /* Finder function name */ + semIoMethods, /* sqlite3_io_methods object name */ + semClose, /* xClose method */ + semLock, /* xLock method */ + semUnlock, /* xUnlock method */ + semCheckReservedLock /* xCheckReservedLock method */ +) +#endif + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +IOMETHODS( + afpIoFinder, /* Finder function name */ + afpIoMethods, /* sqlite3_io_methods object name */ + afpClose, /* xClose method */ + afpLock, /* xLock method */ + afpUnlock, /* xUnlock method */ + afpCheckReservedLock /* xCheckReservedLock method */ +) +#endif + +/* +** The proxy locking method is a "super-method" in the sense that it +** opens secondary file descriptors for the conch and lock files and +** it uses proxy, dot-file, AFP, and flock() locking methods on those +** secondary files. For this reason, the division that implements +** proxy locking is located much further down in the file. But we need +** to go ahead and define the sqlite3_io_methods and finder function +** for proxy locking here. So we forward declare the I/O methods. +*/ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +static int proxyClose(sqlite3_file*); +static int proxyLock(sqlite3_file*, int); +static int proxyUnlock(sqlite3_file*, int); +static int proxyCheckReservedLock(sqlite3_file*, int*); +IOMETHODS( + proxyIoFinder, /* Finder function name */ + proxyIoMethods, /* sqlite3_io_methods object name */ + proxyClose, /* xClose method */ + proxyLock, /* xLock method */ + proxyUnlock, /* xUnlock method */ + proxyCheckReservedLock /* xCheckReservedLock method */ +) +#endif -struct ByteRangeLockPB2 -{ - unsigned long long offset; /* offset to first byte to lock */ - unsigned long long length; /* nbr of bytes to lock */ - unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ - unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ - unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ - int fd; /* file desc to assoc this lock with */ -}; -#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +/* +** This "finder" function attempts to determine the best locking strategy +** for the database file "filePath". It then returns the sqlite3_io_methods +** object that implements that strategy. 
+** +** This is for MacOSX only. +*/ +static const sqlite3_io_methods *autolockIoFinderImpl( + const char *filePath, /* name of the database file */ + int fd /* file descriptor open on the database file */ +){ + static const struct Mapping { + const char *zFilesystem; /* Filesystem type name */ + const sqlite3_io_methods *pMethods; /* Appropriate locking method */ + } aMap[] = { + { "hfs", &posixIoMethods }, + { "ufs", &posixIoMethods }, + { "afpfs", &afpIoMethods }, +#ifdef SQLITE_ENABLE_AFP_LOCKING_SMB + { "smbfs", &afpIoMethods }, +#else + { "smbfs", &flockIoMethods }, +#endif + { "webdav", &nolockIoMethods }, + { 0, 0 } + }; + int i; + struct statfs fsInfo; + struct flock lockInfo; -/* return 0 on success, 1 on failure. To match the behavior of the - normal posix file locking (used in unixLock for example), we should - provide 'richer' return codes - specifically to differentiate between - 'file busy' and 'file system error' results */ -static int _AFPFSSetLock(const char *path, int fd, unsigned long long offset, - unsigned long long length, int setLockFlag) -{ - struct ByteRangeLockPB2 pb; - int err; - - pb.unLockFlag = setLockFlag ? 0 : 1; - pb.startEndFlag = 0; - pb.offset = offset; - pb.length = length; - pb.fd = fd; - OSTRACE5("AFPLOCK setting lock %s for %d in range %llx:%llx\n", - (setLockFlag?"ON":"OFF"), fd, offset, length); - err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); - if ( err==-1 ) { - OSTRACE4("AFPLOCK failed to fsctl() '%s' %d %s\n", path, errno, - strerror(errno)); - return 1; /* error */ - } else { - return 0; + if( !filePath ){ + /* If filePath==NULL that means we are dealing with a transient file + ** that does not need to be locked. */ + return &nolockIoMethods; + } + if( statfs(filePath, &fsInfo) != -1 ){ + if( fsInfo.f_flags & MNT_RDONLY ){ + return &nolockIoMethods; + } + for(i=0; aMap[i].zFilesystem; i++){ + if( strcmp(fsInfo.f_fstypename, aMap[i].zFilesystem)==0 ){ + return aMap[i].pMethods; + } + } + } + + /* Default case. Handles, amongst others, "nfs". + ** Test byte-range lock using fcntl(). If the call succeeds, + ** assume that the file-system supports POSIX style locks. + */ + lockInfo.l_len = 1; + lockInfo.l_start = 0; + lockInfo.l_whence = SEEK_SET; + lockInfo.l_type = F_RDLCK; + if( fcntl(fd, F_GETLK, &lockInfo)!=-1 ) { + return &posixIoMethods; + }else{ + return &dotlockIoMethods; } } +static const sqlite3_io_methods *(*const autolockIoFinder)(const char*,int) + = autolockIoFinderImpl; -/* - ** This routine checks if there is a RESERVED lock held on the specified - ** file by this or any other process. If such a lock is held, return - ** non-zero. If the file is unlocked or holds only SHARED locks, then - ** return zero. - */ -static int afpUnixCheckReservedLock(OsFile *id){ - int r = 0; - unixFile *pFile = (unixFile*)id; - - assert( pFile ); - afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; - - /* Check if a thread in this process holds such a lock */ - if( pFile->locktype>SHARED_LOCK ){ - r = 1; +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ + +#if OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE +/* +** This "finder" function attempts to determine the best locking strategy +** for the database file "filePath". It then returns the sqlite3_io_methods +** object that implements that strategy. +** +** This is for VXWorks only. 
+*/ +static const sqlite3_io_methods *autolockIoFinderImpl( + const char *filePath, /* name of the database file */ + int fd /* file descriptor open on the database file */ +){ + struct flock lockInfo; + + if( !filePath ){ + /* If filePath==NULL that means we are dealing with a transient file + ** that does not need to be locked. */ + return &nolockIoMethods; } - - /* Otherwise see if some other process holds it. - */ - if ( !r ) { - /* lock the byte */ - int failed = _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1,1); - if (failed) { - /* if we failed to get the lock then someone else must have it */ - r = 1; - } else { - /* if we succeeded in taking the reserved lock, unlock it to restore - ** the original state */ - _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1, 0); - } + + /* Test if fcntl() is supported and use POSIX style locks. + ** Otherwise fall back to the named semaphore method. + */ + lockInfo.l_len = 1; + lockInfo.l_start = 0; + lockInfo.l_whence = SEEK_SET; + lockInfo.l_type = F_RDLCK; + if( fcntl(fd, F_GETLK, &lockInfo)!=-1 ) { + return &posixIoMethods; + }else{ + return &semIoMethods; } - OSTRACE3("TEST WR-LOCK %d %d\n", pFile->h, r); - - return r; } +static const sqlite3_io_methods *(*const autolockIoFinder)(const char*,int) + = autolockIoFinderImpl; -/* AFP-style locking following the behavior of unixLock, see the unixLock -** function comments for details of lock management. */ -static int afpUnixLock(OsFile *id, int locktype) -{ +#endif /* OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE */ + +/* +** An abstract type for a pointer to a IO method finder function: +*/ +typedef const sqlite3_io_methods *(*finder_type)(const char*,int); + + +/**************************************************************************** +**************************** sqlite3_vfs methods **************************** +** +** This division contains the implementation of methods on the +** sqlite3_vfs object. +*/ + +/* +** Initialize the contents of the unixFile structure pointed to by pId. +*/ +static int fillInUnixFile( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + int h, /* Open file descriptor of file being opened */ + int dirfd, /* Directory file descriptor */ + sqlite3_file *pId, /* Write to the unixFile structure here */ + const char *zFilename, /* Name of the file being opened */ + int noLock, /* Omit locking if true */ + int isDelete /* Delete on close if true */ +){ + const sqlite3_io_methods *pLockingStyle; + unixFile *pNew = (unixFile *)pId; int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; - int gotPendingLock = 0; - - assert( pFile ); - OSTRACE5("LOCK %d %s was %s pid=%d\n", pFile->h, - locktypeName(locktype), locktypeName(pFile->locktype), getpid()); - /* If there is already a lock of this type or more restrictive on the - ** OsFile, do nothing. Don't use the afp_end_lock: exit path, as - ** sqlite3OsEnterMutex() hasn't been called yet. - */ - if( pFile->locktype>=locktype ){ - OSTRACE3("LOCK %d %s ok (already held)\n", pFile->h, - locktypeName(locktype)); - return SQLITE_OK; + + assert( pNew->pLock==NULL ); + assert( pNew->pOpen==NULL ); + + /* Parameter isDelete is only used on vxworks. + ** Express this explicitly here to prevent compiler warnings + ** about unused parameters. 
+ */ +#if !OS_VXWORKS + UNUSED_PARAMETER(isDelete); +#endif + + OSTRACE3("OPEN %-3d %s\n", h, zFilename); + pNew->h = h; + pNew->dirfd = dirfd; + SET_THREADID(pNew); + +#if OS_VXWORKS + pNew->pId = vxworksFindFileId(zFilename); + if( pNew->pId==0 ){ + noLock = 1; + rc = SQLITE_NOMEM; } +#endif - /* Make sure the locking sequence is correct - */ - assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); - assert( locktype!=PENDING_LOCK ); - assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); - - /* This mutex is needed because pFile->pLock is shared across threads - */ - sqlite3OsEnterMutex(); + if( noLock ){ + pLockingStyle = &nolockIoMethods; + }else{ + pLockingStyle = (**(finder_type*)pVfs->pAppData)(zFilename, h); +#if SQLITE_ENABLE_LOCKING_STYLE + /* Cache zFilename in the locking context (AFP and dotlock override) for + ** proxyLock activation is possible (remote proxy is based on db name) + ** zFilename remains valid until file is closed, to support */ + pNew->lockingContext = (void*)zFilename; +#endif + } - /* Make sure the current thread owns the pFile. - */ - rc = transferOwnership(pFile); - if( rc!=SQLITE_OK ){ - sqlite3OsLeaveMutex(); - return rc; + if( pLockingStyle == &posixIoMethods ){ + unixEnterMutex(); + rc = findLockInfo(pNew, &pNew->pLock, &pNew->pOpen); + unixLeaveMutex(); } - - /* A PENDING lock is needed before acquiring a SHARED lock and before - ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will - ** be released. + +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + else if( pLockingStyle == &afpIoMethods ){ + /* AFP locking uses the file path so it needs to be included in + ** the afpLockingContext. */ - if( locktype==SHARED_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktypefilePath, pFile->h, - PENDING_BYTE, 1, 1); - if (failed) { - rc = SQLITE_BUSY; - goto afp_end_lock; + afpLockingContext *pCtx; + pNew->lockingContext = pCtx = sqlite3_malloc( sizeof(*pCtx) ); + if( pCtx==0 ){ + rc = SQLITE_NOMEM; + }else{ + /* NB: zFilename exists and remains valid until the file is closed + ** according to requirement F11141. So we do not need to make a + ** copy of the filename. */ + pCtx->dbPath = zFilename; + srandomdev(); + unixEnterMutex(); + rc = findLockInfo(pNew, NULL, &pNew->pOpen); + unixLeaveMutex(); } } - - /* If control gets to this point, then actually go ahead and make - ** operating system calls for the specified lock. +#endif + + else if( pLockingStyle == &dotlockIoMethods ){ + /* Dotfile locking uses the file path so it needs to be included in + ** the dotlockLockingContext */ - if( locktype==SHARED_LOCK ){ - int lk, failed; - int tries = 0; - - /* Now get the read-lock */ - /* note that the quality of the randomness doesn't matter that much */ - lk = random(); - context->sharedLockByte = (lk & 0x7fffffff)%(SHARED_SIZE - 1); - failed = _AFPFSSetLock(context->filePath, pFile->h, - SHARED_FIRST+context->sharedLockByte, 1, 1); - - /* Drop the temporary PENDING lock */ - if (_AFPFSSetLock(context->filePath, pFile->h, PENDING_BYTE, 1, 0)) { - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - goto afp_end_lock; - } - - if( failed ){ - rc = SQLITE_BUSY; - } else { - pFile->locktype = SHARED_LOCK; + char *zLockFile; + int nFilename; + nFilename = (int)strlen(zFilename) + 6; + zLockFile = (char *)sqlite3_malloc(nFilename); + if( zLockFile==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_snprintf(nFilename, zLockFile, "%s" DOTLOCK_SUFFIX, zFilename); } - }else{ - /* The request was for a RESERVED or EXCLUSIVE lock. 
It is - ** assumed that there is a SHARED or greater lock on the file - ** already. + pNew->lockingContext = zLockFile; + } + +#if OS_VXWORKS + else if( pLockingStyle == &semIoMethods ){ + /* Named semaphore locking uses the file path so it needs to be + ** included in the semLockingContext */ - int failed = 0; - assert( 0!=pFile->locktype ); - if (locktype >= RESERVED_LOCK && pFile->locktype < RESERVED_LOCK) { - /* Acquire a RESERVED lock */ - failed = _AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1,1); - } - if (!failed && locktype == EXCLUSIVE_LOCK) { - /* Acquire an EXCLUSIVE lock */ - - /* Remove the shared lock before trying the range. we'll need to - ** reestablish the shared lock if we can't get the afpUnixUnlock - */ - if (!_AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST + - context->sharedLockByte, 1, 0)) { - /* now attemmpt to get the exclusive lock range */ - failed = _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST, - SHARED_SIZE, 1); - if (failed && _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST + - context->sharedLockByte, 1, 1)) { - rc = SQLITE_IOERR_RDLOCK; /* this should never happen */ - } - } else { - /* */ - rc = SQLITE_IOERR_UNLOCK; /* this should never happen */ + unixEnterMutex(); + rc = findLockInfo(pNew, &pNew->pLock, &pNew->pOpen); + if( (rc==SQLITE_OK) && (pNew->pOpen->pSem==NULL) ){ + char *zSemName = pNew->pOpen->aSemName; + int n; + sqlite3_snprintf(MAX_PATHNAME, zSemName, "%s.sem", + pNew->pId->zCanonicalName); + for( n=0; zSemName[n]; n++ ) + if( zSemName[n]=='/' ) zSemName[n] = '_'; + pNew->pOpen->pSem = sem_open(zSemName, O_CREAT, 0666, 1); + if( pNew->pOpen->pSem == SEM_FAILED ){ + rc = SQLITE_NOMEM; + pNew->pOpen->aSemName[0] = '\0'; } } - if( failed && rc == SQLITE_OK){ - rc = SQLITE_BUSY; - } + unixLeaveMutex(); } +#endif - if( rc==SQLITE_OK ){ - pFile->locktype = locktype; - }else if( locktype==EXCLUSIVE_LOCK ){ - pFile->locktype = PENDING_LOCK; + pNew->lastErrno = 0; +#if OS_VXWORKS + if( rc!=SQLITE_OK ){ + unlink(zFilename); + isDelete = 0; + } + pNew->isDelete = isDelete; +#endif + if( rc!=SQLITE_OK ){ + if( dirfd>=0 ) close(dirfd); /* silent leak if fail, already in error */ + close(h); + }else{ + pNew->pMethod = pLockingStyle; + OpenCounter(+1); } - -afp_end_lock: - sqlite3OsLeaveMutex(); - OSTRACE4("LOCK %d %s %s\n", pFile->h, locktypeName(locktype), - rc==SQLITE_OK ? "ok" : "failed"); return rc; } /* - ** Lower the locking level on file descriptor pFile to locktype. locktype - ** must be either NO_LOCK or SHARED_LOCK. - ** - ** If the locking level of the file descriptor is already at or below - ** the requested locking level, this routine is a no-op. 
- */ -static int afpUnixUnlock(OsFile *id, int locktype) { - struct flock lock; - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; - - assert( pFile ); - OSTRACE5("UNLOCK %d %d was %d pid=%d\n", pFile->h, locktype, - pFile->locktype, getpid()); - - assert( locktype<=SHARED_LOCK ); - if( pFile->locktype<=locktype ){ - return SQLITE_OK; - } - if( CHECK_THREADID(pFile) ){ - return SQLITE_MISUSE; - } - sqlite3OsEnterMutex(); - if( pFile->locktype>SHARED_LOCK ){ - if( locktype==SHARED_LOCK ){ - int failed = 0; - - /* unlock the exclusive range - then re-establish the shared lock */ - if (pFile->locktype==EXCLUSIVE_LOCK) { - failed = _AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST, - SHARED_SIZE, 0); - if (!failed) { - /* successfully removed the exclusive lock */ - if (_AFPFSSetLock(context->filePath, pFile->h, SHARED_FIRST+ - context->sharedLockByte, 1, 1)) { - /* failed to re-establish our shared lock */ - rc = SQLITE_IOERR_RDLOCK; /* This should never happen */ - } - } else { - /* This should never happen - failed to unlock the exclusive range */ - rc = SQLITE_IOERR_UNLOCK; - } - } - } - if (rc == SQLITE_OK && pFile->locktype>=PENDING_LOCK) { - if (_AFPFSSetLock(context->filePath, pFile->h, PENDING_BYTE, 1, 0)){ - /* failed to release the pending lock */ - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - } - } - if (rc == SQLITE_OK && pFile->locktype>=RESERVED_LOCK) { - if (_AFPFSSetLock(context->filePath, pFile->h, RESERVED_BYTE, 1, 0)) { - /* failed to release the reserved lock */ - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ - } - } - } - if( locktype==NO_LOCK ){ - int failed = _AFPFSSetLock(context->filePath, pFile->h, - SHARED_FIRST + context->sharedLockByte, 1, 0); - if (failed) { - rc = SQLITE_IOERR_UNLOCK; /* This should never happen */ +** Open a file descriptor to the directory containing file zFilename. +** If successful, *pFd is set to the opened file descriptor and +** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM +** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined +** value. +** +** If SQLITE_OK is returned, the caller is responsible for closing +** the file descriptor *pFd using close(). +*/ +static int openDirectory(const char *zFilename, int *pFd){ + int ii; + int fd = -1; + char zDirname[MAX_PATHNAME+1]; + + sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); + for(ii=(int)strlen(zDirname); ii>1 && zDirname[ii]!='/'; ii--); + if( ii>0 ){ + zDirname[ii] = '\0'; + fd = open(zDirname, O_RDONLY|O_BINARY, 0); + if( fd>=0 ){ +#ifdef FD_CLOEXEC + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD, 0) | FD_CLOEXEC); +#endif + OSTRACE3("OPENDIR %-3d %s\n", fd, zDirname); } } - if (rc == SQLITE_OK) - pFile->locktype = locktype; - sqlite3OsLeaveMutex(); - return rc; + *pFd = fd; + return (fd>=0?SQLITE_OK:SQLITE_CANTOPEN); } /* - ** Close a file & cleanup AFP specific locking context - */ -static int afpUnixClose(OsFile **pId) { - unixFile *id = (unixFile*)*pId; +** Create a temporary file name in zBuf. zBuf must be allocated +** by the calling process and must be big enough to hold at least +** pVfs->mxPathname bytes. 
+*/ +static int getTempname(int nBuf, char *zBuf){ + static const char *azDirs[] = { + 0, + 0, + "/var/tmp", + "/usr/tmp", + "/tmp", + ".", + }; + static const unsigned char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + unsigned int i, j; + struct stat buf; + const char *zDir = "."; + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. + */ + SimulateIOError( return SQLITE_IOERR ); + + azDirs[0] = sqlite3_temp_directory; + if (NULL == azDirs[1]) { + azDirs[1] = getenv("TMPDIR"); + } - if( !id ) return SQLITE_OK; - afpUnixUnlock(*pId, NO_LOCK); - /* free the AFP locking structure */ - if (id->lockingContext != NULL) { - if (((afpLockingContext *)id->lockingContext)->filePath != NULL) - sqlite3ThreadSafeFree(((afpLockingContext*)id->lockingContext)->filePath); - sqlite3ThreadSafeFree(id->lockingContext); - } - - if( id->dirfd>=0 ) close(id->dirfd); - id->dirfd = -1; - close(id->h); - id->isOpen = 0; - OSTRACE2("CLOSE %-3d\n", id->h); - OpenCounter(-1); - sqlite3ThreadSafeFree(id); - *pId = 0; + for(i=0; i= (size_t)nBuf ){ + return SQLITE_ERROR; + } + + do{ + sqlite3_snprintf(nBuf-17, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX, zDir); + j = (int)strlen(zBuf); + sqlite3_randomness(15, &zBuf[j]); + for(i=0; i<15; i++, j++){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + } + zBuf[j] = 0; + }while( access(zBuf,0)==0 ); return SQLITE_OK; } +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) +/* +** Routine to transform a unixFile into a proxy-locking unixFile. +** Implementation in the proxy-lock division, but used by unixOpen() +** if SQLITE_PREFER_PROXY_LOCKING is defined. +*/ +static int proxyTransformUnixFile(unixFile*, const char*); +#endif -#pragma mark flock() style locking /* - ** The flockLockingContext is not used - */ -typedef void flockLockingContext; +** Open the file zPath. +** +** Previously, the SQLite OS layer used three functions in place of this +** one: +** +** sqlite3OsOpenReadWrite(); +** sqlite3OsOpenReadOnly(); +** sqlite3OsOpenExclusive(); +** +** These calls correspond to the following combinations of flags: +** +** ReadWrite() -> (READWRITE | CREATE) +** ReadOnly() -> (READONLY) +** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE) +** +** The old OpenExclusive() accepted a boolean argument - "delFlag". If +** true, the file was configured to be automatically deleted when the +** file handle closed. To achieve the same effect using this new +** interface, add the DELETEONCLOSE flag to those specified above for +** OpenExclusive(). 
+*/ +static int unixOpen( + sqlite3_vfs *pVfs, /* The VFS for which this is the xOpen method */ + const char *zPath, /* Pathname of file to be opened */ + sqlite3_file *pFile, /* The file descriptor to be filled in */ + int flags, /* Input flags to control the opening */ + int *pOutFlags /* Output flags returned to SQLite core */ +){ + int fd = -1; /* File descriptor returned by open() */ + int dirfd = -1; /* Directory file descriptor */ + int openFlags = 0; /* Flags to pass to open() */ + int eType = flags&0xFFFFFF00; /* Type of file to open */ + int noLock; /* True to omit locking primitives */ + int rc = SQLITE_OK; -static int flockUnixCheckReservedLock(OsFile *id) { - unixFile *pFile = (unixFile*)id; - - if (pFile->locktype == RESERVED_LOCK) { - return 1; /* already have a reserved lock */ - } else { - /* attempt to get the lock */ - int rc = flock(pFile->h, LOCK_EX | LOCK_NB); - if (!rc) { - /* got the lock, unlock it */ - flock(pFile->h, LOCK_UN); - return 0; /* no one has it reserved */ + int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); + int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); + int isCreate = (flags & SQLITE_OPEN_CREATE); + int isReadonly = (flags & SQLITE_OPEN_READONLY); + int isReadWrite = (flags & SQLITE_OPEN_READWRITE); + + /* If creating a master or main-file journal, this function will open + ** a file-descriptor on the directory too. The first time unixSync() + ** is called the directory file descriptor will be fsync()ed and close()d. + */ + int isOpenDirectory = (isCreate && + (eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL) + ); + + /* If argument zPath is a NULL pointer, this function is required to open + ** a temporary file. Use this buffer to store the file name in. + */ + char zTmpname[MAX_PATHNAME+1]; + const char *zName = zPath; + + /* Check the following statements are true: + ** + ** (a) Exactly one of the READWRITE and READONLY flags must be set, and + ** (b) if CREATE is set, then READWRITE must also be set, and + ** (c) if EXCLUSIVE is set, then CREATE must also be set. + ** (d) if DELETEONCLOSE is set, then CREATE must also be set. + */ + assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); + assert(isCreate==0 || isReadWrite); + assert(isExclusive==0 || isCreate); + assert(isDelete==0 || isCreate); + + /* The main DB, main journal, and master journal are never automatically + ** deleted + */ + assert( eType!=SQLITE_OPEN_MAIN_DB || !isDelete ); + assert( eType!=SQLITE_OPEN_MAIN_JOURNAL || !isDelete ); + assert( eType!=SQLITE_OPEN_MASTER_JOURNAL || !isDelete ); + + /* Assert that the upper layer has set one of the "file-type" flags. */ + assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB + || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL + || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_TRANSIENT_DB + ); + + memset(pFile, 0, sizeof(unixFile)); + + if( !zName ){ + assert(isDelete && !isOpenDirectory); + rc = getTempname(MAX_PATHNAME+1, zTmpname); + if( rc!=SQLITE_OK ){ + return rc; } - return 1; /* someone else might have it reserved */ + zName = zTmpname; } -} -static int flockUnixLock(OsFile *id, int locktype) { - unixFile *pFile = (unixFile*)id; - - /* if we already have a lock, it is exclusive. - ** Just adjust level and punt on outta here. 
*/ - if (pFile->locktype > NO_LOCK) { - pFile->locktype = locktype; - return SQLITE_OK; + if( isReadonly ) openFlags |= O_RDONLY; + if( isReadWrite ) openFlags |= O_RDWR; + if( isCreate ) openFlags |= O_CREAT; + if( isExclusive ) openFlags |= (O_EXCL|O_NOFOLLOW); + openFlags |= (O_LARGEFILE|O_BINARY); + + fd = open(zName, openFlags, isDelete?0600:SQLITE_DEFAULT_FILE_PERMISSIONS); + OSTRACE4("OPENX %-3d %s 0%o\n", fd, zName, openFlags); + if( fd<0 && errno!=EISDIR && isReadWrite && !isExclusive ){ + /* Failed to open the file for read/write access. Try read-only. */ + flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); + flags |= SQLITE_OPEN_READONLY; + return unixOpen(pVfs, zPath, pFile, flags, pOutFlags); } - - /* grab an exclusive lock */ - int rc = flock(pFile->h, LOCK_EX | LOCK_NB); - if (rc) { - /* didn't get, must be busy */ - return SQLITE_BUSY; - } else { - /* got it, set the type and return ok */ - pFile->locktype = locktype; - return SQLITE_OK; + if( fd<0 ){ + return SQLITE_CANTOPEN; + } + if( isDelete ){ +#if OS_VXWORKS + zPath = zName; +#else + unlink(zName); +#endif + } +#if SQLITE_ENABLE_LOCKING_STYLE + else{ + ((unixFile*)pFile)->openFlags = openFlags; + } +#endif + if( pOutFlags ){ + *pOutFlags = flags; } -} -static int flockUnixUnlock(OsFile *id, int locktype) { - unixFile *pFile = (unixFile*)id; - - assert( locktype<=SHARED_LOCK ); - - /* no-op if possible */ - if( pFile->locktype==locktype ){ - return SQLITE_OK; +#ifndef NDEBUG + if( (flags & SQLITE_OPEN_MAIN_DB)!=0 ){ + ((unixFile*)pFile)->isLockable = 1; } - - /* shared can just be set because we always have an exclusive */ - if (locktype==SHARED_LOCK) { - pFile->locktype = locktype; - return SQLITE_OK; +#endif + + assert( fd>=0 ); + if( isOpenDirectory ){ + rc = openDirectory(zPath, &dirfd); + if( rc!=SQLITE_OK ){ + close(fd); /* silently leak if fail, already in error */ + return rc; + } + } + +#ifdef FD_CLOEXEC + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD, 0) | FD_CLOEXEC); +#endif + + noLock = eType!=SQLITE_OPEN_MAIN_DB; + +#if SQLITE_PREFER_PROXY_LOCKING + if( zPath!=NULL && !noLock ){ + char *envforce = getenv("SQLITE_FORCE_PROXY_LOCKING"); + int useProxy = 0; + + /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, + ** 0 means never use proxy, NULL means use proxy for non-local files only + */ + if( envforce!=NULL ){ + useProxy = atoi(envforce)>0; + }else{ + struct statfs fsInfo; + + if( statfs(zPath, &fsInfo) == -1 ){ + ((unixFile*)pFile)->lastErrno = errno; + if( dirfd>=0 ) close(dirfd); /* silently leak if fail, in error */ + close(fd); /* silently leak if fail, in error */ + return SQLITE_IOERR_ACCESS; + } + useProxy = !(fsInfo.f_flags&MNT_LOCAL); + } + if( useProxy ){ + rc = fillInUnixFile(pVfs, fd, dirfd, pFile, zPath, noLock, isDelete); + if( rc==SQLITE_OK ){ + rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:"); + } + return rc; + } } +#endif - /* no, really, unlock. */ - int rc = flock(pFile->h, LOCK_UN); - if (rc) - return SQLITE_IOERR_UNLOCK; - else { - pFile->locktype = NO_LOCK; - return SQLITE_OK; + return fillInUnixFile(pVfs, fd, dirfd, pFile, zPath, noLock, isDelete); +} + +/* +** Delete the file at zPath. If the dirSync argument is true, fsync() +** the directory after deleting the file. 
+*/
+static int unixDelete(
+  sqlite3_vfs *NotUsed,     /* VFS containing this as the xDelete method */
+  const char *zPath,        /* Name of file to be deleted */
+  int dirSync               /* If true, fsync() directory after deleting file */
+){
+  int rc = SQLITE_OK;
+  UNUSED_PARAMETER(NotUsed);
+  SimulateIOError(return SQLITE_IOERR_DELETE);
+  unlink(zPath);
+#ifndef SQLITE_DISABLE_DIRSYNC
+  if( dirSync ){
+    int fd;
+    rc = openDirectory(zPath, &fd);
+    if( rc==SQLITE_OK ){
+#if OS_VXWORKS
+      if( fsync(fd)==-1 )
+#else
+      if( fsync(fd) )
+#endif
+      {
+        rc = SQLITE_IOERR_DIR_FSYNC;
+      }
+      if( close(fd)&&!rc ){
+        rc = SQLITE_IOERR_DIR_CLOSE;
+      }
+    }
+  }
+#endif
+  return rc;
 }
 /*
- ** Close a file.
- */
-static int flockUnixClose(OsFile **pId) {
-  unixFile *id = (unixFile*)*pId;
-
-  if( !id ) return SQLITE_OK;
-  flockUnixUnlock(*pId, NO_LOCK);
-
-  if( id->dirfd>=0 ) close(id->dirfd);
-  id->dirfd = -1;
-  sqlite3OsEnterMutex();
-
-  close(id->h);
-  sqlite3OsLeaveMutex();
-  id->isOpen = 0;
-  OSTRACE2("CLOSE %-3d\n", id->h);
-  OpenCounter(-1);
-  sqlite3ThreadSafeFree(id);
-  *pId = 0;
+** Test the existence of or access permissions of file zPath. The
+** test performed depends on the value of flags:
+**
+**     SQLITE_ACCESS_EXISTS: Return 1 if the file exists
+**     SQLITE_ACCESS_READWRITE: Return 1 if the file is readable and writable.
+**     SQLITE_ACCESS_READ: Return 1 if the file is readable.
+**
+** Otherwise return 0.
+*/
+static int unixAccess(
+  sqlite3_vfs *NotUsed,   /* The VFS containing this xAccess method */
+  const char *zPath,      /* Path of the file to examine */
+  int flags,              /* What do we want to learn about the zPath file? */
+  int *pResOut            /* Write result boolean here */
+){
+  int amode = 0;
+  UNUSED_PARAMETER(NotUsed);
+  SimulateIOError( return SQLITE_IOERR_ACCESS; );
+  switch( flags ){
+    case SQLITE_ACCESS_EXISTS:
+      amode = F_OK;
+      break;
+    case SQLITE_ACCESS_READWRITE:
+      amode = W_OK|R_OK;
+      break;
+    case SQLITE_ACCESS_READ:
+      amode = R_OK;
+      break;
+
+    default:
+      assert(!"Invalid flags argument");
+  }
+  *pResOut = (access(zPath, amode)==0);
   return SQLITE_OK;
 }
 /*
+** Turn a relative pathname into a full pathname. The relative path
+** is stored as a nul-terminated string in the buffer pointed to by
+** zPath.
+**
+** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes
+** (in this case, MAX_PATHNAME bytes). The full-path is written to
+** this buffer before returning.
+*/
+static int unixFullPathname(
+  sqlite3_vfs *pVfs,            /* Pointer to vfs object */
+  const char *zPath,            /* Possibly relative input path */
+  int nOut,                     /* Size of output buffer in bytes */
+  char *zOut                    /* Output buffer */
+){
+  /* It's odd to simulate an io-error here, but really this is just
+  ** using the io-error infrastructure to test that SQLite handles this
+  ** function failing. This function could fail if, for example, the
+  ** current working directory has been unlinked.
+  */
+  SimulateIOError( return SQLITE_ERROR );
-static int dotlockUnixCheckReservedLock(OsFile *id) {
-  unixFile *pFile = (unixFile*)id;
-  dotlockLockingContext *context =
-    (dotlockLockingContext *) pFile->lockingContext;
-
-  if (pFile->locktype == RESERVED_LOCK) {
-    return 1; /* already have a reserved lock */
-  } else {
-    struct stat statBuf;
-    if (lstat(context->lockPath,&statBuf) == 0)
-      /* file exists, someone else has the lock */
-      return 1;
-    else
-      /* file does not exist, we could have it if we want it */
-      return 0;
-  }
-}
+  assert( pVfs->mxPathname==MAX_PATHNAME );
+  UNUSED_PARAMETER(pVfs);
-static int dotlockUnixLock(OsFile *id, int locktype) {
-  unixFile *pFile = (unixFile*)id;
-  dotlockLockingContext *context =
-    (dotlockLockingContext *) pFile->lockingContext;
-
-  /* if we already have a lock, it is exclusive.
-  ** Just adjust level and punt on outta here. */
-  if (pFile->locktype > NO_LOCK) {
-    pFile->locktype = locktype;
-
-    /* Always update the timestamp on the old file */
-    utimes(context->lockPath,NULL);
-    return SQLITE_OK;
-  }
-
-  /* check to see if lock file already exists */
-  struct stat statBuf;
-  if (lstat(context->lockPath,&statBuf) == 0){
-    return SQLITE_BUSY; /* it does, busy */
-  }
-
-  /* grab an exclusive lock */
-  int fd = open(context->lockPath,O_RDONLY|O_CREAT|O_EXCL,0600);
-  if (fd < 0) {
-    /* failed to open/create the file, someone else may have stolen the lock */
-    return SQLITE_BUSY;
+  zOut[nOut-1] = '\0';
+  if( zPath[0]=='/' ){
+    sqlite3_snprintf(nOut, zOut, "%s", zPath);
+  }else{
+    int nCwd;
+    if( getcwd(zOut, nOut-1)==0 ){
+      return SQLITE_CANTOPEN;
+    }
+    nCwd = (int)strlen(zOut);
+    sqlite3_snprintf(nOut-nCwd, &zOut[nCwd], "/%s", zPath);
   }
-  close(fd);
-
-  /* got it, set the type and return ok */
-  pFile->locktype = locktype;
   return SQLITE_OK;
 }
-static int dotlockUnixUnlock(OsFile *id, int locktype) {
-  unixFile *pFile = (unixFile*)id;
-  dotlockLockingContext *context =
-    (dotlockLockingContext *) pFile->lockingContext;
-
-  assert( locktype<=SHARED_LOCK );
-
-  /* no-op if possible */
-  if( pFile->locktype==locktype ){
-    return SQLITE_OK;
-  }
-
-  /* shared can just be set because we always have an exclusive */
-  if (locktype==SHARED_LOCK) {
-    pFile->locktype = locktype;
-    return SQLITE_OK;
-  }
-
-  /* no, really, unlock. */
-  unlink(context->lockPath);
-  pFile->locktype = NO_LOCK;
-  return SQLITE_OK;
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Interfaces for opening a shared library, finding entry points
+** within the shared library, and closing the shared library.
+*/
+#include <dlfcn.h>
+static void *unixDlOpen(sqlite3_vfs *NotUsed, const char *zFilename){
+  UNUSED_PARAMETER(NotUsed);
+  return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL);
 }
 /*
- ** Close a file.
- */ -static int dotlockUnixClose(OsFile **pId) { - unixFile *id = (unixFile*)*pId; - - if( !id ) return SQLITE_OK; - dotlockUnixUnlock(*pId, NO_LOCK); - /* free the dotlock locking structure */ - if (id->lockingContext != NULL) { - if (((dotlockLockingContext *)id->lockingContext)->lockPath != NULL) - sqlite3ThreadSafeFree( ( (dotlockLockingContext *) - id->lockingContext)->lockPath); - sqlite3ThreadSafeFree(id->lockingContext); - } - - if( id->dirfd>=0 ) close(id->dirfd); - id->dirfd = -1; - sqlite3OsEnterMutex(); - - close(id->h); - - sqlite3OsLeaveMutex(); - id->isOpen = 0; - OSTRACE2("CLOSE %-3d\n", id->h); - OpenCounter(-1); - sqlite3ThreadSafeFree(id); - *pId = 0; - return SQLITE_OK; +** SQLite calls this function immediately after a call to unixDlSym() or +** unixDlOpen() fails (returns a null pointer). If a more detailed error +** message is available, it is written to zBufOut. If no error message +** is available, zBufOut is left unmodified and SQLite uses a default +** error message. +*/ +static void unixDlError(sqlite3_vfs *NotUsed, int nBuf, char *zBufOut){ + char *zErr; + UNUSED_PARAMETER(NotUsed); + unixEnterMutex(); + zErr = dlerror(); + if( zErr ){ + sqlite3_snprintf(nBuf, zBufOut, "%s", zErr); + } + unixLeaveMutex(); +} +static void (*unixDlSym(sqlite3_vfs *NotUsed, void *p, const char*zSym))(void){ + /* + ** GCC with -pedantic-errors says that C90 does not allow a void* to be + ** cast into a pointer to a function. And yet the library dlsym() routine + ** returns a void* which is really a pointer to a function. So how do we + ** use dlsym() with -pedantic-errors? + ** + ** Variable x below is defined to be a pointer to a function taking + ** parameters void* and const char* and returning a pointer to a function. + ** We initialize x by assigning it a pointer to the dlsym() function. + ** (That assignment requires a cast.) Then we call the function that + ** x points to. + ** + ** This work-around is unlikely to work correctly on any system where + ** you really cannot cast a function pointer into void*. But then, on the + ** other hand, dlsym() will not work on such a system either, so we have + ** not really lost anything. + */ + void (*(*x)(void*,const char*))(void); + UNUSED_PARAMETER(NotUsed); + x = (void(*(*)(void*,const char*))(void))dlsym; + return (*x)(p, zSym); +} +static void unixDlClose(sqlite3_vfs *NotUsed, void *pHandle){ + UNUSED_PARAMETER(NotUsed); + dlclose(pHandle); +} +#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ + #define unixDlOpen 0 + #define unixDlError 0 + #define unixDlSym 0 + #define unixDlClose 0 +#endif + +/* +** Write nBuf bytes of random data to the supplied buffer zBuf. +*/ +static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){ + UNUSED_PARAMETER(NotUsed); + assert((size_t)nBuf>=(sizeof(time_t)+sizeof(int))); + + /* We have to initialize zBuf to prevent valgrind from reporting + ** errors. The reports issued by valgrind are incorrect - we would + ** prefer that the randomness be increased by making use of the + ** uninitialized space in zBuf - but valgrind errors tend to worry + ** some users. Rather than argue, it seems easier just to initialize + ** the whole array and silence valgrind, even if that means less randomness + ** in the random seed. + ** + ** When testing, initializing zBuf[] to zero is all we do. That means + ** that we always use the same random number sequence. This makes the + ** tests repeatable. 
+ */ + memset(zBuf, 0, nBuf); +#if !defined(SQLITE_TEST) + { + int pid, fd; + fd = open("/dev/urandom", O_RDONLY); + if( fd<0 ){ + time_t t; + time(&t); + memcpy(zBuf, &t, sizeof(t)); + pid = getpid(); + memcpy(&zBuf[sizeof(t)], &pid, sizeof(pid)); + assert( sizeof(t)+sizeof(pid)<=(size_t)nBuf ); + nBuf = sizeof(t) + sizeof(pid); + }else{ + nBuf = read(fd, zBuf, nBuf); + close(fd); + } + } +#endif + return nBuf; } -#pragma mark No locking +/* +** Sleep for a little while. Return the amount of time slept. +** The argument is the number of microseconds we want to sleep. +** The return value is the number of microseconds of sleep actually +** requested from the underlying operating system, a number which +** might be greater than or equal to the argument, but not less +** than the argument. +*/ +static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ +#if OS_VXWORKS + struct timespec sp; + + sp.tv_sec = microseconds / 1000000; + sp.tv_nsec = (microseconds % 1000000) * 1000; + nanosleep(&sp, NULL); + UNUSED_PARAMETER(NotUsed); + return microseconds; +#elif defined(HAVE_USLEEP) && HAVE_USLEEP + usleep(microseconds); + UNUSED_PARAMETER(NotUsed); + return microseconds; +#else + int seconds = (microseconds+999999)/1000000; + sleep(seconds); + UNUSED_PARAMETER(NotUsed); + return seconds*1000000; +#endif +} /* - ** The nolockLockingContext is void - */ -typedef void nolockLockingContext; +** The following variable, if set to a non-zero value, is interpreted as +** the number of seconds since 1970 and is used to set the result of +** sqlite3OsCurrentTime() during testing. +*/ +#ifdef SQLITE_TEST +int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){ +#if defined(SQLITE_OMIT_FLOATING_POINT) + time_t t; + time(&t); + *prNow = (((sqlite3_int64)t)/8640 + 24405875)/10; +#elif defined(NO_GETTOD) + time_t t; + time(&t); + *prNow = t/86400.0 + 2440587.5; +#elif OS_VXWORKS + struct timespec sNow; + clock_gettime(CLOCK_REALTIME, &sNow); + *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_nsec/86400000000000.0; +#else + struct timeval sNow; + gettimeofday(&sNow, 0); + *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_usec/86400000000.0; +#endif -static int nolockUnixCheckReservedLock(OsFile *id) { +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *prNow = sqlite3_current_time/86400.0 + 2440587.5; + } +#endif + UNUSED_PARAMETER(NotUsed); return 0; } -static int nolockUnixLock(OsFile *id, int locktype) { - return SQLITE_OK; +/* +** We added the xGetLastError() method with the intention of providing +** better low-level error messages when operating-system problems come up +** during SQLite operation. But so far, none of that has been implemented +** in the core. So this routine is never called. For now, it is merely +** a place-holder. 
+*/ +static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){ + UNUSED_PARAMETER(NotUsed); + UNUSED_PARAMETER(NotUsed2); + UNUSED_PARAMETER(NotUsed3); + return 0; } -static int nolockUnixUnlock(OsFile *id, int locktype) { - return SQLITE_OK; -} +/* +************************ End of sqlite3_vfs methods *************************** +******************************************************************************/ + +/****************************************************************************** +************************** Begin Proxy Locking ******************************** +** +** Proxy locking is a "uber-locking-method" in this sense: It uses the +** other locking methods on secondary lock files. Proxy locking is a +** meta-layer over top of the primitive locking implemented above. For +** this reason, the division that implements of proxy locking is deferred +** until late in the file (here) after all of the other I/O methods have +** been defined - so that the primitive locking methods are available +** as services to help with the implementation of proxy locking. +** +**** +** +** The default locking schemes in SQLite use byte-range locks on the +** database file to coordinate safe, concurrent access by multiple readers +** and writers [http://sqlite.org/lockingv3.html]. The five file locking +** states (UNLOCKED, PENDING, SHARED, RESERVED, EXCLUSIVE) are implemented +** as POSIX read & write locks over fixed set of locations (via fsctl), +** on AFP and SMB only exclusive byte-range locks are available via fsctl +** with _IOWR('z', 23, struct ByteRangeLockPB2) to track the same 5 states. +** To simulate a F_RDLCK on the shared range, on AFP a randomly selected +** address in the shared range is taken for a SHARED lock, the entire +** shared range is taken for an EXCLUSIVE lock): +** +** PENDING_BYTE 0x40000000 +** RESERVED_BYTE 0x40000001 +** SHARED_RANGE 0x40000002 -> 0x40000200 +** +** This works well on the local file system, but shows a nearly 100x +** slowdown in read performance on AFP because the AFP client disables +** the read cache when byte-range locks are present. Enabling the read +** cache exposes a cache coherency problem that is present on all OS X +** supported network file systems. NFS and AFP both observe the +** close-to-open semantics for ensuring cache coherency +** [http://nfs.sourceforge.net/#faq_a8], which does not effectively +** address the requirements for concurrent database access by multiple +** readers and writers +** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html]. +** +** To address the performance and cache coherency issues, proxy file locking +** changes the way database access is controlled by limiting access to a +** single host at a time and moving file locks off of the database file +** and onto a proxy file on the local file system. 
+** +** +** Using proxy locks +** ----------------- +** +** C APIs +** +** sqlite3_file_control(db, dbname, SQLITE_SET_LOCKPROXYFILE, +** | ":auto:"); +** sqlite3_file_control(db, dbname, SQLITE_GET_LOCKPROXYFILE, &); +** +** +** SQL pragmas +** +** PRAGMA [database.]lock_proxy_file= | :auto: +** PRAGMA [database.]lock_proxy_file +** +** Specifying ":auto:" means that if there is a conch file with a matching +** host ID in it, the proxy path in the conch file will be used, otherwise +** a proxy path based on the user's temp dir +** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the +** actual proxy file name is generated from the name and path of the +** database file. For example: +** +** For database path "/Users/me/foo.db" +** The lock path will be "/sqliteplocks/_Users_me_foo.db:auto:") +** +** Once a lock proxy is configured for a database connection, it can not +** be removed, however it may be switched to a different proxy path via +** the above APIs (assuming the conch file is not being held by another +** connection or process). +** +** +** How proxy locking works +** ----------------------- +** +** Proxy file locking relies primarily on two new supporting files: +** +** * conch file to limit access to the database file to a single host +** at a time +** +** * proxy file to act as a proxy for the advisory locks normally +** taken on the database +** +** The conch file - to use a proxy file, sqlite must first "hold the conch" +** by taking an sqlite-style shared lock on the conch file, reading the +** contents and comparing the host's unique host ID (see below) and lock +** proxy path against the values stored in the conch. The conch file is +** stored in the same directory as the database file and the file name +** is patterned after the database file name as ".-conch". +** If the conch file does not exist, or it's contents do not match the +** host ID and/or proxy path, then the lock is escalated to an exclusive +** lock and the conch file contents is updated with the host ID and proxy +** path and the lock is downgraded to a shared lock again. If the conch +** is held by another process (with a shared lock), the exclusive lock +** will fail and SQLITE_BUSY is returned. +** +** The proxy file - a single-byte file used for all advisory file locks +** normally taken on the database file. This allows for safe sharing +** of the database file for multiple readers and writers on the same +** host (the conch ensures that they all use the same local lock file). +** +** There is a third file - the host ID file - used as a persistent record +** of a unique identifier for the host, a 128-byte unique host id file +** in the path defined by the HOSTIDPATH macro (default value is +** /Library/Caches/.com.apple.sqliteConchHostId). +** +** Requesting the lock proxy does not immediately take the conch, it is +** only taken when the first request to lock database file is made. +** This matches the semantics of the traditional locking behavior, where +** opening a connection to a database file does not take a lock on it. +** The shared lock and an open file descriptor are maintained until +** the connection to the database is closed. +** +** The proxy file and the lock file are never deleted so they only need +** to be created the first time they are used. 
+** +** Configuration options +** --------------------- +** +** SQLITE_PREFER_PROXY_LOCKING +** +** Database files accessed on non-local file systems are +** automatically configured for proxy locking, lock files are +** named automatically using the same logic as +** PRAGMA lock_proxy_file=":auto:" +** +** SQLITE_PROXY_DEBUG +** +** Enables the logging of error messages during host id file +** retrieval and creation +** +** HOSTIDPATH +** +** Overrides the default host ID file path location +** +** LOCKPROXYDIR +** +** Overrides the default directory used for lock proxy files that +** are named automatically via the ":auto:" setting +** +** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS +** +** Permissions to use when creating a directory for storing the +** lock proxy files, only used when LOCKPROXYDIR is not set. +** +** +** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING, +** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will +** force proxy locking to be used for every database file opened, and 0 +** will force automatic proxy locking to be disabled for all database +** files (explicity calling the SQLITE_SET_LOCKPROXYFILE pragma or +** sqlite_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING). +*/ /* - ** Close a file. - */ -static int nolockUnixClose(OsFile **pId) { - unixFile *id = (unixFile*)*pId; +** Proxy locking is only available on MacOSX +*/ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE + +#ifdef SQLITE_TEST +/* simulate multiple hosts by creating unique hostid file paths */ +int sqlite3_hostid_num = 0; +#endif + +/* +** The proxyLockingContext has the path and file structures for the remote +** and local proxy files in it +*/ +typedef struct proxyLockingContext proxyLockingContext; +struct proxyLockingContext { + unixFile *conchFile; /* Open conch file */ + char *conchFilePath; /* Name of the conch file */ + unixFile *lockProxy; /* Open proxy lock file */ + char *lockProxyPath; /* Name of the proxy lock file */ + char *dbPath; /* Name of the open file */ + int conchHeld; /* True if the conch is currently held */ + void *oldLockingContext; /* Original lockingcontext to restore on close */ + sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */ +}; + +/* HOSTIDLEN and CONCHLEN both include space for the string +** terminating nul +*/ +#define HOSTIDLEN 128 +#define CONCHLEN (MAXPATHLEN+HOSTIDLEN+1) +#ifndef HOSTIDPATH +# define HOSTIDPATH "/Library/Caches/.com.apple.sqliteConchHostId" +#endif + +/* basically a copy of unixRandomness with different +** test behavior built in */ +static int proxyGenerateHostID(char *pHostID){ + int pid, fd, len; + unsigned char *key = (unsigned char *)pHostID; + + memset(key, 0, HOSTIDLEN); + len = 0; + fd = open("/dev/urandom", O_RDONLY); + if( fd>=0 ){ + len = read(fd, key, HOSTIDLEN); + close(fd); /* silently leak the fd if it fails */ + } + if( len < HOSTIDLEN ){ + time_t t; + time(&t); + memcpy(key, &t, sizeof(t)); + pid = getpid(); + memcpy(&key[sizeof(t)], &pid, sizeof(pid)); + } - if( !id ) return SQLITE_OK; - if( id->dirfd>=0 ) close(id->dirfd); - id->dirfd = -1; - sqlite3OsEnterMutex(); - - close(id->h); - - sqlite3OsLeaveMutex(); - id->isOpen = 0; - OSTRACE2("CLOSE %-3d\n", id->h); - OpenCounter(-1); - sqlite3ThreadSafeFree(id); - *pId = 0; +#ifdef MAKE_PRETTY_HOSTID + { + int i; + /* filter the bytes into printable ascii characters and NUL terminate */ + key[(HOSTIDLEN-1)] = 0x00; + for( i=0; i<(HOSTIDLEN-1); i++ ){ + unsigned char pa = key[i]&0x7F; + if( pa<0x20 
){ + key[i] = (key[i]&0x80 == 0x80) ? pa+0x40 : pa+0x20; + }else if( pa==0x7F ){ + key[i] = (key[i]&0x80 == 0x80) ? pa=0x20 : pa+0x7E; + } + } + } +#endif return SQLITE_OK; } -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ +/* writes the host id path to path, path should be an pre-allocated buffer +** with enough space for a path +*/ +static void proxyGetHostIDPath(char *path, size_t len){ + strlcpy(path, HOSTIDPATH, len); +#ifdef SQLITE_TEST + if( sqlite3_hostid_num>0 ){ + char suffix[2] = "1"; + suffix[0] = suffix[0] + sqlite3_hostid_num; + strlcat(path, suffix, len); + } +#endif + OSTRACE3("GETHOSTIDPATH %s pid=%d\n", path, getpid()); +} -/* -** Turn a relative pathname into a full pathname. Return a pointer -** to the full pathname stored in space obtained from sqliteMalloc(). -** The calling function is responsible for freeing this space once it -** is no longer needed. -*/ -char *sqlite3UnixFullPathname(const char *zRelative){ - char *zFull = 0; - if( zRelative[0]=='/' ){ - sqlite3SetString(&zFull, zRelative, (char*)0); +/* get the host ID from a sqlite hostid file stored in the +** user-specific tmp directory, create the ID if it's not there already +*/ +static int proxyGetHostID(char *pHostID, int *pError){ + int fd; + char path[MAXPATHLEN]; + size_t len; + int rc=SQLITE_OK; + + proxyGetHostIDPath(path, MAXPATHLEN); + /* try to create the host ID file, if it already exists read the contents */ + fd = open(path, O_CREAT|O_WRONLY|O_EXCL, 0644); + if( fd<0 ){ + int err=errno; + + if( err!=EEXIST ){ +#ifdef SQLITE_PROXY_DEBUG /* set the sqlite error message instead */ + fprintf(stderr, "sqlite error creating host ID file %s: %s\n", + path, strerror(err)); +#endif + return SQLITE_PERM; + } + /* couldn't create the file, read it instead */ + fd = open(path, O_RDONLY|O_EXCL); + if( fd<0 ){ +#ifdef SQLITE_PROXY_DEBUG /* set the sqlite error message instead */ + int err = errno; + fprintf(stderr, "sqlite error opening host ID file %s: %s\n", + path, strerror(err)); +#endif + return SQLITE_PERM; + } + len = pread(fd, pHostID, HOSTIDLEN, 0); + if( len<0 ){ + *pError = errno; + rc = SQLITE_IOERR_READ; + }else if( len0 && zFull[j-1]!='/' ){ j--; } - i += 3; - continue; - } +static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){ + int len; + int dbLen; + int i; + +#ifdef LOCKPROXYDIR + len = strlcpy(lPath, LOCKPROXYDIR, maxLen); +#else +# ifdef _CS_DARWIN_USER_TEMP_DIR + { + confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen); + len = strlcat(lPath, "sqliteplocks", maxLen); + if( mkdir(lPath, SQLITE_DEFAULT_PROXYDIR_PERMISSIONS) ){ + /* if mkdir fails, handle as lock file creation failure */ +# ifdef SQLITE_DEBUG + int err = errno; + if( err!=EEXIST ){ + fprintf(stderr, "proxyGetLockPath: mkdir(%s,0%o) error %d %s\n", lPath, + SQLITE_DEFAULT_PROXYDIR_PERMISSIONS, err, strerror(err)); } - zFull[j++] = zFull[i]; +# endif + }else{ + OSTRACE3("GETLOCKPATH mkdir %s pid=%d\n", lPath, getpid()); } - zFull[j] = 0; + } +# else + len = strlcpy(lPath, "/tmp/", maxLen); +# endif #endif - return zFull; + if( lPath[len-1]!='/' ){ + len = strlcat(lPath, "/", maxLen); + } + + /* transform the db path to a unique cache name */ + dbLen = (int)strlen(dbPath); + for( i=0; ifullSync = v; -} +static int proxyCreateUnixFile(const char *path, unixFile **ppFile) { + int fd; + int dirfd = -1; + unixFile *pNew; + int rc = SQLITE_OK; + sqlite3_vfs dummyVfs; -/* -** Return the underlying file handle for an OsFile -*/ -static int unixFileHandle(OsFile *id){ - return ((unixFile*)id)->h; -} + fd = open(path, 
O_RDWR | O_CREAT, SQLITE_DEFAULT_FILE_PERMISSIONS); + if( fd<0 ){ + return SQLITE_CANTOPEN; + } + + pNew = (unixFile *)sqlite3_malloc(sizeof(unixFile)); + if( pNew==NULL ){ + rc = SQLITE_NOMEM; + goto end_create_proxy; + } + memset(pNew, 0, sizeof(unixFile)); -/* -** Return an integer that indices the type of lock currently held -** by this handle. (Used for testing and analysis only.) -*/ -static int unixLockState(OsFile *id){ - return ((unixFile*)id)->locktype; + dummyVfs.pAppData = (void*)&autolockIoFinder; + rc = fillInUnixFile(&dummyVfs, fd, dirfd, (sqlite3_file*)pNew, path, 0, 0); + if( rc==SQLITE_OK ){ + *ppFile = pNew; + return SQLITE_OK; + } +end_create_proxy: + close(fd); /* silently leak fd if error, we're already in error */ + sqlite3_free(pNew); + return rc; } -/* -** Return the sector size in bytes of the underlying block device for -** the specified file. This is almost always 512 bytes, but may be -** larger for some devices. -** -** SQLite code assumes this function cannot fail. It also assumes that -** if two files are created in the same file-system directory (i.e. -** a database and it's journal file) that the sector size will be the -** same for both. +/* takes the conch by taking a shared lock and read the contents conch, if +** lockPath is non-NULL, the host ID and lock file path must match. A NULL +** lockPath means that the lockPath in the conch file will be used if the +** host IDs match, or a new lock path will be generated automatically +** and written to the conch file. */ -static int unixSectorSize(OsFile *id){ - return SQLITE_DEFAULT_SECTOR_SIZE; +static int proxyTakeConch(unixFile *pFile){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + + if( pCtx->conchHeld>0 ){ + return SQLITE_OK; + }else{ + unixFile *conchFile = pCtx->conchFile; + char testValue[CONCHLEN]; + char conchValue[CONCHLEN]; + char lockPath[MAXPATHLEN]; + char *tLockPath = NULL; + int rc = SQLITE_OK; + int readRc = SQLITE_OK; + int syncPerms = 0; + + OSTRACE4("TAKECONCH %d for %s pid=%d\n", conchFile->h, + (pCtx->lockProxyPath ? 
pCtx->lockProxyPath : ":auto:"), getpid()); + + rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK); + if( rc==SQLITE_OK ){ + int pError = 0; + memset(testValue, 0, CONCHLEN); /* conch is fixed size */ + rc = proxyGetHostID(testValue, &pError); + if( (rc&0xff)==SQLITE_IOERR ){ + pFile->lastErrno = pError; + } + if( pCtx->lockProxyPath ){ + strlcpy(&testValue[HOSTIDLEN], pCtx->lockProxyPath, MAXPATHLEN); + } + } + if( rc!=SQLITE_OK ){ + goto end_takeconch; + } + + readRc = unixRead((sqlite3_file *)conchFile, conchValue, CONCHLEN, 0); + if( readRc!=SQLITE_IOERR_SHORT_READ ){ + if( readRc!=SQLITE_OK ){ + if( (rc&0xff)==SQLITE_IOERR ){ + pFile->lastErrno = conchFile->lastErrno; + } + rc = readRc; + goto end_takeconch; + } + /* if the conch has data compare the contents */ + if( !pCtx->lockProxyPath ){ + /* for auto-named local lock file, just check the host ID and we'll + ** use the local lock file path that's already in there */ + if( !memcmp(testValue, conchValue, HOSTIDLEN) ){ + tLockPath = (char *)&conchValue[HOSTIDLEN]; + goto end_takeconch; + } + }else{ + /* we've got the conch if conchValue matches our path and host ID */ + if( !memcmp(testValue, conchValue, CONCHLEN) ){ + goto end_takeconch; + } + } + }else{ + /* a short read means we're "creating" the conch (even though it could + ** have been user-intervention), if we acquire the exclusive lock, + ** we'll try to match the current on-disk permissions of the database + */ + syncPerms = 1; + } + + /* either conch was emtpy or didn't match */ + if( !pCtx->lockProxyPath ){ + proxyGetLockPath(pCtx->dbPath, lockPath, MAXPATHLEN); + tLockPath = lockPath; + strlcpy(&testValue[HOSTIDLEN], lockPath, MAXPATHLEN); + } + + /* update conch with host and path (this will fail if other process + ** has a shared lock already) */ + rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, EXCLUSIVE_LOCK); + if( rc==SQLITE_OK ){ + rc = unixWrite((sqlite3_file *)conchFile, testValue, CONCHLEN, 0); + if( rc==SQLITE_OK && syncPerms ){ + struct stat buf; + int err = fstat(pFile->h, &buf); + if( err==0 ){ + /* try to match the database file permissions, ignore failure */ +#ifndef SQLITE_PROXY_DEBUG + fchmod(conchFile->h, buf.st_mode); +#else + if( fchmod(conchFile->h, buf.st_mode)!=0 ){ + int code = errno; + fprintf(stderr, "fchmod %o FAILED with %d %s\n", + buf.st_mode, code, strerror(code)); + } else { + fprintf(stderr, "fchmod %o SUCCEDED\n",buf.st_mode); + } + }else{ + int code = errno; + fprintf(stderr, "STAT FAILED[%d] with %d %s\n", + err, code, strerror(code)); +#endif + } + } + } + conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK); + +end_takeconch: + OSTRACE2("TRANSPROXY: CLOSE %d\n", pFile->h); + if( rc==SQLITE_OK && pFile->openFlags ){ + if( pFile->h>=0 ){ +#ifdef STRICT_CLOSE_ERROR + if( close(pFile->h) ){ + pFile->lastErrno = errno; + return SQLITE_IOERR_CLOSE; + } +#else + close(pFile->h); /* silently leak fd if fail */ +#endif + } + pFile->h = -1; + int fd = open(pCtx->dbPath, pFile->openFlags, + SQLITE_DEFAULT_FILE_PERMISSIONS); + OSTRACE2("TRANSPROXY: OPEN %d\n", fd); + if( fd>=0 ){ + pFile->h = fd; + }else{ + rc=SQLITE_CANTOPEN; /* SQLITE_BUSY? proxyTakeConch called + during locking */ + } + } + if( rc==SQLITE_OK && !pCtx->lockProxy ){ + char *path = tLockPath ? 
tLockPath : pCtx->lockProxyPath; + /* ACS: Need to make a copy of path sometimes */ + rc = proxyCreateUnixFile(path, &pCtx->lockProxy); + } + if( rc==SQLITE_OK ){ + pCtx->conchHeld = 1; + + if( tLockPath ){ + pCtx->lockProxyPath = sqlite3DbStrDup(0, tLockPath); + if( pCtx->lockProxy->pMethod == &afpIoMethods ){ + ((afpLockingContext *)pCtx->lockProxy->lockingContext)->dbPath = + pCtx->lockProxyPath; + } + } + } else { + conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); + } + OSTRACE3("TAKECONCH %d %s\n", conchFile->h, rc==SQLITE_OK?"ok":"failed"); + return rc; + } } /* -** This vector defines all the methods that can operate on an OsFile -** for unix. +** If pFile holds a lock on a conch file, then release that lock. */ -static const IoMethod sqlite3UnixIoMethod = { - unixClose, - unixOpenDirectory, - unixRead, - unixWrite, - unixSeek, - unixTruncate, - unixSync, - unixSetFullSync, - unixFileHandle, - unixFileSize, - unixLock, - unixUnlock, - unixLockState, - unixCheckReservedLock, - unixSectorSize, -}; - -#ifdef SQLITE_ENABLE_LOCKING_STYLE -/* - ** This vector defines all the methods that can operate on an OsFile - ** for unix with AFP style file locking. - */ -static const IoMethod sqlite3AFPLockingUnixIoMethod = { - afpUnixClose, - unixOpenDirectory, - unixRead, - unixWrite, - unixSeek, - unixTruncate, - unixSync, - unixSetFullSync, - unixFileHandle, - unixFileSize, - afpUnixLock, - afpUnixUnlock, - unixLockState, - afpUnixCheckReservedLock, - unixSectorSize, -}; +static int proxyReleaseConch(unixFile *pFile){ + int rc; /* Subroutine return code */ + proxyLockingContext *pCtx; /* The locking context for the proxy lock */ + unixFile *conchFile; /* Name of the conch file */ + + pCtx = (proxyLockingContext *)pFile->lockingContext; + conchFile = pCtx->conchFile; + OSTRACE4("RELEASECONCH %d for %s pid=%d\n", conchFile->h, + (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), + getpid()); + pCtx->conchHeld = 0; + rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); + OSTRACE3("RELEASECONCH %d %s\n", conchFile->h, + (rc==SQLITE_OK ? "ok" : "failed")); + return rc; +} /* - ** This vector defines all the methods that can operate on an OsFile - ** for unix with flock() style file locking. - */ -static const IoMethod sqlite3FlockLockingUnixIoMethod = { - flockUnixClose, - unixOpenDirectory, - unixRead, - unixWrite, - unixSeek, - unixTruncate, - unixSync, - unixSetFullSync, - unixFileHandle, - unixFileSize, - flockUnixLock, - flockUnixUnlock, - unixLockState, - flockUnixCheckReservedLock, - unixSectorSize, -}; +** Given the name of a database file, compute the name of its conch file. +** Store the conch filename in memory obtained from sqlite3_malloc(). +** Make *pConchPath point to the new name. Return SQLITE_OK on success +** or SQLITE_NOMEM if unable to obtain memory. +** +** The caller is responsible for ensuring that the allocated memory +** space is eventually freed. +** +** *pConchPath is set to NULL if a memory allocation error occurs. +*/ +static int proxyCreateConchPathname(char *dbPath, char **pConchPath){ + int i; /* Loop counter */ + int len = (int)strlen(dbPath); /* Length of database filename - dbPath */ + char *conchPath; /* buffer in which to construct conch name */ + + /* Allocate space for the conch filename and initialize the name to + ** the name of the original database file. 
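+  ** (Editor's worked example, not part of the original source.)  For a
+  ** database at "/Users/me/foo.db" the loop below inserts a "." in front
+  ** of the final path component and appends "-conch", producing
+  ** "/Users/me/.foo.db-conch"; the len+8 allocation leaves room for the
+  ** added ".", the "-conch" suffix, and the terminating nul.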
*/ + *pConchPath = conchPath = (char *)sqlite3_malloc(len + 8); + if( conchPath==0 ){ + return SQLITE_NOMEM; + } + memcpy(conchPath, dbPath, len+1); + + /* now insert a "." before the last / character */ + for( i=(len-1); i>=0; i-- ){ + if( conchPath[i]=='/' ){ + i++; + break; + } + } + conchPath[i]='.'; + while ( ilockingContext; + char *oldPath = pCtx->lockProxyPath; + int rc = SQLITE_OK; - memset(&f, 0, sizeof(f)); - lockingStyle = sqlite3DetectLockingStyle(zFilename, h); - if ( lockingStyle == posixLockingStyle ) { - sqlite3OsEnterMutex(); - rc = findLockInfo(h, &f.pLock, &f.pOpen); - sqlite3OsLeaveMutex(); - if( rc ){ - close(h); - unlink(zFilename); - return SQLITE_NOMEM; + if( pFile->locktype!=NO_LOCK ){ + return SQLITE_BUSY; + } + + /* nothing to do if the path is NULL, :auto: or matches the existing path */ + if( !path || path[0]=='\0' || !strcmp(path, ":auto:") || + (oldPath && !strncmp(oldPath, path, MAXPATHLEN)) ){ + return SQLITE_OK; + }else{ + unixFile *lockProxy = pCtx->lockProxy; + pCtx->lockProxy=NULL; + pCtx->conchHeld = 0; + if( lockProxy!=NULL ){ + rc=lockProxy->pMethod->xClose((sqlite3_file *)lockProxy); + if( rc ) return rc; + sqlite3_free(lockProxy); } - } else { - /* pLock and pOpen are only used for posix advisory locking */ - f.pLock = NULL; - f.pOpen = NULL; - } - if( delFlag ){ - unlink(zFilename); + sqlite3_free(oldPath); + pCtx->lockProxyPath = sqlite3DbStrDup(0, path); } - f.dirfd = -1; - f.h = h; - SET_THREADID(&f); - pNew = sqlite3ThreadSafeMalloc( sizeof(unixFile) ); - if( pNew==0 ){ - close(h); - sqlite3OsEnterMutex(); - releaseLockInfo(f.pLock); - releaseOpenCnt(f.pOpen); - sqlite3OsLeaveMutex(); - *pId = 0; - return SQLITE_NOMEM; + + return rc; +} + +/* +** pFile is a file that has been opened by a prior xOpen call. dbPath +** is a string buffer at least MAXPATHLEN+1 characters in size. +** +** This routine find the filename associated with pFile and writes it +** int dbPath. 
+*/ +static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){ +#if defined(__APPLE__) + if( pFile->pMethod == &afpIoMethods ){ + /* afp style keeps a reference to the db path in the filePath field + ** of the struct */ + assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); + strcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath); + }else +#endif + if( pFile->pMethod == &dotlockIoMethods ){ + /* dot lock style uses the locking context to store the dot lock + ** file path */ + int len = strlen((char *)pFile->lockingContext) - strlen(DOTLOCK_SUFFIX); + memcpy(dbPath, (char *)pFile->lockingContext, len + 1); }else{ - *pNew = f; - switch(lockingStyle) { - case afpLockingStyle: { - /* afp locking uses the file path so it needs to be included in - ** the afpLockingContext */ - int nFilename; - pNew->pMethod = &sqlite3AFPLockingUnixIoMethod; - pNew->lockingContext = - sqlite3ThreadSafeMalloc(sizeof(afpLockingContext)); - nFilename = strlen(zFilename)+1; - ((afpLockingContext *)pNew->lockingContext)->filePath = - sqlite3ThreadSafeMalloc(nFilename); - memcpy(((afpLockingContext *)pNew->lockingContext)->filePath, - zFilename, nFilename); - srandomdev(); - break; - } - case flockLockingStyle: - /* flock locking doesn't need additional lockingContext information */ - pNew->pMethod = &sqlite3FlockLockingUnixIoMethod; - break; - case dotlockLockingStyle: { - /* dotlock locking uses the file path so it needs to be included in - ** the dotlockLockingContext */ - int nFilename; - pNew->pMethod = &sqlite3DotlockLockingUnixIoMethod; - pNew->lockingContext = sqlite3ThreadSafeMalloc( - sizeof(dotlockLockingContext)); - nFilename = strlen(zFilename) + 6; - ((dotlockLockingContext *)pNew->lockingContext)->lockPath = - sqlite3ThreadSafeMalloc( nFilename ); - sqlite3_snprintf(nFilename, - ((dotlockLockingContext *)pNew->lockingContext)->lockPath, - "%s.lock", zFilename); - break; - } - case posixLockingStyle: - /* posix locking doesn't need additional lockingContext information */ - pNew->pMethod = &sqlite3UnixIoMethod; - break; - case noLockingStyle: - case unsupportedLockingStyle: - default: - pNew->pMethod = &sqlite3NolockLockingUnixIoMethod; - } - *pId = (OsFile*)pNew; - OpenCounter(+1); - return SQLITE_OK; + /* all other styles use the locking context to store the db file path */ + assert( strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); + strcpy(dbPath, (char *)pFile->lockingContext); } + return SQLITE_OK; } -#else /* SQLITE_ENABLE_LOCKING_STYLE */ -static int allocateUnixFile( - int h, /* Open file descriptor on file being opened */ - OsFile **pId, /* Write the resul unixFile structure here */ - const char *zFilename, /* Name of the file being opened */ - int delFlag /* If true, delete the file on or before closing */ -){ - unixFile *pNew; - unixFile f; - int rc; -#ifdef FD_CLOEXEC - fcntl(h, F_SETFD, fcntl(h, F_GETFD, 0) | FD_CLOEXEC); -#endif - memset(&f, 0, sizeof(f)); - sqlite3OsEnterMutex(); - rc = findLockInfo(h, &f.pLock, &f.pOpen); - sqlite3OsLeaveMutex(); - if( delFlag ){ - unlink(zFilename); +/* +** Takes an already filled in unix file and alters it so all file locking +** will be performed on the local proxy lock file. 
The following fields +** are preserved in the locking context so that they can be restored and +** the unix structure properly cleaned up at close time: +** ->lockingContext +** ->pMethod +*/ +static int proxyTransformUnixFile(unixFile *pFile, const char *path) { + proxyLockingContext *pCtx; + char dbPath[MAXPATHLEN+1]; /* Name of the database file */ + char *lockPath=NULL; + int rc = SQLITE_OK; + + if( pFile->locktype!=NO_LOCK ){ + return SQLITE_BUSY; } - if( rc ){ - close(h); - return SQLITE_NOMEM; + proxyGetDbPathForUnixFile(pFile, dbPath); + if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ){ + lockPath=NULL; + }else{ + lockPath=(char *)path; } - OSTRACE3("OPEN %-3d %s\n", h, zFilename); - f.dirfd = -1; - f.h = h; - SET_THREADID(&f); - pNew = sqlite3ThreadSafeMalloc( sizeof(unixFile) ); - if( pNew==0 ){ - close(h); - sqlite3OsEnterMutex(); - releaseLockInfo(f.pLock); - releaseOpenCnt(f.pOpen); - sqlite3OsLeaveMutex(); - *pId = 0; + + OSTRACE4("TRANSPROXY %d for %s pid=%d\n", pFile->h, + (lockPath ? lockPath : ":auto:"), getpid()); + + pCtx = sqlite3_malloc( sizeof(*pCtx) ); + if( pCtx==0 ){ return SQLITE_NOMEM; + } + memset(pCtx, 0, sizeof(*pCtx)); + + rc = proxyCreateConchPathname(dbPath, &pCtx->conchFilePath); + if( rc==SQLITE_OK ){ + rc = proxyCreateUnixFile(pCtx->conchFilePath, &pCtx->conchFile); + } + if( rc==SQLITE_OK && lockPath ){ + pCtx->lockProxyPath = sqlite3DbStrDup(0, lockPath); + } + + if( rc==SQLITE_OK ){ + /* all memory is allocated, proxys are created and assigned, + ** switch the locking context and pMethod then return. + */ + pCtx->dbPath = sqlite3DbStrDup(0, dbPath); + pCtx->oldLockingContext = pFile->lockingContext; + pFile->lockingContext = pCtx; + pCtx->pOldMethod = pFile->pMethod; + pFile->pMethod = &proxyIoMethods; }else{ - *pNew = f; - pNew->pMethod = &sqlite3UnixIoMethod; - *pId = (OsFile*)pNew; - OpenCounter(+1); - return SQLITE_OK; + if( pCtx->conchFile ){ + rc = pCtx->conchFile->pMethod->xClose((sqlite3_file *)pCtx->conchFile); + if( rc ) return rc; + sqlite3_free(pCtx->conchFile); + } + sqlite3_free(pCtx->conchFilePath); + sqlite3_free(pCtx); } + OSTRACE3("TRANSPROXY %d %s\n", pFile->h, + (rc==SQLITE_OK ? "ok" : "failed")); + return rc; } -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ - -#endif /* SQLITE_OMIT_DISKIO */ -/*************************************************************************** -** Everything above deals with file I/O. Everything that follows deals -** with other miscellanous aspects of the operating system interface -****************************************************************************/ -#ifndef SQLITE_OMIT_LOAD_EXTENSION /* -** Interfaces for opening a shared library, finding entry points -** within the shared library, and closing the shared library. +** This routine handles sqlite3_file_control() calls that are specific +** to proxy locking. 
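+**
+** As an editor's illustrative sketch (not from the original sources), the
+** two opcodes are also reachable from SQL once a connection is open; for
+** a handle db the following sets an explicit proxy path and then queries
+** the active setting (the query returns the path as a single result row):
+**
+**   sqlite3_exec(db, "PRAGMA lock_proxy_file='/tmp/foo.proxy';", 0, 0, 0);
+**   sqlite3_exec(db, "PRAGMA lock_proxy_file;", 0, 0, 0);
+**
+** Passing a NULL path once proxy locking is active (an attempt to switch
+** it back off) is rejected below with SQLITE_ERROR.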
*/ -#include -void *sqlite3UnixDlopen(const char *zFilename){ - return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL); -} -void *sqlite3UnixDlsym(void *pHandle, const char *zSymbol){ - return dlsym(pHandle, zSymbol); -} -int sqlite3UnixDlclose(void *pHandle){ - return dlclose(pHandle); +static int proxyFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_GET_LOCKPROXYFILE: { + unixFile *pFile = (unixFile*)id; + if( pFile->pMethod == &proxyIoMethods ){ + proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext; + proxyTakeConch(pFile); + if( pCtx->lockProxyPath ){ + *(const char **)pArg = pCtx->lockProxyPath; + }else{ + *(const char **)pArg = ":auto: (not held)"; + } + } else { + *(const char **)pArg = NULL; + } + return SQLITE_OK; + } + case SQLITE_SET_LOCKPROXYFILE: { + unixFile *pFile = (unixFile*)id; + int rc = SQLITE_OK; + int isProxyStyle = (pFile->pMethod == &proxyIoMethods); + if( pArg==NULL || (const char *)pArg==0 ){ + if( isProxyStyle ){ + /* turn off proxy locking - not supported */ + rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/; + }else{ + /* turn off proxy locking - already off - NOOP */ + rc = SQLITE_OK; + } + }else{ + const char *proxyPath = (const char *)pArg; + if( isProxyStyle ){ + proxyLockingContext *pCtx = + (proxyLockingContext*)pFile->lockingContext; + if( !strcmp(pArg, ":auto:") + || (pCtx->lockProxyPath && + !strncmp(pCtx->lockProxyPath, proxyPath, MAXPATHLEN)) + ){ + rc = SQLITE_OK; + }else{ + rc = switchLockProxyPath(pFile, proxyPath); + } + }else{ + /* turn on proxy file locking */ + rc = proxyTransformUnixFile(pFile, proxyPath); + } + } + return rc; + } + default: { + assert( 0 ); /* The call assures that only valid opcodes are sent */ + } + } + /*NOTREACHED*/ + return SQLITE_ERROR; } -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ /* -** Get information to seed the random number generator. The seed -** is written into the buffer zBuf[256]. The calling function must -** supply a sufficiently large buffer. +** Within this division (the proxying locking implementation) the procedures +** above this point are all utilities. The lock-related methods of the +** proxy-locking sqlite3_io_method object follow. */ -int sqlite3UnixRandomSeed(char *zBuf){ - /* We have to initialize zBuf to prevent valgrind from reporting - ** errors. The reports issued by valgrind are incorrect - we would - ** prefer that the randomness be increased by making use of the - ** uninitialized space in zBuf - but valgrind errors tend to worry - ** some users. Rather than argue, it seems easier just to initialize - ** the whole array and silence valgrind, even if that means less randomness - ** in the random seed. - ** - ** When testing, initializing zBuf[] to zero is all we do. That means - ** that we always use the same random number sequence. This makes the - ** tests repeatable. - */ - memset(zBuf, 0, 256); -#if !defined(SQLITE_TEST) - { - int pid, fd; - fd = open("/dev/urandom", O_RDONLY); - if( fd<0 ){ - time_t t; - time(&t); - memcpy(zBuf, &t, sizeof(t)); - pid = getpid(); - memcpy(&zBuf[sizeof(time_t)], &pid, sizeof(pid)); - }else{ - read(fd, zBuf, 256); - close(fd); - } - } -#endif - return SQLITE_OK; -} + /* -** Sleep for a little while. Return the amount of time slept. -** The argument is the number of milliseconds we want to sleep. +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. 
The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ -int sqlite3UnixSleep(int ms){ -#if defined(HAVE_USLEEP) && HAVE_USLEEP - usleep(ms*1000); - return ms; -#else - sleep((ms+999)/1000); - return 1000*((ms+999)/1000); -#endif +static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + unixFile *proxy = pCtx->lockProxy; + return proxy->pMethod->xCheckReservedLock((sqlite3_file*)proxy, pResOut); + } + return rc; } /* -** Static variables used for thread synchronization. -** -** inMutex the nesting depth of the recursive mutex. The thread -** holding mutexMain can read this variable at any time. -** But is must hold mutexAux to change this variable. Other -** threads must hold mutexAux to read the variable and can -** never write. -** -** mutexOwner The thread id of the thread holding mutexMain. Same -** access rules as for inMutex. -** -** mutexOwnerValid True if the value in mutexOwner is valid. The same -** access rules apply as for inMutex. -** -** mutexMain The main mutex. Hold this mutex in order to get exclusive -** access to SQLite data structures. -** -** mutexAux An auxiliary mutex needed to access variables defined above. +** Lock the file with the lock specified by parameter locktype - one +** of the following: ** -** Mutexes are always acquired in this order: mutexMain mutexAux. It -** is not necessary to acquire mutexMain in order to get mutexAux - just -** do not attempt to acquire them in the reverse order: mutexAux mutexMain. -** Either get the mutexes with mutexMain first or get mutexAux only. +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK ** -** When running on a platform where the three variables inMutex, mutexOwner, -** and mutexOwnerValid can be set atomically, the mutexAux is not required. -** On many systems, all three are 32-bit integers and writing to a 32-bit -** integer is atomic. I think. But there are no guarantees. So it seems -** safer to protect them using mutexAux. -*/ -static int inMutex = 0; -#ifdef SQLITE_UNIX_THREADS -static pthread_t mutexOwner; /* Thread holding mutexMain */ -static int mutexOwnerValid = 0; /* True if mutexOwner is valid */ -static pthread_mutex_t mutexMain = PTHREAD_MUTEX_INITIALIZER; /* The mutex */ -static pthread_mutex_t mutexAux = PTHREAD_MUTEX_INITIALIZER; /* Aux mutex */ -#endif - -/* -** The following pair of routine implement mutual exclusion for -** multi-threaded processes. Only a single thread is allowed to -** executed code that is surrounded by EnterMutex() and LeaveMutex(). +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: ** -** SQLite uses only a single Mutex. There is not much critical -** code and what little there is executes quickly and without blocking. +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE ** -** As of version 3.3.2, this mutex must be recursive. +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. 
*/ -void sqlite3UnixEnterMutex(){ -#ifdef SQLITE_UNIX_THREADS - pthread_mutex_lock(&mutexAux); - if( !mutexOwnerValid || !pthread_equal(mutexOwner, pthread_self()) ){ - pthread_mutex_unlock(&mutexAux); - pthread_mutex_lock(&mutexMain); - assert( inMutex==0 ); - assert( !mutexOwnerValid ); - pthread_mutex_lock(&mutexAux); - mutexOwner = pthread_self(); - mutexOwnerValid = 1; - } - inMutex++; - pthread_mutex_unlock(&mutexAux); -#else - inMutex++; -#endif -} -void sqlite3UnixLeaveMutex(){ - assert( inMutex>0 ); -#ifdef SQLITE_UNIX_THREADS - pthread_mutex_lock(&mutexAux); - inMutex--; - assert( pthread_equal(mutexOwner, pthread_self()) ); - if( inMutex==0 ){ - assert( mutexOwnerValid ); - mutexOwnerValid = 0; - pthread_mutex_unlock(&mutexMain); +static int proxyLock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + unixFile *proxy = pCtx->lockProxy; + rc = proxy->pMethod->xLock((sqlite3_file*)proxy, locktype); + pFile->locktype = proxy->locktype; } - pthread_mutex_unlock(&mutexAux); -#else - inMutex--; -#endif + return rc; } + /* -** Return TRUE if the mutex is currently held. +** Lower the locking level on file descriptor pFile to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. ** -** If the thisThrd parameter is true, return true only if the -** calling thread holds the mutex. If the parameter is false, return -** true if any thread holds the mutex. +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. */ -int sqlite3UnixInMutex(int thisThrd){ -#ifdef SQLITE_UNIX_THREADS - int rc; - pthread_mutex_lock(&mutexAux); - rc = inMutex>0 && (thisThrd==0 || pthread_equal(mutexOwner,pthread_self())); - pthread_mutex_unlock(&mutexAux); +static int proxyUnlock(sqlite3_file *id, int locktype) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + unixFile *proxy = pCtx->lockProxy; + rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, locktype); + pFile->locktype = proxy->locktype; + } return rc; -#else - return inMutex>0; -#endif } /* -** Remember the number of thread-specific-data blocks allocated. -** Use this to verify that we are not leaking thread-specific-data. -** Ticket #1601 +** Close a file that uses proxy locks. */ -#ifdef SQLITE_TEST -int sqlite3_tsd_count = 0; -# ifdef SQLITE_UNIX_THREADS - static pthread_mutex_t tsd_counter_mutex = PTHREAD_MUTEX_INITIALIZER; -# define TSD_COUNTER(N) \ - pthread_mutex_lock(&tsd_counter_mutex); \ - sqlite3_tsd_count += N; \ - pthread_mutex_unlock(&tsd_counter_mutex); -# else -# define TSD_COUNTER(N) sqlite3_tsd_count += N -# endif -#else -# define TSD_COUNTER(N) /* no-op */ -#endif - -/* -** If called with allocateFlag>0, then return a pointer to thread -** specific data for the current thread. Allocate and zero the -** thread-specific data if it does not already exist. -** -** If called with allocateFlag==0, then check the current thread -** specific data. Return it if it exists. If it does not exist, -** then return NULL. -** -** If called with allocateFlag<0, check to see if the thread specific -** data is allocated and is all zero. If it is then deallocate it. -** Return a pointer to the thread specific data or NULL if it is -** unallocated or gets deallocated. 
-*/ -ThreadData *sqlite3UnixThreadSpecificData(int allocateFlag){ - static const ThreadData zeroData = {0}; /* Initializer to silence warnings - ** from broken compilers */ -#ifdef SQLITE_UNIX_THREADS - static pthread_key_t key; - static int keyInit = 0; - ThreadData *pTsd; - - if( !keyInit ){ - sqlite3OsEnterMutex(); - if( !keyInit ){ - int rc; - rc = pthread_key_create(&key, 0); - if( rc ){ - sqlite3OsLeaveMutex(); - return 0; - } - keyInit = 1; - } - sqlite3OsLeaveMutex(); - } - - pTsd = pthread_getspecific(key); - if( allocateFlag>0 ){ - if( pTsd==0 ){ - if( !sqlite3TestMallocFail() ){ - pTsd = sqlite3OsMalloc(sizeof(zeroData)); - } -#ifdef SQLITE_MEMDEBUG - sqlite3_isFail = 0; -#endif - if( pTsd ){ - *pTsd = zeroData; - pthread_setspecific(key, pTsd); - TSD_COUNTER(+1); - } +static int proxyClose(sqlite3_file *id) { + if( id ){ + unixFile *pFile = (unixFile*)id; + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + unixFile *lockProxy = pCtx->lockProxy; + unixFile *conchFile = pCtx->conchFile; + int rc = SQLITE_OK; + + if( lockProxy ){ + rc = lockProxy->pMethod->xUnlock((sqlite3_file*)lockProxy, NO_LOCK); + if( rc ) return rc; + rc = lockProxy->pMethod->xClose((sqlite3_file*)lockProxy); + if( rc ) return rc; + sqlite3_free(lockProxy); + pCtx->lockProxy = 0; } - }else if( pTsd!=0 && allocateFlag<0 - && memcmp(pTsd, &zeroData, sizeof(ThreadData))==0 ){ - sqlite3OsFree(pTsd); - pthread_setspecific(key, 0); - TSD_COUNTER(-1); - pTsd = 0; - } - return pTsd; -#else - static ThreadData *pTsd = 0; - if( allocateFlag>0 ){ - if( pTsd==0 ){ - if( !sqlite3TestMallocFail() ){ - pTsd = sqlite3OsMalloc( sizeof(zeroData) ); - } -#ifdef SQLITE_MEMDEBUG - sqlite3_isFail = 0; -#endif - if( pTsd ){ - *pTsd = zeroData; - TSD_COUNTER(+1); + if( conchFile ){ + if( pCtx->conchHeld ){ + rc = proxyReleaseConch(pFile); + if( rc ) return rc; } + rc = conchFile->pMethod->xClose((sqlite3_file*)conchFile); + if( rc ) return rc; + sqlite3_free(conchFile); } - }else if( pTsd!=0 && allocateFlag<0 - && memcmp(pTsd, &zeroData, sizeof(ThreadData))==0 ){ - sqlite3OsFree(pTsd); - TSD_COUNTER(-1); - pTsd = 0; + sqlite3_free(pCtx->lockProxyPath); + sqlite3_free(pCtx->conchFilePath); + sqlite3_free(pCtx->dbPath); + /* restore the original locking context and pMethod then close it */ + pFile->lockingContext = pCtx->oldLockingContext; + pFile->pMethod = pCtx->pOldMethod; + sqlite3_free(pCtx); + return pFile->pMethod->xClose(id); } - return pTsd; -#endif + return SQLITE_OK; } -/* -** The following variable, if set to a non-zero value, becomes the result -** returned from sqlite3OsCurrentTime(). This is used for testing. -*/ -#ifdef SQLITE_TEST -int sqlite3_current_time = 0; -#endif + +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ /* -** Find the current time (in Universal Coordinated Time). Write the -** current time and date as a Julian Day number into *prNow and -** return 0. Return 1 if the time and date cannot be found. -*/ -int sqlite3UnixCurrentTime(double *prNow){ -#ifdef NO_GETTOD - time_t t; - time(&t); - *prNow = t/86400.0 + 2440587.5; +** The proxy locking style is intended for use with AFP filesystems. +** And since AFP is only supported on MacOSX, the proxy locking is also +** restricted to MacOSX. +** +** +******************* End of the proxy lock implementation ********************** +******************************************************************************/ + +/* +** Initialize the operating system interface. 
+** +** This routine registers all VFS implementations for unix-like operating +** systems. This routine, and the sqlite3_os_end() routine that follows, +** should be the only routines in this file that are visible from other +** files. +** +** This routine is called once during SQLite initialization and by a +** single thread. The memory allocation and mutex subsystems have not +** necessarily been initialized when this routine is called, and so they +** should not be used. +*/ +int sqlite3_os_init(void){ + /* + ** The following macro defines an initializer for an sqlite3_vfs object. + ** The name of the VFS is NAME. The pAppData is a pointer to a pointer + ** to the "finder" function. (pAppData is a pointer to a pointer because + ** silly C90 rules prohibit a void* from being cast to a function pointer + ** and so we have to go through the intermediate pointer to avoid problems + ** when compiling with -pedantic-errors on GCC.) + ** + ** The FINDER parameter to this macro is the name of the pointer to the + ** finder-function. The finder-function returns a pointer to the + ** sqlite_io_methods object that implements the desired locking + ** behaviors. See the division above that contains the IOMETHODS + ** macro for addition information on finder-functions. + ** + ** Most finders simply return a pointer to a fixed sqlite3_io_methods + ** object. But the "autolockIoFinder" available on MacOSX does a little + ** more than that; it looks at the filesystem type that hosts the + ** database file and tries to choose an locking method appropriate for + ** that filesystem time. + */ + #define UNIXVFS(VFSNAME, FINDER) { \ + 1, /* iVersion */ \ + sizeof(unixFile), /* szOsFile */ \ + MAX_PATHNAME, /* mxPathname */ \ + 0, /* pNext */ \ + VFSNAME, /* zName */ \ + (void*)&FINDER, /* pAppData */ \ + unixOpen, /* xOpen */ \ + unixDelete, /* xDelete */ \ + unixAccess, /* xAccess */ \ + unixFullPathname, /* xFullPathname */ \ + unixDlOpen, /* xDlOpen */ \ + unixDlError, /* xDlError */ \ + unixDlSym, /* xDlSym */ \ + unixDlClose, /* xDlClose */ \ + unixRandomness, /* xRandomness */ \ + unixSleep, /* xSleep */ \ + unixCurrentTime, /* xCurrentTime */ \ + unixGetLastError /* xGetLastError */ \ + } + + /* + ** All default VFSes for unix are contained in the following array. + ** + ** Note that the sqlite3_vfs.pNext field of the VFS object is modified + ** by the SQLite core when the VFS is registered. So the following + ** array cannot be const. 
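+  **
+  ** (Editor's illustrative sketch, not part of the original source.)
+  ** Once registered, any of these VFSes can be requested per-connection
+  ** by name, for example:
+  **
+  **   sqlite3 *db;
+  **   sqlite3_open_v2("test.db", &db,
+  **                   SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
+  **                   "unix-dotfile");
+  **
+  ** or one of them can be made the process-wide default with
+  ** sqlite3_vfs_register(sqlite3_vfs_find("unix-none"), 1).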
+ */ + static sqlite3_vfs aVfs[] = { +#if SQLITE_ENABLE_LOCKING_STYLE && (OS_VXWORKS || defined(__APPLE__)) + UNIXVFS("unix", autolockIoFinder ), #else - struct timeval sNow; - gettimeofday(&sNow, 0); - *prNow = 2440587.5 + sNow.tv_sec/86400.0 + sNow.tv_usec/86400000000.0; + UNIXVFS("unix", posixIoFinder ), #endif -#ifdef SQLITE_TEST - if( sqlite3_current_time ){ - *prNow = sqlite3_current_time/86400.0 + 2440587.5; - } + UNIXVFS("unix-none", nolockIoFinder ), + UNIXVFS("unix-dotfile", dotlockIoFinder ), +#if OS_VXWORKS + UNIXVFS("unix-namedsem", semIoFinder ), +#endif +#if SQLITE_ENABLE_LOCKING_STYLE + UNIXVFS("unix-posix", posixIoFinder ), +#if !OS_VXWORKS + UNIXVFS("unix-flock", flockIoFinder ), +#endif +#endif +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + UNIXVFS("unix-afp", afpIoFinder ), + UNIXVFS("unix-proxy", proxyIoFinder ), #endif - return 0; + }; + unsigned int i; /* Loop counter */ + + /* Register all VFSes defined in the aVfs[] array */ + for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ + sqlite3_vfs_register(&aVfs[i], i==0); + } + return SQLITE_OK; } -#endif /* OS_UNIX */ +/* +** Shutdown the operating system interface. +** +** Some operating systems might need to do some cleanup in this routine, +** to release dynamically allocated objects. But not on unix. +** This routine is a no-op for unix. +*/ +int sqlite3_os_end(void){ + return SQLITE_OK; +} + +#endif /* SQLITE_OS_UNIX */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/os_win.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/os_win.c --- sqlite3-3.4.2/src/os_win.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/os_win.c 2009-06-25 12:24:38.000000000 +0100 @@ -11,10 +11,38 @@ ****************************************************************************** ** ** This file contains code that is specific to windows. +** +** $Id: os_win.c,v 1.156 2009/04/23 19:08:33 shane Exp $ */ #include "sqliteInt.h" -#include "os.h" -#if OS_WIN /* This file is used for windows only */ +#if SQLITE_OS_WIN /* This file is used for windows only */ + + +/* +** A Note About Memory Allocation: +** +** This driver uses malloc()/free() directly rather than going through +** the SQLite-wrappers sqlite3_malloc()/sqlite3_free(). Those wrappers +** are designed for use on embedded systems where memory is scarce and +** malloc failures happen frequently. Win32 does not typically run on +** embedded systems, and when it does the developers normally have bigger +** problems to worry about than running out of memory. So there is not +** a compelling need to use the wrappers. +** +** But there is a good reason to not use the wrappers. If we use the +** wrappers then we will get simulated malloc() failures within this +** driver. And that causes all kinds of problems for our tests. We +** could enhance SQLite to deal with simulated malloc failures within +** the OS driver, but the code to deal with those failure would not +** be exercised on Linux (which does not need to malloc() in the driver) +** and so we would have difficulty writing coverage tests for that +** code. Better to leave the code out, we think. +** +** The point of this discussion is as follows: When creating a new +** OS layer for an embedded system, if you use this file as an example, +** avoid the use of malloc()/free(). Those routines work ok on windows +** desktops but not so well in embedded systems. +*/ #include @@ -35,21 +63,26 @@ #include "os_common.h" /* +** Some microsoft compilers lack this definition. 
+*/ +#ifndef INVALID_FILE_ATTRIBUTES +# define INVALID_FILE_ATTRIBUTES ((DWORD)-1) +#endif + +/* ** Determine if we are dealing with WindowsCE - which has a much ** reduced API. */ -#if defined(_WIN32_WCE) -# define OS_WINCE 1 +#if SQLITE_OS_WINCE # define AreFileApisANSI() 1 -#else -# define OS_WINCE 0 +# define GetDiskFreeSpaceW() 0 #endif /* ** WinCE lacks native support for file locking so we have to fake it ** with some code of our own. */ -#if OS_WINCE +#if SQLITE_OS_WINCE typedef struct winceLock { int nReaders; /* Number of reader locks obtained */ BOOL bPending; /* Indicates a pending lock has been obtained */ @@ -59,16 +92,18 @@ #endif /* -** The winFile structure is a subclass of OsFile specific to the win32 +** The winFile structure is a subclass of sqlite3_file* specific to the win32 ** portability layer. */ typedef struct winFile winFile; struct winFile { - IoMethod const *pMethod;/* Must be first */ + const sqlite3_io_methods *pMethod;/* Must be first */ HANDLE h; /* Handle for accessing the file */ unsigned char locktype; /* Type of lock currently held on this file */ short sharedLockByte; /* Randomly chosen byte used as a shared lock */ -#if OS_WINCE + DWORD lastErrno; /* The Windows errno from the last I/O error */ + DWORD sectorSize; /* Sector size of the device file is on */ +#if SQLITE_OS_WINCE WCHAR *zDeleteOnClose; /* Name of file to delete when closing */ HANDLE hMutex; /* Mutex used to control access to shared lock */ HANDLE hShared; /* Shared memory segment used for locking */ @@ -77,13 +112,13 @@ #endif }; - /* -** Do not include any of the File I/O interface procedures if the -** SQLITE_OMIT_DISKIO macro is defined (indicating that there database -** will be in-memory only) +** Forward prototypes. */ -#ifndef SQLITE_OMIT_DISKIO +static int getSectorSize( + sqlite3_vfs *pVfs, + const char *zRelative /* UTF-8 file name */ +); /* ** The following variable is (normally) set once and never changes @@ -97,7 +132,11 @@ ** In order to facilitate testing on a WinNT system, the test fixture ** can manually set this value to 1 to emulate Win98 behavior. */ +#ifdef SQLITE_TEST int sqlite3_os_type = 0; +#else +static int sqlite3_os_type = 0; +#endif /* ** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, @@ -105,12 +144,12 @@ ** ** Here is an interesting observation: Win95, Win98, and WinME lack ** the LockFileEx() API. But we can still statically link against that -** API as long as we don't call it win running Win95/98/ME. A call to +** API as long as we don't call it when running Win95/98/ME. A call to ** this routine is used to determine if the host is Win95/98/ME or ** WinNT/2K/XP so that we will know whether or not we can safely call ** the LockFileEx() API. */ -#if OS_WINCE +#if SQLITE_OS_WINCE # define isNT() (1) #else static int isNT(void){ @@ -122,25 +161,25 @@ } return sqlite3_os_type==2; } -#endif /* OS_WINCE */ +#endif /* SQLITE_OS_WINCE */ /* ** Convert a UTF-8 string to microsoft unicode (UTF-16?). ** -** Space to hold the returned string is obtained from sqliteMalloc. +** Space to hold the returned string is obtained from malloc. 
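+**
+** A minimal usage sketch (editor's illustration, not in the original
+** source; the caller owns and must free() the returned buffer):
+**
+**   WCHAR *zWide = utf8ToUnicode("c:/temp/test.db");
+**   if( zWide ){
+**     HANDLE h = CreateFileW(zWide, GENERIC_READ, FILE_SHARE_READ, NULL,
+**                            OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+**     /* ... use h, then CloseHandle(h) ... */
+**     free(zWide);
+**   }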
*/ static WCHAR *utf8ToUnicode(const char *zFilename){ int nChar; WCHAR *zWideFilename; nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); - zWideFilename = sqliteMalloc( nChar*sizeof(zWideFilename[0]) ); + zWideFilename = malloc( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ){ return 0; } nChar = MultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ){ - sqliteFree(zWideFilename); + free(zWideFilename); zWideFilename = 0; } return zWideFilename; @@ -148,21 +187,21 @@ /* ** Convert microsoft unicode to UTF-8. Space to hold the returned string is -** obtained from sqliteMalloc(). +** obtained from malloc(). */ static char *unicodeToUtf8(const WCHAR *zWideFilename){ int nByte; char *zFilename; nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); - zFilename = sqliteMalloc( nByte ); + zFilename = malloc( nByte ); if( zFilename==0 ){ return 0; } nByte = WideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ - sqliteFree(zFilename); + free(zFilename); zFilename = 0; } return zFilename; @@ -173,7 +212,7 @@ ** current codepage settings for file apis. ** ** Space to hold the returned string is obtained -** from sqliteMalloc. +** from malloc. */ static WCHAR *mbcsToUnicode(const char *zFilename){ int nByte; @@ -181,13 +220,13 @@ int codepage = AreFileApisANSI() ? CP_ACP : CP_OEMCP; nByte = MultiByteToWideChar(codepage, 0, zFilename, -1, NULL,0)*sizeof(WCHAR); - zMbcsFilename = sqliteMalloc( nByte*sizeof(zMbcsFilename[0]) ); + zMbcsFilename = malloc( nByte*sizeof(zMbcsFilename[0]) ); if( zMbcsFilename==0 ){ return 0; } nByte = MultiByteToWideChar(codepage, 0, zFilename, -1, zMbcsFilename, nByte); if( nByte==0 ){ - sqliteFree(zMbcsFilename); + free(zMbcsFilename); zMbcsFilename = 0; } return zMbcsFilename; @@ -198,7 +237,7 @@ ** user's Ansi codepage. ** ** Space to hold the returned string is obtained from -** sqliteMalloc(). +** malloc(). */ static char *unicodeToMbcs(const WCHAR *zWideFilename){ int nByte; @@ -206,14 +245,14 @@ int codepage = AreFileApisANSI() ? CP_ACP : CP_OEMCP; nByte = WideCharToMultiByte(codepage, 0, zWideFilename, -1, 0, 0, 0, 0); - zFilename = sqliteMalloc( nByte ); + zFilename = malloc( nByte ); if( zFilename==0 ){ return 0; } nByte = WideCharToMultiByte(codepage, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ - sqliteFree(zFilename); + free(zFilename); zFilename = 0; } return zFilename; @@ -221,9 +260,9 @@ /* ** Convert multibyte character string to UTF-8. Space to hold the -** returned string is obtained from sqliteMalloc(). +** returned string is obtained from malloc(). */ -static char *mbcsToUtf8(const char *zFilename){ +char *sqlite3_win32_mbcs_to_utf8(const char *zFilename){ char *zFilenameUtf8; WCHAR *zTmpWide; @@ -232,13 +271,13 @@ return 0; } zFilenameUtf8 = unicodeToUtf8(zTmpWide); - sqliteFree(zTmpWide); + free(zTmpWide); return zFilenameUtf8; } /* ** Convert UTF-8 to multibyte character string. Space to hold the -** returned string is obtained from sqliteMalloc(). +** returned string is obtained from malloc(). */ static char *utf8ToMbcs(const char *zFilename){ char *zFilenameMbcs; @@ -249,11 +288,11 @@ return 0; } zFilenameMbcs = unicodeToMbcs(zTmpWide); - sqliteFree(zTmpWide); + free(zTmpWide); return zFilenameMbcs; } -#if OS_WINCE +#if SQLITE_OS_WINCE /************************************************************************* ** This section contains code for WinCE only. 
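**
** (Editor's sketch, not part of the original source.)  The emulation is
** built on a named mutex plus a small shared-memory block that every
** process opening the same database maps; conceptually, each lock change
** is guarded like this:
**
**   HANDLE hMutex = CreateMutexW(NULL, FALSE, zName);  /* zName derived  */
**   WaitForSingleObject(hMutex, INFINITE);             /* from the file  */
**   /* ... read/update the shared winceLock counters ... */
**   ReleaseMutex(hMutex);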
*/ @@ -267,7 +306,7 @@ static struct tm y; FILETIME uTm, lTm; SYSTEMTIME pTm; - i64 t64; + sqlite3_int64 t64; t64 = *t; t64 = (t64 + 11644473600)*10000000; uTm.dwLowDateTime = t64 & 0xFFFFFFFF; @@ -329,7 +368,8 @@ /* Create/open the named mutex */ pFile->hMutex = CreateMutexW(NULL, FALSE, zName); if (!pFile->hMutex){ - sqliteFree(zName); + pFile->lastErrno = GetLastError(); + free(zName); return FALSE; } @@ -351,7 +391,7 @@ bInit = FALSE; } - sqliteFree(zName); + free(zName); /* If we succeeded in making the shared memory handle, map it. */ if (pFile->hShared){ @@ -359,6 +399,7 @@ FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock)); /* If mapping failed, close the shared memory handle and erase it */ if (!pFile->shared){ + pFile->lastErrno = GetLastError(); CloseHandle(pFile->hShared); pFile->hShared = NULL; } @@ -408,12 +449,6 @@ UnmapViewOfFile(pFile->shared); CloseHandle(pFile->hShared); - if( pFile->zDeleteOnClose ){ - DeleteFileW(pFile->zDeleteOnClose); - sqliteFree(pFile->zDeleteOnClose); - pFile->zDeleteOnClose = 0; - } - /* Done with the mutex */ winceMutexRelease(pFile->hMutex); CloseHandle(pFile->hMutex); @@ -563,402 +598,12 @@ /* ** End of the special code for wince *****************************************************************************/ -#endif /* OS_WINCE */ - -/* -** Convert a UTF-8 filename into whatever form the underlying -** operating system wants filenames in. Space to hold the result -** is obtained from sqliteMalloc and must be freed by the calling -** function. -*/ -static void *convertUtf8Filename(const char *zFilename){ - void *zConverted = 0; - if( isNT() ){ - zConverted = utf8ToUnicode(zFilename); - }else{ - zConverted = utf8ToMbcs(zFilename); - } - /* caller will handle out of memory */ - return zConverted; -} - -/* -** Delete the named file. -** -** Note that windows does not allow a file to be deleted if some other -** process has it open. Sometimes a virus scanner or indexing program -** will open a journal file shortly after it is created in order to do -** whatever it is it does. While this other process is holding the -** file open, we will be unable to delete it. To work around this -** problem, we delay 100 milliseconds and try to delete again. Up -** to MX_DELETION_ATTEMPTs deletion attempts are run before giving -** up and returning an error. -*/ -#define MX_DELETION_ATTEMPTS 3 -int sqlite3WinDelete(const char *zFilename){ - int cnt = 0; - int rc; - void *zConverted = convertUtf8Filename(zFilename); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - SimulateIOError(return SQLITE_IOERR_DELETE); - if( isNT() ){ - do{ - rc = DeleteFileW(zConverted); - }while( rc==0 && GetFileAttributesW(zConverted)!=0xffffffff - && cnt++ < MX_DELETION_ATTEMPTS && (Sleep(100), 1) ); - }else{ -#if OS_WINCE - return SQLITE_NOMEM; -#else - do{ - rc = DeleteFileA(zConverted); - }while( rc==0 && GetFileAttributesA(zConverted)!=0xffffffff - && cnt++ < MX_DELETION_ATTEMPTS && (Sleep(100), 1) ); -#endif - } - sqliteFree(zConverted); - OSTRACE2("DELETE \"%s\"\n", zFilename); - return rc!=0 ? SQLITE_OK : SQLITE_IOERR; -} - -/* -** Return TRUE if the named file exists. 
-*/ -int sqlite3WinFileExists(const char *zFilename){ - int exists = 0; - void *zConverted = convertUtf8Filename(zFilename); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - if( isNT() ){ - exists = GetFileAttributesW((WCHAR*)zConverted) != 0xffffffff; - }else{ -#if OS_WINCE - return SQLITE_NOMEM; -#else - exists = GetFileAttributesA((char*)zConverted) != 0xffffffff; -#endif - } - sqliteFree(zConverted); - return exists; -} - -/* Forward declaration */ -static int allocateWinFile(winFile *pInit, OsFile **pId); - -/* -** Attempt to open a file for both reading and writing. If that -** fails, try opening it read-only. If the file does not exist, -** try to create it. -** -** On success, a handle for the open file is written to *id -** and *pReadonly is set to 0 if the file was opened for reading and -** writing or 1 if the file was opened read-only. The function returns -** SQLITE_OK. -** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id and *pReadonly unchanged. -*/ -int sqlite3WinOpenReadWrite( - const char *zFilename, - OsFile **pId, - int *pReadonly -){ - winFile f; - HANDLE h; - void *zConverted = convertUtf8Filename(zFilename); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - assert( *pId==0 ); - - if( isNT() ){ - h = CreateFileW((WCHAR*)zConverted, - GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, - NULL, - OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); - if( h==INVALID_HANDLE_VALUE ){ - h = CreateFileW((WCHAR*)zConverted, - GENERIC_READ, - FILE_SHARE_READ | FILE_SHARE_WRITE, - NULL, - OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); - if( h==INVALID_HANDLE_VALUE ){ - sqliteFree(zConverted); - return SQLITE_CANTOPEN; - } - *pReadonly = 1; - }else{ - *pReadonly = 0; - } -#if OS_WINCE - if (!winceCreateLock(zFilename, &f)){ - CloseHandle(h); - sqliteFree(zConverted); - return SQLITE_CANTOPEN; - } -#endif - }else{ -#if OS_WINCE - return SQLITE_NOMEM; -#else - h = CreateFileA((char*)zConverted, - GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, - NULL, - OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); - if( h==INVALID_HANDLE_VALUE ){ - h = CreateFileA((char*)zConverted, - GENERIC_READ, - FILE_SHARE_READ | FILE_SHARE_WRITE, - NULL, - OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); - if( h==INVALID_HANDLE_VALUE ){ - sqliteFree(zConverted); - return SQLITE_CANTOPEN; - } - *pReadonly = 1; - }else{ - *pReadonly = 0; - } -#endif /* OS_WINCE */ - } - - sqliteFree(zConverted); - - f.h = h; -#if OS_WINCE - f.zDeleteOnClose = 0; -#endif - OSTRACE3("OPEN R/W %d \"%s\"\n", h, zFilename); - return allocateWinFile(&f, pId); -} - - -/* -** Attempt to open a new file for exclusive access by this process. -** The file will be opened for both reading and writing. To avoid -** a potential security problem, we do not allow the file to have -** previously existed. Nor do we allow the file to be a symbolic -** link. -** -** If delFlag is true, then make arrangements to automatically delete -** the file when it is closed. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -** -** Sometimes if we have just deleted a prior journal file, windows -** will fail to open a new one because there is a "pending delete". -** To work around this bug, we pause for 100 milliseconds and attempt -** a second open after the first one fails. 
The whole operation only -** fails if both open attempts are unsuccessful. -*/ -int sqlite3WinOpenExclusive(const char *zFilename, OsFile **pId, int delFlag){ - winFile f; - HANDLE h; - DWORD fileflags; - void *zConverted = convertUtf8Filename(zFilename); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - assert( *pId == 0 ); - fileflags = FILE_FLAG_RANDOM_ACCESS; -#if !OS_WINCE - if( delFlag ){ - fileflags |= FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE; - } -#endif - if( isNT() ){ - int cnt = 0; - do{ - h = CreateFileW((WCHAR*)zConverted, - GENERIC_READ | GENERIC_WRITE, - 0, - NULL, - CREATE_ALWAYS, - fileflags, - NULL - ); - }while( h==INVALID_HANDLE_VALUE && cnt++ < 2 && (Sleep(100), 1) ); - }else{ -#if OS_WINCE - return SQLITE_NOMEM; -#else - int cnt = 0; - do{ - h = CreateFileA((char*)zConverted, - GENERIC_READ | GENERIC_WRITE, - 0, - NULL, - CREATE_ALWAYS, - fileflags, - NULL - ); - }while( h==INVALID_HANDLE_VALUE && cnt++ < 2 && (Sleep(100), 1) ); -#endif /* OS_WINCE */ - } -#if OS_WINCE - if( delFlag && h!=INVALID_HANDLE_VALUE ){ - f.zDeleteOnClose = zConverted; - zConverted = 0; - } - f.hMutex = NULL; -#endif - sqliteFree(zConverted); - if( h==INVALID_HANDLE_VALUE ){ - return SQLITE_CANTOPEN; - } - f.h = h; - OSTRACE3("OPEN EX %d \"%s\"\n", h, zFilename); - return allocateWinFile(&f, pId); -} - -/* -** Attempt to open a new file for read-only access. -** -** On success, write the file handle into *id and return SQLITE_OK. -** -** On failure, return SQLITE_CANTOPEN. -*/ -int sqlite3WinOpenReadOnly(const char *zFilename, OsFile **pId){ - winFile f; - HANDLE h; - void *zConverted = convertUtf8Filename(zFilename); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - assert( *pId==0 ); - if( isNT() ){ - h = CreateFileW((WCHAR*)zConverted, - GENERIC_READ, - 0, - NULL, - OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); - }else{ -#if OS_WINCE - return SQLITE_NOMEM; -#else - h = CreateFileA((char*)zConverted, - GENERIC_READ, - 0, - NULL, - OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS, - NULL - ); -#endif - } - sqliteFree(zConverted); - if( h==INVALID_HANDLE_VALUE ){ - return SQLITE_CANTOPEN; - } - f.h = h; -#if OS_WINCE - f.zDeleteOnClose = 0; - f.hMutex = NULL; -#endif - OSTRACE3("OPEN RO %d \"%s\"\n", h, zFilename); - return allocateWinFile(&f, pId); -} - -/* -** Attempt to open a file descriptor for the directory that contains a -** file. This file descriptor can be used to fsync() the directory -** in order to make sure the creation of a new file is actually written -** to disk. -** -** This routine is only meaningful for Unix. It is a no-op under -** windows since windows does not support hard links. -** -** On success, a handle for a previously open file is at *id is -** updated with the new directory file descriptor and SQLITE_OK is -** returned. -** -** On failure, the function returns SQLITE_CANTOPEN and leaves -** *id unchanged. -*/ -static int winOpenDirectory( - OsFile *id, - const char *zDirname -){ - return SQLITE_OK; -} +#endif /* SQLITE_OS_WINCE */ -/* -** Create a temporary file name in zBuf. zBuf must be big enough to -** hold at least SQLITE_TEMPNAME_SIZE characters. 
-*/ -int sqlite3WinTempFileName(char *zBuf){ - static char zChars[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789"; - int i, j; - char zTempPath[SQLITE_TEMPNAME_SIZE]; - if( sqlite3_temp_directory ){ - strncpy(zTempPath, sqlite3_temp_directory, SQLITE_TEMPNAME_SIZE-30); - zTempPath[SQLITE_TEMPNAME_SIZE-30] = 0; - }else if( isNT() ){ - char *zMulti; - WCHAR zWidePath[SQLITE_TEMPNAME_SIZE]; - GetTempPathW(SQLITE_TEMPNAME_SIZE-30, zWidePath); - zMulti = unicodeToUtf8(zWidePath); - if( zMulti ){ - strncpy(zTempPath, zMulti, SQLITE_TEMPNAME_SIZE-30); - zTempPath[SQLITE_TEMPNAME_SIZE-30] = 0; - sqliteFree(zMulti); - }else{ - return SQLITE_NOMEM; - } - }else{ - char *zUtf8; - char zMbcsPath[SQLITE_TEMPNAME_SIZE]; - GetTempPathA(SQLITE_TEMPNAME_SIZE-30, zMbcsPath); - zUtf8 = mbcsToUtf8(zMbcsPath); - if( zUtf8 ){ - strncpy(zTempPath, zUtf8, SQLITE_TEMPNAME_SIZE-30); - zTempPath[SQLITE_TEMPNAME_SIZE-30] = 0; - sqliteFree(zUtf8); - }else{ - return SQLITE_NOMEM; - } - } - for(i=strlen(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){} - zTempPath[i] = 0; - for(;;){ - sqlite3_snprintf(SQLITE_TEMPNAME_SIZE, zBuf, - "%s\\"TEMP_FILE_PREFIX, zTempPath); - j = strlen(zBuf); - sqlite3Randomness(15, &zBuf[j]); - for(i=0; i<15; i++, j++){ - zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; - } - zBuf[j] = 0; - if( !sqlite3OsFileExists(zBuf) ) break; - } - OSTRACE2("TEMP FILENAME: %s\n", zBuf); - return SQLITE_OK; -} +/***************************************************************************** +** The next group of routines implement the I/O methods specified +** by the sqlite3_io_methods object. +******************************************************************************/ /* ** Close a file. @@ -971,41 +616,75 @@ ** giving up and returning an error. */ #define MX_CLOSE_ATTEMPT 3 -static int winClose(OsFile **pId){ - winFile *pFile; - int rc = 1; - if( pId && (pFile = (winFile*)*pId)!=0 ){ - int rc, cnt = 0; - OSTRACE2("CLOSE %d\n", pFile->h); - do{ - rc = CloseHandle(pFile->h); - }while( rc==0 && cnt++ < MX_CLOSE_ATTEMPT && (Sleep(100), 1) ); -#if OS_WINCE - winceDestroyLock(pFile); -#endif - OpenCounter(-1); - sqliteFree(pFile); - *pId = 0; +static int winClose(sqlite3_file *id){ + int rc, cnt = 0; + winFile *pFile = (winFile*)id; + + assert( id!=0 ); + OSTRACE2("CLOSE %d\n", pFile->h); + do{ + rc = CloseHandle(pFile->h); + }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (Sleep(100), 1) ); +#if SQLITE_OS_WINCE +#define WINCE_DELETION_ATTEMPTS 3 + winceDestroyLock(pFile); + if( pFile->zDeleteOnClose ){ + int cnt = 0; + while( + DeleteFileW(pFile->zDeleteOnClose)==0 + && GetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff + && cnt++ < WINCE_DELETION_ATTEMPTS + ){ + Sleep(100); /* Wait a little before trying again */ + } + free(pFile->zDeleteOnClose); } +#endif + OpenCounter(-1); return rc ? SQLITE_OK : SQLITE_IOERR; } /* +** Some microsoft compilers lack this definition. +*/ +#ifndef INVALID_SET_FILE_POINTER +# define INVALID_SET_FILE_POINTER ((DWORD)-1) +#endif + +/* ** Read data from a file into a buffer. Return SQLITE_OK if all ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. 
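**
** (Editor's illustrative note, not part of the original source.)  winRead,
** winWrite, and winTruncate all seek with SetFilePointer(), which takes
** the 64-bit offset as two 32-bit halves:
**
**   LONG upperBits = (LONG)((offset>>32) & 0x7fffffff);
**   LONG lowerBits = (LONG)(offset & 0xffffffff);
**   SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN);
**
** INVALID_SET_FILE_POINTER alone is not an error: 0xffffffff is a valid
** low word for some offsets, so the code also checks GetLastError() for
** something other than NO_ERROR before failing.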
*/ -static int winRead(OsFile *id, void *pBuf, int amt){ +static int winRead( + sqlite3_file *id, /* File to read from */ + void *pBuf, /* Write content into this buffer */ + int amt, /* Number of bytes to read */ + sqlite3_int64 offset /* Begin reading at this offset */ +){ + LONG upperBits = (LONG)((offset>>32) & 0x7fffffff); + LONG lowerBits = (LONG)(offset & 0xffffffff); + DWORD rc; + winFile *pFile = (winFile*)id; + DWORD error; DWORD got; + assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_READ); - OSTRACE3("READ %d lock=%d\n", ((winFile*)id)->h, ((winFile*)id)->locktype); - if( !ReadFile(((winFile*)id)->h, pBuf, amt, &got, 0) ){ + OSTRACE3("READ %d lock=%d\n", pFile->h, pFile->locktype); + rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ + pFile->lastErrno = error; + return SQLITE_FULL; + } + if( !ReadFile(pFile->h, pBuf, amt, &got, 0) ){ + pFile->lastErrno = GetLastError(); return SQLITE_IOERR_READ; } if( got==(DWORD)amt ){ return SQLITE_OK; }else{ + /* Unread parts of the buffer must be zero-filled */ memset(&((char*)pBuf)[got], 0, amt-got); return SQLITE_IOERR_SHORT_READ; } @@ -1015,95 +694,133 @@ ** Write data from a buffer into a file. Return SQLITE_OK on success ** or some other error code on failure. */ -static int winWrite(OsFile *id, const void *pBuf, int amt){ - int rc = 0; - DWORD wrote; +static int winWrite( + sqlite3_file *id, /* File to write into */ + const void *pBuf, /* The bytes to be written */ + int amt, /* Number of bytes to write */ + sqlite3_int64 offset /* Offset into the file to begin writing at */ +){ + LONG upperBits = (LONG)((offset>>32) & 0x7fffffff); + LONG lowerBits = (LONG)(offset & 0xffffffff); + DWORD rc; + winFile *pFile = (winFile*)id; + DWORD error; + DWORD wrote = 0; + assert( id!=0 ); - SimulateIOError(return SQLITE_IOERR_READ); + SimulateIOError(return SQLITE_IOERR_WRITE); SimulateDiskfullError(return SQLITE_FULL); - OSTRACE3("WRITE %d lock=%d\n", ((winFile*)id)->h, ((winFile*)id)->locktype); + OSTRACE3("WRITE %d lock=%d\n", pFile->h, pFile->locktype); + rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ + pFile->lastErrno = error; + return SQLITE_FULL; + } assert( amt>0 ); - while( amt>0 && (rc = WriteFile(((winFile*)id)->h, pBuf, amt, &wrote, 0))!=0 - && wrote>0 ){ + while( + amt>0 + && (rc = WriteFile(pFile->h, pBuf, amt, &wrote, 0))!=0 + && wrote>0 + ){ amt -= wrote; pBuf = &((char*)pBuf)[wrote]; } if( !rc || amt>(int)wrote ){ + pFile->lastErrno = GetLastError(); return SQLITE_FULL; } return SQLITE_OK; } /* -** Some microsoft compilers lack this definition. -*/ -#ifndef INVALID_SET_FILE_POINTER -# define INVALID_SET_FILE_POINTER ((DWORD)-1) -#endif - -/* -** Move the read/write pointer in a file. 
+** Truncate an open file to a specified size */ -static int winSeek(OsFile *id, i64 offset){ - LONG upperBits = offset>>32; - LONG lowerBits = offset & 0xffffffff; +static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ + LONG upperBits = (LONG)((nByte>>32) & 0x7fffffff); + LONG lowerBits = (LONG)(nByte & 0xffffffff); DWORD rc; + winFile *pFile = (winFile*)id; + DWORD error; + assert( id!=0 ); -#ifdef SQLITE_TEST - if( offset ) SimulateDiskfullError(return SQLITE_FULL); -#endif - rc = SetFilePointer(((winFile*)id)->h, lowerBits, &upperBits, FILE_BEGIN); - OSTRACE3("SEEK %d %lld\n", ((winFile*)id)->h, offset); - if( rc==INVALID_SET_FILE_POINTER && GetLastError()!=NO_ERROR ){ - return SQLITE_FULL; + OSTRACE3("TRUNCATE %d %lld\n", pFile->h, nByte); + SimulateIOError(return SQLITE_IOERR_TRUNCATE); + rc = SetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + if( rc==INVALID_SET_FILE_POINTER && (error=GetLastError())!=NO_ERROR ){ + pFile->lastErrno = error; + return SQLITE_IOERR_TRUNCATE; + } + /* SetEndOfFile will fail if nByte is negative */ + if( !SetEndOfFile(pFile->h) ){ + pFile->lastErrno = GetLastError(); + return SQLITE_IOERR_TRUNCATE; } return SQLITE_OK; } +#ifdef SQLITE_TEST /* -** Make sure all writes to a particular file are committed to disk. +** Count the number of fullsyncs and normal syncs. This is used to test +** that syncs and fullsyncs are occuring at the right times. */ -static int winSync(OsFile *id, int dataOnly){ - assert( id!=0 ); - OSTRACE3("SYNC %d lock=%d\n", ((winFile*)id)->h, ((winFile*)id)->locktype); - if( FlushFileBuffers(((winFile*)id)->h) ){ - return SQLITE_OK; - }else{ - return SQLITE_IOERR; - } -} +int sqlite3_sync_count = 0; +int sqlite3_fullsync_count = 0; +#endif /* -** Sync the directory zDirname. This is a no-op on operating systems other -** than UNIX. +** Make sure all writes to a particular file are committed to disk. 
*/ -int sqlite3WinSyncDirectory(const char *zDirname){ - SimulateIOError(return SQLITE_IOERR_READ); - return SQLITE_OK; -} +static int winSync(sqlite3_file *id, int flags){ +#ifndef SQLITE_NO_SYNC + winFile *pFile = (winFile*)id; -/* -** Truncate an open file to a specified size -*/ -static int winTruncate(OsFile *id, i64 nByte){ - LONG upperBits = nByte>>32; assert( id!=0 ); - OSTRACE3("TRUNCATE %d %lld\n", ((winFile*)id)->h, nByte); - SimulateIOError(return SQLITE_IOERR_TRUNCATE); - SetFilePointer(((winFile*)id)->h, nByte, &upperBits, FILE_BEGIN); - SetEndOfFile(((winFile*)id)->h); - return SQLITE_OK; + OSTRACE3("SYNC %d lock=%d\n", pFile->h, pFile->locktype); +#else + UNUSED_PARAMETER(id); +#endif +#ifndef SQLITE_TEST + UNUSED_PARAMETER(flags); +#else + if( flags & SQLITE_SYNC_FULL ){ + sqlite3_fullsync_count++; + } + sqlite3_sync_count++; +#endif + /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a + ** no-op + */ +#ifdef SQLITE_NO_SYNC + return SQLITE_OK; +#else + if( FlushFileBuffers(pFile->h) ){ + return SQLITE_OK; + }else{ + pFile->lastErrno = GetLastError(); + return SQLITE_IOERR; + } +#endif } /* ** Determine the current size of a file in bytes */ -static int winFileSize(OsFile *id, i64 *pSize){ - DWORD upperBits, lowerBits; +static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){ + DWORD upperBits; + DWORD lowerBits; + winFile *pFile = (winFile*)id; + DWORD error; + assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_FSTAT); - lowerBits = GetFileSize(((winFile*)id)->h, &upperBits); - *pSize = (((i64)upperBits)<<32) + lowerBits; + lowerBits = GetFileSize(pFile->h, &upperBits); + if( (lowerBits == INVALID_FILE_SIZE) + && ((error = GetLastError()) != NO_ERROR) ) + { + pFile->lastErrno = error; + return SQLITE_IOERR_FSTAT; + } + *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits; return SQLITE_OK; } @@ -1119,19 +836,27 @@ ** Different API routines are called depending on whether or not this ** is Win95 or WinNT. */ -static int getReadLock(winFile *id){ +static int getReadLock(winFile *pFile){ int res; if( isNT() ){ OVERLAPPED ovlp; ovlp.Offset = SHARED_FIRST; ovlp.OffsetHigh = 0; ovlp.hEvent = 0; - res = LockFileEx(id->h, LOCKFILE_FAIL_IMMEDIATELY, 0, SHARED_SIZE,0,&ovlp); + res = LockFileEx(pFile->h, LOCKFILE_FAIL_IMMEDIATELY, + 0, SHARED_SIZE, 0, &ovlp); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +*/ +#if SQLITE_OS_WINCE==0 }else{ int lk; - sqlite3Randomness(sizeof(lk), &lk); - id->sharedLockByte = (lk & 0x7fffffff)%(SHARED_SIZE - 1); - res = LockFile(id->h, SHARED_FIRST+id->sharedLockByte, 0, 1, 0); + sqlite3_randomness(sizeof(lk), &lk); + pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1)); + res = LockFile(pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); +#endif + } + if( res == 0 ){ + pFile->lastErrno = GetLastError(); } return res; } @@ -1143,44 +868,18 @@ int res; if( isNT() ){ res = UnlockFile(pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - }else{ - res = UnlockFile(pFile->h, SHARED_FIRST + pFile->sharedLockByte, 0, 1, 0); - } - return res; -} - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -/* -** Check that a given pathname is a directory and is writable -** +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. 
*/ -int sqlite3WinIsDirWritable(char *zDirname){ - int fileAttr; - void *zConverted; - if( zDirname==0 ) return 0; - if( !isNT() && strlen(zDirname)>MAX_PATH ) return 0; - - zConverted = convertUtf8Filename(zDirname); - if( zConverted==0 ){ - return SQLITE_NOMEM; - } - if( isNT() ){ - fileAttr = GetFileAttributesW((WCHAR*)zConverted); +#if SQLITE_OS_WINCE==0 }else{ -#if OS_WINCE - return 0; -#else - fileAttr = GetFileAttributesA((char*)zConverted); + res = UnlockFile(pFile->h, SHARED_FIRST + pFile->sharedLockByte, 0, 1, 0); #endif } - sqliteFree(zConverted); - if( fileAttr == 0xffffffff ) return 0; - if( (fileAttr & FILE_ATTRIBUTE_DIRECTORY) != FILE_ATTRIBUTE_DIRECTORY ){ - return 0; + if( res == 0 ){ + pFile->lastErrno = GetLastError(); } - return 1; + return res; } -#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ /* ** Lock the file with the lock specified by parameter locktype - one @@ -1208,14 +907,15 @@ ** It is not possible to lower the locking level one step at a time. You ** must go straight to locking level 0. */ -static int winLock(OsFile *id, int locktype){ +static int winLock(sqlite3_file *id, int locktype){ int rc = SQLITE_OK; /* Return code from subroutines */ int res = 1; /* Result of a windows lock call */ - int newLocktype; /* Set id->locktype to this value before exiting */ + int newLocktype; /* Set pFile->locktype to this value before exiting */ int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ winFile *pFile = (winFile*)id; + DWORD error = NO_ERROR; - assert( pFile!=0 ); + assert( id!=0 ); OSTRACE5("LOCK %d %d was %d(%d)\n", pFile->h, locktype, pFile->locktype, pFile->sharedLockByte); @@ -1238,8 +938,9 @@ ** the PENDING_LOCK byte is temporary. */ newLocktype = pFile->locktype; - if( pFile->locktype==NO_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) + if( (pFile->locktype==NO_LOCK) + || ( (locktype==EXCLUSIVE_LOCK) + && (pFile->locktype==RESERVED_LOCK)) ){ int cnt = 3; while( cnt-->0 && (res = LockFile(pFile->h, PENDING_BYTE, 0, 1, 0))==0 ){ @@ -1250,6 +951,9 @@ Sleep(1); } gotPendingLock = res; + if( !res ){ + error = GetLastError(); + } } /* Acquire a shared lock @@ -1259,6 +963,8 @@ res = getReadLock(pFile); if( res ){ newLocktype = SHARED_LOCK; + }else{ + error = GetLastError(); } } @@ -1269,6 +975,8 @@ res = LockFile(pFile->h, RESERVED_BYTE, 0, 1, 0); if( res ){ newLocktype = RESERVED_LOCK; + }else{ + error = GetLastError(); } } @@ -1289,7 +997,8 @@ if( res ){ newLocktype = EXCLUSIVE_LOCK; }else{ - OSTRACE2("error-code = %d\n", GetLastError()); + error = GetLastError(); + OSTRACE2("error-code = %d\n", error); getReadLock(pFile); } } @@ -1309,9 +1018,10 @@ }else{ OSTRACE4("LOCK FAILED %d trying for %d but got %d\n", pFile->h, locktype, newLocktype); + pFile->lastErrno = error; rc = SQLITE_BUSY; } - pFile->locktype = newLocktype; + pFile->locktype = (u8)newLocktype; return rc; } @@ -1320,10 +1030,11 @@ ** file by this or any other process. If such a lock is held, return ** non-zero, otherwise zero. */ -static int winCheckReservedLock(OsFile *id){ +static int winCheckReservedLock(sqlite3_file *id, int *pResOut){ int rc; winFile *pFile = (winFile*)id; - assert( pFile!=0 ); + + assert( id!=0 ); if( pFile->locktype>=RESERVED_LOCK ){ rc = 1; OSTRACE3("TEST WR-LOCK %d %d (local)\n", pFile->h, rc); @@ -1335,7 +1046,8 @@ rc = !rc; OSTRACE3("TEST WR-LOCK %d %d (remote)\n", pFile->h, rc); } - return rc; + *pResOut = rc; + return SQLITE_OK; } /* @@ -1349,10 +1061,10 @@ ** is NO_LOCK. 
If the second argument is SHARED_LOCK then this routine ** might return SQLITE_IOERR; */ -static int winUnlock(OsFile *id, int locktype){ +static int winUnlock(sqlite3_file *id, int locktype){ int type; - int rc = SQLITE_OK; winFile *pFile = (winFile*)id; + int rc = SQLITE_OK; assert( pFile!=0 ); assert( locktype<=SHARED_LOCK ); OSTRACE5("UNLOCK %d to %d was %d(%d)\n", pFile->h, locktype, @@ -1375,80 +1087,25 @@ if( type>=PENDING_LOCK ){ UnlockFile(pFile->h, PENDING_BYTE, 0, 1, 0); } - pFile->locktype = locktype; + pFile->locktype = (u8)locktype; return rc; } /* -** Turn a relative pathname into a full pathname. Return a pointer -** to the full pathname stored in space obtained from sqliteMalloc(). -** The calling function is responsible for freeing this space once it -** is no longer needed. +** Control and query of the open file handle. */ -char *sqlite3WinFullPathname(const char *zRelative){ - char *zFull; -#if defined(__CYGWIN__) - int nByte; - nByte = strlen(zRelative) + MAX_PATH + 1001; - zFull = sqliteMalloc( nByte ); - if( zFull==0 ) return 0; - if( cygwin_conv_to_full_win32_path(zRelative, zFull) ) return 0; -#elif OS_WINCE - /* WinCE has no concept of a relative pathname, or so I am told. */ - zFull = sqliteStrDup(zRelative); -#else - int nByte; - void *zConverted; - zConverted = convertUtf8Filename(zRelative); - if( isNT() ){ - WCHAR *zTemp; - nByte = GetFullPathNameW((WCHAR*)zConverted, 0, 0, 0) + 3; - zTemp = sqliteMalloc( nByte*sizeof(zTemp[0]) ); - if( zTemp==0 ){ - sqliteFree(zConverted); - return 0; +static int winFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = ((winFile*)id)->locktype; + return SQLITE_OK; } - GetFullPathNameW((WCHAR*)zConverted, nByte, zTemp, 0); - sqliteFree(zConverted); - zFull = unicodeToUtf8(zTemp); - sqliteFree(zTemp); - }else{ - char *zTemp; - nByte = GetFullPathNameA((char*)zConverted, 0, 0, 0) + 3; - zTemp = sqliteMalloc( nByte*sizeof(zTemp[0]) ); - if( zTemp==0 ){ - sqliteFree(zConverted); - return 0; + case SQLITE_LAST_ERRNO: { + *(int*)pArg = (int)((winFile*)id)->lastErrno; + return SQLITE_OK; } - GetFullPathNameA((char*)zConverted, nByte, zTemp, 0); - sqliteFree(zConverted); - zFull = mbcsToUtf8(zTemp); - sqliteFree(zTemp); } -#endif - return zFull; -} - -/* -** The fullSync option is meaningless on windows. This is a no-op. -*/ -static void winSetFullSync(OsFile *id, int v){ - return; -} - -/* -** Return the underlying file handle for an OsFile -*/ -static int winFileHandle(OsFile *id){ - return (int)((winFile*)id)->h; -} - -/* -** Return an integer that indices the type of lock currently held -** by this handle. (Used for testing and analysis only.) -*/ -static int winLockState(OsFile *id){ - return ((winFile*)id)->locktype; + return SQLITE_ERROR; } /* @@ -1458,201 +1115,641 @@ ** ** SQLite code assumes this function cannot fail. It also assumes that ** if two files are created in the same file-system directory (i.e. -** a database and it's journal file) that the sector size will be the +** a database and its journal file) that the sector size will be the ** same for both. */ -static int winSectorSize(OsFile *id){ - return SQLITE_DEFAULT_SECTOR_SIZE; +static int winSectorSize(sqlite3_file *id){ + assert( id!=0 ); + return (int)(((winFile*)id)->sectorSize); +} + +/* +** Return a vector of device characteristics. 
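**
** The win32 port reports no special device characteristics, so the
** function below simply returns 0.  Purely as an illustration (this is
** not what the win32 VFS does), a port running on storage known to give
** 512-byte atomic writes and safe appends might instead return a mask
** built from the SQLITE_IOCAP_* constants, along the lines of:
**
**       return SQLITE_IOCAP_ATOMIC512 | SQLITE_IOCAP_SAFE_APPEND;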
+*/ +static int winDeviceCharacteristics(sqlite3_file *id){ + UNUSED_PARAMETER(id); + return 0; } /* -** This vector defines all the methods that can operate on an OsFile -** for win32. +** This vector defines all the methods that can operate on an +** sqlite3_file for win32. */ -static const IoMethod sqlite3WinIoMethod = { +static const sqlite3_io_methods winIoMethod = { + 1, /* iVersion */ winClose, - winOpenDirectory, winRead, winWrite, - winSeek, winTruncate, winSync, - winSetFullSync, - winFileHandle, winFileSize, winLock, winUnlock, - winLockState, winCheckReservedLock, + winFileControl, winSectorSize, + winDeviceCharacteristics }; +/*************************************************************************** +** Here ends the I/O methods that form the sqlite3_io_methods object. +** +** The next block of code implements the VFS methods. +****************************************************************************/ + /* -** Allocate memory for an OsFile. Initialize the new OsFile -** to the value given in pInit and return a pointer to the new -** OsFile. If we run out of memory, close the file and return NULL. -*/ -static int allocateWinFile(winFile *pInit, OsFile **pId){ - winFile *pNew; - pNew = sqliteMalloc( sizeof(*pNew) ); - if( pNew==0 ){ - CloseHandle(pInit->h); -#if OS_WINCE - sqliteFree(pInit->zDeleteOnClose); +** Convert a UTF-8 filename into whatever form the underlying +** operating system wants filenames in. Space to hold the result +** is obtained from malloc and must be freed by the calling +** function. +*/ +static void *convertUtf8Filename(const char *zFilename){ + void *zConverted = 0; + if( isNT() ){ + zConverted = utf8ToUnicode(zFilename); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +*/ +#if SQLITE_OS_WINCE==0 + }else{ + zConverted = utf8ToMbcs(zFilename); #endif - *pId = 0; - return SQLITE_NOMEM; + } + /* caller will handle out of memory */ + return zConverted; +} + +/* +** Create a temporary file name in zBuf. zBuf must be big enough to +** hold at pVfs->mxPathname characters. +*/ +static int getTempname(int nBuf, char *zBuf){ + static char zChars[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789"; + size_t i, j; + char zTempPath[MAX_PATH+1]; + if( sqlite3_temp_directory ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", sqlite3_temp_directory); + }else if( isNT() ){ + char *zMulti; + WCHAR zWidePath[MAX_PATH]; + GetTempPathW(MAX_PATH-30, zWidePath); + zMulti = unicodeToUtf8(zWidePath); + if( zMulti ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zMulti); + free(zMulti); + }else{ + return SQLITE_NOMEM; + } +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. 
+*/ +#if SQLITE_OS_WINCE==0 }else{ - *pNew = *pInit; - pNew->pMethod = &sqlite3WinIoMethod; - pNew->locktype = NO_LOCK; - pNew->sharedLockByte = 0; - *pId = (OsFile*)pNew; - OpenCounter(+1); - return SQLITE_OK; + char *zUtf8; + char zMbcsPath[MAX_PATH]; + GetTempPathA(MAX_PATH-30, zMbcsPath); + zUtf8 = sqlite3_win32_mbcs_to_utf8(zMbcsPath); + if( zUtf8 ){ + sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zUtf8); + free(zUtf8); + }else{ + return SQLITE_NOMEM; + } +#endif } + for(i=sqlite3Strlen30(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){} + zTempPath[i] = 0; + sqlite3_snprintf(nBuf-30, zBuf, + "%s\\"SQLITE_TEMP_FILE_PREFIX, zTempPath); + j = sqlite3Strlen30(zBuf); + sqlite3_randomness(20, &zBuf[j]); + for(i=0; i<20; i++, j++){ + zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; + } + zBuf[j] = 0; + OSTRACE2("TEMP FILENAME: %s\n", zBuf); + return SQLITE_OK; } +/* +** The return value of getLastErrorMsg +** is zero if the error message fits in the buffer, or non-zero +** otherwise (if the message was truncated). +*/ +static int getLastErrorMsg(int nBuf, char *zBuf){ + DWORD error = GetLastError(); -#endif /* SQLITE_OMIT_DISKIO */ -/*************************************************************************** -** Everything above deals with file I/O. Everything that follows deals -** with other miscellanous aspects of the operating system interface -****************************************************************************/ +#if SQLITE_OS_WINCE + sqlite3_snprintf(nBuf, zBuf, "OsError 0x%x (%u)", error, error); +#else + /* FormatMessage returns 0 on failure. Otherwise it + ** returns the number of TCHARs written to the output + ** buffer, excluding the terminating null char. + */ + if (!FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + error, + 0, + zBuf, + nBuf-1, + 0)) + { + sqlite3_snprintf(nBuf, zBuf, "OsError 0x%x (%u)", error, error); + } +#endif + + return 0; +} -#if !defined(SQLITE_OMIT_LOAD_EXTENSION) /* -** Interfaces for opening a shared library, finding entry points -** within the shared library, and closing the shared library. +** Open a file. */ -void *sqlite3WinDlopen(const char *zFilename){ +static int winOpen( + sqlite3_vfs *pVfs, /* Not used */ + const char *zName, /* Name of the file (UTF-8) */ + sqlite3_file *id, /* Write the SQLite file handle here */ + int flags, /* Open mode flags */ + int *pOutFlags /* Status return flags */ +){ HANDLE h; - void *zConverted = convertUtf8Filename(zFilename); + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + DWORD dwFlagsAndAttributes = 0; +#if SQLITE_OS_WINCE + int isTemp = 0; +#endif + winFile *pFile = (winFile*)id; + void *zConverted; /* Filename in OS encoding */ + const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ + char zTmpname[MAX_PATH+1]; /* Buffer used to create temp filename */ + + assert( id!=0 ); + UNUSED_PARAMETER(pVfs); + + /* If the second argument to this function is NULL, generate a + ** temporary file name to use + */ + if( !zUtf8Name ){ + int rc = getTempname(MAX_PATH+1, zTmpname); + if( rc!=SQLITE_OK ){ + return rc; + } + zUtf8Name = zTmpname; + } + + /* Convert the filename to the system encoding. 
*/ + zConverted = convertUtf8Filename(zUtf8Name); if( zConverted==0 ){ - return 0; + return SQLITE_NOMEM; } - if( isNT() ){ - h = LoadLibraryW((WCHAR*)zConverted); + + if( flags & SQLITE_OPEN_READWRITE ){ + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; }else{ -#if OS_WINCE - return 0; + dwDesiredAccess = GENERIC_READ; + } + /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is + ** created. SQLite doesn't use it to indicate "exclusive access" + ** as it is usually understood. + */ + assert(!(flags & SQLITE_OPEN_EXCLUSIVE) || (flags & SQLITE_OPEN_CREATE)); + if( flags & SQLITE_OPEN_EXCLUSIVE ){ + /* Creates a new file, only if it does not already exist. */ + /* If the file exists, it fails. */ + dwCreationDisposition = CREATE_NEW; + }else if( flags & SQLITE_OPEN_CREATE ){ + /* Open existing file, or create if it doesn't exist */ + dwCreationDisposition = OPEN_ALWAYS; + }else{ + /* Opens a file, only if it exists. */ + dwCreationDisposition = OPEN_EXISTING; + } + dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + if( flags & SQLITE_OPEN_DELETEONCLOSE ){ +#if SQLITE_OS_WINCE + dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN; + isTemp = 1; #else - h = LoadLibraryA((char*)zConverted); + dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY + | FILE_ATTRIBUTE_HIDDEN + | FILE_FLAG_DELETE_ON_CLOSE; #endif + }else{ + dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; } - sqliteFree(zConverted); - return (void*)h; - -} -void *sqlite3WinDlsym(void *pHandle, const char *zSymbol){ -#if OS_WINCE - /* The GetProcAddressA() routine is only available on wince. */ - return GetProcAddressA((HANDLE)pHandle, zSymbol); -#else - /* All other windows platforms expect GetProcAddress() to take - ** an Ansi string regardless of the _UNICODE setting */ - return GetProcAddress((HANDLE)pHandle, zSymbol); + /* Reports from the internet are that performance is always + ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */ +#if SQLITE_OS_WINCE + dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; +#endif + if( isNT() ){ + h = CreateFileW((WCHAR*)zConverted, + dwDesiredAccess, + dwShareMode, + NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL + ); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. 
+*/ +#if SQLITE_OS_WINCE==0 + }else{ + h = CreateFileA((char*)zConverted, + dwDesiredAccess, + dwShareMode, + NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL + ); +#endif + } + if( h==INVALID_HANDLE_VALUE ){ + free(zConverted); + if( flags & SQLITE_OPEN_READWRITE ){ + return winOpen(pVfs, zName, id, + ((flags|SQLITE_OPEN_READONLY)&~SQLITE_OPEN_READWRITE), pOutFlags); + }else{ + return SQLITE_CANTOPEN; + } + } + if( pOutFlags ){ + if( flags & SQLITE_OPEN_READWRITE ){ + *pOutFlags = SQLITE_OPEN_READWRITE; + }else{ + *pOutFlags = SQLITE_OPEN_READONLY; + } + } + memset(pFile, 0, sizeof(*pFile)); + pFile->pMethod = &winIoMethod; + pFile->h = h; + pFile->lastErrno = NO_ERROR; + pFile->sectorSize = getSectorSize(pVfs, zUtf8Name); +#if SQLITE_OS_WINCE + if( (flags & (SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB)) == + (SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB) + && !winceCreateLock(zName, pFile) + ){ + CloseHandle(h); + free(zConverted); + return SQLITE_CANTOPEN; + } + if( isTemp ){ + pFile->zDeleteOnClose = zConverted; + }else #endif + { + free(zConverted); + } + OpenCounter(+1); + return SQLITE_OK; } -int sqlite3WinDlclose(void *pHandle){ - return FreeLibrary((HANDLE)pHandle); + +/* +** Delete the named file. +** +** Note that windows does not allow a file to be deleted if some other +** process has it open. Sometimes a virus scanner or indexing program +** will open a journal file shortly after it is created in order to do +** whatever it does. While this other process is holding the +** file open, we will be unable to delete it. To work around this +** problem, we delay 100 milliseconds and try to delete again. Up +** to MX_DELETION_ATTEMPTs deletion attempts are run before giving +** up and returning an error. +*/ +#define MX_DELETION_ATTEMPTS 5 +static int winDelete( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to delete */ + int syncDir /* Not used on win32 */ +){ + int cnt = 0; + DWORD rc; + DWORD error = 0; + void *zConverted = convertUtf8Filename(zFilename); + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(syncDir); + if( zConverted==0 ){ + return SQLITE_NOMEM; + } + SimulateIOError(return SQLITE_IOERR_DELETE); + if( isNT() ){ + do{ + DeleteFileW(zConverted); + }while( ( ((rc = GetFileAttributesW(zConverted)) != INVALID_FILE_ATTRIBUTES) + || ((error = GetLastError()) == ERROR_ACCESS_DENIED)) + && (++cnt < MX_DELETION_ATTEMPTS) + && (Sleep(100), 1) ); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. +*/ +#if SQLITE_OS_WINCE==0 + }else{ + do{ + DeleteFileA(zConverted); + }while( ( ((rc = GetFileAttributesA(zConverted)) != INVALID_FILE_ATTRIBUTES) + || ((error = GetLastError()) == ERROR_ACCESS_DENIED)) + && (++cnt < MX_DELETION_ATTEMPTS) + && (Sleep(100), 1) ); +#endif + } + free(zConverted); + OSTRACE2("DELETE \"%s\"\n", zFilename); + return ( (rc == INVALID_FILE_ATTRIBUTES) + && (error == ERROR_FILE_NOT_FOUND)) ? SQLITE_OK : SQLITE_IOERR_DELETE; } -#endif /* !SQLITE_OMIT_LOAD_EXTENSION */ /* -** Get information to seed the random number generator. The seed -** is written into the buffer zBuf[256]. The calling function must -** supply a sufficiently large buffer. -*/ -int sqlite3WinRandomSeed(char *zBuf){ - /* We have to initialize zBuf to prevent valgrind from reporting - ** errors. 
The reports issued by valgrind are incorrect - we would - ** prefer that the randomness be increased by making use of the - ** uninitialized space in zBuf - but valgrind errors tend to worry - ** some users. Rather than argue, it seems easier just to initialize - ** the whole array and silence valgrind, even if that means less randomness - ** in the random seed. - ** - ** When testing, initializing zBuf[] to zero is all we do. That means - ** that we always use the same random number sequence.* This makes the - ** tests repeatable. - */ - memset(zBuf, 0, 256); - GetSystemTime((LPSYSTEMTIME)zBuf); +** Check the existance and status of a file. +*/ +static int winAccess( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to check */ + int flags, /* Type of test to make on this file */ + int *pResOut /* OUT: Result */ +){ + DWORD attr; + int rc = 0; + void *zConverted = convertUtf8Filename(zFilename); + UNUSED_PARAMETER(pVfs); + if( zConverted==0 ){ + return SQLITE_NOMEM; + } + if( isNT() ){ + attr = GetFileAttributesW((WCHAR*)zConverted); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. +*/ +#if SQLITE_OS_WINCE==0 + }else{ + attr = GetFileAttributesA((char*)zConverted); +#endif + } + free(zConverted); + switch( flags ){ + case SQLITE_ACCESS_READ: + case SQLITE_ACCESS_EXISTS: + rc = attr!=INVALID_FILE_ATTRIBUTES; + break; + case SQLITE_ACCESS_READWRITE: + rc = (attr & FILE_ATTRIBUTE_READONLY)==0; + break; + default: + assert(!"Invalid flags argument"); + } + *pResOut = rc; return SQLITE_OK; } + /* -** Sleep for a little while. Return the amount of time slept. +** Turn a relative pathname into a full pathname. Write the full +** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname +** bytes in size. +*/ +static int winFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zRelative, /* Possibly relative input path */ + int nFull, /* Size of output buffer in bytes */ + char *zFull /* Output buffer */ +){ + +#if defined(__CYGWIN__) + UNUSED_PARAMETER(nFull); + cygwin_conv_to_full_win32_path(zRelative, zFull); + return SQLITE_OK; +#endif + +#if SQLITE_OS_WINCE + UNUSED_PARAMETER(nFull); + /* WinCE has no concept of a relative pathname, or so I am told. */ + sqlite3_snprintf(pVfs->mxPathname, zFull, "%s", zRelative); + return SQLITE_OK; +#endif + +#if !SQLITE_OS_WINCE && !defined(__CYGWIN__) + int nByte; + void *zConverted; + char *zOut; + UNUSED_PARAMETER(nFull); + zConverted = convertUtf8Filename(zRelative); + if( isNT() ){ + WCHAR *zTemp; + nByte = GetFullPathNameW((WCHAR*)zConverted, 0, 0, 0) + 3; + zTemp = malloc( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + free(zConverted); + return SQLITE_NOMEM; + } + GetFullPathNameW((WCHAR*)zConverted, nByte, zTemp, 0); + free(zConverted); + zOut = unicodeToUtf8(zTemp); + free(zTemp); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. 
*/ -int sqlite3WinSleep(int ms){ - Sleep(ms); - return ms; +#if SQLITE_OS_WINCE==0 + }else{ + char *zTemp; + nByte = GetFullPathNameA((char*)zConverted, 0, 0, 0) + 3; + zTemp = malloc( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + free(zConverted); + return SQLITE_NOMEM; + } + GetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + free(zConverted); + zOut = sqlite3_win32_mbcs_to_utf8(zTemp); + free(zTemp); +#endif + } + if( zOut ){ + sqlite3_snprintf(pVfs->mxPathname, zFull, "%s", zOut); + free(zOut); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } +#endif } /* -** Static variables used for thread synchronization +** Get the sector size of the device used to store +** file. */ -static int inMutex = 0; -#ifdef SQLITE_W32_THREADS - static DWORD mutexOwner; - static CRITICAL_SECTION cs; +static int getSectorSize( + sqlite3_vfs *pVfs, + const char *zRelative /* UTF-8 file name */ +){ + DWORD bytesPerSector = SQLITE_DEFAULT_SECTOR_SIZE; + char zFullpath[MAX_PATH+1]; + int rc; + DWORD dwRet = 0, dwDummy; + + /* + ** We need to get the full path name of the file + ** to get the drive letter to look up the sector + ** size. + */ + rc = winFullPathname(pVfs, zRelative, MAX_PATH, zFullpath); + if( rc == SQLITE_OK ) + { + void *zConverted = convertUtf8Filename(zFullpath); + if( zConverted ){ + if( isNT() ){ + /* trim path to just drive reference */ + WCHAR *p = zConverted; + for(;*p;p++){ + if( *p == '\\' ){ + *p = '\0'; + break; + } + } + dwRet = GetDiskFreeSpaceW((WCHAR*)zConverted, + &dwDummy, + &bytesPerSector, + &dwDummy, + &dwDummy); +#if SQLITE_OS_WINCE==0 + }else{ + /* trim path to just drive reference */ + CHAR *p = (CHAR *)zConverted; + for(;*p;p++){ + if( *p == '\\' ){ + *p = '\0'; + break; + } + } + dwRet = GetDiskFreeSpaceA((CHAR*)zConverted, + &dwDummy, + &bytesPerSector, + &dwDummy, + &dwDummy); #endif + } + free(zConverted); + } + if( !dwRet ){ + bytesPerSector = SQLITE_DEFAULT_SECTOR_SIZE; + } + } + return (int) bytesPerSector; +} +#ifndef SQLITE_OMIT_LOAD_EXTENSION /* -** The following pair of routines implement mutual exclusion for -** multi-threaded processes. Only a single thread is allowed to -** executed code that is surrounded by EnterMutex() and LeaveMutex(). -** -** SQLite uses only a single Mutex. There is not much critical -** code and what little there is executes quickly and without blocking. -** -** Version 3.3.1 and earlier used a simple mutex. Beginning with -** version 3.3.2, a recursive mutex is required. +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. */ -void sqlite3WinEnterMutex(){ -#ifdef SQLITE_W32_THREADS - static int isInit = 0; - while( !isInit ){ - static long lock = 0; - if( InterlockedIncrement(&lock)==1 ){ - InitializeCriticalSection(&cs); - isInit = 1; - }else{ - Sleep(1); - } +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. +*/ +static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ + HANDLE h; + void *zConverted = convertUtf8Filename(zFilename); + UNUSED_PARAMETER(pVfs); + if( zConverted==0 ){ + return 0; } - EnterCriticalSection(&cs); - mutexOwner = GetCurrentThreadId(); + if( isNT() ){ + h = LoadLibraryW((WCHAR*)zConverted); +/* isNT() is 1 if SQLITE_OS_WINCE==1, so this else is never executed. +** Since the ASCII version of these Windows API do not exist for WINCE, +** it's important to not reference them for WINCE builds. 
+*/ +#if SQLITE_OS_WINCE==0 + }else{ + h = LoadLibraryA((char*)zConverted); #endif - inMutex++; + } + free(zConverted); + return (void*)h; } -void sqlite3WinLeaveMutex(){ - assert( inMutex ); - inMutex--; -#ifdef SQLITE_W32_THREADS - assert( mutexOwner==GetCurrentThreadId() ); - LeaveCriticalSection(&cs); +static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ + UNUSED_PARAMETER(pVfs); + getLastErrorMsg(nBuf, zBufOut); +} +void (*winDlSym(sqlite3_vfs *pVfs, void *pHandle, const char *zSymbol))(void){ + UNUSED_PARAMETER(pVfs); +#if SQLITE_OS_WINCE + /* The GetProcAddressA() routine is only available on wince. */ + return (void(*)(void))GetProcAddressA((HANDLE)pHandle, zSymbol); +#else + /* All other windows platforms expect GetProcAddress() to take + ** an Ansi string regardless of the _UNICODE setting */ + return (void(*)(void))GetProcAddress((HANDLE)pHandle, zSymbol); #endif } +void winDlClose(sqlite3_vfs *pVfs, void *pHandle){ + UNUSED_PARAMETER(pVfs); + FreeLibrary((HANDLE)pHandle); +} +#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ + #define winDlOpen 0 + #define winDlError 0 + #define winDlSym 0 + #define winDlClose 0 +#endif + /* -** Return TRUE if the mutex is currently held. -** -** If the thisThreadOnly parameter is true, return true if and only if the -** calling thread holds the mutex. If the parameter is false, return -** true if any thread holds the mutex. +** Write up to nBuf bytes of randomness into zBuf. */ -int sqlite3WinInMutex(int thisThreadOnly){ -#ifdef SQLITE_W32_THREADS - return inMutex>0 && (thisThreadOnly==0 || mutexOwner==GetCurrentThreadId()); +static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + int n = 0; + UNUSED_PARAMETER(pVfs); +#if defined(SQLITE_TEST) + n = nBuf; + memset(zBuf, 0, nBuf); #else - return inMutex>0; + if( sizeof(SYSTEMTIME)<=nBuf-n ){ + SYSTEMTIME x; + GetSystemTime(&x); + memcpy(&zBuf[n], &x, sizeof(x)); + n += sizeof(x); + } + if( sizeof(DWORD)<=nBuf-n ){ + DWORD pid = GetCurrentProcessId(); + memcpy(&zBuf[n], &pid, sizeof(pid)); + n += sizeof(pid); + } + if( sizeof(DWORD)<=nBuf-n ){ + DWORD cnt = GetTickCount(); + memcpy(&zBuf[n], &cnt, sizeof(cnt)); + n += sizeof(cnt); + } + if( sizeof(LARGE_INTEGER)<=nBuf-n ){ + LARGE_INTEGER i; + QueryPerformanceCounter(&i); + memcpy(&zBuf[n], &i, sizeof(i)); + n += sizeof(i); + } #endif + return n; } /* +** Sleep for a little while. Return the amount of time slept. +*/ +static int winSleep(sqlite3_vfs *pVfs, int microsec){ + Sleep((microsec+999)/1000); + UNUSED_PARAMETER(pVfs); + return ((microsec+999)/1000)*1000; +} + +/* ** The following variable, if set to a non-zero value, becomes the result ** returned from sqlite3OsCurrentTime(). This is used for testing. */ @@ -1665,94 +1762,118 @@ ** current time and date as a Julian Day number into *prNow and ** return 0. Return 1 if the time and date cannot be found. */ -int sqlite3WinCurrentTime(double *prNow){ +int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ FILETIME ft; /* FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). 
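     As a quick sanity check on the conversion arithmetic used below
     (illustrative numbers only):  one day contains
     86400*10000000 = 864000000000 such intervals, so a FILETIME of
     1296000000000 (exactly 1.5 days past the epoch) splits into one
     whole day plus a half-day remainder, and once the half-day offset
     is added back the result is 2305813.5 + 1.5 = 2305815.0, the
     expected Julian Day number.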
*/ - double now; -#if OS_WINCE + sqlite3_int64 timeW; /* Whole days */ + sqlite3_int64 timeF; /* Fractional Days */ + + /* Number of 100-nanosecond intervals in a single day */ + static const sqlite3_int64 ntuPerDay = + 10000000*(sqlite3_int64)86400; + + /* Number of 100-nanosecond intervals in half of a day */ + static const sqlite3_int64 ntuPerHalfDay = + 10000000*(sqlite3_int64)43200; + + /* 2^32 - to avoid use of LL and warnings in gcc */ + static const sqlite3_int64 max32BitValue = + (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 + (sqlite3_int64)294967296; + +#if SQLITE_OS_WINCE SYSTEMTIME time; GetSystemTime(&time); - SystemTimeToFileTime(&time,&ft); + /* if SystemTimeToFileTime() fails, it returns zero. */ + if (!SystemTimeToFileTime(&time,&ft)){ + return 1; + } #else GetSystemTimeAsFileTime( &ft ); #endif - now = ((double)ft.dwHighDateTime) * 4294967296.0; - *prNow = (now + ft.dwLowDateTime)/864000000000.0 + 2305813.5; + UNUSED_PARAMETER(pVfs); + timeW = (((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + (sqlite3_int64)ft.dwLowDateTime; + timeF = timeW % ntuPerDay; /* fractional days (100-nanoseconds) */ + timeW = timeW / ntuPerDay; /* whole days */ + timeW = timeW + 2305813; /* add whole days (from 2305813.5) */ + timeF = timeF + ntuPerHalfDay; /* add half a day (from 2305813.5) */ + timeW = timeW + (timeF/ntuPerDay); /* add whole day if half day made one */ + timeF = timeF % ntuPerDay; /* compute new fractional days */ + *prNow = (double)timeW + ((double)timeF / (double)ntuPerDay); #ifdef SQLITE_TEST if( sqlite3_current_time ){ - *prNow = sqlite3_current_time/86400.0 + 2440587.5; + *prNow = ((double)sqlite3_current_time + (double)43200) / (double)86400 + (double)2440587; } #endif return 0; } /* -** Remember the number of thread-specific-data blocks allocated. -** Use this to verify that we are not leaking thread-specific-data. -** Ticket #1601 -*/ -#ifdef SQLITE_TEST -int sqlite3_tsd_count = 0; -# define TSD_COUNTER_INCR InterlockedIncrement(&sqlite3_tsd_count) -# define TSD_COUNTER_DECR InterlockedDecrement(&sqlite3_tsd_count) -#else -# define TSD_COUNTER_INCR /* no-op */ -# define TSD_COUNTER_DECR /* no-op */ -#endif - - +** The idea is that this function works like a combination of +** GetLastError() and FormatMessage() on windows (or errno and +** strerror_r() on unix). After an error is returned by an OS +** function, SQLite calls this function with zBuf pointing to +** a buffer of nBuf bytes. The OS layer should populate the +** buffer with a nul-terminated UTF-8 encoded error message +** describing the last IO error to have occurred within the calling +** thread. +** +** If the error message is too large for the supplied buffer, +** it should be truncated. The return value of xGetLastError +** is zero if the error message fits in the buffer, or non-zero +** otherwise (if the message was truncated). If non-zero is returned, +** then it is not necessary to include the nul-terminator character +** in the output buffer. +** +** Not supplying an error message will have no adverse effect +** on SQLite. It is fine to have an implementation that never +** returns an error message: +** +** int xGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ +** assert(zBuf[0]=='\0'); +** return 0; +** } +** +** However if an error message is supplied, it will be incorporated +** by sqlite into the error message available to the user using +** sqlite3_errmsg(), possibly making IO errors easier to debug. 
+*/ +static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + UNUSED_PARAMETER(pVfs); + return getLastErrorMsg(nBuf, zBuf); +} /* -** If called with allocateFlag>1, then return a pointer to thread -** specific data for the current thread. Allocate and zero the -** thread-specific data if it does not already exist necessary. -** -** If called with allocateFlag==0, then check the current thread -** specific data. Return it if it exists. If it does not exist, -** then return NULL. -** -** If called with allocateFlag<0, check to see if the thread specific -** data is allocated and is all zero. If it is then deallocate it. -** Return a pointer to the thread specific data or NULL if it is -** unallocated or gets deallocated. -*/ -ThreadData *sqlite3WinThreadSpecificData(int allocateFlag){ - static int key; - static int keyInit = 0; - static const ThreadData zeroData = {0}; - ThreadData *pTsd; - - if( !keyInit ){ - sqlite3OsEnterMutex(); - if( !keyInit ){ - key = TlsAlloc(); - if( key==0xffffffff ){ - sqlite3OsLeaveMutex(); - return 0; - } - keyInit = 1; - } - sqlite3OsLeaveMutex(); - } - pTsd = TlsGetValue(key); - if( allocateFlag>0 ){ - if( !pTsd ){ - pTsd = sqlite3OsMalloc( sizeof(zeroData) ); - if( pTsd ){ - *pTsd = zeroData; - TlsSetValue(key, pTsd); - TSD_COUNTER_INCR; - } - } - }else if( pTsd!=0 && allocateFlag<0 - && memcmp(pTsd, &zeroData, sizeof(ThreadData))==0 ){ - sqlite3OsFree(pTsd); - TlsSetValue(key, 0); - TSD_COUNTER_DECR; - pTsd = 0; - } - return pTsd; +** Initialize and deinitialize the operating system interface. +*/ +int sqlite3_os_init(void){ + static sqlite3_vfs winVfs = { + 1, /* iVersion */ + sizeof(winFile), /* szOsFile */ + MAX_PATH, /* mxPathname */ + 0, /* pNext */ + "win32", /* zName */ + 0, /* pAppData */ + + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError /* xGetLastError */ + }; + sqlite3_vfs_register(&winVfs, 1); + return SQLITE_OK; } -#endif /* OS_WIN */ +int sqlite3_os_end(void){ + return SQLITE_OK; +} + +#endif /* SQLITE_OS_WIN */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pager.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pager.c --- sqlite3-3.4.2/src/pager.c 2007-08-13 15:44:08.000000000 +0100 +++ sqlite3-3.6.16/src/pager.c 2009-06-26 15:19:54.000000000 +0100 @@ -18,40 +18,29 @@ ** file simultaneously, or one process from reading the database while ** another is writing. ** -** @(#) $Id: pager.c,v 1.355 2007/08/11 00:26:21 drh Exp $ +** @(#) $Id: pager.c,v 1.603 2009/06/26 12:15:23 drh Exp $ */ #ifndef SQLITE_OMIT_DISKIO #include "sqliteInt.h" -#include "os.h" -#include "pager.h" -#include -#include /* ** Macros for troubleshooting. 
Normally turned off */ #if 0 +int sqlite3PagerTrace=1; /* True to enable tracing */ #define sqlite3DebugPrintf printf -#define PAGERTRACE1(X) sqlite3DebugPrintf(X) -#define PAGERTRACE2(X,Y) sqlite3DebugPrintf(X,Y) -#define PAGERTRACE3(X,Y,Z) sqlite3DebugPrintf(X,Y,Z) -#define PAGERTRACE4(X,Y,Z,W) sqlite3DebugPrintf(X,Y,Z,W) -#define PAGERTRACE5(X,Y,Z,W,V) sqlite3DebugPrintf(X,Y,Z,W,V) +#define PAGERTRACE(X) if( sqlite3PagerTrace ){ sqlite3DebugPrintf X; } #else -#define PAGERTRACE1(X) -#define PAGERTRACE2(X,Y) -#define PAGERTRACE3(X,Y,Z) -#define PAGERTRACE4(X,Y,Z,W) -#define PAGERTRACE5(X,Y,Z,W,V) +#define PAGERTRACE(X) #endif /* -** The following two macros are used within the PAGERTRACEX() macros above +** The following two macros are used within the PAGERTRACE() macros above ** to print out file-descriptors. ** -** PAGERID() takes a pointer to a Pager struct as it's argument. The -** associated file-descriptor is returned. FILEHANDLEID() takes an OsFile -** struct as it's argument. +** PAGERID() takes a pointer to a Pager struct as its argument. The +** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file +** struct as its argument. */ #define PAGERID(p) ((int)(p->fd)) #define FILEHANDLEID(fd) ((int)fd) @@ -111,262 +100,220 @@ #define PAGER_SYNCED 5 /* -** If the SQLITE_BUSY_RESERVED_LOCK macro is set to true at compile-time, -** then failed attempts to get a reserved lock will invoke the busy callback. -** This is off by default. To see why, consider the following scenario: -** -** Suppose thread A already has a shared lock and wants a reserved lock. -** Thread B already has a reserved lock and wants an exclusive lock. If -** both threads are using their busy callbacks, it might be a long time -** be for one of the threads give up and allows the other to proceed. -** But if the thread trying to get the reserved lock gives up quickly -** (if it never invokes its busy callback) then the contention will be -** resolved quickly. -*/ -#ifndef SQLITE_BUSY_RESERVED_LOCK -# define SQLITE_BUSY_RESERVED_LOCK 0 -#endif - -/* -** This macro rounds values up so that if the value is an address it -** is guaranteed to be an address that is aligned to an 8-byte boundary. -*/ -#define FORCE_ALIGNMENT(X) (((X)+7)&~7) - -/* -** Each in-memory image of a page begins with the following header. -** This header is only visible to this pager module. The client -** code that calls pager sees only the data that follows the header. -** -** Client code should call sqlite3PagerWrite() on a page prior to making -** any modifications to that page. The first time sqlite3PagerWrite() -** is called, the original page contents are written into the rollback -** journal and PgHdr.inJournal and PgHdr.needSync are set. Later, once -** the journal page has made it onto the disk surface, PgHdr.needSync -** is cleared. The modified page cannot be written back into the original -** database file until the journal pages has been synced to disk and the -** PgHdr.needSync has been cleared. -** -** The PgHdr.dirty flag is set when sqlite3PagerWrite() is called and -** is cleared again when the page content is written back to the original -** database file. -** -** Details of important structure elements: -** -** needSync -** -** If this is true, this means that it is not safe to write the page -** content to the database because the original content needed -** for rollback has not by synced to the main rollback journal. 
-** The original content may have been written to the rollback journal -** but it has not yet been synced. So we cannot write to the database -** file because power failure might cause the page in the journal file -** to never reach the disk. It is as if the write to the journal file -** does not occur until the journal file is synced. -** -** This flag is false if the page content exactly matches what -** currently exists in the database file. The needSync flag is also -** false if the original content has been written to the main rollback -** journal and synced. If the page represents a new page that has -** been added onto the end of the database during the current -** transaction, the needSync flag is true until the original database -** size in the journal header has been synced to disk. -** -** inJournal -** -** This is true if the original page has been written into the main -** rollback journal. This is always false for new pages added to -** the end of the database file during the current transaction. -** And this flag says nothing about whether or not the journal -** has been synced to disk. For pages that are in the original -** database file, the following expression should always be true: -** -** inJournal = (pPager->aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8))!=0 -** -** The pPager->aInJournal[] array is only valid for the original -** pages of the database, not new pages that are added to the end -** of the database, so obviously the above expression cannot be -** valid for new pages. For new pages inJournal is always 0. -** -** dirty -** -** When true, this means that the content of the page has been -** modified and needs to be written back to the database file. -** If false, it means that either the content of the page is -** unchanged or else the content is unimportant and we do not -** care whether or not it is preserved. -** -** alwaysRollback -** -** This means that the sqlite3PagerDontRollback() API should be -** ignored for this page. The DontRollback() API attempts to say -** that the content of the page on disk is unimportant (it is an -** unused page on the freelist) so that it is unnecessary to -** rollback changes to this page because the content of the page -** can change without changing the meaning of the database. This -** flag overrides any DontRollback() attempt. This flag is set -** when a page that originally contained valid data is added to -** the freelist. Later in the same transaction, this page might -** be pulled from the freelist and reused for something different -** and at that point the DontRollback() API will be called because -** pages taken from the freelist do not need to be protected by -** the rollback journal. But this flag says that the page was -** not originally part of the freelist so that it still needs to -** be rolled back in spite of any subsequent DontRollback() calls. -** -** needRead -** -** This flag means (when true) that the content of the page has -** not yet been loaded from disk. The in-memory content is just -** garbage. (Actually, we zero the content, but you should not -** make any assumptions about the content nevertheless.) If the -** content is needed in the future, it should be read from the -** original database file. 
-*/ -typedef struct PgHdr PgHdr; -struct PgHdr { - Pager *pPager; /* The pager to which this page belongs */ - Pgno pgno; /* The page number for this page */ - PgHdr *pNextHash, *pPrevHash; /* Hash collision chain for PgHdr.pgno */ - PgHdr *pNextFree, *pPrevFree; /* Freelist of pages where nRef==0 */ - PgHdr *pNextAll; /* A list of all pages */ - u8 inJournal; /* TRUE if has been written to journal */ - u8 dirty; /* TRUE if we need to write back changes */ - u8 needSync; /* Sync journal before writing this page */ - u8 alwaysRollback; /* Disable DontRollback() for this page */ - u8 needRead; /* Read content if PagerWrite() is called */ - short int nRef; /* Number of users of this page */ - PgHdr *pDirty, *pPrevDirty; /* Dirty pages */ - u32 notUsed; /* Buffer space */ -#ifdef SQLITE_CHECK_PAGES - u32 pageHash; -#endif - /* pPager->pageSize bytes of page data follow this header */ - /* Pager.nExtra bytes of local data follow the page data */ -}; - -/* -** For an in-memory only database, some extra information is recorded about -** each page so that changes can be rolled back. (Journal files are not -** used for in-memory databases.) The following information is added to -** the end of every EXTRA block for in-memory databases. -** -** This information could have been added directly to the PgHdr structure. -** But then it would take up an extra 8 bytes of storage on every PgHdr -** even for disk-based databases. Splitting it out saves 8 bytes. This -** is only a savings of 0.8% but those percentages add up. -*/ -typedef struct PgHistory PgHistory; -struct PgHistory { - u8 *pOrig; /* Original page text. Restore to this on a full rollback */ - u8 *pStmt; /* Text as it was at the beginning of the current statement */ - PgHdr *pNextStmt, *pPrevStmt; /* List of pages in the statement journal */ - u8 inStmt; /* TRUE if in the statement subjournal */ -}; - -/* ** A macro used for invoking the codec if there is one */ #ifdef SQLITE_HAS_CODEC -# define CODEC1(P,D,N,X) if( P->xCodec!=0 ){ P->xCodec(P->pCodecArg,D,N,X); } -# define CODEC2(P,D,N,X) ((char*)(P->xCodec!=0?P->xCodec(P->pCodecArg,D,N,X):D)) +# define CODEC1(P,D,N,X,E) \ + if( P->xCodec && P->xCodec(P->pCodec,D,N,X)==0 ){ E; } +# define CODEC2(P,D,N,X,E,O) \ + if( P->xCodec==0 ){ O=(char*)D; }else \ + if( (O=(char*)(P->xCodec(P->pCodec,D,N,X)))==0 ){ E; } #else -# define CODEC1(P,D,N,X) /* NO-OP */ -# define CODEC2(P,D,N,X) ((char*)D) +# define CODEC1(P,D,N,X,E) /* NO-OP */ +# define CODEC2(P,D,N,X,E,O) O=(char*)D #endif /* -** Convert a pointer to a PgHdr into a pointer to its data -** and back again. -*/ -#define PGHDR_TO_DATA(P) ((void*)(&(P)[1])) -#define DATA_TO_PGHDR(D) (&((PgHdr*)(D))[-1]) -#define PGHDR_TO_EXTRA(G,P) ((void*)&((char*)(&(G)[1]))[(P)->pageSize]) -#define PGHDR_TO_HIST(P,PGR) \ - ((PgHistory*)&((char*)(&(P)[1]))[(PGR)->pageSize+(PGR)->nExtra]) +** The maximum allowed sector size. 16MB. If the xSectorsize() method +** returns a value larger than this, then MAX_SECTOR_SIZE is used instead. +** This could conceivably cause corruption following a power failure on +** such a system. This is currently an undocumented limit. +*/ +#define MAX_SECTOR_SIZE 0x0100000 + +/* +** An instance of the following structure is allocated for each active +** savepoint and statement transaction in the system. All such structures +** are stored in the Pager.aSavepoint[] array, which is allocated and +** resized using sqlite3Realloc(). +** +** When a savepoint is created, the PagerSavepoint.iHdrOffset field is +** set to 0. 
If a journal-header is written into the main journal while +** the savepoint is active, then iHdrOffset is set to the byte offset +** immediately following the last journal record written into the main +** journal before the journal-header. This is required during savepoint +** rollback (see pagerPlaybackSavepoint()). +*/ +typedef struct PagerSavepoint PagerSavepoint; +struct PagerSavepoint { + i64 iOffset; /* Starting offset in main journal */ + i64 iHdrOffset; /* See above */ + Bitvec *pInSavepoint; /* Set of pages in this savepoint */ + Pgno nOrig; /* Original number of pages in file */ + Pgno iSubRec; /* Index of first record in sub-journal */ +}; /* ** A open page cache is an instance of the following structure. ** -** Pager.errCode may be set to SQLITE_IOERR, SQLITE_CORRUPT, or -** or SQLITE_FULL. Once one of the first three errors occurs, it persists -** and is returned as the result of every major pager API call. The -** SQLITE_FULL return code is slightly different. It persists only until the -** next successful rollback is performed on the pager cache. Also, -** SQLITE_FULL does not affect the sqlite3PagerGet() and sqlite3PagerLookup() -** APIs, they may still be used successfully. +** errCode +** +** Pager.errCode may be set to SQLITE_IOERR, SQLITE_CORRUPT, or +** or SQLITE_FULL. Once one of the first three errors occurs, it persists +** and is returned as the result of every major pager API call. The +** SQLITE_FULL return code is slightly different. It persists only until the +** next successful rollback is performed on the pager cache. Also, +** SQLITE_FULL does not affect the sqlite3PagerGet() and sqlite3PagerLookup() +** APIs, they may still be used successfully. +** +** dbSizeValid, dbSize, dbOrigSize, dbFileSize +** +** Managing the size of the database file in pages is a little complicated. +** The variable Pager.dbSize contains the number of pages that the database +** image currently contains. As the database image grows or shrinks this +** variable is updated. The variable Pager.dbFileSize contains the number +** of pages in the database file. This may be different from Pager.dbSize +** if some pages have been appended to the database image but not yet written +** out from the cache to the actual file on disk. Or if the image has been +** truncated by an incremental-vacuum operation. The Pager.dbOrigSize variable +** contains the number of pages in the database image when the current +** transaction was opened. The contents of all three of these variables is +** only guaranteed to be correct if the boolean Pager.dbSizeValid is true. +** +** TODO: Under what conditions is dbSizeValid set? Cleared? +** +** changeCountDone +** +** This boolean variable is used to make sure that the change-counter +** (the 4-byte header field at byte offset 24 of the database file) is +** not updated more often than necessary. +** +** It is set to true when the change-counter field is updated, which +** can only happen if an exclusive lock is held on the database file. +** It is cleared (set to false) whenever an exclusive lock is +** relinquished on the database file. Each time a transaction is committed, +** The changeCountDone flag is inspected. If it is true, the work of +** updating the change-counter is omitted for the current transaction. +** +** This mechanism means that when running in exclusive mode, a connection +** need only update the change-counter once, for the first transaction +** committed. 
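**
** As a purely illustrative aside (this sketch is not part of the pager):
** because the change-counter is nothing more than a 4-byte big-endian
** integer at byte offset 24 of the database file, an external tool could
** read it roughly as follows:
**
**       unsigned char aHdr[28];
**       /* ... read the first 28 bytes of the database file into aHdr ... */
**       unsigned int iChange = (aHdr[24]<<24) | (aHdr[25]<<16)
**                            | (aHdr[26]<<8)  |  aHdr[27];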
+** +** dbModified +** +** The dbModified flag is set whenever a database page is dirtied. +** It is cleared at the end of each transaction. +** +** It is used when committing or otherwise ending a transaction. If +** the dbModified flag is clear then less work has to be done. +** +** journalStarted +** +** This flag is set whenever the the main journal is synced. +** +** The point of this flag is that it must be set after the +** first journal header in a journal file has been synced to disk. +** After this has happened, new pages appended to the database +** do not need the PGHDR_NEED_SYNC flag set, as they do not need +** to wait for a journal sync before they can be written out to +** the database file (see function pager_write()). +** +** setMaster +** +** This variable is used to ensure that the master journal file name +** (if any) is only written into the journal file once. +** +** When committing a transaction, the master journal file name (if any) +** may be written into the journal file while the pager is still in +** PAGER_RESERVED state (see CommitPhaseOne() for the action). It +** then attempts to upgrade to an exclusive lock. If this attempt +** fails, then SQLITE_BUSY may be returned to the user and the user +** may attempt to commit the transaction again later (calling +** CommitPhaseOne() again). This flag is used to ensure that the +** master journal name is only written to the journal file the first +** time CommitPhaseOne() is called. +** +** doNotSync +** +** This variable is set and cleared by sqlite3PagerWrite(). +** +** needSync +** +** TODO: It might be easier to set this variable in writeJournalHdr() +** and writeMasterJournal() only. Change its meaning to "unsynced data +** has been written to the journal". +** +** subjInMemory +** +** This is a boolean variable. If true, then any required sub-journal +** is opened as an in-memory journal file. If false, then in-memory +** sub-journals are only used for in-memory pager files. */ struct Pager { - u8 journalOpen; /* True if journal file descriptors is valid */ - u8 journalStarted; /* True if header of journal is synced */ + sqlite3_vfs *pVfs; /* OS functions to use for IO */ + u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ + u8 journalMode; /* On of the PAGER_JOURNALMODE_* values */ u8 useJournal; /* Use a rollback journal on this file */ u8 noReadlock; /* Do not bother to obtain readlocks */ - u8 stmtOpen; /* True if the statement subjournal is open */ - u8 stmtInUse; /* True we are in a statement subtransaction */ - u8 stmtAutoopen; /* Open stmt journal when main journal is opened*/ u8 noSync; /* Do not sync the journal if true */ u8 fullSync; /* Do extra syncs of the journal for robustness */ - u8 full_fsync; /* Use F_FULLFSYNC when available */ - u8 state; /* PAGER_UNLOCK, _SHARED, _RESERVED, etc. */ + u8 sync_flags; /* One of SYNC_NORMAL or SYNC_FULL */ u8 tempFile; /* zFilename is a temporary file */ u8 readOnly; /* True for a read-only database */ - u8 needSync; /* True if an fsync() is needed on the journal */ - u8 dirtyCache; /* True if cached pages have changed */ - u8 alwaysRollback; /* Disable DontRollback() for all pages */ u8 memDb; /* True to inhibit all file I/O */ + + /* The following block contains those class members that are dynamically + ** modified during normal operations. The other variables in this structure + ** are either constant throughout the lifetime of the pager, or else + ** used to store configuration parameters that affect the way the pager + ** operates. 
+ ** + ** The 'state' variable is described in more detail along with the + ** descriptions of the values it may take - PAGER_UNLOCK etc. Many of the + ** other variables in this block are described in the comment directly + ** above this class definition. + */ + u8 state; /* PAGER_UNLOCK, _SHARED, _RESERVED, etc. */ + u8 dbModified; /* True if there are any changes to the Db */ + u8 needSync; /* True if an fsync() is needed on the journal */ + u8 journalStarted; /* True if header of journal is synced */ + u8 changeCountDone; /* Set after incrementing the change-counter */ u8 setMaster; /* True if a m-j name has been written to jrnl */ u8 doNotSync; /* Boolean. While true, do not spill the cache */ - u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ - u8 changeCountDone; /* Set after incrementing the change-counter */ + u8 dbSizeValid; /* Set when dbSize is correct */ + u8 subjInMemory; /* True to use in-memory sub-journals */ + Pgno dbSize; /* Number of pages in the database */ + Pgno dbOrigSize; /* dbSize before the current transaction */ + Pgno dbFileSize; /* Number of pages in the database file */ int errCode; /* One of several kinds of errors */ - int dbSize; /* Number of pages in the file */ - int origDbSize; /* dbSize before the current change */ - int stmtSize; /* Size of database (in pages) at stmt_begin() */ - int nRec; /* Number of pages written to the journal */ + int nRec; /* Pages journalled since last j-header written */ u32 cksumInit; /* Quasi-random value added to every checksum */ - int stmtNRec; /* Number of records in stmt subjournal */ - int nExtra; /* Add this many bytes to each in-memory page */ + u32 nSubRec; /* Number of records written to sub-journal */ + Bitvec *pInJournal; /* One bit for each page in the database file */ + sqlite3_file *fd; /* File descriptor for database */ + sqlite3_file *jfd; /* File descriptor for main journal */ + sqlite3_file *sjfd; /* File descriptor for sub-journal */ + i64 journalOff; /* Current write offset in the journal file */ + i64 journalHdr; /* Byte offset to previous journal header */ + PagerSavepoint *aSavepoint; /* Array of active savepoints */ + int nSavepoint; /* Number of elements in aSavepoint[] */ + char dbFileVers[16]; /* Changes whenever database file changes */ + u32 sectorSize; /* Assumed sector size during rollback */ + + u16 nExtra; /* Add this many bytes to each in-memory page */ + i16 nReserve; /* Number of unused bytes at end of each page */ + u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */ int pageSize; /* Number of bytes in a page */ - int nPage; /* Total number of in-memory pages */ - int nRef; /* Number of in-memory pages with PgHdr.nRef>0 */ - int mxPage; /* Maximum number of pages to hold in cache */ Pgno mxPgno; /* Maximum allowed size of the database */ - u8 *aInJournal; /* One bit for each page in the database file */ - u8 *aInStmt; /* One bit for each page in the database */ char *zFilename; /* Name of the database file */ char *zJournal; /* Name of the journal file */ - char *zDirectory; /* Directory hold database and journal files */ - OsFile *fd, *jfd; /* File descriptors for database and journal */ - OsFile *stfd; /* File descriptor for the statement subjournal*/ - BusyHandler *pBusyHandler; /* Pointer to sqlite.busyHandler */ - PgHdr *pFirst, *pLast; /* List of free pages */ - PgHdr *pFirstSynced; /* First free page with PgHdr.needSync==0 */ - PgHdr *pAll; /* List of all pages */ - PgHdr *pStmt; /* List of pages in the statement subjournal */ - PgHdr *pDirty; /* List of all dirty 
pages */ - i64 journalOff; /* Current byte offset in the journal file */ - i64 journalHdr; /* Byte offset to previous journal header */ - i64 stmtHdrOff; /* First journal header written this statement */ - i64 stmtCksum; /* cksumInit when statement was started */ - i64 stmtJSize; /* Size of journal at stmt_begin() */ - int sectorSize; /* Assumed sector size during rollback */ + int (*xBusyHandler)(void*); /* Function to call when busy */ + void *pBusyHandlerArg; /* Context argument for xBusyHandler */ #ifdef SQLITE_TEST int nHit, nMiss; /* Cache hits and missing */ int nRead, nWrite; /* Database pages read/written */ #endif - void (*xDestructor)(DbPage*,int); /* Call this routine when freeing pages */ - void (*xReiniter)(DbPage*,int); /* Call this routine when reloading pages */ + void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */ #ifdef SQLITE_HAS_CODEC void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ - void *pCodecArg; /* First argument to xCodec() */ -#endif - int nHash; /* Size of the pager hash table */ - PgHdr **aHash; /* Hash table to map page number to PgHdr */ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - Pager *pNext; /* Linked list of pagers in this thread */ + void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */ + void (*xCodecFree)(void*); /* Destructor for the codec */ + void *pCodec; /* First argument to xCodec... methods */ #endif char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ - char dbFileVers[16]; /* Changes whenever database file changes */ + i64 journalSizeLimit; /* Size limit for persistent journal files */ + PCache *pPCache; /* Pointer to page cache object */ + sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */ }; /* @@ -378,7 +325,6 @@ int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ -int sqlite3_pager_pgfree_count = 0; /* Number of cache pages freed */ # define PAGER_INCR(v) v++ #else # define PAGER_INCR(v) @@ -391,7 +337,7 @@ ** was obtained from /dev/random. It is used only as a sanity check. ** ** Since version 2.8.0, the journal format contains additional sanity -** checking information. If the power fails while the journal is begin +** checking information. If the power fails while the journal is being ** written, semi-random garbage data might appear in the journal ** file after power is restored. If an attempt is then made ** to roll the journal back, the database could be corrupted. The additional @@ -414,15 +360,14 @@ }; /* -** The size of the header and of each page in the journal is determined -** by the following macros. +** The size of the of each page record in the journal is given by +** the following macro. */ #define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) /* -** The journal header size for this pager. In the future, this could be -** set to some value read from the disk controller. The important -** characteristic is that it is the same size as a disk sector. +** The journal header size for this pager. This is usually the same +** size as a single disk sector. See also setSectorSize(). */ #define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) @@ -439,84 +384,55 @@ #endif /* -** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is -** reserved for working around a windows/posix incompatibility). 
It is -** used in the journal to signify that the remainder of the journal file -** is devoted to storing a master journal name - there are no more pages to -** roll back. See comments for function writeMasterJournal() for details. -*/ -/* #define PAGER_MJ_PGNO(x) (PENDING_BYTE/((x)->pageSize)) */ -#define PAGER_MJ_PGNO(x) ((PENDING_BYTE/((x)->pageSize))+1) - -/* ** The maximum legal page number is (2^31 - 1). */ #define PAGER_MAX_PGNO 2147483647 +#ifndef NDEBUG /* -** Enable reference count tracking (for debugging) here: +** Usage: +** +** assert( assert_pager_state(pPager) ); */ -#ifdef SQLITE_DEBUG - int pager3_refinfo_enable = 0; - static void pager_refinfo(PgHdr *p){ - static int cnt = 0; - if( !pager3_refinfo_enable ) return; - sqlite3DebugPrintf( - "REFCNT: %4d addr=%p nRef=%-3d total=%d\n", - p->pgno, PGHDR_TO_DATA(p), p->nRef, p->pPager->nRef - ); - cnt++; /* Something to set a breakpoint on */ - } -# define REFINFO(X) pager_refinfo(X) -#else -# define REFINFO(X) +static int assert_pager_state(Pager *pPager){ + + /* A temp-file is always in PAGER_EXCLUSIVE or PAGER_SYNCED state. */ + assert( pPager->tempFile==0 || pPager->state>=PAGER_EXCLUSIVE ); + + /* The changeCountDone flag is always set for temp-files */ + assert( pPager->tempFile==0 || pPager->changeCountDone ); + + return 1; +} #endif /* -** Return true if page *pPg has already been written to the statement -** journal (or statement snapshot has been created, if *pPg is part -** of an in-memory database). +** Return true if it is necessary to write page *pPg into the sub-journal. +** A page needs to be written into the sub-journal if there exists one +** or more open savepoints for which: +** +** * The page-number is less than or equal to PagerSavepoint.nOrig, and +** * The bit corresponding to the page-number is not set in +** PagerSavepoint.pInSavepoint. */ -static int pageInStatement(PgHdr *pPg){ +static int subjRequiresPage(PgHdr *pPg){ + Pgno pgno = pPg->pgno; Pager *pPager = pPg->pPager; - if( MEMDB ){ - return PGHDR_TO_HIST(pPg, pPager)->inStmt; - }else{ - Pgno pgno = pPg->pgno; - u8 *a = pPager->aInStmt; - return (a && (int)pgno<=pPager->stmtSize && (a[pgno/8] & (1<<(pgno&7)))); + int i; + for(i=0; inSavepoint; i++){ + PagerSavepoint *p = &pPager->aSavepoint[i]; + if( p->nOrig>=pgno && 0==sqlite3BitvecTest(p->pInSavepoint, pgno) ){ + return 1; + } } + return 0; } /* -** Change the size of the pager hash table to N. N must be a power -** of two. -*/ -static void pager_resize_hash_table(Pager *pPager, int N){ - PgHdr **aHash, *pPg; - assert( N>0 && (N&(N-1))==0 ); - aHash = sqliteMalloc( sizeof(aHash[0])*N ); - if( aHash==0 ){ - /* Failure to rehash is not an error. It is only a performance hit. */ - return; - } - sqliteFree(pPager->aHash); - pPager->nHash = N; - pPager->aHash = aHash; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - int h; - if( pPg->pgno==0 ){ - assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); - continue; - } - h = pPg->pgno & (N-1); - pPg->pNextHash = aHash[h]; - if( aHash[h] ){ - aHash[h]->pPrevHash = pPg; - } - aHash[h] = pPg; - pPg->pPrevHash = 0; - } +** Return true if the page is already in the journal file. +*/ +static int pageInJournal(PgHdr *pPg){ + return sqlite3BitvecTest(pPg->pPager->pInJournal, pPg->pgno); } /* @@ -526,9 +442,9 @@ ** ** All values are stored on disk as big-endian. 
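**
** For reference, the big-endian decoding performed by sqlite3Get4byte()
** below is equivalent to the following (a sketch, not necessarily the
** actual implementation):
**
**   *pRes = ((u32)ac[0]<<24) | ((u32)ac[1]<<16) | ((u32)ac[2]<<8) | (u32)ac[3];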
*/ -static int read32bits(OsFile *fd, u32 *pRes){ +static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){ unsigned char ac[4]; - int rc = sqlite3OsRead(fd, ac, sizeof(ac)); + int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset); if( rc==SQLITE_OK ){ *pRes = sqlite3Get4byte(ac); } @@ -544,38 +460,76 @@ ** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK ** on success or an error code is something goes wrong. */ -static int write32bits(OsFile *fd, u32 val){ +static int write32bits(sqlite3_file *fd, i64 offset, u32 val){ char ac[4]; put32bits(ac, val); - return sqlite3OsWrite(fd, ac, 4); + return sqlite3OsWrite(fd, ac, 4, offset); } /* -** This function should be called when an error occurs within the pager -** code. The first argument is a pointer to the pager structure, the -** second the error-code about to be returned by a pager API function. -** The value returned is a copy of the second argument to this function. +** The argument to this macro is a file descriptor (type sqlite3_file*). +** Return 0 if it is not open, or non-zero (but not 1) if it is. ** -** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL -** the error becomes persistent. All subsequent API calls on this Pager -** will immediately return the same error code. +** This is so that expressions can be written as: +** +** if( isOpen(pPager->jfd) ){ ... +** +** instead of +** +** if( pPager->jfd->pMethods ){ ... */ -static int pager_error(Pager *pPager, int rc){ - int rc2 = rc & 0xff; - assert( - pPager->errCode==SQLITE_FULL || - pPager->errCode==SQLITE_OK || - (pPager->errCode & 0xff)==SQLITE_IOERR - ); - if( - rc2==SQLITE_FULL || - rc2==SQLITE_IOERR || - rc2==SQLITE_CORRUPT - ){ - pPager->errCode = rc; +#define isOpen(pFd) ((pFd)->pMethods) + +/* +** If file pFd is open, call sqlite3OsUnlock() on it. +*/ +static int osUnlock(sqlite3_file *pFd, int eLock){ + if( !isOpen(pFd) ){ + return SQLITE_OK; } - return rc; + return sqlite3OsUnlock(pFd, eLock); +} + +/* +** This function determines whether or not the atomic-write optimization +** can be used with this pager. The optimization can be used if: +** +** (a) the value returned by OsDeviceCharacteristics() indicates that +** a database page may be written atomically, and +** (b) the value returned by OsSectorSize() is less than or equal +** to the page size. +** +** The optimization is also always enabled for temporary files. It is +** an error to call this function if pPager is opened on an in-memory +** database. +** +** If the optimization cannot be used, 0 is returned. If it can be used, +** then the value returned is the size of the journal file when it +** contains rollback data for exactly one page. 
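**
** For example (illustrative figures only): with a 512 byte sector size
** and a 1024 byte page size, the value returned is
**
**   JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager)
**     = pPager->sectorSize + (pPager->pageSize + 8)
**     = 512 + 1032 = 1544 bytes.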
+*/ +#ifdef SQLITE_ENABLE_ATOMIC_WRITE +static int jrnlBufferSize(Pager *pPager){ + assert( !MEMDB ); + if( !pPager->tempFile ){ + int dc; /* Device characteristics */ + int nSector; /* Sector size */ + int szPage; /* Page size */ + + assert( isOpen(pPager->fd) ); + dc = sqlite3OsDeviceCharacteristics(pPager->fd); + nSector = pPager->sectorSize; + szPage = pPager->pageSize; + + assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); + assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); + if( 0==(dc&(SQLITE_IOCAP_ATOMIC|(szPage>>8)) || nSector>szPage) ){ + return 0; + } + } + + return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager); } +#endif /* ** If SQLITE_CHECK_PAGES is defined then we do some sanity checking @@ -595,8 +549,10 @@ return hash; } static u32 pager_pagehash(PgHdr *pPage){ - return pager_datahash(pPage->pPager->pageSize, - (unsigned char *)PGHDR_TO_DATA(pPage)); + return pager_datahash(pPage->pPager->pageSize, (unsigned char *)pPage->pData); +} +static void pager_set_pagehash(PgHdr *pPage){ + pPage->pageHash = pager_pagehash(pPage); } /* @@ -607,68 +563,65 @@ #define CHECK_PAGE(x) checkPage(x) static void checkPage(PgHdr *pPg){ Pager *pPager = pPg->pPager; - assert( !pPg->pageHash || pPager->errCode || MEMDB || pPg->dirty || - pPg->pageHash==pager_pagehash(pPg) ); + assert( !pPg->pageHash || pPager->errCode + || (pPg->flags&PGHDR_DIRTY) || pPg->pageHash==pager_pagehash(pPg) ); } #else #define pager_datahash(X,Y) 0 #define pager_pagehash(X) 0 #define CHECK_PAGE(x) -#endif +#endif /* SQLITE_CHECK_PAGES */ /* ** When this is called the journal file for pager pPager must be open. -** The master journal file name is read from the end of the file and -** written into memory obtained from sqliteMalloc(). *pzMaster is -** set to point at the memory and SQLITE_OK returned. The caller must -** sqliteFree() *pzMaster. -** -** If no master journal file name is present *pzMaster is set to 0 and -** SQLITE_OK returned. -*/ -static int readMasterJournal(OsFile *pJrnl, char **pzMaster){ - int rc; - u32 len; - i64 szJ; - u32 cksum; - int i; - unsigned char aMagic[8]; /* A buffer to hold the magic header */ - - *pzMaster = 0; - - rc = sqlite3OsFileSize(pJrnl, &szJ); - if( rc!=SQLITE_OK || szJ<16 ) return rc; - - rc = sqlite3OsSeek(pJrnl, szJ-16); - if( rc!=SQLITE_OK ) return rc; - - rc = read32bits(pJrnl, &len); - if( rc!=SQLITE_OK ) return rc; - - rc = read32bits(pJrnl, &cksum); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3OsRead(pJrnl, aMagic, 8); - if( rc!=SQLITE_OK || memcmp(aMagic, aJournalMagic, 8) ) return rc; - - rc = sqlite3OsSeek(pJrnl, szJ-16-len); - if( rc!=SQLITE_OK ) return rc; - - *pzMaster = (char *)sqliteMalloc(len+1); - if( !*pzMaster ){ - return SQLITE_NOMEM; - } - rc = sqlite3OsRead(pJrnl, *pzMaster, len); - if( rc!=SQLITE_OK ){ - sqliteFree(*pzMaster); - *pzMaster = 0; +** This function attempts to read a master journal file name from the +** end of the file and, if successful, copies it into memory supplied +** by the caller. See comments above writeMasterJournal() for the format +** used to store a master journal file name at the end of a journal file. +** +** zMaster must point to a buffer of at least nMaster bytes allocated by +** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is +** enough space to write the master journal name). If the master journal +** name in the journal is longer than nMaster bytes (including a +** nul-terminator), then this is handled as if no master journal name +** were present in the journal. 
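**
** For reference, the tail of a journal file of total size szJ that
** contains an N-byte master journal name is laid out as follows (this
** is the layout that the reads below rely on):
**
**   szJ-16-N ... szJ-17   master journal name (N bytes, utf-8)
**   szJ-16   ... szJ-13   N, as a 32-bit big-endian integer
**   szJ-12   ... szJ-9    master journal name checksum
**   szJ-8    ... szJ-1    aJournalMagic[] (8 bytes)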
+** +** If a master journal file name is present at the end of the journal +** file, then it is copied into the buffer pointed to by zMaster. A +** nul-terminator byte is appended to the buffer following the master +** journal file name. +** +** If it is determined that no master journal file name is present +** zMaster[0] is set to 0 and SQLITE_OK returned. +** +** If an error occurs while reading from the journal file, an SQLite +** error code is returned. +*/ +static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, u32 nMaster){ + int rc; /* Return code */ + u32 len; /* Length in bytes of master journal name */ + i64 szJ; /* Total size in bytes of journal file pJrnl */ + u32 cksum; /* MJ checksum value read from journal */ + u32 u; /* Unsigned loop counter */ + unsigned char aMagic[8]; /* A buffer to hold the magic header */ + zMaster[0] = '\0'; + + if( SQLITE_OK!=(rc = sqlite3OsFileSize(pJrnl, &szJ)) + || szJ<16 + || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len)) + || len>=nMaster + || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum)) + || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8)) + || memcmp(aMagic, aJournalMagic, 8) + || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, zMaster, len, szJ-16-len)) + ){ return rc; } /* See if the checksum matches the master journal name */ - for(i=0; ijournalOff, assuming a sector +** size of pPager->sectorSize bytes. ** ** i.e for a sector size of 512: ** -** Input Offset Output Offset -** --------------------------------------- -** 0 0 -** 512 512 -** 100 512 -** 2000 2048 +** Pager.journalOff Return value +** --------------------------------------- +** 0 0 +** 512 512 +** 100 512 +** 2000 2048 ** */ -static int seekJournalHdr(Pager *pPager){ +static i64 journalHdrOffset(Pager *pPager){ i64 offset = 0; i64 c = pPager->journalOff; if( c ){ @@ -709,8 +660,62 @@ assert( offset%JOURNAL_HDR_SZ(pPager)==0 ); assert( offset>=c ); assert( (offset-c)journalOff = offset; - return sqlite3OsSeek(pPager->jfd, pPager->journalOff); + return offset; +} + +/* +** The journal file must be open when this function is called. +** +** This function is a no-op if the journal file has not been written to +** within the current transaction (i.e. if Pager.journalOff==0). +** +** If doTruncate is non-zero or the Pager.journalSizeLimit variable is +** set to 0, then truncate the journal file to zero bytes in size. Otherwise, +** zero the 28-byte header at the start of the journal file. In either case, +** if the pager is not in no-sync mode, sync the journal file immediately +** after writing or truncating it. +** +** If Pager.journalSizeLimit is set to a positive, non-zero value, and +** following the truncation or zeroing described above the size of the +** journal file in bytes is larger than this value, then truncate the +** journal file to Pager.journalSizeLimit bytes. The journal file does +** not need to be synced following this operation. +** +** If an IO error occurs, abandon processing and return the IO error code. +** Otherwise, return SQLITE_OK. 
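**
** (The 28 bytes zeroed here cover exactly the fields written by
** writeJournalHdr(): 8 bytes of magic followed by five 32-bit fields --
** nRec, the checksum-initializer, the original database size, the
** sector size and the page size -- i.e. 8 + 5*4 = 28 bytes.  Clearing
** the magic alone is already enough to make the header, and hence the
** journal, unrecognisable during rollback.)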
+*/ +static int zeroJournalHdr(Pager *pPager, int doTruncate){ + int rc = SQLITE_OK; /* Return code */ + assert( isOpen(pPager->jfd) ); + if( pPager->journalOff ){ + const i64 iLimit = pPager->journalSizeLimit; /* Local cache of jsl */ + + IOTRACE(("JZEROHDR %p\n", pPager)) + if( doTruncate || iLimit==0 ){ + rc = sqlite3OsTruncate(pPager->jfd, 0); + }else{ + static const char zeroHdr[28] = {0}; + rc = sqlite3OsWrite(pPager->jfd, zeroHdr, sizeof(zeroHdr), 0); + } + if( rc==SQLITE_OK && !pPager->noSync ){ + rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_DATAONLY|pPager->sync_flags); + } + + /* At this point the transaction is committed but the write lock + ** is still held on the file. If there is a size limit configured for + ** the persistent journal and the journal file currently consumes more + ** space than that limit allows for, truncate it now. There is no need + ** to sync the file following this operation. + */ + if( rc==SQLITE_OK && iLimit>0 ){ + i64 sz; + rc = sqlite3OsFileSize(pPager->jfd, &sz); + if( rc==SQLITE_OK && sz>iLimit ){ + rc = sqlite3OsTruncate(pPager->jfd, iLimit); + } + } + } + return rc; } /* @@ -724,116 +729,225 @@ ** - 4 bytes: Random number used for page hash. ** - 4 bytes: Initial database page count. ** - 4 bytes: Sector size used by the process that wrote this journal. +** - 4 bytes: Database page size. ** -** Followed by (JOURNAL_HDR_SZ - 24) bytes of unused space. +** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space. */ static int writeJournalHdr(Pager *pPager){ - char zHeader[sizeof(aJournalMagic)+16]; - int rc; + int rc = SQLITE_OK; /* Return code */ + char *zHeader = pPager->pTmpSpace; /* Temporary space used to build header */ + u32 nHeader = pPager->pageSize; /* Size of buffer pointed to by zHeader */ + u32 nWrite; /* Bytes of header sector written */ + int ii; /* Loop counter */ - if( pPager->stmtHdrOff==0 ){ - pPager->stmtHdrOff = pPager->journalOff; + assert( isOpen(pPager->jfd) ); /* Journal file must be open. */ + + if( nHeader>JOURNAL_HDR_SZ(pPager) ){ + nHeader = JOURNAL_HDR_SZ(pPager); } - rc = seekJournalHdr(pPager); - if( rc ) return rc; + /* If there are active savepoints and any of them were created + ** since the most recent journal header was written, update the + ** PagerSavepoint.iHdrOffset fields now. + */ + for(ii=0; iinSavepoint; ii++){ + if( pPager->aSavepoint[ii].iHdrOffset==0 ){ + pPager->aSavepoint[ii].iHdrOffset = pPager->journalOff; + } + } - pPager->journalHdr = pPager->journalOff; - pPager->journalOff += JOURNAL_HDR_SZ(pPager); + pPager->journalHdr = pPager->journalOff = journalHdrOffset(pPager); - /* FIX ME: + /* + ** Write the nRec Field - the number of page records that follow this + ** journal header. Normally, zero is written to this value at this time. + ** After the records are added to the journal (and the journal synced, + ** if in full-sync mode), the zero is overwritten with the true number + ** of records (see syncJournal()). + ** + ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When + ** reading the journal this value tells SQLite to assume that the + ** rest of the journal file contains valid page records. This assumption + ** is dangerous, as if a failure occurred whilst writing to the journal + ** file it may contain some garbage data. There are two scenarios + ** where this risk can be ignored: ** - ** Possibly for a pager not in no-sync mode, the journal magic should not - ** be written until nRec is filled in as part of next syncJournal(). + ** * When the pager is in no-sync mode. 
Corruption can follow a + ** power failure in this case anyway. ** - ** Actually maybe the whole journal header should be delayed until that - ** point. Think about this. + ** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees + ** that garbage data is never appended to the journal file. */ - memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); - /* The nRec Field. 0xFFFFFFFF for no-sync journals. */ - put32bits(&zHeader[sizeof(aJournalMagic)], pPager->noSync ? 0xffffffff : 0); + assert( isOpen(pPager->fd) || pPager->noSync ); + if( (pPager->noSync) || (pPager->journalMode==PAGER_JOURNALMODE_MEMORY) + || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) + ){ + memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); + put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff); + }else{ + zHeader[0] = '\0'; + put32bits(&zHeader[sizeof(aJournalMagic)], 0); + } + /* The random check-hash initialiser */ - sqlite3Randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); /* The initial database size */ - put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbSize); + put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize); /* The assumed sector size for this process */ put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize); - IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, sizeof(zHeader))) - rc = sqlite3OsWrite(pPager->jfd, zHeader, sizeof(zHeader)); - /* The journal header has been written successfully. Seek the journal - ** file descriptor to the end of the journal header sector. - */ - if( rc==SQLITE_OK ){ - IOTRACE(("JTAIL %p %lld\n", pPager, pPager->journalOff-1)) - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff-1); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->jfd, "\000", 1); - } + /* The page size */ + put32bits(&zHeader[sizeof(aJournalMagic)+16], pPager->pageSize); + + /* Initializing the tail of the buffer is not necessary. Everything + ** works find if the following memset() is omitted. But initializing + ** the memory prevents valgrind from complaining, so we are willing to + ** take the performance hit. + */ + memset(&zHeader[sizeof(aJournalMagic)+20], 0, + nHeader-(sizeof(aJournalMagic)+20)); + + /* In theory, it is only necessary to write the 28 bytes that the + ** journal header consumes to the journal file here. Then increment the + ** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next + ** record is written to the following sector (leaving a gap in the file + ** that will be implicitly filled in by the OS). + ** + ** However it has been discovered that on some systems this pattern can + ** be significantly slower than contiguously writing data to the file, + ** even if that means explicitly writing data to the block of + ** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what + ** is done. + ** + ** The loop is required here in case the sector-size is larger than the + ** database page size. Since the zHeader buffer is only Pager.pageSize + ** bytes in size, more than one call to sqlite3OsWrite() may be required + ** to populate the entire journal header sector. + */ + for(nWrite=0; rc==SQLITE_OK&&nWritejournalHdr, nHeader)) + rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff); + pPager->journalOff += nHeader; } + return rc; } /* ** The journal file must be open when this is called. 
A journal header file ** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal -** file. See comments above function writeJournalHdr() for a description of -** the journal header format. +** file. The current location in the journal file is given by +** pPager->journalOff. See comments above function writeJournalHdr() for +** a description of the journal header format. ** -** If the header is read successfully, *nRec is set to the number of -** page records following this header and *dbSize is set to the size of the +** If the header is read successfully, *pNRec is set to the number of +** page records following this header and *pDbSize is set to the size of the ** database before the transaction began, in pages. Also, pPager->cksumInit ** is set to the value read from the journal header. SQLITE_OK is returned ** in this case. ** ** If the journal header file appears to be corrupted, SQLITE_DONE is -** returned and *nRec and *dbSize are not set. If JOURNAL_HDR_SZ bytes +** returned and *pNRec and *PDbSize are undefined. If JOURNAL_HDR_SZ bytes ** cannot be read from the journal file an error code is returned. */ static int readJournalHdr( - Pager *pPager, - i64 journalSize, - u32 *pNRec, - u32 *pDbSize + Pager *pPager, /* Pager object */ + int isHot, + i64 journalSize, /* Size of the open journal file in bytes */ + u32 *pNRec, /* OUT: Value read from the nRec field */ + u32 *pDbSize /* OUT: Value of original database size field */ ){ - int rc; - unsigned char aMagic[8]; /* A buffer to hold the magic header */ + int rc; /* Return code */ + unsigned char aMagic[8]; /* A buffer to hold the magic header */ + i64 iHdrOff; /* Offset of journal header being read */ - rc = seekJournalHdr(pPager); - if( rc ) return rc; + assert( isOpen(pPager->jfd) ); /* Journal file must be open. */ + /* Advance Pager.journalOff to the start of the next sector. If the + ** journal file is too small for there to be a header stored at this + ** point, return SQLITE_DONE. + */ + pPager->journalOff = journalHdrOffset(pPager); if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ return SQLITE_DONE; } + iHdrOff = pPager->journalOff; - rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic)); - if( rc ) return rc; - - if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ - return SQLITE_DONE; + /* Read in the first 8 bytes of the journal header. If they do not match + ** the magic string found at the start of each journal header, return + ** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise, + ** proceed. + */ + if( isHot || iHdrOff!=pPager->journalHdr ){ + rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), iHdrOff); + if( rc ){ + return rc; + } + if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ + return SQLITE_DONE; + } } - rc = read32bits(pPager->jfd, pNRec); - if( rc ) return rc; + /* Read the first three 32-bit fields of the journal header: The nRec + ** field, the checksum-initializer and the database size at the start + ** of the transaction. Return an error code if anything goes wrong. 
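  **
  ** For reference, the offsets used here and below are relative to
  ** iHdrOff, the start of this journal header:
  **
  **   iHdrOff+0  .. +7     aJournalMagic[]
  **   iHdrOff+8  .. +11    nRec
  **   iHdrOff+12 .. +15    checksum-initializer (Pager.cksumInit)
  **   iHdrOff+16 .. +19    database size at the start of the transaction
  **   iHdrOff+20 .. +23    sector size
  **   iHdrOff+24 .. +27    page size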
+ */ + if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+8, pNRec)) + || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+12, &pPager->cksumInit)) + || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+16, pDbSize)) + ){ + return rc; + } - rc = read32bits(pPager->jfd, &pPager->cksumInit); - if( rc ) return rc; + if( pPager->journalOff==0 ){ + u32 iPageSize; /* Page-size field of journal header */ + u32 iSectorSize; /* Sector-size field of journal header */ + u16 iPageSize16; /* Copy of iPageSize in 16-bit variable */ + + /* Read the page-size and sector-size journal header fields. */ + if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSize)) + || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSize)) + ){ + return rc; + } - rc = read32bits(pPager->jfd, pDbSize); - if( rc ) return rc; + /* Check that the values read from the page-size and sector-size fields + ** are within range. To be 'in range', both values need to be a power + ** of two greater than or equal to 512, and not greater than their + ** respective compile time maximum limits. + */ + if( iPageSize<512 || iSectorSize<512 + || iPageSize>SQLITE_MAX_PAGE_SIZE || iSectorSize>MAX_SECTOR_SIZE + || ((iPageSize-1)&iPageSize)!=0 || ((iSectorSize-1)&iSectorSize)!=0 + ){ + /* If the either the page-size or sector-size in the journal-header is + ** invalid, then the process that wrote the journal-header must have + ** crashed before the header was synced. In this case stop reading + ** the journal file here. + */ + return SQLITE_DONE; + } - /* Update the assumed sector-size to match the value used by - ** the process that created this journal. If this journal was - ** created by a process other than this one, then this routine - ** is being called from within pager_playback(). The local value - ** of Pager.sectorSize is restored at the end of that routine. - */ - rc = read32bits(pPager->jfd, (u32 *)&pPager->sectorSize); - if( rc ) return rc; + /* Update the page-size to match the value read from the journal. + ** Use a testcase() macro to make sure that malloc failure within + ** PagerSetPagesize() is tested. + */ + iPageSize16 = (u16)iPageSize; + rc = sqlite3PagerSetPagesize(pPager, &iPageSize16, -1); + testcase( rc!=SQLITE_OK ); + assert( rc!=SQLITE_OK || iPageSize16==(u16)iPageSize ); + + /* Update the assumed sector-size to match the value used by + ** the process that created this journal. If this journal was + ** created by a process other than this one, then this routine + ** is being called from within pager_playback(). The local value + ** of Pager.sectorSize is restored at the end of that routine. + */ + pPager->sectorSize = iSectorSize; + } pPager->journalOff += JOURNAL_HDR_SZ(pPager); - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); return rc; } @@ -845,31 +959,37 @@ ** journal file descriptor is advanced to the next sector boundary before ** anything is written. The format is: ** -** + 4 bytes: PAGER_MJ_PGNO. -** + N bytes: length of master journal name. -** + 4 bytes: N -** + 4 bytes: Master journal name checksum. -** + 8 bytes: aJournalMagic[]. +** + 4 bytes: PAGER_MJ_PGNO. +** + N bytes: Master journal filename in utf-8. +** + 4 bytes: N (length of master journal name in bytes, no nul-terminator). +** + 4 bytes: Master journal name checksum. +** + 8 bytes: aJournalMagic[]. ** ** The master journal page checksum is the sum of the bytes in the master -** journal name. +** journal name, where each byte is interpreted as a signed 8-bit integer. 
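**
** As a rough sketch, the checksum computed for an nMaster byte name
** zMaster is simply the byte-wise sum (zMaster[] has type char, hence
** the signed interpretation mentioned above):
**
**   cksum = 0;
**   for(i=0; i<nMaster; i++){ cksum += zMaster[i]; }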
** ** If zMaster is a NULL pointer (occurs for a single database transaction), ** this call is a no-op. */ static int writeMasterJournal(Pager *pPager, const char *zMaster){ - int rc; - int len; - int i; - u32 cksum = 0; - char zBuf[sizeof(aJournalMagic)+2*4]; - - if( !zMaster || pPager->setMaster) return SQLITE_OK; + int rc; /* Return code */ + int nMaster; /* Length of string zMaster */ + i64 iHdrOff; /* Offset of header in journal file */ + i64 jrnlSize; /* Size of journal file on disk */ + u32 cksum = 0; /* Checksum of string zMaster */ + + if( !zMaster || pPager->setMaster + || pPager->journalMode==PAGER_JOURNALMODE_MEMORY + || pPager->journalMode==PAGER_JOURNALMODE_OFF + ){ + return SQLITE_OK; + } pPager->setMaster = 1; + assert( isOpen(pPager->jfd) ); - len = strlen(zMaster); - for(i=0; ifullSync ){ - rc = seekJournalHdr(pPager); - if( rc!=SQLITE_OK ) return rc; + pPager->journalOff = journalHdrOffset(pPager); } - pPager->journalOff += (len+20); + iHdrOff = pPager->journalOff; - rc = write32bits(pPager->jfd, PAGER_MJ_PGNO(pPager)); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3OsWrite(pPager->jfd, zMaster, len); - if( rc!=SQLITE_OK ) return rc; - - put32bits(zBuf, len); - put32bits(&zBuf[4], cksum); - memcpy(&zBuf[8], aJournalMagic, sizeof(aJournalMagic)); - rc = sqlite3OsWrite(pPager->jfd, zBuf, 8+sizeof(aJournalMagic)); + /* Write the master journal data to the end of the journal file. If + ** an error occurs, return the error code to the caller. + */ + if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager)))) + || (0 != (rc = sqlite3OsWrite(pPager->jfd, zMaster, nMaster, iHdrOff+4))) + || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster, nMaster))) + || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster+4, cksum))) + || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8, iHdrOff+4+nMaster+8))) + ){ + return rc; + } + pPager->journalOff += (nMaster+20); pPager->needSync = !pPager->noSync; - return rc; -} -/* -** Add or remove a page from the list of all pages that are in the -** statement journal. -** -** The Pager keeps a separate list of pages that are currently in -** the statement journal. This helps the sqlite3PagerStmtCommit() -** routine run MUCH faster for the common case where there are many -** pages in memory but only a few are in the statement journal. -*/ -static void page_add_to_stmt_list(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - assert( MEMDB ); - if( !pHist->inStmt ){ - assert( pHist->pPrevStmt==0 && pHist->pNextStmt==0 ); - if( pPager->pStmt ){ - PGHDR_TO_HIST(pPager->pStmt, pPager)->pPrevStmt = pPg; - } - pHist->pNextStmt = pPager->pStmt; - pPager->pStmt = pPg; - pHist->inStmt = 1; + /* If the pager is in peristent-journal mode, then the physical + ** journal-file may extend past the end of the master-journal name + ** and 8 bytes of magic data just written to the file. This is + ** dangerous because the code to rollback a hot-journal file + ** will not be able to find the master-journal name to determine + ** whether or not the journal is hot. + ** + ** Easiest thing to do in this scenario is to truncate the journal + ** file to the required size. + */ + if( SQLITE_OK==(rc = sqlite3OsFileSize(pPager->jfd, &jrnlSize)) + && jrnlSize>pPager->journalOff + ){ + rc = sqlite3OsTruncate(pPager->jfd, pPager->journalOff); } + return rc; } /* -** Find a page in the hash table given its page number. Return -** a pointer to the page or NULL if not found. 
+** Find a page in the hash table given its page number. Return +** a pointer to the page or NULL if the requested page is not +** already in memory. */ static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ - PgHdr *p; - if( pPager->aHash==0 ) return 0; - p = pPager->aHash[pgno & (pPager->nHash-1)]; - while( p && p->pgno!=pgno ){ - p = p->pNextHash; - } + PgHdr *p; /* Return value */ + + /* It is not possible for a call to PcacheFetch() with createFlag==0 to + ** fail, since no attempt to allocate dynamic memory will be made. + */ + (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &p); return p; } /* -** Unlock the database file. +** Unless the pager is in error-state, discard all in-memory pages. If +** the pager is in error-state, then this call is a no-op. +** +** TODO: Why can we not reset the pager while in error state? */ -static void pager_unlock(Pager *pPager){ - if( !pPager->exclusiveMode ){ - if( !MEMDB ){ - sqlite3OsUnlock(pPager->fd, NO_LOCK); - pPager->dbSize = -1; - IOTRACE(("UNLOCK %p\n", pPager)) - } - pPager->state = PAGER_UNLOCK; - pPager->changeCountDone = 0; +static void pager_reset(Pager *pPager){ + if( SQLITE_OK==pPager->errCode ){ + sqlite3BackupRestart(pPager->pBackup); + sqlite3PcacheClear(pPager->pPCache); + pPager->dbSizeValid = 0; } } /* -** Execute a rollback if a transaction is active and unlock the -** database file. This is a no-op if the pager has already entered -** the error-state. +** Free all structures in the Pager.aSavepoint[] array and set both +** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal +** if it is open and the pager is not in exclusive mode. */ -static void pagerUnlockAndRollback(Pager *p){ - if( p->errCode ) return; - assert( p->state>=PAGER_RESERVED || p->journalOpen==0 ); - if( p->state>=PAGER_RESERVED ){ - sqlite3PagerRollback(p); +static void releaseAllSavepoints(Pager *pPager){ + int ii; /* Iterator for looping through Pager.aSavepoint */ + for(ii=0; iinSavepoint; ii++){ + sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint); + } + if( !pPager->exclusiveMode || sqlite3IsMemJournal(pPager->sjfd) ){ + sqlite3OsClose(pPager->sjfd); } - pager_unlock(p); - assert( p->errCode || !p->journalOpen || (p->exclusiveMode&&!p->journalOff) ); - assert( p->errCode || !p->stmtOpen || p->exclusiveMode ); + sqlite3_free(pPager->aSavepoint); + pPager->aSavepoint = 0; + pPager->nSavepoint = 0; + pPager->nSubRec = 0; } - /* -** Clear the in-memory cache. This routine -** sets the state of the pager back to what it was when it was first -** opened. Any outstanding pages are invalidated and subsequent attempts -** to access those pages will likely result in a coredump. +** Set the bit number pgno in the PagerSavepoint.pInSavepoint +** bitvecs of all open savepoints. Return SQLITE_OK if successful +** or SQLITE_NOMEM if a malloc failure occurs. 
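**
** For illustration, a Bitvec behaves like a set of page numbers:
**
**   Bitvec *p = sqlite3BitvecCreate(nPage);   /* empty set over 1..nPage */
**   rc = sqlite3BitvecSet(p, pgno);           /* add pgno (may hit OOM)  */
**   inSet = sqlite3BitvecTest(p, pgno);       /* non-zero if pgno is set */
**   sqlite3BitvecDestroy(p);
**
** The "rc |=" idiom in the loop below works because SQLITE_OK is zero
** and the only error sqlite3BitvecSet() can return here is SQLITE_NOMEM.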
*/ -static void pager_reset(Pager *pPager){ - PgHdr *pPg, *pNext; - if( pPager->errCode ) return; - for(pPg=pPager->pAll; pPg; pPg=pNext){ - IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - pNext = pPg->pNextAll; - sqliteFree(pPg); - } - pPager->pStmt = 0; - pPager->pFirst = 0; - pPager->pFirstSynced = 0; - pPager->pLast = 0; - pPager->pAll = 0; - pPager->nHash = 0; - sqliteFree(pPager->aHash); - pPager->nPage = 0; - pPager->aHash = 0; - pPager->nRef = 0; +static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){ + int ii; /* Loop counter */ + int rc = SQLITE_OK; /* Result code */ + + for(ii=0; iinSavepoint; ii++){ + PagerSavepoint *p = &pPager->aSavepoint[ii]; + if( pgno<=p->nOrig ){ + rc |= sqlite3BitvecSet(p->pInSavepoint, pgno); + testcase( rc==SQLITE_NOMEM ); + assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); + } + } + return rc; } /* -** This routine ends a transaction. A transaction is ended by either -** a COMMIT or a ROLLBACK. -** -** When this routine is called, the pager has the journal file open and -** a RESERVED or EXCLUSIVE lock on the database. This routine will release -** the database lock and acquires a SHARED lock in its place if that is -** the appropriate thing to do. Release locks usually is appropriate, -** unless we are in exclusive access mode or unless this is a -** COMMIT AND BEGIN or ROLLBACK AND BEGIN operation. -** -** The journal file is either deleted or truncated. -** -** TODO: Consider keeping the journal file open for temporary databases. -** This might give a performance improvement on windows where opening -** a file is an expensive operation. +** Unlock the database file. This function is a no-op if the pager +** is in exclusive mode. +** +** If the pager is currently in error state, discard the contents of +** the cache and reset the Pager structure internal state. If there is +** an open journal-file, then the next time a shared-lock is obtained +** on the pager file (by this or any other process), it will be +** treated as a hot-journal and rolled back. */ -static int pager_end_transaction(Pager *pPager){ - PgHdr *pPg; - int rc = SQLITE_OK; - int rc2 = SQLITE_OK; - assert( !MEMDB ); - if( pPager->statestmtOpen && !pPager->exclusiveMode ){ - sqlite3OsClose(&pPager->stfd); - pPager->stmtOpen = 0; - } - if( pPager->journalOpen ){ - if( pPager->exclusiveMode - && (rc = sqlite3OsTruncate(pPager->jfd, 0))==SQLITE_OK ){; - sqlite3OsSeek(pPager->jfd, 0); - pPager->journalOff = 0; - pPager->journalStarted = 0; - }else{ - sqlite3OsClose(&pPager->jfd); - pPager->journalOpen = 0; +static void pager_unlock(Pager *pPager){ + if( !pPager->exclusiveMode ){ + int rc; /* Return code */ + + /* Always close the journal file when dropping the database lock. + ** Otherwise, another connection with journal_mode=delete might + ** delete the file out from under us. + */ + sqlite3OsClose(pPager->jfd); + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; + releaseAllSavepoints(pPager); + + /* If the file is unlocked, somebody else might change it. The + ** values stored in Pager.dbSize etc. might become invalid if + ** this happens. TODO: Really, this doesn't need to be cleared + ** until the change-counter check fails in pagerSharedLock(). + */ + pPager->dbSizeValid = 0; + + rc = osUnlock(pPager->fd, NO_LOCK); + if( rc ){ + pPager->errCode = rc; + } + IOTRACE(("UNLOCK %p\n", pPager)) + + /* If Pager.errCode is set, the contents of the pager cache cannot be + ** trusted. 
Now that the pager file is unlocked, the contents of the + ** cache can be discarded and the error code safely cleared. + */ + if( pPager->errCode ){ if( rc==SQLITE_OK ){ - rc = sqlite3OsDelete(pPager->zJournal); + pPager->errCode = SQLITE_OK; } + pager_reset(pPager); } - sqliteFree( pPager->aInJournal ); - pPager->aInJournal = 0; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - pPg->inJournal = 0; - pPg->dirty = 0; - pPg->needSync = 0; - pPg->alwaysRollback = 0; -#ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); -#endif - } - pPager->pDirty = 0; - pPager->dirtyCache = 0; - pPager->nRec = 0; - }else{ - assert( pPager->aInJournal==0 ); - assert( pPager->dirtyCache==0 || pPager->useJournal==0 ); - } - if( !pPager->exclusiveMode ){ - rc2 = sqlite3OsUnlock(pPager->fd, SHARED_LOCK); + pPager->changeCountDone = 0; + pPager->state = PAGER_UNLOCK; + } +} + +/* +** This function should be called when an IOERR, CORRUPT or FULL error +** may have occurred. The first argument is a pointer to the pager +** structure, the second the error-code about to be returned by a pager +** API function. The value returned is a copy of the second argument +** to this function. +** +** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL +** the error becomes persistent. Until the persisten error is cleared, +** subsequent API calls on this Pager will immediately return the same +** error code. +** +** A persistent error indicates that the contents of the pager-cache +** cannot be trusted. This state can be cleared by completely discarding +** the contents of the pager-cache. If a transaction was active when +** the persistent error occurred, then the rollback journal may need +** to be replayed to restore the contents of the database file (as if +** it were a hot-journal). +*/ +static int pager_error(Pager *pPager, int rc){ + int rc2 = rc & 0xff; + assert( + pPager->errCode==SQLITE_FULL || + pPager->errCode==SQLITE_OK || + (pPager->errCode & 0xff)==SQLITE_IOERR + ); + if( + rc2==SQLITE_FULL || + rc2==SQLITE_IOERR || + rc2==SQLITE_CORRUPT + ){ + pPager->errCode = rc; + if( pPager->state==PAGER_UNLOCK + && sqlite3PcacheRefCount(pPager->pPCache)==0 + ){ + /* If the pager is already unlocked, call pager_unlock() now to + ** clear the error state and ensure that the pager-cache is + ** completely empty. + */ + pager_unlock(pPager); + } + } + return rc; +} + +/* +** Execute a rollback if a transaction is active and unlock the +** database file. +** +** If the pager has already entered the error state, do not attempt +** the rollback at this time. Instead, pager_unlock() is called. The +** call to pager_unlock() will discard all in-memory pages, unlock +** the database file and clear the error state. If this means that +** there is a hot-journal left in the file-system, the next connection +** to obtain a shared lock on the pager (which may be this one) will +** roll it back. +** +** If the pager has not already entered the error state, but an IO or +** malloc error occurs during a rollback, then this will itself cause +** the pager to enter the error state. Which will be cleared by the +** call to pager_unlock(), as described above. +*/ +static void pagerUnlockAndRollback(Pager *pPager){ + if( pPager->errCode==SQLITE_OK && pPager->state>=PAGER_RESERVED ){ + sqlite3BeginBenignMalloc(); + sqlite3PagerRollback(pPager); + sqlite3EndBenignMalloc(); + } + pager_unlock(pPager); +} + +/* +** This routine ends a transaction. 
A transaction is usually ended by +** either a COMMIT or a ROLLBACK operation. This routine may be called +** after rollback of a hot-journal, or if an error occurs while opening +** the journal file or writing the very first journal-header of a +** database transaction. +** +** If the pager is in PAGER_SHARED or PAGER_UNLOCK state when this +** routine is called, it is a no-op (returns SQLITE_OK). +** +** Otherwise, any active savepoints are released. +** +** If the journal file is open, then it is "finalized". Once a journal +** file has been finalized it is not possible to use it to roll back a +** transaction. Nor will it be considered to be a hot-journal by this +** or any other database connection. Exactly how a journal is finalized +** depends on whether or not the pager is running in exclusive mode and +** the current journal-mode (Pager.journalMode value), as follows: +** +** journalMode==MEMORY +** Journal file descriptor is simply closed. This destroys an +** in-memory journal. +** +** journalMode==TRUNCATE +** Journal file is truncated to zero bytes in size. +** +** journalMode==PERSIST +** The first 28 bytes of the journal file are zeroed. This invalidates +** the first journal header in the file, and hence the entire journal +** file. An invalid journal file cannot be rolled back. +** +** journalMode==DELETE +** The journal file is closed and deleted using sqlite3OsDelete(). +** +** If the pager is running in exclusive mode, this method of finalizing +** the journal file is never used. Instead, if the journalMode is +** DELETE and the pager is in exclusive mode, the method described under +** journalMode==PERSIST is used instead. +** +** After the journal is finalized, if running in non-exclusive mode, the +** pager moves to PAGER_SHARED state (and downgrades the lock on the +** database file accordingly). +** +** If the pager is running in exclusive mode and is in PAGER_SYNCED state, +** it moves to PAGER_EXCLUSIVE. No locks are downgraded when running in +** exclusive mode. +** +** SQLITE_OK is returned if no error occurs. If an error occurs during +** any of the IO operations to finalize the journal file or unlock the +** database then the IO error code is returned to the user. If the +** operation to finalize the journal file fails, then the code still +** tries to unlock the database file if not in exclusive mode. If the +** unlock operation fails as well, then the first error code related +** to the first error encountered (the journal finalization one) is +** returned. +*/ +static int pager_end_transaction(Pager *pPager, int hasMaster){ + int rc = SQLITE_OK; /* Error code from journal finalization operation */ + int rc2 = SQLITE_OK; /* Error code from db file unlock operation */ + + if( pPager->statejfd) || pPager->pInJournal==0 ); + if( isOpen(pPager->jfd) ){ + + /* TODO: There's a problem here if a journal-file was opened in MEMORY + ** mode and then the journal-mode is changed to TRUNCATE or PERSIST + ** during the transaction. This code should be changed to assume + ** that the journal mode has not changed since the transaction was + ** started. And the sqlite3PagerJournalMode() function should be + ** changed to make sure that this is the case too. + */ + + /* Finalize the journal file. 
*/ + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ + int isMemoryJournal = sqlite3IsMemJournal(pPager->jfd); + sqlite3OsClose(pPager->jfd); + if( !isMemoryJournal ){ + rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); + } + }else if( pPager->journalMode==PAGER_JOURNALMODE_TRUNCATE ){ + if( pPager->journalOff==0 ){ + rc = SQLITE_OK; + }else{ + rc = sqlite3OsTruncate(pPager->jfd, 0); + } + pPager->journalOff = 0; + pPager->journalStarted = 0; + }else if( pPager->exclusiveMode + || pPager->journalMode==PAGER_JOURNALMODE_PERSIST + ){ + rc = zeroJournalHdr(pPager, hasMaster); + pager_error(pPager, rc); + pPager->journalOff = 0; + pPager->journalStarted = 0; + }else{ + assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE || rc ); + sqlite3OsClose(pPager->jfd); + if( rc==SQLITE_OK && !pPager->tempFile ){ + rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); + } + } + +#ifdef SQLITE_CHECK_PAGES + sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash); +#endif + + sqlite3PcacheCleanAll(pPager->pPCache); + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; + pPager->nRec = 0; + } + + if( !pPager->exclusiveMode ){ + rc2 = osUnlock(pPager->fd, SHARED_LOCK); pPager->state = PAGER_SHARED; + pPager->changeCountDone = 0; }else if( pPager->state==PAGER_SYNCED ){ pPager->state = PAGER_EXCLUSIVE; } - pPager->origDbSize = 0; pPager->setMaster = 0; pPager->needSync = 0; - pPager->pFirstSynced = pPager->pFirst; - pPager->dbSize = -1; + pPager->dbModified = 0; + + /* TODO: Is this optimal? Why is the db size invalidated here + ** when the database file is not unlocked? */ + pPager->dbOrigSize = 0; + sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); + if( !MEMDB ){ + pPager->dbSizeValid = 0; + } return (rc==SQLITE_OK?rc2:rc); } /* -** Compute and return a checksum for the page of data. -** -** This is not a real checksum. It is really just the sum of the -** random initial value and the page number. We experimented with -** a checksum of the entire data, but that was found to be too slow. -** -** Note that the page number is stored at the beginning of data and -** the checksum is stored at the end. This is important. If journal -** corruption occurs due to a power failure, the most likely scenario -** is that one end or the other of the record will be changed. It is -** much less likely that the two ends of the journal record will be +** Parameter aData must point to a buffer of pPager->pageSize bytes +** of data. Compute and return a checksum based ont the contents of the +** page of data and the current value of pPager->cksumInit. +** +** This is not a real checksum. It is really just the sum of the +** random initial value (pPager->cksumInit) and every 200th byte +** of the page data, starting with byte offset (pPager->pageSize%200). +** Each byte is interpreted as an 8-bit unsigned integer. +** +** Changing the formula used to compute this checksum results in an +** incompatible journal file format. +** +** If journal corruption occurs due to a power failure, the most likely +** scenario is that one end or the other of the record will be changed. +** It is much less likely that the two ends of the journal record will be ** correct and the middle be corrupt. Thus, this "checksum" scheme, ** though fast and simple, catches the mostly likely kind of corruption. -** -** FIX ME: Consider adding every 200th (or so) byte of the data to the -** checksum. 
That way if a single page spans 3 or more disk sectors and -** only the middle sector is corrupt, we will still have a reasonable -** chance of failing the checksum and thus detecting the problem. */ static u32 pager_cksum(Pager *pPager, const u8 *aData){ - u32 cksum = pPager->cksumInit; - int i = pPager->pageSize-200; + u32 cksum = pPager->cksumInit; /* Checksum value to return */ + int i = pPager->pageSize-200; /* Loop counter */ while( i>0 ){ cksum += aData[i]; i -= 200; @@ -1100,35 +1391,76 @@ return cksum; } -/* Forward declaration */ -static void makeClean(PgHdr*); - /* -** Read a single page from the journal file opened on file descriptor -** jfd. Playback this one page. -** -** If useCksum==0 it means this journal does not use checksums. Checksums -** are not used in statement journals because statement journals do not -** need to survive power failures. -*/ -static int pager_playback_one_page(Pager *pPager, OsFile *jfd, int useCksum){ +** Read a single page from either the journal file (if isMainJrnl==1) or +** from the sub-journal (if isMainJrnl==0) and playback that page. +** The page begins at offset *pOffset into the file. The *pOffset +** value is increased to the start of the next page in the journal. +** +** The isMainJrnl flag is true if this is the main rollback journal and +** false for the statement journal. The main rollback journal uses +** checksums - the statement journal does not. +** +** If the page number of the page record read from the (sub-)journal file +** is greater than the current value of Pager.dbSize, then playback is +** skipped and SQLITE_OK is returned. +** +** If pDone is not NULL, then it is a record of pages that have already +** been played back. If the page at *pOffset has already been played back +** (if the corresponding pDone bit is set) then skip the playback. +** Make sure the pDone bit corresponding to the *pOffset page is set +** prior to returning. +** +** If the page record is successfully read from the (sub-)journal file +** and played back, then SQLITE_OK is returned. If an IO error occurs +** while reading the record from the (sub-)journal file or while writing +** to the database file, then the IO error code is returned. If data +** is successfully read from the (sub-)journal file but appears to be +** corrupted, SQLITE_DONE is returned. Data is considered corrupted in +** two circumstances: +** +** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or +** * If the record is being rolled back from the main journal file +** and the checksum field does not match the record content. +** +** Neither of these two scenarios are possible during a savepoint rollback. +** +** If this is a savepoint rollback, then memory may have to be dynamically +** allocated by this function. If this is the case and an allocation fails, +** SQLITE_NOMEM is returned. +*/ +static int pager_playback_one_page( + Pager *pPager, /* The pager being played back */ + int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. 
*/ + int isUnsync, /* True if reading from unsynced main journal */ + i64 *pOffset, /* Offset of record to playback */ + int isSavepnt, /* True for a savepoint rollback */ + Bitvec *pDone /* Bitvec of pages already played back */ +){ int rc; PgHdr *pPg; /* An existing page in the cache */ Pgno pgno; /* The page number of a page in journal */ u32 cksum; /* Checksum used for sanity checking */ - u8 *aData = (u8 *)pPager->pTmpSpace; /* Temp storage for a page */ + u8 *aData; /* Temporary storage for the page */ + sqlite3_file *jfd; /* The file descriptor for the journal file */ - /* useCksum should be true for the main journal and false for - ** statement journals. Verify that this is always the case - */ - assert( jfd == (useCksum ? pPager->jfd : pPager->stfd) ); - assert( aData ); + assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */ + assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */ + assert( isMainJrnl || pDone ); /* pDone always used on sub-journals */ + assert( isSavepnt || pDone==0 ); /* pDone never used on non-savepoint */ - rc = read32bits(jfd, &pgno); + aData = (u8*)pPager->pTmpSpace; + assert( aData ); /* Temp storage must have already been allocated */ + + /* Read the page number and page data from the journal or sub-journal + ** file. Return an error code to the caller if an IO error occurs. + */ + jfd = isMainJrnl ? pPager->jfd : pPager->sjfd; + rc = read32bits(jfd, *pOffset, &pgno); if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsRead(jfd, aData, pPager->pageSize); + rc = sqlite3OsRead(jfd, aData, pPager->pageSize, (*pOffset)+4); if( rc!=SQLITE_OK ) return rc; - pPager->journalOff += pPager->pageSize + 4; + *pOffset += pPager->pageSize + 4 + isMainJrnl*4; /* Sanity checking on the page. This is more important that I originally ** thought. If a power failure occurs while the journal is being written, @@ -1136,20 +1468,24 @@ ** detect this invalid data (with high probability) and ignore it. */ if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ + assert( !isSavepnt ); return SQLITE_DONE; } - if( pgno>(unsigned)pPager->dbSize ){ + if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){ return SQLITE_OK; } - if( useCksum ){ - rc = read32bits(jfd, &cksum); + if( isMainJrnl ){ + rc = read32bits(jfd, (*pOffset)-4, &cksum); if( rc ) return rc; - pPager->journalOff += 4; - if( pager_cksum(pPager, aData)!=cksum ){ + if( !isSavepnt && pager_cksum(pPager, aData)!=cksum ){ return SQLITE_DONE; } } + if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){ + return rc; + } + assert( pPager->state==PAGER_RESERVED || pPager->state>=PAGER_EXCLUSIVE ); /* If the pager is in RESERVED state, then there must be a copy of this @@ -1179,18 +1515,55 @@ ** locked. (2) we know that the original page content is fully synced ** in the main journal either because the page is not in cache or else ** the page is marked as needSync==0. + ** + ** 2008-04-14: When attempting to vacuum a corrupt database file, it + ** is possible to fail a statement on a database that does not yet exist. + ** Do not attempt to write if database file has never been opened. 
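  **
  ** For reference, a rolled-back page is written to the database file at
  ** byte offset (pgno-1)*pageSize below; e.g. with a 1024 byte page size,
  ** page 3 is written starting at offset 2048.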
*/ pPg = pager_lookup(pPager, pgno); - PAGERTRACE4("PLAYBACK %d page %d hash(%08x)\n", - PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, aData)); - if( pPager->state>=PAGER_EXCLUSIVE && (pPg==0 || pPg->needSync==0) ){ - rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize); - } - if( pPg ){ - makeClean(pPg); + assert( pPg || !MEMDB ); + PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", + PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, aData), + (isMainJrnl?"main-journal":"sub-journal") + )); + if( (pPager->state>=PAGER_EXCLUSIVE) + && (pPg==0 || 0==(pPg->flags&PGHDR_NEED_SYNC)) + && isOpen(pPager->fd) + && !isUnsync + ){ + i64 ofst = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize, ofst); + if( pgno>pPager->dbFileSize ){ + pPager->dbFileSize = pgno; + } + if( pPager->pBackup ){ + CODEC1(pPager, aData, pgno, 3, rc=SQLITE_NOMEM); + sqlite3BackupUpdate(pPager->pBackup, pgno, aData); + CODEC1(pPager, aData, pgno, 0, rc=SQLITE_NOMEM); + } + }else if( !isMainJrnl && pPg==0 ){ + /* If this is a rollback of a savepoint and data was not written to + ** the database and the page is not in-memory, there is a potential + ** problem. When the page is next fetched by the b-tree layer, it + ** will be read from the database file, which may or may not be + ** current. + ** + ** There are a couple of different ways this can happen. All are quite + ** obscure. When running in synchronous mode, this can only happen + ** if the page is on the free-list at the start of the transaction, then + ** populated, then moved using sqlite3PagerMovepage(). + ** + ** The solution is to add an in-memory page to the cache containing + ** the data just read from the sub-journal. Mark the page as dirty + ** and if the pager requires a journal-sync, then mark the page as + ** requiring a journal-sync before it is written. + */ + assert( isSavepnt ); + if( (rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1))!=SQLITE_OK ){ + return rc; } + pPg->flags &= ~PGHDR_NEED_READ; + sqlite3PcacheMakeDirty(pPg); } if( pPg ){ /* No page should ever be explicitly rolled back that is in use, except @@ -1200,11 +1573,31 @@ ** sqlite3PagerRollback(). */ void *pData; - /* assert( pPg->nRef==0 || pPg->pgno==1 ); */ - pData = PGHDR_TO_DATA(pPg); + pData = pPg->pData; memcpy(pData, aData, pPager->pageSize); if( pPager->xReiniter ){ - pPager->xReiniter(pPg, pPager->pageSize); + pPager->xReiniter(pPg); + } + if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ + /* If the contents of this page were just restored from the main + ** journal file, then its content must be as they were when the + ** transaction was first opened. In this case we can mark the page + ** as clean, since there will be no need to write it out to the. + ** + ** There is one exception to this rule. If the page is being rolled + ** back as part of a savepoint (or statement) rollback from an + ** unsynced portion of the main journal file, then it is not safe + ** to mark the page as clean. This is because marking the page as + ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is + ** already in the journal file (recorded in Pager.pInJournal) and + ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to + ** again within this transaction, it will be marked as dirty but + ** the PGHDR_NEED_SYNC flag will not be set. 
It could then potentially + ** be written out into the database file before its journal file + ** segment is synced. If a crash occurs during or following this, + ** database corruption may ensue. + */ + sqlite3PcacheMakeClean(pPg); } #ifdef SQLITE_CHECK_PAGES pPg->pageHash = pager_pagehash(pPg); @@ -1216,128 +1609,257 @@ } /* Decode the page just read from disk */ - CODEC1(pPager, pData, pPg->pgno, 3); + CODEC1(pPager, pData, pPg->pgno, 3, rc=SQLITE_NOMEM); + sqlite3PcacheRelease(pPg); } return rc; } +#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) +/* +** This routine looks ahead into the main journal file and determines +** whether or not the next record (the record that begins at file +** offset pPager->journalOff) is a well-formed page record consisting +** of a valid page number, pPage->pageSize bytes of content, followed +** by a valid checksum. +** +** The pager never needs to know this in order to do its job. This +** routine is only used from with assert() and testcase() macros. +*/ +static int pagerNextJournalPageIsValid(Pager *pPager){ + Pgno pgno; /* The page number of the page */ + u32 cksum; /* The page checksum */ + int rc; /* Return code from read operations */ + sqlite3_file *fd; /* The file descriptor from which we are reading */ + u8 *aData; /* Content of the page */ + + /* Read the page number header */ + fd = pPager->jfd; + rc = read32bits(fd, pPager->journalOff, &pgno); + if( rc!=SQLITE_OK ){ return 0; } /*NO_TEST*/ + if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ return 0; } /*NO_TEST*/ + if( pgno>(Pgno)pPager->dbSize ){ return 0; } /*NO_TEST*/ + + /* Read the checksum */ + rc = read32bits(fd, pPager->journalOff+pPager->pageSize+4, &cksum); + if( rc!=SQLITE_OK ){ return 0; } /*NO_TEST*/ + + /* Read the data and verify the checksum */ + aData = (u8*)pPager->pTmpSpace; + rc = sqlite3OsRead(fd, aData, pPager->pageSize, pPager->journalOff+4); + if( rc!=SQLITE_OK ){ return 0; } /*NO_TEST*/ + if( pager_cksum(pPager, aData)!=cksum ){ return 0; } /*NO_TEST*/ + + /* Reach this point only if the page is valid */ + return 1; +} +#endif /* !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) */ + /* ** Parameter zMaster is the name of a master journal file. A single journal ** file that referred to the master journal file has just been rolled back. ** This routine checks if it is possible to delete the master journal file, ** and does so if it is. ** -** The master journal file contains the names of all child journals. -** To tell if a master journal can be deleted, check to each of the -** children. If all children are either missing or do not refer to -** a different master journal, then this master journal can be deleted. -*/ -static int pager_delmaster(const char *zMaster){ - int rc; - int master_open = 0; - OsFile *master = 0; +** Argument zMaster may point to Pager.pTmpSpace. So that buffer is not +** available for use within this function. +** +** When a master journal file is created, it is populated with the names +** of all of its child journals, one after another, formatted as utf-8 +** encoded text. The end of each child journal file is marked with a +** nul-terminator byte (0x00). i.e. the entire contents of a master journal +** file for a transaction involving two databases might be: +** +** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00" +** +** A master journal file may only be deleted once all of its child +** journals have been rolled back. 
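[Editor's aside, not part of the diff] Because the master-journal format described above is just a sequence of nul-terminated UTF-8 names, iterating over the child journals is straightforward. A self-contained sketch follows; the function name and the visitor callback are hypothetical, not SQLite API.

#include <string.h>

/* Illustrative sketch only: visit each nul-terminated child-journal name
** stored in a master-journal image, e.g.
** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00". */
static void visit_child_journals(const char *zBuf, int nBuf,
                                 void (*xVisit)(const char *zChild)){
  int i = 0;
  while( i<nBuf && zBuf[i] ){
    xVisit(&zBuf[i]);                   /* One child journal name */
    i += (int)strlen(&zBuf[i]) + 1;     /* Skip the name and its 0x00 */
  }
}
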
+** +** This function reads the contents of the master-journal file into +** memory and loops through each of the child journal names. For +** each child journal, it checks if: +** +** * if the child journal exists, and if so +** * if the child journal contains a reference to master journal +** file zMaster +** +** If a child journal can be found that matches both of the criteria +** above, this function returns without doing anything. Otherwise, if +** no such child journal can be found, file zMaster is deleted from +** the file-system using sqlite3OsDelete(). +** +** If an IO error within this function, an error code is returned. This +** function allocates memory by calling sqlite3Malloc(). If an allocation +** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors +** occur, SQLITE_OK is returned. +** +** TODO: This function allocates a single block of memory to load +** the entire contents of the master journal file. This could be +** a couple of kilobytes or so - potentially larger than the page +** size. +*/ +static int pager_delmaster(Pager *pPager, const char *zMaster){ + sqlite3_vfs *pVfs = pPager->pVfs; + int rc; /* Return code */ + sqlite3_file *pMaster; /* Malloc'd master-journal file descriptor */ + sqlite3_file *pJournal; /* Malloc'd child-journal file descriptor */ char *zMasterJournal = 0; /* Contents of master journal file */ i64 nMasterJournal; /* Size of master journal file */ - /* Open the master journal file exclusively in case some other process - ** is running this routine also. Not that it makes too much difference. + /* Allocate space for both the pJournal and pMaster file descriptors. + ** If successful, open the master journal file for reading. */ - rc = sqlite3OsOpenReadOnly(zMaster, &master); - assert( rc!=SQLITE_OK || master ); + pMaster = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); + pJournal = (sqlite3_file *)(((u8 *)pMaster) + pVfs->szOsFile); + if( !pMaster ){ + rc = SQLITE_NOMEM; + }else{ + const int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MASTER_JOURNAL); + rc = sqlite3OsOpen(pVfs, zMaster, pMaster, flags, 0); + } if( rc!=SQLITE_OK ) goto delmaster_out; - master_open = 1; - rc = sqlite3OsFileSize(master, &nMasterJournal); + + rc = sqlite3OsFileSize(pMaster, &nMasterJournal); if( rc!=SQLITE_OK ) goto delmaster_out; if( nMasterJournal>0 ){ char *zJournal; char *zMasterPtr = 0; + int nMasterPtr = pVfs->mxPathname+1; /* Load the entire master journal file into space obtained from - ** sqliteMalloc() and pointed to by zMasterJournal. + ** sqlite3_malloc() and pointed to by zMasterJournal. */ - zMasterJournal = (char *)sqliteMalloc(nMasterJournal); + zMasterJournal = (char *)sqlite3Malloc((int)nMasterJournal + nMasterPtr); if( !zMasterJournal ){ rc = SQLITE_NOMEM; goto delmaster_out; } - rc = sqlite3OsRead(master, zMasterJournal, nMasterJournal); + zMasterPtr = &zMasterJournal[nMasterJournal]; + rc = sqlite3OsRead(pMaster, zMasterJournal, (int)nMasterJournal, 0); if( rc!=SQLITE_OK ) goto delmaster_out; zJournal = zMasterJournal; while( (zJournal-zMasterJournal)pageSize bytes). If the file +** on disk is currently larger than nPage pages, then use the VFS +** xTruncate() method to truncate it. +** +** Or, it might might be the case that the file on disk is smaller than +** nPage pages. Some operating system implementations can get confused if +** you try to truncate a file to some size that is larger than it +** currently is, so detect this case and write a single zero byte to +** the end of the new file instead. 
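[Editor's aside, not part of the diff] The truncate-versus-grow rule described above can be illustrated with plain POSIX calls. This is only a sketch of the idea under the assumption of a raw POSIX file descriptor; the real code goes through the SQLite VFS (sqlite3OsTruncate()/sqlite3OsWrite()), as the pager_truncate() hunk that follows shows.

#include <unistd.h>
#include <sys/types.h>

/* Illustrative sketch only: shrink the file with ftruncate(), but "grow"
** it by writing a single zero byte at the last offset, since some systems
** misbehave when asked to truncate a file to a size larger than it
** currently is. Only called with newSize>=1 in the grow case. */
static int set_db_file_size(int fd, off_t currentSize, off_t newSize){
  if( currentSize==newSize ) return 0;                   /* Nothing to do */
  if( currentSize>newSize ){
    return ftruncate(fd, newSize);                       /* Shrink */
  }else{
    char zero = 0;
    return (pwrite(fd, &zero, 1, newSize-1)==1) ? 0 : -1;  /* Grow */
  }
}
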
+** +** If successful, return SQLITE_OK. If an IO error occurs while modifying +** the database file, return the error code to the caller. */ -static int pager_truncate(Pager *pPager, int nPage){ +static int pager_truncate(Pager *pPager, Pgno nPage){ int rc = SQLITE_OK; - if( pPager->state>=PAGER_EXCLUSIVE ){ - rc = sqlite3OsTruncate(pPager->fd, pPager->pageSize*(i64)nPage); - } - if( rc==SQLITE_OK ){ - pPager->dbSize = nPage; - pager_truncate_cache(pPager); + if( pPager->state>=PAGER_EXCLUSIVE && isOpen(pPager->fd) ){ + i64 currentSize, newSize; + /* TODO: Is it safe to use Pager.dbFileSize here? */ + rc = sqlite3OsFileSize(pPager->fd, ¤tSize); + newSize = pPager->pageSize*(i64)nPage; + if( rc==SQLITE_OK && currentSize!=newSize ){ + if( currentSize>newSize ){ + rc = sqlite3OsTruncate(pPager->fd, newSize); + }else{ + rc = sqlite3OsWrite(pPager->fd, "", 1, newSize-1); + } + if( rc==SQLITE_OK ){ + pPager->dbFileSize = nPage; + } + } } return rc; } /* -** Set the sectorSize for the given pager. -** -** The sector size is the larger of the sector size reported -** by sqlite3OsSectorSize() and the pageSize. +** Set the value of the Pager.sectorSize variable for the given +** pager based on the value returned by the xSectorSize method +** of the open database file. The sector size will be used used +** to determine the size and alignment of journal header and +** master journal pointers within created journal files. +** +** For temporary files the effective sector size is always 512 bytes. +** +** Otherwise, for non-temporary files, the effective sector size is +** the value returned by the xSectorSize() method rounded up to 512 if +** it is less than 512, or rounded down to MAX_SECTOR_SIZE if it +** is greater than MAX_SECTOR_SIZE. */ static void setSectorSize(Pager *pPager){ - pPager->sectorSize = sqlite3OsSectorSize(pPager->fd); - if( pPager->sectorSizepageSize ){ - pPager->sectorSize = pPager->pageSize; + assert( isOpen(pPager->fd) || pPager->tempFile ); + + if( !pPager->tempFile ){ + /* Sector size doesn't matter for temporary files. Also, the file + ** may not have been opened yet, in which case the OsSectorSize() + ** call will segfault. + */ + pPager->sectorSize = sqlite3OsSectorSize(pPager->fd); + } + if( pPager->sectorSize<512 ){ + pPager->sectorSize = 512; + } + if( pPager->sectorSize>MAX_SECTOR_SIZE ){ + assert( MAX_SECTOR_SIZE>=512 ); + pPager->sectorSize = MAX_SECTOR_SIZE; } } @@ -1355,20 +1877,23 @@ ** sanity checksum. ** (4) 4 byte integer which is the number of pages to truncate the ** database to during a rollback. -** (5) 4 byte integer which is the number of bytes in the master journal +** (5) 4 byte big-endian integer which is the sector size. The header +** is this many bytes in size. +** (6) 4 byte big-endian integer which is the page case. +** (7) 4 byte integer which is the number of bytes in the master journal ** name. The value may be zero (indicate that there is no master ** journal.) -** (6) N bytes of the master journal name. The name will be nul-terminated +** (8) N bytes of the master journal name. The name will be nul-terminated ** and might be shorter than the value read from (5). If the first byte ** of the name is \000 then there is no master journal. The master ** journal name is stored in UTF-8. -** (7) Zero or more pages instances, each as follows: +** (9) Zero or more pages instances, each as follows: ** + 4 byte page number. ** + pPager->pageSize bytes of data. 
** + 4 byte checksum ** -** When we speak of the journal header, we mean the first 6 items above. -** Each entry in the journal is an instance of the 7th item. +** When we speak of the journal header, we mean the first 8 items above. +** Each entry in the journal is an instance of the 9th item. ** ** Call the value from the second bullet "nRec". nRec is the number of ** valid page entries in the journal. In most cases, you can compute the @@ -1393,19 +1918,29 @@ ** ** If an I/O or malloc() error occurs, the journal-file is not deleted ** and an error code is returned. +** +** The isHot parameter indicates that we are trying to rollback a journal +** that might be a hot journal. Or, it could be that the journal is +** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE. +** If the journal really is hot, reset the pager cache prior rolling +** back any content. If the journal is merely persistent, no reset is +** needed. */ static int pager_playback(Pager *pPager, int isHot){ + sqlite3_vfs *pVfs = pPager->pVfs; i64 szJ; /* Size of the journal file in bytes */ u32 nRec; /* Number of Records in the journal */ - int i; /* Loop counter */ + u32 u; /* Unsigned loop counter */ Pgno mxPg = 0; /* Size of the original file in pages */ int rc; /* Result code of a subroutine */ + int res = 1; /* Value returned by sqlite3OsAccess() */ char *zMaster = 0; /* Name of master journal file if any */ + int needPagerReset; /* True to reset page prior to first page rollback */ /* Figure out how many records are in the journal. Abort early if ** the journal is empty. */ - assert( pPager->journalOpen ); + assert( isOpen(pPager->jfd) ); rc = sqlite3OsFileSize(pPager->jfd, &szJ); if( rc!=SQLITE_OK || szJ==0 ){ goto end_playback; @@ -1415,28 +1950,38 @@ ** If a master journal file name is specified, but the file is not ** present on disk, then the journal is not hot and does not need to be ** played back. - */ - rc = readMasterJournal(pPager->jfd, &zMaster); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK || (zMaster && !sqlite3OsFileExists(zMaster)) ){ - sqliteFree(zMaster); - zMaster = 0; - if( rc==SQLITE_DONE ) rc = SQLITE_OK; + ** + ** TODO: Technically the following is an error because it assumes that + ** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that + ** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c, + ** mxPathname is 512, which is the same as the minimum allowable value + ** for pageSize. + */ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + if( rc==SQLITE_OK && zMaster[0] ){ + rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); + } + zMaster = 0; + if( rc!=SQLITE_OK || !res ){ goto end_playback; } - sqlite3OsSeek(pPager->jfd, 0); pPager->journalOff = 0; + needPagerReset = isHot; - /* This loop terminates either when the readJournalHdr() call returns - ** SQLITE_DONE or an IO error occurs. */ + /* This loop terminates either when a readJournalHdr() or + ** pager_playback_one_page() call returns SQLITE_DONE or an IO error + ** occurs. + */ while( 1 ){ + int isUnsync = 0; /* Read the next journal header from the journal file. If there are ** not enough bytes left in the journal file for a complete header, or ** it is corrupted, then a process must of failed while writing it. ** This indicates nothing more needs to be rolled back. 
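[Editor's aside, not part of the diff] Given the record layout above - a 4-byte page number, the page image, then a 4-byte checksum - each journal record occupies pageSize+8 bytes. That is exactly how the playback code below derives nRec from the journal size when the stored nRec field is 0xffffffff. A minimal sketch of that arithmetic, with illustrative names only:

#include <stdint.h>

/* Illustrative sketch only: number of complete journal records in a
** segment of nSegBytes bytes, where each record is a 4-byte page number,
** pageSize bytes of page data and a 4-byte checksum. */
static uint32_t journal_records_in_segment(int64_t nSegBytes, int pageSize){
  int64_t recSize = (int64_t)pageSize + 8;   /* 4 (pgno) + pageSize + 4 (cksum) */
  return (uint32_t)(nSegBytes / recSize);
}

For example, a segment holding 16 records written with 1024-byte pages occupies 16*(1024+8) = 16512 bytes.
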
*/ - rc = readJournalHdr(pPager, szJ, &nRec, &mxPg); + rc = readJournalHdr(pPager, isHot, szJ, &nRec, &mxPg); if( rc!=SQLITE_OK ){ if( rc==SQLITE_DONE ){ rc = SQLITE_OK; @@ -1451,7 +1996,7 @@ */ if( nRec==0xffffffff ){ assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); - nRec = (szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager); + nRec = (int)((szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager)); } /* If nRec is 0 and this rollback is of a transaction created by this @@ -1461,32 +2006,55 @@ ** size of the file. ** ** The third term of the test was added to fix ticket #2565. + ** When rolling back a hot journal, nRec==0 always means that the next + ** chunk of the journal contains zero pages to be rolled back. But + ** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in + ** the journal, it means that the journal might contain additional + ** pages that need to be rolled back and that the number of pages + ** should be computed based on the journal file size. */ + testcase( nRec==0 && !isHot + && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)!=pPager->journalOff + && ((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager))>0 + && pagerNextJournalPageIsValid(pPager) + ); if( nRec==0 && !isHot && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ - nRec = (szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager); + nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager)); + isUnsync = 1; } /* If this is the first header read from the journal, truncate the - ** database file back to it's original size. + ** database file back to its original size. */ if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ rc = pager_truncate(pPager, mxPg); if( rc!=SQLITE_OK ){ goto end_playback; } + pPager->dbSize = mxPg; } - /* Copy original pages out of the journal and back into the database file. + /* Copy original pages out of the journal and back into the + ** database file and/or page cache. */ - for(i=0; ijfd, 1); + for(u=0; ujournalOff,0,0); if( rc!=SQLITE_OK ){ if( rc==SQLITE_DONE ){ rc = SQLITE_OK; pPager->journalOff = szJ; break; }else{ + /* If we are unable to rollback, quit and return the error + ** code. This will cause the pager to enter the error state + ** so that no further harm will be done. Perhaps the next + ** process to come along will be able to rollback the database. + */ goto end_playback; } } @@ -1496,17 +2064,42 @@ assert( 0 ); end_playback: + /* Following a rollback, the database file should be back in its original + ** state prior to the start of the transaction, so invoke the + ** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the + ** assertion that the transaction counter was modified. + */ + assert( + pPager->fd->pMethods==0 || + sqlite3OsFileControl(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0)>=SQLITE_OK + ); + + /* If this playback is happening automatically as a result of an IO or + ** malloc error that occurred after the change-counter was updated but + ** before the transaction was committed, then the change-counter + ** modification may just have been reverted. If this happens in exclusive + ** mode, then subsequent transactions performed by the connection will not + ** update the change-counter at all. This may lead to cache inconsistency + ** problems for other processes at some point in the future. So, just + ** in case this has happened, clear the changeCountDone flag now. 
+ */ + pPager->changeCountDone = pPager->tempFile; + + if( rc==SQLITE_OK ){ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + testcase( rc!=SQLITE_OK ); + } if( rc==SQLITE_OK ){ - rc = pager_end_transaction(pPager); + rc = pager_end_transaction(pPager, zMaster[0]!='\0'); + testcase( rc!=SQLITE_OK ); } - if( zMaster ){ + if( rc==SQLITE_OK && zMaster[0] && res ){ /* If there was a master journal and this routine will return success, ** see if it is possible to delete the master journal. */ - if( rc==SQLITE_OK ){ - rc = pager_delmaster(zMaster); - } - sqliteFree(zMaster); + rc = pager_delmaster(pPager, zMaster); + testcase( rc!=SQLITE_OK ); } /* The Pager.sectorSize variable may have been updated while rolling @@ -1518,113 +2111,138 @@ } /* -** Playback the statement journal. -** -** This is similar to playing back the transaction journal but with -** a few extra twists. -** -** (1) The number of pages in the database file at the start of -** the statement is stored in pPager->stmtSize, not in the -** journal file itself. -** -** (2) In addition to playing back the statement journal, also -** playback all pages of the transaction journal beginning -** at offset pPager->stmtJSize. -*/ -static int pager_stmt_playback(Pager *pPager){ - i64 szJ; /* Size of the full journal */ - i64 hdrOff; - int nRec; /* Number of Records */ - int i; /* Loop counter */ - int rc; +** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback +** the entire master journal file. The case pSavepoint==NULL occurs when +** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction +** savepoint. +** +** When pSavepoint is not NULL (meaning a non-transaction savepoint is +** being rolled back), then the rollback consists of up to three stages, +** performed in the order specified: +** +** * Pages are played back from the main journal starting at byte +** offset PagerSavepoint.iOffset and continuing to +** PagerSavepoint.iHdrOffset, or to the end of the main journal +** file if PagerSavepoint.iHdrOffset is zero. +** +** * If PagerSavepoint.iHdrOffset is not zero, then pages are played +** back starting from the journal header immediately following +** PagerSavepoint.iHdrOffset to the end of the main journal file. +** +** * Pages are then played back from the sub-journal file, starting +** with the PagerSavepoint.iSubRec and continuing to the end of +** the journal file. +** +** Throughout the rollback process, each time a page is rolled back, the +** corresponding bit is set in a bitvec structure (variable pDone in the +** implementation below). This is used to ensure that a page is only +** rolled back the first time it is encountered in either journal. +** +** If pSavepoint is NULL, then pages are only played back from the main +** journal file. There is no need for a bitvec in this case. +** +** In either case, before playback commences the Pager.dbSize variable +** is reset to the value that it held at the start of the savepoint +** (or transaction). No page with a page-number greater than this value +** is played back. If one is encountered it is simply skipped. 
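[Editor's aside, not part of the diff] The pDone bitvec referred to above only has to answer "has page N already been rolled back?". The following self-contained sketch shows that role in plain C; DoneSet and the function names are made up and are not the real sqlite3Bitvec API.

#include <stdlib.h>

/* Illustrative sketch only: a flat bitmap recording which page numbers
** (1..nPage) have already been played back, so that later journal
** segments and the sub-journal can skip them. */
typedef struct DoneSet DoneSet;
struct DoneSet {
  unsigned int nPage;      /* Largest page number tracked */
  unsigned char *aBits;    /* One bit per page */
};

static DoneSet *done_create(unsigned int nPage){
  DoneSet *p = (DoneSet*)malloc(sizeof(*p));
  if( p ){
    p->nPage = nPage;
    p->aBits = (unsigned char*)calloc((nPage+7)/8, 1);
    if( p->aBits==0 ){ free(p); p = 0; }
  }
  return p;
}
static int done_test(DoneSet *p, unsigned int pgno){
  if( pgno==0 || pgno>p->nPage ) return 0;
  return (p->aBits[(pgno-1)/8] >> ((pgno-1)%8)) & 1;
}
static void done_set(DoneSet *p, unsigned int pgno){
  if( pgno>=1 && pgno<=p->nPage ){
    p->aBits[(pgno-1)/8] |= (unsigned char)(1 << ((pgno-1)%8));
  }
}
static void done_destroy(DoneSet *p){
  if( p ){ free(p->aBits); free(p); }
}
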
+*/ +static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ + i64 szJ; /* Effective size of the main journal */ + i64 iHdrOff; /* End of first segment of main-journal records */ + int rc = SQLITE_OK; /* Return code */ + Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */ - szJ = pPager->journalOff; -#ifndef NDEBUG - { - i64 os_szJ; - rc = sqlite3OsFileSize(pPager->jfd, &os_szJ); - if( rc!=SQLITE_OK ) return rc; - assert( szJ==os_szJ ); - } -#endif + assert( pPager->state>=PAGER_SHARED ); - /* Set hdrOff to be the offset just after the end of the last journal - ** page written before the first journal-header for this statement - ** transaction was written, or the end of the file if no journal - ** header was written. - */ - hdrOff = pPager->stmtHdrOff; - assert( pPager->fullSync || !hdrOff ); - if( !hdrOff ){ - hdrOff = szJ; + /* Allocate a bitvec to use to store the set of pages rolled back */ + if( pSavepoint ){ + pDone = sqlite3BitvecCreate(pSavepoint->nOrig); + if( !pDone ){ + return SQLITE_NOMEM; + } } - - /* Truncate the database back to its original size. - */ - rc = pager_truncate(pPager, pPager->stmtSize); - assert( pPager->state>=PAGER_SHARED ); - /* Figure out how many records are in the statement journal. + /* Set the database size back to the value it was before the savepoint + ** being reverted was opened. */ - assert( pPager->stmtInUse && pPager->journalOpen ); - sqlite3OsSeek(pPager->stfd, 0); - nRec = pPager->stmtNRec; - - /* Copy original pages out of the statement journal and back into the - ** database file. Note that the statement journal omits checksums from - ** each record since power-failure recovery is not important to statement - ** journals. + pPager->dbSize = pSavepoint ? pSavepoint->nOrig : pPager->dbOrigSize; + + /* Use pPager->journalOff as the effective size of the main rollback + ** journal. The actual file might be larger than this in + ** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything + ** past pPager->journalOff is off-limits to us. */ - for(i=nRec-1; i>=0; i--){ - rc = pager_playback_one_page(pPager, pPager->stfd, 0); + szJ = pPager->journalOff; + + /* Begin by rolling back records from the main journal starting at + ** PagerSavepoint.iOffset and continuing to the next journal header. + ** There might be records in the main journal that have a page number + ** greater than the current database size (pPager->dbSize) but those + ** will be skipped automatically. Pages are added to pDone as they + ** are played back. + */ + if( pSavepoint ){ + iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ; + pPager->journalOff = pSavepoint->iOffset; + while( rc==SQLITE_OK && pPager->journalOffjournalOff, 1, pDone); + } assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK ) goto end_stmt_playback; + }else{ + pPager->journalOff = 0; } - /* Now roll some pages back from the transaction journal. Pager.stmtJSize - ** was the size of the journal file when this statement was started, so - ** everything after that needs to be rolled back, either into the - ** database, the memory cache, or both. - ** - ** If it is not zero, then Pager.stmtHdrOff is the offset to the start - ** of the first journal header written during this statement transaction. + /* Continue rolling back records out of the main journal starting at + ** the first journal header seen and continuing until the effective end + ** of the main journal file. Continue to skip out-of-range pages and + ** continue adding pages rolled back to pDone. 
*/ - rc = sqlite3OsSeek(pPager->jfd, pPager->stmtJSize); - if( rc!=SQLITE_OK ){ - goto end_stmt_playback; - } - pPager->journalOff = pPager->stmtJSize; - pPager->cksumInit = pPager->stmtCksum; - while( pPager->journalOff < hdrOff ){ - rc = pager_playback_one_page(pPager, pPager->jfd, 1); + while( rc==SQLITE_OK && pPager->journalOffjournalOff < szJ ){ - u32 nJRec; /* Number of Journal Records */ - u32 dummy; - rc = readJournalHdr(pPager, szJ, &nJRec, &dummy); - if( rc!=SQLITE_OK ){ - assert( rc!=SQLITE_DONE ); - goto end_stmt_playback; + /* + ** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff" + ** test is related to ticket #2565. See the discussion in the + ** pager_playback() function for additional information. + */ + assert( !(nJRec==0 + && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)!=pPager->journalOff + && ((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager))>0 + && pagerNextJournalPageIsValid(pPager)) + ); + if( nJRec==0 + && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff + ){ + nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager)); } - if( nJRec==0 ){ - nJRec = (szJ - pPager->journalOff) / (pPager->pageSize+8); + for(ii=0; rc==SQLITE_OK && iijournalOffjournalOff, 1, pDone); } - for(i=nJRec-1; i>=0 && pPager->journalOff < szJ; i--){ - rc = pager_playback_one_page(pPager, pPager->jfd, 1); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK ) goto end_stmt_playback; + assert( rc!=SQLITE_DONE ); + } + assert( rc!=SQLITE_OK || pPager->journalOff==szJ ); + + /* Finally, rollback pages from the sub-journal. Page that were + ** previously rolled back out of the main journal (and are hence in pDone) + ** will be skipped. Out-of-range pages are also skipped. + */ + if( pSavepoint ){ + u32 ii; /* Loop counter */ + i64 offset = pSavepoint->iSubRec*(4+pPager->pageSize); + for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && iinSubRec; ii++){ + assert( offset==ii*(4+pPager->pageSize) ); + rc = pager_playback_one_page(pPager, 0, 0, &offset, 1, pDone); } + assert( rc!=SQLITE_DONE ); } - pPager->journalOff = szJ; - -end_stmt_playback: - if( rc==SQLITE_OK) { + sqlite3BitvecDestroy(pDone); + if( rc==SQLITE_OK ){ pPager->journalOff = szJ; - /* pager_reload_cache(pPager); */ } return rc; } @@ -1633,11 +2251,7 @@ ** Change the maximum number of in-memory pages that are allowed. */ void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ - if( mxPage>10 ){ - pPager->mxPage = mxPage; - }else{ - pPager->mxPage = 10; - } + sqlite3PcacheSetCachesize(pPager->pPCache, mxPage); } /* @@ -1667,10 +2281,10 @@ ** and FULL=3. */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS -void sqlite3PagerSetSafetyLevel(Pager *pPager, int level, int full_fsync){ - pPager->noSync = level==1 || pPager->tempFile; - pPager->fullSync = level==3 && !pPager->tempFile; - pPager->full_fsync = full_fsync; +void sqlite3PagerSetSafetyLevel(Pager *pPager, int level, int bFullFsync){ + pPager->noSync = (level==1 || pPager->tempFile) ?1:0; + pPager->fullSync = (level==3 && !pPager->tempFile) ?1:0; + pPager->sync_flags = (bFullFsync?SQLITE_SYNC_FULL:SQLITE_SYNC_NORMAL); if( pPager->noSync ) pPager->needSync = 0; } #endif @@ -1685,240 +2299,167 @@ #endif /* -** Open a temporary file. +** Open a temporary file. ** -** Write the file descriptor into *fd. Return SQLITE_OK on success or some -** other error code if we fail. -** -** The OS will automatically delete the temporary file when it is -** closed. 
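[Editor's aside, not part of the diff] The safety-level handling in sqlite3PagerSetSafetyLevel() above reduces to three flags. The sketch below restates that mapping; the struct and function names are illustrative, and the OFF=1/NORMAL=2/FULL=3 numbering follows the comment above that hunk.

/* Illustrative sketch only: how the synchronous setting maps onto the
** pager flags set above. Temporary files never sync, and FULL adds an
** extra journal sync before the journal header is updated. */
typedef struct SyncPolicy SyncPolicy;
struct SyncPolicy {
  int noSync;        /* Do not sync at all */
  int fullSync;      /* Extra journal sync before the nRec update */
  int useFullFsync;  /* Prefer full-barrier fsync where available */
};

static SyncPolicy safety_level_to_policy(int level, int isTempFile,
                                         int bFullFsync){
  SyncPolicy p;
  p.noSync       = (level==1 || isTempFile) ? 1 : 0;    /* OFF, or temp file */
  p.fullSync     = (level==3 && !isTempFile) ? 1 : 0;   /* FULL */
  p.useFullFsync = bFullFsync ? 1 : 0;
  return p;
}
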
-*/ -static int sqlite3PagerOpentemp(OsFile **pFd){ - int cnt = 8; - int rc; - char zFile[SQLITE_TEMPNAME_SIZE]; +** Write the file descriptor into *pFile. Return SQLITE_OK on success +** or some other error code if we fail. The OS will automatically +** delete the temporary file when it is closed. +** +** The flags passed to the VFS layer xOpen() call are those specified +** by parameter vfsFlags ORed with the following: +** +** SQLITE_OPEN_READWRITE +** SQLITE_OPEN_CREATE +** SQLITE_OPEN_EXCLUSIVE +** SQLITE_OPEN_DELETEONCLOSE +*/ +static int pagerOpentemp( + Pager *pPager, /* The pager object */ + sqlite3_file *pFile, /* Write the file descriptor here */ + int vfsFlags /* Flags passed through to the VFS */ +){ + int rc; /* Return code */ #ifdef SQLITE_TEST sqlite3_opentemp_count++; /* Used for testing and analysis only */ #endif - do{ - cnt--; - sqlite3OsTempFileName(zFile); - rc = sqlite3OsOpenExclusive(zFile, pFd, 1); - assert( rc!=SQLITE_OK || *pFd ); - }while( cnt>0 && rc!=SQLITE_OK && rc!=SQLITE_NOMEM ); - return rc; -} - -/* -** Create a new page cache and put a pointer to the page cache in *ppPager. -** The file to be cached need not exist. The file is not locked until -** the first call to sqlite3PagerGet() and is only held open until the -** last page is released using sqlite3PagerUnref(). -** -** If zFilename is NULL then a randomly-named temporary file is created -** and used as the file to be cached. The file will be deleted -** automatically when it is closed. -** -** If zFilename is ":memory:" then all information is held in cache. -** It is never written to disk. This can be used to implement an -** in-memory database. -*/ -int sqlite3PagerOpen( - Pager **ppPager, /* Return the Pager structure here */ - const char *zFilename, /* Name of the database file to open */ - int nExtra, /* Extra bytes append to each in-memory page */ - int flags /* flags controlling this file */ -){ - Pager *pPager = 0; - char *zFullPathname = 0; - int nameLen; /* Compiler is wrong. This is always initialized before use */ - OsFile *fd = 0; - int rc = SQLITE_OK; - int i; - int tempFile = 0; - int memDb = 0; - int readOnly = 0; - int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; - int noReadlock = (flags & PAGER_NO_READLOCK)!=0; - char zTemp[SQLITE_TEMPNAME_SIZE]; -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to - ** malloc() must have already been made by this thread before it gets - ** to this point. This means the ThreadData must have been allocated already - ** so that ThreadData.nAlloc can be set. It would be nice to assert - ** that ThreadData.nAlloc is non-zero, but alas this breaks test cases - ** written to invoke the pager directly. - */ - ThreadData *pTsd = sqlite3ThreadData(); - assert( pTsd ); -#endif - - /* We used to test if malloc() had already failed before proceeding. - ** But the way this function is used in SQLite means that can never - ** happen. Furthermore, if the malloc-failed flag is already set, - ** either the call to sqliteStrDup() or sqliteMalloc() below will - ** fail shortly and SQLITE_NOMEM returned anyway. - */ - *ppPager = 0; - - /* Open the pager file and set zFullPathname to point at malloc()ed - ** memory containing the complete filename (i.e. including the directory). 
- */ - if( zFilename && zFilename[0] ){ -#ifndef SQLITE_OMIT_MEMORYDB - if( strcmp(zFilename,":memory:")==0 ){ - memDb = 1; - zFullPathname = sqliteStrDup(""); - }else -#endif - { - zFullPathname = sqlite3OsFullPathname(zFilename); - if( zFullPathname ){ - rc = sqlite3OsOpenReadWrite(zFullPathname, &fd, &readOnly); - assert( rc!=SQLITE_OK || fd ); - } - } - }else{ - rc = sqlite3PagerOpentemp(&fd); - sqlite3OsTempFileName(zTemp); - zFilename = zTemp; - zFullPathname = sqlite3OsFullPathname(zFilename); - if( rc==SQLITE_OK ){ - tempFile = 1; - } - } - - /* Allocate the Pager structure. As part of the same allocation, allocate - ** space for the full paths of the file, directory and journal - ** (Pager.zFilename, Pager.zDirectory and Pager.zJournal). - */ - if( zFullPathname ){ - nameLen = strlen(zFullPathname); - pPager = sqliteMalloc( sizeof(*pPager) + nameLen*3 + 30 ); - if( pPager && rc==SQLITE_OK ){ - pPager->pTmpSpace = (char *)sqliteMallocRaw(SQLITE_DEFAULT_PAGE_SIZE); - } - } - - - /* If an error occured in either of the blocks above, free the memory - ** pointed to by zFullPathname, free the Pager structure and close the - ** file. Since the pager is not allocated there is no need to set - ** any Pager.errMask variables. - */ - if( !pPager || !zFullPathname || !pPager->pTmpSpace || rc!=SQLITE_OK ){ - sqlite3OsClose(&fd); - sqliteFree(zFullPathname); - sqliteFree(pPager); - return ((rc==SQLITE_OK)?SQLITE_NOMEM:rc); - } - PAGERTRACE3("OPEN %d %s\n", FILEHANDLEID(fd), zFullPathname); - IOTRACE(("OPEN %p %s\n", pPager, zFullPathname)) - pPager->zFilename = (char*)&pPager[1]; - pPager->zDirectory = &pPager->zFilename[nameLen+1]; - pPager->zJournal = &pPager->zDirectory[nameLen+1]; - memcpy(pPager->zFilename, zFullPathname, nameLen+1); - memcpy(pPager->zDirectory, zFullPathname, nameLen+1); - - for(i=nameLen; i>0 && pPager->zDirectory[i-1]!='/'; i--){} - if( i>0 ) pPager->zDirectory[i-1] = 0; - memcpy(pPager->zJournal, zFullPathname,nameLen); - sqliteFree(zFullPathname); - memcpy(&pPager->zJournal[nameLen], "-journal",sizeof("-journal")); - pPager->fd = fd; - /* pPager->journalOpen = 0; */ - pPager->useJournal = useJournal && !memDb; - pPager->noReadlock = noReadlock && readOnly; - /* pPager->stmtOpen = 0; */ - /* pPager->stmtInUse = 0; */ - /* pPager->nRef = 0; */ - pPager->dbSize = memDb-1; - pPager->pageSize = SQLITE_DEFAULT_PAGE_SIZE; - /* pPager->stmtSize = 0; */ - /* pPager->stmtJSize = 0; */ - /* pPager->nPage = 0; */ - pPager->mxPage = 100; - pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; - assert( PAGER_UNLOCK==0 ); - /* pPager->state = PAGER_UNLOCK; */ - /* pPager->errMask = 0; */ - pPager->tempFile = tempFile; - assert( tempFile==PAGER_LOCKINGMODE_NORMAL - || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); - assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); - pPager->exclusiveMode = tempFile; - pPager->memDb = memDb; - pPager->readOnly = readOnly; - /* pPager->needSync = 0; */ - pPager->noSync = pPager->tempFile || !useJournal; - pPager->fullSync = (pPager->noSync?0:1); - /* pPager->pFirst = 0; */ - /* pPager->pFirstSynced = 0; */ - /* pPager->pLast = 0; */ - pPager->nExtra = FORCE_ALIGNMENT(nExtra); - assert(fd||memDb); - if( !memDb ){ - setSectorSize(pPager); - } - /* pPager->pBusyHandler = 0; */ - /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ - *ppPager = pPager; -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - pPager->pNext = pTsd->pPager; - pTsd->pPager = pPager; -#endif - return SQLITE_OK; + vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | 
SQLITE_OPEN_DELETEONCLOSE; + rc = sqlite3OsOpen(pPager->pVfs, 0, pFile, vfsFlags, 0); + assert( rc!=SQLITE_OK || isOpen(pFile) ); + return rc; } /* ** Set the busy handler function. -*/ -void sqlite3PagerSetBusyhandler(Pager *pPager, BusyHandler *pBusyHandler){ - pPager->pBusyHandler = pBusyHandler; -} - -/* -** Set the destructor for this pager. If not NULL, the destructor is called -** when the reference count on each page reaches zero. The destructor can -** be used to clean up information in the extra segment appended to each page. ** -** The destructor is not called as a result sqlite3PagerClose(). -** Destructors are only called by sqlite3PagerUnref(). -*/ -void sqlite3PagerSetDestructor(Pager *pPager, void (*xDesc)(DbPage*,int)){ - pPager->xDestructor = xDesc; +** The pager invokes the busy-handler if sqlite3OsLock() returns +** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock, +** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE +** lock. It does *not* invoke the busy handler when upgrading from +** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE +** (which occurs during hot-journal rollback). Summary: +** +** Transition | Invokes xBusyHandler +** -------------------------------------------------------- +** NO_LOCK -> SHARED_LOCK | Yes +** SHARED_LOCK -> RESERVED_LOCK | No +** SHARED_LOCK -> EXCLUSIVE_LOCK | No +** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes +** +** If the busy-handler callback returns non-zero, the lock is +** retried. If it returns zero, then the SQLITE_BUSY error is +** returned to the caller of the pager API function. +*/ +void sqlite3PagerSetBusyhandler( + Pager *pPager, /* Pager object */ + int (*xBusyHandler)(void *), /* Pointer to busy-handler function */ + void *pBusyHandlerArg /* Argument to pass to xBusyHandler */ +){ + pPager->xBusyHandler = xBusyHandler; + pPager->pBusyHandlerArg = pBusyHandlerArg; } /* -** Set the reinitializer for this pager. If not NULL, the reinitializer -** is called when the content of a page in cache is restored to its original -** value as a result of a rollback. The callback gives higher-level code -** an opportunity to restore the EXTRA section to agree with the restored -** page data. +** Set the reinitializer for this pager. If not NULL, the reinitializer +** is called when the content of a page in cache is modified (restored) +** as part of a transaction or savepoint rollback. The callback gives +** higher-level code an opportunity to restore the EXTRA section to +** agree with the restored page data. */ -void sqlite3PagerSetReiniter(Pager *pPager, void (*xReinit)(DbPage*,int)){ +void sqlite3PagerSetReiniter(Pager *pPager, void (*xReinit)(DbPage*)){ pPager->xReiniter = xReinit; } /* -** Set the page size. Return the new size. If the suggest new page -** size is inappropriate, then an alternative page size is selected -** and returned. -*/ -int sqlite3PagerSetPagesize(Pager *pPager, int pageSize){ - assert( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE ); - if( !pPager->memDb && pPager->nRef==0 ){ - pager_reset(pPager); - pPager->pageSize = pageSize; - pPager->pTmpSpace = sqlite3ReallocOrFree(pPager->pTmpSpace, pageSize); +** Report the current page size and number of reserved bytes back +** to the codec. 
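[Editor's aside, not part of the diff] The retry behaviour documented above (and implemented later in pager_wait_on_lock()) is a simple loop: keep attempting the lock while the OS reports busy and the registered callback asks for another try. A standalone sketch follows; xTryLock and the callback are stand-ins, not real SQLite interfaces.

#define BUSY 5   /* Stand-in for SQLITE_BUSY */

/* Illustrative sketch only: retry a lock attempt for as long as it fails
** with BUSY and the busy-handler callback returns non-zero. A zero return
** from the callback gives up and reports BUSY to the caller. */
static int lock_with_busy_handler(int (*xTryLock)(void *pLockArg),
                                  void *pLockArg,
                                  int (*xBusyHandler)(void *pBusyArg),
                                  void *pBusyArg){
  int rc;
  do{
    rc = xTryLock(pLockArg);
  }while( rc==BUSY && xBusyHandler(pBusyArg) );
  return rc;
}
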
+*/ +#ifdef SQLITE_HAS_CODEC +static void pagerReportSize(Pager *pPager){ + if( pPager->xCodecSizeChng ){ + pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, + (int)pPager->nReserve); } - return pPager->pageSize; } +#else +# define pagerReportSize(X) /* No-op if we do not support a codec */ +#endif /* -** Attempt to set the maximum database page count if mxPage is positive. +** Change the page size used by the Pager object. The new page size +** is passed in *pPageSize. +** +** If the pager is in the error state when this function is called, it +** is a no-op. The value returned is the error state error code (i.e. +** one of SQLITE_IOERR, SQLITE_CORRUPT or SQLITE_FULL). +** +** Otherwise, if all of the following are true: +** +** * the new page size (value of *pPageSize) is valid (a power +** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and +** +** * there are no outstanding page references, and +** +** * the database is either not an in-memory database or it is +** an in-memory database that currently consists of zero pages. +** +** then the pager object page size is set to *pPageSize. +** +** If the page size is changed, then this function uses sqlite3PagerMalloc() +** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt +** fails, SQLITE_NOMEM is returned and the page size remains unchanged. +** In all other cases, SQLITE_OK is returned. +** +** If the page size is not changed, either because one of the enumerated +** conditions above is not true, the pager was in error state when this +** function was called, or because the memory allocation attempt failed, +** then *pPageSize is set to the old, retained page size before returning. +*/ +int sqlite3PagerSetPagesize(Pager *pPager, u16 *pPageSize, int nReserve){ + int rc = pPager->errCode; + if( rc==SQLITE_OK ){ + u16 pageSize = *pPageSize; + assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) ); + if( pageSize && pageSize!=pPager->pageSize + && (pPager->memDb==0 || pPager->dbSize==0) + && sqlite3PcacheRefCount(pPager->pPCache)==0 + ){ + char *pNew = (char *)sqlite3PageMalloc(pageSize); + if( !pNew ){ + rc = SQLITE_NOMEM; + }else{ + pager_reset(pPager); + pPager->pageSize = pageSize; + sqlite3PageFree(pPager->pTmpSpace); + pPager->pTmpSpace = pNew; + sqlite3PcacheSetPageSize(pPager->pPCache, pageSize); + } + } + *pPageSize = (u16)pPager->pageSize; + if( nReserve<0 ) nReserve = pPager->nReserve; + assert( nReserve>=0 && nReserve<1000 ); + pPager->nReserve = (i16)nReserve; + pagerReportSize(pPager); + } + return rc; +} + +/* +** Return a pointer to the "temporary page" buffer held internally +** by the pager. This is a buffer that is big enough to hold the +** entire content of a database page. This buffer is used internally +** during rollback and will be overwritten whenever a rollback +** occurs. But other modules are free to use it too, as long as +** no rollbacks are happening. +*/ +void *sqlite3PagerTempSpace(Pager *pPager){ + return pPager->pTmpSpace; +} + +/* +** Attempt to set the maximum database page count if mxPage is positive. ** Make no changes if mxPage is zero or negative. And never reduce the ** maximum page count below the current size of the database. ** @@ -1928,7 +2469,7 @@ if( mxPage>0 ){ pPager->mxPgno = mxPage; } - sqlite3PagerPagecount(pPager); + sqlite3PagerPagecount(pPager, 0); return pPager->mxPgno; } @@ -1960,21 +2501,23 @@ ** Read the first N bytes from the beginning of the file into memory ** that pDest points to. ** -** No error checking is done. 
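[Editor's aside, not part of the diff] The page-size precondition spelled out above - a power of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive - is easy to test in isolation. The sketch below assumes the usual default limit of 32768; the real limit is a compile-time option.

/* Illustrative sketch only: validity test for a proposed page size, as
** described above. 32768 stands in for SQLITE_MAX_PAGE_SIZE. */
static int page_size_is_valid(int pageSize){
  return pageSize>=512
      && pageSize<=32768
      && (pageSize & (pageSize-1))==0;   /* Exactly one bit set: power of two */
}
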
The rational for this is that this function -** may be called even if the file does not exist or contain a header. In -** these cases sqlite3OsRead() will return an error, to which the correct -** response is to zero the memory at pDest and continue. A real IO error -** will presumably recur and be picked up later (Todo: Think about this). +** If the pager was opened on a transient file (zFilename==""), or +** opened on a file less than N bytes in size, the output buffer is +** zeroed and SQLITE_OK returned. The rationale for this is that this +** function is used to read database headers, and a new transient or +** zero sized database has a header than consists entirely of zeroes. +** +** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered, +** the error code is returned to the caller and the contents of the +** output buffer undefined. */ int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ int rc = SQLITE_OK; memset(pDest, 0, N); - if( MEMDB==0 ){ - disable_simulated_io_errors(); - sqlite3OsSeek(pPager->fd, 0); - enable_simulated_io_errors(); + assert( isOpen(pPager->fd) || pPager->tempFile ); + if( isOpen(pPager->fd) ){ IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) - rc = sqlite3OsRead(pPager->fd, pDest, N); + rc = sqlite3OsRead(pPager->fd, pDest, N, 0); if( rc==SQLITE_IOERR_SHORT_READ ){ rc = SQLITE_OK; } @@ -1983,173 +2526,84 @@ } /* -** Return the total number of pages in the disk file associated with -** pPager. +** Return the total number of pages in the database file associated +** with pPager. Normally, this is calculated as (/). +** However, if the file is between 1 and bytes in size, then +** this is considered a 1 page file. +** +** If the pager is in error state when this function is called, then the +** error state error code is returned and *pnPage left unchanged. Or, +** if the file system has to be queried for the size of the file and +** the query attempt returns an IO error, the IO error code is returned +** and *pnPage is left unchanged. ** -** If the PENDING_BYTE lies on the page directly after the end of the -** file, then consider this page part of the file too. For example, if -** PENDING_BYTE is byte 4096 (the first byte of page 5) and the size of the -** file is 4096 bytes, 5 is returned instead of 4. +** Otherwise, if everything is successful, then SQLITE_OK is returned +** and *pnPage is set to the number of pages in the database. */ -int sqlite3PagerPagecount(Pager *pPager){ - i64 n; - int rc; - assert( pPager!=0 ); +int sqlite3PagerPagecount(Pager *pPager, int *pnPage){ + Pgno nPage; /* Value to return via *pnPage */ + + /* If the pager is already in the error state, return the error code. */ if( pPager->errCode ){ - return 0; + return pPager->errCode; } - if( pPager->dbSize>=0 ){ - n = pPager->dbSize; - } else { - if( (rc = sqlite3OsFileSize(pPager->fd, &n))!=SQLITE_OK ){ + + /* Determine the number of pages in the file. Store this in nPage. 
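[Editor's aside, not part of the diff] The sizing rule described above - a non-empty file smaller than one page still counts as a one-page database, otherwise the page count is the byte size divided by the page size - is restated below as a standalone helper with illustrative names.

#include <stdint.h>

/* Illustrative sketch only: derive the page count from the database file
** size in bytes, treating any non-empty file smaller than one page as a
** one-page database. */
static uint32_t pages_in_file(int64_t nByte, int pageSize){
  if( nByte>0 && nByte<pageSize ) return 1;
  return (uint32_t)(nByte / pageSize);
}
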
*/ + if( pPager->dbSizeValid ){ + nPage = pPager->dbSize; + }else{ + int rc; /* Error returned by OsFileSize() */ + i64 n = 0; /* File size in bytes returned by OsFileSize() */ + + assert( isOpen(pPager->fd) || pPager->tempFile ); + if( isOpen(pPager->fd) && (0 != (rc = sqlite3OsFileSize(pPager->fd, &n))) ){ pager_error(pPager, rc); - return 0; + return rc; } if( n>0 && npageSize ){ - n = 1; + nPage = 1; }else{ - n /= pPager->pageSize; + nPage = (Pgno)(n / pPager->pageSize); } if( pPager->state!=PAGER_UNLOCK ){ - pPager->dbSize = n; + pPager->dbSize = nPage; + pPager->dbFileSize = nPage; + pPager->dbSizeValid = 1; } } - if( n==(PENDING_BYTE/pPager->pageSize) ){ - n++; - } - if( n>pPager->mxPgno ){ - pPager->mxPgno = n; - } - return n; -} - -#ifndef SQLITE_OMIT_MEMORYDB -/* -** Clear a PgHistory block -*/ -static void clearHistory(PgHistory *pHist){ - sqliteFree(pHist->pOrig); - sqliteFree(pHist->pStmt); - pHist->pOrig = 0; - pHist->pStmt = 0; -} -#else -#define clearHistory(x) -#endif - -/* -** Forward declaration -*/ -static int syncJournal(Pager*); - -/* -** Unlink pPg from it's hash chain. Also set the page number to 0 to indicate -** that the page is not part of any hash chain. This is required because the -** sqlite3PagerMovepage() routine can leave a page in the -** pNextFree/pPrevFree list that is not a part of any hash-chain. -*/ -static void unlinkHashChain(Pager *pPager, PgHdr *pPg){ - if( pPg->pgno==0 ){ - assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); - return; - } - if( pPg->pNextHash ){ - pPg->pNextHash->pPrevHash = pPg->pPrevHash; - } - if( pPg->pPrevHash ){ - assert( pPager->aHash[pPg->pgno & (pPager->nHash-1)]!=pPg ); - pPg->pPrevHash->pNextHash = pPg->pNextHash; - }else{ - int h = pPg->pgno & (pPager->nHash-1); - pPager->aHash[h] = pPg->pNextHash; - } - if( MEMDB ){ - clearHistory(PGHDR_TO_HIST(pPg, pPager)); - } - pPg->pgno = 0; - pPg->pNextHash = pPg->pPrevHash = 0; -} - -/* -** Unlink a page from the free list (the list of all pages where nRef==0) -** and from its hash collision chain. -*/ -static void unlinkPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - - /* Keep the pFirstSynced pointer pointing at the first synchronized page */ - if( pPg==pPager->pFirstSynced ){ - PgHdr *p = pPg->pNextFree; - while( p && p->needSync ){ p = p->pNextFree; } - pPager->pFirstSynced = p; + /* If the current number of pages in the file is greater than the + ** configured maximum pager number, increase the allowed limit so + ** that the file can be read. + */ + if( nPage>pPager->mxPgno ){ + pPager->mxPgno = (Pgno)nPage; } - /* Unlink from the freelist */ - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg->pNextFree; - }else{ - assert( pPager->pFirst==pPg ); - pPager->pFirst = pPg->pNextFree; + /* Set the output variable and return SQLITE_OK */ + if( pnPage ){ + *pnPage = nPage; } - if( pPg->pNextFree ){ - pPg->pNextFree->pPrevFree = pPg->pPrevFree; - }else{ - assert( pPager->pLast==pPg ); - pPager->pLast = pPg->pPrevFree; - } - pPg->pNextFree = pPg->pPrevFree = 0; - - /* Unlink from the pgno hash table */ - unlinkHashChain(pPager, pPg); + return SQLITE_OK; } -/* -** This routine is used to truncate the cache when a database -** is truncated. Drop from the cache all pages whose pgno is -** larger than pPager->dbSize and is unreferenced. -** -** Referenced pages larger than pPager->dbSize are zeroed. -** -** Actually, at the point this routine is called, it would be -** an error to have a referenced page. 
But rather than delete -** that page and guarantee a subsequent segfault, it seems better -** to zero it and hope that we error out sanely. -*/ -static void pager_truncate_cache(Pager *pPager){ - PgHdr *pPg; - PgHdr **ppPg; - int dbSize = pPager->dbSize; - - ppPg = &pPager->pAll; - while( (pPg = *ppPg)!=0 ){ - if( pPg->pgno<=dbSize ){ - ppPg = &pPg->pNextAll; - }else if( pPg->nRef>0 ){ - memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); - ppPg = &pPg->pNextAll; - }else{ - *ppPg = pPg->pNextAll; - IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - unlinkPage(pPg); - makeClean(pPg); - sqliteFree(pPg); - pPager->nPage--; - } - } -} /* -** Try to obtain a lock on a file. Invoke the busy callback if the lock -** is currently not available. Repeat until the busy callback returns -** false or until the lock succeeds. +** Try to obtain a lock of type locktype on the database file. If +** a similar or greater lock is already held, this function is a no-op +** (returning SQLITE_OK immediately). +** +** Otherwise, attempt to obtain the lock using sqlite3OsLock(). Invoke +** the busy callback if the lock is currently not available. Repeat +** until the busy callback returns false or until the attempt to +** obtain the lock succeeds. ** ** Return SQLITE_OK on success and an error code if we cannot obtain -** the lock. +** the lock. If the lock is obtained successfully, set the Pager.state +** variable to locktype before returning. */ static int pager_wait_on_lock(Pager *pPager, int locktype){ - int rc; + int rc; /* Return code */ /* The OS lock values must be the same as the Pager lock values */ assert( PAGER_SHARED==SHARED_LOCK ); @@ -2157,16 +2611,26 @@ assert( PAGER_EXCLUSIVE==EXCLUSIVE_LOCK ); /* If the file is currently unlocked then the size must be unknown */ - assert( pPager->state>=PAGER_SHARED || pPager->dbSize<0 || MEMDB ); + assert( pPager->state>=PAGER_SHARED || pPager->dbSizeValid==0 ); + + /* Check that this is either a no-op (because the requested lock is + ** already held, or one of the transistions that the busy-handler + ** may be invoked during, according to the comment above + ** sqlite3PagerSetBusyhandler(). + */ + assert( (pPager->state>=locktype) + || (pPager->state==PAGER_UNLOCK && locktype==PAGER_SHARED) + || (pPager->state==PAGER_RESERVED && locktype==PAGER_EXCLUSIVE) + ); if( pPager->state>=locktype ){ rc = SQLITE_OK; }else{ do { rc = sqlite3OsLock(pPager->fd, locktype); - }while( rc==SQLITE_BUSY && sqlite3InvokeBusyHandler(pPager->pBusyHandler) ); + }while( rc==SQLITE_BUSY && pPager->xBusyHandler(pPager->pBusyHandlerArg) ); if( rc==SQLITE_OK ){ - pPager->state = locktype; + pPager->state = (u8)locktype; IOTRACE(("LOCK %p %d\n", pPager, locktype)) } } @@ -2174,37 +2638,16 @@ } /* -** Truncate the file to the number of pages specified. -*/ -int sqlite3PagerTruncate(Pager *pPager, Pgno nPage){ - int rc; - assert( pPager->state>=PAGER_SHARED || MEMDB ); - sqlite3PagerPagecount(pPager); - if( pPager->errCode ){ - rc = pPager->errCode; - return rc; - } - if( nPage>=(unsigned)pPager->dbSize ){ - return SQLITE_OK; - } - if( MEMDB ){ - pPager->dbSize = nPage; - pager_truncate_cache(pPager); - return SQLITE_OK; - } - rc = syncJournal(pPager); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Get an exclusive lock on the database before truncating. 
*/ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - return rc; - } - - rc = pager_truncate(pPager, nPage); - return rc; +** Truncate the in-memory database file image to nPage pages. This +** function does not actually modify the database file on disk. It +** just sets the internal state of the pager object so that the +** truncation will be done when the current transaction is committed. +*/ +void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ + assert( pPager->dbSizeValid ); + assert( pPager->dbSize>=nPage ); + assert( pPager->state>=PAGER_RESERVED ); + pPager->dbSize = nPage; } /* @@ -2222,308 +2665,230 @@ ** to the caller. */ int sqlite3PagerClose(Pager *pPager){ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to - ** malloc() must have already been made by this thread before it gets - ** to this point. This means the ThreadData must have been allocated already - ** so that ThreadData.nAlloc can be set. - */ - ThreadData *pTsd = sqlite3ThreadData(); - assert( pPager ); - assert( pTsd && pTsd->nAlloc ); -#endif - disable_simulated_io_errors(); + sqlite3BeginBenignMalloc(); pPager->errCode = 0; pPager->exclusiveMode = 0; pager_reset(pPager); - pagerUnlockAndRollback(pPager); - enable_simulated_io_errors(); - PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); - IOTRACE(("CLOSE %p\n", pPager)) - assert( pPager->errCode || (pPager->journalOpen==0 && pPager->stmtOpen==0) ); - if( pPager->journalOpen ){ - sqlite3OsClose(&pPager->jfd); - } - sqliteFree(pPager->aInJournal); - if( pPager->stmtOpen ){ - sqlite3OsClose(&pPager->stfd); - } - sqlite3OsClose(&pPager->fd); - /* Temp files are automatically deleted by the OS - ** if( pPager->tempFile ){ - ** sqlite3OsDelete(pPager->zFilename); - ** } - */ - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* Remove the pager from the linked list of pagers starting at - ** ThreadData.pPager if memory-management is enabled. - */ - if( pPager==pTsd->pPager ){ - pTsd->pPager = pPager->pNext; + if( MEMDB ){ + pager_unlock(pPager); }else{ - Pager *pTmp; - for(pTmp = pTsd->pPager; pTmp->pNext!=pPager; pTmp=pTmp->pNext){} - pTmp->pNext = pPager->pNext; + /* Set Pager.journalHdr to -1 for the benefit of the pager_playback() + ** call which may be made from within pagerUnlockAndRollback(). If it + ** is not -1, then the unsynced portion of an open journal file may + ** be played back into the database. If a power failure occurs while + ** this is happening, the database may become corrupt. + */ + pPager->journalHdr = -1; + pagerUnlockAndRollback(pPager); } + sqlite3EndBenignMalloc(); + enable_simulated_io_errors(); + PAGERTRACE(("CLOSE %d\n", PAGERID(pPager))); + IOTRACE(("CLOSE %p\n", pPager)) + sqlite3OsClose(pPager->fd); + sqlite3PageFree(pPager->pTmpSpace); + sqlite3PcacheClose(pPager->pPCache); + +#ifdef SQLITE_HAS_CODEC + if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec); #endif - sqliteFree(pPager->aHash); - sqliteFree(pPager->pTmpSpace); - sqliteFree(pPager); + + assert( !pPager->aSavepoint && !pPager->pInJournal ); + assert( !isOpen(pPager->jfd) && !isOpen(pPager->sjfd) ); + + sqlite3_free(pPager); return SQLITE_OK; } #if !defined(NDEBUG) || defined(SQLITE_TEST) /* -** Return the page number for the given page data. +** Return the page number for page pPg. */ -Pgno sqlite3PagerPagenumber(DbPage *p){ - return p->pgno; +Pgno sqlite3PagerPagenumber(DbPage *pPg){ + return pPg->pgno; } #endif /* -** The page_ref() function increments the reference count for a page. 
-** If the page is currently on the freelist (the reference count is zero) then -** remove it from the freelist. -** -** For non-test systems, page_ref() is a macro that calls _page_ref() -** online of the reference count is zero. For test systems, page_ref() -** is a real function so that we can set breakpoints and trace it. -*/ -static void _page_ref(PgHdr *pPg){ - if( pPg->nRef==0 ){ - /* The page is currently on the freelist. Remove it. */ - if( pPg==pPg->pPager->pFirstSynced ){ - PgHdr *p = pPg->pNextFree; - while( p && p->needSync ){ p = p->pNextFree; } - pPg->pPager->pFirstSynced = p; - } - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg->pNextFree; - }else{ - pPg->pPager->pFirst = pPg->pNextFree; - } - if( pPg->pNextFree ){ - pPg->pNextFree->pPrevFree = pPg->pPrevFree; - }else{ - pPg->pPager->pLast = pPg->pPrevFree; - } - pPg->pPager->nRef++; - } - pPg->nRef++; - REFINFO(pPg); -} -#ifdef SQLITE_DEBUG - static void page_ref(PgHdr *pPg){ - if( pPg->nRef==0 ){ - _page_ref(pPg); - }else{ - pPg->nRef++; - REFINFO(pPg); - } - } -#else -# define page_ref(P) ((P)->nRef==0?_page_ref(P):(void)(P)->nRef++) -#endif - -/* -** Increment the reference count for a page. The input pointer is -** a reference to the page data. +** Increment the reference count for page pPg. */ -int sqlite3PagerRef(DbPage *pPg){ - page_ref(pPg); - return SQLITE_OK; +void sqlite3PagerRef(DbPage *pPg){ + sqlite3PcacheRef(pPg); } /* -** Sync the journal. In other words, make sure all the pages that have +** Sync the journal. In other words, make sure all the pages that have ** been written to the journal have actually reached the surface of the -** disk. It is not safe to modify the original database file until after -** the journal has been synced. If the original database is modified before -** the journal is synced and a power failure occurs, the unsynced journal -** data would be lost and we would be unable to completely rollback the -** database changes. Database corruption would occur. -** -** This routine also updates the nRec field in the header of the journal. -** (See comments on the pager_playback() routine for additional information.) -** If the sync mode is FULL, two syncs will occur. First the whole journal -** is synced, then the nRec field is updated, then a second sync occurs. -** -** For temporary databases, we do not care if we are able to rollback -** after a power failure, so sync occurs. +** disk and can be restored in the event of a hot-journal rollback. ** -** This routine clears the needSync field of every page current held in -** memory. +** If the Pager.needSync flag is not set, then this function is a +** no-op. Otherwise, the actions required depend on the journal-mode +** and the device characteristics of the the file-system, as follows: +** +** * If the journal file is an in-memory journal file, no action need +** be taken. +** +** * Otherwise, if the device does not support the SAFE_APPEND property, +** then the nRec field of the most recently written journal header +** is updated to contain the number of journal records that have +** been written following it. If the pager is operating in full-sync +** mode, then the journal file is synced before this field is updated. +** +** * If the device does not support the SEQUENTIAL property, then +** journal file is synced. 
+** +** Or, in pseudo-code: +** +** if( NOT ){ +** if( NOT SAFE_APPEND ){ +** if( ) xSync(); +** +** } +** if( NOT SEQUENTIAL ) xSync(); +** } +** +** The Pager.needSync flag is never be set for temporary files, or any +** file operating in no-sync mode (Pager.noSync set to non-zero). +** +** If successful, this routine clears the PGHDR_NEED_SYNC flag of every +** page currently held in memory before returning SQLITE_OK. If an IO +** error is encountered, then the IO error code is returned to the caller. */ static int syncJournal(Pager *pPager){ - PgHdr *pPg; - int rc = SQLITE_OK; - - /* Sync the journal before modifying the main database - ** (assuming there is a journal and it needs to be synced.) - */ if( pPager->needSync ){ - if( !pPager->tempFile ){ - assert( pPager->journalOpen ); - /* assert( !pPager->noSync ); // noSync might be set if synchronous - ** was turned off after the transaction was started. Ticket #615 */ -#ifndef NDEBUG - { - /* Make sure the pPager->nRec counter we are keeping agrees - ** with the nRec computed from the size of the journal file. + assert( !pPager->tempFile ); + if( pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ + int rc; /* Return code */ + const int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); + assert( isOpen(pPager->jfd) ); + + if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ + /* This block deals with an obscure problem. If the last connection + ** that wrote to this database was operating in persistent-journal + ** mode, then the journal file may at this point actually be larger + ** than Pager.journalOff bytes. If the next thing in the journal + ** file happens to be a journal-header (written as part of the + ** previous connections transaction), and a crash or power-failure + ** occurs after nRec is updated but before this connection writes + ** anything else to the journal file (or commits/rolls back its + ** transaction), then SQLite may become confused when doing the + ** hot-journal rollback following recovery. It may roll back all + ** of this connections data, then proceed to rolling back the old, + ** out-of-date data that follows it. Database corruption. + ** + ** To work around this, if the journal file does appear to contain + ** a valid header following Pager.journalOff, then write a 0x00 + ** byte to the start of it to prevent it from being recognized. + ** + ** Variable iNextHdrOffset is set to the offset at which this + ** problematic header will occur, if it exists. aMagic is used + ** as a temporary buffer to inspect the first couple of bytes of + ** the potential journal header. */ - i64 jSz; - rc = sqlite3OsFileSize(pPager->jfd, &jSz); - if( rc!=0 ) return rc; - assert( pPager->journalOff==jSz ); - } -#endif - { + i64 iNextHdrOffset; + u8 aMagic[8]; + u8 zHeader[sizeof(aJournalMagic)+4]; + + memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); + put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec); + + iNextHdrOffset = journalHdrOffset(pPager); + rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset); + if( rc==SQLITE_OK && 0==memcmp(aMagic, aJournalMagic, 8) ){ + static const u8 zerobyte = 0; + rc = sqlite3OsWrite(pPager->jfd, &zerobyte, 1, iNextHdrOffset); + } + if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ + return rc; + } + /* Write the nRec value into the journal file header. If in ** full-synchronous mode, sync the journal first. This ensures that ** all data has really hit the disk before nRec is updated to mark - ** it as a candidate for rollback. + ** it as a candidate for rollback. 
+ ** + ** This is not required if the persistent media supports the + ** SAFE_APPEND property. Because in this case it is not possible + ** for garbage data to be appended to the file, the nRec field + ** is populated with 0xFFFFFFFF when the journal header is written + ** and never needs to be updated. */ - if( pPager->fullSync ){ - PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); + if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ + PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, 0); - if( rc!=0 ) return rc; + rc = sqlite3OsSync(pPager->jfd, pPager->sync_flags); + if( rc!=SQLITE_OK ) return rc; } - rc = sqlite3OsSeek(pPager->jfd, - pPager->journalHdr + sizeof(aJournalMagic)); - if( rc ) return rc; - IOTRACE(("JHDR %p %lld %d\n", pPager, - pPager->journalHdr + sizeof(aJournalMagic), 4)) - rc = write32bits(pPager->jfd, pPager->nRec); - if( rc ) return rc; - - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); - if( rc ) return rc; - } - PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); - IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, pPager->full_fsync); - if( rc!=0 ) return rc; - pPager->journalStarted = 1; + IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr)); + rc = sqlite3OsWrite( + pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr + ); + if( rc!=SQLITE_OK ) return rc; + } + if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ + PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); + IOTRACE(("JSYNC %p\n", pPager)) + rc = sqlite3OsSync(pPager->jfd, pPager->sync_flags| + (pPager->sync_flags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0) + ); + if( rc!=SQLITE_OK ) return rc; + } } - pPager->needSync = 0; - /* Erase the needSync flag from every page. + /* The journal file was just successfully synced. Set Pager.needSync + ** to zero and clear the PGHDR_NEED_SYNC flag on all pagess. */ - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - pPg->needSync = 0; - } - pPager->pFirstSynced = pPager->pFirst; - } - -#ifndef NDEBUG - /* If the Pager.needSync flag is clear then the PgHdr.needSync - ** flag must also be clear for all pages. Verify that this - ** invariant is true. - */ - else{ - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - assert( pPg->needSync==0 ); - } - assert( pPager->pFirstSynced==pPager->pFirst ); - } -#endif - - return rc; -} - -/* -** Merge two lists of pages connected by pDirty and in pgno order. -** Do not both fixing the pPrevDirty pointers. -*/ -static PgHdr *merge_pagelist(PgHdr *pA, PgHdr *pB){ - PgHdr result, *pTail; - pTail = &result; - while( pA && pB ){ - if( pA->pgnopgno ){ - pTail->pDirty = pA; - pTail = pA; - pA = pA->pDirty; - }else{ - pTail->pDirty = pB; - pTail = pB; - pB = pB->pDirty; - } - } - if( pA ){ - pTail->pDirty = pA; - }else if( pB ){ - pTail->pDirty = pB; - }else{ - pTail->pDirty = 0; + pPager->needSync = 0; + pPager->journalStarted = 1; + sqlite3PcacheClearSyncFlags(pPager->pPCache); } - return result.pDirty; -} -/* -** Sort the list of pages in accending order by pgno. Pages are -** connected by pDirty pointers. The pPrevDirty pointers are -** corrupted by this sort. 
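
The deleted sort_pagelist()/merge_pagelist() pair above kept the dirty-page list in ascending pgno order by repeatedly merging pgno-sorted sublists. A minimal standalone sketch of that merge step follows; the DemoPg type and function name are invented here and stand in for PgHdr and the removed helper:

#include <stdio.h>

typedef struct DemoPg DemoPg;
struct DemoPg { unsigned pgno; DemoPg *pDirty; };

/* Merge two lists that are each already sorted by pgno and linked
** through pDirty, returning a single pgno-sorted list. */
static DemoPg *demoMergePageLists(DemoPg *pA, DemoPg *pB){
  DemoPg head, *pTail = &head;
  while( pA && pB ){
    if( pA->pgno < pB->pgno ){
      pTail->pDirty = pA; pTail = pA; pA = pA->pDirty;
    }else{
      pTail->pDirty = pB; pTail = pB; pB = pB->pDirty;
    }
  }
  pTail->pDirty = pA ? pA : pB;   /* append whichever list remains */
  return head.pDirty;
}

int main(void){
  DemoPg a2 = {2,0}, a5 = {5,0}, b3 = {3,0};
  DemoPg *p;
  a2.pDirty = &a5;
  for(p=demoMergePageLists(&a2, &b3); p; p=p->pDirty) printf("%u ", p->pgno);
  printf("\n");                    /* prints: 2 3 5 */
  return 0;
}
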
-*/ -#define N_SORT_BUCKET_ALLOC 25 -#define N_SORT_BUCKET 25 -#ifdef SQLITE_TEST - int sqlite3_pager_n_sort_bucket = 0; - #undef N_SORT_BUCKET - #define N_SORT_BUCKET \ - (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC) -#endif -static PgHdr *sort_pagelist(PgHdr *pIn){ - PgHdr *a[N_SORT_BUCKET_ALLOC], *p; - int i; - memset(a, 0, sizeof(a)); - while( pIn ){ - p = pIn; - pIn = p->pDirty; - p->pDirty = 0; - for(i=0; ipPager; /* At this point there may be either a RESERVED or EXCLUSIVE lock on the ** database file. If there is already an EXCLUSIVE lock, the following - ** calls to sqlite3OsLock() are no-ops. + ** call is a no-op. ** ** Moving the lock from RESERVED to EXCLUSIVE actually involves going ** through an intermediate state PENDING. A PENDING lock prevents new @@ -2537,543 +2902,928 @@ ** EXCLUSIVE, it means the database file has been changed and any rollback ** will require a journal playback. */ + assert( pPager->state>=PAGER_RESERVED ); rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - return rc; + + /* If the file is a temp-file has not yet been opened, open it now. It + ** is not possible for rc to be other than SQLITE_OK if this branch + ** is taken, as pager_wait_on_lock() is a no-op for temp-files. + */ + if( !isOpen(pPager->fd) ){ + assert( pPager->tempFile && rc==SQLITE_OK ); + rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags); } - pList = sort_pagelist(pList); - while( pList ){ - assert( pList->dirty ); - rc = sqlite3OsSeek(pPager->fd, (pList->pgno-1)*(i64)pPager->pageSize); - if( rc ) return rc; + while( rc==SQLITE_OK && pList ){ + Pgno pgno = pList->pgno; + /* If there are dirty pages in the page cache with page numbers greater - ** than Pager.dbSize, this means sqlite3PagerTruncate() was called to + ** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to ** make the file smaller (presumably by auto-vacuum code). Do not write ** any such pages to the file. + ** + ** Also, do not write out any page that has the PGHDR_DONT_WRITE flag + ** set (set by sqlite3PagerDontWrite()). */ - if( pList->pgno<=pPager->dbSize ){ - char *pData = CODEC2(pPager, PGHDR_TO_DATA(pList), pList->pgno, 6); - PAGERTRACE4("STORE %d page %d hash(%08x)\n", - PAGERID(pPager), pList->pgno, pager_pagehash(pList)); - IOTRACE(("PGOUT %p %d\n", pPager, pList->pgno)); - rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize); - PAGER_INCR(sqlite3_pager_writedb_count); - PAGER_INCR(pPager->nWrite); - if( pList->pgno==1 ){ + if( pgno<=pPager->dbSize && 0==(pList->flags&PGHDR_DONT_WRITE) ){ + i64 offset = (pgno-1)*(i64)pPager->pageSize; /* Offset to write */ + char *pData; /* Data to write */ + + /* Encode the database */ + CODEC2(pPager, pList->pData, pgno, 6, return SQLITE_NOMEM, pData); + + /* Write out the page data. */ + rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset); + + /* If page 1 was just written, update Pager.dbFileVers to match + ** the value now stored in the database file. If writing this + ** page caused the database file to grow, update dbFileSize. + */ + if( pgno==1 ){ memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); } + if( pgno>pPager->dbFileSize ){ + pPager->dbFileSize = pgno; + } + + /* Update any backup objects copying the contents of this pager. 
*/ + sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); + + PAGERTRACE(("STORE %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_pagehash(pList))); + IOTRACE(("PGOUT %p %d\n", pPager, pgno)); + PAGER_INCR(sqlite3_pager_writedb_count); + PAGER_INCR(pPager->nWrite); + }else{ + PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno)); } -#ifndef NDEBUG - else{ - PAGERTRACE3("NOSTORE %d page %d\n", PAGERID(pPager), pList->pgno); - } -#endif - if( rc ) return rc; - pList->dirty = 0; #ifdef SQLITE_CHECK_PAGES pList->pageHash = pager_pagehash(pList); #endif pList = pList->pDirty; } - return SQLITE_OK; -} -/* -** Collect every dirty page into a dirty list and -** return a pointer to the head of that list. All pages are -** collected even if they are still in use. -*/ -static PgHdr *pager_get_all_dirty_pages(Pager *pPager){ - return pPager->pDirty; + return rc; } /* -** Return TRUE if there is a hot journal on the given pager. -** A hot journal is one that needs to be played back. -** -** If the current size of the database file is 0 but a journal file -** exists, that is probably an old journal left over from a prior -** database with the same name. Just delete the journal. +** Append a record of the current state of page pPg to the sub-journal. +** It is the callers responsibility to use subjRequiresPage() to check +** that it is really required before calling this function. +** +** If successful, set the bit corresponding to pPg->pgno in the bitvecs +** for all open savepoints before returning. +** +** This function returns SQLITE_OK if everything is successful, an IO +** error code if the attempt to write to the sub-journal fails, or +** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint +** bitvec. */ -static int hasHotJournal(Pager *pPager){ - if( !pPager->useJournal ) return 0; - if( !sqlite3OsFileExists(pPager->zJournal) ){ - return 0; - } - if( sqlite3OsCheckReservedLock(pPager->fd) ){ - return 0; - } - if( sqlite3PagerPagecount(pPager)==0 ){ - sqlite3OsDelete(pPager->zJournal); - return 0; - }else{ - return 1; +static int subjournalPage(PgHdr *pPg){ + int rc = SQLITE_OK; + Pager *pPager = pPg->pPager; + if( isOpen(pPager->sjfd) ){ + void *pData = pPg->pData; + i64 offset = pPager->nSubRec*(4+pPager->pageSize); + char *pData2; + + CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); + PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno)); + + assert( pageInJournal(pPg) || pPg->pgno>pPager->dbOrigSize ); + rc = write32bits(pPager->sjfd, offset, pPg->pgno); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); + } } + if( rc==SQLITE_OK ){ + pPager->nSubRec++; + assert( pPager->nSavepoint>0 ); + rc = addToSavepointBitvecs(pPager, pPg->pgno); + testcase( rc!=SQLITE_OK ); + } + return rc; } + /* -** Try to find a page in the cache that can be recycled. +** This function is called by the pcache layer when it has reached some +** soft memory limit. The first argument is a pointer to a Pager object +** (cast as a void*). The pager is always 'purgeable' (not an in-memory +** database). The second argument is a reference to a page that is +** currently dirty but has no outstanding references. The page +** is always associated with the Pager object passed as the first +** argument. ** -** This routine may return SQLITE_IOERR, SQLITE_FULL or SQLITE_OK. It -** does not set the pPager->errCode variable. 
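
The new comment above describes the contract between the page cache and the pager: when the cache passes its soft memory limit, it hands a dirty page with no outstanding references back to its owner to be made clean before the buffer is reused. The following standalone sketch illustrates only that callback pattern; DemoCache, DemoPage and demoStress() are invented names, not the pcache interface itself:

#include <stdio.h>

typedef struct DemoPage DemoPage;
struct DemoPage {
  unsigned pgno;      /* Page number */
  int isDirty;        /* True if the page has unwritten changes */
  int nRef;           /* Outstanding references */
};

typedef struct DemoCache DemoCache;
struct DemoCache {
  int (*xStress)(void *pArg, DemoPage *pPg);  /* Called to clean a dirty page */
  void *pStressArg;                           /* First argument to xStress */
};

/* Owner-supplied callback: "write" the page out and mark it clean. */
static int demoStress(void *pArg, DemoPage *pPg){
  (void)pArg;
  printf("writing page %u to disk\n", pPg->pgno);
  pPg->isDirty = 0;
  return 0;                       /* success */
}

/* Recycle a page buffer: dirty pages are cleaned via xStress first. */
static int demoRecycle(DemoCache *pCache, DemoPage *pPg){
  if( pPg->nRef>0 ) return 1;     /* still in use: cannot recycle */
  if( pPg->isDirty ) return pCache->xStress(pCache->pStressArg, pPg);
  return 0;
}

int main(void){
  DemoCache cache = { demoStress, 0 };
  DemoPage pg = { 7, 1, 0 };
  return demoRecycle(&cache, &pg);
}

Keeping the write-out step behind a callback is what lets the cache layer stay ignorant of journals and file locks; those concerns remain inside the pager.
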
+** The job of this function is to make pPg clean by writing its contents +** out to the database file, if possible. This may involve syncing the +** journal file. +** +** If successful, sqlite3PcacheMakeClean() is called on the page and +** SQLITE_OK returned. If an IO error occurs while trying to make the +** page clean, the IO error code is returned. If the page cannot be +** made clean for some other reason, but no error occurs, then SQLITE_OK +** is returned by sqlite3PcacheMakeClean() is not called. */ -static int pager_recycle(Pager *pPager, int syncOk, PgHdr **ppPg){ - PgHdr *pPg; - *ppPg = 0; +static int pagerStress(void *p, PgHdr *pPg){ + Pager *pPager = (Pager *)p; + int rc = SQLITE_OK; - assert(!MEMDB); + assert( pPg->pPager==pPager ); + assert( pPg->flags&PGHDR_DIRTY ); - /* Find a page to recycle. Try to locate a page that does not - ** require us to do an fsync() on the journal. + /* The doNotSync flag is set by the sqlite3PagerWrite() function while it + ** is journalling a set of two or more database pages that are stored + ** on the same disk sector. Syncing the journal is not allowed while + ** this is happening as it is important that all members of such a + ** set of pages are synced to disk together. So, if the page this function + ** is trying to make clean will require a journal sync and the doNotSync + ** flag is set, return without doing anything. The pcache layer will + ** just have to go ahead and allocate a new page buffer instead of + ** reusing pPg. + ** + ** Similarly, if the pager has already entered the error state, do not + ** try to write the contents of pPg to disk. */ - pPg = pPager->pFirstSynced; + if( pPager->errCode || (pPager->doNotSync && pPg->flags&PGHDR_NEED_SYNC) ){ + return SQLITE_OK; + } - /* If we could not find a page that does not require an fsync() - ** on the journal file then fsync the journal file. This is a - ** very slow operation, so we work hard to avoid it. But sometimes - ** it can't be helped. - */ - if( pPg==0 && pPager->pFirst && syncOk && !MEMDB){ - int rc = syncJournal(pPager); - if( rc!=0 ){ - return rc; - } - if( pPager->fullSync ){ - /* If in full-sync mode, write a new journal header into the - ** journal file. This is done to avoid ever modifying a journal - ** header that is involved in the rollback of pages that have - ** already been written to the database (in case the header is - ** trashed when the nRec field is updated). - */ + /* Sync the journal file if required. */ + if( pPg->flags&PGHDR_NEED_SYNC ){ + rc = syncJournal(pPager); + if( rc==SQLITE_OK && pPager->fullSync && + !(pPager->journalMode==PAGER_JOURNALMODE_MEMORY) && + !(sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) + ){ pPager->nRec = 0; - assert( pPager->journalOff > 0 ); - assert( pPager->doNotSync==0 ); rc = writeJournalHdr(pPager); - if( rc!=0 ){ - return rc; - } } - pPg = pPager->pFirst; } - if( pPg==0 ){ - return SQLITE_OK; + + /* If the page number of this page is larger than the current size of + ** the database image, it may need to be written to the sub-journal. + ** This is because the call to pager_write_pagelist() below will not + ** actually write data to the file in this case. + ** + ** Consider the following sequence of events: + ** + ** BEGIN; + ** + ** + ** SAVEPOINT sp; + ** + ** pagerStress(page X) + ** ROLLBACK TO sp; + ** + ** If (X>Y), then when pagerStress is called page X will not be written + ** out to the database file, but will be dropped from the cache. 
Then, + ** following the "ROLLBACK TO sp" statement, reading page X will read + ** data from the database file. This will be the copy of page X as it + ** was when the transaction started, not as it was when "SAVEPOINT sp" + ** was executed. + ** + ** The solution is to write the current data for page X into the + ** sub-journal file now (if it is not already there), so that it will + ** be restored to its current value when the "ROLLBACK TO sp" is + ** executed. + */ + if( rc==SQLITE_OK && pPg->pgno>pPager->dbSize && subjRequiresPage(pPg) ){ + rc = subjournalPage(pPg); + } + + /* Write the contents of the page out to the database file. */ + if( rc==SQLITE_OK ){ + pPg->pDirty = 0; + rc = pager_write_pagelist(pPg); + } + + /* Mark the page as clean. */ + if( rc==SQLITE_OK ){ + PAGERTRACE(("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno)); + sqlite3PcacheMakeClean(pPg); + } + + return pager_error(pPager, rc); +} + + +/* +** Allocate and initialize a new Pager object and put a pointer to it +** in *ppPager. The pager should eventually be freed by passing it +** to sqlite3PagerClose(). +** +** The zFilename argument is the path to the database file to open. +** If zFilename is NULL then a randomly-named temporary file is created +** and used as the file to be cached. Temporary files are be deleted +** automatically when they are closed. If zFilename is ":memory:" then +** all information is held in cache. It is never written to disk. +** This can be used to implement an in-memory database. +** +** The nExtra parameter specifies the number of bytes of space allocated +** along with each page reference. This space is available to the user +** via the sqlite3PagerGetExtra() API. +** +** The flags argument is used to specify properties that affect the +** operation of the pager. It should be passed some bitwise combination +** of the PAGER_OMIT_JOURNAL and PAGER_NO_READLOCK flags. +** +** The vfsFlags parameter is a bitmask to pass to the flags parameter +** of the xOpen() method of the supplied VFS when opening files. +** +** If the pager object is allocated and the specified file opened +** successfully, SQLITE_OK is returned and *ppPager set to point to +** the new pager object. If an error occurs, *ppPager is set to NULL +** and error code returned. This function may return SQLITE_NOMEM +** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or +** various SQLITE_IO_XXX errors. +*/ +int sqlite3PagerOpen( + sqlite3_vfs *pVfs, /* The virtual file system to use */ + Pager **ppPager, /* OUT: Return the Pager structure here */ + const char *zFilename, /* Name of the database file to open */ + int nExtra, /* Extra bytes append to each in-memory page */ + int flags, /* flags controlling this file */ + int vfsFlags /* flags passed through to sqlite3_vfs.xOpen() */ +){ + u8 *pPtr; + Pager *pPager = 0; /* Pager object to allocate and return */ + int rc = SQLITE_OK; /* Return code */ + int tempFile = 0; /* True for temp files (incl. 
in-memory files) */ + int memDb = 0; /* True if this is an in-memory file */ + int readOnly = 0; /* True if this is a read-only file */ + int journalFileSize; /* Bytes to allocate for each journal fd */ + char *zPathname = 0; /* Full path to database file */ + int nPathname = 0; /* Number of bytes in zPathname */ + int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; /* False to omit journal */ + int noReadlock = (flags & PAGER_NO_READLOCK)!=0; /* True to omit read-lock */ + int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */ + u16 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ + + /* Figure out how much space is required for each journal file-handle + ** (there are two of them, the main journal and the sub-journal). This + ** is the maximum space required for an in-memory journal file handle + ** and a regular journal file-handle. Note that a "regular journal-handle" + ** may be a wrapper capable of caching the first portion of the journal + ** file in memory to implement the atomic-write optimization (see + ** source file journal.c). + */ + if( sqlite3JournalSize(pVfs)>sqlite3MemJournalSize() ){ + journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); + }else{ + journalFileSize = ROUND8(sqlite3MemJournalSize()); } - assert( pPg->nRef==0 ); + /* Set the output variable to NULL in case an error occurs. */ + *ppPager = 0; - /* Write the page to the database file if it is dirty. + /* Compute and store the full pathname in an allocated buffer pointed + ** to by zPathname, length nPathname. Or, if this is a temporary file, + ** leave both nPathname and zPathname set to 0. */ - if( pPg->dirty ){ - int rc; - assert( pPg->needSync==0 ); - makeClean(pPg); - pPg->dirty = 1; - pPg->pDirty = 0; - rc = pager_write_pagelist( pPg ); + if( zFilename && zFilename[0] ){ + nPathname = pVfs->mxPathname+1; + zPathname = sqlite3Malloc(nPathname*2); + if( zPathname==0 ){ + return SQLITE_NOMEM; + } +#ifndef SQLITE_OMIT_MEMORYDB + if( strcmp(zFilename,":memory:")==0 ){ + memDb = 1; + zPathname[0] = 0; + }else +#endif + { + zPathname[0] = 0; /* Make sure initialized even if FullPathname() fails */ + rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname); + } + + nPathname = sqlite3Strlen30(zPathname); + if( rc==SQLITE_OK && nPathname+8>pVfs->mxPathname ){ + /* This branch is taken when the journal path required by + ** the database being opened will be more than pVfs->mxPathname + ** bytes in length. This means the database cannot be opened, + ** as it will not be possible to open the journal file or even + ** check for a hot-journal before reading. + */ + rc = SQLITE_CANTOPEN; + } if( rc!=SQLITE_OK ){ + sqlite3_free(zPathname); return rc; } } - assert( pPg->dirty==0 ); - /* If the page we are recycling is marked as alwaysRollback, then - ** set the global alwaysRollback flag, thus disabling the - ** sqlite3PagerDontRollback() optimization for the rest of this transaction. - ** It is necessary to do this because the page marked alwaysRollback - ** might be reloaded at a later time but at that point we won't remember - ** that is was marked alwaysRollback. This means that all pages must - ** be marked as alwaysRollback from here on out. + /* Allocate memory for the Pager structure, PCache object, the + ** three file descriptors, the database file name and the journal + ** file name. 
The layout in memory is as follows: + ** + ** Pager object (sizeof(Pager) bytes) + ** PCache object (sqlite3PcacheSize() bytes) + ** Database file handle (pVfs->szOsFile bytes) + ** Sub-journal file handle (journalFileSize bytes) + ** Main journal file handle (journalFileSize bytes) + ** Database file name (nPathname+1 bytes) + ** Journal file name (nPathname+8+1 bytes) + */ + pPtr = (u8 *)sqlite3MallocZero( + ROUND8(sizeof(*pPager)) + /* Pager structure */ + ROUND8(pcacheSize) + /* PCache object */ + ROUND8(pVfs->szOsFile) + /* The main db file */ + journalFileSize * 2 + /* The two journal files */ + nPathname + 1 + /* zFilename */ + nPathname + 8 + 1 /* zJournal */ + ); + assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); + if( !pPtr ){ + sqlite3_free(zPathname); + return SQLITE_NOMEM; + } + pPager = (Pager*)(pPtr); + pPager->pPCache = (PCache*)(pPtr += ROUND8(sizeof(*pPager))); + pPager->fd = (sqlite3_file*)(pPtr += ROUND8(pcacheSize)); + pPager->sjfd = (sqlite3_file*)(pPtr += ROUND8(pVfs->szOsFile)); + pPager->jfd = (sqlite3_file*)(pPtr += journalFileSize); + pPager->zFilename = (char*)(pPtr += journalFileSize); + assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) ); + + /* Fill in the Pager.zFilename and Pager.zJournal buffers, if required. */ + if( zPathname ){ + pPager->zJournal = (char*)(pPtr += nPathname + 1); + memcpy(pPager->zFilename, zPathname, nPathname); + memcpy(pPager->zJournal, zPathname, nPathname); + memcpy(&pPager->zJournal[nPathname], "-journal", 8); + sqlite3_free(zPathname); + } + pPager->pVfs = pVfs; + pPager->vfsFlags = vfsFlags; + + /* Open the pager file. + */ + if( zFilename && zFilename[0] && !memDb ){ + int fout = 0; /* VFS flags returned by xOpen() */ + rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); + readOnly = (fout&SQLITE_OPEN_READONLY); + + /* If the file was successfully opened for read/write access, + ** choose a default page size in case we have to create the + ** database file. The default page size is the maximum of: + ** + ** + SQLITE_DEFAULT_PAGE_SIZE, + ** + The value returned by sqlite3OsSectorSize() + ** + The largest page size that can be written atomically. + */ + if( rc==SQLITE_OK && !readOnly ){ + setSectorSize(pPager); + assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE); + if( szPageDfltsectorSize ){ + if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){ + szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE; + }else{ + szPageDflt = (u16)pPager->sectorSize; + } + } +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + { + int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); + int ii; + assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); + assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); + assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536); + for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){ + if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){ + szPageDflt = ii; + } + } + } +#endif + } + }else{ + /* If a temporary file is requested, it is not opened immediately. + ** In this case we accept the default page size and delay actually + ** opening the file until the first call to OsWrite(). + ** + ** This branch is also run for an in-memory database. An in-memory + ** database is the same as a temp-file that is never written out to + ** disk and uses an in-memory rollback journal. + */ + tempFile = 1; + pPager->state = PAGER_EXCLUSIVE; + readOnly = (vfsFlags&SQLITE_OPEN_READONLY); + } + + /* The following call to PagerSetPagesize() serves to set the value of + ** Pager.pageSize and to allocate the Pager.pTmpSpace buffer. 
*/ - if( pPg->alwaysRollback ){ - IOTRACE(("ALWAYS_ROLLBACK %p\n", pPager)) - pPager->alwaysRollback = 1; + if( rc==SQLITE_OK ){ + assert( pPager->memDb==0 ); + rc = sqlite3PagerSetPagesize(pPager, &szPageDflt, -1); + testcase( rc!=SQLITE_OK ); } - /* Unlink the old page from the free list and the hash table + /* If an error occurred in either of the blocks above, free the + ** Pager structure and close the file. */ - unlinkPage(pPg); - assert( pPg->pgno==0 ); + if( rc!=SQLITE_OK ){ + assert( !pPager->pTmpSpace ); + sqlite3OsClose(pPager->fd); + sqlite3_free(pPager); + return rc; + } - *ppPg = pPg; - return SQLITE_OK; -} + /* Initialize the PCache object. */ + assert( nExtra<1000 ); + nExtra = ROUND8(nExtra); + sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, + !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); -/* -** This function is called to free superfluous dynamically allocated memory -** held by the pager system. Memory in use by any SQLite pager allocated -** by the current thread may be sqliteFree()ed. -** -** nReq is the number of bytes of memory required. Once this much has -** been released, the function returns. A negative value for nReq means -** free as much memory as possible. The return value is the total number -** of bytes of memory released. -*/ -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO) -int sqlite3PagerReleaseMemory(int nReq){ - const ThreadData *pTsdro = sqlite3ThreadDataReadOnly(); - int nReleased = 0; - int i; + PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename)); + IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename)) - /* If the the global mutex is held, this subroutine becomes a - ** o-op; zero bytes of memory are freed. This is because - ** some of the code invoked by this function may also - ** try to obtain the mutex, resulting in a deadlock. - */ - if( sqlite3OsInMutex(0) ){ - return 0; + pPager->useJournal = (u8)useJournal; + pPager->noReadlock = (noReadlock && readOnly) ?1:0; + /* pPager->stmtOpen = 0; */ + /* pPager->stmtInUse = 0; */ + /* pPager->nRef = 0; */ + pPager->dbSizeValid = (u8)memDb; + /* pPager->stmtSize = 0; */ + /* pPager->stmtJSize = 0; */ + /* pPager->nPage = 0; */ + pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; + /* pPager->state = PAGER_UNLOCK; */ + assert( pPager->state == (tempFile ? PAGER_EXCLUSIVE : PAGER_UNLOCK) ); + /* pPager->errMask = 0; */ + pPager->tempFile = (u8)tempFile; + assert( tempFile==PAGER_LOCKINGMODE_NORMAL + || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); + assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); + pPager->exclusiveMode = (u8)tempFile; + pPager->changeCountDone = pPager->tempFile; + pPager->memDb = (u8)memDb; + pPager->readOnly = (u8)readOnly; + /* pPager->needSync = 0; */ + pPager->noSync = (pPager->tempFile || !useJournal) ?1:0; + pPager->fullSync = pPager->noSync ?0:1; + pPager->sync_flags = SQLITE_SYNC_NORMAL; + /* pPager->pFirst = 0; */ + /* pPager->pFirstSynced = 0; */ + /* pPager->pLast = 0; */ + pPager->nExtra = (u16)nExtra; + pPager->journalSizeLimit = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT; + assert( isOpen(pPager->fd) || tempFile ); + setSectorSize(pPager); + if( memDb ){ + pPager->journalMode = PAGER_JOURNALMODE_MEMORY; } + /* pPager->xBusyHandler = 0; */ + /* pPager->pBusyHandlerArg = 0; */ + /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ + *ppPager = pPager; + return SQLITE_OK; +} - /* Outermost loop runs for at most two iterations. First iteration we - ** try to find memory that can be released without calling fsync(). 
Second - ** iteration (which only runs if the first failed to free nReq bytes of - ** memory) is permitted to call fsync(). This is of course much more - ** expensive. - */ - for(i=0; i<=1; i++){ - /* Loop through all the SQLite pagers opened by the current thread. */ - Pager *pPager = pTsdro->pPager; - for( ; pPager && (nReq<0 || nReleasedpNext){ - PgHdr *pPg; - int rc; - if( MEMDB ){ - continue; - } +/* +** This function is called after transitioning from PAGER_UNLOCK to +** PAGER_SHARED state. It tests if there is a hot journal present in +** the file-system for the given pager. A hot journal is one that +** needs to be played back. According to this function, a hot-journal +** file exists if the following criteria are met: +** +** * The journal file exists in the file system, and +** * No process holds a RESERVED or greater lock on the database file, and +** * The database file itself is greater than 0 bytes in size, and +** * The first byte of the journal file exists and is not 0x00. +** +** If the current size of the database file is 0 but a journal file +** exists, that is probably an old journal left over from a prior +** database with the same name. In this case the journal file is +** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK +** is returned. +** +** This routine does not check if there is a master journal filename +** at the end of the file. If there is, and that master journal file +** does not exist, then the journal file is not really hot. In this +** case this routine will return a false-positive. The pager_playback() +** routine will discover that the journal file is not really hot and +** will not roll it back. +** +** If a hot-journal file is found to exist, *pExists is set to 1 and +** SQLITE_OK returned. If no hot-journal file is present, *pExists is +** set to 0 and SQLITE_OK returned. If an IO error occurs while trying +** to determine whether or not a hot-journal file exists, the IO error +** code is returned and the value of *pExists is undefined. +*/ +static int hasHotJournal(Pager *pPager, int *pExists){ + sqlite3_vfs * const pVfs = pPager->pVfs; + int rc; /* Return code */ + int exists; /* True if a journal file is present */ - /* For each pager, try to free as many pages as possible (without - ** calling fsync() if this is the first iteration of the outermost - ** loop). + assert( pPager!=0 ); + assert( pPager->useJournal ); + assert( isOpen(pPager->fd) ); + assert( !isOpen(pPager->jfd) ); + + *pExists = 0; + rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists); + if( rc==SQLITE_OK && exists ){ + int locked; /* True if some process holds a RESERVED lock */ + + /* Race condition here: Another process might have been holding the + ** the RESERVED lock and have a journal open at the sqlite3OsAccess() + ** call above, but then delete the journal and drop the lock before + ** we get to the following sqlite3OsCheckReservedLock() call. If that + ** is the case, this routine might think there is a hot journal when + ** in fact there is none. This results in a false-positive which will + ** be dealt with by the playback routine. Ticket #3883. + */ + rc = sqlite3OsCheckReservedLock(pPager->fd, &locked); + if( rc==SQLITE_OK && !locked ){ + int nPage; + + /* Check the size of the database file. If it consists of 0 pages, + ** then delete the journal file. See the header comment above for + ** the reasoning here. 
Delete the obsolete journal file under + ** a RESERVED lock to avoid race conditions and to avoid violating + ** [H33020]. */ - while( SQLITE_OK==(rc = pager_recycle(pPager, i, &pPg)) && pPg) { - /* We've found a page to free. At this point the page has been - ** removed from the page hash-table, free-list and synced-list - ** (pFirstSynced). It is still in the all pages (pAll) list. - ** Remove it from this list before freeing. - ** - ** Todo: Check the Pager.pStmt list to make sure this is Ok. It - ** probably is though. - */ - PgHdr *pTmp; - assert( pPg ); - if( pPg==pPager->pAll ){ - pPager->pAll = pPg->pNextAll; + rc = sqlite3PagerPagecount(pPager, &nPage); + if( rc==SQLITE_OK ){ + if( nPage==0 ){ + sqlite3BeginBenignMalloc(); + if( pPager->state>=PAGER_RESERVED + || sqlite3OsLock(pPager->fd, RESERVED_LOCK)==SQLITE_OK ){ + sqlite3OsDelete(pVfs, pPager->zJournal, 0); + assert( pPager->state>=PAGER_SHARED ); + if( pPager->state==PAGER_SHARED ){ + sqlite3OsUnlock(pPager->fd, SHARED_LOCK); + } + } + sqlite3EndBenignMalloc(); }else{ - for( pTmp=pPager->pAll; pTmp->pNextAll!=pPg; pTmp=pTmp->pNextAll ){} - pTmp->pNextAll = pPg->pNextAll; + /* The journal file exists and no other connection has a reserved + ** or greater lock on the database file. Now check that there is + ** at least one non-zero bytes at the start of the journal file. + ** If there is, then we consider this journal to be hot. If not, + ** it can be ignored. + */ + int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL; + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f); + if( rc==SQLITE_OK ){ + u8 first = 0; + rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + sqlite3OsClose(pPager->jfd); + *pExists = (first!=0); + }else if( rc==SQLITE_CANTOPEN ){ + /* If we cannot open the rollback journal file in order to see if + ** its has a zero header, that might be due to an I/O error, or + ** it might be due to the race condition described above and in + ** ticket #3883. Either way, assume that the journal is hot. + ** This might be a false positive. But if it is, then the + ** automatic journal playback and recovery mechanism will deal + ** with it under an EXCLUSIVE lock where we do not need to + ** worry so much with race conditions. + */ + *pExists = 1; + rc = SQLITE_OK; + } } - nReleased += sqliteAllocSize(pPg); - IOTRACE(("PGFREE %p %d *\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - sqliteFree(pPg); - } - - if( rc!=SQLITE_OK ){ - /* An error occured whilst writing to the database file or - ** journal in pager_recycle(). The error is not returned to the - ** caller of this function. Instead, set the Pager.errCode variable. - ** The error will be returned to the user (or users, in the case - ** of a shared pager cache) of the pager for which the error occured. - */ - assert( - (rc&0xff)==SQLITE_IOERR || - rc==SQLITE_FULL || - rc==SQLITE_BUSY - ); - assert( pPager->state>=PAGER_RESERVED ); - pager_error(pPager, rc); } } } - return nReleased; + return rc; } -#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT && !SQLITE_OMIT_DISKIO */ /* -** Read the content of page pPg out of the database file. -*/ -static int readDbPage(Pager *pPager, PgHdr *pPg, Pgno pgno){ - int rc; - assert( MEMDB==0 ); - rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); - if( rc==SQLITE_OK ){ - rc = sqlite3OsRead(pPager->fd, PGHDR_TO_DATA(pPg), - pPager->pageSize); +** Read the content for page pPg out of the database file and into +** pPg->pData. 
A shared lock or greater must be held on the database +** file before this function is called. +** +** If page 1 is read, then the value of Pager.dbFileVers[] is set to +** the value read from the database file. +** +** If an IO error occurs, then the IO error is returned to the caller. +** Otherwise, SQLITE_OK is returned. +*/ +static int readDbPage(PgHdr *pPg){ + Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ + Pgno pgno = pPg->pgno; /* Page number to read */ + int rc; /* Return code */ + i64 iOffset; /* Byte offset of file to read from */ + + assert( pPager->state>=PAGER_SHARED && !MEMDB ); + + if( !isOpen(pPager->fd) ){ + assert( pPager->tempFile ); + memset(pPg->pData, 0, pPager->pageSize); + return SQLITE_OK; + } + iOffset = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsRead(pPager->fd, pPg->pData, pPager->pageSize, iOffset); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + if( pgno==1 ){ + u8 *dbFileVers = &((u8*)pPg->pData)[24]; + memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); } + CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM); + PAGER_INCR(sqlite3_pager_readdb_count); PAGER_INCR(pPager->nRead); IOTRACE(("PGIN %p %d\n", pPager, pgno)); - if( pgno==1 ){ - memcpy(&pPager->dbFileVers, &((u8*)PGHDR_TO_DATA(pPg))[24], - sizeof(pPager->dbFileVers)); - } - CODEC1(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3); - PAGERTRACE4("FETCH %d page %d hash(%08x)\n", - PAGERID(pPager), pPg->pgno, pager_pagehash(pPg)); + PAGERTRACE(("FETCH %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_pagehash(pPg))); + return rc; } - /* -** This function is called to obtain the shared lock required before -** data may be read from the pager cache. If the shared lock has already -** been obtained, this function is a no-op. -** -** Immediately after obtaining the shared lock (if required), this function -** checks for a hot-journal file. If one is found, an emergency rollback -** is performed immediately. +** This function is called whenever the upper layer requests a database +** page is requested, before the cache is checked for a suitable page +** or any data is read from the database. It performs the following +** two functions: +** +** 1) If the pager is currently in PAGER_UNLOCK state (no lock held +** on the database file), then an attempt is made to obtain a +** SHARED lock on the database file. Immediately after obtaining +** the SHARED lock, the file-system is checked for a hot-journal, +** which is played back if present. Following any hot-journal +** rollback, the contents of the cache are validated by checking +** the 'change-counter' field of the database file header and +** discarded if they are found to be invalid. +** +** 2) If the pager is running in exclusive-mode, and there are currently +** no outstanding references to any pages, and is in the error state, +** then an attempt is made to clear the error state by discarding +** the contents of the page cache and rolling back any open journal +** file. +** +** If the operation described by (2) above is not attempted, and if the +** pager is in an error state other than SQLITE_FULL when this is called, +** the error state error code is returned. It is permitted to read the +** database when in SQLITE_FULL error state. +** +** Otherwise, if everything is successful, SQLITE_OK is returned. If an +** IO error occurs while locking the database, checking for a hot-journal +** file or rolling back a journal file, the IO error code is returned. 
*/ static int pagerSharedLock(Pager *pPager){ - int rc = SQLITE_OK; + int rc = SQLITE_OK; /* Return code */ + int isErrorReset = 0; /* True if recovering from error state */ - if( pPager->state==PAGER_UNLOCK ){ - if( !MEMDB ){ - assert( pPager->nRef==0 ); - if( !pPager->noReadlock ){ - rc = pager_wait_on_lock(pPager, SHARED_LOCK); - if( rc!=SQLITE_OK ){ - return pager_error(pPager, rc); - } - assert( pPager->state>=SHARED_LOCK ); + /* If this database has no outstanding page references and is in an + ** error-state, this is a chance to clear the error. Discard the + ** contents of the pager-cache and rollback any hot journal in the + ** file-system. + */ + if( !MEMDB && sqlite3PcacheRefCount(pPager->pPCache)==0 && pPager->errCode ){ + if( isOpen(pPager->jfd) || pPager->zJournal ){ + isErrorReset = 1; + } + pPager->errCode = SQLITE_OK; + pager_reset(pPager); + } + + /* If the pager is still in an error state, do not proceed. The error + ** state will be cleared at some point in the future when all page + ** references are dropped and the cache can be discarded. + */ + if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ + return pPager->errCode; + } + + if( pPager->state==PAGER_UNLOCK || isErrorReset ){ + sqlite3_vfs * const pVfs = pPager->pVfs; + int isHotJournal = 0; + assert( !MEMDB ); + assert( sqlite3PcacheRefCount(pPager->pPCache)==0 ); + if( !pPager->noReadlock ){ + rc = pager_wait_on_lock(pPager, SHARED_LOCK); + if( rc!=SQLITE_OK ){ + assert( pPager->state==PAGER_UNLOCK ); + return pager_error(pPager, rc); } - - /* If a journal file exists, and there is no RESERVED lock on the - ** database file, then it either needs to be played back or deleted. + }else if( pPager->state==PAGER_UNLOCK ){ + pPager->state = PAGER_SHARED; + } + assert( pPager->state>=SHARED_LOCK ); + + /* If a journal file exists, and there is no RESERVED lock on the + ** database file, then it either needs to be played back or deleted. + */ + if( !isErrorReset ){ + rc = hasHotJournal(pPager, &isHotJournal); + if( rc!=SQLITE_OK ){ + goto failed; + } + } + if( isErrorReset || isHotJournal ){ + /* Get an EXCLUSIVE lock on the database file. At this point it is + ** important that a RESERVED lock is not obtained on the way to the + ** EXCLUSIVE lock. If it were, another process might open the + ** database file, detect the RESERVED lock, and conclude that the + ** database is safe to read while this process is still rolling the + ** hot-journal back. + ** + ** Because the intermediate RESERVED lock is not requested, any + ** other process attempting to access the database file will get to + ** this point in the code and fail to obtain its own EXCLUSIVE lock + ** on the database file. */ - if( hasHotJournal(pPager) ){ - /* Get an EXCLUSIVE lock on the database file. At this point it is - ** important that a RESERVED lock is not obtained on the way to the - ** EXCLUSIVE lock. If it were, another process might open the - ** database file, detect the RESERVED lock, and conclude that the - ** database is safe to read while this process is still rolling it - ** back. - ** - ** Because the intermediate RESERVED lock is not requested, the - ** second process will get to this point in the code and fail to - ** obtain it's own EXCLUSIVE lock on the database file. - */ + if( pPager->statefd, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ - pager_unlock(pPager); - return pager_error(pPager, rc); + rc = pager_error(pPager, rc); + goto failed; } pPager->state = PAGER_EXCLUSIVE; + } - /* Open the journal for reading only. 
Return SQLITE_BUSY if - ** we are unable to open the journal file. - ** - ** The journal file does not need to be locked itself. The - ** journal file is never open unless the main database file holds - ** a write lock, so there is never any chance of two or more - ** processes opening the journal at the same time. - ** - ** Open the journal for read/write access. This is because in - ** exclusive-access mode the file descriptor will be kept open and - ** possibly used for a transaction later on. On some systems, the - ** OsTruncate() call used in exclusive-access mode also requires - ** a read/write file handle. - */ - rc = SQLITE_BUSY; - if( sqlite3OsFileExists(pPager->zJournal) ){ - int ro; - assert( !pPager->tempFile ); - rc = sqlite3OsOpenReadWrite(pPager->zJournal, &pPager->jfd, &ro); - assert( rc!=SQLITE_OK || pPager->jfd ); - if( ro ){ - rc = SQLITE_BUSY; - sqlite3OsClose(&pPager->jfd); + /* Open the journal for read/write access. This is because in + ** exclusive-access mode the file descriptor will be kept open and + ** possibly used for a transaction later on. On some systems, the + ** OsTruncate() call used in exclusive-access mode also requires + ** a read/write file handle. + */ + if( !isOpen(pPager->jfd) ){ + int res; + rc = sqlite3OsAccess(pVfs,pPager->zJournal,SQLITE_ACCESS_EXISTS,&res); + if( rc==SQLITE_OK ){ + if( res ){ + int fout = 0; + int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL; + assert( !pPager->tempFile ); + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout); + assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); + if( rc==SQLITE_OK && fout&SQLITE_OPEN_READONLY ){ + rc = SQLITE_CANTOPEN; + sqlite3OsClose(pPager->jfd); + } + }else{ + /* If the journal does not exist, it usually means that some + ** other connection managed to get in and roll it back before + ** this connection obtained the exclusive lock above. Or, it + ** may mean that the pager was in the error-state when this + ** function was called and the journal file does not exist. */ + rc = pager_end_transaction(pPager, 0); } } - if( rc!=SQLITE_OK ){ - pager_unlock(pPager); - return SQLITE_BUSY; - } - pPager->journalOpen = 1; - pPager->journalStarted = 0; - pPager->journalOff = 0; - pPager->setMaster = 0; - pPager->journalHdr = 0; + } + if( rc!=SQLITE_OK ){ + goto failed; + } + + /* TODO: Why are these cleared here? Is it necessary? */ + pPager->journalStarted = 0; + pPager->journalOff = 0; + pPager->setMaster = 0; + pPager->journalHdr = 0; - /* Playback and delete the journal. Drop the database write - ** lock and reacquire the read lock. - */ + /* Playback and delete the journal. Drop the database write + ** lock and reacquire the read lock. Purge the cache before + ** playing back the hot-journal so that we don't end up with + ** an inconsistent cache. + */ + if( isOpen(pPager->jfd) ){ rc = pager_playback(pPager, 1); if( rc!=SQLITE_OK ){ - return pager_error(pPager, rc); + rc = pager_error(pPager, rc); + goto failed; } - assert(pPager->state==PAGER_SHARED || - (pPager->exclusiveMode && pPager->state>PAGER_SHARED) - ); } + assert( (pPager->state==PAGER_SHARED) + || (pPager->exclusiveMode && pPager->state>PAGER_SHARED) + ); + } + + if( pPager->pBackup || sqlite3PcachePagecount(pPager->pPCache)>0 ){ + /* The shared-lock has just been acquired on the database file + ** and there are already pages in the cache (from a previous + ** read or write transaction). Check to see if the database + ** has been modified. If the database has changed, flush the + ** cache. 
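
To make the change-detection step just described concrete, here is a small self-contained sketch (not SQLite code) that re-reads the header bytes the pager caches in Pager.dbFileVers, 16 bytes starting at byte offset 24 of the database file, and reports whether they still match a previously saved copy:

#include <stdio.h>
#include <string.h>

#define FILEVERS_OFFSET 24
#define FILEVERS_SIZE   16

/* Return 1 if the saved header bytes no longer match the file,
** 0 if they still match, or -1 on an I/O error or short read. */
static int db_has_changed(const char *zDb,
                          const unsigned char aSaved[FILEVERS_SIZE]){
  unsigned char aNow[FILEVERS_SIZE];
  FILE *f = fopen(zDb, "rb");
  if( f==0 ) return -1;
  if( fseek(f, FILEVERS_OFFSET, SEEK_SET)!=0
   || fread(aNow, 1, FILEVERS_SIZE, f)!=FILEVERS_SIZE ){
    fclose(f);
    return -1;
  }
  fclose(f);
  return memcmp(aSaved, aNow, FILEVERS_SIZE)!=0;
}

int main(int argc, char **argv){
  unsigned char aSaved[FILEVERS_SIZE] = {0};  /* e.g. captured at first read */
  if( argc<2 ) return 1;
  printf("changed=%d\n", db_has_changed(argv[1], aSaved));
  return 0;
}
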
+ ** + ** Database changes is detected by looking at 15 bytes beginning + ** at offset 24 into the file. The first 4 of these 16 bytes are + ** a 32-bit counter that is incremented with each change. The + ** other bytes change randomly with each file change when + ** a codec is in use. + ** + ** There is a vanishingly small chance that a change will not be + ** detected. The chance of an undetected change is so small that + ** it can be neglected. + */ + char dbFileVers[sizeof(pPager->dbFileVers)]; + sqlite3PagerPagecount(pPager, 0); - if( pPager->pAll ){ - /* The shared-lock has just been acquired on the database file - ** and there are already pages in the cache (from a previous - ** read or write transaction). Check to see if the database - ** has been modified. If the database has changed, flush the - ** cache. - ** - ** Database changes is detected by looking at 15 bytes beginning - ** at offset 24 into the file. The first 4 of these 16 bytes are - ** a 32-bit counter that is incremented with each change. The - ** other bytes change randomly with each file change when - ** a codec is in use. - ** - ** There is a vanishingly small chance that a change will not be - ** detected. The chance of an undetected change is so small that - ** it can be neglected. - */ - char dbFileVers[sizeof(pPager->dbFileVers)]; - sqlite3PagerPagecount(pPager); - - if( pPager->errCode ){ - return pPager->errCode; - } + if( pPager->errCode ){ + rc = pPager->errCode; + goto failed; + } - if( pPager->dbSize>0 ){ - IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); - rc = sqlite3OsSeek(pPager->fd, 24); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers)); - if( rc!=SQLITE_OK ){ - return rc; - } - }else{ - memset(dbFileVers, 0, sizeof(dbFileVers)); + assert( pPager->dbSizeValid ); + if( pPager->dbSize>0 ){ + IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); + rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); + if( rc!=SQLITE_OK ){ + goto failed; } + }else{ + memset(dbFileVers, 0, sizeof(dbFileVers)); + } - if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ - pager_reset(pPager); - } + if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ + pager_reset(pPager); } } - assert( pPager->exclusiveMode || pPager->state<=PAGER_SHARED ); - if( pPager->state==PAGER_UNLOCK ){ - pPager->state = PAGER_SHARED; - } + assert( pPager->exclusiveMode || pPager->state==PAGER_SHARED ); } + failed: + if( rc!=SQLITE_OK ){ + /* pager_unlock() is a no-op for exclusive mode and in-memory databases. */ + pager_unlock(pPager); + } return rc; } /* -** Allocate a PgHdr object. Either create a new one or reuse -** an existing one that is not otherwise in use. -** -** A new PgHdr structure is created if any of the following are -** true: -** -** (1) We have not exceeded our maximum allocated cache size -** as set by the "PRAGMA cache_size" command. -** -** (2) There are no unused PgHdr objects available at this time. -** -** (3) This is an in-memory database. -** -** (4) There are no PgHdr objects that do not require a journal -** file sync and a sync of the journal file is currently -** prohibited. -** -** Otherwise, reuse an existing PgHdr. In other words, reuse an -** existing PgHdr if all of the following are true: -** -** (1) We have reached or exceeded the maximum cache size -** allowed by "PRAGMA cache_size". +** If the reference count has reached zero, rollback any active +** transaction and unlock the pager. 
** -** (2) There is a PgHdr available with PgHdr->nRef==0 -** -** (3) We are not in an in-memory database -** -** (4) Either there is an available PgHdr that does not need -** to be synced to disk or else disk syncing is currently -** allowed. -*/ -static int pagerAllocatePage(Pager *pPager, PgHdr **ppPg){ - int rc = SQLITE_OK; - PgHdr *pPg; - - /* Create a new PgHdr if any of the four conditions defined - ** above is met: */ - if( pPager->nPagemxPage - || pPager->pFirst==0 - || MEMDB - || (pPager->pFirstSynced==0 && pPager->doNotSync) +** Except, in locking_mode=EXCLUSIVE when there is nothing to in +** the rollback journal, the unlock is not performed and there is +** nothing to rollback, so this routine is a no-op. +*/ +static void pagerUnlockIfUnused(Pager *pPager){ + if( (sqlite3PcacheRefCount(pPager->pPCache)==0) + && (!pPager->exclusiveMode || pPager->journalOff>0) ){ - if( pPager->nPage>=pPager->nHash ){ - pager_resize_hash_table(pPager, - pPager->nHash<256 ? 256 : pPager->nHash*2); - if( pPager->nHash==0 ){ - rc = SQLITE_NOMEM; - goto pager_allocate_out; - } - } - pPg = sqliteMallocRaw( sizeof(*pPg) + pPager->pageSize - + sizeof(u32) + pPager->nExtra - + MEMDB*sizeof(PgHistory) ); - if( pPg==0 ){ - rc = SQLITE_NOMEM; - goto pager_allocate_out; - } - memset(pPg, 0, sizeof(*pPg)); - if( MEMDB ){ - memset(PGHDR_TO_HIST(pPg, pPager), 0, sizeof(PgHistory)); - } - pPg->pPager = pPager; - pPg->pNextAll = pPager->pAll; - pPager->pAll = pPg; - pPager->nPage++; - }else{ - /* Recycle an existing page with a zero ref-count. */ - rc = pager_recycle(pPager, 1, &pPg); - if( rc==SQLITE_BUSY ){ - rc = SQLITE_IOERR_BLOCKED; - } - if( rc!=SQLITE_OK ){ - goto pager_allocate_out; - } - assert( pPager->state>=SHARED_LOCK ); - assert(pPg); + pagerUnlockAndRollback(pPager); } - *ppPg = pPg; - -pager_allocate_out: - return rc; } /* -** Make sure we have the content for a page. If the page was -** previously acquired with noContent==1, then the content was -** just initialized to zeros instead of being read from disk. -** But now we need the real data off of disk. So make sure we -** have it. Read it in if we do not have it already. -*/ -static int pager_get_content(PgHdr *pPg){ - if( pPg->needRead ){ - int rc = readDbPage(pPg->pPager, pPg, pPg->pgno); - if( rc==SQLITE_OK ){ - pPg->needRead = 0; - }else{ - return rc; - } - } - return SQLITE_OK; +** Drop a page from the cache using sqlite3PcacheDrop(). +** +** If this means there are now no pages with references to them, a rollback +** occurs and the lock on the database is removed. +*/ +static void pagerDropPage(DbPage *pPg){ + Pager *pPager = pPg->pPager; + sqlite3PcacheDrop(pPg); + pagerUnlockIfUnused(pPager); } /* -** Acquire a page. +** Acquire a reference to page number pgno in pager pPager (a page +** reference has type DbPage*). If the requested reference is +** successfully obtained, it is copied to *ppPage and SQLITE_OK returned. +** +** This function calls pagerSharedLock() to obtain a SHARED lock on +** the database file if such a lock or greater is not already held. +** This may cause hot-journal rollback or a cache purge. See comments +** above function pagerSharedLock() for details. +** +** If the requested page is already in the cache, it is returned. +** Otherwise, a new page object is allocated and populated with data +** read from the database file. In some cases, the pcache module may +** choose not to allocate a new page object and may reuse an existing +** object with no outstanding references. 
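
The fetch behaviour described above (return the cached copy of a page if one exists, otherwise allocate a buffer and read the page image from the file) can be illustrated with a short standalone sketch. DemoPage and demoGetPage() are invented names, and a single linked list stands in for the real page cache:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 1024

typedef struct DemoPage DemoPage;
struct DemoPage {
  unsigned pgno;                  /* Page number (1-based) */
  unsigned char aData[PAGE_SIZE]; /* Page image */
  DemoPage *pNext;                /* Next page in the (tiny) cache list */
};

/* Look up pgno in the cache list; read it from file f on a miss. */
static DemoPage *demoGetPage(DemoPage **ppCache, FILE *f, unsigned pgno){
  DemoPage *p;
  for(p=*ppCache; p; p=p->pNext){
    if( p->pgno==pgno ) return p;            /* cache hit */
  }
  p = malloc(sizeof(*p));
  if( p==0 ) return 0;
  p->pgno = pgno;
  memset(p->aData, 0, PAGE_SIZE);            /* pages past EOF stay zeroed */
  if( fseek(f, (long)(pgno-1)*PAGE_SIZE, SEEK_SET)==0 ){
    size_t nGot = fread(p->aData, 1, PAGE_SIZE, f);
    (void)nGot;                              /* short read: tail stays zero */
  }
  p->pNext = *ppCache;
  *ppCache = p;
  return p;
}

int main(int argc, char **argv){
  DemoPage *pCache = 0;
  DemoPage *pPg;
  FILE *f = argc>1 ? fopen(argv[1], "rb") : 0;
  if( f==0 ) return 1;
  pPg = demoGetPage(&pCache, f, 1);
  if( pPg ) printf("first byte of page 1: %u\n", pPg->aData[0]);
  fclose(f);
  return 0;
}
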
+** +** The extra data appended to a page is always initialized to zeros the +** first time a page is loaded into memory. If the page requested is +** already in the cache when this function is called, then the extra +** data is left as it was when the page object was last used. +** +** If the database image is smaller than the requested page or if a +** non-zero value is passed as the noContent parameter and the +** requested page is not already stored in the cache, then no +** actual disk read occurs. In this case the memory image of the +** page is initialized to all zeros. ** -** A read lock on the disk file is obtained when the first page is acquired. -** This read lock is dropped when the last page is released. +** If noContent is true, it means that we do not care about the contents +** of the page. This occurs in two seperate scenarios: +** +** a) When reading a free-list leaf page from the database, and ** -** This routine works for any page number greater than 0. If the database -** file is smaller than the requested page, then no actual disk -** read occurs and the memory image of the page is initialized to -** all zeros. The extra data appended to a page is always initialized -** to zeros the first time a page is loaded into memory. +** b) When a savepoint is being rolled back and we need to load +** a new page into the cache to populate with the data read +** from the savepoint journal. +** +** If noContent is true, then the data returned is zeroed instead of +** being read from the database. Additionally, the bits corresponding +** to pgno in Pager.pInJournal (bitvec of pages already written to the +** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open +** savepoints are set. This means if the page is made writable at any +** point in the future, using a call to sqlite3PagerWrite(), its contents +** will not be journaled. This saves IO. ** ** The acquisition might fail for several reasons. In all cases, ** an appropriate error code is returned and *ppPage is set to NULL. @@ -3085,15 +3835,6 @@ ** has to go to disk, and could also playback an old journal if necessary. ** Since Lookup() never goes to disk, it never has to deal with locks ** or journal files. -** -** If noContent is false, the page contents are actually read from disk. -** If noContent is true, it means that we do not care about the contents -** of the page at this time, so do not do a disk read. Just fill in the -** page content with zeros. But mark the fact that we have not read the -** content by setting the PgHdr.needRead flag. Later on, if -** sqlite3PagerWrite() is called on this page or if this routine is -** called again with noContent==0, that means that the content is needed -** and the disk read should occur at that point. */ int sqlite3PagerAcquire( Pager *pPager, /* The pager open on the database file */ @@ -3101,10 +3842,14 @@ DbPage **ppPage, /* Write a pointer to the page here */ int noContent /* Do not bother reading content from disk if true */ ){ - PgHdr *pPg; + PgHdr *pPg = 0; int rc; - assert( pPager->state==PAGER_UNLOCK || pPager->nRef>0 || pgno==1 ); + assert( assert_pager_state(pPager) ); + assert( pPager->state==PAGER_UNLOCK + || sqlite3PcacheRefCount(pPager->pPCache)>0 + || pgno==1 + ); /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page ** number greater than this, or zero, is requested. 
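A minimal sketch of the read-or-zero decision described above: load_page_image() and its arguments are invented for illustration and are not part of the pager. If the requested page lies beyond the end of the database image, or the caller passed noContent because it will overwrite the page anyway, the buffer is zeroed and no read is issued; otherwise the page is read from its offset in the file.

#include <string.h>
#include <unistd.h>

/* Illustrative only.  Pages are numbered from 1, so page pgno starts at
** byte offset (pgno-1)*pageSize in the database file.
*/
static int load_page_image(
  int fd,                 /* Database file descriptor */
  unsigned char *aBuf,    /* Output buffer, pageSize bytes */
  unsigned pgno,          /* Page number to load (1-based) */
  int pageSize,           /* Database page size in bytes */
  unsigned nDbPage,       /* Number of pages in the database image */
  int noContent           /* True if the caller does not need the contents */
){
  off_t iOfst;
  ssize_t n;
  if( pgno>nDbPage || noContent ){
    memset(aBuf, 0, (size_t)pageSize);               /* no disk read needed */
    return 0;
  }
  iOfst = (off_t)(pgno-1) * pageSize;
  n = pread(fd, aBuf, (size_t)pageSize, iOfst);
  if( n<0 ) return -1;                               /* I/O error */
  if( n<pageSize ) memset(&aBuf[n], 0, (size_t)(pageSize-n)); /* zero the tail */
  return 0;
}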
@@ -3117,9 +3862,6 @@ */ assert( pPager!=0 ); *ppPage = 0; - if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ - return pPager->errCode; - } /* If this is the first page accessed, then get a SHARED lock ** on the database file. pagerSharedLock() is a no-op if @@ -3131,90 +3873,67 @@ } assert( pPager->state!=PAGER_UNLOCK ); - pPg = pager_lookup(pPager, pgno); - if( pPg==0 ){ - /* The requested page is not in the page cache. */ + rc = sqlite3PcacheFetch(pPager->pPCache, pgno, 1, &pPg); + if( rc!=SQLITE_OK ){ + pagerUnlockIfUnused(pPager); + return rc; + } + assert( pPg->pgno==pgno ); + assert( pPg->pPager==pPager || pPg->pPager==0 ); + if( pPg->pPager==0 ){ + /* The pager cache has created a new page. Its content needs to + ** be initialized. + */ int nMax; - int h; PAGER_INCR(pPager->nMiss); - rc = pagerAllocatePage(pPager, &pPg); - if( rc!=SQLITE_OK ){ - return rc; - } - - pPg->pgno = pgno; - assert( !MEMDB || pgno>pPager->stmtSize ); - if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ - sqlite3CheckMemory(pPager->aInJournal, pgno/8); - assert( pPager->journalOpen ); - pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; - pPg->needSync = 0; - }else{ - pPg->inJournal = 0; - pPg->needSync = 0; - } - - makeClean(pPg); - pPg->nRef = 1; - REFINFO(pPg); + pPg->pPager = pPager; - pPager->nRef++; - if( pPager->nExtra>0 ){ - memset(PGHDR_TO_EXTRA(pPg, pPager), 0, pPager->nExtra); - } - nMax = sqlite3PagerPagecount(pPager); - if( pPager->errCode ){ + rc = sqlite3PagerPagecount(pPager, &nMax); + if( rc!=SQLITE_OK ){ sqlite3PagerUnref(pPg); - rc = pPager->errCode; return rc; } - /* Populate the page with data, either by reading from the database - ** file, or by setting the entire page to zero. - */ - if( nMax<(int)pgno || MEMDB || (noContent && !pPager->alwaysRollback) ){ + if( nMax<(int)pgno || MEMDB || noContent ){ if( pgno>pPager->mxPgno ){ sqlite3PagerUnref(pPg); return SQLITE_FULL; } - memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); - pPg->needRead = noContent && !pPager->alwaysRollback; + if( noContent ){ + /* Failure to set the bits in the InJournal bit-vectors is benign. + ** It merely means that we might do some extra work to journal a + ** page that does not need to be journaled. Nevertheless, be sure + ** to test the case where a malloc error occurs while trying to set + ** a bit in a bit vector. + */ + sqlite3BeginBenignMalloc(); + if( pgno<=pPager->dbOrigSize ){ + TESTONLY( rc = ) sqlite3BitvecSet(pPager->pInJournal, pgno); + testcase( rc==SQLITE_NOMEM ); + } + TESTONLY( rc = ) addToSavepointBitvecs(pPager, pgno); + testcase( rc==SQLITE_NOMEM ); + sqlite3EndBenignMalloc(); + }else{ + memset(pPg->pData, 0, pPager->pageSize); + } IOTRACE(("ZERO %p %d\n", pPager, pgno)); }else{ - rc = readDbPage(pPager, pPg, pgno); - if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ - pPg->pgno = 0; - sqlite3PagerUnref(pPg); + assert( pPg->pPager==pPager ); + rc = readDbPage(pPg); + if( rc!=SQLITE_OK ){ + pagerDropPage(pPg); return rc; } - pPg->needRead = 0; - } - - /* Link the page into the page hash table */ - h = pgno & (pPager->nHash-1); - assert( pgno!=0 ); - pPg->pNextHash = pPager->aHash[h]; - pPager->aHash[h] = pPg; - if( pPg->pNextHash ){ - assert( pPg->pNextHash->pPrevHash==0 ); - pPg->pNextHash->pPrevHash = pPg; } - #ifdef SQLITE_CHECK_PAGES pPg->pageHash = pager_pagehash(pPg); #endif }else{ /* The requested page is in the page cache. 
*/ - assert(pPager->nRef>0 || pgno==1); PAGER_INCR(pPager->nHit); - if( !noContent ){ - rc = pager_get_content(pPg); - if( rc ){ - return rc; - } - } - page_ref(pPg); } + *ppPage = pPg; return SQLITE_OK; } @@ -3222,7 +3941,9 @@ /* ** Acquire a page if it is already in the in-memory cache. Do ** not read the page from disk. Return a pointer to the page, -** or 0 if the page is not in cache. +** or 0 if the page is not in cache. Also, return 0 if the +** pager is in PAGER_UNLOCK state when this function is called, +** or if the pager is in an error state other than SQLITE_FULL. ** ** See also sqlite3PagerGet(). The difference between this routine ** and sqlite3PagerGet() is that _get() will go to the disk and read @@ -3231,271 +3952,231 @@ ** has ever happened. */ DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ - PgHdr *pPg; - + PgHdr *pPg = 0; assert( pPager!=0 ); assert( pgno!=0 ); - if( pPager->state==PAGER_UNLOCK ){ - assert( !pPager->pAll || pPager->exclusiveMode ); - return 0; - } - if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ - return 0; + if( (pPager->state!=PAGER_UNLOCK) + && (pPager->errCode==SQLITE_OK || pPager->errCode==SQLITE_FULL) + ){ + sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg); } - pPg = pager_lookup(pPager, pgno); - if( pPg==0 ) return 0; - page_ref(pPg); + return pPg; } /* -** Release a page. +** Release a page reference. ** ** If the number of references to the page drop to zero, then the ** page is added to the LRU list. When all references to all pages ** are released, a rollback occurs and the lock on the database is ** removed. */ -int sqlite3PagerUnref(DbPage *pPg){ - - /* Decrement the reference count for this page - */ - assert( pPg->nRef>0 ); - pPg->nRef--; - REFINFO(pPg); - - CHECK_PAGE(pPg); +void sqlite3PagerUnref(DbPage *pPg){ + if( pPg ){ + Pager *pPager = pPg->pPager; + sqlite3PcacheRelease(pPg); + pagerUnlockIfUnused(pPager); + } +} - /* When the number of references to a page reach 0, call the - ** destructor and add the page to the freelist. - */ - if( pPg->nRef==0 ){ - Pager *pPager; - pPager = pPg->pPager; - pPg->pNextFree = 0; - pPg->pPrevFree = pPager->pLast; - pPager->pLast = pPg; - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg; +/* +** If the main journal file has already been opened, ensure that the +** sub-journal file is open too. If the main journal is not open, +** this function is a no-op. +** +** SQLITE_OK is returned if everything goes according to plan. +** An SQLITE_IOERR_XXX error code is returned if a call to +** sqlite3OsOpen() fails. +*/ +static int openSubJournal(Pager *pPager){ + int rc = SQLITE_OK; + if( isOpen(pPager->jfd) && !isOpen(pPager->sjfd) ){ + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ + sqlite3MemJournalOpen(pPager->sjfd); }else{ - pPager->pFirst = pPg; - } - if( pPg->needSync==0 && pPager->pFirstSynced==0 ){ - pPager->pFirstSynced = pPg; - } - if( pPager->xDestructor ){ - pPager->xDestructor(pPg, pPager->pageSize); - } - - /* When all pages reach the freelist, drop the read lock from - ** the database file. - */ - pPager->nRef--; - assert( pPager->nRef>=0 ); - if( pPager->nRef==0 && (!pPager->exclusiveMode || pPager->journalOff>0) ){ - pagerUnlockAndRollback(pPager); + rc = pagerOpentemp(pPager, pPager->sjfd, SQLITE_OPEN_SUBJOURNAL); } } - return SQLITE_OK; + return rc; } /* -** Create a journal file for pPager. There should already be a RESERVED -** or EXCLUSIVE lock on the database file when this routine is called. 
-** -** Return SQLITE_OK if everything. Return an error code and release the -** write lock if anything goes wrong. +** This function is called at the start of every write transaction. +** There must already be a RESERVED or EXCLUSIVE lock on the database +** file when this routine is called. +** +** Open the journal file for pager pPager and write a journal header +** to the start of it. If there are active savepoints, open the sub-journal +** as well. This function is only used when the journal file is being +** opened to write a rollback log for a transaction. It is not used +** when opening a hot journal file to roll it back. +** +** If the journal file is already open (as it may be in exclusive mode), +** then this function just writes a journal header to the start of the +** already open file. +** +** Whether or not the journal file is opened by this function, the +** Pager.pInJournal bitvec structure is allocated. +** +** Return SQLITE_OK if everything is successful. Otherwise, return +** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or +** an IO error code if opening or writing the journal file fails. */ static int pager_open_journal(Pager *pPager){ - int rc; - assert( !MEMDB ); + int rc = SQLITE_OK; /* Return code */ + sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */ + assert( pPager->state>=PAGER_RESERVED ); - assert( pPager->journalOpen==0 ); assert( pPager->useJournal ); - assert( pPager->aInJournal==0 ); - sqlite3PagerPagecount(pPager); - pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( pPager->aInJournal==0 ){ - rc = SQLITE_NOMEM; - goto failed_to_open_journal; - } - rc = sqlite3OsOpenExclusive(pPager->zJournal, &pPager->jfd, - pPager->tempFile); - assert( rc!=SQLITE_OK || pPager->jfd ); - pPager->journalOff = 0; - pPager->setMaster = 0; - pPager->journalHdr = 0; - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ){ - sqlite3OsDelete(pPager->zJournal); - } - goto failed_to_open_journal; - } - sqlite3OsSetFullSync(pPager->jfd, pPager->full_fsync); - sqlite3OsSetFullSync(pPager->fd, pPager->full_fsync); - sqlite3OsOpenDirectory(pPager->jfd, pPager->zDirectory); - pPager->journalOpen = 1; - pPager->journalStarted = 0; - pPager->needSync = 0; - pPager->alwaysRollback = 0; - pPager->nRec = 0; + assert( pPager->pInJournal==0 ); + + /* If already in the error state, this function is a no-op. */ if( pPager->errCode ){ - rc = pPager->errCode; - goto failed_to_open_journal; + return pPager->errCode; } - pPager->origDbSize = pPager->dbSize; - rc = writeJournalHdr(pPager); + /* TODO: Is it really possible to get here with dbSizeValid==0? If not, + ** the call to PagerPagecount() can be removed. + */ + testcase( pPager->dbSizeValid==0 ); + sqlite3PagerPagecount(pPager, 0); - if( pPager->stmtAutoopen && rc==SQLITE_OK ){ - rc = sqlite3PagerStmtBegin(pPager); + pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize); + if( pPager->pInJournal==0 ){ + return SQLITE_NOMEM; } - if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - rc = pager_end_transaction(pPager); - if( rc==SQLITE_OK ){ - rc = SQLITE_FULL; + + /* Open the journal file if it is not already open. */ + if( !isOpen(pPager->jfd) ){ + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ + sqlite3MemJournalOpen(pPager->jfd); + }else{ + const int flags = /* VFS flags to open journal file */ + SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| + (pPager->tempFile ? 
+ (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL): + (SQLITE_OPEN_MAIN_JOURNAL) + ); +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + rc = sqlite3JournalOpen( + pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager) + ); +#else + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0); +#endif } + assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); + } + + + /* Write the first journal header to the journal file and open + ** the sub-journal if necessary. + */ + if( rc==SQLITE_OK ){ + /* TODO: Check if all of these are really required. */ + pPager->dbOrigSize = pPager->dbSize; + pPager->journalStarted = 0; + pPager->needSync = 0; + pPager->nRec = 0; + pPager->journalOff = 0; + pPager->setMaster = 0; + pPager->journalHdr = 0; + rc = writeJournalHdr(pPager); + } + if( rc==SQLITE_OK && pPager->nSavepoint ){ + rc = openSubJournal(pPager); } - return rc; -failed_to_open_journal: - sqliteFree(pPager->aInJournal); - pPager->aInJournal = 0; + if( rc!=SQLITE_OK ){ + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; + } return rc; } /* -** Acquire a write-lock on the database. The lock is removed when -** the any of the following happen: -** -** * sqlite3PagerCommitPhaseTwo() is called. -** * sqlite3PagerRollback() is called. -** * sqlite3PagerClose() is called. -** * sqlite3PagerUnref() is called to on every outstanding page. +** Begin a write-transaction on the specified pager object. If a +** write-transaction has already been opened, this function is a no-op. ** -** The first parameter to this routine is a pointer to any open page of the -** database file. Nothing changes about the page - it is used merely to -** acquire a pointer to the Pager structure and as proof that there is -** already a read-lock on the database. -** -** The second parameter indicates how much space in bytes to reserve for a -** master journal file-name at the start of the journal when it is created. -** -** A journal file is opened if this is not a temporary file. For temporary -** files, the opening of the journal file is deferred until there is an -** actual need to write to the journal. -** -** If the database is already reserved for writing, this routine is a no-op. -** -** If exFlag is true, go ahead and get an EXCLUSIVE lock on the file -** immediately instead of waiting until we try to flush the cache. The -** exFlag is ignored if a transaction is already active. +** If the exFlag argument is false, then acquire at least a RESERVED +** lock on the database file. If exFlag is true, then acquire at least +** an EXCLUSIVE lock. If such a lock is already held, no locking +** functions need be called. +** +** If this is not a temporary or in-memory file and, the journal file is +** opened if it has not been already. For a temporary file, the opening +** of the journal file is deferred until there is an actual need to +** write to the journal. TODO: Why handle temporary files differently? +** +** If the journal file is opened (or if it is already open), then a +** journal-header is written to the start of it. +** +** If the subjInMemory argument is non-zero, then any sub-journal opened +** within this transaction will be opened as an in-memory file. This +** has no effect if the sub-journal is already opened (as it may be when +** running in exclusive mode) or if the transaction does not require a +** sub-journal. If the subjInMemory argument is zero, then any required +** sub-journal is implemented in-memory if pPager is an in-memory database, +** or using a temporary file otherwise. 
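For orientation, the sketch below serializes a rollback-journal header with the layout I understand writeJournalHdr() to produce: an 8-byte magic string followed by five big-endian 32-bit fields (record count, checksum nonce, original database size in pages, sector size, page size), with the header then padded to a sector boundary. The magic value, field order and helper names here are assumptions stated for illustration, not the library's own code.

#include <string.h>

typedef unsigned int u32;

static void put_be32(unsigned char *p, u32 v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

/* Assumed journal magic, copied here for illustration only. */
static const unsigned char aJrnlMagic[8] =
  { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };

/* Fill aHdr (at least 28 bytes) with one journal header. */
static void build_jrnl_hdr(
  unsigned char *aHdr,
  u32 nRec,        /* Page records that follow, or 0 meaning "read to EOF" */
  u32 cksumInit,   /* Nonce used to seed the per-page checksums */
  u32 nOrigPage,   /* Database size in pages before the transaction */
  u32 sectorSize,  /* Sector size assumed by the pager */
  u32 pageSize     /* Database page size */
){
  memcpy(aHdr, aJrnlMagic, 8);
  put_be32(&aHdr[8],  nRec);
  put_be32(&aHdr[12], cksumInit);
  put_be32(&aHdr[16], nOrigPage);
  put_be32(&aHdr[20], sectorSize);
  put_be32(&aHdr[24], pageSize);
  /* The real header is padded out to the next sector boundary. */
}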
*/ -int sqlite3PagerBegin(DbPage *pPg, int exFlag){ - Pager *pPager = pPg->pPager; +int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){ int rc = SQLITE_OK; - assert( pPg->nRef>0 ); assert( pPager->state!=PAGER_UNLOCK ); + pPager->subjInMemory = (u8)subjInMemory; if( pPager->state==PAGER_SHARED ){ - assert( pPager->aInJournal==0 ); - if( MEMDB ){ - pPager->state = PAGER_EXCLUSIVE; - pPager->origDbSize = pPager->dbSize; - }else{ - rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); - if( rc==SQLITE_OK ){ - pPager->state = PAGER_RESERVED; - if( exFlag ){ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - } - } - if( rc!=SQLITE_OK ){ - return rc; - } - pPager->dirtyCache = 0; - PAGERTRACE2("TRANSACTION %d\n", PAGERID(pPager)); - if( pPager->useJournal && !pPager->tempFile ){ - rc = pager_open_journal(pPager); + assert( pPager->pInJournal==0 ); + assert( !MEMDB && !pPager->tempFile ); + + /* Obtain a RESERVED lock on the database file. If the exFlag parameter + ** is true, then immediately upgrade this to an EXCLUSIVE lock. The + ** busy-handler callback can be used when upgrading to the EXCLUSIVE + ** lock, but not when obtaining the RESERVED lock. + */ + rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); + if( rc==SQLITE_OK ){ + pPager->state = PAGER_RESERVED; + if( exFlag ){ + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); } } - }else if( pPager->journalOpen && pPager->journalOff==0 ){ - /* This happens when the pager was in exclusive-access mode last + + /* If the required locks were successfully obtained, open the journal + ** file and write the first journal-header to it. + */ + if( rc==SQLITE_OK && pPager->useJournal + && pPager->journalMode!=PAGER_JOURNALMODE_OFF + ){ + rc = pager_open_journal(pPager); + } + }else if( isOpen(pPager->jfd) && pPager->journalOff==0 ){ + /* This happens when the pager was in exclusive-access mode the last ** time a (read or write) transaction was successfully concluded ** by this connection. Instead of deleting the journal file it was - ** kept open and truncated to 0 bytes. + ** kept open and either was truncated to 0 bytes or its header was + ** overwritten with zeros. */ assert( pPager->nRec==0 ); - assert( pPager->origDbSize==0 ); - assert( pPager->aInJournal==0 ); - sqlite3PagerPagecount(pPager); - pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( !pPager->aInJournal ){ - rc = SQLITE_NOMEM; - }else{ - pPager->origDbSize = pPager->dbSize; - rc = writeJournalHdr(pPager); - } - } - assert( !pPager->journalOpen || pPager->journalOff>0 || rc!=SQLITE_OK ); - return rc; -} - -/* -** Make a page dirty. Set its dirty flag and add it to the dirty -** page list. -*/ -static void makeDirty(PgHdr *pPg){ - if( pPg->dirty==0 ){ - Pager *pPager = pPg->pPager; - pPg->dirty = 1; - pPg->pDirty = pPager->pDirty; - if( pPager->pDirty ){ - pPager->pDirty->pPrevDirty = pPg; - } - pPg->pPrevDirty = 0; - pPager->pDirty = pPg; + assert( pPager->dbOrigSize==0 ); + assert( pPager->pInJournal==0 ); + rc = pager_open_journal(pPager); } -} -/* -** Make a page clean. Clear its dirty bit and remove it from the -** dirty page list. 
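The lock escalation performed at the start of a write transaction can be summarised by the following sketch. The callback signatures, the begin_write() name and the lock constants are placeholders chosen for illustration; only the ordering (RESERVED first, then an optional upgrade to EXCLUSIVE retried through the busy handler) is taken from the code above.

enum { LOCK_RESERVED = 2, LOCK_EXCLUSIVE = 4 };   /* illustrative values */

/* xLock stands in for the VFS locking call (0 on success, nonzero when the
** lock is busy); xBusy stands in for the busy handler (nonzero asks for
** another attempt).
*/
static int begin_write(
  int (*xLock)(void *pFile, int eLock),
  int (*xBusy)(void *pBusyArg),
  void *pFile,
  void *pBusyArg,
  int exFlag                    /* True to go straight to an EXCLUSIVE lock */
){
  int rc = xLock(pFile, LOCK_RESERVED);   /* the busy handler is not used here */
  if( rc==0 && exFlag ){
    do{
      rc = xLock(pFile, LOCK_EXCLUSIVE);  /* retried while the handler allows */
    }while( rc!=0 && xBusy(pBusyArg) );
  }
  return rc;
}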
-*/ -static void makeClean(PgHdr *pPg){ - if( pPg->dirty ){ - pPg->dirty = 0; - if( pPg->pDirty ){ - pPg->pDirty->pPrevDirty = pPg->pPrevDirty; - } - if( pPg->pPrevDirty ){ - pPg->pPrevDirty->pDirty = pPg->pDirty; - }else{ - pPg->pPager->pDirty = pPg->pDirty; - } - } + PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager))); + assert( !isOpen(pPager->jfd) || pPager->journalOff>0 || rc!=SQLITE_OK ); + return rc; } - /* -** Mark a data page as writeable. The page is written into the journal -** if it is not there already. This routine must be called before making -** changes to a page. -** -** The first time this routine is called, the pager creates a new -** journal and acquires a RESERVED lock on the database. If the RESERVED -** lock could not be acquired, this routine returns SQLITE_BUSY. The -** calling routine must check for that return value and be careful not to -** change any page data until this routine returns SQLITE_OK. -** -** If the journal file could not be written because the disk is full, -** then this routine returns SQLITE_FULL and does an immediate rollback. -** All subsequent write attempts also return SQLITE_FULL until there -** is a call to sqlite3PagerCommit() or sqlite3PagerRollback() to -** reset. +** Mark a single data page as writeable. The page is written into the +** main journal or sub-journal as required. If the page is written into +** one of the journals, the corresponding bit is set in the +** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs +** of any open savepoints as appropriate. */ static int pager_write(PgHdr *pPg){ - void *pData = PGHDR_TO_DATA(pPg); + void *pData = pPg->pData; Pager *pPager = pPg->pPager; int rc = SQLITE_OK; @@ -3512,25 +4193,12 @@ CHECK_PAGE(pPg); - /* If this page was previously acquired with noContent==1, that means - ** we didn't really read in the content of the page. This can happen - ** (for example) when the page is being moved to the freelist. But - ** now we are (perhaps) moving the page off of the freelist for - ** reuse and we need to know its original content so that content - ** can be stored in the rollback journal. So do the read at this - ** time. - */ - rc = pager_get_content(pPg); - if( rc ){ - return rc; - } - /* Mark the page as dirty. If the page has already been written ** to the journal then we can return right away. */ - makeDirty(pPg); - if( pPg->inJournal && (pageInStatement(pPg) || pPager->stmtInUse==0) ){ - pPager->dirtyCache = 1; + sqlite3PcacheMakeDirty(pPg); + if( pageInJournal(pPg) && !subjRequiresPage(pPg) ){ + pPager->dbModified = 1; }else{ /* If we get this far, it means that the page needs to be @@ -3541,81 +4209,88 @@ ** create it if it does not. */ assert( pPager->state!=PAGER_UNLOCK ); - rc = sqlite3PagerBegin(pPg, 0); + rc = sqlite3PagerBegin(pPager, 0, pPager->subjInMemory); if( rc!=SQLITE_OK ){ return rc; } assert( pPager->state>=PAGER_RESERVED ); - if( !pPager->journalOpen && pPager->useJournal ){ + if( !isOpen(pPager->jfd) && pPager->useJournal + && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ rc = pager_open_journal(pPager); if( rc!=SQLITE_OK ) return rc; } - assert( pPager->journalOpen || !pPager->useJournal ); - pPager->dirtyCache = 1; + pPager->dbModified = 1; /* The transaction journal now exists and we have a RESERVED or an ** EXCLUSIVE lock on the main database file. Write the current page to ** the transaction journal if it is not there already. 
*/ - if( !pPg->inJournal && (pPager->useJournal || MEMDB) ){ - if( (int)pPg->pgno <= pPager->origDbSize ){ - int szPg; - if( MEMDB ){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - PAGERTRACE3("JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - assert( pHist->pOrig==0 ); - pHist->pOrig = sqliteMallocRaw( pPager->pageSize ); - if( pHist->pOrig ){ - memcpy(pHist->pOrig, PGHDR_TO_DATA(pPg), pPager->pageSize); - } - }else{ - u32 cksum, saved; - char *pData2, *pEnd; - /* We should never write to the journal file the page that - ** contains the database locks. The following assert verifies - ** that we do not. */ - assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); - pData2 = CODEC2(pPager, pData, pPg->pgno, 7); - cksum = pager_cksum(pPager, (u8*)pData2); - pEnd = pData2 + pPager->pageSize; - pData2 -= 4; - saved = *(u32*)pEnd; - put32bits(pEnd, cksum); - szPg = pPager->pageSize+8; - put32bits(pData2, pPg->pgno); - rc = sqlite3OsWrite(pPager->jfd, pData2, szPg); - IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, - pPager->journalOff, szPg)); - PAGER_INCR(sqlite3_pager_writej_count); - pPager->journalOff += szPg; - PAGERTRACE5("JOURNAL %d page %d needSync=%d hash(%08x)\n", - PAGERID(pPager), pPg->pgno, pPg->needSync, pager_pagehash(pPg)); - *(u32*)pEnd = saved; + if( !pageInJournal(pPg) && isOpen(pPager->jfd) ){ + if( pPg->pgno<=pPager->dbOrigSize ){ + u32 cksum; + char *pData2; + + /* We should never write to the journal file the page that + ** contains the database locks. The following assert verifies + ** that we do not. */ + assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); + CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); + cksum = pager_cksum(pPager, (u8*)pData2); + rc = write32bits(pPager->jfd, pPager->journalOff, pPg->pgno); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, + pPager->journalOff + 4); + pPager->journalOff += pPager->pageSize+4; + } + if( rc==SQLITE_OK ){ + rc = write32bits(pPager->jfd, pPager->journalOff, cksum); + pPager->journalOff += 4; + } + IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, + pPager->journalOff, pPager->pageSize)); + PAGER_INCR(sqlite3_pager_writej_count); + PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n", + PAGERID(pPager), pPg->pgno, + ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg))); + + /* Even if an IO or diskfull error occurred while journalling the + ** page in the block above, set the need-sync flag for the page. + ** Otherwise, when the transaction is rolled back, the logic in + ** playback_one_page() will think that the page needs to be restored + ** in the database file. And if an IO error occurs while doing so, + ** then corruption may follow. + */ + if( !pPager->noSync ){ + pPg->flags |= PGHDR_NEED_SYNC; + pPager->needSync = 1; + } - /* An error has occured writing to the journal file. The - ** transaction will be rolled back by the layer above. - */ - if( rc!=SQLITE_OK ){ - return rc; - } + /* An error has occurred writing to the journal file. The + ** transaction will be rolled back by the layer above. 
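The three writes above produce one journal record per page. A standalone sketch of that record format, with invented helper names and a simplified error convention, looks like this: a 4-byte big-endian page number, the page image itself, then a 4-byte big-endian checksum, for a total of pageSize+8 bytes per record.

#include <unistd.h>

typedef unsigned int u32;

static void put_be32(unsigned char *p, u32 v){
  p[0]=(unsigned char)(v>>24); p[1]=(unsigned char)(v>>16);
  p[2]=(unsigned char)(v>>8);  p[3]=(unsigned char)v;
}

/* Append one page record to the journal at *piOfst; returns 0 on success
** or -1 on an I/O error.  The checksum is computed by the caller, seeded
** with the nonce stored in the journal header.
*/
static int write_journal_record(
  int fd,                       /* Journal file descriptor */
  off_t *piOfst,                /* IN/OUT: current journal write offset */
  u32 pgno,                     /* Number of the page being journalled */
  const unsigned char *aData,   /* Page image, pageSize bytes */
  int pageSize,
  u32 cksum
){
  unsigned char a4[4];
  put_be32(a4, pgno);
  if( pwrite(fd, a4, 4, *piOfst)!=4 ) return -1;
  if( pwrite(fd, aData, (size_t)pageSize, *piOfst+4)!=pageSize ) return -1;
  put_be32(a4, cksum);
  if( pwrite(fd, a4, 4, *piOfst+4+pageSize)!=4 ) return -1;
  *piOfst += pageSize + 8;      /* each record occupies pageSize+8 bytes */
  return 0;
}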
+ */ + if( rc!=SQLITE_OK ){ + return rc; + } - pPager->nRec++; - assert( pPager->aInJournal!=0 ); - pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); - pPg->needSync = !pPager->noSync; - if( pPager->stmtInUse ){ - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } + pPager->nRec++; + assert( pPager->pInJournal!=0 ); + rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno); + testcase( rc==SQLITE_NOMEM ); + assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); + rc |= addToSavepointBitvecs(pPager, pPg->pgno); + if( rc!=SQLITE_OK ){ + assert( rc==SQLITE_NOMEM ); + return rc; } }else{ - pPg->needSync = !pPager->journalStarted && !pPager->noSync; - PAGERTRACE4("APPEND %d page %d needSync=%d\n", - PAGERID(pPager), pPg->pgno, pPg->needSync); - } - if( pPg->needSync ){ - pPager->needSync = 1; + if( !pPager->journalStarted && !pPager->noSync ){ + pPg->flags |= PGHDR_NEED_SYNC; + pPager->needSync = 1; + } + PAGERTRACE(("APPEND %d page %d needSync=%d\n", + PAGERID(pPager), pPg->pgno, + ((pPg->flags&PGHDR_NEED_SYNC)?1:0))); } - pPg->inJournal = 1; } /* If the statement journal is open and the page is not in it, @@ -3623,56 +4298,33 @@ ** the statement journal format differs from the standard journal format ** in that it omits the checksums and the header. */ - if( pPager->stmtInUse - && !pageInStatement(pPg) - && (int)pPg->pgno<=pPager->stmtSize - ){ - assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); - if( MEMDB ){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - assert( pHist->pStmt==0 ); - pHist->pStmt = sqliteMallocRaw( pPager->pageSize ); - if( pHist->pStmt ){ - memcpy(pHist->pStmt, PGHDR_TO_DATA(pPg), pPager->pageSize); - } - PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - page_add_to_stmt_list(pPg); - }else{ - char *pData2 = CODEC2(pPager, pData, pPg->pgno, 7)-4; - put32bits(pData2, pPg->pgno); - rc = sqlite3OsWrite(pPager->stfd, pData2, pPager->pageSize+4); - PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - if( rc!=SQLITE_OK ){ - return rc; - } - pPager->stmtNRec++; - assert( pPager->aInStmt!=0 ); - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } + if( subjRequiresPage(pPg) ){ + rc = subjournalPage(pPg); } } /* Update the database size and return. */ assert( pPager->state>=PAGER_SHARED ); - if( pPager->dbSize<(int)pPg->pgno ){ + if( pPager->dbSizepgno ){ pPager->dbSize = pPg->pgno; - if( !MEMDB && pPager->dbSize==PENDING_BYTE/pPager->pageSize ){ - pPager->dbSize++; - } } return rc; } /* -** This function is used to mark a data-page as writable. It uses -** pager_write() to open a journal file (if it is not already open) -** and write the page *pData to the journal. +** Mark a data page as writeable. This routine must be called before +** making changes to a page. The caller must check the return value +** of this function and be careful not to change any page data unless +** this routine returns SQLITE_OK. ** ** The difference between this function and pager_write() is that this ** function also deals with the special case where 2 or more pages ** fit on a single disk sector. In this case all co-resident pages ** must have been written to the journal file before returning. +** +** If an error occurs, SQLITE_NOMEM or an IO error code is returned +** as appropriate. Otherwise, SQLITE_OK. 
*/ int sqlite3PagerWrite(DbPage *pDbPage){ int rc = SQLITE_OK; @@ -3681,15 +4333,17 @@ Pager *pPager = pPg->pPager; Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize); - if( !MEMDB && nPagePerSector>1 ){ + if( nPagePerSector>1 ){ Pgno nPageCount; /* Total number of pages in database file */ Pgno pg1; /* First page of the sector pPg is located on. */ int nPage; /* Number of pages starting at pg1 to journal */ - int ii; + int ii; /* Loop counter */ + int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */ /* Set the doNotSync flag to 1. This is because we cannot allow a journal ** header to be written between the pages journaled by this function. */ + assert( !MEMDB ); assert( pPager->doNotSync==0 ); pPager->doNotSync = 1; @@ -3699,7 +4353,7 @@ */ pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1; - nPageCount = sqlite3PagerPagecount(pPager); + sqlite3PagerPagecount(pPager, (int *)&nPageCount); if( pPg->pgno>nPageCount ){ nPage = (pPg->pgno - pg1)+1; }else if( (pg1+nPagePerSector-1)>nPageCount ){ @@ -3713,18 +4367,43 @@ for(ii=0; iiaInJournal || pg==pPg->pgno || - pg>pPager->origDbSize || !(pPager->aInJournal[pg/8]&(1<<(pg&7))) - ) { + PgHdr *pPage; + if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){ if( pg!=PAGER_MJ_PGNO(pPager) ){ - PgHdr *pPage; rc = sqlite3PagerGet(pPager, pg, &pPage); if( rc==SQLITE_OK ){ rc = pager_write(pPage); + if( pPage->flags&PGHDR_NEED_SYNC ){ + needSync = 1; + assert(pPager->needSync); + } sqlite3PagerUnref(pPage); } } + }else if( (pPage = pager_lookup(pPager, pg))!=0 ){ + if( pPage->flags&PGHDR_NEED_SYNC ){ + needSync = 1; + } + sqlite3PagerUnref(pPage); + } + } + + /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages + ** starting at pg1, then it needs to be set for all of them. Because + ** writing to any of these nPage pages may damage the others, the + ** journal file must contain sync()ed copies of all of them + ** before any of them can be written out to the database file. + */ + if( needSync ){ + assert( !MEMDB && pPager->noSync==0 ); + for(ii=0; iiflags |= PGHDR_NEED_SYNC; + sqlite3PagerUnref(pPage); + } } + assert(pPager->needSync); } assert( pPager->doNotSync==1 ); @@ -3742,147 +4421,131 @@ */ #ifndef NDEBUG int sqlite3PagerIswriteable(DbPage *pPg){ - return pPg->dirty; -} -#endif - -#ifndef SQLITE_OMIT_VACUUM -/* -** Replace the content of a single page with the information in the third -** argument. -*/ -int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void *pData){ - PgHdr *pPg; - int rc; - - rc = sqlite3PagerGet(pPager, pgno, &pPg); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pPg); - if( rc==SQLITE_OK ){ - memcpy(sqlite3PagerGetData(pPg), pData, pPager->pageSize); - } - sqlite3PagerUnref(pPg); - } - return rc; + return pPg->flags&PGHDR_DIRTY; } #endif /* ** A call to this routine tells the pager that it is not necessary to ** write the information on page pPg back to the disk, even though -** that page might be marked as dirty. +** that page might be marked as dirty. This happens, for example, when +** the page has been added as a leaf of the freelist and so its +** content no longer matters. ** ** The overlying software layer calls this routine when all of the data -** on the given page is unused. The pager marks the page as clean so +** on the given page is unused. The pager marks the page as clean so ** that it does not get written to disk. 
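The pg1/nPage arithmetic above is easy to get wrong, so here is the same computation as a standalone sketch; pages_on_same_sector() is an invented name, and the assumption (inherited from the code above) is that sectorSize is a power-of-two multiple of pageSize, so nPagePerSector is at least 1.

typedef unsigned int Pgno;

/* Given the page about to be written, report the first page of the sector
** it lives on and how many pages, starting there, must be journalled
** together before any of them is modified.
*/
static void pages_on_same_sector(
  Pgno pgno,          /* Page being written (1-based) */
  int pageSize,       /* Database page size */
  int sectorSize,     /* Disk sector size, >= pageSize */
  Pgno nDbPage,       /* Total pages currently in the database */
  Pgno *pPg1,         /* OUT: first page on the same sector */
  int *pnPage         /* OUT: number of pages to journal, starting at *pPg1 */
){
  Pgno nPagePerSector = (Pgno)(sectorSize/pageSize);
  Pgno pg1 = ((pgno-1) & ~(nPagePerSector-1)) + 1;
  if( pgno>nDbPage ){
    *pnPage = (int)(pgno - pg1) + 1;        /* page lies past the current EOF */
  }else if( pg1+nPagePerSector-1>nDbPage ){
    *pnPage = (int)(nDbPage + 1 - pg1);     /* sector straddles the EOF */
  }else{
    *pnPage = (int)nPagePerSector;          /* the whole sector exists */
  }
  *pPg1 = pg1;
}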
** -** Tests show that this optimization, together with the -** sqlite3PagerDontRollback() below, more than double the speed -** of large INSERT operations and quadruple the speed of large DELETEs. -** -** When this routine is called, set the alwaysRollback flag to true. -** Subsequent calls to sqlite3PagerDontRollback() for the same page -** will thereafter be ignored. This is necessary to avoid a problem -** where a page with data is added to the freelist during one part of -** a transaction then removed from the freelist during a later part -** of the same transaction and reused for some other purpose. When it -** is first added to the freelist, this routine is called. When reused, -** the sqlite3PagerDontRollback() routine is called. But because the -** page contains critical data, we still need to be sure it gets -** rolled back in spite of the sqlite3PagerDontRollback() call. +** Tests show that this optimization can quadruple the speed of large +** DELETE operations. */ -void sqlite3PagerDontWrite(DbPage *pDbPage){ - PgHdr *pPg = pDbPage; +void sqlite3PagerDontWrite(PgHdr *pPg){ Pager *pPager = pPg->pPager; - - if( MEMDB ) return; - pPg->alwaysRollback = 1; - if( pPg->dirty && !pPager->stmtInUse ){ - assert( pPager->state>=PAGER_SHARED ); - if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSizedbSize ){ - /* If this pages is the last page in the file and the file has grown - ** during the current transaction, then do NOT mark the page as clean. - ** When the database file grows, we must make sure that the last page - ** gets written at least once so that the disk file will be the correct - ** size. If you do not write this page and the size of the file - ** on the disk ends up being too small, that can lead to database - ** corruption during the next transaction. - */ - }else{ - PAGERTRACE3("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)); - IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) - makeClean(pPg); + if( (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ + PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); + IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) + pPg->flags |= PGHDR_DONT_WRITE; #ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); + pPg->pageHash = pager_pagehash(pPg); #endif - } } } /* -** A call to this routine tells the pager that if a rollback occurs, -** it is not necessary to restore the data on the given page. This -** means that the pager does not have to record the given page in the -** rollback journal. -** -** If we have not yet actually read the content of this page (if -** the PgHdr.needRead flag is set) then this routine acts as a promise -** that we will never need to read the page content in the future. -** so the needRead flag can be cleared at this point. +** This routine is called to increment the value of the database file +** change-counter, stored as a 4-byte big-endian integer starting at +** byte offset 24 of the pager file. +** +** If the isDirect flag is zero, then this is done by calling +** sqlite3PagerWrite() on page 1, then modifying the contents of the +** page data. In this case the file will be updated when the current +** transaction is committed. +** +** The isDirect flag may only be non-zero if the library was compiled +** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case, +** if isDirect is non-zero, then the database file is updated directly +** by writing an updated version of page 1 using a call to the +** sqlite3OsWrite() function. 
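Since the change counter is a big-endian 32-bit integer at byte offset 24 of page 1, the update itself reduces to the small sketch below. bump_change_counter() is an invented name; it operates on an in-memory copy of page 1 and leaves the journalled or direct write to the caller.

typedef unsigned int u32;

static u32 get_be32(const unsigned char *p){
  return ((u32)p[0]<<24) | ((u32)p[1]<<16) | ((u32)p[2]<<8) | (u32)p[3];
}
static void put_be32(unsigned char *p, u32 v){
  p[0]=(unsigned char)(v>>24); p[1]=(unsigned char)(v>>16);
  p[2]=(unsigned char)(v>>8);  p[3]=(unsigned char)v;
}

/* aPage1 is the in-memory image of database page 1. */
static void bump_change_counter(unsigned char *aPage1){
  u32 counter = get_be32(&aPage1[24]);   /* read the current counter */
  put_be32(&aPage1[24], counter+1);      /* write back counter+1 */
}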
*/ -void sqlite3PagerDontRollback(DbPage *pPg){ - Pager *pPager = pPg->pPager; +static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ + int rc = SQLITE_OK; - assert( pPager->state>=PAGER_RESERVED ); - if( pPager->journalOpen==0 ) return; - if( pPg->alwaysRollback || pPager->alwaysRollback || MEMDB ) return; - if( !pPg->inJournal && (int)pPg->pgno <= pPager->origDbSize ){ - assert( pPager->aInJournal!=0 ); - pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); - pPg->inJournal = 1; - pPg->needRead = 0; - if( pPager->stmtInUse ){ - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } - PAGERTRACE3("DONT_ROLLBACK page %d of %d\n", pPg->pgno, PAGERID(pPager)); - IOTRACE(("GARBAGE %p %d\n", pPager, pPg->pgno)) - } - if( pPager->stmtInUse - && !pageInStatement(pPg) - && (int)pPg->pgno<=pPager->stmtSize - ){ - assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); - assert( pPager->aInStmt!=0 ); - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } -} + /* Declare and initialize constant integer 'isDirect'. If the + ** atomic-write optimization is enabled in this build, then isDirect + ** is initialized to the value passed as the isDirectMode parameter + ** to this function. Otherwise, it is always set to zero. + ** + ** The idea is that if the atomic-write optimization is not + ** enabled at compile time, the compiler can omit the tests of + ** 'isDirect' below, as well as the block enclosed in the + ** "if( isDirect )" condition. + */ +#ifndef SQLITE_ENABLE_ATOMIC_WRITE + const int isDirect = 0; + assert( isDirectMode==0 ); + UNUSED_PARAMETER(isDirectMode); +#else + const int isDirect = isDirectMode; +#endif + assert( pPager->state>=PAGER_RESERVED ); + if( !pPager->changeCountDone && pPager->dbSize>0 ){ + PgHdr *pPgHdr; /* Reference to page 1 */ + u32 change_counter; /* Initial value of change-counter field */ -/* -** This routine is called to increment the database file change-counter, -** stored at byte 24 of the pager file. -*/ -static int pager_incr_changecounter(Pager *pPager){ - PgHdr *pPgHdr; - u32 change_counter; - int rc; + assert( !pPager->tempFile && isOpen(pPager->fd) ); - if( !pPager->changeCountDone ){ /* Open page 1 of the file for writing. */ rc = sqlite3PagerGet(pPager, 1, &pPgHdr); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3PagerWrite(pPgHdr); - if( rc!=SQLITE_OK ) return rc; - - /* Increment the value just read and write it back to byte 24. */ - change_counter = sqlite3Get4byte((u8*)pPager->dbFileVers); - change_counter++; - put32bits(((char*)PGHDR_TO_DATA(pPgHdr))+24, change_counter); + assert( pPgHdr==0 || rc==SQLITE_OK ); + + /* If page one was fetched successfully, and this function is not + ** operating in direct-mode, make page 1 writable. + */ + if( rc==SQLITE_OK && !isDirect ){ + rc = sqlite3PagerWrite(pPgHdr); + } + + if( rc==SQLITE_OK ){ + /* Increment the value just read and write it back to byte 24. */ + change_counter = sqlite3Get4byte((u8*)pPager->dbFileVers); + change_counter++; + put32bits(((char*)pPgHdr->pData)+24, change_counter); + + /* If running in direct mode, write the contents of page 1 to the file. */ + if( isDirect ){ + const void *zBuf = pPgHdr->pData; + assert( pPager->dbFileSize>0 ); + rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); + } + + /* If everything worked, set the changeCountDone flag. */ + if( rc==SQLITE_OK ){ + pPager->changeCountDone = 1; + } + } + /* Release the page reference. 
*/ sqlite3PagerUnref(pPgHdr); - pPager->changeCountDone = 1; } - return SQLITE_OK; + return rc; +} + +/* +** Sync the pager file to disk. This is a no-op for in-memory files +** or pages with the Pager.noSync flag set. +** +** If successful, or called on a pager for which it is a no-op, this +** function returns SQLITE_OK. Otherwise, an IO error code is returned. +*/ +int sqlite3PagerSync(Pager *pPager){ + int rc; /* Return code */ + if( MEMDB || pPager->noSync ){ + rc = SQLITE_OK; + }else{ + rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); + } + return rc; } /* @@ -3891,95 +4554,176 @@ ** journal file. zMaster may be NULL, which is interpreted as no master ** journal (a single database transaction). ** -** This routine ensures that the journal is synced, all dirty pages written -** to the database file and the database file synced. The only thing that -** remains to commit the transaction is to delete the journal file (or -** master journal file if specified). +** This routine ensures that: +** +** * The database file change-counter is updated, +** * the journal is synced (unless the atomic-write optimization is used), +** * all dirty pages are written to the database file, +** * the database file is truncated (if required), and +** * the database file synced. +** +** The only thing that remains to commit the transaction is to finalize +** (delete, truncate or zero the first part of) the journal file (or +** delete the master journal file if specified). ** ** Note that if zMaster==NULL, this does not overwrite a previous value ** passed to an sqlite3PagerCommitPhaseOne() call. ** -** If parameter nTrunc is non-zero, then the pager file is truncated to -** nTrunc pages (this is used by auto-vacuum databases). -*/ -int sqlite3PagerCommitPhaseOne(Pager *pPager, const char *zMaster, Pgno nTrunc){ - int rc = SQLITE_OK; +** If the final parameter - noSync - is true, then the database file itself +** is not synced. The caller must call sqlite3PagerSync() directly to +** sync the database file before calling CommitPhaseTwo() to delete the +** journal file in this case. +*/ +int sqlite3PagerCommitPhaseOne( + Pager *pPager, /* Pager object */ + const char *zMaster, /* If not NULL, the master journal name */ + int noSync /* True to omit the xSync on the db file */ +){ + int rc = SQLITE_OK; /* Return code */ + + if( pPager->errCode ){ + return pPager->errCode; + } - PAGERTRACE4("DATABASE SYNC: File=%s zMaster=%s nTrunc=%d\n", - pPager->zFilename, zMaster, nTrunc); + PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", + pPager->zFilename, zMaster, pPager->dbSize)); /* If this is an in-memory db, or no pages have been written to, or this ** function has already been called, it is a no-op. */ - if( pPager->state!=PAGER_SYNCED && !MEMDB && pPager->dirtyCache ){ + if( MEMDB && pPager->dbModified ){ + sqlite3BackupRestart(pPager->pBackup); + }else if( pPager->state!=PAGER_SYNCED && pPager->dbModified ){ + + /* The following block updates the change-counter. Exactly how it + ** does this depends on whether or not the atomic-update optimization + ** was enabled at compile time, and if this transaction meets the + ** runtime criteria to use the operation: + ** + ** * The file-system supports the atomic-write property for + ** blocks of size page-size, and + ** * This commit is not part of a multi-file transaction, and + ** * Exactly one page has been modified and store in the journal file. 
+ ** + ** If the optimization was not enabled at compile time, then the + ** pager_incr_changecounter() function is called to update the change + ** counter in 'indirect-mode'. If the optimization is compiled in but + ** is not applicable to this transaction, call sqlite3JournalCreate() + ** to make sure the journal file has actually been created, then call + ** pager_incr_changecounter() to update the change-counter in indirect + ** mode. + ** + ** Otherwise, if the optimization is both enabled and applicable, + ** then call pager_incr_changecounter() to update the change-counter + ** in 'direct' mode. In this case the journal file will never be + ** created for this transaction. + */ +#ifdef SQLITE_ENABLE_ATOMIC_WRITE PgHdr *pPg; - assert( pPager->journalOpen ); - - /* If a master journal file name has already been written to the - ** journal file, then no sync is required. This happens when it is - ** written, then the process fails to upgrade from a RESERVED to an - ** EXCLUSIVE lock. The next time the process tries to commit the - ** transaction the m-j name will have already been written. - */ - if( !pPager->setMaster ){ - rc = pager_incr_changecounter(pPager); - if( rc!=SQLITE_OK ) goto sync_exit; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( nTrunc!=0 ){ - /* If this transaction has made the database smaller, then all pages - ** being discarded by the truncation must be written to the journal - ** file. - */ - Pgno i; - int iSkip = PAGER_MJ_PGNO(pPager); - for( i=nTrunc+1; i<=pPager->origDbSize; i++ ){ - if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=iSkip ){ - rc = sqlite3PagerGet(pPager, i, &pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - rc = sqlite3PagerWrite(pPg); - sqlite3PagerUnref(pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - } - } + assert( isOpen(pPager->jfd) || pPager->journalMode==PAGER_JOURNALMODE_OFF ); + if( !zMaster && isOpen(pPager->jfd) + && pPager->journalOff==jrnlBufferSize(pPager) + && pPager->dbSize>=pPager->dbFileSize + && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) + ){ + /* Update the db file change counter via the direct-write method. The + ** following call will modify the in-memory representation of page 1 + ** to include the updated change counter and then write page 1 + ** directly to the database file. Because of the atomic-write + ** property of the host file-system, this is safe. + */ + rc = pager_incr_changecounter(pPager, 1); + }else{ + rc = sqlite3JournalCreate(pPager->jfd); + if( rc==SQLITE_OK ){ + rc = pager_incr_changecounter(pPager, 0); } -#endif - rc = writeMasterJournal(pPager, zMaster); - if( rc!=SQLITE_OK ) goto sync_exit; - rc = syncJournal(pPager); - if( rc!=SQLITE_OK ) goto sync_exit; } +#else + rc = pager_incr_changecounter(pPager, 0); +#endif + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + /* If this transaction has made the database smaller, then all pages + ** being discarded by the truncation must be written to the journal + ** file. This can only happen in auto-vacuum mode. + ** + ** Before reading the pages with page numbers larger than the + ** current value of Pager.dbSize, set dbSize back to the value + ** that it took at the start of the transaction. Otherwise, the + ** calls to sqlite3PagerGet() return zeroed pages instead of + ** reading data from the database file. 
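The eligibility test for the direct-write (atomic-write) path can be restated on its own. The struct and field names below are invented stand-ins for the pager state consulted above; the conditions themselves, as I read that code, are: no master journal, nothing written to the journal beyond the buffered header, a database that is not shrinking, and exactly one dirty page.

typedef struct CommitInfo CommitInfo;
struct CommitInfo {
  const char *zMaster;   /* Master journal name, or NULL for a single-file commit */
  int journalOpen;       /* True if a journal file handle is open */
  long long journalOff;  /* Bytes written to the journal so far */
  long long hdrSize;     /* Size of the buffered journal header */
  unsigned dbSize;       /* Database size at commit time, in pages */
  unsigned dbFileSize;   /* Size of the file on disk, in pages */
  int nDirty;            /* Number of dirty pages in the cache */
};

static int can_use_atomic_write(const CommitInfo *p){
  return p->zMaster==0                 /* not part of a multi-file transaction */
      && p->journalOpen
      && p->journalOff==p->hdrSize     /* nothing journalled yet */
      && p->dbSize>=p->dbFileSize      /* the database is not shrinking */
      && p->nDirty==1;                 /* exactly one page was modified */
}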
+ */ #ifndef SQLITE_OMIT_AUTOVACUUM - if( nTrunc!=0 ){ - rc = sqlite3PagerTruncate(pPager, nTrunc); - if( rc!=SQLITE_OK ) goto sync_exit; + if( pPager->dbSizedbOrigSize + && pPager->journalMode!=PAGER_JOURNALMODE_OFF + ){ + Pgno i; /* Iterator variable */ + const Pgno iSkip = PAGER_MJ_PGNO(pPager); /* Pending lock page */ + const Pgno dbSize = pPager->dbSize; /* Database image size */ + pPager->dbSize = pPager->dbOrigSize; + for( i=dbSize+1; i<=pPager->dbOrigSize; i++ ){ + if( !sqlite3BitvecTest(pPager->pInJournal, i) && i!=iSkip ){ + PgHdr *pPage; /* Page to journal */ + rc = sqlite3PagerGet(pPager, i, &pPage); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + rc = sqlite3PagerWrite(pPage); + sqlite3PagerUnref(pPage); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + } + } + pPager->dbSize = dbSize; } #endif - /* Write all dirty pages to the database file */ - pPg = pager_get_all_dirty_pages(pPager); - rc = pager_write_pagelist(pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - pPager->pDirty = 0; + /* Write the master journal name into the journal file. If a master + ** journal file name has already been written to the journal file, + ** or if zMaster is NULL (no master journal), then this call is a no-op. + */ + rc = writeMasterJournal(pPager, zMaster); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* Sync the journal file. If the atomic-update optimization is being + ** used, this call will not create the journal file or perform any + ** real IO. + */ + rc = syncJournal(pPager); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - /* Sync the database file. */ - if( !pPager->noSync ){ - rc = sqlite3OsSync(pPager->fd, 0); + /* Write all dirty pages to the database file. */ + rc = pager_write_pagelist(sqlite3PcacheDirtyList(pPager->pPCache)); + if( rc!=SQLITE_OK ){ + assert( rc!=SQLITE_IOERR_BLOCKED ); + goto commit_phase_one_exit; + } + sqlite3PcacheCleanAll(pPager->pPCache); + + /* If the file on disk is not the same size as the database image, + ** then use pager_truncate to grow or shrink the file here. + */ + if( pPager->dbSize!=pPager->dbFileSize ){ + Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); + assert( pPager->state>=PAGER_EXCLUSIVE ); + rc = pager_truncate(pPager, nNew); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + } + + /* Finally, sync the database file. */ + if( !pPager->noSync && !noSync ){ + rc = sqlite3OsSync(pPager->fd, pPager->sync_flags); } IOTRACE(("DBSYNC %p\n", pPager)) pPager->state = PAGER_SYNCED; - }else if( MEMDB && nTrunc!=0 ){ - rc = sqlite3PagerTruncate(pPager, nTrunc); } -sync_exit: +commit_phase_one_exit: if( rc==SQLITE_IOERR_BLOCKED ){ /* pager_incr_changecounter() may attempt to obtain an exclusive - * lock to spill the cache and return IOERR_BLOCKED. But since - * there is no chance the cache is inconsistent, it's - * better to return SQLITE_BUSY. - */ + ** lock to spill the cache and return IOERR_BLOCKED. But since + ** there is no chance the cache is inconsistent, it is + ** better to return SQLITE_BUSY. + **/ rc = SQLITE_BUSY; } return rc; @@ -3987,141 +4731,143 @@ /* -** Commit all changes to the database and release the write lock. +** When this function is called, the database file has been completely +** updated to reflect the changes made by the current transaction and +** synced to disk. The journal file still exists in the file-system +** though, and if a failure occurs at this point it will eventually +** be used as a hot-journal and the current transaction rolled back. 
+** +** This function finalizes the journal file, either by deleting, +** truncating or partially zeroing it, so that it cannot be used +** for hot-journal rollback. Once this is done the transaction is +** irrevocably committed. ** -** If the commit fails for any reason, a rollback attempt is made -** and an error code is returned. If the commit worked, SQLITE_OK -** is returned. +** If an error occurs, an IO error code is returned and the pager +** moves into the error state. Otherwise, SQLITE_OK is returned. */ int sqlite3PagerCommitPhaseTwo(Pager *pPager){ - int rc; - PgHdr *pPg; + int rc = SQLITE_OK; /* Return code */ + /* Do not proceed if the pager is already in the error state. */ if( pPager->errCode ){ return pPager->errCode; } - if( pPager->statestatedirty = 0; - pPg->inJournal = 0; - pHist->inStmt = 0; - pPg->needSync = 0; - pHist->pPrevStmt = pHist->pNextStmt = 0; - pPg = pPg->pDirty; - } - pPager->pDirty = 0; -#ifndef NDEBUG - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - assert( !pPg->alwaysRollback ); - assert( !pHist->pOrig ); - assert( !pHist->pStmt ); - } -#endif - pPager->pStmt = 0; - pPager->state = PAGER_SHARED; + + /* An optimization. If the database was not actually modified during + ** this transaction, the pager is running in exclusive-mode and is + ** using persistent journals, then this function is a no-op. + ** + ** The start of the journal file currently contains a single journal + ** header with the nRec field set to 0. If such a journal is used as + ** a hot-journal during hot-journal rollback, 0 changes will be made + ** to the database file. So there is no need to zero the journal + ** header. Since the pager is in exclusive mode, there is no need + ** to drop any locks either. + */ + if( pPager->dbModified==0 && pPager->exclusiveMode + && pPager->journalMode==PAGER_JOURNALMODE_PERSIST + ){ + assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); return SQLITE_OK; } - assert( pPager->journalOpen || !pPager->dirtyCache ); - assert( pPager->state==PAGER_SYNCED || !pPager->dirtyCache ); - rc = pager_end_transaction(pPager); + + PAGERTRACE(("COMMIT %d\n", PAGERID(pPager))); + assert( pPager->state==PAGER_SYNCED || MEMDB || !pPager->dbModified ); + rc = pager_end_transaction(pPager, pPager->setMaster); return pager_error(pPager, rc); } /* -** Rollback all changes. The database falls back to PAGER_SHARED mode. -** All in-memory cache pages revert to their original data contents. -** The journal is deleted. -** -** This routine cannot fail unless some other process is not following -** the correct locking protocol or unless some other -** process is writing trash into the journal file (SQLITE_CORRUPT) or -** unless a prior malloc() failed (SQLITE_NOMEM). Appropriate error -** codes are returned for all these occasions. Otherwise, -** SQLITE_OK is returned. +** Rollback all changes. The database falls back to PAGER_SHARED mode. +** +** This function performs two tasks: +** +** 1) It rolls back the journal file, restoring all database file and +** in-memory cache pages to the state they were in when the transaction +** was opened, and +** 2) It finalizes the journal file, so that it is not used for hot +** rollback at any point in the future. +** +** subject to the following qualifications: +** +** * If the journal file is not yet open when this function is called, +** then only (2) is performed. In this case there is no journal file +** to roll back. 
+** +** * If in an error state other than SQLITE_FULL, then task (1) is +** performed. If successful, task (2). Regardless of the outcome +** of either, the error state error code is returned to the caller +** (i.e. either SQLITE_IOERR or SQLITE_CORRUPT). +** +** * If the pager is in PAGER_RESERVED state, then attempt (1). Whether +** or not (1) is succussful, also attempt (2). If successful, return +** SQLITE_OK. Otherwise, enter the error state and return the first +** error code encountered. +** +** In this case there is no chance that the database was written to. +** So is safe to finalize the journal file even if the playback +** (operation 1) failed. However the pager must enter the error state +** as the contents of the in-memory cache are now suspect. +** +** * Finally, if in PAGER_EXCLUSIVE state, then attempt (1). Only +** attempt (2) if (1) is successful. Return SQLITE_OK if successful, +** otherwise enter the error state and return the error code from the +** failing operation. +** +** In this case the database file may have been written to. So if the +** playback operation did not succeed it would not be safe to finalize +** the journal file. It needs to be left in the file-system so that +** some other process can use it to restore the database state (by +** hot-journal rollback). */ int sqlite3PagerRollback(Pager *pPager){ - int rc; - PAGERTRACE2("ROLLBACK %d\n", PAGERID(pPager)); - if( MEMDB ){ - PgHdr *p; - for(p=pPager->pAll; p; p=p->pNextAll){ - PgHistory *pHist; - assert( !p->alwaysRollback ); - if( !p->dirty ){ - assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pOrig ); - assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pStmt ); - continue; - } - - pHist = PGHDR_TO_HIST(p, pPager); - if( pHist->pOrig ){ - memcpy(PGHDR_TO_DATA(p), pHist->pOrig, pPager->pageSize); - PAGERTRACE3("ROLLBACK-PAGE %d of %d\n", p->pgno, PAGERID(pPager)); - }else{ - PAGERTRACE3("PAGE %d is clean on %d\n", p->pgno, PAGERID(pPager)); - } - clearHistory(pHist); - p->dirty = 0; - p->inJournal = 0; - pHist->inStmt = 0; - pHist->pPrevStmt = pHist->pNextStmt = 0; - if( pPager->xReiniter ){ - pPager->xReiniter(p, pPager->pageSize); - } - } - pPager->pDirty = 0; - pPager->pStmt = 0; - pPager->dbSize = pPager->origDbSize; - pager_truncate_cache(pPager); - pPager->stmtInUse = 0; - pPager->state = PAGER_SHARED; - return SQLITE_OK; - } - - if( !pPager->dirtyCache || !pPager->journalOpen ){ - rc = pager_end_transaction(pPager); - return rc; - } - - if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ + int rc = SQLITE_OK; /* Return code */ + PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager))); + if( !pPager->dbModified || !isOpen(pPager->jfd) ){ + rc = pager_end_transaction(pPager, pPager->setMaster); + }else if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ if( pPager->state>=PAGER_EXCLUSIVE ){ pager_playback(pPager, 0); } - return pPager->errCode; - } - if( pPager->state==PAGER_RESERVED ){ - int rc2; - rc = pager_playback(pPager, 0); - rc2 = pager_end_transaction(pPager); - if( rc==SQLITE_OK ){ - rc = rc2; - } + rc = pPager->errCode; }else{ - rc = pager_playback(pPager, 0); - } - /* pager_reset(pPager); */ - pPager->dbSize = -1; + if( pPager->state==PAGER_RESERVED ){ + int rc2; + rc = pager_playback(pPager, 0); + rc2 = pager_end_transaction(pPager, pPager->setMaster); + if( rc==SQLITE_OK ){ + rc = rc2; + } + }else{ + rc = pager_playback(pPager, 0); + } - /* If an error occurs during a ROLLBACK, we can no longer trust the pager - ** cache. 
So call pager_error() on the way out to make any error - ** persistent. - */ - return pager_error(pPager, rc); + if( !MEMDB ){ + pPager->dbSizeValid = 0; + } + + /* If an error occurs during a ROLLBACK, we can no longer trust the pager + ** cache. So call pager_error() on the way out to make any error + ** persistent. + */ + rc = pager_error(pPager, rc); + } + return rc; } /* ** Return TRUE if the database file is opened read-only. Return FALSE ** if the database is (in theory) writable. */ -int sqlite3PagerIsreadonly(Pager *pPager){ +u8 sqlite3PagerIsreadonly(Pager *pPager){ return pPager->readOnly; } @@ -4129,7 +4875,14 @@ ** Return the number of references to the pager. */ int sqlite3PagerRefcount(Pager *pPager){ - return pPager->nRef; + return sqlite3PcacheRefCount(pPager->pPCache); +} + +/* +** Return the number of references to the specified page. +*/ +int sqlite3PagerPageRefcount(DbPage *pPage){ + return sqlite3PcachePageRefcount(pPage); } #ifdef SQLITE_TEST @@ -4138,10 +4891,10 @@ */ int *sqlite3PagerStats(Pager *pPager){ static int a[11]; - a[0] = pPager->nRef; - a[1] = pPager->nPage; - a[2] = pPager->mxPage; - a[3] = pPager->dbSize; + a[0] = sqlite3PcacheRefCount(pPager->pPCache); + a[1] = sqlite3PcachePagecount(pPager->pPCache); + a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); + a[3] = pPager->dbSizeValid ? (int) pPager->dbSize : -1; a[4] = pPager->state; a[5] = pPager->errCode; a[6] = pPager->nHit; @@ -4154,119 +4907,141 @@ #endif /* -** Set the statement rollback point. -** -** This routine should be called with the transaction journal already -** open. A new statement journal is created that can be used to rollback -** changes of a single SQL command within a larger transaction. +** Return true if this is an in-memory pager. */ -int sqlite3PagerStmtBegin(Pager *pPager){ - int rc; - assert( !pPager->stmtInUse ); - assert( pPager->state>=PAGER_SHARED ); - assert( pPager->dbSize>=0 ); - PAGERTRACE2("STMT-BEGIN %d\n", PAGERID(pPager)); - if( MEMDB ){ - pPager->stmtInUse = 1; - pPager->stmtSize = pPager->dbSize; - return SQLITE_OK; - } - if( !pPager->journalOpen ){ - pPager->stmtAutoopen = 1; - return SQLITE_OK; - } - assert( pPager->journalOpen ); - pPager->aInStmt = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( pPager->aInStmt==0 ){ - /* sqlite3OsLock(pPager->fd, SHARED_LOCK); */ - return SQLITE_NOMEM; - } -#ifndef NDEBUG - rc = sqlite3OsFileSize(pPager->jfd, &pPager->stmtJSize); - if( rc ) goto stmt_begin_failed; - assert( pPager->stmtJSize == pPager->journalOff ); -#endif - pPager->stmtJSize = pPager->journalOff; - pPager->stmtSize = pPager->dbSize; - pPager->stmtHdrOff = 0; - pPager->stmtCksum = pPager->cksumInit; - if( !pPager->stmtOpen ){ - rc = sqlite3PagerOpentemp(&pPager->stfd); - if( rc ) goto stmt_begin_failed; - pPager->stmtOpen = 1; - pPager->stmtNRec = 0; - } - pPager->stmtInUse = 1; - return SQLITE_OK; - -stmt_begin_failed: - if( pPager->aInStmt ){ - sqliteFree(pPager->aInStmt); - pPager->aInStmt = 0; - } - return rc; +int sqlite3PagerIsMemdb(Pager *pPager){ + return MEMDB; } /* -** Commit a statement. +** Check that there are at least nSavepoint savepoints open. If there are +** currently less than nSavepoints open, then open one or more savepoints +** to make up the difference. If the number of savepoints is already +** equal to nSavepoint, then this function is a no-op. +** +** If a memory allocation fails, SQLITE_NOMEM is returned. If an error +** occurs while opening the sub-journal file, then an IO error code is +** returned. Otherwise, SQLITE_OK. 
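
As an editorial aside (not part of the patch itself), here is a minimal sketch of how a caller might use the savepoint-opening interface described above. The helper name is hypothetical, and a Pager with an open write transaction is assumed, which is what the btree layer normally guarantees before SQL savepoints are created:

/* Hypothetical illustration only; exampleOpenTwoSavepoints() is not part
** of SQLite or of this patch. */
static int exampleOpenTwoSavepoints(Pager *pPager){
  /* Ensure at least two savepoints are open. If two or more already
  ** exist this call is a no-op; otherwise Pager.aSavepoint[] is grown
  ** and the sub-journal file is opened. */
  int rc = sqlite3PagerOpenSavepoint(pPager, 2);
  return rc;   /* SQLITE_OK, SQLITE_NOMEM, or an IO error code */
}
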
 */
-int sqlite3PagerStmtCommit(Pager *pPager){
-  if( pPager->stmtInUse ){
-    PgHdr *pPg, *pNext;
-    PAGERTRACE2("STMT-COMMIT %d\n", PAGERID(pPager));
-    if( !MEMDB ){
-      sqlite3OsSeek(pPager->stfd, 0);
-      /* sqlite3OsTruncate(pPager->stfd, 0); */
-      sqliteFree( pPager->aInStmt );
-      pPager->aInStmt = 0;
-    }else{
-      for(pPg=pPager->pStmt; pPg; pPg=pNext){
-        PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager);
-        pNext = pHist->pNextStmt;
-        assert( pHist->inStmt );
-        pHist->inStmt = 0;
-        pHist->pPrevStmt = pHist->pNextStmt = 0;
-        sqliteFree(pHist->pStmt);
-        pHist->pStmt = 0;
+int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
+  int rc = SQLITE_OK;                 /* Return code */
+  int nCurrent = pPager->nSavepoint;  /* Current number of savepoints */
+
+  if( nSavepoint>nCurrent && pPager->useJournal ){
+    int ii;                           /* Iterator variable */
+    PagerSavepoint *aNew;             /* New Pager.aSavepoint array */
+
+    /* Either there is no active journal or the sub-journal is open or
+    ** the journal is always stored in memory */
+    assert( pPager->nSavepoint==0 || isOpen(pPager->sjfd) ||
+            pPager->journalMode==PAGER_JOURNALMODE_MEMORY );
+
+    /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
+    ** if the allocation fails. Otherwise, zero the new portion in case a
+    ** malloc failure occurs while populating it in the for(...) loop below.
+    */
+    aNew = (PagerSavepoint *)sqlite3Realloc(
+        pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint
+    );
+    if( !aNew ){
+      return SQLITE_NOMEM;
+    }
+    memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint));
+    pPager->aSavepoint = aNew;
+    pPager->nSavepoint = nSavepoint;
+
+    /* Populate the PagerSavepoint structures just allocated. */
+    for(ii=nCurrent; ii<nSavepoint; ii++){
+      assert( pPager->dbSizeValid );
+      aNew[ii].nOrig = pPager->dbSize;
+      if( isOpen(pPager->jfd) && pPager->journalOff>0 ){
+        aNew[ii].iOffset = pPager->journalOff;
+      }else{
+        aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager);
+      }
+      aNew[ii].iSubRec = pPager->nSubRec;
+      aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize);
+      if( !aNew[ii].pInSavepoint ){
+        return SQLITE_NOMEM;
+      }
     }
-    pPager->stmtNRec = 0;
-    pPager->stmtInUse = 0;
-    pPager->pStmt = 0;
+
+    /* Open the sub-journal, if it is not already opened. */
+    rc = openSubJournal(pPager);
   }
-  pPager->stmtAutoopen = 0;
-  return SQLITE_OK;
+
+  return rc;
 }
 
 /*
-** Rollback a statement.
-*/
-int sqlite3PagerStmtRollback(Pager *pPager){
-  int rc;
-  if( pPager->stmtInUse ){
-    PAGERTRACE2("STMT-ROLLBACK %d\n", PAGERID(pPager));
-    if( MEMDB ){
-      PgHdr *pPg;
-      PgHistory *pHist;
-      for(pPg=pPager->pStmt; pPg; pPg=pHist->pNextStmt){
-        pHist = PGHDR_TO_HIST(pPg, pPager);
-        if( pHist->pStmt ){
-          memcpy(PGHDR_TO_DATA(pPg), pHist->pStmt, pPager->pageSize);
-          sqliteFree(pHist->pStmt);
-          pHist->pStmt = 0;
-        }
-      }
-      pPager->dbSize = pPager->stmtSize;
-      pager_truncate_cache(pPager);
-      rc = SQLITE_OK;
-    }else{
-      rc = pager_stmt_playback(pPager);
+** This function is called to rollback or release (commit) a savepoint.
+** The savepoint to release or rollback need not be the most recently
+** created savepoint.
+**
+** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE.
+** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with
+** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes
+** that have occurred since the specified savepoint was created.
+**
+** The savepoint to rollback or release is identified by parameter
+** iSavepoint. A value of 0 means to operate on the outermost savepoint
+** (the first created).
+** A value of (Pager.nSavepoint-1) means operate
+** on the most recently created savepoint. If iSavepoint is greater than
+** (Pager.nSavepoint-1), then this function is a no-op.
+**
+** If a negative value is passed to this function, then the current
+** transaction is rolled back. This is different to calling
+** sqlite3PagerRollback() because this function does not terminate
+** the transaction or unlock the database, it just restores the
+** contents of the database to its original state.
+**
+** In any case, all savepoints with an index greater than iSavepoint
+** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE),
+** then savepoint iSavepoint is also destroyed.
+**
+** This function may return SQLITE_NOMEM if a memory allocation fails,
+** or an IO error code if an IO error occurs while rolling back a
+** savepoint. If no errors occur, SQLITE_OK is returned.
+*/
+int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
+  int rc = SQLITE_OK;
+
+  assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
+  assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK );
+
+  if( iSavepoint<pPager->nSavepoint ){
+    int ii;            /* Iterator variable */
+    int nNew;          /* Number of remaining savepoints after this op. */
+
+    /* Figure out how many savepoints will still be active after this
+    ** operation. Store this value in nNew. Then free resources associated
+    ** with any savepoints that are destroyed by this operation.
+    */
+    nNew = iSavepoint + (op==SAVEPOINT_ROLLBACK);
+    for(ii=nNew; ii<pPager->nSavepoint; ii++){
+      sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
+    }
+    pPager->nSavepoint = nNew;
+
+    /* If this is a rollback operation, playback the specified savepoint.
+    ** If this is a temp-file, it is possible that the journal file has
+    ** not yet been opened. In this case there have been no changes to
+    ** the database file, so the playback operation can be skipped.
+    */
+    if( op==SAVEPOINT_ROLLBACK && isOpen(pPager->jfd) ){
+      PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1];
+      rc = pagerPlaybackSavepoint(pPager, pSavepoint);
+      assert(rc!=SQLITE_DONE);
+    }
+
+    /* If this is a release of the outermost savepoint, truncate
+    ** the sub-journal to zero bytes in size. */
+    if( nNew==0 && op==SAVEPOINT_RELEASE && isOpen(pPager->sjfd) ){
+      assert( rc==SQLITE_OK );
+      rc = sqlite3OsTruncate(pPager->sjfd, 0);
+      pPager->nSubRec = 0;
     }
-    sqlite3PagerStmtCommit(pPager);
-  }else{
-    rc = SQLITE_OK;
   }
-  pPager->stmtAutoopen = 0;
   return rc;
 }
 
@@ -4278,10 +5053,19 @@
 }
 
 /*
-** Return the directory of the database file.
+** Return the VFS structure for the pager.
+*/
+const sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){
+  return pPager->pVfs;
+}
+
+/*
+** Return the file handle for the database file associated
+** with the pager. This might return NULL if the file has
+** not yet been opened.
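
Returning to sqlite3PagerSavepoint() above, here is a short hypothetical sketch (again not part of the patch) of how the op and iSavepoint parameters map onto SQL savepoint operations, assuming a pager that already has three savepoints open (indexes 0, 1 and 2):

/* Hypothetical illustration only; not part of SQLite or of this patch. */
static int exampleSavepointOps(Pager *pPager){
  int rc;

  /* "ROLLBACK TO" the middle savepoint: undo changes made since savepoint
  ** 1 was opened. Savepoint 2 is destroyed; savepoints 0 and 1 survive. */
  rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, 1);
  if( rc!=SQLITE_OK ) return rc;

  /* "RELEASE" the outermost savepoint: destroy all remaining savepoints
  ** and truncate the sub-journal. The enclosing transaction stays open. */
  return sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, 0);
}
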
 */
-const char *sqlite3PagerDirname(Pager *pPager){
-  return pPager->zDirectory;
+sqlite3_file *sqlite3PagerFile(Pager *pPager){
+  return pPager->fd;
 }
 
 /*
@@ -4301,25 +5085,34 @@
 #ifdef SQLITE_HAS_CODEC
 /*
-** Set the codec for this pager
+** Set or retrieve the codec for this pager
 */
-void sqlite3PagerSetCodec(
+static void sqlite3PagerSetCodec(
   Pager *pPager,
   void *(*xCodec)(void*,void*,Pgno,int),
-  void *pCodecArg
+  void (*xCodecSizeChng)(void*,int,int),
+  void (*xCodecFree)(void*),
+  void *pCodec
 ){
+  if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec);
   pPager->xCodec = xCodec;
-  pPager->pCodecArg = pCodecArg;
+  pPager->xCodecSizeChng = xCodecSizeChng;
+  pPager->xCodecFree = xCodecFree;
+  pPager->pCodec = pCodec;
+  pagerReportSize(pPager);
+}
+static void *sqlite3PagerGetCodec(Pager *pPager){
+  return pPager->pCodec;
 }
 #endif
 
 #ifndef SQLITE_OMIT_AUTOVACUUM
 /*
-** Move the page pPg to location pgno in the file.
+** Move the page pPg to location pgno in the file.
 **
 ** There must be no references to the page previously located at
 ** pgno (which we call pPgOld) though that page is allowed to be
-** in cache. If the page previous located at pgno is not already
+** in cache. If the page previously located at pgno is not already
 ** in the rollback journal, it is not put there by this routine.
 **
 ** References to the page pPg remain valid. Updating any
@@ -4330,88 +5123,132 @@
 ** required that a statement transaction was not active, but this restriction
 ** has been removed (CREATE INDEX needs to move a page when a statement
 ** transaction is active).
-*/
-int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno){
-  PgHdr *pPgOld;  /* The page being overwritten. */
-  int h;
-  Pgno needSyncPgno = 0;
+**
+** If the fourth argument, isCommit, is non-zero, then this page is being
+** moved as part of a database reorganization just before the transaction
+** is being committed. In this case, it is guaranteed that the database page
+** pPg refers to will not be written to again within this transaction.
+**
+** This function may return SQLITE_NOMEM or an IO error code if an error
+** occurs. Otherwise, it returns SQLITE_OK.
+*/
+int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, int isCommit){
+  PgHdr *pPgOld;           /* The page being overwritten. */
+  Pgno needSyncPgno = 0;   /* Old value of pPg->pgno, if sync is required */
+  int rc;                  /* Return code */
+  Pgno origPgno;           /* The original page number */
 
   assert( pPg->nRef>0 );
 
-  PAGERTRACE5("MOVE %d page %d (needSync=%d) moves to %d\n",
-      PAGERID(pPager), pPg->pgno, pPg->needSync, pgno);
+  /* If the page being moved is dirty and has not been saved by the latest
+  ** savepoint, then save the current contents of the page into the
+  ** sub-journal now. This is required to handle the following scenario:
+  **
+  **   BEGIN;
+  **     <journal page X, then modify it in memory>
+  **     SAVEPOINT one;
+  **       <Move page X to location Y>
+  **     ROLLBACK TO one;
+  **
+  ** If page X were not written to the sub-journal here, it would not
+  ** be possible to restore its contents when the "ROLLBACK TO one"
+  ** statement is processed.
+  **
+  ** subjournalPage() may need to allocate space to store pPg->pgno into
+  ** one or more savepoint bitvecs. This is the reason this function
+  ** may return SQLITE_NOMEM.
+ */ + if( pPg->flags&PGHDR_DIRTY + && subjRequiresPage(pPg) + && SQLITE_OK!=(rc = subjournalPage(pPg)) + ){ + return rc; + } + + PAGERTRACE(("MOVE %d page %d (needSync=%d) moves to %d\n", + PAGERID(pPager), pPg->pgno, (pPg->flags&PGHDR_NEED_SYNC)?1:0, pgno)); IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) - pager_get_content(pPg); - if( pPg->needSync ){ + /* If the journal needs to be sync()ed before page pPg->pgno can + ** be written to, store pPg->pgno in local variable needSyncPgno. + ** + ** If the isCommit flag is set, there is no need to remember that + ** the journal needs to be sync()ed before database page pPg->pgno + ** can be written to. The caller has already promised not to write to it. + */ + if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){ needSyncPgno = pPg->pgno; - assert( pPg->inJournal || (int)pgno>pPager->origDbSize ); - assert( pPg->dirty ); + assert( pageInJournal(pPg) || pPg->pgno>pPager->dbOrigSize ); + assert( pPg->flags&PGHDR_DIRTY ); assert( pPager->needSync ); } - /* Unlink pPg from it's hash-chain */ - unlinkHashChain(pPager, pPg); - /* If the cache contains a page with page-number pgno, remove it - ** from it's hash chain. Also, if the PgHdr.needSync was set for + ** from its hash chain. Also, if the PgHdr.needSync was set for ** page pgno before the 'move' operation, it needs to be retained ** for the page moved there. */ - pPg->needSync = 0; + pPg->flags &= ~PGHDR_NEED_SYNC; pPgOld = pager_lookup(pPager, pgno); + assert( !pPgOld || pPgOld->nRef==1 ); if( pPgOld ){ - assert( pPgOld->nRef==0 ); - unlinkHashChain(pPager, pPgOld); - makeClean(pPgOld); - pPg->needSync = pPgOld->needSync; - }else{ - pPg->needSync = 0; - } - if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ - pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; - }else{ - pPg->inJournal = 0; - assert( pPg->needSync==0 || (int)pgno>pPager->origDbSize ); + pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC); + sqlite3PcacheDrop(pPgOld); } - /* Change the page number for pPg and insert it into the new hash-chain. */ - assert( pgno!=0 ); - pPg->pgno = pgno; - h = pgno & (pPager->nHash-1); - if( pPager->aHash[h] ){ - assert( pPager->aHash[h]->pPrevHash==0 ); - pPager->aHash[h]->pPrevHash = pPg; - } - pPg->pNextHash = pPager->aHash[h]; - pPager->aHash[h] = pPg; - pPg->pPrevHash = 0; - - makeDirty(pPg); - pPager->dirtyCache = 1; + origPgno = pPg->pgno; + sqlite3PcacheMove(pPg, pgno); + sqlite3PcacheMakeDirty(pPg); + pPager->dbModified = 1; if( needSyncPgno ){ /* If needSyncPgno is non-zero, then the journal file needs to be ** sync()ed before any data is written to database file page needSyncPgno. ** Currently, no such page exists in the page-cache and the - ** Pager.aInJournal bit has been set. This needs to be remedied by loading - ** the page into the pager-cache and setting the PgHdr.needSync flag. + ** "is journaled" bitvec flag has been set. This needs to be remedied by + ** loading the page into the pager-cache and setting the PgHdr.needSync + ** flag. + ** + ** If the attempt to load the page into the page-cache fails, (due + ** to a malloc() or IO failure), clear the bit in the pInJournal[] + ** array. Otherwise, if the page is loaded and written again in + ** this transaction, it may be written to the database file before + ** it is synced into the journal file. This way, it may end up in + ** the journal file twice, but that is not a problem. ** ** The sqlite3PagerGet() call may cause the journal to sync. So make ** sure the Pager.needSync flag is set too. 
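
As an aside, the following is a hedged sketch (not part of the patch) of the calling pattern used around sqlite3PagerMovepage(), similar to what the btree layer's page-relocation code does during auto-vacuum; the helper name and page numbers are hypothetical:

/* Hypothetical illustration only; not part of SQLite or of this patch. */
static int exampleMovePage(Pager *pPager, Pgno iFrom, Pgno iTo, int isCommit){
  DbPage *pPg = 0;
  int rc;

  rc = sqlite3PagerGet(pPager, iFrom, &pPg);    /* Load the source page */
  if( rc!=SQLITE_OK ) return rc;

  rc = sqlite3PagerWrite(pPg);                  /* Journal it if required */
  if( rc==SQLITE_OK ){
    /* Re-number the page. Pass isCommit=1 only if the page is guaranteed
    ** not to be written again within this transaction. */
    rc = sqlite3PagerMovepage(pPager, pPg, iTo, isCommit);
  }
  sqlite3PagerUnref(pPg);
  return rc;
}
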
*/ - int rc; PgHdr *pPgHdr; assert( pPager->needSync ); rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); - if( rc!=SQLITE_OK ) return rc; + if( rc!=SQLITE_OK ){ + if( pPager->pInJournal && needSyncPgno<=pPager->dbOrigSize ){ + assert( pPager->pTmpSpace!=0 ); + sqlite3BitvecClear(pPager->pInJournal, needSyncPgno, pPager->pTmpSpace); + } + return rc; + } pPager->needSync = 1; - pPgHdr->needSync = 1; - pPgHdr->inJournal = 1; - makeDirty(pPgHdr); + assert( pPager->noSync==0 && !MEMDB ); + pPgHdr->flags |= PGHDR_NEED_SYNC; + sqlite3PcacheMakeDirty(pPgHdr); sqlite3PagerUnref(pPgHdr); } + /* + ** For an in-memory database, make sure the original page continues + ** to exist, in case the transaction needs to roll back. We allocate + ** the page now, instead of at rollback, because we can better deal + ** with an out-of-memory error now. Ticket #3761. + */ + if( MEMDB ){ + DbPage *pNew; + rc = sqlite3PagerAcquire(pPager, origPgno, &pNew, 1); + if( rc!=SQLITE_OK ) return rc; + sqlite3PagerUnref(pNew); + } + return SQLITE_OK; } #endif @@ -4420,7 +5257,8 @@ ** Return a pointer to the data for the specified page. */ void *sqlite3PagerGetData(DbPage *pPg){ - return PGHDR_TO_DATA(pPg); + assert( pPg->nRef>0 || pPg->pPager->memDb ); + return pPg->pData; } /* @@ -4429,7 +5267,7 @@ */ void *sqlite3PagerGetExtra(DbPage *pPg){ Pager *pPager = pPg->pPager; - return (pPager?PGHDR_TO_EXTRA(pPg, pPager):0); + return (pPager?pPg->pExtra:0); } /* @@ -4449,34 +5287,75 @@ assert( PAGER_LOCKINGMODE_QUERY<0 ); assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); if( eMode>=0 && !pPager->tempFile ){ - pPager->exclusiveMode = eMode; + pPager->exclusiveMode = (u8)eMode; } return (int)pPager->exclusiveMode; } -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) /* -** Return the current state of the file lock for the given pager. -** The return value is one of NO_LOCK, SHARED_LOCK, RESERVED_LOCK, -** PENDING_LOCK, or EXCLUSIVE_LOCK. -*/ -int sqlite3PagerLockstate(Pager *pPager){ - return sqlite3OsLockState(pPager->fd); +** Get/set the journal-mode for this pager. Parameter eMode must be one of: +** +** PAGER_JOURNALMODE_QUERY +** PAGER_JOURNALMODE_DELETE +** PAGER_JOURNALMODE_TRUNCATE +** PAGER_JOURNALMODE_PERSIST +** PAGER_JOURNALMODE_OFF +** PAGER_JOURNALMODE_MEMORY +** +** If the parameter is not _QUERY, then the journal_mode is set to the +** value specified if the change is allowed. The change is disallowed +** for the following reasons: +** +** * An in-memory database can only have its journal_mode set to _OFF +** or _MEMORY. +** +** * The journal mode may not be changed while a transaction is active. +** +** The returned indicate the current (possibly updated) journal-mode. +*/ +int sqlite3PagerJournalMode(Pager *pPager, int eMode){ + assert( eMode==PAGER_JOURNALMODE_QUERY + || eMode==PAGER_JOURNALMODE_DELETE + || eMode==PAGER_JOURNALMODE_TRUNCATE + || eMode==PAGER_JOURNALMODE_PERSIST + || eMode==PAGER_JOURNALMODE_OFF + || eMode==PAGER_JOURNALMODE_MEMORY ); + assert( PAGER_JOURNALMODE_QUERY<0 ); + if( eMode>=0 + && (!MEMDB || eMode==PAGER_JOURNALMODE_MEMORY + || eMode==PAGER_JOURNALMODE_OFF) + && !pPager->dbModified + && (!isOpen(pPager->jfd) || 0==pPager->journalOff) + ){ + if( isOpen(pPager->jfd) ){ + sqlite3OsClose(pPager->jfd); + } + pPager->journalMode = (u8)eMode; + } + return (int)pPager->journalMode; } -#endif -#ifdef SQLITE_DEBUG /* -** Print a listing of all referenced pages and their ref count. +** Get/set the size-limit used for persistent journal files. 
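
As an editorial aside (not part of the patch), here is a small sketch of the query-or-set convention shared by sqlite3PagerJournalMode() above and the journal-size-limit routine described next: a negative sentinel (the _QUERY constant, or any value below -1 for the size limit) reads the current setting without changing it:

/* Hypothetical illustration only; not part of SQLite or of this patch. */
static void exampleJournalSettings(Pager *pPager){
  int eMode;     /* Journal mode currently in effect */
  i64 iLimit;    /* Journal size limit currently in effect */

  /* Query the current settings without changing them. */
  eMode  = sqlite3PagerJournalMode(pPager, PAGER_JOURNALMODE_QUERY);
  iLimit = sqlite3PagerJournalSizeLimit(pPager, -2);

  /* Request PERSIST journaling and cap persistent journals at 1 MiB. The
  ** return values report what is actually in effect, which is unchanged
  ** if the mode switch is not currently permitted. */
  eMode  = sqlite3PagerJournalMode(pPager, PAGER_JOURNALMODE_PERSIST);
  iLimit = sqlite3PagerJournalSizeLimit(pPager, 1024*1024);

  (void)eMode;
  (void)iLimit;
}
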
+** +** Setting the size limit to -1 means no limit is enforced. +** An attempt to set a limit smaller than -1 is a no-op. */ -void sqlite3PagerRefdump(Pager *pPager){ - PgHdr *pPg; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - if( pPg->nRef<=0 ) continue; - sqlite3DebugPrintf("PAGE %3d addr=%p nRef=%d\n", - pPg->pgno, PGHDR_TO_DATA(pPg), pPg->nRef); +i64 sqlite3PagerJournalSizeLimit(Pager *pPager, i64 iLimit){ + if( iLimit>=-1 ){ + pPager->journalSizeLimit = iLimit; } + return pPager->journalSizeLimit; +} + +/* +** Return a pointer to the pPager->pBackup variable. The backup module +** in backup.c maintains the content of this variable. This module +** uses it opaquely as an argument to sqlite3BackupRestart() and +** sqlite3BackupUpdate() only. +*/ +sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ + return &pPager->pBackup; } -#endif #endif /* SQLITE_OMIT_DISKIO */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pager.c.bu1 /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pager.c.bu1 --- sqlite3-3.4.2/src/pager.c.bu1 2007-08-11 01:09:44.000000000 +0100 +++ sqlite3-3.6.16/src/pager.c.bu1 1970-01-01 01:00:00.000000000 +0100 @@ -1,4474 +0,0 @@ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of the page cache subsystem or "pager". -** -** The pager is used to access a database disk file. It implements -** atomic commit and rollback through the use of a journal file that -** is separate from the database file. The pager also implements file -** locking to prevent two processes from writing the same database -** file simultaneously, or one process from reading the database while -** another is writing. -** -** @(#) $Id: pager.c,v 1.352 2007/08/07 17:13:04 drh Exp $ -*/ -#ifndef SQLITE_OMIT_DISKIO -#include "sqliteInt.h" -#include "os.h" -#include "pager.h" -#include -#include - -/* -** Macros for troubleshooting. Normally turned off -*/ -#if 0 -#define sqlite3DebugPrintf printf -#define PAGERTRACE1(X) sqlite3DebugPrintf(X) -#define PAGERTRACE2(X,Y) sqlite3DebugPrintf(X,Y) -#define PAGERTRACE3(X,Y,Z) sqlite3DebugPrintf(X,Y,Z) -#define PAGERTRACE4(X,Y,Z,W) sqlite3DebugPrintf(X,Y,Z,W) -#define PAGERTRACE5(X,Y,Z,W,V) sqlite3DebugPrintf(X,Y,Z,W,V) -#else -#define PAGERTRACE1(X) -#define PAGERTRACE2(X,Y) -#define PAGERTRACE3(X,Y,Z) -#define PAGERTRACE4(X,Y,Z,W) -#define PAGERTRACE5(X,Y,Z,W,V) -#endif - -/* -** The following two macros are used within the PAGERTRACEX() macros above -** to print out file-descriptors. -** -** PAGERID() takes a pointer to a Pager struct as it's argument. The -** associated file-descriptor is returned. FILEHANDLEID() takes an OsFile -** struct as it's argument. -*/ -#define PAGERID(p) ((int)(p->fd)) -#define FILEHANDLEID(fd) ((int)fd) - -/* -** The page cache as a whole is always in one of the following -** states: -** -** PAGER_UNLOCK The page cache is not currently reading or -** writing the database file. There is no -** data held in memory. This is the initial -** state. -** -** PAGER_SHARED The page cache is reading the database. -** Writing is not permitted. There can be -** multiple readers accessing the same database -** file at the same time. 
-** -** PAGER_RESERVED This process has reserved the database for writing -** but has not yet made any changes. Only one process -** at a time can reserve the database. The original -** database file has not been modified so other -** processes may still be reading the on-disk -** database file. -** -** PAGER_EXCLUSIVE The page cache is writing the database. -** Access is exclusive. No other processes or -** threads can be reading or writing while one -** process is writing. -** -** PAGER_SYNCED The pager moves to this state from PAGER_EXCLUSIVE -** after all dirty pages have been written to the -** database file and the file has been synced to -** disk. All that remains to do is to remove or -** truncate the journal file and the transaction -** will be committed. -** -** The page cache comes up in PAGER_UNLOCK. The first time a -** sqlite3PagerGet() occurs, the state transitions to PAGER_SHARED. -** After all pages have been released using sqlite_page_unref(), -** the state transitions back to PAGER_UNLOCK. The first time -** that sqlite3PagerWrite() is called, the state transitions to -** PAGER_RESERVED. (Note that sqlite3PagerWrite() can only be -** called on an outstanding page which means that the pager must -** be in PAGER_SHARED before it transitions to PAGER_RESERVED.) -** PAGER_RESERVED means that there is an open rollback journal. -** The transition to PAGER_EXCLUSIVE occurs before any changes -** are made to the database file, though writes to the rollback -** journal occurs with just PAGER_RESERVED. After an sqlite3PagerRollback() -** or sqlite3PagerCommitPhaseTwo(), the state can go back to PAGER_SHARED, -** or it can stay at PAGER_EXCLUSIVE if we are in exclusive access mode. -*/ -#define PAGER_UNLOCK 0 -#define PAGER_SHARED 1 /* same as SHARED_LOCK */ -#define PAGER_RESERVED 2 /* same as RESERVED_LOCK */ -#define PAGER_EXCLUSIVE 4 /* same as EXCLUSIVE_LOCK */ -#define PAGER_SYNCED 5 - -/* -** If the SQLITE_BUSY_RESERVED_LOCK macro is set to true at compile-time, -** then failed attempts to get a reserved lock will invoke the busy callback. -** This is off by default. To see why, consider the following scenario: -** -** Suppose thread A already has a shared lock and wants a reserved lock. -** Thread B already has a reserved lock and wants an exclusive lock. If -** both threads are using their busy callbacks, it might be a long time -** be for one of the threads give up and allows the other to proceed. -** But if the thread trying to get the reserved lock gives up quickly -** (if it never invokes its busy callback) then the contention will be -** resolved quickly. -*/ -#ifndef SQLITE_BUSY_RESERVED_LOCK -# define SQLITE_BUSY_RESERVED_LOCK 0 -#endif - -/* -** This macro rounds values up so that if the value is an address it -** is guaranteed to be an address that is aligned to an 8-byte boundary. -*/ -#define FORCE_ALIGNMENT(X) (((X)+7)&~7) - -/* -** Each in-memory image of a page begins with the following header. -** This header is only visible to this pager module. The client -** code that calls pager sees only the data that follows the header. -** -** Client code should call sqlite3PagerWrite() on a page prior to making -** any modifications to that page. The first time sqlite3PagerWrite() -** is called, the original page contents are written into the rollback -** journal and PgHdr.inJournal and PgHdr.needSync are set. Later, once -** the journal page has made it onto the disk surface, PgHdr.needSync -** is cleared. 
The modified page cannot be written back into the original -** database file until the journal pages has been synced to disk and the -** PgHdr.needSync has been cleared. -** -** The PgHdr.dirty flag is set when sqlite3PagerWrite() is called and -** is cleared again when the page content is written back to the original -** database file. -** -** Details of important structure elements: -** -** needSync -** -** If this is true, this means that it is not safe to write the page -** content to the database because the original content needed -** for rollback has not by synced to the main rollback journal. -** The original content may have been written to the rollback journal -** but it has not yet been synced. So we cannot write to the database -** file because power failure might cause the page in the journal file -** to never reach the disk. It is as if the write to the journal file -** does not occur until the journal file is synced. -** -** This flag is false if the page content exactly matches what -** currently exists in the database file. The needSync flag is also -** false if the original content has been written to the main rollback -** journal and synced. If the page represents a new page that has -** been added onto the end of the database during the current -** transaction, the needSync flag is true until the original database -** size in the journal header has been synced to disk. -** -** inJournal -** -** This is true if the original page has been written into the main -** rollback journal. This is always false for new pages added to -** the end of the database file during the current transaction. -** And this flag says nothing about whether or not the journal -** has been synced to disk. For pages that are in the original -** database file, the following expression should always be true: -** -** inJournal = (pPager->aInJournal[(pgno-1)/8] & (1<<((pgno-1)%8))!=0 -** -** The pPager->aInJournal[] array is only valid for the original -** pages of the database, not new pages that are added to the end -** of the database, so obviously the above expression cannot be -** valid for new pages. For new pages inJournal is always 0. -** -** dirty -** -** When true, this means that the content of the page has been -** modified and needs to be written back to the database file. -** If false, it means that either the content of the page is -** unchanged or else the content is unimportant and we do not -** care whether or not it is preserved. -** -** alwaysRollback -** -** This means that the sqlite3PagerDontRollback() API should be -** ignored for this page. The DontRollback() API attempts to say -** that the content of the page on disk is unimportant (it is an -** unused page on the freelist) so that it is unnecessary to -** rollback changes to this page because the content of the page -** can change without changing the meaning of the database. This -** flag overrides any DontRollback() attempt. This flag is set -** when a page that originally contained valid data is added to -** the freelist. Later in the same transaction, this page might -** be pulled from the freelist and reused for something different -** and at that point the DontRollback() API will be called because -** pages taken from the freelist do not need to be protected by -** the rollback journal. But this flag says that the page was -** not originally part of the freelist so that it still needs to -** be rolled back in spite of any subsequent DontRollback() calls. 
-** -** needRead -** -** This flag means (when true) that the content of the page has -** not yet been loaded from disk. The in-memory content is just -** garbage. (Actually, we zero the content, but you should not -** make any assumptions about the content nevertheless.) If the -** content is needed in the future, it should be read from the -** original database file. -*/ -typedef struct PgHdr PgHdr; -struct PgHdr { - Pager *pPager; /* The pager to which this page belongs */ - Pgno pgno; /* The page number for this page */ - PgHdr *pNextHash, *pPrevHash; /* Hash collision chain for PgHdr.pgno */ - PgHdr *pNextFree, *pPrevFree; /* Freelist of pages where nRef==0 */ - PgHdr *pNextAll; /* A list of all pages */ - u8 inJournal; /* TRUE if has been written to journal */ - u8 dirty; /* TRUE if we need to write back changes */ - u8 needSync; /* Sync journal before writing this page */ - u8 alwaysRollback; /* Disable DontRollback() for this page */ - u8 needRead; /* Read content if PagerWrite() is called */ - short int nRef; /* Number of users of this page */ - PgHdr *pDirty, *pPrevDirty; /* Dirty pages */ - u32 notUsed; /* Buffer space */ -#ifdef SQLITE_CHECK_PAGES - u32 pageHash; -#endif - /* pPager->pageSize bytes of page data follow this header */ - /* Pager.nExtra bytes of local data follow the page data */ -}; - -/* -** For an in-memory only database, some extra information is recorded about -** each page so that changes can be rolled back. (Journal files are not -** used for in-memory databases.) The following information is added to -** the end of every EXTRA block for in-memory databases. -** -** This information could have been added directly to the PgHdr structure. -** But then it would take up an extra 8 bytes of storage on every PgHdr -** even for disk-based databases. Splitting it out saves 8 bytes. This -** is only a savings of 0.8% but those percentages add up. -*/ -typedef struct PgHistory PgHistory; -struct PgHistory { - u8 *pOrig; /* Original page text. Restore to this on a full rollback */ - u8 *pStmt; /* Text as it was at the beginning of the current statement */ - PgHdr *pNextStmt, *pPrevStmt; /* List of pages in the statement journal */ - u8 inStmt; /* TRUE if in the statement subjournal */ -}; - -/* -** A macro used for invoking the codec if there is one -*/ -#ifdef SQLITE_HAS_CODEC -# define CODEC1(P,D,N,X) if( P->xCodec!=0 ){ P->xCodec(P->pCodecArg,D,N,X); } -# define CODEC2(P,D,N,X) ((char*)(P->xCodec!=0?P->xCodec(P->pCodecArg,D,N,X):D)) -#else -# define CODEC1(P,D,N,X) /* NO-OP */ -# define CODEC2(P,D,N,X) ((char*)D) -#endif - -/* -** Convert a pointer to a PgHdr into a pointer to its data -** and back again. -*/ -#define PGHDR_TO_DATA(P) ((void*)(&(P)[1])) -#define DATA_TO_PGHDR(D) (&((PgHdr*)(D))[-1]) -#define PGHDR_TO_EXTRA(G,P) ((void*)&((char*)(&(G)[1]))[(P)->pageSize]) -#define PGHDR_TO_HIST(P,PGR) \ - ((PgHistory*)&((char*)(&(P)[1]))[(PGR)->pageSize+(PGR)->nExtra]) - -/* -** A open page cache is an instance of the following structure. -** -** Pager.errCode may be set to SQLITE_IOERR, SQLITE_CORRUPT, or -** or SQLITE_FULL. Once one of the first three errors occurs, it persists -** and is returned as the result of every major pager API call. The -** SQLITE_FULL return code is slightly different. It persists only until the -** next successful rollback is performed on the pager cache. Also, -** SQLITE_FULL does not affect the sqlite3PagerGet() and sqlite3PagerLookup() -** APIs, they may still be used successfully. 
-*/ -struct Pager { - u8 journalOpen; /* True if journal file descriptors is valid */ - u8 journalStarted; /* True if header of journal is synced */ - u8 useJournal; /* Use a rollback journal on this file */ - u8 noReadlock; /* Do not bother to obtain readlocks */ - u8 stmtOpen; /* True if the statement subjournal is open */ - u8 stmtInUse; /* True we are in a statement subtransaction */ - u8 stmtAutoopen; /* Open stmt journal when main journal is opened*/ - u8 noSync; /* Do not sync the journal if true */ - u8 fullSync; /* Do extra syncs of the journal for robustness */ - u8 full_fsync; /* Use F_FULLFSYNC when available */ - u8 state; /* PAGER_UNLOCK, _SHARED, _RESERVED, etc. */ - u8 tempFile; /* zFilename is a temporary file */ - u8 readOnly; /* True for a read-only database */ - u8 needSync; /* True if an fsync() is needed on the journal */ - u8 dirtyCache; /* True if cached pages have changed */ - u8 alwaysRollback; /* Disable DontRollback() for all pages */ - u8 memDb; /* True to inhibit all file I/O */ - u8 setMaster; /* True if a m-j name has been written to jrnl */ - u8 doNotSync; /* Boolean. While true, do not spill the cache */ - u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ - u8 changeCountDone; /* Set after incrementing the change-counter */ - int errCode; /* One of several kinds of errors */ - int dbSize; /* Number of pages in the file */ - int origDbSize; /* dbSize before the current change */ - int stmtSize; /* Size of database (in pages) at stmt_begin() */ - int nRec; /* Number of pages written to the journal */ - u32 cksumInit; /* Quasi-random value added to every checksum */ - int stmtNRec; /* Number of records in stmt subjournal */ - int nExtra; /* Add this many bytes to each in-memory page */ - int pageSize; /* Number of bytes in a page */ - int nPage; /* Total number of in-memory pages */ - int nRef; /* Number of in-memory pages with PgHdr.nRef>0 */ - int mxPage; /* Maximum number of pages to hold in cache */ - Pgno mxPgno; /* Maximum allowed size of the database */ - u8 *aInJournal; /* One bit for each page in the database file */ - u8 *aInStmt; /* One bit for each page in the database */ - char *zFilename; /* Name of the database file */ - char *zJournal; /* Name of the journal file */ - char *zDirectory; /* Directory hold database and journal files */ - OsFile *fd, *jfd; /* File descriptors for database and journal */ - OsFile *stfd; /* File descriptor for the statement subjournal*/ - BusyHandler *pBusyHandler; /* Pointer to sqlite.busyHandler */ - PgHdr *pFirst, *pLast; /* List of free pages */ - PgHdr *pFirstSynced; /* First free page with PgHdr.needSync==0 */ - PgHdr *pAll; /* List of all pages */ - PgHdr *pStmt; /* List of pages in the statement subjournal */ - PgHdr *pDirty; /* List of all dirty pages */ - i64 journalOff; /* Current byte offset in the journal file */ - i64 journalHdr; /* Byte offset to previous journal header */ - i64 stmtHdrOff; /* First journal header written this statement */ - i64 stmtCksum; /* cksumInit when statement was started */ - i64 stmtJSize; /* Size of journal at stmt_begin() */ - int sectorSize; /* Assumed sector size during rollback */ -#ifdef SQLITE_TEST - int nHit, nMiss; /* Cache hits and missing */ - int nRead, nWrite; /* Database pages read/written */ -#endif - void (*xDestructor)(DbPage*,int); /* Call this routine when freeing pages */ - void (*xReiniter)(DbPage*,int); /* Call this routine when reloading pages */ -#ifdef SQLITE_HAS_CODEC - void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding 
data */ - void *pCodecArg; /* First argument to xCodec() */ -#endif - int nHash; /* Size of the pager hash table */ - PgHdr **aHash; /* Hash table to map page number to PgHdr */ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - Pager *pNext; /* Linked list of pagers in this thread */ -#endif - char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ - char dbFileVers[16]; /* Changes whenever database file changes */ -}; - -/* -** The following global variables hold counters used for -** testing purposes only. These variables do not exist in -** a non-testing build. These variables are not thread-safe. -*/ -#ifdef SQLITE_TEST -int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ -int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ -int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ -int sqlite3_pager_pgfree_count = 0; /* Number of cache pages freed */ -# define PAGER_INCR(v) v++ -#else -# define PAGER_INCR(v) -#endif - - - -/* -** Journal files begin with the following magic string. The data -** was obtained from /dev/random. It is used only as a sanity check. -** -** Since version 2.8.0, the journal format contains additional sanity -** checking information. If the power fails while the journal is begin -** written, semi-random garbage data might appear in the journal -** file after power is restored. If an attempt is then made -** to roll the journal back, the database could be corrupted. The additional -** sanity checking data is an attempt to discover the garbage in the -** journal and ignore it. -** -** The sanity checking information for the new journal format consists -** of a 32-bit checksum on each page of data. The checksum covers both -** the page number and the pPager->pageSize bytes of data for the page. -** This cksum is initialized to a 32-bit random value that appears in the -** journal file right after the header. The random initializer is important, -** because garbage data that appears at the end of a journal is likely -** data that was once in other files that have now been deleted. If the -** garbage data came from an obsolete journal file, the checksums might -** be correct. But by initializing the checksum to random value which -** is different for every journal, we minimize that risk. -*/ -static const unsigned char aJournalMagic[] = { - 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7, -}; - -/* -** The size of the header and of each page in the journal is determined -** by the following macros. -*/ -#define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) - -/* -** The journal header size for this pager. In the future, this could be -** set to some value read from the disk controller. The important -** characteristic is that it is the same size as a disk sector. -*/ -#define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) - -/* -** The macro MEMDB is true if we are dealing with an in-memory database. -** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set, -** the value of MEMDB will be a constant and the compiler will optimize -** out code that would never execute. -*/ -#ifdef SQLITE_OMIT_MEMORYDB -# define MEMDB 0 -#else -# define MEMDB pPager->memDb -#endif - -/* -** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is -** reserved for working around a windows/posix incompatibility). It is -** used in the journal to signify that the remainder of the journal file -** is devoted to storing a master journal name - there are no more pages to -** roll back. 
See comments for function writeMasterJournal() for details. -*/ -/* #define PAGER_MJ_PGNO(x) (PENDING_BYTE/((x)->pageSize)) */ -#define PAGER_MJ_PGNO(x) ((PENDING_BYTE/((x)->pageSize))+1) - -/* -** The maximum legal page number is (2^31 - 1). -*/ -#define PAGER_MAX_PGNO 2147483647 - -/* -** Enable reference count tracking (for debugging) here: -*/ -#ifdef SQLITE_DEBUG - int pager3_refinfo_enable = 0; - static void pager_refinfo(PgHdr *p){ - static int cnt = 0; - if( !pager3_refinfo_enable ) return; - sqlite3DebugPrintf( - "REFCNT: %4d addr=%p nRef=%-3d total=%d\n", - p->pgno, PGHDR_TO_DATA(p), p->nRef, p->pPager->nRef - ); - cnt++; /* Something to set a breakpoint on */ - } -# define REFINFO(X) pager_refinfo(X) -#else -# define REFINFO(X) -#endif - -/* -** Return true if page *pPg has already been written to the statement -** journal (or statement snapshot has been created, if *pPg is part -** of an in-memory database). -*/ -static int pageInStatement(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - if( MEMDB ){ - return PGHDR_TO_HIST(pPg, pPager)->inStmt; - }else{ - Pgno pgno = pPg->pgno; - u8 *a = pPager->aInStmt; - return (a && (int)pgno<=pPager->stmtSize && (a[pgno/8] & (1<<(pgno&7)))); - } -} - -/* -** Change the size of the pager hash table to N. N must be a power -** of two. -*/ -static void pager_resize_hash_table(Pager *pPager, int N){ - PgHdr **aHash, *pPg; - assert( N>0 && (N&(N-1))==0 ); - aHash = sqliteMalloc( sizeof(aHash[0])*N ); - if( aHash==0 ){ - /* Failure to rehash is not an error. It is only a performance hit. */ - return; - } - sqliteFree(pPager->aHash); - pPager->nHash = N; - pPager->aHash = aHash; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - int h; - if( pPg->pgno==0 ){ - assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); - continue; - } - h = pPg->pgno & (N-1); - pPg->pNextHash = aHash[h]; - if( aHash[h] ){ - aHash[h]->pPrevHash = pPg; - } - aHash[h] = pPg; - pPg->pPrevHash = 0; - } -} - -/* -** Read a 32-bit integer from the given file descriptor. Store the integer -** that is read in *pRes. Return SQLITE_OK if everything worked, or an -** error code is something goes wrong. -** -** All values are stored on disk as big-endian. -*/ -static int read32bits(OsFile *fd, u32 *pRes){ - unsigned char ac[4]; - int rc = sqlite3OsRead(fd, ac, sizeof(ac)); - if( rc==SQLITE_OK ){ - *pRes = sqlite3Get4byte(ac); - } - return rc; -} - -/* -** Write a 32-bit integer into a string buffer in big-endian byte order. -*/ -#define put32bits(A,B) sqlite3Put4byte((u8*)A,B) - -/* -** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK -** on success or an error code is something goes wrong. -*/ -static int write32bits(OsFile *fd, u32 val){ - char ac[4]; - put32bits(ac, val); - return sqlite3OsWrite(fd, ac, 4); -} - -/* -** This function should be called when an error occurs within the pager -** code. The first argument is a pointer to the pager structure, the -** second the error-code about to be returned by a pager API function. -** The value returned is a copy of the second argument to this function. -** -** If the second argument is SQLITE_IOERR, SQLITE_CORRUPT, or SQLITE_FULL -** the error becomes persistent. All subsequent API calls on this Pager -** will immediately return the same error code. 
-*/ -static int pager_error(Pager *pPager, int rc){ - int rc2 = rc & 0xff; - assert( pPager->errCode==SQLITE_FULL || pPager->errCode==SQLITE_OK ); - if( - rc2==SQLITE_FULL || - rc2==SQLITE_IOERR || - rc2==SQLITE_CORRUPT - ){ - pPager->errCode = rc; - } - return rc; -} - -/* -** If SQLITE_CHECK_PAGES is defined then we do some sanity checking -** on the cache using a hash function. This is used for testing -** and debugging only. -*/ -#ifdef SQLITE_CHECK_PAGES -/* -** Return a 32-bit hash of the page data for pPage. -*/ -static u32 pager_datahash(int nByte, unsigned char *pData){ - u32 hash = 0; - int i; - for(i=0; ipPager->pageSize, - (unsigned char *)PGHDR_TO_DATA(pPage)); -} - -/* -** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES -** is defined, and NDEBUG is not defined, an assert() statement checks -** that the page is either dirty or still matches the calculated page-hash. -*/ -#define CHECK_PAGE(x) checkPage(x) -static void checkPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - assert( !pPg->pageHash || pPager->errCode || MEMDB || pPg->dirty || - pPg->pageHash==pager_pagehash(pPg) ); -} - -#else -#define pager_datahash(X,Y) 0 -#define pager_pagehash(X) 0 -#define CHECK_PAGE(x) -#endif - -/* -** When this is called the journal file for pager pPager must be open. -** The master journal file name is read from the end of the file and -** written into memory obtained from sqliteMalloc(). *pzMaster is -** set to point at the memory and SQLITE_OK returned. The caller must -** sqliteFree() *pzMaster. -** -** If no master journal file name is present *pzMaster is set to 0 and -** SQLITE_OK returned. -*/ -static int readMasterJournal(OsFile *pJrnl, char **pzMaster){ - int rc; - u32 len; - i64 szJ; - u32 cksum; - int i; - unsigned char aMagic[8]; /* A buffer to hold the magic header */ - - *pzMaster = 0; - - rc = sqlite3OsFileSize(pJrnl, &szJ); - if( rc!=SQLITE_OK || szJ<16 ) return rc; - - rc = sqlite3OsSeek(pJrnl, szJ-16); - if( rc!=SQLITE_OK ) return rc; - - rc = read32bits(pJrnl, &len); - if( rc!=SQLITE_OK ) return rc; - - rc = read32bits(pJrnl, &cksum); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3OsRead(pJrnl, aMagic, 8); - if( rc!=SQLITE_OK || memcmp(aMagic, aJournalMagic, 8) ) return rc; - - rc = sqlite3OsSeek(pJrnl, szJ-16-len); - if( rc!=SQLITE_OK ) return rc; - - *pzMaster = (char *)sqliteMalloc(len+1); - if( !*pzMaster ){ - return SQLITE_NOMEM; - } - rc = sqlite3OsRead(pJrnl, *pzMaster, len); - if( rc!=SQLITE_OK ){ - sqliteFree(*pzMaster); - *pzMaster = 0; - return rc; - } - - /* See if the checksum matches the master journal name */ - for(i=0; ijournalOff; - if( c ){ - offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager); - } - assert( offset%JOURNAL_HDR_SZ(pPager)==0 ); - assert( offset>=c ); - assert( (offset-c)journalOff = offset; - return sqlite3OsSeek(pPager->jfd, pPager->journalOff); -} - -/* -** The journal file must be open when this routine is called. A journal -** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the -** current location. -** -** The format for the journal header is as follows: -** - 8 bytes: Magic identifying journal format. -** - 4 bytes: Number of records in journal, or -1 no-sync mode is on. -** - 4 bytes: Random number used for page hash. -** - 4 bytes: Initial database page count. -** - 4 bytes: Sector size used by the process that wrote this journal. -** -** Followed by (JOURNAL_HDR_SZ - 24) bytes of unused space. 
-*/ -static int writeJournalHdr(Pager *pPager){ - char zHeader[sizeof(aJournalMagic)+16]; - int rc; - - if( pPager->stmtHdrOff==0 ){ - pPager->stmtHdrOff = pPager->journalOff; - } - - rc = seekJournalHdr(pPager); - if( rc ) return rc; - - pPager->journalHdr = pPager->journalOff; - pPager->journalOff += JOURNAL_HDR_SZ(pPager); - - /* FIX ME: - ** - ** Possibly for a pager not in no-sync mode, the journal magic should not - ** be written until nRec is filled in as part of next syncJournal(). - ** - ** Actually maybe the whole journal header should be delayed until that - ** point. Think about this. - */ - memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); - /* The nRec Field. 0xFFFFFFFF for no-sync journals. */ - put32bits(&zHeader[sizeof(aJournalMagic)], pPager->noSync ? 0xffffffff : 0); - /* The random check-hash initialiser */ - sqlite3Randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); - put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); - /* The initial database size */ - put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbSize); - /* The assumed sector size for this process */ - put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize); - IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, sizeof(zHeader))) - rc = sqlite3OsWrite(pPager->jfd, zHeader, sizeof(zHeader)); - - /* The journal header has been written successfully. Seek the journal - ** file descriptor to the end of the journal header sector. - */ - if( rc==SQLITE_OK ){ - IOTRACE(("JTAIL %p %lld\n", pPager, pPager->journalOff-1)) - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff-1); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->jfd, "\000", 1); - } - } - return rc; -} - -/* -** The journal file must be open when this is called. A journal header file -** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal -** file. See comments above function writeJournalHdr() for a description of -** the journal header format. -** -** If the header is read successfully, *nRec is set to the number of -** page records following this header and *dbSize is set to the size of the -** database before the transaction began, in pages. Also, pPager->cksumInit -** is set to the value read from the journal header. SQLITE_OK is returned -** in this case. -** -** If the journal header file appears to be corrupted, SQLITE_DONE is -** returned and *nRec and *dbSize are not set. If JOURNAL_HDR_SZ bytes -** cannot be read from the journal file an error code is returned. -*/ -static int readJournalHdr( - Pager *pPager, - i64 journalSize, - u32 *pNRec, - u32 *pDbSize -){ - int rc; - unsigned char aMagic[8]; /* A buffer to hold the magic header */ - - rc = seekJournalHdr(pPager); - if( rc ) return rc; - - if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ - return SQLITE_DONE; - } - - rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic)); - if( rc ) return rc; - - if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ - return SQLITE_DONE; - } - - rc = read32bits(pPager->jfd, pNRec); - if( rc ) return rc; - - rc = read32bits(pPager->jfd, &pPager->cksumInit); - if( rc ) return rc; - - rc = read32bits(pPager->jfd, pDbSize); - if( rc ) return rc; - - /* Update the assumed sector-size to match the value used by - ** the process that created this journal. If this journal was - ** created by a process other than this one, then this routine - ** is being called from within pager_playback(). The local value - ** of Pager.sectorSize is restored at the end of that routine. 
- */ - rc = read32bits(pPager->jfd, (u32 *)&pPager->sectorSize); - if( rc ) return rc; - - pPager->journalOff += JOURNAL_HDR_SZ(pPager); - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); - return rc; -} - - -/* -** Write the supplied master journal name into the journal file for pager -** pPager at the current location. The master journal name must be the last -** thing written to a journal file. If the pager is in full-sync mode, the -** journal file descriptor is advanced to the next sector boundary before -** anything is written. The format is: -** -** + 4 bytes: PAGER_MJ_PGNO. -** + N bytes: length of master journal name. -** + 4 bytes: N -** + 4 bytes: Master journal name checksum. -** + 8 bytes: aJournalMagic[]. -** -** The master journal page checksum is the sum of the bytes in the master -** journal name. -** -** If zMaster is a NULL pointer (occurs for a single database transaction), -** this call is a no-op. -*/ -static int writeMasterJournal(Pager *pPager, const char *zMaster){ - int rc; - int len; - int i; - u32 cksum = 0; - char zBuf[sizeof(aJournalMagic)+2*4]; - - if( !zMaster || pPager->setMaster) return SQLITE_OK; - pPager->setMaster = 1; - - len = strlen(zMaster); - for(i=0; ifullSync ){ - rc = seekJournalHdr(pPager); - if( rc!=SQLITE_OK ) return rc; - } - pPager->journalOff += (len+20); - - rc = write32bits(pPager->jfd, PAGER_MJ_PGNO(pPager)); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3OsWrite(pPager->jfd, zMaster, len); - if( rc!=SQLITE_OK ) return rc; - - put32bits(zBuf, len); - put32bits(&zBuf[4], cksum); - memcpy(&zBuf[8], aJournalMagic, sizeof(aJournalMagic)); - rc = sqlite3OsWrite(pPager->jfd, zBuf, 8+sizeof(aJournalMagic)); - pPager->needSync = !pPager->noSync; - return rc; -} - -/* -** Add or remove a page from the list of all pages that are in the -** statement journal. -** -** The Pager keeps a separate list of pages that are currently in -** the statement journal. This helps the sqlite3PagerStmtCommit() -** routine run MUCH faster for the common case where there are many -** pages in memory but only a few are in the statement journal. -*/ -static void page_add_to_stmt_list(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - assert( MEMDB ); - if( !pHist->inStmt ){ - assert( pHist->pPrevStmt==0 && pHist->pNextStmt==0 ); - if( pPager->pStmt ){ - PGHDR_TO_HIST(pPager->pStmt, pPager)->pPrevStmt = pPg; - } - pHist->pNextStmt = pPager->pStmt; - pPager->pStmt = pPg; - pHist->inStmt = 1; - } -} - -/* -** Find a page in the hash table given its page number. Return -** a pointer to the page or NULL if not found. -*/ -static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ - PgHdr *p; - if( pPager->aHash==0 ) return 0; - p = pPager->aHash[pgno & (pPager->nHash-1)]; - while( p && p->pgno!=pgno ){ - p = p->pNextHash; - } - return p; -} - -/* -** Unlock the database file. -*/ -static void pager_unlock(Pager *pPager){ - if( !pPager->exclusiveMode ){ - if( !MEMDB ){ - sqlite3OsUnlock(pPager->fd, NO_LOCK); - pPager->dbSize = -1; - IOTRACE(("UNLOCK %p\n", pPager)) - } - pPager->state = PAGER_UNLOCK; - pPager->changeCountDone = 0; - } -} - -/* -** Execute a rollback if a transaction is active and unlock the -** database file. This is a no-op if the pager has already entered -** the error-state. 
-*/ -static void pagerUnlockAndRollback(Pager *p){ - if( p->errCode ) return; - assert( p->state>=PAGER_RESERVED || p->journalOpen==0 ); - if( p->state>=PAGER_RESERVED ){ - sqlite3PagerRollback(p); - } - pager_unlock(p); - assert( p->errCode || !p->journalOpen || (p->exclusiveMode&&!p->journalOff) ); - assert( p->errCode || !p->stmtOpen || p->exclusiveMode ); -} - - -/* -** Clear the in-memory cache. This routine -** sets the state of the pager back to what it was when it was first -** opened. Any outstanding pages are invalidated and subsequent attempts -** to access those pages will likely result in a coredump. -*/ -static void pager_reset(Pager *pPager){ - PgHdr *pPg, *pNext; - if( pPager->errCode ) return; - for(pPg=pPager->pAll; pPg; pPg=pNext){ - IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - pNext = pPg->pNextAll; - sqliteFree(pPg); - } - pPager->pStmt = 0; - pPager->pFirst = 0; - pPager->pFirstSynced = 0; - pPager->pLast = 0; - pPager->pAll = 0; - pPager->nHash = 0; - sqliteFree(pPager->aHash); - pPager->nPage = 0; - pPager->aHash = 0; - pPager->nRef = 0; -} - -/* -** This routine ends a transaction. A transaction is ended by either -** a COMMIT or a ROLLBACK. -** -** When this routine is called, the pager has the journal file open and -** a RESERVED or EXCLUSIVE lock on the database. This routine will release -** the database lock and acquires a SHARED lock in its place if that is -** the appropriate thing to do. Release locks usually is appropriate, -** unless we are in exclusive access mode or unless this is a -** COMMIT AND BEGIN or ROLLBACK AND BEGIN operation. -** -** The journal file is either deleted or truncated. -** -** TODO: Consider keeping the journal file open for temporary databases. -** This might give a performance improvement on windows where opening -** a file is an expensive operation. -*/ -static int pager_end_transaction(Pager *pPager){ - PgHdr *pPg; - int rc = SQLITE_OK; - int rc2 = SQLITE_OK; - assert( !MEMDB ); - if( pPager->statestmtOpen && !pPager->exclusiveMode ){ - sqlite3OsClose(&pPager->stfd); - pPager->stmtOpen = 0; - } - if( pPager->journalOpen ){ - if( pPager->exclusiveMode - && (rc = sqlite3OsTruncate(pPager->jfd, 0))==SQLITE_OK ){; - sqlite3OsSeek(pPager->jfd, 0); - pPager->journalOff = 0; - pPager->journalStarted = 0; - }else{ - sqlite3OsClose(&pPager->jfd); - pPager->journalOpen = 0; - if( rc==SQLITE_OK ){ - rc = sqlite3OsDelete(pPager->zJournal); - } - } - sqliteFree( pPager->aInJournal ); - pPager->aInJournal = 0; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - pPg->inJournal = 0; - pPg->dirty = 0; - pPg->needSync = 0; - pPg->alwaysRollback = 0; -#ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); -#endif - } - pPager->pDirty = 0; - pPager->dirtyCache = 0; - pPager->nRec = 0; - }else{ - assert( pPager->aInJournal==0 ); - assert( pPager->dirtyCache==0 || pPager->useJournal==0 ); - } - - if( !pPager->exclusiveMode ){ - rc2 = sqlite3OsUnlock(pPager->fd, SHARED_LOCK); - pPager->state = PAGER_SHARED; - }else if( pPager->state==PAGER_SYNCED ){ - pPager->state = PAGER_EXCLUSIVE; - } - pPager->origDbSize = 0; - pPager->setMaster = 0; - pPager->needSync = 0; - pPager->pFirstSynced = pPager->pFirst; - pPager->dbSize = -1; - - return (rc==SQLITE_OK?rc2:rc); -} - -/* -** Compute and return a checksum for the page of data. -** -** This is not a real checksum. It is really just the sum of the -** random initial value and the page number. 
We experimented with -** a checksum of the entire data, but that was found to be too slow. -** -** Note that the page number is stored at the beginning of data and -** the checksum is stored at the end. This is important. If journal -** corruption occurs due to a power failure, the most likely scenario -** is that one end or the other of the record will be changed. It is -** much less likely that the two ends of the journal record will be -** correct and the middle be corrupt. Thus, this "checksum" scheme, -** though fast and simple, catches the mostly likely kind of corruption. -** -** FIX ME: Consider adding every 200th (or so) byte of the data to the -** checksum. That way if a single page spans 3 or more disk sectors and -** only the middle sector is corrupt, we will still have a reasonable -** chance of failing the checksum and thus detecting the problem. -*/ -static u32 pager_cksum(Pager *pPager, const u8 *aData){ - u32 cksum = pPager->cksumInit; - int i = pPager->pageSize-200; - while( i>0 ){ - cksum += aData[i]; - i -= 200; - } - return cksum; -} - -/* Forward declaration */ -static void makeClean(PgHdr*); - -/* -** Read a single page from the journal file opened on file descriptor -** jfd. Playback this one page. -** -** If useCksum==0 it means this journal does not use checksums. Checksums -** are not used in statement journals because statement journals do not -** need to survive power failures. -*/ -static int pager_playback_one_page(Pager *pPager, OsFile *jfd, int useCksum){ - int rc; - PgHdr *pPg; /* An existing page in the cache */ - Pgno pgno; /* The page number of a page in journal */ - u32 cksum; /* Checksum used for sanity checking */ - u8 *aData = (u8 *)pPager->pTmpSpace; /* Temp storage for a page */ - - /* useCksum should be true for the main journal and false for - ** statement journals. Verify that this is always the case - */ - assert( jfd == (useCksum ? pPager->jfd : pPager->stfd) ); - assert( aData ); - - rc = read32bits(jfd, &pgno); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsRead(jfd, aData, pPager->pageSize); - if( rc!=SQLITE_OK ) return rc; - pPager->journalOff += pPager->pageSize + 4; - - /* Sanity checking on the page. This is more important that I originally - ** thought. If a power failure occurs while the journal is being written, - ** it could cause invalid data to be written into the journal. We need to - ** detect this invalid data (with high probability) and ignore it. - */ - if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ - return SQLITE_DONE; - } - if( pgno>(unsigned)pPager->dbSize ){ - return SQLITE_OK; - } - if( useCksum ){ - rc = read32bits(jfd, &cksum); - if( rc ) return rc; - pPager->journalOff += 4; - if( pager_cksum(pPager, aData)!=cksum ){ - return SQLITE_DONE; - } - } - - assert( pPager->state==PAGER_RESERVED || pPager->state>=PAGER_EXCLUSIVE ); - - /* If the pager is in RESERVED state, then there must be a copy of this - ** page in the pager cache. In this case just update the pager cache, - ** not the database file. The page is left marked dirty in this case. - ** - ** An exception to the above rule: If the database is in no-sync mode - ** and a page is moved during an incremental vacuum then the page may - ** not be in the pager cache. Later: if a malloc() or IO error occurs - ** during a Movepage() call, then the page may not be in the cache - ** either. So the condition described in the above paragraph is not - ** assert()able. - ** - ** If in EXCLUSIVE state, then we update the pager cache if it exists - ** and the main file. 
The page is then marked not dirty. - ** - ** Ticket #1171: The statement journal might contain page content that is - ** different from the page content at the start of the transaction. - ** This occurs when a page is changed prior to the start of a statement - ** then changed again within the statement. When rolling back such a - ** statement we must not write to the original database unless we know - ** for certain that original page contents are synced into the main rollback - ** journal. Otherwise, a power loss might leave modified data in the - ** database file without an entry in the rollback journal that can - ** restore the database to its original form. Two conditions must be - ** met before writing to the database files. (1) the database must be - ** locked. (2) we know that the original page content is fully synced - ** in the main journal either because the page is not in cache or else - ** the page is marked as needSync==0. - */ - pPg = pager_lookup(pPager, pgno); - PAGERTRACE4("PLAYBACK %d page %d hash(%08x)\n", - PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, aData)); - if( pPager->state>=PAGER_EXCLUSIVE && (pPg==0 || pPg->needSync==0) ){ - rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->fd, aData, pPager->pageSize); - } - if( pPg ){ - makeClean(pPg); - } - } - if( pPg ){ - /* No page should ever be explicitly rolled back that is in use, except - ** for page 1 which is held in use in order to keep the lock on the - ** database active. However such a page may be rolled back as a result - ** of an internal error resulting in an automatic call to - ** sqlite3PagerRollback(). - */ - void *pData; - /* assert( pPg->nRef==0 || pPg->pgno==1 ); */ - pData = PGHDR_TO_DATA(pPg); - memcpy(pData, aData, pPager->pageSize); - if( pPager->xReiniter ){ - pPager->xReiniter(pPg, pPager->pageSize); - } -#ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); -#endif - /* If this was page 1, then restore the value of Pager.dbFileVers. - ** Do this before any decoding. */ - if( pgno==1 ){ - memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); - } - - /* Decode the page just read from disk */ - CODEC1(pPager, pData, pPg->pgno, 3); - } - return rc; -} - -/* -** Parameter zMaster is the name of a master journal file. A single journal -** file that referred to the master journal file has just been rolled back. -** This routine checks if it is possible to delete the master journal file, -** and does so if it is. -** -** The master journal file contains the names of all child journals. -** To tell if a master journal can be deleted, check to each of the -** children. If all children are either missing or do not refer to -** a different master journal, then this master journal can be deleted. -*/ -static int pager_delmaster(const char *zMaster){ - int rc; - int master_open = 0; - OsFile *master = 0; - char *zMasterJournal = 0; /* Contents of master journal file */ - i64 nMasterJournal; /* Size of master journal file */ - - /* Open the master journal file exclusively in case some other process - ** is running this routine also. Not that it makes too much difference. 
- */ - rc = sqlite3OsOpenReadOnly(zMaster, &master); - assert( rc!=SQLITE_OK || master ); - if( rc!=SQLITE_OK ) goto delmaster_out; - master_open = 1; - rc = sqlite3OsFileSize(master, &nMasterJournal); - if( rc!=SQLITE_OK ) goto delmaster_out; - - if( nMasterJournal>0 ){ - char *zJournal; - char *zMasterPtr = 0; - - /* Load the entire master journal file into space obtained from - ** sqliteMalloc() and pointed to by zMasterJournal. - */ - zMasterJournal = (char *)sqliteMalloc(nMasterJournal); - if( !zMasterJournal ){ - rc = SQLITE_NOMEM; - goto delmaster_out; - } - rc = sqlite3OsRead(master, zMasterJournal, nMasterJournal); - if( rc!=SQLITE_OK ) goto delmaster_out; - - zJournal = zMasterJournal; - while( (zJournal-zMasterJournal)state>=PAGER_EXCLUSIVE ){ - rc = sqlite3OsTruncate(pPager->fd, pPager->pageSize*(i64)nPage); - } - if( rc==SQLITE_OK ){ - pPager->dbSize = nPage; - pager_truncate_cache(pPager); - } - return rc; -} - -/* -** Set the sectorSize for the given pager. -** -** The sector size is the larger of the sector size reported -** by sqlite3OsSectorSize() and the pageSize. -*/ -static void setSectorSize(Pager *pPager){ - pPager->sectorSize = sqlite3OsSectorSize(pPager->fd); - if( pPager->sectorSizepageSize ){ - pPager->sectorSize = pPager->pageSize; - } -} - -/* -** Playback the journal and thus restore the database file to -** the state it was in before we started making changes. -** -** The journal file format is as follows: -** -** (1) 8 byte prefix. A copy of aJournalMagic[]. -** (2) 4 byte big-endian integer which is the number of valid page records -** in the journal. If this value is 0xffffffff, then compute the -** number of page records from the journal size. -** (3) 4 byte big-endian integer which is the initial value for the -** sanity checksum. -** (4) 4 byte integer which is the number of pages to truncate the -** database to during a rollback. -** (5) 4 byte integer which is the number of bytes in the master journal -** name. The value may be zero (indicate that there is no master -** journal.) -** (6) N bytes of the master journal name. The name will be nul-terminated -** and might be shorter than the value read from (5). If the first byte -** of the name is \000 then there is no master journal. The master -** journal name is stored in UTF-8. -** (7) Zero or more pages instances, each as follows: -** + 4 byte page number. -** + pPager->pageSize bytes of data. -** + 4 byte checksum -** -** When we speak of the journal header, we mean the first 6 items above. -** Each entry in the journal is an instance of the 7th item. -** -** Call the value from the second bullet "nRec". nRec is the number of -** valid page entries in the journal. In most cases, you can compute the -** value of nRec from the size of the journal file. But if a power -** failure occurred while the journal was being written, it could be the -** case that the size of the journal file had already been increased but -** the extra entries had not yet made it safely to disk. In such a case, -** the value of nRec computed from the file size would be too large. For -** that reason, we always use the nRec value in the header. -** -** If the nRec value is 0xffffffff it means that nRec should be computed -** from the file size. This value is used when the user selects the -** no-sync option for the journal. A power failure could lead to corruption -** in this case. But for things like temporary table (which will be -** deleted when the power is restored) we don't care. 
-** -** If the file opened as the journal file is not a well-formed -** journal file then all pages up to the first corrupted page are rolled -** back (or no pages if the journal header is corrupted). The journal file -** is then deleted and SQLITE_OK returned, just as if no corruption had -** been encountered. -** -** If an I/O or malloc() error occurs, the journal-file is not deleted -** and an error code is returned. -*/ -static int pager_playback(Pager *pPager, int isHot){ - i64 szJ; /* Size of the journal file in bytes */ - u32 nRec; /* Number of Records in the journal */ - int i; /* Loop counter */ - Pgno mxPg = 0; /* Size of the original file in pages */ - int rc; /* Result code of a subroutine */ - char *zMaster = 0; /* Name of master journal file if any */ - - /* Figure out how many records are in the journal. Abort early if - ** the journal is empty. - */ - assert( pPager->journalOpen ); - rc = sqlite3OsFileSize(pPager->jfd, &szJ); - if( rc!=SQLITE_OK || szJ==0 ){ - goto end_playback; - } - - /* Read the master journal name from the journal, if it is present. - ** If a master journal file name is specified, but the file is not - ** present on disk, then the journal is not hot and does not need to be - ** played back. - */ - rc = readMasterJournal(pPager->jfd, &zMaster); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK || (zMaster && !sqlite3OsFileExists(zMaster)) ){ - sqliteFree(zMaster); - zMaster = 0; - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - goto end_playback; - } - sqlite3OsSeek(pPager->jfd, 0); - pPager->journalOff = 0; - - /* This loop terminates either when the readJournalHdr() call returns - ** SQLITE_DONE or an IO error occurs. */ - while( 1 ){ - - /* Read the next journal header from the journal file. If there are - ** not enough bytes left in the journal file for a complete header, or - ** it is corrupted, then a process must of failed while writing it. - ** This indicates nothing more needs to be rolled back. - */ - rc = readJournalHdr(pPager, szJ, &nRec, &mxPg); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - } - goto end_playback; - } - - /* If nRec is 0xffffffff, then this journal was created by a process - ** working in no-sync mode. This means that the rest of the journal - ** file consists of pages, there are no more journal headers. Compute - ** the value of nRec based on this assumption. - */ - if( nRec==0xffffffff ){ - assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); - nRec = (szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager); - } - - /* If nRec is 0 and this rollback is of a transaction created by this - ** process and if this is the final header in the journal, then it means - ** that this part of the journal was being filled but has not yet been - ** synced to disk. Compute the number of pages based on the remaining - ** size of the file. - ** - ** The third term of the test was added to fix ticket #2565. - */ - if( nRec==0 && !isHot && - pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ - nRec = (szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager); - } - - /* If this is the first header read from the journal, truncate the - ** database file back to it's original size. - */ - if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ - rc = pager_truncate(pPager, mxPg); - if( rc!=SQLITE_OK ){ - goto end_playback; - } - } - - /* Copy original pages out of the journal and back into the database file. 
- */ - for(i=0; i<nRec; i++){ - rc = pager_playback_one_page(pPager, pPager->jfd, 1); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - pPager->journalOff = szJ; - break; - }else{ - goto end_playback; - } - } - } - } - /*NOTREACHED*/ - assert( 0 ); - -end_playback: - if( rc==SQLITE_OK ){ - rc = pager_end_transaction(pPager); - } - if( zMaster ){ - /* If there was a master journal and this routine will return success, - ** see if it is possible to delete the master journal. - */ - if( rc==SQLITE_OK ){ - rc = pager_delmaster(zMaster); - } - sqliteFree(zMaster); - } - - /* The Pager.sectorSize variable may have been updated while rolling - ** back a journal created by a process with a different sector size - ** value. Reset it to the correct value for this process. - */ - setSectorSize(pPager); - return rc; -} - -/* -** Playback the statement journal. -** -** This is similar to playing back the transaction journal but with -** a few extra twists. -** -** (1) The number of pages in the database file at the start of -** the statement is stored in pPager->stmtSize, not in the -** journal file itself. -** -** (2) In addition to playing back the statement journal, also -** playback all pages of the transaction journal beginning -** at offset pPager->stmtJSize. -*/ -static int pager_stmt_playback(Pager *pPager){ - i64 szJ; /* Size of the full journal */ - i64 hdrOff; - int nRec; /* Number of Records */ - int i; /* Loop counter */ - int rc; - - szJ = pPager->journalOff; -#ifndef NDEBUG - { - i64 os_szJ; - rc = sqlite3OsFileSize(pPager->jfd, &os_szJ); - if( rc!=SQLITE_OK ) return rc; - assert( szJ==os_szJ ); - } -#endif - - /* Set hdrOff to be the offset just after the end of the last journal - ** page written before the first journal-header for this statement - ** transaction was written, or the end of the file if no journal - ** header was written. - */ - hdrOff = pPager->stmtHdrOff; - assert( pPager->fullSync || !hdrOff ); - if( !hdrOff ){ - hdrOff = szJ; - } - - /* Truncate the database back to its original size. - */ - rc = pager_truncate(pPager, pPager->stmtSize); - assert( pPager->state>=PAGER_SHARED ); - - /* Figure out how many records are in the statement journal. - */ - assert( pPager->stmtInUse && pPager->journalOpen ); - sqlite3OsSeek(pPager->stfd, 0); - nRec = pPager->stmtNRec; - - /* Copy original pages out of the statement journal and back into the - ** database file. Note that the statement journal omits checksums from - ** each record since power-failure recovery is not important to statement - ** journals. - */ - for(i=nRec-1; i>=0; i--){ - rc = pager_playback_one_page(pPager, pPager->stfd, 0); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK ) goto end_stmt_playback; - } - - /* Now roll some pages back from the transaction journal. Pager.stmtJSize - ** was the size of the journal file when this statement was started, so - ** everything after that needs to be rolled back, either into the - ** database, the memory cache, or both. - ** - ** If it is not zero, then Pager.stmtHdrOff is the offset to the start - ** of the first journal header written during this statement transaction.
- */ - rc = sqlite3OsSeek(pPager->jfd, pPager->stmtJSize); - if( rc!=SQLITE_OK ){ - goto end_stmt_playback; - } - pPager->journalOff = pPager->stmtJSize; - pPager->cksumInit = pPager->stmtCksum; - while( pPager->journalOff < hdrOff ){ - rc = pager_playback_one_page(pPager, pPager->jfd, 1); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK ) goto end_stmt_playback; - } - - while( pPager->journalOff < szJ ){ - u32 nJRec; /* Number of Journal Records */ - u32 dummy; - rc = readJournalHdr(pPager, szJ, &nJRec, &dummy); - if( rc!=SQLITE_OK ){ - assert( rc!=SQLITE_DONE ); - goto end_stmt_playback; - } - if( nJRec==0 ){ - nJRec = (szJ - pPager->journalOff) / (pPager->pageSize+8); - } - for(i=nJRec-1; i>=0 && pPager->journalOff < szJ; i--){ - rc = pager_playback_one_page(pPager, pPager->jfd, 1); - assert( rc!=SQLITE_DONE ); - if( rc!=SQLITE_OK ) goto end_stmt_playback; - } - } - - pPager->journalOff = szJ; - -end_stmt_playback: - if( rc==SQLITE_OK) { - pPager->journalOff = szJ; - /* pager_reload_cache(pPager); */ - } - return rc; -} - -/* -** Change the maximum number of in-memory pages that are allowed. -*/ -void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ - if( mxPage>10 ){ - pPager->mxPage = mxPage; - }else{ - pPager->mxPage = 10; - } -} - -/* -** Adjust the robustness of the database to damage due to OS crashes -** or power failures by changing the number of syncs()s when writing -** the rollback journal. There are three levels: -** -** OFF sqlite3OsSync() is never called. This is the default -** for temporary and transient files. -** -** NORMAL The journal is synced once before writes begin on the -** database. This is normally adequate protection, but -** it is theoretically possible, though very unlikely, -** that an inopertune power failure could leave the journal -** in a state which would cause damage to the database -** when it is rolled back. -** -** FULL The journal is synced twice before writes begin on the -** database (with some additional information - the nRec field -** of the journal header - being written in between the two -** syncs). If we assume that writing a -** single disk sector is atomic, then this mode provides -** assurance that the journal will not be corrupted to the -** point of causing damage to the database during rollback. -** -** Numeric values associated with these states are OFF==1, NORMAL=2, -** and FULL=3. -*/ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -void sqlite3PagerSetSafetyLevel(Pager *pPager, int level, int full_fsync){ - pPager->noSync = level==1 || pPager->tempFile; - pPager->fullSync = level==3 && !pPager->tempFile; - pPager->full_fsync = full_fsync; - if( pPager->noSync ) pPager->needSync = 0; -} -#endif - -/* -** The following global variable is incremented whenever the library -** attempts to open a temporary file. This information is used for -** testing and analysis only. -*/ -#ifdef SQLITE_TEST -int sqlite3_opentemp_count = 0; -#endif - -/* -** Open a temporary file. -** -** Write the file descriptor into *fd. Return SQLITE_OK on success or some -** other error code if we fail. -** -** The OS will automatically delete the temporary file when it is -** closed. 
-*/ -static int sqlite3PagerOpentemp(OsFile **pFd){ - int cnt = 8; - int rc; - char zFile[SQLITE_TEMPNAME_SIZE]; - -#ifdef SQLITE_TEST - sqlite3_opentemp_count++; /* Used for testing and analysis only */ -#endif - do{ - cnt--; - sqlite3OsTempFileName(zFile); - rc = sqlite3OsOpenExclusive(zFile, pFd, 1); - assert( rc!=SQLITE_OK || *pFd ); - }while( cnt>0 && rc!=SQLITE_OK && rc!=SQLITE_NOMEM ); - return rc; -} - -/* -** Create a new page cache and put a pointer to the page cache in *ppPager. -** The file to be cached need not exist. The file is not locked until -** the first call to sqlite3PagerGet() and is only held open until the -** last page is released using sqlite3PagerUnref(). -** -** If zFilename is NULL then a randomly-named temporary file is created -** and used as the file to be cached. The file will be deleted -** automatically when it is closed. -** -** If zFilename is ":memory:" then all information is held in cache. -** It is never written to disk. This can be used to implement an -** in-memory database. -*/ -int sqlite3PagerOpen( - Pager **ppPager, /* Return the Pager structure here */ - const char *zFilename, /* Name of the database file to open */ - int nExtra, /* Extra bytes append to each in-memory page */ - int flags /* flags controlling this file */ -){ - Pager *pPager = 0; - char *zFullPathname = 0; - int nameLen; /* Compiler is wrong. This is always initialized before use */ - OsFile *fd = 0; - int rc = SQLITE_OK; - int i; - int tempFile = 0; - int memDb = 0; - int readOnly = 0; - int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; - int noReadlock = (flags & PAGER_NO_READLOCK)!=0; - char zTemp[SQLITE_TEMPNAME_SIZE]; -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to - ** malloc() must have already been made by this thread before it gets - ** to this point. This means the ThreadData must have been allocated already - ** so that ThreadData.nAlloc can be set. It would be nice to assert - ** that ThreadData.nAlloc is non-zero, but alas this breaks test cases - ** written to invoke the pager directly. - */ - ThreadData *pTsd = sqlite3ThreadData(); - assert( pTsd ); -#endif - - /* We used to test if malloc() had already failed before proceeding. - ** But the way this function is used in SQLite means that can never - ** happen. Furthermore, if the malloc-failed flag is already set, - ** either the call to sqliteStrDup() or sqliteMalloc() below will - ** fail shortly and SQLITE_NOMEM returned anyway. - */ - *ppPager = 0; - - /* Open the pager file and set zFullPathname to point at malloc()ed - ** memory containing the complete filename (i.e. including the directory). - */ - if( zFilename && zFilename[0] ){ -#ifndef SQLITE_OMIT_MEMORYDB - if( strcmp(zFilename,":memory:")==0 ){ - memDb = 1; - zFullPathname = sqliteStrDup(""); - }else -#endif - { - zFullPathname = sqlite3OsFullPathname(zFilename); - if( zFullPathname ){ - rc = sqlite3OsOpenReadWrite(zFullPathname, &fd, &readOnly); - assert( rc!=SQLITE_OK || fd ); - } - } - }else{ - rc = sqlite3PagerOpentemp(&fd); - sqlite3OsTempFileName(zTemp); - zFilename = zTemp; - zFullPathname = sqlite3OsFullPathname(zFilename); - if( rc==SQLITE_OK ){ - tempFile = 1; - } - } - - /* Allocate the Pager structure. As part of the same allocation, allocate - ** space for the full paths of the file, directory and journal - ** (Pager.zFilename, Pager.zDirectory and Pager.zJournal). 
- */ - if( zFullPathname ){ - nameLen = strlen(zFullPathname); - pPager = sqliteMalloc( sizeof(*pPager) + nameLen*3 + 30 ); - if( pPager && rc==SQLITE_OK ){ - pPager->pTmpSpace = (char *)sqliteMallocRaw(SQLITE_DEFAULT_PAGE_SIZE); - } - } - - - /* If an error occured in either of the blocks above, free the memory - ** pointed to by zFullPathname, free the Pager structure and close the - ** file. Since the pager is not allocated there is no need to set - ** any Pager.errMask variables. - */ - if( !pPager || !zFullPathname || !pPager->pTmpSpace || rc!=SQLITE_OK ){ - sqlite3OsClose(&fd); - sqliteFree(zFullPathname); - sqliteFree(pPager); - return ((rc==SQLITE_OK)?SQLITE_NOMEM:rc); - } - - PAGERTRACE3("OPEN %d %s\n", FILEHANDLEID(fd), zFullPathname); - IOTRACE(("OPEN %p %s\n", pPager, zFullPathname)) - pPager->zFilename = (char*)&pPager[1]; - pPager->zDirectory = &pPager->zFilename[nameLen+1]; - pPager->zJournal = &pPager->zDirectory[nameLen+1]; - memcpy(pPager->zFilename, zFullPathname, nameLen+1); - memcpy(pPager->zDirectory, zFullPathname, nameLen+1); - - for(i=nameLen; i>0 && pPager->zDirectory[i-1]!='/'; i--){} - if( i>0 ) pPager->zDirectory[i-1] = 0; - memcpy(pPager->zJournal, zFullPathname,nameLen); - sqliteFree(zFullPathname); - memcpy(&pPager->zJournal[nameLen], "-journal",sizeof("-journal")); - pPager->fd = fd; - /* pPager->journalOpen = 0; */ - pPager->useJournal = useJournal && !memDb; - pPager->noReadlock = noReadlock && readOnly; - /* pPager->stmtOpen = 0; */ - /* pPager->stmtInUse = 0; */ - /* pPager->nRef = 0; */ - pPager->dbSize = memDb-1; - pPager->pageSize = SQLITE_DEFAULT_PAGE_SIZE; - /* pPager->stmtSize = 0; */ - /* pPager->stmtJSize = 0; */ - /* pPager->nPage = 0; */ - pPager->mxPage = 100; - pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; - assert( PAGER_UNLOCK==0 ); - /* pPager->state = PAGER_UNLOCK; */ - /* pPager->errMask = 0; */ - pPager->tempFile = tempFile; - assert( tempFile==PAGER_LOCKINGMODE_NORMAL - || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); - assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); - pPager->exclusiveMode = tempFile; - pPager->memDb = memDb; - pPager->readOnly = readOnly; - /* pPager->needSync = 0; */ - pPager->noSync = pPager->tempFile || !useJournal; - pPager->fullSync = (pPager->noSync?0:1); - /* pPager->pFirst = 0; */ - /* pPager->pFirstSynced = 0; */ - /* pPager->pLast = 0; */ - pPager->nExtra = FORCE_ALIGNMENT(nExtra); - assert(fd||memDb); - if( !memDb ){ - setSectorSize(pPager); - } - /* pPager->pBusyHandler = 0; */ - /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ - *ppPager = pPager; -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - pPager->pNext = pTsd->pPager; - pTsd->pPager = pPager; -#endif - return SQLITE_OK; -} - -/* -** Set the busy handler function. -*/ -void sqlite3PagerSetBusyhandler(Pager *pPager, BusyHandler *pBusyHandler){ - pPager->pBusyHandler = pBusyHandler; -} - -/* -** Set the destructor for this pager. If not NULL, the destructor is called -** when the reference count on each page reaches zero. The destructor can -** be used to clean up information in the extra segment appended to each page. -** -** The destructor is not called as a result sqlite3PagerClose(). -** Destructors are only called by sqlite3PagerUnref(). -*/ -void sqlite3PagerSetDestructor(Pager *pPager, void (*xDesc)(DbPage*,int)){ - pPager->xDestructor = xDesc; -} - -/* -** Set the reinitializer for this pager. If not NULL, the reinitializer -** is called when the content of a page in cache is restored to its original -** value as a result of a rollback. 
The callback gives higher-level code -** an opportunity to restore the EXTRA section to agree with the restored -** page data. -*/ -void sqlite3PagerSetReiniter(Pager *pPager, void (*xReinit)(DbPage*,int)){ - pPager->xReiniter = xReinit; -} - -/* -** Set the page size. Return the new size. If the suggest new page -** size is inappropriate, then an alternative page size is selected -** and returned. -*/ -int sqlite3PagerSetPagesize(Pager *pPager, int pageSize){ - assert( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE ); - if( !pPager->memDb && pPager->nRef==0 ){ - pager_reset(pPager); - pPager->pageSize = pageSize; - pPager->pTmpSpace = sqlite3ReallocOrFree(pPager->pTmpSpace, pageSize); - } - return pPager->pageSize; -} - -/* -** Attempt to set the maximum database page count if mxPage is positive. -** Make no changes if mxPage is zero or negative. And never reduce the -** maximum page count below the current size of the database. -** -** Regardless of mxPage, return the current maximum page count. -*/ -int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ - if( mxPage>0 ){ - pPager->mxPgno = mxPage; - } - sqlite3PagerPagecount(pPager); - return pPager->mxPgno; -} - -/* -** The following set of routines are used to disable the simulated -** I/O error mechanism. These routines are used to avoid simulated -** errors in places where we do not care about errors. -** -** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops -** and generate no code. -*/ -#ifdef SQLITE_TEST -extern int sqlite3_io_error_pending; -extern int sqlite3_io_error_hit; -static int saved_cnt; -void disable_simulated_io_errors(void){ - saved_cnt = sqlite3_io_error_pending; - sqlite3_io_error_pending = -1; -} -void enable_simulated_io_errors(void){ - sqlite3_io_error_pending = saved_cnt; -} -#else -# define disable_simulated_io_errors() -# define enable_simulated_io_errors() -#endif - -/* -** Read the first N bytes from the beginning of the file into memory -** that pDest points to. -** -** No error checking is done. The rational for this is that this function -** may be called even if the file does not exist or contain a header. In -** these cases sqlite3OsRead() will return an error, to which the correct -** response is to zero the memory at pDest and continue. A real IO error -** will presumably recur and be picked up later (Todo: Think about this). -*/ -int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ - int rc = SQLITE_OK; - memset(pDest, 0, N); - if( MEMDB==0 ){ - disable_simulated_io_errors(); - sqlite3OsSeek(pPager->fd, 0); - enable_simulated_io_errors(); - IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) - rc = sqlite3OsRead(pPager->fd, pDest, N); - if( rc==SQLITE_IOERR_SHORT_READ ){ - rc = SQLITE_OK; - } - } - return rc; -} - -/* -** Return the total number of pages in the disk file associated with -** pPager. -** -** If the PENDING_BYTE lies on the page directly after the end of the -** file, then consider this page part of the file too. For example, if -** PENDING_BYTE is byte 4096 (the first byte of page 5) and the size of the -** file is 4096 bytes, 5 is returned instead of 4. 
-*/ -int sqlite3PagerPagecount(Pager *pPager){ - i64 n; - int rc; - assert( pPager!=0 ); - if( pPager->errCode ){ - return 0; - } - if( pPager->dbSize>=0 ){ - n = pPager->dbSize; - } else { - if( (rc = sqlite3OsFileSize(pPager->fd, &n))!=SQLITE_OK ){ - pager_error(pPager, rc); - return 0; - } - if( n>0 && npageSize ){ - n = 1; - }else{ - n /= pPager->pageSize; - } - if( pPager->state!=PAGER_UNLOCK ){ - pPager->dbSize = n; - } - } - if( n==(PENDING_BYTE/pPager->pageSize) ){ - n++; - } - if( n>pPager->mxPgno ){ - pPager->mxPgno = n; - } - return n; -} - - -#ifndef SQLITE_OMIT_MEMORYDB -/* -** Clear a PgHistory block -*/ -static void clearHistory(PgHistory *pHist){ - sqliteFree(pHist->pOrig); - sqliteFree(pHist->pStmt); - pHist->pOrig = 0; - pHist->pStmt = 0; -} -#else -#define clearHistory(x) -#endif - -/* -** Forward declaration -*/ -static int syncJournal(Pager*); - -/* -** Unlink pPg from it's hash chain. Also set the page number to 0 to indicate -** that the page is not part of any hash chain. This is required because the -** sqlite3PagerMovepage() routine can leave a page in the -** pNextFree/pPrevFree list that is not a part of any hash-chain. -*/ -static void unlinkHashChain(Pager *pPager, PgHdr *pPg){ - if( pPg->pgno==0 ){ - assert( pPg->pNextHash==0 && pPg->pPrevHash==0 ); - return; - } - if( pPg->pNextHash ){ - pPg->pNextHash->pPrevHash = pPg->pPrevHash; - } - if( pPg->pPrevHash ){ - assert( pPager->aHash[pPg->pgno & (pPager->nHash-1)]!=pPg ); - pPg->pPrevHash->pNextHash = pPg->pNextHash; - }else{ - int h = pPg->pgno & (pPager->nHash-1); - pPager->aHash[h] = pPg->pNextHash; - } - if( MEMDB ){ - clearHistory(PGHDR_TO_HIST(pPg, pPager)); - } - pPg->pgno = 0; - pPg->pNextHash = pPg->pPrevHash = 0; -} - -/* -** Unlink a page from the free list (the list of all pages where nRef==0) -** and from its hash collision chain. -*/ -static void unlinkPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - - /* Keep the pFirstSynced pointer pointing at the first synchronized page */ - if( pPg==pPager->pFirstSynced ){ - PgHdr *p = pPg->pNextFree; - while( p && p->needSync ){ p = p->pNextFree; } - pPager->pFirstSynced = p; - } - - /* Unlink from the freelist */ - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg->pNextFree; - }else{ - assert( pPager->pFirst==pPg ); - pPager->pFirst = pPg->pNextFree; - } - if( pPg->pNextFree ){ - pPg->pNextFree->pPrevFree = pPg->pPrevFree; - }else{ - assert( pPager->pLast==pPg ); - pPager->pLast = pPg->pPrevFree; - } - pPg->pNextFree = pPg->pPrevFree = 0; - - /* Unlink from the pgno hash table */ - unlinkHashChain(pPager, pPg); -} - -/* -** This routine is used to truncate the cache when a database -** is truncated. Drop from the cache all pages whose pgno is -** larger than pPager->dbSize and is unreferenced. -** -** Referenced pages larger than pPager->dbSize are zeroed. -** -** Actually, at the point this routine is called, it would be -** an error to have a referenced page. But rather than delete -** that page and guarantee a subsequent segfault, it seems better -** to zero it and hope that we error out sanely. 
-*/ -static void pager_truncate_cache(Pager *pPager){ - PgHdr *pPg; - PgHdr **ppPg; - int dbSize = pPager->dbSize; - - ppPg = &pPager->pAll; - while( (pPg = *ppPg)!=0 ){ - if( pPg->pgno<=dbSize ){ - ppPg = &pPg->pNextAll; - }else if( pPg->nRef>0 ){ - memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); - ppPg = &pPg->pNextAll; - }else{ - *ppPg = pPg->pNextAll; - IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - unlinkPage(pPg); - makeClean(pPg); - sqliteFree(pPg); - pPager->nPage--; - } - } -} - -/* -** Try to obtain a lock on a file. Invoke the busy callback if the lock -** is currently not available. Repeat until the busy callback returns -** false or until the lock succeeds. -** -** Return SQLITE_OK on success and an error code if we cannot obtain -** the lock. -*/ -static int pager_wait_on_lock(Pager *pPager, int locktype){ - int rc; - - /* The OS lock values must be the same as the Pager lock values */ - assert( PAGER_SHARED==SHARED_LOCK ); - assert( PAGER_RESERVED==RESERVED_LOCK ); - assert( PAGER_EXCLUSIVE==EXCLUSIVE_LOCK ); - - /* If the file is currently unlocked then the size must be unknown */ - assert( pPager->state>=PAGER_SHARED || pPager->dbSize<0 || MEMDB ); - - if( pPager->state>=locktype ){ - rc = SQLITE_OK; - }else{ - do { - rc = sqlite3OsLock(pPager->fd, locktype); - }while( rc==SQLITE_BUSY && sqlite3InvokeBusyHandler(pPager->pBusyHandler) ); - if( rc==SQLITE_OK ){ - pPager->state = locktype; - IOTRACE(("LOCK %p %d\n", pPager, locktype)) - } - } - return rc; -} - -/* -** Truncate the file to the number of pages specified. -*/ -int sqlite3PagerTruncate(Pager *pPager, Pgno nPage){ - int rc; - assert( pPager->state>=PAGER_SHARED || MEMDB ); - sqlite3PagerPagecount(pPager); - if( pPager->errCode ){ - rc = pPager->errCode; - return rc; - } - if( nPage>=(unsigned)pPager->dbSize ){ - return SQLITE_OK; - } - if( MEMDB ){ - pPager->dbSize = nPage; - pager_truncate_cache(pPager); - return SQLITE_OK; - } - rc = syncJournal(pPager); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Get an exclusive lock on the database before truncating. */ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - return rc; - } - - rc = pager_truncate(pPager, nPage); - return rc; -} - -/* -** Shutdown the page cache. Free all memory and close all files. -** -** If a transaction was in progress when this routine is called, that -** transaction is rolled back. All outstanding pages are invalidated -** and their memory is freed. Any attempt to use a page associated -** with this page cache after this function returns will likely -** result in a coredump. -** -** This function always succeeds. If a transaction is active an attempt -** is made to roll it back. If an error occurs during the rollback -** a hot journal may be left in the filesystem but no error is returned -** to the caller. -*/ -int sqlite3PagerClose(Pager *pPager){ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* A malloc() cannot fail in sqlite3ThreadData() as one or more calls to - ** malloc() must have already been made by this thread before it gets - ** to this point. This means the ThreadData must have been allocated already - ** so that ThreadData.nAlloc can be set. 
- */ - ThreadData *pTsd = sqlite3ThreadData(); - assert( pPager ); - assert( pTsd && pTsd->nAlloc ); -#endif - - disable_simulated_io_errors(); - pPager->errCode = 0; - pPager->exclusiveMode = 0; - pager_reset(pPager); - pagerUnlockAndRollback(pPager); - enable_simulated_io_errors(); - PAGERTRACE2("CLOSE %d\n", PAGERID(pPager)); - IOTRACE(("CLOSE %p\n", pPager)) - assert( pPager->errCode || (pPager->journalOpen==0 && pPager->stmtOpen==0) ); - if( pPager->journalOpen ){ - sqlite3OsClose(&pPager->jfd); - } - sqliteFree(pPager->aInJournal); - if( pPager->stmtOpen ){ - sqlite3OsClose(&pPager->stfd); - } - sqlite3OsClose(&pPager->fd); - /* Temp files are automatically deleted by the OS - ** if( pPager->tempFile ){ - ** sqlite3OsDelete(pPager->zFilename); - ** } - */ - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - /* Remove the pager from the linked list of pagers starting at - ** ThreadData.pPager if memory-management is enabled. - */ - if( pPager==pTsd->pPager ){ - pTsd->pPager = pPager->pNext; - }else{ - Pager *pTmp; - for(pTmp = pTsd->pPager; pTmp->pNext!=pPager; pTmp=pTmp->pNext){} - pTmp->pNext = pPager->pNext; - } -#endif - sqliteFree(pPager->aHash); - sqliteFree(pPager->pTmpSpace); - sqliteFree(pPager); - return SQLITE_OK; -} - -#if !defined(NDEBUG) || defined(SQLITE_TEST) -/* -** Return the page number for the given page data. -*/ -Pgno sqlite3PagerPagenumber(DbPage *p){ - return p->pgno; -} -#endif - -/* -** The page_ref() function increments the reference count for a page. -** If the page is currently on the freelist (the reference count is zero) then -** remove it from the freelist. -** -** For non-test systems, page_ref() is a macro that calls _page_ref() -** online of the reference count is zero. For test systems, page_ref() -** is a real function so that we can set breakpoints and trace it. -*/ -static void _page_ref(PgHdr *pPg){ - if( pPg->nRef==0 ){ - /* The page is currently on the freelist. Remove it. */ - if( pPg==pPg->pPager->pFirstSynced ){ - PgHdr *p = pPg->pNextFree; - while( p && p->needSync ){ p = p->pNextFree; } - pPg->pPager->pFirstSynced = p; - } - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg->pNextFree; - }else{ - pPg->pPager->pFirst = pPg->pNextFree; - } - if( pPg->pNextFree ){ - pPg->pNextFree->pPrevFree = pPg->pPrevFree; - }else{ - pPg->pPager->pLast = pPg->pPrevFree; - } - pPg->pPager->nRef++; - } - pPg->nRef++; - REFINFO(pPg); -} -#ifdef SQLITE_DEBUG - static void page_ref(PgHdr *pPg){ - if( pPg->nRef==0 ){ - _page_ref(pPg); - }else{ - pPg->nRef++; - REFINFO(pPg); - } - } -#else -# define page_ref(P) ((P)->nRef==0?_page_ref(P):(void)(P)->nRef++) -#endif - -/* -** Increment the reference count for a page. The input pointer is -** a reference to the page data. -*/ -int sqlite3PagerRef(DbPage *pPg){ - page_ref(pPg); - return SQLITE_OK; -} - -/* -** Sync the journal. In other words, make sure all the pages that have -** been written to the journal have actually reached the surface of the -** disk. It is not safe to modify the original database file until after -** the journal has been synced. If the original database is modified before -** the journal is synced and a power failure occurs, the unsynced journal -** data would be lost and we would be unable to completely rollback the -** database changes. Database corruption would occur. -** -** This routine also updates the nRec field in the header of the journal. -** (See comments on the pager_playback() routine for additional information.) -** If the sync mode is FULL, two syncs will occur. 
First the whole journal -** is synced, then the nRec field is updated, then a second sync occurs. -** -** For temporary databases, we do not care if we are able to rollback -** after a power failure, so sync occurs. -** -** This routine clears the needSync field of every page current held in -** memory. -*/ -static int syncJournal(Pager *pPager){ - PgHdr *pPg; - int rc = SQLITE_OK; - - /* Sync the journal before modifying the main database - ** (assuming there is a journal and it needs to be synced.) - */ - if( pPager->needSync ){ - if( !pPager->tempFile ){ - assert( pPager->journalOpen ); - /* assert( !pPager->noSync ); // noSync might be set if synchronous - ** was turned off after the transaction was started. Ticket #615 */ -#ifndef NDEBUG - { - /* Make sure the pPager->nRec counter we are keeping agrees - ** with the nRec computed from the size of the journal file. - */ - i64 jSz; - rc = sqlite3OsFileSize(pPager->jfd, &jSz); - if( rc!=0 ) return rc; - assert( pPager->journalOff==jSz ); - } -#endif - { - /* Write the nRec value into the journal file header. If in - ** full-synchronous mode, sync the journal first. This ensures that - ** all data has really hit the disk before nRec is updated to mark - ** it as a candidate for rollback. - */ - if( pPager->fullSync ){ - PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); - IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, 0); - if( rc!=0 ) return rc; - } - rc = sqlite3OsSeek(pPager->jfd, - pPager->journalHdr + sizeof(aJournalMagic)); - if( rc ) return rc; - IOTRACE(("JHDR %p %lld %d\n", pPager, - pPager->journalHdr + sizeof(aJournalMagic), 4)) - rc = write32bits(pPager->jfd, pPager->nRec); - if( rc ) return rc; - - rc = sqlite3OsSeek(pPager->jfd, pPager->journalOff); - if( rc ) return rc; - } - PAGERTRACE2("SYNC journal of %d\n", PAGERID(pPager)); - IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, pPager->full_fsync); - if( rc!=0 ) return rc; - pPager->journalStarted = 1; - } - pPager->needSync = 0; - - /* Erase the needSync flag from every page. - */ - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - pPg->needSync = 0; - } - pPager->pFirstSynced = pPager->pFirst; - } - -#ifndef NDEBUG - /* If the Pager.needSync flag is clear then the PgHdr.needSync - ** flag must also be clear for all pages. Verify that this - ** invariant is true. - */ - else{ - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - assert( pPg->needSync==0 ); - } - assert( pPager->pFirstSynced==pPager->pFirst ); - } -#endif - - return rc; -} - -/* -** Merge two lists of pages connected by pDirty and in pgno order. -** Do not both fixing the pPrevDirty pointers. -*/ -static PgHdr *merge_pagelist(PgHdr *pA, PgHdr *pB){ - PgHdr result, *pTail; - pTail = &result; - while( pA && pB ){ - if( pA->pgnopgno ){ - pTail->pDirty = pA; - pTail = pA; - pA = pA->pDirty; - }else{ - pTail->pDirty = pB; - pTail = pB; - pB = pB->pDirty; - } - } - if( pA ){ - pTail->pDirty = pA; - }else if( pB ){ - pTail->pDirty = pB; - }else{ - pTail->pDirty = 0; - } - return result.pDirty; -} - -/* -** Sort the list of pages in accending order by pgno. Pages are -** connected by pDirty pointers. The pPrevDirty pointers are -** corrupted by this sort. 
-*/ -#define N_SORT_BUCKET_ALLOC 25 -#define N_SORT_BUCKET 25 -#ifdef SQLITE_TEST - int sqlite3_pager_n_sort_bucket = 0; - #undef N_SORT_BUCKET - #define N_SORT_BUCKET \ - (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC) -#endif -static PgHdr *sort_pagelist(PgHdr *pIn){ - PgHdr *a[N_SORT_BUCKET_ALLOC], *p; - int i; - memset(a, 0, sizeof(a)); - while( pIn ){ - p = pIn; - pIn = p->pDirty; - p->pDirty = 0; - for(i=0; ipPager; - - /* At this point there may be either a RESERVED or EXCLUSIVE lock on the - ** database file. If there is already an EXCLUSIVE lock, the following - ** calls to sqlite3OsLock() are no-ops. - ** - ** Moving the lock from RESERVED to EXCLUSIVE actually involves going - ** through an intermediate state PENDING. A PENDING lock prevents new - ** readers from attaching to the database but is unsufficient for us to - ** write. The idea of a PENDING lock is to prevent new readers from - ** coming in while we wait for existing readers to clear. - ** - ** While the pager is in the RESERVED state, the original database file - ** is unchanged and we can rollback without having to playback the - ** journal into the original database file. Once we transition to - ** EXCLUSIVE, it means the database file has been changed and any rollback - ** will require a journal playback. - */ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - return rc; - } - - pList = sort_pagelist(pList); - while( pList ){ - assert( pList->dirty ); - rc = sqlite3OsSeek(pPager->fd, (pList->pgno-1)*(i64)pPager->pageSize); - if( rc ) return rc; - /* If there are dirty pages in the page cache with page numbers greater - ** than Pager.dbSize, this means sqlite3PagerTruncate() was called to - ** make the file smaller (presumably by auto-vacuum code). Do not write - ** any such pages to the file. - */ - if( pList->pgno<=pPager->dbSize ){ - char *pData = CODEC2(pPager, PGHDR_TO_DATA(pList), pList->pgno, 6); - PAGERTRACE4("STORE %d page %d hash(%08x)\n", - PAGERID(pPager), pList->pgno, pager_pagehash(pList)); - IOTRACE(("PGOUT %p %d\n", pPager, pList->pgno)); - rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize); - PAGER_INCR(sqlite3_pager_writedb_count); - PAGER_INCR(pPager->nWrite); - if( pList->pgno==1 ){ - memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); - } - } -#ifndef NDEBUG - else{ - PAGERTRACE3("NOSTORE %d page %d\n", PAGERID(pPager), pList->pgno); - } -#endif - if( rc ) return rc; - pList->dirty = 0; -#ifdef SQLITE_CHECK_PAGES - pList->pageHash = pager_pagehash(pList); -#endif - pList = pList->pDirty; - } - return SQLITE_OK; -} - -/* -** Collect every dirty page into a dirty list and -** return a pointer to the head of that list. All pages are -** collected even if they are still in use. -*/ -static PgHdr *pager_get_all_dirty_pages(Pager *pPager){ - return pPager->pDirty; -} - -/* -** Return TRUE if there is a hot journal on the given pager. -** A hot journal is one that needs to be played back. -** -** If the current size of the database file is 0 but a journal file -** exists, that is probably an old journal left over from a prior -** database with the same name. Just delete the journal. 
-*/ -static int hasHotJournal(Pager *pPager){ - if( !pPager->useJournal ) return 0; - if( !sqlite3OsFileExists(pPager->zJournal) ){ - return 0; - } - if( sqlite3OsCheckReservedLock(pPager->fd) ){ - return 0; - } - if( sqlite3PagerPagecount(pPager)==0 ){ - sqlite3OsDelete(pPager->zJournal); - return 0; - }else{ - return 1; - } -} - -/* -** Try to find a page in the cache that can be recycled. -** -** This routine may return SQLITE_IOERR, SQLITE_FULL or SQLITE_OK. It -** does not set the pPager->errCode variable. -*/ -static int pager_recycle(Pager *pPager, int syncOk, PgHdr **ppPg){ - PgHdr *pPg; - *ppPg = 0; - - assert(!MEMDB); - - /* Find a page to recycle. Try to locate a page that does not - ** require us to do an fsync() on the journal. - */ - pPg = pPager->pFirstSynced; - - /* If we could not find a page that does not require an fsync() - ** on the journal file then fsync the journal file. This is a - ** very slow operation, so we work hard to avoid it. But sometimes - ** it can't be helped. - */ - if( pPg==0 && pPager->pFirst && syncOk && !MEMDB){ - int rc = syncJournal(pPager); - if( rc!=0 ){ - return rc; - } - if( pPager->fullSync ){ - /* If in full-sync mode, write a new journal header into the - ** journal file. This is done to avoid ever modifying a journal - ** header that is involved in the rollback of pages that have - ** already been written to the database (in case the header is - ** trashed when the nRec field is updated). - */ - pPager->nRec = 0; - assert( pPager->journalOff > 0 ); - assert( pPager->doNotSync==0 ); - rc = writeJournalHdr(pPager); - if( rc!=0 ){ - return rc; - } - } - pPg = pPager->pFirst; - } - if( pPg==0 ){ - return SQLITE_OK; - } - - assert( pPg->nRef==0 ); - - /* Write the page to the database file if it is dirty. - */ - if( pPg->dirty ){ - int rc; - assert( pPg->needSync==0 ); - makeClean(pPg); - pPg->dirty = 1; - pPg->pDirty = 0; - rc = pager_write_pagelist( pPg ); - if( rc!=SQLITE_OK ){ - return rc; - } - } - assert( pPg->dirty==0 ); - - /* If the page we are recycling is marked as alwaysRollback, then - ** set the global alwaysRollback flag, thus disabling the - ** sqlite3PagerDontRollback() optimization for the rest of this transaction. - ** It is necessary to do this because the page marked alwaysRollback - ** might be reloaded at a later time but at that point we won't remember - ** that is was marked alwaysRollback. This means that all pages must - ** be marked as alwaysRollback from here on out. - */ - if( pPg->alwaysRollback ){ - IOTRACE(("ALWAYS_ROLLBACK %p\n", pPager)) - pPager->alwaysRollback = 1; - } - - /* Unlink the old page from the free list and the hash table - */ - unlinkPage(pPg); - assert( pPg->pgno==0 ); - - *ppPg = pPg; - return SQLITE_OK; -} - -/* -** This function is called to free superfluous dynamically allocated memory -** held by the pager system. Memory in use by any SQLite pager allocated -** by the current thread may be sqliteFree()ed. -** -** nReq is the number of bytes of memory required. Once this much has -** been released, the function returns. A negative value for nReq means -** free as much memory as possible. The return value is the total number -** of bytes of memory released. -*/ -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO) -int sqlite3PagerReleaseMemory(int nReq){ - const ThreadData *pTsdro = sqlite3ThreadDataReadOnly(); - int nReleased = 0; - int i; - - /* If the the global mutex is held, this subroutine becomes a - ** o-op; zero bytes of memory are freed. 
This is because - ** some of the code invoked by this function may also - ** try to obtain the mutex, resulting in a deadlock. - */ - if( sqlite3OsInMutex(0) ){ - return 0; - } - - /* Outermost loop runs for at most two iterations. First iteration we - ** try to find memory that can be released without calling fsync(). Second - ** iteration (which only runs if the first failed to free nReq bytes of - ** memory) is permitted to call fsync(). This is of course much more - ** expensive. - */ - for(i=0; i<=1; i++){ - - /* Loop through all the SQLite pagers opened by the current thread. */ - Pager *pPager = pTsdro->pPager; - for( ; pPager && (nReq<0 || nReleased<nReq); pPager=pPager->pNext){ - PgHdr *pPg; - int rc; - - if( MEMDB ){ - continue; - } - - /* For each pager, try to free as many pages as possible (without - ** calling fsync() if this is the first iteration of the outermost - ** loop). - */ - while( SQLITE_OK==(rc = pager_recycle(pPager, i, &pPg)) && pPg) { - /* We've found a page to free. At this point the page has been - ** removed from the page hash-table, free-list and synced-list - ** (pFirstSynced). It is still in the all pages (pAll) list. - ** Remove it from this list before freeing. - ** - ** Todo: Check the Pager.pStmt list to make sure this is Ok. It - ** probably is though. - */ - PgHdr *pTmp; - assert( pPg ); - if( pPg==pPager->pAll ){ - pPager->pAll = pPg->pNextAll; - }else{ - for( pTmp=pPager->pAll; pTmp->pNextAll!=pPg; pTmp=pTmp->pNextAll ){} - pTmp->pNextAll = pPg->pNextAll; - } - nReleased += sqliteAllocSize(pPg); - IOTRACE(("PGFREE %p %d\n", pPager, pPg->pgno)); - PAGER_INCR(sqlite3_pager_pgfree_count); - sqliteFree(pPg); - } - - if( rc!=SQLITE_OK ){ - /* An error occured whilst writing to the database file or - ** journal in pager_recycle(). The error is not returned to the - ** caller of this function. Instead, set the Pager.errCode variable. - ** The error will be returned to the user (or users, in the case - ** of a shared pager cache) of the pager for which the error occured. - */ - assert( (rc&0xff)==SQLITE_IOERR || rc==SQLITE_FULL ); - assert( pPager->state>=PAGER_RESERVED ); - pager_error(pPager, rc); - } - } - } - - return nReleased; -} -#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT && !SQLITE_OMIT_DISKIO */ - -/* -** Read the content of page pPg out of the database file. -*/ -static int readDbPage(Pager *pPager, PgHdr *pPg, Pgno pgno){ - int rc; - assert( MEMDB==0 ); - rc = sqlite3OsSeek(pPager->fd, (pgno-1)*(i64)pPager->pageSize); - if( rc==SQLITE_OK ){ - rc = sqlite3OsRead(pPager->fd, PGHDR_TO_DATA(pPg), - pPager->pageSize); - } - PAGER_INCR(sqlite3_pager_readdb_count); - PAGER_INCR(pPager->nRead); - IOTRACE(("PGIN %p %d\n", pPager, pgno)); - if( pgno==1 ){ - memcpy(&pPager->dbFileVers, &((u8*)PGHDR_TO_DATA(pPg))[24], - sizeof(pPager->dbFileVers)); - } - CODEC1(pPager, PGHDR_TO_DATA(pPg), pPg->pgno, 3); - PAGERTRACE4("FETCH %d page %d hash(%08x)\n", - PAGERID(pPager), pPg->pgno, pager_pagehash(pPg)); - return rc; -} - - -/* -** This function is called to obtain the shared lock required before -** data may be read from the pager cache. If the shared lock has already -** been obtained, this function is a no-op. -** -** Immediately after obtaining the shared lock (if required), this function -** checks for a hot-journal file. If one is found, an emergency rollback -** is performed immediately.
-*/ -static int pagerSharedLock(Pager *pPager){ - int rc = SQLITE_OK; - - if( pPager->state==PAGER_UNLOCK ){ - if( !MEMDB ){ - assert( pPager->nRef==0 ); - if( !pPager->noReadlock ){ - rc = pager_wait_on_lock(pPager, SHARED_LOCK); - if( rc!=SQLITE_OK ){ - return pager_error(pPager, rc); - } - assert( pPager->state>=SHARED_LOCK ); - } - - /* If a journal file exists, and there is no RESERVED lock on the - ** database file, then it either needs to be played back or deleted. - */ - if( hasHotJournal(pPager) ){ - /* Get an EXCLUSIVE lock on the database file. At this point it is - ** important that a RESERVED lock is not obtained on the way to the - ** EXCLUSIVE lock. If it were, another process might open the - ** database file, detect the RESERVED lock, and conclude that the - ** database is safe to read while this process is still rolling it - ** back. - ** - ** Because the intermediate RESERVED lock is not requested, the - ** second process will get to this point in the code and fail to - ** obtain it's own EXCLUSIVE lock on the database file. - */ - rc = sqlite3OsLock(pPager->fd, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - pager_unlock(pPager); - return pager_error(pPager, rc); - } - pPager->state = PAGER_EXCLUSIVE; - - /* Open the journal for reading only. Return SQLITE_BUSY if - ** we are unable to open the journal file. - ** - ** The journal file does not need to be locked itself. The - ** journal file is never open unless the main database file holds - ** a write lock, so there is never any chance of two or more - ** processes opening the journal at the same time. - ** - ** Open the journal for read/write access. This is because in - ** exclusive-access mode the file descriptor will be kept open and - ** possibly used for a transaction later on. On some systems, the - ** OsTruncate() call used in exclusive-access mode also requires - ** a read/write file handle. - */ - rc = SQLITE_BUSY; - if( sqlite3OsFileExists(pPager->zJournal) ){ - int ro; - assert( !pPager->tempFile ); - rc = sqlite3OsOpenReadWrite(pPager->zJournal, &pPager->jfd, &ro); - assert( rc!=SQLITE_OK || pPager->jfd ); - if( ro ){ - rc = SQLITE_BUSY; - sqlite3OsClose(&pPager->jfd); - } - } - if( rc!=SQLITE_OK ){ - pager_unlock(pPager); - return SQLITE_BUSY; - } - pPager->journalOpen = 1; - pPager->journalStarted = 0; - pPager->journalOff = 0; - pPager->setMaster = 0; - pPager->journalHdr = 0; - - /* Playback and delete the journal. Drop the database write - ** lock and reacquire the read lock. - */ - rc = pager_playback(pPager, 1); - if( rc!=SQLITE_OK ){ - return pager_error(pPager, rc); - } - assert(pPager->state==PAGER_SHARED || - (pPager->exclusiveMode && pPager->state>PAGER_SHARED) - ); - } - - if( pPager->pAll ){ - /* The shared-lock has just been acquired on the database file - ** and there are already pages in the cache (from a previous - ** read or write transaction). Check to see if the database - ** has been modified. If the database has changed, flush the - ** cache. - ** - ** Database changes is detected by looking at 15 bytes beginning - ** at offset 24 into the file. The first 4 of these 16 bytes are - ** a 32-bit counter that is incremented with each change. The - ** other bytes change randomly with each file change when - ** a codec is in use. - ** - ** There is a vanishingly small chance that a change will not be - ** detected. The chance of an undetected change is so small that - ** it can be neglected. 
- */ - char dbFileVers[sizeof(pPager->dbFileVers)]; - sqlite3PagerPagecount(pPager); - - if( pPager->errCode ){ - return pPager->errCode; - } - - if( pPager->dbSize>0 ){ - IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); - rc = sqlite3OsSeek(pPager->fd, 24); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers)); - if( rc!=SQLITE_OK ){ - return rc; - } - }else{ - memset(dbFileVers, 0, sizeof(dbFileVers)); - } - - if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ - pager_reset(pPager); - } - } - } - assert( pPager->exclusiveMode || pPager->state<=PAGER_SHARED ); - if( pPager->state==PAGER_UNLOCK ){ - pPager->state = PAGER_SHARED; - } - } - - return rc; -} - -/* -** Allocate a PgHdr object. Either create a new one or reuse -** an existing one that is not otherwise in use. -** -** A new PgHdr structure is created if any of the following are -** true: -** -** (1) We have not exceeded our maximum allocated cache size -** as set by the "PRAGMA cache_size" command. -** -** (2) There are no unused PgHdr objects available at this time. -** -** (3) This is an in-memory database. -** -** (4) There are no PgHdr objects that do not require a journal -** file sync and a sync of the journal file is currently -** prohibited. -** -** Otherwise, reuse an existing PgHdr. In other words, reuse an -** existing PgHdr if all of the following are true: -** -** (1) We have reached or exceeded the maximum cache size -** allowed by "PRAGMA cache_size". -** -** (2) There is a PgHdr available with PgHdr->nRef==0 -** -** (3) We are not in an in-memory database -** -** (4) Either there is an available PgHdr that does not need -** to be synced to disk or else disk syncing is currently -** allowed. -*/ -static int pagerAllocatePage(Pager *pPager, PgHdr **ppPg){ - int rc = SQLITE_OK; - PgHdr *pPg; - - /* Create a new PgHdr if any of the four conditions defined - ** above is met: */ - if( pPager->nPagemxPage - || pPager->pFirst==0 - || MEMDB - || (pPager->pFirstSynced==0 && pPager->doNotSync) - ){ - if( pPager->nPage>=pPager->nHash ){ - pager_resize_hash_table(pPager, - pPager->nHash<256 ? 256 : pPager->nHash*2); - if( pPager->nHash==0 ){ - rc = SQLITE_NOMEM; - goto pager_allocate_out; - } - } - pPg = sqliteMallocRaw( sizeof(*pPg) + pPager->pageSize - + sizeof(u32) + pPager->nExtra - + MEMDB*sizeof(PgHistory) ); - if( pPg==0 ){ - rc = SQLITE_NOMEM; - goto pager_allocate_out; - } - memset(pPg, 0, sizeof(*pPg)); - if( MEMDB ){ - memset(PGHDR_TO_HIST(pPg, pPager), 0, sizeof(PgHistory)); - } - pPg->pPager = pPager; - pPg->pNextAll = pPager->pAll; - pPager->pAll = pPg; - pPager->nPage++; - }else{ - /* Recycle an existing page with a zero ref-count. */ - rc = pager_recycle(pPager, 1, &pPg); - if( rc==SQLITE_BUSY ){ - rc = SQLITE_IOERR_BLOCKED; - } - if( rc!=SQLITE_OK ){ - goto pager_allocate_out; - } - assert( pPager->state>=SHARED_LOCK ); - assert(pPg); - } - *ppPg = pPg; - -pager_allocate_out: - return rc; -} - -/* -** Make sure we have the content for a page. If the page was -** previously acquired with noContent==1, then the content was -** just initialized to zeros instead of being read from disk. -** But now we need the real data off of disk. So make sure we -** have it. Read it in if we do not have it already. 
-*/ -static int pager_get_content(PgHdr *pPg){ - if( pPg->needRead ){ - int rc = readDbPage(pPg->pPager, pPg, pPg->pgno); - if( rc==SQLITE_OK ){ - pPg->needRead = 0; - }else{ - return rc; - } - } - return SQLITE_OK; -} - -/* -** Acquire a page. -** -** A read lock on the disk file is obtained when the first page is acquired. -** This read lock is dropped when the last page is released. -** -** This routine works for any page number greater than 0. If the database -** file is smaller than the requested page, then no actual disk -** read occurs and the memory image of the page is initialized to -** all zeros. The extra data appended to a page is always initialized -** to zeros the first time a page is loaded into memory. -** -** The acquisition might fail for several reasons. In all cases, -** an appropriate error code is returned and *ppPage is set to NULL. -** -** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt -** to find a page in the in-memory cache first. If the page is not already -** in memory, this routine goes to disk to read it in whereas Lookup() -** just returns 0. This routine acquires a read-lock the first time it -** has to go to disk, and could also playback an old journal if necessary. -** Since Lookup() never goes to disk, it never has to deal with locks -** or journal files. -** -** If noContent is false, the page contents are actually read from disk. -** If noContent is true, it means that we do not care about the contents -** of the page at this time, so do not do a disk read. Just fill in the -** page content with zeros. But mark the fact that we have not read the -** content by setting the PgHdr.needRead flag. Later on, if -** sqlite3PagerWrite() is called on this page or if this routine is -** called again with noContent==0, that means that the content is needed -** and the disk read should occur at that point. -*/ -int sqlite3PagerAcquire( - Pager *pPager, /* The pager open on the database file */ - Pgno pgno, /* Page number to fetch */ - DbPage **ppPage, /* Write a pointer to the page here */ - int noContent /* Do not bother reading content from disk if true */ -){ - PgHdr *pPg; - int rc; - - assert( pPager->state==PAGER_UNLOCK || pPager->nRef>0 || pgno==1 ); - - /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page - ** number greater than this, or zero, is requested. - */ - if( pgno>PAGER_MAX_PGNO || pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ - return SQLITE_CORRUPT_BKPT; - } - - /* Make sure we have not hit any critical errors. - */ - assert( pPager!=0 ); - *ppPage = 0; - if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ - return pPager->errCode; - } - - /* If this is the first page accessed, then get a SHARED lock - ** on the database file. pagerSharedLock() is a no-op if - ** a database lock is already held. - */ - rc = pagerSharedLock(pPager); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( pPager->state!=PAGER_UNLOCK ); - - pPg = pager_lookup(pPager, pgno); - if( pPg==0 ){ - /* The requested page is not in the page cache. 
*/ - int nMax; - int h; - PAGER_INCR(pPager->nMiss); - rc = pagerAllocatePage(pPager, &pPg); - if( rc!=SQLITE_OK ){ - return rc; - } - - pPg->pgno = pgno; - assert( !MEMDB || pgno>pPager->stmtSize ); - if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ - sqlite3CheckMemory(pPager->aInJournal, pgno/8); - assert( pPager->journalOpen ); - pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; - pPg->needSync = 0; - }else{ - pPg->inJournal = 0; - pPg->needSync = 0; - } - - makeClean(pPg); - pPg->nRef = 1; - REFINFO(pPg); - - pPager->nRef++; - if( pPager->nExtra>0 ){ - memset(PGHDR_TO_EXTRA(pPg, pPager), 0, pPager->nExtra); - } - nMax = sqlite3PagerPagecount(pPager); - if( pPager->errCode ){ - sqlite3PagerUnref(pPg); - rc = pPager->errCode; - return rc; - } - - /* Populate the page with data, either by reading from the database - ** file, or by setting the entire page to zero. - */ - if( nMax<(int)pgno || MEMDB || (noContent && !pPager->alwaysRollback) ){ - if( pgno>pPager->mxPgno ){ - sqlite3PagerUnref(pPg); - return SQLITE_FULL; - } - memset(PGHDR_TO_DATA(pPg), 0, pPager->pageSize); - pPg->needRead = noContent && !pPager->alwaysRollback; - IOTRACE(("ZERO %p %d\n", pPager, pgno)); - }else{ - rc = readDbPage(pPager, pPg, pgno); - if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ - pPg->pgno = 0; - sqlite3PagerUnref(pPg); - return rc; - } - pPg->needRead = 0; - } - - /* Link the page into the page hash table */ - h = pgno & (pPager->nHash-1); - assert( pgno!=0 ); - pPg->pNextHash = pPager->aHash[h]; - pPager->aHash[h] = pPg; - if( pPg->pNextHash ){ - assert( pPg->pNextHash->pPrevHash==0 ); - pPg->pNextHash->pPrevHash = pPg; - } - -#ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); -#endif - }else{ - /* The requested page is in the page cache. */ - assert(pPager->nRef>0 || pgno==1); - PAGER_INCR(pPager->nHit); - if( !noContent ){ - rc = pager_get_content(pPg); - if( rc ){ - return rc; - } - } - page_ref(pPg); - } - *ppPage = pPg; - return SQLITE_OK; -} - -/* -** Acquire a page if it is already in the in-memory cache. Do -** not read the page from disk. Return a pointer to the page, -** or 0 if the page is not in cache. -** -** See also sqlite3PagerGet(). The difference between this routine -** and sqlite3PagerGet() is that _get() will go to the disk and read -** in the page if the page is not already in cache. This routine -** returns NULL if the page is not in cache or if a disk I/O error -** has ever happened. -*/ -DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ - PgHdr *pPg; - - assert( pPager!=0 ); - assert( pgno!=0 ); - - if( pPager->state==PAGER_UNLOCK ){ - assert( !pPager->pAll || pPager->exclusiveMode ); - return 0; - } - if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){ - return 0; - } - pPg = pager_lookup(pPager, pgno); - if( pPg==0 ) return 0; - page_ref(pPg); - return pPg; -} - -/* -** Release a page. -** -** If the number of references to the page drop to zero, then the -** page is added to the LRU list. When all references to all pages -** are released, a rollback occurs and the lock on the database is -** removed. -*/ -int sqlite3PagerUnref(DbPage *pPg){ - - /* Decrement the reference count for this page - */ - assert( pPg->nRef>0 ); - pPg->nRef--; - REFINFO(pPg); - - CHECK_PAGE(pPg); - - /* When the number of references to a page reach 0, call the - ** destructor and add the page to the freelist. 
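The hash-table linking above depends on pPager->nHash being a power of two, so that "pgno & (nHash-1)" is a cheap replacement for "pgno % nHash". A standalone version of that table, with head insertion and lookup but without the pager's pPrevHash back-links, might look like this (names are illustrative):

#include <stdio.h>

typedef unsigned int Pgno;

typedef struct CachePage CachePage;
struct CachePage {
  Pgno pgno;            /* Page number, used as the hash key */
  CachePage *pNextHash; /* Next page in the same bucket */
};

#define N_HASH 256            /* Must be a power of two */
static CachePage *aHash[N_HASH];

/* Insert a page at the head of its hash bucket. */
static void hashInsert(CachePage *p){
  int h = p->pgno & (N_HASH-1);    /* same trick as pgno % N_HASH */
  p->pNextHash = aHash[h];
  aHash[h] = p;
}

/* Find a page by number, or return NULL if it is not cached. */
static CachePage *hashLookup(Pgno pgno){
  CachePage *p;
  for(p=aHash[pgno & (N_HASH-1)]; p; p=p->pNextHash){
    if( p->pgno==pgno ) return p;
  }
  return 0;
}

int main(void){
  CachePage a = {3, 0}, b = {259, 0};   /* 3 and 259 share bucket 3 */
  hashInsert(&a);
  hashInsert(&b);
  printf("lookup(259)=%p  lookup(4)=%p\n",
         (void*)hashLookup(259), (void*)hashLookup(4));
  return 0;
}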
- */ - if( pPg->nRef==0 ){ - Pager *pPager; - pPager = pPg->pPager; - pPg->pNextFree = 0; - pPg->pPrevFree = pPager->pLast; - pPager->pLast = pPg; - if( pPg->pPrevFree ){ - pPg->pPrevFree->pNextFree = pPg; - }else{ - pPager->pFirst = pPg; - } - if( pPg->needSync==0 && pPager->pFirstSynced==0 ){ - pPager->pFirstSynced = pPg; - } - if( pPager->xDestructor ){ - pPager->xDestructor(pPg, pPager->pageSize); - } - - /* When all pages reach the freelist, drop the read lock from - ** the database file. - */ - pPager->nRef--; - assert( pPager->nRef>=0 ); - if( pPager->nRef==0 && (!pPager->exclusiveMode || pPager->journalOff>0) ){ - pagerUnlockAndRollback(pPager); - } - } - return SQLITE_OK; -} - -/* -** Create a journal file for pPager. There should already be a RESERVED -** or EXCLUSIVE lock on the database file when this routine is called. -** -** Return SQLITE_OK if everything. Return an error code and release the -** write lock if anything goes wrong. -*/ -static int pager_open_journal(Pager *pPager){ - int rc; - assert( !MEMDB ); - assert( pPager->state>=PAGER_RESERVED ); - assert( pPager->journalOpen==0 ); - assert( pPager->useJournal ); - assert( pPager->aInJournal==0 ); - sqlite3PagerPagecount(pPager); - pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( pPager->aInJournal==0 ){ - rc = SQLITE_NOMEM; - goto failed_to_open_journal; - } - rc = sqlite3OsOpenExclusive(pPager->zJournal, &pPager->jfd, - pPager->tempFile); - assert( rc!=SQLITE_OK || pPager->jfd ); - pPager->journalOff = 0; - pPager->setMaster = 0; - pPager->journalHdr = 0; - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ){ - sqlite3OsDelete(pPager->zJournal); - } - goto failed_to_open_journal; - } - sqlite3OsSetFullSync(pPager->jfd, pPager->full_fsync); - sqlite3OsSetFullSync(pPager->fd, pPager->full_fsync); - sqlite3OsOpenDirectory(pPager->jfd, pPager->zDirectory); - pPager->journalOpen = 1; - pPager->journalStarted = 0; - pPager->needSync = 0; - pPager->alwaysRollback = 0; - pPager->nRec = 0; - if( pPager->errCode ){ - rc = pPager->errCode; - goto failed_to_open_journal; - } - pPager->origDbSize = pPager->dbSize; - - rc = writeJournalHdr(pPager); - - if( pPager->stmtAutoopen && rc==SQLITE_OK ){ - rc = sqlite3PagerStmtBegin(pPager); - } - if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - rc = pager_end_transaction(pPager); - if( rc==SQLITE_OK ){ - rc = SQLITE_FULL; - } - } - return rc; - -failed_to_open_journal: - sqliteFree(pPager->aInJournal); - pPager->aInJournal = 0; - return rc; -} - -/* -** Acquire a write-lock on the database. The lock is removed when -** the any of the following happen: -** -** * sqlite3PagerCommitPhaseTwo() is called. -** * sqlite3PagerRollback() is called. -** * sqlite3PagerClose() is called. -** * sqlite3PagerUnref() is called to on every outstanding page. -** -** The first parameter to this routine is a pointer to any open page of the -** database file. Nothing changes about the page - it is used merely to -** acquire a pointer to the Pager structure and as proof that there is -** already a read-lock on the database. -** -** The second parameter indicates how much space in bytes to reserve for a -** master journal file-name at the start of the journal when it is created. -** -** A journal file is opened if this is not a temporary file. For temporary -** files, the opening of the journal file is deferred until there is an -** actual need to write to the journal. -** -** If the database is already reserved for writing, this routine is a no-op. 
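The aInJournal array allocated in pager_open_journal() above is a plain bitmap with one bit per page of the original database, which is why dbSize/8 + 1 bytes suffice. The set/test idiom used throughout the pager looks like this in isolation (the helper names are made up for the sketch):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Pgno;

/* Allocate a zeroed bitmap big enough for pages 1..nPage. */
static unsigned char *bitmapAlloc(int nPage){
  return calloc(nPage/8 + 1, 1);
}

/* Record that page pgno has been written to the journal. */
static void bitmapSet(unsigned char *a, Pgno pgno){
  a[pgno/8] |= 1 << (pgno & 7);
}

/* True if page pgno is already in the journal. */
static int bitmapTest(const unsigned char *a, Pgno pgno){
  return (a[pgno/8] & (1 << (pgno & 7)))!=0;
}

int main(void){
  int dbSize = 1000;   /* pages in the database when the journal opened */
  unsigned char *aInJournal = bitmapAlloc(dbSize);
  if( aInJournal==0 ) return 1;
  bitmapSet(aInJournal, 42);
  printf("page 42 journalled: %d   page 43 journalled: %d\n",
         bitmapTest(aInJournal, 42), bitmapTest(aInJournal, 43));
  free(aInJournal);
  return 0;
}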
-** -** If exFlag is true, go ahead and get an EXCLUSIVE lock on the file -** immediately instead of waiting until we try to flush the cache. The -** exFlag is ignored if a transaction is already active. -*/ -int sqlite3PagerBegin(DbPage *pPg, int exFlag){ - Pager *pPager = pPg->pPager; - int rc = SQLITE_OK; - assert( pPg->nRef>0 ); - assert( pPager->state!=PAGER_UNLOCK ); - if( pPager->state==PAGER_SHARED ){ - assert( pPager->aInJournal==0 ); - if( MEMDB ){ - pPager->state = PAGER_EXCLUSIVE; - pPager->origDbSize = pPager->dbSize; - }else{ - rc = sqlite3OsLock(pPager->fd, RESERVED_LOCK); - if( rc==SQLITE_OK ){ - pPager->state = PAGER_RESERVED; - if( exFlag ){ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - } - } - if( rc!=SQLITE_OK ){ - return rc; - } - pPager->dirtyCache = 0; - PAGERTRACE2("TRANSACTION %d\n", PAGERID(pPager)); - if( pPager->useJournal && !pPager->tempFile ){ - rc = pager_open_journal(pPager); - } - } - }else if( pPager->journalOpen && pPager->journalOff==0 ){ - /* This happens when the pager was in exclusive-access mode last - ** time a (read or write) transaction was successfully concluded - ** by this connection. Instead of deleting the journal file it was - ** kept open and truncated to 0 bytes. - */ - assert( pPager->nRec==0 ); - assert( pPager->origDbSize==0 ); - assert( pPager->aInJournal==0 ); - sqlite3PagerPagecount(pPager); - pPager->aInJournal = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( !pPager->aInJournal ){ - rc = SQLITE_NOMEM; - }else{ - pPager->origDbSize = pPager->dbSize; - rc = writeJournalHdr(pPager); - } - } - assert( !pPager->journalOpen || pPager->journalOff>0 || rc!=SQLITE_OK ); - return rc; -} - -/* -** Make a page dirty. Set its dirty flag and add it to the dirty -** page list. -*/ -static void makeDirty(PgHdr *pPg){ - if( pPg->dirty==0 ){ - Pager *pPager = pPg->pPager; - pPg->dirty = 1; - pPg->pDirty = pPager->pDirty; - if( pPager->pDirty ){ - pPager->pDirty->pPrevDirty = pPg; - } - pPg->pPrevDirty = 0; - pPager->pDirty = pPg; - } -} - -/* -** Make a page clean. Clear its dirty bit and remove it from the -** dirty page list. -*/ -static void makeClean(PgHdr *pPg){ - if( pPg->dirty ){ - pPg->dirty = 0; - if( pPg->pDirty ){ - pPg->pDirty->pPrevDirty = pPg->pPrevDirty; - } - if( pPg->pPrevDirty ){ - pPg->pPrevDirty->pDirty = pPg->pDirty; - }else{ - pPg->pPager->pDirty = pPg->pDirty; - } - } -} - - -/* -** Mark a data page as writeable. The page is written into the journal -** if it is not there already. This routine must be called before making -** changes to a page. -** -** The first time this routine is called, the pager creates a new -** journal and acquires a RESERVED lock on the database. If the RESERVED -** lock could not be acquired, this routine returns SQLITE_BUSY. The -** calling routine must check for that return value and be careful not to -** change any page data until this routine returns SQLITE_OK. -** -** If the journal file could not be written because the disk is full, -** then this routine returns SQLITE_FULL and does an immediate rollback. -** All subsequent write attempts also return SQLITE_FULL until there -** is a call to sqlite3PagerCommit() or sqlite3PagerRollback() to -** reset. 
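makeDirty() and makeClean() above maintain a doubly linked list of dirty pages threaded through the page headers. The same push-to-head and unlink operations, reduced to a minimal stand-in structure (the struct and the global head pointer are hypothetical):

#include <stdio.h>

typedef struct DPage DPage;
struct DPage {
  int pgno;           /* Page number, for the demonstration only */
  int dirty;          /* True once the page is on the dirty list */
  DPage *pDirty;      /* Next dirty page */
  DPage *pPrevDirty;  /* Previous dirty page */
};

static DPage *pDirtyList = 0;   /* Head of the dirty-page list */

/* Add a page to the head of the dirty list (cf. makeDirty above). */
static void markDirty(DPage *p){
  if( p->dirty ) return;
  p->dirty = 1;
  p->pDirty = pDirtyList;
  if( pDirtyList ) pDirtyList->pPrevDirty = p;
  p->pPrevDirty = 0;
  pDirtyList = p;
}

/* Unlink a page from the dirty list (cf. makeClean above). */
static void markClean(DPage *p){
  if( !p->dirty ) return;
  p->dirty = 0;
  if( p->pDirty ) p->pDirty->pPrevDirty = p->pPrevDirty;
  if( p->pPrevDirty ){
    p->pPrevDirty->pDirty = p->pDirty;
  }else{
    pDirtyList = p->pDirty;
  }
}

int main(void){
  DPage a = {1, 0, 0, 0}, b = {2, 0, 0, 0};
  markDirty(&a);
  markDirty(&b);     /* list is now b -> a */
  markClean(&a);     /* list is now just b */
  printf("dirty list head is page %d\n", pDirtyList ? pDirtyList->pgno : 0);
  return 0;
}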
-*/ -static int pager_write(PgHdr *pPg){ - void *pData = PGHDR_TO_DATA(pPg); - Pager *pPager = pPg->pPager; - int rc = SQLITE_OK; - - /* Check for errors - */ - if( pPager->errCode ){ - return pPager->errCode; - } - if( pPager->readOnly ){ - return SQLITE_PERM; - } - - assert( !pPager->setMaster ); - - CHECK_PAGE(pPg); - - /* If this page was previously acquired with noContent==1, that means - ** we didn't really read in the content of the page. This can happen - ** (for example) when the page is being moved to the freelist. But - ** now we are (perhaps) moving the page off of the freelist for - ** reuse and we need to know its original content so that content - ** can be stored in the rollback journal. So do the read at this - ** time. - */ - rc = pager_get_content(pPg); - if( rc ){ - return rc; - } - - /* Mark the page as dirty. If the page has already been written - ** to the journal then we can return right away. - */ - makeDirty(pPg); - if( pPg->inJournal && (pageInStatement(pPg) || pPager->stmtInUse==0) ){ - pPager->dirtyCache = 1; - }else{ - - /* If we get this far, it means that the page needs to be - ** written to the transaction journal or the ckeckpoint journal - ** or both. - ** - ** First check to see that the transaction journal exists and - ** create it if it does not. - */ - assert( pPager->state!=PAGER_UNLOCK ); - rc = sqlite3PagerBegin(pPg, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( pPager->state>=PAGER_RESERVED ); - if( !pPager->journalOpen && pPager->useJournal ){ - rc = pager_open_journal(pPager); - if( rc!=SQLITE_OK ) return rc; - } - assert( pPager->journalOpen || !pPager->useJournal ); - pPager->dirtyCache = 1; - - /* The transaction journal now exists and we have a RESERVED or an - ** EXCLUSIVE lock on the main database file. Write the current page to - ** the transaction journal if it is not there already. - */ - if( !pPg->inJournal && (pPager->useJournal || MEMDB) ){ - if( (int)pPg->pgno <= pPager->origDbSize ){ - int szPg; - if( MEMDB ){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - PAGERTRACE3("JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - assert( pHist->pOrig==0 ); - pHist->pOrig = sqliteMallocRaw( pPager->pageSize ); - if( pHist->pOrig ){ - memcpy(pHist->pOrig, PGHDR_TO_DATA(pPg), pPager->pageSize); - } - }else{ - u32 cksum, saved; - char *pData2, *pEnd; - /* We should never write to the journal file the page that - ** contains the database locks. The following assert verifies - ** that we do not. */ - assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); - pData2 = CODEC2(pPager, pData, pPg->pgno, 7); - cksum = pager_cksum(pPager, (u8*)pData2); - pEnd = pData2 + pPager->pageSize; - pData2 -= 4; - saved = *(u32*)pEnd; - put32bits(pEnd, cksum); - szPg = pPager->pageSize+8; - put32bits(pData2, pPg->pgno); - rc = sqlite3OsWrite(pPager->jfd, pData2, szPg); - IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, - pPager->journalOff, szPg)); - PAGER_INCR(sqlite3_pager_writej_count); - pPager->journalOff += szPg; - PAGERTRACE5("JOURNAL %d page %d needSync=%d hash(%08x)\n", - PAGERID(pPager), pPg->pgno, pPg->needSync, pager_pagehash(pPg)); - *(u32*)pEnd = saved; - - /* An error has occured writing to the journal file. The - ** transaction will be rolled back by the layer above. 
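The journal write above emits one record per page: the 4-byte page number, the page image, then a 4-byte checksum, pageSize+8 bytes in total, with the 32-bit fields stored big-endian by put32bits(). The sketch below assembles such a record in memory; the checksum is a simplified stand-in (the real pager_cksum() also folds in Pager.cksumInit) and the helper names are invented:

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;
#define PAGE_SIZE 1024          /* illustrative page size */

/* Store a 32-bit value big-endian, like put32bits(). */
static void put32(unsigned char *p, u32 v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

/* Simplified checksum: a seed plus every 200th byte of the page. */
static u32 cksum(const unsigned char *aData, u32 seed){
  u32 s = seed;
  int i;
  for(i=PAGE_SIZE-200; i>0; i-=200) s += aData[i];
  return s;
}

/* Assemble one journal record: page number, page image, checksum. */
static void buildRecord(unsigned char *aRec, u32 pgno,
                        const unsigned char *aData, u32 seed){
  put32(aRec, pgno);                              /* bytes 0..3 */
  memcpy(&aRec[4], aData, PAGE_SIZE);             /* the page image */
  put32(&aRec[4+PAGE_SIZE], cksum(aData, seed));  /* trailing checksum */
}

int main(void){
  unsigned char aData[PAGE_SIZE], aRec[PAGE_SIZE+8];
  memset(aData, 0xAB, sizeof(aData));
  buildRecord(aRec, 7, aData, 0x1234);
  printf("record is %d bytes; low byte of the pgno field = 0x%02x\n",
         (int)sizeof(aRec), aRec[3]);
  return 0;
}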
- */ - if( rc!=SQLITE_OK ){ - return rc; - } - - pPager->nRec++; - assert( pPager->aInJournal!=0 ); - pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); - pPg->needSync = !pPager->noSync; - if( pPager->stmtInUse ){ - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } - } - }else{ - pPg->needSync = !pPager->journalStarted && !pPager->noSync; - PAGERTRACE4("APPEND %d page %d needSync=%d\n", - PAGERID(pPager), pPg->pgno, pPg->needSync); - } - if( pPg->needSync ){ - pPager->needSync = 1; - } - pPg->inJournal = 1; - } - - /* If the statement journal is open and the page is not in it, - ** then write the current page to the statement journal. Note that - ** the statement journal format differs from the standard journal format - ** in that it omits the checksums and the header. - */ - if( pPager->stmtInUse - && !pageInStatement(pPg) - && (int)pPg->pgno<=pPager->stmtSize - ){ - assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); - if( MEMDB ){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - assert( pHist->pStmt==0 ); - pHist->pStmt = sqliteMallocRaw( pPager->pageSize ); - if( pHist->pStmt ){ - memcpy(pHist->pStmt, PGHDR_TO_DATA(pPg), pPager->pageSize); - } - PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - page_add_to_stmt_list(pPg); - }else{ - char *pData2 = CODEC2(pPager, pData, pPg->pgno, 7)-4; - put32bits(pData2, pPg->pgno); - rc = sqlite3OsWrite(pPager->stfd, pData2, pPager->pageSize+4); - PAGERTRACE3("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno); - if( rc!=SQLITE_OK ){ - return rc; - } - pPager->stmtNRec++; - assert( pPager->aInStmt!=0 ); - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } - } - } - - /* Update the database size and return. - */ - assert( pPager->state>=PAGER_SHARED ); - if( pPager->dbSize<(int)pPg->pgno ){ - pPager->dbSize = pPg->pgno; - if( !MEMDB && pPager->dbSize==PENDING_BYTE/pPager->pageSize ){ - pPager->dbSize++; - } - } - return rc; -} - -/* -** This function is used to mark a data-page as writable. It uses -** pager_write() to open a journal file (if it is not already open) -** and write the page *pData to the journal. -** -** The difference between this function and pager_write() is that this -** function also deals with the special case where 2 or more pages -** fit on a single disk sector. In this case all co-resident pages -** must have been written to the journal file before returning. -*/ -int sqlite3PagerWrite(DbPage *pDbPage){ - int rc = SQLITE_OK; - - PgHdr *pPg = pDbPage; - Pager *pPager = pPg->pPager; - Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize); - - if( !MEMDB && nPagePerSector>1 ){ - Pgno nPageCount; /* Total number of pages in database file */ - Pgno pg1; /* First page of the sector pPg is located on. */ - int nPage; /* Number of pages starting at pg1 to journal */ - int ii; - - /* Set the doNotSync flag to 1. This is because we cannot allow a journal - ** header to be written between the pages journaled by this function. - */ - assert( pPager->doNotSync==0 ); - pPager->doNotSync = 1; - - /* This trick assumes that both the page-size and sector-size are - ** an integer power of 2. It sets variable pg1 to the identifier - ** of the first page of the sector pPg is located on. 
-    */
-    pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
-
-    nPageCount = sqlite3PagerPagecount(pPager);
-    if( pPg->pgno>nPageCount ){
-      nPage = (pPg->pgno - pg1)+1;
-    }else if( (pg1+nPagePerSector-1)>nPageCount ){
-      nPage = nPageCount+1-pg1;
-    }else{
-      nPage = nPagePerSector;
-    }
-    assert(nPage>0);
-    assert(pg1<=pPg->pgno);
-    assert((pg1+nPage)>pPg->pgno);
-
-    for(ii=0; ii<nPage; ii++){
-      Pgno pg = pg1+ii;
-      if( !pPager->aInJournal || pg==pPg->pgno ||
-          pg>pPager->origDbSize || !(pPager->aInJournal[pg/8]&(1<<(pg&7)))
-      ) {
-        if( pg!=PAGER_MJ_PGNO(pPager) ){
-          PgHdr *pPage;
-          rc = sqlite3PagerGet(pPager, pg, &pPage);
-          if( rc==SQLITE_OK ){
-            rc = pager_write(pPage);
-            sqlite3PagerUnref(pPage);
-          }
-        }
-      }
-    }
-
-    assert( pPager->doNotSync==1 );
-    pPager->doNotSync = 0;
-  }else{
-    rc = pager_write(pDbPage);
-  }
-  return rc;
-}
-
-/*
-** Return TRUE if the page given in the argument was previously passed
-** to sqlite3PagerWrite(). In other words, return TRUE if it is ok
-** to change the content of the page.
-*/
-#ifndef NDEBUG
-int sqlite3PagerIswriteable(DbPage *pPg){
-  return pPg->dirty;
-}
-#endif
-
-#ifndef SQLITE_OMIT_VACUUM
-/*
-** Replace the content of a single page with the information in the third
-** argument.
-*/
-int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void *pData){
-  PgHdr *pPg;
-  int rc;
-
-  rc = sqlite3PagerGet(pPager, pgno, &pPg);
-  if( rc==SQLITE_OK ){
-    rc = sqlite3PagerWrite(pPg);
-    if( rc==SQLITE_OK ){
-      memcpy(sqlite3PagerGetData(pPg), pData, pPager->pageSize);
-    }
-    sqlite3PagerUnref(pPg);
-  }
-  return rc;
-}
-#endif
-
-/*
-** A call to this routine tells the pager that it is not necessary to
-** write the information on page pPg back to the disk, even though
-** that page might be marked as dirty.
-**
-** The overlying software layer calls this routine when all of the data
-** on the given page is unused. The pager marks the page as clean so
-** that it does not get written to disk.
-**
-** Tests show that this optimization, together with the
-** sqlite3PagerDontRollback() below, more than double the speed
-** of large INSERT operations and quadruple the speed of large DELETEs.
-**
-** When this routine is called, set the alwaysRollback flag to true.
-** Subsequent calls to sqlite3PagerDontRollback() for the same page
-** will thereafter be ignored. This is necessary to avoid a problem
-** where a page with data is added to the freelist during one part of
-** a transaction then removed from the freelist during a later part
-** of the same transaction and reused for some other purpose. When it
-** is first added to the freelist, this routine is called. When reused,
-** the sqlite3PagerDontRollback() routine is called. But because the
-** page contains critical data, we still need to be sure it gets
-** rolled back in spite of the sqlite3PagerDontRollback() call.
-*/
-void sqlite3PagerDontWrite(DbPage *pDbPage){
-  PgHdr *pPg = pDbPage;
-  Pager *pPager = pPg->pPager;
-
-  if( MEMDB ) return;
-  pPg->alwaysRollback = 1;
-  if( pPg->dirty && !pPager->stmtInUse ){
-    assert( pPager->state>=PAGER_SHARED );
-    if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSize<pPager->dbSize ){
-      /* If this pages is the last page in the file and the file has grown
-      ** during the current transaction, then do NOT mark the page as clean.
-      ** When the database file grows, we must make sure that the last page
-      ** gets written at least once so that the disk file will be the correct
-      ** size.
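The pg1 arithmetic at the top of this hunk works because the page size and sector size are both powers of two (see the comment that precedes it), so nPagePerSector is too, and masking with ~(nPagePerSector-1) rounds a zero-based page index down to a sector boundary. A short demonstration of just that computation:

#include <stdio.h>

typedef unsigned int Pgno;

/* First page of the sector containing page pgno, assuming
** nPagePerSector is a power of two.  Pages are numbered from 1. */
static Pgno sectorFirstPage(Pgno pgno, Pgno nPagePerSector){
  return ((pgno-1) & ~(nPagePerSector-1)) + 1;
}

int main(void){
  Pgno nPagePerSector = 4;   /* e.g. 4096-byte sectors, 1024-byte pages */
  Pgno pg;
  for(pg=1; pg<=9; pg++){
    printf("page %u belongs to the sector starting at page %u\n",
           pg, sectorFirstPage(pg, nPagePerSector));
  }
  return 0;
}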
If you do not write this page and the size of the file - ** on the disk ends up being too small, that can lead to database - ** corruption during the next transaction. - */ - }else{ - PAGERTRACE3("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)); - IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) - makeClean(pPg); -#ifdef SQLITE_CHECK_PAGES - pPg->pageHash = pager_pagehash(pPg); -#endif - } - } -} - -/* -** A call to this routine tells the pager that if a rollback occurs, -** it is not necessary to restore the data on the given page. This -** means that the pager does not have to record the given page in the -** rollback journal. -** -** If we have not yet actually read the content of this page (if -** the PgHdr.needRead flag is set) then this routine acts as a promise -** that we will never need to read the page content in the future. -** so the needRead flag can be cleared at this point. -*/ -void sqlite3PagerDontRollback(DbPage *pPg){ - Pager *pPager = pPg->pPager; - - assert( pPager->state>=PAGER_RESERVED ); - if( pPager->journalOpen==0 ) return; - if( pPg->alwaysRollback || pPager->alwaysRollback || MEMDB ) return; - if( !pPg->inJournal && (int)pPg->pgno <= pPager->origDbSize ){ - assert( pPager->aInJournal!=0 ); - pPager->aInJournal[pPg->pgno/8] |= 1<<(pPg->pgno&7); - pPg->inJournal = 1; - pPg->needRead = 0; - if( pPager->stmtInUse ){ - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } - PAGERTRACE3("DONT_ROLLBACK page %d of %d\n", pPg->pgno, PAGERID(pPager)); - IOTRACE(("GARBAGE %p %d\n", pPager, pPg->pgno)) - } - if( pPager->stmtInUse - && !pageInStatement(pPg) - && (int)pPg->pgno<=pPager->stmtSize - ){ - assert( pPg->inJournal || (int)pPg->pgno>pPager->origDbSize ); - assert( pPager->aInStmt!=0 ); - pPager->aInStmt[pPg->pgno/8] |= 1<<(pPg->pgno&7); - } -} - - -/* -** This routine is called to increment the database file change-counter, -** stored at byte 24 of the pager file. -*/ -static int pager_incr_changecounter(Pager *pPager){ - PgHdr *pPgHdr; - u32 change_counter; - int rc; - - if( !pPager->changeCountDone ){ - /* Open page 1 of the file for writing. */ - rc = sqlite3PagerGet(pPager, 1, &pPgHdr); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3PagerWrite(pPgHdr); - if( rc!=SQLITE_OK ) return rc; - - /* Increment the value just read and write it back to byte 24. */ - change_counter = sqlite3Get4byte((u8*)pPager->dbFileVers); - change_counter++; - put32bits(((char*)PGHDR_TO_DATA(pPgHdr))+24, change_counter); - /* Release the page reference. */ - sqlite3PagerUnref(pPgHdr); - pPager->changeCountDone = 1; - } - return SQLITE_OK; -} - -/* -** Sync the database file for the pager pPager. zMaster points to the name -** of a master journal file that should be written into the individual -** journal file. zMaster may be NULL, which is interpreted as no master -** journal (a single database transaction). -** -** This routine ensures that the journal is synced, all dirty pages written -** to the database file and the database file synced. The only thing that -** remains to commit the transaction is to delete the journal file (or -** master journal file if specified). -** -** Note that if zMaster==NULL, this does not overwrite a previous value -** passed to an sqlite3PagerCommitPhaseOne() call. -** -** If parameter nTrunc is non-zero, then the pager file is truncated to -** nTrunc pages (this is used by auto-vacuum databases). 
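pager_incr_changecounter() above bumps the 32-bit big-endian counter stored at byte offset 24 of page 1; the same four bytes are the start of the dbFileVers stamp used earlier for cache validation. The byte-level operation on an in-memory copy of the database header looks like this (helper names are illustrative):

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;
#define CHANGE_CTR_OFFSET 24   /* offset of the counter within page 1 */

/* Read a big-endian 32-bit value, like sqlite3Get4byte(). */
static u32 get32(const unsigned char *p){
  return ((u32)p[0]<<24) | ((u32)p[1]<<16) | ((u32)p[2]<<8) | (u32)p[3];
}

/* Write a big-endian 32-bit value, like put32bits(). */
static void put32(unsigned char *p, u32 v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

int main(void){
  unsigned char aPage1[100];   /* just the database header, for the demo */
  u32 ctr;

  memset(aPage1, 0, sizeof(aPage1));
  put32(&aPage1[CHANGE_CTR_OFFSET], 41);   /* pretend 41 commits so far */

  /* Increment the counter the way pager_incr_changecounter() does. */
  ctr = get32(&aPage1[CHANGE_CTR_OFFSET]);
  put32(&aPage1[CHANGE_CTR_OFFSET], ctr+1);

  printf("change counter is now %u\n", get32(&aPage1[CHANGE_CTR_OFFSET]));
  return 0;
}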
-*/ -int sqlite3PagerCommitPhaseOne(Pager *pPager, const char *zMaster, Pgno nTrunc){ - int rc = SQLITE_OK; - - PAGERTRACE4("DATABASE SYNC: File=%s zMaster=%s nTrunc=%d\n", - pPager->zFilename, zMaster, nTrunc); - - /* If this is an in-memory db, or no pages have been written to, or this - ** function has already been called, it is a no-op. - */ - if( pPager->state!=PAGER_SYNCED && !MEMDB && pPager->dirtyCache ){ - PgHdr *pPg; - assert( pPager->journalOpen ); - - /* If a master journal file name has already been written to the - ** journal file, then no sync is required. This happens when it is - ** written, then the process fails to upgrade from a RESERVED to an - ** EXCLUSIVE lock. The next time the process tries to commit the - ** transaction the m-j name will have already been written. - */ - if( !pPager->setMaster ){ - rc = pager_incr_changecounter(pPager); - if( rc!=SQLITE_OK ) goto sync_exit; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( nTrunc!=0 ){ - /* If this transaction has made the database smaller, then all pages - ** being discarded by the truncation must be written to the journal - ** file. - */ - Pgno i; - int iSkip = PAGER_MJ_PGNO(pPager); - for( i=nTrunc+1; i<=pPager->origDbSize; i++ ){ - if( !(pPager->aInJournal[i/8] & (1<<(i&7))) && i!=iSkip ){ - rc = sqlite3PagerGet(pPager, i, &pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - rc = sqlite3PagerWrite(pPg); - sqlite3PagerUnref(pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - } - } - } -#endif - rc = writeMasterJournal(pPager, zMaster); - if( rc!=SQLITE_OK ) goto sync_exit; - rc = syncJournal(pPager); - if( rc!=SQLITE_OK ) goto sync_exit; - } - -#ifndef SQLITE_OMIT_AUTOVACUUM - if( nTrunc!=0 ){ - rc = sqlite3PagerTruncate(pPager, nTrunc); - if( rc!=SQLITE_OK ) goto sync_exit; - } -#endif - - /* Write all dirty pages to the database file */ - pPg = pager_get_all_dirty_pages(pPager); - rc = pager_write_pagelist(pPg); - if( rc!=SQLITE_OK ) goto sync_exit; - pPager->pDirty = 0; - - /* Sync the database file. */ - if( !pPager->noSync ){ - rc = sqlite3OsSync(pPager->fd, 0); - } - IOTRACE(("DBSYNC %p\n", pPager)) - - pPager->state = PAGER_SYNCED; - }else if( MEMDB && nTrunc!=0 ){ - rc = sqlite3PagerTruncate(pPager, nTrunc); - } - -sync_exit: - if( rc==SQLITE_IOERR_BLOCKED ){ - /* pager_incr_changecounter() may attempt to obtain an exclusive - * lock to spill the cache and return IOERR_BLOCKED. But since - * there is no chance the cache is inconsistent, it's - * better to return SQLITE_BUSY. - */ - rc = SQLITE_BUSY; - } - return rc; -} - - -/* -** Commit all changes to the database and release the write lock. -** -** If the commit fails for any reason, a rollback attempt is made -** and an error code is returned. If the commit worked, SQLITE_OK -** is returned. 
-*/
-int sqlite3PagerCommitPhaseTwo(Pager *pPager){
-  int rc;
-  PgHdr *pPg;
-
-  if( pPager->errCode ){
-    return pPager->errCode;
-  }
-  if( pPager->state<PAGER_RESERVED ){
-    return SQLITE_ERROR;
-  }
-  PAGERTRACE2("COMMIT %d\n", PAGERID(pPager));
-  if( MEMDB ){
-    pPg = pager_get_all_dirty_pages(pPager);
-    while( pPg ){
-      PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager);
-      clearHistory(pHist);
-      pPg->dirty = 0;
-      pPg->inJournal = 0;
-      pHist->inStmt = 0;
-      pPg->needSync = 0;
-      pHist->pPrevStmt = pHist->pNextStmt = 0;
-      pPg = pPg->pDirty;
-    }
-    pPager->pDirty = 0;
-#ifndef NDEBUG
-    for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
-      PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager);
-      assert( !pPg->alwaysRollback );
-      assert( !pHist->pOrig );
-      assert( !pHist->pStmt );
-    }
-#endif
-    pPager->pStmt = 0;
-    pPager->state = PAGER_SHARED;
-    return SQLITE_OK;
-  }
-  assert( pPager->journalOpen || !pPager->dirtyCache );
-  assert( pPager->state==PAGER_SYNCED || !pPager->dirtyCache );
-  rc = pager_end_transaction(pPager);
-  return pager_error(pPager, rc);
-}
-
-/*
-** Rollback all changes. The database falls back to PAGER_SHARED mode.
-** All in-memory cache pages revert to their original data contents.
-** The journal is deleted.
-**
-** This routine cannot fail unless some other process is not following
-** the correct locking protocol or unless some other
-** process is writing trash into the journal file (SQLITE_CORRUPT) or
-** unless a prior malloc() failed (SQLITE_NOMEM). Appropriate error
-** codes are returned for all these occasions. Otherwise,
-** SQLITE_OK is returned.
-*/
-int sqlite3PagerRollback(Pager *pPager){
-  int rc;
-  PAGERTRACE2("ROLLBACK %d\n", PAGERID(pPager));
-  if( MEMDB ){
-    PgHdr *p;
-    for(p=pPager->pAll; p; p=p->pNextAll){
-      PgHistory *pHist;
-      assert( !p->alwaysRollback );
-      if( !p->dirty ){
-        assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pOrig );
-        assert( !((PgHistory *)PGHDR_TO_HIST(p, pPager))->pStmt );
-        continue;
-      }
-
-      pHist = PGHDR_TO_HIST(p, pPager);
-      if( pHist->pOrig ){
-        memcpy(PGHDR_TO_DATA(p), pHist->pOrig, pPager->pageSize);
-        PAGERTRACE3("ROLLBACK-PAGE %d of %d\n", p->pgno, PAGERID(pPager));
-      }else{
-        PAGERTRACE3("PAGE %d is clean on %d\n", p->pgno, PAGERID(pPager));
-      }
-      clearHistory(pHist);
-      p->dirty = 0;
-      p->inJournal = 0;
-      pHist->inStmt = 0;
-      pHist->pPrevStmt = pHist->pNextStmt = 0;
-      if( pPager->xReiniter ){
-        pPager->xReiniter(p, pPager->pageSize);
-      }
-    }
-    pPager->pDirty = 0;
-    pPager->pStmt = 0;
-    pPager->dbSize = pPager->origDbSize;
-    pager_truncate_cache(pPager);
-    pPager->stmtInUse = 0;
-    pPager->state = PAGER_SHARED;
-    return SQLITE_OK;
-  }
-
-  if( !pPager->dirtyCache || !pPager->journalOpen ){
-    rc = pager_end_transaction(pPager);
-    return rc;
-  }
-
-  if( pPager->errCode && pPager->errCode!=SQLITE_FULL ){
-    if( pPager->state>=PAGER_EXCLUSIVE ){
-      pager_playback(pPager, 0);
-    }
-    return pPager->errCode;
-  }
-  if( pPager->state==PAGER_RESERVED ){
-    int rc2;
-    rc = pager_playback(pPager, 0);
-    rc2 = pager_end_transaction(pPager);
-    if( rc==SQLITE_OK ){
-      rc = rc2;
-    }
-  }else{
-    rc = pager_playback(pPager, 0);
-  }
-  /* pager_reset(pPager); */
-  pPager->dbSize = -1;
-
-  /* If an error occurs during a ROLLBACK, we can no longer trust the pager
-  ** cache. So call pager_error() on the way out to make any error
-  ** persistent.
-  */
-  return pager_error(pPager, rc);
-}
-
-/*
-** Return TRUE if the database file is opened read-only. Return FALSE
-** if the database is (in theory) writable.
-*/
-int sqlite3PagerIsreadonly(Pager *pPager){
-  return pPager->readOnly;
-}
-
-/*
-** Return the number of references to the pager.
-*/
-int sqlite3PagerRefcount(Pager *pPager){
-  return pPager->nRef;
-}
-
-#ifdef SQLITE_TEST
-/*
-** This routine is used for testing and analysis only.
-*/ -int *sqlite3PagerStats(Pager *pPager){ - static int a[11]; - a[0] = pPager->nRef; - a[1] = pPager->nPage; - a[2] = pPager->mxPage; - a[3] = pPager->dbSize; - a[4] = pPager->state; - a[5] = pPager->errCode; - a[6] = pPager->nHit; - a[7] = pPager->nMiss; - a[8] = 0; /* Used to be pPager->nOvfl */ - a[9] = pPager->nRead; - a[10] = pPager->nWrite; - return a; -} -#endif - -/* -** Set the statement rollback point. -** -** This routine should be called with the transaction journal already -** open. A new statement journal is created that can be used to rollback -** changes of a single SQL command within a larger transaction. -*/ -int sqlite3PagerStmtBegin(Pager *pPager){ - int rc; - assert( !pPager->stmtInUse ); - assert( pPager->state>=PAGER_SHARED ); - assert( pPager->dbSize>=0 ); - PAGERTRACE2("STMT-BEGIN %d\n", PAGERID(pPager)); - if( MEMDB ){ - pPager->stmtInUse = 1; - pPager->stmtSize = pPager->dbSize; - return SQLITE_OK; - } - if( !pPager->journalOpen ){ - pPager->stmtAutoopen = 1; - return SQLITE_OK; - } - assert( pPager->journalOpen ); - pPager->aInStmt = sqliteMalloc( pPager->dbSize/8 + 1 ); - if( pPager->aInStmt==0 ){ - /* sqlite3OsLock(pPager->fd, SHARED_LOCK); */ - return SQLITE_NOMEM; - } -#ifndef NDEBUG - rc = sqlite3OsFileSize(pPager->jfd, &pPager->stmtJSize); - if( rc ) goto stmt_begin_failed; - assert( pPager->stmtJSize == pPager->journalOff ); -#endif - pPager->stmtJSize = pPager->journalOff; - pPager->stmtSize = pPager->dbSize; - pPager->stmtHdrOff = 0; - pPager->stmtCksum = pPager->cksumInit; - if( !pPager->stmtOpen ){ - rc = sqlite3PagerOpentemp(&pPager->stfd); - if( rc ) goto stmt_begin_failed; - pPager->stmtOpen = 1; - pPager->stmtNRec = 0; - } - pPager->stmtInUse = 1; - return SQLITE_OK; - -stmt_begin_failed: - if( pPager->aInStmt ){ - sqliteFree(pPager->aInStmt); - pPager->aInStmt = 0; - } - return rc; -} - -/* -** Commit a statement. -*/ -int sqlite3PagerStmtCommit(Pager *pPager){ - if( pPager->stmtInUse ){ - PgHdr *pPg, *pNext; - PAGERTRACE2("STMT-COMMIT %d\n", PAGERID(pPager)); - if( !MEMDB ){ - sqlite3OsSeek(pPager->stfd, 0); - /* sqlite3OsTruncate(pPager->stfd, 0); */ - sqliteFree( pPager->aInStmt ); - pPager->aInStmt = 0; - }else{ - for(pPg=pPager->pStmt; pPg; pPg=pNext){ - PgHistory *pHist = PGHDR_TO_HIST(pPg, pPager); - pNext = pHist->pNextStmt; - assert( pHist->inStmt ); - pHist->inStmt = 0; - pHist->pPrevStmt = pHist->pNextStmt = 0; - sqliteFree(pHist->pStmt); - pHist->pStmt = 0; - } - } - pPager->stmtNRec = 0; - pPager->stmtInUse = 0; - pPager->pStmt = 0; - } - pPager->stmtAutoopen = 0; - return SQLITE_OK; -} - -/* -** Rollback a statement. -*/ -int sqlite3PagerStmtRollback(Pager *pPager){ - int rc; - if( pPager->stmtInUse ){ - PAGERTRACE2("STMT-ROLLBACK %d\n", PAGERID(pPager)); - if( MEMDB ){ - PgHdr *pPg; - PgHistory *pHist; - for(pPg=pPager->pStmt; pPg; pPg=pHist->pNextStmt){ - pHist = PGHDR_TO_HIST(pPg, pPager); - if( pHist->pStmt ){ - memcpy(PGHDR_TO_DATA(pPg), pHist->pStmt, pPager->pageSize); - sqliteFree(pHist->pStmt); - pHist->pStmt = 0; - } - } - pPager->dbSize = pPager->stmtSize; - pager_truncate_cache(pPager); - rc = SQLITE_OK; - }else{ - rc = pager_stmt_playback(pPager); - } - sqlite3PagerStmtCommit(pPager); - }else{ - rc = SQLITE_OK; - } - pPager->stmtAutoopen = 0; - return rc; -} - -/* -** Return the full pathname of the database file. -*/ -const char *sqlite3PagerFilename(Pager *pPager){ - return pPager->zFilename; -} - -/* -** Return the directory of the database file. 
-*/ -const char *sqlite3PagerDirname(Pager *pPager){ - return pPager->zDirectory; -} - -/* -** Return the full pathname of the journal file. -*/ -const char *sqlite3PagerJournalname(Pager *pPager){ - return pPager->zJournal; -} - -/* -** Return true if fsync() calls are disabled for this pager. Return FALSE -** if fsync()s are executed normally. -*/ -int sqlite3PagerNosync(Pager *pPager){ - return pPager->noSync; -} - -#ifdef SQLITE_HAS_CODEC -/* -** Set the codec for this pager -*/ -void sqlite3PagerSetCodec( - Pager *pPager, - void *(*xCodec)(void*,void*,Pgno,int), - void *pCodecArg -){ - pPager->xCodec = xCodec; - pPager->pCodecArg = pCodecArg; -} -#endif - -#ifndef SQLITE_OMIT_AUTOVACUUM -/* -** Move the page pPg to location pgno in the file. -** -** There must be no references to the page previously located at -** pgno (which we call pPgOld) though that page is allowed to be -** in cache. If the page previous located at pgno is not already -** in the rollback journal, it is not put there by by this routine. -** -** References to the page pPg remain valid. Updating any -** meta-data associated with pPg (i.e. data stored in the nExtra bytes -** allocated along with the page) is the responsibility of the caller. -** -** A transaction must be active when this routine is called. It used to be -** required that a statement transaction was not active, but this restriction -** has been removed (CREATE INDEX needs to move a page when a statement -** transaction is active). -*/ -int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno){ - PgHdr *pPgOld; /* The page being overwritten. */ - int h; - Pgno needSyncPgno = 0; - - assert( pPg->nRef>0 ); - - PAGERTRACE5("MOVE %d page %d (needSync=%d) moves to %d\n", - PAGERID(pPager), pPg->pgno, pPg->needSync, pgno); - IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) - - pager_get_content(pPg); - if( pPg->needSync ){ - needSyncPgno = pPg->pgno; - assert( pPg->inJournal || (int)pgno>pPager->origDbSize ); - assert( pPg->dirty ); - assert( pPager->needSync ); - } - - /* Unlink pPg from it's hash-chain */ - unlinkHashChain(pPager, pPg); - - /* If the cache contains a page with page-number pgno, remove it - ** from it's hash chain. Also, if the PgHdr.needSync was set for - ** page pgno before the 'move' operation, it needs to be retained - ** for the page moved there. - */ - pPg->needSync = 0; - pPgOld = pager_lookup(pPager, pgno); - if( pPgOld ){ - assert( pPgOld->nRef==0 ); - unlinkHashChain(pPager, pPgOld); - makeClean(pPgOld); - pPg->needSync = pPgOld->needSync; - }else{ - pPg->needSync = 0; - } - if( pPager->aInJournal && (int)pgno<=pPager->origDbSize ){ - pPg->inJournal = (pPager->aInJournal[pgno/8] & (1<<(pgno&7)))!=0; - }else{ - pPg->inJournal = 0; - assert( pPg->needSync==0 || (int)pgno>pPager->origDbSize ); - } - - /* Change the page number for pPg and insert it into the new hash-chain. */ - assert( pgno!=0 ); - pPg->pgno = pgno; - h = pgno & (pPager->nHash-1); - if( pPager->aHash[h] ){ - assert( pPager->aHash[h]->pPrevHash==0 ); - pPager->aHash[h]->pPrevHash = pPg; - } - pPg->pNextHash = pPager->aHash[h]; - pPager->aHash[h] = pPg; - pPg->pPrevHash = 0; - - makeDirty(pPg); - pPager->dirtyCache = 1; - - if( needSyncPgno ){ - /* If needSyncPgno is non-zero, then the journal file needs to be - ** sync()ed before any data is written to database file page needSyncPgno. - ** Currently, no such page exists in the page-cache and the - ** Pager.aInJournal bit has been set. 
This needs to be remedied by loading - ** the page into the pager-cache and setting the PgHdr.needSync flag. - ** - ** The sqlite3PagerGet() call may cause the journal to sync. So make - ** sure the Pager.needSync flag is set too. - */ - int rc; - PgHdr *pPgHdr; - assert( pPager->needSync ); - rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); - if( rc!=SQLITE_OK ) return rc; - pPager->needSync = 1; - pPgHdr->needSync = 1; - pPgHdr->inJournal = 1; - makeDirty(pPgHdr); - sqlite3PagerUnref(pPgHdr); - } - - return SQLITE_OK; -} -#endif - -/* -** Return a pointer to the data for the specified page. -*/ -void *sqlite3PagerGetData(DbPage *pPg){ - return PGHDR_TO_DATA(pPg); -} - -/* -** Return a pointer to the Pager.nExtra bytes of "extra" space -** allocated along with the specified page. -*/ -void *sqlite3PagerGetExtra(DbPage *pPg){ - Pager *pPager = pPg->pPager; - return (pPager?PGHDR_TO_EXTRA(pPg, pPager):0); -} - -/* -** Get/set the locking-mode for this pager. Parameter eMode must be one -** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or -** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then -** the locking-mode is set to the value specified. -** -** The returned value is either PAGER_LOCKINGMODE_NORMAL or -** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) -** locking-mode. -*/ -int sqlite3PagerLockingMode(Pager *pPager, int eMode){ - assert( eMode==PAGER_LOCKINGMODE_QUERY - || eMode==PAGER_LOCKINGMODE_NORMAL - || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); - assert( PAGER_LOCKINGMODE_QUERY<0 ); - assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); - if( eMode>=0 && !pPager->tempFile ){ - pPager->exclusiveMode = eMode; - } - return (int)pPager->exclusiveMode; -} - -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) -/* -** Return the current state of the file lock for the given pager. -** The return value is one of NO_LOCK, SHARED_LOCK, RESERVED_LOCK, -** PENDING_LOCK, or EXCLUSIVE_LOCK. -*/ -int sqlite3PagerLockstate(Pager *pPager){ - return sqlite3OsLockState(pPager->fd); -} -#endif - -#ifdef SQLITE_DEBUG -/* -** Print a listing of all referenced pages and their ref count. -*/ -void sqlite3PagerRefdump(Pager *pPager){ - PgHdr *pPg; - for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){ - if( pPg->nRef<=0 ) continue; - sqlite3DebugPrintf("PAGE %3d addr=%p nRef=%d\n", - pPg->pgno, PGHDR_TO_DATA(pPg), pPg->nRef); - } -} -#endif - -#endif /* SQLITE_OMIT_DISKIO */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pager.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pager.h --- sqlite3-3.4.2/src/pager.h 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/pager.h 2009-06-25 12:45:58.000000000 +0100 @@ -13,17 +13,26 @@ ** subsystem. The page cache subsystem reads and writes a file a page ** at a time and provides a journal for rollback. ** -** @(#) $Id: pager.h,v 1.61 2007/05/08 21:45:28 drh Exp $ +** @(#) $Id: pager.h,v 1.102 2009/06/18 17:22:39 drh Exp $ */ #ifndef _PAGER_H_ #define _PAGER_H_ /* +** Default maximum size for persistent journal files. A negative +** value means no limit. This value may be overridden using the +** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit". +*/ +#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT + #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1 +#endif + +/* ** The type used to represent a page number. The first page in a file ** is called page 1. 0 is used to represent "not a page". 
*/ -typedef unsigned int Pgno; +typedef u32 Pgno; /* ** Each open file is managed by a separate instance of the "Pager" structure. @@ -36,9 +45,19 @@ typedef struct PgHdr DbPage; /* +** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is +** reserved for working around a windows/posix incompatibility). It is +** used in the journal to signify that the remainder of the journal file +** is devoted to storing a master journal name - there are no more pages to +** roll back. See comments for function writeMasterJournal() in pager.c +** for details. +*/ +#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1)) + +/* ** Allowed values for the flags parameter to sqlite3PagerOpen(). ** -** NOTE: This values must match the corresponding BTREE_ values in btree.h. +** NOTE: These values must match the corresponding BTREE_ values in btree.h. */ #define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */ #define PAGER_NO_READLOCK 0x0002 /* Omit readlocks on readonly files */ @@ -51,75 +70,87 @@ #define PAGER_LOCKINGMODE_EXCLUSIVE 1 /* -** See source code comments for a detailed description of the following -** routines: +** Valid values for the second argument to sqlite3PagerJournalMode(). */ -int sqlite3PagerOpen(Pager **ppPager, const char *zFilename, - int nExtra, int flags); -void sqlite3PagerSetBusyhandler(Pager*, BusyHandler *pBusyHandler); -void sqlite3PagerSetDestructor(Pager*, void(*)(DbPage*,int)); -void sqlite3PagerSetReiniter(Pager*, void(*)(DbPage*,int)); -int sqlite3PagerSetPagesize(Pager*, int); -int sqlite3PagerMaxPageCount(Pager*, int); +#define PAGER_JOURNALMODE_QUERY -1 +#define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */ +#define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */ +#define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */ +#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */ +#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ + +/* +** The remainder of this file contains the declarations of the functions +** that make up the Pager sub-system API. See source code comments for +** a detailed description of each routine. +*/ + +/* Open and close a Pager connection. */ +int sqlite3PagerOpen(sqlite3_vfs *, Pager **ppPager, const char*, int,int,int); +int sqlite3PagerClose(Pager *pPager); int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); + +/* Functions used to configure a Pager object. */ +void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *); +void sqlite3PagerSetReiniter(Pager*, void(*)(DbPage*)); +int sqlite3PagerSetPagesize(Pager*, u16*, int); +int sqlite3PagerMaxPageCount(Pager*, int); void sqlite3PagerSetCachesize(Pager*, int); -int sqlite3PagerClose(Pager *pPager); +void sqlite3PagerSetSafetyLevel(Pager*,int,int); +int sqlite3PagerLockingMode(Pager *, int); +int sqlite3PagerJournalMode(Pager *, int); +i64 sqlite3PagerJournalSizeLimit(Pager *, i64); +sqlite3_backup **sqlite3PagerBackupPtr(Pager*); + +/* Functions used to obtain and release page references. */ int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag); #define sqlite3PagerGet(A,B,C) sqlite3PagerAcquire(A,B,C,0) DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno); -int sqlite3PagerRef(DbPage*); -int sqlite3PagerUnref(DbPage*); +void sqlite3PagerRef(DbPage*); +void sqlite3PagerUnref(DbPage*); + +/* Operations on page references. 
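The PAGER_MJ_PGNO() macro added above names the page that overlaps the lock-byte range beginning at PENDING_BYTE; that page never carries data, and in a journal its number signals that a master-journal name follows. Assuming the default PENDING_BYTE of 0x40000000 (1 GiB), the arithmetic works out as follows:

#include <stdio.h>

typedef unsigned int Pgno;
#define PENDING_BYTE 0x40000000   /* default lock-byte offset; configurable in real builds */

/* Same computation as PAGER_MJ_PGNO(), for a given page size. */
static Pgno pendingBytePage(int pageSize){
  return (Pgno)(PENDING_BYTE/pageSize) + 1;
}

int main(void){
  int aSz[] = {512, 1024, 4096, 32768};
  int i;
  for(i=0; i<4; i++){
    printf("page size %5d -> the locking page is page %u\n",
           aSz[i], pendingBytePage(aSz[i]));
  }
  return 0;
}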
*/ int sqlite3PagerWrite(DbPage*); -int sqlite3PagerOverwrite(Pager *pPager, Pgno pgno, void*); -int sqlite3PagerPagecount(Pager*); -int sqlite3PagerTruncate(Pager*,Pgno); -int sqlite3PagerBegin(DbPage*, int exFlag); -int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, Pgno); +void sqlite3PagerDontWrite(DbPage*); +int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int); +int sqlite3PagerPageRefcount(DbPage*); +void *sqlite3PagerGetData(DbPage *); +void *sqlite3PagerGetExtra(DbPage *); + +/* Functions used to manage pager transactions and savepoints. */ +int sqlite3PagerPagecount(Pager*, int*); +int sqlite3PagerBegin(Pager*, int exFlag, int); +int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int); +int sqlite3PagerSync(Pager *pPager); int sqlite3PagerCommitPhaseTwo(Pager*); int sqlite3PagerRollback(Pager*); -int sqlite3PagerIsreadonly(Pager*); -int sqlite3PagerStmtBegin(Pager*); -int sqlite3PagerStmtCommit(Pager*); -int sqlite3PagerStmtRollback(Pager*); -void sqlite3PagerDontRollback(DbPage*); -void sqlite3PagerDontWrite(DbPage*); +int sqlite3PagerOpenSavepoint(Pager *pPager, int n); +int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); + +/* Functions used to query pager state and configuration. */ +u8 sqlite3PagerIsreadonly(Pager*); int sqlite3PagerRefcount(Pager*); -void sqlite3PagerSetSafetyLevel(Pager*,int,int); const char *sqlite3PagerFilename(Pager*); -const char *sqlite3PagerDirname(Pager*); +const sqlite3_vfs *sqlite3PagerVfs(Pager*); +sqlite3_file *sqlite3PagerFile(Pager*); const char *sqlite3PagerJournalname(Pager*); int sqlite3PagerNosync(Pager*); -int sqlite3PagerMovepage(Pager*,DbPage*,Pgno); -void *sqlite3PagerGetData(DbPage *); -void *sqlite3PagerGetExtra(DbPage *); -int sqlite3PagerLockingMode(Pager *, int); +void *sqlite3PagerTempSpace(Pager*); +int sqlite3PagerIsMemdb(Pager*); -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) && !defined(SQLITE_OMIT_DISKIO) - int sqlite3PagerReleaseMemory(int); -#endif - -#ifdef SQLITE_HAS_CODEC - void sqlite3PagerSetCodec(Pager*,void*(*)(void*,void*,Pgno,int),void*); -#endif +/* Functions used to truncate the database file. */ +void sqlite3PagerTruncateImage(Pager*,Pgno); +/* Functions to support testing and debugging. */ #if !defined(NDEBUG) || defined(SQLITE_TEST) Pgno sqlite3PagerPagenumber(DbPage*); int sqlite3PagerIswriteable(DbPage*); #endif - -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - int sqlite3PagerLockstate(Pager*); -#endif - #ifdef SQLITE_TEST int *sqlite3PagerStats(Pager*); void sqlite3PagerRefdump(Pager*); - int pager3_refinfo_enable; -#endif - -#ifdef SQLITE_TEST -void disable_simulated_io_errors(void); -void enable_simulated_io_errors(void); + void disable_simulated_io_errors(void); + void enable_simulated_io_errors(void); #else # define disable_simulated_io_errors() # define enable_simulated_io_errors() diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/parse.y /tmp/3ARg2Grji7/sqlite3-3.6.16/src/parse.y --- sqlite3-3.4.2/src/parse.y 2007-07-14 04:06:11.000000000 +0100 +++ sqlite3-3.6.16/src/parse.y 2009-06-25 12:45:58.000000000 +0100 @@ -14,7 +14,7 @@ ** the parser. Lemon will also generate a header file containing ** numeric codes for all of the tokens. 
** -** @(#) $Id: parse.y,v 1.231 2007/06/20 12:18:31 drh Exp $ +** @(#) $Id: parse.y,v 1.283 2009/06/19 14:06:03 drh Exp $ */ // All token codes are small integers with #defines that begin with "TK_" @@ -32,16 +32,13 @@ // This code runs whenever there is a syntax error // %syntax_error { - if( !pParse->parseError ){ - if( TOKEN.z[0] ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); - }else{ - sqlite3ErrorMsg(pParse, "incomplete SQL statement"); - } - pParse->parseError = 1; - } + UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ + assert( TOKEN.z[0] ); /* The tokenizer always gives us a token */ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); + pParse->parseError = 1; } %stack_overflow { + UNUSED_PARAMETER(yypMinor); /* Silence some compiler warnings */ sqlite3ErrorMsg(pParse, "parser stack overflow"); pParse->parseError = 1; } @@ -55,7 +52,17 @@ // %include { #include "sqliteInt.h" -#include "parse.h" + +/* +** Disable all error recovery processing in the parser push-down +** automaton. +*/ +#define YYNOERRORRECOVERY 1 + +/* +** Make yytestcase() the same as testcase() +*/ +#define yytestcase(X) testcase(X) /* ** An instance of this structure holds information about the @@ -97,7 +104,6 @@ input ::= cmdlist. cmdlist ::= cmdlist ecmd. cmdlist ::= ecmd. -cmdx ::= cmd. { sqlite3FinishCoding(pParse); } ecmd ::= SEMI. ecmd ::= explain cmdx SEMI. explain ::= . { sqlite3BeginParse(pParse, 0); } @@ -105,6 +111,7 @@ explain ::= EXPLAIN. { sqlite3BeginParse(pParse, 1); } explain ::= EXPLAIN QUERY PLAN. { sqlite3BeginParse(pParse, 2); } %endif SQLITE_OMIT_EXPLAIN +cmdx ::= cmd. { sqlite3FinishCoding(pParse); } ///////////////////// Begin and end transactions. //////////////////////////// // @@ -122,12 +129,28 @@ cmd ::= END trans_opt. {sqlite3CommitTransaction(pParse);} cmd ::= ROLLBACK trans_opt. {sqlite3RollbackTransaction(pParse);} +savepoint_opt ::= SAVEPOINT. +savepoint_opt ::= . +cmd ::= SAVEPOINT nm(X). { + sqlite3Savepoint(pParse, SAVEPOINT_BEGIN, &X); +} +cmd ::= RELEASE savepoint_opt nm(X). { + sqlite3Savepoint(pParse, SAVEPOINT_RELEASE, &X); +} +cmd ::= ROLLBACK trans_opt TO savepoint_opt nm(X). { + sqlite3Savepoint(pParse, SAVEPOINT_ROLLBACK, &X); +} + ///////////////////// The CREATE TABLE statement //////////////////////////// // cmd ::= create_table create_table_args. -create_table ::= CREATE temp(T) TABLE ifnotexists(E) nm(Y) dbnm(Z). { +create_table ::= createkw temp(T) TABLE ifnotexists(E) nm(Y) dbnm(Z). { sqlite3StartTable(pParse,&Y,&Z,T,0,0,E); } +createkw(A) ::= CREATE(X). { + pParse->db->lookaside.bEnabled = 0; + A = X; +} %type ifnotexists {int} ifnotexists(A) ::= . {A = 0;} ifnotexists(A) ::= IF NOT EXISTS. {A = 1;} @@ -141,7 +164,7 @@ } create_table_args ::= AS select(S). { sqlite3EndTable(pParse,0,0,S); - sqlite3SelectDelete(S); + sqlite3SelectDelete(pParse->db, S); } columnlist ::= columnlist COMMA column. columnlist ::= column. @@ -153,7 +176,7 @@ // column(A) ::= columnid(X) type carglist. { A.z = X.z; - A.n = (pParse->sLastToken.z-X.z) + pParse->sLastToken.n; + A.n = (int)(pParse->sLastToken.z-X.z) + pParse->sLastToken.n; } columnid(A) ::= nm(X). { sqlite3AddColumn(pParse,&X); @@ -166,17 +189,18 @@ // %type id {Token} id(A) ::= ID(X). {A = X;} +id(A) ::= INDEXED(X). {A = X;} // The following directive causes tokens ABORT, AFTER, ASC, etc. to // fallback to ID if they will not parse as their original value. // This obviates the need for the "id" nonterminal. 
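The SAVEPOINT, RELEASE and ROLLBACK TO rules added above are reached through ordinary SQL. A minimal use of the new syntax through the public C API (error handling omitted; the file and table names are arbitrary):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;

  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x)", 0, 0, 0);
  sqlite3_exec(db, "BEGIN", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);

  /* Nested sub-transaction using the new savepoint grammar. */
  sqlite3_exec(db, "SAVEPOINT sp1", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(2)", 0, 0, 0);
  sqlite3_exec(db, "ROLLBACK TO sp1", 0, 0, 0);   /* undo the second insert */
  sqlite3_exec(db, "RELEASE sp1", 0, 0, 0);       /* discard the savepoint  */

  sqlite3_exec(db, "COMMIT", 0, 0, 0);            /* only VALUES(1) persists */
  sqlite3_close(db);
  return 0;
}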
// %fallback ID - ABORT AFTER ANALYZE ASC ATTACH BEFORE BEGIN CASCADE CAST CONFLICT + ABORT AFTER ANALYZE ASC ATTACH BEFORE BEGIN BY CASCADE CAST COLUMNKW CONFLICT DATABASE DEFERRED DESC DETACH EACH END EXCLUSIVE EXPLAIN FAIL FOR IGNORE IMMEDIATE INITIALLY INSTEAD LIKE_KW MATCH PLAN - QUERY KEY OF OFFSET PRAGMA RAISE REPLACE RESTRICT ROW - TEMP TRIGGER VACUUM VIEW VIRTUAL + QUERY KEY OF OFFSET PRAGMA RAISE RELEASE REPLACE RESTRICT ROW ROLLBACK + SAVEPOINT TEMP TRIGGER VACUUM VIEW VIRTUAL %ifdef SQLITE_OMIT_COMPOUND_SELECT EXCEPT INTERSECT UNION %endif SQLITE_OMIT_COMPOUND_SELECT @@ -216,7 +240,7 @@ // The name of a column or table can be any of the following: // %type nm {Token} -nm(A) ::= ID(X). {A = X;} +nm(A) ::= id(X). {A = X;} nm(A) ::= STRING(X). {A = X;} nm(A) ::= JOIN_KW(X). {A = X;} @@ -230,15 +254,15 @@ typetoken(A) ::= typename(X). {A = X;} typetoken(A) ::= typename(X) LP signed RP(Y). { A.z = X.z; - A.n = &Y.z[Y.n] - X.z; + A.n = (int)(&Y.z[Y.n] - X.z); } typetoken(A) ::= typename(X) LP signed COMMA signed RP(Y). { A.z = X.z; - A.n = &Y.z[Y.n] - X.z; + A.n = (int)(&Y.z[Y.n] - X.z); } %type typename {Token} typename(A) ::= ids(X). {A = X;} -typename(A) ::= typename(X) ids(Y). {A.z=X.z; A.n=Y.n+(Y.z-X.z);} +typename(A) ::= typename(X) ids(Y). {A.z=X.z; A.n=Y.n+(int)(Y.z-X.z);} signed ::= plus_num. signed ::= minus_num. @@ -249,31 +273,35 @@ carglist ::= . carg ::= CONSTRAINT nm ccons. carg ::= ccons. -ccons ::= DEFAULT term(X). {sqlite3AddDefaultValue(pParse,X);} -ccons ::= DEFAULT LP expr(X) RP. {sqlite3AddDefaultValue(pParse,X);} -ccons ::= DEFAULT PLUS term(X). {sqlite3AddDefaultValue(pParse,X);} -ccons ::= DEFAULT MINUS term(X). { - Expr *p = sqlite3Expr(TK_UMINUS, X, 0, 0); - sqlite3AddDefaultValue(pParse,p); +ccons ::= DEFAULT term(X). {sqlite3AddDefaultValue(pParse,&X);} +ccons ::= DEFAULT LP expr(X) RP. {sqlite3AddDefaultValue(pParse,&X);} +ccons ::= DEFAULT PLUS term(X). {sqlite3AddDefaultValue(pParse,&X);} +ccons ::= DEFAULT MINUS(A) term(X). { + ExprSpan v; + v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, X.pExpr, 0, 0); + v.zStart = A.z; + v.zEnd = X.zEnd; + sqlite3AddDefaultValue(pParse,&v); } ccons ::= DEFAULT id(X). { - Expr *p = sqlite3Expr(TK_STRING, 0, 0, &X); - sqlite3AddDefaultValue(pParse,p); + ExprSpan v; + spanExpr(&v, pParse, TK_STRING, &X); + sqlite3AddDefaultValue(pParse,&v); } // In addition to the type name, we also care about the primary key and // UNIQUE constraints. // ccons ::= NULL onconf. -ccons ::= NOT NULL onconf(R). {sqlite3AddNotNull(pParse, R);} +ccons ::= NOT NULL onconf(R). {sqlite3AddNotNull(pParse, R);} ccons ::= PRIMARY KEY sortorder(Z) onconf(R) autoinc(I). - {sqlite3AddPrimaryKey(pParse,0,R,I,Z);} -ccons ::= UNIQUE onconf(R). {sqlite3CreateIndex(pParse,0,0,0,0,R,0,0,0,0);} -ccons ::= CHECK LP expr(X) RP. {sqlite3AddCheckConstraint(pParse,X);} + {sqlite3AddPrimaryKey(pParse,0,R,I,Z);} +ccons ::= UNIQUE onconf(R). {sqlite3CreateIndex(pParse,0,0,0,0,R,0,0,0,0);} +ccons ::= CHECK LP expr(X) RP. {sqlite3AddCheckConstraint(pParse,X.pExpr);} ccons ::= REFERENCES nm(T) idxlist_opt(TA) refargs(R). - {sqlite3CreateForeignKey(pParse,0,&T,TA,R);} -ccons ::= defer_subclause(D). {sqlite3DeferForeignKey(pParse,D);} -ccons ::= COLLATE id(C). {sqlite3AddCollateType(pParse, (char*)C.z, C.n);} + {sqlite3CreateForeignKey(pParse,0,&T,TA,R);} +ccons ::= defer_subclause(D). {sqlite3DeferForeignKey(pParse,D);} +ccons ::= COLLATE ids(C). 
{sqlite3AddCollateType(pParse, &C);} // The optional AUTOINCREMENT keyword %type autoinc {int} @@ -287,7 +315,7 @@ // %type refargs {int} refargs(A) ::= . { A = OE_Restrict * 0x010101; } -refargs(A) ::= refargs(X) refarg(Y). { A = (X & Y.mask) | Y.value; } +refargs(A) ::= refargs(X) refarg(Y). { A = (X & ~Y.mask) | Y.value; } %type refarg {struct {int value; int mask;}} refarg(A) ::= MATCH nm. { A.value = 0; A.mask = 0x000000; } refarg(A) ::= ON DELETE refact(X). { A.value = X; A.mask = 0x0000ff; } @@ -316,10 +344,11 @@ conslist ::= tcons. tcons ::= CONSTRAINT nm. tcons ::= PRIMARY KEY LP idxlist(X) autoinc(I) RP onconf(R). - {sqlite3AddPrimaryKey(pParse,X,R,I,0);} + {sqlite3AddPrimaryKey(pParse,X,R,I,0);} tcons ::= UNIQUE LP idxlist(X) RP onconf(R). {sqlite3CreateIndex(pParse,0,0,0,X,R,0,0,0,0);} -tcons ::= CHECK LP expr(E) RP onconf. {sqlite3AddCheckConstraint(pParse,E);} +tcons ::= CHECK LP expr(E) RP onconf. + {sqlite3AddCheckConstraint(pParse,E.pExpr);} tcons ::= FOREIGN KEY LP idxlist(FA) RP REFERENCES nm(T) idxlist_opt(TA) refargs(R) defer_subclause_opt(D). { sqlite3CreateForeignKey(pParse, FA, &T, TA, R); @@ -355,7 +384,7 @@ ///////////////////// The CREATE VIEW statement ///////////////////////////// // %ifndef SQLITE_OMIT_VIEW -cmd ::= CREATE(X) temp(T) VIEW ifnotexists(E) nm(Y) dbnm(Z) AS select(S). { +cmd ::= createkw(X) temp(T) VIEW ifnotexists(E) nm(Y) dbnm(Z) AS select(S). { sqlite3CreateView(pParse, &X, &Y, &Z, S, T, E); } cmd ::= DROP VIEW ifexists(E) fullname(X). { @@ -366,23 +395,24 @@ //////////////////////// The SELECT statement ///////////////////////////////// // cmd ::= select(X). { - sqlite3Select(pParse, X, SRT_Callback, 0, 0, 0, 0, 0); - sqlite3SelectDelete(X); + SelectDest dest = {SRT_Output, 0, 0, 0, 0}; + sqlite3Select(pParse, X, &dest); + sqlite3SelectDelete(pParse->db, X); } %type select {Select*} -%destructor select {sqlite3SelectDelete($$);} +%destructor select {sqlite3SelectDelete(pParse->db, $$);} %type oneselect {Select*} -%destructor oneselect {sqlite3SelectDelete($$);} +%destructor oneselect {sqlite3SelectDelete(pParse->db, $$);} select(A) ::= oneselect(X). {A = X;} %ifndef SQLITE_OMIT_COMPOUND_SELECT select(A) ::= select(X) multiselect_op(Y) oneselect(Z). { if( Z ){ - Z->op = Y; + Z->op = (u8)Y; Z->pPrior = X; }else{ - sqlite3SelectDelete(X); + sqlite3SelectDelete(pParse->db, X); } A = Z; } @@ -393,7 +423,7 @@ %endif SQLITE_OMIT_COMPOUND_SELECT oneselect(A) ::= SELECT distinct(D) selcollist(W) from(X) where_opt(Y) groupby_opt(P) having_opt(Q) orderby_opt(Z) limit_opt(L). { - A = sqlite3SelectNew(W,X,Y,P,Q,Z,D,L.pLimit,L.pOffset); + A = sqlite3SelectNew(pParse,W,X,Y,P,Q,Z,D,L.pLimit,L.pOffset); } // The "distinct" nonterminal is true (1) if the DISTINCT keyword is @@ -410,21 +440,25 @@ // opcode of TK_ALL. // %type selcollist {ExprList*} -%destructor selcollist {sqlite3ExprListDelete($$);} +%destructor selcollist {sqlite3ExprListDelete(pParse->db, $$);} %type sclp {ExprList*} -%destructor sclp {sqlite3ExprListDelete($$);} +%destructor sclp {sqlite3ExprListDelete(pParse->db, $$);} sclp(A) ::= selcollist(X) COMMA. {A = X;} sclp(A) ::= . {A = 0;} selcollist(A) ::= sclp(P) expr(X) as(Y). { - A = sqlite3ExprListAppend(P,X,Y.n?&Y:0); + A = sqlite3ExprListAppend(pParse, P, X.pExpr); + if( Y.n>0 ) sqlite3ExprListSetName(pParse, A, &Y, 1); + sqlite3ExprListSetSpan(pParse,A,&X); } selcollist(A) ::= sclp(P) STAR. 
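The corrected refargs action above ("(X & ~Y.mask) | Y.value" instead of "(X & Y.mask) | Y.value") matters because the ON DELETE and ON UPDATE actions live in separate bytes of one int: clearing only the byte selected by the mask lets a later clause override its own action without disturbing the other. A small sketch of that composition, using placeholder action codes rather than the real OE_* constants:

#include <stdio.h>

/* Placeholder action codes; the parser uses its OE_* constants. */
#define ACT_RESTRICT 1
#define ACT_CASCADE  2
#define ACT_SETNULL  3

#define MASK_ONDELETE 0x0000ff   /* low byte:  ON DELETE action */
#define MASK_ONUPDATE 0x00ff00   /* next byte: ON UPDATE action */

/* Fold one refarg (value/mask pair) into the accumulated flags,
** the same way the corrected grammar action does. */
static int addRefArg(int flags, int value, int mask){
  return (flags & ~mask) | value;
}

int main(void){
  /* Default: RESTRICT in every byte, as in "OE_Restrict * 0x010101". */
  int flags = ACT_RESTRICT * 0x010101;

  flags = addRefArg(flags, ACT_CASCADE, MASK_ONDELETE);       /* ON DELETE CASCADE  */
  flags = addRefArg(flags, ACT_SETNULL<<8, MASK_ONUPDATE);    /* ON UPDATE SET NULL */

  printf("ON DELETE action = %d, ON UPDATE action = %d\n",
         flags & 0xff, (flags>>8) & 0xff);
  return 0;
}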
{ - A = sqlite3ExprListAppend(P, sqlite3Expr(TK_ALL, 0, 0, 0), 0); + Expr *p = sqlite3Expr(pParse->db, TK_ALL, 0); + A = sqlite3ExprListAppend(pParse, P, p); } -selcollist(A) ::= sclp(P) nm(X) DOT STAR. { - Expr *pRight = sqlite3Expr(TK_ALL, 0, 0, 0); - Expr *pLeft = sqlite3Expr(TK_ID, 0, 0, &X); - A = sqlite3ExprListAppend(P, sqlite3Expr(TK_DOT, pLeft, pRight, 0), 0); +selcollist(A) ::= sclp(P) nm(X) DOT STAR(Y). { + Expr *pRight = sqlite3PExpr(pParse, TK_ALL, 0, 0, &Y); + Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + A = sqlite3ExprListAppend(pParse,P, pDot); } // An option "AS " phrase that can follow one of the expressions that @@ -437,16 +471,16 @@ %type seltablist {SrcList*} -%destructor seltablist {sqlite3SrcListDelete($$);} +%destructor seltablist {sqlite3SrcListDelete(pParse->db, $$);} %type stl_prefix {SrcList*} -%destructor stl_prefix {sqlite3SrcListDelete($$);} +%destructor stl_prefix {sqlite3SrcListDelete(pParse->db, $$);} %type from {SrcList*} -%destructor from {sqlite3SrcListDelete($$);} +%destructor from {sqlite3SrcListDelete(pParse->db, $$);} // A complete FROM clause. // -from(A) ::= . {A = sqliteMalloc(sizeof(*A));} -from(A) ::= FROM seltablist(X). { +from(A) ::= . {A = sqlite3DbMallocZero(pParse->db, sizeof(*A));} +from(A) ::= FROM seltablist(X). { A = X; sqlite3SrcListShiftJoinType(A); } @@ -456,29 +490,43 @@ // stl_prefix(A) ::= seltablist(X) joinop(Y). { A = X; - if( A && A->nSrc>0 ) A->a[A->nSrc-1].jointype = Y; + if( ALWAYS(A && A->nSrc>0) ) A->a[A->nSrc-1].jointype = (u8)Y; } stl_prefix(A) ::= . {A = 0;} -seltablist(A) ::= stl_prefix(X) nm(Y) dbnm(D) as(Z) on_opt(N) using_opt(U). { - A = sqlite3SrcListAppendFromTerm(X,&Y,&D,&Z,0,N,U); +seltablist(A) ::= stl_prefix(X) nm(Y) dbnm(D) as(Z) indexed_opt(I) on_opt(N) using_opt(U). { + A = sqlite3SrcListAppendFromTerm(pParse,X,&Y,&D,&Z,0,N,U); + sqlite3SrcListIndexedBy(pParse, A, &I); } %ifndef SQLITE_OMIT_SUBQUERY - seltablist(A) ::= stl_prefix(X) LP seltablist_paren(S) RP + seltablist(A) ::= stl_prefix(X) LP select(S) RP + as(Z) on_opt(N) using_opt(U). { + A = sqlite3SrcListAppendFromTerm(pParse,X,0,0,&Z,S,N,U); + } + seltablist(A) ::= stl_prefix(X) LP seltablist(F) RP as(Z) on_opt(N) using_opt(U). { - A = sqlite3SrcListAppendFromTerm(X,0,0,&Z,S,N,U); + if( X==0 ){ + sqlite3ExprDelete(pParse->db, N); + sqlite3IdListDelete(pParse->db, U); + A = F; + }else{ + Select *pSubquery; + sqlite3SrcListShiftJoinType(F); + pSubquery = sqlite3SelectNew(pParse,0,F,0,0,0,0,0,0,0); + A = sqlite3SrcListAppendFromTerm(pParse,X,0,0,&Z,pSubquery,N,U); + } } // A seltablist_paren nonterminal represents anything in a FROM that // is contained inside parentheses. This can be either a subquery or // a grouping of table and subqueries. // - %type seltablist_paren {Select*} - %destructor seltablist_paren {sqlite3SelectDelete($$);} - seltablist_paren(A) ::= select(S). {A = S;} - seltablist_paren(A) ::= seltablist(F). { - sqlite3SrcListShiftJoinType(F); - A = sqlite3SelectNew(0,F,0,0,0,0,0,0,0); - } +// %type seltablist_paren {Select*} +// %destructor seltablist_paren {sqlite3SelectDelete(pParse->db, $$);} +// seltablist_paren(A) ::= select(S). {A = S;} +// seltablist_paren(A) ::= seltablist(F). { +// sqlite3SrcListShiftJoinType(F); +// A = sqlite3SelectNew(pParse,0,F,0,0,0,0,0,0,0); +// } %endif SQLITE_OMIT_SUBQUERY %type dbnm {Token} @@ -486,8 +534,8 @@ dbnm(A) ::= DOT nm(X). 
{A = X;} %type fullname {SrcList*} -%destructor fullname {sqlite3SrcListDelete($$);} -fullname(A) ::= nm(X) dbnm(Y). {A = sqlite3SrcListAppend(0,&X,&Y);} +%destructor fullname {sqlite3SrcListDelete(pParse->db, $$);} +fullname(A) ::= nm(X) dbnm(Y). {A = sqlite3SrcListAppend(pParse->db,0,&X,&Y);} %type joinop {int} %type joinop2 {int} @@ -498,34 +546,49 @@ { X = sqlite3JoinType(pParse,&A,&B,&C); } %type on_opt {Expr*} -%destructor on_opt {sqlite3ExprDelete($$);} -on_opt(N) ::= ON expr(E). {N = E;} +%destructor on_opt {sqlite3ExprDelete(pParse->db, $$);} +on_opt(N) ::= ON expr(E). {N = E.pExpr;} on_opt(N) ::= . {N = 0;} +// Note that this block abuses the Token type just a little. If there is +// no "INDEXED BY" clause, the returned token is empty (z==0 && n==0). If +// there is an INDEXED BY clause, then the token is populated as per normal, +// with z pointing to the token data and n containing the number of bytes +// in the token. +// +// If there is a "NOT INDEXED" clause, then (z==0 && n==1), which is +// normally illegal. The sqlite3SrcListIndexedBy() function +// recognizes and interprets this as a special case. +// +%type indexed_opt {Token} +indexed_opt(A) ::= . {A.z=0; A.n=0;} +indexed_opt(A) ::= INDEXED BY nm(X). {A = X;} +indexed_opt(A) ::= NOT INDEXED. {A.z=0; A.n=1;} + %type using_opt {IdList*} -%destructor using_opt {sqlite3IdListDelete($$);} +%destructor using_opt {sqlite3IdListDelete(pParse->db, $$);} using_opt(U) ::= USING LP inscollist(L) RP. {U = L;} using_opt(U) ::= . {U = 0;} %type orderby_opt {ExprList*} -%destructor orderby_opt {sqlite3ExprListDelete($$);} +%destructor orderby_opt {sqlite3ExprListDelete(pParse->db, $$);} %type sortlist {ExprList*} -%destructor sortlist {sqlite3ExprListDelete($$);} +%destructor sortlist {sqlite3ExprListDelete(pParse->db, $$);} %type sortitem {Expr*} -%destructor sortitem {sqlite3ExprDelete($$);} +%destructor sortitem {sqlite3ExprDelete(pParse->db, $$);} orderby_opt(A) ::= . {A = 0;} orderby_opt(A) ::= ORDER BY sortlist(X). {A = X;} sortlist(A) ::= sortlist(X) COMMA sortitem(Y) sortorder(Z). { - A = sqlite3ExprListAppend(X,Y,0); - if( A ) A->a[A->nExpr-1].sortOrder = Z; + A = sqlite3ExprListAppend(pParse,X,Y); + if( A ) A->a[A->nExpr-1].sortOrder = (u8)Z; } sortlist(A) ::= sortitem(Y) sortorder(Z). { - A = sqlite3ExprListAppend(0,Y,0); - if( A && A->a ) A->a[0].sortOrder = Z; + A = sqlite3ExprListAppend(pParse,0,Y); + if( A && ALWAYS(A->a) ) A->a[0].sortOrder = (u8)Z; } -sortitem(A) ::= expr(X). {A = X;} +sortitem(A) ::= expr(X). {A = X.pExpr;} %type sortorder {int} @@ -534,14 +597,14 @@ sortorder(A) ::= . {A = SQLITE_SO_ASC;} %type groupby_opt {ExprList*} -%destructor groupby_opt {sqlite3ExprListDelete($$);} +%destructor groupby_opt {sqlite3ExprListDelete(pParse->db, $$);} groupby_opt(A) ::= . {A = 0;} groupby_opt(A) ::= GROUP BY nexprlist(X). {A = X;} %type having_opt {Expr*} -%destructor having_opt {sqlite3ExprDelete($$);} +%destructor having_opt {sqlite3ExprDelete(pParse->db, $$);} having_opt(A) ::= . {A = 0;} -having_opt(A) ::= HAVING expr(X). {A = X;} +having_opt(A) ::= HAVING expr(X). {A = X.pExpr;} %type limit_opt {struct LimitVal} @@ -553,39 +616,68 @@ // except as a transient. So there is never anything to destroy. // //%destructor limit_opt { -// sqlite3ExprDelete($$.pLimit); -// sqlite3ExprDelete($$.pOffset); +// sqlite3ExprDelete(pParse->db, $$.pLimit); +// sqlite3ExprDelete(pParse->db, $$.pOffset); //} -limit_opt(A) ::= . {A.pLimit = 0; A.pOffset = 0;} -limit_opt(A) ::= LIMIT expr(X). 
{A.pLimit = X; A.pOffset = 0;} +limit_opt(A) ::= . {A.pLimit = 0; A.pOffset = 0;} +limit_opt(A) ::= LIMIT expr(X). {A.pLimit = X.pExpr; A.pOffset = 0;} limit_opt(A) ::= LIMIT expr(X) OFFSET expr(Y). - {A.pLimit = X; A.pOffset = Y;} + {A.pLimit = X.pExpr; A.pOffset = Y.pExpr;} limit_opt(A) ::= LIMIT expr(X) COMMA expr(Y). - {A.pOffset = X; A.pLimit = Y;} + {A.pOffset = X.pExpr; A.pLimit = Y.pExpr;} /////////////////////////// The DELETE statement ///////////////////////////// // -cmd ::= DELETE FROM fullname(X) where_opt(Y). {sqlite3DeleteFrom(pParse,X,Y);} +%ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT +cmd ::= DELETE FROM fullname(X) indexed_opt(I) where_opt(W) + orderby_opt(O) limit_opt(L). { + sqlite3SrcListIndexedBy(pParse, X, &I); + W = sqlite3LimitWhere(pParse, X, W, O, L.pLimit, L.pOffset, "DELETE"); + sqlite3DeleteFrom(pParse,X,W); +} +%endif +%ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT +cmd ::= DELETE FROM fullname(X) indexed_opt(I) where_opt(W). { + sqlite3SrcListIndexedBy(pParse, X, &I); + sqlite3DeleteFrom(pParse,X,W); +} +%endif %type where_opt {Expr*} -%destructor where_opt {sqlite3ExprDelete($$);} +%destructor where_opt {sqlite3ExprDelete(pParse->db, $$);} where_opt(A) ::= . {A = 0;} -where_opt(A) ::= WHERE expr(X). {A = X;} +where_opt(A) ::= WHERE expr(X). {A = X.pExpr;} ////////////////////////// The UPDATE command //////////////////////////////// // -cmd ::= UPDATE orconf(R) fullname(X) SET setlist(Y) where_opt(Z). { - sqlite3ExprListCheckLength(pParse,Y,SQLITE_MAX_COLUMN,"set list"); - sqlite3Update(pParse,X,Y,Z,R); +%ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT +cmd ::= UPDATE orconf(R) fullname(X) indexed_opt(I) SET setlist(Y) where_opt(W) orderby_opt(O) limit_opt(L). { + sqlite3SrcListIndexedBy(pParse, X, &I); + sqlite3ExprListCheckLength(pParse,Y,"set list"); + W = sqlite3LimitWhere(pParse, X, W, O, L.pLimit, L.pOffset, "UPDATE"); + sqlite3Update(pParse,X,Y,W,R); } +%endif +%ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT +cmd ::= UPDATE orconf(R) fullname(X) indexed_opt(I) SET setlist(Y) where_opt(W). { + sqlite3SrcListIndexedBy(pParse, X, &I); + sqlite3ExprListCheckLength(pParse,Y,"set list"); + sqlite3Update(pParse,X,Y,W,R); +} +%endif %type setlist {ExprList*} -%destructor setlist {sqlite3ExprListDelete($$);} +%destructor setlist {sqlite3ExprListDelete(pParse->db, $$);} -setlist(A) ::= setlist(Z) COMMA nm(X) EQ expr(Y). - {A = sqlite3ExprListAppend(Z,Y,&X);} -setlist(A) ::= nm(X) EQ expr(Y). {A = sqlite3ExprListAppend(0,Y,&X);} +setlist(A) ::= setlist(Z) COMMA nm(X) EQ expr(Y). { + A = sqlite3ExprListAppend(pParse, Z, Y.pExpr); + sqlite3ExprListSetName(pParse, A, &X, 1); +} +setlist(A) ::= nm(X) EQ expr(Y). { + A = sqlite3ExprListAppend(pParse, 0, Y.pExpr); + sqlite3ExprListSetName(pParse, A, &X, 1); +} ////////////////////////// The INSERT command ///////////////////////////////// // @@ -603,266 +695,362 @@ %type itemlist {ExprList*} -%destructor itemlist {sqlite3ExprListDelete($$);} +%destructor itemlist {sqlite3ExprListDelete(pParse->db, $$);} -itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = sqlite3ExprListAppend(X,Y,0);} -itemlist(A) ::= expr(X). {A = sqlite3ExprListAppend(0,X,0);} +itemlist(A) ::= itemlist(X) COMMA expr(Y). + {A = sqlite3ExprListAppend(pParse,X,Y.pExpr);} +itemlist(A) ::= expr(X). 
+ {A = sqlite3ExprListAppend(pParse,0,X.pExpr);} %type inscollist_opt {IdList*} -%destructor inscollist_opt {sqlite3IdListDelete($$);} +%destructor inscollist_opt {sqlite3IdListDelete(pParse->db, $$);} %type inscollist {IdList*} -%destructor inscollist {sqlite3IdListDelete($$);} +%destructor inscollist {sqlite3IdListDelete(pParse->db, $$);} inscollist_opt(A) ::= . {A = 0;} inscollist_opt(A) ::= LP inscollist(X) RP. {A = X;} -inscollist(A) ::= inscollist(X) COMMA nm(Y). {A = sqlite3IdListAppend(X,&Y);} -inscollist(A) ::= nm(Y). {A = sqlite3IdListAppend(0,&Y);} +inscollist(A) ::= inscollist(X) COMMA nm(Y). + {A = sqlite3IdListAppend(pParse->db,X,&Y);} +inscollist(A) ::= nm(Y). + {A = sqlite3IdListAppend(pParse->db,0,&Y);} /////////////////////////// Expression Processing ///////////////////////////// // -%type expr {Expr*} -%destructor expr {sqlite3ExprDelete($$);} -%type term {Expr*} -%destructor term {sqlite3ExprDelete($$);} +%type expr {ExprSpan} +%destructor expr {sqlite3ExprDelete(pParse->db, $$.pExpr);} +%type term {ExprSpan} +%destructor term {sqlite3ExprDelete(pParse->db, $$.pExpr);} + +%include { + /* This is a utility routine used to set the ExprSpan.zStart and + ** ExprSpan.zEnd values of pOut so that the span covers the complete + ** range of text beginning with pStart and going to the end of pEnd. + */ + static void spanSet(ExprSpan *pOut, Token *pStart, Token *pEnd){ + pOut->zStart = pStart->z; + pOut->zEnd = &pEnd->z[pEnd->n]; + } + + /* Construct a new Expr object from a single identifier. Use the + ** new Expr to populate pOut. Set the span of pOut to be the identifier + ** that created the expression. + */ + static void spanExpr(ExprSpan *pOut, Parse *pParse, int op, Token *pValue){ + pOut->pExpr = sqlite3PExpr(pParse, op, 0, 0, pValue); + pOut->zStart = pValue->z; + pOut->zEnd = &pValue->z[pValue->n]; + } +} expr(A) ::= term(X). {A = X;} -expr(A) ::= LP(B) expr(X) RP(E). {A = X; sqlite3ExprSpan(A,&B,&E); } -term(A) ::= NULL(X). {A = sqlite3Expr(@X, 0, 0, &X);} -expr(A) ::= ID(X). {A = sqlite3Expr(TK_ID, 0, 0, &X);} -expr(A) ::= JOIN_KW(X). {A = sqlite3Expr(TK_ID, 0, 0, &X);} +expr(A) ::= LP(B) expr(X) RP(E). {A.pExpr = X.pExpr; spanSet(&A,&B,&E);} +term(A) ::= NULL(X). {spanExpr(&A, pParse, @X, &X);} +expr(A) ::= id(X). {spanExpr(&A, pParse, TK_ID, &X);} +expr(A) ::= JOIN_KW(X). {spanExpr(&A, pParse, TK_ID, &X);} expr(A) ::= nm(X) DOT nm(Y). { - Expr *temp1 = sqlite3Expr(TK_ID, 0, 0, &X); - Expr *temp2 = sqlite3Expr(TK_ID, 0, 0, &Y); - A = sqlite3Expr(TK_DOT, temp1, temp2, 0); + Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Y); + A.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); + spanSet(&A,&X,&Y); } expr(A) ::= nm(X) DOT nm(Y) DOT nm(Z). { - Expr *temp1 = sqlite3Expr(TK_ID, 0, 0, &X); - Expr *temp2 = sqlite3Expr(TK_ID, 0, 0, &Y); - Expr *temp3 = sqlite3Expr(TK_ID, 0, 0, &Z); - Expr *temp4 = sqlite3Expr(TK_DOT, temp2, temp3, 0); - A = sqlite3Expr(TK_DOT, temp1, temp4, 0); -} -term(A) ::= INTEGER|FLOAT|BLOB(X). {A = sqlite3Expr(@X, 0, 0, &X);} -term(A) ::= STRING(X). {A = sqlite3Expr(@X, 0, 0, &X);} -expr(A) ::= REGISTER(X). {A = sqlite3RegisterExpr(pParse, &X);} -expr(A) ::= VARIABLE(X). 
{ - Token *pToken = &X; - Expr *pExpr = A = sqlite3Expr(TK_VARIABLE, 0, 0, pToken); - sqlite3ExprAssignVarNumber(pParse, pExpr); + Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &X); + Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Y); + Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &Z); + Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); + A.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); + spanSet(&A,&X,&Z); +} +term(A) ::= INTEGER|FLOAT|BLOB(X). {spanExpr(&A, pParse, @X, &X);} +term(A) ::= STRING(X). {spanExpr(&A, pParse, @X, &X);} +expr(A) ::= REGISTER(X). { + /* When doing a nested parse, one can include terms in an expression + ** that look like this: #1 #2 ... These terms refer to registers + ** in the virtual machine. #N is the N-th register. */ + if( pParse->nested==0 ){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &X); + A.pExpr = 0; + }else{ + A.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &X); + if( A.pExpr ) sqlite3GetInt32(&X.z[1], &A.pExpr->iTable); + } + spanSet(&A, &X, &X); } -expr(A) ::= expr(E) COLLATE id(C). { - A = sqlite3ExprSetColl(pParse, E, &C); +expr(A) ::= VARIABLE(X). { + spanExpr(&A, pParse, TK_VARIABLE, &X); + sqlite3ExprAssignVarNumber(pParse, A.pExpr); + spanSet(&A, &X, &X); +} +expr(A) ::= expr(E) COLLATE ids(C). { + A.pExpr = sqlite3ExprSetColl(pParse, E.pExpr, &C); + A.zStart = E.zStart; + A.zEnd = &C.z[C.n]; } %ifndef SQLITE_OMIT_CAST expr(A) ::= CAST(X) LP expr(E) AS typetoken(T) RP(Y). { - A = sqlite3Expr(TK_CAST, E, 0, &T); - sqlite3ExprSpan(A,&X,&Y); + A.pExpr = sqlite3PExpr(pParse, TK_CAST, E.pExpr, 0, &T); + spanSet(&A,&X,&Y); } %endif SQLITE_OMIT_CAST expr(A) ::= ID(X) LP distinct(D) exprlist(Y) RP(E). { - if( Y && Y->nExpr>SQLITE_MAX_FUNCTION_ARG ){ + if( Y && Y->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ sqlite3ErrorMsg(pParse, "too many arguments on function %T", &X); } - A = sqlite3ExprFunction(Y, &X); - sqlite3ExprSpan(A,&X,&E); - if( D && A ){ - A->flags |= EP_Distinct; + A.pExpr = sqlite3ExprFunction(pParse, Y, &X); + spanSet(&A,&X,&E); + if( D && A.pExpr ){ + A.pExpr->flags |= EP_Distinct; } } expr(A) ::= ID(X) LP STAR RP(E). { - A = sqlite3ExprFunction(0, &X); - sqlite3ExprSpan(A,&X,&E); + A.pExpr = sqlite3ExprFunction(pParse, 0, &X); + spanSet(&A,&X,&E); } term(A) ::= CTIME_KW(OP). { /* The CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP values are ** treated as functions that return constants */ - A = sqlite3ExprFunction(0,&OP); - if( A ){ - A->op = TK_CONST_FUNC; - A->span = OP; + A.pExpr = sqlite3ExprFunction(pParse, 0,&OP); + if( A.pExpr ){ + A.pExpr->op = TK_CONST_FUNC; + } + spanSet(&A, &OP, &OP); +} + +%include { + /* This routine constructs a binary expression node out of two ExprSpan + ** objects and uses the result to populate a new ExprSpan object. + */ + static void spanBinaryExpr( + ExprSpan *pOut, /* Write the result here */ + Parse *pParse, /* The parsing context. Errors accumulate here */ + int op, /* The binary operation */ + ExprSpan *pLeft, /* The left operand */ + ExprSpan *pRight /* The right operand */ + ){ + pOut->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr, 0); + pOut->zStart = pLeft->zStart; + pOut->zEnd = pRight->zEnd; } } -expr(A) ::= expr(X) AND(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) OR(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) LT|GT|GE|LE(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) EQ|NE(OP) expr(Y). 
{A = sqlite3Expr(@OP, X, Y, 0);} + +expr(A) ::= expr(X) AND(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) OR(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) LT|GT|GE|LE(OP) expr(Y). + {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) EQ|NE(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) BITAND|BITOR|LSHIFT|RSHIFT(OP) expr(Y). - {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) PLUS|MINUS(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) STAR|SLASH|REM(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} -expr(A) ::= expr(X) CONCAT(OP) expr(Y). {A = sqlite3Expr(@OP, X, Y, 0);} + {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) PLUS|MINUS(OP) expr(Y). + {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) STAR|SLASH|REM(OP) expr(Y). + {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} +expr(A) ::= expr(X) CONCAT(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} %type likeop {struct LikeOp} likeop(A) ::= LIKE_KW(X). {A.eOperator = X; A.not = 0;} likeop(A) ::= NOT LIKE_KW(X). {A.eOperator = X; A.not = 1;} likeop(A) ::= MATCH(X). {A.eOperator = X; A.not = 0;} likeop(A) ::= NOT MATCH(X). {A.eOperator = X; A.not = 1;} -%type escape {Expr*} -%destructor escape {sqlite3ExprDelete($$);} +%type escape {ExprSpan} +%destructor escape {sqlite3ExprDelete(pParse->db, $$.pExpr);} escape(X) ::= ESCAPE expr(A). [ESCAPE] {X = A;} -escape(X) ::= . [ESCAPE] {X = 0;} +escape(X) ::= . [ESCAPE] {memset(&X,0,sizeof(X));} expr(A) ::= expr(X) likeop(OP) expr(Y) escape(E). [LIKE_KW] { ExprList *pList; - pList = sqlite3ExprListAppend(0, Y, 0); - pList = sqlite3ExprListAppend(pList, X, 0); - if( E ){ - pList = sqlite3ExprListAppend(pList, E, 0); - } - A = sqlite3ExprFunction(pList, &OP.eOperator); - if( OP.not ) A = sqlite3Expr(TK_NOT, A, 0, 0); - sqlite3ExprSpan(A, &X->span, &Y->span); - if( A ) A->flags |= EP_InfixFunc; -} - -expr(A) ::= expr(X) ISNULL|NOTNULL(E). { - A = sqlite3Expr(@E, X, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); -} -expr(A) ::= expr(X) IS NULL(E). { - A = sqlite3Expr(TK_ISNULL, X, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); -} -expr(A) ::= expr(X) NOT NULL(E). { - A = sqlite3Expr(TK_NOTNULL, X, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); -} -expr(A) ::= expr(X) IS NOT NULL(E). { - A = sqlite3Expr(TK_NOTNULL, X, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); -} -expr(A) ::= NOT|BITNOT(B) expr(X). { - A = sqlite3Expr(@B, X, 0, 0); - sqlite3ExprSpan(A,&B,&X->span); -} -expr(A) ::= MINUS(B) expr(X). [UMINUS] { - A = sqlite3Expr(TK_UMINUS, X, 0, 0); - sqlite3ExprSpan(A,&B,&X->span); -} -expr(A) ::= PLUS(B) expr(X). 
[UPLUS] { - A = sqlite3Expr(TK_UPLUS, X, 0, 0); - sqlite3ExprSpan(A,&B,&X->span); + pList = sqlite3ExprListAppend(pParse,0, Y.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, X.pExpr); + if( E.pExpr ){ + pList = sqlite3ExprListAppend(pParse,pList, E.pExpr); + } + A.pExpr = sqlite3ExprFunction(pParse, pList, &OP.eOperator); + if( OP.not ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = X.zStart; + A.zEnd = Y.zEnd; + if( A.pExpr ) A.pExpr->flags |= EP_InfixFunc; } + +%include { + /* Construct an expression node for a unary postfix operator + */ + static void spanUnaryPostfix( + ExprSpan *pOut, /* Write the new expression node here */ + Parse *pParse, /* Parsing context to record errors */ + int op, /* The operator */ + ExprSpan *pOperand, /* The operand */ + Token *pPostOp /* The operand token for setting the span */ + ){ + pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); + pOut->zStart = pOperand->zStart; + pOut->zEnd = &pPostOp->z[pPostOp->n]; + } +} + +expr(A) ::= expr(X) ISNULL|NOTNULL(E). {spanUnaryPostfix(&A,pParse,@E,&X,&E);} +expr(A) ::= expr(X) IS NULL(E). {spanUnaryPostfix(&A,pParse,TK_ISNULL,&X,&E);} +expr(A) ::= expr(X) NOT NULL(E). {spanUnaryPostfix(&A,pParse,TK_NOTNULL,&X,&E);} +expr(A) ::= expr(X) IS NOT NULL(E). + {spanUnaryPostfix(&A,pParse,TK_NOTNULL,&X,&E);} + +%include { + /* Construct an expression node for a unary prefix operator + */ + static void spanUnaryPrefix( + ExprSpan *pOut, /* Write the new expression node here */ + Parse *pParse, /* Parsing context to record errors */ + int op, /* The operator */ + ExprSpan *pOperand, /* The operand */ + Token *pPreOp /* The operand token for setting the span */ + ){ + pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); + pOut->zStart = pPreOp->z; + pOut->zEnd = pOperand->zEnd; + } +} + + + +expr(A) ::= NOT(B) expr(X). {spanUnaryPrefix(&A,pParse,@B,&X,&B);} +expr(A) ::= BITNOT(B) expr(X). {spanUnaryPrefix(&A,pParse,@B,&X,&B);} +expr(A) ::= MINUS(B) expr(X). [UMINUS] + {spanUnaryPrefix(&A,pParse,TK_UMINUS,&X,&B);} +expr(A) ::= PLUS(B) expr(X). [UPLUS] + {spanUnaryPrefix(&A,pParse,TK_UPLUS,&X,&B);} + %type between_op {int} between_op(A) ::= BETWEEN. {A = 0;} between_op(A) ::= NOT BETWEEN. {A = 1;} expr(A) ::= expr(W) between_op(N) expr(X) AND expr(Y). [BETWEEN] { - ExprList *pList = sqlite3ExprListAppend(0, X, 0); - pList = sqlite3ExprListAppend(pList, Y, 0); - A = sqlite3Expr(TK_BETWEEN, W, 0, 0); - if( A ){ - A->pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, X.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, Y.pExpr); + A.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, W.pExpr, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pList = pList; }else{ - sqlite3ExprListDelete(pList); + sqlite3ExprListDelete(pParse->db, pList); } - if( N ) A = sqlite3Expr(TK_NOT, A, 0, 0); - sqlite3ExprSpan(A,&W->span,&Y->span); + if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = W.zStart; + A.zEnd = Y.zEnd; } %ifndef SQLITE_OMIT_SUBQUERY %type in_op {int} in_op(A) ::= IN. {A = 0;} in_op(A) ::= NOT IN. {A = 1;} expr(A) ::= expr(X) in_op(N) LP exprlist(Y) RP(E). 
[IN] { - A = sqlite3Expr(TK_IN, X, 0, 0); - if( A ){ - A->pList = Y; - sqlite3ExprSetHeight(A); + A.pExpr = sqlite3PExpr(pParse, TK_IN, X.pExpr, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pList = Y; + sqlite3ExprSetHeight(pParse, A.pExpr); }else{ - sqlite3ExprListDelete(Y); + sqlite3ExprListDelete(pParse->db, Y); } - if( N ) A = sqlite3Expr(TK_NOT, A, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); + if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = X.zStart; + A.zEnd = &E.z[E.n]; } expr(A) ::= LP(B) select(X) RP(E). { - A = sqlite3Expr(TK_SELECT, 0, 0, 0); - if( A ){ - A->pSelect = X; - sqlite3ExprSetHeight(A); + A.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pSelect = X; + ExprSetProperty(A.pExpr, EP_xIsSelect); + sqlite3ExprSetHeight(pParse, A.pExpr); }else{ - sqlite3SelectDelete(X); + sqlite3SelectDelete(pParse->db, X); } - sqlite3ExprSpan(A,&B,&E); + A.zStart = B.z; + A.zEnd = &E.z[E.n]; } expr(A) ::= expr(X) in_op(N) LP select(Y) RP(E). [IN] { - A = sqlite3Expr(TK_IN, X, 0, 0); - if( A ){ - A->pSelect = Y; - sqlite3ExprSetHeight(A); + A.pExpr = sqlite3PExpr(pParse, TK_IN, X.pExpr, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pSelect = Y; + ExprSetProperty(A.pExpr, EP_xIsSelect); + sqlite3ExprSetHeight(pParse, A.pExpr); }else{ - sqlite3SelectDelete(Y); + sqlite3SelectDelete(pParse->db, Y); } - if( N ) A = sqlite3Expr(TK_NOT, A, 0, 0); - sqlite3ExprSpan(A,&X->span,&E); + if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = X.zStart; + A.zEnd = &E.z[E.n]; } expr(A) ::= expr(X) in_op(N) nm(Y) dbnm(Z). [IN] { - SrcList *pSrc = sqlite3SrcListAppend(0,&Y,&Z); - A = sqlite3Expr(TK_IN, X, 0, 0); - if( A ){ - A->pSelect = sqlite3SelectNew(0,pSrc,0,0,0,0,0,0,0); - sqlite3ExprSetHeight(A); + SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&Y,&Z); + A.pExpr = sqlite3PExpr(pParse, TK_IN, X.pExpr, 0, 0); + if( A.pExpr ){ + A.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); + ExprSetProperty(A.pExpr, EP_xIsSelect); + sqlite3ExprSetHeight(pParse, A.pExpr); }else{ - sqlite3SrcListDelete(pSrc); + sqlite3SrcListDelete(pParse->db, pSrc); } - if( N ) A = sqlite3Expr(TK_NOT, A, 0, 0); - sqlite3ExprSpan(A,&X->span,Z.z?&Z:&Y); + if( N ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); + A.zStart = X.zStart; + A.zEnd = Z.z ? &Z.z[Z.n] : &Y.z[Y.n]; } expr(A) ::= EXISTS(B) LP select(Y) RP(E). { - Expr *p = A = sqlite3Expr(TK_EXISTS, 0, 0, 0); + Expr *p = A.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); if( p ){ - p->pSelect = Y; - sqlite3ExprSpan(p,&B,&E); - sqlite3ExprSetHeight(A); + p->x.pSelect = Y; + ExprSetProperty(p, EP_xIsSelect); + sqlite3ExprSetHeight(pParse, p); }else{ - sqlite3SelectDelete(Y); + sqlite3SelectDelete(pParse->db, Y); } + A.zStart = B.z; + A.zEnd = &E.z[E.n]; } %endif SQLITE_OMIT_SUBQUERY /* CASE expressions */ expr(A) ::= CASE(C) case_operand(X) case_exprlist(Y) case_else(Z) END(E). { - A = sqlite3Expr(TK_CASE, X, Z, 0); - if( A ){ - A->pList = Y; - sqlite3ExprSetHeight(A); + A.pExpr = sqlite3PExpr(pParse, TK_CASE, X, Z, 0); + if( A.pExpr ){ + A.pExpr->x.pList = Y; + sqlite3ExprSetHeight(pParse, A.pExpr); }else{ - sqlite3ExprListDelete(Y); + sqlite3ExprListDelete(pParse->db, Y); } - sqlite3ExprSpan(A, &C, &E); + A.zStart = C.z; + A.zEnd = &E.z[E.n]; } %type case_exprlist {ExprList*} -%destructor case_exprlist {sqlite3ExprListDelete($$);} +%destructor case_exprlist {sqlite3ExprListDelete(pParse->db, $$);} case_exprlist(A) ::= case_exprlist(X) WHEN expr(Y) THEN expr(Z). 
{ - A = sqlite3ExprListAppend(X, Y, 0); - A = sqlite3ExprListAppend(A, Z, 0); + A = sqlite3ExprListAppend(pParse,X, Y.pExpr); + A = sqlite3ExprListAppend(pParse,A, Z.pExpr); } case_exprlist(A) ::= WHEN expr(Y) THEN expr(Z). { - A = sqlite3ExprListAppend(0, Y, 0); - A = sqlite3ExprListAppend(A, Z, 0); + A = sqlite3ExprListAppend(pParse,0, Y.pExpr); + A = sqlite3ExprListAppend(pParse,A, Z.pExpr); } %type case_else {Expr*} -%destructor case_else {sqlite3ExprDelete($$);} -case_else(A) ::= ELSE expr(X). {A = X;} +%destructor case_else {sqlite3ExprDelete(pParse->db, $$);} +case_else(A) ::= ELSE expr(X). {A = X.pExpr;} case_else(A) ::= . {A = 0;} %type case_operand {Expr*} -%destructor case_operand {sqlite3ExprDelete($$);} -case_operand(A) ::= expr(X). {A = X;} +%destructor case_operand {sqlite3ExprDelete(pParse->db, $$);} +case_operand(A) ::= expr(X). {A = X.pExpr;} case_operand(A) ::= . {A = 0;} %type exprlist {ExprList*} -%destructor exprlist {sqlite3ExprListDelete($$);} +%destructor exprlist {sqlite3ExprListDelete(pParse->db, $$);} %type nexprlist {ExprList*} -%destructor nexprlist {sqlite3ExprListDelete($$);} +%destructor nexprlist {sqlite3ExprListDelete(pParse->db, $$);} exprlist(A) ::= nexprlist(X). {A = X;} exprlist(A) ::= . {A = 0;} -nexprlist(A) ::= nexprlist(X) COMMA expr(Y). {A = sqlite3ExprListAppend(X,Y,0);} -nexprlist(A) ::= expr(Y). {A = sqlite3ExprListAppend(0,Y,0);} +nexprlist(A) ::= nexprlist(X) COMMA expr(Y). + {A = sqlite3ExprListAppend(pParse,X,Y.pExpr);} +nexprlist(A) ::= expr(Y). + {A = sqlite3ExprListAppend(pParse,0,Y.pExpr);} ///////////////////////////// The CREATE INDEX command /////////////////////// // -cmd ::= CREATE(S) uniqueflag(U) INDEX ifnotexists(NE) nm(X) dbnm(D) +cmd ::= createkw(S) uniqueflag(U) INDEX ifnotexists(NE) nm(X) dbnm(D) ON nm(Y) LP idxlist(Z) RP(E). { - sqlite3CreateIndex(pParse, &X, &D, sqlite3SrcListAppend(0,&Y,0), Z, U, + sqlite3CreateIndex(pParse, &X, &D, + sqlite3SrcListAppend(pParse->db,0,&Y,0), Z, U, &S, &E, SQLITE_SO_ASC, NE); } @@ -871,38 +1059,38 @@ uniqueflag(A) ::= . {A = OE_None;} %type idxlist {ExprList*} -%destructor idxlist {sqlite3ExprListDelete($$);} +%destructor idxlist {sqlite3ExprListDelete(pParse->db, $$);} %type idxlist_opt {ExprList*} -%destructor idxlist_opt {sqlite3ExprListDelete($$);} -%type idxitem {Token} +%destructor idxlist_opt {sqlite3ExprListDelete(pParse->db, $$);} idxlist_opt(A) ::= . {A = 0;} idxlist_opt(A) ::= LP idxlist(X) RP. {A = X;} -idxlist(A) ::= idxlist(X) COMMA idxitem(Y) collate(C) sortorder(Z). { +idxlist(A) ::= idxlist(X) COMMA nm(Y) collate(C) sortorder(Z). { Expr *p = 0; if( C.n>0 ){ - p = sqlite3Expr(TK_COLUMN, 0, 0, 0); - if( p ) p->pColl = sqlite3LocateCollSeq(pParse, (char*)C.z, C.n); + p = sqlite3Expr(pParse->db, TK_COLUMN, 0); + sqlite3ExprSetColl(pParse, p, &C); } - A = sqlite3ExprListAppend(X, p, &Y); - sqlite3ExprListCheckLength(pParse, A, SQLITE_MAX_COLUMN, "index"); - if( A ) A->a[A->nExpr-1].sortOrder = Z; + A = sqlite3ExprListAppend(pParse,X, p); + sqlite3ExprListSetName(pParse,A,&Y,1); + sqlite3ExprListCheckLength(pParse, A, "index"); + if( A ) A->a[A->nExpr-1].sortOrder = (u8)Z; } -idxlist(A) ::= idxitem(Y) collate(C) sortorder(Z). { +idxlist(A) ::= nm(Y) collate(C) sortorder(Z). 
{ Expr *p = 0; if( C.n>0 ){ - p = sqlite3Expr(TK_COLUMN, 0, 0, 0); - if( p ) p->pColl = sqlite3LocateCollSeq(pParse, (char*)C.z, C.n); + p = sqlite3PExpr(pParse, TK_COLUMN, 0, 0, 0); + sqlite3ExprSetColl(pParse, p, &C); } - A = sqlite3ExprListAppend(0, p, &Y); - sqlite3ExprListCheckLength(pParse, A, SQLITE_MAX_COLUMN, "index"); - if( A ) A->a[A->nExpr-1].sortOrder = Z; + A = sqlite3ExprListAppend(pParse,0, p); + sqlite3ExprListSetName(pParse, A, &Y, 1); + sqlite3ExprListCheckLength(pParse, A, "index"); + if( A ) A->a[A->nExpr-1].sortOrder = (u8)Z; } -idxitem(A) ::= nm(X). {A = X;} %type collate {Token} -collate(C) ::= . {C.z = 0; C.n = 0;} -collate(C) ::= COLLATE id(X). {C = X;} +collate(C) ::= . {C.z = 0; C.n = 0;} +collate(C) ::= COLLATE ids(X). {C = X;} ///////////////////////////// The DROP INDEX command ///////////////////////// @@ -921,15 +1109,19 @@ ///////////////////////////// The PRAGMA command ///////////////////////////// // %ifndef SQLITE_OMIT_PRAGMA -cmd ::= PRAGMA nm(X) dbnm(Z) EQ nmnum(Y). {sqlite3Pragma(pParse,&X,&Z,&Y,0);} -cmd ::= PRAGMA nm(X) dbnm(Z) EQ ON(Y). {sqlite3Pragma(pParse,&X,&Z,&Y,0);} -cmd ::= PRAGMA nm(X) dbnm(Z) EQ minus_num(Y). { - sqlite3Pragma(pParse,&X,&Z,&Y,1); -} +cmd ::= PRAGMA nm(X) dbnm(Z). {sqlite3Pragma(pParse,&X,&Z,0,0);} +cmd ::= PRAGMA nm(X) dbnm(Z) EQ nmnum(Y). {sqlite3Pragma(pParse,&X,&Z,&Y,0);} cmd ::= PRAGMA nm(X) dbnm(Z) LP nmnum(Y) RP. {sqlite3Pragma(pParse,&X,&Z,&Y,0);} -cmd ::= PRAGMA nm(X) dbnm(Z). {sqlite3Pragma(pParse,&X,&Z,0,0);} +cmd ::= PRAGMA nm(X) dbnm(Z) EQ minus_num(Y). + {sqlite3Pragma(pParse,&X,&Z,&Y,1);} +cmd ::= PRAGMA nm(X) dbnm(Z) LP minus_num(Y) RP. + {sqlite3Pragma(pParse,&X,&Z,&Y,1);} + nmnum(A) ::= plus_num(X). {A = X;} nmnum(A) ::= nm(X). {A = X;} +nmnum(A) ::= ON(X). {A = X;} +nmnum(A) ::= DELETE(X). {A = X;} +nmnum(A) ::= DEFAULT(X). {A = X;} %endif SQLITE_OMIT_PRAGMA plus_num(A) ::= plus_opt number(X). {A = X;} minus_num(A) ::= MINUS number(X). {A = X;} @@ -941,10 +1133,10 @@ %ifndef SQLITE_OMIT_TRIGGER -cmd ::= CREATE trigger_decl(A) BEGIN trigger_cmd_list(S) END(Z). { +cmd ::= createkw trigger_decl(A) BEGIN trigger_cmd_list(S) END(Z). { Token all; all.z = A.z; - all.n = (Z.z - A.z) + Z.n; + all.n = (int)(Z.z - A.z) + Z.n; sqlite3FinishTrigger(pParse, S, &all); } @@ -955,14 +1147,14 @@ A = (Z.n==0?B:Z); } -%type trigger_time {int} +%type trigger_time {int} trigger_time(A) ::= BEFORE. { A = TK_BEFORE; } trigger_time(A) ::= AFTER. { A = TK_AFTER; } trigger_time(A) ::= INSTEAD OF. { A = TK_INSTEAD;} trigger_time(A) ::= . { A = TK_BEFORE; } %type trigger_event {struct TrigEvent} -%destructor trigger_event {sqlite3IdListDelete($$.b);} +%destructor trigger_event {sqlite3IdListDelete(pParse->db, $$.b);} trigger_event(A) ::= DELETE|INSERT(OP). {A.a = @OP; A.b = 0;} trigger_event(A) ::= UPDATE(OP). {A.a = @OP; A.b = 0;} trigger_event(A) ::= UPDATE OF inscollist(X). {A.a = TK_UPDATE; A.b = X;} @@ -971,58 +1163,61 @@ foreach_clause ::= FOR EACH ROW. %type when_clause {Expr*} -%destructor when_clause {sqlite3ExprDelete($$);} +%destructor when_clause {sqlite3ExprDelete(pParse->db, $$);} when_clause(A) ::= . { A = 0; } -when_clause(A) ::= WHEN expr(X). { A = X; } +when_clause(A) ::= WHEN expr(X). { A = X.pExpr; } %type trigger_cmd_list {TriggerStep*} -%destructor trigger_cmd_list {sqlite3DeleteTriggerStep($$);} +%destructor trigger_cmd_list {sqlite3DeleteTriggerStep(pParse->db, $$);} trigger_cmd_list(A) ::= trigger_cmd_list(Y) trigger_cmd(X) SEMI. 
{ - if( Y ){ - Y->pLast->pNext = X; - }else{ - Y = X; - } + assert( Y!=0 ); + Y->pLast->pNext = X; Y->pLast = X; A = Y; } -trigger_cmd_list(A) ::= . { A = 0; } +trigger_cmd_list(A) ::= trigger_cmd(X) SEMI. { + assert( X!=0 ); + X->pLast = X; + A = X; +} %type trigger_cmd {TriggerStep*} -%destructor trigger_cmd {sqlite3DeleteTriggerStep($$);} +%destructor trigger_cmd {sqlite3DeleteTriggerStep(pParse->db, $$);} // UPDATE trigger_cmd(A) ::= UPDATE orconf(R) nm(X) SET setlist(Y) where_opt(Z). - { A = sqlite3TriggerUpdateStep(&X, Y, Z, R); } + { A = sqlite3TriggerUpdateStep(pParse->db, &X, Y, Z, R); } // INSERT trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F) VALUES LP itemlist(Y) RP. - {A = sqlite3TriggerInsertStep(&X, F, Y, 0, R);} + {A = sqlite3TriggerInsertStep(pParse->db, &X, F, Y, 0, R);} trigger_cmd(A) ::= insert_cmd(R) INTO nm(X) inscollist_opt(F) select(S). - {A = sqlite3TriggerInsertStep(&X, F, 0, S, R);} + {A = sqlite3TriggerInsertStep(pParse->db, &X, F, 0, S, R);} // DELETE trigger_cmd(A) ::= DELETE FROM nm(X) where_opt(Y). - {A = sqlite3TriggerDeleteStep(&X, Y);} + {A = sqlite3TriggerDeleteStep(pParse->db, &X, Y);} // SELECT -trigger_cmd(A) ::= select(X). {A = sqlite3TriggerSelectStep(X); } +trigger_cmd(A) ::= select(X). {A = sqlite3TriggerSelectStep(pParse->db, X); } // The special RAISE expression that may occur in trigger programs expr(A) ::= RAISE(X) LP IGNORE RP(Y). { - A = sqlite3Expr(TK_RAISE, 0, 0, 0); - if( A ){ - A->iColumn = OE_Ignore; - sqlite3ExprSpan(A, &X, &Y); + A.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); + if( A.pExpr ){ + A.pExpr->affinity = OE_Ignore; } + A.zStart = X.z; + A.zEnd = &Y.z[Y.n]; } expr(A) ::= RAISE(X) LP raisetype(T) COMMA nm(Z) RP(Y). { - A = sqlite3Expr(TK_RAISE, 0, 0, &Z); - if( A ) { - A->iColumn = T; - sqlite3ExprSpan(A, &X, &Y); + A.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &Z); + if( A.pExpr ) { + A.pExpr->affinity = (char)T; } + A.zStart = X.z; + A.zEnd = &Y.z[Y.n]; } %endif !SQLITE_OMIT_TRIGGER @@ -1042,16 +1237,16 @@ //////////////////////// ATTACH DATABASE file AS name ///////////////////////// %ifndef SQLITE_OMIT_ATTACH cmd ::= ATTACH database_kw_opt expr(F) AS expr(D) key_opt(K). { - sqlite3Attach(pParse, F, D, K); + sqlite3Attach(pParse, F.pExpr, D.pExpr, K); } cmd ::= DETACH database_kw_opt expr(D). { - sqlite3Detach(pParse, D); + sqlite3Detach(pParse, D.pExpr); } -%type key_opt {Expr *} -%destructor key_opt {sqlite3ExprDelete($$);} +%type key_opt {Expr*} +%destructor key_opt {sqlite3ExprDelete(pParse->db, $$);} key_opt(A) ::= . { A = 0; } -key_opt(A) ::= KEY expr(X). { A = X; } +key_opt(A) ::= KEY expr(X). { A = X.pExpr; } database_kw_opt ::= DATABASE. database_kw_opt ::= . @@ -1078,6 +1273,7 @@ sqlite3AlterFinishAddColumn(pParse, &Y); } add_column_fullname ::= fullname(X). { + pParse->db->lookaside.bEnabled = 0; sqlite3AlterBeginAddColumn(pParse, X); } kwcolumn_opt ::= . @@ -1088,7 +1284,7 @@ %ifndef SQLITE_OMIT_VIRTUALTABLE cmd ::= create_vtab. {sqlite3VtabFinishParse(pParse,0);} cmd ::= create_vtab LP vtabarglist RP(X). {sqlite3VtabFinishParse(pParse,&X);} -create_vtab ::= CREATE VIRTUAL TABLE nm(X) dbnm(Y) USING nm(Z). { +create_vtab ::= createkw VIRTUAL TABLE nm(X) dbnm(Y) USING nm(Z). { sqlite3VtabBeginParse(pParse, &X, &Y, &Z); } vtabarglist ::= vtabarg. @@ -1099,5 +1295,6 @@ vtabargtoken ::= lp anylist RP(X). {sqlite3VtabArgExtend(pParse,&X);} lp ::= LP(X). {sqlite3VtabArgExtend(pParse,&X);} anylist ::= . -anylist ::= anylist ANY(X). {sqlite3VtabArgExtend(pParse,&X);} +anylist ::= anylist LP anylist RP. 
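The revised anylist rules above let a single virtual-table argument contain balanced parentheses; whatever text the vtabarg rules gather is later passed, verbatim, to the module's xCreate/xConnect method in its argv[] array (module name, database name, table name, then the raw arguments, per the documented virtual-table interface). A minimal sketch of that consumer side; printVtabArgs and the "demo" module name are illustrative, not part of this patch:

    #include <stdio.h>

    /* Hypothetical helper: dump the raw argument strings handed to a
    ** virtual-table module.  For
    **   CREATE VIRTUAL TABLE t USING demo(width(10), height 4);
    ** each argument arrives unsplit, e.g. argv[3]=="width(10)". */
    static void printVtabArgs(int argc, const char *const *argv){
      int i;
      for(i=3; i<argc; i++){
        printf("argument %d: %s\n", i-3, argv[i]);
      }
    }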
+anylist ::= anylist ANY. %endif SQLITE_OMIT_VIRTUALTABLE diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/patch1 /tmp/3ARg2Grji7/sqlite3-3.6.16/src/patch1 --- sqlite3-3.4.2/src/patch1 2007-08-11 01:15:13.000000000 +0100 +++ sqlite3-3.6.16/src/patch1 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -diff -u -r1.354 pager.c ---- pager.c 10 Aug 2007 23:56:36 -0000 1.354 -+++ pager.c 11 Aug 2007 00:14:18 -0000 -@@ -1455,10 +1451,15 @@ - } - - /* If nRec is 0 and this rollback is of a transaction created by this -- ** process. In this case the rest of the journal file consists of -- ** journalled copies of pages that need to be read back into the cache. -+ ** process and if this is the final header in the journal, then it means -+ ** that this part of the journal was being filled but has not yet been -+ ** synced to disk. Compute the number of pages based on the remaining -+ ** size of the file. -+ ** -+ ** The third term of the test was added to fix ticket #2565. - */ -- if( nRec==0 && !isHot ){ -+ if( nRec==0 && !isHot && -+ pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ - nRec = (szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager); - } - -@@ -2629,7 +2630,7 @@ - ** very slow operation, so we work hard to avoid it. But sometimes - ** it can't be helped. - */ -- if( pPg==0 && pPager->pFirst && pPager->nRec && syncOk && !MEMDB){ -+ if( pPg==0 && pPager->pFirst && syncOk && !MEMDB){ - int rc = syncJournal(pPager); - if( rc!=0 ){ - return rc; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pcache1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pcache1.c --- sqlite3-3.4.2/src/pcache1.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/pcache1.c 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,769 @@ +/* +** 2008 November 05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file implements the default page cache implementation (the +** sqlite3_pcache interface). It also contains part of the implementation +** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features. +** If the default page cache implementation is overriden, then neither of +** these two features are available. +** +** @(#) $Id: pcache1.c,v 1.17 2009/06/09 18:58:53 shane Exp $ +*/ + +#include "sqliteInt.h" + +typedef struct PCache1 PCache1; +typedef struct PgHdr1 PgHdr1; +typedef struct PgFreeslot PgFreeslot; + +/* Pointers to structures of this type are cast and returned as +** opaque sqlite3_pcache* handles +*/ +struct PCache1 { + /* Cache configuration parameters. Page size (szPage) and the purgeable + ** flag (bPurgeable) are set when the cache is created. nMax may be + ** modified at any time by a call to the pcache1CacheSize() method. + ** The global mutex must be held when accessing nMax. + */ + int szPage; /* Size of allocated pages in bytes */ + int bPurgeable; /* True if cache is purgeable */ + unsigned int nMin; /* Minimum number of pages reserved */ + unsigned int nMax; /* Configured "cache_size" value */ + + /* Hash table of all pages. The following variables may only be accessed + ** when the accessor is holding the global mutex (see pcache1EnterMutex() + ** and pcache1LeaveMutex()). 
+ */ + unsigned int nRecyclable; /* Number of pages in the LRU list */ + unsigned int nPage; /* Total number of pages in apHash */ + unsigned int nHash; /* Number of slots in apHash[] */ + PgHdr1 **apHash; /* Hash table for fast lookup by key */ + + unsigned int iMaxKey; /* Largest key seen since xTruncate() */ +}; + +/* +** Each cache entry is represented by an instance of the following +** structure. A buffer of PgHdr1.pCache->szPage bytes is allocated +** directly before this structure in memory (see the PGHDR1_TO_PAGE() +** macro below). +*/ +struct PgHdr1 { + unsigned int iKey; /* Key value (page number) */ + PgHdr1 *pNext; /* Next in hash table chain */ + PCache1 *pCache; /* Cache that currently owns this page */ + PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ + PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ +}; + +/* +** Free slots in the allocator used to divide up the buffer provided using +** the SQLITE_CONFIG_PAGECACHE mechanism. +*/ +struct PgFreeslot { + PgFreeslot *pNext; /* Next free slot */ +}; + +/* +** Global data used by this cache. +*/ +static SQLITE_WSD struct PCacheGlobal { + sqlite3_mutex *mutex; /* static mutex MUTEX_STATIC_LRU */ + + int nMaxPage; /* Sum of nMaxPage for purgeable caches */ + int nMinPage; /* Sum of nMinPage for purgeable caches */ + int nCurrentPage; /* Number of purgeable pages allocated */ + PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */ + + /* Variables related to SQLITE_CONFIG_PAGECACHE settings. */ + int szSlot; /* Size of each free slot */ + void *pStart, *pEnd; /* Bounds of pagecache malloc range */ + PgFreeslot *pFree; /* Free page blocks */ + int isInit; /* True if initialized */ +} pcache1_g; + +/* +** All code in this file should access the global structure above via the +** alias "pcache1". This ensures that the WSD emulation is used when +** compiling for systems that do not support real WSD. +*/ +#define pcache1 (GLOBAL(struct PCacheGlobal, pcache1_g)) + +/* +** When a PgHdr1 structure is allocated, the associated PCache1.szPage +** bytes of data are located directly before it in memory (i.e. the total +** size of the allocation is sizeof(PgHdr1)+PCache1.szPage byte). The +** PGHDR1_TO_PAGE() macro takes a pointer to a PgHdr1 structure as +** an argument and returns a pointer to the associated block of szPage +** bytes. The PAGE_TO_PGHDR1() macro does the opposite: its argument is +** a pointer to a block of szPage bytes of data and the return value is +** a pointer to the associated PgHdr1 structure. +** +** assert( PGHDR1_TO_PAGE(PAGE_TO_PGHDR1(pCache, X))==X ); +*/ +#define PGHDR1_TO_PAGE(p) (void*)(((char*)p) - p->pCache->szPage) +#define PAGE_TO_PGHDR1(c, p) (PgHdr1*)(((char*)p) + c->szPage) + +/* +** Macros to enter and leave the global LRU mutex. +*/ +#define pcache1EnterMutex() sqlite3_mutex_enter(pcache1.mutex) +#define pcache1LeaveMutex() sqlite3_mutex_leave(pcache1.mutex) + +/******************************************************************************/ +/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/ + +/* +** This function is called during initialization if a static buffer is +** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE +** verb to sqlite3_config(). Parameter pBuf points to an allocation large +** enough to contain 'n' buffers of 'sz' bytes each. 
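The static buffer described here is supplied from application code, before the library is initialized, through the documented SQLITE_CONFIG_PAGECACHE verb of sqlite3_config(). A minimal sketch; the 1100-byte slot size and 200-slot count are arbitrary illustrations (roughly a 1024-byte page plus per-page header overhead), and the documentation asks for 8-byte aligned memory:

    #include <sqlite3.h>

    /* Illustrative only: hand SQLite a static region to carve into page
    ** buffers.  Must run before sqlite3_initialize() or any connection is
    ** opened.  A double[] is used so the buffer is 8-byte aligned. */
    static double aPageBuf[(1100*200)/sizeof(double)];

    static int setupPageCache(void){
      return sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageBuf, 1100, 200);
    }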
+*/ +void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){ + if( pcache1.isInit ){ + PgFreeslot *p; + sz = ROUNDDOWN8(sz); + pcache1.szSlot = sz; + pcache1.pStart = pBuf; + pcache1.pFree = 0; + while( n-- ){ + p = (PgFreeslot*)pBuf; + p->pNext = pcache1.pFree; + pcache1.pFree = p; + pBuf = (void*)&((char*)pBuf)[sz]; + } + pcache1.pEnd = pBuf; + } +} + +/* +** Malloc function used within this file to allocate space from the buffer +** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no +** such buffer exists or there is no space left in it, this function falls +** back to sqlite3Malloc(). +*/ +static void *pcache1Alloc(int nByte){ + void *p; + assert( sqlite3_mutex_held(pcache1.mutex) ); + if( nByte<=pcache1.szSlot && pcache1.pFree ){ + assert( pcache1.isInit ); + p = (PgHdr1 *)pcache1.pFree; + pcache1.pFree = pcache1.pFree->pNext; + sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte); + sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, 1); + }else{ + + /* Allocate a new buffer using sqlite3Malloc. Before doing so, exit the + ** global pcache mutex and unlock the pager-cache object pCache. This is + ** so that if the attempt to allocate a new buffer causes the the + ** configured soft-heap-limit to be breached, it will be possible to + ** reclaim memory from this pager-cache. + */ + pcache1LeaveMutex(); + p = sqlite3Malloc(nByte); + pcache1EnterMutex(); + if( p ){ + int sz = sqlite3MallocSize(p); + sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz); + } + } + return p; +} + +/* +** Free an allocated buffer obtained from pcache1Alloc(). +*/ +static void pcache1Free(void *p){ + assert( sqlite3_mutex_held(pcache1.mutex) ); + if( p==0 ) return; + if( p>=pcache1.pStart && ppNext = pcache1.pFree; + pcache1.pFree = pSlot; + }else{ + int iSize = sqlite3MallocSize(p); + sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -iSize); + sqlite3_free(p); + } +} + +/* +** Allocate a new page object initially associated with cache pCache. +*/ +static PgHdr1 *pcache1AllocPage(PCache1 *pCache){ + int nByte = sizeof(PgHdr1) + pCache->szPage; + void *pPg = pcache1Alloc(nByte); + PgHdr1 *p; + if( pPg ){ + p = PAGE_TO_PGHDR1(pCache, pPg); + if( pCache->bPurgeable ){ + pcache1.nCurrentPage++; + } + }else{ + p = 0; + } + return p; +} + +/* +** Free a page object allocated by pcache1AllocPage(). +*/ +static void pcache1FreePage(PgHdr1 *p){ + if( p ){ + if( p->pCache->bPurgeable ){ + pcache1.nCurrentPage--; + } + pcache1Free(PGHDR1_TO_PAGE(p)); + } +} + +/* +** Malloc function used by SQLite to obtain space from the buffer configured +** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer +** exists, this function falls back to sqlite3Malloc(). +*/ +void *sqlite3PageMalloc(int sz){ + void *p; + pcache1EnterMutex(); + p = pcache1Alloc(sz); + pcache1LeaveMutex(); + return p; +} + +/* +** Free an allocated buffer obtained from sqlite3PageMalloc(). +*/ +void sqlite3PageFree(void *p){ + pcache1EnterMutex(); + pcache1Free(p); + pcache1LeaveMutex(); +} + +/******************************************************************************/ +/******** General Implementation Functions ************************************/ + +/* +** This function is used to resize the hash table used by the cache passed +** as the first argument. +** +** The global mutex must be held when this function is called. 
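pcache1Alloc() and pcache1Free() above implement an intrusive free list: unused slots in the configured buffer are threaded through the slot memory itself, so the only bookkeeping is a single head pointer. A standalone sketch of the same idea under illustrative names (Slot, gFree, slotInit, and so on); unlike the real code it takes no mutex:

    #include <stddef.h>

    typedef struct Slot Slot;
    struct Slot { Slot *pNext; };      /* free slots reuse their own storage */

    static Slot *gFree = 0;            /* head of the free list */

    /* Thread nSlot slots of szSlot bytes each onto the free list. */
    static void slotInit(void *pBuf, size_t szSlot, int nSlot){
      char *p = (char*)pBuf;
      while( nSlot-- > 0 ){
        Slot *pSlot = (Slot*)p;
        pSlot->pNext = gFree;
        gFree = pSlot;
        p += szSlot;
      }
    }

    /* Pop one slot, or return NULL if the list is empty. */
    static void *slotAlloc(void){
      Slot *p = gFree;
      if( p ) gFree = p->pNext;
      return p;
    }

    /* Push a slot back onto the free list. */
    static void slotFree(void *p){
      Slot *pSlot = (Slot*)p;
      pSlot->pNext = gFree;
      gFree = pSlot;
    }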
+*/ +static int pcache1ResizeHash(PCache1 *p){ + PgHdr1 **apNew; + unsigned int nNew; + unsigned int i; + + assert( sqlite3_mutex_held(pcache1.mutex) ); + + nNew = p->nHash*2; + if( nNew<256 ){ + nNew = 256; + } + + pcache1LeaveMutex(); + if( p->nHash ){ sqlite3BeginBenignMalloc(); } + apNew = (PgHdr1 **)sqlite3_malloc(sizeof(PgHdr1 *)*nNew); + if( p->nHash ){ sqlite3EndBenignMalloc(); } + pcache1EnterMutex(); + if( apNew ){ + memset(apNew, 0, sizeof(PgHdr1 *)*nNew); + for(i=0; inHash; i++){ + PgHdr1 *pPage; + PgHdr1 *pNext = p->apHash[i]; + while( (pPage = pNext)!=0 ){ + unsigned int h = pPage->iKey % nNew; + pNext = pPage->pNext; + pPage->pNext = apNew[h]; + apNew[h] = pPage; + } + } + sqlite3_free(p->apHash); + p->apHash = apNew; + p->nHash = nNew; + } + + return (p->apHash ? SQLITE_OK : SQLITE_NOMEM); +} + +/* +** This function is used internally to remove the page pPage from the +** global LRU list, if is part of it. If pPage is not part of the global +** LRU list, then this function is a no-op. +** +** The global mutex must be held when this function is called. +*/ +static void pcache1PinPage(PgHdr1 *pPage){ + assert( sqlite3_mutex_held(pcache1.mutex) ); + if( pPage && (pPage->pLruNext || pPage==pcache1.pLruTail) ){ + if( pPage->pLruPrev ){ + pPage->pLruPrev->pLruNext = pPage->pLruNext; + } + if( pPage->pLruNext ){ + pPage->pLruNext->pLruPrev = pPage->pLruPrev; + } + if( pcache1.pLruHead==pPage ){ + pcache1.pLruHead = pPage->pLruNext; + } + if( pcache1.pLruTail==pPage ){ + pcache1.pLruTail = pPage->pLruPrev; + } + pPage->pLruNext = 0; + pPage->pLruPrev = 0; + pPage->pCache->nRecyclable--; + } +} + + +/* +** Remove the page supplied as an argument from the hash table +** (PCache1.apHash structure) that it is currently stored in. +** +** The global mutex must be held when this function is called. +*/ +static void pcache1RemoveFromHash(PgHdr1 *pPage){ + unsigned int h; + PCache1 *pCache = pPage->pCache; + PgHdr1 **pp; + + h = pPage->iKey % pCache->nHash; + for(pp=&pCache->apHash[h]; (*pp)!=pPage; pp=&(*pp)->pNext); + *pp = (*pp)->pNext; + + pCache->nPage--; +} + +/* +** If there are currently more than pcache.nMaxPage pages allocated, try +** to recycle pages to reduce the number allocated to pcache.nMaxPage. +*/ +static void pcache1EnforceMaxPage(void){ + assert( sqlite3_mutex_held(pcache1.mutex) ); + while( pcache1.nCurrentPage>pcache1.nMaxPage && pcache1.pLruTail ){ + PgHdr1 *p = pcache1.pLruTail; + pcache1PinPage(p); + pcache1RemoveFromHash(p); + pcache1FreePage(p); + } +} + +/* +** Discard all pages from cache pCache with a page number (key value) +** greater than or equal to iLimit. Any pinned pages that meet this +** criteria are unpinned before they are discarded. +** +** The global mutex must be held when this function is called. 
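pcache1PinPage() above is the unlink half of an LRU discipline kept as a doubly-linked list with explicit head and tail pointers. The same operation in isolation, with illustrative names; the real routine additionally checks that the page is actually on the list before touching it:

    typedef struct Node Node;
    struct Node { Node *pPrev, *pNext; };

    static Node *gHead = 0, *gTail = 0;

    /* Detach p from the list, patching head/tail as needed. */
    static void lruUnlink(Node *p){
      if( p->pPrev ) p->pPrev->pNext = p->pNext; else gHead = p->pNext;
      if( p->pNext ) p->pNext->pPrev = p->pPrev; else gTail = p->pPrev;
      p->pPrev = 0;
      p->pNext = 0;
    }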
+*/ +static void pcache1TruncateUnsafe( + PCache1 *pCache, + unsigned int iLimit +){ + TESTONLY( unsigned int nPage = 0; ) /* Used to assert pCache->nPage is correct */ + unsigned int h; + assert( sqlite3_mutex_held(pcache1.mutex) ); + for(h=0; hnHash; h++){ + PgHdr1 **pp = &pCache->apHash[h]; + PgHdr1 *pPage; + while( (pPage = *pp)!=0 ){ + if( pPage->iKey>=iLimit ){ + pCache->nPage--; + *pp = pPage->pNext; + pcache1PinPage(pPage); + pcache1FreePage(pPage); + }else{ + pp = &pPage->pNext; + TESTONLY( nPage++; ) + } + } + } + assert( pCache->nPage==nPage ); +} + +/******************************************************************************/ +/******** sqlite3_pcache Methods **********************************************/ + +/* +** Implementation of the sqlite3_pcache.xInit method. +*/ +static int pcache1Init(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + assert( pcache1.isInit==0 ); + memset(&pcache1, 0, sizeof(pcache1)); + if( sqlite3GlobalConfig.bCoreMutex ){ + pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU); + } + pcache1.isInit = 1; + return SQLITE_OK; +} + +/* +** Implementation of the sqlite3_pcache.xShutdown method. +*/ +static void pcache1Shutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + assert( pcache1.isInit!=0 ); + memset(&pcache1, 0, sizeof(pcache1)); +} + +/* +** Implementation of the sqlite3_pcache.xCreate method. +** +** Allocate a new cache. +*/ +static sqlite3_pcache *pcache1Create(int szPage, int bPurgeable){ + PCache1 *pCache; + + pCache = (PCache1 *)sqlite3_malloc(sizeof(PCache1)); + if( pCache ){ + memset(pCache, 0, sizeof(PCache1)); + pCache->szPage = szPage; + pCache->bPurgeable = (bPurgeable ? 1 : 0); + if( bPurgeable ){ + pCache->nMin = 10; + pcache1EnterMutex(); + pcache1.nMinPage += pCache->nMin; + pcache1LeaveMutex(); + } + } + return (sqlite3_pcache *)pCache; +} + +/* +** Implementation of the sqlite3_pcache.xCachesize method. +** +** Configure the cache_size limit for a cache. +*/ +static void pcache1Cachesize(sqlite3_pcache *p, int nMax){ + PCache1 *pCache = (PCache1 *)p; + if( pCache->bPurgeable ){ + pcache1EnterMutex(); + pcache1.nMaxPage += (nMax - pCache->nMax); + pCache->nMax = nMax; + pcache1EnforceMaxPage(); + pcache1LeaveMutex(); + } +} + +/* +** Implementation of the sqlite3_pcache.xPagecount method. +*/ +static int pcache1Pagecount(sqlite3_pcache *p){ + int n; + pcache1EnterMutex(); + n = ((PCache1 *)p)->nPage; + pcache1LeaveMutex(); + return n; +} + +/* +** Implementation of the sqlite3_pcache.xFetch method. +** +** Fetch a page by key value. +** +** Whether or not a new page may be allocated by this function depends on +** the value of the createFlag argument. +** +** There are three different approaches to obtaining space for a page, +** depending on the value of parameter createFlag (which may be 0, 1 or 2). +** +** 1. Regardless of the value of createFlag, the cache is searched for a +** copy of the requested page. If one is found, it is returned. +** +** 2. If createFlag==0 and the page is not already in the cache, NULL is +** returned. +** +** 3. If createFlag is 1, the cache is marked as purgeable and the page is +** not already in the cache, and if either of the following are true, +** return NULL: +** +** (a) the number of pages pinned by the cache is greater than +** PCache1.nMax, or +** (b) the number of pages pinned by the cache is greater than +** the sum of nMax for all purgeable caches, less the sum of +** nMin for all other purgeable caches. +** +** 4. 
If none of the first three conditions apply and the cache is marked +** as purgeable, and if one of the following is true: +** +** (a) The number of pages allocated for the cache is already +** PCache1.nMax, or +** +** (b) The number of pages allocated for all purgeable caches is +** already equal to or greater than the sum of nMax for all +** purgeable caches, +** +** then attempt to recycle a page from the LRU list. If it is the right +** size, return the recycled buffer. Otherwise, free the buffer and +** proceed to step 5. +** +** 5. Otherwise, allocate and return a new page buffer. +*/ +static void *pcache1Fetch(sqlite3_pcache *p, unsigned int iKey, int createFlag){ + unsigned int nPinned; + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage = 0; + + pcache1EnterMutex(); + if( createFlag==1 ) sqlite3BeginBenignMalloc(); + + /* Search the hash table for an existing entry. */ + if( pCache->nHash>0 ){ + unsigned int h = iKey % pCache->nHash; + for(pPage=pCache->apHash[h]; pPage&&pPage->iKey!=iKey; pPage=pPage->pNext); + } + + if( pPage || createFlag==0 ){ + pcache1PinPage(pPage); + goto fetch_out; + } + + /* Step 3 of header comment. */ + nPinned = pCache->nPage - pCache->nRecyclable; + if( createFlag==1 && pCache->bPurgeable && ( + nPinned>=(pcache1.nMaxPage+pCache->nMin-pcache1.nMinPage) + || nPinned>=(pCache->nMax * 9 / 10) + )){ + goto fetch_out; + } + + if( pCache->nPage>=pCache->nHash && pcache1ResizeHash(pCache) ){ + goto fetch_out; + } + + /* Step 4. Try to recycle a page buffer if appropriate. */ + if( pCache->bPurgeable && pcache1.pLruTail && ( + (pCache->nPage+1>=pCache->nMax) || pcache1.nCurrentPage>=pcache1.nMaxPage + )){ + pPage = pcache1.pLruTail; + pcache1RemoveFromHash(pPage); + pcache1PinPage(pPage); + if( pPage->pCache->szPage!=pCache->szPage ){ + pcache1FreePage(pPage); + pPage = 0; + }else{ + pcache1.nCurrentPage -= (pPage->pCache->bPurgeable - pCache->bPurgeable); + } + } + + /* Step 5. If a usable page buffer has still not been found, + ** attempt to allocate a new one. + */ + if( !pPage ){ + pPage = pcache1AllocPage(pCache); + } + + if( pPage ){ + unsigned int h = iKey % pCache->nHash; + pCache->nPage++; + pPage->iKey = iKey; + pPage->pNext = pCache->apHash[h]; + pPage->pCache = pCache; + pPage->pLruPrev = 0; + pPage->pLruNext = 0; + *(void **)(PGHDR1_TO_PAGE(pPage)) = 0; + pCache->apHash[h] = pPage; + } + +fetch_out: + if( pPage && iKey>pCache->iMaxKey ){ + pCache->iMaxKey = iKey; + } + if( createFlag==1 ) sqlite3EndBenignMalloc(); + pcache1LeaveMutex(); + return (pPage ? PGHDR1_TO_PAGE(pPage) : 0); +} + + +/* +** Implementation of the sqlite3_pcache.xUnpin method. +** +** Mark a page as unpinned (eligible for asynchronous recycling). +*/ +static void pcache1Unpin(sqlite3_pcache *p, void *pPg, int reuseUnlikely){ + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage = PAGE_TO_PGHDR1(pCache, pPg); + + assert( pPage->pCache==pCache ); + pcache1EnterMutex(); + + /* It is an error to call this function if the page is already + ** part of the global LRU list. + */ + assert( pPage->pLruPrev==0 && pPage->pLruNext==0 ); + assert( pcache1.pLruHead!=pPage && pcache1.pLruTail!=pPage ); + + if( reuseUnlikely || pcache1.nCurrentPage>pcache1.nMaxPage ){ + pcache1RemoveFromHash(pPage); + pcache1FreePage(pPage); + }else{ + /* Add the page to the global LRU list. Normally, the page is added to + ** the head of the list (last page to be recycled). 
However, if the + ** reuseUnlikely flag passed to this function is true, the page is added + ** to the tail of the list (first page to be recycled). + */ + if( pcache1.pLruHead ){ + pcache1.pLruHead->pLruPrev = pPage; + pPage->pLruNext = pcache1.pLruHead; + pcache1.pLruHead = pPage; + }else{ + pcache1.pLruTail = pPage; + pcache1.pLruHead = pPage; + } + pCache->nRecyclable++; + } + + pcache1LeaveMutex(); +} + +/* +** Implementation of the sqlite3_pcache.xRekey method. +*/ +static void pcache1Rekey( + sqlite3_pcache *p, + void *pPg, + unsigned int iOld, + unsigned int iNew +){ + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage = PAGE_TO_PGHDR1(pCache, pPg); + PgHdr1 **pp; + unsigned int h; + assert( pPage->iKey==iOld ); + assert( pPage->pCache==pCache ); + + pcache1EnterMutex(); + + h = iOld%pCache->nHash; + pp = &pCache->apHash[h]; + while( (*pp)!=pPage ){ + pp = &(*pp)->pNext; + } + *pp = pPage->pNext; + + h = iNew%pCache->nHash; + pPage->iKey = iNew; + pPage->pNext = pCache->apHash[h]; + pCache->apHash[h] = pPage; + + if( iNew>pCache->iMaxKey ){ + pCache->iMaxKey = iNew; + } + + pcache1LeaveMutex(); +} + +/* +** Implementation of the sqlite3_pcache.xTruncate method. +** +** Discard all unpinned pages in the cache with a page number equal to +** or greater than parameter iLimit. Any pinned pages with a page number +** equal to or greater than iLimit are implicitly unpinned. +*/ +static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){ + PCache1 *pCache = (PCache1 *)p; + pcache1EnterMutex(); + if( iLimit<=pCache->iMaxKey ){ + pcache1TruncateUnsafe(pCache, iLimit); + pCache->iMaxKey = iLimit-1; + } + pcache1LeaveMutex(); +} + +/* +** Implementation of the sqlite3_pcache.xDestroy method. +** +** Destroy a cache allocated using pcache1Create(). +*/ +static void pcache1Destroy(sqlite3_pcache *p){ + PCache1 *pCache = (PCache1 *)p; + pcache1EnterMutex(); + pcache1TruncateUnsafe(pCache, 0); + pcache1.nMaxPage -= pCache->nMax; + pcache1.nMinPage -= pCache->nMin; + pcache1EnforceMaxPage(); + pcache1LeaveMutex(); + sqlite3_free(pCache->apHash); + sqlite3_free(pCache); +} + +/* +** This function is called during initialization (sqlite3_initialize()) to +** install the default pluggable cache module, assuming the user has not +** already provided an alternative. +*/ +void sqlite3PCacheSetDefault(void){ + static sqlite3_pcache_methods defaultMethods = { + 0, /* pArg */ + pcache1Init, /* xInit */ + pcache1Shutdown, /* xShutdown */ + pcache1Create, /* xCreate */ + pcache1Cachesize, /* xCachesize */ + pcache1Pagecount, /* xPagecount */ + pcache1Fetch, /* xFetch */ + pcache1Unpin, /* xUnpin */ + pcache1Rekey, /* xRekey */ + pcache1Truncate, /* xTruncate */ + pcache1Destroy /* xDestroy */ + }; + sqlite3_config(SQLITE_CONFIG_PCACHE, &defaultMethods); +} + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +/* +** This function is called to free superfluous dynamically allocated memory +** held by the pager system. Memory in use by any SQLite pager allocated +** by the current thread may be sqlite3_free()ed. +** +** nReq is the number of bytes of memory required. Once this much has +** been released, the function returns. The return value is the total number +** of bytes of memory released. 
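From application code, this path is reached through the public sqlite3_release_memory() interface (effective when the library is compiled with SQLITE_ENABLE_MEMORY_MANAGEMENT). A small illustrative caller; trimSqliteCaches is a hypothetical name:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Ask SQLite to give back up to 512 KiB of cached memory, for example
    ** in response to an application-level low-memory signal. */
    static void trimSqliteCaches(void){
      int nFreed = sqlite3_release_memory(512*1024);
      printf("sqlite3_release_memory reclaimed %d bytes\n", nFreed);
    }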
+*/ +int sqlite3PcacheReleaseMemory(int nReq){ + int nFree = 0; + if( pcache1.pStart==0 ){ + PgHdr1 *p; + pcache1EnterMutex(); + while( (nReq<0 || nFree<nReq) && (p=pcache1.pLruTail) ){ + nFree += sqlite3MallocSize(PGHDR1_TO_PAGE(p)); + pcache1PinPage(p); + pcache1RemoveFromHash(p); + pcache1FreePage(p); + } + pcache1LeaveMutex(); + } + return nFree; +} +#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ + +#ifdef SQLITE_TEST +/* +** This function is used by test procedures to inspect the internal state +** of the global cache. +*/ +void sqlite3PcacheStats( + int *pnCurrent, /* OUT: Total number of pages cached */ + int *pnMax, /* OUT: Global maximum cache size */ + int *pnMin, /* OUT: Global minimum cache size */ + int *pnRecyclable /* OUT: Total number of pages available for recycling */ +){ + PgHdr1 *p; + int nRecyclable = 0; + for(p=pcache1.pLruHead; p; p=p->pLruNext){ + nRecyclable++; + } + *pnCurrent = pcache1.nCurrentPage; + *pnMax = pcache1.nMaxPage; + *pnMin = pcache1.nMinPage; + *pnRecyclable = nRecyclable; +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pcache.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pcache.c --- sqlite3-3.4.2/src/pcache.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/pcache.c 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,582 @@ +/* +** 2008 August 05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements the page cache. +** +** @(#) $Id: pcache.c,v 1.44 2009/03/31 01:32:18 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** A complete page cache is an instance of this structure. +*/ +struct PCache { + PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ + PgHdr *pSynced; /* Last synced page in dirty page list */ + int nRef; /* Number of referenced pages */ + int nMax; /* Configured cache size */ + int szPage; /* Size of every page in this cache */ + int szExtra; /* Size of extra space for each page */ + int bPurgeable; /* True if pages are on backing store */ + int (*xStress)(void*,PgHdr*); /* Call to try to make a page clean */ + void *pStress; /* Argument to xStress */ + sqlite3_pcache *pCache; /* Pluggable cache module */ + PgHdr *pPage1; /* Reference to page 1 */ +}; + +/* +** Some of the assert() macros in this code are too expensive to run +** even during normal debugging. Use them only rarely on long-running +** tests. Enable the expensive asserts using the +** -DSQLITE_ENABLE_EXPENSIVE_ASSERT=1 compile-time option. +*/ +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT +# define expensive_assert(X) assert(X) +#else +# define expensive_assert(X) +#endif + +/********************************** Linked List Management ********************/ + +#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT) +/* +** Check that the pCache->pSynced variable is set correctly. If it +** is not, either fail an assert or return zero. Otherwise, return +** non-zero. This is only used in debugging builds, as follows: +** +** expensive_assert( pcacheCheckSynced(pCache) ); +*/ +static int pcacheCheckSynced(PCache *pCache){ + PgHdr *p; + for(p=pCache->pDirtyTail; p!=pCache->pSynced; p=p->pDirtyPrev){ + assert( p->nRef || (p->flags&PGHDR_NEED_SYNC) ); + } + return (p==0 || p->nRef || (p->flags&PGHDR_NEED_SYNC)==0); +} +#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */ + +/* +** Remove page pPage from the list of dirty pages. +*/ +static void pcacheRemoveFromDirtyList(PgHdr *pPage){ + PCache *p = pPage->pCache; + + assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); + assert( pPage->pDirtyPrev || pPage==p->pDirty ); + + /* Update the PCache1.pSynced variable if necessary. 
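A rough standalone restatement of the release-memory loop in sqlite3PcacheReleaseMemory above: walk a doubly-linked LRU list from its tail, freeing entries until the requested number of bytes has been recovered (a negative request means free everything). The types and the fixed per-entry size are assumptions made only for this sketch.

#include <stdio.h>
#include <stdlib.h>

typedef struct LruPage LruPage;
struct LruPage {
  int szAlloc;                /* bytes notionally held by this entry */
  LruPage *pPrev, *pNext;
};

typedef struct LruList {
  LruPage *pHead, *pTail;     /* pTail is the least recently used entry */
} LruList;

/* Free entries starting at the tail until at least nReq bytes have been
** released or the list is empty; nReq<0 means release everything.
** Returns the number of bytes released. */
static int demoReleaseMemory(LruList *pLru, int nReq){
  int nFree = 0;
  while( (nReq<0 || nFree<nReq) && pLru->pTail ){
    LruPage *p = pLru->pTail;
    pLru->pTail = p->pPrev;
    if( pLru->pTail ) pLru->pTail->pNext = 0; else pLru->pHead = 0;
    nFree += p->szAlloc;
    free(p);
  }
  return nFree;
}

int main(void){
  LruList lru = {0, 0};
  int i;
  for(i=0; i<4; i++){
    LruPage *p = malloc(sizeof(*p));
    if( p==0 ) return 1;
    p->szAlloc = 1024;
    p->pPrev = 0;
    p->pNext = lru.pHead;
    if( lru.pHead ) lru.pHead->pPrev = p; else lru.pTail = p;
    lru.pHead = p;
  }
  printf("released %d bytes\n", demoReleaseMemory(&lru, 2000));
  return 0;
}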
*/ + if( p->pSynced==pPage ){ + PgHdr *pSynced = pPage->pDirtyPrev; + while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ + pSynced = pSynced->pDirtyPrev; + } + p->pSynced = pSynced; + } + + if( pPage->pDirtyNext ){ + pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; + }else{ + assert( pPage==p->pDirtyTail ); + p->pDirtyTail = pPage->pDirtyPrev; + } + if( pPage->pDirtyPrev ){ + pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; + }else{ + assert( pPage==p->pDirty ); + p->pDirty = pPage->pDirtyNext; + } + pPage->pDirtyNext = 0; + pPage->pDirtyPrev = 0; + + expensive_assert( pcacheCheckSynced(p) ); +} + +/* +** Add page pPage to the head of the dirty list (PCache1.pDirty is set to +** pPage). +*/ +static void pcacheAddToDirtyList(PgHdr *pPage){ + PCache *p = pPage->pCache; + + assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage ); + + pPage->pDirtyNext = p->pDirty; + if( pPage->pDirtyNext ){ + assert( pPage->pDirtyNext->pDirtyPrev==0 ); + pPage->pDirtyNext->pDirtyPrev = pPage; + } + p->pDirty = pPage; + if( !p->pDirtyTail ){ + p->pDirtyTail = pPage; + } + if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ + p->pSynced = pPage; + } + expensive_assert( pcacheCheckSynced(p) ); +} + +/* +** Wrapper around the pluggable caches xUnpin method. If the cache is +** being used for an in-memory database, this function is a no-op. +*/ +static void pcacheUnpin(PgHdr *p){ + PCache *pCache = p->pCache; + if( pCache->bPurgeable ){ + if( p->pgno==1 ){ + pCache->pPage1 = 0; + } + sqlite3GlobalConfig.pcache.xUnpin(pCache->pCache, p, 0); + } +} + +/*************************************************** General Interfaces ****** +** +** Initialize and shutdown the page cache subsystem. Neither of these +** functions are threadsafe. +*/ +int sqlite3PcacheInitialize(void){ + if( sqlite3GlobalConfig.pcache.xInit==0 ){ + sqlite3PCacheSetDefault(); + } + return sqlite3GlobalConfig.pcache.xInit(sqlite3GlobalConfig.pcache.pArg); +} +void sqlite3PcacheShutdown(void){ + if( sqlite3GlobalConfig.pcache.xShutdown ){ + sqlite3GlobalConfig.pcache.xShutdown(sqlite3GlobalConfig.pcache.pArg); + } +} + +/* +** Return the size in bytes of a PCache object. +*/ +int sqlite3PcacheSize(void){ return sizeof(PCache); } + +/* +** Create a new PCache object. Storage space to hold the object +** has already been allocated and is passed in as the p pointer. +** The caller discovers how much space needs to be allocated by +** calling sqlite3PcacheSize(). +*/ +void sqlite3PcacheOpen( + int szPage, /* Size of every page */ + int szExtra, /* Extra space associated with each page */ + int bPurgeable, /* True if pages are on backing store */ + int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */ + void *pStress, /* Argument to xStress */ + PCache *p /* Preallocated space for the PCache */ +){ + memset(p, 0, sizeof(PCache)); + p->szPage = szPage; + p->szExtra = szExtra; + p->bPurgeable = bPurgeable; + p->xStress = xStress; + p->pStress = pStress; + p->nMax = 100; +} + +/* +** Change the page size for PCache object. The caller must ensure that there +** are no outstanding page references when this function is called. +*/ +void sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ + assert( pCache->nRef==0 && pCache->pDirty==0 ); + if( pCache->pCache ){ + sqlite3GlobalConfig.pcache.xDestroy(pCache->pCache); + pCache->pCache = 0; + } + pCache->szPage = szPage; +} + +/* +** Try to obtain a page from the cache. 
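Removing a page from the dirty list has one subtle step, shown in pcacheRemoveFromDirtyList above: if the cached pSynced cursor pointed at the removed page, it has to be walked backwards to the previous page that does not need a sync. A minimal sketch of that bookkeeping, using invented DNode/DList types rather than SQLite's:

#include <stdio.h>

typedef struct DNode DNode;
struct DNode {
  int id;
  int needSync;            /* stands in for the PGHDR_NEED_SYNC bit */
  DNode *pPrev, *pNext;
};

typedef struct DList {
  DNode *pHead, *pTail;
  DNode *pSynced;          /* cached pointer to a node with needSync==0 */
} DList;

/* Unlink pX; if the cached pSynced cursor pointed at it, back the cursor
** up to the previous node that does not need a sync. */
static void dlistRemove(DList *p, DNode *pX){
  if( p->pSynced==pX ){
    DNode *pS = pX->pPrev;
    while( pS && pS->needSync ) pS = pS->pPrev;
    p->pSynced = pS;
  }
  if( pX->pNext ) pX->pNext->pPrev = pX->pPrev; else p->pTail = pX->pPrev;
  if( pX->pPrev ) pX->pPrev->pNext = pX->pNext; else p->pHead = pX->pNext;
  pX->pNext = pX->pPrev = 0;
}

int main(void){
  DNode a = {1,0,0,0}, b = {2,1,0,0}, c = {3,0,0,0};
  DList l = {&a, &c, &c};
  a.pNext = &b; b.pPrev = &a; b.pNext = &c; c.pPrev = &b;
  dlistRemove(&l, &c);                 /* cursor falls back past b to a */
  printf("synced now: %d\n", l.pSynced ? l.pSynced->id : 0);
  return 0;
}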
+*/ +int sqlite3PcacheFetch( + PCache *pCache, /* Obtain the page from this cache */ + Pgno pgno, /* Page number to obtain */ + int createFlag, /* If true, create page if it does not exist already */ + PgHdr **ppPage /* Write the page here */ +){ + PgHdr *pPage = 0; + int eCreate; + + assert( pCache!=0 ); + assert( pgno>0 ); + + /* If the pluggable cache (sqlite3_pcache*) has not been allocated, + ** allocate it now. + */ + if( !pCache->pCache && createFlag ){ + sqlite3_pcache *p; + int nByte; + nByte = pCache->szPage + pCache->szExtra + sizeof(PgHdr); + p = sqlite3GlobalConfig.pcache.xCreate(nByte, pCache->bPurgeable); + if( !p ){ + return SQLITE_NOMEM; + } + sqlite3GlobalConfig.pcache.xCachesize(p, pCache->nMax); + pCache->pCache = p; + } + + eCreate = createFlag ? 1 : 0; + if( eCreate && (!pCache->bPurgeable || !pCache->pDirty) ){ + eCreate = 2; + } + if( pCache->pCache ){ + pPage = sqlite3GlobalConfig.pcache.xFetch(pCache->pCache, pgno, eCreate); + } + + if( !pPage && eCreate==1 ){ + PgHdr *pPg; + + /* Find a dirty page to write-out and recycle. First try to find a + ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC + ** cleared), but if that is not possible settle for any other + ** unreferenced dirty page. + */ + expensive_assert( pcacheCheckSynced(pCache) ); + for(pPg=pCache->pSynced; + pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); + pPg=pPg->pDirtyPrev + ); + if( !pPg ){ + for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); + } + if( pPg ){ + int rc; + rc = pCache->xStress(pCache->pStress, pPg); + if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ + return rc; + } + } + + pPage = sqlite3GlobalConfig.pcache.xFetch(pCache->pCache, pgno, 2); + } + + if( pPage ){ + if( !pPage->pData ){ + memset(pPage, 0, sizeof(PgHdr) + pCache->szExtra); + pPage->pExtra = (void*)&pPage[1]; + pPage->pData = (void *)&((char *)pPage)[sizeof(PgHdr) + pCache->szExtra]; + pPage->pCache = pCache; + pPage->pgno = pgno; + } + assert( pPage->pCache==pCache ); + assert( pPage->pgno==pgno ); + assert( pPage->pExtra==(void *)&pPage[1] ); + + if( 0==pPage->nRef ){ + pCache->nRef++; + } + pPage->nRef++; + if( pgno==1 ){ + pCache->pPage1 = pPage; + } + } + *ppPage = pPage; + return (pPage==0 && eCreate) ? SQLITE_NOMEM : SQLITE_OK; +} + +/* +** Decrement the reference count on a page. If the page is clean and the +** reference count drops to 0, then it is made elible for recycling. +*/ +void sqlite3PcacheRelease(PgHdr *p){ + assert( p->nRef>0 ); + p->nRef--; + if( p->nRef==0 ){ + PCache *pCache = p->pCache; + pCache->nRef--; + if( (p->flags&PGHDR_DIRTY)==0 ){ + pcacheUnpin(p); + }else{ + /* Move the page to the head of the dirty list. */ + pcacheRemoveFromDirtyList(p); + pcacheAddToDirtyList(p); + } + } +} + +/* +** Increase the reference count of a supplied page by 1. +*/ +void sqlite3PcacheRef(PgHdr *p){ + assert(p->nRef>0); + p->nRef++; +} + +/* +** Drop a page from the cache. There must be exactly one reference to the +** page. This function deletes that reference, so after it returns the +** page pointed to by p is invalid. +*/ +void sqlite3PcacheDrop(PgHdr *p){ + PCache *pCache; + assert( p->nRef==1 ); + if( p->flags&PGHDR_DIRTY ){ + pcacheRemoveFromDirtyList(p); + } + pCache = p->pCache; + pCache->nRef--; + if( p->pgno==1 ){ + pCache->pPage1 = 0; + } + sqlite3GlobalConfig.pcache.xUnpin(pCache->pCache, p, 1); +} + +/* +** Make sure the page is marked as dirty. If it isn't dirty already, +** make it so. 
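sqlite3PcacheFetch above handles allocation pressure by asking the xStress callback to clean (spill) a dirty page and then retrying the fetch in force mode. The shape of that spill-and-retry logic, reduced to a toy budget with a caller-supplied callback; none of the names below exist in SQLite.

#include <stdio.h>

typedef struct Budget {
  int nUsed;
  int nMax;
  int (*xSpill)(void*);    /* try to release one unit; return 0 on success */
  void *pSpillArg;
} Budget;

/* Reserve one unit.  If the budget is full, ask the spill callback to make
** room once and retry; if force is set the limit is ignored entirely.
** Returns 0 on success, 1 if no room could be made. */
static int budgetReserve(Budget *p, int force){
  if( !force && p->nUsed>=p->nMax ){
    if( p->xSpill==0 || p->xSpill(p->pSpillArg) ) return 1;
    if( p->nUsed>=p->nMax ) return 1;
  }
  p->nUsed++;
  return 0;
}

static int demoSpill(void *pArg){
  Budget *p = (Budget*)pArg;
  if( p->nUsed==0 ) return 1;
  p->nUsed--;                     /* pretend one dirty page was written out */
  return 0;
}

int main(void){
  Budget b = {2, 2, demoSpill, 0};
  b.pSpillArg = &b;
  printf("reserve: %d\n", budgetReserve(&b, 0));   /* spills, then succeeds */
  return 0;
}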
+*/ +void sqlite3PcacheMakeDirty(PgHdr *p){ + p->flags &= ~PGHDR_DONT_WRITE; + assert( p->nRef>0 ); + if( 0==(p->flags & PGHDR_DIRTY) ){ + p->flags |= PGHDR_DIRTY; + pcacheAddToDirtyList( p); + } +} + +/* +** Make sure the page is marked as clean. If it isn't clean already, +** make it so. +*/ +void sqlite3PcacheMakeClean(PgHdr *p){ + if( (p->flags & PGHDR_DIRTY) ){ + pcacheRemoveFromDirtyList(p); + p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC); + if( p->nRef==0 ){ + pcacheUnpin(p); + } + } +} + +/* +** Make every page in the cache clean. +*/ +void sqlite3PcacheCleanAll(PCache *pCache){ + PgHdr *p; + while( (p = pCache->pDirty)!=0 ){ + sqlite3PcacheMakeClean(p); + } +} + +/* +** Clear the PGHDR_NEED_SYNC flag from all dirty pages. +*/ +void sqlite3PcacheClearSyncFlags(PCache *pCache){ + PgHdr *p; + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + p->flags &= ~PGHDR_NEED_SYNC; + } + pCache->pSynced = pCache->pDirtyTail; +} + +/* +** Change the page number of page p to newPgno. +*/ +void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ + PCache *pCache = p->pCache; + assert( p->nRef>0 ); + assert( newPgno>0 ); + sqlite3GlobalConfig.pcache.xRekey(pCache->pCache, p, p->pgno, newPgno); + p->pgno = newPgno; + if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ + pcacheRemoveFromDirtyList(p); + pcacheAddToDirtyList(p); + } +} + +/* +** Drop every cache entry whose page number is greater than "pgno". The +** caller must ensure that there are no outstanding references to any pages +** other than page 1 with a page number greater than pgno. +** +** If there is a reference to page 1 and the pgno parameter passed to this +** function is 0, then the data area associated with page 1 is zeroed, but +** the page object is not dropped. +*/ +void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ + if( pCache->pCache ){ + PgHdr *p; + PgHdr *pNext; + for(p=pCache->pDirty; p; p=pNext){ + pNext = p->pDirtyNext; + if( p->pgno>pgno ){ + assert( p->flags&PGHDR_DIRTY ); + sqlite3PcacheMakeClean(p); + } + } + if( pgno==0 && pCache->pPage1 ){ + memset(pCache->pPage1->pData, 0, pCache->szPage); + pgno = 1; + } + sqlite3GlobalConfig.pcache.xTruncate(pCache->pCache, pgno+1); + } +} + +/* +** Close a cache. +*/ +void sqlite3PcacheClose(PCache *pCache){ + if( pCache->pCache ){ + sqlite3GlobalConfig.pcache.xDestroy(pCache->pCache); + } +} + +/* +** Discard the contents of the cache. +*/ +void sqlite3PcacheClear(PCache *pCache){ + sqlite3PcacheTruncate(pCache, 0); +} + +/* +** Merge two lists of pages connected by pDirty and in pgno order. +** Do not bother fixing the pDirtyPrev pointers. +*/ +static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ + PgHdr result, *pTail; + pTail = &result; + while( pA && pB ){ + if( pA->pgno<pB->pgno ){ + pTail->pDirty = pA; + pTail = pA; + pA = pA->pDirty; + }else{ + pTail->pDirty = pB; + pTail = pB; + pB = pB->pDirty; + } + } + if( pA ){ + pTail->pDirty = pA; + }else if( pB ){ + pTail->pDirty = pB; + }else{ + pTail->pDirty = 0; + } + return result.pDirty; +} + +/* +** Sort the list of pages in ascending order by pgno. Pages are +** connected by pDirty pointers. The pDirtyPrev pointers are +** corrupted by this sort. 
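pcacheMergeDirtyList above is the classic merge of two already-sorted singly-linked lists through a dummy head node. The self-contained sketch below does the same thing on an invented MNode type; the only difference is that the tail hand-off is collapsed into a single conditional.

#include <stdio.h>

typedef struct MNode MNode;
struct MNode {
  int key;
  MNode *pNext;
};

/* Merge two lists that are already sorted by key into one sorted list,
** threading the result through a dummy head node. */
static MNode *mergeSorted(MNode *pA, MNode *pB){
  MNode head, *pTail = &head;
  while( pA && pB ){
    if( pA->key<pB->key ){
      pTail->pNext = pA; pTail = pA; pA = pA->pNext;
    }else{
      pTail->pNext = pB; pTail = pB; pB = pB->pNext;
    }
  }
  pTail->pNext = pA ? pA : pB;   /* append whichever list is left over */
  return head.pNext;
}

int main(void){
  MNode a3 = {5,0}, a2 = {3,&a3}, a1 = {1,&a2};
  MNode b2 = {4,0}, b1 = {2,&b2};
  MNode *p;
  for(p=mergeSorted(&a1,&b1); p; p=p->pNext) printf("%d ", p->key);
  printf("\n");
  return 0;
}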
+*/ +#define N_SORT_BUCKET_ALLOC 25 +#define N_SORT_BUCKET 25 +#ifdef SQLITE_TEST + int sqlite3_pager_n_sort_bucket = 0; + #undef N_SORT_BUCKET + #define N_SORT_BUCKET \ + (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC) +#endif +static PgHdr *pcacheSortDirtyList(PgHdr *pIn){ + PgHdr *a[N_SORT_BUCKET_ALLOC], *p; + int i; + memset(a, 0, sizeof(a)); + while( pIn ){ + p = pIn; + pIn = p->pDirty; + p->pDirty = 0; + for(i=0; i<N_SORT_BUCKET-1; i++){ + if( a[i]==0 ){ + a[i] = p; + break; + }else{ + p = pcacheMergeDirtyList(a[i], p); + a[i] = 0; + } + } + if( i==N_SORT_BUCKET-1 ){ + /* Coverage: To get here, there need to be 2^(N_SORT_BUCKET) + ** elements in the input list. This is possible, but impractical. + ** Testing this line is the point of global variable + ** sqlite3_pager_n_sort_bucket. + */ + a[i] = pcacheMergeDirtyList(a[i], p); + } + } + p = a[0]; + for(i=1; i<N_SORT_BUCKET; i++){ + p = pcacheMergeDirtyList(p, a[i]); + } + return p; +} + +/* +** Return a list of all dirty pages in the cache, sorted by pgno. +*/ +PgHdr *sqlite3PcacheDirtyList(PCache *pCache){ + PgHdr *p; + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + p->pDirty = p->pDirtyNext; + } + return pcacheSortDirtyList(pCache->pDirty); +} + +/* +** Return the total number of referenced pages held by the cache. +*/ +int sqlite3PcacheRefCount(PCache *pCache){ + return pCache->nRef; +} + +/* +** Return the number of references to the page supplied as an argument. +*/ +int sqlite3PcachePageRefcount(PgHdr *p){ + return p->nRef; +} + +/* +** Return the total number of pages in the cache. +*/ +int sqlite3PcachePagecount(PCache *pCache){ + int nPage = 0; + if( pCache->pCache ){ + nPage = sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache); + } + return nPage; +} + +#ifdef SQLITE_TEST +/* +** Get the suggested cache-size value. +*/ +int sqlite3PcacheGetCachesize(PCache *pCache){ + return pCache->nMax; +} +#endif + +/* +** Set the suggested cache-size value. +*/ +void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){ + pCache->nMax = mxPage; + if( pCache->pCache ){ + sqlite3GlobalConfig.pcache.xCachesize(pCache->pCache, mxPage); + } +} + +#ifdef SQLITE_CHECK_PAGES +/* +** For all dirty pages currently in the cache, invoke the specified +** callback. This is only used if the SQLITE_CHECK_PAGES macro is +** defined. +*/ +void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){ + PgHdr *pDirty; + for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){ + xIter(pDirty); + } +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pcache.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pcache.h --- sqlite3-3.4.2/src/pcache.h 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/pcache.h 2009-06-12 03:37:48.000000000 +0100 @@ -0,0 +1,157 @@ +/* +** 2008 August 05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the interface to the sqlite page cache +** subsystem. +** +** @(#) $Id: pcache.h,v 1.19 2009/01/20 17:06:27 danielk1977 Exp $ +*/ + +#ifndef _PCACHE_H_ + +typedef struct PgHdr PgHdr; +typedef struct PCache PCache; + +/* +** Every page in the cache is controlled by an instance of the following +** structure. +*/ +struct PgHdr { + void *pData; /* Content of this page */ + void *pExtra; /* Extra content */ + PgHdr *pDirty; /* Transient list of dirty pages */ + Pgno pgno; /* Page number for this page */ + Pager *pPager; /* The pager this page is part of */ +#ifdef SQLITE_CHECK_PAGES + u32 pageHash; /* Hash of page content */ +#endif + u16 flags; /* PGHDR flags defined below */ + + /********************************************************************** + ** Elements above are public. All that follows is private to pcache.c + ** and should not be accessed by other modules. 
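pcacheSortDirtyList above is a merge sort driven by an array of buckets in which bucket i holds a sorted run of roughly 2^i pages, so arbitrarily long lists are sorted without recursion and without O(n) extra space. A standalone sketch of the same technique on an invented SNode type follows; the bucket count and helper names are chosen for the sketch only.

#include <stdio.h>
#include <string.h>

typedef struct SNode SNode;
struct SNode {
  int key;
  SNode *pNext;
};

/* Merge two sorted lists (same technique as the sketch shown earlier). */
static SNode *sMerge(SNode *pA, SNode *pB){
  SNode head, *pTail = &head;
  while( pA && pB ){
    if( pA->key<pB->key ){ pTail->pNext = pA; pTail = pA; pA = pA->pNext; }
    else                 { pTail->pNext = pB; pTail = pB; pB = pB->pNext; }
  }
  pTail->pNext = pA ? pA : pB;
  return head.pNext;
}

#define N_BUCKET 32    /* bucket i holds a sorted run of up to 2^i nodes */

/* Peel nodes off the input one at a time and fold them into exponentially
** growing sorted runs, then merge all the runs at the end. */
static SNode *sortList(SNode *pIn){
  SNode *a[N_BUCKET], *p;
  int i;
  memset(a, 0, sizeof(a));
  while( pIn ){
    p = pIn;
    pIn = p->pNext;
    p->pNext = 0;
    for(i=0; i<N_BUCKET-1 && a[i]; i++){
      p = sMerge(a[i], p);
      a[i] = 0;
    }
    a[i] = a[i] ? sMerge(a[i], p) : p;
  }
  p = a[0];
  for(i=1; i<N_BUCKET; i++) p = sMerge(p, a[i]);
  return p;
}

int main(void){
  SNode n[6] = {{4,0},{1,0},{5,0},{2,0},{6,0},{3,0}};
  SNode *p;
  int i;
  for(i=0; i<5; i++) n[i].pNext = &n[i+1];
  for(p=sortList(&n[0]); p; p=p->pNext) printf("%d ", p->key);
  printf("\n");
  return 0;
}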
+ */ + i16 nRef; /* Number of users of this page */ + PCache *pCache; /* Cache that owns this page */ + + PgHdr *pDirtyNext; /* Next element in list of dirty pages */ + PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ +}; + +/* Bit values for PgHdr.flags */ +#define PGHDR_DIRTY 0x002 /* Page has changed */ +#define PGHDR_NEED_SYNC 0x004 /* Fsync the rollback journal before + ** writing this page to the database */ +#define PGHDR_NEED_READ 0x008 /* Content is unread */ +#define PGHDR_REUSE_UNLIKELY 0x010 /* A hint that reuse is unlikely */ +#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ + +/* Initialize and shutdown the page cache subsystem */ +int sqlite3PcacheInitialize(void); +void sqlite3PcacheShutdown(void); + +/* Page cache buffer management: +** These routines implement SQLITE_CONFIG_PAGECACHE. +*/ +void sqlite3PCacheBufferSetup(void *, int sz, int n); + +/* Create a new pager cache. +** Under memory stress, invoke xStress to try to make pages clean. +** Only clean and unpinned pages can be reclaimed. +*/ +void sqlite3PcacheOpen( + int szPage, /* Size of every page */ + int szExtra, /* Extra space associated with each page */ + int bPurgeable, /* True if pages are on backing store */ + int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */ + void *pStress, /* Argument to xStress */ + PCache *pToInit /* Preallocated space for the PCache */ +); + +/* Modify the page-size after the cache has been created. */ +void sqlite3PcacheSetPageSize(PCache *, int); + +/* Return the size in bytes of a PCache object. Used to preallocate +** storage space. +*/ +int sqlite3PcacheSize(void); + +/* One release per successful fetch. Page is pinned until released. +** Reference counted. +*/ +int sqlite3PcacheFetch(PCache*, Pgno, int createFlag, PgHdr**); +void sqlite3PcacheRelease(PgHdr*); + +void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */ +void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */ +void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */ +void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */ + +/* Change a page number. Used by incr-vacuum. */ +void sqlite3PcacheMove(PgHdr*, Pgno); + +/* Remove all pages with pgno>x. Reset the cache if x==0 */ +void sqlite3PcacheTruncate(PCache*, Pgno x); + +/* Get a list of all dirty pages in the cache, sorted by page number */ +PgHdr *sqlite3PcacheDirtyList(PCache*); + +/* Reset and close the cache object */ +void sqlite3PcacheClose(PCache*); + +/* Clear flags from pages of the page cache */ +void sqlite3PcacheClearSyncFlags(PCache *); + +/* Discard the contents of the cache */ +void sqlite3PcacheClear(PCache*); + +/* Return the total number of outstanding page references */ +int sqlite3PcacheRefCount(PCache*); + +/* Increment the reference count of an existing page */ +void sqlite3PcacheRef(PgHdr*); + +int sqlite3PcachePageRefcount(PgHdr*); + +/* Return the total number of pages stored in the cache */ +int sqlite3PcachePagecount(PCache*); + +#ifdef SQLITE_CHECK_PAGES +/* Iterate through all dirty pages currently stored in the cache. This +** interface is only available if SQLITE_CHECK_PAGES is defined when the +** library is built. +*/ +void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); +#endif + +/* Set and get the suggested cache-size for the specified pager-cache. 
+** +** If no global maximum is configured, then the system attempts to limit +** the total number of pages cached by purgeable pager-caches to the sum +** of the suggested cache-sizes. +*/ +void sqlite3PcacheSetCachesize(PCache *, int); +#ifdef SQLITE_TEST +int sqlite3PcacheGetCachesize(PCache *); +#endif + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +/* Try to return memory used by the pcache module to the main memory heap */ +int sqlite3PcacheReleaseMemory(int); +#endif + +#ifdef SQLITE_TEST +void sqlite3PcacheStats(int*,int*,int*,int*); +#endif + +void sqlite3PCacheSetDefault(void); + +#endif /* _PCACHE_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/pragma.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/pragma.c --- sqlite3-3.4.2/src/pragma.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/pragma.c 2009-06-25 12:45:58.000000000 +0100 @@ -11,20 +11,13 @@ ************************************************************************* ** This file contains code used to implement the PRAGMA command. ** -** $Id: pragma.c,v 1.142 2007/06/26 10:38:55 danielk1977 Exp $ +** $Id: pragma.c,v 1.213 2009/06/19 14:06:03 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include /* Ignore this whole file if pragmas are disabled */ -#if !defined(SQLITE_OMIT_PRAGMA) && !defined(SQLITE_OMIT_PARSER) - -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) -# include "pager.h" -# include "btree.h" -#endif +#if !defined(SQLITE_OMIT_PRAGMA) /* ** Interpret the given string as a safety level. Return 0 for OFF, @@ -36,18 +29,18 @@ ** to support legacy SQL code. The safety level used to be boolean ** and older scripts may have used numbers 0 for OFF and 1 for ON. */ -static int getSafetyLevel(const char *z){ +static u8 getSafetyLevel(const char *z){ /* 123456789 123456789 */ static const char zText[] = "onoffalseyestruefull"; static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 16}; static const u8 iLength[] = {2, 2, 3, 5, 3, 4, 4}; static const u8 iValue[] = {1, 0, 0, 0, 1, 1, 2}; int i, n; - if( isdigit(*z) ){ - return atoi(z); + if( sqlite3Isdigit(*z) ){ + return (u8)atoi(z); } - n = strlen(z); - for(i=0; i=0&&i<=2)?i:0); + return (u8)((i>=0&&i<=2)?i:0); } #endif /* ifndef SQLITE_OMIT_AUTOVACUUM */ @@ -117,7 +110,7 @@ static int invalidateTempStorage(Parse *pParse){ sqlite3 *db = pParse->db; if( db->aDb[1].pBt!=0 ){ - if( !db->autoCommit ){ + if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){ sqlite3ErrorMsg(pParse, "temporary storage cannot be changed " "from within a transaction"); return SQLITE_ERROR; @@ -133,7 +126,7 @@ #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** If the TEMP database is open, close it and mark the database schema -** as needing reloading. This must be done when using the TEMP_STORE +** as needing reloading. This must be done when using the SQLITE_TEMP_STORE ** or DEFAULT_TEMP_STORE pragmas. */ static int changeTempStorage(Parse *pParse, const char *zStorageType){ @@ -143,7 +136,7 @@ if( invalidateTempStorage( pParse ) != SQLITE_OK ){ return SQLITE_ERROR; } - db->temp_store = ts; + db->temp_store = (u8)ts; return SQLITE_OK; } #endif /* SQLITE_PAGER_PRAGMAS */ @@ -151,14 +144,17 @@ /* ** Generate code to return a single integer value. 
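getSafetyLevel, partially visible in the hunk above, looks keywords up in a single packed string plus parallel offset/length/value tables, so overlapping words such as "on", "no" and "off" share storage. A self-contained sketch of that table layout follows; it uses case-sensitive strncmp where SQLite uses sqlite3StrNICmp, and the function name is invented.

#include <stdio.h>
#include <string.h>

/* All keywords packed into one string; each entry is (offset, length, value).
** Overlapping words share bytes: "on", "no", "off", "false", "yes",
** "true", "full". */
static const char zText[] = "onoffalseyestruefull";
static const unsigned char iOffset[] = {0, 1, 2, 4, 9, 12, 16};
static const unsigned char iLength[] = {2, 2, 3, 5, 3, 4, 4};
static const unsigned char iValue[]  = {1, 0, 0, 0, 1, 1, 2};

/* Return the value for keyword z, or dflt if it is not in the table. */
static int keywordValue(const char *z, int dflt){
  int i;
  size_t n = strlen(z);
  for(i=0; i<(int)(sizeof(iLength)/sizeof(iLength[0])); i++){
    if( iLength[i]==n && strncmp(&zText[iOffset[i]], z, n)==0 ){
      return iValue[i];
    }
  }
  return dflt;
}

int main(void){
  printf("on=%d full=%d bogus=%d\n",
         keywordValue("on",0), keywordValue("full",0), keywordValue("bogus",9));
  return 0;
}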
*/ -static void returnSingleInt(Parse *pParse, const char *zLabel, int value){ +static void returnSingleInt(Parse *pParse, const char *zLabel, i64 value){ Vdbe *v = sqlite3GetVdbe(pParse); - sqlite3VdbeAddOp(v, OP_Integer, value, 0); - if( pParse->explain==0 ){ - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, P3_STATIC); + int mem = ++pParse->nMem; + i64 *pI64 = sqlite3DbMallocRaw(pParse->db, sizeof(value)); + if( pI64 ){ + memcpy(pI64, &value, sizeof(value)); } - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeAddOp4(v, OP_Int64, 0, mem, 0, (char*)pI64, P4_INT64); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, SQLITE_STATIC); + sqlite3VdbeAddOp2(v, OP_ResultRow, mem, 1); } #ifndef SQLITE_OMIT_FLAG_PRAGMAS @@ -178,6 +174,7 @@ { "empty_result_callbacks", SQLITE_NullCallback }, { "legacy_file_format", SQLITE_LegacyFileFmt }, { "fullfsync", SQLITE_FullFSync }, + { "reverse_unordered_selects", SQLITE_ReverseOrder }, #ifdef SQLITE_DEBUG { "sql_trace", SQLITE_SqlTrace }, { "vdbe_listing", SQLITE_VdbeListing }, @@ -196,12 +193,13 @@ }; int i; const struct sPragmaType *p; - for(i=0, p=aPragma; izName)==0 ){ sqlite3 *db = pParse->db; Vdbe *v; v = sqlite3GetVdbe(pParse); - if( v ){ + assert( v!=0 ); /* Already allocated by sqlite3Pragma() */ + if( ALWAYS(v) ){ if( zRight==0 ){ returnSingleInt(pParse, p->zName, (db->flags & p->mask)!=0 ); }else{ @@ -210,8 +208,15 @@ }else{ db->flags &= ~p->mask; } + + /* Many of the flag-pragmas modify the code generated by the SQL + ** compiler (eg. count_changes). So add an opcode to expire all + ** compiled SQL statements after modifying a pragma value. + */ + sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); } } + return 1; } } @@ -220,6 +225,21 @@ #endif /* SQLITE_OMIT_FLAG_PRAGMAS */ /* +** Return a human-readable name for a constraint resolution action. +*/ +static const char *actionName(u8 action){ + const char *zName; + switch( action ){ + case OE_SetNull: zName = "SET NULL"; break; + case OE_SetDflt: zName = "SET DEFAULT"; break; + case OE_Cascade: zName = "CASCADE"; break; + default: zName = "RESTRICT"; + assert( action==OE_Restrict ); break; + } + return zName; +} + +/* ** Process a pragma statement. ** ** Pragmas are of this form: @@ -248,8 +268,9 @@ int iDb; /* Database index for */ sqlite3 *db = pParse->db; Db *pDb; - Vdbe *v = sqlite3GetVdbe(pParse); + Vdbe *v = pParse->pVdbe = sqlite3VdbeCreate(db); if( v==0 ) return; + pParse->nMem = 2; /* Interpret the [database.] part of the pragma statement. iDb is the ** index of the database this pragma is being applied to in db.aDb[]. */ @@ -264,15 +285,16 @@ return; } - zLeft = sqlite3NameFromToken(pId); + zLeft = sqlite3NameFromToken(db, pId); if( !zLeft ) return; if( minusFlag ){ - zRight = sqlite3MPrintf("-%T", pValue); + zRight = sqlite3MPrintf(db, "-%T", pValue); }else{ - zRight = sqlite3NameFromToken(pValue); + zRight = sqlite3NameFromToken(db, pValue); } - zDb = ((iDb>0)?pDb->zName:0); + assert( pId2 ); + zDb = pId2->n>0 ? 
pDb->zName : 0; if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){ goto pragma_out; } @@ -296,19 +318,21 @@ */ if( sqlite3StrICmp(zLeft,"default_cache_size")==0 ){ static const VdbeOpList getCacheSize[] = { - { OP_ReadCookie, 0, 2, 0}, /* 0 */ - { OP_AbsValue, 0, 0, 0}, - { OP_Dup, 0, 0, 0}, - { OP_Integer, 0, 0, 0}, - { OP_Ne, 0, 6, 0}, - { OP_Integer, 0, 0, 0}, /* 5 */ - { OP_Callback, 1, 0, 0}, + { OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 0 */ + { OP_IfPos, 1, 6, 0}, + { OP_Integer, 0, 2, 0}, + { OP_Subtract, 1, 2, 1}, + { OP_IfPos, 1, 6, 0}, + { OP_Integer, 0, 1, 0}, /* 5 */ + { OP_ResultRow, 1, 1, 0}, }; int addr; if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3VdbeUsesBtree(v, iDb); if( !zRight ){ sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", P3_STATIC); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", SQLITE_STATIC); + pParse->nMem += 2; addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize); sqlite3VdbeChangeP1(v, addr, iDb); sqlite3VdbeChangeP1(v, addr+5, SQLITE_DEFAULT_CACHE_SIZE); @@ -316,12 +340,12 @@ int size = atoi(zRight); if( size<0 ) size = -size; sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3VdbeAddOp(v, OP_Integer, size, 0); - sqlite3VdbeAddOp(v, OP_ReadCookie, iDb, 2); - addr = sqlite3VdbeAddOp(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp(v, OP_Ge, 0, addr+3); - sqlite3VdbeAddOp(v, OP_Negative, 0, 0); - sqlite3VdbeAddOp(v, OP_SetCookie, iDb, 2); + sqlite3VdbeAddOp2(v, OP_Integer, size, 1); + sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, 2, BTREE_DEFAULT_CACHE_SIZE); + addr = sqlite3VdbeAddOp2(v, OP_IfPos, 2, 0); + sqlite3VdbeAddOp2(v, OP_Integer, -size, 1); + sqlite3VdbeJumpHere(v, addr); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, 1); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); } @@ -338,11 +362,18 @@ */ if( sqlite3StrICmp(zLeft,"page_size")==0 ){ Btree *pBt = pDb->pBt; + assert( pBt!=0 ); if( !zRight ){ - int size = pBt ? sqlite3BtreeGetPageSize(pBt) : 0; + int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0; returnSingleInt(pParse, "page_size", size); }else{ - sqlite3BtreeSetPageSize(pBt, atoi(zRight), -1); + /* Malloc may fail when setting the page-size, as there is an internal + ** buffer that the pager module resizes using sqlite3_realloc(). + */ + db->nextPagesize = atoi(zRight); + if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize, -1, 0) ){ + db->mallocFailed = 1; + } } }else @@ -358,16 +389,33 @@ if( sqlite3StrICmp(zLeft,"max_page_count")==0 ){ Btree *pBt = pDb->pBt; int newMax = 0; + assert( pBt!=0 ); if( zRight ){ newMax = atoi(zRight); } - if( pBt ){ + if( ALWAYS(pBt) ){ newMax = sqlite3BtreeMaxPageCount(pBt, newMax); } returnSingleInt(pParse, "max_page_count", newMax); }else /* + ** PRAGMA [database.]page_count + ** + ** Return the number of pages in the specified database. 
+ */ + if( sqlite3StrICmp(zLeft,"page_count")==0 ){ + int iReg; + if( sqlite3ReadSchema(pParse) ) goto pragma_out; + sqlite3CodeVerifySchema(pParse, iDb); + iReg = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg); + sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "page_count", SQLITE_STATIC); + }else + + /* ** PRAGMA [database.]locking_mode ** PRAGMA [database.]locking_mode = (normal|exclusive) */ @@ -398,7 +446,7 @@ pPager = sqlite3BtreePager(db->aDb[ii].pBt); sqlite3PagerLockingMode(pPager, eMode); } - db->dfltLockMode = eMode; + db->dfltLockMode = (u8)eMode; } pPager = sqlite3BtreePager(pDb->pBt); eMode = sqlite3PagerLockingMode(pPager, eMode); @@ -409,31 +457,118 @@ zRet = "exclusive"; } sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", P3_STATIC); - sqlite3VdbeOp3(v, OP_String8, 0, 0, zRet, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", SQLITE_STATIC); + sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zRet, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); + }else + + /* + ** PRAGMA [database.]journal_mode + ** PRAGMA [database.]journal_mode = (delete|persist|off|truncate|memory) + */ + if( sqlite3StrICmp(zLeft,"journal_mode")==0 ){ + int eMode; + static char * const azModeName[] = { + "delete", "persist", "off", "truncate", "memory" + }; + + if( zRight==0 ){ + eMode = PAGER_JOURNALMODE_QUERY; + }else{ + int n = sqlite3Strlen30(zRight); + eMode = sizeof(azModeName)/sizeof(azModeName[0]) - 1; + while( eMode>=0 && sqlite3StrNICmp(zRight, azModeName[eMode], n)!=0 ){ + eMode--; + } + } + if( pId2->n==0 && eMode==PAGER_JOURNALMODE_QUERY ){ + /* Simple "PRAGMA journal_mode;" statement. This is a query for + ** the current default journal mode (which may be different to + ** the journal-mode of the main database). + */ + eMode = db->dfltJournalMode; + }else{ + Pager *pPager; + if( pId2->n==0 ){ + /* This indicates that no database name was specified as part + ** of the PRAGMA command. In this case the journal-mode must be + ** set on all attached databases, as well as the main db file. + ** + ** Also, the sqlite3.dfltJournalMode variable is set so that + ** any subsequently attached databases also use the specified + ** journal mode. + */ + int ii; + assert(pDb==&db->aDb[0]); + for(ii=1; iinDb; ii++){ + if( db->aDb[ii].pBt ){ + pPager = sqlite3BtreePager(db->aDb[ii].pBt); + sqlite3PagerJournalMode(pPager, eMode); + } + } + db->dfltJournalMode = (u8)eMode; + } + pPager = sqlite3BtreePager(pDb->pBt); + eMode = sqlite3PagerJournalMode(pPager, eMode); + } + assert( eMode==PAGER_JOURNALMODE_DELETE + || eMode==PAGER_JOURNALMODE_TRUNCATE + || eMode==PAGER_JOURNALMODE_PERSIST + || eMode==PAGER_JOURNALMODE_OFF + || eMode==PAGER_JOURNALMODE_MEMORY ); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "journal_mode", SQLITE_STATIC); + sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, + azModeName[eMode], P4_STATIC); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); + }else + + /* + ** PRAGMA [database.]journal_size_limit + ** PRAGMA [database.]journal_size_limit=N + ** + ** Get or set the size limit on rollback journal files. 
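The journal_mode handling above maps the user string onto azModeName[] by scanning from the last entry down with a length-limited, case-insensitive compare, which means a prefix of a mode name is also accepted. A sketch of that lookup, with a local helper standing in for sqlite3StrNICmp:

#include <stdio.h>
#include <ctype.h>
#include <string.h>

/* Case-insensitive comparison of the first n bytes, similar in spirit to
** sqlite3StrNICmp (this sketch just uses tolower from <ctype.h>). */
static int ciNCmp(const char *a, const char *b, size_t n){
  size_t i;
  for(i=0; i<n; i++){
    int c = tolower((unsigned char)a[i]) - tolower((unsigned char)b[i]);
    if( c || a[i]==0 ) return c;
  }
  return 0;
}

static const char *const azMode[] = {
  "delete", "persist", "off", "truncate", "memory"
};

/* Map a user-supplied string to an index in azMode, scanning from the last
** entry down as the pragma does; -1 means no match. */
static int lookupMode(const char *z){
  int i = (int)(sizeof(azMode)/sizeof(azMode[0])) - 1;
  size_t n = strlen(z);
  while( i>=0 && ciNCmp(z, azMode[i], n)!=0 ) i--;
  return i;
}

int main(void){
  printf("PERSIST -> %d, wal -> %d\n", lookupMode("PERSIST"), lookupMode("wal"));
  return 0;
}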
+ */ + if( sqlite3StrICmp(zLeft,"journal_size_limit")==0 ){ + Pager *pPager = sqlite3BtreePager(pDb->pBt); + i64 iLimit = -2; + if( zRight ){ + sqlite3Atoi64(zRight, &iLimit); + if( iLimit<-1 ) iLimit = -1; + } + iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit); + returnSingleInt(pParse, "journal_size_limit", iLimit); }else + #endif /* SQLITE_OMIT_PAGER_PRAGMAS */ /* ** PRAGMA [database.]auto_vacuum ** PRAGMA [database.]auto_vacuum=N ** - ** Get or set the (boolean) value of the database 'auto-vacuum' parameter. + ** Get or set the value of the database 'auto-vacuum' parameter. + ** The value is one of: 0 NONE 1 FULL 2 INCREMENTAL */ #ifndef SQLITE_OMIT_AUTOVACUUM if( sqlite3StrICmp(zLeft,"auto_vacuum")==0 ){ Btree *pBt = pDb->pBt; + assert( pBt!=0 ); if( sqlite3ReadSchema(pParse) ){ goto pragma_out; } if( !zRight ){ - int auto_vacuum = - pBt ? sqlite3BtreeGetAutoVacuum(pBt) : SQLITE_DEFAULT_AUTOVACUUM; + int auto_vacuum; + if( ALWAYS(pBt) ){ + auto_vacuum = sqlite3BtreeGetAutoVacuum(pBt); + }else{ + auto_vacuum = SQLITE_DEFAULT_AUTOVACUUM; + } returnSingleInt(pParse, "auto_vacuum", auto_vacuum); }else{ int eAuto = getAutoVacuum(zRight); - if( eAuto>=0 ){ + assert( eAuto>=0 && eAuto<=2 ); + db->nextAutovac = (u8)eAuto; + if( ALWAYS(eAuto>=0) ){ /* Call SetAutoVacuum() to set initialize the internal auto and ** incr-vacuum flags. This is required in case this connection ** creates the database file. It is important that it is created @@ -447,12 +582,12 @@ ** that this really is an auto-vacuum capable database. */ static const VdbeOpList setMeta6[] = { - { OP_Transaction, 0, 1, 0}, /* 0 */ - { OP_ReadCookie, 0, 3, 0}, /* 1 */ - { OP_If, 0, 0, 0}, /* 2 */ - { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ - { OP_Integer, 0, 0, 0}, /* 4 */ - { OP_SetCookie, 0, 6, 0}, /* 5 */ + { OP_Transaction, 0, 1, 0}, /* 0 */ + { OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE}, + { OP_If, 1, 0, 0}, /* 2 */ + { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ + { OP_Integer, 0, 1, 0}, /* 4 */ + { OP_SetCookie, 0, BTREE_INCR_VACUUM, 1}, /* 5 */ }; int iAddr; iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6); @@ -461,6 +596,7 @@ sqlite3VdbeChangeP2(v, iAddr+2, iAddr+4); sqlite3VdbeChangeP1(v, iAddr+4, eAuto-1); sqlite3VdbeChangeP1(v, iAddr+5, iDb); + sqlite3VdbeUsesBtree(v, iDb); } } } @@ -482,11 +618,11 @@ iLimit = 0x7fffffff; } sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3VdbeAddOp(v, OP_MemInt, iLimit, 0); - addr = sqlite3VdbeAddOp(v, OP_IncrVacuum, iDb, 0); - sqlite3VdbeAddOp(v, OP_Callback, 0, 0); - sqlite3VdbeAddOp(v, OP_MemIncr, -1, 0); - sqlite3VdbeAddOp(v, OP_IfMemPos, 0, addr); + sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1); + addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); + sqlite3VdbeAddOp1(v, OP_ResultRow, 1); + sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); + sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); sqlite3VdbeJumpHere(v, addr); }else #endif @@ -552,31 +688,87 @@ if( sqlite3_temp_directory ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, - "temp_store_directory", P3_STATIC); - sqlite3VdbeOp3(v, OP_String8, 0, 0, sqlite3_temp_directory, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + "temp_store_directory", SQLITE_STATIC); + sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_temp_directory, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } }else{ - if( zRight[0] && !sqlite3OsIsDirWritable(zRight) ){ - sqlite3ErrorMsg(pParse, "not a writable directory"); - goto pragma_out; +#ifndef SQLITE_OMIT_WSD + if( zRight[0] ){ + int rc; + int res; + rc = sqlite3OsAccess(db->pVfs, 
zRight, SQLITE_ACCESS_READWRITE, &res); + if( rc!=SQLITE_OK || res==0 ){ + sqlite3ErrorMsg(pParse, "not a writable directory"); + goto pragma_out; + } } - if( TEMP_STORE==0 - || (TEMP_STORE==1 && db->temp_store<=1) - || (TEMP_STORE==2 && db->temp_store==1) + if( SQLITE_TEMP_STORE==0 + || (SQLITE_TEMP_STORE==1 && db->temp_store<=1) + || (SQLITE_TEMP_STORE==2 && db->temp_store==1) ){ invalidateTempStorage(pParse); } - sqliteFree(sqlite3_temp_directory); + sqlite3_free(sqlite3_temp_directory); if( zRight[0] ){ - sqlite3_temp_directory = zRight; - zRight = 0; + sqlite3_temp_directory = sqlite3DbStrDup(0, zRight); }else{ sqlite3_temp_directory = 0; } +#endif /* SQLITE_OMIT_WSD */ } }else +#if !defined(SQLITE_ENABLE_LOCKING_STYLE) +# if defined(__APPLE__) +# define SQLITE_ENABLE_LOCKING_STYLE 1 +# else +# define SQLITE_ENABLE_LOCKING_STYLE 0 +# endif +#endif +#if SQLITE_ENABLE_LOCKING_STYLE + /* + ** PRAGMA [database.]lock_proxy_file + ** PRAGMA [database.]lock_proxy_file = ":auto:"|"lock_file_path" + ** + ** Return or set the value of the lock_proxy_file flag. Changing + ** the value sets a specific file to be used for database access locks. + ** + */ + if( sqlite3StrICmp(zLeft, "lock_proxy_file")==0 ){ + if( !zRight ){ + Pager *pPager = sqlite3BtreePager(pDb->pBt); + char *proxy_file_path = NULL; + sqlite3_file *pFile = sqlite3PagerFile(pPager); + sqlite3OsFileControl(pFile, SQLITE_GET_LOCKPROXYFILE, + &proxy_file_path); + + if( proxy_file_path ){ + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, + "lock_proxy_file", SQLITE_STATIC); + sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, proxy_file_path, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); + } + }else{ + Pager *pPager = sqlite3BtreePager(pDb->pBt); + sqlite3_file *pFile = sqlite3PagerFile(pPager); + int res; + if( zRight[0] ){ + res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, + zRight); + } else { + res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, + NULL); + } + if( res!=SQLITE_OK ){ + sqlite3ErrorMsg(pParse, "failed to set lock proxy file"); + goto pragma_out; + } + } + }else +#endif /* SQLITE_ENABLE_LOCKING_STYLE */ + /* ** PRAGMA [database.]synchronous ** PRAGMA [database.]synchronous=OFF|ON|NORMAL|FULL @@ -630,31 +822,31 @@ int nHidden = 0; Column *pCol; sqlite3VdbeSetNumCols(v, 6); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", P3_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", P3_STATIC); - sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", P3_STATIC); - sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", P3_STATIC); + pParse->nMem = 6; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", SQLITE_STATIC); sqlite3ViewGetColumnNames(pParse, pTab); for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ - const Token *pDflt; if( IsHiddenColumn(pCol) ){ nHidden++; continue; } - sqlite3VdbeAddOp(v, OP_Integer, i-nHidden, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pCol->zName, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, + sqlite3VdbeAddOp2(v, OP_Integer, i-nHidden, 1); + sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pCol->zName, 0); 
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pCol->zType ? pCol->zType : "", 0); - sqlite3VdbeAddOp(v, OP_Integer, pCol->notNull, 0); - if( pCol->pDflt && (pDflt = &pCol->pDflt->span)->z ){ - sqlite3VdbeOp3(v, OP_String8, 0, 0, (char*)pDflt->z, pDflt->n); + sqlite3VdbeAddOp2(v, OP_Integer, (pCol->notNull ? 1 : 0), 4); + if( pCol->zDflt ){ + sqlite3VdbeAddOp4(v, OP_String8, 0, 5, 0, (char*)pCol->zDflt, 0); }else{ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, 5); } - sqlite3VdbeAddOp(v, OP_Integer, pCol->isPrimKey, 0); - sqlite3VdbeAddOp(v, OP_Callback, 6, 0); + sqlite3VdbeAddOp2(v, OP_Integer, pCol->isPrimKey, 6); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 6); } } }else @@ -668,16 +860,17 @@ int i; pTab = pIdx->pTable; sqlite3VdbeSetNumCols(v, 3); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", P3_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "name", P3_STATIC); + pParse->nMem = 3; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "name", SQLITE_STATIC); for(i=0; inColumn; i++){ int cnum = pIdx->aiColumn[i]; - sqlite3VdbeAddOp(v, OP_Integer, i, 0); - sqlite3VdbeAddOp(v, OP_Integer, cnum, 0); + sqlite3VdbeAddOp2(v, OP_Integer, i, 1); + sqlite3VdbeAddOp2(v, OP_Integer, cnum, 2); assert( pTab->nCol>cnum ); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pTab->aCol[cnum].zName, 0); - sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pTab->aCol[cnum].zName, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); } } }else @@ -693,14 +886,15 @@ if( pIdx ){ int i = 0; sqlite3VdbeSetNumCols(v, 3); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", P3_STATIC); + pParse->nMem = 3; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", SQLITE_STATIC); while(pIdx){ - sqlite3VdbeAddOp(v, OP_Integer, i, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pIdx->zName, 0); - sqlite3VdbeAddOp(v, OP_Integer, pIdx->onError!=OE_None, 0); - sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + sqlite3VdbeAddOp2(v, OP_Integer, i, 1); + sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0); + sqlite3VdbeAddOp2(v, OP_Integer, pIdx->onError!=OE_None, 3); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); ++i; pIdx = pIdx->pNext; } @@ -712,17 +906,18 @@ int i; if( sqlite3ReadSchema(pParse) ) goto pragma_out; sqlite3VdbeSetNumCols(v, 3); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", P3_STATIC); + pParse->nMem = 3; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", SQLITE_STATIC); for(i=0; inDb; i++){ if( db->aDb[i].pBt==0 ) continue; assert( db->aDb[i].zName!=0 ); - sqlite3VdbeAddOp(v, OP_Integer, i, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, db->aDb[i].zName, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, + sqlite3VdbeAddOp2(v, OP_Integer, i, 1); + sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, db->aDb[i].zName, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, 
sqlite3BtreeGetFilename(db->aDb[i].pBt), 0); - sqlite3VdbeAddOp(v, OP_Callback, 3, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); } }else @@ -730,13 +925,14 @@ int i = 0; HashElem *p; sqlite3VdbeSetNumCols(v, 2); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", P3_STATIC); + pParse->nMem = 2; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ CollSeq *pColl = (CollSeq *)sqliteHashData(p); - sqlite3VdbeAddOp(v, OP_Integer, i++, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pColl->zName, 0); - sqlite3VdbeAddOp(v, OP_Callback, 2, 0); + sqlite3VdbeAddOp2(v, OP_Integer, i++, 1); + sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pColl->zName, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); } }else #endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */ @@ -752,23 +948,32 @@ pFK = pTab->pFKey; if( pFK ){ int i = 0; - sqlite3VdbeSetNumCols(v, 5); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", P3_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", P3_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", P3_STATIC); - sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", P3_STATIC); + sqlite3VdbeSetNumCols(v, 8); + pParse->nMem = 8; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "on_update", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 6, COLNAME_NAME, "on_delete", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 7, COLNAME_NAME, "match", SQLITE_STATIC); while(pFK){ int j; for(j=0; jnCol; j++){ char *zCol = pFK->aCol[j].zCol; - sqlite3VdbeAddOp(v, OP_Integer, i, 0); - sqlite3VdbeAddOp(v, OP_Integer, j, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, pFK->zTo, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, - pTab->aCol[pFK->aCol[j].iFrom].zName, 0); - sqlite3VdbeOp3(v, zCol ? OP_String8 : OP_Null, 0, 0, zCol, 0); - sqlite3VdbeAddOp(v, OP_Callback, 5, 0); + char *zOnUpdate = (char *)actionName(pFK->updateConf); + char *zOnDelete = (char *)actionName(pFK->deleteConf); + sqlite3VdbeAddOp2(v, OP_Integer, i, 1); + sqlite3VdbeAddOp2(v, OP_Integer, j, 2); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pFK->zTo, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, + pTab->aCol[pFK->aCol[j].iFrom].zName, 0); + sqlite3VdbeAddOp4(v, zCol ? OP_String8 : OP_Null, 0, 5, 0, zCol, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 6, 0, zOnUpdate, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 7, 0, zOnDelete, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 8, 0, "NONE", 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 8); } ++i; pFK = pFK->pNextFrom; @@ -804,7 +1009,13 @@ #endif #ifndef SQLITE_OMIT_INTEGRITY_CHECK - if( sqlite3StrICmp(zLeft, "integrity_check")==0 ){ + /* Pragma "quick_check" is an experimental reduced version of + ** integrity_check designed to detect most database corruption + ** without most of the overhead of a full integrity-check. + */ + if( sqlite3StrICmp(zLeft, "integrity_check")==0 + || sqlite3StrICmp(zLeft, "quick_check")==0 + ){ int i, j, addr, mxErr; /* Code that appears at the end of the integrity check. 
If no error @@ -812,17 +1023,19 @@ ** error message */ static const VdbeOpList endCode[] = { - { OP_MemLoad, 0, 0, 0}, - { OP_Integer, 0, 0, 0}, - { OP_Ne, 0, 0, 0}, /* 2 */ - { OP_String8, 0, 0, "ok"}, - { OP_Callback, 1, 0, 0}, + { OP_AddImm, 1, 0, 0}, /* 0 */ + { OP_IfNeg, 1, 0, 0}, /* 1 */ + { OP_String8, 0, 3, 0}, /* 2 */ + { OP_ResultRow, 3, 1, 0}, }; + int isQuick = (zLeft[0]=='q'); + /* Initialize the VDBE program */ if( sqlite3ReadSchema(pParse) ) goto pragma_out; + pParse->nMem = 6; sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", P3_STATIC); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", SQLITE_STATIC); /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; @@ -832,7 +1045,7 @@ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } } - sqlite3VdbeAddOp(v, OP_MemInt, mxErr, 0); + sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1); /* reg[1] holds errors left */ /* Do an integrity check on each database file */ for(i=0; inDb; i++){ @@ -843,100 +1056,117 @@ if( OMIT_TEMPDB && i==1 ) continue; sqlite3CodeVerifySchema(pParse, i); - addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); - sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */ + sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeJumpHere(v, addr); /* Do an integrity check of the B-Tree + ** + ** Begin by filling registers 2, 3, ... with the root pages numbers + ** for all tables and indices in the database. */ pTbls = &db->aDb[i].pSchema->tblHash; for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx; - sqlite3VdbeAddOp(v, OP_Integer, pTab->tnum, 0); + sqlite3VdbeAddOp2(v, OP_Integer, pTab->tnum, 2+cnt); cnt++; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - sqlite3VdbeAddOp(v, OP_Integer, pIdx->tnum, 0); + sqlite3VdbeAddOp2(v, OP_Integer, pIdx->tnum, 2+cnt); cnt++; } } - if( cnt==0 ) continue; - sqlite3VdbeAddOp(v, OP_IntegrityCk, 0, i); - addr = sqlite3VdbeAddOp(v, OP_IsNull, -1, 0); - sqlite3VdbeOp3(v, OP_String8, 0, 0, - sqlite3MPrintf("*** in database %s ***\n", db->aDb[i].zName), - P3_DYNAMIC); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); - sqlite3VdbeAddOp(v, OP_Concat, 0, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + + /* Make sure sufficient number of registers have been allocated */ + if( pParse->nMem < cnt+4 ){ + pParse->nMem = cnt+4; + } + + /* Do the b-tree integrity checks */ + sqlite3VdbeAddOp3(v, OP_IntegrityCk, 2, cnt, 1); + sqlite3VdbeChangeP5(v, (u8)i); + addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, + sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName), + P4_DYNAMIC); + sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1); + sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2); + sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1); sqlite3VdbeJumpHere(v, addr); /* Make sure all the indices are constructed correctly. 
*/ - for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + for(x=sqliteHashFirst(pTbls); x && !isQuick; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx; int loopTop; if( pTab->pIndex==0 ) continue; - addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); - sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Stop if out of errors */ + sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeJumpHere(v, addr); sqlite3OpenTableAndIndices(pParse, pTab, 1, OP_OpenRead); - sqlite3VdbeAddOp(v, OP_MemInt, 0, 1); - loopTop = sqlite3VdbeAddOp(v, OP_Rewind, 1, 0); - sqlite3VdbeAddOp(v, OP_MemIncr, 1, 1); + sqlite3VdbeAddOp2(v, OP_Integer, 0, 2); /* reg(2) will count entries */ + loopTop = sqlite3VdbeAddOp2(v, OP_Rewind, 1, 0); + sqlite3VdbeAddOp2(v, OP_AddImm, 2, 1); /* increment entry count */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ int jmp2; static const VdbeOpList idxErr[] = { - { OP_MemIncr, -1, 0, 0}, - { OP_String8, 0, 0, "rowid "}, - { OP_Rowid, 1, 0, 0}, - { OP_String8, 0, 0, " missing from index "}, - { OP_String8, 0, 0, 0}, /* 4 */ - { OP_Concat, 2, 0, 0}, - { OP_Callback, 1, 0, 0}, + { OP_AddImm, 1, -1, 0}, + { OP_String8, 0, 3, 0}, /* 1 */ + { OP_Rowid, 1, 4, 0}, + { OP_String8, 0, 5, 0}, /* 3 */ + { OP_String8, 0, 6, 0}, /* 4 */ + { OP_Concat, 4, 3, 3}, + { OP_Concat, 5, 3, 3}, + { OP_Concat, 6, 3, 3}, + { OP_ResultRow, 3, 1, 0}, + { OP_IfPos, 1, 0, 0}, /* 9 */ + { OP_Halt, 0, 0, 0}, }; - sqlite3GenerateIndexKey(v, pIdx, 1); - jmp2 = sqlite3VdbeAddOp(v, OP_Found, j+2, 0); + sqlite3GenerateIndexKey(pParse, pIdx, 1, 3, 1); + jmp2 = sqlite3VdbeAddOp3(v, OP_Found, j+2, 0, 3); addr = sqlite3VdbeAddOpList(v, ArraySize(idxErr), idxErr); - sqlite3VdbeChangeP3(v, addr+4, pIdx->zName, P3_STATIC); + sqlite3VdbeChangeP4(v, addr+1, "rowid ", P4_STATIC); + sqlite3VdbeChangeP4(v, addr+3, " missing from index ", P4_STATIC); + sqlite3VdbeChangeP4(v, addr+4, pIdx->zName, P4_STATIC); + sqlite3VdbeJumpHere(v, addr+9); sqlite3VdbeJumpHere(v, jmp2); } - sqlite3VdbeAddOp(v, OP_Next, 1, loopTop+1); + sqlite3VdbeAddOp2(v, OP_Next, 1, loopTop+1); sqlite3VdbeJumpHere(v, loopTop); for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ static const VdbeOpList cntIdx[] = { - { OP_MemInt, 0, 2, 0}, + { OP_Integer, 0, 3, 0}, { OP_Rewind, 0, 0, 0}, /* 1 */ - { OP_MemIncr, 1, 2, 0}, + { OP_AddImm, 3, 1, 0}, { OP_Next, 0, 0, 0}, /* 3 */ - { OP_MemLoad, 1, 0, 0}, - { OP_MemLoad, 2, 0, 0}, - { OP_Eq, 0, 0, 0}, /* 6 */ - { OP_MemIncr, -1, 0, 0}, - { OP_String8, 0, 0, "wrong # of entries in index "}, - { OP_String8, 0, 0, 0}, /* 9 */ - { OP_Concat, 0, 0, 0}, - { OP_Callback, 1, 0, 0}, + { OP_Eq, 2, 0, 3}, /* 4 */ + { OP_AddImm, 1, -1, 0}, + { OP_String8, 0, 2, 0}, /* 6 */ + { OP_String8, 0, 3, 0}, /* 7 */ + { OP_Concat, 3, 2, 2}, + { OP_ResultRow, 2, 1, 0}, }; - if( pIdx->tnum==0 ) continue; - addr = sqlite3VdbeAddOp(v, OP_IfMemPos, 0, 0); - sqlite3VdbeAddOp(v, OP_Halt, 0, 0); + addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); + sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeJumpHere(v, addr); addr = sqlite3VdbeAddOpList(v, ArraySize(cntIdx), cntIdx); sqlite3VdbeChangeP1(v, addr+1, j+2); sqlite3VdbeChangeP2(v, addr+1, addr+4); sqlite3VdbeChangeP1(v, addr+3, j+2); sqlite3VdbeChangeP2(v, addr+3, addr+2); - sqlite3VdbeJumpHere(v, addr+6); - sqlite3VdbeChangeP3(v, addr+9, pIdx->zName, P3_STATIC); + sqlite3VdbeJumpHere(v, addr+4); + sqlite3VdbeChangeP4(v, addr+6, + "wrong # of entries in index ", P4_STATIC); + sqlite3VdbeChangeP4(v, addr+7, pIdx->zName, P4_STATIC); } } } addr = 
sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode); - sqlite3VdbeChangeP1(v, addr+1, mxErr); - sqlite3VdbeJumpHere(v, addr+2); + sqlite3VdbeChangeP2(v, addr, -mxErr); + sqlite3VdbeJumpHere(v, addr+1); + sqlite3VdbeChangeP4(v, addr+2, "ok", P4_STATIC); }else #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ @@ -945,7 +1175,7 @@ ** PRAGMA encoding ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be" ** - ** In it's first form, this pragma returns the encoding of the main + ** In its first form, this pragma returns the encoding of the main ** database. If the database is not initialized, it is initialized now. ** ** The second form of this pragma is a no-op if the main database file @@ -968,11 +1198,11 @@ char *zName; u8 enc; } encnames[] = { - { "UTF-8", SQLITE_UTF8 }, { "UTF8", SQLITE_UTF8 }, - { "UTF-16le", SQLITE_UTF16LE }, + { "UTF-8", SQLITE_UTF8 }, /* Must be element [1] */ + { "UTF-16le", SQLITE_UTF16LE }, /* Must be element [2] */ + { "UTF-16be", SQLITE_UTF16BE }, /* Must be element [3] */ { "UTF16le", SQLITE_UTF16LE }, - { "UTF-16be", SQLITE_UTF16BE }, { "UTF16be", SQLITE_UTF16BE }, { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */ { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */ @@ -982,15 +1212,13 @@ if( !zRight ){ /* "PRAGMA encoding" */ if( sqlite3ReadSchema(pParse) ) goto pragma_out; sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", P3_STATIC); - sqlite3VdbeAddOp(v, OP_String8, 0, 0); - for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ - if( pEnc->enc==ENC(pParse->db) ){ - sqlite3VdbeChangeP3(v, -1, pEnc->zName, P3_STATIC); - break; - } - } - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", SQLITE_STATIC); + sqlite3VdbeAddOp2(v, OP_String8, 0, 1); + assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 ); + assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE ); + assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE ); + sqlite3VdbeChangeP4(v, -1, encnames[ENC(pParse->db)].zName, P4_STATIC); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); }else{ /* "PRAGMA encoding = XXX" */ /* Only change the value of sqlite.enc if the database handle is not ** initialized. If the main database exists, the new sqlite.enc value @@ -1045,28 +1273,26 @@ || sqlite3StrICmp(zLeft, "user_version")==0 || sqlite3StrICmp(zLeft, "freelist_count")==0 ){ - - int iCookie; /* Cookie index. 0 for schema-cookie, 6 for user-cookie. */ + int iCookie; /* Cookie index. 1 for schema-cookie, 6 for user-cookie. 
*/ + sqlite3VdbeUsesBtree(v, iDb); switch( zLeft[0] ){ - case 's': case 'S': - iCookie = 0; - break; case 'f': case 'F': - iCookie = 1; - iDb = (-1*(iDb+1)); - assert(iDb<=0); + iCookie = BTREE_FREE_PAGE_COUNT; + break; + case 's': case 'S': + iCookie = BTREE_SCHEMA_VERSION; break; default: - iCookie = 5; + iCookie = BTREE_USER_VERSION; break; } - if( zRight && iDb>=0 ){ + if( zRight && iCookie!=BTREE_FREE_PAGE_COUNT ){ /* Write the specified cookie value */ static const VdbeOpList setCookie[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ - { OP_Integer, 0, 0, 0}, /* 1 */ - { OP_SetCookie, 0, 0, 0}, /* 2 */ + { OP_Integer, 0, 1, 0}, /* 1 */ + { OP_SetCookie, 0, 0, 1}, /* 2 */ }; int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie); sqlite3VdbeChangeP1(v, addr, iDb); @@ -1076,14 +1302,14 @@ }else{ /* Read the specified cookie value */ static const VdbeOpList readCookie[] = { - { OP_ReadCookie, 0, 0, 0}, /* 0 */ - { OP_Callback, 1, 0, 0} + { OP_ReadCookie, 0, 1, 0}, /* 0 */ + { OP_ResultRow, 1, 1, 0} }; int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie); sqlite3VdbeChangeP1(v, addr, iDb); - sqlite3VdbeChangeP2(v, addr, iCookie); + sqlite3VdbeChangeP3(v, addr, iCookie); sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, P3_TRANSIENT); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); } }else #endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ @@ -1097,42 +1323,52 @@ "unlocked", "shared", "reserved", "pending", "exclusive" }; int i; - Vdbe *v = sqlite3GetVdbe(pParse); sqlite3VdbeSetNumCols(v, 2); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", P3_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", P3_STATIC); + pParse->nMem = 2; + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", SQLITE_STATIC); + sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", SQLITE_STATIC); for(i=0; inDb; i++){ Btree *pBt; Pager *pPager; + const char *zState = "unknown"; + int j; if( db->aDb[i].zName==0 ) continue; - sqlite3VdbeOp3(v, OP_String8, 0, 0, db->aDb[i].zName, P3_STATIC); + sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, db->aDb[i].zName, P4_STATIC); pBt = db->aDb[i].pBt; if( pBt==0 || (pPager = sqlite3BtreePager(pBt))==0 ){ - sqlite3VdbeOp3(v, OP_String8, 0, 0, "closed", P3_STATIC); - }else{ - int j = sqlite3PagerLockstate(pPager); - sqlite3VdbeOp3(v, OP_String8, 0, 0, - (j>=0 && j<=4) ? azLockName[j] : "unknown", P3_STATIC); + zState = "closed"; + }else if( sqlite3_file_control(db, i ? db->aDb[i].zName : 0, + SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){ + zState = azLockName[j]; } - sqlite3VdbeAddOp(v, OP_Callback, 2, 0); + sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, zState, P4_STATIC); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); } - }else -#endif -#ifdef SQLITE_SSE - /* - ** Check to see if the sqlite_statements table exists. Create it - ** if it does not. 
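The encoding pragma above relies on the encnames[] table being laid out so that the array index of each canonical entry equals the numeric encoding constant, which is exactly what the three asserts check before indexing with ENC(db). A small standalone sketch of that index-equals-constant layout; the DEMO_* constants and names are assumptions made for the sketch, not SQLite's values.

#include <assert.h>
#include <stdio.h>

#define DEMO_UTF8     1
#define DEMO_UTF16LE  2
#define DEMO_UTF16BE  3

/* Table laid out so that the index of the canonical entry equals the
** numeric constant; entry 0 is a placeholder so the indexes line up. */
static const struct {
  const char *zName;
  unsigned char enc;
} encnames[] = {
  { "any",      0            },
  { "UTF-8",    DEMO_UTF8    },
  { "UTF-16le", DEMO_UTF16LE },
  { "UTF-16be", DEMO_UTF16BE },
};

static const char *encodingName(int enc){
  assert( encnames[DEMO_UTF8].enc==DEMO_UTF8 );
  assert( encnames[DEMO_UTF16LE].enc==DEMO_UTF16LE );
  assert( encnames[DEMO_UTF16BE].enc==DEMO_UTF16BE );
  return encnames[enc].zName;
}

int main(void){
  printf("%s\n", encodingName(DEMO_UTF16LE));
  return 0;
}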
- */ - if( sqlite3StrICmp(zLeft, "create_sqlite_statement_table")==0 ){ - extern int sqlite3CreateStatementsTable(Parse*); - sqlite3CreateStatementsTable(pParse); }else #endif #if SQLITE_HAS_CODEC - if( sqlite3StrICmp(zLeft, "key")==0 ){ - sqlite3_key(db, zRight, strlen(zRight)); + if( sqlite3StrICmp(zLeft, "key")==0 && zRight ){ + sqlite3_key(db, zRight, sqlite3Strlen30(zRight)); + }else + if( sqlite3StrICmp(zLeft, "rekey")==0 && zRight ){ + sqlite3_rekey(db, zRight, sqlite3Strlen30(zRight)); + }else + if( zRight && (sqlite3StrICmp(zLeft, "hexkey")==0 || + sqlite3StrICmp(zLeft, "hexrekey")==0) ){ + int i, h1, h2; + char zKey[40]; + for(i=0; (h1 = zRight[i])!=0 && (h2 = zRight[i+1])!=0; i+=2){ + h1 += 9*(1&(h1>>6)); + h2 += 9*(1&(h2>>6)); + zKey[i/2] = (h2 & 0x0f) | ((h1 & 0xf)<<4); + } + if( (zLeft[3] & 0xf)==0xb ){ + sqlite3_key(db, zKey, i/2); + }else{ + sqlite3_rekey(db, zKey, i/2); + } }else #endif #if SQLITE_HAS_CODEC || defined(SQLITE_ENABLE_CEROD) @@ -1149,32 +1385,31 @@ sqlite3_activate_cerod(&zRight[6]); } #endif - } + }else #endif - {} + + {/* Empty ELSE clause */} - if( v ){ - /* Code an OP_Expire at the end of each PRAGMA program to cause - ** the VDBE implementing the pragma to expire. Most (all?) pragmas - ** are only valid for a single execution. - */ - sqlite3VdbeAddOp(v, OP_Expire, 1, 0); + /* Code an OP_Expire at the end of each PRAGMA program to cause + ** the VDBE implementing the pragma to expire. Most (all?) pragmas + ** are only valid for a single execution. + */ + sqlite3VdbeAddOp2(v, OP_Expire, 1, 0); - /* - ** Reset the safety level, in case the fullfsync flag or synchronous - ** setting changed. - */ + /* + ** Reset the safety level, in case the fullfsync flag or synchronous + ** setting changed. + */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS - if( db->autoCommit ){ - sqlite3BtreeSetSafetyLevel(pDb->pBt, pDb->safety_level, - (db->flags&SQLITE_FullFSync)!=0); - } -#endif + if( db->autoCommit ){ + sqlite3BtreeSetSafetyLevel(pDb->pBt, pDb->safety_level, + (db->flags&SQLITE_FullFSync)!=0); } +#endif pragma_out: - sqliteFree(zLeft); - sqliteFree(zRight); + sqlite3DbFree(db, zLeft); + sqlite3DbFree(db, zRight); } -#endif /* SQLITE_OMIT_PRAGMA || SQLITE_OMIT_PARSER */ +#endif /* SQLITE_OMIT_PRAGMA */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/prepare.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/prepare.c --- sqlite3-3.4.2/src/prepare.c 2007-08-13 15:43:47.000000000 +0100 +++ sqlite3-3.6.16/src/prepare.c 2009-06-25 17:12:11.000000000 +0100 @@ -13,22 +13,30 @@ ** interface, and routines that contribute to loading the database schema ** from disk. ** -** $Id: prepare.c,v 1.52 2007/08/13 14:41:19 danielk1977 Exp $ +** $Id: prepare.c,v 1.125 2009/06/25 11:50:21 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include /* ** Fill the InitData structure with an error message that indicates ** that the database is corrupt. */ -static void corruptSchema(InitData *pData, const char *zExtra){ - if( !sqlite3MallocFailed() ){ - sqlite3SetString(pData->pzErrMsg, "malformed database schema", - zExtra!=0 && zExtra[0]!=0 ? 
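
The new hexkey/hexrekey branch above turns an ASCII hex string into key bytes without a lookup table: for 'A'-'F' and 'a'-'f' bit 6 of the character code is set, so adding 9 moves the low nibble from 1..6 up to 10..15, and masking with 0x0f then yields the digit value. A small self-contained sketch of the same trick; hexDigitValue is a hypothetical helper name, and like the pragma code it assumes the input really is hexadecimal.

    #include <stdio.h>

    static int hexDigitValue(int c){
      c += 9*(1&(c>>6));   /* '0'-'9' unchanged; 'A'-'F'/'a'-'f' gain 9 */
      return c & 0x0f;
    }

    int main(void){
      const char *zHex = "2Fa0";
      int i;
      for(i=0; zHex[i] && zHex[i+1]; i+=2){
        int byte = (hexDigitValue(zHex[i])<<4) | hexDigitValue(zHex[i+1]);
        printf("%02x ", byte);   /* prints: 2f a0 */
      }
      printf("\n");
      return 0;
    }
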
" - " : (char*)0, zExtra, (char*)0); +static void corruptSchema( + InitData *pData, /* Initialization context */ + const char *zObj, /* Object being parsed at the point of error */ + const char *zExtra /* Error information */ +){ + sqlite3 *db = pData->db; + if( !db->mallocFailed && (db->flags & SQLITE_RecoveryMode)==0 ){ + if( zObj==0 ) zObj = "?"; + sqlite3SetString(pData->pzErrMsg, pData->db, + "malformed database schema (%s)", zObj); + if( zExtra ){ + *pData->pzErrMsg = sqlite3MAppendf(pData->db, *pData->pzErrMsg, "%s - %s", + *pData->pzErrMsg, zExtra); + } } - pData->rc = SQLITE_CORRUPT; + pData->rc = db->mallocFailed ? SQLITE_NOMEM : SQLITE_CORRUPT; } /* @@ -43,26 +51,25 @@ ** argv[2] = SQL text for the CREATE statement. ** */ -int sqlite3InitCallback(void *pInit, int argc, char **argv, char **azColName){ +int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ InitData *pData = (InitData*)pInit; sqlite3 *db = pData->db; int iDb = pData->iDb; - pData->rc = SQLITE_OK; + assert( argc==3 ); + UNUSED_PARAMETER2(NotUsed, argc); + assert( sqlite3_mutex_held(db->mutex) ); DbClearProperty(db, iDb, DB_Empty); - if( sqlite3MallocFailed() ){ - corruptSchema(pData, 0); - return SQLITE_NOMEM; + if( db->mallocFailed ){ + corruptSchema(pData, argv[0], 0); + return 1; } - assert( argc==3 ); + assert( iDb>=0 && iDbnDb ); if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ if( argv[1]==0 ){ - corruptSchema(pData, 0); - return 1; - } - assert( iDb>=0 && iDbnDb ); - if( argv[2] && argv[2][0] ){ + corruptSchema(pData, argv[0], 0); + }else if( argv[2] && argv[2][0] ){ /* Call the parser to process a CREATE TABLE, INDEX or VIEW. ** But because db->init.busy is set to 1, no VDBE code is generated ** or executed. All the parser does is build the internal data @@ -79,13 +86,14 @@ if( SQLITE_OK!=rc ){ pData->rc = rc; if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); - }else if( rc!=SQLITE_INTERRUPT ){ - corruptSchema(pData, zErr); + db->mallocFailed = 1; + }else if( rc!=SQLITE_INTERRUPT && rc!=SQLITE_LOCKED ){ + corruptSchema(pData, argv[0], zErr); } - sqlite3_free(zErr); - return 1; + sqlite3DbFree(db, zErr); } + }else if( argv[0]==0 ){ + corruptSchema(pData, 0, 0); }else{ /* If the SQL column is blank it means this is an index that ** was created to be the PRIMARY KEY or to fulfill a UNIQUE @@ -95,15 +103,15 @@ */ Index *pIndex; pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zName); - if( pIndex==0 || pIndex->tnum!=0 ){ + if( pIndex==0 ){ /* This can occur if there exists an index on a TEMP table which ** has the same name as another index on a permanent index. Since ** the permanent table is hidden by the TEMP table, we can also ** safely ignore the index on the permanent table. 
*/ /* Do Nothing */; - }else{ - pIndex->tnum = atoi(argv[1]); + }else if( sqlite3GetInt32(argv[1], &pIndex->tnum)==0 ){ + corruptSchema(pData, argv[0], "invalid rootpage"); } } return 0; @@ -119,12 +127,13 @@ */ static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){ int rc; + int i; BtCursor *curMain; int size; Table *pTab; Db *pDb; char const *azArg[4]; - int meta[10]; + int meta[5]; InitData initData; char const *zMasterSchema; char const *zMasterName = SCHEMA_TABLE(iDb); @@ -157,6 +166,8 @@ assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pSchema ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); /* zMasterSchema and zInitScript are set to point at the master schema ** and initialisation script appropriate for the database being @@ -170,39 +181,43 @@ zMasterName = SCHEMA_TABLE(iDb); /* Construct the schema tables. */ - sqlite3SafetyOff(db); azArg[0] = zMasterName; azArg[1] = "1"; azArg[2] = zMasterSchema; azArg[3] = 0; initData.db = db; initData.iDb = iDb; + initData.rc = SQLITE_OK; initData.pzErrMsg = pzErrMsg; - rc = sqlite3InitCallback(&initData, 3, (char **)azArg, 0); - if( rc ){ - sqlite3SafetyOn(db); - return initData.rc; + (void)sqlite3SafetyOff(db); + sqlite3InitCallback(&initData, 3, (char **)azArg, 0); + (void)sqlite3SafetyOn(db); + if( initData.rc ){ + rc = initData.rc; + goto error_out; } pTab = sqlite3FindTable(db, zMasterName, db->aDb[iDb].zName); - if( pTab ){ - pTab->readOnly = 1; + if( ALWAYS(pTab) ){ + pTab->tabFlags |= TF_Readonly; } - sqlite3SafetyOn(db); /* Create a cursor to hold the database open */ pDb = &db->aDb[iDb]; if( pDb->pBt==0 ){ - if( !OMIT_TEMPDB && iDb==1 ){ + if( !OMIT_TEMPDB && ALWAYS(iDb==1) ){ DbSetProperty(db, 1, DB_SchemaLoaded); } return SQLITE_OK; } - rc = sqlite3BtreeCursor(pDb->pBt, MASTER_ROOT, 0, 0, 0, &curMain); - if( rc!=SQLITE_OK && rc!=SQLITE_EMPTY ){ - sqlite3SetString(pzErrMsg, sqlite3ErrStr(rc), (char*)0); - return rc; + curMain = sqlite3MallocZero(sqlite3BtreeCursorSize()); + if( !curMain ){ + rc = SQLITE_NOMEM; + goto error_out; } + sqlite3BtreeEnter(pDb->pBt); + rc = sqlite3BtreeCursor(pDb->pBt, MASTER_ROOT, 0, 0, curMain); + if( rc==SQLITE_EMPTY ) rc = SQLITE_OK; /* Get the database meta information. ** @@ -210,49 +225,46 @@ ** meta[0] Schema cookie. Changes with each schema change. ** meta[1] File format of schema layer. ** meta[2] Size of the page cache. - ** meta[3] Use freelist if 0. Autovacuum if greater than zero. + ** meta[3] Largest rootpage (auto/incr_vacuum mode) ** meta[4] Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE - ** meta[5] The user cookie. Used by the application. - ** meta[6] Incremental-vacuum flag. - ** meta[7] - ** meta[8] - ** meta[9] + ** meta[5] User version + ** meta[6] Incremental vacuum mode + ** meta[7] unused + ** meta[8] unused + ** meta[9] unused ** ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to ** the possible values of meta[4]. 
*/ - if( rc==SQLITE_OK ){ - int i; - for(i=0; rc==SQLITE_OK && ipBt, i+1, (u32 *)&meta[i]); - } - if( rc ){ - sqlite3SetString(pzErrMsg, sqlite3ErrStr(rc), (char*)0); - sqlite3BtreeCloseCursor(curMain); - return rc; - } - }else{ - memset(meta, 0, sizeof(meta)); + for(i=0; rc==SQLITE_OK && ipBt, i+1, (u32 *)&meta[i]); } - pDb->pSchema->schema_cookie = meta[0]; + if( rc ){ + sqlite3SetString(pzErrMsg, db, "%s", sqlite3ErrStr(rc)); + goto initone_error_out; + } + pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1]; /* If opening a non-empty database, check the text encoding. For the ** main database, set sqlite3.enc to the encoding of the main database. ** For an attached db, it is an error if the encoding is not the same ** as sqlite3.enc. */ - if( meta[4] ){ /* text encoding */ + if( meta[BTREE_TEXT_ENCODING-1] ){ /* text encoding */ if( iDb==0 ){ + u8 encoding; /* If opening the main database, set ENC(db). */ - ENC(db) = (u8)meta[4]; - db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 6, 0); + encoding = (u8)meta[BTREE_TEXT_ENCODING-1] & 3; + if( encoding==0 ) encoding = SQLITE_UTF8; + ENC(db) = encoding; + db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0); }else{ /* If opening an attached database, the encoding much match ENC(db) */ - if( meta[4]!=ENC(db) ){ - sqlite3BtreeCloseCursor(curMain); - sqlite3SetString(pzErrMsg, "attached databases must use the same" - " text encoding as main database", (char*)0); - return SQLITE_ERROR; + if( meta[BTREE_TEXT_ENCODING-1]!=ENC(db) ){ + sqlite3SetString(pzErrMsg, db, "attached databases must use the same" + " text encoding as main database"); + rc = SQLITE_ERROR; + goto initone_error_out; } } }else{ @@ -260,10 +272,13 @@ } pDb->pSchema->enc = ENC(db); - size = meta[2]; - if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; } - pDb->pSchema->cache_size = size; - sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); + if( pDb->pSchema->cache_size==0 ){ + size = meta[BTREE_DEFAULT_CACHE_SIZE-1]; + if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; } + if( size<0 ) size = -size; + pDb->pSchema->cache_size = size; + sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); + } /* ** file_format==1 Version 3.0.0. @@ -271,57 +286,84 @@ ** file_format==3 Version 3.1.4. // ditto but with non-NULL defaults ** file_format==4 Version 3.3.0. // DESC indices. Boolean constants */ - pDb->pSchema->file_format = meta[1]; + pDb->pSchema->file_format = (u8)meta[BTREE_FILE_FORMAT-1]; if( pDb->pSchema->file_format==0 ){ pDb->pSchema->file_format = 1; } if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){ - sqlite3BtreeCloseCursor(curMain); - sqlite3SetString(pzErrMsg, "unsupported file format", (char*)0); - return SQLITE_ERROR; + sqlite3SetString(pzErrMsg, db, "unsupported file format"); + rc = SQLITE_ERROR; + goto initone_error_out; } + /* Ticket #2804: When we open a database in the newer file format, + ** clear the legacy_file_format pragma flag so that a VACUUM will + ** not downgrade the database and thus invalidate any descending + ** indices that the user might have created. 
+ */ + if( iDb==0 && meta[BTREE_FILE_FORMAT-1]>=4 ){ + db->flags &= ~SQLITE_LegacyFileFmt; + } /* Read the schema information out of the schema tables */ assert( db->init.busy ); - if( rc==SQLITE_EMPTY ){ - /* For an empty database, there is nothing to read */ - rc = SQLITE_OK; - }else{ + { char *zSql; - zSql = sqlite3MPrintf( + zSql = sqlite3MPrintf(db, "SELECT name, rootpage, sql FROM '%q'.%s", db->aDb[iDb].zName, zMasterName); - sqlite3SafetyOff(db); - rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); - if( rc==SQLITE_ABORT ) rc = initData.rc; - sqlite3SafetyOn(db); - sqliteFree(zSql); + (void)sqlite3SafetyOff(db); +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); + xAuth = db->xAuth; + db->xAuth = 0; +#endif + rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; + } +#endif + if( rc==SQLITE_OK ) rc = initData.rc; + (void)sqlite3SafetyOn(db); + sqlite3DbFree(db, zSql); #ifndef SQLITE_OMIT_ANALYZE if( rc==SQLITE_OK ){ sqlite3AnalysisLoad(db, iDb); } #endif - sqlite3BtreeCloseCursor(curMain); } - if( sqlite3MallocFailed() ){ - /* sqlite3SetString(pzErrMsg, "out of memory", (char*)0); */ + if( db->mallocFailed ){ rc = SQLITE_NOMEM; sqlite3ResetInternalSchema(db, 0); } if( rc==SQLITE_OK || (db->flags&SQLITE_RecoveryMode)){ /* Black magic: If the SQLITE_RecoveryMode flag is set, then consider - ** the schema loaded, even if errors occured. In this situation the + ** the schema loaded, even if errors occurred. In this situation the ** current sqlite3_prepare() operation will fail, but the following one ** will attempt to compile the supplied statement against whatever subset - ** of the schema was loaded before the error occured. The primary + ** of the schema was loaded before the error occurred. The primary ** purpose of this is to allow access to the sqlite_master table - ** even when it's contents have been corrupted. + ** even when its contents have been corrupted. */ DbSetProperty(db, iDb, DB_SchemaLoaded); rc = SQLITE_OK; } + + /* Jump here for an error that occurs after successfully allocating + ** curMain and calling sqlite3BtreeEnter(). For an error that occurs + ** before that point, jump to error_out. + */ +initone_error_out: + sqlite3BtreeCloseCursor(curMain); + sqlite3_free(curMain); + sqlite3BtreeLeave(pDb->pBt); + +error_out: + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; + } return rc; } @@ -339,7 +381,7 @@ int i, rc; int commit_internal = !(db->flags&SQLITE_InternChanges); - if( db->init.busy ) return SQLITE_OK; + assert( sqlite3_mutex_held(db->mutex) ); rc = SQLITE_OK; db->init.busy = 1; for(i=0; rc==SQLITE_OK && inDb; i++){ @@ -355,7 +397,8 @@ ** schema may contain references to objects in other databases. 
*/ #ifndef SQLITE_OMIT_TEMPDB - if( rc==SQLITE_OK && db->nDb>1 && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ + if( rc==SQLITE_OK && ALWAYS(db->nDb>1) + && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ rc = sqlite3InitOne(db, 1, pzErrMsg); if( rc ){ sqlite3ResetInternalSchema(db, 1); @@ -378,6 +421,7 @@ int sqlite3ReadSchema(Parse *pParse){ int rc = SQLITE_OK; sqlite3 *db = pParse->db; + assert( sqlite3_mutex_held(db->mutex) ); if( !db->init.busy ){ rc = sqlite3Init(db, &pParse->zErrMsg); } @@ -400,19 +444,33 @@ int cookie; int allOk = 1; - for(iDb=0; allOk && iDbnDb; iDb++){ - Btree *pBt; - pBt = db->aDb[iDb].pBt; - if( pBt==0 ) continue; - rc = sqlite3BtreeCursor(pBt, MASTER_ROOT, 0, 0, 0, &curTemp); - if( rc==SQLITE_OK ){ - rc = sqlite3BtreeGetMeta(pBt, 1, (u32 *)&cookie); - if( rc==SQLITE_OK && cookie!=db->aDb[iDb].pSchema->schema_cookie ){ - allOk = 0; + curTemp = (BtCursor *)sqlite3Malloc(sqlite3BtreeCursorSize()); + if( curTemp ){ + assert( sqlite3_mutex_held(db->mutex) ); + for(iDb=0; allOk && iDbnDb; iDb++){ + Btree *pBt; + pBt = db->aDb[iDb].pBt; + if( pBt==0 ) continue; + memset(curTemp, 0, sqlite3BtreeCursorSize()); + rc = sqlite3BtreeCursor(pBt, MASTER_ROOT, 0, 0, curTemp); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); + if( ALWAYS(rc==SQLITE_OK) + && cookie!=db->aDb[iDb].pSchema->schema_cookie ){ + allOk = 0; + } + sqlite3BtreeCloseCursor(curTemp); + } + if( NEVER(rc==SQLITE_NOMEM) || rc==SQLITE_IOERR_NOMEM ){ + db->mallocFailed = 1; } - sqlite3BtreeCloseCursor(curTemp); } + sqlite3_free(curTemp); + }else{ + allOk = 0; + db->mallocFailed = 1; } + return allOk; } @@ -432,17 +490,18 @@ ** function should never be used. ** ** We return -1000000 instead of the more usual -1 simply because using - ** -1000000 as incorrectly using -1000000 index into db->aDb[] is much + ** -1000000 as the incorrect index into db->aDb[] is much ** more likely to cause a segfault than -1 (of course there are assert() ** statements too, but it never hurts to play the odds). */ + assert( sqlite3_mutex_held(db->mutex) ); if( pSchema ){ - for(i=0; inDb; i++){ + for(i=0; ALWAYS(inDb); i++){ if( db->aDb[i].pSchema==pSchema ){ break; } } - assert( i>=0 &&i>=0 && inDb ); + assert( i>=0 && inDb ); } return i; } @@ -450,7 +509,7 @@ /* ** Compile the UTF-8 encoded SQL statement zSql into a statement handle. */ -int sqlite3Prepare( +static int sqlite3Prepare( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ @@ -458,82 +517,126 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ - Parse sParse; - char *zErrMsg = 0; - int rc = SQLITE_OK; - int i; - - /* Assert that malloc() has not failed */ - assert( !sqlite3MallocFailed() ); + Parse *pParse; /* Parsing context */ + char *zErrMsg = 0; /* Error message */ + int rc = SQLITE_OK; /* Result code */ + int i; /* Loop counter */ + + /* Allocate the parsing context */ + pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); + if( pParse==0 ){ + rc = SQLITE_NOMEM; + goto end_prepare; + } - assert( ppStmt ); - *ppStmt = 0; if( sqlite3SafetyOn(db) ){ - return SQLITE_MISUSE; + rc = SQLITE_MISUSE; + goto end_prepare; } - - /* If any attached database schemas are locked, do not proceed with - ** compilation. Instead return SQLITE_LOCKED immediately. 
+ assert( ppStmt && *ppStmt==0 ); + assert( !db->mallocFailed ); + assert( sqlite3_mutex_held(db->mutex) ); + + /* Check to verify that it is possible to get a read lock on all + ** database schemas. The inability to get a read lock indicates that + ** some other database connection is holding a write-lock, which in + ** turn means that the other connection has made uncommitted changes + ** to the schema. + ** + ** Were we to proceed and prepare the statement against the uncommitted + ** schema changes and if those schema changes are subsequently rolled + ** back and different changes are made in their place, then when this + ** prepared statement goes to run the schema cookie would fail to detect + ** the schema change. Disaster would follow. + ** + ** This thread is currently holding mutexes on all Btrees (because + ** of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it + ** is not possible for another thread to start a new schema change + ** while this routine is running. Hence, we do not need to hold + ** locks on the schema, we just need to make sure nobody else is + ** holding them. + ** + ** Note that setting READ_UNCOMMITTED overrides most lock detection, + ** but it does *not* override schema lock detection, so this all still + ** works even if READ_UNCOMMITTED is set. */ for(i=0; inDb; i++) { Btree *pBt = db->aDb[i].pBt; - if( pBt && sqlite3BtreeSchemaLocked(pBt) ){ - const char *zDb = db->aDb[i].zName; - sqlite3Error(db, SQLITE_LOCKED, "database schema is locked: %s", zDb); - sqlite3SafetyOff(db); - return SQLITE_LOCKED; + if( pBt ){ + assert( sqlite3BtreeHoldsMutex(pBt) ); + rc = sqlite3BtreeSchemaLocked(pBt); + if( rc ){ + const char *zDb = db->aDb[i].zName; + sqlite3Error(db, rc, "database schema is locked: %s", zDb); + (void)sqlite3SafetyOff(db); + testcase( db->flags & SQLITE_ReadUncommitted ); + goto end_prepare; + } } } - - memset(&sParse, 0, sizeof(sParse)); - sParse.db = db; - if( nBytes>=0 && zSql[nBytes]!=0 ){ + + + pParse->db = db; + if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; - if( nBytes>SQLITE_MAX_SQL_LENGTH ){ - return SQLITE_TOOBIG; + int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; + testcase( nBytes==mxLen ); + testcase( nBytes==mxLen+1 ); + if( nBytes>mxLen ){ + sqlite3Error(db, SQLITE_TOOBIG, "statement too long"); + (void)sqlite3SafetyOff(db); + rc = sqlite3ApiExit(db, SQLITE_TOOBIG); + goto end_prepare; } - zSqlCopy = sqlite3StrNDup(zSql, nBytes); + zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes); if( zSqlCopy ){ - sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg); - sqliteFree(zSqlCopy); + sqlite3RunParser(pParse, zSqlCopy, &zErrMsg); + sqlite3DbFree(db, zSqlCopy); + pParse->zTail = &zSql[pParse->zTail-zSqlCopy]; + }else{ + pParse->zTail = &zSql[nBytes]; } - sParse.zTail = &zSql[nBytes]; }else{ - sqlite3RunParser(&sParse, zSql, &zErrMsg); + sqlite3RunParser(pParse, zSql, &zErrMsg); } - if( sqlite3MallocFailed() ){ - sParse.rc = SQLITE_NOMEM; + if( db->mallocFailed ){ + pParse->rc = SQLITE_NOMEM; } - if( sParse.rc==SQLITE_DONE ) sParse.rc = SQLITE_OK; - if( sParse.checkSchema && !schemaIsValid(db) ){ - sParse.rc = SQLITE_SCHEMA; + if( pParse->rc==SQLITE_DONE ) pParse->rc = SQLITE_OK; + if( pParse->checkSchema && !schemaIsValid(db) ){ + pParse->rc = SQLITE_SCHEMA; } - if( sParse.rc==SQLITE_SCHEMA ){ + if( pParse->rc==SQLITE_SCHEMA ){ sqlite3ResetInternalSchema(db, 0); } - if( sqlite3MallocFailed() ){ - sParse.rc = SQLITE_NOMEM; + if( db->mallocFailed ){ + pParse->rc = SQLITE_NOMEM; } if( pzTail ){ - *pzTail = sParse.zTail; + 
*pzTail = pParse->zTail; } - rc = sParse.rc; + rc = pParse->rc; #ifndef SQLITE_OMIT_EXPLAIN - if( rc==SQLITE_OK && sParse.pVdbe && sParse.explain ){ - if( sParse.explain==2 ){ - sqlite3VdbeSetNumCols(sParse.pVdbe, 3); - sqlite3VdbeSetColName(sParse.pVdbe, 0, COLNAME_NAME, "order", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 1, COLNAME_NAME, "from", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 2, COLNAME_NAME, "detail", P3_STATIC); + if( rc==SQLITE_OK && pParse->pVdbe && pParse->explain ){ + static const char * const azColName[] = { + "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", + "order", "from", "detail" + }; + int iFirst, mx; + if( pParse->explain==2 ){ + sqlite3VdbeSetNumCols(pParse->pVdbe, 3); + iFirst = 8; + mx = 11; }else{ - sqlite3VdbeSetNumCols(sParse.pVdbe, 5); - sqlite3VdbeSetColName(sParse.pVdbe, 0, COLNAME_NAME, "addr", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 1, COLNAME_NAME, "opcode", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 2, COLNAME_NAME, "p1", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 3, COLNAME_NAME, "p2", P3_STATIC); - sqlite3VdbeSetColName(sParse.pVdbe, 4, COLNAME_NAME, "p3", P3_STATIC); + sqlite3VdbeSetNumCols(pParse->pVdbe, 8); + iFirst = 0; + mx = 8; + } + for(i=iFirst; ipVdbe, i-iFirst, COLNAME_NAME, + azColName[i], SQLITE_STATIC); } } #endif @@ -542,57 +645,92 @@ rc = SQLITE_MISUSE; } - if( saveSqlFlag ){ - sqlite3VdbeSetSql(sParse.pVdbe, zSql, sParse.zTail - zSql); + assert( db->init.busy==0 || saveSqlFlag==0 ); + if( db->init.busy==0 ){ + Vdbe *pVdbe = pParse->pVdbe; + sqlite3VdbeSetSql(pVdbe, zSql, (int)(pParse->zTail-zSql), saveSqlFlag); } - if( rc!=SQLITE_OK || sqlite3MallocFailed() ){ - sqlite3_finalize((sqlite3_stmt*)sParse.pVdbe); + if( pParse->pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){ + sqlite3VdbeFinalize(pParse->pVdbe); assert(!(*ppStmt)); }else{ - *ppStmt = (sqlite3_stmt*)sParse.pVdbe; + *ppStmt = (sqlite3_stmt*)pParse->pVdbe; } if( zErrMsg ){ sqlite3Error(db, rc, "%s", zErrMsg); - sqliteFree(zErrMsg); + sqlite3DbFree(db, zErrMsg); }else{ sqlite3Error(db, rc, 0); } +end_prepare: + + sqlite3StackFree(db, pParse); rc = sqlite3ApiExit(db, rc); - sqlite3ReleaseThreadData(); assert( (rc&db->errMask)==rc ); return rc; } +static int sqlite3LockAndPrepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + int rc; + assert( ppStmt!=0 ); + *ppStmt = 0; + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE; + } + sqlite3_mutex_enter(db->mutex); + sqlite3BtreeEnterAll(db); + rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, ppStmt, pzTail); + if( rc==SQLITE_SCHEMA ){ + sqlite3_finalize(*ppStmt); + rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, ppStmt, pzTail); + } + sqlite3BtreeLeaveAll(db); + sqlite3_mutex_leave(db->mutex); + return rc; +} /* ** Rerun the compilation of a statement after a schema change. -** Return true if the statement was recompiled successfully. -** Return false if there is an error of some kind. +** +** If the statement is successfully recompiled, return SQLITE_OK. Otherwise, +** if the statement cannot be recompiled because another connection has +** locked the sqlite3_master table, return SQLITE_LOCKED. If any other error +** occurs, return SQLITE_SCHEMA. 
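
The saveSqlFlag plumbing and the new sqlite3LockAndPrepare()/sqlite3Reprepare() pair above are what let sqlite3_prepare_v2() statements survive schema changes: because the SQL text is retained, the statement can be recompiled internally instead of returning SQLITE_SCHEMA to the caller. A minimal usage sketch against the public API, assuming the SQLite library is linked; the in-memory database is used purely for illustration.

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_open(":memory:", &db);
      if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      if( rc==SQLITE_OK ){
        /* prepare_v2 keeps a copy of the SQL, enabling transparent reprepare */
        rc = sqlite3_prepare_v2(db, "SELECT count(*) FROM t", -1, &pStmt, 0);
      }
      if( rc==SQLITE_OK ){
        while( sqlite3_step(pStmt)==SQLITE_ROW ){
          printf("count = %d\n", sqlite3_column_int(pStmt, 0));
        }
        sqlite3_finalize(pStmt);
      }
      sqlite3_close(db);
      return rc;
    }
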
*/ int sqlite3Reprepare(Vdbe *p){ int rc; sqlite3_stmt *pNew; const char *zSql; sqlite3 *db; - - zSql = sqlite3VdbeGetSql(p); - if( zSql==0 ){ - return 0; - } + + assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) ); + zSql = sqlite3_sql((sqlite3_stmt *)p); + assert( zSql!=0 ); /* Reprepare only called for prepare_v2() statements */ db = sqlite3VdbeDb(p); - rc = sqlite3Prepare(db, zSql, -1, 0, &pNew, 0); + assert( sqlite3_mutex_held(db->mutex) ); + rc = sqlite3LockAndPrepare(db, zSql, -1, 0, &pNew, 0); if( rc ){ + if( rc==SQLITE_NOMEM ){ + db->mallocFailed = 1; + } assert( pNew==0 ); - return 0; + return (rc==SQLITE_LOCKED) ? SQLITE_LOCKED : SQLITE_SCHEMA; }else{ assert( pNew!=0 ); } sqlite3VdbeSwap((Vdbe*)pNew, p); - sqlite3_transfer_bindings(pNew, (sqlite3_stmt*)p); + sqlite3TransferBindings(pNew, (sqlite3_stmt*)p); sqlite3VdbeResetStepResult((Vdbe*)pNew); sqlite3VdbeFinalize((Vdbe*)pNew); - return 1; + return SQLITE_OK; } @@ -611,7 +749,10 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ - return sqlite3Prepare(db,zSql,nBytes,0,ppStmt,pzTail); + int rc; + rc = sqlite3LockAndPrepare(db,zSql,nBytes,0,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; } int sqlite3_prepare_v2( sqlite3 *db, /* Database handle. */ @@ -620,7 +761,10 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ - return sqlite3Prepare(db,zSql,nBytes,1,ppStmt,pzTail); + int rc; + rc = sqlite3LockAndPrepare(db,zSql,nBytes,1,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; } @@ -644,12 +788,15 @@ const char *zTail8 = 0; int rc = SQLITE_OK; - if( sqlite3SafetyCheck(db) ){ + assert( ppStmt ); + *ppStmt = 0; + if( !sqlite3SafetyCheckOk(db) ){ return SQLITE_MISUSE; } - zSql8 = sqlite3Utf16to8(zSql, nBytes); + sqlite3_mutex_enter(db->mutex); + zSql8 = sqlite3Utf16to8(db, zSql, nBytes); if( zSql8 ){ - rc = sqlite3Prepare(db, zSql8, -1, saveSqlFlag, ppStmt, &zTail8); + rc = sqlite3LockAndPrepare(db, zSql8, -1, saveSqlFlag, ppStmt, &zTail8); } if( zTail8 && pzTail ){ @@ -658,11 +805,13 @@ ** characters between zSql8 and zTail8, and then returning a pointer ** the same number of characters into the UTF-16 string. */ - int chars_parsed = sqlite3Utf8CharLen(zSql8, zTail8-zSql8); + int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8)); *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); } - sqliteFree(zSql8); - return sqlite3ApiExit(db, rc); + sqlite3DbFree(db, zSql8); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } /* @@ -680,7 +829,10 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const void **pzTail /* OUT: End of parsed string */ ){ - return sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail); + int rc; + rc = sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; } int sqlite3_prepare16_v2( sqlite3 *db, /* Database handle. 
*/ @@ -689,7 +841,10 @@ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const void **pzTail /* OUT: End of parsed string */ ){ - return sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail); + int rc; + rc = sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; } #endif /* SQLITE_OMIT_UTF16 */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/printf.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/printf.c --- sqlite3-3.4.2/src/printf.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/printf.c 2009-06-25 12:35:51.000000000 +0100 @@ -5,6 +5,8 @@ ** an historical reference. Most of the "enhancements" have been backed ** out so that the functionality is now the same as standard printf(). ** +** $Id: printf.c,v 1.104 2009/06/03 01:24:54 drh Exp $ +** ************************************************************************** ** ** The following modules is an enhanced replacement for the "printf" subroutines @@ -51,7 +53,6 @@ ** */ #include "sqliteInt.h" -#include /* ** Conversion types fall into various categories as defined by the @@ -67,14 +68,16 @@ #define etPERCENT 8 /* Percent symbol. %% */ #define etCHARX 9 /* Characters. %c */ /* The rest are extensions, not normally found in printf() */ -#define etCHARLIT 10 /* Literal characters. %' */ -#define etSQLESCAPE 11 /* Strings with '\'' doubled. %q */ -#define etSQLESCAPE2 12 /* Strings with '\'' doubled and enclosed in '', +#define etSQLESCAPE 10 /* Strings with '\'' doubled. %q */ +#define etSQLESCAPE2 11 /* Strings with '\'' doubled and enclosed in '', NULL pointers replaced by SQL NULL. %Q */ -#define etTOKEN 13 /* a pointer to a Token structure */ -#define etSRCLIST 14 /* a pointer to a SrcList */ -#define etPOINTER 15 /* The %p conversion */ -#define etSQLESCAPE3 16 /* %w -> Strings with '\"' doubled */ +#define etTOKEN 12 /* a pointer to a Token structure */ +#define etSRCLIST 13 /* a pointer to a SrcList */ +#define etPOINTER 14 /* The %p conversion */ +#define etSQLESCAPE3 15 /* %w -> Strings with '\"' doubled */ +#define etORDINAL 16 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ + +#define etINVALID 0 /* Any unrecognized conversion type */ /* @@ -113,7 +116,7 @@ { 'd', 10, 1, etRADIX, 0, 0 }, { 's', 0, 4, etSTRING, 0, 0 }, { 'g', 0, 1, etGENERIC, 30, 0 }, - { 'z', 0, 6, etDYNSTRING, 0, 0 }, + { 'z', 0, 4, etDYNSTRING, 0, 0 }, { 'q', 0, 4, etSQLESCAPE, 0, 0 }, { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, @@ -132,10 +135,13 @@ { 'n', 0, 0, etSIZE, 0, 0 }, { '%', 0, 0, etPERCENT, 0, 0 }, { 'p', 16, 0, etPOINTER, 0, 1 }, + +/* All the rest have the FLAG_INTERN bit set and are thus for internal +** use only */ { 'T', 0, 2, etTOKEN, 0, 0 }, { 'S', 0, 2, etSRCLIST, 0, 0 }, + { 'r', 10, 3, etORDINAL, 0, 0 }, }; -#define etNINFO (sizeof(fmtinfo)/sizeof(fmtinfo[0])) /* ** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point @@ -155,7 +161,7 @@ ** 16 (the number of significant digits in a 64-bit float) '0' is ** always returned. */ -static int et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ +static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ int digit; LONGDOUBLE_TYPE d; if( (*cnt)++ >= 16 ) return '0'; @@ -163,17 +169,34 @@ d = digit; digit += '0'; *val = (*val - d)*10.0; - return digit; + return (char)digit; } #endif /* SQLITE_OMIT_FLOATING_POINT */ /* +** Append N space characters to the given string buffer. 
+*/ +static void appendSpace(StrAccum *pAccum, int N){ + static const char zSpaces[] = " "; + while( N>=(int)sizeof(zSpaces)-1 ){ + sqlite3StrAccumAppend(pAccum, zSpaces, sizeof(zSpaces)-1); + N -= sizeof(zSpaces)-1; + } + if( N>0 ){ + sqlite3StrAccumAppend(pAccum, zSpaces, N); + } +} + +/* ** On machines with a small stack size, you can redefine the -** SQLITE_PRINT_BUF_SIZE to be less than 350. But beware - for -** smaller values some %f conversions may go into an infinite loop. +** SQLITE_PRINT_BUF_SIZE to be less than 350. */ #ifndef SQLITE_PRINT_BUF_SIZE -# define SQLITE_PRINT_BUF_SIZE 350 +# if defined(SQLITE_SMALL_STACK) +# define SQLITE_PRINT_BUF_SIZE 50 +# else +# define SQLITE_PRINT_BUF_SIZE 350 +# endif #endif #define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */ @@ -204,9 +227,8 @@ ** seems to make a big difference in determining how fast this beast ** will run. */ -static int vxprintf( - void (*func)(void*,const char*,int), /* Consumer of text */ - void *arg, /* First argument to the consumer */ +void sqlite3VXPrintf( + StrAccum *pAccum, /* Accumulate results here */ int useExtended, /* Allow extended %-conversions */ const char *fmt, /* Format string */ va_list ap /* arguments */ @@ -216,7 +238,6 @@ int precision; /* Precision of the current field */ int length; /* Length of the field */ int idx; /* A general purpose loop counter */ - int count; /* Total number of characters output */ int width; /* Width of the current field */ etByte flag_leftjustify; /* True if "-" flag is present */ etByte flag_plussign; /* True if "+" flag is present */ @@ -232,12 +253,8 @@ const et_info *infop; /* Pointer to the appropriate info structure */ char buf[etBUFSIZE]; /* Conversion buffer */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/ - etByte errorflag = 0; /* True if an error is encountered */ - etByte xtype; /* Conversion paradigm */ + etByte xtype = 0; /* Conversion paradigm */ char *zExtra; /* Extra memory used for etTCLESCAPE conversions */ - static const char spaces[] = - " "; -#define etSPACESIZE (sizeof(spaces)-1) #ifndef SQLITE_OMIT_FLOATING_POINT int exp, e2; /* exponent of real numbers */ double rounder; /* Used for rounding floating point values */ @@ -247,8 +264,7 @@ int nsd; /* Number of significant digits returned */ #endif - func(arg,"",0); - count = length = 0; + length = 0; bufpt = 0; for(; (c=(*fmt))!=0; ++fmt){ if( c!='%' ){ @@ -256,14 +272,11 @@ bufpt = (char *)fmt; amt = 1; while( (c=(*++fmt))!='%' && c!=0 ) amt++; - (*func)(arg,bufpt,amt); - count += amt; + sqlite3StrAccumAppend(pAccum, bufpt, amt); if( c==0 ) break; } if( (c=(*++fmt))==0 ){ - errorflag = 1; - (*func)(arg,"%",1); - count++; + sqlite3StrAccumAppend(pAccum, "%", 1); break; } /* Find out what flags are present */ @@ -330,22 +343,20 @@ flag_long = flag_longlong = 0; } /* Fetch the info entry for the field */ - infop = 0; - for(idx=0; idxflags & FLAG_INTERN)==0 ){ xtype = infop->type; }else{ - return -1; + return; } break; } } zExtra = 0; - if( infop==0 ){ - return -1; - } /* Limit the precision to prevent overflowing buf[] during conversion */ @@ -379,12 +390,17 @@ flag_longlong = sizeof(char*)==sizeof(i64); flag_long = sizeof(char*)==sizeof(long int); /* Fall through into the next case */ + case etORDINAL: case etRADIX: if( infop->flags & FLAG_SIGNED ){ i64 v; - if( flag_longlong ) v = va_arg(ap,i64); - else if( flag_long ) v = va_arg(ap,long int); - else v = va_arg(ap,int); + if( flag_longlong ){ + v = va_arg(ap,i64); + }else if( flag_long ){ + v = va_arg(ap,long int); + }else{ + v = va_arg(ap,int); + } if( v<0 ){ longvalue = -v; prefix = '-'; @@ -395,9 +411,13 @@ else prefix = 0; } }else{ - if( flag_longlong ) longvalue = va_arg(ap,u64); - else if( flag_long ) longvalue = va_arg(ap,unsigned long int); - else longvalue = va_arg(ap,unsigned int); + if( flag_longlong ){ + longvalue = va_arg(ap,u64); + }else if( flag_long ){ + longvalue = va_arg(ap,unsigned long int); + }else{ + longvalue = va_arg(ap,unsigned int); + } prefix = 0; } if( longvalue==0 ) flag_alternateform = 0; @@ -405,6 +425,16 @@ precision = width-(prefix!=0); } bufpt = &buf[etBUFSIZE-1]; + if( xtype==etORDINAL ){ + static const char zOrd[] = "thstndrd"; + int x = (int)(longvalue % 10); + if( x>=4 || (longvalue/10)%10==1 ){ + x = 0; + } + buf[etBUFSIZE-3] = zOrd[x*2]; + buf[etBUFSIZE-2] = zOrd[x*2+1]; + bufpt -= 2; + } { register const char *cset; /* Use registers for speed */ register int base; @@ -415,7 +445,7 @@ longvalue = longvalue/base; }while( longvalue>0 ); } - length = &buf[etBUFSIZE-1]-bufpt; + length = (int)(&buf[etBUFSIZE-1]-bufpt); for(idx=precision-length; idx>0; idx--){ *(--bufpt) = '0'; /* Zero pad */ } @@ -424,11 +454,9 @@ const char *pre; char x; pre = &aPrefix[infop->prefix]; - if( *bufpt!=pre[0] ){ - for(; (x=(*pre))!=0; pre++) *(--bufpt) = x; - } + for(; (x=(*pre))!=0; pre++) *(--bufpt) = x; } - length = &buf[etBUFSIZE-1]-bufpt; + length = (int)(&buf[etBUFSIZE-1]-bufpt); break; case etFLOAT: case etEXP: @@ -456,7 +484,7 @@ if( xtype==etFLOAT ) realvalue += rounder; /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ exp = 0; - if( sqlite3_isnan(realvalue) ){ + if( sqlite3IsNaN((double)realvalue) ){ bufpt = "NaN"; length = 3; break; @@ -465,9 +493,9 @@ while( realvalue>=1e32 && exp<=350 ){ realvalue *= 1e-32; exp+=32; } while( 
realvalue>=1e8 && exp<=350 ){ realvalue *= 1e-8; exp+=8; } while( realvalue>=10.0 && exp<=350 ){ realvalue *= 0.1; exp++; } - while( realvalue<1e-8 && exp>=-350 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 && exp>=-350 ){ realvalue *= 10.0; exp--; } - if( exp>350 || exp<-350 ){ + while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } + while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } + if( exp>350 ){ if( prefix=='-' ){ bufpt = "-Inf"; }else if( prefix=='+' ){ @@ -475,7 +503,7 @@ }else{ bufpt = "Inf"; } - length = strlen(bufpt); + length = sqlite3Strlen30(bufpt); break; } } @@ -506,7 +534,7 @@ e2 = exp; } nsd = 0; - flag_dp = (precision>0) | flag_alternateform | flag_altform2; + flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ *(bufpt++) = prefix; @@ -525,7 +553,8 @@ } /* "0" digits after the decimal point but before the first ** significant digit of the number */ - for(e2++; e2<0 && precision>0; precision--, e2++){ + for(e2++; e2<0; precision--, e2++){ + assert( precision>0 ); *(bufpt++) = '0'; } /* Significant digits after the decimal point */ @@ -545,7 +574,7 @@ } } /* Add the "eNNN" suffix */ - if( flag_exp || (xtype==etEXP && exp) ){ + if( flag_exp || xtype==etEXP ){ *(bufpt++) = aDigits[infop->charset]; if( exp<0 ){ *(bufpt++) = '-'; exp = -exp; @@ -553,18 +582,18 @@ *(bufpt++) = '+'; } if( exp>=100 ){ - *(bufpt++) = (exp/100)+'0'; /* 100's digit */ + *(bufpt++) = (char)((exp/100)+'0'); /* 100's digit */ exp %= 100; } - *(bufpt++) = exp/10+'0'; /* 10's digit */ - *(bufpt++) = exp%10+'0'; /* 1's digit */ + *(bufpt++) = (char)(exp/10+'0'); /* 10's digit */ + *(bufpt++) = (char)(exp%10+'0'); /* 1's digit */ } *bufpt = 0; /* The converted number is in buf[] and zero terminated. Output it. ** Note that the number is in the usual order, not reversed as with ** integer conversions. */ - length = bufpt-buf; + length = (int)(bufpt-buf); bufpt = buf; /* Special case: Add leading zeros if the flag_zeropad flag is @@ -582,7 +611,7 @@ #endif break; case etSIZE: - *(va_arg(ap,int*)) = count; + *(va_arg(ap,int*)) = pAccum->nChar; length = width = 0; break; case etPERCENT: @@ -590,11 +619,11 @@ bufpt = buf; length = 1; break; - case etCHARLIT: case etCHARX: - c = buf[0] = (xtype==etCHARX ? 
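
The %r (etORDINAL) conversion added a few hunks above picks the English ordinal suffix from the packed string "thstndrd": the last decimal digit selects st/nd/rd, while 0, 4..9 and the teens fall back to th. The same selection logic in isolation; ordinalSuffix is a hypothetical helper name, and non-negative input is assumed, as in the formatter.

    #include <stdio.h>

    static const char *ordinalSuffix(long n){
      static const char *azSuf[] = { "th", "st", "nd", "rd" };
      int x = (int)(n % 10);
      if( x>=4 || (n/10)%10==1 ) x = 0;   /* 4th..9th, 10th, 11th..13th, 20th, ... */
      return azSuf[x];
    }

    int main(void){
      long aTest[] = { 1, 2, 3, 4, 11, 12, 13, 21, 101 };
      int i;
      for(i=0; i<(int)(sizeof(aTest)/sizeof(aTest[0])); i++){
        printf("%ld%s\n", aTest[i], ordinalSuffix(aTest[i]));   /* 1st 2nd 3rd 4th 11th ... */
      }
      return 0;
    }
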
va_arg(ap,int) : *++fmt); + c = va_arg(ap,int); + buf[0] = (char)c; if( precision>=0 ){ - for(idx=1; idx=0 && precision=0 ){ + for(length=0; lengthetBUFSIZE ){ - bufpt = zExtra = sqliteMalloc( n ); - if( bufpt==0 ) return -1; + bufpt = zExtra = sqlite3Malloc( n ); + if( bufpt==0 ){ + pAccum->mallocFailed = 1; + return; + } }else{ bufpt = buf; } @@ -647,8 +683,8 @@ } case etTOKEN: { Token *pToken = va_arg(ap, Token*); - if( pToken && pToken->z ){ - (*func)(arg, (char*)pToken->z, pToken->n); + if( pToken ){ + sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n); } length = width = 0; break; @@ -658,14 +694,18 @@ int k = va_arg(ap, int); struct SrcList_item *pItem = &pSrc->a[k]; assert( k>=0 && knSrc ); - if( pItem->zDatabase && pItem->zDatabase[0] ){ - (*func)(arg, pItem->zDatabase, strlen(pItem->zDatabase)); - (*func)(arg, ".", 1); + if( pItem->zDatabase ){ + sqlite3StrAccumAppend(pAccum, pItem->zDatabase, -1); + sqlite3StrAccumAppend(pAccum, ".", 1); } - (*func)(arg, pItem->zName, strlen(pItem->zName)); + sqlite3StrAccumAppend(pAccum, pItem->zName, -1); length = width = 0; break; } + default: { + assert( xtype==etINVALID ); + return; + } }/* End switch over the format type */ /* ** The text of the conversion is pointed to by "bufpt" and is @@ -676,151 +716,167 @@ register int nspace; nspace = width-length; if( nspace>0 ){ - count += nspace; - while( nspace>=etSPACESIZE ){ - (*func)(arg,spaces,etSPACESIZE); - nspace -= etSPACESIZE; - } - if( nspace>0 ) (*func)(arg,spaces,nspace); + appendSpace(pAccum, nspace); } } if( length>0 ){ - (*func)(arg,bufpt,length); - count += length; + sqlite3StrAccumAppend(pAccum, bufpt, length); } if( flag_leftjustify ){ register int nspace; nspace = width-length; if( nspace>0 ){ - count += nspace; - while( nspace>=etSPACESIZE ){ - (*func)(arg,spaces,etSPACESIZE); - nspace -= etSPACESIZE; - } - if( nspace>0 ) (*func)(arg,spaces,nspace); + appendSpace(pAccum, nspace); } } if( zExtra ){ - sqliteFree(zExtra); + sqlite3_free(zExtra); } }/* End for loop over the format string */ - return errorflag ? -1 : count; } /* End of function */ - -/* This structure is used to store state information about the -** write to memory that is currently in progress. -*/ -struct sgMprintf { - char *zBase; /* A base allocation */ - char *zText; /* The string collected so far */ - int nChar; /* Length of the string so far */ - int nTotal; /* Output size if unconstrained */ - int nAlloc; /* Amount of space allocated in zText */ - void *(*xRealloc)(void*,int); /* Function used to realloc memory */ -}; - -/* -** This function implements the callback from vxprintf. -** -** This routine add nNewChar characters of text in zNewText to -** the sgMprintf structure pointed to by "arg". +/* +** Append N bytes of text from z to the StrAccum object. 
*/ -static void mout(void *arg, const char *zNewText, int nNewChar){ - struct sgMprintf *pM = (struct sgMprintf*)arg; - pM->nTotal += nNewChar; - if( pM->nChar + nNewChar + 1 > pM->nAlloc ){ - if( pM->xRealloc==0 ){ - nNewChar = pM->nAlloc - pM->nChar - 1; +void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){ + assert( z!=0 || N==0 ); + if( p->tooBig | p->mallocFailed ){ + testcase(p->tooBig); + testcase(p->mallocFailed); + return; + } + if( N<0 ){ + N = sqlite3Strlen30(z); + } + if( N==0 || NEVER(z==0) ){ + return; + } + if( p->nChar+N >= p->nAlloc ){ + char *zNew; + if( !p->useMalloc ){ + p->tooBig = 1; + N = p->nAlloc - p->nChar - 1; + if( N<=0 ){ + return; + } }else{ - int nAlloc = pM->nChar + nNewChar*2 + 1; - if( pM->zText==pM->zBase ){ - pM->zText = pM->xRealloc(0, nAlloc); - if( pM->zText && pM->nChar ){ - memcpy(pM->zText, pM->zBase, pM->nChar); - } + i64 szNew = p->nChar; + szNew += N + 1; + if( szNew > p->mxAlloc ){ + sqlite3StrAccumReset(p); + p->tooBig = 1; + return; }else{ - char *zNew; - zNew = pM->xRealloc(pM->zText, nAlloc); - if( zNew ){ - pM->zText = zNew; - }else{ - return; - } + p->nAlloc = (int)szNew; + } + zNew = sqlite3DbMallocRaw(p->db, p->nAlloc ); + if( zNew ){ + memcpy(zNew, p->zText, p->nChar); + sqlite3StrAccumReset(p); + p->zText = zNew; + }else{ + p->mallocFailed = 1; + sqlite3StrAccumReset(p); + return; } - pM->nAlloc = nAlloc; } } - if( pM->zText ){ - if( nNewChar>0 ){ - memcpy(&pM->zText[pM->nChar], zNewText, nNewChar); - pM->nChar += nNewChar; + memcpy(&p->zText[p->nChar], z, N); + p->nChar += N; +} + +/* +** Finish off a string by making sure it is zero-terminated. +** Return a pointer to the resulting string. Return a NULL +** pointer if any kind of error was encountered. +*/ +char *sqlite3StrAccumFinish(StrAccum *p){ + if( p->zText ){ + p->zText[p->nChar] = 0; + if( p->useMalloc && p->zText==p->zBase ){ + p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); + if( p->zText ){ + memcpy(p->zText, p->zBase, p->nChar+1); + }else{ + p->mallocFailed = 1; + } } - pM->zText[pM->nChar] = 0; } + return p->zText; } /* -** This routine is a wrapper around xprintf() that invokes mout() as -** the consumer. +** Reset an StrAccum string. Reclaim all malloced memory. */ -static char *base_vprintf( - void *(*xRealloc)(void*,int), /* Routine to realloc memory. May be NULL */ - int useInternal, /* Use internal %-conversions if true */ - char *zInitBuf, /* Initially write here, before mallocing */ - int nInitBuf, /* Size of zInitBuf[] */ - const char *zFormat, /* format string */ - va_list ap /* arguments */ -){ - struct sgMprintf sM; - sM.zBase = sM.zText = zInitBuf; - sM.nChar = sM.nTotal = 0; - sM.nAlloc = nInitBuf; - sM.xRealloc = xRealloc; - vxprintf(mout, &sM, useInternal, zFormat, ap); - if( xRealloc ){ - if( sM.zText==sM.zBase ){ - sM.zText = xRealloc(0, sM.nChar+1); - if( sM.zText ){ - memcpy(sM.zText, sM.zBase, sM.nChar+1); - } - }else if( sM.nAlloc>sM.nChar+10 ){ - char *zNew = xRealloc(sM.zText, sM.nChar+1); - if( zNew ){ - sM.zText = zNew; - } - } +void sqlite3StrAccumReset(StrAccum *p){ + if( p->zText!=p->zBase ){ + sqlite3DbFree(p->db, p->zText); } - return sM.zText; + p->zText = 0; } /* -** Realloc that is a real function, not a macro. 
+** Initialize a string accumulator */ -static void *printf_realloc(void *old, int size){ - return sqliteRealloc(old,size); +void sqlite3StrAccumInit(StrAccum *p, char *zBase, int n, int mx){ + p->zText = p->zBase = zBase; + p->db = 0; + p->nChar = 0; + p->nAlloc = n; + p->mxAlloc = mx; + p->useMalloc = 1; + p->tooBig = 0; + p->mallocFailed = 0; } /* ** Print into memory obtained from sqliteMalloc(). Use the internal ** %-conversion extensions. */ -char *sqlite3VMPrintf(const char *zFormat, va_list ap){ +char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){ + char *z; char zBase[SQLITE_PRINT_BUF_SIZE]; - return base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap); + StrAccum acc; + assert( db!=0 ); + sqlite3StrAccumInit(&acc, zBase, sizeof(zBase), + db->aLimit[SQLITE_LIMIT_LENGTH]); + acc.db = db; + sqlite3VXPrintf(&acc, 1, zFormat, ap); + z = sqlite3StrAccumFinish(&acc); + if( acc.mallocFailed ){ + db->mallocFailed = 1; + } + return z; } /* ** Print into memory obtained from sqliteMalloc(). Use the internal ** %-conversion extensions. */ -char *sqlite3MPrintf(const char *zFormat, ...){ +char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){ + va_list ap; + char *z; + va_start(ap, zFormat); + z = sqlite3VMPrintf(db, zFormat, ap); + va_end(ap); + return z; +} + +/* +** Like sqlite3MPrintf(), but call sqlite3DbFree() on zStr after formatting +** the string and before returnning. This routine is intended to be used +** to modify an existing string. For example: +** +** x = sqlite3MPrintf(db, x, "prefix %s suffix", x); +** +*/ +char *sqlite3MAppendf(sqlite3 *db, char *zStr, const char *zFormat, ...){ va_list ap; char *z; - char zBase[SQLITE_PRINT_BUF_SIZE]; va_start(ap, zFormat); - z = base_vprintf(printf_realloc, 1, zBase, sizeof(zBase), zFormat, ap); + z = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); + sqlite3DbFree(db, zStr); return z; } @@ -829,8 +885,16 @@ ** %-conversion extensions. */ char *sqlite3_vmprintf(const char *zFormat, va_list ap){ + char *z; char zBase[SQLITE_PRINT_BUF_SIZE]; - return base_vprintf(sqlite3_realloc, 0, zBase, sizeof(zBase), zFormat, ap); + StrAccum acc; +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + sqlite3StrAccumInit(&acc, zBase, sizeof(zBase), SQLITE_MAX_LENGTH); + sqlite3VXPrintf(&acc, 0, zFormat, ap); + z = sqlite3StrAccumFinish(&acc); + return z; } /* @@ -840,6 +904,9 @@ char *sqlite3_mprintf(const char *zFormat, ...){ va_list ap; char *z; +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif va_start(ap, zFormat); z = sqlite3_vmprintf(zFormat, ap); va_end(ap); @@ -855,30 +922,36 @@ char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ char *z; va_list ap; + StrAccum acc; if( n<=0 ){ return zBuf; } - zBuf[0] = 0; + sqlite3StrAccumInit(&acc, zBuf, n, 0); + acc.useMalloc = 0; va_start(ap,zFormat); - z = base_vprintf(0, 0, zBuf, n, zFormat, ap); + sqlite3VXPrintf(&acc, 0, zFormat, ap); va_end(ap); + z = sqlite3StrAccumFinish(&acc); return z; } -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) || defined(SQLITE_MEMDEBUG) +#if defined(SQLITE_DEBUG) /* ** A version of printf() that understands %lld. Used for debugging. ** The printf() built into some versions of windows does not understand %lld ** and segfaults if you give it a long long int. 
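
The printf.c rework above replaces the old sgMprintf/base_vprintf machinery with the StrAccum accumulator, but the public wrappers keep their existing shapes. Two details worth keeping in mind when calling them: sqlite3_snprintf() takes the buffer size as its first argument and returns the buffer pointer rather than a character count, and %q/%Q double embedded single quotes for safe use inside SQL text. A short usage sketch, assuming the SQLite headers and library are available.

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      char zBuf[32];
      char *zDyn;

      /* size comes first; output is truncated, never overflowed, and zero-terminated */
      sqlite3_snprintf(sizeof(zBuf), zBuf, "%d item%s", 2, "s");
      printf("%s\n", zBuf);                     /* 2 items */

      /* %q doubles embedded single quotes for use inside SQL string literals */
      zDyn = sqlite3_mprintf("INSERT INTO t VALUES('%q')", "O'Reilly");
      if( zDyn ){
        printf("%s\n", zDyn);                   /* INSERT INTO t VALUES('O''Reilly') */
        sqlite3_free(zDyn);
      }
      return 0;
    }
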
*/ void sqlite3DebugPrintf(const char *zFormat, ...){ - extern int getpid(void); va_list ap; + StrAccum acc; char zBuf[500]; - va_start(ap, zFormat); - base_vprintf(0, 0, zBuf, sizeof(zBuf), zFormat, ap); + sqlite3StrAccumInit(&acc, zBuf, sizeof(zBuf), 0); + acc.useMalloc = 0; + va_start(ap,zFormat); + sqlite3VXPrintf(&acc, 0, zFormat, ap); va_end(ap); + sqlite3StrAccumFinish(&acc); fprintf(stdout,"%s", zBuf); fflush(stdout); } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/random.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/random.c --- sqlite3-3.4.2/src/random.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/random.c 2009-06-12 03:37:48.000000000 +0100 @@ -15,12 +15,20 @@ ** Random numbers are used by some of the database backends in order ** to generate random integer keys for tables or random filenames. ** -** $Id: random.c,v 1.16 2007/01/05 14:38:56 drh Exp $ +** $Id: random.c,v 1.29 2008/12/10 19:26:24 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" +/* All threads share a single random number generator. +** This structure is the current state of the generator. +*/ +static SQLITE_WSD struct sqlite3PrngType { + unsigned char isInit; /* True if initialized */ + unsigned char i, j; /* State variables */ + unsigned char s[256]; /* State variables */ +} sqlite3Prng; + /* ** Get a single 8-bit random value from the RC4 PRNG. The Mutex ** must be held while executing this routine. @@ -37,17 +45,23 @@ ** (Later): Actually, OP_NewRowid does not depend on a good source of ** randomness any more. But we will leave this code in all the same. */ -static int randomByte(void){ +static u8 randomByte(void){ unsigned char t; - /* All threads share a single random number generator. - ** This structure is the current state of the generator. + + /* The "wsdPrng" macro will resolve to the pseudo-random number generator + ** state vector. If writable static data is unsupported on the target, + ** we have to locate the state vector at run-time. In the more common + ** case where writable static data is supported, wsdPrng can refer directly + ** to the "sqlite3Prng" state vector declared above. */ - static struct { - unsigned char isInit; /* True if initialized */ - unsigned char i, j; /* State variables */ - unsigned char s[256]; /* State variables */ - } prng; +#ifdef SQLITE_OMIT_WSD + struct sqlite3PrngType *p = &GLOBAL(struct sqlite3PrngType, sqlite3Prng); +# define wsdPrng p[0] +#else +# define wsdPrng sqlite3Prng +#endif + /* Initialize the state of the random number generator once, ** the first time this routine is called. The seed value does @@ -58,43 +72,76 @@ ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random ** number generator) not as an encryption device. 
*/ - if( !prng.isInit ){ + if( !wsdPrng.isInit ){ int i; char k[256]; - prng.j = 0; - prng.i = 0; - sqlite3OsRandomSeed(k); + wsdPrng.j = 0; + wsdPrng.i = 0; + sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); for(i=0; i<256; i++){ - prng.s[i] = i; + wsdPrng.s[i] = (u8)i; } for(i=0; i<256; i++){ - prng.j += prng.s[i] + k[i]; - t = prng.s[prng.j]; - prng.s[prng.j] = prng.s[i]; - prng.s[i] = t; + wsdPrng.j += wsdPrng.s[i] + k[i]; + t = wsdPrng.s[wsdPrng.j]; + wsdPrng.s[wsdPrng.j] = wsdPrng.s[i]; + wsdPrng.s[i] = t; } - prng.isInit = 1; + wsdPrng.isInit = 1; } /* Generate and return single random byte */ - prng.i++; - t = prng.s[prng.i]; - prng.j += t; - prng.s[prng.i] = prng.s[prng.j]; - prng.s[prng.j] = t; - t += prng.s[prng.i]; - return prng.s[t]; + wsdPrng.i++; + t = wsdPrng.s[wsdPrng.i]; + wsdPrng.j += t; + wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j]; + wsdPrng.s[wsdPrng.j] = t; + t += wsdPrng.s[wsdPrng.i]; + return wsdPrng.s[t]; } /* ** Return N random bytes. */ -void sqlite3Randomness(int N, void *pBuf){ +void sqlite3_randomness(int N, void *pBuf){ unsigned char *zBuf = pBuf; - sqlite3OsEnterMutex(); +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG); +#endif + sqlite3_mutex_enter(mutex); while( N-- ){ *(zBuf++) = randomByte(); } - sqlite3OsLeaveMutex(); + sqlite3_mutex_leave(mutex); +} + +#ifndef SQLITE_OMIT_BUILTIN_TEST +/* +** For testing purposes, we sometimes want to preserve the state of +** PRNG and restore the PRNG to its saved state at a later time, or +** to reset the PRNG to its initial state. These routines accomplish +** those tasks. +** +** The sqlite3_test_control() interface calls these routines to +** control the PRNG. +*/ +static SQLITE_WSD struct sqlite3PrngType sqlite3SavedPrng; +void sqlite3PrngSaveState(void){ + memcpy( + &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), + &GLOBAL(struct sqlite3PrngType, sqlite3Prng), + sizeof(sqlite3Prng) + ); +} +void sqlite3PrngRestoreState(void){ + memcpy( + &GLOBAL(struct sqlite3PrngType, sqlite3Prng), + &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), + sizeof(sqlite3Prng) + ); +} +void sqlite3PrngResetState(void){ + GLOBAL(struct sqlite3PrngType, sqlite3Prng).isInit = 0; } +#endif /* SQLITE_OMIT_BUILTIN_TEST */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/resolve.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/resolve.c --- sqlite3-3.4.2/src/resolve.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/resolve.c 2009-06-25 12:39:54.000000000 +0100 @@ -0,0 +1,1162 @@ +/* +** 2008 August 18 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains routines used for walking the parser tree and +** resolve all identifiers by associating them with a particular +** table and column. +** +** $Id: resolve.c,v 1.30 2009/06/15 23:15:59 drh Exp $ +*/ +#include "sqliteInt.h" +#include +#include + +/* +** Turn the pExpr expression into an alias for the iCol-th column of the +** result set in pEList. +** +** If the result set column is a simple column reference, then this routine +** makes an exact copy. But for any other kind of expression, this +** routine make a copy of the result set column as the argument to the +** TK_AS operator. 
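
Stepping back to the random.c hunks above: the RC4 generator is now exposed through the public sqlite3_randomness() entry point, with its state kept in a SQLITE_WSD structure so that builds without writable static data can relocate it at run time via the GLOBAL() macro. A minimal caller, assuming the SQLite headers and library are available; sqlite3_initialize() is called first so the static PRNG mutex exists even if nothing else has touched the library yet.

    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      unsigned char aBuf[8];
      int i;
      sqlite3_initialize();                   /* make sure the mutex subsystem is up */
      sqlite3_randomness(sizeof(aBuf), aBuf); /* first call seeds from the default VFS */
      for(i=0; i<(int)sizeof(aBuf); i++) printf("%02x", aBuf[i]);
      printf("\n");
      return 0;
    }
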
The TK_AS operator causes the expression to be +** evaluated just once and then reused for each alias. +** +** The reason for suppressing the TK_AS term when the expression is a simple +** column reference is so that the column reference will be recognized as +** usable by indices within the WHERE clause processing logic. +** +** Hack: The TK_AS operator is inhibited if zType[0]=='G'. This means +** that in a GROUP BY clause, the expression is evaluated twice. Hence: +** +** SELECT random()%5 AS x, count(*) FROM tab GROUP BY x +** +** Is equivalent to: +** +** SELECT random()%5 AS x, count(*) FROM tab GROUP BY random()%5 +** +** The result of random()%5 in the GROUP BY clause is probably different +** from the result in the result-set. We might fix this someday. Or +** then again, we might not... +*/ +static void resolveAlias( + Parse *pParse, /* Parsing context */ + ExprList *pEList, /* A result set */ + int iCol, /* A column in the result set. 0..pEList->nExpr-1 */ + Expr *pExpr, /* Transform this into an alias to the result set */ + const char *zType /* "GROUP" or "ORDER" or "" */ +){ + Expr *pOrig; /* The iCol-th column of the result set */ + Expr *pDup; /* Copy of pOrig */ + sqlite3 *db; /* The database connection */ + + assert( iCol>=0 && iColnExpr ); + pOrig = pEList->a[iCol].pExpr; + assert( pOrig!=0 ); + assert( pOrig->flags & EP_Resolved ); + db = pParse->db; + if( pOrig->op!=TK_COLUMN && zType[0]!='G' ){ + pDup = sqlite3ExprDup(db, pOrig, 0); + pDup = sqlite3PExpr(pParse, TK_AS, pDup, 0, 0); + if( pDup==0 ) return; + if( pEList->a[iCol].iAlias==0 ){ + pEList->a[iCol].iAlias = (u16)(++pParse->nAlias); + } + pDup->iTable = pEList->a[iCol].iAlias; + }else if( ExprHasProperty(pOrig, EP_IntValue) || pOrig->u.zToken==0 ){ + pDup = sqlite3ExprDup(db, pOrig, 0); + if( pDup==0 ) return; + }else{ + char *zToken = pOrig->u.zToken; + assert( zToken!=0 ); + pOrig->u.zToken = 0; + pDup = sqlite3ExprDup(db, pOrig, 0); + pOrig->u.zToken = zToken; + if( pDup==0 ) return; + assert( (pDup->flags & (EP_Reduced|EP_TokenOnly))==0 ); + pDup->flags2 |= EP2_MallocedToken; + pDup->u.zToken = sqlite3DbStrDup(db, zToken); + } + if( pExpr->flags & EP_ExpCollate ){ + pDup->pColl = pExpr->pColl; + pDup->flags |= EP_ExpCollate; + } + sqlite3ExprClear(db, pExpr); + memcpy(pExpr, pDup, sizeof(*pExpr)); + sqlite3DbFree(db, pDup); +} + +/* +** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up +** that name in the set of source tables in pSrcList and make the pExpr +** expression node refer back to that source column. The following changes +** are made to pExpr: +** +** pExpr->iDb Set the index in db->aDb[] of the database X +** (even if X is implied). +** pExpr->iTable Set to the cursor number for the table obtained +** from pSrcList. +** pExpr->pTab Points to the Table structure of X.Y (even if +** X and/or Y are implied.) +** pExpr->iColumn Set to the column number within the table. +** pExpr->op Set to TK_COLUMN. +** pExpr->pLeft Any expression this points to is deleted +** pExpr->pRight Any expression this points to is deleted. +** +** The zDb variable is the name of the database (the "X"). This value may be +** NULL meaning that name is of the form Y.Z or Z. Any available database +** can be used. The zTable variable is the name of the table (the "Y"). This +** value can be NULL if zDb is also NULL. If zTable is NULL it +** means that the form of the name is Z and that columns from any table +** can be used. 
+** +** If the name cannot be resolved unambiguously, leave an error message +** in pParse and return WRC_Abort. Return WRC_Prune on success. +*/ +static int lookupName( + Parse *pParse, /* The parsing context */ + const char *zDb, /* Name of the database containing table, or NULL */ + const char *zTab, /* Name of table containing column, or NULL */ + const char *zCol, /* Name of the column. */ + NameContext *pNC, /* The name context used to resolve the name */ + Expr *pExpr /* Make this EXPR node point to the selected column */ +){ + int i, j; /* Loop counters */ + int cnt = 0; /* Number of matching column names */ + int cntTab = 0; /* Number of matching table names */ + sqlite3 *db = pParse->db; /* The database connection */ + struct SrcList_item *pItem; /* Use for looping over pSrcList items */ + struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ + NameContext *pTopNC = pNC; /* First namecontext in the list */ + Schema *pSchema = 0; /* Schema of the expression */ + + assert( pNC ); /* the name context cannot be NULL. */ + assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ + assert( ~ExprHasAnyProperty(pExpr, EP_TokenOnly|EP_Reduced) ); + + /* Initialize the node to no-match */ + pExpr->iTable = -1; + pExpr->pTab = 0; + ExprSetIrreducible(pExpr); + + /* Start at the inner-most context and move outward until a match is found */ + while( pNC && cnt==0 ){ + ExprList *pEList; + SrcList *pSrcList = pNC->pSrcList; + + if( pSrcList ){ + for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ + Table *pTab; + int iDb; + Column *pCol; + + pTab = pItem->pTab; + assert( pTab!=0 && pTab->zName!=0 ); + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( pTab->nCol>0 ); + if( zTab ){ + if( pItem->zAlias ){ + char *zTabName = pItem->zAlias; + if( sqlite3StrICmp(zTabName, zTab)!=0 ) continue; + }else{ + char *zTabName = pTab->zName; + if( NEVER(zTabName==0) || sqlite3StrICmp(zTabName, zTab)!=0 ){ + continue; + } + if( zDb!=0 && sqlite3StrICmp(db->aDb[iDb].zName, zDb)!=0 ){ + continue; + } + } + } + if( 0==(cntTab++) ){ + pExpr->iTable = pItem->iCursor; + pExpr->pTab = pTab; + pSchema = pTab->pSchema; + pMatch = pItem; + } + for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + IdList *pUsing; + cnt++; + pExpr->iTable = pItem->iCursor; + pExpr->pTab = pTab; + pMatch = pItem; + pSchema = pTab->pSchema; + /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ + pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; + if( inSrc-1 ){ + if( pItem[1].jointype & JT_NATURAL ){ + /* If this match occurred in the left table of a natural join, + ** then skip the right table to avoid a duplicate match */ + pItem++; + i++; + }else if( (pUsing = pItem[1].pUsing)!=0 ){ + /* If this match occurs on a column that is in the USING clause + ** of a join, skip the search of the right table of the join + ** to avoid a duplicate match there. 
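The duplicate-match suppression described just above can be illustrated with a hypothetical pair of tables (not part of this patch):

/*
**   CREATE TABLE t1(a, b);
**   CREATE TABLE t2(b, c);
**
**   SELECT b FROM t1 NATURAL JOIN t2;
**   SELECT b FROM t1 JOIN t2 USING(b);
**
** The unqualified "b" matches a column in both t1 and t2, but because the
** join is NATURAL (or names b in its USING clause) the match in the right
** table is skipped, so "b" resolves to t1.b rather than being reported as
** an ambiguous column name.
*/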
*/ + int k; + for(k=0; knId; k++){ + if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ){ + pItem++; + i++; + break; + } + } + } + } + break; + } + } + } + } + +#ifndef SQLITE_OMIT_TRIGGER + /* If we have not already resolved the name, then maybe + ** it is a new.* or old.* trigger argument reference + */ + if( zDb==0 && zTab!=0 && cnt==0 && pParse->trigStack!=0 ){ + TriggerStack *pTriggerStack = pParse->trigStack; + Table *pTab = 0; + u32 *piColMask = 0; + if( pTriggerStack->newIdx != -1 && sqlite3StrICmp("new", zTab) == 0 ){ + pExpr->iTable = pTriggerStack->newIdx; + assert( pTriggerStack->pTab ); + pTab = pTriggerStack->pTab; + piColMask = &(pTriggerStack->newColMask); + }else if( pTriggerStack->oldIdx != -1 && sqlite3StrICmp("old", zTab)==0 ){ + pExpr->iTable = pTriggerStack->oldIdx; + assert( pTriggerStack->pTab ); + pTab = pTriggerStack->pTab; + piColMask = &(pTriggerStack->oldColMask); + } + + if( pTab ){ + int iCol; + Column *pCol = pTab->aCol; + + pSchema = pTab->pSchema; + cntTab++; + for(iCol=0; iCol < pTab->nCol; iCol++, pCol++) { + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + cnt++; + pExpr->iColumn = iCol==pTab->iPKey ? -1 : (i16)iCol; + pExpr->pTab = pTab; + testcase( iCol==31 ); + testcase( iCol==32 ); + if( iCol>=32 ){ + *piColMask = 0xffffffff; + }else{ + *piColMask |= ((u32)1)<iColumn = -1; + pExpr->affinity = SQLITE_AFF_INTEGER; + } + + /* + ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z + ** might refer to an result-set alias. This happens, for example, when + ** we are resolving names in the WHERE clause of the following command: + ** + ** SELECT a+b AS x FROM table WHERE x<10; + ** + ** In cases like this, replace pExpr with a copy of the expression that + ** forms the result set entry ("a+b" in the example) and return immediately. + ** Note that the expression in the result set should have already been + ** resolved by the time the WHERE clause is resolved. + */ + if( cnt==0 && (pEList = pNC->pEList)!=0 && zTab==0 ){ + for(j=0; jnExpr; j++){ + char *zAs = pEList->a[j].zName; + if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ + Expr *pOrig; + assert( pExpr->pLeft==0 && pExpr->pRight==0 ); + assert( pExpr->x.pList==0 ); + assert( pExpr->x.pSelect==0 ); + pOrig = pEList->a[j].pExpr; + if( !pNC->allowAgg && ExprHasProperty(pOrig, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); + return WRC_Abort; + } + resolveAlias(pParse, pEList, j, pExpr, ""); + cnt = 1; + pMatch = 0; + assert( zTab==0 && zDb==0 ); + goto lookupname_end; + } + } + } + + /* Advance to the next name context. The loop will exit when either + ** we have a match (cnt>0) or when we run out of name contexts. + */ + if( cnt==0 ){ + pNC = pNC->pNext; + } + } + + /* + ** If X and Y are NULL (in other words if only the column name Z is + ** supplied) and the value of Z is enclosed in double-quotes, then + ** Z is a string literal if it doesn't match any column names. In that + ** case, we need to return right away and not make any changes to + ** pExpr. + ** + ** Because no reference was made to outer contexts, the pNC->nRef + ** fields are not changed in any context. + */ + if( cnt==0 && zTab==0 && ExprHasProperty(pExpr,EP_DblQuoted) ){ + pExpr->op = TK_STRING; + pExpr->pTab = 0; + return WRC_Prune; + } + + /* + ** cnt==0 means there was not match. cnt>1 means there were two or + ** more matches. Either way, we have an error. + */ + if( cnt!=1 ){ + const char *zErr; + zErr = cnt==0 ? 
"no such column" : "ambiguous column name"; + if( zDb ){ + sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); + }else if( zTab ){ + sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); + }else{ + sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); + } + pTopNC->nErr++; + } + + /* If a column from a table in pSrcList is referenced, then record + ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes + ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the + ** column number is greater than the number of bits in the bitmask + ** then set the high-order bit of the bitmask. + */ + if( pExpr->iColumn>=0 && pMatch!=0 ){ + int n = pExpr->iColumn; + testcase( n==BMS-1 ); + if( n>=BMS ){ + n = BMS-1; + } + assert( pMatch->iCursor==pExpr->iTable ); + pMatch->colUsed |= ((Bitmask)1)<pLeft); + pExpr->pLeft = 0; + sqlite3ExprDelete(db, pExpr->pRight); + pExpr->pRight = 0; + pExpr->op = TK_COLUMN; +lookupname_end: + if( cnt==1 ){ + assert( pNC!=0 ); + sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); + /* Increment the nRef value on all name contexts from TopNC up to + ** the point where the name matched. */ + for(;;){ + assert( pTopNC!=0 ); + pTopNC->nRef++; + if( pTopNC==pNC ) break; + pTopNC = pTopNC->pNext; + } + return WRC_Prune; + } else { + return WRC_Abort; + } +} + +/* +** This routine is callback for sqlite3WalkExpr(). +** +** Resolve symbolic names into TK_COLUMN operators for the current +** node in the expression tree. Return 0 to continue the search down +** the tree or 2 to abort the tree walk. +** +** This routine also does error checking and name resolution for +** function names. The operator for aggregate functions is changed +** to TK_AGG_FUNCTION. +*/ +static int resolveExprStep(Walker *pWalker, Expr *pExpr){ + NameContext *pNC; + Parse *pParse; + + pNC = pWalker->u.pNC; + assert( pNC!=0 ); + pParse = pNC->pParse; + assert( pParse==pWalker->pParse ); + + if( ExprHasAnyProperty(pExpr, EP_Resolved) ) return WRC_Prune; + ExprSetProperty(pExpr, EP_Resolved); +#ifndef NDEBUG + if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ + SrcList *pSrcList = pNC->pSrcList; + int i; + for(i=0; ipSrcList->nSrc; i++){ + assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursornTab); + } + } +#endif + switch( pExpr->op ){ + +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) + /* The special operator TK_ROW means use the rowid for the first + ** column in the FROM clause. This is used by the LIMIT and ORDER BY + ** clause processing on UPDATE and DELETE statements. + */ + case TK_ROW: { + SrcList *pSrcList = pNC->pSrcList; + struct SrcList_item *pItem; + assert( pSrcList && pSrcList->nSrc==1 ); + pItem = pSrcList->a; + pExpr->op = TK_COLUMN; + pExpr->pTab = pItem->pTab; + pExpr->iTable = pItem->iCursor; + pExpr->iColumn = -1; + pExpr->affinity = SQLITE_AFF_INTEGER; + break; + } +#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */ + + /* A lone identifier is the name of a column. 
+ */ + case TK_ID: { + return lookupName(pParse, 0, 0, pExpr->u.zToken, pNC, pExpr); + } + + /* A table name and column name: ID.ID + ** Or a database, table and column: ID.ID.ID + */ + case TK_DOT: { + const char *zColumn; + const char *zTable; + const char *zDb; + Expr *pRight; + + /* if( pSrcList==0 ) break; */ + pRight = pExpr->pRight; + if( pRight->op==TK_ID ){ + zDb = 0; + zTable = pExpr->pLeft->u.zToken; + zColumn = pRight->u.zToken; + }else{ + assert( pRight->op==TK_DOT ); + zDb = pExpr->pLeft->u.zToken; + zTable = pRight->pLeft->u.zToken; + zColumn = pRight->pRight->u.zToken; + } + return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); + } + + /* Resolve function names + */ + case TK_CONST_FUNC: + case TK_FUNCTION: { + ExprList *pList = pExpr->x.pList; /* The argument list */ + int n = pList ? pList->nExpr : 0; /* Number of arguments */ + int no_such_func = 0; /* True if no such function exists */ + int wrong_num_args = 0; /* True if wrong number of arguments */ + int is_agg = 0; /* True if is an aggregate function */ + int auth; /* Authorization to use the function */ + int nId; /* Number of characters in function name */ + const char *zId; /* The function name. */ + FuncDef *pDef; /* Information about the function */ + u8 enc = ENC(pParse->db); /* The database encoding */ + + testcase( pExpr->op==TK_CONST_FUNC ); + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + zId = pExpr->u.zToken; + nId = sqlite3Strlen30(zId); + pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0); + if( pDef==0 ){ + pDef = sqlite3FindFunction(pParse->db, zId, nId, -1, enc, 0); + if( pDef==0 ){ + no_such_func = 1; + }else{ + wrong_num_args = 1; + } + }else{ + is_agg = pDef->xFunc==0; + } +#ifndef SQLITE_OMIT_AUTHORIZATION + if( pDef ){ + auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0); + if( auth!=SQLITE_OK ){ + if( auth==SQLITE_DENY ){ + sqlite3ErrorMsg(pParse, "not authorized to use function: %s", + pDef->zName); + pNC->nErr++; + } + pExpr->op = TK_NULL; + return WRC_Prune; + } + } +#endif + if( is_agg && !pNC->allowAgg ){ + sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); + pNC->nErr++; + is_agg = 0; + }else if( no_such_func ){ + sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); + pNC->nErr++; + }else if( wrong_num_args ){ + sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", + nId, zId); + pNC->nErr++; + } + if( is_agg ){ + pExpr->op = TK_AGG_FUNCTION; + pNC->hasAgg = 1; + } + if( is_agg ) pNC->allowAgg = 0; + sqlite3WalkExprList(pWalker, pList); + if( is_agg ) pNC->allowAgg = 1; + /* FIX ME: Compute pExpr->affinity based on the expected return + ** type of the function + */ + return WRC_Prune; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_SELECT: + case TK_EXISTS: testcase( pExpr->op==TK_EXISTS ); +#endif + case TK_IN: { + testcase( pExpr->op==TK_IN ); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + int nRef = pNC->nRef; +#ifndef SQLITE_OMIT_CHECK + if( pNC->isCheck ){ + sqlite3ErrorMsg(pParse,"subqueries prohibited in CHECK constraints"); + } +#endif + sqlite3WalkSelect(pWalker, pExpr->x.pSelect); + assert( pNC->nRef>=nRef ); + if( nRef!=pNC->nRef ){ + ExprSetProperty(pExpr, EP_VarSelect); + } + } + break; + } +#ifndef SQLITE_OMIT_CHECK + case TK_VARIABLE: { + if( pNC->isCheck ){ + sqlite3ErrorMsg(pParse,"parameters prohibited in CHECK constraints"); + } + break; + } +#endif + } + return (pParse->nErr || pParse->db->mallocFailed) ? 
WRC_Abort : WRC_Continue; +} + +/* +** pEList is a list of expressions which are really the result set of the +** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause. +** This routine checks to see if pE is a simple identifier which corresponds +** to the AS-name of one of the terms of the expression list. If it is, +** this routine return an integer between 1 and N where N is the number of +** elements in pEList, corresponding to the matching entry. If there is +** no match, or if pE is not a simple identifier, then this routine +** return 0. +** +** pEList has been resolved. pE has not. +*/ +static int resolveAsName( + Parse *pParse, /* Parsing context for error messages */ + ExprList *pEList, /* List of expressions to scan */ + Expr *pE /* Expression we are trying to match */ +){ + int i; /* Loop counter */ + + UNUSED_PARAMETER(pParse); + + if( pE->op==TK_ID ){ + char *zCol = pE->u.zToken; + for(i=0; inExpr; i++){ + char *zAs = pEList->a[i].zName; + if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ + return i+1; + } + } + } + return 0; +} + +/* +** pE is a pointer to an expression which is a single term in the +** ORDER BY of a compound SELECT. The expression has not been +** name resolved. +** +** At the point this routine is called, we already know that the +** ORDER BY term is not an integer index into the result set. That +** case is handled by the calling routine. +** +** Attempt to match pE against result set columns in the left-most +** SELECT statement. Return the index i of the matching column, +** as an indication to the caller that it should sort by the i-th column. +** The left-most column is 1. In other words, the value returned is the +** same integer value that would be used in the SQL statement to indicate +** the column. +** +** If there is no match, return 0. Return -1 if an error occurs. +*/ +static int resolveOrderByTermToExprList( + Parse *pParse, /* Parsing context for error messages */ + Select *pSelect, /* The SELECT statement with the ORDER BY clause */ + Expr *pE /* The specific ORDER BY term */ +){ + int i; /* Loop counter */ + ExprList *pEList; /* The columns of the result set */ + NameContext nc; /* Name context for resolving pE */ + + assert( sqlite3ExprIsInteger(pE, &i)==0 ); + pEList = pSelect->pEList; + + /* Resolve all names in the ORDER BY term expression + */ + memset(&nc, 0, sizeof(nc)); + nc.pParse = pParse; + nc.pSrcList = pSelect->pSrc; + nc.pEList = pEList; + nc.allowAgg = 1; + nc.nErr = 0; + if( sqlite3ResolveExprNames(&nc, pE) ){ + sqlite3ErrorClear(pParse); + return 0; + } + + /* Try to match the ORDER BY expression against an expression + ** in the result set. Return an 1-based index of the matching + ** result-set entry. + */ + for(i=0; inExpr; i++){ + if( sqlite3ExprCompare(pEList->a[i].pExpr, pE) ){ + return i+1; + } + } + + /* If no match, return 0. */ + return 0; +} + +/* +** Generate an ORDER BY or GROUP BY term out-of-range error. +*/ +static void resolveOutOfRangeError( + Parse *pParse, /* The error context into which to write the error */ + const char *zType, /* "ORDER" or "GROUP" */ + int i, /* The index (1-based) of the term out of range */ + int mx /* Largest permissible value of i */ +){ + sqlite3ErrorMsg(pParse, + "%r %s BY term out of range - should be " + "between 1 and %d", i, zType, mx); +} + +/* +** Analyze the ORDER BY clause in a compound SELECT statement. Modify +** each term of the ORDER BY clause is a constant integer between 1 +** and N where N is the number of columns in the compound SELECT. 
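A hypothetical compound query (tables invented for illustration) showing the rewrite that the following paragraphs describe:

/*
**   SELECT a, b FROM t1
**   UNION
**   SELECT x, y FROM t2
**   ORDER BY b;
**
** The term "b" is not an integer, so it is matched against the result set
** of the left-most SELECT, where it is the second column.  The clause is
** therefore rewritten as if it had been written ORDER BY 2.
*/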
+** +** ORDER BY terms that are already an integer between 1 and N are +** unmodified. ORDER BY terms that are integers outside the range of +** 1 through N generate an error. ORDER BY terms that are expressions +** are matched against result set expressions of compound SELECT +** beginning with the left-most SELECT and working toward the right. +** At the first match, the ORDER BY expression is transformed into +** the integer column number. +** +** Return the number of errors seen. +*/ +static int resolveCompoundOrderBy( + Parse *pParse, /* Parsing context. Leave error messages here */ + Select *pSelect /* The SELECT statement containing the ORDER BY */ +){ + int i; + ExprList *pOrderBy; + ExprList *pEList; + sqlite3 *db; + int moreToDo = 1; + + pOrderBy = pSelect->pOrderBy; + if( pOrderBy==0 ) return 0; + db = pParse->db; +#if SQLITE_MAX_COLUMN + if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause"); + return 1; + } +#endif + for(i=0; inExpr; i++){ + pOrderBy->a[i].done = 0; + } + pSelect->pNext = 0; + while( pSelect->pPrior ){ + pSelect->pPrior->pNext = pSelect; + pSelect = pSelect->pPrior; + } + while( pSelect && moreToDo ){ + struct ExprList_item *pItem; + moreToDo = 0; + pEList = pSelect->pEList; + assert( pEList!=0 ); + for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ + int iCol = -1; + Expr *pE, *pDup; + if( pItem->done ) continue; + pE = pItem->pExpr; + if( sqlite3ExprIsInteger(pE, &iCol) ){ + if( iCol<=0 || iCol>pEList->nExpr ){ + resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); + return 1; + } + }else{ + iCol = resolveAsName(pParse, pEList, pE); + if( iCol==0 ){ + pDup = sqlite3ExprDup(db, pE, 0); + if( !db->mallocFailed ){ + assert(pDup); + iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup); + } + sqlite3ExprDelete(db, pDup); + } + } + if( iCol>0 ){ + CollSeq *pColl = pE->pColl; + int flags = pE->flags & EP_ExpCollate; + sqlite3ExprDelete(db, pE); + pItem->pExpr = pE = sqlite3Expr(db, TK_INTEGER, 0); + if( pE==0 ) return 1; + pE->pColl = pColl; + pE->flags |= EP_IntValue | flags; + pE->u.iValue = iCol; + pItem->iCol = (u16)iCol; + pItem->done = 1; + }else{ + moreToDo = 1; + } + } + pSelect = pSelect->pNext; + } + for(i=0; inExpr; i++){ + if( pOrderBy->a[i].done==0 ){ + sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any " + "column in the result set", i+1); + return 1; + } + } + return 0; +} + +/* +** Check every term in the ORDER BY or GROUP BY clause pOrderBy of +** the SELECT statement pSelect. If any term is reference to a +** result set expression (as determined by the ExprList.a.iCol field) +** then convert that term into a copy of the corresponding result set +** column. +** +** If any errors are detected, add an error message to pParse and +** return non-zero. Return zero if no errors are seen. +*/ +int sqlite3ResolveOrderGroupBy( + Parse *pParse, /* Parsing context. 
Leave error messages here */ + Select *pSelect, /* The SELECT statement containing the clause */ + ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ + const char *zType /* "ORDER" or "GROUP" */ +){ + int i; + sqlite3 *db = pParse->db; + ExprList *pEList; + struct ExprList_item *pItem; + + if( pOrderBy==0 || pParse->db->mallocFailed ) return 0; +#if SQLITE_MAX_COLUMN + if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); + return 1; + } +#endif + pEList = pSelect->pEList; + assert( pEList!=0 ); /* sqlite3SelectNew() guarantees this */ + for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ + if( pItem->iCol ){ + if( pItem->iCol>pEList->nExpr ){ + resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); + return 1; + } + resolveAlias(pParse, pEList, pItem->iCol-1, pItem->pExpr, zType); + } + } + return 0; +} + +/* +** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect. +** The Name context of the SELECT statement is pNC. zType is either +** "ORDER" or "GROUP" depending on which type of clause pOrderBy is. +** +** This routine resolves each term of the clause into an expression. +** If the order-by term is an integer I between 1 and N (where N is the +** number of columns in the result set of the SELECT) then the expression +** in the resolution is a copy of the I-th result-set expression. If +** the order-by term is an identify that corresponds to the AS-name of +** a result-set expression, then the term resolves to a copy of the +** result-set expression. Otherwise, the expression is resolved in +** the usual way - using sqlite3ResolveExprNames(). +** +** This routine returns the number of errors. If errors occur, then +** an appropriate error message might be left in pParse. (OOM errors +** excepted.) +*/ +static int resolveOrderGroupBy( + NameContext *pNC, /* The name context of the SELECT statement */ + Select *pSelect, /* The SELECT statement holding pOrderBy */ + ExprList *pOrderBy, /* An ORDER BY or GROUP BY clause to resolve */ + const char *zType /* Either "ORDER" or "GROUP", as appropriate */ +){ + int i; /* Loop counter */ + int iCol; /* Column number */ + struct ExprList_item *pItem; /* A term of the ORDER BY clause */ + Parse *pParse; /* Parsing context */ + int nResult; /* Number of terms in the result set */ + + if( pOrderBy==0 ) return 0; + nResult = pSelect->pEList->nExpr; + pParse = pNC->pParse; + for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ + Expr *pE = pItem->pExpr; + iCol = resolveAsName(pParse, pSelect->pEList, pE); + if( iCol>0 ){ + /* If an AS-name match is found, mark this ORDER BY column as being + ** a copy of the iCol-th result-set column. The subsequent call to + ** sqlite3ResolveOrderGroupBy() will convert the expression to a + ** copy of the iCol-th result-set expression. */ + pItem->iCol = (u16)iCol; + continue; + } + if( sqlite3ExprIsInteger(pE, &iCol) ){ + /* The ORDER BY term is an integer constant. 
Again, set the column + ** number so that sqlite3ResolveOrderGroupBy() will convert the + ** order-by term to a copy of the result-set expression */ + if( iCol<1 ){ + resolveOutOfRangeError(pParse, zType, i+1, nResult); + return 1; + } + pItem->iCol = (u16)iCol; + continue; + } + + /* Otherwise, treat the ORDER BY term as an ordinary expression */ + pItem->iCol = 0; + if( sqlite3ResolveExprNames(pNC, pE) ){ + return 1; + } + } + return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType); +} + +/* +** Resolve names in the SELECT statement p and all of its descendents. +*/ +static int resolveSelectStep(Walker *pWalker, Select *p){ + NameContext *pOuterNC; /* Context that contains this SELECT */ + NameContext sNC; /* Name context of this SELECT */ + int isCompound; /* True if p is a compound select */ + int nCompound; /* Number of compound terms processed so far */ + Parse *pParse; /* Parsing context */ + ExprList *pEList; /* Result set expression list */ + int i; /* Loop counter */ + ExprList *pGroupBy; /* The GROUP BY clause */ + Select *pLeftmost; /* Left-most of SELECT of a compound */ + sqlite3 *db; /* Database connection */ + + + assert( p!=0 ); + if( p->selFlags & SF_Resolved ){ + return WRC_Prune; + } + pOuterNC = pWalker->u.pNC; + pParse = pWalker->pParse; + db = pParse->db; + + /* Normally sqlite3SelectExpand() will be called first and will have + ** already expanded this SELECT. However, if this is a subquery within + ** an expression, sqlite3ResolveExprNames() will be called without a + ** prior call to sqlite3SelectExpand(). When that happens, let + ** sqlite3SelectPrep() do all of the processing for this SELECT. + ** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and + ** this routine in the correct order. + */ + if( (p->selFlags & SF_Expanded)==0 ){ + sqlite3SelectPrep(pParse, p, pOuterNC); + return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune; + } + + isCompound = p->pPrior!=0; + nCompound = 0; + pLeftmost = p; + while( p ){ + assert( (p->selFlags & SF_Expanded)!=0 ); + assert( (p->selFlags & SF_Resolved)==0 ); + p->selFlags |= SF_Resolved; + + /* Resolve the expressions in the LIMIT and OFFSET clauses. These + ** are not allowed to refer to any names, so pass an empty NameContext. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + if( sqlite3ResolveExprNames(&sNC, p->pLimit) || + sqlite3ResolveExprNames(&sNC, p->pOffset) ){ + return WRC_Abort; + } + + /* Set up the local name-context to pass to sqlite3ResolveExprNames() to + ** resolve the result-set expression list. + */ + sNC.allowAgg = 1; + sNC.pSrcList = p->pSrc; + sNC.pNext = pOuterNC; + + /* Resolve names in the result set. */ + pEList = p->pEList; + assert( pEList!=0 ); + for(i=0; inExpr; i++){ + Expr *pX = pEList->a[i].pExpr; + if( sqlite3ResolveExprNames(&sNC, pX) ){ + return WRC_Abort; + } + } + + /* Recursively resolve names in all subqueries + */ + for(i=0; ipSrc->nSrc; i++){ + struct SrcList_item *pItem = &p->pSrc->a[i]; + if( pItem->pSelect ){ + const char *zSavedContext = pParse->zAuthContext; + if( pItem->zName ) pParse->zAuthContext = pItem->zName; + sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); + pParse->zAuthContext = zSavedContext; + if( pParse->nErr || db->mallocFailed ) return WRC_Abort; + } + } + + /* If there are no aggregate functions in the result-set, and no GROUP BY + ** expression, do not allow aggregates in any of the other expressions. 
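A brief illustration of this rule, using a hypothetical table t1:

/*
**   SELECT max(a) FROM t1;             -- aggregate in the result set: the
**                                      -- statement becomes an aggregate
**                                      -- query even without a GROUP BY
**
**   SELECT a FROM t1 WHERE max(b)>0;   -- no aggregate in the result set and
**                                      -- no GROUP BY, so allowAgg is cleared
**                                      -- and resolving the WHERE clause
**                                      -- raises "misuse of aggregate function"
*/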
+ */ + assert( (p->selFlags & SF_Aggregate)==0 ); + pGroupBy = p->pGroupBy; + if( pGroupBy || sNC.hasAgg ){ + p->selFlags |= SF_Aggregate; + }else{ + sNC.allowAgg = 0; + } + + /* If a HAVING clause is present, then there must be a GROUP BY clause. + */ + if( p->pHaving && !pGroupBy ){ + sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); + return WRC_Abort; + } + + /* Add the expression list to the name-context before parsing the + ** other expressions in the SELECT statement. This is so that + ** expressions in the WHERE clause (etc.) can refer to expressions by + ** aliases in the result set. + ** + ** Minor point: If this is the case, then the expression will be + ** re-evaluated for each reference to it. + */ + sNC.pEList = p->pEList; + if( sqlite3ResolveExprNames(&sNC, p->pWhere) || + sqlite3ResolveExprNames(&sNC, p->pHaving) + ){ + return WRC_Abort; + } + + /* The ORDER BY and GROUP BY clauses may not refer to terms in + ** outer queries + */ + sNC.pNext = 0; + sNC.allowAgg = 1; + + /* Process the ORDER BY clause for singleton SELECT statements. + ** The ORDER BY clause for compounds SELECT statements is handled + ** below, after all of the result-sets for all of the elements of + ** the compound have been resolved. + */ + if( !isCompound && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") ){ + return WRC_Abort; + } + if( db->mallocFailed ){ + return WRC_Abort; + } + + /* Resolve the GROUP BY clause. At the same time, make sure + ** the GROUP BY clause does not contain aggregate functions. + */ + if( pGroupBy ){ + struct ExprList_item *pItem; + + if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){ + return WRC_Abort; + } + for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ + if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " + "the GROUP BY clause"); + return WRC_Abort; + } + } + } + + /* Advance to the next term of the compound + */ + p = p->pPrior; + nCompound++; + } + + /* Resolve the ORDER BY on a compound SELECT after all terms of + ** the compound have been resolved. + */ + if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){ + return WRC_Abort; + } + + return WRC_Prune; +} + +/* +** This routine walks an expression tree and resolves references to +** table columns and result-set columns. At the same time, do error +** checking on function usage and set a flag if any aggregate functions +** are seen. +** +** To resolve table columns references we look for nodes (or subtrees) of the +** form X.Y.Z or Y.Z or just Z where +** +** X: The name of a database. Ex: "main" or "temp" or +** the symbolic name assigned to an ATTACH-ed database. +** +** Y: The name of a table in a FROM clause. Or in a trigger +** one of the special names "old" or "new". +** +** Z: The name of a column in table Y. +** +** The node at the root of the subtree is modified as follows: +** +** Expr.op Changed to TK_COLUMN +** Expr.pTab Points to the Table object for X.Y +** Expr.iColumn The column index in X.Y. -1 for the rowid. +** Expr.iTable The VDBE cursor number for X.Y +** +** +** To resolve result-set references, look for expression nodes of the +** form Z (with no X and Y prefix) where the Z matches the right-hand +** size of an AS clause in the result-set of a SELECT. The Z expression +** is replaced by a copy of the left-hand side of the result-set expression. +** Table-name and function resolution occurs on the substituted expression +** tree. 
For example, in: +** +** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x; +** +** The "x" term of the order by is replaced by "a+b" to render: +** +** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b; +** +** Function calls are checked to make sure that the function is +** defined and that the correct number of arguments are specified. +** If the function is an aggregate function, then the pNC->hasAgg is +** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION. +** If an expression contains aggregate functions then the EP_Agg +** property on the expression is set. +** +** An error message is left in pParse if anything is amiss. The number +** if errors is returned. +*/ +int sqlite3ResolveExprNames( + NameContext *pNC, /* Namespace to resolve expressions in. */ + Expr *pExpr /* The expression to be analyzed. */ +){ + int savedHasAgg; + Walker w; + + if( pExpr==0 ) return 0; +#if SQLITE_MAX_EXPR_DEPTH>0 + { + Parse *pParse = pNC->pParse; + if( sqlite3ExprCheckHeight(pParse, pExpr->nHeight+pNC->pParse->nHeight) ){ + return 1; + } + pParse->nHeight += pExpr->nHeight; + } +#endif + savedHasAgg = pNC->hasAgg; + pNC->hasAgg = 0; + w.xExprCallback = resolveExprStep; + w.xSelectCallback = resolveSelectStep; + w.pParse = pNC->pParse; + w.u.pNC = pNC; + sqlite3WalkExpr(&w, pExpr); +#if SQLITE_MAX_EXPR_DEPTH>0 + pNC->pParse->nHeight -= pExpr->nHeight; +#endif + if( pNC->nErr>0 || w.pParse->nErr>0 ){ + ExprSetProperty(pExpr, EP_Error); + } + if( pNC->hasAgg ){ + ExprSetProperty(pExpr, EP_Agg); + }else if( savedHasAgg ){ + pNC->hasAgg = 1; + } + return ExprHasProperty(pExpr, EP_Error); +} + + +/* +** Resolve all names in all expressions of a SELECT and in all +** decendents of the SELECT, including compounds off of p->pPrior, +** subqueries in expressions, and subqueries used as FROM clause +** terms. +** +** See sqlite3ResolveExprNames() for a description of the kinds of +** transformations that occur. +** +** All SELECT statements should have been expanded using +** sqlite3SelectExpand() prior to invoking this routine. +*/ +void sqlite3ResolveSelectNames( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + NameContext *pOuterNC /* Name context for parent SELECT statement */ +){ + Walker w; + + assert( p!=0 ); + w.xExprCallback = resolveExprStep; + w.xSelectCallback = resolveSelectStep; + w.pParse = pParse; + w.u.pNC = pOuterNC; + sqlite3WalkSelect(&w, p); +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/rowset.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/rowset.c --- sqlite3-3.4.2/src/rowset.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/rowset.c 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,424 @@ +/* +** 2008 December 3 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This module implements an object we call a "RowSet". +** +** The RowSet object is a collection of rowids. Rowids +** are inserted into the RowSet in an arbitrary order. Inserts +** can be intermixed with tests to see if a given rowid has been +** previously inserted into the RowSet. +** +** After all inserts are finished, it is possible to extract the +** elements of the RowSet in sorted order. Once this extraction +** process has started, no new elements may be inserted. 
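The primitives named in the remainder of this comment correspond to the functions defined further down in this file. The fragment below is only a sketch of how they fit together; RowSet is internal to SQLite, so the sketch assumes the code is compiled inside the library itself (sqliteInt.h in scope, db a valid sqlite3 connection) and is not part of this patch.

  void *pSpace = sqlite3_malloc(1024);                  /* 8-byte aligned bulk memory  */
  RowSet *pSet = sqlite3RowSetInit(db, pSpace, 1024);   /* CREATE                      */
  sqlite3RowSetInsert(pSet, 42);                        /* INSERT                      */
  sqlite3RowSetInsert(pSet, 7);
  if( sqlite3RowSetTest(pSet, 1, 42) ){                 /* TEST with batch number 1    */
    /* both inserts happened before the batch number changed, so 42 is seen */
  }
  i64 iRowid;
  while( sqlite3RowSetNext(pSet, &iRowid) ){            /* SMALLEST: yields 7, then 42 */
    /* no further sqlite3RowSetInsert() calls are permitted from here on */
  }
  sqlite3RowSetClear(pSet);                             /* DESTROY the chunk list      */
  sqlite3_free(pSpace);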
+** +** Hence, the primitive operations for a RowSet are: +** +** CREATE +** INSERT +** TEST +** SMALLEST +** DESTROY +** +** The CREATE and DESTROY primitives are the constructor and destructor, +** obviously. The INSERT primitive adds a new element to the RowSet. +** TEST checks to see if an element is already in the RowSet. SMALLEST +** extracts the least value from the RowSet. +** +** The INSERT primitive might allocate additional memory. Memory is +** allocated in chunks so most INSERTs do no allocation. There is an +** upper bound on the size of allocated memory. No memory is freed +** until DESTROY. +** +** The TEST primitive includes a "batch" number. The TEST primitive +** will only see elements that were inserted before the last change +** in the batch number. In other words, if an INSERT occurs between +** two TESTs where the TESTs have the same batch nubmer, then the +** value added by the INSERT will not be visible to the second TEST. +** The initial batch number is zero, so if the very first TEST contains +** a non-zero batch number, it will see all prior INSERTs. +** +** No INSERTs may occurs after a SMALLEST. An assertion will fail if +** that is attempted. +** +** The cost of an INSERT is roughly constant. (Sometime new memory +** has to be allocated on an INSERT.) The cost of a TEST with a new +** batch number is O(NlogN) where N is the number of elements in the RowSet. +** The cost of a TEST using the same batch number is O(logN). The cost +** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST +** primitives are constant time. The cost of DESTROY is O(N). +** +** There is an added cost of O(N) when switching between TEST and +** SMALLEST primitives. +** +** $Id: rowset.c,v 1.7 2009/05/22 01:00:13 drh Exp $ +*/ +#include "sqliteInt.h" + + +/* +** Target size for allocation chunks. +*/ +#define ROWSET_ALLOCATION_SIZE 1024 + +/* +** The number of rowset entries per allocation chunk. +*/ +#define ROWSET_ENTRY_PER_CHUNK \ + ((ROWSET_ALLOCATION_SIZE-8)/sizeof(struct RowSetEntry)) + +/* +** Each entry in a RowSet is an instance of the following object. +*/ +struct RowSetEntry { + i64 v; /* ROWID value for this entry */ + struct RowSetEntry *pRight; /* Right subtree (larger entries) or list */ + struct RowSetEntry *pLeft; /* Left subtree (smaller entries) */ +}; + +/* +** RowSetEntry objects are allocated in large chunks (instances of the +** following structure) to reduce memory allocation overhead. The +** chunks are kept on a linked list so that they can be deallocated +** when the RowSet is destroyed. +*/ +struct RowSetChunk { + struct RowSetChunk *pNextChunk; /* Next chunk on list of them all */ + struct RowSetEntry aEntry[ROWSET_ENTRY_PER_CHUNK]; /* Allocated entries */ +}; + +/* +** A RowSet in an instance of the following structure. +** +** A typedef of this structure if found in sqliteInt.h. +*/ +struct RowSet { + struct RowSetChunk *pChunk; /* List of all chunk allocations */ + sqlite3 *db; /* The database connection */ + struct RowSetEntry *pEntry; /* List of entries using pRight */ + struct RowSetEntry *pLast; /* Last entry on the pEntry list */ + struct RowSetEntry *pFresh; /* Source of new entry objects */ + struct RowSetEntry *pTree; /* Binary tree of entries */ + u16 nFresh; /* Number of objects on pFresh */ + u8 isSorted; /* True if pEntry is sorted */ + u8 iBatch; /* Current insert batch */ +}; + +/* +** Turn bulk memory into a RowSet object. N bytes of memory +** are available at pSpace. 
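Rough arithmetic, assuming a typical 64-bit build (figures are illustrative only, not mandated by the code): struct RowSetEntry is an 8-byte rowid plus two 8-byte pointers, i.e. 24 bytes, so ROWSET_ENTRY_PER_CHUNK comes to (1024-8)/24 = 42 entries per chunk; and if sqlite3RowSetInit() is handed N = 1024 bytes, roughly (1024 - ROUND8(sizeof(RowSet)))/24, about 40 entries, are usable before the first chunk allocation becomes necessary.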
The db pointer is used as a memory context +** for any subsequent allocations that need to occur. +** Return a pointer to the new RowSet object. +** +** It must be the case that N is sufficient to make a Rowset. If not +** an assertion fault occurs. +** +** If N is larger than the minimum, use the surplus as an initial +** allocation of entries available to be filled. +*/ +RowSet *sqlite3RowSetInit(sqlite3 *db, void *pSpace, unsigned int N){ + RowSet *p; + assert( N >= ROUND8(sizeof(*p)) ); + p = pSpace; + p->pChunk = 0; + p->db = db; + p->pEntry = 0; + p->pLast = 0; + p->pTree = 0; + p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p); + p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry)); + p->isSorted = 1; + p->iBatch = 0; + return p; +} + +/* +** Deallocate all chunks from a RowSet. This frees all memory that +** the RowSet has allocated over its lifetime. This routine is +** the destructor for the RowSet. +*/ +void sqlite3RowSetClear(RowSet *p){ + struct RowSetChunk *pChunk, *pNextChunk; + for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){ + pNextChunk = pChunk->pNextChunk; + sqlite3DbFree(p->db, pChunk); + } + p->pChunk = 0; + p->nFresh = 0; + p->pEntry = 0; + p->pLast = 0; + p->pTree = 0; + p->isSorted = 1; +} + +/* +** Insert a new value into a RowSet. +** +** The mallocFailed flag of the database connection is set if a +** memory allocation fails. +*/ +void sqlite3RowSetInsert(RowSet *p, i64 rowid){ + struct RowSetEntry *pEntry; /* The new entry */ + struct RowSetEntry *pLast; /* The last prior entry */ + assert( p!=0 ); + if( p->nFresh==0 ){ + struct RowSetChunk *pNew; + pNew = sqlite3DbMallocRaw(p->db, sizeof(*pNew)); + if( pNew==0 ){ + return; + } + pNew->pNextChunk = p->pChunk; + p->pChunk = pNew; + p->pFresh = pNew->aEntry; + p->nFresh = ROWSET_ENTRY_PER_CHUNK; + } + pEntry = p->pFresh++; + p->nFresh--; + pEntry->v = rowid; + pEntry->pRight = 0; + pLast = p->pLast; + if( pLast ){ + if( p->isSorted && rowid<=pLast->v ){ + p->isSorted = 0; + } + pLast->pRight = pEntry; + }else{ + assert( p->pEntry==0 ); /* Fires if INSERT after SMALLEST */ + p->pEntry = pEntry; + } + p->pLast = pEntry; +} + +/* +** Merge two lists of RowSetEntry objects. Remove duplicates. +** +** The input lists are connected via pRight pointers and are +** assumed to each already be in sorted order. +*/ +static struct RowSetEntry *rowSetMerge( + struct RowSetEntry *pA, /* First sorted list to be merged */ + struct RowSetEntry *pB /* Second sorted list to be merged */ +){ + struct RowSetEntry head; + struct RowSetEntry *pTail; + + pTail = &head; + while( pA && pB ){ + assert( pA->pRight==0 || pA->v<=pA->pRight->v ); + assert( pB->pRight==0 || pB->v<=pB->pRight->v ); + if( pA->vv ){ + pTail->pRight = pA; + pA = pA->pRight; + pTail = pTail->pRight; + }else if( pB->vv ){ + pTail->pRight = pB; + pB = pB->pRight; + pTail = pTail->pRight; + }else{ + pA = pA->pRight; + } + } + if( pA ){ + assert( pA->pRight==0 || pA->v<=pA->pRight->v ); + pTail->pRight = pA; + }else{ + assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); + pTail->pRight = pB; + } + return head.pRight; +} + +/* +** Sort all elements on the pEntry list of the RowSet into ascending order. 
+*/ +static void rowSetSort(RowSet *p){ + unsigned int i; + struct RowSetEntry *pEntry; + struct RowSetEntry *aBucket[40]; + + assert( p->isSorted==0 ); + memset(aBucket, 0, sizeof(aBucket)); + while( p->pEntry ){ + pEntry = p->pEntry; + p->pEntry = pEntry->pRight; + pEntry->pRight = 0; + for(i=0; aBucket[i]; i++){ + pEntry = rowSetMerge(aBucket[i], pEntry); + aBucket[i] = 0; + } + aBucket[i] = pEntry; + } + pEntry = 0; + for(i=0; ipEntry = pEntry; + p->pLast = 0; + p->isSorted = 1; +} + + +/* +** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects. +** Convert this tree into a linked list connected by the pRight pointers +** and return pointers to the first and last elements of the new list. +*/ +static void rowSetTreeToList( + struct RowSetEntry *pIn, /* Root of the input tree */ + struct RowSetEntry **ppFirst, /* Write head of the output list here */ + struct RowSetEntry **ppLast /* Write tail of the output list here */ +){ + assert( pIn!=0 ); + if( pIn->pLeft ){ + struct RowSetEntry *p; + rowSetTreeToList(pIn->pLeft, ppFirst, &p); + p->pRight = pIn; + }else{ + *ppFirst = pIn; + } + if( pIn->pRight ){ + rowSetTreeToList(pIn->pRight, &pIn->pRight, ppLast); + }else{ + *ppLast = pIn; + } + assert( (*ppLast)->pRight==0 ); +} + + +/* +** Convert a sorted list of elements (connected by pRight) into a binary +** tree with depth of iDepth. A depth of 1 means the tree contains a single +** node taken from the head of *ppList. A depth of 2 means a tree with +** three nodes. And so forth. +** +** Use as many entries from the input list as required and update the +** *ppList to point to the unused elements of the list. If the input +** list contains too few elements, then construct an incomplete tree +** and leave *ppList set to NULL. +** +** Return a pointer to the root of the constructed binary tree. +*/ +static struct RowSetEntry *rowSetNDeepTree( + struct RowSetEntry **ppList, + int iDepth +){ + struct RowSetEntry *p; /* Root of the new tree */ + struct RowSetEntry *pLeft; /* Left subtree */ + if( *ppList==0 ){ + return 0; + } + if( iDepth==1 ){ + p = *ppList; + *ppList = p->pRight; + p->pLeft = p->pRight = 0; + return p; + } + pLeft = rowSetNDeepTree(ppList, iDepth-1); + p = *ppList; + if( p==0 ){ + return pLeft; + } + p->pLeft = pLeft; + *ppList = p->pRight; + p->pRight = rowSetNDeepTree(ppList, iDepth-1); + return p; +} + +/* +** Convert a sorted list of elements into a binary tree. Make the tree +** as deep as it needs to be in order to contain the entire list. +*/ +static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ + int iDepth; /* Depth of the tree so far */ + struct RowSetEntry *p; /* Current tree root */ + struct RowSetEntry *pLeft; /* Left subtree */ + + assert( pList!=0 ); + p = pList; + pList = p->pRight; + p->pLeft = p->pRight = 0; + for(iDepth=1; pList; iDepth++){ + pLeft = p; + p = pList; + pList = p->pRight; + p->pLeft = pLeft; + p->pRight = rowSetNDeepTree(&pList, iDepth); + } + return p; +} + +/* +** Convert the list in p->pEntry into a sorted list if it is not +** sorted already. If there is a binary tree on p->pTree, then +** convert it into a list too and merge it into the p->pEntry list. +*/ +static void rowSetToList(RowSet *p){ + if( !p->isSorted ){ + rowSetSort(p); + } + if( p->pTree ){ + struct RowSetEntry *pHead, *pTail; + rowSetTreeToList(p->pTree, &pHead, &pTail); + p->pTree = 0; + p->pEntry = rowSetMerge(p->pEntry, pHead); + } +} + +/* +** Extract the smallest element from the RowSet. +** Write the element into *pRowid. 
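As a worked example of rowSetListToTree() and rowSetNDeepTree() above, with arbitrarily chosen values:

/*
**   input list:   1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7
**
**   after the iDepth==1 pass:      2        (three nodes consumed)
**                                 / \
**                                1   3
**
**   after the iDepth==2 pass:      4        (all seven nodes, depth 3)
**                                 / \
**                                2   6
**                               / \ / \
**                              1  3 5  7
*/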
Return 1 on success. Return +** 0 if the RowSet is already empty. +** +** After this routine has been called, the sqlite3RowSetInsert() +** routine may not be called again. +*/ +int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ + rowSetToList(p); + if( p->pEntry ){ + *pRowid = p->pEntry->v; + p->pEntry = p->pEntry->pRight; + if( p->pEntry==0 ){ + sqlite3RowSetClear(p); + } + return 1; + }else{ + return 0; + } +} + +/* +** Check to see if element iRowid was inserted into the the rowset as +** part of any insert batch prior to iBatch. Return 1 or 0. +*/ +int sqlite3RowSetTest(RowSet *pRowSet, u8 iBatch, sqlite3_int64 iRowid){ + struct RowSetEntry *p; + if( iBatch!=pRowSet->iBatch ){ + if( pRowSet->pEntry ){ + rowSetToList(pRowSet); + pRowSet->pTree = rowSetListToTree(pRowSet->pEntry); + pRowSet->pEntry = 0; + pRowSet->pLast = 0; + } + pRowSet->iBatch = iBatch; + } + p = pRowSet->pTree; + while( p ){ + if( p->vpRight; + }else if( p->v>iRowid ){ + p = p->pLeft; + }else{ + return 1; + } + } + return 0; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/select.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/select.c --- sqlite3-3.4.2/src/select.c 2007-07-18 19:17:12.000000000 +0100 +++ sqlite3-3.6.16/src/select.c 2009-06-25 12:35:51.000000000 +0100 @@ -12,7 +12,7 @@ ** This file contains C code routines that are called by the parser ** to handle SELECT statements in SQLite. ** -** $Id: select.c,v 1.354 2007/07/18 18:17:12 drh Exp $ +** $Id: select.c,v 1.524 2009/06/12 03:27:27 drh Exp $ */ #include "sqliteInt.h" @@ -21,16 +21,27 @@ ** Delete all the content of a Select structure but do not deallocate ** the select structure itself. */ -static void clearSelect(Select *p){ - sqlite3ExprListDelete(p->pEList); - sqlite3SrcListDelete(p->pSrc); - sqlite3ExprDelete(p->pWhere); - sqlite3ExprListDelete(p->pGroupBy); - sqlite3ExprDelete(p->pHaving); - sqlite3ExprListDelete(p->pOrderBy); - sqlite3SelectDelete(p->pPrior); - sqlite3ExprDelete(p->pLimit); - sqlite3ExprDelete(p->pOffset); +static void clearSelect(sqlite3 *db, Select *p){ + sqlite3ExprListDelete(db, p->pEList); + sqlite3SrcListDelete(db, p->pSrc); + sqlite3ExprDelete(db, p->pWhere); + sqlite3ExprListDelete(db, p->pGroupBy); + sqlite3ExprDelete(db, p->pHaving); + sqlite3ExprListDelete(db, p->pOrderBy); + sqlite3SelectDelete(db, p->pPrior); + sqlite3ExprDelete(db, p->pLimit); + sqlite3ExprDelete(db, p->pOffset); +} + +/* +** Initialize a SelectDest structure. +*/ +void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){ + pDest->eDest = (u8)eDest; + pDest->iParm = iParm; + pDest->affinity = 0; + pDest->iMem = 0; + pDest->nMem = 0; } @@ -39,6 +50,7 @@ ** structure. */ Select *sqlite3SelectNew( + Parse *pParse, /* Parsing context */ ExprList *pEList, /* which columns to include in the result */ SrcList *pSrc, /* the FROM clause -- which tables to scan */ Expr *pWhere, /* the WHERE clause */ @@ -51,14 +63,15 @@ ){ Select *pNew; Select standin; - pNew = sqliteMalloc( sizeof(*pNew) ); - assert( !pOffset || pLimit ); /* Can't have OFFSET without LIMIT. 
*/ + sqlite3 *db = pParse->db; + pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); + assert( db->mallocFailed || !pOffset || pLimit ); /* OFFSET implies LIMIT */ if( pNew==0 ){ pNew = &standin; memset(pNew, 0, sizeof(*pNew)); } if( pEList==0 ){ - pEList = sqlite3ExprListAppend(0, sqlite3Expr(TK_ALL,0,0,0), 0); + pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db,TK_ALL,0)); } pNew->pEList = pEList; pNew->pSrc = pSrc; @@ -66,18 +79,17 @@ pNew->pGroupBy = pGroupBy; pNew->pHaving = pHaving; pNew->pOrderBy = pOrderBy; - pNew->isDistinct = isDistinct; + pNew->selFlags = isDistinct ? SF_Distinct : 0; pNew->op = TK_SELECT; - assert( pOffset==0 || pLimit!=0 ); pNew->pLimit = pLimit; pNew->pOffset = pOffset; - pNew->iLimit = -1; - pNew->iOffset = -1; + assert( pOffset==0 || pLimit!=0 ); pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; pNew->addrOpenEphm[2] = -1; - if( pNew==&standin) { - clearSelect(pNew); + if( db->mallocFailed ) { + clearSelect(db, pNew); + if( pNew!=&standin ) sqlite3DbFree(db, pNew); pNew = 0; } return pNew; @@ -86,10 +98,10 @@ /* ** Delete the given Select structure and all of its substructures. */ -void sqlite3SelectDelete(Select *p){ +void sqlite3SelectDelete(sqlite3 *db, Select *p){ if( p ){ - clearSelect(p); - sqliteFree(p); + clearSelect(db, p); + sqlite3DbFree(db, p); } } @@ -114,18 +126,20 @@ int jointype = 0; Token *apAll[3]; Token *p; + /* 0123456789 123456789 123456789 123 */ + static const char zKeyText[] = "naturaleftouterightfullinnercross"; static const struct { - const char zKeyword[8]; - u8 nChar; - u8 code; - } keywords[] = { - { "natural", 7, JT_NATURAL }, - { "left", 4, JT_LEFT|JT_OUTER }, - { "right", 5, JT_RIGHT|JT_OUTER }, - { "full", 4, JT_LEFT|JT_RIGHT|JT_OUTER }, - { "outer", 5, JT_OUTER }, - { "inner", 5, JT_INNER }, - { "cross", 5, JT_INNER|JT_CROSS }, + u8 i; /* Beginning of keyword text in zKeyText[] */ + u8 nChar; /* Length of the keyword in characters */ + u8 code; /* Join type mask */ + } aKeyword[] = { + /* natural */ { 0, 7, JT_NATURAL }, + /* left */ { 6, 4, JT_LEFT|JT_OUTER }, + /* outer */ { 10, 5, JT_OUTER }, + /* right */ { 14, 5, JT_RIGHT|JT_OUTER }, + /* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER }, + /* inner */ { 23, 5, JT_INNER }, + /* cross */ { 28, 5, JT_INNER|JT_CROSS }, }; int i, j; apAll[0] = pA; @@ -133,14 +147,15 @@ apAll[2] = pC; for(i=0; i<3 && apAll[i]; i++){ p = apAll[i]; - for(j=0; jn==keywords[j].nChar - && sqlite3StrNICmp((char*)p->z, keywords[j].zKeyword, p->n)==0 ){ - jointype |= keywords[j].code; + for(j=0; jn==aKeyword[j].nChar + && sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){ + jointype |= aKeyword[j].code; break; } } - if( j>=sizeof(keywords)/sizeof(keywords[0]) ){ + testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 ); + if( j>=ArraySize(aKeyword) ){ jointype |= JT_ERROR; break; } @@ -149,14 +164,14 @@ (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) || (jointype & JT_ERROR)!=0 ){ - const char *zSp1 = " "; - const char *zSp2 = " "; - if( pB==0 ){ zSp1++; } - if( pC==0 ){ zSp2++; } + const char *zSp = " "; + assert( pB!=0 ); + if( pC==0 ){ zSp++; } sqlite3ErrorMsg(pParse, "unknown or unsupported join type: " - "%T%s%T%s%T", pA, zSp1, pB, zSp2, pC); + "%T %T%s%T", pA, pB, zSp, pC); jointype = JT_INNER; - }else if( jointype & JT_RIGHT ){ + }else if( (jointype & JT_OUTER)!=0 + && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){ sqlite3ErrorMsg(pParse, "RIGHT and FULL OUTER JOINs are not currently supported"); jointype = JT_INNER; @@ -177,76 +192,51 @@ } /* -** Set the 
value of a token to a '\000'-terminated string. -*/ -static void setToken(Token *p, const char *z){ - p->z = (u8*)z; - p->n = z ? strlen(z) : 0; - p->dyn = 0; -} - -/* -** Set the token to the double-quoted and escaped version of the string pointed -** to by z. For example; -** -** {a"bc} -> {"a""bc"} -*/ -static void setQuotedToken(Token *p, const char *z){ - p->z = (u8 *)sqlite3MPrintf("\"%w\"", z); - p->dyn = 1; - if( p->z ){ - p->n = strlen((char *)p->z); - } -} - -/* ** Create an expression node for an identifier with the name of zName */ -Expr *sqlite3CreateIdExpr(const char *zName){ - Token dummy; - setToken(&dummy, zName); - return sqlite3Expr(TK_ID, 0, 0, &dummy); +Expr *sqlite3CreateIdExpr(Parse *pParse, const char *zName){ + return sqlite3Expr(pParse->db, TK_ID, zName); } - /* ** Add a term to the WHERE expression in *ppExpr that requires the ** zCol column to be equal in the two tables pTab1 and pTab2. */ static void addWhereTerm( + Parse *pParse, /* Parsing context */ const char *zCol, /* Name of the column */ const Table *pTab1, /* First table */ const char *zAlias1, /* Alias for first table. May be NULL */ const Table *pTab2, /* Second table */ const char *zAlias2, /* Alias for second table. May be NULL */ int iRightJoinTable, /* VDBE cursor for the right table */ - Expr **ppExpr /* Add the equality term to this expression */ + Expr **ppExpr, /* Add the equality term to this expression */ + int isOuterJoin /* True if dealing with an OUTER join */ ){ Expr *pE1a, *pE1b, *pE1c; Expr *pE2a, *pE2b, *pE2c; Expr *pE; - pE1a = sqlite3CreateIdExpr(zCol); - pE2a = sqlite3CreateIdExpr(zCol); + pE1a = sqlite3CreateIdExpr(pParse, zCol); + pE2a = sqlite3CreateIdExpr(pParse, zCol); if( zAlias1==0 ){ zAlias1 = pTab1->zName; } - pE1b = sqlite3CreateIdExpr(zAlias1); + pE1b = sqlite3CreateIdExpr(pParse, zAlias1); if( zAlias2==0 ){ zAlias2 = pTab2->zName; } - pE2b = sqlite3CreateIdExpr(zAlias2); - pE1c = sqlite3ExprOrFree(TK_DOT, pE1b, pE1a, 0); - pE2c = sqlite3ExprOrFree(TK_DOT, pE2b, pE2a, 0); - pE = sqlite3ExprOrFree(TK_EQ, pE1c, pE2c, 0); - if( pE ){ + pE2b = sqlite3CreateIdExpr(pParse, zAlias2); + pE1c = sqlite3PExpr(pParse, TK_DOT, pE1b, pE1a, 0); + pE2c = sqlite3PExpr(pParse, TK_DOT, pE2b, pE2a, 0); + pE = sqlite3PExpr(pParse, TK_EQ, pE1c, pE2c, 0); + if( pE && isOuterJoin ){ ExprSetProperty(pE, EP_FromJoin); - pE->iRightJoinTable = iRightJoinTable; - } - pE = sqlite3ExprAnd(*ppExpr, pE); - if( pE ){ - *ppExpr = pE; + assert( !ExprHasAnyProperty(pE, EP_TokenOnly|EP_Reduced) ); + ExprSetIrreducible(pE); + pE->iRightJoinTable = (i16)iRightJoinTable; } + *ppExpr = sqlite3ExprAnd(pParse->db,*ppExpr, pE); } /* @@ -278,7 +268,9 @@ static void setJoinExpr(Expr *p, int iTable){ while( p ){ ExprSetProperty(p, EP_FromJoin); - p->iRightJoinTable = iTable; + assert( !ExprHasAnyProperty(p, EP_TokenOnly|EP_Reduced) ); + ExprSetIrreducible(p); + p->iRightJoinTable = (i16)iTable; setJoinExpr(p->pLeft, iTable); p = p->pRight; } @@ -310,8 +302,10 @@ for(i=0; inSrc-1; i++, pRight++, pLeft++){ Table *pLeftTab = pLeft->pTab; Table *pRightTab = pRight->pTab; + int isOuter; - if( pLeftTab==0 || pRightTab==0 ) continue; + if( NEVER(pLeftTab==0 || pRightTab==0) ) continue; + isOuter = (pRight->jointype & JT_OUTER)!=0; /* When the NATURAL keyword is present, add WHERE clause terms for ** every column that the two tables have in common. 
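A hypothetical example of the implicit equality terms this adds (and, for outer joins, of why they must be tagged with EP_FromJoin as in addWhereTerm() above):

/*
**   CREATE TABLE t1(a, b);
**   CREATE TABLE t2(b, c);
**
**   SELECT * FROM t1 NATURAL JOIN t2;
**     -- behaves like:  SELECT t1.a, t1.b, t2.c FROM t1 JOIN t2 WHERE t1.b=t2.b
**
**   SELECT * FROM t1 NATURAL LEFT JOIN t2;
**     -- the generated t1.b=t2.b term carries EP_FromJoin, so it acts as part
**     -- of the join condition: rows of t1 with no matching t2.b still appear,
**     -- with NULLs in t2's columns, instead of being filtered out by WHERE
*/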
@@ -325,9 +319,9 @@ for(j=0; jnCol; j++){ char *zName = pLeftTab->aCol[j].zName; if( columnIndex(pRightTab, zName)>=0 ){ - addWhereTerm(zName, pLeftTab, pLeft->zAlias, + addWhereTerm(pParse, zName, pLeftTab, pLeft->zAlias, pRightTab, pRight->zAlias, - pRight->iCursor, &p->pWhere); + pRight->iCursor, &p->pWhere, isOuter); } } @@ -345,8 +339,8 @@ ** an AND operator. */ if( pRight->pOn ){ - setJoinExpr(pRight->pOn, pRight->iCursor); - p->pWhere = sqlite3ExprAnd(p->pWhere, pRight->pOn); + if( isOuter ) setJoinExpr(pRight->pOn, pRight->iCursor); + p->pWhere = sqlite3ExprAnd(pParse->db, p->pWhere, pRight->pOn); pRight->pOn = 0; } @@ -366,9 +360,9 @@ "not present in both tables", zName); return 1; } - addWhereTerm(zName, pLeftTab, pLeft->zAlias, + addWhereTerm(pParse, zName, pLeftTab, pLeft->zAlias, pRightTab, pRight->zAlias, - pRight->iCursor, &p->pWhere); + pRight->iCursor, &p->pWhere, isOuter); } } } @@ -382,24 +376,37 @@ static void pushOntoSorter( Parse *pParse, /* Parser context */ ExprList *pOrderBy, /* The ORDER BY clause */ - Select *pSelect /* The whole SELECT statement */ + Select *pSelect, /* The whole SELECT statement */ + int regData /* Register holding data to be sorted */ ){ Vdbe *v = pParse->pVdbe; - sqlite3ExprCodeExprList(pParse, pOrderBy); - sqlite3VdbeAddOp(v, OP_Sequence, pOrderBy->iECursor, 0); - sqlite3VdbeAddOp(v, OP_Pull, pOrderBy->nExpr + 1, 0); - sqlite3VdbeAddOp(v, OP_MakeRecord, pOrderBy->nExpr + 2, 0); - sqlite3VdbeAddOp(v, OP_IdxInsert, pOrderBy->iECursor, 0); - if( pSelect->iLimit>=0 ){ + int nExpr = pOrderBy->nExpr; + int regBase = sqlite3GetTempRange(pParse, nExpr+2); + int regRecord = sqlite3GetTempReg(pParse); + sqlite3ExprCacheClear(pParse); + sqlite3ExprCodeExprList(pParse, pOrderBy, regBase, 0); + sqlite3VdbeAddOp2(v, OP_Sequence, pOrderBy->iECursor, regBase+nExpr); + sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+1, 1); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nExpr + 2, regRecord); + sqlite3VdbeAddOp2(v, OP_IdxInsert, pOrderBy->iECursor, regRecord); + sqlite3ReleaseTempReg(pParse, regRecord); + sqlite3ReleaseTempRange(pParse, regBase, nExpr+2); + if( pSelect->iLimit ){ int addr1, addr2; - addr1 = sqlite3VdbeAddOp(v, OP_IfMemZero, pSelect->iLimit+1, 0); - sqlite3VdbeAddOp(v, OP_MemIncr, -1, pSelect->iLimit+1); - addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); + int iLimit; + if( pSelect->iOffset ){ + iLimit = pSelect->iOffset+1; + }else{ + iLimit = pSelect->iLimit; + } + addr1 = sqlite3VdbeAddOp1(v, OP_IfZero, iLimit); + sqlite3VdbeAddOp2(v, OP_AddImm, iLimit, -1); + addr2 = sqlite3VdbeAddOp0(v, OP_Goto); sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp(v, OP_Last, pOrderBy->iECursor, 0); - sqlite3VdbeAddOp(v, OP_Delete, pOrderBy->iECursor, 0); + sqlite3VdbeAddOp1(v, OP_Last, pOrderBy->iECursor); + sqlite3VdbeAddOp1(v, OP_Delete, pOrderBy->iECursor); sqlite3VdbeJumpHere(v, addr2); - pSelect->iLimit = -1; + pSelect->iLimit = 0; } } @@ -409,25 +416,21 @@ static void codeOffset( Vdbe *v, /* Generate code into this VM */ Select *p, /* The SELECT statement being coded */ - int iContinue, /* Jump here to skip the current record */ - int nPop /* Number of times to pop stack when jumping */ + int iContinue /* Jump here to skip the current record */ ){ - if( p->iOffset>=0 && iContinue!=0 ){ + if( p->iOffset && iContinue!=0 ){ int addr; - sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iOffset); - addr = sqlite3VdbeAddOp(v, OP_IfMemNeg, p->iOffset, 0); - if( nPop>0 ){ - sqlite3VdbeAddOp(v, OP_Pop, nPop, 0); - } - sqlite3VdbeAddOp(v, OP_Goto, 0, iContinue); - 
VdbeComment((v, "# skip OFFSET records")); + sqlite3VdbeAddOp2(v, OP_AddImm, p->iOffset, -1); + addr = sqlite3VdbeAddOp1(v, OP_IfNeg, p->iOffset); + sqlite3VdbeAddOp2(v, OP_Goto, 0, iContinue); + VdbeComment((v, "skip OFFSET records")); sqlite3VdbeJumpHere(v, addr); } } /* -** Add code that will check to make sure the top N elements of the -** stack are distinct. iTab is a sorting index that holds previously +** Add code that will check to make sure the N registers starting at iMem +** form a distinct entry. iTab is a sorting index that holds previously ** seen combinations of the N values. A new entry is made in iTab ** if the current N values are new. ** @@ -435,17 +438,21 @@ ** stack if the top N elements are not distinct. */ static void codeDistinct( - Vdbe *v, /* Generate code into this VM */ + Parse *pParse, /* Parsing and code generating context */ int iTab, /* A sorting index used to test for distinctness */ int addrRepeat, /* Jump to here if not distinct */ - int N /* The top N elements of the stack must be distinct */ + int N, /* Number of elements */ + int iMem /* First element */ ){ - sqlite3VdbeAddOp(v, OP_MakeRecord, -N, 0); - sqlite3VdbeAddOp(v, OP_Distinct, iTab, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeAddOp(v, OP_Pop, N+1, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, addrRepeat); - VdbeComment((v, "# skip indistinct records")); - sqlite3VdbeAddOp(v, OP_IdxInsert, iTab, 0); + Vdbe *v; + int r1; + + v = pParse->pVdbe; + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1); + sqlite3VdbeAddOp3(v, OP_Found, iTab, addrRepeat, r1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1); + sqlite3ReleaseTempReg(pParse, r1); } /* @@ -454,7 +461,12 @@ ** column. We do this in a subroutine because the error occurs in multiple ** places. */ -static int checkForMultiColumnSelectError(Parse *pParse, int eDest, int nExpr){ +static int checkForMultiColumnSelectError( + Parse *pParse, /* Parse context. */ + SelectDest *pDest, /* Destination of SELECT results */ + int nExpr /* Number of result columns returned by SELECT */ +){ + int eDest = pDest->eDest; if( nExpr>1 && (eDest==SRT_Mem || eDest==SRT_Set) ){ sqlite3ErrorMsg(pParse, "only a single result allowed for " "a SELECT that is part of an expression"); @@ -473,7 +485,7 @@ ** then data is pulled from srcTab and pEList is used only to get the ** datatypes for each column. 
*/ -static int selectInnerLoop( +static void selectInnerLoop( Parse *pParse, /* The parser context */ Select *p, /* The complete select statement being coded */ ExprList *pEList, /* List of values being extracted */ @@ -481,37 +493,53 @@ int nColumn, /* Number of columns in the source table */ ExprList *pOrderBy, /* If not NULL, sort results using this key */ int distinct, /* If >=0, make sure results are distinct */ - int eDest, /* How to dispose of the results */ - int iParm, /* An argument to the disposal method */ + SelectDest *pDest, /* How to dispose of the results */ int iContinue, /* Jump here to continue with next row */ - int iBreak, /* Jump here to break out of the inner loop */ - char *aff /* affinity string if eDest is SRT_Union */ + int iBreak /* Jump here to break out of the inner loop */ ){ Vdbe *v = pParse->pVdbe; int i; int hasDistinct; /* True if the DISTINCT keyword is present */ + int regResult; /* Start of memory holding result set */ + int eDest = pDest->eDest; /* How to dispose of results */ + int iParm = pDest->iParm; /* First argument to disposal method */ + int nResultCol; /* Number of result columns */ - if( v==0 ) return 0; + assert( v ); + if( NEVER(v==0) ) return; assert( pEList!=0 ); - - /* If there was a LIMIT clause on the SELECT statement, then do the check - ** to see if this row should be output. - */ - hasDistinct = distinct>=0 && pEList->nExpr>0; + hasDistinct = distinct>=0; if( pOrderBy==0 && !hasDistinct ){ - codeOffset(v, p, iContinue, 0); + codeOffset(v, p, iContinue); } /* Pull the requested columns. */ if( nColumn>0 ){ + nResultCol = nColumn; + }else{ + nResultCol = pEList->nExpr; + } + if( pDest->iMem==0 ){ + pDest->iMem = pParse->nMem+1; + pDest->nMem = nResultCol; + pParse->nMem += nResultCol; + }else{ + assert( pDest->nMem==nResultCol ); + } + regResult = pDest->iMem; + if( nColumn>0 ){ for(i=0; inExpr; - sqlite3ExprCodeExprList(pParse, pEList); + }else if( eDest!=SRT_Exists ){ + /* If the destination is an EXISTS(...) expression, the actual + ** values returned by the SELECT are not required. + */ + sqlite3ExprCacheClear(pParse); + sqlite3ExprCodeExprList(pParse, pEList, regResult, eDest==SRT_Output); } + nColumn = nResultCol; /* If the DISTINCT keyword was present on the SELECT statement ** and this row has been seen before, then do not make this row @@ -520,14 +548,14 @@ if( hasDistinct ){ assert( pEList!=0 ); assert( pEList->nExpr==nColumn ); - codeDistinct(v, distinct, iContinue, nColumn); + codeDistinct(pParse, distinct, iContinue, nColumn, regResult); if( pOrderBy==0 ){ - codeOffset(v, p, iContinue, nColumn); + codeOffset(v, p, iContinue); } } - if( checkForMultiColumnSelectError(pParse, eDest, pEList->nExpr) ){ - return 0; + if( checkForMultiColumnSelectError(pParse, pDest, pEList->nExpr) ){ + return; } switch( eDest ){ @@ -536,11 +564,11 @@ */ #ifndef SQLITE_OMIT_COMPOUND_SELECT case SRT_Union: { - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - if( aff ){ - sqlite3VdbeChangeP3(v, -1, aff, P3_STATIC); - } - sqlite3VdbeAddOp(v, OP_IdxInsert, iParm, 0); + int r1; + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); + sqlite3ReleaseTempReg(pParse, r1); break; } @@ -549,11 +577,7 @@ ** the temporary table iParm. 
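
From this point on, every disposal site receives a single SelectDest pointer instead of the old eDest/iParm/aff argument trio. Only the fields eDest, affinity, iParm, iMem and nMem are visible in this patch, so the layout and initializer below are a plausible reconstruction for illustration, not a verbatim copy of the header.

/* Bundled description of where SELECT results should go. */
typedef struct SelectDest {
  unsigned char eDest;  /* Disposal method: SRT_Output, SRT_Set, SRT_Table... */
  char affinity;        /* Affinity used when eDest==SRT_Set */
  int iParm;            /* Cursor number or register used by the method */
  int iMem;             /* Base register holding one row of results */
  int nMem;             /* Number of registers in that row */
} SelectDest;

/* Counterpart of the sqlite3SelectDestInit() calls seen later in the diff:
** record the method and leave the result-row bookkeeping unset. */
static void selectDestInit(SelectDest *pDest, int eDest, int iParm){
  pDest->eDest = (unsigned char)eDest;
  pDest->affinity = 0;
  pDest->iParm = iParm;
  pDest->iMem = 0;
  pDest->nMem = 0;
}

selectInnerLoop() then assigns pDest->iMem lazily the first time it runs, which is why the new code checks pDest->iMem==0 before allocating the result registers.
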
*/ case SRT_Except: { - int addr; - addr = sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - sqlite3VdbeChangeP3(v, -1, aff, P3_STATIC); - sqlite3VdbeAddOp(v, OP_NotFound, iParm, addr+3); - sqlite3VdbeAddOp(v, OP_Delete, iParm, 0); + sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nColumn); break; } #endif @@ -562,14 +586,20 @@ */ case SRT_Table: case SRT_EphemTab: { - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); + int r1 = sqlite3GetTempReg(pParse); + testcase( eDest==SRT_Table ); + testcase( eDest==SRT_EphemTab ); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1); if( pOrderBy ){ - pushOntoSorter(pParse, pOrderBy, p); + pushOntoSorter(pParse, pOrderBy, p, r1); }else{ - sqlite3VdbeAddOp(v, OP_NewRowid, iParm, 0); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); - sqlite3VdbeAddOp(v, OP_Insert, iParm, OPFLAG_APPEND); + int r2 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2); + sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3ReleaseTempReg(pParse, r2); } + sqlite3ReleaseTempReg(pParse, r1); break; } @@ -579,33 +609,28 @@ ** item into the set table with bogus data. */ case SRT_Set: { - int addr1 = sqlite3VdbeCurrentAddr(v); - int addr2; - assert( nColumn==1 ); - sqlite3VdbeAddOp(v, OP_NotNull, -1, addr1+3); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); - p->affinity = sqlite3CompareAffinity(pEList->a[0].pExpr,(iParm>>16)&0xff); + p->affinity = sqlite3CompareAffinity(pEList->a[0].pExpr, pDest->affinity); if( pOrderBy ){ /* At first glance you would think we could optimize out the ** ORDER BY in this case since the order of entries in the set ** does not matter. But there might be a LIMIT clause, in which ** case the order does matter */ - pushOntoSorter(pParse, pOrderBy, p); + pushOntoSorter(pParse, pOrderBy, p, regResult); }else{ - sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &p->affinity, 1); - sqlite3VdbeAddOp(v, OP_IdxInsert, (iParm&0x0000FFFF), 0); + int r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, 1, r1, &p->affinity, 1); + sqlite3ExprCacheAffinityChange(pParse, regResult, 1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); + sqlite3ReleaseTempReg(pParse, r1); } - sqlite3VdbeJumpHere(v, addr2); break; } /* If any row exist in the result set, record that fact and abort. */ case SRT_Exists: { - sqlite3VdbeAddOp(v, OP_MemInt, 1, iParm); - sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); + sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm); /* The LIMIT clause will terminate the loop for us */ break; } @@ -617,9 +642,9 @@ case SRT_Mem: { assert( nColumn==1 ); if( pOrderBy ){ - pushOntoSorter(pParse, pOrderBy, p); + pushOntoSorter(pParse, pOrderBy, p, regResult); }else{ - sqlite3VdbeAddOp(v, OP_MemStore, iParm, 1); + sqlite3ExprCodeMove(pParse, regResult, iParm, 1); /* The LIMIT clause will jump out of the loop for us */ } break; @@ -630,15 +655,20 @@ ** case of a subroutine, the subroutine itself is responsible for ** popping the data from the stack. 
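
The SRT_Exists and SRT_Mem branches lean on the surrounding LIMIT machinery to stop the scan after the first row, so the inner loop only has to park a value in a register. At the SQL level this is the familiar behaviour of EXISTS(...) and of scalar subqueries; a small check against the public API, with a table invented for the example:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(7),(8),(9);",
               0, 0, 0);
  /* EXISTS produces 0 or 1; the scalar subquery yields only one row. */
  sqlite3_prepare_v2(db,
      "SELECT EXISTS(SELECT x FROM t WHERE x>8), (SELECT x FROM t)",
      -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("exists=%d scalar=%d\n",
           sqlite3_column_int(pStmt, 0), sqlite3_column_int(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
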
*/ - case SRT_Subroutine: - case SRT_Callback: { + case SRT_Coroutine: + case SRT_Output: { + testcase( eDest==SRT_Coroutine ); + testcase( eDest==SRT_Output ); if( pOrderBy ){ - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - pushOntoSorter(pParse, pOrderBy, p); - }else if( eDest==SRT_Subroutine ){ - sqlite3VdbeAddOp(v, OP_Gosub, 0, iParm); + int r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nColumn, r1); + pushOntoSorter(pParse, pOrderBy, p, r1); + sqlite3ReleaseTempReg(pParse, r1); + }else if( eDest==SRT_Coroutine ){ + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iParm); }else{ - sqlite3VdbeAddOp(v, OP_Callback, nColumn, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nColumn); + sqlite3ExprCacheAffinityChange(pParse, regResult, nColumn); } break; } @@ -651,7 +681,6 @@ */ default: { assert( eDest==SRT_Discard ); - sqlite3VdbeAddOp(v, OP_Pop, nColumn, 0); break; } #endif @@ -659,11 +688,12 @@ /* Jump to the end of the loop if the LIMIT is reached. */ - if( p->iLimit>=0 && pOrderBy==0 ){ - sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iLimit); - sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, iBreak); + if( p->iLimit ){ + assert( pOrderBy==0 ); /* If there is an ORDER BY, the call to + ** pushOntoSorter() would have cleared p->iLimit */ + sqlite3VdbeAddOp2(v, OP_AddImm, p->iLimit, -1); + sqlite3VdbeAddOp2(v, OP_IfZero, p->iLimit, iBreak); } - return 0; } /* @@ -678,8 +708,8 @@ ** ** Space to hold the KeyInfo structure is obtain from malloc. The calling ** function is responsible for seeing that this structure is eventually -** freed. Add the KeyInfo structure to the P3 field of an opcode using -** P3_KEYINFO_HANDOFF is the usual way of dealing with this. +** freed. Add the KeyInfo structure to the P4 field of an opcode using +** P4_KEYINFO_HANDOFF is the usual way of dealing with this. */ static KeyInfo *keyInfoFromExprList(Parse *pParse, ExprList *pList){ sqlite3 *db = pParse->db; @@ -689,11 +719,12 @@ int i; nExpr = pList->nExpr; - pInfo = sqliteMalloc( sizeof(*pInfo) + nExpr*(sizeof(CollSeq*)+1) ); + pInfo = sqlite3DbMallocZero(db, sizeof(*pInfo) + nExpr*(sizeof(CollSeq*)+1) ); if( pInfo ){ pInfo->aSortOrder = (u8*)&pInfo->aColl[nExpr]; - pInfo->nField = nExpr; + pInfo->nField = (u16)nExpr; pInfo->enc = ENC(db); + pInfo->db = db; for(i=0, pItem=pList->a; ipExpr); @@ -715,93 +746,95 @@ ** routine generates the code needed to do that. 
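
keyInfoFromExprList() still packs the KeyInfo header, the array of CollSeq pointers and the per-column sort-order bytes into a single allocation; the visible changes are the db-aware allocator and the P3 to P4 rename. Below is a stripped-down sketch of that single-allocation layout, with the types reduced to the bare minimum for illustration rather than the real structure.

#include <stdlib.h>
#include <string.h>

typedef struct CollSeq CollSeq;   /* Opaque in this sketch */
typedef unsigned char u8;
typedef unsigned short u16;

typedef struct KeyInfoLike {
  u16 nField;          /* Number of key columns */
  u8 *aSortOrder;      /* One ASC/DESC flag per column */
  CollSeq *aColl[1];   /* Collating sequence per column (array grows) */
} KeyInfoLike;

/* One malloc() covers the struct, the CollSeq pointer array and the
** sort-order bytes appended after it, mirroring the
** sizeof(*pInfo) + nExpr*(sizeof(CollSeq*)+1) size computation above. */
static KeyInfoLike *keyInfoAlloc(int nExpr){
  KeyInfoLike *p = malloc(sizeof(*p) + nExpr*(sizeof(CollSeq*)+1));
  if( p ){
    memset(p, 0, sizeof(*p));
    p->nField = (u16)nExpr;
    p->aSortOrder = (u8*)&p->aColl[nExpr];
    memset(p->aColl, 0, nExpr*sizeof(CollSeq*));
    memset(p->aSortOrder, 0, nExpr);
  }
  return p;
}

int main(void){
  KeyInfoLike *p = keyInfoAlloc(3);
  free(p);
  return 0;
}

Handing the structure to an opcode with P4_KEYINFO_HANDOFF then transfers ownership of that one allocation to the VDBE, so the caller never frees it.
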
*/ static void generateSortTail( - Parse *pParse, /* Parsing context */ - Select *p, /* The SELECT statement */ - Vdbe *v, /* Generate code into this VDBE */ - int nColumn, /* Number of columns of data */ - int eDest, /* Write the sorted results here */ - int iParm /* Optional parameter associated with eDest */ + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement */ + Vdbe *v, /* Generate code into this VDBE */ + int nColumn, /* Number of columns of data */ + SelectDest *pDest /* Write the sorted results here */ ){ - int brk = sqlite3VdbeMakeLabel(v); - int cont = sqlite3VdbeMakeLabel(v); + int addrBreak = sqlite3VdbeMakeLabel(v); /* Jump here to exit loop */ + int addrContinue = sqlite3VdbeMakeLabel(v); /* Jump here for next cycle */ int addr; int iTab; int pseudoTab = 0; ExprList *pOrderBy = p->pOrderBy; + int eDest = pDest->eDest; + int iParm = pDest->iParm; + + int regRow; + int regRowid; + iTab = pOrderBy->iECursor; - if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ + if( eDest==SRT_Output || eDest==SRT_Coroutine ){ pseudoTab = pParse->nTab++; - sqlite3VdbeAddOp(v, OP_OpenPseudo, pseudoTab, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, pseudoTab, nColumn); - } - addr = 1 + sqlite3VdbeAddOp(v, OP_Sort, iTab, brk); - codeOffset(v, p, cont, 0); - if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ - sqlite3VdbeAddOp(v, OP_Integer, 1, 0); + sqlite3VdbeAddOp3(v, OP_OpenPseudo, pseudoTab, eDest==SRT_Output, nColumn); } - sqlite3VdbeAddOp(v, OP_Column, iTab, pOrderBy->nExpr + 1); + addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); + codeOffset(v, p, addrContinue); + regRow = sqlite3GetTempReg(pParse); + regRowid = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_Column, iTab, pOrderBy->nExpr + 1, regRow); switch( eDest ){ case SRT_Table: case SRT_EphemTab: { - sqlite3VdbeAddOp(v, OP_NewRowid, iParm, 0); - sqlite3VdbeAddOp(v, OP_Pull, 1, 0); - sqlite3VdbeAddOp(v, OP_Insert, iParm, OPFLAG_APPEND); + testcase( eDest==SRT_Table ); + testcase( eDest==SRT_EphemTab ); + sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); break; } #ifndef SQLITE_OMIT_SUBQUERY case SRT_Set: { assert( nColumn==1 ); - sqlite3VdbeAddOp(v, OP_NotNull, -1, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+3); - sqlite3VdbeOp3(v, OP_MakeRecord, 1, 0, &p->affinity, 1); - sqlite3VdbeAddOp(v, OP_IdxInsert, (iParm&0x0000FFFF), 0); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, 1, regRowid, &p->affinity, 1); + sqlite3ExprCacheAffinityChange(pParse, regRow, 1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid); break; } case SRT_Mem: { assert( nColumn==1 ); - sqlite3VdbeAddOp(v, OP_MemStore, iParm, 1); + sqlite3ExprCodeMove(pParse, regRow, iParm, 1); /* The LIMIT clause will terminate the loop for us */ break; } #endif - case SRT_Callback: - case SRT_Subroutine: { + default: { int i; - sqlite3VdbeAddOp(v, OP_Insert, pseudoTab, 0); + assert( eDest==SRT_Output || eDest==SRT_Coroutine ); + testcase( eDest==SRT_Output ); + testcase( eDest==SRT_Coroutine ); + sqlite3VdbeAddOp2(v, OP_Integer, 1, regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, pseudoTab, regRow, regRowid); for(i=0; iiMem+i ); + sqlite3VdbeAddOp3(v, OP_Column, pseudoTab, i, pDest->iMem+i); } - if( eDest==SRT_Callback ){ - sqlite3VdbeAddOp(v, OP_Callback, nColumn, 0); + if( eDest==SRT_Output ){ + sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iMem, nColumn); + 
sqlite3ExprCacheAffinityChange(pParse, pDest->iMem, nColumn); }else{ - sqlite3VdbeAddOp(v, OP_Gosub, 0, iParm); + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iParm); } break; } - default: { - /* Do nothing */ - break; - } } + sqlite3ReleaseTempReg(pParse, regRow); + sqlite3ReleaseTempReg(pParse, regRowid); - /* Jump to the end of the loop when the LIMIT is reached + /* LIMIT has been implemented by the pushOntoSorter() routine. */ - if( p->iLimit>=0 ){ - sqlite3VdbeAddOp(v, OP_MemIncr, -1, p->iLimit); - sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, brk); - } + assert( p->iLimit==0 ); /* The bottom of the loop */ - sqlite3VdbeResolveLabel(v, cont); - sqlite3VdbeAddOp(v, OP_Next, iTab, addr); - sqlite3VdbeResolveLabel(v, brk); - if( eDest==SRT_Callback || eDest==SRT_Subroutine ){ - sqlite3VdbeAddOp(v, OP_Close, pseudoTab, 0); + sqlite3VdbeResolveLabel(v, addrContinue); + sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); + sqlite3VdbeResolveLabel(v, addrBreak); + if( eDest==SRT_Output || eDest==SRT_Coroutine ){ + sqlite3VdbeAddOp2(v, OP_Close, pseudoTab, 0); } - } /* @@ -834,7 +867,7 @@ char const *zOriginTab = 0; char const *zOriginCol = 0; int j; - if( pExpr==0 || pNC->pSrcList==0 ) return 0; + if( NEVER(pExpr==0) || pNC->pSrcList==0 ) return 0; switch( pExpr->op ){ case TK_AGG_COLUMN: @@ -846,6 +879,8 @@ Table *pTab = 0; /* Table structure column is extracted from */ Select *pS = 0; /* Select the column is extracted from */ int iCol = pExpr->iColumn; /* Index of column in pTab */ + testcase( pExpr->op==TK_AGG_COLUMN ); + testcase( pExpr->op==TK_COLUMN ); while( pNC && !pTab ){ SrcList *pTabList = pNC->pSrcList; for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); @@ -878,7 +913,7 @@ ** of the SELECT statement. Return the declaration type and origin ** data for the result-set column of the sub-select. */ - if( iCol>=0 && iColpEList->nExpr ){ + if( ALWAYS(iCol>=0 && iColpEList->nExpr) ){ /* If iCol is less than zero, then the expression requests the ** rowid of the sub-select or view. This expression is legal (see ** test case misc2.2.2) - it always evaluates to NULL. @@ -890,7 +925,7 @@ sNC.pParse = pNC->pParse; zType = columnType(&sNC, p, &zOriginDb, &zOriginTab, &zOriginCol); } - }else if( pTab->pSchema ){ + }else if( ALWAYS(pTab->pSchema) ){ /* A real table */ assert( !pS ); if( iCol<0 ) iCol = pTab->iPKey; @@ -917,8 +952,9 @@ ** statement. */ NameContext sNC; - Select *pS = pExpr->pSelect; + Select *pS = pExpr->x.pSelect; Expr *p = pS->pEList->a[0].pExpr; + assert( ExprHasProperty(pExpr, EP_xIsSelect) ); sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; @@ -946,6 +982,7 @@ SrcList *pTabList, /* List of tables */ ExprList *pEList /* Expressions defining the result set */ ){ +#ifndef SQLITE_OMIT_DECLTYPE Vdbe *v = pParse->pVdbe; int i; NameContext sNC; @@ -953,20 +990,26 @@ sNC.pParse = pParse; for(i=0; inExpr; i++){ Expr *p = pEList->a[i].pExpr; + const char *zType; +#ifdef SQLITE_ENABLE_COLUMN_METADATA const char *zOrigDb = 0; const char *zOrigTab = 0; const char *zOrigCol = 0; - const char *zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol); + zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol); - /* The vdbe must make it's own copy of the column-type and other + /* The vdbe must make its own copy of the column-type and other ** column specific strings, in case the schema is reset before this ** virtual machine is deleted. 
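
The declared-type and origin strings that generateColumnTypes() attaches here surface through the public column-metadata interfaces. A quick way to observe them is below; the origin functions are only available when the library is built with SQLITE_ENABLE_COLUMN_METADATA, and the table is invented for the example.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE person(name TEXT, age INTEGER);", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT age FROM person", -1, &pStmt, 0);
  /* COLNAME_DECLTYPE set above is what sqlite3_column_decltype() reports. */
  printf("decltype: %s\n", sqlite3_column_decltype(pStmt, 0));
#ifdef SQLITE_ENABLE_COLUMN_METADATA
  /* COLNAME_DATABASE/TABLE/COLUMN feed the origin-name interfaces. */
  printf("origin: %s.%s.%s\n",
         sqlite3_column_database_name(pStmt, 0),
         sqlite3_column_table_name(pStmt, 0),
         sqlite3_column_origin_name(pStmt, 0));
#endif
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
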
*/ - sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, P3_TRANSIENT); - sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, P3_TRANSIENT); - sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, P3_TRANSIENT); - sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, P3_TRANSIENT); + sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT); + sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT); + sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT); +#else + zType = columnType(&sNC, p, 0, 0, 0); +#endif + sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT); } +#endif /* SQLITE_OMIT_DECLTYPE */ } /* @@ -991,8 +1034,7 @@ } #endif - assert( v!=0 ); - if( pParse->colNamesSet || v==0 || sqlite3MallocFailed() ) return; + if( pParse->colNamesSet || NEVER(v==0) || db->mallocFailed ) return; pParse->colNamesSet = 1; fullNames = (db->flags & SQLITE_FullColNames)!=0; shortNames = (db->flags & SQLITE_ShortColNames)!=0; @@ -1000,17 +1042,17 @@ for(i=0; inExpr; i++){ Expr *p; p = pEList->a[i].pExpr; - if( p==0 ) continue; + if( NEVER(p==0) ) continue; if( pEList->a[i].zName ){ char *zName = pEList->a[i].zName; - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, strlen(zName)); - continue; - } - if( p->op==TK_COLUMN && pTabList ){ + sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT); + }else if( (p->op==TK_COLUMN || p->op==TK_AGG_COLUMN) && pTabList ){ Table *pTab; char *zCol; int iCol = p->iColumn; - for(j=0; jnSrc && pTabList->a[j].iCursor!=p->iTable; j++){} + for(j=0; ALWAYS(jnSrc); j++){ + if( pTabList->a[j].iCursor==p->iTable ) break; + } assert( jnSrc ); pTab = pTabList->a[j].pTab; if( iCol<0 ) iCol = pTab->iPKey; @@ -1020,27 +1062,19 @@ }else{ zCol = pTab->aCol[iCol].zName; } - if( !shortNames && !fullNames && p->span.z && p->span.z[0] ){ - sqlite3VdbeSetColName(v, i, COLNAME_NAME, (char*)p->span.z, p->span.n); - }else if( fullNames || (!shortNames && pTabList->nSrc>1) ){ + if( !shortNames && !fullNames ){ + sqlite3VdbeSetColName(v, i, COLNAME_NAME, + sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC); + }else if( fullNames ){ char *zName = 0; - char *zTab; - - zTab = pTabList->a[j].zAlias; - if( fullNames || zTab==0 ) zTab = pTab->zName; - sqlite3SetString(&zName, zTab, ".", zCol, (char*)0); - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, P3_DYNAMIC); + zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol); + sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC); }else{ - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, strlen(zCol)); + sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT); } - }else if( p->span.z && p->span.z[0] ){ - sqlite3VdbeSetColName(v, i, COLNAME_NAME, (char*)p->span.z, p->span.n); - /* sqlite3VdbeCompressSpace(v, addr); */ }else{ - char zName[30]; - assert( p->op!=TK_COLUMN || pTabList==0 ); - sqlite3_snprintf(sizeof(zName), zName, "column%d", i+1); - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, 0); + sqlite3VdbeSetColName(v, i, COLNAME_NAME, + sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC); } } generateColumnTypes(pParse, pTabList, pEList); @@ -1063,537 +1097,254 @@ #endif /* SQLITE_OMIT_COMPOUND_SELECT */ /* -** Forward declaration -*/ -static int prepSelectStmt(Parse*, Select*); - -/* -** Given a SELECT statement, generate a Table structure that describes -** the result set of that SELECT. 
-*/ -Table *sqlite3ResultSetOfSelect(Parse *pParse, char *zTabName, Select *pSelect){ - Table *pTab; - int i, j; - ExprList *pEList; - Column *aCol, *pCol; - - while( pSelect->pPrior ) pSelect = pSelect->pPrior; - if( prepSelectStmt(pParse, pSelect) ){ - return 0; - } - if( sqlite3SelectResolve(pParse, pSelect, 0) ){ - return 0; - } - pTab = sqliteMalloc( sizeof(Table) ); - if( pTab==0 ){ - return 0; - } - pTab->nRef = 1; - pTab->zName = zTabName ? sqliteStrDup(zTabName) : 0; - pEList = pSelect->pEList; - pTab->nCol = pEList->nExpr; - assert( pTab->nCol>0 ); - pTab->aCol = aCol = sqliteMalloc( sizeof(pTab->aCol[0])*pTab->nCol ); - for(i=0, pCol=aCol; inCol; i++, pCol++){ - Expr *p, *pR; - char *zType; - char *zName; - int nName; - CollSeq *pColl; - int cnt; - NameContext sNC; - +** Given a an expression list (which is really the list of expressions +** that form the result set of a SELECT statement) compute appropriate +** column names for a table that would hold the expression list. +** +** All column names will be unique. +** +** Only the column names are computed. Column.zType, Column.zColl, +** and other fields of Column are zeroed. +** +** Return SQLITE_OK on success. If a memory allocation error occurs, +** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM. +*/ +static int selectColumnsFromExprList( + Parse *pParse, /* Parsing context */ + ExprList *pEList, /* Expr list from which to derive column names */ + int *pnCol, /* Write the number of columns here */ + Column **paCol /* Write the new column list here */ +){ + sqlite3 *db = pParse->db; /* Database connection */ + int i, j; /* Loop counters */ + int cnt; /* Index added to make the name unique */ + Column *aCol, *pCol; /* For looping over result columns */ + int nCol; /* Number of columns in the result set */ + Expr *p; /* Expression for a single result column */ + char *zName; /* Column name */ + int nName; /* Size of name in zName[] */ + + *pnCol = nCol = pEList->nExpr; + aCol = *paCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol); + if( aCol==0 ) return SQLITE_NOMEM; + for(i=0, pCol=aCol; ia[i].pExpr; - assert( p->pRight==0 || p->pRight->token.z==0 || p->pRight->token.z[0]!=0 ); + assert( p->pRight==0 || ExprHasProperty(p->pRight, EP_IntValue) + || p->pRight->u.zToken==0 || p->pRight->u.zToken[0]!=0 ); if( (zName = pEList->a[i].zName)!=0 ){ /* If the column contains an "AS " phrase, use as the name */ - zName = sqliteStrDup(zName); - }else if( p->op==TK_DOT - && (pR=p->pRight)!=0 && pR->token.z && pR->token.z[0] ){ - /* For columns of the from A.B use B as the name */ - zName = sqlite3MPrintf("%T", &pR->token); - }else if( p->span.z && p->span.z[0] ){ - /* Use the original text of the column expression as its name */ - zName = sqlite3MPrintf("%T", &p->span); + zName = sqlite3DbStrDup(db, zName); }else{ - /* If all else fails, make up a name */ - zName = sqlite3MPrintf("column%d", i+1); + Expr *pColExpr = p; /* The expression that is the result column name */ + Table *pTab; /* Table associated with this expression */ + while( pColExpr->op==TK_DOT ) pColExpr = pColExpr->pRight; + if( pColExpr->op==TK_COLUMN && ALWAYS(pColExpr->pTab!=0) ){ + /* For columns use the column name name */ + int iCol = pColExpr->iColumn; + pTab = pColExpr->pTab; + if( iCol<0 ) iCol = pTab->iPKey; + zName = sqlite3MPrintf(db, "%s", + iCol>=0 ? 
pTab->aCol[iCol].zName : "rowid"); + }else if( pColExpr->op==TK_ID ){ + assert( !ExprHasProperty(pColExpr, EP_IntValue) ); + zName = sqlite3MPrintf(db, "%s", pColExpr->u.zToken); + }else{ + /* Use the original text of the column expression as its name */ + zName = sqlite3MPrintf(db, "%s", pEList->a[i].zSpan); + } } - sqlite3Dequote(zName); - if( sqlite3MallocFailed() ){ - sqliteFree(zName); - sqlite3DeleteTable(pTab); - return 0; + if( db->mallocFailed ){ + sqlite3DbFree(db, zName); + break; } /* Make sure the column name is unique. If the name is not unique, ** append a integer to the name so that it becomes unique. */ - nName = strlen(zName); + nName = sqlite3Strlen30(zName); for(j=cnt=0; jzName = zName; + } + if( db->mallocFailed ){ + for(j=0; jpSrc; - zType = sqliteStrDup(columnType(&sNC, p, 0, 0, 0)); - pCol->zType = zType; +/* +** Add type and collation information to a column list based on +** a SELECT statement. +** +** The column list presumably came from selectColumnNamesFromExprList(). +** The column list has only names, not types or collations. This +** routine goes through and adds the types and collations. +** +** This routine requires that all identifiers in the SELECT +** statement be resolved. +*/ +static void selectAddColumnTypeAndCollation( + Parse *pParse, /* Parsing contexts */ + int nCol, /* Number of columns */ + Column *aCol, /* List of columns */ + Select *pSelect /* SELECT used to determine types and collations */ +){ + sqlite3 *db = pParse->db; + NameContext sNC; + Column *pCol; + CollSeq *pColl; + int i; + Expr *p; + struct ExprList_item *a; + + assert( pSelect!=0 ); + assert( (pSelect->selFlags & SF_Resolved)!=0 ); + assert( nCol==pSelect->pEList->nExpr || db->mallocFailed ); + if( db->mallocFailed ) return; + memset(&sNC, 0, sizeof(sNC)); + sNC.pSrcList = pSelect->pSrc; + a = pSelect->pEList->a; + for(i=0, pCol=aCol; izType = sqlite3DbStrDup(db, columnType(&sNC, p, 0, 0, 0)); pCol->affinity = sqlite3ExprAffinity(p); + if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_NONE; pColl = sqlite3ExprCollSeq(pParse, p); if( pColl ){ - pCol->zColl = sqliteStrDup(pColl->zName); + pCol->zColl = sqlite3DbStrDup(db, pColl->zName); } } - pTab->iPKey = -1; - return pTab; } /* -** Prepare a SELECT statement for processing by doing the following -** things: -** -** (1) Make sure VDBE cursor numbers have been assigned to every -** element of the FROM clause. -** -** (2) Fill in the pTabList->a[].pTab fields in the SrcList that -** defines FROM clause. When views appear in the FROM clause, -** fill pTabList->a[].pSelect with a copy of the SELECT statement -** that implements the view. A copy is made of the view's SELECT -** statement so that we can freely modify or delete that statement -** without worrying about messing up the presistent representation -** of the view. -** -** (3) Add terms to the WHERE clause to accomodate the NATURAL keyword -** on joins and the ON and USING clause of joins. -** -** (4) Scan the list of columns in the result set (pEList) looking -** for instances of the "*" operator or the TABLE.* operator. -** If found, expand each "*" to be every column in every table -** and TABLE.* to be every column in TABLE. -** -** Return 0 on success. If there are problems, leave an error message -** in pParse and return non-zero. +** Given a SELECT statement, generate a Table structure that describes +** the result set of that SELECT. 
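
selectColumnsFromExprList() keeps the derived names unique by appending a counter whenever a candidate collides with an earlier column; SQLite uses a ":1", ":2", ... style suffix for this. The loop below is a simplified standalone analogue of that rename step, with fixed-size buffers and everything else invented for the illustration.

#include <stdio.h>
#include <string.h>

/* Make each name unique by suffixing ":1", ":2", ... on collisions. */
static void makeUnique(char aName[][32], int nName){
  int i, j, cnt;
  char zBase[32];
  for(i=0; i<nName; i++){
    strcpy(zBase, aName[i]);           /* remember the original candidate */
    for(j=cnt=0; j<i; j++){
      if( strcmp(aName[j], aName[i])==0 ){
        snprintf(aName[i], 32, "%s:%d", zBase, ++cnt);
        j = -1;                        /* restart the scan with the new name */
      }
    }
  }
}

int main(void){
  char aName[3][32] = {"x", "x", "x"};
  makeUnique(aName, 3);
  printf("%s %s %s\n", aName[0], aName[1], aName[2]);  /* x x:1 x:2 */
  return 0;
}
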
*/ -static int prepSelectStmt(Parse *pParse, Select *p){ - int i, j, k, rc; - SrcList *pTabList; - ExprList *pEList; - struct SrcList_item *pFrom; +Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){ + Table *pTab; + sqlite3 *db = pParse->db; + int savedFlags; - if( p==0 || p->pSrc==0 || sqlite3MallocFailed() ){ - return 1; + savedFlags = db->flags; + db->flags &= ~SQLITE_FullColNames; + db->flags |= SQLITE_ShortColNames; + sqlite3SelectPrep(pParse, pSelect, 0); + if( pParse->nErr ) return 0; + while( pSelect->pPrior ) pSelect = pSelect->pPrior; + db->flags = savedFlags; + pTab = sqlite3DbMallocZero(db, sizeof(Table) ); + if( pTab==0 ){ + return 0; } - pTabList = p->pSrc; - pEList = p->pEList; - - /* Make sure cursor numbers have been assigned to all entries in - ** the FROM clause of the SELECT statement. - */ - sqlite3SrcListAssignCursors(pParse, p->pSrc); + /* The sqlite3ResultSetOfSelect() is only used n contexts where lookaside + ** is disabled, so we might as well hard-code pTab->dbMem to NULL. */ + assert( db->lookaside.bEnabled==0 ); + pTab->dbMem = 0; + pTab->nRef = 1; + pTab->zName = 0; + selectColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); + selectAddColumnTypeAndCollation(pParse, pTab->nCol, pTab->aCol, pSelect); + pTab->iPKey = -1; + if( db->mallocFailed ){ + sqlite3DeleteTable(pTab); + return 0; + } + return pTab; +} - /* Look up every table named in the FROM clause of the select. If - ** an entry of the FROM clause is a subquery instead of a table or view, - ** then create a transient table structure to describe the subquery. - */ - for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab; - if( pFrom->pTab!=0 ){ - /* This statement has already been prepared. There is no need - ** to go further. */ - assert( i==0 ); - return 0; +/* +** Get a VDBE for the given parser context. Create a new one if necessary. +** If an error occurs, return NULL and leave a message in pParse. +*/ +Vdbe *sqlite3GetVdbe(Parse *pParse){ + Vdbe *v = pParse->pVdbe; + if( v==0 ){ + v = pParse->pVdbe = sqlite3VdbeCreate(pParse->db); +#ifndef SQLITE_OMIT_TRACE + if( v ){ + sqlite3VdbeAddOp0(v, OP_Trace); } - if( pFrom->zName==0 ){ -#ifndef SQLITE_OMIT_SUBQUERY - /* A sub-query in the FROM clause of a SELECT */ - assert( pFrom->pSelect!=0 ); - if( pFrom->zAlias==0 ){ - pFrom->zAlias = - sqlite3MPrintf("sqlite_subquery_%p_", (void*)pFrom->pSelect); - } - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = - sqlite3ResultSetOfSelect(pParse, pFrom->zAlias, pFrom->pSelect); - if( pTab==0 ){ - return 1; - } - /* The isEphem flag indicates that the Table structure has been - ** dynamically allocated and may be freed at any time. In other words, - ** pTab is not pointing to a persistent table structure that defines - ** part of the schema. */ - pTab->isEphem = 1; -#endif - }else{ - /* An ordinary table or view name in the FROM clause */ - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = - sqlite3LocateTable(pParse,pFrom->zName,pFrom->zDatabase); - if( pTab==0 ){ - return 1; - } - pTab->nRef++; -#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) - if( pTab->pSelect || IsVirtual(pTab) ){ - /* We reach here if the named table is a really a view */ - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - return 1; - } - /* If pFrom->pSelect!=0 it means we are dealing with a - ** view within a view. The SELECT structure has already been - ** copied by the outer view so we can skip the copy step here - ** in the inner view. 
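
The new sqlite3GetVdbe() plants an OP_Trace opcode at the start of every statement unless SQLITE_OMIT_TRACE is defined, and that opcode is what ultimately fires the public sqlite3_trace() callback with the statement text. A minimal consumer, with the callback and SQL made up for the example:

#include <stdio.h>
#include "sqlite3.h"

/* Invoked as each prepared statement starts running. */
static void traceCallback(void *pArg, const char *zSql){
  (void)pArg;
  printf("TRACE: %s\n", zSql);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_trace(db, traceCallback, 0);
  sqlite3_exec(db, "CREATE TABLE t(x); SELECT count(*) FROM t;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}
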
- */ - if( pFrom->pSelect==0 ){ - pFrom->pSelect = sqlite3SelectDup(pTab->pSelect); - } - } #endif - } } + return v; +} - /* Process NATURAL keywords, and ON and USING clauses of joins. - */ - if( sqliteProcessJoin(pParse, p) ) return 1; - /* For every "*" that occurs in the column list, insert the names of - ** all columns in all tables. And for every TABLE.* insert the names - ** of all columns in TABLE. The parser inserted a special expression - ** with the TK_ALL operator for each "*" that it found in the column list. - ** The following code just has to locate the TK_ALL expressions and expand - ** each one to the list of all columns in all tables. - ** - ** The first loop just checks to see if there are any "*" operators - ** that need expanding. +/* +** Compute the iLimit and iOffset fields of the SELECT based on the +** pLimit and pOffset expressions. pLimit and pOffset hold the expressions +** that appear in the original SQL statement after the LIMIT and OFFSET +** keywords. Or NULL if those keywords are omitted. iLimit and iOffset +** are the integer memory register numbers for counters used to compute +** the limit and offset. If there is no limit and/or offset, then +** iLimit and iOffset are negative. +** +** This routine changes the values of iLimit and iOffset only if +** a limit or offset is defined by pLimit and pOffset. iLimit and +** iOffset should have been preset to appropriate default values +** (usually but not always -1) prior to calling this routine. +** Only if pLimit!=0 or pOffset!=0 do the limit registers get +** redefined. The UNION ALL operator uses this property to force +** the reuse of the same limit and offset registers across multiple +** SELECT statements. +*/ +static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ + Vdbe *v = 0; + int iLimit = 0; + int iOffset; + int addr1; + if( p->iLimit ) return; + + /* + ** "LIMIT -1" always shows all rows. There is some + ** contraversy about what the correct behavior should be. + ** The current implementation interprets "LIMIT 0" to mean + ** no rows. */ - for(k=0; knExpr; k++){ - Expr *pE = pEList->a[k].pExpr; - if( pE->op==TK_ALL ) break; - if( pE->op==TK_DOT && pE->pRight && pE->pRight->op==TK_ALL - && pE->pLeft && pE->pLeft->op==TK_ID ) break; + sqlite3ExprCacheClear(pParse); + assert( p->pOffset==0 || p->pLimit!=0 ); + if( p->pLimit ){ + p->iLimit = iLimit = ++pParse->nMem; + v = sqlite3GetVdbe(pParse); + if( NEVER(v==0) ) return; /* VDBE should have already been allocated */ + sqlite3ExprCode(pParse, p->pLimit, iLimit); + sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); + VdbeComment((v, "LIMIT counter")); + sqlite3VdbeAddOp2(v, OP_IfZero, iLimit, iBreak); + if( p->pOffset ){ + p->iOffset = iOffset = ++pParse->nMem; + pParse->nMem++; /* Allocate an extra register for limit+offset */ + sqlite3ExprCode(pParse, p->pOffset, iOffset); + sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); + VdbeComment((v, "OFFSET counter")); + addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iOffset); + sqlite3VdbeAddOp2(v, OP_Integer, 0, iOffset); + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp3(v, OP_Add, iLimit, iOffset, iOffset+1); + VdbeComment((v, "LIMIT+OFFSET")); + addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iLimit); + sqlite3VdbeAddOp2(v, OP_Integer, -1, iOffset+1); + sqlite3VdbeJumpHere(v, addr1); + } } - rc = 0; - if( knExpr ){ - /* - ** If we get here it means the result set contains one or more "*" - ** operators that need to be expanded. Loop through each expression - ** in the result set and expand them one by one. 
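
In register terms, the new computeLimitRegisters() leaves one counter of rows still to emit (iLimit), one counter of rows still to skip (iOffset), and, for sorters, a combined LIMIT+OFFSET bound in iOffset+1. A plain-C analogue of what the generated code then does per row, with the data and bounds made up for the illustration:

#include <stdio.h>

int main(void){
  int aRow[] = {10, 11, 12, 13, 14, 15, 16};
  int iLimit = 2, iOffset = 3;                      /* LIMIT 2 OFFSET 3 */
  int iCombined = iLimit>0 ? iLimit+iOffset : -1;   /* register iOffset+1 */
  int i;
  for(i=0; i<(int)(sizeof(aRow)/sizeof(aRow[0])); i++){
    if( iOffset>0 ){ iOffset--; continue; }  /* codeOffset(): skip the row */
    printf("row %d\n", aRow[i]);             /* OP_ResultRow */
    if( --iLimit==0 ) break;                 /* OP_AddImm -1 / OP_IfZero */
  }
  (void)iCombined;
  return 0;
}
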
- */ - struct ExprList_item *a = pEList->a; - ExprList *pNew = 0; - int flags = pParse->db->flags; - int longNames = (flags & SQLITE_FullColNames)!=0 && - (flags & SQLITE_ShortColNames)==0; - - for(k=0; knExpr; k++){ - Expr *pE = a[k].pExpr; - if( pE->op!=TK_ALL && - (pE->op!=TK_DOT || pE->pRight==0 || pE->pRight->op!=TK_ALL) ){ - /* This particular expression does not need to be expanded. - */ - pNew = sqlite3ExprListAppend(pNew, a[k].pExpr, 0); - if( pNew ){ - pNew->a[pNew->nExpr-1].zName = a[k].zName; - }else{ - rc = 1; - } - a[k].pExpr = 0; - a[k].zName = 0; - }else{ - /* This expression is a "*" or a "TABLE.*" and needs to be - ** expanded. */ - int tableSeen = 0; /* Set to 1 when TABLE matches */ - char *zTName; /* text of name of TABLE */ - if( pE->op==TK_DOT && pE->pLeft ){ - zTName = sqlite3NameFromToken(&pE->pLeft->token); - }else{ - zTName = 0; - } - for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; - char *zTabName = pFrom->zAlias; - if( zTabName==0 || zTabName[0]==0 ){ - zTabName = pTab->zName; - } - if( zTName && (zTabName==0 || zTabName[0]==0 || - sqlite3StrICmp(zTName, zTabName)!=0) ){ - continue; - } - tableSeen = 1; - for(j=0; jnCol; j++){ - Expr *pExpr, *pRight; - char *zName = pTab->aCol[j].zName; - - /* If a column is marked as 'hidden' (currently only possible - ** for virtual tables), do not include it in the expanded - ** result-set list. - */ - if( IsHiddenColumn(&pTab->aCol[j]) ){ - assert(IsVirtual(pTab)); - continue; - } - - if( i>0 ){ - struct SrcList_item *pLeft = &pTabList->a[i-1]; - if( (pLeft[1].jointype & JT_NATURAL)!=0 && - columnIndex(pLeft->pTab, zName)>=0 ){ - /* In a NATURAL join, omit the join columns from the - ** table on the right */ - continue; - } - if( sqlite3IdListIndex(pLeft[1].pUsing, zName)>=0 ){ - /* In a join with a USING clause, omit columns in the - ** using clause from the table on the right. */ - continue; - } - } - pRight = sqlite3Expr(TK_ID, 0, 0, 0); - if( pRight==0 ) break; - setQuotedToken(&pRight->token, zName); - if( zTabName && (longNames || pTabList->nSrc>1) ){ - Expr *pLeft = sqlite3Expr(TK_ID, 0, 0, 0); - pExpr = sqlite3Expr(TK_DOT, pLeft, pRight, 0); - if( pExpr==0 ) break; - setQuotedToken(&pLeft->token, zTabName); - setToken(&pExpr->span, sqlite3MPrintf("%s.%s", zTabName, zName)); - pExpr->span.dyn = 1; - pExpr->token.z = 0; - pExpr->token.n = 0; - pExpr->token.dyn = 0; - }else{ - pExpr = pRight; - pExpr->span = pExpr->token; - pExpr->span.dyn = 0; - } - if( longNames ){ - pNew = sqlite3ExprListAppend(pNew, pExpr, &pExpr->span); - }else{ - pNew = sqlite3ExprListAppend(pNew, pExpr, &pRight->token); - } - } - } - if( !tableSeen ){ - if( zTName ){ - sqlite3ErrorMsg(pParse, "no such table: %s", zTName); - }else{ - sqlite3ErrorMsg(pParse, "no tables specified"); - } - rc = 1; - } - sqliteFree(zTName); - } - } - sqlite3ExprListDelete(pEList); - p->pEList = pNew; - } - if( p->pEList && p->pEList->nExpr>SQLITE_MAX_COLUMN ){ - sqlite3ErrorMsg(pParse, "too many columns in result set"); - rc = SQLITE_ERROR; - } - if( sqlite3MallocFailed() ){ - rc = SQLITE_NOMEM; - } - return rc; -} - -#ifndef SQLITE_OMIT_COMPOUND_SELECT -/* -** This routine associates entries in an ORDER BY expression list with -** columns in a result. For each ORDER BY expression, the opcode of -** the top-level node is changed to TK_COLUMN and the iColumn value of -** the top-level node is filled in with column number and the iTable -** value of the top-level node is filled with iTable parameter. 
-** -** If there are prior SELECT clauses, they are processed first. A match -** in an earlier SELECT takes precedence over a later SELECT. -** -** Any entry that does not match is flagged as an error. The number -** of errors is returned. -*/ -static int matchOrderbyToColumn( - Parse *pParse, /* A place to leave error messages */ - Select *pSelect, /* Match to result columns of this SELECT */ - ExprList *pOrderBy, /* The ORDER BY values to match against columns */ - int iTable, /* Insert this value in iTable */ - int mustComplete /* If TRUE all ORDER BYs must match */ -){ - int nErr = 0; - int i, j; - ExprList *pEList; - - if( pSelect==0 || pOrderBy==0 ) return 1; - if( mustComplete ){ - for(i=0; inExpr; i++){ pOrderBy->a[i].done = 0; } - } - if( prepSelectStmt(pParse, pSelect) ){ - return 1; - } - if( pSelect->pPrior ){ - if( matchOrderbyToColumn(pParse, pSelect->pPrior, pOrderBy, iTable, 0) ){ - return 1; - } - } - pEList = pSelect->pEList; - for(i=0; inExpr; i++){ - struct ExprList_item *pItem; - Expr *pE = pOrderBy->a[i].pExpr; - int iCol = -1; - char *zLabel; - - if( pOrderBy->a[i].done ) continue; - if( sqlite3ExprIsInteger(pE, &iCol) ){ - if( iCol<=0 || iCol>pEList->nExpr ){ - sqlite3ErrorMsg(pParse, - "ORDER BY position %d should be between 1 and %d", - iCol, pEList->nExpr); - nErr++; - break; - } - if( !mustComplete ) continue; - iCol--; - } - if( iCol<0 && (zLabel = sqlite3NameFromToken(&pE->token))!=0 ){ - for(j=0, pItem=pEList->a; jnExpr; j++, pItem++){ - char *zName; - int isMatch; - if( pItem->zName ){ - zName = sqlite3StrDup(pItem->zName); - }else{ - zName = sqlite3NameFromToken(&pItem->pExpr->token); - } - isMatch = zName && sqlite3StrICmp(zName, zLabel)==0; - sqliteFree(zName); - if( isMatch ){ - iCol = j; - break; - } - } - sqliteFree(zLabel); - } - if( iCol>=0 ){ - pE->op = TK_COLUMN; - pE->iColumn = iCol; - pE->iTable = iTable; - pE->iAgg = -1; - pOrderBy->a[i].done = 1; - }else if( mustComplete ){ - sqlite3ErrorMsg(pParse, - "ORDER BY term number %d does not match any result column", i+1); - nErr++; - break; - } - } - return nErr; -} -#endif /* #ifndef SQLITE_OMIT_COMPOUND_SELECT */ - -/* -** Get a VDBE for the given parser context. Create a new one if necessary. -** If an error occurs, return NULL and leave a message in pParse. -*/ -Vdbe *sqlite3GetVdbe(Parse *pParse){ - Vdbe *v = pParse->pVdbe; - if( v==0 ){ - v = pParse->pVdbe = sqlite3VdbeCreate(pParse->db); - } - return v; -} - - -/* -** Compute the iLimit and iOffset fields of the SELECT based on the -** pLimit and pOffset expressions. pLimit and pOffset hold the expressions -** that appear in the original SQL statement after the LIMIT and OFFSET -** keywords. Or NULL if those keywords are omitted. iLimit and iOffset -** are the integer memory register numbers for counters used to compute -** the limit and offset. If there is no limit and/or offset, then -** iLimit and iOffset are negative. -** -** This routine changes the values of iLimit and iOffset only if -** a limit or offset is defined by pLimit and pOffset. iLimit and -** iOffset should have been preset to appropriate default values -** (usually but not always -1) prior to calling this routine. -** Only if pLimit!=0 or pOffset!=0 do the limit registers get -** redefined. The UNION ALL operator uses this property to force -** the reuse of the same limit and offset registers across multiple -** SELECT statements. 
-*/ -static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ - Vdbe *v = 0; - int iLimit = 0; - int iOffset; - int addr1, addr2; - - /* - ** "LIMIT -1" always shows all rows. There is some - ** contraversy about what the correct behavior should be. - ** The current implementation interprets "LIMIT 0" to mean - ** no rows. - */ - if( p->pLimit ){ - p->iLimit = iLimit = pParse->nMem; - pParse->nMem += 2; - v = sqlite3GetVdbe(pParse); - if( v==0 ) return; - sqlite3ExprCode(pParse, p->pLimit); - sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); - sqlite3VdbeAddOp(v, OP_MemStore, iLimit, 1); - VdbeComment((v, "# LIMIT counter")); - sqlite3VdbeAddOp(v, OP_IfMemZero, iLimit, iBreak); - sqlite3VdbeAddOp(v, OP_MemLoad, iLimit, 0); - } - if( p->pOffset ){ - p->iOffset = iOffset = pParse->nMem++; - v = sqlite3GetVdbe(pParse); - if( v==0 ) return; - sqlite3ExprCode(pParse, p->pOffset); - sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); - sqlite3VdbeAddOp(v, OP_MemStore, iOffset, p->pLimit==0); - VdbeComment((v, "# OFFSET counter")); - addr1 = sqlite3VdbeAddOp(v, OP_IfMemPos, iOffset, 0); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_Integer, 0, 0); - sqlite3VdbeJumpHere(v, addr1); - if( p->pLimit ){ - sqlite3VdbeAddOp(v, OP_Add, 0, 0); - } - } - if( p->pLimit ){ - addr1 = sqlite3VdbeAddOp(v, OP_IfMemPos, iLimit, 0); - sqlite3VdbeAddOp(v, OP_Pop, 1, 0); - sqlite3VdbeAddOp(v, OP_MemInt, -1, iLimit+1); - addr2 = sqlite3VdbeAddOp(v, OP_Goto, 0, 0); - sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp(v, OP_MemStore, iLimit+1, 1); - VdbeComment((v, "# LIMIT+OFFSET")); - sqlite3VdbeJumpHere(v, addr2); - } -} - -/* -** Allocate a virtual index to use for sorting. -*/ -static void createSortingIndex(Parse *pParse, Select *p, ExprList *pOrderBy){ - if( pOrderBy ){ - int addr; - assert( pOrderBy->iECursor==0 ); - pOrderBy->iECursor = pParse->nTab++; - addr = sqlite3VdbeAddOp(pParse->pVdbe, OP_OpenEphemeral, - pOrderBy->iECursor, pOrderBy->nExpr+1); - assert( p->addrOpenEphm[2] == -1 ); - p->addrOpenEphm[2] = addr; - } -} +} #ifndef SQLITE_OMIT_COMPOUND_SELECT /* @@ -1611,17 +1362,27 @@ }else{ pRet = 0; } - if( pRet==0 ){ + assert( iCol>=0 ); + if( pRet==0 && iColpEList->nExpr ){ pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr); } return pRet; } #endif /* SQLITE_OMIT_COMPOUND_SELECT */ +/* Forward reference */ +static int multiSelectOrderBy( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +); + + #ifndef SQLITE_OMIT_COMPOUND_SELECT /* -** This routine is called to process a query that is really the union -** or intersection of two or more separate queries. +** This routine is called to process a compound query form from +** two or more separate queries using UNION, UNION ALL, EXCEPT, or +** INTERSECT ** ** "p" points to the right-most of the two queries. the query on the ** left is p->pPrior. The left query could also be a compound query @@ -1652,28 +1413,24 @@ static int multiSelect( Parse *pParse, /* Parsing context */ Select *p, /* The right-most of SELECTs to be coded */ - int eDest, /* \___ Store query results as specified */ - int iParm, /* / by these two parameters. 
*/ - char *aff /* If eDest is SRT_Union, the affinity string */ + SelectDest *pDest /* What to do with query results */ ){ int rc = SQLITE_OK; /* Success code from a subroutine */ Select *pPrior; /* Another SELECT immediately to our left */ Vdbe *v; /* Generate code to this VDBE */ - int nCol; /* Number of columns in the result set */ - ExprList *pOrderBy; /* The ORDER BY clause on p */ - int aSetP2[2]; /* Set P2 value of these op to number of columns */ - int nSetP2 = 0; /* Number of slots in aSetP2[] used */ + SelectDest dest; /* Alternative data destination */ + Select *pDelete = 0; /* Chain of simple selects to delete */ + sqlite3 *db; /* Database connection */ /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. */ - if( p==0 || p->pPrior==0 ){ - rc = 1; - goto multi_select_end; - } + assert( p && p->pPrior ); /* Calling function guarantees this much */ + db = pParse->db; pPrior = p->pPrior; assert( pPrior->pRightmost!=pPrior ); assert( pPrior->pRightmost==p->pRightmost ); + dest = *pDest; if( pPrior->pOrderBy ){ sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before", selectOpName(p->op)); @@ -1687,139 +1444,143 @@ goto multi_select_end; } - /* Make sure we have a valid query engine. If not, create a new one. - */ v = sqlite3GetVdbe(pParse); - if( v==0 ){ + assert( v!=0 ); /* The VDBE already created by calling function */ + + /* Create the destination temporary table if necessary + */ + if( dest.eDest==SRT_EphemTab ){ + assert( p->pEList ); + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iParm, p->pEList->nExpr); + dest.eDest = SRT_Table; + } + + /* Make sure all SELECTs in the statement have the same number of elements + ** in their result sets. + */ + assert( p->pEList && pPrior->pEList ); + if( p->pEList->nExpr!=pPrior->pEList->nExpr ){ + sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" + " do not have the same number of result columns", selectOpName(p->op)); rc = 1; goto multi_select_end; } - /* Create the destination temporary table if necessary + /* Compound SELECTs that have an ORDER BY clause are handled separately. */ - if( eDest==SRT_EphemTab ){ - assert( p->pEList ); - assert( nSetP2pOrderBy ){ + return multiSelectOrderBy(pParse, p, pDest); } /* Generate code for the left and right SELECT statements. 
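
multiSelect() now checks that the two arms of the compound have the same number of result columns before any code is generated, so the mismatch surfaces as an ordinary prepare-time error carrying the message text shown above. Checking that through the public API, with the SQL as an example only:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  int rc;
  sqlite3_open(":memory:", &db);
  /* Left arm has one result column, right arm has two. */
  rc = sqlite3_prepare_v2(db, "SELECT 1 UNION SELECT 1, 2", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ){
    printf("prepare failed: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
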
*/ - pOrderBy = p->pOrderBy; switch( p->op ){ case TK_ALL: { - if( pOrderBy==0 ){ - int addr = 0; - assert( !pPrior->pLimit ); - pPrior->pLimit = p->pLimit; - pPrior->pOffset = p->pOffset; - rc = sqlite3Select(pParse, pPrior, eDest, iParm, 0, 0, 0, aff); - p->pLimit = 0; - p->pOffset = 0; - if( rc ){ - goto multi_select_end; - } - p->pPrior = 0; - p->iLimit = pPrior->iLimit; - p->iOffset = pPrior->iOffset; - if( p->iLimit>=0 ){ - addr = sqlite3VdbeAddOp(v, OP_IfMemZero, p->iLimit, 0); - VdbeComment((v, "# Jump ahead if LIMIT reached")); - } - rc = sqlite3Select(pParse, p, eDest, iParm, 0, 0, 0, aff); - p->pPrior = pPrior; - if( rc ){ - goto multi_select_end; - } - if( addr ){ - sqlite3VdbeJumpHere(v, addr); - } - break; + int addr = 0; + assert( !pPrior->pLimit ); + pPrior->pLimit = p->pLimit; + pPrior->pOffset = p->pOffset; + rc = sqlite3Select(pParse, pPrior, &dest); + p->pLimit = 0; + p->pOffset = 0; + if( rc ){ + goto multi_select_end; + } + p->pPrior = 0; + p->iLimit = pPrior->iLimit; + p->iOffset = pPrior->iOffset; + if( p->iLimit ){ + addr = sqlite3VdbeAddOp1(v, OP_IfZero, p->iLimit); + VdbeComment((v, "Jump ahead if LIMIT reached")); + } + rc = sqlite3Select(pParse, p, &dest); + testcase( rc!=SQLITE_OK ); + pDelete = p->pPrior; + p->pPrior = pPrior; + if( addr ){ + sqlite3VdbeJumpHere(v, addr); } - /* For UNION ALL ... ORDER BY fall through to the next case */ + break; } case TK_EXCEPT: case TK_UNION: { int unionTab; /* Cursor number of the temporary table holding result */ - int op = 0; /* One of the SRT_ operations to apply to self */ + u8 op = 0; /* One of the SRT_ operations to apply to self */ int priorOp; /* The SRT_ operation to apply to prior selects */ Expr *pLimit, *pOffset; /* Saved values of p->nLimit and p->nOffset */ int addr; + SelectDest uniondest; - priorOp = p->op==TK_ALL ? SRT_Table : SRT_Union; - if( eDest==priorOp && pOrderBy==0 && !p->pLimit && !p->pOffset ){ + testcase( p->op==TK_EXCEPT ); + testcase( p->op==TK_UNION ); + priorOp = SRT_Union; + if( dest.eDest==priorOp && ALWAYS(!p->pLimit &&!p->pOffset) ){ /* We can reuse a temporary table generated by a SELECT to our ** right. */ - unionTab = iParm; + assert( p->pRightmost!=p ); /* Can only happen for leftward elements + ** of a 3-way or more compound */ + assert( p->pLimit==0 ); /* Not allowed on leftward elements */ + assert( p->pOffset==0 ); /* Not allowed on leftward elements */ + unionTab = dest.iParm; }else{ /* We will need to create our own temporary table to hold the ** intermediate results. 
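
For UNION ALL the limit and offset registers are simply handed from the right-most SELECT to its left sibling (pPrior->pLimit = p->pLimit above), and the OP_IfZero jump skips the second arm entirely once the shared counter runs out. The observable effect, with a schema invented for the example:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(x); CREATE TABLE t2(x);"
    "INSERT INTO t1 VALUES(1),(2),(3); INSERT INTO t2 VALUES(4),(5);",
    0, 0, 0);
  /* The LIMIT applies to the whole compound: two rows come back and the
  ** scan of t2 never starts because the shared counter hits zero first. */
  sqlite3_prepare_v2(db,
    "SELECT x FROM t1 UNION ALL SELECT x FROM t2 LIMIT 2", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d\n", sqlite3_column_int(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
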
*/ unionTab = pParse->nTab++; - if( pOrderBy && matchOrderbyToColumn(pParse, p, pOrderBy, unionTab,1) ){ - rc = 1; - goto multi_select_end; - } - addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, unionTab, 0); - if( priorOp==SRT_Table ){ - assert( nSetP2addrOpenEphm[0] == -1 ); - p->addrOpenEphm[0] = addr; - p->pRightmost->usesEphm = 1; - } - createSortingIndex(pParse, p, pOrderBy); + assert( p->pOrderBy==0 ); + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0); + assert( p->addrOpenEphm[0] == -1 ); + p->addrOpenEphm[0] = addr; + p->pRightmost->selFlags |= SF_UsesEphemeral; assert( p->pEList ); } /* Code the SELECT statements to our left */ assert( !pPrior->pOrderBy ); - rc = sqlite3Select(pParse, pPrior, priorOp, unionTab, 0, 0, 0, aff); + sqlite3SelectDestInit(&uniondest, priorOp, unionTab); + rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end; } /* Code the current SELECT statement */ - switch( p->op ){ - case TK_EXCEPT: op = SRT_Except; break; - case TK_UNION: op = SRT_Union; break; - case TK_ALL: op = SRT_Table; break; + if( p->op==TK_EXCEPT ){ + op = SRT_Except; + }else{ + assert( p->op==TK_UNION ); + op = SRT_Union; } p->pPrior = 0; - p->pOrderBy = 0; - p->disallowOrderBy = pOrderBy!=0; pLimit = p->pLimit; p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; - rc = sqlite3Select(pParse, p, op, unionTab, 0, 0, 0, aff); + uniondest.eDest = op; + rc = sqlite3Select(pParse, p, &uniondest); + testcase( rc!=SQLITE_OK ); /* Query flattening in sqlite3Select() might refill p->pOrderBy. ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ - sqlite3ExprListDelete(p->pOrderBy); + sqlite3ExprListDelete(db, p->pOrderBy); + pDelete = p->pPrior; p->pPrior = pPrior; - p->pOrderBy = pOrderBy; - sqlite3ExprDelete(p->pLimit); + p->pOrderBy = 0; + sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; - p->iLimit = -1; - p->iOffset = -1; - if( rc ){ - goto multi_select_end; - } - + p->iLimit = 0; + p->iOffset = 0; /* Convert the data in the temporary table into whatever form ** it is that we currently need. 
- */ - if( eDest!=priorOp || unionTab!=iParm ){ + */ + assert( unionTab==dest.iParm || dest.eDest!=priorOp ); + if( dest.eDest!=priorOp ){ int iCont, iBreak, iStart; assert( p->pEList ); - if( eDest==SRT_Callback ){ + if( dest.eDest==SRT_Output ){ Select *pFirst = p; while( pFirst->pPrior ) pFirst = pFirst->pPrior; generateColumnNames(pParse, 0, pFirst->pEList); @@ -1827,27 +1588,24 @@ iBreak = sqlite3VdbeMakeLabel(v); iCont = sqlite3VdbeMakeLabel(v); computeLimitRegisters(pParse, p, iBreak); - sqlite3VdbeAddOp(v, OP_Rewind, unionTab, iBreak); + sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); iStart = sqlite3VdbeCurrentAddr(v); - rc = selectInnerLoop(pParse, p, p->pEList, unionTab, p->pEList->nExpr, - pOrderBy, -1, eDest, iParm, - iCont, iBreak, 0); - if( rc ){ - rc = 1; - goto multi_select_end; - } + selectInnerLoop(pParse, p, p->pEList, unionTab, p->pEList->nExpr, + 0, -1, &dest, iCont, iBreak); sqlite3VdbeResolveLabel(v, iCont); - sqlite3VdbeAddOp(v, OP_Next, unionTab, iStart); + sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); sqlite3VdbeResolveLabel(v, iBreak); - sqlite3VdbeAddOp(v, OP_Close, unionTab, 0); + sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0); } break; } - case TK_INTERSECT: { + default: assert( p->op==TK_INTERSECT ); { int tab1, tab2; int iCont, iBreak, iStart; Expr *pLimit, *pOffset; int addr; + SelectDest intersectdest; + int r1; /* INTERSECT is different from the others since it requires ** two temporary tables. Hence it has its own case. Begin @@ -1855,28 +1613,25 @@ */ tab1 = pParse->nTab++; tab2 = pParse->nTab++; - if( pOrderBy && matchOrderbyToColumn(pParse,p,pOrderBy,tab1,1) ){ - rc = 1; - goto multi_select_end; - } - createSortingIndex(pParse, p, pOrderBy); + assert( p->pOrderBy==0 ); - addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, tab1, 0); + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0); assert( p->addrOpenEphm[0] == -1 ); p->addrOpenEphm[0] = addr; - p->pRightmost->usesEphm = 1; + p->pRightmost->selFlags |= SF_UsesEphemeral; assert( p->pEList ); /* Code the SELECTs to our left into temporary table "tab1". */ - rc = sqlite3Select(pParse, pPrior, SRT_Union, tab1, 0, 0, 0, aff); + sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); + rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end; } /* Code the current SELECT into temporary table "tab2" */ - addr = sqlite3VdbeAddOp(v, OP_OpenEphemeral, tab2, 0); + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0); assert( p->addrOpenEphm[1] == -1 ); p->addrOpenEphm[1] = addr; p->pPrior = 0; @@ -1884,20 +1639,20 @@ p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; - rc = sqlite3Select(pParse, p, SRT_Union, tab2, 0, 0, 0, aff); + intersectdest.iParm = tab2; + rc = sqlite3Select(pParse, p, &intersectdest); + testcase( rc!=SQLITE_OK ); + pDelete = p->pPrior; p->pPrior = pPrior; - sqlite3ExprDelete(p->pLimit); + sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; - if( rc ){ - goto multi_select_end; - } /* Generate code to take the intersection of the two temporary ** tables. 
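
The INTERSECT branch materialises both arms into transient tables and then probes tab2 for every key of tab1, using the OP_RowKey/OP_NotFound pair coded just below. Stripped of the VDBE details, the algorithm reduces to the loop sketched here, with plain arrays standing in for the two transient tables:

#include <stdio.h>

/* Return 1 if key appears in a[0..n-1], mirroring the OP_NotFound probe. */
static int found(const int *a, int n, int key){
  int i;
  for(i=0; i<n; i++) if( a[i]==key ) return 1;
  return 0;
}

int main(void){
  int tab1[] = {1, 2, 3, 5};   /* rows produced by the left SELECTs */
  int tab2[] = {2, 3, 4};      /* rows produced by the right SELECT */
  int i;
  for(i=0; i<(int)(sizeof(tab1)/sizeof(tab1[0])); i++){
    if( found(tab2, (int)(sizeof(tab2)/sizeof(tab2[0])), tab1[i]) ){
      printf("intersect row: %d\n", tab1[i]);  /* selectInnerLoop() output */
    }
  }
  return 0;
}
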
*/ assert( p->pEList ); - if( eDest==SRT_Callback ){ + if( dest.eDest==SRT_Output ){ Select *pFirst = p; while( pFirst->pPrior ) pFirst = pFirst->pPrior; generateColumnNames(pParse, 0, pFirst->pEList); @@ -1905,76 +1660,54 @@ iBreak = sqlite3VdbeMakeLabel(v); iCont = sqlite3VdbeMakeLabel(v); computeLimitRegisters(pParse, p, iBreak); - sqlite3VdbeAddOp(v, OP_Rewind, tab1, iBreak); - iStart = sqlite3VdbeAddOp(v, OP_RowKey, tab1, 0); - sqlite3VdbeAddOp(v, OP_NotFound, tab2, iCont); - rc = selectInnerLoop(pParse, p, p->pEList, tab1, p->pEList->nExpr, - pOrderBy, -1, eDest, iParm, - iCont, iBreak, 0); - if( rc ){ - rc = 1; - goto multi_select_end; - } + sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); + r1 = sqlite3GetTempReg(pParse); + iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1); + sqlite3VdbeAddOp3(v, OP_NotFound, tab2, iCont, r1); + sqlite3ReleaseTempReg(pParse, r1); + selectInnerLoop(pParse, p, p->pEList, tab1, p->pEList->nExpr, + 0, -1, &dest, iCont, iBreak); sqlite3VdbeResolveLabel(v, iCont); - sqlite3VdbeAddOp(v, OP_Next, tab1, iStart); + sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); sqlite3VdbeResolveLabel(v, iBreak); - sqlite3VdbeAddOp(v, OP_Close, tab2, 0); - sqlite3VdbeAddOp(v, OP_Close, tab1, 0); + sqlite3VdbeAddOp2(v, OP_Close, tab2, 0); + sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); break; } } - /* Make sure all SELECTs in the statement have the same number of elements - ** in their result sets. - */ - assert( p->pEList && pPrior->pEList ); - if( p->pEList->nExpr!=pPrior->pEList->nExpr ){ - sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" - " do not have the same number of result columns", selectOpName(p->op)); - rc = 1; - goto multi_select_end; - } - - /* Set the number of columns in temporary tables - */ - nCol = p->pEList->nExpr; - while( nSetP2 ){ - sqlite3VdbeChangeP2(v, aSetP2[--nSetP2], nCol); - } - - /* Compute collating sequences used by either the ORDER BY clause or - ** by any temporary tables needed to implement the compound select. - ** Attach the KeyInfo structure to all temporary tables. Invoke the - ** ORDER BY processing if there is an ORDER BY clause. + /* Compute collating sequences used by + ** temporary tables needed to implement the compound select. + ** Attach the KeyInfo structure to all temporary tables. ** ** This section is run by the right-most SELECT statement only. ** SELECT statements to the left always skip this part. The right-most ** SELECT might also skip this part if it has no ORDER BY clause and ** no temp tables are required. */ - if( pOrderBy || p->usesEphm ){ + if( p->selFlags & SF_UsesEphemeral ){ int i; /* Loop counter */ KeyInfo *pKeyInfo; /* Collating sequence for the result set */ Select *pLoop; /* For looping through SELECT statements */ - int nKeyCol; /* Number of entries in pKeyInfo->aCol[] */ CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */ - CollSeq **aCopy; /* A copy of pKeyInfo->aColl[] */ + int nCol; /* Number of columns in result set */ assert( p->pRightmost==p ); - nKeyCol = nCol + (pOrderBy ? 
pOrderBy->nExpr : 0); - pKeyInfo = sqliteMalloc(sizeof(*pKeyInfo)+nKeyCol*(sizeof(CollSeq*) + 1)); + nCol = p->pEList->nExpr; + pKeyInfo = sqlite3DbMallocZero(db, + sizeof(*pKeyInfo)+nCol*(sizeof(CollSeq*) + 1)); if( !pKeyInfo ){ rc = SQLITE_NOMEM; goto multi_select_end; } - pKeyInfo->enc = ENC(pParse->db); - pKeyInfo->nField = nCol; + pKeyInfo->enc = ENC(db); + pKeyInfo->nField = (u16)nCol; for(i=0, apColl=pKeyInfo->aColl; idb->pDfltColl; + *apColl = db->pDfltColl; } } @@ -1988,132 +1721,693 @@ break; } sqlite3VdbeChangeP2(v, addr, nCol); - sqlite3VdbeChangeP3(v, addr, (char*)pKeyInfo, P3_KEYINFO); + sqlite3VdbeChangeP4(v, addr, (char*)pKeyInfo, P4_KEYINFO); pLoop->addrOpenEphm[i] = -1; } } - - if( pOrderBy ){ - struct ExprList_item *pOTerm = pOrderBy->a; - int nOrderByExpr = pOrderBy->nExpr; - int addr; - u8 *pSortOrder; - - /* Reuse the same pKeyInfo for the ORDER BY as was used above for - ** the compound select statements. Except we have to change out the - ** pKeyInfo->aColl[] values. Some of the aColl[] values will be - ** reused when constructing the pKeyInfo for the ORDER BY, so make - ** a copy. Sufficient space to hold both the nCol entries for - ** the compound select and the nOrderbyExpr entries for the ORDER BY - ** was allocated above. But we need to move the compound select - ** entries out of the way before constructing the ORDER BY entries. - ** Move the compound select entries into aCopy[] where they can be - ** accessed and reused when constructing the ORDER BY entries. - ** Because nCol might be greater than or less than nOrderByExpr - ** we have to use memmove() when doing the copy. - */ - aCopy = &pKeyInfo->aColl[nOrderByExpr]; - pSortOrder = pKeyInfo->aSortOrder = (u8*)&aCopy[nCol]; - memmove(aCopy, pKeyInfo->aColl, nCol*sizeof(CollSeq*)); - - apColl = pKeyInfo->aColl; - for(i=0; ipExpr; - if( (pExpr->flags & EP_ExpCollate) ){ - assert( pExpr->pColl!=0 ); - *apColl = pExpr->pColl; - }else{ - *apColl = aCopy[pExpr->iColumn]; - } - *pSortOrder = pOTerm->sortOrder; - } - assert( p->pRightmost==p ); - assert( p->addrOpenEphm[2]>=0 ); - addr = p->addrOpenEphm[2]; - sqlite3VdbeChangeP2(v, addr, p->pOrderBy->nExpr+2); - pKeyInfo->nField = nOrderByExpr; - sqlite3VdbeChangeP3(v, addr, (char*)pKeyInfo, P3_KEYINFO_HANDOFF); - pKeyInfo = 0; - generateSortTail(pParse, p, v, p->pEList->nExpr, eDest, iParm); - } - - sqliteFree(pKeyInfo); + sqlite3DbFree(db, pKeyInfo); } multi_select_end: + pDest->iMem = dest.iMem; + pDest->nMem = dest.nMem; + sqlite3SelectDelete(db, pDelete); return rc; } #endif /* SQLITE_OMIT_COMPOUND_SELECT */ -#ifndef SQLITE_OMIT_VIEW /* -** Scan through the expression pExpr. Replace every reference to -** a column in table number iTable with a copy of the iColumn-th -** entry in pEList. (But leave references to the ROWID column -** unchanged.) +** Code an output subroutine for a coroutine implementation of a +** SELECT statment. ** -** This routine is part of the flattening procedure. A subquery -** whose result set is defined by pEList appears as entry in the -** FROM clause of a SELECT such that the VDBE cursor assigned to that -** FORM clause entry is iTable. This routine make the necessary -** changes to pExpr so that it refers directly to the source table -** of the subquery rather the result set of the subquery. 
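
The substExpr()/substExprList() family being replaced here performs the core rewrite of query flattening: every reference to column N of the subquery's cursor is swapped for a copy of the N-th result expression of that subquery. The toy version below works on a minimal expression node; the node layout, operator names and dup helper are all invented for the sketch and only mimic the idea.

#include <stdio.h>
#include <stdlib.h>

enum { EX_COLUMN, EX_LITERAL, EX_PLUS };   /* toy operators */

typedef struct Node {
  int op;
  int iTable, iColumn;      /* used when op==EX_COLUMN */
  int iValue;               /* used when op==EX_LITERAL */
  struct Node *pLeft, *pRight;
} Node;

/* Deep copy, standing in for sqlite3ExprDup(). */
static Node *dupNode(const Node *p){
  Node *pNew;
  if( p==0 ) return 0;
  pNew = malloc(sizeof(*pNew));
  *pNew = *p;
  pNew->pLeft = dupNode(p->pLeft);
  pNew->pRight = dupNode(p->pRight);
  return pNew;
}

/* Replace every "column N of cursor iTable" with a copy of apDef[N].
** The abandoned original node is simply leaked in this toy version. */
static void subst(Node **pp, int iTable, Node **apDef){
  Node *p = *pp;
  if( p==0 ) return;
  if( p->op==EX_COLUMN && p->iTable==iTable ){
    *pp = dupNode(apDef[p->iColumn]);
    return;
  }
  subst(&p->pLeft, iTable, apDef);
  subst(&p->pRight, iTable, apDef);
}

int main(void){
  /* Subquery result column 0 is the literal 7; the outer expression that
  ** references it is "col0 + 1", with cursor number 3 for the subquery. */
  Node def0 = {EX_LITERAL, 0, 0, 7, 0, 0};
  Node *apDef[1] = { &def0 };
  Node col  = {EX_COLUMN, 3, 0, 0, 0, 0};
  Node one  = {EX_LITERAL, 0, 0, 1, 0, 0};
  Node plus = {EX_PLUS, 0, 0, 0, &col, &one};
  Node *pExpr = &plus;
  subst(&pExpr, 3, apDef);
  printf("left operand is now op=%d value=%d\n",
         pExpr->pLeft->op, pExpr->pLeft->iValue);   /* EX_LITERAL, 7 */
  return 0;
}
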
-*/ -static void substExprList(ExprList*,int,ExprList*); /* Forward Decl */ -static void substSelect(Select *, int, ExprList *); /* Forward Decl */ -static void substExpr(Expr *pExpr, int iTable, ExprList *pEList){ - if( pExpr==0 ) return; - if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ - if( pExpr->iColumn<0 ){ - pExpr->op = TK_NULL; - }else{ - Expr *pNew; - assert( pEList!=0 && pExpr->iColumnnExpr ); - assert( pExpr->pLeft==0 && pExpr->pRight==0 && pExpr->pList==0 ); - pNew = pEList->a[pExpr->iColumn].pExpr; - assert( pNew!=0 ); - pExpr->op = pNew->op; - assert( pExpr->pLeft==0 ); - pExpr->pLeft = sqlite3ExprDup(pNew->pLeft); - assert( pExpr->pRight==0 ); - pExpr->pRight = sqlite3ExprDup(pNew->pRight); - assert( pExpr->pList==0 ); - pExpr->pList = sqlite3ExprListDup(pNew->pList); - pExpr->iTable = pNew->iTable; - pExpr->pTab = pNew->pTab; - pExpr->iColumn = pNew->iColumn; - pExpr->iAgg = pNew->iAgg; - sqlite3TokenCopy(&pExpr->token, &pNew->token); - sqlite3TokenCopy(&pExpr->span, &pNew->span); - pExpr->pSelect = sqlite3SelectDup(pNew->pSelect); - pExpr->flags = pNew->flags; - } - }else{ - substExpr(pExpr->pLeft, iTable, pEList); - substExpr(pExpr->pRight, iTable, pEList); - substSelect(pExpr->pSelect, iTable, pEList); - substExprList(pExpr->pList, iTable, pEList); +** The data to be output is contained in pIn->iMem. There are +** pIn->nMem columns to be output. pDest is where the output should +** be sent. +** +** regReturn is the number of the register holding the subroutine +** return address. +** +** If regPrev>0 then it is a the first register in a vector that +** records the previous output. mem[regPrev] is a flag that is false +** if there has been no previous output. If regPrev>0 then code is +** generated to suppress duplicates. pKeyInfo is used for comparing +** keys. +** +** If the LIMIT found in p->iLimit is reached, jump immediately to +** iBreak. +*/ +static int generateOutputSubroutine( + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement */ + SelectDest *pIn, /* Coroutine supplying data */ + SelectDest *pDest, /* Where to send the data */ + int regReturn, /* The return address register */ + int regPrev, /* Previous result register. No uniqueness if 0 */ + KeyInfo *pKeyInfo, /* For comparing with previous entry */ + int p4type, /* The p4 type for pKeyInfo */ + int iBreak /* Jump here if we hit the LIMIT */ +){ + Vdbe *v = pParse->pVdbe; + int iContinue; + int addr; + + addr = sqlite3VdbeCurrentAddr(v); + iContinue = sqlite3VdbeMakeLabel(v); + + /* Suppress duplicates for UNION, EXCEPT, and INTERSECT + */ + if( regPrev ){ + int j1, j2; + j1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); + j2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iMem, regPrev+1, pIn->nMem, + (char*)pKeyInfo, p4type); + sqlite3VdbeAddOp3(v, OP_Jump, j2+2, iContinue, j2+2); + sqlite3VdbeJumpHere(v, j1); + sqlite3ExprCodeCopy(pParse, pIn->iMem, regPrev+1, pIn->nMem); + sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev); } -} -static void substExprList(ExprList *pList, int iTable, ExprList *pEList){ - int i; + if( pParse->db->mallocFailed ) return 0; + + /* Suppress the the first OFFSET entries if there is an OFFSET clause + */ + codeOffset(v, p, iContinue); + + switch( pDest->eDest ){ + /* Store the result as data using a unique key. 
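As a rough stand-alone illustration of the regPrev duplicate-suppression pattern generated earlier in this routine: keep a copy of the last row that was emitted plus a validity flag, and emit a new row only when the flag is clear or the row differs. Everything below (Row, emit_unique, the use of memcmp instead of per-column collating functions) is invented for the sketch and is not SQLite code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NCOL 2

typedef struct Row { int a[NCOL]; } Row;  /* stand-in for the pIn->nMem registers */

static Row prevRow;              /* analogue of mem[regPrev+1 .. regPrev+nMem] */
static bool prevValid = false;   /* analogue of the mem[regPrev] flag */

/* Emit a row only if it differs from the previously emitted row.  This is
** what the OP_IfNot / OP_Compare / OP_Jump sequence arranges, except that
** the real code compares with the collating functions in pKeyInfo. */
static void emit_unique(const Row *p){
  if( prevValid && memcmp(p, &prevRow, sizeof(Row))==0 ){
    return;                      /* duplicate of the previous output: skip */
  }
  prevRow = *p;                  /* analogue of sqlite3ExprCodeCopy() */
  prevValid = true;              /* analogue of OP_Integer 1 -> regPrev */
  printf("%d|%d\n", p->a[0], p->a[1]);
}

int main(void){
  Row rows[] = { {{1,1}}, {{1,1}}, {{2,3}}, {{2,3}}, {{2,4}} };
  size_t i;
  for(i=0; i<sizeof(rows)/sizeof(rows[0]); i++) emit_unique(&rows[i]);
  return 0;                      /* prints 1|1, 2|3, 2|4 */
}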
+ */ + case SRT_Table: + case SRT_EphemTab: { + int r1 = sqlite3GetTempReg(pParse); + int r2 = sqlite3GetTempReg(pParse); + testcase( pDest->eDest==SRT_Table ); + testcase( pDest->eDest==SRT_EphemTab ); + sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iMem, pIn->nMem, r1); + sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iParm, r2); + sqlite3VdbeAddOp3(v, OP_Insert, pDest->iParm, r1, r2); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3ReleaseTempReg(pParse, r2); + sqlite3ReleaseTempReg(pParse, r1); + break; + } + +#ifndef SQLITE_OMIT_SUBQUERY + /* If we are creating a set for an "expr IN (SELECT ...)" construct, + ** then there should be a single item on the stack. Write this + ** item into the set table with bogus data. + */ + case SRT_Set: { + int r1; + assert( pIn->nMem==1 ); + p->affinity = + sqlite3CompareAffinity(p->pEList->a[0].pExpr, pDest->affinity); + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iMem, 1, r1, &p->affinity, 1); + sqlite3ExprCacheAffinityChange(pParse, pIn->iMem, 1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iParm, r1); + sqlite3ReleaseTempReg(pParse, r1); + break; + } + +#if 0 /* Never occurs on an ORDER BY query */ + /* If any row exist in the result set, record that fact and abort. + */ + case SRT_Exists: { + sqlite3VdbeAddOp2(v, OP_Integer, 1, pDest->iParm); + /* The LIMIT clause will terminate the loop for us */ + break; + } +#endif + + /* If this is a scalar select that is part of an expression, then + ** store the results in the appropriate memory cell and break out + ** of the scan loop. + */ + case SRT_Mem: { + assert( pIn->nMem==1 ); + sqlite3ExprCodeMove(pParse, pIn->iMem, pDest->iParm, 1); + /* The LIMIT clause will jump out of the loop for us */ + break; + } +#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ + + /* The results are stored in a sequence of registers + ** starting at pDest->iMem. Then the co-routine yields. + */ + case SRT_Coroutine: { + if( pDest->iMem==0 ){ + pDest->iMem = sqlite3GetTempRange(pParse, pIn->nMem); + pDest->nMem = pIn->nMem; + } + sqlite3ExprCodeMove(pParse, pIn->iMem, pDest->iMem, pDest->nMem); + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iParm); + break; + } + + /* If none of the above, then the result destination must be + ** SRT_Output. This routine is never called with any other + ** destination other than the ones handled above or SRT_Output. + ** + ** For SRT_Output, results are stored in a sequence of registers. + ** Then the OP_ResultRow opcode is used to cause sqlite3_step() to + ** return the next row of result. + */ + default: { + assert( pDest->eDest==SRT_Output ); + sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iMem, pIn->nMem); + sqlite3ExprCacheAffinityChange(pParse, pIn->iMem, pIn->nMem); + break; + } + } + + /* Jump to the end of the loop if the LIMIT is reached. + */ + if( p->iLimit ){ + sqlite3VdbeAddOp2(v, OP_AddImm, p->iLimit, -1); + sqlite3VdbeAddOp2(v, OP_IfZero, p->iLimit, iBreak); + } + + /* Generate the subroutine return + */ + sqlite3VdbeResolveLabel(v, iContinue); + sqlite3VdbeAddOp1(v, OP_Return, regReturn); + + return addr; +} + +/* +** Alternative compound select code generator for cases when there +** is an ORDER BY clause. +** +** We assume a query of the following form: +** +** ORDER BY +** +** is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea +** is to code both and with the ORDER BY clause as +** co-routines. Then run the co-routines in parallel and merge the results +** into the output. 
In addition to the two coroutines (called selectA and
+** selectB) there are 7 subroutines:
+**
+**    outA:    Move the output of the selectA coroutine into the output
+**             of the compound query.
+**
+**    outB:    Move the output of the selectB coroutine into the output
+**             of the compound query.  (Only generated for UNION and
+**             UNION ALL.  EXCEPT and INTERSECT never output a row that
+**             appears only in B.)
+**
+**    AltB:    Called when there is data from both coroutines and A<B.
+**
+**    AeqB:    Called when there is data from both coroutines and A==B.
+**
+**    AgtB:    Called when there is data from both coroutines and A>B.
+**
+**    EofA:    Called when data is exhausted from selectA.
+**
+**    EofB:    Called when data is exhausted from selectB.
+**
+** The implementation of the latter five subroutines depends on which
+** <operator> is used:
+**
+**
+**             UNION ALL        UNION            EXCEPT          INTERSECT
+**          -------------  ---------------  ---------------  ----------------
+**   AltB:   outA, nextA    outA, nextA      outA, nextA         nextA
+**
+**   AeqB:   outA, nextA       nextA            nextA          outA, nextA
+**
+**   AgtB:   outB, nextB    outB, nextB         nextB             nextB
+**
+**   EofA:   outB, nextB    outB, nextB          halt              halt
+**
+**   EofB:   outA, nextA    outA, nextA      outA, nextA           halt
+**
+** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA
+** causes an immediate jump to EofA and an EOF on B following nextB causes
+** an immediate jump to EofB.  Within EofA and EofB, an EOF on entry or
+** following nextX causes a jump to the end of the select processing.
+**
+** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled
+** within the output subroutine.  The regPrev register set holds the previously
+** output value.  A comparison is made against this value and the output
+** is skipped if the next results would be the same as the previous.
+**
+** The implementation plan is to implement the two coroutines and seven
+** subroutines first, then put the control logic at the bottom.  Like this:
+**
+**          goto Init
+**     coA: coroutine for left query (A)
+**     coB: coroutine for right query (B)
+**    outA: output one row of A
+**    outB: output one row of B (UNION and UNION ALL only)
+**    EofA: ...
+**    EofB: ...
+**    AltB: ...
+**    AeqB: ...
+**    AgtB: ...
+**    Init: initialize coroutine registers
+**          yield coA
+**          if eof(A) goto EofA
+**          yield coB
+**          if eof(B) goto EofB
+**    Cmpr: Compare A, B
+**          Jump AltB, AeqB, AgtB
+**     End: ...
+**
+** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
+** actually called using Gosub and they do not Return.  EofA and EofB loop
+** until all data is exhausted and then jump to the "end" label.  AltB, AeqB,
+** and AgtB jump to either Cmpr or to one of EofA or EofB.
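The decision table above can be exercised outside the VDBE with a small, self-contained merge over two already-sorted integer arrays. The sketch below follows the table literally (AltB/AeqB/AgtB plus the two drain cases) and lets the output routine do duplicate suppression for everything except UNION ALL; the enum and function names (CO_UNION_ALL, merge_compound, out) are invented for the sketch and are not SQLite code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef enum { CO_UNION_ALL, CO_UNION, CO_EXCEPT, CO_INTERSECT } CompoundOp;

/* Output routine: for UNION, EXCEPT and INTERSECT, suppress a value equal
** to the previously emitted one (the regPrev idea described above). */
static void out(CompoundOp op, int v, int *pPrev, bool *pPrevValid){
  if( op!=CO_UNION_ALL && *pPrevValid && v==*pPrev ) return;
  *pPrev = v;
  *pPrevValid = true;
  printf("%d\n", v);
}

/* Merge two sorted arrays a[0..nA-1] and b[0..nB-1] according to the
** AltB/AeqB/AgtB/EofA/EofB table above. */
static void merge_compound(CompoundOp op, const int *a, size_t nA,
                           const int *b, size_t nB){
  size_t i = 0, j = 0;
  int prev = 0;
  bool prevValid = false;
  while( i<nA && j<nB ){
    if( a[i]<b[j] ){                                      /* AltB */
      if( op!=CO_INTERSECT ) out(op, a[i], &prev, &prevValid);
      i++;
    }else if( a[i]==b[j] ){                               /* AeqB */
      if( op==CO_UNION_ALL || op==CO_INTERSECT ) out(op, a[i], &prev, &prevValid);
      i++;
    }else{                                                /* AgtB */
      if( op==CO_UNION_ALL || op==CO_UNION ) out(op, b[j], &prev, &prevValid);
      j++;
    }
  }
  /* EofA: drain B for UNION ALL and UNION; halt for EXCEPT and INTERSECT. */
  if( op==CO_UNION_ALL || op==CO_UNION ){
    while( j<nB ){ out(op, b[j], &prev, &prevValid); j++; }
  }
  /* EofB: drain A for everything except INTERSECT. */
  if( op!=CO_INTERSECT ){
    while( i<nA ){ out(op, a[i], &prev, &prevValid); i++; }
  }
}

int main(void){
  int A[] = {1, 2, 2, 4, 5};
  int B[] = {2, 4, 6};
  merge_compound(CO_EXCEPT, A, 5, B, 3);   /* prints 1 and 5 */
  return 0;
}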
+*/ +#ifndef SQLITE_OMIT_COMPOUND_SELECT +static int multiSelectOrderBy( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +){ + int i, j; /* Loop counters */ + Select *pPrior; /* Another SELECT immediately to our left */ + Vdbe *v; /* Generate code to this VDBE */ + SelectDest destA; /* Destination for coroutine A */ + SelectDest destB; /* Destination for coroutine B */ + int regAddrA; /* Address register for select-A coroutine */ + int regEofA; /* Flag to indicate when select-A is complete */ + int regAddrB; /* Address register for select-B coroutine */ + int regEofB; /* Flag to indicate when select-B is complete */ + int addrSelectA; /* Address of the select-A coroutine */ + int addrSelectB; /* Address of the select-B coroutine */ + int regOutA; /* Address register for the output-A subroutine */ + int regOutB; /* Address register for the output-B subroutine */ + int addrOutA; /* Address of the output-A subroutine */ + int addrOutB = 0; /* Address of the output-B subroutine */ + int addrEofA; /* Address of the select-A-exhausted subroutine */ + int addrEofB; /* Address of the select-B-exhausted subroutine */ + int addrAltB; /* Address of the AB subroutine */ + int regLimitA; /* Limit register for select-A */ + int regLimitB; /* Limit register for select-A */ + int regPrev; /* A range of registers to hold previous output */ + int savedLimit; /* Saved value of p->iLimit */ + int savedOffset; /* Saved value of p->iOffset */ + int labelCmpr; /* Label for the start of the merge algorithm */ + int labelEnd; /* Label for the end of the overall SELECT stmt */ + int j1; /* Jump instructions that get retargetted */ + int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */ + KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */ + KeyInfo *pKeyMerge; /* Comparison information for merging rows */ + sqlite3 *db; /* Database connection */ + ExprList *pOrderBy; /* The ORDER BY clause */ + int nOrderBy; /* Number of terms in the ORDER BY clause */ + int *aPermute; /* Mapping from ORDER BY terms to result set columns */ + + assert( p->pOrderBy!=0 ); + assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */ + db = pParse->db; + v = pParse->pVdbe; + assert( v!=0 ); /* Already thrown the error if VDBE alloc failed */ + labelEnd = sqlite3VdbeMakeLabel(v); + labelCmpr = sqlite3VdbeMakeLabel(v); + + + /* Patch up the ORDER BY clause + */ + op = p->op; + pPrior = p->pPrior; + assert( pPrior->pOrderBy==0 ); + pOrderBy = p->pOrderBy; + assert( pOrderBy ); + nOrderBy = pOrderBy->nExpr; + + /* For operators other than UNION ALL we have to make sure that + ** the ORDER BY clause covers every term of the result set. Add + ** terms to the ORDER BY clause as necessary. + */ + if( op!=TK_ALL ){ + for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){ + struct ExprList_item *pItem; + for(j=0, pItem=pOrderBy->a; jiCol>0 ); + if( pItem->iCol==i ) break; + } + if( j==nOrderBy ){ + Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); + if( pNew==0 ) return SQLITE_NOMEM; + pNew->flags |= EP_IntValue; + pNew->u.iValue = i; + pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew); + pOrderBy->a[nOrderBy++].iCol = (u16)i; + } + } + } + + /* Compute the comparison permutation and keyinfo that is used with + ** the permutation used to determine if the next + ** row of results comes from selectA or selectB. 
Also add explicit + ** collations to the ORDER BY clause terms so that when the subqueries + ** to the right and the left are evaluated, they use the correct + ** collation. + */ + aPermute = sqlite3DbMallocRaw(db, sizeof(int)*nOrderBy); + if( aPermute ){ + struct ExprList_item *pItem; + for(i=0, pItem=pOrderBy->a; iiCol>0 && pItem->iCol<=p->pEList->nExpr ); + aPermute[i] = pItem->iCol - 1; + } + pKeyMerge = + sqlite3DbMallocRaw(db, sizeof(*pKeyMerge)+nOrderBy*(sizeof(CollSeq*)+1)); + if( pKeyMerge ){ + pKeyMerge->aSortOrder = (u8*)&pKeyMerge->aColl[nOrderBy]; + pKeyMerge->nField = (u16)nOrderBy; + pKeyMerge->enc = ENC(db); + for(i=0; ia[i].pExpr; + if( pTerm->flags & EP_ExpCollate ){ + pColl = pTerm->pColl; + }else{ + pColl = multiSelectCollSeq(pParse, p, aPermute[i]); + pTerm->flags |= EP_ExpCollate; + pTerm->pColl = pColl; + } + pKeyMerge->aColl[i] = pColl; + pKeyMerge->aSortOrder[i] = pOrderBy->a[i].sortOrder; + } + } + }else{ + pKeyMerge = 0; + } + + /* Reattach the ORDER BY clause to the query. + */ + p->pOrderBy = pOrderBy; + pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); + + /* Allocate a range of temporary registers and the KeyInfo needed + ** for the logic that removes duplicate result rows when the + ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL). + */ + if( op==TK_ALL ){ + regPrev = 0; + }else{ + int nExpr = p->pEList->nExpr; + assert( nOrderBy>=nExpr || db->mallocFailed ); + regPrev = sqlite3GetTempRange(pParse, nExpr+1); + sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev); + pKeyDup = sqlite3DbMallocZero(db, + sizeof(*pKeyDup) + nExpr*(sizeof(CollSeq*)+1) ); + if( pKeyDup ){ + pKeyDup->aSortOrder = (u8*)&pKeyDup->aColl[nExpr]; + pKeyDup->nField = (u16)nExpr; + pKeyDup->enc = ENC(db); + for(i=0; iaColl[i] = multiSelectCollSeq(pParse, p, i); + pKeyDup->aSortOrder[i] = 0; + } + } + } + + /* Separate the left and the right query from one another + */ + p->pPrior = 0; + pPrior->pRightmost = 0; + sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER"); + if( pPrior->pPrior==0 ){ + sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); + } + + /* Compute the limit registers */ + computeLimitRegisters(pParse, p, labelEnd); + if( p->iLimit && op==TK_ALL ){ + regLimitA = ++pParse->nMem; + regLimitB = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit, + regLimitA); + sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB); + }else{ + regLimitA = regLimitB = 0; + } + sqlite3ExprDelete(db, p->pLimit); + p->pLimit = 0; + sqlite3ExprDelete(db, p->pOffset); + p->pOffset = 0; + + regAddrA = ++pParse->nMem; + regEofA = ++pParse->nMem; + regAddrB = ++pParse->nMem; + regEofB = ++pParse->nMem; + regOutA = ++pParse->nMem; + regOutB = ++pParse->nMem; + sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); + sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); + + /* Jump past the various subroutines and coroutines to the main + ** merge loop + */ + j1 = sqlite3VdbeAddOp0(v, OP_Goto); + addrSelectA = sqlite3VdbeCurrentAddr(v); + + + /* Generate a coroutine to evaluate the SELECT statement to the + ** left of the compound operator - the "A" select. 
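In isolation, the aPermute/pKeyMerge machinery built above amounts to: compare two rows on the ORDER BY terms, remapping each term to its result column and honoring its collation and sort order. A minimal stand-alone version of that comparison might look like the sketch below; MergeKey, compareRows and collBinary are invented names, and a plain function pointer stands in for a CollSeq.

#include <stdio.h>
#include <string.h>

typedef int (*CollFunc)(const char*, const char*);   /* stand-in for a CollSeq */

/* A reduced KeyInfo: one permutation slot, collation and sort order per
** ORDER BY term. */
typedef struct MergeKey {
  int nTerm;                        /* number of ORDER BY terms */
  const int *aPermute;              /* ORDER BY term -> result column (0-based) */
  const CollFunc *aColl;            /* per-term collating function */
  const unsigned char *aSortOrder;  /* 0 = ASC, 1 = DESC */
} MergeKey;

/* Compare two rows of text columns the way the merge loop does: walk the
** ORDER BY terms, remap each through aPermute, honor the sort order. */
static int compareRows(const MergeKey *pKey, const char **azA, const char **azB){
  int i;
  for(i=0; i<pKey->nTerm; i++){
    int iCol = pKey->aPermute[i];
    int c = pKey->aColl[i](azA[iCol], azB[iCol]);
    if( c ) return pKey->aSortOrder[i] ? -c : c;
  }
  return 0;
}

static int collBinary(const char *z1, const char *z2){ return strcmp(z1, z2); }

int main(void){
  /* ORDER BY 2, 1 DESC over rows with two text columns */
  static const int aPermute[] = {1, 0};
  static const CollFunc aColl[] = {collBinary, collBinary};
  static const unsigned char aSortOrder[] = {0, 1};
  MergeKey key = {2, aPermute, aColl, aSortOrder};
  const char *rowA[] = {"abc", "x"};
  const char *rowB[] = {"abd", "x"};
  printf("%d\n", compareRows(&key, rowA, rowB)>0);  /* 1: DESC puts rowA after rowB */
  return 0;
}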
+ */ + VdbeNoopComment((v, "Begin coroutine for left SELECT")); + pPrior->iLimit = regLimitA; + sqlite3Select(pParse, pPrior, &destA); + sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofA); + sqlite3VdbeAddOp1(v, OP_Yield, regAddrA); + VdbeNoopComment((v, "End coroutine for left SELECT")); + + /* Generate a coroutine to evaluate the SELECT statement on + ** the right - the "B" select + */ + addrSelectB = sqlite3VdbeCurrentAddr(v); + VdbeNoopComment((v, "Begin coroutine for right SELECT")); + savedLimit = p->iLimit; + savedOffset = p->iOffset; + p->iLimit = regLimitB; + p->iOffset = 0; + sqlite3Select(pParse, p, &destB); + p->iLimit = savedLimit; + p->iOffset = savedOffset; + sqlite3VdbeAddOp2(v, OP_Integer, 1, regEofB); + sqlite3VdbeAddOp1(v, OP_Yield, regAddrB); + VdbeNoopComment((v, "End coroutine for right SELECT")); + + /* Generate a subroutine that outputs the current row of the A + ** select as the next output row of the compound select. + */ + VdbeNoopComment((v, "Output routine for A")); + addrOutA = generateOutputSubroutine(pParse, + p, &destA, pDest, regOutA, + regPrev, pKeyDup, P4_KEYINFO_HANDOFF, labelEnd); + + /* Generate a subroutine that outputs the current row of the B + ** select as the next output row of the compound select. + */ + if( op==TK_ALL || op==TK_UNION ){ + VdbeNoopComment((v, "Output routine for B")); + addrOutB = generateOutputSubroutine(pParse, + p, &destB, pDest, regOutB, + regPrev, pKeyDup, P4_KEYINFO_STATIC, labelEnd); + } + + /* Generate a subroutine to run when the results from select A + ** are exhausted and only data in select B remains. + */ + VdbeNoopComment((v, "eof-A subroutine")); + if( op==TK_EXCEPT || op==TK_INTERSECT ){ + addrEofA = sqlite3VdbeAddOp2(v, OP_Goto, 0, labelEnd); + }else{ + addrEofA = sqlite3VdbeAddOp2(v, OP_If, regEofB, labelEnd); + sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); + sqlite3VdbeAddOp1(v, OP_Yield, regAddrB); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofA); + } + + /* Generate a subroutine to run when the results from select B + ** are exhausted and only data in select A remains. + */ + if( op==TK_INTERSECT ){ + addrEofB = addrEofA; + }else{ + VdbeNoopComment((v, "eof-B subroutine")); + addrEofB = sqlite3VdbeAddOp2(v, OP_If, regEofA, labelEnd); + sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); + sqlite3VdbeAddOp1(v, OP_Yield, regAddrA); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofB); + } + + /* Generate code to handle the case of AB + */ + VdbeNoopComment((v, "A-gt-B subroutine")); + addrAgtB = sqlite3VdbeCurrentAddr(v); + if( op==TK_ALL || op==TK_UNION ){ + sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); + } + sqlite3VdbeAddOp1(v, OP_Yield, regAddrB); + sqlite3VdbeAddOp2(v, OP_If, regEofB, addrEofB); + sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr); + + /* This code runs once to initialize everything. 
+ */ + sqlite3VdbeJumpHere(v, j1); + sqlite3VdbeAddOp2(v, OP_Integer, 0, regEofA); + sqlite3VdbeAddOp2(v, OP_Integer, 0, regEofB); + sqlite3VdbeAddOp2(v, OP_Gosub, regAddrA, addrSelectA); + sqlite3VdbeAddOp2(v, OP_Gosub, regAddrB, addrSelectB); + sqlite3VdbeAddOp2(v, OP_If, regEofA, addrEofA); + sqlite3VdbeAddOp2(v, OP_If, regEofB, addrEofB); + + /* Implement the main merge loop + */ + sqlite3VdbeResolveLabel(v, labelCmpr); + sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY); + sqlite3VdbeAddOp4(v, OP_Compare, destA.iMem, destB.iMem, nOrderBy, + (char*)pKeyMerge, P4_KEYINFO_HANDOFF); + sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); + + /* Release temporary registers + */ + if( regPrev ){ + sqlite3ReleaseTempRange(pParse, regPrev, nOrderBy+1); + } + + /* Jump to the this point in order to terminate the query. + */ + sqlite3VdbeResolveLabel(v, labelEnd); + + /* Set the number of output columns + */ + if( pDest->eDest==SRT_Output ){ + Select *pFirst = pPrior; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, 0, pFirst->pEList); + } + + /* Reassembly the compound query so that it will be freed correctly + ** by the calling function */ + if( p->pPrior ){ + sqlite3SelectDelete(db, p->pPrior); + } + p->pPrior = pPrior; + + /*** TBD: Insert subroutine calls to close cursors on incomplete + **** subqueries ****/ + return SQLITE_OK; +} +#endif + +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +/* Forward Declarations */ +static void substExprList(sqlite3*, ExprList*, int, ExprList*); +static void substSelect(sqlite3*, Select *, int, ExprList *); + +/* +** Scan through the expression pExpr. Replace every reference to +** a column in table number iTable with a copy of the iColumn-th +** entry in pEList. (But leave references to the ROWID column +** unchanged.) +** +** This routine is part of the flattening procedure. A subquery +** whose result set is defined by pEList appears as entry in the +** FROM clause of a SELECT such that the VDBE cursor assigned to that +** FORM clause entry is iTable. This routine make the necessary +** changes to pExpr so that it refers directly to the source table +** of the subquery rather the result set of the subquery. 
+*/ +static Expr *substExpr( + sqlite3 *db, /* Report malloc errors to this connection */ + Expr *pExpr, /* Expr in which substitution occurs */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute expressions */ +){ + if( pExpr==0 ) return 0; + if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ + if( pExpr->iColumn<0 ){ + pExpr->op = TK_NULL; + }else{ + Expr *pNew; + assert( pEList!=0 && pExpr->iColumnnExpr ); + assert( pExpr->pLeft==0 && pExpr->pRight==0 ); + pNew = sqlite3ExprDup(db, pEList->a[pExpr->iColumn].pExpr, 0); + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; + } + }else{ + pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList); + pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + substSelect(db, pExpr->x.pSelect, iTable, pEList); + }else{ + substExprList(db, pExpr->x.pList, iTable, pEList); + } + } + return pExpr; +} +static void substExprList( + sqlite3 *db, /* Report malloc errors here */ + ExprList *pList, /* List to scan and in which to make substitutes */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute values */ +){ + int i; if( pList==0 ) return; for(i=0; inExpr; i++){ - substExpr(pList->a[i].pExpr, iTable, pEList); + pList->a[i].pExpr = substExpr(db, pList->a[i].pExpr, iTable, pEList); } } -static void substSelect(Select *p, int iTable, ExprList *pEList){ +static void substSelect( + sqlite3 *db, /* Report malloc errors here */ + Select *p, /* SELECT statement in which to make substitutions */ + int iTable, /* Table to be replaced */ + ExprList *pEList /* Substitute values */ +){ + SrcList *pSrc; + struct SrcList_item *pItem; + int i; if( !p ) return; - substExprList(p->pEList, iTable, pEList); - substExprList(p->pGroupBy, iTable, pEList); - substExprList(p->pOrderBy, iTable, pEList); - substExpr(p->pHaving, iTable, pEList); - substExpr(p->pWhere, iTable, pEList); - substSelect(p->pPrior, iTable, pEList); + substExprList(db, p->pEList, iTable, pEList); + substExprList(db, p->pGroupBy, iTable, pEList); + substExprList(db, p->pOrderBy, iTable, pEList); + p->pHaving = substExpr(db, p->pHaving, iTable, pEList); + p->pWhere = substExpr(db, p->pWhere, iTable, pEList); + substSelect(db, p->pPrior, iTable, pEList); + pSrc = p->pSrc; + assert( pSrc ); /* Even for (SELECT 1) we have: pSrc!=0 but pSrc->nSrc==0 */ + if( ALWAYS(pSrc) ){ + for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ + substSelect(db, pItem->pSelect, iTable, pEList); + } + } } -#endif /* !defined(SQLITE_OMIT_VIEW) */ +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ -#ifndef SQLITE_OMIT_VIEW +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries in order to speed ** execution. It returns 1 if it makes changes and 0 if no flattening @@ -2147,8 +2441,8 @@ ** ** (2) The subquery is not an aggregate or the outer query is not a join. ** -** (3) The subquery is not the right operand of a left outer join, or -** the subquery is not itself a join. (Ticket #306) +** (3) The subquery is not the right operand of a left outer join +** (Originally ticket #306. Strenghtened by ticket #3300) ** ** (4) The subquery is not DISTINCT or the outer query is not a join. ** @@ -2170,8 +2464,8 @@ ** ** (11) The subquery and the outer query do not both have ORDER BY clauses. ** -** (12) The subquery is not the right term of a LEFT OUTER JOIN or the -** subquery has no WHERE clause. (added by ticket #350) +** (12) Not implemented. 
Subsumed into restriction (3). Was previously +** a separate restriction deriving from ticket #350. ** ** (13) The subquery and outer query do not both use LIMIT ** @@ -2181,6 +2475,35 @@ ** subquery does not have both an ORDER BY and a LIMIT clause. ** (See ticket #2339) ** +** (16) The outer query is not an aggregate or the subquery does +** not contain ORDER BY. (Ticket #2942) This used to not matter +** until we introduced the group_concat() function. +** +** (17) The sub-query is not a compound select, or it is a UNION ALL +** compound clause made up entirely of non-aggregate queries, and +** the parent query: +** +** * is not itself part of a compound select, +** * is not an aggregate or DISTINCT query, and +** * has no other tables or sub-selects in the FROM clause. +** +** The parent and sub-query may contain WHERE clauses. Subject to +** rules (11), (13) and (14), they may also contain ORDER BY, +** LIMIT and OFFSET clauses. +** +** (18) If the sub-query is a compound select, then all terms of the +** ORDER by clause of the parent must be simple references to +** columns of the sub-query. +** +** (19) The subquery does not use LIMIT or the outer query does not +** have a WHERE clause. +** +** (20) If the sub-query is a compound select, then it must not use +** an ORDER BY clause. Ticket #3773. We could relax this constraint +** somewhat by saying that the terms of the ORDER BY clause must +** appear as unmodified result columns in the outer query. But +** have other optimizations in mind to deal with that case. +** ** In this routine, the "p" parameter is a pointer to the outer query. ** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query ** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates. @@ -2192,12 +2515,16 @@ ** the subquery before this routine runs. */ static int flattenSubquery( + Parse *pParse, /* Parsing context */ Select *p, /* The parent or outer SELECT statement */ int iFrom, /* Index in p->pSrc->a[] of the inner subquery */ int isAgg, /* True if outer SELECT uses aggregate functions */ int subqueryIsAgg /* True if the subquery uses aggregate functions */ ){ + const char *zSavedAuthContext = pParse->zAuthContext; + Select *pParent; Select *pSub; /* The inner query or "subquery" */ + Select *pSub1; /* Pointer to the rightmost select in sub-query */ SrcList *pSrc; /* The FROM clause of the outer query */ SrcList *pSubSrc; /* The FROM clause of the subquery */ ExprList *pList; /* The result set of the outer query */ @@ -2205,13 +2532,16 @@ int i; /* Loop counter */ Expr *pWhere; /* The WHERE clause */ struct SrcList_item *pSubitem; /* The subquery */ + sqlite3 *db = pParse->db; /* Check to see if flattening is permitted. Return 0 if not. 
*/ - if( p==0 ) return 0; + assert( p!=0 ); + assert( p->pPrior==0 ); /* Unable to flatten compound queries */ pSrc = p->pSrc; assert( pSrc && iFrom>=0 && iFromnSrc ); pSubitem = &pSrc->a[iFrom]; + iParent = pSubitem->iCursor; pSub = pSubitem->pSelect; assert( pSub!=0 ); if( isAgg && subqueryIsAgg ) return 0; /* Restriction (1) */ @@ -2229,16 +2559,21 @@ return 0; /* Restriction (15) */ } if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */ - if( (pSub->isDistinct || pSub->pLimit) + if( ((pSub->selFlags & SF_Distinct)!=0 || pSub->pLimit) && (pSrc->nSrc>1 || isAgg) ){ /* Restrictions (4)(5)(8)(9) */ return 0; } - if( p->isDistinct && subqueryIsAgg ) return 0; /* Restriction (6) */ - if( (p->disallowOrderBy || p->pOrderBy) && pSub->pOrderBy ){ + if( (p->selFlags & SF_Distinct)!=0 && subqueryIsAgg ){ + return 0; /* Restriction (6) */ + } + if( p->pOrderBy && pSub->pOrderBy ){ return 0; /* Restriction (11) */ } + if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */ + if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */ - /* Restriction 3: If the subquery is a join, make sure the subquery is + /* OBSOLETE COMMENT 1: + ** Restriction 3: If the subquery is a join, make sure the subquery is ** not used as the right operand of an outer join. Examples of why this ** is not allowed: ** @@ -2249,12 +2584,9 @@ ** (t1 LEFT OUTER JOIN t2) JOIN t3 ** ** which is not at all the same thing. - */ - if( pSubSrc->nSrc>1 && (pSubitem->jointype & JT_OUTER)!=0 ){ - return 0; - } - - /* Restriction 12: If the subquery is the right operand of a left outer + ** + ** OBSOLETE COMMENT 2: + ** Restriction 12: If the subquery is the right operand of a left outer ** join, make sure the subquery has no WHERE clause. ** An examples of why this is not allowed: ** @@ -2266,16 +2598,153 @@ ** ** But the t2.x>0 test will always fail on a NULL row of t2, which ** effectively converts the OUTER JOIN into an INNER JOIN. + ** + ** THIS OVERRIDES OBSOLETE COMMENTS 1 AND 2 ABOVE: + ** Ticket #3300 shows that flattening the right term of a LEFT JOIN + ** is fraught with danger. Best to avoid the whole thing. If the + ** subquery is the right term of a LEFT JOIN, then do not flatten. */ - if( (pSubitem->jointype & JT_OUTER)!=0 && pSub->pWhere!=0 ){ + if( (pSubitem->jointype & JT_OUTER)!=0 ){ return 0; } - /* If we reach this point, it means flattening is permitted for the - ** iFrom-th entry of the FROM clause in the outer query. + /* Restriction 17: If the sub-query is a compound SELECT, then it must + ** use only the UNION ALL operator. And none of the simple select queries + ** that make up the compound SELECT are allowed to be aggregate or distinct + ** queries. + */ + if( pSub->pPrior ){ + if( pSub->pOrderBy ){ + return 0; /* Restriction 20 */ + } + if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ + return 0; + } + for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ + testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); + testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); + if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 + || (pSub1->pPrior && pSub1->op!=TK_ALL) + || NEVER(pSub1->pSrc==0) || pSub1->pSrc->nSrc!=1 + ){ + return 0; + } + } + + /* Restriction 18. */ + if( p->pOrderBy ){ + int ii; + for(ii=0; iipOrderBy->nExpr; ii++){ + if( p->pOrderBy->a[ii].iCol==0 ) return 0; + } + } + } + + /***** If we reach this point, flattening is permitted. 
*****/ + + /* Authorize the subquery */ + pParse->zAuthContext = pSubitem->zName; + sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0); + pParse->zAuthContext = zSavedAuthContext; + + /* If the sub-query is a compound SELECT statement, then (by restrictions + ** 17 and 18 above) it must be a UNION ALL and the parent query must + ** be of the form: + ** + ** SELECT FROM () + ** + ** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block + ** creates N-1 copies of the parent query without any ORDER BY, LIMIT or + ** OFFSET clauses and joins them to the left-hand-side of the original + ** using UNION ALL operators. In this case N is the number of simple + ** select statements in the compound sub-query. + ** + ** Example: + ** + ** SELECT a+1 FROM ( + ** SELECT x FROM tab + ** UNION ALL + ** SELECT y FROM tab + ** UNION ALL + ** SELECT abs(z*2) FROM tab2 + ** ) WHERE a!=5 ORDER BY 1 + ** + ** Transformed into: + ** + ** SELECT x+1 FROM tab WHERE x+1!=5 + ** UNION ALL + ** SELECT y+1 FROM tab WHERE y+1!=5 + ** UNION ALL + ** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5 + ** ORDER BY 1 + ** + ** We call this the "compound-subquery flattening". + */ + for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){ + Select *pNew; + ExprList *pOrderBy = p->pOrderBy; + Expr *pLimit = p->pLimit; + Select *pPrior = p->pPrior; + p->pOrderBy = 0; + p->pSrc = 0; + p->pPrior = 0; + p->pLimit = 0; + pNew = sqlite3SelectDup(db, p, 0); + p->pLimit = pLimit; + p->pOrderBy = pOrderBy; + p->pSrc = pSrc; + p->op = TK_ALL; + p->pRightmost = 0; + if( pNew==0 ){ + pNew = pPrior; + }else{ + pNew->pPrior = pPrior; + pNew->pRightmost = 0; + } + p->pPrior = pNew; + if( db->mallocFailed ) return 1; + } + + /* Begin flattening the iFrom-th entry of the FROM clause + ** in the outer query. */ + pSub = pSub1 = pSubitem->pSelect; + + /* Delete the transient table structure associated with the + ** subquery + */ + sqlite3DbFree(db, pSubitem->zDatabase); + sqlite3DbFree(db, pSubitem->zName); + sqlite3DbFree(db, pSubitem->zAlias); + pSubitem->zDatabase = 0; + pSubitem->zName = 0; + pSubitem->zAlias = 0; + pSubitem->pSelect = 0; + + /* Defer deleting the Table object associated with the + ** subquery until code generation is + ** complete, since there may still exist Expr.pTab entries that + ** refer to the subquery even after flattening. Ticket #3346. + ** + ** pSubitem->pTab is always non-NULL by test restrictions and tests above. + */ + if( ALWAYS(pSubitem->pTab!=0) ){ + Table *pTabToDel = pSubitem->pTab; + if( pTabToDel->nRef==1 ){ + pTabToDel->pNextZombie = pParse->pZombieTab; + pParse->pZombieTab = pTabToDel; + }else{ + pTabToDel->nRef--; + } + pSubitem->pTab = 0; + } - /* Move all of the FROM elements of the subquery into the + /* The following loop runs once for each term in a compound-subquery + ** flattening (as described above). If we are doing a different kind + ** of flattening - a flattening other than a compound-subquery flattening - + ** then this loop only runs once. + ** + ** This loop moves all of the FROM elements of the subquery into the ** the FROM clause of the outer query. Before doing this, remember ** the cursor number for the original outer query FROM element in ** iParent. The iParent cursor will never be used. Subsequent code @@ -2283,439 +2752,588 @@ ** those references with expressions that resolve to the subquery FROM ** elements we are now copying in. 
*/ - iParent = pSubitem->iCursor; - { - int nSubSrc = pSubSrc->nSrc; - int jointype = pSubitem->jointype; - - sqlite3DeleteTable(pSubitem->pTab); - sqliteFree(pSubitem->zDatabase); - sqliteFree(pSubitem->zName); - sqliteFree(pSubitem->zAlias); + for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ + int nSubSrc; + u8 jointype = 0; + pSubSrc = pSub->pSrc; /* FROM clause of subquery */ + nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ + pSrc = pParent->pSrc; /* FROM clause of the outer query */ + + if( pSrc ){ + assert( pParent==p ); /* First time through the loop */ + jointype = pSubitem->jointype; + }else{ + assert( pParent!=p ); /* 2nd and subsequent times through the loop */ + pSrc = pParent->pSrc = sqlite3SrcListAppend(db, 0, 0, 0); + if( pSrc==0 ){ + assert( db->mallocFailed ); + break; + } + } + + /* The subquery uses a single slot of the FROM clause of the outer + ** query. If the subquery has more than one element in its FROM clause, + ** then expand the outer query to make space for it to hold all elements + ** of the subquery. + ** + ** Example: + ** + ** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB; + ** + ** The outer query has 3 slots in its FROM clause. One slot of the + ** outer query (the middle slot) is used by the subquery. The next + ** block of code will expand the out query to 4 slots. The middle + ** slot is expanded to two slots in order to make space for the + ** two elements in the FROM clause of the subquery. + */ if( nSubSrc>1 ){ - int extra = nSubSrc - 1; - for(i=1; ipSrc = pSrc; - for(i=pSrc->nSrc-1; i-extra>=iFrom; i--){ - pSrc->a[i] = pSrc->a[i-extra]; + pParent->pSrc = pSrc = sqlite3SrcListEnlarge(db, pSrc, nSubSrc-1,iFrom+1); + if( db->mallocFailed ){ + break; } } + + /* Transfer the FROM clause terms from the subquery into the + ** outer query. + */ for(i=0; ia[i+iFrom].pUsing); pSrc->a[i+iFrom] = pSubSrc->a[i]; memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } pSrc->a[iFrom].jointype = jointype; - } - - /* Now begin substituting subquery result set expressions for - ** references to the iParent in the outer query. - ** - ** Example: - ** - ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b; - ** \ \_____________ subquery __________/ / - ** \_____________________ outer query ______________________________/ - ** - ** We look at every expression in the outer query and every place we see - ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10". 
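Stripped of the bookkeeping, the "expand the outer FROM clause to make space" step described above is an array insertion: open a gap of nExtra slots at a given position and shift the tail right. A toy version follows; enlargeAt and the integer "slots" are invented for the sketch, and the real sqlite3SrcListEnlarge also reallocates and fixes up per-slot state.

#include <stdio.h>
#include <string.h>

/* Open up nExtra empty slots in a[] at index iStart, shifting the tail
** right.  a[] must already have room for nUsed+nExtra entries. */
static void enlargeAt(int *a, int nUsed, int iStart, int nExtra){
  memmove(&a[iStart+nExtra], &a[iStart], (nUsed-iStart)*sizeof(int));
  memset(&a[iStart], 0, nExtra*sizeof(int));   /* new slots start out empty */
}

int main(void){
  /* Outer FROM clause: tabA, (subquery), tabB -> expand the middle slot */
  int from[4] = {100, 200, 300};      /* 100=tabA, 200=subquery, 300=tabB */
  int i;
  enlargeAt(from, 3, 2, 1);           /* one extra slot after the subquery */
  for(i=0; i<4; i++) printf("%d ", from[i]);   /* 100 200 0 300 */
  printf("\n");
  return 0;
}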
- */ - pList = p->pEList; - for(i=0; inExpr; i++){ - Expr *pExpr; - if( pList->a[i].zName==0 && (pExpr = pList->a[i].pExpr)->span.z!=0 ){ - pList->a[i].zName = sqliteStrNDup((char*)pExpr->span.z, pExpr->span.n); - } - } - substExprList(p->pEList, iParent, pSub->pEList); - if( isAgg ){ - substExprList(p->pGroupBy, iParent, pSub->pEList); - substExpr(p->pHaving, iParent, pSub->pEList); - } - if( pSub->pOrderBy ){ - assert( p->pOrderBy==0 ); - p->pOrderBy = pSub->pOrderBy; - pSub->pOrderBy = 0; - }else if( p->pOrderBy ){ - substExprList(p->pOrderBy, iParent, pSub->pEList); - } - if( pSub->pWhere ){ - pWhere = sqlite3ExprDup(pSub->pWhere); - }else{ - pWhere = 0; - } - if( subqueryIsAgg ){ - assert( p->pHaving==0 ); - p->pHaving = p->pWhere; - p->pWhere = pWhere; - substExpr(p->pHaving, iParent, pSub->pEList); - p->pHaving = sqlite3ExprAnd(p->pHaving, sqlite3ExprDup(pSub->pHaving)); - assert( p->pGroupBy==0 ); - p->pGroupBy = sqlite3ExprListDup(pSub->pGroupBy); - }else{ - substExpr(p->pWhere, iParent, pSub->pEList); - p->pWhere = sqlite3ExprAnd(p->pWhere, pWhere); - } - - /* The flattened query is distinct if either the inner or the - ** outer query is distinct. - */ - p->isDistinct = p->isDistinct || pSub->isDistinct; - - /* - ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y; - ** - ** One is tempted to try to add a and b to combine the limits. But this - ** does not work if either limit is negative. - */ - if( pSub->pLimit ){ - p->pLimit = pSub->pLimit; - pSub->pLimit = 0; + + /* Now begin substituting subquery result set expressions for + ** references to the iParent in the outer query. + ** + ** Example: + ** + ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b; + ** \ \_____________ subquery __________/ / + ** \_____________________ outer query ______________________________/ + ** + ** We look at every expression in the outer query and every place we see + ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10". + */ + pList = pParent->pEList; + for(i=0; inExpr; i++){ + if( pList->a[i].zName==0 ){ + const char *zSpan = pList->a[i].zSpan; + if( ALWAYS(zSpan) ){ + pList->a[i].zName = sqlite3DbStrDup(db, zSpan); + } + } + } + substExprList(db, pParent->pEList, iParent, pSub->pEList); + if( isAgg ){ + substExprList(db, pParent->pGroupBy, iParent, pSub->pEList); + pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); + } + if( pSub->pOrderBy ){ + assert( pParent->pOrderBy==0 ); + pParent->pOrderBy = pSub->pOrderBy; + pSub->pOrderBy = 0; + }else if( pParent->pOrderBy ){ + substExprList(db, pParent->pOrderBy, iParent, pSub->pEList); + } + if( pSub->pWhere ){ + pWhere = sqlite3ExprDup(db, pSub->pWhere, 0); + }else{ + pWhere = 0; + } + if( subqueryIsAgg ){ + assert( pParent->pHaving==0 ); + pParent->pHaving = pParent->pWhere; + pParent->pWhere = pWhere; + pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); + pParent->pHaving = sqlite3ExprAnd(db, pParent->pHaving, + sqlite3ExprDup(db, pSub->pHaving, 0)); + assert( pParent->pGroupBy==0 ); + pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0); + }else{ + pParent->pWhere = substExpr(db, pParent->pWhere, iParent, pSub->pEList); + pParent->pWhere = sqlite3ExprAnd(db, pParent->pWhere, pWhere); + } + + /* The flattened query is distinct if either the inner or the + ** outer query is distinct. + */ + pParent->selFlags |= pSub->selFlags & SF_Distinct; + + /* + ** SELECT ... FROM (SELECT ... 
LIMIT a OFFSET b) LIMIT x OFFSET y; + ** + ** One is tempted to try to add a and b to combine the limits. But this + ** does not work if either limit is negative. + */ + if( pSub->pLimit ){ + pParent->pLimit = pSub->pLimit; + pSub->pLimit = 0; + } } /* Finially, delete what is left of the subquery and return ** success. */ - sqlite3SelectDelete(pSub); + sqlite3SelectDelete(db, pSub1); + return 1; } -#endif /* SQLITE_OMIT_VIEW */ +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ /* -** Analyze the SELECT statement passed in as an argument to see if it -** is a simple min() or max() query. If it is and this query can be -** satisfied using a single seek to the beginning or end of an index, -** then generate the code for this SELECT and return 1. If this is not a -** simple min() or max() query, then return 0; -** -** A simply min() or max() query looks like this: +** Analyze the SELECT statement passed as an argument to see if it +** is a min() or max() query. Return WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX if +** it is, or 0 otherwise. At present, a query is considered to be +** a min()/max() query if: ** -** SELECT min(a) FROM table; -** SELECT max(a) FROM table; +** 1. There is a single object in the FROM clause. ** -** The query may have only a single table in its FROM argument. There -** can be no GROUP BY or HAVING or WHERE clauses. The result set must -** be the min() or max() of a single column of the table. The column -** in the min() or max() function must be indexed. -** -** The parameters to this routine are the same as for sqlite3Select(). -** See the header comment on that routine for additional information. +** 2. There is a single expression in the result set, and it is +** either min(x) or max(x), where x is a column reference. */ -static int simpleMinMaxQuery(Parse *pParse, Select *p, int eDest, int iParm){ +static u8 minMaxQuery(Select *p){ Expr *pExpr; - int iCol; - Table *pTab; - Index *pIdx; - int base; - Vdbe *v; - int seekOp; - ExprList *pEList, *pList, eList; - struct ExprList_item eListItem; - SrcList *pSrc; - int brk; - int iDb; + ExprList *pEList = p->pEList; - /* Check to see if this query is a simple min() or max() query. Return - ** zero if it is not. - */ - if( p->pGroupBy || p->pHaving || p->pWhere ) return 0; - pSrc = p->pSrc; - if( pSrc->nSrc!=1 ) return 0; - pEList = p->pEList; - if( pEList->nExpr!=1 ) return 0; + if( pEList->nExpr!=1 ) return WHERE_ORDERBY_NORMAL; pExpr = pEList->a[0].pExpr; if( pExpr->op!=TK_AGG_FUNCTION ) return 0; - pList = pExpr->pList; - if( pList==0 || pList->nExpr!=1 ) return 0; - if( pExpr->token.n!=3 ) return 0; - if( sqlite3StrNICmp((char*)pExpr->token.z,"min",3)==0 ){ - seekOp = OP_Rewind; - }else if( sqlite3StrNICmp((char*)pExpr->token.z,"max",3)==0 ){ - seekOp = OP_Last; - }else{ - return 0; + if( NEVER(ExprHasProperty(pExpr, EP_xIsSelect)) ) return 0; + pEList = pExpr->x.pList; + if( pEList==0 || pEList->nExpr!=1 ) return 0; + if( pEList->a[0].pExpr->op!=TK_AGG_COLUMN ) return WHERE_ORDERBY_NORMAL; + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + if( sqlite3StrICmp(pExpr->u.zToken,"min")==0 ){ + return WHERE_ORDERBY_MIN; + }else if( sqlite3StrICmp(pExpr->u.zToken,"max")==0 ){ + return WHERE_ORDERBY_MAX; } - pExpr = pList->a[0].pExpr; - if( pExpr->op!=TK_COLUMN ) return 0; - iCol = pExpr->iColumn; - pTab = pSrc->a[0].pTab; + return WHERE_ORDERBY_NORMAL; +} - /* This optimization cannot be used with virtual tables. 
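Both the old simpleMinMaxQuery() code being removed here and the minMaxQuery()/WHERE_ORDERBY_MIN approach that replaces it rest on the same observation: with an ordered index on x, min(x) is the first index entry and max(x) the last, so no scan is needed. A toy illustration with a sorted array standing in for the index; idx, indexMin and indexMax are invented names.

#include <stdio.h>

/* A sorted array stands in for an index b-tree on column x: min(x) is a
** single seek to the first entry and max(x) a seek to the last.  (A DESC
** index just swaps which end is which, as the ticket #2514 note explains.) */
static int idx[] = {3, 7, 9, 21, 42};
#define NIDX (sizeof(idx)/sizeof(idx[0]))

static int indexMin(void){ return idx[0]; }        /* OP_Rewind: first entry */
static int indexMax(void){ return idx[NIDX-1]; }   /* OP_Last: last entry */

int main(void){
  printf("min=%d max=%d\n", indexMin(), indexMax());   /* min=3 max=42 */
  return 0;
}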
*/ - if( IsVirtual(pTab) ) return 0; +/* +** The select statement passed as the first argument is an aggregate query. +** The second argment is the associated aggregate-info object. This +** function tests if the SELECT is of the form: +** +** SELECT count(*) FROM +** +** where table is a database table, not a sub-select or view. If the query +** does match this pattern, then a pointer to the Table object representing +** is returned. Otherwise, 0 is returned. +*/ +static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ + Table *pTab; + Expr *pExpr; - /* If we get to here, it means the query is of the correct form. - ** Check to make sure we have an index and make pIdx point to the - ** appropriate index. If the min() or max() is on an INTEGER PRIMARY - ** key column, no index is necessary so set pIdx to NULL. If no - ** usable index is found, return 0. - */ - if( iCol<0 ){ - pIdx = 0; - }else{ - CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr); - if( pColl==0 ) return 0; - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - assert( pIdx->nColumn>=1 ); - if( pIdx->aiColumn[0]==iCol && - 0==sqlite3StrICmp(pIdx->azColl[0], pColl->zName) ){ - break; - } - } - if( pIdx==0 ) return 0; + assert( !p->pGroupBy ); + + if( p->pWhere || p->pEList->nExpr!=1 + || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect + ){ + return 0; } + pTab = p->pSrc->a[0].pTab; + pExpr = p->pEList->a[0].pExpr; + assert( pTab && !pTab->pSelect && pExpr ); - /* Identify column types if we will be using the callback. This - ** step is skipped if the output is going to a table or a memory cell. - ** The column names have already been generated in the calling function. - */ - v = sqlite3GetVdbe(pParse); - if( v==0 ) return 0; + if( IsVirtual(pTab) ) return 0; + if( pExpr->op!=TK_AGG_FUNCTION ) return 0; + if( (pAggInfo->aFunc[0].pFunc->flags&SQLITE_FUNC_COUNT)==0 ) return 0; + if( pExpr->flags&EP_Distinct ) return 0; - /* If the output is destined for a temporary table, open that table. - */ - if( eDest==SRT_EphemTab ){ - sqlite3VdbeAddOp(v, OP_OpenEphemeral, iParm, 1); - } + return pTab; +} - /* Generating code to find the min or the max. Basically all we have - ** to do is find the first or the last entry in the chosen index. If - ** the min() or max() is on the INTEGER PRIMARY KEY, then find the first - ** or last entry in the main table. - */ - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - assert( iDb>=0 || pTab->isEphem ); - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - base = pSrc->a[0].iCursor; - brk = sqlite3VdbeMakeLabel(v); - computeLimitRegisters(pParse, p, brk); - if( pSrc->a[0].pSelect==0 ){ - sqlite3OpenTable(pParse, base, iDb, pTab, OP_OpenRead); - } - if( pIdx==0 ){ - sqlite3VdbeAddOp(v, seekOp, base, 0); - }else{ - /* Even though the cursor used to open the index here is closed - ** as soon as a single value has been read from it, allocate it - ** using (pParse->nTab++) to prevent the cursor id from being - ** reused. This is important for statements of the form - ** "INSERT INTO x SELECT max() FROM x". 
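sqlite3IndexedByLookup() above is what makes the INDEXED BY syntax resolve, or fail to resolve, at prepare time. A small usage sketch against the public C API follows; the table t1 and the indexes i1/i2 are made up, and error handling is minimal.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t1(a,b); CREATE INDEX i1 ON t1(a);", 0, 0, 0);

  /* INDEXED BY names an existing index; the lookup above resolves it. */
  rc = sqlite3_prepare_v2(db, "SELECT b FROM t1 INDEXED BY i1 WHERE a=?", -1,
                          &pStmt, 0);
  printf("i1: rc=%d\n", rc);                 /* SQLITE_OK */
  sqlite3_finalize(pStmt);

  /* A name that does not resolve is rejected at prepare time with the
  ** "no such index" message from the code above. */
  rc = sqlite3_prepare_v2(db, "SELECT b FROM t1 INDEXED BY i2 WHERE a=?", -1,
                          &pStmt, 0);
  printf("i2: rc=%d (%s)\n", rc, sqlite3_errmsg(db));
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}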
- */ - int iIdx; - KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); - iIdx = pParse->nTab++; - assert( pIdx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - sqlite3VdbeOp3(v, OP_OpenRead, iIdx, pIdx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - if( seekOp==OP_Rewind ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); - sqlite3VdbeAddOp(v, OP_MakeRecord, 1, 0); - seekOp = OP_MoveGt; - } - if( pIdx->aSortOrder[0]==SQLITE_SO_DESC ){ - /* Ticket #2514: invert the seek operator if we are using - ** a descending index. */ - if( seekOp==OP_Last ){ - seekOp = OP_Rewind; - }else{ - assert( seekOp==OP_MoveGt ); - seekOp = OP_MoveLt; - } +/* +** If the source-list item passed as an argument was augmented with an +** INDEXED BY clause, then try to locate the specified index. If there +** was such a clause and the named index cannot be found, return +** SQLITE_ERROR and leave an error in pParse. Otherwise, populate +** pFrom->pIndex and return SQLITE_OK. +*/ +int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ + if( pFrom->pTab && pFrom->zIndex ){ + Table *pTab = pFrom->pTab; + char *zIndex = pFrom->zIndex; + Index *pIdx; + for(pIdx=pTab->pIndex; + pIdx && sqlite3StrICmp(pIdx->zName, zIndex); + pIdx=pIdx->pNext + ); + if( !pIdx ){ + sqlite3ErrorMsg(pParse, "no such index: %s", zIndex, 0); + return SQLITE_ERROR; } - sqlite3VdbeAddOp(v, seekOp, iIdx, 0); - sqlite3VdbeAddOp(v, OP_IdxRowid, iIdx, 0); - sqlite3VdbeAddOp(v, OP_Close, iIdx, 0); - sqlite3VdbeAddOp(v, OP_MoveGe, base, 0); - } - eList.nExpr = 1; - memset(&eListItem, 0, sizeof(eListItem)); - eList.a = &eListItem; - eList.a[0].pExpr = pExpr; - selectInnerLoop(pParse, p, &eList, 0, 0, 0, -1, eDest, iParm, brk, brk, 0); - sqlite3VdbeResolveLabel(v, brk); - sqlite3VdbeAddOp(v, OP_Close, base, 0); - - return 1; + pFrom->pIndex = pIdx; + } + return SQLITE_OK; } /* -** Analyze and ORDER BY or GROUP BY clause in a SELECT statement. Return -** the number of errors seen. +** This routine is a Walker callback for "expanding" a SELECT statement. +** "Expanding" means to do the following: ** -** An ORDER BY or GROUP BY is a list of expressions. If any expression -** is an integer constant, then that expression is replaced by the -** corresponding entry in the result set. -*/ -static int processOrderGroupBy( - NameContext *pNC, /* Name context of the SELECT statement. */ - ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ - const char *zType /* Either "ORDER" or "GROUP", as appropriate */ -){ - int i; - ExprList *pEList = pNC->pEList; /* The result set of the SELECT */ - Parse *pParse = pNC->pParse; /* The result set of the SELECT */ - assert( pEList ); - - if( pOrderBy==0 ) return 0; - if( pOrderBy->nExpr>SQLITE_MAX_COLUMN ){ - sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); - return 1; +** (1) Make sure VDBE cursor numbers have been assigned to every +** element of the FROM clause. +** +** (2) Fill in the pTabList->a[].pTab fields in the SrcList that +** defines FROM clause. When views appear in the FROM clause, +** fill pTabList->a[].pSelect with a copy of the SELECT statement +** that implements the view. A copy is made of the view's SELECT +** statement so that we can freely modify or delete that statement +** without worrying about messing up the presistent representation +** of the view. +** +** (3) Add terms to the WHERE clause to accomodate the NATURAL keyword +** on joins and the ON and USING clause of joins. 
+** +** (4) Scan the list of columns in the result set (pEList) looking +** for instances of the "*" operator or the TABLE.* operator. +** If found, expand each "*" to be every column in every table +** and TABLE.* to be every column in TABLE. +** +*/ +static int selectExpander(Walker *pWalker, Select *p){ + Parse *pParse = pWalker->pParse; + int i, j, k; + SrcList *pTabList; + ExprList *pEList; + struct SrcList_item *pFrom; + sqlite3 *db = pParse->db; + + if( db->mallocFailed ){ + return WRC_Abort; } - for(i=0; inExpr; i++){ - int iCol; - Expr *pE = pOrderBy->a[i].pExpr; - if( sqlite3ExprIsInteger(pE, &iCol) ){ - if( iCol>0 && iCol<=pEList->nExpr ){ - CollSeq *pColl = pE->pColl; - int flags = pE->flags & EP_ExpCollate; - sqlite3ExprDelete(pE); - pE = pOrderBy->a[i].pExpr = sqlite3ExprDup(pEList->a[iCol-1].pExpr); - if( pColl && flags ){ - pE->pColl = pColl; - pE->flags |= flags; - } - }else{ - sqlite3ErrorMsg(pParse, - "%s BY column number %d out of range - should be " - "between 1 and %d", zType, iCol, pEList->nExpr); - return 1; + if( NEVER(p->pSrc==0) || (p->selFlags & SF_Expanded)!=0 ){ + return WRC_Prune; + } + p->selFlags |= SF_Expanded; + pTabList = p->pSrc; + pEList = p->pEList; + + /* Make sure cursor numbers have been assigned to all entries in + ** the FROM clause of the SELECT statement. + */ + sqlite3SrcListAssignCursors(pParse, pTabList); + + /* Look up every table named in the FROM clause of the select. If + ** an entry of the FROM clause is a subquery instead of a table or view, + ** then create a transient table structure to describe the subquery. + */ + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab; + if( pFrom->pTab!=0 ){ + /* This statement has already been prepared. There is no need + ** to go further. */ + assert( i==0 ); + return WRC_Prune; + } + if( pFrom->zName==0 ){ +#ifndef SQLITE_OMIT_SUBQUERY + Select *pSel = pFrom->pSelect; + /* A sub-query in the FROM clause of a SELECT */ + assert( pSel!=0 ); + assert( pFrom->pTab==0 ); + sqlite3WalkSelect(pWalker, pSel); + pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTab==0 ) return WRC_Abort; + pTab->dbMem = db->lookaside.bEnabled ? db : 0; + pTab->nRef = 1; + pTab->zName = sqlite3MPrintf(db, "sqlite_subquery_%p_", (void*)pTab); + while( pSel->pPrior ){ pSel = pSel->pPrior; } + selectColumnsFromExprList(pParse, pSel->pEList, &pTab->nCol, &pTab->aCol); + pTab->iPKey = -1; + pTab->tabFlags |= TF_Ephemeral; +#endif + }else{ + /* An ordinary table or view name in the FROM clause */ + assert( pFrom->pTab==0 ); + pFrom->pTab = pTab = + sqlite3LocateTable(pParse,0,pFrom->zName,pFrom->zDatabase); + if( pTab==0 ) return WRC_Abort; + pTab->nRef++; +#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) + if( pTab->pSelect || IsVirtual(pTab) ){ + /* We reach here if the named table is a really a view */ + if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; + assert( pFrom->pSelect==0 ); + pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0); + sqlite3WalkSelect(pWalker, pFrom->pSelect); } +#endif } - if( sqlite3ExprResolveNames(pNC, pE) ){ - return 1; + + /* Locate the index named by the INDEXED BY clause, if any. */ + if( sqlite3IndexedByLookup(pParse, pFrom) ){ + return WRC_Abort; } } - return 0; -} -/* -** This routine resolves any names used in the result set of the -** supplied SELECT statement. If the SELECT statement being resolved -** is a sub-select, then pOuterNC is a pointer to the NameContext -** of the parent SELECT. 
-*/ -int sqlite3SelectResolve( - Parse *pParse, /* The parser context */ - Select *p, /* The SELECT statement being coded. */ - NameContext *pOuterNC /* The outer name context. May be NULL. */ -){ - ExprList *pEList; /* Result set. */ - int i; /* For-loop variable used in multiple places */ - NameContext sNC; /* Local name-context */ - ExprList *pGroupBy; /* The group by clause */ - - /* If this routine has run before, return immediately. */ - if( p->isResolved ){ - assert( !pOuterNC ); - return SQLITE_OK; + /* Process NATURAL keywords, and ON and USING clauses of joins. + */ + if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){ + return WRC_Abort; } - p->isResolved = 1; - /* If there have already been errors, do nothing. */ - if( pParse->nErr>0 ){ - return SQLITE_ERROR; + /* For every "*" that occurs in the column list, insert the names of + ** all columns in all tables. And for every TABLE.* insert the names + ** of all columns in TABLE. The parser inserted a special expression + ** with the TK_ALL operator for each "*" that it found in the column list. + ** The following code just has to locate the TK_ALL expressions and expand + ** each one to the list of all columns in all tables. + ** + ** The first loop just checks to see if there are any "*" operators + ** that need expanding. + */ + for(k=0; knExpr; k++){ + Expr *pE = pEList->a[k].pExpr; + if( pE->op==TK_ALL ) break; + assert( pE->op!=TK_DOT || pE->pRight!=0 ); + assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) ); + if( pE->op==TK_DOT && pE->pRight->op==TK_ALL ) break; } + if( knExpr ){ + /* + ** If we get here it means the result set contains one or more "*" + ** operators that need to be expanded. Loop through each expression + ** in the result set and expand them one by one. + */ + struct ExprList_item *a = pEList->a; + ExprList *pNew = 0; + int flags = pParse->db->flags; + int longNames = (flags & SQLITE_FullColNames)!=0 + && (flags & SQLITE_ShortColNames)==0; - /* Prepare the select statement. This call will allocate all cursors - ** required to handle the tables and subqueries in the FROM clause. - */ - if( prepSelectStmt(pParse, p) ){ - return SQLITE_ERROR; + for(k=0; knExpr; k++){ + Expr *pE = a[k].pExpr; + assert( pE->op!=TK_DOT || pE->pRight!=0 ); + if( pE->op!=TK_ALL && (pE->op!=TK_DOT || pE->pRight->op!=TK_ALL) ){ + /* This particular expression does not need to be expanded. + */ + pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr); + if( pNew ){ + pNew->a[pNew->nExpr-1].zName = a[k].zName; + pNew->a[pNew->nExpr-1].zSpan = a[k].zSpan; + a[k].zName = 0; + a[k].zSpan = 0; + } + a[k].pExpr = 0; + }else{ + /* This expression is a "*" or a "TABLE.*" and needs to be + ** expanded. 
*/ + int tableSeen = 0; /* Set to 1 when TABLE matches */ + char *zTName; /* text of name of TABLE */ + if( pE->op==TK_DOT ){ + assert( pE->pLeft!=0 ); + assert( !ExprHasProperty(pE->pLeft, EP_IntValue) ); + zTName = pE->pLeft->u.zToken; + }else{ + zTName = 0; + } + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab = pFrom->pTab; + char *zTabName = pFrom->zAlias; + if( zTabName==0 ){ + zTabName = pTab->zName; + } + if( db->mallocFailed ) break; + if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ + continue; + } + tableSeen = 1; + for(j=0; jnCol; j++){ + Expr *pExpr, *pRight; + char *zName = pTab->aCol[j].zName; + char *zColname; /* The computed column name */ + char *zToFree; /* Malloced string that needs to be freed */ + Token sColname; /* Computed column name as a token */ + + /* If a column is marked as 'hidden' (currently only possible + ** for virtual tables), do not include it in the expanded + ** result-set list. + */ + if( IsHiddenColumn(&pTab->aCol[j]) ){ + assert(IsVirtual(pTab)); + continue; + } + + if( i>0 && zTName==0 ){ + struct SrcList_item *pLeft = &pTabList->a[i-1]; + if( (pLeft[1].jointype & JT_NATURAL)!=0 && + columnIndex(pLeft->pTab, zName)>=0 ){ + /* In a NATURAL join, omit the join columns from the + ** table on the right */ + continue; + } + if( sqlite3IdListIndex(pLeft[1].pUsing, zName)>=0 ){ + /* In a join with a USING clause, omit columns in the + ** using clause from the table on the right. */ + continue; + } + } + pRight = sqlite3Expr(db, TK_ID, zName); + zColname = zName; + zToFree = 0; + if( longNames || pTabList->nSrc>1 ){ + Expr *pLeft; + pLeft = sqlite3Expr(db, TK_ID, zTabName); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); + if( longNames ){ + zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName); + zToFree = zColname; + } + }else{ + pExpr = pRight; + } + pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); + sColname.z = zColname; + sColname.n = sqlite3Strlen30(zColname); + sqlite3ExprListSetName(pParse, pNew, &sColname, 0); + sqlite3DbFree(db, zToFree); + } + } + if( !tableSeen ){ + if( zTName ){ + sqlite3ErrorMsg(pParse, "no such table: %s", zTName); + }else{ + sqlite3ErrorMsg(pParse, "no tables specified"); + } + } + } + } + sqlite3ExprListDelete(db, pEList); + p->pEList = pNew; } - - /* Resolve the expressions in the LIMIT and OFFSET clauses. These - ** are not allowed to refer to any names, so pass an empty NameContext. - */ - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - if( sqlite3ExprResolveNames(&sNC, p->pLimit) || - sqlite3ExprResolveNames(&sNC, p->pOffset) ){ - return SQLITE_ERROR; +#if SQLITE_MAX_COLUMN + if( p->pEList && p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many columns in result set"); } +#endif + return WRC_Continue; +} - /* Set up the local name-context to pass to ExprResolveNames() to - ** resolve the expression-list. - */ - sNC.allowAgg = 1; - sNC.pSrcList = p->pSrc; - sNC.pNext = pOuterNC; +/* +** No-op routine for the parse-tree walker. +** +** When this routine is the Walker.xExprCallback then expression trees +** are walked without any actions being taken at each node. Presumably, +** when this routine is used for Walker.xExprCallback then +** Walker.xSelectCallback is set to do something useful for every +** subquery in the parser tree. +*/ +static int exprWalkNoop(Walker *NotUsed, Expr *NotUsed2){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + return WRC_Continue; +} - /* Resolve names in the result set. 
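sqlite3SelectExpand() and sqlite3SelectAddTypeInfo() both drive their work through a Walker: a struct holding an expression callback and a select callback that a generic traversal invokes at every node. The shape of that pattern, reduced to a toy expression tree, is sketched below; ToyExpr, ToyWalker and the TW_* codes are invented stand-ins for SQLite's internal types.

#include <stdio.h>

enum { TW_CONTINUE = 0, TW_PRUNE = 1, TW_ABORT = 2 };

typedef struct ToyExpr ToyExpr;
struct ToyExpr {
  const char *zOp;
  ToyExpr *pLeft, *pRight;
};

typedef struct ToyWalker {
  int (*xExprCallback)(struct ToyWalker*, ToyExpr*);
  void *pCtx;                      /* analogue of Walker.pParse / u fields */
} ToyWalker;

/* Depth-first traversal: invoke the callback, then recurse unless the
** callback asked to prune this subtree or abort the whole walk. */
static int walkExpr(ToyWalker *pWalker, ToyExpr *p){
  int rc;
  if( p==0 ) return TW_CONTINUE;
  rc = pWalker->xExprCallback(pWalker, p);
  if( rc!=TW_CONTINUE ) return rc==TW_PRUNE ? TW_CONTINUE : rc;
  if( walkExpr(pWalker, p->pLeft)==TW_ABORT ) return TW_ABORT;
  if( walkExpr(pWalker, p->pRight)==TW_ABORT ) return TW_ABORT;
  return TW_CONTINUE;
}

/* A callback that just prints each operator it visits. */
static int printOp(ToyWalker *pWalker, ToyExpr *p){
  (void)pWalker;
  printf("%s\n", p->zOp);
  return TW_CONTINUE;
}

int main(void){
  ToyExpr a = {"COLUMN a", 0, 0}, b = {"COLUMN b", 0, 0};
  ToyExpr plus = {"+", &a, &b};
  ToyWalker w = { printOp, 0 };
  walkExpr(&w, &plus);             /* prints: +, COLUMN a, COLUMN b */
  return 0;
}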
*/ - pEList = p->pEList; - if( !pEList ) return SQLITE_ERROR; - for(i=0; inExpr; i++){ - Expr *pX = pEList->a[i].pExpr; - if( sqlite3ExprResolveNames(&sNC, pX) ){ - return SQLITE_ERROR; - } - } +/* +** This routine "expands" a SELECT statement and all of its subqueries. +** For additional information on what it means to "expand" a SELECT +** statement, see the comment on the selectExpand worker callback above. +** +** Expanding a SELECT statement is the first step in processing a +** SELECT statement. The SELECT statement must be expanded before +** name resolution is performed. +** +** If anything goes wrong, an error message is written into pParse. +** The calling function can detect the problem by looking at pParse->nErr +** and/or pParse->db->mallocFailed. +*/ +static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ + Walker w; + w.xSelectCallback = selectExpander; + w.xExprCallback = exprWalkNoop; + w.pParse = pParse; + sqlite3WalkSelect(&w, pSelect); +} - /* If there are no aggregate functions in the result-set, and no GROUP BY - ** expression, do not allow aggregates in any of the other expressions. - */ - assert( !p->isAgg ); - pGroupBy = p->pGroupBy; - if( pGroupBy || sNC.hasAgg ){ - p->isAgg = 1; - }else{ - sNC.allowAgg = 0; - } - /* If a HAVING clause is present, then there must be a GROUP BY clause. - */ - if( p->pHaving && !pGroupBy ){ - sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); - return SQLITE_ERROR; - } +#ifndef SQLITE_OMIT_SUBQUERY +/* +** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() +** interface. +** +** For each FROM-clause subquery, add Column.zType and Column.zColl +** information to the Table structure that represents the result set +** of that subquery. +** +** The Table structure that represents the result set was constructed +** by selectExpander() but the type and collation information was omitted +** at that point because identifiers had not yet been resolved. This +** routine is called after identifier resolution. +*/ +static int selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ + Parse *pParse; + int i; + SrcList *pTabList; + struct SrcList_item *pFrom; - /* Add the expression list to the name-context before parsing the - ** other expressions in the SELECT statement. This is so that - ** expressions in the WHERE clause (etc.) can refer to expressions by - ** aliases in the result set. - ** - ** Minor point: If this is the case, then the expression will be - ** re-evaluated for each reference to it. - */ - sNC.pEList = p->pEList; - if( sqlite3ExprResolveNames(&sNC, p->pWhere) || - sqlite3ExprResolveNames(&sNC, p->pHaving) ){ - return SQLITE_ERROR; - } - if( p->pPrior==0 ){ - if( processOrderGroupBy(&sNC, p->pOrderBy, "ORDER") || - processOrderGroupBy(&sNC, pGroupBy, "GROUP") ){ - return SQLITE_ERROR; + assert( p->selFlags & SF_Resolved ); + assert( (p->selFlags & SF_HasTypeInfo)==0 ); + p->selFlags |= SF_HasTypeInfo; + pParse = pWalker->pParse; + pTabList = p->pSrc; + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab = pFrom->pTab; + if( ALWAYS(pTab!=0) && (pTab->tabFlags & TF_Ephemeral)!=0 ){ + /* A sub-query in the FROM clause of a SELECT */ + Select *pSel = pFrom->pSelect; + assert( pSel ); + while( pSel->pPrior ) pSel = pSel->pPrior; + selectAddColumnTypeAndCollation(pParse, pTab->nCol, pTab->aCol, pSel); } } + return WRC_Continue; +} +#endif - if( sqlite3MallocFailed() ){ - return SQLITE_NOMEM; - } - /* Make sure the GROUP BY clause does not contain aggregate functions. 
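/*
** Aside: the Walker pattern used by sqlite3SelectExpand() above is generic.
** Any whole-tree pass can be expressed the same way: a no-op expression
** callback plus a Select callback, handed to sqlite3WalkSelect(). A minimal,
** hypothetical example (not part of the patch) that clears the SF_Distinct
** flag on every SELECT and subquery in a tree:
*/
static int clearDistinctStep(Walker *pWalker, Select *p){
  (void)pWalker;                      /* unused in this sketch */
  p->selFlags &= ~SF_Distinct;        /* the per-node transformation */
  return WRC_Continue;                /* keep walking into subqueries */
}
static void clearAllDistinct(Parse *pParse, Select *p){
  Walker w;
  w.xSelectCallback = clearDistinctStep;
  w.xExprCallback = exprWalkNoop;     /* the no-op callback defined above */
  w.pParse = pParse;
  sqlite3WalkSelect(&w, p);
}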
- */ - if( pGroupBy ){ - struct ExprList_item *pItem; - - for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ - if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ - sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " - "the GROUP BY clause"); - return SQLITE_ERROR; - } - } - } +/* +** This routine adds datatype and collating sequence information to +** the Table structures of all FROM-clause subqueries in a +** SELECT statement. +** +** Use this routine after name resolution. +*/ +static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){ +#ifndef SQLITE_OMIT_SUBQUERY + Walker w; + w.xSelectCallback = selectAddSubqueryTypeInfo; + w.xExprCallback = exprWalkNoop; + w.pParse = pParse; + sqlite3WalkSelect(&w, pSelect); +#endif +} - /* If this is one SELECT of a compound, be sure to resolve names - ** in the other SELECTs. - */ - if( p->pPrior ){ - return sqlite3SelectResolve(pParse, p->pPrior, pOuterNC); - }else{ - return SQLITE_OK; - } + +/* +** This routine sets of a SELECT statement for processing. The +** following is accomplished: +** +** * VDBE Cursor numbers are assigned to all FROM-clause terms. +** * Ephemeral Table objects are created for all FROM-clause subqueries. +** * ON and USING clauses are shifted into WHERE statements +** * Wildcards "*" and "TABLE.*" in result sets are expanded. +** * Identifiers in expression are matched to tables. +** +** This routine acts recursively on all subqueries within the SELECT. +*/ +void sqlite3SelectPrep( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + NameContext *pOuterNC /* Name context for container */ +){ + sqlite3 *db; + if( NEVER(p==0) ) return; + db = pParse->db; + if( p->selFlags & SF_HasTypeInfo ) return; + sqlite3SelectExpand(pParse, p); + if( pParse->nErr || db->mallocFailed ) return; + sqlite3ResolveSelectNames(pParse, p, pOuterNC); + if( pParse->nErr || db->mallocFailed ) return; + sqlite3SelectAddTypeInfo(pParse, p); } /* @@ -2733,20 +3351,21 @@ return; } for(i=0; inColumn; i++){ - sqlite3VdbeAddOp(v, OP_MemNull, pAggInfo->aCol[i].iMem, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, pAggInfo->aCol[i].iMem); } for(pFunc=pAggInfo->aFunc, i=0; inFunc; i++, pFunc++){ - sqlite3VdbeAddOp(v, OP_MemNull, pFunc->iMem, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, pFunc->iMem); if( pFunc->iDistinct>=0 ){ Expr *pE = pFunc->pExpr; - if( pE->pList==0 || pE->pList->nExpr!=1 ){ - sqlite3ErrorMsg(pParse, "DISTINCT in aggregate must be followed " - "by an expression"); + assert( !ExprHasProperty(pE, EP_xIsSelect) ); + if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){ + sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one " + "argument"); pFunc->iDistinct = -1; }else{ - KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->pList); - sqlite3VdbeOp3(v, OP_OpenEphemeral, pFunc->iDistinct, 0, - (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList); + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0, + (char*)pKeyInfo, P4_KEYINFO_HANDOFF); } } } @@ -2761,9 +3380,10 @@ int i; struct AggInfo_func *pF; for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ - ExprList *pList = pF->pExpr->pList; - sqlite3VdbeOp3(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, - (void*)pF->pFunc, P3_FUNCDEF); + ExprList *pList = pF->pExpr->x.pList; + assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); + sqlite3VdbeAddOp4(v, OP_AggFinal, pF->iMem, pList ? 
pList->nExpr : 0, 0, + (void*)pF->pFunc, P4_FUNCDEF); } } @@ -2778,70 +3398,102 @@ struct AggInfo_col *pC; pAggInfo->directMode = 1; + sqlite3ExprCacheClear(pParse); for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ int nArg; int addrNext = 0; - ExprList *pList = pF->pExpr->pList; + int regAgg; + ExprList *pList = pF->pExpr->x.pList; + assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); if( pList ){ nArg = pList->nExpr; - sqlite3ExprCodeExprList(pParse, pList); + regAgg = sqlite3GetTempRange(pParse, nArg); + sqlite3ExprCodeExprList(pParse, pList, regAgg, 0); }else{ nArg = 0; + regAgg = 0; } if( pF->iDistinct>=0 ){ addrNext = sqlite3VdbeMakeLabel(v); assert( nArg==1 ); - codeDistinct(v, pF->iDistinct, addrNext, 1); + codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg); } - if( pF->pFunc->needCollSeq ){ + if( pF->pFunc->flags & SQLITE_FUNC_NEEDCOLL ){ CollSeq *pColl = 0; struct ExprList_item *pItem; int j; - assert( pList!=0 ); /* pList!=0 if pF->pFunc->needCollSeq is true */ + assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ for(j=0, pItem=pList->a; !pColl && jpExpr); } if( !pColl ){ pColl = pParse->db->pDfltColl; } - sqlite3VdbeOp3(v, OP_CollSeq, 0, 0, (char *)pColl, P3_COLLSEQ); + sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); } - sqlite3VdbeOp3(v, OP_AggStep, pF->iMem, nArg, (void*)pF->pFunc, P3_FUNCDEF); + sqlite3VdbeAddOp4(v, OP_AggStep, 0, regAgg, pF->iMem, + (void*)pF->pFunc, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3ReleaseTempRange(pParse, regAgg, nArg); + sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg); if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); + sqlite3ExprCacheClear(pParse); } } for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ - sqlite3ExprCode(pParse, pC->pExpr); - sqlite3VdbeAddOp(v, OP_MemStore, pC->iMem, 1); + sqlite3ExprCode(pParse, pC->pExpr, pC->iMem); } pAggInfo->directMode = 0; + sqlite3ExprCacheClear(pParse); } - /* -** Generate code for the given SELECT statement. +** Generate code for the SELECT statement given in the p argument. ** ** The results are distributed in various ways depending on the -** value of eDest and iParm. +** contents of the SelectDest structure pointed to by argument pDest +** as follows: ** -** eDest Value Result +** pDest->eDest Result ** ------------ ------------------------------------------- -** SRT_Callback Invoke the callback for each row of the result. +** SRT_Output Generate a row of output (using the OP_ResultRow +** opcode) for each row in the result set. ** -** SRT_Mem Store first result in memory cell iParm -** -** SRT_Set Store results as keys of table iParm. -** -** SRT_Union Store results as a key in a temporary table iParm -** -** SRT_Except Remove results from the temporary table iParm. -** -** SRT_Table Store results in temporary table iParm -** -** The table above is incomplete. Additional eDist value have be added -** since this comment was written. See the selectInnerLoop() function for -** a complete listing of the allowed values of eDest and their meanings. +** SRT_Mem Only valid if the result is a single column. +** Store the first column of the first result row +** in register pDest->iParm then abandon the rest +** of the query. This destination implies "LIMIT 1". +** +** SRT_Set The result must be a single column. Store each +** row of result as the key in table pDest->iParm. +** Apply the affinity pDest->affinity before storing +** results. Used to implement "IN (SELECT ...)". 
+** +** SRT_Union Store results as a key in a temporary table pDest->iParm. +** +** SRT_Except Remove results from the temporary table pDest->iParm. +** +** SRT_Table Store results in temporary table pDest->iParm. +** This is like SRT_EphemTab except that the table +** is assumed to already be open. +** +** SRT_EphemTab Create an temporary table pDest->iParm and store +** the result there. The cursor is left open after +** returning. This is like SRT_Table except that +** this destination uses OP_OpenEphemeral to create +** the table first. +** +** SRT_Coroutine Generate a co-routine that returns a new row of +** results each time it is invoked. The entry point +** of the co-routine is stored in register pDest->iParm. +** +** SRT_Exists Store a 1 in memory cell pDest->iParm if the result +** set is not empty. +** +** SRT_Discard Throw the results away. This is used by SELECT +** statements within triggers whose only purpose is +** the side-effects of functions. ** ** This routine returns the number of errors. If any errors are ** encountered, then an appropriate error message is left in @@ -2849,37 +3501,11 @@ ** ** This routine does NOT free the Select structure passed in. The ** calling function needs to do that. -** -** The pParent, parentTab, and *pParentAgg fields are filled in if this -** SELECT is a subquery. This routine may try to combine this SELECT -** with its parent to form a single flat query. In so doing, it might -** change the parent query from a non-aggregate to an aggregate query. -** For that reason, the pParentAgg flag is passed as a pointer, so it -** can be changed. -** -** Example 1: The meaning of the pParent parameter. -** -** SELECT * FROM t1 JOIN (SELECT x, count(*) FROM t2) JOIN t3; -** \ \_______ subquery _______/ / -** \ / -** \____________________ outer query ___________________/ -** -** This routine is called for the outer query first. For that call, -** pParent will be NULL. During the processing of the outer query, this -** routine is called recursively to handle the subquery. For the recursive -** call, pParent will point to the outer query. Because the subquery is -** the second element in a three-way join, the parentTab parameter will -** be 1 (the 2nd value of a 0-indexed array.) */ int sqlite3Select( Parse *pParse, /* The parser context */ Select *p, /* The SELECT statement being coded. */ - int eDest, /* How to dispose of the results */ - int iParm, /* A parameter used by the eDest disposal method */ - Select *pParent, /* Another SELECT for which this is a sub-query */ - int parentTab, /* Index in pParent->pSrc of this query */ - int *pParentAgg, /* True if pParent uses aggregate functions */ - char *aff /* If eDest is SRT_Union, the affinity string */ + SelectDest *pDest /* What to do with the query results */ ){ int i, j; /* Loop counters */ WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */ @@ -2897,72 +3523,33 @@ int addrSortIndex; /* Address of an OP_OpenEphemeral instruction */ AggInfo sAggInfo; /* Information used by aggregate queries */ int iEnd; /* Address of the end of the query */ + sqlite3 *db; /* The database connection */ - if( p==0 || sqlite3MallocFailed() || pParse->nErr ){ + db = pParse->db; + if( p==0 || db->mallocFailed || pParse->nErr ){ return 1; } if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; memset(&sAggInfo, 0, sizeof(sAggInfo)); -#ifndef SQLITE_OMIT_COMPOUND_SELECT - /* If there is are a sequence of queries, do the earlier ones first. 
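/*
** Aside: with this change every caller of sqlite3Select() describes the
** result disposition through a SelectDest rather than the old
** (eDest, iParm, pParent, parentTab, pParentAgg, aff) argument list. A
** minimal sketch of the new calling convention, using only helpers that
** appear elsewhere in this patch (the wrapper itself is hypothetical):
*/
static int codeSelectToOutput(Parse *pParse, Select *p){
  SelectDest dest;
  /* Send each result row back to the caller via OP_ResultRow */
  sqlite3SelectDestInit(&dest, SRT_Output, 0);
  return sqlite3Select(pParse, p, &dest);
}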
- */ - if( p->pPrior ){ - if( p->pRightmost==0 ){ - Select *pLoop; - int cnt = 0; - for(pLoop=p; pLoop; pLoop=pLoop->pPrior, cnt++){ - pLoop->pRightmost = p; - } - if( SQLITE_MAX_COMPOUND_SELECT>0 && cnt>SQLITE_MAX_COMPOUND_SELECT ){ - sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); - return 1; - } - } - return multiSelect(pParse, p, eDest, iParm, aff); - } -#endif - - pOrderBy = p->pOrderBy; - if( IgnorableOrderby(eDest) ){ + if( IgnorableOrderby(pDest) ){ + assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || + pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard); + /* If ORDER BY makes no difference in the output then neither does + ** DISTINCT so it can be removed too. */ + sqlite3ExprListDelete(db, p->pOrderBy); p->pOrderBy = 0; + p->selFlags &= ~SF_Distinct; } - if( sqlite3SelectResolve(pParse, p, 0) ){ - goto select_end; - } - p->pOrderBy = pOrderBy; - - /* Make local copies of the parameters for this query. - */ + sqlite3SelectPrep(pParse, p, 0); + pOrderBy = p->pOrderBy; pTabList = p->pSrc; - pWhere = p->pWhere; - pGroupBy = p->pGroupBy; - pHaving = p->pHaving; - isAgg = p->isAgg; - isDistinct = p->isDistinct; pEList = p->pEList; - if( pEList==0 ) goto select_end; - - /* - ** Do not even attempt to generate any code if we have already seen - ** errors before this routine starts. - */ - if( pParse->nErr>0 ) goto select_end; - - /* If writing to memory or generating a set - ** only a single column may be output. - */ -#ifndef SQLITE_OMIT_SUBQUERY - if( checkForMultiColumnSelectError(pParse, eDest, pEList->nExpr) ){ + if( pParse->nErr || db->mallocFailed ){ goto select_end; } -#endif - - /* ORDER BY is ignored for some destinations. - */ - if( IgnorableOrderby(eDest) ){ - pOrderBy = 0; - } + isAgg = (p->selFlags & SF_Aggregate)!=0; + assert( pEList!=0 ); /* Begin generating code. */ @@ -2972,20 +3559,14 @@ /* Generate code for all sub-queries in the FROM clause */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) - for(i=0; inSrc; i++){ - const char *zSavedAuthContext = 0; - int needRestoreContext; + for(i=0; !p->pPrior && inSrc; i++){ struct SrcList_item *pItem = &pTabList->a[i]; + SelectDest dest; + Select *pSub = pItem->pSelect; + int isAggSub; + + if( pSub==0 || pItem->isPopulated ) continue; - if( pItem->pSelect==0 || pItem->isPopulated ) continue; - if( pItem->zName!=0 ){ - zSavedAuthContext = pParse->zAuthContext; - pParse->zAuthContext = pItem->zName; - needRestoreContext = 1; - }else{ - needRestoreContext = 0; - } -#if SQLITE_MAX_EXPR_DEPTH>0 /* Increment Parse.nHeight by the height of the largest expression ** tree refered to by this, the parent select. The child select ** may contain expression trees of at most @@ -2994,45 +3575,80 @@ ** an exact limit. */ pParse->nHeight += sqlite3SelectExprHeight(p); -#endif - sqlite3Select(pParse, pItem->pSelect, SRT_EphemTab, - pItem->iCursor, p, i, &isAgg, 0); -#if SQLITE_MAX_EXPR_DEPTH>0 - pParse->nHeight -= sqlite3SelectExprHeight(p); -#endif - if( needRestoreContext ){ - pParse->zAuthContext = zSavedAuthContext; + + /* Check to see if the subquery can be absorbed into the parent. 
*/ + isAggSub = (pSub->selFlags & SF_Aggregate)!=0; + if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){ + if( isAggSub ){ + isAgg = 1; + p->selFlags |= SF_Aggregate; + } + i = -1; + }else{ + sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); + assert( pItem->isPopulated==0 ); + sqlite3Select(pParse, pSub, &dest); + pItem->isPopulated = 1; + } + if( /*pParse->nErr ||*/ db->mallocFailed ){ + goto select_end; } + pParse->nHeight -= sqlite3SelectExprHeight(p); pTabList = p->pSrc; - pWhere = p->pWhere; - if( !IgnorableOrderby(eDest) ){ + if( !IgnorableOrderby(pDest) ){ pOrderBy = p->pOrderBy; } - pGroupBy = p->pGroupBy; - pHaving = p->pHaving; - isDistinct = p->isDistinct; } + pEList = p->pEList; #endif + pWhere = p->pWhere; + pGroupBy = p->pGroupBy; + pHaving = p->pHaving; + isDistinct = (p->selFlags & SF_Distinct)!=0; - /* Check for the special case of a min() or max() function by itself - ** in the result set. +#ifndef SQLITE_OMIT_COMPOUND_SELECT + /* If there is are a sequence of queries, do the earlier ones first. */ - if( simpleMinMaxQuery(pParse, p, eDest, iParm) ){ - rc = 0; - goto select_end; + if( p->pPrior ){ + if( p->pRightmost==0 ){ + Select *pLoop, *pRight = 0; + int cnt = 0; + int mxSelect; + for(pLoop=p; pLoop; pLoop=pLoop->pPrior, cnt++){ + pLoop->pRightmost = p; + pLoop->pNext = pRight; + pRight = pLoop; + } + mxSelect = db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT]; + if( mxSelect && cnt>mxSelect ){ + sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); + return 1; + } + } + return multiSelect(pParse, p, pDest); } +#endif - /* Check to see if this is a subquery that can be "flattened" into its parent. - ** If flattening is a possiblity, do so and return immediately. + /* If writing to memory or generating a set + ** only a single column may be output. */ -#ifndef SQLITE_OMIT_VIEW - if( pParent && pParentAgg && - flattenSubquery(pParent, parentTab, *pParentAgg, isAgg) ){ - if( isAgg ) *pParentAgg = 1; +#ifndef SQLITE_OMIT_SUBQUERY + if( checkForMultiColumnSelectError(pParse, pDest, pEList->nExpr) ){ goto select_end; } #endif + /* If possible, rewrite the query to use GROUP BY instead of DISTINCT. + ** GROUP BY might use an index, DISTINCT never does. + */ + assert( p->pGroupBy==0 || (p->selFlags & SF_Aggregate)!=0 ); + if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ){ + p->pGroupBy = sqlite3ExprListDup(db, p->pEList, 0); + pGroupBy = p->pGroupBy; + p->selFlags &= ~SF_Distinct; + isDistinct = 0; + } + /* If there is an ORDER BY clause, then this sorting ** index might end up being unused if the data can be ** extracted in pre-sorted order. If that is the case, then the @@ -3042,21 +3658,20 @@ */ if( pOrderBy ){ KeyInfo *pKeyInfo; - if( pParse->nErr ){ - goto select_end; - } pKeyInfo = keyInfoFromExprList(pParse, pOrderBy); pOrderBy->iECursor = pParse->nTab++; p->addrOpenEphm[2] = addrSortIndex = - sqlite3VdbeOp3(v, OP_OpenEphemeral, pOrderBy->iECursor, pOrderBy->nExpr+2, (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + pOrderBy->iECursor, pOrderBy->nExpr+2, 0, + (char*)pKeyInfo, P4_KEYINFO_HANDOFF); }else{ addrSortIndex = -1; } /* If the output is destined for a temporary table, open that table. */ - if( eDest==SRT_EphemTab ){ - sqlite3VdbeAddOp(v, OP_OpenEphemeral, iParm, pEList->nExpr); + if( pDest->eDest==SRT_EphemTab ){ + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iParm, pEList->nExpr); } /* Set the limiter. 
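/*
** Aside: the DISTINCT-to-GROUP-BY rewrite above means that, for example,
**
**     SELECT DISTINCT a, b FROM t1;
**
** is processed as if it had been written
**
**     SELECT a, b FROM t1 GROUP BY a, b;
**
** letting the GROUP BY machinery (and possibly an index) eliminate the
** duplicate rows. A restatement of that step as a standalone helper, using
** the same flags and helpers the patch uses (illustrative sketch only):
*/
static void distinctToGroupBy(Parse *pParse, Select *p){
  sqlite3 *db = pParse->db;
  if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct
   && p->pGroupBy==0
  ){
    p->pGroupBy = sqlite3ExprListDup(db, p->pEList, 0);
    p->selFlags &= ~SF_Distinct;
  }
}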
@@ -3068,10 +3683,11 @@ */ if( isDistinct ){ KeyInfo *pKeyInfo; + assert( isAgg || pGroupBy ); distinct = pParse->nTab++; pKeyInfo = keyInfoFromExprList(pParse, p->pEList); - sqlite3VdbeOp3(v, OP_OpenEphemeral, distinct, 0, - (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, distinct, 0, 0, + (char*)pKeyInfo, P4_KEYINFO_HANDOFF); }else{ distinct = -1; } @@ -3081,7 +3697,7 @@ /* This case is for non-aggregate queries ** Begin the database scan */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pOrderBy); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pOrderBy, 0); if( pWInfo==0 ) goto select_end; /* If sorting index that was created by a prior OP_OpenEphemeral @@ -3095,10 +3711,9 @@ /* Use the standard inner loop */ - if( selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, distinct, eDest, - iParm, pWInfo->iContinue, pWInfo->iBreak, aff) ){ - goto select_end; - } + assert(!isDistinct); + selectInnerLoop(pParse, p, pEList, 0, 0, pOrderBy, -1, pDest, + pWInfo->iContinue, pWInfo->iBreak); /* End the database scan loop. */ @@ -3113,20 +3728,25 @@ ** processed */ int iAbortFlag; /* Mem address which causes query abort if positive */ int groupBySort; /* Rows come from source in GROUP BY order */ + int addrEnd; /* End of processing for this SELECT */ + /* Remove any and all aliases between the result set and the + ** GROUP BY clause. + */ + if( pGroupBy ){ + int k; /* Loop counter */ + struct ExprList_item *pItem; /* For looping over expression in a list */ - /* The following variables hold addresses or labels for parts of the - ** virtual machine program we are putting together */ - int addrOutputRow; /* Start of subroutine that outputs a result row */ - int addrSetAbort; /* Set the abort flag and return */ - int addrInitializeLoop; /* Start of code that initializes the input loop */ - int addrTopOfLoop; /* Top of the input loop */ - int addrGroupByChange; /* Code that runs when any GROUP BY term changes */ - int addrProcessRow; /* Code to process a single input row */ - int addrEnd; /* End of all processing */ - int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ - int addrReset; /* Subroutine for resetting the accumulator */ + for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){ + pItem->iAlias = 0; + } + for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){ + pItem->iAlias = 0; + } + } + + /* Create a label to jump to when we want to abort the query */ addrEnd = sqlite3VdbeMakeLabel(v); /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in @@ -3139,35 +3759,31 @@ sNC.pAggInfo = &sAggInfo; sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr+1 : 0; sAggInfo.pGroupBy = pGroupBy; - if( sqlite3ExprAnalyzeAggList(&sNC, pEList) ){ - goto select_end; - } - if( sqlite3ExprAnalyzeAggList(&sNC, pOrderBy) ){ - goto select_end; - } - if( pHaving && sqlite3ExprAnalyzeAggregates(&sNC, pHaving) ){ - goto select_end; + sqlite3ExprAnalyzeAggList(&sNC, pEList); + sqlite3ExprAnalyzeAggList(&sNC, pOrderBy); + if( pHaving ){ + sqlite3ExprAnalyzeAggregates(&sNC, pHaving); } sAggInfo.nAccumulator = sAggInfo.nColumn; for(i=0; ipList) ){ - goto select_end; - } + assert( !ExprHasProperty(sAggInfo.aFunc[i].pExpr, EP_xIsSelect) ); + sqlite3ExprAnalyzeAggList(&sNC, sAggInfo.aFunc[i].pExpr->x.pList); } - if( sqlite3MallocFailed() ) goto select_end; + if( db->mallocFailed ) goto select_end; /* Processing for aggregates with GROUP BY is very different and - ** much more complex tha aggregates without a GROUP BY. 
+ ** much more complex than aggregates without a GROUP BY. */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ - - /* Create labels that we will be needing - */ - - addrInitializeLoop = sqlite3VdbeMakeLabel(v); - addrGroupByChange = sqlite3VdbeMakeLabel(v); - addrProcessRow = sqlite3VdbeMakeLabel(v); + int j1; /* A-vs-B comparision jump */ + int addrOutputRow; /* Start of subroutine that outputs a result row */ + int regOutputRow; /* Return address register for output subroutine */ + int addrSetAbort; /* Set the abort flag and return */ + int addrTopOfLoop; /* Top of the input loop */ + int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ + int addrReset; /* Subroutine for resetting the accumulator */ + int regReset; /* Return address register for reset subroutine */ /* If there is a GROUP BY clause we might need a sorting index to ** implement it. Allocate that sorting index now. If it turns out @@ -3176,67 +3792,34 @@ */ sAggInfo.sortingIdx = pParse->nTab++; pKeyInfo = keyInfoFromExprList(pParse, pGroupBy); - addrSortingIdx = - sqlite3VdbeOp3(v, OP_OpenEphemeral, sAggInfo.sortingIdx, - sAggInfo.nSortingColumn, - (char*)pKeyInfo, P3_KEYINFO_HANDOFF); + addrSortingIdx = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + sAggInfo.sortingIdx, sAggInfo.nSortingColumn, + 0, (char*)pKeyInfo, P4_KEYINFO_HANDOFF); /* Initialize memory locations used by GROUP BY aggregate processing */ - iUseFlag = pParse->nMem++; - iAbortFlag = pParse->nMem++; - iAMem = pParse->nMem; + iUseFlag = ++pParse->nMem; + iAbortFlag = ++pParse->nMem; + regOutputRow = ++pParse->nMem; + addrOutputRow = sqlite3VdbeMakeLabel(v); + regReset = ++pParse->nMem; + addrReset = sqlite3VdbeMakeLabel(v); + iAMem = pParse->nMem + 1; pParse->nMem += pGroupBy->nExpr; - iBMem = pParse->nMem; + iBMem = pParse->nMem + 1; pParse->nMem += pGroupBy->nExpr; - sqlite3VdbeAddOp(v, OP_MemInt, 0, iAbortFlag); - VdbeComment((v, "# clear abort flag")); - sqlite3VdbeAddOp(v, OP_MemInt, 0, iUseFlag); - VdbeComment((v, "# indicate accumulator empty")); - sqlite3VdbeAddOp(v, OP_Goto, 0, addrInitializeLoop); - - /* Generate a subroutine that outputs a single row of the result - ** set. This subroutine first looks at the iUseFlag. If iUseFlag - ** is less than or equal to zero, the subroutine is a no-op. If - ** the processing calls for the query to abort, this subroutine - ** increments the iAbortFlag memory location before returning in - ** order to signal the caller to abort. 
- */ - addrSetAbort = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_MemInt, 1, iAbortFlag); - VdbeComment((v, "# set abort flag")); - sqlite3VdbeAddOp(v, OP_Return, 0, 0); - addrOutputRow = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_IfMemPos, iUseFlag, addrOutputRow+2); - VdbeComment((v, "# Groupby result generator entry point")); - sqlite3VdbeAddOp(v, OP_Return, 0, 0); - finalizeAggFunctions(pParse, &sAggInfo); - if( pHaving ){ - sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, 1); - } - rc = selectInnerLoop(pParse, p, p->pEList, 0, 0, pOrderBy, - distinct, eDest, iParm, - addrOutputRow+1, addrSetAbort, aff); - if( rc ){ - goto select_end; - } - sqlite3VdbeAddOp(v, OP_Return, 0, 0); - VdbeComment((v, "# end groupby result generator")); - - /* Generate a subroutine that will reset the group-by accumulator - */ - addrReset = sqlite3VdbeCurrentAddr(v); - resetAccumulator(pParse, &sAggInfo); - sqlite3VdbeAddOp(v, OP_Return, 0, 0); + sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); + VdbeComment((v, "clear abort flag")); + sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag); + VdbeComment((v, "indicate accumulator empty")); /* Begin a loop that will extract all source rows in GROUP BY order. ** This might involve two separate loops with an OP_Sort in between, or ** it might be a single loop that uses an index to extract information ** in the right order to begin with. */ - sqlite3VdbeResolveLabel(v, addrInitializeLoop); - sqlite3VdbeAddOp(v, OP_Gosub, 0, addrReset); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pGroupBy); + sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pGroupBy, 0); if( pWInfo==0 ) goto select_end; if( pGroupBy==0 ){ /* The optimizer is able to deliver rows in group by order so @@ -3251,22 +3834,50 @@ ** then loop over the sorting index in order to get the output ** in sorted order */ + int regBase; + int regRecord; + int nCol; + int nGroupBy; + groupBySort = 1; - sqlite3ExprCodeExprList(pParse, pGroupBy); - sqlite3VdbeAddOp(v, OP_Sequence, sAggInfo.sortingIdx, 0); - j = pGroupBy->nExpr+1; + nGroupBy = pGroupBy->nExpr; + nCol = nGroupBy + 1; + j = nGroupBy+1; + for(i=0; i=j ){ + nCol++; + j++; + } + } + regBase = sqlite3GetTempRange(pParse, nCol); + sqlite3ExprCacheClear(pParse); + sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0); + sqlite3VdbeAddOp2(v, OP_Sequence, sAggInfo.sortingIdx,regBase+nGroupBy); + j = nGroupBy+1; for(i=0; iiSorterColumnpTab, pCol->iColumn, pCol->iTable); - j++; + if( pCol->iSorterColumn>=j ){ + int r1 = j + regBase; + int r2; + + r2 = sqlite3ExprCodeGetColumn(pParse, + pCol->pTab, pCol->iColumn, pCol->iTable, r1, 0); + if( r1!=r2 ){ + sqlite3VdbeAddOp2(v, OP_SCopy, r2, r1); + } + j++; + } } - sqlite3VdbeAddOp(v, OP_MakeRecord, j, 0); - sqlite3VdbeAddOp(v, OP_IdxInsert, sAggInfo.sortingIdx, 0); + regRecord = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord); + sqlite3VdbeAddOp2(v, OP_IdxInsert, sAggInfo.sortingIdx, regRecord); + sqlite3ReleaseTempReg(pParse, regRecord); + sqlite3ReleaseTempRange(pParse, regBase, nCol); sqlite3WhereEnd(pWInfo); - sqlite3VdbeAddOp(v, OP_Sort, sAggInfo.sortingIdx, addrEnd); - VdbeComment((v, "# GROUP BY sort")); + sqlite3VdbeAddOp2(v, OP_Sort, sAggInfo.sortingIdx, addrEnd); + VdbeComment((v, "GROUP BY sort")); sAggInfo.useSortingIdx = 1; + sqlite3ExprCacheClear(pParse); } /* Evaluate the current GROUP BY terms and store in b0, b1, b2... 
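/*
** Aside: this hunk is part of the broader move in this release series from
** the old VDBE operand stack to numbered registers. The recurring idiom is:
** grab a contiguous block of temporary registers, evaluate an expression
** list into it, build a record, then release the registers. A minimal
** sketch of that idiom, using only calls that appear in this patch (the
** helper name is illustrative):
*/
static int codeExprListToRecord(Parse *pParse, Vdbe *v, ExprList *pList){
  int nCol = pList->nExpr;
  int regBase = sqlite3GetTempRange(pParse, nCol);  /* contiguous registers */
  int regRecord = sqlite3GetTempReg(pParse);        /* holds the packed row */
  sqlite3ExprCodeExprList(pParse, pList, regBase, 0);
  sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
  sqlite3ReleaseTempRange(pParse, regBase, nCol);   /* record already built */
  return regRecord;            /* caller releases this with ReleaseTempReg */
}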
@@ -3275,30 +3886,22 @@ ** from the previous row currently stored in a0, a1, a2... */ addrTopOfLoop = sqlite3VdbeCurrentAddr(v); + sqlite3ExprCacheClear(pParse); for(j=0; jnExpr; j++){ if( groupBySort ){ - sqlite3VdbeAddOp(v, OP_Column, sAggInfo.sortingIdx, j); + sqlite3VdbeAddOp3(v, OP_Column, sAggInfo.sortingIdx, j, iBMem+j); }else{ sAggInfo.directMode = 1; - sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr); - } - sqlite3VdbeAddOp(v, OP_MemStore, iBMem+j, jnExpr-1); - } - for(j=pGroupBy->nExpr-1; j>=0; j--){ - if( jnExpr-1 ){ - sqlite3VdbeAddOp(v, OP_MemLoad, iBMem+j, 0); - } - sqlite3VdbeAddOp(v, OP_MemLoad, iAMem+j, 0); - if( j==0 ){ - sqlite3VdbeAddOp(v, OP_Eq, 0x200, addrProcessRow); - }else{ - sqlite3VdbeAddOp(v, OP_Ne, 0x200, addrGroupByChange); + sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); } - sqlite3VdbeChangeP3(v, -1, (void*)pKeyInfo->aColl[j], P3_COLLSEQ); } + sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, + (char*)pKeyInfo, P4_KEYINFO); + j1 = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp3(v, OP_Jump, j1+1, 0, j1+1); /* Generate code that runs whenever the GROUP BY changes. - ** Change in the GROUP BY are detected by the previous code + ** Changes in the GROUP BY are detected by the previous code ** block. If there were no changes, this block is skipped. ** ** This code copies current group by terms in b0,b1,b2,... @@ -3306,29 +3909,26 @@ ** and resets the aggregate accumulator registers in preparation ** for the next GROUP BY batch. */ - sqlite3VdbeResolveLabel(v, addrGroupByChange); - for(j=0; jnExpr; j++){ - sqlite3VdbeAddOp(v, OP_MemMove, iAMem+j, iBMem+j); - } - sqlite3VdbeAddOp(v, OP_Gosub, 0, addrOutputRow); - VdbeComment((v, "# output one row")); - sqlite3VdbeAddOp(v, OP_IfMemPos, iAbortFlag, addrEnd); - VdbeComment((v, "# check abort flag")); - sqlite3VdbeAddOp(v, OP_Gosub, 0, addrReset); - VdbeComment((v, "# reset accumulator")); + sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); + sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); + VdbeComment((v, "output one row")); + sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); + VdbeComment((v, "check abort flag")); + sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); + VdbeComment((v, "reset accumulator")); /* Update the aggregate accumulators based on the content of ** the current row */ - sqlite3VdbeResolveLabel(v, addrProcessRow); + sqlite3VdbeJumpHere(v, j1); updateAccumulator(pParse, &sAggInfo); - sqlite3VdbeAddOp(v, OP_MemInt, 1, iUseFlag); - VdbeComment((v, "# indicate data in accumulator")); + sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag); + VdbeComment((v, "indicate data in accumulator")); /* End of the loop */ if( groupBySort ){ - sqlite3VdbeAddOp(v, OP_Next, sAggInfo.sortingIdx, addrTopOfLoop); + sqlite3VdbeAddOp2(v, OP_Next, sAggInfo.sortingIdx, addrTopOfLoop); }else{ sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx, 1); @@ -3336,27 +3936,165 @@ /* Output the final row of result */ - sqlite3VdbeAddOp(v, OP_Gosub, 0, addrOutputRow); - VdbeComment((v, "# output final row")); - - } /* endif pGroupBy */ - else { - /* This case runs if the aggregate has no GROUP BY clause. The - ** processing is much simpler since there is only a single row - ** of output. 
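/*
** Aside: the GROUP BY code below emits two VDBE subroutines (one that
** outputs a result row, one that resets the aggregate accumulators) using
** the Gosub/Return idiom: the call site stores its return address in a
** register with OP_Gosub, and the subroutine body ends with OP_Return on
** that same register. A minimal sketch of the idiom, using only the Vdbe
** helpers already used in this patch (the body itself is left hypothetical):
*/
static void sketchVdbeSubroutine(Parse *pParse, Vdbe *v, int addrEnd){
  int regReturn = ++pParse->nMem;          /* register for the return address */
  int addrSub = sqlite3VdbeMakeLabel(v);   /* subroutine entry, resolved below */

  /* Call site: OP_Gosub records the return address in regReturn and jumps */
  sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, addrSub);

  /* Main-line code continues here, then must jump past the subroutine body,
  ** as the patch does with its "Jump over the subroutines" OP_Goto */
  sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEnd);

  /* Subroutine body: ends with OP_Return on the same register */
  sqlite3VdbeResolveLabel(v, addrSub);
  /* ... emit the subroutine's work here ... */
  sqlite3VdbeAddOp1(v, OP_Return, regReturn);
}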
+ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); + VdbeComment((v, "output final row")); + + /* Jump over the subroutines */ - resetAccumulator(pParse, &sAggInfo); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0); - if( pWInfo==0 ) goto select_end; - updateAccumulator(pParse, &sAggInfo); - sqlite3WhereEnd(pWInfo); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEnd); + + /* Generate a subroutine that outputs a single row of the result + ** set. This subroutine first looks at the iUseFlag. If iUseFlag + ** is less than or equal to zero, the subroutine is a no-op. If + ** the processing calls for the query to abort, this subroutine + ** increments the iAbortFlag memory location before returning in + ** order to signal the caller to abort. + */ + addrSetAbort = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag); + VdbeComment((v, "set abort flag")); + sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); + sqlite3VdbeResolveLabel(v, addrOutputRow); + addrOutputRow = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2); + VdbeComment((v, "Groupby result generator entry point")); + sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); finalizeAggFunctions(pParse, &sAggInfo); - pOrderBy = 0; - if( pHaving ){ - sqlite3ExprIfFalse(pParse, pHaving, addrEnd, 1); + sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL); + selectInnerLoop(pParse, p, p->pEList, 0, 0, pOrderBy, + distinct, pDest, + addrOutputRow+1, addrSetAbort); + sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); + VdbeComment((v, "end groupby result generator")); + + /* Generate a subroutine that will reset the group-by accumulator + */ + sqlite3VdbeResolveLabel(v, addrReset); + resetAccumulator(pParse, &sAggInfo); + sqlite3VdbeAddOp1(v, OP_Return, regReset); + + } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ + else { + ExprList *pDel = 0; +#ifndef SQLITE_OMIT_BTREECOUNT + Table *pTab; + if( (pTab = isSimpleCount(p, &sAggInfo))!=0 ){ + /* If isSimpleCount() returns a pointer to a Table structure, then + ** the SQL statement is of the form: + ** + ** SELECT count(*) FROM + ** + ** where the Table structure returned represents table . + ** + ** This statement is so common that it is optimized specially. The + ** OP_Count instruction is executed either on the intkey table that + ** contains the data for table or on one of its indexes. It + ** is better to execute the op on an index, as indexes are almost + ** always spread across less pages than their corresponding tables. + */ + const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + const int iCsr = pParse->nTab++; /* Cursor to scan b-tree */ + Index *pIdx; /* Iterator variable */ + KeyInfo *pKeyInfo = 0; /* Keyinfo for scanned index */ + Index *pBest = 0; /* Best index found so far */ + int iRoot = pTab->tnum; /* Root page of scanned b-tree */ + + sqlite3CodeVerifySchema(pParse, iDb); + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + + /* Search for the index that has the least amount of columns. If + ** there is such an index, and it has less columns than the table + ** does, then we can assume that it consumes less space on disk and + ** will therefore be cheaper to scan to determine the query result. + ** In this case set iRoot to the root page number of the index b-tree + ** and pKeyInfo to the KeyInfo structure required to navigate the + ** index. + ** + ** In practice the KeyInfo structure will not be used. It is only + ** passed to keep OP_OpenRead happy. 
+ */ + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( !pBest || pIdx->nColumnnColumn ){ + pBest = pIdx; + } + } + if( pBest && pBest->nColumnnCol ){ + iRoot = pBest->tnum; + pKeyInfo = sqlite3IndexKeyinfo(pParse, pBest); + } + + /* Open a read-only cursor, execute the OP_Count, close the cursor. */ + sqlite3VdbeAddOp3(v, OP_OpenRead, iCsr, iRoot, iDb); + if( pKeyInfo ){ + sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO_HANDOFF); + } + sqlite3VdbeAddOp2(v, OP_Count, iCsr, sAggInfo.aFunc[0].iMem); + sqlite3VdbeAddOp1(v, OP_Close, iCsr); + }else +#endif /* SQLITE_OMIT_BTREECOUNT */ + { + /* Check if the query is of one of the following forms: + ** + ** SELECT min(x) FROM ... + ** SELECT max(x) FROM ... + ** + ** If it is, then ask the code in where.c to attempt to sort results + ** as if there was an "ORDER ON x" or "ORDER ON x DESC" clause. + ** If where.c is able to produce results sorted in this order, then + ** add vdbe code to break out of the processing loop after the + ** first iteration (since the first iteration of the loop is + ** guaranteed to operate on the row with the minimum or maximum + ** value of x, the only row required). + ** + ** A special flag must be passed to sqlite3WhereBegin() to slightly + ** modify behaviour as follows: + ** + ** + If the query is a "SELECT min(x)", then the loop coded by + ** where.c should not iterate over any values with a NULL value + ** for x. + ** + ** + The optimizer code in where.c (the thing that decides which + ** index or indices to use) should place a different priority on + ** satisfying the 'ORDER BY' clause than it does in other cases. + ** Refer to code and comments in where.c for details. + */ + ExprList *pMinMax = 0; + u8 flag = minMaxQuery(p); + if( flag ){ + assert( !ExprHasProperty(p->pEList->a[0].pExpr, EP_xIsSelect) ); + pMinMax = sqlite3ExprListDup(db, p->pEList->a[0].pExpr->x.pList,0); + pDel = pMinMax; + if( pMinMax && !db->mallocFailed ){ + pMinMax->a[0].sortOrder = flag!=WHERE_ORDERBY_MIN ?1:0; + pMinMax->a[0].pExpr->op = TK_COLUMN; + } + } + + /* This case runs if the aggregate has no GROUP BY clause. The + ** processing is much simpler since there is only a single row + ** of output. + */ + resetAccumulator(pParse, &sAggInfo); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, &pMinMax, flag); + if( pWInfo==0 ){ + sqlite3ExprListDelete(db, pDel); + goto select_end; + } + updateAccumulator(pParse, &sAggInfo); + if( !pMinMax && flag ){ + sqlite3VdbeAddOp2(v, OP_Goto, 0, pWInfo->iBreak); + VdbeComment((v, "%s() by index", + (flag==WHERE_ORDERBY_MIN?"min":"max"))); + } + sqlite3WhereEnd(pWInfo); + finalizeAggFunctions(pParse, &sAggInfo); } + + pOrderBy = 0; + sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL); selectInnerLoop(pParse, p, p->pEList, 0, 0, 0, -1, - eDest, iParm, addrEnd, addrEnd, aff); + pDest, addrEnd, addrEnd); + sqlite3ExprListDelete(db, pDel); } sqlite3VdbeResolveLabel(v, addrEnd); @@ -3366,22 +4104,9 @@ ** and send them to the callback one by one. */ if( pOrderBy ){ - generateSortTail(pParse, p, v, pEList->nExpr, eDest, iParm); + generateSortTail(pParse, p, v, pEList->nExpr, pDest); } -#ifndef SQLITE_OMIT_SUBQUERY - /* If this was a subquery, we have now converted the subquery into a - ** temporary table. So set the SrcList_item.isPopulated flag to prevent - ** this subquery from being evaluated again and to force the use of - ** the temporary table. 
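/*
** Aside: the count(*) optimization above prefers to run OP_Count on the
** index with the fewest columns, on the theory that a narrower b-tree is
** spread over fewer pages. The index-selection step, restated as a
** standalone helper for clarity (illustrative sketch, same field names as
** in the patch):
*/
static Index *narrowestIndex(Table *pTab){
  Index *pIdx;
  Index *pBest = 0;
  for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
    if( pBest==0 || pIdx->nColumn<pBest->nColumn ){
      pBest = pIdx;
    }
  }
  /* Only worth using if it is actually narrower than the table itself */
  return (pBest && pBest->nColumn<pTab->nCol) ? pBest : 0;
}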
- */ - if( pParent ){ - assert( pParent->pSrc->nSrc>parentTab ); - assert( pParent->pSrc->a[parentTab].pSelect==p ); - pParent->pSrc->a[parentTab].isPopulated = 1; - } -#endif - /* Jump here to skip this query */ sqlite3VdbeResolveLabel(v, iEnd); @@ -3396,15 +4121,14 @@ */ select_end: - /* Identify column names if we will be using them in a callback. This - ** step is skipped if the output is going to some other destination. + /* Identify column names if results of the SELECT are to be output. */ - if( rc==SQLITE_OK && eDest==SRT_Callback ){ + if( rc==SQLITE_OK && pDest->eDest==SRT_Output ){ generateColumnNames(pParse, pTabList, pEList); } - sqliteFree(sAggInfo.aCol); - sqliteFree(sAggInfo.aFunc); + sqlite3DbFree(db, sAggInfo.aCol); + sqlite3DbFree(db, sAggInfo.aFunc); return rc; } @@ -3424,8 +4148,8 @@ ** or from temporary "printf" statements inserted for debugging. */ void sqlite3PrintExpr(Expr *p){ - if( p->token.z && p->token.n>0 ){ - sqlite3DebugPrintf("(%.*s", p->token.n, p->token.z); + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + sqlite3DebugPrintf("(%s", p->u.zToken); }else{ sqlite3DebugPrintf("(%d", p->op); } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/shell.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/shell.c --- sqlite3-3.4.2/src/shell.c 2007-07-30 21:41:53.000000000 +0100 +++ sqlite3-3.6.16/src/shell.c 2009-06-25 12:35:51.000000000 +0100 @@ -12,8 +12,13 @@ ** This file contains code to implement the "sqlite" command line ** utility for accessing SQLite databases. ** -** $Id: shell.c,v 1.166 2007/07/30 20:41:53 drh Exp $ +** $Id: shell.c,v 1.210 2009/05/31 17:16:10 drh Exp $ */ +#if defined(_WIN32) || defined(WIN32) +/* This needs to come before any includes for MSVC compiler */ +#define _CRT_SECURE_NO_WARNINGS +#endif + #include #include #include @@ -22,22 +27,15 @@ #include #include -#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__) && !defined(__OS2__) +#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) # include -# include +# if !defined(__RTP__) && !defined(_WRS_KERNEL) +# include +# endif # include # include #endif -#ifdef __MACOS__ -# include -# include -# include -# include -# include -# include -#endif - #ifdef __OS2__ # include #endif @@ -55,12 +53,935 @@ #if defined(_WIN32) || defined(WIN32) # include +#define isatty(h) _isatty(h) +#define access(f,m) _access((f),(m)) #else /* Make sure isatty() has a prototype. */ extern int isatty(); #endif +#if defined(_WIN32_WCE) +/* Windows CE (arm-wince-mingw32ce-gcc) does not provide isatty() + * thus we always assume that we have a console. That can be + * overridden with the -batch command line option. + */ +#define isatty(x) 1 +#endif + +#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) && !defined(__RTP__) && !defined(_WRS_KERNEL) +#include +#include + +/* Saved resource information for the beginning of an operation */ +static struct rusage sBegin; + +/* True if the timer is enabled */ +static int enableTimer = 0; + +/* +** Begin timing an operation +*/ +static void beginTimer(void){ + if( enableTimer ){ + getrusage(RUSAGE_SELF, &sBegin); + } +} + +/* Return the difference of two time_structs in seconds */ +static double timeDiff(struct timeval *pStart, struct timeval *pEnd){ + return (pEnd->tv_usec - pStart->tv_usec)*0.000001 + + (double)(pEnd->tv_sec - pStart->tv_sec); +} + +/* +** Print the timing results. 
+*/ +static void endTimer(void){ + if( enableTimer ){ + struct rusage sEnd; + getrusage(RUSAGE_SELF, &sEnd); + printf("CPU Time: user %f sys %f\n", + timeDiff(&sBegin.ru_utime, &sEnd.ru_utime), + timeDiff(&sBegin.ru_stime, &sEnd.ru_stime)); + } +} +#define BEGIN_TIMER beginTimer() +#define END_TIMER endTimer() +#define HAS_TIMER 1 +#else +#define BEGIN_TIMER +#define END_TIMER +#define HAS_TIMER 0 +#endif + +/* +** Used to prevent warnings about unused parameters +*/ +#define UNUSED_PARAMETER(x) (void)(x) + + +/************************************************************************** +*************************************************************************** +** Begin genfkey logic. +*/ +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined SQLITE_OMIT_SUBQUERY + +#define GENFKEY_ERROR 1 +#define GENFKEY_DROPTRIGGER 2 +#define GENFKEY_CREATETRIGGER 3 +static int genfkey_create_triggers(sqlite3 *, const char *, void *, + int (*)(void *, int, const char *) +); + +struct GenfkeyCb { + void *pCtx; + int eType; + int (*xData)(void *, int, const char *); +}; +typedef struct GenfkeyCb GenfkeyCb; + +/* The code in this file defines a sqlite3 virtual-table module that +** provides a read-only view of the current database schema. There is one +** row in the schema table for each column in the database schema. +*/ +#define SCHEMA \ +"CREATE TABLE x(" \ + "database," /* Name of database (i.e. main, temp etc.) */ \ + "tablename," /* Name of table */ \ + "cid," /* Column number (from left-to-right, 0 upward) */ \ + "name," /* Column name */ \ + "type," /* Specified type (i.e. VARCHAR(32)) */ \ + "not_null," /* Boolean. True if NOT NULL was specified */ \ + "dflt_value," /* Default value for this column */ \ + "pk" /* True if this column is part of the primary key */ \ +")" + +#define SCHEMA2 \ +"CREATE TABLE x(" \ + "database," /* Name of database (i.e. main, temp etc.) */ \ + "from_tbl," /* Name of table */ \ + "fkid," \ + "seq," \ + "to_tbl," \ + "from_col," \ + "to_col," \ + "on_update," \ + "on_delete," \ + "match" \ +")" + +#define SCHEMA3 \ +"CREATE TABLE x(" \ + "database," /* Name of database (i.e. main, temp etc.) */ \ + "tablename," /* Name of table */ \ + "seq," \ + "name," \ + "isunique" \ +")" + +#define SCHEMA4 \ +"CREATE TABLE x(" \ + "database," /* Name of database (i.e. main, temp etc.) */ \ + "indexname," /* Name of table */ \ + "seqno," \ + "cid," \ + "name" \ +")" + +#define SCHEMA5 \ +"CREATE TABLE x(" \ + "database," /* Name of database (i.e. main, temp etc.) 
*/ \ + "triggername," /* Name of trigger */ \ + "dummy" /* Unused */ \ +")" + +typedef struct SchemaTable SchemaTable; +struct SchemaTable { + const char *zName; + const char *zObject; + const char *zPragma; + const char *zSchema; +} aSchemaTable[] = { + { "table_info", "table", "PRAGMA %Q.table_info(%Q)", SCHEMA }, + { "foreign_key_list", "table", "PRAGMA %Q.foreign_key_list(%Q)", SCHEMA2 }, + { "index_list", "table", "PRAGMA %Q.index_list(%Q)", SCHEMA3 }, + { "index_info", "index", "PRAGMA %Q.index_info(%Q)", SCHEMA4 }, + { "trigger_list", "trigger", "SELECT 1", SCHEMA5 }, + { 0, 0, 0, 0 } +}; + +typedef struct schema_vtab schema_vtab; +typedef struct schema_cursor schema_cursor; + +/* A schema table object */ +struct schema_vtab { + sqlite3_vtab base; + sqlite3 *db; + SchemaTable *pType; +}; + +/* A schema table cursor object */ +struct schema_cursor { + sqlite3_vtab_cursor base; + sqlite3_stmt *pDbList; + sqlite3_stmt *pTableList; + sqlite3_stmt *pColumnList; + int rowid; +}; + +/* +** Table destructor for the schema module. +*/ +static int schemaDestroy(sqlite3_vtab *pVtab){ + sqlite3_free(pVtab); + return 0; +} + +/* +** Table constructor for the schema module. +*/ +static int schemaCreate( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + int rc = SQLITE_NOMEM; + schema_vtab *pVtab; + SchemaTable *pType = &aSchemaTable[0]; + + UNUSED_PARAMETER(pzErr); + if( argc>3 ){ + int i; + pType = 0; + for(i=0; aSchemaTable[i].zName; i++){ + if( 0==strcmp(argv[3], aSchemaTable[i].zName) ){ + pType = &aSchemaTable[i]; + } + } + if( !pType ){ + return SQLITE_ERROR; + } + } + + pVtab = sqlite3_malloc(sizeof(schema_vtab)); + if( pVtab ){ + memset(pVtab, 0, sizeof(schema_vtab)); + pVtab->db = (sqlite3 *)pAux; + pVtab->pType = pType; + rc = sqlite3_declare_vtab(db, pType->zSchema); + } + *ppVtab = (sqlite3_vtab *)pVtab; + return rc; +} + +/* +** Open a new cursor on the schema table. +*/ +static int schemaOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + int rc = SQLITE_NOMEM; + schema_cursor *pCur; + UNUSED_PARAMETER(pVTab); + pCur = sqlite3_malloc(sizeof(schema_cursor)); + if( pCur ){ + memset(pCur, 0, sizeof(schema_cursor)); + *ppCursor = (sqlite3_vtab_cursor *)pCur; + rc = SQLITE_OK; + } + return rc; +} + +/* +** Close a schema table cursor. +*/ +static int schemaClose(sqlite3_vtab_cursor *cur){ + schema_cursor *pCur = (schema_cursor *)cur; + sqlite3_finalize(pCur->pDbList); + sqlite3_finalize(pCur->pTableList); + sqlite3_finalize(pCur->pColumnList); + sqlite3_free(pCur); + return SQLITE_OK; +} + +static void columnToResult(sqlite3_context *ctx, sqlite3_stmt *pStmt, int iCol){ + switch( sqlite3_column_type(pStmt, iCol) ){ + case SQLITE_NULL: + sqlite3_result_null(ctx); + break; + case SQLITE_INTEGER: + sqlite3_result_int64(ctx, sqlite3_column_int64(pStmt, iCol)); + break; + case SQLITE_FLOAT: + sqlite3_result_double(ctx, sqlite3_column_double(pStmt, iCol)); + break; + case SQLITE_TEXT: { + const char *z = (const char *)sqlite3_column_text(pStmt, iCol); + sqlite3_result_text(ctx, z, -1, SQLITE_TRANSIENT); + break; + } + } +} + +/* +** Retrieve a column of data. 
+*/ +static int schemaColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ + schema_cursor *pCur = (schema_cursor *)cur; + switch( i ){ + case 0: + columnToResult(ctx, pCur->pDbList, 1); + break; + case 1: + columnToResult(ctx, pCur->pTableList, 0); + break; + default: + columnToResult(ctx, pCur->pColumnList, i-2); + break; + } + return SQLITE_OK; +} + +/* +** Retrieve the current rowid. +*/ +static int schemaRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){ + schema_cursor *pCur = (schema_cursor *)cur; + *pRowid = pCur->rowid; + return SQLITE_OK; +} + +static int finalize(sqlite3_stmt **ppStmt){ + int rc = sqlite3_finalize(*ppStmt); + *ppStmt = 0; + return rc; +} + +static int schemaEof(sqlite3_vtab_cursor *cur){ + schema_cursor *pCur = (schema_cursor *)cur; + return (pCur->pDbList ? 0 : 1); +} + +/* +** Advance the cursor to the next row. +*/ +static int schemaNext(sqlite3_vtab_cursor *cur){ + int rc = SQLITE_OK; + schema_cursor *pCur = (schema_cursor *)cur; + schema_vtab *pVtab = (schema_vtab *)(cur->pVtab); + char *zSql = 0; + + while( !pCur->pColumnList || SQLITE_ROW!=sqlite3_step(pCur->pColumnList) ){ + if( SQLITE_OK!=(rc = finalize(&pCur->pColumnList)) ) goto next_exit; + + while( !pCur->pTableList || SQLITE_ROW!=sqlite3_step(pCur->pTableList) ){ + if( SQLITE_OK!=(rc = finalize(&pCur->pTableList)) ) goto next_exit; + + assert(pCur->pDbList); + while( SQLITE_ROW!=sqlite3_step(pCur->pDbList) ){ + rc = finalize(&pCur->pDbList); + goto next_exit; + } + + /* Set zSql to the SQL to pull the list of tables from the + ** sqlite_master (or sqlite_temp_master) table of the database + ** identfied by the row pointed to by the SQL statement pCur->pDbList + ** (iterating through a "PRAGMA database_list;" statement). + */ + if( sqlite3_column_int(pCur->pDbList, 0)==1 ){ + zSql = sqlite3_mprintf( + "SELECT name FROM sqlite_temp_master WHERE type=%Q", + pVtab->pType->zObject + ); + }else{ + sqlite3_stmt *pDbList = pCur->pDbList; + zSql = sqlite3_mprintf( + "SELECT name FROM %Q.sqlite_master WHERE type=%Q", + sqlite3_column_text(pDbList, 1), pVtab->pType->zObject + ); + } + if( !zSql ){ + rc = SQLITE_NOMEM; + goto next_exit; + } + + rc = sqlite3_prepare(pVtab->db, zSql, -1, &pCur->pTableList, 0); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) goto next_exit; + } + + /* Set zSql to the SQL to the table_info pragma for the table currently + ** identified by the rows pointed to by statements pCur->pDbList and + ** pCur->pTableList. + */ + zSql = sqlite3_mprintf(pVtab->pType->zPragma, + sqlite3_column_text(pCur->pDbList, 1), + sqlite3_column_text(pCur->pTableList, 0) + ); + + if( !zSql ){ + rc = SQLITE_NOMEM; + goto next_exit; + } + rc = sqlite3_prepare(pVtab->db, zSql, -1, &pCur->pColumnList, 0); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) goto next_exit; + } + pCur->rowid++; + +next_exit: + /* TODO: Handle rc */ + return rc; +} + +/* +** Reset a schema table cursor. +*/ +static int schemaFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + int rc; + schema_vtab *pVtab = (schema_vtab *)(pVtabCursor->pVtab); + schema_cursor *pCur = (schema_cursor *)pVtabCursor; + UNUSED_PARAMETER(idxNum); + UNUSED_PARAMETER(idxStr); + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + pCur->rowid = 0; + finalize(&pCur->pTableList); + finalize(&pCur->pColumnList); + finalize(&pCur->pDbList); + rc = sqlite3_prepare(pVtab->db,"SELECT 0, 'main'", -1, &pCur->pDbList, 0); + return (rc==SQLITE_OK ? 
schemaNext(pVtabCursor) : rc); +} + +/* +** Analyse the WHERE condition. +*/ +static int schemaBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + UNUSED_PARAMETER(tab); + UNUSED_PARAMETER(pIdxInfo); + return SQLITE_OK; +} + +/* +** A virtual table module that merely echos method calls into TCL +** variables. +*/ +static sqlite3_module schemaModule = { + 0, /* iVersion */ + schemaCreate, + schemaCreate, + schemaBestIndex, + schemaDestroy, + schemaDestroy, + schemaOpen, /* xOpen - open a cursor */ + schemaClose, /* xClose - close a cursor */ + schemaFilter, /* xFilter - configure scan constraints */ + schemaNext, /* xNext - advance a cursor */ + schemaEof, /* xEof */ + schemaColumn, /* xColumn - read data */ + schemaRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ +}; + +/* +** Extension load function. +*/ +static int installSchemaModule(sqlite3 *db, sqlite3 *sdb){ + sqlite3_create_module(db, "schema", &schemaModule, (void *)sdb); + return 0; +} + +/* +** sj(zValue, zJoin) +** +** The following block contains the implementation of an aggregate +** function that returns a string. Each time the function is stepped, +** it appends data to an internal buffer. When the aggregate is finalized, +** the contents of the buffer are returned. +** +** The first time the aggregate is stepped the buffer is set to a copy +** of the first argument. The second time and subsequent times it is +** stepped a copy of the second argument is appended to the buffer, then +** a copy of the first. +** +** Example: +** +** INSERT INTO t1(a) VALUES('1'); +** INSERT INTO t1(a) VALUES('2'); +** INSERT INTO t1(a) VALUES('3'); +** SELECT sj(a, ', ') FROM t1; +** +** => "1, 2, 3" +** +*/ +struct StrBuffer { + char *zBuf; +}; +typedef struct StrBuffer StrBuffer; +static void joinFinalize(sqlite3_context *context){ + StrBuffer *p; + p = (StrBuffer *)sqlite3_aggregate_context(context, sizeof(StrBuffer)); + sqlite3_result_text(context, p->zBuf, -1, SQLITE_TRANSIENT); + sqlite3_free(p->zBuf); +} +static void joinStep( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + StrBuffer *p; + UNUSED_PARAMETER(argc); + p = (StrBuffer *)sqlite3_aggregate_context(context, sizeof(StrBuffer)); + if( p->zBuf==0 ){ + p->zBuf = sqlite3_mprintf("%s", sqlite3_value_text(argv[0])); + }else{ + char *zTmp = p->zBuf; + p->zBuf = sqlite3_mprintf("%s%s%s", + zTmp, sqlite3_value_text(argv[1]), sqlite3_value_text(argv[0]) + ); + sqlite3_free(zTmp); + } +} + +/* +** dq(zString) +** +** This scalar function accepts a single argument and interprets it as +** a text value. The return value is the argument enclosed in double +** quotes. If any double quote characters are present in the argument, +** these are escaped. 
+** +** dq('the raven "Nevermore."') == '"the raven ""Nevermore."""' +*/ +static void doublequote( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int ii; + char *zOut; + char *zCsr; + const char *zIn = (const char *)sqlite3_value_text(argv[0]); + int nIn = sqlite3_value_bytes(argv[0]); + + UNUSED_PARAMETER(argc); + zOut = sqlite3_malloc(nIn*2+3); + zCsr = zOut; + *zCsr++ = '"'; + for(ii=0; iinMalloc ){ + char *zNew; + nMalloc = 16 + (nOut+nCopy)*2; + zNew = (char*)sqlite3_realloc(zOut, nMalloc); + if( zNew==0 ){ + sqlite3_result_error_nomem(context); + return; + }else{ + zOut = zNew; + } + } + assert( nMalloc>=(nOut+nCopy) ); + memcpy(&zOut[nOut], zCopy, nCopy); + i += nReplace; + nOut += nCopy; + } + + sqlite3_result_text(context, zOut, nOut, SQLITE_TRANSIENT); + sqlite3_free(zOut); +} + +/* +** A callback for sqlite3_exec() invokes the callback specified by the +** GenfkeyCb structure pointed to by the void* passed as the first argument. +*/ +static int invokeCallback(void *p, int nArg, char **azArg, char **azCol){ + GenfkeyCb *pCb = (GenfkeyCb *)p; + UNUSED_PARAMETER(nArg); + UNUSED_PARAMETER(azCol); + return pCb->xData(pCb->pCtx, pCb->eType, azArg[0]); +} + +int detectSchemaProblem( + sqlite3 *db, /* Database connection */ + const char *zMessage, /* English language error message */ + const char *zSql, /* SQL statement to run */ + GenfkeyCb *pCb +){ + sqlite3_stmt *pStmt; + int rc; + rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + while( SQLITE_ROW==sqlite3_step(pStmt) ){ + char *zDel; + int iFk = sqlite3_column_int(pStmt, 0); + const char *zTab = (const char *)sqlite3_column_text(pStmt, 1); + zDel = sqlite3_mprintf("Error in table %s: %s", zTab, zMessage); + rc = pCb->xData(pCb->pCtx, pCb->eType, zDel); + sqlite3_free(zDel); + if( rc!=SQLITE_OK ) return rc; + zDel = sqlite3_mprintf( + "DELETE FROM temp.fkey WHERE from_tbl = %Q AND fkid = %d" + , zTab, iFk + ); + sqlite3_exec(db, zDel, 0, 0, 0); + sqlite3_free(zDel); + } + sqlite3_finalize(pStmt); + return SQLITE_OK; +} + +/* +** Create and populate temporary table "fkey". +*/ +static int populateTempTable(sqlite3 *db, GenfkeyCb *pCallback){ + int rc; + + rc = sqlite3_exec(db, + "CREATE VIRTUAL TABLE temp.v_fkey USING schema(foreign_key_list);" + "CREATE VIRTUAL TABLE temp.v_col USING schema(table_info);" + "CREATE VIRTUAL TABLE temp.v_idxlist USING schema(index_list);" + "CREATE VIRTUAL TABLE temp.v_idxinfo USING schema(index_info);" + "CREATE VIRTUAL TABLE temp.v_triggers USING schema(trigger_list);" + "CREATE TABLE temp.fkey AS " + "SELECT from_tbl, to_tbl, fkid, from_col, to_col, on_update, on_delete " + "FROM temp.v_fkey WHERE database = 'main';" + , 0, 0, 0 + ); + if( rc!=SQLITE_OK ) return rc; + + rc = detectSchemaProblem(db, "foreign key columns do not exist", + "SELECT fkid, from_tbl " + "FROM temp.fkey " + "WHERE to_col IS NOT NULL AND NOT EXISTS (SELECT 1 " + "FROM temp.v_col WHERE tablename=to_tbl AND name==to_col" + ")", pCallback + ); + if( rc!=SQLITE_OK ) return rc; + + /* At this point the temp.fkey table is mostly populated. If any foreign + ** keys were specified so that they implicitly refer to they primary + ** key of the parent table, the "to_col" values of the temp.fkey rows + ** are still set to NULL. + ** + ** This is easily fixed for single column primary keys, but not for + ** composites. 
With a composite primary key, there is no way to reliably + ** query sqlite for the order in which the columns that make up the + ** composite key were declared i.e. there is no way to tell if the + ** schema actually contains "PRIMARY KEY(a, b)" or "PRIMARY KEY(b, a)". + ** Therefore, this case is not handled. The following function call + ** detects instances of this case. + */ + rc = detectSchemaProblem(db, "implicit mapping to composite primary key", + "SELECT fkid, from_tbl " + "FROM temp.fkey " + "WHERE to_col IS NULL " + "GROUP BY fkid, from_tbl HAVING count(*) > 1", pCallback + ); + if( rc!=SQLITE_OK ) return rc; + + /* Detect attempts to implicitly map to the primary key of a table + ** that has no primary key column. + */ + rc = detectSchemaProblem(db, "implicit mapping to non-existant primary key", + "SELECT fkid, from_tbl " + "FROM temp.fkey " + "WHERE to_col IS NULL AND NOT EXISTS " + "(SELECT 1 FROM temp.v_col WHERE pk AND tablename = temp.fkey.to_tbl)" + , pCallback + ); + if( rc!=SQLITE_OK ) return rc; + + /* Fix all the implicit primary key mappings in the temp.fkey table. */ + rc = sqlite3_exec(db, + "UPDATE temp.fkey SET to_col = " + "(SELECT name FROM temp.v_col WHERE pk AND tablename=temp.fkey.to_tbl)" + " WHERE to_col IS NULL;" + , 0, 0, 0 + ); + if( rc!=SQLITE_OK ) return rc; + + /* Now check that all all parent keys are either primary keys or + ** subject to a unique constraint. + */ + rc = sqlite3_exec(db, + "CREATE TABLE temp.idx2 AS SELECT " + "il.tablename AS tablename," + "ii.indexname AS indexname," + "ii.name AS col " + "FROM temp.v_idxlist AS il, temp.v_idxinfo AS ii " + "WHERE il.isunique AND il.database='main' AND ii.indexname = il.name;" + "INSERT INTO temp.idx2 " + "SELECT tablename, 'pk', name FROM temp.v_col WHERE pk;" + + "CREATE TABLE temp.idx AS SELECT " + "tablename, indexname, sj(dq(col),',') AS cols " + "FROM (SELECT * FROM temp.idx2 ORDER BY col) " + "GROUP BY tablename, indexname;" + + "CREATE TABLE temp.fkey2 AS SELECT " + "fkid, from_tbl, to_tbl, sj(dq(to_col),',') AS cols " + "FROM (SELECT * FROM temp.fkey ORDER BY to_col) " + "GROUP BY fkid, from_tbl;" + + "CREATE TABLE temp.triggers AS SELECT " + "triggername FROM temp.v_triggers WHERE database='main' AND " + "triggername LIKE 'genfkey%';" + , 0, 0, 0 + ); + if( rc!=SQLITE_OK ) return rc; + rc = detectSchemaProblem(db, "foreign key is not unique", + "SELECT fkid, from_tbl " + "FROM temp.fkey2 " + "WHERE NOT EXISTS (SELECT 1 " + "FROM temp.idx WHERE tablename=to_tbl AND fkey2.cols==idx.cols" + ")", pCallback + ); + if( rc!=SQLITE_OK ) return rc; + + return rc; +} + +#define GENFKEY_ERROR 1 +#define GENFKEY_DROPTRIGGER 2 +#define GENFKEY_CREATETRIGGER 3 +static int genfkey_create_triggers( + sqlite3 *sdb, /* Connection to read schema from */ + const char *zDb, /* Name of db to read ("main", "temp") */ + void *pCtx, /* Context pointer to pass to xData */ + int (*xData)(void *, int, const char *) +){ + const char *zSql = + "SELECT multireplace('" + + "-- Triggers for foreign key mapping:\n" + "--\n" + "-- /from_readable/ REFERENCES /to_readable/\n" + "-- on delete /on_delete/\n" + "-- on update /on_update/\n" + "--\n" + + /* The "BEFORE INSERT ON " trigger. This trigger's job is to + ** throw an exception if the user tries to insert a row into the + ** referencing table for which there is no corresponding row in + ** the referenced table. 
+ */ + "CREATE TRIGGER /name/_insert_referencing BEFORE INSERT ON /tbl/ WHEN \n" + " /key_notnull/ AND NOT EXISTS (SELECT 1 FROM /ref/ WHERE /cond1/)\n" + "BEGIN\n" + " SELECT RAISE(ABORT, ''constraint failed'');\n" + "END;\n" + + /* The "BEFORE UPDATE ON " trigger. This trigger's job + ** is to throw an exception if the user tries to update a row in the + ** referencing table causing it to correspond to no row in the + ** referenced table. + */ + "CREATE TRIGGER /name/_update_referencing BEFORE\n" + " UPDATE OF /rkey_list/ ON /tbl/ WHEN \n" + " /key_notnull/ AND \n" + " NOT EXISTS (SELECT 1 FROM /ref/ WHERE /cond1/)\n" + "BEGIN\n" + " SELECT RAISE(ABORT, ''constraint failed'');\n" + "END;\n" + + + /* The "BEFORE DELETE ON " trigger. This trigger's job + ** is to detect when a row is deleted from the referenced table to + ** which rows in the referencing table correspond. The action taken + ** depends on the value of the 'ON DELETE' clause. + */ + "CREATE TRIGGER /name/_delete_referenced BEFORE DELETE ON /ref/ WHEN\n" + " EXISTS (SELECT 1 FROM /tbl/ WHERE /cond2/)\n" + "BEGIN\n" + " /delete_action/\n" + "END;\n" + + /* The "BEFORE DELETE ON " trigger. This trigger's job + ** is to detect when the key columns of a row in the referenced table + ** to which one or more rows in the referencing table correspond are + ** updated. The action taken depends on the value of the 'ON UPDATE' + ** clause. + */ + "CREATE TRIGGER /name/_update_referenced AFTER\n" + " UPDATE OF /fkey_list/ ON /ref/ WHEN \n" + " EXISTS (SELECT 1 FROM /tbl/ WHERE /cond2/)\n" + "BEGIN\n" + " /update_action/\n" + "END;\n" + "'" + + /* These are used in the SQL comment written above each set of triggers */ + ", '/from_readable/', from_tbl || '(' || sj(from_col, ', ') || ')'" + ", '/to_readable/', to_tbl || '(' || sj(to_col, ', ') || ')'" + ", '/on_delete/', on_delete" + ", '/on_update/', on_update" + + ", '/name/', 'genfkey' || min(rowid)" + ", '/tbl/', dq(from_tbl)" + ", '/ref/', dq(to_tbl)" + ", '/key_notnull/', sj('new.' 
|| dq(from_col) || ' IS NOT NULL', ' AND ')" + + ", '/fkey_list/', sj(to_col, ', ')" + ", '/rkey_list/', sj(from_col, ', ')" + + ", '/cond1/', sj(multireplace('new./from/ == /to/'" + ", '/from/', dq(from_col)" + ", '/to/', dq(to_col)" + "), ' AND ')" + ", '/cond2/', sj(multireplace('old./to/ == /from/'" + ", '/from/', dq(from_col)" + ", '/to/', dq(to_col)" + "), ' AND ')" + + ", '/update_action/', CASE on_update " + "WHEN 'SET NULL' THEN " + "multireplace('UPDATE /tbl/ SET /setlist/ WHERE /where/;' " + ", '/setlist/', sj(from_col||' = NULL',', ')" + ", '/tbl/', dq(from_tbl)" + ", '/where/', sj(from_col||' = old.'||dq(to_col),' AND ')" + ")" + "WHEN 'CASCADE' THEN " + "multireplace('UPDATE /tbl/ SET /setlist/ WHERE /where/;' " + ", '/setlist/', sj(dq(from_col)||' = new.'||dq(to_col),', ')" + ", '/tbl/', dq(from_tbl)" + ", '/where/', sj(dq(from_col)||' = old.'||dq(to_col),' AND ')" + ")" + "ELSE " + " 'SELECT RAISE(ABORT, ''constraint failed'');'" + "END " + + ", '/delete_action/', CASE on_delete " + "WHEN 'SET NULL' THEN " + "multireplace('UPDATE /tbl/ SET /setlist/ WHERE /where/;' " + ", '/setlist/', sj(from_col||' = NULL',', ')" + ", '/tbl/', dq(from_tbl)" + ", '/where/', sj(from_col||' = old.'||dq(to_col),' AND ')" + ")" + "WHEN 'CASCADE' THEN " + "multireplace('DELETE FROM /tbl/ WHERE /where/;' " + ", '/tbl/', dq(from_tbl)" + ", '/where/', sj(dq(from_col)||' = old.'||dq(to_col),' AND ')" + ")" + "ELSE " + " 'SELECT RAISE(ABORT, ''constraint failed'');'" + "END " + + ") FROM temp.fkey " + "GROUP BY from_tbl, fkid" + ; + + int rc; + const int enc = SQLITE_UTF8; + sqlite3 *db = 0; + + GenfkeyCb cb; + cb.xData = xData; + cb.pCtx = pCtx; + + UNUSED_PARAMETER(zDb); + + /* Open the working database handle. */ + rc = sqlite3_open(":memory:", &db); + if( rc!=SQLITE_OK ) goto genfkey_exit; + + /* Create the special scalar and aggregate functions used by this program. */ + sqlite3_create_function(db, "dq", 1, enc, 0, doublequote, 0, 0); + sqlite3_create_function(db, "multireplace", -1, enc, db, multireplace, 0, 0); + sqlite3_create_function(db, "sj", 2, enc, 0, 0, joinStep, joinFinalize); + + /* Install the "schema" virtual table module */ + installSchemaModule(db, sdb); + + /* Create and populate a temp table with the information required to + ** build the foreign key triggers. See function populateTempTable() + ** for details. + */ + cb.eType = GENFKEY_ERROR; + rc = populateTempTable(db, &cb); + if( rc!=SQLITE_OK ) goto genfkey_exit; + + /* Unless the --no-drop option was specified, generate DROP TRIGGER + ** statements to drop any triggers in the database generated by a + ** previous run of this program. + */ + cb.eType = GENFKEY_DROPTRIGGER; + rc = sqlite3_exec(db, + "SELECT 'DROP TRIGGER main.' || dq(triggername) || ';' FROM triggers" + ,invokeCallback, (void *)&cb, 0 + ); + if( rc!=SQLITE_OK ) goto genfkey_exit; + + /* Run the main query to create the trigger definitions. */ + cb.eType = GENFKEY_CREATETRIGGER; + rc = sqlite3_exec(db, zSql, invokeCallback, (void *)&cb, 0); + if( rc!=SQLITE_OK ) goto genfkey_exit; + +genfkey_exit: + sqlite3_close(db); + return rc; +} + + +#endif +/* End genfkey logic. */ +/*************************************************************************/ +/*************************************************************************/ + /* ** If the following flag is set, then command execution stops ** at an error if we are not interactive. 
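
The genfkey logic above reports everything through the xData callback passed to genfkey_create_triggers(): eType is one of GENFKEY_ERROR, GENFKEY_DROPTRIGGER or GENFKEY_CREATETRIGGER, and z is either an error message or a complete SQL statement. A minimal sketch of such a callback, assuming it is compiled in the same file as the static helpers above; the names echoGenfkeySql and echoGenfkeyTriggers are hypothetical:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Print each generated statement; report schema problems on stderr. */
    static int echoGenfkeySql(void *pCtx, int eType, const char *z){
      (void)pCtx;
      if( eType==GENFKEY_ERROR ){
        fprintf(stderr, "genfkey: %s\n", z);
      }else{
        printf("%s\n", z);          /* DROP TRIGGER / CREATE TRIGGER text */
      }
      return SQLITE_OK;             /* a non-zero return stops generation */
    }

    /* Emit foreign-key enforcement triggers for the "main" database of db. */
    static int echoGenfkeyTriggers(sqlite3 *db){
      return genfkey_create_triggers(db, "main", 0, echoGenfkeySql);
    }
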
@@ -168,6 +1089,8 @@ ){ assert( 0==argc ); assert( zShellStatic ); + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); sqlite3_result_text(context, zShellStatic, -1, SQLITE_STATIC); } @@ -290,9 +1213,9 @@ #define MODE_Insert 5 /* Generate SQL "insert" statements */ #define MODE_Tcl 6 /* Generate ANSI-C or TCL quoted elements */ #define MODE_Csv 7 /* Quote strings, numbers are plain */ -#define MODE_NUM_OF 8 /* The number of modes (not a mode itself) */ +#define MODE_Explain 8 /* Like MODE_Column, but do not truncate data */ -static const char *modeDescr[MODE_NUM_OF] = { +static const char *modeDescr[] = { "line", "column", "list", @@ -301,12 +1224,23 @@ "insert", "tcl", "csv", + "explain", }; /* ** Number of elements in an array */ -#define ArraySize(X) (sizeof(X)/sizeof(X[0])) +#define ArraySize(X) (int)(sizeof(X)/sizeof(X[0])) + +/* +** Compute a string length that is limited to what can be stored in +** lower 30 bits of a 32-bit signed integer. +*/ +static int strlen30(const char *z){ + const char *z2 = z; + while( *z2 ){ z2++; } + return 0x3fffffff & (int)(z2 - z); +} /* ** Output the given string as a quoted string using SQL quoting conventions. @@ -423,8 +1357,11 @@ fprintf(out,"%s",p->nullvalue); }else{ int i; + int nSep = strlen30(p->separator); for(i=0; z[i]; i++){ - if( needCsvQuote[((unsigned char*)z)[i]] ){ + if( needCsvQuote[((unsigned char*)z)[i]] + || (z[i]==p->separator[0] && + (nSep==1 || memcmp(z, p->separator, nSep)==0)) ){ i = 0; break; } @@ -441,7 +1378,7 @@ } } if( bSep ){ - fprintf(p->out, p->separator); + fprintf(p->out, "%s", p->separator); } } @@ -450,6 +1387,7 @@ ** This routine runs when the user presses Ctrl-C */ static void interrupt_handler(int NotUsed){ + UNUSED_PARAMETER(NotUsed); seenInterrupt = 1; if( db ) sqlite3_interrupt(db); } @@ -467,7 +1405,7 @@ int w = 5; if( azArg==0 ) break; for(i=0; iw ) w = len; } if( p->cnt++>0 ) fprintf(p->out,"\n"); @@ -477,19 +1415,20 @@ } break; } + case MODE_Explain: case MODE_Column: { if( p->cnt++==0 ){ for(i=0; icolWidth) ){ - w = p->colWidth[i]; + w = p->colWidth[i]; }else{ - w = 0; + w = 0; } if( w<=0 ){ - w = strlen(azCol[i] ? azCol[i] : ""); + w = strlen30(azCol[i] ? azCol[i] : ""); if( w<10 ) w = 10; - n = strlen(azArg && azArg[i] ? azArg[i] : p->nullvalue); + n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullvalue); if( wactualWidth) ){ @@ -521,6 +1460,10 @@ }else{ w = 10; } + if( p->mode==MODE_Explain && azArg[i] && + strlen30(azArg[i])>w ){ + w = strlen30(azArg[i]); + } fprintf(p->out,"%-*.*s%s",w,w, azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": " "); } @@ -666,8 +1609,8 @@ static char *appendText(char *zIn, char const *zAppend, char quote){ int len; int i; - int nAppend = strlen(zAppend); - int nIn = (zIn?strlen(zIn):0); + int nAppend = strlen30(zAppend); + int nIn = (zIn?strlen30(zIn):0); len = nAppend+nIn+1; if( quote ){ @@ -708,7 +1651,12 @@ ** This is used, for example, to show the schema of the database by ** querying the SQLITE_MASTER table. 
*/ -static int run_table_dump_query(FILE *out, sqlite3 *db, const char *zSelect){ +static int run_table_dump_query( + FILE *out, /* Send output here */ + sqlite3 *db, /* Database to query */ + const char *zSelect, /* SELECT statement to extract content */ + const char *zFirstRow /* Print before first row, if not NULL */ +){ sqlite3_stmt *pSelect; int rc; rc = sqlite3_prepare(db, zSelect, -1, &pSelect, 0); @@ -717,6 +1665,10 @@ } rc = sqlite3_step(pSelect); while( rc==SQLITE_ROW ){ + if( zFirstRow ){ + fprintf(out, "%s", zFirstRow); + zFirstRow = 0; + } fprintf(out, "%s;\n", sqlite3_column_text(pSelect, 0)); rc = sqlite3_step(pSelect); } @@ -735,15 +1687,17 @@ const char *zTable; const char *zType; const char *zSql; + const char *zPrepStmt = 0; struct callback_data *p = (struct callback_data *)pArg; + UNUSED_PARAMETER(azCol); if( nArg!=3 ) return 1; zTable = azArg[0]; zType = azArg[1]; zSql = azArg[2]; if( strcmp(zTable, "sqlite_sequence")==0 ){ - fprintf(p->out, "DELETE FROM sqlite_sequence;\n"); + zPrepStmt = "DELETE FROM sqlite_sequence;\n"; }else if( strcmp(zTable, "sqlite_stat1")==0 ){ fprintf(p->out, "ANALYZE sqlite_master;\n"); }else if( strncmp(zTable, "sqlite_", 7)==0 ){ @@ -770,13 +1724,14 @@ char *zSelect = 0; char *zTableInfo = 0; char *zTmp = 0; + int nRow = 0; zTableInfo = appendText(zTableInfo, "PRAGMA table_info(", 0); zTableInfo = appendText(zTableInfo, zTable, '"'); zTableInfo = appendText(zTableInfo, ");", 0); rc = sqlite3_prepare(p->db, zTableInfo, -1, &pTableInfo, 0); - if( zTableInfo ) free(zTableInfo); + free(zTableInfo); if( rc!=SQLITE_OK || !pTableInfo ){ return 1; } @@ -798,19 +1753,20 @@ }else{ zSelect = appendText(zSelect, ") ", 0); } + nRow++; } rc = sqlite3_finalize(pTableInfo); - if( rc!=SQLITE_OK ){ - if( zSelect ) free(zSelect); + if( rc!=SQLITE_OK || nRow==0 ){ + free(zSelect); return 1; } zSelect = appendText(zSelect, "|| ')' FROM ", 0); zSelect = appendText(zSelect, zTable, '"'); - rc = run_table_dump_query(p->out, p->db, zSelect); + rc = run_table_dump_query(p->out, p->db, zSelect, zPrepStmt); if( rc==SQLITE_CORRUPT ){ zSelect = appendText(zSelect, " ORDER BY rowid DESC", 0); - rc = run_table_dump_query(p->out, p->db, zSelect); + rc = run_table_dump_query(p->out, p->db, zSelect, 0); } if( zSelect ) free(zSelect); } @@ -833,7 +1789,7 @@ rc = sqlite3_exec(p->db, zQuery, dump_callback, p, pzErrMsg); if( rc==SQLITE_CORRUPT ){ char *zQ2; - int len = strlen(zQuery); + int len = strlen30(zQuery); if( pzErrMsg ) sqlite3_free(*pzErrMsg); zQ2 = malloc( len+100 ); if( zQ2==0 ) return rc; @@ -844,16 +1800,81 @@ return rc; } +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_SUBQUERY) +struct GenfkeyCmd { + sqlite3 *db; /* Database handle */ + struct callback_data *pCb; /* Callback data */ + int isIgnoreErrors; /* True for --ignore-errors */ + int isExec; /* True for --exec */ + int isNoDrop; /* True for --no-drop */ + int nErr; /* Number of errors seen so far */ +}; +typedef struct GenfkeyCmd GenfkeyCmd; + +static int genfkeyParseArgs(GenfkeyCmd *p, char **azArg, int nArg){ + int ii; + memset(p, 0, sizeof(GenfkeyCmd)); + + for(ii=0; ii2 && n<10 && 0==strncmp(azArg[ii], "--no-drop", n) ){ + p->isNoDrop = 1; + }else if( n>2 && n<16 && 0==strncmp(azArg[ii], "--ignore-errors", n) ){ + p->isIgnoreErrors = 1; + }else if( n>2 && n<7 && 0==strncmp(azArg[ii], "--exec", n) ){ + p->isExec = 1; + }else{ + fprintf(stderr, "unknown option: %s\n", azArg[ii]); + return -1; + } + } + + return SQLITE_OK; +} + +static int genfkeyCmdCb(void *pCtx, int eType, const char 
*z){ + GenfkeyCmd *p = (GenfkeyCmd *)pCtx; + if( eType==GENFKEY_ERROR && !p->isIgnoreErrors ){ + p->nErr++; + fprintf(stderr, "%s\n", z); + } + + if( p->nErr==0 && ( + (eType==GENFKEY_CREATETRIGGER) + || (eType==GENFKEY_DROPTRIGGER && !p->isNoDrop) + )){ + if( p->isExec ){ + sqlite3_exec(p->db, z, 0, 0, 0); + }else{ + char *zCol = "sql"; + callback((void *)p->pCb, 1, (char **)&z, (char **)&zCol); + } + } + + return SQLITE_OK; +} +#endif + /* ** Text of a help message */ static char zHelp[] = + ".backup ?DB? FILE Backup DB (default \"main\") to FILE\n" ".bail ON|OFF Stop after hitting an error. Default OFF\n" ".databases List names and files of attached databases\n" ".dump ?TABLE? ... Dump the database in an SQL text format\n" ".echo ON|OFF Turn command echo on or off\n" ".exit Exit this program\n" ".explain ON|OFF Turn output mode suitable for EXPLAIN on or off.\n" +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_SUBQUERY) + ".genfkey ?OPTIONS? Options are:\n" + " --no-drop: Do not drop old fkey triggers.\n" + " --ignore-errors: Ignore tables with fkey errors\n" + " --exec: Execute generated SQL immediately\n" + " See file tool/genfkey.README in the source \n" + " distribution for further information.\n" +#endif ".header(s) ON|OFF Turn display of headers on or off\n" ".help Show this message\n" ".import FILE TABLE Import data from FILE into TABLE\n" @@ -879,11 +1900,15 @@ ".prompt MAIN CONTINUE Replace the standard prompts\n" ".quit Exit this program\n" ".read FILENAME Execute SQL in FILENAME\n" + ".restore ?DB? FILE Restore content of DB (default \"main\") from FILE\n" ".schema ?TABLE? Show the CREATE statements\n" ".separator STRING Change separator used by output mode and .import\n" ".show Show the current values for various settings\n" ".tables ?PATTERN? List names of tables matching a LIKE pattern\n" ".timeout MS Try opening locked tables for MS milliseconds\n" +#if HAS_TIMER + ".timer ON|OFF Turn the CPU timer measurement on or off\n" +#endif ".width NUM NUM ... Set column widths for \"column\" mode\n" ; @@ -898,9 +1923,11 @@ if( p->db==0 ){ sqlite3_open(p->zDbFilename, &p->db); db = p->db; - sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0, - shellstaticFunc, 0, 0); - if( SQLITE_OK!=sqlite3_errcode(db) ){ + if( db && sqlite3_errcode(db)==SQLITE_OK ){ + sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0, + shellstaticFunc, 0, 0); + } + if( db==0 || SQLITE_OK!=sqlite3_errcode(db) ){ fprintf(stderr,"Unable to open database \"%s\": %s\n", p->zDbFilename, sqlite3_errmsg(db)); exit(1); @@ -921,7 +1948,8 @@ ** \\ -> backslash */ static void resolve_backslashes(char *z){ - int i, j, c; + int i, j; + char c; for(i=j=0; (c = z[i])!=0; i++, j++){ if( c=='\\' ){ c = z[++i]; @@ -955,7 +1983,7 @@ int val = atoi(zArg); int j; for(j=0; zArg[j]; j++){ - zArg[j] = tolower(zArg[j]); + zArg[j] = (char)tolower(zArg[j]); } if( strcmp(zArg,"on")==0 ){ val = 1; @@ -1002,9 +2030,45 @@ /* Process the input line. 
*/ if( nArg==0 ) return rc; - n = strlen(azArg[0]); + n = strlen30(azArg[0]); c = azArg[0][0]; - if( c=='b' && n>1 && strncmp(azArg[0], "bail", n)==0 && nArg>1 ){ + if( c=='b' && n>=3 && strncmp(azArg[0], "backup", n)==0 && nArg>1 ){ + const char *zDestFile; + const char *zDb; + sqlite3 *pDest; + sqlite3_backup *pBackup; + int rc; + if( nArg==2 ){ + zDestFile = azArg[1]; + zDb = "main"; + }else{ + zDestFile = azArg[2]; + zDb = azArg[1]; + } + rc = sqlite3_open(zDestFile, &pDest); + if( rc!=SQLITE_OK ){ + fprintf(stderr, "Error: cannot open %s\n", zDestFile); + sqlite3_close(pDest); + return 1; + } + open_db(p); + pBackup = sqlite3_backup_init(pDest, "main", p->db, zDb); + if( pBackup==0 ){ + fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest)); + sqlite3_close(pDest); + return 1; + } + while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK ){} + sqlite3_backup_finish(pBackup); + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + }else{ + fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest)); + } + sqlite3_close(pDest); + }else + + if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 && nArg>1 ){ bail_on_error = booleanValue(azArg[1]); }else @@ -1031,14 +2095,19 @@ open_db(p); fprintf(p->out, "BEGIN TRANSACTION;\n"); p->writableSchema = 0; + sqlite3_exec(p->db, "PRAGMA writable_schema=ON", 0, 0, 0); if( nArg==1 ){ run_schema_dump_query(p, "SELECT name, type, sql FROM sqlite_master " - "WHERE sql NOT NULL AND type=='table'", 0 + "WHERE sql NOT NULL AND type=='table' AND name!='sqlite_sequence'", 0 + ); + run_schema_dump_query(p, + "SELECT name, type, sql FROM sqlite_master " + "WHERE name=='sqlite_sequence'", 0 ); run_table_dump_query(p->out, p->db, "SELECT sql FROM sqlite_master " - "WHERE sql NOT NULL AND type IN ('index','trigger','view')" + "WHERE sql NOT NULL AND type IN ('index','trigger','view')", 0 ); }else{ int i; @@ -1052,7 +2121,7 @@ "SELECT sql FROM sqlite_master " "WHERE sql NOT NULL" " AND type IN ('index','trigger','view')" - " AND tbl_name LIKE shellstatic()" + " AND tbl_name LIKE shellstatic()", 0 ); zShellStatic = 0; } @@ -1061,6 +2130,7 @@ fprintf(p->out, "PRAGMA writable_schema=OFF;\n"); p->writableSchema = 0; } + sqlite3_exec(p->db, "PRAGMA writable_schema=OFF", 0, 0, 0); if( zErrMsg ){ fprintf(stderr,"Error: %s\n", zErrMsg); sqlite3_free(zErrMsg); @@ -1093,14 +2163,17 @@ ** did an .explain followed by a .width, .mode or .header ** command. 
*/ - p->mode = MODE_Column; + p->mode = MODE_Explain; p->showHeader = 1; memset(p->colWidth,0,ArraySize(p->colWidth)); - p->colWidth[0] = 4; - p->colWidth[1] = 14; - p->colWidth[2] = 10; - p->colWidth[3] = 10; - p->colWidth[4] = 33; + p->colWidth[0] = 4; /* addr */ + p->colWidth[1] = 13; /* opcode */ + p->colWidth[2] = 4; /* P1 */ + p->colWidth[3] = 4; /* P2 */ + p->colWidth[4] = 4; /* P3 */ + p->colWidth[5] = 13; /* P4 */ + p->colWidth[6] = 2; /* P5 */ + p->colWidth[7] = 13; /* Comment */ }else if (p->explainPrev.valid) { p->explainPrev.valid = 0; p->mode = p->explainPrev.mode; @@ -1109,13 +2182,24 @@ } }else +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_SUBQUERY) + if( c=='g' && strncmp(azArg[0], "genfkey", n)==0 ){ + GenfkeyCmd cmd; + if( 0==genfkeyParseArgs(&cmd, &azArg[1], nArg-1) ){ + cmd.db = p->db; + cmd.pCb = p; + genfkey_create_triggers(p->db, "main", (void *)&cmd, genfkeyCmdCb); + } + }else +#endif + if( c=='h' && (strncmp(azArg[0], "header", n)==0 || strncmp(azArg[0], "headers", n)==0 )&& nArg>1 ){ p->showHeader = booleanValue(azArg[1]); }else if( c=='h' && strncmp(azArg[0], "help", n)==0 ){ - fprintf(stderr,zHelp); + fprintf(stderr,"%s",zHelp); }else if( c=='i' && strncmp(azArg[0], "import", n)==0 && nArg>=3 ){ @@ -1135,14 +2219,14 @@ int lineno = 0; /* Line number of input file */ open_db(p); - nSep = strlen(p->separator); + nSep = strlen30(p->separator); if( nSep==0 ){ fprintf(stderr, "non-null separator required for import\n"); return 0; } zSql = sqlite3_mprintf("SELECT * FROM '%q'", zTable); if( zSql==0 ) return 0; - nByte = strlen(zSql); + nByte = strlen30(zSql); rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); if( rc ){ @@ -1157,7 +2241,7 @@ zSql = malloc( nByte + 20 + nCol*2 ); if( zSql==0 ) return 0; sqlite3_snprintf(nByte+20, zSql, "INSERT INTO '%q' VALUES(?", zTable); - j = strlen(zSql); + j = strlen30(zSql); for(i=1; i=2 ){ - int n2 = strlen(azArg[1]); + int n2 = strlen30(azArg[1]); if( strncmp(azArg[1],"line",n2)==0 || strncmp(azArg[1],"lines",n2)==0 ){ @@ -1359,7 +2444,7 @@ rc = 2; }else - if( c=='r' && strncmp(azArg[0], "read", n)==0 && nArg==2 ){ + if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 && nArg==2 ){ FILE *alt = fopen(azArg[1], "rb"); if( alt==0 ){ fprintf(stderr,"can't open \"%s\"\n", azArg[1]); @@ -1369,6 +2454,52 @@ } }else + if( c=='r' && n>=3 && strncmp(azArg[0], "restore", n)==0 && nArg>1 ){ + const char *zSrcFile; + const char *zDb; + sqlite3 *pSrc; + sqlite3_backup *pBackup; + int rc; + int nTimeout = 0; + + if( nArg==2 ){ + zSrcFile = azArg[1]; + zDb = "main"; + }else{ + zSrcFile = azArg[2]; + zDb = azArg[1]; + } + rc = sqlite3_open(zSrcFile, &pSrc); + if( rc!=SQLITE_OK ){ + fprintf(stderr, "Error: cannot open %s\n", zSrcFile); + sqlite3_close(pSrc); + return 1; + } + open_db(p); + pBackup = sqlite3_backup_init(p->db, zDb, pSrc, "main"); + if( pBackup==0 ){ + fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db)); + sqlite3_close(pSrc); + return 1; + } + while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK + || rc==SQLITE_BUSY ){ + if( rc==SQLITE_BUSY ){ + if( nTimeout++ >= 3 ) break; + sqlite3_sleep(100); + } + } + sqlite3_backup_finish(pBackup); + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + }else if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){ + fprintf(stderr, "source database is busy\n"); + }else{ + fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db)); + } + sqlite3_close(pSrc); + }else + if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){ struct callback_data data; char *zErrMsg = 0; @@ -1378,7 
+2509,7 @@ data.mode = MODE_Semi; if( nArg>1 ){ int i; - for(i=0; azArg[1][i]; i++) azArg[1][i] = tolower(azArg[1][i]); + for(i=0; azArg[1][i]; i++) azArg[1][i] = (char)tolower(azArg[1][i]); if( strcmp(azArg[1],"sqlite_master")==0 ){ char *new_argv[2], *new_colv[2]; new_argv[0] = "CREATE TABLE sqlite_master (\n" @@ -1409,8 +2540,9 @@ zShellStatic = azArg[1]; sqlite3_exec(p->db, "SELECT sql FROM " - " (SELECT * FROM sqlite_master UNION ALL" - " SELECT * FROM sqlite_temp_master) " + " (SELECT sql sql, type type, tbl_name tbl_name, name name" + " FROM sqlite_master UNION ALL" + " SELECT sql, type, tbl_name, name FROM sqlite_temp_master) " "WHERE tbl_name LIKE shellstatic() AND type!='meta' AND sql NOTNULL " "ORDER BY substr(type,2,1), name", callback, &data, &zErrMsg); @@ -1419,8 +2551,9 @@ }else{ sqlite3_exec(p->db, "SELECT sql FROM " - " (SELECT * FROM sqlite_master UNION ALL" - " SELECT * FROM sqlite_temp_master) " + " (SELECT sql sql, type type, tbl_name tbl_name, name name" + " FROM sqlite_master UNION ALL" + " SELECT sql, type, tbl_name, name FROM sqlite_temp_master) " "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%'" "ORDER BY substr(type,2,1), name", callback, &data, &zErrMsg @@ -1447,7 +2580,7 @@ output_c_string(p->out, p->nullvalue); fprintf(p->out, "\n"); fprintf(p->out,"%9.9s: %s\n","output", - strlen(p->outfile) ? p->outfile : "stdout"); + strlen30(p->outfile) ? p->outfile : "stdout"); fprintf(p->out,"%9.9s: ", "separator"); output_c_string(p->out, p->separator); fprintf(p->out, "\n"); @@ -1496,7 +2629,7 @@ int nPrintCol, nPrintRow; for(i=1; i<=nRow; i++){ if( azResult[i]==0 ) continue; - len = strlen(azResult[i]); + len = strlen30(azResult[i]); if( len>maxlen ) maxlen = len; } nPrintCol = 80/(maxlen+2); @@ -1515,10 +2648,16 @@ sqlite3_free_table(azResult); }else - if( c=='t' && n>1 && strncmp(azArg[0], "timeout", n)==0 && nArg>=2 ){ + if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 && nArg>=2 ){ open_db(p); sqlite3_busy_timeout(p->db, atoi(azArg[1])); }else + +#if HAS_TIMER + if( c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 && nArg>1 ){ + enableTimer = booleanValue(azArg[1]); + }else +#endif if( c=='w' && strncmp(azArg[0], "width", n)==0 ){ int j; @@ -1528,6 +2667,7 @@ } }else + { fprintf(stderr, "unknown command or invalid arguments: " " \"%s\". Enter \".help\" for help\n", azArg[0]); @@ -1537,12 +2677,13 @@ } /* -** Return TRUE if the last non-whitespace character in z[] is a semicolon. -** z[] is N characters long. +** Return TRUE if a semicolon occurs anywhere in the first N characters +** of string z[]. 
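
The new .backup and .restore commands above both follow the same sqlite3_backup pattern: open a second connection, call sqlite3_backup_init(), loop on sqlite3_backup_step(), then sqlite3_backup_finish(). A standalone sketch of that pattern, with backup_to_file as a hypothetical helper name:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Copy the "main" database of the open connection pDb into zFilename. */
    static int backup_to_file(sqlite3 *pDb, const char *zFilename){
      sqlite3 *pDest = 0;
      sqlite3_backup *pBackup;
      int rc = sqlite3_open(zFilename, &pDest);
      if( rc==SQLITE_OK ){
        pBackup = sqlite3_backup_init(pDest, "main", pDb, "main");
        if( pBackup ){
          /* Copy up to 100 pages per step until the source is exhausted. */
          while( (rc = sqlite3_backup_step(pBackup, 100))==SQLITE_OK ){}
          sqlite3_backup_finish(pBackup);
          if( rc==SQLITE_DONE ) rc = SQLITE_OK;
        }else{
          rc = sqlite3_errcode(pDest);
        }
      }
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "backup failed: %s\n", sqlite3_errmsg(pDest));
      }
      sqlite3_close(pDest);
      return rc;
    }
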
*/ -static int _ends_with_semicolon(const char *z, int N){ - while( N>0 && isspace((unsigned char)z[N-1]) ){ N--; } - return N>0 && z[N-1]==';'; +static int _contains_semicolon(const char *z, int N){ + int i; + for(i=0; icnt = 0; open_db(p); + BEGIN_TIMER; rc = sqlite3_exec(p->db, zSql, callback, p, &zErrMsg); + END_TIMER; if( rc || zErrMsg ){ char zPrefix[100]; if( in!=0 || !stdin_is_interactive ){ @@ -1680,7 +2842,7 @@ } } if( zSql ){ - if( !_all_whitespace(zSql) ) printf("Incomplete SQL: %s\n", zSql); + if( !_all_whitespace(zSql) ) fprintf(stderr, "Incomplete SQL: %s\n", zSql); free(zSql); } free(zLine); @@ -1696,7 +2858,7 @@ static char *find_home_dir(void){ char *home_dir = NULL; -#if !defined(_WIN32) && !defined(WIN32) && !defined(__MACOS__) && !defined(__OS2__) +#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) && !defined(_WIN32_WCE) && !defined(__RTP__) && !defined(_WRS_KERNEL) struct passwd *pwent; uid_t uid = getuid(); if( (pwent=getpwuid(uid)) != NULL) { @@ -1704,10 +2866,11 @@ } #endif -#ifdef __MACOS__ - char home_path[_MAX_PATH+1]; - home_dir = getcwd(home_path, _MAX_PATH); -#endif +#if defined(_WIN32_WCE) + /* Windows CE (arm-wince-mingw32ce-gcc) does not provide getenv() + */ + home_dir = strdup("/"); +#else #if defined(_WIN32) || defined(WIN32) || defined(__OS2__) if (!home_dir) { @@ -1726,7 +2889,7 @@ zDrive = getenv("HOMEDRIVE"); zPath = getenv("HOMEPATH"); if( zDrive && zPath ){ - n = strlen(zDrive) + strlen(zPath) + 1; + n = strlen30(zDrive) + strlen30(zPath) + 1; home_dir = malloc( n ); if( home_dir==0 ) return 0; sqlite3_snprintf(n, home_dir, "%s%s", zDrive, zPath); @@ -1736,8 +2899,10 @@ } #endif +#endif /* !_WIN32_WCE */ + if( home_dir ){ - int n = strlen(home_dir) + 1; + int n = strlen30(home_dir) + 1; char *z = malloc( n ); if( z ) memcpy(z, home_dir, n); home_dir = z; @@ -1763,10 +2928,12 @@ if (sqliterc == NULL) { home_dir = find_home_dir(); if( home_dir==0 ){ +#if !defined(__RTP__) && !defined(_WRS_KERNEL) fprintf(stderr,"%s: cannot locate your home directory!\n", Argv0); +#endif return; } - nBuf = strlen(home_dir) + 16; + nBuf = strlen30(home_dir) + 16; zBuf = malloc( nBuf ); if( zBuf==0 ){ fprintf(stderr,"%s: out of memory!\n", Argv0); @@ -1840,10 +3007,6 @@ int i; int rc = 0; -#ifdef __MACOS__ - argc = ccommand(&argv); -#endif - Argv0 = argv[0]; main_init(&data); stdin_is_interactive = isatty(0); @@ -1872,7 +3035,11 @@ } } if( iaggregate_context +#ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_aggregate_count sqlite3_api->aggregate_count +#endif #define sqlite3_bind_blob sqlite3_api->bind_blob #define sqlite3_bind_double sqlite3_api->bind_double #define sqlite3_bind_int sqlite3_api->bind_int @@ -225,14 +266,18 @@ #define sqlite3_errmsg sqlite3_api->errmsg #define sqlite3_errmsg16 sqlite3_api->errmsg16 #define sqlite3_exec sqlite3_api->exec +#ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_expired sqlite3_api->expired +#endif #define sqlite3_finalize sqlite3_api->finalize #define sqlite3_free sqlite3_api->free #define sqlite3_free_table sqlite3_api->free_table #define sqlite3_get_autocommit sqlite3_api->get_autocommit #define sqlite3_get_auxdata sqlite3_api->get_auxdata #define sqlite3_get_table sqlite3_api->get_table +#ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_global_recover sqlite3_api->global_recover +#endif #define sqlite3_interrupt sqlite3_api->interruptx #define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid #define sqlite3_libversion sqlite3_api->libversion @@ -270,7 +315,9 @@ #define sqlite3_thread_cleanup 
sqlite3_api->thread_cleanup #define sqlite3_total_changes sqlite3_api->total_changes #define sqlite3_trace sqlite3_api->trace +#ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_transfer_bindings sqlite3_api->transfer_bindings +#endif #define sqlite3_update_hook sqlite3_api->update_hook #define sqlite3_user_data sqlite3_api->user_data #define sqlite3_value_blob sqlite3_api->value_blob @@ -290,9 +337,44 @@ #define sqlite3_prepare_v2 sqlite3_api->prepare_v2 #define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 #define sqlite3_clear_bindings sqlite3_api->clear_bindings +#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob +#define sqlite3_blob_bytes sqlite3_api->blob_bytes +#define sqlite3_blob_close sqlite3_api->blob_close +#define sqlite3_blob_open sqlite3_api->blob_open +#define sqlite3_blob_read sqlite3_api->blob_read +#define sqlite3_blob_write sqlite3_api->blob_write +#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 +#define sqlite3_file_control sqlite3_api->file_control +#define sqlite3_memory_highwater sqlite3_api->memory_highwater +#define sqlite3_memory_used sqlite3_api->memory_used +#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc +#define sqlite3_mutex_enter sqlite3_api->mutex_enter +#define sqlite3_mutex_free sqlite3_api->mutex_free +#define sqlite3_mutex_leave sqlite3_api->mutex_leave +#define sqlite3_mutex_try sqlite3_api->mutex_try +#define sqlite3_open_v2 sqlite3_api->open_v2 +#define sqlite3_release_memory sqlite3_api->release_memory +#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem +#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig +#define sqlite3_sleep sqlite3_api->sleep +#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit +#define sqlite3_vfs_find sqlite3_api->vfs_find +#define sqlite3_vfs_register sqlite3_api->vfs_register +#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister +#define sqlite3_threadsafe sqlite3_api->xthreadsafe +#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob +#define sqlite3_result_error_code sqlite3_api->result_error_code +#define sqlite3_test_control sqlite3_api->test_control +#define sqlite3_randomness sqlite3_api->randomness +#define sqlite3_context_db_handle sqlite3_api->context_db_handle +#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes +#define sqlite3_limit sqlite3_api->limit +#define sqlite3_next_stmt sqlite3_api->next_stmt +#define sqlite3_sql sqlite3_api->sql +#define sqlite3_status sqlite3_api->status #endif /* SQLITE_CORE */ -#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api; +#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api = 0; #define SQLITE_EXTENSION_INIT2(v) sqlite3_api = v; #endif /* _SQLITE3EXT_H_ */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/sqlite.h.in /tmp/3ARg2Grji7/sqlite3-3.6.16/src/sqlite.h.in --- sqlite3-3.4.2/src/sqlite.h.in 2007-08-08 13:11:21.000000000 +0100 +++ sqlite3-3.6.16/src/sqlite.h.in 2009-06-25 12:45:58.000000000 +0100 @@ -17,7 +17,7 @@ ** ** Some of the definitions that are in this file are marked as ** "experimental". Experimental interfaces are normally new -** features recently added to SQLite. We do not anticipate changes +** features recently added to SQLite. We do not anticipate changes ** to experimental interfaces but reserve to make minor changes if ** experience from use "in the wild" suggest such changes are prudent. ** @@ -30,7 +30,7 @@ ** the version number) and changes its name to "sqlite3.h" as ** part of the build process. 
** -** @(#) $Id: sqlite.h.in,v 1.219 2007/08/08 12:11:21 drh Exp $ +** @(#) $Id: sqlite.h.in,v 1.458 2009/06/19 22:50:31 drh Exp $ */ #ifndef _SQLITE3_H_ #define _SQLITE3_H_ @@ -43,6 +43,7 @@ extern "C" { #endif + /* ** Add the ability to override 'extern' */ @@ -51,8 +52,23 @@ #endif /* -** Make sure these symbols where not defined by some previous header -** file. +** These no-op macros are used in front of interfaces to mark those +** interfaces as either deprecated or experimental. New applications +** should not use deprecated intrfaces - they are support for backwards +** compatibility only. Application writers should be aware that +** experimental interfaces are subject to change in point releases. +** +** These macros used to resolve to various kinds of compiler magic that +** would generate warning messages when they were used. But that +** compiler magic ended up generating such a flurry of bug reports +** that we have taken it all out and gone back to using simple +** noop macros. +*/ +#define SQLITE_DEPRECATED +#define SQLITE_EXPERIMENTAL + +/* +** Ensure these symbols were not defined by some previous header file. */ #ifdef SQLITE_VERSION # undef SQLITE_VERSION @@ -62,77 +78,113 @@ #endif /* -** CAPI3REF: Compile-Time Library Version Numbers +** CAPI3REF: Compile-Time Library Version Numbers {H10010} ** -** The version of the SQLite library is contained in the sqlite3.h -** header file in a #define named SQLITE_VERSION. The SQLITE_VERSION -** macro resolves to a string constant. -** -** The format of the version string is "X.Y.Z", where -** X is the major version number, Y is the minor version number and Z -** is the release number. The X.Y.Z might be followed by "alpha" or "beta". -** For example "3.1.1beta". -** -** The X value is always 3 in SQLite. The X value only changes when -** backwards compatibility is broken and we intend to never break -** backwards compatibility. The Y value only changes when +** The SQLITE_VERSION and SQLITE_VERSION_NUMBER #defines in +** the sqlite3.h file specify the version of SQLite with which +** that header file is associated. +** +** The "version" of SQLite is a string of the form "X.Y.Z". +** The phrase "alpha" or "beta" might be appended after the Z. +** The X value is major version number always 3 in SQLite3. +** The X value only changes when backwards compatibility is +** broken and we intend to never break backwards compatibility. +** The Y value is the minor version number and only changes when ** there are major feature enhancements that are forwards compatible -** but not backwards compatible. The Z value is incremented with -** each release but resets back to 0 when Y is incremented. -** -** The SQLITE_VERSION_NUMBER is an integer with the value -** (X*1000000 + Y*1000 + Z). For example, for version "3.1.1beta", -** SQLITE_VERSION_NUMBER is set to 3001001. To detect if they are using -** version 3.1.1 or greater at compile time, programs may use the test -** (SQLITE_VERSION_NUMBER>=3001001). +** but not backwards compatible. +** The Z value is the release number and is incremented with +** each release but resets back to 0 whenever Y is incremented. ** ** See also: [sqlite3_libversion()] and [sqlite3_libversion_number()]. 
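
As a small illustration of these version constants, an application can verify that the sqlite3.h it was compiled against matches the library it is linked with; check_sqlite_version is a hypothetical name:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Warn if the sqlite3.h header and the linked library disagree. */
    static int check_sqlite_version(void){
      if( sqlite3_libversion_number()!=SQLITE_VERSION_NUMBER ){
        fprintf(stderr, "sqlite3.h is %s but the library is %s\n",
                SQLITE_VERSION, sqlite3_libversion());
        return 1;
      }
      return 0;
    }
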
+** +** Requirements: [H10011] [H10014] */ #define SQLITE_VERSION "--VERS--" -#define SQLITE_VERSION_NUMBER --VERSION-NUMBER-- +#define SQLITE_VERSION_NUMBER --VERSION-NUMBER-- /* -** CAPI3REF: Run-Time Library Version Numbers +** CAPI3REF: Run-Time Library Version Numbers {H10020} +** KEYWORDS: sqlite3_version ** -** These routines return values equivalent to the header constants -** [SQLITE_VERSION] and [SQLITE_VERSION_NUMBER]. The values returned -** by this routines should only be different from the header values -** if you compile your program using an sqlite3.h header from a -** different version of SQLite that the version of the library you -** link against. -** -** The sqlite3_version[] string constant contains the text of the -** [SQLITE_VERSION] string. The sqlite3_libversion() function returns -** a poiner to the sqlite3_version[] string constant. The function -** is provided for DLL users who can only access functions and not +** These features provide the same information as the [SQLITE_VERSION] +** and [SQLITE_VERSION_NUMBER] #defines in the header, but are associated +** with the library instead of the header file. Cautious programmers might +** include a check in their application to verify that +** sqlite3_libversion_number() always returns the value +** [SQLITE_VERSION_NUMBER]. +** +** The sqlite3_libversion() function returns the same information as is +** in the sqlite3_version[] string constant. The function is provided +** for use in DLLs since DLL users usually do not have direct access to string ** constants within the DLL. +** +** Requirements: [H10021] [H10022] [H10023] */ SQLITE_EXTERN const char sqlite3_version[]; const char *sqlite3_libversion(void); int sqlite3_libversion_number(void); /* -** CAPI3REF: Database Connection Handle +** CAPI3REF: Test To See If The Library Is Threadsafe {H10100} ** -** Each open SQLite database is represented by pointer to an instance of the -** opaque structure named "sqlite3". It is useful to think of an sqlite3 -** pointer as an object. The [sqlite3_open] interface is its constructor -** and [sqlite3_close] is its destructor. There are many other interfaces -** (such as [sqlite3_prepare_v2], [sqlite3_create_function], and -** [sqlite3_busy_timeout] to name but three) that are methods on this -** object. +** SQLite can be compiled with or without mutexes. When +** the [SQLITE_THREADSAFE] C preprocessor macro 1 or 2, mutexes +** are enabled and SQLite is threadsafe. When the +** [SQLITE_THREADSAFE] macro is 0, +** the mutexes are omitted. Without the mutexes, it is not safe +** to use SQLite concurrently from more than one thread. +** +** Enabling mutexes incurs a measurable performance penalty. +** So if speed is of utmost importance, it makes sense to disable +** the mutexes. But for maximum safety, mutexes should be enabled. +** The default behavior is for mutexes to be enabled. +** +** This interface can be used by a program to make sure that the +** version of SQLite that it is linking against was compiled with +** the desired setting of the [SQLITE_THREADSAFE] macro. +** +** This interface only reports on the compile-time mutex setting +** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with +** SQLITE_THREADSAFE=1 then mutexes are enabled by default but +** can be fully or partially disabled using a call to [sqlite3_config()] +** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], +** or [SQLITE_CONFIG_MUTEX]. 
The return value of this function shows +** only the default compile-time setting, not any run-time changes +** to that setting. +** +** See the [threading mode] documentation for additional information. +** +** Requirements: [H10101] [H10102] +*/ +int sqlite3_threadsafe(void); + +/* +** CAPI3REF: Database Connection Handle {H12000} +** KEYWORDS: {database connection} {database connections} +** +** Each open SQLite database is represented by a pointer to an instance of +** the opaque structure named "sqlite3". It is useful to think of an sqlite3 +** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and +** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] +** is its destructor. There are many other interfaces (such as +** [sqlite3_prepare_v2()], [sqlite3_create_function()], and +** [sqlite3_busy_timeout()] to name but three) that are methods on an +** sqlite3 object. */ typedef struct sqlite3 sqlite3; - /* -** CAPI3REF: 64-Bit Integer Types +** CAPI3REF: 64-Bit Integer Types {H10200} +** KEYWORDS: sqlite_int64 sqlite_uint64 +** +** Because there is no cross-platform way to specify 64-bit integer types +** SQLite includes typedefs for 64-bit signed and unsigned integers. ** -** Some compilers do not support the "long long" datatype. So we have -** to do compiler-specific typedefs for 64-bit signed and unsigned integers. +** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. +** The sqlite_int64 and sqlite_uint64 types are supported for backwards +** compatibility only. ** -** Many SQLite interface functions require a 64-bit integer arguments. -** Those interfaces are declared using this typedef. +** Requirements: [H10201] [H10202] */ #ifdef SQLITE_INT64_TYPE typedef SQLITE_INT64_TYPE sqlite_int64; @@ -144,26 +196,46 @@ typedef long long int sqlite_int64; typedef unsigned long long int sqlite_uint64; #endif +typedef sqlite_int64 sqlite3_int64; +typedef sqlite_uint64 sqlite3_uint64; /* ** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point +** substitute integer for floating-point. */ #ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite_int64 +# define double sqlite3_int64 #endif /* -** CAPI3REF: Closing A Database Connection +** CAPI3REF: Closing A Database Connection {H12010} ** -** Call this function with a pointer to a structure that was previously -** returned from [sqlite3_open()] and the corresponding database will by -** closed. -** -** All SQL statements prepared using [sqlite3_prepare_v2()] or -** [sqlite3_prepare16_v2()] must be destroyed using [sqlite3_finalize()] -** before this routine is called. Otherwise, SQLITE_BUSY is returned and the -** database connection remains open. +** This routine is the destructor for the [sqlite3] object. +** +** Applications should [sqlite3_finalize | finalize] all [prepared statements] +** and [sqlite3_blob_close | close] all [BLOB handles] associated with +** the [sqlite3] object prior to attempting to close the object. +** The [sqlite3_next_stmt()] interface can be used to locate all +** [prepared statements] associated with a [database connection] if desired. +** Typical code might look like this: +** +**
    +** sqlite3_stmt *pStmt;
    +** while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    +**     sqlite3_finalize(pStmt);
    +** }
    +** 
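
Folded into a complete helper, the same idea might look like the following sketch; close_connection is a hypothetical name:

    #include "sqlite3.h"

    /* Finalize every outstanding prepared statement, then close the
    ** connection.  sqlite3_close() returns SQLITE_BUSY if other resources,
    ** such as unclosed BLOB handles, are still outstanding. */
    static int close_connection(sqlite3 *db){
      sqlite3_stmt *pStmt;
      while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
        sqlite3_finalize(pStmt);
      }
      return sqlite3_close(db);
    }
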
    +** +** If [sqlite3_close()] is invoked while a transaction is open, +** the transaction is automatically rolled back. +** +** The C parameter to [sqlite3_close(C)] must be either a NULL +** pointer or an [sqlite3] object pointer obtained +** from [sqlite3_open()], [sqlite3_open16()], or +** [sqlite3_open_v2()], and not previously closed. +** +** Requirements: +** [H12011] [H12012] [H12013] [H12014] [H12015] [H12019] */ int sqlite3_close(sqlite3 *); @@ -175,76 +247,73 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); /* -** CAPI3REF: One-Step Query Execution Interface -** -** This interface is used to do a one-time evaluatation of zero -** or more SQL statements. UTF-8 text of the SQL statements to -** be evaluted is passed in as the second parameter. The statements -** are prepared one by one using [sqlite3_prepare()], evaluated -** using [sqlite3_step()], then destroyed using [sqlite3_finalize()]. -** -** If one or more of the SQL statements are queries, then -** the callback function specified by the 3rd parameter is -** invoked once for each row of the query result. This callback -** should normally return 0. If the callback returns a non-zero -** value then the query is aborted, all subsequent SQL statements -** are skipped and the sqlite3_exec() function returns the SQLITE_ABORT. -** -** The 4th parameter to this interface is an arbitrary pointer that is -** passed through to the callback function as its first parameter. -** -** The 2nd parameter to the callback function is the number of -** columns in the query result. The 3rd parameter to the callback -** is an array of strings holding the values for each column -** as extracted using [sqlite3_column_text()]. -** The 4th parameter to the callback is an array of strings -** obtained using [sqlite3_column_name()] and holding -** the names of each column. -** -** The callback function may be NULL, even for queries. A NULL -** callback is not an error. It just means that no callback -** will be invoked. -** -** If an error occurs while parsing or evaluating the SQL (but -** not while executing the callback) then an appropriate error -** message is written into memory obtained from [sqlite3_malloc()] and -** *errmsg is made to point to that message. The calling function -** is responsible for freeing the memory that holds the error -** message. Use [sqlite3_free()] for this. If errmsg==NULL, -** then no error message is ever written. -** -** The return value is is SQLITE_OK if there are no errors and -** some other [SQLITE_OK | return code] if there is an error. -** The particular return value depends on the type of error. +** CAPI3REF: One-Step Query Execution Interface {H12100} ** +** The sqlite3_exec() interface is a convenient way of running one or more +** SQL statements without having to write a lot of C code. The UTF-8 encoded +** SQL statements are passed in as the second parameter to sqlite3_exec(). +** The statements are evaluated one by one until either an error or +** an interrupt is encountered, or until they are all done. The 3rd parameter +** is an optional callback that is invoked once for each row of any query +** results produced by the SQL statements. The 5th parameter tells where +** to write any error messages. +** +** The error message passed back through the 5th parameter is held +** in memory obtained from [sqlite3_malloc()]. 
To avoid a memory leak, +** the calling application should call [sqlite3_free()] on any error +** message returned through the 5th parameter when it has finished using +** the error message. +** +** If the SQL statement in the 2nd parameter is NULL or an empty string +** or a string containing only whitespace and comments, then no SQL +** statements are evaluated and the database is not changed. +** +** The sqlite3_exec() interface is implemented in terms of +** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()]. +** The sqlite3_exec() routine does nothing to the database that cannot be done +** by [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()]. +** +** The first parameter to [sqlite3_exec()] must be an valid and open +** [database connection]. +** +** The database connection must not be closed while +** [sqlite3_exec()] is running. +** +** The calling function should use [sqlite3_free()] to free +** the memory that *errmsg is left pointing at once the error +** message is no longer needed. +** +** The SQL statement text in the 2nd parameter to [sqlite3_exec()] +** must remain unchanged while [sqlite3_exec()] is running. +** +** Requirements: +** [H12101] [H12102] [H12104] [H12105] [H12107] [H12110] [H12113] [H12116] +** [H12119] [H12122] [H12125] [H12131] [H12134] [H12137] [H12138] */ int sqlite3_exec( sqlite3*, /* An open database */ - const char *sql, /* SQL to be evaluted */ + const char *sql, /* SQL to be evaluated */ int (*callback)(void*,int,char**,char**), /* Callback function */ void *, /* 1st argument to callback */ char **errmsg /* Error msg written here */ ); /* -** CAPI3REF: Result Codes -** KEYWORDS: SQLITE_OK +** CAPI3REF: Result Codes {H10210} +** KEYWORDS: SQLITE_OK {error code} {error codes} +** KEYWORDS: {result code} {result codes} ** ** Many SQLite functions return an integer result code from the set shown -** above in order to indicates success or failure. +** here in order to indicates success or failure. ** -** The result codes above are the only ones returned by SQLite in its -** default configuration. However, the [sqlite3_extended_result_codes()] -** API can be used to set a database connectoin to return more detailed -** result codes. +** New error codes may be added in future versions of SQLite. ** ** See also: [SQLITE_IOERR_READ | extended result codes] -** */ #define SQLITE_OK 0 /* Successful result */ /* beginning-of-error-codes */ #define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* NOT USED. 
Internal logic error in SQLite */ +#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ #define SQLITE_PERM 3 /* Access permission denied */ #define SQLITE_ABORT 4 /* Callback routine requested an abort */ #define SQLITE_BUSY 5 /* The database file is locked */ @@ -261,7 +330,7 @@ #define SQLITE_EMPTY 16 /* Database is empty */ #define SQLITE_SCHEMA 17 /* The database schema changed */ #define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ -#define SQLITE_CONSTRAINT 19 /* Abort due to contraint violation */ +#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ #define SQLITE_MISMATCH 20 /* Data type mismatch */ #define SQLITE_MISUSE 21 /* Library used incorrectly */ #define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ @@ -274,202 +343,1089 @@ /* end-of-error-codes */ /* -** CAPI3REF: Extended Result Codes +** CAPI3REF: Extended Result Codes {H10220} +** KEYWORDS: {extended error code} {extended error codes} +** KEYWORDS: {extended result code} {extended result codes} ** ** In its default configuration, SQLite API routines return one of 26 integer -** result codes described at result-codes. However, experience has shown that -** many of these result codes are too course-grained. They do not provide as -** much information about problems as users might like. In an effort to +** [SQLITE_OK | result codes]. However, experience has shown that many of +** these result codes are too coarse-grained. They do not provide as +** much information about problems as programmers might like. In an effort to ** address this, newer versions of SQLite (version 3.3.8 and later) include ** support for additional result codes that provide more detailed information -** about errors. The extended result codes are enabled (or disabled) for -** each database -** connection using the [sqlite3_extended_result_codes()] API. -** -** Some of the available extended result codes are listed above. -** We expect the number of extended result codes will be expand +** about errors. The extended result codes are enabled or disabled +** on a per database connection basis using the +** [sqlite3_extended_result_codes()] API. +** +** Some of the available extended result codes are listed here. +** One may expect the number of extended result codes will be expand ** over time. Software that uses extended result codes should expect ** to see new result codes in future releases of SQLite. -** -** The symbolic name for an extended result code always contains a related -** primary result code as a prefix. Primary result codes contain a single -** "_" character. Extended result codes contain two or more "_" characters. -** The numeric value of an extended result code can be converted to its -** corresponding primary result code by masking off the lower 8 bytes. ** ** The SQLITE_OK result code will never be extended. It will always ** be exactly zero. 
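
A short sketch of how an application might opt in to extended result codes and recover the primary code by masking the low 8 bits of the value returned by [sqlite3_exec()]; exec_with_extended_codes is a hypothetical name:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Run zSql with extended result codes enabled; on failure report both
    ** the extended code and the primary code in its low 8 bits. */
    static int exec_with_extended_codes(sqlite3 *db, const char *zSql){
      char *zErr = 0;
      int rc;
      sqlite3_extended_result_codes(db, 1);   /* off by default */
      rc = sqlite3_exec(db, zSql, 0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "error %d (primary %d): %s\n", rc, rc & 0xff,
                zErr ? zErr : sqlite3_errmsg(db));
        sqlite3_free(zErr);
      }
      return rc;
    }
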
*/ -#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) -#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) -#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) -#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) -#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) -#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) -#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) -#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) -#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) -#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) -#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) - -/* -** CAPI3REF: Enable Or Disable Extended Result Codes -** -** This routine enables or disables the -** [SQLITE_IOERR_READ | extended result codes] feature. -** By default, SQLite API routines return one of only 26 integer -** [SQLITE_OK | result codes]. When extended result codes -** are enabled by this routine, the repetoire of result codes can be -** much larger and can (hopefully) provide more detailed information -** about the cause of an error. -** -** The second argument is a boolean value that turns extended result -** codes on and off. Extended result codes are off by default for -** backwards compatibility with older versions of SQLite. +#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) +#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) +#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) +#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) +#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) +#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) +#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) +#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) +#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) +#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) +#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) +#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) +#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) +#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) +#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) +#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) +#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) +#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8) ) + +/* +** CAPI3REF: Flags For File Open Operations {H10230} +** +** These bit values are intended for use in the +** 3rd parameter to the [sqlite3_open_v2()] interface and +** in the 4th parameter to the xOpen method of the +** [sqlite3_vfs] object. 
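
As an illustration of the sqlite3_open_v2() flags defined just below, an application might open a database read-only, or open it read/write and create it if missing; the helper names here are hypothetical:

    #include "sqlite3.h"

    /* Open an existing database read-only; fails if the file is missing. */
    static int open_readonly(const char *zFile, sqlite3 **ppDb){
      return sqlite3_open_v2(zFile, ppDb, SQLITE_OPEN_READONLY, 0);
    }

    /* Open a database read/write, creating it if necessary (the same
    ** combination sqlite3_open() uses). */
    static int open_readwrite(const char *zFile, sqlite3 **ppDb){
      return sqlite3_open_v2(zFile, ppDb,
                             SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
    }
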
+*/ +#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ +#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ +#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ +#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ +#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ +#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ +#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ +#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ +#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ +#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ + +/* +** CAPI3REF: Device Characteristics {H10240} +** +** The xDeviceCapabilities method of the [sqlite3_io_methods] +** object returns an integer which is a vector of the these +** bit values expressing I/O characteristics of the mass storage +** device that holds the file that the [sqlite3_io_methods] +** refers to. +** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). +*/ +#define SQLITE_IOCAP_ATOMIC 0x00000001 +#define SQLITE_IOCAP_ATOMIC512 0x00000002 +#define SQLITE_IOCAP_ATOMIC1K 0x00000004 +#define SQLITE_IOCAP_ATOMIC2K 0x00000008 +#define SQLITE_IOCAP_ATOMIC4K 0x00000010 +#define SQLITE_IOCAP_ATOMIC8K 0x00000020 +#define SQLITE_IOCAP_ATOMIC16K 0x00000040 +#define SQLITE_IOCAP_ATOMIC32K 0x00000080 +#define SQLITE_IOCAP_ATOMIC64K 0x00000100 +#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 +#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 + +/* +** CAPI3REF: File Locking Levels {H10250} +** +** SQLite uses one of these integer values as the second +** argument to calls it makes to the xLock() and xUnlock() methods +** of an [sqlite3_io_methods] object. +*/ +#define SQLITE_LOCK_NONE 0 +#define SQLITE_LOCK_SHARED 1 +#define SQLITE_LOCK_RESERVED 2 +#define SQLITE_LOCK_PENDING 3 +#define SQLITE_LOCK_EXCLUSIVE 4 + +/* +** CAPI3REF: Synchronization Type Flags {H10260} +** +** When SQLite invokes the xSync() method of an +** [sqlite3_io_methods] object it uses a combination of +** these integer values as the second argument. +** +** When the SQLITE_SYNC_DATAONLY flag is used, it means that the +** sync operation only needs to flush data to mass storage. Inode +** information need not be flushed. If the lower four bits of the flag +** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. +** If the lower four bits equal SQLITE_SYNC_FULL, that means +** to use Mac OS X style fullsync instead of fsync(). +*/ +#define SQLITE_SYNC_NORMAL 0x00002 +#define SQLITE_SYNC_FULL 0x00003 +#define SQLITE_SYNC_DATAONLY 0x00010 + +/* +** CAPI3REF: OS Interface Open File Handle {H11110} +** +** An [sqlite3_file] object represents an open file in the OS +** interface layer. 
Individual OS interface implementations will +** want to subclass this object by appending additional fields +** for their own use. The pMethods entry is a pointer to an +** [sqlite3_io_methods] object that defines methods for performing +** I/O operations on the open file. +*/ +typedef struct sqlite3_file sqlite3_file; +struct sqlite3_file { + const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ +}; + +/* +** CAPI3REF: OS Interface File Virtual Methods Object {H11120} +** +** Every file opened by the [sqlite3_vfs] xOpen method populates an +** [sqlite3_file] object (or, more commonly, a subclass of the +** [sqlite3_file] object) with a pointer to an instance of this object. +** This object defines the methods used to perform various operations +** against the open file represented by the [sqlite3_file] object. +** +** If the xOpen method sets the sqlite3_file.pMethods element +** to a non-NULL pointer, then the sqlite3_io_methods.xClose method +** may be invoked even if the xOpen reported that it failed. The +** only way to prevent a call to xClose following a failed xOpen +** is for the xOpen to set the sqlite3_file.pMethods element to NULL. +** +** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or +** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). +** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] +** flag may be ORed in to indicate that only the data of the file +** and not its inode needs to be synced. +** +** The integer values to xLock() and xUnlock() are one of +**
      +**
    • [SQLITE_LOCK_NONE], +**
    • [SQLITE_LOCK_SHARED], +**
    • [SQLITE_LOCK_RESERVED], +**
    • [SQLITE_LOCK_PENDING], or +**
    • [SQLITE_LOCK_EXCLUSIVE]. +**
    +** xLock() increases the lock. xUnlock() decreases the lock. +** The xCheckReservedLock() method checks whether any database connection, +** either in this process or in some other process, is holding a RESERVED, +** PENDING, or EXCLUSIVE lock on the file. It returns true +** if such a lock exists and false otherwise. +** +** The xFileControl() method is a generic interface that allows custom +** VFS implementations to directly control an open file using the +** [sqlite3_file_control()] interface. The second "op" argument is an +** integer opcode. The third argument is a generic pointer intended to +** point to a structure that may contain arguments or space in which to +** write return values. Potential uses for xFileControl() might be +** functions to enable blocking locks with timeouts, to change the +** locking strategy (for example to use dot-file locks), to inquire +** about the status of a lock, or to break stale locks. The SQLite +** core reserves all opcodes less than 100 for its own use. +** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. +** Applications that define a custom xFileControl method should use opcodes +** greater than 100 to avoid conflicts. +** +** The xSectorSize() method returns the sector size of the +** device that underlies the file. The sector size is the +** minimum write that can be performed without disturbing +** other bytes in the file. The xDeviceCharacteristics() +** method returns a bit vector describing behaviors of the +** underlying device: +** +**
      +**
    • [SQLITE_IOCAP_ATOMIC] +**
    • [SQLITE_IOCAP_ATOMIC512] +**
    • [SQLITE_IOCAP_ATOMIC1K] +**
    • [SQLITE_IOCAP_ATOMIC2K] +**
    • [SQLITE_IOCAP_ATOMIC4K] +**
    • [SQLITE_IOCAP_ATOMIC8K] +**
    • [SQLITE_IOCAP_ATOMIC16K] +**
    • [SQLITE_IOCAP_ATOMIC32K] +**
    • [SQLITE_IOCAP_ATOMIC64K] +**
    • [SQLITE_IOCAP_SAFE_APPEND] +**
    • [SQLITE_IOCAP_SEQUENTIAL] +**
    +** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). +** +** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill +** in the unread portions of the buffer with zeros. A VFS that +** fails to zero-fill short reads might seem to work. However, +** failure to zero-fill short reads will eventually lead to +** database corruption. */ -int sqlite3_extended_result_codes(sqlite3*, int onoff); +typedef struct sqlite3_io_methods sqlite3_io_methods; +struct sqlite3_io_methods { + int iVersion; + int (*xClose)(sqlite3_file*); + int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); + int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); + int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); + int (*xSync)(sqlite3_file*, int flags); + int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); + int (*xLock)(sqlite3_file*, int); + int (*xUnlock)(sqlite3_file*, int); + int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); + int (*xFileControl)(sqlite3_file*, int op, void *pArg); + int (*xSectorSize)(sqlite3_file*); + int (*xDeviceCharacteristics)(sqlite3_file*); + /* Additional methods may be added in future releases */ +}; /* -** CAPI3REF: Last Insert Rowid +** CAPI3REF: Standard File Control Opcodes {H11310} +** +** These integer constants are opcodes for the xFileControl method +** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] +** interface. +** +** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This +** opcode causes the xFileControl method to write the current state of +** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], +** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) +** into an integer that the pArg argument points to. This capability +** is used during testing and only needs to be supported when SQLITE_TEST +** is defined. +*/ +#define SQLITE_FCNTL_LOCKSTATE 1 +#define SQLITE_GET_LOCKPROXYFILE 2 +#define SQLITE_SET_LOCKPROXYFILE 3 +#define SQLITE_LAST_ERRNO 4 + +/* +** CAPI3REF: Mutex Handle {H17110} +** +** The mutex module within SQLite defines [sqlite3_mutex] to be an +** abstract type for a mutex object. The SQLite core never looks +** at the internal representation of an [sqlite3_mutex]. It only +** deals with pointers to the [sqlite3_mutex] object. +** +** Mutexes are created using [sqlite3_mutex_alloc()]. +*/ +typedef struct sqlite3_mutex sqlite3_mutex; + +/* +** CAPI3REF: OS Interface Object {H11140} +** +** An instance of the sqlite3_vfs object defines the interface between +** the SQLite core and the underlying operating system. The "vfs" +** in the name of the object stands for "virtual file system". +** +** The value of the iVersion field is initially 1 but may be larger in +** future versions of SQLite. Additional fields may be appended to this +** object when the iVersion value is increased. 
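As a sketch of the subclassing pattern described above for sqlite3_file and sqlite3_io_methods (the DemoFile type, demoClose() and the fd field are hypothetical, and all but one method slot is left unfilled here; a real VFS fills in every slot):

  #include "sqlite3.h"

  /* Hypothetical subclass of sqlite3_file: the base struct must come first
  ** so that a DemoFile* can be treated as an sqlite3_file*. */
  typedef struct DemoFile DemoFile;
  struct DemoFile {
    sqlite3_file base;      /* Base class.  Must be first. */
    int fd;                 /* Underlying OS file descriptor (example field) */
  };

  static int demoClose(sqlite3_file *pFile){
    DemoFile *p = (DemoFile*)pFile;
    /* ... close p->fd here ... */
    (void)p;
    return SQLITE_OK;
  }

  /* Sketch of the methods table; every null slot below must be a real
  ** function in a working implementation. */
  static const sqlite3_io_methods demoIoMethods = {
    1,                      /* iVersion */
    demoClose,              /* xClose */
    0, 0, 0, 0, 0,          /* xRead, xWrite, xTruncate, xSync, xFileSize */
    0, 0, 0,                /* xLock, xUnlock, xCheckReservedLock */
    0, 0, 0                 /* xFileControl, xSectorSize, xDeviceCharacteristics */
  };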
Note that the structure +** of the sqlite3_vfs object changes in the transaction between +** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not +** modified. +** +** The szOsFile field is the size of the subclassed [sqlite3_file] +** structure used by this VFS. mxPathname is the maximum length of +** a pathname in this VFS. +** +** Registered sqlite3_vfs objects are kept on a linked list formed by +** the pNext pointer. The [sqlite3_vfs_register()] +** and [sqlite3_vfs_unregister()] interfaces manage this list +** in a thread-safe way. The [sqlite3_vfs_find()] interface +** searches the list. Neither the application code nor the VFS +** implementation should use the pNext pointer. +** +** The pNext field is the only field in the sqlite3_vfs +** structure that SQLite will ever modify. SQLite will only access +** or modify this field while holding a particular static mutex. +** The application should never modify anything within the sqlite3_vfs +** object once the object has been registered. +** +** The zName field holds the name of the VFS module. The name must +** be unique across all VFS modules. +** +** SQLite will guarantee that the zFilename parameter to xOpen +** is either a NULL pointer or string obtained +** from xFullPathname(). SQLite further guarantees that +** the string will be valid and unchanged until xClose() is +** called. Because of the previous sentence, +** the [sqlite3_file] can safely store a pointer to the +** filename if it needs to remember the filename for some reason. +** If the zFilename parameter is xOpen is a NULL pointer then xOpen +** must invent its own temporary name for the file. Whenever the +** xFilename parameter is NULL it will also be the case that the +** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. +** +** The flags argument to xOpen() includes all bits set in +** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] +** or [sqlite3_open16()] is used, then flags includes at least +** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. +** If xOpen() opens a file read-only then it sets *pOutFlags to +** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. +** +** SQLite will also add one of the following flags to the xOpen() +** call, depending on the object being opened: ** -** Each entry in an SQLite table has a unique 64-bit signed integer key -** called the "rowid". The rowid is always available as an undeclared -** column named ROWID, OID, or _ROWID_. If the table has a column of -** type INTEGER PRIMARY KEY then that column is another an alias for the -** rowid. +**
      +**
    • [SQLITE_OPEN_MAIN_DB] +**
    • [SQLITE_OPEN_MAIN_JOURNAL] +**
    • [SQLITE_OPEN_TEMP_DB] +**
    • [SQLITE_OPEN_TEMP_JOURNAL] +**
    • [SQLITE_OPEN_TRANSIENT_DB] +**
    • [SQLITE_OPEN_SUBJOURNAL] +**
    • [SQLITE_OPEN_MASTER_JOURNAL] +**
    ** -** This routine returns the rowid of the most recent INSERT into -** the database from the database connection given in the first -** argument. If no inserts have ever occurred on this database -** connection, zero is returned. +** The file I/O implementation can use the object type flags to +** change the way it deals with files. For example, an application +** that does not care about crash recovery or rollback might make +** the open of a journal file a no-op. Writes to this journal would +** also be no-ops, and any attempt to read the journal would return +** SQLITE_IOERR. Or the implementation might recognize that a database +** file will be doing page-aligned sector reads and writes in a random +** order and set up its I/O subsystem accordingly. +** +** SQLite might also add one of the following flags to the xOpen method: +** +**
      +**
    • [SQLITE_OPEN_DELETEONCLOSE] +**
    • [SQLITE_OPEN_EXCLUSIVE] +**
    +** +** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be +** deleted when it is closed. The [SQLITE_OPEN_DELETEONCLOSE] +** will be set for TEMP databases, journals and for subjournals. +** +** The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction +** with the [SQLITE_OPEN_CREATE] flag, which are both directly +** analogous to the O_EXCL and O_CREAT flags of the POSIX open() +** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the +** SQLITE_OPEN_CREATE, is used to indicate that file should always +** be created, and that it is an error if it already exists. +** It is not used to indicate the file should be opened +** for exclusive access. +** +** At least szOsFile bytes of memory are allocated by SQLite +** to hold the [sqlite3_file] structure passed as the third +** argument to xOpen. The xOpen method does not have to +** allocate the structure; it should just fill it in. Note that +** the xOpen method must set the sqlite3_file.pMethods to either +** a valid [sqlite3_io_methods] object or to NULL. xOpen must do +** this even if the open fails. SQLite expects that the sqlite3_file.pMethods +** element will be valid after xOpen returns regardless of the success +** or failure of the xOpen call. +** +** The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] +** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to +** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] +** to test whether a file is at least readable. The file can be a +** directory. +** +** SQLite will always allocate at least mxPathname+1 bytes for the +** output buffer xFullPathname. The exact size of the output buffer +** is also passed as a parameter to both methods. If the output buffer +** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is +** handled as a fatal error by SQLite, vfs implementations should endeavor +** to prevent this by setting mxPathname to a sufficiently large value. +** +** The xRandomness(), xSleep(), and xCurrentTime() interfaces +** are not strictly a part of the filesystem, but they are +** included in the VFS structure for completeness. +** The xRandomness() function attempts to return nBytes bytes +** of good-quality randomness into zOut. The return value is +** the actual number of bytes of randomness obtained. +** The xSleep() method causes the calling thread to sleep for at +** least the number of microseconds given. The xCurrentTime() +** method returns a Julian Day Number for the current date and time. 
+** +*/ +typedef struct sqlite3_vfs sqlite3_vfs; +struct sqlite3_vfs { + int iVersion; /* Structure version number */ + int szOsFile; /* Size of subclassed sqlite3_file */ + int mxPathname; /* Maximum file pathname length */ + sqlite3_vfs *pNext; /* Next registered VFS */ + const char *zName; /* Name of this virtual file system */ + void *pAppData; /* Pointer to application-specific data */ + int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int flags, int *pOutFlags); + int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); + int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); + int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); + void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); + void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); + void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); + void (*xDlClose)(sqlite3_vfs*, void*); + int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); + int (*xSleep)(sqlite3_vfs*, int microseconds); + int (*xCurrentTime)(sqlite3_vfs*, double*); + int (*xGetLastError)(sqlite3_vfs*, int, char *); + /* New fields may be appended in figure versions. The iVersion + ** value will increment whenever this happens. */ +}; + +/* +** CAPI3REF: Flags for the xAccess VFS method {H11190} +** +** These integer constants can be used as the third parameter to +** the xAccess method of an [sqlite3_vfs] object. {END} They determine +** what kind of permissions the xAccess method is looking for. +** With SQLITE_ACCESS_EXISTS, the xAccess method +** simply checks whether the file exists. +** With SQLITE_ACCESS_READWRITE, the xAccess method +** checks whether the file is both readable and writable. +** With SQLITE_ACCESS_READ, the xAccess method +** checks whether the file is readable. +*/ +#define SQLITE_ACCESS_EXISTS 0 +#define SQLITE_ACCESS_READWRITE 1 +#define SQLITE_ACCESS_READ 2 + +/* +** CAPI3REF: Initialize The SQLite Library {H10130} +** +** The sqlite3_initialize() routine initializes the +** SQLite library. The sqlite3_shutdown() routine +** deallocates any resources that were allocated by sqlite3_initialize(). +** +** A call to sqlite3_initialize() is an "effective" call if it is +** the first time sqlite3_initialize() is invoked during the lifetime of +** the process, or if it is the first time sqlite3_initialize() is invoked +** following a call to sqlite3_shutdown(). Only an effective call +** of sqlite3_initialize() does any initialization. All other calls +** are harmless no-ops. +** +** A call to sqlite3_shutdown() is an "effective" call if it is the first +** call to sqlite3_shutdown() since the last sqlite3_initialize(). Only +** an effective call to sqlite3_shutdown() does any deinitialization. +** All other calls to sqlite3_shutdown() are harmless no-ops. +** +** Among other things, sqlite3_initialize() shall invoke +** sqlite3_os_init(). Similarly, sqlite3_shutdown() +** shall invoke sqlite3_os_end(). +** +** The sqlite3_initialize() routine returns [SQLITE_OK] on success. +** If for some reason, sqlite3_initialize() is unable to initialize +** the library (perhaps it is unable to allocate a needed resource such +** as a mutex) it returns an [error code] other than [SQLITE_OK]. +** +** The sqlite3_initialize() routine is called internally by many other +** SQLite interfaces so that an application usually does not need to +** invoke sqlite3_initialize() directly. 
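A common way to build on the sqlite3_vfs object defined above is to clone the default VFS, rename it, and register the copy; a rough sketch (the name "demo" is arbitrary, and a real wrapper would also replace some of the method pointers before registering):

  #include <string.h>
  #include "sqlite3.h"

  static sqlite3_vfs demo_vfs;

  /* Sketch: copy the default VFS and register the copy as a non-default VFS. */
  int register_demo_vfs(void){
    sqlite3_vfs *pDefault = sqlite3_vfs_find(0);   /* 0 means "default VFS" */
    if( pDefault==0 ) return SQLITE_ERROR;
    memcpy(&demo_vfs, pDefault, sizeof(sqlite3_vfs));
    demo_vfs.zName = "demo";
    return sqlite3_vfs_register(&demo_vfs, 0);     /* 0: do not make default */
  }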
For example, [sqlite3_open()] +** calls sqlite3_initialize() so the SQLite library will be automatically +** initialized when [sqlite3_open()] is called if it has not be initialized +** already. However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] +** compile-time option, then the automatic calls to sqlite3_initialize() +** are omitted and the application must call sqlite3_initialize() directly +** prior to using any other SQLite interface. For maximum portability, +** it is recommended that applications always invoke sqlite3_initialize() +** directly prior to using any other SQLite interface. Future releases +** of SQLite may require this. In other words, the behavior exhibited +** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the +** default behavior in some future release of SQLite. +** +** The sqlite3_os_init() routine does operating-system specific +** initialization of the SQLite library. The sqlite3_os_end() +** routine undoes the effect of sqlite3_os_init(). Typical tasks +** performed by these routines include allocation or deallocation +** of static resources, initialization of global variables, +** setting up a default [sqlite3_vfs] module, or setting up +** a default configuration using [sqlite3_config()]. +** +** The application should never invoke either sqlite3_os_init() +** or sqlite3_os_end() directly. The application should only invoke +** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() +** interface is called automatically by sqlite3_initialize() and +** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate +** implementations for sqlite3_os_init() and sqlite3_os_end() +** are built into SQLite when it is compiled for unix, windows, or os/2. +** When built for other platforms (using the [SQLITE_OS_OTHER=1] compile-time +** option) the application must supply a suitable implementation for +** sqlite3_os_init() and sqlite3_os_end(). An application-supplied +** implementation of sqlite3_os_init() or sqlite3_os_end() +** must return [SQLITE_OK] on success and some other [error code] upon +** failure. +*/ +int sqlite3_initialize(void); +int sqlite3_shutdown(void); +int sqlite3_os_init(void); +int sqlite3_os_end(void); + +/* +** CAPI3REF: Configuring The SQLite Library {H14100} +** EXPERIMENTAL +** +** The sqlite3_config() interface is used to make global configuration +** changes to SQLite in order to tune SQLite to the specific needs of +** the application. The default configuration is recommended for most +** applications and so this routine is usually not necessary. It is +** provided to support rare applications with unusual needs. +** +** The sqlite3_config() interface is not threadsafe. The application +** must insure that no other SQLite interfaces are invoked by other +** threads while sqlite3_config() is running. Furthermore, sqlite3_config() +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** Note, however, that sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** +** The first argument to sqlite3_config() is an integer +** [SQLITE_CONFIG_SINGLETHREAD | configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [SQLITE_CONFIG_SINGLETHREAD | configuration option] +** in the first argument. +** +** When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. 
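Since sqlite3_config() is only legal before sqlite3_initialize() (or after sqlite3_shutdown()), a start-up sequence typically looks like this sketch, using SQLITE_CONFIG_SERIALIZED purely as an example option:

  #include "sqlite3.h"

  /* Sketch of library start-up: configure first, then initialize. */
  int start_sqlite(void){
    int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
    if( rc!=SQLITE_OK ) return rc;   /* option rejected or unavailable */
    return sqlite3_initialize();
  }

  /* ...and at process shutdown: */
  int stop_sqlite(void){
    return sqlite3_shutdown();
  }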
+** If the option is unknown or SQLite is unable to set the option +** then this routine returns a non-zero [error code]. +** +** Requirements: +** [H14103] [H14106] [H14120] [H14123] [H14126] [H14129] [H14132] [H14135] +** [H14138] [H14141] [H14144] [H14147] [H14150] [H14153] [H14156] [H14159] +** [H14162] [H14165] [H14168] +*/ +SQLITE_EXPERIMENTAL int sqlite3_config(int, ...); + +/* +** CAPI3REF: Configure database connections {H14200} +** EXPERIMENTAL +** +** The sqlite3_db_config() interface is used to make configuration +** changes to a [database connection]. The interface is similar to +** [sqlite3_config()] except that the changes apply to a single +** [database connection] (specified in the first argument). The +** sqlite3_db_config() interface can only be used immediately after +** the database connection is created using [sqlite3_open()], +** [sqlite3_open16()], or [sqlite3_open_v2()]. +** +** The second argument to sqlite3_db_config(D,V,...) is the +** configuration verb - an integer code that indicates what +** aspect of the [database connection] is being configured. +** The only choice for this value is [SQLITE_DBCONFIG_LOOKASIDE]. +** New verbs are likely to be added in future releases of SQLite. +** Additional arguments depend on the verb. +** +** Requirements: +** [H14203] [H14206] [H14209] [H14212] [H14215] +*/ +SQLITE_EXPERIMENTAL int sqlite3_db_config(sqlite3*, int op, ...); + +/* +** CAPI3REF: Memory Allocation Routines {H10155} +** EXPERIMENTAL +** +** An instance of this object defines the interface between SQLite +** and low-level memory allocation routines. +** +** This object is used in only one place in the SQLite interface. +** A pointer to an instance of this object is the argument to +** [sqlite3_config()] when the configuration option is +** [SQLITE_CONFIG_MALLOC]. By creating an instance of this object +** and passing it to [sqlite3_config()] during configuration, an +** application can specify an alternative memory allocation subsystem +** for SQLite to use for all of its dynamic memory needs. +** +** Note that SQLite comes with a built-in memory allocator that is +** perfectly adequate for the overwhelming majority of applications +** and that this object is only useful to a tiny minority of applications +** with specialized memory allocation requirements. This object is +** also used during testing of SQLite in order to specify an alternative +** memory allocator that simulates memory out-of-memory conditions in +** order to verify that SQLite recovers gracefully from such +** conditions. +** +** The xMalloc, xFree, and xRealloc methods must work like the +** malloc(), free(), and realloc() functions from the standard library. +** +** xSize should return the allocated size of a memory allocation +** previously obtained from xMalloc or xRealloc. The allocated size +** is always at least as big as the requested size but may be larger. +** +** The xRoundup method returns what would be the allocated size of +** a memory allocation given a particular requested size. Most memory +** allocators round up memory allocations at least to the next multiple +** of 8. Some allocators round up to a larger multiple or to a power of 2. +** +** The xInit method initializes the memory allocator. (For example, +** it might allocate any require mutexes or initialize internal data +** structures. The xShutdown method is invoked (indirectly) by +** [sqlite3_shutdown()] and should deallocate any resources acquired +** by xInit. 
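A hedged sketch of supplying the SQLITE_CONFIG_MALLOC option with an sqlite3_mem_methods object (the structure is defined just below; the my_* routines are hypothetical application-supplied wrappers):

  #include "sqlite3.h"

  /* Hypothetical application-supplied allocator hooks. */
  extern void *my_malloc(int n);
  extern void  my_free(void *p);
  extern void *my_realloc(void *p, int n);
  extern int   my_size(void *p);
  extern int   my_roundup(int n);
  extern int   my_init(void *pAppData);
  extern void  my_shutdown(void *pAppData);

  static sqlite3_mem_methods my_mem = {
    my_malloc, my_free, my_realloc,
    my_size, my_roundup,
    my_init, my_shutdown,
    0                       /* pAppData passed to my_init()/my_shutdown() */
  };

  /* Must be called before sqlite3_initialize(). */
  int use_my_allocator(void){
    return sqlite3_config(SQLITE_CONFIG_MALLOC, &my_mem);
  }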
The pAppData pointer is used as the only parameter to +** xInit and xShutdown. +*/ +typedef struct sqlite3_mem_methods sqlite3_mem_methods; +struct sqlite3_mem_methods { + void *(*xMalloc)(int); /* Memory allocation function */ + void (*xFree)(void*); /* Free a prior allocation */ + void *(*xRealloc)(void*,int); /* Resize an allocation */ + int (*xSize)(void*); /* Return the size of an allocation */ + int (*xRoundup)(int); /* Round up request size to allocation size */ + int (*xInit)(void*); /* Initialize the memory allocator */ + void (*xShutdown)(void*); /* Deinitialize the memory allocator */ + void *pAppData; /* Argument to xInit() and xShutdown() */ +}; + +/* +** CAPI3REF: Configuration Options {H10160} +** EXPERIMENTAL ** -** If an INSERT occurs within a trigger, then the rowid of the -** inserted row is returned by this routine as long as the trigger -** is running. But once the trigger terminates, the value returned -** by this routine reverts to the last value inserted before the -** trigger fired. +** These constants are the available integer configuration options that +** can be passed as the first argument to the [sqlite3_config()] interface. +** +** New configuration options may be added in future releases of SQLite. +** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_config()] to make sure that +** the call worked. The [sqlite3_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
    +**
    SQLITE_CONFIG_SINGLETHREAD
    +**
    There are no arguments to this option. This option disables +** all mutexing and puts SQLite into a mode where it can only be used +** by a single thread.
    +** +**
    SQLITE_CONFIG_MULTITHREAD
    +**
    There are no arguments to this option. This option disables +** mutexing on [database connection] and [prepared statement] objects. +** The application is responsible for serializing access to +** [database connections] and [prepared statements]. But other mutexes +** are enabled so that SQLite will be safe to use in a multi-threaded +** environment as long as no two threads attempt to use the same +** [database connection] at the same time. See the [threading mode] +** documentation for additional information.
    +** +**
    SQLITE_CONFIG_SERIALIZED
    +**
    There are no arguments to this option. This option enables +** all mutexes including the recursive +** mutexes on [database connection] and [prepared statement] objects. +** In this mode (which is the default when SQLite is compiled with +** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access +** to [database connections] and [prepared statements] so that the +** application is free to use the same [database connection] or the +** same [prepared statement] in different threads at the same time. +** See the [threading mode] documentation for additional information.
    +** +**
    SQLITE_CONFIG_MALLOC
    +**
    This option takes a single argument which is a pointer to an +** instance of the [sqlite3_mem_methods] structure. The argument specifies +** alternative low-level memory allocation routines to be used in place of +** the memory allocation routines built into SQLite.
    +** +**
    SQLITE_CONFIG_GETMALLOC
    +**
    This option takes a single argument which is a pointer to an +** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods] +** structure is filled with the currently defined memory allocation routines. +** This option can be used to overload the default memory allocation +** routines with a wrapper that simulates memory allocation failure or +** tracks memory usage, for example.
    +** +**
    SQLITE_CONFIG_MEMSTATUS
    +**
    This option takes a single argument of type int, interpreted as a +** boolean, which enables or disables the collection of memory allocation +** statistics. When disabled, the following SQLite interfaces become +** non-operational: +**
      +**
    • [sqlite3_memory_used()] +**
    • [sqlite3_memory_highwater()] +**
    • [sqlite3_soft_heap_limit()] +**
    • [sqlite3_status()] +**
    +**
    +** +**
    SQLITE_CONFIG_SCRATCH
    +**
    This option specifies a static memory buffer that SQLite can use for +** scratch memory. There are three arguments: A pointer to an 8-byte +** aligned memory buffer from which the scratch allocations will be +** drawn, the size of each scratch allocation (sz), +** and the maximum number of scratch allocations (N). The sz +** argument must be a multiple of 16. The sz parameter should be a few bytes +** larger than the actual scratch space required due to internal overhead. +** The first argument should point to an 8-byte aligned buffer +** of at least sz*N bytes of memory. +** SQLite will use no more than one scratch buffer at once per thread, so +** N should be set to the expected maximum number of threads. The sz +** parameter should be 6 times the size of the largest database page size. +** Scratch buffers are used as part of the btree balance operation. If +** the btree balancer needs additional memory beyond what is provided by +** scratch buffers or if no scratch buffer space is specified, then SQLite +** goes to [sqlite3_malloc()] to obtain the memory it needs.
    +** +**
    SQLITE_CONFIG_PAGECACHE
    +**
    This option specifies a static memory buffer that SQLite can use for +** the database page cache with the default page cache implementation. +** This configuration should not be used if an application-defined page +** cache implementation is loaded using the SQLITE_CONFIG_PCACHE option. +** There are three arguments to this option: A pointer to 8-byte aligned +** memory, the size of each page buffer (sz), and the number of pages (N). +** The sz argument should be the size of the largest database page +** (a power of two between 512 and 32768) plus a little extra for each +** page header. The page header size is 20 to 40 bytes depending on +** the host architecture. It is harmless, apart from the wasted memory, +** to make sz a little too large. The first +** argument should point to an allocation of at least sz*N bytes of memory. +** SQLite will use the memory provided by the first argument to satisfy its +** memory needs for the first N pages that it adds to cache. If additional +** page cache memory is needed beyond what is provided by this option, then +** SQLite goes to [sqlite3_malloc()] for the additional storage space. +** The implementation might use one or more of the N buffers to hold +** memory accounting information. The pointer in the first argument must +** be aligned to an 8-byte boundary or subsequent behavior of SQLite +** will be undefined.
    +** +**
    SQLITE_CONFIG_HEAP
    +**
    This option specifies a static memory buffer that SQLite will use +** for all of its dynamic memory allocation needs beyond those provided +** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE]. +** There are three arguments: An 8-byte aligned pointer to the memory, +** the number of bytes in the memory buffer, and the minimum allocation size. +** If the first pointer (the memory pointer) is NULL, then SQLite reverts +** to using its default memory allocator (the system malloc() implementation), +** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. If the +** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or +** [SQLITE_ENABLE_MEMSYS5] is defined, then the alternative memory +** allocator is engaged to handle all of SQLite's memory allocation needs. +** The first pointer (the memory pointer) must be aligned to an 8-byte +** boundary or subsequent behavior of SQLite will be undefined.
    +** +**
    SQLITE_CONFIG_MUTEX
    +**
    This option takes a single argument which is a pointer to an +** instance of the [sqlite3_mutex_methods] structure. The argument specifies +** alternative low-level mutex routines to be used in place of +** the mutex routines built into SQLite.
    +** +**
    SQLITE_CONFIG_GETMUTEX
    +**
    This option takes a single argument which is a pointer to an +** instance of the [sqlite3_mutex_methods] structure. The +** [sqlite3_mutex_methods] +** structure is filled with the currently defined mutex routines. +** This option can be used to overload the default mutex allocation +** routines with a wrapper used to track mutex usage for performance +** profiling or testing, for example.
    +** +**
    SQLITE_CONFIG_LOOKASIDE
    +**
    This option takes two arguments that determine the default +** memory allocation lookaside optimization. The first argument is the +** size of each lookaside buffer slot and the second is the number of +** slots allocated to each database connection.
    +** +**
    SQLITE_CONFIG_PCACHE
    +**
    This option takes a single argument which is a pointer to +** an [sqlite3_pcache_methods] object. This object specifies the interface +** to a custom page cache implementation. SQLite makes a copy of the +** object and uses it for page cache memory allocations.
    +** +**
    SQLITE_CONFIG_GETPCACHE
    +**
    This option takes a single argument which is a pointer to an +** [sqlite3_pcache_methods] object. SQLite copies the current +** page cache implementation into that object.
    +** +**
    +*/ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* sqlite3_pcache_methods* */ +#define SQLITE_CONFIG_GETPCACHE 15 /* sqlite3_pcache_methods* */ + +/* +** CAPI3REF: Configuration Options {H10170} +** EXPERIMENTAL +** +** These constants are the available integer configuration options that +** can be passed as the second argument to the [sqlite3_db_config()] interface. +** +** New configuration options may be added in future releases of SQLite. +** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_db_config()] to make sure that +** the call worked. The [sqlite3_db_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
    +**
    SQLITE_DBCONFIG_LOOKASIDE
    +**
    This option takes three additional arguments that determine the +** [lookaside memory allocator] configuration for the [database connection]. +** The first argument (the third parameter to [sqlite3_db_config()]) is a +** pointer to an 8-byte aligned memory buffer to use for lookaside memory. +** The first argument may be NULL in which case SQLite will allocate the +** lookaside buffer itself using [sqlite3_malloc()]. The second argument is the +** size of each lookaside buffer slot and the third argument is the number of +** slots. The size of the buffer in the first argument must be greater than +** or equal to the product of the second and third arguments.
    +** +**
    +*/ +#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ + + +/* +** CAPI3REF: Enable Or Disable Extended Result Codes {H12200} +** +** The sqlite3_extended_result_codes() routine enables or disables the +** [extended result codes] feature of SQLite. The extended result +** codes are disabled by default for historical compatibility considerations. +** +** Requirements: +** [H12201] [H12202] +*/ +int sqlite3_extended_result_codes(sqlite3*, int onoff); + +/* +** CAPI3REF: Last Insert Rowid {H12220} +** +** Each entry in an SQLite table has a unique 64-bit signed +** integer key called the [ROWID | "rowid"]. The rowid is always available +** as an undeclared column named ROWID, OID, or _ROWID_ as long as those +** names are not also used by explicitly declared columns. If +** the table has a column of type [INTEGER PRIMARY KEY] then that column +** is another alias for the rowid. +** +** This routine returns the [rowid] of the most recent +** successful [INSERT] into the database from the [database connection] +** in the first argument. If no successful [INSERT]s +** have ever occurred on that database connection, zero is returned. +** +** If an [INSERT] occurs within a trigger, then the [rowid] of the inserted +** row is returned by this routine as long as the trigger is running. +** But once the trigger terminates, the value returned by this routine +** reverts to the last value inserted before the trigger fired. +** +** An [INSERT] that fails due to a constraint violation is not a +** successful [INSERT] and does not change the value returned by this +** routine. Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, +** and INSERT OR ABORT make no changes to the return value of this +** routine when their insertion fails. When INSERT OR REPLACE +** encounters a constraint violation, it does not fail. The +** INSERT continues to completion after deleting rows that caused +** the constraint problem so INSERT OR REPLACE will always change +** the return value of this interface. +** +** For the purposes of this routine, an [INSERT] is considered to +** be successful even if it is subsequently rolled back. +** +** Requirements: +** [H12221] [H12223] +** +** If a separate thread performs a new [INSERT] on the same +** database connection while the [sqlite3_last_insert_rowid()] +** function is running and thus changes the last insert [rowid], +** then the value returned by [sqlite3_last_insert_rowid()] is +** unpredictable and might not equal either the old or the new +** last insert [rowid]. */ -sqlite_int64 sqlite3_last_insert_rowid(sqlite3*); +sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); /* -** CAPI3REF: Count The Number Of Rows Modified +** CAPI3REF: Count The Number Of Rows Modified {H12240} ** ** This function returns the number of database rows that were changed -** (or inserted or deleted) by the most recent SQL statement. Only -** changes that are directly specified by the INSERT, UPDATE, or -** DELETE statement are counted. Auxiliary changes caused by -** triggers are not counted. Use the [sqlite3_total_changes()] function +** or inserted or deleted by the most recently completed SQL statement +** on the [database connection] specified by the first parameter. +** Only changes that are directly specified by the [INSERT], [UPDATE], +** or [DELETE] statement are counted. Auxiliary changes caused by +** triggers are not counted. Use the [sqlite3_total_changes()] function ** to find the total number of changes including changes caused by triggers. 
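A short sketch of sqlite3_last_insert_rowid() as described above; the table t1 and the INSERT text are placeholders, and error checking is omitted:

  #include <stdio.h>
  #include "sqlite3.h"

  /* Sketch: insert a row, then read back the rowid it was assigned. */
  void insert_and_report(sqlite3 *db){
    sqlite3_exec(db, "INSERT INTO t1(x) VALUES(42)", 0, 0, 0);
    printf("new rowid = %lld\n",
           (long long)sqlite3_last_insert_rowid(db));
  }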
** -** Within the body of a trigger, the sqlite3_changes() interface can be -** called to find the number of -** changes in the most recently completed INSERT, UPDATE, or DELETE -** statement within the body of the trigger. +** Changes to a view that are simulated by an [INSTEAD OF trigger] +** are not counted. Only real table changes are counted. ** -** All changes are counted, even if they were later undone by a -** ROLLBACK or ABORT. Except, changes associated with creating and -** dropping tables are not counted. -** -** If a callback invokes [sqlite3_exec()] or [sqlite3_step()] recursively, -** then the changes in the inner, recursive call are counted together -** with the changes in the outer call. -** -** SQLite implements the command "DELETE FROM table" without a WHERE clause -** by dropping and recreating the table. (This is much faster than going -** through and deleting individual elements from the table.) Because of -** this optimization, the change count for "DELETE FROM table" will be -** zero regardless of the number of elements that were originally in the -** table. To get an accurate count of the number of rows deleted, use -** "DELETE FROM table WHERE 1" instead. +** A "row change" is a change to a single row of a single table +** caused by an INSERT, DELETE, or UPDATE statement. Rows that +** are changed as side effects of [REPLACE] constraint resolution, +** rollback, ABORT processing, [DROP TABLE], or by any other +** mechanisms do not count as direct row changes. +** +** A "trigger context" is a scope of execution that begins and +** ends with the script of a [CREATE TRIGGER | trigger]. +** Most SQL statements are +** evaluated outside of any trigger. This is the "top level" +** trigger context. If a trigger fires from the top level, a +** new trigger context is entered for the duration of that one +** trigger. Subtriggers create subcontexts for their duration. +** +** Calling [sqlite3_exec()] or [sqlite3_step()] recursively does +** not create a new trigger context. +** +** This function returns the number of direct row changes in the +** most recent INSERT, UPDATE, or DELETE statement within the same +** trigger context. +** +** Thus, when called from the top level, this function returns the +** number of changes in the most recent INSERT, UPDATE, or DELETE +** that also occurred at the top level. Within the body of a trigger, +** the sqlite3_changes() interface can be called to find the number of +** changes in the most recently completed INSERT, UPDATE, or DELETE +** statement within the body of the same trigger. +** However, the number returned does not include changes +** caused by subtriggers since those have their own context. +** +** See also the [sqlite3_total_changes()] interface and the +** [count_changes pragma]. +** +** Requirements: +** [H12241] [H12243] +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_changes()] is running then the value returned +** is unpredictable and not meaningful. */ int sqlite3_changes(sqlite3*); /* -** CAPI3REF: Total Number Of Rows Modified -*** -** This function returns the number of database rows that have been -** modified by INSERT, UPDATE or DELETE statements since the database handle -** was opened. This includes UPDATE, INSERT and DELETE statements executed -** as part of trigger programs. All changes are counted as soon as the -** statement that makes them is completed (when the statement handle is -** passed to [sqlite3_reset()] or [sqlite_finalise()]). 
-** -** See also the [sqlite3_change()] interface. -** -** SQLite implements the command "DELETE FROM table" without a WHERE clause -** by dropping and recreating the table. (This is much faster than going -** through and deleting individual elements form the table.) Because of -** this optimization, the change count for "DELETE FROM table" will be -** zero regardless of the number of elements that were originally in the -** table. To get an accurate count of the number of rows deleted, use -** "DELETE FROM table WHERE 1" instead. +** CAPI3REF: Total Number Of Rows Modified {H12260} +** +** This function returns the number of row changes caused by [INSERT], +** [UPDATE] or [DELETE] statements since the [database connection] was opened. +** The count includes all changes from all +** [CREATE TRIGGER | trigger] contexts. However, +** the count does not include changes used to implement [REPLACE] constraints, +** do rollbacks or ABORT processing, or [DROP TABLE] processing. The +** count does not include rows of views that fire an [INSTEAD OF trigger], +** though if the INSTEAD OF trigger makes changes of its own, those changes +** are counted. +** The changes are counted as soon as the statement that makes them is +** completed (when the statement handle is passed to [sqlite3_reset()] or +** [sqlite3_finalize()]). +** +** See also the [sqlite3_changes()] interface and the +** [count_changes pragma]. +** +** Requirements: +** [H12261] [H12263] +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_total_changes()] is running then the value +** returned is unpredictable and not meaningful. */ int sqlite3_total_changes(sqlite3*); /* -** CAPI3REF: Interrupt A Long-Running Query +** CAPI3REF: Interrupt A Long-Running Query {H12270} ** ** This function causes any pending database operation to abort and -** return at its earliest opportunity. This routine is typically +** return at its earliest opportunity. This routine is typically ** called in response to a user action such as pressing "Cancel" ** or Ctrl-C where the user wants a long query operation to halt ** immediately. ** ** It is safe to call this routine from a thread different from the -** thread that is currently running the database operation. +** thread that is currently running the database operation. But it +** is not safe to call this routine with a [database connection] that +** is closed or might close before sqlite3_interrupt() returns. +** +** If an SQL operation is very nearly finished at the time when +** sqlite3_interrupt() is called, then it might not have an opportunity +** to be interrupted and might continue to completion. +** +** An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. +** If the interrupted SQL operation is an INSERT, UPDATE, or DELETE +** that is inside an explicit transaction, then the entire transaction +** will be rolled back automatically. +** +** The sqlite3_interrupt(D) call is in effect until all currently running +** SQL statements on [database connection] D complete. Any new SQL statements +** that are started after the sqlite3_interrupt() call and before the +** running statements reaches zero are interrupted as if they had been +** running prior to the sqlite3_interrupt() call. New SQL statements +** that are started after the running statement count reaches zero are +** not effected by the sqlite3_interrupt(). 
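Illustrating sqlite3_changes() and sqlite3_total_changes() described above (the UPDATE statement and table name are placeholders):

  #include <stdio.h>
  #include "sqlite3.h"

  /* Sketch: report direct changes made by the last statement versus the
  ** running total for the connection. */
  void update_and_count(sqlite3 *db){
    sqlite3_exec(db, "UPDATE t1 SET x=x+1", 0, 0, 0);
    printf("last statement changed %d rows; %d rows changed since open\n",
           sqlite3_changes(db), sqlite3_total_changes(db));
  }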
+** A call to sqlite3_interrupt(D) that occurs when there are no running +** SQL statements is a no-op and has no effect on SQL statements +** that are started after the sqlite3_interrupt() call returns. ** -** The SQL operation that is interrupted will return [SQLITE_INTERRUPT]. -** If an interrupted operation was an update that is inside an -** explicit transaction, then the entire transaction will be rolled -** back automatically. +** Requirements: +** [H12271] [H12272] +** +** If the database connection closes while [sqlite3_interrupt()] +** is running then bad things will likely happen. */ void sqlite3_interrupt(sqlite3*); /* -** CAPI3REF: Determine If An SQL Statement Is Complete +** CAPI3REF: Determine If An SQL Statement Is Complete {H10510} +** +** These routines are useful during command-line input to determine if the +** currently entered text seems to form a complete SQL statement or +** if additional input is needed before sending the text into +** SQLite for parsing. These routines return 1 if the input string +** appears to be a complete SQL statement. A statement is judged to be +** complete if it ends with a semicolon token and is not a prefix of a +** well-formed CREATE TRIGGER statement. Semicolons that are embedded within +** string literals or quoted identifier names or comments are not +** independent tokens (they are part of the token in which they are +** embedded) and thus do not count as a statement terminator. Whitespace +** and comments that follow the final semicolon are ignored. +** +** These routines return 0 if the statement is incomplete. If a +** memory allocation fails, then SQLITE_NOMEM is returned. +** +** These routines do not parse the SQL statements thus +** will not detect syntactically incorrect SQL. +** +** If SQLite has not been initialized using [sqlite3_initialize()] prior +** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked +** automatically by sqlite3_complete16(). If that initialization fails, +** then the return value from sqlite3_complete16() will be non-zero +** regardless of whether or not the input SQL is complete. ** -** These functions return true if the given input string comprises -** one or more complete SQL statements. For the sqlite3_complete() call, -** the parameter must be a nul-terminated UTF-8 string. For -** sqlite3_complete16(), a nul-terminated machine byte order UTF-16 string -** is required. -** -** These routines are useful for command-line input to determine if the -** currently entered text forms one or more complete SQL statements or -** if additional input is needed before sending the statements into -** SQLite for parsing. The algorithm is simple. If the -** last token other than spaces and comments is a semicolon, then return -** true. Actually, the algorithm is a little more complicated than that -** in order to deal with triggers, but the basic idea is the same: the -** statement is not complete unless it ends in a semicolon. +** Requirements: [H10511] [H10512] +** +** The input to [sqlite3_complete()] must be a zero-terminated +** UTF-8 string. +** +** The input to [sqlite3_complete16()] must be a zero-terminated +** UTF-16 string in native byte order. 
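The usual home of sqlite3_complete() is a line-oriented shell that accumulates input until a whole statement has been typed; a rough sketch (buffer sizes are arbitrary and execution of the statement is omitted):

  #include <stdio.h>
  #include <string.h>
  #include "sqlite3.h"

  /* Sketch: read lines until the accumulated text looks like a complete
  ** SQL statement, then hand it off to the rest of the shell. */
  void read_one_statement(void){
    char zSql[4096];
    char zLine[256];
    zSql[0] = 0;
    while( fgets(zLine, sizeof(zLine), stdin) ){
      strncat(zSql, zLine, sizeof(zSql)-strlen(zSql)-1);
      if( sqlite3_complete(zSql) ) break;   /* ends with a semicolon token */
    }
    /* ... pass zSql to sqlite3_prepare_v2() or sqlite3_exec() ... */
  }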
*/ int sqlite3_complete(const char *sql); int sqlite3_complete16(const void *sql); /* -** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors +** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors {H12310} ** -** This routine identifies a callback function that might be invoked -** whenever an attempt is made to open a database table -** that another thread or process has locked. -** If the busy callback is NULL, then [SQLITE_BUSY] -** (or sometimes [SQLITE_IOERR_BLOCKED]) -** is returned immediately upon encountering the lock. -** If the busy callback is not NULL, then the -** callback will be invoked with two arguments. The -** first argument to the handler is a copy of the void* pointer which -** is the third argument to this routine. The second argument to -** the handler is the number of times that the busy handler has -** been invoked for this locking event. If the +** This routine sets a callback function that might be invoked whenever +** an attempt is made to open a database table that another thread +** or process has locked. +** +** If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] +** is returned immediately upon encountering the lock. If the busy callback +** is not NULL, then the callback will be invoked with two arguments. +** +** The first argument to the handler is a copy of the void* pointer which +** is the third argument to sqlite3_busy_handler(). The second argument to +** the handler callback is the number of times that the busy handler has +** been invoked for this locking event. If the ** busy callback returns 0, then no additional attempts are made to ** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned. -** If the callback returns non-zero, then another attempt is made to open the -** database for reading and the cycle repeats. +** If the callback returns non-zero, then another attempt +** is made to open the database for reading and the cycle repeats. ** -** The presence of a busy handler does not guarantee that -** it will be invoked when there is lock contention. -** If SQLite determines that invoking the busy handler could result in -** a deadlock, it will return [SQLITE_BUSY] instead. +** The presence of a busy handler does not guarantee that it will be invoked +** when there is lock contention. If SQLite determines that invoking the busy +** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] +** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler. ** Consider a scenario where one process is holding a read lock that ** it is trying to promote to a reserved lock and ** a second process is holding a reserved lock that it is trying @@ -483,8 +1439,8 @@ ** ** The default busy callback is NULL. ** -** The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] when -** SQLite is in the middle of a large transaction where all the +** The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] +** when SQLite is in the middle of a large transaction where all the ** changes will not fit into the in-memory cache. SQLite will ** already hold a RESERVED lock on the database file, but it needs ** to promote this lock to EXCLUSIVE so that it can spill cache @@ -493,109 +1449,140 @@ ** cache will be left in an inconsistent state and so the error ** code is promoted from the relatively benign [SQLITE_BUSY] to ** the more severe [SQLITE_IOERR_BLOCKED]. This error code promotion -** forces an automatic rollback of the changes. 
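A busy handler in the spirit of the description above might retry a bounded number of times; a sketch (the retry count and 100 ms back-off are arbitrary choices, and sqlite3_busy_timeout() is often the simpler option):

  #include "sqlite3.h"

  /* Sketch of a busy handler: give up after 10 attempts.  The second
  ** argument is the number of times the handler has already been invoked
  ** for the current locking event. */
  static int demo_busy(void *pArg, int nPrior){
    (void)pArg;
    if( nPrior>=10 ) return 0;   /* 0 => stop retrying, statement gets SQLITE_BUSY */
    sqlite3_sleep(100);          /* back off for roughly 100 ms, then retry */
    return 1;                    /* non-zero => try the lock again */
  }

  /* Installation:
  **   sqlite3_busy_handler(db, demo_busy, 0);
  ** or, more simply:
  **   sqlite3_busy_timeout(db, 2000);    -- retry for up to 2 seconds
  */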
See the -** +** forces an automatic rollback of the changes. See the +** ** CorruptionFollowingBusyError wiki page for a discussion of why ** this is important. -** -** Sqlite is re-entrant, so the busy handler may start a new query. -** (It is not clear why anyone would every want to do this, but it -** is allowed, in theory.) But the busy handler may not close the -** database. Closing the database from a busy handler will delete -** data structures out from under the executing query and will -** probably result in a segmentation fault or other runtime error. -** -** There can only be a single busy handler defined for each database -** connection. Setting a new busy handler clears any previous one. -** Note that calling [sqlite3_busy_timeout()] will also set or clear -** the busy handler. +** +** There can only be a single busy handler defined for each +** [database connection]. Setting a new busy handler clears any +** previously set handler. Note that calling [sqlite3_busy_timeout()] +** will also set or clear the busy handler. +** +** The busy callback should not take any actions which modify the +** database connection that invoked the busy handler. Any such actions +** result in undefined behavior. +** +** Requirements: +** [H12311] [H12312] [H12314] [H12316] [H12318] +** +** A busy handler must not close the database connection +** or [prepared statement] that invoked the busy handler. */ int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); /* -** CAPI3REF: Set A Busy Timeout +** CAPI3REF: Set A Busy Timeout {H12340} ** -** This routine sets a busy handler that sleeps for a while when a -** table is locked. The handler will sleep multiple times until -** at least "ms" milliseconds of sleeping have been done. After -** "ms" milliseconds of sleeping, the handler returns 0 which -** causes [sqlite3_step()] to return [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. +** This routine sets a [sqlite3_busy_handler | busy handler] that sleeps +** for a specified amount of time when a table is locked. The handler +** will sleep multiple times until at least "ms" milliseconds of sleeping +** have accumulated. {H12343} After "ms" milliseconds of sleeping, +** the handler returns 0 which causes [sqlite3_step()] to return +** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. ** ** Calling this routine with an argument less than or equal to zero ** turns off all busy handlers. ** -** There can only be a single busy handler for a particular database -** connection. If another busy handler was defined -** (using [sqlite3_busy_handler()]) prior to calling +** There can only be a single busy handler for a particular +** [database connection] any any given moment. If another busy handler +** was defined (using [sqlite3_busy_handler()]) prior to calling ** this routine, that other busy handler is cleared. +** +** Requirements: +** [H12341] [H12343] [H12344] */ int sqlite3_busy_timeout(sqlite3*, int ms); /* -** CAPI3REF: Convenience Routines For Running Queries +** CAPI3REF: Convenience Routines For Running Queries {H12370} ** -** This next routine is a convenience wrapper around [sqlite3_exec()]. -** Instead of invoking a user-supplied callback for each row of the -** result, this routine remembers each row of the result in memory -** obtained from [sqlite3_malloc()], then returns all of the result after the -** query has finished. +** Definition: A result table is memory data structure created by the +** [sqlite3_get_table()] interface. 
A result table records the +** complete query results from one or more queries. +** +** The table conceptually has a number of rows and columns. But +** these numbers are not part of the result table itself. These +** numbers are obtained separately. Let N be the number of rows +** and M be the number of columns. +** +** A result table is an array of pointers to zero-terminated UTF-8 strings. +** There are (N+1)*M elements in the array. The first M pointers point +** to zero-terminated strings that contain the names of the columns. +** The remaining entries all point to query results. NULL values result +** in NULL pointers. All other values are in their UTF-8 zero-terminated +** string representation as returned by [sqlite3_column_text()]. +** +** A result table might consist of one or more memory allocations. +** It is not safe to pass a result table directly to [sqlite3_free()]. +** A result table should be deallocated using [sqlite3_free_table()]. ** -** As an example, suppose the query result where this table: +** As an example of the result table format, suppose a query result +** is as follows: ** -**
    +** 
     **        Name        | Age
     **        -----------------------
     **        Alice       | 43
     **        Bob         | 28
     **        Cindy       | 21
    -** 
    +**
** -** If the 3rd argument were &azResult then after the function returns -** azResult will contain the following data: +** There are two columns (M==2) and three rows (N==3).  Thus the +** result table has 8 entries.  Suppose the result table is stored +** in an array named azResult.  Then azResult holds this content: ** -**
    -**        azResult[0] = "Name";
    -**        azResult[1] = "Age";
    -**        azResult[2] = "Alice";
    -**        azResult[3] = "43";
    -**        azResult[4] = "Bob";
    -**        azResult[5] = "28";
    -**        azResult[6] = "Cindy";
    -**        azResult[7] = "21";
    -** 
    +**
    +**        azResult[0] = "Name";
    +**        azResult[1] = "Age";
    +**        azResult[2] = "Alice";
    +**        azResult[3] = "43";
    +**        azResult[4] = "Bob";
    +**        azResult[5] = "28";
    +**        azResult[6] = "Cindy";
    +**        azResult[7] = "21";
    +** 
    ** -** Notice that there is an extra row of data containing the column -** headers. But the *nrow return value is still 3. *ncolumn is -** set to 2. In general, the number of values inserted into azResult -** will be ((*nrow) + 1)*(*ncolumn). -** -** After the calling function has finished using the result, it should -** pass the result data pointer to sqlite3_free_table() in order to -** release the memory that was malloc-ed. Because of the way the -** [sqlite3_malloc()] happens, the calling function must not try to call -** [sqlite3_free()] directly. Only [sqlite3_free_table()] is able to release -** the memory properly and safely. +** The sqlite3_get_table() function evaluates one or more +** semicolon-separated SQL statements in the zero-terminated UTF-8 +** string of its 2nd parameter. It returns a result table to the +** pointer given in its 3rd parameter. +** +** After the calling function has finished using the result, it should +** pass the pointer to the result table to sqlite3_free_table() in order to +** release the memory that was malloced. Because of the way the +** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling +** function must not try to call [sqlite3_free()] directly. Only +** [sqlite3_free_table()] is able to release the memory properly and safely. +** +** The sqlite3_get_table() interface is implemented as a wrapper around +** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access +** to any internal data structures of SQLite. It uses only the public +** interface defined here. As a consequence, errors that occur in the +** wrapper layer outside of the internal [sqlite3_exec()] call are not +** reflected in subsequent calls to [sqlite3_errcode()] or [sqlite3_errmsg()]. ** -** The return value of this routine is the same as from [sqlite3_exec()]. +** Requirements: +** [H12371] [H12373] [H12374] [H12376] [H12379] [H12382] */ int sqlite3_get_table( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be executed */ - char ***resultp, /* Result written to a char *[] that this points to */ - int *nrow, /* Number of result rows written here */ - int *ncolumn, /* Number of result columns written here */ - char **errmsg /* Error msg written here */ + sqlite3 *db, /* An open database */ + const char *zSql, /* SQL to be evaluated */ + char ***pazResult, /* Results of the query */ + int *pnRow, /* Number of result rows written here */ + int *pnColumn, /* Number of result columns written here */ + char **pzErrmsg /* Error msg written here */ ); void sqlite3_free_table(char **result); /* -** CAPI3REF: Formatted String Printing Functions +** CAPI3REF: Formatted String Printing Functions {H17400} ** ** These routines are workalikes of the "printf()" family of functions ** from the standard C library. ** ** The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite_malloc()]. +** results into memory obtained from [sqlite3_malloc()]. ** The strings returned by these two routines should be ** released by [sqlite3_free()]. Both routines return a ** NULL pointer if [sqlite3_malloc()] is unable to allocate enough @@ -604,7 +1591,7 @@ ** In sqlite3_snprintf() routine is similar to "snprintf()" from ** the standard C library. The result is written into the ** buffer supplied as the second parameter whose size is given by -** the first parameter. Note that the order of the +** the first parameter. Note that the order of the ** first two parameters is reversed from snprintf(). 
This is an ** historical accident that cannot be fixed without breaking ** backwards compatibility. Note also that sqlite3_snprintf() @@ -622,8 +1609,8 @@ ** ** These routines all implement some additional formatting ** options that are useful for constructing SQL statements. -** All of the usual printf formatting options apply. In addition, there -** is are "%q" and "%Q" options. +** All of the usual printf() formatting options apply. In addition, there +** is are "%q", "%Q", and "%z" options. ** ** The %q option works like %s in that it substitutes a null-terminated ** string from the argument list. But %q also doubles every '\'' character. @@ -631,7 +1618,7 @@ ** character it escapes that character and allows it to be inserted into ** the string. ** -** For example, so some string variable contains text as follows: +** For example, assume the string variable zText contains text as follows: ** **
     **  char *zText = "It's a happy day!";
    @@ -659,14 +1646,13 @@
     **  INSERT INTO table1 VALUES('It's a happy day!');
     ** 
    ** -** This second example is an SQL syntax error. As a general rule you -** should always use %q instead of %s when inserting text into a string -** literal. +** This second example is an SQL syntax error. As a general rule you should +** always use %q instead of %s when inserting text into a string literal. ** ** The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Or if the parameter in the argument -** list is a NULL pointer, %Q substitutes the text "NULL" (without single -** quotes) in place of the %Q option. So, for example, one could say: +** the outside of the total string. Additionally, if the parameter in the +** argument list is a NULL pointer, %Q substitutes the text "NULL" (without +** single quotes) in place of the %Q option. So, for example, one could say: ** **
     **  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
    @@ -676,76 +1662,222 @@
     **
     ** The code above will render a correct SQL statement in the zSQL
     ** variable even if the zText variable is a NULL pointer.
    +**
    +** The "%z" formatting option works exactly like "%s" with the
    +** addition that after the string has been read and copied into
    +** the result, [sqlite3_free()] is called on the input string. {END}
    +**
    +** Requirements:
    +** [H17403] [H17406] [H17407]
     */
     char *sqlite3_mprintf(const char*,...);
     char *sqlite3_vmprintf(const char*, va_list);
     char *sqlite3_snprintf(int,char*,const char*, ...);
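The %q and %Q behavior described above is easiest to see in a short example. The following is a minimal sketch only; the table name t1 is hypothetical, and only sqlite3_mprintf(), sqlite3_snprintf() and sqlite3_free() from this header are used:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      const char *zName = "O'Reilly";   /* value containing a single quote */
      char zBuf[100];

      /* %q doubles the embedded quote; the caller supplies the outer quotes */
      char *zSql1 = sqlite3_mprintf("INSERT INTO t1 VALUES('%q')", zName);

      /* %Q adds the outer quotes itself and renders a NULL pointer as NULL */
      char *zSql2 = sqlite3_mprintf("INSERT INTO t1 VALUES(%Q)", (const char*)0);

      /* note the reversed argument order: buffer size first, then the buffer */
      sqlite3_snprintf((int)sizeof(zBuf), zBuf, "name=%Q", zName);

      printf("%s\n%s\n%s\n", zSql1, zSql2, zBuf);

      /* strings from sqlite3_mprintf() must be released with sqlite3_free() */
      sqlite3_free(zSql1);
      sqlite3_free(zSql2);
      return 0;
    }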
     
     /*
    -** CAPI3REF: Memory Allocation Functions
    +** CAPI3REF: Memory Allocation Subsystem {H17300} 
     **
    -** SQLite uses its own memory allocator.  On some installations, this
    -** memory allocator is identical to the standard malloc()/realloc()/free()
    -** and can be used interchangable.  On others, the implementations are
    -** different.  For maximum portability, it is best not to mix calls
    -** to the standard malloc/realloc/free with the sqlite versions.
    +** The SQLite core  uses these three routines for all of its own
    +** internal memory allocation needs. "Core" in the previous sentence
+** does not include operating-system specific VFS implementations.  The
    +** Windows VFS uses native malloc() and free() for some operations.
    +**
    +** The sqlite3_malloc() routine returns a pointer to a block
    +** of memory at least N bytes in length, where N is the parameter.
    +** If sqlite3_malloc() is unable to obtain sufficient free
    +** memory, it returns a NULL pointer.  If the parameter N to
    +** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
    +** a NULL pointer.
    +**
    +** Calling sqlite3_free() with a pointer previously returned
    +** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
    +** that it might be reused.  The sqlite3_free() routine is
+** a no-op if it is called with a NULL pointer.  Passing a NULL pointer
    +** to sqlite3_free() is harmless.  After being freed, memory
    +** should neither be read nor written.  Even reading previously freed
    +** memory might result in a segmentation fault or other severe error.
    +** Memory corruption, a segmentation fault, or other severe error
    +** might result if sqlite3_free() is called with a non-NULL pointer that
    +** was not obtained from sqlite3_malloc() or sqlite3_realloc().
    +**
    +** The sqlite3_realloc() interface attempts to resize a
    +** prior memory allocation to be at least N bytes, where N is the
    +** second parameter.  The memory allocation to be resized is the first
    +** parameter.  If the first parameter to sqlite3_realloc()
    +** is a NULL pointer then its behavior is identical to calling
    +** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc().
    +** If the second parameter to sqlite3_realloc() is zero or
    +** negative then the behavior is exactly the same as calling
    +** sqlite3_free(P) where P is the first parameter to sqlite3_realloc().
    +** sqlite3_realloc() returns a pointer to a memory allocation
    +** of at least N bytes in size or NULL if sufficient memory is unavailable.
    +** If M is the size of the prior allocation, then min(N,M) bytes
+** of the prior allocation are copied into the beginning of the buffer returned
    +** by sqlite3_realloc() and the prior allocation is freed.
    +** If sqlite3_realloc() returns NULL, then the prior allocation
    +** is not freed.
    +**
    +** The memory returned by sqlite3_malloc() and sqlite3_realloc()
    +** is always aligned to at least an 8 byte boundary. {END}
    +**
    +** The default implementation of the memory allocation subsystem uses
    +** the malloc(), realloc() and free() provided by the standard C library.
    +** {H17382} However, if SQLite is compiled with the
    +** SQLITE_MEMORY_SIZE=NNN C preprocessor macro (where NNN
+** is an integer), then SQLite creates a static array of at least
    +** NNN bytes in size and uses that array for all of its dynamic
    +** memory allocation needs. {END}  Additional memory allocator options
    +** may be added in future releases.
    +**
    +** In SQLite version 3.5.0 and 3.5.1, it was possible to define
+** the SQLITE_OMIT_MEMORY_ALLOCATION macro, which would cause the built-in
    +** implementation of these routines to be omitted.  That capability
    +** is no longer provided.  Only built-in memory allocators can be used.
    +**
    +** The Windows OS interface layer calls
    +** the system malloc() and free() directly when converting
    +** filenames between the UTF-8 encoding used by SQLite
    +** and whatever filename encoding is used by the particular Windows
    +** installation.  Memory allocation errors are detected, but
    +** they are reported back as [SQLITE_CANTOPEN] or
    +** [SQLITE_IOERR] rather than [SQLITE_NOMEM].
    +**
    +** Requirements:
    +** [H17303] [H17304] [H17305] [H17306] [H17310] [H17312] [H17315] [H17318]
    +** [H17321] [H17322] [H17323]
    +**
    +** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
    +** must be either NULL or else pointers obtained from a prior
    +** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
    +** not yet been released.
    +**
    +** The application must not read or write any part of
    +** a block of memory after it has been released using
    +** [sqlite3_free()] or [sqlite3_realloc()].
     */
     void *sqlite3_malloc(int);
     void *sqlite3_realloc(void*, int);
     void sqlite3_free(void*);
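One detail above worth illustrating is the realloc-failure rule: when sqlite3_realloc() returns NULL the prior allocation is not freed, so the caller must still release it. A minimal sketch under that assumption; the helper name append_text() is hypothetical:

    #include <string.h>
    #include <sqlite3.h>

    /* Append zExtra to a string held in memory from sqlite3_malloc()/realloc().
    ** On entry *pnBuf is the current string length (zBuf may be NULL with
    ** *pnBuf==0).  If sqlite3_realloc() fails it does NOT free the old
    ** allocation, so zBuf must still be released here. */
    static char *append_text(char *zBuf, int *pnBuf, const char *zExtra){
      int nExtra = (int)strlen(zExtra);
      char *zNew = sqlite3_realloc(zBuf, *pnBuf + nExtra + 1);
      if( zNew==0 ){
        sqlite3_free(zBuf);            /* avoid leaking the prior allocation */
        return 0;
      }
      memcpy(&zNew[*pnBuf], zExtra, nExtra+1);
      *pnBuf += nExtra;
      return zNew;
    }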
     
     /*
    -** CAPI3REF: Compile-Time Authorization Callbacks
    -***
    -** This routine registers a authorizer callback with the SQLite library.  
    +** CAPI3REF: Memory Allocator Statistics {H17370} 
    +**
    +** SQLite provides these two interfaces for reporting on the status
    +** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()]
    +** routines, which form the built-in memory allocation subsystem.
    +**
    +** Requirements:
    +** [H17371] [H17373] [H17374] [H17375]
    +*/
    +sqlite3_int64 sqlite3_memory_used(void);
    +sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
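A minimal sketch of how these two statistics interfaces might be polled from an application; the report_memory() helper is hypothetical:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print the number of bytes currently checked out from sqlite3_malloc()
    ** and the high-water mark, then reset the mark for the next interval. */
    static void report_memory(void){
      printf("in use:     %lld bytes\n", (long long)sqlite3_memory_used());
      printf("high-water: %lld bytes\n", (long long)sqlite3_memory_highwater(0));
      sqlite3_memory_highwater(1);     /* nonzero resetFlag resets the mark */
    }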
    +
    +/*
    +** CAPI3REF: Pseudo-Random Number Generator {H17390} 
    +**
    +** SQLite contains a high-quality pseudo-random number generator (PRNG) used to
    +** select random [ROWID | ROWIDs] when inserting new records into a table that
    +** already uses the largest possible [ROWID].  The PRNG is also used for
+** the built-in random() and randomblob() SQL functions.  This interface allows
    +** applications to access the same PRNG for other purposes.
    +**
    +** A call to this routine stores N bytes of randomness into buffer P.
    +**
    +** The first time this routine is invoked (either internally or by
    +** the application) the PRNG is seeded using randomness obtained
    +** from the xRandomness method of the default [sqlite3_vfs] object.
    +** On all subsequent invocations, the pseudo-randomness is generated
    +** internally and without recourse to the [sqlite3_vfs] xRandomness
    +** method.
    +**
    +** Requirements:
    +** [H17392]
    +*/
    +void sqlite3_randomness(int N, void *P);
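A minimal sketch of using sqlite3_randomness() for an application-level purpose, here a hexadecimal token; the make_token() helper is hypothetical and sqlite3_snprintf() is used only for the hex formatting:

    #include <sqlite3.h>

    /* Fill zOut[] with a 32-character hexadecimal token built from 16 bytes
    ** of SQLite's PRNG output.  zOut must have room for 33 bytes. */
    static void make_token(char zOut[33]){
      unsigned char aBytes[16];
      int i;
      sqlite3_randomness((int)sizeof(aBytes), aBytes);
      for(i=0; i<16; i++){
        sqlite3_snprintf(3, &zOut[i*2], "%02x", aBytes[i]);
      }
    }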
    +
    +/*
    +** CAPI3REF: Compile-Time Authorization Callbacks {H12500} 
    +**
+** This routine registers an authorizer callback with a particular
    +** [database connection], supplied in the first argument.
     ** The authorizer callback is invoked as SQL statements are being compiled
     ** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()],
     ** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()].  At various
     ** points during the compilation process, as logic is being created
     ** to perform various actions, the authorizer callback is invoked to
     ** see if those actions are allowed.  The authorizer callback should
    -** return SQLITE_OK to allow the action, [SQLITE_IGNORE] to disallow the
    +** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the
     ** specific action but allow the SQL statement to continue to be
     ** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be
    -** rejected with an error.  
    -**
    -** Depending on the action, the [SQLITE_IGNORE] and [SQLITE_DENY] return
    -** codes might mean something different or they might mean the same
    -** thing.  If the action is, for example, to perform a delete opertion,
    -** then [SQLITE_IGNORE] and [SQLITE_DENY] both cause the statement compilation
    -** to fail with an error.  But if the action is to read a specific column
    -** from a specific table, then [SQLITE_DENY] will cause the entire
    -** statement to fail but [SQLITE_IGNORE] will cause a NULL value to be
    -** read instead of the actual column value.
    -**
    -** The first parameter to the authorizer callback is a copy of
    -** the third parameter to the sqlite3_set_authorizer() interface.
    -** The second parameter to the callback is an integer 
    -** [SQLITE_COPY | action code] that specifies the particular action
    -** to be authorized.  The available action codes are
    -** [SQLITE_COPY | documented separately].  The third through sixth
    -** parameters to the callback are strings that contain additional
    +** rejected with an error.  If the authorizer callback returns
    +** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY]
    +** then the [sqlite3_prepare_v2()] or equivalent call that triggered
    +** the authorizer will fail with an error message.
    +**
    +** When the callback returns [SQLITE_OK], that means the operation
    +** requested is ok.  When the callback returns [SQLITE_DENY], the
    +** [sqlite3_prepare_v2()] or equivalent call that triggered the
    +** authorizer will fail with an error message explaining that
    +** access is denied. 
    +**
    +** The first parameter to the authorizer callback is a copy of the third
    +** parameter to the sqlite3_set_authorizer() interface. The second parameter
    +** to the callback is an integer [SQLITE_COPY | action code] that specifies
    +** the particular action to be authorized. The third through sixth parameters
    +** to the callback are zero-terminated strings that contain additional
     ** details about the action to be authorized.
     **
    -** An authorizer is used when preparing SQL statements from an untrusted
    -** source, to ensure that the SQL statements do not try to access data
    -** that they are not allowed to see, or that they do not try to
    -** execute malicious statements that damage the database.  For
    +** If the action code is [SQLITE_READ]
    +** and the callback returns [SQLITE_IGNORE] then the
+** [prepared statement] is constructed to substitute
    +** a NULL value in place of the table column that would have
    +** been read if [SQLITE_OK] had been returned.  The [SQLITE_IGNORE]
    +** return can be used to deny an untrusted user access to individual
    +** columns of a table.
    +** If the action code is [SQLITE_DELETE] and the callback returns
    +** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the
    +** [truncate optimization] is disabled and all rows are deleted individually.
    +**
    +** An authorizer is used when [sqlite3_prepare | preparing]
    +** SQL statements from an untrusted source, to ensure that the SQL statements
    +** do not try to access data they are not allowed to see, or that they do not
    +** try to execute malicious statements that damage the database.  For
     ** example, an application may allow a user to enter arbitrary
     ** SQL queries for evaluation by a database.  But the application does
     ** not want the user to be able to make arbitrary changes to the
     ** database.  An authorizer could then be put in place while the
    -** user-entered SQL is being prepared that disallows everything
    -** except SELECT statements.  
    +** user-entered SQL is being [sqlite3_prepare | prepared] that
    +** disallows everything except [SELECT] statements.
    +**
    +** Applications that need to process SQL from untrusted sources
    +** might also consider lowering resource limits using [sqlite3_limit()]
    +** and limiting database size using the [max_page_count] [PRAGMA]
    +** in addition to using an authorizer.
     **
     ** Only a single authorizer can be in place on a database connection
     ** at a time.  Each call to sqlite3_set_authorizer overrides the
    -** previous call.  A NULL authorizer means that no authorization
    -** callback is invoked.  The default authorizer is NULL.
    +** previous call.  Disable the authorizer by installing a NULL callback.
    +** The authorizer is disabled by default.
     **
    -** Note that the authorizer callback is invoked only during 
    +** The authorizer callback must not do anything that will modify
    +** the database connection that invoked the authorizer callback.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both count as
+** modifying the database connection for the purposes of this paragraph.
    +**
    +** When [sqlite3_prepare_v2()] is used to prepare a statement, the
    +** statement might be reprepared during [sqlite3_step()] due to a 
    +** schema change.  Hence, the application should ensure that the
    +** correct authorizer callback remains in place during the [sqlite3_step()].
    +**
    +** Note that the authorizer callback is invoked only during
     ** [sqlite3_prepare()] or its variants.  Authorization is not
    -** performed during statement evaluation in [sqlite3_step()].
    +** performed during statement evaluation in [sqlite3_step()], unless
    +** as stated in the previous paragraph, sqlite3_step() invokes
    +** sqlite3_prepare_v2() to reprepare a statement after a schema change.
    +**
    +** Requirements:
    +** [H12501] [H12502] [H12503] [H12504] [H12505] [H12506] [H12507] [H12510]
    +** [H12511] [H12512] [H12520] [H12521] [H12522]
     */
     int sqlite3_set_authorizer(
       sqlite3*,
    @@ -754,7 +1886,7 @@
     );
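A minimal sketch of the "allow only SELECT" policy described above, assuming a database connection db obtained elsewhere; the callback name xAuth and the particular set of permitted action codes are illustrative only:

    #include <sqlite3.h>

    /* Allow user-supplied SQL to read data but refuse everything else.
    ** Returning SQLITE_DENY makes the offending statement fail inside
    ** sqlite3_prepare_v2() with an access-denied error. */
    static int xAuth(void *pArg, int op, const char *z3, const char *z4,
                     const char *zDb, const char *zTrigger){
      (void)pArg; (void)z3; (void)z4; (void)zDb; (void)zTrigger;
      switch( op ){
        case SQLITE_SELECT:
        case SQLITE_READ:
        case SQLITE_FUNCTION:
          return SQLITE_OK;       /* plain queries are allowed */
        default:
          return SQLITE_DENY;     /* no writes, no schema changes, ... */
      }
    }

    /* Install while compiling untrusted SQL, remove afterwards:
    **   sqlite3_set_authorizer(db, xAuth, 0);
    **   ...sqlite3_prepare_v2()/sqlite3_step()/sqlite3_finalize()...
    **   sqlite3_set_authorizer(db, 0, 0);
    */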
     
     /*
    -** CAPI3REF: Authorizer Return Codes
    +** CAPI3REF: Authorizer Return Codes {H12590} 
     **
     ** The [sqlite3_set_authorizer | authorizer callback function] must
     ** return either [SQLITE_OK] or one of these two constants in order
    @@ -766,23 +1898,26 @@
     #define SQLITE_IGNORE 2   /* Don't allow access, but don't generate an error */
     
     /*
    -** CAPI3REF: Authorizer Action Codes
    +** CAPI3REF: Authorizer Action Codes {H12550} 
     **
     ** The [sqlite3_set_authorizer()] interface registers a callback function
    -** that is invoked to authorizer certain SQL statement actions.  The
    +** that is invoked to authorize certain SQL statement actions.  The
     ** second parameter to the callback is an integer code that specifies
     ** what action is being authorized.  These are the integer action codes that
     ** the authorizer callback may be passed.
     **
    -** These action code values signify what kind of operation is to be 
    -** authorized.  The 3rd and 4th parameters to the authorization callback
    -** function will be parameters or NULL depending on which of these
    +** These action code values signify what kind of operation is to be
    +** authorized.  The 3rd and 4th parameters to the authorization
    +** callback function will be parameters or NULL depending on which of these
     ** codes is used as the second parameter.  The 5th parameter to the
    -** authorizer callback is the name of the database ("main", "temp", 
    +** authorizer callback is the name of the database ("main", "temp",
     ** etc.) if applicable.  The 6th parameter to the authorizer callback
     ** is the name of the inner-most trigger or view that is responsible for
    -** the access attempt or NULL if this access attempt is directly from 
    +** the access attempt or NULL if this access attempt is directly from
     ** top-level SQL code.
    +**
    +** Requirements:
    +** [H12551] [H12552] [H12553] [H12554]
     */
     /******************************************* 3rd ************ 4th ***********/
     #define SQLITE_CREATE_INDEX          1   /* Index Name      Table Name      */
    @@ -806,7 +1941,7 @@
     #define SQLITE_PRAGMA               19   /* Pragma Name     1st arg or NULL */
     #define SQLITE_READ                 20   /* Table Name      Column Name     */
     #define SQLITE_SELECT               21   /* NULL            NULL            */
    -#define SQLITE_TRANSACTION          22   /* NULL            NULL            */
    +#define SQLITE_TRANSACTION          22   /* Operation       NULL            */
     #define SQLITE_UPDATE               23   /* Table Name      Column Name     */
     #define SQLITE_ATTACH               24   /* Filename        NULL            */
     #define SQLITE_DETACH               25   /* Database Name   NULL            */
    @@ -815,83 +1950,144 @@
     #define SQLITE_ANALYZE              28   /* Table Name      NULL            */
     #define SQLITE_CREATE_VTABLE        29   /* Table Name      Module Name     */
     #define SQLITE_DROP_VTABLE          30   /* Table Name      Module Name     */
    -#define SQLITE_FUNCTION             31   /* Function Name   NULL            */
    +#define SQLITE_FUNCTION             31   /* NULL            Function Name   */
    +#define SQLITE_SAVEPOINT            32   /* Operation       Savepoint Name  */
     #define SQLITE_COPY                  0   /* No longer used */
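A sketch of how the 3rd and 4th callback parameters can be combined with SQLITE_IGNORE for the SQLITE_READ action; the "users" table and "password" column names are hypothetical:

    #include <string.h>
    #include <sqlite3.h>

    /* For SQLITE_READ the 3rd and 4th callback arguments are the table and
    ** column names; returning SQLITE_IGNORE makes that column read as NULL. */
    static int xHideSecrets(void *pNotUsed, int op, const char *zTab,
                            const char *zCol, const char *zDb,
                            const char *zTrigger){
      (void)pNotUsed; (void)zDb; (void)zTrigger;
      if( op==SQLITE_READ && zTab && zCol
       && strcmp(zTab,"users")==0 && strcmp(zCol,"password")==0 ){
        return SQLITE_IGNORE;
      }
      return SQLITE_OK;
    }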
     
     /*
    -** CAPI3REF: Tracing And Profiling Functions
    +** CAPI3REF: Tracing And Profiling Functions {H12280} 
    +** EXPERIMENTAL
     **
     ** These routines register callback functions that can be used for
     ** tracing and profiling the execution of SQL statements.
    -** The callback function registered by sqlite3_trace() is invoked
    -** at the first [sqlite3_step()] for the evaluation of an SQL statement.
    +**
    +** The callback function registered by sqlite3_trace() is invoked at
    +** various times when an SQL statement is being run by [sqlite3_step()].
    +** The callback returns a UTF-8 rendering of the SQL statement text
    +** as the statement first begins executing.  Additional callbacks occur
    +** as each triggered subprogram is entered.  The callbacks for triggers
    +** contain a UTF-8 SQL comment that identifies the trigger.
    +**
     ** The callback function registered by sqlite3_profile() is invoked
    -** as each SQL statement finishes and includes
    -** information on how long that statement ran.
    +** as each SQL statement finishes.  The profile callback contains
    +** the original statement text and an estimate of wall-clock time
    +** of how long that statement took to run.
     **
    -** The sqlite3_profile() API is currently considered experimental and
    -** is subject to change.
    +** Requirements:
    +** [H12281] [H12282] [H12283] [H12284] [H12285] [H12287] [H12288] [H12289]
    +** [H12290]
     */
    -void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
    -void *sqlite3_profile(sqlite3*,
    -   void(*xProfile)(void*,const char*,sqlite_uint64), void*);
    +SQLITE_EXPERIMENTAL void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
    +SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*,
    +   void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
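A minimal sketch of registering both callbacks on an open connection db; the logging format is illustrative, and the profile time is assumed to be reported in nanoseconds:

    #include <stdio.h>
    #include <sqlite3.h>

    static void xTrace(void *pArg, const char *zSql){
      (void)pArg;
      fprintf(stderr, "SQL: %s\n", zSql);          /* statement is starting */
    }
    static void xProfile(void *pArg, const char *zSql, sqlite3_uint64 nTime){
      (void)pArg;
      fprintf(stderr, "%llu ns: %s\n", (unsigned long long)nTime, zSql);
    }

    /* Registration, with db an open connection:
    **   sqlite3_trace(db, xTrace, 0);
    **   sqlite3_profile(db, xProfile, 0);
    */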
     
     /*
    -** CAPI3REF: Query Progress Callbacks
    +** CAPI3REF: Query Progress Callbacks {H12910} 
     **
    -** This routine configures a callback function - the progress callback - that
    -** is invoked periodically during long running calls to [sqlite3_exec()],
    -** [sqlite3_step()] and [sqlite3_get_table()].  An example use for this 
    +** This routine configures a callback function - the
    +** progress callback - that is invoked periodically during long
    +** running calls to [sqlite3_exec()], [sqlite3_step()] and
    +** [sqlite3_get_table()].  An example use for this
     ** interface is to keep a GUI updated during a large query.
     **
    -** The progress callback is invoked once for every N virtual machine opcodes,
    -** where N is the second argument to this function. The progress callback
    -** itself is identified by the third argument to this function. The fourth
    -** argument to this function is a void pointer passed to the progress callback
    -** function each time it is invoked.
    -**
    -** If a call to [sqlite3_exec()], [sqlite3_step()], or [sqlite3_get_table()]
    -** results in fewer than N opcodes being executed, then the progress 
    -** callback is never invoked.
    -** 
    -** Only a single progress callback function may be registered for each
    -** open database connection.  Every call to sqlite3_progress_handler()
    -** overwrites the results of the previous call.
    -** To remove the progress callback altogether, pass NULL as the third
    -** argument to this function.
    -**
    -** If the progress callback returns a result other than 0, then the current 
    -** query is immediately terminated and any database changes rolled back.
    -** The containing [sqlite3_exec()], [sqlite3_step()], or
    -** [sqlite3_get_table()] call returns SQLITE_INTERRUPT.   This feature
    -** can be used, for example, to implement the "Cancel" button on a
    -** progress dialog box in a GUI.
    +** If the progress callback returns non-zero, the operation is
    +** interrupted.  This feature can be used to implement a
    +** "Cancel" button on a GUI progress dialog box.
    +**
    +** The progress handler must not do anything that will modify
    +** the database connection that invoked the progress handler.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both count as
+** modifying the database connection for the purposes of this paragraph.
    +**
    +** Requirements:
    +** [H12911] [H12912] [H12913] [H12914] [H12915] [H12916] [H12917] [H12918]
    +**
     */
     void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
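A minimal sketch of the "Cancel" button pattern mentioned above, assuming an open connection db and GUI code elsewhere that sets fCancel; the opcode count of 1000 is an arbitrary choice:

    #include <sqlite3.h>

    static volatile int fCancel = 0;   /* set to 1 by the GUI "Cancel" button */

    /* A nonzero return interrupts the statement that is currently running;
    ** the interrupted call then returns SQLITE_INTERRUPT. */
    static int xProgress(void *pNotUsed){
      (void)pNotUsed;
      return fCancel;
    }

    /* Invoke the callback roughly every 1000 virtual-machine opcodes:
    **   sqlite3_progress_handler(db, 1000, xProgress, 0);
    ** Passing a NULL callback removes the handler again. */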
     
     /*
    -** CAPI3REF: Opening A New Database Connection
    +** CAPI3REF: Opening A New Database Connection {H12700} 
     **
    -** Open the sqlite database file "filename".  The "filename" is UTF-8
    -** encoded for sqlite3_open() and UTF-16 encoded in the native byte order
    -** for sqlite3_open16().  An [sqlite3*] handle is returned in *ppDb, even
    -** if an error occurs. If the database is opened (or created) successfully,
    -** then SQLITE_OK is returned. Otherwise an error code is returned. The
    -** sqlite3_errmsg() or sqlite3_errmsg16()  routines can be used to obtain
    +** These routines open an SQLite database file whose name is given by the
    +** filename argument. The filename argument is interpreted as UTF-8 for
    +** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte
    +** order for sqlite3_open16(). A [database connection] handle is usually
    +** returned in *ppDb, even if an error occurs.  The only exception is that
    +** if SQLite is unable to allocate memory to hold the [sqlite3] object,
    +** a NULL will be written into *ppDb instead of a pointer to the [sqlite3]
    +** object. If the database is opened (and/or created) successfully, then
    +** [SQLITE_OK] is returned.  Otherwise an [error code] is returned.  The
    +** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain
     ** an English language description of the error.
     **
    -** If the database file does not exist, then a new database will be created
    -** as needed.  The default encoding for the database will be UTF-8 if
    -** sqlite3_open() is called and UTF-16 if sqlite3_open16 is used.
    -**
    -** Whether or not an error occurs when it is opened, resources associated
    -** with the [sqlite3*] handle should be released by passing it to
    -** sqlite3_close() when it is no longer required.
    -**
    -** Note to windows users:  The encoding used for the filename argument
    -** of sqlite3_open() must be UTF-8, not whatever codepage is currently
    -** defined.  Filenames containing international characters must be converted
    -** to UTF-8 prior to passing them into sqlite3_open().
    +** The default encoding for the database will be UTF-8 if
    +** sqlite3_open() or sqlite3_open_v2() is called and
    +** UTF-16 in the native byte order if sqlite3_open16() is used.
    +**
    +** Whether or not an error occurs when it is opened, resources
    +** associated with the [database connection] handle should be released by
    +** passing it to [sqlite3_close()] when it is no longer required.
    +**
    +** The sqlite3_open_v2() interface works like sqlite3_open()
+** except that it accepts two additional parameters that give finer control
    +** over the new database connection.  The flags parameter can take one of
    +** the following three values, optionally combined with the 
    +** [SQLITE_OPEN_NOMUTEX] or [SQLITE_OPEN_FULLMUTEX] flags:
    +**
    +** 
    +**
    [SQLITE_OPEN_READONLY]
    +**
    The database is opened in read-only mode. If the database does not +** already exist, an error is returned.
    +** +**
    [SQLITE_OPEN_READWRITE]
    +**
    The database is opened for reading and writing if possible, or reading +** only if the file is write protected by the operating system. In either +** case the database must already exist, otherwise an error is returned.
    +** +**
    [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
    +**
The database is opened for reading and writing, and is created if +** it does not already exist. This is the behavior that is always used for +** sqlite3_open() and sqlite3_open16().
    +**
    +** +** If the 3rd parameter to sqlite3_open_v2() is not one of the +** combinations shown above or one of the combinations shown above combined +** with the [SQLITE_OPEN_NOMUTEX] or [SQLITE_OPEN_FULLMUTEX] flags, +** then the behavior is undefined. +** +** If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection +** opens in the multi-thread [threading mode] as long as the single-thread +** mode has not been set at compile-time or start-time. If the +** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens +** in the serialized [threading mode] unless single-thread was +** previously selected at compile-time or start-time. +** +** If the filename is ":memory:", then a private, temporary in-memory database +** is created for the connection. This in-memory database will vanish when +** the database connection is closed. Future versions of SQLite might +** make use of additional special filenames that begin with the ":" character. +** It is recommended that when a database filename actually does begin with +** a ":" character you should prefix the filename with a pathname such as +** "./" to avoid ambiguity. +** +** If the filename is an empty string, then a private, temporary +** on-disk database will be created. This private database will be +** automatically deleted as soon as the database connection is closed. +** +** The fourth parameter to sqlite3_open_v2() is the name of the +** [sqlite3_vfs] object that defines the operating system interface that +** the new database connection should use. If the fourth parameter is +** a NULL pointer then the default [sqlite3_vfs] object is used. +** +** Note to Windows users: The encoding used for the filename argument +** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever +** codepage is currently defined. Filenames containing international +** characters must be converted to UTF-8 prior to passing them into +** sqlite3_open() or sqlite3_open_v2(). +** +** Requirements: +** [H12701] [H12702] [H12703] [H12704] [H12706] [H12707] [H12709] [H12711] +** [H12712] [H12713] [H12714] [H12717] [H12719] [H12721] [H12723] */ int sqlite3_open( const char *filename, /* Database filename (UTF-8) */ @@ -901,53 +2097,69 @@ const void *filename, /* Database filename (UTF-16) */ sqlite3 **ppDb /* OUT: SQLite db handle */ ); +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); /* -** CAPI3REF: Error Codes And Messages +** CAPI3REF: Error Codes And Messages {H12800} ** -** The sqlite3_errcode() interface returns the numeric -** [SQLITE_OK | result code] or [SQLITE_IOERR_READ | extended result code] -** for the most recent failed sqlite3_* API call associated -** with [sqlite3] handle 'db'. If a prior API call failed but the -** most recent API call succeeded, the return value from sqlite3_errcode() -** is undefined. -** -** The sqlite3_errmsg() and sqlite3_errmsg16() return English-langauge -** text that describes the error, as either UTF8 or UTF16 respectively. -** Memory to hold the error message string is managed internally. The -** string may be overwritten or deallocated by subsequent calls to SQLite -** interface functions. -** -** Calls to many sqlite3_* functions set the error code and string returned -** by [sqlite3_errcode()], [sqlite3_errmsg()], and [sqlite3_errmsg16()] -** (overwriting the previous values). 
Note that calls to [sqlite3_errcode()], -** [sqlite3_errmsg()], and [sqlite3_errmsg16()] themselves do not affect the -** results of future invocations. Calls to API routines that do not return -** an error code (examples: [sqlite3_data_count()] or [sqlite3_mprintf()]) do -** not change the error code returned by this routine. -** -** Assuming no other intervening sqlite3_* API calls are made, the error -** code returned by this function is associated with the same error as -** the strings returned by [sqlite3_errmsg()] and [sqlite3_errmsg16()]. +** The sqlite3_errcode() interface returns the numeric [result code] or +** [extended result code] for the most recent failed sqlite3_* API call +** associated with a [database connection]. If a prior API call failed +** but the most recent API call succeeded, the return value from +** sqlite3_errcode() is undefined. The sqlite3_extended_errcode() +** interface is the same except that it always returns the +** [extended result code] even when extended result codes are +** disabled. +** +** The sqlite3_errmsg() and sqlite3_errmsg16() return English-language +** text that describes the error, as either UTF-8 or UTF-16 respectively. +** Memory to hold the error message string is managed internally. +** The application does not need to worry about freeing the result. +** However, the error string might be overwritten or deallocated by +** subsequent calls to other SQLite interface functions. +** +** When the serialized [threading mode] is in use, it might be the +** case that a second error occurs on a separate thread in between +** the time of the first error and the call to these interfaces. +** When that happens, the second error will be reported since these +** interfaces always report the most recent result. To avoid +** this, each thread can obtain exclusive use of the [database connection] D +** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning +** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after +** all calls to the interfaces listed here are completed. +** +** If an interface fails with SQLITE_MISUSE, that means the interface +** was invoked incorrectly by the application. In that case, the +** error code and message may or may not be set. +** +** Requirements: +** [H12801] [H12802] [H12803] [H12807] [H12808] [H12809] */ int sqlite3_errcode(sqlite3 *db); +int sqlite3_extended_errcode(sqlite3 *db); const char *sqlite3_errmsg(sqlite3*); const void *sqlite3_errmsg16(sqlite3*); /* -** CAPI3REF: SQL Statement Object +** CAPI3REF: SQL Statement Object {H13000} +** KEYWORDS: {prepared statement} {prepared statements} ** -** Instance of this object represent single SQL statements. This -** is variously known as a "prepared statement" or a +** An instance of this object represents a single SQL statement. +** This object is variously known as a "prepared statement" or a ** "compiled SQL statement" or simply as a "statement". -** +** ** The life of a statement object goes something like this: ** **
      **
    1. Create the object using [sqlite3_prepare_v2()] or a related ** function. -**
    2. Bind values to host parameters using -** [sqlite3_bind_blob | sqlite3_bind_* interfaces]. +**
    3. Bind values to [host parameters] using the sqlite3_bind_*() +** interfaces. **
    4. Run the SQL by calling [sqlite3_step()] one or more times. **
    5. Reset the statement using [sqlite3_reset()] then go back ** to step 2. Do this zero or more times. @@ -960,45 +2172,147 @@ typedef struct sqlite3_stmt sqlite3_stmt; /* -** CAPI3REF: Compiling An SQL Statement +** CAPI3REF: Run-time Limits {H12760} +** +** This interface allows the size of various constructs to be limited +** on a connection by connection basis. The first parameter is the +** [database connection] whose limit is to be set or queried. The +** second parameter is one of the [limit categories] that define a +** class of constructs to be size limited. The third parameter is the +** new limit for that construct. The function returns the old limit. +** +** If the new limit is a negative number, the limit is unchanged. +** For the limit category of SQLITE_LIMIT_XYZ there is a +** [limits | hard upper bound] +** set by a compile-time C preprocessor macro named +** [limits | SQLITE_MAX_XYZ]. +** (The "_LIMIT_" in the name is changed to "_MAX_".) +** Attempts to increase a limit above its hard upper bound are +** silently truncated to the hard upper limit. +** +** Run time limits are intended for use in applications that manage +** both their own internal database and also databases that are controlled +** by untrusted external sources. An example application might be a +** web browser that has its own databases for storing history and +** separate databases controlled by JavaScript applications downloaded +** off the Internet. The internal databases can be given the +** large, default limits. Databases managed by external sources can +** be given much smaller limits designed to prevent a denial of service +** attack. Developers might also want to use the [sqlite3_set_authorizer()] +** interface to further control untrusted SQL. The size of the database +** created by an untrusted script can be contained using the +** [max_page_count] [PRAGMA]. +** +** New run-time limit categories may be added in future releases. +** +** Requirements: +** [H12762] [H12766] [H12769] +*/ +int sqlite3_limit(sqlite3*, int id, int newVal); + +/* +** CAPI3REF: Run-Time Limit Categories {H12790} +** KEYWORDS: {limit category} {limit categories} +** +** These constants define various performance limits +** that can be lowered at run-time using [sqlite3_limit()]. +** The synopsis of the meanings of the various limits is shown below. +** Additional information is available at [limits | Limits in SQLite]. +** +**
      +**
      SQLITE_LIMIT_LENGTH
      +**
      The maximum size of any string or BLOB or table row.
      +** +**
      SQLITE_LIMIT_SQL_LENGTH
      +**
      The maximum length of an SQL statement.
      +** +**
      SQLITE_LIMIT_COLUMN
      +**
      The maximum number of columns in a table definition or in the +** result set of a [SELECT] or the maximum number of columns in an index +** or in an ORDER BY or GROUP BY clause.
      +** +**
      SQLITE_LIMIT_EXPR_DEPTH
      +**
      The maximum depth of the parse tree on any expression.
      +** +**
      SQLITE_LIMIT_COMPOUND_SELECT
      +**
      The maximum number of terms in a compound SELECT statement.
      +** +**
      SQLITE_LIMIT_VDBE_OP
      +**
      The maximum number of instructions in a virtual machine program +** used to implement an SQL statement.
      +** +**
      SQLITE_LIMIT_FUNCTION_ARG
      +**
      The maximum number of arguments on a function.
      +** +**
      SQLITE_LIMIT_ATTACHED
      +**
      The maximum number of [ATTACH | attached databases].
      +** +**
      SQLITE_LIMIT_LIKE_PATTERN_LENGTH
      +**
      The maximum length of the pattern argument to the [LIKE] or +** [GLOB] operators.
      +** +**
      SQLITE_LIMIT_VARIABLE_NUMBER
      +**
      The maximum number of variables in an SQL statement that can +** be bound.
      +**
      +*/ +#define SQLITE_LIMIT_LENGTH 0 +#define SQLITE_LIMIT_SQL_LENGTH 1 +#define SQLITE_LIMIT_COLUMN 2 +#define SQLITE_LIMIT_EXPR_DEPTH 3 +#define SQLITE_LIMIT_COMPOUND_SELECT 4 +#define SQLITE_LIMIT_VDBE_OP 5 +#define SQLITE_LIMIT_FUNCTION_ARG 6 +#define SQLITE_LIMIT_ATTACHED 7 +#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 +#define SQLITE_LIMIT_VARIABLE_NUMBER 9 + +/* +** CAPI3REF: Compiling An SQL Statement {H13010} +** KEYWORDS: {SQL statement compiler} ** ** To execute an SQL query, it must first be compiled into a byte-code -** program using one of these routines. +** program using one of these routines. ** -** The first argument "db" is an [sqlite3 | SQLite database handle] -** obtained from a prior call to [sqlite3_open()] or [sqlite3_open16()]. -** The second argument "zSql" is the statement to be compiled, encoded +** The first argument, "db", is a [database connection] obtained from a +** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or +** [sqlite3_open16()]. The database connection must not have been closed. +** +** The second argument, "zSql", is the statement to be compiled, encoded ** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() -** interfaces uses UTF-8 and sqlite3_prepare16() and sqlite3_prepare16_v2() +** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() ** use UTF-16. ** -** If the nByte argument is less -** than zero, then zSql is read up to the first zero terminator. If -** nByte is non-negative, then it is the maximum number of -** bytes read from zSql. When nByte is non-negative, the -** zSql string ends at either the first '\000' character or -** until the nByte-th byte, whichever comes first. -** -** *pzTail is made to point to the first byte past the end of the first -** SQL statement in zSql. This routine only compiles the first statement -** in zSql, so *pzTail is left pointing to what remains uncompiled. -** -** *ppStmt is left pointing to a compiled -** [sqlite3_stmt | SQL statement structure] that can be -** executed using [sqlite3_step()]. Or if there is an error, *ppStmt may be -** set to NULL. If the input text contained no SQL (if the input is and -** empty string or a comment) then *ppStmt is set to NULL. The calling -** procedure is responsible for deleting the compiled SQL statement -** using [sqlite3_finalize()] after it has finished with it. +** If the nByte argument is less than zero, then zSql is read up to the +** first zero terminator. If nByte is non-negative, then it is the maximum +** number of bytes read from zSql. When nByte is non-negative, the +** zSql string ends at either the first '\000' or '\u0000' character or +** the nByte-th byte, whichever comes first. If the caller knows +** that the supplied string is nul-terminated, then there is a small +** performance advantage to be gained by passing an nByte parameter that +** is equal to the number of bytes in the input string including +** the nul-terminator bytes. +** +** If pzTail is not NULL then *pzTail is made to point to the first byte +** past the end of the first SQL statement in zSql. These routines only +** compile the first statement in zSql, so *pzTail is left pointing to +** what remains uncompiled. +** +** *ppStmt is left pointing to a compiled [prepared statement] that can be +** executed using [sqlite3_step()]. If there is an error, *ppStmt is set +** to NULL. If the input text contains no SQL (if the input is an empty +** string or a comment) then *ppStmt is set to NULL. 
+** The calling procedure is responsible for deleting the compiled +** SQL statement using [sqlite3_finalize()] after it has finished with it. +** ppStmt may not be NULL. ** -** On success, [SQLITE_OK] is returned. Otherwise an -** [SQLITE_ERROR | error code] is returned. +** On success, [SQLITE_OK] is returned, otherwise an [error code] is returned. ** ** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are ** recommended for all new programs. The two older interfaces are retained ** for backwards compatibility, but their use is discouraged. ** In the "v2" interfaces, the prepared statement -** that is returned (the [sqlite3_stmt] object) contains a copy of the +** that is returned (the [sqlite3_stmt] object) contains a copy of the ** original SQL text. This causes the [sqlite3_step()] interface to ** behave a differently in two ways: ** @@ -1006,26 +2320,27 @@ **
    6. ** If the database schema changes, instead of returning [SQLITE_SCHEMA] as it ** always used to do, [sqlite3_step()] will automatically recompile the SQL -** statement and try to run it again. If the schema has changed in a way -** that makes the statement no longer valid, [sqlite3_step()] will still +** statement and try to run it again. If the schema has changed in +** a way that makes the statement no longer valid, [sqlite3_step()] will still ** return [SQLITE_SCHEMA]. But unlike the legacy behavior, [SQLITE_SCHEMA] is ** now a fatal error. Calling [sqlite3_prepare_v2()] again will not make the -** error go away. Note: use [sqlite3_errmsg()] to find the text of the parsing -** error that results in an [SQLITE_SCHEMA] return. +** error go away. Note: use [sqlite3_errmsg()] to find the text +** of the parsing error that results in an [SQLITE_SCHEMA] return. **
    7. ** **
    8. -** When an error occurs, -** [sqlite3_step()] will return one of the detailed -** [SQLITE_ERROR | result codes] or -** [SQLITE_IOERR_READ | extended result codes] such as directly. -** The legacy behavior was that [sqlite3_step()] would only return a generic -** [SQLITE_ERROR] result code and you would have to make a second call to -** [sqlite3_reset()] in order to find the underlying cause of the problem. -** With the "v2" prepare interfaces, the underlying reason for the error is -** returned immediately. +** When an error occurs, [sqlite3_step()] will return one of the detailed +** [error codes] or [extended error codes]. The legacy behavior was that +** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code +** and you would have to make a second call to [sqlite3_reset()] in order +** to find the underlying cause of the problem. With the "v2" prepare +** interfaces, the underlying reason for the error is returned immediately. **
    9. **
    +** +** Requirements: +** [H13011] [H13012] [H13013] [H13014] [H13015] [H13016] [H13019] [H13021] +** */ int sqlite3_prepare( sqlite3 *db, /* Database handle */ @@ -1057,84 +2372,129 @@ ); /* -** CAPI3REF: Dynamically Typed Value Object +** CAPI3REF: Retrieving Statement SQL {H13100} ** -** SQLite uses dynamic typing for the values it stores. Values can -** be integers, floating point values, strings, BLOBs, or NULL. When -** passing around values internally, each value is represented as -** an instance of the sqlite3_value object. +** This interface can be used to retrieve a saved copy of the original +** SQL text used to create a [prepared statement] if that statement was +** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. +** +** Requirements: +** [H13101] [H13102] [H13103] +*/ +const char *sqlite3_sql(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Dynamically Typed Value Object {H15000} +** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} +** +** SQLite uses the sqlite3_value object to represent all values +** that can be stored in a database table. SQLite uses dynamic typing +** for the values it stores. Values stored in sqlite3_value objects +** can be integers, floating point values, strings, BLOBs, or NULL. +** +** An sqlite3_value object may be either "protected" or "unprotected". +** Some interfaces require a protected sqlite3_value. Other interfaces +** will accept either a protected or an unprotected sqlite3_value. +** Every interface that accepts sqlite3_value arguments specifies +** whether or not it requires a protected sqlite3_value. +** +** The terms "protected" and "unprotected" refer to whether or not +** a mutex is held. A internal mutex is held for a protected +** sqlite3_value object but no mutex is held for an unprotected +** sqlite3_value object. If SQLite is compiled to be single-threaded +** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) +** or if SQLite is run in one of reduced mutex modes +** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] +** then there is no distinction between protected and unprotected +** sqlite3_value objects and they can be used interchangeably. However, +** for maximum code portability it is recommended that applications +** still make the distinction between between protected and unprotected +** sqlite3_value objects even when not strictly required. +** +** The sqlite3_value objects that are passed as parameters into the +** implementation of [application-defined SQL functions] are protected. +** The sqlite3_value object returned by +** [sqlite3_column_value()] is unprotected. +** Unprotected sqlite3_value objects may only be used with +** [sqlite3_result_value()] and [sqlite3_bind_value()]. +** The [sqlite3_value_blob | sqlite3_value_type()] family of +** interfaces require protected sqlite3_value objects. */ typedef struct Mem sqlite3_value; /* -** CAPI3REF: SQL Function Context Object +** CAPI3REF: SQL Function Context Object {H16001} ** ** The context in which an SQL function executes is stored in an -** sqlite3_context object. A pointer to such an object is the -** first parameter to user-defined SQL functions. +** sqlite3_context object. A pointer to an sqlite3_context object +** is always first parameter to [application-defined SQL functions]. 
+** The application-defined SQL function implementation will pass this +** pointer through into calls to [sqlite3_result_int | sqlite3_result()], +** [sqlite3_aggregate_context()], [sqlite3_user_data()], +** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], +** and/or [sqlite3_set_auxdata()]. */ typedef struct sqlite3_context sqlite3_context; /* -** CAPI3REF: Binding Values To Prepared Statements +** CAPI3REF: Binding Values To Prepared Statements {H13500} +** KEYWORDS: {host parameter} {host parameters} {host parameter name} +** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} ** ** In the SQL strings input to [sqlite3_prepare_v2()] and its variants, -** one or more literals can be replace by a parameter in one of these -** forms: +** literals may be replaced by a [parameter] in one of these forms: ** **
      **
    • ? **
    • ?NNN -**
    • :AAA -**
    • @AAA +**
    • :VVV +**
    • @VVV **
    • $VVV **
    ** ** In the parameter forms shown above NNN is an integer literal, -** AAA is an alphanumeric identifier and VVV is a variable name according -** to the syntax rules of the TCL programming language. -** The values of these parameters (also called "host parameter names") +** and VVV is an alpha-numeric parameter name. The values of these +** parameters (also called "host parameter names" or "SQL parameters") ** can be set using the sqlite3_bind_*() routines defined here. ** -** The first argument to the sqlite3_bind_*() routines always is a pointer -** to the [sqlite3_stmt] object returned from [sqlite3_prepare_v2()] or -** its variants. The second -** argument is the index of the parameter to be set. The first parameter has -** an index of 1. When the same named parameter is used more than once, second -** and subsequent -** occurrences have the same index as the first occurrence. The index for -** named parameters can be looked up using the -** [sqlite3_bind_parameter_name()] API if desired. The index for "?NNN" -** parametes is the value of NNN. -** The NNN value must be between 1 and the compile-time -** parameter SQLITE_MAX_VARIABLE_NUMBER (default value: 999). -** See limits.html for additional information. +** The first argument to the sqlite3_bind_*() routines is always +** a pointer to the [sqlite3_stmt] object returned from +** [sqlite3_prepare_v2()] or its variants. +** +** The second argument is the index of the SQL parameter to be set. +** The leftmost SQL parameter has an index of 1. When the same named +** SQL parameter is used more than once, second and subsequent +** occurrences have the same index as the first occurrence. +** The index for named parameters can be looked up using the +** [sqlite3_bind_parameter_index()] API if desired. The index +** for "?NNN" parameters is the value of NNN. +** The NNN value must be between 1 and the [sqlite3_limit()] +** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). ** ** The third argument is the value to bind to the parameter. ** -** In those -** routines that have a fourth argument, its value is the number of bytes -** in the parameter. To be clear: the value is the number of bytes in the -** string, not the number of characters. The number -** of bytes does not include the zero-terminator at the end of strings. +** In those routines that have a fourth argument, its value is the +** number of bytes in the parameter. To be clear: the value is the +** number of bytes in the value, not the number of characters. ** If the fourth parameter is negative, the length of the string is -** number of bytes up to the first zero terminator. +** the number of bytes up to the first zero terminator. ** ** The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and ** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** text after SQLite has finished with it. If the fifth argument is the -** special value [SQLITE_STATIC], then the library assumes that the information -** is in static, unmanaged space and does not need to be freed. If the -** fifth argument has the value [SQLITE_TRANSIENT], then SQLite makes its -** own private copy of the data immediately, before the sqlite3_bind_*() -** routine returns. -** -** The sqlite3_bind_zeroblob() routine binds a BLOB of length n that -** is filled with zeros. A zeroblob uses a fixed amount of memory -** (just an integer to hold it size) while it is being processed. 
-** Zeroblobs are intended to serve as place-holders for BLOBs whose -** content is later written using -** [sqlite3_blob_open | increment BLOB I/O] routines. +** string after SQLite has finished with it. If the fifth argument is +** the special value [SQLITE_STATIC], then SQLite assumes that the +** information is in static, unmanaged space and does not need to be freed. +** If the fifth argument has the value [SQLITE_TRANSIENT], then +** SQLite makes its own private copy of the data immediately, before +** the sqlite3_bind_*() routine returns. +** +** The sqlite3_bind_zeroblob() routine binds a BLOB of length N that +** is filled with zeroes. A zeroblob uses a fixed amount of memory +** (just an integer to hold its size) while it is being processed. +** Zeroblobs are intended to serve as placeholders for BLOBs whose +** content is later written using +** [sqlite3_blob_open | incremental BLOB I/O] routines. +** A negative value for the zeroblob results in a zero-length BLOB. ** ** The sqlite3_bind_*() routines must be called after ** [sqlite3_prepare_v2()] (and its variants) or [sqlite3_reset()] and @@ -1144,14 +2504,26 @@ ** ** These routines return [SQLITE_OK] on success or an error code if ** anything goes wrong. [SQLITE_RANGE] is returned if the parameter -** index is out of range. [SQLITE_NOMEM] is returned if malloc fails. -** [SQLITE_MISUSE] is returned if these routines are called on a virtual -** machine that is the wrong state or which has already been finalized. +** index is out of range. [SQLITE_NOMEM] is returned if malloc() fails. +** [SQLITE_MISUSE] might be returned if these routines are called on a +** virtual machine that is the wrong state or which has already been finalized. +** Detection of misuse is unreliable. Applications should not depend +** on SQLITE_MISUSE returns. SQLITE_MISUSE is intended to indicate a +** a logic error in the application. Future versions of SQLite might +** panic rather than return SQLITE_MISUSE. +** +** See also: [sqlite3_bind_parameter_count()], +** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. +** +** Requirements: +** [H13506] [H13509] [H13512] [H13515] [H13518] [H13521] [H13524] [H13527] +** [H13530] [H13533] [H13536] [H13539] [H13542] [H13545] [H13548] [H13551] +** */ int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); int sqlite3_bind_double(sqlite3_stmt*, int, double); int sqlite3_bind_int(sqlite3_stmt*, int, int); -int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite_int64); +int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); int sqlite3_bind_null(sqlite3_stmt*, int); int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); @@ -1159,122 +2531,175 @@ int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); /* -** CAPI3REF: Number Of Host Parameters +** CAPI3REF: Number Of SQL Parameters {H13600} ** -** Return the largest host parameter index in the precompiled statement given -** as the argument. When the host parameters are of the forms like ":AAA" -** or "?", then they are assigned sequential increasing numbers beginning -** with one, so the value returned is the number of parameters. However -** if the same host parameter name is used multiple times, each occurrance -** is given the same number, so the value returned in that case is the number -** of unique host parameter names. 
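As an illustrative sketch of the binding interface described above (not part of the header; the table and column names are hypothetical and error handling is reduced to a single check):

    #include <sqlite3.h>

    /* Bind a text value and an integer to the two parameters of an
    ** INSERT statement.  SQLITE_TRANSIENT makes SQLite copy the text
    ** before sqlite3_bind_text() returns, so the caller's buffer may
    ** be reused immediately. */
    int insert_user(sqlite3 *db, const char *zName, int age){
      sqlite3_stmt *pStmt;
      int rc = sqlite3_prepare_v2(db,
          "INSERT INTO users(name, age) VALUES(?1, ?2)", -1, &pStmt, 0);
      if( rc!=SQLITE_OK ) return rc;
      sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_TRANSIENT);
      sqlite3_bind_int(pStmt, 2, age);
      rc = sqlite3_step(pStmt);            /* expect SQLITE_DONE */
      sqlite3_finalize(pStmt);
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }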
If host parameters of the form "?NNN" -** are used (where NNN is an integer) then there might be gaps in the -** numbering and the value returned by this interface is the index of the -** host parameter with the largest index value. +** This routine can be used to find the number of [SQL parameters] +** in a [prepared statement]. SQL parameters are tokens of the +** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as +** placeholders for values that are [sqlite3_bind_blob | bound] +** to the parameters at a later time. +** +** This routine actually returns the index of the largest (rightmost) +** parameter. For all forms except ?NNN, this will correspond to the +** number of unique parameters. If parameters of the ?NNN are used, +** there may be gaps in the list. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_name()], and +** [sqlite3_bind_parameter_index()]. +** +** Requirements: +** [H13601] */ int sqlite3_bind_parameter_count(sqlite3_stmt*); /* -** CAPI3REF: Name Of A Host Parameter +** CAPI3REF: Name Of A Host Parameter {H13620} ** -** This routine returns a pointer to the name of the n-th parameter in a -** [sqlite3_stmt | prepared statement]. -** Host parameters of the form ":AAA" or "@AAA" or "$VVV" have a name -** which is the string ":AAA" or "@AAA" or "$VVV". -** In other words, the initial ":" or "$" or "@" +** This routine returns a pointer to the name of the n-th +** [SQL parameter] in a [prepared statement]. +** SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" +** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" +** respectively. +** In other words, the initial ":" or "$" or "@" or "?" ** is included as part of the name. -** Parameters of the form "?" or "?NNN" have no name. +** Parameters of the form "?" without a following integer have no name +** and are also referred to as "anonymous parameters". +** +** The first host parameter has an index of 1, not 0. ** -** The first bound parameter has an index of 1, not 0. +** If the value n is out of range or if the n-th parameter is +** nameless, then NULL is returned. The returned string is +** always in UTF-8 encoding even if the named parameter was +** originally specified as UTF-16 in [sqlite3_prepare16()] or +** [sqlite3_prepare16_v2()]. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_index()]. ** -** If the value n is out of range or if the n-th parameter is nameless, -** then NULL is returned. The returned string is always in the -** UTF-8 encoding even if the named parameter was originally specified -** as UTF-16 in [sqlite3_prepare16()] or [sqlite3_prepare16_v2()]. +** Requirements: +** [H13621] */ const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); /* -** CAPI3REF: Index Of A Parameter With A Given Name +** CAPI3REF: Index Of A Parameter With A Given Name {H13640} ** -** This routine returns the index of a host parameter with the given name. -** The name must match exactly. If no parameter with the given name is -** found, return 0. Parameter names must be UTF8. +** Return the index of an SQL parameter given its name. The +** index value returned is suitable for use as the second +** parameter to [sqlite3_bind_blob|sqlite3_bind()]. A zero +** is returned if no matching parameter is found. The parameter +** name must be given in UTF-8 even if the original statement +** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. 
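A short sketch of resolving named parameters with sqlite3_bind_parameter_index() and then binding to them, assuming an open database connection db and an arbitrary example statement:

    sqlite3_stmt *pStmt;
    sqlite3_prepare_v2(db, "SELECT * FROM t WHERE a=:a AND b=:b",
                       -1, &pStmt, 0);
    /* Named parameters keep one index per distinct name. */
    int iA = sqlite3_bind_parameter_index(pStmt, ":a");   /* == 1 */
    int iB = sqlite3_bind_parameter_index(pStmt, ":b");   /* == 2 */
    sqlite3_bind_int(pStmt, iA, 42);
    sqlite3_bind_text(pStmt, iB, "hello", -1, SQLITE_STATIC);
    /* sqlite3_bind_parameter_count(pStmt) is 2 here, and
    ** sqlite3_bind_parameter_name(pStmt, 1) returns ":a". */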
+** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_index()]. +** +** Requirements: +** [H13641] */ int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); /* -** CAPI3REF: Reset All Bindings On A Prepared Statement +** CAPI3REF: Reset All Bindings On A Prepared Statement {H13660} +** +** Contrary to the intuition of many, [sqlite3_reset()] does not reset +** the [sqlite3_bind_blob | bindings] on a [prepared statement]. +** Use this routine to reset all host parameters to NULL. ** -** Contrary to the intuition of many, [sqlite3_reset()] does not -** reset the [sqlite3_bind_blob | bindings] on a -** [sqlite3_stmt | prepared statement]. Use this routine to -** reset all host parameters to NULL. +** Requirements: +** [H13661] */ int sqlite3_clear_bindings(sqlite3_stmt*); /* -** CAPI3REF: Number Of Columns In A Result Set +** CAPI3REF: Number Of Columns In A Result Set {H13710} ** -** Return the number of columns in the result set returned by the -** [sqlite3_stmt | compiled SQL statement]. This routine returns 0 -** if pStmt is an SQL statement that does not return data (for -** example an UPDATE). +** Return the number of columns in the result set returned by the +** [prepared statement]. This routine returns 0 if pStmt is an SQL +** statement that does not return data (for example an [UPDATE]). +** +** Requirements: +** [H13711] */ int sqlite3_column_count(sqlite3_stmt *pStmt); /* -** CAPI3REF: Column Names In A Result Set +** CAPI3REF: Column Names In A Result Set {H13720} ** ** These routines return the name assigned to a particular column -** in the result set of a SELECT statement. The sqlite3_column_name() -** interface returns a pointer to a UTF8 string and sqlite3_column_name16() -** returns a pointer to a UTF16 string. The first parameter is the -** [sqlite_stmt | prepared statement] that implements the SELECT statement. -** The second parameter is the column number. The left-most column is -** number 0. -** -** The returned string pointer is valid until either the -** [sqlite_stmt | prepared statement] is destroyed by [sqlite3_finalize()] -** or until the next call sqlite3_column_name() or sqlite3_column_name16() -** on the same column. +** in the result set of a [SELECT] statement. The sqlite3_column_name() +** interface returns a pointer to a zero-terminated UTF-8 string +** and sqlite3_column_name16() returns a pointer to a zero-terminated +** UTF-16 string. The first parameter is the [prepared statement] +** that implements the [SELECT] statement. The second parameter is the +** column number. The leftmost column is number 0. +** +** The returned string pointer is valid until either the [prepared statement] +** is destroyed by [sqlite3_finalize()] or until the next call to +** sqlite3_column_name() or sqlite3_column_name16() on the same column. +** +** If sqlite3_malloc() fails during the processing of either routine +** (for example during a conversion from UTF-8 to UTF-16) then a +** NULL pointer is returned. +** +** The name of a result column is the value of the "AS" clause for +** that column, if there is an AS clause. If there is no AS clause +** then the name of the column is unspecified and may change from +** one release of SQLite to the next. 
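For example, the result-column names of a prepared SELECT can be listed like this (a sketch; it assumes <stdio.h> is included and pStmt is any prepared statement):

    /* Print the name of every column in the result set of pStmt. */
    int i, n = sqlite3_column_count(pStmt);
    for(i=0; i<n; i++){
      const char *zName = sqlite3_column_name(pStmt, i);   /* UTF-8 */
      printf("column %d: %s\n", i, zName ? zName : "(out of memory)");
    }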
+** +** Requirements: +** [H13721] [H13723] [H13724] [H13725] [H13726] [H13727] */ const char *sqlite3_column_name(sqlite3_stmt*, int N); const void *sqlite3_column_name16(sqlite3_stmt*, int N); /* -** CAPI3REF: Source Of Data In A Query Result +** CAPI3REF: Source Of Data In A Query Result {H13740} ** ** These routines provide a means to determine what column of what -** table in which database a result of a SELECT statement comes from. +** table in which database a result of a [SELECT] statement comes from. ** The name of the database or table or column can be returned as -** either a UTF8 or UTF16 string. The _database_ routines return +** either a UTF-8 or UTF-16 string. The _database_ routines return ** the database name, the _table_ routines return the table name, and ** the origin_ routines return the column name. -** The returned string is valid until -** the [sqlite3_stmt | prepared statement] is destroyed using -** [sqlite3_finalize()] or until the same information is requested +** The returned string is valid until the [prepared statement] is destroyed +** using [sqlite3_finalize()] or until the same information is requested ** again in a different encoding. ** ** The names returned are the original un-aliased names of the ** database, table, and column. ** -** The first argument to the following calls is a -** [sqlite3_stmt | compiled SQL statement]. -** These functions return information about the Nth column returned by +** The first argument to the following calls is a [prepared statement]. +** These functions return information about the Nth column returned by ** the statement, where N is the second function argument. ** -** If the Nth column returned by the statement is an expression -** or subquery and is not a column value, then all of these functions -** return NULL. Otherwise, they return the -** name of the attached database, table and column that query result -** column was extracted from. -** -** As with all other SQLite APIs, those postfixed with "16" return UTF-16 -** encoded strings, the other functions return UTF-8. -** -** These APIs are only available if the library was compiled with the -** SQLITE_ENABLE_COLUMN_METADATA preprocessor symbol defined. +** If the Nth column returned by the statement is an expression or +** subquery and is not a column value, then all of these functions return +** NULL. These routine might also return NULL if a memory allocation error +** occurs. Otherwise, they return the name of the attached database, table +** and column that query result column was extracted from. +** +** As with all other SQLite APIs, those postfixed with "16" return +** UTF-16 encoded strings, the other functions return UTF-8. {END} +** +** These APIs are only available if the library was compiled with the +** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined. +** +** {A13751} +** If two or more threads call one or more of these routines against the same +** prepared statement and column at the same time then the results are +** undefined. +** +** Requirements: +** [H13741] [H13742] [H13743] [H13744] [H13745] [H13746] [H13748] +** +** If two or more threads call one or more +** [sqlite3_column_database_name | column metadata interfaces] +** for the same [prepared statement] and result column +** at the same time then the results are undefined. 
*/ const char *sqlite3_column_database_name(sqlite3_stmt*,int); const void *sqlite3_column_database_name16(sqlite3_stmt*,int); @@ -1284,26 +2709,26 @@ const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); /* -** CAPI3REF: Declared Datatype Of A Query Result +** CAPI3REF: Declared Datatype Of A Query Result {H13760} ** -** The first parameter is a [sqlite3_stmt | compiled SQL statement]. -** If this statement is a SELECT statement and the Nth column of the -** returned result set of that SELECT is a table column (not an +** The first parameter is a [prepared statement]. +** If this statement is a [SELECT] statement and the Nth column of the +** returned result set of that [SELECT] is a table column (not an ** expression or subquery) then the declared type of the table -** column is returned. If the Nth column of the result set is an +** column is returned. If the Nth column of the result set is an ** expression or subquery, then a NULL pointer is returned. -** The returned string is always UTF-8 encoded. For example, in -** the database schema: +** The returned string is always UTF-8 encoded. {END} +** +** For example, given the database schema: ** ** CREATE TABLE t1(c1 VARIANT); ** -** And the following statement compiled: +** and the following statement to be compiled: ** ** SELECT c1 + 1, c1 FROM t1; ** -** Then this routine would return the string "VARIANT" for the second -** result column (i==1), and a NULL pointer for the first result column -** (i==0). +** this routine would return the string "VARIANT" for the second result +** column (i==1), and a NULL pointer for the first result column (i==0). ** ** SQLite uses dynamic run-time typing. So just because a column ** is declared to contain a particular type does not mean that the @@ -1311,36 +2736,37 @@ ** strongly typed, but the typing is dynamic not static. Type ** is associated with individual values, not with the containers ** used to hold those values. +** +** Requirements: +** [H13761] [H13762] [H13763] */ -const char *sqlite3_column_decltype(sqlite3_stmt *, int i); +const char *sqlite3_column_decltype(sqlite3_stmt*,int); const void *sqlite3_column_decltype16(sqlite3_stmt*,int); -/* -** CAPI3REF: Evaluate An SQL Statement +/* +** CAPI3REF: Evaluate An SQL Statement {H13200} ** -** After an [sqlite3_stmt | SQL statement] has been prepared with a call -** to either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or to one of -** the legacy interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], -** then this function must be called one or more times to evaluate the -** statement. +** After a [prepared statement] has been prepared using either +** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy +** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function +** must be called one or more times to evaluate the statement. ** -** The details of the behavior of this sqlite3_step() interface depend +** The details of the behavior of the sqlite3_step() interface depend ** on whether the statement was prepared using the newer "v2" interface ** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy ** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the ** new "v2" interface is recommended for new applications but the legacy ** interface will continue to be supported. 
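A typical evaluation loop over a statement prepared with the "v2" interface looks roughly like the sketch below; process_row() is a hypothetical helper, <stdio.h> is assumed, and real code would also handle SQLITE_BUSY according to its transaction state:

    int rc;
    while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
      /* A new row is ready: read it with the column access functions. */
      process_row(pStmt);                    /* hypothetical helper */
    }
    if( rc!=SQLITE_DONE ){
      /* With the v2 interface the specific error code is returned here. */
      fprintf(stderr, "step failed: %s\n", sqlite3_errmsg(db));
    }
    rc = sqlite3_finalize(pStmt);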
** -** In the lagacy interface, the return value will be either [SQLITE_BUSY], +** In the legacy interface, the return value will be either [SQLITE_BUSY], ** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. -** With the "v2" interface, any of the other [SQLITE_OK | result code] -** or [SQLITE_IOERR_READ | extended result code] might be returned as -** well. +** With the "v2" interface, any of the other [result codes] or +** [extended result codes] might be returned as well. ** ** [SQLITE_BUSY] means that the database engine was unable to acquire the -** database locks it needs to do its job. If the statement is a COMMIT +** database locks it needs to do its job. If the statement is a [COMMIT] ** or occurs outside of an explicit transaction, then you can retry the -** statement. If the statement is not a COMMIT and occurs within a +** statement. If the statement is not a [COMMIT] and occurs within a ** explicit transaction then you should rollback the transaction before ** continuing. ** @@ -1349,62 +2775,59 @@ ** machine without first calling [sqlite3_reset()] to reset the virtual ** machine back to its initial state. ** -** If the SQL statement being executed returns any data, then -** [SQLITE_ROW] is returned each time a new row of data is ready -** for processing by the caller. The values may be accessed using -** the [sqlite3_column_int | column access functions]. +** If the SQL statement being executed returns any data, then [SQLITE_ROW] +** is returned each time a new row of data is ready for processing by the +** caller. The values may be accessed using the [column access functions]. ** sqlite3_step() is called again to retrieve the next row of data. -** +** ** [SQLITE_ERROR] means that a run-time error (such as a constraint ** violation) has occurred. sqlite3_step() should not be called again on ** the VM. More information may be found by calling [sqlite3_errmsg()]. -** With the legacy interface, a more specific error code (example: +** With the legacy interface, a more specific error code (for example, ** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) ** can be obtained by calling [sqlite3_reset()] on the -** [sqlite_stmt | prepared statement]. In the "v2" interface, +** [prepared statement]. In the "v2" interface, ** the more specific error code is returned directly by sqlite3_step(). ** ** [SQLITE_MISUSE] means that the this routine was called inappropriately. -** Perhaps it was called on a [sqlite_stmt | prepared statement] that has -** already been [sqlite3_finalize | finalized] or on one that had +** Perhaps it was called on a [prepared statement] that has +** already been [sqlite3_finalize | finalized] or on one that had ** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could ** be the case that the same database connection is being used by two or ** more threads at the same moment in time. ** -** Goofy Interface Alert: -** In the legacy interface, -** the sqlite3_step() API always returns a generic error code, -** [SQLITE_ERROR], following any error other than [SQLITE_BUSY] -** and [SQLITE_MISUSE]. You must call [sqlite3_reset()] or -** [sqlite3_finalize()] in order to find one of the specific -** [SQLITE_ERROR | result codes] that better describes the error. +** Goofy Interface Alert: In the legacy interface, the sqlite3_step() +** API always returns a generic error code, [SQLITE_ERROR], following any +** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. 
You must call +** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the +** specific [error codes] that better describes the error. ** We admit that this is a goofy design. The problem has been fixed ** with the "v2" interface. If you prepare all of your SQL statements ** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead -** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()], then the -** more specific [SQLITE_ERROR | result codes] are returned directly +** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, +** then the more specific [error codes] are returned directly ** by sqlite3_step(). The use of the "v2" interface is recommended. +** +** Requirements: +** [H13202] [H15304] [H15306] [H15308] [H15310] */ int sqlite3_step(sqlite3_stmt*); /* -** CAPI3REF: +** CAPI3REF: Number of columns in a result set {H13770} ** -** Return the number of values in the current row of the result set. +** Returns the number of values in the current row of the result set. ** -** After a call to [sqlite3_step()] that returns [SQLITE_ROW], this routine -** will return the same value as the [sqlite3_column_count()] function. -** After [sqlite3_step()] has returned an [SQLITE_DONE], [SQLITE_BUSY], or -** a [SQLITE_ERROR | error code], or before [sqlite3_step()] has been -** called on the [sqlite_stmt | prepared statement] for the first time, -** this routine returns zero. +** Requirements: +** [H13771] [H13772] */ int sqlite3_data_count(sqlite3_stmt *pStmt); /* -** CAPI3REF: Fundamental Datatypes +** CAPI3REF: Fundamental Datatypes {H10265} +** KEYWORDS: SQLITE_TEXT ** -** Every value in SQLite has one of five fundamental datatypes: +** {H10266} Every value in SQLite has one of five fundamental datatypes: ** **
**      •  64-bit signed integer
@@ -1412,13 +2835,13 @@
**      •  string
**      •  BLOB
**      •  NULL
-**
    +** {END} ** ** These constants are codes for each of those types. ** ** Note that the SQLITE_TEXT constant was also used in SQLite version 2 ** for a completely different meaning. Software that links against both -** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT not +** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not ** SQLITE_TEXT. */ #define SQLITE_INTEGER 1 @@ -1433,21 +2856,31 @@ #define SQLITE3_TEXT 3 /* -** CAPI3REF: Results Values From A Query +** CAPI3REF: Result Values From A Query {H13800} +** KEYWORDS: {column access functions} ** -** These routines return information about the information -** in a single column of the current result row of a query. In every -** case the first argument is a pointer to the -** [sqlite3_stmt | SQL statement] that is being -** evaluate (the [sqlite_stmt*] that was returned from -** [sqlite3_prepare_v2()] or one of its variants) and -** the second argument is the index of the column for which information -** should be returned. The left-most column has an index of 0. +** These routines form the "result set query" interface. ** -** If the SQL statement is not currently point to a valid row, or if the -** the column index is out of range, the result is undefined. +** These routines return information about a single column of the current +** result row of a query. In every case the first argument is a pointer +** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] +** that was returned from [sqlite3_prepare_v2()] or one of its variants) +** and the second argument is the index of the column for which information +** should be returned. The leftmost column of the result set has the index 0. +** +** If the SQL statement does not currently point to a valid row, or if the +** column index is out of range, the result is undefined. +** These routines may only be called when the most recent call to +** [sqlite3_step()] has returned [SQLITE_ROW] and neither +** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. +** If any of these routines are called after [sqlite3_reset()] or +** [sqlite3_finalize()] or after [sqlite3_step()] has returned +** something other than [SQLITE_ROW], the results are undefined. +** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] +** are called from a different thread while any of these routines +** are pending, then the results are undefined. ** -** The sqlite3_column_type() routine returns +** The sqlite3_column_type() routine returns the ** [SQLITE_INTEGER | datatype code] for the initial data type ** of the result column. The returned value is one of [SQLITE_INTEGER], ** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value @@ -1457,7 +2890,7 @@ ** versions of SQLite may change the behavior of sqlite3_column_type() ** following a type conversion. ** -** If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() +** If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() ** routine returns the number of bytes in that BLOB or string. ** If the result is a UTF-16 string, then sqlite3_column_bytes() converts ** the string to UTF-8 and then returns the number of bytes. @@ -1468,20 +2901,32 @@ ** of the string. For clarity: the value returned is the number of ** bytes in the string, not the number of characters. ** +** Strings returned by sqlite3_column_text() and sqlite3_column_text16(), +** even empty strings, are always zero terminated. 
The return +** value from sqlite3_column_blob() for a zero-length BLOB is an arbitrary +** pointer, possibly even a NULL pointer. +** ** The sqlite3_column_bytes16() routine is similar to sqlite3_column_bytes() -** but leaves the result in UTF-16 instead of UTF-8. +** but leaves the result in UTF-16 in native byte order instead of UTF-8. ** The zero terminator is not included in this count. ** +** The object returned by [sqlite3_column_value()] is an +** [unprotected sqlite3_value] object. An unprotected sqlite3_value object +** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()]. +** If the [unprotected sqlite3_value] object returned by +** [sqlite3_column_value()] is used in any other way, including calls +** to routines like [sqlite3_value_int()], [sqlite3_value_text()], +** or [sqlite3_value_bytes()], then the behavior is undefined. +** ** These routines attempt to convert the value where appropriate. For ** example, if the internal representation is FLOAT and a text result -** is requested, [sqlite3_snprintf()] is used internally to do the conversion -** automatically. The following table details the conversions that -** are applied: +** is requested, [sqlite3_snprintf()] is used internally to perform the +** conversion automatically. The following table details the conversions +** that are applied: ** **
    **
•
-**       Internal     Requested
-**       Type         Type          Conversion
+**       Internal Type      Requested Type      Conversion
**
    NULL INTEGER Result is 0 **
    NULL FLOAT Result is 0.0 @@ -1489,7 +2934,7 @@ **
    NULL BLOB Result is NULL pointer **
    INTEGER FLOAT Convert from integer to float **
    INTEGER TEXT ASCII rendering of the integer -**
    INTEGER BLOB Same as for INTEGER->TEXT +**
    INTEGER BLOB Same as INTEGER->TEXT **
    FLOAT INTEGER Convert from float to integer **
    FLOAT TEXT ASCII rendering of the float **
    FLOAT BLOB Same as FLOAT->TEXT @@ -1504,176 +2949,233 @@ ** ** The table above makes reference to standard C library functions atoi() ** and atof(). SQLite does not really use these functions. It has its -** on equavalent internal routines. The atoi() and atof() names are +** own equivalent internal routines. The atoi() and atof() names are ** used in the table for brevity and because they are familiar to most ** C programmers. ** ** Note that when type conversions occur, pointers returned by prior ** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -** sqlite3_column_text16() may be invalidated. +** sqlite3_column_text16() may be invalidated. ** Type conversions and pointer invalidations might occur ** in the following cases: ** **
      -**
    • The initial content is a BLOB and sqlite3_column_text() -** or sqlite3_column_text16() is called. A zero-terminator might -** need to be added to the string.

    • -** -**
    • The initial content is UTF-8 text and sqlite3_column_bytes16() or -** sqlite3_column_text16() is called. The content must be converted -** to UTF-16.

    • -** -**
    • The initial content is UTF-16 text and sqlite3_column_bytes() or -** sqlite3_column_text() is called. The content must be converted -** to UTF-8.

    • +**
    • The initial content is a BLOB and sqlite3_column_text() or +** sqlite3_column_text16() is called. A zero-terminator might +** need to be added to the string.
    • +**
    • The initial content is UTF-8 text and sqlite3_column_bytes16() or +** sqlite3_column_text16() is called. The content must be converted +** to UTF-16.
    • +**
    • The initial content is UTF-16 text and sqlite3_column_bytes() or +** sqlite3_column_text() is called. The content must be converted +** to UTF-8.
    • **
    ** ** Conversions between UTF-16be and UTF-16le are always done in place and do ** not invalidate a prior pointer, though of course the content of the buffer ** that the prior pointer points to will have been modified. Other kinds -** of conversion are done in place when it is possible, but sometime it is -** not possible and in those cases prior pointers are invalidated. +** of conversion are done in place when it is possible, but sometimes they +** are not possible and in those cases prior pointers are invalidated. ** ** The safest and easiest to remember policy is to invoke these routines ** in one of the following ways: ** -**
      +**
        **
      • sqlite3_column_text() followed by sqlite3_column_bytes()
      • **
      • sqlite3_column_blob() followed by sqlite3_column_bytes()
      • **
      • sqlite3_column_text16() followed by sqlite3_column_bytes16()
      • -**
      +**
    ** -** In other words, you should call sqlite3_column_text(), sqlite3_column_blob(), -** or sqlite3_column_text16() first to force the result into the desired -** format, then invoke sqlite3_column_bytes() or sqlite3_column_bytes16() to -** find the size of the result. Do not mix call to sqlite3_column_text() or -** sqlite3_column_blob() with calls to sqlite3_column_bytes16(). And do not -** mix calls to sqlite3_column_text16() with calls to sqlite3_column_bytes(). +** In other words, you should call sqlite3_column_text(), +** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result +** into the desired format, then invoke sqlite3_column_bytes() or +** sqlite3_column_bytes16() to find the size of the result. Do not mix calls +** to sqlite3_column_text() or sqlite3_column_blob() with calls to +** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() +** with calls to sqlite3_column_bytes(). +** +** The pointers returned are valid until a type conversion occurs as +** described above, or until [sqlite3_step()] or [sqlite3_reset()] or +** [sqlite3_finalize()] is called. The memory space used to hold strings +** and BLOBs is freed automatically. Do not pass the pointers returned +** [sqlite3_column_blob()], [sqlite3_column_text()], etc. into +** [sqlite3_free()]. +** +** If a memory allocation error occurs during the evaluation of any +** of these routines, a default value is returned. The default value +** is either the integer 0, the floating point number 0.0, or a NULL +** pointer. Subsequent calls to [sqlite3_errcode()] will return +** [SQLITE_NOMEM]. +** +** Requirements: +** [H13803] [H13806] [H13809] [H13812] [H13815] [H13818] [H13821] [H13824] +** [H13827] [H13830] */ const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); int sqlite3_column_bytes(sqlite3_stmt*, int iCol); int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); double sqlite3_column_double(sqlite3_stmt*, int iCol); int sqlite3_column_int(sqlite3_stmt*, int iCol); -sqlite_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); +sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); int sqlite3_column_type(sqlite3_stmt*, int iCol); sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); /* -** CAPI3REF: Destroy A Prepared Statement Object +** CAPI3REF: Destroy A Prepared Statement Object {H13300} ** -** The sqlite3_finalize() function is called to delete a -** [sqlite3_stmt | compiled SQL statement]. If the statement was -** executed successfully, or not executed at all, then SQLITE_OK is returned. -** If execution of the statement failed then an -** [SQLITE_ERROR | error code] or [SQLITE_IOERR_READ | extended error code] -** is returned. +** The sqlite3_finalize() function is called to delete a [prepared statement]. +** If the statement was executed successfully or not executed at all, then +** SQLITE_OK is returned. If execution of the statement failed then an +** [error code] or [extended error code] is returned. ** ** This routine can be called at any point during the execution of the -** [sqlite3_stmt | virtual machine]. If the virtual machine has not +** [prepared statement]. If the virtual machine has not ** completed execution when this routine is called, that is like -** encountering an error or an interrupt. (See [sqlite3_interrupt()].) 
-** Incomplete updates may be rolled back and transactions cancelled, -** depending on the circumstances, and the -** [SQLITE_ERROR | result code] returned will be [SQLITE_ABORT]. +** encountering an error or an [sqlite3_interrupt | interrupt]. +** Incomplete updates may be rolled back and transactions canceled, +** depending on the circumstances, and the +** [error code] returned will be [SQLITE_ABORT]. +** +** Requirements: +** [H11302] [H11304] */ int sqlite3_finalize(sqlite3_stmt *pStmt); /* -** CAPI3REF: Reset A Prepared Statement Object +** CAPI3REF: Reset A Prepared Statement Object {H13330} ** -** The sqlite3_reset() function is called to reset a -** [sqlite_stmt | compiled SQL statement] object. -** back to it's initial state, ready to be re-executed. +** The sqlite3_reset() function is called to reset a [prepared statement] +** object back to its initial state, ready to be re-executed. ** Any SQL statement variables that had values bound to them using ** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. ** Use [sqlite3_clear_bindings()] to reset the bindings. +** +** {H11332} The [sqlite3_reset(S)] interface resets the [prepared statement] S +** back to the beginning of its program. +** +** {H11334} If the most recent call to [sqlite3_step(S)] for the +** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], +** or if [sqlite3_step(S)] has never before been called on S, +** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** +** {H11336} If the most recent call to [sqlite3_step(S)] for the +** [prepared statement] S indicated an error, then +** [sqlite3_reset(S)] returns an appropriate [error code]. +** +** {H11338} The [sqlite3_reset(S)] interface does not change the values +** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ int sqlite3_reset(sqlite3_stmt *pStmt); /* -** CAPI3REF: Create Or Redefine SQL Functions -** -** The following two functions are used to add SQL functions or aggregates -** or to redefine the behavior of existing SQL functions or aggregates. The -** difference only between the two is that the second parameter, the -** name of the (scalar) function or aggregate, is encoded in UTF-8 for -** sqlite3_create_function() and UTF-16 for sqlite3_create_function16(). -** -** The first argument is the [sqlite3 | database handle] that holds the -** SQL function or aggregate is to be added or redefined. If a single -** program uses more than one database handle internally, then SQL -** functions or aggregates must be added individually to each database -** handle with which they will be used. -** -** The second parameter is the name of the SQL function to be created -** or redefined. -** The length of the name is limited to 255 bytes, exclusive of the -** zero-terminator. Note that the name length limit is in bytes, not +** CAPI3REF: Create Or Redefine SQL Functions {H16100} +** KEYWORDS: {function creation routines} +** KEYWORDS: {application-defined SQL function} +** KEYWORDS: {application-defined SQL functions} +** +** These two functions (collectively known as "function creation routines") +** are used to add SQL functions or aggregates or to redefine the behavior +** of existing SQL functions or aggregates. The only difference between the +** two is that the second parameter, the name of the (scalar) function or +** aggregate, is encoded in UTF-8 for sqlite3_create_function() and UTF-16 +** for sqlite3_create_function16(). 
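A short sketch of the reset-and-rebind cycle described just above, reusing one prepared INSERT statement for many values; names[] and nNames are hypothetical and error checking is omitted:

    for(i=0; i<nNames; i++){
      sqlite3_bind_text(pStmt, 1, names[i], -1, SQLITE_STATIC);
      sqlite3_step(pStmt);                 /* expect SQLITE_DONE */
      sqlite3_reset(pStmt);                /* rewind the statement ...  */
      sqlite3_clear_bindings(pStmt);       /* ... and drop old bindings */
    }
    sqlite3_finalize(pStmt);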
+** +** The first parameter is the [database connection] to which the SQL +** function is to be added. If a single program uses more than one database +** connection internally, then SQL functions must be added individually to +** each database connection. +** +** The second parameter is the name of the SQL function to be created or +** redefined. The length of the name is limited to 255 bytes, exclusive of +** the zero-terminator. Note that the name length limit is in bytes, not ** characters. Any attempt to create a function with a longer name -** will result in an SQLITE_ERROR error. +** will result in [SQLITE_ERROR] being returned. ** -** The third parameter is the number of arguments that the SQL function or -** aggregate takes. If this parameter is negative, then the SQL function or -** aggregate may take any number of arguments. +** The third parameter (nArg) +** is the number of arguments that the SQL function or +** aggregate takes. If this parameter is -1, then the SQL function or +** aggregate may take any number of arguments between 0 and the limit +** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third +** parameter is less than -1 or greater than 127 then the behavior is +** undefined. ** -** The fourth parameter, eTextRep, specifies what +** The fourth parameter, eTextRep, specifies what ** [SQLITE_UTF8 | text encoding] this SQL function prefers for ** its parameters. Any SQL function implementation should be able to work ** work with UTF-8, UTF-16le, or UTF-16be. But some implementations may be ** more efficient with one encoding than another. It is allowed to -** invoke sqlite_create_function() or sqlite3_create_function16() multiple +** invoke sqlite3_create_function() or sqlite3_create_function16() multiple ** times with the same function but with different values of eTextRep. ** When multiple implementations of the same function are available, SQLite ** will pick the one that involves the least amount of data conversion. -** If there is only a single implementation which does not care what -** text encoding is used, then the fourth argument should be -** [SQLITE_ANY]. -** -** The fifth parameter is an arbitrary pointer. The implementation -** of the function can gain access to this pointer using -** [sqlite_user_data()]. +** If there is only a single implementation which does not care what text +** encoding is used, then the fourth argument should be [SQLITE_ANY]. +** +** The fifth parameter is an arbitrary pointer. The implementation of the +** function can gain access to this pointer using [sqlite3_user_data()]. ** ** The seventh, eighth and ninth parameters, xFunc, xStep and xFinal, are -** pointers to C-language functions that implement the SQL -** function or aggregate. A scalar SQL function requires an implementation of -** the xFunc callback only, NULL pointers should be passed as the xStep -** and xFinal parameters. An aggregate SQL function requires an implementation -** of xStep and xFinal and NULL should be passed for xFunc. To delete an -** existing SQL function or aggregate, pass NULL for all three function -** callback. +** pointers to C-language functions that implement the SQL function or +** aggregate. A scalar SQL function requires an implementation of the xFunc +** callback only, NULL pointers should be passed as the xStep and xFinal +** parameters. An aggregate SQL function requires an implementation of xStep +** and xFinal and NULL should be passed for xFunc. 
To delete an existing +** SQL function or aggregate, pass NULL for all three function callbacks. ** ** It is permitted to register multiple implementations of the same ** functions with the same name but with either differing numbers of -** arguments or differing perferred text encodings. SQLite will use +** arguments or differing preferred text encodings. SQLite will use ** the implementation most closely matches the way in which the -** SQL function is used. +** SQL function is used. A function implementation with a non-negative +** nArg parameter is a better match than a function implementation with +** a negative nArg. A function where the preferred text encoding +** matches the database encoding is a better +** match than a function where the encoding is different. +** A function where the encoding difference is between UTF16le and UTF16be +** is a closer match than a function where the encoding difference is +** between UTF8 and UTF16. +** +** Built-in functions may be overloaded by new application-defined functions. +** The first application-defined function with a given name overrides all +** built-in functions in the same [database connection] with the same name. +** Subsequent application-defined functions of the same name only override +** prior application-defined functions that are an exact match for the +** number of parameters and preferred encoding. +** +** An application-defined function is permitted to call other +** SQLite interfaces. However, such calls must not +** close the database connection nor finalize or reset the prepared +** statement in which the function is running. +** +** Requirements: +** [H16103] [H16106] [H16109] [H16112] [H16118] [H16121] [H16127] +** [H16130] [H16133] [H16136] [H16139] [H16142] */ int sqlite3_create_function( - sqlite3 *, + sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, - void*, + void *pApp, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*) ); int sqlite3_create_function16( - sqlite3*, + sqlite3 *db, const void *zFunctionName, int nArg, int eTextRep, - void*, + void *pApp, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*) ); /* -** CAPI3REF: Text Encodings +** CAPI3REF: Text Encodings {H10267} ** ** These constant define integer codes that represent the various ** text encodings supported by SQLite. @@ -1686,22 +3188,26 @@ #define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ /* -** CAPI3REF: Obsolete Functions +** CAPI3REF: Deprecated Functions +** DEPRECATED ** -** These functions are all now obsolete. In order to maintain -** backwards compatibility with older code, we continue to support -** these functions. However, new development projects should avoid +** These functions are [deprecated]. In order to maintain +** backwards compatibility with older code, these functions continue +** to be supported. However, new applications should avoid ** the use of these functions. To help encourage people to avoid -** using these functions, we are not going to tell you want they do. +** using these functions, we are not going to tell you what they do. 
*/ -int sqlite3_aggregate_count(sqlite3_context*); -int sqlite3_expired(sqlite3_stmt*); -int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -int sqlite3_global_recover(void); - +#ifndef SQLITE_OMIT_DEPRECATED +SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); +SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); +SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); +SQLITE_DEPRECATED int sqlite3_global_recover(void); +SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); +SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),void*,sqlite3_int64); +#endif /* -** CAPI3REF: Obtaining SQL Function Parameter Values +** CAPI3REF: Obtaining SQL Function Parameter Values {H15100} ** ** The C-language implementation of SQL functions and aggregates uses ** this set of interface routines to access the parameter values on @@ -1711,40 +3217,50 @@ ** to [sqlite3_create_function()] and [sqlite3_create_function16()] ** define callbacks that implement the SQL functions and aggregates. ** The 4th parameter to these callbacks is an array of pointers to -** [sqlite3_value] objects. There is one [sqlite3_value] object for +** [protected sqlite3_value] objects. There is one [sqlite3_value] object for ** each parameter to the SQL function. These routines are used to ** extract values from the [sqlite3_value] objects. ** -** These routines work just like the corresponding -** [sqlite3_column_blob | sqlite3_column_* routines] except that -** these routines take a single [sqlite3_value*] pointer instead -** of an [sqlite3_stmt*] pointer and an integer column number. +** These routines work only with [protected sqlite3_value] objects. +** Any attempt to use these routines on an [unprotected sqlite3_value] +** object results in undefined behavior. +** +** These routines work just like the corresponding [column access functions] +** except that these routines take a single [protected sqlite3_value] object +** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. ** -** The sqlite3_value_text16() interface extracts a UTF16 string +** The sqlite3_value_text16() interface extracts a UTF-16 string ** in the native byte-order of the host machine. The ** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces -** extract UTF16 strings as big-endian and little-endian respectively. +** extract UTF-16 strings as big-endian and little-endian respectively. ** ** The sqlite3_value_numeric_type() interface attempts to apply ** numeric affinity to the value. This means that an attempt is ** made to convert the value to an integer or floating point. If -** such a conversion is possible without loss of information (in order -** words if the value is original a string that looks like a number) -** then it is done. Otherwise no conversion occurs. The -** [SQLITE_INTEGER | datatype] after conversion is returned. +** such a conversion is possible without loss of information (in other +** words, if the value is a string that looks like a number) +** then the conversion is performed. Otherwise no conversion occurs. +** The [SQLITE_INTEGER | datatype] after conversion is returned. 
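As a sketch tying these value accessors to the function creation routines documented earlier, a scalar half(X) function might look like this (the SQL function name is purely illustrative, and registration is assumed to happen once against an open connection db):

    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      /* argv[] holds one protected sqlite3_value per SQL argument. */
      if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
        sqlite3_result_null(ctx);
      }else{
        sqlite3_result_double(ctx, sqlite3_value_double(argv[0])/2.0);
      }
    }

    /* One argument, UTF-8 preferred, scalar: xStep and xFinal are NULL. */
    sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);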
** -** Please pay particular attention to the fact that the pointer that -** is returned from [sqlite3_value_blob()], [sqlite3_value_text()], or +** Please pay particular attention to the fact that the pointer returned +** from [sqlite3_value_blob()], [sqlite3_value_text()], or ** [sqlite3_value_text16()] can be invalidated by a subsequent call to -** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite_value_text()], -** or [sqlite3_value_text16()]. +** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], +** or [sqlite3_value_text16()]. +** +** These routines must be called from the same thread as +** the SQL function that supplied the [sqlite3_value*] parameters. +** +** Requirements: +** [H15103] [H15106] [H15109] [H15112] [H15115] [H15118] [H15121] [H15124] +** [H15127] [H15130] [H15133] [H15136] */ const void *sqlite3_value_blob(sqlite3_value*); int sqlite3_value_bytes(sqlite3_value*); int sqlite3_value_bytes16(sqlite3_value*); double sqlite3_value_double(sqlite3_value*); int sqlite3_value_int(sqlite3_value*); -sqlite_int64 sqlite3_value_int64(sqlite3_value*); +sqlite3_int64 sqlite3_value_int64(sqlite3_value*); const unsigned char *sqlite3_value_text(sqlite3_value*); const void *sqlite3_value_text16(sqlite3_value*); const void *sqlite3_value_text16le(sqlite3_value*); @@ -1753,76 +3269,120 @@ int sqlite3_value_numeric_type(sqlite3_value*); /* -** CAPI3REF: Obtain Aggregate Function Context +** CAPI3REF: Obtain Aggregate Function Context {H16210} ** ** The implementation of aggregate SQL functions use this routine to allocate -** a structure for storing their state. The first time this routine -** is called for a particular aggregate, a new structure of size nBytes -** is allocated, zeroed, and returned. On subsequent calls (for the -** same aggregate instance) the same buffer is returned. The implementation -** of the aggregate can use the returned buffer to accumulate data. +** a structure for storing their state. +** +** The first time the sqlite3_aggregate_context() routine is called for a +** particular aggregate, SQLite allocates nBytes of memory, zeroes out that +** memory, and returns a pointer to it. On second and subsequent calls to +** sqlite3_aggregate_context() for the same aggregate function index, +** the same buffer is returned. The implementation of the aggregate can use +** the returned buffer to accumulate data. ** -** The buffer allocated is freed automatically by SQLite whan the aggregate +** SQLite automatically frees the allocated buffer when the aggregate ** query concludes. ** -** The first parameter should be a copy of the -** [sqlite3_context | SQL function context] that is the first -** parameter to the callback routine that implements the aggregate -** function. +** The first parameter should be a copy of the +** [sqlite3_context | SQL function context] that is the first parameter +** to the callback routine that implements the aggregate function. +** +** This routine must be called from the same thread in which +** the aggregate SQL function is running. 
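A sketch of an aggregate that keeps its per-invocation state in the buffer returned by sqlite3_aggregate_context(); the aggregate name and its behavior (counting non-NULL arguments) are illustrative only:

    typedef struct CountCtx { sqlite3_int64 n; } CountCtx;

    static void countStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      /* First call allocates and zeroes the buffer; later calls return it. */
      CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
      if( p && sqlite3_value_type(argv[0])!=SQLITE_NULL ) p->n++;
    }
    static void countFinal(sqlite3_context *ctx){
      CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
      sqlite3_result_int64(ctx, p ? p->n : 0);
    }

    /* xFunc is NULL for an aggregate: */
    sqlite3_create_function(db, "mycount", 1, SQLITE_UTF8, 0,
                            0, countStep, countFinal);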
+** +** Requirements: +** [H16211] [H16213] [H16215] [H16217] */ void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); /* -** CAPI3REF: User Data For Functions +** CAPI3REF: User Data For Functions {H16240} +** +** The sqlite3_user_data() interface returns a copy of +** the pointer that was the pUserData parameter (the 5th parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. {END} +** +** This routine must be called from the same thread in which +** the application-defined function is running. ** -** The pUserData parameter to the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines -** used to register user functions is available to -** the implementation of the function using this call. +** Requirements: +** [H16243] */ void *sqlite3_user_data(sqlite3_context*); /* -** CAPI3REF: Function Auxiliary Data +** CAPI3REF: Database Connection For Functions {H16250} +** +** The sqlite3_context_db_handle() interface returns a copy of +** the pointer to the [database connection] (the 1st parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. +** +** Requirements: +** [H16253] +*/ +sqlite3 *sqlite3_context_db_handle(sqlite3_context*); + +/* +** CAPI3REF: Function Auxiliary Data {H16270} ** ** The following two functions may be used by scalar SQL functions to -** associate meta-data with argument values. If the same value is passed to +** associate metadata with argument values. If the same value is passed to ** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated meta-data may be preserved. This may +** some circumstances the associated metadata may be preserved. This may ** be used, for example, to add a regular-expression matching scalar ** function. The compiled version of the regular expression is stored as -** meta-data associated with the SQL value passed as the regular expression +** metadata associated with the SQL value passed as the regular expression ** pattern. The compiled regular expression can be reused on multiple ** invocations of the same function so that the original pattern string ** does not need to be recompiled on each invocation. ** -** The sqlite3_get_auxdata() interface returns a pointer to the meta-data -** associated with the Nth argument value to the current SQL function -** call, where N is the second parameter. If no meta-data has been set for -** that value, then a NULL pointer is returned. -** -** The sqlite3_set_auxdata() is used to associate meta-data with an SQL -** function argument. The third parameter is a pointer to the meta-data -** to be associated with the Nth user function argument value. The fourth -** parameter specifies a destructor that will be called on the meta- -** data pointer to release it when it is no longer required. If the -** destructor is NULL, it is not invoked. +** The sqlite3_get_auxdata() interface returns a pointer to the metadata +** associated by the sqlite3_set_auxdata() function with the Nth argument +** value to the application-defined function. If no metadata has been ever +** been set for the Nth argument of the function, or if the corresponding +** function parameter has changed since the meta-data was set, +** then sqlite3_get_auxdata() returns a NULL pointer. 
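A sketch of the metadata pattern described in this section, caching a "compiled" form of a constant second argument between calls; compile_pattern(), free_pattern(), and pattern_matches() are hypothetical stand-ins for a real regular-expression library:

    static void matchFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      void *pCompiled = sqlite3_get_auxdata(ctx, 1);
      if( pCompiled==0 ){
        pCompiled = compile_pattern(sqlite3_value_text(argv[1]));  /* hypothetical */
        if( pCompiled==0 ){ sqlite3_result_error_nomem(ctx); return; }
        /* SQLite invokes free_pattern() when it drops the metadata. */
        sqlite3_set_auxdata(ctx, 1, pCompiled, free_pattern);      /* hypothetical */
      }
      sqlite3_result_int(ctx,
          pattern_matches(pCompiled, sqlite3_value_text(argv[0])));
    }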
+** +** The sqlite3_set_auxdata() interface saves the metadata +** pointed to by its 3rd parameter as the metadata for the N-th +** argument of the application-defined function. Subsequent +** calls to sqlite3_get_auxdata() might return this data, if it has +** not been destroyed. +** If it is not NULL, SQLite will invoke the destructor +** function given by the 4th parameter to sqlite3_set_auxdata() on +** the metadata when the corresponding function parameter changes +** or when the SQL statement completes, whichever comes first. +** +** SQLite is free to call the destructor and drop metadata on any +** parameter of any function at any time. The only guarantee is that +** the destructor will be called before the metadata is dropped. ** -** In practice, meta-data is preserved between function calls for +** In practice, metadata is preserved between function calls for ** expressions that are constant at compile time. This includes literal ** values and SQL variables. +** +** These routines must be called from the same thread in which +** the SQL function is running. +** +** Requirements: +** [H16272] [H16274] [H16276] [H16277] [H16278] [H16279] */ -void *sqlite3_get_auxdata(sqlite3_context*, int); -void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*)); +void *sqlite3_get_auxdata(sqlite3_context*, int N); +void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); /* -** CAPI3REF: Constants Defining Special Destructor Behavior +** CAPI3REF: Constants Defining Special Destructor Behavior {H10280} ** -** These are special value for the destructor that is passed in as the +** These are special values for the destructor that is passed in as the ** final argument to routines like [sqlite3_result_blob()]. If the destructor ** argument is SQLITE_STATIC, it means that the content pointer is constant -** and will never change. It does not need to be destroyed. The +** and will never change. It does not need to be destroyed. The ** SQLITE_TRANSIENT value means that the content will likely change in ** the near future and that SQLite should make its own private copy of ** the content before returning. @@ -1835,36 +3395,123 @@ #define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) /* -** CAPI3REF: Setting The Result Of An SQL Function +** CAPI3REF: Setting The Result Of An SQL Function {H16400} ** ** These routines are used by the xFunc or xFinal callbacks that ** implement SQL functions and aggregates. See ** [sqlite3_create_function()] and [sqlite3_create_function16()] ** for additional information. ** -** These functions work very much like the -** [sqlite3_bind_blob | sqlite3_bind_*] family of functions used -** to bind values to host parameters in prepared statements. -** Refer to the -** [sqlite3_bind_blob | sqlite3_bind_* documentation] for -** additional information. +** These functions work very much like the [parameter binding] family of +** functions used to bind values to host parameters in prepared statements. +** Refer to the [SQL parameter] documentation for additional information. +** +** The sqlite3_result_blob() interface sets the result from +** an application-defined function to be the BLOB whose content is pointed +** to by the second parameter and which is N bytes long where N is the +** third parameter. +** +** The sqlite3_result_zeroblob() interfaces set the result of +** the application-defined function to be a BLOB containing all zero +** bytes and N bytes in size, where N is the value of the 2nd parameter. 
+** +** The sqlite3_result_double() interface sets the result from +** an application-defined function to be a floating point value specified +** by its 2nd argument. ** ** The sqlite3_result_error() and sqlite3_result_error16() functions -** cause the implemented SQL function to throw an exception. The -** parameter to sqlite3_result_error() or sqlite3_result_error16() -** is the text of an error message. -** -** The sqlite3_result_toobig() cause the function implementation -** to throw and error indicating that a string or BLOB is to long -** to represent. +** cause the implemented SQL function to throw an exception. +** SQLite uses the string pointed to by the +** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() +** as the text of an error message. SQLite interprets the error +** message string from sqlite3_result_error() as UTF-8. SQLite +** interprets the string from sqlite3_result_error16() as UTF-16 in native +** byte order. If the third parameter to sqlite3_result_error() +** or sqlite3_result_error16() is negative then SQLite takes as the error +** message all text up through the first zero character. +** If the third parameter to sqlite3_result_error() or +** sqlite3_result_error16() is non-negative then SQLite takes that many +** bytes (not characters) from the 2nd parameter as the error message. +** The sqlite3_result_error() and sqlite3_result_error16() +** routines make a private copy of the error message text before +** they return. Hence, the calling function can deallocate or +** modify the text after they return without harm. +** The sqlite3_result_error_code() function changes the error code +** returned by SQLite as a result of an error in a function. By default, +** the error code is SQLITE_ERROR. A subsequent call to sqlite3_result_error() +** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. +** +** The sqlite3_result_toobig() interface causes SQLite to throw an error +** indicating that a string or BLOB is to long to represent. +** +** The sqlite3_result_nomem() interface causes SQLite to throw an error +** indicating that a memory allocation failed. +** +** The sqlite3_result_int() interface sets the return value +** of the application-defined function to be the 32-bit signed integer +** value given in the 2nd argument. +** The sqlite3_result_int64() interface sets the return value +** of the application-defined function to be the 64-bit signed integer +** value given in the 2nd argument. +** +** The sqlite3_result_null() interface sets the return value +** of the application-defined function to be NULL. +** +** The sqlite3_result_text(), sqlite3_result_text16(), +** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces +** set the return value of the application-defined function to be +** a text string which is represented as UTF-8, UTF-16 native byte order, +** UTF-16 little endian, or UTF-16 big endian, respectively. +** SQLite takes the text result from the application from +** the 2nd parameter of the sqlite3_result_text* interfaces. +** If the 3rd parameter to the sqlite3_result_text* interfaces +** is negative, then SQLite takes result text from the 2nd parameter +** through the first zero character. +** If the 3rd parameter to the sqlite3_result_text* interfaces +** is non-negative, then as many bytes (not characters) of the text +** pointed to by the 2nd parameter are taken as the application-defined +** function result. 
+** If the 4th parameter to the sqlite3_result_text* interfaces +** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that +** function as the destructor on the text or BLOB result when it has +** finished using that result. +** If the 4th parameter to the sqlite3_result_text* interfaces or +** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite +** assumes that the text or BLOB result is in constant space and does not +** copy the it or call a destructor when it has finished using that result. +** If the 4th parameter to the sqlite3_result_text* interfaces +** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT +** then SQLite makes a copy of the result into space obtained from +** from [sqlite3_malloc()] before it returns. +** +** The sqlite3_result_value() interface sets the result of +** the application-defined function to be a copy the +** [unprotected sqlite3_value] object specified by the 2nd parameter. The +** sqlite3_result_value() interface makes a copy of the [sqlite3_value] +** so that the [sqlite3_value] specified in the parameter may change or +** be deallocated after sqlite3_result_value() returns without harm. +** A [protected sqlite3_value] object may always be used where an +** [unprotected sqlite3_value] object is required, so either +** kind of [sqlite3_value] object can be used with this interface. +** +** If these routines are called from within the different thread +** than the one containing the application-defined function that received +** the [sqlite3_context] pointer, the results are undefined. +** +** Requirements: +** [H16403] [H16406] [H16409] [H16412] [H16415] [H16418] [H16421] [H16424] +** [H16427] [H16430] [H16433] [H16436] [H16439] [H16442] [H16445] [H16448] +** [H16451] [H16454] [H16457] [H16460] [H16463] */ void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); void sqlite3_result_double(sqlite3_context*, double); void sqlite3_result_error(sqlite3_context*, const char*, int); void sqlite3_result_error16(sqlite3_context*, const void*, int); void sqlite3_result_error_toobig(sqlite3_context*); +void sqlite3_result_error_nomem(sqlite3_context*); +void sqlite3_result_error_code(sqlite3_context*, int); void sqlite3_result_int(sqlite3_context*, int); -void sqlite3_result_int64(sqlite3_context*, sqlite_int64); +void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); void sqlite3_result_null(sqlite3_context*); void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); @@ -1874,46 +3521,54 @@ void sqlite3_result_zeroblob(sqlite3_context*, int n); /* -** CAPI3REF: Define New Collating Sequences +** CAPI3REF: Define New Collating Sequences {H16600} ** ** These functions are used to add new collation sequences to the -** [sqlite3*] handle specified as the first argument. +** [database connection] specified as the first argument. ** ** The name of the new collation sequence is specified as a UTF-8 string ** for sqlite3_create_collation() and sqlite3_create_collation_v2() -** and a UTF-16 string for sqlite3_create_collation16(). In all cases +** and a UTF-16 string for sqlite3_create_collation16(). In all cases ** the name is passed as the second function argument. 
** -** The third argument must be one of the constants [SQLITE_UTF8], -** [SQLITE_UTF16LE] or [SQLITE_UTF16BE], indicating that the user-supplied +** The third argument may be one of the constants [SQLITE_UTF8], +** [SQLITE_UTF16LE], or [SQLITE_UTF16BE], indicating that the user-supplied ** routine expects to be passed pointers to strings encoded using UTF-8, -** UTF-16 little-endian or UTF-16 big-endian respectively. +** UTF-16 little-endian, or UTF-16 big-endian, respectively. The +** third argument might also be [SQLITE_UTF16] to indicate that the routine +** expects pointers to be UTF-16 strings in the native byte order, or the +** argument can be [SQLITE_UTF16_ALIGNED] if the +** the routine expects pointers to 16-bit word aligned strings +** of UTF-16 in the native byte order. ** ** A pointer to the user supplied routine must be passed as the fifth -** argument. If it is NULL, this is the same as deleting the collation -** sequence (so that SQLite cannot call it anymore). Each time the user -** supplied function is invoked, it is passed a copy of the void* passed as -** the fourth argument to sqlite3_create_collation() or -** sqlite3_create_collation16() as its first parameter. +** argument. If it is NULL, this is the same as deleting the collation +** sequence (so that SQLite cannot call it anymore). +** Each time the application supplied function is invoked, it is passed +** as its first parameter a copy of the void* passed as the fourth argument +** to sqlite3_create_collation() or sqlite3_create_collation16(). ** -** The remaining arguments to the user-supplied routine are two strings, -** each represented by a [length, data] pair and encoded in the encoding +** The remaining arguments to the application-supplied routine are two strings, +** each represented by a (length, data) pair and encoded in the encoding ** that was passed as the third argument when the collation sequence was -** registered. The user routine should return negative, zero or positive if -** the first string is less than, equal to, or greater than the second -** string. i.e. (STRING1 - STRING2). +** registered. {END} The application defined collation routine should +** return negative, zero or positive if the first string is less than, +** equal to, or greater than the second string. i.e. (STRING1 - STRING2). ** ** The sqlite3_create_collation_v2() works like sqlite3_create_collation() -** excapt that it takes an extra argument which is a destructor for +** except that it takes an extra argument which is a destructor for ** the collation. The destructor is called when the collation is ** destroyed and is passed a copy of the fourth parameter void* pointer -** of the sqlite3_create_collation_v2(). Collations are destroyed when -** they are overridden by later calls to the collation creation functions -** or when the [sqlite3*] database handle is closed using [sqlite3_close()]. -** -** The sqlite3_create_collation_v2() interface is experimental and -** subject to change in future releases. The other collation creation -** functions are stable. +** of the sqlite3_create_collation_v2(). +** Collations are destroyed when they are overridden by later calls to the +** collation creation functions or when the [database connection] is closed +** using [sqlite3_close()]. +** +** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. 
+** +** Requirements: +** [H16603] [H16604] [H16606] [H16609] [H16612] [H16615] [H16618] [H16621] +** [H16624] [H16627] [H16630] */ int sqlite3_create_collation( sqlite3*, @@ -1932,37 +3587,40 @@ ); int sqlite3_create_collation16( sqlite3*, - const char *zName, + const void *zName, int eTextRep, void*, int(*xCompare)(void*,int,const void*,int,const void*) ); /* -** CAPI3REF: Collation Needed Callbacks +** CAPI3REF: Collation Needed Callbacks {H16700} ** ** To avoid having to register all collation sequences before a database ** can be used, a single callback function may be registered with the -** database handle to be called whenever an undefined collation sequence is -** required. +** [database connection] to be called whenever an undefined collation +** sequence is required. ** ** If the function is registered using the sqlite3_collation_needed() API, ** then it is passed the names of undefined collation sequences as strings -** encoded in UTF-8. If sqlite3_collation_needed16() is used, the names -** are passed as UTF-16 in machine native byte order. A call to either -** function replaces any existing callback. +** encoded in UTF-8. {H16703} If sqlite3_collation_needed16() is used, +** the names are passed as UTF-16 in machine native byte order. +** A call to either function replaces any existing callback. ** ** When the callback is invoked, the first argument passed is a copy ** of the second argument to sqlite3_collation_needed() or -** sqlite3_collation_needed16(). The second argument is the database -** handle. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], or -** [SQLITE_UTF16LE], indicating the most desirable form of the collation -** sequence function required. The fourth parameter is the name of the +** sqlite3_collation_needed16(). The second argument is the database +** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], +** or [SQLITE_UTF16LE], indicating the most desirable form of the collation +** sequence function required. The fourth parameter is the name of the ** required collation sequence. ** ** The callback function should register the desired collation using ** [sqlite3_create_collation()], [sqlite3_create_collation16()], or ** [sqlite3_create_collation_v2()]. +** +** Requirements: +** [H16702] [H16704] [H16706] */ int sqlite3_collation_needed( sqlite3*, @@ -2001,259 +3659,342 @@ ); /* -** CAPI3REF: Suspend Execution For A Short Time +** CAPI3REF: Suspend Execution For A Short Time {H10530} ** -** This function causes the current thread to suspend execution -** a number of milliseconds specified in its parameter. +** The sqlite3_sleep() function causes the current thread to suspend execution +** for at least a number of milliseconds specified in its parameter. ** -** If the operating system does not support sleep requests with -** millisecond time resolution, then the time will be rounded up to -** the nearest second. The number of milliseconds of sleep actually +** If the operating system does not support sleep requests with +** millisecond time resolution, then the time will be rounded up to +** the nearest second. The number of milliseconds of sleep actually ** requested from the operating system is returned. +** +** SQLite implements this interface by calling the xSleep() +** method of the default [sqlite3_vfs] object. 
+** +** Requirements: [H10533] [H10536] */ int sqlite3_sleep(int); /* -** CAPI3REF: Name Of The Folder Holding Temporary Files +** CAPI3REF: Name Of The Folder Holding Temporary Files {H10310} ** ** If this global variable is made to point to a string which is -** the name of a folder (a.ka. directory), then all temporary files +** the name of a folder (a.k.a. directory), then all temporary files ** created by SQLite will be placed in that directory. If this variable -** is NULL pointer, then SQLite does a search for an appropriate temporary -** file directory. +** is a NULL pointer, then SQLite performs a search for an appropriate +** temporary file directory. ** -** Once [sqlite3_open()] has been called, changing this variable will -** invalidate the current temporary database, if any. Generally speaking, -** it is not safe to invoke this routine after [sqlite3_open()] has -** been called. +** It is not safe to read or modify this variable in more than one +** thread at a time. It is not safe to read or modify this variable +** if a [database connection] is being used at the same time in a separate +** thread. +** It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been called and that this variable remain unchanged +** thereafter. +** +** The [temp_store_directory pragma] may modify this variable and cause +** it to point to memory obtained from [sqlite3_malloc]. Furthermore, +** the [temp_store_directory pragma] always assumes that any string +** that this variable points to is held in memory obtained from +** [sqlite3_malloc] and the pragma may attempt to free that memory +** using [sqlite3_free]. +** Hence, if this variable is modified directly, either it should be +** made NULL or made to point to memory obtained from [sqlite3_malloc] +** or else the use of the [temp_store_directory pragma] should be avoided. */ SQLITE_EXTERN char *sqlite3_temp_directory; /* -** CAPI3REF: Test To See If The Databse Is In Auto-Commit Mode +** CAPI3REF: Test For Auto-Commit Mode {H12930} +** KEYWORDS: {autocommit mode} ** -** Test to see whether or not the database connection is in autocommit -** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on -** by default. Autocommit is disabled by a BEGIN statement and reenabled -** by the next COMMIT or ROLLBACK. +** The sqlite3_get_autocommit() interface returns non-zero or +** zero if the given database connection is or is not in autocommit mode, +** respectively. Autocommit mode is on by default. +** Autocommit mode is disabled by a [BEGIN] statement. +** Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. +** +** If certain kinds of errors occur on a statement within a multi-statement +** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], +** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the +** transaction might be rolled back automatically. The only way to +** find out whether SQLite automatically rolled back the transaction after +** an error is to use this function. +** +** If another thread changes the autocommit status of the database +** connection while this routine is running, then the return value +** is undefined. 
+** +** Requirements: [H12931] [H12932] [H12933] [H12934] */ int sqlite3_get_autocommit(sqlite3*); /* -** CAPI3REF: Find The Database Handle Associated With A Prepared Statement +** CAPI3REF: Find The Database Handle Of A Prepared Statement {H13120} +** +** The sqlite3_db_handle interface returns the [database connection] handle +** to which a [prepared statement] belongs. The [database connection] +** returned by sqlite3_db_handle is the same [database connection] that was the first argument +** to the [sqlite3_prepare_v2()] call (or its variants) that was used to +** create the statement in the first place. ** -** Return the [sqlite3*] database handle to which a -** [sqlite3_stmt | prepared statement] belongs. -** This is the same database handle that was -** the first argument to the [sqlite3_prepare_v2()] or its variants -** that was used to create the statement in the first place. +** Requirements: [H13123] */ sqlite3 *sqlite3_db_handle(sqlite3_stmt*); - /* -** CAPI3REF: Commit And Rollback Notification Callbacks -** -** These routines -** register callback functions to be invoked whenever a transaction -** is committed or rolled back. The pArg argument is passed through -** to the callback. If the callback on a commit hook function -** returns non-zero, then the commit is converted into a rollback. +** CAPI3REF: Find the next prepared statement {H13140} ** -** If another function was previously registered, its pArg value is returned. -** Otherwise NULL is returned. +** This interface returns a pointer to the next [prepared statement] after +** pStmt associated with the [database connection] pDb. If pStmt is NULL +** then this interface returns a pointer to the first prepared statement +** associated with the database connection pDb. If no prepared statement +** satisfies the conditions of this routine, it returns NULL. +** +** The [database connection] pointer D in a call to +** [sqlite3_next_stmt(D,S)] must refer to an open database +** connection and in particular must not be a NULL pointer. +** +** Requirements: [H13143] [H13146] [H13149] [H13152] +*/ +sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Commit And Rollback Notification Callbacks {H12950} +** +** The sqlite3_commit_hook() interface registers a callback +** function to be invoked whenever a transaction is [COMMIT | committed]. +** Any callback set by a previous call to sqlite3_commit_hook() +** for the same database connection is overridden. +** The sqlite3_rollback_hook() interface registers a callback +** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. +** Any callback set by a previous call to sqlite3_commit_hook() +** for the same database connection is overridden. +** The pArg argument is passed through to the callback. +** If the callback on a commit hook function returns non-zero, +** then the commit is converted into a rollback. +** +** If another function was previously registered, its +** pArg value is returned. Otherwise NULL is returned. +** +** The callback implementation must not do anything that will modify +** the database connection that invoked the callback. Any actions +** to modify the database connection must be deferred until after the +** completion of the [sqlite3_step()] call that triggered the commit +** or rollback hook in the first place. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. 
** ** Registering a NULL function disables the callback. ** -** For the purposes of this API, a transaction is said to have been -** rolled back if an explicit "ROLLBACK" statement is executed, or -** an error or constraint causes an implicit rollback to occur. The -** callback is not invoked if a transaction is automatically rolled -** back because the database connection is closed. +** When the commit hook callback routine returns zero, the [COMMIT] +** operation is allowed to continue normally. If the commit hook +** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. +** The rollback hook is invoked on a rollback that results from a commit +** hook returning non-zero, just as it would be with any other rollback. ** -** These are experimental interfaces and are subject to change. +** For the purposes of this API, a transaction is said to have been +** rolled back if an explicit "ROLLBACK" statement is executed, or +** an error or constraint causes an implicit rollback to occur. +** The rollback callback is not invoked if a transaction is +** automatically rolled back because the database connection is closed. +** The rollback callback is not invoked if a transaction is +** rolled back because a commit callback returned non-zero. +** Check on this +** +** See also the [sqlite3_update_hook()] interface. +** +** Requirements: +** [H12951] [H12952] [H12953] [H12954] [H12955] +** [H12961] [H12962] [H12963] [H12964] */ void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); /* -** CAPI3REF: Data Change Notification Callbacks +** CAPI3REF: Data Change Notification Callbacks {H12970} ** -** Register a callback function with the database connection identified by the -** first argument to be invoked whenever a row is updated, inserted or deleted. -** Any callback set by a previous call to this function for the same -** database connection is overridden. -** -** The second argument is a pointer to the function to invoke when a -** row is updated, inserted or deleted. The first argument to the callback is -** a copy of the third argument to sqlite3_update_hook(). The second callback -** argument is one of SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE, depending -** on the operation that caused the callback to be invoked. The third and -** fourth arguments to the callback contain pointers to the database and -** table name containing the affected row. The final callback parameter is -** the rowid of the row. In the case of an update, this is the rowid after -** the update takes place. +** The sqlite3_update_hook() interface registers a callback function +** with the [database connection] identified by the first argument +** to be invoked whenever a row is updated, inserted or deleted. +** Any callback set by a previous call to this function +** for the same database connection is overridden. +** +** The second argument is a pointer to the function to invoke when a +** row is updated, inserted or deleted. +** The first argument to the callback is a copy of the third argument +** to sqlite3_update_hook(). +** The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], +** or [SQLITE_UPDATE], depending on the operation that caused the callback +** to be invoked. +** The third and fourth arguments to the callback contain pointers to the +** database and table name containing the affected row. +** The final callback parameter is the [rowid] of the row. 
+** In the case of an update, this is the [rowid] after the update takes place. ** ** The update hook is not invoked when internal system tables are ** modified (i.e. sqlite_master and sqlite_sequence). ** -** If another function was previously registered, its pArg value is returned. -** Otherwise NULL is returned. +** In the current implementation, the update hook +** is not invoked when duplication rows are deleted because of an +** [ON CONFLICT | ON CONFLICT REPLACE] clause. Nor is the update hook +** invoked when rows are deleted using the [truncate optimization]. +** The exceptions defined in this paragraph might change in a future +** release of SQLite. +** +** The update hook implementation must not do anything that will modify +** the database connection that invoked the update hook. Any actions +** to modify the database connection must be deferred until after the +** completion of the [sqlite3_step()] call that triggered the update hook. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +** If another function was previously registered, its pArg value +** is returned. Otherwise NULL is returned. +** +** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] +** interfaces. +** +** Requirements: +** [H12971] [H12973] [H12975] [H12977] [H12979] [H12981] [H12983] [H12986] */ void *sqlite3_update_hook( sqlite3*, - void(*)(void *,int ,char const *,char const *,sqlite_int64), + void(*)(void *,int ,char const *,char const *,sqlite3_int64), void* ); /* -** CAPI3REF: Enable Or Disable Shared Pager Cache +** CAPI3REF: Enable Or Disable Shared Pager Cache {H10330} +** KEYWORDS: {shared cache} {shared cache mode} ** ** This routine enables or disables the sharing of the database cache -** and schema data structures between connections to the same database. -** Sharing is enabled if the argument is true and disabled if the argument -** is false. -** -** Cache sharing is enabled and disabled on a thread-by-thread basis. -** Each call to this routine enables or disables cache sharing only for -** connections created in the same thread in which this routine is called. -** There is no mechanism for sharing cache between database connections -** running in different threads. -** -** Sharing must be disabled prior to shutting down a thread or else -** the thread will leak memory. Call this routine with an argument of -** 0 to turn off sharing. Or use the sqlite3_thread_cleanup() API. -** -** This routine must not be called when any database connections -** are active in the current thread. Enabling or disabling shared -** cache while there are active database connections will result -** in memory corruption. -** -** When the shared cache is enabled, the -** following routines must always be called from the same thread: -** [sqlite3_open()], [sqlite3_prepare_v2()], [sqlite3_step()], -** [sqlite3_reset()], [sqlite3_finalize()], and [sqlite3_close()]. -** This is due to the fact that the shared cache makes use of -** thread-specific storage so that it will be available for sharing -** with other connections. +** and schema data structures between [database connection | connections] +** to the same database. Sharing is enabled if the argument is true +** and disabled if the argument is false. +** +** Cache sharing is enabled and disabled for an entire process. +** This is a change as of SQLite version 3.5.0. 
In prior versions of SQLite, +** sharing was enabled or disabled for each thread separately. +** +** The cache sharing mode set by this interface effects all subsequent +** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. +** Existing database connections continue use the sharing mode +** that was in effect at the time they were opened. ** ** Virtual tables cannot be used with a shared cache. When shared -** cache is enabled, the sqlite3_create_module() API used to register +** cache is enabled, the [sqlite3_create_module()] API used to register ** virtual tables will always return an error. ** -** This routine returns [SQLITE_OK] if shared cache was -** enabled or disabled successfully. An [SQLITE_ERROR | error code] -** is returned otherwise. +** This routine returns [SQLITE_OK] if shared cache was enabled or disabled +** successfully. An [error code] is returned otherwise. +** +** Shared cache is disabled by default. But this might change in +** future releases of SQLite. Applications that care about shared +** cache setting should set it explicitly. +** +** See Also: [SQLite Shared-Cache Mode] ** -** Shared cache is disabled by default for backward compatibility. +** Requirements: [H10331] [H10336] [H10337] [H10339] */ int sqlite3_enable_shared_cache(int); /* -** CAPI3REF: Attempt To Free Heap Memory +** CAPI3REF: Attempt To Free Heap Memory {H17340} ** -** Attempt to free N bytes of heap memory by deallocating non-essential -** memory allocations held by the database library (example: memory -** used to cache database pages to improve performance). +** The sqlite3_release_memory() interface attempts to free N bytes +** of heap memory by deallocating non-essential memory allocations +** held by the database library. {END} Memory used to cache database +** pages to improve performance is an example of non-essential memory. +** sqlite3_release_memory() returns the number of bytes actually freed, +** which might be more or less than the amount requested. ** -** This function is not a part of standard builds. It is only created -** if SQLite is compiled with the SQLITE_ENABLE_MEMORY_MANAGEMENT macro. +** Requirements: [H17341] [H17342] */ int sqlite3_release_memory(int); /* -** CAPI3REF: Impose A Limit On Heap Size +** CAPI3REF: Impose A Limit On Heap Size {H17350} ** -** Place a "soft" limit on the amount of heap memory that may be allocated by -** SQLite within the current thread. If an internal allocation is requested -** that would exceed the specified limit, [sqlite3_release_memory()] is invoked -** one or more times to free up some space before the allocation is made. -** -** The limit is called "soft", because if [sqlite3_release_memory()] cannot free -** sufficient memory to prevent the limit from being exceeded, the memory is -** allocated anyway and the current operation proceeds. -** -** Prior to shutting down a thread sqlite3_soft_heap_limit() must be set to -** zero (the default) or else the thread will leak memory. Alternatively, use -** the [sqlite3_thread_cleanup()] API. +** The sqlite3_soft_heap_limit() interface places a "soft" limit +** on the amount of heap memory that may be allocated by SQLite. +** If an internal allocation is requested that would exceed the +** soft heap limit, [sqlite3_release_memory()] is invoked one or +** more times to free up some space before the allocation is performed. 
+** +** The limit is called "soft", because if [sqlite3_release_memory()] +** cannot free sufficient memory to prevent the limit from being exceeded, +** the memory is allocated anyway and the current operation proceeds. ** ** A negative or zero value for N means that there is no soft heap limit and -** [sqlite3_release_memory()] will only be called when memory is exhaused. +** [sqlite3_release_memory()] will only be called when memory is exhausted. ** The default value for the soft heap limit is zero. ** -** SQLite makes a best effort to honor the soft heap limit. But if it -** is unable to reduce memory usage below the soft limit, execution will -** continue without error or notification. This is why the limit is +** SQLite makes a best effort to honor the soft heap limit. +** But if the soft heap limit cannot be honored, execution will +** continue without error or notification. This is why the limit is ** called a "soft" limit. It is advisory only. ** -** This function is only available if the library was compiled with the -** SQLITE_ENABLE_MEMORY_MANAGEMENT option set. -** memory-management has been enabled. -*/ -void sqlite3_soft_heap_limit(int); - -/* -** CAPI3REF: Clean Up Thread Local Storage +** Prior to SQLite version 3.5.0, this routine only constrained the memory +** allocated by a single thread - the same thread in which this routine +** runs. Beginning with SQLite version 3.5.0, the soft heap limit is +** applied to all threads. The value specified for the soft heap limit +** is an upper bound on the total memory allocation for all threads. In +** version 3.5.0 there is no mechanism for limiting the heap usage for +** individual threads. ** -** This routine makes sure that all thread-local storage has been -** deallocated for the current thread. -** -** This routine is not technically necessary. All thread-local storage -** will be automatically deallocated once memory-management and -** shared-cache are disabled and the soft heap limit has been set -** to zero. This routine is provided as a convenience for users who -** want to make absolutely sure they have not forgotten something -** prior to killing off a thread. +** Requirements: +** [H16351] [H16352] [H16353] [H16354] [H16355] [H16358] */ -void sqlite3_thread_cleanup(void); +void sqlite3_soft_heap_limit(int); /* -** CAPI3REF: Extract Metadata About A Column Of A Table +** CAPI3REF: Extract Metadata About A Column Of A Table {H12850} ** -** This routine -** returns meta-data about a specific column of a specific database -** table accessible using the connection handle passed as the first function -** argument. +** This routine returns metadata about a specific column of a specific +** database table accessible using the [database connection] handle +** passed as the first function argument. ** -** The column is identified by the second, third and fourth parameters to +** The column is identified by the second, third and fourth parameters to ** this function. The second parameter is either the name of the database ** (i.e. "main", "temp" or an attached database) containing the specified ** table or NULL. If it is NULL, then all attached databases are searched -** for the table using the same algorithm as the database engine uses to +** for the table using the same algorithm used by the database engine to ** resolve unqualified table references. ** -** The third and fourth parameters to this function are the table and column -** name of the desired column, respectively. 
Neither of these parameters +** The third and fourth parameters to this function are the table and column +** name of the desired column, respectively. Neither of these parameters ** may be NULL. ** -** Meta information is returned by writing to the memory locations passed as -** the 5th and subsequent parameters to this function. Any of these -** arguments may be NULL, in which case the corresponding element of meta -** information is ommitted. -** -**
    -** Parameter     Output Type      Description
    -** -----------------------------------
    +** Metadata is returned by writing to the memory locations passed as the 5th
    +** and subsequent parameters to this function. Any of these arguments may be
    +** NULL, in which case the corresponding element of metadata is omitted.
     **
    -**   5th         const char*      Data type
    -**   6th         const char*      Name of the default collation sequence 
    -**   7th         int              True if the column has a NOT NULL constraint
    -**   8th         int              True if the column is part of the PRIMARY KEY
    -**   9th         int              True if the column is AUTOINCREMENT
    -** 
    +**
+** Parameter     Output Type      Description
+** -----------------------------------------
+**   5th         const char*      Data type
+**   6th         const char*      Name of default collation sequence
+**   7th         int              True if column has a NOT NULL constraint
+**   8th         int              True if column is part of the PRIMARY KEY
+**   9th         int              True if column is [AUTOINCREMENT]
    +**
    ** -** The memory pointed to by the character pointers returned for the -** declaration type and collation sequence is valid only until the next -** call to any sqlite API function. +** The memory pointed to by the character pointers returned for the +** declaration type and collation sequence is valid only until the next +** call to any SQLite API function. ** -** If the specified table is actually a view, then an error is returned. +** If the specified table is actually a view, an [error code] is returned. ** -** If the specified column is "rowid", "oid" or "_rowid_" and an -** INTEGER PRIMARY KEY column has been explicitly declared, then the output +** If the specified column is "rowid", "oid" or "_rowid_" and an +** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output ** parameters are set for the explicitly declared column. If there is no -** explicitly declared IPK column, then the output parameters are set as -** follows: +** explicitly declared [INTEGER PRIMARY KEY] column, then the output +** parameters are set as follows: ** **
     **     data type: "INTEGER"
    @@ -2265,11 +4006,11 @@
     **
     ** This function may load one or more schemas from database files. If an
     ** error occurs during this process, or if the requested table or column
    -** cannot be found, an SQLITE error code is returned and an error message
    -** left in the database handle (to be retrieved using sqlite3_errmsg()).
    +** cannot be found, an [error code] is returned and an error message left
    +** in the [database connection] (to be retrieved using sqlite3_errmsg()).
     **
     ** This API is only available if the library was compiled with the
    -** SQLITE_ENABLE_COLUMN_METADATA preprocessor symbol defined.
    +** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined.
     */
     int sqlite3_table_column_metadata(
       sqlite3 *db,                /* Connection handle */
    @@ -2280,24 +4021,34 @@
       char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
       int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
       int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
    -  int *pAutoinc               /* OUTPUT: True if colums is auto-increment */
    +  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
     );
     
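As a usage illustration of the declaration above, here is a minimal sketch.
It assumes an already-open connection db, a build compiled with
SQLITE_ENABLE_COLUMN_METADATA, and a hypothetical table "t1" with a column
"id"; both names are placeholders.

    #include <stdio.h>
    #include "sqlite3.h"

    /* Print the metadata for the hypothetical column "t1"."id" in the "main"
    ** database.  Any of the output pointers could be NULL; all five are
    ** requested here for illustration. */
    static int show_column_metadata(sqlite3 *db){
      const char *zDataType = 0;
      const char *zCollSeq = 0;
      int notNull = 0, primaryKey = 0, autoInc = 0;
      int rc = sqlite3_table_column_metadata(db, "main", "t1", "id",
                   &zDataType, &zCollSeq, &notNull, &primaryKey, &autoInc);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "metadata error: %s\n", sqlite3_errmsg(db));
        return rc;
      }
      printf("type=%s coll=%s notnull=%d pk=%d autoinc=%d\n",
             zDataType, zCollSeq, notNull, primaryKey, autoInc);
      return SQLITE_OK;
    }

The two returned strings are consumed immediately because, as noted above,
they are valid only until the next call into the SQLite API.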
     /*
    -** CAPI3REF: Load An Extension
    +** CAPI3REF: Load An Extension {H12600} 
    +**
    +** This interface loads an SQLite extension library from the named file.
    +**
    +** {H12601} The sqlite3_load_extension() interface attempts to load an
    +**          SQLite extension library contained in the file zFile.
    +**
    +** {H12602} The entry point is zProc.
     **
    -** Attempt to load an SQLite extension library contained in the file
    -** zFile.  The entry point is zProc.  zProc may be 0 in which case the
    -** name of the entry point defaults to "sqlite3_extension_init".
    +** {H12603} zProc may be 0, in which case the name of the entry point
    +**          defaults to "sqlite3_extension_init".
     **
    -** Return [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
    +** {H12604} The sqlite3_load_extension() interface shall return
    +**          [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
     **
    -** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with 
    -** error message text.  The calling function should free this memory
    -** by calling [sqlite3_free()].
    +** {H12605} If an error occurs and pzErrMsg is not 0, then the
    +**          [sqlite3_load_extension()] interface shall attempt to
    +**          fill *pzErrMsg with error message text stored in memory
    +**          obtained from [sqlite3_malloc()]. {END}  The calling function
    +**          should free this memory by calling [sqlite3_free()].
     **
    -** Extension loading must be enabled using [sqlite3_enable_load_extension()]
    -** prior to calling this API or an error will be returned.
    +** {H12606} Extension loading must be enabled using
    +**          [sqlite3_enable_load_extension()] prior to calling this API,
    +**          otherwise an error will be returned.
     */
     int sqlite3_load_extension(
       sqlite3 *db,          /* Load the extension into this database connection */
    @@ -2307,62 +4058,64 @@
     );
     
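A minimal sketch of the calling pattern described above. The file name
"./libmyext.so" is a hypothetical placeholder, the entry point is passed as 0
so the default name "sqlite3_extension_init" is used, and extension loading is
assumed to have been enabled beforehand (see the next interface).

    #include <stdio.h>
    #include "sqlite3.h"

    /* Try to load a hypothetical loadable extension into an open connection. */
    static int load_my_extension(sqlite3 *db){
      char *zErrMsg = 0;
      int rc = sqlite3_load_extension(db, "./libmyext.so", 0, &zErrMsg);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "extension load failed: %s\n",
                zErrMsg ? zErrMsg : "unknown error");
        sqlite3_free(zErrMsg);   /* error text comes from sqlite3_malloc() */
      }
      return rc;
    }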
     /*
    -** CAPI3REF:  Enable Or Disable Extension Loading
    +** CAPI3REF: Enable Or Disable Extension Loading {H12620} 
     **
     ** So as not to open security holes in older applications that are
     ** unprepared to deal with extension loading, and as a means of disabling
    -** extension loading while evaluating user-entered SQL, the following
    -** API is provided to turn the [sqlite3_load_extension()] mechanism on and
    -** off.  It is off by default.  See ticket #1863.
    +** extension loading while evaluating user-entered SQL, the following API
    +** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
     **
    -** Call this routine with onoff==1 to turn extension loading on
    -** and call it with onoff==0 to turn it back off again.
    +** Extension loading is off by default. See ticket #1863.
    +**
    +** {H12621} Call the sqlite3_enable_load_extension() routine with onoff==1
    +**          to turn extension loading on and call it with onoff==0 to turn
    +**          it back off again.
    +**
    +** {H12622} Extension loading is off by default.
     */
     int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
     
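For example, an application might follow the bracketing pattern sketched
below: turn loading on only while its own trusted startup code runs, then turn
it back off before evaluating any user-entered SQL. The extension file name is
again a hypothetical placeholder.

    #include "sqlite3.h"

    /* Allow extension loading only for the duration of trusted setup code;
    ** it is off by default and is returned to that state afterwards. */
    static void setup_connection(sqlite3 *db){
      sqlite3_enable_load_extension(db, 1);                /* onoff==1: allow */
      sqlite3_load_extension(db, "./libmyext.so", 0, 0);   /* no error text wanted */
      sqlite3_enable_load_extension(db, 0);                /* onoff==0: disallow */
    }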
     /*
    -** CAPI3REF: Make Arrangements To Automatically Load An Extension
    -**
    -** Register an extension entry point that is automatically invoked
    -** whenever a new database connection is opened using
    -** [sqlite3_open()] or [sqlite3_open16()].
+** CAPI3REF: Automatically Load An Extension {H12640}
     **
     ** This API can be invoked at program startup in order to register
     ** one or more statically linked extensions that will be available
    -** to all new database connections.
    +** to all new [database connections]. {END}
    +**
    +** This routine stores a pointer to the extension in an array that is
    +** obtained from [sqlite3_malloc()].  If you run a memory leak checker
    +** on your program and it reports a leak because of this array, invoke
    +** [sqlite3_reset_auto_extension()] prior to shutdown to free the memory.
     **
    -** Duplicate extensions are detected so calling this routine multiple
    -** times with the same extension is harmless.
    +** {H12641} This function registers an extension entry point that is
    +**          automatically invoked whenever a new [database connection]
    +**          is opened using [sqlite3_open()], [sqlite3_open16()],
    +**          or [sqlite3_open_v2()].
     **
    -** This routine stores a pointer to the extension in an array
    -** that is obtained from malloc().  If you run a memory leak
    -** checker on your program and it reports a leak because of this
    -** array, then invoke [sqlite3_automatic_extension_reset()] prior
    -** to shutdown to free the memory.
    +** {H12642} Duplicate extensions are detected so calling this routine
    +**          multiple times with the same extension is harmless.
     **
    -** Automatic extensions apply across all threads.
    +** {H12643} This routine stores a pointer to the extension in an array
    +**          that is obtained from [sqlite3_malloc()].
     **
    -** This interface is experimental and is subject to change or
    -** removal in future releases of SQLite.
    +** {H12644} Automatic extensions apply across all threads.
     */
    -int sqlite3_auto_extension(void *xEntryPoint);
    -
    +int sqlite3_auto_extension(void (*xEntryPoint)(void));
     
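A small sketch of process-wide registration, typed exactly as the declaration
above; myext_auto_init is a hypothetical statically linked entry point, and
the matching cleanup call is the sqlite3_reset_auto_extension() interface
documented next.

    #include "sqlite3.h"

    /* Hypothetical entry point, matching the xEntryPoint type declared above.
    ** A real statically linked extension would perform its setup here. */
    static void myext_auto_init(void){
      /* ... per-connection registration work ... */
    }

    /* Called once at program startup; the entry point is then invoked for
    ** every database connection opened afterwards. */
    static int register_auto_extensions(void){
      return sqlite3_auto_extension(myext_auto_init);
    }

    /* Called at shutdown so that leak checkers do not report the internal
    ** registration array. */
    static void unregister_auto_extensions(void){
      sqlite3_reset_auto_extension();
    }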
     /*
    -** CAPI3REF: Reset Automatic Extension Loading
    +** CAPI3REF: Reset Automatic Extension Loading {H12660} 
     **
    -** Disable all previously registered automatic extensions.  This
    -** routine undoes the effect of all prior [sqlite3_automatic_extension()]
    -** calls.
    +** This function disables all previously registered automatic
    +** extensions. {END}  It undoes the effect of all prior
    +** [sqlite3_auto_extension()] calls.
     **
    -** This call disabled automatic extensions in all threads.
    +** {H12661} This function disables all previously registered
    +**          automatic extensions.
     **
    -** This interface is experimental and is subject to change or
    -** removal in future releases of SQLite.
    +** {H12662} This function disables automatic extensions in all threads.
     */
     void sqlite3_reset_auto_extension(void);
     
    -
     /*
     ****** EXPERIMENTAL - subject to change without notice **************
     **
    @@ -2370,7 +4123,7 @@
     ** to be experimental.  The interface might change in incompatible ways.
     ** If this is a problem for you, do not use the interface at this time.
     **
    -** When the virtual-table mechanism stablizes, we will declare the
    +** When the virtual-table mechanism stabilizes, we will declare the
     ** interface fixed, support it indefinitely, and remove this comment.
     */
     
    @@ -2383,9 +4136,21 @@
     typedef struct sqlite3_module sqlite3_module;
     
     /*
    -** A module is a class of virtual tables.  Each module is defined
    -** by an instance of the following structure.  This structure consists
    -** mostly of methods for the module.
    +** CAPI3REF: Virtual Table Object {H18000} 
    +** KEYWORDS: sqlite3_module {virtual table module}
    +** EXPERIMENTAL
    +**
+** This structure, sometimes called a "virtual table module",
+** defines the implementation of a [virtual table].
    +** This structure consists mostly of methods for the module.
    +**
    +** A virtual table module is created by filling in a persistent
    +** instance of this structure and passing a pointer to that instance
    +** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
    +** The registration remains valid until it is replaced by a different
    +** module or until the [database connection] closes.  The content
    +** of this structure must not change while it is registered with
    +** any database connection.
     */
     struct sqlite3_module {
       int iVersion;
    @@ -2405,8 +4170,8 @@
       int (*xNext)(sqlite3_vtab_cursor*);
       int (*xEof)(sqlite3_vtab_cursor*);
       int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
    -  int (*xRowid)(sqlite3_vtab_cursor*, sqlite_int64 *pRowid);
    -  int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite_int64 *);
    +  int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
    +  int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
       int (*xBegin)(sqlite3_vtab *pVTab);
       int (*xSync)(sqlite3_vtab *pVTab);
       int (*xCommit)(sqlite3_vtab *pVTab);
    @@ -2414,30 +4179,32 @@
       int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
                            void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
                            void **ppArg);
    -
       int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
     };
     
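As a shape-only sketch of how this structure is typically used: a persistent
module object (hypothetically named myvtab_module, with its methods assumed to
be implemented elsewhere) is registered under a module name with
sqlite3_create_module(), which is documented further below.

    #include "sqlite3.h"

    /* Hypothetical module object; its function pointers are assumed to be
    ** filled in elsewhere with methods matching the signatures shown above.
    ** The object must stay valid for as long as the registration lasts. */
    extern const sqlite3_module myvtab_module;

    /* Register the module on one connection; afterwards that connection can
    ** execute CREATE VIRTUAL TABLE ... USING myvtab(...). */
    static int register_myvtab(sqlite3 *db){
      return sqlite3_create_module(db, "myvtab", &myvtab_module, 0);
    }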
     /*
    +** CAPI3REF: Virtual Table Indexing Information {H18100} 
    +** KEYWORDS: sqlite3_index_info
    +** EXPERIMENTAL
    +**
     ** The sqlite3_index_info structure and its substructures is used to
    -** pass information into and receive the reply from the xBestIndex
    -** method of an sqlite3_module.  The fields under **Inputs** are the
    +** pass information into and receive the reply from the [xBestIndex]
    +** method of a [virtual table module].  The fields under **Inputs** are the
     ** inputs to xBestIndex and are read-only.  xBestIndex inserts its
     ** results into the **Outputs** fields.
     **
    -** The aConstraint[] array records WHERE clause constraints of the
    -** form:
    +** The aConstraint[] array records WHERE clause constraints of the form:
     **
    -**         column OP expr
    +** 
+**         column OP expr
    ** -** Where OP is =, <, <=, >, or >=. The particular operator is stored -** in aConstraint[].op. The index of the column is stored in +** where OP is =, <, <=, >, or >=. The particular operator is +** stored in aConstraint[].op. The index of the column is stored in ** aConstraint[].iColumn. aConstraint[].usable is TRUE if the ** expr on the right-hand side can be evaluated (and thus the constraint ** is usable) and false if it cannot. ** ** The optimizer automatically inverts terms of the form "expr OP column" -** and makes other simplificatinos to the WHERE clause in an attempt to +** and makes other simplifications to the WHERE clause in an attempt to ** get as many WHERE clause terms into the form shown above as possible. ** The aConstraint[] array only reports WHERE clause terms in the correct ** form that refer to the particular virtual table being queried. @@ -2445,17 +4212,19 @@ ** Information about the ORDER BY clause is stored in aOrderBy[]. ** Each term of aOrderBy records a column of the ORDER BY clause. ** -** The xBestIndex method must fill aConstraintUsage[] with information +** The [xBestIndex] method must fill aConstraintUsage[] with information ** about what parameters to pass to xFilter. If argvIndex>0 then ** the right-hand side of the corresponding aConstraint[] is evaluated ** and becomes the argvIndex-th entry in argv. If aConstraintUsage[].omit ** is true, then the constraint is assumed to be fully handled by the ** virtual table and is not checked again by SQLite. ** -** The idxNum and idxPtr values are recorded and passed into xFilter. -** sqlite3_free() is used to free idxPtr if needToFreeIdxPtr is true. +** The idxNum and idxPtr values are recorded and passed into the +** [xFilter] method. +** [sqlite3_free()] is used to free idxPtr if and only iff +** needToFreeIdxPtr is true. ** -** The orderByConsumed means that output from xFilter will occur in +** The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate ** sorting step is required. ** @@ -2466,24 +4235,23 @@ */ struct sqlite3_index_info { /* Inputs */ - const int nConstraint; /* Number of entries in aConstraint */ - const struct sqlite3_index_constraint { + int nConstraint; /* Number of entries in aConstraint */ + struct sqlite3_index_constraint { int iColumn; /* Column on left-hand side of constraint */ unsigned char op; /* Constraint operator */ unsigned char usable; /* True if this constraint is usable */ int iTermOffset; /* Used internally - xBestIndex should ignore */ - } *const aConstraint; /* Table of WHERE clause constraints */ - const int nOrderBy; /* Number of terms in the ORDER BY clause */ - const struct sqlite3_index_orderby { + } *aConstraint; /* Table of WHERE clause constraints */ + int nOrderBy; /* Number of terms in the ORDER BY clause */ + struct sqlite3_index_orderby { int iColumn; /* Column number */ unsigned char desc; /* True for DESC. False for ASC. 
*/ - } *const aOrderBy; /* The ORDER BY clause */ - + } *aOrderBy; /* The ORDER BY clause */ /* Outputs */ struct sqlite3_index_constraint_usage { int argvIndex; /* if >0, constraint is part of argv to xFilter */ unsigned char omit; /* Do not code a test for this constraint */ - } *const aConstraintUsage; + } *aConstraintUsage; int idxNum; /* Number used to identify the index */ char *idxStr; /* String, possibly obtained from sqlite3_malloc */ int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ @@ -2498,47 +4266,68 @@ #define SQLITE_INDEX_CONSTRAINT_MATCH 64 /* -** This routine is used to register a new module name with an SQLite -** connection. Module names must be registered before creating new -** virtual tables on the module, or before using preexisting virtual -** tables of the module. +** CAPI3REF: Register A Virtual Table Implementation {H18200} +** EXPERIMENTAL +** +** This routine is used to register a new [virtual table module] name. +** Module names must be registered before +** creating a new [virtual table] using the module, or before using a +** preexisting [virtual table] for the module. +** +** The module name is registered on the [database connection] specified +** by the first parameter. The name of the module is given by the +** second parameter. The third parameter is a pointer to +** the implementation of the [virtual table module]. The fourth +** parameter is an arbitrary client data pointer that is passed through +** into the [xCreate] and [xConnect] methods of the virtual table module +** when a new virtual table is be being created or reinitialized. +** +** This interface has exactly the same effect as calling +** [sqlite3_create_module_v2()] with a NULL client data destructor. */ -int sqlite3_create_module( +SQLITE_EXPERIMENTAL int sqlite3_create_module( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ - const sqlite3_module *, /* Methods for the module */ - void * /* Client data for xCreate/xConnect */ + const sqlite3_module *p, /* Methods for the module */ + void *pClientData /* Client data for xCreate/xConnect */ ); /* -** This routine is identical to the sqlite3_create_module() method above, -** except that it allows a destructor function to be specified. It is -** even more experimental than the rest of the virtual tables API. +** CAPI3REF: Register A Virtual Table Implementation {H18210} +** EXPERIMENTAL +** +** This routine is identical to the [sqlite3_create_module()] method, +** except that it has an extra parameter to specify +** a destructor function for the client data pointer. SQLite will +** invoke the destructor function (if it is not NULL) when SQLite +** no longer needs the pClientData pointer. */ -int sqlite3_create_module_v2( +SQLITE_EXPERIMENTAL int sqlite3_create_module_v2( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ - const sqlite3_module *, /* Methods for the module */ - void *, /* Client data for xCreate/xConnect */ + const sqlite3_module *p, /* Methods for the module */ + void *pClientData, /* Client data for xCreate/xConnect */ void(*xDestroy)(void*) /* Module destructor function */ ); /* -** Every module implementation uses a subclass of the following structure -** to describe a particular instance of the module. Each subclass will -** be taylored to the specific needs of the module implementation. 
The -** purpose of this superclass is to define certain fields that are common -** to all module implementations. +** CAPI3REF: Virtual Table Instance Object {H18010} +** KEYWORDS: sqlite3_vtab +** EXPERIMENTAL +** +** Every [virtual table module] implementation uses a subclass +** of the following structure to describe a particular instance +** of the [virtual table]. Each subclass will +** be tailored to the specific needs of the module implementation. +** The purpose of this superclass is to define certain fields that are +** common to all module implementations. ** ** Virtual tables methods can set an error message by assigning a -** string obtained from sqlite3_mprintf() to zErrMsg. The method should -** take care that any prior string is freed by a call to sqlite3_free() +** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should +** take care that any prior string is freed by a call to [sqlite3_free()] ** prior to assigning a new string to zErrMsg. After the error message ** is delivered up to the client application, the string will be automatically -** freed by sqlite3_free() and the zErrMsg field will be zeroed. Note -** that sqlite3_mprintf() and sqlite3_free() are used on the zErrMsg field -** since virtual tables are commonly implemented in loadable extensions which -** do not have access to sqlite3MPrintf() or sqlite3Free(). +** freed by sqlite3_free() and the zErrMsg field will be zeroed. */ struct sqlite3_vtab { const sqlite3_module *pModule; /* The module for this virtual table */ @@ -2547,10 +4336,19 @@ /* Virtual table implementations will typically add additional fields */ }; -/* Every module implementation uses a subclass of the following structure -** to describe cursors that point into the virtual table and are used +/* +** CAPI3REF: Virtual Table Cursor Object {H18020} +** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} +** EXPERIMENTAL +** +** Every [virtual table module] implementation uses a subclass of the +** following structure to describe cursors that point into the +** [virtual table] and are used ** to loop through the virtual table. Cursors are created using the -** xOpen method of the module. Each module implementation will define +** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed +** by the [sqlite3_module.xClose | xClose] method. Cussors are used +** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods +** of the module. Each module implementation will define ** the content of a cursor structure to suit its own needs. ** ** This superclass exists in order to define fields of the cursor that @@ -2562,15 +4360,23 @@ }; /* -** The xCreate and xConnect methods of a module use the following API +** CAPI3REF: Declare The Schema Of A Virtual Table {H18280} +** EXPERIMENTAL +** +** The [xCreate] and [xConnect] methods of a +** [virtual table module] call this interface ** to declare the format (the names and datatypes of the columns) of ** the virtual tables they implement. */ -int sqlite3_declare_vtab(sqlite3*, const char *zCreateTable); +SQLITE_EXPERIMENTAL int sqlite3_declare_vtab(sqlite3*, const char *zSQL); /* +** CAPI3REF: Overload A Function For A Virtual Table {H18300} +** EXPERIMENTAL +** ** Virtual tables can provide alternative implementations of functions -** using the xFindFunction method. But global versions of those functions +** using the [xFindFunction] method of the [virtual table module]. +** But global versions of those functions ** must exist in order to be overloaded. 
** ** This API makes sure a global version of a function with a particular @@ -2578,13 +4384,10 @@ ** before this API is called, a new function is created. The implementation ** of the new function always causes an exception to be thrown. So ** the new function is not good for anything by itself. Its only -** purpose is to be a place-holder function that can be overloaded -** by virtual tables. -** -** This API should be considered part of the virtual table interface, -** which is experimental and subject to change. +** purpose is to be a placeholder function that can be overloaded +** by a [virtual table]. */ -int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); +SQLITE_EXPERIMENTAL int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* ** The interface to the virtual-table mechanism defined above (back up @@ -2592,110 +4395,1224 @@ ** to be experimental. The interface might change in incompatible ways. ** If this is a problem for you, do not use the interface at this time. ** -** When the virtual-table mechanism stablizes, we will declare the +** When the virtual-table mechanism stabilizes, we will declare the ** interface fixed, support it indefinitely, and remove this comment. ** ****** EXPERIMENTAL - subject to change without notice ************** */ /* -** CAPI3REF: A Handle To An Open BLOB +** CAPI3REF: A Handle To An Open BLOB {H17800} +** KEYWORDS: {BLOB handle} {BLOB handles} ** -** An instance of the following opaque structure is used to -** represent an blob-handle. A blob-handle is created by -** [sqlite3_blob_open()] and destroyed by [sqlite3_blob_close()]. +** An instance of this object represents an open BLOB on which +** [sqlite3_blob_open | incremental BLOB I/O] can be performed. +** Objects of this type are created by [sqlite3_blob_open()] +** and destroyed by [sqlite3_blob_close()]. ** The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces -** can be used to read or write small subsections of the blob. -** The [sqltie3_blob_size()] interface returns the size of the -** blob in bytes. +** can be used to read or write small subsections of the BLOB. +** The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. */ typedef struct sqlite3_blob sqlite3_blob; /* -** CAPI3REF: Open A BLOB For Incremental I/O +** CAPI3REF: Open A BLOB For Incremental I/O {H17810} ** -** Open a handle to the blob located in row iRow,, column zColumn, -** table zTable in database zDb. i.e. the same blob that would -** be selected by: +** This interfaces opens a [BLOB handle | handle] to the BLOB located +** in row iRow, column zColumn, table zTable in database zDb; +** in other words, the same BLOB that would be selected by: ** **
    -**     SELECT zColumn FROM zDb.zTable WHERE rowid = iRow;
    -** 
+**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
+**
    {END} ** -** If the flags parameter is non-zero, the blob is opened for -** read and write access. If it is zero, the blob is opened for read -** access. -** -** On success, [SQLITE_OK] is returned and the new -** [sqlite3_blob | blob handle] is written to *ppBlob. -** Otherwise an error code is returned and -** any value written to *ppBlob should not be used by the caller. -** This function sets the database-handle error code and message -** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()]. +** If the flags parameter is non-zero, then the BLOB is opened for read +** and write access. If it is zero, the BLOB is opened for read access. +** +** Note that the database name is not the filename that contains +** the database but rather the symbolic name of the database that +** is assigned when the database is connected using [ATTACH]. +** For the main database file, the database name is "main". +** For TEMP tables, the database name is "temp". +** +** On success, [SQLITE_OK] is returned and the new [BLOB handle] is written +** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set +** to be a null pointer. +** This function sets the [database connection] error code and message +** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related +** functions. Note that the *ppBlob variable is always initialized in a +** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob +** regardless of the success or failure of this routine. +** +** If the row that a BLOB handle points to is modified by an +** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects +** then the BLOB handle is marked as "expired". +** This is true if any column of the row is changed, even a column +** other than the one the BLOB handle is open on. +** Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for +** a expired BLOB handle fail with an return code of [SQLITE_ABORT]. +** Changes written into a BLOB prior to the BLOB expiring are not +** rollback by the expiration of the BLOB. Such changes will eventually +** commit if the transaction continues to completion. +** +** Use the [sqlite3_blob_bytes()] interface to determine the size of +** the opened blob. The size of a blob may not be changed by this +** underface. Use the [UPDATE] SQL command to change the size of a +** blob. +** +** The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces +** and the built-in [zeroblob] SQL function can be used, if desired, +** to create an empty, zero-filled blob in which to read or write using +** this interface. +** +** To avoid a resource leak, every open [BLOB handle] should eventually +** be released by a call to [sqlite3_blob_close()]. +** +** Requirements: +** [H17813] [H17814] [H17816] [H17819] [H17821] [H17824] */ int sqlite3_blob_open( sqlite3*, const char *zDb, const char *zTable, const char *zColumn, - sqlite_int64 iRow, + sqlite3_int64 iRow, int flags, sqlite3_blob **ppBlob ); /* -** CAPI3REF: Close A BLOB Handle +** CAPI3REF: Close A BLOB Handle {H17830} +** +** Closes an open [BLOB handle]. ** -** Close an open [sqlite3_blob | blob handle]. +** Closing a BLOB shall cause the current transaction to commit +** if there are no other BLOBs, no pending prepared statements, and the +** database connection is in [autocommit mode]. +** If any writes were made to the BLOB, they might be held in cache +** until the close operation if they will fit. 
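A minimal usage sketch for the incremental BLOB interfaces documented here, assuming an already-open connection db and a hypothetical photos table with an img column; it opens the BLOB read-only, copies it into a sqlite3_malloc() buffer, and closes the handle:

#include <sqlite3.h>

/* Read the entire BLOB stored in photos.img for the given rowid into a
** buffer obtained from sqlite3_malloc().  The caller frees the result. */
static void *read_photo(sqlite3 *db, sqlite3_int64 iRow, int *pnSize){
  sqlite3_blob *pBlob = 0;
  void *pBuf = 0;
  int n, rc;

  *pnSize = 0;
  rc = sqlite3_blob_open(db, "main", "photos", "img", iRow, 0, &pBlob);
  if( rc==SQLITE_OK ){
    n = sqlite3_blob_bytes(pBlob);              /* total size of the BLOB */
    pBuf = sqlite3_malloc(n>0 ? n : 1);
    if( pBuf && sqlite3_blob_read(pBlob, pBuf, n, 0)==SQLITE_OK ){
      *pnSize = n;
    }else{
      sqlite3_free(pBuf);
      pBuf = 0;
    }
  }
  sqlite3_blob_close(pBlob);     /* harmless no-op if pBlob is NULL */
  return pBuf;
}

Since *ppBlob is always initialized by sqlite3_blob_open(), the unconditional sqlite3_blob_close() at the end is safe even when the open fails.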
+** +** Closing the BLOB often forces the changes +** out to disk and so if any I/O errors occur, they will likely occur +** at the time when the BLOB is closed. Any errors that occur during +** closing are reported as a non-zero return value. +** +** The BLOB is closed unconditionally. Even if this routine returns +** an error code, the BLOB is still closed. +** +** Calling this routine with a null pointer (which as would be returned +** by failed call to [sqlite3_blob_open()]) is a harmless no-op. +** +** Requirements: +** [H17833] [H17836] [H17839] */ int sqlite3_blob_close(sqlite3_blob *); /* -** CAPI3REF: Return The Size Of An Open BLOB +** CAPI3REF: Return The Size Of An Open BLOB {H17840} ** -** Return the size in bytes of the blob accessible via the open -** [sqlite3_blob | blob-handle] passed as an argument. +** Returns the size in bytes of the BLOB accessible via the +** successfully opened [BLOB handle] in its only argument. The +** incremental blob I/O routines can only read or overwriting existing +** blob content; they cannot change the size of a blob. +** +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** Requirements: +** [H17843] */ int sqlite3_blob_bytes(sqlite3_blob *); /* -** CAPI3REF: Read Data From A BLOB Incrementally +** CAPI3REF: Read Data From A BLOB Incrementally {H17850} +** +** This function is used to read data from an open [BLOB handle] into a +** caller-supplied buffer. N bytes of data are copied into buffer Z +** from the open BLOB, starting at offset iOffset. +** +** If offset iOffset is less than N bytes from the end of the BLOB, +** [SQLITE_ERROR] is returned and no data is read. If N or iOffset is +** less than zero, [SQLITE_ERROR] is returned and no data is read. +** The size of the blob (and hence the maximum value of N+iOffset) +** can be determined using the [sqlite3_blob_bytes()] interface. +** +** An attempt to read from an expired [BLOB handle] fails with an +** error code of [SQLITE_ABORT]. ** -** This function is used to read data from an open -** [sqlite3_blob | blob-handle] into a caller supplied buffer. -** n bytes of data are copied into buffer -** z from the open blob, starting at offset iOffset. +** On success, SQLITE_OK is returned. +** Otherwise, an [error code] or an [extended error code] is returned. ** -** On success, SQLITE_OK is returned. Otherwise, an -** [SQLITE_ERROR | SQLite error code] or an -** [SQLITE_IOERR_READ | extended error code] is returned. +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** See also: [sqlite3_blob_write()]. +** +** Requirements: +** [H17853] [H17856] [H17859] [H17862] [H17863] [H17865] [H17868] */ -int sqlite3_blob_read(sqlite3_blob *, void *z, int n, int iOffset); +int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); /* -** CAPI3REF: Write Data Into A BLOB Incrementally +** CAPI3REF: Write Data Into A BLOB Incrementally {H17870} +** +** This function is used to write data into an open [BLOB handle] from a +** caller-supplied buffer. 
N bytes of data are copied from the buffer Z +** into the open BLOB, starting at offset iOffset. +** +** If the [BLOB handle] passed as the first argument was not opened for +** writing (the flags parameter to [sqlite3_blob_open()] was zero), +** this function returns [SQLITE_READONLY]. ** -** This function is used to write data into an open -** [sqlite3_blob | blob-handle] from a user supplied buffer. -** n bytes of data are copied from the buffer -** pointed to by z into the open blob, starting at offset iOffset. +** This function may only modify the contents of the BLOB; it is +** not possible to increase the size of a BLOB using this API. +** If offset iOffset is less than N bytes from the end of the BLOB, +** [SQLITE_ERROR] is returned and no data is written. If N is +** less than zero [SQLITE_ERROR] is returned and no data is written. +** The size of the BLOB (and hence the maximum value of N+iOffset) +** can be determined using the [sqlite3_blob_bytes()] interface. ** -** If the [sqlite3_blob | blob-handle] passed as the first argument -** was not opened for writing (the flags parameter to [sqlite3_blob_open()] -*** was zero), this function returns [SQLITE_READONLY]. +** An attempt to write to an expired [BLOB handle] fails with an +** error code of [SQLITE_ABORT]. Writes to the BLOB that occurred +** before the [BLOB handle] expired are not rolled back by the +** expiration of the handle, though of course those changes might +** have been overwritten by the statement that expired the BLOB handle +** or by other independent statements. ** -** This function may only modify the contents of the blob, it is -** not possible to increase the size of a blob using this API. If -** offset iOffset is less than n bytes from the end of the blob, -** [SQLITE_ERROR] is returned and no data is written. +** On success, SQLITE_OK is returned. +** Otherwise, an [error code] or an [extended error code] is returned. ** -** On success, SQLITE_OK is returned. Otherwise, an -** [SQLITE_ERROR | SQLite error code] or an -** [SQLITE_IOERR_READ | extended error code] is returned. +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** See also: [sqlite3_blob_read()]. +** +** Requirements: +** [H17873] [H17874] [H17875] [H17876] [H17877] [H17879] [H17882] [H17885] +** [H17888] */ int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); /* +** CAPI3REF: Virtual File System Objects {H11200} +** +** A virtual filesystem (VFS) is an [sqlite3_vfs] object +** that SQLite uses to interact +** with the underlying operating system. Most SQLite builds come with a +** single default VFS that is appropriate for the host computer. +** New VFSes can be registered and existing VFSes can be unregistered. +** The following interfaces are provided. +** +** The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. +** Names are case sensitive. +** Names are zero-terminated UTF-8 strings. +** If there is no match, a NULL pointer is returned. +** If zVfsName is NULL then the default VFS is returned. +** +** New VFSes are registered with sqlite3_vfs_register(). +** Each new VFS becomes the default VFS if the makeDflt flag is set. +** The same VFS can be registered multiple times without injury. 
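As a sketch of how the VFS registration interfaces fit together, the helper below looks up a VFS by name and, if it is registered, makes it the process-wide default. The "unix-dotfile" name in the usage comment is only an illustrative choice; any registered VFS name works:

#include <sqlite3.h>

/* Make the named VFS the process-wide default, if it is registered. */
static int make_default_vfs(const char *zName){
  sqlite3_vfs *pVfs = sqlite3_vfs_find(zName);   /* NULL if not registered */
  if( pVfs==0 ) return SQLITE_ERROR;
  return sqlite3_vfs_register(pVfs, 1);          /* makeDflt=1 */
}

/* Example: make_default_vfs("unix-dotfile"); */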
+** To make an existing VFS into the default VFS, register it again +** with the makeDflt flag set. If two different VFSes with the +** same name are registered, the behavior is undefined. If a +** VFS is registered with a name that is NULL or an empty string, +** then the behavior is undefined. +** +** Unregister a VFS with the sqlite3_vfs_unregister() interface. +** If the default VFS is unregistered, another VFS is chosen as +** the default. The choice for the new VFS is arbitrary. +** +** Requirements: +** [H11203] [H11206] [H11209] [H11212] [H11215] [H11218] +*/ +sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); +int sqlite3_vfs_unregister(sqlite3_vfs*); + +/* +** CAPI3REF: Mutexes {H17000} +** +** The SQLite core uses these routines for thread +** synchronization. Though they are intended for internal +** use by SQLite, code that links against SQLite is +** permitted to use any of these routines. +** +** The SQLite source code contains multiple implementations +** of these mutex routines. An appropriate implementation +** is selected automatically at compile-time. The following +** implementations are available in the SQLite core: +** +**
+**   • SQLITE_MUTEX_OS2
+**   • SQLITE_MUTEX_PTHREAD
+**   • SQLITE_MUTEX_W32
+**   • SQLITE_MUTEX_NOOP
    +** +** The SQLITE_MUTEX_NOOP implementation is a set of routines +** that does no real locking and is appropriate for use in +** a single-threaded application. The SQLITE_MUTEX_OS2, +** SQLITE_MUTEX_PTHREAD, and SQLITE_MUTEX_W32 implementations +** are appropriate for use on OS/2, Unix, and Windows. +** +** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor +** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex +** implementation is included with the library. In this case the +** application must supply a custom mutex implementation using the +** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function +** before calling sqlite3_initialize() or any other public sqlite3_ +** function that calls sqlite3_initialize(). +** +** {H17011} The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. {H17012} If it returns NULL +** that means that a mutex could not be allocated. {H17013} SQLite +** will unwind its stack and return an error. {H17014} The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
+**   • SQLITE_MUTEX_FAST
+**   • SQLITE_MUTEX_RECURSIVE
+**   • SQLITE_MUTEX_STATIC_MASTER
+**   • SQLITE_MUTEX_STATIC_MEM
+**   • SQLITE_MUTEX_STATIC_MEM2
+**   • SQLITE_MUTEX_STATIC_PRNG
+**   • SQLITE_MUTEX_STATIC_LRU
+**   • SQLITE_MUTEX_STATIC_LRU2
    +** +** {H17015} The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. {END} +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. {H17016} But SQLite will only request a recursive mutex in +** cases where it really needs one. {END} If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** {H17017} The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. {END} Four static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** {H17018} Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. {H17034} But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +** +** {H17019} The sqlite3_mutex_free() routine deallocates a previously +** allocated dynamic mutex. {H17020} SQLite is careful to deallocate every +** dynamic mutex that it allocates. {A17021} The dynamic mutexes must not be in +** use when they are deallocated. {A17022} Attempting to deallocate a static +** mutex results in undefined behavior. {H17023} SQLite never deallocates +** a static mutex. {END} +** +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. {H17024} If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. {H17025} The sqlite3_mutex_try() interface returns [SQLITE_OK] +** upon successful entry. {H17026} Mutexes created using +** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. +** {H17027} In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. {A17028} If the same thread tries to enter any other +** kind of mutex more than once, the behavior is undefined. +** {H17029} SQLite will never exhibit +** such behavior in its own use of mutexes. +** +** Some systems (for example, Windows 95) do not support the operation +** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() +** will always return SQLITE_BUSY. {H17030} The SQLite core only ever uses +** sqlite3_mutex_try() as an optimization so this is acceptable behavior. +** +** {H17031} The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. {A17032} The behavior +** is undefined if the mutex is not currently entered by the +** calling thread or is not currently allocated. {H17033} SQLite will +** never do either. {END} +** +** If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or +** sqlite3_mutex_leave() is a NULL pointer, then all three routines +** behave as no-ops. +** +** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. 
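A short sketch of the dynamic mutex interfaces described above: allocate a recursive mutex, use it to serialize access to some application state (a plain counter stands in for that state here), and free it when done:

#include <sqlite3.h>

static sqlite3_mutex *gMutex;     /* protects gCounter */
static int gCounter = 0;

static int counter_setup(void){
  gMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  return gMutex ? SQLITE_OK : SQLITE_NOMEM;
}

static void counter_increment(void){
  sqlite3_mutex_enter(gMutex);    /* blocks until the mutex is available */
  gCounter++;
  sqlite3_mutex_leave(gMutex);
}

static void counter_teardown(void){
  sqlite3_mutex_free(gMutex);     /* only dynamic mutexes may be freed */
  gMutex = 0;
}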
+*/ +sqlite3_mutex *sqlite3_mutex_alloc(int); +void sqlite3_mutex_free(sqlite3_mutex*); +void sqlite3_mutex_enter(sqlite3_mutex*); +int sqlite3_mutex_try(sqlite3_mutex*); +void sqlite3_mutex_leave(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Methods Object {H17120} +** EXPERIMENTAL +** +** An instance of this structure defines the low-level routines +** used to allocate and use mutexes. +** +** Usually, the default mutex implementations provided by SQLite are +** sufficient, however the user has the option of substituting a custom +** implementation for specialized deployments or systems for which SQLite +** does not provide a suitable implementation. In this case, the user +** creates and populates an instance of this structure to pass +** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. +** Additionally, an instance of this structure can be used as an +** output variable when querying the system for the current mutex +** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. +** +** The xMutexInit method defined by this structure is invoked as +** part of system initialization by the sqlite3_initialize() function. +** {H17001} The xMutexInit routine shall be called by SQLite once for each +** effective call to [sqlite3_initialize()]. +** +** The xMutexEnd method defined by this structure is invoked as +** part of system shutdown by the sqlite3_shutdown() function. The +** implementation of this method is expected to release all outstanding +** resources obtained by the mutex methods implementation, especially +** those obtained by the xMutexInit method. {H17003} The xMutexEnd() +** interface shall be invoked once for each call to [sqlite3_shutdown()]. +** +** The remaining seven methods defined by this structure (xMutexAlloc, +** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and +** xMutexNotheld) implement the following interfaces (respectively): +** +**
+**   • [sqlite3_mutex_alloc()]
+**   • [sqlite3_mutex_free()]
+**   • [sqlite3_mutex_enter()]
+**   • [sqlite3_mutex_try()]
+**   • [sqlite3_mutex_leave()]
+**   • [sqlite3_mutex_held()]
+**   • [sqlite3_mutex_notheld()]
    +** +** The only difference is that the public sqlite3_XXX functions enumerated +** above silently ignore any invocations that pass a NULL pointer instead +** of a valid mutex handle. The implementations of the methods defined +** by this structure are not required to handle this case, the results +** of passing a NULL pointer instead of a valid mutex handle are undefined +** (i.e. it is acceptable to provide an implementation that segfaults if +** it is passed a NULL pointer). +*/ +typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; +struct sqlite3_mutex_methods { + int (*xMutexInit)(void); + int (*xMutexEnd)(void); + sqlite3_mutex *(*xMutexAlloc)(int); + void (*xMutexFree)(sqlite3_mutex *); + void (*xMutexEnter)(sqlite3_mutex *); + int (*xMutexTry)(sqlite3_mutex *); + void (*xMutexLeave)(sqlite3_mutex *); + int (*xMutexHeld)(sqlite3_mutex *); + int (*xMutexNotheld)(sqlite3_mutex *); +}; + +/* +** CAPI3REF: Mutex Verification Routines {H17080} +** +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +** are intended for use inside assert() statements. {H17081} The SQLite core +** never uses these routines except inside an assert() and applications +** are advised to follow the lead of the core. {H17082} The core only +** provides implementations for these routines when it is compiled +** with the SQLITE_DEBUG flag. {A17087} External mutex implementations +** are only required to provide these routines if SQLITE_DEBUG is +** defined and if NDEBUG is not defined. +** +** {H17083} These routines should return true if the mutex in their argument +** is held or not held, respectively, by the calling thread. +** +** {X17084} The implementation is not required to provided versions of these +** routines that actually work. If the implementation does not provide working +** versions of these routines, it should at least provide stubs that always +** return true so that one does not get spurious assertion failures. +** +** {H17085} If the argument to sqlite3_mutex_held() is a NULL pointer then +** the routine should return 1. {END} This seems counter-intuitive since +** clearly the mutex cannot be held if it does not exist. But the +** the reason the mutex does not exist is because the build is not +** using mutexes. And we do not want the assert() containing the +** call to sqlite3_mutex_held() to fail, so a non-zero return is +** the appropriate thing to do. {H17086} The sqlite3_mutex_notheld() +** interface should also return 1 when given a NULL pointer. +*/ +int sqlite3_mutex_held(sqlite3_mutex*); +int sqlite3_mutex_notheld(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Types {H17001} +** +** The [sqlite3_mutex_alloc()] interface takes a single argument +** which is one of these integer constants. +** +** The set of static mutexes may change from one SQLite release to the +** next. Applications that override the built-in mutex logic must be +** prepared to accommodate additional static mutexes. 
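For illustration, a minimal sketch of an application-defined implementation of this structure, assuming a strictly single-threaded deployment: every operation is a no-op, the held/notheld stubs simply return true as recommended above, and the whole set is installed with sqlite3_config() and [SQLITE_CONFIG_MUTEX] before sqlite3_initialize(). The NoopMutex type and the function names are inventions of this example, and the static-mutex table assumes the six static mutex types of this release:

#include <sqlite3.h>

typedef struct NoopMutex { int id; } NoopMutex;    /* application's own type */

static int noopMutexInit(void){ return SQLITE_OK; }
static int noopMutexEnd(void){ return SQLITE_OK; }

static sqlite3_mutex *noopMutexAlloc(int id){
  static NoopMutex aStatic[6];          /* one slot per static mutex type */
  NoopMutex *p;
  if( id>SQLITE_MUTEX_RECURSIVE ){
    p = &aStatic[id-2];                 /* same object on every call */
  }else{
    p = sqlite3_malloc(sizeof(*p));
    if( p==0 ) return 0;
  }
  p->id = id;
  return (sqlite3_mutex*)p;
}
static void noopMutexFree(sqlite3_mutex *p){ sqlite3_free(p); }
static void noopMutexEnter(sqlite3_mutex *p){ (void)p; }
static int  noopMutexTry(sqlite3_mutex *p){ (void)p; return SQLITE_OK; }
static void noopMutexLeave(sqlite3_mutex *p){ (void)p; }
static int  noopMutexHeld(sqlite3_mutex *p){ (void)p; return 1; }
static int  noopMutexNotheld(sqlite3_mutex *p){ (void)p; return 1; }

static sqlite3_mutex_methods noopMutexMethods = {
  noopMutexInit,  noopMutexEnd,   noopMutexAlloc,
  noopMutexFree,  noopMutexEnter, noopMutexTry,
  noopMutexLeave, noopMutexHeld,  noopMutexNotheld
};

/* Must run before sqlite3_initialize() or any other SQLite interface. */
int install_noop_mutexes(void){
  return sqlite3_config(SQLITE_CONFIG_MUTEX, &noopMutexMethods);
}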
+*/ +#define SQLITE_MUTEX_FAST 0 +#define SQLITE_MUTEX_RECURSIVE 1 +#define SQLITE_MUTEX_STATIC_MASTER 2 +#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ +#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ +#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ +#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ +#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ +#define SQLITE_MUTEX_STATIC_LRU2 7 /* lru page list */ + +/* +** CAPI3REF: Retrieve the mutex for a database connection {H17002} +** +** This interface returns a pointer the [sqlite3_mutex] object that +** serializes access to the [database connection] given in the argument +** when the [threading mode] is Serialized. +** If the [threading mode] is Single-thread or Multi-thread then this +** routine returns a NULL pointer. +*/ +sqlite3_mutex *sqlite3_db_mutex(sqlite3*); + +/* +** CAPI3REF: Low-Level Control Of Database Files {H11300} +** +** {H11301} The [sqlite3_file_control()] interface makes a direct call to the +** xFileControl method for the [sqlite3_io_methods] object associated +** with a particular database identified by the second argument. {H11302} The +** name of the database is the name assigned to the database by the +** ATTACH SQL command that opened the +** database. {H11303} To control the main database file, use the name "main" +** or a NULL pointer. {H11304} The third and fourth parameters to this routine +** are passed directly through to the second and third parameters of +** the xFileControl method. {H11305} The return value of the xFileControl +** method becomes the return value of this routine. +** +** {H11306} If the second parameter (zDbName) does not match the name of any +** open database file, then SQLITE_ERROR is returned. {H11307} This error +** code is not remembered and will not be recalled by [sqlite3_errcode()] +** or [sqlite3_errmsg()]. {A11308} The underlying xFileControl method might +** also return SQLITE_ERROR. {A11309} There is no way to distinguish between +** an incorrect zDbName and an SQLITE_ERROR return from the underlying +** xFileControl method. {END} +** +** See also: [SQLITE_FCNTL_LOCKSTATE] +*/ +int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); + +/* +** CAPI3REF: Testing Interface {H11400} +** +** The sqlite3_test_control() interface is used to read out internal +** state of SQLite and to inject faults into SQLite for testing +** purposes. The first parameter is an operation code that determines +** the number, meaning, and operation of all subsequent parameters. +** +** This interface is not for use by applications. It exists solely +** for verifying the correct operation of the SQLite library. Depending +** on how the SQLite library is compiled, this interface might not exist. +** +** The details of the operation codes, their meanings, the parameters +** they take, and what they do are all subject to change without notice. +** Unlike most of the SQLite API, this function is not guaranteed to +** operate consistently from one release to the next. +*/ +int sqlite3_test_control(int op, ...); + +/* +** CAPI3REF: Testing Interface Operation Codes {H11410} +** +** These constants are the valid operation code parameters used +** as the first argument to [sqlite3_test_control()]. +** +** These parameters and their meanings are subject to change +** without notice. These values are for testing purposes only. +** Applications should not use any of these parameters or the +** [sqlite3_test_control()] interface. 
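One way an application might use the sqlite3_db_mutex() interface declared above is to hold the connection's own mutex across a call and the subsequent error-code inspection, so that other threads sharing the connection cannot change the error state in between. A small sketch, with exec_with_errcode() being a hypothetical helper name:

#include <sqlite3.h>

/* Run zSql and return the extended error code that *this* call produced,
** even if other threads are using the same connection. */
static int exec_with_errcode(sqlite3 *db, const char *zSql){
  int rc;
  sqlite3_mutex_enter(sqlite3_db_mutex(db));   /* NULL mutex => no-op */
  rc = sqlite3_exec(db, zSql, 0, 0, 0);
  if( rc!=SQLITE_OK ) rc = sqlite3_extended_errcode(db);
  sqlite3_mutex_leave(sqlite3_db_mutex(db));
  return rc;
}

Because sqlite3_db_mutex() returns a NULL pointer in Single-thread and Multi-thread modes, and the enter/leave routines are no-ops on NULL, the same code works in every threading mode.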
+*/ +#define SQLITE_TESTCTRL_PRNG_SAVE 5 +#define SQLITE_TESTCTRL_PRNG_RESTORE 6 +#define SQLITE_TESTCTRL_PRNG_RESET 7 +#define SQLITE_TESTCTRL_BITVEC_TEST 8 +#define SQLITE_TESTCTRL_FAULT_INSTALL 9 +#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 +#define SQLITE_TESTCTRL_PENDING_BYTE 11 +#define SQLITE_TESTCTRL_ASSERT 12 +#define SQLITE_TESTCTRL_ALWAYS 13 + +/* +** CAPI3REF: SQLite Runtime Status {H17200} +** EXPERIMENTAL +** +** This interface is used to retrieve runtime status information +** about the preformance of SQLite, and optionally to reset various +** highwater marks. The first argument is an integer code for +** the specific parameter to measure. Recognized integer codes +** are of the form [SQLITE_STATUS_MEMORY_USED | SQLITE_STATUS_...]. +** The current value of the parameter is returned into *pCurrent. +** The highest recorded value is returned in *pHighwater. If the +** resetFlag is true, then the highest record value is reset after +** *pHighwater is written. Some parameters do not record the highest +** value. For those parameters +** nothing is written into *pHighwater and the resetFlag is ignored. +** Other parameters record only the highwater mark and not the current +** value. For these latter parameters nothing is written into *pCurrent. +** +** This routine returns SQLITE_OK on success and a non-zero +** [error code] on failure. +** +** This routine is threadsafe but is not atomic. This routine can +** called while other threads are running the same or different SQLite +** interfaces. However the values returned in *pCurrent and +** *pHighwater reflect the status of SQLite at different points in time +** and it is possible that another thread might change the parameter +** in between the times when *pCurrent and *pHighwater are written. +** +** See also: [sqlite3_db_status()] +*/ +SQLITE_EXPERIMENTAL int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); + + +/* +** CAPI3REF: Status Parameters {H17250} +** EXPERIMENTAL +** +** These integer constants designate various run-time status parameters +** that can be returned by [sqlite3_status()]. +** +**
+**   SQLITE_STATUS_MEMORY_USED
+**     This parameter is the current amount of memory checked out
+**     using [sqlite3_malloc()], either directly or indirectly.  The
+**     figure includes calls made to [sqlite3_malloc()] by the application
+**     and internal memory usage by the SQLite library.  Scratch memory
+**     controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache
+**     memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
+**     this parameter.  The amount returned is the sum of the allocation
+**     sizes as reported by the xSize method in [sqlite3_mem_methods].
+**
+**   SQLITE_STATUS_MALLOC_SIZE
+**     This parameter records the largest memory allocation request
+**     handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
+**     internal equivalents).  Only the value returned in the
+**     *pHighwater parameter to [sqlite3_status()] is of interest.
+**     The value written into the *pCurrent parameter is undefined.
+**
+**   SQLITE_STATUS_PAGECACHE_USED
+**     This parameter returns the number of pages used out of the
+**     [pagecache memory allocator] that was configured using
+**     [SQLITE_CONFIG_PAGECACHE].  The value returned is in pages,
+**     not in bytes.
+**
+**   SQLITE_STATUS_PAGECACHE_OVERFLOW
+**     This parameter returns the number of bytes of page cache
+**     allocation which could not be satisfied by the
+**     [SQLITE_CONFIG_PAGECACHE] buffer and were forced to overflow to
+**     [sqlite3_malloc()].  The returned value includes allocations that
+**     overflowed because they were too large (they were larger than the
+**     "sz" parameter to [SQLITE_CONFIG_PAGECACHE]) and allocations that
+**     overflowed because no space was left in the page cache.
+**
+**   SQLITE_STATUS_PAGECACHE_SIZE
+**     This parameter records the largest memory allocation request
+**     handed to the [pagecache memory allocator].  Only the value
+**     returned in the *pHighwater parameter to [sqlite3_status()] is of
+**     interest.  The value written into the *pCurrent parameter is
+**     undefined.
+**
+**   SQLITE_STATUS_SCRATCH_USED
+**     This parameter returns the number of allocations used out of the
+**     [scratch memory allocator] configured using [SQLITE_CONFIG_SCRATCH].
+**     The value returned is in allocations, not in bytes.  Since a single
+**     thread may only have one scratch allocation outstanding at a time,
+**     this parameter also reports the number of threads using scratch
+**     memory at the same time.
+**
+**   SQLITE_STATUS_SCRATCH_OVERFLOW
+**     This parameter returns the number of bytes of scratch memory
+**     allocation which could not be satisfied by the
+**     [SQLITE_CONFIG_SCRATCH] buffer and were forced to overflow to
+**     [sqlite3_malloc()].  The values returned include overflows because
+**     the requested allocation was too large (that is, because the
+**     requested allocation was larger than the "sz" parameter to
+**     [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer slots were
+**     available.
+**
+**   SQLITE_STATUS_SCRATCH_SIZE
+**     This parameter records the largest memory allocation request
+**     handed to the [scratch memory allocator].  Only the value returned
+**     in the *pHighwater parameter to [sqlite3_status()] is of interest.
+**     The value written into the *pCurrent parameter is undefined.
+**
+**   SQLITE_STATUS_PARSER_STACK
+**     This parameter records the deepest parser stack.  It is only
+**     meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].
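By way of example, the sketch below reads two of the parameters listed above: the current and peak heap usage, and the largest single allocation request (for which only the highwater mark is meaningful). It resets the malloc-size highwater mark so that a fresh measurement interval begins; the printf formatting is incidental:

#include <stdio.h>
#include <sqlite3.h>

static void report_memory_status(void){
  int iCur = 0, iHiwtr = 0;

  /* Current and peak number of bytes checked out of sqlite3_malloc(). */
  sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, 0);
  printf("memory used: %d bytes (peak %d)\n", iCur, iHiwtr);

  /* Largest single allocation request; only the highwater is meaningful.
  ** Pass resetFlag=1 to start a fresh measurement interval. */
  sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, 1);
  printf("largest allocation: %d bytes\n", iHiwtr);
}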
    +** +** New status parameters may be added from time to time. +*/ +#define SQLITE_STATUS_MEMORY_USED 0 +#define SQLITE_STATUS_PAGECACHE_USED 1 +#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 +#define SQLITE_STATUS_SCRATCH_USED 3 +#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 +#define SQLITE_STATUS_MALLOC_SIZE 5 +#define SQLITE_STATUS_PARSER_STACK 6 +#define SQLITE_STATUS_PAGECACHE_SIZE 7 +#define SQLITE_STATUS_SCRATCH_SIZE 8 + +/* +** CAPI3REF: Database Connection Status {H17500} +** EXPERIMENTAL +** +** This interface is used to retrieve runtime status information +** about a single [database connection]. The first argument is the +** database connection object to be interrogated. The second argument +** is the parameter to interrogate. Currently, the only allowed value +** for the second parameter is [SQLITE_DBSTATUS_LOOKASIDE_USED]. +** Additional options will likely appear in future releases of SQLite. +** +** The current value of the requested parameter is written into *pCur +** and the highest instantaneous value is written into *pHiwtr. If +** the resetFlg is true, then the highest instantaneous value is +** reset back down to the current value. +** +** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. +*/ +SQLITE_EXPERIMENTAL int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); + +/* +** CAPI3REF: Status Parameters for database connections {H17520} +** EXPERIMENTAL +** +** Status verbs for [sqlite3_db_status()]. +** +**
+**   SQLITE_DBSTATUS_LOOKASIDE_USED
+**     This parameter returns the number of lookaside memory slots
+**     currently checked out.
    +*/ +#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 + + +/* +** CAPI3REF: Prepared Statement Status {H17550} +** EXPERIMENTAL +** +** Each prepared statement maintains various +** [SQLITE_STMTSTATUS_SORT | counters] that measure the number +** of times it has performed specific operations. These counters can +** be used to monitor the performance characteristics of the prepared +** statements. For example, if the number of table steps greatly exceeds +** the number of table searches or result rows, that would tend to indicate +** that the prepared statement is using a full table scan rather than +** an index. +** +** This interface is used to retrieve and reset counter values from +** a [prepared statement]. The first argument is the prepared statement +** object to be interrogated. The second argument +** is an integer code for a specific [SQLITE_STMTSTATUS_SORT | counter] +** to be interrogated. +** The current value of the requested counter is returned. +** If the resetFlg is true, then the counter is reset to zero after this +** interface call returns. +** +** See also: [sqlite3_status()] and [sqlite3_db_status()]. +*/ +SQLITE_EXPERIMENTAL int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); + +/* +** CAPI3REF: Status Parameters for prepared statements {H17570} +** EXPERIMENTAL +** +** These preprocessor macros define integer codes that name counter +** values associated with the [sqlite3_stmt_status()] interface. +** The meanings of the various counters are as follows: +** +**
+**   SQLITE_STMTSTATUS_FULLSCAN_STEP
+**     This is the number of times that SQLite has stepped forward in
+**     a table as part of a full table scan.  Large numbers for this
+**     counter may indicate opportunities for performance improvement
+**     through careful use of indices.
+**
+**   SQLITE_STMTSTATUS_SORT
+**     This is the number of sort operations that have occurred.
+**     A non-zero value in this counter may indicate an opportunity to
+**     improve performance through careful use of indices.
+**
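A small usage sketch for these counters: after a statement has been run, read both counters without resetting them and warn if the statement did full-scan steps or sorting. The helper name and the warning format are illustrative only:

#include <stdio.h>
#include <sqlite3.h>

/* Print a warning if pStmt did full-table-scan steps or sorted rows. */
static void check_stmt_efficiency(sqlite3_stmt *pStmt){
  const char *zSql = sqlite3_sql(pStmt);   /* may be NULL for sqlite3_prepare() */
  int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
  int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
  if( nScan>0 || nSort>0 ){
    fprintf(stderr, "%s: %d full-scan steps, %d sorts - consider an index\n",
            zSql ? zSql : "(unknown SQL)", nScan, nSort);
  }
}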
    +*/ +#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 +#define SQLITE_STMTSTATUS_SORT 2 + +/* +** CAPI3REF: Custom Page Cache Object +** EXPERIMENTAL +** +** The sqlite3_pcache type is opaque. It is implemented by +** the pluggable module. The SQLite core has no knowledge of +** its size or internal structure and never deals with the +** sqlite3_pcache object except by holding and passing pointers +** to the object. +** +** See [sqlite3_pcache_methods] for additional information. +*/ +typedef struct sqlite3_pcache sqlite3_pcache; + +/* +** CAPI3REF: Application Defined Page Cache. +** EXPERIMENTAL +** +** The [sqlite3_config]([SQLITE_CONFIG_PCACHE], ...) interface can +** register an alternative page cache implementation by passing in an +** instance of the sqlite3_pcache_methods structure. The majority of the +** heap memory used by sqlite is used by the page cache to cache data read +** from, or ready to be written to, the database file. By implementing a +** custom page cache using this API, an application can control more +** precisely the amount of memory consumed by sqlite, the way in which +** said memory is allocated and released, and the policies used to +** determine exactly which parts of a database file are cached and for +** how long. +** +** The contents of the structure are copied to an internal buffer by sqlite +** within the call to [sqlite3_config]. +** +** The xInit() method is called once for each call to [sqlite3_initialize()] +** (usually only once during the lifetime of the process). It is passed +** a copy of the sqlite3_pcache_methods.pArg value. It can be used to set +** up global structures and mutexes required by the custom page cache +** implementation. The xShutdown() method is called from within +** [sqlite3_shutdown()], if the application invokes this API. It can be used +** to clean up any outstanding resources before process shutdown, if required. +** +** The xCreate() method is used to construct a new cache instance. The +** first parameter, szPage, is the size in bytes of the pages that must +** be allocated by the cache. szPage will not be a power of two. The +** second argument, bPurgeable, is true if the cache being created will +** be used to cache database pages read from a file stored on disk, or +** false if it is used for an in-memory database. The cache implementation +** does not have to do anything special based on the value of bPurgeable, +** it is purely advisory. +** +** The xCachesize() method may be called at any time by SQLite to set the +** suggested maximum cache-size (number of pages stored by) the cache +** instance passed as the first argument. This is the value configured using +** the SQLite "[PRAGMA cache_size]" command. As with the bPurgeable parameter, +** the implementation is not required to do anything special with this +** value, it is advisory only. +** +** The xPagecount() method should return the number of pages currently +** stored in the cache supplied as an argument. +** +** The xFetch() method is used to fetch a page and return a pointer to it. +** A 'page', in this context, is a buffer of szPage bytes aligned at an +** 8-byte boundary. The page to be fetched is determined by the key. The +** mimimum key value is 1. After it has been retrieved using xFetch, the page +** is considered to be pinned. +** +** If the requested page is already in the page cache, then a pointer to +** the cached buffer should be returned with its contents intact. 
If the +** page is not already in the cache, then the expected behaviour of the +** cache is determined by the value of the createFlag parameter passed +** to xFetch, according to the following table: +** +** +**
+**   createFlag   Expected Behaviour
+**   ----------   ------------------
+**       0        NULL should be returned.  No new cache entry is created.
+**
+**       1        If createFlag is set to 1, this indicates that SQLite is
+**                holding pinned pages that can be unpinned by writing their
+**                contents to the database file (a relatively expensive
+**                operation).  In this situation the cache implementation has
+**                two choices: it can return NULL, in which case SQLite will
+**                attempt to unpin one or more pages before re-requesting the
+**                same page, or it can allocate a new page and return a
+**                pointer to it.  If a new page is allocated, then the first
+**                sizeof(void*) bytes of it (at least) must be zeroed before
+**                it is returned.
+**
+**       2        If createFlag is set to 2, then SQLite is not holding any
+**                pinned pages associated with the specific cache passed as
+**                the first argument to xFetch() that can be unpinned.  The
+**                cache implementation should attempt to allocate a new
+**                cache entry and return a pointer to it.  Again, the first
+**                sizeof(void*) bytes of the page should be zeroed before it
+**                is returned.  If the xFetch() method returns NULL when
+**                createFlag==2, SQLite assumes that a memory allocation
+**                failed and returns SQLITE_NOMEM to the user.
    +** +** xUnpin() is called by SQLite with a pointer to a currently pinned page +** as its second argument. If the third parameter, discard, is non-zero, +** then the page should be evicted from the cache. In this case SQLite +** assumes that the next time the page is retrieved from the cache using +** the xFetch() method, it will be zeroed. If the discard parameter is +** zero, then the page is considered to be unpinned. The cache implementation +** may choose to reclaim (free or recycle) unpinned pages at any time. +** SQLite assumes that next time the page is retrieved from the cache +** it will either be zeroed, or contain the same data that it did when it +** was unpinned. +** +** The cache is not required to perform any reference counting. A single +** call to xUnpin() unpins the page regardless of the number of prior calls +** to xFetch(). +** +** The xRekey() method is used to change the key value associated with the +** page passed as the second argument from oldKey to newKey. If the cache +** previously contains an entry associated with newKey, it should be +** discarded. Any prior cache entry associated with newKey is guaranteed not +** to be pinned. +** +** When SQLite calls the xTruncate() method, the cache must discard all +** existing cache entries with page numbers (keys) greater than or equal +** to the value of the iLimit parameter passed to xTruncate(). If any +** of these pages are pinned, they are implicitly unpinned, meaning that +** they can be safely discarded. +** +** The xDestroy() method is used to delete a cache allocated by xCreate(). +** All resources associated with the specified cache should be freed. After +** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] +** handle invalid, and will not use it with any other sqlite3_pcache_methods +** functions. +*/ +typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; +struct sqlite3_pcache_methods { + void *pArg; + int (*xInit)(void*); + void (*xShutdown)(void*); + sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); + void (*xCachesize)(sqlite3_pcache*, int nCachesize); + int (*xPagecount)(sqlite3_pcache*); + void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); + void (*xUnpin)(sqlite3_pcache*, void*, int discard); + void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); + void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); + void (*xDestroy)(sqlite3_pcache*); +}; + +/* +** CAPI3REF: Online Backup Object +** EXPERIMENTAL +** +** The sqlite3_backup object records state information about an ongoing +** online backup operation. The sqlite3_backup object is created by +** a call to [sqlite3_backup_init()] and is destroyed by a call to +** [sqlite3_backup_finish()]. +** +** See Also: [Using the SQLite Online Backup API] +*/ +typedef struct sqlite3_backup sqlite3_backup; + +/* +** CAPI3REF: Online Backup API. +** EXPERIMENTAL +** +** This API is used to overwrite the contents of one database with that +** of another. It is useful either for creating backups of databases or +** for copying in-memory databases to or from persistent files. +** +** See Also: [Using the SQLite Online Backup API] +** +** Exclusive access is required to the destination database for the +** duration of the operation. However the source database is only +** read-locked while it is actually being read, it is not locked +** continuously for the entire operation. 
Thus, the backup may be +** performed on a live database without preventing other users from +** writing to the database for an extended period of time. +** +** To perform a backup operation: +**
+**   1. sqlite3_backup_init() is called once to initialize the backup,
+**   2. sqlite3_backup_step() is called one or more times to transfer
+**      the data between the two databases, and finally
+**   3. sqlite3_backup_finish() is called to release all resources
+**      associated with the backup operation.
    +** There should be exactly one call to sqlite3_backup_finish() for each +** successful call to sqlite3_backup_init(). +** +** sqlite3_backup_init() +** +** The first two arguments passed to [sqlite3_backup_init()] are the database +** handle associated with the destination database and the database name +** used to attach the destination database to the handle. The database name +** is "main" for the main database, "temp" for the temporary database, or +** the name specified as part of the [ATTACH] statement if the destination is +** an attached database. The third and fourth arguments passed to +** sqlite3_backup_init() identify the [database connection] +** and database name used +** to access the source database. The values passed for the source and +** destination [database connection] parameters must not be the same. +** +** If an error occurs within sqlite3_backup_init(), then NULL is returned +** and an error code and error message written into the [database connection] +** passed as the first argument. They may be retrieved using the +** [sqlite3_errcode()], [sqlite3_errmsg()], and [sqlite3_errmsg16()] functions. +** Otherwise, if successful, a pointer to an [sqlite3_backup] object is +** returned. This pointer may be used with the sqlite3_backup_step() and +** sqlite3_backup_finish() functions to perform the specified backup +** operation. +** +** sqlite3_backup_step() +** +** Function [sqlite3_backup_step()] is used to copy up to nPage pages between +** the source and destination databases, where nPage is the value of the +** second parameter passed to sqlite3_backup_step(). If nPage is a negative +** value, all remaining source pages are copied. If the required pages are +** succesfully copied, but there are still more pages to copy before the +** backup is complete, it returns [SQLITE_OK]. If no error occured and there +** are no more pages to copy, then [SQLITE_DONE] is returned. If an error +** occurs, then an SQLite error code is returned. As well as [SQLITE_OK] and +** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], +** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. +** +** As well as the case where the destination database file was opened for +** read-only access, sqlite3_backup_step() may return [SQLITE_READONLY] if +** the destination is an in-memory database with a different page size +** from the source database. +** +** If sqlite3_backup_step() cannot obtain a required file-system lock, then +** the [sqlite3_busy_handler | busy-handler function] +** is invoked (if one is specified). If the +** busy-handler returns non-zero before the lock is available, then +** [SQLITE_BUSY] is returned to the caller. In this case the call to +** sqlite3_backup_step() can be retried later. If the source +** [database connection] +** is being used to write to the source database when sqlite3_backup_step() +** is called, then [SQLITE_LOCKED] is returned immediately. Again, in this +** case the call to sqlite3_backup_step() can be retried later on. If +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or +** [SQLITE_READONLY] is returned, then +** there is no point in retrying the call to sqlite3_backup_step(). These +** errors are considered fatal. At this point the application must accept +** that the backup operation has failed and pass the backup operation handle +** to the sqlite3_backup_finish() to release associated resources. 
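Putting the three-step procedure above together, the following sketch backs up the "main" database of a live connection into a separate file, copying a few pages at a time and sleeping between chunks so that other connections can continue to write. The 5-page chunk size and 250 ms pause are arbitrary tuning choices for the example:

#include <sqlite3.h>

/* Copy the "main" database of pDb into the file zFilename. */
static int backup_to_file(sqlite3 *pDb, const char *zFilename){
  sqlite3 *pFile = 0;
  sqlite3_backup *pBackup;
  int rc = sqlite3_open(zFilename, &pFile);

  if( rc==SQLITE_OK ){
    pBackup = sqlite3_backup_init(pFile, "main", pDb, "main");
    if( pBackup ){
      do{
        rc = sqlite3_backup_step(pBackup, 5);        /* copy up to 5 pages */
        if( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
          sqlite3_sleep(250);                        /* let writers proceed */
        }
      }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
      sqlite3_backup_finish(pBackup);                /* releases all resources */
    }
    rc = sqlite3_errcode(pFile);                     /* error, if any, is here */
  }
  sqlite3_close(pFile);
  return rc;
}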
+** +** Following the first call to sqlite3_backup_step(), an exclusive lock is +** obtained on the destination file. It is not released until either +** sqlite3_backup_finish() is called or the backup operation is complete +** and sqlite3_backup_step() returns [SQLITE_DONE]. Additionally, each time +** a call to sqlite3_backup_step() is made a [shared lock] is obtained on +** the source database file. This lock is released before the +** sqlite3_backup_step() call returns. Because the source database is not +** locked between calls to sqlite3_backup_step(), it may be modified mid-way +** through the backup procedure. If the source database is modified by an +** external process or via a database connection other than the one being +** used by the backup operation, then the backup will be transparently +** restarted by the next call to sqlite3_backup_step(). If the source +** database is modified by the using the same database connection as is used +** by the backup operation, then the backup database is transparently +** updated at the same time. +** +** sqlite3_backup_finish() +** +** Once sqlite3_backup_step() has returned [SQLITE_DONE], or when the +** application wishes to abandon the backup operation, the [sqlite3_backup] +** object should be passed to sqlite3_backup_finish(). This releases all +** resources associated with the backup operation. If sqlite3_backup_step() +** has not yet returned [SQLITE_DONE], then any active write-transaction on the +** destination database is rolled back. The [sqlite3_backup] object is invalid +** and may not be used following a call to sqlite3_backup_finish(). +** +** The value returned by sqlite3_backup_finish is [SQLITE_OK] if no error +** occurred, regardless or whether or not sqlite3_backup_step() was called +** a sufficient number of times to complete the backup operation. Or, if +** an out-of-memory condition or IO error occured during a call to +** sqlite3_backup_step() then [SQLITE_NOMEM] or an +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] error code +** is returned. In this case the error code and an error message are +** written to the destination [database connection]. +** +** A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() is +** not a permanent error and does not affect the return value of +** sqlite3_backup_finish(). +** +** sqlite3_backup_remaining(), sqlite3_backup_pagecount() +** +** Each call to sqlite3_backup_step() sets two values stored internally +** by an [sqlite3_backup] object. The number of pages still to be backed +** up, which may be queried by sqlite3_backup_remaining(), and the total +** number of pages in the source database file, which may be queried by +** sqlite3_backup_pagecount(). +** +** The values returned by these functions are only updated by +** sqlite3_backup_step(). If the source database is modified during a backup +** operation, then the values are not updated to account for any extra +** pages that need to be updated or the size of the source database file +** changing. +** +** Concurrent Usage of Database Handles +** +** The source [database connection] may be used by the application for other +** purposes while a backup operation is underway or being initialized. +** If SQLite is compiled and configured to support threadsafe database +** connections, then the source database connection may be used concurrently +** from within other threads. 
+** +** However, the application must guarantee that the destination database +** connection handle is not passed to any other API (by any thread) after +** sqlite3_backup_init() is called and before the corresponding call to +** sqlite3_backup_finish(). Unfortunately SQLite does not currently check +** for this, if the application does use the destination [database connection] +** for some other purpose during a backup operation, things may appear to +** work correctly but in fact be subtly malfunctioning. Use of the +** destination database connection while a backup is in progress might +** also cause a mutex deadlock. +** +** Furthermore, if running in [shared cache mode], the application must +** guarantee that the shared cache used by the destination database +** is not accessed while the backup is running. In practice this means +** that the application must guarantee that the file-system file being +** backed up to is not accessed by any connection within the process, +** not just the specific connection that was passed to sqlite3_backup_init(). +** +** The [sqlite3_backup] object itself is partially threadsafe. Multiple +** threads may safely make multiple concurrent calls to sqlite3_backup_step(). +** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() +** APIs are not strictly speaking threadsafe. If they are invoked at the +** same time as another thread is invoking sqlite3_backup_step() it is +** possible that they return invalid values. +*/ +sqlite3_backup *sqlite3_backup_init( + sqlite3 *pDest, /* Destination database handle */ + const char *zDestName, /* Destination database name */ + sqlite3 *pSource, /* Source database handle */ + const char *zSourceName /* Source database name */ +); +int sqlite3_backup_step(sqlite3_backup *p, int nPage); +int sqlite3_backup_finish(sqlite3_backup *p); +int sqlite3_backup_remaining(sqlite3_backup *p); +int sqlite3_backup_pagecount(sqlite3_backup *p); + +/* +** CAPI3REF: Unlock Notification +** EXPERIMENTAL +** +** When running in shared-cache mode, a database operation may fail with +** an [SQLITE_LOCKED] error if the required locks on the shared-cache or +** individual tables within the shared-cache cannot be obtained. See +** [SQLite Shared-Cache Mode] for a description of shared-cache locking. +** This API may be used to register a callback that SQLite will invoke +** when the connection currently holding the required lock relinquishes it. +** This API is only available if the library was compiled with the +** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. +** +** See Also: [Using the SQLite Unlock Notification Feature]. +** +** Shared-cache locks are released when a database connection concludes +** its current transaction, either by committing it or rolling it back. +** +** When a connection (known as the blocked connection) fails to obtain a +** shared-cache lock and SQLITE_LOCKED is returned to the caller, the +** identity of the database connection (the blocking connection) that +** has locked the required resource is stored internally. After an +** application receives an SQLITE_LOCKED error, it may call the +** sqlite3_unlock_notify() method with the blocked connection handle as +** the first argument to register for a callback that will be invoked +** when the blocking connections current transaction is concluded. The +** callback is invoked from within the [sqlite3_step] or [sqlite3_close] +** call that concludes the blocking connections transaction. 
+** +** If sqlite3_unlock_notify() is called in a multi-threaded application, +** there is a chance that the blocking connection will have already +** concluded its transaction by the time sqlite3_unlock_notify() is invoked. +** If this happens, then the specified callback is invoked immediately, +** from within the call to sqlite3_unlock_notify(). +** +** If the blocked connection is attempting to obtain a write-lock on a +** shared-cache table, and more than one other connection currently holds +** a read-lock on the same table, then SQLite arbitrarily selects one of +** the other connections to use as the blocking connection. +** +** There may be at most one unlock-notify callback registered by a +** blocked connection. If sqlite3_unlock_notify() is called when the +** blocked connection already has a registered unlock-notify callback, +** then the new callback replaces the old. If sqlite3_unlock_notify() is +** called with a NULL pointer as its second argument, then any existing +** unlock-notify callback is cancelled. The blocked connections +** unlock-notify callback may also be canceled by closing the blocked +** connection using [sqlite3_close()]. +** +** The unlock-notify callback is not reentrant. If an application invokes +** any sqlite3_xxx API functions from within an unlock-notify callback, a +** crash or deadlock may be the result. +** +** Unless deadlock is detected (see below), sqlite3_unlock_notify() always +** returns SQLITE_OK. +** +** Callback Invocation Details +** +** When an unlock-notify callback is registered, the application provides a +** single void* pointer that is passed to the callback when it is invoked. +** However, the signature of the callback function allows SQLite to pass +** it an array of void* context pointers. The first argument passed to +** an unlock-notify callback is a pointer to an array of void* pointers, +** and the second is the number of entries in the array. +** +** When a blocking connections transaction is concluded, there may be +** more than one blocked connection that has registered for an unlock-notify +** callback. If two or more such blocked connections have specified the +** same callback function, then instead of invoking the callback function +** multiple times, it is invoked once with the set of void* context pointers +** specified by the blocked connections bundled together into an array. +** This gives the application an opportunity to prioritize any actions +** related to the set of unblocked database connections. +** +** Deadlock Detection +** +** Assuming that after registering for an unlock-notify callback a +** database waits for the callback to be issued before taking any further +** action (a reasonable assumption), then using this API may cause the +** application to deadlock. For example, if connection X is waiting for +** connection Y's transaction to be concluded, and similarly connection +** Y is waiting on connection X's transaction, then neither connection +** will proceed and the system may remain deadlocked indefinitely. +** +** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock +** detection. If a given call to sqlite3_unlock_notify() would put the +** system in a deadlocked state, then SQLITE_LOCKED is returned and no +** unlock-notify callback is registered. 
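The usual way to consume this interface is a blocking wrapper around sqlite3_unlock_notify(). The sketch below assumes a POSIX-threads build with SQLITE_ENABLE_UNLOCK_NOTIFY defined: the callback signals a condition variable, and the caller waits on it unless sqlite3_unlock_notify() reports that waiting would deadlock, in which case SQLITE_LOCKED is returned immediately:

#include <pthread.h>
#include <sqlite3.h>

typedef struct UnlockNotification {
  int fired;                      /* true after the callback has run */
  pthread_mutex_t mutex;
  pthread_cond_t cond;
} UnlockNotification;

/* Unlock-notify callback: wake up every blocked thread in the array. */
static void unlock_notify_cb(void **apArg, int nArg){
  int i;
  for(i=0; i<nArg; i++){
    UnlockNotification *p = (UnlockNotification*)apArg[i];
    pthread_mutex_lock(&p->mutex);
    p->fired = 1;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->mutex);
  }
}

/* Block until the connection holding the required lock releases it. */
static int wait_for_unlock_notify(sqlite3 *db){
  UnlockNotification un;
  int rc;
  un.fired = 0;
  pthread_mutex_init(&un.mutex, 0);
  pthread_cond_init(&un.cond, 0);

  rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void*)&un);
  if( rc==SQLITE_OK ){               /* SQLITE_LOCKED here means deadlock */
    pthread_mutex_lock(&un.mutex);
    while( !un.fired ) pthread_cond_wait(&un.cond, &un.mutex);
    pthread_mutex_unlock(&un.mutex);
  }
  pthread_cond_destroy(&un.cond);
  pthread_mutex_destroy(&un.mutex);
  return rc;
}

A caller would typically invoke wait_for_unlock_notify() after sqlite3_step() fails with extended error code SQLITE_LOCKED_SHAREDCACHE, then reset or re-prepare the statement and retry.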
The system is said to be in +** a deadlocked state if connection A has registered for an unlock-notify +** callback on the conclusion of connection B's transaction, and connection +** B has itself registered for an unlock-notify callback when connection +** A's transaction is concluded. Indirect deadlock is also detected, so +** the system is also considered to be deadlocked if connection B has +** registered for an unlock-notify callback on the conclusion of connection +** C's transaction, where connection C is waiting on connection A. Any +** number of levels of indirection are allowed. +** +** The "DROP TABLE" Exception +** +** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost +** always appropriate to call sqlite3_unlock_notify(). There is however, +** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, +** SQLite checks if there are any currently executing SELECT statements +** that belong to the same connection. If there are, SQLITE_LOCKED is +** returned. In this case there is no "blocking connection", so invoking +** sqlite3_unlock_notify() results in the unlock-notify callback being +** invoked immediately. If the application then re-attempts the "DROP TABLE" +** or "DROP INDEX" query, an infinite loop might be the result. +** +** One way around this problem is to check the extended error code returned +** by an sqlite3_step() call. If there is a blocking connection, then the +** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in +** the special "DROP TABLE/INDEX" case, the extended error code is just +** SQLITE_LOCKED. +*/ +int sqlite3_unlock_notify( + sqlite3 *pBlocked, /* Waiting connection */ + void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ + void *pNotifyArg /* Argument to pass to xNotify */ +); + +/* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/sqliteInt.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/sqliteInt.h --- sqlite3-3.4.2/src/sqliteInt.h 2007-08-08 13:11:21.000000000 +0100 +++ sqlite3-3.6.16/src/sqliteInt.h 2009-06-26 16:14:55.000000000 +0100 @@ -11,42 +11,95 @@ ************************************************************************* ** Internal interface definitions for SQLite. ** -** @(#) $Id: sqliteInt.h,v 1.585 2007/08/08 12:11:21 drh Exp $ +** @(#) $Id: sqliteInt.h,v 1.890 2009/06/26 15:14:55 drh Exp $ */ #ifndef _SQLITEINT_H_ #define _SQLITEINT_H_ + +/* +** Include the configuration header output by 'configure' if we're using the +** autoconf-based build +*/ +#ifdef _HAVE_SQLITE_CONFIG_H +#include "config.h" +#endif + #include "sqliteLimit.h" +/* Disable nuisance warnings on Borland compilers */ +#if defined(__BORLANDC__) +#pragma warn -rch /* unreachable code */ +#pragma warn -ccc /* Condition is always true or false */ +#pragma warn -aus /* Assigned value is never used */ +#pragma warn -csu /* Comparing signed and unsigned */ +#pragma warn -spa /* Suspicious pointer arithmetic */ +#endif -#if defined(SQLITE_TCL) || defined(TCLSH) -# include +/* Needed for various definitions... */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE #endif /* -** Many people are failing to set -DNDEBUG=1 when compiling SQLite. -** Setting NDEBUG makes the code smaller and run faster. So the following -** lines are added to automatically set NDEBUG unless the -DSQLITE_DEBUG=1 -** option is set. Thus NDEBUG becomes an opt-in rather than an opt-out -** feature. 
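For reference, the sqlite3_unlock_notify() interface documented above is normally paired with a condition variable so that the blocked thread can sleep until the blocking transaction concludes. A rough sketch, assuming the library was built with SQLITE_ENABLE_UNLOCK_NOTIFY; the UnlockWait structure and helper names are illustrative:

#include <pthread.h>
#include <sqlite3.h>

typedef struct UnlockWait UnlockWait;
struct UnlockWait {
  int fired;                     /* Set by the callback when the lock clears */
  pthread_mutex_t mu;
  pthread_cond_t cv;
};

/* Invoked by SQLite when the blocking connection's transaction concludes.
** apArg[] holds one context pointer per blocked connection. */
static void unlockNotifyCb(void **apArg, int nArg){
  int i;
  for(i=0; i<nArg; i++){
    UnlockWait *w = (UnlockWait*)apArg[i];
    pthread_mutex_lock(&w->mu);
    w->fired = 1;
    pthread_cond_signal(&w->cv);
    pthread_mutex_unlock(&w->mu);
  }
}

/* Register for a notification and block until it arrives.  A return value
** of SQLITE_LOCKED means deadlock was detected and the caller should roll
** back rather than wait. */
static int waitForUnlockNotify(sqlite3 *db){
  UnlockWait w = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
  int rc = sqlite3_unlock_notify(db, unlockNotifyCb, (void*)&w);
  if( rc==SQLITE_OK ){
    pthread_mutex_lock(&w.mu);
    while( !w.fired ) pthread_cond_wait(&w.cv, &w.mu);
    pthread_mutex_unlock(&w.mu);
  }
  return rc;
}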
+** Include standard header files as necessary */ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 +#ifdef HAVE_STDINT_H +#include +#endif +#ifdef HAVE_INTTYPES_H +#include +#endif + +/* +** This macro is used to "hide" some ugliness in casting an int +** value to a ptr value under the MSVC 64-bit compiler. Casting +** non 64-bit values to ptr types results in a "hard" error with +** the MSVC 64-bit compiler which this attempts to avoid. +** +** A simple compiler pragma or casting sequence could not be found +** to correct this in all situations, so this macro was introduced. +** +** It could be argued that the intptr_t type could be used in this +** case, but that type is not available on all compilers, or +** requires the #include of specific headers which differs between +** platforms. +** +** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on +** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)). +** So we have to define the macros in different ways depending on the +** compiler. +*/ +#if defined(__GNUC__) +# if defined(HAVE_STDINT_H) +# define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X)) +# define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X)) +# else +# define SQLITE_INT_TO_PTR(X) ((void*)(X)) +# define SQLITE_PTR_TO_INT(X) ((int)(X)) +# endif +#else +# define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X]) +# define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0)) #endif /* -** These #defines should enable >2GB file support on Posix if the +** These #defines should enable >2GB file support on POSIX if the ** underlying operating system supports it. If the OS lacks ** large file support, or if the OS is windows, these should be no-ops. ** +** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any +** system #includes. Hence, this block of code must be the very first +** code in all source files. +** ** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch ** on the compiler command line. This is necessary if you are compiling -** on a recent machine (ex: RedHat 7.2) but you want your code to work -** on an older machine (ex: RedHat 6.0). If you compile on RedHat 7.2 +** on a recent machine (ex: Red Hat 7.2) but you want your code to work +** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 ** without this option, LFS is enable. But LFS does not exist in the kernel -** in RedHat 6.0, so the code won't work. Hence, for maximum binary +** in Red Hat 6.0, so the code won't work. Hence, for maximum binary ** portability you should omit LFS. ** -** Similar is true for MacOS. LFS is only supported on MacOS 9 and later. +** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. */ #ifndef SQLITE_DISABLE_LFS # define _LARGE_FILE 1 @@ -56,6 +109,185 @@ # define _LARGEFILE_SOURCE 1 #endif + +/* +** The SQLITE_THREADSAFE macro must be defined as either 0 or 1. +** Older versions of SQLite used an optional THREADSAFE macro. +** We support that for legacy +*/ +#if !defined(SQLITE_THREADSAFE) +#if defined(THREADSAFE) +# define SQLITE_THREADSAFE THREADSAFE +#else +# define SQLITE_THREADSAFE 1 +#endif +#endif + +/* +** The SQLITE_DEFAULT_MEMSTATUS macro must be defined as either 0 or 1. +** It determines whether or not the features related to +** SQLITE_CONFIG_MEMSTATUS are available by default or not. This value can +** be overridden at runtime using the sqlite3_config() API. 
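The intent of the SQLITE_INT_TO_PTR/SQLITE_PTR_TO_INT macros defined above is simply a warning-free round trip of a small integer through a void*. A minimal sketch, assuming those macro definitions are in scope; the function name is illustrative:

#include <assert.h>

static void intPtrRoundTrip(void){
  void *p = SQLITE_INT_TO_PTR(42);     /* Pack a small integer into a void*  */
  assert( SQLITE_PTR_TO_INT(p)==42 );  /* Recover it without the casts that
                                       ** upset the MSVC 64-bit compiler     */
}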
+*/ +#if !defined(SQLITE_DEFAULT_MEMSTATUS) +# define SQLITE_DEFAULT_MEMSTATUS 1 +#endif + +/* +** Exactly one of the following macros must be defined in order to +** specify which memory allocation subsystem to use. +** +** SQLITE_SYSTEM_MALLOC // Use normal system malloc() +** SQLITE_MEMDEBUG // Debugging version of system malloc() +** SQLITE_MEMORY_SIZE // internal allocator #1 +** SQLITE_MMAP_HEAP_SIZE // internal mmap() allocator +** SQLITE_POW2_MEMORY_SIZE // internal power-of-two allocator +** +** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as +** the default. +*/ +#if defined(SQLITE_SYSTEM_MALLOC)+defined(SQLITE_MEMDEBUG)+\ + defined(SQLITE_MEMORY_SIZE)+defined(SQLITE_MMAP_HEAP_SIZE)+\ + defined(SQLITE_POW2_MEMORY_SIZE)>1 +# error "At most one of the following compile-time configuration options\ + is allows: SQLITE_SYSTEM_MALLOC, SQLITE_MEMDEBUG, SQLITE_MEMORY_SIZE,\ + SQLITE_MMAP_HEAP_SIZE, SQLITE_POW2_MEMORY_SIZE" +#endif +#if defined(SQLITE_SYSTEM_MALLOC)+defined(SQLITE_MEMDEBUG)+\ + defined(SQLITE_MEMORY_SIZE)+defined(SQLITE_MMAP_HEAP_SIZE)+\ + defined(SQLITE_POW2_MEMORY_SIZE)==0 +# define SQLITE_SYSTEM_MALLOC 1 +#endif + +/* +** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the +** sizes of memory allocations below this value where possible. +*/ +#if !defined(SQLITE_MALLOC_SOFT_LIMIT) +# define SQLITE_MALLOC_SOFT_LIMIT 1024 +#endif + +/* +** We need to define _XOPEN_SOURCE as follows in order to enable +** recursive mutexes on most Unix systems. But Mac OS X is different. +** The _XOPEN_SOURCE define causes problems for Mac OS X we are told, +** so it is omitted there. See ticket #2673. +** +** Later we learn that _XOPEN_SOURCE is poorly or incorrectly +** implemented on some systems. So we avoid defining it at all +** if it is already defined or if it is unneeded because we are +** not doing a threadsafe build. Ticket #2681. +** +** See also ticket #2741. +*/ +#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__) && SQLITE_THREADSAFE +# define _XOPEN_SOURCE 500 /* Needed to enable pthread recursive mutexes */ +#endif + +/* +** The TCL headers are only needed when compiling the TCL bindings. +*/ +#if defined(SQLITE_TCL) || defined(TCLSH) +# include +#endif + +/* +** Many people are failing to set -DNDEBUG=1 when compiling SQLite. +** Setting NDEBUG makes the code smaller and run faster. So the following +** lines are added to automatically set NDEBUG unless the -DSQLITE_DEBUG=1 +** option is set. Thus NDEBUG becomes an opt-in rather than an opt-out +** feature. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +# define NDEBUG 1 +#endif + +/* +** The testcase() macro is used to aid in coverage testing. When +** doing coverage testing, the condition inside the argument to +** testcase() must be evaluated both true and false in order to +** get full branch coverage. The testcase() macro is inserted +** to help ensure adequate test coverage in places where simple +** condition/decision coverage is inadequate. For example, testcase() +** can be used to make sure boundary values are tested. For +** bitmask tests, testcase() can be used to make sure each bit +** is significant and used at least once. On switch statements +** where multiple cases go to the same block of code, testcase() +** can insure that all cases are evaluated. 
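The run-time override mentioned in the SQLITE_DEFAULT_MEMSTATUS comment above goes through sqlite3_config(). A minimal sketch; the helper name is illustrative and the call must happen before the library is initialized:

#include <sqlite3.h>

/* Turn off the SQLITE_CONFIG_MEMSTATUS bookkeeping regardless of the
** compile-time default.  Must run before sqlite3_initialize(). */
static int disableMemstatus(void){
  return sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);
}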
+** +*/ +#ifdef SQLITE_COVERAGE_TEST + void sqlite3Coverage(int); +# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } +#else +# define testcase(X) +#endif + +/* +** The TESTONLY macro is used to enclose variable declarations or +** other bits of code that are needed to support the arguments +** within testcase() and assert() macros. +*/ +#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) +# define TESTONLY(X) X +#else +# define TESTONLY(X) +#endif + +/* +** Sometimes we need a small amount of code such as a variable initialization +** to setup for a later assert() statement. We do not want this code to +** appear when assert() is disabled. The following macro is therefore +** used to contain that setup code. The "VVA" acronym stands for +** "Verification, Validation, and Accreditation". In other words, the +** code within VVA_ONLY() will only run during verification processes. +*/ +#ifndef NDEBUG +# define VVA_ONLY(X) X +#else +# define VVA_ONLY(X) +#endif + +/* +** The ALWAYS and NEVER macros surround boolean expressions which +** are intended to always be true or false, respectively. Such +** expressions could be omitted from the code completely. But they +** are included in a few cases in order to enhance the resilience +** of SQLite to unexpected behavior - to make the code "self-healing" +** or "ductile" rather than being "brittle" and crashing at the first +** hint of unplanned behavior. +** +** In other words, ALWAYS and NEVER are added for defensive code. +** +** When doing coverage testing ALWAYS and NEVER are hard-coded to +** be true and false so that the unreachable code then specify will +** not be counted as untested code. +*/ +#if defined(SQLITE_COVERAGE_TEST) +# define ALWAYS(X) (1) +# define NEVER(X) (0) +#elif !defined(NDEBUG) +# define ALWAYS(X) ((X)?1:(assert(0),0)) +# define NEVER(X) ((X)?(assert(0),1):0) +#else +# define ALWAYS(X) (X) +# define NEVER(X) (X) +#endif + +/* +** The macro unlikely() is a hint that surrounds a boolean +** expression that is usually false. Macro likely() surrounds +** a boolean expression that is usually true. GCC is able to +** use these hints to generate better code, sometimes. +*/ +#if defined(__GNUC__) && 0 +# define likely(X) __builtin_expect((X),1) +# define unlikely(X) __builtin_expect((X),0) +#else +# define likely(X) !!(X) +# define unlikely(X) !!(X) +#endif + #include "sqlite3.h" #include "hash.h" #include "parse.h" @@ -65,8 +297,6 @@ #include #include -#define sqlite3_isnan(X) ((X)!=(X)) - /* ** If compiling for a processor that lacks floating point support, ** substitute integer for floating-point @@ -75,11 +305,12 @@ # define double sqlite_int64 # define LONGDOUBLE_TYPE sqlite_int64 # ifndef SQLITE_BIG_DBL -# define SQLITE_BIG_DBL (0x7fffffffffffffff) +# define SQLITE_BIG_DBL (((sqlite3_int64)1)<<60) # endif # define SQLITE_OMIT_DATETIME_FUNCS 1 # define SQLITE_OMIT_TRACE 1 # undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT +# undef SQLITE_HAVE_ISNAN #endif #ifndef SQLITE_BIG_DBL # define SQLITE_BIG_DBL (1e99) @@ -122,11 +353,11 @@ #endif /* -** Provide a default value for TEMP_STORE in case it is not specified +** Provide a default value for SQLITE_TEMP_STORE in case it is not specified ** on the command-line */ -#ifndef TEMP_STORE -# define TEMP_STORE 1 +#ifndef SQLITE_TEMP_STORE +# define SQLITE_TEMP_STORE 1 #endif /* @@ -155,19 +386,39 @@ ** cc '-DUINTPTR_TYPE=long long int' ... 
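The coverage and defensive macros above are used inline in ordinary library code. A small illustrative sketch, assuming the macro definitions from the hunk above are in scope; the helper function and its bounds are hypothetical:

static int clampToByte(int n){
  testcase( n==255 );          /* Boundary value: make sure tests reach it  */
  testcase( n==256 );          /* First value that actually gets clamped    */
  if( NEVER(n<0) ) n = 0;      /* "Cannot happen", but fail soft if it does */
  if( n>255 ) n = 255;
  return n;
}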
*/ #ifndef UINT32_TYPE -# define UINT32_TYPE unsigned int +# ifdef HAVE_UINT32_T +# define UINT32_TYPE uint32_t +# else +# define UINT32_TYPE unsigned int +# endif #endif #ifndef UINT16_TYPE -# define UINT16_TYPE unsigned short int +# ifdef HAVE_UINT16_T +# define UINT16_TYPE uint16_t +# else +# define UINT16_TYPE unsigned short int +# endif #endif #ifndef INT16_TYPE -# define INT16_TYPE short int +# ifdef HAVE_INT16_T +# define INT16_TYPE int16_t +# else +# define INT16_TYPE short int +# endif #endif #ifndef UINT8_TYPE -# define UINT8_TYPE unsigned char +# ifdef HAVE_UINT8_T +# define UINT8_TYPE uint8_t +# else +# define UINT8_TYPE unsigned char +# endif #endif #ifndef INT8_TYPE -# define INT8_TYPE signed char +# ifdef HAVE_INT8_T +# define INT8_TYPE int8_t +# else +# define INT8_TYPE signed char +# endif #endif #ifndef LONGDOUBLE_TYPE # define LONGDOUBLE_TYPE long double @@ -178,14 +429,27 @@ typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ typedef INT16_TYPE i16; /* 2-byte signed integer */ typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ -typedef UINT8_TYPE i8; /* 1-byte signed integer */ +typedef INT8_TYPE i8; /* 1-byte signed integer */ + +/* +** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value +** that can be stored in a u32 without loss of data. The value +** is 0x00000000ffffffff. But because of quirks of some compilers, we +** have to specify the value in the less intuitive manner shown: +*/ +#define SQLITE_MAX_U32 ((((u64)1)<<32)-1) /* ** Macros to determine whether the machine is big or little endian, ** evaluated at runtime. */ +#ifdef SQLITE_AMALGAMATION +const int sqlite3one = 1; +#else extern const int sqlite3one; -#if defined(i386) || defined(__i386__) || defined(_M_IX86) +#endif +#if defined(i386) || defined(__i386__) || defined(_M_IX86)\ + || defined(__x86_64) || defined(__x86_64__) # define SQLITE_BIGENDIAN 0 # define SQLITE_LITTLEENDIAN 1 # define SQLITE_UTF16NATIVE SQLITE_UTF16LE @@ -196,6 +460,31 @@ #endif /* +** Constants for the largest and smallest possible 64-bit signed integers. +** These macros are designed to work correctly on both 32-bit and 64-bit +** compilers. +*/ +#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) +#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) + +/* +** Round up a number to the next larger multiple of 8. This is used +** to force 8-byte alignment on 64-bit architectures. +*/ +#define ROUND8(x) (((x)+7)&~7) + +/* +** Round down to the nearest multiple of 8 +*/ +#define ROUNDDOWN8(x) ((x)&~7) + +/* +** Assert that the pointer X is aligned to an 8-byte boundary. +*/ +#define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) + + +/* ** An instance of the following structure is used to store the busy-handler ** callback for a given sqlite handle. ** @@ -212,83 +501,6 @@ }; /* -** Defer sourcing vdbe.h and btree.h until after the "u8" and -** "BusyHandler typedefs. -*/ -#include "vdbe.h" -#include "btree.h" -#include "pager.h" - -#ifdef SQLITE_MEMDEBUG -/* -** The following global variables are used for testing and debugging -** only. They only work if SQLITE_MEMDEBUG is defined. 
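A quick illustration of the rounding helpers defined above, assuming those macro definitions are in scope; the function name is illustrative:

#include <assert.h>

static void round8Demo(void){
  assert( ROUND8(13)==16 );       /* Next multiple of 8, used for alignment */
  assert( ROUND8(16)==16 );       /* Already-aligned values are unchanged   */
  assert( ROUNDDOWN8(13)==8 );    /* Previous multiple of 8                 */
}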
-*/ -extern int sqlite3_nMalloc; /* Number of sqliteMalloc() calls */ -extern int sqlite3_nFree; /* Number of sqliteFree() calls */ -extern int sqlite3_iMallocFail; /* Fail sqliteMalloc() after this many calls */ -extern int sqlite3_iMallocReset; /* Set iMallocFail to this when it reaches 0 */ - -extern void *sqlite3_pFirst; /* Pointer to linked list of allocations */ -extern int sqlite3_nMaxAlloc; /* High water mark of ThreadData.nAlloc */ -extern int sqlite3_mallocDisallowed; /* assert() in sqlite3Malloc() if set */ -extern int sqlite3_isFail; /* True if all malloc calls should fail */ -extern const char *sqlite3_zFile; /* Filename to associate debug info with */ -extern int sqlite3_iLine; /* Line number for debug info */ - -#define ENTER_MALLOC (sqlite3_zFile = __FILE__, sqlite3_iLine = __LINE__) -#define sqliteMalloc(x) (ENTER_MALLOC, sqlite3Malloc(x,1)) -#define sqliteMallocRaw(x) (ENTER_MALLOC, sqlite3MallocRaw(x,1)) -#define sqliteRealloc(x,y) (ENTER_MALLOC, sqlite3Realloc(x,y)) -#define sqliteStrDup(x) (ENTER_MALLOC, sqlite3StrDup(x)) -#define sqliteStrNDup(x,y) (ENTER_MALLOC, sqlite3StrNDup(x,y)) -#define sqliteReallocOrFree(x,y) (ENTER_MALLOC, sqlite3ReallocOrFree(x,y)) - -#else - -#define ENTER_MALLOC 0 -#define sqliteMalloc(x) sqlite3Malloc(x,1) -#define sqliteMallocRaw(x) sqlite3MallocRaw(x,1) -#define sqliteRealloc(x,y) sqlite3Realloc(x,y) -#define sqliteStrDup(x) sqlite3StrDup(x) -#define sqliteStrNDup(x,y) sqlite3StrNDup(x,y) -#define sqliteReallocOrFree(x,y) sqlite3ReallocOrFree(x,y) - -#endif - -/* Variable sqlite3MallocHasFailed is set to true after a malloc() -** failure occurs. -** -** The sqlite3MallocFailed() macro returns true if a malloc has failed -** in this thread since the last call to sqlite3ApiExit(), or false -** otherwise. -*/ -extern int sqlite3MallocHasFailed; -#define sqlite3MallocFailed() (sqlite3MallocHasFailed && sqlite3OsInMutex(1)) - -#define sqliteFree(x) sqlite3FreeX(x) -#define sqliteAllocSize(x) sqlite3AllocSize(x) - -/* -** An instance of this structure might be allocated to store information -** specific to a single thread. -*/ -struct ThreadData { - int dummy; /* So that this structure is never empty */ - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - int nSoftHeapLimit; /* Suggested max mem allocation. No limit if <0 */ - int nAlloc; /* Number of bytes currently allocated */ - Pager *pPager; /* Linked list of all pagers in this thread */ -#endif - -#ifndef SQLITE_OMIT_SHARED_CACHE - u8 useSharedData; /* True if shared pagers and schemas are enabled */ - BtShared *pBtree; /* Linked list of all currently open BTrees */ -#endif -}; - -/* ** Name of the master database table. The master database table ** is a special table that holds the names and attributes of all ** user tables and indices. @@ -310,41 +522,115 @@ ** A convenience macro that returns the number of elements in ** an array. */ -#define ArraySize(X) (sizeof(X)/sizeof(X[0])) +#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0]))) + +/* +** The following value as a destructor means to use sqlite3DbFree(). +** This is an internal extension to SQLITE_STATIC and SQLITE_TRANSIENT. +*/ +#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3DbFree) + +/* +** When SQLITE_OMIT_WSD is defined, it means that the target platform does +** not support Writable Static Data (WSD) such as global and static variables. +** All variables must either be on the stack or dynamically allocated from +** the heap. 
When WSD is unsupported, the variable declarations scattered +** throughout the SQLite code must become constants instead. The SQLITE_WSD +** macro is used for this purpose. And instead of referencing the variable +** directly, we use its constant as a key to lookup the run-time allocated +** buffer that holds real variable. The constant is also the initializer +** for the run-time allocated buffer. +** +** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL +** macros become no-ops and have zero performance impact. +*/ +#ifdef SQLITE_OMIT_WSD + #define SQLITE_WSD const + #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v))) + #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config) + int sqlite3_wsd_init(int N, int J); + void *sqlite3_wsd_find(void *K, int L); +#else + #define SQLITE_WSD + #define GLOBAL(t,v) v + #define sqlite3GlobalConfig sqlite3Config +#endif + +/* +** The following macros are used to suppress compiler warnings and to +** make it clear to human readers when a function parameter is deliberately +** left unused within the body of a function. This usually happens when +** a function is called via a function pointer. For example the +** implementation of an SQL aggregate step callback may not use the +** parameter indicating the number of arguments passed to the aggregate, +** if it knows that this is enforced elsewhere. +** +** When a function parameter is not used at all within the body of a function, +** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. +** However, these macros may also be used to suppress warnings related to +** parameters that may or may not be used depending on compilation options. +** For example those parameters only used in assert() statements. In these +** cases the parameters are named as per the usual conventions. +*/ +#define UNUSED_PARAMETER(x) (void)(x) +#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) /* ** Forward references to structures */ typedef struct AggInfo AggInfo; typedef struct AuthContext AuthContext; +typedef struct AutoincInfo AutoincInfo; +typedef struct Bitvec Bitvec; +typedef struct RowSet RowSet; typedef struct CollSeq CollSeq; typedef struct Column Column; typedef struct Db Db; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; +typedef struct ExprSpan ExprSpan; typedef struct FKey FKey; typedef struct FuncDef FuncDef; +typedef struct FuncDefHash FuncDefHash; typedef struct IdList IdList; typedef struct Index Index; typedef struct KeyClass KeyClass; typedef struct KeyInfo KeyInfo; +typedef struct Lookaside Lookaside; +typedef struct LookasideSlot LookasideSlot; typedef struct Module Module; typedef struct NameContext NameContext; typedef struct Parse Parse; +typedef struct Savepoint Savepoint; typedef struct Select Select; typedef struct SrcList SrcList; -typedef struct ThreadData ThreadData; +typedef struct StrAccum StrAccum; typedef struct Table Table; typedef struct TableLock TableLock; typedef struct Token Token; typedef struct TriggerStack TriggerStack; typedef struct TriggerStep TriggerStep; typedef struct Trigger Trigger; +typedef struct UnpackedRecord UnpackedRecord; +typedef struct Walker Walker; +typedef struct WherePlan WherePlan; typedef struct WhereInfo WhereInfo; typedef struct WhereLevel WhereLevel; +/* +** Defer sourcing vdbe.h and btree.h until after the "u8" and +** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque +** pointer types (i.e. 
FuncDef) defined above. +*/ +#include "btree.h" +#include "vdbe.h" +#include "pager.h" +#include "pcache.h" + #include "os.h" +#include "mutex.h" + /* ** Each database file to be accessed by the system is an instance @@ -357,9 +643,7 @@ char *zName; /* Name of this database */ Btree *pBt; /* The B*Tree structure for this database file */ u8 inTrans; /* 0: not writable. 1: Transaction. 2: Checkpoint */ - u8 safety_level; /* How aggressive at synching data to disk */ - void *pAux; /* Auxiliary data. Usually NULL */ - void (*xFreeAux)(void*); /* Routine to free pAux */ + u8 safety_level; /* How aggressive at syncing data to disk */ Schema *pSchema; /* Pointer to database schema (possibly shared) */ }; @@ -379,7 +663,6 @@ Hash tblHash; /* All tables indexed by name */ Hash idxHash; /* All (named) indices indexed by name */ Hash trigHash; /* All triggers indexed by name */ - Hash aFKey; /* Foreign keys indexed by to-table */ Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */ u8 file_format; /* Schema format version for this file */ u8 enc; /* Text encoding used by this database */ @@ -413,6 +696,55 @@ #define DB_UnresetViews 0x0002 /* Some views have defined column names */ #define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */ +/* +** The number of different kinds of things that can be limited +** using the sqlite3_limit() interface. +*/ +#define SQLITE_N_LIMIT (SQLITE_LIMIT_VARIABLE_NUMBER+1) + +/* +** Lookaside malloc is a set of fixed-size buffers that can be used +** to satisfy small transient memory allocation requests for objects +** associated with a particular database connection. The use of +** lookaside malloc provides a significant performance enhancement +** (approx 10%) by avoiding numerous malloc/free requests while parsing +** SQL statements. +** +** The Lookaside structure holds configuration information about the +** lookaside malloc subsystem. Each available memory allocation in +** the lookaside subsystem is stored on a linked list of LookasideSlot +** objects. +** +** Lookaside allocations are only allowed for objects that are associated +** with a particular database connection. Hence, schema information cannot +** be stored in lookaside because in shared cache mode the schema information +** is shared by multiple database connections. Therefore, while parsing +** schema information, the Lookaside.bEnabled flag is cleared so that +** lookaside allocations are not used to construct the schema objects. +*/ +struct Lookaside { + u16 sz; /* Size of each buffer in bytes */ + u8 bEnabled; /* False to disable new lookaside allocations */ + u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */ + int nOut; /* Number of buffers currently checked out */ + int mxOut; /* Highwater mark for nOut */ + LookasideSlot *pFree; /* List of available buffers */ + void *pStart; /* First byte of available memory space */ + void *pEnd; /* First byte past end of available space */ +}; +struct LookasideSlot { + LookasideSlot *pNext; /* Next buffer in the list of free buffers */ +}; + +/* +** A hash table for function definitions. +** +** Hash each FuncDef structure into one of the FuncDefHash.a[] slots. +** Collisions are on the FuncDef.pHash chain. +*/ +struct FuncDefHash { + FuncDef *a[23]; /* Hash table for functions */ +}; /* ** Each database is an instance of the following structure. @@ -441,29 +773,38 @@ ** consistently. 
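Applications size the lookaside allocator described above from the public configuration API. A hedged sketch using the process-wide default; the 512-byte/128-slot numbers are illustrative and SQLITE_CONFIG_LOOKASIDE is assumed to be available in this build:

#include <sqlite3.h>

/* Set the default lookaside configuration for new connections: 128 slots
** of 512 bytes each.  Must be called before sqlite3_initialize(). */
static int configureLookaside(void){
  return sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 512, 128);
}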
*/ struct sqlite3 { + sqlite3_vfs *pVfs; /* OS Interface */ int nDb; /* Number of backends currently in use */ Db *aDb; /* All backends */ - int flags; /* Miscellanous flags. See below */ + int flags; /* Miscellaneous flags. See below */ + int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */ int errCode; /* Most recent error code (SQLITE_*) */ int errMask; /* & result codes with this before returning */ u8 autoCommit; /* The auto-commit flag. */ u8 temp_store; /* 1: file 2: memory 0: default */ + u8 mallocFailed; /* True if we have seen a malloc failure */ + u8 dfltLockMode; /* Default locking-mode for attached dbs */ + u8 dfltJournalMode; /* Default journal mode for attached dbs */ + signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */ + int nextPagesize; /* Pagesize after VACUUM if >0 */ int nTable; /* Number of tables in the database */ CollSeq *pDfltColl; /* The default collating sequence (BINARY) */ i64 lastRowid; /* ROWID of most recent insert (see above) */ - i64 priorNewRowid; /* Last randomly generated ROWID */ - int magic; /* Magic number for detect library misuse */ + u32 magic; /* Magic number for detect library misuse */ int nChange; /* Value returned by sqlite3_changes() */ int nTotalChange; /* Value returned by sqlite3_total_changes() */ + sqlite3_mutex *mutex; /* Connection mutex */ + int aLimit[SQLITE_N_LIMIT]; /* Limits */ struct sqlite3InitInfo { /* Information used during initialization */ int iDb; /* When back is being initialized */ int newTnum; /* Rootpage of table being initialized */ u8 busy; /* TRUE if currently initializing */ } init; int nExtension; /* Number of loaded extensions */ - void **aExtension; /* Array of shared libraray handles */ + void **aExtension; /* Array of shared library handles */ struct Vdbe *pVdbe; /* List of active virtual machines */ - int activeVdbeCnt; /* Number of vdbes currently executing */ + int activeVdbeCnt; /* Number of VDBEs currently executing */ + int writeVdbeCnt; /* Number of active VDBEs that are writing */ void (*xTrace)(void*,const char*); /* Trace function */ void *pTraceArg; /* Argument to the trace function */ void (*xProfile)(void*,const char*,u64); /* Profiling function */ @@ -481,9 +822,10 @@ char *zErrMsg; /* Most recent error message (UTF-8 encoded) */ char *zErrMsg16; /* Most recent error message (UTF-16 encoded) */ union { - int isInterrupted; /* True if sqlite3_interrupt has been called */ + volatile int isInterrupted; /* True if sqlite3_interrupt has been called */ double notUsed1; /* Spacer */ } u1; + Lookaside lookaside; /* Lookaside malloc configuration */ #ifndef SQLITE_OMIT_AUTHORIZATION int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); /* Access authorization function */ @@ -500,15 +842,33 @@ sqlite3_vtab **aVTrans; /* Virtual tables with open transactions */ int nVTrans; /* Allocated size of aVTrans */ #endif - Hash aFunc; /* All functions that can be in SQL exprs */ + FuncDefHash aFunc; /* Hash table of connection functions */ Hash aCollSeq; /* All collating sequences */ BusyHandler busyHandler; /* Busy callback */ int busyTimeout; /* Busy handler timeout, in msec */ Db aDbStatic[2]; /* Static space for the 2 default backends */ -#ifdef SQLITE_SSE - sqlite3_stmt *pFetch; /* Used by SSE to fetch stored statements */ + Savepoint *pSavepoint; /* List of active savepoints */ + int nSavepoint; /* Number of non-transaction savepoints */ + int nStatement; /* Number of nested statement-transactions */ + u8 isTransactionSavepoint; /* True if the outermost savepoint is a 
TS */ + +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + /* The following variables are all protected by the STATIC_MASTER + ** mutex, not by sqlite3.mutex. They are used by code in notify.c. + ** + ** When X.pUnlockConnection==Y, that means that X is waiting for Y to + ** unlock so that it can proceed. + ** + ** When X.pBlockingConnection==Y, that means that something that X tried + ** tried to do recently failed with an SQLITE_LOCKED error due to locks + ** held by Y. + */ + sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */ + sqlite3 *pUnlockConnection; /* Connection to watch for unlock */ + void *pUnlockArg; /* Argument to xUnlockNotify */ + void (*xUnlockNotify)(void **, int); /* Unlock notify callback */ + sqlite3 *pNextBlocked; /* Next in list of all blocked connections */ #endif - u8 dfltLockMode; /* Default locking-mode for attached dbs */ }; /* @@ -545,6 +905,9 @@ #define SQLITE_LoadExtension 0x00020000 /* Enable load_extension */ #define SQLITE_RecoveryMode 0x00040000 /* Ignore schema errors */ +#define SQLITE_SharedCache 0x00080000 /* Cache sharing is enabled */ +#define SQLITE_CommitBusy 0x00200000 /* In the process of committing */ +#define SQLITE_ReverseOrder 0x00400000 /* Reverse unordered SELECTs */ /* ** Possible values for the sqlite.magic field. @@ -553,6 +916,7 @@ */ #define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */ #define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */ +#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */ #define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */ #define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */ @@ -565,17 +929,84 @@ struct FuncDef { i16 nArg; /* Number of arguments. -1 means unlimited */ u8 iPrefEnc; /* Preferred text encoding (SQLITE_UTF8, 16LE, 16BE) */ - u8 needCollSeq; /* True if sqlite3GetFuncCollSeq() might be called */ u8 flags; /* Some combination of SQLITE_FUNC_* */ void *pUserData; /* User data parameter */ FuncDef *pNext; /* Next function with same name */ void (*xFunc)(sqlite3_context*,int,sqlite3_value**); /* Regular function */ void (*xStep)(sqlite3_context*,int,sqlite3_value**); /* Aggregate step */ - void (*xFinalize)(sqlite3_context*); /* Aggregate finializer */ - char zName[1]; /* SQL name of the function. MUST BE LAST */ + void (*xFinalize)(sqlite3_context*); /* Aggregate finalizer */ + char *zName; /* SQL name of the function. */ + FuncDef *pHash; /* Next with a different name but the same hash */ +}; + +/* +** Possible values for FuncDef.flags +*/ +#define SQLITE_FUNC_LIKE 0x01 /* Candidate for the LIKE optimization */ +#define SQLITE_FUNC_CASE 0x02 /* Case-sensitive LIKE-type function */ +#define SQLITE_FUNC_EPHEM 0x04 /* Ephemeral. Delete with VDBE */ +#define SQLITE_FUNC_NEEDCOLL 0x08 /* sqlite3GetFuncCollSeq() might be called */ +#define SQLITE_FUNC_PRIVATE 0x10 /* Allowed for internal use only */ +#define SQLITE_FUNC_COUNT 0x20 /* Built-in count(*) aggregate */ + +/* +** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are +** used to create the initializers for the FuncDef structures. +** +** FUNCTION(zName, nArg, iArg, bNC, xFunc) +** Used to create a scalar function definition of a function zName +** implemented by C function xFunc that accepts nArg arguments. The +** value passed as iArg is cast to a (void*) and made available +** as the user-data (sqlite3_user_data()) for the function. If +** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set. 
+** +** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal) +** Used to create an aggregate function definition implemented by +** the C functions xStep and xFinal. The first four parameters +** are interpreted in the same way as the first 4 parameters to +** FUNCTION(). +** +** LIKEFUNC(zName, nArg, pArg, flags) +** Used to create a scalar function definition of a function zName +** that accepts nArg arguments and is implemented by a call to C +** function likeFunc. Argument pArg is cast to a (void *) and made +** available as the function user-data (sqlite3_user_data()). The +** FuncDef.flags variable is set to the value passed as the flags +** parameter. +*/ +#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ + {nArg, SQLITE_UTF8, bNC*SQLITE_FUNC_NEEDCOLL, \ + SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0} +#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ + {nArg, SQLITE_UTF8, bNC*SQLITE_FUNC_NEEDCOLL, \ + pArg, 0, xFunc, 0, 0, #zName, 0} +#define LIKEFUNC(zName, nArg, arg, flags) \ + {nArg, SQLITE_UTF8, flags, (void *)arg, 0, likeFunc, 0, 0, #zName, 0} +#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \ + {nArg, SQLITE_UTF8, nc*SQLITE_FUNC_NEEDCOLL, \ + SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0} + +/* +** All current savepoints are stored in a linked list starting at +** sqlite3.pSavepoint. The first element in the list is the most recently +** opened savepoint. Savepoints are added to the list by the vdbe +** OP_Savepoint instruction. +*/ +struct Savepoint { + char *zName; /* Savepoint name (nul-terminated) */ + Savepoint *pNext; /* Parent savepoint (if any) */ }; /* +** The following are used as the second parameter to sqlite3Savepoint(), +** and as the P1 argument to the OP_Savepoint instruction. +*/ +#define SAVEPOINT_BEGIN 0 +#define SAVEPOINT_RELEASE 1 +#define SAVEPOINT_ROLLBACK 2 + + +/* ** Each SQLite module (virtual table definition) is defined by an ** instance of the following structure, stored in the sqlite3.aModule ** hash table. @@ -588,19 +1019,13 @@ }; /* -** Possible values for FuncDef.flags -*/ -#define SQLITE_FUNC_LIKE 0x01 /* Candidate for the LIKE optimization */ -#define SQLITE_FUNC_CASE 0x02 /* Case-sensitive LIKE-type function */ -#define SQLITE_FUNC_EPHEM 0x04 /* Ephermeral. Delete with VDBE */ - -/* ** information about each column of an SQL table is held in an instance ** of this structure. */ struct Column { char *zName; /* Name of this column */ Expr *pDflt; /* Default value of this column */ + char *zDflt; /* Original text of the default value */ char *zType; /* Data type for this column */ char *zColl; /* Collating sequence. If NULL, use the default */ u8 notNull; /* True if there is a NOT NULL constraint */ @@ -616,7 +1041,7 @@ ** structure. Conceptually, a collating sequence consists of a name and ** a comparison routine that defines the order of that sequence. ** -** There may two seperate implementations of the collation function, one +** There may two separate implementations of the collation function, one ** that processes text in UTF-8 encoding (CollSeq.xCmp) and another that ** processes text encoded in UTF-16 (CollSeq.xCmp16), using the machine ** native byte order. 
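The FUNCTION() and AGGREGATE() initializer macros defined above are used to build static tables of built-in function definitions. A minimal illustrative sketch; the implementations named here are hypothetical prototypes standing in for real callbacks with the usual signatures:

static void absFunc(sqlite3_context*,int,sqlite3_value**);
static void sumStep(sqlite3_context*,int,sqlite3_value**);
static void sumFinalize(sqlite3_context*);

/* Table of built-in definitions, expanded by the macros above. */
static FuncDef aDemoFuncs[] = {
  FUNCTION(abs, 1, 0, 0, absFunc),
  AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize),
};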
When a collation sequence is invoked, SQLite selects @@ -642,7 +1067,7 @@ }; /* -** Allowed values of CollSeq flags: +** Allowed values of CollSeq.type: */ #define SQLITE_COLL_BINARY 1 /* The default memcmp() collating sequence */ #define SQLITE_COLL_NOCASE 2 /* The built-in NOCASE collating sequence */ @@ -660,11 +1085,11 @@ ** ** These used to have mnemonic name like 'i' for SQLITE_AFF_INTEGER and ** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve -** the speed a little by number the values consecutively. +** the speed a little by numbering the values consecutively. ** ** But rather than start with 0 or 1, we begin with 'a'. That way, ** when multiple affinity types are concatenated into a string and -** used as the P3 operand, they will be more readable. +** used as the P4 operand, they will be more readable. ** ** Note also that the numeric types are grouped together so that testing ** for a numeric type is a single comparison. @@ -678,6 +1103,19 @@ #define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC) /* +** The SQLITE_AFF_MASK values masks off the significant bits of an +** affinity value. +*/ +#define SQLITE_AFF_MASK 0x67 + +/* +** Additional bit values that can be ORed with an affinity without +** changing the affinity. +*/ +#define SQLITE_JUMPIFNULL 0x08 /* jumps if either operand is NULL */ +#define SQLITE_STOREP2 0x10 /* Store result in reg[P2] rather than jump */ + +/* ** Each SQL table is represented in memory by an instance of the ** following structure. ** @@ -693,14 +1131,14 @@ ** that the datatype of the PRIMARY KEY must be INTEGER for this field to ** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of ** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid -** is generated for each row of the table. Table.hasPrimKey is true if +** is generated for each row of the table. TF_HasPrimaryKey is set if ** the table has any PRIMARY KEY, INTEGER or otherwise. ** ** Table.tnum is the page number for the root BTree page of the table in the ** database file. If Table.iDb is the index of the database table backend ** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that -** holds temporary tables and indices. If Table.isEphem -** is true, then the table is stored in a file that is automatically deleted +** holds temporary tables and indices. If TF_Ephemeral is set +** then the table is stored in a file that is automatically deleted ** when the VDBE cursor to the table is closed. In this case Table.tnum ** refers VDBE cursor number that holds the table open, not to the root ** page number. Transient tables are used to hold the results of a @@ -708,46 +1146,55 @@ ** of a SELECT statement. */ struct Table { - char *zName; /* Name of the table */ - int nCol; /* Number of columns in this table */ - Column *aCol; /* Information about each column */ - int iPKey; /* If not less then 0, use aCol[iPKey] as the primary key */ - Index *pIndex; /* List of SQL indexes on this table. */ - int tnum; /* Root BTree node for this table (see note above) */ - Select *pSelect; /* NULL for tables. Points to definition if a view. */ - int nRef; /* Number of pointers to this Table */ - Trigger *pTrigger; /* List of SQL triggers on this table */ - FKey *pFKey; /* Linked list of all foreign keys in this table */ - char *zColAff; /* String defining the affinity of each column */ + sqlite3 *dbMem; /* DB connection used for lookaside allocations. 
*/ + char *zName; /* Name of the table or view */ + int iPKey; /* If not negative, use aCol[iPKey] as the primary key */ + int nCol; /* Number of columns in this table */ + Column *aCol; /* Information about each column */ + Index *pIndex; /* List of SQL indexes on this table. */ + int tnum; /* Root BTree node for this table (see note above) */ + Select *pSelect; /* NULL for tables. Points to definition if a view. */ + u16 nRef; /* Number of pointers to this Table */ + u8 tabFlags; /* Mask of TF_* values */ + u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ + FKey *pFKey; /* Linked list of all foreign keys in this table */ + char *zColAff; /* String defining the affinity of each column */ #ifndef SQLITE_OMIT_CHECK - Expr *pCheck; /* The AND of all CHECK constraints */ + Expr *pCheck; /* The AND of all CHECK constraints */ #endif #ifndef SQLITE_OMIT_ALTERTABLE - int addColOffset; /* Offset in CREATE TABLE statement to add a new column */ + int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ #endif - u8 readOnly; /* True if this table should not be written by the user */ - u8 isEphem; /* True if created using OP_OpenEphermeral */ - u8 hasPrimKey; /* True if there exists a primary key */ - u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ - u8 autoInc; /* True if the integer primary key is autoincrement */ #ifndef SQLITE_OMIT_VIRTUALTABLE - u8 isVirtual; /* True if this is a virtual table */ - u8 isCommit; /* True once the CREATE TABLE has been committed */ - Module *pMod; /* Pointer to the implementation of the module */ - sqlite3_vtab *pVtab; /* Pointer to the module instance */ - int nModuleArg; /* Number of arguments to the module */ - char **azModuleArg; /* Text of all module args. [0] is module name */ + Module *pMod; /* Pointer to the implementation of the module */ + sqlite3_vtab *pVtab; /* Pointer to the module instance */ + int nModuleArg; /* Number of arguments to the module */ + char **azModuleArg; /* Text of all module args. [0] is module name */ #endif - Schema *pSchema; + Trigger *pTrigger; /* List of triggers stored in pSchema */ + Schema *pSchema; /* Schema that contains this table */ + Table *pNextZombie; /* Next on the Parse.pZombieTab list */ }; /* +** Allowed values for Tabe.tabFlags. +*/ +#define TF_Readonly 0x01 /* Read-only system table */ +#define TF_Ephemeral 0x02 /* An ephemeral table */ +#define TF_HasPrimaryKey 0x04 /* Table has a primary key */ +#define TF_Autoincrement 0x08 /* Integer primary key is autoincrement */ +#define TF_Virtual 0x10 /* Is a virtual table */ +#define TF_NeedMetadata 0x20 /* aCol[].zType and aCol[].pColl missing */ + + + +/* ** Test to see whether or not a table is a virtual table. This is ** done as a macro so that it will be optimized out when virtual ** table support is omitted from the build. */ #ifndef SQLITE_OMIT_VIRTUALTABLE -# define IsVirtual(X) ((X)->isVirtual) +# define IsVirtual(X) (((X)->tabFlags & TF_Virtual)!=0) # define IsHiddenColumn(X) ((X)->isHidden) #else # define IsVirtual(X) 0 @@ -771,32 +1218,25 @@ ** ** Each REFERENCES clause generates an instance of the following structure ** which is attached to the from-table. The to-table need not exist when -** the from-table is created. The existance of the to-table is not checked -** until an attempt is made to insert data into the from-table. -** -** The sqlite.aFKey hash table stores pointers to this structure -** given the name of a to-table. 
For each to-table, all foreign keys -** associated with that table are on a linked list using the FKey.pNextTo -** field. +** the from-table is created. The existence of the to-table is not checked. */ struct FKey { - Table *pFrom; /* The table that constains the REFERENCES clause */ + Table *pFrom; /* The table that contains the REFERENCES clause */ FKey *pNextFrom; /* Next foreign key in pFrom */ char *zTo; /* Name of table that the key points to */ - FKey *pNextTo; /* Next foreign key that points to zTo */ int nCol; /* Number of columns in this key */ - struct sColMap { /* Mapping of columns in pFrom to columns in zTo */ - int iFrom; /* Index of column in pFrom */ - char *zCol; /* Name of column in zTo. If 0 use PRIMARY KEY */ - } *aCol; /* One entry for each of nCol column s */ u8 isDeferred; /* True if constraint checking is deferred till COMMIT */ u8 updateConf; /* How to resolve conflicts that occur on UPDATE */ u8 deleteConf; /* How to resolve conflicts that occur on DELETE */ u8 insertConf; /* How to resolve conflicts that occur on INSERT */ + struct sColMap { /* Mapping of columns in pFrom to columns in zTo */ + int iFrom; /* Index of column in pFrom */ + char *zCol; /* Name of column in zTo. If 0 use PRIMARY KEY */ + } aCol[1]; /* One entry for each of nCol column s */ }; /* -** SQLite supports many different ways to resolve a contraint +** SQLite supports many different ways to resolve a constraint ** error. ROLLBACK processing means that a constraint violation ** causes the operation in process to fail and for the current transaction ** to be rolled back. ABORT processing means the operation in process @@ -839,20 +1279,48 @@ ** An instance of the following structure is passed as the first ** argument to sqlite3VdbeKeyCompare and is used to control the ** comparison of the two index keys. -** -** If the KeyInfo.incrKey value is true and the comparison would -** otherwise be equal, then return a result as if the second key -** were larger. */ struct KeyInfo { + sqlite3 *db; /* The database connection */ u8 enc; /* Text encoding - one of the TEXT_Utf* values */ - u8 incrKey; /* Increase 2nd key by epsilon before comparison */ - int nField; /* Number of entries in aColl[] */ + u16 nField; /* Number of entries in aColl[] */ u8 *aSortOrder; /* If defined an aSortOrder[i] is true, sort DESC */ CollSeq *aColl[1]; /* Collating sequence for each term of the key */ }; /* +** An instance of the following structure holds information about a +** single index record that has already been parsed out into individual +** values. +** +** A record is an object that contains one or more fields of data. +** Records are used to store the content of a table row and to store +** the key of an index. A blob encoding of a record is created by +** the OP_MakeRecord opcode of the VDBE and is disassembled by the +** OP_Column opcode. +** +** This structure holds a record that has already been disassembled +** into its constituent fields. +*/ +struct UnpackedRecord { + KeyInfo *pKeyInfo; /* Collation and sort-order information */ + u16 nField; /* Number of entries in apMem[] */ + u16 flags; /* Boolean settings. UNPACKED_... 
below */ + i64 rowid; /* Used by UNPACKED_PREFIX_SEARCH */ + Mem *aMem; /* Values */ +}; + +/* +** Allowed values of UnpackedRecord.flags +*/ +#define UNPACKED_NEED_FREE 0x0001 /* Memory is from sqlite3Malloc() */ +#define UNPACKED_NEED_DESTROY 0x0002 /* apMem[]s should all be destroyed */ +#define UNPACKED_IGNORE_ROWID 0x0004 /* Ignore trailing rowid on key1 */ +#define UNPACKED_INCRKEY 0x0008 /* Make this key an epsilon larger */ +#define UNPACKED_PREFIX_MATCH 0x0010 /* A prefix match is considered OK */ +#define UNPACKED_PREFIX_SEARCH 0x0020 /* A prefix match is considered OK */ + +/* ** Each SQL index is represented in memory by an ** instance of the following structure. ** @@ -899,13 +1367,12 @@ ** this structure. Tokens are also used as part of an expression. ** ** Note if Token.z==0 then Token.dyn and Token.n are undefined and -** may contain random values. Do not make any assuptions about Token.dyn +** may contain random values. Do not make any assumptions about Token.dyn ** and Token.n when Token.z==0. */ struct Token { - const unsigned char *z; /* Text of the token. Not NULL-terminated! */ - unsigned dyn : 1; /* True for malloced memory, false for static */ - unsigned n : 31; /* Number of characters in this token */ + const char *z; /* Text of the token. Not NULL-terminated! */ + unsigned int n; /* Number of characters in this token */ }; /* @@ -946,7 +1413,7 @@ Expr *pExpr; /* Expression encoding the function */ FuncDef *pFunc; /* The aggregate function implementation */ int iMem; /* Memory location that acts as accumulator */ - int iDistinct; /* Ephermeral table used to enforce DISTINCT */ + int iDistinct; /* Ephemeral table used to enforce DISTINCT */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ int nFuncAlloc; /* Number of slots allocated for aFunc[] */ @@ -956,19 +1423,27 @@ ** Each node of an expression in the parse tree is an instance ** of this structure. ** -** Expr.op is the opcode. The integer parser token codes are reused -** as opcodes here. For example, the parser defines TK_GE to be an integer -** code representing the ">=" operator. This same integer code is reused +** Expr.op is the opcode. The integer parser token codes are reused +** as opcodes here. For example, the parser defines TK_GE to be an integer +** code representing the ">=" operator. This same integer code is reused ** to represent the greater-than-or-equal-to operator in the expression ** tree. ** -** Expr.pRight and Expr.pLeft are subexpressions. Expr.pList is a list -** of argument if the expression is a function. -** -** Expr.token is the operator token for this node. For some expressions -** that have subexpressions, Expr.token can be the complete text that gave -** rise to the Expr. In the latter case, the token is marked as being -** a compound token. +** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB, +** or TK_STRING), then Expr.token contains the text of the SQL literal. If +** the expression is a variable (TK_VARIABLE), then Expr.token contains the +** variable name. Finally, if the expression is an SQL function (TK_FUNCTION), +** then Expr.token contains the name of the function. +** +** Expr.pRight and Expr.pLeft are the left and right subexpressions of a +** binary operator. Either or both may be NULL. +** +** Expr.x.pList is a list of arguments if the expression is an SQL function, +** a CASE expression or an IN expression of the form " IN (, ...)". 
+** Expr.x.pSelect is used if the expression is a sub-select or an expression of +** the form " IN (SELECT ...)". If the EP_xIsSelect bit is set in the +** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is +** valid. ** ** An expression of the form ID or ID.ID refers to a column in a table. ** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is @@ -978,10 +1453,9 @@ ** value is also stored in the Expr.iAgg column in the aggregate so that ** it can be accessed after all aggregates are computed. ** -** If the expression is a function, the Expr.iTable is an integer code -** representing which function. If the expression is an unbound variable -** marker (a question mark character '?' in the original SQL) then the -** Expr.iTable holds the index number for that variable. +** If the expression is an unbound variable marker (a question mark +** character '?' in the original SQL) then the Expr.iTable holds the index +** number for that variable. ** ** If the expression is a subquery then Expr.iColumn holds an integer ** register number containing the result of the subquery. If the @@ -989,34 +1463,60 @@ ** gives a different answer at different times during statement processing ** then iTable is the address of a subroutine that computes the subquery. ** -** The Expr.pSelect field points to a SELECT statement. The SELECT might -** be the right operand of an IN operator. Or, if a scalar SELECT appears -** in an expression the opcode is TK_SELECT and Expr.pSelect is the only -** operand. -** ** If the Expr is of type OP_Column, and the table it is selecting from ** is a disk table or the "old.*" pseudo-table, then pTab points to the ** corresponding table definition. +** +** ALLOCATION NOTES: +** +** Expr objects can use a lot of memory space in database schema. To +** help reduce memory requirements, sometimes an Expr object will be +** truncated. And to reduce the number of memory allocations, sometimes +** two or more Expr objects will be stored in a single memory allocation, +** together with Expr.zToken strings. +** +** If the EP_Reduced and EP_TokenOnly flags are set when +** an Expr object is truncated. When EP_Reduced is set, then all +** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees +** are contained within the same memory allocation. Note, however, that +** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately +** allocated, regardless of whether or not EP_Reduced is set. */ struct Expr { u8 op; /* Operation performed by this node */ char affinity; /* The affinity of the column or 0 if not a column */ - u16 flags; /* Various flags. See below */ + u16 flags; /* Various flags. EP_* See below */ + union { + char *zToken; /* Token value. Zero terminated and dequoted */ + int iValue; /* Integer value if EP_IntValue */ + } u; + + /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no + ** space is allocated for the fields below this point. An attempt to + ** access them will result in a segfault or malfunction. + *********************************************************************/ + + Expr *pLeft; /* Left subnode */ + Expr *pRight; /* Right subnode */ + union { + ExprList *pList; /* Function arguments or in " IN ( IN ()" */ - Table *pTab; /* Table for OP_Column expressions. */ - Schema *pSchema; + Table *pTab; /* Table for TK_COLUMN expressions. 
*/ #if SQLITE_MAX_EXPR_DEPTH>0 int nHeight; /* Height of the tree headed by this node */ #endif @@ -1025,15 +1525,41 @@ /* ** The following are the meanings of bits in the Expr.flags field. */ -#define EP_FromJoin 0x01 /* Originated in ON or USING clause of a join */ -#define EP_Agg 0x02 /* Contains one or more aggregate functions */ -#define EP_Resolved 0x04 /* IDs have been resolved to COLUMNs */ -#define EP_Error 0x08 /* Expression contains one or more errors */ -#define EP_Distinct 0x10 /* Aggregate function with DISTINCT keyword */ -#define EP_VarSelect 0x20 /* pSelect is correlated, not constant */ -#define EP_Dequoted 0x40 /* True if the string has been dequoted */ -#define EP_InfixFunc 0x80 /* True for an infix function: LIKE, GLOB, etc */ -#define EP_ExpCollate 0x100 /* Collating sequence specified explicitly */ +#define EP_FromJoin 0x0001 /* Originated in ON or USING clause of a join */ +#define EP_Agg 0x0002 /* Contains one or more aggregate functions */ +#define EP_Resolved 0x0004 /* IDs have been resolved to COLUMNs */ +#define EP_Error 0x0008 /* Expression contains one or more errors */ +#define EP_Distinct 0x0010 /* Aggregate function with DISTINCT keyword */ +#define EP_VarSelect 0x0020 /* pSelect is correlated, not constant */ +#define EP_DblQuoted 0x0040 /* token.z was originally in "..." */ +#define EP_InfixFunc 0x0080 /* True for an infix function: LIKE, GLOB, etc */ +#define EP_ExpCollate 0x0100 /* Collating sequence specified explicitly */ +#define EP_AnyAff 0x0200 /* Can take a cached column of any affinity */ +#define EP_FixedDest 0x0400 /* Result needed in a specific register */ +#define EP_IntValue 0x0800 /* Integer value contained in u.iValue */ +#define EP_xIsSelect 0x1000 /* x.pSelect is valid (otherwise x.pList is) */ + +#define EP_Reduced 0x2000 /* Expr struct is EXPR_REDUCEDSIZE bytes only */ +#define EP_TokenOnly 0x4000 /* Expr struct is EXPR_TOKENONLYSIZE bytes only */ +#define EP_Static 0x8000 /* Held in memory not obtained from malloc() */ + +/* +** The following are the meanings of bits in the Expr.flags2 field. +*/ +#define EP2_MallocedToken 0x0001 /* Need to sqlite3DbFree() Expr.zToken */ +#define EP2_Irreducible 0x0002 /* Cannot EXPRDUP_REDUCE this Expr */ + +/* +** The pseudo-routine sqlite3ExprSetIrreducible sets the EP2_Irreducible +** flag on an expression structure. This flag is used for VV&A only. The +** routine is implemented as a macro that only works when in debugging mode, +** so as not to burden production code. +*/ +#ifdef SQLITE_DEBUG +# define ExprSetIrreducible(X) (X)->flags2 |= EP2_Irreducible +#else +# define ExprSetIrreducible(X) +#endif /* ** These macros can be used to test, set, or clear bits in the @@ -1045,6 +1571,21 @@ #define ExprClearProperty(E,P) (E)->flags&=~(P) /* +** Macros to determine the number of bytes required by a normal Expr +** struct, an Expr struct with the EP_Reduced flag set in Expr.flags +** and an Expr struct with the EP_TokenOnly flag set. +*/ +#define EXPR_FULLSIZE sizeof(Expr) /* Full size */ +#define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */ +#define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */ + +/* +** Flags passed to the sqlite3ExprDup() function. See the header comment +** above sqlite3ExprDup() for details. +*/ +#define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */ + +/* ** A list of expressions. Each expression may optionally have a ** name. 
An expr/name combination can be used in several ways, such ** as the list of "expr AS ID" fields following a "SELECT" or in the @@ -1059,13 +1600,26 @@ struct ExprList_item { Expr *pExpr; /* The list of expressions */ char *zName; /* Token associated with this expression */ + char *zSpan; /* Original text of the expression */ u8 sortOrder; /* 1 for DESC or 0 for ASC */ - u8 isAgg; /* True if this is an aggregate like count(*) */ u8 done; /* A flag to indicate when processing is finished */ + u16 iCol; /* For ORDER BY, column number in result set */ + u16 iAlias; /* Index into Parse.aAlias[] for zName */ } *a; /* One entry for each expression */ }; /* +** An instance of this structure is used by the parser to record both +** the parse tree for an expression and the span of input text for an +** expression. +*/ +struct ExprSpan { + Expr *pExpr; /* The expression parse tree */ + const char *zStart; /* First character of input text */ + const char *zEnd; /* One character past the end of input text */ +}; + +/* ** An instance of this structure can hold a simple list of identifiers, ** such as the list "a,b,c" in the following statements: ** @@ -1099,6 +1653,11 @@ typedef u64 Bitmask; /* +** The number of bits in a Bitmask. "BMS" means "BitMask Size". +*/ +#define BMS ((int)(sizeof(Bitmask)*8)) + +/* ** The following structure describes the FROM clause of a SELECT statement. ** Each table or subquery in the FROM clause is a separate element of ** the SrcList.a[] array. @@ -1125,10 +1684,13 @@ Select *pSelect; /* A SELECT statement used in place of a table name */ u8 isPopulated; /* Temporary table associated with SELECT is populated */ u8 jointype; /* Type of join between this able and the previous */ + u8 notIndexed; /* True if there is a NOT INDEXED clause */ int iCursor; /* The VDBE cursor number used to access this table */ Expr *pOn; /* The ON clause of a join */ IdList *pUsing; /* The USING clause of a join */ - Bitmask colUsed; /* Bit N (1<<N) set if column N or pTab is used */ + Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */ + char *zIndex; /* Identifier from "INDEXED BY <zIndex>" clause */ + Index *pIndex; /* Index structure corresponding to zIndex, if any */ } a[1]; /* One entry for each identifier on the list */ }; @@ -1143,54 +1705,89 @@ #define JT_OUTER 0x0020 /* The "OUTER" keyword is present */ #define JT_ERROR 0x0040 /* unknown or unsupported join type */ + +/* +** A WherePlan object holds information that describes a lookup +** strategy. +** +** This object is intended to be opaque outside of the where.c module. +** It is included here only so that that compiler will know how big it +** is. None of the fields in this object should be used outside of +** the where.c module. +** +** Within the union, pIdx is only used when wsFlags&WHERE_INDEXED is true. +** pTerm is only used when wsFlags&WHERE_MULTI_OR is true. And pVtabIdx +** is only used when wsFlags&WHERE_VIRTUALTABLE is true. It is never the +** case that more than one of these conditions is true. +*/ +struct WherePlan { + u32 wsFlags; /* WHERE_* flags that describe the strategy */ + u32 nEq; /* Number of == constraints */ + union { + Index *pIdx; /* Index when WHERE_INDEXED is true */ + struct WhereTerm *pTerm; /* WHERE clause term for OR-search */ + sqlite3_index_info *pVtabIdx; /* Virtual table index to use */ + } u; +}; + /* ** For each nested loop in a WHERE clause implementation, the WhereInfo ** structure contains a single instance of this structure. This structure ** is intended to be private the the where.c module and should not be ** access or modified by other modules. 
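SrcList_item.colUsed and the new BMS constant above encode "which columns does this query touch" as single bits in a 64-bit Bitmask; columns at or beyond the last representable bit are folded onto that bit (the convention used elsewhere in the tree). A self-contained sketch of that encoding, with made-up column numbers purely for illustration:

#include <stdio.h>

typedef unsigned long long Bitmask;        /* mirrors: typedef u64 Bitmask */
#define BMS ((int)(sizeof(Bitmask)*8))     /* number of bits in a Bitmask */

int main(void){
  Bitmask colUsed = 0;
  int aCol[] = {0, 2, 5, 63, 70};          /* hypothetical referenced columns */
  int i;

  for(i=0; i<(int)(sizeof(aCol)/sizeof(aCol[0])); i++){
    int iCol = aCol[i];
    /* Columns that do not fit in the mask all share the high-order bit,
    ** so "some column past 63 is used" is still recorded. */
    if( iCol>=BMS ) iCol = BMS-1;
    colUsed |= ((Bitmask)1)<<iCol;
  }

  for(i=0; i<BMS; i++){
    if( colUsed & (((Bitmask)1)<<i) ) printf("column %d is used\n", i);
  }
  return 0;
}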
** -** The pIdxInfo and pBestIdx fields are used to help pick the best -** index on a virtual table. The pIdxInfo pointer contains indexing +** The pIdxInfo field is used to help pick the best index on a +** virtual table. The pIdxInfo pointer contains indexing ** information for the i-th table in the FROM clause before reordering. ** All the pIdxInfo pointers are freed by whereInfoFree() in where.c. -** The pBestIdx pointer is a copy of pIdxInfo for the i-th table after -** FROM clause ordering. This is a little confusing so I will repeat -** it in different words. WhereInfo.a[i].pIdxInfo is index information -** for WhereInfo.pTabList.a[i]. WhereInfo.a[i].pBestInfo is the -** index information for the i-th loop of the join. pBestInfo is always -** either NULL or a copy of some pIdxInfo. So for cleanup it is -** sufficient to free all of the pIdxInfo pointers. -** +** All other information in the i-th WhereLevel object for the i-th table +** after FROM clause ordering. */ struct WhereLevel { - int iFrom; /* Which entry in the FROM clause */ - int flags; /* Flags associated with this level */ - int iMem; /* First memory cell used by this level */ + WherePlan plan; /* query plan for this element of the FROM clause */ int iLeftJoin; /* Memory cell used to implement LEFT OUTER JOIN */ - Index *pIdx; /* Index used. NULL if no index */ int iTabCur; /* The VDBE cursor used to access the table */ - int iIdxCur; /* The VDBE cursor used to acesss pIdx */ - int brk; /* Jump here to break out of the loop */ - int nxt; /* Jump here to start the next IN combination */ - int cont; /* Jump here to continue with the next loop cycle */ - int top; /* First instruction of interior of the loop */ - int op, p1, p2; /* Opcode used to terminate the loop */ - int nEq; /* Number of == or IN constraints on this loop */ - int nIn; /* Number of IN operators constraining this loop */ - struct InLoop { - int iCur; /* The VDBE cursor used by this IN operator */ - int topAddr; /* Top of the IN loop */ - } *aInLoop; /* Information about each nested IN operator */ - sqlite3_index_info *pBestIdx; /* Index information for this level */ + int iIdxCur; /* The VDBE cursor used to access pIdx */ + int addrBrk; /* Jump here to break out of the loop */ + int addrNxt; /* Jump here to start the next IN combination */ + int addrCont; /* Jump here to continue with the next loop cycle */ + int addrFirst; /* First instruction of interior of the loop */ + u8 iFrom; /* Which entry in the FROM clause */ + u8 op, p5; /* Opcode and P5 of the opcode that ends the loop */ + int p1, p2; /* Operands of the opcode used to ends the loop */ + union { /* Information that depends on plan.wsFlags */ + struct { + int nIn; /* Number of entries in aInLoop[] */ + struct InLoop { + int iCur; /* The VDBE cursor used by this IN operator */ + int addrInTop; /* Top of the IN loop */ + } *aInLoop; /* Information about each nested IN operator */ + } in; /* Used when plan.wsFlags&WHERE_IN_ABLE */ + } u; /* The following field is really not part of the current level. But - ** we need a place to cache index information for each table in the - ** FROM clause and the WhereLevel structure is a convenient place. + ** we need a place to cache virtual table index information for each + ** virtual table in the FROM clause and the WhereLevel structure is + ** a convenient place since there is one WhereLevel for each FROM clause + ** element. 
*/ sqlite3_index_info *pIdxInfo; /* Index info for n-th source table */ }; /* +** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin() +** and the WhereInfo.wctrlFlags member. +*/ +#define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */ +#define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */ +#define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */ +#define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */ +#define WHERE_DUPLICATES_OK 0x0008 /* Ok to return a row more than once */ +#define WHERE_OMIT_OPEN 0x0010 /* Table cursor are already open */ +#define WHERE_OMIT_CLOSE 0x0020 /* Omit close of table & index cursors */ +#define WHERE_FORCE_TABLE 0x0040 /* Do not use an index-only search */ + +/* ** The WHERE clause processing routine has two halves. The ** first part does the start of the WHERE loop and the second ** half does the tail of the WHERE loop. An instance of @@ -1198,14 +1795,16 @@ ** into the second half to give some continuity. */ struct WhereInfo { - Parse *pParse; - SrcList *pTabList; /* List of tables in the join */ - int iTop; /* The very beginning of the WHERE loop */ - int iContinue; /* Jump here to continue with next record */ - int iBreak; /* Jump here to break out of the loop */ - int nLevel; /* Number of nested loop */ - sqlite3_index_info **apInfo; /* Array of pointers to index info structures */ - WhereLevel a[1]; /* Information about each nest loop in the WHERE */ + Parse *pParse; /* Parsing and code generating context */ + u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */ + u8 okOnePass; /* Ok to use one-pass algorithm for UPDATE or DELETE */ + SrcList *pTabList; /* List of tables in the join */ + int iTop; /* The very beginning of the WHERE loop */ + int iContinue; /* Jump here to continue with next record */ + int iBreak; /* Jump here to break out of the loop */ + int nLevel; /* Number of nested loop */ + struct WhereClause *pWC; /* Decomposition of the WHERE clause */ + WhereLevel a[1]; /* Information about each nest loop in WHERE */ }; /* @@ -1255,7 +1854,7 @@ ** ** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes. ** These addresses must be stored so that we can go back and fill in -** the P3_KEYINFO and P2 parameters later. Neither the KeyInfo nor +** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor ** the number of columns in P2 can be computed at the same time ** as the OP_OpenEphm instruction is coded because not ** enough information about the compound query is known at that point. @@ -1266,18 +1865,15 @@ struct Select { ExprList *pEList; /* The fields of the result */ u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */ - u8 isDistinct; /* True if the DISTINCT keyword is present */ - u8 isResolved; /* True once sqlite3SelectResolve() has run. 
*/ - u8 isAgg; /* True if this is an aggregate query */ - u8 usesEphm; /* True if uses an OpenEphemeral opcode */ - u8 disallowOrderBy; /* Do not allow an ORDER BY to be attached if TRUE */ char affinity; /* MakeRecord with this affinity for SRT_Set */ + u16 selFlags; /* Various SF_* values */ SrcList *pSrc; /* The FROM clause */ Expr *pWhere; /* The WHERE clause */ ExprList *pGroupBy; /* The GROUP BY clause */ Expr *pHaving; /* The HAVING clause */ ExprList *pOrderBy; /* The ORDER BY clause */ Select *pPrior; /* Prior select in a compound select statement */ + Select *pNext; /* Next select to the left in a compound */ Select *pRightmost; /* Right-most select in a compound select statement */ Expr *pLimit; /* LIMIT expression. NULL means not used. */ Expr *pOffset; /* OFFSET expression. NULL means not used. */ @@ -1286,22 +1882,71 @@ }; /* -** The results of a select can be distributed in several ways. +** Allowed values for Select.selFlags. The "SF" prefix stands for +** "Select Flag". +*/ +#define SF_Distinct 0x0001 /* Output should be DISTINCT */ +#define SF_Resolved 0x0002 /* Identifiers have been resolved */ +#define SF_Aggregate 0x0004 /* Contains aggregate functions */ +#define SF_UsesEphemeral 0x0008 /* Uses the OpenEphemeral opcode */ +#define SF_Expanded 0x0010 /* sqlite3SelectExpand() called on this */ +#define SF_HasTypeInfo 0x0020 /* FROM subqueries have Table metadata */ + + +/* +** The results of a select can be distributed in several ways. The +** "SRT" prefix means "SELECT Result Type". */ #define SRT_Union 1 /* Store result as keys in an index */ #define SRT_Except 2 /* Remove result from a UNION index */ -#define SRT_Discard 3 /* Do not save the results anywhere */ +#define SRT_Exists 3 /* Store 1 if the result is not empty */ +#define SRT_Discard 4 /* Do not save the results anywhere */ /* The ORDER BY clause is ignored for all of the above */ -#define IgnorableOrderby(X) (X<=SRT_Discard) +#define IgnorableOrderby(X) ((X->eDest)<=SRT_Discard) -#define SRT_Callback 4 /* Invoke a callback with each row of result */ -#define SRT_Mem 5 /* Store result in a memory cell */ -#define SRT_Set 6 /* Store non-null results as keys in an index */ -#define SRT_Table 7 /* Store result as data with an automatic rowid */ -#define SRT_EphemTab 8 /* Create transient tab and store like SRT_Table */ -#define SRT_Subroutine 9 /* Call a subroutine to handle results */ -#define SRT_Exists 10 /* Store 1 if the result is not empty */ +#define SRT_Output 5 /* Output each row of result */ +#define SRT_Mem 6 /* Store result in a memory cell */ +#define SRT_Set 7 /* Store results as keys in an index */ +#define SRT_Table 8 /* Store result as data with an automatic rowid */ +#define SRT_EphemTab 9 /* Create transient tab and store like SRT_Table */ +#define SRT_Coroutine 10 /* Generate a single row of result */ + +/* +** A structure used to customize the behavior of sqlite3Select(). See +** comments above sqlite3Select() for details. 
+*/ +typedef struct SelectDest SelectDest; +struct SelectDest { + u8 eDest; /* How to dispose of the results */ + u8 affinity; /* Affinity used when eDest==SRT_Set */ + int iParm; /* A parameter used by the eDest disposal method */ + int iMem; /* Base register where results are written */ + int nMem; /* Number of registers allocated */ +}; + +/* +** During code generation of statements that do inserts into AUTOINCREMENT +** tables, the following information is attached to the Table.u.autoInc.p +** pointer of each autoincrement table to record some side information that +** the code generator needs. We have to keep per-table autoincrement +** information in case inserts are down within triggers. Triggers do not +** normally coordinate their activities, but we do need to coordinate the +** loading and saving of autoincrement information. +*/ +struct AutoincInfo { + AutoincInfo *pNext; /* Next info block in a list of them all */ + Table *pTab; /* Table this info block refers to */ + int iDb; /* Index in sqlite3.aDb[] of database holding pTab */ + int regCtr; /* Memory register holding the rowid counter */ +}; + +/* +** Size of the column cache +*/ +#ifndef SQLITE_N_COLCACHE +# define SQLITE_N_COLCACHE 10 +#endif /* ** An SQL parser context. A copy of this structure is passed through @@ -1329,11 +1974,29 @@ u8 checkSchema; /* Causes schema cookie check after an error */ u8 nested; /* Number of nested calls to the parser/code generator */ u8 parseError; /* True after a parsing error. Ticket #1794 */ + u8 nTempReg; /* Number of temporary registers in aTempReg[] */ + u8 nTempInUse; /* Number of aTempReg[] currently checked out */ + int aTempReg[8]; /* Holding area for temporary registers */ + int nRangeReg; /* Size of the temporary register block */ + int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */ int nTab; /* Number of previously allocated VDBE cursors */ int nMem; /* Number of memory cells used so far */ int nSet; /* Number of sets used so far */ - int ckOffset; /* Stack offset to data used by CHECK constraints */ + int ckBase; /* Base register of data during check constraints */ + int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */ + int iCacheCnt; /* Counter used to generate aColCache[].lru values */ + u8 nColCache; /* Number of entries in the column cache */ + u8 iColCache; /* Next entry of the cache to replace */ + struct yColCache { + int iTable; /* Table cursor number */ + int iColumn; /* Table column number */ + u8 affChange; /* True if this register has had an affinity change */ + u8 tempReg; /* iReg is a temp register that needs to be freed */ + int iLevel; /* Nesting level */ + int iReg; /* Reg with value of this column. 0 means none. */ + int lru; /* Least recently used entry has the smallest value */ + } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */ u32 writeMask; /* Start a write transaction on these databases */ u32 cookieMask; /* Bitmask of schema verified databases */ int cookieGoto; /* Address of OP_Goto to cookie verifier subroutine */ @@ -1342,6 +2005,9 @@ int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */ #endif + int regRowid; /* Register holding rowid of CREATE TABLE entry */ + int regRoot; /* Register holding root page number for new objects */ + AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ /* Above is constant between recursions. 
Below is reset before and after ** each recursion */ @@ -1350,6 +2016,9 @@ int nVarExpr; /* Number of used slots in apVarExpr[] */ int nVarExprAlloc; /* Number of allocated slots in apVarExpr[] */ Expr **apVarExpr; /* Pointers to :aaa and $aaaa wildcard expressions */ + int nAlias; /* Number of aliased result set columns */ + int nAliasAlloc; /* Number of allocated slots for aAlias[] */ + int *aAlias; /* Register used to hold aliased result */ u8 explain; /* True if the EXPLAIN flag is found on the query */ Token sErrToken; /* The token at which the error occurred */ Token sNameToken; /* Token with unqualified schema object name */ @@ -1363,11 +2032,11 @@ #ifndef SQLITE_OMIT_VIRTUALTABLE Token sArg; /* Complete text of a module argument */ u8 declareVtab; /* True if inside sqlite3_declare_vtab() */ - Table *pVirtualLock; /* Require virtual table lock on this table */ + int nVtabLock; /* Number of virtual tables to lock */ + Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif -#if SQLITE_MAX_EXPR_DEPTH>0 int nHeight; /* Expression tree height of current sub-select */ -#endif + Table *pZombieTab; /* List of Table objects to delete after code gen */ }; #ifdef SQLITE_OMIT_VIRTUALTABLE @@ -1386,12 +2055,13 @@ }; /* -** Bitfield flags for P2 value in OP_Insert and OP_Delete +** Bitfield flags for P5 value in OP_Insert and OP_Delete */ -#define OPFLAG_NCHANGE 1 /* Set to update db->nChange */ -#define OPFLAG_LASTROWID 2 /* Set to update db->lastRowid */ -#define OPFLAG_ISUPDATE 4 /* This OP_Insert is an sql UPDATE */ -#define OPFLAG_APPEND 8 /* This is likely to be an append */ +#define OPFLAG_NCHANGE 1 /* Set to update db->nChange */ +#define OPFLAG_LASTROWID 2 /* Set to update db->lastRowid */ +#define OPFLAG_ISUPDATE 4 /* This OP_Insert is an sql UPDATE */ +#define OPFLAG_APPEND 8 /* This is likely to be an append */ +#define OPFLAG_USESEEKRESULT 16 /* Try to avoid a seek in BtreeInsert() */ /* * Each trigger present in the database schema is stored as an instance of @@ -1413,10 +2083,9 @@ char *table; /* The table or view to which the trigger applies */ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ - Expr *pWhen; /* The WHEN clause of the expresion (may be NULL) */ + Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */ IdList *pColumns; /* If this is an UPDATE OF trigger, the is stored here */ - Token nameToken; /* Token containing zName. Use during parsing only */ Schema *pSchema; /* Schema containing the trigger */ Schema *pTabSchema; /* Schema containing the table */ TriggerStep *step_list; /* Link list of trigger program steps */ @@ -1450,7 +2119,7 @@ * orconf -> stores the ON CONFLICT algorithm * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then * this stores a pointer to the SELECT statement. Otherwise NULL. - * target -> A token holding the name of the table to insert into. + * target -> A token holding the quoted name of the table to insert into. * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then * this stores values to be inserted. Otherwise NULL. * pIdList -> If this is an INSERT INTO ... () VALUES ... @@ -1458,12 +2127,12 @@ * inserted into. * * (op == TK_DELETE) - * target -> A token holding the name of the table to delete from. + * target -> A token holding the quoted name of the table to delete from. * pWhere -> The WHERE clause of the DELETE statement if one is specified. * Otherwise NULL. 
* * (op == TK_UPDATE) - * target -> A token holding the name of the table to update rows of. + * target -> A token holding the quoted name of the table to update rows of. * pWhere -> The WHERE clause of the UPDATE statement if one is specified. * Otherwise NULL. * pExprList -> A list of the columns to update and the expressions to update @@ -1478,7 +2147,7 @@ Select *pSelect; /* Valid for SELECT and sometimes INSERT steps (when pExprList == 0) */ - Token target; /* Valid for DELETE, UPDATE, INSERT steps */ + Token target; /* Target table for DELETE, UPDATE, INSERT. Quoted */ Expr *pWhere; /* Valid for DELETE, UPDATE steps */ ExprList *pExprList; /* Valid for UPDATE statements and sometimes INSERT steps (when pSelect == 0) */ @@ -1518,6 +2187,8 @@ Table *pTab; /* Table that triggers are currently being coded on */ int newIdx; /* Index of vdbe cursor to "new" temp table */ int oldIdx; /* Index of vdbe cursor to "old" temp table */ + u32 newColMask; + u32 oldColMask; int orconf; /* Current orconf policy */ int ignoreJump; /* where to jump to for a RAISE(IGNORE) */ Trigger *pTrigger; /* The trigger currently being coded */ @@ -1538,6 +2209,22 @@ }; /* +** An objected used to accumulate the text of a string where we +** do not necessarily know how big the string will be in the end. +*/ +struct StrAccum { + sqlite3 *db; /* Optional database for lookaside. Can be NULL */ + char *zBase; /* A base allocation. Not from malloc. */ + char *zText; /* The string collected so far */ + int nChar; /* Length of the string so far */ + int nAlloc; /* Amount of space allocated in zText */ + int mxAlloc; /* Maximum allowed string length */ + u8 mallocFailed; /* Becomes true if any memory allocation fails */ + u8 useMalloc; /* True if zText is enlargeable using realloc */ + u8 tooBig; /* Becomes true if string size exceeds limits */ +}; + +/* ** A pointer to this structure is used to communicate information ** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback. */ @@ -1549,6 +2236,69 @@ } InitData; /* +** Structure containing global configuration data for the SQLite library. +** +** This structure also contains some state information. +*/ +struct Sqlite3Config { + int bMemstat; /* True to enable memory status */ + int bCoreMutex; /* True to enable core mutexing */ + int bFullMutex; /* True to enable full mutexing */ + int mxStrlen; /* Maximum string length */ + int szLookaside; /* Default lookaside buffer size */ + int nLookaside; /* Default lookaside buffer count */ + sqlite3_mem_methods m; /* Low-level memory allocation interface */ + sqlite3_mutex_methods mutex; /* Low-level mutex interface */ + sqlite3_pcache_methods pcache; /* Low-level page-cache interface */ + void *pHeap; /* Heap storage space */ + int nHeap; /* Size of pHeap[] */ + int mnReq, mxReq; /* Min and max heap requests sizes */ + void *pScratch; /* Scratch memory */ + int szScratch; /* Size of each scratch buffer */ + int nScratch; /* Number of scratch buffers */ + void *pPage; /* Page cache memory */ + int szPage; /* Size of each page in pPage[] */ + int nPage; /* Number of pages in pPage[] */ + int mxParserStack; /* maximum depth of the parser stack */ + int sharedCacheEnabled; /* true if shared-cache mode enabled */ + /* The above might be initialized to non-zero. The following need to always + ** initially be zero, however. 
*/ + int isInit; /* True after initialization has finished */ + int inProgress; /* True while initialization in progress */ + int isMallocInit; /* True after malloc is initialized */ + sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */ + int nRefInitMutex; /* Number of users of pInitMutex */ +}; + +/* +** Context pointer passed down through the tree-walk. +*/ +struct Walker { + int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */ + int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */ + Parse *pParse; /* Parser context. */ + union { /* Extra data for callback */ + NameContext *pNC; /* Naming context */ + int i; /* Integer value */ + } u; +}; + +/* Forward declarations */ +int sqlite3WalkExpr(Walker*, Expr*); +int sqlite3WalkExprList(Walker*, ExprList*); +int sqlite3WalkSelect(Walker*, Select*); +int sqlite3WalkSelectExpr(Walker*, Select*); +int sqlite3WalkSelectFrom(Walker*, Select*); + +/* +** Return code from the parse-tree walking primitives and their +** callbacks. +*/ +#define WRC_Continue 0 /* Continue down into children */ +#define WRC_Prune 1 /* Omit children but continue walking siblings */ +#define WRC_Abort 2 /* Abandon the tree walk */ + +/* ** Assuming zIn points to the first byte of a UTF-8 character, ** advance zIn to point to the first byte of the next UTF-8 character. */ @@ -1572,56 +2322,147 @@ #endif /* +** The ctype.h header is needed for non-ASCII systems. It is also +** needed by FTS3 when FTS3 is included in the amalgamation. +*/ +#if !defined(SQLITE_ASCII) || \ + (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION)) +# include <ctype.h> +#endif + +/* +** The following macros mimic the standard library functions toupper(), +** isspace(), isalnum(), isdigit() and isxdigit(), respectively. The +** sqlite versions only work for ASCII characters, regardless of locale. 
+*/ +#ifdef SQLITE_ASCII +# define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20)) +# define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01) +# define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06) +# define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02) +# define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04) +# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) +# define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) +#else +# define sqlite3Toupper(x) toupper((unsigned char)(x)) +# define sqlite3Isspace(x) isspace((unsigned char)(x)) +# define sqlite3Isalnum(x) isalnum((unsigned char)(x)) +# define sqlite3Isalpha(x) isalpha((unsigned char)(x)) +# define sqlite3Isdigit(x) isdigit((unsigned char)(x)) +# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) +# define sqlite3Tolower(x) tolower((unsigned char)(x)) +#endif + +/* ** Internal function prototypes */ int sqlite3StrICmp(const char *, const char *); int sqlite3StrNICmp(const char *, const char *, int); int sqlite3IsNumber(const char*, int*, u8); +int sqlite3Strlen30(const char*); + +int sqlite3MallocInit(void); +void sqlite3MallocEnd(void); +void *sqlite3Malloc(int); +void *sqlite3MallocZero(int); +void *sqlite3DbMallocZero(sqlite3*, int); +void *sqlite3DbMallocRaw(sqlite3*, int); +char *sqlite3DbStrDup(sqlite3*,const char*); +char *sqlite3DbStrNDup(sqlite3*,const char*, int); +void *sqlite3Realloc(void*, int); +void *sqlite3DbReallocOrFree(sqlite3 *, void *, int); +void *sqlite3DbRealloc(sqlite3 *, void *, int); +void sqlite3DbFree(sqlite3*, void*); +int sqlite3MallocSize(void*); +int sqlite3DbMallocSize(sqlite3*, void*); +void *sqlite3ScratchMalloc(int); +void sqlite3ScratchFree(void*); +void *sqlite3PageMalloc(int); +void sqlite3PageFree(void*); +void sqlite3MemSetDefault(void); +void sqlite3BenignMallocHooks(void (*)(void), void (*)(void)); +int sqlite3MemoryAlarm(void (*)(void*, sqlite3_int64, int), void*, sqlite3_int64); + +/* +** On systems with ample stack space and that support alloca(), make +** use of alloca() to obtain space for large automatic objects. By default, +** obtain space from malloc(). +** +** The alloca() routine never returns NULL. This will cause code paths +** that deal with sqlite3StackAlloc() failures to be unreachable. 
+*/ +#ifdef SQLITE_USE_ALLOCA +# define sqlite3StackAllocRaw(D,N) alloca(N) +# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) +# define sqlite3StackFree(D,P) +#else +# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) +# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) +# define sqlite3StackFree(D,P) sqlite3DbFree(D,P) +#endif -void *sqlite3Malloc(int,int); -void *sqlite3MallocRaw(int,int); -void *sqlite3Realloc(void*,int); -char *sqlite3StrDup(const char*); -char *sqlite3StrNDup(const char*, int); -# define sqlite3CheckMemory(a,b) -void *sqlite3ReallocOrFree(void*,int); -void sqlite3FreeX(void*); -void *sqlite3MallocX(int); -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - int sqlite3AllocSize(void *); +#ifdef SQLITE_ENABLE_MEMSYS3 +const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); +#endif +#ifdef SQLITE_ENABLE_MEMSYS5 +const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); #endif -char *sqlite3MPrintf(const char*, ...); -char *sqlite3VMPrintf(const char*, va_list); + +#ifndef SQLITE_MUTEX_OMIT + sqlite3_mutex_methods *sqlite3DefaultMutex(void); + sqlite3_mutex *sqlite3MutexAlloc(int); + int sqlite3MutexInit(void); + int sqlite3MutexEnd(void); +#endif + +int sqlite3StatusValue(int); +void sqlite3StatusAdd(int, int); +void sqlite3StatusSet(int, int); + +int sqlite3IsNaN(double); + +void sqlite3VXPrintf(StrAccum*, int, const char*, va_list); +char *sqlite3MPrintf(sqlite3*,const char*, ...); +char *sqlite3VMPrintf(sqlite3*,const char*, va_list); +char *sqlite3MAppendf(sqlite3*,char*,const char*,...); #if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) void sqlite3DebugPrintf(const char*, ...); - void *sqlite3TextToPtr(const char*); #endif -void sqlite3SetString(char **, ...); +#if defined(SQLITE_TEST) + void *sqlite3TestTextToPtr(const char*); +#endif +void sqlite3SetString(char **, sqlite3*, const char*, ...); void sqlite3ErrorMsg(Parse*, const char*, ...); void sqlite3ErrorClear(Parse*); -void sqlite3Dequote(char*); -void sqlite3DequoteExpr(Expr*); +int sqlite3Dequote(char*); int sqlite3KeywordCode(const unsigned char*, int); int sqlite3RunParser(Parse*, const char*, char **); void sqlite3FinishCoding(Parse*); -Expr *sqlite3Expr(int, Expr*, Expr*, const Token*); -Expr *sqlite3ExprOrFree(int, Expr*, Expr*, const Token*); -Expr *sqlite3RegisterExpr(Parse*,Token*); -Expr *sqlite3ExprAnd(Expr*, Expr*); -void sqlite3ExprSpan(Expr*,Token*,Token*); -Expr *sqlite3ExprFunction(ExprList*, Token*); +int sqlite3GetTempReg(Parse*); +void sqlite3ReleaseTempReg(Parse*,int); +int sqlite3GetTempRange(Parse*,int); +void sqlite3ReleaseTempRange(Parse*,int,int); +Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); +Expr *sqlite3Expr(sqlite3*,int,const char*); +void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); +Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*); +Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); +Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); void sqlite3ExprAssignVarNumber(Parse*, Expr*); -void sqlite3ExprDelete(Expr*); -ExprList *sqlite3ExprListAppend(ExprList*,Expr*,Token*); -void sqlite3ExprListDelete(ExprList*); +void sqlite3ExprClear(sqlite3*, Expr*); +void sqlite3ExprDelete(sqlite3*, Expr*); +ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); +void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int); +void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*); +void sqlite3ExprListDelete(sqlite3*, ExprList*); int sqlite3Init(sqlite3*, char**); int sqlite3InitCallback(void*, int, char**, char**); void 
sqlite3Pragma(Parse*,Token*,Token*,Token*,int); void sqlite3ResetInternalSchema(sqlite3*, int); void sqlite3BeginParse(Parse*,int); void sqlite3CommitInternalChanges(sqlite3*); -Table *sqlite3ResultSetOfSelect(Parse*,char*,Select*); +Table *sqlite3ResultSetOfSelect(Parse*,Select*); void sqlite3OpenMasterTable(Parse *, int); void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int); void sqlite3AddColumn(Parse*,Token*); @@ -1629,10 +2470,24 @@ void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); void sqlite3AddCheckConstraint(Parse*, Expr*); void sqlite3AddColumnType(Parse*,Token*); -void sqlite3AddDefaultValue(Parse*,Expr*); -void sqlite3AddCollateType(Parse*, const char*, int); +void sqlite3AddDefaultValue(Parse*,ExprSpan*); +void sqlite3AddCollateType(Parse*, Token*); void sqlite3EndTable(Parse*,Token*,Token*,Select*); +Bitvec *sqlite3BitvecCreate(u32); +int sqlite3BitvecTest(Bitvec*, u32); +int sqlite3BitvecSet(Bitvec*, u32); +void sqlite3BitvecClear(Bitvec*, u32, void*); +void sqlite3BitvecDestroy(Bitvec*); +u32 sqlite3BitvecSize(Bitvec*); +int sqlite3BitvecBuiltinTest(int,int*); + +RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int); +void sqlite3RowSetClear(RowSet*); +void sqlite3RowSetInsert(RowSet*, i64); +int sqlite3RowSetTest(RowSet*, u8 iBatch, i64); +int sqlite3RowSetNext(RowSet*, i64*); + void sqlite3CreateView(Parse*,Token*,Token*,Token*,Select*,int,int); #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) @@ -1643,83 +2498,122 @@ void sqlite3DropTable(Parse*, SrcList*, int, int); void sqlite3DeleteTable(Table*); +#ifndef SQLITE_OMIT_AUTOINCREMENT + void sqlite3AutoincrementBegin(Parse *pParse); + void sqlite3AutoincrementEnd(Parse *pParse); +#else +# define sqlite3AutoincrementBegin(X) +# define sqlite3AutoincrementEnd(X) +#endif void sqlite3Insert(Parse*, SrcList*, ExprList*, Select*, IdList*, int); -void *sqlite3ArrayAllocate(void*,int,int,int*,int*,int*); -IdList *sqlite3IdListAppend(IdList*, Token*); +void *sqlite3ArrayAllocate(sqlite3*,void*,int,int,int*,int*,int*); +IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*); int sqlite3IdListIndex(IdList*,const char*); -SrcList *sqlite3SrcListAppend(SrcList*, Token*, Token*); -SrcList *sqlite3SrcListAppendFromTerm(SrcList*, Token*, Token*, Token*, - Select*, Expr*, IdList*); +SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int); +SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*); +SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*, + Token*, Select*, Expr*, IdList*); +void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); +int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); void sqlite3SrcListShiftJoinType(SrcList*); void sqlite3SrcListAssignCursors(Parse*, SrcList*); -void sqlite3IdListDelete(IdList*); -void sqlite3SrcListDelete(SrcList*); +void sqlite3IdListDelete(sqlite3*, IdList*); +void sqlite3SrcListDelete(sqlite3*, SrcList*); void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, Token*, int, int); void sqlite3DropIndex(Parse*, SrcList*, int); -int sqlite3Select(Parse*, Select*, int, int, Select*, int, int*, char *aff); -Select *sqlite3SelectNew(ExprList*,SrcList*,Expr*,ExprList*,Expr*,ExprList*, - int,Expr*,Expr*); -void sqlite3SelectDelete(Select*); +int sqlite3Select(Parse*, Select*, SelectDest*); +Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, + Expr*,ExprList*,int,Expr*,Expr*); +void sqlite3SelectDelete(sqlite3*, Select*); Table *sqlite3SrcListLookup(Parse*, SrcList*); int 
sqlite3IsReadOnly(Parse*, Table*, int); void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) +Expr *sqlite3LimitWhere(Parse *, SrcList *, Expr *, ExprList *, Expr *, Expr *, char *); +#endif void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); -WhereInfo *sqlite3WhereBegin(Parse*, SrcList*, Expr*, ExprList**); +WhereInfo *sqlite3WhereBegin(Parse*, SrcList*, Expr*, ExprList**, u16); void sqlite3WhereEnd(WhereInfo*); -void sqlite3ExprCodeGetColumn(Vdbe*, Table*, int, int); -void sqlite3ExprCode(Parse*, Expr*); -void sqlite3ExprCodeAndCache(Parse*, Expr*); -int sqlite3ExprCodeExprList(Parse*, ExprList*); +int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, int); +void sqlite3ExprCodeMove(Parse*, int, int, int); +void sqlite3ExprCodeCopy(Parse*, int, int, int); +void sqlite3ExprCacheStore(Parse*, int, int, int); +void sqlite3ExprCachePush(Parse*); +void sqlite3ExprCachePop(Parse*, int); +void sqlite3ExprCacheRemove(Parse*, int); +void sqlite3ExprCacheClear(Parse*); +void sqlite3ExprCacheAffinityChange(Parse*, int, int); +void sqlite3ExprHardCopy(Parse*,int,int); +int sqlite3ExprCode(Parse*, Expr*, int); +int sqlite3ExprCodeTemp(Parse*, Expr*, int*); +int sqlite3ExprCodeTarget(Parse*, Expr*, int); +int sqlite3ExprCodeAndCache(Parse*, Expr*, int); +void sqlite3ExprCodeConstants(Parse*, Expr*); +int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int); void sqlite3ExprIfTrue(Parse*, Expr*, int, int); void sqlite3ExprIfFalse(Parse*, Expr*, int, int); Table *sqlite3FindTable(sqlite3*,const char*, const char*); -Table *sqlite3LocateTable(Parse*,const char*, const char*); +Table *sqlite3LocateTable(Parse*,int isView,const char*, const char*); Index *sqlite3FindIndex(sqlite3*,const char*, const char*); void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); void sqlite3Vacuum(Parse*); int sqlite3RunVacuum(char**, sqlite3*); -char *sqlite3NameFromToken(Token*); +char *sqlite3NameFromToken(sqlite3*, Token*); int sqlite3ExprCompare(Expr*, Expr*); -int sqliteFuncId(Token*); -int sqlite3ExprResolveNames(NameContext *, Expr *); -int sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); -int sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); +void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); +void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); Vdbe *sqlite3GetVdbe(Parse*); -Expr *sqlite3CreateIdExpr(const char*); -void sqlite3Randomness(int, void*); +Expr *sqlite3CreateIdExpr(Parse *, const char*); +void sqlite3PrngSaveState(void); +void sqlite3PrngRestoreState(void); +void sqlite3PrngResetState(void); void sqlite3RollbackAll(sqlite3*); void sqlite3CodeVerifySchema(Parse*, int); void sqlite3BeginTransaction(Parse*, int); void sqlite3CommitTransaction(Parse*); void sqlite3RollbackTransaction(Parse*); +void sqlite3Savepoint(Parse*, int, Token*); +void sqlite3CloseSavepoints(sqlite3 *); int sqlite3ExprIsConstant(Expr*); int sqlite3ExprIsConstantNotJoin(Expr*); int sqlite3ExprIsConstantOrFunction(Expr*); int sqlite3ExprIsInteger(Expr*, int*); int sqlite3IsRowid(const char*); -void sqlite3GenerateRowDelete(sqlite3*, Vdbe*, Table*, int, int); -void sqlite3GenerateRowIndexDelete(Vdbe*, Table*, int, char*); -void sqlite3GenerateIndexKey(Vdbe*, Index*, int); -void sqlite3GenerateConstraintChecks(Parse*,Table*,int,char*,int,int,int,int); -void sqlite3CompleteInsertion(Parse*, Table*, int, 
char*, int, int, int, int); -void sqlite3OpenTableAndIndices(Parse*, Table*, int, int); +void sqlite3GenerateRowDelete(Parse*, Table*, int, int, int); +void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int*); +int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int); +void sqlite3GenerateConstraintChecks(Parse*,Table*,int,int, + int*,int,int,int,int,int*); +void sqlite3CompleteInsertion(Parse*, Table*, int, int, int*, int, int,int,int); +int sqlite3OpenTableAndIndices(Parse*, Table*, int, int); void sqlite3BeginWriteOperation(Parse*, int, int); -Expr *sqlite3ExprDup(Expr*); -void sqlite3TokenCopy(Token*, Token*); -ExprList *sqlite3ExprListDup(ExprList*); -SrcList *sqlite3SrcListDup(SrcList*); -IdList *sqlite3IdListDup(IdList*); -Select *sqlite3SelectDup(Select*); +Expr *sqlite3ExprDup(sqlite3*,Expr*,int); +ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int); +SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int); +IdList *sqlite3IdListDup(sqlite3*,IdList*); +Select *sqlite3SelectDup(sqlite3*,Select*,int); +void sqlite3FuncDefInsert(FuncDefHash*, FuncDef*); FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,int,u8,int); void sqlite3RegisterBuiltinFunctions(sqlite3*); -void sqlite3RegisterDateTimeFunctions(sqlite3*); -int sqlite3SafetyOn(sqlite3*); -int sqlite3SafetyOff(sqlite3*); -int sqlite3SafetyCheck(sqlite3*); -void sqlite3ChangeCookie(sqlite3*, Vdbe*, int); +void sqlite3RegisterDateTimeFunctions(void); +void sqlite3RegisterGlobalFunctions(void); +#ifdef SQLITE_DEBUG + int sqlite3SafetyOn(sqlite3*); + int sqlite3SafetyOff(sqlite3*); +#else +# define sqlite3SafetyOn(A) 0 +# define sqlite3SafetyOff(A) 0 +#endif +int sqlite3SafetyCheckOk(sqlite3*); +int sqlite3SafetyCheckSickOrOk(sqlite3*); +void sqlite3ChangeCookie(Parse*, int); + +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) +void sqlite3MaterializeView(Parse*, Table*, Expr*, int); +#endif #ifndef SQLITE_OMIT_TRIGGER void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*, @@ -1727,35 +2621,38 @@ void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*); void sqlite3DropTrigger(Parse*, SrcList*, int); void sqlite3DropTriggerPtr(Parse*, Trigger*); - int sqlite3TriggersExist(Parse*, Table*, int, ExprList*); - int sqlite3CodeRowTrigger(Parse*, int, ExprList*, int, Table *, int, int, - int, int); + Trigger *sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask); + Trigger *sqlite3TriggerList(Parse *, Table *); + int sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *, + int, int, int, int, u32*, u32*); void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*); - void sqlite3DeleteTriggerStep(TriggerStep*); - TriggerStep *sqlite3TriggerSelectStep(Select*); - TriggerStep *sqlite3TriggerInsertStep(Token*, IdList*, ExprList*,Select*,int); - TriggerStep *sqlite3TriggerUpdateStep(Token*, ExprList*, Expr*, int); - TriggerStep *sqlite3TriggerDeleteStep(Token*, Expr*); - void sqlite3DeleteTrigger(Trigger*); + void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*); + TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*); + TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*, + ExprList*,Select*,int); + TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, int); + TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*); + void sqlite3DeleteTrigger(sqlite3*, Trigger*); void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*); #else -# define sqlite3TriggersExist(A,B,C,D,E,F) 0 -# define sqlite3DeleteTrigger(A) +# define 
sqlite3TriggersExist(B,C,D,E,F) 0 +# define sqlite3DeleteTrigger(A,B) # define sqlite3DropTriggerPtr(A,B) # define sqlite3UnlinkAndDeleteTrigger(A,B,C) -# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I) 0 +# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I,J,K,L) 0 +# define sqlite3TriggerList(X, Y) 0 #endif int sqlite3JoinType(Parse*, Token*, Token*, Token*); void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); void sqlite3DeferForeignKey(Parse*, int); #ifndef SQLITE_OMIT_AUTHORIZATION - void sqlite3AuthRead(Parse*,Expr*,SrcList*); + void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*); int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*); void sqlite3AuthContextPush(Parse*, AuthContext*, const char*); void sqlite3AuthContextPop(AuthContext*); #else -# define sqlite3AuthRead(a,b,c) +# define sqlite3AuthRead(a,b,c,d) # define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK # define sqlite3AuthContextPush(a,b,c) # define sqlite3AuthContextPop(a) ((void)(a)) @@ -1763,7 +2660,7 @@ void sqlite3Attach(Parse*, Expr*, Expr*, Expr*); void sqlite3Detach(Parse*, Expr*); int sqlite3BtreeFactory(const sqlite3 *db, const char *zFilename, - int omitJournal, int nCache, Btree **ppBtree); + int omitJournal, int nCache, int flags, Btree **ppBtree); int sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*); int sqlite3FixSrcList(DbFixer*, SrcList*); int sqlite3FixSelect(DbFixer*, Select*); @@ -1771,16 +2668,48 @@ int sqlite3FixExprList(DbFixer*, ExprList*); int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); int sqlite3AtoF(const char *z, double*); -char *sqlite3_snprintf(int,char*,const char*,...); int sqlite3GetInt32(const char *, int*); -int sqlite3FitsIn64Bits(const char *); +int sqlite3FitsIn64Bits(const char *, int); int sqlite3Utf16ByteLen(const void *pData, int nChar); int sqlite3Utf8CharLen(const char *pData, int nByte); -int sqlite3Utf8Read(const u8*, const u8*, const u8**); -int sqlite3PutVarint(unsigned char *, u64); -int sqlite3GetVarint(const unsigned char *, u64 *); -int sqlite3GetVarint32(const unsigned char *, u32 *); +int sqlite3Utf8Read(const u8*, const u8**); + +/* +** Routines to read and write variable-length integers. These used to +** be defined locally, but now we use the varint routines in the util.c +** file. Code should use the MACRO forms below, as the Varint32 versions +** are coded to assume the single byte case is already handled (which +** the MACRO form does). +*/ +int sqlite3PutVarint(unsigned char*, u64); +int sqlite3PutVarint32(unsigned char*, u32); +u8 sqlite3GetVarint(const unsigned char *, u64 *); +u8 sqlite3GetVarint32(const unsigned char *, u32 *); int sqlite3VarintLen(u64 v); + +/* +** The header of a record consists of a sequence variable-length integers. +** These integers are almost always small and are encoded as a single byte. +** The following macros take advantage this fact to provide a fast encode +** and decode of the integers in a record header. It is faster for the common +** case where the integer is a single byte. It is a little slower when the +** integer is two or more bytes. But overall it is faster. +** +** The following expressions are equivalent: +** +** x = sqlite3GetVarint32( A, &B ); +** x = sqlite3PutVarint32( A, B ); +** +** x = getVarint32( A, B ); +** x = putVarint32( A, B ); +** +*/ +#define getVarint32(A,B) (u8)((*(A)<(u8)0x80) ? ((B) = (u32)*(A)),1 : sqlite3GetVarint32((A), (u32 *)&(B))) +#define putVarint32(A,B) (u8)(((u32)(B)<(u32)0x80) ? 
(*(A) = (unsigned char)(B)),1 : sqlite3PutVarint32((A), (B))) +#define getVarint sqlite3GetVarint +#define putVarint sqlite3PutVarint + + void sqlite3IndexAffinityStr(Vdbe *, Index *); void sqlite3TableAffinityStr(Vdbe *, Table *); char sqlite3CompareAffinity(Expr *pExpr, char aff2); @@ -1788,12 +2717,12 @@ char sqlite3ExprAffinity(Expr *pExpr); int sqlite3Atoi64(const char*, i64*); void sqlite3Error(sqlite3*, int, const char*,...); -void *sqlite3HexToBlob(const char *z); +void *sqlite3HexToBlob(sqlite3*, const char *z, int n); int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); const char *sqlite3ErrStr(int); int sqlite3ReadSchema(Parse *pParse); -CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char *,int,int); -CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName, int nName); +CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int); +CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName); CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr); Expr *sqlite3ExprSetColl(Parse *pParse, Expr *, Token *); int sqlite3CheckCollSeq(Parse *, CollSeq *); @@ -1802,13 +2731,20 @@ const void *sqlite3ValueText(sqlite3_value*, u8); int sqlite3ValueBytes(sqlite3_value*, u8); -void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, void(*)(void*)); +void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, + void(*)(void*)); void sqlite3ValueFree(sqlite3_value*); -sqlite3_value *sqlite3ValueNew(void); -char *sqlite3Utf16to8(const void*, int); -int sqlite3ValueFromExpr(Expr *, u8, u8, sqlite3_value **); +sqlite3_value *sqlite3ValueNew(sqlite3 *); +char *sqlite3Utf16to8(sqlite3 *, const void*, int); +int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **); void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); +#ifndef SQLITE_AMALGAMATION extern const unsigned char sqlite3UpperToLower[]; +extern const unsigned char sqlite3CtypeMap[]; +extern SQLITE_WSD struct Sqlite3Config sqlite3Config; +extern SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; +extern int sqlite3PendingByte; +#endif void sqlite3RootPageMoved(Db*, int, int); void sqlite3Reindex(Parse*, Token*, Token*); void sqlite3AlterFunctions(sqlite3*); @@ -1816,50 +2752,59 @@ int sqlite3GetToken(const unsigned char *, int *); void sqlite3NestedParse(Parse*, const char*, ...); void sqlite3ExpirePreparedStatements(sqlite3*); -void sqlite3CodeSubselect(Parse *, Expr *); -int sqlite3SelectResolve(Parse *, Select *, NameContext *); +void sqlite3CodeSubselect(Parse *, Expr *, int, int); +void sqlite3SelectPrep(Parse*, Select*, NameContext*); +int sqlite3ResolveExprNames(NameContext*, Expr*); +void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*); +int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*); void sqlite3ColumnDefault(Vdbe *, Table *, int); void sqlite3AlterFinishAddColumn(Parse *, Token *); void sqlite3AlterBeginAddColumn(Parse *, SrcList *); -CollSeq *sqlite3GetCollSeq(sqlite3*, CollSeq *, const char *, int); -char sqlite3AffinityType(const Token*); +CollSeq *sqlite3GetCollSeq(sqlite3*, CollSeq *, const char*); +char sqlite3AffinityType(const char*); void sqlite3Analyze(Parse*, Token*, Token*); int sqlite3InvokeBusyHandler(BusyHandler*); int sqlite3FindDb(sqlite3*, Token*); +int sqlite3FindDbName(sqlite3 *, const char *); int sqlite3AnalysisLoad(sqlite3*,int iDB); void sqlite3DefaultRowEst(Index*); void sqlite3RegisterLikeFunctions(sqlite3*, int); int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*); -ThreadData *sqlite3ThreadData(void); -const ThreadData 
*sqlite3ThreadDataReadOnly(void); -void sqlite3ReleaseThreadData(void); -void sqlite3AttachFunctions(sqlite3 *); void sqlite3MinimumFileFormat(Parse*, int, int); void sqlite3SchemaFree(void *); -Schema *sqlite3SchemaGet(Btree *); +Schema *sqlite3SchemaGet(sqlite3 *, Btree *); int sqlite3SchemaToIndex(sqlite3 *db, Schema *); KeyInfo *sqlite3IndexKeyinfo(Parse *, Index *); int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *, void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*)); int sqlite3ApiExit(sqlite3 *db, int); -void sqlite3FailedMalloc(void); -void sqlite3AbortOtherActiveVdbes(sqlite3 *, Vdbe *); int sqlite3OpenTempDatabase(Parse *); +void sqlite3StrAccumInit(StrAccum*, char*, int, int); +void sqlite3StrAccumAppend(StrAccum*,const char*,int); +char *sqlite3StrAccumFinish(StrAccum*); +void sqlite3StrAccumReset(StrAccum*); +void sqlite3SelectDestInit(SelectDest*,int,int); + +void sqlite3BackupRestart(sqlite3_backup *); +void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *); + /* ** The interface to the LEMON-generated parser */ void *sqlite3ParserAlloc(void*(*)(size_t)); void sqlite3ParserFree(void*, void(*)(void*)); void sqlite3Parser(void*, int, Token, Parse*); +#ifdef YYTRACKMAXSTACKDEPTH + int sqlite3ParserStackPeak(void*); +#endif +void sqlite3AutoLoadExtensions(sqlite3*); #ifndef SQLITE_OMIT_LOAD_EXTENSION void sqlite3CloseExtensions(sqlite3*); - int sqlite3AutoLoadExtensions(sqlite3*); #else # define sqlite3CloseExtensions(X) -# define sqlite3AutoLoadExtensions(X) SQLITE_OK #endif #ifndef SQLITE_OMIT_SHARED_CACHE @@ -1872,35 +2817,20 @@ int sqlite3Utf8To8(unsigned char*); #endif -#ifdef SQLITE_MEMDEBUG - void sqlite3MallocDisallow(void); - void sqlite3MallocAllow(void); - int sqlite3TestMallocFail(void); -#else - #define sqlite3TestMallocFail() 0 - #define sqlite3MallocDisallow() - #define sqlite3MallocAllow() -#endif - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - void *sqlite3ThreadSafeMalloc(int); - void sqlite3ThreadSafeFree(void *); -#else - #define sqlite3ThreadSafeMalloc sqlite3MallocX - #define sqlite3ThreadSafeFree sqlite3FreeX -#endif - #ifdef SQLITE_OMIT_VIRTUALTABLE # define sqlite3VtabClear(X) -# define sqlite3VtabSync(X,Y) (Y) +# define sqlite3VtabSync(X,Y) SQLITE_OK # define sqlite3VtabRollback(X) # define sqlite3VtabCommit(X) +# define sqlite3VtabInSync(db) 0 #else void sqlite3VtabClear(Table*); - int sqlite3VtabSync(sqlite3 *db, int rc); + int sqlite3VtabSync(sqlite3 *db, char **); int sqlite3VtabRollback(sqlite3 *db); int sqlite3VtabCommit(sqlite3 *db); +# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0) #endif +void sqlite3VtabMakeWritable(Parse*,Table*); void sqlite3VtabLock(sqlite3_vtab*); void sqlite3VtabUnlock(sqlite3*, sqlite3_vtab*); void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*); @@ -1911,24 +2841,73 @@ int sqlite3VtabCallConnect(Parse*, Table*); int sqlite3VtabCallDestroy(sqlite3*, int, const char *); int sqlite3VtabBegin(sqlite3 *, sqlite3_vtab *); -FuncDef *sqlite3VtabOverloadFunction(FuncDef*, int nArg, Expr*); +FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); void sqlite3InvalidFunction(sqlite3_context*,int,sqlite3_value**); +int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); int sqlite3Reprepare(Vdbe*); -void sqlite3ExprListCheckLength(Parse*, ExprList*, int, const char*); +void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*); CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *); 
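The sqlite3StrAccumInit/Append/Finish/Reset prototypes above, together with the StrAccum object defined earlier in this header, describe a string builder that starts in a caller-supplied buffer and only moves to the heap when the text outgrows it. A stripped-down, self-contained sketch of the same idea follows; the names, sizes, and simplified error handling are invented for illustration and are not the library routines.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Accum Accum;
struct Accum {
  char *zBase;    /* caller-supplied buffer, not from malloc */
  char *zText;    /* current text; either zBase or a malloc'd copy */
  int nChar;      /* characters accumulated so far */
  int nAlloc;     /* size of zText in bytes */
  int useMalloc;  /* true once zText has been moved to the heap */
};

static void accumInit(Accum *p, char *zBuf, int n){
  p->zBase = p->zText = zBuf;
  p->nChar = 0;
  p->nAlloc = n;
  p->useMalloc = 0;
}

static void accumAppend(Accum *p, const char *z, int n){
  if( n<0 ) n = (int)strlen(z);
  if( p->nChar+n+1 > p->nAlloc ){
    int nNew = (p->nChar+n+1)*2;
    char *zNew = malloc(nNew);
    if( zNew==0 ) return;                /* the real code records an OOM here */
    memcpy(zNew, p->zText, p->nChar);
    if( p->useMalloc ) free(p->zText);
    p->zText = zNew;
    p->nAlloc = nNew;
    p->useMalloc = 1;
  }
  memcpy(&p->zText[p->nChar], z, n);
  p->nChar += n;
}

static char *accumFinish(Accum *p){
  p->zText[p->nChar] = 0;
  return p->zText;                       /* caller frees it when useMalloc is set */
}

int main(void){
  char zSpace[16];
  Accum a;
  accumInit(&a, zSpace, (int)sizeof(zSpace));
  accumAppend(&a, "SELECT ", -1);
  accumAppend(&a, "* FROM sqlite_master;", -1);
  printf("%s\n", accumFinish(&a));
  if( a.useMalloc ) free(a.zText);
  return 0;
}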
+int sqlite3TempInMemory(const sqlite3*); + + + +/* +** Available fault injectors. Should be numbered beginning with 0. +*/ +#define SQLITE_FAULTINJECTOR_MALLOC 0 +#define SQLITE_FAULTINJECTOR_COUNT 1 + +/* +** The interface to the code in fault.c used for identifying "benign" +** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST +** is not defined. +*/ +#ifndef SQLITE_OMIT_BUILTIN_TEST + void sqlite3BeginBenignMalloc(void); + void sqlite3EndBenignMalloc(void); +#else + #define sqlite3BeginBenignMalloc() + #define sqlite3EndBenignMalloc() +#endif + +#define IN_INDEX_ROWID 1 +#define IN_INDEX_EPH 2 +#define IN_INDEX_INDEX 3 +int sqlite3FindInIndex(Parse *, Expr *, int*); + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int); + int sqlite3JournalSize(sqlite3_vfs *); + int sqlite3JournalCreate(sqlite3_file *); +#else + #define sqlite3JournalSize(pVfs) ((pVfs)->szOsFile) +#endif + +void sqlite3MemJournalOpen(sqlite3_file *); +int sqlite3MemJournalSize(void); +int sqlite3IsMemJournal(sqlite3_file *); #if SQLITE_MAX_EXPR_DEPTH>0 - void sqlite3ExprSetHeight(Expr *); + void sqlite3ExprSetHeight(Parse *pParse, Expr *p); int sqlite3SelectExprHeight(Select *); + int sqlite3ExprCheckHeight(Parse*, int); #else - #define sqlite3ExprSetHeight(x) + #define sqlite3ExprSetHeight(x,y) + #define sqlite3SelectExprHeight(x) 0 + #define sqlite3ExprCheckHeight(x,y) #endif u32 sqlite3Get4byte(const u8*); void sqlite3Put4byte(u8*, u32); -#ifdef SQLITE_SSE -#include "sseInt.h" +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *); + void sqlite3ConnectionUnlocked(sqlite3 *db); + void sqlite3ConnectionClosed(sqlite3 *db); +#else + #define sqlite3ConnectionBlocked(x,y) + #define sqlite3ConnectionUnlocked(x) + #define sqlite3ConnectionClosed(x) #endif #ifdef SQLITE_DEBUG @@ -1937,16 +2916,16 @@ /* ** If the SQLITE_ENABLE IOTRACE exists then the global variable -** sqlite3_io_trace is a pointer to a printf-like routine used to +** sqlite3IoTrace is a pointer to a printf-like routine used to ** print I/O tracing messages. */ #ifdef SQLITE_ENABLE_IOTRACE -# define IOTRACE(A) if( sqlite3_io_trace ){ sqlite3_io_trace A; } +# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; } void sqlite3VdbeIOTraceSql(Vdbe*); +SQLITE_EXTERN void (*sqlite3IoTrace)(const char*,...); #else # define IOTRACE(A) # define sqlite3VdbeIOTraceSql(X) #endif -SQLITE_EXTERN void (*sqlite3_io_trace)(const char*,...); #endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/sqliteLimit.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/sqliteLimit.h --- sqlite3-3.4.2/src/sqliteLimit.h 2007-06-19 16:23:48.000000000 +0100 +++ sqlite3-3.6.16/src/sqliteLimit.h 2009-06-12 03:37:49.000000000 +0100 @@ -12,7 +12,7 @@ ** ** This file defines various limits of what SQLite can process. ** -** @(#) $Id: sqliteLimit.h,v 1.1 2007/06/19 15:23:48 drh Exp $ +** @(#) $Id: sqliteLimit.h,v 1.10 2009/01/10 16:15:09 danielk1977 Exp $ */ /* @@ -49,18 +49,24 @@ /* ** The maximum length of a single SQL statement in bytes. -** The hard limit here is the same as SQLITE_MAX_LENGTH. +** +** It used to be the case that setting this value to zero would +** turn the limit off. That is no longer true. It is not possible +** to turn this limit off. */ #ifndef SQLITE_MAX_SQL_LENGTH -# define SQLITE_MAX_SQL_LENGTH 1000000 +# define SQLITE_MAX_SQL_LENGTH 1000000000 #endif /* ** The maximum depth of an expression tree. This is limited to ** some extent by SQLITE_MAX_SQL_LENGTH. 
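The statement-length and expression-depth ceilings discussed in sqliteLimit.h can also be inspected and lowered at run time, per connection, through the public sqlite3_limit() interface that is part of the 3.6.x API. A short usage sketch, with error handling kept minimal:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* A negative new value leaves the limit unchanged and returns it. */
  printf("SQL length limit: %d\n", sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1));
  printf("Expr depth limit: %d\n", sqlite3_limit(db, SQLITE_LIMIT_EXPR_DEPTH, -1));

  /* Lower both for this connection; requests above the compile-time
  ** ceiling are clamped back down to that ceiling. */
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);
  sqlite3_limit(db, SQLITE_LIMIT_EXPR_DEPTH, 100);

  printf("new SQL length limit: %d\n", sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1));
  printf("new expr depth limit: %d\n", sqlite3_limit(db, SQLITE_LIMIT_EXPR_DEPTH, -1));

  sqlite3_close(db);
  return 0;
}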
But sometime you might ** want to place more severe limits on the complexity of an -** expression. A value of 0 (the default) means do not enforce -** any limitation on expression tree depth. +** expression. +** +** A value of 0 used to mean that the limit was not enforced. +** But that is no longer true. The limit is now strictly enforced +** at all times. */ #ifndef SQLITE_MAX_EXPR_DEPTH # define SQLITE_MAX_EXPR_DEPTH 1000 @@ -90,7 +96,7 @@ ** The maximum number of arguments to an SQL function. */ #ifndef SQLITE_MAX_FUNCTION_ARG -# define SQLITE_MAX_FUNCTION_ARG 100 +# define SQLITE_MAX_FUNCTION_ARG 127 #endif /* @@ -105,11 +111,9 @@ #endif /* -** The maximum number of attached databases. This must be at least 2 -** in order to support the main database file (0) and the file used to -** hold temporary tables (1). And it must be less than 32 because -** we use a bitmask of databases with a u32 in places (for example -** the Parse.cookieMask field). +** The maximum number of attached databases. This must be between 0 +** and 30. The upper bound on 30 is because a 32-bit integer bitmap +** is used internally to track attached databases. */ #ifndef SQLITE_MAX_ATTACHED # define SQLITE_MAX_ATTACHED 10 @@ -123,20 +127,48 @@ # define SQLITE_MAX_VARIABLE_NUMBER 999 #endif +/* Maximum page size. The upper bound on this value is 32768. This a limit +** imposed by the necessity of storing the value in a 2-byte unsigned integer +** and the fact that the page size must be a power of 2. +** +** If this limit is changed, then the compiled library is technically +** incompatible with an SQLite library compiled with a different limit. If +** a process operating on a database with a page-size of 65536 bytes +** crashes, then an instance of SQLite compiled with the default page-size +** limit will not be able to rollback the aborted transaction. This could +** lead to database corruption. +*/ +#ifndef SQLITE_MAX_PAGE_SIZE +# define SQLITE_MAX_PAGE_SIZE 32768 +#endif + + /* ** The default size of a database page. */ #ifndef SQLITE_DEFAULT_PAGE_SIZE # define SQLITE_DEFAULT_PAGE_SIZE 1024 #endif +#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE +# undef SQLITE_DEFAULT_PAGE_SIZE +# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE +#endif -/* Maximum page size. The upper bound on this value is 32768. This a limit -** imposed by the necessity of storing the value in a 2-byte unsigned integer -** and the fact that the page size must be a power of 2. +/* +** Ordinarily, if no value is explicitly provided, SQLite creates databases +** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain +** device characteristics (sector-size and atomic write() support), +** SQLite may choose a larger value. This constant is the maximum value +** SQLite will choose on its own. */ -#ifndef SQLITE_MAX_PAGE_SIZE -# define SQLITE_MAX_PAGE_SIZE 32768 +#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE +# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192 #endif +#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE +# undef SQLITE_MAX_DEFAULT_PAGE_SIZE +# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE +#endif + /* ** Maximum number of pages in one database file. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/status.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/status.c --- sqlite3-3.4.2/src/status.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/status.c 2008-09-02 01:52:52.000000000 +0100 @@ -0,0 +1,122 @@ +/* +** 2008 June 18 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This module implements the sqlite3_status() interface and related +** functionality. +** +** $Id: status.c,v 1.9 2008/09/02 00:52:52 drh Exp $ +*/ +#include "sqliteInt.h" + +/* +** Variables in which to record status information. +*/ +typedef struct sqlite3StatType sqlite3StatType; +static SQLITE_WSD struct sqlite3StatType { + int nowValue[9]; /* Current value */ + int mxValue[9]; /* Maximum value */ +} sqlite3Stat = { {0,}, {0,} }; + + +/* The "wsdStat" macro will resolve to the status information +** state vector. If writable static data is unsupported on the target, +** we have to locate the state vector at run-time. In the more common +** case where writable static data is supported, wsdStat can refer directly +** to the "sqlite3Stat" state vector declared above. +*/ +#ifdef SQLITE_OMIT_WSD +# define wsdStatInit sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat) +# define wsdStat x[0] +#else +# define wsdStatInit +# define wsdStat sqlite3Stat +#endif + +/* +** Return the current value of a status parameter. +*/ +int sqlite3StatusValue(int op){ + wsdStatInit; + assert( op>=0 && op<ArraySize(wsdStat.nowValue) ); + return wsdStat.nowValue[op]; +} + +/* +** Add N to the value of a status record. It is assumed that the +** caller holds appropriate locks. +*/ +void sqlite3StatusAdd(int op, int N){ + wsdStatInit; + assert( op>=0 && op<ArraySize(wsdStat.nowValue) ); + wsdStat.nowValue[op] += N; + if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){ + wsdStat.mxValue[op] = wsdStat.nowValue[op]; + } +} + +/* +** Set the value of a status to X. +*/ +void sqlite3StatusSet(int op, int X){ + wsdStatInit; + assert( op>=0 && op<ArraySize(wsdStat.nowValue) ); + wsdStat.nowValue[op] = X; + if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){ + wsdStat.mxValue[op] = wsdStat.nowValue[op]; + } +} + +/* +** Query status information. +** +** This implementation assumes that reading or writing an aligned +** 32-bit integer is an atomic operation. If that assumption is not true, +** then this routine is not threadsafe. +*/ +int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){ + wsdStatInit; + if( op<0 || op>=ArraySize(wsdStat.nowValue) ){ + return SQLITE_MISUSE; + } + *pCurrent = wsdStat.nowValue[op]; + *pHighwater = wsdStat.mxValue[op]; + if( resetFlag ){ + wsdStat.mxValue[op] = wsdStat.nowValue[op]; + } + return SQLITE_OK; +} + +/* +** Query status information for a single database connection +*/ +int sqlite3_db_status( + sqlite3 *db, /* The database connection whose status is desired */ + int op, /* Status verb */ + int *pCurrent, /* Write current value here */ + int *pHighwater, /* Write high-water mark here */ + int resetFlag /* Reset high-water mark if true */ +){ + switch( op ){ + case SQLITE_DBSTATUS_LOOKASIDE_USED: { + *pCurrent = db->lookaside.nOut; + *pHighwater = db->lookaside.mxOut; + if( resetFlag ){ + db->lookaside.mxOut = db->lookaside.nOut; + } + break; + } + default: { + return SQLITE_ERROR; + } + } + return SQLITE_OK; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/t1 /tmp/3ARg2Grji7/sqlite3-3.6.16/src/t1 --- sqlite3-3.4.2/src/t1 2007-08-07 18:12:08.000000000 +0100 +++ sqlite3-3.6.16/src/t1 1970-01-01 01:00:00.000000000 +0100 @@ -1,2769 +0,0 @@ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give.
-** -** ************************************************************************* -** This module contains C code that generates VDBE code used to process -** the WHERE clause of SQL statements. This module is reponsible for -** generating the code that loops through a table looking for applicable -** rows. Indices are selected and used to speed the search when doing -** so is applicable. Because this module is responsible for selecting -** indices, you might also think of this module as the "query optimizer". -** -** $Id: where.c,v 1.254 2007/07/30 14:40:48 danielk1977 Exp $ -*/ -#include "sqliteInt.h" - -/* -** The number of bits in a Bitmask. "BMS" means "BitMask Size". -*/ -#define BMS (sizeof(Bitmask)*8) - -/* -** Trace output macros -*/ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -int sqlite3_where_trace = 0; -# define WHERETRACE(X) if(sqlite3_where_trace) sqlite3DebugPrintf X -#else -# define WHERETRACE(X) -#endif - -/* Forward reference -*/ -typedef struct WhereClause WhereClause; -typedef struct ExprMaskSet ExprMaskSet; - -/* -** The query generator uses an array of instances of this structure to -** help it analyze the subexpressions of the WHERE clause. Each WHERE -** clause subexpression is separated from the others by an AND operator. -** -** All WhereTerms are collected into a single WhereClause structure. -** The following identity holds: -** -** WhereTerm.pWC->a[WhereTerm.idx] == WhereTerm -** -** When a term is of the form: -** -** X <op> <expr> -** -** where X is a column name and <op> is one of certain operators, -** then WhereTerm.leftCursor and WhereTerm.leftColumn record the -** cursor number and column number for X. WhereTerm.operator records -** the <op> using a bitmask encoding defined by WO_xxx below. The -** use of a bitmask encoding for the operator allows us to search -** quickly for terms that match any of several different operators. -** -** prereqRight and prereqAll record sets of cursor numbers, -** but they do so indirectly. A single ExprMaskSet structure translates -** cursor number into bits and the translated bit is stored in the prereq -** fields. The translation is used in order to maximize the number of -** bits that will fit in a Bitmask. The VDBE cursor numbers might be -** spread out over the non-negative integers. For example, the cursor -** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The ExprMaskSet -** translates these sparse cursor numbers into consecutive integers -** beginning with 0 in order to make the best possible use of the available -** bits in the Bitmask. So, in the example above, the cursor numbers -** would be mapped into integers 0 through 7. -*/ -typedef struct WhereTerm WhereTerm; -struct WhereTerm { - Expr *pExpr; /* Pointer to the subexpression */ - i16 iParent; /* Disable pWC->a[iParent] when this term disabled */ - i16 leftCursor; /* Cursor number of X in "X <op> <expr>" */ - i16 leftColumn; /* Column number of X in "X <op> <expr>" */ - u16 eOperator; /* A WO_xx value describing <op> */ - u8 flags; /* Bit flags. See below */ - u8 nChild; /* Number of children that must disable us */ - WhereClause *pWC; /* The clause this term is part of */ - Bitmask prereqRight; /* Bitmask of tables used by pRight */ - Bitmask prereqAll; /* Bitmask of tables referenced by p */ -}; - -/* -** Allowed values of WhereTerm.flags -*/ -#define TERM_DYNAMIC 0x01 /* Need to call sqlite3ExprDelete(pExpr) */ -#define TERM_VIRTUAL 0x02 /* Added by the optimizer.
Do not code */ -#define TERM_CODED 0x04 /* This term is already coded */ -#define TERM_COPIED 0x08 /* Has a child */ -#define TERM_OR_OK 0x10 /* Used during OR-clause processing */ - -/* -** An instance of the following structure holds all information about a -** WHERE clause. Mostly this is a container for one or more WhereTerms. -*/ -struct WhereClause { - Parse *pParse; /* The parser context */ - ExprMaskSet *pMaskSet; /* Mapping of table indices to bitmasks */ - int nTerm; /* Number of terms */ - int nSlot; /* Number of entries in a[] */ - WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ - WhereTerm aStatic[10]; /* Initial static space for a[] */ -}; - -/* -** An instance of the following structure keeps track of a mapping -** between VDBE cursor numbers and bits of the bitmasks in WhereTerm. -** -** The VDBE cursor numbers are small integers contained in -** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE -** clause, the cursor numbers might not begin with 0 and they might -** contain gaps in the numbering sequence. But we want to make maximum -** use of the bits in our bitmasks. This structure provides a mapping -** from the sparse cursor numbers into consecutive integers beginning -** with 0. -** -** If ExprMaskSet.ix[A]==B it means that The A-th bit of a Bitmask -** corresponds VDBE cursor number B. The A-th bit of a bitmask is 1<3, 5->1, 8->2, 29->0, -** 57->5, 73->4. Or one of 719 other combinations might be used. It -** does not really matter. What is important is that sparse cursor -** numbers all get mapped into bit numbers that begin with 0 and contain -** no gaps. -*/ -struct ExprMaskSet { - int n; /* Number of assigned cursor values */ - int ix[sizeof(Bitmask)*8]; /* Cursor assigned to each bit */ -}; - - -/* -** Bitmasks for the operators that indices are able to exploit. An -** OR-ed combination of these values can be used when searching for -** terms in the where clause. -*/ -#define WO_IN 1 -#define WO_EQ 2 -#define WO_LT (WO_EQ<<(TK_LT-TK_EQ)) -#define WO_LE (WO_EQ<<(TK_LE-TK_EQ)) -#define WO_GT (WO_EQ<<(TK_GT-TK_EQ)) -#define WO_GE (WO_EQ<<(TK_GE-TK_EQ)) -#define WO_MATCH 64 -#define WO_ISNULL 128 - -/* -** Value for flags returned by bestIndex(). -** -** The least significant byte is reserved as a mask for WO_ values above. -** The WhereLevel.flags field is usually set to WO_IN|WO_EQ|WO_ISNULL. -** But if the table is the right table of a left join, WhereLevel.flags -** is set to WO_IN|WO_EQ. The WhereLevel.flags field can then be used as -** the "op" parameter to findTerm when we are resolving equality constraints. -** ISNULL constraints will then not be used on the right table of a left -** join. Tickets #2177 and #2189. -*/ -#define WHERE_ROWID_EQ 0x000100 /* rowid=EXPR or rowid IN (...) */ -#define WHERE_ROWID_RANGE 0x000200 /* rowidEXPR */ -#define WHERE_COLUMN_EQ 0x001000 /* x=EXPR or x IN (...) */ -#define WHERE_COLUMN_RANGE 0x002000 /* xEXPR */ -#define WHERE_COLUMN_IN 0x004000 /* x IN (...) */ -#define WHERE_TOP_LIMIT 0x010000 /* xEXPR or x>=EXPR constraint */ -#define WHERE_IDX_ONLY 0x080000 /* Use index only - omit table */ -#define WHERE_ORDERBY 0x100000 /* Output will appear in correct order */ -#define WHERE_REVERSE 0x200000 /* Scan in reverse order */ -#define WHERE_UNIQUE 0x400000 /* Selects no more than one row */ -#define WHERE_VIRTUALTABLE 0x800000 /* Use virtual-table processing */ - -/* -** Initialize a preallocated WhereClause structure. 
-*/ -static void whereClauseInit( - WhereClause *pWC, /* The WhereClause to be initialized */ - Parse *pParse, /* The parsing context */ - ExprMaskSet *pMaskSet /* Mapping from table indices to bitmasks */ -){ - pWC->pParse = pParse; - pWC->pMaskSet = pMaskSet; - pWC->nTerm = 0; - pWC->nSlot = ArraySize(pWC->aStatic); - pWC->a = pWC->aStatic; -} - -/* -** Deallocate a WhereClause structure. The WhereClause structure -** itself is not freed. This routine is the inverse of whereClauseInit(). -*/ -static void whereClauseClear(WhereClause *pWC){ - int i; - WhereTerm *a; - for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){ - if( a->flags & TERM_DYNAMIC ){ - sqlite3ExprDelete(a->pExpr); - } - } - if( pWC->a!=pWC->aStatic ){ - sqliteFree(pWC->a); - } -} - -/* -** Add a new entries to the WhereClause structure. Increase the allocated -** space as necessary. -** -** If the flags argument includes TERM_DYNAMIC, then responsibility -** for freeing the expression p is assumed by the WhereClause object. -** -** WARNING: This routine might reallocate the space used to store -** WhereTerms. All pointers to WhereTerms should be invalided after -** calling this routine. Such pointers may be reinitialized by referencing -** the pWC->a[] array. -*/ -static int whereClauseInsert(WhereClause *pWC, Expr *p, int flags){ - WhereTerm *pTerm; - int idx; - if( pWC->nTerm>=pWC->nSlot ){ - WhereTerm *pOld = pWC->a; - pWC->a = sqliteMalloc( sizeof(pWC->a[0])*pWC->nSlot*2 ); - if( pWC->a==0 ){ - if( flags & TERM_DYNAMIC ){ - sqlite3ExprDelete(p); - } - return 0; - } - memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm); - if( pOld!=pWC->aStatic ){ - sqliteFree(pOld); - } - pWC->nSlot *= 2; - } - pTerm = &pWC->a[idx = pWC->nTerm]; - pWC->nTerm++; - pTerm->pExpr = p; - pTerm->flags = flags; - pTerm->pWC = pWC; - pTerm->iParent = -1; - return idx; -} - -/* -** This routine identifies subexpressions in the WHERE clause where -** each subexpression is separated by the AND operator or some other -** operator specified in the op parameter. The WhereClause structure -** is filled with pointers to subexpressions. For example: -** -** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22) -** \________/ \_______________/ \________________/ -** slot[0] slot[1] slot[2] -** -** The original WHERE clause in pExpr is unaltered. All this routine -** does is make slot[] entries point to substructure within pExpr. -** -** In the previous sentence and in the diagram, "slot[]" refers to -** the WhereClause.a[] array. This array grows as needed to contain -** all terms of the WHERE clause. -*/ -static void whereSplit(WhereClause *pWC, Expr *pExpr, int op){ - if( pExpr==0 ) return; - if( pExpr->op!=op ){ - whereClauseInsert(pWC, pExpr, 0); - }else{ - whereSplit(pWC, pExpr->pLeft, op); - whereSplit(pWC, pExpr->pRight, op); - } -} - -/* -** Initialize an expression mask set -*/ -#define initMaskSet(P) memset(P, 0, sizeof(*P)) - -/* -** Return the bitmask for the given cursor number. Return 0 if -** iCursor is not in the set. -*/ -static Bitmask getMask(ExprMaskSet *pMaskSet, int iCursor){ - int i; - for(i=0; in; i++){ - if( pMaskSet->ix[i]==iCursor ){ - return ((Bitmask)1)<ix[] -** array will never overflow. -*/ -static void createMask(ExprMaskSet *pMaskSet, int iCursor){ - assert( pMaskSet->n < ArraySize(pMaskSet->ix) ); - pMaskSet->ix[pMaskSet->n++] = iCursor; -} - -/* -** This routine walks (recursively) an expression tree and generates -** a bitmask indicating which tables are used in that expression -** tree. 
-** -** In order for this routine to work, the calling function must have -** previously invoked sqlite3ExprResolveNames() on the expression. See -** the header comment on that routine for additional information. -** The sqlite3ExprResolveNames() routines looks for column names and -** sets their opcodes to TK_COLUMN and their Expr.iTable fields to -** the VDBE cursor number of the table. This routine just has to -** translate the cursor numbers into bitmask values and OR all -** the bitmasks together. -*/ -static Bitmask exprListTableUsage(ExprMaskSet*, ExprList*); -static Bitmask exprSelectTableUsage(ExprMaskSet*, Select*); -static Bitmask exprTableUsage(ExprMaskSet *pMaskSet, Expr *p){ - Bitmask mask = 0; - if( p==0 ) return 0; - if( p->op==TK_COLUMN ){ - mask = getMask(pMaskSet, p->iTable); - return mask; - } - mask = exprTableUsage(pMaskSet, p->pRight); - mask |= exprTableUsage(pMaskSet, p->pLeft); - mask |= exprListTableUsage(pMaskSet, p->pList); - mask |= exprSelectTableUsage(pMaskSet, p->pSelect); - return mask; -} -static Bitmask exprListTableUsage(ExprMaskSet *pMaskSet, ExprList *pList){ - int i; - Bitmask mask = 0; - if( pList ){ - for(i=0; inExpr; i++){ - mask |= exprTableUsage(pMaskSet, pList->a[i].pExpr); - } - } - return mask; -} -static Bitmask exprSelectTableUsage(ExprMaskSet *pMaskSet, Select *pS){ - Bitmask mask; - if( pS==0 ){ - mask = 0; - }else{ - mask = exprListTableUsage(pMaskSet, pS->pEList); - mask |= exprListTableUsage(pMaskSet, pS->pGroupBy); - mask |= exprListTableUsage(pMaskSet, pS->pOrderBy); - mask |= exprTableUsage(pMaskSet, pS->pWhere); - mask |= exprTableUsage(pMaskSet, pS->pHaving); - } - return mask; -} - -/* -** Return TRUE if the given operator is one of the operators that is -** allowed for an indexable WHERE clause term. The allowed operators are -** "=", "<", ">", "<=", ">=", and "IN". -*/ -static int allowedOp(int op){ - assert( TK_GT>TK_EQ && TK_GTTK_EQ && TK_LTTK_EQ && TK_LE=TK_EQ && op<=TK_GE) || op==TK_ISNULL; -} - -/* -** Swap two objects of type T. -*/ -#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} - -/* -** Commute a comparision operator. Expressions of the form "X op Y" -** are converted into "Y op X". -** -** If a collation sequence is associated with either the left or right -** side of the comparison, it remains associated with the same side after -** the commutation. So "Y collate NOCASE op X" becomes -** "X collate NOCASE op Y". This is because any collation sequence on -** the left hand side of a comparison overrides any collation sequence -** attached to the right. For the same reason the EP_ExpCollate flag -** is not commuted. -*/ -static void exprCommute(Expr *pExpr){ - u16 expRight = (pExpr->pRight->flags & EP_ExpCollate); - u16 expLeft = (pExpr->pLeft->flags & EP_ExpCollate); - assert( allowedOp(pExpr->op) && pExpr->op!=TK_IN ); - SWAP(CollSeq*,pExpr->pRight->pColl,pExpr->pLeft->pColl); - pExpr->pRight->flags = (pExpr->pRight->flags & ~EP_ExpCollate) | expLeft; - pExpr->pLeft->flags = (pExpr->pLeft->flags & ~EP_ExpCollate) | expRight; - SWAP(Expr*,pExpr->pRight,pExpr->pLeft); - if( pExpr->op>=TK_GT ){ - assert( TK_LT==TK_GT+2 ); - assert( TK_GE==TK_LE+2 ); - assert( TK_GT>TK_EQ ); - assert( TK_GTop>=TK_GT && pExpr->op<=TK_GE ); - pExpr->op = ((pExpr->op-TK_GT)^2)+TK_GT; - } -} - -/* -** Translate from TK_xx operator to WO_xx bitmask. 
-*/ -static int operatorMask(int op){ - int c; - assert( allowedOp(op) ); - if( op==TK_IN ){ - c = WO_IN; - }else if( op==TK_ISNULL ){ - c = WO_ISNULL; - }else{ - c = WO_EQ<<(op-TK_EQ); - } - assert( op!=TK_ISNULL || c==WO_ISNULL ); - assert( op!=TK_IN || c==WO_IN ); - assert( op!=TK_EQ || c==WO_EQ ); - assert( op!=TK_LT || c==WO_LT ); - assert( op!=TK_LE || c==WO_LE ); - assert( op!=TK_GT || c==WO_GT ); - assert( op!=TK_GE || c==WO_GE ); - return c; -} - -/* -** Search for a term in the WHERE clause that is of the form "X " -** where X is a reference to the iColumn of table iCur and is one of -** the WO_xx operator codes specified by the op parameter. -** Return a pointer to the term. Return 0 if not found. -*/ -static WhereTerm *findTerm( - WhereClause *pWC, /* The WHERE clause to be searched */ - int iCur, /* Cursor number of LHS */ - int iColumn, /* Column number of LHS */ - Bitmask notReady, /* RHS must not overlap with this mask */ - u16 op, /* Mask of WO_xx values describing operator */ - Index *pIdx /* Must be compatible with this index, if not NULL */ -){ - WhereTerm *pTerm; - int k; - for(pTerm=pWC->a, k=pWC->nTerm; k; k--, pTerm++){ - if( pTerm->leftCursor==iCur - && (pTerm->prereqRight & notReady)==0 - && pTerm->leftColumn==iColumn - && (pTerm->eOperator & op)!=0 - ){ - if( iCur>=0 && pIdx && pTerm->eOperator!=WO_ISNULL ){ - Expr *pX = pTerm->pExpr; - CollSeq *pColl; - char idxaff; - int j; - Parse *pParse = pWC->pParse; - - idxaff = pIdx->pTable->aCol[iColumn].affinity; - if( !sqlite3IndexAffinityOk(pX, idxaff) ) continue; - - /* Figure out the collation sequence required from an index for - ** it to be useful for optimising expression pX. Store this - ** value in variable pColl. - */ - assert(pX->pLeft); - pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); - if( !pColl ){ - pColl = pParse->db->pDfltColl; - } - - for(j=0; jnColumn && pIdx->aiColumn[j]!=iColumn; j++){} - assert( jnColumn ); - if( sqlite3StrICmp(pColl->zName, pIdx->azColl[j]) ) continue; - } - return pTerm; - } - } - return 0; -} - -/* Forward reference */ -static void exprAnalyze(SrcList*, WhereClause*, int); - -/* -** Call exprAnalyze on all terms in a WHERE clause. -** -** -*/ -static void exprAnalyzeAll( - SrcList *pTabList, /* the FROM clause */ - WhereClause *pWC /* the WHERE clause to be analyzed */ -){ - int i; - for(i=pWC->nTerm-1; i>=0; i--){ - exprAnalyze(pTabList, pWC, i); - } -} - -#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION -/* -** Check to see if the given expression is a LIKE or GLOB operator that -** can be optimized using inequality constraints. Return TRUE if it is -** so and false if not. -** -** In order for the operator to be optimizible, the RHS must be a string -** literal that does not begin with a wildcard. -*/ -static int isLikeOrGlob( - sqlite3 *db, /* The database */ - Expr *pExpr, /* Test this expression */ - int *pnPattern, /* Number of non-wildcard prefix characters */ - int *pisComplete /* True if the only wildcard is % in the last character */ -){ - const char *z; - Expr *pRight, *pLeft; - ExprList *pList; - int c, cnt; - int noCase; - char wc[3]; - CollSeq *pColl; - - if( !sqlite3IsLikeFunction(db, pExpr, &noCase, wc) ){ - return 0; - } - pList = pExpr->pList; - pRight = pList->a[0].pExpr; - if( pRight->op!=TK_STRING ){ - return 0; - } - pLeft = pList->a[1].pExpr; - if( pLeft->op!=TK_COLUMN ){ - return 0; - } - pColl = pLeft->pColl; - if( pColl==0 ){ - /* TODO: Coverage testing doesn't get this case. 
Is it actually possible - ** for an expression of type TK_COLUMN to not have an assigned collation - ** sequence at this point? - */ - pColl = db->pDfltColl; - } - if( (pColl->type!=SQLITE_COLL_BINARY || noCase) && - (pColl->type!=SQLITE_COLL_NOCASE || !noCase) ){ - return 0; - } - sqlite3DequoteExpr(pRight); - z = (char *)pRight->token.z; - for(cnt=0; (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2]; cnt++){} - if( cnt==0 || 255==(u8)z[cnt] ){ - return 0; - } - *pisComplete = z[cnt]==wc[0] && z[cnt+1]==0; - *pnPattern = cnt; - return 1; -} -#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ - - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Check to see if the given expression is of the form -** -** column MATCH expr -** -** If it is then return TRUE. If not, return FALSE. -*/ -static int isMatchOfColumn( - Expr *pExpr /* Test this expression */ -){ - ExprList *pList; - - if( pExpr->op!=TK_FUNCTION ){ - return 0; - } - if( pExpr->token.n!=5 || - sqlite3StrNICmp((const char*)pExpr->token.z,"match",5)!=0 ){ - return 0; - } - pList = pExpr->pList; - if( pList->nExpr!=2 ){ - return 0; - } - if( pList->a[1].pExpr->op != TK_COLUMN ){ - return 0; - } - return 1; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/* -** If the pBase expression originated in the ON or USING clause of -** a join, then transfer the appropriate markings over to derived. -*/ -static void transferJoinMarkings(Expr *pDerived, Expr *pBase){ - pDerived->flags |= pBase->flags & EP_FromJoin; - pDerived->iRightJoinTable = pBase->iRightJoinTable; -} - -#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) -/* -** Return TRUE if the given term of an OR clause can be converted -** into an IN clause. The iCursor and iColumn define the left-hand -** side of the IN clause. -** -** The context is that we have multiple OR-connected equality terms -** like this: -** -** a= OR a= OR b= OR ... -** -** The pOrTerm input to this routine corresponds to a single term of -** this OR clause. In order for the term to be a condidate for -** conversion to an IN operator, the following must be true: -** -** * The left-hand side of the term must be the column which -** is identified by iCursor and iColumn. -** -** * If the right-hand side is also a column, then the affinities -** of both right and left sides must be such that no type -** conversions are required on the right. (Ticket #2249) -** -** If both of these conditions are true, then return true. Otherwise -** return false. -*/ -static int orTermIsOptCandidate(WhereTerm *pOrTerm, int iCursor, int iColumn){ - int affLeft, affRight; - assert( pOrTerm->eOperator==WO_EQ ); - if( pOrTerm->leftCursor!=iCursor ){ - return 0; - } - if( pOrTerm->leftColumn!=iColumn ){ - return 0; - } - affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight); - if( affRight==0 ){ - return 1; - } - affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft); - if( affRight!=affLeft ){ - return 0; - } - return 1; -} - -/* -** Return true if the given term of an OR clause can be ignored during -** a check to make sure all OR terms are candidates for optimization. -** In other words, return true if a call to the orTermIsOptCandidate() -** above returned false but it is not necessary to disqualify the -** optimization. 
-** -** Suppose the original OR phrase was this: -** -** a=4 OR a=11 OR a=b -** -** During analysis, the third term gets flipped around and duplicate -** so that we are left with this: -** -** a=4 OR a=11 OR a=b OR b=a -** -** Since the last two terms are duplicates, only one of them -** has to qualify in order for the whole phrase to qualify. When -** this routine is called, we know that pOrTerm did not qualify. -** This routine merely checks to see if pOrTerm has a duplicate that -** might qualify. If there is a duplicate that has not yet been -** disqualified, then return true. If there are no duplicates, or -** the duplicate has also been disqualifed, return false. -*/ -static int orTermHasOkDuplicate(WhereClause *pOr, WhereTerm *pOrTerm){ - if( pOrTerm->flags & TERM_COPIED ){ - /* This is the original term. The duplicate is to the left had - ** has not yet been analyzed and thus has not yet been disqualified. */ - return 1; - } - if( (pOrTerm->flags & TERM_VIRTUAL)!=0 - && (pOr->a[pOrTerm->iParent].flags & TERM_OR_OK)!=0 ){ - /* This is a duplicate term. The original qualified so this one - ** does not have to. */ - return 1; - } - /* This is either a singleton term or else it is a duplicate for - ** which the original did not qualify. Either way we are done for. */ - return 0; -} -#endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */ - -/* -** The input to this routine is an WhereTerm structure with only the -** "pExpr" field filled in. The job of this routine is to analyze the -** subexpression and populate all the other fields of the WhereTerm -** structure. -** -** If the expression is of the form " X" it gets commuted -** to the standard form of "X ". If the expression is of -** the form "X Y" where both X and Y are columns, then the original -** expression is unchanged and a new virtual expression of the form -** "Y X" is added to the WHERE clause and analyzed separately. 
-*/ -static void exprAnalyze( - SrcList *pSrc, /* the FROM clause */ - WhereClause *pWC, /* the WHERE clause */ - int idxTerm /* Index of the term to be analyzed */ -){ - WhereTerm *pTerm = &pWC->a[idxTerm]; - ExprMaskSet *pMaskSet = pWC->pMaskSet; - Expr *pExpr = pTerm->pExpr; - Bitmask prereqLeft; - Bitmask prereqAll; - int nPattern; - int isComplete; - int op; - - if( sqlite3MallocFailed() ) return; - prereqLeft = exprTableUsage(pMaskSet, pExpr->pLeft); - op = pExpr->op; - if( op==TK_IN ){ - assert( pExpr->pRight==0 ); - pTerm->prereqRight = exprListTableUsage(pMaskSet, pExpr->pList) - | exprSelectTableUsage(pMaskSet, pExpr->pSelect); - }else if( op==TK_ISNULL ){ - pTerm->prereqRight = 0; - }else{ - pTerm->prereqRight = exprTableUsage(pMaskSet, pExpr->pRight); - } - prereqAll = exprTableUsage(pMaskSet, pExpr); - if( ExprHasProperty(pExpr, EP_FromJoin) ){ - prereqAll |= getMask(pMaskSet, pExpr->iRightJoinTable); - } - pTerm->prereqAll = prereqAll; - pTerm->leftCursor = -1; - pTerm->iParent = -1; - pTerm->eOperator = 0; - if( allowedOp(op) && (pTerm->prereqRight & prereqLeft)==0 ){ - Expr *pLeft = pExpr->pLeft; - Expr *pRight = pExpr->pRight; - if( pLeft->op==TK_COLUMN ){ - pTerm->leftCursor = pLeft->iTable; - pTerm->leftColumn = pLeft->iColumn; - pTerm->eOperator = operatorMask(op); - } - if( pRight && pRight->op==TK_COLUMN ){ - WhereTerm *pNew; - Expr *pDup; - if( pTerm->leftCursor>=0 ){ - int idxNew; - pDup = sqlite3ExprDup(pExpr); - if( sqlite3MallocFailed() ){ - sqlite3ExprDelete(pDup); - return; - } - idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC); - if( idxNew==0 ) return; - pNew = &pWC->a[idxNew]; - pNew->iParent = idxTerm; - pTerm = &pWC->a[idxTerm]; - pTerm->nChild = 1; - pTerm->flags |= TERM_COPIED; - }else{ - pDup = pExpr; - pNew = pTerm; - } - exprCommute(pDup); - pLeft = pDup->pLeft; - pNew->leftCursor = pLeft->iTable; - pNew->leftColumn = pLeft->iColumn; - pNew->prereqRight = prereqLeft; - pNew->prereqAll = prereqAll; - pNew->eOperator = operatorMask(pDup->op); - } - } - -#ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION - /* If a term is the BETWEEN operator, create two new virtual terms - ** that define the range that the BETWEEN implements. - */ - else if( pExpr->op==TK_BETWEEN ){ - ExprList *pList = pExpr->pList; - int i; - static const u8 ops[] = {TK_GE, TK_LE}; - assert( pList!=0 ); - assert( pList->nExpr==2 ); - for(i=0; i<2; i++){ - Expr *pNewExpr; - int idxNew; - pNewExpr = sqlite3Expr(ops[i], sqlite3ExprDup(pExpr->pLeft), - sqlite3ExprDup(pList->a[i].pExpr), 0); - idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew); - pTerm = &pWC->a[idxTerm]; - pWC->a[idxNew].iParent = idxTerm; - } - pTerm->nChild = 2; - } -#endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */ - -#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) - /* Attempt to convert OR-connected terms into an IN operator so that - ** they can make use of indices. Example: - ** - ** x = expr1 OR expr2 = x OR x = expr3 - ** - ** is converted into - ** - ** x IN (expr1,expr2,expr3) - ** - ** This optimization must be omitted if OMIT_SUBQUERY is defined because - ** the compiler for the the IN operator is part of sub-queries. 
- */ - else if( pExpr->op==TK_OR ){ - int ok; - int i, j; - int iColumn, iCursor; - WhereClause sOr; - WhereTerm *pOrTerm; - - assert( (pTerm->flags & TERM_DYNAMIC)==0 ); - whereClauseInit(&sOr, pWC->pParse, pMaskSet); - whereSplit(&sOr, pExpr, TK_OR); - exprAnalyzeAll(pSrc, &sOr); - assert( sOr.nTerm>=2 ); - j = 0; - do{ - assert( j=0; - for(i=sOr.nTerm-1, pOrTerm=sOr.a; i>=0 && ok; i--, pOrTerm++){ - if( pOrTerm->eOperator!=WO_EQ ){ - goto or_not_possible; - } - if( orTermIsOptCandidate(pOrTerm, iCursor, iColumn) ){ - pOrTerm->flags |= TERM_OR_OK; - }else if( orTermHasOkDuplicate(&sOr, pOrTerm) ){ - pOrTerm->flags &= ~TERM_OR_OK; - }else{ - ok = 0; - } - } - }while( !ok && (sOr.a[j++].flags & TERM_COPIED)!=0 && j<2 ); - if( ok ){ - ExprList *pList = 0; - Expr *pNew, *pDup; - Expr *pLeft = 0; - for(i=sOr.nTerm-1, pOrTerm=sOr.a; i>=0 && ok; i--, pOrTerm++){ - if( (pOrTerm->flags & TERM_OR_OK)==0 ) continue; - pDup = sqlite3ExprDup(pOrTerm->pExpr->pRight); - pList = sqlite3ExprListAppend(pList, pDup, 0); - pLeft = pOrTerm->pExpr->pLeft; - } - assert( pLeft!=0 ); - pDup = sqlite3ExprDup(pLeft); - pNew = sqlite3Expr(TK_IN, pDup, 0, 0); - if( pNew ){ - int idxNew; - transferJoinMarkings(pNew, pExpr); - pNew->pList = pList; - idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew); - pTerm = &pWC->a[idxTerm]; - pWC->a[idxNew].iParent = idxTerm; - pTerm->nChild = 1; - }else{ - sqlite3ExprListDelete(pList); - } - } -or_not_possible: - whereClauseClear(&sOr); - } -#endif /* SQLITE_OMIT_OR_OPTIMIZATION */ - -#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION - /* Add constraints to reduce the search space on a LIKE or GLOB - ** operator. - */ - if( isLikeOrGlob(pWC->pParse->db, pExpr, &nPattern, &isComplete) ){ - Expr *pLeft, *pRight; - Expr *pStr1, *pStr2; - Expr *pNewExpr1, *pNewExpr2; - int idxNew1, idxNew2; - - pLeft = pExpr->pList->a[1].pExpr; - pRight = pExpr->pList->a[0].pExpr; - pStr1 = sqlite3Expr(TK_STRING, 0, 0, 0); - if( pStr1 ){ - sqlite3TokenCopy(&pStr1->token, &pRight->token); - pStr1->token.n = nPattern; - pStr1->flags = EP_Dequoted; - } - pStr2 = sqlite3ExprDup(pStr1); - if( pStr2 ){ - assert( pStr2->token.dyn ); - ++*(u8*)&pStr2->token.z[nPattern-1]; - } - pNewExpr1 = sqlite3Expr(TK_GE, sqlite3ExprDup(pLeft), pStr1, 0); - idxNew1 = whereClauseInsert(pWC, pNewExpr1, TERM_VIRTUAL|TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew1); - pNewExpr2 = sqlite3Expr(TK_LT, sqlite3ExprDup(pLeft), pStr2, 0); - idxNew2 = whereClauseInsert(pWC, pNewExpr2, TERM_VIRTUAL|TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew2); - pTerm = &pWC->a[idxTerm]; - if( isComplete ){ - pWC->a[idxNew1].iParent = idxTerm; - pWC->a[idxNew2].iParent = idxTerm; - pTerm->nChild = 2; - } - } -#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ - -#ifndef SQLITE_OMIT_VIRTUALTABLE - /* Add a WO_MATCH auxiliary term to the constraint set if the - ** current expression is of the form: column MATCH expr. - ** This information is used by the xBestIndex methods of - ** virtual tables. The native query optimizer does not attempt - ** to do anything with MATCH functions. 
- */ - if( isMatchOfColumn(pExpr) ){ - int idxNew; - Expr *pRight, *pLeft; - WhereTerm *pNewTerm; - Bitmask prereqColumn, prereqExpr; - - pRight = pExpr->pList->a[0].pExpr; - pLeft = pExpr->pList->a[1].pExpr; - prereqExpr = exprTableUsage(pMaskSet, pRight); - prereqColumn = exprTableUsage(pMaskSet, pLeft); - if( (prereqExpr & prereqColumn)==0 ){ - Expr *pNewExpr; - pNewExpr = sqlite3Expr(TK_MATCH, 0, sqlite3ExprDup(pRight), 0); - idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); - pNewTerm = &pWC->a[idxNew]; - pNewTerm->prereqRight = prereqExpr; - pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->leftColumn = pLeft->iColumn; - pNewTerm->eOperator = WO_MATCH; - pNewTerm->iParent = idxTerm; - pTerm = &pWC->a[idxTerm]; - pTerm->nChild = 1; - pTerm->flags |= TERM_COPIED; - pNewTerm->prereqAll = pTerm->prereqAll; - } - } -#endif /* SQLITE_OMIT_VIRTUALTABLE */ -} - -/* -** Return TRUE if any of the expressions in pList->a[iFirst...] contain -** a reference to any table other than the iBase table. -*/ -static int referencesOtherTables( - ExprList *pList, /* Search expressions in ths list */ - ExprMaskSet *pMaskSet, /* Mapping from tables to bitmaps */ - int iFirst, /* Be searching with the iFirst-th expression */ - int iBase /* Ignore references to this table */ -){ - Bitmask allowed = ~getMask(pMaskSet, iBase); - while( iFirstnExpr ){ - if( (exprTableUsage(pMaskSet, pList->a[iFirst++].pExpr)&allowed)!=0 ){ - return 1; - } - } - return 0; -} - - -/* -** This routine decides if pIdx can be used to satisfy the ORDER BY -** clause. If it can, it returns 1. If pIdx cannot satisfy the -** ORDER BY clause, this routine returns 0. -** -** pOrderBy is an ORDER BY clause from a SELECT statement. pTab is the -** left-most table in the FROM clause of that same SELECT statement and -** the table has a cursor number of "base". pIdx is an index on pTab. -** -** nEqCol is the number of columns of pIdx that are used as equality -** constraints. Any of these columns may be missing from the ORDER BY -** clause and the match can still be a success. -** -** All terms of the ORDER BY that match against the index must be either -** ASC or DESC. (Terms of the ORDER BY clause past the end of a UNIQUE -** index do not need to satisfy this constraint.) The *pbRev value is -** set to 1 if the ORDER BY clause is all DESC and it is set to 0 if -** the ORDER BY clause is all ASC. -*/ -static int isSortingIndex( - Parse *pParse, /* Parsing context */ - ExprMaskSet *pMaskSet, /* Mapping from table indices to bitmaps */ - Index *pIdx, /* The index we are testing */ - int base, /* Cursor number for the table to be sorted */ - ExprList *pOrderBy, /* The ORDER BY clause */ - int nEqCol, /* Number of index columns with == constraints */ - int *pbRev /* Set to 1 if ORDER BY is DESC */ -){ - int i, j; /* Loop counters */ - int sortOrder = 0; /* XOR of index and ORDER BY sort direction */ - int nTerm; /* Number of ORDER BY terms */ - struct ExprList_item *pTerm; /* A term of the ORDER BY clause */ - sqlite3 *db = pParse->db; - - assert( pOrderBy!=0 ); - nTerm = pOrderBy->nExpr; - assert( nTerm>0 ); - - /* Match terms of the ORDER BY clause against columns of - ** the index. - ** - ** Note that indices have pIdx->nColumn regular columns plus - ** one additional column containing the rowid. The rowid column - ** of the index is also allowed to match against the ORDER BY - ** clause. 
- */ - for(i=j=0, pTerm=pOrderBy->a; jnColumn; i++){ - Expr *pExpr; /* The expression of the ORDER BY pTerm */ - CollSeq *pColl; /* The collating sequence of pExpr */ - int termSortOrder; /* Sort order for this term */ - int iColumn; /* The i-th column of the index. -1 for rowid */ - int iSortOrder; /* 1 for DESC, 0 for ASC on the i-th index term */ - const char *zColl; /* Name of the collating sequence for i-th index term */ - - pExpr = pTerm->pExpr; - if( pExpr->op!=TK_COLUMN || pExpr->iTable!=base ){ - /* Can not use an index sort on anything that is not a column in the - ** left-most table of the FROM clause */ - break; - } - pColl = sqlite3ExprCollSeq(pParse, pExpr); - if( !pColl ){ - pColl = db->pDfltColl; - } - if( inColumn ){ - iColumn = pIdx->aiColumn[i]; - if( iColumn==pIdx->pTable->iPKey ){ - iColumn = -1; - } - iSortOrder = pIdx->aSortOrder[i]; - zColl = pIdx->azColl[i]; - }else{ - iColumn = -1; - iSortOrder = 0; - zColl = pColl->zName; - } - if( pExpr->iColumn!=iColumn || sqlite3StrICmp(pColl->zName, zColl) ){ - /* Term j of the ORDER BY clause does not match column i of the index */ - if( iaSortOrder!=0 ); - assert( pTerm->sortOrder==0 || pTerm->sortOrder==1 ); - assert( iSortOrder==0 || iSortOrder==1 ); - termSortOrder = iSortOrder ^ pTerm->sortOrder; - if( i>nEqCol ){ - if( termSortOrder!=sortOrder ){ - /* Indices can only be used if all ORDER BY terms past the - ** equality constraints are all either DESC or ASC. */ - return 0; - } - }else{ - sortOrder = termSortOrder; - } - j++; - pTerm++; - if( iColumn<0 && !referencesOtherTables(pOrderBy, pMaskSet, j, base) ){ - /* If the indexed column is the primary key and everything matches - ** so far and none of the ORDER BY terms to the right reference other - ** tables in the join, then we are assured that the index can be used - ** to sort because the primary key is unique and so none of the other - ** columns will make any difference - */ - j = nTerm; - } - } - - *pbRev = sortOrder!=0; - if( j>=nTerm ){ - /* All terms of the ORDER BY clause are covered by this index so - ** this index can be used for sorting. */ - return 1; - } - if( pIdx->onError!=OE_None && i==pIdx->nColumn - && !referencesOtherTables(pOrderBy, pMaskSet, j, base) ){ - /* All terms of this index match some prefix of the ORDER BY clause - ** and the index is UNIQUE and no terms on the tail of the ORDER BY - ** clause reference other tables in a join. If this is all true then - ** the order by clause is superfluous. */ - return 1; - } - return 0; -} - -/* -** Check table to see if the ORDER BY clause in pOrderBy can be satisfied -** by sorting in order of ROWID. Return true if so and set *pbRev to be -** true for reverse ROWID and false for forward ROWID order. -*/ -static int sortableByRowid( - int base, /* Cursor number for table to be sorted */ - ExprList *pOrderBy, /* The ORDER BY clause */ - ExprMaskSet *pMaskSet, /* Mapping from tables to bitmaps */ - int *pbRev /* Set to 1 if ORDER BY is DESC */ -){ - Expr *p; - - assert( pOrderBy!=0 ); - assert( pOrderBy->nExpr>0 ); - p = pOrderBy->a[0].pExpr; - if( p->op==TK_COLUMN && p->iTable==base && p->iColumn==-1 - && !referencesOtherTables(pOrderBy, pMaskSet, 1, base) ){ - *pbRev = pOrderBy->a[0].sortOrder; - return 1; - } - return 0; -} - -/* -** Prepare a crude estimate of the logarithm of the input value. -** The results need not be exact. This is only used for estimating -** the total cost of performing operatings with O(logN) or O(NlogN) -** complexity. 
Because N is just a guess, it is no great tragedy if -** logN is a little off. -*/ -static double estLog(double N){ - double logN = 1; - double x = 10; - while( N>x ){ - logN += 1; - x *= 10; - } - return logN; -} - -/* -** Two routines for printing the content of an sqlite3_index_info -** structure. Used for testing and debugging only. If neither -** SQLITE_TEST or SQLITE_DEBUG are defined, then these routines -** are no-ops. -*/ -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_DEBUG) -static void TRACE_IDX_INPUTS(sqlite3_index_info *p){ - int i; - if( !sqlite3_where_trace ) return; - for(i=0; inConstraint; i++){ - sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usabled=%d\n", - i, - p->aConstraint[i].iColumn, - p->aConstraint[i].iTermOffset, - p->aConstraint[i].op, - p->aConstraint[i].usable); - } - for(i=0; inOrderBy; i++){ - sqlite3DebugPrintf(" orderby[%d]: col=%d desc=%d\n", - i, - p->aOrderBy[i].iColumn, - p->aOrderBy[i].desc); - } -} -static void TRACE_IDX_OUTPUTS(sqlite3_index_info *p){ - int i; - if( !sqlite3_where_trace ) return; - for(i=0; inConstraint; i++){ - sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", - i, - p->aConstraintUsage[i].argvIndex, - p->aConstraintUsage[i].omit); - } - sqlite3DebugPrintf(" idxNum=%d\n", p->idxNum); - sqlite3DebugPrintf(" idxStr=%s\n", p->idxStr); - sqlite3DebugPrintf(" orderByConsumed=%d\n", p->orderByConsumed); - sqlite3DebugPrintf(" estimatedCost=%g\n", p->estimatedCost); -} -#else -#define TRACE_IDX_INPUTS(A) -#define TRACE_IDX_OUTPUTS(A) -#endif - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Compute the best index for a virtual table. -** -** The best index is computed by the xBestIndex method of the virtual -** table module. This routine is really just a wrapper that sets up -** the sqlite3_index_info structure that is used to communicate with -** xBestIndex. -** -** In a join, this routine might be called multiple times for the -** same virtual table. The sqlite3_index_info structure is created -** and initialized on the first invocation and reused on all subsequent -** invocations. The sqlite3_index_info structure is also used when -** code is generated to access the virtual table. The whereInfoDelete() -** routine takes care of freeing the sqlite3_index_info structure after -** everybody has finished with it. 
-*/ -static double bestVirtualIndex( - Parse *pParse, /* The parsing context */ - WhereClause *pWC, /* The WHERE clause */ - struct SrcList_item *pSrc, /* The FROM clause term to search */ - Bitmask notReady, /* Mask of cursors that are not available */ - ExprList *pOrderBy, /* The order by clause */ - int orderByUsable, /* True if we can potential sort */ - sqlite3_index_info **ppIdxInfo /* Index information passed to xBestIndex */ -){ - Table *pTab = pSrc->pTab; - sqlite3_index_info *pIdxInfo; - struct sqlite3_index_constraint *pIdxCons; - struct sqlite3_index_orderby *pIdxOrderBy; - struct sqlite3_index_constraint_usage *pUsage; - WhereTerm *pTerm; - int i, j; - int nOrderBy; - int rc; - - /* If the sqlite3_index_info structure has not been previously - ** allocated and initialized for this virtual table, then allocate - ** and initialize it now - */ - pIdxInfo = *ppIdxInfo; - if( pIdxInfo==0 ){ - WhereTerm *pTerm; - int nTerm; - WHERETRACE(("Recomputing index info for %s...\n", pTab->zName)); - - /* Count the number of possible WHERE clause constraints referring - ** to this virtual table */ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->eOperator==WO_IN ) continue; - nTerm++; - } - - /* If the ORDER BY clause contains only columns in the current - ** virtual table then allocate space for the aOrderBy part of - ** the sqlite3_index_info structure. - */ - nOrderBy = 0; - if( pOrderBy ){ - for(i=0; inExpr; i++){ - Expr *pExpr = pOrderBy->a[i].pExpr; - if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break; - } - if( i==pOrderBy->nExpr ){ - nOrderBy = pOrderBy->nExpr; - } - } - - /* Allocate the sqlite3_index_info structure - */ - pIdxInfo = sqliteMalloc( sizeof(*pIdxInfo) - + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy ); - if( pIdxInfo==0 ){ - sqlite3ErrorMsg(pParse, "out of memory"); - return 0.0; - } - *ppIdxInfo = pIdxInfo; - - /* Initialize the structure. The sqlite3_index_info structure contains - ** many fields that are declared "const" to prevent xBestIndex from - ** changing them. We have to do some funky casting in order to - ** initialize those fields. - */ - pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1]; - pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; - pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; - *(int*)&pIdxInfo->nConstraint = nTerm; - *(int*)&pIdxInfo->nOrderBy = nOrderBy; - *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons; - *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy; - *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage = - pUsage; - - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->eOperator==WO_IN ) continue; - pIdxCons[j].iColumn = pTerm->leftColumn; - pIdxCons[j].iTermOffset = i; - pIdxCons[j].op = pTerm->eOperator; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. 
*/ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH ); - assert( pTerm->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) ); - j++; - } - for(i=0; ia[i].pExpr; - pIdxOrderBy[i].iColumn = pExpr->iColumn; - pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder; - } - } - - /* At this point, the sqlite3_index_info structure that pIdxInfo points - ** to will have been initialized, either during the current invocation or - ** during some prior invocation. Now we just have to customize the - ** details of pIdxInfo for the current invocation and pass it to - ** xBestIndex. - */ - - /* The module name must be defined. Also, by this point there must - ** be a pointer to an sqlite3_vtab structure. Otherwise - ** sqlite3ViewGetColumnNames() would have picked up the error. - */ - assert( pTab->azModuleArg && pTab->azModuleArg[0] ); - assert( pTab->pVtab ); -#if 0 - if( pTab->pVtab==0 ){ - sqlite3ErrorMsg(pParse, "undefined module %s for table %s", - pTab->azModuleArg[0], pTab->zName); - return 0.0; - } -#endif - - /* Set the aConstraint[].usable fields and initialize all - ** output variables to zero. - ** - ** aConstraint[].usable is true for constraints where the right-hand - ** side contains only references to tables to the left of the current - ** table. In other words, if the constraint is of the form: - ** - ** column = expr - ** - ** and we are evaluating a join, then the constraint on column is - ** only valid if all tables referenced in expr occur to the left - ** of the table containing column. - ** - ** The aConstraints[] array contains entries for all constraints - ** on the current table. That way we only have to compute it once - ** even though we might try to pick the best index multiple times. - ** For each attempt at picking an index, the order of tables in the - ** join might be different so we have to recompute the usable flag - ** each time. - */ - pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; - pUsage = pIdxInfo->aConstraintUsage; - for(i=0; inConstraint; i++, pIdxCons++){ - j = pIdxCons->iTermOffset; - pTerm = &pWC->a[j]; - pIdxCons->usable = (pTerm->prereqRight & notReady)==0; - } - memset(pUsage, 0, sizeof(pUsage[0])*pIdxInfo->nConstraint); - if( pIdxInfo->needToFreeIdxStr ){ - sqlite3_free(pIdxInfo->idxStr); - } - pIdxInfo->idxStr = 0; - pIdxInfo->idxNum = 0; - pIdxInfo->needToFreeIdxStr = 0; - pIdxInfo->orderByConsumed = 0; - pIdxInfo->estimatedCost = SQLITE_BIG_DBL / 2.0; - nOrderBy = pIdxInfo->nOrderBy; - if( pIdxInfo->nOrderBy && !orderByUsable ){ - *(int*)&pIdxInfo->nOrderBy = 0; - } - - sqlite3SafetyOff(pParse->db); - WHERETRACE(("xBestIndex for %s\n", pTab->zName)); - TRACE_IDX_INPUTS(pIdxInfo); - rc = pTab->pVtab->pModule->xBestIndex(pTab->pVtab, pIdxInfo); - TRACE_IDX_OUTPUTS(pIdxInfo); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); - }else { - sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc)); - } - sqlite3SafetyOn(pParse->db); - }else{ - rc = sqlite3SafetyOn(pParse->db); - } - *(int*)&pIdxInfo->nOrderBy = nOrderBy; - - return pIdxInfo->estimatedCost; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/* -** Find the best index for accessing a particular table. 
Return a pointer -** to the index, flags that describe how the index should be used, the -** number of equality constraints, and the "cost" for this index. -** -** The lowest cost index wins. The cost is an estimate of the amount of -** CPU and disk I/O need to process the request using the selected index. -** Factors that influence cost include: -** -** * The estimated number of rows that will be retrieved. (The -** fewer the better.) -** -** * Whether or not sorting must occur. -** -** * Whether or not there must be separate lookups in the -** index and in the main table. -** -*/ -static double bestIndex( - Parse *pParse, /* The parsing context */ - WhereClause *pWC, /* The WHERE clause */ - struct SrcList_item *pSrc, /* The FROM clause term to search */ - Bitmask notReady, /* Mask of cursors that are not available */ - ExprList *pOrderBy, /* The order by clause */ - Index **ppIndex, /* Make *ppIndex point to the best index */ - int *pFlags, /* Put flags describing this choice in *pFlags */ - int *pnEq /* Put the number of == or IN constraints here */ -){ - WhereTerm *pTerm; - Index *bestIdx = 0; /* Index that gives the lowest cost */ - double lowestCost; /* The cost of using bestIdx */ - int bestFlags = 0; /* Flags associated with bestIdx */ - int bestNEq = 0; /* Best value for nEq */ - int iCur = pSrc->iCursor; /* The cursor of the table to be accessed */ - Index *pProbe; /* An index we are evaluating */ - int rev; /* True to scan in reverse order */ - int flags; /* Flags associated with pProbe */ - int nEq; /* Number of == or IN constraints */ - int eqTermMask; /* Mask of valid equality operators */ - double cost; /* Cost of using pProbe */ - - WHERETRACE(("bestIndex: tbl=%s notReady=%x\n", pSrc->pTab->zName, notReady)); - lowestCost = SQLITE_BIG_DBL; - pProbe = pSrc->pTab->pIndex; - - /* If the table has no indices and there are no terms in the where - ** clause that refer to the ROWID, then we will never be able to do - ** anything other than a full table scan on this table. We might as - ** well put it first in the join order. That way, perhaps it can be - ** referenced by other tables in the join. - */ - if( pProbe==0 && - findTerm(pWC, iCur, -1, 0, WO_EQ|WO_IN|WO_LT|WO_LE|WO_GT|WO_GE,0)==0 && - (pOrderBy==0 || !sortableByRowid(iCur, pOrderBy, pWC->pMaskSet, &rev)) ){ - *pFlags = 0; - *ppIndex = 0; - *pnEq = 0; - return 0.0; - } - - /* Check for a rowid=EXPR or rowid IN (...) constraints - */ - pTerm = findTerm(pWC, iCur, -1, notReady, WO_EQ|WO_IN, 0); - if( pTerm ){ - Expr *pExpr; - *ppIndex = 0; - bestFlags = WHERE_ROWID_EQ; - if( pTerm->eOperator & WO_EQ ){ - /* Rowid== is always the best pick. Look no further. Because only - ** a single row is generated, output is always in sorted order */ - *pFlags = WHERE_ROWID_EQ | WHERE_UNIQUE; - *pnEq = 1; - WHERETRACE(("... best is rowid\n")); - return 0.0; - }else if( (pExpr = pTerm->pExpr)->pList!=0 ){ - /* Rowid IN (LIST): cost is NlogN where N is the number of list - ** elements. */ - lowestCost = pExpr->pList->nExpr; - lowestCost *= estLog(lowestCost); - }else{ - /* Rowid IN (SELECT): cost is NlogN where N is the number of rows - ** in the result of the inner select. We have no way to estimate - ** that value so make a wild guess. */ - lowestCost = 200; - } - WHERETRACE(("... rowid IN cost: %.9g\n", lowestCost)); - } - - /* Estimate the cost of a table scan. If we do not know how many - ** entries are in the table, use 1 million as a guess. - */ - cost = pProbe ? pProbe->aiRowEst[0] : 1000000; - WHERETRACE(("... 
table scan base cost: %.9g\n", cost)); - flags = WHERE_ROWID_RANGE; - - /* Check for constraints on a range of rowids in a table scan. - */ - pTerm = findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE|WO_GT|WO_GE, 0); - if( pTerm ){ - if( findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE, 0) ){ - flags |= WHERE_TOP_LIMIT; - cost /= 3; /* Guess that rowidEXPR eliminates two-thirds of rows */ - } - WHERETRACE(("... rowid range reduces cost to %.9g\n", cost)); - }else{ - flags = 0; - } - - /* If the table scan does not satisfy the ORDER BY clause, increase - ** the cost by NlogN to cover the expense of sorting. */ - if( pOrderBy ){ - if( sortableByRowid(iCur, pOrderBy, pWC->pMaskSet, &rev) ){ - flags |= WHERE_ORDERBY|WHERE_ROWID_RANGE; - if( rev ){ - flags |= WHERE_REVERSE; - } - }else{ - cost += cost*estLog(cost); - WHERETRACE(("... sorting increases cost to %.9g\n", cost)); - } - } - if( costjointype & JT_LEFT)!=0 ){ - eqTermMask = WO_EQ|WO_IN; - }else{ - eqTermMask = WO_EQ|WO_IN|WO_ISNULL; - } - - /* Look at each index. - */ - for(; pProbe; pProbe=pProbe->pNext){ - int i; /* Loop counter */ - double inMultiplier = 1; - - WHERETRACE(("... index %s:\n", pProbe->zName)); - - /* Count the number of columns in the index that are satisfied - ** by x=EXPR constraints or x IN (...) constraints. - */ - flags = 0; - for(i=0; inColumn; i++){ - int j = pProbe->aiColumn[i]; - pTerm = findTerm(pWC, iCur, j, notReady, eqTermMask, pProbe); - if( pTerm==0 ) break; - flags |= WHERE_COLUMN_EQ; - if( pTerm->eOperator & WO_IN ){ - Expr *pExpr = pTerm->pExpr; - flags |= WHERE_COLUMN_IN; - if( pExpr->pSelect!=0 ){ - inMultiplier *= 25; - }else if( pExpr->pList!=0 ){ - inMultiplier *= pExpr->pList->nExpr + 1; - } - } - } - cost = pProbe->aiRowEst[i] * inMultiplier * estLog(inMultiplier); - nEq = i; - if( pProbe->onError!=OE_None && (flags & WHERE_COLUMN_IN)==0 - && nEq==pProbe->nColumn ){ - flags |= WHERE_UNIQUE; - } - WHERETRACE(("...... nEq=%d inMult=%.9g cost=%.9g\n", nEq, inMultiplier, cost)); - - /* Look for range constraints - */ - if( nEqnColumn ){ - int j = pProbe->aiColumn[nEq]; - pTerm = findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE|WO_GT|WO_GE, pProbe); - if( pTerm ){ - flags |= WHERE_COLUMN_RANGE; - if( findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE, pProbe) ){ - flags |= WHERE_TOP_LIMIT; - cost /= 3; - } - if( findTerm(pWC, iCur, j, notReady, WO_GT|WO_GE, pProbe) ){ - flags |= WHERE_BTM_LIMIT; - cost /= 3; - } - WHERETRACE(("...... range reduces cost to %.9g\n", cost)); - } - } - - /* Add the additional cost of sorting if that is a factor. - */ - if( pOrderBy ){ - if( (flags & WHERE_COLUMN_IN)==0 && - isSortingIndex(pParse,pWC->pMaskSet,pProbe,iCur,pOrderBy,nEq,&rev) ){ - if( flags==0 ){ - flags = WHERE_COLUMN_RANGE; - } - flags |= WHERE_ORDERBY; - if( rev ){ - flags |= WHERE_REVERSE; - } - }else{ - cost += cost*estLog(cost); - WHERETRACE(("...... orderby increases cost to %.9g\n", cost)); - } - } - - /* Check to see if we can get away with using just the index without - ** ever reading the table. If that is the case, then halve the - ** cost of this index. - */ - if( flags && pSrc->colUsed < (((Bitmask)1)<<(BMS-1)) ){ - Bitmask m = pSrc->colUsed; - int j; - for(j=0; jnColumn; j++){ - int x = pProbe->aiColumn[j]; - if( xzName : "(none)", lowestCost, bestFlags, bestNEq)); - *pFlags = bestFlags | eqTermMask; - *pnEq = bestNEq; - return lowestCost; -} - - -/* -** Disable a term in the WHERE clause. 
Except, do not disable the term -** if it controls a LEFT OUTER JOIN and it did not originate in the ON -** or USING clause of that join. -** -** Consider the term t2.z='ok' in the following queries: -** -** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok' -** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok' -** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok' -** -** The t2.z='ok' is disabled in the in (2) because it originates -** in the ON clause. The term is disabled in (3) because it is not part -** of a LEFT OUTER JOIN. In (1), the term is not disabled. -** -** Disabling a term causes that term to not be tested in the inner loop -** of the join. Disabling is an optimization. When terms are satisfied -** by indices, we disable them to prevent redundant tests in the inner -** loop. We would get the correct results if nothing were ever disabled, -** but joins might run a little slower. The trick is to disable as much -** as we can without disabling too much. If we disabled in (1), we'd get -** the wrong answer. See ticket #813. -*/ -static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ - if( pTerm - && (pTerm->flags & TERM_CODED)==0 - && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin)) - ){ - pTerm->flags |= TERM_CODED; - if( pTerm->iParent>=0 ){ - WhereTerm *pOther = &pTerm->pWC->a[pTerm->iParent]; - if( (--pOther->nChild)==0 ){ - disableTerm(pLevel, pOther); - } - } - } -} - -/* -** Generate code that builds a probe for an index. -** -** There should be nColumn values on the stack. The index -** to be probed is pIdx. Pop the values from the stack and -** replace them all with a single record that is the index -** problem. -*/ -static void buildIndexProbe( - Vdbe *v, /* Generate code into this VM */ - int nColumn, /* The number of columns to check for NULL */ - Index *pIdx /* Index that we will be searching */ -){ - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - sqlite3IndexAffinityStr(v, pIdx); -} - - -/* -** Generate code for a single equality term of the WHERE clause. An equality -** term can be either X=expr or X IN (...). pTerm is the term to be -** coded. -** -** The current value for the constraint is left on the top of the stack. -** -** For a constraint of the form X=expr, the expression is evaluated and its -** result is left on the stack. For constraints of the form X IN (...) -** this routine sets up a loop that will iterate over all values of X. 
-*/ -static void codeEqualityTerm( - Parse *pParse, /* The parsing context */ - WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ - WhereLevel *pLevel /* When level of the FROM clause we are working on */ -){ - Expr *pX = pTerm->pExpr; - Vdbe *v = pParse->pVdbe; - if( pX->op==TK_EQ ){ - sqlite3ExprCode(pParse, pX->pRight); - }else if( pX->op==TK_ISNULL ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); -#ifndef SQLITE_OMIT_SUBQUERY - }else{ - int iTab; - struct InLoop *pIn; - - assert( pX->op==TK_IN ); - sqlite3CodeSubselect(pParse, pX); - iTab = pX->iTable; - sqlite3VdbeAddOp(v, OP_Rewind, iTab, 0); - VdbeComment((v, "# %.*s", pX->span.n, pX->span.z)); - if( pLevel->nIn==0 ){ - pLevel->nxt = sqlite3VdbeMakeLabel(v); - } - pLevel->nIn++; - pLevel->aInLoop = sqliteReallocOrFree(pLevel->aInLoop, - sizeof(pLevel->aInLoop[0])*pLevel->nIn); - pIn = pLevel->aInLoop; - if( pIn ){ - pIn += pLevel->nIn - 1; - pIn->iCur = iTab; - pIn->topAddr = sqlite3VdbeAddOp(v, OP_Column, iTab, 0); - sqlite3VdbeAddOp(v, OP_IsNull, -1, 0); - }else{ - pLevel->nIn = 0; - } -#endif - } - disableTerm(pLevel, pTerm); -} - -/* -** Generate code that will evaluate all == and IN constraints for an -** index. The values for all constraints are left on the stack. -** -** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c). -** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10 -** The index has as many as three equality constraints, but in this -** example, the third "c" value is an inequality. So only two -** constraints are coded. This routine will generate code to evaluate -** a==5 and b IN (1,2,3). The current values for a and b will be left -** on the stack - a is the deepest and b the shallowest. -** -** In the example above nEq==2. But this subroutine works for any value -** of nEq including 0. If nEq==0, this routine is nearly a no-op. -** The only thing it does is allocate the pLevel->iMem memory cell. -** -** This routine always allocates at least one memory cell and puts -** the address of that memory cell in pLevel->iMem. The code that -** calls this routine will use pLevel->iMem to store the termination -** key value of the loop. If one or more IN operators appear, then -** this routine allocates an additional nEq memory cells for internal -** use. -*/ -static void codeAllEqualityTerms( - Parse *pParse, /* Parsing context */ - WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */ - WhereClause *pWC, /* The WHERE clause */ - Bitmask notReady /* Which parts of FROM have not yet been coded */ -){ - int nEq = pLevel->nEq; /* The number of == or IN constraints to code */ - int termsInMem = 0; /* If true, store value in mem[] cells */ - Vdbe *v = pParse->pVdbe; /* The virtual machine under construction */ - Index *pIdx = pLevel->pIdx; /* The index being used for this loop */ - int iCur = pLevel->iTabCur; /* The cursor of the table */ - WhereTerm *pTerm; /* A single constraint term */ - int j; /* Loop counter */ - - /* Figure out how many memory cells we will need then allocate them. - ** We always need at least one used to store the loop terminator - ** value. If there are IN operators we'll need one for each == or - ** IN constraint. 
- */ - pLevel->iMem = pParse->nMem++; - if( pLevel->flags & WHERE_COLUMN_IN ){ - pParse->nMem += pLevel->nEq; - termsInMem = 1; - } - - /* Evaluate the equality constraints - */ - assert( pIdx->nColumn>=nEq ); - for(j=0; jaiColumn[j]; - pTerm = findTerm(pWC, iCur, k, notReady, pLevel->flags, pIdx); - if( pTerm==0 ) break; - assert( (pTerm->flags & TERM_CODED)==0 ); - codeEqualityTerm(pParse, pTerm, pLevel); - if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){ - sqlite3VdbeAddOp(v, OP_IsNull, termsInMem ? -1 : -(j+1), pLevel->brk); - } - if( termsInMem ){ - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem+j+1, 1); - } - } - - /* Make sure all the constraint values are on the top of the stack - */ - if( termsInMem ){ - for(j=0; jiMem+j+1, 0); - } - } -} - -#if defined(SQLITE_TEST) -/* -** The following variable holds a text description of query plan generated -** by the most recent call to sqlite3WhereBegin(). Each call to WhereBegin -** overwrites the previous. This information is used for testing and -** analysis only. -*/ -char sqlite3_query_plan[BMS*2*40]; /* Text of the join */ -static int nQPlan = 0; /* Next free slow in _query_plan[] */ - -#endif /* SQLITE_TEST */ - - -/* -** Free a WhereInfo structure -*/ -static void whereInfoFree(WhereInfo *pWInfo){ - if( pWInfo ){ - int i; - for(i=0; inLevel; i++){ - sqlite3_index_info *pInfo = pWInfo->a[i].pIdxInfo; - if( pInfo ){ - if( pInfo->needToFreeIdxStr ){ - /* Coverage: Don't think this can be reached. By the time this - ** function is called, the index-strings have been passed - ** to the vdbe layer for deletion. - */ - sqlite3_free(pInfo->idxStr); - } - sqliteFree(pInfo); - } - } - sqliteFree(pWInfo); - } -} - - -/* -** Generate the beginning of the loop used for WHERE clause processing. -** The return value is a pointer to an opaque structure that contains -** information needed to terminate the loop. Later, the calling routine -** should invoke sqlite3WhereEnd() with the return value of this function -** in order to complete the WHERE clause processing. -** -** If an error occurs, this routine returns NULL. -** -** The basic idea is to do a nested loop, one loop for each table in -** the FROM clause of a select. (INSERT and UPDATE statements are the -** same as a SELECT with only a single table in the FROM clause.) For -** example, if the SQL is this: -** -** SELECT * FROM t1, t2, t3 WHERE ...; -** -** Then the code generated is conceptually like the following: -** -** foreach row1 in t1 do \ Code generated -** foreach row2 in t2 do |-- by sqlite3WhereBegin() -** foreach row3 in t3 do / -** ... -** end \ Code generated -** end |-- by sqlite3WhereEnd() -** end / -** -** Note that the loops might not be nested in the order in which they -** appear in the FROM clause if a different order is better able to make -** use of indices. Note also that when the IN operator appears in -** the WHERE clause, it might result in additional nested loops for -** scanning through all values on the right-hand side of the IN. -** -** There are Btree cursors associated with each table. t1 uses cursor -** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor. -** And so forth. This routine generates code to open those VDBE cursors -** and sqlite3WhereEnd() generates the code to close them. -** -** The code that sqlite3WhereBegin() generates leaves the cursors named -** in pTabList pointing at their appropriate entries. The [...] 
code -** can use OP_Column and OP_Rowid opcodes on these cursors to extract -** data from the various tables of the loop. -** -** If the WHERE clause is empty, the foreach loops must each scan their -** entire tables. Thus a three-way join is an O(N^3) operation. But if -** the tables have indices and there are terms in the WHERE clause that -** refer to those indices, a complete table scan can be avoided and the -** code will run much faster. Most of the work of this routine is checking -** to see if there are indices that can be used to speed up the loop. -** -** Terms of the WHERE clause are also used to limit which rows actually -** make it to the "..." in the middle of the loop. After each "foreach", -** terms of the WHERE clause that use only terms in that loop and outer -** loops are evaluated and if false a jump is made around all subsequent -** inner loops (or around the "..." if the test occurs within the inner- -** most loop) -** -** OUTER JOINS -** -** An outer join of tables t1 and t2 is conceptally coded as follows: -** -** foreach row1 in t1 do -** flag = 0 -** foreach row2 in t2 do -** start: -** ... -** flag = 1 -** end -** if flag==0 then -** move the row2 cursor to a null row -** goto start -** fi -** end -** -** ORDER BY CLAUSE PROCESSING -** -** *ppOrderBy is a pointer to the ORDER BY clause of a SELECT statement, -** if there is one. If there is no ORDER BY clause or if this routine -** is called from an UPDATE or DELETE statement, then ppOrderBy is NULL. -** -** If an index can be used so that the natural output order of the table -** scan is correct for the ORDER BY clause, then that index is used and -** *ppOrderBy is set to NULL. This is an optimization that prevents an -** unnecessary sort of the result set if an index appropriate for the -** ORDER BY clause already exists. -** -** If the where clause loops cannot be arranged to provide the correct -** output order, then the *ppOrderBy is unchanged. -*/ -WhereInfo *sqlite3WhereBegin( - Parse *pParse, /* The parser context */ - SrcList *pTabList, /* A list of all tables to be scanned */ - Expr *pWhere, /* The WHERE clause */ - ExprList **ppOrderBy /* An ORDER BY clause, or NULL */ -){ - int i; /* Loop counter */ - WhereInfo *pWInfo; /* Will become the return value of this function */ - Vdbe *v = pParse->pVdbe; /* The virtual database engine */ - int brk, cont = 0; /* Addresses used during code generation */ - Bitmask notReady; /* Cursors that are not yet positioned */ - WhereTerm *pTerm; /* A single term in the WHERE clause */ - ExprMaskSet maskSet; /* The expression mask set */ - WhereClause wc; /* The WHERE clause is divided into these terms */ - struct SrcList_item *pTabItem; /* A single entry from pTabList */ - WhereLevel *pLevel; /* A single level in the pWInfo list */ - int iFrom; /* First unused FROM clause element */ - int andFlags; /* AND-ed combination of all wc.a[].flags */ - - /* The number of tables in the FROM clause is limited by the number of - ** bits in a Bitmask - */ - if( pTabList->nSrc>BMS ){ - sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS); - return 0; - } - - /* Split the WHERE clause into separate subexpressions where each - ** subexpression is separated by an AND operator. - */ - initMaskSet(&maskSet); - whereClauseInit(&wc, pParse, &maskSet); - whereSplit(&wc, pWhere, TK_AND); - - /* Allocate and initialize the WhereInfo structure that will become the - ** return value. 
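As a purely illustrative, self-contained rendering of the nested-loop shape sketched in the comment above (plain arrays stand in for VDBE cursors; nothing here is SQLite code):

#include <stdio.h>

int main(void){
  /* SELECT * FROM t1, t2, t3 WHERE t1.a=t2.a AND t2.a=t3.a expands to one
  ** loop per FROM term; sqlite3WhereBegin() emits the loop headers and
  ** sqlite3WhereEnd() the matching footers.  Each WHERE test runs as soon
  ** as the loops it depends on are open. */
  int t1[] = {1,2,3}, t2[] = {2,3,4}, t3[] = {3,4,5};
  for(int i=0; i<3; i++){
    for(int j=0; j<3; j++){
      if( t1[i]!=t2[j] ) continue;            /* t1.a=t2.a tested here */
      for(int k=0; k<3; k++){
        if( t2[j]!=t3[k] ) continue;          /* t2.a=t3.a tested here */
        printf("%d %d %d\n", t1[i], t2[j], t3[k]);
      }
    }
  }
  return 0;
}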
- */ - pWInfo = sqliteMalloc( sizeof(WhereInfo) + pTabList->nSrc*sizeof(WhereLevel)); - if( sqlite3MallocFailed() ){ - goto whereBeginNoMem; - } - pWInfo->nLevel = pTabList->nSrc; - pWInfo->pParse = pParse; - pWInfo->pTabList = pTabList; - pWInfo->iBreak = sqlite3VdbeMakeLabel(v); - - /* Special case: a WHERE clause that is constant. Evaluate the - ** expression and either jump over all of the code or fall thru. - */ - if( pWhere && (pTabList->nSrc==0 || sqlite3ExprIsConstantNotJoin(pWhere)) ){ - sqlite3ExprIfFalse(pParse, pWhere, pWInfo->iBreak, 1); - pWhere = 0; - } - - /* Analyze all of the subexpressions. Note that exprAnalyze() might - ** add new virtual terms onto the end of the WHERE clause. We do not - ** want to analyze these virtual terms, so start analyzing at the end - ** and work forward so that the added virtual terms are never processed. - */ - for(i=0; inSrc; i++){ - createMask(&maskSet, pTabList->a[i].iCursor); - } - exprAnalyzeAll(pTabList, &wc); - if( sqlite3MallocFailed() ){ - goto whereBeginNoMem; - } - - /* Chose the best index to use for each table in the FROM clause. - ** - ** This loop fills in the following fields: - ** - ** pWInfo->a[].pIdx The index to use for this level of the loop. - ** pWInfo->a[].flags WHERE_xxx flags associated with pIdx - ** pWInfo->a[].nEq The number of == and IN constraints - ** pWInfo->a[].iFrom When term of the FROM clause is being coded - ** pWInfo->a[].iTabCur The VDBE cursor for the database table - ** pWInfo->a[].iIdxCur The VDBE cursor for the index - ** - ** This loop also figures out the nesting order of tables in the FROM - ** clause. - */ - notReady = ~(Bitmask)0; - pTabItem = pTabList->a; - pLevel = pWInfo->a; - andFlags = ~0; - WHERETRACE(("*** Optimizer Start ***\n")); - for(i=iFrom=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ - Index *pIdx; /* Index for FROM table at pTabItem */ - int flags; /* Flags asssociated with pIdx */ - int nEq; /* Number of == or IN constraints */ - double cost; /* The cost for pIdx */ - int j; /* For looping over FROM tables */ - Index *pBest = 0; /* The best index seen so far */ - int bestFlags = 0; /* Flags associated with pBest */ - int bestNEq = 0; /* nEq associated with pBest */ - double lowestCost; /* Cost of the pBest */ - int bestJ = 0; /* The value of j */ - Bitmask m; /* Bitmask value for j or bestJ */ - int once = 0; /* True when first table is seen */ - sqlite3_index_info *pIndex; /* Current virtual index */ - - lowestCost = SQLITE_BIG_DBL; - for(j=iFrom, pTabItem=&pTabList->a[j]; jnSrc; j++, pTabItem++){ - int doNotReorder; /* True if this table should not be reordered */ - - doNotReorder = (pTabItem->jointype & (JT_LEFT|JT_CROSS))!=0; - if( once && doNotReorder ) break; - m = getMask(&maskSet, pTabItem->iCursor); - if( (m & notReady)==0 ){ - if( j==iFrom ) iFrom++; - continue; - } - assert( pTabItem->pTab ); -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTabItem->pTab) ){ - sqlite3_index_info **ppIdxInfo = &pWInfo->a[j].pIdxInfo; - cost = bestVirtualIndex(pParse, &wc, pTabItem, notReady, - ppOrderBy ? *ppOrderBy : 0, i==0, - ppIdxInfo); - flags = WHERE_VIRTUALTABLE; - pIndex = *ppIdxInfo; - if( pIndex && pIndex->orderByConsumed ){ - flags = WHERE_VIRTUALTABLE | WHERE_ORDERBY; - } - pIdx = 0; - nEq = 0; - if( (SQLITE_BIG_DBL/2.0)pBestIdx never set. - */ - cost = (SQLITE_BIG_DBL/2.0); - } - }else -#endif - { - cost = bestIndex(pParse, &wc, pTabItem, notReady, - (i==0 && ppOrderBy) ? 
*ppOrderBy : 0, - &pIdx, &flags, &nEq); - pIndex = 0; - } - if( costpBestIdx = pIndex; - } - if( doNotReorder ) break; - } - WHERETRACE(("*** Optimizer choose table %d for loop %d\n", bestJ, - pLevel-pWInfo->a)); - if( (bestFlags & WHERE_ORDERBY)!=0 ){ - *ppOrderBy = 0; - } - andFlags &= bestFlags; - pLevel->flags = bestFlags; - pLevel->pIdx = pBest; - pLevel->nEq = bestNEq; - pLevel->aInLoop = 0; - pLevel->nIn = 0; - if( pBest ){ - pLevel->iIdxCur = pParse->nTab++; - }else{ - pLevel->iIdxCur = -1; - } - notReady &= ~getMask(&maskSet, pTabList->a[bestJ].iCursor); - pLevel->iFrom = bestJ; - } - WHERETRACE(("*** Optimizer Finished ***\n")); - - /* If the total query only selects a single row, then the ORDER BY - ** clause is irrelevant. - */ - if( (andFlags & WHERE_UNIQUE)!=0 && ppOrderBy ){ - *ppOrderBy = 0; - } - - /* Open all tables in the pTabList and any indices selected for - ** searching those tables. - */ - sqlite3CodeVerifySchema(pParse, -1); /* Insert the cookie verifier Goto */ - for(i=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ - Table *pTab; /* Table to open */ - Index *pIx; /* Index used to access pTab (if any) */ - int iDb; /* Index of database containing table/index */ - int iIdxCur = pLevel->iIdxCur; - -#ifndef SQLITE_OMIT_EXPLAIN - if( pParse->explain==2 ){ - char *zMsg; - struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; - zMsg = sqlite3MPrintf("TABLE %s", pItem->zName); - if( pItem->zAlias ){ - zMsg = sqlite3MPrintf("%z AS %s", zMsg, pItem->zAlias); - } - if( (pIx = pLevel->pIdx)!=0 ){ - zMsg = sqlite3MPrintf("%z WITH INDEX %s", zMsg, pIx->zName); - }else if( pLevel->flags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ - zMsg = sqlite3MPrintf("%z USING PRIMARY KEY", zMsg); - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - else if( pLevel->pBestIdx ){ - sqlite3_index_info *pBestIdx = pLevel->pBestIdx; - zMsg = sqlite3MPrintf("%z VIRTUAL TABLE INDEX %d:%s", zMsg, - pBestIdx->idxNum, pBestIdx->idxStr); - } -#endif - if( pLevel->flags & WHERE_ORDERBY ){ - zMsg = sqlite3MPrintf("%z ORDER BY", zMsg); - } - sqlite3VdbeOp3(v, OP_Explain, i, pLevel->iFrom, zMsg, P3_DYNAMIC); - } -#endif /* SQLITE_OMIT_EXPLAIN */ - pTabItem = &pTabList->a[pLevel->iFrom]; - pTab = pTabItem->pTab; - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - if( pTab->isEphem || pTab->pSelect ) continue; -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pLevel->pBestIdx ){ - int iCur = pTabItem->iCursor; - sqlite3VdbeOp3(v, OP_VOpen, iCur, 0, (const char*)pTab->pVtab, P3_VTAB); - }else -#endif - if( (pLevel->flags & WHERE_IDX_ONLY)==0 ){ - sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, OP_OpenRead); - if( pTab->nCol<(sizeof(Bitmask)*8) ){ - Bitmask b = pTabItem->colUsed; - int n = 0; - for(; b; b=b>>1, n++){} - sqlite3VdbeChangeP2(v, sqlite3VdbeCurrentAddr(v)-1, n); - assert( n<=pTab->nCol ); - } - }else{ - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - } - pLevel->iTabCur = pTabItem->iCursor; - if( (pIx = pLevel->pIdx)!=0 ){ - KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIx); - assert( pIx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - VdbeComment((v, "# %s", pIx->zName)); - sqlite3VdbeOp3(v, OP_OpenRead, iIdxCur, pIx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - } - if( (pLevel->flags & (WHERE_IDX_ONLY|WHERE_COLUMN_RANGE))!=0 ){ - /* Only call OP_SetNumColumns on the index if we might later use - ** OP_Column on the index. 
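The colUsed trick used just above, which works out how many leading columns the cursor must be able to decode, is easier to see in isolation. A minimal sketch with a made-up bitmask value:

#include <stdio.h>

typedef unsigned long long Bitmask;

int main(void){
  /* The query touches only columns 0 and 4, so the cursor needs to decode
  ** the first 5 columns: the loop finds the highest set bit plus one. */
  Bitmask colUsed = (1ULL<<0) | (1ULL<<4);
  int n = 0;
  for(Bitmask b=colUsed; b; b=b>>1, n++){}
  printf("decode %d columns\n", n);   /* prints 5 */
  return 0;
}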
*/ - sqlite3VdbeAddOp(v, OP_SetNumColumns, iIdxCur, pIx->nColumn+1); - } - sqlite3CodeVerifySchema(pParse, iDb); - } - pWInfo->iTop = sqlite3VdbeCurrentAddr(v); - - /* Generate the code to do the search. Each iteration of the for - ** loop below generates code for a single nested loop of the VM - ** program. - */ - notReady = ~(Bitmask)0; - for(i=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ - int j; - int iCur = pTabItem->iCursor; /* The VDBE cursor for the table */ - Index *pIdx; /* The index we will be using */ - int nxt; /* Where to jump to continue with the next IN case */ - int iIdxCur; /* The VDBE cursor for the index */ - int omitTable; /* True if we use the index only */ - int bRev; /* True if we need to scan in reverse order */ - - pTabItem = &pTabList->a[pLevel->iFrom]; - iCur = pTabItem->iCursor; - pIdx = pLevel->pIdx; - iIdxCur = pLevel->iIdxCur; - bRev = (pLevel->flags & WHERE_REVERSE)!=0; - omitTable = (pLevel->flags & WHERE_IDX_ONLY)!=0; - - /* Create labels for the "break" and "continue" instructions - ** for the current loop. Jump to brk to break out of a loop. - ** Jump to cont to go immediately to the next iteration of the - ** loop. - ** - ** When there is an IN operator, we also have a "nxt" label that - ** means to continue with the next IN value combination. When - ** there are no IN operators in the constraints, the "nxt" label - ** is the same as "brk". - */ - brk = pLevel->brk = pLevel->nxt = sqlite3VdbeMakeLabel(v); - cont = pLevel->cont = sqlite3VdbeMakeLabel(v); - - /* If this is the right table of a LEFT OUTER JOIN, allocate and - ** initialize a memory cell that records if this table matches any - ** row of the left table of the join. - */ - if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){ - if( !pParse->nMem ) pParse->nMem++; - pLevel->iLeftJoin = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemInt, 0, pLevel->iLeftJoin); - VdbeComment((v, "# init LEFT JOIN no-match flag")); - } - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pLevel->pBestIdx ){ - /* Case 0: The table is a virtual-table. Use the VFilter and VNext - ** to access the data. - */ - int j; - sqlite3_index_info *pBestIdx = pLevel->pBestIdx; - int nConstraint = pBestIdx->nConstraint; - struct sqlite3_index_constraint_usage *aUsage = - pBestIdx->aConstraintUsage; - const struct sqlite3_index_constraint *aConstraint = - pBestIdx->aConstraint; - - for(j=1; j<=nConstraint; j++){ - int k; - for(k=0; kpRight); - break; - } - } - if( k==nConstraint ) break; - } - sqlite3VdbeAddOp(v, OP_Integer, j-1, 0); - sqlite3VdbeAddOp(v, OP_Integer, pBestIdx->idxNum, 0); - sqlite3VdbeOp3(v, OP_VFilter, iCur, brk, pBestIdx->idxStr, - pBestIdx->needToFreeIdxStr ? P3_MPRINTF : P3_STATIC); - pBestIdx->needToFreeIdxStr = 0; - for(j=0; jnConstraint; j++){ - if( aUsage[j].omit ){ - int iTerm = aConstraint[j].iTermOffset; - disableTerm(pLevel, &wc.a[iTerm]); - } - } - pLevel->op = OP_VNext; - pLevel->p1 = iCur; - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - }else -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - - if( pLevel->flags & WHERE_ROWID_EQ ){ - /* Case 1: We can directly reference a single row using an - ** equality comparison against the ROWID field. Or - ** we reference multiple rows using a "rowid IN (...)" - ** construct. 
- */ - pTerm = findTerm(&wc, iCur, -1, notReady, WO_EQ|WO_IN, 0); - assert( pTerm!=0 ); - assert( pTerm->pExpr!=0 ); - assert( pTerm->leftCursor==iCur ); - assert( omitTable==0 ); - codeEqualityTerm(pParse, pTerm, pLevel); - nxt = pLevel->nxt; - sqlite3VdbeAddOp(v, OP_MustBeInt, 1, nxt); - sqlite3VdbeAddOp(v, OP_NotExists, iCur, nxt); - VdbeComment((v, "pk")); - pLevel->op = OP_Noop; - }else if( pLevel->flags & WHERE_ROWID_RANGE ){ - /* Case 2: We have an inequality comparison against the ROWID field. - */ - int testOp = OP_Noop; - int start; - WhereTerm *pStart, *pEnd; - - assert( omitTable==0 ); - pStart = findTerm(&wc, iCur, -1, notReady, WO_GT|WO_GE, 0); - pEnd = findTerm(&wc, iCur, -1, notReady, WO_LT|WO_LE, 0); - if( bRev ){ - pTerm = pStart; - pStart = pEnd; - pEnd = pTerm; - } - if( pStart ){ - Expr *pX; - pX = pStart->pExpr; - assert( pX!=0 ); - assert( pStart->leftCursor==iCur ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_ForceInt, pX->op==TK_LE || pX->op==TK_GT, brk); - sqlite3VdbeAddOp(v, bRev ? OP_MoveLt : OP_MoveGe, iCur, brk); - VdbeComment((v, "pk")); - disableTerm(pLevel, pStart); - }else{ - sqlite3VdbeAddOp(v, bRev ? OP_Last : OP_Rewind, iCur, brk); - } - if( pEnd ){ - Expr *pX; - pX = pEnd->pExpr; - assert( pX!=0 ); - assert( pEnd->leftCursor==iCur ); - sqlite3ExprCode(pParse, pX->pRight); - pLevel->iMem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - if( pX->op==TK_LT || pX->op==TK_GT ){ - testOp = bRev ? OP_Le : OP_Ge; - }else{ - testOp = bRev ? OP_Lt : OP_Gt; - } - disableTerm(pLevel, pEnd); - } - start = sqlite3VdbeCurrentAddr(v); - pLevel->op = bRev ? OP_Prev : OP_Next; - pLevel->p1 = iCur; - pLevel->p2 = start; - if( testOp!=OP_Noop ){ - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, testOp, SQLITE_AFF_NUMERIC|0x100, brk); - } - }else if( pLevel->flags & WHERE_COLUMN_RANGE ){ - /* Case 3: The WHERE clause term that refers to the right-most - ** column of the index is an inequality. For example, if - ** the index is on (x,y,z) and the WHERE clause is of the - ** form "x=5 AND y<10" then this case is used. Only the - ** right-most column can be an inequality - the rest must - ** use the "==" and "IN" operators. - ** - ** This case is also used when there are no WHERE clause - ** constraints but an index is selected anyway, in order - ** to force the output order to conform to an ORDER BY. - */ - int start; - int nEq = pLevel->nEq; - int topEq=0; /* True if top limit uses ==. False is strictly < */ - int btmEq=0; /* True if btm limit uses ==. False if strictly > */ - int topOp, btmOp; /* Operators for the top and bottom search bounds */ - int testOp; - int topLimit = (pLevel->flags & WHERE_TOP_LIMIT)!=0; - int btmLimit = (pLevel->flags & WHERE_BTM_LIMIT)!=0; - - /* Generate code to evaluate all constraint terms using == or IN - ** and level the values of those terms on the stack. - */ - codeAllEqualityTerms(pParse, pLevel, &wc, notReady); - - /* Duplicate the equality term values because they will all be - ** used twice: once to make the termination key and once to make the - ** start key. - */ - for(j=0; j or >= - ** operator and the top bound is a < or <= operator. For a descending - ** index the operators are reversed. - */ - if( pIdx->aSortOrder[nEq]==SQLITE_SO_ASC ){ - topOp = WO_LT|WO_LE; - btmOp = WO_GT|WO_GE; - }else{ - topOp = WO_GT|WO_GE; - btmOp = WO_LT|WO_LE; - SWAP(int, topLimit, btmLimit); - } - - /* Generate the termination key. 
This is the key value that - ** will end the search. There is no termination key if there - ** are no equality terms and no "X<..." term. - ** - ** 2002-Dec-04: On a reverse-order scan, the so-called "termination" - ** key computed here really ends up being the start key. - */ - nxt = pLevel->nxt; - if( topLimit ){ - Expr *pX; - int k = pIdx->aiColumn[j]; - pTerm = findTerm(&wc, iCur, k, notReady, topOp, pIdx); - assert( pTerm!=0 ); - pX = pTerm->pExpr; - assert( (pTerm->flags & TERM_CODED)==0 ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_IsNull, -(nEq*2+1), nxt); - topEq = pTerm->eOperator & (WO_LE|WO_GE); - disableTerm(pLevel, pTerm); - testOp = OP_IdxGE; - }else{ - testOp = nEq>0 ? OP_IdxGE : OP_Noop; - topEq = 1; - } - if( testOp!=OP_Noop ){ - int nCol = nEq + topLimit; - pLevel->iMem = pParse->nMem++; - buildIndexProbe(v, nCol, pIdx); - if( bRev ){ - int op = topEq ? OP_MoveLe : OP_MoveLt; - sqlite3VdbeAddOp(v, op, iIdxCur, nxt); - }else{ - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - } - }else if( bRev ){ - sqlite3VdbeAddOp(v, OP_Last, iIdxCur, brk); - } - - /* Generate the start key. This is the key that defines the lower - ** bound on the search. There is no start key if there are no - ** equality terms and if there is no "X>..." term. In - ** that case, generate a "Rewind" instruction in place of the - ** start key search. - ** - ** 2002-Dec-04: In the case of a reverse-order search, the so-called - ** "start" key really ends up being used as the termination key. - */ - if( btmLimit ){ - Expr *pX; - int k = pIdx->aiColumn[j]; - pTerm = findTerm(&wc, iCur, k, notReady, btmOp, pIdx); - assert( pTerm!=0 ); - pX = pTerm->pExpr; - assert( (pTerm->flags & TERM_CODED)==0 ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_IsNull, -(nEq+1), nxt); - btmEq = pTerm->eOperator & (WO_LE|WO_GE); - disableTerm(pLevel, pTerm); - }else{ - btmEq = 1; - } - if( nEq>0 || btmLimit ){ - int nCol = nEq + btmLimit; - buildIndexProbe(v, nCol, pIdx); - if( bRev ){ - pLevel->iMem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - testOp = OP_IdxLT; - }else{ - int op = btmEq ? OP_MoveGe : OP_MoveGt; - sqlite3VdbeAddOp(v, op, iIdxCur, nxt); - } - }else if( bRev ){ - testOp = OP_Noop; - }else{ - sqlite3VdbeAddOp(v, OP_Rewind, iIdxCur, brk); - } - - /* Generate the the top of the loop. If there is a termination - ** key we have to test for that key and abort at the top of the - ** loop. - */ - start = sqlite3VdbeCurrentAddr(v); - if( testOp!=OP_Noop ){ - sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, testOp, iIdxCur, nxt); - if( (topEq && !bRev) || (!btmEq && bRev) ){ - sqlite3VdbeChangeP3(v, -1, "+", P3_STATIC); - } - } - if( topLimit | btmLimit ){ - sqlite3VdbeAddOp(v, OP_Column, iIdxCur, nEq); - sqlite3VdbeAddOp(v, OP_IsNull, 1, cont); - } - if( !omitTable ){ - sqlite3VdbeAddOp(v, OP_IdxRowid, iIdxCur, 0); - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); - } - - /* Record the instruction used to terminate the loop. - */ - pLevel->op = bRev ? OP_Prev : OP_Next; - pLevel->p1 = iIdxCur; - pLevel->p2 = start; - }else if( pLevel->flags & WHERE_COLUMN_EQ ){ - /* Case 4: There is an index and all terms of the WHERE clause that - ** refer to the index using the "==" or "IN" operators. - */ - int start; - int nEq = pLevel->nEq; - - /* Generate code to evaluate all constraint terms using == or IN - ** and leave the values of those terms on the stack. 
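Both the range-scan case above and the equality-scan case that follows reduce to the same pattern: seek to the first index entry at or past a start key, then step forward until a termination key is passed. A toy, array-based version, with the b-tree and the OP_MoveGe/OP_IdxGE opcodes replaced by a sorted array:

#include <stdio.h>

int main(void){
  int aIdx[] = {3, 7, 9, 12, 15, 21};        /* sorted index keys */
  int nIdx = sizeof(aIdx)/sizeof(aIdx[0]);
  int startKey = 7, termKey = 15;            /* WHERE x>=7 AND x<=15 */
  int i = 0;
  while( i<nIdx && aIdx[i]<startKey ) i++;   /* the "MoveGe" seek */
  for(; i<nIdx && aIdx[i]<=termKey; i++){    /* stop past the termination key */
    printf("visit key %d\n", aIdx[i]);
  }
  return 0;
}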
- */ - codeAllEqualityTerms(pParse, pLevel, &wc, notReady); - nxt = pLevel->nxt; - - /* Generate a single key that will be used to both start and terminate - ** the search - */ - buildIndexProbe(v, nEq, pIdx); - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 0); - - /* Generate code (1) to move to the first matching element of the table. - ** Then generate code (2) that jumps to "nxt" after the cursor is past - ** the last matching element of the table. The code (1) is executed - ** once to initialize the search, the code (2) is executed before each - ** iteration of the scan to see if the scan has finished. */ - if( bRev ){ - /* Scan in reverse order */ - sqlite3VdbeAddOp(v, OP_MoveLe, iIdxCur, nxt); - start = sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, OP_IdxLT, iIdxCur, nxt); - pLevel->op = OP_Prev; - }else{ - /* Scan in the forward order */ - sqlite3VdbeAddOp(v, OP_MoveGe, iIdxCur, nxt); - start = sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeOp3(v, OP_IdxGE, iIdxCur, nxt, "+", P3_STATIC); - pLevel->op = OP_Next; - } - if( !omitTable ){ - sqlite3VdbeAddOp(v, OP_IdxRowid, iIdxCur, 0); - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); - } - pLevel->p1 = iIdxCur; - pLevel->p2 = start; - }else{ - /* Case 5: There is no usable index. We must do a complete - ** scan of the entire table. - */ - assert( omitTable==0 ); - assert( bRev==0 ); - pLevel->op = OP_Next; - pLevel->p1 = iCur; - pLevel->p2 = 1 + sqlite3VdbeAddOp(v, OP_Rewind, iCur, brk); - } - notReady &= ~getMask(&maskSet, iCur); - - /* Insert code to test every subexpression that can be completely - ** computed using the current set of tables. - */ - for(pTerm=wc.a, j=wc.nTerm; j>0; j--, pTerm++){ - Expr *pE; - if( pTerm->flags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & notReady)!=0 ) continue; - pE = pTerm->pExpr; - assert( pE!=0 ); - if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ - continue; - } - sqlite3ExprIfFalse(pParse, pE, cont, 1); - pTerm->flags |= TERM_CODED; - } - - /* For a LEFT OUTER JOIN, generate code that will record the fact that - ** at least one row of the right table has matched the left table. - */ - if( pLevel->iLeftJoin ){ - pLevel->top = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_MemInt, 1, pLevel->iLeftJoin); - VdbeComment((v, "# record LEFT JOIN hit")); - for(pTerm=wc.a, j=0; jflags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & notReady)!=0 ) continue; - assert( pTerm->pExpr ); - sqlite3ExprIfFalse(pParse, pTerm->pExpr, cont, 1); - pTerm->flags |= TERM_CODED; - } - } - } - -#ifdef SQLITE_TEST /* For testing and debugging use only */ - /* Record in the query plan information about the current table - ** and the index used to access it (if any). If the table itself - ** is not used, its name is just '{}'. If no index is used - ** the index is listed as "{}". If the primary key is used the - ** index name is '*'. 
- */ - for(i=0; inSrc; i++){ - char *z; - int n; - pLevel = &pWInfo->a[i]; - pTabItem = &pTabList->a[pLevel->iFrom]; - z = pTabItem->zAlias; - if( z==0 ) z = pTabItem->pTab->zName; - n = strlen(z); - if( n+nQPlan < sizeof(sqlite3_query_plan)-10 ){ - if( pLevel->flags & WHERE_IDX_ONLY ){ - memcpy(&sqlite3_query_plan[nQPlan], "{}", 2); - nQPlan += 2; - }else{ - memcpy(&sqlite3_query_plan[nQPlan], z, n); - nQPlan += n; - } - sqlite3_query_plan[nQPlan++] = ' '; - } - if( pLevel->flags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ - memcpy(&sqlite3_query_plan[nQPlan], "* ", 2); - nQPlan += 2; - }else if( pLevel->pIdx==0 ){ - memcpy(&sqlite3_query_plan[nQPlan], "{} ", 3); - nQPlan += 3; - }else{ - n = strlen(pLevel->pIdx->zName); - if( n+nQPlan < sizeof(sqlite3_query_plan)-2 ){ - memcpy(&sqlite3_query_plan[nQPlan], pLevel->pIdx->zName, n); - nQPlan += n; - sqlite3_query_plan[nQPlan++] = ' '; - } - } - } - while( nQPlan>0 && sqlite3_query_plan[nQPlan-1]==' ' ){ - sqlite3_query_plan[--nQPlan] = 0; - } - sqlite3_query_plan[nQPlan] = 0; - nQPlan = 0; -#endif /* SQLITE_TEST // Testing and debugging use only */ - - /* Record the continuation address in the WhereInfo structure. Then - ** clean up and return. - */ - pWInfo->iContinue = cont; - whereClauseClear(&wc); - return pWInfo; - - /* Jump here if malloc fails */ -whereBeginNoMem: - whereClauseClear(&wc); - whereInfoFree(pWInfo); - return 0; -} - -/* -** Generate the end of the WHERE loop. See comments on -** sqlite3WhereBegin() for additional information. -*/ -void sqlite3WhereEnd(WhereInfo *pWInfo){ - Vdbe *v = pWInfo->pParse->pVdbe; - int i; - WhereLevel *pLevel; - SrcList *pTabList = pWInfo->pTabList; - - /* Generate loop termination code. - */ - for(i=pTabList->nSrc-1; i>=0; i--){ - pLevel = &pWInfo->a[i]; - sqlite3VdbeResolveLabel(v, pLevel->cont); - if( pLevel->op!=OP_Noop ){ - sqlite3VdbeAddOp(v, pLevel->op, pLevel->p1, pLevel->p2); - } - if( pLevel->nIn ){ - struct InLoop *pIn; - int j; - sqlite3VdbeResolveLabel(v, pLevel->nxt); - for(j=pLevel->nIn, pIn=&pLevel->aInLoop[j-1]; j>0; j--, pIn--){ - sqlite3VdbeJumpHere(v, pIn->topAddr+1); - sqlite3VdbeAddOp(v, OP_Next, pIn->iCur, pIn->topAddr); - sqlite3VdbeJumpHere(v, pIn->topAddr-1); - } - sqliteFree(pLevel->aInLoop); - } - sqlite3VdbeResolveLabel(v, pLevel->brk); - if( pLevel->iLeftJoin ){ - int addr; - addr = sqlite3VdbeAddOp(v, OP_IfMemPos, pLevel->iLeftJoin, 0); - sqlite3VdbeAddOp(v, OP_NullRow, pTabList->a[i].iCursor, 0); - if( pLevel->iIdxCur>=0 ){ - sqlite3VdbeAddOp(v, OP_NullRow, pLevel->iIdxCur, 0); - } - sqlite3VdbeAddOp(v, OP_Goto, 0, pLevel->top); - sqlite3VdbeJumpHere(v, addr); - } - } - - /* The "break" point is here, just past the end of the outer loop. - ** Set it. - */ - sqlite3VdbeResolveLabel(v, pWInfo->iBreak); - - /* Close all of the cursors that were opened by sqlite3WhereBegin. - */ - for(i=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ - struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; - Table *pTab = pTabItem->pTab; - assert( pTab!=0 ); - if( pTab->isEphem || pTab->pSelect ) continue; - if( (pLevel->flags & WHERE_IDX_ONLY)==0 ){ - sqlite3VdbeAddOp(v, OP_Close, pTabItem->iCursor, 0); - } - if( pLevel->pIdx!=0 ){ - sqlite3VdbeAddOp(v, OP_Close, pLevel->iIdxCur, 0); - } - - /* Make cursor substitutions for cases where we want to use - ** just the index and never reference the table. - ** - ** Calls to the code generator in between sqlite3WhereBegin and - ** sqlite3WhereEnd will have created code that references the table - ** directly. 
This loop scans all that code looking for opcodes - ** that reference the table and converts them into opcodes that - ** reference the index. - */ - if( pLevel->flags & WHERE_IDX_ONLY ){ - int k, j, last; - VdbeOp *pOp; - Index *pIdx = pLevel->pIdx; - - assert( pIdx!=0 ); - pOp = sqlite3VdbeGetOp(v, pWInfo->iTop); - last = sqlite3VdbeCurrentAddr(v); - for(k=pWInfo->iTop; kp1!=pLevel->iTabCur ) continue; - if( pOp->opcode==OP_Column ){ - pOp->p1 = pLevel->iIdxCur; - for(j=0; jnColumn; j++){ - if( pOp->p2==pIdx->aiColumn[j] ){ - pOp->p2 = j; - break; - } - } - }else if( pOp->opcode==OP_Rowid ){ - pOp->p1 = pLevel->iIdxCur; - pOp->opcode = OP_IdxRowid; - }else if( pOp->opcode==OP_NullRow ){ - pOp->opcode = OP_Noop; - } - } - } - } - - /* Final cleanup - */ - whereInfoFree(pWInfo); - return; -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/table.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/table.c --- sqlite3-3.4.2/src/table.c 2007-06-12 13:18:01.000000000 +0100 +++ sqlite3-3.6.16/src/table.c 2009-06-25 12:23:18.000000000 +0100 @@ -15,6 +15,8 @@ ** ** These routines are in a separate files so that they will not be linked ** if they are not used. +** +** $Id: table.c,v 1.40 2009/04/10 14:28:00 drh Exp $ */ #include "sqliteInt.h" #include @@ -27,14 +29,13 @@ ** to the callback function is uses to build the result. */ typedef struct TabResult { - char **azResult; - char *zErrMsg; - int nResult; - int nAlloc; - int nRow; - int nColumn; - int nData; - int rc; + char **azResult; /* Accumulated output */ + char *zErrMsg; /* Error message text, if an error occurs */ + int nAlloc; /* Slots allocated for azResult[] */ + int nRow; /* Number of rows in the result */ + int nColumn; /* Number of columns in the result */ + int nData; /* Slots used in azResult[]. (nRow+1)*nColumn */ + int rc; /* Return code from sqlite3_exec() */ } TabResult; /* @@ -43,10 +44,10 @@ ** memory as necessary. */ static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){ - TabResult *p = (TabResult*)pArg; - int need; - int i; - char *z; + TabResult *p = (TabResult*)pArg; /* Result accumulator */ + int need; /* Slots needed in p->azResult[] */ + int i; /* Loop counter */ + char *z; /* A single column of result */ /* Make sure there is enough space in p->azResult to hold everything ** we need to remember from this invocation of the callback. 
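The sqlite3_get_table() machinery being reworked in these table.c hunks is easiest to see from the caller's side. The example below uses only the documented public API; the schema and data are invented for the demonstration. The result is one flat array of (nRow+1)*nColumn strings, with row 0 holding the column names.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char **azResult, *zErr = 0;
  int nRow, nCol;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(a,b);"
                   "INSERT INTO t VALUES(1,'x');"
                   "INSERT INTO t VALUES(2,'y');", 0, 0, 0);
  if( sqlite3_get_table(db, "SELECT a, b FROM t", &azResult,
                        &nRow, &nCol, &zErr)==SQLITE_OK ){
    for(int i=0; i<(nRow+1)*nCol; i++){
      printf("%s%s", azResult[i] ? azResult[i] : "NULL",
             (i%nCol)==(nCol-1) ? "\n" : " | ");
    }
    sqlite3_free_table(azResult);   /* one call frees the whole table */
  }else{
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}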
@@ -56,9 +57,9 @@ }else{ need = nCol; } - if( p->nData + need >= p->nAlloc ){ + if( p->nData + need > p->nAlloc ){ char **azNew; - p->nAlloc = p->nAlloc*2 + need + 1; + p->nAlloc = p->nAlloc*2 + need; azNew = sqlite3_realloc( p->azResult, sizeof(char*)*p->nAlloc ); if( azNew==0 ) goto malloc_failed; p->azResult = azNew; @@ -70,17 +71,15 @@ if( p->nRow==0 ){ p->nColumn = nCol; for(i=0; iazResult[p->nData++] = z; } }else if( p->nColumn!=nCol ){ - sqlite3SetString(&p->zErrMsg, - "sqlite3_get_table() called with two or more incompatible queries", - (char*)0); + sqlite3_free(p->zErrMsg); + p->zErrMsg = sqlite3_mprintf( + "sqlite3_get_table() called with two or more incompatible queries" + ); p->rc = SQLITE_ERROR; return 1; } @@ -92,7 +91,7 @@ if( argv[i]==0 ){ z = 0; }else{ - int n = strlen(argv[i])+1; + int n = sqlite3Strlen30(argv[i])+1; z = sqlite3_malloc( n ); if( z==0 ) goto malloc_failed; memcpy(z, argv[i], n); @@ -128,25 +127,26 @@ ){ int rc; TabResult res; - if( pazResult==0 ){ return SQLITE_ERROR; } + *pazResult = 0; if( pnColumn ) *pnColumn = 0; if( pnRow ) *pnRow = 0; + if( pzErrMsg ) *pzErrMsg = 0; res.zErrMsg = 0; - res.nResult = 0; res.nRow = 0; res.nColumn = 0; res.nData = 1; res.nAlloc = 20; res.rc = SQLITE_OK; - res.azResult = sqlite3_malloc( sizeof(char*)*res.nAlloc ); - if( res.azResult==0 ) return SQLITE_NOMEM; + res.azResult = sqlite3_malloc(sizeof(char*)*res.nAlloc ); + if( res.azResult==0 ){ + db->errCode = SQLITE_NOMEM; + return SQLITE_NOMEM; + } res.azResult[0] = 0; rc = sqlite3_exec(db, zSql, sqlite3_get_table_cb, &res, pzErrMsg); - if( res.azResult ){ - assert( sizeof(res.azResult[0])>= sizeof(res.nData) ); - res.azResult[0] = (char*)res.nData; - } + assert( sizeof(res.azResult[0])>= sizeof(res.nData) ); + res.azResult[0] = SQLITE_INT_TO_PTR(res.nData); if( (rc&0xff)==SQLITE_ABORT ){ sqlite3_free_table(&res.azResult[1]); if( res.zErrMsg ){ @@ -154,30 +154,30 @@ sqlite3_free(*pzErrMsg); *pzErrMsg = sqlite3_mprintf("%s",res.zErrMsg); } - sqliteFree(res.zErrMsg); + sqlite3_free(res.zErrMsg); } - db->errCode = res.rc; - return res.rc & db->errMask; + db->errCode = res.rc; /* Assume 32-bit assignment is atomic */ + return res.rc; } - sqliteFree(res.zErrMsg); + sqlite3_free(res.zErrMsg); if( rc!=SQLITE_OK ){ sqlite3_free_table(&res.azResult[1]); - return rc & db->errMask; + return rc; } if( res.nAlloc>res.nData ){ char **azNew; - azNew = sqlite3_realloc( res.azResult, sizeof(char*)*(res.nData+1) ); + azNew = sqlite3_realloc( res.azResult, sizeof(char*)*res.nData ); if( azNew==0 ){ sqlite3_free_table(&res.azResult[1]); + db->errCode = SQLITE_NOMEM; return SQLITE_NOMEM; } - res.nAlloc = res.nData+1; res.azResult = azNew; } *pazResult = &res.azResult[1]; if( pnColumn ) *pnColumn = res.nColumn; if( pnRow ) *pnRow = res.nRow; - return rc & db->errMask; + return rc; } /* @@ -189,8 +189,8 @@ if( azResult ){ int i, n; azResult--; - if( azResult==0 ) return; - n = (int)azResult[0]; + assert( azResult!=0 ); + n = SQLITE_PTR_TO_INT(azResult[0]); for(i=1; i @@ -23,7 +23,6 @@ */ #ifndef SQLITE_AMALGAMATION # include "sqliteInt.h" -# include "hash.h" # include # include # include @@ -86,7 +85,7 @@ SqlPreparedStmt *pPrev; /* Previous on the list */ sqlite3_stmt *pStmt; /* The prepared statement */ int nSql; /* chars in zSql[] */ - char zSql[1]; /* Text of the SQL statement */ + const char *zSql; /* Text of the SQL statement */ }; typedef struct IncrblobChannel IncrblobChannel; @@ -105,10 +104,12 @@ char *zProfile; /* The profile callback routine */ char *zProgress; /* The progress 
callback routine */ char *zAuth; /* The authorization callback routine */ + int disableAuth; /* Disable the authorizer if it exists */ char *zNull; /* Text to substitute for an SQL NULL value */ SqlFunc *pFunc; /* List of SQL functions */ Tcl_Obj *pUpdateHook; /* Update hook script (if any) */ Tcl_Obj *pRollbackHook; /* Rollback hook script (if any) */ + Tcl_Obj *pUnlockNotify; /* Unlock notify script (if any) */ SqlCollate *pCollate; /* List of SQL collation functions */ int rc; /* Return code of most recent sqlite3_exec() */ Tcl_Obj *pCollateNeeded; /* Collation needed script */ @@ -117,6 +118,8 @@ int maxStmt; /* The next maximum number of stmtList */ int nStmt; /* Number of statements in stmtList */ IncrblobChannel *pIncrblob;/* Linked list of open incrblob channels */ + int nStep, nSort; /* Statistics for most recent operation */ + int nTransaction; /* Number of nested [transaction] methods */ }; struct IncrblobChannel { @@ -128,6 +131,17 @@ IncrblobChannel *pPrev; /* Linked list of all open incrblob channels */ }; +/* +** Compute a string length that is limited to what can be stored in +** lower 30 bits of a 32-bit signed integer. +*/ +static int strlen30(const char *z){ + const char *z2 = z; + while( *z2 ){ z2++; } + return 0x3fffffff & (int)(z2 - z); +} + + #ifndef SQLITE_OMIT_INCRBLOB /* ** Close all incrblob channels opened using database connection pDb. @@ -383,7 +397,7 @@ static SqlFunc *findSqlFunc(SqliteDb *pDb, const char *zName){ SqlFunc *p, *pNew; int i; - pNew = (SqlFunc*)Tcl_Alloc( sizeof(*pNew) + strlen(zName) + 1 ); + pNew = (SqlFunc*)Tcl_Alloc( sizeof(*pNew) + strlen30(zName) + 1 ); pNew->zName = (char*)&pNew[1]; for(i=0; zName[i]; i++){ pNew->zName[i] = tolower(zName[i]); } pNew->zName[i] = 0; @@ -480,6 +494,7 @@ return 1; } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK /* ** This routine is invoked as the 'progress callback' for the database. */ @@ -494,6 +509,7 @@ } return 0; } +#endif #ifndef SQLITE_OMIT_TRACE /* @@ -559,6 +575,33 @@ } } +#if defined(SQLITE_TEST) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) +static void setTestUnlockNotifyVars(Tcl_Interp *interp, int iArg, int nArg){ + char zBuf[64]; + sprintf(zBuf, "%d", iArg); + Tcl_SetVar(interp, "sqlite_unlock_notify_arg", zBuf, TCL_GLOBAL_ONLY); + sprintf(zBuf, "%d", nArg); + Tcl_SetVar(interp, "sqlite_unlock_notify_argcount", zBuf, TCL_GLOBAL_ONLY); +} +#else +# define setTestUnlockNotifyVars(x,y,z) +#endif + +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY +static void DbUnlockNotify(void **apArg, int nArg){ + int i; + for(i=0; iinterp, i, nArg); + assert( pDb->pUnlockNotify); + Tcl_EvalObjEx(pDb->interp, pDb->pUnlockNotify, flags); + Tcl_DecrRefCount(pDb->pUnlockNotify); + pDb->pUnlockNotify = 0; + } +} +#endif + static void DbUpdateHandler( void *p, int op, @@ -760,6 +803,7 @@ int rc; const char *zReply; SqliteDb *pDb = (SqliteDb*)pArg; + if( pDb->disableAuth ) return SQLITE_OK; switch( code ){ case SQLITE_COPY : zCode="SQLITE_COPY"; break; @@ -794,6 +838,7 @@ case SQLITE_CREATE_VTABLE : zCode="SQLITE_CREATE_VTABLE"; break; case SQLITE_DROP_VTABLE : zCode="SQLITE_DROP_VTABLE"; break; case SQLITE_FUNCTION : zCode="SQLITE_FUNCTION"; break; + case SQLITE_SAVEPOINT : zCode="SQLITE_SAVEPOINT"; break; default : zCode="????"; break; } Tcl_DStringInit(&str); @@ -887,6 +932,67 @@ return zLine; } + +/* +** Figure out the column names for the data returned by the statement +** passed as the second argument. +** +** If parameter papColName is not NULL, then *papColName is set to point +** at an array allocated using Tcl_Alloc(). 
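What computeColumnNames() gathers for the Tcl layer corresponds to two plain C calls on the prepared statement. A minimal example, with the schema and the column alias invented for illustration:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER, b TEXT, c REAL)", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT a, b AS bee, c FROM t", -1, &pStmt, 0);
  int nCol = sqlite3_column_count(pStmt);
  for(int i=0; i<nCol; i++){
    printf("column %d: %s\n", i, sqlite3_column_name(pStmt, i));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}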
It is the callers responsibility +** to free this array using Tcl_Free(), and to decrement the reference +** count of each Tcl_Obj* member of the array. +** +** The return value of this function is the number of columns of data +** returned by pStmt (and hence the size of the *papColName array). +** +** If pArray is not NULL, then it contains the name of a Tcl array +** variable. The "*" member of this array is set to a list containing +** the names of the columns returned by the statement, in order from +** left to right. e.g. if the names of the returned columns are a, b and +** c, it does the equivalent of the tcl command: +** +** set ${pArray}(*) {a b c} +*/ +static int +computeColumnNames( + Tcl_Interp *interp, + sqlite3_stmt *pStmt, /* SQL statement */ + Tcl_Obj ***papColName, /* OUT: Array of column names */ + Tcl_Obj *pArray /* Name of array variable (may be null) */ +){ + int nCol; + + /* Compute column names */ + nCol = sqlite3_column_count(pStmt); + if( papColName ){ + int i; + Tcl_Obj **apColName = (Tcl_Obj**)Tcl_Alloc( sizeof(Tcl_Obj*)*nCol ); + for(i=0; idb, zSrcDb); + if( pBackup==0 ){ + Tcl_AppendResult(interp, "backup failed: ", + sqlite3_errmsg(pDest), (char*)0); + sqlite3_close(pDest); + return TCL_ERROR; + } + while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK ){} + sqlite3_backup_finish(pBackup); + if( rc==SQLITE_DONE ){ + rc = TCL_OK; + }else{ + Tcl_AppendResult(interp, "backup failed: ", + sqlite3_errmsg(pDest), (char*)0); + rc = TCL_ERROR; + } + sqlite3_close(pDest); + break; + } + /* $db busy ?CALLBACK? ** ** Invoke the given callback if an SQL statement attempts to open @@ -1074,7 +1232,7 @@ } }else{ Tcl_AppendResult( interp, "bad option \"", - Tcl_GetStringFromObj(objv[0],0), "\": must be flush or size", 0); + Tcl_GetStringFromObj(objv[2],0), "\": must be flush or size", 0); return TCL_ERROR; } break; @@ -1239,7 +1397,6 @@ char *zFile; /* The file from which to extract data */ char *zConflict; /* The conflict algorithm to use */ sqlite3_stmt *pStmt; /* A statement */ - int rc; /* Result code */ int nCol; /* Number of columns in the table */ int nByte; /* Number of bytes in an SQL string */ int i, j; /* Loop counters */ @@ -1274,17 +1431,17 @@ zConflict = Tcl_GetStringFromObj(objv[2], 0); zTable = Tcl_GetStringFromObj(objv[3], 0); zFile = Tcl_GetStringFromObj(objv[4], 0); - nSep = strlen(zSep); - nNull = strlen(zNull); + nSep = strlen30(zSep); + nNull = strlen30(zNull); if( nSep==0 ){ Tcl_AppendResult(interp,"Error: non-null separator required for copy",0); return TCL_ERROR; } - if(sqlite3StrICmp(zConflict, "rollback") != 0 && - sqlite3StrICmp(zConflict, "abort" ) != 0 && - sqlite3StrICmp(zConflict, "fail" ) != 0 && - sqlite3StrICmp(zConflict, "ignore" ) != 0 && - sqlite3StrICmp(zConflict, "replace" ) != 0 ) { + if(strcmp(zConflict, "rollback") != 0 && + strcmp(zConflict, "abort" ) != 0 && + strcmp(zConflict, "fail" ) != 0 && + strcmp(zConflict, "ignore" ) != 0 && + strcmp(zConflict, "replace" ) != 0 ) { Tcl_AppendResult(interp, "Error: \"", zConflict, "\", conflict-algorithm must be one of: rollback, " "abort, fail, ignore, or replace", 0); @@ -1295,7 +1452,7 @@ Tcl_AppendResult(interp, "Error: no such table: ", zTable, 0); return TCL_ERROR; } - nByte = strlen(zSql); + nByte = strlen30(zSql); rc = sqlite3_prepare(pDb->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); if( rc ){ @@ -1315,7 +1472,7 @@ } sqlite3_snprintf(nByte+50, zSql, "INSERT OR %q INTO '%q' VALUES(?", zConflict, zTable); - j = strlen(zSql); + j = strlen30(zSql); for(i=1; i0 && strcmp(azCol[i], 
zNull)==0) || strlen(azCol[i])==0) { + if( (nNull>0 && strcmp(azCol[i], zNull)==0) + || strlen30(azCol[i])==0 + ){ sqlite3_bind_null(pStmt, i+1); }else{ sqlite3_bind_text(pStmt, i+1, azCol[i], -1, SQLITE_STATIC); @@ -1507,7 +1666,7 @@ while( rc==TCL_OK && zSql[0] ){ int i; /* Loop counter */ int nVar; /* Number of bind parameters in the pStmt */ - int nCol; /* Number of columns in the result set */ + int nCol = -1; /* Number of columns in the result set */ Tcl_Obj **apColName = 0; /* Array of column names */ int len; /* String length of zSql */ @@ -1515,13 +1674,8 @@ ** which matches the next sequence of SQL. */ pStmt = 0; - pPreStmt = pDb->stmtList; - len = strlen(zSql); - if( pPreStmt && sqlite3_expired(pPreStmt->pStmt) ){ - flushStmtCache(pDb); - pPreStmt = 0; - } - for(; pPreStmt; pPreStmt=pPreStmt->pNext){ + len = strlen30(zSql); + for(pPreStmt = pDb->stmtList; pPreStmt; pPreStmt=pPreStmt->pNext){ int n = pPreStmt->nSql; if( len>=n && memcmp(pPreStmt->zSql, zSql, n)==0 @@ -1552,7 +1706,7 @@ /* If no prepared statement was found. Compile the SQL text */ if( pStmt==0 ){ - if( SQLITE_OK!=sqlite3_prepare(pDb->db, zSql, -1, &pStmt, &zLeft) ){ + if( SQLITE_OK!=sqlite3_prepare_v2(pDb->db, zSql, -1, &pStmt, &zLeft) ){ Tcl_SetObjResult(interp, dbTextToObj(sqlite3_errmsg(pDb->db))); rc = TCL_ERROR; break; @@ -1626,35 +1780,20 @@ } } - /* Compute column names */ - nCol = sqlite3_column_count(pStmt); - if( pScript ){ - apColName = (Tcl_Obj**)Tcl_Alloc( sizeof(Tcl_Obj*)*nCol ); - if( apColName==0 ) break; - for(i=0; inStep = sqlite3_stmt_status(pStmt, + SQLITE_STMTSTATUS_FULLSCAN_STEP, 0); + pDb->nSort = sqlite3_stmt_status(pStmt, + SQLITE_STMTSTATUS_SORT, 0); rc = Tcl_EvalObjEx(interp, pScript, 0); if( rc==TCL_CONTINUE ){ rc = TCL_OK; @@ -1727,6 +1872,14 @@ /* Free the column name objects */ if( pScript ){ + /* If the query returned no rows, but an array variable was + ** specified, call computeColumnNames() now to populate the + ** arrayname(*) variable. + */ + if (pArray && nCol < 0) { + Tcl_Obj ***ap = (pScript?&apColName:0); + nCol = computeColumnNames(interp, pStmt, ap, pArray); + } for(i=0; inStep = sqlite3_stmt_status(pStmt, + SQLITE_STMTSTATUS_FULLSCAN_STEP, 1); + pDb->nSort = sqlite3_stmt_status(pStmt, + SQLITE_STMTSTATUS_SORT, 1); + if( SQLITE_OK!=rc2 ){ /* If a run-time error occurs, report the error and stop reading ** the SQL */ @@ -1773,12 +1922,13 @@ */ if( pPreStmt==0 ){ len = zLeft - zSql; - pPreStmt = (SqlPreparedStmt*)Tcl_Alloc( sizeof(*pPreStmt) + len ); + pPreStmt = (SqlPreparedStmt*)Tcl_Alloc( sizeof(*pPreStmt) ); if( pPreStmt==0 ) return TCL_ERROR; pPreStmt->pStmt = pStmt; pPreStmt->nSql = len; - memcpy(pPreStmt->zSql, zSql, len); - pPreStmt->zSql[len] = 0; + pPreStmt->zSql = sqlite3_sql(pStmt); + assert( strlen30(pPreStmt->zSql)==len ); + assert( 0==memcmp(pPreStmt->zSql, zSql, len) ); } /* Add the prepared statement to the beginning of the cache list @@ -1826,7 +1976,7 @@ } /* - ** $db function NAME SCRIPT + ** $db function NAME [-argcount N] SCRIPT ** ** Create a new SQL function called NAME. Whenever that function is ** called, invoke SCRIPT to evaluate the function. 
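The [-argcount N] option added below ends up as the nArg parameter of sqlite3_create_function(). A stripped-down C illustration of the same registration, using a made-up half() function; passing 1 makes SQLite reject calls with any other number of arguments at prepare time:

#include <stdio.h>
#include <sqlite3.h>

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
  printf("%s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);
  sqlite3_exec(db, "SELECT half(10)", printRow, 0, 0);   /* prints 5.0 */
  sqlite3_close(db);
  return 0;
}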
@@ -1835,12 +1985,26 @@ SqlFunc *pFunc; Tcl_Obj *pScript; char *zName; - if( objc!=4 ){ - Tcl_WrongNumArgs(interp, 2, objv, "NAME SCRIPT"); + int nArg = -1; + if( objc==6 ){ + const char *z = Tcl_GetString(objv[3]); + int n = strlen30(z); + if( n>2 && strncmp(z, "-argcount",n)==0 ){ + if( Tcl_GetIntFromObj(interp, objv[4], &nArg) ) return TCL_ERROR; + if( nArg<0 ){ + Tcl_AppendResult(interp, "number of arguments must be non-negative", + (char*)0); + return TCL_ERROR; + } + } + pScript = objv[5]; + }else if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 2, objv, "NAME [-argcount N] SCRIPT"); return TCL_ERROR; + }else{ + pScript = objv[3]; } zName = Tcl_GetStringFromObj(objv[2], 0); - pScript = objv[3]; pFunc = findSqlFunc(pDb, zName); if( pFunc==0 ) return TCL_ERROR; if( pFunc->pScript ){ @@ -1849,14 +2013,11 @@ pFunc->pScript = pScript; Tcl_IncrRefCount(pScript); pFunc->useEvalObjv = safeToUseEvalObjv(interp, pScript); - rc = sqlite3_create_function(pDb->db, zName, -1, SQLITE_UTF8, + rc = sqlite3_create_function(pDb->db, zName, nArg, SQLITE_UTF8, pFunc, tclSqlFunc, 0, 0); if( rc!=SQLITE_OK ){ rc = TCL_ERROR; Tcl_SetResult(interp, (char *)sqlite3_errmsg(pDb->db), TCL_VOLATILE); - }else{ - /* Must flush any cached statements */ - flushStmtCache( pDb ); } break; } @@ -2069,6 +2230,92 @@ break; } + /* $db restore ?DATABASE? FILENAME + ** + ** Open a database file named FILENAME. Transfer the content + ** of FILENAME into the local database DATABASE (default: "main"). + */ + case DB_RESTORE: { + const char *zSrcFile; + const char *zDestDb; + sqlite3 *pSrc; + sqlite3_backup *pBackup; + int nTimeout = 0; + + if( objc==3 ){ + zDestDb = "main"; + zSrcFile = Tcl_GetString(objv[2]); + }else if( objc==4 ){ + zDestDb = Tcl_GetString(objv[2]); + zSrcFile = Tcl_GetString(objv[3]); + }else{ + Tcl_WrongNumArgs(interp, 2, objv, "?DATABASE? FILENAME"); + return TCL_ERROR; + } + rc = sqlite3_open_v2(zSrcFile, &pSrc, SQLITE_OPEN_READONLY, 0); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "cannot open source database: ", + sqlite3_errmsg(pSrc), (char*)0); + sqlite3_close(pSrc); + return TCL_ERROR; + } + pBackup = sqlite3_backup_init(pDb->db, zDestDb, pSrc, "main"); + if( pBackup==0 ){ + Tcl_AppendResult(interp, "restore failed: ", + sqlite3_errmsg(pDb->db), (char*)0); + sqlite3_close(pSrc); + return TCL_ERROR; + } + while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK + || rc==SQLITE_BUSY ){ + if( rc==SQLITE_BUSY ){ + if( nTimeout++ >= 3 ) break; + sqlite3_sleep(100); + } + } + sqlite3_backup_finish(pBackup); + if( rc==SQLITE_DONE ){ + rc = TCL_OK; + }else if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){ + Tcl_AppendResult(interp, "restore failed: source database busy", + (char*)0); + rc = TCL_ERROR; + }else{ + Tcl_AppendResult(interp, "restore failed: ", + sqlite3_errmsg(pDb->db), (char*)0); + rc = TCL_ERROR; + } + sqlite3_close(pSrc); + break; + } + + /* + ** $db status (step|sort) + ** + ** Display SQLITE_STMTSTATUS_FULLSCAN_STEP or + ** SQLITE_STMTSTATUS_SORT for the most recent eval. 
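The two counters surfaced by the new [$db status] method come straight from sqlite3_stmt_status(), which is available in the 3.6.x library this patch targets. A small stand-alone reading of both counters; the table and query are invented for the example:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a);"
                   "INSERT INTO t VALUES(3);"
                   "INSERT INTO t VALUES(1);", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT a FROM t ORDER BY a", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){}
  printf("full-scan steps: %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0));
  printf("sort operations: %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0));
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}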
+ */ + case DB_STATUS: { + int v; + const char *zOp; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "(step|sort)"); + return TCL_ERROR; + } + zOp = Tcl_GetString(objv[2]); + if( strcmp(zOp, "step")==0 ){ + v = pDb->nStep; + }else if( strcmp(zOp, "sort")==0 ){ + v = pDb->nSort; + }else{ + Tcl_AppendResult(interp, "bad argument: should be step or sort", + (char*)0); + return TCL_ERROR; + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(v)); + break; + } + /* ** $db timeout MILLESECONDS ** @@ -2153,16 +2400,17 @@ ** 2005 O'Reilly Open Source Convention (OSCON). */ case DB_TRANSACTION: { - int inTrans; Tcl_Obj *pScript; - const char *zBegin = "BEGIN"; + const char *zBegin = "SAVEPOINT _tcl_transaction"; + const char *zEnd; if( objc!=3 && objc!=4 ){ Tcl_WrongNumArgs(interp, 2, objv, "[TYPE] SCRIPT"); return TCL_ERROR; } - if( objc==3 ){ - pScript = objv[2]; - } else { + + if( pDb->nTransaction ){ + zBegin = "SAVEPOINT _tcl_transaction"; + }else if( pDb->nTransaction==0 && objc==4 ){ static const char *TTYPE_strs[] = { "deferred", "exclusive", "immediate", 0 }; @@ -2179,24 +2427,91 @@ case TTYPE_EXCLUSIVE: zBegin = "BEGIN EXCLUSIVE"; break; case TTYPE_IMMEDIATE: zBegin = "BEGIN IMMEDIATE"; break; } - pScript = objv[3]; } - inTrans = !sqlite3_get_autocommit(pDb->db); - if( !inTrans ){ - (void)sqlite3_exec(pDb->db, zBegin, 0, 0, 0); + pScript = objv[objc-1]; + + pDb->disableAuth++; + rc = sqlite3_exec(pDb->db, zBegin, 0, 0, 0); + pDb->disableAuth--; + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, sqlite3_errmsg(pDb->db), 0); + return TCL_ERROR; } + + pDb->nTransaction++; rc = Tcl_EvalObjEx(interp, pScript, 0); - if( !inTrans ){ - const char *zEnd; - if( rc==TCL_ERROR ){ - zEnd = "ROLLBACK"; - } else { + pDb->nTransaction--; + + if( rc!=TCL_ERROR ){ + if( pDb->nTransaction ){ + zEnd = "RELEASE _tcl_transaction"; + }else{ zEnd = "COMMIT"; } - if( sqlite3_exec(pDb->db, zEnd, 0, 0, 0) ){ - sqlite3_exec(pDb->db, "ROLLBACK", 0, 0, 0); + }else{ + if( pDb->nTransaction ){ + zEnd = "ROLLBACK TO _tcl_transaction ; RELEASE _tcl_transaction"; + }else{ + zEnd = "ROLLBACK"; } } + + pDb->disableAuth++; + if( sqlite3_exec(pDb->db, zEnd, 0, 0, 0) ){ + /* This is a tricky scenario to handle. The most likely cause of an + ** error is that the exec() above was an attempt to commit the + ** top-level transaction that returned SQLITE_BUSY. Or, less likely, + ** that an IO-error has occured. In either case, throw a Tcl exception + ** and try to rollback the transaction. + ** + ** But it could also be that the user executed one or more BEGIN, + ** COMMIT, SAVEPOINT, RELEASE or ROLLBACK commands that are confusing + ** this method's logic. Not clear how this would be best handled. + */ + if( rc!=TCL_ERROR ){ + Tcl_AppendResult(interp, sqlite3_errmsg(pDb->db), 0); + rc = TCL_ERROR; + } + sqlite3_exec(pDb->db, "ROLLBACK", 0, 0, 0); + } + pDb->disableAuth--; + + break; + } + + /* + ** $db unlock_notify ?script? 
+ */ + case DB_UNLOCK_NOTIFY: { +#ifndef SQLITE_ENABLE_UNLOCK_NOTIFY + Tcl_AppendResult(interp, "unlock_notify not available in this build", 0); + rc = TCL_ERROR; +#else + if( objc!=2 && objc!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?SCRIPT?"); + rc = TCL_ERROR; + }else{ + void (*xNotify)(void **, int) = 0; + void *pNotifyArg = 0; + + if( pDb->pUnlockNotify ){ + Tcl_DecrRefCount(pDb->pUnlockNotify); + pDb->pUnlockNotify = 0; + } + + if( objc==3 ){ + xNotify = DbUnlockNotify; + pNotifyArg = (void *)pDb; + pDb->pUnlockNotify = objv[2]; + Tcl_IncrRefCount(pDb->pUnlockNotify); + } + + if( sqlite3_unlock_notify(pDb->db, xNotify, pNotifyArg) ){ + Tcl_AppendResult(interp, sqlite3_errmsg(pDb->db), 0); + rc = TCL_ERROR; + } + } +#endif break; } @@ -2257,7 +2572,8 @@ } /* -** sqlite3 DBNAME FILENAME ?MODE? ?-key KEY? +** sqlite3 DBNAME FILENAME ?-vfs VFSNAME? ?-key KEY? ?-readonly BOOLEAN? +** ?-create BOOLEAN? ?-nomutex BOOLEAN? ** ** This is the main Tcl command. When the "sqlite" Tcl command is ** invoked, this routine runs to process that command. @@ -2267,25 +2583,8 @@ ** DBNAME that is used to control that connection. The database ** connection is deleted when the DBNAME command is deleted. ** -** The second argument is the name of the directory that contains -** the sqlite database that is to be accessed. -** -** For testing purposes, we also support the following: -** -** sqlite3 -encoding -** -** Return the encoding used by LIKE and GLOB operators. Choices -** are UTF-8 and iso8859. -** -** sqlite3 -version -** -** Return the version number of the SQLite library. +** The second argument is the name of the database file. ** -** sqlite3 -tcl-uses-utf -** -** Return "1" if compiled with a Tcl uses UTF-8. Return "0" if -** not. Used by tests to make sure the library was compiled -** correctly. */ static int DbMain(void *cd, Tcl_Interp *interp, int objc,Tcl_Obj *const*objv){ SqliteDb *p; @@ -2293,8 +2592,24 @@ int nKey = 0; const char *zArg; char *zErrMsg; + int i; const char *zFile; + const char *zVfs = 0; + int flags; Tcl_DString translatedFilename; + + /* In normal use, each TCL interpreter runs in a single thread. So + ** by default, we can turn of mutexing on SQLite database connections. + ** However, for testing purposes it is useful to have mutexes turned + ** on. So, by default, mutexes default off. But if compiled with + ** SQLITE_TCL_DEFAULT_FULLMUTEX then mutexes default on. 
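The rewritten DbMain() below builds an sqlite3_open_v2() flags word instead of calling sqlite3_open(). A bare C equivalent of the default (non-FULLMUTEX) path; the in-memory database name is arbitrary and the final NULL selects the default VFS:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  /* NOMUTEX drops the per-connection mutex (fine while the handle is used
  ** from a single thread); SQLITE_OPEN_FULLMUTEX would force it back on. */
  int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX;
  if( sqlite3_open_v2(":memory:", &db, flags, 0)!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    return 1;
  }
  sqlite3_close(db);
  return 0;
}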
+ */ +#ifdef SQLITE_TCL_DEFAULT_FULLMUTEX + flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX; +#else + flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX; +#endif + if( objc==2 ){ zArg = Tcl_GetStringFromObj(objv[1], 0); if( strcmp(zArg,"-version")==0 ){ @@ -2309,28 +2624,61 @@ #endif return TCL_OK; } - if( strcmp(zArg,"-tcl-uses-utf")==0 ){ -#ifdef TCL_UTF_MAX - Tcl_AppendResult(interp,"1",0); -#else - Tcl_AppendResult(interp,"0",0); -#endif - return TCL_OK; - } } - if( objc==5 || objc==6 ){ - zArg = Tcl_GetStringFromObj(objv[objc-2], 0); + for(i=3; i+1db); + sqlite3_open_v2(zFile, &p->db, flags, zVfs); Tcl_DStringFree(&translatedFilename); if( SQLITE_OK!=sqlite3_errcode(p->db) ){ zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(p->db)); @@ -2352,7 +2700,9 @@ p->db = 0; } #ifdef SQLITE_HAS_CODEC - sqlite3_key(p->db, pKey, nKey); + if( p->db ){ + sqlite3_key(p->db, pKey, nKey); + } #endif if( p->db==0 ){ Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE); @@ -2364,23 +2714,6 @@ p->interp = interp; zArg = Tcl_GetStringFromObj(objv[1], 0); Tcl_CreateObjCommand(interp, zArg, DbObjCmd, (char*)p, DbDeleteCmd); - - /* If compiled with SQLITE_TEST turned on, then register the "md5sum" - ** SQL function. - */ -#ifdef SQLITE_TEST - { - extern void Md5_Register(sqlite3*); -#ifdef SQLITE_MEMDEBUG - int mallocfail = sqlite3_iMallocFail; - sqlite3_iMallocFail = 0; -#endif - Md5_Register(p->db); -#ifdef SQLITE_MEMDEBUG - sqlite3_iMallocFail = mallocfail; -#endif - } -#endif return TCL_OK; } @@ -2422,23 +2755,34 @@ EXTERN int Tclsqlite3_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } EXTERN int Sqlite3_SafeInit(Tcl_Interp *interp){ return TCL_OK; } EXTERN int Tclsqlite3_SafeInit(Tcl_Interp *interp){ return TCL_OK; } +EXTERN int Sqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Tclsqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Sqlite3_SafeUnload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Tclsqlite3_SafeUnload(Tcl_Interp *interp, int flags){ return TCL_OK;} + #ifndef SQLITE_3_SUFFIX_ONLY EXTERN int Sqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } EXTERN int Tclsqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } EXTERN int Sqlite_SafeInit(Tcl_Interp *interp){ return TCL_OK; } EXTERN int Tclsqlite_SafeInit(Tcl_Interp *interp){ return TCL_OK; } +EXTERN int Sqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Tclsqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Sqlite_SafeUnload(Tcl_Interp *interp, int flags){ return TCL_OK; } +EXTERN int Tclsqlite_SafeUnload(Tcl_Interp *interp, int flags){ return TCL_OK;} #endif #ifdef TCLSH /***************************************************************************** ** The code that follows is used to build standalone TCL interpreters +** that are statically linked with SQLite. */ /* ** If the macro TCLSH is one, then put in code this for the ** "main" routine that will initialize Tcl and take input from -** standard input. +** standard input, or if a file is named on the command line +** the TCL interpreter reads and evaluates that file. */ #if TCLSH==1 static char zMainloop[] = @@ -2478,11 +2822,19 @@ #define TCLSH_MAIN main /* Needed to fake out mktclapp */ int TCLSH_MAIN(int argc, char **argv){ Tcl_Interp *interp; + + /* Call sqlite3_shutdown() once before doing anything else. 
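Earlier in this hunk DbMain switches from sqlite3_open() to sqlite3_open_v2(), which is what lets it honour -vfs, -readonly, -create and -nomutex as well as the NOMUTEX/FULLMUTEX default described above. A sketch of just the flag selection; the helper name and the fullMutex parameter are invented, and SQLITE_TCL_DEFAULT_FULLMUTEX is a compile-time switch of the Tcl binding rather than a core SQLite flag:

#include <sqlite3.h>

static int open_like_tclsqlite(const char *zFile, sqlite3 **ppDb,
                               const char *zVfs, int fullMutex){
  int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
  flags |= fullMutex ? SQLITE_OPEN_FULLMUTEX : SQLITE_OPEN_NOMUTEX;
  return sqlite3_open_v2(zFile, ppDb, flags, zVfs);  /* zVfs==0 selects the default VFS */
}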
This is to + ** test that sqlite3_shutdown() can be safely called by a process before + ** sqlite3_initialize() is. */ + sqlite3_shutdown(); + Tcl_FindExecutable(argv[0]); interp = Tcl_CreateInterp(); Sqlite3_Init(interp); #ifdef SQLITE_TEST { + extern int Md5_Init(Tcl_Interp*); + extern int Sqliteconfig_Init(Tcl_Interp*); extern int Sqlitetest1_Init(Tcl_Interp*); extern int Sqlitetest2_Init(Tcl_Interp*); extern int Sqlitetest3_Init(Tcl_Interp*); @@ -2492,15 +2844,22 @@ extern int Sqlitetest7_Init(Tcl_Interp*); extern int Sqlitetest8_Init(Tcl_Interp*); extern int Sqlitetest9_Init(Tcl_Interp*); - extern int Md5_Init(Tcl_Interp*); - extern int Sqlitetestsse_Init(Tcl_Interp*); extern int Sqlitetestasync_Init(Tcl_Interp*); - extern int Sqlitetesttclvar_Init(Tcl_Interp*); - extern int Sqlitetestschema_Init(Tcl_Interp*); extern int Sqlitetest_autoext_Init(Tcl_Interp*); + extern int Sqlitetest_func_Init(Tcl_Interp*); extern int Sqlitetest_hexio_Init(Tcl_Interp*); - extern int Sqliteconfig_Init(Tcl_Interp*); + extern int Sqlitetest_malloc_Init(Tcl_Interp*); + extern int Sqlitetest_mutex_Init(Tcl_Interp*); + extern int Sqlitetestschema_Init(Tcl_Interp*); + extern int Sqlitetestsse_Init(Tcl_Interp*); + extern int Sqlitetesttclvar_Init(Tcl_Interp*); + extern int SqlitetestThread_Init(Tcl_Interp*); + extern int SqlitetestOnefile_Init(); + extern int SqlitetestOsinst_Init(Tcl_Interp*); + extern int Sqlitetestbackup_Init(Tcl_Interp*); + Md5_Init(interp); + Sqliteconfig_Init(interp); Sqlitetest1_Init(interp); Sqlitetest2_Init(interp); Sqlitetest3_Init(interp); @@ -2511,12 +2870,18 @@ Sqlitetest8_Init(interp); Sqlitetest9_Init(interp); Sqlitetestasync_Init(interp); - Sqlitetesttclvar_Init(interp); - Sqlitetestschema_Init(interp); Sqlitetest_autoext_Init(interp); + Sqlitetest_func_Init(interp); Sqlitetest_hexio_Init(interp); - Sqliteconfig_Init(interp); - Md5_Init(interp); + Sqlitetest_malloc_Init(interp); + Sqlitetest_mutex_Init(interp); + Sqlitetestschema_Init(interp); + Sqlitetesttclvar_Init(interp); + SqlitetestThread_Init(interp); + SqlitetestOnefile_Init(interp); + SqlitetestOsinst_Init(interp); + Sqlitetestbackup_Init(interp); + #ifdef SQLITE_SSE Sqlitetestsse_Init(interp); #endif @@ -2535,7 +2900,7 @@ } if( TCLSH==1 && Tcl_EvalFile(interp, argv[1])!=TCL_OK ){ const char *zInfo = Tcl_GetVar(interp, "errorInfo", TCL_GLOBAL_ONLY); - if( zInfo==0 ) zInfo = interp->result; + if( zInfo==0 ) zInfo = Tcl_GetStringResult(interp); fprintf(stderr,"%s: %s\n", *argv, zInfo); return 1; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test1.c --- sqlite3-3.4.2/src/test1.c 2007-08-08 13:11:21.000000000 +0100 +++ sqlite3-3.6.16/src/test1.c 2009-06-25 12:24:38.000000000 +0100 @@ -13,11 +13,10 @@ ** is not included in the SQLite library. It is used for automated ** testing of the SQLite library. ** -** $Id: test1.c,v 1.259 2007/08/08 12:11:21 drh Exp $ +** $Id: test1.c,v 1.353 2009/05/03 20:23:54 drh Exp $ */ #include "sqliteInt.h" #include "tcl.h" -#include "os.h" #include #include @@ -45,7 +44,7 @@ return h - 'A' + 10; } } -void *sqlite3TextToPtr(const char *z){ +void *sqlite3TestTextToPtr(const char *z){ void *p; u64 v; u32 v2; @@ -103,14 +102,14 @@ /* ** Decode a pointer to an sqlite3 object. 
*/ -static int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb){ +int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb){ struct SqliteDb *p; Tcl_CmdInfo cmdInfo; if( Tcl_GetCommandInfo(interp, zA, &cmdInfo) ){ p = (struct SqliteDb*)cmdInfo.objClientData; *ppDb = p->db; }else{ - *ppDb = (sqlite3*)sqlite3TextToPtr(zA); + *ppDb = (sqlite3*)sqlite3TestTextToPtr(zA); } return TCL_OK; } @@ -118,34 +117,54 @@ const char *sqlite3TestErrorName(int rc){ const char *zName = 0; - switch( rc & 0xff ){ - case SQLITE_OK: zName = "SQLITE_OK"; break; - case SQLITE_ERROR: zName = "SQLITE_ERROR"; break; - case SQLITE_PERM: zName = "SQLITE_PERM"; break; - case SQLITE_ABORT: zName = "SQLITE_ABORT"; break; - case SQLITE_BUSY: zName = "SQLITE_BUSY"; break; - case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break; - case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break; - case SQLITE_READONLY: zName = "SQLITE_READONLY"; break; - case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break; - case SQLITE_IOERR: zName = "SQLITE_IOERR"; break; - case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break; - case SQLITE_FULL: zName = "SQLITE_FULL"; break; - case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break; - case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break; - case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break; - case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break; - case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break; - case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break; - case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break; - case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break; - case SQLITE_AUTH: zName = "SQLITE_AUTH"; break; - case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break; - case SQLITE_RANGE: zName = "SQLITE_RANGE"; break; - case SQLITE_ROW: zName = "SQLITE_ROW"; break; - case SQLITE_DONE: zName = "SQLITE_DONE"; break; - case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break; - default: zName = "SQLITE_Unknown"; break; + switch( rc ){ + case SQLITE_OK: zName = "SQLITE_OK"; break; + case SQLITE_ERROR: zName = "SQLITE_ERROR"; break; + case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break; + case SQLITE_PERM: zName = "SQLITE_PERM"; break; + case SQLITE_ABORT: zName = "SQLITE_ABORT"; break; + case SQLITE_BUSY: zName = "SQLITE_BUSY"; break; + case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break; + case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break; + case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break; + case SQLITE_READONLY: zName = "SQLITE_READONLY"; break; + case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break; + case SQLITE_IOERR: zName = "SQLITE_IOERR"; break; + case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break; + case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break; + case SQLITE_FULL: zName = "SQLITE_FULL"; break; + case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break; + case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break; + case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break; + case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break; + case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break; + case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break; + case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break; + case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break; + case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break; + case SQLITE_AUTH: zName = "SQLITE_AUTH"; break; + case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break; + case SQLITE_RANGE: zName = "SQLITE_RANGE"; break; + case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break; + case SQLITE_ROW: zName = "SQLITE_ROW"; 
break; + case SQLITE_DONE: zName = "SQLITE_DONE"; break; + case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break; + case SQLITE_IOERR_SHORT_READ: zName = "SQLITE_IOERR_SHORT_READ"; break; + case SQLITE_IOERR_WRITE: zName = "SQLITE_IOERR_WRITE"; break; + case SQLITE_IOERR_FSYNC: zName = "SQLITE_IOERR_FSYNC"; break; + case SQLITE_IOERR_DIR_FSYNC: zName = "SQLITE_IOERR_DIR_FSYNC"; break; + case SQLITE_IOERR_TRUNCATE: zName = "SQLITE_IOERR_TRUNCATE"; break; + case SQLITE_IOERR_FSTAT: zName = "SQLITE_IOERR_FSTAT"; break; + case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break; + case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break; + case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break; + case SQLITE_IOERR_BLOCKED: zName = "SQLITE_IOERR_BLOCKED"; break; + case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break; + case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break; + case SQLITE_IOERR_CHECKRESERVEDLOCK: + zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break; + case SQLITE_IOERR_LOCK: zName = "SQLITE_IOERR_LOCK"; break; + default: zName = "SQLITE_Unknown"; break; } return zName; } @@ -162,7 +181,8 @@ ** from sqlite3_errcode. */ int sqlite3TestErrCode(Tcl_Interp *interp, sqlite3 *db, int rc){ - if( rc!=SQLITE_MISUSE && rc!=SQLITE_OK && sqlite3_errcode(db)!=rc ){ + if( sqlite3_threadsafe()==0 && rc!=SQLITE_MISUSE && rc!=SQLITE_OK + && sqlite3_errcode(db)!=rc ){ char zBuf[200]; int r2 = sqlite3_errcode(db); sprintf(zBuf, "error code %s (%d) does not match sqlite3_errcode %s (%d)", @@ -182,19 +202,7 @@ const char *zArg, sqlite3_stmt **ppStmt ){ - *ppStmt = (sqlite3_stmt*)sqlite3TextToPtr(zArg); - return TCL_OK; -} - -/* -** Decode a pointer to an sqlite3_stmt object. -*/ -static int getFilePointer( - Tcl_Interp *interp, - const char *zArg, - OsFile **ppFile -){ - *ppFile = (OsFile*)sqlite3TextToPtr(zArg); + *ppStmt = (sqlite3_stmt*)sqlite3TestTextToPtr(zArg); return TCL_OK; } @@ -237,6 +245,7 @@ /* ** The I/O tracing callback. */ +#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) static FILE *iotrace_file = 0; static void io_trace_callback(const char *zFormat, ...){ va_list ap; @@ -245,6 +254,7 @@ va_end(ap); fflush(iotrace_file); } +#endif /* ** Usage: io_trace FILENAME @@ -259,6 +269,7 @@ int argc, /* Number of arguments */ char **argv /* Text of each argument */ ){ +#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) if( argc!=2 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " FILENAME\"", 0); @@ -269,7 +280,7 @@ fclose(iotrace_file); } iotrace_file = 0; - sqlite3_io_trace = 0; + sqlite3IoTrace = 0; } if( argv[1][0] ){ if( strcmp(argv[1],"stdout")==0 ){ @@ -279,9 +290,10 @@ }else{ iotrace_file = fopen(argv[1], "w"); } - sqlite3_io_trace = io_trace_callback; + sqlite3IoTrace = io_trace_callback; } - return SQLITE_OK; +#endif + return TCL_OK; } @@ -324,6 +336,92 @@ } /* +** Usage: sqlite3_exec_hex DB HEX +** +** Invoke the sqlite3_exec() on a string that is obtained by translating +** HEX into ASCII. Most characters are translated as is. %HH becomes +** a hex character. 
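The widened sqlite3TestErrorName() switch above now distinguishes extended result codes such as SQLITE_IOERR_FSYNC and SQLITE_LOCKED_SHAREDCACHE. For orientation, a small sketch of how an application would observe them; sqlite3_errcode() reports only the primary code unless extended codes are switched on for the connection, while sqlite3_extended_errcode() always reports the full code:

#include <stdio.h>
#include <sqlite3.h>

static void show_error_codes(sqlite3 *db){
  sqlite3_extended_result_codes(db, 1);   /* make the usual APIs return extended codes */
  /* ... after some call on db has failed ... */
  printf("primary=%d extended=%d\n",
         sqlite3_errcode(db), sqlite3_extended_errcode(db));
}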
+*/ +static int test_exec_hex( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + char **argv /* Text of each argument */ +){ + sqlite3 *db; + Tcl_DString str; + int rc, i, j; + char *zErr = 0; + char *zHex; + char zSql[500]; + char zBuf[30]; + if( argc!=3 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " DB HEX", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; + zHex = argv[2]; + for(i=j=0; imutex); + return TCL_OK; +} +static int db_leave( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + char **argv /* Text of each argument */ +){ + sqlite3 *db; + if( argc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " DB", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; + sqlite3_mutex_leave(db->mutex); + return TCL_OK; +} + +/* ** Usage: sqlite3_exec DB SQL ** ** Invoke the sqlite3_exec interface using the open database DB @@ -398,7 +496,7 @@ /* ** Usage: sqlite3_mprintf_z_test SEPARATOR ARG0 ARG1 ... ** -** Test the %z format of sqliteMPrintf(). Use multiple mprintf() calls to +** Test the %z format of sqlite_mprintf(). Use multiple mprintf() calls to ** concatenate arg0 through argn using separator as the separator. ** Return the result. */ @@ -411,18 +509,18 @@ char *zResult = 0; int i; - for(i=2; isizeof(zStr) ) n = sizeof(zStr); - strcpy(zStr, "abcdefghijklmnopqrstuvwxyz"); + sqlite3_snprintf(sizeof(zStr), zStr, "abcdefghijklmnopqrstuvwxyz"); sqlite3_snprintf(n, zStr, zFormat, a1); Tcl_AppendResult(interp, zStr, 0); return TCL_OK; } +#ifndef SQLITE_OMIT_GET_TABLE + /* -** Usage: sqlite3_get_table_printf DB FORMAT STRING +** Usage: sqlite3_get_table_printf DB FORMAT STRING ?--no-counts? ** ** Invoke the sqlite3_get_table_printf() interface using the open database ** DB. The SQL is the string FORMAT. 
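A note on the sqlite3_snprintf() calls that replace strcpy() above: the argument order and return value differ from C99 snprintf(). The buffer size comes first, the output is always zero-terminated when the size is positive, and the return value is the buffer itself rather than a character count. A one-line sketch:

#include <sqlite3.h>

static void snprintf_demo(void){
  char zBuf[10];
  sqlite3_snprintf(sizeof(zBuf), zBuf, "%s", "abcdefghijklmnop");  /* silently truncated, still terminated */
}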
The format string should contain @@ -488,24 +588,35 @@ int i; char zBuf[30]; char *zSql; - if( argc!=4 ){ + int resCount = -1; + if( argc==5 ){ + if( Tcl_GetInt(interp, argv[4], &resCount) ) return TCL_ERROR; + } + if( argc!=4 && argc!=5 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], - " DB FORMAT STRING", 0); + " DB FORMAT STRING ?COUNT?", 0); return TCL_ERROR; } if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; Tcl_DStringInit(&str); zSql = sqlite3_mprintf(argv[2],argv[3]); - rc = sqlite3_get_table(db, zSql, &aResult, &nRow, &nCol, &zErr); + if( argc==5 ){ + rc = sqlite3_get_table(db, zSql, &aResult, 0, 0, &zErr); + }else{ + rc = sqlite3_get_table(db, zSql, &aResult, &nRow, &nCol, &zErr); + resCount = (nRow+1)*nCol; + } sqlite3_free(zSql); sprintf(zBuf, "%d", rc); Tcl_AppendElement(interp, zBuf); if( rc==SQLITE_OK ){ - sprintf(zBuf, "%d", nRow); - Tcl_AppendElement(interp, zBuf); - sprintf(zBuf, "%d", nCol); - Tcl_AppendElement(interp, zBuf); - for(i=0; i<(nRow+1)*nCol; i++){ + if( argc==4 ){ + sprintf(zBuf, "%d", nRow); + Tcl_AppendElement(interp, zBuf); + sprintf(zBuf, "%d", nCol); + Tcl_AppendElement(interp, zBuf); + } + for(i=0; inUsed + n + 2 > p->nAlloc ){ char *zNew; p->nAlloc = p->nAlloc*2 + n + 200; - zNew = sqliteRealloc(p->z, p->nAlloc); + zNew = sqlite3_realloc(p->z, p->nAlloc); if( zNew==0 ){ - sqliteFree(p->z); + sqlite3_free(p->z); memset(p, 0, sizeof(*p)); return; } @@ -742,7 +855,7 @@ (char*)sqlite3_value_text(argv[0]), execFuncCallback, &x, 0); sqlite3_result_text(context, x.z, x.nUsed, SQLITE_TRANSIENT); - sqliteFree(x.z); + sqlite3_free(x.z); } /* @@ -752,7 +865,7 @@ ** * It calls sqlite3_value_text() 3 times on the argument sqlite3_value*. ** If the three pointers returned are not the same an SQL error is raised. ** -** * Otherwise it returns a copy of the text representation of it's +** * Otherwise it returns a copy of the text representation of its ** argument in such a way as the VDBE representation is a Mem* cell ** with the MEM_Term flag clear. ** @@ -883,7 +996,6 @@ ){ int rc; sqlite3 *db; - extern void Md5_Register(sqlite3*); if( argc!=2 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], @@ -916,18 +1028,20 @@ /* Use the sqlite3_create_function16() API here. Mainly for fun, but also ** because it is not tested anywhere else. 
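The sqlite3_get_table_printf test above gains an optional row-count argument, but the shape of the underlying result is unchanged. A sketch of plain sqlite3_get_table() use, with invented names; the result array holds (nRow+1)*nCol strings, header row first, and must be released with sqlite3_free_table():

#include <stdio.h>
#include <sqlite3.h>

static int dump_table(sqlite3 *db, const char *zSql){
  char **aResult; int nRow, nCol, i; char *zErr = 0;
  int rc = sqlite3_get_table(db, zSql, &aResult, &nRow, &nCol, &zErr);
  if( rc!=SQLITE_OK ){ sqlite3_free(zErr); return rc; }
  for(i=0; i<(nRow+1)*nCol; i++){
    printf("%s\n", aResult[i] ? aResult[i] : "NULL");  /* SQL NULLs come back as null pointers */
  }
  sqlite3_free_table(aResult);
  return SQLITE_OK;
}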
*/ if( rc==SQLITE_OK ){ + const void *zUtf16; sqlite3_value *pVal; -#ifdef SQLITE_MEMDEBUG - if( sqlite3_iMallocFail>0 ){ - sqlite3_iMallocFail++; - } -#endif - pVal = sqlite3ValueNew(); + sqlite3_mutex_enter(db->mutex); + pVal = sqlite3ValueNew(db); sqlite3ValueSetStr(pVal, -1, "x_sqlite_exec", SQLITE_UTF8, SQLITE_STATIC); - rc = sqlite3_create_function16(db, - sqlite3ValueText(pVal, SQLITE_UTF16NATIVE), - 1, SQLITE_UTF16, db, sqlite3ExecFunc, 0, 0); + zUtf16 = sqlite3ValueText(pVal, SQLITE_UTF16NATIVE); + if( db->mallocFailed ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_create_function16(db, zUtf16, + 1, SQLITE_UTF16, db, sqlite3ExecFunc, 0, 0); + } sqlite3ValueFree(pVal); + sqlite3_mutex_leave(db->mutex); } #endif @@ -985,12 +1099,27 @@ } } +static void legacyCountStep( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + /* no-op */ +} + +#ifndef SQLITE_OMIT_DEPRECATED +static void legacyCountFinalize(sqlite3_context *context){ + sqlite3_result_int(context, sqlite3_aggregate_count(context)); +} +#endif + /* -** Usage: sqlite_test_create_aggregate DB +** Usage: sqlite3_create_aggregate DB ** ** Call the sqlite3_create_function API on the given database in order -** to create a function named "x_count". This function does the same thing -** as the "md5sum" function. +** to create a function named "x_count". This function is similar +** to the built-in count() function, with a few special quirks +** for testing the sqlite3_result_error() APIs. ** ** The original motivation for this routine was to be able to call the ** sqlite3_create_aggregate function while a query is in progress in order @@ -998,6 +1127,10 @@ ** ** This routine was later extended to test the use of sqlite3_result_error() ** within aggregate functions. +** +** Later: It is now also extended to register the aggregate function +** "legacy_count()" with the supplied database handle. This is used +** to test the deprecated sqlite3_aggregate_count() API. */ static int test_create_aggregate( void *NotUsed, @@ -1016,10 +1149,18 @@ rc = sqlite3_create_function(db, "x_count", 0, SQLITE_UTF8, 0, 0, t1CountStep,t1CountFinalize); if( rc==SQLITE_OK ){ - sqlite3_create_function(db, "x_count", 1, SQLITE_UTF8, 0, 0, + rc = sqlite3_create_function(db, "x_count", 1, SQLITE_UTF8, 0, 0, t1CountStep,t1CountFinalize); } +#ifndef SQLITE_OMIT_DEPRECATED + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function(db, "legacy_count", 0, SQLITE_ANY, 0, 0, + legacyCountStep, legacyCountFinalize + ); + } +#endif if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; + Tcl_SetResult(interp, (char *)t1ErrorName(rc), 0); return TCL_OK; } @@ -1077,18 +1218,6 @@ } /* -** If zNum represents an integer that will fit in 64-bits, then set -** *pValue to that integer and return true. Otherwise return false. -*/ -static int sqlite3GetInt64(const char *zNum, i64 *pValue){ - if( sqlite3FitsIn64Bits(zNum) ){ - sqlite3Atoi64(zNum, pValue); - return 1; - } - return 0; -} - -/* ** Usage: sqlite3_mprintf_int64 FORMAT INTEGER INTEGER INTEGER ** ** Call mprintf with three 64-bit integer arguments @@ -1108,7 +1237,7 @@ return TCL_ERROR; } for(i=2; i<5; i++){ - if( !sqlite3GetInt64(argv[i], &a[i-2]) ){ + if( !sqlite3Atoi64(argv[i], &a[i-2]) ){ Tcl_AppendResult(interp, "argument is not a valid 64-bit integer", 0); return TCL_ERROR; } @@ -1120,6 +1249,39 @@ } /* +** Usage: sqlite3_mprintf_long FORMAT INTEGER INTEGER INTEGER +** +** Call mprintf with three long integer arguments. 
This might be the +** same as sqlite3_mprintf_int or sqlite3_mprintf_int64, depending on +** platform. +*/ +static int sqlite3_mprintf_long( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + char **argv /* Text of each argument */ +){ + int i; + long int a[3]; + int b[3]; + char *z; + if( argc!=5 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " FORMAT INT INT INT\"", 0); + return TCL_ERROR; + } + for(i=2; i<5; i++){ + if( Tcl_GetInt(interp, argv[i], &b[i-2]) ) return TCL_ERROR; + a[i-2] = (long int)b[i-2]; + a[i-2] &= (((u64)1)<<(sizeof(int)*8))-1; + } + z = sqlite3_mprintf(argv[1], a[0], a[1], a[2]); + Tcl_AppendResult(interp, z, 0); + sqlite3_free(z); + return TCL_OK; +} + +/* ** Usage: sqlite3_mprintf_str FORMAT INTEGER INTEGER STRING ** ** Call mprintf with two integer arguments and one string argument @@ -1278,8 +1440,8 @@ ){ char *z; double r; - unsigned x1, x2; - long long unsigned d; + unsigned int x1, x2; + sqlite_uint64 d; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " FORMAT STRING\"", 0); @@ -1299,126 +1461,7 @@ } /* -** Usage: sqlite_malloc_fail N ?REPEAT-INTERVAL? -** -** Rig sqliteMalloc() to fail on the N-th call and every REPEAT-INTERVAL call -** after that. If REPEAT-INTERVAL is 0 or is omitted, then only a single -** malloc will fail. If REPEAT-INTERVAL is 1 then all mallocs after the -** first failure will continue to fail on every call. If REPEAT-INTERVAL is -** 2 then every other malloc will fail. And so forth. -** -** Turn off this mechanism and reset the sqlite3ThreadData()->mallocFailed -** variable if N==0. -*/ -#ifdef SQLITE_MEMDEBUG -static int sqlite_malloc_fail( - void *NotUsed, - Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ - int argc, /* Number of arguments */ - char **argv /* Text of each argument */ -){ - int n; - int rep; - if( argc!=2 && argc!=3 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N\"", 0); - return TCL_ERROR; - } - if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; - if( argc==3 ){ - if( Tcl_GetInt(interp, argv[2], &rep) ) return TCL_ERROR; - }else{ - rep = 0; - } - sqlite3_iMallocFail = n; - sqlite3_iMallocReset = rep; - return TCL_OK; -} -#endif - -/* -** Usage: sqlite_malloc_stat -** -** Return the number of prior calls to sqliteMalloc() and sqliteFree(). -*/ -#ifdef SQLITE_MEMDEBUG -static int sqlite_malloc_stat( - void *NotUsed, - Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ - int argc, /* Number of arguments */ - char **argv /* Text of each argument */ -){ - char zBuf[200]; - sprintf(zBuf, "%d %d %d", sqlite3_nMalloc,sqlite3_nFree,sqlite3_iMallocFail); - Tcl_AppendResult(interp, zBuf, 0); - return TCL_OK; -} - -/* -** This function implements a Tcl command that may be invoked using any of -** the four forms enumerated below. -** -** sqlite_malloc_outstanding -** Return a summary of all unfreed blocks of memory allocated by the -** current thread. See comments above function sqlite3OutstandingMallocs() -** in util.c for a description of the returned value. -** -** sqlite_malloc_outstanding -bytes -** Return the total amount of unfreed memory (in bytes) allocated by -** this thread. -** -** sqlite_malloc_outstanding -maxbytes -** Return the maximum amount of dynamic memory in use at one time -** by this thread. 
-** -** sqlite_malloc_outstanding -clearmaxbytes -** Set the value returned by [sqlite_malloc_outstanding -maxbytes] -** to the current value of [sqlite_malloc_outstanding -bytes]. -*/ -static int sqlite_malloc_outstanding( - ClientData clientData, - Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ - int objc, /* Number of arguments */ - Tcl_Obj *CONST objv[] /* Command arguments */ -){ - extern int sqlite3OutstandingMallocs(Tcl_Interp *interp); - -#if defined(SQLITE_DEBUG) && defined(SQLITE_MEMDEBUG) && SQLITE_MEMDEBUG>1 - if( objc==2 ){ - const char *zArg = Tcl_GetString(objv[1]); -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - ThreadData const *pTd = sqlite3ThreadDataReadOnly(); - if( 0==strcmp(zArg, "-bytes") ){ - Tcl_SetObjResult(interp, Tcl_NewIntObj(pTd->nAlloc)); - }else if( 0==strcmp(zArg, "-clearmaxbytes") ){ - sqlite3_nMaxAlloc = pTd->nAlloc; - }else -#endif - if( 0==strcmp(zArg, "-maxbytes") ){ - Tcl_SetObjResult(interp, Tcl_NewWideIntObj(sqlite3_nMaxAlloc)); - }else{ - Tcl_AppendResult(interp, "bad option \"", zArg, - "\": must be -bytes, -maxbytes or -clearmaxbytes", 0 - ); - return TCL_ERROR; - } - - return TCL_OK; - } - - if( objc!=1 ){ - Tcl_WrongNumArgs(interp, 1, objv, "?-bytes?"); - return TCL_ERROR; - } - - return sqlite3OutstandingMallocs(interp); -#else - return TCL_OK; -#endif -} -#endif - -/* -** Usage: sqlite3_enable_shared_cache BOOLEAN +** Usage: sqlite3_enable_shared_cache ?BOOLEAN? ** */ #if !defined(SQLITE_OMIT_SHARED_CACHE) @@ -1432,18 +1475,21 @@ int enable; int ret = 0; - if( objc!=2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "BOOLEAN"); - return TCL_ERROR; - } - if( Tcl_GetBooleanFromObj(interp, objv[1], &enable) ){ + if( objc!=2 && objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?BOOLEAN?"); return TCL_ERROR; } - ret = sqlite3ThreadDataReadOnly()->useSharedData; - rc = sqlite3_enable_shared_cache(enable); - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)sqlite3ErrStr(rc), TCL_STATIC); - return TCL_ERROR; + ret = sqlite3GlobalConfig.sharedCacheEnabled; + + if( objc==2 ){ + if( Tcl_GetBooleanFromObj(interp, objv[1], &enable) ){ + return TCL_ERROR; + } + rc = sqlite3_enable_shared_cache(enable); + if( rc!=SQLITE_OK ){ + Tcl_SetResult(interp, (char *)sqlite3ErrStr(rc), TCL_STATIC); + return TCL_ERROR; + } } Tcl_SetObjResult(interp, Tcl_NewBooleanObj(ret)); return TCL_OK; @@ -1606,7 +1652,7 @@ } /* -** sqlite3_blob_write CHANNEL OFFSET DATA +** sqlite3_blob_write CHANNEL OFFSET DATA ?NDATA? ** ** This command is used to test the sqlite3_blob_write() in ways that ** the Tcl channel interface does not. 
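The sqlite3_blob_write test introduced here drives the incremental-blob API through a Tcl channel. For orientation, a direct use of the same API; the database, table and column names are invented, and note that blob writes may change content but never the blob's size:

#include <sqlite3.h>

static int overwrite_blob(sqlite3 *db, sqlite3_int64 iRow){
  sqlite3_blob *pBlob;
  int rc = sqlite3_blob_open(db, "main", "t1", "content", iRow, 1, &pBlob);  /* 1 => read/write */
  if( rc==SQLITE_OK ){
    rc = sqlite3_blob_write(pBlob, "hello", 5, 0);  /* 5 bytes at offset 0 */
    sqlite3_blob_close(pBlob);
  }
  return rc;
}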
The first argument should @@ -1635,16 +1681,13 @@ unsigned char *zBuf; int nBuf; - if( objc!=4 ){ - Tcl_WrongNumArgs(interp, 1, objv, "CHANNEL OFFSET DATA"); + if( objc!=4 && objc!=5 ){ + Tcl_WrongNumArgs(interp, 1, objv, "CHANNEL OFFSET DATA ?NDATA?"); return TCL_ERROR; } channel = Tcl_GetChannel(interp, Tcl_GetString(objv[1]), ¬Used); - if( !channel - || TCL_OK!=Tcl_GetIntFromObj(interp, objv[2], &iOffset) - || iOffset<0 - ){ + if( !channel || TCL_OK!=Tcl_GetIntFromObj(interp, objv[2], &iOffset) ){ return TCL_ERROR; } @@ -1652,6 +1695,9 @@ pBlob = *((sqlite3_blob **)instanceData); zBuf = Tcl_GetByteArrayFromObj(objv[3], &nBuf); + if( objc==5 && Tcl_GetIntFromObj(interp, objv[4], &nBuf) ){ + return TCL_ERROR; + } rc = sqlite3_blob_write(pBlob, zBuf, nBuf, iOffset); if( rc!=SQLITE_OK ){ Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); @@ -1717,6 +1763,7 @@ ){ TestCollationX *p; sqlite3 *db; + int rc; if( objc!=5 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB-HANDLE NAME CMP-PROC DEL-PROC"); @@ -1731,7 +1778,15 @@ Tcl_IncrRefCount(p->pCmp); Tcl_IncrRefCount(p->pDel); - sqlite3_create_collation_v2(db, Tcl_GetString(objv[2]), SQLITE_UTF8, + rc = sqlite3_create_collation_v2(db, Tcl_GetString(objv[2]), 16, + (void *)p, testCreateCollationCmp, testCreateCollationDel + ); + if( rc!=SQLITE_MISUSE ){ + Tcl_AppendResult(interp, "sqlite3_create_collate_v2() failed to detect " + "an invalid encoding", (char*)0); + return TCL_ERROR; + } + rc = sqlite3_create_collation_v2(db, Tcl_GetString(objv[2]), SQLITE_UTF8, (void *)p, testCreateCollationCmp, testCreateCollationDel ); return TCL_OK; @@ -1951,73 +2006,152 @@ } /* -** Usage: sqlite3_reset STMT +** Usage: sqlite3_stmt_status STMT CODE RESETFLAG ** -** Reset a statement handle. +** Get the value of a status counter from a statement. */ -static int test_reset( +static int test_stmt_status( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ + int iValue; + int i, op, resetFlag; + const char *zOpName; sqlite3_stmt *pStmt; - int rc; - if( objc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetStringFromObj(objv[0], 0), " ", 0); + static const struct { + const char *zName; + int op; + } aOp[] = { + { "SQLITE_STMTSTATUS_FULLSCAN_STEP", SQLITE_STMTSTATUS_FULLSCAN_STEP }, + { "SQLITE_STMTSTATUS_SORT", SQLITE_STMTSTATUS_SORT }, + }; + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 1, objv, "STMT PARAMETER RESETFLAG"); return TCL_ERROR; } - if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; - - rc = sqlite3_reset(pStmt); - if( pStmt && sqlite3TestErrCode(interp, StmtToDb(pStmt), rc) ){ - return TCL_ERROR; + zOpName = Tcl_GetString(objv[2]); + for(i=0; i=ArraySize(aOp) ){ + if( Tcl_GetIntFromObj(interp, objv[2], &op) ) return TCL_ERROR; } -*/ + if( Tcl_GetBooleanFromObj(interp, objv[3], &resetFlag) ) return TCL_ERROR; + iValue = sqlite3_stmt_status(pStmt, op, resetFlag); + Tcl_SetObjResult(interp, Tcl_NewIntObj(iValue)); return TCL_OK; } /* -** Usage: sqlite3_expired STMT +** Usage: sqlite3_next_stmt DB STMT ** -** Return TRUE if a recompilation of the statement is recommended. +** Return the next statment in sequence after STMT. 
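The new test_stmt_status command above is a thin wrapper around sqlite3_stmt_status(). A sketch of how the two counters it names are normally read; a non-zero FULLSCAN_STEP count usually points at a missing index, and a non-zero SORT count means an ORDER BY could not be satisfied by an index:

#include <stdio.h>
#include <sqlite3.h>

static void report_stmt_stats(sqlite3_stmt *pStmt){
  int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);  /* 0 => do not reset */
  int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
  printf("full-scan steps: %d, sorts: %d\n", nScan, nSort);
}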
*/ -static int test_expired( +static int test_next_stmt( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ sqlite3_stmt *pStmt; - if( objc!=2 ){ + sqlite3 *db = 0; + char zBuf[50]; + + if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetStringFromObj(objv[0], 0), " ", 0); + Tcl_GetStringFromObj(objv[0], 0), " DB STMT", 0); return TCL_ERROR; } - if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; - Tcl_SetObjResult(interp, Tcl_NewBooleanObj(sqlite3_expired(pStmt))); + + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + if( getStmtPointer(interp, Tcl_GetString(objv[2]), &pStmt) ) return TCL_ERROR; + pStmt = sqlite3_next_stmt(db, pStmt); + if( pStmt ){ + if( sqlite3TestMakePointerStr(interp, zBuf, pStmt) ) return TCL_ERROR; + Tcl_AppendResult(interp, zBuf, 0); + } return TCL_OK; } + /* -** Usage: sqlite3_transfer_bindings FROMSTMT TOSTMT +** Usage: sqlite3_reset STMT ** -** Transfer all bindings from FROMSTMT over to TOSTMT +** Reset a statement handle. */ -static int test_transfer_bind( +static int test_reset( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ + sqlite3_stmt *pStmt; + int rc; + + if( objc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetStringFromObj(objv[0], 0), " ", 0); + return TCL_ERROR; + } + + if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; + + rc = sqlite3_reset(pStmt); + if( pStmt && sqlite3TestErrCode(interp, StmtToDb(pStmt), rc) ){ + return TCL_ERROR; + } + Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); +/* + if( rc ){ + return TCL_ERROR; + } +*/ + return TCL_OK; +} + +/* +** Usage: sqlite3_expired STMT +** +** Return TRUE if a recompilation of the statement is recommended. 
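test_next_stmt above exposes sqlite3_next_stmt(), whose usual application is the cleanup loop sketched below: passing a NULL statement returns the first outstanding statement on the connection, and finalizing inside the loop is safe because each iteration asks again from the head of the list:

#include <sqlite3.h>

static void finalize_all(sqlite3 *db){
  sqlite3_stmt *pStmt;
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);
  }
}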
+*/ +static int test_expired( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ +#ifndef SQLITE_OMIT_DEPRECATED + sqlite3_stmt *pStmt; + if( objc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetStringFromObj(objv[0], 0), " ", 0); + return TCL_ERROR; + } + if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; + Tcl_SetObjResult(interp, Tcl_NewBooleanObj(sqlite3_expired(pStmt))); +#endif + return TCL_OK; +} + +/* +** Usage: sqlite3_transfer_bindings FROMSTMT TOSTMT +** +** Transfer all bindings from FROMSTMT over to TOSTMT +*/ +static int test_transfer_bind( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_stmt *pStmt1, *pStmt2; if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", @@ -2028,6 +2162,7 @@ if( getStmtPointer(interp, Tcl_GetString(objv[2]), &pStmt2)) return TCL_ERROR; Tcl_SetObjResult(interp, Tcl_NewIntObj(sqlite3_transfer_bindings(pStmt1,pStmt2))); +#endif return TCL_OK; } @@ -2174,7 +2309,7 @@ assert(0); } - pVal = sqlite3ValueNew(); + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, nA, zA, encin, SQLITE_STATIC); n = sqlite3_value_bytes(pVal); Tcl_ListObjAppendElement(i,pX, @@ -2209,22 +2344,29 @@ rc = sqlite3_create_collation(db, "test_collate", SQLITE_UTF8, (void *)SQLITE_UTF8, val?test_collate_func:0); if( rc==SQLITE_OK ){ + const void *zUtf16; if( TCL_OK!=Tcl_GetBooleanFromObj(interp, objv[3], &val) ) return TCL_ERROR; rc = sqlite3_create_collation(db, "test_collate", SQLITE_UTF16LE, (void *)SQLITE_UTF16LE, val?test_collate_func:0); if( TCL_OK!=Tcl_GetBooleanFromObj(interp, objv[4], &val) ) return TCL_ERROR; -#ifdef SQLITE_MEMDEBUG +#if 0 if( sqlite3_iMallocFail>0 ){ sqlite3_iMallocFail++; } #endif - pVal = sqlite3ValueNew(); + sqlite3_mutex_enter(db->mutex); + pVal = sqlite3ValueNew(db); sqlite3ValueSetStr(pVal, -1, "test_collate", SQLITE_UTF8, SQLITE_STATIC); - rc = sqlite3_create_collation16(db, - sqlite3ValueText(pVal, SQLITE_UTF16NATIVE), SQLITE_UTF16BE, + zUtf16 = sqlite3ValueText(pVal, SQLITE_UTF16NATIVE); + if( db->mallocFailed ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_create_collation16(db, zUtf16, SQLITE_UTF16BE, (void *)SQLITE_UTF16BE, val?test_collate_func:0); + } sqlite3ValueFree(pVal); + sqlite3_mutex_leave(db->mutex); } if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; @@ -2334,11 +2476,9 @@ sqlite3 *db; if( objc>=2 ){ if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; - sqlite3_create_collation(db, "utf16_unaligned", - SQLITE_UTF16, + sqlite3_create_collation(db, "utf16_unaligned", SQLITE_UTF16, 0, alignmentCollFunc); - sqlite3_create_collation(db, "utf16_aligned", - SQLITE_UTF16 | SQLITE_UTF16_ALIGNED, + sqlite3_create_collation(db, "utf16_aligned", SQLITE_UTF16_ALIGNED, 0, alignmentCollFunc); } return SQLITE_OK; @@ -2388,7 +2528,7 @@ Tcl_EvalObjEx(interp, pX, 0); Tcl_DecrRefCount(pX); sqlite3_result_text(pCtx, Tcl_GetStringResult(interp), -1, SQLITE_TRANSIENT); - pVal = sqlite3ValueNew(); + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, Tcl_GetStringResult(interp), SQLITE_UTF8, SQLITE_STATIC); sqlite3_result_text16be(pCtx, sqlite3_value_text16be(pVal), @@ -2411,7 +2551,7 @@ Tcl_NewStringObj((char*)sqlite3_value_text(argv[0]), -1)); Tcl_EvalObjEx(interp, pX, 0); Tcl_DecrRefCount(pX); - pVal = sqlite3ValueNew(); + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, Tcl_GetStringResult(interp), SQLITE_UTF8, SQLITE_STATIC); 
sqlite3_result_text(pCtx,(char*)sqlite3_value_text(pVal),-1,SQLITE_TRANSIENT); @@ -2433,9 +2573,13 @@ Tcl_NewStringObj((char*)sqlite3_value_text(argv[0]), -1)); Tcl_EvalObjEx(interp, pX, 0); Tcl_DecrRefCount(pX); - pVal = sqlite3ValueNew(); + pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, Tcl_GetStringResult(interp), SQLITE_UTF8, SQLITE_STATIC); + sqlite3_result_text16(pCtx, sqlite3_value_text16le(pVal), + -1, SQLITE_TRANSIENT); + sqlite3_result_text16be(pCtx, sqlite3_value_text16le(pVal), + -1, SQLITE_TRANSIENT); sqlite3_result_text16le(pCtx, sqlite3_value_text16le(pVal), -1, SQLITE_TRANSIENT); sqlite3ValueFree(pVal); @@ -2547,8 +2691,7 @@ int rc; if( objc!=4 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetStringFromObj(objv[0], 0), " STMT N VALUE", 0); + Tcl_WrongNumArgs(interp, 1, objv, "STMT IDX N"); return TCL_ERROR; } @@ -2658,6 +2801,24 @@ int idx; double value; int rc; + const char *zVal; + int i; + static const struct { + const char *zName; /* Name of the special floating point value */ + unsigned int iUpper; /* Upper 32 bits */ + unsigned int iLower; /* Lower 32 bits */ + } aSpecialFp[] = { + { "NaN", 0x7fffffff, 0xffffffff }, + { "SNaN", 0x7ff7ffff, 0xffffffff }, + { "-NaN", 0xffffffff, 0xffffffff }, + { "-SNaN", 0xfff7ffff, 0xffffffff }, + { "+Inf", 0x7ff00000, 0x00000000 }, + { "-Inf", 0xfff00000, 0x00000000 }, + { "Epsilon", 0x00000000, 0x00000001 }, + { "-Epsilon", 0x80000000, 0x00000001 }, + { "NaN0", 0x7ff80000, 0x00000000 }, + { "-NaN0", 0xfff80000, 0x00000000 }, + }; if( objc!=4 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", @@ -2667,8 +2828,29 @@ if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; if( Tcl_GetIntFromObj(interp, objv[2], &idx) ) return TCL_ERROR; - if( Tcl_GetDoubleFromObj(interp, objv[3], &value) ) return TCL_ERROR; + /* Intercept the string "NaN" and generate a NaN value for it. + ** All other strings are passed through to Tcl_GetDoubleFromObj(). + ** Tcl_GetDoubleFromObj() should understand "NaN" but some versions + ** contain a bug. + */ + zVal = Tcl_GetString(objv[3]); + for(i=0; i=sizeof(aSpecialFp)/sizeof(aSpecialFp[0]) && + Tcl_GetDoubleFromObj(interp, objv[3], &value) ){ + return TCL_ERROR; + } rc = sqlite3_bind_double(pStmt, idx, value); if( sqlite3TestErrCode(interp, StmtToDb(pStmt), rc) ) return TCL_ERROR; if( rc!=SQLITE_OK ){ @@ -2741,7 +2923,7 @@ if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; if( Tcl_GetIntFromObj(interp, objv[2], &idx) ) return TCL_ERROR; - value = Tcl_GetString(objv[3]); + value = (char*)Tcl_GetByteArrayFromObj(objv[3], &bytes); if( Tcl_GetIntFromObj(interp, objv[4], &bytes) ) return TCL_ERROR; rc = sqlite3_bind_text(pStmt, idx, value, bytes, SQLITE_TRANSIENT); @@ -2795,6 +2977,7 @@ rc = sqlite3_bind_text16(pStmt, idx, (void *)value, bytes, xDel); if( sqlite3TestErrCode(interp, StmtToDb(pStmt), rc) ) return TCL_ERROR; if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, sqlite3TestErrorName(rc), 0); return TCL_ERROR; } @@ -2968,6 +3151,33 @@ } /* +** Usage: sqlite3_extended_errcode DB +** +** Return the string representation of the most recent sqlite3_* API +** error code. e.g. "SQLITE_ERROR". 
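The aSpecialFp table above constructs IEEE-754 specials from their raw 32-bit halves because, per the comment in the patch, Tcl_GetDoubleFromObj() mishandles "NaN" in some Tcl releases. The same construction in plain C, using the "NaN0" pattern from that table; this assumes the usual case in which integers and doubles share a byte order:

#include <string.h>
#include <stdint.h>
#include <sqlite3.h>

static int bind_quiet_nan(sqlite3_stmt *pStmt, int idx){
  uint64_t bits = ((uint64_t)0x7ff80000)<<32;  /* upper word 0x7ff80000, lower word 0 */
  double v;
  memcpy(&v, &bits, sizeof(v));
  return sqlite3_bind_double(pStmt, idx, v);
}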
+*/ +static int test_ex_errcode( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + int rc; + + if( objc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetString(objv[0]), " DB", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + rc = sqlite3_extended_errcode(db); + Tcl_AppendResult(interp, (char *)t1ErrorName(rc), 0); + return TCL_OK; +} + + +/* ** Usage: sqlite3_errcode DB ** ** Return the string representation of the most recent sqlite3_* API @@ -2981,7 +3191,6 @@ ){ sqlite3 *db; int rc; - char zBuf[30]; if( objc!=2 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", @@ -2990,17 +3199,12 @@ } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; rc = sqlite3_errcode(db); - if( (rc&0xff)==rc ){ - zBuf[0] = 0; - }else{ - sprintf(zBuf,"+%d", rc>>8); - } - Tcl_AppendResult(interp, (char *)t1ErrorName(rc), zBuf, 0); + Tcl_AppendResult(interp, (char *)t1ErrorName(rc), 0); return TCL_OK; } /* -** Usage: test_errmsg DB +** Usage: sqlite3_errmsg DB ** ** Returns the UTF-8 representation of the error message string for the ** most recent sqlite3_* API call. @@ -3043,6 +3247,7 @@ #ifndef SQLITE_OMIT_UTF16 sqlite3 *db; const void *zErr; + const char *z; int bytes = 0; if( objc!=2 ){ @@ -3054,7 +3259,8 @@ zErr = sqlite3_errmsg16(db); if( zErr ){ - bytes = sqlite3Utf16ByteLen(zErr, -1); + z = zErr; + for(bytes=0; z[bytes] || z[bytes+1]; bytes+=2){} } Tcl_SetObjResult(interp, Tcl_NewByteArrayObj(zErr, bytes)); #endif /* SQLITE_OMIT_UTF16 */ @@ -3062,7 +3268,7 @@ } /* -** Usage: sqlite3_prepare DB sql bytes tailvar +** Usage: sqlite3_prepare DB sql bytes ?tailvar? ** ** Compile up to bytes of the supplied SQL string using ** database handle . The parameter is the name of a global @@ -3083,21 +3289,24 @@ char zBuf[50]; int rc; - if( objc!=5 ){ + if( objc!=5 && objc!=4 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); + Tcl_GetString(objv[0]), " DB sql bytes ?tailvar?", 0); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zSql = Tcl_GetString(objv[2]); if( Tcl_GetIntFromObj(interp, objv[3], &bytes) ) return TCL_ERROR; - rc = sqlite3_prepare(db, zSql, bytes, &pStmt, &zTail); + rc = sqlite3_prepare(db, zSql, bytes, &pStmt, objc>=5 ? &zTail : 0); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; - if( zTail ){ + if( zTail && objc>=5 ){ if( bytes>=0 ){ bytes = bytes - (zTail-zSql); } + if( strlen(zTail) bytes of the supplied SQL string using ** database handle . The parameter is the name of a global @@ -3136,7 +3345,7 @@ char zBuf[50]; int rc; - if( objc!=5 ){ + if( objc!=5 && objc!=4 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); return TCL_ERROR; @@ -3145,10 +3354,10 @@ zSql = Tcl_GetString(objv[2]); if( Tcl_GetIntFromObj(interp, objv[3], &bytes) ) return TCL_ERROR; - rc = sqlite3_prepare_v2(db, zSql, bytes, &pStmt, &zTail); + rc = sqlite3_prepare_v2(db, zSql, bytes, &pStmt, objc>=5 ? &zTail : 0); assert(rc==SQLITE_OK || pStmt==0); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; - if( zTail ){ + if( zTail && objc>=5 ){ if( bytes>=0 ){ bytes = bytes - (zTail-zSql); } @@ -3169,6 +3378,47 @@ } /* +** Usage: sqlite3_prepare_tkt3134 DB +** +** Generate a prepared statement for a zero-byte string as a test +** for ticket #3134. The string should be preceeded by a zero byte. 
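The ?tailvar? changes above make the tail pointer optional in the prepare wrappers. When a caller does want it, the standard pattern is the loop below, which compiles and runs each statement of a multi-statement string in turn; error handling is kept minimal:

#include <sqlite3.h>

static int run_all(sqlite3 *db, const char *zSql){
  int rc = SQLITE_OK;
  while( rc==SQLITE_OK && zSql && zSql[0] ){
    sqlite3_stmt *pStmt = 0;
    const char *zTail = 0;
    rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);  /* -1: read up to the NUL */
    if( rc==SQLITE_OK && pStmt ){          /* pStmt may be NULL for comments or whitespace */
      while( sqlite3_step(pStmt)==SQLITE_ROW ){}
      rc = sqlite3_finalize(pStmt);
    }
    zSql = zTail;
  }
  return rc;
}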
+*/ +static int test_prepare_tkt3134( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + static const char zSql[] = "\000SELECT 1"; + sqlite3_stmt *pStmt = 0; + char zBuf[50]; + int rc; + + if( objc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + rc = sqlite3_prepare_v2(db, &zSql[1], 0, &pStmt, 0); + assert(rc==SQLITE_OK || pStmt==0); + if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; + if( rc!=SQLITE_OK ){ + assert( pStmt==0 ); + sprintf(zBuf, "(%d) ", rc); + Tcl_AppendResult(interp, zBuf, sqlite3_errmsg(db), 0); + return TCL_ERROR; + } + + if( pStmt ){ + if( sqlite3TestMakePointerStr(interp, zBuf, pStmt) ) return TCL_ERROR; + Tcl_AppendResult(interp, zBuf, 0); + } + return TCL_OK; +} + +/* ** Usage: sqlite3_prepare16 DB sql bytes tailvar ** ** Compile up to bytes of the supplied SQL string using @@ -3193,30 +3443,32 @@ int bytes; /* The integer specified as arg 3 */ int objlen; /* The byte-array length of arg 2 */ - if( objc!=5 ){ + if( objc!=5 && objc!=4 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); + Tcl_GetString(objv[0]), " DB sql bytes ?tailvar?", 0); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zSql = Tcl_GetByteArrayFromObj(objv[2], &objlen); if( Tcl_GetIntFromObj(interp, objv[3], &bytes) ) return TCL_ERROR; - rc = sqlite3_prepare16(db, zSql, bytes, &pStmt, &zTail); + rc = sqlite3_prepare16(db, zSql, bytes, &pStmt, objc>=5 ? &zTail : 0); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; if( rc ){ return TCL_ERROR; } - if( zTail ){ - objlen = objlen - ((u8 *)zTail-(u8 *)zSql); - }else{ - objlen = 0; + if( objc>=5 ){ + if( zTail ){ + objlen = objlen - ((u8 *)zTail-(u8 *)zSql); + }else{ + objlen = 0; + } + pTail = Tcl_NewByteArrayObj((u8 *)zTail, objlen); + Tcl_IncrRefCount(pTail); + Tcl_ObjSetVar2(interp, objv[4], 0, pTail, 0); + Tcl_DecrRefCount(pTail); } - pTail = Tcl_NewByteArrayObj((u8 *)zTail, objlen); - Tcl_IncrRefCount(pTail); - Tcl_ObjSetVar2(interp, objv[4], 0, pTail, 0); - Tcl_DecrRefCount(pTail); if( pStmt ){ if( sqlite3TestMakePointerStr(interp, zBuf, pStmt) ) return TCL_ERROR; @@ -3227,7 +3479,7 @@ } /* -** Usage: sqlite3_prepare16_v2 DB sql bytes tailvar +** Usage: sqlite3_prepare16_v2 DB sql bytes ?tailvar? ** ** Compile up to bytes of the supplied SQL string using ** database handle . The parameter is the name of a global @@ -3251,30 +3503,32 @@ int bytes; /* The integer specified as arg 3 */ int objlen; /* The byte-array length of arg 2 */ - if( objc!=5 ){ + if( objc!=5 && objc!=4 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); + Tcl_GetString(objv[0]), " DB sql bytes ?tailvar?", 0); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zSql = Tcl_GetByteArrayFromObj(objv[2], &objlen); if( Tcl_GetIntFromObj(interp, objv[3], &bytes) ) return TCL_ERROR; - rc = sqlite3_prepare16_v2(db, zSql, bytes, &pStmt, &zTail); + rc = sqlite3_prepare16_v2(db, zSql, bytes, &pStmt, objc>=5 ? 
&zTail : 0); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; if( rc ){ return TCL_ERROR; } - if( zTail ){ - objlen = objlen - ((u8 *)zTail-(u8 *)zSql); - }else{ - objlen = 0; + if( objc>=5 ){ + if( zTail ){ + objlen = objlen - ((u8 *)zTail-(u8 *)zSql); + }else{ + objlen = 0; + } + pTail = Tcl_NewByteArrayObj((u8 *)zTail, objlen); + Tcl_IncrRefCount(pTail); + Tcl_ObjSetVar2(interp, objv[4], 0, pTail, 0); + Tcl_DecrRefCount(pTail); } - pTail = Tcl_NewByteArrayObj((u8 *)zTail, objlen); - Tcl_IncrRefCount(pTail); - Tcl_ObjSetVar2(interp, objv[4], 0, pTail, 0); - Tcl_DecrRefCount(pTail); if( pStmt ){ if( sqlite3TestMakePointerStr(interp, zBuf, pStmt) ) return TCL_ERROR; @@ -3298,13 +3552,13 @@ int rc; char zBuf[100]; - if( objc!=3 && objc!=2 ){ + if( objc!=3 && objc!=2 && objc!=1 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", Tcl_GetString(objv[0]), " filename options-list", 0); return TCL_ERROR; } - zFilename = Tcl_GetString(objv[1]); + zFilename = objc>1 ? Tcl_GetString(objv[1]) : 0; rc = sqlite3_open(zFilename, &db); if( sqlite3TestMakePointerStr(interp, zBuf, db) ) return TCL_ERROR; @@ -3396,6 +3650,24 @@ return TCL_OK; } +static int test_sql( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3_stmt *pStmt; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "STMT"); + return TCL_ERROR; + } + + if( getStmtPointer(interp, Tcl_GetString(objv[1]), &pStmt) ) return TCL_ERROR; + Tcl_SetResult(interp, (char *)sqlite3_sql(pStmt), TCL_VOLATILE); + return TCL_OK; +} + /* ** Usage: sqlite3_column_count STMT ** @@ -3598,9 +3870,10 @@ ){ sqlite3_stmt *pStmt; int col; - const char *(*xFunc)(sqlite3_stmt*, int) = clientData; + const char *(*xFunc)(sqlite3_stmt*, int); const char *zRet; + xFunc = (const char *(*)(sqlite3_stmt*, int))clientData; if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", Tcl_GetString(objv[0]), " STMT column", 0); @@ -3623,6 +3896,7 @@ Tcl_Obj *CONST objv[] ){ #ifndef SQLITE_OMIT_GLOBALRECOVER +#ifndef SQLITE_OMIT_DEPRECATED int rc; if( objc!=1 ){ Tcl_WrongNumArgs(interp, 1, objv, ""); @@ -3631,6 +3905,7 @@ rc = sqlite3_global_recover(); Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); #endif +#endif return TCL_OK; } @@ -3652,8 +3927,9 @@ int col; Tcl_Obj *pRet; const void *zName16; - const void *(*xFunc)(sqlite3_stmt*, int) = clientData; + const void *(*xFunc)(sqlite3_stmt*, int); + xFunc = (const void *(*)(sqlite3_stmt*, int))clientData; if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", Tcl_GetString(objv[0]), " STMT column", 0); @@ -3665,7 +3941,10 @@ zName16 = xFunc(pStmt, col); if( zName16 ){ - pRet = Tcl_NewByteArrayObj(zName16, sqlite3Utf16ByteLen(zName16, -1)+2); + int n; + const char *z = zName16; + for(n=0; z[n] || z[n+1]; n+=2){} + pRet = Tcl_NewByteArrayObj(zName16, n+2); Tcl_SetObjResult(interp, pRet); } #endif /* SQLITE_OMIT_UTF16 */ @@ -3689,8 +3968,9 @@ ){ sqlite3_stmt *pStmt; int col; - int (*xFunc)(sqlite3_stmt*, int) = clientData; + int (*xFunc)(sqlite3_stmt*, int); + xFunc = (int (*)(sqlite3_stmt*, int))clientData; if( objc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", Tcl_GetString(objv[0]), " STMT column", 0); @@ -3704,228 +3984,69 @@ return TCL_OK; } -#ifndef SQLITE_OMIT_DISKIO /* -** Usage: sqlite3OsOpenReadWrite +** Usage: sqlite_set_magic DB MAGIC-NUMBER +** +** Set the db->magic value. This is used to test error recovery logic. 
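Several of the column-accessor wrappers earlier in this hunk change an "xFunc = clientData" initializer into a separate assignment with an explicit cast. The likely motivation is that ISO C defines no implicit conversion between object pointers (ClientData is a void*) and function pointers, so strict compilers warn on the direct initializer; the cast states the intent, even though the conversion itself is a common extension rather than something the standard guarantees. A stripped-down illustration with invented names:

#include <sqlite3.h>

typedef const unsigned char *(*column_text_fn)(sqlite3_stmt*, int);

static const unsigned char *call_text_fn(void *clientData,
                                         sqlite3_stmt *pStmt, int iCol){
  column_text_fn xFunc = (column_text_fn)clientData;  /* explicit cast, as in the patch */
  return xFunc(pStmt, iCol);
}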
*/ -static int test_sqlite3OsOpenReadWrite( +static int sqlite_set_magic( void * clientData, Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] + int argc, + char **argv ){ - OsFile *pFile; - int rc; - int dummy; - char zBuf[100]; - - if( objc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " filename", 0); + sqlite3 *db; + if( argc!=3 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " DB MAGIC", 0); return TCL_ERROR; } - - rc = sqlite3OsOpenReadWrite(Tcl_GetString(objv[1]), &pFile, &dummy); - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); + if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; + if( strcmp(argv[2], "SQLITE_MAGIC_OPEN")==0 ){ + db->magic = SQLITE_MAGIC_OPEN; + }else if( strcmp(argv[2], "SQLITE_MAGIC_CLOSED")==0 ){ + db->magic = SQLITE_MAGIC_CLOSED; + }else if( strcmp(argv[2], "SQLITE_MAGIC_BUSY")==0 ){ + db->magic = SQLITE_MAGIC_BUSY; + }else if( strcmp(argv[2], "SQLITE_MAGIC_ERROR")==0 ){ + db->magic = SQLITE_MAGIC_ERROR; + }else if( Tcl_GetInt(interp, argv[2], (int*)&db->magic) ){ return TCL_ERROR; } - sqlite3TestMakePointerStr(interp, zBuf, pFile); - Tcl_SetResult(interp, zBuf, 0); - return TCL_ERROR; + return TCL_OK; } /* -** Usage: sqlite3OsClose +** Usage: sqlite3_interrupt DB +** +** Trigger an interrupt on DB */ -static int test_sqlite3OsClose( +static int test_interrupt( void * clientData, Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] + int argc, + char **argv ){ - OsFile *pFile; - int rc; - - if( objc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " filehandle", 0); - return TCL_ERROR; - } - - if( getFilePointer(interp, Tcl_GetString(objv[1]), &pFile) ){ - return TCL_ERROR; - } - rc = sqlite3OsClose(&pFile); - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); + sqlite3 *db; + if( argc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " DB", 0); return TCL_ERROR; } + if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; + sqlite3_interrupt(db); return TCL_OK; } +static u8 *sqlite3_stack_baseline = 0; + /* -** Usage: sqlite3OsLock +** Fill the stack with a known bitpattern. 
*/ -static int test_sqlite3OsLock( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - OsFile * pFile; - int rc; - - if( objc!=3 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), - " filehandle (SHARED|RESERVED|PENDING|EXCLUSIVE)", 0); - return TCL_ERROR; - } - - if( getFilePointer(interp, Tcl_GetString(objv[1]), &pFile) ){ - return TCL_ERROR; - } - - if( 0==strcmp("SHARED", Tcl_GetString(objv[2])) ){ - rc = sqlite3OsLock(pFile, SHARED_LOCK); - } - else if( 0==strcmp("RESERVED", Tcl_GetString(objv[2])) ){ - rc = sqlite3OsLock(pFile, RESERVED_LOCK); - } - else if( 0==strcmp("PENDING", Tcl_GetString(objv[2])) ){ - rc = sqlite3OsLock(pFile, PENDING_LOCK); - } - else if( 0==strcmp("EXCLUSIVE", Tcl_GetString(objv[2])) ){ - rc = sqlite3OsLock(pFile, EXCLUSIVE_LOCK); - }else{ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), - " filehandle (SHARED|RESERVED|PENDING|EXCLUSIVE)", 0); - return TCL_ERROR; - } - - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); - return TCL_ERROR; - } - return TCL_OK; -} - -/* -** Usage: sqlite3OsUnlock -*/ -static int test_sqlite3OsUnlock( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - OsFile * pFile; - int rc; - - if( objc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", - Tcl_GetString(objv[0]), " filehandle", 0); - return TCL_ERROR; - } - - if( getFilePointer(interp, Tcl_GetString(objv[1]), &pFile) ){ - return TCL_ERROR; - } - rc = sqlite3OsUnlock(pFile, NO_LOCK); - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); - return TCL_ERROR; - } - return TCL_OK; -} - -/* -** Usage: sqlite3OsTempFileName -*/ -static int test_sqlite3OsTempFileName( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - char zFile[SQLITE_TEMPNAME_SIZE]; - int rc; - - rc = sqlite3OsTempFileName(zFile); - if( rc!=SQLITE_OK ){ - Tcl_SetResult(interp, (char *)t1ErrorName(rc), TCL_STATIC); - return TCL_ERROR; - } - Tcl_AppendResult(interp, zFile, 0); - return TCL_OK; -} -#endif - -/* -** Usage: sqlite_set_magic DB MAGIC-NUMBER -** -** Set the db->magic value. This is used to test error recovery logic. -*/ -static int sqlite_set_magic( - void * clientData, - Tcl_Interp *interp, - int argc, - char **argv -){ - sqlite3 *db; - if( argc!=3 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], - " DB MAGIC", 0); - return TCL_ERROR; - } - if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; - if( strcmp(argv[2], "SQLITE_MAGIC_OPEN")==0 ){ - db->magic = SQLITE_MAGIC_OPEN; - }else if( strcmp(argv[2], "SQLITE_MAGIC_CLOSED")==0 ){ - db->magic = SQLITE_MAGIC_CLOSED; - }else if( strcmp(argv[2], "SQLITE_MAGIC_BUSY")==0 ){ - db->magic = SQLITE_MAGIC_BUSY; - }else if( strcmp(argv[2], "SQLITE_MAGIC_ERROR")==0 ){ - db->magic = SQLITE_MAGIC_ERROR; - }else if( Tcl_GetInt(interp, argv[2], &db->magic) ){ - return TCL_ERROR; - } - return TCL_OK; -} - -/* -** Usage: sqlite3_interrupt DB -** -** Trigger an interrupt on DB -*/ -static int test_interrupt( - void * clientData, - Tcl_Interp *interp, - int argc, - char **argv -){ - sqlite3 *db; - if( argc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " DB", 0); - return TCL_ERROR; - } - if( getDbPointer(interp, argv[1], &db) ) return TCL_ERROR; - sqlite3_interrupt(db); - return TCL_OK; -} - -static u8 *sqlite3_stack_baseline = 0; - -/* -** Fill the stack with a known bitpattern. 
-*/ -static void prepStack(void){ - int i; - u32 bigBuf[65536]; - for(i=0; inSoftHeapLimit; + amt = softHeapLimit; if( objc==2 ){ int N; if( Tcl_GetIntFromObj(interp, objv[1], &N) ) return TCL_ERROR; sqlite3_soft_heap_limit(N); + softHeapLimit = N; } Tcl_SetObjResult(interp, Tcl_NewIntObj(amt)); -#endif - return TCL_OK; -} - -/* -** Usage: sqlite3_clear_tsd_memdebug -** -** Clear all of the MEMDEBUG information out of thread-specific data. -** This will allow it to be deallocated. -*/ -static int test_clear_tsd_memdebug( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - return TCL_OK; -} - -/* -** Usage: sqlite3_tsd_release -** -** Call sqlite3ReleaseThreadData. -*/ -static int test_tsd_release( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ -#if defined(SQLITE_MEMDEBUG) - sqlite3ReleaseThreadData(); -#endif return TCL_OK; } @@ -4195,11 +4284,12 @@ int objc, Tcl_Obj *CONST objv[] ){ +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_thread_cleanup(); +#endif return TCL_OK; } - /* ** Usage: sqlite3_pager_refcounts DB ** @@ -4228,8 +4318,10 @@ if( db->aDb[i].pBt==0 ){ v = -1; }else{ + sqlite3_mutex_enter(db->mutex); a = sqlite3PagerStats(sqlite3BtreePager(db->aDb[i].pBt)); v = a[0]; + sqlite3_mutex_leave(db->mutex); } Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(v)); } @@ -4268,6 +4360,510 @@ /* +** tclcmd: vfs_unlink_test +** +** This TCL command unregisters the primary VFS and then registers +** it back again. This is used to test the ability to register a +** VFS when none are previously registered, and the ability to +** unregister the only available VFS. Ticket #2738 +*/ +static int vfs_unlink_test( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + int i; + sqlite3_vfs *pMain; + sqlite3_vfs *apVfs[20]; + sqlite3_vfs one, two; + + sqlite3_vfs_unregister(0); /* Unregister of NULL is harmless */ + one.zName = "__one"; + two.zName = "__two"; + + /* Calling sqlite3_vfs_register with 2nd argument of 0 does not + ** change the default VFS + */ + pMain = sqlite3_vfs_find(0); + sqlite3_vfs_register(&one, 0); + assert( pMain==0 || pMain==sqlite3_vfs_find(0) ); + sqlite3_vfs_register(&two, 0); + assert( pMain==0 || pMain==sqlite3_vfs_find(0) ); + + /* We can find a VFS by its name */ + assert( sqlite3_vfs_find("__one")==&one ); + assert( sqlite3_vfs_find("__two")==&two ); + + /* Calling sqlite_vfs_register with non-zero second parameter changes the + ** default VFS, even if the 1st parameter is an existig VFS that is + ** previously registered as the non-default. + */ + sqlite3_vfs_register(&one, 1); + assert( sqlite3_vfs_find("__one")==&one ); + assert( sqlite3_vfs_find("__two")==&two ); + assert( sqlite3_vfs_find(0)==&one ); + sqlite3_vfs_register(&two, 1); + assert( sqlite3_vfs_find("__one")==&one ); + assert( sqlite3_vfs_find("__two")==&two ); + assert( sqlite3_vfs_find(0)==&two ); + if( pMain ){ + sqlite3_vfs_register(pMain, 1); + assert( sqlite3_vfs_find("__one")==&one ); + assert( sqlite3_vfs_find("__two")==&two ); + assert( sqlite3_vfs_find(0)==pMain ); + } + + /* Unlink the default VFS. Repeat until there are no more VFSes + ** registered. 
+ */ + for(i=0; izName) ); + sqlite3_vfs_unregister(apVfs[i]); + assert( 0==sqlite3_vfs_find(apVfs[i]->zName) ); + } + } + assert( 0==sqlite3_vfs_find(0) ); + + /* Register the main VFS as non-default (will be made default, since + ** it'll be the only one in existence). + */ + sqlite3_vfs_register(pMain, 0); + assert( sqlite3_vfs_find(0)==pMain ); + + /* Un-register the main VFS again to restore an empty VFS list */ + sqlite3_vfs_unregister(pMain); + assert( 0==sqlite3_vfs_find(0) ); + + /* Relink all VFSes in reverse order. */ + for(i=sizeof(apVfs)/sizeof(apVfs[0])-1; i>=0; i--){ + if( apVfs[i] ){ + sqlite3_vfs_register(apVfs[i], 1); + assert( apVfs[i]==sqlite3_vfs_find(0) ); + assert( apVfs[i]==sqlite3_vfs_find(apVfs[i]->zName) ); + } + } + + /* Unregister out sample VFSes. */ + sqlite3_vfs_unregister(&one); + sqlite3_vfs_unregister(&two); + + /* Unregistering a VFS that is not currently registered is harmless */ + sqlite3_vfs_unregister(&one); + sqlite3_vfs_unregister(&two); + assert( sqlite3_vfs_find("__one")==0 ); + assert( sqlite3_vfs_find("__two")==0 ); + + /* We should be left with the original default VFS back as the + ** original */ + assert( sqlite3_vfs_find(0)==pMain ); + + return TCL_OK; +} + +/* +** tclcmd: vfs_initfail_test +** +** This TCL command attempts to vfs_find and vfs_register when the +** sqlite3_initialize() interface is failing. All calls should fail. +*/ +static int vfs_initfail_test( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3_vfs one; + one.zName = "__one"; + + if( sqlite3_vfs_find(0) ) return TCL_ERROR; + sqlite3_vfs_register(&one, 0); + if( sqlite3_vfs_find(0) ) return TCL_ERROR; + sqlite3_vfs_register(&one, 1); + if( sqlite3_vfs_find(0) ) return TCL_ERROR; + return TCL_OK; +} + +/* +** Saved VFSes +*/ +static sqlite3_vfs *apVfs[20]; +static int nVfs = 0; + +/* +** tclcmd: vfs_unregister_all +** +** Unregister all VFSes. +*/ +static int vfs_unregister_all( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + int i; + for(i=0; ipNext){ + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(pVfs->zName, -1)); + } + Tcl_SetObjResult(interp, pRet); + return TCL_OK; +} + +/* +** tclcmd: sqlite3_limit DB ID VALUE +** +** This TCL command runs the sqlite3_limit interface and +** verifies correct operation of the same. 
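+**
+** For reference, the underlying interface behaves roughly as follows
+** (the values are illustrative only):
+**
+**   int prior = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);
+**   sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
+**
+** A negative VALUE leaves the limit unchanged, so the first call above
+** merely queries the current setting. sqlite3_limit() always returns the
+** previous value of the limit, and that return value is what this command
+** passes back to the test script.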
+*/ +static int test_limit( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3 *db; + int rc; + static const struct { + char *zName; + int id; + } aId[] = { + { "SQLITE_LIMIT_LENGTH", SQLITE_LIMIT_LENGTH }, + { "SQLITE_LIMIT_SQL_LENGTH", SQLITE_LIMIT_SQL_LENGTH }, + { "SQLITE_LIMIT_COLUMN", SQLITE_LIMIT_COLUMN }, + { "SQLITE_LIMIT_EXPR_DEPTH", SQLITE_LIMIT_EXPR_DEPTH }, + { "SQLITE_LIMIT_COMPOUND_SELECT", SQLITE_LIMIT_COMPOUND_SELECT }, + { "SQLITE_LIMIT_VDBE_OP", SQLITE_LIMIT_VDBE_OP }, + { "SQLITE_LIMIT_FUNCTION_ARG", SQLITE_LIMIT_FUNCTION_ARG }, + { "SQLITE_LIMIT_ATTACHED", SQLITE_LIMIT_ATTACHED }, + { "SQLITE_LIMIT_LIKE_PATTERN_LENGTH", SQLITE_LIMIT_LIKE_PATTERN_LENGTH }, + { "SQLITE_LIMIT_VARIABLE_NUMBER", SQLITE_LIMIT_VARIABLE_NUMBER }, + + /* Out of range test cases */ + { "SQLITE_LIMIT_TOOSMALL", -1, }, + { "SQLITE_LIMIT_TOOBIG", SQLITE_LIMIT_VARIABLE_NUMBER+1 }, + }; + int i, id; + int val; + const char *zId; + + if( objc!=4 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetStringFromObj(objv[0], 0), " DB ID VALUE", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + zId = Tcl_GetString(objv[2]); + for(i=0; i=sizeof(aId)/sizeof(aId[0]) ){ + Tcl_AppendResult(interp, "unknown limit type: ", zId, (char*)0); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, objv[3], &val) ) return TCL_ERROR; + rc = sqlite3_limit(db, id, val); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); + return TCL_OK; +} + +/* +** tclcmd: save_prng_state +** +** Save the state of the pseudo-random number generator. +** At the same time, verify that sqlite3_test_control works even when +** called with an out-of-range opcode. 
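+**
+** The out-of-range opcodes used below (9999 and -1) are expected to be
+** harmless no-ops: sqlite3_test_control() is assumed to return 0 for any
+** opcode it does not recognize, which is what the assert()s verify.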
+*/ +static int save_prng_state( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + int rc = sqlite3_test_control(9999); + assert( rc==0 ); + rc = sqlite3_test_control(-1); + assert( rc==0 ); + sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SAVE); + return TCL_OK; +} +/* +** tclcmd: restore_prng_state +*/ +static int restore_prng_state( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESTORE); + return TCL_OK; +} +/* +** tclcmd: reset_prng_state +*/ +static int reset_prng_state( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESET); + return TCL_OK; +} + +/* +** tclcmd: pcache_stats +*/ +static int test_pcache_stats( + ClientData clientData, /* Pointer to sqlite3_enable_XXX function */ + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int objc, /* Number of arguments */ + Tcl_Obj *CONST objv[] /* Command arguments */ +){ + int nMin; + int nMax; + int nCurrent; + int nRecyclable; + Tcl_Obj *pRet; + + sqlite3PcacheStats(&nCurrent, &nMax, &nMin, &nRecyclable); + + pRet = Tcl_NewObj(); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj("current", -1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(nCurrent)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj("max", -1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(nMax)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj("min", -1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(nMin)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj("recyclable", -1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(nRecyclable)); + + Tcl_SetObjResult(interp, pRet); + + return TCL_OK; +} + +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY +static void test_unlock_notify_cb(void **aArg, int nArg){ + int ii; + for(ii=0; ii #include +#include /* ** Interpret an SQLite error number @@ -68,6 +67,7 @@ int argc, /* Number of arguments */ const char **argv /* Text of each argument */ ){ + u16 pageSize; Pager *pPager; int nPage; int rc; @@ -78,13 +78,15 @@ return TCL_ERROR; } if( Tcl_GetInt(interp, argv[2], &nPage) ) return TCL_ERROR; - rc = sqlite3PagerOpen(&pPager, argv[1], 0, 0); + rc = sqlite3PagerOpen(sqlite3_vfs_find(0), &pPager, argv[1], 0, 0, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_MAIN_DB); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; } sqlite3PagerSetCachesize(pPager, nPage); - sqlite3PagerSetPagesize(pPager, test_pagesize); + pageSize = test_pagesize; + sqlite3PagerSetPagesize(pPager, &pageSize, -1); sqlite3_snprintf(sizeof(zBuf),zBuf,"%p",pPager); Tcl_AppendResult(interp, zBuf, 0); return TCL_OK; @@ -108,7 +110,7 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); rc = sqlite3PagerClose(pPager); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); @@ -135,7 +137,7 @@ " ID\"", 0); return TCL_ERROR; } - pPager = 
sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); rc = sqlite3PagerRollback(pPager); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); @@ -162,7 +164,7 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); @@ -194,8 +196,8 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); - rc = sqlite3PagerStmtBegin(pPager); + pPager = sqlite3TestTextToPtr(argv[1]); + rc = sqlite3PagerOpenSavepoint(pPager, 1); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -221,8 +223,9 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); - rc = sqlite3PagerStmtRollback(pPager); + pPager = sqlite3TestTextToPtr(argv[1]); + rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, 0); + sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, 0); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -248,8 +251,8 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); - rc = sqlite3PagerStmtCommit(pPager); + pPager = sqlite3TestTextToPtr(argv[1]); + rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, 0); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -275,7 +278,7 @@ " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); a = sqlite3PagerStats(pPager); for(i=0; i<9; i++){ static char *zName[] = { @@ -303,13 +306,15 @@ ){ Pager *pPager; char zBuf[100]; + int nPage; if( argc!=2 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " ID\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); - sqlite3_snprintf(sizeof(zBuf),zBuf,"%d",sqlite3PagerPagecount(pPager)); + pPager = sqlite3TestTextToPtr(argv[1]); + sqlite3PagerPagecount(pPager, &nPage); + sqlite3_snprintf(sizeof(zBuf), zBuf, "%d", nPage); Tcl_AppendResult(interp, zBuf, 0); return TCL_OK; } @@ -335,7 +340,7 @@ " ID PGNO\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &pgno) ) return TCL_ERROR; rc = sqlite3PagerGet(pPager, pgno, &pPage); if( rc!=SQLITE_OK ){ @@ -368,7 +373,7 @@ " ID PGNO\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &pgno) ) return TCL_ERROR; pPage = sqlite3PagerLookup(pPager, pgno); if( pPage ){ @@ -388,20 +393,15 @@ const char **argv /* Text of each argument */ ){ Pager *pPager; - int rc; int pgno; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " ID PGNO\"", 0); return TCL_ERROR; } - pPager = sqlite3TextToPtr(argv[1]); + pPager = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &pgno) ) return TCL_ERROR; - rc = sqlite3PagerTruncate(pPager, pgno); - if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, errorName(rc), 0); - return TCL_ERROR; - } + sqlite3PagerTruncateImage(pPager, pgno); return TCL_OK; } @@ -418,18 +418,13 @@ const char **argv /* Text of each argument */ ){ DbPage *pPage; - int rc; if( argc!=2 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " PAGE\"", 0); return TCL_ERROR; } - pPage = (DbPage *)sqlite3TextToPtr(argv[1]); - rc = sqlite3PagerUnref(pPage); - if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, errorName(rc), 0); - return TCL_ERROR; - } + pPage 
= (DbPage *)sqlite3TestTextToPtr(argv[1]); + sqlite3PagerUnref(pPage); return TCL_OK; } @@ -451,7 +446,7 @@ " PAGE\"", 0); return TCL_ERROR; } - pPage = sqlite3TextToPtr(argv[1]); + pPage = sqlite3TestTextToPtr(argv[1]); memcpy(zBuf, sqlite3PagerGetData(pPage), sizeof(zBuf)); Tcl_AppendResult(interp, zBuf, 0); return TCL_OK; @@ -475,7 +470,7 @@ " PAGE\"", 0); return TCL_ERROR; } - pPage = (DbPage *)sqlite3TextToPtr(argv[1]); + pPage = (DbPage *)sqlite3TestTextToPtr(argv[1]); sqlite3_snprintf(sizeof(zBuf), zBuf, "%d", sqlite3PagerPagenumber(pPage)); Tcl_AppendResult(interp, zBuf, 0); return TCL_OK; @@ -500,7 +495,7 @@ " PAGE DATA\"", 0); return TCL_ERROR; } - pPage = (DbPage *)sqlite3TextToPtr(argv[1]); + pPage = (DbPage *)sqlite3TestTextToPtr(argv[1]); rc = sqlite3PagerWrite(pPage); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); @@ -528,31 +523,30 @@ int argc, /* Number of arguments */ const char **argv /* Text of each argument */ ){ + sqlite3_vfs *pVfs; + sqlite3_file *fd = 0; int rc; int n; i64 offset; - OsFile *fd = 0; - int readOnly = 0; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N-MEGABYTES FILE\"", 0); return TCL_ERROR; } if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; - rc = sqlite3OsOpenReadWrite(argv[2], &fd, &readOnly); + + pVfs = sqlite3_vfs_find(0); + rc = sqlite3OsOpenMalloc(pVfs, argv[2], &fd, + (SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB), 0 + ); if( rc ){ Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0); return TCL_ERROR; } offset = n; offset *= 1024*1024; - rc = sqlite3OsSeek(fd, offset); - if( rc ){ - Tcl_AppendResult(interp, "seek failed: ", errorName(rc), 0); - return TCL_ERROR; - } - rc = sqlite3OsWrite(fd, "Hello, World!", 14); - sqlite3OsClose(&fd); + rc = sqlite3OsWrite(fd, "Hello, World!", 14, offset); + sqlite3OsCloseFree(fd); if( rc ){ Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0); return TCL_ERROR; @@ -561,6 +555,64 @@ } #endif + +/* +** test_control_pending_byte PENDING_BYTE +** +** Set the PENDING_BYTE using the sqlite3_test_control() interface. +*/ +static int testPendingByte( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + const char **argv /* Text of each argument */ +){ + int pbyte; + int rc; + if( argc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " PENDING-BYTE\"", (void*)0); + } + if( Tcl_GetInt(interp, argv[1], &pbyte) ) return TCL_ERROR; + rc = sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, pbyte); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); + return TCL_OK; +} + +/* +** sqlite3BitvecBuiltinTest SIZE PROGRAM +** +** Invoke the SQLITE_TESTCTRL_BITVEC_TEST operator on test_control. +** See comments on sqlite3BitvecBuiltinTest() for additional information. 
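+**
+** The PROGRAM argument is a list of integers in which any non-digit
+** characters act as separators. The parser below collects at most 99
+** values into aProg[], appends a terminating 0, and hands the array to
+** sqlite3_test_control(SQLITE_TESTCTRL_BITVEC_TEST, SIZE, aProg).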
+*/ +static int testBitvecBuiltinTest( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + const char **argv /* Text of each argument */ +){ + int sz, rc; + int nProg = 0; + int aProg[100]; + const char *z; + if( argc!=3 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " SIZE PROGRAM\"", (void*)0); + } + if( Tcl_GetInt(interp, argv[1], &sz) ) return TCL_ERROR; + z = argv[2]; + while( nProg<99 && *z ){ + while( *z && !sqlite3Isdigit(*z) ){ z++; } + if( *z==0 ) break; + aProg[nProg++] = atoi(z); + while( sqlite3Isdigit(*z) ){ z++; } + } + aProg[nProg] = 0; + rc = sqlite3_test_control(SQLITE_TESTCTRL_BITVEC_TEST, sz, aProg); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); + return TCL_OK; +} + /* ** Register commands with the TCL interpreter. */ @@ -568,6 +620,7 @@ extern int sqlite3_io_error_persist; extern int sqlite3_io_error_pending; extern int sqlite3_io_error_hit; + extern int sqlite3_io_error_hardhit; extern int sqlite3_diskfull_pending; extern int sqlite3_diskfull; extern int sqlite3_pager_n_sort_bucket; @@ -594,6 +647,8 @@ #ifndef SQLITE_OMIT_DISKIO { "fake_big_file", (Tcl_CmdProc*)fake_big_file }, #endif + { "sqlite3BitvecBuiltinTest",(Tcl_CmdProc*)testBitvecBuiltinTest }, + { "sqlite3_test_control_pending_byte", (Tcl_CmdProc*)testPendingByte }, }; int i; for(i=0; i #include @@ -49,6 +48,13 @@ } /* +** A bogus sqlite3 connection structure for use in the btree +** tests. +*/ +static sqlite3 sDb; +static int nRefSqlite3 = 0; + +/* ** Usage: btree_open FILENAME NCACHE FLAGS ** ** Open a new database @@ -69,7 +75,14 @@ } if( Tcl_GetInt(interp, argv[2], &nCache) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[3], &flags) ) return TCL_ERROR; - rc = sqlite3BtreeOpen(argv[1], 0, &pBt, flags); + nRefSqlite3++; + if( nRefSqlite3==1 ){ + sDb.pVfs = sqlite3_vfs_find(0); + sDb.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); + sqlite3_mutex_enter(sDb.mutex); + } + rc = sqlite3BtreeOpen(argv[1], &sDb, &pBt, flags, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_MAIN_DB); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -98,15 +111,23 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); rc = sqlite3BtreeClose(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; } + nRefSqlite3--; + if( nRefSqlite3==0 ){ + sqlite3_mutex_leave(sDb.mutex); + sqlite3_mutex_free(sDb.mutex); + sDb.mutex = 0; + sDb.pVfs = 0; + } return TCL_OK; } + /* ** Usage: btree_begin_transaction ID ** @@ -125,8 +146,10 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeBeginTrans(pBt, 1); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -152,8 +175,10 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeRollback(pBt); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -179,8 +204,10 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeCommit(pBt); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -206,8 +233,10 @@ " 
ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); - rc = sqlite3BtreeBeginStmt(pBt); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); + rc = sqlite3BtreeBeginStmt(pBt, 1); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -233,8 +262,13 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); - rc = sqlite3BtreeRollbackStmt(pBt); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); + rc = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, 0); + } + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -260,8 +294,10 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); - rc = sqlite3BtreeCommitStmt(pBt); + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pBt); + rc = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, 0); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -288,9 +324,11 @@ " ID FLAGS\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &flags) ) return TCL_ERROR; + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeCreateTable(pBt, &iTable, flags); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -320,9 +358,11 @@ " ID TABLENUM\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &iTable) ) return TCL_ERROR; + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeDropTable(pBt, iTable, ¬Used1); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -349,9 +389,11 @@ " ID TABLENUM\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &iTable) ) return TCL_ERROR; - rc = sqlite3BtreeClearTable(pBt, iTable); + sqlite3BtreeEnter(pBt); + rc = sqlite3BtreeClearTable(pBt, iTable, 0); + sqlite3BtreeLeave(pBt); if( rc!=SQLITE_OK ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -378,11 +420,13 @@ " ID\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); for(i=0; idb->mutex); + + sqlite3BtreeEnter(pBt); a = sqlite3PagerStats(sqlite3BtreePager(pBt)); for(i=0; i<11; i++){ static char *zName[] = { @@ -522,31 +519,10 @@ sqlite3_snprintf(sizeof(zBuf), zBuf,"%d",a[i]); Tcl_AppendElement(interp, zBuf); } - return TCL_OK; -} + sqlite3BtreeLeave(pBt); -/* -** Usage: btree_pager_ref_dump ID -** -** Print out all outstanding pages. 
-*/ -static int btree_pager_ref_dump( - void *NotUsed, - Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ - int argc, /* Number of arguments */ - const char **argv /* Text of each argument */ -){ - Btree *pBt; - - if( argc!=2 ){ - Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], - " ID\"", 0); - return TCL_ERROR; - } - pBt = sqlite3TextToPtr(argv[1]); -#ifdef SQLITE_DEBUG - sqlite3PagerRefdump(sqlite3BtreePager(pBt)); -#endif + /* Release the mutex on the SQLite handle that controls this b-tree */ + sqlite3_mutex_leave(pBt->db->mutex); return TCL_OK; } @@ -575,21 +551,23 @@ " ID ROOT ...\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); nRoot = argc-2; - aRoot = (int*)malloc( sizeof(int)*(argc-2) ); + aRoot = (int*)sqlite3_malloc( sizeof(int)*(argc-2) ); for(i=0; ipBtree; + sqlite3BtreeEnter(pBt); rc = sqlite3BtreeCloseCursor(pCur); + sqlite3BtreeLeave(pBt); + ckfree((char *)pCur); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -702,14 +692,19 @@ " ID KEY\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); if( sqlite3BtreeFlags(pCur) & BTREE_INTKEY ){ int iKey; - if( Tcl_GetInt(interp, argv[2], &iKey) ) return TCL_ERROR; - rc = sqlite3BtreeMoveto(pCur, 0, iKey, 0, &res); + if( Tcl_GetInt(interp, argv[2], &iKey) ){ + sqlite3BtreeLeave(pCur->pBtree); + return TCL_ERROR; + } + rc = sqlite3BtreeMovetoUnpacked(pCur, 0, iKey, 0, &res); }else{ rc = sqlite3BtreeMoveto(pCur, argv[2], strlen(argv[2]), 0, &res); } + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -740,8 +735,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreeDelete(pCur); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -769,19 +766,23 @@ Tcl_WrongNumArgs(interp, 1, objv, "ID KEY DATA ?NZERO?"); return TCL_ERROR; } - pCur = sqlite3TextToPtr(Tcl_GetString(objv[1])); + pCur = sqlite3TestTextToPtr(Tcl_GetString(objv[1])); if( objc==5 ){ if( Tcl_GetIntFromObj(interp, objv[4], &nZero) ) return TCL_ERROR; }else{ nZero = 0; } + sqlite3BtreeEnter(pCur->pBtree); if( sqlite3BtreeFlags(pCur) & BTREE_INTKEY ){ i64 iKey; int len; unsigned char *pBuf; - if( Tcl_GetWideIntFromObj(interp, objv[2], &iKey) ) return TCL_ERROR; + if( Tcl_GetWideIntFromObj(interp, objv[2], &iKey) ){ + sqlite3BtreeLeave(pCur->pBtree); + return TCL_ERROR; + } pBuf = Tcl_GetByteArrayFromObj(objv[3], &len); - rc = sqlite3BtreeInsert(pCur, 0, iKey, pBuf, len, nZero, 0); + rc = sqlite3BtreeInsert(pCur, 0, iKey, pBuf, len, nZero, 0, 0); }else{ int keylen; int dlen; @@ -789,8 +790,9 @@ unsigned char *pDBuf; pKBuf = Tcl_GetByteArrayFromObj(objv[2], &keylen); pDBuf = Tcl_GetByteArrayFromObj(objv[3], &dlen); - rc = sqlite3BtreeInsert(pCur, pKBuf, keylen, pDBuf, dlen, nZero, 0); + rc = sqlite3BtreeInsert(pCur, pKBuf, keylen, pDBuf, dlen, nZero, 0, 0); } + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -821,8 +823,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreeNext(pCur, &res); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return 
TCL_ERROR; @@ -855,8 +859,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreePrevious(pCur, &res); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -888,8 +894,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreeFirst(pCur, &res); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -921,8 +929,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreeLast(pCur, &res); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; @@ -945,6 +955,7 @@ const char **argv /* Text of each argument */ ){ BtCursor *pCur; + int rc; char zBuf[50]; if( argc!=2 ){ @@ -952,8 +963,11 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); - sqlite3_snprintf(sizeof(zBuf),zBuf, "%d", sqlite3BtreeEof(pCur)); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); + rc = sqlite3BtreeEof(pCur); + sqlite3BtreeLeave(pCur->pBtree); + sqlite3_snprintf(sizeof(zBuf),zBuf, "%d", rc); Tcl_AppendResult(interp, zBuf, 0); return SQLITE_OK; } @@ -979,8 +993,10 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); sqlite3BtreeKeySize(pCur, (i64*)&n); + sqlite3BtreeLeave(pCur->pBtree); sqlite3_snprintf(sizeof(zBuf),zBuf, "%llu", n); Tcl_AppendResult(interp, zBuf, 0); return SQLITE_OK; @@ -1007,23 +1023,26 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); sqlite3BtreeKeySize(pCur, (i64*)&n); if( sqlite3BtreeFlags(pCur) & BTREE_INTKEY ){ char zBuf2[60]; sqlite3_snprintf(sizeof(zBuf2),zBuf2, "%llu", n); Tcl_AppendResult(interp, zBuf2, 0); }else{ - zBuf = malloc( n+1 ); + zBuf = sqlite3_malloc( n+1 ); rc = sqlite3BtreeKey(pCur, 0, n, zBuf); if( rc ){ + sqlite3BtreeLeave(pCur->pBtree); Tcl_AppendResult(interp, errorName(rc), 0); return TCL_ERROR; } zBuf[n] = 0; Tcl_AppendResult(interp, zBuf, 0); - free(zBuf); + sqlite3_free(zBuf); } + sqlite3BtreeLeave(pCur->pBtree); return SQLITE_OK; } @@ -1048,22 +1067,24 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); if( argc==2 ){ sqlite3BtreeDataSize(pCur, &n); }else{ n = atoi(argv[2]); } - zBuf = malloc( n+1 ); + zBuf = sqlite3_malloc( n+1 ); rc = sqlite3BtreeData(pCur, 0, n, zBuf); + sqlite3BtreeLeave(pCur->pBtree); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); - free(zBuf); + sqlite3_free(zBuf); return TCL_ERROR; } zBuf[n] = 0; Tcl_AppendResult(interp, zBuf, 0); - free(zBuf); + sqlite3_free(zBuf); return SQLITE_OK; } @@ -1091,8 +1112,9 @@ " ID AMT\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &n) ) return TCL_ERROR; + sqlite3BtreeEnter(pCur->pBtree); sqlite3BtreeKeySize(pCur, (i64*)&nKey); zBuf = sqlite3BtreeKeyFetch(pCur, &amt); if( zBuf && amt>=n ){ @@ -1102,6 +1124,7 @@ zStatic[nKey] = 0; Tcl_AppendResult(interp, zStatic, 0); } + sqlite3BtreeLeave(pCur->pBtree); return 
TCL_OK; } @@ -1129,8 +1152,9 @@ " ID AMT\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &n) ) return TCL_ERROR; + sqlite3BtreeEnter(pCur->pBtree); sqlite3BtreeDataSize(pCur, &nData); zBuf = sqlite3BtreeDataFetch(pCur, &amt); if( zBuf && amt>=n ){ @@ -1140,6 +1164,7 @@ zStatic[nData] = 0; Tcl_AppendResult(interp, zStatic, 0); } + sqlite3BtreeLeave(pCur->pBtree); return TCL_OK; } @@ -1164,13 +1189,15 @@ " ID\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); + sqlite3BtreeEnter(pCur->pBtree); if( sqlite3BtreeFlags(pCur) & BTREE_INTKEY ){ n1 = 0; }else{ sqlite3BtreeKeySize(pCur, (i64*)&n1); } sqlite3BtreeDataSize(pCur, (u32*)&n2); + sqlite3BtreeLeave(pCur->pBtree); sqlite3_snprintf(sizeof(zBuf),zBuf, "%d", (int)(n1+n2)); Tcl_AppendResult(interp, zBuf, 0); return SQLITE_OK; @@ -1212,15 +1239,17 @@ " ID ?UP-CNT?\"", 0); return TCL_ERROR; } - pCur = sqlite3TextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[1]); if( argc==3 ){ if( Tcl_GetInt(interp, argv[2], &up) ) return TCL_ERROR; }else{ up = 0; } + sqlite3BtreeEnter(pCur->pBtree); rc = sqlite3BtreeCursorInfo(pCur, aResult, up); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); + sqlite3BtreeLeave(pCur->pBtree); return TCL_ERROR; } j = 0; @@ -1228,6 +1257,7 @@ sqlite3_snprintf(40,&zBuf[j]," %d", aResult[i]); j += strlen(&zBuf[j]); } + sqlite3BtreeLeave(pCur->pBtree); Tcl_AppendResult(interp, &zBuf[1], 0); return SQLITE_OK; } @@ -1269,20 +1299,22 @@ " BTREE CURSOR", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); - pCur = sqlite3TextToPtr(argv[2]); + pBt = sqlite3TestTextToPtr(argv[1]); + pCur = sqlite3TestTextToPtr(argv[2]); if( (*(void**)pCur) != (void*)pBt ){ Tcl_AppendResult(interp, "Cursor ", argv[2], " does not belong to btree ", argv[1], 0); return TCL_ERROR; } + sqlite3BtreeEnter(pBt); pPager = sqlite3BtreePager(pBt); rc = sqlite3BtreeCursorInfo(pCur, aResult, 0); if( rc ){ Tcl_AppendResult(interp, errorName(rc), 0); + sqlite3BtreeLeave(pBt); return TCL_ERROR; } - dataSize = sqlite3BtreeGetPageSize(pBt) - sqlite3BtreeGetReserve(pBt); + dataSize = pBt->pBt->usableSize; Tcl_DStringInit(&str); n = aResult[6] - aResult[8]; n = (n + dataSize - 1)/dataSize; @@ -1294,12 +1326,14 @@ if( sqlite3PagerGet(pPager, pgno, &pDbPage)!=SQLITE_OK ){ Tcl_DStringFree(&str); Tcl_AppendResult(interp, "unable to get page ", zElem, 0); + sqlite3BtreeLeave(pBt); return TCL_ERROR; } pPage = sqlite3PagerGetData(pDbPage); pgno = t4Get4byte((unsigned char*)pPage); sqlite3PagerUnref(pDbPage); } + sqlite3BtreeLeave(pBt); Tcl_DStringResult(interp, &str); return SQLITE_OK; } @@ -1325,11 +1359,11 @@ /* ** usage: varint_test START MULTIPLIER COUNT INCREMENT ** -** This command tests the sqlite3PutVarint() and sqlite3GetVarint() +** This command tests the putVarint() and getVarint() ** routines, both for accuracy and for speed. ** -** An integer is written using PutVarint() and read back with -** GetVarint() and varified to be unchanged. This repeats COUNT +** An integer is written using putVarint() and read back with +** getVarint() and varified to be unchanged. This repeats COUNT ** times. The first integer is START*MULTIPLIER. Each iteration ** increases the integer by INCREMENT. 
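**
** A minimal sketch of the round trip exercised on each iteration:
**
**   unsigned char zBuf[10];
**   u64 out;
**   int n1 = putVarint(zBuf, in);
**   int n2 = getVarint(zBuf, &out);
**
** putVarint() writes between 1 and 9 bytes and returns the count; the
** test then checks that n2==n1 and that out equals the original value in.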
** @@ -1359,15 +1393,15 @@ in *= mult; for(i=0; i9 || n1<1 ){ - sprintf(zErr, "PutVarint returned %d - should be between 1 and 9", n1); + sprintf(zErr, "putVarint returned %d - should be between 1 and 9", n1); Tcl_AppendResult(interp, zErr, 0); return TCL_ERROR; } - n2 = sqlite3GetVarint(zBuf, &out); + n2 = getVarint(zBuf, &out); if( n1!=n2 ){ - sprintf(zErr, "PutVarint returned %d and GetVarint returned %d", n1, n2); + sprintf(zErr, "putVarint returned %d and getVarint returned %d", n1, n2); Tcl_AppendResult(interp, zErr, 0); return TCL_ERROR; } @@ -1378,10 +1412,10 @@ } if( (in & 0xffffffff)==in ){ u32 out32; - n2 = sqlite3GetVarint32(zBuf, &out32); + n2 = getVarint32(zBuf, out32); out = out32; if( n1!=n2 ){ - sprintf(zErr, "PutVarint returned %d and GetVarint32 returned %d", + sprintf(zErr, "putVarint returned %d and GetVarint32 returned %d", n1, n2); Tcl_AppendResult(interp, zErr, 0); return TCL_ERROR; @@ -1399,7 +1433,7 @@ ** than putVarint. */ for(j=0; j<19; j++){ - sqlite3GetVarint(zBuf, &out); + getVarint(zBuf, &out); } in += incr; } @@ -1470,18 +1504,52 @@ " BT NCACHE\"", 0); return TCL_ERROR; } - pBt = sqlite3TextToPtr(argv[1]); + pBt = sqlite3TestTextToPtr(argv[1]); if( Tcl_GetInt(interp, argv[2], &nCache) ) return TCL_ERROR; + + sqlite3_mutex_enter(pBt->db->mutex); + sqlite3BtreeEnter(pBt); sqlite3BtreeSetCacheSize(pBt, nCache); + sqlite3BtreeLeave(pBt); + sqlite3_mutex_leave(pBt->db->mutex); + return TCL_OK; } +/* +** Usage: btree_ismemdb ID +** +** Return true if the B-Tree is in-memory. +*/ +static int btree_ismemdb( + void *NotUsed, + Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ + int argc, /* Number of arguments */ + const char **argv /* Text of each argument */ +){ + Btree *pBt; + int res; + + if( argc!=2 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], + " ID\"", 0); + return TCL_ERROR; + } + pBt = sqlite3TestTextToPtr(argv[1]); + sqlite3_mutex_enter(pBt->db->mutex); + sqlite3BtreeEnter(pBt); + res = sqlite3PagerIsMemdb(sqlite3BtreePager(pBt)); + sqlite3BtreeLeave(pBt); + sqlite3_mutex_leave(pBt->db->mutex); + Tcl_SetObjResult(interp, Tcl_NewBooleanObj(res)); + return SQLITE_OK; +} + /* ** Register commands with the TCL interpreter. 
*/ int Sqlitetest3_Init(Tcl_Interp *interp){ - extern int sqlite3_btree_trace; static struct { char *zName; Tcl_CmdProc *xProc; @@ -1496,10 +1564,7 @@ { "btree_clear_table", (Tcl_CmdProc*)btree_clear_table }, { "btree_get_meta", (Tcl_CmdProc*)btree_get_meta }, { "btree_update_meta", (Tcl_CmdProc*)btree_update_meta }, - { "btree_page_dump", (Tcl_CmdProc*)btree_page_dump }, - { "btree_tree_dump", (Tcl_CmdProc*)btree_tree_dump }, { "btree_pager_stats", (Tcl_CmdProc*)btree_pager_stats }, - { "btree_pager_ref_dump", (Tcl_CmdProc*)btree_pager_ref_dump }, { "btree_cursor", (Tcl_CmdProc*)btree_cursor }, { "btree_close_cursor", (Tcl_CmdProc*)btree_close_cursor }, { "btree_move_to", (Tcl_CmdProc*)btree_move_to }, @@ -1526,16 +1591,13 @@ { "btree_cursor_info", (Tcl_CmdProc*)btree_cursor_info }, { "btree_ovfl_info", (Tcl_CmdProc*)btree_ovfl_info }, { "btree_cursor_list", (Tcl_CmdProc*)btree_cursor_list }, + { "btree_ismemdb", (Tcl_CmdProc*)btree_ismemdb }, }; int i; for(i=0; i #include #include @@ -97,7 +96,9 @@ p->zErr = 0; } p->completed++; +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_thread_cleanup(); +#endif return 0; } @@ -142,14 +143,14 @@ return TCL_ERROR; } threadset[i].busy = 1; - sqliteFree(threadset[i].zFilename); - threadset[i].zFilename = sqliteStrDup(argv[2]); + sqlite3_free(threadset[i].zFilename); + threadset[i].zFilename = sqlite3DbStrDup(0, argv[2]); threadset[i].opnum = 1; threadset[i].completed = 0; rc = pthread_create(&x, 0, thread_main, &threadset[i]); if( rc ){ Tcl_AppendResult(interp, "failed to create the thread", 0); - sqliteFree(threadset[i].zFilename); + sqlite3_free(threadset[i].zFilename); threadset[i].busy = 0; return TCL_ERROR; } @@ -200,9 +201,9 @@ p->xOp = 0; p->opnum++; thread_wait(p); - sqliteFree(p->zArg); + sqlite3_free(p->zArg); p->zArg = 0; - sqliteFree(p->zFilename); + sqlite3_free(p->zFilename); p->zFilename = 0; p->busy = 0; } @@ -476,8 +477,8 @@ } thread_wait(&threadset[i]); threadset[i].xOp = do_compile; - sqliteFree(threadset[i].zArg); - threadset[i].zArg = sqliteStrDup(argv[2]); + sqlite3_free(threadset[i].zArg); + threadset[i].zArg = sqlite3DbStrDup(0, argv[2]); threadset[i].opnum++; return TCL_OK; } @@ -571,7 +572,7 @@ } thread_wait(&threadset[i]); threadset[i].xOp = do_finalize; - sqliteFree(threadset[i].zArg); + sqlite3_free(threadset[i].zArg); threadset[i].zArg = 0; threadset[i].opnum++; return TCL_OK; @@ -714,4 +715,4 @@ } #else int Sqlitetest4_Init(Tcl_Interp *interp){ return TCL_OK; } -#endif /* OS_UNIX */ +#endif /* SQLITE_OS_UNIX */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test5.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test5.c --- sqlite3-3.4.2/src/test5.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/test5.c 2009-05-05 04:39:58.000000000 +0100 @@ -15,11 +15,10 @@ ** is used for testing the SQLite routines for converting between ** the various supported unicode encodings. 
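**
** The encodings exercised are SQLITE_UTF8, SQLITE_UTF16LE, SQLITE_UTF16BE
** and the byte-order-neutral SQLITE_UTF16 name, which name_to_enc() below
** resolves to SQLITE_UTF16NATIVE.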
** -** $Id: test5.c,v 1.16 2007/05/08 20:37:40 drh Exp $ +** $Id: test5.c,v 1.22 2008/08/12 15:04:59 danielk1977 Exp $ */ #include "sqliteInt.h" #include "vdbeInt.h" -#include "os.h" /* to get SQLITE_BIGENDIAN */ #include "tcl.h" #include #include @@ -100,7 +99,7 @@ { "UTF8", SQLITE_UTF8 }, { "UTF16LE", SQLITE_UTF16LE }, { "UTF16BE", SQLITE_UTF16BE }, - { "UTF16", SQLITE_UTF16NATIVE }, + { "UTF16", SQLITE_UTF16 }, { 0, 0 } }; struct EncName *pEnc; @@ -113,6 +112,9 @@ if( !pEnc->enc ){ Tcl_AppendResult(interp, "No such encoding: ", z, 0); } + if( pEnc->enc==SQLITE_UTF16 ){ + return SQLITE_UTF16NATIVE; + } return pEnc->enc; } @@ -142,7 +144,7 @@ return TCL_ERROR; } if( objc==5 ){ - xDel = sqlite3FreeX; + xDel = sqlite3_free; } enc_from = name_to_enc(interp, objv[2]); @@ -150,19 +152,19 @@ enc_to = name_to_enc(interp, objv[3]); if( !enc_to ) return TCL_ERROR; - pVal = sqlite3ValueNew(); + pVal = sqlite3ValueNew(0); if( enc_from==SQLITE_UTF8 ){ z = Tcl_GetString(objv[1]); if( objc==5 ){ - z = sqliteStrDup(z); + z = sqlite3DbStrDup(0, z); } sqlite3ValueSetStr(pVal, -1, z, enc_from, xDel); }else{ z = (char*)Tcl_GetByteArrayFromObj(objv[1], &len); if( objc==5 ){ char *zTmp = z; - z = sqliteMalloc(len); + z = sqlite3_malloc(len); memcpy(z, zTmp, len); } sqlite3ValueSetStr(pVal, -1, z, enc_from, xDel); @@ -183,7 +185,7 @@ ** Call sqlite3UtfSelfTest() to run the internal tests for unicode ** translation. If there is a problem an assert() will fail. **/ -void sqlite3UtfSelfTest(); +void sqlite3UtfSelfTest(void); static int test_translate_selftest( void * clientData, Tcl_Interp *interp, diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test6.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test6.c --- sqlite3-3.4.2/src/test6.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/test6.c 2009-06-12 03:37:49.000000000 +0100 @@ -13,525 +13,823 @@ ** This file contains code that modified the OS layer in order to simulate ** the effect on the database file of an OS crash or power failure. This ** is used to test the ability of SQLite to recover from those situations. +** +** $Id: test6.c,v 1.43 2009/02/11 14:27:04 danielk1977 Exp $ */ -#if SQLITE_TEST /* This file is used for the testing only */ +#if SQLITE_TEST /* This file is used for testing only */ #include "sqliteInt.h" -#include "os.h" #include "tcl.h" #ifndef SQLITE_OMIT_DISKIO /* This file is a no-op if disk I/O is disabled */ -/* -** crashFile is a subclass of OsFile that is taylored for the -** crash test module. -*/ -typedef struct crashFile crashFile; -struct crashFile { - IoMethod const *pMethod; /* Must be first */ - u8 **apBlk; /* Array of blocks that have been written to. */ - int nBlk; /* Size of apBlock. */ - i64 offset; /* Next character to be read from the file */ - int nMaxWrite; /* Largest offset written to. */ - char *zName; /* File name */ - OsFile *pBase; /* The real file */ - crashFile *pNext; /* Next in a list of them all */ -}; - -/* -** Size of a simulated disk block. Default is 512 bytes. -*/ -static int BLOCKSIZE = 512; -#define BLOCK_OFFSET(x) ((x) * BLOCKSIZE) +/* #define TRACE_CRASHTEST */ +typedef struct CrashFile CrashFile; +typedef struct CrashGlobal CrashGlobal; +typedef struct WriteBuffer WriteBuffer; /* -** The following variables control when a simulated crash occurs. +** Method: +** +** This layer is implemented as a wrapper around the "real" +** sqlite3_file object for the host system. 
Each time data is +** written to the file object, instead of being written to the +** underlying file, the write operation is stored in an in-memory +** structure (type WriteBuffer). This structure is placed at the +** end of a global ordered list (the write-list). +** +** When data is read from a file object, the requested region is +** first retrieved from the real file. The write-list is then +** traversed and data copied from any overlapping WriteBuffer +** structures to the output buffer. i.e. a read() operation following +** one or more write() operations works as expected, even if no +** data has actually been written out to the real file. +** +** When a fsync() operation is performed, an operating system crash +** may be simulated, in which case exit(-1) is called (the call to +** xSync() never returns). Whether or not a crash is simulated, +** the data associated with a subset of the WriteBuffer structures +** stored in the write-list is written to the real underlying files +** and the entries removed from the write-list. If a crash is simulated, +** a subset of the buffers may be corrupted before the data is written. +** +** The exact subset of the write-list written and/or corrupted is +** determined by the simulated device characteristics and sector-size. +** +** "Normal" mode: +** +** Normal mode is used when the simulated device has none of the +** SQLITE_IOCAP_XXX flags set. +** +** In normal mode, if the fsync() is not a simulated crash, the +** write-list is traversed from beginning to end. Each WriteBuffer +** structure associated with the file handle used to call xSync() +** is written to the real file and removed from the write-list. +** +** If a crash is simulated, one of the following takes place for +** each WriteBuffer in the write-list, regardless of which +** file-handle it is associated with: +** +** 1. The buffer is correctly written to the file, just as if +** a crash were not being simulated. +** +** 2. Nothing is done. +** +** 3. Garbage data is written to all sectors of the file that +** overlap the region specified by the WriteBuffer. Or garbage +** data is written to some contiguous section within the +** overlapped sectors. ** -** If iCrashDelay is non-zero, then zCrashFile contains (full path) name of -** a file that SQLite will call sqlite3OsSync() on. Each time this happens -** iCrashDelay is decremented. If iCrashDelay is zero after being -** decremented, a "crash" occurs during the sync() operation. +** Device Characteristic flag handling: ** -** In other words, a crash occurs the iCrashDelay'th time zCrashFile is -** synced. +** If the IOCAP_ATOMIC flag is set, then option (3) above is +** never selected. +** +** If the IOCAP_ATOMIC512 flag is set, and the WriteBuffer represents +** an aligned write() of an integer number of 512 byte regions, then +** option (3) above is never selected. Instead, each 512 byte region +** is either correctly written or left completely untouched. Similar +** logic governs the behaviour if any of the other ATOMICXXX flags +** is set. +** +** If either the IOCAP_SAFEAPPEND or IOCAP_SEQUENTIAL flags are set +** and a crash is being simulated, then an entry of the write-list is +** selected at random. Everything in the list after the selected entry +** is discarded before processing begins. +** +** If IOCAP_SEQUENTIAL is set and a crash is being simulated, option +** (1) is selected for all write-list entries except the last. 
If a +** crash is not being simulated, then all entries in the write-list +** that occur before at least one write() on the file-handle specified +** as part of the xSync() are written to their associated real files. +** +** If IOCAP_SAFEAPPEND is set and the first byte written by the write() +** operation is one byte past the current end of the file, then option +** (1) is always selected. */ -static int iCrashDelay = 0; -static char zCrashFile[500]; /* -** A list of all open files. -*/ -static crashFile *pAllFiles = 0; +** Each write operation in the write-list is represented by an instance +** of the following structure. +** +** If zBuf is 0, then this structure represents a call to xTruncate(), +** not xWrite(). In that case, iOffset is the size that the file is +** truncated to. +*/ +struct WriteBuffer { + i64 iOffset; /* Byte offset of the start of this write() */ + int nBuf; /* Number of bytes written */ + u8 *zBuf; /* Pointer to copy of written data */ + CrashFile *pFile; /* File this write() applies to */ -/* -** Set the value of the two crash parameters. -*/ -static void setCrashParams(int iDelay, char const *zFile){ - sqlite3OsEnterMutex(); - assert( strlen(zFile)n ){ - n = strlen(zPath); +static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){ + int rc; + int iSkip = 0; + if( iOff==PENDING_BYTE && (p->flags&SQLITE_OPEN_MAIN_DB) ){ + iSkip = 512; } - r = 0; - if( iCrashDelay>0 && strncmp(zPath, zCrashFile, n)==0 ){ - iCrashDelay--; - if( iCrashDelay<=0 ){ - r = 1; - } + if( (iAmt-iSkip)>0 ){ + rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], iAmt-iSkip, iOff+iSkip); } - sqlite3OsLeaveMutex(); - return r; + return rc; } -/* Forward reference */ -static void initFile(OsFile **pId, char const *zName, OsFile *pBase); - /* -** Undo the work done by initFile. Delete the OsFile structure -** and unlink the structure from the pAllFiles list. +** Flush the write-list as if xSync() had been called on file handle +** pFile. If isCrash is true, simulate a crash. */ -static void closeFile(crashFile **pId){ - crashFile *pFile = *pId; - if( pFile==pAllFiles ){ - pAllFiles = pFile->pNext; - }else{ - crashFile *p; - for(p=pAllFiles; p->pNext!=pFile; p=p->pNext ){ - assert( p ); - } - p->pNext = pFile->pNext; - } - sqliteFree(*pId); - *pId = 0; -} +static int writeListSync(CrashFile *pFile, int isCrash){ + int rc = SQLITE_OK; + int iDc = g.iDeviceCharacteristics; -/* -** Read block 'blk' off of the real disk file and into the cache of pFile. -*/ -static int readBlockIntoCache(crashFile *pFile, int blk){ - if( blk>=pFile->nBlk ){ - int n = ((pFile->nBlk * 2) + 100 + blk); - /* if( pFile->nBlk==0 ){ printf("DIRTY %s\n", pFile->zName); } */ - pFile->apBlk = (u8 **)sqliteRealloc(pFile->apBlk, n * sizeof(u8*)); - if( !pFile->apBlk ) return SQLITE_NOMEM; - memset(&pFile->apBlk[pFile->nBlk], 0, (n - pFile->nBlk)*sizeof(u8*)); - pFile->nBlk = n; + WriteBuffer *pWrite; + WriteBuffer **ppPtr; + + /* If this is not a crash simulation, set pFinal to point to the + ** last element of the write-list that is associated with file handle + ** pFile. + ** + ** If this is a crash simulation, set pFinal to an arbitrarily selected + ** element of the write-list. 
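+  **
+  ** Entries that follow pFinal on the write-list are not touched by this
+  ** call: the flush loop below stops as soon as pFinal has been handled.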
+ */ + WriteBuffer *pFinal = 0; + if( !isCrash ){ + for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext){ + if( pWrite->pFile==pFile ){ + pFinal = pWrite; + } + } + }else if( iDc&(SQLITE_IOCAP_SEQUENTIAL|SQLITE_IOCAP_SAFE_APPEND) ){ + int nWrite = 0; + int iFinal; + for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext) nWrite++; + sqlite3_randomness(sizeof(int), &iFinal); + iFinal = ((iFinal<0)?-1*iFinal:iFinal)%nWrite; + for(pWrite=g.pWriteList; iFinal>0; pWrite=pWrite->pNext) iFinal--; + pFinal = pWrite; } - if( !pFile->apBlk[blk] ){ - i64 filesize; - int rc; +#ifdef TRACE_CRASHTEST + printf("Sync %s (is %s crash)\n", pFile->zName, (isCrash?"a":"not a")); +#endif - u8 *p = sqliteMalloc(BLOCKSIZE); - if( !p ) return SQLITE_NOMEM; - pFile->apBlk[blk] = p; + ppPtr = &g.pWriteList; + for(pWrite=*ppPtr; rc==SQLITE_OK && pWrite; pWrite=*ppPtr){ + sqlite3_file *pRealFile = pWrite->pFile->pRealFile; + + /* (eAction==1) -> write block out normally, + ** (eAction==2) -> do nothing, + ** (eAction==3) -> trash sectors. + */ + int eAction = 0; + if( !isCrash ){ + eAction = 2; + if( (pWrite->pFile==pFile || iDc&SQLITE_IOCAP_SEQUENTIAL) ){ + eAction = 1; + } + }else{ + char random; + sqlite3_randomness(1, &random); - rc = sqlite3OsFileSize(pFile->pBase, &filesize); - if( rc!=SQLITE_OK ) return rc; + /* Do not select option 3 (sector trashing) if the IOCAP_ATOMIC flag + ** is set or this is an OsTruncate(), not an Oswrite(). + */ + if( (iDc&SQLITE_IOCAP_ATOMIC) || (pWrite->zBuf==0) ){ + random &= 0x01; + } - if( BLOCK_OFFSET(blk)pBase, blk*BLOCKSIZE); - if( BLOCK_OFFSET(blk+1)>filesize ){ - len = filesize - BLOCK_OFFSET(blk); + /* If IOCAP_SEQUENTIAL is set and this is not the final entry + ** in the truncated write-list, always select option 1 (write + ** out correctly). + */ + if( (iDc&SQLITE_IOCAP_SEQUENTIAL && pWrite!=pFinal) ){ + random = 0; } - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsRead(pFile->pBase, p, len); - if( rc!=SQLITE_OK ) return rc; - } - } - return SQLITE_OK; -} + /* If IOCAP_SAFE_APPEND is set and this OsWrite() operation is + ** an append (first byte of the written region is 1 byte past the + ** current EOF), always select option 1 (write out correctly). + */ + if( iDc&SQLITE_IOCAP_SAFE_APPEND && pWrite->zBuf ){ + i64 iSize; + sqlite3OsFileSize(pRealFile, &iSize); + if( iSize==pWrite->iOffset ){ + random = 0; + } + } -/* -** Write the cache of pFile to disk. If crash is non-zero, randomly -** skip blocks when writing. The cache is deleted before returning. 
-*/ -static int writeCache2(crashFile *pFile, int crash){ - int i; - int nMax = pFile->nMaxWrite; - int rc = SQLITE_OK; + if( (random&0x06)==0x06 ){ + eAction = 3; + }else{ + eAction = ((random&0x01)?2:1); + } + } - for(i=0; inBlk; i++){ - u8 *p = pFile->apBlk[i]; - if( p ){ - int skip = 0; - int trash = 0; - if( crash ){ - char random; - sqlite3Randomness(1, &random); - if( random & 0x01 ){ - if( random & 0x02 ){ - trash = 1; -#ifdef TRACE_WRITECACHE -printf("Trashing block %d of %s\n", i, pFile->zName); -#endif - }else{ - skip = 1; -#ifdef TRACE_WRITECACHE -printf("Skiping block %d of %s\n", i, pFile->zName); -#endif - } + switch( eAction ){ + case 1: { /* Write out correctly */ + if( pWrite->zBuf ){ + rc = writeDbFile( + pWrite->pFile, pWrite->zBuf, pWrite->nBuf, pWrite->iOffset + ); }else{ -#ifdef TRACE_WRITECACHE -printf("Writing block %d of %s\n", i, pFile->zName); -#endif + rc = sqlite3OsTruncate(pRealFile, pWrite->iOffset); } + *ppPtr = pWrite->pNext; +#ifdef TRACE_CRASHTEST + if( isCrash ){ + printf("Writing %d bytes @ %d (%s)\n", + pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName + ); + } +#endif + crash_free(pWrite); + break; } - if( rc==SQLITE_OK ){ - rc = sqlite3OsSeek(pFile->pBase, BLOCK_OFFSET(i)); - } - if( rc==SQLITE_OK && !skip ){ - int len = BLOCKSIZE; - if( BLOCK_OFFSET(i+1)>nMax ){ - len = nMax-BLOCK_OFFSET(i); + case 2: { /* Do nothing */ + ppPtr = &pWrite->pNext; +#ifdef TRACE_CRASHTEST + if( isCrash ){ + printf("Omiting %d bytes @ %d (%s)\n", + pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName + ); } - if( len>0 ){ - if( trash ){ - sqlite3Randomness(len, p); +#endif + break; + } + case 3: { /* Trash sectors */ + u8 *zGarbage; + int iFirst = (pWrite->iOffset/g.iSectorSize); + int iLast = (pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize; + + assert(pWrite->zBuf); + +#ifdef TRACE_CRASHTEST + printf("Trashing %d sectors @ sector %d (%s)\n", + 1+iLast-iFirst, iFirst, pWrite->pFile->zName + ); +#endif + + zGarbage = crash_malloc(g.iSectorSize); + if( zGarbage ){ + sqlite3_int64 i; + for(i=iFirst; rc==SQLITE_OK && i<=iLast; i++){ + sqlite3_randomness(g.iSectorSize, zGarbage); + rc = writeDbFile( + pWrite->pFile, zGarbage, g.iSectorSize, i*g.iSectorSize + ); } - rc = sqlite3OsWrite(pFile->pBase, p, len); + crash_free(zGarbage); + }else{ + rc = SQLITE_NOMEM; } + + ppPtr = &pWrite->pNext; + break; } - sqliteFree(p); + + default: + assert(!"Cannot happen"); } + + if( pWrite==pFinal ) break; } - sqliteFree(pFile->apBlk); - pFile->nBlk = 0; - pFile->apBlk = 0; - pFile->nMaxWrite = 0; + + if( rc==SQLITE_OK && isCrash ){ + exit(-1); + } + + for(pWrite=g.pWriteList; pWrite && pWrite->pNext; pWrite=pWrite->pNext); + g.pWriteListEnd = pWrite; + return rc; } /* -** Write the cache to disk. +** Add an entry to the end of the write-list. 
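+**
+** Each buffered xWrite() becomes a WriteBuffer appended to the global
+** write-list; the written bytes are copied into the same allocation,
+** immediately after the WriteBuffer structure itself:
+**
+**   crash_malloc(sizeof(WriteBuffer) + nBuf)
+**   [ WriteBuffer header | copy of the nBuf data bytes ]
+**
+** A call with zBuf==0 and nBuf==0 records an xTruncate() instead, with
+** iOffset holding the new file size.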
*/ -static int writeCache(crashFile *pFile){ - if( pFile->apBlk ){ - int c = crashRequired(pFile->zName); - if( c ){ - crashFile *p; -#ifdef TRACE_WRITECACHE - printf("\nCrash during sync of %s\n", pFile->zName); -#endif - for(p=pAllFiles; p; p=p->pNext){ - writeCache2(p, 1); - } - exit(-1); - }else{ - return writeCache2(pFile, 0); - } +static int writeListAppend( + sqlite3_file *pFile, + sqlite3_int64 iOffset, + const u8 *zBuf, + int nBuf +){ + WriteBuffer *pNew; + + assert((zBuf && nBuf) || (!nBuf && !zBuf)); + + pNew = (WriteBuffer *)crash_malloc(sizeof(WriteBuffer) + nBuf); + if( pNew==0 ){ + fprintf(stderr, "out of memory in the crash simulator\n"); + } + memset(pNew, 0, sizeof(WriteBuffer)+nBuf); + pNew->iOffset = iOffset; + pNew->nBuf = nBuf; + pNew->pFile = (CrashFile *)pFile; + if( zBuf ){ + pNew->zBuf = (u8 *)&pNew[1]; + memcpy(pNew->zBuf, zBuf, nBuf); + } + + if( g.pWriteList ){ + assert(g.pWriteListEnd); + g.pWriteListEnd->pNext = pNew; + }else{ + g.pWriteList = pNew; } + g.pWriteListEnd = pNew; + return SQLITE_OK; } /* -** Close the file. +** Close a crash-file. */ -static int crashClose(OsFile **pId){ - crashFile *pFile = (crashFile*)*pId; - if( pFile ){ - /* printf("CLOSE %s (%d blocks)\n", pFile->zName, pFile->nBlk); */ - writeCache(pFile); - sqlite3OsClose(&pFile->pBase); - } - closeFile(&pFile); - *pId = 0; +static int cfClose(sqlite3_file *pFile){ + CrashFile *pCrash = (CrashFile *)pFile; + writeListSync(pCrash, 0); + sqlite3OsClose(pCrash->pRealFile); return SQLITE_OK; } -static int crashSeek(OsFile *id, i64 offset){ - ((crashFile*)id)->offset = offset; - return SQLITE_OK; -} - -static int crashRead(OsFile *id, void *pBuf, int amt){ - i64 offset; /* The current offset from the start of the file */ - i64 end; /* The byte just past the last byte read */ - int blk; /* Block number the read starts on */ - int i; - u8 *zCsr; - int rc = SQLITE_OK; - crashFile *pFile = (crashFile*)id; - - offset = pFile->offset; - end = offset+amt; - blk = (offset/BLOCKSIZE); - - zCsr = (u8 *)pBuf; - for(i=blk; i*BLOCKSIZEiSize<(iOfst+iAmt) ){ + return SQLITE_IOERR_SHORT_READ; + } - if( BLOCK_OFFSET(i) < offset ){ - off = offset-BLOCK_OFFSET(i); - } - len = BLOCKSIZE - off; - if( BLOCK_OFFSET(i+1) > end ){ - len = len - (BLOCK_OFFSET(i+1)-end); - } + memcpy(zBuf, &pCrash->zData[iOfst], iAmt); + return SQLITE_OK; +} - if( inBlk && pFile->apBlk[i]){ - u8 *pBlk = pFile->apBlk[i]; - memcpy(zCsr, &pBlk[off], len); - }else{ - rc = sqlite3OsSeek(pFile->pBase, BLOCK_OFFSET(i) + off); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsRead(pFile->pBase, zCsr, len); - if( rc!=SQLITE_OK ) return rc; +/* +** Write data to a crash-file. +*/ +static int cfWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + CrashFile *pCrash = (CrashFile *)pFile; + if( iAmt+iOfst>pCrash->iSize ){ + pCrash->iSize = iAmt+iOfst; + } + while( pCrash->iSize>pCrash->nData ){ + u8 *zNew; + int nNew = (pCrash->nData*2) + 4096; + zNew = crash_realloc(pCrash->zData, nNew); + if( !zNew ){ + return SQLITE_NOMEM; } - - zCsr += len; + memset(&zNew[pCrash->nData], 0, nNew-pCrash->nData); + pCrash->nData = nNew; + pCrash->zData = zNew; } - assert( zCsr==&((u8 *)pBuf)[amt] ); + memcpy(&pCrash->zData[iOfst], zBuf, iAmt); + return writeListAppend(pFile, iOfst, zBuf, iAmt); +} - pFile->offset = end; - return rc; +/* +** Truncate a crash-file. 
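+**
+** Only the in-memory image is shrunk immediately; the real file is left
+** untouched. The truncation is recorded on the write-list as a
+** WriteBuffer with zBuf==0 and iOffset set to the new size, and is
+** applied to the real file (or skipped, when a crash is simulated) by a
+** later writeListSync() call.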
+*/ +static int cfTruncate(sqlite3_file *pFile, sqlite_int64 size){ + CrashFile *pCrash = (CrashFile *)pFile; + assert(size>=0); + if( pCrash->iSize>size ){ + pCrash->iSize = size; + } + return writeListAppend(pFile, size, 0, 0); } -static int crashWrite(OsFile *id, const void *pBuf, int amt){ - i64 offset; /* The current offset from the start of the file */ - i64 end; /* The byte just past the last byte written */ - int blk; /* Block number the write starts on */ - int i; - const u8 *zCsr; - int rc = SQLITE_OK; - crashFile *pFile = (crashFile*)id; +/* +** Sync a crash-file. +*/ +static int cfSync(sqlite3_file *pFile, int flags){ + CrashFile *pCrash = (CrashFile *)pFile; + int isCrash = 0; - offset = pFile->offset; - end = offset+amt; - blk = (offset/BLOCKSIZE); - - zCsr = (u8 *)pBuf; - for(i=blk; i*BLOCKSIZEapBlk[i]; - assert( pBlk ); + const char *zName = pCrash->zName; + const char *zCrashFile = g.zCrashFile; + int nName = strlen(zName); + int nCrashFile = strlen(zCrashFile); - if( BLOCK_OFFSET(i) < offset ){ - off = offset-BLOCK_OFFSET(i); - } - len = BLOCKSIZE - off; - if( BLOCK_OFFSET(i+1) > end ){ - len = len - (BLOCK_OFFSET(i+1)-end); - } - memcpy(&pBlk[off], zCsr, len); - zCsr += len; + if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){ + nCrashFile--; + if( nName>nCrashFile ) nName = nCrashFile; } - if( pFile->nMaxWritenMaxWrite = end; + + if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){ + if( (--g.iCrash)==0 ) isCrash = 1; } - assert( zCsr==&((u8 *)pBuf)[amt] ); - pFile->offset = end; - return rc; + + return writeListSync(pCrash, isCrash); } /* -** Sync the file. First flush the write-cache to disk, then call the -** real sync() function. +** Return the current file-size of the crash-file. */ -static int crashSync(OsFile *id, int dataOnly){ - return writeCache((crashFile*)id); +static int cfFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + CrashFile *pCrash = (CrashFile *)pFile; + *pSize = (i64)pCrash->iSize; + return SQLITE_OK; } /* -** Truncate the file. Set the internal OsFile.nMaxWrite variable to the new -** file size to ensure that nothing in the write-cache past this point -** is written to disk. +** Calls related to file-locks are passed on to the real file handle. */ -static int crashTruncate(OsFile *id, i64 nByte){ - crashFile *pFile = (crashFile*)id; - pFile->nMaxWrite = nByte; - return sqlite3OsTruncate(pFile->pBase, nByte); +static int cfLock(sqlite3_file *pFile, int eLock){ + return sqlite3OsLock(((CrashFile *)pFile)->pRealFile, eLock); +} +static int cfUnlock(sqlite3_file *pFile, int eLock){ + return sqlite3OsUnlock(((CrashFile *)pFile)->pRealFile, eLock); +} +static int cfCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + return sqlite3OsCheckReservedLock(((CrashFile *)pFile)->pRealFile, pResOut); +} +static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ + return sqlite3OsFileControl(((CrashFile *)pFile)->pRealFile, op, pArg); } /* -** Return the size of the file. If the cache contains a write that extended -** the file, then return this size instead of the on-disk size. +** The xSectorSize() and xDeviceCharacteristics() functions return +** the global values configured by the [sqlite_crashparams] tcl +* interface. 
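+**
+** Both values come from the single global "g" structure, so every open
+** crash-file shares the same simulated sector size and device
+** characteristics at any given time.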
*/ -static int crashFileSize(OsFile *id, i64 *pSize){ - crashFile *pFile = (crashFile*)id; - int rc = sqlite3OsFileSize(pFile->pBase, pSize); - if( rc==SQLITE_OK && pSize && *pSizenMaxWrite ){ - *pSize = pFile->nMaxWrite; - } - return rc; +static int cfSectorSize(sqlite3_file *pFile){ + return g.iSectorSize; +} +static int cfDeviceCharacteristics(sqlite3_file *pFile){ + return g.iDeviceCharacteristics; } +static const sqlite3_io_methods CrashFileVtab = { + 1, /* iVersion */ + cfClose, /* xClose */ + cfRead, /* xRead */ + cfWrite, /* xWrite */ + cfTruncate, /* xTruncate */ + cfSync, /* xSync */ + cfFileSize, /* xFileSize */ + cfLock, /* xLock */ + cfUnlock, /* xUnlock */ + cfCheckReservedLock, /* xCheckReservedLock */ + cfFileControl, /* xFileControl */ + cfSectorSize, /* xSectorSize */ + cfDeviceCharacteristics /* xDeviceCharacteristics */ +}; + /* -** Set this global variable to 1 to enable crash testing. +** Application data for the crash VFS */ -int sqlite3CrashTestEnable = 0; +struct crashAppData { + sqlite3_vfs *pOrig; /* Wrapped vfs structure */ +}; /* -** The three functions used to open files. All that is required is to -** initialise the os_test.c specific fields and then call the corresponding -** os_unix.c function to really open the file. -*/ -int sqlite3CrashOpenReadWrite(const char *zFilename, OsFile **pId,int *pRdonly){ - OsFile *pBase = 0; +** Open a crash-file file handle. +** +** The caller will have allocated pVfs->szOsFile bytes of space +** at pFile. This file uses this space for the CrashFile structure +** and allocates space for the "real" file structure using +** sqlite3_malloc(). The assumption here is (pVfs->szOsFile) is +** equal or greater than sizeof(CrashFile). +*/ +static int cfOpen( + sqlite3_vfs *pCfVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; int rc; + CrashFile *pWrapper = (CrashFile *)pFile; + sqlite3_file *pReal = (sqlite3_file*)&pWrapper[1]; - sqlite3CrashTestEnable = 0; - rc = sqlite3OsOpenReadWrite(zFilename, &pBase, pRdonly); - sqlite3CrashTestEnable = 1; - if( !rc ){ - initFile(pId, zFilename, pBase); - } - return rc; -} -int sqlite3CrashOpenExclusive(const char *zFilename, OsFile **pId, int delFlag){ - OsFile *pBase = 0; - int rc; + memset(pWrapper, 0, sizeof(CrashFile)); + rc = sqlite3OsOpen(pVfs, zName, pReal, flags, pOutFlags); - sqlite3CrashTestEnable = 0; - rc = sqlite3OsOpenExclusive(zFilename, &pBase, delFlag); - sqlite3CrashTestEnable = 1; - if( !rc ){ - initFile(pId, zFilename, pBase); + if( rc==SQLITE_OK ){ + i64 iSize; + pWrapper->pMethod = &CrashFileVtab; + pWrapper->zName = (char *)zName; + pWrapper->pRealFile = pReal; + rc = sqlite3OsFileSize(pReal, &iSize); + pWrapper->iSize = (int)iSize; + pWrapper->flags = flags; + } + if( rc==SQLITE_OK ){ + pWrapper->nData = (4096 + pWrapper->iSize); + pWrapper->zData = crash_malloc(pWrapper->nData); + if( pWrapper->zData ){ + /* os_unix.c contains an assert() that fails if the caller attempts + ** to read data from the 512-byte locking region of a file opened + ** with the SQLITE_OPEN_MAIN_DB flag. This region of a database file + ** never contains valid data anyhow. So avoid doing such a read here. 
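      **
      ** For reference (assuming a default build): PENDING_BYTE is
      ** 0x40000000 and the locking region is the 512 bytes
      ** PENDING_BYTE..PENDING_BYTE+511, which is why the first read below
      ** stops at PENDING_BYTE and the second one resumes at offset
      ** PENDING_BYTE+512.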
+ */ + const int isDb = (flags&SQLITE_OPEN_MAIN_DB); + i64 iChunk = pWrapper->iSize; + if( iChunk>PENDING_BYTE && isDb ){ + iChunk = PENDING_BYTE; + } + memset(pWrapper->zData, 0, pWrapper->nData); + rc = sqlite3OsRead(pReal, pWrapper->zData, iChunk, 0); + if( SQLITE_OK==rc && pWrapper->iSize>(PENDING_BYTE+512) && isDb ){ + i64 iOff = PENDING_BYTE+512; + iChunk = pWrapper->iSize - iOff; + rc = sqlite3OsRead(pReal, &pWrapper->zData[iOff], iChunk, iOff); + } + }else{ + rc = SQLITE_NOMEM; + } } - return rc; -} -int sqlite3CrashOpenReadOnly(const char *zFilename, OsFile **pId, int NotUsed){ - OsFile *pBase = 0; - int rc; - - sqlite3CrashTestEnable = 0; - rc = sqlite3OsOpenReadOnly(zFilename, &pBase); - sqlite3CrashTestEnable = 1; - if( !rc ){ - initFile(pId, zFilename, pBase); + if( rc!=SQLITE_OK && pWrapper->pMethod ){ + sqlite3OsClose(pFile); } return rc; } -/* -** OpenDirectory is a no-op -*/ -static int crashOpenDir(OsFile *id, const char *zName){ - return SQLITE_OK; +static int cfDelete(sqlite3_vfs *pCfVfs, const char *zPath, int dirSync){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xDelete(pVfs, zPath, dirSync); +} +static int cfAccess( + sqlite3_vfs *pCfVfs, + const char *zPath, + int flags, + int *pResOut +){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xAccess(pVfs, zPath, flags, pResOut); } - -/* -** Locking primitives are passed through into the underlying -** file descriptor. -*/ -int crashLock(OsFile *id, int lockType){ - return sqlite3OsLock(((crashFile*)id)->pBase, lockType); +static int cfFullPathname( + sqlite3_vfs *pCfVfs, + const char *zPath, + int nPathOut, + char *zPathOut +){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); } -int crashUnlock(OsFile *id, int lockType){ - return sqlite3OsUnlock(((crashFile*)id)->pBase, lockType); +static void *cfDlOpen(sqlite3_vfs *pCfVfs, const char *zPath){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xDlOpen(pVfs, zPath); } -int crashCheckReservedLock(OsFile *id){ - return sqlite3OsCheckReservedLock(((crashFile*)id)->pBase); +static void cfDlError(sqlite3_vfs *pCfVfs, int nByte, char *zErrMsg){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + pVfs->xDlError(pVfs, nByte, zErrMsg); } -void crashSetFullSync(OsFile *id, int setting){ - return; /* This is a no-op */ +static void (*cfDlSym(sqlite3_vfs *pCfVfs, void *pH, const char *zSym))(void){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xDlSym(pVfs, pH, zSym); } -int crashLockState(OsFile *id){ - return sqlite3OsLockState(((crashFile*)id)->pBase); +static void cfDlClose(sqlite3_vfs *pCfVfs, void *pHandle){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + pVfs->xDlClose(pVfs, pHandle); } - -/* -** Return the underlying file handle. 
-*/ -int crashFileHandle(OsFile *id){ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) - return sqlite3OsFileHandle(((crashFile*)id)->pBase); -#endif - return 0; +static int cfRandomness(sqlite3_vfs *pCfVfs, int nByte, char *zBufOut){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xRandomness(pVfs, nByte, zBufOut); +} +static int cfSleep(sqlite3_vfs *pCfVfs, int nMicro){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xSleep(pVfs, nMicro); +} +static int cfCurrentTime(sqlite3_vfs *pCfVfs, double *pTimeOut){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; + return pVfs->xCurrentTime(pVfs, pTimeOut); } -/* -** Return the simulated file-system sector size. -*/ -int crashSectorSize(OsFile *id){ - return BLOCKSIZE; +static int processDevSymArgs( + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[], + int *piDeviceChar, + int *piSectorSize +){ + struct DeviceFlag { + char *zName; + int iValue; + } aFlag[] = { + { "atomic", SQLITE_IOCAP_ATOMIC }, + { "atomic512", SQLITE_IOCAP_ATOMIC512 }, + { "atomic1k", SQLITE_IOCAP_ATOMIC1K }, + { "atomic2k", SQLITE_IOCAP_ATOMIC2K }, + { "atomic4k", SQLITE_IOCAP_ATOMIC4K }, + { "atomic8k", SQLITE_IOCAP_ATOMIC8K }, + { "atomic16k", SQLITE_IOCAP_ATOMIC16K }, + { "atomic32k", SQLITE_IOCAP_ATOMIC32K }, + { "atomic64k", SQLITE_IOCAP_ATOMIC64K }, + { "sequential", SQLITE_IOCAP_SEQUENTIAL }, + { "safe_append", SQLITE_IOCAP_SAFE_APPEND }, + { 0, 0 } + }; + + int i; + int iDc = 0; + int iSectorSize = 0; + int setSectorsize = 0; + int setDeviceChar = 0; + + for(i=0; i11 || nOpt<2 || strncmp("-sectorsize", zOpt, nOpt)) + && (nOpt>16 || nOpt<2 || strncmp("-characteristics", zOpt, nOpt)) + ){ + Tcl_AppendResult(interp, + "Bad option: \"", zOpt, + "\" - must be \"-characteristics\" or \"-sectorsize\"", 0 + ); + return TCL_ERROR; + } + if( i==objc-1 ){ + Tcl_AppendResult(interp, "Option requires an argument: \"", zOpt, "\"",0); + return TCL_ERROR; + } + + if( zOpt[1]=='s' ){ + if( Tcl_GetIntFromObj(interp, objv[i+1], &iSectorSize) ){ + return TCL_ERROR; + } + setSectorsize = 1; + }else{ + int j; + Tcl_Obj **apObj; + int nObj; + if( Tcl_ListObjGetElements(interp, objv[i+1], &nObj, &apObj) ){ + return TCL_ERROR; + } + for(j=0; jpMethod = &crashIoMethod; - pFile->nMaxWrite = 0; - pFile->offset = 0; - pFile->nBlk = 0; - pFile->apBlk = 0; - pFile->zName = (char *)(&pFile[1]); - strcpy(pFile->zName, zName); - pFile->pBase = pBase; - pFile->pNext = pAllFiles; - pAllFiles = pFile; - *pId = (OsFile*)pFile; -} + if( Tcl_GetBooleanFromObj(interp, objv[1], &isEnable) ){ + return TCL_ERROR; + } + + if( (isEnable && crashVfs.pAppData) || (!isEnable && !crashVfs.pAppData) ){ + return TCL_OK; + } + if( crashVfs.pAppData==0 ){ + sqlite3_vfs *pOriginalVfs = sqlite3_vfs_find(0); + crashVfs.mxPathname = pOriginalVfs->mxPathname; + crashVfs.pAppData = (void *)pOriginalVfs; + crashVfs.szOsFile = sizeof(CrashFile) + pOriginalVfs->szOsFile; + sqlite3_vfs_register(&crashVfs, 0); + }else{ + crashVfs.pAppData = 0; + sqlite3_vfs_unregister(&crashVfs); + } + + return TCL_OK; +} /* -** tclcmd: sqlite_crashparams DELAY CRASHFILE ?BLOCKSIZE? +** tclcmd: sqlite_crashparams ?OPTIONS? DELAY CRASHFILE ** ** This procedure implements a TCL command that enables crash testing ** in testfixture. Once enabled, crash testing cannot be disabled. +** +** Available options are "-characteristics" and "-sectorsize". Both require +** an argument. For -sectorsize, this is the simulated sector size in +** bytes. 
For -characteristics, the argument must be a list of io-capability +** flags to simulate. Valid flags are "atomic", "atomic512", "atomic1K", +** "atomic2K", "atomic4K", "atomic8K", "atomic16K", "atomic32K", +** "atomic64K", "sequential" and "safe_append". +** +** Example: +** +** sqlite_crashparams -sect 1024 -char {atomic sequential} ./test.db 1 +** */ static int crashParamsObjCmd( void * clientData, @@ -540,31 +838,120 @@ Tcl_Obj *CONST objv[] ){ int iDelay; - const char *zFile; - int nFile; + const char *zCrashFile; + int nCrashFile, iDc, iSectorSize; + + iDc = -1; + iSectorSize = -1; + + if( objc<3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?OPTIONS? DELAY CRASHFILE"); + goto error; + } + + zCrashFile = Tcl_GetStringFromObj(objv[objc-1], &nCrashFile); + if( nCrashFile>=sizeof(g.zCrashFile) ){ + Tcl_AppendResult(interp, "Filename is too long: \"", zCrashFile, "\"", 0); + goto error; + } + if( Tcl_GetIntFromObj(interp, objv[objc-2], &iDelay) ){ + goto error; + } + + if( processDevSymArgs(interp, objc-3, &objv[1], &iDc, &iSectorSize) ){ + return TCL_ERROR; + } + + if( iDc>=0 ){ + g.iDeviceCharacteristics = iDc; + } + if( iSectorSize>=0 ){ + g.iSectorSize = iSectorSize; + } + + g.iCrash = iDelay; + memcpy(g.zCrashFile, zCrashFile, nCrashFile+1); + sqlite3CrashTestEnable = 1; + return TCL_OK; + +error: + return TCL_ERROR; +} + +static int devSymObjCmd( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + void devsym_register(int iDeviceChar, int iSectorSize); + + int iDc = -1; + int iSectorSize = -1; - if( objc!=3 && objc!=4 ){ - Tcl_WrongNumArgs(interp, 1, objv, "DELAY CRASHFILE ?BLOCKSIZE?"); + if( processDevSymArgs(interp, objc-1, &objv[1], &iDc, &iSectorSize) ){ return TCL_ERROR; } - if( Tcl_GetIntFromObj(interp, objv[1], &iDelay) ) return TCL_ERROR; - zFile = Tcl_GetStringFromObj(objv[2], &nFile); - if( nFile>=sizeof(zCrashFile)-1 ){ - Tcl_AppendResult(interp, "crash file name too big", 0); + devsym_register(iDc, iSectorSize); + + return TCL_OK; +} + +/* +** tclcmd: register_jt_vfs ?-default? PARENT-VFS +*/ +static int jtObjCmd( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int jt_register(char *, int); + char *zParent = 0; + + if( objc!=2 && objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?-default? 
PARENT-VFS"); return TCL_ERROR; } - setCrashParams(iDelay, zFile); - if( objc==4 ){ - int iBlockSize = 0; - if( Tcl_GetIntFromObj(interp, objv[3], &iBlockSize) ) return TCL_ERROR; - if( pAllFiles ){ - char *zErr = "Cannot modify blocksize after opening files"; - Tcl_SetResult(interp, zErr, TCL_STATIC); + zParent = Tcl_GetString(objv[1]); + if( objc==3 ){ + if( strcmp(zParent, "-default") ){ + Tcl_AppendResult(interp, + "bad option \"", zParent, "\": must be -default", 0 + ); return TCL_ERROR; } - setBlocksize(iBlockSize); + zParent = Tcl_GetString(objv[2]); } - sqlite3CrashTestEnable = 1; + + if( !(*zParent) ){ + zParent = 0; + } + if( jt_register(zParent, objc==3) ){ + Tcl_AppendResult(interp, "Error in jt_register", 0); + return TCL_ERROR; + } + + return TCL_OK; +} + +/* +** tclcmd: unregister_jt_vfs +*/ +static int jtUnregisterObjCmd( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + void jt_unregister(void); + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + jt_unregister(); return TCL_OK; } @@ -575,7 +962,11 @@ */ int Sqlitetest6_Init(Tcl_Interp *interp){ #ifndef SQLITE_OMIT_DISKIO + Tcl_CreateObjCommand(interp, "sqlite3_crash_enable", crashEnableCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_crashparams", crashParamsObjCmd, 0, 0); + Tcl_CreateObjCommand(interp, "sqlite3_simulate_device", devSymObjCmd, 0, 0); + Tcl_CreateObjCommand(interp, "register_jt_vfs", jtObjCmd, 0, 0); + Tcl_CreateObjCommand(interp, "unregister_jt_vfs", jtUnregisterObjCmd, 0, 0); #endif return TCL_OK; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test7.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test7.c --- sqlite3-3.4.2/src/test7.c 2006-03-22 22:10:08.000000000 +0000 +++ sqlite3-3.6.16/src/test7.c 2009-06-12 03:37:49.000000000 +0100 @@ -12,18 +12,17 @@ ** Code for testing the client/server version of the SQLite library. ** Derived from test4.c. ** -** $Id: test7.c,v 1.4 2006/03/22 22:10:08 drh Exp $ +** $Id: test7.c,v 1.13 2008/10/12 00:27:54 shane Exp $ */ #include "sqliteInt.h" #include "tcl.h" -#include "os.h" /* -** This test only works on UNIX with a THREADSAFE build that includes +** This test only works on UNIX with a SQLITE_THREADSAFE build that includes ** the SQLITE_SERVER option. 
*/ -#if OS_UNIX && defined(THREADSAFE) && THREADSAFE==1 && \ - defined(SQLITE_SERVER) && !defined(SQLITE_OMIT_SHARED_CACHE) +#if defined(SQLITE_SERVER) && !defined(SQLITE_OMIT_SHARED_CACHE) && \ + defined(SQLITE_OS_UNIX) && OS_UNIX && SQLITE_THREADSAFE #include #include @@ -119,7 +118,9 @@ p->zErr = 0; } p->completed++; +#ifndef SQLITE_OMIT_DEPRECATED sqlite3_thread_cleanup(); +#endif return 0; } @@ -164,14 +165,14 @@ return TCL_ERROR; } threadset[i].busy = 1; - sqliteFree(threadset[i].zFilename); - threadset[i].zFilename = sqliteStrDup(argv[2]); + sqlite3_free(threadset[i].zFilename); + threadset[i].zFilename = sqlite3DbStrDup(0, argv[2]); threadset[i].opnum = 1; threadset[i].completed = 0; rc = pthread_create(&x, 0, client_main, &threadset[i]); if( rc ){ Tcl_AppendResult(interp, "failed to create the thread", 0); - sqliteFree(threadset[i].zFilename); + sqlite3_free(threadset[i].zFilename); threadset[i].busy = 0; return TCL_ERROR; } @@ -223,9 +224,9 @@ p->xOp = 0; p->opnum++; client_wait(p); - sqliteFree(p->zArg); + sqlite3_free(p->zArg); p->zArg = 0; - sqliteFree(p->zFilename); + sqlite3_free(p->zFilename); p->zFilename = 0; p->busy = 0; } @@ -507,8 +508,8 @@ } client_wait(&threadset[i]); threadset[i].xOp = do_compile; - sqliteFree(threadset[i].zArg); - threadset[i].zArg = sqliteStrDup(argv[2]); + sqlite3_free(threadset[i].zArg); + threadset[i].zArg = sqlite3DbStrDup(0, argv[2]); threadset[i].opnum++; return TCL_OK; } @@ -602,7 +603,7 @@ } client_wait(&threadset[i]); threadset[i].xOp = do_finalize; - sqliteFree(threadset[i].zArg); + sqlite3_free(threadset[i].zArg); threadset[i].zArg = 0; threadset[i].opnum++; return TCL_OK; @@ -646,7 +647,7 @@ } client_wait(&threadset[i]); threadset[i].xOp = do_reset; - sqliteFree(threadset[i].zArg); + sqlite3_free(threadset[i].zArg); threadset[i].zArg = 0; threadset[i].opnum++; return TCL_OK; @@ -721,4 +722,4 @@ } #else int Sqlitetest7_Init(Tcl_Interp *interp){ return TCL_OK; } -#endif /* OS_UNIX */ +#endif /* SQLITE_OS_UNIX */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test8.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test8.c --- sqlite3-3.4.2/src/test8.c 2007-07-20 01:35:59.000000000 +0100 +++ sqlite3-3.6.16/src/test8.c 2009-06-25 12:24:38.000000000 +0100 @@ -13,11 +13,10 @@ ** is not included in the SQLite library. It is used for automated ** testing of the SQLite library. ** -** $Id: test8.c,v 1.48 2007/07/20 00:35:59 drh Exp $ +** $Id: test8.c,v 1.78 2009/04/29 11:50:54 danielk1977 Exp $ */ #include "sqliteInt.h" #include "tcl.h" -#include "os.h" #include #include @@ -45,6 +44,22 @@ ** use the named table as a backing store will fail. */ +/* +** Errors can be provoked within the following echo virtual table methods: +** +** xBestIndex xOpen xFilter xNext +** xColumn xRowid xUpdate xSync +** xBegin xRename +** +** This is done by setting the global tcl variable: +** +** echo_module_fail($method,$tbl) +** +** where $method is set to the name of the virtual table method to fail +** (i.e. "xBestIndex") and $tbl is the name of the table being echoed (not +** the name of the virtual table, the name of the underlying real table). +*/ + /* ** An echo virtual-table object. 
** @@ -62,6 +77,7 @@ sqlite3 *db; /* Database connection */ int isPattern; + int inTransaction; /* True if within a transaction */ char *zThis; /* Name of the echo table */ char *zTableName; /* Name of the real table */ char *zLogName; /* Name of the log table */ @@ -76,6 +92,18 @@ sqlite3_stmt *pStmt; }; +static int simulateVtabError(echo_vtab *p, const char *zMethod){ + const char *zErr; + char zVarname[128]; + zVarname[127] = '\0'; + sqlite3_snprintf(127, zVarname, "echo_module_fail(%s,%s)", zMethod, p->zTableName); + zErr = Tcl_GetVar(p->interp, zVarname, TCL_GLOBAL_ONLY); + if( zErr ){ + p->base.zErrMsg = sqlite3_mprintf("echo-vtab-error: %s", zErr); + } + return (zErr!=0); +} + /* ** Convert an SQL-style quoted string into a normal string by removing ** the quote characters. The conversion is done in-place. If the @@ -122,8 +150,8 @@ ** code otherwise. ** ** If successful, the number of columns is written to *pnCol. *paCol is -** set to point at sqliteMalloc()'d space containing the array of -** nCol column names. The caller is responsible for calling sqliteFree +** set to point at sqlite3_malloc()'d space containing the array of +** nCol column names. The caller is responsible for calling sqlite3_free ** on *paCol. */ static int getColumnNames( @@ -142,13 +170,13 @@ ** of the result set of the compiled SELECT will be the same as ** the column names of table . */ - zSql = sqlite3MPrintf("SELECT * FROM %Q", zTab); + zSql = sqlite3_mprintf("SELECT * FROM %Q", zTab); if( !zSql ){ rc = SQLITE_NOMEM; goto out; } rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); - sqliteFree(zSql); + sqlite3_free(zSql); if( rc==SQLITE_OK ){ int ii; @@ -161,9 +189,14 @@ */ nBytes = sizeof(char *) * nCol; for(ii=0; ii=0 && cidzTableName ){ sqlite3_stmt *pStmt = 0; - sqlite3_prepare(db, + rc = sqlite3_prepare(db, "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = ?", -1, &pStmt, 0); - sqlite3_bind_text(pStmt, 1, pVtab->zTableName, -1, 0); - if( sqlite3_step(pStmt)==SQLITE_ROW ){ - int rc2; - const char *zCreateTable = (const char *)sqlite3_column_text(pStmt, 0); - rc = sqlite3_declare_vtab(db, zCreateTable); - rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pStmt, 1, pVtab->zTableName, -1, 0); + if( sqlite3_step(pStmt)==SQLITE_ROW ){ + int rc2; + const char *zCreateTable = (const char *)sqlite3_column_text(pStmt, 0); + rc = sqlite3_declare_vtab(db, zCreateTable); + rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } else { + rc = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ){ + rc = SQLITE_ERROR; + } + } if( rc==SQLITE_OK ){ - rc = rc2; + rc = getColumnNames(db, pVtab->zTableName, &pVtab->aCol, &pVtab->nCol); } - } else { - rc = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ){ - rc = SQLITE_ERROR; + if( rc==SQLITE_OK ){ + rc = getIndexArray(db, pVtab->zTableName, pVtab->nCol, &pVtab->aIndex); } } - if( rc==SQLITE_OK ){ - rc = getColumnNames(db, pVtab->zTableName, &pVtab->aCol, &pVtab->nCol); - } - if( rc==SQLITE_OK ){ - rc = getIndexArray(db, pVtab->zTableName, pVtab->nCol, &pVtab->aIndex); - } } return rc; @@ -339,15 +374,20 @@ */ static int echoDestructor(sqlite3_vtab *pVtab){ echo_vtab *p = (echo_vtab*)pVtab; - sqliteFree(p->aIndex); - sqliteFree(p->aCol); - sqliteFree(p->zThis); - sqliteFree(p->zTableName); - sqliteFree(p->zLogName); - sqliteFree(p); + sqlite3_free(p->aIndex); + sqlite3_free(p->aCol); + sqlite3_free(p->zThis); + sqlite3_free(p->zTableName); + sqlite3_free(p->zLogName); + sqlite3_free(p); return 0; } +typedef struct EchoModule 
EchoModule; +struct EchoModule { + Tcl_Interp *interp; +}; + /* ** This function is called to do the work of the xConnect() method - ** to allocate the required in-memory structures for a newly connected @@ -360,19 +400,20 @@ sqlite3_vtab **ppVtab, char **pzErr ){ + int rc; int i; echo_vtab *pVtab; /* Allocate the sqlite3_vtab/echo_vtab structure itself */ - pVtab = sqliteMalloc( sizeof(*pVtab) ); + pVtab = sqlite3MallocZero( sizeof(*pVtab) ); if( !pVtab ){ return SQLITE_NOMEM; } - pVtab->interp = (Tcl_Interp *)pAux; + pVtab->interp = ((EchoModule *)pAux)->interp; pVtab->db = db; /* Allocate echo_vtab.zThis */ - pVtab->zThis = sqlite3MPrintf("%s", argv[2]); + pVtab->zThis = sqlite3_mprintf("%s", argv[2]); if( !pVtab->zThis ){ echoDestructor((sqlite3_vtab *)pVtab); return SQLITE_NOMEM; @@ -380,11 +421,11 @@ /* Allocate echo_vtab.zTableName */ if( argc>3 ){ - pVtab->zTableName = sqlite3MPrintf("%s", argv[3]); + pVtab->zTableName = sqlite3_mprintf("%s", argv[3]); dequoteString(pVtab->zTableName); if( pVtab->zTableName && pVtab->zTableName[0]=='*' ){ - char *z = sqlite3MPrintf("%s%s", argv[2], &(pVtab->zTableName[1])); - sqliteFree(pVtab->zTableName); + char *z = sqlite3_mprintf("%s%s", argv[2], &(pVtab->zTableName[1])); + sqlite3_free(pVtab->zTableName); pVtab->zTableName = z; pVtab->isPattern = 1; } @@ -403,9 +444,10 @@ ** structure. If an error occurs, delete the sqlite3_vtab structure and ** return an error code. */ - if( echoDeclareVtab(pVtab, db) ){ + rc = echoDeclareVtab(pVtab, db); + if( rc!=SQLITE_OK ){ echoDestructor((sqlite3_vtab *)pVtab); - return SQLITE_ERROR; + return rc; } /* Success. Set *ppVtab and return */ @@ -424,7 +466,7 @@ char **pzErr ){ int rc = SQLITE_OK; - appendToEchoModule((Tcl_Interp *)(pAux), "xCreate"); + appendToEchoModule(((EchoModule *)pAux)->interp, "xCreate"); rc = echoConstructor(db, pAux, argc, argv, ppVtab, pzErr); /* If there were two arguments passed to the module at the SQL level @@ -440,10 +482,22 @@ if( rc==SQLITE_OK && argc==5 ){ char *zSql; echo_vtab *pVtab = *(echo_vtab **)ppVtab; - pVtab->zLogName = sqlite3MPrintf("%s", argv[4]); - zSql = sqlite3MPrintf("CREATE TABLE %Q(logmsg)", pVtab->zLogName); + pVtab->zLogName = sqlite3_mprintf("%s", argv[4]); + zSql = sqlite3_mprintf("CREATE TABLE %Q(logmsg)", pVtab->zLogName); rc = sqlite3_exec(db, zSql, 0, 0, 0); - sqliteFree(zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ){ + *pzErr = sqlite3DbStrDup(0, sqlite3_errmsg(db)); + } + } + + if( *ppVtab && rc!=SQLITE_OK ){ + echoDestructor(*ppVtab); + *ppVtab = 0; + } + + if( rc==SQLITE_OK ){ + (*(echo_vtab**)ppVtab)->inTransaction = 1; } return rc; @@ -459,7 +513,7 @@ sqlite3_vtab **ppVtab, char **pzErr ){ - appendToEchoModule((Tcl_Interp *)(pAux), "xConnect"); + appendToEchoModule(((EchoModule *)pAux)->interp, "xConnect"); return echoConstructor(db, pAux, argc, argv, ppVtab, pzErr); } @@ -482,9 +536,9 @@ /* Drop the "log" table, if one exists (see echoCreate() for details) */ if( p && p->zLogName ){ char *zSql; - zSql = sqlite3MPrintf("DROP TABLE %Q", p->zLogName); + zSql = sqlite3_mprintf("DROP TABLE %Q", p->zLogName); rc = sqlite3_exec(p->db, zSql, 0, 0, 0); - sqliteFree(zSql); + sqlite3_free(zSql); } if( rc==SQLITE_OK ){ @@ -498,7 +552,10 @@ */ static int echoOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ echo_cursor *pCur; - pCur = sqliteMalloc(sizeof(echo_cursor)); + if( simulateVtabError((echo_vtab *)pVTab, "xOpen") ){ + return SQLITE_ERROR; + } + pCur = sqlite3MallocZero(sizeof(echo_cursor)); *ppCursor = (sqlite3_vtab_cursor *)pCur; 
return (pCur ? SQLITE_OK : SQLITE_NOMEM); } @@ -511,7 +568,7 @@ echo_cursor *pCur = (echo_cursor *)cur; sqlite3_stmt *pStmt = pCur->pStmt; pCur->pStmt = 0; - sqliteFree(pCur); + sqlite3_free(pCur); rc = sqlite3_finalize(pStmt); return rc; } @@ -528,15 +585,21 @@ ** Echo virtual table module xNext method. */ static int echoNext(sqlite3_vtab_cursor *cur){ - int rc; + int rc = SQLITE_OK; echo_cursor *pCur = (echo_cursor *)cur; - rc = sqlite3_step(pCur->pStmt); - if( rc==SQLITE_ROW ){ - rc = SQLITE_OK; - }else{ - rc = sqlite3_finalize(pCur->pStmt); - pCur->pStmt = 0; + if( simulateVtabError((echo_vtab *)(cur->pVtab), "xNext") ){ + return SQLITE_ERROR; + } + + if( pCur->pStmt ){ + rc = sqlite3_step(pCur->pStmt); + if( rc==SQLITE_ROW ){ + rc = SQLITE_OK; + }else{ + rc = sqlite3_finalize(pCur->pStmt); + pCur->pStmt = 0; + } } return rc; @@ -548,6 +611,11 @@ static int echoColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ int iCol = i + 1; sqlite3_stmt *pStmt = ((echo_cursor *)cur)->pStmt; + + if( simulateVtabError((echo_vtab *)(cur->pVtab), "xColumn") ){ + return SQLITE_ERROR; + } + if( !pStmt ){ sqlite3_result_null(ctx); }else{ @@ -562,6 +630,11 @@ */ static int echoRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){ sqlite3_stmt *pStmt = ((echo_cursor *)cur)->pStmt; + + if( simulateVtabError((echo_vtab *)(cur->pVtab), "xRowid") ){ + return SQLITE_ERROR; + } + *pRowid = sqlite3_column_int64(pStmt, 0); return SQLITE_OK; } @@ -599,6 +672,10 @@ echo_vtab *pVtab = (echo_vtab *)pVtabCursor->pVtab; sqlite3 *db = pVtab->db; + if( simulateVtabError(pVtab, "xFilter") ){ + return SQLITE_ERROR; + } + /* Check that idxNum matches idxStr */ assert( idxNum==hashString(idxStr) ); @@ -618,7 +695,7 @@ rc = sqlite3_prepare(db, idxStr, -1, &pCur->pStmt, 0); assert( pCur->pStmt || rc!=SQLITE_OK ); for(i=0; rc==SQLITE_OK && ipStmt, i+1, argv[i]); + rc = sqlite3_bind_value(pCur->pStmt, i+1, argv[i]); } /* If everything was successful, advance to the first row of the scan */ @@ -643,14 +720,25 @@ ** If the third argument, doFree, is true, then sqlite3_free() is ** also called to free the buffer pointed to by zAppend. */ -static void string_concat(char **pzStr, char *zAppend, int doFree){ +static void string_concat(char **pzStr, char *zAppend, int doFree, int *pRc){ char *zIn = *pzStr; - if( zIn ){ - char *zTemp = zIn; - zIn = sqlite3_mprintf("%s%s", zIn, zAppend); - sqlite3_free(zTemp); + if( !zAppend && doFree && *pRc==SQLITE_OK ){ + *pRc = SQLITE_NOMEM; + } + if( *pRc!=SQLITE_OK ){ + sqlite3_free(zIn); + zIn = 0; }else{ - zIn = sqlite3_mprintf("%s", zAppend); + if( zIn ){ + char *zTemp = zIn; + zIn = sqlite3_mprintf("%s%s", zIn, zAppend); + sqlite3_free(zTemp); + }else{ + zIn = sqlite3_mprintf("%s", zAppend); + } + if( !zIn ){ + *pRc = SQLITE_NOMEM; + } } *pzStr = zIn; if( doFree ){ @@ -668,7 +756,7 @@ ** ** then the echo module handles WHERE or ORDER BY clauses that refer ** to the column "b", but not "a" or "c". If a multi-column index is -** present, only it's left most column is considered. +** present, only its left most column is considered. 
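**
** As a concrete illustration of the query encoding described below
** (table and column names are only examples): with an index on column
** "b" of table t1, a usable "b = ?" constraint typically produces an
** idxStr of roughly
**
**     SELECT rowid, * FROM 't1' WHERE b = ?
**
** with idxNum set to hashString() of that text, which xFilter later
** verifies.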
** ** This xBestIndex method encodes the proposed search strategy as ** an SQL query on the real table underlying the virtual echo module @@ -695,6 +783,14 @@ int rc = SQLITE_OK; int useCost = 0; double cost; + int isIgnoreUsable = 0; + if( Tcl_GetVar(interp, "echo_module_ignore_usable", TCL_GLOBAL_ONLY) ){ + isIgnoreUsable = 1; + } + + if( simulateVtabError(pVtab, "xBestIndex") ){ + return SQLITE_ERROR; + } /* Determine the number of rows in the table and store this value in local ** variable nRow. The 'estimated-cost' of the scan will be the number of @@ -706,6 +802,9 @@ useCost = 1; } else { zQuery = sqlite3_mprintf("SELECT count(*) FROM %Q", pVtab->zTableName); + if( !zQuery ){ + return SQLITE_NOMEM; + } rc = sqlite3_prepare(pVtab->db, zQuery, -1, &pStmt, 0); sqlite3_free(zQuery); if( rc!=SQLITE_OK ){ @@ -720,6 +819,9 @@ } zQuery = sqlite3_mprintf("SELECT rowid, * FROM %Q", pVtab->zTableName); + if( !zQuery ){ + return SQLITE_NOMEM; + } for(ii=0; iinConstraint; ii++){ const struct sqlite3_index_constraint *pConstraint; struct sqlite3_index_constraint_usage *pUsage; @@ -728,8 +830,10 @@ pConstraint = &pIdxInfo->aConstraint[ii]; pUsage = &pIdxInfo->aConstraintUsage[ii]; + if( !isIgnoreUsable && !pConstraint->usable ) continue; + iCol = pConstraint->iColumn; - if( pVtab->aIndex[iCol] ){ + if( pVtab->aIndex[iCol] || iCol<0 ){ char *zCol = pVtab->aCol[iCol]; char *zOp = 0; useIdx = 1; @@ -756,7 +860,7 @@ } else { zNew = sqlite3_mprintf(" %s %s %s ?", zSep, zCol, zOp); } - string_concat(&zQuery, zNew, 1); + string_concat(&zQuery, zNew, 1, &rc); zSep = "AND"; pUsage->argvIndex = ++nArg; @@ -776,26 +880,29 @@ zCol = "rowid"; } zNew = sqlite3_mprintf(" ORDER BY %s %s", zCol, zDir); - string_concat(&zQuery, zNew, 1); + string_concat(&zQuery, zNew, 1, &rc); pIdxInfo->orderByConsumed = 1; } appendToEchoModule(pVtab->interp, "xBestIndex");; appendToEchoModule(pVtab->interp, zQuery); + if( !zQuery ){ + return rc; + } pIdxInfo->idxNum = hashString(zQuery); pIdxInfo->idxStr = zQuery; pIdxInfo->needToFreeIdxStr = 1; - if (useCost) { + if( useCost ){ pIdxInfo->estimatedCost = cost; - } else if( useIdx ){ + }else if( useIdx ){ /* Approximation of log2(nRow). 
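      ** For example, nRow==1000 has bit 9 as its highest set bit
      ** (512 <= 1000 < 1024), so the loop below leaves estimatedCost
      ** at 9.0 - close to log2(1000), which is roughly 9.97.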
*/ for( ii=0; ii<(sizeof(int)*8); ii++ ){ if( nRow & (1<estimatedCost = (double)ii; } } - } else { + }else{ pIdxInfo->estimatedCost = (double)nRow; } return rc; @@ -833,30 +940,44 @@ assert( nData==pVtab->nCol+2 || nData==1 ); + /* Ticket #3083 - make sure we always start a transaction prior to + ** making any changes to a virtual table */ + assert( pVtab->inTransaction ); + + if( simulateVtabError(pVtab, "xUpdate") ){ + return SQLITE_ERROR; + } + /* If apData[0] is an integer and nData>1 then do an UPDATE */ if( nData>1 && sqlite3_value_type(apData[0])==SQLITE_INTEGER ){ char *zSep = " SET"; z = sqlite3_mprintf("UPDATE %Q", pVtab->zTableName); + if( !z ){ + rc = SQLITE_NOMEM; + } bindArgOne = (apData[1] && sqlite3_value_type(apData[1])==SQLITE_INTEGER); bindArgZero = 1; if( bindArgOne ){ - string_concat(&z, " SET rowid=?1 ", 0); + string_concat(&z, " SET rowid=?1 ", 0, &rc); zSep = ","; } for(i=2; iaCol[i-2], i), 1); + "%s %Q=?%d", zSep, pVtab->aCol[i-2], i), 1, &rc); zSep = ","; } - string_concat(&z, sqlite3_mprintf(" WHERE rowid=?%d", nData), 0); + string_concat(&z, sqlite3_mprintf(" WHERE rowid=?%d", nData), 1, &rc); } /* If apData[0] is an integer and nData==1 then do a DELETE */ else if( nData==1 && sqlite3_value_type(apData[0])==SQLITE_INTEGER ){ z = sqlite3_mprintf("DELETE FROM %Q WHERE rowid = ?1", pVtab->zTableName); + if( !z ){ + rc = SQLITE_NOMEM; + } bindArgZero = 1; } @@ -867,24 +988,27 @@ char *zValues = 0; zInsert = sqlite3_mprintf("INSERT INTO %Q (", pVtab->zTableName); + if( !zInsert ){ + rc = SQLITE_NOMEM; + } if( sqlite3_value_type(apData[1])==SQLITE_INTEGER ){ bindArgOne = 1; zValues = sqlite3_mprintf("?"); - string_concat(&zInsert, "rowid", 0); + string_concat(&zInsert, "rowid", 0, &rc); } assert((pVtab->nCol+2)==nData); for(ii=2; iiaCol[ii-2]), 1); + sqlite3_mprintf("%s%Q", zValues?", ":"", pVtab->aCol[ii-2]), 1, &rc); string_concat(&zValues, - sqlite3_mprintf("%s?%d", zValues?", ":"", ii), 1); + sqlite3_mprintf("%s?%d", zValues?", ":"", ii), 1, &rc); } - string_concat(&z, zInsert, 1); - string_concat(&z, ") VALUES(", 0); - string_concat(&z, zValues, 1); - string_concat(&z, ")", 0); + string_concat(&z, zInsert, 1, &rc); + string_concat(&z, ") VALUES(", 0, &rc); + string_concat(&z, zValues, 1, &rc); + string_concat(&z, ")", 0, &rc); } /* Anything else is an error */ @@ -893,7 +1017,9 @@ return SQLITE_ERROR; } - rc = sqlite3_prepare(db, z, -1, &pStmt, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3_prepare(db, z, -1, &pStmt, 0); + } assert( rc!=SQLITE_OK || pStmt ); sqlite3_free(z); if( rc==SQLITE_OK ) { @@ -903,16 +1029,23 @@ if( bindArgOne ){ sqlite3_bind_value(pStmt, 1, apData[1]); } - for(i=2; izErrMsg = sqlite3_mprintf("echo-vtab-error: %s", sqlite3_errmsg(db)); + } return rc; } @@ -926,50 +1059,100 @@ char *z; echo_vtab *pVtab = (echo_vtab *)tab; z = sqlite3_mprintf("echo(%s)", pVtab->zTableName); + if( z==0 ) return SQLITE_NOMEM; appendToEchoModule(pVtab->interp, zCall); appendToEchoModule(pVtab->interp, z); sqlite3_free(z); return SQLITE_OK; } static int echoBegin(sqlite3_vtab *tab){ + int rc; echo_vtab *pVtab = (echo_vtab *)tab; Tcl_Interp *interp = pVtab->interp; const char *zVal; - echoTransactionCall(tab, "xBegin"); + /* Ticket #3083 - do not start a transaction if we are already in + ** a transaction */ + assert( !pVtab->inTransaction ); - /* Check if the $::echo_module_begin_fail variable is defined. If it is, - ** and it is set to the name of the real table underlying this virtual - ** echo module table, then cause this xSync operation to fail. 
- */ - zVal = Tcl_GetVar(interp, "echo_module_begin_fail", TCL_GLOBAL_ONLY); - if( zVal && 0==strcmp(zVal, pVtab->zTableName) ){ + if( simulateVtabError(pVtab, "xBegin") ){ return SQLITE_ERROR; } - return SQLITE_OK; + + rc = echoTransactionCall(tab, "xBegin"); + + if( rc==SQLITE_OK ){ + /* Check if the $::echo_module_begin_fail variable is defined. If it is, + ** and it is set to the name of the real table underlying this virtual + ** echo module table, then cause this xSync operation to fail. + */ + zVal = Tcl_GetVar(interp, "echo_module_begin_fail", TCL_GLOBAL_ONLY); + if( zVal && 0==strcmp(zVal, pVtab->zTableName) ){ + rc = SQLITE_ERROR; + } + } + if( rc==SQLITE_OK ){ + pVtab->inTransaction = 1; + } + return rc; } static int echoSync(sqlite3_vtab *tab){ + int rc; echo_vtab *pVtab = (echo_vtab *)tab; Tcl_Interp *interp = pVtab->interp; const char *zVal; - echoTransactionCall(tab, "xSync"); + /* Ticket #3083 - Only call xSync if we have previously started a + ** transaction */ + assert( pVtab->inTransaction ); - /* Check if the $::echo_module_sync_fail variable is defined. If it is, - ** and it is set to the name of the real table underlying this virtual - ** echo module table, then cause this xSync operation to fail. - */ - zVal = Tcl_GetVar(interp, "echo_module_sync_fail", TCL_GLOBAL_ONLY); - if( zVal && 0==strcmp(zVal, pVtab->zTableName) ){ - return -1; + if( simulateVtabError(pVtab, "xSync") ){ + return SQLITE_ERROR; } - return SQLITE_OK; + + rc = echoTransactionCall(tab, "xSync"); + + if( rc==SQLITE_OK ){ + /* Check if the $::echo_module_sync_fail variable is defined. If it is, + ** and it is set to the name of the real table underlying this virtual + ** echo module table, then cause this xSync operation to fail. + */ + zVal = Tcl_GetVar(interp, "echo_module_sync_fail", TCL_GLOBAL_ONLY); + if( zVal && 0==strcmp(zVal, pVtab->zTableName) ){ + rc = -1; + } + } + return rc; } static int echoCommit(sqlite3_vtab *tab){ - return echoTransactionCall(tab, "xCommit"); + echo_vtab *pVtab = (echo_vtab*)tab; + int rc; + + /* Ticket #3083 - Only call xCommit if we have previously started + ** a transaction */ + assert( pVtab->inTransaction ); + + if( simulateVtabError(pVtab, "xCommit") ){ + return SQLITE_ERROR; + } + + sqlite3BeginBenignMalloc(); + rc = echoTransactionCall(tab, "xCommit"); + sqlite3EndBenignMalloc(); + pVtab->inTransaction = 0; + return rc; } static int echoRollback(sqlite3_vtab *tab){ - return echoTransactionCall(tab, "xRollback"); + int rc; + echo_vtab *pVtab = (echo_vtab*)tab; + + /* Ticket #3083 - Only call xRollback if we have previously started + ** a transaction */ + assert( pVtab->inTransaction ); + + rc = echoTransactionCall(tab, "xRollback"); + pVtab->inTransaction = 0; + return rc; } /* @@ -1035,13 +1218,17 @@ int rc = SQLITE_OK; echo_vtab *p = (echo_vtab *)vtab; + if( simulateVtabError(p, "xRename") ){ + return SQLITE_ERROR; + } + if( p->isPattern ){ int nThis = strlen(p->zThis); - char *zSql = sqlite3MPrintf("ALTER TABLE %s RENAME TO %s%s", + char *zSql = sqlite3_mprintf("ALTER TABLE %s RENAME TO %s%s", p->zTableName, zNewName, &p->zTableName[nThis] ); rc = sqlite3_exec(p->db, zSql, 0, 0, 0); - sqliteFree(zSql); + sqlite3_free(zSql); } return rc; @@ -1077,9 +1264,10 @@ /* ** Decode a pointer to an sqlite3 object. 
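** (The shared getDbPointer() from test1.c is assumed here: the string may
** be either the name of a Tcl handle created with [sqlite3 db test.db],
** or a pointer value rendered as text, e.g. "0x1234abcd".)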
*/ -static int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb){ - *ppDb = (sqlite3*)sqlite3TextToPtr(zA); - return TCL_OK; +extern int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb); + +static void moduleDestroy(void *p){ + sqlite3_free(p); } /* @@ -1092,12 +1280,15 @@ Tcl_Obj *CONST objv[] /* Command arguments */ ){ sqlite3 *db; + EchoModule *pMod; if( objc!=2 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB"); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; - sqlite3_create_module(db, "echo", &echoModule, (void *)interp); + pMod = sqlite3_malloc(sizeof(EchoModule)); + pMod->interp = interp; + sqlite3_create_module_v2(db, "echo", &echoModule, (void*)pMod, moduleDestroy); return TCL_OK; } @@ -1133,20 +1324,20 @@ ** Register commands with the TCL interpreter. */ int Sqlitetest8_Init(Tcl_Interp *interp){ +#ifndef SQLITE_OMIT_VIRTUALTABLE static struct { char *zName; Tcl_ObjCmdProc *xProc; void *clientData; } aObjCmd[] = { -#ifndef SQLITE_OMIT_VIRTUALTABLE { "register_echo_module", register_echo_module, 0 }, { "sqlite3_declare_vtab", declare_vtab, 0 }, -#endif }; int i; for(i=0; i #include @@ -48,7 +47,7 @@ } rc = sqlite3_create_collation(db, "collate", 456, 0, 0); - if( rc!=SQLITE_ERROR ){ + if( rc!=SQLITE_MISUSE ){ sqlite3_close(db); zErrFunction = "sqlite3_create_collation"; goto error_out; @@ -115,6 +114,7 @@ ){ const char *zErrFunction = "N/A"; sqlite3 *db = 0; + sqlite3_stmt *pStmt; int rc; if( objc!=1 ){ @@ -132,50 +132,44 @@ } sqlite3_close(db); -#ifndef SQLITE_OMIT_UTF16 - rc = sqlite3_collation_needed16(db, 0, 0); - if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_collation_needed16"; - goto error_out; - } -#endif - - rc = sqlite3_collation_needed(db, 0, 0); - if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_collation_needed"; - goto error_out; - } - rc = sqlite3_create_collation(db, 0, 0, 0, 0); + rc = sqlite3_errcode(db); if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_create_collation"; + zErrFunction = "sqlite3_errcode"; goto error_out; } - rc = sqlite3_create_function(db, 0, 0, 0, 0, 0, 0, 0); + pStmt = (sqlite3_stmt*)1234; + rc = sqlite3_prepare(db, 0, 0, &pStmt, 0); if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_create_function"; + zErrFunction = "sqlite3_prepare"; goto error_out; } + assert( pStmt==0 ); /* Verify that pStmt is zeroed even on a MISUSE error */ - rc = sqlite3_busy_handler(db, 0, 0); + pStmt = (sqlite3_stmt*)1234; + rc = sqlite3_prepare_v2(db, 0, 0, &pStmt, 0); if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_busy_handler"; + zErrFunction = "sqlite3_prepare_v2"; goto error_out; } + assert( pStmt==0 ); - rc = sqlite3_errcode(db); +#ifndef SQLITE_OMIT_UTF16 + pStmt = (sqlite3_stmt*)1234; + rc = sqlite3_prepare16(db, 0, 0, &pStmt, 0); if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_busy_handler"; + zErrFunction = "sqlite3_prepare16"; goto error_out; } - -#ifndef SQLITE_OMIT_UTF16 - rc = sqlite3_prepare16(db, 0, 0, 0, 0); + assert( pStmt==0 ); + pStmt = (sqlite3_stmt*)1234; + rc = sqlite3_prepare16_v2(db, 0, 0, &pStmt, 0); if( rc!=SQLITE_MISUSE ){ - zErrFunction = "sqlite3_prepare16"; + zErrFunction = "sqlite3_prepare16_v2"; goto error_out; } + assert( pStmt==0 ); #endif return TCL_OK; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_async.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_async.c --- sqlite3-3.4.2/src/test_async.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/test_async.c 2009-06-25 12:24:38.000000000 +0100 @@ -10,1185 +10,85 @@ ** 
************************************************************************* ** -** This file contains an example implementation of an asynchronous IO -** backend for SQLite. +** $Id: test_async.c,v 1.62 2009/04/28 13:01:09 drh Exp $ ** -** WHAT IS ASYNCHRONOUS I/O? -** -** With asynchronous I/O, write requests are handled by a separate thread -** running in the background. This means that the thread that initiates -** a database write does not have to wait for (sometimes slow) disk I/O -** to occur. The write seems to happen very quickly, though in reality -** it is happening at its usual slow pace in the background. -** -** Asynchronous I/O appears to give better responsiveness, but at a price. -** You lose the Durable property. With the default I/O backend of SQLite, -** once a write completes, you know that the information you wrote is -** safely on disk. With the asynchronous I/O, this is no the case. If -** your program crashes or if you take a power lose after the database -** write but before the asynchronous write thread has completed, then the -** database change might never make it to disk and the next user of the -** database might not see your change. -** -** You lose Durability with asynchronous I/O, but you still retain the -** other parts of ACID: Atomic, Consistent, and Isolated. Many -** appliations get along fine without the Durablity. -** -** HOW IT WORKS -** -** Asynchronous I/O works by overloading the OS-layer disk I/O routines -** with modified versions that store the data to be written in queue of -** pending write operations. Look at the asyncEnable() subroutine to see -** how overloading works. Six os-layer routines are overloaded: -** -** sqlite3OsOpenReadWrite; -** sqlite3OsOpenReadOnly; -** sqlite3OsOpenExclusive; -** sqlite3OsDelete; -** sqlite3OsFileExists; -** sqlite3OsSyncDirectory; -** -** The original implementations of these routines are saved and are -** used by the writer thread to do the real I/O. The substitute -** implementations typically put the I/O operation on a queue -** to be handled later by the writer thread, though read operations -** must be handled right away, obviously. -** -** Asynchronous I/O is disabled by setting the os-layer interface routines -** back to their original values. -** -** LIMITATIONS -** -** This demonstration code is deliberately kept simple in order to keep -** the main ideas clear and easy to understand. Real applications that -** want to do asynchronous I/O might want to add additional capabilities. -** For example, in this demonstration if writes are happening at a steady -** stream that exceeds the I/O capability of the background writer thread, -** the queue of pending write operations will grow without bound until we -** run out of memory. Users of this technique may want to keep track of -** the quantity of pending writes and stop accepting new write requests -** when the buffer gets to be too big. +** This file contains a binding of the asynchronous IO extension interface +** (defined in ext/async/sqlite3async.h) to Tcl. */ -#include "sqliteInt.h" -#include "os.h" +#define TCL_THREADS #include -/* If the THREADSAFE macro is not set, assume that it is turned off. */ -#ifndef THREADSAFE -# define THREADSAFE 0 -#endif - -/* -** This test uses pthreads and hence only works on unix and with -** a threadsafe build of SQLite. It also requires that the redefinable -** I/O feature of SQLite be turned on. This feature is turned off by -** default. 
If a required element is missing, almost all of the code -** in this file is commented out. -*/ -#if OS_UNIX && THREADSAFE && defined(SQLITE_ENABLE_REDEF_IO) - -/* -** This demo uses pthreads. If you do not have a pthreads implementation -** for your operating system, you will need to recode the threading -** logic. -*/ -#include -#include - -/* Useful macros used in several places */ -#define MIN(x,y) ((x)<(y)?(x):(y)) -#define MAX(x,y) ((x)>(y)?(x):(y)) - -/* Forward references */ -typedef struct AsyncWrite AsyncWrite; -typedef struct AsyncFile AsyncFile; - -/* Enable for debugging */ -static int sqlite3async_trace = 0; -# define ASYNC_TRACE(X) if( sqlite3async_trace ) asyncTrace X -static void asyncTrace(const char *zFormat, ...){ - char *z; - va_list ap; - va_start(ap, zFormat); - z = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - fprintf(stderr, "[%d] %s", (int)pthread_self(), z); - sqlite3_free(z); -} - -/* -** THREAD SAFETY NOTES -** -** Basic rules: -** -** * Both read and write access to the global write-op queue must be -** protected by the async.queueMutex. -** -** * The file handles from the underlying system are assumed not to -** be thread safe. -** -** * See the last two paragraphs under "The Writer Thread" for -** an assumption to do with file-handle synchronization by the Os. -** -** File system operations (invoked by SQLite thread): -** -** xOpenXXX (three versions) -** xDelete -** xFileExists -** xSyncDirectory -** -** File handle operations (invoked by SQLite thread): -** -** asyncWrite, asyncClose, asyncTruncate, asyncSync, -** asyncSetFullSync, asyncOpenDirectory. -** -** The operations above add an entry to the global write-op list. They -** prepare the entry, acquire the async.queueMutex momentarily while -** list pointers are manipulated to insert the new entry, then release -** the mutex and signal the writer thread to wake up in case it happens -** to be asleep. -** -** -** asyncRead, asyncFileSize. -** -** Read operations. Both of these read from both the underlying file -** first then adjust their result based on pending writes in the -** write-op queue. So async.queueMutex is held for the duration -** of these operations to prevent other threads from changing the -** queue in mid operation. -** -** -** asyncLock, asyncUnlock, asyncLockState, asyncCheckReservedLock -** -** These primitives implement in-process locking using a hash table -** on the file name. Files are locked correctly for connections coming -** from the same process. But other processes cannot see these locks -** and will therefore not honor them. -** -** -** asyncFileHandle. -** -** The sqlite3OsFileHandle() function is currently only used when -** debugging the pager module. Unless sqlite3OsClose() is called on the -** file (shouldn't be possible for other reasons), the underlying -** implementations are safe to call without grabbing any mutex. So we just -** go ahead and call it no matter what any other threads are doing. -** -** -** asyncSeek. -** -** Calling this method just manipulates the AsyncFile.iOffset variable. -** Since this variable is never accessed by writer thread, this -** function does not require the mutex. Actual calls to OsSeek() take -** place just before OsWrite() or OsRead(), which are always protected by -** the mutex. -** -** The writer thread: -** -** The async.writerMutex is used to make sure only there is only -** a single writer thread running at a time. 
-** -** Inside the writer thread is a loop that works like this: -** -** WHILE (write-op list is not empty) -** Do IO operation at head of write-op list -** Remove entry from head of write-op list -** END WHILE -** -** The async.queueMutex is always held during the test, and when the entry is removed from the head -** of the write-op list. Sometimes it is held for the interim -** period (while the IO is performed), and sometimes it is -** relinquished. It is relinquished if (a) the IO op is an -** ASYNC_CLOSE or (b) when the file handle was opened, two of -** the underlying systems handles were opened on the same -** file-system entry. -** -** If condition (b) above is true, then one file-handle -** (AsyncFile.pBaseRead) is used exclusively by sqlite threads to read the -** file, the other (AsyncFile.pBaseWrite) by sqlite3_async_flush() -** threads to perform write() operations. This means that read -** operations are not blocked by asynchronous writes (although -** asynchronous writes may still be blocked by reads). -** -** This assumes that the OS keeps two handles open on the same file -** properly in sync. That is, any read operation that starts after a -** write operation on the same file system entry has completed returns -** data consistent with the write. We also assume that if one thread -** reads a file while another is writing it all bytes other than the -** ones actually being written contain valid data. -** -** If the above assumptions are not true, set the preprocessor symbol -** SQLITE_ASYNC_TWO_FILEHANDLES to 0. -*/ - -#ifndef SQLITE_ASYNC_TWO_FILEHANDLES -/* #define SQLITE_ASYNC_TWO_FILEHANDLES 0 */ -#define SQLITE_ASYNC_TWO_FILEHANDLES 1 -#endif - -/* -** State information is held in the static variable "async" defined -** as follows: -*/ -static struct TestAsyncStaticData { - pthread_mutex_t queueMutex; /* Mutex for access to write operation queue */ - pthread_mutex_t writerMutex; /* Prevents multiple writer threads */ - pthread_mutex_t lockMutex; /* For access to aLock hash table */ - pthread_cond_t queueSignal; /* For waking up sleeping writer thread */ - pthread_cond_t emptySignal; /* Notify when the write queue is empty */ - AsyncWrite *pQueueFirst; /* Next write operation to be processed */ - AsyncWrite *pQueueLast; /* Last write operation on the list */ - Hash aLock; /* Files locked */ - volatile int ioDelay; /* Extra delay between write operations */ - volatile int writerHaltWhenIdle; /* Writer thread halts when queue empty */ - volatile int writerHaltNow; /* Writer thread halts after next op */ - int ioError; /* True if an IO error has occured */ - int nFile; /* Number of open files (from sqlite pov) */ -} async = { - PTHREAD_MUTEX_INITIALIZER, - PTHREAD_MUTEX_INITIALIZER, - PTHREAD_MUTEX_INITIALIZER, - PTHREAD_COND_INITIALIZER, - PTHREAD_COND_INITIALIZER, -}; - -/* Possible values of AsyncWrite.op */ -#define ASYNC_NOOP 0 -#define ASYNC_WRITE 1 -#define ASYNC_SYNC 2 -#define ASYNC_TRUNCATE 3 -#define ASYNC_CLOSE 4 -#define ASYNC_OPENDIRECTORY 5 -#define ASYNC_SETFULLSYNC 6 -#define ASYNC_DELETE 7 -#define ASYNC_OPENEXCLUSIVE 8 -#define ASYNC_SYNCDIRECTORY 9 - -/* Names of opcodes. Used for debugging only. -** Make sure these stay in sync with the macros above! -*/ -static const char *azOpcodeName[] = { - "NOOP", "WRITE", "SYNC", "TRUNCATE", "CLOSE", - "OPENDIR", "SETFULLSYNC", "DELETE", "OPENEX", "SYNCDIR", -}; - -/* -** Entries on the write-op queue are instances of the AsyncWrite -** structure, defined here. 
-** -** The interpretation of the iOffset and nByte variables varies depending -** on the value of AsyncWrite.op: -** -** ASYNC_WRITE: -** iOffset -> Offset in file to write to. -** nByte -> Number of bytes of data to write (pointed to by zBuf). -** -** ASYNC_SYNC: -** iOffset -> Unused. -** nByte -> Value of "fullsync" flag to pass to sqlite3OsSync(). -** -** ASYNC_TRUNCATE: -** iOffset -> Size to truncate file to. -** nByte -> Unused. -** -** ASYNC_CLOSE: -** iOffset -> Unused. -** nByte -> Unused. -** -** ASYNC_OPENDIRECTORY: -** iOffset -> Unused. -** nByte -> Number of bytes of zBuf points to (directory name). -** -** ASYNC_SETFULLSYNC: -** iOffset -> Unused. -** nByte -> New value for the full-sync flag. -** -** -** ASYNC_DELETE: -** iOffset -> Unused. -** nByte -> Number of bytes of zBuf points to (file name). -** -** ASYNC_OPENEXCLUSIVE: -** iOffset -> Value of "delflag". -** nByte -> Number of bytes of zBuf points to (file name). -** -** -** For an ASYNC_WRITE operation, zBuf points to the data to write to the file. -** This space is sqliteMalloc()d along with the AsyncWrite structure in a -** single blob, so is deleted when sqliteFree() is called on the parent -** structure. -*/ -struct AsyncWrite { - AsyncFile *pFile; /* File to write data to or sync */ - int op; /* One of ASYNC_xxx etc. */ - i64 iOffset; /* See above */ - int nByte; /* See above */ - char *zBuf; /* Data to write to file (or NULL if op!=ASYNC_WRITE) */ - AsyncWrite *pNext; /* Next write operation (to any file) */ -}; - -/* -** The AsyncFile structure is a subclass of OsFile used for asynchronous IO. -*/ -struct AsyncFile { - IoMethod *pMethod; /* Must be first */ - i64 iOffset; /* Current seek() offset in file */ - char *zName; /* Underlying OS filename - used for debugging */ - int nName; /* Number of characters in zName */ - OsFile *pBaseRead; /* Read handle to the underlying Os file */ - OsFile *pBaseWrite; /* Write handle to the underlying Os file */ -}; +#ifdef SQLITE_ENABLE_ASYNCIO -/* -** Add an entry to the end of the global write-op list. pWrite should point -** to an AsyncWrite structure allocated using sqlite3OsMalloc(). The writer -** thread will call sqlite3OsFree() to free the structure after the specified -** operation has been completed. -** -** Once an AsyncWrite structure has been added to the list, it becomes the -** property of the writer thread and must not be read or modified by the -** caller. -*/ -static void addAsyncWrite(AsyncWrite *pWrite){ - /* We must hold the queue mutex in order to modify the queue pointers */ - pthread_mutex_lock(&async.queueMutex); - - /* Add the record to the end of the write-op queue */ - assert( !pWrite->pNext ); - if( async.pQueueLast ){ - assert( async.pQueueFirst ); - async.pQueueLast->pNext = pWrite; - }else{ - async.pQueueFirst = pWrite; - } - async.pQueueLast = pWrite; - ASYNC_TRACE(("PUSH %p (%s %s %d)\n", pWrite, azOpcodeName[pWrite->op], - pWrite->pFile ? pWrite->pFile->zName : "-", pWrite->iOffset)); - - if( pWrite->op==ASYNC_CLOSE ){ - async.nFile--; - if( async.nFile==0 ){ - async.ioError = SQLITE_OK; - } - } +#include "sqlite3async.h" +#include "sqlite3.h" +#include - /* Drop the queue mutex */ - pthread_mutex_unlock(&async.queueMutex); +/* From test1.c */ +const char *sqlite3TestErrorName(int); - /* The writer thread might have been idle because there was nothing - ** on the write-op queue for it to do. So wake it up. */ - pthread_cond_signal(&async.queueSignal); -} -/* -** Increment async.nFile in a thread-safe manner. 
-*/ -static void incrOpenFileCount(){ - /* We must hold the queue mutex in order to modify async.nFile */ - pthread_mutex_lock(&async.queueMutex); - if( async.nFile==0 ){ - async.ioError = SQLITE_OK; - } - async.nFile++; - pthread_mutex_unlock(&async.queueMutex); -} +struct TestAsyncGlobal { + int isInstalled; /* True when async VFS is installed */ +} testasync_g = { 0 }; -/* -** This is a utility function to allocate and populate a new AsyncWrite -** structure and insert it (via addAsyncWrite() ) into the global list. -*/ -static int addNewAsyncWrite( - AsyncFile *pFile, - int op, - i64 iOffset, - int nByte, - const char *zByte -){ - AsyncWrite *p; - if( op!=ASYNC_CLOSE && async.ioError ){ - return async.ioError; - } - p = sqlite3OsMalloc(sizeof(AsyncWrite) + (zByte?nByte:0)); - if( !p ){ - return SQLITE_NOMEM; - } - p->op = op; - p->iOffset = iOffset; - p->nByte = nByte; - p->pFile = pFile; - p->pNext = 0; - if( zByte ){ - p->zBuf = (char *)&p[1]; - memcpy(p->zBuf, zByte, nByte); - }else{ - p->zBuf = 0; - } - addAsyncWrite(p); - return SQLITE_OK; -} +TCL_DECLARE_MUTEX(testasync_g_writerMutex); /* -** Close the file. This just adds an entry to the write-op list, the file is -** not actually closed. +** sqlite3async_initialize PARENT-VFS ISDEFAULT */ -static int asyncClose(OsFile **pId){ - return addNewAsyncWrite((AsyncFile *)*pId, ASYNC_CLOSE, 0, 0, 0); -} - -/* -** Implementation of sqlite3OsWrite() for asynchronous files. Instead of -** writing to the underlying file, this function adds an entry to the end of -** the global AsyncWrite list. Either SQLITE_OK or SQLITE_NOMEM may be -** returned. -*/ -static int asyncWrite(OsFile *id, const void *pBuf, int amt){ - AsyncFile *pFile = (AsyncFile *)id; - int rc = addNewAsyncWrite(pFile, ASYNC_WRITE, pFile->iOffset, amt, pBuf); - pFile->iOffset += (i64)amt; - return rc; -} - -/* -** Truncate the file to nByte bytes in length. This just adds an entry to -** the write-op list, no IO actually takes place. -*/ -static int asyncTruncate(OsFile *id, i64 nByte){ - return addNewAsyncWrite((AsyncFile *)id, ASYNC_TRUNCATE, nByte, 0, 0); -} - -/* -** Open the directory identified by zName and associate it with the -** specified file. This just adds an entry to the write-op list, the -** directory is opened later by sqlite3_async_flush(). -*/ -static int asyncOpenDirectory(OsFile *id, const char *zName){ - AsyncFile *pFile = (AsyncFile *)id; - return addNewAsyncWrite(pFile, ASYNC_OPENDIRECTORY, 0, strlen(zName)+1,zName); -} - -/* -** Sync the file. This just adds an entry to the write-op list, the -** sync() is done later by sqlite3_async_flush(). -*/ -static int asyncSync(OsFile *id, int fullsync){ - return addNewAsyncWrite((AsyncFile *)id, ASYNC_SYNC, 0, fullsync, 0); -} - -/* -** Set (or clear) the full-sync flag on the underlying file. This operation -** is queued and performed later by sqlite3_async_flush(). -*/ -static void asyncSetFullSync(OsFile *id, int value){ - addNewAsyncWrite((AsyncFile *)id, ASYNC_SETFULLSYNC, 0, value, 0); -} - -/* -** Read data from the file. First we read from the filesystem, then adjust -** the contents of the buffer based on ASYNC_WRITE operations in the -** write-op queue. -** -** This method holds the mutex from start to finish. -*/ -static int asyncRead(OsFile *id, void *obuf, int amt){ - int rc = SQLITE_OK; - i64 filesize; - int nRead; - AsyncFile *pFile = (AsyncFile *)id; - OsFile *pBase = pFile->pBaseRead; - - /* If an I/O error has previously occurred on this file, then all - ** subsequent operations fail. 
- */ - if( async.ioError!=SQLITE_OK ){ - return async.ioError; - } - - /* Grab the write queue mutex for the duration of the call */ - pthread_mutex_lock(&async.queueMutex); - - if( pBase ){ - rc = sqlite3OsFileSize(pBase, &filesize); - if( rc!=SQLITE_OK ){ - goto asyncread_out; - } - rc = sqlite3OsSeek(pBase, pFile->iOffset); - if( rc!=SQLITE_OK ){ - goto asyncread_out; - } - nRead = MIN(filesize - pFile->iOffset, amt); - if( nRead>0 ){ - rc = sqlite3OsRead(pBase, obuf, nRead); - ASYNC_TRACE(("READ %s %d bytes at %d\n", pFile->zName, nRead, pFile->iOffset)); - } - } - - if( rc==SQLITE_OK ){ - AsyncWrite *p; - i64 iOffset = pFile->iOffset; /* Current seek offset */ - - for(p=async.pQueueFirst; p; p = p->pNext){ - if( p->pFile==pFile && p->op==ASYNC_WRITE ){ - int iBeginOut = (p->iOffset - iOffset); - int iBeginIn = -iBeginOut; - int nCopy; - - if( iBeginIn<0 ) iBeginIn = 0; - if( iBeginOut<0 ) iBeginOut = 0; - nCopy = MIN(p->nByte-iBeginIn, amt-iBeginOut); - - if( nCopy>0 ){ - memcpy(&((char *)obuf)[iBeginOut], &p->zBuf[iBeginIn], nCopy); - ASYNC_TRACE(("OVERREAD %d bytes at %d\n", nCopy, iBeginOut+iOffset)); - } - } - } - - pFile->iOffset += (i64)amt; - } - -asyncread_out: - pthread_mutex_unlock(&async.queueMutex); - return rc; -} - -/* -** Seek to the specified offset. This just adjusts the AsyncFile.iOffset -** variable - calling seek() on the underlying file is defered until the -** next read() or write() operation. -*/ -static int asyncSeek(OsFile *id, i64 offset){ - AsyncFile *pFile = (AsyncFile *)id; - pFile->iOffset = offset; - return SQLITE_OK; -} - -/* -** Read the size of the file. First we read the size of the file system -** entry, then adjust for any ASYNC_WRITE or ASYNC_TRUNCATE operations -** currently in the write-op list. -** -** This method holds the mutex from start to finish. -*/ -int asyncFileSize(OsFile *id, i64 *pSize){ - int rc = SQLITE_OK; - i64 s = 0; - OsFile *pBase; - - pthread_mutex_lock(&async.queueMutex); - - /* Read the filesystem size from the base file. If pBaseRead is NULL, this - ** means the file hasn't been opened yet. In this case all relevant data - ** must be in the write-op queue anyway, so we can omit reading from the - ** file-system. - */ - pBase = ((AsyncFile *)id)->pBaseRead; - if( pBase ){ - rc = sqlite3OsFileSize(pBase, &s); - } - - if( rc==SQLITE_OK ){ - AsyncWrite *p; - for(p=async.pQueueFirst; p; p = p->pNext){ - if( p->pFile==(AsyncFile *)id ){ - switch( p->op ){ - case ASYNC_WRITE: - s = MAX(p->iOffset + (i64)(p->nByte), s); - break; - case ASYNC_TRUNCATE: - s = MIN(s, p->iOffset); - break; - } - } - } - *pSize = s; - } - pthread_mutex_unlock(&async.queueMutex); - return rc; -} - -/* -** Return the operating system file handle. This is only used for debugging -** at the moment anyway. -*/ -static int asyncFileHandle(OsFile *id){ - return sqlite3OsFileHandle(((AsyncFile *)id)->pBaseRead); -} - -/* -** No disk locking is performed. We keep track of locks locally in -** the async.aLock hash table. Locking should appear to work the same -** as with standard (unmodified) SQLite as long as all connections -** come from this one process. Connections from external processes -** cannot see our internal hash table (obviously) and will thus not -** honor our locks. 
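(Stepping back to asyncRead() above for a moment: the byte-range arithmetic it uses to patch queued write data into the read buffer is easy to misread. Below is a self-contained sketch of that same overlay step under hypothetical names; it is an illustration only, not code from this patch.)

#include <string.h>

/* Copy the portion of a queued write (src/writeOff/writeLen) that falls
** inside the read window (dst/readOff/readAmt).  Mirrors the
** iBeginIn/iBeginOut/nCopy arithmetic in asyncRead() above. */
static void overlayPendingWrite(
  char *dst, long long readOff, int readAmt,
  const char *src, long long writeOff, int writeLen
){
  long long iBeginOut = writeOff - readOff;   /* first byte of dst to patch */
  long long iBeginIn  = -iBeginOut;           /* first byte of src to copy  */
  long long nCopy;
  if( iBeginIn<0 )  iBeginIn = 0;
  if( iBeginOut<0 ) iBeginOut = 0;
  nCopy = writeLen - iBeginIn;
  if( nCopy > readAmt - iBeginOut ) nCopy = readAmt - iBeginOut;
  if( nCopy>0 ){
    memcpy(&dst[iBeginOut], &src[iBeginIn], (size_t)nCopy);
  }
}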
-*/ -static int asyncLock(OsFile *id, int lockType){ - AsyncFile *pFile = (AsyncFile*)id; - ASYNC_TRACE(("LOCK %d (%s)\n", lockType, pFile->zName)); - pthread_mutex_lock(&async.lockMutex); - sqlite3HashInsert(&async.aLock, pFile->zName, pFile->nName, (void*)lockType); - pthread_mutex_unlock(&async.lockMutex); - return SQLITE_OK; -} -static int asyncUnlock(OsFile *id, int lockType){ - return asyncLock(id, lockType); -} - -/* -** This function is called when the pager layer first opens a database file -** and is checking for a hot-journal. -*/ -static int asyncCheckReservedLock(OsFile *id){ - AsyncFile *pFile = (AsyncFile*)id; - int rc; - pthread_mutex_lock(&async.lockMutex); - rc = (int)sqlite3HashFind(&async.aLock, pFile->zName, pFile->nName); - pthread_mutex_unlock(&async.lockMutex); - ASYNC_TRACE(("CHECK-LOCK %d (%s)\n", rc, pFile->zName)); - return rc>SHARED_LOCK; -} - -static int asyncSectorSize(OsFile *id){ - /* TODO: This is tricky to implement, as this backend might not have - ** an open file handle at this point. - */ - return 512; -} - -/* -** This is broken. But sqlite3OsLockState() is only used for testing anyway. -*/ -static int asyncLockState(OsFile *id){ - return SQLITE_OK; -} - -/* -** The following variables hold pointers to the original versions of -** OS-layer interface routines that are overloaded in order to create -** the asynchronous I/O backend. -*/ -static int (*xOrigOpenReadWrite)(const char*, OsFile**, int*) = 0; -static int (*xOrigOpenExclusive)(const char*, OsFile**, int) = 0; -static int (*xOrigOpenReadOnly)(const char*, OsFile**) = 0; -static int (*xOrigDelete)(const char*) = 0; -static int (*xOrigFileExists)(const char*) = 0; -static int (*xOrigSyncDirectory)(const char*) = 0; - -/* -** This routine does most of the work of opening a file and building -** the OsFile structure. -*/ -static int asyncOpenFile( - const char *zName, /* The name of the file to be opened */ - OsFile **pFile, /* Put the OsFile structure here */ - OsFile *pBaseRead, /* The real OsFile from the real I/O routine */ - int openForWriting /* Open a second file handle for writing if true */ -){ - int rc, i, n; - AsyncFile *p; - OsFile *pBaseWrite = 0; - - static IoMethod iomethod = { - asyncClose, - asyncOpenDirectory, - asyncRead, - asyncWrite, - asyncSeek, - asyncTruncate, - asyncSync, - asyncSetFullSync, - asyncFileHandle, - asyncFileSize, - asyncLock, - asyncUnlock, - asyncLockState, - asyncCheckReservedLock, - asyncSectorSize, - }; - - if( openForWriting && SQLITE_ASYNC_TWO_FILEHANDLES ){ - int dummy; - rc = xOrigOpenReadWrite(zName, &pBaseWrite, &dummy); - if( rc!=SQLITE_OK ){ - goto error_out; - } - } - - n = strlen(zName); - for(i=n-1; i>=0 && zName[i]!='/'; i--){} - p = (AsyncFile *)sqlite3OsMalloc(sizeof(AsyncFile) + n - i); - if( !p ){ - rc = SQLITE_NOMEM; - goto error_out; - } - memset(p, 0, sizeof(AsyncFile)); - p->zName = (char*)&p[1]; - strcpy(p->zName, &zName[i+1]); - p->nName = n - i; - p->pMethod = &iomethod; - p->pBaseRead = pBaseRead; - p->pBaseWrite = pBaseWrite; - - *pFile = (OsFile *)p; - return SQLITE_OK; - -error_out: - assert(!p); - sqlite3OsClose(&pBaseRead); - sqlite3OsClose(&pBaseWrite); - *pFile = 0; - return rc; -} - -/* -** The async-IO backends implementation of the three functions used to open -** a file (xOpenExclusive, xOpenReadWrite and xOpenReadOnly). Most of the -** work is done in function asyncOpenFile() - see above. 
-*/ -static int asyncOpenExclusive(const char *z, OsFile **ppFile, int delFlag){ - int rc = asyncOpenFile(z, ppFile, 0, 0); - if( rc==SQLITE_OK ){ - AsyncFile *pFile = (AsyncFile *)(*ppFile); - int nByte = strlen(z)+1; - i64 i = (i64)(delFlag); - rc = addNewAsyncWrite(pFile, ASYNC_OPENEXCLUSIVE, i, nByte, z); - if( rc!=SQLITE_OK ){ - sqlite3OsFree(pFile); - *ppFile = 0; - } - } - if( rc==SQLITE_OK ){ - incrOpenFileCount(); - } - return rc; -} -static int asyncOpenReadOnly(const char *z, OsFile **ppFile){ - OsFile *pBase = 0; - int rc = xOrigOpenReadOnly(z, &pBase); - if( rc==SQLITE_OK ){ - rc = asyncOpenFile(z, ppFile, pBase, 0); - } - if( rc==SQLITE_OK ){ - incrOpenFileCount(); - } - return rc; -} -static int asyncOpenReadWrite(const char *z, OsFile **ppFile, int *pReadOnly){ - OsFile *pBase = 0; - int rc = xOrigOpenReadWrite(z, &pBase, pReadOnly); - if( rc==SQLITE_OK ){ - rc = asyncOpenFile(z, ppFile, pBase, (*pReadOnly ? 0 : 1)); - } - if( rc==SQLITE_OK ){ - incrOpenFileCount(); - } - return rc; -} - -/* -** Implementation of sqlite3OsDelete. Add an entry to the end of the -** write-op queue to perform the delete. -*/ -static int asyncDelete(const char *z){ - return addNewAsyncWrite(0, ASYNC_DELETE, 0, strlen(z)+1, z); -} - -/* -** Implementation of sqlite3OsSyncDirectory. Add an entry to the end of the -** write-op queue to perform the directory sync. -*/ -static int asyncSyncDirectory(const char *z){ - return addNewAsyncWrite(0, ASYNC_SYNCDIRECTORY, 0, strlen(z)+1, z); -} - -/* -** Implementation of sqlite3OsFileExists. Return true if file 'z' exists -** in the file system. -** -** This method holds the mutex from start to finish. -*/ -static int asyncFileExists(const char *z){ - int ret; - AsyncWrite *p; - - pthread_mutex_lock(&async.queueMutex); - - /* See if the real file system contains the specified file. */ - ret = xOrigFileExists(z); - - for(p=async.pQueueFirst; p; p = p->pNext){ - if( p->op==ASYNC_DELETE && 0==strcmp(p->zBuf, z) ){ - ret = 0; - }else if( p->op==ASYNC_OPENEXCLUSIVE && 0==strcmp(p->zBuf, z) ){ - ret = 1; - } - } - - ASYNC_TRACE(("EXISTS: %s = %d\n", z, ret)); - pthread_mutex_unlock(&async.queueMutex); - return ret; -} - -/* -** Call this routine to enable or disable the -** asynchronous IO features implemented in this file. -** -** This routine is not even remotely threadsafe. Do not call -** this routine while any SQLite database connections are open. 
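(Before asyncEnable(), a brief aside on asyncFileExists() above: its answer from the real filesystem is overridden by queued operations, later queue entries taking precedence. The following miniature captures just that rule; types and names are illustrative, not taken from the patch.)

#include <string.h>

/* A queued DELETE hides a file that exists on disk; a queued
** OPENEXCLUSIVE makes a not-yet-created file appear to exist. */
typedef struct PendingOp PendingOp;
struct PendingOp { int isCreate; const char *zName; PendingOp *pNext; };

static int pendingExists(int fsExists, const PendingOp *pList, const char *z){
  int ret = fsExists;
  const PendingOp *p;
  for(p=pList; p; p=p->pNext){
    if( strcmp(p->zName, z)==0 ) ret = p->isCreate;  /* last match wins */
  }
  return ret;
}

So a file that was deleted and then re-created by a later queued open reports as existing, exactly as if the queue had already been flushed to disk.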
-*/ -static void asyncEnable(int enable){ - if( enable && xOrigOpenReadWrite==0 ){ - assert(sqlite3Os.xOpenReadWrite); - sqlite3HashInit(&async.aLock, SQLITE_HASH_BINARY, 1); - xOrigOpenReadWrite = sqlite3Os.xOpenReadWrite; - xOrigOpenReadOnly = sqlite3Os.xOpenReadOnly; - xOrigOpenExclusive = sqlite3Os.xOpenExclusive; - xOrigDelete = sqlite3Os.xDelete; - xOrigFileExists = sqlite3Os.xFileExists; - xOrigSyncDirectory = sqlite3Os.xSyncDirectory; - - sqlite3Os.xOpenReadWrite = asyncOpenReadWrite; - sqlite3Os.xOpenReadOnly = asyncOpenReadOnly; - sqlite3Os.xOpenExclusive = asyncOpenExclusive; - sqlite3Os.xDelete = asyncDelete; - sqlite3Os.xFileExists = asyncFileExists; - sqlite3Os.xSyncDirectory = asyncSyncDirectory; - assert(sqlite3Os.xOpenReadWrite); - } - if( !enable && xOrigOpenReadWrite!=0 ){ - assert(sqlite3Os.xOpenReadWrite); - sqlite3HashClear(&async.aLock); - sqlite3Os.xOpenReadWrite = xOrigOpenReadWrite; - sqlite3Os.xOpenReadOnly = xOrigOpenReadOnly; - sqlite3Os.xOpenExclusive = xOrigOpenExclusive; - sqlite3Os.xDelete = xOrigDelete; - sqlite3Os.xFileExists = xOrigFileExists; - sqlite3Os.xSyncDirectory = xOrigSyncDirectory; - - xOrigOpenReadWrite = 0; - xOrigOpenReadOnly = 0; - xOrigOpenExclusive = 0; - xOrigDelete = 0; - xOrigFileExists = 0; - xOrigSyncDirectory = 0; - assert(sqlite3Os.xOpenReadWrite); - } -} - -/* -** This procedure runs in a separate thread, reading messages off of the -** write queue and processing them one by one. -** -** If async.writerHaltNow is true, then this procedure exits -** after processing a single message. -** -** If async.writerHaltWhenIdle is true, then this procedure exits when -** the write queue is empty. -** -** If both of the above variables are false, this procedure runs -** indefinately, waiting for operations to be added to the write queue -** and processing them in the order in which they arrive. -** -** An artifical delay of async.ioDelay milliseconds is inserted before -** each write operation in order to simulate the effect of a slow disk. -** -** Only one instance of this procedure may be running at a time. -*/ -static void *asyncWriterThread(void *NotUsed){ - AsyncWrite *p = 0; - int rc = SQLITE_OK; - int holdingMutex = 0; - - if( pthread_mutex_trylock(&async.writerMutex) ){ - return 0; - } - while( async.writerHaltNow==0 ){ - OsFile *pBase = 0; - - if( !holdingMutex ){ - pthread_mutex_lock(&async.queueMutex); - } - while( (p = async.pQueueFirst)==0 ){ - pthread_cond_broadcast(&async.emptySignal); - if( async.writerHaltWhenIdle ){ - pthread_mutex_unlock(&async.queueMutex); - break; - }else{ - ASYNC_TRACE(("IDLE\n")); - pthread_cond_wait(&async.queueSignal, &async.queueMutex); - ASYNC_TRACE(("WAKEUP\n")); - } - } - if( p==0 ) break; - holdingMutex = 1; - - /* Right now this thread is holding the mutex on the write-op queue. - ** Variable 'p' points to the first entry in the write-op queue. In - ** the general case, we hold on to the mutex for the entire body of - ** the loop. - ** - ** However in the cases enumerated below, we relinquish the mutex, - ** perform the IO, and then re-request the mutex before removing 'p' from - ** the head of the write-op queue. The idea is to increase concurrency with - ** sqlite threads. - ** - ** * An ASYNC_CLOSE operation. - ** * An ASYNC_OPENEXCLUSIVE operation. For this one, we relinquish - ** the mutex, call the underlying xOpenExclusive() function, then - ** re-aquire the mutex before seting the AsyncFile.pBaseRead - ** variable. 
- ** * ASYNC_SYNC and ASYNC_WRITE operations, if - ** SQLITE_ASYNC_TWO_FILEHANDLES was set at compile time and two - ** file-handles are open for the particular file being "synced". - */ - if( async.ioError!=SQLITE_OK && p->op!=ASYNC_CLOSE ){ - p->op = ASYNC_NOOP; - } - if( p->pFile ){ - pBase = p->pFile->pBaseWrite; - if( - p->op==ASYNC_CLOSE || - p->op==ASYNC_OPENEXCLUSIVE || - (pBase && (p->op==ASYNC_SYNC || p->op==ASYNC_WRITE) ) - ){ - pthread_mutex_unlock(&async.queueMutex); - holdingMutex = 0; - } - if( !pBase ){ - pBase = p->pFile->pBaseRead; - } - } - - switch( p->op ){ - case ASYNC_NOOP: - break; - - case ASYNC_WRITE: - assert( pBase ); - ASYNC_TRACE(("WRITE %s %d bytes at %d\n", - p->pFile->zName, p->nByte, p->iOffset)); - rc = sqlite3OsSeek(pBase, p->iOffset); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pBase, (const void *)(p->zBuf), p->nByte); - } - break; - - case ASYNC_SYNC: - assert( pBase ); - ASYNC_TRACE(("SYNC %s\n", p->pFile->zName)); - rc = sqlite3OsSync(pBase, p->nByte); - break; - - case ASYNC_TRUNCATE: - assert( pBase ); - ASYNC_TRACE(("TRUNCATE %s to %d bytes\n", p->pFile->zName, p->iOffset)); - rc = sqlite3OsTruncate(pBase, p->iOffset); - break; - - case ASYNC_CLOSE: - ASYNC_TRACE(("CLOSE %s\n", p->pFile->zName)); - sqlite3OsClose(&p->pFile->pBaseWrite); - sqlite3OsClose(&p->pFile->pBaseRead); - sqlite3OsFree(p->pFile); - break; - - case ASYNC_OPENDIRECTORY: - assert( pBase ); - ASYNC_TRACE(("OPENDIR %s\n", p->zBuf)); - sqlite3OsOpenDirectory(pBase, p->zBuf); - break; - - case ASYNC_SETFULLSYNC: - assert( pBase ); - ASYNC_TRACE(("SETFULLSYNC %s %d\n", p->pFile->zName, p->nByte)); - sqlite3OsSetFullSync(pBase, p->nByte); - break; - - case ASYNC_DELETE: - ASYNC_TRACE(("DELETE %s\n", p->zBuf)); - rc = xOrigDelete(p->zBuf); - break; - - case ASYNC_SYNCDIRECTORY: - ASYNC_TRACE(("SYNCDIR %s\n", p->zBuf)); - rc = xOrigSyncDirectory(p->zBuf); - break; - - case ASYNC_OPENEXCLUSIVE: { - AsyncFile *pFile = p->pFile; - int delFlag = ((p->iOffset)?1:0); - OsFile *pBase = 0; - ASYNC_TRACE(("OPEN %s delFlag=%d\n", p->zBuf, delFlag)); - assert(pFile->pBaseRead==0 && pFile->pBaseWrite==0); - rc = xOrigOpenExclusive(p->zBuf, &pBase, delFlag); - assert( holdingMutex==0 ); - pthread_mutex_lock(&async.queueMutex); - holdingMutex = 1; - if( rc==SQLITE_OK ){ - pFile->pBaseRead = pBase; - } - break; - } - - default: assert(!"Illegal value for AsyncWrite.op"); - } - - /* If we didn't hang on to the mutex during the IO op, obtain it now - ** so that the AsyncWrite structure can be safely removed from the - ** global write-op queue. - */ - if( !holdingMutex ){ - pthread_mutex_lock(&async.queueMutex); - holdingMutex = 1; - } - /* ASYNC_TRACE(("UNLINK %p\n", p)); */ - if( p==async.pQueueLast ){ - async.pQueueLast = 0; - } - async.pQueueFirst = p->pNext; - sqlite3OsFree(p); - assert( holdingMutex ); - - /* An IO error has occured. We cannot report the error back to the - ** connection that requested the I/O since the error happened - ** asynchronously. The connection has already moved on. There - ** really is nobody to report the error to. - ** - ** The file for which the error occured may have been a database or - ** journal file. Regardless, none of the currently queued operations - ** associated with the same database should now be performed. Nor should - ** any subsequently requested IO on either a database or journal file - ** handle for the same database be accepted until the main database - ** file handle has been closed and reopened. 
- ** - ** Furthermore, no further IO should be queued or performed on any file - ** handle associated with a database that may have been part of a - ** multi-file transaction that included the database associated with - ** the IO error (i.e. a database ATTACHed to the same handle at some - ** point in time). - */ - if( rc!=SQLITE_OK ){ - async.ioError = rc; - } - - /* Drop the queue mutex before continuing to the next write operation - ** in order to give other threads a chance to work with the write queue. - */ - if( !async.pQueueFirst || !async.ioError ){ - sqlite3ApiExit(0, 0); - pthread_mutex_unlock(&async.queueMutex); - holdingMutex = 0; - if( async.ioDelay>0 ){ - sqlite3OsSleep(async.ioDelay); - }else{ - sched_yield(); - } - } - } - - pthread_mutex_unlock(&async.writerMutex); - return 0; -} - -/************************************************************************** -** The remaining code defines a Tcl interface for testing the asynchronous -** IO implementation in this file. -** -** To adapt the code to a non-TCL environment, delete or comment out -** the code that follows. -*/ - -/* -** sqlite3async_enable ?YES/NO? -** -** Enable or disable the asynchronous I/O backend. This command is -** not thread-safe. Do not call it while any database connections -** are open. -*/ -static int testAsyncEnable( +static int testAsyncInit( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ - if( objc!=1 && objc!=2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "?YES/NO?"); + const char *zParent; + int isDefault; + int rc; + + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "PARENT-VFS ISDEFAULT"); return TCL_ERROR; } - if( objc==1 ){ - Tcl_SetObjResult(interp, Tcl_NewBooleanObj(xOrigOpenReadWrite!=0)); - }else{ - int en; - if( Tcl_GetBooleanFromObj(interp, objv[1], &en) ) return TCL_ERROR; - asyncEnable(en); + zParent = Tcl_GetString(objv[1]); + if( !*zParent ) { + zParent = 0; } - return TCL_OK; -} - -/* -** sqlite3async_halt "now"|"idle"|"never" -** -** Set the conditions at which the writer thread will halt. -*/ -static int testAsyncHalt( - void * clientData, - Tcl_Interp *interp, - int objc, - Tcl_Obj *CONST objv[] -){ - const char *zCond; - if( objc!=2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "\"now\"|\"idle\"|\"never\""); + if( Tcl_GetBooleanFromObj(interp, objv[2], &isDefault) ){ return TCL_ERROR; } - zCond = Tcl_GetString(objv[1]); - if( strcmp(zCond, "now")==0 ){ - async.writerHaltNow = 1; - pthread_cond_broadcast(&async.queueSignal); - }else if( strcmp(zCond, "idle")==0 ){ - async.writerHaltWhenIdle = 1; - async.writerHaltNow = 0; - pthread_cond_broadcast(&async.queueSignal); - }else if( strcmp(zCond, "never")==0 ){ - async.writerHaltWhenIdle = 0; - async.writerHaltNow = 0; - }else{ - Tcl_AppendResult(interp, - "should be one of: \"now\", \"idle\", or \"never\"", (char*)0); + + rc = sqlite3async_initialize(zParent, isDefault); + if( rc!=SQLITE_OK ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3TestErrorName(rc), -1)); return TCL_ERROR; } return TCL_OK; } /* -** sqlite3async_delay ?MS? -** -** Query or set the number of milliseconds of delay in the writer -** thread after each write operation. The default is 0. By increasing -** the memory delay we can simulate the effect of slow disk I/O. 
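(For orientation, here is a condensed sketch of the consumer loop that asyncWriterThread() above implements. The real code additionally drops the queue mutex around blocking I/O for certain operations, latches I/O errors in async.ioError, and applies the configurable delay; everything below uses illustrative names and is not code from the patch.)

#include <pthread.h>

typedef struct Job Job;
struct Job { Job *pNext; };

static struct {
  pthread_mutex_t mutex;
  pthread_cond_t signal;
  Job *pFirst;
  int haltNow;
  int haltWhenIdle;
} wq = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0 };

static void *writerLoop(void *pUnused){
  (void)pUnused;
  pthread_mutex_lock(&wq.mutex);
  while( wq.haltNow==0 ){
    Job *p;
    while( (p = wq.pFirst)==0 ){
      if( wq.haltWhenIdle ) goto done;        /* queue empty and told to stop */
      pthread_cond_wait(&wq.signal, &wq.mutex);
    }
    /* ...perform the I/O described by p here... */
    wq.pFirst = p->pNext;                     /* unlink; the real code frees p */
  }
done:
  pthread_mutex_unlock(&wq.mutex);
  return 0;
}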
+** sqlite3async_shutdown */ -static int testAsyncDelay( +static int testAsyncShutdown( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ - if( objc!=1 && objc!=2 ){ - Tcl_WrongNumArgs(interp, 1, objv, "?MS?"); - return TCL_ERROR; - } - if( objc==1 ){ - Tcl_SetObjResult(interp, Tcl_NewIntObj(async.ioDelay)); - }else{ - int ioDelay; - if( Tcl_GetIntFromObj(interp, objv[1], &ioDelay) ) return TCL_ERROR; - async.ioDelay = ioDelay; - } + sqlite3async_shutdown(); return TCL_OK; } +static Tcl_ThreadCreateType tclWriterThread(ClientData pIsStarted){ + Tcl_MutexLock(&testasync_g_writerMutex); + *((int *)pIsStarted) = 1; + sqlite3async_run(); + Tcl_MutexUnlock(&testasync_g_writerMutex); + TCL_THREAD_CREATE_RETURN; +} + /* ** sqlite3async_start ** @@ -1200,14 +100,21 @@ int objc, Tcl_Obj *CONST objv[] ){ - pthread_t x; + volatile int isStarted = 0; + ClientData threadData = (ClientData)&isStarted; + + Tcl_ThreadId x; + const int nStack = TCL_THREAD_STACK_DEFAULT; + const int flags = TCL_THREAD_NOFLAGS; int rc; - rc = pthread_create(&x, 0, asyncWriterThread, 0); - if( rc ){ - Tcl_AppendResult(interp, "failed to create the thread", 0); + + rc = Tcl_CreateThread(&x, tclWriterThread, threadData, nStack, flags); + if( rc!=TCL_OK ){ + Tcl_AppendResult(interp, "Tcl_CreateThread() failed", 0); return TCL_ERROR; } - pthread_detach(x); + + while( isStarted==0 ) { /* Busy loop */ } return TCL_OK; } @@ -1225,31 +132,97 @@ int objc, Tcl_Obj *CONST objv[] ){ - int cnt = 10; - if( async.writerHaltNow==0 && async.writerHaltWhenIdle==0 ){ + int eCond; + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + sqlite3async_control(SQLITEASYNC_GET_HALT, &eCond); + if( eCond==SQLITEASYNC_HALT_NEVER ){ Tcl_AppendResult(interp, "would block forever", (char*)0); return TCL_ERROR; } - while( cnt-- && !pthread_mutex_trylock(&async.writerMutex) ){ - pthread_mutex_unlock(&async.writerMutex); - sched_yield(); + Tcl_MutexLock(&testasync_g_writerMutex); + Tcl_MutexUnlock(&testasync_g_writerMutex); + return TCL_OK; +} + +/* +** sqlite3async_control OPTION ?VALUE? 
+*/ +static int testAsyncControl( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc = SQLITE_OK; + int aeOpt[] = { SQLITEASYNC_HALT, SQLITEASYNC_DELAY, SQLITEASYNC_LOCKFILES }; + const char *azOpt[] = { "halt", "delay", "lockfiles", 0 }; + const char *az[] = { "never", "now", "idle", 0 }; + int iVal; + int eOpt; + + if( objc!=2 && objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "OPTION ?VALUE?"); + return TCL_ERROR; } - if( cnt>=0 ){ - ASYNC_TRACE(("WAIT\n")); - pthread_mutex_lock(&async.queueMutex); - pthread_cond_broadcast(&async.queueSignal); - pthread_mutex_unlock(&async.queueMutex); - pthread_mutex_lock(&async.writerMutex); - pthread_mutex_unlock(&async.writerMutex); + if( Tcl_GetIndexFromObj(interp, objv[1], azOpt, "option", 0, &eOpt) ){ + return TCL_ERROR; + } + eOpt = aeOpt[eOpt]; + + if( objc==3 ){ + switch( eOpt ){ + case SQLITEASYNC_HALT: { + assert( SQLITEASYNC_HALT_NEVER==0 ); + assert( SQLITEASYNC_HALT_NOW==1 ); + assert( SQLITEASYNC_HALT_IDLE==2 ); + if( Tcl_GetIndexFromObj(interp, objv[2], az, "value", 0, &iVal) ){ + return TCL_ERROR; + } + break; + } + case SQLITEASYNC_DELAY: + if( Tcl_GetIntFromObj(interp, objv[2], &iVal) ){ + return TCL_ERROR; + } + break; + + case SQLITEASYNC_LOCKFILES: + if( Tcl_GetBooleanFromObj(interp, objv[2], &iVal) ){ + return TCL_ERROR; + } + break; + } + + rc = sqlite3async_control(eOpt, iVal); + } + + if( rc==SQLITE_OK ){ + rc = sqlite3async_control( + eOpt==SQLITEASYNC_HALT ? SQLITEASYNC_GET_HALT : + eOpt==SQLITEASYNC_DELAY ? SQLITEASYNC_GET_DELAY : + SQLITEASYNC_GET_LOCKFILES, &iVal); + } + + if( rc!=SQLITE_OK ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3TestErrorName(rc), -1)); + return TCL_ERROR; + } + + if( eOpt==SQLITEASYNC_HALT ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(az[iVal], -1)); }else{ - ASYNC_TRACE(("NO-WAIT\n")); + Tcl_SetObjResult(interp, Tcl_NewIntObj(iVal)); } + return TCL_OK; } - -#endif /* OS_UNIX and THREADSAFE and defined(SQLITE_ENABLE_REDEF_IO) */ +#endif /* SQLITE_ENABLE_ASYNCIO */ /* ** This routine registers the custom TCL commands defined in this @@ -1257,14 +230,13 @@ ** of this module. */ int Sqlitetestasync_Init(Tcl_Interp *interp){ -#if OS_UNIX && THREADSAFE && defined(SQLITE_ENABLE_REDEF_IO) - Tcl_CreateObjCommand(interp,"sqlite3async_enable",testAsyncEnable,0,0); - Tcl_CreateObjCommand(interp,"sqlite3async_halt",testAsyncHalt,0,0); - Tcl_CreateObjCommand(interp,"sqlite3async_delay",testAsyncDelay,0,0); +#if SQLITE_ENABLE_ASYNCIO Tcl_CreateObjCommand(interp,"sqlite3async_start",testAsyncStart,0,0); Tcl_CreateObjCommand(interp,"sqlite3async_wait",testAsyncWait,0,0); - Tcl_LinkVar(interp, "sqlite3async_trace", - (char*)&sqlite3async_trace, TCL_LINK_INT); -#endif /* OS_UNIX and THREADSAFE and defined(SQLITE_ENABLE_REDEF_IO) */ + + Tcl_CreateObjCommand(interp,"sqlite3async_control",testAsyncControl,0,0); + Tcl_CreateObjCommand(interp,"sqlite3async_initialize",testAsyncInit,0,0); + Tcl_CreateObjCommand(interp,"sqlite3async_shutdown",testAsyncShutdown,0,0); +#endif /* SQLITE_ENABLE_ASYNCIO */ return TCL_OK; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_autoext.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_autoext.c --- sqlite3-3.4.2/src/test_autoext.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/test_autoext.c 2009-05-05 04:39:59.000000000 +0100 @@ -11,11 +11,12 @@ ************************************************************************* ** Test extension for testing the sqlite3_auto_extension() function. 
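(The interface under test here is sqlite3_auto_extension(). In outline, an auto extension is just an entry point registered once and then invoked against every new connection; the sketch below uses a hypothetical myext_init and mirrors the (void*) cast used by this file, but is not part of the patch.)

#include "sqlite3.h"

struct sqlite3_api_routines;   /* opaque here; fully defined in sqlite3ext.h */

/* Hypothetical entry point; sqr_init/cube_init in this file have the
** same shape but use the SQLITE_EXTENSION_INIT macros from sqlite3ext.h. */
static int myext_init(
  sqlite3 *db,
  char **pzErrMsg,
  const struct sqlite3_api_routines *pApi
){
  (void)db; (void)pzErrMsg; (void)pApi;
  /* ...create SQL functions on db here... */
  return SQLITE_OK;
}

/* Arrange for myext_init() to run against every new database connection. */
static int registerMyExt(void){
  return sqlite3_auto_extension((void*)myext_init);
}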
** -** $Id: test_autoext.c,v 1.2 2006/12/19 18:57:11 drh Exp $ +** $Id: test_autoext.c,v 1.5 2008/07/08 02:12:37 drh Exp $ */ #include "tcl.h" -#ifndef SQLITE_OMIT_LOAD_EXTENSION #include "sqlite3ext.h" + +#ifndef SQLITE_OMIT_LOAD_EXTENSION static SQLITE_EXTENSION_INIT1 /* @@ -94,7 +95,8 @@ int objc, Tcl_Obj *CONST objv[] ){ - sqlite3_auto_extension((void*)sqr_init); + int rc = sqlite3_auto_extension((void*)sqr_init); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); return SQLITE_OK; } @@ -109,7 +111,8 @@ int objc, Tcl_Obj *CONST objv[] ){ - sqlite3_auto_extension((void*)cube_init); + int rc = sqlite3_auto_extension((void*)cube_init); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); return SQLITE_OK; } @@ -124,10 +127,14 @@ int objc, Tcl_Obj *CONST objv[] ){ - sqlite3_auto_extension((void*)broken_init); + int rc = sqlite3_auto_extension((void*)broken_init); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); return SQLITE_OK; } +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + + /* ** tclcmd: sqlite3_reset_auto_extension ** @@ -144,8 +151,6 @@ } -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ - /* ** This procedure registers the TCL procs defined in this file. */ @@ -157,8 +162,8 @@ autoExtCubeObjCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_auto_extension_broken", autoExtBrokenObjCmd, 0, 0); +#endif Tcl_CreateObjCommand(interp, "sqlite3_reset_auto_extension", resetAutoExtObjCmd, 0, 0); -#endif return TCL_OK; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_backup.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_backup.c --- sqlite3-3.4.2/src/test_backup.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_backup.c 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,148 @@ +/* +** 2009 January 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** $Id: test_backup.c,v 1.3 2009/03/30 12:56:52 drh Exp $ +*/ + +#include "tcl.h" +#include +#include + +/* These functions are implemented in test1.c. 
*/ +int getDbPointer(Tcl_Interp *, const char *, sqlite3 **); +const char *sqlite3TestErrorName(int); + +static int backupTestCmd( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *const*objv +){ + enum BackupSubCommandEnum { + BACKUP_STEP, BACKUP_FINISH, BACKUP_REMAINING, BACKUP_PAGECOUNT + }; + struct BackupSubCommand { + const char *zCmd; + enum BackupSubCommandEnum eCmd; + int nArg; + const char *zArg; + } aSub[] = { + {"step", BACKUP_STEP , 1, "npage" }, + {"finish", BACKUP_FINISH , 0, "" }, + {"remaining", BACKUP_REMAINING , 0, "" }, + {"pagecount", BACKUP_PAGECOUNT , 0, "" }, + {0, 0, 0, 0} + }; + + sqlite3_backup *p = (sqlite3_backup *)clientData; + int iCmd; + int rc; + + rc = Tcl_GetIndexFromObjStruct( + interp, objv[1], aSub, sizeof(aSub[0]), "option", 0, &iCmd + ); + if( rc!=TCL_OK ){ + return rc; + } + if( objc!=(2 + aSub[iCmd].nArg) ){ + Tcl_WrongNumArgs(interp, 2, objv, aSub[iCmd].zArg); + return TCL_ERROR; + } + + switch( aSub[iCmd].eCmd ){ + + case BACKUP_FINISH: { + const char *zCmdName; + Tcl_CmdInfo cmdInfo; + zCmdName = Tcl_GetString(objv[0]); + Tcl_GetCommandInfo(interp, zCmdName, &cmdInfo); + cmdInfo.deleteProc = 0; + Tcl_SetCommandInfo(interp, zCmdName, &cmdInfo); + Tcl_DeleteCommand(interp, zCmdName); + + rc = sqlite3_backup_finish(p); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_STATIC); + break; + } + + case BACKUP_STEP: { + int nPage; + if( TCL_OK!=Tcl_GetIntFromObj(interp, objv[2], &nPage) ){ + return TCL_ERROR; + } + rc = sqlite3_backup_step(p, nPage); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_STATIC); + break; + } + + case BACKUP_REMAINING: + Tcl_SetObjResult(interp, Tcl_NewIntObj(sqlite3_backup_remaining(p))); + break; + + case BACKUP_PAGECOUNT: + Tcl_SetObjResult(interp, Tcl_NewIntObj(sqlite3_backup_pagecount(p))); + break; + } + + return TCL_OK; +} + +static void backupTestFinish(ClientData clientData){ + sqlite3_backup *pBackup = (sqlite3_backup *)clientData; + sqlite3_backup_finish(pBackup); +} + +/* +** sqlite3_backup CMDNAME DESTHANDLE DESTNAME SRCHANDLE SRCNAME +** +*/ +static int backupTestInit( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *const*objv +){ + sqlite3_backup *pBackup; + sqlite3 *pDestDb; + sqlite3 *pSrcDb; + const char *zDestName; + const char *zSrcName; + const char *zCmd; + + if( objc!=6 ){ + Tcl_WrongNumArgs( + interp, 1, objv, "CMDNAME DESTHANDLE DESTNAME SRCHANDLE SRCNAME" + ); + return TCL_ERROR; + } + + zCmd = Tcl_GetString(objv[1]); + getDbPointer(interp, Tcl_GetString(objv[2]), &pDestDb); + zDestName = Tcl_GetString(objv[3]); + getDbPointer(interp, Tcl_GetString(objv[4]), &pSrcDb); + zSrcName = Tcl_GetString(objv[5]); + + pBackup = sqlite3_backup_init(pDestDb, zDestName, pSrcDb, zSrcName); + if( !pBackup ){ + Tcl_AppendResult(interp, "sqlite3_backup_init() failed", 0); + return TCL_ERROR; + } + + Tcl_CreateObjCommand(interp, zCmd, backupTestCmd, pBackup, backupTestFinish); + Tcl_SetObjResult(interp, objv[1]); + return TCL_OK; +} + +int Sqlitetestbackup_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "sqlite3_backup", backupTestInit, 0, 0); + return TCL_OK; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_btree.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_btree.c --- sqlite3-3.4.2/src/test_btree.c 2007-05-08 12:27:16.000000000 +0100 +++ sqlite3-3.6.16/src/test_btree.c 2009-05-05 04:39:59.000000000 +0100 @@ -13,121 +13,12 @@ ** is not included in the SQLite library. It is used for automated ** testing of the SQLite library. 
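(Before moving on: the sqlite3_backup Tcl wrapper added above drives the standard init/step/finish life cycle of the online backup API. For reference, the same cycle written directly in C looks like the following; the helper name is hypothetical and error handling is minimal.)

#include "sqlite3.h"

/* Copy the "main" database of pSrc into pDest, five pages per step. */
static int copyDatabase(sqlite3 *pDest, sqlite3 *pSrc){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
  if( p==0 ) return sqlite3_errcode(pDest);
  do{
    rc = sqlite3_backup_step(p, 5);
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(250);
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  (void)sqlite3_backup_finish(p);
  return sqlite3_errcode(pDest);
}

The loop ends when sqlite3_backup_step() returns SQLITE_DONE (or a hard error); sqlite3_backup_finish() then releases the handle and the destination error code reports the overall result.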
** -** $Id: test_btree.c,v 1.2 2007/05/08 11:27:16 drh Exp $ +** $Id: test_btree.c,v 1.8 2008/09/29 11:49:48 danielk1977 Exp $ */ #include "btreeInt.h" #include /* -** Print a disassembly of the given page on standard output. This routine -** is used for debugging and testing only. -*/ -static int btreePageDump( - BtShared *pBt, /* The Btree to be dumped */ - int pgno, /* The page to be dumped */ - int recursive, /* True to decend into child pages */ - MemPage *pParent /* Parent page */ -){ - int rc; - MemPage *pPage; - int i, j, c; - int nFree; - u16 idx; - int hdr; - int nCell; - int isInit; - unsigned char *data; - char range[20]; - unsigned char payload[20]; - - rc = sqlite3BtreeGetPage(pBt, (Pgno)pgno, &pPage, 0); - isInit = pPage->isInit; - if( pPage->isInit==0 ){ - sqlite3BtreeInitPage(pPage, pParent); - } - if( rc ){ - return rc; - } - hdr = pPage->hdrOffset; - data = pPage->aData; - c = data[hdr]; - pPage->intKey = (c & (PTF_INTKEY|PTF_LEAFDATA))!=0; - pPage->zeroData = (c & PTF_ZERODATA)!=0; - pPage->leafData = (c & PTF_LEAFDATA)!=0; - pPage->leaf = (c & PTF_LEAF)!=0; - pPage->hasData = !(pPage->zeroData || (!pPage->leaf && pPage->leafData)); - nCell = get2byte(&data[hdr+3]); - sqlite3DebugPrintf("PAGE %d: flags=0x%02x frag=%d parent=%d\n", pgno, - data[hdr], data[hdr+7], - (pPage->isInit && pPage->pParent) ? pPage->pParent->pgno : 0); - assert( hdr == (pgno==1 ? 100 : 0) ); - idx = hdr + 12 - pPage->leaf*4; - for(i=0; ileaf ){ - child = 0; - }else{ - child = get4byte(pCell); - } - sz = info.nData; - if( !pPage->intKey ) sz += info.nKey; - if( sz>sizeof(payload)-1 ) sz = sizeof(payload)-1; - memcpy(payload, &pCell[info.nHeader], sz); - for(j=0; j0x7f ) payload[j] = '.'; - } - payload[sz] = 0; - sqlite3DebugPrintf( - "cell %2d: i=%-10s chld=%-4d nk=%-4lld nd=%-4d payload=%s\n", - i, range, child, info.nKey, info.nData, payload - ); - } - if( !pPage->leaf ){ - sqlite3DebugPrintf("right_child: %d\n", get4byte(&data[hdr+8])); - } - nFree = 0; - i = 0; - idx = get2byte(&data[hdr+1]); - while( idx>0 && idxpBt->usableSize ){ - int sz = get2byte(&data[idx+2]); - sqlite3_snprintf(sizeof(range),range,"%d..%d", idx, idx+sz-1); - nFree += sz; - sqlite3DebugPrintf("freeblock %2d: i=%-10s size=%-4d total=%d\n", - i, range, sz, nFree); - idx = get2byte(&data[idx]); - i++; - } - if( idx!=0 ){ - sqlite3DebugPrintf("ERROR: next freeblock index out of range: %d\n", idx); - } - if( recursive && !pPage->leaf ){ - for(i=0; iisInit = isInit; - sqlite3PagerUnref(pPage->pDbPage); - fflush(stdout); - return SQLITE_OK; -} -int sqlite3BtreePageDump(Btree *p, int pgno, int recursive){ - return btreePageDump(p->pBt, pgno, recursive, 0); -} - -/* ** Usage: sqlite3_shared_cache_report ** ** Return a list of file that are shared and the number of @@ -140,17 +31,15 @@ Tcl_Obj *CONST objv[] ){ #ifndef SQLITE_OMIT_SHARED_CACHE - const ThreadData *pTd = sqlite3ThreadDataReadOnly(); - if( pTd->useSharedData ){ - BtShared *pBt; - Tcl_Obj *pRet = Tcl_NewObj(); - for(pBt=pTd->pBtree; pBt; pBt=pBt->pNext){ - const char *zFile = sqlite3PagerFilename(pBt->pPager); - Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(zFile, -1)); - Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(pBt->nRef)); - } - Tcl_SetObjResult(interp, pRet); + extern BtShared *sqlite3SharedCacheList; + BtShared *pBt; + Tcl_Obj *pRet = Tcl_NewObj(); + for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ + const char *zFile = sqlite3PagerFilename(pBt->pPager); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(zFile, 
-1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(pBt->nRef)); } + Tcl_SetObjResult(interp, pRet); #endif return TCL_OK; } @@ -159,17 +48,19 @@ ** Print debugging information about all cursors to standard output. */ void sqlite3BtreeCursorList(Btree *p){ +#ifdef SQLITE_DEBUG BtCursor *pCur; BtShared *pBt = p->pBt; for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ - MemPage *pPage = pCur->pPage; + MemPage *pPage = pCur->apPage[pCur->iPage]; char *zMode = pCur->wrFlag ? "rw" : "ro"; sqlite3DebugPrintf("CURSOR %p rooted at %4d(%s) currently at %d.%d%s\n", pCur, pCur->pgnoRoot, zMode, - pPage ? pPage->pgno : 0, pCur->idx, + pPage ? pPage->pgno : 0, pCur->aiIdx[pCur->iPage], (pCur->eState==CURSOR_VALID) ? "" : " eof" ); } +#endif } @@ -192,13 +83,14 @@ ** This routine is used for testing and debugging only. */ int sqlite3BtreeCursorInfo(BtCursor *pCur, int *aResult, int upCnt){ +#if 0 int cnt, idx; - MemPage *pPage = pCur->pPage; + MemPage *pPage = pCur->apPage[pCur->iPage]; BtCursor tmpCur; int rc; if( pCur->eState==CURSOR_REQUIRESEEK ){ - rc = sqlite3BtreeRestoreOrClearCursorPosition(pCur); + rc = sqlite3BtreeRestoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } @@ -245,5 +137,6 @@ aResult[10] = 0; } sqlite3BtreeReleaseTempCursor(&tmpCur); +#endif return SQLITE_OK; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_config.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_config.c --- sqlite3-3.4.2/src/test_config.c 2007-08-13 16:18:28.000000000 +0100 +++ sqlite3-3.6.16/src/test_config.c 2009-06-25 12:45:58.000000000 +0100 @@ -16,15 +16,24 @@ ** The focus of this file is providing the TCL testing layer ** access to compile-time constants. ** -** $Id: test_config.c,v 1.7 2007/08/13 15:18:28 drh Exp $ +** $Id: test_config.c,v 1.50 2009/06/19 14:06:03 drh Exp $ */ + +#include "sqliteLimit.h" + #include "sqliteInt.h" #include "tcl.h" -#include "os.h" #include #include /* +** Macro to stringify the results of the evaluation a pre-processor +** macro. i.e. so that STRINGVALUE(SQLITE_NOMEM) -> "7". +*/ +#define STRINGVALUE2(x) #x +#define STRINGVALUE(x) STRINGVALUE2(x) + +/* ** This routine sets entries in the global ::sqlite_options() array variable ** according to the compile-time configuration of the database. Test ** procedures use this to determine when tests should be omitted. 
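(The two-step STRINGVALUE macro introduced above is the usual idiom for stringifying the value of a macro rather than its name, so that settings such as SQLITE_DEFAULT_AUTOVACUUM or SQLITE_THREADSAFE can be handed to Tcl_SetVar2() as strings. A minimal stand-alone illustration, with MY_OPTION as a made-up stand-in:)

#include <stdio.h>

#define STRINGVALUE2(x) #x
#define STRINGVALUE(x)  STRINGVALUE2(x)

#define MY_OPTION 7    /* stand-in for a numeric compile-time option */

int main(void){
  printf("%s\n", STRINGVALUE2(MY_OPTION));  /* prints: MY_OPTION */
  printf("%s\n", STRINGVALUE(MY_OPTION));   /* prints: 7 */
  return 0;
}

The single-step form stringifies the argument before macro expansion, yielding the macro's name; routing it through a second macro expands MY_OPTION to 7 first, which is what the configuration code above relies on.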
@@ -60,6 +69,30 @@ Tcl_SetVar2(interp, "sqlite_options", "lfs", "1", TCL_GLOBAL_ONLY); #endif +#if 1 /* def SQLITE_MEMDEBUG */ + Tcl_SetVar2(interp, "sqlite_options", "memdebug", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "memdebug", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_ENABLE_MEMSYS3 + Tcl_SetVar2(interp, "sqlite_options", "mem3", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "mem3", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_ENABLE_MEMSYS5 + Tcl_SetVar2(interp, "sqlite_options", "mem5", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "mem5", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_MUTEX_OMIT + Tcl_SetVar2(interp, "sqlite_options", "mutex", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "mutex", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_ALTERTABLE Tcl_SetVar2(interp, "sqlite_options", "altertable", "0", TCL_GLOBAL_ONLY); #else @@ -72,6 +105,12 @@ Tcl_SetVar2(interp, "sqlite_options", "analyze", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + Tcl_SetVar2(interp, "sqlite_options", "atomicwrite", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "atomicwrite", "0", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_ATTACH Tcl_SetVar2(interp, "sqlite_options", "attach", "0", TCL_GLOBAL_ONLY); #else @@ -95,10 +134,11 @@ #else Tcl_SetVar2(interp, "sqlite_options", "autovacuum", "1", TCL_GLOBAL_ONLY); #endif /* SQLITE_OMIT_AUTOVACUUM */ -#if !defined(SQLITE_DEFAULT_AUTOVACUUM) || SQLITE_DEFAULT_AUTOVACUUM==0 +#if !defined(SQLITE_DEFAULT_AUTOVACUUM) Tcl_SetVar2(interp,"sqlite_options","default_autovacuum","0",TCL_GLOBAL_ONLY); #else - Tcl_SetVar2(interp,"sqlite_options","default_autovacuum","1",TCL_GLOBAL_ONLY); + Tcl_SetVar2(interp, "sqlite_options", "default_autovacuum", + STRINGVALUE(SQLITE_DEFAULT_AUTOVACUUM), TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_BETWEEN_OPTIMIZATION @@ -107,6 +147,12 @@ Tcl_SetVar2(interp, "sqlite_options", "between_opt", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_BUILTIN_TEST + Tcl_SetVar2(interp, "sqlite_options", "builtin_test", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "builtin_test", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_BLOB_LITERAL Tcl_SetVar2(interp, "sqlite_options", "bloblit", "0", TCL_GLOBAL_ONLY); #else @@ -131,6 +177,16 @@ Tcl_SetVar2(interp, "sqlite_options", "columnmetadata", "0", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK + Tcl_SetVar2(interp, "sqlite_options", "oversize_cell_check", "1", + TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "oversize_cell_check", "0", + TCL_GLOBAL_ONLY); +#endif + + + #ifdef SQLITE_OMIT_COMPLETE Tcl_SetVar2(interp, "sqlite_options", "complete", "0", TCL_GLOBAL_ONLY); #else @@ -149,7 +205,7 @@ Tcl_SetVar2(interp, "sqlite_options", "conflict", "1", TCL_GLOBAL_ONLY); #endif -#if OS_UNIX +#if SQLITE_OS_UNIX Tcl_SetVar2(interp, "sqlite_options", "crashtest", "1", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "crashtest", "0", TCL_GLOBAL_ONLY); @@ -161,6 +217,18 @@ Tcl_SetVar2(interp, "sqlite_options", "datetime", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_DECLTYPE + Tcl_SetVar2(interp, "sqlite_options", "decltype", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "decltype", "1", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_OMIT_DEPRECATED + Tcl_SetVar2(interp, "sqlite_options", "deprecated", "0", TCL_GLOBAL_ONLY); +#else + 
Tcl_SetVar2(interp, "sqlite_options", "deprecated", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_DISKIO Tcl_SetVar2(interp, "sqlite_options", "diskio", "0", TCL_GLOBAL_ONLY); #else @@ -197,6 +265,18 @@ Tcl_SetVar2(interp, "sqlite_options", "fts2", "0", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_ENABLE_FTS3 + Tcl_SetVar2(interp, "sqlite_options", "fts3", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "fts3", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_OMIT_GET_TABLE + Tcl_SetVar2(interp, "sqlite_options", "gettable", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "gettable", "1", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_GLOBALRECOVER Tcl_SetVar2(interp, "sqlite_options", "globalrecover", "0", TCL_GLOBAL_ONLY); #else @@ -239,6 +319,18 @@ Tcl_SetVar2(interp, "sqlite_options", "load_ext", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_LOCALTIME + Tcl_SetVar2(interp, "sqlite_options", "localtime", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "localtime", "1", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_OMIT_LOOKASIDE + Tcl_SetVar2(interp, "sqlite_options", "lookaside", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "lookaside", "1", TCL_GLOBAL_ONLY); +#endif + Tcl_SetVar2(interp, "sqlite_options", "long_double", sizeof(LONGDOUBLE_TYPE)>sizeof(double) ? "1" : "0", TCL_GLOBAL_ONLY); @@ -267,12 +359,6 @@ Tcl_SetVar2(interp, "sqlite_options", "pager_pragmas", "1", TCL_GLOBAL_ONLY); #endif -#ifdef SQLITE_OMIT_PARSER - Tcl_SetVar2(interp, "sqlite_options", "parser", "0", TCL_GLOBAL_ONLY); -#else - Tcl_SetVar2(interp, "sqlite_options", "parser", "1", TCL_GLOBAL_ONLY); -#endif - #if defined(SQLITE_OMIT_PRAGMA) || defined(SQLITE_OMIT_FLAG_PRAGMAS) Tcl_SetVar2(interp, "sqlite_options", "pragma", "0", TCL_GLOBAL_ONLY); Tcl_SetVar2(interp, "sqlite_options", "integrityck", "0", TCL_GLOBAL_ONLY); @@ -286,18 +372,18 @@ Tcl_SetVar2(interp, "sqlite_options", "progress", "1", TCL_GLOBAL_ONLY); #endif -#ifdef SQLITE_ENABLE_REDEF_IO - Tcl_SetVar2(interp, "sqlite_options", "redefio", "1", TCL_GLOBAL_ONLY); -#else - Tcl_SetVar2(interp, "sqlite_options", "redefio", "0", TCL_GLOBAL_ONLY); -#endif - #ifdef SQLITE_OMIT_REINDEX Tcl_SetVar2(interp, "sqlite_options", "reindex", "0", TCL_GLOBAL_ONLY); #else Tcl_SetVar2(interp, "sqlite_options", "reindex", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_ENABLE_RTREE + Tcl_SetVar2(interp, "sqlite_options", "rtree", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "rtree", "0", TCL_GLOBAL_ONLY); +#endif + #ifdef SQLITE_OMIT_SCHEMA_PRAGMAS Tcl_SetVar2(interp, "sqlite_options", "schema_pragmas", "0", TCL_GLOBAL_ONLY); #else @@ -310,6 +396,25 @@ Tcl_SetVar2(interp, "sqlite_options", "schema_version", "1", TCL_GLOBAL_ONLY); #endif +#if !defined(SQLITE_ENABLE_LOCKING_STYLE) +# if defined(__APPLE__) +# define SQLITE_ENABLE_LOCKING_STYLE 1 +# else +# define SQLITE_ENABLE_LOCKING_STYLE 0 +# endif +#endif +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + Tcl_SetVar2(interp,"sqlite_options","lock_proxy_pragmas","1",TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp,"sqlite_options","lock_proxy_pragmas","0",TCL_GLOBAL_ONLY); +#endif +#if defined(SQLITE_PREFER_PROXY_LOCKING) && defined(__APPLE__) + Tcl_SetVar2(interp,"sqlite_options","prefer_proxy_locking","1",TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp,"sqlite_options","prefer_proxy_locking","0",TCL_GLOBAL_ONLY); +#endif + + #ifdef SQLITE_OMIT_SHARED_CACHE Tcl_SetVar2(interp, "sqlite_options", 
"shared_cache", "0", TCL_GLOBAL_ONLY); #else @@ -328,10 +433,14 @@ Tcl_SetVar2(interp, "sqlite_options", "tclvar", "1", TCL_GLOBAL_ONLY); #endif -#if defined(THREADSAFE) && THREADSAFE - Tcl_SetVar2(interp, "sqlite_options", "threadsafe", "1", TCL_GLOBAL_ONLY); + Tcl_SetVar2(interp, "sqlite_options", "threadsafe", + STRINGVALUE(SQLITE_THREADSAFE), TCL_GLOBAL_ONLY); + assert( sqlite3_threadsafe()==SQLITE_THREADSAFE ); + +#ifdef SQLITE_OMIT_TEMPDB + Tcl_SetVar2(interp, "sqlite_options", "tempdb", "0", TCL_GLOBAL_ONLY); #else - Tcl_SetVar2(interp, "sqlite_options", "threadsafe", "0", TCL_GLOBAL_ONLY); + Tcl_SetVar2(interp, "sqlite_options", "tempdb", "1", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_TRACE @@ -346,10 +455,10 @@ Tcl_SetVar2(interp, "sqlite_options", "trigger", "1", TCL_GLOBAL_ONLY); #endif -#ifdef SQLITE_OMIT_TEMPDB - Tcl_SetVar2(interp, "sqlite_options", "tempdb", "0", TCL_GLOBAL_ONLY); +#ifdef SQLITE_OMIT_TRUCATE_OPTIMIZATION + Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "0", TCL_GLOBAL_ONLY); #else - Tcl_SetVar2(interp, "sqlite_options", "tempdb", "1", TCL_GLOBAL_ONLY); + Tcl_SetVar2(interp, "sqlite_options", "truncate_opt", "1", TCL_GLOBAL_ONLY); #endif #ifdef SQLITE_OMIT_UTF16 @@ -376,91 +485,62 @@ Tcl_SetVar2(interp, "sqlite_options", "vtab", "1", TCL_GLOBAL_ONLY); #endif +#ifdef SQLITE_OMIT_WSD + Tcl_SetVar2(interp, "sqlite_options", "wsd", "0", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "wsd", "1", TCL_GLOBAL_ONLY); +#endif + +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) + Tcl_SetVar2(interp, "sqlite_options", "update_delete_limit", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "update_delete_limit", "0", TCL_GLOBAL_ONLY); +#endif + +#if defined(SQLITE_ENABLE_UNLOCK_NOTIFY) + Tcl_SetVar2(interp, "sqlite_options", "unlock_notify", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "unlock_notify", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef SQLITE_SECURE_DELETE + Tcl_SetVar2(interp, "sqlite_options", "secure_delete", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "secure_delete", "0", TCL_GLOBAL_ONLY); +#endif + +#ifdef YYTRACKMAXSTACKDEPTH + Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "1", TCL_GLOBAL_ONLY); +#else + Tcl_SetVar2(interp, "sqlite_options", "yytrackmaxstackdepth", "0", TCL_GLOBAL_ONLY); +#endif + +#define LINKVAR(x) { \ + static const int cv_ ## x = SQLITE_ ## x; \ + Tcl_LinkVar(interp, "SQLITE_" #x, (char *)&(cv_ ## x), \ + TCL_LINK_INT | TCL_LINK_READ_ONLY); } + + LINKVAR( MAX_LENGTH ); + LINKVAR( MAX_COLUMN ); + LINKVAR( MAX_SQL_LENGTH ); + LINKVAR( MAX_EXPR_DEPTH ); + LINKVAR( MAX_COMPOUND_SELECT ); + LINKVAR( MAX_VDBE_OP ); + LINKVAR( MAX_FUNCTION_ARG ); + LINKVAR( MAX_VARIABLE_NUMBER ); + LINKVAR( MAX_PAGE_SIZE ); + LINKVAR( MAX_PAGE_COUNT ); + LINKVAR( MAX_LIKE_PATTERN_LENGTH ); + LINKVAR( DEFAULT_TEMP_CACHE_SIZE ); + LINKVAR( DEFAULT_CACHE_SIZE ); + LINKVAR( DEFAULT_PAGE_SIZE ); + LINKVAR( DEFAULT_FILE_FORMAT ); + LINKVAR( MAX_ATTACHED ); + { - static int sqlite_max_length = SQLITE_MAX_LENGTH; - Tcl_LinkVar(interp, "SQLITE_MAX_LENGTH", - (char*)&sqlite_max_length, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_column = SQLITE_MAX_COLUMN; - Tcl_LinkVar(interp, "SQLITE_MAX_COLUMN", - (char*)&sqlite_max_column, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_sql_length = SQLITE_MAX_SQL_LENGTH; - Tcl_LinkVar(interp, "SQLITE_MAX_SQL_LENGTH", - (char*)&sqlite_max_sql_length, 
TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_expr_depth = SQLITE_MAX_EXPR_DEPTH; - Tcl_LinkVar(interp, "SQLITE_MAX_EXPR_DEPTH", - (char*)&sqlite_max_expr_depth, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_compound_select = SQLITE_MAX_COMPOUND_SELECT; - Tcl_LinkVar(interp, "SQLITE_MAX_COMPOUND_SELECT", - (char*)&sqlite_max_compound_select, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_vdbe_op = SQLITE_MAX_VDBE_OP; - Tcl_LinkVar(interp, "SQLITE_MAX_VDBE_OP", - (char*)&sqlite_max_vdbe_op, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_function_arg = SQLITE_MAX_FUNCTION_ARG; - Tcl_LinkVar(interp, "SQLITE_MAX_FUNCTION_ARG", - (char*)&sqlite_max_function_arg, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_default_temp_cache_size = SQLITE_DEFAULT_TEMP_CACHE_SIZE; - Tcl_LinkVar(interp, "SQLITE_DEFAULT_TEMP_CACHE_SIZE", - (char*)&sqlite_default_temp_cache_size, - TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_default_cache_size = SQLITE_DEFAULT_CACHE_SIZE; - Tcl_LinkVar(interp, "SQLITE_DEFAULT_CACHE_SIZE", - (char*)&sqlite_default_cache_size, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_variable_number = SQLITE_MAX_VARIABLE_NUMBER; - Tcl_LinkVar(interp, "SQLITE_MAX_VARIABLE_NUMBER", - (char*)&sqlite_max_variable_number, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_default_page_size = SQLITE_DEFAULT_PAGE_SIZE; - Tcl_LinkVar(interp, "SQLITE_DEFAULT_PAGE_SIZE", - (char*)&sqlite_default_page_size, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_page_size = SQLITE_MAX_PAGE_SIZE; - Tcl_LinkVar(interp, "SQLITE_MAX_PAGE_SIZE", - (char*)&sqlite_max_page_size, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_page_count = SQLITE_MAX_PAGE_COUNT; - Tcl_LinkVar(interp, "SQLITE_MAX_PAGE_COUNT", - (char*)&sqlite_max_page_count, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int temp_store = TEMP_STORE; - Tcl_LinkVar(interp, "TEMP_STORE", - (char*)&temp_store, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_default_file_format = SQLITE_DEFAULT_FILE_FORMAT; - Tcl_LinkVar(interp, "SQLITE_DEFAULT_FILE_FORMAT", - (char*)&sqlite_default_file_format, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_like_pattern = SQLITE_MAX_LIKE_PATTERN_LENGTH; - Tcl_LinkVar(interp, "SQLITE_MAX_LIKE_PATTERN_LENGTH", - (char*)&sqlite_max_like_pattern, TCL_LINK_INT|TCL_LINK_READ_ONLY); - } - { - static int sqlite_max_attached = SQLITE_MAX_ATTACHED; - Tcl_LinkVar(interp, "SQLITE_MAX_ATTACHED", - (char*)&sqlite_max_attached, TCL_LINK_INT|TCL_LINK_READ_ONLY); + static const int cv_TEMP_STORE = SQLITE_TEMP_STORE; + Tcl_LinkVar(interp, "TEMP_STORE", (char *)&(cv_TEMP_STORE), + TCL_LINK_INT | TCL_LINK_READ_ONLY); } } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_devsym.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_devsym.c --- sqlite3-3.4.2/src/test_devsym.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_devsym.c 2009-06-12 03:37:49.000000000 +0100 @@ -0,0 +1,357 @@ +/* +** 2008 Jan 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +****************************************************************************** +** +** This file contains code that modified the OS layer in order to simulate +** different device types (by overriding the return values of the +** xDeviceCharacteristics() and xSectorSize() methods). +** +** $Id: test_devsym.c,v 1.9 2008/12/09 01:32:03 drh Exp $ +*/ +#if SQLITE_TEST /* This file is used for testing only */ + +#include "sqlite3.h" +#include "sqliteInt.h" + +/* +** Maximum pathname length supported by the devsym backend. +*/ +#define DEVSYM_MAX_PATHNAME 512 + +/* +** Name used to identify this VFS. +*/ +#define DEVSYM_VFS_NAME "devsym" + +typedef struct devsym_file devsym_file; +struct devsym_file { + sqlite3_file base; + sqlite3_file *pReal; +}; + +/* +** Method declarations for devsym_file. +*/ +static int devsymClose(sqlite3_file*); +static int devsymRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int devsymWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int devsymTruncate(sqlite3_file*, sqlite3_int64 size); +static int devsymSync(sqlite3_file*, int flags); +static int devsymFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int devsymLock(sqlite3_file*, int); +static int devsymUnlock(sqlite3_file*, int); +static int devsymCheckReservedLock(sqlite3_file*, int *); +static int devsymFileControl(sqlite3_file*, int op, void *pArg); +static int devsymSectorSize(sqlite3_file*); +static int devsymDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for devsym_vfs. +*/ +static int devsymOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int devsymDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int devsymAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int devsymFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +#ifndef SQLITE_OMIT_LOAD_EXTENSION +static void *devsymDlOpen(sqlite3_vfs*, const char *zFilename); +static void devsymDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*devsymDlSym(sqlite3_vfs*,void*, const char *zSymbol))(void); +static void devsymDlClose(sqlite3_vfs*, void*); +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +static int devsymRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int devsymSleep(sqlite3_vfs*, int microseconds); +static int devsymCurrentTime(sqlite3_vfs*, double*); + +static sqlite3_vfs devsym_vfs = { + 1, /* iVersion */ + sizeof(devsym_file), /* szOsFile */ + DEVSYM_MAX_PATHNAME, /* mxPathname */ + 0, /* pNext */ + DEVSYM_VFS_NAME, /* zName */ + 0, /* pAppData */ + devsymOpen, /* xOpen */ + devsymDelete, /* xDelete */ + devsymAccess, /* xAccess */ + devsymFullPathname, /* xFullPathname */ +#ifndef SQLITE_OMIT_LOAD_EXTENSION + devsymDlOpen, /* xDlOpen */ + devsymDlError, /* xDlError */ + devsymDlSym, /* xDlSym */ + devsymDlClose, /* xDlClose */ +#else + 0, /* xDlOpen */ + 0, /* xDlError */ + 0, /* xDlSym */ + 0, /* xDlClose */ +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + devsymRandomness, /* xRandomness */ + devsymSleep, /* xSleep */ + devsymCurrentTime /* xCurrentTime */ +}; + +static sqlite3_io_methods devsym_io_methods = { + 1, /* iVersion */ + devsymClose, /* xClose */ + devsymRead, /* xRead */ + devsymWrite, /* xWrite */ + devsymTruncate, /* xTruncate */ + devsymSync, /* xSync */ + devsymFileSize, /* xFileSize */ + devsymLock, /* xLock */ + devsymUnlock, /* xUnlock */ + devsymCheckReservedLock, /* xCheckReservedLock */ + devsymFileControl, /* xFileControl */ + devsymSectorSize, /* xSectorSize */ + 
devsymDeviceCharacteristics /* xDeviceCharacteristics */ +}; + +struct DevsymGlobal { + sqlite3_vfs *pVfs; + int iDeviceChar; + int iSectorSize; +}; +struct DevsymGlobal g = {0, 0, 512}; + +/* +** Close an devsym-file. +*/ +static int devsymClose(sqlite3_file *pFile){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsClose(p->pReal); +} + +/* +** Read data from an devsym-file. +*/ +static int devsymRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); +} + +/* +** Write data to an devsym-file. +*/ +static int devsymWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); +} + +/* +** Truncate an devsym-file. +*/ +static int devsymTruncate(sqlite3_file *pFile, sqlite_int64 size){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsTruncate(p->pReal, size); +} + +/* +** Sync an devsym-file. +*/ +static int devsymSync(sqlite3_file *pFile, int flags){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsSync(p->pReal, flags); +} + +/* +** Return the current file-size of an devsym-file. +*/ +static int devsymFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsFileSize(p->pReal, pSize); +} + +/* +** Lock an devsym-file. +*/ +static int devsymLock(sqlite3_file *pFile, int eLock){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsLock(p->pReal, eLock); +} + +/* +** Unlock an devsym-file. +*/ +static int devsymUnlock(sqlite3_file *pFile, int eLock){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsUnlock(p->pReal, eLock); +} + +/* +** Check if another file-handle holds a RESERVED lock on an devsym-file. +*/ +static int devsymCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsCheckReservedLock(p->pReal, pResOut); +} + +/* +** File control method. For custom operations on an devsym-file. +*/ +static int devsymFileControl(sqlite3_file *pFile, int op, void *pArg){ + devsym_file *p = (devsym_file *)pFile; + return sqlite3OsFileControl(p->pReal, op, pArg); +} + +/* +** Return the sector-size in bytes for an devsym-file. +*/ +static int devsymSectorSize(sqlite3_file *pFile){ + return g.iSectorSize; +} + +/* +** Return the device characteristic flags supported by an devsym-file. +*/ +static int devsymDeviceCharacteristics(sqlite3_file *pFile){ + return g.iDeviceChar; +} + +/* +** Open an devsym file handle. +*/ +static int devsymOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + int rc; + devsym_file *p = (devsym_file *)pFile; + p->pReal = (sqlite3_file *)&p[1]; + rc = sqlite3OsOpen(g.pVfs, zName, p->pReal, flags, pOutFlags); + if( p->pReal->pMethods ){ + pFile->pMethods = &devsym_io_methods; + } + return rc; +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int devsymDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + return sqlite3OsDelete(g.pVfs, zPath, dirSync); +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. 
+*/ +static int devsymAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + return sqlite3OsAccess(g.pVfs, zPath, flags, pResOut); +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (DEVSYM_MAX_PATHNAME+1) bytes. +*/ +static int devsymFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + return sqlite3OsFullPathname(g.pVfs, zPath, nOut, zOut); +} + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *devsymDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return sqlite3OsDlOpen(g.pVfs, zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. +*/ +static void devsymDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + sqlite3OsDlError(g.pVfs, nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. +*/ +static void (*devsymDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ + return sqlite3OsDlSym(g.pVfs, p, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void devsymDlClose(sqlite3_vfs *pVfs, void *pHandle){ + sqlite3OsDlClose(g.pVfs, pHandle); +} +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int devsymRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return sqlite3OsRandomness(g.pVfs, nByte, zBufOut); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int devsymSleep(sqlite3_vfs *pVfs, int nMicro){ + return sqlite3OsSleep(g.pVfs, nMicro); +} + +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int devsymCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return sqlite3OsCurrentTime(g.pVfs, pTimeOut); +} + +/* +** This procedure registers the devsym vfs with SQLite. If the argument is +** true, the devsym vfs becomes the new default vfs. It is the only publicly +** available function in this file. +*/ +void devsym_register(int iDeviceChar, int iSectorSize){ + if( g.pVfs==0 ){ + g.pVfs = sqlite3_vfs_find(0); + devsym_vfs.szOsFile += g.pVfs->szOsFile; + sqlite3_vfs_register(&devsym_vfs, 0); + } + if( iDeviceChar>=0 ){ + g.iDeviceChar = iDeviceChar; + } + if( iSectorSize>=0 ){ + g.iSectorSize = iSectorSize; + } +} + +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_func.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_func.c --- sqlite3-3.4.2/src/test_func.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_func.c 2009-06-25 12:31:29.000000000 +0100 @@ -0,0 +1,471 @@ +/* +** 2008 March 19 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Code for testing all sorts of SQLite interfaces. This code +** implements new SQL functions used by the test scripts. 
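Before the new SQL functions, it is worth sketching how the devsym layer above is meant to be driven. The fragment below is a hypothetical C harness, not code from this patch: the test suite actually calls devsym_register() through Tcl glue, and the file name, wrapper function name and IOCAP flag chosen here are illustrative assumptions.

    #include "sqlite3.h"

    extern void devsym_register(int iDeviceChar, int iSectorSize);

    /* Hypothetical harness: pretend the underlying device has 4096-byte
    ** sectors and can write a 4KB block atomically, then open a database
    ** through the simulated device.  Passing a negative value to
    ** devsym_register() leaves the corresponding setting unchanged. */
    static int open_on_simulated_device(sqlite3 **ppDb){
      devsym_register(SQLITE_IOCAP_ATOMIC4K, 4096);

      /* devsym is registered as a non-default VFS, so it has to be
      ** requested by name when opening the connection. */
      return sqlite3_open_v2("test.db", ppDb,
                             SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
                             "devsym");
    }

The pager then sees whatever xSectorSize() and xDeviceCharacteristics() report and adapts its journalling accordingly, which is exactly the behaviour the tests want to vary.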
+** +** $Id: test_func.c,v 1.15 2009/05/07 13:43:49 drh Exp $ +*/ +#include "sqlite3.h" +#include "tcl.h" +#include +#include +#include + + +/* +** Allocate nByte bytes of space using sqlite3_malloc(). If the +** allocation fails, call sqlite3_result_error_nomem() to notify +** the database handle that malloc() has failed. +*/ +static void *testContextMalloc(sqlite3_context *context, int nByte){ + char *z = sqlite3_malloc(nByte); + if( !z && nByte>0 ){ + sqlite3_result_error_nomem(context); + } + return z; +} + +/* +** This function generates a string of random characters. Used for +** generating test data. +*/ +static void randStr(sqlite3_context *context, int argc, sqlite3_value **argv){ + static const unsigned char zSrc[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789" + ".-!,:*^+=_|?/<> "; + int iMin, iMax, n, r, i; + unsigned char zBuf[1000]; + + /* It used to be possible to call randstr() with any number of arguments, + ** but now it is registered with SQLite as requiring exactly 2. + */ + assert(argc==2); + + iMin = sqlite3_value_int(argv[0]); + if( iMin<0 ) iMin = 0; + if( iMin>=sizeof(zBuf) ) iMin = sizeof(zBuf)-1; + iMax = sqlite3_value_int(argv[1]); + if( iMax=sizeof(zBuf) ) iMax = sizeof(zBuf)-1; + n = iMin; + if( iMax>iMin ){ + sqlite3_randomness(sizeof(r), &r); + r &= 0x7fffffff; + n += r%(iMax + 1 - iMin); + } + assert( n=4 ){ memcpy(aNum, aOut, 4); }else{ memset(aNum, 0, sizeof(aNum)); memcpy(&aNum[4-nOut], aOut, nOut); } - free(aOut); + sqlite3_free(aOut); val = (aNum[0]<<24) | (aNum[1]<<16) | (aNum[2]<<8) | aNum[3]; Tcl_SetObjResult(interp, Tcl_NewIntObj(val)); return TCL_OK; @@ -244,7 +250,7 @@ if( Tcl_GetIntFromObj(interp, objv[1], &val) ) return TCL_ERROR; aNum[0] = val>>8; aNum[1] = val; - binToHex(aNum, 2); + sqlite3TestBinToHex(aNum, 2); Tcl_SetObjResult(interp, Tcl_NewStringObj((char*)aNum, 4)); return TCL_OK; } @@ -273,7 +279,7 @@ aNum[1] = val>>16; aNum[2] = val>>8; aNum[3] = val; - binToHex(aNum, 4); + sqlite3TestBinToHex(aNum, 4); Tcl_SetObjResult(interp, Tcl_NewStringObj((char*)aNum, 8)); return TCL_OK; } @@ -291,6 +297,7 @@ int objc, Tcl_Obj *CONST objv[] ){ +#ifdef SQLITE_DEBUG int n; int nOut; const unsigned char *zOrig; @@ -301,12 +308,13 @@ } zOrig = (unsigned char *)Tcl_GetStringFromObj(objv[1], &n); z = sqlite3_malloc( n+3 ); - n = hexToBin(zOrig, n, z); + n = sqlite3TestHexToBin(zOrig, n, z); z[n] = 0; nOut = sqlite3Utf8To8(z); - binToHex(z,nOut); + sqlite3TestBinToHex(z,nOut); Tcl_AppendResult(interp, (char*)z, 0); sqlite3_free(z); +#endif return TCL_OK; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_journal.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_journal.c --- sqlite3-3.4.2/src/test_journal.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_journal.c 2009-06-26 15:19:54.000000000 +0100 @@ -0,0 +1,837 @@ +/* +** 2008 Jan 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code for a VFS layer that acts as a wrapper around +** an existing VFS. The code in this file attempts to verify that SQLite +** correctly populates and syncs a journal file before writing to a +** corresponding database file. 
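As a rough sketch of how this checking layer is switched on around a test: the suite drives it through Tcl commands defined elsewhere in the tree, so the routine below and the statements it runs are assumptions for illustration only; jt_register() and jt_unregister() themselves are defined at the end of this file.

    #include "sqlite3.h"

    extern int jt_register(char *zWrap, int isDefault);
    extern void jt_unregister(void);

    static int run_with_journal_checks(void){
      sqlite3 *db = 0;
      int rc;

      /* Wrap the current default VFS (a NULL name makes sqlite3_vfs_find()
      ** inside jt_register() return the default) and install the wrapper as
      ** the new default, so every connection opened from here on is checked.
      ** jt_register() returns SQLITE_ERROR if no parent VFS is found. */
      if( jt_register(0, 1)!=SQLITE_OK ) return SQLITE_ERROR;

      rc = sqlite3_open("test.db", &db);
      if( rc==SQLITE_OK ){
        rc = sqlite3_exec(db,
            "CREATE TABLE t1(x); INSERT INTO t1 VALUES(1);", 0, 0, 0);
      }
      sqlite3_close(db);

      jt_unregister();   /* remove the wrapper again */
      return rc;
    }

Note the limitation stated below: the verification assumes the default synchronous behaviour, so a test using this wrapper must not run with "PRAGMA synchronous = off".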
+** +** $Id: test_journal.c,v 1.17 2009/06/26 10:39:36 danielk1977 Exp $ +*/ +#if SQLITE_TEST /* This file is used for testing only */ + +#include "sqlite3.h" +#include "sqliteInt.h" + +/* +** INTERFACE +** +** The public interface to this wrapper VFS is two functions: +** +** jt_register() +** jt_unregister() +** +** See header comments associated with those two functions below for +** details. +** +** LIMITATIONS +** +** This wrapper will not work if "PRAGMA synchronous = off" is used. +** +** OPERATION +** +** Starting a Transaction: +** +** When a write-transaction is started, the contents of the database is +** inspected and the following data stored as part of the database file +** handle (type struct jt_file): +** +** a) The page-size of the database file. +** b) The number of pages that are in the database file. +** c) The set of page numbers corresponding to free-list leaf pages. +** d) A check-sum for every page in the database file. +** +** The start of a write-transaction is deemed to have occurred when a +** 28-byte journal header is written to byte offset 0 of the journal +** file. +** +** Syncing the Journal File: +** +** Whenever the xSync method is invoked to sync a journal-file, the +** contents of the journal file are read. For each page written to +** the journal file, a check-sum is calculated and compared to the +** check-sum calculated for the corresponding database page when the +** write-transaction was initialized. The success of the comparison +** is assert()ed. So if SQLite has written something other than the +** original content to the database file, an assert() will fail. +** +** Additionally, the set of page numbers for which records exist in +** the journal file is added to (unioned with) the set of page numbers +** corresponding to free-list leaf pages collected when the +** write-transaction was initialized. This set comprises the page-numbers +** corresponding to those pages that SQLite may now safely modify. +** +** Writing to the Database File: +** +** When a block of data is written to a database file, the following +** invariants are asserted: +** +** a) That the block of data is an aligned block of page-size bytes. +** +** b) That if the page being written did not exist when the +** transaction was started (i.e. the database file is growing), then +** the journal-file must have been synced at least once since +** the start of the transaction. +** +** c) That if the page being written did exist when the transaction +** was started, then the page must have either been a free-list +** leaf page at the start of the transaction, or else must have +** been stored in the journal file prior to the most recent sync. +** +** Closing a Transaction: +** +** When a transaction is closed, all data collected at the start of +** the transaction, or following an xSync of a journal-file, is +** discarded. The end of a transaction is recognized when any one +** of the following occur: +** +** a) A block of zeroes (or anything else that is not a valid +** journal-header) is written to the start of the journal file. +** +** b) A journal file is truncated to zero bytes in size using xTruncate. +** +** c) The journal file is deleted using xDelete. +*/ + +/* +** Maximum pathname length supported by the jt backend. +*/ +#define JT_MAX_PATHNAME 512 + +/* +** Name used to identify this VFS. 
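To make the transaction-start trigger concrete: jtWrite() below treats a 28-byte write at offset 0 of the journal as the start of a transaction and pulls just two big-endian fields out of that header. The helper here is hypothetical, written as if it sat in this file after decodeUint32(), and the byte values in the comments are made up for illustration.

    static void demoParseJournalHeader(const unsigned char *zHdr /* 28 bytes */){
      u32 nPage     = decodeUint32(&zHdr[16]);  /* database size in pages when */
                                                /* the transaction started     */
      u32 nPagesize = decodeUint32(&zHdr[24]);  /* page size in bytes          */
      /* e.g. bytes 24..27 == 0x00 0x00 0x04 0x00  ->  nPagesize == 1024 */
      (void)nPage;
      (void)nPagesize;
    }

Those two values are what openTransaction() needs in order to size the pWritable bitvec and the aCksum checksum array.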
+*/ +#define JT_VFS_NAME "jt" + +typedef struct jt_file jt_file; +struct jt_file { + sqlite3_file base; + const char *zName; /* Name of open file */ + int flags; /* Flags the file was opened with */ + + /* The following are only used by database file file handles */ + int eLock; /* Current lock held on the file */ + u32 nPage; /* Size of file in pages when transaction started */ + u32 nPagesize; /* Page size when transaction started */ + Bitvec *pWritable; /* Bitvec of pages that may be written to the file */ + u32 *aCksum; /* Checksum for first nPage pages */ + int nSync; /* Number of times journal file has been synced */ + + /* Only used by journal file-handles */ + sqlite3_int64 iMaxOff; /* Maximum offset written to this transaction */ + + jt_file *pNext; /* All files are stored in a linked list */ + sqlite3_file *pReal; /* The file handle for the underlying vfs */ +}; + +/* +** Method declarations for jt_file. +*/ +static int jtClose(sqlite3_file*); +static int jtRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int jtWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int jtTruncate(sqlite3_file*, sqlite3_int64 size); +static int jtSync(sqlite3_file*, int flags); +static int jtFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int jtLock(sqlite3_file*, int); +static int jtUnlock(sqlite3_file*, int); +static int jtCheckReservedLock(sqlite3_file*, int *); +static int jtFileControl(sqlite3_file*, int op, void *pArg); +static int jtSectorSize(sqlite3_file*); +static int jtDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for jt_vfs. +*/ +static int jtOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int jtDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int jtAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int jtFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *jtDlOpen(sqlite3_vfs*, const char *zFilename); +static void jtDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*jtDlSym(sqlite3_vfs*,void*, const char *zSymbol))(void); +static void jtDlClose(sqlite3_vfs*, void*); +static int jtRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int jtSleep(sqlite3_vfs*, int microseconds); +static int jtCurrentTime(sqlite3_vfs*, double*); + +static sqlite3_vfs jt_vfs = { + 1, /* iVersion */ + sizeof(jt_file), /* szOsFile */ + JT_MAX_PATHNAME, /* mxPathname */ + 0, /* pNext */ + JT_VFS_NAME, /* zName */ + 0, /* pAppData */ + jtOpen, /* xOpen */ + jtDelete, /* xDelete */ + jtAccess, /* xAccess */ + jtFullPathname, /* xFullPathname */ + jtDlOpen, /* xDlOpen */ + jtDlError, /* xDlError */ + jtDlSym, /* xDlSym */ + jtDlClose, /* xDlClose */ + jtRandomness, /* xRandomness */ + jtSleep, /* xSleep */ + jtCurrentTime /* xCurrentTime */ +}; + +static sqlite3_io_methods jt_io_methods = { + 1, /* iVersion */ + jtClose, /* xClose */ + jtRead, /* xRead */ + jtWrite, /* xWrite */ + jtTruncate, /* xTruncate */ + jtSync, /* xSync */ + jtFileSize, /* xFileSize */ + jtLock, /* xLock */ + jtUnlock, /* xUnlock */ + jtCheckReservedLock, /* xCheckReservedLock */ + jtFileControl, /* xFileControl */ + jtSectorSize, /* xSectorSize */ + jtDeviceCharacteristics /* xDeviceCharacteristics */ +}; + +struct JtGlobal { + sqlite3_vfs *pVfs; /* Parent VFS */ + jt_file *pList; /* List of all open files */ +}; +static struct JtGlobal g = {0, 0}; + +/* +** Functions to obtain and relinquish a mutex to protect g.pList. 
The +** STATIC_PRNG mutex is reused, purely for the sake of convenience. +*/ +static void enterJtMutex(void){ + sqlite3_mutex_enter(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PRNG)); +} +static void leaveJtMutex(void){ + sqlite3_mutex_leave(sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PRNG)); +} + +extern int sqlite3_io_error_pending; +extern int sqlite3_io_error_hit; +static void stop_ioerr_simulation(int *piSave, int *piSave2){ + *piSave = sqlite3_io_error_pending; + *piSave2 = sqlite3_io_error_hit; + sqlite3_io_error_pending = -1; + sqlite3_io_error_hit = 0; +} +static void start_ioerr_simulation(int iSave, int iSave2){ + sqlite3_io_error_pending = iSave; + sqlite3_io_error_hit = iSave2; +} + +/* +** The jt_file pointed to by the argument may or may not be a file-handle +** open on a main database file. If it is, and a transaction is currently +** opened on the file, then discard all transaction related data. +*/ +static void closeTransaction(jt_file *p){ + sqlite3BitvecDestroy(p->pWritable); + sqlite3_free(p->aCksum); + p->pWritable = 0; + p->aCksum = 0; + p->nSync = 0; +} + +/* +** Close an jt-file. +*/ +static int jtClose(sqlite3_file *pFile){ + jt_file **pp; + jt_file *p = (jt_file *)pFile; + + closeTransaction(p); + enterJtMutex(); + if( p->zName ){ + for(pp=&g.pList; *pp!=p; pp=&(*pp)->pNext); + *pp = p->pNext; + } + leaveJtMutex(); + return sqlite3OsClose(p->pReal); +} + +/* +** Read data from an jt-file. +*/ +static int jtRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); +} + +/* +** Parameter zJournal is the name of a journal file that is currently +** open. This function locates and returns the handle opened on the +** corresponding database file by the pager that currently has the +** journal file opened. This file-handle is identified by the +** following properties: +** +** a) SQLITE_OPEN_MAIN_DB was specified when the file was opened. +** +** b) The file-name specified when the file was opened matches +** all but the final 8 characters of the journal file name. +** +** c) There is currently a reserved lock on the file. +**/ +static jt_file *locateDatabaseHandle(const char *zJournal){ + jt_file *pMain = 0; + enterJtMutex(); + for(pMain=g.pList; pMain; pMain=pMain->pNext){ + int nName = strlen(zJournal) - strlen("-journal"); + if( (pMain->flags&SQLITE_OPEN_MAIN_DB) + && (strlen(pMain->zName)==nName) + && 0==memcmp(pMain->zName, zJournal, nName) + && (pMain->eLock>=SQLITE_LOCK_RESERVED) + ){ + break; + } + } + leaveJtMutex(); + return pMain; +} + +/* +** Parameter z points to a buffer of 4 bytes in size containing a +** unsigned 32-bit integer stored in big-endian format. Decode the +** integer and return its value. +*/ +static u32 decodeUint32(const unsigned char *z){ + return (z[0]<<24) + (z[1]<<16) + (z[2]<<8) + z[3]; +} + +/* +** Calculate a checksum from the buffer of length n bytes pointed to +** by parameter z. +*/ +static u32 genCksum(const unsigned char *z, int n){ + int i; + u32 cksum = 0; + for(i=0; ipReal; + int rc = SQLITE_OK; + + aData = sqlite3_malloc(pMain->nPagesize); + pMain->pWritable = sqlite3BitvecCreate(pMain->nPage); + pMain->aCksum = sqlite3_malloc(sizeof(u32) * (pMain->nPage + 1)); + pJournal->iMaxOff = 0; + + if( !pMain->pWritable || !pMain->aCksum || !aData ){ + rc = SQLITE_IOERR_NOMEM; + }else if( pMain->nPage>0 ){ + u32 iTrunk; + int iSave; + int iSave2; + + stop_ioerr_simulation(&iSave, &iSave2); + + /* Read the database free-list. 
Add the page-number for each free-list + ** leaf to the jt_file.pWritable bitvec. + */ + rc = sqlite3OsRead(p, aData, pMain->nPagesize, 0); + iTrunk = decodeUint32(&aData[32]); + while( rc==SQLITE_OK && iTrunk>0 ){ + u32 nLeaf; + u32 iLeaf; + sqlite3_int64 iOff = (iTrunk-1)*pMain->nPagesize; + rc = sqlite3OsRead(p, aData, pMain->nPagesize, iOff); + nLeaf = decodeUint32(&aData[4]); + for(iLeaf=0; rc==SQLITE_OK && iLeafpWritable, pgno); + } + iTrunk = decodeUint32(aData); + } + + /* Calculate and store a checksum for each page in the database file. */ + if( rc==SQLITE_OK ){ + int ii; + for(ii=0; rc==SQLITE_OK && iinPage; ii++){ + i64 iOff = (i64)(pMain->nPagesize) * (i64)ii; + if( iOff==PENDING_BYTE ) continue; + rc = sqlite3OsRead(pMain->pReal, aData, pMain->nPagesize, iOff); + pMain->aCksum[ii] = genCksum(aData, pMain->nPagesize); + } + } + + start_ioerr_simulation(iSave, iSave2); + } + + sqlite3_free(aData); + return rc; +} + +/* +** The first argument to this function is a handle open on a journal file. +** This function reads the journal file and adds the page number for each +** page in the journal to the Bitvec object passed as the second argument. +*/ +static int readJournalFile(jt_file *p, jt_file *pMain){ + int rc = SQLITE_OK; + unsigned char zBuf[28]; + sqlite3_file *pReal = p->pReal; + sqlite3_int64 iOff = 0; + sqlite3_int64 iSize = p->iMaxOff; + unsigned char *aPage; + int iSave; + int iSave2; + + aPage = sqlite3_malloc(pMain->nPagesize); + if( !aPage ){ + return SQLITE_IOERR_NOMEM; + } + + stop_ioerr_simulation(&iSave, &iSave2); + + while( rc==SQLITE_OK && iOff=(iOff+nSector) ){ + rc = sqlite3OsRead(pReal, zBuf, 28, iOff); + if( rc!=SQLITE_OK || 0==decodeJournalHdr(zBuf, 0, 0, 0, 0) ){ + continue; + } + } + nRec = (iSize-iOff) / (pMain->nPagesize+8); + } + + /* Read all the records that follow the journal-header just read. */ + for(ii=0; rc==SQLITE_OK && ii0 && pgno<=pMain->nPage ){ + if( 0==sqlite3BitvecTest(pMain->pWritable, pgno) ){ + rc = sqlite3OsRead(pReal, aPage, pMain->nPagesize, iOff+4); + if( rc==SQLITE_OK ){ + u32 cksum = genCksum(aPage, pMain->nPagesize); + assert( cksum==pMain->aCksum[pgno-1] ); + } + } + sqlite3BitvecSet(pMain->pWritable, pgno); + } + iOff += (8 + pMain->nPagesize); + } + } + + iOff = ((iOff + (nSector-1)) / nSector) * nSector; + } + +finish_rjf: + start_ioerr_simulation(iSave, iSave2); + sqlite3_free(aPage); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + return rc; +} + + +/* +** Write data to an jt-file. +*/ +static int jtWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + int rc; + jt_file *p = (jt_file *)pFile; + if( p->flags&SQLITE_OPEN_MAIN_JOURNAL ){ + if( iOfst==0 ){ + jt_file *pMain = locateDatabaseHandle(p->zName); + assert( pMain ); + + if( iAmt==28 ){ + /* Zeroing the first journal-file header. This is the end of a + ** transaction. */ + closeTransaction(pMain); + }else if( iAmt!=12 ){ + /* Writing the first journal header to a journal file. This happens + ** when a transaction is first started. */ + u8 *z = (u8 *)zBuf; + pMain->nPage = decodeUint32(&z[16]); + pMain->nPagesize = decodeUint32(&z[24]); + if( SQLITE_OK!=(rc=openTransaction(pMain, p)) ){ + return rc; + } + } + } + if( p->iMaxOff<(iOfst + iAmt) ){ + p->iMaxOff = iOfst + iAmt; + } + } + + if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ + if( iAmtnPagesize + && p->nPagesize%iAmt==0 + && iOfst>=(PENDING_BYTE+512) + && iOfst+iAmt<=PENDING_BYTE+p->nPagesize + ){ + /* No-op. 
This special case is hit when the backup code is copying a + ** to a database with a larger page-size than the source database and + ** it needs to fill in the non-locking-region part of the original + ** pending-byte page. + */ + }else{ + u32 pgno = iOfst/p->nPagesize + 1; + assert( (iAmt==1||iAmt==p->nPagesize) && ((iOfst+iAmt)%p->nPagesize)==0 ); + assert( pgno<=p->nPage || p->nSync>0 ); + assert( pgno>p->nPage || sqlite3BitvecTest(p->pWritable, pgno) ); + } + } + + rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); + if( (p->flags&SQLITE_OPEN_MAIN_JOURNAL) && iAmt==12 ){ + jt_file *pMain = locateDatabaseHandle(p->zName); + int rc2 = readJournalFile(p, pMain); + if( rc==SQLITE_OK ) rc = rc2; + } + return rc; +} + +/* +** Truncate an jt-file. +*/ +static int jtTruncate(sqlite3_file *pFile, sqlite_int64 size){ + jt_file *p = (jt_file *)pFile; + if( p->flags&SQLITE_OPEN_MAIN_JOURNAL && size==0 ){ + /* Truncating a journal file. This is the end of a transaction. */ + jt_file *pMain = locateDatabaseHandle(p->zName); + closeTransaction(pMain); + } + if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ + u32 pgno; + u32 locking_page = (u32)(PENDING_BYTE/p->nPagesize+1); + for(pgno=size/p->nPagesize+1; pgno<=p->nPage; pgno++){ + assert( pgno==locking_page || sqlite3BitvecTest(p->pWritable, pgno) ); + } + } + return sqlite3OsTruncate(p->pReal, size); +} + +/* +** Sync an jt-file. +*/ +static int jtSync(sqlite3_file *pFile, int flags){ + jt_file *p = (jt_file *)pFile; + + if( p->flags&SQLITE_OPEN_MAIN_JOURNAL ){ + int rc; + jt_file *pMain; /* The associated database file */ + + /* The journal file is being synced. At this point, we inspect the + ** contents of the file up to this point and set each bit in the + ** jt_file.pWritable bitvec of the main database file associated with + ** this journal file. + */ + pMain = locateDatabaseHandle(p->zName); + assert(pMain); + + /* Set the bitvec values */ + if( pMain->pWritable ){ + pMain->nSync++; + rc = readJournalFile(p, pMain); + if( rc!=SQLITE_OK ){ + return rc; + } + } + } + + return sqlite3OsSync(p->pReal, flags); +} + +/* +** Return the current file-size of an jt-file. +*/ +static int jtFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsFileSize(p->pReal, pSize); +} + +/* +** Lock an jt-file. +*/ +static int jtLock(sqlite3_file *pFile, int eLock){ + int rc; + jt_file *p = (jt_file *)pFile; + rc = sqlite3OsLock(p->pReal, eLock); + if( rc==SQLITE_OK && eLock>p->eLock ){ + p->eLock = eLock; + } + return rc; +} + +/* +** Unlock an jt-file. +*/ +static int jtUnlock(sqlite3_file *pFile, int eLock){ + int rc; + jt_file *p = (jt_file *)pFile; + rc = sqlite3OsUnlock(p->pReal, eLock); + if( rc==SQLITE_OK && eLockeLock ){ + p->eLock = eLock; + } + return rc; +} + +/* +** Check if another file-handle holds a RESERVED lock on an jt-file. +*/ +static int jtCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsCheckReservedLock(p->pReal, pResOut); +} + +/* +** File control method. For custom operations on an jt-file. +*/ +static int jtFileControl(sqlite3_file *pFile, int op, void *pArg){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsFileControl(p->pReal, op, pArg); +} + +/* +** Return the sector-size in bytes for an jt-file. +*/ +static int jtSectorSize(sqlite3_file *pFile){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsSectorSize(p->pReal); +} + +/* +** Return the device characteristic flags supported by an jt-file. 
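Stepping back to the database-write checks in jtWrite() above, a worked example with made-up numbers may help; the tiny function is hypothetical and only restates the arithmetic.

    #include <assert.h>

    static void demoWriteCheck(void){
      int nPagesize = 1024;                      /* illustrative page size      */
      long long iOfst = 4096;                    /* offset of a 1024-byte write */
      unsigned pgno = (unsigned)(iOfst/nPagesize) + 1;
      assert( pgno==5 );
      /* jtWrite() then allows the write only if page 5 lies beyond the
      ** database size recorded at the start of the transaction (a growing
      ** file, which in turn requires at least one journal sync), or if
      ** bit 5 of pWritable is set because page 5 was a free-list leaf or
      ** has already been journalled before the most recent sync. */
    }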
+*/ +static int jtDeviceCharacteristics(sqlite3_file *pFile){ + jt_file *p = (jt_file *)pFile; + return sqlite3OsDeviceCharacteristics(p->pReal); +} + +/* +** Open an jt file handle. +*/ +static int jtOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + int rc; + jt_file *p = (jt_file *)pFile; + pFile->pMethods = 0; + p->pReal = (sqlite3_file *)&p[1]; + p->pReal->pMethods = 0; + rc = sqlite3OsOpen(g.pVfs, zName, p->pReal, flags, pOutFlags); + assert( rc==SQLITE_OK || p->pReal->pMethods==0 ); + if( rc==SQLITE_OK ){ + pFile->pMethods = &jt_io_methods; + p->eLock = 0; + p->zName = zName; + p->flags = flags; + p->pNext = 0; + p->pWritable = 0; + p->aCksum = 0; + enterJtMutex(); + if( zName ){ + p->pNext = g.pList; + g.pList = p; + } + leaveJtMutex(); + } + return rc; +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int jtDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int nPath = strlen(zPath); + if( nPath>8 && 0==strcmp("-journal", &zPath[nPath-8]) ){ + /* Deleting a journal file. The end of a transaction. */ + jt_file *pMain = locateDatabaseHandle(zPath); + if( pMain ){ + closeTransaction(pMain); + } + } + + return sqlite3OsDelete(g.pVfs, zPath, dirSync); +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. +*/ +static int jtAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + return sqlite3OsAccess(g.pVfs, zPath, flags, pResOut); +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (JT_MAX_PATHNAME+1) bytes. +*/ +static int jtFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + return sqlite3OsFullPathname(g.pVfs, zPath, nOut, zOut); +} + +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *jtDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return g.pVfs->xDlOpen(g.pVfs, zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. +*/ +static void jtDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + g.pVfs->xDlError(g.pVfs, nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. +*/ +static void (*jtDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ + return g.pVfs->xDlSym(g.pVfs, p, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void jtDlClose(sqlite3_vfs *pVfs, void *pHandle){ + g.pVfs->xDlClose(g.pVfs, pHandle); +} + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int jtRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return sqlite3OsRandomness(g.pVfs, nByte, zBufOut); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int jtSleep(sqlite3_vfs *pVfs, int nMicro){ + return sqlite3OsSleep(g.pVfs, nMicro); +} + +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int jtCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return sqlite3OsCurrentTime(g.pVfs, pTimeOut); +} + +/************************************************************************** +** Start of public API. 
+*/ + +/* +** Configure the jt VFS as a wrapper around the VFS named by parameter +** zWrap. If the isDefault parameter is true, then the jt VFS is installed +** as the new default VFS for SQLite connections. If isDefault is not +** true, then the jt VFS is installed as non-default. In this case it +** is available via its name, "jt". +*/ +int jt_register(char *zWrap, int isDefault){ + g.pVfs = sqlite3_vfs_find(zWrap); + if( g.pVfs==0 ){ + return SQLITE_ERROR; + } + jt_vfs.szOsFile = sizeof(jt_file) + g.pVfs->szOsFile; + sqlite3_vfs_register(&jt_vfs, isDefault); + return SQLITE_OK; +} + +/* +** Uninstall the jt VFS, if it is installed. +*/ +void jt_unregister(void){ + sqlite3_vfs_unregister(&jt_vfs); +} + +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_loadext.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_loadext.c --- sqlite3-3.4.2/src/test_loadext.c 2006-06-14 11:38:03.000000000 +0100 +++ sqlite3-3.6.16/src/test_loadext.c 2009-05-05 04:39:59.000000000 +0100 @@ -11,9 +11,9 @@ ************************************************************************* ** Test extension for testing the sqlite3_load_extension() function. ** -** $Id: test_loadext.c,v 1.1 2006/06/14 10:38:03 danielk1977 Exp $ +** $Id: test_loadext.c,v 1.3 2008/08/02 03:50:39 drh Exp $ */ - +#include #include "sqlite3ext.h" SQLITE_EXTENSION_INIT1 @@ -29,6 +29,68 @@ } /* +** SQL functions to call the sqlite3_status function and return results. +*/ +static void statusFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int op, mx, cur, resetFlag, rc; + if( sqlite3_value_type(argv[0])==SQLITE_INTEGER ){ + op = sqlite3_value_int(argv[0]); + }else if( sqlite3_value_type(argv[0])==SQLITE_TEXT ){ + int i; + const char *zName; + static const struct { + const char *zName; + int op; + } aOp[] = { + { "MEMORY_USED", SQLITE_STATUS_MEMORY_USED }, + { "PAGECACHE_USED", SQLITE_STATUS_PAGECACHE_USED }, + { "PAGECACHE_OVERFLOW", SQLITE_STATUS_PAGECACHE_OVERFLOW }, + { "SCRATCH_USED", SQLITE_STATUS_SCRATCH_USED }, + { "SCRATCH_OVERFLOW", SQLITE_STATUS_SCRATCH_OVERFLOW }, + { "MALLOC_SIZE", SQLITE_STATUS_MALLOC_SIZE }, + }; + int nOp = sizeof(aOp)/sizeof(aOp[0]); + zName = (const char*)sqlite3_value_text(argv[0]); + for(i=0; i=nOp ){ + char *zMsg = sqlite3_mprintf("unknown status property: %s", zName); + sqlite3_result_error(context, zMsg, -1); + sqlite3_free(zMsg); + return; + } + }else{ + sqlite3_result_error(context, "unknown status type", -1); + return; + } + if( argc==2 ){ + resetFlag = sqlite3_value_int(argv[1]); + }else{ + resetFlag = 0; + } + rc = sqlite3_status(op, &cur, &mx, resetFlag); + if( rc!=SQLITE_OK ){ + char *zMsg = sqlite3_mprintf("sqlite3_status(%d,...) returns %d", op, rc); + sqlite3_result_error(context, zMsg, -1); + sqlite3_free(zMsg); + return; + } + if( argc==2 ){ + sqlite3_result_int(context, mx); + }else{ + sqlite3_result_int(context, cur); + } +} + +/* ** Extension load function. */ int testloadext_init( @@ -36,9 +98,14 @@ char **pzErrMsg, const sqlite3_api_routines *pApi ){ + int nErr = 0; SQLITE_EXTENSION_INIT2(pApi); - sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0, halfFunc, 0, 0); - return 0; + nErr |= sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0, halfFunc, 0, 0); + nErr |= sqlite3_create_function(db, "sqlite3_status", 1, SQLITE_ANY, 0, + statusFunc, 0, 0); + nErr |= sqlite3_create_function(db, "sqlite3_status", 2, SQLITE_ANY, 0, + statusFunc, 0, 0); + return nErr ? 
SQLITE_ERROR : SQLITE_OK; } /* diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_malloc.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_malloc.c --- sqlite3-3.4.2/src/test_malloc.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_malloc.c 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,1389 @@ +/* +** 2007 August 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains code used to implement test interfaces to the +** memory allocation subsystem. +** +** $Id: test_malloc.c,v 1.54 2009/04/07 11:21:29 danielk1977 Exp $ +*/ +#include "sqliteInt.h" +#include "tcl.h" +#include +#include +#include + +/* +** This structure is used to encapsulate the global state variables used +** by malloc() fault simulation. +*/ +static struct MemFault { + int iCountdown; /* Number of pending successes before a failure */ + int nRepeat; /* Number of times to repeat the failure */ + int nBenign; /* Number of benign failures seen since last config */ + int nFail; /* Number of failures seen since last config */ + u8 enable; /* True if enabled */ + int isInstalled; /* True if the fault simulation layer is installed */ + int isBenignMode; /* True if malloc failures are considered benign */ + sqlite3_mem_methods m; /* 'Real' malloc implementation */ +} memfault; + +/* +** This routine exists as a place to set a breakpoint that will +** fire on any simulated malloc() failure. +*/ +static void sqlite3Fault(void){ + static int cnt = 0; + cnt++; +} + +/* +** Check to see if a fault should be simulated. Return true to simulate +** the fault. Return false if the fault should not be simulated. +*/ +static int faultsimStep(void){ + if( likely(!memfault.enable) ){ + return 0; + } + if( memfault.iCountdown>0 ){ + memfault.iCountdown--; + return 0; + } + sqlite3Fault(); + memfault.nFail++; + if( memfault.isBenignMode>0 ){ + memfault.nBenign++; + } + memfault.nRepeat--; + if( memfault.nRepeat<=0 ){ + memfault.enable = 0; + } + return 1; +} + +/* +** A version of sqlite3_mem_methods.xMalloc() that includes fault simulation +** logic. +*/ +static void *faultsimMalloc(int n){ + void *p = 0; + if( !faultsimStep() ){ + p = memfault.m.xMalloc(n); + } + return p; +} + + +/* +** A version of sqlite3_mem_methods.xRealloc() that includes fault simulation +** logic. +*/ +static void *faultsimRealloc(void *pOld, int n){ + void *p = 0; + if( !faultsimStep() ){ + p = memfault.m.xRealloc(pOld, n); + } + return p; +} + +/* +** The following method calls are passed directly through to the underlying +** malloc system: +** +** xFree +** xSize +** xRoundup +** xInit +** xShutdown +*/ +static void faultsimFree(void *p){ + memfault.m.xFree(p); +} +static int faultsimSize(void *p){ + return memfault.m.xSize(p); +} +static int faultsimRoundup(int n){ + return memfault.m.xRoundup(n); +} +static int faultsimInit(void *p){ + return memfault.m.xInit(memfault.m.pAppData); +} +static void faultsimShutdown(void *p){ + memfault.m.xShutdown(memfault.m.pAppData); +} + +/* +** This routine configures the malloc failure simulation. After +** calling this routine, the next nDelay mallocs will succeed, followed +** by a block of nRepeat failures, after which malloc() calls will begin +** to succeed again. 
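For example, the schedule described above plays out as follows once the layer has been installed (see faultsimInstall() further down); the trace assumes no other allocation sneaks in between these calls.

    /* Inside some hypothetical test routine: */
    faultsimConfig(3, 2);
    void *p1 = sqlite3_malloc(64);  /* succeeds: countdown 3 -> 2             */
    void *p2 = sqlite3_malloc(64);  /* succeeds: countdown 2 -> 1             */
    void *p3 = sqlite3_malloc(64);  /* succeeds: countdown 1 -> 0             */
    void *p4 = sqlite3_malloc(64);  /* fails (NULL): first of the 2 repeats   */
    void *p5 = sqlite3_malloc(64);  /* fails (NULL): repeats exhausted, the   */
                                    /* injector disables itself               */
    void *p6 = sqlite3_malloc(64);  /* succeeds again; frees omitted here     */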
+*/ +static void faultsimConfig(int nDelay, int nRepeat){ + memfault.iCountdown = nDelay; + memfault.nRepeat = nRepeat; + memfault.nBenign = 0; + memfault.nFail = 0; + memfault.enable = nDelay>=0; + + /* Sometimes, when running multi-threaded tests, the isBenignMode + ** variable is not properly incremented/decremented so that it is + ** 0 when not inside a benign malloc block. This doesn't affect + ** the multi-threaded tests, as they do not use this system. But + ** it does affect OOM tests run later in the same process. So + ** zero the variable here, just to be sure. + */ + memfault.isBenignMode = 0; +} + +/* +** Return the number of faults (both hard and benign faults) that have +** occurred since the injector was last configured. +*/ +static int faultsimFailures(void){ + return memfault.nFail; +} + +/* +** Return the number of benign faults that have occurred since the +** injector was last configured. +*/ +static int faultsimBenignFailures(void){ + return memfault.nBenign; +} + +/* +** Return the number of successes that will occur before the next failure. +** If no failures are scheduled, return -1. +*/ +static int faultsimPending(void){ + if( memfault.enable ){ + return memfault.iCountdown; + }else{ + return -1; + } +} + + +static void faultsimBeginBenign(void){ + memfault.isBenignMode++; +} +static void faultsimEndBenign(void){ + memfault.isBenignMode--; +} + +/* +** Add or remove the fault-simulation layer using sqlite3_config(). If +** the argument is non-zero, the +*/ +static int faultsimInstall(int install){ + static struct sqlite3_mem_methods m = { + faultsimMalloc, /* xMalloc */ + faultsimFree, /* xFree */ + faultsimRealloc, /* xRealloc */ + faultsimSize, /* xSize */ + faultsimRoundup, /* xRoundup */ + faultsimInit, /* xInit */ + faultsimShutdown, /* xShutdown */ + 0 /* pAppData */ + }; + int rc; + + install = (install ? 1 : 0); + assert(memfault.isInstalled==1 || memfault.isInstalled==0); + + if( install==memfault.isInstalled ){ + return SQLITE_ERROR; + } + + if( install ){ + rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &memfault.m); + assert(memfault.m.xMalloc); + if( rc==SQLITE_OK ){ + rc = sqlite3_config(SQLITE_CONFIG_MALLOC, &m); + } + sqlite3_test_control(SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS, + faultsimBeginBenign, faultsimEndBenign + ); + }else{ + sqlite3_mem_methods m; + assert(memfault.m.xMalloc); + + /* One should be able to reset the default memory allocator by storing + ** a zeroed allocator then calling GETMALLOC. */ + memset(&m, 0, sizeof(m)); + sqlite3_config(SQLITE_CONFIG_MALLOC, &m); + sqlite3_config(SQLITE_CONFIG_GETMALLOC, &m); + assert( memcmp(&m, &memfault.m, sizeof(m))==0 ); + + rc = sqlite3_config(SQLITE_CONFIG_MALLOC, &memfault.m); + sqlite3_test_control(SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS, 0, 0); + } + + if( rc==SQLITE_OK ){ + memfault.isInstalled = 1; + } + return rc; +} + +#ifdef SQLITE_TEST + +/* +** This function is implemented in test1.c. Returns a pointer to a static +** buffer containing the symbolic SQLite error code that corresponds to +** the least-significant 8-bits of the integer passed as an argument. 
+** For example: +** +** sqlite3TestErrorName(1) -> "SQLITE_ERROR" +*/ +const char *sqlite3TestErrorName(int); + +/* +** Transform pointers to text and back again +*/ +static void pointerToText(void *p, char *z){ + static const char zHex[] = "0123456789abcdef"; + int i, k; + unsigned int u; + sqlite3_uint64 n; + if( p==0 ){ + strcpy(z, "0"); + return; + } + if( sizeof(n)==sizeof(p) ){ + memcpy(&n, &p, sizeof(p)); + }else if( sizeof(u)==sizeof(p) ){ + memcpy(&u, &p, sizeof(u)); + n = u; + }else{ + assert( 0 ); + } + for(i=0, k=sizeof(p)*2-1; i>= 4; + } + z[sizeof(p)*2] = 0; +} +static int hexToInt(int h){ + if( h>='0' && h<='9' ){ + return h - '0'; + }else if( h>='a' && h<='f' ){ + return h - 'a' + 10; + }else{ + return -1; + } +} +static int textToPointer(const char *z, void **pp){ + sqlite3_uint64 n = 0; + int i; + unsigned int u; + for(i=0; isizeof(zBin)*2 ) n = sizeof(zBin)*2; + n = sqlite3TestHexToBin(zHex, n, zBin); + if( n==0 ){ + Tcl_AppendResult(interp, "no data", (char*)0); + return TCL_ERROR; + } + zOut = p; + for(i=0; i0 ){ + if( size>(sizeof(zHex)-1)/2 ){ + n = (sizeof(zHex)-1)/2; + }else{ + n = size; + } + memcpy(zHex, zBin, n); + zBin += n; + size -= n; + sqlite3TestBinToHex(zHex, n); + Tcl_AppendResult(interp, zHex, (char*)0); + } + return TCL_OK; +} + +/* +** Usage: sqlite3_memory_used +** +** Raw test interface for sqlite3_memory_used(). +*/ +static int test_memory_used( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_SetObjResult(interp, Tcl_NewWideIntObj(sqlite3_memory_used())); + return TCL_OK; +} + +/* +** Usage: sqlite3_memory_highwater ?RESETFLAG? +** +** Raw test interface for sqlite3_memory_highwater(). +*/ +static int test_memory_highwater( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int resetFlag = 0; + if( objc!=1 && objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?RESET?"); + return TCL_ERROR; + } + if( objc==2 ){ + if( Tcl_GetBooleanFromObj(interp, objv[1], &resetFlag) ) return TCL_ERROR; + } + Tcl_SetObjResult(interp, + Tcl_NewWideIntObj(sqlite3_memory_highwater(resetFlag))); + return TCL_OK; +} + +/* +** Usage: sqlite3_memdebug_backtrace DEPTH +** +** Set the depth of backtracing. If SQLITE_MEMDEBUG is not defined +** then this routine is a no-op. +*/ +static int test_memdebug_backtrace( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int depth; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DEPT"); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, objv[1], &depth) ) return TCL_ERROR; +#ifdef SQLITE_MEMDEBUG + { + extern void sqlite3MemdebugBacktrace(int); + sqlite3MemdebugBacktrace(depth); + } +#endif + return TCL_OK; +} + +/* +** Usage: sqlite3_memdebug_dump FILENAME +** +** Write a summary of unfreed memory to FILENAME. +*/ +static int test_memdebug_dump( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "FILENAME"); + return TCL_ERROR; + } +#if defined(SQLITE_MEMDEBUG) || defined(SQLITE_MEMORY_SIZE) \ + || defined(SQLITE_POW2_MEMORY_SIZE) + { + extern void sqlite3MemdebugDump(const char*); + sqlite3MemdebugDump(Tcl_GetString(objv[1])); + } +#endif + return TCL_OK; +} + +/* +** Usage: sqlite3_memdebug_malloc_count +** +** Return the total number of times malloc() has been called. 
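The two memory commands above are thin wrappers around public C APIs; the hypothetical helper below shows the kind of leak check the test scripts build out of them.

    #include "sqlite3.h"

    static int demo_no_leak_check(void){
      sqlite3_int64 nBefore = sqlite3_memory_used();
      sqlite3_int64 nPeak, nAfter;

      sqlite3_memory_highwater(1);            /* reset the high-water mark    */
      /* ... run some SQLite work here ... */
      nPeak  = sqlite3_memory_highwater(0);   /* peak usage since the reset   */
      nAfter = sqlite3_memory_used();
      (void)nPeak;
      return nAfter==nBefore;                 /* non-zero if all memory freed */
    }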
+*/ +static int test_memdebug_malloc_count( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int nMalloc = -1; + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } +#if defined(SQLITE_MEMDEBUG) + { + extern int sqlite3MemdebugMallocCount(); + nMalloc = sqlite3MemdebugMallocCount(); + } +#endif + Tcl_SetObjResult(interp, Tcl_NewIntObj(nMalloc)); + return TCL_OK; +} + + +/* +** Usage: sqlite3_memdebug_fail COUNTER ?OPTIONS? +** +** where options are: +** +** -repeat +** -benigncnt +** +** Arrange for a simulated malloc() failure after COUNTER successes. +** If a repeat count is specified, the fault is repeated that many +** times. +** +** Each call to this routine overrides the prior counter value. +** This routine returns the number of simulated failures that have +** happened since the previous call to this routine. +** +** To disable simulated failures, use a COUNTER of -1. +*/ +static int test_memdebug_fail( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int ii; + int iFail; + int nRepeat = 1; + Tcl_Obj *pBenignCnt = 0; + int nBenign; + int nFail = 0; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "COUNTER ?OPTIONS?"); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, objv[1], &iFail) ) return TCL_ERROR; + + for(ii=2; ii1 && strncmp(zOption, "-repeat", nOption)==0 ){ + if( ii==(objc-1) ){ + zErr = "option requires an argument: "; + }else{ + if( Tcl_GetIntFromObj(interp, objv[ii+1], &nRepeat) ){ + return TCL_ERROR; + } + } + }else if( nOption>1 && strncmp(zOption, "-benigncnt", nOption)==0 ){ + if( ii==(objc-1) ){ + zErr = "option requires an argument: "; + }else{ + pBenignCnt = objv[ii+1]; + } + }else{ + zErr = "unknown option: "; + } + + if( zErr ){ + Tcl_AppendResult(interp, zErr, zOption, 0); + return TCL_ERROR; + } + } + + nBenign = faultsimBenignFailures(); + nFail = faultsimFailures(); + faultsimConfig(iFail, nRepeat); + + if( pBenignCnt ){ + Tcl_ObjSetVar2(interp, pBenignCnt, 0, Tcl_NewIntObj(nBenign), 0); + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(nFail)); + return TCL_OK; +} + +/* +** Usage: sqlite3_memdebug_pending +** +** Return the number of malloc() calls that will succeed before a +** simulated failure occurs. A negative return value indicates that +** no malloc() failure is scheduled. +*/ +static int test_memdebug_pending( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int nPending; + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + nPending = faultsimPending(); + Tcl_SetObjResult(interp, Tcl_NewIntObj(nPending)); + return TCL_OK; +} + + +/* +** Usage: sqlite3_memdebug_settitle TITLE +** +** Set a title string stored with each allocation. The TITLE is +** typically the name of the test that was running when the +** allocation occurred. The TITLE is stored with the allocation +** and can be used to figure out which tests are leaking memory. +** +** Each title overwrite the previous. 
+*/ +static int test_memdebug_settitle( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + const char *zTitle; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "TITLE"); + return TCL_ERROR; + } + zTitle = Tcl_GetString(objv[1]); +#ifdef SQLITE_MEMDEBUG + { + extern int sqlite3MemdebugSettitle(const char*); + sqlite3MemdebugSettitle(zTitle); + } +#endif + return TCL_OK; +} + +#define MALLOC_LOG_FRAMES 10 +static Tcl_HashTable aMallocLog; +static int mallocLogEnabled = 0; + +typedef struct MallocLog MallocLog; +struct MallocLog { + int nCall; + int nByte; +}; + +#ifdef SQLITE_MEMDEBUG +static void test_memdebug_callback(int nByte, int nFrame, void **aFrame){ + if( mallocLogEnabled ){ + MallocLog *pLog; + Tcl_HashEntry *pEntry; + int isNew; + + int aKey[MALLOC_LOG_FRAMES]; + int nKey = sizeof(int)*MALLOC_LOG_FRAMES; + + memset(aKey, 0, nKey); + if( (sizeof(void*)*nFrame)nCall++; + pLog->nByte += nByte; + } +} +#endif /* SQLITE_MEMDEBUG */ + +static void test_memdebug_log_clear(void){ + Tcl_HashSearch search; + Tcl_HashEntry *pEntry; + for( + pEntry=Tcl_FirstHashEntry(&aMallocLog, &search); + pEntry; + pEntry=Tcl_NextHashEntry(&search) + ){ + MallocLog *pLog = (MallocLog *)Tcl_GetHashValue(pEntry); + Tcl_Free((char *)pLog); + } + Tcl_DeleteHashTable(&aMallocLog); + Tcl_InitHashTable(&aMallocLog, MALLOC_LOG_FRAMES); +} + +static int test_memdebug_log( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static int isInit = 0; + int iSub; + + static const char *MB_strs[] = { "start", "stop", "dump", "clear", "sync" }; + enum MB_enum { + MB_LOG_START, MB_LOG_STOP, MB_LOG_DUMP, MB_LOG_CLEAR, MB_LOG_SYNC + }; + + if( !isInit ){ +#ifdef SQLITE_MEMDEBUG + extern void sqlite3MemdebugBacktraceCallback( + void (*xBacktrace)(int, int, void **)); + sqlite3MemdebugBacktraceCallback(test_memdebug_callback); +#endif + Tcl_InitHashTable(&aMallocLog, MALLOC_LOG_FRAMES); + isInit = 1; + } + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUB-COMMAND ..."); + } + if( Tcl_GetIndexFromObj(interp, objv[1], MB_strs, "sub-command", 0, &iSub) ){ + return TCL_ERROR; + } + + switch( (enum MB_enum)iSub ){ + case MB_LOG_START: + mallocLogEnabled = 1; + break; + case MB_LOG_STOP: + mallocLogEnabled = 0; + break; + case MB_LOG_DUMP: { + Tcl_HashSearch search; + Tcl_HashEntry *pEntry; + Tcl_Obj *pRet = Tcl_NewObj(); + + assert(sizeof(int)==sizeof(void*)); + + for( + pEntry=Tcl_FirstHashEntry(&aMallocLog, &search); + pEntry; + pEntry=Tcl_NextHashEntry(&search) + ){ + Tcl_Obj *apElem[MALLOC_LOG_FRAMES+2]; + MallocLog *pLog = (MallocLog *)Tcl_GetHashValue(pEntry); + int *aKey = (int *)Tcl_GetHashKey(&aMallocLog, pEntry); + int ii; + + apElem[0] = Tcl_NewIntObj(pLog->nCall); + apElem[1] = Tcl_NewIntObj(pLog->nByte); + for(ii=0; ii5 ){ + Tcl_WrongNumArgs(interp, 1, objv, + "INSTALLFLAG DISCARDCHANCE PRNGSEEED HIGHSTRESS"); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, objv[1], &installFlag) ) return TCL_ERROR; + if( objc>=3 && Tcl_GetIntFromObj(interp, objv[2], &discardChance) ){ + return TCL_ERROR; + } + if( objc>=4 && Tcl_GetIntFromObj(interp, objv[3], &prngSeed) ){ + return TCL_ERROR; + } + if( objc>=5 && Tcl_GetIntFromObj(interp, objv[4], &highStress) ){ + return TCL_ERROR; + } + if( discardChance<0 || discardChance>100 ){ + Tcl_AppendResult(interp, "discard-chance should be between 0 and 100", + (char*)0); + return TCL_ERROR; + } + installTestPCache(installFlag, (unsigned)discardChance, (unsigned)prngSeed, + (unsigned)highStress); + return 
TCL_OK; +} + +/* +** Usage: sqlite3_config_memstatus BOOLEAN +** +** Enable or disable memory status reporting using SQLITE_CONFIG_MEMSTATUS. +*/ +static int test_config_memstatus( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int enable, rc; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "BOOLEAN"); + return TCL_ERROR; + } + if( Tcl_GetBooleanFromObj(interp, objv[1], &enable) ) return TCL_ERROR; + rc = sqlite3_config(SQLITE_CONFIG_MEMSTATUS, enable); + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); + return TCL_OK; +} + +/* +** Usage: sqlite3_config_lookaside SIZE COUNT +** +*/ +static int test_config_lookaside( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc; + int sz, cnt; + Tcl_Obj *pRet; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SIZE COUNT"); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, objv[1], &sz) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[2], &cnt) ) return TCL_ERROR; + pRet = Tcl_NewObj(); + Tcl_ListObjAppendElement( + interp, pRet, Tcl_NewIntObj(sqlite3GlobalConfig.szLookaside) + ); + Tcl_ListObjAppendElement( + interp, pRet, Tcl_NewIntObj(sqlite3GlobalConfig.nLookaside) + ); + rc = sqlite3_config(SQLITE_CONFIG_LOOKASIDE, sz, cnt); + Tcl_SetObjResult(interp, pRet); + return TCL_OK; +} + + +/* +** Usage: sqlite3_db_config_lookaside CONNECTION BUFID SIZE COUNT +** +** There are two static buffers with BUFID 1 and 2. Each static buffer +** is 10KB in size. A BUFID of 0 indicates that the buffer should be NULL +** which will cause sqlite3_db_config() to allocate space on its own. +*/ +static int test_db_config_lookaside( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc; + int sz, cnt; + sqlite3 *db; + int bufid; + static char azBuf[2][10000]; + int getDbPointer(Tcl_Interp*, const char*, sqlite3**); + if( objc!=5 ){ + Tcl_WrongNumArgs(interp, 1, objv, "BUFID SIZE COUNT"); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[2], &bufid) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[3], &sz) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[4], &cnt) ) return TCL_ERROR; + if( bufid==0 ){ + rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, 0, sz, cnt); + }else if( bufid>=1 && bufid<=2 && sz*cnt<=sizeof(azBuf[0]) ){ + rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, azBuf[bufid], sz,cnt); + }else{ + Tcl_AppendResult(interp, "illegal arguments - see documentation", (char*)0); + return TCL_ERROR; + } + Tcl_SetObjResult(interp, Tcl_NewIntObj(rc)); + return TCL_OK; +} + +/* +** Usage: +** +** sqlite3_config_heap NBYTE NMINALLOC +*/ +static int test_config_heap( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static char *zBuf; /* Use this memory */ + static int szBuf; /* Bytes allocated for zBuf */ + int nByte; /* Size of buffer to pass to sqlite3_config() */ + int nMinAlloc; /* Size of minimum allocation */ + int rc; /* Return code of sqlite3_config() */ + + Tcl_Obj * CONST *aArg = &objv[1]; + int nArg = objc-1; + + if( nArg!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "NBYTE NMINALLOC"); + return TCL_ERROR; + } + if( Tcl_GetIntFromObj(interp, aArg[0], &nByte) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, aArg[1], &nMinAlloc) ) return TCL_ERROR; + + if( nByte==0 ){ + free( zBuf ); + zBuf = 0; + szBuf = 0; + rc = sqlite3_config(SQLITE_CONFIG_HEAP, (void*)0, 0, 0); + }else{ + zBuf 
= realloc(zBuf, nByte); + szBuf = nByte; + rc = sqlite3_config(SQLITE_CONFIG_HEAP, zBuf, nByte, nMinAlloc); + } + + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +/* +** tclcmd: sqlite3_config_error [DB] +** +** Invoke sqlite3_config() or sqlite3_db_config() with invalid +** opcodes and verify that they return errors. +*/ +static int test_config_error( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + int getDbPointer(Tcl_Interp*, const char*, sqlite3**); + + if( objc!=2 && objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, "[DB]"); + return TCL_ERROR; + } + if( objc==2 ){ + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + if( sqlite3_db_config(db, 99999)!=SQLITE_ERROR ){ + Tcl_AppendResult(interp, + "sqlite3_db_config(db, 99999) does not return SQLITE_ERROR", + (char*)0); + return TCL_ERROR; + } + }else{ + if( sqlite3_config(99999)!=SQLITE_ERROR ){ + Tcl_AppendResult(interp, + "sqlite3_config(99999) does not return SQLITE_ERROR", + (char*)0); + return TCL_ERROR; + } + } + return TCL_OK; +} + +/* +** Usage: +** +** sqlite3_dump_memsys3 FILENAME +** sqlite3_dump_memsys5 FILENAME +** +** Write a summary of unfreed memsys3 allocations to FILENAME. +*/ +static int test_dump_memsys3( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "FILENAME"); + return TCL_ERROR; + } + + switch( (int)clientData ){ + case 3: { +#ifdef SQLITE_ENABLE_MEMSYS3 + extern void sqlite3Memsys3Dump(const char*); + sqlite3Memsys3Dump(Tcl_GetString(objv[1])); + break; +#endif + } + case 5: { +#ifdef SQLITE_ENABLE_MEMSYS5 + extern void sqlite3Memsys5Dump(const char*); + sqlite3Memsys5Dump(Tcl_GetString(objv[1])); + break; +#endif + } + } + return TCL_OK; +} + +/* +** Usage: sqlite3_status OPCODE RESETFLAG +** +** Return a list of three elements which are the sqlite3_status() return +** code, the current value, and the high-water mark value. 
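In C terms the command is a direct pass-through to sqlite3_status(); for instance (illustrative values and opcode):

    int iCur = 0, iHi = 0;
    int rc = sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHi, 0);
    /* rc   -> SQLITE_OK on success
    ** iCur -> bytes of memory currently checked out
    ** iHi  -> high-water mark, left unreset because the last argument is 0 */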
+*/ +static int test_status( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc, iValue, mxValue; + int i, op, resetFlag; + const char *zOpName; + static const struct { + const char *zName; + int op; + } aOp[] = { + { "SQLITE_STATUS_MEMORY_USED", SQLITE_STATUS_MEMORY_USED }, + { "SQLITE_STATUS_MALLOC_SIZE", SQLITE_STATUS_MALLOC_SIZE }, + { "SQLITE_STATUS_PAGECACHE_USED", SQLITE_STATUS_PAGECACHE_USED }, + { "SQLITE_STATUS_PAGECACHE_OVERFLOW", SQLITE_STATUS_PAGECACHE_OVERFLOW }, + { "SQLITE_STATUS_PAGECACHE_SIZE", SQLITE_STATUS_PAGECACHE_SIZE }, + { "SQLITE_STATUS_SCRATCH_USED", SQLITE_STATUS_SCRATCH_USED }, + { "SQLITE_STATUS_SCRATCH_OVERFLOW", SQLITE_STATUS_SCRATCH_OVERFLOW }, + { "SQLITE_STATUS_SCRATCH_SIZE", SQLITE_STATUS_SCRATCH_SIZE }, + { "SQLITE_STATUS_PARSER_STACK", SQLITE_STATUS_PARSER_STACK }, + }; + Tcl_Obj *pResult; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "PARAMETER RESETFLAG"); + return TCL_ERROR; + } + zOpName = Tcl_GetString(objv[1]); + for(i=0; i=ArraySize(aOp) ){ + if( Tcl_GetIntFromObj(interp, objv[1], &op) ) return TCL_ERROR; + } + if( Tcl_GetBooleanFromObj(interp, objv[2], &resetFlag) ) return TCL_ERROR; + iValue = 0; + mxValue = 0; + rc = sqlite3_status(op, &iValue, &mxValue, resetFlag); + pResult = Tcl_NewObj(); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(rc)); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(iValue)); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(mxValue)); + Tcl_SetObjResult(interp, pResult); + return TCL_OK; +} + +/* +** Usage: sqlite3_db_status DATABASE OPCODE RESETFLAG +** +** Return a list of three elements which are the sqlite3_db_status() return +** code, the current value, and the high-water mark value. +*/ +static int test_db_status( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc, iValue, mxValue; + int i, op, resetFlag; + const char *zOpName; + sqlite3 *db; + int getDbPointer(Tcl_Interp*, const char*, sqlite3**); + static const struct { + const char *zName; + int op; + } aOp[] = { + { "SQLITE_DBSTATUS_LOOKASIDE_USED", SQLITE_DBSTATUS_LOOKASIDE_USED }, + }; + Tcl_Obj *pResult; + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 1, objv, "PARAMETER RESETFLAG"); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + zOpName = Tcl_GetString(objv[2]); + for(i=0; i=ArraySize(aOp) ){ + if( Tcl_GetIntFromObj(interp, objv[2], &op) ) return TCL_ERROR; + } + if( Tcl_GetBooleanFromObj(interp, objv[3], &resetFlag) ) return TCL_ERROR; + iValue = 0; + mxValue = 0; + rc = sqlite3_db_status(db, op, &iValue, &mxValue, resetFlag); + pResult = Tcl_NewObj(); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(rc)); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(iValue)); + Tcl_ListObjAppendElement(0, pResult, Tcl_NewIntObj(mxValue)); + Tcl_SetObjResult(interp, pResult); + return TCL_OK; +} + +/* +** install_malloc_faultsim BOOLEAN +*/ +static int test_install_malloc_faultsim( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc; + int isInstall; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "BOOLEAN"); + return TCL_ERROR; + } + if( TCL_OK!=Tcl_GetBooleanFromObj(interp, objv[1], &isInstall) ){ + return TCL_ERROR; + } + rc = faultsimInstall(isInstall); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +/* +** Register commands with the TCL interpreter. 
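The init routine that follows collects every command in a table of name / handler / clientData triples and registers each one with the interpreter. The loop shape shown here is an assumption based on the standard Tcl C API (Tcl_CreateObjCommand), not the verbatim body:

    int i;
    for(i=0; i<sizeof(aObjCmd)/sizeof(aObjCmd[0]); i++){
      /* The small integer clientData (used by sqlite3_dump_memsys3/5 above to
      ** pick memsys3 or memsys5) is smuggled through the ClientData pointer. */
      ClientData c = (ClientData)aObjCmd[i].clientData;
      Tcl_CreateObjCommand(interp, aObjCmd[i].zName, aObjCmd[i].xProc, c, 0);
    }
    return TCL_OK;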
+*/ +int Sqlitetest_malloc_Init(Tcl_Interp *interp){ + static struct { + char *zName; + Tcl_ObjCmdProc *xProc; + int clientData; + } aObjCmd[] = { + { "sqlite3_malloc", test_malloc ,0 }, + { "sqlite3_realloc", test_realloc ,0 }, + { "sqlite3_free", test_free ,0 }, + { "memset", test_memset ,0 }, + { "memget", test_memget ,0 }, + { "sqlite3_memory_used", test_memory_used ,0 }, + { "sqlite3_memory_highwater", test_memory_highwater ,0 }, + { "sqlite3_memdebug_backtrace", test_memdebug_backtrace ,0 }, + { "sqlite3_memdebug_dump", test_memdebug_dump ,0 }, + { "sqlite3_memdebug_fail", test_memdebug_fail ,0 }, + { "sqlite3_memdebug_pending", test_memdebug_pending ,0 }, + { "sqlite3_memdebug_settitle", test_memdebug_settitle ,0 }, + { "sqlite3_memdebug_malloc_count", test_memdebug_malloc_count ,0 }, + { "sqlite3_memdebug_log", test_memdebug_log ,0 }, + { "sqlite3_config_scratch", test_config_scratch ,0 }, + { "sqlite3_config_pagecache", test_config_pagecache ,0 }, + { "sqlite3_config_alt_pcache", test_alt_pcache ,0 }, + { "sqlite3_status", test_status ,0 }, + { "sqlite3_db_status", test_db_status ,0 }, + { "install_malloc_faultsim", test_install_malloc_faultsim ,0 }, + { "sqlite3_config_heap", test_config_heap ,0 }, + { "sqlite3_config_memstatus", test_config_memstatus ,0 }, + { "sqlite3_config_lookaside", test_config_lookaside ,0 }, + { "sqlite3_config_error", test_config_error ,0 }, + { "sqlite3_db_config_lookaside",test_db_config_lookaside ,0 }, + { "sqlite3_dump_memsys3", test_dump_memsys3 ,3 }, + { "sqlite3_dump_memsys5", test_dump_memsys3 ,5 }, + }; + int i; + for(i=0; ibuf, (uint32 *)ctx->in); byteReverse((unsigned char *)ctx->buf, 4); memcpy(digest, ctx->buf, 16); - memset(ctx, 0, sizeof(ctx)); /* In case it's sensitive */ + memset(ctx, 0, sizeof(ctx)); /* In case it is sensitive */ } /* @@ -297,6 +299,7 @@ static int md5_cmd(void*cd, Tcl_Interp *interp, int argc, const char **argv){ MD5Context ctx; unsigned char digest[16]; + char zBuf[33]; if( argc!=2 ){ Tcl_AppendResult(interp,"wrong # args: should be \"", argv[0], @@ -306,7 +309,8 @@ MD5Init(&ctx); MD5Update(&ctx, (unsigned char*)argv[1], (unsigned)strlen(argv[1])); MD5Final(digest, &ctx); - DigestToBase16(digest, interp->result); + DigestToBase16(digest, zBuf); + Tcl_AppendResult(interp, zBuf, (char*)0); return TCL_OK; } @@ -340,7 +344,8 @@ } fclose(in); MD5Final(digest, &ctx); - DigestToBase16(digest, interp->result); + DigestToBase16(digest, zBuf); + Tcl_AppendResult(interp, zBuf, (char*)0); return TCL_OK; } @@ -382,7 +387,9 @@ DigestToBase16(digest, zBuf); sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); } -void Md5_Register(sqlite3 *db){ - sqlite3_create_function(db, "md5sum", -1, SQLITE_UTF8, 0, 0, - md5step, md5finalize); +int Md5_Register(sqlite3 *db){ + int rc = sqlite3_create_function(db, "md5sum", -1, SQLITE_UTF8, 0, 0, + md5step, md5finalize); + sqlite3_overload_function(db, "md5sum", -1); /* To exercise this API */ + return rc; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_mutex.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_mutex.c --- sqlite3-3.4.2/src/test_mutex.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_mutex.c 2009-06-12 03:37:49.000000000 +0100 @@ -0,0 +1,440 @@ +/* +** 2008 June 18 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** +** $Id: test_mutex.c,v 1.15 2009/03/20 13:15:30 drh Exp $ +*/ + +#include "tcl.h" +#include "sqlite3.h" +#include "sqliteInt.h" +#include +#include +#include + +/* defined in test1.c */ +const char *sqlite3TestErrorName(int); + +/* A countable mutex */ +struct sqlite3_mutex { + sqlite3_mutex *pReal; + int eType; +}; + +/* State variables */ +static struct test_mutex_globals { + int isInstalled; /* True if installed */ + int disableInit; /* True to cause sqlite3_initalize() to fail */ + int disableTry; /* True to force sqlite3_mutex_try() to fail */ + int isInit; /* True if initialized */ + sqlite3_mutex_methods m; /* Interface to "real" mutex system */ + int aCounter[8]; /* Number of grabs of each type of mutex */ + sqlite3_mutex aStatic[6]; /* The six static mutexes */ +} g = {0}; + +/* Return true if the countable mutex is currently held */ +static int counterMutexHeld(sqlite3_mutex *p){ + return g.m.xMutexHeld(p->pReal); +} + +/* Return true if the countable mutex is not currently held */ +static int counterMutexNotheld(sqlite3_mutex *p){ + return g.m.xMutexNotheld(p->pReal); +} + +/* Initialize the countable mutex interface +** Or, if g.disableInit is non-zero, then do not initialize but instead +** return the value of g.disableInit as the result code. This can be used +** to simulate an initialization failure. +*/ +static int counterMutexInit(void){ + int rc; + if( g.disableInit ) return g.disableInit; + rc = g.m.xMutexInit(); + g.isInit = 1; + return rc; +} + +/* +** Uninitialize the mutex subsystem +*/ +static int counterMutexEnd(void){ + g.isInit = 0; + return g.m.xMutexEnd(); +} + +/* +** Allocate a countable mutex +*/ +static sqlite3_mutex *counterMutexAlloc(int eType){ + sqlite3_mutex *pReal; + sqlite3_mutex *pRet = 0; + + assert( g.isInit ); + assert(eType<8 && eType>=0); + + pReal = g.m.xMutexAlloc(eType); + if( !pReal ) return 0; + + if( eType==SQLITE_MUTEX_FAST || eType==SQLITE_MUTEX_RECURSIVE ){ + pRet = (sqlite3_mutex *)malloc(sizeof(sqlite3_mutex)); + }else{ + pRet = &g.aStatic[eType-2]; + } + + pRet->eType = eType; + pRet->pReal = pReal; + return pRet; +} + +/* +** Free a countable mutex +*/ +static void counterMutexFree(sqlite3_mutex *p){ + assert( g.isInit ); + g.m.xMutexFree(p->pReal); + if( p->eType==SQLITE_MUTEX_FAST || p->eType==SQLITE_MUTEX_RECURSIVE ){ + free(p); + } +} + +/* +** Enter a countable mutex. Block until entry is safe. +*/ +static void counterMutexEnter(sqlite3_mutex *p){ + assert( g.isInit ); + g.aCounter[p->eType]++; + g.m.xMutexEnter(p->pReal); +} + +/* +** Try to enter a mutex. Return true on success. 
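+** (Here "true" means SQLITE_OK, as with sqlite3_mutex_try().  When the
+** global disableTry flag is set the wrapper returns SQLITE_BUSY without
+** consulting the real mutex, so tests can exercise the failure path.)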
+*/ +static int counterMutexTry(sqlite3_mutex *p){ + assert( g.isInit ); + g.aCounter[p->eType]++; + if( g.disableTry ) return SQLITE_BUSY; + return g.m.xMutexTry(p->pReal); +} + +/* Leave a mutex +*/ +static void counterMutexLeave(sqlite3_mutex *p){ + assert( g.isInit ); + g.m.xMutexLeave(p->pReal); +} + +/* +** sqlite3_shutdown +*/ +static int test_shutdown( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc; + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + rc = sqlite3_shutdown(); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +/* +** sqlite3_initialize +*/ +static int test_initialize( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc; + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + rc = sqlite3_initialize(); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +/* +** install_mutex_counters BOOLEAN +*/ +static int test_install_mutex_counters( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int rc = SQLITE_OK; + int isInstall; + + sqlite3_mutex_methods counter_methods = { + counterMutexInit, + counterMutexEnd, + counterMutexAlloc, + counterMutexFree, + counterMutexEnter, + counterMutexTry, + counterMutexLeave, + counterMutexHeld, + counterMutexNotheld + }; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "BOOLEAN"); + return TCL_ERROR; + } + if( TCL_OK!=Tcl_GetBooleanFromObj(interp, objv[1], &isInstall) ){ + return TCL_ERROR; + } + + assert(isInstall==0 || isInstall==1); + assert(g.isInstalled==0 || g.isInstalled==1); + if( isInstall==g.isInstalled ){ + Tcl_AppendResult(interp, "mutex counters are ", 0); + Tcl_AppendResult(interp, isInstall?"already installed":"not installed", 0); + return TCL_ERROR; + } + + if( isInstall ){ + assert( g.m.xMutexAlloc==0 ); + rc = sqlite3_config(SQLITE_CONFIG_GETMUTEX, &g.m); + if( rc==SQLITE_OK ){ + sqlite3_config(SQLITE_CONFIG_MUTEX, &counter_methods); + } + g.disableTry = 0; + }else{ + assert( g.m.xMutexAlloc ); + rc = sqlite3_config(SQLITE_CONFIG_MUTEX, &g.m); + memset(&g.m, 0, sizeof(sqlite3_mutex_methods)); + } + + if( rc==SQLITE_OK ){ + g.isInstalled = isInstall; + } + + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +/* +** read_mutex_counters +*/ +static int test_read_mutex_counters( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_Obj *pRet; + int ii; + char *aName[8] = { + "fast", "recursive", "static_master", "static_mem", + "static_open", "static_prng", "static_lru", "static_lru2" + }; + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + for(ii=0; ii<8; ii++){ + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(aName[ii], -1)); + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewIntObj(g.aCounter[ii])); + } + Tcl_SetObjResult(interp, pRet); + Tcl_DecrRefCount(pRet); + + return TCL_OK; +} + +/* +** clear_mutex_counters +*/ +static int test_clear_mutex_counters( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int ii; + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + for(ii=0; ii<8; ii++){ + g.aCounter[ii] = 0; + } + return TCL_OK; +} + +/* +** Create and free a mutex. Return the mutex pointer. 
The pointer +** will be invalid since the mutex has already been freed. The +** return pointer just checks to see if the mutex really was allocated. +*/ +static int test_alloc_mutex( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ +#if SQLITE_THREADSAFE + sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + char zBuf[100]; + sqlite3_mutex_free(p); + sqlite3_snprintf(sizeof(zBuf), zBuf, "%p", p); + Tcl_AppendResult(interp, zBuf, (char*)0); +#endif + return TCL_OK; +} + +/* +** sqlite3_config OPTION +** +** OPTION can be either one of the keywords: +** +** SQLITE_CONFIG_SINGLETHREAD +** SQLITE_CONFIG_MULTITHREAD +** SQLITE_CONFIG_SERIALIZED +** +** Or OPTION can be an raw integer. +*/ +static int test_config( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + struct ConfigOption { + const char *zName; + int iValue; + } aOpt[] = { + {"singlethread", SQLITE_CONFIG_SINGLETHREAD}, + {"multithread", SQLITE_CONFIG_MULTITHREAD}, + {"serialized", SQLITE_CONFIG_SERIALIZED}, + {0, 0} + }; + int s = sizeof(struct ConfigOption); + int i; + int rc; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + if( Tcl_GetIndexFromObjStruct(interp, objv[1], aOpt, s, "flag", 0, &i) ){ + if( Tcl_GetIntFromObj(interp, objv[1], &i) ){ + return TCL_ERROR; + } + }else{ + i = aOpt[i].iValue; + } + + rc = sqlite3_config(i); + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), TCL_VOLATILE); + return TCL_OK; +} + +static sqlite3 *getDbPointer(Tcl_Interp *pInterp, Tcl_Obj *pObj){ + sqlite3 *db; + Tcl_CmdInfo info; + char *zCmd = Tcl_GetString(pObj); + if( Tcl_GetCommandInfo(pInterp, zCmd, &info) ){ + db = *((sqlite3 **)info.objClientData); + }else{ + db = (sqlite3*)sqlite3TestTextToPtr(zCmd); + } + assert( db ); + return db; +} + +static int test_enter_db_mutex( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + db = getDbPointer(interp, objv[1]); + if( !db ){ + return TCL_ERROR; + } + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + return TCL_OK; +} + +static int test_leave_db_mutex( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + db = getDbPointer(interp, objv[1]); + if( !db ){ + return TCL_ERROR; + } + sqlite3_mutex_leave(sqlite3_db_mutex(db)); + return TCL_OK; +} + +int Sqlitetest_mutex_Init(Tcl_Interp *interp){ + static struct { + char *zName; + Tcl_ObjCmdProc *xProc; + } aCmd[] = { + { "sqlite3_shutdown", (Tcl_ObjCmdProc*)test_shutdown }, + { "sqlite3_initialize", (Tcl_ObjCmdProc*)test_initialize }, + { "sqlite3_config", (Tcl_ObjCmdProc*)test_config }, + + { "enter_db_mutex", (Tcl_ObjCmdProc*)test_enter_db_mutex }, + { "leave_db_mutex", (Tcl_ObjCmdProc*)test_leave_db_mutex }, + + { "alloc_dealloc_mutex", (Tcl_ObjCmdProc*)test_alloc_mutex }, + { "install_mutex_counters", (Tcl_ObjCmdProc*)test_install_mutex_counters }, + { "read_mutex_counters", (Tcl_ObjCmdProc*)test_read_mutex_counters }, + { "clear_mutex_counters", (Tcl_ObjCmdProc*)test_clear_mutex_counters }, + }; + int i; + for(i=0; i +#include + +/* +** Maximum pathname length supported by the fs backend. +*/ +#define BLOCKSIZE 512 +#define BLOBSIZE 10485760 + +/* +** Name used to identify this VFS. 
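+** (Once fs_register() below has been called, a test can select this VFS
+** explicitly by passing "fs" as the zVfs argument to sqlite3_open_v2().)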
+*/ +#define FS_VFS_NAME "fs" + +typedef struct fs_real_file fs_real_file; +struct fs_real_file { + sqlite3_file *pFile; + const char *zName; + int nDatabase; /* Current size of database region */ + int nJournal; /* Current size of journal region */ + int nBlob; /* Total size of allocated blob */ + int nRef; /* Number of pointers to this structure */ + fs_real_file *pNext; + fs_real_file **ppThis; +}; + +typedef struct fs_file fs_file; +struct fs_file { + sqlite3_file base; + int eType; + fs_real_file *pReal; +}; + +typedef struct tmp_file tmp_file; +struct tmp_file { + sqlite3_file base; + int nSize; + int nAlloc; + char *zAlloc; +}; + +/* Values for fs_file.eType. */ +#define DATABASE_FILE 1 +#define JOURNAL_FILE 2 + +/* +** Method declarations for fs_file. +*/ +static int fsClose(sqlite3_file*); +static int fsRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int fsWrite(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); +static int fsTruncate(sqlite3_file*, sqlite3_int64 size); +static int fsSync(sqlite3_file*, int flags); +static int fsFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int fsLock(sqlite3_file*, int); +static int fsUnlock(sqlite3_file*, int); +static int fsCheckReservedLock(sqlite3_file*, int *pResOut); +static int fsFileControl(sqlite3_file*, int op, void *pArg); +static int fsSectorSize(sqlite3_file*); +static int fsDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for tmp_file. +*/ +static int tmpClose(sqlite3_file*); +static int tmpRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int tmpWrite(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); +static int tmpTruncate(sqlite3_file*, sqlite3_int64 size); +static int tmpSync(sqlite3_file*, int flags); +static int tmpFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int tmpLock(sqlite3_file*, int); +static int tmpUnlock(sqlite3_file*, int); +static int tmpCheckReservedLock(sqlite3_file*, int *pResOut); +static int tmpFileControl(sqlite3_file*, int op, void *pArg); +static int tmpSectorSize(sqlite3_file*); +static int tmpDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for fs_vfs. 
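+** Only xOpen, xDelete and xAccess contain real logic; the remaining
+** methods simply forward to the parent VFS.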
+*/ +static int fsOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int fsDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int fsAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int fsFullPathname(sqlite3_vfs*, const char *zName, int nOut,char *zOut); +static void *fsDlOpen(sqlite3_vfs*, const char *zFilename); +static void fsDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*fsDlSym(sqlite3_vfs*,void*, const char *zSymbol))(void); +static void fsDlClose(sqlite3_vfs*, void*); +static int fsRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int fsSleep(sqlite3_vfs*, int microseconds); +static int fsCurrentTime(sqlite3_vfs*, double*); + + +typedef struct fs_vfs_t fs_vfs_t; +struct fs_vfs_t { + sqlite3_vfs base; + fs_real_file *pFileList; + sqlite3_vfs *pParent; +}; + +static fs_vfs_t fs_vfs = { + { + 1, /* iVersion */ + 0, /* szOsFile */ + 0, /* mxPathname */ + 0, /* pNext */ + FS_VFS_NAME, /* zName */ + 0, /* pAppData */ + fsOpen, /* xOpen */ + fsDelete, /* xDelete */ + fsAccess, /* xAccess */ + fsFullPathname, /* xFullPathname */ + fsDlOpen, /* xDlOpen */ + fsDlError, /* xDlError */ + fsDlSym, /* xDlSym */ + fsDlClose, /* xDlClose */ + fsRandomness, /* xRandomness */ + fsSleep, /* xSleep */ + fsCurrentTime /* xCurrentTime */ + }, + 0, /* pFileList */ + 0 /* pParent */ +}; + +static sqlite3_io_methods fs_io_methods = { + 1, /* iVersion */ + fsClose, /* xClose */ + fsRead, /* xRead */ + fsWrite, /* xWrite */ + fsTruncate, /* xTruncate */ + fsSync, /* xSync */ + fsFileSize, /* xFileSize */ + fsLock, /* xLock */ + fsUnlock, /* xUnlock */ + fsCheckReservedLock, /* xCheckReservedLock */ + fsFileControl, /* xFileControl */ + fsSectorSize, /* xSectorSize */ + fsDeviceCharacteristics /* xDeviceCharacteristics */ +}; + + +static sqlite3_io_methods tmp_io_methods = { + 1, /* iVersion */ + tmpClose, /* xClose */ + tmpRead, /* xRead */ + tmpWrite, /* xWrite */ + tmpTruncate, /* xTruncate */ + tmpSync, /* xSync */ + tmpFileSize, /* xFileSize */ + tmpLock, /* xLock */ + tmpUnlock, /* xUnlock */ + tmpCheckReservedLock, /* xCheckReservedLock */ + tmpFileControl, /* xFileControl */ + tmpSectorSize, /* xSectorSize */ + tmpDeviceCharacteristics /* xDeviceCharacteristics */ +}; + +/* Useful macros used in several places */ +#define MIN(x,y) ((x)<(y)?(x):(y)) +#define MAX(x,y) ((x)>(y)?(x):(y)) + + +/* +** Close a tmp-file. +*/ +static int tmpClose(sqlite3_file *pFile){ + tmp_file *pTmp = (tmp_file *)pFile; + sqlite3_free(pTmp->zAlloc); + return SQLITE_OK; +} + +/* +** Read data from a tmp-file. +*/ +static int tmpRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + tmp_file *pTmp = (tmp_file *)pFile; + if( (iAmt+iOfst)>pTmp->nSize ){ + return SQLITE_IOERR_SHORT_READ; + } + memcpy(zBuf, &pTmp->zAlloc[iOfst], iAmt); + return SQLITE_OK; +} + +/* +** Write data to a tmp-file. +*/ +static int tmpWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + tmp_file *pTmp = (tmp_file *)pFile; + if( (iAmt+iOfst)>pTmp->nAlloc ){ + int nNew = 2*(iAmt+iOfst+pTmp->nAlloc); + char *zNew = sqlite3_realloc(pTmp->zAlloc, nNew); + if( !zNew ){ + return SQLITE_NOMEM; + } + pTmp->zAlloc = zNew; + pTmp->nAlloc = nNew; + } + memcpy(&pTmp->zAlloc[iOfst], zBuf, iAmt); + pTmp->nSize = MAX(pTmp->nSize, iOfst+iAmt); + return SQLITE_OK; +} + +/* +** Truncate a tmp-file. 
+*/ +static int tmpTruncate(sqlite3_file *pFile, sqlite_int64 size){ + tmp_file *pTmp = (tmp_file *)pFile; + pTmp->nSize = MIN(pTmp->nSize, size); + return SQLITE_OK; +} + +/* +** Sync a tmp-file. +*/ +static int tmpSync(sqlite3_file *pFile, int flags){ + return SQLITE_OK; +} + +/* +** Return the current file-size of a tmp-file. +*/ +static int tmpFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + tmp_file *pTmp = (tmp_file *)pFile; + *pSize = pTmp->nSize; + return SQLITE_OK; +} + +/* +** Lock a tmp-file. +*/ +static int tmpLock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} + +/* +** Unlock a tmp-file. +*/ +static int tmpUnlock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} + +/* +** Check if another file-handle holds a RESERVED lock on a tmp-file. +*/ +static int tmpCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + *pResOut = 0; + return SQLITE_OK; +} + +/* +** File control method. For custom operations on a tmp-file. +*/ +static int tmpFileControl(sqlite3_file *pFile, int op, void *pArg){ + return SQLITE_OK; +} + +/* +** Return the sector-size in bytes for a tmp-file. +*/ +static int tmpSectorSize(sqlite3_file *pFile){ + return 0; +} + +/* +** Return the device characteristic flags supported by a tmp-file. +*/ +static int tmpDeviceCharacteristics(sqlite3_file *pFile){ + return 0; +} + +/* +** Close an fs-file. +*/ +static int fsClose(sqlite3_file *pFile){ + int rc = SQLITE_OK; + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + + /* Decrement the real_file ref-count. */ + pReal->nRef--; + assert(pReal->nRef>=0); + + /* When the ref-count reaches 0, destroy the structure */ + if( pReal->nRef==0 ){ + *pReal->ppThis = pReal->pNext; + if( pReal->pNext ){ + pReal->pNext->ppThis = pReal->ppThis; + } + rc = pReal->pFile->pMethods->xClose(pReal->pFile); + sqlite3_free(pReal); + } + + return rc; +} + +/* +** Read data from an fs-file. +*/ +static int fsRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + int rc = SQLITE_OK; + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + sqlite3_file *pF = pReal->pFile; + + if( (p->eType==DATABASE_FILE && (iAmt+iOfst)>pReal->nDatabase) + || (p->eType==JOURNAL_FILE && (iAmt+iOfst)>pReal->nJournal) + ){ + rc = SQLITE_IOERR_SHORT_READ; + }else if( p->eType==DATABASE_FILE ){ + rc = pF->pMethods->xRead(pF, zBuf, iAmt, iOfst+BLOCKSIZE); + }else{ + /* Journal file. */ + int iRem = iAmt; + int iBuf = 0; + int ii = iOfst; + while( iRem>0 && rc==SQLITE_OK ){ + int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE; + int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE)); + + rc = pF->pMethods->xRead(pF, &((char *)zBuf)[iBuf], iRealAmt, iRealOff); + ii += iRealAmt; + iBuf += iRealAmt; + iRem -= iRealAmt; + } + } + + return rc; +} + +/* +** Write data to an fs-file. +*/ +static int fsWrite( + sqlite3_file *pFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + int rc = SQLITE_OK; + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + sqlite3_file *pF = pReal->pFile; + + if( p->eType==DATABASE_FILE ){ + if( (iAmt+iOfst+BLOCKSIZE)>(pReal->nBlob-pReal->nJournal) ){ + rc = SQLITE_FULL; + }else{ + rc = pF->pMethods->xWrite(pF, zBuf, iAmt, iOfst+BLOCKSIZE); + if( rc==SQLITE_OK ){ + pReal->nDatabase = MAX(pReal->nDatabase, iAmt+iOfst); + } + } + }else{ + /* Journal file. 
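+    ** The journal region grows backwards from the end of the blob in
+    ** BLOCKSIZE chunks, so journal offset ii maps to blob offset
+    ** nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE, mirroring the
+    ** mapping used by fsRead() above.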
*/ + int iRem = iAmt; + int iBuf = 0; + int ii = iOfst; + while( iRem>0 && rc==SQLITE_OK ){ + int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE; + int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE)); + + if( iRealOff<(pReal->nDatabase+BLOCKSIZE) ){ + rc = SQLITE_FULL; + }else{ + rc = pF->pMethods->xWrite(pF, &((char *)zBuf)[iBuf], iRealAmt,iRealOff); + ii += iRealAmt; + iBuf += iRealAmt; + iRem -= iRealAmt; + } + } + if( rc==SQLITE_OK ){ + pReal->nJournal = MAX(pReal->nJournal, iAmt+iOfst); + } + } + + return rc; +} + +/* +** Truncate an fs-file. +*/ +static int fsTruncate(sqlite3_file *pFile, sqlite_int64 size){ + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + if( p->eType==DATABASE_FILE ){ + pReal->nDatabase = MIN(pReal->nDatabase, size); + }else{ + pReal->nJournal = MIN(pReal->nJournal, size); + } + return SQLITE_OK; +} + +/* +** Sync an fs-file. +*/ +static int fsSync(sqlite3_file *pFile, int flags){ + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + sqlite3_file *pRealFile = pReal->pFile; + int rc = SQLITE_OK; + + if( p->eType==DATABASE_FILE ){ + unsigned char zSize[4]; + zSize[0] = (pReal->nDatabase&0xFF000000)>>24; + zSize[1] = (pReal->nDatabase&0x00FF0000)>>16; + zSize[2] = (pReal->nDatabase&0x0000FF00)>>8; + zSize[3] = (pReal->nDatabase&0x000000FF); + rc = pRealFile->pMethods->xWrite(pRealFile, zSize, 4, 0); + } + if( rc==SQLITE_OK ){ + rc = pRealFile->pMethods->xSync(pRealFile, flags&(~SQLITE_SYNC_DATAONLY)); + } + + return rc; +} + +/* +** Return the current file-size of an fs-file. +*/ +static int fsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = p->pReal; + if( p->eType==DATABASE_FILE ){ + *pSize = pReal->nDatabase; + }else{ + *pSize = pReal->nJournal; + } + return SQLITE_OK; +} + +/* +** Lock an fs-file. +*/ +static int fsLock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} + +/* +** Unlock an fs-file. +*/ +static int fsUnlock(sqlite3_file *pFile, int eLock){ + return SQLITE_OK; +} + +/* +** Check if another file-handle holds a RESERVED lock on an fs-file. +*/ +static int fsCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + *pResOut = 0; + return SQLITE_OK; +} + +/* +** File control method. For custom operations on an fs-file. +*/ +static int fsFileControl(sqlite3_file *pFile, int op, void *pArg){ + return SQLITE_OK; +} + +/* +** Return the sector-size in bytes for an fs-file. +*/ +static int fsSectorSize(sqlite3_file *pFile){ + return BLOCKSIZE; +} + +/* +** Return the device characteristic flags supported by an fs-file. +*/ +static int fsDeviceCharacteristics(sqlite3_file *pFile){ + return 0; +} + +/* +** Open an fs file handle. 
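+**
+** Both the database and its journal are stored inside one large blob:
+** bytes 0-3 of the blob record the database size, database content starts
+** at offset BLOCKSIZE, and journal blocks are allocated backwards from the
+** end of the blob.  Writes that would make the two regions collide fail
+** with SQLITE_FULL.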
+*/ +static int fsOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + fs_vfs_t *pFsVfs = (fs_vfs_t *)pVfs; + fs_file *p = (fs_file *)pFile; + fs_real_file *pReal = 0; + int eType; + int nName; + int rc = SQLITE_OK; + + if( 0==(flags&(SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_MAIN_JOURNAL)) ){ + tmp_file *p = (tmp_file *)pFile; + memset(p, 0, sizeof(*p)); + p->base.pMethods = &tmp_io_methods; + return SQLITE_OK; + } + + eType = ((flags&(SQLITE_OPEN_MAIN_DB))?DATABASE_FILE:JOURNAL_FILE); + p->base.pMethods = &fs_io_methods; + p->eType = eType; + + assert(strlen("-journal")==8); + nName = strlen(zName)-((eType==JOURNAL_FILE)?8:0); + pReal=pFsVfs->pFileList; + for(; pReal && strncmp(pReal->zName, zName, nName); pReal=pReal->pNext); + + if( !pReal ){ + int real_flags = (flags&~(SQLITE_OPEN_MAIN_DB))|SQLITE_OPEN_TEMP_DB; + sqlite3_int64 size; + sqlite3_file *pRealFile; + sqlite3_vfs *pParent = pFsVfs->pParent; + assert(eType==DATABASE_FILE); + + pReal = (fs_real_file *)sqlite3_malloc(sizeof(*pReal)+pParent->szOsFile); + if( !pReal ){ + rc = SQLITE_NOMEM; + goto open_out; + } + memset(pReal, 0, sizeof(*pReal)+pParent->szOsFile); + pReal->zName = zName; + pReal->pFile = (sqlite3_file *)(&pReal[1]); + + rc = pParent->xOpen(pParent, zName, pReal->pFile, real_flags, pOutFlags); + if( rc!=SQLITE_OK ){ + goto open_out; + } + pRealFile = pReal->pFile; + + rc = pRealFile->pMethods->xFileSize(pRealFile, &size); + if( rc!=SQLITE_OK ){ + goto open_out; + } + if( size==0 ){ + rc = pRealFile->pMethods->xWrite(pRealFile, "\0", 1, BLOBSIZE-1); + pReal->nBlob = BLOBSIZE; + }else{ + unsigned char zS[4]; + pReal->nBlob = size; + rc = pRealFile->pMethods->xRead(pRealFile, zS, 4, 0); + pReal->nDatabase = (zS[0]<<24)+(zS[1]<<16)+(zS[2]<<8)+zS[3]; + if( rc==SQLITE_OK ){ + rc = pRealFile->pMethods->xRead(pRealFile, zS, 4, pReal->nBlob-4); + if( zS[0] || zS[1] || zS[2] || zS[3] ){ + pReal->nJournal = pReal->nBlob; + } + } + } + + if( rc==SQLITE_OK ){ + pReal->pNext = pFsVfs->pFileList; + if( pReal->pNext ){ + pReal->pNext->ppThis = &pReal->pNext; + } + pReal->ppThis = &pFsVfs->pFileList; + pFsVfs->pFileList = pReal; + } + } + +open_out: + if( pReal ){ + if( rc==SQLITE_OK ){ + p->pReal = pReal; + pReal->nRef++; + }else{ + if( pReal->pFile->pMethods ){ + pReal->pFile->pMethods->xClose(pReal->pFile); + } + sqlite3_free(pReal); + } + } + return rc; +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int fsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + int rc = SQLITE_OK; + fs_vfs_t *pFsVfs = (fs_vfs_t *)pVfs; + fs_real_file *pReal; + sqlite3_file *pF; + int nName = strlen(zPath) - 8; + + assert(strlen("-journal")==8); + assert(strcmp("-journal", &zPath[nName])==0); + + pReal = pFsVfs->pFileList; + for(; pReal && strncmp(pReal->zName, zPath, nName); pReal=pReal->pNext); + if( pReal ){ + pF = pReal->pFile; + rc = pF->pMethods->xWrite(pF, "\0\0\0\0", 4, pReal->nBlob-BLOCKSIZE); + if( rc==SQLITE_OK ){ + pReal->nJournal = 0; + } + } + return rc; +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. 
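+** (For SQLITE_ACCESS_EXISTS the answer comes from the in-memory file list:
+** a journal is reported as existing only while its recorded size is greater
+** than zero.  Any other flags value is passed through to the parent VFS.)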
+*/ +static int fsAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + fs_vfs_t *pFsVfs = (fs_vfs_t *)pVfs; + fs_real_file *pReal; + int isJournal = 0; + int nName = strlen(zPath); + + if( flags!=SQLITE_ACCESS_EXISTS ){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xAccess(pParent, zPath, flags, pResOut); + } + + assert(strlen("-journal")==8); + if( nName>8 && strcmp("-journal", &zPath[nName-8])==0 ){ + nName -= 8; + isJournal = 1; + } + + pReal = pFsVfs->pFileList; + for(; pReal && strncmp(pReal->zName, zPath, nName); pReal=pReal->pNext); + + *pResOut = (pReal && (!isJournal || pReal->nJournal>0)); + return SQLITE_OK; +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (FS_MAX_PATHNAME+1) bytes. +*/ +static int fsFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zPath, /* Possibly relative input path */ + int nOut, /* Size of output buffer in bytes */ + char *zOut /* Output buffer */ +){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xFullPathname(pParent, zPath, nOut, zOut); +} + +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *fsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xDlOpen(pParent, zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. +*/ +static void fsDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + pParent->xDlError(pParent, nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. +*/ +static void (*fsDlSym(sqlite3_vfs *pVfs, void *pH, const char *zSym))(void){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xDlSym(pParent, pH, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void fsDlClose(sqlite3_vfs *pVfs, void *pHandle){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + pParent->xDlClose(pParent, pHandle); +} + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int fsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xRandomness(pParent, nByte, zBufOut); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int fsSleep(sqlite3_vfs *pVfs, int nMicro){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xSleep(pParent, nMicro); +} + +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int fsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + sqlite3_vfs *pParent = ((fs_vfs_t *)pVfs)->pParent; + return pParent->xCurrentTime(pParent, pTimeOut); +} + +/* +** This procedure registers the fs vfs with SQLite. If the argument is +** true, the fs vfs becomes the new default vfs. It is the only publicly +** available function in this file. 
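+**
+** A typical use from C test code looks roughly like the following
+** (illustrative sketch only):
+**
+**     fs_register();
+**     sqlite3_open_v2("test.db", &db,
+**                     SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, "fs");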
+*/ +int fs_register(void){ + if( fs_vfs.pParent ) return SQLITE_OK; + fs_vfs.pParent = sqlite3_vfs_find(0); + fs_vfs.base.mxPathname = fs_vfs.pParent->mxPathname; + fs_vfs.base.szOsFile = MAX(sizeof(tmp_file), sizeof(fs_file)); + return sqlite3_vfs_register(&fs_vfs.base, 0); +} + +#ifdef SQLITE_TEST + int SqlitetestOnefile_Init() {return fs_register();} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_osinst.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_osinst.c --- sqlite3-3.4.2/src/test_osinst.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_osinst.c 2009-06-12 03:37:49.000000000 +0100 @@ -0,0 +1,1069 @@ +/* +** 2008 April 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains the implementation of an SQLite vfs wrapper that +** adds instrumentation to all vfs and file methods. C and Tcl interfaces +** are provided to control the instrumentation. +** +** $Id: test_osinst.c,v 1.19 2009/01/08 17:57:32 danielk1977 Exp $ +*/ + +#ifdef SQLITE_ENABLE_INSTVFS +/* +** C interface: +** +** sqlite3_instvfs_create() +** sqlite3_instvfs_destroy() +** sqlite3_instvfs_configure() +** +** sqlite3_instvfs_reset() +** sqlite3_instvfs_get() +** +** sqlite3_instvfs_binarylog +** sqlite3_instvfs_binarylog_marker +** +** Tcl interface (omitted if SQLITE_TEST is not set): +** +** sqlite3_instvfs create NAME ?PARENT? +** +** Create and register new vfs called $NAME, which is a wrapper around +** the existing vfs $PARENT. If the PARENT argument is omitted, the +** new vfs is a wrapper around the current default vfs. +** +** sqlite3_instvfs destroy NAME +** +** Deregister and destroy the vfs named $NAME, which must have been +** created by an earlier invocation of [sqlite3_instvfs create]. +** +** sqlite3_instvfs configure NAME SCRIPT +** +** Configure the callback script for the vfs $NAME, which much have +** been created by an earlier invocation of [sqlite3_instvfs create]. +** After a callback script has been configured, it is invoked each +** time a vfs or file method is called by SQLite. Before invoking +** the callback script, five arguments are appended to it: +** +** * The name of the invoked method - i.e. "xRead". +** +** * The time consumed by the method call as measured by +** sqlite3Hwtime() (an integer value) +** +** * A string value with a different meaning for different calls. +** For file methods, the name of the file being operated on. For +** other methods it is the filename argument, if any. +** +** * A 32-bit integer value with a call-specific meaning. +** +** * A 64-bit integer value. For xRead() and xWrite() calls this +** is the file offset being written to or read from. Unused by +** all other calls. +** +** sqlite3_instvfs reset NAME +** +** Zero the internal event counters associated with vfs $NAME, +** which must have been created by an earlier invocation of +** [sqlite3_instvfs create]. +** +** sqlite3_instvfs report NAME +** +** Return the values of the internal event counters associated +** with vfs $NAME. The report format is a list with one element +** for each method call (xWrite, xRead etc.). Each element is +** itself a list with three elements: +** +** * The name of the method call - i.e. 
"xWrite", +** * The total number of calls to the method (an integer). +** * The aggregate time consumed by all calls to the method as +** measured by sqlite3Hwtime() (an integer). +*/ + +#include "sqlite3.h" +#include +#include + +/* +** Maximum pathname length supported by the inst backend. +*/ +#define INST_MAX_PATHNAME 512 + + +/* File methods */ +/* Vfs methods */ +#define OS_ACCESS 1 +#define OS_CHECKRESERVEDLOCK 2 +#define OS_CLOSE 3 +#define OS_CURRENTTIME 4 +#define OS_DELETE 5 +#define OS_DEVCHAR 6 +#define OS_FILECONTROL 7 +#define OS_FILESIZE 8 +#define OS_FULLPATHNAME 9 +#define OS_LOCK 11 +#define OS_OPEN 12 +#define OS_RANDOMNESS 13 +#define OS_READ 14 +#define OS_SECTORSIZE 15 +#define OS_SLEEP 16 +#define OS_SYNC 17 +#define OS_TRUNCATE 18 +#define OS_UNLOCK 19 +#define OS_WRITE 20 + +#define OS_NUMEVENTS 21 + +#define BINARYLOG_STRING 30 +#define BINARYLOG_MARKER 31 + +#define BINARYLOG_PREPARE_V2 64 +#define BINARYLOG_STEP 65 +#define BINARYLOG_FINALIZE 66 + +struct InstVfs { + sqlite3_vfs base; + sqlite3_vfs *pVfs; + + void *pClient; + void (*xDel)(void *); + void (*xCall)(void *, int, int, sqlite3_int64, int, const char *, int, int, sqlite3_int64); + + /* Counters */ + sqlite3_int64 aTime[OS_NUMEVENTS]; + int aCount[OS_NUMEVENTS]; + + int iNextFileId; +}; +typedef struct InstVfs InstVfs; + +#define REALVFS(p) (((InstVfs *)(p))->pVfs) + +typedef struct inst_file inst_file; +struct inst_file { + sqlite3_file base; + sqlite3_file *pReal; + InstVfs *pInstVfs; + const char *zName; + int iFileId; /* File id number */ + int flags; +}; + +/* +** Method declarations for inst_file. +*/ +static int instClose(sqlite3_file*); +static int instRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int instWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int instTruncate(sqlite3_file*, sqlite3_int64 size); +static int instSync(sqlite3_file*, int flags); +static int instFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int instLock(sqlite3_file*, int); +static int instUnlock(sqlite3_file*, int); +static int instCheckReservedLock(sqlite3_file*, int *pResOut); +static int instFileControl(sqlite3_file*, int op, void *pArg); +static int instSectorSize(sqlite3_file*); +static int instDeviceCharacteristics(sqlite3_file*); + +/* +** Method declarations for inst_vfs. 
+*/ +static int instOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int instDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int instAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int instFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *instDlOpen(sqlite3_vfs*, const char *zFilename); +static void instDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*instDlSym(sqlite3_vfs *pVfs, void *p, const char*zSym))(void); +static void instDlClose(sqlite3_vfs*, void*); +static int instRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int instSleep(sqlite3_vfs*, int microseconds); +static int instCurrentTime(sqlite3_vfs*, double*); + +static void binarylog_blob(sqlite3_vfs *, const char *, int, int); + +static sqlite3_vfs inst_vfs = { + 1, /* iVersion */ + sizeof(inst_file), /* szOsFile */ + INST_MAX_PATHNAME, /* mxPathname */ + 0, /* pNext */ + 0, /* zName */ + 0, /* pAppData */ + instOpen, /* xOpen */ + instDelete, /* xDelete */ + instAccess, /* xAccess */ + instFullPathname, /* xFullPathname */ + instDlOpen, /* xDlOpen */ + instDlError, /* xDlError */ + instDlSym, /* xDlSym */ + instDlClose, /* xDlClose */ + instRandomness, /* xRandomness */ + instSleep, /* xSleep */ + instCurrentTime /* xCurrentTime */ +}; + +static sqlite3_io_methods inst_io_methods = { + 1, /* iVersion */ + instClose, /* xClose */ + instRead, /* xRead */ + instWrite, /* xWrite */ + instTruncate, /* xTruncate */ + instSync, /* xSync */ + instFileSize, /* xFileSize */ + instLock, /* xLock */ + instUnlock, /* xUnlock */ + instCheckReservedLock, /* xCheckReservedLock */ + instFileControl, /* xFileControl */ + instSectorSize, /* xSectorSize */ + instDeviceCharacteristics /* xDeviceCharacteristics */ +}; + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +#include "hwtime.h" + +#define OS_TIME_IO(eEvent, A, B, Call) { \ + inst_file *p = (inst_file *)pFile; \ + InstVfs *pInstVfs = p->pInstVfs; \ + int rc; \ + sqlite_uint64 t = sqlite3Hwtime(); \ + rc = Call; \ + t = sqlite3Hwtime() - t; \ + pInstVfs->aTime[eEvent] += t; \ + pInstVfs->aCount[eEvent] += 1; \ + if( pInstVfs->xCall ){ \ + pInstVfs->xCall( \ + pInstVfs->pClient,eEvent,p->iFileId,t,rc,p->zName,p->flags,A,B \ + ); \ + } \ + return rc; \ +} + +#define OS_TIME_VFS(eEvent, Z, flags, A, B, Call) { \ + InstVfs *pInstVfs = (InstVfs *)pVfs; \ + int rc; \ + sqlite_uint64 t = sqlite3Hwtime(); \ + rc = Call; \ + t = sqlite3Hwtime() - t; \ + pInstVfs->aTime[eEvent] += t; \ + pInstVfs->aCount[eEvent] += 1; \ + if( pInstVfs->xCall ){ \ + pInstVfs->xCall(pInstVfs->pClient,eEvent,0, t, rc, Z, flags, A, B); \ + } \ + return rc; \ +} + +/* +** Close an inst-file. +*/ +static int instClose(sqlite3_file *pFile){ + OS_TIME_IO(OS_CLOSE, 0, 0, + (p->pReal->pMethods ? p->pReal->pMethods->xClose(p->pReal) : SQLITE_OK) + ); +} + +/* +** Read data from an inst-file. +*/ +static int instRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)(((inst_file *)pFile)->pInstVfs); + OS_TIME_IO(OS_READ, iAmt, (binarylog_blob(pVfs, zBuf, iAmt, 1), iOfst), + p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst) + ); +} + +/* +** Write data to an inst-file. 
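+** (If a binary log is configured, the data being written is first copied
+** into the log via binarylog_blob(); the underlying xWrite() call is then
+** timed and counted by the OS_TIME_IO macro.)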
+*/ +static int instWrite( + sqlite3_file *pFile, + const void *z, + int iAmt, + sqlite_int64 iOfst +){ + sqlite3_vfs *pVfs = (sqlite3_vfs *)(((inst_file *)pFile)->pInstVfs); + binarylog_blob(pVfs, z, iAmt, 1); + OS_TIME_IO(OS_WRITE, iAmt, iOfst, + p->pReal->pMethods->xWrite(p->pReal, z, iAmt, iOfst) + ); +} + +/* +** Truncate an inst-file. +*/ +static int instTruncate(sqlite3_file *pFile, sqlite_int64 size){ + OS_TIME_IO(OS_TRUNCATE, 0, (int)size, + p->pReal->pMethods->xTruncate(p->pReal, size) + ); +} + +/* +** Sync an inst-file. +*/ +static int instSync(sqlite3_file *pFile, int flags){ + OS_TIME_IO(OS_SYNC, flags, 0, p->pReal->pMethods->xSync(p->pReal, flags)); +} + +/* +** Return the current file-size of an inst-file. +*/ +static int instFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + OS_TIME_IO(OS_FILESIZE, (int)(*pSize), 0, + p->pReal->pMethods->xFileSize(p->pReal, pSize) + ); +} + +/* +** Lock an inst-file. +*/ +static int instLock(sqlite3_file *pFile, int eLock){ + OS_TIME_IO(OS_LOCK, eLock, 0, p->pReal->pMethods->xLock(p->pReal, eLock)); +} + +/* +** Unlock an inst-file. +*/ +static int instUnlock(sqlite3_file *pFile, int eLock){ + OS_TIME_IO(OS_UNLOCK, eLock, 0, p->pReal->pMethods->xUnlock(p->pReal, eLock)); +} + +/* +** Check if another file-handle holds a RESERVED lock on an inst-file. +*/ +static int instCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + OS_TIME_IO(OS_CHECKRESERVEDLOCK, 0, 0, + p->pReal->pMethods->xCheckReservedLock(p->pReal, pResOut) + ); +} + +/* +** File control method. For custom operations on an inst-file. +*/ +static int instFileControl(sqlite3_file *pFile, int op, void *pArg){ + OS_TIME_IO(OS_FILECONTROL, 0, 0, p->pReal->pMethods->xFileControl(p->pReal, op, pArg)); +} + +/* +** Return the sector-size in bytes for an inst-file. +*/ +static int instSectorSize(sqlite3_file *pFile){ + OS_TIME_IO(OS_SECTORSIZE, 0, 0, p->pReal->pMethods->xSectorSize(p->pReal)); +} + +/* +** Return the device characteristic flags supported by an inst-file. +*/ +static int instDeviceCharacteristics(sqlite3_file *pFile){ + OS_TIME_IO(OS_DEVCHAR, 0, 0, p->pReal->pMethods->xDeviceCharacteristics(p->pReal)); +} + +/* +** Open an inst file handle. +*/ +static int instOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + inst_file *p = (inst_file *)pFile; + pFile->pMethods = &inst_io_methods; + p->pReal = (sqlite3_file *)&p[1]; + p->pInstVfs = (InstVfs *)pVfs; + p->zName = zName; + p->flags = flags; + p->iFileId = ++p->pInstVfs->iNextFileId; + + binarylog_blob(pVfs, zName, -1, 0); + OS_TIME_VFS(OS_OPEN, zName, flags, p->iFileId, 0, + REALVFS(pVfs)->xOpen(REALVFS(pVfs), zName, p->pReal, flags, pOutFlags) + ); +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int instDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + binarylog_blob(pVfs, zPath, -1, 0); + OS_TIME_VFS(OS_DELETE, zPath, 0, dirSync, 0, + REALVFS(pVfs)->xDelete(REALVFS(pVfs), zPath, dirSync) + ); +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. 
+*/ +static int instAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + binarylog_blob(pVfs, zPath, -1, 0); + OS_TIME_VFS(OS_ACCESS, zPath, 0, flags, *pResOut, + REALVFS(pVfs)->xAccess(REALVFS(pVfs), zPath, flags, pResOut) + ); +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (INST_MAX_PATHNAME+1) bytes. +*/ +static int instFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + OS_TIME_VFS( OS_FULLPATHNAME, zPath, 0, 0, 0, + REALVFS(pVfs)->xFullPathname(REALVFS(pVfs), zPath, nOut, zOut); + ); +} + +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *instDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return REALVFS(pVfs)->xDlOpen(REALVFS(pVfs), zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. +*/ +static void instDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + REALVFS(pVfs)->xDlError(REALVFS(pVfs), nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. +*/ +static void (*instDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ + return REALVFS(pVfs)->xDlSym(REALVFS(pVfs), p, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void instDlClose(sqlite3_vfs *pVfs, void *pHandle){ + REALVFS(pVfs)->xDlClose(REALVFS(pVfs), pHandle); +} + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int instRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + OS_TIME_VFS( OS_RANDOMNESS, 0, 0, nByte, 0, + REALVFS(pVfs)->xRandomness(REALVFS(pVfs), nByte, zBufOut); + ); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int instSleep(sqlite3_vfs *pVfs, int nMicro){ + OS_TIME_VFS( OS_SLEEP, 0, 0, nMicro, 0, + REALVFS(pVfs)->xSleep(REALVFS(pVfs), nMicro) + ); +} + +/* +** Return the current time as a Julian Day number in *pTimeOut. 
+*/ +static int instCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + OS_TIME_VFS( OS_CURRENTTIME, 0, 0, 0, 0, + REALVFS(pVfs)->xCurrentTime(REALVFS(pVfs), pTimeOut) + ); +} + +sqlite3_vfs *sqlite3_instvfs_create(const char *zName, const char *zParent){ + int nByte; + InstVfs *p; + sqlite3_vfs *pParent; + + pParent = sqlite3_vfs_find(zParent); + if( !pParent ){ + return 0; + } + + nByte = strlen(zName) + 1 + sizeof(InstVfs); + p = (InstVfs *)sqlite3_malloc(nByte); + if( p ){ + char *zCopy = (char *)&p[1]; + memset(p, 0, nByte); + memcpy(p, &inst_vfs, sizeof(sqlite3_vfs)); + p->pVfs = pParent; + memcpy(zCopy, zName, strlen(zName)); + p->base.zName = (const char *)zCopy; + p->base.szOsFile += pParent->szOsFile; + sqlite3_vfs_register((sqlite3_vfs *)p, 0); + } + + return (sqlite3_vfs *)p; +} + +void sqlite3_instvfs_configure( + sqlite3_vfs *pVfs, + void (*xCall)( + void*, + int, /* File id */ + int, /* Event code */ + sqlite3_int64, + int, /* Return code */ + const char*, /* File name */ + int, + int, + sqlite3_int64 + ), + void *pClient, + void (*xDel)(void *) +){ + InstVfs *p = (InstVfs *)pVfs; + assert( pVfs->xOpen==instOpen ); + if( p->xDel ){ + p->xDel(p->pClient); + } + p->xCall = xCall; + p->xDel = xDel; + p->pClient = pClient; +} + +void sqlite3_instvfs_destroy(sqlite3_vfs *pVfs){ + if( pVfs ){ + sqlite3_vfs_unregister(pVfs); + sqlite3_instvfs_configure(pVfs, 0, 0, 0); + sqlite3_free(pVfs); + } +} + +void sqlite3_instvfs_reset(sqlite3_vfs *pVfs){ + InstVfs *p = (InstVfs *)pVfs; + assert( pVfs->xOpen==instOpen ); + memset(p->aTime, 0, sizeof(sqlite3_int64)*OS_NUMEVENTS); + memset(p->aCount, 0, sizeof(int)*OS_NUMEVENTS); +} + +const char *sqlite3_instvfs_name(int eEvent){ + const char *zEvent = 0; + + switch( eEvent ){ + case OS_CLOSE: zEvent = "xClose"; break; + case OS_READ: zEvent = "xRead"; break; + case OS_WRITE: zEvent = "xWrite"; break; + case OS_TRUNCATE: zEvent = "xTruncate"; break; + case OS_SYNC: zEvent = "xSync"; break; + case OS_FILESIZE: zEvent = "xFilesize"; break; + case OS_LOCK: zEvent = "xLock"; break; + case OS_UNLOCK: zEvent = "xUnlock"; break; + case OS_CHECKRESERVEDLOCK: zEvent = "xCheckReservedLock"; break; + case OS_FILECONTROL: zEvent = "xFileControl"; break; + case OS_SECTORSIZE: zEvent = "xSectorSize"; break; + case OS_DEVCHAR: zEvent = "xDeviceCharacteristics"; break; + case OS_OPEN: zEvent = "xOpen"; break; + case OS_DELETE: zEvent = "xDelete"; break; + case OS_ACCESS: zEvent = "xAccess"; break; + case OS_FULLPATHNAME: zEvent = "xFullPathname"; break; + case OS_RANDOMNESS: zEvent = "xRandomness"; break; + case OS_SLEEP: zEvent = "xSleep"; break; + case OS_CURRENTTIME: zEvent = "xCurrentTime"; break; + } + + return zEvent; +} + +void sqlite3_instvfs_get( + sqlite3_vfs *pVfs, + int eEvent, + const char **pzEvent, + sqlite3_int64 *pnClick, + int *pnCall +){ + InstVfs *p = (InstVfs *)pVfs; + assert( pVfs->xOpen==instOpen ); + if( eEvent<1 || eEvent>=OS_NUMEVENTS ){ + *pzEvent = 0; + *pnClick = 0; + *pnCall = 0; + return; + } + + *pzEvent = sqlite3_instvfs_name(eEvent); + *pnClick = p->aTime[eEvent]; + *pnCall = p->aCount[eEvent]; +} + +#define BINARYLOG_BUFFERSIZE 8192 + +struct InstVfsBinaryLog { + int nBuf; + char *zBuf; + sqlite3_int64 iOffset; + int log_data; + sqlite3_file *pOut; + char *zOut; /* Log file name */ +}; +typedef struct InstVfsBinaryLog InstVfsBinaryLog; + +static void put32bits(unsigned char *p, unsigned int v){ + p[0] = v>>24; + p[1] = v>>16; + p[2] = v>>8; + p[3] = v; +} + +static void binarylog_flush(InstVfsBinaryLog *pLog){ + 
sqlite3_file *pFile = pLog->pOut; + +#ifdef SQLITE_TEST + extern int sqlite3_io_error_pending; + extern int sqlite3_io_error_persist; + extern int sqlite3_diskfull_pending; + + int pending = sqlite3_io_error_pending; + int persist = sqlite3_io_error_persist; + int diskfull = sqlite3_diskfull_pending; + + sqlite3_io_error_pending = 0; + sqlite3_io_error_persist = 0; + sqlite3_diskfull_pending = 0; +#endif + + pFile->pMethods->xWrite(pFile, pLog->zBuf, pLog->nBuf, pLog->iOffset); + pLog->iOffset += pLog->nBuf; + pLog->nBuf = 0; + +#ifdef SQLITE_TEST + sqlite3_io_error_pending = pending; + sqlite3_io_error_persist = persist; + sqlite3_diskfull_pending = diskfull; +#endif +} + +static void binarylog_xcall( + void *p, + int eEvent, + int iFileId, + sqlite3_int64 nClick, + int return_code, + const char *zName, + int flags, + int nByte, + sqlite3_int64 iOffset +){ + InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)p; + unsigned char *zRec; + if( (28+pLog->nBuf)>BINARYLOG_BUFFERSIZE ){ + binarylog_flush(pLog); + } + zRec = (unsigned char *)&pLog->zBuf[pLog->nBuf]; + put32bits(&zRec[0], eEvent); + put32bits(&zRec[4], (int)iFileId); + put32bits(&zRec[8], (int)nClick); + put32bits(&zRec[12], return_code); + put32bits(&zRec[16], flags); + put32bits(&zRec[20], nByte); + put32bits(&zRec[24], (int)iOffset); + pLog->nBuf += 28; +} + +static void binarylog_xdel(void *p){ + /* Close the log file and free the memory allocated for the + ** InstVfsBinaryLog structure. + */ + InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)p; + sqlite3_file *pFile = pLog->pOut; + if( pLog->nBuf ){ + binarylog_flush(pLog); + } + pFile->pMethods->xClose(pFile); + sqlite3_free(pLog->pOut); + sqlite3_free(pLog->zBuf); + sqlite3_free(pLog); +} + +static void binarylog_blob( + sqlite3_vfs *pVfs, + const char *zBlob, + int nBlob, + int isBinary +){ + InstVfsBinaryLog *pLog; + InstVfs *pInstVfs = (InstVfs *)pVfs; + + if( pVfs->xOpen!=instOpen || pInstVfs->xCall!=binarylog_xcall ){ + return; + } + pLog = (InstVfsBinaryLog *)pInstVfs->pClient; + if( zBlob && (!isBinary || pLog->log_data) ){ + unsigned char *zRec; + int nWrite; + + if( nBlob<0 ){ + nBlob = strlen(zBlob); + } + nWrite = nBlob + 28; + + if( (nWrite+pLog->nBuf)>BINARYLOG_BUFFERSIZE ){ + binarylog_flush(pLog); + } + + zRec = (unsigned char *)&pLog->zBuf[pLog->nBuf]; + memset(zRec, 0, nWrite); + put32bits(&zRec[0], BINARYLOG_STRING); + put32bits(&zRec[4], (int)nBlob); + put32bits(&zRec[8], (int)isBinary); + memcpy(&zRec[28], zBlob, nBlob); + pLog->nBuf += nWrite; + } +} + +void sqlite3_instvfs_binarylog_call( + sqlite3_vfs *pVfs, + int eEvent, + sqlite3_int64 nClick, + int return_code, + const char *zString +){ + InstVfs *pInstVfs = (InstVfs *)pVfs; + InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)pInstVfs->pClient; + + if( zString ){ + binarylog_blob(pVfs, zString, -1, 0); + } + binarylog_xcall(pLog, eEvent, 0, nClick, return_code, 0, 0, 0, 0); +} + +void sqlite3_instvfs_binarylog_marker( + sqlite3_vfs *pVfs, + const char *zMarker +){ + InstVfs *pInstVfs = (InstVfs *)pVfs; + InstVfsBinaryLog *pLog = (InstVfsBinaryLog *)pInstVfs->pClient; + binarylog_blob(pVfs, zMarker, -1, 0); + binarylog_xcall(pLog, BINARYLOG_MARKER, 0, 0, 0, 0, 0, 0, 0); +} + +sqlite3_vfs *sqlite3_instvfs_binarylog( + const char *zVfs, + const char *zParentVfs, + const char *zLog, + int log_data +){ + InstVfsBinaryLog *p; + sqlite3_vfs *pVfs; + sqlite3_vfs *pParent; + int nByte; + int flags; + int rc; + + pParent = sqlite3_vfs_find(zParentVfs); + if( !pParent ){ + return 0; + } + + nByte = sizeof(InstVfsBinaryLog) + 
pParent->mxPathname+1; + p = (InstVfsBinaryLog *)sqlite3_malloc(nByte); + memset(p, 0, nByte); + p->zBuf = sqlite3_malloc(BINARYLOG_BUFFERSIZE); + p->zOut = (char *)&p[1]; + p->pOut = (sqlite3_file *)sqlite3_malloc(pParent->szOsFile); + p->log_data = log_data; + pParent->xFullPathname(pParent, zLog, pParent->mxPathname, p->zOut); + flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MASTER_JOURNAL; + pParent->xDelete(pParent, p->zOut, 0); + rc = pParent->xOpen(pParent, p->zOut, p->pOut, flags, &flags); + if( rc==SQLITE_OK ){ + memcpy(p->zBuf, "sqlite_ostrace1.....", 20); + p->iOffset = 0; + p->nBuf = 20; + } + if( rc ){ + binarylog_xdel(p); + return 0; + } + + pVfs = sqlite3_instvfs_create(zVfs, zParentVfs); + if( pVfs ){ + sqlite3_instvfs_configure(pVfs, binarylog_xcall, p, binarylog_xdel); + } + + return pVfs; +} +#endif /* SQLITE_ENABLE_INSTVFS */ + +/************************************************************************** +*************************************************************************** +** Tcl interface starts here. +*/ +#if SQLITE_TEST + +#include + +#ifdef SQLITE_ENABLE_INSTVFS +struct InstVfsCall { + Tcl_Interp *interp; + Tcl_Obj *pScript; +}; +typedef struct InstVfsCall InstVfsCall; + +static void test_instvfs_xcall( + void *p, + int eEvent, + int iFileId, + sqlite3_int64 nClick, + int return_code, + const char *zName, + int flags, + int nByte, + sqlite3_int64 iOffset +){ + int rc; + InstVfsCall *pCall = (InstVfsCall *)p; + Tcl_Obj *pObj = Tcl_DuplicateObj( pCall->pScript); + const char *zEvent = sqlite3_instvfs_name(eEvent); + + Tcl_IncrRefCount(pObj); + Tcl_ListObjAppendElement(0, pObj, Tcl_NewStringObj(zEvent, -1)); + Tcl_ListObjAppendElement(0, pObj, Tcl_NewWideIntObj(nClick)); + Tcl_ListObjAppendElement(0, pObj, Tcl_NewStringObj(zName, -1)); + Tcl_ListObjAppendElement(0, pObj, Tcl_NewIntObj(nByte)); + Tcl_ListObjAppendElement(0, pObj, Tcl_NewWideIntObj(iOffset)); + + rc = Tcl_EvalObjEx(pCall->interp, pObj, TCL_EVAL_GLOBAL|TCL_EVAL_DIRECT); + if( rc ){ + Tcl_BackgroundError(pCall->interp); + } + Tcl_DecrRefCount(pObj); +} + +static void test_instvfs_xdel(void *p){ + InstVfsCall *pCall = (InstVfsCall *)p; + Tcl_DecrRefCount(pCall->pScript); + sqlite3_free(pCall); +} + +static int test_sqlite3_instvfs( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + static const char *IV_strs[] = + { "create", "destroy", "reset", "report", "configure", "binarylog", "marker", 0 }; + enum IV_enum { IV_CREATE, IV_DESTROY, IV_RESET, IV_REPORT, IV_CONFIGURE, IV_BINARYLOG, IV_MARKER }; + int iSub; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUB-COMMAND ..."); + } + if( Tcl_GetIndexFromObj(interp, objv[1], IV_strs, "sub-command", 0, &iSub) ){ + return TCL_ERROR; + } + + switch( (enum IV_enum)iSub ){ + case IV_CREATE: { + char *zParent = 0; + sqlite3_vfs *p; + int isDefault = 0; + if( objc>2 && 0==strcmp("-default", Tcl_GetString(objv[2])) ){ + isDefault = 1; + } + if( (objc-isDefault)!=4 && (objc-isDefault)!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "?-default? 
NAME ?PARENT-VFS?"); + return TCL_ERROR; + } + if( objc==(4+isDefault) ){ + zParent = Tcl_GetString(objv[3+isDefault]); + } + p = sqlite3_instvfs_create(Tcl_GetString(objv[2+isDefault]), zParent); + if( !p ){ + Tcl_AppendResult(interp, "error creating vfs ", 0); + return TCL_ERROR; + } + if( isDefault ){ + sqlite3_vfs_register(p, 1); + } + Tcl_SetObjResult(interp, objv[2]); + break; + } + case IV_BINARYLOG: { + char *zName = 0; + char *zLog = 0; + char *zParent = 0; + sqlite3_vfs *p; + int isDefault = 0; + int isLogdata = 0; + int argbase = 2; + + for(argbase=2; argbase<(objc-2); argbase++){ + if( 0==strcmp("-default", Tcl_GetString(objv[argbase])) ){ + isDefault = 1; + } + else if( 0==strcmp("-parent", Tcl_GetString(objv[argbase])) ){ + argbase++; + zParent = Tcl_GetString(objv[argbase]); + } + else if( 0==strcmp("-logdata", Tcl_GetString(objv[argbase])) ){ + isLogdata = 1; + }else{ + break; + } + } + + if( (objc-argbase)!=2 ){ + Tcl_WrongNumArgs( + interp, 2, objv, "?-default? ?-parent VFS? ?-logdata? NAME LOGFILE" + ); + return TCL_ERROR; + } + zName = Tcl_GetString(objv[argbase]); + zLog = Tcl_GetString(objv[argbase+1]); + p = sqlite3_instvfs_binarylog(zName, zParent, zLog, isLogdata); + if( !p ){ + Tcl_AppendResult(interp, "error creating vfs ", 0); + return TCL_ERROR; + } + if( isDefault ){ + sqlite3_vfs_register(p, 1); + } + Tcl_SetObjResult(interp, objv[2]); + break; + } + + case IV_MARKER: { + sqlite3_vfs *p; + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 2, objv, "VFS MARKER"); + return TCL_ERROR; + } + p = sqlite3_vfs_find(Tcl_GetString(objv[2])); + if( !p || p->xOpen!=instOpen ){ + Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); + return TCL_ERROR; + } + sqlite3_instvfs_binarylog_marker(p, Tcl_GetString(objv[3])); + Tcl_ResetResult(interp); + break; + } + + case IV_CONFIGURE: { + InstVfsCall *pCall; + + sqlite3_vfs *p; + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 2, objv, "NAME SCRIPT"); + return TCL_ERROR; + } + p = sqlite3_vfs_find(Tcl_GetString(objv[2])); + if( !p || p->xOpen!=instOpen ){ + Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); + return TCL_ERROR; + } + + if( strlen(Tcl_GetString(objv[3])) ){ + pCall = (InstVfsCall *)sqlite3_malloc(sizeof(InstVfsCall)); + pCall->interp = interp; + pCall->pScript = Tcl_DuplicateObj(objv[3]); + Tcl_IncrRefCount(pCall->pScript); + sqlite3_instvfs_configure(p, + test_instvfs_xcall, (void *)pCall, test_instvfs_xdel + ); + }else{ + sqlite3_instvfs_configure(p, 0, 0, 0); + } + break; + } + + case IV_REPORT: + case IV_DESTROY: + case IV_RESET: { + sqlite3_vfs *p; + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 2, objv, "NAME"); + return TCL_ERROR; + } + p = sqlite3_vfs_find(Tcl_GetString(objv[2])); + if( !p || p->xOpen!=instOpen ){ + Tcl_AppendResult(interp, "no such vfs: ", Tcl_GetString(objv[2]), 0); + return TCL_ERROR; + } + + if( ((enum IV_enum)iSub)==IV_DESTROY ){ + sqlite3_instvfs_destroy(p); + } + if( ((enum IV_enum)iSub)==IV_RESET ){ + sqlite3_instvfs_reset(p); + } + if( ((enum IV_enum)iSub)==IV_REPORT ){ + int ii; + Tcl_Obj *pRet = Tcl_NewObj(); + + const char *zName = (char *)1; + sqlite3_int64 nClick; + int nCall; + for(ii=1; zName; ii++){ + sqlite3_instvfs_get(p, ii, &zName, &nClick, &nCall); + if( zName ){ + Tcl_Obj *pElem = Tcl_NewObj(); + Tcl_ListObjAppendElement(0, pElem, Tcl_NewStringObj(zName, -1)); + Tcl_ListObjAppendElement(0, pElem, Tcl_NewIntObj(nCall)); + Tcl_ListObjAppendElement(0, pElem, Tcl_NewWideIntObj(nClick)); + Tcl_ListObjAppendElement(0, pRet, pElem); + } + } + + 
Tcl_SetObjResult(interp, pRet); + } + + break; + } + } + + return TCL_OK; +} +#endif /* SQLITE_ENABLE_INSTVFS */ + +/* Alternative implementation of sqlite3_instvfs when the real +** implementation is unavailable. +*/ +#ifndef SQLITE_ENABLE_INSTVFS +static int test_sqlite3_instvfs( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_AppendResult(interp, + "not compiled with -DSQLITE_ENABLE_INSTVFS; sqlite3_instvfs is " + "unavailable", (char*)0); + return TCL_ERROR; +} +#endif /* !defined(SQLITE_ENABLE_INSTVFS) */ + +int SqlitetestOsinst_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "sqlite3_instvfs", test_sqlite3_instvfs, 0, 0); + return TCL_OK; +} + +#endif /* SQLITE_TEST */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_pcache.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_pcache.c --- sqlite3-3.4.2/src/test_pcache.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_pcache.c 2009-06-25 12:23:18.000000000 +0100 @@ -0,0 +1,460 @@ +/* +** 2008 November 18 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains code used for testing the SQLite system. +** None of the code in this file goes into a deliverable build. +** +** This file contains an application-defined pager cache +** implementation that can be plugged in in place of the +** default pcache. This alternative pager cache will throw +** some errors that the default cache does not. +** +** This pagecache implementation is designed for simplicity +** not speed. +** +** $Id: test_pcache.c,v 1.3 2009/04/11 11:38:54 drh Exp $ +*/ +#include "sqlite3.h" +#include +#include + +/* +** Global data used by this test implementation. There is no +** mutexing, which means this page cache will not work in a +** multi-threaded test. +*/ +typedef struct testpcacheGlobalType testpcacheGlobalType; +struct testpcacheGlobalType { + void *pDummy; /* Dummy allocation to simulate failures */ + int nInstance; /* Number of current instances */ + unsigned discardChance; /* Chance of discarding on an unpin (0-100) */ + unsigned prngSeed; /* Seed for the PRNG */ + unsigned highStress; /* Call xStress agressively */ +}; +static testpcacheGlobalType testpcacheGlobal; + +/* +** Initializer. +** +** Verify that the initializer is only called when the system is +** uninitialized. Allocate some memory and report SQLITE_NOMEM if +** the allocation fails. This provides a means to test the recovery +** from a failed initialization attempt. It also verifies that the +** the destructor always gets call - otherwise there would be a +** memory leak. +*/ +static int testpcacheInit(void *pArg){ + assert( pArg==(void*)&testpcacheGlobal ); + assert( testpcacheGlobal.pDummy==0 ); + assert( testpcacheGlobal.nInstance==0 ); + testpcacheGlobal.pDummy = sqlite3_malloc(10); + return testpcacheGlobal.pDummy==0 ? SQLITE_NOMEM : SQLITE_OK; +} + +/* +** Destructor +** +** Verify that this is only called after initialization. +** Free the memory allocated by the initializer. 
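+** This is only safe once every cache instance has been destroyed, which
+** is why the code below asserts that nInstance has dropped back to zero.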
+*/ +static void testpcacheShutdown(void *pArg){ + assert( pArg==(void*)&testpcacheGlobal ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance==0 ); + sqlite3_free( testpcacheGlobal.pDummy ); + testpcacheGlobal.pDummy = 0; +} + +/* +** Number of pages in a cache. +** +** The number of pages is a hard upper bound in this test module. +** If more pages are requested, sqlite3PcacheFetch() returns NULL. +** +** If testing with in-memory temp tables, provide a larger pcache. +** Some of the test cases need this. +*/ +#if defined(SQLITE_TEMP_STORE) && SQLITE_TEMP_STORE>=2 +# define TESTPCACHE_NPAGE 499 +#else +# define TESTPCACHE_NPAGE 217 +#endif +#define TESTPCACHE_RESERVE 17 + +/* +** Magic numbers used to determine validity of the page cache. +*/ +#define TESTPCACHE_VALID 0x364585fd +#define TESTPCACHE_CLEAR 0xd42670d4 + +/* +** Private implementation of a page cache. +*/ +typedef struct testpcache testpcache; +struct testpcache { + int szPage; /* Size of each page. Multiple of 8. */ + int bPurgeable; /* True if the page cache is purgeable */ + int nFree; /* Number of unused slots in a[] */ + int nPinned; /* Number of pinned slots in a[] */ + unsigned iRand; /* State of the PRNG */ + unsigned iMagic; /* Magic number for sanity checking */ + struct testpcachePage { + unsigned key; /* The key for this page. 0 means unallocated */ + int isPinned; /* True if the page is pinned */ + void *pData; /* Data for this page */ + } a[TESTPCACHE_NPAGE]; /* All pages in the cache */ +}; + +/* +** Get a random number using the PRNG in the given page cache. +*/ +static unsigned testpcacheRandom(testpcache *p){ + unsigned x = 0; + int i; + for(i=0; i<4; i++){ + p->iRand = (p->iRand*69069 + 5); + x = (x<<8) | ((p->iRand>>16)&0xff); + } + return x; +} + + +/* +** Allocate a new page cache instance. +*/ +static sqlite3_pcache *testpcacheCreate(int szPage, int bPurgeable){ + int nMem; + char *x; + testpcache *p; + int i; + assert( testpcacheGlobal.pDummy!=0 ); + szPage = (szPage+7)&~7; + nMem = sizeof(testpcache) + TESTPCACHE_NPAGE*szPage; + p = sqlite3_malloc( nMem ); + if( p==0 ) return 0; + x = (char*)&p[1]; + p->szPage = szPage; + p->nFree = TESTPCACHE_NPAGE; + p->nPinned = 0; + p->iRand = testpcacheGlobal.prngSeed; + p->bPurgeable = bPurgeable; + p->iMagic = TESTPCACHE_VALID; + for(i=0; ia[i].key = 0; + p->a[i].isPinned = 0; + p->a[i].pData = (void*)x; + } + testpcacheGlobal.nInstance++; + return (sqlite3_pcache*)p; +} + +/* +** Set the cache size +*/ +static void testpcacheCachesize(sqlite3_pcache *pCache, int newSize){ + testpcache *p = (testpcache*)pCache; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( newSize>=1 ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); +} + +/* +** Return the number of pages in the cache that are being used. +** This includes both pinned and unpinned pages. +*/ +static int testpcachePagecount(sqlite3_pcache *pCache){ + testpcache *p = (testpcache*)pCache; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + return TESTPCACHE_NPAGE - p->nFree; +} + +/* +** Fetch a page. +*/ +static void *testpcacheFetch( + sqlite3_pcache *pCache, + unsigned key, + int createFlag +){ + testpcache *p = (testpcache*)pCache; + int i, j; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + + /* See if the page is already in cache. 
Return immediately if it is */ + for(i=0; i<TESTPCACHE_NPAGE; i++){ + if( p->a[i].key==key ){ + if( !p->a[i].isPinned ){ + p->nPinned++; + assert( p->nPinned <= TESTPCACHE_NPAGE - p->nFree ); + p->a[i].isPinned = 1; + } + return p->a[i].pData; + } + } + + /* If createFlag is 0, never allocate a new page */ + if( createFlag==0 ){ + return 0; + } + + /* If no pages are available, always fail */ + if( p->nPinned==TESTPCACHE_NPAGE ){ + return 0; + } + + /* Do not allocate the last TESTPCACHE_RESERVE pages unless createFlag is 2 */ + if( p->nPinned>=TESTPCACHE_NPAGE-TESTPCACHE_RESERVE && createFlag<2 ){ + return 0; + } + + /* Do not allocate if highStress is enabled and createFlag is not 2. + ** + ** The highStress setting causes pagerStress() to be called much more + ** often, which exercises the pager logic more intensely. + */ + if( testpcacheGlobal.highStress && createFlag<2 ){ + return 0; + } + + /* Find a free page to allocate if there are any free pages. + ** Withhold TESTPCACHE_RESERVE free pages until createFlag is 2. + */ + if( p->nFree>TESTPCACHE_RESERVE || (createFlag==2 && p->nFree>0) ){ + j = testpcacheRandom(p) % TESTPCACHE_NPAGE; + for(i=0; i<TESTPCACHE_NPAGE; i++, j = (j+1)%TESTPCACHE_NPAGE){ + if( p->a[j].key==0 ){ + p->a[j].key = key; + p->a[j].isPinned = 1; + memset(p->a[j].pData, 0, p->szPage); + p->nPinned++; + p->nFree--; + assert( p->nPinned <= TESTPCACHE_NPAGE - p->nFree ); + return p->a[j].pData; + } + } + + /* The prior loop always finds a freepage to allocate */ + assert( 0 ); + } + + /* If this cache is not purgeable then we have to fail. + */ + if( p->bPurgeable==0 ){ + return 0; + } + + /* If there are no free pages, recycle a page. The page to + ** recycle is selected at random from all unpinned pages. + */ + j = testpcacheRandom(p) % TESTPCACHE_NPAGE; + for(i=0; i<TESTPCACHE_NPAGE; i++, j = (j+1)%TESTPCACHE_NPAGE){ + if( p->a[j].key>0 && p->a[j].isPinned==0 ){ + p->a[j].key = key; + p->a[j].isPinned = 1; + memset(p->a[j].pData, 0, p->szPage); + p->nPinned++; + assert( p->nPinned <= TESTPCACHE_NPAGE - p->nFree ); + return p->a[j].pData; + } + } + + /* The previous loop always finds a page to recycle. */ + assert(0); + return 0; +} + +/* +** Unpin a page. +*/ +static void testpcacheUnpin( + sqlite3_pcache *pCache, + void *pOldPage, + int discard +){ + testpcache *p = (testpcache*)pCache; + int i; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + + /* Randomly discard pages as they are unpinned according to the + ** discardChance setting. If discardChance is 0, the random discard + ** never happens. If discardChance is 100, it always happens. + */ + if( p->bPurgeable + && (100-testpcacheGlobal.discardChance) <= (testpcacheRandom(p)%100) + ){ + discard = 1; + } + + for(i=0; i<TESTPCACHE_NPAGE; i++){ + if( p->a[i].pData==pOldPage ){ + /* The pOldPage pointer always points to a pinned page */ + assert( p->a[i].isPinned ); + p->a[i].isPinned = 0; + p->nPinned--; + assert( p->nPinned>=0 ); + if( discard ){ + p->a[i].key = 0; + p->nFree++; + assert( p->nFree<=TESTPCACHE_NPAGE ); + } + return; + } + } + + /* The pOldPage pointer always points to a valid page */ + assert( 0 ); +} + + +/* +** Rekey a single page. +*/ +static void testpcacheRekey( + sqlite3_pcache *pCache, + void *pOldPage, + unsigned oldKey, + unsigned newKey +){ + testpcache *p = (testpcache*)pCache; + int i; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + + /* If there already exists another page at newKey, verify that + ** the other page is unpinned and discard it. 
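+  ** For example, rekeying a page from key 5 to key 7 while some other
+  ** unpinned page already holds key 7 first discards that other page
+  ** (returning its slot to the free list) and only then assigns key 7.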
+ */ + for(i=0; i<TESTPCACHE_NPAGE; i++){ + if( p->a[i].key==newKey ){ + /* The new key is never a page that is already pinned */ + assert( p->a[i].isPinned==0 ); + p->a[i].key = 0; + p->nFree++; + assert( p->nFree<=TESTPCACHE_NPAGE ); + break; + } + } + + /* Find the page to be rekeyed and rekey it. + */ + for(i=0; i<TESTPCACHE_NPAGE; i++){ + if( p->a[i].key==oldKey ){ + /* The oldKey and pOldPage parameters match */ + assert( p->a[i].pData==pOldPage ); + /* Page to be rekeyed must be pinned */ + assert( p->a[i].isPinned ); + p->a[i].key = newKey; + return; + } + } + + /* Rekey is always given a valid page to work with */ + assert( 0 ); +} + + +/* +** Truncate the page cache. Every page with a key of iLimit or larger +** is discarded. +*/ +static void testpcacheTruncate(sqlite3_pcache *pCache, unsigned iLimit){ + testpcache *p = (testpcache*)pCache; + unsigned int i; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + for(i=0; i<TESTPCACHE_NPAGE; i++){ + if( p->a[i].key>=iLimit ){ + p->a[i].key = 0; + if( p->a[i].isPinned ){ + p->nPinned--; + assert( p->nPinned>=0 ); + } + p->nFree++; + assert( p->nFree<=TESTPCACHE_NPAGE ); + } + } +} + +/* +** Destroy a page cache. +*/ +static void testpcacheDestroy(sqlite3_pcache *pCache){ + testpcache *p = (testpcache*)pCache; + assert( p->iMagic==TESTPCACHE_VALID ); + assert( testpcacheGlobal.pDummy!=0 ); + assert( testpcacheGlobal.nInstance>0 ); + p->iMagic = TESTPCACHE_CLEAR; + sqlite3_free(p); + testpcacheGlobal.nInstance--; +} + + +/* +** Invoke this routine to register or unregister the testing pager cache +** implemented by this file. +** +** Install the test pager cache if installFlag is 1 and uninstall it if +** installFlag is 0. +** +** When installing, discardChance is a number between 0 and 100 that +** indicates the probability of discarding a page when unpinning the +** page. 0 means never discard (unless the discard flag is set). +** 100 means always discard. +*/ +void installTestPCache( + int installFlag, /* True to install. False to uninstall. */ + unsigned discardChance, /* 0-100. Chance to discard on unpin */ + unsigned prngSeed, /* Seed for the PRNG */ + unsigned highStress /* Call xStress agressively */ +){ + static const sqlite3_pcache_methods testPcache = { + (void*)&testpcacheGlobal, + testpcacheInit, + testpcacheShutdown, + testpcacheCreate, + testpcacheCachesize, + testpcachePagecount, + testpcacheFetch, + testpcacheUnpin, + testpcacheRekey, + testpcacheTruncate, + testpcacheDestroy, + }; + static sqlite3_pcache_methods defaultPcache; + static int isInstalled = 0; + + assert( testpcacheGlobal.nInstance==0 ); + assert( testpcacheGlobal.pDummy==0 ); + assert( discardChance<=100 ); + testpcacheGlobal.discardChance = discardChance; + testpcacheGlobal.prngSeed = prngSeed ^ (prngSeed<<16); + testpcacheGlobal.highStress = highStress; + if( installFlag!=isInstalled ){ + if( installFlag ){ + sqlite3_config(SQLITE_CONFIG_GETPCACHE, &defaultPcache); + assert( defaultPcache.xCreate!=testpcacheCreate ); + sqlite3_config(SQLITE_CONFIG_PCACHE, &testPcache); + }else{ + assert( defaultPcache.xCreate!=0 ); + sqlite3_config(SQLITE_CONFIG_PCACHE, &defaultPcache); + } + isInstalled = installFlag; + } +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_schema.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_schema.c --- sqlite3-3.4.2/src/test_schema.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/test_schema.c 2009-05-05 04:39:59.000000000 +0100 @@ -13,7 +13,7 @@ ** is not included in the SQLite library. It is used for automated ** testing of the SQLite library. 
** -** $Id: test_schema.c,v 1.12 2007/06/27 16:26:07 danielk1977 Exp $ +** $Id: test_schema.c,v 1.15 2008/07/07 14:50:14 drh Exp $ */ /* The code in this file defines a sqlite3 virtual-table module that @@ -39,13 +39,9 @@ #ifdef SQLITE_TEST #include "sqliteInt.h" #include "tcl.h" - #define MALLOC(x) sqliteMallocRaw(x) - #define FREE(x) sqliteFree(x) #else #include "sqlite3ext.h" SQLITE_EXTENSION_INIT1 - #define MALLOC(x) malloc(x) - #define FREE(x) free(x) #endif #include @@ -71,10 +67,15 @@ }; /* +** None of this works unless we have virtual tables. +*/ +#ifndef SQLITE_OMIT_VIRTUALTABLE + +/* ** Table destructor for the schema module. */ static int schemaDestroy(sqlite3_vtab *pVtab){ - FREE(pVtab); + sqlite3_free(pVtab); return 0; } @@ -89,7 +90,7 @@ char **pzErr ){ int rc = SQLITE_NOMEM; - schema_vtab *pVtab = MALLOC(sizeof(schema_vtab)); + schema_vtab *pVtab = sqlite3_malloc(sizeof(schema_vtab)); if( pVtab ){ memset(pVtab, 0, sizeof(schema_vtab)); pVtab->db = db; @@ -107,7 +108,7 @@ static int schemaOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ int rc = SQLITE_NOMEM; schema_cursor *pCur; - pCur = MALLOC(sizeof(schema_cursor)); + pCur = sqlite3_malloc(sizeof(schema_cursor)); if( pCur ){ memset(pCur, 0, sizeof(schema_cursor)); *ppCursor = (sqlite3_vtab_cursor *)pCur; @@ -124,7 +125,7 @@ sqlite3_finalize(pCur->pDbList); sqlite3_finalize(pCur->pTableList); sqlite3_finalize(pCur->pColumnList); - FREE(pCur); + sqlite3_free(pCur); return SQLITE_OK; } @@ -291,16 +292,14 @@ 0, /* xRename */ }; +#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */ #ifdef SQLITE_TEST /* ** Decode a pointer to an sqlite3 object. */ -static int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb){ - *ppDb = (sqlite3*)sqlite3TextToPtr(zA); - return TCL_OK; -} +extern int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb); /* ** Register the schema virtual table module. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_server.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_server.c --- sqlite3-3.4.2/src/test_server.c 2007-06-15 19:06:54.000000000 +0100 +++ sqlite3-3.6.16/src/test_server.c 2009-05-05 04:39:59.000000000 +0100 @@ -10,6 +10,8 @@ ** ****************************************************************************** ** +** $Id: test_server.c,v 1.8 2008/06/26 10:41:19 danielk1977 Exp $ +** ** This file contains demonstration code. Nothing in this file gets compiled ** or linked into the SQLite library unless you use a non-standard option: ** @@ -196,12 +198,17 @@ ** an integer. */ +/* Include this to get the definition of SQLITE_THREADSAFE, in the +** case that default values are used. +*/ +#include "sqliteInt.h" + /* -** Only compile the code in this file on UNIX with a THREADSAFE build +** Only compile the code in this file on UNIX with a SQLITE_THREADSAFE build ** and only if the SQLITE_SERVER macro is defined. */ #if defined(SQLITE_SERVER) && !defined(SQLITE_OMIT_SHARED_CACHE) -#if defined(OS_UNIX) && OS_UNIX && defined(THREADSAFE) && THREADSAFE +#if defined(SQLITE_OS_UNIX) && OS_UNIX && SQLITE_THREADSAFE /* ** We require only pthreads and the public interface of SQLite. @@ -386,11 +393,10 @@ ** true. 
*/ void *sqlite3_server(void *NotUsed){ - sqlite3_enable_shared_cache(1); if( pthread_mutex_trylock(&g.serverMutex) ){ - sqlite3_enable_shared_cache(0); return 0; /* Another server is already running */ } + sqlite3_enable_shared_cache(1); while( !g.serverHalt ){ SqlMessage *pMsg; @@ -483,5 +489,5 @@ pthread_mutex_unlock(&g.serverMutex); } -#endif /* defined(OS_UNIX) && OS_UNIX && defined(THREADSAFE) && THREADSAFE */ +#endif /* defined(SQLITE_OS_UNIX) && OS_UNIX && SQLITE_THREADSAFE */ #endif /* defined(SQLITE_SERVER) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_tclvar.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_tclvar.c --- sqlite3-3.4.2/src/test_tclvar.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/test_tclvar.c 2009-05-05 04:39:59.000000000 +0100 @@ -16,11 +16,10 @@ ** The emphasis of this file is a virtual table that provides ** access to TCL variables. ** -** $Id: test_tclvar.c,v 1.11 2007/06/27 16:26:07 danielk1977 Exp $ +** $Id: test_tclvar.c,v 1.17 2008/08/12 14:48:41 danielk1977 Exp $ */ #include "sqliteInt.h" #include "tcl.h" -#include "os.h" #include #include @@ -58,7 +57,7 @@ tclvar_vtab *pVtab; static const char zSchema[] = "CREATE TABLE whatever(name TEXT, arrayname TEXT, value TEXT)"; - pVtab = sqliteMalloc( sizeof(*pVtab) ); + pVtab = sqlite3MallocZero( sizeof(*pVtab) ); if( pVtab==0 ) return SQLITE_NOMEM; *ppVtab = &pVtab->base; pVtab->interp = (Tcl_Interp *)pAux; @@ -69,7 +68,7 @@ ** methods are identical. */ static int tclvarDisconnect(sqlite3_vtab *pVtab){ - sqliteFree(pVtab); + sqlite3_free(pVtab); return SQLITE_OK; } /* The xDisconnect and xDestroy methods are also the same */ @@ -79,7 +78,7 @@ */ static int tclvarOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ tclvar_cursor *pCur; - pCur = sqliteMalloc(sizeof(tclvar_cursor)); + pCur = sqlite3MallocZero(sizeof(tclvar_cursor)); *ppCursor = &pCur->base; return SQLITE_OK; } @@ -95,7 +94,7 @@ if( pCur->pList2 ){ Tcl_DecrRefCount(pCur->pList2); } - sqliteFree(pCur); + sqlite3_free(pCur); return SQLITE_OK; } @@ -223,7 +222,8 @@ for(ii=0; iinConstraint; ii++){ struct sqlite3_index_constraint const *pCons = &pIdxInfo->aConstraint[ii]; - if( pCons->iColumn==0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + if( pCons->iColumn==0 && pCons->usable + && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ ){ struct sqlite3_index_constraint_usage *pUsage; pUsage = &pIdxInfo->aConstraintUsage[ii]; pUsage->omit = 0; @@ -234,7 +234,8 @@ for(ii=0; iinConstraint; ii++){ struct sqlite3_index_constraint const *pCons = &pIdxInfo->aConstraint[ii]; - if( pCons->iColumn==0 && pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + if( pCons->iColumn==0 && pCons->usable + && pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ struct sqlite3_index_constraint_usage *pUsage; pUsage = &pIdxInfo->aConstraintUsage[ii]; pUsage->omit = 1; @@ -276,11 +277,7 @@ /* ** Decode a pointer to an sqlite3 object. */ -static int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb){ - *ppDb = (sqlite3*)sqlite3TextToPtr(zA); - return TCL_OK; -} - +extern int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb); /* ** Register the echo virtual table module. @@ -310,19 +307,19 @@ ** Register commands with the TCL interpreter. 
*/ int Sqlitetesttclvar_Init(Tcl_Interp *interp){ +#ifndef SQLITE_OMIT_VIRTUALTABLE static struct { char *zName; Tcl_ObjCmdProc *xProc; void *clientData; } aObjCmd[] = { -#ifndef SQLITE_OMIT_VIRTUALTABLE { "register_tclvar_module", register_tclvar_module, 0 }, -#endif }; int i; for(i=0; i + +#if SQLITE_THREADSAFE + +#include + +#if !defined(_MSC_VER) +#include +#endif + +/* +** One of these is allocated for each thread created by [sqlthread spawn]. +*/ +typedef struct SqlThread SqlThread; +struct SqlThread { + Tcl_ThreadId parent; /* Thread id of parent thread */ + Tcl_Interp *interp; /* Parent interpreter */ + char *zScript; /* The script to execute. */ + char *zVarname; /* Varname in parent script */ +}; + +/* +** A custom Tcl_Event type used by this module. When the event is +** handled, script zScript is evaluated in interpreter interp. If +** the evaluation throws an exception (returns TCL_ERROR), then the +** error is handled by Tcl_BackgroundError(). If no error occurs, +** the result is simply discarded. +*/ +typedef struct EvalEvent EvalEvent; +struct EvalEvent { + Tcl_Event base; /* Base class of type Tcl_Event */ + char *zScript; /* The script to execute. */ + Tcl_Interp *interp; /* The interpreter to execute it in. */ +}; + +static Tcl_ObjCmdProc sqlthread_proc; +static Tcl_ObjCmdProc clock_seconds_proc; +#if defined(SQLITE_OS_UNIX) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) +static Tcl_ObjCmdProc blocking_step_proc; +static Tcl_ObjCmdProc blocking_prepare_v2_proc; +#endif +int Sqlitetest1_Init(Tcl_Interp *); + +/* Functions from test1.c */ +void *sqlite3TestTextToPtr(const char *); +const char *sqlite3TestErrorName(int); +int getDbPointer(Tcl_Interp *, const char *, sqlite3 **); +int sqlite3TestMakePointerStr(Tcl_Interp *, char *, void *); +int sqlite3TestErrCode(Tcl_Interp *, sqlite3 *, int); + +/* +** Handler for events of type EvalEvent. +*/ +static int tclScriptEvent(Tcl_Event *evPtr, int flags){ + int rc; + EvalEvent *p = (EvalEvent *)evPtr; + rc = Tcl_Eval(p->interp, p->zScript); + if( rc!=TCL_OK ){ + Tcl_BackgroundError(p->interp); + } + UNUSED_PARAMETER(flags); + return 1; +} + +/* +** Register an EvalEvent to evaluate the script pScript in the +** parent interpreter/thread of SqlThread p. +*/ +static void postToParent(SqlThread *p, Tcl_Obj *pScript){ + EvalEvent *pEvent; + char *zMsg; + int nMsg; + + zMsg = Tcl_GetStringFromObj(pScript, &nMsg); + pEvent = (EvalEvent *)ckalloc(sizeof(EvalEvent)+nMsg+1); + pEvent->base.nextPtr = 0; + pEvent->base.proc = tclScriptEvent; + pEvent->zScript = (char *)&pEvent[1]; + memcpy(pEvent->zScript, zMsg, nMsg+1); + pEvent->interp = p->interp; + + Tcl_ThreadQueueEvent(p->parent, (Tcl_Event *)pEvent, TCL_QUEUE_TAIL); + Tcl_ThreadAlert(p->parent); +} + +/* +** The main function for threads created with [sqlthread spawn]. 
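+**
+** Roughly: the new thread creates its own Tcl interpreter, registers the
+** [sqlthread], [clock_seconds] and (where enabled) blocking-step commands
+** in it, evaluates SqlThread.zScript, and then posts a [set VARNAME RESULT]
+** script back to the parent thread (preceded by an [error ...] script if
+** the evaluation failed) before freeing the SqlThread and exiting.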
+*/ +static Tcl_ThreadCreateType tclScriptThread(ClientData pSqlThread){ + Tcl_Interp *interp; + Tcl_Obj *pRes; + Tcl_Obj *pList; + int rc; + SqlThread *p = (SqlThread *)pSqlThread; + extern int Sqlitetest_mutex_Init(Tcl_Interp*); + + interp = Tcl_CreateInterp(); + Tcl_CreateObjCommand(interp, "clock_seconds", clock_seconds_proc, 0, 0); + Tcl_CreateObjCommand(interp, "sqlthread", sqlthread_proc, pSqlThread, 0); +#if defined(SQLITE_OS_UNIX) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) + Tcl_CreateObjCommand(interp, "sqlite3_blocking_step", blocking_step_proc,0,0); + Tcl_CreateObjCommand(interp, + "sqlite3_blocking_prepare_v2", blocking_prepare_v2_proc, (void *)1, 0); + Tcl_CreateObjCommand(interp, + "sqlite3_nonblocking_prepare_v2", blocking_prepare_v2_proc, 0, 0); +#endif + Sqlitetest1_Init(interp); + Sqlitetest_mutex_Init(interp); + + rc = Tcl_Eval(interp, p->zScript); + pRes = Tcl_GetObjResult(interp); + pList = Tcl_NewObj(); + Tcl_IncrRefCount(pList); + Tcl_IncrRefCount(pRes); + + if( rc!=TCL_OK ){ + Tcl_ListObjAppendElement(interp, pList, Tcl_NewStringObj("error", -1)); + Tcl_ListObjAppendElement(interp, pList, pRes); + postToParent(p, pList); + Tcl_DecrRefCount(pList); + pList = Tcl_NewObj(); + } + + Tcl_ListObjAppendElement(interp, pList, Tcl_NewStringObj("set", -1)); + Tcl_ListObjAppendElement(interp, pList, Tcl_NewStringObj(p->zVarname, -1)); + Tcl_ListObjAppendElement(interp, pList, pRes); + postToParent(p, pList); + + ckfree((void *)p); + Tcl_DecrRefCount(pList); + Tcl_DecrRefCount(pRes); + Tcl_DeleteInterp(interp); + TCL_THREAD_CREATE_RETURN; +} + +/* +** sqlthread spawn VARNAME SCRIPT +** +** Spawn a new thread with its own Tcl interpreter and run the +** specified SCRIPT(s) in it. The thread terminates after running +** the script. The result of the script is stored in the variable +** VARNAME. +** +** The caller can wait for the script to terminate using [vwait VARNAME]. +*/ +static int sqlthread_spawn( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_ThreadId x; + SqlThread *pNew; + int rc; + + int nVarname; char *zVarname; + int nScript; char *zScript; + + /* Parameters for thread creation */ + const int nStack = TCL_THREAD_STACK_DEFAULT; + const int flags = TCL_THREAD_NOFLAGS; + + assert(objc==4); + UNUSED_PARAMETER(clientData); + UNUSED_PARAMETER(objc); + + zVarname = Tcl_GetStringFromObj(objv[2], &nVarname); + zScript = Tcl_GetStringFromObj(objv[3], &nScript); + + pNew = (SqlThread *)ckalloc(sizeof(SqlThread)+nVarname+nScript+2); + pNew->zVarname = (char *)&pNew[1]; + pNew->zScript = (char *)&pNew->zVarname[nVarname+1]; + memcpy(pNew->zVarname, zVarname, nVarname+1); + memcpy(pNew->zScript, zScript, nScript+1); + pNew->parent = Tcl_GetCurrentThread(); + pNew->interp = interp; + + rc = Tcl_CreateThread(&x, tclScriptThread, (void *)pNew, nStack, flags); + if( rc!=TCL_OK ){ + Tcl_AppendResult(interp, "Error in Tcl_CreateThread()", 0); + ckfree((char *)pNew); + return TCL_ERROR; + } + + return TCL_OK; +} + +/* +** sqlthread parent SCRIPT +** +** This can be called by spawned threads only. It sends the specified +** script back to the parent thread for execution. The result of +** evaluating the SCRIPT is returned. The parent thread must enter +** the event loop for this to work - otherwise the caller will +** block indefinitely. +** +** NOTE: At the moment, this doesn't work. FIXME. 
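+** (As written, the command only queues an EvalEvent on the parent thread
+** and returns TCL_OK immediately, so the script's result is never sent
+** back to the calling thread.)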
+*/ +static int sqlthread_parent( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + EvalEvent *pEvent; + char *zMsg; + int nMsg; + SqlThread *p = (SqlThread *)clientData; + + assert(objc==3); + UNUSED_PARAMETER(objc); + + if( p==0 ){ + Tcl_AppendResult(interp, "no parent thread", 0); + return TCL_ERROR; + } + + zMsg = Tcl_GetStringFromObj(objv[2], &nMsg); + pEvent = (EvalEvent *)ckalloc(sizeof(EvalEvent)+nMsg+1); + pEvent->base.nextPtr = 0; + pEvent->base.proc = tclScriptEvent; + pEvent->zScript = (char *)&pEvent[1]; + memcpy(pEvent->zScript, zMsg, nMsg+1); + pEvent->interp = p->interp; + Tcl_ThreadQueueEvent(p->parent, (Tcl_Event *)pEvent, TCL_QUEUE_TAIL); + Tcl_ThreadAlert(p->parent); + + return TCL_OK; +} + +static int xBusy(void *pArg, int nBusy){ + UNUSED_PARAMETER(pArg); + UNUSED_PARAMETER(nBusy); + sqlite3_sleep(50); + return 1; /* Try again... */ +} + +/* +** sqlthread open +** +** Open a database handle and return the string representation of +** the pointer value. +*/ +static int sqlthread_open( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int sqlite3TestMakePointerStr(Tcl_Interp *interp, char *zPtr, void *p); + + const char *zFilename; + sqlite3 *db; + int rc; + char zBuf[100]; + extern void Md5_Register(sqlite3*); + + UNUSED_PARAMETER(clientData); + UNUSED_PARAMETER(objc); + + zFilename = Tcl_GetString(objv[2]); + rc = sqlite3_open(zFilename, &db); + Md5_Register(db); + sqlite3_busy_handler(db, xBusy, 0); + + if( sqlite3TestMakePointerStr(interp, zBuf, db) ) return TCL_ERROR; + Tcl_AppendResult(interp, zBuf, 0); + + return TCL_OK; +} + + +/* +** sqlthread open +** +** Return the current thread-id (Tcl_GetCurrentThread()) cast to +** an integer. +*/ +static int sqlthread_id( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_ThreadId id = Tcl_GetCurrentThread(); + Tcl_SetObjResult(interp, Tcl_NewIntObj((int)id)); + UNUSED_PARAMETER(clientData); + UNUSED_PARAMETER(objc); + UNUSED_PARAMETER(objv); + return TCL_OK; +} + + +/* +** Dispatch routine for the sub-commands of [sqlthread]. +*/ +static int sqlthread_proc( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + struct SubCommand { + char *zName; + Tcl_ObjCmdProc *xProc; + int nArg; + char *zUsage; + } aSub[] = { + {"parent", sqlthread_parent, 1, "SCRIPT"}, + {"spawn", sqlthread_spawn, 2, "VARNAME SCRIPT"}, + {"open", sqlthread_open, 1, "DBNAME"}, + {"id", sqlthread_id, 0, ""}, + {0, 0, 0} + }; + struct SubCommand *pSub; + int rc; + int iIndex; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUB-COMMAND"); + return TCL_ERROR; + } + + rc = Tcl_GetIndexFromObjStruct( + interp, objv[1], aSub, sizeof(aSub[0]), "sub-command", 0, &iIndex + ); + if( rc!=TCL_OK ) return rc; + pSub = &aSub[iIndex]; + + if( objc!=(pSub->nArg+2) ){ + Tcl_WrongNumArgs(interp, 2, objv, pSub->zUsage); + return TCL_ERROR; + } + + return pSub->xProc(clientData, interp, objc, objv); +} + +/* +** The [clock_seconds] command. This is more or less the same as the +** regular tcl [clock seconds], except that it is available in testfixture +** when linked against both Tcl 8.4 and 8.5. Because [clock seconds] is +** implemented as a script in Tcl 8.5, it is not usually available to +** testfixture. 
+*/ +static int clock_seconds_proc( + ClientData clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + Tcl_Time now; + Tcl_GetTime(&now); + Tcl_SetObjResult(interp, Tcl_NewIntObj(now.sec)); + UNUSED_PARAMETER(clientData); + UNUSED_PARAMETER(objc); + UNUSED_PARAMETER(objv); + return TCL_OK; +} + +/************************************************************************* +** This block contains the implementation of the [sqlite3_blocking_step] +** command available to threads created by [sqlthread spawn] commands. It +** is only available on UNIX for now. This is because pthread condition +** variables are used. +** +** The source code for the C functions sqlite3_blocking_step(), +** blocking_step_notify() and the structure UnlockNotification is +** automatically extracted from this file and used as part of the +** documentation for the sqlite3_unlock_notify() API function. This +** should be considered if these functions are to be extended (i.e. to +** support windows) in the future. +*/ +#if defined(SQLITE_OS_UNIX) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) + +/* BEGIN_SQLITE_BLOCKING_STEP */ +/* This example uses the pthreads API */ +#include <pthread.h> + +/* +** A pointer to an instance of this structure is passed as the user-context +** pointer when registering for an unlock-notify callback. +*/ +typedef struct UnlockNotification UnlockNotification; +struct UnlockNotification { + int fired; /* True after unlock event has occured */ + pthread_cond_t cond; /* Condition variable to wait on */ + pthread_mutex_t mutex; /* Mutex to protect structure */ +}; + +/* +** This function is an unlock-notify callback registered with SQLite. +*/ +static void unlock_notify_cb(void **apArg, int nArg){ + int i; + for(i=0; i<nArg; i++){ + UnlockNotification *p = (UnlockNotification *)apArg[i]; + pthread_mutex_lock(&p->mutex); + p->fired = 1; + pthread_cond_signal(&p->cond); + pthread_mutex_unlock(&p->mutex); + } +} + +/* +** This function assumes that an SQLite API call (either sqlite3_prepare_v2() +** or sqlite3_step()) has just returned SQLITE_LOCKED. The argument is the +** associated database connection. +** +** This function calls sqlite3_unlock_notify() to register for an +** unlock-notify callback, then blocks until that callback is delivered +** and returns SQLITE_OK. The caller should then retry the failed operation. +** +** Or, if sqlite3_unlock_notify() indicates that to block would deadlock +** the system, then this function returns SQLITE_LOCKED immediately. In +** this case the caller should not retry the operation and should roll +** back the current transaction (if any). +*/ +static int wait_for_unlock_notify(sqlite3 *db){ + int rc; + UnlockNotification un; + + /* Initialize the UnlockNotification structure. */ + un.fired = 0; + pthread_mutex_init(&un.mutex, 0); + pthread_cond_init(&un.cond, 0); + + /* Register for an unlock-notify callback. */ + rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un); + assert( rc==SQLITE_LOCKED || rc==SQLITE_OK ); + + /* The call to sqlite3_unlock_notify() always returns either SQLITE_LOCKED + ** or SQLITE_OK. + ** + ** If SQLITE_LOCKED was returned, then the system is deadlocked. In this + ** case this function needs to return SQLITE_LOCKED to the caller so + ** that the current transaction can be rolled back. Otherwise, block + ** until the unlock-notify callback is invoked, then return SQLITE_OK. + */ + if( rc==SQLITE_OK ){ + pthread_mutex_lock(&un.mutex); + if( !un.fired ){ + pthread_cond_wait(&un.cond, &un.mutex); + } + pthread_mutex_unlock(&un.mutex); + } + + /* Destroy the mutex and condition variables. 
*/ + pthread_cond_destroy(&un.cond); + pthread_mutex_destroy(&un.mutex); + + return rc; +} + +/* +** This function is a wrapper around the SQLite function sqlite3_step(). +** It functions in the same way as step(), except that if a required +** shared-cache lock cannot be obtained, this function may block waiting for +** the lock to become available. In this scenario the normal API step() +** function always returns SQLITE_LOCKED. +** +** If this function returns SQLITE_LOCKED, the caller should rollback +** the current transaction (if any) and try again later. Otherwise, the +** system may become deadlocked. +*/ +int sqlite3_blocking_step(sqlite3_stmt *pStmt){ + int rc; + while( SQLITE_LOCKED==(rc = sqlite3_step(pStmt)) ){ + rc = wait_for_unlock_notify(sqlite3_db_handle(pStmt)); + if( rc!=SQLITE_OK ) break; + sqlite3_reset(pStmt); + } + return rc; +} + +/* +** This function is a wrapper around the SQLite function sqlite3_prepare_v2(). +** It functions in the same way as prepare_v2(), except that if a required +** shared-cache lock cannot be obtained, this function may block waiting for +** the lock to become available. In this scenario the normal API prepare_v2() +** function always returns SQLITE_LOCKED. +** +** If this function returns SQLITE_LOCKED, the caller should rollback +** the current transaction (if any) and try again later. Otherwise, the +** system may become deadlocked. +*/ +int sqlite3_blocking_prepare_v2( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nSql, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pz /* OUT: End of parsed string */ +){ + int rc; + while( SQLITE_LOCKED==(rc = sqlite3_prepare_v2(db, zSql, nSql, ppStmt, pz)) ){ + rc = wait_for_unlock_notify(db); + if( rc!=SQLITE_OK ) break; + } + return rc; +} +/* END_SQLITE_BLOCKING_STEP */ + +/* +** Usage: sqlite3_blocking_step STMT +** +** Advance the statement to the next row. +*/ +static int blocking_step_proc( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + + sqlite3_stmt *pStmt; + int rc; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "STMT"); + return TCL_ERROR; + } + + pStmt = (sqlite3_stmt*)sqlite3TestTextToPtr(Tcl_GetString(objv[1])); + rc = sqlite3_blocking_step(pStmt); + + Tcl_SetResult(interp, (char *)sqlite3TestErrorName(rc), 0); + return TCL_OK; +} + +/* +** Usage: sqlite3_blocking_prepare_v2 DB sql bytes ?tailvar? +** Usage: sqlite3_nonblocking_prepare_v2 DB sql bytes ?tailvar? 
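+**
+** For example, from a test script (hypothetical handle and variable names):
+**
+**   set ::STMT [sqlite3_blocking_prepare_v2 $::DB {SELECT * FROM t1} -1 TAIL]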
+*/ +static int blocking_prepare_v2_proc( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + const char *zSql; + int bytes; + const char *zTail = 0; + sqlite3_stmt *pStmt = 0; + char zBuf[50]; + int rc; + int isBlocking = !(clientData==0); + + if( objc!=5 && objc!=4 ){ + Tcl_AppendResult(interp, "wrong # args: should be \"", + Tcl_GetString(objv[0]), " DB sql bytes tailvar", 0); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + zSql = Tcl_GetString(objv[2]); + if( Tcl_GetIntFromObj(interp, objv[3], &bytes) ) return TCL_ERROR; + + if( isBlocking ){ + rc = sqlite3_blocking_prepare_v2(db, zSql, bytes, &pStmt, &zTail); + }else{ + rc = sqlite3_prepare_v2(db, zSql, bytes, &pStmt, &zTail); + } + + assert(rc==SQLITE_OK || pStmt==0); + if( zTail && objc>=5 ){ + if( bytes>=0 ){ + bytes = bytes - (zTail-zSql); + } + Tcl_ObjSetVar2(interp, objv[4], 0, Tcl_NewStringObj(zTail, bytes), 0); + } + if( rc!=SQLITE_OK ){ + assert( pStmt==0 ); + sprintf(zBuf, "%s ", (char *)sqlite3TestErrorName(rc)); + Tcl_AppendResult(interp, zBuf, sqlite3_errmsg(db), 0); + return TCL_ERROR; + } + + if( pStmt ){ + if( sqlite3TestMakePointerStr(interp, zBuf, pStmt) ) return TCL_ERROR; + Tcl_AppendResult(interp, zBuf, 0); + } + return TCL_OK; +} + +#endif /* SQLITE_OS_UNIX && SQLITE_ENABLE_UNLOCK_NOTIFY */ +/* +** End of implementation of [sqlite3_blocking_step]. +************************************************************************/ + +/* +** Register commands with the TCL interpreter. +*/ +int SqlitetestThread_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "sqlthread", sqlthread_proc, 0, 0); + Tcl_CreateObjCommand(interp, "clock_seconds", clock_seconds_proc, 0, 0); +#if defined(SQLITE_OS_UNIX) && defined(SQLITE_ENABLE_UNLOCK_NOTIFY) + Tcl_CreateObjCommand(interp, "sqlite3_blocking_step", blocking_step_proc,0,0); + Tcl_CreateObjCommand(interp, + "sqlite3_blocking_prepare_v2", blocking_prepare_v2_proc, (void *)1, 0); + Tcl_CreateObjCommand(interp, + "sqlite3_nonblocking_prepare_v2", blocking_prepare_v2_proc, 0, 0); +#endif + return TCL_OK; +} +#else +int SqlitetestThread_Init(Tcl_Interp *interp){ + return TCL_OK; +} +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/test_wsd.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/test_wsd.c --- sqlite3-3.4.2/src/test_wsd.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/test_wsd.c 2009-06-12 03:37:49.000000000 +0100 @@ -0,0 +1,86 @@ +/* +** 2008 September 1 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** The code in this file contains sample implementations of the +** sqlite3_wsd_init() and sqlite3_wsd_find() functions required if the +** SQLITE_OMIT_WSD symbol is defined at build time. 
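+**
+** Roughly: sqlite3_wsd_init(N, J) reserves one process-wide block of memory,
+** and sqlite3_wsd_find(K, L) returns the per-process copy of the global
+** variable whose true address is K and whose size is L bytes, carving it
+** out of that block and copying the initial value from K on first use,
+** then locating it again through a small hash table on later calls.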
+** +** $Id: test_wsd.c,v 1.4 2009/03/23 04:33:33 danielk1977 Exp $ +*/ + +#if defined(SQLITE_OMIT_WSD) && defined(SQLITE_TEST) + +#include "sqliteInt.h" + +#define PLS_HASHSIZE 43 + +typedef struct ProcessLocalStorage ProcessLocalStorage; +typedef struct ProcessLocalVar ProcessLocalVar; + +struct ProcessLocalStorage { + ProcessLocalVar *aData[PLS_HASHSIZE]; + int nFree; + u8 *pFree; +}; + +struct ProcessLocalVar { + void *pKey; + ProcessLocalVar *pNext; +}; + +static ProcessLocalStorage *pGlobal = 0; + +int sqlite3_wsd_init(int N, int J){ + if( !pGlobal ){ + int nMalloc = N + sizeof(ProcessLocalStorage) + J*sizeof(ProcessLocalVar); + pGlobal = (ProcessLocalStorage *)malloc(nMalloc); + if( pGlobal ){ + memset(pGlobal, 0, sizeof(ProcessLocalStorage)); + pGlobal->nFree = nMalloc - sizeof(ProcessLocalStorage); + pGlobal->pFree = (u8 *)&pGlobal[1]; + } + } + + return pGlobal ? SQLITE_OK : SQLITE_NOMEM; +} + +void *sqlite3_wsd_find(void *K, int L){ + int i; + int iHash = 0; + ProcessLocalVar *pVar; + + /* Calculate a hash of K */ + for(i=0; iaData[iHash]; pVar && pVar->pKey!=K; pVar=pVar->pNext); + + /* If no entry for K was found, create and populate a new one. */ + if( !pVar ){ + int nByte = ROUND8(sizeof(ProcessLocalVar) + L); + assert( pGlobal->nFree>=nByte ); + pVar = (ProcessLocalVar *)pGlobal->pFree; + pVar->pKey = K; + pVar->pNext = pGlobal->aData[iHash]; + pGlobal->aData[iHash] = pVar; + pGlobal->nFree -= nByte; + pGlobal->pFree += nByte; + memcpy(&pVar[1], K, L); + } + + return (void *)&pVar[1]; +} + +#endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/tokenize.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/tokenize.c --- sqlite3-3.4.2/src/tokenize.c 2007-07-23 20:31:17.000000000 +0100 +++ sqlite3-3.6.16/src/tokenize.c 2009-06-25 12:45:58.000000000 +0100 @@ -15,11 +15,9 @@ ** individual tokens and sends those tokens one-by-one over to the ** parser for analysis. ** -** $Id: tokenize.c,v 1.131 2007/07/23 19:31:17 drh Exp $ +** $Id: tokenize.c,v 1.162 2009/06/23 20:28:54 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include #include /* @@ -86,7 +84,7 @@ ** But the feature is undocumented. */ #ifdef SQLITE_ASCII -const char sqlite3IsIdChar[] = { +const char sqlite3IsAsciiIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ @@ -95,10 +93,10 @@ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ }; -#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && sqlite3IsIdChar[c-0x20])) +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && sqlite3IsAsciiIdChar[c-0x20])) #endif #ifdef SQLITE_EBCDIC -const char sqlite3IsIdChar[] = { +const char sqlite3IsEbcdicIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 4x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, /* 5x */ @@ -113,7 +111,7 @@ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Ex */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, /* Fx */ }; -#define IdChar(C) (((c=C)>=0x42 && sqlite3IsIdChar[c-0x40])) +#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) #endif @@ -121,18 +119,23 @@ ** Return the length of the token that begins at z[0]. ** Store the token type in *tokenType before returning. 
*/ -static int getToken(const unsigned char *z, int *tokenType){ +int sqlite3GetToken(const unsigned char *z, int *tokenType){ int i, c; switch( *z ){ case ' ': case '\t': case '\n': case '\f': case '\r': { - for(i=1; isspace(z[i]); i++){} + testcase( z[0]==' ' ); + testcase( z[0]=='\t' ); + testcase( z[0]=='\n' ); + testcase( z[0]=='\f' ); + testcase( z[0]=='\r' ); + for(i=1; sqlite3Isspace(z[i]); i++){} *tokenType = TK_SPACE; return i; } case '-': { if( z[1]=='-' ){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} - *tokenType = TK_COMMENT; + *tokenType = TK_SPACE; return i; } *tokenType = TK_MINUS; @@ -165,7 +168,7 @@ } for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} if( c ) i++; - *tokenType = TK_COMMENT; + *tokenType = TK_SPACE; return i; } case '%': { @@ -237,6 +240,9 @@ case '\'': case '"': { int delim = z[0]; + testcase( delim=='`' ); + testcase( delim=='\'' ); + testcase( delim=='"' ); for(i=1; (c=z[i])!=0; i++){ if( c==delim ){ if( z[i+1]==delim ){ @@ -246,9 +252,12 @@ } } } - if( c ){ + if( c=='\'' ){ *tokenType = TK_STRING; return i+1; + }else if( c!=0 ){ + *tokenType = TK_ID; + return i+1; }else{ *tokenType = TK_ILLEGAL; return i; @@ -256,7 +265,7 @@ } case '.': { #ifndef SQLITE_OMIT_FLOATING_POINT - if( !isdigit(z[1]) ) + if( !sqlite3Isdigit(z[1]) ) #endif { *tokenType = TK_DOT; @@ -267,21 +276,25 @@ } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { + testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' ); + testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' ); + testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' ); + testcase( z[0]=='9' ); *tokenType = TK_INTEGER; - for(i=0; isdigit(z[i]); i++){} + for(i=0; sqlite3Isdigit(z[i]); i++){} #ifndef SQLITE_OMIT_FLOATING_POINT if( z[i]=='.' ){ i++; - while( isdigit(z[i]) ){ i++; } + while( sqlite3Isdigit(z[i]) ){ i++; } *tokenType = TK_FLOAT; } if( (z[i]=='e' || z[i]=='E') && - ( isdigit(z[i+1]) - || ((z[i+1]=='+' || z[i+1]=='-') && isdigit(z[i+2])) + ( sqlite3Isdigit(z[i+1]) + || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) ) ){ i += 2; - while( isdigit(z[i]) ){ i++; } + while( sqlite3Isdigit(z[i]) ){ i++; } *tokenType = TK_FLOAT; } #endif @@ -293,16 +306,16 @@ } case '[': { for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} - *tokenType = TK_ID; + *tokenType = c==']' ? TK_ID : TK_ILLEGAL; return i; } case '?': { *tokenType = TK_VARIABLE; - for(i=1; isdigit(z[i]); i++){} + for(i=1; sqlite3Isdigit(z[i]); i++){} return i; } case '#': { - for(i=1; isdigit(z[i]); i++){} + for(i=1; sqlite3Isdigit(z[i]); i++){} if( i>1 ){ /* Parameters of the form #NNN (where NNN is a number) are used ** internally by sqlite3NestedParse. 
*/ @@ -318,6 +331,7 @@ case '@': /* For compatibility with MS SQL Server */ case ':': { int n = 0; + testcase( z[0]=='$' ); testcase( z[0]=='@' ); testcase( z[0]==':' ); *tokenType = TK_VARIABLE; for(i=1; (c=z[i])!=0; i++){ if( IdChar(c) ){ @@ -326,7 +340,7 @@ }else if( c=='(' && n>0 ){ do{ i++; - }while( (c=z[i])!=0 && !isspace(c) && c!=')' ); + }while( (c=z[i])!=0 && !sqlite3Isspace(c) && c!=')' ); if( c==')' ){ i++; }else{ @@ -345,19 +359,15 @@ } #ifndef SQLITE_OMIT_BLOB_LITERAL case 'x': case 'X': { - if( (c=z[1])=='\'' || c=='"' ){ - int delim = c; + testcase( z[0]=='x' ); testcase( z[0]=='X' ); + if( z[1]=='\'' ){ *tokenType = TK_BLOB; - for(i=2; (c=z[i])!=0; i++){ - if( c==delim ){ - if( i%2 ) *tokenType = TK_ILLEGAL; - break; - } - if( !isxdigit(c) ){ + for(i=2; (c=z[i])!=0 && c!='\''; i++){ + if( !sqlite3Isxdigit(c) ){ *tokenType = TK_ILLEGAL; - return i; } } + if( i%2 || !c ) *tokenType = TK_ILLEGAL; if( c ) i++; return i; } @@ -376,68 +386,68 @@ *tokenType = TK_ILLEGAL; return 1; } -int sqlite3GetToken(const unsigned char *z, int *tokenType){ - return getToken(z, tokenType); -} /* ** Run the parser on the given SQL string. The parser structure is ** passed in. An SQLITE_ status code is returned. If an error occurs -** and pzErrMsg!=NULL then an error message might be written into -** memory obtained from malloc() and *pzErrMsg made to point to that -** error message. Or maybe not. +** then an and attempt is made to write an error message into +** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that +** error message. */ int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){ - int nErr = 0; - int i; - void *pEngine; - int tokenType; - int lastTokenParsed = -1; - sqlite3 *db = pParse->db; + int nErr = 0; /* Number of errors encountered */ + int i; /* Loop counter */ + void *pEngine; /* The LEMON-generated LALR(1) parser */ + int tokenType; /* type of the next token */ + int lastTokenParsed = -1; /* type of the previous token */ + u8 enableLookaside; /* Saved value of db->lookaside.bEnabled */ + sqlite3 *db = pParse->db; /* The database connection */ + int mxSqlLen; /* Max length of an SQL string */ + + mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; if( db->activeVdbeCnt==0 ){ db->u1.isInterrupted = 0; } pParse->rc = SQLITE_OK; + pParse->zTail = pParse->zSql = zSql; i = 0; - pEngine = sqlite3ParserAlloc((void*(*)(size_t))sqlite3MallocX); + assert( pzErrMsg!=0 ); + pEngine = sqlite3ParserAlloc((void*(*)(size_t))sqlite3Malloc); if( pEngine==0 ){ + db->mallocFailed = 1; return SQLITE_NOMEM; } - assert( pParse->sLastToken.dyn==0 ); assert( pParse->pNewTable==0 ); assert( pParse->pNewTrigger==0 ); assert( pParse->nVar==0 ); assert( pParse->nVarExpr==0 ); assert( pParse->nVarExprAlloc==0 ); assert( pParse->apVarExpr==0 ); - pParse->zTail = pParse->zSql = zSql; - while( !sqlite3MallocFailed() && zSql[i]!=0 ){ + enableLookaside = db->lookaside.bEnabled; + if( db->lookaside.pStart ) db->lookaside.bEnabled = 1; + while( !db->mallocFailed && zSql[i]!=0 ){ assert( i>=0 ); - pParse->sLastToken.z = (u8*)&zSql[i]; - assert( pParse->sLastToken.dyn==0 ); - pParse->sLastToken.n = getToken((unsigned char*)&zSql[i],&tokenType); + pParse->sLastToken.z = &zSql[i]; + pParse->sLastToken.n = sqlite3GetToken((unsigned char*)&zSql[i],&tokenType); i += pParse->sLastToken.n; - if( i>SQLITE_MAX_SQL_LENGTH ){ + if( i>mxSqlLen ){ pParse->rc = SQLITE_TOOBIG; break; } switch( tokenType ){ - case TK_SPACE: - case TK_COMMENT: { + case TK_SPACE: { if( db->u1.isInterrupted ){ + 
sqlite3ErrorMsg(pParse, "interrupt"); pParse->rc = SQLITE_INTERRUPT; - sqlite3SetString(pzErrMsg, "interrupt", (char*)0); goto abort_parse; } break; } case TK_ILLEGAL: { - if( pzErrMsg ){ - sqliteFree(*pzErrMsg); - *pzErrMsg = sqlite3MPrintf("unrecognized token: \"%T\"", - &pParse->sLastToken); - } + sqlite3DbFree(db, *pzErrMsg); + *pzErrMsg = sqlite3MPrintf(db, "unrecognized token: \"%T\"", + &pParse->sLastToken); nErr++; goto abort_parse; } @@ -463,21 +473,24 @@ } sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse); } - sqlite3ParserFree(pEngine, sqlite3FreeX); - if( sqlite3MallocFailed() ){ +#ifdef YYTRACKMAXSTACKDEPTH + sqlite3StatusSet(SQLITE_STATUS_PARSER_STACK, + sqlite3ParserStackPeak(pEngine) + ); +#endif /* YYDEBUG */ + sqlite3ParserFree(pEngine, sqlite3_free); + db->lookaside.bEnabled = enableLookaside; + if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM; } if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){ - sqlite3SetString(&pParse->zErrMsg, sqlite3ErrStr(pParse->rc), (char*)0); + sqlite3SetString(&pParse->zErrMsg, db, "%s", sqlite3ErrStr(pParse->rc)); } + assert( pzErrMsg!=0 ); if( pParse->zErrMsg ){ - if( pzErrMsg && *pzErrMsg==0 ){ - *pzErrMsg = pParse->zErrMsg; - }else{ - sqliteFree(pParse->zErrMsg); - } + *pzErrMsg = pParse->zErrMsg; pParse->zErrMsg = 0; - if( !nErr ) nErr++; + nErr++; } if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){ sqlite3VdbeDelete(pParse->pVdbe); @@ -485,11 +498,14 @@ } #ifndef SQLITE_OMIT_SHARED_CACHE if( pParse->nested==0 ){ - sqliteFree(pParse->aTableLock); + sqlite3DbFree(db, pParse->aTableLock); pParse->aTableLock = 0; pParse->nTableLock = 0; } #endif +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3DbFree(db, pParse->apVtabLock); +#endif if( !IN_DECLARE_VTAB ){ /* If the pParse->declareVtab flag is set, do not delete any table @@ -499,9 +515,20 @@ sqlite3DeleteTable(pParse->pNewTable); } - sqlite3DeleteTrigger(pParse->pNewTrigger); - sqliteFree(pParse->apVarExpr); - if( nErr>0 && (pParse->rc==SQLITE_OK || pParse->rc==SQLITE_DONE) ){ + sqlite3DeleteTrigger(db, pParse->pNewTrigger); + sqlite3DbFree(db, pParse->apVarExpr); + sqlite3DbFree(db, pParse->aAlias); + while( pParse->pAinc ){ + AutoincInfo *p = pParse->pAinc; + pParse->pAinc = p->pNext; + sqlite3DbFree(db, p); + } + while( pParse->pZombieTab ){ + Table *p = pParse->pZombieTab; + pParse->pZombieTab = p->pNextZombie; + sqlite3DeleteTable(p); + } + if( nErr>0 && pParse->rc==SQLITE_OK ){ pParse->rc = SQLITE_ERROR; } return nErr; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/trigger.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/trigger.c --- sqlite3-3.4.2/src/trigger.c 2007-08-07 18:12:05.000000000 +0100 +++ sqlite3-3.6.16/src/trigger.c 2009-06-25 12:35:51.000000000 +0100 @@ -8,7 +8,9 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* -* +** +** +** $Id: trigger.c,v 1.141 2009/05/28 01:00:55 drh Exp $ */ #include "sqliteInt.h" @@ -16,19 +18,52 @@ /* ** Delete a linked list of TriggerStep structures. 
*/ -void sqlite3DeleteTriggerStep(TriggerStep *pTriggerStep){ +void sqlite3DeleteTriggerStep(sqlite3 *db, TriggerStep *pTriggerStep){ while( pTriggerStep ){ TriggerStep * pTmp = pTriggerStep; pTriggerStep = pTriggerStep->pNext; - if( pTmp->target.dyn ) sqliteFree((char*)pTmp->target.z); - sqlite3ExprDelete(pTmp->pWhere); - sqlite3ExprListDelete(pTmp->pExprList); - sqlite3SelectDelete(pTmp->pSelect); - sqlite3IdListDelete(pTmp->pIdList); + sqlite3ExprDelete(db, pTmp->pWhere); + sqlite3ExprListDelete(db, pTmp->pExprList); + sqlite3SelectDelete(db, pTmp->pSelect); + sqlite3IdListDelete(db, pTmp->pIdList); + + sqlite3DbFree(db, pTmp); + } +} - sqliteFree(pTmp); +/* +** Given table pTab, return a list of all the triggers attached to +** the table. The list is connected by Trigger.pNext pointers. +** +** All of the triggers on pTab that are in the same database as pTab +** are already attached to pTab->pTrigger. But there might be additional +** triggers on pTab in the TEMP schema. This routine prepends all +** TEMP triggers on pTab to the beginning of the pTab->pTrigger list +** and returns the combined list. +** +** To state it another way: This routine returns a list of all triggers +** that fire off of pTab. The list will include any TEMP triggers on +** pTab as well as the triggers lised in pTab->pTrigger. +*/ +Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ + Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; + Trigger *pList = 0; /* List of triggers to return */ + + if( pTmpSchema!=pTab->pSchema ){ + HashElem *p; + for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ + Trigger *pTrig = (Trigger *)sqliteHashData(p); + if( pTrig->pTabSchema==pTab->pSchema + && 0==sqlite3StrICmp(pTrig->table, pTab->zName) + ){ + pTrig->pNext = (pList ? pList : pTab->pTrigger); + pList = pTrig; + } + } } + + return (pList ? pList : pTab->pTrigger); } /* @@ -62,6 +97,8 @@ assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */ assert( pName2!=0 ); + assert( op==TK_INSERT || op==TK_UPDATE || op==TK_DELETE ); + assert( op>0 && op<0xff ); if( isTemp ){ /* If TEMP was specified, then the trigger name may not be qualified. */ if( pName2->n>0 ){ @@ -83,7 +120,7 @@ ** If sqlite3SrcListLookup() returns 0, indicating the table does not ** exist, the error is caught by the block below. 
*/ - if( !pTableName || sqlite3MallocFailed() ){ + if( !pTableName || db->mallocFailed ){ goto trigger_cleanup; } pTab = sqlite3SrcListLookup(pParse, pTableName); @@ -92,7 +129,7 @@ } /* Ensure the table name matches database name and that the table exists */ - if( sqlite3MallocFailed() ) goto trigger_cleanup; + if( db->mallocFailed ) goto trigger_cleanup; assert( pTableName->nSrc==1 ); if( sqlite3FixInit(&sFix, pParse, iDb, "trigger", pName) && sqlite3FixSrcList(&sFix, pTableName) ){ @@ -110,11 +147,12 @@ /* Check that the trigger name is not reserved and that no trigger of the ** specified name exists */ - zName = sqlite3NameFromToken(pName); + zName = sqlite3NameFromToken(db, pName); if( !zName || SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto trigger_cleanup; } - if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash), zName,strlen(zName)) ){ + if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash), + zName, sqlite3Strlen30(zName)) ){ if( !noErr ){ sqlite3ErrorMsg(pParse, "trigger %T already exists", pName); } @@ -168,28 +206,27 @@ } /* Build the Trigger object */ - pTrigger = (Trigger*)sqliteMalloc(sizeof(Trigger)); + pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger)); if( pTrigger==0 ) goto trigger_cleanup; pTrigger->name = zName; zName = 0; - pTrigger->table = sqliteStrDup(pTableName->a[0].zName); + pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName); pTrigger->pSchema = db->aDb[iDb].pSchema; pTrigger->pTabSchema = pTab->pSchema; - pTrigger->op = op; + pTrigger->op = (u8)op; pTrigger->tr_tm = tr_tm==TK_BEFORE ? TRIGGER_BEFORE : TRIGGER_AFTER; - pTrigger->pWhen = sqlite3ExprDup(pWhen); - pTrigger->pColumns = sqlite3IdListDup(pColumns); - sqlite3TokenCopy(&pTrigger->nameToken,pName); + pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); + pTrigger->pColumns = sqlite3IdListDup(db, pColumns); assert( pParse->pNewTrigger==0 ); pParse->pNewTrigger = pTrigger; trigger_cleanup: - sqliteFree(zName); - sqlite3SrcListDelete(pTableName); - sqlite3IdListDelete(pColumns); - sqlite3ExprDelete(pWhen); + sqlite3DbFree(db, zName); + sqlite3SrcListDelete(db, pTableName); + sqlite3IdListDelete(db, pColumns); + sqlite3ExprDelete(db, pWhen); if( !pParse->pNewTrigger ){ - sqlite3DeleteTrigger(pTrigger); + sqlite3DeleteTrigger(db, pTrigger); }else{ assert( pParse->pNewTrigger==pTrigger ); } @@ -204,21 +241,26 @@ TriggerStep *pStepList, /* The triggered program */ Token *pAll /* Token that describes the complete CREATE TRIGGER */ ){ - Trigger *pTrig = 0; /* The trigger whose construction is finishing up */ - sqlite3 *db = pParse->db; /* The database */ + Trigger *pTrig = pParse->pNewTrigger; /* Trigger being finished */ + char *zName; /* Name of trigger */ + sqlite3 *db = pParse->db; /* The database */ DbFixer sFix; - int iDb; /* Database containing the trigger */ + int iDb; /* Database containing the trigger */ + Token nameToken; /* Trigger name for error reporting */ pTrig = pParse->pNewTrigger; pParse->pNewTrigger = 0; - if( pParse->nErr || !pTrig ) goto triggerfinish_cleanup; + if( NEVER(pParse->nErr) || !pTrig ) goto triggerfinish_cleanup; + zName = pTrig->name; iDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); pTrig->step_list = pStepList; while( pStepList ){ pStepList->pTrig = pTrig; pStepList = pStepList->pNext; } - if( sqlite3FixInit(&sFix, pParse, iDb, "trigger", &pTrig->nameToken) + nameToken.z = pTrig->name; + nameToken.n = sqlite3Strlen30(nameToken.z); + if( sqlite3FixInit(&sFix, pParse, iDb, "trigger", &nameToken) && sqlite3FixTriggerStep(&sFix, 
pTrig->step_list) ){ goto triggerfinish_cleanup; } @@ -227,95 +269,45 @@ ** build the sqlite_master entry */ if( !db->init.busy ){ - static const VdbeOpList insertTrig[] = { - { OP_NewRowid, 0, 0, 0 }, - { OP_String8, 0, 0, "trigger" }, - { OP_String8, 0, 0, 0 }, /* 2: trigger name */ - { OP_String8, 0, 0, 0 }, /* 3: table name */ - { OP_Integer, 0, 0, 0 }, - { OP_String8, 0, 0, "CREATE TRIGGER "}, - { OP_String8, 0, 0, 0 }, /* 6: SQL */ - { OP_Concat, 0, 0, 0 }, - { OP_MakeRecord, 5, 0, "aaada" }, - { OP_Insert, 0, 0, 0 }, - }; - int addr; Vdbe *v; + char *z; /* Make an entry in the sqlite_master table */ v = sqlite3GetVdbe(pParse); if( v==0 ) goto triggerfinish_cleanup; sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3OpenMasterTable(pParse, iDb); - addr = sqlite3VdbeAddOpList(v, ArraySize(insertTrig), insertTrig); - sqlite3VdbeChangeP3(v, addr+2, pTrig->name, 0); - sqlite3VdbeChangeP3(v, addr+3, pTrig->table, 0); - sqlite3VdbeChangeP3(v, addr+6, (char*)pAll->z, pAll->n); - sqlite3ChangeCookie(db, v, iDb); - sqlite3VdbeAddOp(v, OP_Close, 0, 0); - sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 0, - sqlite3MPrintf("type='trigger' AND name='%q'", pTrig->name), P3_DYNAMIC); + z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); + sqlite3NestedParse(pParse, + "INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", + db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zName, + pTrig->table, z); + sqlite3DbFree(db, z); + sqlite3ChangeCookie(pParse, iDb); + sqlite3VdbeAddOp4(v, OP_ParseSchema, iDb, 0, 0, sqlite3MPrintf( + db, "type='trigger' AND name='%q'", zName), P4_DYNAMIC + ); } if( db->init.busy ){ - int n; - Table *pTab; - Trigger *pDel; - pDel = sqlite3HashInsert(&db->aDb[iDb].pSchema->trigHash, - pTrig->name, strlen(pTrig->name), pTrig); - if( pDel ){ - assert( sqlite3MallocFailed() && pDel==pTrig ); - goto triggerfinish_cleanup; - } - n = strlen(pTrig->table) + 1; - pTab = sqlite3HashFind(&pTrig->pTabSchema->tblHash, pTrig->table, n); - assert( pTab!=0 ); - pTrig->pNext = pTab->pTrigger; - pTab->pTrigger = pTrig; - pTrig = 0; + Trigger *pLink = pTrig; + Hash *pHash = &db->aDb[iDb].pSchema->trigHash; + pTrig = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), pTrig); + if( pTrig ){ + db->mallocFailed = 1; + }else if( pLink->pSchema==pLink->pTabSchema ){ + Table *pTab; + int n = sqlite3Strlen30(pLink->table); + pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table, n); + assert( pTab!=0 ); + pLink->pNext = pTab->pTrigger; + pTab->pTrigger = pLink; + } } triggerfinish_cleanup: - sqlite3DeleteTrigger(pTrig); + sqlite3DeleteTrigger(db, pTrig); assert( !pParse->pNewTrigger ); - sqlite3DeleteTriggerStep(pStepList); -} - -/* -** Make a copy of all components of the given trigger step. This has -** the effect of copying all Expr.token.z values into memory obtained -** from sqliteMalloc(). As initially created, the Expr.token.z values -** all point to the input string that was fed to the parser. But that -** string is ephemeral - it will go away as soon as the sqlite3_exec() -** call that started the parser exits. This routine makes a persistent -** copy of all the Expr.token.z strings so that the TriggerStep structure -** will be valid even after the sqlite3_exec() call returns. 
-*/ -static void sqlitePersistTriggerStep(TriggerStep *p){ - if( p->target.z ){ - p->target.z = (u8*)sqliteStrNDup((char*)p->target.z, p->target.n); - p->target.dyn = 1; - } - if( p->pSelect ){ - Select *pNew = sqlite3SelectDup(p->pSelect); - sqlite3SelectDelete(p->pSelect); - p->pSelect = pNew; - } - if( p->pWhere ){ - Expr *pNew = sqlite3ExprDup(p->pWhere); - sqlite3ExprDelete(p->pWhere); - p->pWhere = pNew; - } - if( p->pExprList ){ - ExprList *pNew = sqlite3ExprListDup(p->pExprList); - sqlite3ExprListDelete(p->pExprList); - p->pExprList = pNew; - } - if( p->pIdList ){ - IdList *pNew = sqlite3IdListDup(p->pIdList); - sqlite3IdListDelete(p->pIdList); - p->pIdList = pNew; - } + sqlite3DeleteTriggerStep(db, pStepList); } /* @@ -325,18 +317,39 @@ ** The parser calls this routine when it finds a SELECT statement in ** body of a TRIGGER. */ -TriggerStep *sqlite3TriggerSelectStep(Select *pSelect){ - TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep)); +TriggerStep *sqlite3TriggerSelectStep(sqlite3 *db, Select *pSelect){ + TriggerStep *pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep)); if( pTriggerStep==0 ) { - sqlite3SelectDelete(pSelect); + sqlite3SelectDelete(db, pSelect); return 0; } - pTriggerStep->op = TK_SELECT; pTriggerStep->pSelect = pSelect; pTriggerStep->orconf = OE_Default; - sqlitePersistTriggerStep(pTriggerStep); + return pTriggerStep; +} + +/* +** Allocate space to hold a new trigger step. The allocated space +** holds both the TriggerStep object and the TriggerStep.target.z string. +** +** If an OOM error occurs, NULL is returned and db->mallocFailed is set. +*/ +static TriggerStep *triggerStepAllocate( + sqlite3 *db, /* Database connection */ + int op, /* Trigger opcode */ + Token *pName /* The target name */ +){ + TriggerStep *pTriggerStep; + pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n); + if( pTriggerStep ){ + char *z = (char*)&pTriggerStep[1]; + memcpy(z, pName->z, pName->n); + pTriggerStep->target.z = z; + pTriggerStep->target.n = pName->n; + pTriggerStep->op = op; + } return pTriggerStep; } @@ -348,30 +361,29 @@ ** body of a trigger. */ TriggerStep *sqlite3TriggerInsertStep( + sqlite3 *db, /* The database connection */ Token *pTableName, /* Name of the table into which we insert */ IdList *pColumn, /* List of columns in pTableName to insert into */ ExprList *pEList, /* The VALUE clause: a list of values to be inserted */ Select *pSelect, /* A SELECT statement that supplies values */ int orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) 
*/ ){ - TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep)); + TriggerStep *pTriggerStep; assert(pEList == 0 || pSelect == 0); - assert(pEList != 0 || pSelect != 0); + assert(pEList != 0 || pSelect != 0 || db->mallocFailed); + pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName); if( pTriggerStep ){ - pTriggerStep->op = TK_INSERT; - pTriggerStep->pSelect = pSelect; - pTriggerStep->target = *pTableName; + pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); pTriggerStep->pIdList = pColumn; - pTriggerStep->pExprList = pEList; + pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE); pTriggerStep->orconf = orconf; - sqlitePersistTriggerStep(pTriggerStep); }else{ - sqlite3IdListDelete(pColumn); - sqlite3ExprListDelete(pEList); - sqlite3SelectDup(pSelect); + sqlite3IdListDelete(db, pColumn); } + sqlite3ExprListDelete(db, pEList); + sqlite3SelectDelete(db, pSelect); return pTriggerStep; } @@ -382,25 +394,22 @@ ** sees an UPDATE statement inside the body of a CREATE TRIGGER. */ TriggerStep *sqlite3TriggerUpdateStep( + sqlite3 *db, /* The database connection */ Token *pTableName, /* Name of the table to be updated */ ExprList *pEList, /* The SET clause: list of column and new values */ Expr *pWhere, /* The WHERE clause */ int orconf /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */ ){ - TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep)); - if( pTriggerStep==0 ){ - sqlite3ExprListDelete(pEList); - sqlite3ExprDelete(pWhere); - return 0; - } - - pTriggerStep->op = TK_UPDATE; - pTriggerStep->target = *pTableName; - pTriggerStep->pExprList = pEList; - pTriggerStep->pWhere = pWhere; - pTriggerStep->orconf = orconf; - sqlitePersistTriggerStep(pTriggerStep); + TriggerStep *pTriggerStep; + pTriggerStep = triggerStepAllocate(db, TK_UPDATE, pTableName); + if( pTriggerStep ){ + pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE); + pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); + pTriggerStep->orconf = orconf; + } + sqlite3ExprListDelete(db, pEList); + sqlite3ExprDelete(db, pWhere); return pTriggerStep; } @@ -409,34 +418,33 @@ ** a pointer to that trigger step. The parser calls this routine when it ** sees a DELETE statement inside the body of a CREATE TRIGGER. 
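(The triggerStepAllocate() helper introduced above puts the TriggerStep object and its target-name string into a single allocation, so one free releases both. A generic sketch of that struct-plus-trailing-payload pattern, using plain calloc() and invented names rather than sqlite3DbMallocZero(), is given here for orientation only.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical record whose variable-length name lives in the same block. */
typedef struct Step {
  int op;
  char *zTarget;                   /* Points just past the struct itself */
} Step;

static Step *stepAllocate(int op, const char *zName){
  size_t n = strlen(zName);
  Step *p = calloc(1, sizeof(Step) + n + 1);   /* struct + name + NUL */
  if( p ){
    char *z = (char*)&p[1];        /* First byte after the struct */
    memcpy(z, zName, n+1);
    p->zTarget = z;
    p->op = op;
  }
  return p;                        /* free(p) releases both parts at once */
}

int main(void){
  Step *p = stepAllocate(1, "t1");
  if( p ){
    printf("op=%d target=%s\n", p->op, p->zTarget);
    free(p);
  }
  return 0;
}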
*/ -TriggerStep *sqlite3TriggerDeleteStep(Token *pTableName, Expr *pWhere){ - TriggerStep *pTriggerStep = sqliteMalloc(sizeof(TriggerStep)); - if( pTriggerStep==0 ){ - sqlite3ExprDelete(pWhere); - return 0; - } - - pTriggerStep->op = TK_DELETE; - pTriggerStep->target = *pTableName; - pTriggerStep->pWhere = pWhere; - pTriggerStep->orconf = OE_Default; - sqlitePersistTriggerStep(pTriggerStep); +TriggerStep *sqlite3TriggerDeleteStep( + sqlite3 *db, /* Database connection */ + Token *pTableName, /* The table from which rows are deleted */ + Expr *pWhere /* The WHERE clause */ +){ + TriggerStep *pTriggerStep; + pTriggerStep = triggerStepAllocate(db, TK_DELETE, pTableName); + if( pTriggerStep ){ + pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); + pTriggerStep->orconf = OE_Default; + } + sqlite3ExprDelete(db, pWhere); return pTriggerStep; } /* ** Recursively delete a Trigger structure */ -void sqlite3DeleteTrigger(Trigger *pTrigger){ +void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ if( pTrigger==0 ) return; - sqlite3DeleteTriggerStep(pTrigger->step_list); - sqliteFree(pTrigger->name); - sqliteFree(pTrigger->table); - sqlite3ExprDelete(pTrigger->pWhen); - sqlite3IdListDelete(pTrigger->pColumns); - if( pTrigger->nameToken.dyn ) sqliteFree((char*)pTrigger->nameToken.z); - sqliteFree(pTrigger); + sqlite3DeleteTriggerStep(db, pTrigger->step_list); + sqlite3DbFree(db, pTrigger->name); + sqlite3DbFree(db, pTrigger->table); + sqlite3ExprDelete(db, pTrigger->pWhen); + sqlite3IdListDelete(db, pTrigger->pColumns); + sqlite3DbFree(db, pTrigger); } /* @@ -455,7 +463,7 @@ int nName; sqlite3 *db = pParse->db; - if( sqlite3MallocFailed() ) goto drop_trigger_cleanup; + if( db->mallocFailed ) goto drop_trigger_cleanup; if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto drop_trigger_cleanup; } @@ -463,7 +471,7 @@ assert( pName->nSrc==1 ); zDb = pName->a[0].zDatabase; zName = pName->a[0].zName; - nName = strlen(zName); + nName = sqlite3Strlen30(zName); for(i=OMIT_TEMPDB; inDb; i++){ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ if( zDb && sqlite3StrICmp(db->aDb[j].zName, zDb) ) continue; @@ -479,7 +487,7 @@ sqlite3DropTriggerPtr(pParse, pTrigger); drop_trigger_cleanup: - sqlite3SrcListDelete(pName); + sqlite3SrcListDelete(db, pName); } /* @@ -487,7 +495,7 @@ ** is set on. 
*/ static Table *tableOfTrigger(Trigger *pTrigger){ - int n = strlen(pTrigger->table) + 1; + int n = sqlite3Strlen30(pTrigger->table); return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table, n); } @@ -526,12 +534,12 @@ int base; static const VdbeOpList dropTrigger[] = { { OP_Rewind, 0, ADDR(9), 0}, - { OP_String8, 0, 0, 0}, /* 1 */ - { OP_Column, 0, 1, 0}, - { OP_Ne, 0, ADDR(8), 0}, - { OP_String8, 0, 0, "trigger"}, - { OP_Column, 0, 0, 0}, - { OP_Ne, 0, ADDR(8), 0}, + { OP_String8, 0, 1, 0}, /* 1 */ + { OP_Column, 0, 1, 2}, + { OP_Ne, 2, ADDR(8), 1}, + { OP_String8, 0, 1, 0}, /* 4: "trigger" */ + { OP_Column, 0, 0, 2}, + { OP_Ne, 2, ADDR(8), 1}, { OP_Delete, 0, 0, 0}, { OP_Next, 0, ADDR(1), 0}, /* 8 */ }; @@ -539,10 +547,14 @@ sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3OpenMasterTable(pParse, iDb); base = sqlite3VdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger); - sqlite3VdbeChangeP3(v, base+1, pTrigger->name, 0); - sqlite3ChangeCookie(db, v, iDb); - sqlite3VdbeAddOp(v, OP_Close, 0, 0); - sqlite3VdbeOp3(v, OP_DropTrigger, iDb, 0, pTrigger->name, 0); + sqlite3VdbeChangeP4(v, base+1, pTrigger->name, 0); + sqlite3VdbeChangeP4(v, base+4, "trigger", P4_STATIC); + sqlite3ChangeCookie(pParse, iDb); + sqlite3VdbeAddOp2(v, OP_Close, 0, 0); + sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->name, 0); + if( pParse->nMem<3 ){ + pParse->nMem = 3; + } } } @@ -550,27 +562,17 @@ ** Remove a trigger from the hash tables of the sqlite* pointer. */ void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const char *zName){ + Hash *pHash = &(db->aDb[iDb].pSchema->trigHash); Trigger *pTrigger; - int nName = strlen(zName); - pTrigger = sqlite3HashInsert(&(db->aDb[iDb].pSchema->trigHash), - zName, nName, 0); - if( pTrigger ){ - Table *pTable = tableOfTrigger(pTrigger); - assert( pTable!=0 ); - if( pTable->pTrigger == pTrigger ){ - pTable->pTrigger = pTrigger->pNext; - }else{ - Trigger *cc = pTable->pTrigger; - while( cc ){ - if( cc->pNext == pTrigger ){ - cc->pNext = cc->pNext->pNext; - break; - } - cc = cc->pNext; - } - assert(cc); + pTrigger = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), 0); + if( ALWAYS(pTrigger) ){ + if( pTrigger->pSchema==pTrigger->pTabSchema ){ + Table *pTab = tableOfTrigger(pTrigger); + Trigger **pp; + for(pp=&pTab->pTrigger; *pp!=pTrigger; pp=&((*pp)->pNext)); + *pp = (*pp)->pNext; } - sqlite3DeleteTrigger(pTrigger); + sqlite3DeleteTrigger(db, pTrigger); db->flags |= SQLITE_InternChanges; } } @@ -584,9 +586,9 @@ ** it matches anything so always return true. Return false only ** if there is no match. */ -static int checkColumnOverLap(IdList *pIdList, ExprList *pEList){ +static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){ int e; - if( !pIdList || !pEList ) return 1; + if( pIdList==0 || NEVER(pEList==0) ) return 1; for(e=0; enExpr; e++){ if( sqlite3IdListIndex(pIdList, pEList->a[e].zName)>=0 ) return 1; } @@ -594,31 +596,31 @@ } /* -** Return a bit vector to indicate what kind of triggers exist for operation -** "op" on table pTab. If pChanges is not NULL then it is a list of columns -** that are being updated. Triggers only match if the ON clause of the -** trigger definition overlaps the set of columns being updated. -** -** The returned bit vector is some combination of TRIGGER_BEFORE and -** TRIGGER_AFTER. 
+** Return a list of all triggers on table pTab if there exists at least +** one trigger that must be fired when an operation of type 'op' is +** performed on the table, and, if that operation is an UPDATE, if at +** least one of the columns in pChanges is being modified. */ -int sqlite3TriggersExist( - Parse *pParse, /* Used to check for recursive triggers */ +Trigger *sqlite3TriggersExist( + Parse *pParse, /* Parse context */ Table *pTab, /* The table the contains the triggers */ int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */ - ExprList *pChanges /* Columns that change in an UPDATE statement */ + ExprList *pChanges, /* Columns that change in an UPDATE statement */ + int *pMask /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ ){ - Trigger *pTrigger; int mask = 0; - - pTrigger = IsVirtual(pTab) ? 0 : pTab->pTrigger; - while( pTrigger ){ - if( pTrigger->op==op && checkColumnOverLap(pTrigger->pColumns, pChanges) ){ - mask |= pTrigger->tr_tm; + Trigger *pList = sqlite3TriggerList(pParse, pTab); + Trigger *p; + assert( pList==0 || IsVirtual(pTab)==0 ); + for(p=pList; p; p=p->pNext){ + if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ + mask |= p->tr_tm; } - pTrigger = pTrigger->pNext; } - return mask; + if( pMask ){ + *pMask = mask; + } + return (mask ? pList : 0); } /* @@ -635,18 +637,19 @@ Parse *pParse, /* The parsing context */ TriggerStep *pStep /* The trigger containing the target token */ ){ - Token sDb; /* Dummy database name token */ int iDb; /* Index of the database to use */ SrcList *pSrc; /* SrcList to be returned */ - iDb = sqlite3SchemaToIndex(pParse->db, pStep->pTrig->pSchema); - if( iDb==0 || iDb>=2 ){ - assert( iDbdb->nDb ); - sDb.z = (u8*)pParse->db->aDb[iDb].zName; - sDb.n = strlen((char*)sDb.z); - pSrc = sqlite3SrcListAppend(0, &sDb, &pStep->target); - } else { - pSrc = sqlite3SrcListAppend(0, &pStep->target, 0); + pSrc = sqlite3SrcListAppend(pParse->db, 0, &pStep->target, 0); + if( pSrc ){ + assert( pSrc->nSrc>0 ); + assert( pSrc->a!=0 ); + iDb = sqlite3SchemaToIndex(pParse->db, pStep->pTrig->pSchema); + if( iDb==0 || iDb>=2 ){ + sqlite3 *db = pParse->db; + assert( iDbdb->nDb ); + pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName); + } } return pSrc; } @@ -663,60 +666,63 @@ TriggerStep * pTriggerStep = pStepList; int orconf; Vdbe *v = pParse->pVdbe; + sqlite3 *db = pParse->db; assert( pTriggerStep!=0 ); assert( v!=0 ); - sqlite3VdbeAddOp(v, OP_ContextPush, 0, 0); - VdbeComment((v, "# begin trigger %s", pStepList->pTrig->name)); + sqlite3VdbeAddOp2(v, OP_ContextPush, 0, 0); + VdbeComment((v, "begin trigger %s", pStepList->pTrig->name)); while( pTriggerStep ){ + sqlite3ExprCacheClear(pParse); orconf = (orconfin == OE_Default)?pTriggerStep->orconf:orconfin; pParse->trigStack->orconf = orconf; switch( pTriggerStep->op ){ - case TK_SELECT: { - Select *ss = sqlite3SelectDup(pTriggerStep->pSelect); - if( ss ){ - sqlite3SelectResolve(pParse, ss, 0); - sqlite3Select(pParse, ss, SRT_Discard, 0, 0, 0, 0, 0); - sqlite3SelectDelete(ss); - } - break; - } case TK_UPDATE: { SrcList *pSrc; pSrc = targetSrcList(pParse, pTriggerStep); - sqlite3VdbeAddOp(v, OP_ResetCount, 0, 0); + sqlite3VdbeAddOp2(v, OP_ResetCount, 0, 0); sqlite3Update(pParse, pSrc, - sqlite3ExprListDup(pTriggerStep->pExprList), - sqlite3ExprDup(pTriggerStep->pWhere), orconf); - sqlite3VdbeAddOp(v, OP_ResetCount, 1, 0); + sqlite3ExprListDup(db, pTriggerStep->pExprList, 0), + sqlite3ExprDup(db, pTriggerStep->pWhere, 0), orconf); + sqlite3VdbeAddOp2(v, OP_ResetCount, 1, 0); break; } case 
TK_INSERT: { SrcList *pSrc; pSrc = targetSrcList(pParse, pTriggerStep); - sqlite3VdbeAddOp(v, OP_ResetCount, 0, 0); + sqlite3VdbeAddOp2(v, OP_ResetCount, 0, 0); sqlite3Insert(pParse, pSrc, - sqlite3ExprListDup(pTriggerStep->pExprList), - sqlite3SelectDup(pTriggerStep->pSelect), - sqlite3IdListDup(pTriggerStep->pIdList), orconf); - sqlite3VdbeAddOp(v, OP_ResetCount, 1, 0); + sqlite3ExprListDup(db, pTriggerStep->pExprList, 0), + sqlite3SelectDup(db, pTriggerStep->pSelect, 0), + sqlite3IdListDup(db, pTriggerStep->pIdList), orconf); + sqlite3VdbeAddOp2(v, OP_ResetCount, 1, 0); break; } case TK_DELETE: { SrcList *pSrc; - sqlite3VdbeAddOp(v, OP_ResetCount, 0, 0); + sqlite3VdbeAddOp2(v, OP_ResetCount, 0, 0); pSrc = targetSrcList(pParse, pTriggerStep); - sqlite3DeleteFrom(pParse, pSrc, sqlite3ExprDup(pTriggerStep->pWhere)); - sqlite3VdbeAddOp(v, OP_ResetCount, 1, 0); + sqlite3DeleteFrom(pParse, pSrc, + sqlite3ExprDup(db, pTriggerStep->pWhere, 0)); + sqlite3VdbeAddOp2(v, OP_ResetCount, 1, 0); + break; + } + default: assert( pTriggerStep->op==TK_SELECT ); { + Select *ss = sqlite3SelectDup(db, pTriggerStep->pSelect, 0); + if( ss ){ + SelectDest dest; + + sqlite3SelectDestInit(&dest, SRT_Discard, 0); + sqlite3Select(pParse, ss, &dest); + sqlite3SelectDelete(db, ss); + } break; } - default: - assert(0); } pTriggerStep = pTriggerStep->pNext; } - sqlite3VdbeAddOp(v, OP_ContextPop, 0, 0); - VdbeComment((v, "# end trigger %s", pStepList->pTrig->name)); + sqlite3VdbeAddOp2(v, OP_ContextPop, 0, 0); + VdbeComment((v, "end trigger %s", pStepList->pTrig->name)); return 0; } @@ -740,9 +746,17 @@ ** a row containing values to be substituted for old.* expressions in the ** trigger program(s). ** +** If they are not NULL, the piOldColMask and piNewColMask output variables +** are set to values that describe the columns used by the trigger program +** in the OLD.* and NEW.* tables respectively. If column N of the +** pseudo-table is read at least once, the corresponding bit of the output +** mask is set. If a column with an index greater than 32 is read, the +** output mask is set to the special value 0xffffffff. +** */ int sqlite3CodeRowTrigger( Parse *pParse, /* Parse context */ + Trigger *pTrigger, /* List of triggers on table pTab */ int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ int tr_tm, /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ @@ -750,25 +764,37 @@ int newIdx, /* The indice of the "new" row to access */ int oldIdx, /* The indice of the "old" row to access */ int orconf, /* ON CONFLICT policy */ - int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ + int ignoreJump, /* Instruction to jump to for RAISE(IGNORE) */ + u32 *piOldColMask, /* OUT: Mask of columns used from the OLD.* table */ + u32 *piNewColMask /* OUT: Mask of columns used from the NEW.* table */ ){ Trigger *p; + sqlite3 *db = pParse->db; TriggerStack trigStackEntry; + trigStackEntry.oldColMask = 0; + trigStackEntry.newColMask = 0; + assert(op == TK_UPDATE || op == TK_INSERT || op == TK_DELETE); assert(tr_tm == TRIGGER_BEFORE || tr_tm == TRIGGER_AFTER ); assert(newIdx != -1 || oldIdx != -1); - for(p=pTab->pTrigger; p; p=p->pNext){ + for(p=pTrigger; p; p=p->pNext){ int fire_this = 0; + /* Sanity checking: The schema for the trigger and for the table are + ** always defined. The trigger must be in the same schema as the table + ** or else it must be a TEMP trigger. 
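(On the piOldColMask/piNewColMask convention described above: columns 0 through 31 each get their own bit, and touching any higher-numbered column collapses the mask to the catch-all value 0xffffffff. A minimal sketch of that rule follows; markColumnUsed() is an invented name, not an SQLite routine, and it treats any column index of 32 or more as "all columns".)

#include <stdio.h>

typedef unsigned int u32;

/* Record that column iCol of a pseudo-table is used.  Columns 0..31 get
** their own bit; anything higher forces the "all columns" value. */
static u32 markColumnUsed(u32 mask, int iCol){
  if( iCol<32 ){
    mask |= ((u32)1)<<iCol;
  }else{
    mask = 0xffffffff;
  }
  return mask;
}

int main(void){
  u32 m = 0;
  m = markColumnUsed(m, 0);
  m = markColumnUsed(m, 5);
  printf("mask=0x%08x\n", m);      /* 0x00000021 */
  m = markColumnUsed(m, 40);
  printf("mask=0x%08x\n", m);      /* 0xffffffff */
  return 0;
}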
*/ + assert( p->pSchema!=0 ); + assert( p->pTabSchema!=0 ); + assert( p->pSchema==p->pTabSchema || p->pSchema==db->aDb[1].pSchema ); + /* Determine whether we should code this trigger */ if( p->op==op && p->tr_tm==tr_tm && - (p->pSchema==p->pTabSchema || p->pSchema==pParse->db->aDb[1].pSchema) && - (op!=TK_UPDATE||!p->pColumns||checkColumnOverLap(p->pColumns,pChanges)) + checkColumnOverlap(p->pColumns,pChanges) ){ TriggerStack *pS; /* Pointer to trigger-stack entry */ for(pS=pParse->trigStack; pS && p!=pS->pTrigger; pS=pS->pNext){} @@ -790,6 +816,11 @@ AuthContext sContext; NameContext sNC; +#ifndef SQLITE_OMIT_TRACE + sqlite3VdbeAddOp4(pParse->pVdbe, OP_Trace, 0, 0, 0, + sqlite3MPrintf(db, "-- TRIGGER %s", p->name), + P4_DYNAMIC); +#endif memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; @@ -805,14 +836,14 @@ /* code the WHEN clause */ endTrigger = sqlite3VdbeMakeLabel(pParse->pVdbe); - whenExpr = sqlite3ExprDup(p->pWhen); - if( sqlite3ExprResolveNames(&sNC, whenExpr) ){ + whenExpr = sqlite3ExprDup(db, p->pWhen, 0); + if( db->mallocFailed || sqlite3ResolveExprNames(&sNC, whenExpr) ){ pParse->trigStack = trigStackEntry.pNext; - sqlite3ExprDelete(whenExpr); + sqlite3ExprDelete(db, whenExpr); return 1; } - sqlite3ExprIfFalse(pParse, whenExpr, endTrigger, 1); - sqlite3ExprDelete(whenExpr); + sqlite3ExprIfFalse(pParse, whenExpr, endTrigger, SQLITE_JUMPIFNULL); + sqlite3ExprDelete(db, whenExpr); codeTriggerProgram(pParse, p->step_list, orconf); @@ -823,6 +854,8 @@ sqlite3VdbeResolveLabel(pParse->pVdbe, endTrigger); } } + if( piOldColMask ) *piOldColMask |= trigStackEntry.oldColMask; + if( piNewColMask ) *piNewColMask |= trigStackEntry.newColMask; return 0; } #endif /* !defined(SQLITE_OMIT_TRIGGER) */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/update.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/update.c --- sqlite3-3.4.2/src/update.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/update.c 2009-06-27 12:17:35.000000000 +0100 @@ -12,7 +12,7 @@ ** This file contains C code routines that are called by the parser ** to handle UPDATE statements. ** -** $Id: update.c,v 1.138 2007/06/25 16:29:34 danielk1977 Exp $ +** $Id: update.c,v 1.204 2009/06/27 11:17:35 drh Exp $ */ #include "sqliteInt.h" @@ -31,7 +31,7 @@ /* ** The most recently coded instruction was an OP_Column to retrieve the -** i-th column of table pTab. This routine sets the P3 parameter of the +** i-th column of table pTab. This routine sets the P4 parameter of the ** OP_Column to the default value, if any. ** ** The default value of a column is specified by a DEFAULT clause in the @@ -39,9 +39,9 @@ ** was created, or added later to the table definition by an ALTER TABLE ** command. If the latter, then the row-records in the table btree on disk ** may not contain a value for the column and the default value, taken -** from the P3 parameter of the OP_Column instruction, is returned instead. +** from the P4 parameter of the OP_Column instruction, is returned instead. ** If the former, then all row-records are guaranteed to include a value -** for the column and the P3 value is not required. +** for the column and the P4 value is not required. ** ** Column definitions created by an ALTER TABLE command may only have ** literal default values specified: a number, null or a string. (If a more @@ -49,22 +49,23 @@ ** when the ALTER TABLE is executed and one of the literal values written ** into the sqlite_master table.) 
** -** Therefore, the P3 parameter is only required if the default value for +** Therefore, the P4 parameter is only required if the default value for ** the column is a literal number, string or null. The sqlite3ValueFromExpr() ** function is capable of transforming these types of expressions into ** sqlite3_value objects. */ void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i){ - if( pTab && !pTab->pSelect ){ + assert( pTab!=0 ); + if( !pTab->pSelect ){ sqlite3_value *pValue; u8 enc = ENC(sqlite3VdbeDb(v)); Column *pCol = &pTab->aCol[i]; + VdbeComment((v, "%s.%s", pTab->zName, pCol->zName)); assert( inCol ); - sqlite3ValueFromExpr(pCol->pDflt, enc, pCol->affinity, &pValue); + sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc, + pCol->affinity, &pValue); if( pValue ){ - sqlite3VdbeChangeP3(v, -1, (const char *)pValue, P3_MEM); - }else{ - VdbeComment((v, "# %s.%s", pTab->zName, pCol->zName)); + sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM); } } } @@ -90,11 +91,9 @@ Vdbe *v; /* The virtual database engine */ Index *pIdx; /* For looping over indices */ int nIdx; /* Number of indices that need updating */ - int nIdxTotal; /* Total number of indices */ int iCur; /* VDBE Cursor number of pTab */ sqlite3 *db; /* The database structure */ - Index **apIdx = 0; /* An array of indices that need updating too */ - char *aIdxUsed = 0; /* aIdxUsed[i]==1 if the i-th index is used */ + int *aRegIdx = 0; /* One register assigned to each index to be updated */ int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the ** an expression for the i-th column of the table. ** aXRef[i]==-1 if the i-th column is not changed. */ @@ -104,21 +103,35 @@ AuthContext sContext; /* The authorization context */ NameContext sNC; /* The name-context to resolve expressions in */ int iDb; /* Database containing the table being updated */ - int memCnt = 0; /* Memory cell used for counting rows changed */ + int j1; /* Addresses of jump instructions */ + int okOnePass; /* True for one-pass algorithm without the FIFO */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* Trying to update a view */ - int triggers_exist = 0; /* True if any row triggers exist */ + Trigger *pTrigger; /* List of triggers on pTab, if required */ #endif + int iBeginAfterTrigger = 0; /* Address of after trigger program */ + int iEndAfterTrigger = 0; /* Exit of after trigger program */ + int iBeginBeforeTrigger = 0; /* Address of before trigger program */ + int iEndBeforeTrigger = 0; /* Exit of before trigger program */ + u32 old_col_mask = 0; /* Mask of OLD.* columns in use */ + u32 new_col_mask = 0; /* Mask of NEW.* columns in use */ int newIdx = -1; /* index of trigger "new" temp table */ int oldIdx = -1; /* index of trigger "old" temp table */ + /* Register Allocations */ + int regRowCount = 0; /* A count of rows changed */ + int regOldRowid; /* The old rowid */ + int regNewRowid; /* The new rowid */ + int regData; /* New data for the row */ + int regRowSet = 0; /* Rowset of rows to be updated */ + sContext.pParse = 0; - if( pParse->nErr || sqlite3MallocFailed() ){ + db = pParse->db; + if( pParse->nErr || db->mallocFailed ){ goto update_cleanup; } - db = pParse->db; assert( pTabList->nSrc==1 ); /* Locate the table which we want to update. 
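(The sqlite3ColumnDefault() commentary above boils down to an observable fact: rows written before an ALTER TABLE ... ADD COLUMN carry no stored value for the new column, so the value seen by queries must come from the default attached to the column definition. A short program against the public C API makes the behaviour visible; error checking is omitted for brevity.)

#include <stdio.h>
#include <sqlite3.h>

static int show(void *unused, int nCol, char **azVal, char **azName){
  int i;
  (void)unused;
  for(i=0; i<nCol; i++) printf("%s=%s ", azName[i], azVal[i] ? azVal[i] : "NULL");
  printf("\n");
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a)", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);           /* old row */
  sqlite3_exec(db, "ALTER TABLE t ADD COLUMN b DEFAULT 'x'", 0, 0, 0);
  /* The stored record for the old row has no column b; its value is
  ** supplied by the default on the column definition. */
  sqlite3_exec(db, "SELECT a, b FROM t", show, 0, 0);             /* a=1 b=x */
  sqlite3_close(db);
  return 0;
}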
@@ -131,10 +144,10 @@ ** updated is a view */ #ifndef SQLITE_OMIT_TRIGGER - triggers_exist = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges); + pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, 0); isView = pTab->pSelect!=0; #else -# define triggers_exist 0 +# define pTrigger 0 # define isView 0 #endif #ifdef SQLITE_OMIT_VIEW @@ -142,20 +155,20 @@ # define isView 0 #endif - if( sqlite3IsReadOnly(pParse, pTab, triggers_exist) ){ + if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ goto update_cleanup; } if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto update_cleanup; } - aXRef = sqliteMallocRaw( sizeof(int) * pTab->nCol ); + aXRef = sqlite3DbMallocRaw(db, sizeof(int) * pTab->nCol ); if( aXRef==0 ) goto update_cleanup; for(i=0; inCol; i++) aXRef[i] = -1; /* If there are FOR EACH ROW triggers, allocate cursors for the ** special OLD and NEW tables */ - if( triggers_exist ){ + if( pTrigger ){ newIdx = pParse->nTab++; oldIdx = pParse->nTab++; } @@ -183,7 +196,7 @@ */ chngRowid = 0; for(i=0; inExpr; i++){ - if( sqlite3ExprResolveNames(&sNC, pChanges->a[i].pExpr) ){ + if( sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } for(j=0; jnCol; j++){ @@ -219,41 +232,47 @@ #endif } - /* Allocate memory for the array apIdx[] and fill it with pointers to every - ** index that needs to be updated. Indices only need updating if their - ** key includes one of the columns named in pChanges or if the record - ** number of the original table entry is changing. - */ - for(nIdx=nIdxTotal=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdxTotal++){ - if( chngRowid ){ - i = 0; - }else { - for(i=0; inColumn; i++){ - if( aXRef[pIdx->aiColumn[i]]>=0 ) break; - } - } - if( inColumn ) nIdx++; - } - if( nIdxTotal>0 ){ - apIdx = sqliteMallocRaw( sizeof(Index*) * nIdx + nIdxTotal ); - if( apIdx==0 ) goto update_cleanup; - aIdxUsed = (char*)&apIdx[nIdx]; + /* Allocate memory for the array aRegIdx[]. There is one entry in the + ** array for each index associated with table being updated. Fill in + ** the value with a register number for indices that are to be used + ** and with zero for unused indices. + */ + for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){} + if( nIdx>0 ){ + aRegIdx = sqlite3DbMallocRaw(db, sizeof(Index*) * nIdx ); + if( aRegIdx==0 ) goto update_cleanup; } - for(nIdx=j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ + for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ + int reg; if( chngRowid ){ - i = 0; + reg = ++pParse->nMem; }else{ + reg = 0; for(i=0; inColumn; i++){ - if( aXRef[pIdx->aiColumn[i]]>=0 ) break; + if( aXRef[pIdx->aiColumn[i]]>=0 ){ + reg = ++pParse->nMem; + break; + } } } - if( inColumn ){ - apIdx[nIdx++] = pIdx; - aIdxUsed[j] = 1; - }else{ - aIdxUsed[j] = 0; - } + aRegIdx[j] = reg; + } + + /* Allocate a block of register used to store the change record + ** sent to sqlite3GenerateConstraintChecks(). There are either + ** one or two registers for holding the rowid. One rowid register + ** is used if chngRowid is false and two are used if chngRowid is + ** true. Following these are pTab->nCol register holding column + ** data. + */ + regOldRowid = regNewRowid = pParse->nMem + 1; + pParse->nMem += pTab->nCol + 1; + if( chngRowid ){ + regNewRowid++; + pParse->nMem++; } + regData = regNewRowid+1; + /* Begin generating code. */ @@ -273,38 +292,71 @@ } #endif - /* Resolve the column names in all the expressions in the - ** WHERE clause. 
- */ - if( sqlite3ExprResolveNames(&sNC, pWhere) ){ - goto update_cleanup; - } - /* Start the view context */ if( isView ){ sqlite3AuthContextPush(pParse, &sContext, pTab->zName); } + /* Generate the code for triggers. + */ + if( pTrigger ){ + int iGoto; + + /* Create pseudo-tables for NEW and OLD + */ + sqlite3VdbeAddOp3(v, OP_OpenPseudo, oldIdx, 0, pTab->nCol); + sqlite3VdbeAddOp3(v, OP_OpenPseudo, newIdx, 0, pTab->nCol); + + iGoto = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); + addr = sqlite3VdbeMakeLabel(v); + iBeginBeforeTrigger = sqlite3VdbeCurrentAddr(v); + if( sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, + TRIGGER_BEFORE, pTab, newIdx, oldIdx, onError, addr, + &old_col_mask, &new_col_mask) ){ + goto update_cleanup; + } + iEndBeforeTrigger = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); + iBeginAfterTrigger = sqlite3VdbeCurrentAddr(v); + if( sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, + TRIGGER_AFTER, pTab, newIdx, oldIdx, onError, addr, + &old_col_mask, &new_col_mask) ){ + goto update_cleanup; + } + iEndAfterTrigger = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); + sqlite3VdbeJumpHere(v, iGoto); + } + /* If we are trying to update a view, realize that view into ** a ephemeral table. */ +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) if( isView ){ - Select *pView; - pView = sqlite3SelectDup(pTab->pSelect); - sqlite3Select(pParse, pView, SRT_EphemTab, iCur, 0, 0, 0, 0); - sqlite3SelectDelete(pView); + sqlite3MaterializeView(pParse, pTab, pWhere, iCur); + } +#endif + + /* Resolve the column names in all the expressions in the + ** WHERE clause. + */ + if( sqlite3ResolveExprNames(&sNC, pWhere) ){ + goto update_cleanup; } /* Begin the database scan */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, regOldRowid); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0, WHERE_ONEPASS_DESIRED); if( pWInfo==0 ) goto update_cleanup; + okOnePass = pWInfo->okOnePass; /* Remember the rowid of every item to be updated. */ - sqlite3VdbeAddOp(v, IsVirtual(pTab) ? OP_VRowid : OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_FifoWrite, 0, 0); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, regOldRowid); + if( !okOnePass ){ + regRowSet = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); + } /* End the database scan loop. */ @@ -313,202 +365,211 @@ /* Initialize the count of updated rows */ if( db->flags & SQLITE_CountRows && !pParse->trigStack ){ - memCnt = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemInt, 0, memCnt); + regRowCount = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); } - if( triggers_exist ){ - /* Create pseudo-tables for NEW and OLD + if( !isView ){ + /* + ** Open every index that needs updating. Note that if any + ** index could potentially invoke a REPLACE conflict resolution + ** action, then we need to open all indices because we might need + ** to be deleting some records. 
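(A side note on the rewritten scan above: unless the one-pass optimization applies, the rowid of every row to be changed is first recorded via OP_RowSetAdd, and the rows are only revisited and modified after the WHERE loop ends, so the update never disturbs the scan that finds its targets. Stripped of every SQLite detail, the shape is the familiar collect-then-mutate loop sketched here with made-up data.)

#include <stdio.h>

/* Phase 1: remember which entries match; phase 2: mutate them.  Mutating
** while still scanning could otherwise disturb the scan itself. */
int main(void){
  int aVal[] = {1, 5, 2, 8, 3};
  int aRow[5];                      /* stands in for the RowSet of rowids */
  int nRow = 0, i;

  for(i=0; i<5; i++){               /* scan (the WHERE loop) */
    if( aVal[i]>2 ) aRow[nRow++] = i;
  }
  for(i=0; i<nRow; i++){            /* update loop over remembered rows */
    aVal[aRow[i]] *= 10;
  }
  for(i=0; i<5; i++) printf("%d ", aVal[i]);   /* 1 50 2 80 30 */
  printf("\n");
  return 0;
}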
*/ - sqlite3VdbeAddOp(v, OP_OpenPseudo, oldIdx, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, oldIdx, pTab->nCol); - sqlite3VdbeAddOp(v, OP_OpenPseudo, newIdx, 0); - sqlite3VdbeAddOp(v, OP_SetNumColumns, newIdx, pTab->nCol); + if( !okOnePass ) sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenWrite); + if( onError==OE_Replace ){ + openAll = 1; + }else{ + openAll = 0; + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->onError==OE_Replace ){ + openAll = 1; + break; + } + } + } + for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ + if( openAll || aRegIdx[i]>0 ){ + KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); + sqlite3VdbeAddOp4(v, OP_OpenWrite, iCur+i+1, pIdx->tnum, iDb, + (char*)pKey, P4_KEYINFO_HANDOFF); + assert( pParse->nTab>iCur+i+1 ); + } + } + } + + /* Jump back to this point if a trigger encounters an IGNORE constraint. */ + if( pTrigger ){ + sqlite3VdbeResolveLabel(v, addr); + } - /* The top of the update loop for when there are triggers. - */ - addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, 0); + /* Top of the update loop */ + if( okOnePass ){ + int a1 = sqlite3VdbeAddOp1(v, OP_NotNull, regOldRowid); + addr = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeJumpHere(v, a1); + }else{ + addr = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet, 0, regOldRowid); + } - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - /* Open a cursor and make it point to the record that is - ** being updated. - */ - sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); - } - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); + if( pTrigger ){ + int regRowid; + int regRow; + int regCols; + + /* Make cursor iCur point to the record that is being updated. + */ + sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addr, regOldRowid); /* Generate the OLD table */ - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_RowData, iCur, 0); - sqlite3VdbeAddOp(v, OP_Insert, oldIdx, 0); + regRowid = sqlite3GetTempReg(pParse); + regRow = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, regRowid); + if( !old_col_mask ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, regRow); + }else{ + sqlite3VdbeAddOp2(v, OP_RowData, iCur, regRow); + } + sqlite3VdbeAddOp3(v, OP_Insert, oldIdx, regRow, regRowid); /* Generate the NEW table */ if( chngRowid ){ - sqlite3ExprCodeAndCache(pParse, pRowidExpr); + sqlite3ExprCodeAndCache(pParse, pRowidExpr, regRowid); + sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); }else{ - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, regRowid); } + regCols = sqlite3GetTempRange(pParse, pTab->nCol); for(i=0; inCol; i++){ if( i==pTab->iPKey ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, regCols+i); continue; } j = aXRef[i]; - if( j<0 ){ - sqlite3VdbeAddOp(v, OP_Column, iCur, i); - sqlite3ColumnDefault(v, pTab, i); + if( (i<32 && (new_col_mask&((u32)1<a[j].pExpr, regCols+i); + } }else{ - sqlite3ExprCodeAndCache(pParse, pChanges->a[j].pExpr); + sqlite3VdbeAddOp2(v, OP_Null, 0, regCols+i); } } - sqlite3VdbeAddOp(v, OP_MakeRecord, pTab->nCol, 0); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regCols, pTab->nCol, regRow); if( !isView ){ sqlite3TableAffinityStr(v, pTab); + sqlite3ExprCacheAffinityChange(pParse, regCols, pTab->nCol); } - if( pParse->nErr ) goto update_cleanup; - sqlite3VdbeAddOp(v, OP_Insert, newIdx, 0); - if( !isView ){ - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); - } + sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol); + /* if( pParse->nErr ) goto update_cleanup; */ + sqlite3VdbeAddOp3(v, 
OP_Insert, newIdx, regRow, regRowid); + sqlite3ReleaseTempReg(pParse, regRowid); + sqlite3ReleaseTempReg(pParse, regRow); - /* Fire the BEFORE and INSTEAD OF triggers - */ - if( sqlite3CodeRowTrigger(pParse, TK_UPDATE, pChanges, TRIGGER_BEFORE, pTab, - newIdx, oldIdx, onError, addr) ){ - goto update_cleanup; - } + sqlite3VdbeAddOp2(v, OP_Goto, 0, iBeginBeforeTrigger); + sqlite3VdbeJumpHere(v, iEndBeforeTrigger); } - if( !isView && !IsVirtual(pTab) ){ - /* - ** Open every index that needs updating. Note that if any - ** index could potentially invoke a REPLACE conflict resolution - ** action, then we need to open all indices because we might need - ** to be deleting some records. - */ - sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenWrite); - if( onError==OE_Replace ){ - openAll = 1; - }else{ - openAll = 0; - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->onError==OE_Replace ){ - openAll = 1; - break; - } - } - } - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - if( openAll || aIdxUsed[i] ){ - KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIdx); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - sqlite3VdbeOp3(v, OP_OpenWrite, iCur+i+1, pIdx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - assert( pParse->nTab>iCur+i+1 ); - } - } - + if( !isView ){ /* Loop over every record that needs updating. We have to load ** the old data for each record to be updated because some columns ** might not change and we will need to copy the old value. ** Also, the old data is needed to delete the old index entries. ** So make the cursor point at the old record. */ - if( !triggers_exist ){ - addr = sqlite3VdbeAddOp(v, OP_FifoRead, 0, 0); - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - } - sqlite3VdbeAddOp(v, OP_NotExists, iCur, addr); + sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addr, regOldRowid); /* If the record number will change, push the record number as it ** will be after the update. (The old record number is currently ** on top of the stack.) */ if( chngRowid ){ - sqlite3ExprCode(pParse, pRowidExpr); - sqlite3VdbeAddOp(v, OP_MustBeInt, 0, 0); + sqlite3ExprCode(pParse, pRowidExpr, regNewRowid); + sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); } /* Compute new data for this record. */ for(i=0; inCol; i++){ if( i==pTab->iPKey ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + sqlite3VdbeAddOp2(v, OP_Null, 0, regData+i); continue; } j = aXRef[i]; if( j<0 ){ - sqlite3VdbeAddOp(v, OP_Column, iCur, i); + sqlite3VdbeAddOp3(v, OP_Column, iCur, i, regData+i); sqlite3ColumnDefault(v, pTab, i); }else{ - sqlite3ExprCode(pParse, pChanges->a[j].pExpr); + sqlite3ExprCode(pParse, pChanges->a[j].pExpr, regData+i); } } /* Do constraint checks */ - sqlite3GenerateConstraintChecks(pParse, pTab, iCur, aIdxUsed, chngRowid, 1, - onError, addr); + sqlite3GenerateConstraintChecks(pParse, pTab, iCur, regNewRowid, + aRegIdx, chngRowid, 1, + onError, addr, 0); /* Delete the old indices for the current record. */ - sqlite3GenerateRowIndexDelete(v, pTab, iCur, aIdxUsed); + j1 = sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regOldRowid); + sqlite3GenerateRowIndexDelete(pParse, pTab, iCur, aRegIdx); /* If changing the record number, delete the old record. */ if( chngRowid ){ - sqlite3VdbeAddOp(v, OP_Delete, iCur, 0); + sqlite3VdbeAddOp2(v, OP_Delete, iCur, 0); } + sqlite3VdbeJumpHere(v, j1); /* Create the new index entries and the new record. 
*/ - sqlite3CompleteInsertion(pParse, pTab, iCur, aIdxUsed, chngRowid, 1, -1, 0); + sqlite3CompleteInsertion(pParse, pTab, iCur, regNewRowid, + aRegIdx, 1, -1, 0, 0); } /* Increment the row counter */ if( db->flags & SQLITE_CountRows && !pParse->trigStack){ - sqlite3VdbeAddOp(v, OP_MemIncr, 1, memCnt); + sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } /* If there are triggers, close all the cursors after each iteration ** through the loop. The fire the after triggers. */ - if( triggers_exist ){ - if( !isView ){ - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - if( openAll || aIdxUsed[i] ) - sqlite3VdbeAddOp(v, OP_Close, iCur+i+1, 0); - } - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); - } - if( sqlite3CodeRowTrigger(pParse, TK_UPDATE, pChanges, TRIGGER_AFTER, pTab, - newIdx, oldIdx, onError, addr) ){ - goto update_cleanup; - } + if( pTrigger ){ + sqlite3VdbeAddOp2(v, OP_Goto, 0, iBeginAfterTrigger); + sqlite3VdbeJumpHere(v, iEndAfterTrigger); } /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated. */ - sqlite3VdbeAddOp(v, OP_Goto, 0, addr); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addr); sqlite3VdbeJumpHere(v, addr); - /* Close all tables if there were no FOR EACH ROW triggers */ - if( !triggers_exist ){ - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - if( openAll || aIdxUsed[i] ){ - sqlite3VdbeAddOp(v, OP_Close, iCur+i+1, 0); - } + /* Close all tables */ + for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ + if( openAll || aRegIdx[i]>0 ){ + sqlite3VdbeAddOp2(v, OP_Close, iCur+i+1, 0); } - sqlite3VdbeAddOp(v, OP_Close, iCur, 0); - }else{ - sqlite3VdbeAddOp(v, OP_Close, newIdx, 0); - sqlite3VdbeAddOp(v, OP_Close, oldIdx, 0); + } + sqlite3VdbeAddOp2(v, OP_Close, iCur, 0); + if( pTrigger ){ + sqlite3VdbeAddOp2(v, OP_Close, newIdx, 0); + sqlite3VdbeAddOp2(v, OP_Close, oldIdx, 0); + } + + /* Update the sqlite_sequence table by storing the content of the + ** maximum rowid counter values recorded while inserting into + ** autoincrement tables. + */ + if( pParse->nested==0 && pParse->trigStack==0 ){ + sqlite3AutoincrementEnd(pParse); } /* @@ -517,19 +578,18 @@ ** invoke the callback function. */ if( db->flags & SQLITE_CountRows && !pParse->trigStack && pParse->nested==0 ){ - sqlite3VdbeAddOp(v, OP_MemLoad, memCnt, 0); - sqlite3VdbeAddOp(v, OP_Callback, 1, 0); + sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", P3_STATIC); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); } update_cleanup: sqlite3AuthContextPop(&sContext); - sqliteFree(apIdx); - sqliteFree(aXRef); - sqlite3SrcListDelete(pTabList); - sqlite3ExprListDelete(pChanges); - sqlite3ExprDelete(pWhere); + sqlite3DbFree(db, aRegIdx); + sqlite3DbFree(db, aXRef); + sqlite3SrcListDelete(db, pTabList); + sqlite3ExprListDelete(db, pChanges); + sqlite3ExprDelete(db, pWhere); return; } @@ -569,59 +629,64 @@ int ephemTab; /* Table holding the result of the SELECT */ int i; /* Loop counter */ int addr; /* Address of top of loop */ + int iReg; /* First register in set passed to OP_VUpdate */ + sqlite3 *db = pParse->db; /* Database connection */ + const char *pVtab = (const char*)pTab->pVtab; + SelectDest dest; /* Construct the SELECT statement that will find the new values for ** all updated rows. 
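(For orientation on the virtual-table path that follows: the updated values are computed by an ordinary SELECT whose result set is stored in an ephemeral table, and each stored row is then handed to the virtual table's xUpdate method through OP_VUpdate. Roughly speaking, and with no rowid change, an UPDATE such as UPDATE vt SET a=a+1 WHERE b>5 is evaluated as SELECT _rowid_, a+1, b FROM vt WHERE b>5, with the rowid value passed twice, once as the old rowid and once as the new one, ahead of the column values in the OP_VUpdate argument block.)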
*/ - pEList = sqlite3ExprListAppend(0, sqlite3CreateIdExpr("_rowid_"), 0); + pEList = sqlite3ExprListAppend(pParse, 0, + sqlite3CreateIdExpr(pParse, "_rowid_")); if( pRowid ){ - pEList = sqlite3ExprListAppend(pEList, sqlite3ExprDup(pRowid), 0); + pEList = sqlite3ExprListAppend(pParse, pEList, + sqlite3ExprDup(db, pRowid, 0)); } assert( pTab->iPKey<0 ); for(i=0; inCol; i++){ if( aXRef[i]>=0 ){ - pExpr = sqlite3ExprDup(pChanges->a[aXRef[i]].pExpr); + pExpr = sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0); }else{ - pExpr = sqlite3CreateIdExpr(pTab->aCol[i].zName); + pExpr = sqlite3CreateIdExpr(pParse, pTab->aCol[i].zName); } - pEList = sqlite3ExprListAppend(pEList, pExpr, 0); + pEList = sqlite3ExprListAppend(pParse, pEList, pExpr); } - pSelect = sqlite3SelectNew(pEList, pSrc, pWhere, 0, 0, 0, 0, 0, 0); + pSelect = sqlite3SelectNew(pParse, pEList, pSrc, pWhere, 0, 0, 0, 0, 0, 0); /* Create the ephemeral table into which the update results will ** be stored. */ assert( v ); ephemTab = pParse->nTab++; - sqlite3VdbeAddOp(v, OP_OpenEphemeral, ephemTab, pTab->nCol+1+(pRowid!=0)); + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, pTab->nCol+1+(pRowid!=0)); /* fill the ephemeral table */ - sqlite3Select(pParse, pSelect, SRT_Table, ephemTab, 0, 0, 0, 0); + sqlite3SelectDestInit(&dest, SRT_Table, ephemTab); + sqlite3Select(pParse, pSelect, &dest); - /* - ** Generate code to scan the ephemeral table and call VDelete and - ** VInsert - */ - sqlite3VdbeAddOp(v, OP_Rewind, ephemTab, 0); - addr = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_Column, ephemTab, 0); - if( pRowid ){ - sqlite3VdbeAddOp(v, OP_Column, ephemTab, 1); - }else{ - sqlite3VdbeAddOp(v, OP_Dup, 0, 0); - } + /* Generate code to scan the ephemeral table and call VUpdate. */ + iReg = ++pParse->nMem; + pParse->nMem += pTab->nCol+1; + addr = sqlite3VdbeAddOp2(v, OP_Rewind, ephemTab, 0); + sqlite3VdbeAddOp3(v, OP_Column, ephemTab, 0, iReg); + sqlite3VdbeAddOp3(v, OP_Column, ephemTab, (pRowid?1:0), iReg+1); for(i=0; inCol; i++){ - sqlite3VdbeAddOp(v, OP_Column, ephemTab, i+1+(pRowid!=0)); + sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i+1+(pRowid!=0), iReg+2+i); } - pParse->pVirtualLock = pTab; - sqlite3VdbeOp3(v, OP_VUpdate, 0, pTab->nCol+2, - (const char*)pTab->pVtab, P3_VTAB); - sqlite3VdbeAddOp(v, OP_Next, ephemTab, addr); - sqlite3VdbeJumpHere(v, addr-1); - sqlite3VdbeAddOp(v, OP_Close, ephemTab, 0); + sqlite3VtabMakeWritable(pParse, pTab); + sqlite3VdbeAddOp4(v, OP_VUpdate, 0, pTab->nCol+2, iReg, pVtab, P4_VTAB); + sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); + sqlite3VdbeJumpHere(v, addr); + sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0); /* Cleanup */ - sqlite3SelectDelete(pSelect); + sqlite3SelectDelete(db, pSelect); } #endif /* SQLITE_OMIT_VIRTUALTABLE */ + +/* Make sure "isView" gets undefined in case this file becomes part of +** the amalgamation - so that subsequent files do not see isView as a +** macro. */ +#undef isView diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/utf.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/utf.c --- sqlite3-3.4.2/src/utf.c 2007-08-07 18:04:59.000000000 +0100 +++ sqlite3-3.6.16/src/utf.c 2009-06-25 12:22:33.000000000 +0100 @@ -12,7 +12,7 @@ ** This file contains routines used to translate between UTF-8, ** UTF-16, UTF-16BE, and UTF-16LE. 
** -** $Id: utf.c,v 1.53 2007/08/07 17:04:59 drh Exp $ +** $Id: utf.c,v 1.73 2009/04/01 18:40:32 drh Exp $ ** ** Notes on UTF-8: ** @@ -39,17 +39,19 @@ #include #include "vdbeInt.h" +#ifndef SQLITE_AMALGAMATION /* ** The following constant value is used by the SQLITE_BIGENDIAN and ** SQLITE_LITTLEENDIAN macros. */ const int sqlite3one = 1; +#endif /* SQLITE_AMALGAMATION */ /* ** This lookup table is used to help decode the first byte of ** a multi-byte UTF8 character. */ -static const unsigned char sqlite3UtfTrans1[] = { +static const unsigned char sqlite3Utf8Trans1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, @@ -63,67 +65,65 @@ #define WRITE_UTF8(zOut, c) { \ if( c<0x00080 ){ \ - *zOut++ = (c&0xFF); \ + *zOut++ = (u8)(c&0xFF); \ } \ else if( c<0x00800 ){ \ - *zOut++ = 0xC0 + ((c>>6)&0x1F); \ - *zOut++ = 0x80 + (c & 0x3F); \ + *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ else if( c<0x10000 ){ \ - *zOut++ = 0xE0 + ((c>>12)&0x0F); \ - *zOut++ = 0x80 + ((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (c & 0x3F); \ + *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ }else{ \ - *zOut++ = 0xF0 + ((c>>18) & 0x07); \ - *zOut++ = 0x80 + ((c>>12) & 0x3F); \ - *zOut++ = 0x80 + ((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (c & 0x3F); \ + *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ + *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ } -#define WRITE_UTF16LE(zOut, c) { \ - if( c<=0xFFFF ){ \ - *zOut++ = (c&0x00FF); \ - *zOut++ = ((c>>8)&0x00FF); \ - }else{ \ - *zOut++ = (((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ - *zOut++ = (0x00D8 + (((c-0x10000)>>18)&0x03)); \ - *zOut++ = (c&0x00FF); \ - *zOut++ = (0x00DC + ((c>>8)&0x03)); \ - } \ -} - -#define WRITE_UTF16BE(zOut, c) { \ - if( c<=0xFFFF ){ \ - *zOut++ = ((c>>8)&0x00FF); \ - *zOut++ = (c&0x00FF); \ - }else{ \ - *zOut++ = (0x00D8 + (((c-0x10000)>>18)&0x03)); \ - *zOut++ = (((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ - *zOut++ = (0x00DC + ((c>>8)&0x03)); \ - *zOut++ = (c&0x00FF); \ - } \ +#define WRITE_UTF16LE(zOut, c) { \ + if( c<=0xFFFF ){ \ + *zOut++ = (u8)(c&0x00FF); \ + *zOut++ = (u8)((c>>8)&0x00FF); \ + }else{ \ + *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ + *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ + *zOut++ = (u8)(c&0x00FF); \ + *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ + } \ +} + +#define WRITE_UTF16BE(zOut, c) { \ + if( c<=0xFFFF ){ \ + *zOut++ = (u8)((c>>8)&0x00FF); \ + *zOut++ = (u8)(c&0x00FF); \ + }else{ \ + *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ + *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ + *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ + *zOut++ = (u8)(c&0x00FF); \ + } \ } #define READ_UTF16LE(zIn, c){ \ c = (*zIn++); \ c += ((*zIn++)<<8); \ - if( c>=0xD800 && c<0xE000 ){ \ + if( c>=0xD800 && c<0xE000 ){ \ int c2 = (*zIn++); \ c2 += ((*zIn++)<<8); \ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ - if( (c & 0xFFFF0000)==0 ) c = 0xFFFD; \ } \ } #define READ_UTF16BE(zIn, c){ \ c = ((*zIn++)<<8); \ c += (*zIn++); \ - if( c>=0xD800 && c<0xE000 ){ \ + if( c>=0xD800 && c<0xE000 ){ \ int c2 = ((*zIn++)<<8); \ c2 += (*zIn++); \ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ - if( (c & 0xFFFF0000)==0 ) c = 0xFFFD; \ } \ } @@ -154,27 +154,43 @@ ** for unicode values 0x80 and 
greater. It do not change over-length ** encodings to 0xfffd as some systems recommend. */ +#define READ_UTF8(zIn, zTerm, c) \ + c = *(zIn++); \ + if( c>=0xc0 ){ \ + c = sqlite3Utf8Trans1[c-0xc0]; \ + while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + c = (c<<6) + (0x3f & *(zIn++)); \ + } \ + if( c<0x80 \ + || (c&0xFFFFF800)==0xD800 \ + || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ + } int sqlite3Utf8Read( - const unsigned char *z, /* First byte of UTF-8 character */ - const unsigned char *zTerm, /* Pretend this byte is 0x00 */ + const unsigned char *zIn, /* First byte of UTF-8 character */ const unsigned char **pzNext /* Write first byte past UTF-8 char here */ ){ - int c = *(z++); + int c; + + /* Same as READ_UTF8() above but without the zTerm parameter. + ** For this routine, we assume the UTF8 string is always zero-terminated. + */ + c = *(zIn++); if( c>=0xc0 ){ - c = sqlite3UtfTrans1[c-0xc0]; - while( z!=zTerm && (*z & 0xc0)==0x80 ){ - c = (c<<6) + (0x3f & *(z++)); + c = sqlite3Utf8Trans1[c-0xc0]; + while( (*zIn & 0xc0)==0x80 ){ + c = (c<<6) + (0x3f & *(zIn++)); } if( c<0x80 || (c&0xFFFFF800)==0xD800 || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } } - *pzNext = z; + *pzNext = zIn; return c; } + /* ** If the TRANSLATE_TRACE macro is defined, the value of each Mem is ** printed on stderr on the way into and out of sqlite3VdbeMemTranslate(). @@ -188,7 +204,6 @@ ** encoding, or if *pMem does not contain a string value. */ int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){ - unsigned char zShort[NBFS]; /* Temporary short output buffer */ int len; /* Maximum length of output string in bytes */ unsigned char *zOut; /* Output buffer */ unsigned char *zIn; /* Input iterator */ @@ -196,6 +211,7 @@ unsigned char *z; /* Output iterator */ unsigned int c; + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( pMem->flags&MEM_Str ); assert( pMem->enc!=desiredEnc ); assert( pMem->enc!=0 ); @@ -222,7 +238,7 @@ return SQLITE_NOMEM; } zIn = (u8*)pMem->z; - zTerm = &zIn[pMem->n]; + zTerm = &zIn[pMem->n&~1]; while( zInn &= ~1; len = pMem->n * 2 + 1; }else{ /* When converting from UTF-8 to UTF-16 the maximum growth is caused @@ -253,17 +270,14 @@ /* Set zIn to point at the start of the input buffer and zTerm to point 1 ** byte past the end. ** - ** Variable zOut is set to point at the output buffer. This may be space - ** obtained from malloc(), or Mem.zShort, if it large enough and not in - ** use, or the zShort array on the stack (see above). + ** Variable zOut is set to point at the output buffer, space obtained + ** from sqlite3_malloc(). 
*/ zIn = (u8*)pMem->z; zTerm = &zIn[pMem->n]; - if( len>NBFS ){ - zOut = sqliteMallocRaw(len); - if( !zOut ) return SQLITE_NOMEM; - }else{ - zOut = zShort; + zOut = sqlite3DbMallocRaw(pMem->db, len); + if( !zOut ){ + return SQLITE_NOMEM; } z = zOut; @@ -271,18 +285,20 @@ if( desiredEnc==SQLITE_UTF16LE ){ /* UTF-8 -> UTF-16 Little-endian */ while( zIn UTF-16 Big-endian */ while( zInn = z - zOut; + pMem->n = (int)(z - zOut); *z++ = 0; }else{ assert( desiredEnc==SQLITE_UTF8 ); @@ -293,28 +309,23 @@ WRITE_UTF8(z, c); } }else{ - /* UTF-16 Little-endian -> UTF-8 */ + /* UTF-16 Big-endian -> UTF-8 */ while( zInn = z - zOut; + pMem->n = (int)(z - zOut); } *z = 0; assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len ); sqlite3VdbeMemRelease(pMem); - pMem->flags &= ~(MEM_Static|MEM_Dyn|MEM_Ephem|MEM_Short); + pMem->flags &= ~(MEM_Static|MEM_Dyn|MEM_Ephem); pMem->enc = desiredEnc; - if( zOut==zShort ){ - memcpy(pMem->zShort, zOut, len); - zOut = (u8*)pMem->zShort; - pMem->flags |= (MEM_Term|MEM_Short); - }else{ - pMem->flags |= (MEM_Term|MEM_Dyn); - } + pMem->flags |= (MEM_Term|MEM_Dyn); pMem->z = (char*)zOut; + pMem->zMalloc = pMem->z; translate_out: #if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG) @@ -340,7 +351,8 @@ int rc = SQLITE_OK; u8 bom = 0; - if( pMem->n<0 || pMem->n>1 ){ + assert( pMem->n>=0 ); + if( pMem->n>1 ){ u8 b1 = *(u8 *)pMem->z; u8 b2 = *(((u8 *)pMem->z) + 1); if( b1==0xFE && b2==0xFF ){ @@ -352,23 +364,14 @@ } if( bom ){ - /* This function is called as soon as a string is stored in a Mem*, - ** from within sqlite3VdbeMemSetStr(). At that point it is not possible - ** for the string to be stored in Mem.zShort, or for it to be stored - ** in dynamic memory with no destructor. - */ - assert( !(pMem->flags&MEM_Short) ); - assert( !(pMem->flags&MEM_Dyn) || pMem->xDel ); - if( pMem->flags & MEM_Dyn ){ - void (*xDel)(void*) = pMem->xDel; - char *z = pMem->z; - pMem->z = 0; - pMem->xDel = 0; - rc = sqlite3VdbeMemSetStr(pMem, &z[2], pMem->n-2, bom, SQLITE_TRANSIENT); - xDel(z); - }else{ - rc = sqlite3VdbeMemSetStr(pMem, &pMem->z[2], pMem->n-2, bom, - SQLITE_TRANSIENT); + rc = sqlite3VdbeMemMakeWriteable(pMem); + if( rc==SQLITE_OK ){ + pMem->n -= 2; + memmove(pMem->z, &pMem->z[2], pMem->n); + pMem->z[pMem->n] = '\0'; + pMem->z[pMem->n+1] = '\0'; + pMem->flags |= MEM_Term; + pMem->enc = bom; } } return rc; @@ -399,34 +402,66 @@ return r; } +/* This test function is not currently used by the automated test-suite. +** Hence it is only available in debug builds. +*/ +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) +/* +** Translate UTF-8 to UTF-8. +** +** This has the effect of making sure that the string is well-formed +** UTF-8. Miscoded characters are removed. +** +** The translation is done in-place (since it is impossible for the +** correct UTF-8 encoding to be longer than a malformed encoding). +*/ +int sqlite3Utf8To8(unsigned char *zIn){ + unsigned char *zOut = zIn; + unsigned char *zStart = zIn; + u32 c; + + while( zIn[0] ){ + c = sqlite3Utf8Read(zIn, (const u8**)&zIn); + if( c!=0xfffd ){ + WRITE_UTF8(zOut, c); + } + } + *zOut = 0; + return (int)(zOut - zStart); +} +#endif + #ifndef SQLITE_OMIT_UTF16 /* ** Convert a UTF-16 string in the native encoding into a UTF-8 string. -** Memory to hold the UTF-8 string is obtained from malloc and must be -** freed by the calling function. +** Memory to hold the UTF-8 string is obtained from sqlite3_malloc and must +** be freed by the calling function. ** ** NULL is returned if there is an allocation error. 
*/ -char *sqlite3Utf16to8(const void *z, int nByte){ +char *sqlite3Utf16to8(sqlite3 *db, const void *z, int nByte){ Mem m; memset(&m, 0, sizeof(m)); + m.db = db; sqlite3VdbeMemSetStr(&m, z, nByte, SQLITE_UTF16NATIVE, SQLITE_STATIC); sqlite3VdbeChangeEncoding(&m, SQLITE_UTF8); - assert( (m.flags & MEM_Term)!=0 || sqlite3MallocFailed() ); - assert( (m.flags & MEM_Str)!=0 || sqlite3MallocFailed() ); - return (m.flags & MEM_Dyn)!=0 ? m.z : sqliteStrDup(m.z); + if( db->mallocFailed ){ + sqlite3VdbeMemRelease(&m); + m.z = 0; + } + assert( (m.flags & MEM_Term)!=0 || db->mallocFailed ); + assert( (m.flags & MEM_Str)!=0 || db->mallocFailed ); + return (m.flags & MEM_Dyn)!=0 ? m.z : sqlite3DbStrDup(db, m.z); } /* -** pZ is a UTF-16 encoded unicode string. If nChar is less than zero, -** return the number of bytes up to (but not including), the first pair -** of consecutive 0x00 bytes in pZ. If nChar is not less than zero, -** then return the number of bytes in the first nChar unicode characters -** in pZ (or up until the first pair of 0x00 bytes, whichever comes first). +** pZ is a UTF-16 encoded unicode string at least nChar characters long. +** Return the number of bytes in the first nChar unicode characters +** in pZ. nChar must be non-negative. */ int sqlite3Utf16ByteLen(const void *zIn, int nChar){ - unsigned int c = 1; - char const *z = zIn; + int c; + unsigned char const *z = zIn; int n = 0; if( SQLITE_UTF16NATIVE==SQLITE_UTF16BE ){ /* Using an "if (SQLITE_UTF16NATIVE==SQLITE_UTF16BE)" construct here @@ -438,45 +473,18 @@ ** which branch will be followed. It is therefore assumed that no runtime ** penalty is paid for this "if" statement. */ - while( c && ((nChar<0) || n0 && n<=4 ); z[0] = 0; - zTerm = z; z = zBuf; - c = sqlite3Utf8Read(z, zTerm, (const u8**)&z); + c = sqlite3Utf8Read(z, (const u8**)&z); t = i; if( i>=0xD800 && i<=0xDFFF ) t = 0xFFFD; if( (i&0xFFFFFFFE)==0xFFFE ) t = 0xFFFD; @@ -510,7 +517,8 @@ if( i>=0xD800 && i<0xE000 ) continue; z = zBuf; WRITE_UTF16LE(z, i); - n = z-zBuf; + n = (int)(z-zBuf); + assert( n>0 && n<=4 ); z[0] = 0; z = zBuf; READ_UTF16LE(z, c); @@ -521,7 +529,8 @@ if( i>=0xD800 && i<0xE000 ) continue; z = zBuf; WRITE_UTF16BE(z, i); - n = z-zBuf; + n = (int)(z-zBuf); + assert( n>0 && n<=4 ); z[0] = 0; z = zBuf; READ_UTF16BE(z, c); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/util.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/util.c --- sqlite3-3.4.2/src/util.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/util.c 2009-06-25 12:45:58.000000000 +0100 @@ -14,13 +14,83 @@ ** This file contains functions for allocating memory, comparing ** strings, and stuff like that. ** -** $Id: util.c,v 1.207 2007/06/26 00:37:28 drh Exp $ +** $Id: util.c,v 1.261 2009/06/24 10:26:33 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" #include -#include +#ifdef SQLITE_HAVE_ISNAN +# include +#endif + +/* +** Routine needed to support the testcase() macro. +*/ +#ifdef SQLITE_COVERAGE_TEST +void sqlite3Coverage(int x){ + static int dummy = 0; + dummy += x; +} +#endif + +/* +** Return true if the floating point value is Not a Number (NaN). +** +** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN. +** Otherwise, we have our own implementation that works on most systems. +*/ +int sqlite3IsNaN(double x){ + int rc; /* The value return */ +#if !defined(SQLITE_HAVE_ISNAN) + /* + ** Systems that support the isnan() library function should probably + ** make use of it by compiling with -DSQLITE_HAVE_ISNAN. 
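The new sqlite3Utf16ByteLen() contract above is narrower than before: nChar must be non-negative, and the routine just counts the bytes used by the first nChar characters, charging four bytes for a surrogate pair and two for any other code unit. The sketch below applies the same counting rule to UTF-16LE only; the real routine goes through the READ_UTF16LE/READ_UTF16BE macros and the native byte order, so treat this as an approximation for illustration.

#include <stdio.h>

/* Return the number of bytes occupied by the first nChar characters of a
** UTF-16LE string.  Code units 0xD800..0xDBFF are lead surrogates, so a
** character starting with one of them takes 4 bytes instead of 2. */
static int utf16leByteLen(const unsigned char *z, int nChar){
  int n = 0;
  while( nChar-- > 0 ){
    int c = z[n] | (z[n+1]<<8);      /* read one little-endian code unit */
    n += 2;
    if( c>=0xD800 && c<0xDC00 ){     /* lead surrogate: include the trail unit */
      n += 2;
    }
  }
  return n;
}

int main(void){
  /* "A" (U+0041), U+1F600 (surrogate pair D83D DE00), "B" (U+0042) */
  static const unsigned char z[] = { 0x41,0x00, 0x3D,0xD8, 0x00,0xDE, 0x42,0x00 };
  printf("%d\n", utf16leByteLen(z, 3));   /* prints 8 */
  return 0;
}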
But we have + ** found that many systems do not have a working isnan() function so + ** this implementation is provided as an alternative. + ** + ** This NaN test sometimes fails if compiled on GCC with -ffast-math. + ** On the other hand, the use of -ffast-math comes with the following + ** warning: + ** + ** This option [-ffast-math] should never be turned on by any + ** -O option since it can result in incorrect output for programs + ** which depend on an exact implementation of IEEE or ISO + ** rules/specifications for math functions. + ** + ** Under MSVC, this NaN test may fail if compiled with a floating- + ** point precision mode other than /fp:precise. From the MSDN + ** documentation: + ** + ** The compiler [with /fp:precise] will properly handle comparisons + ** involving NaN. For example, x != x evaluates to true if x is NaN + ** ... + */ +#ifdef __FAST_MATH__ +# error SQLite will not work correctly with the -ffast-math option of GCC. +#endif + volatile double y = x; + volatile double z = y; + rc = (y!=z); +#else /* if defined(SQLITE_HAVE_ISNAN) */ + rc = isnan(x); +#endif /* SQLITE_HAVE_ISNAN */ + testcase( rc ); + return rc; +} +/* +** Compute a string length that is limited to what can be stored in +** lower 30 bits of a 32-bit signed integer. +** +** The value returned will never be negative. Nor will it ever be greater +** than the actual length of the string. For very long strings (greater +** than 1GiB) the value returned might be less than the true string length. +*/ +int sqlite3Strlen30(const char *z){ + const char *z2 = z; + if( z==0 ) return 0; + while( *z2 ){ z2++; } + return 0x3fffffff & (int)(z2 - z); +} /* ** Set the most recent error code and error string for the sqlite @@ -44,15 +114,15 @@ ** to NULL. */ void sqlite3Error(sqlite3 *db, int err_code, const char *zFormat, ...){ - if( db && (db->pErr || (db->pErr = sqlite3ValueNew())!=0) ){ + if( db && (db->pErr || (db->pErr = sqlite3ValueNew(db))!=0) ){ db->errCode = err_code; if( zFormat ){ char *z; va_list ap; va_start(ap, zFormat); - z = sqlite3VMPrintf(zFormat, ap); + z = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); - sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, sqlite3FreeX); + sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC); }else{ sqlite3ValueSetStr(db->pErr, 0, 0, SQLITE_UTF8, SQLITE_STATIC); } @@ -78,21 +148,20 @@ */ void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ va_list ap; + sqlite3 *db = pParse->db; pParse->nErr++; - sqliteFree(pParse->zErrMsg); + sqlite3DbFree(db, pParse->zErrMsg); va_start(ap, zFormat); - pParse->zErrMsg = sqlite3VMPrintf(zFormat, ap); + pParse->zErrMsg = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); - if( pParse->rc==SQLITE_OK ){ - pParse->rc = SQLITE_ERROR; - } + pParse->rc = SQLITE_ERROR; } /* ** Clear the error message in pParse, if any */ void sqlite3ErrorClear(Parse *pParse){ - sqliteFree(pParse->zErrMsg); + sqlite3DbFree(pParse->db, pParse->zErrMsg); pParse->zErrMsg = 0; pParse->nErr = 0; } @@ -103,77 +172,46 @@ ** input does not begin with a quote character, then this routine ** is a no-op. ** +** The input string must be zero-terminated. A new zero-terminator +** is added to the dequoted string. +** +** The return value is -1 if no dequoting occurs or the length of the +** dequoted string, exclusive of the zero terminator, if dequoting does +** occur. +** ** 2002-Feb-14: This routine is extended to remove MS-Access style ** brackets from around identifers. For example: "[a-b-c]" becomes ** "a-b-c". 
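Two of the helpers added to util.c here deserve a closer look. sqlite3IsNaN() relies on the IEEE 754 rule that NaN is the only value unequal to itself, passing the value through volatile temporaries so the comparison cannot be optimized away (the property that -ffast-math breaks, as the comment warns), and sqlite3Strlen30() masks the length down to 30 bits so the result always fits comfortably in a signed 32-bit int. A standalone sketch of both ideas:

#include <stdio.h>

/* NaN test without <math.h>: under IEEE 754 arithmetic only a NaN compares
** unequal to itself.  The volatiles keep an aggressive optimizer from
** folding the comparison to false. */
static int isNaNPortable(double x){
  volatile double y = x;
  volatile double z = y;
  return y!=z;
}

/* String length clamped to the low 30 bits, like sqlite3Strlen30(). */
static int strlen30(const char *z){
  const char *z2 = z;
  if( z==0 ) return 0;
  while( *z2 ){ z2++; }
  return 0x3fffffff & (int)(z2 - z);
}

int main(void){
  volatile double zero = 0.0;        /* volatile so 0.0/0.0 happens at run time */
  double notANumber = zero/zero;
  printf("isNaN(0/0)=%d  isNaN(1.5)=%d\n", isNaNPortable(notANumber), isNaNPortable(1.5));
  printf("strlen30(\"hello\")=%d\n", strlen30("hello"));
  return 0;
}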
*/ -void sqlite3Dequote(char *z){ - int quote; +int sqlite3Dequote(char *z){ + char quote; int i, j; - if( z==0 ) return; + if( z==0 ) return -1; quote = z[0]; switch( quote ){ case '\'': break; case '"': break; case '`': break; /* For MySQL compatibility */ case '[': quote = ']'; break; /* For MS SqlServer compatibility */ - default: return; + default: return -1; } - for(i=1, j=0; z[i]; i++){ + for(i=1, j=0; ALWAYS(z[i]); i++){ if( z[i]==quote ){ if( z[i+1]==quote ){ z[j++] = quote; i++; }else{ - z[j++] = 0; break; } }else{ z[j++] = z[i]; } } + z[j] = 0; + return j; } -/* An array to map all upper-case characters into their corresponding -** lower-case character. -*/ -const unsigned char sqlite3UpperToLower[] = { -#ifdef SQLITE_ASCII - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103, - 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, - 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107, - 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, - 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, - 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, - 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, - 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, - 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, - 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, - 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, - 252,253,254,255 -#endif -#ifdef SQLITE_EBCDIC - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */ - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */ - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */ - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */ - 96, 97, 66, 67, 68, 69, 70, 71, 72, 73,106,107,108,109,110,111, /* 6x */ - 112, 81, 82, 83, 84, 85, 86, 87, 88, 89,122,123,124,125,126,127, /* 7x */ - 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */ - 144,145,146,147,148,149,150,151,152,153,154,155,156,157,156,159, /* 9x */ - 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */ - 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */ - 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */ - 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */ - 224,225,162,163,164,165,166,167,168,169,232,203,204,205,206,207, /* Ex */ - 239,240,241,242,243,244,245,246,247,248,249,219,220,221,222,255, /* Fx */ -#endif -}; +/* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* @@ -196,10 +234,15 @@ } /* -** Return TRUE if z is a pure numeric string. Return FALSE if the -** string contains any character which is not part of a number. If -** the string is numeric and contains the '.' character, set *realnum -** to TRUE (otherwise FALSE). +** Return TRUE if z is a pure numeric string. Return FALSE and leave +** *realnum unchanged if the string contains any character which is not +** part of a number. 
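The reworked sqlite3Dequote() above now returns the dequoted length, or -1 when the input was not quoted, writes its own terminator, and still folds a doubled quote character down to a single one while also handling MS-Access style [brackets]. The same in-place algorithm as a small self-contained program:

#include <stdio.h>

/* Remove surrounding quotes from z in place.  Doubled quote characters
** inside the literal collapse to one.  Returns the new length, or -1 if
** z was not quoted, mirroring the contract described above. */
static int dequote(char *z){
  char quote;
  int i, j;
  if( z==0 ) return -1;
  quote = z[0];
  switch( quote ){
    case '\'': case '"': case '`': break;
    case '[': quote = ']'; break;        /* [identifier] closes with ']' */
    default: return -1;
  }
  for(i=1, j=0; z[i]; i++){
    if( z[i]==quote ){
      if( z[i+1]==quote ){ z[j++] = quote; i++; }  /* '' -> '            */
      else break;                                  /* closing quote: done */
    }else{
      z[j++] = z[i];
    }
  }
  z[j] = 0;
  return j;
}

int main(void){
  char a[] = "'it''s'";
  char b[] = "[a-b-c]";
  printf("%d %s\n", dequote(a), a);   /* 4 it's  */
  printf("%d %s\n", dequote(b), b);   /* 5 a-b-c */
  return 0;
}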
+** +** If the string is pure numeric, set *realnum to TRUE if the string +** contains the '.' character or an "E+000" style exponentiation suffix. +** Otherwise set *realnum to FALSE. Note that just becaue *realnum is +** false does not mean that the number can be successfully converted into +** an integer - it might be too big. ** ** An empty string is considered non-numeric. */ @@ -207,24 +250,24 @@ int incr = (enc==SQLITE_UTF8?1:2); if( enc==SQLITE_UTF16BE ) z++; if( *z=='-' || *z=='+' ) z += incr; - if( !isdigit(*(u8*)z) ){ + if( !sqlite3Isdigit(*z) ){ return 0; } z += incr; - if( realnum ) *realnum = 0; - while( isdigit(*(u8*)z) ){ z += incr; } + *realnum = 0; + while( sqlite3Isdigit(*z) ){ z += incr; } if( *z=='.' ){ z += incr; - if( !isdigit(*(u8*)z) ) return 0; - while( isdigit(*(u8*)z) ){ z += incr; } - if( realnum ) *realnum = 1; + if( !sqlite3Isdigit(*z) ) return 0; + while( sqlite3Isdigit(*z) ){ z += incr; } + *realnum = 1; } if( *z=='e' || *z=='E' ){ z += incr; if( *z=='+' || *z=='-' ) z += incr; - if( !isdigit(*(u8*)z) ) return 0; - while( isdigit(*(u8*)z) ){ z += incr; } - if( realnum ) *realnum = 1; + if( !sqlite3Isdigit(*z) ) return 0; + while( sqlite3Isdigit(*z) ){ z += incr; } + *realnum = 1; } return *z==0; } @@ -246,23 +289,37 @@ int sign = 1; const char *zBegin = z; LONGDOUBLE_TYPE v1 = 0.0; - while( isspace(*(u8*)z) ) z++; + int nSignificant = 0; + while( sqlite3Isspace(*z) ) z++; if( *z=='-' ){ sign = -1; z++; }else if( *z=='+' ){ z++; } - while( isdigit(*(u8*)z) ){ + while( z[0]=='0' ){ + z++; + } + while( sqlite3Isdigit(*z) ){ v1 = v1*10.0 + (*z - '0'); z++; + nSignificant++; } if( *z=='.' ){ LONGDOUBLE_TYPE divisor = 1.0; z++; - while( isdigit(*(u8*)z) ){ - v1 = v1*10.0 + (*z - '0'); - divisor *= 10.0; + if( nSignificant==0 ){ + while( z[0]=='0' ){ + divisor *= 10.0; + z++; + } + } + while( sqlite3Isdigit(*z) ){ + if( nSignificant<18 ){ + v1 = v1*10.0 + (*z - '0'); + divisor *= 10.0; + nSignificant++; + } z++; } v1 /= divisor; @@ -278,7 +335,7 @@ }else if( *z=='+' ){ z++; } - while( isdigit(*(u8*)z) ){ + while( sqlite3Isdigit(*z) ){ eval = eval*10 + *z - '0'; z++; } @@ -292,8 +349,8 @@ v1 *= scale; } } - *pResult = sign<0 ? -v1 : v1; - return z - zBegin; + *pResult = (double)(sign<0 ? -v1 : v1); + return (int)(z - zBegin); #else return sqlite3Atoi64(z, pResult); #endif /* SQLITE_OMIT_FLOATING_POINT */ @@ -314,7 +371,7 @@ */ static int compare2pow63(const char *zNum){ int c; - c = memcmp(zNum,"922337203685477580",18); + c = memcmp(zNum,"922337203685477580",18)*10; if( c==0 ){ c = zNum[18] - '8'; } @@ -336,7 +393,8 @@ i64 v = 0; int neg; int i, c; - while( isspace(*(u8*)zNum) ) zNum++; + const char *zStart; + while( sqlite3Isspace(*zNum) ) zNum++; if( *zNum=='-' ){ neg = 1; zNum++; @@ -346,12 +404,13 @@ }else{ neg = 0; } + zStart = zNum; while( zNum[0]=='0' ){ zNum++; } /* Skip over leading zeros. Ticket #2454 */ for(i=0; (c=zNum[i])>='0' && c<='9'; i++){ v = v*10 + c - '0'; } *pNum = neg ? -v : v; - if( c!=0 || i==0 || i>19 ){ + if( c!=0 || (i==0 && zStart==zNum) || i>19 ){ /* zNum is empty or contains non-numeric text or is longer ** than 19 digits (thus guaranting that it is too large) */ return 0; @@ -367,29 +426,33 @@ } /* -** The string zNum represents an integer. There might be some other -** information following the integer too, but that part is ignored. -** If the integer that the prefix of zNum represents will fit in a +** The string zNum represents an unsigned integer. 
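The 64-bit overflow boundary used by sqlite3Atoi64() and sqlite3FitsIn64Bits() is decided textually by compare2pow63(): the first eighteen digits of a nineteen-digit string are compared against "922337203685477580", the result is scaled by ten so that comparison dominates, and only on a tie is the final digit compared against '8', because 2^63 is 9223372036854775808. A short demonstration of that boundary check:

#include <stdio.h>
#include <string.h>

/* zNum must be exactly 19 decimal digits with no leading zeros.  Returns a
** negative, zero or positive value according to whether the number is less
** than, equal to, or greater than 2^63 (9223372036854775808). */
static int compare2pow63(const char *zNum){
  int c = memcmp(zNum, "922337203685477580", 18)*10;
  if( c==0 ){
    c = zNum[18] - '8';
  }
  return c;
}

int main(void){
  printf("%d\n", compare2pow63("9223372036854775807") < 0);  /* 1: largest i64  */
  printf("%d\n", compare2pow63("9223372036854775808") == 0); /* 1: exactly 2^63 */
  printf("%d\n", compare2pow63("9223372036854775809") > 0);  /* 1: out of range */
  return 0;
}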
The zNum string +** consists of one or more digit characters and is terminated by +** a zero character. Any stray characters in zNum result in undefined +** behavior. +** +** If the unsigned integer that zNum represents will fit in a ** 64-bit signed integer, return TRUE. Otherwise return FALSE. ** -** This routine returns FALSE for the string -9223372036854775808 even that -** that number will, in theory fit in a 64-bit integer. Positive -** 9223373036854775808 will not fit in 64 bits. So it seems safer to return -** false. +** If the negFlag parameter is true, that means that zNum really represents +** a negative number. (The leading "-" is omitted from zNum.) This +** parameter is needed to determine a boundary case. A string +** of "9223373036854775808" returns false if negFlag is false or true +** if negFlag is true. +** +** Leading zeros are ignored. */ -int sqlite3FitsIn64Bits(const char *zNum){ - int i, c; +int sqlite3FitsIn64Bits(const char *zNum, int negFlag){ + int i; int neg = 0; - if( *zNum=='-' ){ - neg = 1; - zNum++; - }else if( *zNum=='+' ){ - zNum++; - } + + assert( zNum[0]>='0' && zNum[0]<='9' ); /* zNum is an unsigned number */ + + if( negFlag ) neg = 1-neg; while( *zNum=='0' ){ zNum++; /* Skip leading zeros. Ticket #2454 */ } - for(i=0; (c=zNum[i])>='0' && c<='9'; i++){} + for(i=0; zNum[i]; i++){ assert( zNum[i]>='0' && zNum[i]<='9' ); } if( i<19 ){ /* Guaranteed to fit if less than 19 digits */ return 1; @@ -421,10 +484,16 @@ zNum++; } while( zNum[0]=='0' ) zNum++; - for(i=0; i<10 && (c = zNum[i] - '0')>=0 && c<=9; i++){ + for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){ v = v*10 + c; } - if( i>9 ){ + + /* The longest decimal representation of a 32 bit integer is 10 digits: + ** + ** 1234567890 + ** 2^31 -> 2147483648 + */ + if( i>10 ){ return 0; } if( v-neg>2147483647 ){ @@ -438,25 +507,6 @@ } /* -** Check to make sure we have a valid db pointer. This test is not -** foolproof but it does provide some measure of protection against -** misuse of the interface such as passing in db pointers that are -** NULL or which have been previously closed. If this routine returns -** TRUE it means that the db pointer is invalid and should not be -** dereferenced for any reason. The calling function should invoke -** SQLITE_MISUSE immediately. -*/ -int sqlite3SafetyCheck(sqlite3 *db){ - int magic; - if( db==0 ) return 1; - magic = db->magic; - if( magic!=SQLITE_MAGIC_CLOSED && - magic!=SQLITE_MAGIC_OPEN && - magic!=SQLITE_MAGIC_BUSY ) return 1; - return 0; -} - -/* ** The variable-length integer encoding is as follows: ** ** KEY: @@ -489,17 +539,17 @@ int i, j, n; u8 buf[10]; if( v & (((u64)0xff000000)<<32) ){ - p[8] = v; + p[8] = (u8)v; v >>= 8; for(i=7; i>=0; i--){ - p[i] = (v & 0x7f) | 0x80; + p[i] = (u8)((v & 0x7f) | 0x80); v >>= 7; } return 9; } n = 0; do{ - buf[n++] = (v & 0x7f) | 0x80; + buf[n++] = (u8)((v & 0x7f) | 0x80); v >>= 7; }while( v!=0 ); buf[0] &= 0x7f; @@ -511,71 +561,315 @@ } /* +** This routine is a faster version of sqlite3PutVarint() that only +** works for 32-bit positive integers and which is optimized for +** the common case of small integers. A MACRO version, putVarint32, +** is provided which inlines the single-byte case. All code should use +** the MACRO version as this function assumes the single-byte case has +** already been handled. 
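The varint format described above stores seven payload bits per byte, most significant group first, with the 0x80 continuation bit set on every byte except the last; the special nine-byte form keeps a full eight bits in the final byte, so any 64-bit value fits in at most nine bytes. Below is a standalone encoder mirroring sqlite3PutVarint(), paired with a deliberately naive decoder for round-trip checking; it is not the unrolled sqlite3GetVarint() that appears later in this patch.

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned long long u64;

/* Encode v into p[] (at most 9 bytes) and return the number of bytes used. */
static int putVarint(u8 *p, u64 v){
  int i, n;
  u8 buf[10];
  if( v & (((u64)0xff000000)<<32) ){      /* needs more than 8*7 = 56 bits */
    p[8] = (u8)v;                          /* 9th byte carries 8 bits verbatim */
    v >>= 8;
    for(i=7; i>=0; i--){
      p[i] = (u8)((v & 0x7f) | 0x80);
      v >>= 7;
    }
    return 9;
  }
  n = 0;
  do{
    buf[n++] = (u8)((v & 0x7f) | 0x80);    /* least significant group first */
    v >>= 7;
  }while( v!=0 );
  buf[0] &= 0x7f;                          /* last emitted byte carries no continuation bit */
  for(i=0; i<n; i++) p[i] = buf[n-1-i];    /* emit most significant group first */
  return n;
}

/* Naive matching decoder, for round-trip testing only. */
static int getVarint(const u8 *p, u64 *pv){
  u64 v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){ *pv = v; return i+1; }
  }
  *pv = (v<<8) | p[8];                     /* 9th byte holds 8 literal bits */
  return 9;
}

int main(void){
  u64 tests[] = { 0, 127, 128, 16383, 16384, 0xffffffffULL, 0xffffffffffffffffULL };
  unsigned int i;
  for(i=0; i<sizeof(tests)/sizeof(tests[0]); i++){
    u8 buf[9];
    u64 back;
    int n = putVarint(buf, tests[i]);
    getVarint(buf, &back);
    printf("%llu -> %d byte(s), round-trip %s\n",
           tests[i], n, back==tests[i] ? "ok" : "FAIL");
  }
  return 0;
}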
+*/ +int sqlite3PutVarint32(unsigned char *p, u32 v){ +#ifndef putVarint32 + if( (v & ~0x7f)==0 ){ + p[0] = v; + return 1; + } +#endif + if( (v & ~0x3fff)==0 ){ + p[0] = (u8)((v>>7) | 0x80); + p[1] = (u8)(v & 0x7f); + return 2; + } + return sqlite3PutVarint(p, v); +} + +/* ** Read a 64-bit variable-length integer from memory starting at p[0]. ** Return the number of bytes read. The value is stored in *v. */ -int sqlite3GetVarint(const unsigned char *p, u64 *v){ - u32 x; - u64 x64; - int n; - unsigned char c; - if( ((c = p[0]) & 0x80)==0 ){ - *v = c; +u8 sqlite3GetVarint(const unsigned char *p, u64 *v){ + u32 a,b,s; + + a = *p; + /* a: p0 (unmasked) */ + if (!(a&0x80)) + { + *v = a; return 1; } - x = c & 0x7f; - if( ((c = p[1]) & 0x80)==0 ){ - *v = (x<<7) | c; + + p++; + b = *p; + /* b: p1 (unmasked) */ + if (!(b&0x80)) + { + a &= 0x7f; + a = a<<7; + a |= b; + *v = a; return 2; } - x = (x<<7) | (c&0x7f); - if( ((c = p[2]) & 0x80)==0 ){ - *v = (x<<7) | c; + + p++; + a = a<<14; + a |= *p; + /* a: p0<<14 | p2 (unmasked) */ + if (!(a&0x80)) + { + a &= (0x7f<<14)|(0x7f); + b &= 0x7f; + b = b<<7; + a |= b; + *v = a; return 3; } - x = (x<<7) | (c&0x7f); - if( ((c = p[3]) & 0x80)==0 ){ - *v = (x<<7) | c; + + /* CSE1 from below */ + a &= (0x7f<<14)|(0x7f); + p++; + b = b<<14; + b |= *p; + /* b: p1<<14 | p3 (unmasked) */ + if (!(b&0x80)) + { + b &= (0x7f<<14)|(0x7f); + /* moved CSE1 up */ + /* a &= (0x7f<<14)|(0x7f); */ + a = a<<7; + a |= b; + *v = a; return 4; } - x64 = (x<<7) | (c&0x7f); - n = 4; - do{ - c = p[n++]; - if( n==9 ){ - x64 = (x64<<8) | c; - break; - } - x64 = (x64<<7) | (c&0x7f); - }while( (c & 0x80)!=0 ); - *v = x64; - return n; + + /* a: p0<<14 | p2 (masked) */ + /* b: p1<<14 | p3 (unmasked) */ + /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + /* moved CSE1 up */ + /* a &= (0x7f<<14)|(0x7f); */ + b &= (0x7f<<14)|(0x7f); + s = a; + /* s: p0<<14 | p2 (masked) */ + + p++; + a = a<<14; + a |= *p; + /* a: p0<<28 | p2<<14 | p4 (unmasked) */ + if (!(a&0x80)) + { + /* we can skip these cause they were (effectively) done above in calc'ing s */ + /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ + /* b &= (0x7f<<14)|(0x7f); */ + b = b<<7; + a |= b; + s = s>>18; + *v = ((u64)s)<<32 | a; + return 5; + } + + /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + s = s<<7; + s |= b; + /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + + p++; + b = b<<14; + b |= *p; + /* b: p1<<28 | p3<<14 | p5 (unmasked) */ + if (!(b&0x80)) + { + /* we can skip this cause it was (effectively) done above in calc'ing s */ + /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ + a &= (0x7f<<14)|(0x7f); + a = a<<7; + a |= b; + s = s>>18; + *v = ((u64)s)<<32 | a; + return 6; + } + + p++; + a = a<<14; + a |= *p; + /* a: p2<<28 | p4<<14 | p6 (unmasked) */ + if (!(a&0x80)) + { + a &= (0x1f<<28)|(0x7f<<14)|(0x7f); + b &= (0x7f<<14)|(0x7f); + b = b<<7; + a |= b; + s = s>>11; + *v = ((u64)s)<<32 | a; + return 7; + } + + /* CSE2 from below */ + a &= (0x7f<<14)|(0x7f); + p++; + b = b<<14; + b |= *p; + /* b: p3<<28 | p5<<14 | p7 (unmasked) */ + if (!(b&0x80)) + { + b &= (0x1f<<28)|(0x7f<<14)|(0x7f); + /* moved CSE2 up */ + /* a &= (0x7f<<14)|(0x7f); */ + a = a<<7; + a |= b; + s = s>>4; + *v = ((u64)s)<<32 | a; + return 8; + } + + p++; + a = a<<15; + a |= *p; + /* a: p4<<29 | p6<<15 | p8 (unmasked) */ + + /* moved CSE2 up */ + /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */ + b &= (0x7f<<14)|(0x7f); + b = b<<8; + a |= b; + + s = s<<4; + b = p[-4]; + b &= 0x7f; + b = b>>3; + s |= b; + + *v = ((u64)s)<<32 | a; + + return 9; } /* ** Read a 32-bit 
variable-length integer from memory starting at p[0]. ** Return the number of bytes read. The value is stored in *v. -*/ -int sqlite3GetVarint32(const unsigned char *p, u32 *v){ - u32 x; - int n; - unsigned char c; - if( ((signed char*)p)[0]>=0 ){ - *v = p[0]; +** +** If the varint stored in p[0] is larger than can fit in a 32-bit unsigned +** integer, then set *v to 0xffffffff. +** +** A MACRO version, getVarint32, is provided which inlines the +** single-byte case. All code should use the MACRO version as +** this function assumes the single-byte case has already been handled. +*/ +u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ + u32 a,b; + + /* The 1-byte case. Overwhelmingly the most common. Handled inline + ** by the getVarin32() macro */ + a = *p; + /* a: p0 (unmasked) */ +#ifndef getVarint32 + if (!(a&0x80)) + { + /* Values between 0 and 127 */ + *v = a; return 1; } - x = p[0] & 0x7f; - if( ((signed char*)p)[1]>=0 ){ - *v = (x<<7) | p[1]; +#endif + + /* The 2-byte case */ + p++; + b = *p; + /* b: p1 (unmasked) */ + if (!(b&0x80)) + { + /* Values between 128 and 16383 */ + a &= 0x7f; + a = a<<7; + *v = a | b; return 2; } - x = (x<<7) | (p[1] & 0x7f); - n = 2; - do{ - x = (x<<7) | ((c = p[n++])&0x7f); - }while( (c & 0x80)!=0 && n<9 ); - *v = x; - return n; + + /* The 3-byte case */ + p++; + a = a<<14; + a |= *p; + /* a: p0<<14 | p2 (unmasked) */ + if (!(a&0x80)) + { + /* Values between 16384 and 2097151 */ + a &= (0x7f<<14)|(0x7f); + b &= 0x7f; + b = b<<7; + *v = a | b; + return 3; + } + + /* A 32-bit varint is used to store size information in btrees. + ** Objects are rarely larger than 2MiB limit of a 3-byte varint. + ** A 3-byte varint is sufficient, for example, to record the size + ** of a 1048569-byte BLOB or string. + ** + ** We only unroll the first 1-, 2-, and 3- byte cases. The very + ** rare larger cases can be handled by the slower 64-bit varint + ** routine. + */ +#if 1 + { + u64 v64; + u8 n; + + p -= 2; + n = sqlite3GetVarint(p, &v64); + assert( n>3 && n<=9 ); + if( (v64 & SQLITE_MAX_U32)!=v64 ){ + *v = 0xffffffff; + }else{ + *v = (u32)v64; + } + return n; + } + +#else + /* For following code (kept for historical record only) shows an + ** unrolling for the 3- and 4-byte varint cases. This code is + ** slightly faster, but it is also larger and much harder to test. + */ + p++; + b = b<<14; + b |= *p; + /* b: p1<<14 | p3 (unmasked) */ + if (!(b&0x80)) + { + /* Values between 2097152 and 268435455 */ + b &= (0x7f<<14)|(0x7f); + a &= (0x7f<<14)|(0x7f); + a = a<<7; + *v = a | b; + return 4; + } + + p++; + a = a<<14; + a |= *p; + /* a: p0<<28 | p2<<14 | p4 (unmasked) */ + if (!(a&0x80)) + { + /* Walues between 268435456 and 34359738367 */ + a &= (0x1f<<28)|(0x7f<<14)|(0x7f); + b &= (0x1f<<28)|(0x7f<<14)|(0x7f); + b = b<<7; + *v = a | b; + return 5; + } + + /* We can only reach this point when reading a corrupt database + ** file. In that case we are not in any hurry. Use the (relatively + ** slow) general-purpose sqlite3GetVarint() routine to extract the + ** value. 
*/ + { + u64 v64; + u8 n; + + p -= 4; + n = sqlite3GetVarint(p, &v64); + assert( n>5 && n<=9 ); + *v = (u32)v64; + return n; + } +#endif } /* @@ -587,7 +881,7 @@ do{ i++; v >>= 7; - }while( v!=0 && i<9 ); + }while( v!=0 && ALWAYS(i<9) ); return i; } @@ -599,30 +893,31 @@ return (p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]; } void sqlite3Put4byte(unsigned char *p, u32 v){ - p[0] = v>>24; - p[1] = v>>16; - p[2] = v>>8; - p[3] = v; + p[0] = (u8)(v>>24); + p[1] = (u8)(v>>16); + p[2] = (u8)(v>>8); + p[3] = (u8)v; } -#if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC) \ - || defined(SQLITE_TEST) +#if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC) /* ** Translate a single byte of Hex into an integer. +** This routinen only works if h really is a valid hexadecimal +** character: 0..9a..fA..F */ -static int hexToInt(int h){ - if( h>='0' && h<='9' ){ - return h - '0'; - }else if( h>='a' && h<='f' ){ - return h - 'a' + 10; - }else{ - assert( h>='A' && h<='F' ); - return h - 'A' + 10; - } +static u8 hexToInt(int h){ + assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); +#ifdef SQLITE_ASCII + h += 9*(1&(h>>6)); +#endif +#ifdef SQLITE_EBCDIC + h += 9*(1&~(h>>4)); +#endif + return (u8)(h & 0xf); } -#endif /* !SQLITE_OMIT_BLOB_LITERAL || SQLITE_HAS_CODEC || SQLITE_TEST */ +#endif /* !SQLITE_OMIT_BLOB_LITERAL || SQLITE_HAS_CODEC */ #if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC) /* @@ -631,17 +926,17 @@ ** binary value has been obtained from malloc and must be freed by ** the calling routine. */ -void *sqlite3HexToBlob(const char *z){ +void *sqlite3HexToBlob(sqlite3 *db, const char *z, int n){ char *zBlob; int i; - int n = strlen(z); - if( n%2 ) return 0; - zBlob = (char *)sqliteMalloc(n/2); + zBlob = (char *)sqlite3DbMallocRaw(db, n/2 + 1); + n--; if( zBlob ){ for(i=0; imagic==SQLITE_MAGIC_OPEN ){ db->magic = SQLITE_MAGIC_BUSY; + assert( sqlite3_mutex_held(db->mutex) ); return 0; }else if( db->magic==SQLITE_MAGIC_BUSY ){ db->magic = SQLITE_MAGIC_ERROR; @@ -683,50 +980,60 @@ } return 1; } +#endif /* ** Change the magic from SQLITE_MAGIC_BUSY to SQLITE_MAGIC_OPEN. ** Return an error (non-zero) if the magic was not SQLITE_MAGIC_BUSY ** when this routine is called. */ +#ifdef SQLITE_DEBUG int sqlite3SafetyOff(sqlite3 *db){ if( db->magic==SQLITE_MAGIC_BUSY ){ db->magic = SQLITE_MAGIC_OPEN; + assert( sqlite3_mutex_held(db->mutex) ); return 0; - }else { + }else{ db->magic = SQLITE_MAGIC_ERROR; db->u1.isInterrupted = 1; return 1; } } +#endif /* -** Return a pointer to the ThreadData associated with the calling thread. -*/ -ThreadData *sqlite3ThreadData(){ - ThreadData *p = (ThreadData*)sqlite3OsThreadSpecificData(1); - if( !p ){ - sqlite3FailedMalloc(); +** Check to make sure we have a valid db pointer. This test is not +** foolproof but it does provide some measure of protection against +** misuse of the interface such as passing in db pointers that are +** NULL or which have been previously closed. If this routine returns +** 1 it means that the db pointer is valid and 0 if it should not be +** dereferenced for any reason. The calling function should invoke +** SQLITE_MISUSE immediately. +** +** sqlite3SafetyCheckOk() requires that the db pointer be valid for +** use. sqlite3SafetyCheckSickOrOk() allows a db pointer that failed to +** open properly and is not fit for general use but which can be +** used as an argument to sqlite3_errmsg() or sqlite3_close(). 
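The new hexToInt() above can be branch-free because it now assumes its argument really is a hexadecimal digit: in ASCII the letters 'a'-'f' and 'A'-'F' have bit 6 set while '0'-'9' do not, so adding 9 only for letters leaves the digit value in the low nibble (the EBCDIC branch applies the same idea with a different bit). A quick self-contained check of the ASCII form:

#include <assert.h>
#include <stdio.h>

/* Convert one ASCII hex digit to its numeric value without branching. */
static unsigned char hexToInt(int h){
  assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') );
  h += 9*(1&(h>>6));     /* add 9 exactly when bit 6 is set, i.e. for letters */
  return (unsigned char)(h & 0xf);
}

int main(void){
  const char *digits = "09afAF";
  int i;
  for(i=0; digits[i]; i++){
    printf("%c -> %d\n", digits[i], hexToInt(digits[i]));
  }
  return 0;
}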
+*/ +int sqlite3SafetyCheckOk(sqlite3 *db){ + u32 magic; + if( db==0 ) return 0; + magic = db->magic; + if( magic!=SQLITE_MAGIC_OPEN +#ifdef SQLITE_DEBUG + && magic!=SQLITE_MAGIC_BUSY +#endif + ){ + return 0; + }else{ + return 1; } - return p; } - -/* -** Return a pointer to the ThreadData associated with the calling thread. -** If no ThreadData has been allocated to this thread yet, return a pointer -** to a substitute ThreadData structure that is all zeros. -*/ -const ThreadData *sqlite3ThreadDataReadOnly(){ - static const ThreadData zeroData = {0}; /* Initializer to silence warnings - ** from broken compilers */ - const ThreadData *pTd = sqlite3OsThreadSpecificData(0); - return pTd ? pTd : &zeroData; -} - -/* -** Check to see if the ThreadData for this thread is all zero. If it -** is, then deallocate it. -*/ -void sqlite3ReleaseThreadData(){ - sqlite3OsThreadSpecificData(-1); +int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ + u32 magic; + magic = db->magic; + if( magic!=SQLITE_MAGIC_SICK && + magic!=SQLITE_MAGIC_OPEN && + magic!=SQLITE_MAGIC_BUSY ) return 0; + return 1; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vacuum.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vacuum.c --- sqlite3-3.4.2/src/vacuum.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/vacuum.c 2009-06-25 12:35:51.000000000 +0100 @@ -14,11 +14,10 @@ ** Most of the code in this file may be omitted by defining the ** SQLITE_OMIT_VACUUM macro. ** -** $Id: vacuum.c,v 1.69 2007/03/27 16:19:52 danielk1977 Exp $ +** $Id: vacuum.c,v 1.90 2009/06/03 11:25:07 danielk1977 Exp $ */ #include "sqliteInt.h" #include "vdbeInt.h" -#include "os.h" #if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) /* @@ -26,10 +25,15 @@ */ static int execSql(sqlite3 *db, const char *zSql){ sqlite3_stmt *pStmt; + VVA_ONLY( int rc; ) + if( !zSql ){ + return SQLITE_NOMEM; + } if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){ return sqlite3_errcode(db); } - while( SQLITE_ROW==sqlite3_step(pStmt) ){} + VVA_ONLY( rc = ) sqlite3_step(pStmt); + assert( rc!=SQLITE_ROW ); return sqlite3_finalize(pStmt); } @@ -68,7 +72,7 @@ void sqlite3Vacuum(Parse *pParse){ Vdbe *v = sqlite3GetVdbe(pParse); if( v ){ - sqlite3VdbeAddOp(v, OP_Vacuum, 0, 0); + sqlite3VdbeAddOp2(v, OP_Vacuum, 0, 0); } return; } @@ -82,19 +86,25 @@ Btree *pTemp; /* The temporary database we vacuum into */ char *zSql = 0; /* SQL statements */ int saved_flags; /* Saved value of the db->flags */ + int saved_nChange; /* Saved value of db->nChange */ + int saved_nTotalChange; /* Saved value of db->nTotalChange */ Db *pDb = 0; /* Database to detach at end of vacuum */ + int isMemDb; /* True if vacuuming a :memory: database */ + int nRes; + + if( !db->autoCommit ){ + sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); + return SQLITE_ERROR; + } /* Save the current value of the write-schema flag before setting it. */ saved_flags = db->flags; + saved_nChange = db->nChange; + saved_nTotalChange = db->nTotalChange; db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks; - if( !db->autoCommit ){ - sqlite3SetString(pzErrMsg, "cannot VACUUM from within a transaction", - (char*)0); - rc = SQLITE_ERROR; - goto end_of_vacuum; - } pMain = db->aDb[0].pBt; + isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); /* Attach the temporary database as 'vacuum_db'. The synchronous pragma ** can be set to 'off' for this file, as it is not recovered if a crash @@ -103,6 +113,12 @@ ** sqlite3BtreeCopyFile() is called. ** ** An optimisation would be to use a non-journaled pager. 
+ ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but + ** that actually made the VACUUM run slower. Very little journalling + ** actually occurs when doing a vacuum since the vacuum_db is initially + ** empty. Only the journal header is written. Apparently it takes more + ** time to parse and run the PRAGMA to turn journalling off than it does + ** to write the journal header file. */ zSql = "ATTACH '' AS vacuum_db;"; rc = execSql(db, zSql); @@ -110,20 +126,35 @@ pDb = &db->aDb[db->nDb-1]; assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 ); pTemp = db->aDb[db->nDb-1].pBt; - sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), - sqlite3BtreeGetReserve(pMain)); - if( sqlite3MallocFailed() ){ + + nRes = sqlite3BtreeGetReserve(pMain); + + /* A VACUUM cannot change the pagesize of an encrypted database. */ +#ifdef SQLITE_HAS_CODEC + if( db->nextPagesize ){ + extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); + int nKey; + char *zKey; + sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); + if( nKey ) db->nextPagesize = 0; + } +#endif + + if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0) + || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0)) + || NEVER(db->mallocFailed) + ){ rc = SQLITE_NOMEM; goto end_of_vacuum; } - assert( sqlite3BtreeGetPageSize(pTemp)==sqlite3BtreeGetPageSize(pMain) ); rc = execSql(db, "PRAGMA vacuum_db.synchronous=OFF"); if( rc!=SQLITE_OK ){ goto end_of_vacuum; } #ifndef SQLITE_OMIT_AUTOVACUUM - sqlite3BtreeSetAutoVacuum(pTemp, sqlite3BtreeGetAutoVacuum(pMain)); + sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac : + sqlite3BtreeGetAutoVacuum(pMain)); #endif /* Begin a transaction */ @@ -134,17 +165,17 @@ ** in the temporary database. */ rc = execExecSql(db, - "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14,100000000) " + "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) " " FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'" " AND rootpage>0" ); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = execExecSql(db, - "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000)" + "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)" " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = execExecSql(db, - "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) " + "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) " " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"); if( rc!=SQLITE_OK ) goto end_of_vacuum; @@ -199,7 +230,7 @@ ** opened for writing. This way, the SQL transaction used to create the ** temporary database never needs to be committed. */ - if( rc==SQLITE_OK ){ + { u32 meta; int i; @@ -210,33 +241,42 @@ ** connections to the same database will know to reread the schema. 
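The VACUUM implementation above drives all of its work through short SQL strings executed by the local execSql()/execExecSql() helpers, and the hunk adds an early error when a transaction is already open. From application code the whole operation is still a single statement; a minimal caller-side sketch using the public API (the database file name is a placeholder, and the program must be linked against the SQLite library):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open("example.db", &db)!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    return 1;
  }
  /* VACUUM rebuilds the database into a temporary file and copies it back;
  ** it is rejected when called inside an open transaction. */
  if( sqlite3_exec(db, "VACUUM;", 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "VACUUM failed: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}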
*/ static const unsigned char aCopy[] = { - 1, 1, /* Add one to the old schema cookie */ - 3, 0, /* Preserve the default page cache size */ - 5, 0, /* Preserve the default text encoding */ - 6, 0, /* Preserve the user version */ + BTREE_SCHEMA_VERSION, 1, /* Add one to the old schema cookie */ + BTREE_DEFAULT_CACHE_SIZE, 0, /* Preserve the default page cache size */ + BTREE_TEXT_ENCODING, 0, /* Preserve the text encoding */ + BTREE_USER_VERSION, 0, /* Preserve the user version */ }; assert( 1==sqlite3BtreeIsInTrans(pTemp) ); assert( 1==sqlite3BtreeIsInTrans(pMain) ); /* Copy Btree meta values */ - for(i=0; iflags */ db->flags = saved_flags; + db->nChange = saved_nChange; + db->nTotalChange = saved_nTotalChange; /* Currently there is an SQL level transaction open on the vacuum ** database. No locks are held on any other files (since the main file @@ -248,9 +288,7 @@ db->autoCommit = 1; if( pDb ){ - sqlite3MallocDisallow(); sqlite3BtreeClose(pDb->pBt); - sqlite3MallocAllow(); pDb->pBt = 0; pDb->pSchema = 0; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbeapi.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbeapi.c --- sqlite3-3.4.2/src/vdbeapi.c 2007-07-30 21:36:28.000000000 +0100 +++ sqlite3-3.6.16/src/vdbeapi.c 2009-06-25 12:45:58.000000000 +0100 @@ -12,11 +12,13 @@ ** ** This file contains code use to implement APIs that are part of the ** VDBE. +** +** $Id: vdbeapi.c,v 1.167 2009/06/25 01:47:12 drh Exp $ */ #include "sqliteInt.h" #include "vdbeInt.h" -#include "os.h" +#ifndef SQLITE_OMIT_DEPRECATED /* ** Return TRUE (non-zero) of the statement supplied as an argument needs ** to be recompiled. A statement needs to be recompiled whenever the @@ -29,6 +31,78 @@ Vdbe *p = (Vdbe*)pStmt; return p==0 || p->expired; } +#endif + +/* +** The following routine destroys a virtual machine that is created by +** the sqlite3_compile() routine. The integer returned is an SQLITE_ +** success/failure code that describes the result of executing the virtual +** machine. +** +** This routine sets the error code and string returned by +** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). +*/ +int sqlite3_finalize(sqlite3_stmt *pStmt){ + int rc; + if( pStmt==0 ){ + rc = SQLITE_OK; + }else{ + Vdbe *v = (Vdbe*)pStmt; + sqlite3 *db = v->db; +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = v->db->mutex; +#endif + sqlite3_mutex_enter(mutex); + rc = sqlite3VdbeFinalize(v); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(mutex); + } + return rc; +} + +/* +** Terminate the current execution of an SQL statement and reset it +** back to its starting state so that it can be reused. A success code from +** the prior execution is returned. +** +** This routine sets the error code and string returned by +** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). +*/ +int sqlite3_reset(sqlite3_stmt *pStmt){ + int rc; + if( pStmt==0 ){ + rc = SQLITE_OK; + }else{ + Vdbe *v = (Vdbe*)pStmt; + sqlite3_mutex_enter(v->db->mutex); + rc = sqlite3VdbeReset(v); + sqlite3VdbeMakeReady(v, -1, 0, 0, 0); + assert( (rc & (v->db->errMask))==rc ); + rc = sqlite3ApiExit(v->db, rc); + sqlite3_mutex_leave(v->db->mutex); + } + return rc; +} + +/* +** Set all the parameters in the compiled SQL statement to NULL. 
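The three entry points gathered into vdbeapi.c here, sqlite3_finalize(), sqlite3_reset() and the new sqlite3_clear_bindings(), now all take the connection mutex themselves. Note that sqlite3_reset() deliberately keeps existing parameter bindings, which is what makes statement reuse cheap; clearing them is a separate, optional step. A typical caller-side reuse loop (the table, column and SQL text are placeholders):

#include <sqlite3.h>

/* Insert several integers through one prepared statement.  sqlite3_reset()
** rewinds the statement for the next run; sqlite3_clear_bindings() is only
** needed when stale parameter values must not carry over. */
static int insertValues(sqlite3 *db, const int *aVal, int nVal){
  sqlite3_stmt *pStmt = 0;
  int i, rc;
  rc = sqlite3_prepare_v2(db, "INSERT INTO t(x) VALUES(?1)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  for(i=0; i<nVal && rc==SQLITE_OK; i++){
    sqlite3_bind_int(pStmt, 1, aVal[i]);
    rc = sqlite3_step(pStmt);
    if( rc==SQLITE_DONE ) rc = SQLITE_OK;
    sqlite3_reset(pStmt);            /* ready for the next bind/step cycle */
    sqlite3_clear_bindings(pStmt);   /* optional: drop the previous value */
  }
  sqlite3_finalize(pStmt);
  return rc;
}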
+*/ +int sqlite3_clear_bindings(sqlite3_stmt *pStmt){ + int i; + int rc = SQLITE_OK; + Vdbe *p = (Vdbe*)pStmt; +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex; +#endif + sqlite3_mutex_enter(mutex); + for(i=0; inVar; i++){ + sqlite3VdbeMemRelease(&p->aVar[i]); + p->aVar[i].flags = MEM_Null; + } + sqlite3_mutex_leave(mutex); + return rc; +} + /**************************** sqlite3_value_ ******************************* ** The following routines extract information from a Mem or sqlite3_value @@ -55,7 +129,7 @@ return sqlite3VdbeRealValue((Mem*)pVal); } int sqlite3_value_int(sqlite3_value *pVal){ - return sqlite3VdbeIntValue((Mem*)pVal); + return (int)sqlite3VdbeIntValue((Mem*)pVal); } sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){ return sqlite3VdbeIntValue((Mem*)pVal); @@ -77,12 +151,26 @@ int sqlite3_value_type(sqlite3_value* pVal){ return pVal->type; } -/* sqlite3_value_numeric_type() defined in vdbe.c */ /**************************** sqlite3_result_ ******************************* ** The following routines are used by user-defined functions to specify ** the function result. -*/ +** +** The setStrOrError() funtion calls sqlite3VdbeMemSetStr() to store the +** result as a string or blob but if the string or blob is too large, it +** then sets the error code to SQLITE_TOOBIG +*/ +static void setResultStrOrError( + sqlite3_context *pCtx, /* Function context */ + const char *z, /* String pointer */ + int n, /* Bytes in string, or negative */ + u8 enc, /* Encoding of z. 0 for BLOBs */ + void (*xDel)(void*) /* Destructor function */ +){ + if( sqlite3VdbeMemSetStr(&pCtx->s, z, n, enc, xDel)==SQLITE_TOOBIG ){ + sqlite3_result_error_toobig(pCtx); + } +} void sqlite3_result_blob( sqlite3_context *pCtx, const void *z, @@ -90,28 +178,35 @@ void (*xDel)(void *) ){ assert( n>=0 ); - sqlite3VdbeMemSetStr(&pCtx->s, z, n, 0, xDel); + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + setResultStrOrError(pCtx, z, n, 0, xDel); } void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemSetDouble(&pCtx->s, rVal); } void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ - pCtx->isError = 1; + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ - pCtx->isError = 1; + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); } #endif void sqlite3_result_int(sqlite3_context *pCtx, int iVal){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemSetInt64(&pCtx->s, (i64)iVal); } void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemSetInt64(&pCtx->s, iVal); } void sqlite3_result_null(sqlite3_context *pCtx){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemSetNull(&pCtx->s); } void sqlite3_result_text( @@ -120,7 +215,8 @@ int n, void (*xDel)(void *) ){ - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF8, xDel); + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); } #ifndef SQLITE_OMIT_UTF16 void sqlite3_result_text16( @@ -129,7 +225,8 @@ int n, void (*xDel)(void *) ){ - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16NATIVE, xDel); + assert( 
sqlite3_mutex_held(pCtx->s.db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); } void sqlite3_result_text16be( sqlite3_context *pCtx, @@ -137,7 +234,8 @@ int n, void (*xDel)(void *) ){ - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16BE, xDel); + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); } void sqlite3_result_text16le( sqlite3_context *pCtx, @@ -145,21 +243,41 @@ int n, void (*xDel)(void *) ){ - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16LE, xDel); + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); } #endif /* SQLITE_OMIT_UTF16 */ void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemCopy(&pCtx->s, pValue); } void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); sqlite3VdbeMemSetZeroBlob(&pCtx->s, n); } +void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ + pCtx->isError = errCode; + if( pCtx->s.flags & MEM_Null ){ + sqlite3VdbeMemSetStr(&pCtx->s, sqlite3ErrStr(errCode), -1, + SQLITE_UTF8, SQLITE_STATIC); + } +} /* Force an SQLITE_TOOBIG error. */ void sqlite3_result_error_toobig(sqlite3_context *pCtx){ - sqlite3VdbeMemSetZeroBlob(&pCtx->s, SQLITE_MAX_LENGTH+1); + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + pCtx->isError = SQLITE_TOOBIG; + sqlite3VdbeMemSetStr(&pCtx->s, "string or blob too big", -1, + SQLITE_UTF8, SQLITE_STATIC); } +/* An SQLITE_NOMEM error. */ +void sqlite3_result_error_nomem(sqlite3_context *pCtx){ + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + sqlite3VdbeMemSetNull(&pCtx->s); + pCtx->isError = SQLITE_NOMEM; + pCtx->s.db->mallocFailed = 1; +} /* ** Execute the statement pStmt, either until a row of data is ready, the @@ -174,23 +292,24 @@ sqlite3 *db; int rc; - /* Assert that malloc() has not failed */ - assert( !sqlite3MallocFailed() ); - - if( p==0 || p->magic!=VDBE_MAGIC_RUN ){ + assert(p); + if( p->magic!=VDBE_MAGIC_RUN ){ return SQLITE_MISUSE; } - if( p->aborted ){ - return SQLITE_ABORT; + + /* Assert that malloc() has not failed */ + db = p->db; + if( db->mallocFailed ){ + return SQLITE_NOMEM; } + if( p->pc<=0 && p->expired ){ - if( p->rc==SQLITE_OK ){ + if( ALWAYS(p->rc==SQLITE_OK) ){ p->rc = SQLITE_SCHEMA; } rc = SQLITE_ERROR; goto end_of_step; } - db = p->db; if( sqlite3SafetyOn(db) ){ p->rc = SQLITE_MISUSE; return SQLITE_MISUSE; @@ -205,37 +324,15 @@ } #ifndef SQLITE_OMIT_TRACE - /* Invoke the trace callback if there is one - */ - if( db->xTrace && !db->init.busy ){ - assert( p->nOp>0 ); - assert( p->aOp[p->nOp-1].opcode==OP_Noop ); - assert( p->aOp[p->nOp-1].p3!=0 ); - assert( p->aOp[p->nOp-1].p3type==P3_DYNAMIC ); - sqlite3SafetyOff(db); - db->xTrace(db->pTraceArg, p->aOp[p->nOp-1].p3); - if( sqlite3SafetyOn(db) ){ - p->rc = SQLITE_MISUSE; - return SQLITE_MISUSE; - } - } if( db->xProfile && !db->init.busy ){ double rNow; - sqlite3OsCurrentTime(&rNow); - p->startTime = (rNow - (int)rNow)*3600.0*24.0*1000000000.0; + sqlite3OsCurrentTime(db->pVfs, &rNow); + p->startTime = (u64)((rNow - (int)rNow)*3600.0*24.0*1000000000.0); } #endif - /* Print a copy of SQL as it is executed if the SQL_TRACE pragma is turned - ** on in debugging mode. 
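The sqlite3_result_* routines above now assert that the connection mutex is held and route strings and blobs through setResultStrOrError(), so an over-long result surfaces as SQLITE_TOOBIG rather than being silently mangled. These routines are only ever called from inside an application-defined SQL function; a small example of that context follows (the function name upper_demo and the 256-byte limit are purely for the demo):

#include <sqlite3.h>

/* A trivial scalar function that upper-cases its single text argument.
** sqlite3_value_* reads the input, sqlite3_result_* sets the output, and
** SQLITE_TRANSIENT tells SQLite to copy the buffer before the function
** returns. */
static void upperFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const unsigned char *zIn;
  char buf[256];
  int i, n;
  (void)argc;
  zIn = sqlite3_value_text(argv[0]);
  if( zIn==0 ){                           /* NULL in gives NULL out */
    sqlite3_result_null(ctx);
    return;
  }
  n = sqlite3_value_bytes(argv[0]);
  if( n>=(int)sizeof(buf) ){              /* keep the demo simple */
    sqlite3_result_error(ctx, "argument too long for upper_demo", -1);
    return;
  }
  for(i=0; i<n; i++){
    buf[i] = (char)((zIn[i]>='a' && zIn[i]<='z') ? zIn[i]-32 : zIn[i]);
  }
  sqlite3_result_text(ctx, buf, n, SQLITE_TRANSIENT);
}

/* Registration, once per connection, would look like:
**   sqlite3_create_function(db, "upper_demo", 1, SQLITE_UTF8, 0,
**                           upperFunc, 0, 0);
*/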
- */ -#ifdef SQLITE_DEBUG - if( (db->flags & SQLITE_SqlTrace)!=0 ){ - sqlite3DebugPrintf("SQL-trace: %s\n", p->aOp[p->nOp-1].p3); - } -#endif /* SQLITE_DEBUG */ - db->activeVdbeCnt++; + if( p->readOnly==0 ) db->writeVdbeCnt++; p->pc = 0; } #ifndef SQLITE_OMIT_EXPLAIN @@ -254,34 +351,41 @@ #ifndef SQLITE_OMIT_TRACE /* Invoke the profile callback if there is one */ - if( rc!=SQLITE_ROW && db->xProfile && !db->init.busy ){ + if( rc!=SQLITE_ROW && db->xProfile && !db->init.busy && p->zSql ){ double rNow; u64 elapseTime; - sqlite3OsCurrentTime(&rNow); - elapseTime = (rNow - (int)rNow)*3600.0*24.0*1000000000.0 - p->startTime; - assert( p->nOp>0 ); - assert( p->aOp[p->nOp-1].opcode==OP_Noop ); - assert( p->aOp[p->nOp-1].p3!=0 ); - assert( p->aOp[p->nOp-1].p3type==P3_DYNAMIC ); - db->xProfile(db->pProfileArg, p->aOp[p->nOp-1].p3, elapseTime); + sqlite3OsCurrentTime(db->pVfs, &rNow); + elapseTime = (u64)((rNow - (int)rNow)*3600.0*24.0*1000000000.0); + elapseTime -= p->startTime; + db->xProfile(db->pProfileArg, p->zSql, elapseTime); } #endif - sqlite3Error(p->db, rc, 0); - p->rc = sqlite3ApiExit(p->db, p->rc); + db->errCode = rc; + if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){ + p->rc = SQLITE_NOMEM; + } end_of_step: - assert( (rc&0xff)==rc ); - if( p->zSql && (rc&0xff)db, p->rc, 0); - return p->rc; - }else{ - /* This is for legacy sqlite3_prepare() builds and when the code - ** is SQLITE_ROW or SQLITE_DONE */ - return rc; + /* At this point local variable rc holds the value that should be + ** returned if this statement was compiled using the legacy + ** sqlite3_prepare() interface. According to the docs, this can only + ** be one of the values in the first assert() below. Variable p->rc + ** contains the value that would be returned if sqlite3_finalize() + ** were called on statement p. + */ + assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR + || rc==SQLITE_BUSY || rc==SQLITE_MISUSE + ); + assert( p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE ); + if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ + /* If this statement was prepared using sqlite3_prepare_v2(), and an + ** error has occured, then return the error code in p->rc to the + ** caller. Set the error code in the database handle to the same value. + */ + rc = db->errCode = p->rc; } + return (rc&db->errMask); } /* @@ -289,24 +393,42 @@ ** sqlite3Step() to do most of the work. If a schema error occurs, ** call sqlite3Reprepare() and try again. */ -#ifdef SQLITE_OMIT_PARSER int sqlite3_step(sqlite3_stmt *pStmt){ - return sqlite3Step((Vdbe*)pStmt); -} -#else -int sqlite3_step(sqlite3_stmt *pStmt){ - int cnt = 0; - int rc; - Vdbe *v = (Vdbe*)pStmt; - while( (rc = sqlite3Step(v))==SQLITE_SCHEMA - && cnt++ < 5 - && sqlite3Reprepare(v) ){ - sqlite3_reset(pStmt); - v->expired = 0; + int rc = SQLITE_MISUSE; + if( pStmt ){ + int cnt = 0; + Vdbe *v = (Vdbe*)pStmt; + sqlite3 *db = v->db; + sqlite3_mutex_enter(db->mutex); + while( (rc = sqlite3Step(v))==SQLITE_SCHEMA + && cnt++ < 5 + && (rc = sqlite3Reprepare(v))==SQLITE_OK ){ + sqlite3_reset(pStmt); + v->expired = 0; + } + if( rc==SQLITE_SCHEMA && ALWAYS(v->isPrepareV2) && ALWAYS(db->pErr) ){ + /* This case occurs after failing to recompile an sql statement. + ** The error message from the SQL compiler has already been loaded + ** into the database handle. 
This block copies the error message + ** from the database handle into the statement and sets the statement + ** program counter to 0 to ensure that when the statement is + ** finalized or reset the parser error message is available via + ** sqlite3_errmsg() and sqlite3_errcode(). + */ + const char *zErr = (const char *)sqlite3_value_text(db->pErr); + sqlite3DbFree(db, v->zErrMsg); + if( !db->mallocFailed ){ + v->zErrMsg = sqlite3DbStrDup(db, zErr); + } else { + v->zErrMsg = 0; + v->rc = SQLITE_NOMEM; + } + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); } return rc; } -#endif /* ** Extract the user data from a sqlite3_context structure and return a @@ -318,6 +440,15 @@ } /* +** Extract the user data from a sqlite3_context structure and return a +** pointer to it. +*/ +sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ + assert( p && p->pFunc ); + return p->s.db; +} + +/* ** The following is the implementation of an SQL function that always ** fails with an error message stating that the function is used in the ** wrong context. The sqlite3_overload_function() API might construct @@ -327,15 +458,16 @@ */ void sqlite3InvalidFunction( sqlite3_context *context, /* The function calling context */ - int argc, /* Number of arguments to the function */ - sqlite3_value **argv /* Value of each argument */ + int NotUsed, /* Number of arguments to the function */ + sqlite3_value **NotUsed2 /* Value of each argument */ ){ const char *zName = context->pFunc->zName; char *zErr; - zErr = sqlite3MPrintf( + UNUSED_PARAMETER2(NotUsed, NotUsed2); + zErr = sqlite3_mprintf( "unable to use function %s in the requested context", zName); sqlite3_result_error(context, zErr, -1); - sqliteFree(zErr); + sqlite3_free(zErr); } /* @@ -344,21 +476,21 @@ ** same context that was returned on prior calls. */ void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ - Mem *pMem = p->pMem; + Mem *pMem; assert( p && p->pFunc && p->pFunc->xStep ); + assert( sqlite3_mutex_held(p->s.db->mutex) ); + pMem = p->pMem; if( (pMem->flags & MEM_Agg)==0 ){ if( nByte==0 ){ - assert( pMem->flags==MEM_Null ); + sqlite3VdbeMemReleaseExternal(pMem); + pMem->flags = MEM_Null; pMem->z = 0; }else{ + sqlite3VdbeMemGrow(pMem, nByte, 0); pMem->flags = MEM_Agg; - pMem->xDel = sqlite3FreeX; pMem->u.pDef = p->pFunc; - if( nByte<=NBFS ){ - pMem->z = pMem->zShort; + if( pMem->z ){ memset(pMem->z, 0, nByte); - }else{ - pMem->z = sqliteMalloc( nByte ); } } } @@ -370,7 +502,10 @@ ** the user-function defined by pCtx. */ void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ - VdbeFunc *pVdbeFunc = pCtx->pVdbeFunc; + VdbeFunc *pVdbeFunc; + + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + pVdbeFunc = pCtx->pVdbeFunc; if( !pVdbeFunc || iArg>=pVdbeFunc->nAux || iArg<0 ){ return 0; } @@ -392,14 +527,17 @@ VdbeFunc *pVdbeFunc; if( iArg<0 ) goto failed; + assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); pVdbeFunc = pCtx->pVdbeFunc; if( !pVdbeFunc || pVdbeFunc->nAux<=iArg ){ + int nAux = (pVdbeFunc ? 
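As the hunk above shows, the public sqlite3_step() now wraps the internal sqlite3Step() in a loop that re-prepares the statement up to five times when the schema changes, so statements prepared with sqlite3_prepare_v2() never surface SQLITE_SCHEMA for a schema change alone. A caller therefore only has to distinguish rows, completion and errors; a sketch of that loop (the query text is a placeholder):

#include <stdio.h>
#include <sqlite3.h>

/* Run a query and print the first column of every row.  With
** sqlite3_prepare_v2() the SQLITE_SCHEMA retry happens inside
** sqlite3_step(), so this loop never re-prepares by hand. */
static int printFirstColumn(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    const unsigned char *z = sqlite3_column_text(pStmt, 0);
    printf("%s\n", z ? (const char*)z : "NULL");
  }
  /* for a prepare_v2 statement, finalize reports any error that ended the loop */
  return sqlite3_finalize(pStmt);
}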
pVdbeFunc->nAux : 0); int nMalloc = sizeof(VdbeFunc) + sizeof(struct AuxData)*iArg; - pVdbeFunc = sqliteRealloc(pVdbeFunc, nMalloc); - if( !pVdbeFunc ) goto failed; + pVdbeFunc = sqlite3DbRealloc(pCtx->s.db, pVdbeFunc, nMalloc); + if( !pVdbeFunc ){ + goto failed; + } pCtx->pVdbeFunc = pVdbeFunc; - memset(&pVdbeFunc->apAux[pVdbeFunc->nAux], 0, - sizeof(struct AuxData)*(iArg+1-pVdbeFunc->nAux)); + memset(&pVdbeFunc->apAux[nAux], 0, sizeof(struct AuxData)*(iArg+1-nAux)); pVdbeFunc->nAux = iArg+1; pVdbeFunc->pFunc = pCtx->pFunc; } @@ -418,6 +556,7 @@ } } +#ifndef SQLITE_OMIT_DEPRECATED /* ** Return the number of times the Step function of a aggregate has been ** called. @@ -428,9 +567,10 @@ ** context. */ int sqlite3_aggregate_count(sqlite3_context *p){ - assert( p && p->pFunc && p->pFunc->xStep ); + assert( p && p->pMem && p->pFunc && p->pFunc->xStep ); return p->pMem->n; } +#endif /* ** Return the number of columns in the result set for the statement pStmt. @@ -446,7 +586,7 @@ */ int sqlite3_data_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; - if( pVm==0 || !pVm->resOnStack ) return 0; + if( pVm==0 || pVm->pResultSet==0 ) return 0; return pVm->nResColumn; } @@ -458,14 +598,40 @@ ** of NULL. */ static Mem *columnMem(sqlite3_stmt *pStmt, int i){ - Vdbe *pVm = (Vdbe *)pStmt; - int vals = sqlite3_data_count(pStmt); - if( pVm==0 || pVm->resOnStack==0 || i>=pVm->nResColumn || i<0 ){ - static const Mem nullMem = {{0}, 0.0, "", 0, MEM_Null, SQLITE_NULL }; - sqlite3Error(pVm->db, SQLITE_RANGE, 0); - return (Mem*)&nullMem; + Vdbe *pVm; + int vals; + Mem *pOut; + + pVm = (Vdbe *)pStmt; + if( pVm && pVm->pResultSet!=0 && inResColumn && i>=0 ){ + sqlite3_mutex_enter(pVm->db->mutex); + vals = sqlite3_data_count(pStmt); + pOut = &pVm->pResultSet[i]; + }else{ + /* If the value passed as the second argument is out of range, return + ** a pointer to the following static Mem object which contains the + ** value SQL NULL. Even though the Mem structure contains an element + ** of type i64, on certain architecture (x86) with certain compiler + ** switches (-Os), gcc may align this Mem object on a 4-byte boundary + ** instead of an 8-byte one. This all works fine, except that when + ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s + ** that a Mem structure is located on an 8-byte boundary. To prevent + ** this assert() from failing, when building with SQLITE_DEBUG defined + ** using gcc, force nullMem to be 8-byte aligned using the magical + ** __attribute__((aligned(8))) macro. */ + static const Mem nullMem +#if defined(SQLITE_DEBUG) && defined(__GNUC__) + __attribute__((aligned(8))) +#endif + = {{0}, (double)0, 0, "", 0, MEM_Null, SQLITE_NULL, 0, 0, 0 }; + + if( pVm && ALWAYS(pVm->db) ){ + sqlite3_mutex_enter(pVm->db->mutex); + sqlite3Error(pVm->db, SQLITE_RANGE, 0); + } + pOut = (Mem*)&nullMem; } - return &pVm->pTos[(1-vals)+i]; + return pOut; } /* @@ -475,7 +641,7 @@ ** malloc() has failed, the threads mallocFailed flag is cleared and the result ** code of statement pStmt set to SQLITE_NOMEM. ** -** Specificly, this is called from within: +** Specifically, this is called from within: ** ** sqlite3_column_int() ** sqlite3_column_int64() @@ -495,7 +661,10 @@ ** and _finalize() will return NOMEM. 
*/ Vdbe *p = (Vdbe *)pStmt; - p->rc = sqlite3ApiExit(0, p->rc); + if( p ){ + p->rc = sqlite3ApiExit(p->db, p->rc); + sqlite3_mutex_leave(p->db->mutex); + } } /**************************** sqlite3_column_ ******************************* @@ -543,7 +712,13 @@ return val; } sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){ - return columnMem(pStmt, i); + Mem *pOut = columnMem(pStmt, i); + if( pOut->flags&MEM_Static ){ + pOut->flags &= ~MEM_Static; + pOut->flags |= MEM_Ephem; + } + columnMallocFailure(pStmt); + return (sqlite3_value *)pOut; } #ifndef SQLITE_OMIT_UTF16 const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){ @@ -553,7 +728,9 @@ } #endif /* SQLITE_OMIT_UTF16 */ int sqlite3_column_type(sqlite3_stmt *pStmt, int i){ - return sqlite3_value_type( columnMem(pStmt,i) ); + int iType = sqlite3_value_type( columnMem(pStmt,i) ); + columnMallocFailure(pStmt); + return iType; } /* The following function is experimental and subject to change or @@ -585,20 +762,27 @@ const void *(*xFunc)(Mem*), int useType ){ - const void *ret; + const void *ret = 0; Vdbe *p = (Vdbe *)pStmt; - int n = sqlite3_column_count(pStmt); - - if( p==0 || N>=n || N<0 ){ - return 0; + int n; + sqlite3 *db = p->db; + + assert( db!=0 ); + n = sqlite3_column_count(pStmt); + if( N=0 ){ + N += useType*n; + sqlite3_mutex_enter(db->mutex); + assert( db->mallocFailed==0 ); + ret = xFunc(&p->aColName[N]); + /* A malloc may have failed inside of the xFunc() call. If this + ** is the case, clear the mallocFailed flag and return NULL. + */ + if( db->mallocFailed ){ + db->mallocFailed = 0; + ret = 0; + } + sqlite3_mutex_leave(db->mutex); } - N += useType*n; - ret = xFunc(&p->aColName[N]); - - /* A malloc may have failed inside of the xFunc() call. If this is the case, - ** clear the mallocFailed flag and return NULL. - */ - sqlite3ApiExit(0, 0); return ret; } @@ -618,6 +802,16 @@ #endif /* +** Constraint: If you have ENABLE_COLUMN_METADATA then you must +** not define OMIT_DECLTYPE. +*/ +#if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA) +# error "Must not define both SQLITE_OMIT_DECLTYPE \ + and SQLITE_ENABLE_COLUMN_METADATA" +#endif + +#ifndef SQLITE_OMIT_DECLTYPE +/* ** Return the column declaration type (if applicable) of the 'i'th column ** of the result set of SQL statement pStmt. */ @@ -631,6 +825,7 @@ pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE); } #endif /* SQLITE_OMIT_UTF16 */ +#endif /* SQLITE_OMIT_DECLTYPE */ #ifdef SQLITE_ENABLE_COLUMN_METADATA /* @@ -692,17 +887,24 @@ ** the same as binding a NULL value to the column. If the "i" parameter is ** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. ** +** A successful evaluation of this routine acquires the mutex on p. +** the mutex is released if any kind of error occurs. +** ** The error code stored in database p->db is overwritten with the return ** value in any case. */ static int vdbeUnbind(Vdbe *p, int i){ Mem *pVar; - if( p==0 || p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ - if( p ) sqlite3Error(p->db, SQLITE_MISUSE, 0); + if( p==0 ) return SQLITE_MISUSE; + sqlite3_mutex_enter(p->db->mutex); + if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ + sqlite3Error(p->db, SQLITE_MISUSE, 0); + sqlite3_mutex_leave(p->db->mutex); return SQLITE_MISUSE; } if( i<1 || i>p->nVar ){ sqlite3Error(p->db, SQLITE_RANGE, 0); + sqlite3_mutex_leave(p->db->mutex); return SQLITE_RANGE; } i--; @@ -717,29 +919,31 @@ ** Bind a text or BLOB value. 
*/ static int bindText( - sqlite3_stmt *pStmt, - int i, - const void *zData, - int nData, - void (*xDel)(void*), - int encoding + sqlite3_stmt *pStmt, /* The statement to bind against */ + int i, /* Index of the parameter to bind */ + const void *zData, /* Pointer to the data to be bound */ + int nData, /* Number of bytes of data to be bound */ + void (*xDel)(void*), /* Destructor for the data */ + u8 encoding /* Encoding for the data */ ){ Vdbe *p = (Vdbe *)pStmt; Mem *pVar; int rc; rc = vdbeUnbind(p, i); - if( rc || zData==0 ){ - return rc; - } - pVar = &p->aVar[i-1]; - rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); - if( rc==SQLITE_OK && encoding!=0 ){ - rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); + if( rc==SQLITE_OK ){ + if( zData!=0 ){ + pVar = &p->aVar[i-1]; + rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); + if( rc==SQLITE_OK && encoding!=0 ){ + rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); + } + sqlite3Error(p->db, rc, 0); + rc = sqlite3ApiExit(p->db, rc); + } + sqlite3_mutex_leave(p->db->mutex); } - - sqlite3Error(((Vdbe *)pStmt)->db, rc, 0); - return sqlite3ApiExit(((Vdbe *)pStmt)->db, rc); + return rc; } @@ -761,6 +965,7 @@ rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); + sqlite3_mutex_leave(p->db->mutex); } return rc; } @@ -773,11 +978,18 @@ rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); + sqlite3_mutex_leave(p->db->mutex); } return rc; } -int sqlite3_bind_null(sqlite3_stmt* p, int i){ - return vdbeUnbind((Vdbe *)p, i); +int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ + int rc; + Vdbe *p = (Vdbe*)pStmt; + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + sqlite3_mutex_leave(p->db->mutex); + } + return rc; } int sqlite3_bind_text( sqlite3_stmt *pStmt, @@ -801,10 +1013,32 @@ #endif /* SQLITE_OMIT_UTF16 */ int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){ int rc; - Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - sqlite3VdbeMemCopy(&p->aVar[i-1], pValue); + switch( pValue->type ){ + case SQLITE_INTEGER: { + rc = sqlite3_bind_int64(pStmt, i, pValue->u.i); + break; + } + case SQLITE_FLOAT: { + rc = sqlite3_bind_double(pStmt, i, pValue->r); + break; + } + case SQLITE_BLOB: { + if( pValue->flags & MEM_Zero ){ + rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero); + }else{ + rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT); + } + break; + } + case SQLITE_TEXT: { + rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT, + pValue->enc); + break; + } + default: { + rc = sqlite3_bind_null(pStmt, i); + break; + } } return rc; } @@ -814,6 +1048,7 @@ rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); + sqlite3_mutex_leave(p->db->mutex); } return rc; } @@ -836,13 +1071,20 @@ if( !p->okVar ){ int j; Op *pOp; + sqlite3_mutex_enter(p->db->mutex); + /* The race condition here is harmless. If two threads call this + ** routine on the same Vdbe at the same time, they both might end + ** up initializing the Vdbe.azVar[] array. That is a little extra + ** work but it results in the same answer. + */ for(j=0, pOp=p->aOp; jnOp; j++, pOp++){ if( pOp->opcode==OP_Variable ){ assert( pOp->p1>0 && pOp->p1<=p->nVar ); - p->azVar[pOp->p1-1] = pOp->p3; + p->azVar[pOp->p1-1] = pOp->p4.z; } } p->okVar = 1; + sqlite3_mutex_leave(p->db->mutex); } } @@ -886,28 +1128,43 @@ /* ** Transfer all bindings from the first statement over to the second. 
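Sketch of the binding interfaces reworked above (hypothetical table and values): parameter indices are 1-based, SQLITE_TRANSIENT asks SQLite to make its own copy of the text, and binding to an index outside 1..nVar yields SQLITE_RANGE per vdbeUnbind() above.

    sqlite3_stmt *pIns = 0;
    sqlite3_prepare_v2(db, "INSERT INTO t1(a,b,c) VALUES(?1,?2,?3)", -1, &pIns, 0);
    sqlite3_bind_int64(pIns, 1, 42);
    sqlite3_bind_text(pIns, 2, "hello", -1, SQLITE_TRANSIENT);  /* copy is made */
    sqlite3_bind_null(pIns, 3);
    sqlite3_step(pIns);
    sqlite3_reset(pIns);            /* bindings persist across a reset...     */
    sqlite3_clear_bindings(pIns);   /* ...until cleared or bound over         */
    sqlite3_finalize(pIns);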
+*/ +int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ + Vdbe *pFrom = (Vdbe*)pFromStmt; + Vdbe *pTo = (Vdbe*)pToStmt; + int i; + assert( pTo->db==pFrom->db ); + assert( pTo->nVar==pFrom->nVar ); + sqlite3_mutex_enter(pTo->db->mutex); + for(i=0; inVar; i++){ + sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]); + } + sqlite3_mutex_leave(pTo->db->mutex); + return SQLITE_OK; +} + +#ifndef SQLITE_OMIT_DEPRECATED +/* +** Deprecated external interface. Internal/core SQLite code +** should call sqlite3TransferBindings. +** +** Is is misuse to call this routine with statements from different +** database connections. But as this is a deprecated interface, we +** will not bother to check for that condition. +** ** If the two statements contain a different number of bindings, then -** an SQLITE_ERROR is returned. +** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise +** SQLITE_OK is returned. */ int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ Vdbe *pFrom = (Vdbe*)pFromStmt; Vdbe *pTo = (Vdbe*)pToStmt; - int i, rc = SQLITE_OK; - if( (pFrom->magic!=VDBE_MAGIC_RUN && pFrom->magic!=VDBE_MAGIC_HALT) - || (pTo->magic!=VDBE_MAGIC_RUN && pTo->magic!=VDBE_MAGIC_HALT) ){ - return SQLITE_MISUSE; - } if( pFrom->nVar!=pTo->nVar ){ return SQLITE_ERROR; } - for(i=0; rc==SQLITE_OK && inVar; i++){ - sqlite3MallocDisallow(); - rc = sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]); - sqlite3MallocAllow(); - } - assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); - return rc; + return sqlite3TransferBindings(pFromStmt, pToStmt); } +#endif /* ** Return the sqlite3* database handle to which the prepared statement given @@ -918,3 +1175,31 @@ sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){ return pStmt ? ((Vdbe*)pStmt)->db : 0; } + +/* +** Return a pointer to the next prepared statement after pStmt associated +** with database connection pDb. If pStmt is NULL, return the first +** prepared statement for the database connection. Return NULL if there +** are no more. +*/ +sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){ + sqlite3_stmt *pNext; + sqlite3_mutex_enter(pDb->mutex); + if( pStmt==0 ){ + pNext = (sqlite3_stmt*)pDb->pVdbe; + }else{ + pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext; + } + sqlite3_mutex_leave(pDb->mutex); + return pNext; +} + +/* +** Return the value of a status counter for a prepared statement +*/ +int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ + Vdbe *pVdbe = (Vdbe*)pStmt; + int v = pVdbe->aCounter[op-1]; + if( resetFlag ) pVdbe->aCounter[op-1] = 0; + return v; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbeaux.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbeaux.c --- sqlite3-3.4.2/src/vdbeaux.c 2007-08-13 12:09:25.000000000 +0100 +++ sqlite3-3.6.16/src/vdbeaux.c 2009-06-26 19:17:20.000000000 +0100 @@ -13,20 +13,21 @@ ** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) Prior ** to version 2.8.7, all this code was combined into the vdbe.c source file. ** But that file was getting too big so this subroutines were split out. +** +** $Id: vdbeaux.c,v 1.467 2009/06/26 16:32:13 shane Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include #include "vdbeInt.h" + /* ** When debugging the code generator in a symbolic debugger, one can -** set the sqlite3_vdbe_addop_trace to 1 and all opcodes will be printed +** set the sqlite3VdbeAddopTrace to 1 and all opcodes will be printed ** as they are added to the instruction stream. 
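The two interfaces added at the end of vdbeapi.c above, sqlite3_next_stmt() and sqlite3_stmt_status(), are new public API relative to the 3.4.2 code being replaced. A hedged sketch of typical use (the SQLITE_STMTSTATUS_FULLSCAN_STEP counter name is assumed to be the one defined alongside sqlite3_stmt_status() in sqlite3.h):

    /* Report statements that performed full-table-scan steps, then
    ** finalize every prepared statement still open on the connection. */
    sqlite3_stmt *pStmt = 0;
    while( (pStmt = sqlite3_next_stmt(db, pStmt))!=0 ){
      int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
      const char *zSql = sqlite3_sql(pStmt);    /* may be NULL for legacy prepare() */
      if( nScan>0 ) printf("full scans in [%s]: %d\n", zSql ? zSql : "?", nScan);
    }
    while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
      sqlite3_finalize(pStmt);   /* always re-fetch the list head after finalizing */
    }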
*/ #ifdef SQLITE_DEBUG -int sqlite3_vdbe_addop_trace = 0; +int sqlite3VdbeAddopTrace = 0; #endif @@ -35,7 +36,7 @@ */ Vdbe *sqlite3VdbeCreate(sqlite3 *db){ Vdbe *p; - p = sqliteMalloc( sizeof(Vdbe) ); + p = sqlite3DbMallocZero(db, sizeof(Vdbe) ); if( p==0 ) return 0; p->db = db; if( db->pVdbe ){ @@ -51,17 +52,22 @@ /* ** Remember the SQL string for a prepared statement. */ -void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n){ +void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){ if( p==0 ) return; +#ifdef SQLITE_OMIT_TRACE + if( !isPrepareV2 ) return; +#endif assert( p->zSql==0 ); - p->zSql = sqlite3StrNDup(z, n); + p->zSql = sqlite3DbStrNDup(p->db, z, n); + p->isPrepareV2 = isPrepareV2 ? 1 : 0; } /* ** Return the SQL associated with a prepared statement */ -const char *sqlite3VdbeGetSql(Vdbe *p){ - return p->zSql; +const char *sqlite3_sql(sqlite3_stmt *pStmt){ + Vdbe *p = (Vdbe *)pStmt; + return (p->isPrepareV2 ? p->zSql : 0); } /* @@ -70,7 +76,6 @@ void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ Vdbe tmp, *pTmp; char *zTmp; - int nTmp; tmp = *pA; *pA = *pB; *pB = tmp; @@ -83,9 +88,7 @@ zTmp = pA->zSql; pA->zSql = pB->zSql; pB->zSql = zTmp; - nTmp = pA->nSql; - pA->nSql = pB->nSql; - pB->nSql = nTmp; + pB->isPrepareV2 = pA->isPrepareV2; } #ifdef SQLITE_DEBUG @@ -98,32 +101,23 @@ #endif /* -** Resize the Vdbe.aOp array so that it contains at least N -** elements. If the Vdbe is in VDBE_MAGIC_RUN state, then -** the Vdbe.aOp array will be sized to contain exactly N -** elements. Vdbe.nOpAlloc is set to reflect the new size of -** the array. -** -** If an out-of-memory error occurs while resizing the array, -** Vdbe.aOp and Vdbe.nOpAlloc remain unchanged (this is so that -** any opcodes already allocated can be correctly deallocated -** along with the rest of the Vdbe). -*/ -static void resizeOpArray(Vdbe *p, int N){ - int runMode = p->magic==VDBE_MAGIC_RUN; - if( runMode || p->nOpAllocnOpAlloc; - pNew = sqliteRealloc(p->aOp, nNew*sizeof(Op)); - if( pNew ){ - p->nOpAlloc = nNew; - p->aOp = pNew; - if( nNew>oldSize ){ - memset(&p->aOp[oldSize], 0, (nNew-oldSize)*sizeof(Op)); - } - } +** Resize the Vdbe.aOp array so that it is at least one op larger than +** it was. +** +** If an out-of-memory error occurs while resizing the array, return +** SQLITE_NOMEM. In this case Vdbe.aOp and Vdbe.nOpAlloc remain +** unchanged (this is so that any opcodes already allocated can be +** correctly deallocated along with the rest of the Vdbe). +*/ +static int growOpArray(Vdbe *p){ + VdbeOp *pNew; + int nNew = (p->nOpAlloc ? p->nOpAlloc*2 : (int)(1024/sizeof(Op))); + pNew = sqlite3DbRealloc(p->db, p->aOp, nNew*sizeof(Op)); + if( pNew ){ + p->nOpAlloc = sqlite3DbMallocSize(p->db, pNew)/sizeof(Op); + p->aOp = pNew; } + return (pNew ? SQLITE_OK : SQLITE_NOMEM); } /* @@ -136,44 +130,69 @@ ** ** op The opcode for this instruction ** -** p1, p2 First two of the three possible operands. +** p1, p2, p3 Operands ** ** Use the sqlite3VdbeResolveLabel() function to fix an address and -** the sqlite3VdbeChangeP3() function to change the value of the P3 +** the sqlite3VdbeChangeP4() function to change the value of the P4 ** operand. 
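The new sqlite3_sql() interface above returns the original SQL text, but only for statements created with sqlite3_prepare_v2() (and, per the #ifdef above, the text is not retained at all for legacy statements when SQLITE_OMIT_TRACE is defined). A small sketch, not part of the patch:

    sqlite3_stmt *pStmt = 0;
    sqlite3_prepare_v2(db, "SELECT 1", -1, &pStmt, 0);
    printf("%s\n", sqlite3_sql(pStmt));   /* prints: SELECT 1 */
    sqlite3_finalize(pStmt);
    /* For a statement from the legacy sqlite3_prepare(), sqlite3_sql()
    ** returns a NULL pointer instead. */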
*/ -int sqlite3VdbeAddOp(Vdbe *p, int op, int p1, int p2){ +int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ int i; VdbeOp *pOp; i = p->nOp; assert( p->magic==VDBE_MAGIC_INIT ); + assert( op>0 && op<0xff ); if( p->nOpAlloc<=i ){ - resizeOpArray(p, i+1); - if( sqlite3MallocFailed() ){ - return 0; + if( growOpArray(p) ){ + return 1; } } p->nOp++; pOp = &p->aOp[i]; - pOp->opcode = op; + pOp->opcode = (u8)op; + pOp->p5 = 0; pOp->p1 = p1; pOp->p2 = p2; - pOp->p3 = 0; - pOp->p3type = P3_NOTUSED; + pOp->p3 = p3; + pOp->p4.p = 0; + pOp->p4type = P4_NOTUSED; p->expired = 0; #ifdef SQLITE_DEBUG - if( sqlite3_vdbe_addop_trace ) sqlite3VdbePrintOp(0, i, &p->aOp[i]); + pOp->zComment = 0; + if( sqlite3VdbeAddopTrace ) sqlite3VdbePrintOp(0, i, &p->aOp[i]); +#endif +#ifdef VDBE_PROFILE + pOp->cycles = 0; + pOp->cnt = 0; #endif return i; } +int sqlite3VdbeAddOp0(Vdbe *p, int op){ + return sqlite3VdbeAddOp3(p, op, 0, 0, 0); +} +int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ + return sqlite3VdbeAddOp3(p, op, p1, 0, 0); +} +int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ + return sqlite3VdbeAddOp3(p, op, p1, p2, 0); +} + /* -** Add an opcode that includes the p3 value. +** Add an opcode that includes the p4 value as a pointer. */ -int sqlite3VdbeOp3(Vdbe *p, int op, int p1, int p2, const char *zP3,int p3type){ - int addr = sqlite3VdbeAddOp(p, op, p1, p2); - sqlite3VdbeChangeP3(p, addr, zP3, p3type); +int sqlite3VdbeAddOp4( + Vdbe *p, /* Add the opcode to this VM */ + int op, /* The new opcode */ + int p1, /* The P1 operand */ + int p2, /* The P2 operand */ + int p3, /* The P3 operand */ + const char *zP4, /* The P4 operand */ + int p4type /* P4 operand type */ +){ + int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); + sqlite3VdbeChangeP4(p, addr, zP4, p4type); return addr; } @@ -196,9 +215,10 @@ i = p->nLabel++; assert( p->magic==VDBE_MAGIC_INIT ); if( i>=p->nLabelAlloc ){ - p->nLabelAlloc = p->nLabelAlloc*2 + 10; - p->aLabel = sqliteReallocOrFree(p->aLabel, - p->nLabelAlloc*sizeof(p->aLabel[0])); + int n = p->nLabelAlloc*2 + 5; + p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel, + n*sizeof(p->aLabel[0])); + p->nLabelAlloc = sqlite3DbMallocSize(p->db, p->aLabel)/sizeof(p->aLabel[0]); } if( p->aLabel ){ p->aLabel[i] = -1; @@ -221,51 +241,9 @@ } /* -** Return non-zero if opcode 'op' is guarenteed not to push more values -** onto the VDBE stack than it pops off. -*/ -static int opcodeNoPush(u8 op){ - /* The 10 NOPUSH_MASK_n constants are defined in the automatically - ** generated header file opcodes.h. Each is a 16-bit bitmask, one - ** bit corresponding to each opcode implemented by the virtual - ** machine in vdbe.c. The bit is true if the word "no-push" appears - ** in a comment on the same line as the "case OP_XXX:" in - ** sqlite3VdbeExec() in vdbe.c. - ** - ** If the bit is true, then the corresponding opcode is guarenteed not - ** to grow the stack when it is executed. Otherwise, it may grow the - ** stack by at most one entry. - ** - ** NOPUSH_MASK_0 corresponds to opcodes 0 to 15. NOPUSH_MASK_1 contains - ** one bit for opcodes 16 to 31, and so on. - ** - ** 16-bit bitmasks (rather than 32-bit) are specified in opcodes.h - ** because the file is generated by an awk program. Awk manipulates - ** all numbers as floating-point and we don't want to risk a rounding - ** error if someone builds with an awk that uses (for example) 32-bit - ** IEEE floats. 
- */ - static const u32 masks[5] = { - NOPUSH_MASK_0 + (((unsigned)NOPUSH_MASK_1)<<16), - NOPUSH_MASK_2 + (((unsigned)NOPUSH_MASK_3)<<16), - NOPUSH_MASK_4 + (((unsigned)NOPUSH_MASK_5)<<16), - NOPUSH_MASK_6 + (((unsigned)NOPUSH_MASK_7)<<16), - NOPUSH_MASK_8 + (((unsigned)NOPUSH_MASK_9)<<16) - }; - assert( op<32*5 ); - return (masks[op>>5] & (1<<(op&0x1F))); -} - -#ifndef NDEBUG -int sqlite3VdbeOpcodeNoPush(u8 op){ - return opcodeNoPush(op); -} -#endif - -/* -** Loop through the program looking for P2 values that are negative. -** Each such value is a label. Resolve the label by setting the P2 -** value to its correct non-zero value. +** Loop through the program looking for P2 values that are negative +** on jump instructions. Each such value is a label. Resolve the +** label by setting the P2 value to its correct non-zero value. ** ** This routine is called once after all opcodes have been inserted. ** @@ -273,32 +251,37 @@ ** to an OP_Function, OP_AggStep or OP_VFilter opcode. This is used by ** sqlite3VdbeMakeReady() to size the Vdbe.apArg[] array. ** -** The integer *pMaxStack is set to the maximum number of vdbe stack -** entries that static analysis reveals this program might need. -** ** This routine also does the following optimization: It scans for -** Halt instructions where P1==SQLITE_CONSTRAINT or P2==OE_Abort or for -** IdxInsert instructions where P2!=0. If no such instruction is -** found, then every Statement instruction is changed to a Noop. In -** this way, we avoid creating the statement journal file unnecessarily. +** instructions that might cause a statement rollback. Such instructions +** are: +** +** * OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort. +** * OP_Destroy +** * OP_VUpdate +** * OP_VRename +** +** If no such instruction is found, then every Statement instruction +** is changed to a Noop. In this way, we avoid creating the statement +** journal file unnecessarily. 
*/ -static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs, int *pMaxStack){ +static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ int i; int nMaxArgs = 0; - int nMaxStack = p->nOp; Op *pOp; int *aLabel = p->aLabel; int doesStatementRollback = 0; int hasStatementBegin = 0; + p->readOnly = 1; + p->usesStmtJournal = 0; for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ u8 opcode = pOp->opcode; - if( opcode==OP_Function || opcode==OP_AggStep + if( opcode==OP_Function || opcode==OP_AggStep ){ + if( pOp->p5>nMaxArgs ) nMaxArgs = pOp->p5; #ifndef SQLITE_OMIT_VIRTUALTABLE - || opcode==OP_VUpdate -#endif - ){ + }else if( opcode==OP_VUpdate ){ if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; +#endif } if( opcode==OP_Halt ){ if( pOp->p1==SQLITE_CONSTRAINT && pOp->p2==OE_Abort ){ @@ -306,30 +289,32 @@ } }else if( opcode==OP_Statement ){ hasStatementBegin = 1; + p->usesStmtJournal = 1; + }else if( opcode==OP_Destroy ){ + doesStatementRollback = 1; + }else if( opcode==OP_Transaction && pOp->p2!=0 ){ + p->readOnly = 0; #ifndef SQLITE_OMIT_VIRTUALTABLE }else if( opcode==OP_VUpdate || opcode==OP_VRename ){ doesStatementRollback = 1; }else if( opcode==OP_VFilter ){ int n; assert( p->nOp - i >= 3 ); - assert( pOp[-2].opcode==OP_Integer ); - n = pOp[-2].p1; + assert( pOp[-1].opcode==OP_Integer ); + n = pOp[-1].p1; if( n>nMaxArgs ) nMaxArgs = n; #endif } - if( opcodeNoPush(opcode) ){ - nMaxStack--; - } - if( pOp->p2>=0 ) continue; - assert( -1-pOp->p2nLabel ); - pOp->p2 = aLabel[-1-pOp->p2]; + if( sqlite3VdbeOpcodeHasProperty(opcode, OPFLG_JUMP) && pOp->p2<0 ){ + assert( -1-pOp->p2nLabel ); + pOp->p2 = aLabel[-1-pOp->p2]; + } } - sqliteFree(p->aLabel); + sqlite3DbFree(p->db, p->aLabel); p->aLabel = 0; *pMaxFuncArgs = nMaxArgs; - *pMaxStack = nMaxStack; /* If we never rollback a statement transaction, then statement ** transactions are not needed. So change every OP_Statement @@ -337,6 +322,7 @@ ** which can be expensive on some platforms. */ if( hasStatementBegin && !doesStatementRollback ){ + p->usesStmtJournal = 0; for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ if( pOp->opcode==OP_Statement ){ pOp->opcode = OP_Noop; @@ -360,12 +346,11 @@ int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp){ int addr; assert( p->magic==VDBE_MAGIC_INIT ); - resizeOpArray(p, p->nOp + nOp); - if( sqlite3MallocFailed() ){ + if( p->nOp + nOp > p->nOpAlloc && growOpArray(p) ){ return 0; } addr = p->nOp; - if( nOp>0 ){ + if( ALWAYS(nOp>0) ){ int i; VdbeOpList const *pIn = aOp; for(i=0; iaOp[i+addr]; pOut->opcode = pIn->opcode; pOut->p1 = pIn->p1; - pOut->p2 = p2<0 ? addr + ADDR(p2) : p2; + if( p2<0 && sqlite3VdbeOpcodeHasProperty(pOut->opcode, OPFLG_JUMP) ){ + pOut->p2 = addr + ADDR(p2); + }else{ + pOut->p2 = p2; + } pOut->p3 = pIn->p3; - pOut->p3type = pIn->p3 ? P3_STATIC : P3_NOTUSED; + pOut->p4type = P4_NOTUSED; + pOut->p4.p = 0; + pOut->p5 = 0; #ifdef SQLITE_DEBUG - if( sqlite3_vdbe_addop_trace ){ + pOut->zComment = 0; + if( sqlite3VdbeAddopTrace ){ sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]); } #endif @@ -394,8 +386,9 @@ ** few minor changes to the program. */ void sqlite3VdbeChangeP1(Vdbe *p, int addr, int val){ - assert( p==0 || p->magic==VDBE_MAGIC_INIT ); - if( p && addr>=0 && p->nOp>addr && p->aOp ){ + assert( p!=0 ); + assert( addr>=0 ); + if( p->nOp>addr ){ p->aOp[addr].p1 = val; } } @@ -405,14 +398,37 @@ ** This routine is useful for setting a jump destination. 
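The label scheme handled by resolveP2Values() above works as follows: sqlite3VdbeMakeLabel() hands out labels encoded as negative numbers, jump instructions are coded with that negative value in P2, and once all opcodes exist the negative P2 operands are rewritten to real addresses via aLabel[-1-p2]. A minimal stand-alone sketch of the same backpatching idea, not SQLite's actual structures:

    typedef struct { int opcode; int p2; } FakeOp;

    /* Rewrite negative P2 operands (labels) to the addresses recorded
    ** for them in aLabel[].  Mirrors the aLabel[-1-pOp->p2] lookup above. */
    static void resolve_labels(FakeOp *aOp, int nOp, const int *aLabel){
      int i;
      for(i=0; i<nOp; i++){
        if( aOp[i].p2<0 ){
          aOp[i].p2 = aLabel[-1 - aOp[i].p2];
        }
      }
    }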
*/ void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){ - assert( val>=0 ); - assert( p==0 || p->magic==VDBE_MAGIC_INIT ); - if( p && addr>=0 && p->nOp>addr && p->aOp ){ + assert( p!=0 ); + assert( addr>=0 ); + if( p->nOp>addr ){ p->aOp[addr].p2 = val; } } /* +** Change the value of the P3 operand for a specific instruction. +*/ +void sqlite3VdbeChangeP3(Vdbe *p, int addr, int val){ + assert( p!=0 ); + assert( addr>=0 ); + if( p->nOp>addr ){ + p->aOp[addr].p3 = val; + } +} + +/* +** Change the value of the P5 operand for the most recently +** added operation. +*/ +void sqlite3VdbeChangeP5(Vdbe *p, u8 val){ + assert( p!=0 ); + if( p->aOp ){ + assert( p->nOp>0 ); + p->aOp[p->nOp-1].p5 = val; + } +} + +/* ** Change the P2 operand of instruction addr so that it points to ** the address of the next instruction to be coded. */ @@ -425,41 +441,41 @@ ** If the input FuncDef structure is ephemeral, then free it. If ** the FuncDef is not ephermal, then do nothing. */ -static void freeEphemeralFunction(FuncDef *pDef){ - if( pDef && (pDef->flags & SQLITE_FUNC_EPHEM)!=0 ){ - sqliteFree(pDef); +static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ + if( ALWAYS(pDef) && (pDef->flags & SQLITE_FUNC_EPHEM)!=0 ){ + sqlite3DbFree(db, pDef); } } /* -** Delete a P3 value if necessary. -*/ -static void freeP3(int p3type, void *p3){ - if( p3 ){ - switch( p3type ){ - case P3_DYNAMIC: - case P3_KEYINFO: - case P3_KEYINFO_HANDOFF: { - sqliteFree(p3); - break; - } - case P3_MPRINTF: { - sqlite3_free(p3); +** Delete a P4 value if necessary. +*/ +static void freeP4(sqlite3 *db, int p4type, void *p4){ + if( p4 ){ + switch( p4type ){ + case P4_REAL: + case P4_INT64: + case P4_MPRINTF: + case P4_DYNAMIC: + case P4_KEYINFO: + case P4_INTARRAY: + case P4_KEYINFO_HANDOFF: { + sqlite3DbFree(db, p4); break; } - case P3_VDBEFUNC: { - VdbeFunc *pVdbeFunc = (VdbeFunc *)p3; - freeEphemeralFunction(pVdbeFunc->pFunc); + case P4_VDBEFUNC: { + VdbeFunc *pVdbeFunc = (VdbeFunc *)p4; + freeEphemeralFunction(db, pVdbeFunc->pFunc); sqlite3VdbeDeleteAuxData(pVdbeFunc, 0); - sqliteFree(pVdbeFunc); + sqlite3DbFree(db, pVdbeFunc); break; } - case P3_FUNCDEF: { - freeEphemeralFunction((FuncDef*)p3); + case P4_FUNCDEF: { + freeEphemeralFunction(db, (FuncDef*)p4); break; } - case P3_MEM: { - sqlite3ValueFree((sqlite3_value*)p3); + case P4_MEM: { + sqlite3ValueFree((sqlite3_value*)p4); break; } } @@ -471,10 +487,11 @@ ** Change N opcodes starting at addr to No-ops. */ void sqlite3VdbeChangeToNoop(Vdbe *p, int addr, int N){ - if( p && p->aOp ){ + if( p->aOp ){ VdbeOp *pOp = &p->aOp[addr]; + sqlite3 *db = p->db; while( N-- ){ - freeP3(pOp->p3type, pOp->p3); + freeP4(db, pOp->p4type, pOp->p4.p); memset(pOp, 0, sizeof(pOp[0])); pOp->opcode = OP_Noop; pOp++; @@ -483,125 +500,181 @@ } /* -** Change the value of the P3 operand for a specific instruction. +** Change the value of the P4 operand for a specific instruction. ** This routine is useful when a large program is loaded from a ** static array using sqlite3VdbeAddOpList but we want to make a ** few minor changes to the program. ** -** If n>=0 then the P3 operand is dynamic, meaning that a copy of -** the string is made into memory obtained from sqliteMalloc(). -** A value of n==0 means copy bytes of zP3 up to and including the -** first null byte. If n>0 then copy n+1 bytes of zP3. +** If n>=0 then the P4 operand is dynamic, meaning that a copy of +** the string is made into memory obtained from sqlite3_malloc(). 
+** A value of n==0 means copy bytes of zP4 up to and including the +** first null byte. If n>0 then copy n+1 bytes of zP4. ** -** If n==P3_KEYINFO it means that zP3 is a pointer to a KeyInfo structure. +** If n==P4_KEYINFO it means that zP4 is a pointer to a KeyInfo structure. ** A copy is made of the KeyInfo structure into memory obtained from -** sqliteMalloc, to be freed when the Vdbe is finalized. -** n==P3_KEYINFO_HANDOFF indicates that zP3 points to a KeyInfo structure -** stored in memory that the caller has obtained from sqliteMalloc. The +** sqlite3_malloc, to be freed when the Vdbe is finalized. +** n==P4_KEYINFO_HANDOFF indicates that zP4 points to a KeyInfo structure +** stored in memory that the caller has obtained from sqlite3_malloc. The ** caller should not free the allocation, it will be freed when the Vdbe is ** finalized. ** -** Other values of n (P3_STATIC, P3_COLLSEQ etc.) indicate that zP3 points +** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points ** to a string or structure that is guaranteed to exist for the lifetime of ** the Vdbe. In these cases we can just copy the pointer. ** -** If addr<0 then change P3 on the most recently inserted instruction. +** If addr<0 then change P4 on the most recently inserted instruction. */ -void sqlite3VdbeChangeP3(Vdbe *p, int addr, const char *zP3, int n){ +void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){ Op *pOp; - assert( p==0 || p->magic==VDBE_MAGIC_INIT ); - if( p==0 || p->aOp==0 || sqlite3MallocFailed() ){ - if (n != P3_KEYINFO) { - freeP3(n, (void*)*(char**)&zP3); + sqlite3 *db; + assert( p!=0 ); + db = p->db; + assert( p->magic==VDBE_MAGIC_INIT ); + if( p->aOp==0 || db->mallocFailed ){ + if (n != P4_KEYINFO) { + freeP4(db, n, (void*)*(char**)&zP4); } return; } - if( addr<0 || addr>=p->nOp ){ + assert( p->nOp>0 ); + assert( addrnOp ); + if( addr<0 ){ addr = p->nOp - 1; - if( addr<0 ) return; } pOp = &p->aOp[addr]; - freeP3(pOp->p3type, pOp->p3); - pOp->p3 = 0; - if( zP3==0 ){ - pOp->p3 = 0; - pOp->p3type = P3_NOTUSED; - }else if( n==P3_KEYINFO ){ + freeP4(db, pOp->p4type, pOp->p4.p); + pOp->p4.p = 0; + if( n==P4_INT32 ){ + /* Note: this cast is safe, because the origin data point was an int + ** that was cast to a (const char *). 
*/ + pOp->p4.i = SQLITE_PTR_TO_INT(zP4); + pOp->p4type = P4_INT32; + }else if( zP4==0 ){ + pOp->p4.p = 0; + pOp->p4type = P4_NOTUSED; + }else if( n==P4_KEYINFO ){ KeyInfo *pKeyInfo; int nField, nByte; - nField = ((KeyInfo*)zP3)->nField; + nField = ((KeyInfo*)zP4)->nField; nByte = sizeof(*pKeyInfo) + (nField-1)*sizeof(pKeyInfo->aColl[0]) + nField; - pKeyInfo = sqliteMallocRaw( nByte ); - pOp->p3 = (char*)pKeyInfo; + pKeyInfo = sqlite3Malloc( nByte ); + pOp->p4.pKeyInfo = pKeyInfo; if( pKeyInfo ){ - unsigned char *aSortOrder; - memcpy(pKeyInfo, zP3, nByte); + u8 *aSortOrder; + memcpy(pKeyInfo, zP4, nByte); aSortOrder = pKeyInfo->aSortOrder; if( aSortOrder ){ pKeyInfo->aSortOrder = (unsigned char*)&pKeyInfo->aColl[nField]; memcpy(pKeyInfo->aSortOrder, aSortOrder, nField); } - pOp->p3type = P3_KEYINFO; + pOp->p4type = P4_KEYINFO; }else{ - pOp->p3type = P3_NOTUSED; + p->db->mallocFailed = 1; + pOp->p4type = P4_NOTUSED; } - }else if( n==P3_KEYINFO_HANDOFF ){ - pOp->p3 = (char*)zP3; - pOp->p3type = P3_KEYINFO; + }else if( n==P4_KEYINFO_HANDOFF ){ + pOp->p4.p = (void*)zP4; + pOp->p4type = P4_KEYINFO; }else if( n<0 ){ - pOp->p3 = (char*)zP3; - pOp->p3type = n; + pOp->p4.p = (void*)zP4; + pOp->p4type = (signed char)n; }else{ - if( n==0 ) n = strlen(zP3); - pOp->p3 = sqliteStrNDup(zP3, n); - pOp->p3type = P3_DYNAMIC; + if( n==0 ) n = sqlite3Strlen30(zP4); + pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n); + pOp->p4type = P4_DYNAMIC; } } #ifndef NDEBUG /* -** Replace the P3 field of the most recently coded instruction with -** comment text. +** Change the comment on the the most recently coded instruction. Or +** insert a No-op and add the comment to that new instruction. This +** makes the code easier to read during debugging. None of this happens +** in a production build. */ void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){ va_list ap; assert( p->nOp>0 || p->aOp==0 ); - assert( p->aOp==0 || p->aOp[p->nOp-1].p3==0 || sqlite3MallocFailed() ); - va_start(ap, zFormat); - sqlite3VdbeChangeP3(p, -1, sqlite3VMPrintf(zFormat, ap), P3_DYNAMIC); - va_end(ap); + assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); + if( p->nOp ){ + char **pz = &p->aOp[p->nOp-1].zComment; + va_start(ap, zFormat); + sqlite3DbFree(p->db, *pz); + *pz = sqlite3VMPrintf(p->db, zFormat, ap); + va_end(ap); + } } -#endif - -/* -** Return the opcode for a given address. +void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ + va_list ap; + sqlite3VdbeAddOp0(p, OP_Noop); + assert( p->nOp>0 || p->aOp==0 ); + assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); + if( p->nOp ){ + char **pz = &p->aOp[p->nOp-1].zComment; + va_start(ap, zFormat); + sqlite3DbFree(p->db, *pz); + *pz = sqlite3VMPrintf(p->db, zFormat, ap); + va_end(ap); + } +} +#endif /* NDEBUG */ + +/* +** Return the opcode for a given address. If the address is -1, then +** return the most recently inserted opcode. +** +** If a memory allocation error has occurred prior to the calling of this +** routine, then a pointer to a dummy VdbeOp will be returned. That opcode +** is readable and writable, but it has no effect. The return of a dummy +** opcode allows the call to continue functioning after a OOM fault without +** having to check to see if the return from this routine is a valid pointer. +** +** About the #ifdef SQLITE_OMIT_TRACE: Normally, this routine is never called +** unless p->nOp>0. 
This is because in the absense of SQLITE_OMIT_TRACE, +** an OP_Trace instruction is always inserted by sqlite3VdbeGet() as soon as +** a new VDBE is created. So we are free to set addr to p->nOp-1 without +** having to double-check to make sure that the result is non-negative. But +** if SQLITE_OMIT_TRACE is defined, the OP_Trace is omitted and we do need to +** check the value of p->nOp-1 before continuing. */ VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ + static VdbeOp dummy; assert( p->magic==VDBE_MAGIC_INIT ); - assert( (addr>=0 && addrnOp) || sqlite3MallocFailed() ); - return ((addr>=0 && addrnOp)?(&p->aOp[addr]):0); + if( addr<0 ){ +#ifdef SQLITE_OMIT_TRACE + if( p->nOp==0 ) return &dummy; +#endif + addr = p->nOp - 1; + } + assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); + if( p->db->mallocFailed ){ + return &dummy; + }else{ + return &p->aOp[addr]; + } } #if !defined(SQLITE_OMIT_EXPLAIN) || !defined(NDEBUG) \ || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) /* -** Compute a string that describes the P3 parameter for an opcode. +** Compute a string that describes the P4 parameter for an opcode. ** Use zTemp for any required temporary buffer space. */ -static char *displayP3(Op *pOp, char *zTemp, int nTemp){ - char *zP3; +static char *displayP4(Op *pOp, char *zTemp, int nTemp){ + char *zP4 = zTemp; assert( nTemp>=20 ); - switch( pOp->p3type ){ - case P3_KEYINFO: { + switch( pOp->p4type ){ + case P4_KEYINFO_STATIC: + case P4_KEYINFO: { int i, j; - KeyInfo *pKeyInfo = (KeyInfo*)pOp->p3; + KeyInfo *pKeyInfo = pOp->p4.pKeyInfo; sqlite3_snprintf(nTemp, zTemp, "keyinfo(%d", pKeyInfo->nField); - i = strlen(zTemp); + i = sqlite3Strlen30(zTemp); for(j=0; jnField; j++){ CollSeq *pColl = pKeyInfo->aColl[j]; if( pColl ){ - int n = strlen(pColl->zName); + int n = sqlite3Strlen30(pColl->zName); if( i+n>nTemp-6 ){ memcpy(&zTemp[i],",...",4); break; @@ -620,54 +693,100 @@ zTemp[i++] = ')'; zTemp[i] = 0; assert( ip3; + case P4_COLLSEQ: { + CollSeq *pColl = pOp->p4.pColl; sqlite3_snprintf(nTemp, zTemp, "collseq(%.20s)", pColl->zName); - zP3 = zTemp; break; } - case P3_FUNCDEF: { - FuncDef *pDef = (FuncDef*)pOp->p3; + case P4_FUNCDEF: { + FuncDef *pDef = pOp->p4.pFunc; sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg); - zP3 = zTemp; + break; + } + case P4_INT64: { + sqlite3_snprintf(nTemp, zTemp, "%lld", *pOp->p4.pI64); + break; + } + case P4_INT32: { + sqlite3_snprintf(nTemp, zTemp, "%d", pOp->p4.i); + break; + } + case P4_REAL: { + sqlite3_snprintf(nTemp, zTemp, "%.16g", *pOp->p4.pReal); + break; + } + case P4_MEM: { + Mem *pMem = pOp->p4.pMem; + assert( (pMem->flags & MEM_Null)==0 ); + if( pMem->flags & MEM_Str ){ + zP4 = pMem->z; + }else if( pMem->flags & MEM_Int ){ + sqlite3_snprintf(nTemp, zTemp, "%lld", pMem->u.i); + }else if( pMem->flags & MEM_Real ){ + sqlite3_snprintf(nTemp, zTemp, "%.16g", pMem->r); + } break; } #ifndef SQLITE_OMIT_VIRTUALTABLE - case P3_VTAB: { - sqlite3_vtab *pVtab = (sqlite3_vtab*)pOp->p3; + case P4_VTAB: { + sqlite3_vtab *pVtab = pOp->p4.pVtab; sqlite3_snprintf(nTemp, zTemp, "vtab:%p:%p", pVtab, pVtab->pModule); - zP3 = zTemp; break; } #endif + case P4_INTARRAY: { + sqlite3_snprintf(nTemp, zTemp, "intarray"); + break; + } default: { - zP3 = pOp->p3; - if( zP3==0 || pOp->opcode==OP_Noop ){ - zP3 = ""; + zP4 = pOp->p4.z; + if( zP4==0 ){ + zP4 = zTemp; + zTemp[0] = 0; } } } - assert( zP3!=0 ); - return zP3; + assert( zP4!=0 ); + return zP4; } #endif +/* +** Declare to the Vdbe that the BTree object at db->aDb[i] is used. 
+** +*/ +void sqlite3VdbeUsesBtree(Vdbe *p, int i){ + int mask; + assert( i>=0 && idb->nDb && ibtreeMask)*8 ); + mask = ((u32)1)<btreeMask & mask)==0 ){ + p->btreeMask |= mask; + sqlite3BtreeMutexArrayInsert(&p->aMutex, p->db->aDb[i].pBt); + } +} + #if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) /* ** Print a single opcode. This routine is used for debugging only. */ void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){ - char *zP3; + char *zP4; char zPtr[50]; - static const char *zFormat1 = "%4d %-13s %4d %4d %s\n"; + static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-4s %.2X %s\n"; if( pOut==0 ) pOut = stdout; - zP3 = displayP3(pOp, zPtr, sizeof(zPtr)); - fprintf(pOut, zFormat1, - pc, sqlite3OpcodeNames[pOp->opcode], pOp->p1, pOp->p2, zP3); + zP4 = displayP4(pOp, zPtr, sizeof(zPtr)); + fprintf(pOut, zFormat1, pc, + sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5, +#ifdef SQLITE_DEBUG + pOp->zComment ? pOp->zComment : "" +#else + "" +#endif + ); fflush(pOut); } #endif @@ -676,12 +795,57 @@ ** Release an array of N Mem elements */ static void releaseMemArray(Mem *p, int N){ - if( p ){ - while( N-->0 ){ - sqlite3VdbeMemRelease(p++); + if( p && N ){ + Mem *pEnd; + sqlite3 *db = p->db; + u8 malloc_failed = db->mallocFailed; + for(pEnd=&p[N]; pflags&(MEM_Agg|MEM_Dyn) ){ + sqlite3VdbeMemRelease(p); + }else if( p->zMalloc ){ + sqlite3DbFree(db, p->zMalloc); + p->zMalloc = 0; + } + + p->flags = MEM_Null; + } + db->mallocFailed = malloc_failed; + } +} + +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +int sqlite3VdbeReleaseBuffers(Vdbe *p){ + int ii; + int nFree = 0; + assert( sqlite3_mutex_held(p->db->mutex) ); + for(ii=1; ii<=p->nMem; ii++){ + Mem *pMem = &p->aMem[ii]; + if( pMem->flags & MEM_RowSet ){ + sqlite3RowSetClear(pMem->u.pRowSet); + } + if( pMem->z && pMem->flags&MEM_Dyn ){ + assert( !pMem->xDel ); + nFree += sqlite3DbMallocSize(pMem->db, pMem->z); + sqlite3VdbeMemRelease(pMem); } } + return nFree; } +#endif #ifndef SQLITE_OMIT_EXPLAIN /* @@ -690,6 +854,11 @@ ** The interface is the same as sqlite3VdbeExec(). But instead of ** running the code, it invokes the callback once for each instruction. ** This feature is used to implement "EXPLAIN". +** +** When p->explain==1, each instruction is listed. When +** p->explain==2, only OP_Explain instructions are listed and these +** are shown in a different format. p->explain==2 is used to implement +** EXPLAIN QUERY PLAN. */ int sqlite3VdbeList( Vdbe *p /* The VDBE */ @@ -697,20 +866,25 @@ sqlite3 *db = p->db; int i; int rc = SQLITE_OK; + Mem *pMem = p->pResultSet = &p->aMem[1]; assert( p->explain ); if( p->magic!=VDBE_MAGIC_RUN ) return SQLITE_MISUSE; assert( db->magic==SQLITE_MAGIC_BUSY ); - assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY ); + assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); - /* Even though this opcode does not put dynamic strings onto the - ** the stack, they may become dynamic if the user calls + /* Even though this opcode does not use dynamic strings for + ** the result, result columns may become dynamic if the user calls ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. */ - if( p->pTos==&p->aStack[4] ){ - releaseMemArray(p->aStack, 5); + releaseMemArray(pMem, p->nMem); + + if( p->rc==SQLITE_NOMEM ){ + /* This happens if a malloc() inside a call to sqlite3_column_text() or + ** sqlite3_column_text16() failed. 
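sqlite3VdbeList() above is what services EXPLAIN: with p->explain==1 each opcode comes back as a row of eight columns (addr, opcode, p1, p2, p3, p4, p5, comment), and with p->explain==2 (EXPLAIN QUERY PLAN) only three. A hedged sketch of reading the full-EXPLAIN form through the public API, with a hypothetical query:

    sqlite3_stmt *pEx = 0;
    sqlite3_prepare_v2(db, "EXPLAIN SELECT * FROM t1", -1, &pEx, 0);
    while( sqlite3_step(pEx)==SQLITE_ROW ){
      printf("%4d %-13s %4d %4d %4d %s\n",
          sqlite3_column_int(pEx, 0),                        /* addr   */
          (const char*)sqlite3_column_text(pEx, 1),          /* opcode */
          sqlite3_column_int(pEx, 2),                        /* p1     */
          sqlite3_column_int(pEx, 3),                        /* p2     */
          sqlite3_column_int(pEx, 4),                        /* p3     */
          (const char*)sqlite3_column_text(pEx, 5));         /* p4     */
    }
    sqlite3_finalize(pEx);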
*/ + db->mallocFailed = 1; + return SQLITE_ERROR; } - p->resOnStack = 0; do{ i = p->pc++; @@ -721,22 +895,24 @@ }else if( db->u1.isInterrupted ){ p->rc = SQLITE_INTERRUPT; rc = SQLITE_ERROR; - sqlite3SetString(&p->zErrMsg, sqlite3ErrStr(p->rc), (char*)0); + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(p->rc)); }else{ + char *z; Op *pOp = &p->aOp[i]; - Mem *pMem = p->aStack; - pMem->flags = MEM_Int; - pMem->type = SQLITE_INTEGER; - pMem->u.i = i; /* Program counter */ - pMem++; - - pMem->flags = MEM_Static|MEM_Str|MEM_Term; - pMem->z = (char*)sqlite3OpcodeNames[pOp->opcode]; /* Opcode */ - assert( pMem->z!=0 ); - pMem->n = strlen(pMem->z); - pMem->type = SQLITE_TEXT; - pMem->enc = SQLITE_UTF8; - pMem++; + if( p->explain==1 ){ + pMem->flags = MEM_Int; + pMem->type = SQLITE_INTEGER; + pMem->u.i = i; /* Program counter */ + pMem++; + + pMem->flags = MEM_Static|MEM_Str|MEM_Term; + pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */ + assert( pMem->z!=0 ); + pMem->n = sqlite3Strlen30(pMem->z); + pMem->type = SQLITE_TEXT; + pMem->enc = SQLITE_UTF8; + pMem++; + } pMem->flags = MEM_Int; pMem->u.i = pOp->p1; /* P1 */ @@ -748,17 +924,58 @@ pMem->type = SQLITE_INTEGER; pMem++; - pMem->flags = MEM_Ephem|MEM_Str|MEM_Term; /* P3 */ - pMem->z = displayP3(pOp, pMem->zShort, sizeof(pMem->zShort)); - assert( pMem->z!=0 ); - pMem->n = strlen(pMem->z); + if( p->explain==1 ){ + pMem->flags = MEM_Int; + pMem->u.i = pOp->p3; /* P3 */ + pMem->type = SQLITE_INTEGER; + pMem++; + } + + if( sqlite3VdbeMemGrow(pMem, 32, 0) ){ /* P4 */ + assert( p->db->mallocFailed ); + return SQLITE_ERROR; + } + pMem->flags = MEM_Dyn|MEM_Str|MEM_Term; + z = displayP4(pOp, pMem->z, 32); + if( z!=pMem->z ){ + sqlite3VdbeMemSetStr(pMem, z, -1, SQLITE_UTF8, 0); + }else{ + assert( pMem->z!=0 ); + pMem->n = sqlite3Strlen30(pMem->z); + pMem->enc = SQLITE_UTF8; + } pMem->type = SQLITE_TEXT; - pMem->enc = SQLITE_UTF8; + pMem++; + + if( p->explain==1 ){ + if( sqlite3VdbeMemGrow(pMem, 4, 0) ){ + assert( p->db->mallocFailed ); + return SQLITE_ERROR; + } + pMem->flags = MEM_Dyn|MEM_Str|MEM_Term; + pMem->n = 2; + sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */ + pMem->type = SQLITE_TEXT; + pMem->enc = SQLITE_UTF8; + pMem++; + +#ifdef SQLITE_DEBUG + if( pOp->zComment ){ + pMem->flags = MEM_Str|MEM_Term; + pMem->z = pOp->zComment; + pMem->n = sqlite3Strlen30(pMem->z); + pMem->enc = SQLITE_UTF8; + pMem->type = SQLITE_TEXT; + }else +#endif + { + pMem->flags = MEM_Null; /* Comment */ + pMem->type = SQLITE_NULL; + } + } - p->nResColumn = 5 - 2*(p->explain-1); - p->pTos = pMem; + p->nResColumn = 8 - 5*(p->explain-1); p->rc = SQLITE_OK; - p->resOnStack = 1; rc = SQLITE_ROW; } return rc; @@ -773,10 +990,10 @@ int nOp = p->nOp; VdbeOp *pOp; if( nOp<1 ) return; - pOp = &p->aOp[nOp-1]; - if( pOp->opcode==OP_Noop && pOp->p3!=0 ){ - const char *z = pOp->p3; - while( isspace(*(u8*)z) ) z++; + pOp = &p->aOp[0]; + if( pOp->opcode==OP_Trace && pOp->p4.z!=0 ){ + const char *z = pOp->p4.z; + while( sqlite3Isspace(*z) ) z++; printf("SQL: [%s]\n", z); } } @@ -789,16 +1006,16 @@ void sqlite3VdbeIOTraceSql(Vdbe *p){ int nOp = p->nOp; VdbeOp *pOp; - if( sqlite3_io_trace==0 ) return; + if( sqlite3IoTrace==0 ) return; if( nOp<1 ) return; - pOp = &p->aOp[nOp-1]; - if( pOp->opcode==OP_Noop && pOp->p3!=0 ){ + pOp = &p->aOp[0]; + if( pOp->opcode==OP_Trace && pOp->p4.z!=0 ){ int i, j; char z[1000]; - sqlite3_snprintf(sizeof(z), z, "%s", pOp->p3); - for(i=0; isspace((unsigned char)z[i]); i++){} + sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z); + for(i=0; 
sqlite3Isspace(z[i]); i++){} for(j=0; z[i]; i++){ - if( isspace((unsigned char)z[i]) ){ + if( sqlite3Isspace(z[i]) ){ if( z[i-1]!=' ' ){ z[j++] = ' '; } @@ -807,11 +1024,45 @@ } } z[j] = 0; - sqlite3_io_trace("SQL %s\n", z); + sqlite3IoTrace("SQL %s\n", z); } } #endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */ +/* +** Allocate space from a fixed size buffer. Make *pp point to the +** allocated space. (Note: pp is a char* rather than a void** to +** work around the pointer aliasing rules of C.) *pp should initially +** be zero. If *pp is not zero, that means that the space has already +** been allocated and this routine is a noop. +** +** nByte is the number of bytes of space needed. +** +** *ppFrom point to available space and pEnd points to the end of the +** available space. +** +** *pnByte is a counter of the number of bytes of space that have failed +** to allocate. If there is insufficient space in *ppFrom to satisfy the +** request, then increment *pnByte by the amount of the request. +*/ +static void allocSpace( + char *pp, /* IN/OUT: Set *pp to point to allocated buffer */ + int nByte, /* Number of bytes to allocate */ + u8 **ppFrom, /* IN/OUT: Allocate from *ppFrom */ + u8 *pEnd, /* Pointer to 1 byte past the end of *ppFrom buffer */ + int *pnByte /* If allocation cannot be made, increment *pnByte */ +){ + assert( EIGHT_BYTE_ALIGNMENT(*ppFrom) ); + if( (*(void**)pp)==0 ){ + nByte = ROUND8(nByte); + if( (pEnd - *ppFrom)>=nByte ){ + *(void**)pp = (void *)*ppFrom; + *ppFrom += nByte; + }else{ + *pnByte += nByte; + } + } +} /* ** Prepare a virtual machine for execution. This involves things such @@ -821,6 +1072,14 @@ ** ** This is the only way to move a VDBE from VDBE_MAGIC_INIT to ** VDBE_MAGIC_RUN. +** +** This function may be called more than once on a single virtual machine. +** The first call is made while compiling the SQL statement. Subsequent +** calls are made as part of the process of resetting a statement to be +** re-executed (from a call to sqlite3_reset()). The nVar, nMem, nCursor +** and isExplain parameters are only passed correct values the first time +** the function is called. On subsequent calls, from sqlite3_reset(), nVar +** is passed -1 and nMem, nCursor and isExplain are all passed zero. */ void sqlite3VdbeMakeReady( Vdbe *p, /* The VDBE */ @@ -830,6 +1089,7 @@ int isExplain /* True if the EXPLAIN keywords is present */ ){ int n; + sqlite3 *db = p->db; assert( p!=0 ); assert( p->magic==VDBE_MAGIC_INIT ); @@ -838,72 +1098,87 @@ */ assert( p->nOp>0 ); - /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. This - * is because the call to resizeOpArray() below may shrink the - * p->aOp[] array to save memory if called when in VDBE_MAGIC_RUN - * state. - */ + /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ p->magic = VDBE_MAGIC_RUN; - /* No instruction ever pushes more than a single element onto the - ** stack. And the stack never grows on successive executions of the - ** same loop. So the total number of instructions is an upper bound - ** on the maximum stack depth required. (Added later:) The - ** resolveP2Values() call computes a tighter upper bound on the - ** stack size. + /* For each cursor required, also allocate a memory cell. Memory + ** cells (nMem+1-nCursor)..nMem, inclusive, will never be used by + ** the vdbe program. Instead they are used to allocate space for + ** VdbeCursor/BtCursor structures. The blob of memory associated with + ** cursor 0 is stored in memory cell nMem. 
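allocSpace() above lets sqlite3VdbeMakeReady() run the same allocation sequence twice: a first pass carves the registers, variables and cursor slots out of the unused tail of the opcode array, and any requests that did not fit are totalled in *pnByte so a second pass can satisfy them from one extra sqlite3DbMallocRaw() allocation. A generic sketch of that measure-then-carve idea, not SQLite's actual code:

    /* Carve an 8-byte-aligned chunk of n bytes out of [*pzFrom, zEnd).
    ** If it does not fit, add the rounded size to *pnShortfall instead. */
    static void carve(void **pp, size_t n, unsigned char **pzFrom,
                      unsigned char *zEnd, size_t *pnShortfall){
      if( *pp ) return;                 /* already satisfied on a prior pass */
      n = (n + 7) & ~(size_t)7;
      if( (size_t)(zEnd - *pzFrom)>=n ){
        *pp = (void*)*pzFrom;
        *pzFrom += n;
      }else{
        *pnShortfall += n;
      }
    }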
Memory cell (nMem-1) + ** stores the blob of memory associated with cursor 1, etc. ** - ** Allocation all the stack space we will ever need. + ** See also: allocateCursor(). + */ + nMem += nCursor; + + /* Allocate space for memory registers, SQL variables, VDBE cursors and + ** an array to marshal SQL function arguments in. This is only done the + ** first time this function is called for a given VDBE, not when it is + ** being called from sqlite3_reset() to reset the virtual machine. */ - if( p->aStack==0 ){ + if( nVar>=0 && !db->mallocFailed ){ + u8 *zCsr = (u8 *)&p->aOp[p->nOp]; + u8 *zEnd = (u8 *)&p->aOp[p->nOpAlloc]; + int nByte; int nArg; /* Maximum number of args passed to a user function. */ - int nStack; /* Maximum number of stack entries required */ - resolveP2Values(p, &nArg, &nStack); - resizeOpArray(p, p->nOp); - assert( nVar>=0 ); - assert( nStacknOp ); - if( isExplain ){ - nStack = 10; - } - p->aStack = sqliteMalloc( - nStack*sizeof(p->aStack[0]) /* aStack */ - + nArg*sizeof(Mem*) /* apArg */ - + nVar*sizeof(Mem) /* aVar */ - + nVar*sizeof(char*) /* azVar */ - + nMem*sizeof(Mem) /* aMem */ - + nCursor*sizeof(Cursor*) /* apCsr */ - ); - if( !sqlite3MallocFailed() ){ - p->aMem = &p->aStack[nStack]; - p->nMem = nMem; - p->aVar = &p->aMem[nMem]; - p->nVar = nVar; - p->okVar = 0; - p->apArg = (Mem**)&p->aVar[nVar]; - p->azVar = (char**)&p->apArg[nArg]; - p->apCsr = (Cursor**)&p->azVar[nVar]; - p->nCursor = nCursor; + resolveP2Values(p, &nArg); + if( isExplain && nMem<10 ){ + nMem = 10; + } + zCsr += (zCsr - (u8*)0)&7; + assert( EIGHT_BYTE_ALIGNMENT(zCsr) ); + if( zEndaMem, nMem*sizeof(Mem), &zCsr, zEnd, &nByte); + allocSpace((char*)&p->aVar, nVar*sizeof(Mem), &zCsr, zEnd, &nByte); + allocSpace((char*)&p->apArg, nArg*sizeof(Mem*), &zCsr, zEnd, &nByte); + allocSpace((char*)&p->azVar, nVar*sizeof(char*), &zCsr, zEnd, &nByte); + allocSpace((char*)&p->apCsr, + nCursor*sizeof(VdbeCursor*), &zCsr, zEnd, &nByte + ); + if( nByte ){ + p->pFree = sqlite3DbMallocRaw(db, nByte); + } + zCsr = p->pFree; + zEnd = &zCsr[nByte]; + }while( nByte && !db->mallocFailed ); + + p->nCursor = (u16)nCursor; + if( p->aVar ){ + p->nVar = (u16)nVar; for(n=0; naVar[n].flags = MEM_Null; + p->aVar[n].db = db; + } + } + if( p->aMem ){ + p->aMem--; /* aMem[] goes from 1..nMem */ + p->nMem = nMem; /* not from 0..nMem-1 */ + for(n=1; n<=nMem; n++){ + p->aMem[n].flags = MEM_Null; + p->aMem[n].db = db; } } } - for(n=0; nnMem; n++){ - p->aMem[n].flags = MEM_Null; +#ifdef SQLITE_DEBUG + for(n=1; nnMem; n++){ + assert( p->aMem[n].db==db ); } +#endif - p->pTos = &p->aStack[-1]; p->pc = -1; p->rc = SQLITE_OK; - p->uniqueCnt = 0; - p->returnDepth = 0; p->errorAction = OE_Abort; - p->popStack = 0; p->explain |= isExplain; p->magic = VDBE_MAGIC_RUN; p->nChange = 0; p->cacheCtr = 1; p->minWriteFileFormat = 255; - p->openedStatement = 0; + p->iStatement = 0; #ifdef VDBE_PROFILE { int i; @@ -916,44 +1191,47 @@ } /* -** Close a cursor and release all the resources that cursor happens -** to hold. +** Close a VDBE cursor and release all the resources that cursor +** happens to hold. */ -void sqlite3VdbeFreeCursor(Vdbe *p, Cursor *pCx){ +void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } - if( pCx->pCursor ){ - sqlite3BtreeCloseCursor(pCx->pCursor); - } if( pCx->pBt ){ sqlite3BtreeClose(pCx->pBt); + /* The pCx->pCursor will be close automatically, if it exists, by + ** the call above. 
*/ + }else if( pCx->pCursor ){ + sqlite3BtreeCloseCursor(pCx->pCursor); } #ifndef SQLITE_OMIT_VIRTUALTABLE if( pCx->pVtabCursor ){ sqlite3_vtab_cursor *pVtabCursor = pCx->pVtabCursor; const sqlite3_module *pModule = pCx->pModule; p->inVtabMethod = 1; - sqlite3SafetyOff(p->db); + (void)sqlite3SafetyOff(p->db); pModule->xClose(pVtabCursor); - sqlite3SafetyOn(p->db); + (void)sqlite3SafetyOn(p->db); p->inVtabMethod = 0; } #endif - sqliteFree(pCx->pData); - sqliteFree(pCx->aType); - sqliteFree(pCx); + if( !pCx->ephemPseudoTable ){ + sqlite3DbFree(p->db, pCx->pData); + } } /* -** Close all cursors +** Close all cursors except for VTab cursors that are currently +** in use. */ -static void closeAllCursors(Vdbe *p){ +static void closeAllCursorsExceptActiveVtabs(Vdbe *p){ int i; if( p->apCsr==0 ) return; for(i=0; inCursor; i++){ - if( !p->inVtabMethod || (p->apCsr[i] && !p->apCsr[i]->pVtabCursor) ){ - sqlite3VdbeFreeCursor(p, p->apCsr[i]); + VdbeCursor *pC = p->apCsr[i]; + if( pC && (!p->inVtabMethod || !pC->pVtabCursor) ){ + sqlite3VdbeFreeCursor(p, pC); p->apCsr[i] = 0; } } @@ -968,25 +1246,25 @@ */ static void Cleanup(Vdbe *p){ int i; - if( p->aStack ){ - releaseMemArray(p->aStack, 1 + (p->pTos - p->aStack)); - p->pTos = &p->aStack[-1]; - } - closeAllCursors(p); - releaseMemArray(p->aMem, p->nMem); - sqlite3VdbeFifoClear(&p->sFifo); - if( p->contextStack ){ - for(i=0; icontextStackTop; i++){ - sqlite3VdbeFifoClear(&p->contextStack[i].sFifo); + sqlite3 *db = p->db; + Mem *pMem; + closeAllCursorsExceptActiveVtabs(p); + for(pMem=&p->aMem[1], i=1; i<=p->nMem; i++, pMem++){ + if( pMem->flags & MEM_RowSet ){ + sqlite3RowSetClear(pMem->u.pRowSet); } - sqliteFree(p->contextStack); + MemSetTypeFlag(pMem, MEM_Null); + } + releaseMemArray(&p->aMem[1], p->nMem); + if( p->contextStack ){ + sqlite3DbFree(db, p->contextStack); } p->contextStack = 0; p->contextStackDepth = 0; p->contextStackTop = 0; - sqliteFree(p->zErrMsg); + sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; - p->resOnStack = 0; + p->pResultSet = 0; } /* @@ -998,14 +1276,18 @@ void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ Mem *pColName; int n; + sqlite3 *db = p->db; + releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); - sqliteFree(p->aColName); + sqlite3DbFree(db, p->aColName); n = nResColumn*COLNAME_N; - p->nResColumn = nResColumn; - p->aColName = pColName = (Mem*)sqliteMalloc( sizeof(Mem)*n ); + p->nResColumn = (u16)nResColumn; + p->aColName = pColName = (Mem*)sqlite3DbMallocZero(db, sizeof(Mem)*n ); if( p->aColName==0 ) return; while( n-- > 0 ){ - (pColName++)->flags = MEM_Null; + pColName->flags = MEM_Null; + pColName->db = p->db; + pColName++; } } @@ -1015,28 +1297,29 @@ ** ** This call must be made after a call to sqlite3VdbeSetNumCols(). ** -** If N==P3_STATIC it means that zName is a pointer to a constant static -** string and we can just copy the pointer. If it is P3_DYNAMIC, then -** the string is freed using sqliteFree() when the vdbe is finished with -** it. Otherwise, N bytes of zName are copied. -*/ -int sqlite3VdbeSetColName(Vdbe *p, int idx, int var, const char *zName, int N){ +** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC +** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed +** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed. 
+*/ +int sqlite3VdbeSetColName( + Vdbe *p, /* Vdbe being configured */ + int idx, /* Index of column zName applies to */ + int var, /* One of the COLNAME_* constants */ + const char *zName, /* Pointer to buffer containing name */ + void (*xDel)(void*) /* Memory management strategy for zName */ +){ int rc; Mem *pColName; assert( idxnResColumn ); assert( vardb->mallocFailed ){ + assert( !zName || xDel!=SQLITE_DYNAMIC ); + return SQLITE_NOMEM; + } assert( p->aColName!=0 ); pColName = &(p->aColName[idx+var*p->nResColumn]); - if( N==P3_DYNAMIC || N==P3_STATIC ){ - rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, SQLITE_STATIC); - }else{ - rc = sqlite3VdbeMemSetStr(pColName, zName, N, SQLITE_UTF8,SQLITE_TRANSIENT); - } - if( rc==SQLITE_OK && N==P3_DYNAMIC ){ - pColName->flags = (pColName->flags&(~MEM_Static))|MEM_Dyn; - pColName->xDel = 0; - } + rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); + assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); return rc; } @@ -1046,19 +1329,26 @@ ** write-transaction spanning more than one database file, this routine ** takes care of the master journal trickery. */ -static int vdbeCommit(sqlite3 *db){ +static int vdbeCommit(sqlite3 *db, Vdbe *p){ int i; int nTrans = 0; /* Number of databases with an active write-transaction */ int rc = SQLITE_OK; int needXcommit = 0; +#ifdef SQLITE_OMIT_VIRTUALTABLE + /* With this option, sqlite3VtabSync() is defined to be simply + ** SQLITE_OK so p is not used. + */ + UNUSED_PARAMETER(p); +#endif + /* Before doing anything else, call the xSync() callback for any ** virtual module tables written in this transaction. This has to ** be done before determining whether a master journal file is ** required, as an xSync() callback may add an attached database ** to the transaction. */ - rc = sqlite3VtabSync(db, rc); + rc = sqlite3VtabSync(db, &p->zErrMsg); if( rc!=SQLITE_OK ){ return rc; } @@ -1071,7 +1361,7 @@ */ for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt && sqlite3BtreeIsInTrans(pBt) ){ + if( sqlite3BtreeIsInTrans(pBt) ){ needXcommit = 1; if( i!=1 ) nTrans++; } @@ -1079,9 +1369,12 @@ /* If there are any write-transactions at all, invoke the commit hook */ if( needXcommit && db->xCommitCallback ){ - sqlite3SafetyOff(db); + assert( (db->flags & SQLITE_CommitBusy)==0 ); + db->flags |= SQLITE_CommitBusy; + (void)sqlite3SafetyOff(db); rc = db->xCommitCallback(db->pCommitArg); - sqlite3SafetyOn(db); + (void)sqlite3SafetyOn(db); + db->flags &= ~SQLITE_CommitBusy; if( rc ){ return SQLITE_CONSTRAINT; } @@ -1092,12 +1385,14 @@ ** master-journal. ** ** If the return value of sqlite3BtreeGetFilename() is a zero length - ** string, it means the main database is :memory:. In that case we do - ** not support atomic multi-file commits, so use the simple case then - ** too. + ** string, it means the main database is :memory: or a temp file. In + ** that case we do not support atomic multi-file commits, so use the + ** simple case then too. 
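The commit-hook invocation in vdbeCommit() above reflects the contract of the public sqlite3_commit_hook() interface: a non-zero return from the callback makes the commit fail with SQLITE_CONSTRAINT and the COMMIT is converted into a ROLLBACK. A small sketch with a hypothetical counter:

    static int nCommit = 0;
    static int my_commit_hook(void *pArg){
      (void)pArg;
      nCommit++;         /* count commit attempts                      */
      return 0;          /* returning non-zero would veto the COMMIT   */
    }

    /* ... during connection setup ... */
    sqlite3_commit_hook(db, my_commit_hook, 0);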
*/ - if( 0==strlen(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){ - for(i=0; rc==SQLITE_OK && inDb; i++){ + if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt)) + || nTrans<=1 + ){ + for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ rc = sqlite3BtreeCommitPhaseOne(pBt, 0); @@ -1126,26 +1421,34 @@ */ #ifndef SQLITE_OMIT_DISKIO else{ + sqlite3_vfs *pVfs = db->pVfs; int needSync = 0; char *zMaster = 0; /* File-name for the master journal */ char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); - OsFile *master = 0; + sqlite3_file *pMaster = 0; + i64 offset = 0; + int res; /* Select a master journal file name */ do { - u32 random; - sqliteFree(zMaster); - sqlite3Randomness(sizeof(random), &random); - zMaster = sqlite3MPrintf("%s-mj%08X", zMainFile, random&0x7fffffff); + u32 iRandom; + sqlite3DbFree(db, zMaster); + sqlite3_randomness(sizeof(iRandom), &iRandom); + zMaster = sqlite3MPrintf(db, "%s-mj%08X", zMainFile, iRandom&0x7fffffff); if( !zMaster ){ return SQLITE_NOMEM; } - }while( sqlite3OsFileExists(zMaster) ); - - /* Open the master journal. */ - rc = sqlite3OsOpenExclusive(zMaster, &master, 0); + rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); + }while( rc==SQLITE_OK && res ); + if( rc==SQLITE_OK ){ + /* Open the master journal. */ + rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster, + SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| + SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0 + ); + } if( rc!=SQLITE_OK ){ - sqliteFree(zMaster); + sqlite3DbFree(db, zMaster); return rc; } @@ -1155,36 +1458,36 @@ ** still have 'null' as the master journal pointer, so they will roll ** back independently if a failure occurs. */ - for(i=0; inDb; i++){ + for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( i==1 ) continue; /* Ignore the TEMP database */ - if( pBt && sqlite3BtreeIsInTrans(pBt) ){ + if( sqlite3BtreeIsInTrans(pBt) ){ char const *zFile = sqlite3BtreeGetJournalname(pBt); if( zFile[0]==0 ) continue; /* Ignore :memory: databases */ if( !needSync && !sqlite3BtreeSyncDisabled(pBt) ){ needSync = 1; } - rc = sqlite3OsWrite(master, zFile, strlen(zFile)+1); + rc = sqlite3OsWrite(pMaster, zFile, sqlite3Strlen30(zFile)+1, offset); + offset += sqlite3Strlen30(zFile)+1; if( rc!=SQLITE_OK ){ - sqlite3OsClose(&master); - sqlite3OsDelete(zMaster); - sqliteFree(zMaster); + sqlite3OsCloseFree(pMaster); + sqlite3OsDelete(pVfs, zMaster, 0); + sqlite3DbFree(db, zMaster); return rc; } } } - - /* Sync the master journal file. Before doing this, open the directory - ** the master journal file is store in so that it gets synced too. + /* Sync the master journal file. If the IOCAP_SEQUENTIAL device + ** flag is set this is not required. */ - zMainFile = sqlite3BtreeGetDirname(db->aDb[0].pBt); - rc = sqlite3OsOpenDirectory(master, zMainFile); - if( rc!=SQLITE_OK || - (needSync && (rc=sqlite3OsSync(master,0))!=SQLITE_OK) ){ - sqlite3OsClose(&master); - sqlite3OsDelete(zMaster); - sqliteFree(zMaster); + if( needSync + && 0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL) + && SQLITE_OK!=(rc = sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL)) + ){ + sqlite3OsCloseFree(pMaster); + sqlite3OsDelete(pVfs, zMaster, 0); + sqlite3DbFree(db, zMaster); return rc; } @@ -1196,17 +1499,17 @@ ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the ** master journal file will be orphaned. But we cannot delete it, ** in case the master journal file name was written into the journal - ** file before the failure occured. 
+ ** file before the failure occurred. */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt && sqlite3BtreeIsInTrans(pBt) ){ + if( pBt ){ rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster); } } - sqlite3OsClose(&master); + sqlite3OsCloseFree(pMaster); if( rc!=SQLITE_OK ){ - sqliteFree(zMaster); + sqlite3DbFree(db, zMaster); return rc; } @@ -1214,23 +1517,12 @@ ** doing this the directory is synced again before any individual ** transaction files are deleted. */ - rc = sqlite3OsDelete(zMaster); - sqliteFree(zMaster); + rc = sqlite3OsDelete(pVfs, zMaster, 1); + sqlite3DbFree(db, zMaster); zMaster = 0; if( rc ){ return rc; } - rc = sqlite3OsSyncDirectory(zMainFile); - if( rc!=SQLITE_OK ){ - /* This is not good. The master journal file has been deleted, but - ** the directory sync failed. There is no completely safe course of - ** action from here. The individual journals contain the name of the - ** master journal file, but there is no way of knowing if that - ** master journal exists now or if it will exist after the operating - ** system crash that may follow the fsync() failure. - */ - return rc; - } /* All files and directories have already been synced, so the following ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and @@ -1240,12 +1532,14 @@ ** may be lying around. Returning an error code won't help matters. */ disable_simulated_io_errors(); + sqlite3BeginBenignMalloc(); for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ sqlite3BtreeCommitPhaseTwo(pBt); } } + sqlite3EndBenignMalloc(); enable_simulated_io_errors(); sqlite3VtabCommit(db); @@ -1268,179 +1562,214 @@ static void checkActiveVdbeCnt(sqlite3 *db){ Vdbe *p; int cnt = 0; + int nWrite = 0; p = db->pVdbe; while( p ){ if( p->magic==VDBE_MAGIC_RUN && p->pc>=0 ){ cnt++; + if( p->readOnly==0 ) nWrite++; } p = p->pNext; } assert( cnt==db->activeVdbeCnt ); + assert( nWrite==db->writeVdbeCnt ); } #else #define checkActiveVdbeCnt(x) #endif /* -** Find every active VM other than pVdbe and change its status to -** aborted. This happens when one VM causes a rollback due to an -** ON CONFLICT ROLLBACK clause (for example). The other VMs must be -** aborted so that they do not have data rolled out from underneath -** them leading to a segfault. -*/ -void sqlite3AbortOtherActiveVdbes(sqlite3 *db, Vdbe *pExcept){ - Vdbe *pOther; - for(pOther=db->pVdbe; pOther; pOther=pOther->pNext){ - if( pOther==pExcept ) continue; - if( pOther->magic!=VDBE_MAGIC_RUN || pOther->pc<0 ) continue; - checkActiveVdbeCnt(db); - closeAllCursors(pOther); - checkActiveVdbeCnt(db); - pOther->aborted = 1; +** For every Btree that in database connection db which +** has been modified, "trip" or invalidate each cursor in +** that Btree might have been modified so that the cursor +** can never be used again. This happens when a rollback +*** occurs. We have to trip all the other cursors, even +** cursor from other VMs in different database connections, +** so that none of them try to use the data at which they +** were pointing and which now may have been changed due +** to the rollback. +** +** Remember that a rollback can delete tables complete and +** reorder rootpages. So it is not sufficient just to save +** the state of the cursor. We have to invalidate the cursor +** so that it is never used again. 
+*/ +static void invalidateCursorsOnModifiedBtrees(sqlite3 *db){ + int i; + for(i=0; inDb; i++){ + Btree *p = db->aDb[i].pBt; + if( p && sqlite3BtreeIsInTrans(p) ){ + sqlite3BtreeTripAllCursors(p, SQLITE_ABORT); + } } } /* +** If the Vdbe passed as the first argument opened a statement-transaction, +** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or +** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement +** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the +** statement transaction is commtted. +** +** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned. +** Otherwise SQLITE_OK. +*/ +int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ + sqlite3 *const db = p->db; + int rc = SQLITE_OK; + if( p->iStatement && db->nStatement ){ + int i; + const int iSavepoint = p->iStatement-1; + + assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); + assert( db->nStatement>0 ); + assert( p->iStatement==(db->nStatement+db->nSavepoint) ); + + for(i=0; inDb; i++){ + int rc2 = SQLITE_OK; + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + if( eOp==SAVEPOINT_ROLLBACK ){ + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); + } + if( rc2==SQLITE_OK ){ + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); + } + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + db->nStatement--; + p->iStatement = 0; + } + return rc; +} + +/* +** If SQLite is compiled to support shared-cache mode and to be threadsafe, +** this routine obtains the mutex associated with each BtShared structure +** that may be accessed by the VM passed as an argument. In doing so it +** sets the BtShared.db member of each of the BtShared structures, ensuring +** that the correct busy-handler callback is invoked if required. +** +** If SQLite is not threadsafe but does support shared-cache mode, then +** sqlite3BtreeEnterAll() is invoked to set the BtShared.db variables +** of all of BtShared structures accessible via the database handle +** associated with the VM. Of course only a subset of these structures +** will be accessed by the VM, and we could use Vdbe.btreeMask to figure +** that subset out, but there is no advantage to doing so. +** +** If SQLite is not threadsafe and does not support shared-cache mode, this +** function is a no-op. +*/ +#ifndef SQLITE_OMIT_SHARED_CACHE +void sqlite3VdbeMutexArrayEnter(Vdbe *p){ +#if SQLITE_THREADSAFE + sqlite3BtreeMutexArrayEnter(&p->aMutex); +#else + sqlite3BtreeEnterAll(p->db); +#endif +} +#endif + +/* ** This routine is called the when a VDBE tries to halt. If the VDBE ** has made changes and is in autocommit mode, then commit those ** changes. If a rollback is needed, then do the rollback. ** ** This routine is the only way to move the state of a VM from -** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. +** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to +** call this on a VM that is in the SQLITE_MAGIC_HALT state. ** ** Return an error code. If the commit could not complete because of ** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it ** means the close did not happen and needs to be repeated. 
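At the API level, the SQLITE_BUSY case described in the comment above surfaces from sqlite3_step() when the commit cannot obtain the necessary locks. A sketch of the conventional retry handling, assuming a prepared write statement pStmt; the retry count and back-off interval are arbitrary, and sqlite3_busy_timeout() is the simpler built-in alternative.

/* Retry a step (for example the implicit COMMIT of a write statement)
** that fails with SQLITE_BUSY because another connection holds a
** conflicting lock.  SQLITE_BUSY means nothing was committed, so the
** same call may simply be repeated. */
int rc;
int nTry = 0;
do{
  rc = sqlite3_step(pStmt);
  if( rc==SQLITE_BUSY ){
    sqlite3_sleep(10);            /* arbitrary back-off, in milliseconds */
  }
}while( rc==SQLITE_BUSY && (++nTry)<100 );
sqlite3_reset(pStmt);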
*/ int sqlite3VdbeHalt(Vdbe *p){ + int rc; /* Used to store transient return codes */ sqlite3 *db = p->db; - int i; - int (*xFunc)(Btree *pBt) = 0; /* Function to call on each btree backend */ - int isSpecialError; /* Set to true if SQLITE_NOMEM or IOERR */ /* This function contains the logic that determines if a statement or ** transaction will be committed or rolled back as a result of the ** execution of this virtual machine. ** - ** Special errors: - ** - ** If an SQLITE_NOMEM error has occured in a statement that writes to - ** the database, then either a statement or transaction must be rolled - ** back to ensure the tree-structures are in a consistent state. A - ** statement transaction is rolled back if one is open, otherwise the - ** entire transaction must be rolled back. + ** If any of the following errors occur: ** - ** If an SQLITE_IOERR error has occured in a statement that writes to - ** the database, then the entire transaction must be rolled back. The - ** I/O error may have caused garbage to be written to the journal - ** file. Were the transaction to continue and eventually be rolled - ** back that garbage might end up in the database file. - ** - ** In both of the above cases, the Vdbe.errorAction variable is - ** ignored. If the sqlite3.autoCommit flag is false and a transaction - ** is rolled back, it will be set to true. - ** - ** Other errors: - ** - ** No error: + ** SQLITE_NOMEM + ** SQLITE_IOERR + ** SQLITE_FULL + ** SQLITE_INTERRUPT ** + ** Then the internal cache might have been left in an inconsistent + ** state. We need to rollback the statement transaction, if there is + ** one, or the complete transaction if there is no statement transaction. */ - if( sqlite3MallocFailed() ){ + if( p->db->mallocFailed ){ p->rc = SQLITE_NOMEM; } + closeAllCursorsExceptActiveVtabs(p); if( p->magic!=VDBE_MAGIC_RUN ){ - /* Already halted. Nothing to do. */ - assert( p->magic==VDBE_MAGIC_HALT ); -#ifndef SQLITE_OMIT_VIRTUALTABLE - closeAllCursors(p); -#endif return SQLITE_OK; } - closeAllCursors(p); checkActiveVdbeCnt(db); /* No commit or rollback needed if the program never started */ if( p->pc>=0 ){ int mrc; /* Primary error code from p->rc */ - /* Check for one of the special errors - SQLITE_NOMEM or SQLITE_IOERR */ + int eStatementOp = 0; + int isSpecialError; /* Set to true if a 'special' error */ + + /* Lock all btrees used by the statement */ + sqlite3VdbeMutexArrayEnter(p); + + /* Check for one of the special errors */ mrc = p->rc & 0xff; - isSpecialError = ( - (mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR || mrc==SQLITE_INTERRUPT)?1:0); + isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR + || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL; if( isSpecialError ){ - /* This loop does static analysis of the query to see which of the - ** following three categories it falls into: - ** - ** Read-only - ** Query with statement journal - ** Query without statement journal - ** - ** We could do something more elegant than this static analysis (i.e. - ** store the type of query as part of the compliation phase), but - ** handling malloc() or IO failure is a fairly obscure edge case so - ** this is probably easier. Todo: Might be an opportunity to reduce - ** code size a very small amount though... - */ - int isReadOnly = 1; - int isStatement = 0; - assert(p->aOp || p->nOp==0); - for(i=0; inOp; i++){ - switch( p->aOp[i].opcode ){ - case OP_Transaction: - /* This is a bit strange. 
If we hit a malloc() or IO error and - ** the statement did not open a statement transaction, we will - ** rollback any active transaction and abort all other active - ** statements. Or, if this is an SQLITE_INTERRUPT error, we - ** will only rollback if the interrupted statement was a write. - ** - ** It could be argued that read-only statements should never - ** rollback anything. But careful analysis is required before - ** making this change - */ - if( p->aOp[i].p2 || mrc!=SQLITE_INTERRUPT ){ - isReadOnly = 0; - } - break; - case OP_Statement: - isStatement = 1; - break; - } - } - /* If the query was read-only, we need do no rollback at all. Otherwise, ** proceed with the special handling. */ - if( !isReadOnly ){ - if( p->rc==SQLITE_IOERR_BLOCKED && isStatement ){ - xFunc = sqlite3BtreeRollbackStmt; + if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){ + if( p->rc==SQLITE_IOERR_BLOCKED && p->usesStmtJournal ){ + eStatementOp = SAVEPOINT_ROLLBACK; p->rc = SQLITE_BUSY; - } else if( p->rc==SQLITE_NOMEM && isStatement ){ - xFunc = sqlite3BtreeRollbackStmt; + }else if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) + && p->usesStmtJournal ){ + eStatementOp = SAVEPOINT_ROLLBACK; }else{ /* We are forced to roll back the active transaction. Before doing ** so, abort any other statements this handle currently has active. */ - sqlite3AbortOtherActiveVdbes(db, p); + invalidateCursorsOnModifiedBtrees(db); sqlite3RollbackAll(db); + sqlite3CloseSavepoints(db); db->autoCommit = 1; } } } - /* If the auto-commit flag is set and this is the only active vdbe, then - ** we do either a commit or rollback of the current transaction. + /* If the auto-commit flag is set and this is the only active writer + ** VM, then we do either a commit or rollback of the current transaction. ** ** Note: This block also runs if one of the special errors handled - ** above has occured. + ** above has occurred. */ - if( db->autoCommit && db->activeVdbeCnt==1 ){ + if( !sqlite3VtabInSync(db) + && db->autoCommit + && db->writeVdbeCnt==(p->readOnly==0) + && (db->flags & SQLITE_CommitBusy)==0 + ){ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ /* The auto-commit flag is true, and the vdbe program was ** successful or hit an 'OR FAIL' constraint. This means a commit ** is required. */ - int rc = vdbeCommit(db); + rc = vdbeCommit(db, p); if( rc==SQLITE_BUSY ){ + sqlite3BtreeMutexArrayLeave(&p->aMutex); return SQLITE_BUSY; }else if( rc!=SQLITE_OK ){ p->rc = rc; @@ -1451,46 +1780,40 @@ }else{ sqlite3RollbackAll(db); } - }else if( !xFunc ){ + db->nStatement = 0; + }else if( eStatementOp==0 ){ if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){ - if( p->openedStatement ){ - xFunc = sqlite3BtreeCommitStmt; - } + eStatementOp = SAVEPOINT_RELEASE; }else if( p->errorAction==OE_Abort ){ - xFunc = sqlite3BtreeRollbackStmt; + eStatementOp = SAVEPOINT_ROLLBACK; }else{ - sqlite3AbortOtherActiveVdbes(db, p); + invalidateCursorsOnModifiedBtrees(db); sqlite3RollbackAll(db); + sqlite3CloseSavepoints(db); db->autoCommit = 1; } } - /* If xFunc is not NULL, then it is one of sqlite3BtreeRollbackStmt or - ** sqlite3BtreeCommitStmt. Call it once on each backend. If an error occurs - ** and the return code is still SQLITE_OK, set the return code to the new - ** error value. + /* If eStatementOp is non-zero, then a statement transaction needs to + ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to + ** do so. 
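The statement transactions being committed or rolled back here sit on the same savepoint machinery that backs the SQL SAVEPOINT command. A rough SQL-level analogue of the SAVEPOINT_ROLLBACK and SAVEPOINT_RELEASE cases, assuming an open handle db and an existing table t; the savepoint name is illustrative.

/* A failure inside one statement undoes only that statement's work,
** much as ROLLBACK TO undoes work back to a named savepoint while the
** enclosing transaction stays open. */
sqlite3_exec(db, "BEGIN", 0, 0, 0);
sqlite3_exec(db, "SAVEPOINT sp1", 0, 0, 0);                 /* ~ statement journal opened */
if( sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0)!=SQLITE_OK ){
  sqlite3_exec(db, "ROLLBACK TO sp1", 0, 0, 0);             /* ~ SAVEPOINT_ROLLBACK */
}
sqlite3_exec(db, "RELEASE sp1", 0, 0, 0);                   /* ~ SAVEPOINT_RELEASE */
sqlite3_exec(db, "COMMIT", 0, 0, 0);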
If this operation returns an error, and the current statement + ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then set the error + ** code to the new value. */ - assert(!xFunc || - xFunc==sqlite3BtreeCommitStmt || - xFunc==sqlite3BtreeRollbackStmt - ); - for(i=0; xFunc && inDb; i++){ - int rc; - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - rc = xFunc(pBt); - if( rc && (p->rc==SQLITE_OK || p->rc==SQLITE_CONSTRAINT) ){ - p->rc = rc; - sqlite3SetString(&p->zErrMsg, 0); - } + if( eStatementOp ){ + rc = sqlite3VdbeCloseStatement(p, eStatementOp); + if( rc && (p->rc==SQLITE_OK || p->rc==SQLITE_CONSTRAINT) ){ + p->rc = rc; + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; } } - /* If this was an INSERT, UPDATE or DELETE and the statement was committed, - ** set the change counter. + /* If this was an INSERT, UPDATE or DELETE and no statement transaction + ** has been rolled back, update the database connection change-counter. */ if( p->changeCntOn && p->pc>=0 ){ - if( !xFunc || xFunc==sqlite3BtreeCommitStmt ){ + if( eStatementOp!=SAVEPOINT_ROLLBACK ){ sqlite3VdbeSetChanges(db, p->nChange); }else{ sqlite3VdbeSetChanges(db, 0); @@ -1503,18 +1826,38 @@ sqlite3ResetInternalSchema(db, 0); db->flags = (db->flags | SQLITE_InternChanges); } + + /* Release the locks */ + sqlite3BtreeMutexArrayLeave(&p->aMutex); } /* We have successfully halted and closed the VM. Record this fact. */ if( p->pc>=0 ){ db->activeVdbeCnt--; + if( !p->readOnly ){ + db->writeVdbeCnt--; + } + assert( db->activeVdbeCnt>=db->writeVdbeCnt ); } p->magic = VDBE_MAGIC_HALT; checkActiveVdbeCnt(db); + if( p->db->mallocFailed ){ + p->rc = SQLITE_NOMEM; + } + /* If the auto-commit flag is set to true, then any locks that were held + ** by connection db have now been released. Call sqlite3ConnectionUnlocked() + ** to invoke any required unlock-notify callbacks. + */ + if( db->autoCommit ){ + sqlite3ConnectionUnlocked(db); + } + + assert( db->activeVdbeCnt>0 || db->autoCommit==0 || db->nStatement==0 ); return SQLITE_OK; } + /* ** Each VDBE holds the result of the most recent sqlite3_step() call ** in p->rc. This routine sets that result back to SQLITE_OK. @@ -1542,9 +1885,9 @@ ** error, then it might not have been halted properly. So halt ** it now. */ - sqlite3SafetyOn(db); + (void)sqlite3SafetyOn(db); sqlite3VdbeHalt(p); - sqlite3SafetyOff(db); + (void)sqlite3SafetyOff(db); /* If the VDBE has be run even partially, then transfer the error code ** and error message from the VDBE into the main database structure. But @@ -1553,8 +1896,11 @@ */ if( p->pc>=0 ){ if( p->zErrMsg ){ - sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, sqlite3FreeX); + sqlite3BeginBenignMalloc(); + sqlite3ValueSetStr(db->pErr,-1,p->zErrMsg,SQLITE_UTF8,SQLITE_TRANSIENT); + sqlite3EndBenignMalloc(); db->errCode = p->rc; + sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; }else if( p->rc ){ sqlite3Error(db, p->rc, 0); @@ -1567,6 +1913,9 @@ ** called), set the database error in this case as well. */ sqlite3Error(db, p->rc, 0); + sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; } /* Reclaim all memory used by the VDBE @@ -1575,7 +1924,6 @@ /* Save profiling information from this VDBE run. 
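The change-counter bookkeeping above (sqlite3VdbeSetChanges) is what the public sqlite3_changes() interface reports. A small fragment showing the observable effect, assuming the usual includes, an open handle db and an existing table t.

/* The per-statement change count maintained by the VDBE is returned by
** sqlite3_changes(); sqlite3_total_changes() accumulates it across the
** lifetime of the connection. */
sqlite3_exec(db, "UPDATE t SET x=x+1", 0, 0, 0);
printf("rows changed: %d (total so far: %d)\n",
       sqlite3_changes(db), sqlite3_total_changes(db));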
*/ - assert( p->pTos<&p->aStack[p->pc<0?0:p->pc] || !p->aStack ); #ifdef VDBE_PROFILE { FILE *out = fopen("vdbe_profile.out", "a"); @@ -1599,7 +1947,6 @@ } #endif p->magic = VDBE_MAGIC_INIT; - p->aborted = 0; return p->rc & db->errMask; } @@ -1629,7 +1976,7 @@ int i; for(i=0; inAux; i++){ struct AuxData *pAux = &pVdbeFunc->apAux[i]; - if( (i>31 || !(mask&(1<pAux ){ + if( (i>31 || !(mask&(((u32)1)<pAux ){ if( pAux->xDelete ){ pAux->xDelete(pAux->pAux); } @@ -1643,52 +1990,64 @@ */ void sqlite3VdbeDelete(Vdbe *p){ int i; + sqlite3 *db; + if( p==0 ) return; - Cleanup(p); + db = p->db; if( p->pPrev ){ p->pPrev->pNext = p->pNext; }else{ - assert( p->db->pVdbe==p ); - p->db->pVdbe = p->pNext; + assert( db->pVdbe==p ); + db->pVdbe = p->pNext; } if( p->pNext ){ p->pNext->pPrev = p->pPrev; } if( p->aOp ){ - for(i=0; inOp; i++){ - Op *pOp = &p->aOp[i]; - freeP3(pOp->p3type, pOp->p3); + Op *pOp = p->aOp; + for(i=0; inOp; i++, pOp++){ + freeP4(db, pOp->p4type, pOp->p4.p); +#ifdef SQLITE_DEBUG + sqlite3DbFree(db, pOp->zComment); +#endif } - sqliteFree(p->aOp); } releaseMemArray(p->aVar, p->nVar); - sqliteFree(p->aLabel); - sqliteFree(p->aStack); + sqlite3DbFree(db, p->aLabel); releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); - sqliteFree(p->aColName); - sqliteFree(p->zSql); + sqlite3DbFree(db, p->aColName); + sqlite3DbFree(db, p->zSql); p->magic = VDBE_MAGIC_DEAD; - sqliteFree(p); + sqlite3DbFree(db, p->aOp); + sqlite3DbFree(db, p->pFree); + sqlite3DbFree(db, p); } /* +** Make sure the cursor p is ready to read or write the row to which it +** was last positioned. Return an error code if an OOM fault or I/O error +** prevents us from positioning the cursor to its correct position. +** ** If a MoveTo operation is pending on the given cursor, then do that -** MoveTo now. Return an error code. If no MoveTo is pending, this -** routine does nothing and returns SQLITE_OK. +** MoveTo now. If no move is pending, check to see if the row has been +** deleted out from under the cursor and if it has, mark the row as +** a NULL row. +** +** If the cursor is already pointing to the correct row and that row has +** not been deleted out from under the cursor, then this routine is a no-op. */ -int sqlite3VdbeCursorMoveto(Cursor *p){ +int sqlite3VdbeCursorMoveto(VdbeCursor *p){ if( p->deferredMoveto ){ int res, rc; #ifdef SQLITE_TEST extern int sqlite3_search_count; #endif assert( p->isTable ); - rc = sqlite3BtreeMoveto(p->pCursor, 0, p->movetoTarget, 0, &res); + rc = sqlite3BtreeMovetoUnpacked(p->pCursor, 0, p->movetoTarget, 0, &res); if( rc ) return rc; - *p->pIncrKey = 0; - p->lastRowid = keyToInt(p->movetoTarget); - p->rowidIsValid = res==0; - if( res<0 ){ + p->lastRowid = p->movetoTarget; + p->rowidIsValid = ALWAYS(res==0) ?1:0; + if( NEVER(res<0) ){ rc = sqlite3BtreeNext(p->pCursor, &res); if( rc ) return rc; } @@ -1697,6 +2056,14 @@ #endif p->deferredMoveto = 0; p->cacheStatus = CACHE_STALE; + }else if( p->pCursor ){ + int hasMoved; + int rc = sqlite3BtreeCursorHasMoved(p->pCursor, &hasMoved); + if( rc ) return rc; + if( hasMoved ){ + p->cacheStatus = CACHE_STALE; + p->nullRow = 1; + } } return SQLITE_OK; } @@ -1706,9 +2073,9 @@ ** ** sqlite3VdbeSerialType() ** sqlite3VdbeSerialTypeLen() -** sqlite3VdbeSerialRead() ** sqlite3VdbeSerialLen() -** sqlite3VdbeSerialWrite() +** sqlite3VdbeSerialPut() +** sqlite3VdbeSerialGet() ** ** encapsulate the code that serializes values for storage in SQLite ** data and index records. 
Each serialized value consists of a @@ -1755,11 +2122,11 @@ } if( flags&MEM_Int ){ /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */ -# define MAX_6BYTE ((((i64)0x00001000)<<32)-1) +# define MAX_6BYTE ((((i64)0x00008000)<<32)-1) i64 i = pMem->u.i; u64 u; if( file_format>=4 && (i&1)==i ){ - return 8+i; + return 8+(u32)i; } u = i<0 ? -i : i; if( u<=127 ) return 1; @@ -1772,10 +2139,10 @@ if( flags&MEM_Real ){ return 7; } - assert( flags&(MEM_Str|MEM_Blob) ); + assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) ); n = pMem->n; if( flags & MEM_Zero ){ - n += pMem->u.i; + n += pMem->u.nZero; } assert( n>=0 ); return ((n*2) + 12 + ((flags&MEM_Str)!=0)); @@ -1784,7 +2151,7 @@ /* ** Return the length of the data corresponding to the supplied serial-type. */ -int sqlite3VdbeSerialTypeLen(u32 serial_type){ +u32 sqlite3VdbeSerialTypeLen(u32 serial_type){ if( serial_type>=12 ){ return (serial_type-12)/2; }else{ @@ -1813,11 +2180,24 @@ ** application using -DSQLITE_DEBUG=1 at least once. With DEBUG ** enabled, some asserts below will ensure that the byte order of ** floating point values is correct. +** +** (2007-08-30) Frank van Vugt has studied this problem closely +** and has send his findings to the SQLite developers. Frank +** writes that some Linux kernels offer floating point hardware +** emulation that uses only 32-bit mantissas instead of a full +** 48-bits as required by the IEEE standard. (This is the +** CONFIG_FPE_FASTFPE option.) On such systems, floating point +** byte swapping becomes very complicated. To avoid problems, +** the necessary byte swapping is carried out using a 64-bit integer +** rather than a 64-bit float. Frank assures us that the code here +** works for him. We, the developers, have no way to independently +** verify this, but Frank seems to know what he is talking about +** so we trust him. */ #ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT -static double floatSwap(double in){ +static u64 floatSwap(u64 in){ union { - double r; + u64 r; u32 i[2]; } u; u32 t; @@ -1851,25 +2231,25 @@ ** of bytes in the zero-filled tail is included in the return value only ** if those bytes were zeroed in buf[]. */ -int sqlite3VdbeSerialPut(u8 *buf, int nBuf, Mem *pMem, int file_format){ +u32 sqlite3VdbeSerialPut(u8 *buf, int nBuf, Mem *pMem, int file_format){ u32 serial_type = sqlite3VdbeSerialType(pMem, file_format); - int len; + u32 len; /* Integer and Real */ if( serial_type<=7 && serial_type>0 ){ u64 v; - int i; + u32 i; if( serial_type==7 ){ assert( sizeof(v)==sizeof(pMem->r) ); - swapMixedEndianFloat(pMem->r); memcpy(&v, &pMem->r, sizeof(v)); + swapMixedEndianFloat(v); }else{ v = pMem->u.i; } len = i = sqlite3VdbeSerialTypeLen(serial_type); - assert( len<=nBuf ); + assert( len<=(u32)nBuf ); while( i-- ){ - buf[i] = (v&0xFF); + buf[i] = (u8)(v&0xFF); v >>= 8; } return len; @@ -1877,15 +2257,16 @@ /* String or blob */ if( serial_type>=12 ){ - assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.i:0) - == sqlite3VdbeSerialTypeLen(serial_type) ); + assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0) + == (int)sqlite3VdbeSerialTypeLen(serial_type) ); assert( pMem->n<=nBuf ); len = pMem->n; memcpy(buf, pMem->z, len); if( pMem->flags & MEM_Zero ){ - len += pMem->u.i; - if( len>nBuf ){ - len = nBuf; + len += pMem->u.nZero; + assert( nBuf>=0 ); + if( len > (u32)nBuf ){ + len = (u32)nBuf; } memset(&buf[pMem->n], 0, len-pMem->n); } @@ -1900,7 +2281,7 @@ ** Deserialize the data blob pointed to by buf as serial type serial_type ** and store the result in pMem. 
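As a concrete illustration of the serial-type scheme used by the routines above, here is a reader's summary and a tiny helper equivalent to the length computation; this is a restatement of the documented record format, not code from the tree.

/* Serial types as used by sqlite3VdbeSerialType()/SerialTypeLen():
**   0      NULL                        (0 bytes of payload)
**   1..6   signed integers of 1, 2, 3, 4, 6 or 8 bytes
**   7      IEEE-754 double             (8 bytes)
**   8, 9   the constants 0 and 1       (0 bytes, file format >= 4)
**   N>=12  blob of (N-12)/2 bytes when N is even,
**          text of (N-13)/2 bytes when N is odd
** So the text value 'hi' gets serial type 2*2+13 = 17, and the integer
** 1000 (two bytes) gets serial type 2. */
static int payload_bytes(unsigned serial_type){
  static const unsigned char aSize[] = {0,1,2,3,4,6,8,8,0,0,0,0};
  if( serial_type>=12 ) return (int)((serial_type-12)/2);
  return aSize[serial_type];
}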
Return the number of bytes read. */ -int sqlite3VdbeSerialGet( +u32 sqlite3VdbeSerialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ Mem *pMem /* Memory cell to write value into */ @@ -1952,9 +2333,9 @@ */ static const u64 t1 = ((u64)0x3ff00000)<<32; static const double r1 = 1.0; - double r2 = r1; - swapMixedEndianFloat(r2); - assert( sizeof(r2)==sizeof(t1) && memcmp(&r2, &t1, sizeof(r1))==0 ); + u64 t2 = t1; + swapMixedEndianFloat(t2); + assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 ); #endif x = (buf[0]<<24) | (buf[1]<<16) | (buf[2]<<8) | buf[3]; @@ -1965,9 +2346,9 @@ pMem->flags = MEM_Int; }else{ assert( sizeof(x)==8 && sizeof(pMem->r)==8 ); + swapMixedEndianFloat(x); memcpy(&pMem->r, &x, sizeof(x)); - swapMixedEndianFloat(pMem->r); - pMem->flags = MEM_Real; + pMem->flags = sqlite3IsNaN(pMem->r) ? MEM_Null : MEM_Real; } return 8; } @@ -1978,7 +2359,7 @@ return 0; } default: { - int len = (serial_type-12)/2; + u32 len = (serial_type-12)/2; pMem->z = (char *)buf; pMem->n = len; pMem->xDel = 0; @@ -1993,92 +2374,200 @@ return 0; } + /* -** The header of a record consists of a sequence variable-length integers. -** These integers are almost always small and are encoded as a single byte. -** The following macro takes advantage this fact to provide a fast decode -** of the integers in a record header. It is faster for the common case -** where the integer is a single byte. It is a little slower when the -** integer is two or more bytes. But overall it is faster. -** -** The following expressions are equivalent: -** -** x = sqlite3GetVarint32( A, &B ); -** -** x = GetVarint( A, B ); +** Given the nKey-byte encoding of a record in pKey[], parse the +** record into a UnpackedRecord structure. Return a pointer to +** that structure. +** +** The calling function might provide szSpace bytes of memory +** space at pSpace. This space can be used to hold the returned +** VDbeParsedRecord structure if it is large enough. If it is +** not big enough, space is obtained from sqlite3_malloc(). ** +** The returned structure should be closed by a call to +** sqlite3VdbeDeleteUnpackedRecord(). +*/ +UnpackedRecord *sqlite3VdbeRecordUnpack( + KeyInfo *pKeyInfo, /* Information about the record format */ + int nKey, /* Size of the binary record */ + const void *pKey, /* The binary record */ + char *pSpace, /* Unaligned space available to hold the object */ + int szSpace /* Size of pSpace[] in bytes */ +){ + const unsigned char *aKey = (const unsigned char *)pKey; + UnpackedRecord *p; /* The unpacked record that we will return */ + int nByte; /* Memory space needed to hold p, in bytes */ + int d; + u32 idx; + u16 u; /* Unsigned loop counter */ + u32 szHdr; + Mem *pMem; + int nOff; /* Increase pSpace by this much to 8-byte align it */ + + /* + ** We want to shift the pointer pSpace up such that it is 8-byte aligned. + ** Thus, we need to calculate a value, nOff, between 0 and 7, to shift + ** it by. If pSpace is already 8-byte aligned, nOff should be zero. 
+ */ + nOff = (8 - (SQLITE_PTR_TO_INT(pSpace) & 7)) & 7; + pSpace += nOff; + szSpace -= nOff; + nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1); + if( nByte>szSpace ){ + p = sqlite3DbMallocRaw(pKeyInfo->db, nByte); + if( p==0 ) return 0; + p->flags = UNPACKED_NEED_FREE | UNPACKED_NEED_DESTROY; + }else{ + p = (UnpackedRecord*)pSpace; + p->flags = UNPACKED_NEED_DESTROY; + } + p->pKeyInfo = pKeyInfo; + p->nField = pKeyInfo->nField + 1; + p->aMem = pMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))]; + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); + idx = getVarint32(aKey, szHdr); + d = szHdr; + u = 0; + while( idxnField ){ + u32 serial_type; + + idx += getVarint32(&aKey[idx], serial_type); + if( d>=nKey && sqlite3VdbeSerialTypeLen(serial_type)>0 ) break; + pMem->enc = pKeyInfo->enc; + pMem->db = pKeyInfo->db; + pMem->flags = 0; + pMem->zMalloc = 0; + d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); + pMem++; + u++; + } + assert( u<=pKeyInfo->nField + 1 ); + p->nField = u; + return (void*)p; +} + +/* +** This routine destroys a UnpackedRecord object. */ -#define GetVarint(A,B) ((B = *(A))<=0x7f ? 1 : sqlite3GetVarint32(A, &B)) +void sqlite3VdbeDeleteUnpackedRecord(UnpackedRecord *p){ + int i; + Mem *pMem; + + assert( p!=0 ); + assert( p->flags & UNPACKED_NEED_DESTROY ); + for(i=0, pMem=p->aMem; inField; i++, pMem++){ + if( pMem->zMalloc ){ + sqlite3VdbeMemRelease(pMem); + } + } + if( p->flags & UNPACKED_NEED_FREE ){ + sqlite3DbFree(p->pKeyInfo->db, p); + } +} /* -** This function compares the two table rows or index records specified by -** {nKey1, pKey1} and {nKey2, pKey2}, returning a negative, zero -** or positive integer if {nKey1, pKey1} is less than, equal to or -** greater than {nKey2, pKey2}. Both Key1 and Key2 must be byte strings -** composed by the OP_MakeRecord opcode of the VDBE. +** This function compares the two table rows or index records +** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero +** or positive integer if key1 is less than, equal to or +** greater than key2. The {nKey1, pKey1} key must be a blob +** created by th OP_MakeRecord opcode of the VDBE. The pPKey2 +** key must be a parsed key such as obtained from +** sqlite3VdbeParseRecord. +** +** Key1 and Key2 do not have to contain the same number of fields. +** The key with fewer fields is usually compares less than the +** longer key. However if the UNPACKED_INCRKEY flags in pPKey2 is set +** and the common prefixes are equal, then key1 is less than key2. +** Or if the UNPACKED_MATCH_PREFIX flag is set and the prefixes are +** equal, then the keys are considered to be equal and +** the parts beyond the common prefix are ignored. +** +** If the UNPACKED_IGNORE_ROWID flag is set, then the last byte of +** the header of pKey1 is ignored. It is assumed that pKey1 is +** an index key, and thus ends with a rowid value. The last byte +** of the header will therefore be the serial type of the rowid: +** one of 1, 2, 3, 4, 5, 6, 8, or 9 - the integer serial types. +** The serial type of the final rowid will always be a single byte. +** By ignoring this last byte of the header, we force the comparison +** to ignore the rowid at the end of key1. 
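A sketch of how the two halves of the new comparison interface fit together, using the internal signatures introduced in this diff; pKeyInfo, pKey/nKey and pRec/nRec are assumed to exist, and the scratch-buffer size is arbitrary (callers in the tree size it from the KeyInfo).

/* Compare a serialized record (pRec/nRec) against a probe key
** (pKey/nKey): the probe is unpacked once and can then be compared
** against many serialized records. */
char aSpace[200];                 /* arbitrary scratch for the unpacked form */
UnpackedRecord *pUn;
pUn = sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, aSpace, sizeof(aSpace));
if( pUn ){
  int c = sqlite3VdbeRecordCompare(nRec, pRec, pUn);   /* <0, 0 or >0 */
  /* ... act on c ... */
  sqlite3VdbeDeleteUnpackedRecord(pUn);
}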
*/ int sqlite3VdbeRecordCompare( - void *userData, - int nKey1, const void *pKey1, - int nKey2, const void *pKey2 + int nKey1, const void *pKey1, /* Left key */ + UnpackedRecord *pPKey2 /* Right key */ ){ - KeyInfo *pKeyInfo = (KeyInfo*)userData; - u32 d1, d2; /* Offset into aKey[] of next data element */ - u32 idx1, idx2; /* Offset into aKey[] of next header element */ - u32 szHdr1, szHdr2; /* Number of bytes in header */ + int d1; /* Offset into aKey[] of next data element */ + u32 idx1; /* Offset into aKey[] of next header element */ + u32 szHdr1; /* Number of bytes in header */ int i = 0; int nField; int rc = 0; const unsigned char *aKey1 = (const unsigned char *)pKey1; - const unsigned char *aKey2 = (const unsigned char *)pKey2; - + KeyInfo *pKeyInfo; Mem mem1; - Mem mem2; + + pKeyInfo = pPKey2->pKeyInfo; mem1.enc = pKeyInfo->enc; - mem2.enc = pKeyInfo->enc; + mem1.db = pKeyInfo->db; + mem1.flags = 0; + mem1.u.i = 0; /* not needed, here to silence compiler warning */ + mem1.zMalloc = 0; - idx1 = GetVarint(aKey1, szHdr1); + idx1 = getVarint32(aKey1, szHdr1); d1 = szHdr1; - idx2 = GetVarint(aKey2, szHdr2); - d2 = szHdr2; + if( pPKey2->flags & UNPACKED_IGNORE_ROWID ){ + szHdr1--; + } nField = pKeyInfo->nField; - while( idx1nField ){ u32 serial_type1; - u32 serial_type2; /* Read the serial types for the next element in each key. */ - idx1 += GetVarint( aKey1+idx1, serial_type1 ); + idx1 += getVarint32( aKey1+idx1, serial_type1 ); if( d1>=nKey1 && sqlite3VdbeSerialTypeLen(serial_type1)>0 ) break; - idx2 += GetVarint( aKey2+idx2, serial_type2 ); - if( d2>=nKey2 && sqlite3VdbeSerialTypeLen(serial_type2)>0 ) break; /* Extract the values to be compared. */ d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); - d2 += sqlite3VdbeSerialGet(&aKey2[d2], serial_type2, &mem2); /* Do the comparison */ - rc = sqlite3MemCompare(&mem1, &mem2, iaColl[i] : 0); - if( mem1.flags & MEM_Dyn ) sqlite3VdbeMemRelease(&mem1); - if( mem2.flags & MEM_Dyn ) sqlite3VdbeMemRelease(&mem2); + rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], + iaColl[i] : 0); if( rc!=0 ){ break; } i++; } + if( mem1.zMalloc ) sqlite3VdbeMemRelease(&mem1); - /* One of the keys ran out of fields, but all the fields up to that point - ** were equal. If the incrKey flag is true, then the second key is - ** treated as larger. + /* If the PREFIX_SEARCH flag is set and all fields except the final + ** rowid field were equal, then clear the PREFIX_SEARCH flag and set + ** pPKey2->rowid to the value of the rowid field in (pKey1, nKey1). + ** This is used by the OP_IsUnique opcode. */ + if( (pPKey2->flags & UNPACKED_PREFIX_SEARCH) && i==(pPKey2->nField-1) ){ + assert( idx1==szHdr1 && rc ); + assert( mem1.flags & MEM_Int ); + pPKey2->flags &= ~UNPACKED_PREFIX_SEARCH; + pPKey2->rowid = mem1.u.i; + } + if( rc==0 ){ - if( pKeyInfo->incrKey ){ + /* rc==0 here means that one of the keys ran out of fields and + ** all the fields up to that point were equal. If the UNPACKED_INCRKEY + ** flag is set, then break the tie by treating key2 as larger. + ** If the UPACKED_PREFIX_MATCH flag is set, then keys with common prefixes + ** are considered to be equal. Otherwise, the longer key is the + ** larger. As it happens, the pPKey2 will always be the longer + ** if there is a difference. 
+ */ + if( pPKey2->flags & UNPACKED_INCRKEY ){ rc = -1; - }else if( d1flags & UNPACKED_PREFIX_MATCH ){ + /* Leave rc==0 */ + }else if( idx1aSortOrder && inField && pKeyInfo->aSortOrder[i] ){ @@ -2087,29 +2576,17 @@ return rc; } - -/* -** The argument is an index entry composed using the OP_MakeRecord opcode. -** The last entry in this record should be an integer (specifically -** an integer rowid). This routine returns the number of bytes in -** that integer. -*/ -int sqlite3VdbeIdxRowidLen(const u8 *aKey){ - u32 szHdr; /* Size of the header */ - u32 typeRowid; /* Serial type of the rowid */ - - sqlite3GetVarint32(aKey, &szHdr); - sqlite3GetVarint32(&aKey[szHdr-1], &typeRowid); - return sqlite3VdbeSerialTypeLen(typeRowid); -} - + /* ** pCur points at an index entry created using the OP_MakeRecord opcode. ** Read the rowid (the last field in the record) and store it in *rowid. ** Return SQLITE_OK if everything works, or an error code otherwise. +** +** pCur might be pointing to text obtained from a corrupt database file. +** So the content cannot be trusted. Do appropriate checks on the content. */ -int sqlite3VdbeIdxRowid(BtCursor *pCur, i64 *rowid){ +int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){ i64 nCellKey = 0; int rc; u32 szHdr; /* Size of the header */ @@ -2117,21 +2594,62 @@ u32 lenRowid; /* Size of the rowid */ Mem m, v; + /* Get the size of the index entry. Only indices entries of less + ** than 2GiB are support - anything large must be database corruption. + ** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so + ** this code can safely assume that nCellKey is 32-bits */ sqlite3BtreeKeySize(pCur, &nCellKey); - if( nCellKey<=0 ){ - return SQLITE_CORRUPT_BKPT; - } - rc = sqlite3VdbeMemFromBtree(pCur, 0, nCellKey, 1, &m); + assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey ); + + /* Read in the complete content of the index entry */ + m.flags = 0; + m.db = db; + m.zMalloc = 0; + rc = sqlite3VdbeMemFromBtree(pCur, 0, (int)nCellKey, 1, &m); if( rc ){ return rc; } - sqlite3GetVarint32((u8*)m.z, &szHdr); - sqlite3GetVarint32((u8*)&m.z[szHdr-1], &typeRowid); + + /* The index entry must begin with a header size */ + (void)getVarint32((u8*)m.z, szHdr); + testcase( szHdr==3 ); + testcase( szHdr==m.n ); + if( unlikely(szHdr<3 || (int)szHdr>m.n) ){ + goto idx_rowid_corruption; + } + + /* The last field of the index should be an integer - the ROWID. + ** Verify that the last entry really is an integer. 
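The record header and rowid tail examined here are encoded as SQLite varints. A simplified reader's decoder is sketched below; the real sqlite3GetVarint32()/getVarint32() are more heavily optimized and also handle the full 64-bit, 9-byte form.

/* Decode a big-endian base-128 varint of at most 5 bytes (enough for
** any 32-bit value): each byte contributes its low 7 bits, and a clear
** high bit marks the final byte.  Returns the number of bytes read. */
static int varint32_get(const unsigned char *p, unsigned int *pVal){
  unsigned int v = 0;
  int i;
  for(i=0; i<5; i++){
    v = (v<<7) | (unsigned int)(p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){
      *pVal = v;
      return i+1;
    }
  }
  *pVal = v;    /* truncated: value did not fit in 5 bytes */
  return 5;
}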
*/ + (void)getVarint32((u8*)&m.z[szHdr-1], typeRowid); + testcase( typeRowid==1 ); + testcase( typeRowid==2 ); + testcase( typeRowid==3 ); + testcase( typeRowid==4 ); + testcase( typeRowid==5 ); + testcase( typeRowid==6 ); + testcase( typeRowid==8 ); + testcase( typeRowid==9 ); + if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){ + goto idx_rowid_corruption; + } lenRowid = sqlite3VdbeSerialTypeLen(typeRowid); + testcase( m.n-lenRowid==szHdr ); + if( unlikely(m.n-lenRowidpCursor; - int lenRowid; Mem m; sqlite3BtreeKeySize(pCur, &nCellKey); - if( nCellKey<=0 ){ + if( nCellKey<=0 || nCellKey>0x7fffffff ){ *res = 0; return SQLITE_OK; } - rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, nCellKey, 1, &m); + m.db = 0; + m.flags = 0; + m.zMalloc = 0; + rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, (int)nCellKey, 1, &m); if( rc ){ return rc; } - lenRowid = sqlite3VdbeIdxRowidLen((u8*)m.z); - *res = sqlite3VdbeRecordCompare(pC->pKeyInfo, m.n-lenRowid, m.z, nKey, pKey); + assert( pUnpacked->flags & UNPACKED_IGNORE_ROWID ); + *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked); sqlite3VdbeMemRelease(&m); return SQLITE_OK; } @@ -2175,6 +2699,7 @@ ** sqlite3_changes() on the database handle 'db'. */ void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){ + assert( sqlite3_mutex_held(db->mutex) ); db->nChange = nChange; db->nTotalChange += nChange; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbeblob.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbeblob.c --- sqlite3-3.4.2/src/vdbeblob.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/vdbeblob.c 2009-06-25 12:35:51.000000000 +0100 @@ -12,7 +12,7 @@ ** ** This file contains code used to implement incremental BLOB I/O. ** -** $Id: vdbeblob.c,v 1.11 2007/06/27 00:36:14 drh Exp $ +** $Id: vdbeblob.c,v 1.33 2009/06/01 19:53:31 drh Exp $ */ #include "sqliteInt.h" @@ -30,6 +30,7 @@ int iOffset; /* Byte offset of blob in cursor data */ BtCursor *pCsr; /* Cursor pointing at blob row */ sqlite3_stmt *pStmt; /* Statement holding cursor open */ + sqlite3 *db; /* The associated database */ }; /* @@ -53,7 +54,7 @@ ** vdbe program will take advantage of the various transaction, ** locking and error handling infrastructure built into the vdbe. ** - ** After seeking the cursor, the vdbe executes an OP_Callback. + ** After seeking the cursor, the vdbe executes an OP_ResultRow. ** Code external to the Vdbe then "borrows" the b-tree cursor and ** uses it to implement the blob_read(), blob_write() and ** blob_bytes() functions. @@ -65,48 +66,66 @@ static const VdbeOpList openBlob[] = { {OP_Transaction, 0, 0, 0}, /* 0: Start a transaction */ {OP_VerifyCookie, 0, 0, 0}, /* 1: Check the schema cookie */ - {OP_Integer, 0, 0, 0}, /* 2: Database number */ /* One of the following two instructions is replaced by an ** OP_Noop before exection. 
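The openBlob program assembled in this file backs the public incremental-BLOB interface. A minimal usage sketch of that interface follows; the database, table, column and row values are illustrative.

/* Read the first 16 bytes of the blob stored in row 1 of t.data
** without loading the whole value into memory. */
sqlite3_blob *pBlob = 0;
char buf[16];
int rc = sqlite3_blob_open(db, "main", "t", "data", 1, 0 /* read-only */, &pBlob);
if( rc==SQLITE_OK ){
  rc = sqlite3_blob_read(pBlob, buf, sizeof(buf), 0);
  /* sqlite3_blob_bytes(pBlob) gives the full size; sqlite3_blob_write()
  ** is the counterpart when the handle is opened for writing. */
  sqlite3_blob_close(pBlob);
}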
*/ - {OP_OpenRead, 0, 0, 0}, /* 3: Open cursor 0 for reading */ - {OP_OpenWrite, 0, 0, 0}, /* 4: Open cursor 0 for read/write */ - {OP_SetNumColumns, 0, 0, 0}, /* 5: Num cols for cursor */ - - {OP_Variable, 1, 0, 0}, /* 6: Push the rowid to the stack */ - {OP_NotExists, 0, 10, 0}, /* 7: Seek the cursor */ - {OP_Column, 0, 0, 0}, /* 8 */ - {OP_Callback, 0, 0, 0}, /* 9 */ - {OP_Close, 0, 0, 0}, /* 10 */ - {OP_Halt, 0, 0, 0}, /* 11 */ + {OP_OpenRead, 0, 0, 0}, /* 2: Open cursor 0 for reading */ + {OP_OpenWrite, 0, 0, 0}, /* 3: Open cursor 0 for read/write */ + + {OP_Variable, 1, 1, 1}, /* 4: Push the rowid to the stack */ + {OP_NotExists, 0, 8, 1}, /* 5: Seek the cursor */ + {OP_Column, 0, 0, 1}, /* 6 */ + {OP_ResultRow, 1, 0, 0}, /* 7 */ + {OP_Close, 0, 0, 0}, /* 8 */ + {OP_Halt, 0, 0, 0}, /* 9 */ }; Vdbe *v = 0; int rc = SQLITE_OK; - char zErr[128]; - - zErr[0] = 0; + char *zErr = 0; + Table *pTab; + Parse *pParse; + + *ppBlob = 0; + sqlite3_mutex_enter(db->mutex); + pParse = sqlite3StackAllocRaw(db, sizeof(*pParse)); + if( pParse==0 ){ + rc = SQLITE_NOMEM; + goto blob_open_out; + } do { - Parse sParse; - Table *pTab; - - memset(&sParse, 0, sizeof(Parse)); - sParse.db = db; + memset(pParse, 0, sizeof(Parse)); + pParse->db = db; - rc = sqlite3SafetyOn(db); - if( rc!=SQLITE_OK ){ - return rc; + if( sqlite3SafetyOn(db) ){ + sqlite3DbFree(db, zErr); + sqlite3StackFree(db, pParse); + sqlite3_mutex_leave(db->mutex); + return SQLITE_MISUSE; } - pTab = sqlite3LocateTable(&sParse, zTable, zDb); + sqlite3BtreeEnterAll(db); + pTab = sqlite3LocateTable(pParse, 0, zTable, zDb); + if( pTab && IsVirtual(pTab) ){ + pTab = 0; + sqlite3ErrorMsg(pParse, "cannot open virtual table: %s", zTable); + } +#ifndef SQLITE_OMIT_VIEW + if( pTab && pTab->pSelect ){ + pTab = 0; + sqlite3ErrorMsg(pParse, "cannot open view: %s", zTable); + } +#endif if( !pTab ){ - if( sParse.zErrMsg ){ - sqlite3_snprintf(sizeof(zErr), zErr, "%s", sParse.zErrMsg); + if( pParse->zErrMsg ){ + sqlite3DbFree(db, zErr); + zErr = pParse->zErrMsg; + pParse->zErrMsg = 0; } - sqliteFree(sParse.zErrMsg); rc = SQLITE_ERROR; - sqlite3SafetyOff(db); + (void)sqlite3SafetyOff(db); + sqlite3BtreeLeaveAll(db); goto blob_open_out; } @@ -117,9 +136,11 @@ } } if( iCol==pTab->nCol ){ - sqlite3_snprintf(sizeof(zErr), zErr, "no such column: \"%s\"", zColumn); + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn); rc = SQLITE_ERROR; - sqlite3SafetyOff(db); + (void)sqlite3SafetyOff(db); + sqlite3BtreeLeaveAll(db); goto blob_open_out; } @@ -133,10 +154,12 @@ int j; for(j=0; jnColumn; j++){ if( pIdx->aiColumn[j]==iCol ){ - sqlite3_snprintf(sizeof(zErr), zErr, + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "cannot open indexed column for writing"); rc = SQLITE_ERROR; - sqlite3SafetyOff(db); + (void)sqlite3SafetyOff(db); + sqlite3BtreeLeaveAll(db); goto blob_open_out; } } @@ -156,30 +179,34 @@ sqlite3VdbeChangeP1(v, 1, iDb); sqlite3VdbeChangeP2(v, 1, pTab->pSchema->schema_cookie); - /* Configure the db number pushed onto the stack */ - sqlite3VdbeChangeP1(v, 2, iDb); + /* Make sure a mutex is held on the table to be accessed */ + sqlite3VdbeUsesBtree(v, iDb); /* Remove either the OP_OpenWrite or OpenRead. Set the P2 ** parameter of the other to pTab->tnum. */ - sqlite3VdbeChangeToNoop(v, (flags ? 3 : 4), 1); - sqlite3VdbeChangeP2(v, (flags ? 
4 : 3), pTab->tnum); + flags = !!flags; + sqlite3VdbeChangeToNoop(v, 3 - flags, 1); + sqlite3VdbeChangeP2(v, 2 + flags, pTab->tnum); + sqlite3VdbeChangeP3(v, 2 + flags, iDb); - /* Configure the OP_SetNumColumns. Configure the cursor to + /* Configure the number of columns. Configure the cursor to ** think that the table has one more column than it really ** does. An OP_Column to retrieve this imaginary column will ** always return an SQL NULL. This is useful because it means ** we can invoke OP_Column to fill in the vdbe cursors type ** and offset cache without causing any IO. */ - sqlite3VdbeChangeP2(v, 5, pTab->nCol+1); - if( !sqlite3MallocFailed() ){ - sqlite3VdbeMakeReady(v, 1, 0, 1, 0); + sqlite3VdbeChangeP4(v, 2+flags, SQLITE_INT_TO_PTR(pTab->nCol+1),P4_INT32); + sqlite3VdbeChangeP2(v, 6, pTab->nCol); + if( !db->mallocFailed ){ + sqlite3VdbeMakeReady(v, 1, 1, 1, 0); } } - + + sqlite3BtreeLeaveAll(db); rc = sqlite3SafetyOff(db); - if( rc!=SQLITE_OK || sqlite3MallocFailed() ){ + if( NEVER(rc!=SQLITE_OK) || db->mallocFailed ){ goto blob_open_out; } @@ -188,7 +215,8 @@ if( rc!=SQLITE_ROW ){ nAttempt++; rc = sqlite3_finalize((sqlite3_stmt *)v); - sqlite3_snprintf(sizeof(zErr), zErr, sqlite3_errmsg(db)); + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, sqlite3_errmsg(db)); v = 0; } } while( nAttempt<5 && rc==SQLITE_SCHEMA ); @@ -202,37 +230,45 @@ u32 type = v->apCsr[0]->aType[iCol]; if( type<12 ){ - sqlite3_snprintf(sizeof(zErr), zErr, "cannot open value of type %s", + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "cannot open value of type %s", type==0?"null": type==7?"real": "integer" ); rc = SQLITE_ERROR; goto blob_open_out; } - pBlob = (Incrblob *)sqliteMalloc(sizeof(Incrblob)); - if( sqlite3MallocFailed() ){ - sqliteFree(pBlob); + pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); + if( db->mallocFailed ){ + sqlite3DbFree(db, pBlob); goto blob_open_out; } pBlob->flags = flags; pBlob->pCsr = v->apCsr[0]->pCursor; + sqlite3BtreeEnterCursor(pBlob->pCsr); sqlite3BtreeCacheOverflow(pBlob->pCsr); + sqlite3BtreeLeaveCursor(pBlob->pCsr); pBlob->pStmt = (sqlite3_stmt *)v; pBlob->iOffset = v->apCsr[0]->aOffset[iCol]; pBlob->nByte = sqlite3VdbeSerialTypeLen(type); + pBlob->db = db; *ppBlob = (sqlite3_blob *)pBlob; rc = SQLITE_OK; }else if( rc==SQLITE_OK ){ - sqlite3_snprintf(sizeof(zErr), zErr, "no such rowid: %lld", iRow); + sqlite3DbFree(db, zErr); + zErr = sqlite3MPrintf(db, "no such rowid: %lld", iRow); rc = SQLITE_ERROR; } blob_open_out: - zErr[sizeof(zErr)-1] = '\0'; - if( rc!=SQLITE_OK || sqlite3MallocFailed() ){ - sqlite3_finalize((sqlite3_stmt *)v); + if( v && (rc!=SQLITE_OK || db->mallocFailed) ){ + sqlite3VdbeFinalize(v); } - sqlite3Error(db, rc, (rc==SQLITE_OK?0:zErr)); - return sqlite3ApiExit(db, rc); + sqlite3Error(db, rc, zErr); + sqlite3DbFree(db, zErr); + sqlite3StackFree(db, pParse); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } /* @@ -241,12 +277,24 @@ */ int sqlite3_blob_close(sqlite3_blob *pBlob){ Incrblob *p = (Incrblob *)pBlob; - sqlite3_stmt *pStmt = p->pStmt; - sqliteFree(p); - return sqlite3_finalize(pStmt); -} + int rc; + sqlite3 *db; + if( p ){ + db = p->db; + sqlite3_mutex_enter(db->mutex); + rc = sqlite3_finalize(p->pStmt); + sqlite3DbFree(db, p); + sqlite3_mutex_leave(db->mutex); + }else{ + rc = SQLITE_OK; + } + return rc; +} +/* +** Perform a read or write operation on a blob +*/ static int blobReadWrite( sqlite3_blob *pBlob, void *z, @@ -256,33 +304,42 @@ ){ int rc; Incrblob *p = (Incrblob *)pBlob; - Vdbe 
*v = (Vdbe *)(p->pStmt); - sqlite3 *db; + Vdbe *v; + sqlite3 *db; - /* If there is no statement handle, then the blob-handle has - ** already been invalidated. Return SQLITE_ABORT in this case. - */ - if( !v ) return SQLITE_ABORT; + if( p==0 ) return SQLITE_MISUSE; + db = p->db; + sqlite3_mutex_enter(db->mutex); + v = (Vdbe*)p->pStmt; - /* Request is out of range. Return a transient error. */ - if( (iOffset+n)>p->nByte ){ - return SQLITE_ERROR; - } - - /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is - ** returned, clean-up the statement handle. - */ - db = v->db; - rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); - if( rc==SQLITE_ABORT ){ - sqlite3VdbeFinalize(v); - p->pStmt = 0; + if( n<0 || iOffset<0 || (iOffset+n)>p->nByte ){ + /* Request is out of range. Return a transient error. */ + rc = SQLITE_ERROR; + sqlite3Error(db, SQLITE_ERROR, 0); + } else if( v==0 ){ + /* If there is no statement handle, then the blob-handle has + ** already been invalidated. Return SQLITE_ABORT in this case. + */ + rc = SQLITE_ABORT; }else{ - db->errCode = rc; - v->rc = rc; + /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is + ** returned, clean-up the statement handle. + */ + assert( db == v->db ); + sqlite3BtreeEnterCursor(p->pCsr); + rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); + sqlite3BtreeLeaveCursor(p->pCsr); + if( rc==SQLITE_ABORT ){ + sqlite3VdbeFinalize(v); + p->pStmt = 0; + }else{ + db->errCode = rc; + v->rc = rc; + } } - - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } /* @@ -301,10 +358,13 @@ /* ** Query a blob handle for the size of the data. +** +** The Incrblob.nByte field is fixed for the lifetime of the Incrblob +** so no mutex is required for access. */ int sqlite3_blob_bytes(sqlite3_blob *pBlob){ Incrblob *p = (Incrblob *)pBlob; - return p->nByte; + return p ? p->nByte : 0; } #endif /* #ifndef SQLITE_OMIT_INCRBLOB */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbe.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbe.c --- sqlite3-3.4.2/src/vdbe.c 2007-07-30 21:36:28.000000000 +0100 +++ sqlite3-3.6.16/src/vdbe.c 2009-06-26 19:17:20.000000000 +0100 @@ -22,14 +22,14 @@ ** the VDBE to do the work of the SQL statement. VDBE programs are ** similar in form to assembly language. The program consists of ** a linear sequence of operations. Each operation has an opcode -** and 3 operands. Operands P1 and P2 are integers. Operand P3 -** is a null-terminated string. The P2 operand must be non-negative. -** Opcodes will typically ignore one or more operands. Many opcodes -** ignore all three operands. -** -** Computation results are stored on a stack. Each entry on the -** stack is either an integer, a null-terminated string, a floating point -** number, or the SQL "NULL" value. An inplicit conversion from one +** and 5 operands. Operands P1, P2, and P3 are integers. Operand P4 +** is a null-terminated string. Operand P5 is an unsigned character. +** Few opcodes use all 5 operands. +** +** Computation results are stored on a set of registers numbered beginning +** with 1 and going up to Vdbe.nMem. Each register can store +** either an integer, a null-terminated string, a floating point +** number, or the SQL "NULL" value. An implicit conversion from one ** type to the other occurs as necessary. ** ** Most of the code in this file is taken up by the sqlite3VdbeExec() @@ -43,17 +43,14 @@ ** in this file for details. 
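The register-based program described in this header comment can be inspected from application code with EXPLAIN, which returns one row per opcode with columns addr, opcode, p1, p2, p3, p4, p5 and comment. A short fragment, assuming an open handle db and the usual includes.

/* Dump part of the VDBE program for a trivial statement. */
sqlite3_stmt *pStmt = 0;
if( sqlite3_prepare_v2(db, "EXPLAIN SELECT 1", -1, &pStmt, 0)==SQLITE_OK ){
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%3d %-16s %4d %4d %4d\n",
           sqlite3_column_int(pStmt, 0),          /* addr   */
           sqlite3_column_text(pStmt, 1),         /* opcode */
           sqlite3_column_int(pStmt, 2),          /* p1     */
           sqlite3_column_int(pStmt, 3),          /* p2     */
           sqlite3_column_int(pStmt, 4));         /* p3     */
  }
  sqlite3_finalize(pStmt);
}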
If in doubt, do not deviate from existing ** commenting and indentation practices when changing or adding code. ** -** $Id: vdbe.c,v 1.639 2007/07/26 06:50:06 danielk1977 Exp $ +** $Id: vdbe.c,v 1.866 2009/06/26 16:32:13 shane Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include -#include #include "vdbeInt.h" /* ** The following global variable is incremented every time a cursor -** moves, either by the OP_MoveXX, OP_Next, or OP_Prev opcodes. The test +** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test ** procedures use this information to make sure that indices are ** working correctly. This variable has no function other than to ** help verify the correct operation of the library. @@ -77,7 +74,7 @@ /* ** The next global variable is incremented each type the OP_Sort opcode ** is executed. The test procedures use this information to make sure that -** sorting is occurring or not occuring at appropriate times. This variable +** sorting is occurring or not occurring at appropriate times. This variable ** has no function other than to help verify the correct operation of the ** library. */ @@ -87,23 +84,32 @@ /* ** The next global variable records the size of the largest MEM_Blob -** or MEM_Str that has appeared on the VDBE stack. The test procedures +** or MEM_Str that has been used by a VDBE opcode. The test procedures ** use this information to make sure that the zero-blob functionality ** is working correctly. This variable has no function other than to ** help verify the correct operation of the library. */ #ifdef SQLITE_TEST int sqlite3_max_blobsize = 0; +static void updateMaxBlobsize(Mem *p){ + if( (p->flags & (MEM_Str|MEM_Blob))!=0 && p->n>sqlite3_max_blobsize ){ + sqlite3_max_blobsize = p->n; + } +} #endif /* -** Release the memory associated with the given stack level. This -** leaves the Mem.flags field in an inconsistent state. +** Test a register to see if it exceeds the current maximum blob size. +** If it does, record the new maximum blob size. */ -#define Release(P) if((P)->flags&MEM_Dyn){ sqlite3VdbeMemRelease(P); } +#if defined(SQLITE_TEST) && !defined(SQLITE_OMIT_BUILTIN_TEST) +# define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P) +#else +# define UPDATE_MAX_BLOBSIZE(P) +#endif /* -** Convert the given stack entity into a string if it isn't one +** Convert the given register into a string if it isn't one ** already. Return non-zero if a malloc() fails. */ #define Stringify(P, enc) \ @@ -111,40 +117,14 @@ { goto no_mem; } /* -** Convert the given stack entity into a string that has been obtained -** from sqliteMalloc(). This is different from Stringify() above in that -** Stringify() will use the NBFS bytes of static string space if the string -** will fit but this routine always mallocs for space. -** Return non-zero if we run out of memory. -*/ -#define Dynamicify(P,enc) sqlite3VdbeMemDynamicify(P) - -/* -** The header of a record consists of a sequence variable-length integers. -** These integers are almost always small and are encoded as a single byte. -** The following macro takes advantage this fact to provide a fast decode -** of the integers in a record header. It is faster for the common case -** where the integer is a single byte. It is a little slower when the -** integer is two or more bytes. But overall it is faster. -** -** The following expressions are equivalent: -** -** x = sqlite3GetVarint32( A, &B ); -** -** x = GetVarint( A, B ); -** -*/ -#define GetVarint(A,B) ((B = *(A))<=0x7f ? 
1 : sqlite3GetVarint32(A, &B)) - -/* ** An ephemeral string value (signified by the MEM_Ephem flag) contains ** a pointer to a dynamically allocated string where some other entity -** is responsible for deallocating that string. Because the stack entry -** does not control the string, it might be deleted without the stack -** entry knowing it. +** is responsible for deallocating that string. Because the register +** does not control the string, it might be deleted without the register +** knowing it. ** ** This routine converts an ephemeral string into a dynamically allocated -** string that the stack entry itself controls. In other words, it +** string that the register itself controls. In other words, it ** converts an MEM_Ephem string into an MEM_Dyn string. */ #define Deephemeralize(P) \ @@ -158,10 +138,10 @@ #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) /* -** Argument pMem points at a memory cell that will be passed to a +** Argument pMem points at a register that will be passed to a ** user-defined function or returned to the user as the result of a query. ** The second argument, 'db_enc' is the text encoding used by the vdbe for -** stack variables. This routine sets the pMem->enc and pMem->type +** register variables. This routine sets the pMem->enc and pMem->type ** variables used by the sqlite3_value_*() routines. */ #define storeTypeInfo(A,B) _storeTypeInfo(A) @@ -184,31 +164,77 @@ } /* -** Pop the stack N times. +** Properties of opcodes. The OPFLG_INITIALIZER macro is +** created by mkopcodeh.awk during compilation. Data is obtained +** from the comments following the "case OP_xxxx:" statements in +** this file. */ -static void popStack(Mem **ppTos, int N){ - Mem *pTos = *ppTos; - while( N>0 ){ - N--; - Release(pTos); - pTos--; - } - *ppTos = pTos; +static const unsigned char opcodeProperty[] = OPFLG_INITIALIZER; + +/* +** Return true if an opcode has any of the OPFLG_xxx properties +** specified by mask. +*/ +int sqlite3VdbeOpcodeHasProperty(int opcode, int mask){ + assert( opcode>0 && opcode<(int)sizeof(opcodeProperty) ); + return (opcodeProperty[opcode]&mask)!=0; } /* -** Allocate cursor number iCur. Return a pointer to it. Return NULL +** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL ** if we run out of memory. */ -static Cursor *allocateCursor(Vdbe *p, int iCur, int iDb){ - Cursor *pCx; +static VdbeCursor *allocateCursor( + Vdbe *p, /* The virtual machine */ + int iCur, /* Index of the new VdbeCursor */ + int nField, /* Number of fields in the table or index */ + int iDb, /* When database the cursor belongs to, or -1 */ + int isBtreeCursor /* True for B-Tree vs. pseudo-table or vtab */ +){ + /* Find the memory cell that will be used to store the blob of memory + ** required for this VdbeCursor structure. It is convenient to use a + ** vdbe memory cell to manage the memory allocation required for a + ** VdbeCursor structure for the following reasons: + ** + ** * Sometimes cursor numbers are used for a couple of different + ** purposes in a vdbe program. The different uses might require + ** different sized allocations. Memory cells provide growable + ** allocations. + ** + ** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can + ** be freed lazily via the sqlite3_release_memory() API. This + ** minimizes the number of malloc calls made by the system. + ** + ** Memory cells for cursors are allocated at the top of the address + ** space. Memory cell (p->nMem) corresponds to cursor 0. 
Space for + ** cursor 1 is managed by memory cell (p->nMem-1), etc. + */ + Mem *pMem = &p->aMem[p->nMem-iCur]; + + int nByte; + VdbeCursor *pCx = 0; + nByte = + sizeof(VdbeCursor) + + (isBtreeCursor?sqlite3BtreeCursorSize():0) + + 2*nField*sizeof(u32); + assert( iCurnCursor ); if( p->apCsr[iCur] ){ sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); + p->apCsr[iCur] = 0; } - p->apCsr[iCur] = pCx = sqliteMalloc( sizeof(Cursor) ); - if( pCx ){ + if( SQLITE_OK==sqlite3VdbeMemGrow(pMem, nByte, 0) ){ + p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z; + memset(pMem->z, 0, nByte); pCx->iDb = iDb; + pCx->nField = nField; + if( nField ){ + pCx->aType = (u32 *)&pMem->z[sizeof(VdbeCursor)]; + } + if( isBtreeCursor ){ + pCx->pCursor = (BtCursor*) + &pMem->z[sizeof(VdbeCursor)+2*nField*sizeof(u32)]; + } } return pCx; } @@ -228,9 +254,8 @@ i64 value; sqlite3VdbeChangeEncoding(pRec, SQLITE_UTF8); if( !realnum && sqlite3Atoi64(pRec->z, &value) ){ - sqlite3VdbeMemRelease(pRec); pRec->u.i = value; - pRec->flags = MEM_Int; + MemSetTypeFlag(pRec, MEM_Int); }else{ sqlite3VdbeMemRealify(pRec); } @@ -256,7 +281,11 @@ ** SQLITE_AFF_NONE: ** No-op. pRec is unchanged. */ -static void applyAffinity(Mem *pRec, char affinity, u8 enc){ +static void applyAffinity( + Mem *pRec, /* The value to apply affinity to */ + char affinity, /* The affinity to be applied */ + u8 enc /* Use this text encoding */ +){ if( affinity==SQLITE_AFF_TEXT ){ /* Only attempt the conversion to TEXT if there is an integer or real ** representation (blob and NULL do not get converted) but no string @@ -295,7 +324,11 @@ ** Exported version of applyAffinity(). This one works on sqlite3_value*, ** not the internal Mem* type. */ -void sqlite3ValueApplyAffinity(sqlite3_value *pVal, u8 affinity, u8 enc){ +void sqlite3ValueApplyAffinity( + sqlite3_value *pVal, + u8 affinity, + u8 enc +){ applyAffinity((Mem *)pVal, affinity, enc); } @@ -327,12 +360,12 @@ } sqlite3_snprintf(100, zCsr, "%c", c); - zCsr += strlen(zCsr); + zCsr += sqlite3Strlen30(zCsr); sqlite3_snprintf(100, zCsr, "%d[", pMem->n); - zCsr += strlen(zCsr); + zCsr += sqlite3Strlen30(zCsr); for(i=0; i<16 && in; i++){ sqlite3_snprintf(100, zCsr, "%02X", ((int)pMem->z[i] & 0xFF)); - zCsr += strlen(zCsr); + zCsr += sqlite3Strlen30(zCsr); } for(i=0; i<16 && in; i++){ char z = pMem->z[i]; @@ -341,10 +374,10 @@ } sqlite3_snprintf(100, zCsr, "]%s", encnames[pMem->enc]); - zCsr += strlen(zCsr); + zCsr += sqlite3Strlen30(zCsr); if( f & MEM_Zero ){ - sqlite3_snprintf(100, zCsr,"+%lldz",pMem->u.i); - zCsr += strlen(zCsr); + sqlite3_snprintf(100, zCsr,"+%dz",pMem->u.nZero); + zCsr += sqlite3Strlen30(zCsr); } *zCsr = '\0'; }else if( f & MEM_Str ){ @@ -364,7 +397,7 @@ } k = 2; sqlite3_snprintf(100, &zBuf[k], "%d", pMem->n); - k += strlen(&zBuf[k]); + k += sqlite3Strlen30(&zBuf[k]); zBuf[k++] = '['; for(j=0; j<15 && jn; j++){ u8 c = pMem->z[j]; @@ -376,29 +409,60 @@ } zBuf[k++] = ']'; sqlite3_snprintf(100,&zBuf[k], encnames[pMem->enc]); - k += strlen(&zBuf[k]); + k += sqlite3Strlen30(&zBuf[k]); zBuf[k++] = 0; } } #endif - -#ifdef VDBE_PROFILE +#ifdef SQLITE_DEBUG /* -** The following routine only works on pentium-class processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. 
-*/ -__inline__ unsigned long long int hwtime(void){ - unsigned long long int x; - __asm__("rdtsc\n\t" - "mov %%edx, %%ecx\n\t" - :"=A" (x)); - return x; +** Print the value of a register for tracing purposes: +*/ +static void memTracePrint(FILE *out, Mem *p){ + if( p->flags & MEM_Null ){ + fprintf(out, " NULL"); + }else if( (p->flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){ + fprintf(out, " si:%lld", p->u.i); + }else if( p->flags & MEM_Int ){ + fprintf(out, " i:%lld", p->u.i); +#ifndef SQLITE_OMIT_FLOATING_POINT + }else if( p->flags & MEM_Real ){ + fprintf(out, " r:%g", p->r); +#endif + }else if( p->flags & MEM_RowSet ){ + fprintf(out, " (rowset)"); + }else{ + char zBuf[200]; + sqlite3VdbeMemPrettyPrint(p, zBuf); + fprintf(out, " "); + fprintf(out, "%s", zBuf); + } +} +static void registerTrace(FILE *out, int iReg, Mem *p){ + fprintf(out, "REG[%d] = ", iReg); + memTracePrint(out, p); + fprintf(out, "\n"); } #endif +#ifdef SQLITE_DEBUG +# define REGISTER_TRACE(R,M) if(p->trace)registerTrace(p->trace,R,M) +#else +# define REGISTER_TRACE(R,M) +#endif + + +#ifdef VDBE_PROFILE + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +#include "hwtime.h" + +#endif + /* ** The CHECK_FOR_INTERRUPT macro defined here looks to see if the ** sqlite3_interrupt() routine has been called. If it has been, then @@ -412,6 +476,42 @@ #define CHECK_FOR_INTERRUPT \ if( db->u1.isInterrupted ) goto abort_due_to_interrupt; +#ifdef SQLITE_DEBUG +static int fileExists(sqlite3 *db, const char *zFile){ + int res = 0; + int rc = SQLITE_OK; +#ifdef SQLITE_TEST + /* If we are currently testing IO errors, then do not call OsAccess() to + ** test for the presence of zFile. This is because any IO error that + ** occurs here will not be reported, causing the test to fail. + */ + extern int sqlite3_io_error_pending; + if( sqlite3_io_error_pending<=0 ) +#endif + rc = sqlite3OsAccess(db->pVfs, zFile, SQLITE_ACCESS_EXISTS, &res); + return (res && rc==SQLITE_OK); +} +#endif + +#ifndef NDEBUG +/* +** This function is only called from within an assert() expression. It +** checks that the sqlite3.nTransaction variable is correctly set to +** the number of non-transaction savepoints currently in the +** linked list starting at sqlite3.pSavepoint. +** +** Usage: +** +** assert( checkSavepointCount(db) ); +*/ +static int checkSavepointCount(sqlite3 *db){ + int n = 0; + Savepoint *p; + for(p=db->pSavepoint; p; p=p->pNext) n++; + assert( n==(db->nSavepoint + db->isTransactionSavepoint) ); + return 1; +} +#endif /* ** Execute as much of a VDBE program as we can then return. @@ -429,7 +529,7 @@ ** return SQLITE_BUSY. ** ** If an error occurs, an error message is written to memory obtained -** from sqliteMalloc() and p->zErrMsg is made to point to that memory. +** from sqlite3_malloc() and p->zErrMsg is made to point to that memory. ** The error code is stored in p->rc and this routine returns SQLITE_ERROR. 
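The error message written into p->zErrMsg here is what ultimately comes back from sqlite3_errmsg() on the database handle. The usual calling pattern, sketched with the public API and assuming an open handle db, an existing table t and the standard includes.

/* Prepare/step/finalize with error reporting: an error raised while the
** VDBE runs is transferred to the database handle and read back here. */
sqlite3_stmt *pStmt = 0;
int rc = sqlite3_prepare_v2(db, "INSERT INTO t VALUES(?1)", -1, &pStmt, 0);
if( rc==SQLITE_OK ){
  sqlite3_bind_int(pStmt, 1, 42);
  rc = sqlite3_step(pStmt);
  rc = sqlite3_finalize(pStmt);       /* reports the statement's final error code */
}
if( rc!=SQLITE_OK ){
  fprintf(stderr, "SQL error %d: %s\n", rc, sqlite3_errmsg(db));
}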
** ** If the callback ever returns non-zero, then the program exits @@ -452,21 +552,25 @@ int rc = SQLITE_OK; /* Value to return */ sqlite3 *db = p->db; /* The database */ u8 encoding = ENC(db); /* The database encoding */ - Mem *pTos; /* Top entry in the operand stack */ + Mem *pIn1 = 0; /* 1st input operand */ + Mem *pIn2 = 0; /* 2nd input operand */ + Mem *pIn3 = 0; /* 3rd input operand */ + Mem *pOut = 0; /* Output operand */ + u8 opProperty; + int iCompare = 0; /* Result of last OP_Compare operation */ + int *aPermute = 0; /* Permutation of columns for OP_Compare */ #ifdef VDBE_PROFILE - unsigned long long start; /* CPU clock count at start of opcode */ + u64 start; /* CPU clock count at start of opcode */ int origPc; /* Program counter at start of opcode */ #endif #ifndef SQLITE_OMIT_PROGRESS_CALLBACK int nProgressOps = 0; /* Opcodes executed since progress callback. */ #endif -#ifndef NDEBUG - Mem *pStackLimit; -#endif + /*** INSERT STACK UNION HERE ***/ - if( p->magic!=VDBE_MAGIC_RUN ) return SQLITE_MISUSE; + assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */ assert( db->magic==SQLITE_MAGIC_BUSY ); - pTos = p->pTos; + sqlite3VdbeMutexArrayEnter(p); if( p->rc==SQLITE_NOMEM ){ /* This happens if a malloc() inside a call to sqlite3_column_text() or ** sqlite3_column_text16() failed. */ @@ -475,17 +579,14 @@ assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY ); p->rc = SQLITE_OK; assert( p->explain==0 ); - if( p->popStack ){ - popStack(&pTos, p->popStack); - p->popStack = 0; - } - p->resOnStack = 0; + p->pResultSet = 0; db->busyHandler.nBusy = 0; CHECK_FOR_INTERRUPT; sqlite3VdbeIOTraceSql(p); #ifdef SQLITE_DEBUG - if( (p->db->flags & SQLITE_VdbeListing)!=0 - || sqlite3OsFileExists("vdbe_explain") + sqlite3BeginBenignMalloc(); + if( p->pc==0 + && ((p->db->flags & SQLITE_VdbeListing) || fileExists(db, "vdbe_explain")) ){ int i; printf("VDBE Program Listing:\n"); @@ -494,17 +595,17 @@ sqlite3VdbePrintOp(stdout, i, &p->aOp[i]); } } - if( sqlite3OsFileExists("vdbe_trace") ){ + if( fileExists(db, "vdbe_trace") ){ p->trace = stdout; } + sqlite3EndBenignMalloc(); #endif for(pc=p->pc; rc==SQLITE_OK; pc++){ assert( pc>=0 && pcnOp ); - assert( pTos<=&p->aStack[pc] ); - if( sqlite3MallocFailed() ) goto no_mem; + if( db->mallocFailed ) goto no_mem; #ifdef VDBE_PROFILE origPc = pc; - start = hwtime(); + start = sqlite3Hwtime(); #endif pOp = &p->aOp[pc]; @@ -518,8 +619,12 @@ } sqlite3VdbePrintOp(p->trace, pc, pOp); } - if( p->trace==0 && pc==0 && sqlite3OsFileExists("vdbe_sqltrace") ){ - sqlite3VdbePrintSql(p); + if( p->trace==0 && pc==0 ){ + sqlite3BeginBenignMalloc(); + if( fileExists(db, "vdbe_sqltrace") ){ + sqlite3VdbePrintSql(p); + } + sqlite3EndBenignMalloc(); } #endif @@ -551,7 +656,7 @@ if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; if( prc!=0 ){ rc = SQLITE_INTERRUPT; - goto vdbe_halt; + goto vdbe_error_halt; } nProgressOps = 0; } @@ -559,22 +664,64 @@ } #endif -#ifndef NDEBUG - /* This is to check that the return value of static function - ** opcodeNoPush() (see vdbeaux.c) returns values that match the - ** implementation of the virtual machine in this file. If - ** opcodeNoPush() returns non-zero, then the stack is guarenteed - ** not to grow when the opcode is executed. If it returns zero, then - ** the stack may grow by at most 1. + /* Do common setup processing for any opcode that is marked + ** with the "out2-prerelease" tag. Such opcodes have a single + ** output which is specified by the P2 parameter. The P2 register + ** is initialized to a NULL. 
+ */ + opProperty = opcodeProperty[pOp->opcode]; + if( (opProperty & OPFLG_OUT2_PRERELEASE)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=p->nMem ); + pOut = &p->aMem[pOp->p2]; + sqlite3VdbeMemReleaseExternal(pOut); + pOut->flags = MEM_Null; + pOut->n = 0; + }else + + /* Do common setup for opcodes marked with one of the following + ** combinations of properties. ** - ** The global wrapper function sqlite3VdbeOpcodeUsesStack() is not - ** available if NDEBUG is defined at build time. - */ - pStackLimit = pTos; - if( !sqlite3VdbeOpcodeNoPush(pOp->opcode) ){ - pStackLimit++; + ** in1 + ** in1 in2 + ** in1 in2 out3 + ** in1 in3 + ** + ** Variables pIn1, pIn2, and pIn3 are made to point to appropriate + ** registers for inputs. Variable pOut points to the output register. + */ + if( (opProperty & OPFLG_IN1)!=0 ){ + assert( pOp->p1>0 ); + assert( pOp->p1<=p->nMem ); + pIn1 = &p->aMem[pOp->p1]; + REGISTER_TRACE(pOp->p1, pIn1); + if( (opProperty & OPFLG_IN2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=p->nMem ); + pIn2 = &p->aMem[pOp->p2]; + REGISTER_TRACE(pOp->p2, pIn2); + if( (opProperty & OPFLG_OUT3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=p->nMem ); + pOut = &p->aMem[pOp->p3]; + } + }else if( (opProperty & OPFLG_IN3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=p->nMem ); + pIn3 = &p->aMem[pOp->p3]; + REGISTER_TRACE(pOp->p3, pIn3); + } + }else if( (opProperty & OPFLG_IN2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=p->nMem ); + pIn2 = &p->aMem[pOp->p2]; + REGISTER_TRACE(pOp->p2, pIn2); + }else if( (opProperty & OPFLG_IN3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=p->nMem ); + pIn3 = &p->aMem[pOp->p3]; + REGISTER_TRACE(pOp->p3, pIn3); } -#endif switch( pOp->opcode ){ @@ -596,10 +743,10 @@ ** case statement is followed by a comment of the form "/# same as ... #/" ** that comment is used to determine the particular value of the opcode. ** -** If a comment on the same line as the "case OP_" construction contains -** the word "no-push", then the opcode is guarenteed not to grow the -** vdbe stack when it is executed. See function opcode() in -** vdbeaux.c for details. +** Other keywords in the comment that follows each case are used to +** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[]. +** Keywords include: in1, in2, in3, out2_prerelease, out2, out3. See +** the mkopcodeh.awk script for additional information. ** ** Documentation about VDBE opcodes is generated by scanning this file ** for lines of that contain "Opcode:". That line and all subsequent @@ -613,52 +760,75 @@ ** *****************************************************************************/ -/* Opcode: Goto * P2 * +/* Opcode: Goto * P2 * * * ** ** An unconditional jump to address P2. ** The next instruction executed will be ** the one at index P2 from the beginning of ** the program. */ -case OP_Goto: { /* no-push */ +case OP_Goto: { /* jump */ CHECK_FOR_INTERRUPT; pc = pOp->p2 - 1; break; } -/* Opcode: Gosub * P2 * +/* Opcode: Gosub P1 P2 * * * ** -** Push the current address plus 1 onto the return address stack +** Write the current address onto register P1 ** and then jump to address P2. -** -** The return address stack is of limited depth. If too many -** OP_Gosub operations occur without intervening OP_Returns, then -** the return address stack will fill up and processing will abort -** with a fatal error. 
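/* [Editor's note: illustrative sketch, not part of the original patch.
** The preamble added above looks up per-opcode flags in opcodeProperty[]
** (a table generated by mkopcodeh.awk) and pre-loads pIn1/pIn2/pIn3/pOut,
** so individual opcode cases no longer decode their own operands.  The
** general pattern is a bitmask table indexed by opcode; the flag values,
** register type, and helper below are invented for illustration and are
** not the real OPFLG_* constants or Mem structure: */
#define FLG_IN1   0x01   /* operand P1 names an input register  */
#define FLG_IN2   0x02   /* operand P2 names an input register  */
#define FLG_OUT3  0x04   /* operand P3 names the output register */

typedef struct Reg { int flags; long long i; } Reg;

static void decodeOperands(unsigned char prop, Reg *aMem,
                           int p1, int p2, int p3,
                           Reg **ppIn1, Reg **ppIn2, Reg **ppOut){
  if( prop & FLG_IN1 )  *ppIn1 = &aMem[p1];
  if( prop & FLG_IN2 )  *ppIn2 = &aMem[p2];
  if( prop & FLG_OUT3 ) *ppOut = &aMem[p3];
}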
-*/ -case OP_Gosub: { /* no-push */ - assert( p->returnDepthreturnStack)/sizeof(p->returnStack[0]) ); - p->returnStack[p->returnDepth++] = pc+1; +*/ +case OP_Gosub: { /* jump */ + assert( pOp->p1>0 ); + assert( pOp->p1<=p->nMem ); + pIn1 = &p->aMem[pOp->p1]; + assert( (pIn1->flags & MEM_Dyn)==0 ); + pIn1->flags = MEM_Int; + pIn1->u.i = pc; + REGISTER_TRACE(pOp->p1, pIn1); pc = pOp->p2 - 1; break; } -/* Opcode: Return * * * +/* Opcode: Return P1 * * * * +** +** Jump to the next instruction after the address in register P1. +*/ +case OP_Return: { /* in1 */ + assert( pIn1->flags & MEM_Int ); + pc = (int)pIn1->u.i; + break; +} + +/* Opcode: Yield P1 * * * * ** -** Jump immediately to the next instruction after the last unreturned -** OP_Gosub. If an OP_Return has occurred for all OP_Gosubs, then -** processing aborts with a fatal error. +** Swap the program counter with the value in register P1. */ -case OP_Return: { /* no-push */ - assert( p->returnDepth>0 ); - p->returnDepth--; - pc = p->returnStack[p->returnDepth] - 1; +case OP_Yield: { /* in1 */ + int pcDest; + assert( (pIn1->flags & MEM_Dyn)==0 ); + pIn1->flags = MEM_Int; + pcDest = (int)pIn1->u.i; + pIn1->u.i = pc; + REGISTER_TRACE(pOp->p1, pIn1); + pc = pcDest; break; } -/* Opcode: Halt P1 P2 P3 +/* Opcode: HaltIfNull P1 P2 P3 P4 * +** +** Check the value in register P3. If is is NULL then Halt using +** parameter P1, P2, and P4 as if this were a Halt instruction. If the +** value in register P3 is not NULL, then this routine is a no-op. +*/ +case OP_HaltIfNull: { /* in3 */ + if( (pIn3->flags & MEM_Null)==0 ) break; + /* Fall through into OP_Halt */ +} + +/* Opcode: Halt P1 P2 * P4 * ** -** Exit immediately. All open cursors, Fifos, etc are closed +** Exit immediately. All open cursors, etc are closed ** automatically. ** ** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(), @@ -669,560 +839,468 @@ ** then back out all changes that have occurred during this execution of the ** VDBE, but do not rollback the transaction. ** -** If P3 is not null then it is an error message string. +** If P4 is not null then it is an error message string. ** ** There is an implied "Halt 0 0 0" instruction inserted at the very end of ** every program. So a jump past the last instruction of the program ** is the same as executing Halt. */ -case OP_Halt: { /* no-push */ - p->pTos = pTos; +case OP_Halt: { p->rc = pOp->p1; p->pc = pc; - p->errorAction = pOp->p2; - if( pOp->p3 ){ - sqlite3SetString(&p->zErrMsg, pOp->p3, (char*)0); + p->errorAction = (u8)pOp->p2; + if( pOp->p4.z ){ + sqlite3SetString(&p->zErrMsg, db, "%s", pOp->p4.z); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK ); if( rc==SQLITE_BUSY ){ - p->rc = SQLITE_BUSY; - return SQLITE_BUSY; + p->rc = rc = SQLITE_BUSY; + }else{ + rc = p->rc ? SQLITE_ERROR : SQLITE_DONE; } - return p->rc ? SQLITE_ERROR : SQLITE_DONE; + goto vdbe_return; } -/* Opcode: Integer P1 * * +/* Opcode: Integer P1 P2 * * * ** -** The 32-bit integer value P1 is pushed onto the stack. +** The 32-bit integer value P1 is written into register P2. */ -case OP_Integer: { - pTos++; - pTos->flags = MEM_Int; - pTos->u.i = pOp->p1; +case OP_Integer: { /* out2-prerelease */ + pOut->flags = MEM_Int; + pOut->u.i = pOp->p1; break; } -/* Opcode: Int64 * * P3 +/* Opcode: Int64 * P2 * P4 * ** -** P3 is a string representation of an integer. Convert that integer -** to a 64-bit value and push it onto the stack. +** P4 is a pointer to a 64-bit integer value. +** Write that value into register P2. 
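/* [Editor's note: illustrative sketch, not part of the original patch.
** The hunk above retires the fixed-depth returnStack[] (which could
** overflow) in favour of keeping the return address in an ordinary
** register: OP_Gosub writes the current address into P1, OP_Return jumps
** back through it, and OP_Yield swaps the program counter with P1, which
** is enough to run two co-routines that hand control back and forth.
** Stripped of the Mem machinery, the yield step is just a swap; the
** helper name below is hypothetical: */
static int yield(int pc, long long *pReg){
  int pcDest = (int)*pReg;   /* address at which to resume */
  *pReg = pc;                /* remember where we left off  */
  return pcDest;             /* caller sets pc = pcDest      */
}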
*/ -case OP_Int64: { - pTos++; - assert( pOp->p3!=0 ); - pTos->flags = MEM_Str|MEM_Static|MEM_Term; - pTos->z = pOp->p3; - pTos->n = strlen(pTos->z); - pTos->enc = SQLITE_UTF8; - pTos->u.i = sqlite3VdbeIntValue(pTos); - pTos->flags |= MEM_Int; +case OP_Int64: { /* out2-prerelease */ + assert( pOp->p4.pI64!=0 ); + pOut->flags = MEM_Int; + pOut->u.i = *pOp->p4.pI64; break; } -/* Opcode: Real * * P3 +/* Opcode: Real * P2 * P4 * ** -** The string value P3 is converted to a real and pushed on to the stack. +** P4 is a pointer to a 64-bit floating point value. +** Write that value into register P2. */ -case OP_Real: { /* same as TK_FLOAT, */ - pTos++; - pTos->flags = MEM_Str|MEM_Static|MEM_Term; - pTos->z = pOp->p3; - pTos->n = strlen(pTos->z); - pTos->enc = SQLITE_UTF8; - pTos->r = sqlite3VdbeRealValue(pTos); - pTos->flags |= MEM_Real; - sqlite3VdbeChangeEncoding(pTos, encoding); +case OP_Real: { /* same as TK_FLOAT, out2-prerelease */ + pOut->flags = MEM_Real; + assert( !sqlite3IsNaN(*pOp->p4.pReal) ); + pOut->r = *pOp->p4.pReal; break; } -/* Opcode: String8 * * P3 +/* Opcode: String8 * P2 * P4 * ** -** P3 points to a nul terminated UTF-8 string. This opcode is transformed +** P4 points to a nul terminated UTF-8 string. This opcode is transformed ** into an OP_String before it is executed for the first time. */ -case OP_String8: { /* same as TK_STRING */ - assert( pOp->p3!=0 ); +case OP_String8: { /* same as TK_STRING, out2-prerelease */ + assert( pOp->p4.z!=0 ); pOp->opcode = OP_String; - pOp->p1 = strlen(pOp->p3); - assert( SQLITE_MAX_SQL_LENGTH < SQLITE_MAX_LENGTH ); - assert( pOp->p1 < SQLITE_MAX_LENGTH ); + pOp->p1 = sqlite3Strlen30(pOp->p4.z); #ifndef SQLITE_OMIT_UTF16 if( encoding!=SQLITE_UTF8 ){ - pTos++; - sqlite3VdbeMemSetStr(pTos, pOp->p3, -1, SQLITE_UTF8, SQLITE_STATIC); - if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pTos, encoding) ) goto no_mem; - if( SQLITE_OK!=sqlite3VdbeMemDynamicify(pTos) ) goto no_mem; - pTos->flags &= ~(MEM_Dyn); - pTos->flags |= MEM_Static; - if( pOp->p3type==P3_DYNAMIC ){ - sqliteFree(pOp->p3); - } - pOp->p3type = P3_DYNAMIC; - pOp->p3 = pTos->z; - pOp->p1 = pTos->n; - assert( pOp->p1 < SQLITE_MAX_LENGTH ); /* Due to SQLITE_MAX_SQL_LENGTH */ - break; + rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC); + if( rc==SQLITE_TOOBIG ) goto too_big; + if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem; + assert( pOut->zMalloc==pOut->z ); + assert( pOut->flags & MEM_Dyn ); + pOut->zMalloc = 0; + pOut->flags |= MEM_Static; + pOut->flags &= ~MEM_Dyn; + if( pOp->p4type==P4_DYNAMIC ){ + sqlite3DbFree(db, pOp->p4.z); + } + pOp->p4type = P4_DYNAMIC; + pOp->p4.z = pOut->z; + pOp->p1 = pOut->n; } #endif - /* Otherwise fall through to the next case, OP_String */ + if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ + goto too_big; + } + /* Fall through to the next case, OP_String */ } -/* Opcode: String P1 * P3 +/* Opcode: String P1 P2 * P4 * ** -** The string value P3 of length P1 (bytes) is pushed onto the stack. +** The string value P4 of length P1 (bytes) is stored in register P2. 
*/ -case OP_String: { - assert( pOp->p1 < SQLITE_MAX_LENGTH ); /* Due to SQLITE_MAX_SQL_LENGTH */ - pTos++; - assert( pOp->p3!=0 ); - pTos->flags = MEM_Str|MEM_Static|MEM_Term; - pTos->z = pOp->p3; - pTos->n = pOp->p1; - pTos->enc = encoding; +case OP_String: { /* out2-prerelease */ + assert( pOp->p4.z!=0 ); + pOut->flags = MEM_Str|MEM_Static|MEM_Term; + pOut->z = pOp->p4.z; + pOut->n = pOp->p1; + pOut->enc = encoding; + UPDATE_MAX_BLOBSIZE(pOut); break; } -/* Opcode: Null * * * +/* Opcode: Null * P2 * * * ** -** Push a NULL onto the stack. +** Write a NULL into register P2. */ -case OP_Null: { - pTos++; - pTos->flags = MEM_Null; - pTos->n = 0; +case OP_Null: { /* out2-prerelease */ break; } -#ifndef SQLITE_OMIT_BLOB_LITERAL -/* Opcode: HexBlob * * P3 -** -** P3 is an UTF-8 SQL hex encoding of a blob. The blob is pushed onto the -** vdbe stack. -** -** The first time this instruction executes, in transforms itself into a -** 'Blob' opcode with a binary blob as P3. -*/ -case OP_HexBlob: { /* same as TK_BLOB */ - pOp->opcode = OP_Blob; - pOp->p1 = strlen(pOp->p3)/2; - assert( SQLITE_MAX_SQL_LENGTH < SQLITE_MAX_LENGTH ); - assert( pOp->p1 < SQLITE_MAX_LENGTH ); - if( pOp->p1 ){ - char *zBlob = sqlite3HexToBlob(pOp->p3); - if( !zBlob ) goto no_mem; - if( pOp->p3type==P3_DYNAMIC ){ - sqliteFree(pOp->p3); - } - pOp->p3 = zBlob; - pOp->p3type = P3_DYNAMIC; - }else{ - if( pOp->p3type==P3_DYNAMIC ){ - sqliteFree(pOp->p3); - } - pOp->p3type = P3_STATIC; - pOp->p3 = ""; - } - - /* Fall through to the next case, OP_Blob. */ -} - -/* Opcode: Blob P1 * P3 +/* Opcode: Blob P1 P2 * P4 ** -** P3 points to a blob of data P1 bytes long. Push this -** value onto the stack. This instruction is not coded directly +** P4 points to a blob of data P1 bytes long. Store this +** blob in register P2. This instruction is not coded directly ** by the compiler. Instead, the compiler layer specifies ** an OP_HexBlob opcode, with the hex string representation of -** the blob as P3. This opcode is transformed to an OP_Blob +** the blob as P4. This opcode is transformed to an OP_Blob ** the first time it is executed. */ -case OP_Blob: { - pTos++; - assert( pOp->p1 < SQLITE_MAX_LENGTH ); /* Due to SQLITE_MAX_SQL_LENGTH */ - sqlite3VdbeMemSetStr(pTos, pOp->p3, pOp->p1, 0, 0); - pTos->enc = encoding; - break; -} -#endif /* SQLITE_OMIT_BLOB_LITERAL */ - -/* Opcode: Variable P1 * * -** -** Push the value of variable P1 onto the stack. A variable is -** an unknown in the original SQL string as handed to sqlite3_compile(). -** Any occurance of the '?' character in the original SQL is considered -** a variable. Variables in the SQL string are number from left to -** right beginning with 1. The values of variables are set using the -** sqlite3_bind() API. -*/ -case OP_Variable: { - int j = pOp->p1 - 1; - Mem *pVar; - assert( j>=0 && jnVar ); - - pVar = &p->aVar[j]; - if( sqlite3VdbeMemTooBig(pVar) ){ - goto too_big; - } - pTos++; - sqlite3VdbeMemShallowCopy(pTos, &p->aVar[j], MEM_Static); +case OP_Blob: { /* out2-prerelease */ + assert( pOp->p1 <= SQLITE_MAX_LENGTH ); + sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); + pOut->enc = encoding; + UPDATE_MAX_BLOBSIZE(pOut); break; } -/* Opcode: Pop P1 * * +/* Opcode: Variable P1 P2 P3 P4 * +** +** Transfer the values of bound parameters P1..P1+P3-1 into registers +** P2..P2+P3-1. ** -** P1 elements are popped off of the top of stack and discarded. +** If the parameter is named, then its name appears in P4 and P3==1. +** The P4 value is used by sqlite3_bind_parameter_name(). 
*/ -case OP_Pop: { /* no-push */ - assert( pOp->p1>=0 ); - popStack(&pTos, pOp->p1); - assert( pTos>=&p->aStack[-1] ); +case OP_Variable: { + int p1; /* Variable to copy from */ + int p2; /* Register to copy to */ + int n; /* Number of values left to copy */ + Mem *pVar; /* Value being transferred */ + + p1 = pOp->p1 - 1; + p2 = pOp->p2; + n = pOp->p3; + assert( p1>=0 && p1+n<=p->nVar ); + assert( p2>=1 && p2+n-1<=p->nMem ); + assert( pOp->p4.z==0 || pOp->p3==1 ); + + while( n-- > 0 ){ + pVar = &p->aVar[p1++]; + if( sqlite3VdbeMemTooBig(pVar) ){ + goto too_big; + } + pOut = &p->aMem[p2++]; + sqlite3VdbeMemReleaseExternal(pOut); + pOut->flags = MEM_Null; + sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); + UPDATE_MAX_BLOBSIZE(pOut); + } break; } -/* Opcode: Dup P1 P2 * -** -** A copy of the P1-th element of the stack -** is made and pushed onto the top of the stack. -** The top of the stack is element 0. So the -** instruction "Dup 0 0 0" will make a copy of the -** top of the stack. +/* Opcode: Move P1 P2 P3 * * ** -** If the content of the P1-th element is a dynamically -** allocated string, then a new copy of that string -** is made if P2==0. If P2!=0, then just a pointer -** to the string is copied. -** -** Also see the Pull instruction. +** Move the values in register P1..P1+P3-1 over into +** registers P2..P2+P3-1. Registers P1..P1+P1-1 are +** left holding a NULL. It is an error for register ranges +** P1..P1+P3-1 and P2..P2+P3-1 to overlap. */ -case OP_Dup: { - Mem *pFrom = &pTos[-pOp->p1]; - assert( pFrom<=pTos && pFrom>=p->aStack ); - pTos++; - sqlite3VdbeMemShallowCopy(pTos, pFrom, MEM_Ephem); - if( pOp->p2 ){ - Deephemeralize(pTos); +case OP_Move: { + char *zMalloc; /* Holding variable for allocated memory */ + int n; /* Number of registers left to copy */ + int p1; /* Register to copy from */ + int p2; /* Register to copy to */ + + n = pOp->p3; + p1 = pOp->p1; + p2 = pOp->p2; + assert( n>0 && p1>0 && p2>0 ); + assert( p1+n<=p2 || p2+n<=p1 ); + + pIn1 = &p->aMem[p1]; + pOut = &p->aMem[p2]; + while( n-- ){ + assert( pOut<=&p->aMem[p->nMem] ); + assert( pIn1<=&p->aMem[p->nMem] ); + zMalloc = pOut->zMalloc; + pOut->zMalloc = 0; + sqlite3VdbeMemMove(pOut, pIn1); + pIn1->zMalloc = zMalloc; + REGISTER_TRACE(p2++, pOut); + pIn1++; + pOut++; } break; } -/* Opcode: Pull P1 * * +/* Opcode: Copy P1 P2 * * * ** -** The P1-th element is removed from its current location on -** the stack and pushed back on top of the stack. The -** top of the stack is element 0, so "Pull 0 0 0" is -** a no-op. "Pull 1 0 0" swaps the top two elements of -** the stack. +** Make a copy of register P1 into register P2. ** -** See also the Dup instruction. +** This instruction makes a deep copy of the value. A duplicate +** is made of any string or blob constant. See also OP_SCopy. 
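/* [Editor's note: illustrative sketch, not part of the original patch.
** OP_Copy, documented just above, makes a deep copy of the value into its
** own buffer, while its sibling OP_SCopy (a little further down) copies
** only the pointer, so the copy becomes invalid the moment the original
** is released.  The same distinction in plain C, with hypothetical helper
** and type names: */
#include <stdlib.h>
#include <string.h>

typedef struct Val { char *z; int n; } Val;

static int deepCopy(Val *dst, const Val *src){        /* like OP_Copy  */
  dst->z = malloc(src->n);
  if( dst->z==0 ) return -1;
  memcpy(dst->z, src->z, src->n);
  dst->n = src->n;
  return 0;
}

static void shallowCopy(Val *dst, const Val *src){     /* like OP_SCopy */
  *dst = *src;   /* dst->z aliases src->z: invalid once src->z is freed */
}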
*/ -case OP_Pull: { /* no-push */ - Mem *pFrom = &pTos[-pOp->p1]; - int i; - Mem ts; - - ts = *pFrom; - Deephemeralize(pTos); - for(i=0; ip1; i++, pFrom++){ - Deephemeralize(&pFrom[1]); - assert( (pFrom[1].flags & MEM_Ephem)==0 ); - *pFrom = pFrom[1]; - if( pFrom->flags & MEM_Short ){ - assert( pFrom->flags & (MEM_Str|MEM_Blob) ); - assert( pFrom->z==pFrom[1].zShort ); - pFrom->z = pFrom->zShort; - } - } - *pTos = ts; - if( pTos->flags & MEM_Short ){ - assert( pTos->flags & (MEM_Str|MEM_Blob) ); - assert( pTos->z==pTos[-pOp->p1].zShort ); - pTos->z = pTos->zShort; - } +case OP_Copy: { /* in1 */ + assert( pOp->p2>0 ); + assert( pOp->p2<=p->nMem ); + pOut = &p->aMem[pOp->p2]; + assert( pOut!=pIn1 ); + sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); + Deephemeralize(pOut); + REGISTER_TRACE(pOp->p2, pOut); break; } -/* Opcode: Push P1 * * +/* Opcode: SCopy P1 P2 * * * ** -** Overwrite the value of the P1-th element down on the -** stack (P1==0 is the top of the stack) with the value -** of the top of the stack. Then pop the top of the stack. +** Make a shallow copy of register P1 into register P2. +** +** This instruction makes a shallow copy of the value. If the value +** is a string or blob, then the copy is only a pointer to the +** original and hence if the original changes so will the copy. +** Worse, if the original is deallocated, the copy becomes invalid. +** Thus the program must guarantee that the original will not change +** during the lifetime of the copy. Use OP_Copy to make a complete +** copy. */ -case OP_Push: { /* no-push */ - Mem *pTo = &pTos[-pOp->p1]; - - assert( pTo>=p->aStack ); - sqlite3VdbeMemMove(pTo, pTos); - pTos--; +case OP_SCopy: { /* in1 */ + REGISTER_TRACE(pOp->p1, pIn1); + assert( pOp->p2>0 ); + assert( pOp->p2<=p->nMem ); + pOut = &p->aMem[pOp->p2]; + assert( pOut!=pIn1 ); + sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); + REGISTER_TRACE(pOp->p2, pOut); break; } -/* Opcode: Callback P1 * * +/* Opcode: ResultRow P1 P2 * * * ** -** The top P1 values on the stack represent a single result row from -** a query. This opcode causes the sqlite3_step() call to terminate +** The registers P1 through P1+P2-1 contain a single row of +** results. This opcode causes the sqlite3_step() call to terminate ** with an SQLITE_ROW return code and it sets up the sqlite3_stmt ** structure to provide access to the top P1 values as the result -** row. When the sqlite3_step() function is run again, the top P1 -** values will be automatically popped from the stack before the next -** instruction executes. +** row. */ -case OP_Callback: { /* no-push */ +case OP_ResultRow: { Mem *pMem; - Mem *pFirstColumn; - assert( p->nResColumn==pOp->p1 ); - - /* Data in the pager might be moved or changed out from under us - ** in between the return from this sqlite3_step() call and the - ** next call to sqlite3_step(). So deephermeralize everything on - ** the stack. Note that ephemeral data is never stored in memory - ** cells so we do not have to worry about them. + int i; + assert( p->nResColumn==pOp->p2 ); + assert( pOp->p1>0 ); + assert( pOp->p1+pOp->p2<=p->nMem+1 ); + + /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then + ** DML statements invoke this opcode to return the number of rows + ** modified to the user. This is the only way that a VM that + ** opens a statement transaction may invoke this opcode. + ** + ** In case this is such a statement, close any statement transaction + ** opened by this VM before returning control to the user. 
This is to + ** ensure that statement-transactions are always nested, not overlapping. + ** If the open statement-transaction is not closed here, then the user + ** may step another VM that opens its own statement transaction. This + ** may lead to overlapping statement transactions. + ** + ** The statement transaction is never a top-level transaction. Hence + ** the RELEASE call below can never fail. */ - pFirstColumn = &pTos[0-pOp->p1]; - for(pMem = p->aStack; pMemiStatement==0 || db->flags&SQLITE_CountRows ); + rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE); + if( NEVER(rc!=SQLITE_OK) ){ + break; } /* Invalidate all ephemeral cursor row caches */ p->cacheCtr = (p->cacheCtr + 2)|1; /* Make sure the results of the current row are \000 terminated - ** and have an assigned type. The results are deephemeralized as + ** and have an assigned type. The results are de-ephemeralized as ** as side effect. */ - for(; pMem<=pTos; pMem++ ){ - sqlite3VdbeMemNulTerminate(pMem); - storeTypeInfo(pMem, encoding); + pMem = p->pResultSet = &p->aMem[pOp->p1]; + for(i=0; ip2; i++){ + sqlite3VdbeMemNulTerminate(&pMem[i]); + storeTypeInfo(&pMem[i], encoding); + REGISTER_TRACE(pOp->p1+i, &pMem[i]); } + if( db->mallocFailed ) goto no_mem; - /* Set up the statement structure so that it will pop the current - ** results from the stack when the statement returns. + /* Return SQLITE_ROW */ - p->resOnStack = 1; - p->nCallback++; - p->popStack = pOp->p1; p->pc = pc + 1; - p->pTos = pTos; - return SQLITE_ROW; + rc = SQLITE_ROW; + goto vdbe_return; } -/* Opcode: Concat P1 P2 * +/* Opcode: Concat P1 P2 P3 * * +** +** Add the text in register P1 onto the end of the text in +** register P2 and store the result in register P3. +** If either the P1 or P2 text are NULL then store NULL in P3. ** -** Look at the first P1+2 elements of the stack. Append them all -** together with the lowest element first. The original P1+2 elements -** are popped from the stack if P2==0 and retained if P2==1. If -** any element of the stack is NULL, then the result is NULL. +** P3 = P2 || P1 ** -** When P1==1, this routine makes a copy of the top stack element -** into memory obtained from sqliteMalloc(). +** It is illegal for P1 and P3 to be the same register. Sometimes, +** if P3 is the same register as P2, the implementation is able +** to avoid a memcpy(). */ -case OP_Concat: { /* same as TK_CONCAT */ - char *zNew; +case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */ i64 nByte; - int nField; - int i, j; - Mem *pTerm; - /* Loop through the stack elements to see how long the result will be. */ - nField = pOp->p1 + 2; - pTerm = &pTos[1-nField]; - nByte = 0; - for(i=0; ip2==0 || (pTerm->flags&MEM_Str) ); - if( pTerm->flags&MEM_Null ){ - nByte = -1; - break; - } - ExpandBlob(pTerm); - Stringify(pTerm, encoding); - nByte += pTerm->n; + assert( pIn1!=pOut ); + if( (pIn1->flags | pIn2->flags) & MEM_Null ){ + sqlite3VdbeMemSetNull(pOut); + break; } - - if( nByte<0 ){ - /* If nByte is less than zero, then there is a NULL value on the stack. - ** In this case just pop the values off the stack (if required) and - ** push on a NULL. - */ - if( pOp->p2==0 ){ - popStack(&pTos, nField); - } - pTos++; - pTos->flags = MEM_Null; - }else{ - /* Otherwise malloc() space for the result and concatenate all the - ** stack values. 
- */ - if( nByte+2>SQLITE_MAX_LENGTH ){ - goto too_big; - } - zNew = sqliteMallocRaw( nByte+2 ); - if( zNew==0 ) goto no_mem; - j = 0; - pTerm = &pTos[1-nField]; - for(i=j=0; in; - assert( pTerm->flags & (MEM_Str|MEM_Blob) ); - memcpy(&zNew[j], pTerm->z, n); - j += n; - } - zNew[j] = 0; - zNew[j+1] = 0; - assert( j==nByte ); - - if( pOp->p2==0 ){ - popStack(&pTos, nField); - } - pTos++; - pTos->n = j; - pTos->flags = MEM_Str|MEM_Dyn|MEM_Term; - pTos->xDel = 0; - pTos->enc = encoding; - pTos->z = zNew; + if( ExpandBlob(pIn1) || ExpandBlob(pIn2) ) goto no_mem; + Stringify(pIn1, encoding); + Stringify(pIn2, encoding); + nByte = pIn1->n + pIn2->n; + if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ + goto too_big; + } + MemSetTypeFlag(pOut, MEM_Str); + if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){ + goto no_mem; } + if( pOut!=pIn2 ){ + memcpy(pOut->z, pIn2->z, pIn2->n); + } + memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n); + pOut->z[nByte] = 0; + pOut->z[nByte+1] = 0; + pOut->flags |= MEM_Term; + pOut->n = (int)nByte; + pOut->enc = encoding; + UPDATE_MAX_BLOBSIZE(pOut); break; } -/* Opcode: Add * * * +/* Opcode: Add P1 P2 P3 * * ** -** Pop the top two elements from the stack, add them together, -** and push the result back onto the stack. If either element -** is a string then it is converted to a double using the atof() -** function before the addition. -** If either operand is NULL, the result is NULL. +** Add the value in register P1 to the value in register P2 +** and store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: Multiply * * * +/* Opcode: Multiply P1 P2 P3 * * ** -** Pop the top two elements from the stack, multiply them together, -** and push the result back onto the stack. If either element -** is a string then it is converted to a double using the atof() -** function before the multiplication. -** If either operand is NULL, the result is NULL. +** +** Multiply the value in register P1 by the value in register P2 +** and store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: Subtract * * * +/* Opcode: Subtract P1 P2 P3 * * ** -** Pop the top two elements from the stack, subtract the -** first (what was on top of the stack) from the second (the -** next on stack) -** and push the result back onto the stack. If either element -** is a string then it is converted to a double using the atof() -** function before the subtraction. -** If either operand is NULL, the result is NULL. +** Subtract the value in register P1 from the value in register P2 +** and store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: Divide * * * +/* Opcode: Divide P1 P2 P3 * * ** -** Pop the top two elements from the stack, divide the -** first (what was on top of the stack) from the second (the -** next on stack) -** and push the result back onto the stack. If either element -** is a string then it is converted to a double using the atof() -** function before the division. Division by zero returns NULL. -** If either operand is NULL, the result is NULL. +** Divide the value in register P1 by the value in register P2 +** and store the result in register P3. If the value in register P2 +** is zero, then the result is NULL. +** If either input is NULL, the result is NULL. 
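/* [Editor's note: illustrative sketch, not part of the original patch.
** The rewritten OP_Concat in the hunk above computes P3 = P2 || P1
** straight into the output register, yields NULL when either input is
** NULL, and skips the first memcpy() when the output register is P2
** itself.  The observable semantics, in ordinary C with a hypothetical
** helper name: */
#include <stdlib.h>
#include <string.h>

/* Return a malloc'd copy of "b || a", or NULL if either input is NULL. */
static char *sqlConcat(const char *b, const char *a){
  size_t nb, na;
  char *z;
  if( a==0 || b==0 ) return 0;     /* SQL NULL propagates */
  nb = strlen(b);
  na = strlen(a);
  z = malloc(nb + na + 1);
  if( z ){
    memcpy(z, b, nb);
    memcpy(z + nb, a, na + 1);     /* copies the terminator too */
  }
  return z;
}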
*/ -/* Opcode: Remainder * * * +/* Opcode: Remainder P1 P2 P3 * * ** -** Pop the top two elements from the stack, divide the -** first (what was on top of the stack) from the second (the -** next on stack) -** and push the remainder after division onto the stack. If either element -** is a string then it is converted to a double using the atof() -** function before the division. Division by zero returns NULL. +** Compute the remainder after integer division of the value in +** register P1 by the value in register P2 and store the result in P3. +** If the value in register P2 is zero the result is NULL. ** If either operand is NULL, the result is NULL. */ -case OP_Add: /* same as TK_PLUS, no-push */ -case OP_Subtract: /* same as TK_MINUS, no-push */ -case OP_Multiply: /* same as TK_STAR, no-push */ -case OP_Divide: /* same as TK_SLASH, no-push */ -case OP_Remainder: { /* same as TK_REM, no-push */ - Mem *pNos = &pTos[-1]; - int flags; - assert( pNos>=p->aStack ); - flags = pTos->flags | pNos->flags; - if( (flags & MEM_Null)!=0 ){ - Release(pTos); - pTos--; - Release(pTos); - pTos->flags = MEM_Null; - }else if( (pTos->flags & pNos->flags & MEM_Int)==MEM_Int ){ - i64 a, b; - a = pTos->u.i; - b = pNos->u.i; +case OP_Add: /* same as TK_PLUS, in1, in2, out3 */ +case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */ +case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */ +case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */ +case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */ + int flags; /* Combined MEM_* flags from both inputs */ + i64 iA; /* Integer value of left operand */ + i64 iB; /* Integer value of right operand */ + double rA; /* Real value of left operand */ + double rB; /* Real value of right operand */ + + applyNumericAffinity(pIn1); + applyNumericAffinity(pIn2); + flags = pIn1->flags | pIn2->flags; + if( (flags & MEM_Null)!=0 ) goto arithmetic_result_is_null; + if( (pIn1->flags & pIn2->flags & MEM_Int)==MEM_Int ){ + iA = pIn1->u.i; + iB = pIn2->u.i; switch( pOp->opcode ){ - case OP_Add: b += a; break; - case OP_Subtract: b -= a; break; - case OP_Multiply: b *= a; break; + case OP_Add: iB += iA; break; + case OP_Subtract: iB -= iA; break; + case OP_Multiply: iB *= iA; break; case OP_Divide: { - if( a==0 ) goto divide_by_zero; + if( iA==0 ) goto arithmetic_result_is_null; /* Dividing the largest possible negative 64-bit integer (1<<63) by - ** -1 returns an integer to large to store in a 64-bit data-type. On + ** -1 returns an integer too large to store in a 64-bit data-type. On ** some architectures, the value overflows to (1<<63). On others, ** a SIGFPE is issued. The following statement normalizes this - ** behaviour so that all architectures behave as if integer - ** overflow occured. + ** behavior so that all architectures behave as if integer + ** overflow occurred. 
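/* [Editor's note: illustrative sketch, not part of the original patch.
** The comment above covers the one case where 64-bit integer division
** can trap: the most negative 64-bit value divided by -1 does not fit in
** 64 bits and raises SIGFPE on some architectures.  The guard (replacing
** the -1 divisor with 1, as the following hunk lines do) looks like this
** in isolation; the helper name is hypothetical and division by zero is
** left to the caller, since in SQL it produces NULL: */
#include <stdint.h>

static int64_t safeDiv(int64_t b, int64_t a){   /* computes b/a, a!=0 */
  if( a==-1 && b==INT64_MIN ) a = 1;   /* normalize the overflow case */
  return b/a;
}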
*/ - if( a==-1 && b==(((i64)1)<<63) ) a = 1; - b /= a; + if( iA==-1 && iB==SMALLEST_INT64 ) iA = 1; + iB /= iA; break; } default: { - if( a==0 ) goto divide_by_zero; - if( a==-1 ) a = 1; - b %= a; + if( iA==0 ) goto arithmetic_result_is_null; + if( iA==-1 ) iA = 1; + iB %= iA; break; } } - Release(pTos); - pTos--; - Release(pTos); - pTos->u.i = b; - pTos->flags = MEM_Int; + pOut->u.i = iB; + MemSetTypeFlag(pOut, MEM_Int); }else{ - double a, b; - a = sqlite3VdbeRealValue(pTos); - b = sqlite3VdbeRealValue(pNos); + rA = sqlite3VdbeRealValue(pIn1); + rB = sqlite3VdbeRealValue(pIn2); switch( pOp->opcode ){ - case OP_Add: b += a; break; - case OP_Subtract: b -= a; break; - case OP_Multiply: b *= a; break; + case OP_Add: rB += rA; break; + case OP_Subtract: rB -= rA; break; + case OP_Multiply: rB *= rA; break; case OP_Divide: { - if( a==0.0 ) goto divide_by_zero; - b /= a; + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + if( rA==(double)0 ) goto arithmetic_result_is_null; + rB /= rA; break; } default: { - i64 ia = (i64)a; - i64 ib = (i64)b; - if( ia==0 ) goto divide_by_zero; - if( ia==-1 ) ia = 1; - b = ib % ia; + iA = (i64)rA; + iB = (i64)rB; + if( iA==0 ) goto arithmetic_result_is_null; + if( iA==-1 ) iA = 1; + rB = (double)(iB % iA); break; } } - if( sqlite3_isnan(b) ){ - goto divide_by_zero; + if( sqlite3IsNaN(rB) ){ + goto arithmetic_result_is_null; } - Release(pTos); - pTos--; - Release(pTos); - pTos->r = b; - pTos->flags = MEM_Real; + pOut->r = rB; + MemSetTypeFlag(pOut, MEM_Real); if( (flags & MEM_Real)==0 ){ - sqlite3VdbeIntegerAffinity(pTos); + sqlite3VdbeIntegerAffinity(pOut); } } break; -divide_by_zero: - Release(pTos); - pTos--; - Release(pTos); - pTos->flags = MEM_Null; +arithmetic_result_is_null: + sqlite3VdbeMemSetNull(pOut); break; } -/* Opcode: CollSeq * * P3 +/* Opcode: CollSeq * * P4 ** -** P3 is a pointer to a CollSeq struct. If the next call to a user function +** P4 is a pointer to a CollSeq struct. If the next call to a user function ** or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will ** be returned. This is used by the built-in min(), max() and nullif() ** functions. @@ -1231,16 +1309,17 @@ ** to retrieve the collation sequence set by this opcode is not available ** publicly, only to user functions defined in func.c. */ -case OP_CollSeq: { /* no-push */ - assert( pOp->p3type==P3_COLLSEQ ); +case OP_CollSeq: { + assert( pOp->p4type==P4_COLLSEQ ); break; } -/* Opcode: Function P1 P2 P3 +/* Opcode: Function P1 P2 P3 P4 P5 ** -** Invoke a user function (P3 is a pointer to a Function structure that -** defines the function) with P2 arguments taken from the stack. Pop all -** arguments from the stack and push back the result. +** Invoke a user function (P4 is a pointer to a Function structure that +** defines the function) with P5 arguments taken from register P2 and +** successors. The result of the function is stored in register P3. +** Register P3 must not be one of the function inputs. ** ** P1 is a 32-bit bitmask indicating whether or not each argument to the ** function was determined to be constant at compile time. 
If the first @@ -1256,40 +1335,58 @@ Mem *pArg; sqlite3_context ctx; sqlite3_value **apVal; - int n = pOp->p2; + int n; + n = pOp->p5; apVal = p->apArg; assert( apVal || n==0 ); - pArg = &pTos[1-n]; + assert( n==0 || (pOp->p2>0 && pOp->p2+n<=p->nMem+1) ); + assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); + pArg = &p->aMem[pOp->p2]; for(i=0; ip2, pArg); } - assert( pOp->p3type==P3_FUNCDEF || pOp->p3type==P3_VDBEFUNC ); - if( pOp->p3type==P3_FUNCDEF ){ - ctx.pFunc = (FuncDef*)pOp->p3; + assert( pOp->p4type==P4_FUNCDEF || pOp->p4type==P4_VDBEFUNC ); + if( pOp->p4type==P4_FUNCDEF ){ + ctx.pFunc = pOp->p4.pFunc; ctx.pVdbeFunc = 0; }else{ - ctx.pVdbeFunc = (VdbeFunc*)pOp->p3; + ctx.pVdbeFunc = (VdbeFunc*)pOp->p4.pVdbeFunc; ctx.pFunc = ctx.pVdbeFunc->pFunc; } + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + pOut = &p->aMem[pOp->p3]; ctx.s.flags = MEM_Null; - ctx.s.z = 0; + ctx.s.db = db; ctx.s.xDel = 0; + ctx.s.zMalloc = 0; + + /* The output cell may already have a buffer allocated. Move + ** the pointer to ctx.s so in case the user-function can use + ** the already allocated buffer instead of allocating a new one. + */ + sqlite3VdbeMemMove(&ctx.s, pOut); + MemSetTypeFlag(&ctx.s, MEM_Null); + ctx.isError = 0; - if( ctx.pFunc->needCollSeq ){ + if( ctx.pFunc->flags & SQLITE_FUNC_NEEDCOLL ){ assert( pOp>p->aOp ); - assert( pOp[-1].p3type==P3_COLLSEQ ); + assert( pOp[-1].p4type==P4_COLLSEQ ); assert( pOp[-1].opcode==OP_CollSeq ); - ctx.pColl = (CollSeq *)pOp[-1].p3; + ctx.pColl = pOp[-1].p4.pColl; } if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; (*ctx.pFunc->xFunc)(&ctx, n, apVal); - if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; - if( sqlite3MallocFailed() ){ + if( sqlite3SafetyOn(db) ){ + sqlite3VdbeMemRelease(&ctx.s); + goto abort_due_to_misuse; + } + if( db->mallocFailed ){ /* Even though a malloc() has failed, the implementation of the ** user function may have called an sqlite3_result_XXX() function ** to return a value. The following call releases any resources @@ -1302,234 +1399,182 @@ sqlite3VdbeMemRelease(&ctx.s); goto no_mem; } - popStack(&pTos, n); - /* If any auxilary data functions have been called by this user function, + /* If any auxiliary data functions have been called by this user function, ** immediately call the destructor for any non-static values. */ if( ctx.pVdbeFunc ){ sqlite3VdbeDeleteAuxData(ctx.pVdbeFunc, pOp->p1); - pOp->p3 = (char *)ctx.pVdbeFunc; - pOp->p3type = P3_VDBEFUNC; + pOp->p4.pVdbeFunc = ctx.pVdbeFunc; + pOp->p4type = P4_VDBEFUNC; } /* If the function returned an error, throw an exception */ if( ctx.isError ){ - sqlite3SetString(&p->zErrMsg, sqlite3_value_text(&ctx.s), (char*)0); - rc = SQLITE_ERROR; + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&ctx.s)); + rc = ctx.isError; } - /* Copy the result of the function to the top of the stack */ + /* Copy the result of the function into register P3 */ sqlite3VdbeChangeEncoding(&ctx.s, encoding); - pTos++; - pTos->flags = 0; - sqlite3VdbeMemMove(pTos, &ctx.s); - if( sqlite3VdbeMemTooBig(pTos) ){ + sqlite3VdbeMemMove(pOut, &ctx.s); + if( sqlite3VdbeMemTooBig(pOut) ){ goto too_big; } + REGISTER_TRACE(pOp->p3, pOut); + UPDATE_MAX_BLOBSIZE(pOut); break; } -/* Opcode: BitAnd * * * +/* Opcode: BitAnd P1 P2 P3 * * ** -** Pop the top two elements from the stack. Convert both elements -** to integers. Push back onto the stack the bit-wise AND of the -** two elements. -** If either operand is NULL, the result is NULL. 
+** Take the bit-wise AND of the values in register P1 and P2 and +** store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: BitOr * * * +/* Opcode: BitOr P1 P2 P3 * * ** -** Pop the top two elements from the stack. Convert both elements -** to integers. Push back onto the stack the bit-wise OR of the -** two elements. -** If either operand is NULL, the result is NULL. +** Take the bit-wise OR of the values in register P1 and P2 and +** store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: ShiftLeft * * * +/* Opcode: ShiftLeft P1 P2 P3 * * ** -** Pop the top two elements from the stack. Convert both elements -** to integers. Push back onto the stack the second element shifted -** left by N bits where N is the top element on the stack. -** If either operand is NULL, the result is NULL. +** Shift the integer value in register P2 to the left by the +** number of bits specified by the integer in regiser P1. +** Store the result in register P3. +** If either input is NULL, the result is NULL. */ -/* Opcode: ShiftRight * * * +/* Opcode: ShiftRight P1 P2 P3 * * ** -** Pop the top two elements from the stack. Convert both elements -** to integers. Push back onto the stack the second element shifted -** right by N bits where N is the top element on the stack. -** If either operand is NULL, the result is NULL. +** Shift the integer value in register P2 to the right by the +** number of bits specified by the integer in register P1. +** Store the result in register P3. +** If either input is NULL, the result is NULL. */ -case OP_BitAnd: /* same as TK_BITAND, no-push */ -case OP_BitOr: /* same as TK_BITOR, no-push */ -case OP_ShiftLeft: /* same as TK_LSHIFT, no-push */ -case OP_ShiftRight: { /* same as TK_RSHIFT, no-push */ - Mem *pNos = &pTos[-1]; - i64 a, b; - - assert( pNos>=p->aStack ); - if( (pTos->flags | pNos->flags) & MEM_Null ){ - popStack(&pTos, 2); - pTos++; - pTos->flags = MEM_Null; +case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */ +case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */ +case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */ +case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */ + i64 a; + i64 b; + + if( (pIn1->flags | pIn2->flags) & MEM_Null ){ + sqlite3VdbeMemSetNull(pOut); break; } - a = sqlite3VdbeIntValue(pNos); - b = sqlite3VdbeIntValue(pTos); + a = sqlite3VdbeIntValue(pIn2); + b = sqlite3VdbeIntValue(pIn1); switch( pOp->opcode ){ case OP_BitAnd: a &= b; break; case OP_BitOr: a |= b; break; case OP_ShiftLeft: a <<= b; break; - case OP_ShiftRight: a >>= b; break; - default: /* CANT HAPPEN */ break; + default: assert( pOp->opcode==OP_ShiftRight ); + a >>= b; break; } - Release(pTos); - pTos--; - Release(pTos); - pTos->u.i = a; - pTos->flags = MEM_Int; + pOut->u.i = a; + MemSetTypeFlag(pOut, MEM_Int); break; } -/* Opcode: AddImm P1 * * +/* Opcode: AddImm P1 P2 * * * ** -** Add the value P1 to whatever is on top of the stack. The result -** is always an integer. +** Add the constant P2 to the value in register P1. +** The result is always an integer. ** -** To force the top of the stack to be an integer, just add 0. +** To force any register to be an integer, just add 0. */ -case OP_AddImm: { /* no-push */ - assert( pTos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); - pTos->u.i += pOp->p1; - break; -} - -/* Opcode: ForceInt P1 P2 * -** -** Convert the top of the stack into an integer. 
If the current top of -** the stack is not numeric (meaning that is is a NULL or a string that -** does not look like an integer or floating point number) then pop the -** stack and jump to P2. If the top of the stack is numeric then -** convert it into the least integer that is greater than or equal to its -** current value if P1==0, or to the least integer that is strictly -** greater than its current value if P1==1. -*/ -case OP_ForceInt: { /* no-push */ - i64 v; - assert( pTos>=p->aStack ); - applyAffinity(pTos, SQLITE_AFF_NUMERIC, encoding); - if( (pTos->flags & (MEM_Int|MEM_Real))==0 ){ - Release(pTos); - pTos--; - pc = pOp->p2 - 1; - break; - } - if( pTos->flags & MEM_Int ){ - v = pTos->u.i + (pOp->p1!=0); - }else{ - /* FIX ME: should this not be assert( pTos->flags & MEM_Real ) ??? */ - sqlite3VdbeMemRealify(pTos); - v = (int)pTos->r; - if( pTos->r>(double)v ) v++; - if( pOp->p1 && pTos->r==(double)v ) v++; - } - Release(pTos); - pTos->u.i = v; - pTos->flags = MEM_Int; +case OP_AddImm: { /* in1 */ + sqlite3VdbeMemIntegerify(pIn1); + pIn1->u.i += pOp->p2; break; } -/* Opcode: MustBeInt P1 P2 * +/* Opcode: MustBeInt P1 P2 * * * ** -** Force the top of the stack to be an integer. If the top of the -** stack is not an integer and cannot be converted into an integer -** with out data loss, then jump immediately to P2, or if P2==0 +** Force the value in register P1 to be an integer. If the value +** in P1 is not an integer and cannot be converted into an integer +** without data loss, then jump immediately to P2, or if P2==0 ** raise an SQLITE_MISMATCH exception. -** -** If the top of the stack is not an integer and P2 is not zero and -** P1 is 1, then the stack is popped. In all other cases, the depth -** of the stack is unchanged. -*/ -case OP_MustBeInt: { /* no-push */ - assert( pTos>=p->aStack ); - applyAffinity(pTos, SQLITE_AFF_NUMERIC, encoding); - if( (pTos->flags & MEM_Int)==0 ){ +*/ +case OP_MustBeInt: { /* jump, in1 */ + applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); + if( (pIn1->flags & MEM_Int)==0 ){ if( pOp->p2==0 ){ rc = SQLITE_MISMATCH; goto abort_due_to_error; }else{ - if( pOp->p1 ) popStack(&pTos, 1); pc = pOp->p2 - 1; } }else{ - Release(pTos); - pTos->flags = MEM_Int; + MemSetTypeFlag(pIn1, MEM_Int); } break; } -/* Opcode: RealAffinity * * * +/* Opcode: RealAffinity P1 * * * * ** -** If the top of the stack is an integer, convert it to a real value. +** If register P1 holds an integer convert it to a real value. ** ** This opcode is used when extracting information from a column that ** has REAL affinity. Such column values may still be stored as ** integers, for space efficiency, but after extraction we want them ** to have only a real value. */ -case OP_RealAffinity: { /* no-push */ - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Int ){ - sqlite3VdbeMemRealify(pTos); +case OP_RealAffinity: { /* in1 */ + if( pIn1->flags & MEM_Int ){ + sqlite3VdbeMemRealify(pIn1); } break; } #ifndef SQLITE_OMIT_CAST -/* Opcode: ToText * * * +/* Opcode: ToText P1 * * * * ** -** Force the value on the top of the stack to be text. +** Force the value in register P1 to be text. ** If the value is numeric, convert it to a string using the ** equivalent of printf(). Blob values are unchanged and ** are afterwards simply interpreted as text. ** ** A NULL value is not changed by this routine. It remains NULL. 
*/ -case OP_ToText: { /* same as TK_TO_TEXT, no-push */ - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Null ) break; +case OP_ToText: { /* same as TK_TO_TEXT, in1 */ + if( pIn1->flags & MEM_Null ) break; assert( MEM_Str==(MEM_Blob>>3) ); - pTos->flags |= (pTos->flags&MEM_Blob)>>3; - applyAffinity(pTos, SQLITE_AFF_TEXT, encoding); - rc = ExpandBlob(pTos); - assert( pTos->flags & MEM_Str ); - pTos->flags &= ~(MEM_Int|MEM_Real|MEM_Blob); + pIn1->flags |= (pIn1->flags&MEM_Blob)>>3; + applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding); + rc = ExpandBlob(pIn1); + assert( pIn1->flags & MEM_Str || db->mallocFailed ); + pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero); + UPDATE_MAX_BLOBSIZE(pIn1); break; } -/* Opcode: ToBlob * * * +/* Opcode: ToBlob P1 * * * * ** -** Force the value on the top of the stack to be a BLOB. +** Force the value in register P1 to be a BLOB. ** If the value is numeric, convert it to a string first. ** Strings are simply reinterpreted as blobs with no change ** to the underlying data. ** ** A NULL value is not changed by this routine. It remains NULL. */ -case OP_ToBlob: { /* same as TK_TO_BLOB, no-push */ - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Null ) break; - if( (pTos->flags & MEM_Blob)==0 ){ - applyAffinity(pTos, SQLITE_AFF_TEXT, encoding); - assert( pTos->flags & MEM_Str ); - pTos->flags |= MEM_Blob; +case OP_ToBlob: { /* same as TK_TO_BLOB, in1 */ + if( pIn1->flags & MEM_Null ) break; + if( (pIn1->flags & MEM_Blob)==0 ){ + applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding); + assert( pIn1->flags & MEM_Str || db->mallocFailed ); + MemSetTypeFlag(pIn1, MEM_Blob); + }else{ + pIn1->flags &= ~(MEM_TypeMask&~MEM_Blob); } - pTos->flags &= ~(MEM_Int|MEM_Real|MEM_Str); + UPDATE_MAX_BLOBSIZE(pIn1); break; } -/* Opcode: ToNumeric * * * +/* Opcode: ToNumeric P1 * * * * ** -** Force the value on the top of the stack to be numeric (either an +** Force the value in register P1 to be numeric (either an ** integer or a floating-point number.) ** If the value is text or blob, try to convert it to an using the ** equivalent of atoi() or atof() and store 0 if no such conversion @@ -1537,177 +1582,145 @@ ** ** A NULL value is not changed by this routine. It remains NULL. */ -case OP_ToNumeric: { /* same as TK_TO_NUMERIC, no-push */ - assert( pTos>=p->aStack ); - if( (pTos->flags & (MEM_Null|MEM_Int|MEM_Real))==0 ){ - sqlite3VdbeMemNumerify(pTos); +case OP_ToNumeric: { /* same as TK_TO_NUMERIC, in1 */ + if( (pIn1->flags & (MEM_Null|MEM_Int|MEM_Real))==0 ){ + sqlite3VdbeMemNumerify(pIn1); } break; } #endif /* SQLITE_OMIT_CAST */ -/* Opcode: ToInt * * * +/* Opcode: ToInt P1 * * * * ** -** Force the value on the top of the stack to be an integer. If +** Force the value in register P1 be an integer. If ** The value is currently a real number, drop its fractional part. ** If the value is text or blob, try to convert it to an integer using the ** equivalent of atoi() and store 0 if no such conversion is possible. ** ** A NULL value is not changed by this routine. It remains NULL. */ -case OP_ToInt: { /* same as TK_TO_INT, no-push */ - assert( pTos>=p->aStack ); - if( (pTos->flags & MEM_Null)==0 ){ - sqlite3VdbeMemIntegerify(pTos); +case OP_ToInt: { /* same as TK_TO_INT, in1 */ + if( (pIn1->flags & MEM_Null)==0 ){ + sqlite3VdbeMemIntegerify(pIn1); } break; } #ifndef SQLITE_OMIT_CAST -/* Opcode: ToReal * * * +/* Opcode: ToReal P1 * * * * ** -** Force the value on the top of the stack to be a floating point number. +** Force the value in register P1 to be a floating point number. 
** If The value is currently an integer, convert it. ** If the value is text or blob, try to convert it to an integer using the -** equivalent of atoi() and store 0 if no such conversion is possible. +** equivalent of atoi() and store 0.0 if no such conversion is possible. ** ** A NULL value is not changed by this routine. It remains NULL. */ -case OP_ToReal: { /* same as TK_TO_REAL, no-push */ - assert( pTos>=p->aStack ); - if( (pTos->flags & MEM_Null)==0 ){ - sqlite3VdbeMemRealify(pTos); +case OP_ToReal: { /* same as TK_TO_REAL, in1 */ + if( (pIn1->flags & MEM_Null)==0 ){ + sqlite3VdbeMemRealify(pIn1); } break; } #endif /* SQLITE_OMIT_CAST */ -/* Opcode: Eq P1 P2 P3 +/* Opcode: Lt P1 P2 P3 P4 P5 ** -** Pop the top two elements from the stack. If they are equal, then -** jump to instruction P2. Otherwise, continue to the next instruction. +** Compare the values in register P1 and P3. If reg(P3)flags|pNos->flags; + flags = pIn1->flags|pIn3->flags; - /* If either value is a NULL P2 is not zero, take the jump if the least - ** significant byte of P1 is true. If P2 is zero, then push a NULL onto - ** the stack. - */ if( flags&MEM_Null ){ - if( (pOp->p1 & 0x200)!=0 ){ - /* The 0x200 bit of P1 means, roughly "do not treat NULL as the - ** magic SQL value it normally is - treat it as if it were another - ** integer". - ** - ** With 0x200 set, if either operand is NULL then both operands - ** are converted to integers prior to being passed down into the - ** normal comparison logic below. NULL operands are converted to - ** zero and non-NULL operands are converted to 1. Thus, for example, - ** with 0x200 set, NULL==NULL is true whereas it would normally - ** be NULL. Similarly, NULL!=123 is true. - */ - sqlite3VdbeMemSetInt64(pTos, (pTos->flags & MEM_Null)==0); - sqlite3VdbeMemSetInt64(pNos, (pNos->flags & MEM_Null)==0); - }else{ - /* If the 0x200 bit of P1 is clear and either operand is NULL then - ** the result is always NULL. The jump is taken if the 0x100 bit - ** of P1 is set. - */ - popStack(&pTos, 2); - if( pOp->p2 ){ - if( pOp->p1 & 0x100 ){ - pc = pOp->p2-1; - } - }else{ - pTos++; - pTos->flags = MEM_Null; - } - break; + /* If either operand is NULL then the result is always NULL. + ** The jump is taken if the SQLITE_JUMPIFNULL bit is set. 
+ */ + if( pOp->p5 & SQLITE_STOREP2 ){ + pOut = &p->aMem[pOp->p2]; + MemSetTypeFlag(pOut, MEM_Null); + REGISTER_TRACE(pOp->p2, pOut); + }else if( pOp->p5 & SQLITE_JUMPIFNULL ){ + pc = pOp->p2-1; } + break; } - affinity = pOp->p1 & 0xFF; + affinity = pOp->p5 & SQLITE_AFF_MASK; if( affinity ){ - applyAffinity(pNos, affinity, encoding); - applyAffinity(pTos, affinity, encoding); + applyAffinity(pIn1, affinity, encoding); + applyAffinity(pIn3, affinity, encoding); + if( db->mallocFailed ) goto no_mem; } - assert( pOp->p3type==P3_COLLSEQ || pOp->p3==0 ); - ExpandBlob(pNos); - ExpandBlob(pTos); - res = sqlite3MemCompare(pNos, pTos, (CollSeq*)pOp->p3); + assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 ); + ExpandBlob(pIn1); + ExpandBlob(pIn3); + res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl); switch( pOp->opcode ){ case OP_Eq: res = res==0; break; case OP_Ne: res = res!=0; break; @@ -1717,284 +1730,302 @@ default: res = res>=0; break; } - popStack(&pTos, 2); - if( pOp->p2 ){ - if( res ){ - pc = pOp->p2-1; + if( pOp->p5 & SQLITE_STOREP2 ){ + pOut = &p->aMem[pOp->p2]; + MemSetTypeFlag(pOut, MEM_Int); + pOut->u.i = res; + REGISTER_TRACE(pOp->p2, pOut); + }else if( res ){ + pc = pOp->p2-1; + } + break; +} + +/* Opcode: Permutation * * * P4 * +** +** Set the permutation used by the OP_Compare operator to be the array +** of integers in P4. +** +** The permutation is only valid until the next OP_Permutation, OP_Compare, +** OP_Halt, or OP_ResultRow. Typically the OP_Permutation should occur +** immediately prior to the OP_Compare. +*/ +case OP_Permutation: { + assert( pOp->p4type==P4_INTARRAY ); + assert( pOp->p4.ai ); + aPermute = pOp->p4.ai; + break; +} + +/* Opcode: Compare P1 P2 P3 P4 * +** +** Compare to vectors of registers in reg(P1)..reg(P1+P3-1) (all this +** one "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of +** the comparison for use by the next OP_Jump instruct. +** +** P4 is a KeyInfo structure that defines collating sequences and sort +** orders for the comparison. The permutation applies to registers +** only. The KeyInfo elements are used sequentially. +** +** The comparison is a sort comparison, so NULLs compare equal, +** NULLs are less than numbers, numbers are less than strings, +** and strings are less than blobs. +*/ +case OP_Compare: { + int n; + int i; + int p1; + int p2; + const KeyInfo *pKeyInfo; + int idx; + CollSeq *pColl; /* Collating sequence to use on this term */ + int bRev; /* True for DESCENDING sort order */ + + n = pOp->p3; + pKeyInfo = pOp->p4.pKeyInfo; + assert( n>0 ); + assert( pKeyInfo!=0 ); + p1 = pOp->p1; + assert( p1>0 && p1+n<=p->nMem+1 ); + p2 = pOp->p2; + assert( p2>0 && p2+n<=p->nMem+1 ); + for(i=0; iaMem[p1+idx]); + REGISTER_TRACE(p2+idx, &p->aMem[p2+idx]); + assert( inField ); + pColl = pKeyInfo->aColl[i]; + bRev = pKeyInfo->aSortOrder[i]; + iCompare = sqlite3MemCompare(&p->aMem[p1+idx], &p->aMem[p2+idx], pColl); + if( iCompare ){ + if( bRev ) iCompare = -iCompare; + break; } + } + aPermute = 0; + break; +} + +/* Opcode: Jump P1 P2 P3 * * +** +** Jump to the instruction at address P1, P2, or P3 depending on whether +** in the most recent OP_Compare instruction the P1 vector was less than +** equal to, or greater than the P2 vector, respectively. 
+*/ +case OP_Jump: { /* jump */ + if( iCompare<0 ){ + pc = pOp->p1 - 1; + }else if( iCompare==0 ){ + pc = pOp->p2 - 1; }else{ - pTos++; - pTos->flags = MEM_Int; - pTos->u.i = res; + pc = pOp->p3 - 1; } break; } -/* Opcode: And * * * +/* Opcode: And P1 P2 P3 * * ** -** Pop two values off the stack. Take the logical AND of the -** two values and push the resulting boolean value back onto the -** stack. +** Take the logical AND of the values in registers P1 and P2 and +** write the result into register P3. +** +** If either P1 or P2 is 0 (false) then the result is 0 even if +** the other input is NULL. A NULL and true or two NULLs give +** a NULL output. */ -/* Opcode: Or * * * +/* Opcode: Or P1 P2 P3 * * +** +** Take the logical OR of the values in register P1 and P2 and +** store the answer in register P3. ** -** Pop two values off the stack. Take the logical OR of the -** two values and push the resulting boolean value back onto the -** stack. +** If either P1 or P2 is nonzero (true) then the result is 1 (true) +** even if the other input is NULL. A NULL and false or two NULLs +** give a NULL output. */ -case OP_And: /* same as TK_AND, no-push */ -case OP_Or: { /* same as TK_OR, no-push */ - Mem *pNos = &pTos[-1]; - int v1, v2; /* 0==TRUE, 1==FALSE, 2==UNKNOWN or NULL */ +case OP_And: /* same as TK_AND, in1, in2, out3 */ +case OP_Or: { /* same as TK_OR, in1, in2, out3 */ + int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ + int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ - assert( pNos>=p->aStack ); - if( pTos->flags & MEM_Null ){ + if( pIn1->flags & MEM_Null ){ v1 = 2; }else{ - sqlite3VdbeMemIntegerify(pTos); - v1 = pTos->u.i==0; + v1 = sqlite3VdbeIntValue(pIn1)!=0; } - if( pNos->flags & MEM_Null ){ + if( pIn2->flags & MEM_Null ){ v2 = 2; }else{ - sqlite3VdbeMemIntegerify(pNos); - v2 = pNos->u.i==0; + v2 = sqlite3VdbeIntValue(pIn2)!=0; } if( pOp->opcode==OP_And ){ - static const unsigned char and_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 }; + static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 }; v1 = and_logic[v1*3+v2]; }else{ - static const unsigned char or_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 }; + static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 }; v1 = or_logic[v1*3+v2]; } - popStack(&pTos, 2); - pTos++; if( v1==2 ){ - pTos->flags = MEM_Null; + MemSetTypeFlag(pOut, MEM_Null); }else{ - pTos->u.i = v1==0; - pTos->flags = MEM_Int; + pOut->u.i = v1; + MemSetTypeFlag(pOut, MEM_Int); } break; } -/* Opcode: Negative * * * -** -** Treat the top of the stack as a numeric quantity. Replace it -** with its additive inverse. If the top of the stack is NULL -** its value is unchanged. -*/ -/* Opcode: AbsValue * * * +/* Opcode: Not P1 P2 * * * ** -** Treat the top of the stack as a numeric quantity. Replace it -** with its absolute value. If the top of the stack is NULL -** its value is unchanged. +** Interpret the value in register P1 as a boolean value. Store the +** boolean complement in register P2. If the value in register P1 is +** NULL, then a NULL is stored in P2. 
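/* [Editor's note: illustrative sketch, not part of the original patch.
** The and_logic[]/or_logic[] tables in the hunk above encode SQL's
** three-valued logic with 0==FALSE, 1==TRUE, 2==NULL, indexed as v1*3+v2.
** Written out as truth tables:
**
**    AND | F  T  N          OR | F  T  N
**    ----+----------        ---+----------
**     F  | F  F  F           F | F  T  N
**     T  | F  T  N           T | T  T  T
**     N  | F  N  N           N | N  T  N
**
** A tiny stand-alone check of the same encoding (array names renamed to
** avoid any suggestion that this is the original code): */
#include <assert.h>

enum { F = 0, T = 1, N = 2 };                       /* FALSE, TRUE, NULL */
static const unsigned char andTab[9] = { F,F,F,  F,T,N,  F,N,N };
static const unsigned char orTab[9]  = { F,T,N,  T,T,T,  N,T,N };

static void checkLogic(void){
  assert( andTab[N*3+F]==F );   /* NULL AND FALSE is FALSE */
  assert( andTab[T*3+N]==N );   /* TRUE  AND NULL  is NULL  */
  assert( orTab[N*3+T]==T );    /* NULL  OR  TRUE  is TRUE  */
}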
*/ -case OP_Negative: /* same as TK_UMINUS, no-push */ -case OP_AbsValue: { - assert( pTos>=p->aStack ); - if( (pTos->flags & (MEM_Real|MEM_Int|MEM_Null))==0 ){ - sqlite3VdbeMemNumerify(pTos); - } - if( pTos->flags & MEM_Real ){ - Release(pTos); - if( pOp->opcode==OP_Negative || pTos->r<0.0 ){ - pTos->r = -pTos->r; - } - pTos->flags = MEM_Real; - }else if( pTos->flags & MEM_Int ){ - Release(pTos); - if( pOp->opcode==OP_Negative || pTos->u.i<0 ){ - pTos->u.i = -pTos->u.i; - } - pTos->flags = MEM_Int; +case OP_Not: { /* same as TK_NOT, in1 */ + pOut = &p->aMem[pOp->p2]; + if( pIn1->flags & MEM_Null ){ + sqlite3VdbeMemSetNull(pOut); + }else{ + sqlite3VdbeMemSetInt64(pOut, !sqlite3VdbeIntValue(pIn1)); } break; } -/* Opcode: Not * * * -** -** Interpret the top of the stack as a boolean value. Replace it -** with its complement. If the top of the stack is NULL its value -** is unchanged. -*/ -case OP_Not: { /* same as TK_NOT, no-push */ - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Null ) break; /* Do nothing to NULLs */ - sqlite3VdbeMemIntegerify(pTos); - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos->u.i = !pTos->u.i; - pTos->flags = MEM_Int; - break; -} - -/* Opcode: BitNot * * * -** -** Interpret the top of the stack as an value. Replace it -** with its ones-complement. If the top of the stack is NULL its -** value is unchanged. -*/ -case OP_BitNot: { /* same as TK_BITNOT, no-push */ - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Null ) break; /* Do nothing to NULLs */ - sqlite3VdbeMemIntegerify(pTos); - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos->u.i = ~pTos->u.i; - pTos->flags = MEM_Int; - break; -} - -/* Opcode: Noop * * * +/* Opcode: BitNot P1 P2 * * * ** -** Do nothing. This instruction is often useful as a jump -** destination. +** Interpret the content of register P1 as an integer. Store the +** ones-complement of the P1 value into register P2. If P1 holds +** a NULL then store a NULL in P2. */ -/* -** The magic Explain opcode are only inserted when explain==2 (which -** is to say when the EXPLAIN QUERY PLAN syntax is used.) -** This opcode records information from the optimizer. It is the -** the same as a no-op. This opcodesnever appears in a real VM program. -*/ -case OP_Explain: -case OP_Noop: { /* no-push */ +case OP_BitNot: { /* same as TK_BITNOT, in1 */ + pOut = &p->aMem[pOp->p2]; + if( pIn1->flags & MEM_Null ){ + sqlite3VdbeMemSetNull(pOut); + }else{ + sqlite3VdbeMemSetInt64(pOut, ~sqlite3VdbeIntValue(pIn1)); + } break; } -/* Opcode: If P1 P2 * +/* Opcode: If P1 P2 P3 * * ** -** Pop a single boolean from the stack. If the boolean popped is -** true, then jump to p2. Otherwise continue to the next instruction. -** An integer is false if zero and true otherwise. A string is -** false if it has zero length and true otherwise. -** -** If the value popped of the stack is NULL, then take the jump if P1 -** is true and fall through if P1 is false. +** Jump to P2 if the value in register P1 is true. The value is +** is considered true if it is numeric and non-zero. If the value +** in P1 is NULL then take the jump if P3 is true. */ -/* Opcode: IfNot P1 P2 * -** -** Pop a single boolean from the stack. If the boolean popped is -** false, then jump to p2. Otherwise continue to the next instruction. -** An integer is false if zero and true otherwise. A string is -** false if it has zero length and true otherwise. +/* Opcode: IfNot P1 P2 P3 * * ** -** If the value popped of the stack is NULL, then take the jump if P1 -** is true and fall through if P1 is false. 
+** Jump to P2 if the value in register P1 is False. The value is +** is considered true if it has a numeric value of zero. If the value +** in P1 is NULL then take the jump if P3 is true. */ -case OP_If: /* no-push */ -case OP_IfNot: { /* no-push */ +case OP_If: /* jump, in1 */ +case OP_IfNot: { /* jump, in1 */ int c; - assert( pTos>=p->aStack ); - if( pTos->flags & MEM_Null ){ - c = pOp->p1; + if( pIn1->flags & MEM_Null ){ + c = pOp->p3; }else{ #ifdef SQLITE_OMIT_FLOATING_POINT - c = sqlite3VdbeIntValue(pTos); + c = sqlite3VdbeIntValue(pIn1)!=0; #else - c = sqlite3VdbeRealValue(pTos)!=0.0; + c = sqlite3VdbeRealValue(pIn1)!=0.0; #endif if( pOp->opcode==OP_IfNot ) c = !c; } - Release(pTos); - pTos--; - if( c ) pc = pOp->p2-1; + if( c ){ + pc = pOp->p2-1; + } break; } -/* Opcode: IsNull P1 P2 * +/* Opcode: IsNull P1 P2 * * * ** -** Check the top of the stack and jump to P2 if the top of the stack -** is NULL. If P1 is positive, then pop P1 elements from the stack -** regardless of whether or not the jump is taken. If P1 is negative, -** pop -P1 elements from the stack only if the jump is taken and leave -** the stack unchanged if the jump is not taken. +** Jump to P2 if the value in register P1 is NULL. */ -case OP_IsNull: { /* same as TK_ISNULL, no-push */ - if( pTos->flags & MEM_Null ){ - pc = pOp->p2-1; - if( pOp->p1<0 ){ - popStack(&pTos, -pOp->p1); - } - } - if( pOp->p1>0 ){ - popStack(&pTos, pOp->p1); +case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ + if( (pIn1->flags & MEM_Null)!=0 ){ + pc = pOp->p2 - 1; } break; } -/* Opcode: NotNull P1 P2 * +/* Opcode: NotNull P1 P2 * * * ** -** Jump to P2 if the top abs(P1) values on the stack are all not NULL. -** Regardless of whether or not the jump is taken, pop the stack -** P1 times if P1 is greater than zero. But if P1 is negative, -** leave the stack unchanged. +** Jump to P2 if the value in register P1 is not NULL. */ -case OP_NotNull: { /* same as TK_NOTNULL, no-push */ - int i, cnt; - cnt = pOp->p1; - if( cnt<0 ) cnt = -cnt; - assert( &pTos[1-cnt] >= p->aStack ); - for(i=0; i=cnt ) pc = pOp->p2-1; - if( pOp->p1>0 ) popStack(&pTos, cnt); +case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */ + if( (pIn1->flags & MEM_Null)==0 ){ + pc = pOp->p2 - 1; + } break; } -/* Opcode: SetNumColumns P1 P2 * +/* Opcode: SetNumColumns * P2 * * * ** -** Before the OP_Column opcode can be executed on a cursor, this -** opcode must be called to set the number of fields in the table. +** This opcode sets the number of columns for the cursor opened by the +** following instruction to P2. ** -** This opcode sets the number of columns for cursor P1 to P2. +** An OP_SetNumColumns is only useful if it occurs immediately before +** one of the following opcodes: ** -** If OP_KeyAsData is to be applied to cursor P1, it must be executed -** before this op-code. +** OpenRead +** OpenWrite +** OpenPseudo +** +** If the OP_Column opcode is to be executed on a cursor, then +** this opcode must be present immediately before the opcode that +** opens the cursor. */ -case OP_SetNumColumns: { /* no-push */ - Cursor *pC; - assert( (pOp->p1)nCursor ); - assert( p->apCsr[pOp->p1]!=0 ); - pC = p->apCsr[pOp->p1]; - pC->nField = pOp->p2; +#if 0 +case OP_SetNumColumns: { break; } +#endif -/* Opcode: Column P1 P2 P3 +/* Opcode: Column P1 P2 P3 P4 * ** ** Interpret the data that cursor P1 points to as a structure built using ** the MakeRecord instruction. (See the MakeRecord opcode for additional -** information about the format of the data.) 
Push onto the stack the value -** of the P2-th column contained in the data. If there are less that (P2+1) -** values in the record, push a NULL onto the stack. -** -** If the KeyAsData opcode has previously executed on this cursor, then the -** field might be extracted from the key rather than the data. -** -** If the column contains fewer than P2 fields, then push a NULL. Or -** if P3 is of type P3_MEM, then push the P3 value. The P3 value will -** be default value for a column that has been added using the ALTER TABLE -** ADD COLUMN command. If P3 is an ordinary string, just push a NULL. -** When P3 is a string it is really just a comment describing the value -** to be pushed, not a default value. +** information about the format of the data.) Extract the P2-th column +** from this record. If there are less that (P2+1) +** values in the record, extract a NULL. +** +** The value extracted is stored in register P3. +** +** If the column contains fewer than P2 fields, then extract a NULL. Or, +** if the P4 argument is a P4_MEM use the value of the P4 argument as +** the result. */ case OP_Column: { u32 payloadSize; /* Number of bytes in the record */ - int p1 = pOp->p1; /* P1 value of the opcode */ - int p2 = pOp->p2; /* column number to retrieve */ - Cursor *pC = 0; /* The VDBE cursor */ + i64 payloadSize64; /* Number of bytes in the record */ + int p1; /* P1 value of the opcode */ + int p2; /* column number to retrieve */ + VdbeCursor *pC; /* The VDBE cursor */ char *zRec; /* Pointer to complete record-data */ BtCursor *pCrsr; /* The BTree cursor */ u32 *aType; /* aType[i] holds the numeric type of the i-th column */ u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */ - u32 nField; /* number of fields in the record */ + int nField; /* number of fields in the record */ int len; /* The length of the serialized data for the column */ int i; /* Loop counter */ char *zData; /* Part of the record being decoded */ + Mem *pDest; /* Where to write the extracted value */ Mem sMem; /* For storing the record being decoded */ + u8 *zIdx; /* Index into header */ + u8 *zEndHdr; /* Pointer to first byte after the header */ + u32 offset; /* Offset into the data */ + u64 offset64; /* 64-bit offset. 64 bits needed to catch overflow */ + int szHdr; /* Size of the header size field at start of record */ + int avail; /* Number of bytes of available data */ + - sMem.flags = 0; + p1 = pOp->p1; + p2 = pOp->p2; + pC = 0; + memset(&sMem, 0, sizeof(sMem)); assert( p1nCursor ); - pTos++; - pTos->flags = MEM_Null; + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + pDest = &p->aMem[pOp->p3]; + MemSetTypeFlag(pDest, MEM_Null); + zRec = 0; /* This block sets the variable payloadSize to be the total number of ** bytes in the record. @@ -2006,80 +2037,66 @@ ** If the data is unavailable, zRec is set to NULL. ** ** We also compute the number of columns in the record. For cursors, - ** the number of columns is stored in the Cursor.nField element. For - ** records on the stack, the next entry down on the stack is an integer - ** which is the number of records. + ** the number of columns is stored in the VdbeCursor.nField element. 
*/ pC = p->apCsr[p1]; + assert( pC!=0 ); #ifndef SQLITE_OMIT_VIRTUALTABLE assert( pC->pVtabCursor==0 ); #endif - assert( pC!=0 ); - if( pC->pCursor!=0 ){ + pCrsr = pC->pCursor; + if( pCrsr!=0 ){ /* The record is stored in a B-Tree */ rc = sqlite3VdbeCursorMoveto(pC); if( rc ) goto abort_due_to_error; - zRec = 0; - pCrsr = pC->pCursor; if( pC->nullRow ){ payloadSize = 0; }else if( pC->cacheStatus==p->cacheCtr ){ payloadSize = pC->payloadSize; zRec = (char*)pC->aRow; }else if( pC->isIndex ){ - i64 payloadSize64; sqlite3BtreeKeySize(pCrsr, &payloadSize64); - payloadSize = payloadSize64; + /* sqlite3BtreeParseCellPtr() uses getVarint32() to extract the + ** payload size, so it is impossible for payloadSize64 to be + ** larger than 32 bits. */ + assert( (payloadSize64 & SQLITE_MAX_U32)==(u64)payloadSize64 ); + payloadSize = (u32)payloadSize64; }else{ sqlite3BtreeDataSize(pCrsr, &payloadSize); } - nField = pC->nField; }else if( pC->pseudoTable ){ /* The record is the sole entry of a pseudo-table */ payloadSize = pC->nData; zRec = pC->pData; pC->cacheStatus = CACHE_STALE; assert( payloadSize==0 || zRec!=0 ); - nField = pC->nField; - pCrsr = 0; }else{ - zRec = 0; + /* Consider the row to be NULL */ payloadSize = 0; - pCrsr = 0; - nField = 0; } - /* If payloadSize is 0, then just push a NULL onto the stack. */ + /* If payloadSize is 0, then just store a NULL */ if( payloadSize==0 ){ - assert( pTos->flags==MEM_Null ); - break; + assert( pDest->flags&MEM_Null ); + goto op_column_out; } - if( payloadSize>SQLITE_MAX_LENGTH ){ + assert( db->aLimit[SQLITE_LIMIT_LENGTH]>=0 ); + if( payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } + nField = pC->nField; assert( p2cacheStatus==p->cacheCtr ){ - aType = pC->aType; + aType = pC->aType; + if( pC->cacheStatus==p->cacheCtr ){ aOffset = pC->aOffset; }else{ - u8 *zIdx; /* Index into header */ - u8 *zEndHdr; /* Pointer to first byte after the header */ - u32 offset; /* Offset into the data */ - int szHdrSz; /* Size of the header size field at start of record */ - int avail; /* Number of bytes of available data */ - - aType = pC->aType; - if( aType==0 ){ - pC->aType = aType = sqliteMallocRaw( 2*nField*sizeof(aType) ); - } - if( aType==0 ){ - goto no_mem; - } + assert(aType); + avail = 0; pC->aOffset = aOffset = &aType[nField]; pC->payloadSize = payloadSize; pC->cacheStatus = p->cacheCtr; @@ -2098,7 +2115,8 @@ ** having to make additional calls to fetch the content portion of ** the record. */ - if( avail>=payloadSize ){ + assert( avail>=0 ); + if( payloadSize <= (u32)avail ){ zRec = zData; pC->aRow = (u8*)zData; }else{ @@ -2108,7 +2126,37 @@ /* The following assert is true in all cases accept when ** the database file has been corrupted externally. ** assert( zRec!=0 || avail>=payloadSize || avail>=9 ); */ - szHdrSz = GetVarint((u8*)zData, offset); + szHdr = getVarint32((u8*)zData, offset); + + /* Make sure a corrupt database has not given us an oversize header. + ** Do this now to avoid an oversize memory allocation. + ** + ** Type entries can be between 1 and 5 bytes each. But 4 and 5 byte + ** types use so much data space that there can only be 4096 and 32 of + ** them, respectively. So the maximum header length results from a + ** 3-byte type for each of the maximum of 32768 columns plus three + ** extra bytes for the header length itself. 32768*3 + 3 = 98307. 
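The record header is read with getVarint32() and, as noted above, can never legitimately exceed 32768*3 + 3 = 98307 bytes. A simplified sketch of the variable-length integer encoding involved (seven payload bits per byte, most-significant group first, a set high bit meaning another byte follows; the real nine-byte SQLite varint routine is more involved):

#include <stdio.h>

static int varint32_get(const unsigned char *z, unsigned int *pVal){
  unsigned int v = 0;
  int i;
  for(i=0; i<5; i++){
    v = (v<<7) | (unsigned int)(z[i] & 0x7f);
    if( (z[i] & 0x80)==0 ){
      *pVal = v;
      return i+1;                 /* number of bytes consumed */
    }
  }
  *pVal = v;
  return 5;
}

int main(void){
  static const unsigned char aBuf[] = { 0x81, 0x00 };   /* encodes 128 */
  unsigned int v;
  int n = varint32_get(aBuf, &v);
  printf("value=%u bytes=%d\n", v, n);                  /* value=128 bytes=2 */
  printf("header bound: %d\n", 32768*3 + 3);            /* 98307 */
  return 0;
}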
+ */ + if( offset > 98307 ){ + rc = SQLITE_CORRUPT_BKPT; + goto op_column_out; + } + + /* Compute in len the number of bytes of data we need to read in order + ** to get nField type values. offset is an upper bound on this. But + ** nField might be significantly less than the true number of columns + ** in the table, and in that case, 5*nField+3 might be smaller than offset. + ** We want to minimize len in order to limit the size of the memory + ** allocation, especially if a corrupt database file has caused offset + ** to be oversized. Offset is limited to 98307 above. But 98307 might + ** still exceed Robson memory allocation limits on some configurations. + ** On systems that cannot tolerate large memory allocations, nField*5+3 + ** will likely be much smaller since nField will likely be less than + ** 20 or so. This insures that Robson memory allocation limits are + ** not exceeded even for corrupt database files. + */ + len = nField*5 + 3; + if( len > (int)offset ) len = (int)offset; /* The KeyFetch() or DataFetch() above are fast and will get the entire ** record header in most cases. But they will fail to get the complete @@ -2116,44 +2164,50 @@ ** in the B-Tree. When that happens, use sqlite3VdbeMemFromBtree() to ** acquire the complete header text. */ - if( !zRec && availisIndex, &sMem); + if( !zRec && availisIndex, &sMem); if( rc!=SQLITE_OK ){ goto op_column_out; } zData = sMem.z; } - zEndHdr = (u8 *)&zData[offset]; - zIdx = (u8 *)&zData[szHdrSz]; + zEndHdr = (u8 *)&zData[len]; + zIdx = (u8 *)&zData[szHdr]; /* Scan the header and use it to fill in the aType[] and aOffset[] ** arrays. aType[i] will contain the type integer for the i-th ** column and aOffset[i] will contain the offset from the beginning ** of the record to the start of the data for the i-th column */ + offset64 = offset; for(i=0; izEndHdr || offset>payloadSize ){ + if( (zIdx > zEndHdr)|| (offset64 > payloadSize) + || (zIdx==zEndHdr && offset64!=(u64)payloadSize) ){ rc = SQLITE_CORRUPT_BKPT; goto op_column_out; } @@ -2162,91 +2216,115 @@ /* Get the column information. If aOffset[p2] is non-zero, then ** deserialize the value from the record. If aOffset[p2] is zero, ** then there are not enough fields in the record to satisfy the - ** request. In this case, set the value NULL or to P3 if P3 is + ** request. In this case, set the value NULL or to P4 if P4 is ** a pointer to a Mem object. */ if( aOffset[p2] ){ assert( rc==SQLITE_OK ); if( zRec ){ - zData = &zRec[aOffset[p2]]; + sqlite3VdbeMemReleaseExternal(pDest); + sqlite3VdbeSerialGet((u8 *)&zRec[aOffset[p2]], aType[p2], pDest); }else{ len = sqlite3VdbeSerialTypeLen(aType[p2]); - rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, pC->isIndex,&sMem); + sqlite3VdbeMemMove(&sMem, pDest); + rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, pC->isIndex, &sMem); if( rc!=SQLITE_OK ){ goto op_column_out; } zData = sMem.z; + sqlite3VdbeSerialGet((u8*)zData, aType[p2], pDest); } - sqlite3VdbeSerialGet((u8*)zData, aType[p2], pTos); - pTos->enc = encoding; + pDest->enc = encoding; }else{ - if( pOp->p3type==P3_MEM ){ - sqlite3VdbeMemShallowCopy(pTos, (Mem *)(pOp->p3), MEM_Static); + if( pOp->p4type==P4_MEM ){ + sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static); }else{ - pTos->flags = MEM_Null; + assert( pDest->flags&MEM_Null ); } } /* If we dynamically allocated space to hold the data (in the ** sqlite3VdbeMemFromBtree() call above) then transfer control of that - ** dynamically allocated space over to the pTos structure. 
+ ** dynamically allocated space over to the pDest structure. ** This prevents a memory copy. */ - if( (sMem.flags & MEM_Dyn)!=0 ){ - assert( pTos->flags & MEM_Ephem ); - assert( pTos->flags & (MEM_Str|MEM_Blob) ); - assert( pTos->z==sMem.z ); - assert( sMem.flags & MEM_Term ); - pTos->flags &= ~MEM_Ephem; - pTos->flags |= MEM_Dyn|MEM_Term; + if( sMem.zMalloc ){ + assert( sMem.z==sMem.zMalloc ); + assert( !(pDest->flags & MEM_Dyn) ); + assert( !(pDest->flags & (MEM_Blob|MEM_Str)) || pDest->z==sMem.z ); + pDest->flags &= ~(MEM_Ephem|MEM_Static); + pDest->flags |= MEM_Term; + pDest->z = sMem.z; + pDest->zMalloc = sMem.zMalloc; } - /* pTos->z might be pointing to sMem.zShort[]. Fix that so that we - ** can abandon sMem */ - rc = sqlite3VdbeMemMakeWriteable(pTos); + rc = sqlite3VdbeMemMakeWriteable(pDest); op_column_out: + UPDATE_MAX_BLOBSIZE(pDest); + REGISTER_TRACE(pOp->p3, pDest); break; } -/* Opcode: MakeRecord P1 P2 P3 +/* Opcode: Affinity P1 P2 * P4 * ** -** Convert the top abs(P1) entries of the stack into a single entry -** suitable for use as a data record in a database table or as a key -** in an index. The details of the format are irrelavant as long as -** the OP_Column opcode can decode the record later and as long as the -** sqlite3VdbeRecordCompare function will correctly compare two encoded -** records. Refer to source code comments for the details of the record -** format. +** Apply affinities to a range of P2 registers starting with P1. ** -** The original stack entries are popped from the stack if P1>0 but -** remain on the stack if P1<0. +** P4 is a string that is P2 characters long. The nth character of the +** string indicates the column affinity that should be used for the nth +** memory cell in the range. +*/ +case OP_Affinity: { + char *zAffinity; /* The affinity to be applied */ + Mem *pData0; /* First register to which to apply affinity */ + Mem *pLast; /* Last register to which to apply affinity */ + Mem *pRec; /* Current register */ + + zAffinity = pOp->p4.z; + pData0 = &p->aMem[pOp->p1]; + pLast = &pData0[pOp->p2-1]; + for(pRec=pData0; pRec<=pLast; pRec++){ + ExpandBlob(pRec); + applyAffinity(pRec, zAffinity[pRec-pData0], encoding); + } + break; +} + +/* Opcode: MakeRecord P1 P2 P3 P4 * ** -** If P2 is not zero and one or more of the entries are NULL, then jump -** to the address given by P2. This feature can be used to skip a -** uniqueness test on indices. +** Convert P2 registers beginning with P1 into a single entry +** suitable for use as a data record in a database table or as a key +** in an index. The details of the format are irrelevant as long as +** the OP_Column opcode can decode the record later. +** Refer to source code comments for the details of the record +** format. ** -** P3 may be a string that is P1 characters long. The nth character of the +** P4 may be a string that is P2 characters long. The nth character of the ** string indicates the column affinity that should be used for the nth -** field of the index key (i.e. the first character of P3 corresponds to the -** lowest element on the stack). +** field of the index key. ** ** The mapping from character to affinity is given by the SQLITE_AFF_ ** macros defined in sqliteInt.h. ** -** If P3 is NULL then all index fields have the affinity NONE. -** -** See also OP_MakeIdxRec +** If P4 is NULL then all index fields have the affinity NONE. 
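Both OP_Affinity above and OP_MakeRecord below walk a register range in step with the P4 affinity string, one character per field. A toy illustration of that pairing (the values and affinity letters are placeholders, not the real SQLITE_AFF_ codes):

#include <stdio.h>

int main(void){
  /* Stand-ins for the P2 registers starting at P1, and a P4 string of
  ** the same length. */
  const char *azReg[] = { "42", "hello", "3.5" };
  const char zAffinity[] = "dcd";
  int i;
  for(i=0; i<3; i++){
    printf("field %d: value \"%s\" gets affinity '%c'\n", i, azReg[i], zAffinity[i]);
  }
  return 0;
}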
*/ -/* Opcode: MakeIdxRec P1 P2 P3 -** -** This opcode works just OP_MakeRecord except that it reads an extra -** integer from the stack (thus reading a total of abs(P1+1) entries) -** and appends that extra integer to the end of the record as a varint. -** This results in an index key. -*/ -case OP_MakeIdxRec: case OP_MakeRecord: { + u8 *zNewRecord; /* A buffer to hold the data for the new record */ + Mem *pRec; /* The new record */ + u64 nData; /* Number of bytes of data space */ + int nHdr; /* Number of bytes of header space */ + i64 nByte; /* Data space required for this record */ + int nZero; /* Number of zero bytes at the end of the record */ + int nVarint; /* Number of bytes in a varint */ + u32 serial_type; /* Type field */ + Mem *pData0; /* First field to be combined into the record */ + Mem *pLast; /* Last field of the record */ + int nField; /* Number of fields in the record */ + char *zAffinity; /* The affinity string for the record */ + int file_format; /* File format to use for encoding */ + int i; /* Space used in zNewRecord[] */ + int len; /* Length of a field */ + /* Assuming the record contains N fields, the record format looks ** like this: ** @@ -2254,58 +2332,35 @@ ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 | ** ------------------------------------------------------------------------ ** - ** Data(0) is taken from the lowest element of the stack and data(N-1) is - ** the top of the stack. + ** Data(0) is taken from register P1. Data(1) comes from register P1+1 + ** and so froth. ** ** Each type field is a varint representing the serial type of the ** corresponding data element (see sqlite3VdbeSerialType()). The ** hdr-size field is also a varint which is the offset from the beginning ** of the record to data0. */ - u8 *zNewRecord; /* A buffer to hold the data for the new record */ - Mem *pRec; /* The new record */ - Mem *pRowid = 0; /* Rowid appended to the new record */ - u64 nData = 0; /* Number of bytes of data space */ - int nHdr = 0; /* Number of bytes of header space */ - u64 nByte = 0; /* Data space required for this record */ - int nZero = 0; /* Number of zero bytes at the end of the record */ - int nVarint; /* Number of bytes in a varint */ - u32 serial_type; /* Type field */ - int containsNull = 0; /* True if any of the data fields are NULL */ - Mem *pData0; /* Bottom of the stack */ - int leaveOnStack; /* If true, leave the entries on the stack */ - int nField; /* Number of fields in the record */ - int jumpIfNull; /* Jump here if non-zero and any entries are NULL. 
*/ - int addRowid; /* True to append a rowid column at the end */ - char *zAffinity; /* The affinity string for the record */ - int file_format; /* File format to use for encoding */ - int i; /* Space used in zNewRecord[] */ - char zTemp[NBFS]; /* Space to hold small records */ - - leaveOnStack = ((pOp->p1<0)?1:0); - nField = pOp->p1 * (leaveOnStack?-1:1); - jumpIfNull = pOp->p2; - addRowid = pOp->opcode==OP_MakeIdxRec; - zAffinity = pOp->p3; - - pData0 = &pTos[1-nField]; - assert( pData0>=p->aStack ); - containsNull = 0; + nData = 0; /* Number of bytes of data space */ + nHdr = 0; /* Number of bytes of header space */ + nByte = 0; /* Data space required for this record */ + nZero = 0; /* Number of zero bytes at the end of the record */ + nField = pOp->p1; + zAffinity = pOp->p4.z; + assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=p->nMem+1 ); + pData0 = &p->aMem[nField]; + nField = pOp->p2; + pLast = &pData0[nField-1]; file_format = p->minWriteFileFormat; /* Loop through the elements that will make up the record to figure ** out how much space is required for the new record. */ - for(pRec=pData0; pRec<=pTos; pRec++){ - int len; + for(pRec=pData0; pRec<=pLast; pRec++){ if( zAffinity ){ applyAffinity(pRec, zAffinity[pRec-pData0], encoding); } - if( pRec->flags&MEM_Null ){ - containsNull = 1; - } if( pRec->flags&MEM_Zero && pRec->n>0 ){ - ExpandBlob(pRec); + sqlite3VdbeMemExpandBlob(pRec); } serial_type = sqlite3VdbeSerialType(pRec, file_format); len = sqlite3VdbeSerialTypeLen(serial_type); @@ -2314,175 +2369,331 @@ if( pRec->flags & MEM_Zero ){ /* Only pure zero-filled BLOBs can be input to this Opcode. ** We do not allow blobs with a prefix and a zero-filled tail. */ - nZero += pRec->u.i; + nZero += pRec->u.nZero; }else if( len ){ nZero = 0; } } - /* If we have to append a varint rowid to this record, set pRowid - ** to the value of the rowid and increase nByte by the amount of space - ** required to store it. - */ - if( addRowid ){ - pRowid = &pTos[0-nField]; - assert( pRowid>=p->aStack ); - sqlite3VdbeMemIntegerify(pRowid); - serial_type = sqlite3VdbeSerialType(pRowid, 0); - nData += sqlite3VdbeSerialTypeLen(serial_type); - nHdr += sqlite3VarintLen(serial_type); - nZero = 0; - } - /* Add the initial header varint and total the size */ nHdr += nVarint = sqlite3VarintLen(nHdr); if( nVarintSQLITE_MAX_LENGTH ){ + if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } - /* Allocate space for the new record. */ - if( nByte>sizeof(zTemp) ){ - zNewRecord = sqliteMallocRaw(nByte); - if( !zNewRecord ){ - goto no_mem; - } - }else{ - zNewRecord = (u8*)zTemp; + /* Make sure the output register has a buffer large enough to store + ** the new record. The output register (pOp->p3) is not allowed to + ** be one of the input registers (because the following call to + ** sqlite3VdbeMemGrow() could clobber the value before it is used). 
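For concreteness, here is a hand-encoded instance of the record format shown above, holding the two values 1 and 'hi' (illustrative only, based on the documented serial-type rules):

#include <stdio.h>

/* Serial type 1 is an 8-bit signed integer; an odd serial type N>=13
** is text of (N-13)/2 bytes, so 0x11 (17) means 2 bytes of text.
** The first byte is the header size and counts itself. */
static const unsigned char aDemoRecord[] = {
  0x03,          /* hdr-size: 3 header bytes in total        */
  0x01,          /* type 0: 8-bit signed integer             */
  0x11,          /* type 1: text, (0x11-13)/2 = 2 bytes      */
  0x01,          /* data 0: the integer value 1              */
  'h', 'i'       /* data 1: the text value 'hi'              */
};

int main(void){
  printf("record is %d bytes\n", (int)sizeof(aDemoRecord));   /* 6 bytes */
  return 0;
}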
+ */ + assert( pOp->p3p1 || pOp->p3>=pOp->p1+pOp->p2 ); + pOut = &p->aMem[pOp->p3]; + if( sqlite3VdbeMemGrow(pOut, (int)nByte, 0) ){ + goto no_mem; } + zNewRecord = (u8 *)pOut->z; /* Write the record */ - i = sqlite3PutVarint(zNewRecord, nHdr); - for(pRec=pData0; pRec<=pTos; pRec++){ + i = putVarint32(zNewRecord, nHdr); + for(pRec=pData0; pRec<=pLast; pRec++){ serial_type = sqlite3VdbeSerialType(pRec, file_format); - i += sqlite3PutVarint(&zNewRecord[i], serial_type); /* serial type */ - } - if( addRowid ){ - i += sqlite3PutVarint(&zNewRecord[i], sqlite3VdbeSerialType(pRowid, 0)); - } - for(pRec=pData0; pRec<=pTos; pRec++){ /* serial data */ - i += sqlite3VdbeSerialPut(&zNewRecord[i], nByte-i, pRec, file_format); + i += putVarint32(&zNewRecord[i], serial_type); /* serial type */ } - if( addRowid ){ - i += sqlite3VdbeSerialPut(&zNewRecord[i], nByte-i, pRowid, 0); + for(pRec=pData0; pRec<=pLast; pRec++){ /* serial data */ + i += sqlite3VdbeSerialPut(&zNewRecord[i], (int)(nByte-i), pRec,file_format); } assert( i==nByte ); - /* Pop entries off the stack if required. Push the new record on. */ - if( !leaveOnStack ){ - popStack(&pTos, nField+addRowid); - } - pTos++; - pTos->n = nByte; - if( nByte<=sizeof(zTemp) ){ - assert( zNewRecord==(unsigned char *)zTemp ); - pTos->z = pTos->zShort; - memcpy(pTos->zShort, zTemp, nByte); - pTos->flags = MEM_Blob | MEM_Short; - }else{ - assert( zNewRecord!=(unsigned char *)zTemp ); - pTos->z = (char*)zNewRecord; - pTos->flags = MEM_Blob | MEM_Dyn; - pTos->xDel = 0; - } + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + pOut->n = (int)nByte; + pOut->flags = MEM_Blob | MEM_Dyn; + pOut->xDel = 0; if( nZero ){ - pTos->u.i = nZero; - pTos->flags |= MEM_Zero; + pOut->u.nZero = nZero; + pOut->flags |= MEM_Zero; } - pTos->enc = SQLITE_UTF8; /* In case the blob is ever converted to text */ + pOut->enc = SQLITE_UTF8; /* In case the blob is ever converted to text */ + REGISTER_TRACE(pOp->p3, pOut); + UPDATE_MAX_BLOBSIZE(pOut); + break; +} - /* If a NULL was encountered and jumpIfNull is non-zero, take the jump. */ - if( jumpIfNull && containsNull ){ - pc = jumpIfNull - 1; +/* Opcode: Count P1 P2 * * * +** +** Store the number of entries (an integer value) in the table or index +** opened by cursor P1 in register P2 +*/ +#ifndef SQLITE_OMIT_BTREECOUNT +case OP_Count: { /* out2-prerelease */ + i64 nEntry; + BtCursor *pCrsr; + + pCrsr = p->apCsr[pOp->p1]->pCursor; + if( pCrsr ){ + rc = sqlite3BtreeCount(pCrsr, &nEntry); + }else{ + nEntry = 0; } + pOut->flags = MEM_Int; + pOut->u.i = nEntry; break; } +#endif -/* Opcode: Statement P1 * * +/* Opcode: Statement P1 * * * * ** ** Begin an individual statement transaction which is part of a larger -** BEGIN..COMMIT transaction. This is needed so that the statement +** transaction. This is needed so that the statement ** can be rolled back after an error without having to roll back the ** entire transaction. The statement transaction will automatically ** commit when the VDBE halts. ** +** If the database connection is currently in autocommit mode (that +** is to say, if it is in between BEGIN and COMMIT) +** and if there are no other active statements on the same database +** connection, then this operation is a no-op. No statement transaction +** is needed since any error can use the normal ROLLBACK process to +** undo changes. +** +** If a statement transaction is started, then a statement journal file +** will be allocated and initialized. +** ** The statement is begun on the database file with index P1. 
The main ** database file has an index of 0 and the file used for temporary tables ** has an index of 1. */ -case OP_Statement: { /* no-push */ - int i = pOp->p1; +case OP_Statement: { Btree *pBt; - if( i>=0 && inDb && (pBt = db->aDb[i].pBt)!=0 && !(db->autoCommit) ){ + if( db->autoCommit==0 || db->activeVdbeCnt>1 ){ + assert( pOp->p1>=0 && pOp->p1nDb ); + assert( db->aDb[pOp->p1].pBt!=0 ); + pBt = db->aDb[pOp->p1].pBt; assert( sqlite3BtreeIsInTrans(pBt) ); - if( !sqlite3BtreeIsInStmt(pBt) ){ - rc = sqlite3BtreeBeginStmt(pBt); - p->openedStatement = 1; + assert( (p->btreeMask & (1<p1))!=0 ); + if( p->iStatement==0 ){ + assert( db->nStatement>=0 && db->nSavepoint>=0 ); + db->nStatement++; + p->iStatement = db->nSavepoint + db->nStatement; + } + rc = sqlite3BtreeBeginStmt(pBt, p->iStatement); + } + break; +} + +/* Opcode: Savepoint P1 * * P4 * +** +** Open, release or rollback the savepoint named by parameter P4, depending +** on the value of P1. To open a new savepoint, P1==0. To release (commit) an +** existing savepoint, P1==1, or to rollback an existing savepoint P1==2. +*/ +case OP_Savepoint: { + int p1; /* Value of P1 operand */ + char *zName; /* Name of savepoint */ + int nName; + Savepoint *pNew; + Savepoint *pSavepoint; + Savepoint *pTmp; + int iSavepoint; + int ii; + + p1 = pOp->p1; + zName = pOp->p4.z; + + /* Assert that the p1 parameter is valid. Also that if there is no open + ** transaction, then there cannot be any savepoints. + */ + assert( db->pSavepoint==0 || db->autoCommit==0 ); + assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK ); + assert( db->pSavepoint || db->isTransactionSavepoint==0 ); + assert( checkSavepointCount(db) ); + + if( p1==SAVEPOINT_BEGIN ){ + if( db->writeVdbeCnt>0 ){ + /* A new savepoint cannot be created if there are active write + ** statements (i.e. open read/write incremental blob handles). + */ + sqlite3SetString(&p->zErrMsg, db, "cannot open savepoint - " + "SQL statements in progress"); + rc = SQLITE_BUSY; + }else{ + nName = sqlite3Strlen30(zName); + + /* Create a new savepoint structure. */ + pNew = sqlite3DbMallocRaw(db, sizeof(Savepoint)+nName+1); + if( pNew ){ + pNew->zName = (char *)&pNew[1]; + memcpy(pNew->zName, zName, nName+1); + + /* If there is no open transaction, then mark this as a special + ** "transaction savepoint". */ + if( db->autoCommit ){ + db->autoCommit = 0; + db->isTransactionSavepoint = 1; + }else{ + db->nSavepoint++; + } + + /* Link the new savepoint into the database handle's list. */ + pNew->pNext = db->pSavepoint; + db->pSavepoint = pNew; + } + } + }else{ + iSavepoint = 0; + + /* Find the named savepoint. If there is no such savepoint, then an + ** an error is returned to the user. */ + for( + pSavepoint = db->pSavepoint; + pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName); + pSavepoint = pSavepoint->pNext + ){ + iSavepoint++; + } + if( !pSavepoint ){ + sqlite3SetString(&p->zErrMsg, db, "no such savepoint: %s", zName); + rc = SQLITE_ERROR; + }else if( + db->writeVdbeCnt>0 || (p1==SAVEPOINT_ROLLBACK && db->activeVdbeCnt>1) + ){ + /* It is not possible to release (commit) a savepoint if there are + ** active write statements. It is not possible to rollback a savepoint + ** if there are any active statements at all. + */ + sqlite3SetString(&p->zErrMsg, db, + "cannot %s savepoint - SQL statements in progress", + (p1==SAVEPOINT_ROLLBACK ? "rollback": "release") + ); + rc = SQLITE_BUSY; + }else{ + + /* Determine whether or not this is a transaction savepoint. 
If so, + ** and this is a RELEASE command, then the current transaction + ** is committed. + */ + int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint; + if( isTransaction && p1==SAVEPOINT_RELEASE ){ + db->autoCommit = 1; + if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ + p->pc = pc; + db->autoCommit = 0; + p->rc = rc = SQLITE_BUSY; + goto vdbe_return; + } + db->isTransactionSavepoint = 0; + rc = p->rc; + }else{ + iSavepoint = db->nSavepoint - iSavepoint - 1; + for(ii=0; iinDb; ii++){ + rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint); + if( rc!=SQLITE_OK ){ + goto abort_due_to_error; + } + } + if( p1==SAVEPOINT_ROLLBACK && (db->flags&SQLITE_InternChanges)!=0 ){ + sqlite3ExpirePreparedStatements(db); + sqlite3ResetInternalSchema(db, 0); + } + } + + /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all + ** savepoints nested inside of the savepoint being operated on. */ + while( db->pSavepoint!=pSavepoint ){ + pTmp = db->pSavepoint; + db->pSavepoint = pTmp->pNext; + sqlite3DbFree(db, pTmp); + db->nSavepoint--; + } + + /* If it is a RELEASE, then destroy the savepoint being operated on too */ + if( p1==SAVEPOINT_RELEASE ){ + assert( pSavepoint==db->pSavepoint ); + db->pSavepoint = pSavepoint->pNext; + sqlite3DbFree(db, pSavepoint); + if( !isTransaction ){ + db->nSavepoint--; + } + } } } + break; } -/* Opcode: AutoCommit P1 P2 * +/* Opcode: AutoCommit P1 P2 * * * ** ** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll ** back any currently active btree transactions. If there are any active -** VMs (apart from this one), then the COMMIT or ROLLBACK statement fails. +** VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if +** there are active writing VMs or active VMs that use shared cache. ** ** This instruction causes the VM to halt. */ -case OP_AutoCommit: { /* no-push */ - u8 i = pOp->p1; - u8 rollback = pOp->p2; - - assert( i==1 || i==0 ); - assert( i==1 || rollback==0 ); - +case OP_AutoCommit: { + int desiredAutoCommit; + int iRollback; + int turnOnAC; + + desiredAutoCommit = pOp->p1; + iRollback = pOp->p2; + turnOnAC = desiredAutoCommit && !db->autoCommit; + assert( desiredAutoCommit==1 || desiredAutoCommit==0 ); + assert( desiredAutoCommit==1 || iRollback==0 ); assert( db->activeVdbeCnt>0 ); /* At least this one VM is active */ - if( db->activeVdbeCnt>1 && i && !db->autoCommit ){ - /* If this instruction implements a COMMIT or ROLLBACK, other VMs are + if( turnOnAC && iRollback && db->activeVdbeCnt>1 ){ + /* If this instruction implements a ROLLBACK and other VMs are ** still running, and a transaction is active, return an error indicating ** that the other VMs must complete first. */ - sqlite3SetString(&p->zErrMsg, "cannot ", rollback?"rollback":"commit", - " transaction - SQL statements in progress", (char*)0); - rc = SQLITE_ERROR; - }else if( i!=db->autoCommit ){ - if( pOp->p2 ){ - assert( i==1 ); + sqlite3SetString(&p->zErrMsg, db, "cannot rollback transaction - " + "SQL statements in progress"); + rc = SQLITE_BUSY; + }else if( turnOnAC && !iRollback && db->writeVdbeCnt>0 ){ + /* If this instruction implements a COMMIT and other VMs are writing + ** return an error indicating that the other VMs must complete first. 
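At the SQL level the three P1 values of OP_Savepoint correspond to SAVEPOINT, RELEASE, and ROLLBACK TO; the three savepoint statements below compile down to this opcode. A minimal C-API sequence exercising each path (illustrative only; error checking omitted):

#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
  sqlite3_exec(db, "SAVEPOINT sp1", 0, 0, 0);        /* P1==SAVEPOINT_BEGIN    */
  sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);
  sqlite3_exec(db, "ROLLBACK TO sp1", 0, 0, 0);      /* P1==SAVEPOINT_ROLLBACK */
  sqlite3_exec(db, "RELEASE sp1", 0, 0, 0);          /* P1==SAVEPOINT_RELEASE  */
  sqlite3_close(db);
  return 0;
}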
+ */ + sqlite3SetString(&p->zErrMsg, db, "cannot commit transaction - " + "SQL statements in progress"); + rc = SQLITE_BUSY; + }else if( desiredAutoCommit!=db->autoCommit ){ + if( iRollback ){ + assert( desiredAutoCommit==1 ); sqlite3RollbackAll(db); db->autoCommit = 1; }else{ - db->autoCommit = i; + db->autoCommit = (u8)desiredAutoCommit; if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ - p->pTos = pTos; p->pc = pc; - db->autoCommit = 1-i; - p->rc = SQLITE_BUSY; - return SQLITE_BUSY; + db->autoCommit = (u8)(1-desiredAutoCommit); + p->rc = rc = SQLITE_BUSY; + goto vdbe_return; } } + assert( db->nStatement==0 ); + sqlite3CloseSavepoints(db); if( p->rc==SQLITE_OK ){ - return SQLITE_DONE; + rc = SQLITE_DONE; }else{ - return SQLITE_ERROR; + rc = SQLITE_ERROR; } + goto vdbe_return; }else{ - sqlite3SetString(&p->zErrMsg, - (!i)?"cannot start a transaction within a transaction":( - (rollback)?"cannot rollback - no transaction is active": - "cannot commit - no transaction is active"), (char*)0); + sqlite3SetString(&p->zErrMsg, db, + (!desiredAutoCommit)?"cannot start a transaction within a transaction":( + (iRollback)?"cannot rollback - no transaction is active": + "cannot commit - no transaction is active")); rc = SQLITE_ERROR; } break; } -/* Opcode: Transaction P1 P2 * +/* Opcode: Transaction P1 P2 * * * ** ** Begin a transaction. The transaction ends when a Commit or Rollback ** opcode is encountered. Depending on the ON CONFLICT setting, the @@ -2490,7 +2701,8 @@ ** ** P1 is the index of the database file on which the transaction is ** started. Index 0 is the main database file and index 1 is the -** file used for temporary tables. +** file used for temporary tables. Indices of 2 or more are used for +** attached databases. ** ** If P2 is non-zero, then a write-transaction is started. A RESERVED lock is ** obtained on the database file when a write-transaction is started. No @@ -2502,20 +2714,19 @@ ** ** If P2 is zero, then a read-lock is obtained on the database file. */ -case OP_Transaction: { /* no-push */ - int i = pOp->p1; +case OP_Transaction: { Btree *pBt; - assert( i>=0 && inDb ); - pBt = db->aDb[i].pBt; + assert( pOp->p1>=0 && pOp->p1nDb ); + assert( (p->btreeMask & (1<p1))!=0 ); + pBt = db->aDb[pOp->p1].pBt; if( pBt ){ rc = sqlite3BtreeBeginTrans(pBt, pOp->p2); if( rc==SQLITE_BUSY ){ p->pc = pc; - p->rc = SQLITE_BUSY; - p->pTos = pTos; - return SQLITE_BUSY; + p->rc = rc = SQLITE_BUSY; + goto vdbe_return; } if( rc!=SQLITE_OK && rc!=SQLITE_READONLY /* && rc!=SQLITE_BUSY */ ){ goto abort_due_to_error; @@ -2524,79 +2735,64 @@ break; } -/* Opcode: ReadCookie P1 P2 * +/* Opcode: ReadCookie P1 P2 P3 * * ** -** Read cookie number P2 from database P1 and push it onto the stack. -** P2==0 is the schema version. P2==1 is the database format. -** P2==2 is the recommended pager cache size, and so forth. P1==0 is +** Read cookie number P3 from database P1 and write it into register P2. +** P3==1 is the schema version. P3==2 is the database format. +** P3==3 is the recommended pager cache size, and so forth. P1==0 is ** the main database file and P1==1 is the database file used to store ** temporary tables. ** -** If P1 is negative, then this is a request to read the size of a -** databases free-list. P2 must be set to 1 in this case. The actual -** database accessed is ((P1+1)*-1). For example, a P1 parameter of -1 -** corresponds to database 0 ("main"), a P1 of -2 is database 1 ("temp"). 
-** ** There must be a read-lock on the database (either a transaction ** must be started or there must be an open cursor) before ** executing this instruction. */ -case OP_ReadCookie: { +case OP_ReadCookie: { /* out2-prerelease */ int iMeta; - int iDb = pOp->p1; - int iCookie = pOp->p2; + int iDb; + int iCookie; - assert( pOp->p2p1; + iCookie = pOp->p3; + assert( pOp->p3=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 ); - /* The indexing of meta values at the schema layer is off by one from - ** the indexing in the btree layer. The btree considers meta[0] to - ** be the number of free pages in the database (a read-only value) - ** and meta[1] to be the schema cookie. The schema layer considers - ** meta[1] to be the schema cookie. So we have to shift the index - ** by one in the following statement. - */ - rc = sqlite3BtreeGetMeta(db->aDb[iDb].pBt, 1 + iCookie, (u32 *)&iMeta); - pTos++; - pTos->u.i = iMeta; - pTos->flags = MEM_Int; + assert( (p->btreeMask & (1<aDb[iDb].pBt, iCookie, (u32 *)&iMeta); + pOut->u.i = iMeta; + MemSetTypeFlag(pOut, MEM_Int); break; } -/* Opcode: SetCookie P1 P2 * +/* Opcode: SetCookie P1 P2 P3 * * ** -** Write the top of the stack into cookie number P2 of database P1. -** P2==0 is the schema version. P2==1 is the database format. -** P2==2 is the recommended pager cache size, and so forth. P1==0 is -** the main database file and P1==1 is the database file used to store -** temporary tables. +** Write the content of register P3 (interpreted as an integer) +** into cookie number P2 of database P1. P2==1 is the schema version. +** P2==2 is the database format. P2==3 is the recommended pager cache +** size, and so forth. P1==0 is the main database file and P1==1 is the +** database file used to store temporary tables. ** ** A transaction must be started before executing this opcode. */ -case OP_SetCookie: { /* no-push */ +case OP_SetCookie: { /* in3 */ Db *pDb; assert( pOp->p2p1>=0 && pOp->p1nDb ); + assert( (p->btreeMask & (1<p1))!=0 ); pDb = &db->aDb[pOp->p1]; assert( pDb->pBt!=0 ); - assert( pTos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); + sqlite3VdbeMemIntegerify(pIn3); /* See note about index shifting on OP_ReadCookie */ - rc = sqlite3BtreeUpdateMeta(pDb->pBt, 1+pOp->p2, (int)pTos->u.i); - if( pOp->p2==0 ){ + rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, (int)pIn3->u.i); + if( pOp->p2==BTREE_SCHEMA_VERSION ){ /* When the schema cookie changes, record the new cookie internally */ - pDb->pSchema->schema_cookie = pTos->u.i; + pDb->pSchema->schema_cookie = (int)pIn3->u.i; db->flags |= SQLITE_InternChanges; - }else if( pOp->p2==1 ){ + }else if( pOp->p2==BTREE_FILE_FORMAT ){ /* Record changes in the file format */ - pDb->pSchema->file_format = pTos->u.i; + pDb->pSchema->file_format = (u8)pIn3->u.i; } - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; if( pOp->p1==1 ){ /* Invalidate all prepared statements whenever the TEMP database ** schema is changed. Ticket #1644 */ @@ -2621,24 +2817,26 @@ ** to be executed (to establish a read lock) before this opcode is ** invoked. 
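The schema cookie that ReadCookie, SetCookie, and VerifyCookie manipulate is exposed to applications as PRAGMA schema_version. An illustrative reader (hypothetical file name, error handling omitted):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open("test.db", &db);
  sqlite3_prepare_v2(db, "PRAGMA schema_version", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("schema cookie = %d\n", sqlite3_column_int(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}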
*/ -case OP_VerifyCookie: { /* no-push */ +case OP_VerifyCookie: { int iMeta; Btree *pBt; assert( pOp->p1>=0 && pOp->p1nDb ); + assert( (p->btreeMask & (1<p1))!=0 ); pBt = db->aDb[pOp->p1].pBt; if( pBt ){ - rc = sqlite3BtreeGetMeta(pBt, 1, (u32 *)&iMeta); + rc = sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta); }else{ rc = SQLITE_OK; iMeta = 0; } if( rc==SQLITE_OK && iMeta!=pOp->p2 ){ - sqlite3SetString(&p->zErrMsg, "database schema has changed", (char*)0); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed"); /* If the schema-cookie from the database file matches the cookie ** stored with the in-memory representation of the schema, do ** not reload the schema from the database file. ** - ** If virtual-tables are in use, this is not just an optimisation. + ** If virtual-tables are in use, this is not just an optimization. ** Often, v-tables store their data in other SQLite tables, which ** are queried from within xNext() and other v-table methods using ** prepared queries. If such a query is out-of-date, we do not want to @@ -2657,17 +2855,18 @@ break; } -/* Opcode: OpenRead P1 P2 P3 +/* Opcode: OpenRead P1 P2 P3 P4 P5 ** ** Open a read-only cursor for the database table whose root page is -** P2 in a database file. The database file is determined by an -** integer from the top of the stack. 0 means the main database and -** 1 means the database used for temporary tables. Give the new -** cursor an identifier of P1. The P1 values need not be contiguous -** but all P1 values should be small integers. It is an error for -** P1 to be negative. +** P2 in a database file. The database file is determined by P3. +** P3==0 means the main database, P3==1 means the database used for +** temporary tables, and P3>1 means used the corresponding attached +** database. Give the new cursor an identifier of P1. The P1 +** values need not be contiguous but all P1 values should be small integers. +** It is an error for P1 to be negative. ** -** If P2==0 then take the root page number from the next of the stack. +** If P5!=0 then use the content of register P2 as the root page, not +** the value of P2 itself. ** ** There will be a read lock on the database whenever there is an ** open cursor. If the database was unlocked prior to this instruction @@ -2678,20 +2877,26 @@ ** to get a read lock but fails, the script terminates with an ** SQLITE_BUSY error code. ** -** The P3 value is a pointer to a KeyInfo structure that defines the -** content and collating sequence of indices. P3 is NULL for cursors -** that are not pointing to indices. +** The P4 value may be either an integer (P4_INT32) or a pointer to +** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo +** structure, then said structure defines the content and collating +** sequence of the index being opened. Otherwise, if P4 is an integer +** value, it is set to the number of columns in the table. ** ** See also OpenWrite. */ -/* Opcode: OpenWrite P1 P2 P3 +/* Opcode: OpenWrite P1 P2 P3 P4 P5 ** ** Open a read/write cursor named P1 on the table or index whose root -** page is P2. If P2==0 then take the root page number from the stack. +** page is P2. Or if P5!=0 use the content of register P2 to find the +** root page. ** -** The P3 value is a pointer to a KeyInfo structure that defines the -** content and collating sequence of indices. P3 is NULL for cursors -** that are not pointing to indices. 
+** The P4 value may be either an integer (P4_INT32) or a pointer to +** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo +** structure, then said structure defines the content and collating +** sequence of the index being opened. Otherwise, if P4 is an integer +** value, it is set to the number of columns in the table, or to the +** largest index of any column of the table that is actually used. ** ** This instruction works just like OpenRead except that it opens the cursor ** in read/write mode. For a given table, there can be one or more read-only @@ -2699,22 +2904,24 @@ ** ** See also OpenRead. */ -case OP_OpenRead: /* no-push */ -case OP_OpenWrite: { /* no-push */ - int i = pOp->p1; - int p2 = pOp->p2; +case OP_OpenRead: +case OP_OpenWrite: { + int nField; + KeyInfo *pKeyInfo; + int p2; + int iDb; int wrFlag; Btree *pX; - int iDb; - Cursor *pCur; + VdbeCursor *pCur; Db *pDb; - - assert( pTos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); - iDb = pTos->u.i; - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; + int flags; + + nField = 0; + pKeyInfo = 0; + p2 = pOp->p2; + iDb = pOp->p3; assert( iDb>=0 && iDbnDb ); + assert( (p->btreeMask & (1<aDb[iDb]; pX = pDb->pBt; assert( pX!=0 ); @@ -2726,78 +2933,78 @@ }else{ wrFlag = 0; } - if( p2<=0 ){ - assert( pTos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); - p2 = pTos->u.i; - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; - assert( p2>=2 ); + if( pOp->p5 ){ + assert( p2>0 ); + assert( p2<=p->nMem ); + pIn2 = &p->aMem[p2]; + sqlite3VdbeMemIntegerify(pIn2); + p2 = (int)pIn2->u.i; + /* The p2 value always comes from a prior OP_CreateTable opcode and + ** that opcode will always set the p2 value to 2 or more or else fail. + ** If there were a failure, the prepared statement would have halted + ** before reaching this instruction. */ + if( NEVER(p2<2) ) { + rc = SQLITE_CORRUPT_BKPT; + goto abort_due_to_error; + } + } + if( pOp->p4type==P4_KEYINFO ){ + pKeyInfo = pOp->p4.pKeyInfo; + pKeyInfo->enc = ENC(p->db); + nField = pKeyInfo->nField+1; + }else if( pOp->p4type==P4_INT32 ){ + nField = pOp->p4.i; } - assert( i>=0 ); - pCur = allocateCursor(p, i, iDb); + assert( pOp->p1>=0 ); + pCur = allocateCursor(p, pOp->p1, nField, iDb, 1); if( pCur==0 ) goto no_mem; pCur->nullRow = 1; - if( pX==0 ) break; - /* We always provide a key comparison function. If the table being - ** opened is of type INTKEY, the comparision function will be ignored. */ - rc = sqlite3BtreeCursor(pX, p2, wrFlag, - sqlite3VdbeRecordCompare, pOp->p3, - &pCur->pCursor); - if( pOp->p3type==P3_KEYINFO ){ - pCur->pKeyInfo = (KeyInfo*)pOp->p3; - pCur->pIncrKey = &pCur->pKeyInfo->incrKey; - pCur->pKeyInfo->enc = ENC(p->db); - }else{ - pCur->pKeyInfo = 0; - pCur->pIncrKey = &pCur->bogusIncrKey; - } + rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->pCursor); + pCur->pKeyInfo = pKeyInfo; + switch( rc ){ - case SQLITE_BUSY: { - p->pc = pc; - p->rc = SQLITE_BUSY; - p->pTos = &pTos[1 + (pOp->p2<=0)]; /* Operands must remain on stack */ - return SQLITE_BUSY; - } case SQLITE_OK: { - int flags = sqlite3BtreeFlags(pCur->pCursor); + flags = sqlite3BtreeFlags(pCur->pCursor); + /* Sanity checking. Only the lower four bits of the flags byte should - ** be used. Bit 3 (mask 0x08) is unpreditable. The lower 3 bits + ** be used. Bit 3 (mask 0x08) is unpredictable. The lower 3 bits ** (mask 0x07) should be either 5 (intkey+leafdata for tables) or ** 2 (zerodata for indices). 
If these conditions are not met it can - ** only mean that we are dealing with a corrupt database file + ** only mean that we are dealing with a corrupt database file. + ** Note: All of the above is checked already in sqlite3BtreeCursor(). */ - if( (flags & 0xf0)!=0 || ((flags & 0x07)!=5 && (flags & 0x07)!=2) ){ - rc = SQLITE_CORRUPT_BKPT; - goto abort_due_to_error; - } - pCur->isTable = (flags & BTREE_INTKEY)!=0; - pCur->isIndex = (flags & BTREE_ZERODATA)!=0; - /* If P3==0 it means we are expected to open a table. If P3!=0 then + assert( (flags & 0xf0)==0 ); + assert( (flags & 0x07)==5 || (flags & 0x07)==2 ); + + pCur->isTable = (flags & BTREE_INTKEY)!=0 ?1:0; + pCur->isIndex = (flags & BTREE_ZERODATA)!=0 ?1:0; + /* If P4==0 it means we are expected to open a table. If P4!=0 then ** we expect to be opening an index. If this is not what happened, ** then the database is corrupt */ - if( (pCur->isTable && pOp->p3type==P3_KEYINFO) - || (pCur->isIndex && pOp->p3type!=P3_KEYINFO) ){ + if( (pCur->isTable && pOp->p4type==P4_KEYINFO) + || (pCur->isIndex && pOp->p4type!=P4_KEYINFO) ){ rc = SQLITE_CORRUPT_BKPT; goto abort_due_to_error; } break; } case SQLITE_EMPTY: { - pCur->isTable = pOp->p3type!=P3_KEYINFO; + pCur->isTable = pOp->p4type!=P4_KEYINFO; pCur->isIndex = !pCur->isTable; + pCur->pCursor = 0; rc = SQLITE_OK; break; } default: { + assert( rc!=SQLITE_BUSY ); /* Busy conditions detected earlier */ goto abort_due_to_error; } } break; } -/* Opcode: OpenEphemeral P1 P2 P3 +/* Opcode: OpenEphemeral P1 P2 * P4 * ** ** Open a new cursor P1 to a transient table. ** The cursor is always opened read/write even if @@ -2805,8 +3012,8 @@ ** table is deleted automatically when the cursor is closed. ** ** P2 is the number of columns in the virtual table. -** The cursor points to a BTree table if P3==0 and to a BTree index -** if P3 is not 0. If P3 is not NULL, it points to a KeyInfo structure +** The cursor points to a BTree table if P4==0 and to a BTree index +** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure ** that defines the format of keys in the index. ** ** This opcode was once called OpenTemp. But that created @@ -2815,14 +3022,21 @@ ** this opcode. Then this opcode was call OpenVirtual. But ** that created confusion with the whole virtual-table idea. */ -case OP_OpenEphemeral: { /* no-push */ - int i = pOp->p1; - Cursor *pCx; - assert( i>=0 ); - pCx = allocateCursor(p, i, -1); +case OP_OpenEphemeral: { + VdbeCursor *pCx; + static const int openFlags = + SQLITE_OPEN_READWRITE | + SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | + SQLITE_OPEN_DELETEONCLOSE | + SQLITE_OPEN_TRANSIENT_DB; + + assert( pOp->p1>=0 ); + pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; - rc = sqlite3BtreeFactory(db, 0, 1, SQLITE_DEFAULT_TEMP_CACHE_SIZE, &pCx->pBt); + rc = sqlite3BtreeFactory(db, 0, 1, SQLITE_DEFAULT_TEMP_CACHE_SIZE, openFlags, + &pCx->pBt); if( rc==SQLITE_OK ){ rc = sqlite3BtreeBeginTrans(pCx->pBt, 1); } @@ -2832,31 +3046,28 @@ ** opening it. If a transient table is required, just use the ** automatically created table with root-page 1 (an INTKEY table). 
*/ - if( pOp->p3 ){ + if( pOp->p4.pKeyInfo ){ int pgno; - assert( pOp->p3type==P3_KEYINFO ); + assert( pOp->p4type==P4_KEYINFO ); rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_ZERODATA); if( rc==SQLITE_OK ){ assert( pgno==MASTER_ROOT+1 ); - rc = sqlite3BtreeCursor(pCx->pBt, pgno, 1, sqlite3VdbeRecordCompare, - pOp->p3, &pCx->pCursor); - pCx->pKeyInfo = (KeyInfo*)pOp->p3; + rc = sqlite3BtreeCursor(pCx->pBt, pgno, 1, + (KeyInfo*)pOp->p4.z, pCx->pCursor); + pCx->pKeyInfo = pOp->p4.pKeyInfo; pCx->pKeyInfo->enc = ENC(p->db); - pCx->pIncrKey = &pCx->pKeyInfo->incrKey; } pCx->isTable = 0; }else{ - rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, 0, &pCx->pCursor); + rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor); pCx->isTable = 1; - pCx->pIncrKey = &pCx->bogusIncrKey; } } - pCx->nField = pOp->p2; pCx->isIndex = !pCx->isTable; break; } -/* Opcode: OpenPseudo P1 * * +/* Opcode: OpenPseudo P1 P2 P3 * * ** ** Open a new cursor that points to a fake table that contains a single ** row of data. Any attempt to write a second row of data causes the @@ -2867,112 +3078,184 @@ ** NEW or OLD tables in a trigger. Also used to hold the a single ** row output from the sorter so that the row can be decomposed into ** individual columns using the OP_Column opcode. +** +** When OP_Insert is executed to insert a row in to the pseudo table, +** the pseudo-table cursor may or may not make it's own copy of the +** original row data. If P2 is 0, then the pseudo-table will copy the +** original row data. Otherwise, a pointer to the original memory cell +** is stored. In this case, the vdbe program must ensure that the +** memory cell containing the row data is not overwritten until the +** pseudo table is closed (or a new row is inserted into it). +** +** P3 is the number of fields in the records that will be stored by +** the pseudo-table. */ -case OP_OpenPseudo: { /* no-push */ - int i = pOp->p1; - Cursor *pCx; - assert( i>=0 ); - pCx = allocateCursor(p, i, -1); +case OP_OpenPseudo: { + VdbeCursor *pCx; + + assert( pOp->p1>=0 ); + pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, 0); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->pseudoTable = 1; - pCx->pIncrKey = &pCx->bogusIncrKey; + pCx->ephemPseudoTable = (u8)pOp->p2; pCx->isTable = 1; pCx->isIndex = 0; break; } -/* Opcode: Close P1 * * +/* Opcode: Close P1 * * * * ** ** Close a cursor previously opened as P1. If P1 is not ** currently open, this instruction is a no-op. */ -case OP_Close: { /* no-push */ - int i = pOp->p1; - if( i>=0 && inCursor ){ - sqlite3VdbeFreeCursor(p, p->apCsr[i]); - p->apCsr[i] = 0; - } +case OP_Close: { + assert( pOp->p1>=0 && pOp->p1nCursor ); + sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]); + p->apCsr[pOp->p1] = 0; break; } -/* Opcode: MoveGe P1 P2 * +/* Opcode: SeekGe P1 P2 P3 P4 * +** +** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +** use the value in register P3 as the key. If cursor P1 refers +** to an SQL index, then P3 is the first in an array of P4 registers +** that are used as an unpacked index key. ** -** Pop the top of the stack and use its value as a key. Reposition -** cursor P1 so that it points to the smallest entry that is greater -** than or equal to the key that was popped ffrom the stack. -** If there are no records greater than or equal to the key and P2 -** is not zero, then jump to P2. +** Reposition cursor P1 so that it points to the smallest entry that +** is greater than or equal to the key value. 
If there are no records +** greater than or equal to the key and P2 is not zero, then jump to P2. ** -** See also: Found, NotFound, Distinct, MoveLt, MoveGt, MoveLe +** See also: Found, NotFound, Distinct, SeekLt, SeekGt, SeekLe */ -/* Opcode: MoveGt P1 P2 * +/* Opcode: SeekGt P1 P2 P3 P4 * ** -** Pop the top of the stack and use its value as a key. Reposition -** cursor P1 so that it points to the smallest entry that is greater -** than the key from the stack. -** If there are no records greater than the key and P2 is not zero, -** then jump to P2. +** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +** use the value in register P3 as a key. If cursor P1 refers +** to an SQL index, then P3 is the first in an array of P4 registers +** that are used as an unpacked index key. ** -** See also: Found, NotFound, Distinct, MoveLt, MoveGe, MoveLe +** Reposition cursor P1 so that it points to the smallest entry that +** is greater than the key value. If there are no records greater than +** the key and P2 is not zero, then jump to P2. +** +** See also: Found, NotFound, Distinct, SeekLt, SeekGe, SeekLe */ -/* Opcode: MoveLt P1 P2 * +/* Opcode: SeekLt P1 P2 P3 P4 * +** +** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +** use the value in register P3 as a key. If cursor P1 refers +** to an SQL index, then P3 is the first in an array of P4 registers +** that are used as an unpacked index key. ** -** Pop the top of the stack and use its value as a key. Reposition -** cursor P1 so that it points to the largest entry that is less -** than the key from the stack. -** If there are no records less than the key and P2 is not zero, -** then jump to P2. +** Reposition cursor P1 so that it points to the largest entry that +** is less than the key value. If there are no records less than +** the key and P2 is not zero, then jump to P2. ** -** See also: Found, NotFound, Distinct, MoveGt, MoveGe, MoveLe +** See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLe */ -/* Opcode: MoveLe P1 P2 * +/* Opcode: SeekLe P1 P2 P3 P4 * ** -** Pop the top of the stack and use its value as a key. Reposition -** cursor P1 so that it points to the largest entry that is less than -** or equal to the key that was popped from the stack. -** If there are no records less than or eqal to the key and P2 is not zero, -** then jump to P2. +** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +** use the value in register P3 as a key. If cursor P1 refers +** to an SQL index, then P3 is the first in an array of P4 registers +** that are used as an unpacked index key. ** -** See also: Found, NotFound, Distinct, MoveGt, MoveGe, MoveLt +** Reposition cursor P1 so that it points to the largest entry that +** is less than or equal to the key value. If there are no records +** less than or equal to the key and P2 is not zero, then jump to P2. 
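The four seek variants correspond to lower-bound/upper-bound positioning in a sorted sequence. A standalone sketch over a plain sorted array (illustrative only) of where each variant leaves the cursor, with -1 standing for the case in which the opcode jumps to P2:

#include <stdio.h>

static int seekGe(const int *a, int n, int key){
  int i;
  for(i=0; i<n; i++) if( a[i]>=key ) return i;     /* smallest entry >= key */
  return -1;
}
static int seekGt(const int *a, int n, int key){
  int i;
  for(i=0; i<n; i++) if( a[i]>key ) return i;      /* smallest entry >  key */
  return -1;
}
static int seekLe(const int *a, int n, int key){
  int i;
  for(i=n-1; i>=0; i--) if( a[i]<=key ) return i;  /* largest entry  <= key */
  return -1;
}
static int seekLt(const int *a, int n, int key){
  int i;
  for(i=n-1; i>=0; i--) if( a[i]<key ) return i;   /* largest entry  <  key */
  return -1;
}

int main(void){
  static const int a[] = { 10, 20, 30 };
  printf("Ge=%d Gt=%d Le=%d Lt=%d\n",
         seekGe(a,3,20), seekGt(a,3,20), seekLe(a,3,20), seekLt(a,3,20));
  /* prints: Ge=1 Gt=2 Le=1 Lt=0 */
  return 0;
}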
+** +** See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLt */ -case OP_MoveLt: /* no-push */ -case OP_MoveLe: /* no-push */ -case OP_MoveGe: /* no-push */ -case OP_MoveGt: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_SeekLt: /* jump, in3 */ +case OP_SeekLe: /* jump, in3 */ +case OP_SeekGe: /* jump, in3 */ +case OP_SeekGt: { /* jump, in3 */ + int res; + int oc; + VdbeCursor *pC; + UnpackedRecord r; + int nField; + i64 iKey; /* The rowid we are to seek to */ - assert( pTos>=p->aStack ); - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p2!=0 ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); if( pC->pCursor!=0 ){ - int res, oc; oc = pOp->opcode; pC->nullRow = 0; - *pC->pIncrKey = oc==OP_MoveGt || oc==OP_MoveLe; if( pC->isTable ){ - i64 iKey; - sqlite3VdbeMemIntegerify(pTos); - iKey = intToKey(pTos->u.i); - if( pOp->p2==0 && pOp->opcode==OP_MoveGe ){ - pC->movetoTarget = iKey; - pC->deferredMoveto = 1; - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; - break; - } - rc = sqlite3BtreeMoveto(pC->pCursor, 0, (u64)iKey, 0, &res); + /* The input value in P3 might be of any type: integer, real, string, + ** blob, or NULL. But it needs to be an integer before we can do + ** the seek, so covert it. */ + applyNumericAffinity(pIn3); + iKey = sqlite3VdbeIntValue(pIn3); + pC->rowidIsValid = 0; + + /* If the P3 value could not be converted into an integer without + ** loss of information, then special processing is required... */ + if( (pIn3->flags & MEM_Int)==0 ){ + if( (pIn3->flags & MEM_Real)==0 ){ + /* If the P3 value cannot be converted into any kind of a number, + ** then the seek is not possible, so jump to P2 */ + pc = pOp->p2 - 1; + break; + } + /* If we reach this point, then the P3 value must be a floating + ** point number. */ + assert( (pIn3->flags & MEM_Real)!=0 ); + + if( iKey==SMALLEST_INT64 && (pIn3->r<(double)iKey || pIn3->r>0) ){ + /* The P3 value is too large in magnitude to be expressed as an + ** integer. 
*/ + res = 1; + if( pIn3->r<0 ){ + if( oc==OP_SeekGt || oc==OP_SeekGe ){ + rc = sqlite3BtreeFirst(pC->pCursor, &res); + if( rc!=SQLITE_OK ) goto abort_due_to_error; + } + }else{ + if( oc==OP_SeekLt || oc==OP_SeekLe ){ + rc = sqlite3BtreeLast(pC->pCursor, &res); + if( rc!=SQLITE_OK ) goto abort_due_to_error; + } + } + if( res ){ + pc = pOp->p2 - 1; + } + break; + }else if( oc==OP_SeekLt || oc==OP_SeekGe ){ + /* Use the ceiling() function to convert real->int */ + if( pIn3->r > (double)iKey ) iKey++; + }else{ + /* Use the floor() function to convert real->int */ + assert( oc==OP_SeekLe || oc==OP_SeekGt ); + if( pIn3->r < (double)iKey ) iKey--; + } + } + rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)iKey, 0, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } - pC->lastRowid = pTos->u.i; - pC->rowidIsValid = res==0; + if( res==0 ){ + pC->rowidIsValid = 1; + pC->lastRowid = iKey; + } }else{ - assert( pTos->flags & MEM_Blob ); - ExpandBlob(pTos); - rc = sqlite3BtreeMoveto(pC->pCursor, pTos->z, pTos->n, 0, &res); + nField = pOp->p4.i; + assert( pOp->p4type==P4_INT32 ); + assert( nField>0 ); + r.pKeyInfo = pC->pKeyInfo; + r.nField = (u16)nField; + if( oc==OP_SeekGt || oc==OP_SeekLe ){ + r.flags = UNPACKED_INCRKEY; + }else{ + r.flags = 0; + } + r.aMem = &p->aMem[pOp->p3]; + rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, &r, 0, 0, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } @@ -2980,12 +3263,11 @@ } pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; - *pC->pIncrKey = 0; #ifdef SQLITE_TEST sqlite3_search_count++; #endif - if( oc==OP_MoveGe || oc==OP_MoveGt ){ - if( res<0 ){ + if( oc==OP_SeekGe || oc==OP_SeekGt ){ + if( res<0 || (res==0 && oc==OP_SeekGt) ){ rc = sqlite3BtreeNext(pC->pCursor, &res); if( rc!=SQLITE_OK ) goto abort_due_to_error; pC->rowidIsValid = 0; @@ -2993,8 +3275,8 @@ res = 0; } }else{ - assert( oc==OP_MoveLt || oc==OP_MoveLe ); - if( res>=0 ){ + assert( oc==OP_SeekLt || oc==OP_SeekLe ); + if( res>0 || (res==0 && oc==OP_SeekLt) ){ rc = sqlite3BtreePrevious(pC->pCursor, &res); if( rc!=SQLITE_OK ) goto abort_due_to_error; pC->rowidIsValid = 0; @@ -3005,83 +3287,107 @@ res = sqlite3BtreeEof(pC->pCursor); } } + assert( pOp->p2>0 ); if( res ){ - if( pOp->p2>0 ){ - pc = pOp->p2 - 1; - }else{ - pC->nullRow = 1; - } + pc = pOp->p2 - 1; } + }else{ + /* This happens when attempting to open the sqlite3_master table + ** for read access returns SQLITE_EMPTY. In this case always + ** take the jump (since there are no records in the table). + */ + assert( pC->pseudoTable==0 ); + pc = pOp->p2 - 1; } - Release(pTos); - pTos--; break; } -/* Opcode: Distinct P1 P2 * -** -** Use the top of the stack as a record created using MakeRecord. P1 is a -** cursor on a table that declared as an index. If that table contains an -** entry that matches the top of the stack fall thru. If the top of the stack -** matches no entry in P1 then jump to P2. -** -** The cursor is left pointing at the matching entry if it exists. The -** record on the top of the stack is not popped. +/* Opcode: Seek P1 P2 * * * ** -** This instruction is similar to NotFound except that this operation -** does not pop the key from the stack. +** P1 is an open table cursor and P2 is a rowid integer. Arrange +** for P1 to move so that it points to the rowid given by P2. ** -** The instruction is used to implement the DISTINCT operator on SELECT -** statements. The P1 table is not a true index but rather a record of -** all results that have produced so far. 
-** -** See also: Found, NotFound, MoveTo, IsUnique, NotExists +** This is actually a deferred seek. Nothing actually happens until +** the cursor is used to read a record. That way, if no reads +** occur, no unnecessary I/O happens. */ -/* Opcode: Found P1 P2 * +case OP_Seek: { /* in2 */ + VdbeCursor *pC; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + if( ALWAYS(pC->pCursor!=0) ){ + assert( pC->isTable ); + pC->nullRow = 0; + pC->movetoTarget = sqlite3VdbeIntValue(pIn2); + pC->rowidIsValid = 0; + pC->deferredMoveto = 1; + } + break; +} + + +/* Opcode: Found P1 P2 P3 * * ** -** Top of the stack holds a blob constructed by MakeRecord. P1 is an index. -** If an entry that matches the top of the stack exists in P1 then -** jump to P2. If the top of the stack does not match any entry in P1 +** Register P3 holds a blob constructed by MakeRecord. P1 is an index. +** If an entry that matches the value in register p3 exists in P1 then +** jump to P2. If the P3 value does not match any entry in P1 ** then fall thru. The P1 cursor is left pointing at the matching entry -** if it exists. The blob is popped off the top of the stack. +** if it exists. ** ** This instruction is used to implement the IN operator where the -** left-hand side is a SELECT statement. P1 is not a true index but -** is instead a temporary index that holds the results of the SELECT -** statement. This instruction just checks to see if the left-hand side -** of the IN operator (stored on the top of the stack) exists in the -** result of the SELECT statement. +** left-hand side is a SELECT statement. P1 may be a true index, or it +** may be a temporary index that holds the results of the SELECT +** statement. This instruction is also used to implement the +** DISTINCT keyword in SELECT statements. +** +** This instruction checks if index P1 contains a record for which +** the first N serialized values exactly match the N serialized values +** in the record in register P3, where N is the total number of values in +** the P3 record (the P3 record is a prefix of the P1 record). ** -** See also: Distinct, NotFound, MoveTo, IsUnique, NotExists +** See also: NotFound, IsUnique, NotExists */ -/* Opcode: NotFound P1 P2 * +/* Opcode: NotFound P1 P2 P3 * * ** -** The top of the stack holds a blob constructed by MakeRecord. P1 is +** Register P3 holds a blob constructed by MakeRecord. P1 is ** an index. If no entry exists in P1 that matches the blob then jump ** to P2. If an entry does existing, fall through. The cursor is left -** pointing to the entry that matches. The blob is popped from the stack. +** pointing to the entry that matches. ** -** The difference between this operation and Distinct is that -** Distinct does not pop the key from the stack. 
-** -** See also: Distinct, Found, MoveTo, NotExists, IsUnique +** See also: Found, NotExists, IsUnique */ -case OP_Distinct: /* no-push */ -case OP_NotFound: /* no-push */ -case OP_Found: { /* no-push */ - int i = pOp->p1; - int alreadyExists = 0; - Cursor *pC; - assert( pTos>=p->aStack ); - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - if( (pC = p->apCsr[i])->pCursor!=0 ){ - int res, rx; +case OP_NotFound: /* jump, in3 */ +case OP_Found: { /* jump, in3 */ + int alreadyExists; + VdbeCursor *pC; + int res; + UnpackedRecord *pIdxKey; + char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*3 + 7]; + + alreadyExists = 0; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + if( ALWAYS(pC->pCursor!=0) ){ + assert( pC->isTable==0 ); - assert( pTos->flags & MEM_Blob ); - Stringify(pTos, encoding); - rx = sqlite3BtreeMoveto(pC->pCursor, pTos->z, pTos->n, 0, &res); - alreadyExists = rx==SQLITE_OK && res==0; + assert( pIn3->flags & MEM_Blob ); + pIdxKey = sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, + aTempRec, sizeof(aTempRec)); + if( pIdxKey==0 ){ + goto no_mem; + } + if( pOp->opcode==OP_Found ){ + pIdxKey->flags |= UNPACKED_PREFIX_MATCH; + } + rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, pIdxKey, 0, 0, &res); + sqlite3VdbeDeleteUnpackedRecord(pIdxKey); + if( rc!=SQLITE_OK ){ + break; + } + alreadyExists = (res==0); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; } @@ -3090,204 +3396,187 @@ }else{ if( !alreadyExists ) pc = pOp->p2 - 1; } - if( pOp->opcode!=OP_Distinct ){ - Release(pTos); - pTos--; - } break; } -/* Opcode: IsUnique P1 P2 * +/* Opcode: IsUnique P1 P2 P3 P4 * ** -** The top of the stack is an integer record number. Call this -** record number R. The next on the stack is an index key created -** using MakeIdxRec. Call it K. This instruction pops R from the -** stack but it leaves K unchanged. -** -** P1 is an index. So it has no data and its key consists of a -** record generated by OP_MakeRecord where the last field is the +** Cursor P1 is open on an index. So it has no data and its key consists +** of a record generated by OP_MakeRecord where the last field is the ** rowid of the entry that the index refers to. -** -** This instruction asks if there is an entry in P1 where the -** fields matches K but the rowid is different from R. -** If there is no such entry, then there is an immediate -** jump to P2. If any entry does exist where the index string -** matches K but the record number is not R, then the record -** number for that entry is pushed onto the stack and control -** falls through to the next instruction. -** -** See also: Distinct, NotFound, NotExists, Found -*/ -case OP_IsUnique: { /* no-push */ - int i = pOp->p1; - Mem *pNos = &pTos[-1]; - Cursor *pCx; +** +** The P3 register contains an integer record number. Call this record +** number R. Register P4 is the first in a set of N contiguous registers +** that make up an unpacked index key that can be used with cursor P1. +** The value of N can be inferred from the cursor. N includes the rowid +** value appended to the end of the index record. This rowid value may +** or may not be the same as R. +** +** If any of the N registers beginning with register P4 contains a NULL +** value, jump immediately to P2. +** +** Otherwise, this instruction checks if cursor P1 contains an entry +** where the first (N-1) fields match but the rowid value at the end +** of the index entry is not R. If there is no such entry, control jumps +** to instruction P2. 
Otherwise, the rowid of the conflicting index +** entry is copied to register P3 and control falls through to the next +** instruction. +** +** See also: NotFound, NotExists, Found +*/ +case OP_IsUnique: { /* jump, in3 */ + u16 ii; + VdbeCursor *pCx; BtCursor *pCrsr; - i64 R; + u16 nField; + Mem *aMem; + UnpackedRecord r; /* B-Tree index search key */ + i64 R; /* Rowid stored in register P3 */ + + aMem = &p->aMem[pOp->p4.i]; + /* Assert that the values of parameters P1 and P4 are in range. */ + assert( pOp->p4type==P4_INT32 ); + assert( pOp->p4.i>0 && pOp->p4.i<=p->nMem ); + assert( pOp->p1>=0 && pOp->p1nCursor ); - /* Pop the value R off the top of the stack - */ - assert( pNos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); - R = pTos->u.i; - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; - assert( i>=0 && inCursor ); - pCx = p->apCsr[i]; - assert( pCx!=0 ); + /* Find the index cursor. */ + pCx = p->apCsr[pOp->p1]; + assert( pCx->deferredMoveto==0 ); + pCx->seekResult = 0; + pCx->cacheStatus = CACHE_STALE; pCrsr = pCx->pCursor; - if( pCrsr!=0 ){ - int res; - i64 v; /* The record number on the P1 entry that matches K */ - char *zKey; /* The value of K */ - int nKey; /* Number of bytes in K */ - int len; /* Number of bytes in K without the rowid at the end */ - int szRowid; /* Size of the rowid column at the end of zKey */ - - /* Make sure K is a string and make zKey point to K - */ - assert( pNos->flags & MEM_Blob ); - Stringify(pNos, encoding); - zKey = pNos->z; - nKey = pNos->n; - szRowid = sqlite3VdbeIdxRowidLen((u8*)zKey); - len = nKey-szRowid; - - /* Search for an entry in P1 where all but the last four bytes match K. - ** If there is no such entry, jump immediately to P2. - */ - assert( pCx->deferredMoveto==0 ); - pCx->cacheStatus = CACHE_STALE; - rc = sqlite3BtreeMoveto(pCrsr, zKey, len, 0, &res); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - if( res<0 ){ - rc = sqlite3BtreeNext(pCrsr, &res); - if( res ){ - pc = pOp->p2 - 1; - break; - } - } - rc = sqlite3VdbeIdxKeyCompare(pCx, len, (u8*)zKey, &res); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - if( res>0 ){ + /* If any of the values are NULL, take the jump. */ + nField = pCx->pKeyInfo->nField; + for(ii=0; iip2 - 1; + pCrsr = 0; break; } + } + assert( (aMem[nField].flags & MEM_Null)==0 ); - /* At this point, pCrsr is pointing to an entry in P1 where all but - ** the final entry (the rowid) matches K. Check to see if the - ** final rowid column is different from R. If it equals R then jump - ** immediately to P2. - */ - rc = sqlite3VdbeIdxRowid(pCrsr, &v); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - if( v==R ){ + if( pCrsr!=0 ){ + /* Populate the index search key. */ + r.pKeyInfo = pCx->pKeyInfo; + r.nField = nField + 1; + r.flags = UNPACKED_PREFIX_SEARCH; + r.aMem = aMem; + + /* Extract the value of R from register P3. */ + sqlite3VdbeMemIntegerify(pIn3); + R = pIn3->u.i; + + /* Search the B-Tree index. If no conflicting record is found, jump + ** to P2. Otherwise, copy the rowid of the conflicting record to + ** register P3 and fall through to the next instruction. */ + rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &pCx->seekResult); + if( (r.flags & UNPACKED_PREFIX_SEARCH) || r.rowid==R ){ pc = pOp->p2 - 1; - break; + }else{ + pIn3->u.i = r.rowid; } - - /* The final varint of the key is different from R. Push it onto - ** the stack. (The record number of an entry that violates a UNIQUE - ** constraint.) 
- */ - pTos++; - pTos->u.i = v; - pTos->flags = MEM_Int; } break; } -/* Opcode: NotExists P1 P2 * +/* Opcode: NotExists P1 P2 P3 * * ** -** Use the top of the stack as a integer key. If a record with that key -** does not exist in table of P1, then jump to P2. If the record -** does exist, then fall thru. The cursor is left pointing to the -** record if it exists. The integer key is popped from the stack. +** Use the content of register P3 as an integer key. If a record +** with that key does not exist in table of P1, then jump to P2. +** If the record does exist, then fall thru. The cursor is left +** pointing to the record if it exists. ** ** The difference between this operation and NotFound is that this ** operation assumes the key is an integer and that P1 is a table whereas ** NotFound assumes key is a blob constructed from MakeRecord and ** P1 is an index. ** -** See also: Distinct, Found, MoveTo, NotFound, IsUnique +** See also: Found, NotFound, IsUnique */ -case OP_NotExists: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_NotExists: { /* jump, in3 */ + VdbeCursor *pC; BtCursor *pCrsr; - assert( pTos>=p->aStack ); - assert( i>=0 && i<p->nCursor ); - assert( p->apCsr[i]!=0 ); - if( (pCrsr = (pC = p->apCsr[i])->pCursor)!=0 ){ - int res; - u64 iKey; - assert( pTos->flags & MEM_Int ); - assert( p->apCsr[i]->isTable ); - iKey = intToKey(pTos->u.i); - rc = sqlite3BtreeMoveto(pCrsr, 0, iKey, 0,&res); - pC->lastRowid = pTos->u.i; - pC->rowidIsValid = res==0; - pC->nullRow = 0; - pC->cacheStatus = CACHE_STALE; - /* res might be uninitialized if rc!=SQLITE_OK. But if rc!=SQLITE_OK - ** processing is about to abort so we really do not care whether or not - ** the following jump is taken. (In other words, do not stress over - ** the error that valgrind sometimes shows on the next statement when - ** running ioerr.test and similar failure-recovery test scripts.) */ - if( res!=0 ){ - pc = pOp->p2 - 1; - pC->rowidIsValid = 0; + int res; + u64 iKey; + + assert( pIn3->flags & MEM_Int ); + assert( pOp->p1>=0 && pOp->p1<p->nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->isTable ); + pCrsr = pC->pCursor; + if( pCrsr!=0 ){ + res = 0; + iKey = pIn3->u.i; + rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res); + pC->lastRowid = pIn3->u.i; + pC->rowidIsValid = res==0 ?1:0; + pC->nullRow = 0; + pC->cacheStatus = CACHE_STALE; + pC->deferredMoveto = 0; + if( res!=0 ){ + pc = pOp->p2 - 1; + assert( pC->rowidIsValid==0 ); } + pC->seekResult = res; + }else{ + /* This happens when an attempt to open a read cursor on the + ** sqlite_master table returns SQLITE_EMPTY. + */ + assert( !pC->pseudoTable ); + assert( pC->isTable ); + pc = pOp->p2 - 1; + assert( pC->rowidIsValid==0 ); + pC->seekResult = 0; } - Release(pTos); - pTos--; break; } -/* Opcode: Sequence P1 * * +/* Opcode: Sequence P1 P2 * * * ** -** Push an integer onto the stack which is the next available -** sequence number for cursor P1. The sequence number on the -** cursor is incremented after the push. +** Find the next available sequence number for cursor P1. +** Write the sequence number into register P2. +** The sequence number on the cursor is incremented after this +** instruction.
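All of the opcode names documented in this file can be inspected from the public API by prepending EXPLAIN to a statement; each result row describes one VDBE instruction. A minimal sketch, assuming a 3.6.x-style build and an invented table name (the exact opcodes listed depend on the query, the schema and the library version):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER PRIMARY KEY, b)", 0, 0, 0);
  sqlite3_prepare_v2(db, "EXPLAIN SELECT b FROM t WHERE a=5", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Column 0 is the instruction address, column 1 the opcode name. */
    printf("%3d %s\n", sqlite3_column_int(pStmt, 0),
                       (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}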
*/ -case OP_Sequence: { - int i = pOp->p1; - assert( pTos>=p->aStack ); - assert( i>=0 && i<p->nCursor ); - assert( p->apCsr[i]!=0 ); - pTos++; - pTos->u.i = p->apCsr[i]->seqCount++; - pTos->flags = MEM_Int; +case OP_Sequence: { /* out2-prerelease */ + assert( pOp->p1>=0 && pOp->p1<p->nCursor ); + assert( p->apCsr[pOp->p1]!=0 ); + pOut->u.i = p->apCsr[pOp->p1]->seqCount++; + MemSetTypeFlag(pOut, MEM_Int); break; } -/* Opcode: NewRowid P1 P2 * +/* Opcode: NewRowid P1 P2 P3 * * ** ** Get a new integer record number (a.k.a "rowid") used as the key to a table. ** The record number is not previously used as a key in the database -** table that cursor P1 points to. The new record number is pushed -** onto the stack. +** table that cursor P1 points to. The new record number is written +** to register P2. ** -** If P2>0 then P2 is a memory cell that holds the largest previously +** If P3>0 then P3 is a register that holds the largest previously ** generated record number. No new record numbers are allowed to be less ** than this value. When this value reaches its maximum, a SQLITE_FULL -** error is generated. The P2 memory cell is updated with the generated -** record number. This P2 mechanism is used to help implement the +** error is generated. The P3 register is updated with the generated +** record number. This P3 mechanism is used to help implement the ** AUTOINCREMENT feature. */ -case OP_NewRowid: { - int i = pOp->p1; - i64 v = 0; - Cursor *pC; - assert( i>=0 && i<p->nCursor ); - assert( p->apCsr[i]!=0 ); - if( (pC = p->apCsr[i])->pCursor==0 ){ +case OP_NewRowid: { /* out2-prerelease */ + i64 v; /* The new rowid */ + VdbeCursor *pC; /* Cursor of table to get the new rowid */ + int res; /* Result of an sqlite3BtreeLast() */ + int cnt; /* Counter to limit the number of searches */ + Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */ + + v = 0; + res = 0; + assert( pOp->p1>=0 && pOp->p1<p->nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + if( NEVER(pC->pCursor==0) ){ /* The zero initialization above is all that is needed */ }else{ /* The next rowid or record number (different terms for the same @@ -3301,36 +3590,10 @@ ** The second algorithm is to select a rowid at random and see if ** it already exists in the table. If it does not exist, we have ** succeeded. If the random rowid does exist, we select a new one - ** and try again, up to 1000 times. - ** - ** For a table with less than 2 billion entries, the probability - ** of not finding a unused rowid is about 1.0e-300. This is a - ** non-zero probability, but it is still vanishingly small and should - ** never cause a problem. You are much, much more likely to have a - ** hardware failure than for this algorithm to fail. - ** - ** The analysis in the previous paragraph assumes that you have a good - ** source of random numbers. Is a library function like lrand48() - ** good enough? Maybe. Maybe not. It's hard to know whether there - ** might be subtle bugs is some implementations of lrand48() that - ** could cause problems. To avoid uncertainty, SQLite uses its own - ** random number generator based on the RC4 algorithm. - ** - ** To promote locality of reference for repetitive inserts, the - ** first few attempts at chosing a random rowid pick values just a little - ** larger than the previous rowid. This has been shown experimentally - ** to double the speed of the COPY operation. + ** and try again, up to 100 times.
*/ - int res, rx=SQLITE_OK, cnt; - i64 x; + assert( pC->isTable ); cnt = 0; - if( (sqlite3BtreeFlags(pC->pCursor)&(BTREE_INTKEY|BTREE_ZERODATA)) != - BTREE_INTKEY ){ - rc = SQLITE_CORRUPT_BKPT; - goto abort_due_to_error; - } - assert( (sqlite3BtreeFlags(pC->pCursor) & BTREE_INTKEY)!=0 ); - assert( (sqlite3BtreeFlags(pC->pCursor) & BTREE_ZERODATA)==0 ); #ifdef SQLITE_32BIT_ROWID # define MAX_ROWID 0x7fffffff @@ -3339,13 +3602,12 @@ ** Others complain about 0x7ffffffffffffffffLL. The following macro seems ** to provide the constant while making all compilers happy. */ -# define MAX_ROWID ( (((u64)0x7fffffff)<<32) | (u64)0xffffffff ) +# define MAX_ROWID (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff ) #endif if( !pC->useRandomRowid ){ - if( pC->nextRowidValid ){ - v = pC->nextRowid; - }else{ + v = sqlite3BtreeGetCachedRowid(pC->pCursor); + if( v==0 ){ rc = sqlite3BtreeLast(pC->pCursor, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; @@ -3354,7 +3616,6 @@ v = 1; }else{ sqlite3BtreeKeySize(pC->pCursor, &v); - v = keyToInt(v); if( v==MAX_ROWID ){ pC->useRandomRowid = 1; }else{ @@ -3364,12 +3625,12 @@ } #ifndef SQLITE_OMIT_AUTOINCREMENT - if( pOp->p2 ){ - Mem *pMem; - assert( pOp->p2>0 && pOp->p2nMem ); /* P2 is a valid memory cell */ - pMem = &p->aMem[pOp->p2]; + if( pOp->p3 ){ + assert( pOp->p3>0 && pOp->p3<=p->nMem ); /* P3 is a valid memory cell */ + pMem = &p->aMem[pOp->p3]; + REGISTER_TRACE(pOp->p3, pMem); sqlite3VdbeMemIntegerify(pMem); - assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P2) holds an integer */ + assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */ if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){ rc = SQLITE_FULL; goto abort_due_to_error; @@ -3381,33 +3642,24 @@ } #endif - if( vnextRowidValid = 1; - pC->nextRowid = v+1; - }else{ - pC->nextRowidValid = 0; - } + sqlite3BtreeSetCachedRowid(pC->pCursor, vuseRandomRowid ){ - assert( pOp->p2==0 ); /* SQLITE_FULL must have occurred prior to this */ - v = db->priorNewRowid; + assert( pOp->p3==0 ); /* We cannot be in random rowid mode if this is + ** an AUTOINCREMENT table. */ + v = db->lastRowid; cnt = 0; do{ - if( v==0 || cnt>2 ){ - sqlite3Randomness(sizeof(v), &v); - if( cnt<5 ) v &= 0xffffff; + if( cnt==0 && (v&0xffffff)==v ){ + v++; }else{ - unsigned char r; - sqlite3Randomness(1, &r); - v += r + 1; + sqlite3_randomness(sizeof(v), &v); + if( cnt<5 ) v &= 0xffffff; } - if( v==0 ) continue; - x = intToKey(v); - rx = sqlite3BtreeMoveto(pC->pCursor, 0, (u64)x, 0, &res); + rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)v, 0, &res); cnt++; - }while( cnt<1000 && rx==SQLITE_OK && res==0 ); - db->priorNewRowid = v; - if( rx==SQLITE_OK && res==0 ){ + }while( cnt<100 && rc==SQLITE_OK && res==0 ); + if( rc==SQLITE_OK && res==0 ){ rc = SQLITE_FULL; goto abort_due_to_error; } @@ -3416,104 +3668,120 @@ pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; } - pTos++; - pTos->u.i = v; - pTos->flags = MEM_Int; + MemSetTypeFlag(pOut, MEM_Int); + pOut->u.i = v; break; } -/* Opcode: Insert P1 P2 P3 +/* Opcode: Insert P1 P2 P3 P4 P5 ** ** Write an entry into the table of cursor P1. A new entry is ** created if it doesn't already exist or the data for an existing -** entry is overwritten. The data is the value on the top of the -** stack. The key is the next value down on the stack. The key must -** be an integer. The stack is popped twice by this instruction. +** entry is overwritten. The data is the value stored register +** number P2. The key is stored in register P3. The key must +** be an integer. 
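At the SQL level the integer key handed to OP_Insert in register P3 is simply the rowid, and the OPFLAG_LASTROWID flag described below is what makes it visible to sqlite3_last_insert_rowid(). A minimal public-API sketch with an invented table; when no rowid is supplied explicitly, OP_NewRowid chooses the key first:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
  /* Explicit rowid: the integer key that reaches OP_Insert. */
  sqlite3_exec(db, "INSERT INTO t(rowid, x) VALUES(42, 'hello')", 0, 0, 0);
  printf("last rowid = %lld\n", (long long)sqlite3_last_insert_rowid(db));
  /* No rowid given: OP_NewRowid picks the next key (43 here). */
  sqlite3_exec(db, "INSERT INTO t(x) VALUES('world')", 0, 0, 0);
  printf("last rowid = %lld\n", (long long)sqlite3_last_insert_rowid(db));
  sqlite3_close(db);
  return 0;
}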
** -** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is -** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P2 is set, +** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is +** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set, ** then rowid is stored for subsequent return by the -** sqlite3_last_insert_rowid() function (otherwise it's unmodified). +** sqlite3_last_insert_rowid() function (otherwise it is unmodified). ** -** Parameter P3 may point to a string containing the table-name, or +** Parameter P4 may point to a string containing the table-name, or ** may be NULL. If it is not NULL, then the update-hook ** (sqlite3.xUpdateCallback) is invoked following a successful insert. ** +** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically +** allocated, then ownership of P2 is transferred to the pseudo-cursor +** and register P2 becomes ephemeral. If the cursor is changed, the +** value of register P2 will then change. Make sure this does not +** cause any problems.) +** ** This instruction only works on tables. The equivalent instruction ** for indices is OP_IdxInsert. */ -case OP_Insert: { /* no-push */ - Mem *pNos = &pTos[-1]; - int i = pOp->p1; - Cursor *pC; - assert( pNos>=p->aStack ); - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - if( ((pC = p->apCsr[i])->pCursor!=0 || pC->pseudoTable) ){ - i64 iKey; /* The integer ROWID or key for the record to be inserted */ - - assert( pNos->flags & MEM_Int ); - assert( pC->isTable ); - iKey = intToKey(pNos->u.i); +case OP_Insert: { + Mem *pData; + Mem *pKey; + i64 iKey; /* The integer ROWID or key for the record to be inserted */ + VdbeCursor *pC; + int nZero; + int seekResult; + const char *zDb; + const char *zTbl; + int op; - if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++; - if( pOp->p2 & OPFLAG_LASTROWID ) db->lastRowid = pNos->u.i; - if( pC->nextRowidValid && pNos->u.i>=pC->nextRowid ){ - pC->nextRowidValid = 0; - } - if( pTos->flags & MEM_Null ){ - pTos->z = 0; - pTos->n = 0; - }else{ - assert( pTos->flags & (MEM_Blob|MEM_Str) ); - } - if( pC->pseudoTable ){ - sqliteFree(pC->pData); - pC->iKey = iKey; - pC->nData = pTos->n; - if( pTos->flags & MEM_Dyn ){ - pC->pData = pTos->z; - pTos->flags = MEM_Null; - }else{ - pC->pData = sqliteMallocRaw( pC->nData+2 ); - if( !pC->pData ) goto no_mem; - memcpy(pC->pData, pTos->z, pC->nData); - pC->pData[pC->nData] = 0; - pC->pData[pC->nData+1] = 0; + pData = &p->aMem[pOp->p2]; + pKey = &p->aMem[pOp->p3]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->pCursor!=0 || pC->pseudoTable ); + assert( pKey->flags & MEM_Int ); + assert( pC->isTable ); + REGISTER_TRACE(pOp->p2, pData); + REGISTER_TRACE(pOp->p3, pKey); + + iKey = pKey->u.i; + if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; + if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = pKey->u.i; + if( pData->flags & MEM_Null ){ + pData->z = 0; + pData->n = 0; + }else{ + assert( pData->flags & (MEM_Blob|MEM_Str) ); + } + if( pC->pseudoTable ){ + if( !pC->ephemPseudoTable ){ + sqlite3DbFree(db, pC->pData); + } + pC->iKey = iKey; + pC->nData = pData->n; + if( pC->ephemPseudoTable || pData->z==pData->zMalloc ){ + pC->pData = pData->z; + if( !pC->ephemPseudoTable ){ + pData->flags &= ~MEM_Dyn; + pData->flags |= MEM_Ephem; + pData->zMalloc = 0; } - pC->nullRow = 0; }else{ - int nZero; - if( pTos->flags & MEM_Zero ){ - nZero = pTos->u.i; - }else{ - nZero = 0; - } - rc = sqlite3BtreeInsert(pC->pCursor, 0, iKey, - pTos->z, 
pTos->n, nZero, - pOp->p2 & OPFLAG_APPEND); + pC->pData = sqlite3Malloc( pC->nData+2 ); + if( !pC->pData ) goto no_mem; + memcpy(pC->pData, pData->z, pC->nData); + pC->pData[pC->nData] = 0; + pC->pData[pC->nData+1] = 0; } - - pC->rowidIsValid = 0; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - - /* Invoke the update-hook if required. */ - if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p3 ){ - const char *zDb = db->aDb[pC->iDb].zName; - const char *zTbl = pOp->p3; - int op = ((pOp->p2 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT); - assert( pC->isTable ); - db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey); - assert( pC->iDb>=0 ); + pC->nullRow = 0; + }else{ + seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0); + if( pData->flags & MEM_Zero ){ + nZero = pData->u.nZero; + }else{ + nZero = 0; } + sqlite3BtreeSetCachedRowid(pC->pCursor, 0); + rc = sqlite3BtreeInsert(pC->pCursor, 0, iKey, + pData->z, pData->n, nZero, + pOp->p5 & OPFLAG_APPEND, seekResult + ); } - popStack(&pTos, 2); + + pC->rowidIsValid = 0; + pC->deferredMoveto = 0; + pC->cacheStatus = CACHE_STALE; + /* Invoke the update-hook if required. */ + if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z ){ + zDb = db->aDb[pC->iDb].zName; + zTbl = pOp->p4.z; + op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT); + assert( pC->isTable ); + db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey); + assert( pC->iDb>=0 ); + } break; } -/* Opcode: Delete P1 P2 P3 +/* Opcode: Delete P1 P2 * P4 * ** ** Delete the record at which the P1 cursor is currently pointing. ** @@ -3525,46 +3793,54 @@ ** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is ** incremented (otherwise not). ** -** If P1 is a pseudo-table, then this instruction is a no-op. -*/ -case OP_Delete: { /* no-push */ - int i = pOp->p1; - Cursor *pC; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; +** P1 must not be pseudo-table. It has to be a real table with +** multiple rows. +** +** If P4 is not NULL, then it is the name of the table that P1 is +** pointing to. The update hook will be invoked, if it exists. +** If P4 is not NULL then the P1 cursor must have been positioned +** using OP_NotFound prior to invoking this opcode. +*/ +case OP_Delete: { + i64 iKey; + VdbeCursor *pC; + + iKey = 0; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); - if( pC->pCursor!=0 ){ - i64 iKey; + assert( pC->pCursor!=0 ); /* Only valid for real tables, no pseudotables */ - /* If the update-hook will be invoked, set iKey to the rowid of the - ** row being deleted. - */ - if( db->xUpdateCallback && pOp->p3 ){ - assert( pC->isTable ); - if( pC->rowidIsValid ){ - iKey = pC->lastRowid; - }else{ - rc = sqlite3BtreeKeySize(pC->pCursor, &iKey); - if( rc ){ - goto abort_due_to_error; - } - iKey = keyToInt(iKey); - } - } + /* If the update-hook will be invoked, set iKey to the rowid of the + ** row being deleted. + */ + if( db->xUpdateCallback && pOp->p4.z ){ + assert( pC->isTable ); + assert( pC->rowidIsValid ); /* lastRowid set by previous OP_NotFound */ + iKey = pC->lastRowid; + } - rc = sqlite3VdbeCursorMoveto(pC); - if( rc ) goto abort_due_to_error; - rc = sqlite3BtreeDelete(pC->pCursor); - pC->nextRowidValid = 0; - pC->cacheStatus = CACHE_STALE; + /* The OP_Delete opcode always follows an OP_NotExists or OP_Last or + ** OP_Column on the same table without any intervening operations that + ** might move or invalidate the cursor. 
Hence cursor pC is always pointing + ** to the row to be deleted and the sqlite3VdbeCursorMoveto() operation + ** below is always a no-op and cannot fail. We will run it anyhow, though, + ** to guard against future changes to the code generator. + **/ + assert( pC->deferredMoveto==0 ); + rc = sqlite3VdbeCursorMoveto(pC); + if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; - /* Invoke the update-hook if required. */ - if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p3 ){ - const char *zDb = db->aDb[pC->iDb].zName; - const char *zTbl = pOp->p3; - db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, zTbl, iKey); - assert( pC->iDb>=0 ); - } + sqlite3BtreeSetCachedRowid(pC->pCursor, 0); + rc = sqlite3BtreeDelete(pC->pCursor); + pC->cacheStatus = CACHE_STALE; + + /* Invoke the update-hook if required. */ + if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z ){ + const char *zDb = db->aDb[pC->iDb].zName; + const char *zTbl = pOp->p4.z; + db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, zTbl, iKey); + assert( pC->iDb>=0 ); } if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++; break; @@ -3577,7 +3853,7 @@ ** change counter (returned by subsequent calls to sqlite3_changes()) ** before it is reset. This is used by trigger programs. */ -case OP_ResetCount: { /* no-push */ +case OP_ResetCount: { if( pOp->p1 ){ sqlite3VdbeSetChanges(db, p->nChange); } @@ -3585,140 +3861,156 @@ break; } -/* Opcode: RowData P1 * * +/* Opcode: RowData P1 P2 * * * ** -** Push onto the stack the complete row data for cursor P1. -** There is no interpretation of the data. It is just copied -** onto the stack exactly as it is found in the database file. +** Write into register P2 the complete row data for cursor P1. +** There is no interpretation of the data. +** It is just copied onto the P2 register exactly as +** it is found in the database file. ** -** If the cursor is not pointing to a valid row, a NULL is pushed -** onto the stack. +** The P1 cursor must be pointing to a valid row (not a NULL row) +** of a real table, not a pseudo-table. */ -/* Opcode: RowKey P1 * * +/* Opcode: RowKey P1 P2 * * * ** -** Push onto the stack the complete row key for cursor P1. -** There is no interpretation of the key. It is just copied -** onto the stack exactly as it is found in the database file. +** Write into register P2 the complete row key for cursor P1. +** There is no interpretation of the data. +** The key is copied onto the P2 register exactly as +** it is found in the database file. ** -** If the cursor is not pointing to a valid row, a NULL is pushed -** onto the stack. +** The P1 cursor must be pointing to a valid row (not a NULL row) +** of a real table, not a pseudo-table.
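The db->xUpdateCallback invocations in the OP_Insert and OP_Delete handlers above are the internal side of the public sqlite3_update_hook() interface. A minimal sketch of observing them from application code (database and table names invented; note that an unqualified DELETE may be optimized into OP_Clear and then bypasses the hook):

#include <stdio.h>
#include "sqlite3.h"

static void hook(void *pArg, int op, const char *zDb,
                 const char *zTbl, sqlite3_int64 rowid){
  (void)pArg;
  printf("%s on %s.%s rowid=%lld\n",
         op==SQLITE_INSERT ? "INSERT" :
         op==SQLITE_DELETE ? "DELETE" : "UPDATE",
         zDb, zTbl, (long long)rowid);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_update_hook(db, hook, 0);
  sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);   /* fires hook */
  sqlite3_exec(db, "DELETE FROM t WHERE x=1", 0, 0, 0);   /* fires hook */
  sqlite3_close(db);
  return 0;
}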
*/ case OP_RowKey: case OP_RowData: { - int i = pOp->p1; - Cursor *pC; + VdbeCursor *pC; + BtCursor *pCrsr; u32 n; + i64 n64; + + pOut = &p->aMem[pOp->p2]; /* Note that RowKey and RowData are really exactly the same instruction */ - pTos++; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC->isTable || pOp->opcode==OP_RowKey ); assert( pC->isIndex || pOp->opcode==OP_RowData ); assert( pC!=0 ); - if( pC->nullRow ){ - pTos->flags = MEM_Null; - }else if( pC->pCursor!=0 ){ - BtCursor *pCrsr = pC->pCursor; - rc = sqlite3VdbeCursorMoveto(pC); - if( rc ) goto abort_due_to_error; - if( pC->nullRow ){ - pTos->flags = MEM_Null; - break; - }else if( pC->isIndex ){ - i64 n64; - assert( !pC->isTable ); - sqlite3BtreeKeySize(pCrsr, &n64); - if( n64>SQLITE_MAX_LENGTH ){ - goto too_big; - } - n = n64; - }else{ - sqlite3BtreeDataSize(pCrsr, &n); - } - if( n>SQLITE_MAX_LENGTH ){ + assert( pC->nullRow==0 ); + assert( pC->pseudoTable==0 ); + assert( pC->pCursor!=0 ); + pCrsr = pC->pCursor; + + /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or + ** OP_Rewind/Op_Next with no intervening instructions that might invalidate + ** the cursor. Hence the following sqlite3VdbeCursorMoveto() call is always + ** a no-op and can never fail. But we leave it in place as a safety. + */ + assert( pC->deferredMoveto==0 ); + rc = sqlite3VdbeCursorMoveto(pC); + if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; + + if( pC->isIndex ){ + assert( !pC->isTable ); + sqlite3BtreeKeySize(pCrsr, &n64); + if( n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } - pTos->n = n; - if( n<=NBFS ){ - pTos->flags = MEM_Blob | MEM_Short; - pTos->z = pTos->zShort; - }else{ - char *z = sqliteMallocRaw( n ); - if( z==0 ) goto no_mem; - pTos->flags = MEM_Blob | MEM_Dyn; - pTos->xDel = 0; - pTos->z = z; - } - if( pC->isIndex ){ - rc = sqlite3BtreeKey(pCrsr, 0, n, pTos->z); - }else{ - rc = sqlite3BtreeData(pCrsr, 0, n, pTos->z); + n = (u32)n64; + }else{ + sqlite3BtreeDataSize(pCrsr, &n); + if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ + goto too_big; } - }else if( pC->pseudoTable ){ - pTos->n = pC->nData; - assert( pC->nData<=SQLITE_MAX_LENGTH ); - pTos->z = pC->pData; - pTos->flags = MEM_Blob|MEM_Ephem; + } + if( sqlite3VdbeMemGrow(pOut, n, 0) ){ + goto no_mem; + } + pOut->n = n; + MemSetTypeFlag(pOut, MEM_Blob); + if( pC->isIndex ){ + rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z); }else{ - pTos->flags = MEM_Null; + rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z); } - pTos->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */ + pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */ + UPDATE_MAX_BLOBSIZE(pOut); break; } -/* Opcode: Rowid P1 * * +/* Opcode: Rowid P1 P2 * * * ** -** Push onto the stack an integer which is the key of the table entry that +** Store in register P2 an integer which is the key of the table entry that ** P1 is currently point to. +** +** P1 can be either an ordinary table or a virtual table. There used to +** be a separate OP_VRowid opcode for use with virtual tables, but this +** one opcode now works for both table types. 
*/ -case OP_Rowid: { - int i = pOp->p1; - Cursor *pC; +case OP_Rowid: { /* out2-prerelease */ + VdbeCursor *pC; i64 v; + sqlite3_vtab *pVtab; + const sqlite3_module *pModule; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); - rc = sqlite3VdbeCursorMoveto(pC); - if( rc ) goto abort_due_to_error; - pTos++; - if( pC->rowidIsValid ){ - v = pC->lastRowid; - }else if( pC->pseudoTable ){ - v = keyToInt(pC->iKey); - }else if( pC->nullRow || pC->pCursor==0 ){ - pTos->flags = MEM_Null; + if( pC->nullRow ){ + /* Do nothing so that reg[P2] remains NULL */ break; + }else if( pC->deferredMoveto ){ + v = pC->movetoTarget; + }else if( pC->pseudoTable ){ + v = pC->iKey; +#ifndef SQLITE_OMIT_VIRTUALTABLE + }else if( pC->pVtabCursor ){ + pVtab = pC->pVtabCursor->pVtab; + pModule = pVtab->pModule; + assert( pModule->xRowid ); + if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; + rc = pModule->xRowid(pC->pVtabCursor, &v); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; + if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; +#endif /* SQLITE_OMIT_VIRTUALTABLE */ }else{ - assert( pC->pCursor!=0 ); - sqlite3BtreeKeySize(pC->pCursor, &v); - v = keyToInt(v); + rc = sqlite3VdbeCursorMoveto(pC); + if( rc ) goto abort_due_to_error; + if( pC->rowidIsValid ){ + v = pC->lastRowid; + }else{ + assert( pC->pCursor!=0 ); + sqlite3BtreeKeySize(pC->pCursor, &v); + } } - pTos->u.i = v; - pTos->flags = MEM_Int; + pOut->u.i = v; + MemSetTypeFlag(pOut, MEM_Int); break; } -/* Opcode: NullRow P1 * * +/* Opcode: NullRow P1 * * * * ** ** Move the cursor P1 to a null row. Any OP_Column operations -** that occur while the cursor is on the null row will always push -** a NULL onto the stack. +** that occur while the cursor is on the null row will always +** write a NULL. */ -case OP_NullRow: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_NullRow: { + VdbeCursor *pC; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pC->nullRow = 1; pC->rowidIsValid = 0; + if( pC->pCursor ){ + sqlite3BtreeClearCursor(pC->pCursor); + } break; } -/* Opcode: Last P1 P2 * +/* Opcode: Last P1 P2 * * * ** ** The next use of the Rowid or Column or Next instruction for P1 ** will refer to the last entry in the database table or index. @@ -3726,31 +4018,32 @@ ** If P2 is 0 or if the table or index is not empty, fall through ** to the following instruction. */ -case OP_Last: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_Last: { /* jump */ + VdbeCursor *pC; BtCursor *pCrsr; + int res; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); - if( (pCrsr = pC->pCursor)!=0 ){ - int res; - rc = sqlite3BtreeLast(pCrsr, &res); - pC->nullRow = res; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - if( res && pOp->p2>0 ){ - pc = pOp->p2 - 1; - } + pCrsr = pC->pCursor; + if( pCrsr==0 ){ + res = 1; }else{ - pC->nullRow = 0; + rc = sqlite3BtreeLast(pCrsr, &res); + } + pC->nullRow = (u8)res; + pC->deferredMoveto = 0; + pC->rowidIsValid = 0; + pC->cacheStatus = CACHE_STALE; + if( pOp->p2>0 && res ){ + pc = pOp->p2 - 1; } break; } -/* Opcode: Sort P1 P2 * +/* Opcode: Sort P1 P2 * * * ** ** This opcode does exactly the same thing as OP_Rewind except that ** it increments an undocumented global variable used for testing. 
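The aCounter[] increment added to OP_Sort below exposes the same information through the public interface: sqlite3_stmt_status() with the SQLITE_STMTSTATUS_SORT verb reports how many sorts a prepared statement has performed. A minimal sketch, assuming a build new enough to have that interface (table and query invented):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a);"
                   "INSERT INTO t VALUES(3);"
                   "INSERT INTO t VALUES(1);"
                   "INSERT INTO t VALUES(2);", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT a FROM t ORDER BY a", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* drain the rows */ }
  /* No index on t.a, so the ORDER BY forces one sort. */
  printf("sorts: %d\n", sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0));
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}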
@@ -3762,14 +4055,15 @@ ** regression tests can determine whether or not the optimizer is ** correctly optimizing out sorts. */ -case OP_Sort: { /* no-push */ +case OP_Sort: { /* jump */ #ifdef SQLITE_TEST sqlite3_sort_count++; sqlite3_search_count--; #endif + p->aCounter[SQLITE_STMTSTATUS_SORT-1]++; /* Fall through into OP_Rewind */ } -/* Opcode: Rewind P1 P2 * +/* Opcode: Rewind P1 P2 * * * ** ** The next use of the Rowid or Column or Next instruction for P1 ** will refer to the first entry in the database table or index. @@ -3777,50 +4071,56 @@ ** If P2 is 0 or if the table or index is not empty, fall through ** to the following instruction. */ -case OP_Rewind: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_Rewind: { /* jump */ + VdbeCursor *pC; BtCursor *pCrsr; int res; - assert( i>=0 && inCursor ); - pC = p->apCsr[i]; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); if( (pCrsr = pC->pCursor)!=0 ){ rc = sqlite3BtreeFirst(pCrsr, &res); - pC->atFirst = res==0; + pC->atFirst = res==0 ?1:0; pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; + pC->rowidIsValid = 0; }else{ res = 1; } - pC->nullRow = res; - if( res && pOp->p2>0 ){ + pC->nullRow = (u8)res; + assert( pOp->p2>0 && pOp->p2nOp ); + if( res ){ pc = pOp->p2 - 1; } break; } -/* Opcode: Next P1 P2 * +/* Opcode: Next P1 P2 * * * ** ** Advance cursor P1 so that it points to the next key/data pair in its ** table or index. If there are no more key/value pairs then fall through ** to the following instruction. But if the cursor advance was successful, ** jump immediately to P2. ** +** The P1 cursor must be for a real table, not a pseudo-table. +** ** See also: Prev */ -/* Opcode: Prev P1 P2 * +/* Opcode: Prev P1 P2 * * * ** ** Back up cursor P1 so that it points to the previous key/data pair in its ** table or index. If there is no previous key/value pairs then fall through ** to the following instruction. But if the cursor backup was successful, ** jump immediately to P2. +** +** The P1 cursor must be for a real table, not a pseudo-table. */ -case OP_Prev: /* no-push */ -case OP_Next: { /* no-push */ - Cursor *pC; +case OP_Prev: /* jump */ +case OP_Next: { /* jump */ + VdbeCursor *pC; BtCursor *pCrsr; + int res; CHECK_FOR_INTERRUPT; assert( pOp->p1>=0 && pOp->p1nCursor ); @@ -3828,237 +4128,223 @@ if( pC==0 ){ break; /* See ticket #2273 */ } - if( (pCrsr = pC->pCursor)!=0 ){ - int res; - if( pC->nullRow ){ - res = 1; - }else{ - assert( pC->deferredMoveto==0 ); - rc = pOp->opcode==OP_Next ? sqlite3BtreeNext(pCrsr, &res) : - sqlite3BtreePrevious(pCrsr, &res); - pC->nullRow = res; - pC->cacheStatus = CACHE_STALE; - } - if( res==0 ){ - pc = pOp->p2 - 1; + pCrsr = pC->pCursor; + if( pCrsr==0 ){ + pC->nullRow = 1; + break; + } + res = 1; + assert( pC->deferredMoveto==0 ); + rc = pOp->opcode==OP_Next ? sqlite3BtreeNext(pCrsr, &res) : + sqlite3BtreePrevious(pCrsr, &res); + pC->nullRow = (u8)res; + pC->cacheStatus = CACHE_STALE; + if( res==0 ){ + pc = pOp->p2 - 1; + if( pOp->p5 ) p->aCounter[pOp->p5-1]++; #ifdef SQLITE_TEST - sqlite3_search_count++; + sqlite3_search_count++; #endif - } - }else{ - pC->nullRow = 1; } pC->rowidIsValid = 0; break; } -/* Opcode: IdxInsert P1 P2 * +/* Opcode: IdxInsert P1 P2 P3 * P5 ** -** The top of the stack holds a SQL index key made using either the -** MakeIdxRec or MakeRecord instructions. This opcode writes that key +** Register P2 holds a SQL index key made using the +** MakeRecord instructions. This opcode writes that key ** into the index P1. 
Data for the entry is nil. ** -** P2 is a flag that provides a hint to the b-tree layer that this +** P3 is a flag that provides a hint to the b-tree layer that this ** insert is likely to be an append. ** ** This instruction only works for indices. The equivalent instruction ** for tables is OP_Insert. */ -case OP_IdxInsert: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_IdxInsert: { /* in2 */ + VdbeCursor *pC; BtCursor *pCrsr; - assert( pTos>=p->aStack ); - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - assert( pTos->flags & MEM_Blob ); - if( (pCrsr = (pC = p->apCsr[i])->pCursor)!=0 ){ + int nKey; + const char *zKey; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pIn2->flags & MEM_Blob ); + pCrsr = pC->pCursor; + if( ALWAYS(pCrsr!=0) ){ assert( pC->isTable==0 ); - rc = ExpandBlob(pTos); + rc = ExpandBlob(pIn2); if( rc==SQLITE_OK ){ - int nKey = pTos->n; - const char *zKey = pTos->z; - rc = sqlite3BtreeInsert(pCrsr, zKey, nKey, "", 0, 0, pOp->p2); + nKey = pIn2->n; + zKey = pIn2->z; + rc = sqlite3BtreeInsert(pCrsr, zKey, nKey, "", 0, 0, pOp->p3, + ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) + ); assert( pC->deferredMoveto==0 ); pC->cacheStatus = CACHE_STALE; } } - Release(pTos); - pTos--; break; } -/* Opcode: IdxDelete P1 * * +/* Opcode: IdxDelete P1 P2 P3 * * ** -** The top of the stack is an index key built using the either the -** MakeIdxRec or MakeRecord opcodes. -** This opcode removes that entry from the index. +** The content of P3 registers starting at register P2 form +** an unpacked index key. This opcode removes that entry from the +** index opened by cursor P1. */ -case OP_IdxDelete: { /* no-push */ - int i = pOp->p1; - Cursor *pC; +case OP_IdxDelete: { + VdbeCursor *pC; BtCursor *pCrsr; - assert( pTos>=p->aStack ); - assert( pTos->flags & MEM_Blob ); - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - if( (pCrsr = (pC = p->apCsr[i])->pCursor)!=0 ){ - int res; - rc = sqlite3BtreeMoveto(pCrsr, pTos->z, pTos->n, 0, &res); + int res; + UnpackedRecord r; + + assert( pOp->p3>0 ); + assert( pOp->p2>0 && pOp->p2+pOp->p3<=p->nMem+1 ); + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + pCrsr = pC->pCursor; + if( ALWAYS(pCrsr!=0) ){ + r.pKeyInfo = pC->pKeyInfo; + r.nField = (u16)pOp->p3; + r.flags = 0; + r.aMem = &p->aMem[pOp->p2]; + rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &res); if( rc==SQLITE_OK && res==0 ){ rc = sqlite3BtreeDelete(pCrsr); } assert( pC->deferredMoveto==0 ); pC->cacheStatus = CACHE_STALE; } - Release(pTos); - pTos--; break; } -/* Opcode: IdxRowid P1 * * +/* Opcode: IdxRowid P1 P2 * * * ** -** Push onto the stack an integer which is the last entry in the record at +** Write into register P2 an integer which is the last entry in the record at ** the end of the index key pointed to by cursor P1. This integer should be ** the rowid of the table entry to which this index entry points. ** -** See also: Rowid, MakeIdxRec. +** See also: Rowid, MakeRecord. 
*/ -case OP_IdxRowid: { - int i = pOp->p1; +case OP_IdxRowid: { /* out2-prerelease */ BtCursor *pCrsr; - Cursor *pC; - - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - pTos++; - pTos->flags = MEM_Null; - if( (pCrsr = (pC = p->apCsr[i])->pCursor)!=0 ){ - i64 rowid; + VdbeCursor *pC; + i64 rowid; + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + pCrsr = pC->pCursor; + if( ALWAYS(pCrsr!=0) ){ + rc = sqlite3VdbeCursorMoveto(pC); + if( NEVER(rc) ) goto abort_due_to_error; assert( pC->deferredMoveto==0 ); assert( pC->isTable==0 ); - if( pC->nullRow ){ - pTos->flags = MEM_Null; - }else{ - rc = sqlite3VdbeIdxRowid(pCrsr, &rowid); + if( !pC->nullRow ){ + rc = sqlite3VdbeIdxRowid(db, pCrsr, &rowid); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } - pTos->flags = MEM_Int; - pTos->u.i = rowid; + MemSetTypeFlag(pOut, MEM_Int); + pOut->u.i = rowid; } } break; } -/* Opcode: IdxGT P1 P2 * -** -** The top of the stack is an index entry that omits the ROWID. Compare -** the top of stack against the index that P1 is currently pointing to. -** Ignore the ROWID on the P1 index. +/* Opcode: IdxGE P1 P2 P3 P4 P5 ** -** The top of the stack might have fewer columns that P1. +** The P4 register values beginning with P3 form an unpacked index +** key that omits the ROWID. Compare this key value against the index +** that P1 is currently pointing to, ignoring the ROWID on the P1 index. ** -** If the P1 index entry is greater than the top of the stack +** If the P1 index entry is greater than or equal to the key value ** then jump to P2. Otherwise fall through to the next instruction. -** In either case, the stack is popped once. -*/ -/* Opcode: IdxGE P1 P2 P3 -** -** The top of the stack is an index entry that omits the ROWID. Compare -** the top of stack against the index that P1 is currently pointing to. -** Ignore the ROWID on the P1 index. -** -** If the P1 index entry is greater than or equal to the top of the stack -** then jump to P2. Otherwise fall through to the next instruction. -** In either case, the stack is popped once. -** -** If P3 is the "+" string (or any other non-NULL string) then the -** index taken from the top of the stack is temporarily increased by -** an epsilon prior to the comparison. This make the opcode work -** like IdxGT except that if the key from the stack is a prefix of -** the key in the cursor, the result is false whereas it would be -** true with IdxGT. -*/ -/* Opcode: IdxLT P1 P2 P3 -** -** The top of the stack is an index entry that omits the ROWID. Compare -** the top of stack against the index that P1 is currently pointing to. -** Ignore the ROWID on the P1 index. ** -** If the P1 index entry is less than the top of the stack -** then jump to P2. Otherwise fall through to the next instruction. -** In either case, the stack is popped once. -** -** If P3 is the "+" string (or any other non-NULL string) then the -** index taken from the top of the stack is temporarily increased by -** an epsilon prior to the comparison. This makes the opcode work -** like IdxLE. -*/ -case OP_IdxLT: /* no-push */ -case OP_IdxGT: /* no-push */ -case OP_IdxGE: { /* no-push */ - int i= pOp->p1; - Cursor *pC; - - assert( i>=0 && inCursor ); - assert( p->apCsr[i]!=0 ); - assert( pTos>=p->aStack ); - if( (pC = p->apCsr[i])->pCursor!=0 ){ - int res; - - assert( pTos->flags & MEM_Blob ); /* Created using OP_MakeRecord */ +** If P5 is non-zero then the key value is increased by an epsilon +** prior to the comparison. 
This makes the opcode work like IdxGT except +** that if the key from register P3 is a prefix of the key in the cursor, +** the result is false whereas it would be true with IdxGT. +*/ +/* Opcode: IdxLT P1 P2 P3 * P5 +** +** The P4 register values beginning with P3 form an unpacked index +** key that omits the ROWID. Compare this key value against the index +** that P1 is currently pointing to, ignoring the ROWID on the P1 index. +** +** If the P1 index entry is less than the key value then jump to P2. +** Otherwise fall through to the next instruction. +** +** If P5 is non-zero then the key value is increased by an epsilon prior +** to the comparison. This makes the opcode work like IdxLE. +*/ +case OP_IdxLT: /* jump, in3 */ +case OP_IdxGE: { /* jump, in3 */ + VdbeCursor *pC; + int res; + UnpackedRecord r; + + assert( pOp->p1>=0 && pOp->p1<p->nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + if( ALWAYS(pC->pCursor!=0) ){ assert( pC->deferredMoveto==0 ); - ExpandBlob(pTos); - *pC->pIncrKey = pOp->p3!=0; - assert( pOp->p3==0 || pOp->opcode!=OP_IdxGT ); - rc = sqlite3VdbeIdxKeyCompare(pC, pTos->n, (u8*)pTos->z, &res); - *pC->pIncrKey = 0; - if( rc!=SQLITE_OK ){ - break; + assert( pOp->p5==0 || pOp->p5==1 ); + assert( pOp->p4type==P4_INT32 ); + r.pKeyInfo = pC->pKeyInfo; + r.nField = (u16)pOp->p4.i; + if( pOp->p5 ){ + r.flags = UNPACKED_INCRKEY | UNPACKED_IGNORE_ROWID; + }else{ + r.flags = UNPACKED_IGNORE_ROWID; } + r.aMem = &p->aMem[pOp->p3]; + rc = sqlite3VdbeIdxKeyCompare(pC, &r, &res); if( pOp->opcode==OP_IdxLT ){ res = -res; - }else if( pOp->opcode==OP_IdxGE ){ + }else{ + assert( pOp->opcode==OP_IdxGE ); res++; } if( res>0 ){ pc = pOp->p2 - 1 ; } } - Release(pTos); - pTos--; break; } -/* Opcode: Destroy P1 P2 * +/* Opcode: Destroy P1 P2 P3 * * ** ** Delete an entire database table or index whose root page in the database ** file is given by P1. ** -** The table being destroyed is in the main database file if P2==0. If -** P2==1 then the table to be clear is in the auxiliary database file +** The table being destroyed is in the main database file if P3==0. If +** P3==1 then the table to be cleared is in the auxiliary database file ** that is used to store tables created using CREATE TEMPORARY TABLE. ** ** If AUTOVACUUM is enabled then it is possible that another root page ** might be moved into the newly deleted root page in order to keep all ** root pages contiguous at the beginning of the database. The former ** value of the root page that moved - its value before the move occurred - -** is pushed onto the stack. If no page movement was required (because -** the table being dropped was already the last one in the database) then -** a zero is pushed onto the stack. If AUTOVACUUM is disabled -** then a zero is pushed onto the stack. +** is stored in register P2. If no page +** movement was required (because the table being dropped was already +** the last one in the database) then a zero is stored in register P2. +** If AUTOVACUUM is disabled then a zero is stored in register P2.
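OP_Destroy removes a whole table; its lighter-weight cousin OP_Clear (documented below) merely empties one, and the row counting wired to Clear's P3 operand is what lets an unqualified DELETE still report a change count. A minimal public-API sketch, assuming a build that includes that change (table name invented):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a);"
                   "INSERT INTO t VALUES(1);"
                   "INSERT INTO t VALUES(2);"
                   "INSERT INTO t VALUES(3);", 0, 0, 0);
  /* No WHERE clause: this compiles to OP_Clear rather than a row-by-row
  ** delete loop, yet sqlite3_changes() still sees the deleted rows. */
  sqlite3_exec(db, "DELETE FROM t", 0, 0, 0);
  printf("rows deleted: %d\n", sqlite3_changes(db));
  sqlite3_close(db);
  return 0;
}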
** ** See also: Clear */ -case OP_Destroy: { +case OP_Destroy: { /* out2-prerelease */ int iMoved; int iCnt; -#ifndef SQLITE_OMIT_VIRTUALTABLE Vdbe *pVdbe; + int iDb; +#ifndef SQLITE_OMIT_VIRTUALTABLE iCnt = 0; - for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){ + for(pVdbe=db->pVdbe; pVdbe; pVdbe = pVdbe->pNext){ if( pVdbe->magic==VDBE_MAGIC_RUN && pVdbe->inVtabMethod<2 && pVdbe->pc>=0 ){ iCnt++; } @@ -4068,22 +4354,24 @@ #endif if( iCnt>1 ){ rc = SQLITE_LOCKED; + p->errorAction = OE_Abort; }else{ + iDb = pOp->p3; assert( iCnt==1 ); - rc = sqlite3BtreeDropTable(db->aDb[pOp->p2].pBt, pOp->p1, &iMoved); - pTos++; - pTos->flags = MEM_Int; - pTos->u.i = iMoved; + assert( (p->btreeMask & (1<aDb[iDb].pBt, pOp->p1, &iMoved); + MemSetTypeFlag(pOut, MEM_Int); + pOut->u.i = iMoved; #ifndef SQLITE_OMIT_AUTOVACUUM if( rc==SQLITE_OK && iMoved!=0 ){ - sqlite3RootPageMoved(&db->aDb[pOp->p2], iMoved, pOp->p1); + sqlite3RootPageMoved(&db->aDb[iDb], iMoved, pOp->p1); } #endif } break; } -/* Opcode: Clear P1 P2 * +/* Opcode: Clear P1 P2 P3 ** ** Delete all contents of the database table or index whose root page ** in the database file is given by P1. But, unlike Destroy, do not @@ -4093,53 +4381,37 @@ ** P2==1 then the table to be clear is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** +** If the P3 value is non-zero, then the table referred to must be an +** intkey table (an SQL table, not an index). In this case the row change +** count is incremented by the number of rows in the table being cleared. +** If P3 is greater than zero, then the value stored in register P3 is +** also incremented by the number of rows in the table being cleared. +** ** See also: Destroy */ -case OP_Clear: { /* no-push */ - - /* For consistency with the way other features of SQLite operate - ** with a truncate, we will also skip the update callback. - */ -#if 0 - Btree *pBt = db->aDb[pOp->p2].pBt; - if( db->xUpdateCallback && pOp->p3 ){ - const char *zDb = db->aDb[pOp->p2].zName; - const char *zTbl = pOp->p3; - BtCursor *pCur = 0; - int fin = 0; - - rc = sqlite3BtreeCursor(pBt, pOp->p1, 0, 0, 0, &pCur); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - for( - rc=sqlite3BtreeFirst(pCur, &fin); - rc==SQLITE_OK && !fin; - rc=sqlite3BtreeNext(pCur, &fin) - ){ - i64 iKey; - rc = sqlite3BtreeKeySize(pCur, &iKey); - if( rc ){ - break; - } - iKey = keyToInt(iKey); - db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, zTbl, iKey); - } - sqlite3BtreeCloseCursor(pCur); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; +case OP_Clear: { + int nChange; + + nChange = 0; + assert( (p->btreeMask & (1<p2))!=0 ); + rc = sqlite3BtreeClearTable( + db->aDb[pOp->p2].pBt, pOp->p1, (pOp->p3 ? &nChange : 0) + ); + if( pOp->p3 ){ + p->nChange += nChange; + if( pOp->p3>0 ){ + p->aMem[pOp->p3].u.i += nChange; } } -#endif - rc = sqlite3BtreeClearTable(db->aDb[pOp->p2].pBt, pOp->p1); break; } -/* Opcode: CreateTable P1 * * +/* Opcode: CreateTable P1 P2 * * * ** -** Allocate a new table in the main database file if P2==0 or in the -** auxiliary database file if P2==1. Push the page number -** for the root page of the new table onto the stack. +** Allocate a new table in the main database file if P1==0 or in the +** auxiliary database file if P1==1 or in an attached database if +** P1>1. Write the root page number of the new table into +** register P2 ** ** The difference between a table and an index is this: A table must ** have a 4-byte integer key and can have arbitrary data. 
An index @@ -4147,20 +4419,24 @@ ** ** See also: CreateIndex */ -/* Opcode: CreateIndex P1 * * +/* Opcode: CreateIndex P1 P2 * * * ** -** Allocate a new index in the main database file if P2==0 or in the -** auxiliary database file if P2==1. Push the page number of the -** root page of the new index onto the stack. +** Allocate a new index in the main database file if P1==0 or in the +** auxiliary database file if P1==1 or in an attached database if +** P1>1. Write the root page number of the new index into +** register P2. ** ** See documentation on OP_CreateTable for additional information. */ -case OP_CreateIndex: -case OP_CreateTable: { +case OP_CreateIndex: /* out2-prerelease */ +case OP_CreateTable: { /* out2-prerelease */ int pgno; int flags; Db *pDb; + + pgno = 0; assert( pOp->p1>=0 && pOp->p1<db->nDb ); + assert( (p->btreeMask & (1<<pOp->p1))!=0 ); pDb = &db->aDb[pOp->p1]; assert( pDb->pBt!=0 ); if( pOp->opcode==OP_CreateTable ){ @@ -4170,211 +4446,304 @@ flags = BTREE_ZERODATA; } rc = sqlite3BtreeCreateTable(pDb->pBt, &pgno, flags); - pTos++; - if( rc==SQLITE_OK ){ - pTos->u.i = pgno; - pTos->flags = MEM_Int; - }else{ - pTos->flags = MEM_Null; - } + pOut->u.i = pgno; + MemSetTypeFlag(pOut, MEM_Int); break; } -/* Opcode: ParseSchema P1 P2 P3 +/* Opcode: ParseSchema P1 P2 * P4 * ** ** Read and parse all entries from the SQLITE_MASTER table of database P1 -** that match the WHERE clause P3. P2 is the "force" flag. Always do +** that match the WHERE clause P4. P2 is the "force" flag. Always do ** the parsing if P2 is true. If P2 is false, then this routine is a ** no-op if the schema is not currently loaded. In other words, if P2 ** is false, the SQLITE_MASTER table is only parsed if the rest of the ** schema is already loaded into the symbol table. ** ** This opcode invokes the parser to create a new virtual machine, -** then runs the new virtual machine. It is thus a reentrant opcode. +** then runs the new virtual machine. It is thus a re-entrant opcode. */ -case OP_ParseSchema: { /* no-push */ - char *zSql; - int iDb = pOp->p1; +case OP_ParseSchema: { + int iDb; const char *zMaster; + char *zSql; InitData initData; + iDb = pOp->p1; assert( iDb>=0 && iDb<db->nDb ); - if( !pOp->p2 && !DbHasProperty(db, iDb, DB_SchemaLoaded) ){ - break; + + /* If pOp->p2 is 0, then this opcode is being executed to read a + ** single row, for example the row corresponding to a new index + ** created by this VDBE, from the sqlite_master table. It only + ** does this if the corresponding in-memory schema is currently + ** loaded. Otherwise, the new index definition can be loaded along + ** with the rest of the schema when it is required. + ** + ** Although the mutex on the BtShared object that corresponds to + ** database iDb (the database containing the sqlite_master table + ** read by this instruction) is currently held, it is necessary to + ** obtain the mutexes on all attached databases before checking if + ** the schema of iDb is loaded. This is because, at the start of + ** the sqlite3_exec() call below, SQLite will invoke + ** sqlite3BtreeEnterAll(). If all mutexes are not already held, the + ** iDb mutex may be temporarily released to avoid deadlock. If + ** this happens, then some other thread may delete the in-memory + ** schema of database iDb before the SQL statement runs. The schema + ** will not be reloaded because the db->init.busy flag is set. This + ** can result in a "no such table: sqlite_master" or "malformed + ** database schema" error being returned to the user.
+ */ + assert( sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + sqlite3BtreeEnterAll(db); + if( pOp->p2 || DbHasProperty(db, iDb, DB_SchemaLoaded) ){ + zMaster = SCHEMA_TABLE(iDb); + initData.db = db; + initData.iDb = pOp->p1; + initData.pzErrMsg = &p->zErrMsg; + zSql = sqlite3MPrintf(db, + "SELECT name, rootpage, sql FROM '%q'.%s WHERE %s", + db->aDb[iDb].zName, zMaster, pOp->p4.z); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + }else{ + (void)sqlite3SafetyOff(db); + assert( db->init.busy==0 ); + db->init.busy = 1; + initData.rc = SQLITE_OK; + assert( !db->mallocFailed ); + rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); + if( rc==SQLITE_OK ) rc = initData.rc; + sqlite3DbFree(db, zSql); + db->init.busy = 0; + (void)sqlite3SafetyOn(db); + } } - zMaster = SCHEMA_TABLE(iDb); - initData.db = db; - initData.iDb = pOp->p1; - initData.pzErrMsg = &p->zErrMsg; - zSql = sqlite3MPrintf( - "SELECT name, rootpage, sql FROM '%q'.%s WHERE %s", - db->aDb[iDb].zName, zMaster, pOp->p3); - if( zSql==0 ) goto no_mem; - sqlite3SafetyOff(db); - assert( db->init.busy==0 ); - db->init.busy = 1; - assert( !sqlite3MallocFailed() ); - rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); - if( rc==SQLITE_ABORT ) rc = initData.rc; - sqliteFree(zSql); - db->init.busy = 0; - sqlite3SafetyOn(db); + sqlite3BtreeLeaveAll(db); if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); goto no_mem; } break; } -#if !defined(SQLITE_OMIT_ANALYZE) && !defined(SQLITE_OMIT_PARSER) -/* Opcode: LoadAnalysis P1 * * +#if !defined(SQLITE_OMIT_ANALYZE) +/* Opcode: LoadAnalysis P1 * * * * ** ** Read the sqlite_stat1 table for database P1 and load the content ** of that table into the internal index hash table. This will cause ** the analysis to be used when preparing all subsequent queries. */ -case OP_LoadAnalysis: { /* no-push */ - int iDb = pOp->p1; - assert( iDb>=0 && iDbnDb ); - rc = sqlite3AnalysisLoad(db, iDb); +case OP_LoadAnalysis: { + assert( pOp->p1>=0 && pOp->p1nDb ); + rc = sqlite3AnalysisLoad(db, pOp->p1); break; } -#endif /* !defined(SQLITE_OMIT_ANALYZE) && !defined(SQLITE_OMIT_PARSER) */ +#endif /* !defined(SQLITE_OMIT_ANALYZE) */ -/* Opcode: DropTable P1 * P3 +/* Opcode: DropTable P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe -** the table named P3 in database P1. This is called after a table +** the table named P4 in database P1. This is called after a table ** is dropped in order to keep the internal representation of the ** schema consistent with what is on disk. */ -case OP_DropTable: { /* no-push */ - sqlite3UnlinkAndDeleteTable(db, pOp->p1, pOp->p3); +case OP_DropTable: { + sqlite3UnlinkAndDeleteTable(db, pOp->p1, pOp->p4.z); break; } -/* Opcode: DropIndex P1 * P3 +/* Opcode: DropIndex P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe -** the index named P3 in database P1. This is called after an index +** the index named P4 in database P1. This is called after an index ** is dropped in order to keep the internal representation of the ** schema consistent with what is on disk. */ -case OP_DropIndex: { /* no-push */ - sqlite3UnlinkAndDeleteIndex(db, pOp->p1, pOp->p3); +case OP_DropIndex: { + sqlite3UnlinkAndDeleteIndex(db, pOp->p1, pOp->p4.z); break; } -/* Opcode: DropTrigger P1 * P3 +/* Opcode: DropTrigger P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe -** the trigger named P3 in database P1. This is called after a trigger +** the trigger named P4 in database P1. 
This is called after a trigger ** is dropped in order to keep the internal representation of the ** schema consistent with what is on disk. */ -case OP_DropTrigger: { /* no-push */ - sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p3); +case OP_DropTrigger: { + sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z); break; } #ifndef SQLITE_OMIT_INTEGRITY_CHECK -/* Opcode: IntegrityCk P1 P2 * +/* Opcode: IntegrityCk P1 P2 P3 * P5 ** -** Do an analysis of the currently open database. Push onto the -** stack the text of an error message describing any problems. -** If no problems are found, push a NULL onto the stack. -** -** P1 is the address of a memory cell that contains the maximum -** number of allowed errors. At most mem[P1] errors will be reported. -** In other words, the analysis stops as soon as mem[P1] errors are -** seen. Mem[P1] is updated with the number of errors remaining. +** Do an analysis of the currently open database. Store in +** register P1 the text of an error message describing any problems. +** If no problems are found, store a NULL in register P1. +** +** The register P3 contains the maximum number of allowed errors. +** At most reg(P3) errors will be reported. +** In other words, the analysis stops as soon as reg(P3) errors are +** seen. Reg(P3) is updated with the number of errors remaining. ** ** The root page numbers of all tables in the database are integer -** values on the stack. This opcode pulls as many integers as it -** can off of the stack and uses those numbers as the root pages. +** values stored in reg(P1), reg(P1+1), reg(P1+2), .... There are P2 tables +** total. ** -** If P2 is not zero, the check is done on the auxiliary database +** If P5 is not zero, the check is done on the auxiliary database ** file, not the main database file. ** ** This opcode is used to implement the integrity_check pragma. */ case OP_IntegrityCk: { - int nRoot; - int *aRoot; - int j; - int nErr; - char *z; - Mem *pnErr; - - for(nRoot=0; &pTos[-nRoot]>=p->aStack; nRoot++){ - if( (pTos[-nRoot].flags & MEM_Int)==0 ) break; - } + int nRoot; /* Number of tables to check. (Number of root pages.)
*/ + int *aRoot; /* Array of rootpage numbers for tables to be checked */ + int j; /* Loop counter */ + int nErr; /* Number of errors reported */ + char *z; /* Text of the error report */ + Mem *pnErr; /* Register keeping track of errors remaining */ + + nRoot = pOp->p2; assert( nRoot>0 ); - aRoot = sqliteMallocRaw( sizeof(int)*(nRoot+1) ); + aRoot = sqlite3DbMallocRaw(db, sizeof(int)*(nRoot+1) ); if( aRoot==0 ) goto no_mem; - j = pOp->p1; - assert( j>=0 && j<p->nMem ); - pnErr = &p->aMem[j]; + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + pnErr = &p->aMem[pOp->p3]; assert( (pnErr->flags & MEM_Int)!=0 ); + assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 ); + pIn1 = &p->aMem[pOp->p1]; for(j=0; j<nRoot; j++){ - aRoot[j] = (pTos-j)->u.i; + aRoot[j] = (int)sqlite3VdbeIntValue(&pIn1[j]); } aRoot[j] = 0; - popStack(&pTos, nRoot); - pTos++; - z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p2].pBt, aRoot, nRoot, - pnErr->u.i, &nErr); + assert( pOp->p5<db->nDb ); + assert( (p->btreeMask & (1<<pOp->p5))!=0 ); + z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot, + (int)pnErr->u.i, &nErr); + sqlite3DbFree(db, aRoot); pnErr->u.i -= nErr; + sqlite3VdbeMemSetNull(pIn1); if( nErr==0 ){ assert( z==0 ); - pTos->flags = MEM_Null; + }else if( z==0 ){ + goto no_mem; }else{ - pTos->z = z; - pTos->n = strlen(z); - pTos->flags = MEM_Str | MEM_Dyn | MEM_Term; - pTos->xDel = 0; - } - pTos->enc = SQLITE_UTF8; - sqlite3VdbeChangeEncoding(pTos, encoding); - sqliteFree(aRoot); + sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free); + } + UPDATE_MAX_BLOBSIZE(pIn1); + sqlite3VdbeChangeEncoding(pIn1, encoding); break; } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ -/* Opcode: FifoWrite * * * +/* Opcode: RowSetAdd P1 P2 * * * +** +** Insert the integer value held by register P2 into a boolean index +** held in register P1. ** -** Write the integer on the top of the stack -** into the Fifo. +** An assertion fails if P2 is not an integer. */ -case OP_FifoWrite: { /* no-push */ - assert( pTos>=p->aStack ); - sqlite3VdbeMemIntegerify(pTos); - sqlite3VdbeFifoPush(&p->sFifo, pTos->u.i); - assert( (pTos->flags & MEM_Dyn)==0 ); - pTos--; +case OP_RowSetAdd: { /* in2 */ + Mem *pIdx; + Mem *pVal; + assert( pOp->p1>0 && pOp->p1<=p->nMem ); + pIdx = &p->aMem[pOp->p1]; + assert( pOp->p2>0 && pOp->p2<=p->nMem ); + pVal = &p->aMem[pOp->p2]; + assert( (pVal->flags & MEM_Int)!=0 ); + if( (pIdx->flags & MEM_RowSet)==0 ){ + sqlite3VdbeMemSetRowSet(pIdx); + if( (pIdx->flags & MEM_RowSet)==0 ) goto no_mem; + } + sqlite3RowSetInsert(pIdx->u.pRowSet, pVal->u.i); break; } -/* Opcode: FifoRead * P2 * +/* Opcode: RowSetRead P1 P2 P3 * * ** -** Attempt to read a single integer from the Fifo -** and push it onto the stack. If the Fifo is empty -** push nothing but instead jump to P2. +** Extract the smallest value from boolean index P1 and put that value into +** register P3. Or, if boolean index P1 is initially empty, leave P3 +** unchanged and jump to instruction P2.
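**
** A minimal sketch of the calling pattern behind OP_RowSetAdd and
** OP_RowSetRead, assuming the internal RowSet interface that this release
** declares in sqliteInt.h (sqlite3RowSetInit(), sqlite3RowSetInsert(),
** sqlite3RowSetNext()); the RowSet ("boolean index") takes over the job of
** the old Fifo of rowids queued for UPDATE and DELETE processing:
**
**   unsigned char aSpace[150];      initial allocation handed to the RowSet
**   i64 iRowid;
**   RowSet *pRowSet = sqlite3RowSetInit(db, aSpace, sizeof(aSpace));
**   sqlite3RowSetInsert(pRowSet, 17);          what OP_RowSetAdd performs
**   sqlite3RowSetInsert(pRowSet, 3);
**   while( sqlite3RowSetNext(pRowSet, &iRowid) ){
**     ...                                      values come back smallest first
**   }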
*/ -case OP_FifoRead: { - i64 v; +case OP_RowSetRead: { /* jump, out3 */ + Mem *pIdx; + i64 val; + assert( pOp->p1>0 && pOp->p1<=p->nMem ); CHECK_FOR_INTERRUPT; - if( sqlite3VdbeFifoPop(&p->sFifo, &v)==SQLITE_DONE ){ + pIdx = &p->aMem[pOp->p1]; + pOut = &p->aMem[pOp->p3]; + if( (pIdx->flags & MEM_RowSet)==0 + || sqlite3RowSetNext(pIdx->u.pRowSet, &val)==0 + ){ + /* The boolean index is empty */ + sqlite3VdbeMemSetNull(pIdx); pc = pOp->p2 - 1; }else{ - pTos++; - pTos->u.i = v; - pTos->flags = MEM_Int; + /* A value was pulled from the index */ + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + sqlite3VdbeMemSetInt64(pOut, val); + } + break; +} + +/* Opcode: RowSetTest P1 P2 P3 P4 +** +** Register P3 is assumed to hold a 64-bit integer value. If register P1 +** contains a RowSet object and that RowSet object contains +** the value held in P3, jump to register P2. Otherwise, insert the +** integer in P3 into the RowSet and continue on to the +** next opcode. +** +** The RowSet object is optimized for the case where successive sets +** of integers, where each set contains no duplicates. Each set +** of values is identified by a unique P4 value. The first set +** must have P4==0, the final set P4=-1. P4 must be either -1 or +** non-negative. For non-negative values of P4 only the lower 4 +** bits are significant. +** +** This allows optimizations: (a) when P4==0 there is no need to test +** the rowset object for P3, as it is guaranteed not to contain it, +** (b) when P4==-1 there is no need to insert the value, as it will +** never be tested for, and (c) when a value that is part of set X is +** inserted, there is no need to search to see if the same value was +** previously inserted as part of set X (only if it was previously +** inserted as part of some other set). +*/ +case OP_RowSetTest: { /* jump, in1, in3 */ + int iSet; + int exists; + + iSet = pOp->p4.i; + assert( pIn3->flags&MEM_Int ); + + /* If there is anything other than a rowset object in memory cell P1, + ** delete it now and initialize P1 with an empty rowset + */ + if( (pIn1->flags & MEM_RowSet)==0 ){ + sqlite3VdbeMemSetRowSet(pIn1); + if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem; + } + + assert( pOp->p4type==P4_INT32 ); + assert( iSet==-1 || iSet>=0 ); + if( iSet ){ + exists = sqlite3RowSetTest(pIn1->u.pRowSet, + (u8)(iSet>=0 ? iSet & 0xf : 0xff), + pIn3->u.i); + if( exists ){ + pc = pOp->p2 - 1; + break; + } + } + if( iSet>=0 ){ + sqlite3RowSetInsert(pIn1->u.pRowSet, pIn3->u.i); } break; } + #ifndef SQLITE_OMIT_TRIGGER /* Opcode: ContextPush * * * ** @@ -4382,23 +4751,22 @@ ** opcode. The context stores the last insert row id, the last statement change ** count, and the current statement change count. */ -case OP_ContextPush: { /* no-push */ - int i = p->contextStackTop++; +case OP_ContextPush: { + int i; Context *pContext; + i = p->contextStackTop++; assert( i>=0 ); /* FIX ME: This should be allocated as part of the vdbe at compile-time */ if( i>=p->contextStackDepth ){ p->contextStackDepth = i+1; - p->contextStack = sqliteReallocOrFree(p->contextStack, + p->contextStack = sqlite3DbReallocOrFree(db, p->contextStack, sizeof(Context)*(i+1)); if( p->contextStack==0 ) goto no_mem; } pContext = &p->contextStack[i]; pContext->lastRowid = db->lastRowid; pContext->nChange = p->nChange; - pContext->sFifo = p->sFifo; - sqlite3VdbeFifoInit(&p->sFifo); break; } @@ -4408,259 +4776,156 @@ ** executed. The context stores the last insert row id, the last statement ** change count, and the current statement change count. 
*/ -case OP_ContextPop: { /* no-push */ - Context *pContext = &p->contextStack[--p->contextStackTop]; +case OP_ContextPop: { + Context *pContext; + pContext = &p->contextStack[--p->contextStackTop]; assert( p->contextStackTop>=0 ); db->lastRowid = pContext->lastRowid; p->nChange = pContext->nChange; - sqlite3VdbeFifoClear(&p->sFifo); - p->sFifo = pContext->sFifo; break; } #endif /* #ifndef SQLITE_OMIT_TRIGGER */ -/* Opcode: MemStore P1 P2 * -** -** Write the top of the stack into memory location P1. -** P1 should be a small integer since space is allocated -** for all memory locations between 0 and P1 inclusive. -** -** After the data is stored in the memory location, the -** stack is popped once if P2 is 1. If P2 is zero, then -** the original data remains on the stack. -*/ -case OP_MemStore: { /* no-push */ - assert( pTos>=p->aStack ); - assert( pOp->p1>=0 && pOp->p1<p->nMem ); - rc = sqlite3VdbeMemMove(&p->aMem[pOp->p1], pTos); - pTos--; - - /* If P2 is 0 then fall thru to the next opcode, OP_MemLoad, that will - ** restore the top of the stack to its original value. - */ - if( pOp->p2 ){ - break; - } -} -/* Opcode: MemLoad P1 * * -** -** Push a copy of the value in memory location P1 onto the stack. -** -** If the value is a string, then the value pushed is a pointer to -** the string that is stored in the memory location. If the memory -** location is subsequently changed (using OP_MemStore) then the -** value pushed onto the stack will change too. -*/ -case OP_MemLoad: { - int i = pOp->p1; - assert( i>=0 && i<p->nMem ); - pTos++; - sqlite3VdbeMemShallowCopy(pTos, &p->aMem[i], MEM_Ephem); - break; -} - #ifndef SQLITE_OMIT_AUTOINCREMENT -/* Opcode: MemMax P1 * * +/* Opcode: MemMax P1 P2 * * * ** -** Set the value of memory cell P1 to the maximum of its current value -** and the value on the top of the stack. The stack is unchanged. +** Set the value of register P1 to the maximum of its current value +** and the value in register P2. ** ** This instruction throws an error if the memory cell is not initially ** an integer. */ -case OP_MemMax: { /* no-push */ - int i = pOp->p1; - Mem *pMem; - assert( pTos>=p->aStack ); - assert( i>=0 && i<p->nMem ); - pMem = &p->aMem[i]; - sqlite3VdbeMemIntegerify(pMem); - sqlite3VdbeMemIntegerify(pTos); - if( pMem->u.i<pTos->u.i){ - pMem->u.i = pTos->u.i; +case OP_MemMax: { /* in1, in2 */ + sqlite3VdbeMemIntegerify(pIn1); + sqlite3VdbeMemIntegerify(pIn2); + if( pIn1->u.i<pIn2->u.i){ + pIn1->u.i = pIn2->u.i; } break; } #endif /* SQLITE_OMIT_AUTOINCREMENT */ -/* Opcode: MemIncr P1 P2 * -** -** Increment the integer valued memory cell P2 by the value in P1. -** -** It is illegal to use this instruction on a memory cell that does -** not contain an integer. An assertion fault will result if you try. -*/ -case OP_MemIncr: { /* no-push */ - int i = pOp->p2; - Mem *pMem; - assert( i>=0 && i<p->nMem ); - pMem = &p->aMem[i]; - assert( pMem->flags==MEM_Int ); - pMem->u.i += pOp->p1; - break; -} - -/* Opcode: IfMemPos P1 P2 * +/* Opcode: IfPos P1 P2 * * * ** -** If the value of memory cell P1 is 1 or greater, jump to P2. +** If the value of register P1 is 1 or greater, jump to P2. ** -** It is illegal to use this instruction on a memory cell that does +** It is illegal to use this instruction on a register that does ** not contain an integer. An assertion fault will result if you try.
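**
** A hypothetical code-generator fragment, assuming the sqlite3VdbeAddOp2()
** and sqlite3VdbeCurrentAddr() interfaces declared in vdbe.h, showing the
** usual shape of a counted loop built from a register test such as IfPos;
** regCnt and codeLoopBody() are illustrative names only:
**
**   int addrTop = sqlite3VdbeCurrentAddr(v);
**   codeLoopBody(pParse);
**   sqlite3VdbeAddOp2(v, OP_AddImm, regCnt, -1);
**   sqlite3VdbeAddOp2(v, OP_IfPos, regCnt, addrTop);
**
** The emitted program decrements the counter register after each pass and
** jumps back to the top of the loop for as long as the counter stays
** positive.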
*/ -case OP_IfMemPos: { /* no-push */ - int i = pOp->p1; - Mem *pMem; - assert( i>=0 && inMem ); - pMem = &p->aMem[i]; - assert( pMem->flags==MEM_Int ); - if( pMem->u.i>0 ){ +case OP_IfPos: { /* jump, in1 */ + assert( pIn1->flags&MEM_Int ); + if( pIn1->u.i>0 ){ pc = pOp->p2 - 1; } break; } -/* Opcode: IfMemNeg P1 P2 * +/* Opcode: IfNeg P1 P2 * * * ** -** If the value of memory cell P1 is less than zero, jump to P2. +** If the value of register P1 is less than zero, jump to P2. ** -** It is illegal to use this instruction on a memory cell that does +** It is illegal to use this instruction on a register that does ** not contain an integer. An assertion fault will result if you try. */ -case OP_IfMemNeg: { /* no-push */ - int i = pOp->p1; - Mem *pMem; - assert( i>=0 && inMem ); - pMem = &p->aMem[i]; - assert( pMem->flags==MEM_Int ); - if( pMem->u.i<0 ){ +case OP_IfNeg: { /* jump, in1 */ + assert( pIn1->flags&MEM_Int ); + if( pIn1->u.i<0 ){ pc = pOp->p2 - 1; } break; } -/* Opcode: IfMemZero P1 P2 * +/* Opcode: IfZero P1 P2 * * * ** -** If the value of memory cell P1 is exactly 0, jump to P2. +** If the value of register P1 is exactly 0, jump to P2. ** -** It is illegal to use this instruction on a memory cell that does +** It is illegal to use this instruction on a register that does ** not contain an integer. An assertion fault will result if you try. */ -case OP_IfMemZero: { /* no-push */ - int i = pOp->p1; - Mem *pMem; - assert( i>=0 && inMem ); - pMem = &p->aMem[i]; - assert( pMem->flags==MEM_Int ); - if( pMem->u.i==0 ){ +case OP_IfZero: { /* jump, in1 */ + assert( pIn1->flags&MEM_Int ); + if( pIn1->u.i==0 ){ pc = pOp->p2 - 1; } break; } -/* Opcode: MemNull P1 * * -** -** Store a NULL in memory cell P1 -*/ -case OP_MemNull: { - assert( pOp->p1>=0 && pOp->p1nMem ); - sqlite3VdbeMemSetNull(&p->aMem[pOp->p1]); - break; -} - -/* Opcode: MemInt P1 P2 * -** -** Store the integer value P1 in memory cell P2. -*/ -case OP_MemInt: { - assert( pOp->p2>=0 && pOp->p2nMem ); - sqlite3VdbeMemSetInt64(&p->aMem[pOp->p2], pOp->p1); - break; -} - -/* Opcode: MemMove P1 P2 * -** -** Move the content of memory cell P2 over to memory cell P1. -** Any prior content of P1 is erased. Memory cell P2 is left -** containing a NULL. -*/ -case OP_MemMove: { - assert( pOp->p1>=0 && pOp->p1nMem ); - assert( pOp->p2>=0 && pOp->p2nMem ); - rc = sqlite3VdbeMemMove(&p->aMem[pOp->p1], &p->aMem[pOp->p2]); - break; -} - -/* Opcode: AggStep P1 P2 P3 +/* Opcode: AggStep * P2 P3 P4 P5 ** ** Execute the step function for an aggregate. The -** function has P2 arguments. P3 is a pointer to the FuncDef -** structure that specifies the function. Use memory location -** P1 as the accumulator. +** function has P5 arguments. P4 is a pointer to the FuncDef +** structure that specifies the function. Use register +** P3 as the accumulator. ** -** The P2 arguments are popped from the stack. +** The P5 arguments are taken from register P2 and its +** successors. 
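**
** A minimal sketch of the other side of this contract, using the public
** sqlite3_create_function() API; the xStep callback registered below is
** what OP_AggStep ends up invoking once per row, and OP_AggFinal later
** invokes the finalizer. The function name "mycount" is hypothetical:
**
**   static void countStep(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
**     sqlite3_int64 *pCnt;
**     pCnt = (sqlite3_int64 *)sqlite3_aggregate_context(ctx, sizeof(*pCnt));
**     if( pCnt ) (*pCnt)++;
**   }
**   static void countFinal(sqlite3_context *ctx){
**     sqlite3_int64 *pCnt;
**     pCnt = (sqlite3_int64 *)sqlite3_aggregate_context(ctx, sizeof(*pCnt));
**     sqlite3_result_int64(ctx, pCnt ? *pCnt : 0);
**   }
**   rc = sqlite3_create_function(db, "mycount", 0, SQLITE_UTF8, 0,
**                                0, countStep, countFinal);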
*/ -case OP_AggStep: { /* no-push */ - int n = pOp->p2; +case OP_AggStep: { + int n; int i; - Mem *pMem, *pRec; + Mem *pMem; + Mem *pRec; sqlite3_context ctx; sqlite3_value **apVal; + n = pOp->p5; assert( n>=0 ); - pRec = &pTos[1-n]; - assert( pRec>=p->aStack ); + pRec = &p->aMem[pOp->p2]; apVal = p->apArg; assert( apVal || n==0 ); for(i=0; ip3; - assert( pOp->p1>=0 && pOp->p1nMem ); - ctx.pMem = pMem = &p->aMem[pOp->p1]; + ctx.pFunc = pOp->p4.pFunc; + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + ctx.pMem = pMem = &p->aMem[pOp->p3]; pMem->n++; ctx.s.flags = MEM_Null; ctx.s.z = 0; + ctx.s.zMalloc = 0; ctx.s.xDel = 0; + ctx.s.db = db; ctx.isError = 0; ctx.pColl = 0; - if( ctx.pFunc->needCollSeq ){ + if( ctx.pFunc->flags & SQLITE_FUNC_NEEDCOLL ){ assert( pOp>p->aOp ); - assert( pOp[-1].p3type==P3_COLLSEQ ); + assert( pOp[-1].p4type==P4_COLLSEQ ); assert( pOp[-1].opcode==OP_CollSeq ); - ctx.pColl = (CollSeq *)pOp[-1].p3; + ctx.pColl = pOp[-1].p4.pColl; } (ctx.pFunc->xStep)(&ctx, n, apVal); - popStack(&pTos, n); if( ctx.isError ){ - sqlite3SetString(&p->zErrMsg, sqlite3_value_text(&ctx.s), (char*)0); - rc = SQLITE_ERROR; + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&ctx.s)); + rc = ctx.isError; } sqlite3VdbeMemRelease(&ctx.s); break; } -/* Opcode: AggFinal P1 P2 P3 +/* Opcode: AggFinal P1 P2 * P4 * ** ** Execute the finalizer function for an aggregate. P1 is ** the memory location that is the accumulator for the aggregate. ** ** P2 is the number of arguments that the step function takes and -** P3 is a pointer to the FuncDef for this function. The P2 +** P4 is a pointer to the FuncDef for this function. The P2 ** argument is not used by this opcode. It is only there to disambiguate ** functions that can take varying numbers of arguments. The -** P3 argument is only needed for the degenerate case where +** P4 argument is only needed for the degenerate case where ** the step function was not previously called. */ -case OP_AggFinal: { /* no-push */ +case OP_AggFinal: { Mem *pMem; - assert( pOp->p1>=0 && pOp->p1nMem ); + assert( pOp->p1>0 && pOp->p1<=p->nMem ); pMem = &p->aMem[pOp->p1]; assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 ); - rc = sqlite3VdbeMemFinalize(pMem, (FuncDef*)pOp->p3); - if( rc==SQLITE_ERROR ){ - sqlite3SetString(&p->zErrMsg, sqlite3_value_text(pMem), (char*)0); + rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc); + if( rc ){ + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(pMem)); } + sqlite3VdbeChangeEncoding(pMem, encoding); + UPDATE_MAX_BLOBSIZE(pMem); if( sqlite3VdbeMemTooBig(pMem) ){ goto too_big; } @@ -4669,13 +4934,13 @@ #if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) -/* Opcode: Vacuum * * * +/* Opcode: Vacuum * * * * * ** ** Vacuum the entire database. This opcode will cause other virtual ** machines to be created and run. It may not be called from within ** a transaction. */ -case OP_Vacuum: { /* no-push */ +case OP_Vacuum: { if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; rc = sqlite3RunVacuum(&p->zErrMsg, db); if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; @@ -4684,16 +4949,17 @@ #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) -/* Opcode: IncrVacuum P1 P2 * +/* Opcode: IncrVacuum P1 P2 * * * ** ** Perform a single step of the incremental vacuum procedure on ** the P1 database. If the vacuum has finished, jump to instruction ** P2. Otherwise, fall through to the next instruction. 
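**
** A small usage sketch: the statement below, run through the public API on
** a database that uses auto_vacuum=incremental, compiles into a short VDBE
** loop built around this opcode, roughly one step per page freed (db and
** zErrMsg are the usual sqlite3_exec() arguments):
**
**   rc = sqlite3_exec(db, "PRAGMA incremental_vacuum(100);", 0, 0, &zErrMsg);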
*/ -case OP_IncrVacuum: { /* no-push */ +case OP_IncrVacuum: { /* jump */ Btree *pBt; assert( pOp->p1>=0 && pOp->p1nDb ); + assert( (p->btreeMask & (1<p1))!=0 ); pBt = db->aDb[pOp->p1].pBt; rc = sqlite3BtreeIncrVacuum(pBt); if( rc==SQLITE_DONE ){ @@ -4704,7 +4970,7 @@ } #endif -/* Opcode: Expire P1 * * +/* Opcode: Expire P1 * * * * ** ** Cause precompiled statements to become expired. An expired statement ** fails with an error code of SQLITE_SCHEMA if it is ever executed @@ -4713,7 +4979,7 @@ ** If P1 is 0, then all SQL statements become expired. If P1 is non-zero, ** then only the currently executing statement is affected. */ -case OP_Expire: { /* no-push */ +case OP_Expire: { if( !pOp->p1 ){ sqlite3ExpirePreparedStatements(db); }else{ @@ -4723,103 +4989,122 @@ } #ifndef SQLITE_OMIT_SHARED_CACHE -/* Opcode: TableLock P1 P2 P3 +/* Opcode: TableLock P1 P2 P3 P4 * ** ** Obtain a lock on a particular table. This instruction is only used when ** the shared-cache feature is enabled. ** -** If P1 is not negative, then it is the index of the database -** in sqlite3.aDb[] and a read-lock is required. If P1 is negative, a -** write-lock is required. In this case the index of the database is the -** absolute value of P1 minus one (iDb = abs(P1) - 1;) and a write-lock is -** required. +** If P1 is the index of the database in sqlite3.aDb[] of the database +** on which the lock is acquired. A readlock is obtained if P3==0 or +** a write lock if P3==1. ** ** P2 contains the root-page of the table to lock. ** -** P3 contains a pointer to the name of the table being locked. This is only +** P4 contains a pointer to the name of the table being locked. This is only ** used to generate an error message if the lock cannot be obtained. */ -case OP_TableLock: { /* no-push */ - int p1 = pOp->p1; - u8 isWriteLock = (p1<0); - if( isWriteLock ){ - p1 = (-1*p1)-1; - } +case OP_TableLock: { + int p1; + u8 isWriteLock; + + p1 = pOp->p1; + isWriteLock = (u8)pOp->p3; + assert( p1>=0 && p1nDb ); + assert( (p->btreeMask & (1<aDb[p1].pBt, pOp->p2, isWriteLock); - if( rc==SQLITE_LOCKED ){ - const char *z = (const char *)pOp->p3; - sqlite3SetString(&p->zErrMsg, "database table is locked: ", z, (char*)0); + if( (rc&0xFF)==SQLITE_LOCKED ){ + const char *z = pOp->p4.z; + sqlite3SetString(&p->zErrMsg, db, "database table is locked: %s", z); } break; } #endif /* SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VBegin * * P3 +/* Opcode: VBegin * * * P4 * ** -** P3 a pointer to an sqlite3_vtab structure. Call the xBegin method -** for that table. -*/ -case OP_VBegin: { /* no-push */ - rc = sqlite3VtabBegin(db, (sqlite3_vtab *)pOp->p3); +** P4 may be a pointer to an sqlite3_vtab structure. If so, call the +** xBegin method for that table. +** +** Also, whether or not P4 is set, check that this is not being called from +** within a callback to a virtual table xSync() method. If it is, the error +** code will be set to SQLITE_LOCKED. +*/ +case OP_VBegin: { + sqlite3_vtab *pVtab; + pVtab = pOp->p4.pVtab; + rc = sqlite3VtabBegin(db, pVtab); + if( pVtab ){ + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; + } break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VCreate P1 * P3 +/* Opcode: VCreate P1 * * P4 * ** -** P3 is the name of a virtual table in database P1. Call the xCreate method +** P4 is the name of a virtual table in database P1. Call the xCreate method ** for that table. 
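**
** A hedged sketch of how a program reaches this opcode through the public
** API, assuming an sqlite3_module object myModule implemented elsewhere;
** the module name "mymod" is illustrative only:
**
**   rc = sqlite3_create_module(db, "mymod", &myModule, 0);
**   rc = sqlite3_exec(db, "CREATE VIRTUAL TABLE t1 USING mymod;", 0, 0, 0);
**
** The CREATE VIRTUAL TABLE statement compiles to a VDBE program containing
** OP_VCreate, which in turn calls myModule.xCreate().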
*/ -case OP_VCreate: { /* no-push */ - rc = sqlite3VtabCallCreate(db, pOp->p1, pOp->p3, &p->zErrMsg); +case OP_VCreate: { + rc = sqlite3VtabCallCreate(db, pOp->p1, pOp->p4.z, &p->zErrMsg); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VDestroy P1 * P3 +/* Opcode: VDestroy P1 * * P4 * ** -** P3 is the name of a virtual table in database P1. Call the xDestroy method +** P4 is the name of a virtual table in database P1. Call the xDestroy method ** of that table. */ -case OP_VDestroy: { /* no-push */ +case OP_VDestroy: { p->inVtabMethod = 2; - rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p3); + rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p4.z); p->inVtabMethod = 0; break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VOpen P1 * P3 +/* Opcode: VOpen P1 * * P4 * ** -** P3 is a pointer to a virtual table object, an sqlite3_vtab structure. +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** P1 is a cursor number. This opcode opens a cursor to the virtual ** table and stores that cursor in P1. */ -case OP_VOpen: { /* no-push */ - Cursor *pCur = 0; - sqlite3_vtab_cursor *pVtabCursor = 0; - - sqlite3_vtab *pVtab = (sqlite3_vtab *)(pOp->p3); - sqlite3_module *pModule = (sqlite3_module *)pVtab->pModule; - +case OP_VOpen: { + VdbeCursor *pCur; + sqlite3_vtab_cursor *pVtabCursor; + sqlite3_vtab *pVtab; + sqlite3_module *pModule; + + pCur = 0; + pVtabCursor = 0; + pVtab = pOp->p4.pVtab; + pModule = (sqlite3_module *)pVtab->pModule; assert(pVtab && pModule); if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; rc = pModule->xOpen(pVtab, &pVtabCursor); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; if( SQLITE_OK==rc ){ - /* Initialise sqlite3_vtab_cursor base class */ + /* Initialize sqlite3_vtab_cursor base class */ pVtabCursor->pVtab = pVtab; /* Initialise vdbe cursor object */ - pCur = allocateCursor(p, pOp->p1, -1); + pCur = allocateCursor(p, pOp->p1, 0, -1, 0); if( pCur ){ pCur->pVtabCursor = pVtabCursor; pCur->pModule = pVtabCursor->pVtab->pModule; }else{ + db->mallocFailed = 1; pModule->xClose(pVtabCursor); } } @@ -4828,58 +5113,71 @@ #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VFilter P1 P2 P3 +/* Opcode: VFilter P1 P2 P3 P4 * ** ** P1 is a cursor opened using VOpen. P2 is an address to jump to if ** the filtered result set is empty. ** -** P3 is either NULL or a string that was generated by the xBestIndex -** method of the module. The interpretation of the P3 string is left +** P4 is either NULL or a string that was generated by the xBestIndex +** method of the module. The interpretation of the P4 string is left ** to the module implementation. ** ** This opcode invokes the xFilter method on the virtual table specified -** by P1. The integer query plan parameter to xFilter is the top of the -** stack. Next down on the stack is the argc parameter. Beneath the -** next of stack are argc additional parameters which are passed to -** xFilter as argv. The topmost parameter (i.e. 3rd element popped from -** the stack) becomes argv[argc-1] when passed to xFilter. +** by P1. The integer query plan parameter to xFilter is stored in register +** P3. Register P3+1 stores the argc parameter to be passed to the +** xFilter method. Registers P3+2..P3+1+argc are the argc +** additional parameters which are passed to +** xFilter as argv. 
Register P3+2 becomes argv[0] when passed to xFilter. ** -** The integer query plan parameter, argc, and all argv stack values -** are popped from the stack before this instruction completes. -** -** A jump is made to P2 if the result set after filtering would be -** empty. +** A jump is made to P2 if the result set after filtering would be empty. */ -case OP_VFilter: { /* no-push */ +case OP_VFilter: { /* jump */ int nArg; - + int iQuery; const sqlite3_module *pModule; + Mem *pQuery; + Mem *pArgc; + sqlite3_vtab_cursor *pVtabCursor; + sqlite3_vtab *pVtab; + VdbeCursor *pCur; + int res; + int i; + Mem **apArg; - Cursor *pCur = p->apCsr[pOp->p1]; + pQuery = &p->aMem[pOp->p3]; + pArgc = &pQuery[1]; + pCur = p->apCsr[pOp->p1]; + REGISTER_TRACE(pOp->p3, pQuery); assert( pCur->pVtabCursor ); - pModule = pCur->pVtabCursor->pVtab->pModule; - - /* Grab the index number and argc parameters off the top of the stack. */ - assert( (&pTos[-1])>=p->aStack ); - assert( (pTos[0].flags&MEM_Int)!=0 && pTos[-1].flags==MEM_Int ); - nArg = pTos[-1].u.i; + pVtabCursor = pCur->pVtabCursor; + pVtab = pVtabCursor->pVtab; + pModule = pVtab->pModule; + + /* Grab the index number and argc parameters */ + assert( (pQuery->flags&MEM_Int)!=0 && pArgc->flags==MEM_Int ); + nArg = (int)pArgc->u.i; + iQuery = (int)pQuery->u.i; /* Invoke the xFilter method */ { - int res = 0; - int i; - Mem **apArg = p->apArg; + res = 0; + apArg = p->apArg; for(i = 0; iinVtabMethod = 1; - rc = pModule->xFilter(pCur->pVtabCursor, pTos->u.i, pOp->p3, nArg, apArg); + rc = pModule->xFilter(pVtabCursor, iQuery, pOp->p4.z, nArg, apArg); p->inVtabMethod = 0; + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; + sqlite3VtabUnlock(db, pVtab); if( rc==SQLITE_OK ){ - res = pModule->xEof(pCur->pVtabCursor); + res = pModule->xEof(pVtabCursor); } if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; @@ -4887,171 +5185,170 @@ pc = pOp->p2 - 1; } } + pCur->nullRow = 0; - /* Pop the index number, argc value and parameters off the stack */ - popStack(&pTos, 2+nArg); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VRowid P1 * * +/* Opcode: VColumn P1 P2 P3 * * ** -** Push an integer onto the stack which is the rowid of -** the virtual-table that the P1 cursor is pointing to. +** Store the value of the P2-th column of +** the row of the virtual-table that the +** P1 cursor is pointing to into register P3. */ -case OP_VRowid: { +case OP_VColumn: { + sqlite3_vtab *pVtab; const sqlite3_module *pModule; + Mem *pDest; + sqlite3_context sContext; - Cursor *pCur = p->apCsr[pOp->p1]; + VdbeCursor *pCur = p->apCsr[pOp->p1]; assert( pCur->pVtabCursor ); - pModule = pCur->pVtabCursor->pVtab->pModule; - if( pModule->xRowid==0 ){ - sqlite3SetString(&p->zErrMsg, "Unsupported module operation: xRowid", 0); - rc = SQLITE_ERROR; - } else { - sqlite_int64 iRow; - - if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; - rc = pModule->xRowid(pCur->pVtabCursor, &iRow); - if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; - - pTos++; - pTos->flags = MEM_Int; - pTos->u.i = iRow; + assert( pOp->p3>0 && pOp->p3<=p->nMem ); + pDest = &p->aMem[pOp->p3]; + if( pCur->nullRow ){ + sqlite3VdbeMemSetNull(pDest); + break; } + pVtab = pCur->pVtabCursor->pVtab; + pModule = pVtab->pModule; + assert( pModule->xColumn ); + memset(&sContext, 0, sizeof(sContext)); + + /* The output cell may already have a buffer allocated. 
Move + ** the current contents to sContext.s so in case the user-function + ** can use the already allocated buffer instead of allocating a + ** new one. + */ + sqlite3VdbeMemMove(&sContext.s, pDest); + MemSetTypeFlag(&sContext.s, MEM_Null); - break; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VColumn P1 P2 * -** -** Push onto the stack the value of the P2-th column of -** the row of the virtual-table that the P1 cursor is pointing to. -*/ -case OP_VColumn: { - const sqlite3_module *pModule; - - Cursor *pCur = p->apCsr[pOp->p1]; - assert( pCur->pVtabCursor ); - pModule = pCur->pVtabCursor->pVtab->pModule; - if( pModule->xColumn==0 ){ - sqlite3SetString(&p->zErrMsg, "Unsupported module operation: xColumn", 0); - rc = SQLITE_ERROR; - } else { - sqlite3_context sContext; - memset(&sContext, 0, sizeof(sContext)); - sContext.s.flags = MEM_Null; - if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; - rc = pModule->xColumn(pCur->pVtabCursor, &sContext, pOp->p2); + if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; + rc = pModule->xColumn(pCur->pVtabCursor, &sContext, pOp->p2); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; + if( sContext.isError ){ + rc = sContext.isError; + } - /* Copy the result of the function to the top of the stack. We - ** do this regardless of whether or not an error occured to ensure any - ** dynamic allocation in sContext.s (a Mem struct) is released. - */ - sqlite3VdbeChangeEncoding(&sContext.s, encoding); - pTos++; - pTos->flags = 0; - sqlite3VdbeMemMove(pTos, &sContext.s); + /* Copy the result of the function to the P3 register. We + ** do this regardless of whether or not an error occurred to ensure any + ** dynamic allocation in sContext.s (a Mem struct) is released. + */ + sqlite3VdbeChangeEncoding(&sContext.s, encoding); + REGISTER_TRACE(pOp->p3, pDest); + sqlite3VdbeMemMove(pDest, &sContext.s); + UPDATE_MAX_BLOBSIZE(pDest); - if( sqlite3SafetyOn(db) ){ - goto abort_due_to_misuse; - } - if( sqlite3VdbeMemTooBig(pTos) ){ - goto too_big; - } + if( sqlite3SafetyOn(db) ){ + goto abort_due_to_misuse; + } + if( sqlite3VdbeMemTooBig(pDest) ){ + goto too_big; } - break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VNext P1 P2 * +/* Opcode: VNext P1 P2 * * * ** ** Advance virtual table P1 to the next row in its result set and ** jump to instruction P2. Or, if the virtual table has reached ** the end of its result set, then fall through to the next instruction. */ -case OP_VNext: { /* no-push */ +case OP_VNext: { /* jump */ + sqlite3_vtab *pVtab; const sqlite3_module *pModule; - int res = 0; + int res; + VdbeCursor *pCur; - Cursor *pCur = p->apCsr[pOp->p1]; + res = 0; + pCur = p->apCsr[pOp->p1]; assert( pCur->pVtabCursor ); - pModule = pCur->pVtabCursor->pVtab->pModule; - if( pModule->xNext==0 ){ - sqlite3SetString(&p->zErrMsg, "Unsupported module operation: xNext", 0); - rc = SQLITE_ERROR; - } else { - /* Invoke the xNext() method of the module. There is no way for the - ** underlying implementation to return an error if one occurs during - ** xNext(). Instead, if an error occurs, true is returned (indicating that - ** data is available) and the error code returned when xColumn or - ** some other method is next invoked on the save virtual table cursor. 
- */ - if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; - p->inVtabMethod = 1; - rc = pModule->xNext(pCur->pVtabCursor); - p->inVtabMethod = 0; - if( rc==SQLITE_OK ){ - res = pModule->xEof(pCur->pVtabCursor); - } - if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; - - if( !res ){ - /* If there is data, jump to P2 */ - pc = pOp->p2 - 1; - } + if( pCur->nullRow ){ + break; } + pVtab = pCur->pVtabCursor->pVtab; + pModule = pVtab->pModule; + assert( pModule->xNext ); + + /* Invoke the xNext() method of the module. There is no way for the + ** underlying implementation to return an error if one occurs during + ** xNext(). Instead, if an error occurs, true is returned (indicating that + ** data is available) and the error code returned when xColumn or + ** some other method is next invoked on the save virtual table cursor. + */ + if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; + sqlite3VtabLock(pVtab); + p->inVtabMethod = 1; + rc = pModule->xNext(pCur->pVtabCursor); + p->inVtabMethod = 0; + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; + sqlite3VtabUnlock(db, pVtab); + if( rc==SQLITE_OK ){ + res = pModule->xEof(pCur->pVtabCursor); + } + if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; + if( !res ){ + /* If there is data, jump to P2 */ + pc = pOp->p2 - 1; + } break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VRename * * P3 +/* Opcode: VRename P1 * * P4 * ** -** P3 is a pointer to a virtual table object, an sqlite3_vtab structure. +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** This opcode invokes the corresponding xRename method. The value -** on the top of the stack is popped and passed as the zName argument -** to the xRename method. +** in register P1 is passed as the zName argument to the xRename method. */ -case OP_VRename: { /* no-push */ - sqlite3_vtab *pVtab = (sqlite3_vtab *)(pOp->p3); - assert( pVtab->pModule->xRename ); - - Stringify(pTos, encoding); +case OP_VRename: { + sqlite3_vtab *pVtab; + Mem *pName; + pVtab = pOp->p4.pVtab; + pName = &p->aMem[pOp->p1]; + assert( pVtab->pModule->xRename ); + REGISTER_TRACE(pOp->p1, pName); + assert( pName->flags & MEM_Str ); if( sqlite3SafetyOff(db) ) goto abort_due_to_misuse; sqlite3VtabLock(pVtab); - rc = pVtab->pModule->xRename(pVtab, pTos->z); + rc = pVtab->pModule->xRename(pVtab, pName->z); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; sqlite3VtabUnlock(db, pVtab); if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; - popStack(&pTos, 1); break; } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE -/* Opcode: VUpdate P1 P2 P3 +/* Opcode: VUpdate P1 P2 P3 P4 * ** -** P3 is a pointer to a virtual table object, an sqlite3_vtab structure. +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** This opcode invokes the corresponding xUpdate method. P2 values -** are taken from the stack to pass to the xUpdate invocation. The -** value on the top of the stack corresponds to the p2th element -** of the argv array passed to xUpdate. +** are contiguous memory cells starting at P3 to pass to the xUpdate +** invocation. The value in register (P3+P2-1) corresponds to the +** p2th element of the argv array passed to xUpdate. ** ** The xUpdate method will do a DELETE or an INSERT or both. -** The argv[0] element (which corresponds to the P2-th element down -** on the stack) is the rowid of a row to delete. If argv[0] is -** NULL then no deletion occurs. 
The argv[1] element is the rowid -** of the new row. This can be NULL to have the virtual table -** select the new rowid for itself. The higher elements in the -** stack are the values of columns in the new row. +** The argv[0] element (which corresponds to memory cell P3) +** is the rowid of a row to delete. If argv[0] is NULL then no +** deletion occurs. The argv[1] element is the rowid of the new +** row. This can be NULL to have the virtual table select the new +** rowid for itself. The subsequent elements in the array are +** the values of columns in the new row. ** ** If P2==1 then no insert is performed. argv[0] is the rowid of ** a row to delete. @@ -5060,42 +5357,106 @@ ** is successful, then the value returned by sqlite3_last_insert_rowid() ** is set to the value of the rowid for the row just inserted. */ -case OP_VUpdate: { /* no-push */ - sqlite3_vtab *pVtab = (sqlite3_vtab *)(pOp->p3); - sqlite3_module *pModule = (sqlite3_module *)pVtab->pModule; - int nArg = pOp->p2; - assert( pOp->p3type==P3_VTAB ); - if( pModule->xUpdate==0 ){ - sqlite3SetString(&p->zErrMsg, "read-only table", 0); - rc = SQLITE_ERROR; - }else{ - int i; - sqlite_int64 rowid; - Mem **apArg = p->apArg; - Mem *pX = &pTos[1-nArg]; - for(i = 0; ip4.pVtab; + pModule = (sqlite3_module *)pVtab->pModule; + nArg = pOp->p2; + assert( pOp->p4type==P4_VTAB ); + if( ALWAYS(pModule->xUpdate) ){ + apArg = p->apArg; + pX = &p->aMem[pOp->p3]; + for(i=0; ixUpdate(pVtab, nArg, apArg, &rowid); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; sqlite3VtabUnlock(db, pVtab); if( sqlite3SafetyOn(db) ) goto abort_due_to_misuse; - if( pOp->p1 && rc==SQLITE_OK ){ + if( rc==SQLITE_OK && pOp->p1 ){ assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) ); db->lastRowid = rowid; } + p->nChange++; } - popStack(&pTos, nArg); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ -/* An other opcode is illegal... +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* Opcode: Pagecount P1 P2 * * * +** +** Write the current number of pages in database P1 to memory cell P2. */ -default: { - assert( 0 ); +case OP_Pagecount: { /* out2-prerelease */ + int p1; + int nPage; + Pager *pPager; + + p1 = pOp->p1; + pPager = sqlite3BtreePager(db->aDb[p1].pBt); + rc = sqlite3PagerPagecount(pPager, &nPage); + /* OP_Pagecount is always called from within a read transaction. The + ** page count has already been successfully read and cached. So the + ** sqlite3PagerPagecount() call above cannot fail. */ + if( ALWAYS(rc==SQLITE_OK) ){ + pOut->flags = MEM_Int; + pOut->u.i = nPage; + } + break; +} +#endif + +#ifndef SQLITE_OMIT_TRACE +/* Opcode: Trace * * * P4 * +** +** If tracing is enabled (by the sqlite3_trace()) interface, then +** the UTF-8 string contained in P4 is emitted on the trace callback. +*/ +case OP_Trace: { + char *zTrace; + + zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql); + if( zTrace ){ + if( db->xTrace ){ + db->xTrace(db->pTraceArg, zTrace); + } +#ifdef SQLITE_DEBUG + if( (db->flags & SQLITE_SqlTrace)!=0 ){ + sqlite3DebugPrintf("SQL-trace: %s\n", zTrace); + } +#endif /* SQLITE_DEBUG */ + } + break; +} +#endif + + +/* Opcode: Noop * * * * * +** +** Do nothing. This instruction is often useful as a jump +** destination. +*/ +/* +** The magic Explain opcode are only inserted when explain==2 (which +** is to say when the EXPLAIN QUERY PLAN syntax is used.) +** This opcode records information from the optimizer. It is the +** the same as a no-op. This opcodesnever appears in a real VM program. 
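**
** For the OP_Trace opcode documented above, a minimal sketch of the hook it
** feeds, using the public sqlite3_trace() interface; the callback receives
** the same UTF-8 SQL text that the opcode carries in P4 (or p->zSql):
**
**   static void traceCallback(void *pNotUsed, const char *zSql){
**     fprintf(stderr, "SQL: %s\n", zSql);
**   }
**   ...
**   sqlite3_trace(db, traceCallback, 0);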
+*/ +default: { /* This is really OP_Noop and OP_Explain */ break; } @@ -5107,103 +5468,72 @@ *****************************************************************************/ } - /* Make sure the stack limit was not exceeded */ - assert( pTos<=pStackLimit ); - #ifdef VDBE_PROFILE { - long long elapse = hwtime() - start; - pOp->cycles += elapse; + u64 elapsed = sqlite3Hwtime() - start; + pOp->cycles += elapsed; pOp->cnt++; #if 0 - fprintf(stdout, "%10lld ", elapse); + fprintf(stdout, "%10llu ", elapsed); sqlite3VdbePrintOp(stdout, origPc, &p->aOp[origPc]); #endif } #endif -#ifdef SQLITE_TEST - /* Keep track of the size of the largest BLOB or STR that has appeared - ** on the top of the VDBE stack. - */ - if( pTos>=p->aStack && (pTos->flags & (MEM_Blob|MEM_Str))!=0 - && pTos->n>sqlite3_max_blobsize ){ - sqlite3_max_blobsize = pTos->n; - } -#endif - /* The following code adds nothing to the actual functionality ** of the program. It is only here for testing and debugging. ** On the other hand, it does burn CPU cycles every time through ** the evaluator loop. So we can leave it out when NDEBUG is defined. */ #ifndef NDEBUG - /* Sanity checking on the top element of the stack. If the previous - ** instruction was VNoChange, then the flags field of the top - ** of the stack is set to 0. This is technically invalid for a memory - ** cell, so avoid calling MemSanity() in this case. - */ - if( pTos>=p->aStack && pTos->flags ){ - sqlite3VdbeMemSanity(pTos); - assert( !sqlite3VdbeMemTooBig(pTos) ); - } assert( pc>=-1 && pcnOp ); #ifdef SQLITE_DEBUG - /* Code for tracing the vdbe stack. */ - if( p->trace && pTos>=p->aStack ){ - int i; - fprintf(p->trace, "Stack:"); - for(i=0; i>-5 && &pTos[i]>=p->aStack; i--){ - if( pTos[i].flags & MEM_Null ){ - fprintf(p->trace, " NULL"); - }else if( (pTos[i].flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){ - fprintf(p->trace, " si:%lld", pTos[i].u.i); - }else if( pTos[i].flags & MEM_Int ){ - fprintf(p->trace, " i:%lld", pTos[i].u.i); - }else if( pTos[i].flags & MEM_Real ){ - fprintf(p->trace, " r:%g", pTos[i].r); - }else{ - char zBuf[200]; - sqlite3VdbeMemPrettyPrint(&pTos[i], zBuf); - fprintf(p->trace, " "); - fprintf(p->trace, "%s", zBuf); - } + if( p->trace ){ + if( rc!=0 ) fprintf(p->trace,"rc=%d\n",rc); + if( opProperty & OPFLG_OUT2_PRERELEASE ){ + registerTrace(p->trace, pOp->p2, pOut); + } + if( opProperty & OPFLG_OUT3 ){ + registerTrace(p->trace, pOp->p3, pOut); } - if( rc!=0 ) fprintf(p->trace," rc=%d",rc); - fprintf(p->trace,"\n"); } #endif /* SQLITE_DEBUG */ #endif /* NDEBUG */ } /* The end of the for(;;) loop the loops through opcodes */ - /* If we reach this point, it means that execution is finished. + /* If we reach this point, it means that execution is finished with + ** an error of some kind. */ -vdbe_halt: - if( rc ){ - p->rc = rc; - rc = SQLITE_ERROR; - }else{ - rc = SQLITE_DONE; - } +vdbe_error_halt: + assert( rc ); + p->rc = rc; sqlite3VdbeHalt(p); - p->pTos = pTos; + if( rc==SQLITE_IOERR_NOMEM ) db->mallocFailed = 1; + rc = SQLITE_ERROR; + + /* This is the only way out of this procedure. We have to + ** release the mutexes on btrees that were acquired at the + ** top. */ +vdbe_return: + sqlite3BtreeMutexArrayLeave(&p->aMutex); return rc; /* Jump to here if a string or blob larger than SQLITE_MAX_LENGTH ** is encountered. 
*/ too_big: - sqlite3SetString(&p->zErrMsg, "string or blob too big", (char*)0); + sqlite3SetString(&p->zErrMsg, db, "string or blob too big"); rc = SQLITE_TOOBIG; - goto vdbe_halt; + goto vdbe_error_halt; /* Jump to here if a malloc() fails. */ no_mem: - sqlite3SetString(&p->zErrMsg, "out of memory", (char*)0); + db->mallocFailed = 1; + sqlite3SetString(&p->zErrMsg, db, "out of memory"); rc = SQLITE_NOMEM; - goto vdbe_halt; + goto vdbe_error_halt; /* Jump to here for an SQLITE_MISUSE error. */ @@ -5215,23 +5545,20 @@ ** should hold the error number. */ abort_due_to_error: - if( p->zErrMsg==0 ){ - if( sqlite3MallocFailed() ) rc = SQLITE_NOMEM; - sqlite3SetString(&p->zErrMsg, sqlite3ErrStr(rc), (char*)0); + assert( p->zErrMsg==0 ); + if( db->mallocFailed ) rc = SQLITE_NOMEM; + if( rc!=SQLITE_IOERR_NOMEM ){ + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(rc)); } - goto vdbe_halt; + goto vdbe_error_halt; /* Jump to here if the sqlite3_interrupt() API sets the interrupt ** flag. */ abort_due_to_interrupt: assert( db->u1.isInterrupted ); - if( db->magic!=SQLITE_MAGIC_BUSY ){ - rc = SQLITE_MISUSE; - }else{ - rc = SQLITE_INTERRUPT; - } + rc = SQLITE_INTERRUPT; p->rc = rc; - sqlite3SetString(&p->zErrMsg, sqlite3ErrStr(rc), (char*)0); - goto vdbe_halt; + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(rc)); + goto vdbe_error_halt; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbefifo.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbefifo.c --- sqlite3-3.4.2/src/vdbefifo.c 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/vdbefifo.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,114 +0,0 @@ -/* -** 2005 June 16 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements a FIFO queue of rowids used for processing -** UPDATE and DELETE statements. -*/ -#include "sqliteInt.h" -#include "vdbeInt.h" - -/* -** Allocate a new FifoPage and return a pointer to it. Return NULL if -** we run out of memory. Leave space on the page for nEntry entries. -*/ -static FifoPage *allocateFifoPage(int nEntry){ - FifoPage *pPage; - if( nEntry>32767 ){ - nEntry = 32767; - } - pPage = sqliteMallocRaw( sizeof(FifoPage) + sizeof(i64)*(nEntry-1) ); - if( pPage ){ - pPage->nSlot = nEntry; - pPage->iWrite = 0; - pPage->iRead = 0; - pPage->pNext = 0; - } - return pPage; -} - -/* -** Initialize a Fifo structure. -*/ -void sqlite3VdbeFifoInit(Fifo *pFifo){ - memset(pFifo, 0, sizeof(*pFifo)); -} - -/* -** Push a single 64-bit integer value into the Fifo. Return SQLITE_OK -** normally. SQLITE_NOMEM is returned if we are unable to allocate -** memory. -*/ -int sqlite3VdbeFifoPush(Fifo *pFifo, i64 val){ - FifoPage *pPage; - pPage = pFifo->pLast; - if( pPage==0 ){ - pPage = pFifo->pLast = pFifo->pFirst = allocateFifoPage(20); - if( pPage==0 ){ - return SQLITE_NOMEM; - } - }else if( pPage->iWrite>=pPage->nSlot ){ - pPage->pNext = allocateFifoPage(pFifo->nEntry); - if( pPage->pNext==0 ){ - return SQLITE_NOMEM; - } - pPage = pFifo->pLast = pPage->pNext; - } - pPage->aSlot[pPage->iWrite++] = val; - pFifo->nEntry++; - return SQLITE_OK; -} - -/* -** Extract a single 64-bit integer value from the Fifo. The integer -** extracted is the one least recently inserted. If the Fifo is empty -** return SQLITE_DONE. 
-*/ -int sqlite3VdbeFifoPop(Fifo *pFifo, i64 *pVal){ - FifoPage *pPage; - if( pFifo->nEntry==0 ){ - return SQLITE_DONE; - } - assert( pFifo->nEntry>0 ); - pPage = pFifo->pFirst; - assert( pPage!=0 ); - assert( pPage->iWrite>pPage->iRead ); - assert( pPage->iWrite<=pPage->nSlot ); - assert( pPage->iReadnSlot ); - assert( pPage->iRead>=0 ); - *pVal = pPage->aSlot[pPage->iRead++]; - pFifo->nEntry--; - if( pPage->iRead>=pPage->iWrite ){ - pFifo->pFirst = pPage->pNext; - sqliteFree(pPage); - if( pFifo->nEntry==0 ){ - assert( pFifo->pLast==pPage ); - pFifo->pLast = 0; - }else{ - assert( pFifo->pFirst!=0 ); - } - }else{ - assert( pFifo->nEntry>0 ); - } - return SQLITE_OK; -} - -/* -** Delete all information from a Fifo object. Free all memory held -** by the Fifo. -*/ -void sqlite3VdbeFifoClear(Fifo *pFifo){ - FifoPage *pPage, *pNextPage; - for(pPage=pFifo->pFirst; pPage; pPage=pNextPage){ - pNextPage = pPage->pNext; - sqliteFree(pPage); - } - sqlite3VdbeFifoInit(pFifo); -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbe.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbe.h --- sqlite3-3.4.2/src/vdbe.h 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/src/vdbe.h 2009-06-25 12:23:18.000000000 +0100 @@ -15,7 +15,7 @@ ** or VDBE. The VDBE implements an abstract machine that runs a ** simple program to access and modify the underlying database. ** -** $Id: vdbe.h,v 1.110 2007/05/08 21:45:28 drh Exp $ +** $Id: vdbe.h,v 1.141 2009/04/10 00:56:29 drh Exp $ */ #ifndef _SQLITE_VDBE_H_ #define _SQLITE_VDBE_H_ @@ -29,19 +29,45 @@ typedef struct Vdbe Vdbe; /* +** The names of the following types declared in vdbeInt.h are required +** for the VdbeOp definition. +*/ +typedef struct VdbeFunc VdbeFunc; +typedef struct Mem Mem; + +/* ** A single instruction of the virtual machine has an opcode ** and as many as three operands. 
The instruction is recorded ** as an instance of the following structure: */ struct VdbeOp { u8 opcode; /* What operation to perform */ + signed char p4type; /* One of the P4_xxx constants for p4 */ + u8 opflags; /* Not currently used */ + u8 p5; /* Fifth parameter is an unsigned character */ int p1; /* First operand */ int p2; /* Second parameter (often the jump destination) */ - char *p3; /* Third parameter */ - int p3type; /* One of the P3_xxx constants defined below */ + int p3; /* The third parameter */ + union { /* forth parameter */ + int i; /* Integer value if p4type==P4_INT32 */ + void *p; /* Generic pointer */ + char *z; /* Pointer to data for string (char array) types */ + i64 *pI64; /* Used when p4type is P4_INT64 */ + double *pReal; /* Used when p4type is P4_REAL */ + FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */ + VdbeFunc *pVdbeFunc; /* Used when p4type is P4_VDBEFUNC */ + CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */ + Mem *pMem; /* Used when p4type is P4_MEM */ + sqlite3_vtab *pVtab; /* Used when p4type is P4_VTAB */ + KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */ + int *ai; /* Used when p4type is P4_INTARRAY */ + } p4; +#ifdef SQLITE_DEBUG + char *zComment; /* Comment to improve readability */ +#endif #ifdef VDBE_PROFILE - int cnt; /* Number of times this instruction was executed */ - long long cycles; /* Total time spend executing this instruction */ + int cnt; /* Number of times this instruction was executed */ + u64 cycles; /* Total time spent executing this instruction */ #endif }; typedef struct VdbeOp VdbeOp; @@ -53,34 +79,39 @@ struct VdbeOpList { u8 opcode; /* What operation to perform */ signed char p1; /* First operand */ - short int p2; /* Second parameter (often the jump destination) */ - char *p3; /* Third parameter */ + signed char p2; /* Second parameter (often the jump destination) */ + signed char p3; /* Third parameter */ }; typedef struct VdbeOpList VdbeOpList; /* ** Allowed values of VdbeOp.p3type */ -#define P3_NOTUSED 0 /* The P3 parameter is not used */ -#define P3_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ -#define P3_STATIC (-2) /* Pointer to a static string */ -#define P3_COLLSEQ (-4) /* P3 is a pointer to a CollSeq structure */ -#define P3_FUNCDEF (-5) /* P3 is a pointer to a FuncDef structure */ -#define P3_KEYINFO (-6) /* P3 is a pointer to a KeyInfo structure */ -#define P3_VDBEFUNC (-7) /* P3 is a pointer to a VdbeFunc structure */ -#define P3_MEM (-8) /* P3 is a pointer to a Mem* structure */ -#define P3_TRANSIENT (-9) /* P3 is a pointer to a transient string */ -#define P3_VTAB (-10) /* P3 is a pointer to an sqlite3_vtab structure */ -#define P3_MPRINTF (-11) /* P3 is a string obtained from sqlite3_mprintf() */ +#define P4_NOTUSED 0 /* The P4 parameter is not used */ +#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ +#define P4_STATIC (-2) /* Pointer to a static string */ +#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */ +#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */ +#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */ +#define P4_VDBEFUNC (-7) /* P4 is a pointer to a VdbeFunc structure */ +#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */ +#define P4_TRANSIENT (-9) /* P4 is a pointer to a transient string */ +#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */ +#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */ +#define P4_REAL (-12) /* P4 is 
a 64-bit floating point value */ +#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ +#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */ +#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ -/* When adding a P3 argument using P3_KEYINFO, a copy of the KeyInfo structure +/* When adding a P4 argument using P4_KEYINFO, a copy of the KeyInfo structure ** is made. That copy is freed when the Vdbe is finalized. But if the -** argument is P3_KEYINFO_HANDOFF, the passed in pointer is used. It still +** argument is P4_KEYINFO_HANDOFF, the passed in pointer is used. It still ** gets freed when the Vdbe is finalized so it still should be obtained ** from a single sqliteMalloc(). But no copy is made and the calling ** function should *not* try to free the KeyInfo. */ -#define P3_KEYINFO_HANDOFF (-9) +#define P4_KEYINFO_HANDOFF (-16) +#define P4_KEYINFO_STATIC (-17) /* ** The Vdbe.aColName array contains 5n Mem structures, where n is the @@ -91,7 +122,15 @@ #define COLNAME_DATABASE 2 #define COLNAME_TABLE 3 #define COLNAME_COLUMN 4 -#define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ +#ifdef SQLITE_ENABLE_COLUMN_METADATA +# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ +#else +# ifdef SQLITE_OMIT_DECLTYPE +# define COLNAME_N 1 /* Store only the name */ +# else +# define COLNAME_N 2 /* Store the name and decltype */ +# endif +#endif /* ** The following macro converts a relative address in the p2 field @@ -112,14 +151,20 @@ ** for a description of what each of these routines does. */ Vdbe *sqlite3VdbeCreate(sqlite3*); -int sqlite3VdbeAddOp(Vdbe*,int,int,int); -int sqlite3VdbeOp3(Vdbe*,int,int,int,const char *zP3,int); +int sqlite3VdbeAddOp0(Vdbe*,int); +int sqlite3VdbeAddOp1(Vdbe*,int,int); +int sqlite3VdbeAddOp2(Vdbe*,int,int,int); +int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int); +int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int); int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp); void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1); void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2); +void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3); +void sqlite3VdbeChangeP5(Vdbe*, u8 P5); void sqlite3VdbeJumpHere(Vdbe*, int addr); void sqlite3VdbeChangeToNoop(Vdbe*, int addr, int N); -void sqlite3VdbeChangeP3(Vdbe*, int addr, const char *zP1, int N); +void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N); +void sqlite3VdbeUsesBtree(Vdbe*, int); VdbeOp *sqlite3VdbeGetOp(Vdbe*, int); int sqlite3VdbeMakeLabel(Vdbe*); void sqlite3VdbeDelete(Vdbe*); @@ -133,18 +178,28 @@ void sqlite3VdbeResetStepResult(Vdbe*); int sqlite3VdbeReset(Vdbe*); void sqlite3VdbeSetNumCols(Vdbe*,int); -int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, int); +int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*)); void sqlite3VdbeCountChanges(Vdbe*); sqlite3 *sqlite3VdbeDb(Vdbe*); -void sqlite3VdbeSetSql(Vdbe*, const char *z, int n); -const char *sqlite3VdbeGetSql(Vdbe*); +void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int); void sqlite3VdbeSwap(Vdbe*,Vdbe*); +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +int sqlite3VdbeReleaseMemory(int); +#endif +UnpackedRecord *sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,char*,int); +void sqlite3VdbeDeleteUnpackedRecord(UnpackedRecord*); +int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*); + + #ifndef NDEBUG void sqlite3VdbeComment(Vdbe*, const char*, ...); # define VdbeComment(X) sqlite3VdbeComment X + void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); +# define VdbeNoopComment(X) 
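/* Editor's note: illustrative sketch, not part of this patch.  The patch
** replaces the single "char *p3" operand with a p4 union discriminated by
** p4type, so one operand slot can carry an int, a string, or a struct
** pointer.  The standalone program below shows that tagged-union pattern in
** isolation; every name here (DemoOp, DemoColl, OT_*) is hypothetical and
** not part of SQLite.
*/
#include <stdio.h>

typedef struct DemoColl { const char *zName; } DemoColl;

enum { OT_UNUSED = 0, OT_INT32 = -1, OT_STRING = -2, OT_COLLSEQ = -3 };

typedef struct DemoOp {
  int opcode;              /* what operation to perform */
  signed char opType;      /* which member of u below is live */
  union {
    int i;                 /* valid when opType==OT_INT32 */
    const char *z;         /* valid when opType==OT_STRING */
    DemoColl *pColl;       /* valid when opType==OT_COLLSEQ */
  } u;
} DemoOp;

static void demoPrintOp(const DemoOp *p){
  switch( p->opType ){
    case OT_INT32:   printf("op %d  int %d\n",  p->opcode, p->u.i);            break;
    case OT_STRING:  printf("op %d  str %s\n",  p->opcode, p->u.z);            break;
    case OT_COLLSEQ: printf("op %d  coll %s\n", p->opcode, p->u.pColl->zName); break;
    default:         printf("op %d  (no operand)\n", p->opcode);               break;
  }
}

int main(void){
  DemoColl nocase = { "NOCASE" };
  DemoOp aOp[3] = {
    { 1, OT_INT32,   { .i = 42 } },
    { 2, OT_STRING,  { .z = "hello" } },
    { 3, OT_COLLSEQ, { .pColl = &nocase } }
  };
  for(int i=0; i<3; i++) demoPrintOp(&aOp[i]);
  return 0;
}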
sqlite3VdbeNoopComment X #else # define VdbeComment(X) +# define VdbeNoopComment(X) #endif #endif diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbeInt.h /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbeInt.h --- sqlite3-3.4.2/src/vdbeInt.h 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/vdbeInt.h 2009-06-25 12:45:58.000000000 +0100 @@ -14,26 +14,13 @@ ** source code file "vdbe.c". When that file became too big (over ** 6000 lines long) it was split up into several smaller files and ** this header information was factored out. +** +** $Id: vdbeInt.h,v 1.174 2009/06/23 14:15:04 drh Exp $ */ #ifndef _VDBEINT_H_ #define _VDBEINT_H_ /* -** intToKey() and keyToInt() used to transform the rowid. But with -** the latest versions of the design they are no-ops. -*/ -#define keyToInt(X) (X) -#define intToKey(X) (X) - -/* -** The makefile scans the vdbe.c source file and creates the following -** array of string constants which are the names of all VDBE opcodes. This -** array is defined in a separate source code file named opcode.c which is -** automatically generated by the makefile. -*/ -extern const char *const sqlite3OpcodeNames[]; - -/* ** SQL is translated into a sequence of instructions to be ** executed by a virtual machine. Each instruction is an instance ** of the following structure. @@ -55,39 +42,40 @@ ** Every cursor that the virtual machine has open is represented by an ** instance of the following structure. ** -** If the Cursor.isTriggerRow flag is set it means that this cursor is +** If the VdbeCursor.isTriggerRow flag is set it means that this cursor is ** really a single row that represents the NEW or OLD pseudo-table of -** a row trigger. The data for the row is stored in Cursor.pData and -** the rowid is in Cursor.iKey. +** a row trigger. The data for the row is stored in VdbeCursor.pData and +** the rowid is in VdbeCursor.iKey. 
*/ -struct Cursor { +struct VdbeCursor { BtCursor *pCursor; /* The cursor structure of the backend */ int iDb; /* Index of cursor database in db->aDb[] (or -1) */ i64 lastRowid; /* Last rowid from a Next or NextIdx operation */ - i64 nextRowid; /* Next rowid returned by OP_NewRowid */ Bool zeroed; /* True if zeroed out and ready for reuse */ Bool rowidIsValid; /* True if lastRowid is valid */ Bool atFirst; /* True if pointing to first entry */ Bool useRandomRowid; /* Generate new record numbers semi-randomly */ Bool nullRow; /* True if pointing to a row with no data */ - Bool nextRowidValid; /* True if the nextRowid field is valid */ Bool pseudoTable; /* This is a NEW or OLD pseudo-tables of a trigger */ + Bool ephemPseudoTable; Bool deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ Bool isTable; /* True if a table requiring integer keys */ Bool isIndex; /* True if an index containing keys only - no data */ - u8 bogusIncrKey; /* Something for pIncrKey to point to if pKeyInfo==0 */ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ Btree *pBt; /* Separate file holding temporary table */ int nData; /* Number of bytes in pData */ char *pData; /* Data for a NEW or OLD pseudo-table */ i64 iKey; /* Key for the NEW or OLD pseudo-table row */ - u8 *pIncrKey; /* Pointer to pKeyInfo->incrKey */ KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */ int nField; /* Number of fields in the header */ i64 seqCount; /* Sequence counter */ sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */ const sqlite3_module *pModule; /* Module for cursor pVtabCursor */ + /* Result of last sqlite3BtreeMoveto() done by an OP_NotExists or + ** OP_IsUnique opcode on this cursor. */ + int seekResult; + /* Cached information about the header for the data record that the ** cursor is currently pointing to. Only valid if cacheValid is true. ** aRow might point to (ephemeral) data for the current row, or it might @@ -99,17 +87,10 @@ u32 *aOffset; /* Cached offsets to the start of each columns data */ u8 *aRow; /* Data for the current row, if all on one page */ }; -typedef struct Cursor Cursor; +typedef struct VdbeCursor VdbeCursor; /* -** Number of bytes of string storage space available to each stack -** layer without having to malloc. NBFS is short for Number of Bytes -** For Strings. -*/ -#define NBFS 32 - -/* -** A value for Cursor.cacheValid that means the cache is always invalid. +** A value for VdbeCursor.cacheValid that means the cache is always invalid. */ #define CACHE_STALE 0 @@ -126,19 +107,21 @@ */ struct Mem { union { - i64 i; /* Integer value. Or FuncDef* when flags==MEM_Agg */ + i64 i; /* Integer value. */ + int nZero; /* Used when bit MEM_Zero is set in flags */ FuncDef *pDef; /* Used only when flags==MEM_Agg */ + RowSet *pRowSet; /* Used only when flags==MEM_RowSet */ } u; double r; /* Real value */ + sqlite3 *db; /* The associated database connection */ char *z; /* String or BLOB value */ - int n; /* Number of characters in string value, including '\0' */ + int n; /* Number of characters in string value, excluding '\0' */ u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. 
*/ u8 type; /* One of SQLITE_NULL, SQLITE_TEXT, SQLITE_INTEGER, etc */ u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */ void (*xDel)(void *); /* If not null, call this function to delete Mem.z */ - char zShort[NBFS]; /* Space for short strings */ + char *zMalloc; /* Dynamic buffer allocated by sqlite3_malloc() */ }; -typedef struct Mem Mem; /* One or more of the following flags are set to indicate the validOK ** representations of the value stored in the Mem struct. @@ -160,19 +143,20 @@ #define MEM_Int 0x0004 /* Value is an integer */ #define MEM_Real 0x0008 /* Value is a real number */ #define MEM_Blob 0x0010 /* Value is a BLOB */ +#define MEM_RowSet 0x0020 /* Value is a RowSet object */ +#define MEM_TypeMask 0x00ff /* Mask of type bits */ /* Whenever Mem contains a valid string or blob representation, one of ** the following flags must be set to determine the memory management ** policy for Mem.z. The MEM_Term flag tells us whether or not the ** string is \000 or \u0000 terminated */ -#define MEM_Term 0x0020 /* String rep is nul terminated */ -#define MEM_Dyn 0x0040 /* Need to call sqliteFree() on Mem.z */ -#define MEM_Static 0x0080 /* Mem.z points to a static string */ -#define MEM_Ephem 0x0100 /* Mem.z points to an ephemeral string */ -#define MEM_Short 0x0200 /* Mem.z points to Mem.zShort */ -#define MEM_Agg 0x0400 /* Mem.z points to an agg function context */ -#define MEM_Zero 0x0800 /* Mem.i contains count of 0s appended to blob */ +#define MEM_Term 0x0200 /* String rep is nul terminated */ +#define MEM_Dyn 0x0400 /* Need to call sqliteFree() on Mem.z */ +#define MEM_Static 0x0800 /* Mem.z points to a static string */ +#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */ +#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */ +#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */ #ifdef SQLITE_OMIT_INCRBLOB #undef MEM_Zero @@ -180,6 +164,13 @@ #endif +/* +** Clear any existing type flags from a Mem and replace them with f +*/ +#define MemSetTypeFlag(p, f) \ + ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f) + + /* A VdbeFunc is just a FuncDef (defined in sqliteInt.h) that contains ** additional information about auxiliary information bound to arguments ** of the function. This is used to implement the sqlite3_get_auxdata() @@ -197,7 +188,6 @@ void (*xDelete)(void *); /* Destructor for the aux data */ } apAux[1]; /* One slot for each function argument */ }; -typedef struct VdbeFunc VdbeFunc; /* ** The "context" argument for a installable function. A pointer to an @@ -217,7 +207,7 @@ VdbeFunc *pVdbeFunc; /* Auxilary data, if created. */ Mem s; /* The return value is stored here */ Mem *pMem; /* Memory cell used to store aggregate context */ - u8 isError; /* Set to true for an error */ + int isError; /* Error code returned by the function. */ CollSeq *pColl; /* Collating sequence */ }; @@ -234,32 +224,6 @@ }; /* -** A FifoPage structure holds a single page of valves. Pages are arranged -** in a list. -*/ -typedef struct FifoPage FifoPage; -struct FifoPage { - int nSlot; /* Number of entries aSlot[] */ - int iWrite; /* Push the next value into this entry in aSlot[] */ - int iRead; /* Read the next value from this entry in aSlot[] */ - FifoPage *pNext; /* Next page in the fifo */ - i64 aSlot[1]; /* One or more slots for rowid values */ -}; - -/* -** The Fifo structure is typedef-ed in vdbeInt.h. But the implementation -** of that structure is private to this file. -** -** The Fifo structure describes the entire fifo. 
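/* Editor's note: illustrative sketch, not part of this patch.  The new
** MemSetTypeFlag() macro clears the type bits (MEM_TypeMask|MEM_Zero) and
** ORs in the new type while leaving the buffer-ownership bits (Dyn, Static,
** Ephem, Term) untouched.  The program below shows the same mask-and-set
** idiom in isolation; the FLAG_* names and SetType macro are hypothetical.
*/
#include <assert.h>
#include <stdio.h>

#define FLAG_NULL     0x0001
#define FLAG_STR      0x0002
#define FLAG_INT      0x0004
#define FLAG_TYPEMASK 0x00ff   /* all type bits live below 0x0100 */
#define FLAG_STATIC   0x0800   /* ownership bit: must survive a type change */

#define SetType(flags, f) ((flags) = ((flags) & ~FLAG_TYPEMASK) | (f))

int main(void){
  unsigned flags = FLAG_STR | FLAG_STATIC;   /* a static string value */
  SetType(flags, FLAG_INT);                  /* change its type to integer */
  assert( (flags & FLAG_TYPEMASK)==FLAG_INT );
  assert( flags & FLAG_STATIC );             /* ownership bit preserved */
  printf("flags = 0x%04x\n", flags);
  return 0;
}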
-*/ -typedef struct Fifo Fifo; -struct Fifo { - int nEntry; /* Total number of entries */ - FifoPage *pFirst; /* First page on the list */ - FifoPage *pLast; /* Last page on the list */ -}; - -/* ** A Context stores the last insert rowid, the last statement change count, ** and the current statement change count (i.e. changes since last statement). ** The current keylist is also stored in the context. @@ -272,7 +236,6 @@ struct Context { i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */ int nChange; /* Statement changes (Vdbe.nChanges) */ - Fifo sFifo; /* Records that will participate in a DELETE or UPDATE */ }; /* @@ -291,62 +254,53 @@ ** method function. */ struct Vdbe { - sqlite3 *db; /* The whole database */ - Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ - int nOp; /* Number of instructions in the program */ - int nOpAlloc; /* Number of slots allocated for aOp[] */ - Op *aOp; /* Space to hold the virtual machine's program */ - int nLabel; /* Number of labels used */ - int nLabelAlloc; /* Number of slots allocated in aLabel[] */ - int *aLabel; /* Space to hold the labels */ - Mem *aStack; /* The operand stack, except string values */ - Mem *pTos; /* Top entry in the operand stack */ - Mem **apArg; /* Arguments to currently executing user function */ - Mem *aColName; /* Column names to return */ - int nCursor; /* Number of slots in apCsr[] */ - Cursor **apCsr; /* One element of this array for each open cursor */ - int nVar; /* Number of entries in aVar[] */ - Mem *aVar; /* Values for the OP_Variable opcode. */ - char **azVar; /* Name of variables */ - int okVar; /* True if azVar[] has been initialized */ - int magic; /* Magic number for sanity checking */ + sqlite3 *db; /* The database connection that owns this statement */ + Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ + int nOp; /* Number of instructions in the program */ + int nOpAlloc; /* Number of slots allocated for aOp[] */ + Op *aOp; /* Space to hold the virtual machine's program */ + int nLabel; /* Number of labels used */ + int nLabelAlloc; /* Number of slots allocated in aLabel[] */ + int *aLabel; /* Space to hold the labels */ + Mem **apArg; /* Arguments to currently executing user function */ + Mem *aColName; /* Column names to return */ + Mem *pResultSet; /* Pointer to an array of results */ + u16 nResColumn; /* Number of columns in one row of the result set */ + u16 nCursor; /* Number of slots in apCsr[] */ + VdbeCursor **apCsr; /* One element of this array for each open cursor */ + u8 errorAction; /* Recovery action to do in case of an error */ + u8 okVar; /* True if azVar[] has been initialized */ + u16 nVar; /* Number of entries in aVar[] */ + Mem *aVar; /* Values for the OP_Variable opcode. 
*/ + char **azVar; /* Name of variables */ + u32 magic; /* Magic number for sanity checking */ int nMem; /* Number of memory locations currently allocated */ Mem *aMem; /* The memory locations */ - int nCallback; /* Number of callbacks invoked so far */ - int cacheCtr; /* Cursor row cache generation counter */ - Fifo sFifo; /* A list of ROWIDs */ + int cacheCtr; /* VdbeCursor row cache generation counter */ int contextStackTop; /* Index of top element in the context stack */ int contextStackDepth; /* The size of the "context" stack */ Context *contextStack; /* Stack used by opcodes ContextPush & ContextPop*/ int pc; /* The program counter */ int rc; /* Value to return */ - unsigned uniqueCnt; /* Used by OP_MakeRecord when P2!=0 */ - int errorAction; /* Recovery action to do in case of an error */ - int inTempTrans; /* True if temp database is transactioned */ - int returnStack[100]; /* Return address stack for OP_Gosub & OP_Return */ - int returnDepth; /* Next unused element in returnStack[] */ - int nResColumn; /* Number of columns in one row of the result set */ - char **azResColumn; /* Values for one row of result */ - int popStack; /* Pop the stack this much on entry to VdbeExec() */ char *zErrMsg; /* Error message written here */ - u8 resOnStack; /* True if there are result values on the stack */ u8 explain; /* True if EXPLAIN present on SQL command */ u8 changeCntOn; /* True to update the change-counter */ - u8 aborted; /* True if ROLLBACK in another VM causes an abort */ u8 expired; /* True if the VM needs to be recompiled */ u8 minWriteFileFormat; /* Minimum file format for writable database files */ u8 inVtabMethod; /* See comments above */ + u8 usesStmtJournal; /* True if uses a statement journal */ + u8 readOnly; /* True for read-only statements */ + u8 isPrepareV2; /* True if prepared with prepare_v2() */ int nChange; /* Number of db changes made since last reset */ + int btreeMask; /* Bitmask of db->aDb[] entries referenced */ i64 startTime; /* Time when query started - used for profiling */ - int nSql; /* Number of bytes in zSql */ - char *zSql; /* Text of the SQL statement that generated this */ + BtreeMutexArray aMutex; /* An array of Btree used here and needing locks */ + int aCounter[2]; /* Counters used by sqlite3_stmt_status() */ + char *zSql; /* Text of the SQL statement that generated this */ + void *pFree; /* Free this when deleting the vdbe */ + int iStatement; /* Statement number (or 0 if has not opened stmt) */ #ifdef SQLITE_DEBUG - FILE *trace; /* Write an execution trace here, if not NULL */ -#endif - int openedStatement; /* True if this VM has opened a statement journal */ -#ifdef SQLITE_SSE - int fetchId; /* Statement number used by sqlite3_fetch_statement */ - int lru; /* Counter used for LRU cache replacement */ + FILE *trace; /* Write an execution trace here, if not NULL */ #endif }; @@ -361,24 +315,22 @@ /* ** Function prototypes */ -void sqlite3VdbeFreeCursor(Vdbe *, Cursor*); +void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*); void sqliteVdbePopStack(Vdbe*,int); -int sqlite3VdbeCursorMoveto(Cursor*); +int sqlite3VdbeCursorMoveto(VdbeCursor*); #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) void sqlite3VdbePrintOp(FILE*, int, Op*); #endif -int sqlite3VdbeSerialTypeLen(u32); +u32 sqlite3VdbeSerialTypeLen(u32); u32 sqlite3VdbeSerialType(Mem*, int); -int sqlite3VdbeSerialPut(unsigned char*, int, Mem*, int); -int sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); +u32 sqlite3VdbeSerialPut(unsigned char*, int, Mem*, int); +u32 sqlite3VdbeSerialGet(const 
unsigned char*, u32, Mem*); void sqlite3VdbeDeleteAuxData(VdbeFunc*, int); int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *); -int sqlite3VdbeIdxKeyCompare(Cursor*, int , const unsigned char*, int*); -int sqlite3VdbeIdxRowid(BtCursor *, i64 *); +int sqlite3VdbeIdxKeyCompare(VdbeCursor*,UnpackedRecord*,int*); +int sqlite3VdbeIdxRowid(sqlite3*, BtCursor *, i64 *); int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); -int sqlite3VdbeRecordCompare(void*,int,const void*,int, const void*); -int sqlite3VdbeIdxRowidLen(const u8*); int sqlite3VdbeExec(Vdbe*); int sqlite3VdbeList(Vdbe*); int sqlite3VdbeHalt(Vdbe*); @@ -386,15 +338,15 @@ int sqlite3VdbeMemTooBig(Mem*); int sqlite3VdbeMemCopy(Mem*, const Mem*); void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int); -int sqlite3VdbeMemMove(Mem*, Mem*); +void sqlite3VdbeMemMove(Mem*, Mem*); int sqlite3VdbeMemNulTerminate(Mem*); int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*)); void sqlite3VdbeMemSetInt64(Mem*, i64); void sqlite3VdbeMemSetDouble(Mem*, double); void sqlite3VdbeMemSetNull(Mem*); void sqlite3VdbeMemSetZeroBlob(Mem*,int); +void sqlite3VdbeMemSetRowSet(Mem*); int sqlite3VdbeMemMakeWriteable(Mem*); -int sqlite3VdbeMemDynamicify(Mem*); int sqlite3VdbeMemStringify(Mem*, int); i64 sqlite3VdbeIntValue(Mem*); int sqlite3VdbeMemIntegerify(Mem*); @@ -404,21 +356,28 @@ int sqlite3VdbeMemNumerify(Mem*); int sqlite3VdbeMemFromBtree(BtCursor*,int,int,int,Mem*); void sqlite3VdbeMemRelease(Mem *p); +void sqlite3VdbeMemReleaseExternal(Mem *p); int sqlite3VdbeMemFinalize(Mem*, FuncDef*); -#ifndef NDEBUG - void sqlite3VdbeMemSanity(Mem*); - int sqlite3VdbeOpcodeNoPush(u8); +const char *sqlite3OpcodeName(int); +int sqlite3VdbeOpcodeHasProperty(int, int); +int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); +int sqlite3VdbeCloseStatement(Vdbe *, int); +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT +int sqlite3VdbeReleaseBuffers(Vdbe *p); #endif + +#ifndef SQLITE_OMIT_SHARED_CACHE +void sqlite3VdbeMutexArrayEnter(Vdbe *p); +#else +# define sqlite3VdbeMutexArrayEnter(p) +#endif + int sqlite3VdbeMemTranslate(Mem*, u8); #ifdef SQLITE_DEBUG void sqlite3VdbePrintSql(Vdbe*); void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf); #endif int sqlite3VdbeMemHandleBom(Mem *pMem); -void sqlite3VdbeFifoInit(Fifo*); -int sqlite3VdbeFifoPush(Fifo*, i64); -int sqlite3VdbeFifoPop(Fifo*, i64*); -void sqlite3VdbeFifoClear(Fifo*); #ifndef SQLITE_OMIT_INCRBLOB int sqlite3VdbeMemExpandBlob(Mem *); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vdbemem.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vdbemem.c --- sqlite3-3.4.2/src/vdbemem.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/vdbemem.c 2009-06-25 12:45:58.000000000 +0100 @@ -14,11 +14,10 @@ ** stores a single value in the VDBE. Mem is an opaque structure visible ** only within the VDBE. Interface routines refer to a Mem using the ** name sqlite_value +** +** $Id: vdbemem.c,v 1.150 2009/06/25 01:47:12 drh Exp $ */ #include "sqliteInt.h" -#include "os.h" -#include -#include #include "vdbeInt.h" /* @@ -42,18 +41,21 @@ */ int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){ int rc; + assert( (pMem->flags&MEM_RowSet)==0 ); + assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE + || desiredEnc==SQLITE_UTF16BE ); if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){ return SQLITE_OK; } + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); #ifdef SQLITE_OMIT_UTF16 return SQLITE_ERROR; #else - /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. 
If NOMEM is returned, ** then the encoding of the value may not have changed. */ - rc = sqlite3VdbeMemTranslate(pMem, desiredEnc); + rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc); assert(rc==SQLITE_OK || rc==SQLITE_NOMEM); assert(rc==SQLITE_OK || pMem->enc!=desiredEnc); assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc); @@ -62,31 +64,78 @@ } /* -** Make the given Mem object MEM_Dyn. +** Make sure pMem->z points to a writable allocation of at least +** n bytes. +** +** If the memory cell currently contains string or blob data +** and the third argument passed to this function is true, the +** current content of the cell is preserved. Otherwise, it may +** be discarded. +** +** This function sets the MEM_Dyn flag and clears any xDel callback. +** It also clears MEM_Ephem and MEM_Static. If the preserve flag is +** not set, Mem.n is zeroed. +*/ +int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve){ + assert( 1 >= + ((pMem->zMalloc && pMem->zMalloc==pMem->z) ? 1 : 0) + + (((pMem->flags&MEM_Dyn)&&pMem->xDel) ? 1 : 0) + + ((pMem->flags&MEM_Ephem) ? 1 : 0) + + ((pMem->flags&MEM_Static) ? 1 : 0) + ); + assert( (pMem->flags&MEM_RowSet)==0 ); + + if( n<32 ) n = 32; + if( sqlite3DbMallocSize(pMem->db, pMem->zMalloc)z==pMem->zMalloc ){ + pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n); + preserve = 0; + }else{ + sqlite3DbFree(pMem->db, pMem->zMalloc); + pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n); + } + } + + if( pMem->z && preserve && pMem->zMalloc && pMem->z!=pMem->zMalloc ){ + memcpy(pMem->zMalloc, pMem->z, pMem->n); + } + if( pMem->flags&MEM_Dyn && pMem->xDel ){ + pMem->xDel((void *)(pMem->z)); + } + + pMem->z = pMem->zMalloc; + if( pMem->z==0 ){ + pMem->flags = MEM_Null; + }else{ + pMem->flags &= ~(MEM_Ephem|MEM_Static); + } + pMem->xDel = 0; + return (pMem->z ? SQLITE_OK : SQLITE_NOMEM); +} + +/* +** Make the given Mem object MEM_Dyn. In other words, make it so +** that any TEXT or BLOB content is stored in memory obtained from +** malloc(). In this way, we know that the memory is safe to be +** overwritten or altered. ** ** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails. 
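/* Editor's note: illustrative sketch, not part of this patch.  The new
** sqlite3VdbeMemGrow() replaces the old fixed zShort[NBFS] buffer with one
** primitive: make z point at a writable allocation of at least n bytes,
** optionally preserving the current content.  The cell below mimics that
** contract with plain malloc()/realloc(); DemoCell and demoGrow are
** hypothetical names, not SQLite's.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

typedef struct DemoCell {
  char *z;        /* current content (may point into someone else's buffer) */
  char *zMalloc;  /* buffer this cell owns, or NULL */
  size_t n;       /* bytes of content in z */
} DemoCell;

/* Ensure p->z points at an owned buffer of at least n bytes.  If preserve
** is nonzero, the first p->n bytes of the old content are copied into the
** new buffer.  Returns 0 on success, -1 on OOM. */
static int demoGrow(DemoCell *p, size_t n, int preserve){
  if( p->z==p->zMalloc ){                   /* content already owned: realloc */
    char *zNew = realloc(p->zMalloc, n);
    if( zNew==0 ) return -1;
    p->z = p->zMalloc = zNew;
  }else{                                    /* content borrowed: allocate fresh */
    char *zNew = malloc(n);
    if( zNew==0 ) return -1;
    if( preserve && p->z ) memcpy(zNew, p->z, p->n);
    free(p->zMalloc);
    p->z = p->zMalloc = zNew;
  }
  return 0;
}

int main(void){
  DemoCell c = { (char*)"borrowed", 0, 8 };  /* points at a string literal */
  if( demoGrow(&c, 16, 1) ) return 1;        /* now holds a private copy */
  c.z[8] = '\0';
  printf("%s\n", c.z);
  free(c.zMalloc);
  return 0;
}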
*/ -int sqlite3VdbeMemDynamicify(Mem *pMem){ - int n; - u8 *z; +int sqlite3VdbeMemMakeWriteable(Mem *pMem){ + int f; + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( (pMem->flags&MEM_RowSet)==0 ); expandBlob(pMem); - if( (pMem->flags & (MEM_Ephem|MEM_Static|MEM_Short))==0 ){ - return SQLITE_OK; - } - assert( (pMem->flags & MEM_Dyn)==0 ); - n = pMem->n; - assert( pMem->flags & (MEM_Str|MEM_Blob) ); - z = sqliteMallocRaw( n+2 ); - if( z==0 ){ - return SQLITE_NOMEM; + f = pMem->flags; + if( (f&(MEM_Str|MEM_Blob)) && pMem->z!=pMem->zMalloc ){ + if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){ + return SQLITE_NOMEM; + } + pMem->z[pMem->n] = 0; + pMem->z[pMem->n+1] = 0; + pMem->flags |= MEM_Term; } - pMem->flags |= MEM_Dyn|MEM_Term; - pMem->xDel = 0; - memcpy(z, pMem->z, n ); - z[n] = 0; - z[n+1] = 0; - pMem->z = (char*)z; - pMem->flags &= ~(MEM_Ephem|MEM_Static|MEM_Short); + return SQLITE_OK; } @@ -97,23 +146,23 @@ #ifndef SQLITE_OMIT_INCRBLOB int sqlite3VdbeMemExpandBlob(Mem *pMem){ if( pMem->flags & MEM_Zero ){ - char *pNew; int nByte; - assert( (pMem->flags & MEM_Blob)!=0 ); - nByte = pMem->n + pMem->u.i; - if( nByte<=0 ) nByte = 1; - pNew = sqliteMalloc(nByte); - if( pNew==0 ){ + assert( pMem->flags&MEM_Blob ); + assert( (pMem->flags&MEM_RowSet)==0 ); + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + + /* Set nByte to the number of bytes required to store the expanded blob. */ + nByte = pMem->n + pMem->u.nZero; + if( nByte<=0 ){ + nByte = 1; + } + if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){ return SQLITE_NOMEM; } - memcpy(pNew, pMem->z, pMem->n); - memset(&pNew[pMem->n], 0, pMem->u.i); - sqlite3VdbeMemRelease(pMem); - pMem->z = pNew; - pMem->n += pMem->u.i; - pMem->u.i = 0; - pMem->flags &= ~(MEM_Zero|MEM_Static|MEM_Ephem|MEM_Short|MEM_Term); - pMem->flags |= MEM_Dyn; + + memset(&pMem->z[pMem->n], 0, pMem->u.nZero); + pMem->n += pMem->u.nZero; + pMem->flags &= ~(MEM_Zero|MEM_Term); } return SQLITE_OK; } @@ -121,67 +170,19 @@ /* -** Make the given Mem object either MEM_Short or MEM_Dyn so that bytes -** of the Mem.z[] array can be modified. -** -** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails. -*/ -int sqlite3VdbeMemMakeWriteable(Mem *pMem){ - int n; - u8 *z; - expandBlob(pMem); - if( (pMem->flags & (MEM_Ephem|MEM_Static))==0 ){ - return SQLITE_OK; - } - assert( (pMem->flags & MEM_Dyn)==0 ); - assert( pMem->flags & (MEM_Str|MEM_Blob) ); - if( (n = pMem->n)+2zShort) ){ - z = (u8*)pMem->zShort; - pMem->flags |= MEM_Short|MEM_Term; - }else{ - z = sqliteMallocRaw( n+2 ); - if( z==0 ){ - return SQLITE_NOMEM; - } - pMem->flags |= MEM_Dyn|MEM_Term; - pMem->xDel = 0; - } - memcpy(z, pMem->z, n ); - z[n] = 0; - z[n+1] = 0; - pMem->z = (char*)z; - pMem->flags &= ~(MEM_Ephem|MEM_Static); - assert(0==(1&(int)pMem->z)); - return SQLITE_OK; -} - -/* ** Make sure the given Mem is \u0000 terminated. 
*/ int sqlite3VdbeMemNulTerminate(Mem *pMem){ + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); if( (pMem->flags & MEM_Term)!=0 || (pMem->flags & MEM_Str)==0 ){ return SQLITE_OK; /* Nothing to do */ } - if( pMem->flags & (MEM_Static|MEM_Ephem) ){ - return sqlite3VdbeMemMakeWriteable(pMem); - }else{ - char *z; - sqlite3VdbeMemExpandBlob(pMem); - z = sqliteMalloc(pMem->n+2); - - if( !z ) return SQLITE_NOMEM; - memcpy(z, pMem->z, pMem->n); - z[pMem->n] = 0; - z[pMem->n+1] = 0; - if( pMem->xDel ){ - pMem->xDel(pMem->z); - }else{ - sqliteFree(pMem->z); - } - pMem->xDel = 0; - pMem->z = z; - pMem->flags |= MEM_Term; + if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){ + return SQLITE_NOMEM; } + pMem->z[pMem->n] = 0; + pMem->z[pMem->n+1] = 0; + pMem->flags |= MEM_Term; return SQLITE_OK; } @@ -201,28 +202,35 @@ int sqlite3VdbeMemStringify(Mem *pMem, int enc){ int rc = SQLITE_OK; int fg = pMem->flags; - char *z = pMem->zShort; + const int nByte = 32; + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !(fg&MEM_Zero) ); assert( !(fg&(MEM_Str|MEM_Blob)) ); assert( fg&(MEM_Int|MEM_Real) ); + assert( (pMem->flags&MEM_RowSet)==0 ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - /* For a Real or Integer, use sqlite3_snprintf() to produce the UTF-8 + + if( sqlite3VdbeMemGrow(pMem, nByte, 0) ){ + return SQLITE_NOMEM; + } + + /* For a Real or Integer, use sqlite3_mprintf() to produce the UTF-8 ** string representation of the value. Then, if the required encoding ** is UTF-16le or UTF-16be do a translation. ** ** FIX ME: It would be better if sqlite3_snprintf() could do UTF-16. */ if( fg & MEM_Int ){ - sqlite3_snprintf(NBFS, z, "%lld", pMem->u.i); + sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i); }else{ assert( fg & MEM_Real ); - sqlite3_snprintf(NBFS, z, "%!.15g", pMem->r); + sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->r); } - pMem->n = strlen(z); - pMem->z = z; + pMem->n = sqlite3Strlen30(pMem->z); pMem->enc = SQLITE_UTF8; - pMem->flags |= MEM_Str | MEM_Short | MEM_Term; + pMem->flags |= MEM_Str|MEM_Term; sqlite3VdbeChangeEncoding(pMem, enc); return rc; } @@ -237,49 +245,92 @@ */ int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){ int rc = SQLITE_OK; - if( pFunc && pFunc->xFinalize ){ + if( ALWAYS(pFunc && pFunc->xFinalize) ){ sqlite3_context ctx; assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef ); + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + memset(&ctx, 0, sizeof(ctx)); ctx.s.flags = MEM_Null; - ctx.s.z = pMem->zShort; + ctx.s.db = pMem->db; ctx.pMem = pMem; ctx.pFunc = pFunc; - ctx.isError = 0; pFunc->xFinalize(&ctx); - if( pMem->z && pMem->z!=pMem->zShort ){ - sqliteFree( pMem->z ); - } - *pMem = ctx.s; - if( pMem->flags & MEM_Short ){ - pMem->z = pMem->zShort; - } - if( ctx.isError ){ - rc = SQLITE_ERROR; - } + assert( 0==(pMem->flags&MEM_Dyn) && !pMem->xDel ); + sqlite3DbFree(pMem->db, pMem->zMalloc); + memcpy(pMem, &ctx.s, sizeof(ctx.s)); + rc = ctx.isError; } return rc; } /* +** If the memory cell contains a string value that must be freed by +** invoking an external callback, free it now. Calling this function +** does not free any Mem.zMalloc buffer. 
+*/ +void sqlite3VdbeMemReleaseExternal(Mem *p){ + assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) ); + if( p->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet) ){ + if( p->flags&MEM_Agg ){ + sqlite3VdbeMemFinalize(p, p->u.pDef); + assert( (p->flags & MEM_Agg)==0 ); + sqlite3VdbeMemRelease(p); + }else if( p->flags&MEM_Dyn && p->xDel ){ + assert( (p->flags&MEM_RowSet)==0 ); + p->xDel((void *)p->z); + p->xDel = 0; + }else if( p->flags&MEM_RowSet ){ + sqlite3RowSetClear(p->u.pRowSet); + } + } +} + +/* ** Release any memory held by the Mem. This may leave the Mem in an ** inconsistent state, for example with (Mem.z==0) and ** (Mem.type==SQLITE_TEXT). */ void sqlite3VdbeMemRelease(Mem *p){ - if( p->flags & (MEM_Dyn|MEM_Agg) ){ - if( p->xDel ){ - if( p->flags & MEM_Agg ){ - sqlite3VdbeMemFinalize(p, p->u.pDef); - assert( (p->flags & MEM_Agg)==0 ); - sqlite3VdbeMemRelease(p); - }else{ - p->xDel((void *)p->z); - } - }else{ - sqliteFree(p->z); - } - p->z = 0; - p->xDel = 0; + sqlite3VdbeMemReleaseExternal(p); + sqlite3DbFree(p->db, p->zMalloc); + p->z = 0; + p->zMalloc = 0; + p->xDel = 0; +} + +/* +** Convert a 64-bit IEEE double into a 64-bit signed integer. +** If the double is too large, return 0x8000000000000000. +** +** Most systems appear to do this simply by assigning +** variables and without the extra range tests. But +** there are reports that windows throws an expection +** if the floating point value is out of range. (See ticket #2880.) +** Because we do not completely understand the problem, we will +** take the conservative approach and always do range tests +** before attempting the conversion. +*/ +static i64 doubleToInt64(double r){ + /* + ** Many compilers we encounter do not define constants for the + ** minimum and maximum 64-bit integers, or they define them + ** inconsistently. And many do not understand the "LL" notation. + ** So we define our own static constants here using nothing + ** larger than a 32-bit integer constant. + */ + static const i64 maxInt = LARGEST_INT64; + static const i64 minInt = SMALLEST_INT64; + + if( r<(double)minInt ){ + return minInt; + }else if( r>(double)maxInt ){ + /* minInt is correct here - not maxInt. It turns out that assigning + ** a very large positive number to an integer results in a very large + ** negative integer. This makes no sense, but it is what x86 hardware + ** does so for compatibility we will do the same in software. */ + return minInt; + }else{ + return (i64)r; } } @@ -289,16 +340,20 @@ ** If pMem is an integer, then the value is exact. If pMem is ** a floating-point then the value returned is the integer part. ** If pMem is a string or blob, then we make an attempt to convert -** it into a integer and return that. If pMem is NULL, return 0. +** it into a integer and return that. If pMem represents an +** an SQL-NULL value, return 0. ** -** If pMem is a string, its encoding might be changed. +** If pMem represents a string value, its encoding might be changed. */ i64 sqlite3VdbeIntValue(Mem *pMem){ - int flags = pMem->flags; + int flags; + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); + flags = pMem->flags; if( flags & MEM_Int ){ return pMem->u.i; }else if( flags & MEM_Real ){ - return (i64)pMem->r; + return doubleToInt64(pMem->r); }else if( flags & (MEM_Str|MEM_Blob) ){ i64 value; pMem->flags |= MEM_Str; @@ -321,22 +376,27 @@ ** If it is a NULL, return 0.0. 
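/* Editor's note: illustrative sketch, not part of this patch.  The new
** doubleToInt64() range-tests a double before casting it to a 64-bit
** integer, because an out-of-range cast is undefined in C and, per the
** comment in the patch, reportedly raises an exception on some Windows
** builds (ticket #2880).  The standalone version below shows the same
** clamp-then-cast shape using <stdint.h> limits; clampToInt64 is a
** hypothetical name.
*/
#include <stdint.h>
#include <stdio.h>

static int64_t clampToInt64(double r){
  /* Compare against the limits as doubles before converting. */
  if( r < (double)INT64_MIN ) return INT64_MIN;
  /* Mirror the patch: very large positive values also saturate to the
  ** minimum, matching what x86 hardware historically produced. */
  if( r >= (double)INT64_MAX ) return INT64_MIN;
  return (int64_t)r;
}

int main(void){
  printf("%lld\n", (long long)clampToInt64(3.9));    /* 3 */
  printf("%lld\n", (long long)clampToInt64(-1e19));  /* INT64_MIN */
  printf("%lld\n", (long long)clampToInt64(1e19));   /* INT64_MIN (saturated) */
  return 0;
}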
*/ double sqlite3VdbeRealValue(Mem *pMem){ + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); if( pMem->flags & MEM_Real ){ return pMem->r; }else if( pMem->flags & MEM_Int ){ return (double)pMem->u.i; }else if( pMem->flags & (MEM_Str|MEM_Blob) ){ - double val = 0.0; + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + double val = (double)0; pMem->flags |= MEM_Str; if( sqlite3VdbeChangeEncoding(pMem, SQLITE_UTF8) || sqlite3VdbeMemNulTerminate(pMem) ){ - return 0.0; + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + return (double)0; } assert( pMem->z ); sqlite3AtoF(pMem->z, &val); return val; }else{ - return 0.0; + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + return (double)0; } } @@ -346,8 +406,23 @@ */ void sqlite3VdbeIntegerAffinity(Mem *pMem){ assert( pMem->flags & MEM_Real ); - pMem->u.i = pMem->r; - if( ((double)pMem->u.i)==pMem->r ){ + assert( (pMem->flags & MEM_RowSet)==0 ); + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); + + pMem->u.i = doubleToInt64(pMem->r); + + /* Only mark the value as an integer if + ** + ** (1) the round-trip conversion real->int->real is a no-op, and + ** (2) The integer is neither the largest nor the smallest + ** possible integer (ticket #3922) + ** + ** The second term in the following conditional enforces the second + ** condition under the assumption that additional overflow causes + ** values to wrap around. + */ + if( pMem->r==(double)pMem->u.i && (pMem->u.i-1) < (pMem->u.i+1) ){ pMem->flags |= MEM_Int; } } @@ -356,9 +431,12 @@ ** Convert pMem to type integer. Invalidate any prior representations. */ int sqlite3VdbeMemIntegerify(Mem *pMem){ + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( (pMem->flags & MEM_RowSet)==0 ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); + pMem->u.i = sqlite3VdbeIntValue(pMem); - sqlite3VdbeMemRelease(pMem); - pMem->flags = MEM_Int; + MemSetTypeFlag(pMem, MEM_Int); return SQLITE_OK; } @@ -367,9 +445,11 @@ ** Invalidate any prior representations. */ int sqlite3VdbeMemRealify(Mem *pMem){ + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( EIGHT_BYTE_ALIGNMENT(pMem) ); + pMem->r = sqlite3VdbeRealValue(pMem); - sqlite3VdbeMemRelease(pMem); - pMem->flags = MEM_Real; + MemSetTypeFlag(pMem, MEM_Real); return SQLITE_OK; } @@ -382,15 +462,15 @@ i64 i; assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))==0 ); assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 ); + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); r1 = sqlite3VdbeRealValue(pMem); - i = (i64)r1; + i = doubleToInt64(r1); r2 = (double)i; if( r1==r2 ){ sqlite3VdbeMemIntegerify(pMem); }else{ pMem->r = r1; - pMem->flags = MEM_Real; - sqlite3VdbeMemRelease(pMem); + MemSetTypeFlag(pMem, MEM_Real); } return SQLITE_OK; } @@ -399,10 +479,11 @@ ** Delete any previous value and set the value stored in *pMem to NULL. 
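/* Editor's note: illustrative sketch, not part of this patch.  The revised
** sqlite3VdbeIntegerAffinity() only relabels a REAL as an INTEGER when the
** conversion is provably lossless: the double->int64->double round trip
** must reproduce the value, and the integer must not sit at either extreme
** of the 64-bit range (ticket #3922).  isLosslessInteger below is a
** hypothetical standalone version of that test.
*/
#include <stdint.h>
#include <stdio.h>

static int isLosslessInteger(double r){
  int64_t i;
  if( r < (double)INT64_MIN || r >= (double)INT64_MAX ) return 0;
  i = (int64_t)r;
  /* Round trip must be exact, and i must stay clear of the extremes where
  ** i-1 or i+1 would wrap around. */
  return (double)i==r && i>INT64_MIN && i<INT64_MAX;
}

int main(void){
  printf("42.0  -> %d\n", isLosslessInteger(42.0));   /* 1 */
  printf("42.5  -> %d\n", isLosslessInteger(42.5));   /* 0 */
  printf("1e300 -> %d\n", isLosslessInteger(1e300));  /* 0 */
  return 0;
}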
*/ void sqlite3VdbeMemSetNull(Mem *pMem){ - sqlite3VdbeMemRelease(pMem); - pMem->flags = MEM_Null; + if( pMem->flags & MEM_RowSet ){ + sqlite3RowSetClear(pMem->u.pRowSet); + } + MemSetTypeFlag(pMem, MEM_Null); pMem->type = SQLITE_NULL; - pMem->n = 0; } /* @@ -411,12 +492,11 @@ */ void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ sqlite3VdbeMemRelease(pMem); - pMem->flags = MEM_Blob|MEM_Zero|MEM_Short; + pMem->flags = MEM_Blob|MEM_Zero; pMem->type = SQLITE_BLOB; pMem->n = 0; if( n<0 ) n = 0; - pMem->u.i = n; - pMem->z = pMem->zShort; + pMem->u.nZero = n; pMem->enc = SQLITE_UTF8; } @@ -436,7 +516,7 @@ ** manifest type REAL. */ void sqlite3VdbeMemSetDouble(Mem *pMem, double val){ - if( sqlite3_isnan(val) ){ + if( sqlite3IsNaN(val) ){ sqlite3VdbeMemSetNull(pMem); }else{ sqlite3VdbeMemRelease(pMem); @@ -447,31 +527,60 @@ } /* +** Delete any previous value and set the value of pMem to be an +** empty boolean index. +*/ +void sqlite3VdbeMemSetRowSet(Mem *pMem){ + sqlite3 *db = pMem->db; + assert( db!=0 ); + assert( (pMem->flags & MEM_RowSet)==0 ); + sqlite3VdbeMemRelease(pMem); + pMem->zMalloc = sqlite3DbMallocRaw(db, 64); + if( db->mallocFailed ){ + pMem->flags = MEM_Null; + }else{ + assert( pMem->zMalloc ); + pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, + sqlite3DbMallocSize(db, pMem->zMalloc)); + assert( pMem->u.pRowSet!=0 ); + pMem->flags = MEM_RowSet; + } +} + +/* ** Return true if the Mem object contains a TEXT or BLOB that is ** too large - whose size exceeds SQLITE_MAX_LENGTH. */ int sqlite3VdbeMemTooBig(Mem *p){ + assert( p->db!=0 ); if( p->flags & (MEM_Str|MEM_Blob) ){ int n = p->n; if( p->flags & MEM_Zero ){ - n += p->u.i; + n += p->u.nZero; } - return n>SQLITE_MAX_LENGTH; + return n>p->db->aLimit[SQLITE_LIMIT_LENGTH]; } return 0; } /* +** Size of struct Mem not including the Mem.zMalloc member. +*/ +#define MEMCELLSIZE (size_t)(&(((Mem *)0)->zMalloc)) + +/* ** Make an shallow copy of pFrom into pTo. Prior contents of -** pTo are overwritten. The pFrom->z field is not duplicated. If +** pTo are freed. The pFrom->z field is not duplicated. If ** pFrom->z is used, then pTo->z points to the same thing as pFrom->z ** and flags gets srcType (either MEM_Ephem or MEM_Static). */ void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){ - memcpy(pTo, pFrom, sizeof(*pFrom)-sizeof(pFrom->zShort)); + assert( (pFrom->flags & MEM_RowSet)==0 ); + sqlite3VdbeMemReleaseExternal(pTo); + memcpy(pTo, pFrom, MEMCELLSIZE); pTo->xDel = 0; - if( pTo->flags & (MEM_Str|MEM_Blob) ){ - pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Short|MEM_Ephem); + if( (pFrom->flags&MEM_Dyn)!=0 || pFrom->z==pFrom->zMalloc ){ + pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem); assert( srcType==MEM_Ephem || srcType==MEM_Static ); pTo->flags |= srcType; } @@ -482,16 +591,20 @@ ** freed before the copy is made. */ int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){ - int rc; - if( pTo->flags & MEM_Dyn ){ - sqlite3VdbeMemRelease(pTo); - } - sqlite3VdbeMemShallowCopy(pTo, pFrom, MEM_Ephem); - if( pTo->flags & MEM_Ephem ){ - rc = sqlite3VdbeMemMakeWriteable(pTo); - }else{ - rc = SQLITE_OK; + int rc = SQLITE_OK; + + assert( (pFrom->flags & MEM_RowSet)==0 ); + sqlite3VdbeMemReleaseExternal(pTo); + memcpy(pTo, pFrom, MEMCELLSIZE); + pTo->flags &= ~MEM_Dyn; + + if( pTo->flags&(MEM_Str|MEM_Blob) ){ + if( 0==(pFrom->flags&MEM_Static) ){ + pTo->flags |= MEM_Ephem; + rc = sqlite3VdbeMemMakeWriteable(pTo); + } } + return rc; } @@ -499,31 +612,34 @@ ** Transfer the contents of pFrom to pTo. Any existing value in pTo is ** freed. 
If pFrom contains ephemeral data, a copy is made. ** -** pFrom contains an SQL NULL when this routine returns. SQLITE_NOMEM -** might be returned if pFrom held ephemeral data and we were unable -** to allocate enough space to make a copy. +** pFrom contains an SQL NULL when this routine returns. */ -int sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ - int rc; - if( pTo->flags & MEM_Dyn ){ - sqlite3VdbeMemRelease(pTo); - } +void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ + assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) ); + assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) ); + assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db ); + + sqlite3VdbeMemRelease(pTo); memcpy(pTo, pFrom, sizeof(Mem)); - if( pFrom->flags & MEM_Short ){ - pTo->z = pTo->zShort; - } pFrom->flags = MEM_Null; pFrom->xDel = 0; - if( pTo->flags & MEM_Ephem ){ - rc = sqlite3VdbeMemMakeWriteable(pTo); - }else{ - rc = SQLITE_OK; - } - return rc; + pFrom->zMalloc = 0; } /* ** Change the value of a Mem to be a string or a BLOB. +** +** The memory management strategy depends on the value of the xDel +** parameter. If the value passed is SQLITE_TRANSIENT, then the +** string is copied into a (possibly existing) buffer managed by the +** Mem structure. Otherwise, any existing buffer is freed and the +** pointer copied. +** +** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH +** size limit) then no memory allocation occurs. If the string can be +** stored without allocating memory, then it is. If a memory allocation +** is required to store the string, then value of pMem is unchanged. In +** either case, SQLITE_TOOBIG is returned. */ int sqlite3VdbeMemSetStr( Mem *pMem, /* Memory cell to set to string value */ @@ -532,59 +648,77 @@ u8 enc, /* Encoding of z. 0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ - sqlite3VdbeMemRelease(pMem); + int nByte = n; /* New value for pMem->n */ + int iLimit; /* Maximum allowed string or blob size */ + u16 flags = 0; /* New value for pMem->flags */ + + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( (pMem->flags & MEM_RowSet)==0 ); + + /* If z is a NULL pointer, set pMem to contain an SQL NULL. */ if( !z ){ - pMem->flags = MEM_Null; - pMem->type = SQLITE_NULL; + sqlite3VdbeMemSetNull(pMem); return SQLITE_OK; } - pMem->z = (char *)z; - if( xDel==SQLITE_STATIC ){ - pMem->flags = MEM_Static; - }else if( xDel==SQLITE_TRANSIENT ){ - pMem->flags = MEM_Ephem; + if( pMem->db ){ + iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH]; }else{ - pMem->flags = MEM_Dyn; + iLimit = SQLITE_MAX_LENGTH; + } + flags = (enc==0?MEM_Blob:MEM_Str); + if( nByte<0 ){ + assert( enc!=0 ); + if( enc==SQLITE_UTF8 ){ + for(nByte=0; nByte<=iLimit && z[nByte]; nByte++){} + }else{ + for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){} + } + flags |= MEM_Term; + } + + /* The following block sets the new values of Mem.z and Mem.xDel. It + ** also sets a flag in local variable "flags" to indicate the memory + ** management (one of MEM_Dyn or MEM_Static). 
+ */ + if( xDel==SQLITE_TRANSIENT ){ + int nAlloc = nByte; + if( flags&MEM_Term ){ + nAlloc += (enc==SQLITE_UTF8?1:2); + } + if( nByte>iLimit ){ + return SQLITE_TOOBIG; + } + if( sqlite3VdbeMemGrow(pMem, nAlloc, 0) ){ + return SQLITE_NOMEM; + } + memcpy(pMem->z, z, nAlloc); + }else if( xDel==SQLITE_DYNAMIC ){ + sqlite3VdbeMemRelease(pMem); + pMem->zMalloc = pMem->z = (char *)z; + pMem->xDel = 0; + }else{ + sqlite3VdbeMemRelease(pMem); + pMem->z = (char *)z; pMem->xDel = xDel; + flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn); } - pMem->enc = enc; - pMem->type = enc==0 ? SQLITE_BLOB : SQLITE_TEXT; - pMem->n = n; - - assert( enc==0 || enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE - || enc==SQLITE_UTF16BE ); - switch( enc ){ - case 0: - pMem->flags |= MEM_Blob; - pMem->enc = SQLITE_UTF8; - break; - - case SQLITE_UTF8: - pMem->flags |= MEM_Str; - if( n<0 ){ - pMem->n = strlen(z); - pMem->flags |= MEM_Term; - } - break; + pMem->n = nByte; + pMem->flags = flags; + pMem->enc = (enc==0 ? SQLITE_UTF8 : enc); + pMem->type = (enc==0 ? SQLITE_BLOB : SQLITE_TEXT); #ifndef SQLITE_OMIT_UTF16 - case SQLITE_UTF16LE: - case SQLITE_UTF16BE: - pMem->flags |= MEM_Str; - if( pMem->n<0 ){ - pMem->n = sqlite3Utf16ByteLen(pMem->z,-1); - pMem->flags |= MEM_Term; - } - if( sqlite3VdbeMemHandleBom(pMem) ){ - return SQLITE_NOMEM; - } -#endif /* SQLITE_OMIT_UTF16 */ + if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){ + return SQLITE_NOMEM; } - if( pMem->flags&MEM_Ephem ){ - return sqlite3VdbeMemMakeWriteable(pMem); +#endif + + if( nByte>iLimit ){ + return SQLITE_TOOBIG; } + return SQLITE_OK; } @@ -608,6 +742,7 @@ f1 = pMem1->flags; f2 = pMem2->flags; combined_flags = f1|f2; + assert( (combined_flags & MEM_RowSet)==0 ); /* If one value is NULL, it is less than the other. If both values ** are NULL, return 0. @@ -630,12 +765,12 @@ if( (f1 & f2 & MEM_Int)==0 ){ double r1, r2; if( (f1&MEM_Real)==0 ){ - r1 = pMem1->u.i; + r1 = (double)pMem1->u.i; }else{ r1 = pMem1->r; } if( (f2&MEM_Real)==0 ){ - r2 = pMem2->u.i; + r2 = (double)pMem2->u.i; }else{ r2 = pMem2->r; } @@ -678,22 +813,21 @@ ** comparison function directly */ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z); }else{ - u8 origEnc = pMem1->enc; const void *v1, *v2; int n1, n2; - /* Convert the strings into the encoding that the comparison - ** function expects */ - v1 = sqlite3ValueText((sqlite3_value*)pMem1, pColl->enc); - n1 = v1==0 ? 0 : pMem1->n; - assert( n1==sqlite3ValueBytes((sqlite3_value*)pMem1, pColl->enc) ); - v2 = sqlite3ValueText((sqlite3_value*)pMem2, pColl->enc); - n2 = v2==0 ? 0 : pMem2->n; - assert( n2==sqlite3ValueBytes((sqlite3_value*)pMem2, pColl->enc) ); - /* Do the comparison */ + Mem c1; + Mem c2; + memset(&c1, 0, sizeof(c1)); + memset(&c2, 0, sizeof(c2)); + sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem); + sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem); + v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc); + n1 = v1==0 ? 0 : c1.n; + v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc); + n2 = v2==0 ? 0 : c2.n; rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2); - /* Convert the strings back into the database encoding */ - sqlite3ValueText((sqlite3_value*)pMem1, origEnc); - sqlite3ValueText((sqlite3_value*)pMem2, origEnc); + sqlite3VdbeMemRelease(&c1); + sqlite3VdbeMemRelease(&c2); return rc; } } @@ -729,9 +863,13 @@ int key, /* If true, retrieve from the btree key, not data. */ Mem *pMem /* OUT: Return data in this Mem structure. 
*/ ){ - char *zData; /* Data from the btree layer */ - int available = 0; /* Number of bytes available on the local btree page */ - + char *zData; /* Data from the btree layer */ + int available = 0; /* Number of bytes available on the local btree page */ + int rc = SQLITE_OK; /* Return code */ + + /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert() + ** that both the BtShared and database handle mutexes are held. */ + assert( (pMem->flags & MEM_RowSet)==0 ); if( key ){ zData = (char *)sqlite3BtreeKeyFetch(pCur, &available); }else{ @@ -739,98 +877,29 @@ } assert( zData!=0 ); - pMem->n = amt; - if( offset+amt<=available ){ + if( offset+amt<=available && (pMem->flags&MEM_Dyn)==0 ){ + sqlite3VdbeMemRelease(pMem); pMem->z = &zData[offset]; pMem->flags = MEM_Blob|MEM_Ephem; - }else{ - int rc; - if( amt>NBFS-2 ){ - zData = (char *)sqliteMallocRaw(amt+2); - if( !zData ){ - return SQLITE_NOMEM; - } - pMem->flags = MEM_Blob|MEM_Dyn|MEM_Term; - pMem->xDel = 0; - }else{ - zData = &(pMem->zShort[0]); - pMem->flags = MEM_Blob|MEM_Short|MEM_Term; - } - pMem->z = zData; + }else if( SQLITE_OK==(rc = sqlite3VdbeMemGrow(pMem, amt+2, 0)) ){ + pMem->flags = MEM_Blob|MEM_Dyn|MEM_Term; pMem->enc = 0; pMem->type = SQLITE_BLOB; - if( key ){ - rc = sqlite3BtreeKey(pCur, offset, amt, zData); + rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z); }else{ - rc = sqlite3BtreeData(pCur, offset, amt, zData); + rc = sqlite3BtreeData(pCur, offset, amt, pMem->z); } - zData[amt] = 0; - zData[amt+1] = 0; + pMem->z[amt] = 0; + pMem->z[amt+1] = 0; if( rc!=SQLITE_OK ){ - if( amt>NBFS-2 ){ - assert( zData!=pMem->zShort ); - assert( pMem->flags & MEM_Dyn ); - sqliteFree(zData); - } else { - assert( zData==pMem->zShort ); - assert( pMem->flags & MEM_Short ); - } - return rc; + sqlite3VdbeMemRelease(pMem); } } + pMem->n = amt; - return SQLITE_OK; -} - -#ifndef NDEBUG -/* -** Perform various checks on the memory cell pMem. An assert() will -** fail if pMem is internally inconsistent. -*/ -void sqlite3VdbeMemSanity(Mem *pMem){ - int flags = pMem->flags; - assert( flags!=0 ); /* Must define some type */ - if( flags & (MEM_Str|MEM_Blob) ){ - int x = flags & (MEM_Static|MEM_Dyn|MEM_Ephem|MEM_Short); - assert( x!=0 ); /* Strings must define a string subtype */ - assert( (x & (x-1))==0 ); /* Only one string subtype can be defined */ - assert( pMem->z!=0 ); /* Strings must have a value */ - /* Mem.z points to Mem.zShort iff the subtype is MEM_Short */ - assert( (x & MEM_Short)==0 || pMem->z==pMem->zShort ); - assert( (x & MEM_Short)!=0 || pMem->z!=pMem->zShort ); - /* No destructor unless there is MEM_Dyn */ - assert( pMem->xDel==0 || (pMem->flags & MEM_Dyn)!=0 ); - - if( (flags & MEM_Str) ){ - assert( pMem->enc==SQLITE_UTF8 || - pMem->enc==SQLITE_UTF16BE || - pMem->enc==SQLITE_UTF16LE - ); - /* If the string is UTF-8 encoded and nul terminated, then pMem->n - ** must be the length of the string. (Later:) If the database file - ** has been corrupted, '\000' characters might have been inserted - ** into the middle of the string. In that case, the strlen() might - ** be less. 
- */ - if( pMem->enc==SQLITE_UTF8 && (flags & MEM_Term) ){ - assert( strlen(pMem->z)<=pMem->n ); - assert( pMem->z[pMem->n]==0 ); - } - } - }else{ - /* Cannot define a string subtype for non-string objects */ - assert( (pMem->flags & (MEM_Static|MEM_Dyn|MEM_Ephem|MEM_Short))==0 ); - assert( pMem->xDel==0 ); - } - /* MEM_Null excludes all other types */ - assert( (pMem->flags&(MEM_Str|MEM_Int|MEM_Real|MEM_Blob))==0 - || (pMem->flags&MEM_Null)==0 ); - /* If the MEM is both real and integer, the values are equal */ - assert( (pMem->flags & (MEM_Int|MEM_Real))!=(MEM_Int|MEM_Real) - || pMem->r==pMem->u.i ); + return rc; } -#endif /* This function is only available internally, it is not part of the ** external API. It works in a similar way to sqlite3_value_text(), @@ -844,7 +913,10 @@ */ const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ if( !pVal ) return 0; + + assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); + assert( (pVal->flags & MEM_RowSet)==0 ); if( pVal->flags&MEM_Null ){ return 0; @@ -854,7 +926,7 @@ expandBlob(pVal); if( pVal->flags&MEM_Str ){ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); - if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&(int)pVal->z) ){ + if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){ assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 ); if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){ return 0; @@ -864,9 +936,10 @@ }else{ assert( (pVal->flags&MEM_Blob)==0 ); sqlite3VdbeMemStringify(pVal, enc); - assert( 0==(1&(int)pVal->z) ); + assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) ); } - assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || sqlite3MallocFailed() ); + assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0 + || pVal->db->mallocFailed ); if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){ return pVal->z; }else{ @@ -877,11 +950,12 @@ /* ** Create a new sqlite3_value object. */ -sqlite3_value *sqlite3ValueNew(void){ - Mem *p = sqliteMalloc(sizeof(*p)); +sqlite3_value *sqlite3ValueNew(sqlite3 *db){ + Mem *p = sqlite3DbMallocZero(db, sizeof(*p)); if( p ){ p->flags = MEM_Null; p->type = SQLITE_NULL; + p->db = db; } return p; } @@ -890,17 +964,18 @@ ** Create a new sqlite3_value object, containing the value of pExpr. ** ** This only works for very simple expressions that consist of one constant -** token (i.e. "5", "5.1", "NULL", "'a string'"). If the expression can +** token (i.e. "5", "5.1", "'a string'"). If the expression can ** be converted directly into a value, then the value is allocated and ** a pointer written to *ppVal. The caller is responsible for deallocating ** the value by passing it to sqlite3ValueFree() later on. If the expression ** cannot be converted to a value, then *ppVal is set to NULL. 
*/ int sqlite3ValueFromExpr( - Expr *pExpr, - u8 enc, - u8 affinity, - sqlite3_value **ppVal + sqlite3 *db, /* The database connection */ + Expr *pExpr, /* The expression to evaluate */ + u8 enc, /* Encoding to use */ + u8 affinity, /* Affinity to use */ + sqlite3_value **ppVal /* Write the new value here */ ){ int op; char *zVal = 0; @@ -913,32 +988,42 @@ op = pExpr->op; if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ - zVal = sqliteStrNDup((char*)pExpr->token.z, pExpr->token.n); - pVal = sqlite3ValueNew(); - if( !zVal || !pVal ) goto no_mem; - sqlite3Dequote(zVal); - sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, sqlite3FreeX); + pVal = sqlite3ValueNew(db); + if( pVal==0 ) goto no_mem; + if( ExprHasProperty(pExpr, EP_IntValue) ){ + sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue); + }else{ + zVal = sqlite3DbStrDup(db, pExpr->u.zToken); + if( zVal==0 ) goto no_mem; + sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + } if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_NONE ){ - sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, enc); + sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); }else{ - sqlite3ValueApplyAffinity(pVal, affinity, enc); + sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); + } + if( enc!=SQLITE_UTF8 ){ + sqlite3VdbeChangeEncoding(pVal, enc); } }else if( op==TK_UMINUS ) { - if( SQLITE_OK==sqlite3ValueFromExpr(pExpr->pLeft, enc, affinity, &pVal) ){ + if( SQLITE_OK==sqlite3ValueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal) ){ pVal->u.i = -1 * pVal->u.i; - pVal->r = -1.0 * pVal->r; + /* (double)-1 In case of SQLITE_OMIT_FLOATING_POINT... */ + pVal->r = (double)-1 * pVal->r; } } #ifndef SQLITE_OMIT_BLOB_LITERAL else if( op==TK_BLOB ){ int nVal; - pVal = sqlite3ValueNew(); - zVal = sqliteStrNDup((char*)pExpr->token.z+1, pExpr->token.n-1); - if( !zVal || !pVal ) goto no_mem; - sqlite3Dequote(zVal); - nVal = strlen(zVal)/2; - sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(zVal), nVal, 0, sqlite3FreeX); - sqliteFree(zVal); + assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); + assert( pExpr->u.zToken[1]=='\'' ); + pVal = sqlite3ValueNew(db); + if( !pVal ) goto no_mem; + zVal = &pExpr->u.zToken[2]; + nVal = sqlite3Strlen30(zVal)-1; + assert( zVal[nVal]=='\'' ); + sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2, + 0, SQLITE_DYNAMIC); } #endif @@ -946,7 +1031,8 @@ return SQLITE_OK; no_mem: - sqliteFree(zVal); + db->mallocFailed = 1; + sqlite3DbFree(db, zVal); sqlite3ValueFree(pVal); *ppVal = 0; return SQLITE_NOMEM; @@ -956,11 +1042,11 @@ ** Change the string value of an sqlite3_value object */ void sqlite3ValueSetStr( - sqlite3_value *v, - int n, - const void *z, - u8 enc, - void (*xDel)(void*) + sqlite3_value *v, /* Value to be set */ + int n, /* Length of string z */ + const void *z, /* Text of the new string */ + u8 enc, /* Encoding to use */ + void (*xDel)(void*) /* Destructor for the string */ ){ if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel); } @@ -970,8 +1056,8 @@ */ void sqlite3ValueFree(sqlite3_value *v){ if( !v ) return; - sqlite3ValueSetStr(v, 0, 0, SQLITE_UTF8, SQLITE_STATIC); - sqliteFree(v); + sqlite3VdbeMemRelease((Mem *)v); + sqlite3DbFree(((Mem*)v)->db, v); } /* @@ -982,7 +1068,7 @@ Mem *p = (Mem*)pVal; if( (p->flags & MEM_Blob)!=0 || sqlite3ValueText(pVal, enc) ){ if( p->flags & MEM_Zero ){ - return p->n+p->u.i; + return p->n + p->u.nZero; }else{ return p->n; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/vtab.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/vtab.c --- 
sqlite3-3.4.2/src/vtab.c 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/src/vtab.c 2009-06-25 12:39:54.000000000 +0100 @@ -11,11 +11,16 @@ ************************************************************************* ** This file contains code used to help implement virtual tables. ** -** $Id: vtab.c,v 1.48 2007/06/26 10:38:55 danielk1977 Exp $ +** $Id: vtab.c,v 1.91 2009/06/15 16:27:08 shane Exp $ */ #ifndef SQLITE_OMIT_VIRTUALTABLE #include "sqliteInt.h" +/* +** The actual function that does the work of creating a new module. +** This function implements the sqlite3_create_module() and +** sqlite3_create_module_v2() interfaces. +*/ static int createModule( sqlite3 *db, /* Database in which module is registered */ const char *zName, /* Name assigned to this module */ @@ -23,23 +28,35 @@ void *pAux, /* Context pointer for xCreate/xConnect */ void (*xDestroy)(void *) /* Module destructor function */ ) { - int nName = strlen(zName); - Module *pMod = (Module *)sqliteMallocRaw(sizeof(Module) + nName + 1); + int rc, nName; + Module *pMod; + + sqlite3_mutex_enter(db->mutex); + nName = sqlite3Strlen30(zName); + pMod = (Module *)sqlite3DbMallocRaw(db, sizeof(Module) + nName + 1); if( pMod ){ + Module *pDel; char *zCopy = (char *)(&pMod[1]); memcpy(zCopy, zName, nName+1); pMod->zName = zCopy; pMod->pModule = pModule; pMod->pAux = pAux; pMod->xDestroy = xDestroy; - pMod = (Module *)sqlite3HashInsert(&db->aModule, zCopy, nName, (void*)pMod); - if( pMod && pMod->xDestroy ){ - pMod->xDestroy(pMod->pAux); + pDel = (Module *)sqlite3HashInsert(&db->aModule, zCopy, nName, (void*)pMod); + if( pDel && pDel->xDestroy ){ + pDel->xDestroy(pDel->pAux); + } + sqlite3DbFree(db, pDel); + if( pDel==pMod ){ + db->mallocFailed = 1; } - sqliteFree(pMod); sqlite3ResetInternalSchema(db, 0); + }else if( xDestroy ){ + xDestroy(pAux); } - return sqlite3ApiExit(db, SQLITE_OK); + rc = sqlite3ApiExit(db, SQLITE_OK); + sqlite3_mutex_leave(db->mutex); + return rc; } @@ -85,15 +102,22 @@ ** disconnect the virtual table. */ void sqlite3VtabUnlock(sqlite3 *db, sqlite3_vtab *pVtab){ +#ifndef SQLITE_DEBUG + UNUSED_PARAMETER(db); +#endif + assert( pVtab->nRef>0 ); pVtab->nRef--; assert(db); - assert(!sqlite3SafetyCheck(db)); + assert( sqlite3SafetyCheckOk(db) ); if( pVtab->nRef==0 ){ +#ifdef SQLITE_DEBUG if( db->magic==SQLITE_MAGIC_BUSY ){ - sqlite3SafetyOff(db); + (void)sqlite3SafetyOff(db); pVtab->pModule->xDisconnect(pVtab); - sqlite3SafetyOn(db); - } else { + (void)sqlite3SafetyOn(db); + } else +#endif + { pVtab->pModule->xDisconnect(pVtab); } } @@ -106,17 +130,19 @@ */ void sqlite3VtabClear(Table *p){ sqlite3_vtab *pVtab = p->pVtab; + Schema *pSchema = p->pSchema; + sqlite3 *db = pSchema ? pSchema->db : 0; if( pVtab ){ assert( p->pMod && p->pMod->pModule ); - sqlite3VtabUnlock(p->pSchema->db, pVtab); + sqlite3VtabUnlock(db, pVtab); p->pVtab = 0; } if( p->azModuleArg ){ int i; for(i=0; inModuleArg; i++){ - sqliteFree(p->azModuleArg[i]); + sqlite3DbFree(db, p->azModuleArg[i]); } - sqliteFree(p->azModuleArg); + sqlite3DbFree(db, p->azModuleArg); } } @@ -126,18 +152,18 @@ ** string will be freed automatically when the table is ** deleted. 
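/* Editor's note: illustrative sketch, not part of this patch.  The reworked
** createModule() relies on a hash-insert that hands back whatever entry was
** previously stored under the same key; the caller then runs the old
** entry's destructor and frees it, so re-registering a module name replaces
** the old module cleanly, and pAux is destroyed immediately if allocation
** fails.  The tiny registry below (DemoModule, demoInsert,
** demoCreateModule, all hypothetical) shows the same replace-and-return-old
** contract without a real hash table.
*/
#include <stdlib.h>
#include <stdio.h>

typedef struct DemoModule {
  const char *zName;         /* module name, used as the key */
  void *pAux;                /* client data */
  void (*xDestroy)(void*);   /* destructor for pAux, may be NULL */
} DemoModule;

static DemoModule *registry[8];   /* crude stand-in for a hash table */

/* Store pNew under its name and return whatever entry it displaced
** (NULL if the slot was empty).  Collisions are ignored for brevity. */
static DemoModule *demoInsert(DemoModule *pNew){
  unsigned h = (unsigned char)pNew->zName[0] % 8;
  DemoModule *pOld = registry[h];
  registry[h] = pNew;
  return pOld;
}

static void demoCreateModule(const char *zName, void *pAux, void (*xDestroy)(void*)){
  DemoModule *pDel;
  DemoModule *pMod = malloc(sizeof(*pMod));
  if( pMod==0 ){
    if( xDestroy ) xDestroy(pAux);   /* mirror the patch: destroy pAux on OOM */
    return;
  }
  pMod->zName = zName;
  pMod->pAux = pAux;
  pMod->xDestroy = xDestroy;
  pDel = demoInsert(pMod);
  if( pDel ){                        /* an older registration was replaced */
    if( pDel->xDestroy ) pDel->xDestroy(pDel->pAux);
    free(pDel);
  }
}

int main(void){
  demoCreateModule("fts", 0, 0);
  demoCreateModule("fts", 0, 0);     /* second call frees the first entry */
  printf("registered: %s\n", registry[(unsigned char)'f' % 8]->zName);
  free(registry[(unsigned char)'f' % 8]);
  return 0;
}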
*/ -static void addModuleArgument(Table *pTable, char *zArg){ +static void addModuleArgument(sqlite3 *db, Table *pTable, char *zArg){ int i = pTable->nModuleArg++; int nBytes = sizeof(char *)*(1+pTable->nModuleArg); char **azModuleArg; - azModuleArg = sqliteRealloc(pTable->azModuleArg, nBytes); + azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes); if( azModuleArg==0 ){ int j; for(j=0; jazModuleArg[j]); + sqlite3DbFree(db, pTable->azModuleArg[j]); } - sqliteFree(zArg); - sqliteFree(pTable->azModuleArg); + sqlite3DbFree(db, zArg); + sqlite3DbFree(db, pTable->azModuleArg); pTable->nModuleArg = 0; }else{ azModuleArg[i] = zArg; @@ -159,28 +185,28 @@ ){ int iDb; /* The database the table is being created in */ Table *pTable; /* The new virtual table */ + sqlite3 *db; /* Database connection */ -#ifndef SQLITE_OMIT_SHARED_CACHE - if( sqlite3ThreadDataReadOnly()->useSharedData ){ + if( pParse->db->flags & SQLITE_SharedCache ){ sqlite3ErrorMsg(pParse, "Cannot use virtual tables in shared-cache mode"); return; } -#endif sqlite3StartTable(pParse, pName1, pName2, 0, 0, 1, 0); pTable = pParse->pNewTable; - if( pTable==0 || pParse->nErr ) return; + if( pTable==0 ) return; assert( 0==pTable->pIndex ); - iDb = sqlite3SchemaToIndex(pParse->db, pTable->pSchema); + db = pParse->db; + iDb = sqlite3SchemaToIndex(db, pTable->pSchema); assert( iDb>=0 ); - pTable->isVirtual = 1; + pTable->tabFlags |= TF_Virtual; pTable->nModuleArg = 0; - addModuleArgument(pTable, sqlite3NameFromToken(pModuleName)); - addModuleArgument(pTable, sqlite3StrDup(pParse->db->aDb[iDb].zName)); - addModuleArgument(pTable, sqlite3StrDup(pTable->zName)); - pParse->sNameToken.n = pModuleName->z + pModuleName->n - pName1->z; + addModuleArgument(db, pTable, sqlite3NameFromToken(db, pModuleName)); + addModuleArgument(db, pTable, sqlite3DbStrDup(db, db->aDb[iDb].zName)); + addModuleArgument(db, pTable, sqlite3DbStrDup(db, pTable->zName)); + pParse->sNameToken.n = (int)(&pModuleName->z[pModuleName->n] - pName1->z); #ifndef SQLITE_OMIT_AUTHORIZATION /* Creating a virtual table invokes the authorization callback twice. @@ -201,10 +227,11 @@ ** virtual table currently under construction in pParse->pTable. */ static void addArgumentToVtab(Parse *pParse){ - if( pParse->sArg.z && pParse->pNewTable ){ + if( pParse->sArg.z && ALWAYS(pParse->pNewTable) ){ const char *z = (const char*)pParse->sArg.z; int n = pParse->sArg.n; - addModuleArgument(pParse->pNewTable, sqliteStrNDup(z, n)); + sqlite3 *db = pParse->db; + addModuleArgument(db, pParse->pNewTable, sqlite3DbStrNDup(db, z, n)); } } @@ -227,7 +254,8 @@ db = pParse->db; if( pTab->nModuleArg<1 ) return; zModule = pTab->azModuleArg[0]; - pMod = (Module *)sqlite3HashFind(&db->aModule, zModule, strlen(zModule)); + pMod = (Module*)sqlite3HashFind(&db->aModule, zModule, + sqlite3Strlen30(zModule)); pTab->pMod = pMod; /* If the CREATE VIRTUAL TABLE statement is being entered for the @@ -244,37 +272,38 @@ /* Compute the complete text of the CREATE VIRTUAL TABLE statement */ if( pEnd ){ - pParse->sNameToken.n = pEnd->z - pParse->sNameToken.z + pEnd->n; + pParse->sNameToken.n = (int)(pEnd->z - pParse->sNameToken.z) + pEnd->n; } - zStmt = sqlite3MPrintf("CREATE VIRTUAL TABLE %T", &pParse->sNameToken); + zStmt = sqlite3MPrintf(db, "CREATE VIRTUAL TABLE %T", &pParse->sNameToken); /* A slot for the record has already been allocated in the ** SQLITE_MASTER table. We just need to update that slot with all ** the information we've collected. 
** - ** The top of the stack is the rootpage allocated by sqlite3StartTable(). - ** This value is always 0 and is ignored, a virtual table does not have a - ** rootpage. The next entry on the stack is the rowid of the record - ** in the sqlite_master table. + ** The VM register number pParse->regRowid holds the rowid of an + ** entry in the sqlite_master table tht was created for this vtab + ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3NestedParse(pParse, "UPDATE %Q.%s " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " - "WHERE rowid=#1", + "WHERE rowid=#%d", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pTab->zName, pTab->zName, - zStmt + zStmt, + pParse->regRowid ); - sqliteFree(zStmt); + sqlite3DbFree(db, zStmt); v = sqlite3GetVdbe(pParse); - sqlite3ChangeCookie(db, v, iDb); + sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddOp(v, OP_Expire, 0, 0); - zWhere = sqlite3MPrintf("name='%q'", pTab->zName); - sqlite3VdbeOp3(v, OP_ParseSchema, iDb, 1, zWhere, P3_DYNAMIC); - sqlite3VdbeOp3(v, OP_VCreate, iDb, 0, pTab->zName, strlen(pTab->zName) + 1); + sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); + zWhere = sqlite3MPrintf(db, "name='%q'", pTab->zName); + sqlite3VdbeAddOp4(v, OP_ParseSchema, iDb, 1, 0, zWhere, P4_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_VCreate, iDb, 0, 0, + pTab->zName, sqlite3Strlen30(pTab->zName) + 1); } /* If we are rereading the sqlite_master table create the in-memory @@ -285,9 +314,10 @@ Table *pOld; Schema *pSchema = pTab->pSchema; const char *zName = pTab->zName; - int nName = strlen(zName) + 1; + int nName = sqlite3Strlen30(zName); pOld = sqlite3HashInsert(&pSchema->tblHash, zName, nName, pTab); if( pOld ){ + db->mallocFailed = 1; assert( pTab==pOld ); /* Malloc must have failed inside HashInsert() */ return; } @@ -317,7 +347,7 @@ pArg->n = p->n; }else{ assert(pArg->z < p->z); - pArg->n = (p->z + p->n - pArg->z); + pArg->n = (int)(&p->z[p->n] - pArg->z); } } @@ -335,11 +365,11 @@ ){ int rc; int rc2; - sqlite3_vtab *pVtab; + sqlite3_vtab *pVtab = 0; const char *const*azArg = (const char *const*)pTab->azModuleArg; int nArg = pTab->nModuleArg; char *zErr = 0; - char *zModuleName = sqlite3MPrintf("%s", pTab->zName); + char *zModuleName = sqlite3MPrintf(db, "%s", pTab->zName); if( !zModuleName ){ return SQLITE_NOMEM; @@ -351,31 +381,34 @@ db->pVTab = pTab; rc = sqlite3SafetyOff(db); assert( rc==SQLITE_OK ); - rc = xConstruct(db, pMod->pAux, nArg, azArg, &pTab->pVtab, &zErr); + rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVtab, &zErr); rc2 = sqlite3SafetyOn(db); - pVtab = pTab->pVtab; - if( rc==SQLITE_OK && pVtab ){ + if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; + /* Justification of ALWAYS(): A correct vtab constructor must allocate + ** the sqlite3_vtab object if successful. 
*/ + if( rc==SQLITE_OK && ALWAYS(pVtab) ){ pVtab->pModule = pMod->pModule; pVtab->nRef = 1; + pTab->pVtab = pVtab; } if( SQLITE_OK!=rc ){ if( zErr==0 ){ - *pzErr = sqlite3MPrintf("vtable constructor failed: %s", zModuleName); + *pzErr = sqlite3MPrintf(db, "vtable constructor failed: %s", zModuleName); }else { - *pzErr = sqlite3MPrintf("%s", zErr); - sqlite3_free(zErr); + *pzErr = sqlite3MPrintf(db, "%s", zErr); + sqlite3DbFree(db, zErr); } }else if( db->pVTab ){ const char *zFormat = "vtable constructor did not declare schema: %s"; - *pzErr = sqlite3MPrintf(zFormat, pTab->zName); + *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); rc = SQLITE_ERROR; } if( rc==SQLITE_OK ){ rc = rc2; } db->pVTab = 0; - sqliteFree(zModuleName); + sqlite3DbFree(db, zModuleName); /* If everything went according to plan, loop through the columns ** of the table to see if any of them contain the token "hidden". @@ -389,7 +422,7 @@ int nType; int i = 0; if( !zType ) continue; - nType = strlen(zType); + nType = sqlite3Strlen30(zType); if( sqlite3StrNICmp("hidden", zType, 6) || (zType[6] && zType[6]!=' ') ){ for(i=0; iisVirtual || pTab->pVtab ){ + assert( pTab ); + if( (pTab->tabFlags & TF_Virtual)==0 || pTab->pVtab ){ return SQLITE_OK; } @@ -444,7 +478,7 @@ if( rc!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "%s", zErr); } - sqliteFree(zErr); + sqlite3DbFree(db, zErr); } return rc; @@ -460,7 +494,7 @@ if( (db->nVTrans%ARRAY_INCR)==0 ){ sqlite3_vtab **aVTrans; int nBytes = sizeof(sqlite3_vtab *) * (db->nVTrans + ARRAY_INCR); - aVTrans = sqliteRealloc((void *)db->aVTrans, nBytes); + aVTrans = sqlite3DbRealloc(db, (void *)db->aVTrans, nBytes); if( !aVTrans ){ return SQLITE_NOMEM; } @@ -480,7 +514,7 @@ ** ** If an error occurs, *pzErr is set to point an an English language ** description of the error and an SQLITE_XXX error code is returned. -** In this case the caller must call sqliteFree() on *pzErr. +** In this case the caller must call sqlite3DbFree(db, ) on *pzErr. */ int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, char **pzErr){ int rc = SQLITE_OK; @@ -489,7 +523,7 @@ const char *zModule; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); - assert(pTab && pTab->isVirtual && !pTab->pVtab); + assert(pTab && (pTab->tabFlags & TF_Virtual)!=0 && !pTab->pVtab); pMod = pTab->pMod; zModule = pTab->azModuleArg[0]; @@ -498,13 +532,15 @@ ** error. Otherwise, do nothing. */ if( !pMod ){ - *pzErr = sqlite3MPrintf("no such module: %s", zModule); + *pzErr = sqlite3MPrintf(db, "no such module: %s", zModule); rc = SQLITE_ERROR; }else{ rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xCreate, pzErr); } - if( rc==SQLITE_OK && pTab->pVtab ){ + /* Justification of ALWAYS(): The xConstructor method is required to + ** create a valid sqlite3_vtab if it returns SQLITE_OK. */ + if( rc==SQLITE_OK && ALWAYS(pTab->pVtab) ){ rc = addToVTrans(db, pTab->pVtab); } @@ -517,46 +553,57 @@ ** virtual table module. 
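sqlite3_declare_vtab() is only legal from inside an xCreate or xConnect method, and the rewrite of it below wraps that work in the connection mutex and an allocated Parse context instead of a stack variable. A hedged sketch of the caller side; dummyConnect and the declared schema string are purely illustrative:

#include <string.h>
#include <sqlite3.h>

/* Hypothetical xConnect method: declare the column layout with
** sqlite3_declare_vtab() and hand back a zeroed sqlite3_vtab. */
static int dummyConnect(
  sqlite3 *db,
  void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab,
  char **pzErr
){
  int rc;
  (void)pAux; (void)argc; (void)argv; (void)pzErr;  /* unused in this sketch */
  rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a TEXT, b INTEGER)");
  if( rc!=SQLITE_OK ) return rc;
  *ppVtab = (sqlite3_vtab *)sqlite3_malloc(sizeof(**ppVtab));
  if( *ppVtab==0 ) return SQLITE_NOMEM;
  memset(*ppVtab, 0, sizeof(**ppVtab));
  return SQLITE_OK;
}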
*/ int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ - Parse sParse; + Parse *pParse; int rc = SQLITE_OK; - Table *pTab = db->pVTab; + Table *pTab; char *zErr = 0; + sqlite3_mutex_enter(db->mutex); + pTab = db->pVTab; if( !pTab ){ sqlite3Error(db, SQLITE_MISUSE, 0); + sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE; } - assert(pTab->isVirtual && pTab->nCol==0 && pTab->aCol==0); + assert((pTab->tabFlags & TF_Virtual)!=0 && pTab->nCol==0 && pTab->aCol==0); - memset(&sParse, 0, sizeof(Parse)); - sParse.declareVtab = 1; - sParse.db = db; - - if( - SQLITE_OK == sqlite3RunParser(&sParse, zCreateTable, &zErr) && - sParse.pNewTable && - !sParse.pNewTable->pSelect && - !sParse.pNewTable->isVirtual - ){ - pTab->aCol = sParse.pNewTable->aCol; - pTab->nCol = sParse.pNewTable->nCol; - sParse.pNewTable->nCol = 0; - sParse.pNewTable->aCol = 0; - db->pVTab = 0; - } else { - sqlite3Error(db, SQLITE_ERROR, zErr); - sqliteFree(zErr); - rc = SQLITE_ERROR; + pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); + if( pParse==0 ){ + rc = SQLITE_NOMEM; + }else{ + pParse->declareVtab = 1; + pParse->db = db; + + if( + SQLITE_OK == sqlite3RunParser(pParse, zCreateTable, &zErr) && + pParse->pNewTable && + !pParse->pNewTable->pSelect && + (pParse->pNewTable->tabFlags & TF_Virtual)==0 + ){ + pTab->aCol = pParse->pNewTable->aCol; + pTab->nCol = pParse->pNewTable->nCol; + pParse->pNewTable->nCol = 0; + pParse->pNewTable->aCol = 0; + db->pVTab = 0; + } else { + sqlite3Error(db, SQLITE_ERROR, zErr); + sqlite3DbFree(db, zErr); + rc = SQLITE_ERROR; + } + pParse->declareVtab = 0; + + if( pParse->pVdbe ){ + sqlite3VdbeFinalize(pParse->pVdbe); + } + sqlite3DeleteTable(pParse->pNewTable); + sqlite3StackFree(db, pParse); } - sParse.declareVtab = 0; - - sqlite3_finalize((sqlite3_stmt*)sParse.pVdbe); - sqlite3DeleteTable(sParse.pNewTable); - sParse.pNewTable = 0; assert( (rc&0xff)==rc ); - return sqlite3ApiExit(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; } /* @@ -566,22 +613,25 @@ ** ** This call is a no-op if zTab is not a virtual table. */ -int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab) -{ +int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab){ int rc = SQLITE_OK; Table *pTab; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); - assert(pTab); - if( pTab->pVtab ){ + if( ALWAYS(pTab!=0 && pTab->pVtab!=0) ){ int (*xDestroy)(sqlite3_vtab *pVTab) = pTab->pMod->pModule->xDestroy; rc = sqlite3SafetyOff(db); assert( rc==SQLITE_OK ); - if( xDestroy ){ - rc = xDestroy(pTab->pVtab); - } - sqlite3SafetyOn(db); + rc = xDestroy(pTab->pVtab); + (void)sqlite3SafetyOn(db); if( rc==SQLITE_OK ){ + int i; + for(i=0; inVTrans; i++){ + if( db->aVTrans[i]==pTab->pVtab ){ + db->aVTrans[i] = db->aVTrans[--db->nVTrans]; + break; + } + } pTab->pVtab = 0; } } @@ -600,40 +650,47 @@ static void callFinaliser(sqlite3 *db, int offset){ int i; if( db->aVTrans ){ - for(i=0; inVTrans && db->aVTrans[i]; i++){ + for(i=0; inVTrans; i++){ sqlite3_vtab *pVtab = db->aVTrans[i]; int (*x)(sqlite3_vtab *); + + assert( pVtab!=0 ); x = *(int (**)(sqlite3_vtab *))((char *)pVtab->pModule + offset); if( x ) x(pVtab); sqlite3VtabUnlock(db, pVtab); } - sqliteFree(db->aVTrans); + sqlite3DbFree(db, db->aVTrans); db->nVTrans = 0; db->aVTrans = 0; } } /* -** If argument rc2 is not SQLITE_OK, then return it and do nothing. -** Otherwise, invoke the xSync method of all virtual tables in the -** sqlite3.aVTrans array. 
Return the error code for the first error -** that occurs, or SQLITE_OK if all xSync operations are successful. +** Invoke the xSync method of all virtual tables in the sqlite3.aVTrans +** array. Return the error code for the first error that occurs, or +** SQLITE_OK if all xSync operations are successful. +** +** Set *pzErrmsg to point to a buffer that should be released using +** sqlite3DbFree() containing an error message, if one is available. */ -int sqlite3VtabSync(sqlite3 *db, int rc2){ +int sqlite3VtabSync(sqlite3 *db, char **pzErrmsg){ int i; int rc = SQLITE_OK; int rcsafety; sqlite3_vtab **aVTrans = db->aVTrans; - if( rc2!=SQLITE_OK ) return rc2; rc = sqlite3SafetyOff(db); db->aVTrans = 0; - for(i=0; rc==SQLITE_OK && inVTrans && aVTrans[i]; i++){ + for(i=0; rc==SQLITE_OK && inVTrans; i++){ sqlite3_vtab *pVtab = aVTrans[i]; int (*x)(sqlite3_vtab *); + assert( pVtab!=0 ); x = pVtab->pModule->xSync; if( x ){ rc = x(pVtab); + sqlite3DbFree(db, *pzErrmsg); + *pzErrmsg = pVtab->zErrMsg; + pVtab->zErrMsg = 0; } } db->aVTrans = aVTrans; @@ -650,7 +707,7 @@ ** sqlite3.aVTrans array. Then clear the array itself. */ int sqlite3VtabRollback(sqlite3 *db){ - callFinaliser(db, (int)(&((sqlite3_module *)0)->xRollback)); + callFinaliser(db, offsetof(sqlite3_module,xRollback)); return SQLITE_OK; } @@ -659,7 +716,7 @@ ** sqlite3.aVTrans array. Then clear the array itself. */ int sqlite3VtabCommit(sqlite3 *db){ - callFinaliser(db, (int)(&((sqlite3_module *)0)->xCommit)); + callFinaliser(db, offsetof(sqlite3_module,xCommit)); return SQLITE_OK; } @@ -680,7 +737,7 @@ ** virtual module xSync() callback. It is illegal to write to ** virtual module tables in this case, so return SQLITE_LOCKED. */ - if( 0==db->aVTrans && db->nVTrans>0 ){ + if( sqlite3VtabInSync(db) ){ return SQLITE_LOCKED; } if( !pVtab ){ @@ -693,7 +750,7 @@ /* If pVtab is already in the aVTrans array, return early */ - for(i=0; (inVTrans) && 0!=db->aVTrans[i]; i++){ + for(i=0; inVTrans; i++){ if( db->aVTrans[i]==pVtab ){ return SQLITE_OK; } @@ -701,11 +758,9 @@ /* Invoke the xBegin method */ rc = pModule->xBegin(pVtab); - if( rc!=SQLITE_OK ){ - return rc; + if( rc==SQLITE_OK ){ + rc = addToVTrans(db, pVtab); } - - rc = addToVTrans(db, pVtab); } return rc; } @@ -724,6 +779,7 @@ ** SQLITE_FUNC_EPHEM flag. 
*/ FuncDef *sqlite3VtabOverloadFunction( + sqlite3 *db, /* Database connection for reporting malloc problems */ FuncDef *pDef, /* Function to possibly overload */ int nArg, /* Number of arguments to the function */ Expr *pExpr /* First argument to the function */ @@ -731,51 +787,79 @@ Table *pTab; sqlite3_vtab *pVtab; sqlite3_module *pMod; - void (*xFunc)(sqlite3_context*,int,sqlite3_value**); - void *pArg; + void (*xFunc)(sqlite3_context*,int,sqlite3_value**) = 0; + void *pArg = 0; FuncDef *pNew; - int rc; + int rc = 0; char *zLowerName; unsigned char *z; /* Check to see the left operand is a column in a virtual table */ - if( pExpr==0 ) return pDef; + if( NEVER(pExpr==0) ) return pDef; if( pExpr->op!=TK_COLUMN ) return pDef; pTab = pExpr->pTab; - if( pTab==0 ) return pDef; - if( !pTab->isVirtual ) return pDef; + if( NEVER(pTab==0) ) return pDef; + if( (pTab->tabFlags & TF_Virtual)==0 ) return pDef; pVtab = pTab->pVtab; assert( pVtab!=0 ); assert( pVtab->pModule!=0 ); pMod = (sqlite3_module *)pVtab->pModule; if( pMod->xFindFunction==0 ) return pDef; - /* Call the xFuncFunction method on the virtual table implementation + /* Call the xFindFunction method on the virtual table implementation ** to see if the implementation wants to overload this function */ - zLowerName = sqlite3StrDup(pDef->zName); - for(z=(unsigned char*)zLowerName; *z; z++){ - *z = sqlite3UpperToLower[*z]; + zLowerName = sqlite3DbStrDup(db, pDef->zName); + if( zLowerName ){ + for(z=(unsigned char*)zLowerName; *z; z++){ + *z = sqlite3UpperToLower[*z]; + } + rc = pMod->xFindFunction(pVtab, nArg, zLowerName, &xFunc, &pArg); + sqlite3DbFree(db, zLowerName); } - rc = pMod->xFindFunction(pVtab, nArg, zLowerName, &xFunc, &pArg); - sqliteFree(zLowerName); if( rc==0 ){ return pDef; } /* Create a new ephemeral function definition for the overloaded ** function */ - pNew = sqliteMalloc( sizeof(*pNew) + strlen(pDef->zName) ); + pNew = sqlite3DbMallocZero(db, sizeof(*pNew) + + sqlite3Strlen30(pDef->zName) ); if( pNew==0 ){ return pDef; } *pNew = *pDef; - memcpy(pNew->zName, pDef->zName, strlen(pDef->zName)+1); + pNew->zName = (char *)&pNew[1]; + memcpy(pNew->zName, pDef->zName, sqlite3Strlen30(pDef->zName)+1); pNew->xFunc = xFunc; pNew->pUserData = pArg; pNew->flags |= SQLITE_FUNC_EPHEM; return pNew; } +/* +** Make sure virtual table pTab is contained in the pParse->apVirtualLock[] +** array so that an OP_VBegin will get generated for it. Add pTab to the +** array if it is missing. If pTab is already in the array, this routine +** is a no-op. +*/ +void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ + int i, n; + Table **apVtabLock; + + assert( IsVirtual(pTab) ); + for(i=0; inVtabLock; i++){ + if( pTab==pParse->apVtabLock[i] ) return; + } + n = (pParse->nVtabLock+1)*sizeof(pParse->apVtabLock[0]); + apVtabLock = sqlite3_realloc(pParse->apVtabLock, n); + if( apVtabLock ){ + pParse->apVtabLock = apVtabLock; + pParse->apVtabLock[pParse->nVtabLock++] = pTab; + }else{ + pParse->db->mallocFailed = 1; + } +} + #endif /* SQLITE_OMIT_VIRTUALTABLE */ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/walker.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/walker.c --- sqlite3-3.4.2/src/walker.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/src/walker.c 2009-06-25 12:39:54.000000000 +0100 @@ -0,0 +1,138 @@ +/* +** 2008 August 16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. 
+** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains routines used for walking the parser tree for +** an SQL statement. +** +** $Id: walker.c,v 1.7 2009/06/15 23:15:59 drh Exp $ +*/ +#include "sqliteInt.h" +#include +#include + + +/* +** Walk an expression tree. Invoke the callback once for each node +** of the expression, while decending. (In other words, the callback +** is invoked before visiting children.) +** +** The return value from the callback should be one of the WRC_* +** constants to specify how to proceed with the walk. +** +** WRC_Continue Continue descending down the tree. +** +** WRC_Prune Do not descend into child nodes. But allow +** the walk to continue with sibling nodes. +** +** WRC_Abort Do no more callbacks. Unwind the stack and +** return the top-level walk call. +** +** The return value from this routine is WRC_Abort to abandon the tree walk +** and WRC_Continue to continue. +*/ +int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ + int rc; + if( pExpr==0 ) return WRC_Continue; + testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); + testcase( ExprHasProperty(pExpr, EP_Reduced) ); + rc = pWalker->xExprCallback(pWalker, pExpr); + if( rc==WRC_Continue + && !ExprHasAnyProperty(pExpr,EP_TokenOnly) ){ + if( sqlite3WalkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, pExpr->pRight) ) return WRC_Abort; + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort; + }else{ + if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort; + } + } + return rc & WRC_Abort; +} + +/* +** Call sqlite3WalkExpr() for every expression in list p or until +** an abort request is seen. +*/ +int sqlite3WalkExprList(Walker *pWalker, ExprList *p){ + int i; + struct ExprList_item *pItem; + if( p ){ + for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ + if( sqlite3WalkExpr(pWalker, pItem->pExpr) ) return WRC_Abort; + } + } + return WRC_Continue; +} + +/* +** Walk all expressions associated with SELECT statement p. Do +** not invoke the SELECT callback on p, but do (of course) invoke +** any expr callbacks and SELECT callbacks that come from subqueries. +** Return WRC_Abort or WRC_Continue. +*/ +int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ + if( sqlite3WalkExprList(pWalker, p->pEList) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pWhere) ) return WRC_Abort; + if( sqlite3WalkExprList(pWalker, p->pGroupBy) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort; + if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pOffset) ) return WRC_Abort; + return WRC_Continue; +} + +/* +** Walk the parse trees associated with all subqueries in the +** FROM clause of SELECT statement p. Do not invoke the select +** callback on p, but do invoke it on each FROM clause subquery +** and on any subqueries further down in the tree. Return +** WRC_Abort or WRC_Continue; +*/ +int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ + SrcList *pSrc; + int i; + struct SrcList_item *pItem; + + pSrc = p->pSrc; + if( ALWAYS(pSrc) ){ + for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ + if( sqlite3WalkSelect(pWalker, pItem->pSelect) ){ + return WRC_Abort; + } + } + } + return WRC_Continue; +} + +/* +** Call sqlite3WalkExpr() for every expression in Select statement p. 
+** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and +** on the compound select chain, p->pPrior. +** +** Return WRC_Continue under normal conditions. Return WRC_Abort if +** there is an abort request. +** +** If the Walker does not have an xSelectCallback() then this routine +** is a no-op returning WRC_Continue. +*/ +int sqlite3WalkSelect(Walker *pWalker, Select *p){ + int rc; + if( p==0 || pWalker->xSelectCallback==0 ) return WRC_Continue; + rc = WRC_Continue; + while( p ){ + rc = pWalker->xSelectCallback(pWalker, p); + if( rc ) break; + if( sqlite3WalkSelectExpr(pWalker, p) ) return WRC_Abort; + if( sqlite3WalkSelectFrom(pWalker, p) ) return WRC_Abort; + p = p->pPrior; + } + return rc & WRC_Abort; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/src/where.c /tmp/3ARg2Grji7/sqlite3-3.6.16/src/where.c --- sqlite3-3.4.2/src/where.c 2007-07-30 21:36:28.000000000 +0100 +++ sqlite3-3.6.16/src/where.c 2009-06-26 14:01:54.000000000 +0100 @@ -10,27 +10,24 @@ ** ************************************************************************* ** This module contains C code that generates VDBE code used to process -** the WHERE clause of SQL statements. This module is reponsible for +** the WHERE clause of SQL statements. This module is responsible for ** generating the code that loops through a table looking for applicable ** rows. Indices are selected and used to speed the search when doing ** so is applicable. Because this module is responsible for selecting ** indices, you might also think of this module as the "query optimizer". ** -** $Id: where.c,v 1.254 2007/07/30 14:40:48 danielk1977 Exp $ +** $Id: where.c,v 1.408 2009/06/16 14:15:22 shane Exp $ */ #include "sqliteInt.h" /* -** The number of bits in a Bitmask. "BMS" means "BitMask Size". -*/ -#define BMS (sizeof(Bitmask)*8) - -/* ** Trace output macros */ #if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -int sqlite3_where_trace = 0; -# define WHERETRACE(X) if(sqlite3_where_trace) sqlite3DebugPrintf X +int sqlite3WhereTrace = 0; +#endif +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) +# define WHERETRACE(X) if(sqlite3WhereTrace) sqlite3DebugPrintf X #else # define WHERETRACE(X) #endif @@ -38,12 +35,16 @@ /* Forward reference */ typedef struct WhereClause WhereClause; -typedef struct ExprMaskSet ExprMaskSet; +typedef struct WhereMaskSet WhereMaskSet; +typedef struct WhereOrInfo WhereOrInfo; +typedef struct WhereAndInfo WhereAndInfo; +typedef struct WhereCost WhereCost; /* ** The query generator uses an array of instances of this structure to ** help it analyze the subexpressions of the WHERE clause. Each WHERE -** clause subexpression is separated from the others by an AND operator. +** clause subexpression is separated from the others by AND operators, +** usually, or sometimes subexpressions separated by OR. ** ** All WhereTerms are collected into a single WhereClause structure. ** The following identity holds: @@ -55,46 +56,69 @@ ** X ** ** where X is a column name and is one of certain operators, -** then WhereTerm.leftCursor and WhereTerm.leftColumn record the -** cursor number and column number for X. WhereTerm.operator records +** then WhereTerm.leftCursor and WhereTerm.u.leftColumn record the +** cursor number and column number for X. WhereTerm.eOperator records ** the using a bitmask encoding defined by WO_xxx below. The ** use of a bitmask encoding for the operator allows us to search ** quickly for terms that match any of several different operators. 
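The new walker.c routines above are a plain pre-order traversal steered by the callback's return code: WRC_Continue descends, WRC_Prune skips the children, WRC_Abort unwinds the whole walk. The protocol is easiest to see on a toy binary tree; TNode and TWalker are illustrative stand-ins, not SQLite's Expr and Walker types:

enum { WRC_Continue = 0, WRC_Prune = 1, WRC_Abort = 2 };

typedef struct TNode TNode;
struct TNode { int val; TNode *pLeft, *pRight; };

typedef struct TWalker TWalker;
struct TWalker { int (*xCallback)(TWalker*, TNode*); };

/* Invoke the callback on pNode before visiting its children, the same
** order sqlite3WalkExpr() uses. */
static int walkTree(TWalker *pW, TNode *pNode){
  int rc;
  if( pNode==0 ) return WRC_Continue;
  rc = pW->xCallback(pW, pNode);
  if( rc==WRC_Continue ){
    if( walkTree(pW, pNode->pLeft) ) return WRC_Abort;
    if( walkTree(pW, pNode->pRight) ) return WRC_Abort;
  }
  return rc & WRC_Abort;      /* WRC_Prune is reported upward as Continue */
}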
** -** prereqRight and prereqAll record sets of cursor numbers, -** but they do so indirectly. A single ExprMaskSet structure translates +** A WhereTerm might also be two or more subterms connected by OR: +** +** (t1.X ) OR (t1.Y ) OR .... +** +** In this second case, wtFlag as the TERM_ORINFO set and eOperator==WO_OR +** and the WhereTerm.u.pOrInfo field points to auxiliary information that +** is collected about the +** +** If a term in the WHERE clause does not match either of the two previous +** categories, then eOperator==0. The WhereTerm.pExpr field is still set +** to the original subexpression content and wtFlags is set up appropriately +** but no other fields in the WhereTerm object are meaningful. +** +** When eOperator!=0, prereqRight and prereqAll record sets of cursor numbers, +** but they do so indirectly. A single WhereMaskSet structure translates ** cursor number into bits and the translated bit is stored in the prereq ** fields. The translation is used in order to maximize the number of ** bits that will fit in a Bitmask. The VDBE cursor numbers might be ** spread out over the non-negative integers. For example, the cursor -** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The ExprMaskSet +** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The WhereMaskSet ** translates these sparse cursor numbers into consecutive integers ** beginning with 0 in order to make the best possible use of the available ** bits in the Bitmask. So, in the example above, the cursor numbers ** would be mapped into integers 0 through 7. +** +** The number of terms in a join is limited by the number of bits +** in prereqRight and prereqAll. The default is 64 bits, hence SQLite +** is only able to process joins with 64 or fewer tables. */ typedef struct WhereTerm WhereTerm; struct WhereTerm { - Expr *pExpr; /* Pointer to the subexpression */ - i16 iParent; /* Disable pWC->a[iParent] when this term disabled */ - i16 leftCursor; /* Cursor number of X in "X " */ - i16 leftColumn; /* Column number of X in "X " */ + Expr *pExpr; /* Pointer to the subexpression that is this term */ + int iParent; /* Disable pWC->a[iParent] when this term disabled */ + int leftCursor; /* Cursor number of X in "X " */ + union { + int leftColumn; /* Column number of X in "X " */ + WhereOrInfo *pOrInfo; /* Extra information if eOperator==WO_OR */ + WhereAndInfo *pAndInfo; /* Extra information if eOperator==WO_AND */ + } u; u16 eOperator; /* A WO_xx value describing */ - u8 flags; /* Bit flags. See below */ + u8 wtFlags; /* TERM_xxx bit flags. See below */ u8 nChild; /* Number of children that must disable us */ WhereClause *pWC; /* The clause this term is part of */ - Bitmask prereqRight; /* Bitmask of tables used by pRight */ - Bitmask prereqAll; /* Bitmask of tables referenced by p */ + Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */ + Bitmask prereqAll; /* Bitmask of tables referenced by pExpr */ }; /* -** Allowed values of WhereTerm.flags +** Allowed values of WhereTerm.wtFlags */ -#define TERM_DYNAMIC 0x01 /* Need to call sqlite3ExprDelete(pExpr) */ +#define TERM_DYNAMIC 0x01 /* Need to call sqlite3ExprDelete(db, pExpr) */ #define TERM_VIRTUAL 0x02 /* Added by the optimizer. 
Do not code */ #define TERM_CODED 0x04 /* This term is already coded */ #define TERM_COPIED 0x08 /* Has a child */ -#define TERM_OR_OK 0x10 /* Used during OR-clause processing */ +#define TERM_ORINFO 0x10 /* Need to free the WhereTerm.u.pOrInfo object */ +#define TERM_ANDINFO 0x20 /* Need to free the WhereTerm.u.pAndInfo obj */ +#define TERM_OR_OK 0x40 /* Used during OR-clause processing */ /* ** An instance of the following structure holds all information about a @@ -102,11 +126,34 @@ */ struct WhereClause { Parse *pParse; /* The parser context */ - ExprMaskSet *pMaskSet; /* Mapping of table indices to bitmasks */ + WhereMaskSet *pMaskSet; /* Mapping of table cursor numbers to bitmasks */ + Bitmask vmask; /* Bitmask identifying virtual table cursors */ + u8 op; /* Split operator. TK_AND or TK_OR */ int nTerm; /* Number of terms */ int nSlot; /* Number of entries in a[] */ WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ - WhereTerm aStatic[10]; /* Initial static space for a[] */ +#if defined(SQLITE_SMALL_STACK) + WhereTerm aStatic[1]; /* Initial static space for a[] */ +#else + WhereTerm aStatic[8]; /* Initial static space for a[] */ +#endif +}; + +/* +** A WhereTerm with eOperator==WO_OR has its u.pOrInfo pointer set to +** a dynamically allocated instance of the following structure. +*/ +struct WhereOrInfo { + WhereClause wc; /* Decomposition into subterms */ + Bitmask indexable; /* Bitmask of all indexable tables in the clause */ +}; + +/* +** A WhereTerm with eOperator==WO_AND has its u.pAndInfo pointer set to +** a dynamically allocated instance of the following structure. +*/ +struct WhereAndInfo { + WhereClause wc; /* The subexpression broken out */ }; /* @@ -121,11 +168,11 @@ ** from the sparse cursor numbers into consecutive integers beginning ** with 0. ** -** If ExprMaskSet.ix[A]==B it means that The A-th bit of a Bitmask +** If WhereMaskSet.ix[A]==B it means that The A-th bit of a Bitmask ** corresponds VDBE cursor number B. The A-th bit of a bitmask is 1<EXPR */ -#define WHERE_COLUMN_EQ 0x001000 /* x=EXPR or x IN (...) */ -#define WHERE_COLUMN_RANGE 0x002000 /* xEXPR */ -#define WHERE_COLUMN_IN 0x004000 /* x IN (...) */ -#define WHERE_TOP_LIMIT 0x010000 /* xEXPR or x>=EXPR constraint */ -#define WHERE_IDX_ONLY 0x080000 /* Use index only - omit table */ -#define WHERE_ORDERBY 0x100000 /* Output will appear in correct order */ -#define WHERE_REVERSE 0x200000 /* Scan in reverse order */ -#define WHERE_UNIQUE 0x400000 /* Selects no more than one row */ -#define WHERE_VIRTUALTABLE 0x800000 /* Use virtual-table processing */ +#define WHERE_ROWID_EQ 0x00001000 /* rowid=EXPR or rowid IN (...) */ +#define WHERE_ROWID_RANGE 0x00002000 /* rowidEXPR */ +#define WHERE_COLUMN_EQ 0x00010000 /* x=EXPR or x IN (...) or x IS NULL */ +#define WHERE_COLUMN_RANGE 0x00020000 /* xEXPR */ +#define WHERE_COLUMN_IN 0x00040000 /* x IN (...) 
*/ +#define WHERE_COLUMN_NULL 0x00080000 /* x IS NULL */ +#define WHERE_INDEXED 0x000f0000 /* Anything that uses an index */ +#define WHERE_IN_ABLE 0x000f1000 /* Able to support an IN operator */ +#define WHERE_TOP_LIMIT 0x00100000 /* xEXPR or x>=EXPR constraint */ +#define WHERE_IDX_ONLY 0x00800000 /* Use index only - omit table */ +#define WHERE_ORDERBY 0x01000000 /* Output will appear in correct order */ +#define WHERE_REVERSE 0x02000000 /* Scan in reverse order */ +#define WHERE_UNIQUE 0x04000000 /* Selects no more than one row */ +#define WHERE_VIRTUALTABLE 0x08000000 /* Use virtual-table processing */ +#define WHERE_MULTI_OR 0x10000000 /* OR using multiple indices */ /* ** Initialize a preallocated WhereClause structure. @@ -185,13 +252,33 @@ static void whereClauseInit( WhereClause *pWC, /* The WhereClause to be initialized */ Parse *pParse, /* The parsing context */ - ExprMaskSet *pMaskSet /* Mapping from table indices to bitmasks */ + WhereMaskSet *pMaskSet /* Mapping from table cursor numbers to bitmasks */ ){ pWC->pParse = pParse; pWC->pMaskSet = pMaskSet; pWC->nTerm = 0; pWC->nSlot = ArraySize(pWC->aStatic); pWC->a = pWC->aStatic; + pWC->vmask = 0; +} + +/* Forward reference */ +static void whereClauseClear(WhereClause*); + +/* +** Deallocate all memory associated with a WhereOrInfo object. +*/ +static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){ + whereClauseClear(&p->wc); + sqlite3DbFree(db, p); +} + +/* +** Deallocate all memory associated with a WhereAndInfo object. +*/ +static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){ + whereClauseClear(&p->wc); + sqlite3DbFree(db, p); } /* @@ -201,50 +288,64 @@ static void whereClauseClear(WhereClause *pWC){ int i; WhereTerm *a; + sqlite3 *db = pWC->pParse->db; for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){ - if( a->flags & TERM_DYNAMIC ){ - sqlite3ExprDelete(a->pExpr); + if( a->wtFlags & TERM_DYNAMIC ){ + sqlite3ExprDelete(db, a->pExpr); + } + if( a->wtFlags & TERM_ORINFO ){ + whereOrInfoDelete(db, a->u.pOrInfo); + }else if( a->wtFlags & TERM_ANDINFO ){ + whereAndInfoDelete(db, a->u.pAndInfo); } } if( pWC->a!=pWC->aStatic ){ - sqliteFree(pWC->a); + sqlite3DbFree(db, pWC->a); } } /* -** Add a new entries to the WhereClause structure. Increase the allocated -** space as necessary. +** Add a single new WhereTerm entry to the WhereClause object pWC. +** The new WhereTerm object is constructed from Expr p and with wtFlags. +** The index in pWC->a[] of the new WhereTerm is returned on success. +** 0 is returned if the new WhereTerm could not be added due to a memory +** allocation error. The memory allocation failure will be recorded in +** the db->mallocFailed flag so that higher-level functions can detect it. ** -** If the flags argument includes TERM_DYNAMIC, then responsibility -** for freeing the expression p is assumed by the WhereClause object. +** This routine will increase the size of the pWC->a[] array as necessary. +** +** If the wtFlags argument includes TERM_DYNAMIC, then responsibility +** for freeing the expression p is assumed by the WhereClause object pWC. +** This is true even if this routine fails to allocate a new WhereTerm. ** ** WARNING: This routine might reallocate the space used to store -** WhereTerms. All pointers to WhereTerms should be invalided after +** WhereTerms. All pointers to WhereTerms should be invalidated after ** calling this routine. Such pointers may be reinitialized by referencing ** the pWC->a[] array. 
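whereClauseInsert(), defined just below, grows the term array with a pattern that recurs throughout this file: entries start in the aStatic[] space embedded in the owning structure and move to heap memory only on overflow, with the static block simply abandoned rather than freed. A generic sketch of the same idea; IntVec is a toy type, not the WhereTerm machinery:

#include <stdlib.h>
#include <string.h>

typedef struct IntVec IntVec;
struct IntVec {
  int *a;          /* Current storage: aStatic[] at first, heap later */
  int n;           /* Entries in use */
  int nAlloc;      /* Entries allocated */
  int aStatic[8];  /* Initial static space */
};

static void vecInit(IntVec *p){
  p->a = p->aStatic;
  p->n = 0;
  p->nAlloc = 8;
}

/* Append v, doubling the allocation when the current space is full.
** Returns 0 on a malloc failure, 1 on success. */
static int vecAppend(IntVec *p, int v){
  if( p->n>=p->nAlloc ){
    int *aNew = malloc(sizeof(int)*p->nAlloc*2);
    if( aNew==0 ) return 0;
    memcpy(aNew, p->a, sizeof(int)*p->n);
    if( p->a!=p->aStatic ) free(p->a);
    p->a = aNew;
    p->nAlloc *= 2;
  }
  p->a[p->n++] = v;
  return 1;
}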
*/ -static int whereClauseInsert(WhereClause *pWC, Expr *p, int flags){ +static int whereClauseInsert(WhereClause *pWC, Expr *p, u8 wtFlags){ WhereTerm *pTerm; int idx; if( pWC->nTerm>=pWC->nSlot ){ WhereTerm *pOld = pWC->a; - pWC->a = sqliteMalloc( sizeof(pWC->a[0])*pWC->nSlot*2 ); + sqlite3 *db = pWC->pParse->db; + pWC->a = sqlite3DbMallocRaw(db, sizeof(pWC->a[0])*pWC->nSlot*2 ); if( pWC->a==0 ){ - if( flags & TERM_DYNAMIC ){ - sqlite3ExprDelete(p); + if( wtFlags & TERM_DYNAMIC ){ + sqlite3ExprDelete(db, p); } + pWC->a = pOld; return 0; } memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm); if( pOld!=pWC->aStatic ){ - sqliteFree(pOld); + sqlite3DbFree(db, pOld); } - pWC->nSlot *= 2; + pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]); } - pTerm = &pWC->a[idx = pWC->nTerm]; - pWC->nTerm++; + pTerm = &pWC->a[idx = pWC->nTerm++]; pTerm->pExpr = p; - pTerm->flags = flags; + pTerm->wtFlags = wtFlags; pTerm->pWC = pWC; pTerm->iParent = -1; return idx; @@ -264,10 +365,11 @@ ** does is make slot[] entries point to substructure within pExpr. ** ** In the previous sentence and in the diagram, "slot[]" refers to -** the WhereClause.a[] array. This array grows as needed to contain +** the WhereClause.a[] array. The slot[] array grows as needed to contain ** all terms of the WHERE clause. */ static void whereSplit(WhereClause *pWC, Expr *pExpr, int op){ + pWC->op = (u8)op; if( pExpr==0 ) return; if( pExpr->op!=op ){ whereClauseInsert(pWC, pExpr, 0); @@ -278,7 +380,7 @@ } /* -** Initialize an expression mask set +** Initialize an expression mask set (a WhereMaskSet object) */ #define initMaskSet(P) memset(P, 0, sizeof(*P)) @@ -286,8 +388,9 @@ ** Return the bitmask for the given cursor number. Return 0 if ** iCursor is not in the set. */ -static Bitmask getMask(ExprMaskSet *pMaskSet, int iCursor){ +static Bitmask getMask(WhereMaskSet *pMaskSet, int iCursor){ int i; + assert( pMaskSet->n<=sizeof(Bitmask)*8 ); for(i=0; in; i++){ if( pMaskSet->ix[i]==iCursor ){ return ((Bitmask)1)<ix[] ** array will never overflow. */ -static void createMask(ExprMaskSet *pMaskSet, int iCursor){ +static void createMask(WhereMaskSet *pMaskSet, int iCursor){ assert( pMaskSet->n < ArraySize(pMaskSet->ix) ); pMaskSet->ix[pMaskSet->n++] = iCursor; } @@ -315,17 +418,17 @@ ** tree. ** ** In order for this routine to work, the calling function must have -** previously invoked sqlite3ExprResolveNames() on the expression. See +** previously invoked sqlite3ResolveExprNames() on the expression. See ** the header comment on that routine for additional information. -** The sqlite3ExprResolveNames() routines looks for column names and +** The sqlite3ResolveExprNames() routines looks for column names and ** sets their opcodes to TK_COLUMN and their Expr.iTable fields to ** the VDBE cursor number of the table. This routine just has to ** translate the cursor numbers into bitmask values and OR all ** the bitmasks together. 
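The WhereMaskSet translation described above keeps the Bitmask dense: the first distinct cursor number seen gets bit 0, the next bit 1, and so on, so sparse VDBE cursor numbers such as 3, 8, 41, 45 still fit in a 64-bit mask. A stripped-down equivalent of createMask()/getMask(), mirroring the routines in the hunk above with illustrative names:

typedef unsigned long long Bitmask64;

/* Map sparse cursor numbers to consecutive bit positions. */
typedef struct MaskSet MaskSet;
struct MaskSet {
  int n;        /* Number of cursors mapped so far */
  int ix[64];   /* ix[i] is the cursor number assigned to bit i */
};

static void createMask64(MaskSet *p, int iCursor){
  p->ix[p->n++] = iCursor;         /* caller guarantees no overflow */
}

static Bitmask64 getMask64(const MaskSet *p, int iCursor){
  int i;
  for(i=0; i<p->n; i++){
    if( p->ix[i]==iCursor ) return ((Bitmask64)1)<<i;
  }
  return 0;                        /* cursor not in the set */
}

/* After createMask64(&s,3); createMask64(&s,41); getMask64(&s,41)==2. */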
*/ -static Bitmask exprListTableUsage(ExprMaskSet*, ExprList*); -static Bitmask exprSelectTableUsage(ExprMaskSet*, Select*); -static Bitmask exprTableUsage(ExprMaskSet *pMaskSet, Expr *p){ +static Bitmask exprListTableUsage(WhereMaskSet*, ExprList*); +static Bitmask exprSelectTableUsage(WhereMaskSet*, Select*); +static Bitmask exprTableUsage(WhereMaskSet *pMaskSet, Expr *p){ Bitmask mask = 0; if( p==0 ) return 0; if( p->op==TK_COLUMN ){ @@ -334,11 +437,14 @@ } mask = exprTableUsage(pMaskSet, p->pRight); mask |= exprTableUsage(pMaskSet, p->pLeft); - mask |= exprListTableUsage(pMaskSet, p->pList); - mask |= exprSelectTableUsage(pMaskSet, p->pSelect); + if( ExprHasProperty(p, EP_xIsSelect) ){ + mask |= exprSelectTableUsage(pMaskSet, p->x.pSelect); + }else{ + mask |= exprListTableUsage(pMaskSet, p->x.pList); + } return mask; } -static Bitmask exprListTableUsage(ExprMaskSet *pMaskSet, ExprList *pList){ +static Bitmask exprListTableUsage(WhereMaskSet *pMaskSet, ExprList *pList){ int i; Bitmask mask = 0; if( pList ){ @@ -348,16 +454,15 @@ } return mask; } -static Bitmask exprSelectTableUsage(ExprMaskSet *pMaskSet, Select *pS){ - Bitmask mask; - if( pS==0 ){ - mask = 0; - }else{ - mask = exprListTableUsage(pMaskSet, pS->pEList); +static Bitmask exprSelectTableUsage(WhereMaskSet *pMaskSet, Select *pS){ + Bitmask mask = 0; + while( pS ){ + mask |= exprListTableUsage(pMaskSet, pS->pEList); mask |= exprListTableUsage(pMaskSet, pS->pGroupBy); mask |= exprListTableUsage(pMaskSet, pS->pOrderBy); mask |= exprTableUsage(pMaskSet, pS->pWhere); mask |= exprTableUsage(pMaskSet, pS->pHaving); + pS = pS->pPrior; } return mask; } @@ -376,12 +481,12 @@ } /* -** Swap two objects of type T. +** Swap two objects of type TYPE. */ #define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} /* -** Commute a comparision operator. Expressions of the form "X op Y" +** Commute a comparison operator. Expressions of the form "X op Y" ** are converted into "Y op X". ** ** If a collation sequence is associated with either the left or right @@ -392,10 +497,12 @@ ** attached to the right. For the same reason the EP_ExpCollate flag ** is not commuted. */ -static void exprCommute(Expr *pExpr){ +static void exprCommute(Parse *pParse, Expr *pExpr){ u16 expRight = (pExpr->pRight->flags & EP_ExpCollate); u16 expLeft = (pExpr->pLeft->flags & EP_ExpCollate); assert( allowedOp(pExpr->op) && pExpr->op!=TK_IN ); + pExpr->pRight->pColl = sqlite3ExprCollSeq(pParse, pExpr->pRight); + pExpr->pLeft->pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); SWAP(CollSeq*,pExpr->pRight->pColl,pExpr->pLeft->pColl); pExpr->pRight->flags = (pExpr->pRight->flags & ~EP_ExpCollate) | expLeft; pExpr->pLeft->flags = (pExpr->pLeft->flags & ~EP_ExpCollate) | expRight; @@ -413,15 +520,16 @@ /* ** Translate from TK_xx operator to WO_xx bitmask. 
*/ -static int operatorMask(int op){ - int c; +static u16 operatorMask(int op){ + u16 c; assert( allowedOp(op) ); if( op==TK_IN ){ c = WO_IN; }else if( op==TK_ISNULL ){ c = WO_ISNULL; }else{ - c = WO_EQ<<(op-TK_EQ); + assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); + c = (u16)(WO_EQ<<(op-TK_EQ)); } assert( op!=TK_ISNULL || c==WO_ISNULL ); assert( op!=TK_IN || c==WO_IN ); @@ -444,18 +552,20 @@ int iCur, /* Cursor number of LHS */ int iColumn, /* Column number of LHS */ Bitmask notReady, /* RHS must not overlap with this mask */ - u16 op, /* Mask of WO_xx values describing operator */ + u32 op, /* Mask of WO_xx values describing operator */ Index *pIdx /* Must be compatible with this index, if not NULL */ ){ WhereTerm *pTerm; int k; + assert( iCur>=0 ); + op &= WO_ALL; for(pTerm=pWC->a, k=pWC->nTerm; k; k--, pTerm++){ if( pTerm->leftCursor==iCur && (pTerm->prereqRight & notReady)==0 - && pTerm->leftColumn==iColumn + && pTerm->u.leftColumn==iColumn && (pTerm->eOperator & op)!=0 ){ - if( iCur>=0 && pIdx && pTerm->eOperator!=WO_ISNULL ){ + if( pIdx && pTerm->eOperator!=WO_ISNULL ){ Expr *pX = pTerm->pExpr; CollSeq *pColl; char idxaff; @@ -471,13 +581,12 @@ */ assert(pX->pLeft); pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); - if( !pColl ){ - pColl = pParse->db->pDfltColl; - } + assert(pColl || pParse->nErr); - for(j=0; jnColumn && pIdx->aiColumn[j]!=iColumn; j++){} - assert( jnColumn ); - if( sqlite3StrICmp(pColl->zName, pIdx->azColl[j]) ) continue; + for(j=0; pIdx->aiColumn[j]!=iColumn; j++){ + if( NEVER(j>=pIdx->nColumn) ) return 0; + } + if( pColl && sqlite3StrICmp(pColl->zName, pIdx->azColl[j]) ) continue; } return pTerm; } @@ -513,23 +622,28 @@ ** literal that does not begin with a wildcard. */ static int isLikeOrGlob( - sqlite3 *db, /* The database */ + Parse *pParse, /* Parsing and code generating context */ Expr *pExpr, /* Test this expression */ int *pnPattern, /* Number of non-wildcard prefix characters */ - int *pisComplete /* True if the only wildcard is % in the last character */ + int *pisComplete, /* True if the only wildcard is % in the last character */ + int *pnoCase /* True if uppercase is equivalent to lowercase */ ){ - const char *z; - Expr *pRight, *pLeft; - ExprList *pList; - int c, cnt; - int noCase; - char wc[3]; - CollSeq *pColl; + const char *z; /* String on RHS of LIKE operator */ + Expr *pRight, *pLeft; /* Right and left size of LIKE operator */ + ExprList *pList; /* List of operands to the LIKE operator */ + int c; /* One character in z[] */ + int cnt; /* Number of non-wildcard prefix characters */ + char wc[3]; /* Wildcard characters */ + CollSeq *pColl; /* Collating sequence for LHS */ + sqlite3 *db = pParse->db; /* Database connection */ - if( !sqlite3IsLikeFunction(db, pExpr, &noCase, wc) ){ + if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, wc) ){ return 0; } - pList = pExpr->pList; +#ifdef SQLITE_EBCDIC + if( *pnoCase ) return 0; +#endif + pList = pExpr->x.pList; pRight = pList->a[0].pExpr; if( pRight->op!=TK_STRING ){ return 0; @@ -538,27 +652,27 @@ if( pLeft->op!=TK_COLUMN ){ return 0; } - pColl = pLeft->pColl; - if( pColl==0 ){ - /* TODO: Coverage testing doesn't get this case. Is it actually possible - ** for an expression of type TK_COLUMN to not have an assigned collation - ** sequence at this point? 
- */ - pColl = db->pDfltColl; - } - if( (pColl->type!=SQLITE_COLL_BINARY || noCase) && - (pColl->type!=SQLITE_COLL_NOCASE || !noCase) ){ + pColl = sqlite3ExprCollSeq(pParse, pLeft); + assert( pColl!=0 || pLeft->iColumn==-1 ); + if( pColl==0 ) return 0; + if( (pColl->type!=SQLITE_COLL_BINARY || *pnoCase) && + (pColl->type!=SQLITE_COLL_NOCASE || !*pnoCase) ){ return 0; } - sqlite3DequoteExpr(pRight); - z = (char *)pRight->token.z; - for(cnt=0; (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2]; cnt++){} - if( cnt==0 || 255==(u8)z[cnt] ){ - return 0; + if( sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT ) return 0; + z = pRight->u.zToken; + if( ALWAYS(z) ){ + cnt = 0; + while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){ + cnt++; + } + if( cnt!=0 && c!=0 && 255!=(u8)z[cnt-1] ){ + *pisComplete = z[cnt]==wc[0] && z[cnt+1]==0; + *pnPattern = cnt; + return 1; + } } - *pisComplete = z[cnt]==wc[0] && z[cnt+1]==0; - *pnPattern = cnt; - return 1; + return 0; } #endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ @@ -579,11 +693,10 @@ if( pExpr->op!=TK_FUNCTION ){ return 0; } - if( pExpr->token.n!=5 || - sqlite3StrNICmp((const char*)pExpr->token.z,"match",5)!=0 ){ + if( sqlite3StrICmp(pExpr->u.zToken,"match")!=0 ){ return 0; } - pList = pExpr->pList; + pList = pExpr->x.pList; if( pList->nExpr!=2 ){ return 0; } @@ -605,91 +718,313 @@ #if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) /* -** Return TRUE if the given term of an OR clause can be converted -** into an IN clause. The iCursor and iColumn define the left-hand -** side of the IN clause. +** Analyze a term that consists of two or more OR-connected +** subterms. So in: +** +** ... WHERE (a=5) AND (b=7 OR c=9 OR d=13) AND (d=13) +** ^^^^^^^^^^^^^^^^^^^^ +** +** This routine analyzes terms such as the middle term in the above example. +** A WhereOrTerm object is computed and attached to the term under +** analysis, regardless of the outcome of the analysis. Hence: +** +** WhereTerm.wtFlags |= TERM_ORINFO +** WhereTerm.u.pOrInfo = a dynamically allocated WhereOrTerm object +** +** The term being analyzed must have two or more of OR-connected subterms. +** A single subterm might be a set of AND-connected sub-subterms. +** Examples of terms under analysis: +** +** (A) t1.x=t2.y OR t1.x=t2.z OR t1.y=15 OR t1.z=t3.a+5 +** (B) x=expr1 OR expr2=x OR x=expr3 +** (C) t1.x=t2.y OR (t1.x=t2.z AND t1.y=15) +** (D) x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*') +** (E) (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6) +** +** CASE 1: +** +** If all subterms are of the form T.C=expr for some single column of C +** a single table T (as shown in example B above) then create a new virtual +** term that is an equivalent IN expression. In other words, if the term +** being analyzed is: +** +** x = expr1 OR expr2 = x OR x = expr3 +** +** then create a new virtual term like this: +** +** x IN (expr1,expr2,expr3) +** +** CASE 2: ** -** The context is that we have multiple OR-connected equality terms -** like this: +** If all subterms are indexable by a single table T, then set ** -** a= OR a= OR b= OR ... +** WhereTerm.eOperator = WO_OR +** WhereTerm.u.pOrInfo->indexable |= the cursor number for table T ** -** The pOrTerm input to this routine corresponds to a single term of -** this OR clause. 
In order for the term to be a condidate for -** conversion to an IN operator, the following must be true: +** A subterm is "indexable" if it is of the form +** "T.C " where C is any column of table T and +** is one of "=", "<", "<=", ">", ">=", "IS NULL", or "IN". +** A subterm is also indexable if it is an AND of two or more +** subsubterms at least one of which is indexable. Indexable AND +** subterms have their eOperator set to WO_AND and they have +** u.pAndInfo set to a dynamically allocated WhereAndTerm object. ** -** * The left-hand side of the term must be the column which -** is identified by iCursor and iColumn. +** From another point of view, "indexable" means that the subterm could +** potentially be used with an index if an appropriate index exists. +** This analysis does not consider whether or not the index exists; that +** is something the bestIndex() routine will determine. This analysis +** only looks at whether subterms appropriate for indexing exist. ** -** * If the right-hand side is also a column, then the affinities -** of both right and left sides must be such that no type -** conversions are required on the right. (Ticket #2249) +** All examples A through E above all satisfy case 2. But if a term +** also statisfies case 1 (such as B) we know that the optimizer will +** always prefer case 1, so in that case we pretend that case 2 is not +** satisfied. ** -** If both of these conditions are true, then return true. Otherwise -** return false. +** It might be the case that multiple tables are indexable. For example, +** (E) above is indexable on tables P, Q, and R. +** +** Terms that satisfy case 2 are candidates for lookup by using +** separate indices to find rowids for each subterm and composing +** the union of all rowids using a RowSet object. This is similar +** to "bitmap indices" in other database engines. +** +** OTHERWISE: +** +** If neither case 1 nor case 2 apply, then leave the eOperator set to +** zero. This term is not useful for search. */ -static int orTermIsOptCandidate(WhereTerm *pOrTerm, int iCursor, int iColumn){ - int affLeft, affRight; - assert( pOrTerm->eOperator==WO_EQ ); - if( pOrTerm->leftCursor!=iCursor ){ - return 0; - } - if( pOrTerm->leftColumn!=iColumn ){ - return 0; - } - affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight); - if( affRight==0 ){ - return 1; - } - affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft); - if( affRight!=affLeft ){ - return 0; +static void exprAnalyzeOrTerm( + SrcList *pSrc, /* the FROM clause */ + WhereClause *pWC, /* the complete WHERE clause */ + int idxTerm /* Index of the OR-term to be analyzed */ +){ + Parse *pParse = pWC->pParse; /* Parser context */ + sqlite3 *db = pParse->db; /* Database connection */ + WhereTerm *pTerm = &pWC->a[idxTerm]; /* The term to be analyzed */ + Expr *pExpr = pTerm->pExpr; /* The expression of the term */ + WhereMaskSet *pMaskSet = pWC->pMaskSet; /* Table use masks */ + int i; /* Loop counters */ + WhereClause *pOrWc; /* Breakup of pTerm into subterms */ + WhereTerm *pOrTerm; /* A Sub-term within the pOrWc */ + WhereOrInfo *pOrInfo; /* Additional information associated with pTerm */ + Bitmask chngToIN; /* Tables that might satisfy case 1 */ + Bitmask indexable; /* Tables that are indexable, satisfying case 2 */ + + /* + ** Break the OR clause into its separate subterms. The subterms are + ** stored in a WhereClause structure containing within the WhereOrInfo + ** object that is attached to the original OR clause term. 
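Case 1 above turns a disjunction of equalities against one column into an IN list, so a WHERE clause written as x=expr1 OR expr2=x OR x=expr3 can be satisfied through an index on x just like x IN (expr1,expr2,expr3). The rewrite happens internally, but the equivalence it relies on is easy to spot-check from the public API; this throwaway program is not part of the patch:

#include <stdio.h>
#include <sqlite3.h>

static int countRows(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int n = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ) return -1;
  while( sqlite3_step(pStmt)==SQLITE_ROW ) n++;
  sqlite3_finalize(pStmt);
  return n;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(x); "
                   "INSERT INTO t1 VALUES(4); "
                   "INSERT INTO t1 VALUES(11); "
                   "INSERT INTO t1 VALUES(7);", 0, 0, 0);
  printf("%d %d\n",
      countRows(db, "SELECT x FROM t1 WHERE x=4 OR 11=x"),
      countRows(db, "SELECT x FROM t1 WHERE x IN (4,11)"));
  sqlite3_close(db);
  return 0;
}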
+ */ + assert( (pTerm->wtFlags & (TERM_DYNAMIC|TERM_ORINFO|TERM_ANDINFO))==0 ); + assert( pExpr->op==TK_OR ); + pTerm->u.pOrInfo = pOrInfo = sqlite3DbMallocZero(db, sizeof(*pOrInfo)); + if( pOrInfo==0 ) return; + pTerm->wtFlags |= TERM_ORINFO; + pOrWc = &pOrInfo->wc; + whereClauseInit(pOrWc, pWC->pParse, pMaskSet); + whereSplit(pOrWc, pExpr, TK_OR); + exprAnalyzeAll(pSrc, pOrWc); + if( db->mallocFailed ) return; + assert( pOrWc->nTerm>=2 ); + + /* + ** Compute the set of tables that might satisfy cases 1 or 2. + */ + indexable = ~(Bitmask)0; + chngToIN = ~(pWC->vmask); + for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0 && indexable; i--, pOrTerm++){ + if( (pOrTerm->eOperator & WO_SINGLE)==0 ){ + WhereAndInfo *pAndInfo; + assert( pOrTerm->eOperator==0 ); + assert( (pOrTerm->wtFlags & (TERM_ANDINFO|TERM_ORINFO))==0 ); + chngToIN = 0; + pAndInfo = sqlite3DbMallocRaw(db, sizeof(*pAndInfo)); + if( pAndInfo ){ + WhereClause *pAndWC; + WhereTerm *pAndTerm; + int j; + Bitmask b = 0; + pOrTerm->u.pAndInfo = pAndInfo; + pOrTerm->wtFlags |= TERM_ANDINFO; + pOrTerm->eOperator = WO_AND; + pAndWC = &pAndInfo->wc; + whereClauseInit(pAndWC, pWC->pParse, pMaskSet); + whereSplit(pAndWC, pOrTerm->pExpr, TK_AND); + exprAnalyzeAll(pSrc, pAndWC); + testcase( db->mallocFailed ); + if( !db->mallocFailed ){ + for(j=0, pAndTerm=pAndWC->a; jnTerm; j++, pAndTerm++){ + assert( pAndTerm->pExpr ); + if( allowedOp(pAndTerm->pExpr->op) ){ + b |= getMask(pMaskSet, pAndTerm->leftCursor); + } + } + } + indexable &= b; + } + }else if( pOrTerm->wtFlags & TERM_COPIED ){ + /* Skip this term for now. We revisit it when we process the + ** corresponding TERM_VIRTUAL term */ + }else{ + Bitmask b; + b = getMask(pMaskSet, pOrTerm->leftCursor); + if( pOrTerm->wtFlags & TERM_VIRTUAL ){ + WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent]; + b |= getMask(pMaskSet, pOther->leftCursor); + } + indexable &= b; + if( pOrTerm->eOperator!=WO_EQ ){ + chngToIN = 0; + }else{ + chngToIN &= b; + } + } } - return 1; -} -/* -** Return true if the given term of an OR clause can be ignored during -** a check to make sure all OR terms are candidates for optimization. -** In other words, return true if a call to the orTermIsOptCandidate() -** above returned false but it is not necessary to disqualify the -** optimization. -** -** Suppose the original OR phrase was this: -** -** a=4 OR a=11 OR a=b -** -** During analysis, the third term gets flipped around and duplicate -** so that we are left with this: -** -** a=4 OR a=11 OR a=b OR b=a -** -** Since the last two terms are duplicates, only one of them -** has to qualify in order for the whole phrase to qualify. When -** this routine is called, we know that pOrTerm did not qualify. -** This routine merely checks to see if pOrTerm has a duplicate that -** might qualify. If there is a duplicate that has not yet been -** disqualified, then return true. If there are no duplicates, or -** the duplicate has also been disqualifed, return false. -*/ -static int orTermHasOkDuplicate(WhereClause *pOr, WhereTerm *pOrTerm){ - if( pOrTerm->flags & TERM_COPIED ){ - /* This is the original term. The duplicate is to the left had - ** has not yet been analyzed and thus has not yet been disqualified. */ - return 1; - } - if( (pOrTerm->flags & TERM_VIRTUAL)!=0 - && (pOr->a[pOrTerm->iParent].flags & TERM_OR_OK)!=0 ){ - /* This is a duplicate term. The original qualified so this one - ** does not have to. */ - return 1; + /* + ** Record the set of tables that satisfy case 2. The set might be + ** empty. 
+ */ + pOrInfo->indexable = indexable; + pTerm->eOperator = indexable==0 ? 0 : WO_OR; + + /* + ** chngToIN holds a set of tables that *might* satisfy case 1. But + ** we have to do some additional checking to see if case 1 really + ** is satisfied. + ** + ** chngToIN will hold either 0, 1, or 2 bits. The 0-bit case means + ** that there is no possibility of transforming the OR clause into an + ** IN operator because one or more terms in the OR clause contain + ** something other than == on a column in the single table. The 1-bit + ** case means that every term of the OR clause is of the form + ** "table.column=expr" for some single table. The one bit that is set + ** will correspond to the common table. We still need to check to make + ** sure the same column is used on all terms. The 2-bit case is when + ** the all terms are of the form "table1.column=table2.column". It + ** might be possible to form an IN operator with either table1.column + ** or table2.column as the LHS if either is common to every term of + ** the OR clause. + ** + ** Note that terms of the form "table.column1=table.column2" (the + ** same table on both sizes of the ==) cannot be optimized. + */ + if( chngToIN ){ + int okToChngToIN = 0; /* True if the conversion to IN is valid */ + int iColumn = -1; /* Column index on lhs of IN operator */ + int iCursor = -1; /* Table cursor common to all terms */ + int j = 0; /* Loop counter */ + + /* Search for a table and column that appears on one side or the + ** other of the == operator in every subterm. That table and column + ** will be recorded in iCursor and iColumn. There might not be any + ** such table and column. Set okToChngToIN if an appropriate table + ** and column is found but leave okToChngToIN false if not found. + */ + for(j=0; j<2 && !okToChngToIN; j++){ + pOrTerm = pOrWc->a; + for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){ + assert( pOrTerm->eOperator==WO_EQ ); + pOrTerm->wtFlags &= ~TERM_OR_OK; + if( pOrTerm->leftCursor==iCursor ){ + /* This is the 2-bit case and we are on the second iteration and + ** current term is from the first iteration. So skip this term. */ + assert( j==1 ); + continue; + } + if( (chngToIN & getMask(pMaskSet, pOrTerm->leftCursor))==0 ){ + /* This term must be of the form t1.a==t2.b where t2 is in the + ** chngToIN set but t1 is not. This term will be either preceeded + ** or follwed by an inverted copy (t2.b==t1.a). Skip this term + ** and use its inversion. */ + testcase( pOrTerm->wtFlags & TERM_COPIED ); + testcase( pOrTerm->wtFlags & TERM_VIRTUAL ); + assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) ); + continue; + } + iColumn = pOrTerm->u.leftColumn; + iCursor = pOrTerm->leftCursor; + break; + } + if( i<0 ){ + /* No candidate table+column was found. This can only occur + ** on the second iteration */ + assert( j==1 ); + assert( (chngToIN&(chngToIN-1))==0 ); + assert( chngToIN==getMask(pMaskSet, iCursor) ); + break; + } + testcase( j==1 ); + + /* We have found a candidate table and column. Check to see if that + ** table and column is common to every term in the OR clause */ + okToChngToIN = 1; + for(; i>=0 && okToChngToIN; i--, pOrTerm++){ + assert( pOrTerm->eOperator==WO_EQ ); + if( pOrTerm->leftCursor!=iCursor ){ + pOrTerm->wtFlags &= ~TERM_OR_OK; + }else if( pOrTerm->u.leftColumn!=iColumn ){ + okToChngToIN = 0; + }else{ + int affLeft, affRight; + /* If the right-hand side is also a column, then the affinities + ** of both right and left sides must be such that no type + ** conversions are required on the right. 
(Ticket #2249) + */ + affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight); + affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft); + if( affRight!=0 && affRight!=affLeft ){ + okToChngToIN = 0; + }else{ + pOrTerm->wtFlags |= TERM_OR_OK; + } + } + } + } + + /* At this point, okToChngToIN is true if original pTerm satisfies + ** case 1. In that case, construct a new virtual term that is + ** pTerm converted into an IN operator. + */ + if( okToChngToIN ){ + Expr *pDup; /* A transient duplicate expression */ + ExprList *pList = 0; /* The RHS of the IN operator */ + Expr *pLeft = 0; /* The LHS of the IN operator */ + Expr *pNew; /* The complete IN operator */ + + for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){ + if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue; + assert( pOrTerm->eOperator==WO_EQ ); + assert( pOrTerm->leftCursor==iCursor ); + assert( pOrTerm->u.leftColumn==iColumn ); + pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0); + pList = sqlite3ExprListAppend(pWC->pParse, pList, pDup); + pLeft = pOrTerm->pExpr->pLeft; + } + assert( pLeft!=0 ); + pDup = sqlite3ExprDup(db, pLeft, 0); + pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0, 0); + if( pNew ){ + int idxNew; + transferJoinMarkings(pNew, pExpr); + assert( !ExprHasProperty(pNew, EP_xIsSelect) ); + pNew->x.pList = pList; + idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew==0 ); + exprAnalyze(pSrc, pWC, idxNew); + pTerm = &pWC->a[idxTerm]; + pWC->a[idxNew].iParent = idxTerm; + pTerm->nChild = 1; + }else{ + sqlite3ExprListDelete(db, pList); + } + pTerm->eOperator = 0; /* case 1 trumps case 2 */ + } } - /* This is either a singleton term or else it is a duplicate for - ** which the original did not qualify. Either way we are done for. */ - return 0; } #endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */ + /* ** The input to this routine is an WhereTerm structure with only the ** "pExpr" field filled in. The job of this routine is to analyze the @@ -697,32 +1032,50 @@ ** structure. ** ** If the expression is of the form " X" it gets commuted -** to the standard form of "X ". If the expression is of -** the form "X Y" where both X and Y are columns, then the original -** expression is unchanged and a new virtual expression of the form -** "Y X" is added to the WHERE clause and analyzed separately. +** to the standard form of "X ". +** +** If the expression is of the form "X Y" where both X and Y are +** columns, then the original expression is unchanged and a new virtual +** term of the form "Y X" is added to the WHERE clause and +** analyzed separately. The original term is marked with TERM_COPIED +** and the new term is marked with TERM_DYNAMIC (because it's pExpr +** needs to be freed with the WhereClause) and TERM_VIRTUAL (because it +** is a commuted copy of a prior term.) The original term has nChild=1 +** and the copy has idxParent set to the index of the original term. 
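**
** A rough standalone sketch of the operator flip performed when a term is
** commuted as described above (illustrative codes only, not SQLite's TK_
** values):
*/

#include <stdio.h>

/* Hypothetical comparison-operator codes, for this sketch only. */
enum SketchOp { OP_EQ, OP_LT, OP_LE, OP_GT, OP_GE };

/* Return the operator that keeps the test equivalent after swapping the
** operands: "5 < x" becomes "x > 5"; equality is unchanged. */
static enum SketchOp commuteOp(enum SketchOp op){
  switch( op ){
    case OP_LT: return OP_GT;
    case OP_LE: return OP_GE;
    case OP_GT: return OP_LT;
    case OP_GE: return OP_LE;
    default:    return op;
  }
}

int main(void){
  printf("%d\n", (int)commuteOp(OP_LT));   /* prints the code for OP_GT */
  return 0;
}

/*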
*/ static void exprAnalyze( SrcList *pSrc, /* the FROM clause */ WhereClause *pWC, /* the WHERE clause */ int idxTerm /* Index of the term to be analyzed */ ){ - WhereTerm *pTerm = &pWC->a[idxTerm]; - ExprMaskSet *pMaskSet = pWC->pMaskSet; - Expr *pExpr = pTerm->pExpr; - Bitmask prereqLeft; - Bitmask prereqAll; + WhereTerm *pTerm; /* The term to be analyzed */ + WhereMaskSet *pMaskSet; /* Set of table index masks */ + Expr *pExpr; /* The expression to be analyzed */ + Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */ + Bitmask prereqAll; /* Prerequesites of pExpr */ + Bitmask extraRight = 0; int nPattern; int isComplete; - int op; - - if( sqlite3MallocFailed() ) return; + int noCase; + int op; /* Top-level operator. pExpr->op */ + Parse *pParse = pWC->pParse; /* Parsing context */ + sqlite3 *db = pParse->db; /* Database connection */ + + if( db->mallocFailed ){ + return; + } + pTerm = &pWC->a[idxTerm]; + pMaskSet = pWC->pMaskSet; + pExpr = pTerm->pExpr; prereqLeft = exprTableUsage(pMaskSet, pExpr->pLeft); op = pExpr->op; if( op==TK_IN ){ assert( pExpr->pRight==0 ); - pTerm->prereqRight = exprListTableUsage(pMaskSet, pExpr->pList) - | exprSelectTableUsage(pMaskSet, pExpr->pSelect); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + pTerm->prereqRight = exprSelectTableUsage(pMaskSet, pExpr->x.pSelect); + }else{ + pTerm->prereqRight = exprListTableUsage(pMaskSet, pExpr->x.pList); + } }else if( op==TK_ISNULL ){ pTerm->prereqRight = 0; }else{ @@ -730,7 +1083,10 @@ } prereqAll = exprTableUsage(pMaskSet, pExpr); if( ExprHasProperty(pExpr, EP_FromJoin) ){ - prereqAll |= getMask(pMaskSet, pExpr->iRightJoinTable); + Bitmask x = getMask(pMaskSet, pExpr->iRightJoinTable); + prereqAll |= x; + extraRight = x-1; /* ON clause terms may not be used with an index + ** on left table of a LEFT JOIN. Ticket #3015 */ } pTerm->prereqAll = prereqAll; pTerm->leftCursor = -1; @@ -741,7 +1097,7 @@ Expr *pRight = pExpr->pRight; if( pLeft->op==TK_COLUMN ){ pTerm->leftCursor = pLeft->iTable; - pTerm->leftColumn = pLeft->iColumn; + pTerm->u.leftColumn = pLeft->iColumn; pTerm->eOperator = operatorMask(op); } if( pRight && pRight->op==TK_COLUMN ){ @@ -749,9 +1105,9 @@ Expr *pDup; if( pTerm->leftCursor>=0 ){ int idxNew; - pDup = sqlite3ExprDup(pExpr); - if( sqlite3MallocFailed() ){ - sqlite3ExprDelete(pDup); + pDup = sqlite3ExprDup(db, pExpr, 0); + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pDup); return; } idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC); @@ -760,15 +1116,15 @@ pNew->iParent = idxTerm; pTerm = &pWC->a[idxTerm]; pTerm->nChild = 1; - pTerm->flags |= TERM_COPIED; + pTerm->wtFlags |= TERM_COPIED; }else{ pDup = pExpr; pNew = pTerm; } - exprCommute(pDup); + exprCommute(pParse, pDup); pLeft = pDup->pLeft; pNew->leftCursor = pLeft->iTable; - pNew->leftColumn = pLeft->iColumn; + pNew->u.leftColumn = pLeft->iColumn; pNew->prereqRight = prereqLeft; pNew->prereqAll = prereqAll; pNew->eOperator = operatorMask(pDup->op); @@ -777,10 +1133,22 @@ #ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION /* If a term is the BETWEEN operator, create two new virtual terms - ** that define the range that the BETWEEN implements. + ** that define the range that the BETWEEN implements. For example: + ** + ** a BETWEEN b AND c + ** + ** is converted into: + ** + ** (a BETWEEN b AND c) AND (a>=b) AND (a<=c) + ** + ** The two new terms are added onto the end of the WhereClause object. + ** The new terms are "dynamic" and are children of the original BETWEEN + ** term. 
That means that if the BETWEEN term is coded, the children are + ** skipped. Or, if the children are satisfied by an index, the original + ** BETWEEN term is skipped. */ - else if( pExpr->op==TK_BETWEEN ){ - ExprList *pList = pExpr->pList; + else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){ + ExprList *pList = pExpr->x.pList; int i; static const u8 ops[] = {TK_GE, TK_LE}; assert( pList!=0 ); @@ -788,9 +1156,11 @@ for(i=0; i<2; i++){ Expr *pNewExpr; int idxNew; - pNewExpr = sqlite3Expr(ops[i], sqlite3ExprDup(pExpr->pLeft), - sqlite3ExprDup(pList->a[i].pExpr), 0); + pNewExpr = sqlite3PExpr(pParse, ops[i], + sqlite3ExprDup(db, pExpr->pLeft, 0), + sqlite3ExprDup(db, pList->a[i].pExpr, 0), 0); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew==0 ); exprAnalyze(pSrc, pWC, idxNew); pTerm = &pWC->a[idxTerm]; pWC->a[idxNew].iParent = idxTerm; @@ -800,108 +1170,62 @@ #endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */ #if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) - /* Attempt to convert OR-connected terms into an IN operator so that - ** they can make use of indices. Example: - ** - ** x = expr1 OR expr2 = x OR x = expr3 - ** - ** is converted into - ** - ** x IN (expr1,expr2,expr3) - ** - ** This optimization must be omitted if OMIT_SUBQUERY is defined because - ** the compiler for the the IN operator is part of sub-queries. + /* Analyze a term that is composed of two or more subterms connected by + ** an OR operator. */ else if( pExpr->op==TK_OR ){ - int ok; - int i, j; - int iColumn, iCursor; - WhereClause sOr; - WhereTerm *pOrTerm; - - assert( (pTerm->flags & TERM_DYNAMIC)==0 ); - whereClauseInit(&sOr, pWC->pParse, pMaskSet); - whereSplit(&sOr, pExpr, TK_OR); - exprAnalyzeAll(pSrc, &sOr); - assert( sOr.nTerm>=2 ); - j = 0; - do{ - assert( j=0; - for(i=sOr.nTerm-1, pOrTerm=sOr.a; i>=0 && ok; i--, pOrTerm++){ - if( pOrTerm->eOperator!=WO_EQ ){ - goto or_not_possible; - } - if( orTermIsOptCandidate(pOrTerm, iCursor, iColumn) ){ - pOrTerm->flags |= TERM_OR_OK; - }else if( orTermHasOkDuplicate(&sOr, pOrTerm) ){ - pOrTerm->flags &= ~TERM_OR_OK; - }else{ - ok = 0; - } - } - }while( !ok && (sOr.a[j++].flags & TERM_COPIED)!=0 && j<2 ); - if( ok ){ - ExprList *pList = 0; - Expr *pNew, *pDup; - Expr *pLeft = 0; - for(i=sOr.nTerm-1, pOrTerm=sOr.a; i>=0 && ok; i--, pOrTerm++){ - if( (pOrTerm->flags & TERM_OR_OK)==0 ) continue; - pDup = sqlite3ExprDup(pOrTerm->pExpr->pRight); - pList = sqlite3ExprListAppend(pList, pDup, 0); - pLeft = pOrTerm->pExpr->pLeft; - } - assert( pLeft!=0 ); - pDup = sqlite3ExprDup(pLeft); - pNew = sqlite3Expr(TK_IN, pDup, 0, 0); - if( pNew ){ - int idxNew; - transferJoinMarkings(pNew, pExpr); - pNew->pList = pList; - idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew); - pTerm = &pWC->a[idxTerm]; - pWC->a[idxNew].iParent = idxTerm; - pTerm->nChild = 1; - }else{ - sqlite3ExprListDelete(pList); - } - } -or_not_possible: - whereClauseClear(&sOr); + assert( pWC->op==TK_AND ); + exprAnalyzeOrTerm(pSrc, pWC, idxTerm); } #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ #ifndef SQLITE_OMIT_LIKE_OPTIMIZATION /* Add constraints to reduce the search space on a LIKE or GLOB ** operator. + ** + ** A like pattern of the form "x LIKE 'abc%'" is changed into constraints + ** + ** x>='abc' AND x<'abd' AND x LIKE 'abc%' + ** + ** The last character of the prefix "abc" is incremented to form the + ** termination condition "abd". 
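**
** A minimal standalone sketch of that prefix trick (ASCII only; the real
** code also has to worry about case folding and affinities):
*/

#include <stdio.h>
#include <string.h>

/* Given the literal prefix of a LIKE pattern (e.g. "abc" from 'abc%'),
** write the exclusive upper bound ("abd") into zHi.  Returns 0 if the
** last byte cannot be incremented or the buffer is too small. */
static int likeUpperBound(const char *zPrefix, char *zHi, size_t nHi){
  size_t n = strlen(zPrefix);
  if( n==0 || n+1>nHi || (unsigned char)zPrefix[n-1]==0xff ) return 0;
  memcpy(zHi, zPrefix, n+1);
  zHi[n-1]++;          /* 'c' -> 'd': everything LIKE 'abc%' is < "abd" */
  return 1;
}

int main(void){
  char zHi[16];
  if( likeUpperBound("abc", zHi, sizeof(zHi)) ){
    printf("x>='%s' AND x<'%s' AND x LIKE '%s%%'\n", "abc", zHi, "abc");
  }
  return 0;
}

/*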
*/ - if( isLikeOrGlob(pWC->pParse->db, pExpr, &nPattern, &isComplete) ){ + if( isLikeOrGlob(pParse, pExpr, &nPattern, &isComplete, &noCase) + && pWC->op==TK_AND ){ Expr *pLeft, *pRight; Expr *pStr1, *pStr2; Expr *pNewExpr1, *pNewExpr2; int idxNew1, idxNew2; - pLeft = pExpr->pList->a[1].pExpr; - pRight = pExpr->pList->a[0].pExpr; - pStr1 = sqlite3Expr(TK_STRING, 0, 0, 0); - if( pStr1 ){ - sqlite3TokenCopy(&pStr1->token, &pRight->token); - pStr1->token.n = nPattern; - pStr1->flags = EP_Dequoted; - } - pStr2 = sqlite3ExprDup(pStr1); - if( pStr2 ){ - assert( pStr2->token.dyn ); - ++*(u8*)&pStr2->token.z[nPattern-1]; + pLeft = pExpr->x.pList->a[1].pExpr; + pRight = pExpr->x.pList->a[0].pExpr; + pStr1 = sqlite3Expr(db, TK_STRING, pRight->u.zToken); + if( pStr1 ) pStr1->u.zToken[nPattern] = 0; + pStr2 = sqlite3ExprDup(db, pStr1, 0); + if( !db->mallocFailed ){ + u8 c, *pC; /* Last character before the first wildcard */ + pC = (u8*)&pStr2->u.zToken[nPattern-1]; + c = *pC; + if( noCase ){ + /* The point is to increment the last character before the first + ** wildcard. But if we increment '@', that will push it into the + ** alphabetic range where case conversions will mess up the + ** inequality. To avoid this, make sure to also run the full + ** LIKE on all candidate expressions by clearing the isComplete flag + */ + if( c=='A'-1 ) isComplete = 0; + + c = sqlite3UpperToLower[c]; + } + *pC = c + 1; } - pNewExpr1 = sqlite3Expr(TK_GE, sqlite3ExprDup(pLeft), pStr1, 0); + pNewExpr1 = sqlite3PExpr(pParse, TK_GE, sqlite3ExprDup(db,pLeft,0),pStr1,0); idxNew1 = whereClauseInsert(pWC, pNewExpr1, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew1==0 ); exprAnalyze(pSrc, pWC, idxNew1); - pNewExpr2 = sqlite3Expr(TK_LT, sqlite3ExprDup(pLeft), pStr2, 0); + pNewExpr2 = sqlite3PExpr(pParse, TK_LT, sqlite3ExprDup(db,pLeft,0),pStr2,0); idxNew2 = whereClauseInsert(pWC, pNewExpr2, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew2==0 ); exprAnalyze(pSrc, pWC, idxNew2); pTerm = &pWC->a[idxTerm]; if( isComplete ){ @@ -925,27 +1249,34 @@ WhereTerm *pNewTerm; Bitmask prereqColumn, prereqExpr; - pRight = pExpr->pList->a[0].pExpr; - pLeft = pExpr->pList->a[1].pExpr; + pRight = pExpr->x.pList->a[0].pExpr; + pLeft = pExpr->x.pList->a[1].pExpr; prereqExpr = exprTableUsage(pMaskSet, pRight); prereqColumn = exprTableUsage(pMaskSet, pLeft); if( (prereqExpr & prereqColumn)==0 ){ Expr *pNewExpr; - pNewExpr = sqlite3Expr(TK_MATCH, 0, sqlite3ExprDup(pRight), 0); + pNewExpr = sqlite3PExpr(pParse, TK_MATCH, + 0, sqlite3ExprDup(db, pRight, 0), 0); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew==0 ); pNewTerm = &pWC->a[idxNew]; pNewTerm->prereqRight = prereqExpr; pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->leftColumn = pLeft->iColumn; + pNewTerm->u.leftColumn = pLeft->iColumn; pNewTerm->eOperator = WO_MATCH; pNewTerm->iParent = idxTerm; pTerm = &pWC->a[idxTerm]; pTerm->nChild = 1; - pTerm->flags |= TERM_COPIED; + pTerm->wtFlags |= TERM_COPIED; pNewTerm->prereqAll = pTerm->prereqAll; } } #endif /* SQLITE_OMIT_VIRTUALTABLE */ + + /* Prevent ON clause terms of a LEFT JOIN from being used to drive + ** an index for tables to the left of the join. 
+ */ + pTerm->prereqRight |= extraRight; } /* @@ -954,7 +1285,7 @@ */ static int referencesOtherTables( ExprList *pList, /* Search expressions in ths list */ - ExprMaskSet *pMaskSet, /* Mapping from tables to bitmaps */ + WhereMaskSet *pMaskSet, /* Mapping from tables to bitmaps */ int iFirst, /* Be searching with the iFirst-th expression */ int iBase /* Ignore references to this table */ ){ @@ -989,7 +1320,7 @@ */ static int isSortingIndex( Parse *pParse, /* Parsing context */ - ExprMaskSet *pMaskSet, /* Mapping from table indices to bitmaps */ + WhereMaskSet *pMaskSet, /* Mapping from table cursor numbers to bitmaps */ Index *pIdx, /* The index we are testing */ int base, /* Cursor number for the table to be sorted */ ExprList *pOrderBy, /* The ORDER BY clause */ @@ -1051,6 +1382,9 @@ ** ORDER BY term, that is OK. Just ignore that column of the index */ continue; + }else if( i==pIdx->nColumn ){ + /* Index column i is the rowid. All other terms match. */ + break; }else{ /* If an index column fails to match and is not constrained by == ** then the index cannot satisfy the ORDER BY constraint. @@ -1109,7 +1443,7 @@ static int sortableByRowid( int base, /* Cursor number for table to be sorted */ ExprList *pOrderBy, /* The ORDER BY clause */ - ExprMaskSet *pMaskSet, /* Mapping from tables to bitmaps */ + WhereMaskSet *pMaskSet, /* Mapping from table cursors to bitmaps */ int *pbRev /* Set to 1 if ORDER BY is DESC */ ){ Expr *p; @@ -1128,7 +1462,7 @@ /* ** Prepare a crude estimate of the logarithm of the input value. ** The results need not be exact. This is only used for estimating -** the total cost of performing operatings with O(logN) or O(NlogN) +** the total cost of performing operations with O(logN) or O(NlogN) ** complexity. Because N is just a guess, it is no great tragedy if ** logN is a little off. */ @@ -1151,7 +1485,7 @@ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_DEBUG) static void TRACE_IDX_INPUTS(sqlite3_index_info *p){ int i; - if( !sqlite3_where_trace ) return; + if( !sqlite3WhereTrace ) return; for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usabled=%d\n", i, @@ -1169,7 +1503,7 @@ } static void TRACE_IDX_OUTPUTS(sqlite3_index_info *p){ int i; - if( !sqlite3_where_trace ) return; + if( !sqlite3WhereTrace ) return; for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, @@ -1186,8 +1520,247 @@ #define TRACE_IDX_OUTPUTS(A) #endif +/* +** Required because bestIndex() is called by bestOrClauseIndex() +*/ +static void bestIndex( + Parse*, WhereClause*, struct SrcList_item*, Bitmask, ExprList*, WhereCost*); + +/* +** This routine attempts to find an scanning strategy that can be used +** to optimize an 'OR' expression that is part of a WHERE clause. +** +** The table associated with FROM clause term pSrc may be either a +** regular B-Tree table or a virtual table. 
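**
** The costing inside the function that follows sums one scan per OR branch
** and, when an ORDER BY is present, adds an N*logN penalty for the sort.
** A rough back-of-the-envelope sketch of that arithmetic (illustrative
** names and numbers only):
*/

#include <stdio.h>
#include <math.h>

typedef struct BranchCost { double rCost; double nRow; } BranchCost;

static double orScanCost(const BranchCost *aBranch, int nBranch, int hasOrderBy){
  double rTotal = 0, nRow = 0;
  int i;
  for(i=0; i<nBranch; i++){
    rTotal += aBranch[i].rCost;   /* each OR branch is scanned separately */
    nRow   += aBranch[i].nRow;    /* and the matching rows are combined   */
  }
  if( hasOrderBy ){
    rTotal += nRow*log10(nRow>1 ? nRow : 1);   /* crude sort penalty */
  }
  return rTotal;
}

int main(void){
  BranchCost a[2] = { {10.0, 8.0}, {12.0, 20.0} };   /* e.g. "a=1 OR b=2" */
  printf("multi-index OR cost: %.1f\n", orScanCost(a, 2, 1));
  return 0;
}

/*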
+*/
+static void bestOrClauseIndex(
+  Parse *pParse,              /* The parsing context */
+  WhereClause *pWC,           /* The WHERE clause */
+  struct SrcList_item *pSrc,  /* The FROM clause term to search */
+  Bitmask notReady,           /* Mask of cursors that are not available */
+  ExprList *pOrderBy,         /* The ORDER BY clause */
+  WhereCost *pCost            /* Lowest cost query plan */
+){
+#ifndef SQLITE_OMIT_OR_OPTIMIZATION
+  const int iCur = pSrc->iCursor;   /* The cursor of the table to be accessed */
+  const Bitmask maskSrc = getMask(pWC->pMaskSet, iCur);  /* Bitmask for pSrc */
+  WhereTerm * const pWCEnd = &pWC->a[pWC->nTerm];        /* End of pWC->a[] */
+  WhereTerm *pTerm;                 /* A single term of the WHERE clause */
+
+  /* Search the WHERE clause terms for a usable WO_OR term. */
+  for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
+    if( pTerm->eOperator==WO_OR
+     && ((pTerm->prereqAll & ~maskSrc) & notReady)==0
+     && (pTerm->u.pOrInfo->indexable & maskSrc)!=0
+    ){
+      WhereClause * const pOrWC = &pTerm->u.pOrInfo->wc;
+      WhereTerm * const pOrWCEnd = &pOrWC->a[pOrWC->nTerm];
+      WhereTerm *pOrTerm;
+      int flags = WHERE_MULTI_OR;
+      double rTotal = 0;
+      double nRow = 0;
+
+      for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){
+        WhereCost sTermCost;
+        WHERETRACE(("... Multi-index OR testing for term %d of %d....\n",
+          (pOrTerm - pOrWC->a), (pTerm - pWC->a)
+        ));
+        if( pOrTerm->eOperator==WO_AND ){
+          WhereClause *pAndWC = &pOrTerm->u.pAndInfo->wc;
+          bestIndex(pParse, pAndWC, pSrc, notReady, 0, &sTermCost);
+        }else if( pOrTerm->leftCursor==iCur ){
+          WhereClause tempWC;
+          tempWC.pParse = pWC->pParse;
+          tempWC.pMaskSet = pWC->pMaskSet;
+          tempWC.op = TK_AND;
+          tempWC.a = pOrTerm;
+          tempWC.nTerm = 1;
+          bestIndex(pParse, &tempWC, pSrc, notReady, 0, &sTermCost);
+        }else{
+          continue;
+        }
+        rTotal += sTermCost.rCost;
+        nRow += sTermCost.nRow;
+        if( rTotal>=pCost->rCost ) break;
+      }
+
+      /* If there is an ORDER BY clause, increase the scan cost to account
+      ** for the cost of the sort. */
+      if( pOrderBy!=0 ){
+        rTotal += nRow*estLog(nRow);
+        WHERETRACE(("... sorting increases OR cost to %.9g\n", rTotal));
+      }
+
+      /* If the cost of scanning using this OR term for optimization is
+      ** less than the current cost stored in pCost, replace the contents
+      ** of pCost. */
+      WHERETRACE(("... multi-index OR cost=%.9g nrow=%.9g\n", rTotal, nRow));
+      if( rTotal<pCost->rCost ){
+        pCost->rCost = rTotal;
+        pCost->nRow = nRow;
+        pCost->plan.wsFlags = flags;
+        pCost->plan.u.pTerm = pTerm;
+      }
+    }
+  }
+#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
+}
+
 #ifndef SQLITE_OMIT_VIRTUALTABLE
 /*
+** Allocate and populate an sqlite3_index_info structure. It is the
+** responsibility of the caller to eventually release the structure
+** by passing the pointer returned by this function to sqlite3_free().
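**
** The function packs the header and all of its variable-length arrays into
** a single allocation, which is why one sqlite3_free() suffices.  A small
** standalone sketch of that layout (hypothetical struct names):
*/

#include <stdio.h>
#include <stdlib.h>

typedef struct Constraint { int iColumn; unsigned char op; } Constraint;
typedef struct Usage      { int argvIndex; unsigned char omit; } Usage;
typedef struct Info {
  int nConstraint;
  Constraint *aConstraint;    /* points just past the header               */
  Usage *aUsage;              /* points just past the constraint array     */
} Info;

static Info *allocInfo(int nConstraint){
  Info *p = calloc(1, sizeof(Info)
                      + nConstraint*(sizeof(Constraint) + sizeof(Usage)));
  if( p==0 ) return 0;
  p->nConstraint = nConstraint;
  p->aConstraint = (Constraint*)&p[1];
  p->aUsage = (Usage*)&p->aConstraint[nConstraint];
  return p;
}

int main(void){
  Info *p = allocInfo(3);
  if( p ){
    p->aConstraint[2].iColumn = 5;
    printf("%d constraints\n", p->nConstraint);
    free(p);        /* one call releases the header and both arrays */
  }
  return 0;
}

/*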
+*/
+static sqlite3_index_info *allocateIndexInfo(
+  Parse *pParse,
+  WhereClause *pWC,
+  struct SrcList_item *pSrc,
+  ExprList *pOrderBy
+){
+  int i, j;
+  int nTerm;
+  struct sqlite3_index_constraint *pIdxCons;
+  struct sqlite3_index_orderby *pIdxOrderBy;
+  struct sqlite3_index_constraint_usage *pUsage;
+  WhereTerm *pTerm;
+  int nOrderBy;
+  sqlite3_index_info *pIdxInfo;
+
+  WHERETRACE(("Recomputing index info for %s...\n", pSrc->pTab->zName));
+
+  /* Count the number of possible WHERE clause constraints referring
+  ** to this virtual table */
+  for(i=nTerm=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+    if( pTerm->leftCursor != pSrc->iCursor ) continue;
+    assert( (pTerm->eOperator&(pTerm->eOperator-1))==0 );
+    testcase( pTerm->eOperator==WO_IN );
+    testcase( pTerm->eOperator==WO_ISNULL );
+    if( pTerm->eOperator & (WO_IN|WO_ISNULL) ) continue;
+    nTerm++;
+  }
+
+  /* If the ORDER BY clause contains only columns in the current
+  ** virtual table then allocate space for the aOrderBy part of
+  ** the sqlite3_index_info structure.
+  */
+  nOrderBy = 0;
+  if( pOrderBy ){
+    for(i=0; i<pOrderBy->nExpr; i++){
+      Expr *pExpr = pOrderBy->a[i].pExpr;
+      if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break;
+    }
+    if( i==pOrderBy->nExpr ){
+      nOrderBy = pOrderBy->nExpr;
+    }
+  }
+
+  /* Allocate the sqlite3_index_info structure
+  */
+  pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo)
+                           + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm
+                           + sizeof(*pIdxOrderBy)*nOrderBy );
+  if( pIdxInfo==0 ){
+    sqlite3ErrorMsg(pParse, "out of memory");
+    /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+    return 0;
+  }
+
+  /* Initialize the structure.  The sqlite3_index_info structure contains
+  ** many fields that are declared "const" to prevent xBestIndex from
+  ** changing them.  We have to do some funky casting in order to
+  ** initialize those fields.
+  */
+  pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1];
+  pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm];
+  pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy];
+  *(int*)&pIdxInfo->nConstraint = nTerm;
+  *(int*)&pIdxInfo->nOrderBy = nOrderBy;
+  *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons;
+  *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy;
+  *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage =
+                                                                   pUsage;
+
+  for(i=j=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+    if( pTerm->leftCursor != pSrc->iCursor ) continue;
+    assert( (pTerm->eOperator&(pTerm->eOperator-1))==0 );
+    testcase( pTerm->eOperator==WO_IN );
+    testcase( pTerm->eOperator==WO_ISNULL );
+    if( pTerm->eOperator & (WO_IN|WO_ISNULL) ) continue;
+    pIdxCons[j].iColumn = pTerm->u.leftColumn;
+    pIdxCons[j].iTermOffset = i;
+    pIdxCons[j].op = (u8)pTerm->eOperator;
+    /* The direct assignment in the previous line is possible only because
+    ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical.  The
+    ** following asserts verify this fact.
*/
+    assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ );
+    assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT );
+    assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE );
+    assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT );
+    assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE );
+    assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH );
+    assert( pTerm->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) );
+    j++;
+  }
+  for(i=0; i<nOrderBy; i++){
+    Expr *pExpr = pOrderBy->a[i].pExpr;
+    pIdxOrderBy[i].iColumn = pExpr->iColumn;
+    pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder;
+  }
+
+  return pIdxInfo;
+}
+
+/*
+** The table object reference passed as the second argument to this function
+** must represent a virtual table. This function invokes the xBestIndex()
+** method of the virtual table with the sqlite3_index_info pointer passed
+** as the argument.
+**
+** If an error occurs, pParse is populated with an error message and a
+** non-zero value is returned. Otherwise, 0 is returned and the output
+** part of the sqlite3_index_info structure is left populated.
+**
+** Whether or not an error is returned, it is the responsibility of the
+** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates
+** that this is required.
+*/
+static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
+  sqlite3_vtab *pVtab = pTab->pVtab;
+  int i;
+  int rc;
+
+  (void)sqlite3SafetyOff(pParse->db);
+  WHERETRACE(("xBestIndex for %s\n", pTab->zName));
+  TRACE_IDX_INPUTS(p);
+  rc = pVtab->pModule->xBestIndex(pVtab, p);
+  TRACE_IDX_OUTPUTS(p);
+  (void)sqlite3SafetyOn(pParse->db);
+
+  if( rc!=SQLITE_OK ){
+    if( rc==SQLITE_NOMEM ){
+      pParse->db->mallocFailed = 1;
+    }else if( !pVtab->zErrMsg ){
+      sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc));
+    }else{
+      sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg);
+    }
+  }
+  sqlite3DbFree(pParse->db, pVtab->zErrMsg);
+  pVtab->zErrMsg = 0;
+
+  for(i=0; i<p->nConstraint; i++){
+    if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){
+      sqlite3ErrorMsg(pParse,
+          "table %s: xBestIndex returned an invalid plan", pTab->zName);
+    }
+  }
+
+  return pParse->nErr;
+}
+
+
+/*
 ** Compute the best index for a virtual table.
 **
 ** The best index is computed by the xBestIndex method of the virtual
@@ -1203,107 +1776,39 @@
 ** routine takes care of freeing the sqlite3_index_info structure after
 ** everybody has finished with it.
 */
-static double bestVirtualIndex(
-  Parse *pParse,              /* The parsing context */
-  WhereClause *pWC,           /* The WHERE clause */
-  struct SrcList_item *pSrc,  /* The FROM clause term to search */
-  Bitmask notReady,           /* Mask of cursors that are not available */
-  ExprList *pOrderBy,         /* The order by clause */
-  int orderByUsable,          /* True if we can potential sort */
-  sqlite3_index_info **ppIdxInfo /* Index information passed to xBestIndex */
+static void bestVirtualIndex(
+  Parse *pParse,                  /* The parsing context */
+  WhereClause *pWC,               /* The WHERE clause */
+  struct SrcList_item *pSrc,      /* The FROM clause term to search */
+  Bitmask notReady,               /* Mask of cursors that are not available */
+  ExprList *pOrderBy,             /* The order by clause */
+  WhereCost *pCost,               /* Lowest cost query plan */
+  sqlite3_index_info **ppIdxInfo  /* Index information passed to xBestIndex */
 ){
   Table *pTab = pSrc->pTab;
   sqlite3_index_info *pIdxInfo;
   struct sqlite3_index_constraint *pIdxCons;
-  struct sqlite3_index_orderby *pIdxOrderBy;
   struct sqlite3_index_constraint_usage *pUsage;
   WhereTerm *pTerm;
   int i, j;
   int nOrderBy;
-  int rc;
+
+  /* Make sure wsFlags is initialized to some sane value.
Otherwise, if the + ** malloc in allocateIndexInfo() fails and this function returns leaving + ** wsFlags in an uninitialized state, the caller may behave unpredictably. + */ + memset(pCost, 0, sizeof(*pCost)); + pCost->plan.wsFlags = WHERE_VIRTUALTABLE; /* If the sqlite3_index_info structure has not been previously - ** allocated and initialized for this virtual table, then allocate - ** and initialize it now + ** allocated and initialized, then allocate and initialize it now. */ pIdxInfo = *ppIdxInfo; if( pIdxInfo==0 ){ - WhereTerm *pTerm; - int nTerm; - WHERETRACE(("Recomputing index info for %s...\n", pTab->zName)); - - /* Count the number of possible WHERE clause constraints referring - ** to this virtual table */ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->eOperator==WO_IN ) continue; - nTerm++; - } - - /* If the ORDER BY clause contains only columns in the current - ** virtual table then allocate space for the aOrderBy part of - ** the sqlite3_index_info structure. - */ - nOrderBy = 0; - if( pOrderBy ){ - for(i=0; inExpr; i++){ - Expr *pExpr = pOrderBy->a[i].pExpr; - if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break; - } - if( i==pOrderBy->nExpr ){ - nOrderBy = pOrderBy->nExpr; - } - } - - /* Allocate the sqlite3_index_info structure - */ - pIdxInfo = sqliteMalloc( sizeof(*pIdxInfo) - + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy ); - if( pIdxInfo==0 ){ - sqlite3ErrorMsg(pParse, "out of memory"); - return 0.0; - } - *ppIdxInfo = pIdxInfo; - - /* Initialize the structure. The sqlite3_index_info structure contains - ** many fields that are declared "const" to prevent xBestIndex from - ** changing them. We have to do some funky casting in order to - ** initialize those fields. - */ - pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1]; - pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; - pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; - *(int*)&pIdxInfo->nConstraint = nTerm; - *(int*)&pIdxInfo->nOrderBy = nOrderBy; - *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons; - *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy; - *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage = - pUsage; - - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->eOperator==WO_IN ) continue; - pIdxCons[j].iColumn = pTerm->leftColumn; - pIdxCons[j].iTermOffset = i; - pIdxCons[j].op = pTerm->eOperator; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. 
*/ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH ); - assert( pTerm->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) ); - j++; - } - for(i=0; ia[i].pExpr; - pIdxOrderBy[i].iColumn = pExpr->iColumn; - pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder; - } + *ppIdxInfo = pIdxInfo = allocateIndexInfo(pParse, pWC, pSrc, pOrderBy); + } + if( pIdxInfo==0 ){ + return; } /* At this point, the sqlite3_index_info structure that pIdxInfo points @@ -1319,13 +1824,6 @@ */ assert( pTab->azModuleArg && pTab->azModuleArg[0] ); assert( pTab->pVtab ); -#if 0 - if( pTab->pVtab==0 ){ - sqlite3ErrorMsg(pParse, "undefined module %s for table %s", - pTab->azModuleArg[0], pTab->zName); - return 0.0; - } -#endif /* Set the aConstraint[].usable fields and initialize all ** output variables to zero. @@ -1352,7 +1850,7 @@ for(i=0; inConstraint; i++, pIdxCons++){ j = pIdxCons->iTermOffset; pTerm = &pWC->a[j]; - pIdxCons->usable = (pTerm->prereqRight & notReady)==0; + pIdxCons->usable = (pTerm->prereqRight & notReady)==0 ?1:0; } memset(pUsage, 0, sizeof(pUsage[0])*pIdxInfo->nConstraint); if( pIdxInfo->needToFreeIdxStr ){ @@ -1362,40 +1860,50 @@ pIdxInfo->idxNum = 0; pIdxInfo->needToFreeIdxStr = 0; pIdxInfo->orderByConsumed = 0; - pIdxInfo->estimatedCost = SQLITE_BIG_DBL / 2.0; + /* ((double)2) In case of SQLITE_OMIT_FLOATING_POINT... */ + pIdxInfo->estimatedCost = SQLITE_BIG_DBL / ((double)2); nOrderBy = pIdxInfo->nOrderBy; - if( pIdxInfo->nOrderBy && !orderByUsable ){ - *(int*)&pIdxInfo->nOrderBy = 0; + if( !pOrderBy ){ + pIdxInfo->nOrderBy = 0; } - sqlite3SafetyOff(pParse->db); - WHERETRACE(("xBestIndex for %s\n", pTab->zName)); - TRACE_IDX_INPUTS(pIdxInfo); - rc = pTab->pVtab->pModule->xBestIndex(pTab->pVtab, pIdxInfo); - TRACE_IDX_OUTPUTS(pIdxInfo); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ){ - sqlite3FailedMalloc(); - }else { - sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc)); - } - sqlite3SafetyOn(pParse->db); - }else{ - rc = sqlite3SafetyOn(pParse->db); + if( vtabBestIndex(pParse, pTab, pIdxInfo) ){ + return; } - *(int*)&pIdxInfo->nOrderBy = nOrderBy; - return pIdxInfo->estimatedCost; + /* The cost is not allowed to be larger than SQLITE_BIG_DBL (the + ** inital value of lowestCost in this loop. If it is, then the + ** (costestimatedCost ){ + pCost->rCost = (SQLITE_BIG_DBL/((double)2)); + }else{ + pCost->rCost = pIdxInfo->estimatedCost; + } + pCost->plan.u.pVtabIdx = pIdxInfo; + if( pIdxInfo->orderByConsumed ){ + pCost->plan.wsFlags |= WHERE_ORDERBY; + } + pCost->plan.nEq = 0; + pIdxInfo->nOrderBy = nOrderBy; + + /* Try to find a more efficient access pattern by using multiple indexes + ** to optimize an OR expression within the WHERE clause. + */ + bestOrClauseIndex(pParse, pWC, pSrc, notReady, pOrderBy, pCost); } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* -** Find the best index for accessing a particular table. Return a pointer -** to the index, flags that describe how the index should be used, the -** number of equality constraints, and the "cost" for this index. +** Find the query plan for accessing a particular table. Write the +** best query plan and its cost into the WhereCost object supplied as the +** last parameter. ** -** The lowest cost index wins. 
The cost is an estimate of the amount of -** CPU and disk I/O need to process the request using the selected index. +** The lowest cost plan wins. The cost is an estimate of the amount of +** CPU and disk I/O need to process the request using the selected plan. ** Factors that influence cost include: ** ** * The estimated number of rows that will be retrieved. (The @@ -1406,33 +1914,41 @@ ** * Whether or not there must be separate lookups in the ** index and in the main table. ** +** If there was an INDEXED BY clause (pSrc->pIndex) attached to the table in +** the SQL statement, then this function only considers plans using the +** named index. If no such plan is found, then the returned cost is +** SQLITE_BIG_DBL. If a plan is found that uses the named index, +** then the cost is calculated in the usual way. +** +** If a NOT INDEXED clause (pSrc->notIndexed!=0) was attached to the table +** in the SELECT statement, then no indexes are considered. However, the +** selected plan may still take advantage of the tables built-in rowid +** index. */ -static double bestIndex( +static void bestBtreeIndex( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause */ struct SrcList_item *pSrc, /* The FROM clause term to search */ Bitmask notReady, /* Mask of cursors that are not available */ - ExprList *pOrderBy, /* The order by clause */ - Index **ppIndex, /* Make *ppIndex point to the best index */ - int *pFlags, /* Put flags describing this choice in *pFlags */ - int *pnEq /* Put the number of == or IN constraints here */ + ExprList *pOrderBy, /* The ORDER BY clause */ + WhereCost *pCost /* Lowest cost query plan */ ){ - WhereTerm *pTerm; - Index *bestIdx = 0; /* Index that gives the lowest cost */ - double lowestCost; /* The cost of using bestIdx */ - int bestFlags = 0; /* Flags associated with bestIdx */ - int bestNEq = 0; /* Best value for nEq */ + WhereTerm *pTerm; /* A single term of the WHERE clause */ int iCur = pSrc->iCursor; /* The cursor of the table to be accessed */ Index *pProbe; /* An index we are evaluating */ int rev; /* True to scan in reverse order */ - int flags; /* Flags associated with pProbe */ + int wsFlags; /* Flags associated with pProbe */ int nEq; /* Number of == or IN constraints */ int eqTermMask; /* Mask of valid equality operators */ double cost; /* Cost of using pProbe */ + double nRow; /* Estimated number of rows in result set */ + int i; /* Loop counter */ - WHERETRACE(("bestIndex: tbl=%s notReady=%x\n", pSrc->pTab->zName, notReady)); - lowestCost = SQLITE_BIG_DBL; + WHERETRACE(("bestIndex: tbl=%s notReady=%llx\n", pSrc->pTab->zName,notReady)); pProbe = pSrc->pTab->pIndex; + if( pSrc->notIndexed ){ + pProbe = 0; + } /* If the table has no indices and there are no terms in the where ** clause that refer to the ROWID, then we will never be able to do @@ -1440,84 +1956,109 @@ ** well put it first in the join order. That way, perhaps it can be ** referenced by other tables in the join. */ + memset(pCost, 0, sizeof(*pCost)); if( pProbe==0 && findTerm(pWC, iCur, -1, 0, WO_EQ|WO_IN|WO_LT|WO_LE|WO_GT|WO_GE,0)==0 && (pOrderBy==0 || !sortableByRowid(iCur, pOrderBy, pWC->pMaskSet, &rev)) ){ - *pFlags = 0; - *ppIndex = 0; - *pnEq = 0; - return 0.0; - } - - /* Check for a rowid=EXPR or rowid IN (...) constraints - */ - pTerm = findTerm(pWC, iCur, -1, notReady, WO_EQ|WO_IN, 0); - if( pTerm ){ - Expr *pExpr; - *ppIndex = 0; - bestFlags = WHERE_ROWID_EQ; - if( pTerm->eOperator & WO_EQ ){ - /* Rowid== is always the best pick. Look no further. 
Because only - ** a single row is generated, output is always in sorted order */ - *pFlags = WHERE_ROWID_EQ | WHERE_UNIQUE; - *pnEq = 1; - WHERETRACE(("... best is rowid\n")); - return 0.0; - }else if( (pExpr = pTerm->pExpr)->pList!=0 ){ - /* Rowid IN (LIST): cost is NlogN where N is the number of list - ** elements. */ - lowestCost = pExpr->pList->nExpr; - lowestCost *= estLog(lowestCost); - }else{ - /* Rowid IN (SELECT): cost is NlogN where N is the number of rows - ** in the result of the inner select. We have no way to estimate - ** that value so make a wild guess. */ - lowestCost = 200; + if( pParse->db->flags & SQLITE_ReverseOrder ){ + /* For application testing, randomly reverse the output order for + ** SELECT statements that omit the ORDER BY clause. This will help + ** to find cases where + */ + pCost->plan.wsFlags |= WHERE_REVERSE; } - WHERETRACE(("... rowid IN cost: %.9g\n", lowestCost)); + return; } + pCost->rCost = SQLITE_BIG_DBL; - /* Estimate the cost of a table scan. If we do not know how many - ** entries are in the table, use 1 million as a guess. - */ - cost = pProbe ? pProbe->aiRowEst[0] : 1000000; - WHERETRACE(("... table scan base cost: %.9g\n", cost)); - flags = WHERE_ROWID_RANGE; - - /* Check for constraints on a range of rowids in a table scan. + /* Check for a rowid=EXPR or rowid IN (...) constraints. If there was + ** an INDEXED BY clause attached to this table, skip this step. */ - pTerm = findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE|WO_GT|WO_GE, 0); - if( pTerm ){ - if( findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE, 0) ){ - flags |= WHERE_TOP_LIMIT; - cost /= 3; /* Guess that rowidpIndex ){ + pTerm = findTerm(pWC, iCur, -1, notReady, WO_EQ|WO_IN, 0); + if( pTerm ){ + Expr *pExpr; + pCost->plan.wsFlags = WHERE_ROWID_EQ; + if( pTerm->eOperator & WO_EQ ){ + /* Rowid== is always the best pick. Look no further. Because only + ** a single row is generated, output is always in sorted order */ + pCost->plan.wsFlags = WHERE_ROWID_EQ | WHERE_UNIQUE; + pCost->plan.nEq = 1; + WHERETRACE(("... best is rowid\n")); + pCost->rCost = 0; + pCost->nRow = 1; + return; + }else if( !ExprHasProperty((pExpr = pTerm->pExpr), EP_xIsSelect) + && pExpr->x.pList + ){ + /* Rowid IN (LIST): cost is NlogN where N is the number of list + ** elements. */ + pCost->rCost = pCost->nRow = pExpr->x.pList->nExpr; + pCost->rCost *= estLog(pCost->rCost); + }else{ + /* Rowid IN (SELECT): cost is NlogN where N is the number of rows + ** in the result of the inner select. We have no way to estimate + ** that value so make a wild guess. */ + pCost->nRow = 100; + pCost->rCost = 200; + } + WHERETRACE(("... rowid IN cost: %.9g\n", pCost->rCost)); + } + + /* Estimate the cost of a table scan. If we do not know how many + ** entries are in the table, use 1 million as a guess. + */ + cost = pProbe ? pProbe->aiRowEst[0] : 1000000; + WHERETRACE(("... table scan base cost: %.9g\n", cost)); + wsFlags = WHERE_ROWID_RANGE; + + /* Check for constraints on a range of rowids in a table scan. + */ + pTerm = findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE|WO_GT|WO_GE, 0); + if( pTerm ){ + if( findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE, 0) ){ + wsFlags |= WHERE_TOP_LIMIT; + cost /= 3; /* Guess that rowidEXPR eliminates two-thirds of rows */ + } + WHERETRACE(("... 
rowid range reduces cost to %.9g\n", cost));
+    }else{
+      wsFlags = 0;
     }
-    if( findTerm(pWC, iCur, -1, notReady, WO_GT|WO_GE, 0) ){
-      flags |= WHERE_BTM_LIMIT;
-      cost /= 3;  /* Guess that rowid>EXPR eliminates two-thirds of rows */
+    nRow = cost;
+
+    /* If the table scan does not satisfy the ORDER BY clause, increase
+    ** the cost by NlogN to cover the expense of sorting. */
+    if( pOrderBy ){
+      if( sortableByRowid(iCur, pOrderBy, pWC->pMaskSet, &rev) ){
+        wsFlags |= WHERE_ORDERBY|WHERE_ROWID_RANGE;
+        if( rev ){
+          wsFlags |= WHERE_REVERSE;
+        }
+      }else{
+        cost += cost*estLog(cost);
+        WHERETRACE(("... sorting increases cost to %.9g\n", cost));
+      }
+    }else if( pParse->db->flags & SQLITE_ReverseOrder ){
+      /* For application testing, randomly reverse the output order for
+      ** SELECT statements that omit the ORDER BY clause.  This will help
+      ** to find cases where
+      */
+      wsFlags |= WHERE_REVERSE;
    }
-    WHERETRACE(("... rowid range reduces cost to %.9g\n", cost));
-  }else{
-    flags = 0;
-  }
 
-  /* If the table scan does not satisfy the ORDER BY clause, increase
-  ** the cost by NlogN to cover the expense of sorting. */
-  if( pOrderBy ){
-    if( sortableByRowid(iCur, pOrderBy, pWC->pMaskSet, &rev) ){
-      flags |= WHERE_ORDERBY|WHERE_ROWID_RANGE;
-      if( rev ){
-        flags |= WHERE_REVERSE;
-      }
-    }else{
-      cost += cost*estLog(cost);
-      WHERETRACE(("... sorting increases cost to %.9g\n", cost));
+    /* Remember this case if it is the best so far */
+    if( cost<pCost->rCost ){
+      pCost->rCost = cost;
+      pCost->nRow = nRow;
+      pCost->plan.wsFlags = wsFlags;
    }
  }
 
-  if( cost<lowestCost ){
-    lowestCost = cost;
-    bestFlags = flags;
-  }
-
-  for(; pProbe; pProbe=pProbe->pNext){
-    int i;                       /* Loop counter */
-    double inMultiplier = 1;
+  if( pSrc->pIndex ){
+    pProbe = pSrc->pIndex;
+  }
+  for(; pProbe; pProbe=(pSrc->pIndex ? 0 : pProbe->pNext)){
+    double inMultiplier = 1;  /* Number of equality look-ups needed */
+    int inMultIsEst = 0;      /* True if inMultiplier is an estimate */
 
     WHERETRACE(("... index %s:\n", pProbe->zName));
 
     /* Count the number of columns in the index that are satisfied
-    ** by x=EXPR constraints or x IN (...) constraints.
+    ** by x=EXPR or x IS NULL constraints or x IN (...) constraints.
+    ** For a term of the form x=EXPR or x IS NULL we only have to do
+    ** a single binary search.  But for x IN (...) we have to do a
+    ** number of binary searches
+    ** equal to the number of entries on the RHS of the IN operator.
+    ** The inMultiplier variable will try to estimate the number of
+    ** binary searches needed.
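**
** A back-of-the-envelope version of that estimate, with a crude logarithm
** in the spirit of estLog() (illustrative numbers only):
*/

#include <stdio.h>

/* Crude base-10 logarithm estimate; exactness does not matter because the
** inputs are themselves guesses. */
static double crudeLog(double N){
  double logN = 1.0;
  double x = 10.0;
  while( x<N ){ logN += 1.0; x *= 10.0; }
  return logN;
}

int main(void){
  double rowEst = 1000000;           /* guessed rows in the index              */
  double inMultiplier = 3;           /* "x IN (1,2,3)": three binary searches  */
  double nRow = 50*inMultiplier;     /* rows expected to match, summed         */
  double cost = nRow + inMultiplier*crudeLog(rowEst);
  printf("nRow=%.0f cost=%.1f\n", nRow, cost);
  return 0;
}

/*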
*/
-    flags = 0;
+    wsFlags = 0;
     for(i=0; i<pProbe->nColumn; i++){
       int j = pProbe->aiColumn[i];
       pTerm = findTerm(pWC, iCur, j, notReady, eqTermMask, pProbe);
       if( pTerm==0 ) break;
-      flags |= WHERE_COLUMN_EQ;
+      wsFlags |= WHERE_COLUMN_EQ;
       if( pTerm->eOperator & WO_IN ){
         Expr *pExpr = pTerm->pExpr;
-        flags |= WHERE_COLUMN_IN;
-        if( pExpr->pSelect!=0 ){
+        wsFlags |= WHERE_COLUMN_IN;
+        if( ExprHasProperty(pExpr, EP_xIsSelect) ){
           inMultiplier *= 25;
+          inMultIsEst = 1;
-        }else if( pExpr->pList!=0 ){
-          inMultiplier *= pExpr->pList->nExpr + 1;
+        }else if( pExpr->x.pList ){
+          inMultiplier *= pExpr->x.pList->nExpr + 1;
         }
+      }else if( pTerm->eOperator & WO_ISNULL ){
+        wsFlags |= WHERE_COLUMN_NULL;
       }
     }
-    cost = pProbe->aiRowEst[i] * inMultiplier * estLog(inMultiplier);
+    nRow = pProbe->aiRowEst[i] * inMultiplier;
+    /* If inMultiplier is an estimate and that estimate results in an
+    ** nRow that is more than half the number of rows in the table,
+    ** then reduce inMultiplier */
+    if( inMultIsEst && nRow*2 > pProbe->aiRowEst[0] ){
+      nRow = pProbe->aiRowEst[0]/2;
+      inMultiplier = nRow/pProbe->aiRowEst[i];
+    }
+    cost = nRow + inMultiplier*estLog(pProbe->aiRowEst[0]);
     nEq = i;
-    if( pProbe->onError!=OE_None && (flags & WHERE_COLUMN_IN)==0
-        && nEq==pProbe->nColumn ){
-      flags |= WHERE_UNIQUE;
+    if( pProbe->onError!=OE_None && nEq==pProbe->nColumn ){
+      testcase( wsFlags & WHERE_COLUMN_IN );
+      testcase( wsFlags & WHERE_COLUMN_NULL );
+      if( (wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_NULL))==0 ){
+        wsFlags |= WHERE_UNIQUE;
+      }
     }
-    WHERETRACE(("...... nEq=%d inMult=%.9g cost=%.9g\n", nEq, inMultiplier, cost));
+    WHERETRACE(("...... nEq=%d inMult=%.9g nRow=%.9g cost=%.9g\n",
+                nEq, inMultiplier, nRow, cost));
 
-    /* Look for range constraints
+    /* Look for range constraints.  Assume that each range constraint
+    ** makes the search space 1/3rd smaller.
     */
     if( nEq<pProbe->nColumn ){
       int j = pProbe->aiColumn[nEq];
       pTerm = findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE|WO_GT|WO_GE, pProbe);
       if( pTerm ){
-        flags |= WHERE_COLUMN_RANGE;
+        wsFlags |= WHERE_COLUMN_RANGE;
         if( findTerm(pWC, iCur, j, notReady, WO_LT|WO_LE, pProbe) ){
-          flags |= WHERE_TOP_LIMIT;
+          wsFlags |= WHERE_TOP_LIMIT;
           cost /= 3;
+          nRow /= 3;
         }
         if( findTerm(pWC, iCur, j, notReady, WO_GT|WO_GE, pProbe) ){
-          flags |= WHERE_BTM_LIMIT;
+          wsFlags |= WHERE_BTM_LIMIT;
           cost /= 3;
+          nRow /= 3;
         }
-        WHERETRACE(("...... range reduces cost to %.9g\n", cost));
+        WHERETRACE(("...... range reduces nRow to %.9g and cost to %.9g\n",
+                    nRow, cost));
       }
     }
 
     /* Add the additional cost of sorting if that is a factor.
     */
     if( pOrderBy ){
-      if( (flags & WHERE_COLUMN_IN)==0 &&
-          isSortingIndex(pParse,pWC->pMaskSet,pProbe,iCur,pOrderBy,nEq,&rev) ){
-        if( flags==0 ){
-          flags = WHERE_COLUMN_RANGE;
+      if( (wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_NULL))==0
+       && isSortingIndex(pParse,pWC->pMaskSet,pProbe,iCur,pOrderBy,nEq,&rev)
+      ){
+        if( wsFlags==0 ){
+          wsFlags = WHERE_COLUMN_RANGE;
         }
-        flags |= WHERE_ORDERBY;
+        wsFlags |= WHERE_ORDERBY;
         if( rev ){
-          flags |= WHERE_REVERSE;
+          wsFlags |= WHERE_REVERSE;
         }
       }else{
         cost += cost*estLog(cost);
         WHERETRACE(("...... orderby increases cost to %.9g\n", cost));
       }
+    }else if( wsFlags!=0 && (pParse->db->flags & SQLITE_ReverseOrder)!=0 ){
+      /* For application testing, randomly reverse the output order for
+      ** SELECT statements that omit the ORDER BY clause.  This will help
+      ** to find cases where
+      */
+      wsFlags |= WHERE_REVERSE;
     }
 
     /* Check to see if we can get away with using just the index without
     ** ever reading the table.
If that is the case, then halve the ** cost of this index. */ - if( flags && pSrc->colUsed < (((Bitmask)1)<<(BMS-1)) ){ + if( wsFlags && pSrc->colUsed < (((Bitmask)1)<<(BMS-1)) ){ Bitmask m = pSrc->colUsed; int j; for(j=0; jnColumn; j++){ @@ -1616,7 +2192,7 @@ } } if( m==0 ){ - flags |= WHERE_IDX_ONLY; + wsFlags |= WHERE_IDX_ONLY; cost /= 2; WHERETRACE(("...... idx-only reduces cost to %.9g\n", cost)); } @@ -1624,25 +2200,53 @@ /* If this index has achieved the lowest cost so far, then use it. */ - if( cost < lowestCost ){ - bestIdx = pProbe; - lowestCost = cost; - assert( flags!=0 ); - bestFlags = flags; - bestNEq = nEq; + if( wsFlags!=0 && cost < pCost->rCost ){ + pCost->rCost = cost; + pCost->nRow = nRow; + pCost->plan.wsFlags = wsFlags; + pCost->plan.nEq = nEq; + assert( pCost->plan.wsFlags & WHERE_INDEXED ); + pCost->plan.u.pIdx = pProbe; } } /* Report the best result */ - *ppIndex = bestIdx; - WHERETRACE(("best index is %s, cost=%.9g, flags=%x, nEq=%d\n", - bestIdx ? bestIdx->zName : "(none)", lowestCost, bestFlags, bestNEq)); - *pFlags = bestFlags | eqTermMask; - *pnEq = bestNEq; - return lowestCost; + pCost->plan.wsFlags |= eqTermMask; + WHERETRACE(("best index is %s, cost=%.9g, nrow=%.9g, wsFlags=%x, nEq=%d\n", + (pCost->plan.wsFlags & WHERE_INDEXED)!=0 ? + pCost->plan.u.pIdx->zName : "(none)", pCost->nRow, + pCost->rCost, pCost->plan.wsFlags, pCost->plan.nEq)); } +/* +** Find the query plan for accessing table pSrc->pTab. Write the +** best query plan and its cost into the WhereCost object supplied +** as the last parameter. This function may calculate the cost of +** both real and virtual table scans. +*/ +static void bestIndex( + Parse *pParse, /* The parsing context */ + WhereClause *pWC, /* The WHERE clause */ + struct SrcList_item *pSrc, /* The FROM clause term to search */ + Bitmask notReady, /* Mask of cursors that are not available */ + ExprList *pOrderBy, /* The ORDER BY clause */ + WhereCost *pCost /* Lowest cost query plan */ +){ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pSrc->pTab) ){ + sqlite3_index_info *p = 0; + bestVirtualIndex(pParse, pWC, pSrc, notReady, pOrderBy, pCost, &p); + if( p->needToFreeIdxStr ){ + sqlite3_free(p->idxStr); + } + sqlite3DbFree(pParse->db, p); + }else +#endif + { + bestBtreeIndex(pParse, pWC, pSrc, notReady, pOrderBy, pCost); + } +} /* ** Disable a term in the WHERE clause. Except, do not disable the term @@ -1669,10 +2273,10 @@ */ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ if( pTerm - && (pTerm->flags & TERM_CODED)==0 + && ALWAYS((pTerm->wtFlags & TERM_CODED)==0) && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin)) ){ - pTerm->flags |= TERM_CODED; + pTerm->wtFlags |= TERM_CODED; if( pTerm->iParent>=0 ){ WhereTerm *pOther = &pTerm->pWC->a[pTerm->iParent]; if( (--pOther->nChild)==0 ){ @@ -1683,20 +2287,17 @@ } /* -** Generate code that builds a probe for an index. -** -** There should be nColumn values on the stack. The index -** to be probed is pIdx. Pop the values from the stack and -** replace them all with a single record that is the index -** problem. -*/ -static void buildIndexProbe( - Vdbe *v, /* Generate code into this VM */ - int nColumn, /* The number of columns to check for NULL */ - Index *pIdx /* Index that we will be searching */ -){ - sqlite3VdbeAddOp(v, OP_MakeRecord, nColumn, 0); - sqlite3IndexAffinityStr(v, pIdx); +** Apply the affinities associated with the first n columns of index +** pIdx to the values in the n registers starting at base. 
+*/ +static void codeApplyAffinity(Parse *pParse, int base, int n, Index *pIdx){ + if( n>0 ){ + Vdbe *v = pParse->pVdbe; + assert( v!=0 ); + sqlite3VdbeAddOp2(v, OP_Affinity, base, n); + sqlite3IndexAffinityStr(v, pIdx); + sqlite3ExprCacheAffinityChange(pParse, base, n); + } } @@ -1705,51 +2306,64 @@ ** term can be either X=expr or X IN (...). pTerm is the term to be ** coded. ** -** The current value for the constraint is left on the top of the stack. +** The current value for the constraint is left in register iReg. ** ** For a constraint of the form X=expr, the expression is evaluated and its ** result is left on the stack. For constraints of the form X IN (...) ** this routine sets up a loop that will iterate over all values of X. */ -static void codeEqualityTerm( +static int codeEqualityTerm( Parse *pParse, /* The parsing context */ WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ - WhereLevel *pLevel /* When level of the FROM clause we are working on */ + WhereLevel *pLevel, /* When level of the FROM clause we are working on */ + int iTarget /* Attempt to leave results in this register */ ){ Expr *pX = pTerm->pExpr; Vdbe *v = pParse->pVdbe; + int iReg; /* Register holding results */ + + assert( iTarget>0 ); if( pX->op==TK_EQ ){ - sqlite3ExprCode(pParse, pX->pRight); + iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); }else if( pX->op==TK_ISNULL ){ - sqlite3VdbeAddOp(v, OP_Null, 0, 0); + iReg = iTarget; + sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); #ifndef SQLITE_OMIT_SUBQUERY }else{ + int eType; int iTab; struct InLoop *pIn; assert( pX->op==TK_IN ); - sqlite3CodeSubselect(pParse, pX); + iReg = iTarget; + eType = sqlite3FindInIndex(pParse, pX, 0); iTab = pX->iTable; - sqlite3VdbeAddOp(v, OP_Rewind, iTab, 0); - VdbeComment((v, "# %.*s", pX->span.n, pX->span.z)); - if( pLevel->nIn==0 ){ - pLevel->nxt = sqlite3VdbeMakeLabel(v); - } - pLevel->nIn++; - pLevel->aInLoop = sqliteReallocOrFree(pLevel->aInLoop, - sizeof(pLevel->aInLoop[0])*pLevel->nIn); - pIn = pLevel->aInLoop; + sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); + assert( pLevel->plan.wsFlags & WHERE_IN_ABLE ); + if( pLevel->u.in.nIn==0 ){ + pLevel->addrNxt = sqlite3VdbeMakeLabel(v); + } + pLevel->u.in.nIn++; + pLevel->u.in.aInLoop = + sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop, + sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); + pIn = pLevel->u.in.aInLoop; if( pIn ){ - pIn += pLevel->nIn - 1; + pIn += pLevel->u.in.nIn - 1; pIn->iCur = iTab; - pIn->topAddr = sqlite3VdbeAddOp(v, OP_Column, iTab, 0); - sqlite3VdbeAddOp(v, OP_IsNull, -1, 0); + if( eType==IN_INDEX_ROWID ){ + pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg); + }else{ + pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg); + } + sqlite3VdbeAddOp1(v, OP_IsNull, iReg); }else{ - pLevel->nIn = 0; + pLevel->u.in.nIn = 0; } #endif } disableTerm(pLevel, pTerm); + return iReg; } /* @@ -1761,69 +2375,637 @@ ** The index has as many as three equality constraints, but in this ** example, the third "c" value is an inequality. So only two ** constraints are coded. This routine will generate code to evaluate -** a==5 and b IN (1,2,3). The current values for a and b will be left -** on the stack - a is the deepest and b the shallowest. +** a==5 and b IN (1,2,3). The current values for a and b will be stored +** in consecutive registers and the index of the first register is returned. ** ** In the example above nEq==2. But this subroutine works for any value ** of nEq including 0. If nEq==0, this routine is nearly a no-op. 
** The only thing it does is allocate the pLevel->iMem memory cell. ** -** This routine always allocates at least one memory cell and puts -** the address of that memory cell in pLevel->iMem. The code that -** calls this routine will use pLevel->iMem to store the termination +** This routine always allocates at least one memory cell and returns +** the index of that memory cell. The code that +** calls this routine will use that memory cell to store the termination ** key value of the loop. If one or more IN operators appear, then ** this routine allocates an additional nEq memory cells for internal ** use. */ -static void codeAllEqualityTerms( +static int codeAllEqualityTerms( Parse *pParse, /* Parsing context */ WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */ WhereClause *pWC, /* The WHERE clause */ - Bitmask notReady /* Which parts of FROM have not yet been coded */ + Bitmask notReady, /* Which parts of FROM have not yet been coded */ + int nExtraReg /* Number of extra registers to allocate */ ){ - int nEq = pLevel->nEq; /* The number of == or IN constraints to code */ - int termsInMem = 0; /* If true, store value in mem[] cells */ - Vdbe *v = pParse->pVdbe; /* The virtual machine under construction */ - Index *pIdx = pLevel->pIdx; /* The index being used for this loop */ + int nEq = pLevel->plan.nEq; /* The number of == or IN constraints to code */ + Vdbe *v = pParse->pVdbe; /* The vm under construction */ + Index *pIdx; /* The index being used for this loop */ int iCur = pLevel->iTabCur; /* The cursor of the table */ WhereTerm *pTerm; /* A single constraint term */ int j; /* Loop counter */ + int regBase; /* Base register */ + int nReg; /* Number of registers to allocate */ + + /* This module is only called on query plans that use an index. */ + assert( pLevel->plan.wsFlags & WHERE_INDEXED ); + pIdx = pLevel->plan.u.pIdx; /* Figure out how many memory cells we will need then allocate them. - ** We always need at least one used to store the loop terminator - ** value. If there are IN operators we'll need one for each == or - ** IN constraint. - */ - pLevel->iMem = pParse->nMem++; - if( pLevel->flags & WHERE_COLUMN_IN ){ - pParse->nMem += pLevel->nEq; - termsInMem = 1; - } + */ + regBase = pParse->nMem + 1; + nReg = pLevel->plan.nEq + nExtraReg; + pParse->nMem += nReg; /* Evaluate the equality constraints */ assert( pIdx->nColumn>=nEq ); for(j=0; jaiColumn[j]; - pTerm = findTerm(pWC, iCur, k, notReady, pLevel->flags, pIdx); - if( pTerm==0 ) break; - assert( (pTerm->flags & TERM_CODED)==0 ); - codeEqualityTerm(pParse, pTerm, pLevel); - if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){ - sqlite3VdbeAddOp(v, OP_IsNull, termsInMem ? 
-1 : -(j+1), pLevel->brk); + pTerm = findTerm(pWC, iCur, k, notReady, pLevel->plan.wsFlags, pIdx); + if( NEVER(pTerm==0) ) break; + assert( (pTerm->wtFlags & TERM_CODED)==0 ); + r1 = codeEqualityTerm(pParse, pTerm, pLevel, regBase+j); + if( r1!=regBase+j ){ + if( nReg==1 ){ + sqlite3ReleaseTempReg(pParse, regBase); + regBase = r1; + }else{ + sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j); + } } - if( termsInMem ){ - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem+j+1, 1); + testcase( pTerm->eOperator & WO_ISNULL ); + testcase( pTerm->eOperator & WO_IN ); + if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){ + sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); } } + return regBase; +} - /* Make sure all the constraint values are on the top of the stack - */ - if( termsInMem ){ - for(j=0; jiMem+j+1, 0); +/* +** Generate code for the start of the iLevel-th loop in the WHERE clause +** implementation described by pWInfo. +*/ +static Bitmask codeOneLoopStart( + WhereInfo *pWInfo, /* Complete information about the WHERE clause */ + int iLevel, /* Which level of pWInfo->a[] should be coded */ + u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ + Bitmask notReady /* Which tables are currently available */ +){ + int j, k; /* Loop counters */ + int iCur; /* The VDBE cursor for the table */ + int addrNxt; /* Where to jump to continue with the next IN case */ + int omitTable; /* True if we use the index only */ + int bRev; /* True if we need to scan in reverse order */ + WhereLevel *pLevel; /* The where level to be coded */ + WhereClause *pWC; /* Decomposition of the entire WHERE clause */ + WhereTerm *pTerm; /* A WHERE clause term */ + Parse *pParse; /* Parsing context */ + Vdbe *v; /* The prepared stmt under constructions */ + struct SrcList_item *pTabItem; /* FROM clause term being coded */ + int addrBrk; /* Jump here to break out of the loop */ + int addrCont; /* Jump here to continue with next cycle */ + int iRowidReg = 0; /* Rowid is stored in this register, if not zero */ + int iReleaseReg = 0; /* Temp register to free before returning */ + + pParse = pWInfo->pParse; + v = pParse->pVdbe; + pWC = pWInfo->pWC; + pLevel = &pWInfo->a[iLevel]; + pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; + iCur = pTabItem->iCursor; + bRev = (pLevel->plan.wsFlags & WHERE_REVERSE)!=0; + omitTable = (pLevel->plan.wsFlags & WHERE_IDX_ONLY)!=0 + && (wctrlFlags & WHERE_FORCE_TABLE)==0; + + /* Create labels for the "break" and "continue" instructions + ** for the current loop. Jump to addrBrk to break out of a loop. + ** Jump to cont to go immediately to the next iteration of the + ** loop. + ** + ** When there is an IN operator, we also have a "addrNxt" label that + ** means to continue with the next IN value combination. When + ** there are no IN operators in the constraints, the "addrNxt" label + ** is the same as "addrBrk". + */ + addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v); + addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v); + + /* If this is the right table of a LEFT OUTER JOIN, allocate and + ** initialize a memory cell that records if this table matches any + ** row of the left table of the join. + */ + if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){ + pLevel->iLeftJoin = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); + VdbeComment((v, "init LEFT JOIN no-match flag")); + } + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( (pLevel->plan.wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + /* Case 0: The table is a virtual-table. 
Use the VFilter and VNext + ** to access the data. + */ + int iReg; /* P3 Value for OP_VFilter */ + sqlite3_index_info *pVtabIdx = pLevel->plan.u.pVtabIdx; + int nConstraint = pVtabIdx->nConstraint; + struct sqlite3_index_constraint_usage *aUsage = + pVtabIdx->aConstraintUsage; + const struct sqlite3_index_constraint *aConstraint = + pVtabIdx->aConstraint; + + iReg = sqlite3GetTempRange(pParse, nConstraint+2); + for(j=1; j<=nConstraint; j++){ + for(k=0; ka[iTerm].pExpr->pRight, iReg+j+1); + break; + } + } + if( k==nConstraint ) break; + } + sqlite3VdbeAddOp2(v, OP_Integer, pVtabIdx->idxNum, iReg); + sqlite3VdbeAddOp2(v, OP_Integer, j-1, iReg+1); + sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrBrk, iReg, pVtabIdx->idxStr, + pVtabIdx->needToFreeIdxStr ? P4_MPRINTF : P4_STATIC); + pVtabIdx->needToFreeIdxStr = 0; + for(j=0; ja[iTerm]); + } + } + pLevel->op = OP_VNext; + pLevel->p1 = iCur; + pLevel->p2 = sqlite3VdbeCurrentAddr(v); + sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2); + }else +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + + if( pLevel->plan.wsFlags & WHERE_ROWID_EQ ){ + /* Case 1: We can directly reference a single row using an + ** equality comparison against the ROWID field. Or + ** we reference multiple rows using a "rowid IN (...)" + ** construct. + */ + iReleaseReg = sqlite3GetTempReg(pParse); + pTerm = findTerm(pWC, iCur, -1, notReady, WO_EQ|WO_IN, 0); + assert( pTerm!=0 ); + assert( pTerm->pExpr!=0 ); + assert( pTerm->leftCursor==iCur ); + assert( omitTable==0 ); + iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, iReleaseReg); + addrNxt = pLevel->addrNxt; + sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); + sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg); + sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); + VdbeComment((v, "pk")); + pLevel->op = OP_Noop; + }else if( pLevel->plan.wsFlags & WHERE_ROWID_RANGE ){ + /* Case 2: We have an inequality comparison against the ROWID field. + */ + int testOp = OP_Noop; + int start; + int memEndValue = 0; + WhereTerm *pStart, *pEnd; + + assert( omitTable==0 ); + pStart = findTerm(pWC, iCur, -1, notReady, WO_GT|WO_GE, 0); + pEnd = findTerm(pWC, iCur, -1, notReady, WO_LT|WO_LE, 0); + if( bRev ){ + pTerm = pStart; + pStart = pEnd; + pEnd = pTerm; + } + if( pStart ){ + Expr *pX; /* The expression that defines the start bound */ + int r1, rTemp; /* Registers for holding the start boundary */ + + /* The following constant maps TK_xx codes into corresponding + ** seek opcodes. It depends on a particular ordering of TK_xx + */ + const u8 aMoveOp[] = { + /* TK_GT */ OP_SeekGt, + /* TK_LE */ OP_SeekLe, + /* TK_LT */ OP_SeekLt, + /* TK_GE */ OP_SeekGe + }; + assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */ + assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */ + assert( TK_GE==TK_GT+3 ); /* ... is correcct. */ + + pX = pStart->pExpr; + assert( pX!=0 ); + assert( pStart->leftCursor==iCur ); + r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp); + sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1); + VdbeComment((v, "pk")); + sqlite3ExprCacheAffinityChange(pParse, r1, 1); + sqlite3ReleaseTempReg(pParse, rTemp); + disableTerm(pLevel, pStart); + }else{ + sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk); + } + if( pEnd ){ + Expr *pX; + pX = pEnd->pExpr; + assert( pX!=0 ); + assert( pEnd->leftCursor==iCur ); + memEndValue = ++pParse->nMem; + sqlite3ExprCode(pParse, pX->pRight, memEndValue); + if( pX->op==TK_LT || pX->op==TK_GT ){ + testOp = bRev ? OP_Le : OP_Ge; + }else{ + testOp = bRev ? 
OP_Lt : OP_Gt; + } + disableTerm(pLevel, pEnd); + } + start = sqlite3VdbeCurrentAddr(v); + pLevel->op = bRev ? OP_Prev : OP_Next; + pLevel->p1 = iCur; + pLevel->p2 = start; + pLevel->p5 = (pStart==0 && pEnd==0) ?1:0; + if( testOp!=OP_Noop ){ + iRowidReg = iReleaseReg = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg); + sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); + sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg); + sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL); + } + }else if( pLevel->plan.wsFlags & (WHERE_COLUMN_RANGE|WHERE_COLUMN_EQ) ){ + /* Case 3: A scan using an index. + ** + ** The WHERE clause may contain zero or more equality + ** terms ("==" or "IN" operators) that refer to the N + ** left-most columns of the index. It may also contain + ** inequality constraints (>, <, >= or <=) on the indexed + ** column that immediately follows the N equalities. Only + ** the right-most column can be an inequality - the rest must + ** use the "==" and "IN" operators. For example, if the + ** index is on (x,y,z), then the following clauses are all + ** optimized: + ** + ** x=5 + ** x=5 AND y=10 + ** x=5 AND y<10 + ** x=5 AND y>5 AND y<10 + ** x=5 AND y=5 AND z<=10 + ** + ** The z<10 term of the following cannot be used, only + ** the x=5 term: + ** + ** x=5 AND z<10 + ** + ** N may be zero if there are inequality constraints. + ** If there are no inequality constraints, then N is at + ** least one. + ** + ** This case is also used when there are no WHERE clause + ** constraints but an index is selected anyway, in order + ** to force the output order to conform to an ORDER BY. + */ + int aStartOp[] = { + 0, + 0, + OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */ + OP_Last, /* 3: (!start_constraints && startEq && bRev) */ + OP_SeekGt, /* 4: (start_constraints && !startEq && !bRev) */ + OP_SeekLt, /* 5: (start_constraints && !startEq && bRev) */ + OP_SeekGe, /* 6: (start_constraints && startEq && !bRev) */ + OP_SeekLe /* 7: (start_constraints && startEq && bRev) */ + }; + int aEndOp[] = { + OP_Noop, /* 0: (!end_constraints) */ + OP_IdxGE, /* 1: (end_constraints && !bRev) */ + OP_IdxLT /* 2: (end_constraints && bRev) */ + }; + int nEq = pLevel->plan.nEq; + int isMinQuery = 0; /* If this is an optimized SELECT min(x).. */ + int regBase; /* Base register holding constraint values */ + int r1; /* Temp register */ + WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */ + WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */ + int startEq; /* True if range start uses ==, >= or <= */ + int endEq; /* True if range end uses ==, >= or <= */ + int start_constraints; /* Start of range is constrained */ + int nConstraint; /* Number of constraint terms */ + Index *pIdx; /* The index we will be using */ + int iIdxCur; /* The VDBE cursor for the index */ + int nExtraReg = 0; /* Number of extra registers needed */ + int op; /* Instruction opcode */ + + pIdx = pLevel->plan.u.pIdx; + iIdxCur = pLevel->iIdxCur; + k = pIdx->aiColumn[nEq]; /* Column for inequality constraints */ + + /* If this loop satisfies a sort order (pOrderBy) request that + ** was passed to this function to implement a "SELECT min(x) ..." + ** query, then the caller will only allow the loop to run for + ** a single iteration. This means that the first row returned + ** should not have a NULL value stored in 'x'. If column 'x' is + ** the first one after the nEq equality constraints in the index, + ** this requires some special handling. 
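    ** For illustration only: given an index on t(a,b), a query such as
    **
    **          SELECT min(b) FROM t WHERE a=5
    **
    ** can stop at the first index entry that has a=5 and a non-NULL b,
    ** because min() ignores NULLs.  The special handling below consists
    ** of reserving one extra register, loading a NULL into it, and
    ** forcing a strict (non-inclusive) initial seek so that the probe
    ** lands just past any a=5 entries whose b value is NULL.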
+ */ + if( (wctrlFlags&WHERE_ORDERBY_MIN)!=0 + && (pLevel->plan.wsFlags&WHERE_ORDERBY) + && (pIdx->nColumn>nEq) + ){ + /* assert( pOrderBy->nExpr==1 ); */ + /* assert( pOrderBy->a[0].pExpr->iColumn==pIdx->aiColumn[nEq] ); */ + isMinQuery = 1; + nExtraReg = 1; + } + + /* Find any inequality constraint terms for the start and end + ** of the range. + */ + if( pLevel->plan.wsFlags & WHERE_TOP_LIMIT ){ + pRangeEnd = findTerm(pWC, iCur, k, notReady, (WO_LT|WO_LE), pIdx); + nExtraReg = 1; + } + if( pLevel->plan.wsFlags & WHERE_BTM_LIMIT ){ + pRangeStart = findTerm(pWC, iCur, k, notReady, (WO_GT|WO_GE), pIdx); + nExtraReg = 1; + } + + /* Generate code to evaluate all constraint terms using == or IN + ** and store the values of those terms in an array of registers + ** starting at regBase. + */ + regBase = codeAllEqualityTerms(pParse, pLevel, pWC, notReady, nExtraReg); + addrNxt = pLevel->addrNxt; + + + /* If we are doing a reverse order scan on an ascending index, or + ** a forward order scan on a descending index, interchange the + ** start and end terms (pRangeStart and pRangeEnd). + */ + if( bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC) ){ + SWAP(WhereTerm *, pRangeEnd, pRangeStart); + } + + testcase( pRangeStart && pRangeStart->eOperator & WO_LE ); + testcase( pRangeStart && pRangeStart->eOperator & WO_GE ); + testcase( pRangeEnd && pRangeEnd->eOperator & WO_LE ); + testcase( pRangeEnd && pRangeEnd->eOperator & WO_GE ); + startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE); + endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE); + start_constraints = pRangeStart || nEq>0; + + /* Seek the index cursor to the start of the range. */ + nConstraint = nEq; + if( pRangeStart ){ + sqlite3ExprCode(pParse, pRangeStart->pExpr->pRight, regBase+nEq); + sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); + nConstraint++; + }else if( isMinQuery ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); + nConstraint++; + startEq = 0; + start_constraints = 1; + } + codeApplyAffinity(pParse, regBase, nConstraint, pIdx); + op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; + assert( op!=0 ); + testcase( op==OP_Rewind ); + testcase( op==OP_Last ); + testcase( op==OP_SeekGt ); + testcase( op==OP_SeekGe ); + testcase( op==OP_SeekLe ); + testcase( op==OP_SeekLt ); + sqlite3VdbeAddOp4(v, op, iIdxCur, addrNxt, regBase, + SQLITE_INT_TO_PTR(nConstraint), P4_INT32); + + /* Load the value for the inequality constraint at the end of the + ** range (if any). + */ + nConstraint = nEq; + if( pRangeEnd ){ + sqlite3ExprCacheRemove(pParse, regBase+nEq); + sqlite3ExprCode(pParse, pRangeEnd->pExpr->pRight, regBase+nEq); + sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); + codeApplyAffinity(pParse, regBase, nEq+1, pIdx); + nConstraint++; + } + + /* Top of the loop body */ + pLevel->p2 = sqlite3VdbeCurrentAddr(v); + + /* Check if the index cursor is past the end of the range. */ + op = aEndOp[(pRangeEnd || nEq) * (1 + bRev)]; + testcase( op==OP_Noop ); + testcase( op==OP_IdxGE ); + testcase( op==OP_IdxLT ); + if( op!=OP_Noop ){ + sqlite3VdbeAddOp4(v, op, iIdxCur, addrNxt, regBase, + SQLITE_INT_TO_PTR(nConstraint), P4_INT32); + sqlite3VdbeChangeP5(v, endEq!=bRev ?1:0); + } + + /* If there are inequality constraints, check that the value + ** of the table column that the inequality contrains is not NULL. + ** If it is, jump to the next iteration of the loop. 
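    ** For example, a row whose constrained column is NULL can never
    ** satisfy "x>5" or "x<10", because a comparison against NULL is
    ** neither true nor false; such rows are skipped here rather than
    ** being handed to the remaining WHERE clause tests.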
+ */ + r1 = sqlite3GetTempReg(pParse); + testcase( pLevel->plan.wsFlags & WHERE_BTM_LIMIT ); + testcase( pLevel->plan.wsFlags & WHERE_TOP_LIMIT ); + if( pLevel->plan.wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT) ){ + sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, nEq, r1); + sqlite3VdbeAddOp2(v, OP_IsNull, r1, addrCont); + } + sqlite3ReleaseTempReg(pParse, r1); + + /* Seek the table cursor, if required */ + disableTerm(pLevel, pRangeStart); + disableTerm(pLevel, pRangeEnd); + if( !omitTable ){ + iRowidReg = iReleaseReg = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); + sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); + sqlite3VdbeAddOp2(v, OP_Seek, iCur, iRowidReg); /* Deferred seek */ + } + + /* Record the instruction used to terminate the loop. Disable + ** WHERE clause terms made redundant by the index range scan. + */ + pLevel->op = bRev ? OP_Prev : OP_Next; + pLevel->p1 = iIdxCur; + }else + +#ifndef SQLITE_OMIT_OR_OPTIMIZATION + if( pLevel->plan.wsFlags & WHERE_MULTI_OR ){ + /* Case 4: Two or more separately indexed terms connected by OR + ** + ** Example: + ** + ** CREATE TABLE t1(a,b,c,d); + ** CREATE INDEX i1 ON t1(a); + ** CREATE INDEX i2 ON t1(b); + ** CREATE INDEX i3 ON t1(c); + ** + ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13) + ** + ** In the example, there are three indexed terms connected by OR. + ** The top of the loop looks like this: + ** + ** Null 1 # Zero the rowset in reg 1 + ** + ** Then, for each indexed term, the following. The arguments to + ** RowSetTest are such that the rowid of the current row is inserted + ** into the RowSet. If it is already present, control skips the + ** Gosub opcode and jumps straight to the code generated by WhereEnd(). + ** + ** sqlite3WhereBegin() + ** RowSetTest # Insert rowid into rowset + ** Gosub 2 A + ** sqlite3WhereEnd() + ** + ** Following the above, code to terminate the loop. Label A, the target + ** of the Gosub above, jumps to the instruction right after the Goto. + ** + ** Null 1 # Zero the rowset in reg 1 + ** Goto B # The loop is finished. + ** + ** A: # Return data, whatever. + ** + ** Return 2 # Jump back to the Gosub + ** + ** B: + ** + */ + WhereClause *pOrWc; /* The OR-clause broken out into subterms */ + WhereTerm *pFinal; /* Final subterm within the OR-clause. */ + SrcList oneTab; /* Shortened table list */ + + int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */ + int regRowset = 0; /* Register for RowSet object */ + int regRowid = 0; /* Register holding rowid */ + int iLoopBody = sqlite3VdbeMakeLabel(v); /* Start of loop body */ + int iRetInit; /* Address of regReturn init */ + int ii; + + pTerm = pLevel->plan.u.pTerm; + assert( pTerm!=0 ); + assert( pTerm->eOperator==WO_OR ); + assert( (pTerm->wtFlags & TERM_ORINFO)!=0 ); + pOrWc = &pTerm->u.pOrInfo->wc; + pFinal = &pOrWc->a[pOrWc->nTerm-1]; + + /* Set up a SrcList containing just the table being scanned by this loop. */ + oneTab.nSrc = 1; + oneTab.nAlloc = 1; + oneTab.a[0] = *pTabItem; + + /* Initialize the rowset register to contain NULL. An SQL NULL is + ** equivalent to an empty rowset. + ** + ** Also initialize regReturn to contain the address of the instruction + ** immediately following the OP_Return at the bottom of the loop. This + ** is required in a few obscure LEFT JOIN cases where control jumps + ** over the top of the loop into the body of it. 
In this case the + ** correct response for the end-of-loop code (the OP_Return) is to + ** fall through to the next instruction, just as an OP_Next does if + ** called on an uninitialized cursor. + */ + if( (wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ + regRowset = ++pParse->nMem; + regRowid = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset); + } + iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); + + for(ii=0; iinTerm; ii++){ + WhereTerm *pOrTerm = &pOrWc->a[ii]; + if( pOrTerm->leftCursor==iCur || pOrTerm->eOperator==WO_AND ){ + WhereInfo *pSubWInfo; /* Info for single OR-term scan */ + /* Loop through table entries that match term pOrTerm. */ + pSubWInfo = sqlite3WhereBegin(pParse, &oneTab, pOrTerm->pExpr, 0, + WHERE_OMIT_OPEN | WHERE_OMIT_CLOSE | WHERE_FORCE_TABLE); + if( pSubWInfo ){ + if( (wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ + int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); + int r; + r = sqlite3ExprCodeGetColumn(pParse, pTabItem->pTab, -1, iCur, + regRowid, 0); + sqlite3VdbeAddOp4(v, OP_RowSetTest, regRowset, + sqlite3VdbeCurrentAddr(v)+2, + r, SQLITE_INT_TO_PTR(iSet), P4_INT32); + } + sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody); + + /* Finish the loop through table entries that match term pOrTerm. */ + sqlite3WhereEnd(pSubWInfo); + } + } + } + sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v)); + /* sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset); */ + sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrBrk); + sqlite3VdbeResolveLabel(v, iLoopBody); + + pLevel->op = OP_Return; + pLevel->p1 = regReturn; + disableTerm(pLevel, pTerm); + }else +#endif /* SQLITE_OMIT_OR_OPTIMIZATION */ + + { + /* Case 5: There is no usable index. We must do a complete + ** scan of the entire table. + */ + static const u8 aStep[] = { OP_Next, OP_Prev }; + static const u8 aStart[] = { OP_Rewind, OP_Last }; + assert( bRev==0 || bRev==1 ); + assert( omitTable==0 ); + pLevel->op = aStep[bRev]; + pLevel->p1 = iCur; + pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk); + pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; + } + notReady &= ~getMask(pWC->pMaskSet, iCur); + + /* Insert code to test every subexpression that can be completely + ** computed using the current set of tables. + */ + k = 0; + for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ + Expr *pE; + testcase( pTerm->wtFlags & TERM_VIRTUAL ); + testcase( pTerm->wtFlags & TERM_CODED ); + if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; + if( (pTerm->prereqAll & notReady)!=0 ) continue; + pE = pTerm->pExpr; + assert( pE!=0 ); + if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ + continue; + } + sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL); + k = 1; + pTerm->wtFlags |= TERM_CODED; + } + + /* For a LEFT OUTER JOIN, generate code that will record the fact that + ** at least one row of the right table has matched the left table. 
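  ** A sketch of the mechanism: pLevel->iLeftJoin was initialized to 0 at
  ** the top of this loop ("init LEFT JOIN no-match flag") and the
  ** OP_Integer coded below sets it to 1 once a right-side row survives
  ** the join constraints.  If the flag is still 0 when the loop ends,
  ** sqlite3WhereEnd() supplies a NULL row for the right-side table, so
  ** that, for example,
  **
  **          SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a
  **
  ** still yields one output row for every row of t1.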
+ */ + if( pLevel->iLeftJoin ){ + pLevel->addrFirst = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin); + VdbeComment((v, "record LEFT JOIN hit")); + sqlite3ExprCacheClear(pParse); + for(pTerm=pWC->a, j=0; jnTerm; j++, pTerm++){ + testcase( pTerm->wtFlags & TERM_VIRTUAL ); + testcase( pTerm->wtFlags & TERM_CODED ); + if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; + if( (pTerm->prereqAll & notReady)!=0 ) continue; + assert( pTerm->pExpr ); + sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); + pTerm->wtFlags |= TERM_CODED; } } + sqlite3ReleaseTempReg(pParse, iReleaseReg); + + return notReady; } #if defined(SQLITE_TEST) @@ -1842,23 +3024,21 @@ /* ** Free a WhereInfo structure */ -static void whereInfoFree(WhereInfo *pWInfo){ +static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ if( pWInfo ){ int i; for(i=0; inLevel; i++){ sqlite3_index_info *pInfo = pWInfo->a[i].pIdxInfo; if( pInfo ){ + /* assert( pInfo->needToFreeIdxStr==0 || db->mallocFailed ); */ if( pInfo->needToFreeIdxStr ){ - /* Coverage: Don't think this can be reached. By the time this - ** function is called, the index-strings have been passed - ** to the vdbe layer for deletion. - */ sqlite3_free(pInfo->idxStr); } - sqliteFree(pInfo); + sqlite3DbFree(db, pInfo); } } - sqliteFree(pWInfo); + whereClauseClear(pWInfo->pWC); + sqlite3DbFree(db, pWInfo); } } @@ -1955,20 +3135,21 @@ Parse *pParse, /* The parser context */ SrcList *pTabList, /* A list of all tables to be scanned */ Expr *pWhere, /* The WHERE clause */ - ExprList **ppOrderBy /* An ORDER BY clause, or NULL */ + ExprList **ppOrderBy, /* An ORDER BY clause, or NULL */ + u16 wctrlFlags /* One of the WHERE_* flags defined in sqliteInt.h */ ){ int i; /* Loop counter */ + int nByteWInfo; /* Num. bytes allocated for WhereInfo struct */ WhereInfo *pWInfo; /* Will become the return value of this function */ Vdbe *v = pParse->pVdbe; /* The virtual database engine */ - int brk, cont = 0; /* Addresses used during code generation */ Bitmask notReady; /* Cursors that are not yet positioned */ - WhereTerm *pTerm; /* A single term in the WHERE clause */ - ExprMaskSet maskSet; /* The expression mask set */ - WhereClause wc; /* The WHERE clause is divided into these terms */ + WhereMaskSet *pMaskSet; /* The expression mask set */ + WhereClause *pWC; /* Decomposition of the WHERE clause */ struct SrcList_item *pTabItem; /* A single entry from pTabList */ WhereLevel *pLevel; /* A single level in the pWInfo list */ int iFrom; /* First unused FROM clause element */ - int andFlags; /* AND-ed combination of all wc.a[].flags */ + int andFlags; /* AND-ed combination of all pWC->a[].wtFlags */ + sqlite3 *db; /* Database connection */ /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask @@ -1978,44 +3159,91 @@ return 0; } - /* Split the WHERE clause into separate subexpressions where each - ** subexpression is separated by an AND operator. - */ - initMaskSet(&maskSet); - whereClauseInit(&wc, pParse, &maskSet); - whereSplit(&wc, pWhere, TK_AND); - /* Allocate and initialize the WhereInfo structure that will become the - ** return value. - */ - pWInfo = sqliteMalloc( sizeof(WhereInfo) + pTabList->nSrc*sizeof(WhereLevel)); - if( sqlite3MallocFailed() ){ - goto whereBeginNoMem; + ** return value. A single allocation is used to store the WhereInfo + ** struct, the contents of WhereInfo.a[], the WhereClause structure + ** and the WhereMaskSet structure. 
Since WhereClause contains an 8-byte + ** field (type Bitmask) it must be aligned on an 8-byte boundary on + ** some architectures. Hence the ROUND8() below. + */ + db = pParse->db; + nByteWInfo = ROUND8(sizeof(WhereInfo)+(pTabList->nSrc-1)*sizeof(WhereLevel)); + pWInfo = sqlite3DbMallocZero(db, + nByteWInfo + + sizeof(WhereClause) + + sizeof(WhereMaskSet) + ); + if( db->mallocFailed ){ + goto whereBeginError; } pWInfo->nLevel = pTabList->nSrc; pWInfo->pParse = pParse; pWInfo->pTabList = pTabList; pWInfo->iBreak = sqlite3VdbeMakeLabel(v); + pWInfo->pWC = pWC = (WhereClause *)&((u8 *)pWInfo)[nByteWInfo]; + pWInfo->wctrlFlags = wctrlFlags; + pMaskSet = (WhereMaskSet*)&pWC[1]; + /* Split the WHERE clause into separate subexpressions where each + ** subexpression is separated by an AND operator. + */ + initMaskSet(pMaskSet); + whereClauseInit(pWC, pParse, pMaskSet); + sqlite3ExprCodeConstants(pParse, pWhere); + whereSplit(pWC, pWhere, TK_AND); + /* Special case: a WHERE clause that is constant. Evaluate the ** expression and either jump over all of the code or fall thru. */ if( pWhere && (pTabList->nSrc==0 || sqlite3ExprIsConstantNotJoin(pWhere)) ){ - sqlite3ExprIfFalse(pParse, pWhere, pWInfo->iBreak, 1); + sqlite3ExprIfFalse(pParse, pWhere, pWInfo->iBreak, SQLITE_JUMPIFNULL); pWhere = 0; } + /* Assign a bit from the bitmask to every term in the FROM clause. + ** + ** When assigning bitmask values to FROM clause cursors, it must be + ** the case that if X is the bitmask for the N-th FROM clause term then + ** the bitmask for all FROM clause terms to the left of the N-th term + ** is (X-1). An expression from the ON clause of a LEFT JOIN can use + ** its Expr.iRightJoinTable value to find the bitmask of the right table + ** of the join. Subtracting one from the right table bitmask gives a + ** bitmask for all tables to the left of the join. Knowing the bitmask + ** for all tables to the left of a left join is important. Ticket #3015. + ** + ** Configure the WhereClause.vmask variable so that bits that correspond + ** to virtual table cursors are set. This is used to selectively disable + ** the OR-to-IN transformation in exprAnalyzeOrTerm(). It is not helpful + ** with virtual tables. + */ + assert( pWC->vmask==0 && pMaskSet->n==0 ); + for(i=0; inSrc; i++){ + createMask(pMaskSet, pTabList->a[i].iCursor); +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( ALWAYS(pTabList->a[i].pTab) && IsVirtual(pTabList->a[i].pTab) ){ + pWC->vmask |= ((Bitmask)1 << i); + } +#endif + } +#ifndef NDEBUG + { + Bitmask toTheLeft = 0; + for(i=0; inSrc; i++){ + Bitmask m = getMask(pMaskSet, pTabList->a[i].iCursor); + assert( (m-1)==toTheLeft ); + toTheLeft |= m; + } + } +#endif + /* Analyze all of the subexpressions. Note that exprAnalyze() might ** add new virtual terms onto the end of the WHERE clause. We do not ** want to analyze these virtual terms, so start analyzing at the end ** and work forward so that the added virtual terms are never processed. */ - for(i=0; inSrc; i++){ - createMask(&maskSet, pTabList->a[i].iCursor); - } - exprAnalyzeAll(pTabList, &wc); - if( sqlite3MallocFailed() ){ - goto whereBeginNoMem; + exprAnalyzeAll(pTabList, pWC); + if( db->mallocFailed ){ + goto whereBeginError; } /* Chose the best index to use for each table in the FROM clause. @@ -2023,11 +3251,12 @@ ** This loop fills in the following fields: ** ** pWInfo->a[].pIdx The index to use for this level of the loop. 
- ** pWInfo->a[].flags WHERE_xxx flags associated with pIdx + ** pWInfo->a[].wsFlags WHERE_xxx flags associated with pIdx ** pWInfo->a[].nEq The number of == and IN constraints - ** pWInfo->a[].iFrom When term of the FROM clause is being coded + ** pWInfo->a[].iFrom Which term of the FROM clause is being coded ** pWInfo->a[].iTabCur The VDBE cursor for the database table ** pWInfo->a[].iIdxCur The VDBE cursor for the index + ** pWInfo->a[].pTerm When wsFlags==WO_OR, the OR-clause term ** ** This loop also figures out the nesting order of tables in the FROM ** clause. @@ -2038,92 +3267,85 @@ andFlags = ~0; WHERETRACE(("*** Optimizer Start ***\n")); for(i=iFrom=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ + WhereCost bestPlan; /* Most efficient plan seen so far */ Index *pIdx; /* Index for FROM table at pTabItem */ - int flags; /* Flags asssociated with pIdx */ - int nEq; /* Number of == or IN constraints */ - double cost; /* The cost for pIdx */ int j; /* For looping over FROM tables */ - Index *pBest = 0; /* The best index seen so far */ - int bestFlags = 0; /* Flags associated with pBest */ - int bestNEq = 0; /* nEq associated with pBest */ - double lowestCost; /* Cost of the pBest */ int bestJ = 0; /* The value of j */ Bitmask m; /* Bitmask value for j or bestJ */ int once = 0; /* True when first table is seen */ - sqlite3_index_info *pIndex; /* Current virtual index */ - lowestCost = SQLITE_BIG_DBL; + memset(&bestPlan, 0, sizeof(bestPlan)); + bestPlan.rCost = SQLITE_BIG_DBL; for(j=iFrom, pTabItem=&pTabList->a[j]; jnSrc; j++, pTabItem++){ - int doNotReorder; /* True if this table should not be reordered */ + int doNotReorder; /* True if this table should not be reordered */ + WhereCost sCost; /* Cost information from best[Virtual]Index() */ + ExprList *pOrderBy; /* ORDER BY clause for index to optimize */ doNotReorder = (pTabItem->jointype & (JT_LEFT|JT_CROSS))!=0; if( once && doNotReorder ) break; - m = getMask(&maskSet, pTabItem->iCursor); + m = getMask(pMaskSet, pTabItem->iCursor); if( (m & notReady)==0 ){ if( j==iFrom ) iFrom++; continue; } + pOrderBy = ((i==0 && ppOrderBy )?*ppOrderBy:0); + assert( pTabItem->pTab ); #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTabItem->pTab) ){ - sqlite3_index_info **ppIdxInfo = &pWInfo->a[j].pIdxInfo; - cost = bestVirtualIndex(pParse, &wc, pTabItem, notReady, - ppOrderBy ? *ppOrderBy : 0, i==0, - ppIdxInfo); - flags = WHERE_VIRTUALTABLE; - pIndex = *ppIdxInfo; - if( pIndex && pIndex->orderByConsumed ){ - flags = WHERE_VIRTUALTABLE | WHERE_ORDERBY; - } - pIdx = 0; - nEq = 0; - if( (SQLITE_BIG_DBL/2.0)pBestIdx never set. - */ - cost = (SQLITE_BIG_DBL/2.0); - } + sqlite3_index_info **pp = &pWInfo->a[j].pIdxInfo; + bestVirtualIndex(pParse, pWC, pTabItem, notReady, pOrderBy, &sCost, pp); }else #endif { - cost = bestIndex(pParse, &wc, pTabItem, notReady, - (i==0 && ppOrderBy) ? 
*ppOrderBy : 0, - &pIdx, &flags, &nEq); - pIndex = 0; + bestBtreeIndex(pParse, pWC, pTabItem, notReady, pOrderBy, &sCost); } - if( costpBestIdx = pIndex; } if( doNotReorder ) break; } - WHERETRACE(("*** Optimizer choose table %d for loop %d\n", bestJ, + assert( once ); + assert( notReady & getMask(pMaskSet, pTabList->a[bestJ].iCursor) ); + WHERETRACE(("*** Optimizer selects table %d for loop %d\n", bestJ, pLevel-pWInfo->a)); - if( (bestFlags & WHERE_ORDERBY)!=0 ){ + if( (bestPlan.plan.wsFlags & WHERE_ORDERBY)!=0 ){ *ppOrderBy = 0; } - andFlags &= bestFlags; - pLevel->flags = bestFlags; - pLevel->pIdx = pBest; - pLevel->nEq = bestNEq; - pLevel->aInLoop = 0; - pLevel->nIn = 0; - if( pBest ){ + andFlags &= bestPlan.plan.wsFlags; + pLevel->plan = bestPlan.plan; + if( bestPlan.plan.wsFlags & WHERE_INDEXED ){ pLevel->iIdxCur = pParse->nTab++; }else{ pLevel->iIdxCur = -1; } - notReady &= ~getMask(&maskSet, pTabList->a[bestJ].iCursor); - pLevel->iFrom = bestJ; + notReady &= ~getMask(pMaskSet, pTabList->a[bestJ].iCursor); + pLevel->iFrom = (u8)bestJ; + + /* Check that if the table scanned by this loop iteration had an + ** INDEXED BY clause attached to it, that the named index is being + ** used for the scan. If not, then query compilation has failed. + ** Return an error. + */ + pIdx = pTabList->a[bestJ].pIndex; + if( pIdx ){ + if( (bestPlan.plan.wsFlags & WHERE_INDEXED)==0 ){ + sqlite3ErrorMsg(pParse, "cannot use index: %s", pIdx->zName); + goto whereBeginError; + }else{ + /* If an INDEXED BY clause is used, the bestIndex() function is + ** guaranteed to find the index specified in the INDEXED BY clause + ** if it find an index at all. */ + assert( bestPlan.plan.u.pIdx==pIdx ); + } + } } WHERETRACE(("*** Optimizer Finished ***\n")); + if( pParse->nErr || db->mallocFailed ){ + goto whereBeginError; + } /* If the total query only selects a single row, then the ORDER BY ** clause is irrelevant. @@ -2132,77 +3354,89 @@ *ppOrderBy = 0; } + /* If the caller is an UPDATE or DELETE statement that is requesting + ** to use a one-pass algorithm, determine if this is appropriate. + ** The one-pass algorithm only works if the WHERE clause constraints + ** the statement to update a single row. + */ + assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 ); + if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 && (andFlags & WHERE_UNIQUE)!=0 ){ + pWInfo->okOnePass = 1; + pWInfo->a[0].plan.wsFlags &= ~WHERE_IDX_ONLY; + } + /* Open all tables in the pTabList and any indices selected for ** searching those tables. 
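  ** Note that when the one-pass UPDATE/DELETE optimization was enabled
  ** just above (pWInfo->okOnePass), the table cursor is opened with
  ** OP_OpenWrite rather than OP_OpenRead, which lets the statement
  ** modify rows through the same cursor that the WHERE loop is using.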
*/ sqlite3CodeVerifySchema(pParse, -1); /* Insert the cookie verifier Goto */ for(i=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ Table *pTab; /* Table to open */ - Index *pIx; /* Index used to access pTab (if any) */ int iDb; /* Index of database containing table/index */ - int iIdxCur = pLevel->iIdxCur; #ifndef SQLITE_OMIT_EXPLAIN if( pParse->explain==2 ){ char *zMsg; struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; - zMsg = sqlite3MPrintf("TABLE %s", pItem->zName); + zMsg = sqlite3MPrintf(db, "TABLE %s", pItem->zName); if( pItem->zAlias ){ - zMsg = sqlite3MPrintf("%z AS %s", zMsg, pItem->zAlias); + zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias); } - if( (pIx = pLevel->pIdx)!=0 ){ - zMsg = sqlite3MPrintf("%z WITH INDEX %s", zMsg, pIx->zName); - }else if( pLevel->flags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ - zMsg = sqlite3MPrintf("%z USING PRIMARY KEY", zMsg); + if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s WITH INDEX %s", + zMsg, pLevel->plan.u.pIdx->zName); + }else if( pLevel->plan.wsFlags & WHERE_MULTI_OR ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s VIA MULTI-INDEX UNION", zMsg); + }else if( pLevel->plan.wsFlags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s USING PRIMARY KEY", zMsg); } #ifndef SQLITE_OMIT_VIRTUALTABLE - else if( pLevel->pBestIdx ){ - sqlite3_index_info *pBestIdx = pLevel->pBestIdx; - zMsg = sqlite3MPrintf("%z VIRTUAL TABLE INDEX %d:%s", zMsg, - pBestIdx->idxNum, pBestIdx->idxStr); + else if( (pLevel->plan.wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + sqlite3_index_info *pVtabIdx = pLevel->plan.u.pVtabIdx; + zMsg = sqlite3MAppendf(db, zMsg, "%s VIRTUAL TABLE INDEX %d:%s", zMsg, + pVtabIdx->idxNum, pVtabIdx->idxStr); } #endif - if( pLevel->flags & WHERE_ORDERBY ){ - zMsg = sqlite3MPrintf("%z ORDER BY", zMsg); + if( pLevel->plan.wsFlags & WHERE_ORDERBY ){ + zMsg = sqlite3MAppendf(db, zMsg, "%s ORDER BY", zMsg); } - sqlite3VdbeOp3(v, OP_Explain, i, pLevel->iFrom, zMsg, P3_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_Explain, i, pLevel->iFrom, 0, zMsg, P4_DYNAMIC); } #endif /* SQLITE_OMIT_EXPLAIN */ pTabItem = &pTabList->a[pLevel->iFrom]; pTab = pTabItem->pTab; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - if( pTab->isEphem || pTab->pSelect ) continue; + if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ) continue; #ifndef SQLITE_OMIT_VIRTUALTABLE - if( pLevel->pBestIdx ){ + if( (pLevel->plan.wsFlags & WHERE_VIRTUALTABLE)!=0 ){ int iCur = pTabItem->iCursor; - sqlite3VdbeOp3(v, OP_VOpen, iCur, 0, (const char*)pTab->pVtab, P3_VTAB); + sqlite3VdbeAddOp4(v, OP_VOpen, iCur, 0, 0, + (const char*)pTab->pVtab, P4_VTAB); }else #endif - if( (pLevel->flags & WHERE_IDX_ONLY)==0 ){ - sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, OP_OpenRead); - if( pTab->nCol<(sizeof(Bitmask)*8) ){ + if( (pLevel->plan.wsFlags & WHERE_IDX_ONLY)==0 + && (wctrlFlags & WHERE_OMIT_OPEN)==0 ){ + int op = pWInfo->okOnePass ? 
OP_OpenWrite : OP_OpenRead; + sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op); + if( !pWInfo->okOnePass && pTab->nColcolUsed; int n = 0; for(; b; b=b>>1, n++){} - sqlite3VdbeChangeP2(v, sqlite3VdbeCurrentAddr(v)-1, n); + sqlite3VdbeChangeP4(v, sqlite3VdbeCurrentAddr(v)-1, SQLITE_INT_TO_PTR(n), P4_INT32); assert( n<=pTab->nCol ); } }else{ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); } pLevel->iTabCur = pTabItem->iCursor; - if( (pIx = pLevel->pIdx)!=0 ){ + if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + Index *pIx = pLevel->plan.u.pIdx; KeyInfo *pKey = sqlite3IndexKeyinfo(pParse, pIx); + int iIdxCur = pLevel->iIdxCur; assert( pIx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp(v, OP_Integer, iDb, 0); - VdbeComment((v, "# %s", pIx->zName)); - sqlite3VdbeOp3(v, OP_OpenRead, iIdxCur, pIx->tnum, - (char*)pKey, P3_KEYINFO_HANDOFF); - } - if( (pLevel->flags & (WHERE_IDX_ONLY|WHERE_COLUMN_RANGE))!=0 ){ - /* Only call OP_SetNumColumns on the index if we might later use - ** OP_Column on the index. */ - sqlite3VdbeAddOp(v, OP_SetNumColumns, iIdxCur, pIx->nColumn+1); + assert( iIdxCur>=0 ); + sqlite3VdbeAddOp4(v, OP_OpenRead, iIdxCur, pIx->tnum, iDb, + (char*)pKey, P4_KEYINFO_HANDOFF); + VdbeComment((v, "%s", pIx->zName)); } sqlite3CodeVerifySchema(pParse, iDb); } @@ -2213,394 +3447,9 @@ ** program. */ notReady = ~(Bitmask)0; - for(i=0, pLevel=pWInfo->a; inSrc; i++, pLevel++){ - int j; - int iCur = pTabItem->iCursor; /* The VDBE cursor for the table */ - Index *pIdx; /* The index we will be using */ - int nxt; /* Where to jump to continue with the next IN case */ - int iIdxCur; /* The VDBE cursor for the index */ - int omitTable; /* True if we use the index only */ - int bRev; /* True if we need to scan in reverse order */ - - pTabItem = &pTabList->a[pLevel->iFrom]; - iCur = pTabItem->iCursor; - pIdx = pLevel->pIdx; - iIdxCur = pLevel->iIdxCur; - bRev = (pLevel->flags & WHERE_REVERSE)!=0; - omitTable = (pLevel->flags & WHERE_IDX_ONLY)!=0; - - /* Create labels for the "break" and "continue" instructions - ** for the current loop. Jump to brk to break out of a loop. - ** Jump to cont to go immediately to the next iteration of the - ** loop. - ** - ** When there is an IN operator, we also have a "nxt" label that - ** means to continue with the next IN value combination. When - ** there are no IN operators in the constraints, the "nxt" label - ** is the same as "brk". - */ - brk = pLevel->brk = pLevel->nxt = sqlite3VdbeMakeLabel(v); - cont = pLevel->cont = sqlite3VdbeMakeLabel(v); - - /* If this is the right table of a LEFT OUTER JOIN, allocate and - ** initialize a memory cell that records if this table matches any - ** row of the left table of the join. - */ - if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){ - if( !pParse->nMem ) pParse->nMem++; - pLevel->iLeftJoin = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemInt, 0, pLevel->iLeftJoin); - VdbeComment((v, "# init LEFT JOIN no-match flag")); - } - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pLevel->pBestIdx ){ - /* Case 0: The table is a virtual-table. Use the VFilter and VNext - ** to access the data. 
- */ - int j; - sqlite3_index_info *pBestIdx = pLevel->pBestIdx; - int nConstraint = pBestIdx->nConstraint; - struct sqlite3_index_constraint_usage *aUsage = - pBestIdx->aConstraintUsage; - const struct sqlite3_index_constraint *aConstraint = - pBestIdx->aConstraint; - - for(j=1; j<=nConstraint; j++){ - int k; - for(k=0; kpRight); - break; - } - } - if( k==nConstraint ) break; - } - sqlite3VdbeAddOp(v, OP_Integer, j-1, 0); - sqlite3VdbeAddOp(v, OP_Integer, pBestIdx->idxNum, 0); - sqlite3VdbeOp3(v, OP_VFilter, iCur, brk, pBestIdx->idxStr, - pBestIdx->needToFreeIdxStr ? P3_MPRINTF : P3_STATIC); - pBestIdx->needToFreeIdxStr = 0; - for(j=0; jnConstraint; j++){ - if( aUsage[j].omit ){ - int iTerm = aConstraint[j].iTermOffset; - disableTerm(pLevel, &wc.a[iTerm]); - } - } - pLevel->op = OP_VNext; - pLevel->p1 = iCur; - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - }else -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - - if( pLevel->flags & WHERE_ROWID_EQ ){ - /* Case 1: We can directly reference a single row using an - ** equality comparison against the ROWID field. Or - ** we reference multiple rows using a "rowid IN (...)" - ** construct. - */ - pTerm = findTerm(&wc, iCur, -1, notReady, WO_EQ|WO_IN, 0); - assert( pTerm!=0 ); - assert( pTerm->pExpr!=0 ); - assert( pTerm->leftCursor==iCur ); - assert( omitTable==0 ); - codeEqualityTerm(pParse, pTerm, pLevel); - nxt = pLevel->nxt; - sqlite3VdbeAddOp(v, OP_MustBeInt, 1, nxt); - sqlite3VdbeAddOp(v, OP_NotExists, iCur, nxt); - VdbeComment((v, "pk")); - pLevel->op = OP_Noop; - }else if( pLevel->flags & WHERE_ROWID_RANGE ){ - /* Case 2: We have an inequality comparison against the ROWID field. - */ - int testOp = OP_Noop; - int start; - WhereTerm *pStart, *pEnd; - - assert( omitTable==0 ); - pStart = findTerm(&wc, iCur, -1, notReady, WO_GT|WO_GE, 0); - pEnd = findTerm(&wc, iCur, -1, notReady, WO_LT|WO_LE, 0); - if( bRev ){ - pTerm = pStart; - pStart = pEnd; - pEnd = pTerm; - } - if( pStart ){ - Expr *pX; - pX = pStart->pExpr; - assert( pX!=0 ); - assert( pStart->leftCursor==iCur ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_ForceInt, pX->op==TK_LE || pX->op==TK_GT, brk); - sqlite3VdbeAddOp(v, bRev ? OP_MoveLt : OP_MoveGe, iCur, brk); - VdbeComment((v, "pk")); - disableTerm(pLevel, pStart); - }else{ - sqlite3VdbeAddOp(v, bRev ? OP_Last : OP_Rewind, iCur, brk); - } - if( pEnd ){ - Expr *pX; - pX = pEnd->pExpr; - assert( pX!=0 ); - assert( pEnd->leftCursor==iCur ); - sqlite3ExprCode(pParse, pX->pRight); - pLevel->iMem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - if( pX->op==TK_LT || pX->op==TK_GT ){ - testOp = bRev ? OP_Le : OP_Ge; - }else{ - testOp = bRev ? OP_Lt : OP_Gt; - } - disableTerm(pLevel, pEnd); - } - start = sqlite3VdbeCurrentAddr(v); - pLevel->op = bRev ? OP_Prev : OP_Next; - pLevel->p1 = iCur; - pLevel->p2 = start; - if( testOp!=OP_Noop ){ - sqlite3VdbeAddOp(v, OP_Rowid, iCur, 0); - sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, testOp, SQLITE_AFF_NUMERIC|0x100, brk); - } - }else if( pLevel->flags & WHERE_COLUMN_RANGE ){ - /* Case 3: The WHERE clause term that refers to the right-most - ** column of the index is an inequality. For example, if - ** the index is on (x,y,z) and the WHERE clause is of the - ** form "x=5 AND y<10" then this case is used. Only the - ** right-most column can be an inequality - the rest must - ** use the "==" and "IN" operators. 
- ** - ** This case is also used when there are no WHERE clause - ** constraints but an index is selected anyway, in order - ** to force the output order to conform to an ORDER BY. - */ - int start; - int nEq = pLevel->nEq; - int topEq=0; /* True if top limit uses ==. False is strictly < */ - int btmEq=0; /* True if btm limit uses ==. False if strictly > */ - int topOp, btmOp; /* Operators for the top and bottom search bounds */ - int testOp; - int topLimit = (pLevel->flags & WHERE_TOP_LIMIT)!=0; - int btmLimit = (pLevel->flags & WHERE_BTM_LIMIT)!=0; - - /* Generate code to evaluate all constraint terms using == or IN - ** and level the values of those terms on the stack. - */ - codeAllEqualityTerms(pParse, pLevel, &wc, notReady); - - /* Duplicate the equality term values because they will all be - ** used twice: once to make the termination key and once to make the - ** start key. - */ - for(j=0; j or >= - ** operator and the top bound is a < or <= operator. For a descending - ** index the operators are reversed. - */ - if( pIdx->aSortOrder[nEq]==SQLITE_SO_ASC ){ - topOp = WO_LT|WO_LE; - btmOp = WO_GT|WO_GE; - }else{ - topOp = WO_GT|WO_GE; - btmOp = WO_LT|WO_LE; - SWAP(int, topLimit, btmLimit); - } - - /* Generate the termination key. This is the key value that - ** will end the search. There is no termination key if there - ** are no equality terms and no "X<..." term. - ** - ** 2002-Dec-04: On a reverse-order scan, the so-called "termination" - ** key computed here really ends up being the start key. - */ - nxt = pLevel->nxt; - if( topLimit ){ - Expr *pX; - int k = pIdx->aiColumn[j]; - pTerm = findTerm(&wc, iCur, k, notReady, topOp, pIdx); - assert( pTerm!=0 ); - pX = pTerm->pExpr; - assert( (pTerm->flags & TERM_CODED)==0 ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_IsNull, -(nEq*2+1), nxt); - topEq = pTerm->eOperator & (WO_LE|WO_GE); - disableTerm(pLevel, pTerm); - testOp = OP_IdxGE; - }else{ - testOp = nEq>0 ? OP_IdxGE : OP_Noop; - topEq = 1; - } - if( testOp!=OP_Noop ){ - int nCol = nEq + topLimit; - pLevel->iMem = pParse->nMem++; - buildIndexProbe(v, nCol, pIdx); - if( bRev ){ - int op = topEq ? OP_MoveLe : OP_MoveLt; - sqlite3VdbeAddOp(v, op, iIdxCur, nxt); - }else{ - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - } - }else if( bRev ){ - sqlite3VdbeAddOp(v, OP_Last, iIdxCur, brk); - } - - /* Generate the start key. This is the key that defines the lower - ** bound on the search. There is no start key if there are no - ** equality terms and if there is no "X>..." term. In - ** that case, generate a "Rewind" instruction in place of the - ** start key search. - ** - ** 2002-Dec-04: In the case of a reverse-order search, the so-called - ** "start" key really ends up being used as the termination key. - */ - if( btmLimit ){ - Expr *pX; - int k = pIdx->aiColumn[j]; - pTerm = findTerm(&wc, iCur, k, notReady, btmOp, pIdx); - assert( pTerm!=0 ); - pX = pTerm->pExpr; - assert( (pTerm->flags & TERM_CODED)==0 ); - sqlite3ExprCode(pParse, pX->pRight); - sqlite3VdbeAddOp(v, OP_IsNull, -(nEq+1), nxt); - btmEq = pTerm->eOperator & (WO_LE|WO_GE); - disableTerm(pLevel, pTerm); - }else{ - btmEq = 1; - } - if( nEq>0 || btmLimit ){ - int nCol = nEq + btmLimit; - buildIndexProbe(v, nCol, pIdx); - if( bRev ){ - pLevel->iMem = pParse->nMem++; - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 1); - testOp = OP_IdxLT; - }else{ - int op = btmEq ? 
OP_MoveGe : OP_MoveGt; - sqlite3VdbeAddOp(v, op, iIdxCur, nxt); - } - }else if( bRev ){ - testOp = OP_Noop; - }else{ - sqlite3VdbeAddOp(v, OP_Rewind, iIdxCur, brk); - } - - /* Generate the the top of the loop. If there is a termination - ** key we have to test for that key and abort at the top of the - ** loop. - */ - start = sqlite3VdbeCurrentAddr(v); - if( testOp!=OP_Noop ){ - sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, testOp, iIdxCur, nxt); - if( (topEq && !bRev) || (!btmEq && bRev) ){ - sqlite3VdbeChangeP3(v, -1, "+", P3_STATIC); - } - } - if( topLimit | btmLimit ){ - sqlite3VdbeAddOp(v, OP_Column, iIdxCur, nEq); - sqlite3VdbeAddOp(v, OP_IsNull, 1, cont); - } - if( !omitTable ){ - sqlite3VdbeAddOp(v, OP_IdxRowid, iIdxCur, 0); - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); - } - - /* Record the instruction used to terminate the loop. - */ - pLevel->op = bRev ? OP_Prev : OP_Next; - pLevel->p1 = iIdxCur; - pLevel->p2 = start; - }else if( pLevel->flags & WHERE_COLUMN_EQ ){ - /* Case 4: There is an index and all terms of the WHERE clause that - ** refer to the index using the "==" or "IN" operators. - */ - int start; - int nEq = pLevel->nEq; - - /* Generate code to evaluate all constraint terms using == or IN - ** and leave the values of those terms on the stack. - */ - codeAllEqualityTerms(pParse, pLevel, &wc, notReady); - nxt = pLevel->nxt; - - /* Generate a single key that will be used to both start and terminate - ** the search - */ - buildIndexProbe(v, nEq, pIdx); - sqlite3VdbeAddOp(v, OP_MemStore, pLevel->iMem, 0); - - /* Generate code (1) to move to the first matching element of the table. - ** Then generate code (2) that jumps to "nxt" after the cursor is past - ** the last matching element of the table. The code (1) is executed - ** once to initialize the search, the code (2) is executed before each - ** iteration of the scan to see if the scan has finished. */ - if( bRev ){ - /* Scan in reverse order */ - sqlite3VdbeAddOp(v, OP_MoveLe, iIdxCur, nxt); - start = sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeAddOp(v, OP_IdxLT, iIdxCur, nxt); - pLevel->op = OP_Prev; - }else{ - /* Scan in the forward order */ - sqlite3VdbeAddOp(v, OP_MoveGe, iIdxCur, nxt); - start = sqlite3VdbeAddOp(v, OP_MemLoad, pLevel->iMem, 0); - sqlite3VdbeOp3(v, OP_IdxGE, iIdxCur, nxt, "+", P3_STATIC); - pLevel->op = OP_Next; - } - if( !omitTable ){ - sqlite3VdbeAddOp(v, OP_IdxRowid, iIdxCur, 0); - sqlite3VdbeAddOp(v, OP_MoveGe, iCur, 0); - } - pLevel->p1 = iIdxCur; - pLevel->p2 = start; - }else{ - /* Case 5: There is no usable index. We must do a complete - ** scan of the entire table. - */ - assert( omitTable==0 ); - assert( bRev==0 ); - pLevel->op = OP_Next; - pLevel->p1 = iCur; - pLevel->p2 = 1 + sqlite3VdbeAddOp(v, OP_Rewind, iCur, brk); - } - notReady &= ~getMask(&maskSet, iCur); - - /* Insert code to test every subexpression that can be completely - ** computed using the current set of tables. - */ - for(pTerm=wc.a, j=wc.nTerm; j>0; j--, pTerm++){ - Expr *pE; - if( pTerm->flags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & notReady)!=0 ) continue; - pE = pTerm->pExpr; - assert( pE!=0 ); - if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ - continue; - } - sqlite3ExprIfFalse(pParse, pE, cont, 1); - pTerm->flags |= TERM_CODED; - } - - /* For a LEFT OUTER JOIN, generate code that will record the fact that - ** at least one row of the right table has matched the left table. 
- */ - if( pLevel->iLeftJoin ){ - pLevel->top = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp(v, OP_MemInt, 1, pLevel->iLeftJoin); - VdbeComment((v, "# record LEFT JOIN hit")); - for(pTerm=wc.a, j=0; jflags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & notReady)!=0 ) continue; - assert( pTerm->pExpr ); - sqlite3ExprIfFalse(pParse, pTerm->pExpr, cont, 1); - pTerm->flags |= TERM_CODED; - } - } + for(i=0; inSrc; i++){ + notReady = codeOneLoopStart(pWInfo, i, wctrlFlags, notReady); + pWInfo->iContinue = pWInfo->a[i].addrCont; } #ifdef SQLITE_TEST /* For testing and debugging use only */ @@ -2617,9 +3466,9 @@ pTabItem = &pTabList->a[pLevel->iFrom]; z = pTabItem->zAlias; if( z==0 ) z = pTabItem->pTab->zName; - n = strlen(z); + n = sqlite3Strlen30(z); if( n+nQPlan < sizeof(sqlite3_query_plan)-10 ){ - if( pLevel->flags & WHERE_IDX_ONLY ){ + if( pLevel->plan.wsFlags & WHERE_IDX_ONLY ){ memcpy(&sqlite3_query_plan[nQPlan], "{}", 2); nQPlan += 2; }else{ @@ -2628,19 +3477,21 @@ } sqlite3_query_plan[nQPlan++] = ' '; } - if( pLevel->flags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ + testcase( pLevel->plan.wsFlags & WHERE_ROWID_EQ ); + testcase( pLevel->plan.wsFlags & WHERE_ROWID_RANGE ); + if( pLevel->plan.wsFlags & (WHERE_ROWID_EQ|WHERE_ROWID_RANGE) ){ memcpy(&sqlite3_query_plan[nQPlan], "* ", 2); nQPlan += 2; - }else if( pLevel->pIdx==0 ){ - memcpy(&sqlite3_query_plan[nQPlan], "{} ", 3); - nQPlan += 3; - }else{ - n = strlen(pLevel->pIdx->zName); + }else if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + n = sqlite3Strlen30(pLevel->plan.u.pIdx->zName); if( n+nQPlan < sizeof(sqlite3_query_plan)-2 ){ - memcpy(&sqlite3_query_plan[nQPlan], pLevel->pIdx->zName, n); + memcpy(&sqlite3_query_plan[nQPlan], pLevel->plan.u.pIdx->zName, n); nQPlan += n; sqlite3_query_plan[nQPlan++] = ' '; } + }else{ + memcpy(&sqlite3_query_plan[nQPlan], "{} ", 3); + nQPlan += 3; } } while( nQPlan>0 && sqlite3_query_plan[nQPlan-1]==' ' ){ @@ -2653,14 +3504,11 @@ /* Record the continuation address in the WhereInfo structure. Then ** clean up and return. */ - pWInfo->iContinue = cont; - whereClauseClear(&wc); return pWInfo; /* Jump here if malloc fails */ -whereBeginNoMem: - whereClauseClear(&wc); - whereInfoFree(pWInfo); +whereBeginError: + whereInfoFree(db, pWInfo); return 0; } @@ -2669,39 +3517,47 @@ ** sqlite3WhereBegin() for additional information. */ void sqlite3WhereEnd(WhereInfo *pWInfo){ - Vdbe *v = pWInfo->pParse->pVdbe; + Parse *pParse = pWInfo->pParse; + Vdbe *v = pParse->pVdbe; int i; WhereLevel *pLevel; SrcList *pTabList = pWInfo->pTabList; + sqlite3 *db = pParse->db; /* Generate loop termination code. 
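  ** Roughly, for a plain full-table scan the code emitted here closes
  ** the skeleton opened by codeOneLoopStart():
  **
  **        Rewind <table>  -> addrBrk     (coded by case 5 above)
  **          ... loop body ...
  **     addrCont:
  **        Next <table>    -> body        (pLevel->op/p1/p2, coded here)
  **     addrBrk:
  **
  ** The IN-operator sub-loops and the LEFT JOIN no-match handling shown
  ** below are woven in around addrNxt and addrBrk in the same pass.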
*/ + sqlite3ExprCacheClear(pParse); for(i=pTabList->nSrc-1; i>=0; i--){ pLevel = &pWInfo->a[i]; - sqlite3VdbeResolveLabel(v, pLevel->cont); + sqlite3VdbeResolveLabel(v, pLevel->addrCont); if( pLevel->op!=OP_Noop ){ - sqlite3VdbeAddOp(v, pLevel->op, pLevel->p1, pLevel->p2); + sqlite3VdbeAddOp2(v, pLevel->op, pLevel->p1, pLevel->p2); + sqlite3VdbeChangeP5(v, pLevel->p5); } - if( pLevel->nIn ){ + if( pLevel->plan.wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){ struct InLoop *pIn; int j; - sqlite3VdbeResolveLabel(v, pLevel->nxt); - for(j=pLevel->nIn, pIn=&pLevel->aInLoop[j-1]; j>0; j--, pIn--){ - sqlite3VdbeJumpHere(v, pIn->topAddr+1); - sqlite3VdbeAddOp(v, OP_Next, pIn->iCur, pIn->topAddr); - sqlite3VdbeJumpHere(v, pIn->topAddr-1); + sqlite3VdbeResolveLabel(v, pLevel->addrNxt); + for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){ + sqlite3VdbeJumpHere(v, pIn->addrInTop+1); + sqlite3VdbeAddOp2(v, OP_Next, pIn->iCur, pIn->addrInTop); + sqlite3VdbeJumpHere(v, pIn->addrInTop-1); } - sqliteFree(pLevel->aInLoop); + sqlite3DbFree(db, pLevel->u.in.aInLoop); } - sqlite3VdbeResolveLabel(v, pLevel->brk); + sqlite3VdbeResolveLabel(v, pLevel->addrBrk); if( pLevel->iLeftJoin ){ int addr; - addr = sqlite3VdbeAddOp(v, OP_IfMemPos, pLevel->iLeftJoin, 0); - sqlite3VdbeAddOp(v, OP_NullRow, pTabList->a[i].iCursor, 0); + addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); + sqlite3VdbeAddOp1(v, OP_NullRow, pTabList->a[i].iCursor); if( pLevel->iIdxCur>=0 ){ - sqlite3VdbeAddOp(v, OP_NullRow, pLevel->iIdxCur, 0); + sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); + } + if( pLevel->op==OP_Return ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst); + }else{ + sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrFirst); } - sqlite3VdbeAddOp(v, OP_Goto, 0, pLevel->top); sqlite3VdbeJumpHere(v, addr); } } @@ -2717,16 +3573,22 @@ struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; Table *pTab = pTabItem->pTab; assert( pTab!=0 ); - if( pTab->isEphem || pTab->pSelect ) continue; - if( (pLevel->flags & WHERE_IDX_ONLY)==0 ){ - sqlite3VdbeAddOp(v, OP_Close, pTabItem->iCursor, 0); - } - if( pLevel->pIdx!=0 ){ - sqlite3VdbeAddOp(v, OP_Close, pLevel->iIdxCur, 0); + if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ) continue; + if( (pWInfo->wctrlFlags & WHERE_OMIT_CLOSE)==0 ){ + if( !pWInfo->okOnePass && (pLevel->plan.wsFlags & WHERE_IDX_ONLY)==0 ){ + sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor); + } + if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 ){ + sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur); + } } - /* Make cursor substitutions for cases where we want to use - ** just the index and never reference the table. + /* If this scan uses an index, make code substitutions to read data + ** from the index in preference to the table. Sometimes, this means + ** the table need never be read from. This is a performance boost, + ** as the vdbe level waits until the table is read before actually + ** seeking the table cursor to the record corresponding to the current + ** position in the index. ** ** Calls to the code generator in between sqlite3WhereBegin and ** sqlite3WhereEnd will have created code that references the table @@ -2734,10 +3596,11 @@ ** that reference the table and converts them into opcodes that ** reference the index. 
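    ** An illustration, assuming a table t1(a,b,c) and CREATE INDEX i1
    ** ON t1(c): an "OP_Column t1cur 2" that reads column c is rewritten
    ** by the loop below as "OP_Column i1cur 0", since c is the first
    ** column of the index, and an "OP_Rowid t1cur" becomes an
    ** "OP_IdxRowid i1cur", the rowid being available from the index
    ** record itself.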
*/ - if( pLevel->flags & WHERE_IDX_ONLY ){ + if( (pLevel->plan.wsFlags & WHERE_INDEXED)!=0 && !db->mallocFailed){ int k, j, last; VdbeOp *pOp; - Index *pIdx = pLevel->pIdx; + Index *pIdx = pLevel->plan.u.pIdx; + int useIndexOnly = pLevel->plan.wsFlags & WHERE_IDX_ONLY; assert( pIdx!=0 ); pOp = sqlite3VdbeGetOp(v, pWInfo->iTop); @@ -2745,17 +3608,18 @@ for(k=pWInfo->iTop; kp1!=pLevel->iTabCur ) continue; if( pOp->opcode==OP_Column ){ - pOp->p1 = pLevel->iIdxCur; for(j=0; jnColumn; j++){ if( pOp->p2==pIdx->aiColumn[j] ){ pOp->p2 = j; + pOp->p1 = pLevel->iIdxCur; break; } } + assert(!useIndexOnly || jnColumn); }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; pOp->opcode = OP_IdxRowid; - }else if( pOp->opcode==OP_NullRow ){ + }else if( pOp->opcode==OP_NullRow && useIndexOnly ){ pOp->opcode = OP_Noop; } } @@ -2764,6 +3628,6 @@ /* Final cleanup */ - whereInfoFree(pWInfo); + whereInfoFree(db, pWInfo); return; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tclinstaller.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/tclinstaller.tcl --- sqlite3-3.4.2/tclinstaller.tcl 2007-06-12 13:18:00.000000000 +0100 +++ sqlite3-3.6.16/tclinstaller.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,32 +0,0 @@ -# This script attempts to install SQLite3 so that it can be used -# by TCL. Invoke this script with single argument which is the -# version number of SQLite. Example: -# -# tclsh tclinstaller.tcl 3.0 -# -set VERSION [lindex $argv 0] -set LIBFILE .libs/libtclsqlite3[info sharedlibextension] -if { ![info exists env(DESTDIR)] } { set env(DESTDIR) "" } -if { ![info exists env(TCLLIBDIR)] } { set env(TCLLIBDIR) [lindex $auto_path 0] } -set LIBDIR $env(DESTDIR)$env(TCLLIBDIR) -set LIBDIR_INSTALL $env(TCLLIBDIR) -set LIBNAME [file tail $LIBFILE] -set LIB $LIBDIR/sqlite3/$LIBNAME -set LIB_INSTALL $LIBDIR_INSTALL/sqlite3/$LIBNAME - -file delete -force $LIBDIR/sqlite3 -file mkdir $LIBDIR/sqlite3 -set fd [open $LIBDIR/sqlite3/pkgIndex.tcl w] -puts $fd "package ifneeded sqlite3 $VERSION \[list load $LIB_INSTALL sqlite3\]" -close $fd - -# We cannot use [file copy] because that will just make a copy of -# a symbolic link. We have to open and copy the file for ourselves. -# -set in [open $LIBFILE] -fconfigure $in -translation binary -set out [open $LIB w] -fconfigure $out -translation binary -puts -nonewline $out [read $in] -close $in -close $out diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/alias.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/alias.test --- sqlite3-3.4.2/test/alias.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/alias.test 2009-06-25 12:24:38.000000000 +0100 @@ -0,0 +1,140 @@ +# 2008 August 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. The +# focus of this script is correct code generation of aliased result-set +# values. See ticket #3343. +# +# $Id: alias.test,v 1.3 2009/04/23 13:22:44 drh Exp $ +# +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Aliases are currently evaluated twice. We might try to change this +# in the future. But not now. +return + +# A procedure to return a sequence of increasing integers. 
+# +namespace eval ::seq { + variable counter 0 + proc value {args} { + variable counter + incr counter + return $counter + } + proc reset {} { + variable counter + set counter 0 + } +} + + +do_test alias-1.1 { + db function sequence ::seq::value + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(9); + INSERT INTO t1 VALUES(8); + INSERT INTO t1 VALUES(7); + SELECT x, sequence() FROM t1; + } +} {9 1 8 2 7 3} +do_test alias-1.2 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 WHERE y>0 + } +} {9 1 8 2 7 3} +do_test alias-1.3 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 WHERE y>0 AND y<99 + } +} {9 1 8 2 7 3} +do_test alias-1.4 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 WHERE y>0 AND y<99 AND y!=55 + } +} {9 1 8 2 7 3} +do_test alias-1.5 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 + WHERE y>0 AND y<99 AND y!=55 AND y NOT IN (56,57,58) + AND y NOT LIKE 'abc%' AND y%10==2 + } +} {8 2} +do_test alias-1.6 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 WHERE y BETWEEN 0 AND 99 + } +} {9 1 8 2 7 3} +#do_test alias-1.7 { +# ::seq::reset +# db eval { +# SELECT x, sequence() AS y FROM t1 WHERE y IN (55,66,3) +# } +#} {7 3} +do_test alias-1.8 { + ::seq::reset + db eval { + SELECT x, 1-sequence() AS y FROM t1 ORDER BY y + } +} {7 -2 8 -1 9 0} +do_test alias-1.9 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 ORDER BY -y + } +} {7 3 8 2 9 1} +do_test alias-1.10 { + ::seq::reset + db eval { + SELECT x, sequence() AS y FROM t1 ORDER BY x%2, y + } +} {8 2 9 1 7 3} + +unset -nocomplain random_int_list +set random_int_list [db eval { + SELECT random()&2147483647 AS r FROM t1, t1, t1, t1 ORDER BY r +}] +do_test alias-1.11 { + lsort -integer $::random_int_list +} $random_int_list + + +do_test alias-2.1 { + db eval { + SELECT 4 UNION SELECT 1 ORDER BY 1 + } +} {1 4} +do_test alias-2.2 { + db eval { + SELECT 4 UNION SELECT 1 UNION SELECT 9 ORDER BY 1 + } +} {1 4 9} + +if 0 { + # Aliases in the GROUP BY clause cause the expression to be evaluated + # twice in the current implementation. This might change in the future. + # + do_test alias-3.1 { + ::seq::reset + db eval { + SELECT sequence(*) AS y, count(*) AS z FROM t1 GROUP BY y ORDER BY z, y + } + } {1 1 2 1 3 1} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/all.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/all.test --- sqlite3-3.4.2/test/all.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/all.test 2009-06-12 03:37:49.000000000 +0100 @@ -10,19 +10,20 @@ #*********************************************************************** # This file runs all tests. # -# $Id: all.test,v 1.44 2007/06/18 12:22:43 drh Exp $ +# $Id: all.test,v 1.62 2009/01/06 18:43:51 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl rename finish_test really_finish_test proc finish_test {} { - memleak_check + catch {db close} + show_memstats } if {[file exists ./sqlite_test_count]} { set COUNT [exec cat ./sqlite_test_count] } else { - set COUNT 3 + set COUNT 1 } if {[llength $argv]>0} { @@ -52,20 +53,20 @@ # set LeakList {} -set EXCLUDE { - all.test - async.test - crash.test - crash2.test - autovacuum_crash.test - quick.test - malloc.test - misuse.test - memleak.test - sqllimits1.test - fuzz.test - soak.test -} +set EXCLUDE {} +lappend EXCLUDE all.test ;# This file +lappend EXCLUDE async.test +lappend EXCLUDE crash.test ;# Run seperately later. +lappend EXCLUDE crash2.test ;# Run seperately later. 
+lappend EXCLUDE quick.test ;# Alternate test driver script +lappend EXCLUDE veryquick.test ;# Alternate test driver script +lappend EXCLUDE malloc.test ;# Run seperately later. +lappend EXCLUDE misuse.test ;# Run seperately later. +lappend EXCLUDE memleak.test ;# Alternate test driver script +lappend EXCLUDE permutations.test ;# Run seperately later. +lappend EXCLUDE soak.test ;# Takes a very long time (default 1 hr) +lappend EXCLUDE fts3.test ;# Wrapper for muliple fts3*.tests +lappend EXCLUDE mallocAll.test ;# Wrapper for running all malloc tests # Files to include in the test. If this list is empty then everything # that is not in the EXCLUDE list is run. @@ -73,25 +74,12 @@ set INCLUDE { } -# Test files btree2.test and btree4.test don't work if the -# SQLITE_DEFAULT_AUTOVACUUM macro is defined to true (because they depend -# on tables being allocated starting at page 2). -# -ifcapable default_autovacuum { - lappend EXCLUDE btree2.test - lappend EXCLUDE btree4.test -} - for {set Counter 0} {$Counter<$COUNT && $nErr==0} {incr Counter} { - if {$Counter%2} { - set ::SETUP_SQL {PRAGMA default_synchronous=off;} - } else { - catch {unset ::SETUP_SQL} - } foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] if {[lsearch -exact $EXCLUDE $tail]>=0} continue if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + reset_prng_state source $testfile catch {db close} if {$sqlite_open_file_count>0} { @@ -100,17 +88,14 @@ lappend ::failList $tail set sqlite_open_file_count 0 } - if {$::sqlite3_tsd_count} { - puts "Thread-specific data leak: $::sqlite3_tsd_count instances" - incr nErr - lappend ::failList $tail - set ::sqlite3_tsd_count 0 - } } if {[info exists Leak]} { lappend LeakList $Leak } } +set argv all +source $testdir/permutations.test +set argv "" # Do one last test to look for a memory leak in the library. This will # only work if SQLite is compiled with the -DSQLITE_DEBUG=1 flag. @@ -138,7 +123,9 @@ source $testdir/crash.test source $testdir/crash2.test ifcapable !default_autovacuum { - source $testdir/autovacuum_crash.test + set argv autovacuum_crash + source $testdir/permutations.test + set argv "" } } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/alter2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/alter2.test --- sqlite3-3.4.2/test/alter2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/alter2.test 2009-06-25 12:22:33.000000000 +0100 @@ -13,7 +13,7 @@ # file format change that may be used in the future to implement # "ALTER TABLE ... ADD COLUMN". # -# $Id: alter2.test,v 1.8 2007/05/08 01:08:49 drh Exp $ +# $Id: alter2.test,v 1.14 2009/04/07 14:14:22 danielk1977 Exp $ # set testdir [file dirname $argv0] @@ -22,8 +22,7 @@ # We have to have pragmas in order to do this test ifcapable {!pragma} return -# These tests do not work if there is a codec. The -# btree_open command does not know how to handle codecs. +# These tests do not work if there is a codec. # #if {[catch {sqlite3 -has_codec} r] || $r} return @@ -42,23 +41,17 @@ # to $newval. Also, the schema cookie is incremented. 
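The rewritten set_file_format and get_file_format helpers in this hunk stop going through btree_open and instead patch the database header directly: the schema cookie is the 4-byte big-endian integer at byte offset 40 of the file, and the schema format number ("file format") is the one at offset 44, which is exactly what the hexio_write/hexio_read calls below touch. A minimal C sketch of the same header read, offered only as an illustration (the file name and the lack of error handling are assumptions of the example, not part of this patch):

/* Illustrative sketch only (not part of this patch): read the schema cookie
** (byte offset 40) and the schema format number (byte offset 44) from the
** 100-byte SQLite database header.  Both are 4-byte big-endian integers.
** The file name "test.db" is just the example used by these tests. */
#include <stdio.h>

static unsigned int read_be32(const unsigned char *p){
  return ((unsigned int)p[0]<<24) | ((unsigned int)p[1]<<16)
       | ((unsigned int)p[2]<<8)  |  (unsigned int)p[3];
}

int main(void){
  unsigned char hdr[100];
  FILE *f = fopen("test.db", "rb");
  if( f==0 || fread(hdr, 1, sizeof(hdr), f)!=sizeof(hdr) ){
    if( f ) fclose(f);
    fprintf(stderr, "cannot read database header\n");
    return 1;
  }
  fclose(f);
  printf("schema cookie = %u\n", read_be32(&hdr[40]));
  printf("file format   = %u\n", read_be32(&hdr[44]));
  return 0;
}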
# proc set_file_format {newval} { - set bt [btree_open test.db 10 0] - btree_begin_transaction $bt - set meta [btree_get_meta $bt] - lset meta 2 $newval ;# File format - lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie - eval "btree_update_meta $bt $meta" - btree_commit $bt - btree_close $bt + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} } # This procedure returns the value of the file-format in file 'test.db'. # proc get_file_format {{fname test.db}} { - set bt [btree_open $fname 10 0] - set meta [btree_get_meta $bt] - btree_close $bt - lindex $meta 2 + return [hexio_get_int [hexio_read $fname 44 4]] } # This procedure sets the SQL statement stored for table $tbl in the @@ -68,11 +61,13 @@ # proc alter_table {tbl sql {file_format 2}} { sqlite3 dbat test.db - dbat eval { + set s [string map {' ''} $sql] + set t [string map {' ''} $tbl] + dbat eval [subst { PRAGMA writable_schema = 1; - UPDATE sqlite_master SET sql = $sql WHERE name = $tbl AND type = 'table'; + UPDATE sqlite_master SET sql = '$s' WHERE name = '$t' AND type = 'table'; PRAGMA writable_schema = 0; - } + }] dbat close set_file_format 2 } @@ -225,6 +220,8 @@ SELECT * FROM clog; } } {{} 2 {} 6 {} 10} +} else { + execsql { CREATE TABLE abc3(a, b); } } #--------------------------------------------------------------------- @@ -234,15 +231,28 @@ # file format requires incrementing the schema cookie. # do_test alter2-4.1 { + db close set_file_format 5 + sqlite3 db test.db } {} do_test alter2-4.2 { - catchsql { - SELECT * FROM sqlite_master; - } + # We have to run two queries here because the Tcl interface uses + # sqlite3_prepare_v2(). In this case, the first query encounters an + # SQLITE_SCHEMA error. Then, when trying to recompile the statement, the + # "unsupported file format" error is encountered. So the error code + # returned is SQLITE_SCHEMA, not SQLITE_ERROR as required by the following + # test case. + # + # When the query is attempted a second time, the same error message is + # returned but the error code is SQLITE_ERROR, because the unsupported + # file format was detected during a call to sqlite3_prepare(), not + # sqlite3_step(). + # + catchsql { SELECT * FROM sqlite_master; } + catchsql { SELECT * FROM sqlite_master; } } {1 {unsupported file format}} do_test alter2-4.3 { - sqlite3_errcode $::DB + sqlite3_errcode db } {SQLITE_ERROR} do_test alter2-4.4 { set ::DB [sqlite3_connection_pointer db] @@ -251,7 +261,7 @@ } } {1 {unsupported file format}} do_test alter2-4.5 { - sqlite3_errcode $::DB + sqlite3_errcode db } {SQLITE_ERROR} #--------------------------------------------------------------------- @@ -259,18 +269,21 @@ # resets the file format to 1. # set default_file_format [expr $SQLITE_DEFAULT_FILE_FORMAT==4 ? 
4 : 1] -do_test alter2-5.1 { - set_file_format 2 - get_file_format -} {2} -do_test alter2-5.2 { - execsql { - VACUUM; - } -} {} -do_test alter2-5.3 { - get_file_format -} $default_file_format +ifcapable vacuum { + do_test alter2-5.1 { + set_file_format 2 + db close + sqlite3 db test.db + execsql {SELECT 1 FROM sqlite_master LIMIT 1;} + get_file_format + } {2} + do_test alter2-5.2 { + execsql { VACUUM } + } {} + do_test alter2-5.3 { + get_file_format + } $default_file_format +} #--------------------------------------------------------------------- # Test that when a database with file-format 2 is opened, new @@ -280,18 +293,19 @@ db close set_file_format 2 sqlite3 db test.db - set ::DB [sqlite3_connection_pointer db] get_file_format } {2} -do_test alter2-6.2 { - file delete -force test2.db-journal - file delete -force test2.db - execsql { - ATTACH 'test2.db' AS aux; - CREATE TABLE aux.t1(a, b); - } - get_file_format test2.db -} $default_file_format +ifcapable attach { + do_test alter2-6.2 { + file delete -force test2.db-journal + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1(a, b); + } + get_file_format test2.db + } $default_file_format +} do_test alter2-6.3 { execsql { CREATE TABLE t1(a, b); @@ -397,38 +411,40 @@ #----------------------------------------------------------------------- # Test creating an index on a column added with a default value. # -do_test alter2-10.1 { - execsql { - CREATE TABLE t2(a); - INSERT INTO t2 VALUES('a'); - INSERT INTO t2 VALUES('b'); - INSERT INTO t2 VALUES('c'); - INSERT INTO t2 VALUES('d'); - } - alter_table t2 {CREATE TABLE t2(a, b DEFAULT X'ABCD', c DEFAULT NULL);} 3 - catchsql { - SELECT * FROM sqlite_master; - } - execsql { - SELECT quote(a), quote(b), quote(c) FROM t2 LIMIT 1; - } -} {'a' X'ABCD' NULL} -do_test alter2-10.2 { - execsql { - CREATE INDEX i1 ON t2(b); - SELECT a FROM t2 WHERE b = X'ABCD'; - } -} {a b c d} -do_test alter2-10.3 { - execsql { - DELETE FROM t2 WHERE a = 'c'; - SELECT a FROM t2 WHERE b = X'ABCD'; - } -} {a b d} -do_test alter2-10.4 { - execsql { - SELECT count(b) FROM t2 WHERE b = X'ABCD'; - } -} {3} +ifcapable bloblit { + do_test alter2-10.1 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES('a'); + INSERT INTO t2 VALUES('b'); + INSERT INTO t2 VALUES('c'); + INSERT INTO t2 VALUES('d'); + } + alter_table t2 {CREATE TABLE t2(a, b DEFAULT X'ABCD', c DEFAULT NULL);} 3 + catchsql { + SELECT * FROM sqlite_master; + } + execsql { + SELECT quote(a), quote(b), quote(c) FROM t2 LIMIT 1; + } + } {'a' X'ABCD' NULL} + do_test alter2-10.2 { + execsql { + CREATE INDEX i1 ON t2(b); + SELECT a FROM t2 WHERE b = X'ABCD'; + } + } {a b c d} + do_test alter2-10.3 { + execsql { + DELETE FROM t2 WHERE a = 'c'; + SELECT a FROM t2 WHERE b = X'ABCD'; + } + } {a b d} + do_test alter2-10.4 { + execsql { + SELECT count(b) FROM t2 WHERE b = X'ABCD'; + } + } {3} +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/alter3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/alter3.test --- sqlite3-3.4.2/test/alter3.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/alter3.test 2009-06-05 18:02:52.000000000 +0100 @@ -13,7 +13,7 @@ # file format change that may be used in the future to implement # "ALTER TABLE ... ADD COLUMN". # -# $Id: alter3.test,v 1.9 2006/01/17 09:35:02 danielk1977 Exp $ +# $Id: alter3.test,v 1.11 2008/03/19 00:21:31 drh Exp $ # set testdir [file dirname $argv0] @@ -50,10 +50,7 @@ # This procedure returns the value of the file-format in file 'test.db'. 
# proc get_file_format {{fname test.db}} { - set bt [btree_open $fname 10 0] - set meta [btree_get_meta $bt] - btree_close $bt - lindex $meta 2 + return [hexio_get_int [hexio_read $fname 44 4]] } do_test alter3-1.1 { @@ -236,71 +233,73 @@ } } {} -do_test alter3-5.1 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - CREATE TABLE t1(a, b); - INSERT INTO t1 VALUES(1, 'one'); - INSERT INTO t1 VALUES(2, 'two'); - ATTACH 'test2.db' AS aux; - CREATE TABLE aux.t1 AS SELECT * FROM t1; - PRAGMA aux.schema_version = 30; - SELECT sql FROM aux.sqlite_master; - } -} {{CREATE TABLE t1(a,b)}} -do_test alter3-5.2 { - execsql { - ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128); - SELECT sql FROM aux.sqlite_master; +ifcapable attach { + do_test alter3-5.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1 AS SELECT * FROM t1; + PRAGMA aux.schema_version = 30; + SELECT sql FROM aux.sqlite_master; + } + } {{CREATE TABLE t1(a,b)}} + do_test alter3-5.2 { + execsql { + ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128); + SELECT sql FROM aux.sqlite_master; + } + } {{CREATE TABLE t1(a,b, c VARCHAR(128))}} + do_test alter3-5.3 { + execsql { + SELECT * FROM aux.t1; + } + } {1 one {} 2 two {}} + ifcapable schema_version { + do_test alter3-5.4 { + execsql { + PRAGMA aux.schema_version; + } + } {31} } -} {{CREATE TABLE t1(a,b, c VARCHAR(128))}} -do_test alter3-5.3 { - execsql { - SELECT * FROM aux.t1; + if {!$has_codec} { + do_test alter3-5.5 { + list [get_file_format test2.db] [get_file_format] + } {2 3} } -} {1 one {} 2 two {}} -ifcapable schema_version { - do_test alter3-5.4 { + do_test alter3-5.6 { execsql { - PRAGMA aux.schema_version; + ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000; + SELECT sql FROM aux.sqlite_master; } - } {31} -} -if {!$has_codec} { - do_test alter3-5.5 { - list [get_file_format test2.db] [get_file_format] - } {2 3} -} -do_test alter3-5.6 { - execsql { - ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000; - SELECT sql FROM aux.sqlite_master; - } -} {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}} -do_test alter3-5.7 { - execsql { - SELECT * FROM aux.t1; + } {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}} + do_test alter3-5.7 { + execsql { + SELECT * FROM aux.t1; + } + } {1 one {} 1000 2 two {} 1000} + ifcapable schema_version { + do_test alter3-5.8 { + execsql { + PRAGMA aux.schema_version; + } + } {32} } -} {1 one {} 1000 2 two {} 1000} -ifcapable schema_version { - do_test alter3-5.8 { + do_test alter3-5.9 { + execsql { + SELECT * FROM t1; + } + } {1 one 2 two} + do_test alter3-5.99 { execsql { - PRAGMA aux.schema_version; + DROP TABLE aux.t1; + DROP TABLE t1; } - } {32} + } {} } -do_test alter3-5.9 { - execsql { - SELECT * FROM t1; - } -} {1 one 2 two} -do_test alter3-5.99 { - execsql { - DROP TABLE aux.t1; - DROP TABLE t1; - } -} {} #---------------------------------------------------------------- # Test that the table schema is correctly reloaded when a column diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/alter4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/alter4.test --- sqlite3-3.4.2/test/alter4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/alter4.test 2009-02-02 18:03:22.000000000 +0000 @@ -0,0 +1,397 @@ +# 2009 February 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing that SQLite can handle a subtle +# file format change that may be used in the future to implement +# "ALTER TABLE ... ADD COLUMN". +# +# $Id: alter4.test,v 1.1 2009/02/02 18:03:22 drh Exp $ +# + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl + +# If SQLITE_OMIT_ALTERTABLE is defined, omit this file. +ifcapable !altertable { + finish_test + return +} + +# Determine if there is a codec available on this test. +# +if {[catch {sqlite3 -has_codec} r] || $r} { + set has_codec 1 +} else { + set has_codec 0 +} + + +# Test Organisation: +# ------------------ +# +# alter4-1.*: Test that ALTER TABLE correctly modifies the CREATE TABLE sql. +# alter4-2.*: Test error messages. +# alter4-3.*: Test adding columns with default value NULL. +# alter4-4.*: Test adding columns with default values other than NULL. +# alter4-5.*: Test adding columns to tables in ATTACHed databases. +# alter4-6.*: Test that temp triggers are not accidentally dropped. +# alter4-7.*: Test that VACUUM resets the file-format. +# + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + return [hexio_get_int [hexio_read $fname 44 4]] +} + +do_test alter4-1.1 { + execsql { + CREATE TEMP TABLE abc(a, b, c); + SELECT sql FROM sqlite_temp_master; + } +} {{CREATE TABLE abc(a, b, c)}} +do_test alter4-1.2 { + execsql {ALTER TABLE abc ADD d INTEGER;} + execsql { + SELECT sql FROM sqlite_temp_master; + } +} {{CREATE TABLE abc(a, b, c, d INTEGER)}} +do_test alter4-1.3 { + execsql {ALTER TABLE abc ADD e} + execsql { + SELECT sql FROM sqlite_temp_master; + } +} {{CREATE TABLE abc(a, b, c, d INTEGER, e)}} +do_test alter4-1.4 { + execsql { + CREATE TABLE temp.t1(a, b); + ALTER TABLE t1 ADD c; + SELECT sql FROM sqlite_temp_master WHERE tbl_name = 't1'; + } +} {{CREATE TABLE t1(a, b, c)}} +do_test alter4-1.5 { + execsql { + ALTER TABLE t1 ADD d CHECK (a>d); + SELECT sql FROM sqlite_temp_master WHERE tbl_name = 't1'; + } +} {{CREATE TABLE t1(a, b, c, d CHECK (a>d))}} +ifcapable foreignkey { + do_test alter4-1.6 { + execsql { + CREATE TEMP TABLE t2(a, b, UNIQUE(a, b)); + ALTER TABLE t2 ADD c REFERENCES t1(c) ; + SELECT sql FROM sqlite_temp_master + WHERE tbl_name = 't2' AND type = 'table'; + } + } {{CREATE TABLE t2(a, b, c REFERENCES t1(c), UNIQUE(a, b))}} +} +do_test alter4-1.7 { + execsql { + CREATE TEMPORARY TABLE t3(a, b, UNIQUE(a, b)); + ALTER TABLE t3 ADD COLUMN c VARCHAR(10, 20); + SELECT sql FROM sqlite_temp_master + WHERE tbl_name = 't3' AND type = 'table'; + } +} {{CREATE TABLE t3(a, b, c VARCHAR(10, 20), UNIQUE(a, b))}} +do_test alter4-1.99 { + catchsql { + # May not exist if foriegn-keys are omitted at compile time. 
+ DROP TABLE t2; + } + execsql { + DROP TABLE abc; + DROP TABLE t1; + DROP TABLE t3; + } +} {} + +do_test alter4-2.1 { + execsql { + CREATE TABLE temp.t1(a, b); + } + catchsql { + ALTER TABLE t1 ADD c PRIMARY KEY; + } +} {1 {Cannot add a PRIMARY KEY column}} +do_test alter4-2.2 { + catchsql { + ALTER TABLE t1 ADD c UNIQUE + } +} {1 {Cannot add a UNIQUE column}} +do_test alter4-2.3 { + catchsql { + ALTER TABLE t1 ADD b VARCHAR(10) + } +} {1 {duplicate column name: b}} +do_test alter4-2.3 { + catchsql { + ALTER TABLE t1 ADD c NOT NULL; + } +} {1 {Cannot add a NOT NULL column with default value NULL}} +do_test alter4-2.4 { + catchsql { + ALTER TABLE t1 ADD c NOT NULL DEFAULT 10; + } +} {0 {}} +ifcapable view { + do_test alter4-2.5 { + execsql { + CREATE TEMPORARY VIEW v1 AS SELECT * FROM t1; + } + catchsql { + alter table v1 add column d; + } + } {1 {Cannot add a column to a view}} +} +do_test alter4-2.6 { + catchsql { + alter table t1 add column d DEFAULT CURRENT_TIME; + } +} {1 {Cannot add a column with non-constant default}} +do_test alter4-2.99 { + execsql { + DROP TABLE t1; + } +} {} + +do_test alter4-3.1 { + execsql { + CREATE TEMP TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 300); + SELECT * FROM t1; + } +} {1 100 2 300} +do_test alter4-3.1 { + execsql { + PRAGMA schema_version = 10; + } +} {} +do_test alter4-3.2 { + execsql { + ALTER TABLE t1 ADD c; + SELECT * FROM t1; + } +} {1 100 {} 2 300 {}} +if {!$has_codec} { + do_test alter4-3.3 { + get_file_format + } {3} +} +ifcapable schema_version { + do_test alter4-3.4 { + execsql { + PRAGMA schema_version; + } + } {10} +} + +do_test alter4-4.1 { + db close + file delete -force test.db + set ::DB [sqlite3 db test.db] + execsql { + CREATE TEMP TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 300); + SELECT * FROM t1; + } +} {1 100 2 300} +do_test alter4-4.1 { + execsql { + PRAGMA schema_version = 20; + } +} {} +do_test alter4-4.2 { + execsql { + ALTER TABLE t1 ADD c DEFAULT 'hello world'; + SELECT * FROM t1; + } +} {1 100 {hello world} 2 300 {hello world}} +if {!$has_codec} { + do_test alter4-4.3 { + get_file_format + } {3} +} +ifcapable schema_version { + do_test alter4-4.4 { + execsql { + PRAGMA schema_version; + } + } {20} +} +do_test alter4-4.99 { + execsql { + DROP TABLE t1; + } +} {} + +ifcapable attach { + do_test alter4-5.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + CREATE TEMP TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1 AS SELECT * FROM t1; + PRAGMA aux.schema_version = 30; + SELECT sql FROM aux.sqlite_master; + } + } {{CREATE TABLE t1(a,b)}} + do_test alter4-5.2 { + execsql { + ALTER TABLE aux.t1 ADD COLUMN c VARCHAR(128); + SELECT sql FROM aux.sqlite_master; + } + } {{CREATE TABLE t1(a,b, c VARCHAR(128))}} + do_test alter4-5.3 { + execsql { + SELECT * FROM aux.t1; + } + } {1 one {} 2 two {}} + ifcapable schema_version { + do_test alter4-5.4 { + execsql { + PRAGMA aux.schema_version; + } + } {31} + } + if {!$has_codec} { + do_test alter4-5.5 { + list [get_file_format test2.db] [get_file_format] + } {2 3} + } + do_test alter4-5.6 { + execsql { + ALTER TABLE aux.t1 ADD COLUMN d DEFAULT 1000; + SELECT sql FROM aux.sqlite_master; + } + } {{CREATE TABLE t1(a,b, c VARCHAR(128), d DEFAULT 1000)}} + do_test alter4-5.7 { + execsql { + SELECT * FROM aux.t1; + } + } {1 one {} 1000 2 two {} 1000} + ifcapable schema_version { + do_test alter4-5.8 { + 
execsql { + PRAGMA aux.schema_version; + } + } {32} + } + do_test alter4-5.9 { + execsql { + SELECT * FROM t1; + } + } {1 one 2 two} + do_test alter4-5.99 { + execsql { + DROP TABLE aux.t1; + DROP TABLE t1; + } + } {} +} + +#---------------------------------------------------------------- +# Test that the table schema is correctly reloaded when a column +# is added to a table. +# +ifcapable trigger&&tempdb { + do_test alter4-6.1 { + execsql { + CREATE TEMP TABLE t1(a, b); + CREATE TEMP TABLE log(trig, a, b); + + CREATE TRIGGER t1_a AFTER INSERT ON t1 BEGIN + INSERT INTO log VALUES('a', new.a, new.b); + END; + CREATE TEMP TRIGGER t1_b AFTER INSERT ON t1 BEGIN + INSERT INTO log VALUES('b', new.a, new.b); + END; + + INSERT INTO t1 VALUES(1, 2); + SELECT * FROM log; + } + } {b 1 2 a 1 2} + do_test alter4-6.2 { + execsql { + ALTER TABLE t1 ADD COLUMN c DEFAULT 'c'; + INSERT INTO t1(a, b) VALUES(3, 4); + SELECT * FROM log; + } + } {b 1 2 a 1 2 b 3 4 a 3 4} +} + +if {!$has_codec} { + ifcapable vacuum { + do_test alter4-7.1 { + execsql { + VACUUM; + } + get_file_format + } {1} + do_test alter4-7.2 { + execsql { + CREATE TEMP TABLE abc(a, b, c); + ALTER TABLE abc ADD d DEFAULT NULL; + } + get_file_format + } {2} + do_test alter4-7.3 { + execsql { + ALTER TABLE abc ADD e DEFAULT 10; + } + get_file_format + } {3} + do_test alter4-7.4 { + execsql { + ALTER TABLE abc ADD f DEFAULT NULL; + } + get_file_format + } {3} + do_test alter4-7.5 { + execsql { + VACUUM; + } + get_file_format + } {1} + } +} + +# Ticket #1183 - Make sure adding columns to large tables does not cause +# memory corruption (as was the case before this bug was fixed). +do_test alter4-8.1 { + execsql { + CREATE TEMP TABLE t4(c1); + } +} {} +set ::sql "" +do_test alter4-8.2 { + set cols c1 + for {set i 2} {$i < 100} {incr i} { + execsql " + ALTER TABLE t4 ADD c$i + " + lappend cols c$i + } + set ::sql "CREATE TABLE t4([join $cols {, }])" + list +} {} +do_test alter4-8.2 { + execsql { + SELECT sql FROM sqlite_temp_master WHERE name = 't4'; + } +} [list $::sql] + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/altermalloc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/altermalloc.test --- sqlite3-3.4.2/test/altermalloc.test 2007-05-15 04:29:07.000000000 +0100 +++ sqlite3-3.6.16/test/altermalloc.test 2009-06-12 03:37:50.000000000 +0100 @@ -12,103 +12,19 @@ # focus of this script is testing the ALTER TABLE statement and # specifically out-of-memory conditions within that command. # -# $Id: altermalloc.test,v 1.3 2006/09/04 18:54:14 drh Exp $ +# $Id: altermalloc.test,v 1.10 2008/10/30 17:21:13 danielk1977 Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG=1" - finish_test - return -} - # If SQLITE_OMIT_ALTERTABLE is defined, omit this file. -ifcapable !altertable { +ifcapable !altertable||!memdebug { finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. 
It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go} {incr ::n} { - - do_test $tn.$::n { - - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - set ::DB [sqlite3 db test.db] - - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - - set v [catch $::mallocbody msg] - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - set v {1 1} - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - sqlite_malloc_fail 0 - - if {[info exists ::mallocopts(-cleanup)]} { - catch $::mallocopts(-cleanup) - } - } - unset ::mallocopts -} +source $testdir/malloc_common.tcl do_malloc_test altermalloc-1 -tclprep { db close @@ -116,11 +32,40 @@ if {[catch {sqlite3 db test.db}]} { error "out of memory" } + sqlite3_db_config_lookaside db 0 0 0 + sqlite3_extended_result_codes db 1 } -sqlbody { CREATE TABLE t1(a int); ALTER TABLE t1 ADD COLUMN b INTEGER DEFAULT NULL; ALTER TABLE t1 ADD COLUMN c TEXT DEFAULT 'default-text'; ALTER TABLE t1 RENAME TO t2; + ALTER TABLE t2 ADD COLUMN d BLOB DEFAULT X'ABCD'; +} + +# Test malloc() failure on an ALTER TABLE on a virtual table. +# +ifcapable vtab { + do_malloc_test altermalloc-vtab -tclprep { + sqlite3 db2 test.db + sqlite3_db_config_lookaside db2 0 0 0 + sqlite3_extended_result_codes db2 1 + register_echo_module [sqlite3_connection_pointer db2] + db2 eval { + CREATE TABLE t1(a, b VARCHAR, c INTEGER); + CREATE VIRTUAL TABLE t1echo USING echo(t1); + } + db2 close + + register_echo_module [sqlite3_connection_pointer db] + } -tclbody { + set rc [catch {db eval { ALTER TABLE t1echo RENAME TO t1_echo }} msg] + if {$msg eq "vtable constructor failed: t1echo"} { + set msg "out of memory" + } + if {$rc} { + error $msg + } + } } finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/alter.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/alter.test --- sqlite3-3.4.2/test/alter.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/alter.test 2009-06-17 18:11:03.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is testing the ALTER TABLE statement. 
# -# $Id: alter.test,v 1.25 2007/05/15 16:51:37 drh Exp $ +# $Id: alter.test,v 1.32 2009/03/24 15:08:10 drh Exp $ # set testdir [file dirname $argv0] @@ -37,6 +37,8 @@ # alter-2.*: Test error conditions and messages. # alter-3.*: Test ALTER TABLE on tables that have TRIGGERs attached to them. # alter-4.*: Test ALTER TABLE on tables that have AUTOINCREMENT fields. +# ... +# alter-12.*: Test ALTER TABLE on views. # # Create some tables to rename. Be sure to include some TEMP tables @@ -203,50 +205,52 @@ # Check that ALTER TABLE works on attached databases. # -do_test alter-1.8.1 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - ATTACH 'test2.db' AS aux; - } -} {} -do_test alter-1.8.2 { - execsql { - CREATE TABLE t4(a PRIMARY KEY, b, c); - CREATE TABLE aux.t4(a PRIMARY KEY, b, c); - CREATE INDEX i4 ON t4(b); - CREATE INDEX aux.i4 ON t4(b); - } -} {} -do_test alter-1.8.3 { - execsql { - INSERT INTO t4 VALUES('main', 'main', 'main'); - INSERT INTO aux.t4 VALUES('aux', 'aux', 'aux'); - SELECT * FROM t4 WHERE a = 'main'; - } -} {main main main} -do_test alter-1.8.4 { - execsql { - ALTER TABLE t4 RENAME TO t5; - SELECT * FROM t4 WHERE a = 'aux'; - } -} {aux aux aux} -do_test alter-1.8.5 { - execsql { - SELECT * FROM t5; - } -} {main main main} -do_test alter-1.8.6 { - execsql { - SELECT * FROM t5 WHERE b = 'main'; - } -} {main main main} -do_test alter-1.8.7 { - execsql { - ALTER TABLE aux.t4 RENAME TO t5; - SELECT * FROM aux.t5 WHERE b = 'aux'; - } -} {aux aux aux} +ifcapable attach { + do_test alter-1.8.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + } + } {} + do_test alter-1.8.2 { + execsql { + CREATE TABLE t4(a PRIMARY KEY, b, c); + CREATE TABLE aux.t4(a PRIMARY KEY, b, c); + CREATE INDEX i4 ON t4(b); + CREATE INDEX aux.i4 ON t4(b); + } + } {} + do_test alter-1.8.3 { + execsql { + INSERT INTO t4 VALUES('main', 'main', 'main'); + INSERT INTO aux.t4 VALUES('aux', 'aux', 'aux'); + SELECT * FROM t4 WHERE a = 'main'; + } + } {main main main} + do_test alter-1.8.4 { + execsql { + ALTER TABLE t4 RENAME TO t5; + SELECT * FROM t4 WHERE a = 'aux'; + } + } {aux aux aux} + do_test alter-1.8.5 { + execsql { + SELECT * FROM t5; + } + } {main main main} + do_test alter-1.8.6 { + execsql { + SELECT * FROM t5 WHERE b = 'main'; + } + } {main main main} + do_test alter-1.8.7 { + execsql { + ALTER TABLE aux.t4 RENAME TO t5; + SELECT * FROM aux.t5 WHERE b = 'aux'; + } + } {aux aux aux} +} do_test alter-1.9.1 { execsql { @@ -396,36 +400,38 @@ # quoting. Otherwise the sqlite_alter_trigger() function might not work. 
file delete -force test3.db file delete -force test3.db-journal -do_test alter-3.2.1 { - catchsql { - ATTACH 'test3.db' AS ON; - } -} {1 {near "ON": syntax error}} -do_test alter-3.2.2 { - catchsql { - ATTACH 'test3.db' AS 'ON'; - } -} {0 {}} -do_test alter-3.2.3 { - catchsql { - CREATE TABLE ON.t1(a, b, c); - } -} {1 {near "ON": syntax error}} -do_test alter-3.2.4 { - catchsql { - CREATE TABLE 'ON'.t1(a, b, c); - } -} {0 {}} -do_test alter-3.2.4 { - catchsql { - CREATE TABLE 'ON'.ON(a, b, c); - } -} {1 {near "ON": syntax error}} -do_test alter-3.2.5 { - catchsql { - CREATE TABLE 'ON'.'ON'(a, b, c); - } -} {0 {}} +ifcapable attach { + do_test alter-3.2.1 { + catchsql { + ATTACH 'test3.db' AS ON; + } + } {1 {near "ON": syntax error}} + do_test alter-3.2.2 { + catchsql { + ATTACH 'test3.db' AS 'ON'; + } + } {0 {}} + do_test alter-3.2.3 { + catchsql { + CREATE TABLE ON.t1(a, b, c); + } + } {1 {near "ON": syntax error}} + do_test alter-3.2.4 { + catchsql { + CREATE TABLE 'ON'.t1(a, b, c); + } + } {0 {}} + do_test alter-3.2.4 { + catchsql { + CREATE TABLE 'ON'.ON(a, b, c); + } + } {1 {near "ON": syntax error}} + do_test alter-3.2.5 { + catchsql { + CREATE TABLE 'ON'.'ON'(a, b, c); + } + } {0 {}} +} do_test alter-3.2.6 { catchsql { CREATE TABLE t10(a, ON, c); @@ -441,11 +447,13 @@ CREATE TRIGGER trig4 AFTER INSERT ON ON BEGIN SELECT 1; END; } } {1 {near "ON": syntax error}} -do_test alter-3.2.9 { - catchsql { - CREATE TRIGGER 'on'.trig4 AFTER INSERT ON 'ON' BEGIN SELECT 1; END; - } -} {0 {}} +ifcapable attach { + do_test alter-3.2.9 { + catchsql { + CREATE TRIGGER 'on'.trig4 AFTER INSERT ON 'ON' BEGIN SELECT 1; END; + } + } {0 {}} +} do_test alter-3.2.10 { execsql { DROP TABLE t10; @@ -588,7 +596,7 @@ execsql " SELECT sql FROM sqlite_master WHERE oid = $::oid " -} "{CREATE TABLE '${::tbl_name2}'(a, b, c)}" +} "{CREATE TABLE \"${::tbl_name2}\"(a, b, c)}" do_test alter-6.4 { execsql " ALTER TABLE $::tbl_name2 RENAME TO $::tbl_name @@ -596,7 +604,7 @@ execsql " SELECT sql FROM sqlite_master WHERE oid = $::oid " -} "{CREATE TABLE '${::tbl_name}'(a, b, c)}" +} "{CREATE TABLE \"${::tbl_name}\"(a, b, c)}" set ::col_name ghi\1234\jkl do_test alter-6.5 { execsql " @@ -605,7 +613,7 @@ execsql " SELECT sql FROM sqlite_master WHERE oid = $::oid " -} "{CREATE TABLE '${::tbl_name}'(a, b, c, $::col_name VARCHAR)}" +} "{CREATE TABLE \"${::tbl_name}\"(a, b, c, $::col_name VARCHAR)}" set ::col_name2 B\3421\A do_test alter-6.6 { db close @@ -616,7 +624,7 @@ execsql " SELECT sql FROM sqlite_master WHERE oid = $::oid " -} "{CREATE TABLE '${::tbl_name}'(a, b, c, $::col_name VARCHAR, $::col_name2)}" +} "{CREATE TABLE \"${::tbl_name}\"(a, b, c, $::col_name VARCHAR, $::col_name2)}" do_test alter-6.7 { execsql " INSERT INTO ${::tbl_name} VALUES(1, 2, 3, 4, 5); @@ -632,10 +640,10 @@ CREATE TABLE t1(a TEXT COLLATE BINARY); ALTER TABLE t1 ADD COLUMN b INTEGER COLLATE NOCASE; INSERT INTO t1 VALUES(1,'-2'); - INSERT INTO t1 VALUES(5.4e-8,'5.4e-8'); + INSERT INTO t1 VALUES(5.4e-08,'5.4e-08'); SELECT typeof(a), a, typeof(b), b FROM t1; } -} {text 1 integer -2 text 5.4e-8 real 5.4e-08} +} {text 1 integer -2 text 5.4e-08 real 5.4e-08} # Make sure that when a column is added by ALTER TABLE ADD COLUMN and has # a default value that the default value is used by aggregate functions. 
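The alter.test hunk above ends by checking that a column added with ALTER TABLE ... ADD COLUMN and a DEFAULT clause reports that default for rows that existed before the column was added, so aggregate functions see it as well. A small sketch of the same behaviour through the public C API (in-memory database, made-up table name, return codes ignored for brevity; illustrative only, not part of this patch):

/* Illustrative sketch only (not part of this patch): rows that existed before
** ALTER TABLE ... ADD COLUMN ... DEFAULT report the default value, and
** aggregates see it too.  Return codes are ignored for brevity. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t(a);"
      "INSERT INTO t VALUES(1);"
      "INSERT INTO t VALUES(2);"
      "ALTER TABLE t ADD COLUMN b INTEGER DEFAULT 9;", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT count(b), sum(b) FROM t", -1, &stmt, 0);
  if( sqlite3_step(stmt)==SQLITE_ROW ){
    /* Expect count(b)=2 and sum(b)=18: both pre-existing rows expose b=9. */
    printf("count=%d sum=%d\n",
           sqlite3_column_int(stmt, 0), sqlite3_column_int(stmt, 1));
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}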
@@ -669,7 +677,7 @@ execsql { SELECT SQLITE_RENAME_TABLE(0,0); SELECT SQLITE_RENAME_TABLE(10,20); - SELECT SQLITE_RENAME_TABLE("foo", "foo"); + SELECT SQLITE_RENAME_TABLE('foo', 'foo'); } } {{} {} {}} @@ -752,5 +760,69 @@ } {0 {xyz abc 5 6}} } +do_test alter-12.1 { + execsql { + CREATE TABLE t12(a, b, c); + CREATE VIEW v1 AS SELECT * FROM t12; + } +} {} +do_test alter-12.2 { + catchsql { + ALTER TABLE v1 RENAME TO v2; + } +} {1 {view v1 may not be altered}} +do_test alter-12.3 { + execsql { SELECT * FROM v1; } +} {} +do_test alter-12.4 { + db close + sqlite3 db test.db + execsql { SELECT * FROM v1; } +} {} +do_test alter-12.5 { + catchsql { + ALTER TABLE v1 ADD COLUMN new_column; + } +} {1 {Cannot add a column to a view}} + +# Ticket #3102: +# Verify that comments do not interfere with the table rename +# algorithm. +# +do_test alter-13.1 { + execsql { + CREATE TABLE /* hi */ t3102a(x); + CREATE TABLE t3102b -- comment + (y); + CREATE INDEX t3102c ON t3102a(x); + SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + } +} {t3102a t3102b t3102c} +do_test alter-13.2 { + execsql { + ALTER TABLE t3102a RENAME TO t3102a_rename; + SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + } +} {t3102a_rename t3102b t3102c} +do_test alter-13.3 { + execsql { + ALTER TABLE t3102b RENAME TO t3102b_rename; + SELECT name FROM sqlite_master WHERE name LIKE 't3102%' ORDER BY 1; + } +} {t3102a_rename t3102b_rename t3102c} + +# Ticket #3651 +do_test alter-14.1 { + catchsql { + CREATE TABLE t3651(a UNIQUE); + ALTER TABLE t3651 ADD COLUMN b UNIQUE; + } +} {1 {Cannot add a UNIQUE column}} +do_test alter-14.2 { + catchsql { + ALTER TABLE t3651 ADD COLUMN b PRIMARY KEY; + } +} {1 {Cannot add a PRIMARY KEY column}} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/analyze.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/analyze.test --- sqlite3-3.4.2/test/analyze.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/analyze.test 2009-06-05 18:02:52.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. # This file implements tests for the ANALYZE command. 
# -# $Id: analyze.test,v 1.5 2005/09/10 22:40:54 drh Exp $ +# $Id: analyze.test,v 1.9 2008/08/11 18:44:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -61,6 +61,16 @@ SELECT count(*) FROM sqlite_master WHERE name='sqlite_stat1' } } {1} +do_test analyze-1.6.2 { + catchsql { + CREATE INDEX stat1idx ON sqlite_stat1(idx); + } +} {1 {table sqlite_stat1 may not be indexed}} +do_test analyze-1.6.3 { + catchsql { + CREATE INDEX main.stat1idx ON SQLite_stat1(idx); + } +} {1 {table sqlite_stat1 may not be indexed}} do_test analyze-1.7 { execsql { SELECT * FROM sqlite_stat1 @@ -165,7 +175,7 @@ ANALYZE t1; SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; } -} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2} t2i3 {5 3 1}} +} {t1i1 {5 3} t1i2 {5 2} t1i3 {5 3 1} t2i1 {5 3} t2i2 {5 2}} do_test analyze-3.6 { execsql { ANALYZE t2; @@ -187,9 +197,38 @@ CREATE INDEX t3i3 ON t3(d,b,c,a); DROP TABLE t1; DROP TABLE t2; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {} +do_test analyze-3.9 { + execsql { + ANALYZE; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}} + +do_test analyze-3.10 { + execsql { + CREATE TABLE [silly " name](a, b, c); + CREATE INDEX 'foolish '' name' ON [silly " name](a, b); + CREATE INDEX 'another foolish '' name' ON [silly " name](c); + INSERT INTO [silly " name] VALUES(1, 2, 3); + INSERT INTO [silly " name] VALUES(4, 5, 6); ANALYZE; SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; } +} {{another foolish ' name} {2 1} {foolish ' name} {2 1 1} t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}} +do_test analyze-3.11 { + execsql { + DROP INDEX "foolish ' name"; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } +} {{another foolish ' name} {2 1} t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}} +do_test analyze-3.11 { + execsql { + DROP TABLE "silly "" name"; + SELECT idx, stat FROM sqlite_stat1 ORDER BY idx; + } } {t3i1 {5 3} t3i2 {5 3 1 1 1} t3i3 {5 5 2 1 1}} # Try corrupting the sqlite_stat1 table and make sure the @@ -237,6 +276,16 @@ SELECT * FROM t4 WHERE x=1234; } } {} +do_test analyze-4.3 { + execsql { + INSERT INTO sqlite_stat1 VALUES('t4','xyzzy','0 1 2 3'); + } + db close + sqlite3 db test.db + execsql { + SELECT * FROM t4 WHERE x=1234; + } +} {} # This test corrupts the database file so it must be the last test # in the series. 
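The analyze.test changes above revolve around the sqlite_stat1 table that ANALYZE fills with one row per index: "idx" names the index and "stat" is a space-separated list whose first number is the table's row count and whose later numbers estimate rows per distinct prefix of the index columns. A minimal, illustrative C sketch (made-up schema, error checking omitted, not part of this patch) that prints those rows the same way the analyze-3.* tests read them:

/* Illustrative sketch only (not part of this patch): run ANALYZE and dump
** sqlite_stat1, which is what the analyze-3.* tests above inspect.
** Schema is made up; error checking is omitted for brevity. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t(a,b);"
      "CREATE INDEX ti ON t(a,b);"
      "INSERT INTO t VALUES(1,1);"
      "INSERT INTO t VALUES(1,2);"
      "INSERT INTO t VALUES(2,1);"
      "ANALYZE;", 0, 0, 0);
  sqlite3_prepare_v2(db,
      "SELECT idx, stat FROM sqlite_stat1 ORDER BY idx", -1, &stmt, 0);
  while( sqlite3_step(stmt)==SQLITE_ROW ){
    /* Typically prints something like "ti: 3 2 1" - 3 rows in t, about 2 rows
    ** per distinct value of a, about 1 row per distinct (a,b) pair. */
    printf("%s: %s\n", (const char*)sqlite3_column_text(stmt, 0),
                       (const char*)sqlite3_column_text(stmt, 1));
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}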
@@ -244,14 +293,14 @@ do_test analyze-99.1 { execsql { PRAGMA writable_schema=on; - UPDATE sqlite_master SET sql='nonsense'; + UPDATE sqlite_master SET sql='nonsense' WHERE name='sqlite_stat1'; } db close sqlite3 db test.db catchsql { ANALYZE } -} {1 {malformed database schema - near "nonsense": syntax error}} +} {1 {malformed database schema (sqlite_stat1) - near "nonsense": syntax error}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/async2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/async2.test --- sqlite3-3.4.2/test/async2.test 2006-02-14 14:02:08.000000000 +0000 +++ sqlite3-3.6.16/test/async2.test 2009-06-25 12:24:38.000000000 +0100 @@ -5,17 +5,22 @@ # #*********************************************************************** # -# $Id: async2.test,v 1.3 2006/02/14 14:02:08 danielk1977 Exp $ +# $Id: async2.test,v 1.12 2009/04/25 08:39:15 danielk1977 Exp $ -if {[info commands sqlite3async_enable]==""} { +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if { + [info commands sqlite3async_initialize]=="" || + [info command sqlite3_memdebug_fail]=="" +} { # The async logic is not built into this system + puts "Skipping async2 tests: not compiled with required features" + finish_test return } -set testdir [file dirname $argv0] -source $testdir/tester.tcl - # Enable asynchronous IO. set setup_script { @@ -41,35 +46,40 @@ db close - -foreach err [list ioerr malloc] { - set ::go 1 +foreach err [list ioerr malloc-transient malloc-persistent] { + set ::go 10 for {set n 1} {$::go} {incr n} { set ::sqlite_io_error_pending 0 - sqlite_malloc_fail 0 + sqlite3_memdebug_fail -1 file delete -force test.db test.db-journal sqlite3 db test.db execsql $::setup_script db close - sqlite3async_enable 1 + sqlite3async_initialize "" 1 sqlite3 db test.db - execsql $::sql_script - db close + sqlite3_db_config_lookaside db 0 0 0 switch -- $err { - ioerr { set ::sqlite_io_error_pending $n } - malloc { sqlite_malloc_fail $n } + ioerr { set ::sqlite_io_error_pending $n } + malloc-persistent { sqlite3_memdebug_fail $n -repeat 1 } + malloc-transient { sqlite3_memdebug_fail $n -repeat 0 } } - sqlite3async_halt idle + + catchsql $::sql_script + db close + + sqlite3async_control halt idle sqlite3async_start sqlite3async_wait - + sqlite3async_control halt never + sqlite3async_shutdown + set ::sqlite_io_error_pending 0 - sqlite_malloc_fail 0 + sqlite3_memdebug_fail -1 sqlite3 db test.db - set c [db eval {SELECT c FROM counter LIMIT 1}] + set c [db one {SELECT c FROM counter LIMIT 1}] switch -- $c { 1 { do_test async-$err-1.1.$n { @@ -103,17 +113,14 @@ } {klmnopqrst and seven} } FIN { - set ::go 0 + incr ::go -1 } } - sqlite3async_enable 0 + db close } } catch {db close} -sqlite3async_halt idle -sqlite3async_start -sqlite3async_wait finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/async3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/async3.test --- sqlite3-3.4.2/test/async3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/async3.test 2009-06-25 12:24:38.000000000 +0100 @@ -0,0 +1,76 @@ +# 2007 September 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the code in test_async.c. 
+# Specifically, it tests that the xFullPathname() method of +# of the asynchronous vfs works correctly. +# +# $Id: async3.test,v 1.5 2009/04/25 08:39:15 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if { [info commands sqlite3async_initialize]=="" } { + # The async logic is not built into this system + puts "Skipping async3 tests: not compiled with required features" + finish_test + return +} + +db close +sqlite3async_initialize "" 1 +#set sqlite3async_trace 1 +sqlite3async_start + +set paths { + chocolate/banana/vanilla/file.db + chocolate//banana/vanilla/file.db + chocolate/./banana//vanilla/file.db + chocolate/banana/./vanilla/file.db + chocolate/banana/../banana/vanilla/file.db + chocolate/banana/./vanilla/extra_bit/../file.db +} + +do_test async3-1.0 { + file mkdir [file join chocolate banana vanilla] + file delete -force chocolate/banana/vanilla/file.db + file delete -force chocolate/banana/vanilla/file.db-journal +} {} + +do_test async3-1.1 { + sqlite3 db chocolate/banana/vanilla/file.db + execsql { + CREATE TABLE abc(a, b, c); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + } +} {} + +set N 2 +foreach p $paths { + sqlite3 db2 $p + do_test async3-1.$N.1 { + execsql {SELECT * FROM abc} db2 + } {} + do_test async3-1.$N.2 { + catchsql {INSERT INTO abc VALUES(4, 5, 6)} db2 + } {1 {database is locked}} + db2 close + incr N +} + +db close + +sqlite3async_control halt idle +sqlite3async_wait +sqlite3async_control halt never +sqlite3async_shutdown +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/async4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/async4.test --- sqlite3-3.4.2/test/async4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/async4.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,162 @@ +# 2009 April 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: async4.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# These tests only work for Tcl version 8.5 and later on Windows (for now) +# +if {$tcl_platform(platform)=="windows"} { + scan $::tcl_version %f vx + if {$vx<8.5} { + finish_test + return + } +} + +if {[info commands sqlite3async_initialize] eq ""} { + # The async logic is not built into this system + finish_test + return +} +db close + +# Test layout: +# +# async4.1.*: Test the lockfiles parameter. +# async4.2.*: Test the delay parameter. 
+ +do_test async4.1.1 { + sqlite3async_initialize {} 0 + sqlite3async_control lockfiles +} {1} +do_test async4.1.2 { + sqlite3async_control lockfiles false +} {0} +do_test async4.1.3 { + sqlite3async_control lockfiles +} {0} +do_test async4.1.4 { + sqlite3async_control lockfiles true +} {1} + +do_test async4.1.5 { + sqlite3 db test.db -vfs sqlite3async + execsql { CREATE TABLE t1(a, b, c) } +} {} +do_test async4.1.6 { + list [file exists test.db] [file size test.db] +} {1 0} +do_test async4.1.7 { + sqlite3 db2 test.db + catchsql { CREATE TABLE t2(a, b, c) } db2 +} {1 {database is locked}} +do_test async4.1.8 { + sqlite3async_control halt idle + sqlite3async_start + sqlite3async_wait +} {} +do_test async4.1.9 { + catchsql { CREATE TABLE t2(a, b, c) } db2 +} {0 {}} +do_test async4.1.10 { + list [catch {sqlite3async_control lockfiles false} msg] $msg +} {1 SQLITE_MISUSE} +do_test async4.1.11 { + db close + list [catch {sqlite3async_control lockfiles false} msg] $msg +} {1 SQLITE_MISUSE} +do_test async4.1.12 { + sqlite3async_start + sqlite3async_wait + sqlite3async_control lockfiles false +} {0} +do_test async4.1.13 { + sqlite3 db test.db -vfs sqlite3async + execsql { CREATE TABLE t3(a, b, c) } db +} {} +do_test async4.1.14 { + execsql { + CREATE INDEX i1 ON t2(a); + CREATE INDEX i2 ON t1(a); + } db2 +} {} +do_test async4.1.15 { + sqlite3async_start + sqlite3async_wait + execsql { pragma integrity_check } db2 +} {{*** in database main *** +Page 5 is never used}} +do_test async4.1.16 { + db close + db2 close + sqlite3async_start + sqlite3async_wait +} {} +do_test async4.1.17 { + sqlite3async_control lockfiles true +} {1} + +do_test async4.2.1 { + sqlite3async_control delay +} {0} +do_test async4.2.2 { + sqlite3async_control delay 23 +} {23} +do_test async4.2.3 { + sqlite3async_control delay +} {23} +do_test async4.2.4 { + sqlite3async_control delay 0 +} {0} +do_test async4.2.5 { + sqlite3 db test.db -vfs sqlite3async + + execsql { CREATE TABLE t4(a, b) } + set T1 [lindex [time { + sqlite3async_start + sqlite3async_wait + }] 0] + + sqlite3async_control delay 100 + execsql { CREATE TABLE t5(a, b) } + set T2 [lindex [time { + sqlite3async_start + sqlite3async_wait + }] 0] + + expr {($T1+1000000) < $T2} +} {1} + +do_test async4.2.6 { + sqlite3async_control delay 0 + execsql { CREATE TABLE t6(a, b) } + set T1 [lindex [time { + sqlite3async_start + sqlite3async_wait + }] 0] + + expr {($T1+1000000) < $T2} +} {1} + +do_test async4.2.7 { + list [catch { sqlite3async_control delay -1 } msg] $msg +} {1 SQLITE_MISUSE} + +do_test async4.2.8 { + db close + sqlite3async_start + sqlite3async_wait +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/async.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/async.test --- sqlite3-3.4.2/test/async.test 2006-03-19 13:00:25.000000000 +0000 +++ sqlite3-3.6.16/test/async.test 2009-06-25 12:35:51.000000000 +0100 @@ -6,60 +6,82 @@ #*********************************************************************** # This file runs all tests. 
# -# $Id: async.test,v 1.7 2006/03/19 13:00:25 drh Exp $ +# $Id: async.test,v 1.21 2009/06/05 17:09:12 drh Exp $ +set testdir [file dirname $argv0] +source $testdir/tester.tcl -if {[catch {sqlite3async_enable}]} { +if {[info commands sqlite3async_initialize] eq ""} { # The async logic is not built into this system + finish_test return } - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -rename finish_test really_finish_test -proc finish_test {} {} +rename finish_test async_really_finish_test +proc finish_test {} { + catch {db close} + catch {db2 close} + catch {db3 close} +} +if {[info exists ISQUICK]} { set ASYNC_SAVE_ISQUICK $ISQUICK } set ISQUICK 1 -set INCLUDE { +set ASYNC_INCLUDE { + insert.test + insert2.test + insert3.test + lock.test + lock2.test + lock3.test select1.test select2.test select3.test select4.test - insert.test - insert2.test - insert3.test trans.test } -# set INCLUDE {select4.test} # Enable asynchronous IO. -sqlite3async_enable 1 +sqlite3async_initialize "" 1 -rename do_test really_do_test +rename do_test async_really_do_test proc do_test {name args} { - uplevel really_do_test async_io-$name $args - sqlite3async_halt idle + uplevel async_really_do_test async_io-$name $args sqlite3async_start + sqlite3async_control halt idle sqlite3async_wait + sqlite3async_control halt never } foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] - if {[lsearch -exact $INCLUDE $tail]<0} continue + if {[lsearch -exact $ASYNC_INCLUDE $tail]<0} continue source $testfile - catch {db close} + + # Make sure everything is flushed through. This is because [source]ing + # the next test file will delete the database file on disk (using + # [file delete]). If the asynchronous backend still has the file + # open, it will become confused. + # + sqlite3async_control halt idle + sqlite3async_start + sqlite3async_wait + sqlite3async_control halt never } # Flush the write-queue and disable asynchronous IO. This should ensure # all allocated memory is cleaned up. set sqlite3async_trace 1 -sqlite3async_halt idle +sqlite3async_control halt idle sqlite3async_start sqlite3async_wait -sqlite3async_enable 0 +sqlite3async_control halt never +sqlite3async_shutdown set sqlite3async_trace 0 -really_finish_test -rename really_do_test do_test -rename really_finish_test finish_test +rename do_test {} +rename async_really_do_test do_test +rename finish_test {} +rename async_really_finish_test finish_test + +if {[info exists ASYNC_SAVE_ISQUICK]} { set ISQUICK $ASYNC_SAVE_ISQUICK } +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/attach2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/attach2.test --- sqlite3-3.4.2/test/attach2.test 2007-08-10 20:46:14.000000000 +0100 +++ sqlite3-3.6.16/test/attach2.test 2009-06-05 18:02:52.000000000 +0100 @@ -12,12 +12,17 @@ # focus of this script is testing the ATTACH and DETACH commands # and related functionality. # -# $Id: attach2.test,v 1.36 2007/08/10 19:46:14 drh Exp $ +# $Id: attach2.test,v 1.38 2007/12/13 21:54:11 drh Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !attach { + finish_test + return +} + # Ticket #354 # # Databases test.db and test2.db contain identical schemas. Make @@ -154,7 +159,7 @@ proc lock_status {testnum db expected_result} { # If the database was compiled with OMIT_TEMPDB set, then # the lock_status list will not contain an entry for the temp - # db. But the test code doesn't know this, so it's easiest + # db. 
But the test code doesn't know this, so its easiest # to filter it out of the $expected_result list here. ifcapable !tempdb { set expected_result [concat \ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/attach3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/attach3.test --- sqlite3-3.4.2/test/attach3.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/attach3.test 2009-06-05 18:02:52.000000000 +0100 @@ -12,13 +12,17 @@ # focus of this script is testing the ATTACH and DETACH commands # and schema changes to attached databases. # -# $Id: attach3.test,v 1.17 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: attach3.test,v 1.18 2007/10/09 08:29:32 danielk1977 Exp $ # - set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !attach { + finish_test + return +} + # Create tables t1 and t2 in the main database execsql { CREATE TABLE t1(a, b); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/attachmalloc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/attachmalloc.test --- sqlite3-3.4.2/test/attachmalloc.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/attachmalloc.test 2009-06-12 03:37:50.000000000 +0100 @@ -12,98 +12,18 @@ # focus of this script is testing the ATTACH statement and # specifically out-of-memory conditions within that command. # -# $Id: attachmalloc.test,v 1.3 2006/09/04 18:54:14 drh Exp $ +# $Id: attachmalloc.test,v 1.10 2008/10/22 10:45:38 danielk1977 Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG=1" +ifcapable !memdebug||!attach { finish_test return } - -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. 
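The documentation removed above (the old in-file do_malloc_test helper, now supplied by malloc_common.tcl) describes the fault-injection strategy: rerun the same script while forcing the Nth allocation to fail, for N = 1, 2, 3, ..., until a run completes without hitting the injected failure. The harness itself uses the internal sqlite3_memdebug_fail command; the sketch below only expresses the same idea with the public SQLITE_CONFIG_MALLOC hook, purely as an illustration and not as what the harness does (a complete version would wrap xRealloc as well):

/* Illustrative sketch only (not part of this patch): fail the N'th allocation,
** the strategy the removed comment describes.  Must be installed before
** sqlite3_initialize(); a complete version would wrap xRealloc as well. */
#include <sqlite3.h>

static sqlite3_mem_methods defaultMem;  /* the real allocator, saved */
static int allocCount = 0;              /* allocations seen so far */
static int failAt = 0;                  /* 1-based allocation to fail; 0 = never */

static void *faultMalloc(int nByte){
  if( failAt>0 && ++allocCount==failAt ) return 0;   /* simulated OOM */
  return defaultMem.xMalloc(nByte);
}

int install_fault_allocator(void){
  static sqlite3_mem_methods faultMem;
  int rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &defaultMem);
  if( rc!=SQLITE_OK ) return rc;
  faultMem = defaultMem;                /* keep xFree, xRealloc, xSize, ... */
  faultMem.xMalloc = faultMalloc;
  return sqlite3_config(SQLITE_CONFIG_MALLOC, &faultMem);
}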
-# -proc do_malloc_test {tn args} { - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go} {incr ::n} { - - do_test $tn.$::n { - - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - set ::DB [sqlite3 db test.db] - - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - - set v [catch $::mallocbody msg] - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - set v {1 1} - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - sqlite_malloc_fail 0 - - if {[info exists ::mallocopts(-cleanup)]} { - catch $::mallocopts(-cleanup) - } - } - unset ::mallocopts -} +source $testdir/malloc_common.tcl do_malloc_test attachmalloc-1 -tclprep { db close @@ -115,6 +35,8 @@ if {[catch {sqlite3 db test.db}]} { error "out of memory" } + sqlite3_db_config_lookaside db 0 0 0 + sqlite3_extended_result_codes db 1 } -sqlbody { ATTACH 'test2.db' AS two; CREATE TABLE two.t1(x); @@ -124,4 +46,18 @@ CREATE TABLE four.t1(x); } +do_malloc_test attachmalloc-2 -tclprep { + file delete -force test2.db + file delete -force test2.db-journal + sqlite3 db2 test2.db + db2 eval { + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(a, b); + } + db2 close +} -sqlbody { + CREATE TABLE t1(d, e, f); + ATTACH 'test2.db' AS db1; +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/attach.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/attach.test --- sqlite3-3.4.2/test/attach.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/attach.test 2009-06-25 12:35:51.000000000 +0100 @@ -12,18 +12,22 @@ # focus of this script is testing the ATTACH and DETACH commands # and related functionality. # -# $Id: attach.test,v 1.44 2007/05/09 20:31:30 drh Exp $ +# $Id: attach.test,v 1.52 2009/05/29 14:39:08 drh Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !attach { + finish_test + return +} + for {set i 2} {$i<=15} {incr i} { file delete -force test$i.db file delete -force test$i.db-journal } -set btree_trace 0 do_test attach-1.1 { execsql { CREATE TABLE t1(a,b); @@ -112,6 +116,9 @@ ATTACH 'test.db' as db2; } } {1 {database db2 is already in use}} +do_test attach-1.12.2 { + db errorcode +} {1} do_test attach-1.13 { catchsql { ATTACH 'test.db' as db5; @@ -150,6 +157,9 @@ ATTACH 'test.db' as db12; } } {1 {too many attached databases - max 10}} +do_test attach-1.19.1 { + db errorcode +} {1} do_test attach-1.20.1 { execsql { DETACH db5; @@ -174,6 +184,9 @@ ATTACH 'test.db' as db13; } } {1 {too many attached databases - max 10}} +do_test attach-1.22.1 { + db errorcode +} {1} do_test attach-1.23 { catchsql { DETACH "db14"; @@ -393,7 +406,6 @@ # prevent test2.db from being read by db2. 
do_test attach-3.5 { execsql {SELECT * FROM t2} -btree_breakpoint catchsql { SELECT * FROM t2; } db2; @@ -462,8 +474,6 @@ execsql {SELECT * FROM t1} } {1 2 3 4} -#set btree_trace 1 - # Ticket #323 do_test attach-4.1 { execsql {DETACH db2} @@ -718,6 +728,9 @@ ATTACH DATABASE 'cannot-read' AS noread; } } {1 {unable to open database: cannot-read}} + do_test attach-6.2.2 { + db errorcode + } {14} file delete -force cannot-read } @@ -735,13 +748,44 @@ file delete -force test2.db file delete -force no-such-file -do_test attach-7.1 { - file delete -force test.db test.db-journal - sqlite3 db test.db +ifcapable subquery { + do_test attach-7.1 { + file delete -force test.db test.db-journal + sqlite3 db test.db + catchsql { + DETACH RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY + REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL ) + } + } {1 {no such table: AAAAAA}} +} + +# Create a malformed file (a file that is not a valid database) +# and try to attach it +# +do_test attach-8.1 { + set fd [open test2.db w] + puts $fd "This file is not a valid SQLite database" + close $fd + catchsql { + ATTACH 'test2.db' AS t2; + } +} {1 {file is encrypted or is not a database}} +do_test attach-8.2 { + db errorcode +} {26} +file delete -force test2.db +do_test attach-8.3 { + sqlite3 db2 test2.db + db2 eval {CREATE TABLE t1(x); BEGIN EXCLUSIVE} catchsql { - DETACH RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY - REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL ) + ATTACH 'test2.db' AS t2; } -} {1 {invalid name: "RAISE ( IGNORE ) IN ( SELECT "AAAAAA" . * ORDER BY - REGISTER LIMIT "AAAAAA" . "AAAAAA" OFFSET RAISE ( IGNORE ) NOT NULL )"}} +} {1 {database is locked}} +do_test attach-8.4 { + db errorcode +} {5} +db2 close +file delete -force test2.db + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/auth2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/auth2.test --- sqlite3-3.4.2/test/auth2.test 2006-08-24 15:59:46.000000000 +0100 +++ sqlite3-3.6.16/test/auth2.test 2009-06-05 18:02:52.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this script is testing the sqlite3_set_authorizer() API # and related functionality. # -# $Id: auth2.test,v 1.1 2006/08/24 14:59:46 drh Exp $ +# $Id: auth2.test,v 1.3 2008/07/02 13:13:53 danielk1977 Exp $ # set testdir [file dirname $argv0] @@ -72,4 +72,97 @@ set ::flist } coalesce +# Make sure the authorizer is not called when parsing the schema +# and when computing the result set of a view. 
+# +db close +sqlite3 db test.db +sqlite3 db2 test.db +proc auth {args} { + global authargs + append authargs $args\n + return SQLITE_OK +} +db auth auth +do_test auth2-2.1 { + set ::authargs {} + db eval { + CREATE TABLE t2(x,y,z); + } + set ::authargs +} {SQLITE_INSERT sqlite_master {} main {} +SQLITE_CREATE_TABLE t2 {} main {} +SQLITE_UPDATE sqlite_master type main {} +SQLITE_UPDATE sqlite_master name main {} +SQLITE_UPDATE sqlite_master tbl_name main {} +SQLITE_UPDATE sqlite_master rootpage main {} +SQLITE_UPDATE sqlite_master sql main {} +SQLITE_READ sqlite_master ROWID main {} +SQLITE_READ sqlite_master name main {} +SQLITE_READ sqlite_master rootpage main {} +SQLITE_READ sqlite_master sql main {} +SQLITE_READ sqlite_master tbl_name main {} +} +do_test auth2-2.2 { + set ::authargs {} + db eval { + CREATE VIEW v2 AS SELECT x+y AS a, y+z AS b from t2; + } + set ::authargs +} {SQLITE_INSERT sqlite_master {} main {} +SQLITE_CREATE_VIEW v2 {} main {} +SQLITE_UPDATE sqlite_master type main {} +SQLITE_UPDATE sqlite_master name main {} +SQLITE_UPDATE sqlite_master tbl_name main {} +SQLITE_UPDATE sqlite_master rootpage main {} +SQLITE_UPDATE sqlite_master sql main {} +SQLITE_READ sqlite_master ROWID main {} +SQLITE_READ sqlite_master name main {} +SQLITE_READ sqlite_master rootpage main {} +SQLITE_READ sqlite_master sql main {} +SQLITE_READ sqlite_master tbl_name main {} +} +do_test auth2-2.3 { + set ::authargs {} + db eval { + SELECT a, b FROM v2; + } + set ::authargs +} {SQLITE_SELECT {} {} {} {} +SQLITE_READ v2 a main {} +SQLITE_READ v2 b main {} +SQLITE_READ t2 x main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 z main v2 +SQLITE_SELECT {} {} {} v2 +} +do_test auth2-2.4 { + db2 eval { + CREATE TABLE t3(p,q,r); + } + set ::authargs {} + db eval { + SELECT b, a FROM v2; + } + set ::authargs +} {SQLITE_SELECT {} {} {} {} +SQLITE_READ v2 b main {} +SQLITE_READ v2 a main {} +SQLITE_READ t2 x main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 z main v2 +SQLITE_SELECT {} {} {} v2 +SQLITE_SELECT {} {} {} {} +SQLITE_READ v2 b main {} +SQLITE_READ v2 a main {} +SQLITE_READ t2 x main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 y main v2 +SQLITE_READ t2 z main v2 +SQLITE_SELECT {} {} {} v2 +} +db2 close + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/auth3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/auth3.test --- sqlite3-3.4.2/test/auth3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/auth3.test 2009-06-25 12:24:38.000000000 +0100 @@ -0,0 +1,111 @@ +# 2008 October 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test that the truncate optimization is disabled if the SQLITE_DELETE +# authorization callback returns SQLITE_IGNORE. +# +# $Id: auth3.test,v 1.2 2009/05/04 01:58:31 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# disable this test if the SQLITE_OMIT_AUTHORIZATION macro is +# defined during compilation. +if {[catch {db auth {}} msg]} { + finish_test + return +} + +# Disable the statement cache for these tests. 
+# +db cache size 0 + +db authorizer ::auth +proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DELETE"} { + return $::authcode + } + return SQLITE_OK +} + +#-------------------------------------------------------------------------- +# The following tests - auth3-1.* - test that return values of SQLITE_DENY, +# SQLITE_IGNORE, SQLITE_OK and are correctly handled when returned +# by an SQLITE_DELETE authorization callback triggered by a +# "DELETE FROM " statement. +# +do_test auth3-1.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + } +} {} +do_test auth3.1.2 { + set ::authcode SQLITE_DENY + catchsql { DELETE FROM t1 } +} {1 {not authorized}} +do_test auth3.1.3 { + set ::authcode SQLITE_INVALID + catchsql { DELETE FROM t1 } +} {1 {authorizer malfunction}} +do_test auth3.1.4 { + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6} +do_test auth3-1.5 { + set ::authcode SQLITE_IGNORE + execsql { + DELETE FROM t1; + SELECT * FROM t1; + } +} {} +do_test auth3-1.6 { + set ::authcode SQLITE_OK + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + DELETE FROM t1; + SELECT * FROM t1; + } +} {} + +#-------------------------------------------------------------------------- +# These tests - auth3-2.* - test that returning SQLITE_IGNORE really does +# disable the truncate optimization. +# +do_test auth3-2.1 { + set ::authcode SQLITE_OK + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + } + set sqlite_search_count 0 + execsql { + DELETE FROM t1; + } + set sqlite_search_count +} {0} + +do_test auth3-2.2 { + set ::authcode SQLITE_IGNORE + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + } + set sqlite_search_count 0 + execsql { + DELETE FROM t1; + } + set sqlite_search_count +} {1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/auth.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/auth.test --- sqlite3-3.4.2/test/auth.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/auth.test 2009-06-25 12:24:38.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this script is testing the sqlite3_set_authorizer() API # and related functionality. # -# $Id: auth.test,v 1.37 2006/08/24 14:59:46 drh Exp $ +# $Id: auth.test,v 1.45 2009/05/04 01:58:31 drh Exp $ # set testdir [file dirname $argv0] @@ -305,11 +305,13 @@ } catchsql {SELECT * FROM t2} } {1 {access to t2.b is prohibited}} -do_test auth-1.35.2 { - execsql {ATTACH DATABASE 'test.db' AS two} - catchsql {SELECT * FROM two.t2} -} {1 {access to two.t2.b is prohibited}} -execsql {DETACH DATABASE two} +ifcapable attach { + do_test auth-1.35.2 { + execsql {ATTACH DATABASE 'test.db' AS two} + catchsql {SELECT * FROM two.t2} + } {1 {access to two.t2.b is prohibited}} + execsql {DETACH DATABASE two} +} do_test auth-1.36 { proc auth {code arg1 arg2 arg3 arg4} { if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="b"} { @@ -416,7 +418,10 @@ } {0 {}} do_test auth-1.50 { execsql {SELECT * FROM t2} -} {11 2 33} +} {} +do_test auth-1.50.2 { + execsql {INSERT INTO t2 VALUES(11, 2, 33)} +} {} do_test auth-1.51 { proc auth {code arg1 arg2 arg3 arg4} { @@ -1610,174 +1615,176 @@ # ticket #340 - authorization for ATTACH and DETACH. 
# -do_test auth-1.251 { - db authorizer ::auth - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ATTACH"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] +ifcapable attach { + do_test auth-1.251 { + db authorizer ::auth + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + } + return SQLITE_OK } - return SQLITE_OK - } - catchsql { - ATTACH DATABASE ':memory:' AS test1 - } -} {0 {}} -do_test auth-1.252 { - set ::authargs -} {:memory: {} {} {}} -do_test auth-1.253 { - catchsql {DETACH DATABASE test1} - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ATTACH"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_DENY + catchsql { + ATTACH DATABASE ':memory:' AS test1 } - return SQLITE_OK - } - catchsql { - ATTACH DATABASE ':memory:' AS test1; - } -} {1 {not authorized}} -do_test auth-1.254 { - lindex [execsql {PRAGMA database_list}] 7 -} {} -do_test auth-1.255 { - catchsql {DETACH DATABASE test1} - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ATTACH"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_IGNORE + } {0 {}} + do_test auth-1.252 { + set ::authargs + } {:memory: {} {} {}} + do_test auth-1.253 { + catchsql {DETACH DATABASE test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK } - return SQLITE_OK - } - catchsql { - ATTACH DATABASE ':memory:' AS test1; - } -} {0 {}} -do_test auth-1.256 { - lindex [execsql {PRAGMA database_list}] 7 -} {} -do_test auth-1.257 { - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_DETACH"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] + catchsql { + ATTACH DATABASE ':memory:' AS test1; + } + } {1 {not authorized}} + do_test auth-1.254 { + lindex [execsql {PRAGMA database_list}] 7 + } {} + do_test auth-1.255 { + catchsql {DETACH DATABASE test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ATTACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } return SQLITE_OK } - return SQLITE_OK - } - execsql {ATTACH DATABASE ':memory:' AS test1} - catchsql { - DETACH DATABASE test1; - } -} {0 {}} -do_test auth-1.258 { - lindex [execsql {PRAGMA database_list}] 7 -} {} -do_test auth-1.259 { - execsql {ATTACH DATABASE ':memory:' AS test1} - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_DETACH"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_IGNORE + catchsql { + ATTACH DATABASE ':memory:' AS test1; } - return SQLITE_OK - } - catchsql { - DETACH DATABASE test1; - } -} {0 {}} -ifcapable tempdb { - ifcapable schema_pragmas { - do_test auth-1.260 { + } {0 {}} + do_test auth-1.256 { lindex [execsql {PRAGMA database_list}] 7 - } {test1} - } ;# ifcapable schema_pragmas - do_test auth-1.261 { + } {} + do_test auth-1.257 { proc auth {code arg1 arg2 arg3 arg4} { if {$code=="SQLITE_DETACH"} { set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_DENY + return SQLITE_OK } return SQLITE_OK } + execsql {ATTACH DATABASE ':memory:' AS test1} catchsql { DETACH DATABASE test1; } - } {1 {not authorized}} - ifcapable schema_pragmas { - do_test auth-1.262 { + } {0 {}} + do_test auth-1.258 { lindex [execsql {PRAGMA database_list}] 7 - } {test1} - } ;# ifcapable schema_pragmas - db authorizer {} - execsql {DETACH DATABASE test1} - db authorizer ::auth - - # Authorization for ALTER TABLE. 
These tests are omitted if the library - # was built without ALTER TABLE support. - ifcapable altertable { - - do_test auth-1.263 { - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ALTER_TABLE"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_OK - } - return SQLITE_OK - } - catchsql { - ALTER TABLE t1 RENAME TO t1x - } - } {0 {}} - do_test auth-1.264 { - execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} - } {t1x} - do_test auth-1.265 { - set authargs - } {temp t1 {} {}} - do_test auth-1.266 { - proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ALTER_TABLE"} { - set ::authargs [list $arg1 $arg2 $arg3 $arg4] - return SQLITE_IGNORE - } - return SQLITE_OK - } - catchsql { - ALTER TABLE t1x RENAME TO t1 + } {} + do_test auth-1.259 { + execsql {ATTACH DATABASE ':memory:' AS test1} + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_DETACH"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE } - } {0 {}} - do_test auth-1.267 { - execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} - } {t1x} - do_test auth-1.268 { - set authargs - } {temp t1x {} {}} - do_test auth-1.269 { + return SQLITE_OK + } + catchsql { + DETACH DATABASE test1; + } + } {0 {}} + ifcapable tempdb { + ifcapable schema_pragmas { + do_test auth-1.260 { + lindex [execsql {PRAGMA database_list}] 7 + } {test1} + } ;# ifcapable schema_pragmas + do_test auth-1.261 { proc auth {code arg1 arg2 arg3 arg4} { - if {$code=="SQLITE_ALTER_TABLE"} { + if {$code=="SQLITE_DETACH"} { set ::authargs [list $arg1 $arg2 $arg3 $arg4] return SQLITE_DENY } return SQLITE_OK } catchsql { - ALTER TABLE t1x RENAME TO t1 + DETACH DATABASE test1; } } {1 {not authorized}} - do_test auth-1.270 { - execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} - } {t1x} - - do_test auth-1.271 { - set authargs - } {temp t1x {} {}} - } ;# ifcapable altertable - -} else { - db authorizer {} - db eval { - DETACH DATABASE test1; + ifcapable schema_pragmas { + do_test auth-1.262 { + lindex [execsql {PRAGMA database_list}] 7 + } {test1} + } ;# ifcapable schema_pragmas + db authorizer {} + execsql {DETACH DATABASE test1} + db authorizer ::auth + + # Authorization for ALTER TABLE. These tests are omitted if the library + # was built without ALTER TABLE support. 
+ ifcapable altertable { + + do_test auth-1.263 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_OK + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1 RENAME TO t1x + } + } {0 {}} + do_test auth-1.264 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + do_test auth-1.265 { + set authargs + } {temp t1 {} {}} + do_test auth-1.266 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_IGNORE + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1x RENAME TO t1 + } + } {0 {}} + do_test auth-1.267 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + do_test auth-1.268 { + set authargs + } {temp t1x {} {}} + do_test auth-1.269 { + proc auth {code arg1 arg2 arg3 arg4} { + if {$code=="SQLITE_ALTER_TABLE"} { + set ::authargs [list $arg1 $arg2 $arg3 $arg4] + return SQLITE_DENY + } + return SQLITE_OK + } + catchsql { + ALTER TABLE t1x RENAME TO t1 + } + } {1 {not authorized}} + do_test auth-1.270 { + execsql {SELECT name FROM sqlite_temp_master WHERE type='table'} + } {t1x} + + do_test auth-1.271 { + set authargs + } {temp t1x {} {}} + } ;# ifcapable altertable + + } else { + db authorizer {} + db eval { + DETACH DATABASE test1; + } } } @@ -2117,6 +2124,16 @@ catchsql {SELECT ROWID,b,c FROM t2} } {0 {{} 2 33 {} 8 9}} do_test auth-2.9.1 { + # We have to flush the cache here in case the Tcl interface tries to + # reuse a statement compiled with sqlite3_prepare_v2(). In this case, + # the first error encountered is an SQLITE_SCHEMA error. Then, when + # trying to recompile the statement, the authorization error is encountered. + # If we do not flush the cache, the correct error message is returned, but + # the error code is SQLITE_SCHEMA, not SQLITE_ERROR as required by the test + # case after this one. 
+ # + db cache flush + proc auth {code arg1 arg2 arg3 arg4} { if {$code=="SQLITE_READ" && $arg1=="t2" && $arg2=="ROWID"} { return bogus @@ -2124,7 +2141,7 @@ return SQLITE_OK } catchsql {SELECT ROWID,b,c FROM t2} -} {1 {illegal return value (999) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY}} +} {1 {authorizer malfunction}} do_test auth-2.9.2 { db errorcode } {1} @@ -2136,7 +2153,7 @@ return SQLITE_OK } catchsql {SELECT ROWID,b,c FROM t2} -} {1 {illegal return value (1) from the authorization function - should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY}} +} {1 {authorizer malfunction}} do_test auth-2.11.1 { proc auth {code arg1 arg2 arg3 arg4} { if {$code=="SQLITE_READ" && $arg2=="a"} { @@ -2231,13 +2248,15 @@ set authargs } [list \ SQLITE_UPDATE v1 x main {} \ - SQLITE_READ v1 x main {} \ + SQLITE_INSERT v1chng {} main r2 \ + SQLITE_READ v1 x main r2 \ + SQLITE_READ v1 x main r2 \ SQLITE_SELECT {} {} {} v1 \ SQLITE_READ t2 a main v1 \ SQLITE_READ t2 b main v1 \ - SQLITE_INSERT v1chng {} main r2 \ - SQLITE_READ v1 x main r2 \ - SQLITE_READ v1 x main r2] + SQLITE_SELECT {} {} {} {} \ + SQLITE_READ v1 x main v1 \ +] do_test auth-4.4 { execsql { CREATE TRIGGER r3 INSTEAD OF DELETE ON v1 BEGIN @@ -2254,12 +2273,14 @@ set authargs } [list \ SQLITE_DELETE v1 {} main {} \ - SQLITE_READ v1 x main {} \ + SQLITE_INSERT v1chng {} main r3 \ + SQLITE_READ v1 x main r3 \ SQLITE_SELECT {} {} {} v1 \ SQLITE_READ t2 a main v1 \ SQLITE_READ t2 b main v1 \ - SQLITE_INSERT v1chng {} main r3 \ - SQLITE_READ v1 x main r3] + SQLITE_SELECT {} {} {} {} \ + SQLITE_READ v1 x main v1 \ +] } ;# ifcapable view && trigger diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/autoinc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/autoinc.test --- sqlite3-3.4.2/test/autoinc.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/autoinc.test 2009-06-25 12:45:58.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is testing the AUTOINCREMENT features. # -# $Id: autoinc.test,v 1.9 2006/01/03 00:33:50 drh Exp $ +# $Id: autoinc.test,v 1.14 2009/06/23 20:28:54 drh Exp $ # set testdir [file dirname $argv0] @@ -50,6 +50,11 @@ SELECT * FROM sqlite_sequence; } } {} +do_test autoinc-1.3.1 { + catchsql { + CREATE INDEX seqidx ON sqlite_sequence(name) + } +} {1 {table sqlite_sequence may not be indexed}} # Close and reopen the database. Verify that everything is still there. # @@ -240,6 +245,8 @@ do_test autoinc-2.52 { execsql { CREATE TEMP TABLE t2 AS SELECT y FROM t1; + } + execsql { INSERT INTO t1 SELECT NULL, y+4 FROM t2; SELECT * FROM t1; } @@ -416,7 +423,7 @@ # Make sure AUTOINCREMENT works on ATTACH-ed tables. # -ifcapable tempdb { +ifcapable tempdb&&attach { do_test autoinc-5.1 { file delete -force test2.db file delete -force test2.db-journal @@ -533,4 +540,98 @@ } } {1} +# Ticket #3148 +# Make sure the sqlite_sequence table is not damaged when doing +# an empty insert - an INSERT INTO ... SELECT ... where the SELECT +# clause returns an empty set. +# +do_test autoinc-9.1 { + db eval { + CREATE TABLE t2(x INTEGER PRIMARY KEY AUTOINCREMENT, y); + INSERT INTO t2 VALUES(NULL, 1); + CREATE TABLE t3(a INTEGER PRIMARY KEY AUTOINCREMENT, b); + INSERT INTO t3 SELECT * FROM t2 WHERE y>1; + + SELECT * FROM sqlite_sequence WHERE name='t3'; + } +} {t3 0} + +# Ticket #3928. Make sure that triggers to not make extra slots in +# the SQLITE_SEQUENCE table. 
+# +do_test autoinc-3928.1 { + db eval { + CREATE TABLE t3928(a INTEGER PRIMARY KEY AUTOINCREMENT, b); + CREATE TRIGGER t3928r1 BEFORE INSERT ON t3928 BEGIN + INSERT INTO t3928(b) VALUES('before1'); + INSERT INTO t3928(b) VALUES('before2'); + END; + CREATE TRIGGER t3928r2 AFTER INSERT ON t3928 BEGIN + INSERT INTO t3928(b) VALUES('after1'); + INSERT INTO t3928(b) VALUES('after2'); + END; + INSERT INTO t3928(b) VALUES('test'); + SELECT * FROM t3928 ORDER BY a; + } +} {1 before1 2 after1 3 after2 4 before2 5 after1 6 after2 7 test 8 before1 9 before2 10 after1 11 before1 12 before2 13 after2} +do_test autoinc-3928.2 { + db eval { + SELECT * FROM sqlite_sequence WHERE name='t3928' + } +} {t3928 13} + +do_test autoinc-3928.3 { + db eval { + DROP TRIGGER t3928r1; + DROP TRIGGER t3928r2; + CREATE TRIGGER t3928r3 BEFORE UPDATE ON t3928 + WHEN typeof(new.b)=='integer' BEGIN + INSERT INTO t3928(b) VALUES('before-int-' || new.b); + END; + CREATE TRIGGER t3928r4 AFTER UPDATE ON t3928 + WHEN typeof(new.b)=='integer' BEGIN + INSERT INTO t3928(b) VALUES('after-int-' || new.b); + END; + DELETE FROM t3928 WHERE a!=1; + UPDATE t3928 SET b=456 WHERE a=1; + SELECT * FROM t3928 ORDER BY a; + } +} {1 456 14 before-int-456 15 after-int-456} +do_test autoinc-3928.4 { + db eval { + SELECT * FROM sqlite_sequence WHERE name='t3928' + } +} {t3928 15} + +do_test autoinc-3928.5 { + db eval { + CREATE TABLE t3928b(x); + INSERT INTO t3928b VALUES(100); + INSERT INTO t3928b VALUES(200); + INSERT INTO t3928b VALUES(300); + DELETE FROM t3928; + CREATE TABLE t3928c(y INTEGER PRIMARY KEY AUTOINCREMENT, z); + CREATE TRIGGER t3928br1 BEFORE DELETE ON t3928b BEGIN + INSERT INTO t3928(b) VALUES('before-del-'||old.x); + INSERT INTO t3928c(z) VALUES('before-del-'||old.x); + END; + CREATE TRIGGER t3928br2 AFTER DELETE ON t3928b BEGIN + INSERT INTO t3928(b) VALUES('after-del-'||old.x); + INSERT INTO t3928c(z) VALUES('after-del-'||old.x); + END; + DELETE FROM t3928b; + SELECT * FROM t3928 ORDER BY a; + } +} {16 before-del-100 17 after-del-100 18 before-del-200 19 after-del-200 20 before-del-300 21 after-del-300} +do_test autoinc-3928.6 { + db eval { + SELECT * FROM t3928c ORDER BY y; + } +} {1 before-del-100 2 after-del-100 3 before-del-200 4 after-del-200 5 before-del-300 6 after-del-300} +do_test autoinc-3928.7 { + db eval { + SELECT * FROM sqlite_sequence WHERE name LIKE 't3928%' ORDER BY name; + } +} {t3928 21 t3928c 6} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/autovacuum_crash.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/autovacuum_crash.test --- sqlite3-3.4.2/test/autovacuum_crash.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/autovacuum_crash.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -# 2001 September 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# This file runs the tests in the file crash.test with auto-vacuum enabled -# databases. -# -# $Id: autovacuum_crash.test,v 1.2 2005/01/16 09:06:34 danielk1977 Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# If this build of the library does not support auto-vacuum, omit this -# whole file. 
-ifcapable {!autovacuum} { - finish_test - return -} - -rename finish_test really_finish_test2 -proc finish_test {} {} -set ISQUICK 1 - -rename sqlite3 real_sqlite3 -proc sqlite3 {args} { - set r [eval "real_sqlite3 $args"] - if { [llength $args] == 2 } { - [lindex $args 0] eval {pragma auto_vacuum = 1} - } - set r -} - -rename do_test really_do_test -proc do_test {args} { - set sc [concat really_do_test "autovacuum-[lindex $args 0]" \ - [lrange $args 1 end]] - eval $sc -} - -source $testdir/crash.test - -rename sqlite3 "" -rename real_sqlite3 sqlite3 -rename finish_test "" -rename really_finish_test2 finish_test -rename do_test "" -rename really_do_test do_test -finish_test - - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/autovacuum_ioerr2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/autovacuum_ioerr2.test --- sqlite3-3.4.2/test/autovacuum_ioerr2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/autovacuum_ioerr2.test 2009-06-05 18:02:52.000000000 +0100 @@ -15,7 +15,7 @@ # The tests in this file use special facilities that are only # available in the SQLite test fixture. # -# $Id: autovacuum_ioerr2.test,v 1.6 2007/04/28 15:47:44 danielk1977 Exp $ +# $Id: autovacuum_ioerr2.test,v 1.7 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -130,4 +130,3 @@ } finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/autovacuum_ioerr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/autovacuum_ioerr.test --- sqlite3-3.4.2/test/autovacuum_ioerr.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/autovacuum_ioerr.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -# 2001 September 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# This file runs the tests in the file ioerr.test with auto-vacuum enabled -# databases. -# -# $Id: autovacuum_ioerr.test,v 1.3 2006/01/16 12:46:41 danielk1977 Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# If this build of the library does not support auto-vacuum, omit this -# whole file. -ifcapable {!autovacuum} { - finish_test - return -} - -rename finish_test really_finish_test2 -proc finish_test {} {} -set ISQUICK 1 - -rename sqlite3 real_sqlite3 -proc sqlite3 {args} { - set r [eval "real_sqlite3 $args"] - if { [llength $args] == 2 } { - [lindex $args 0] eval {pragma auto_vacuum = 1} - } - set r -} - -rename do_test really_do_test -proc do_test {args} { - set sc [concat really_do_test "autovacuum-[lindex $args 0]" \ - [lrange $args 1 end]] - eval $sc -} - -source $testdir/ioerr.test - -rename sqlite3 "" -rename real_sqlite3 sqlite3 -rename finish_test "" -rename really_finish_test2 finish_test -rename do_test "" -rename really_do_test do_test -finish_test - - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/autovacuum.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/autovacuum.test --- sqlite3-3.4.2/test/autovacuum.test 2007-07-01 20:55:57.000000000 +0100 +++ sqlite3-3.6.16/test/autovacuum.test 2009-06-25 12:22:33.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the SELECT statement. 
# -# $Id: autovacuum.test,v 1.26 2007/04/07 15:03:17 danielk1977 Exp $ +# $Id: autovacuum.test,v 1.29 2009/04/06 17:50:03 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -484,7 +484,7 @@ PRAGMA auto_vacuum = 1; PRAGMA auto_vacuum; } -} $AUTOVACUUM +} [expr $AUTOVACUUM ? 1 : 0] do_test autovacuum-3.7 { execsql { DROP TABLE av1; @@ -643,4 +643,54 @@ expr {[file size test.db] / 1024} } {286} +#------------------------------------------------------------------------ +# Additional tests. +# +# Try to determine the autovacuum setting for a database that is locked. +# +do_test autovacuum-8.1 { + db close + sqlite3 db test.db + sqlite3 db2 test.db + db eval {PRAGMA auto_vacuum} +} {1} +do_test autovacuum-8.2 { + db eval {BEGIN EXCLUSIVE} + catchsql {PRAGMA auto_vacuum} db2 +} {1 {database is locked}} +catch {db2 close} +catch {db eval {COMMIT}} + +do_test autovacuum-9.1 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + DROP TABLE t4; + DROP TABLE t5; + PRAGMA page_count; + } +} {1} +do_test autovacuum-9.2 { + file size test.db +} 1024 +do_test autovacuum-9.3 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(NULL, randstr(50,50)); + } + for {set ii 0} {$ii < 10} {incr ii} { + db eval { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 } + } + file size test.db +} $::sqlite_pending_byte +do_test autovacuum-9.4 { + execsql { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 } +} {} +do_test autovacuum-9.5 { + execsql { DELETE FROM t1 WHERE rowid > (SELECT max(a)/2 FROM t1) } + file size test.db +} $::sqlite_pending_byte + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/avtrans.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/avtrans.test --- sqlite3-3.4.2/test/avtrans.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/avtrans.test 2009-06-05 18:02:52.000000000 +0100 @@ -12,7 +12,7 @@ # file is a copy of "trans.test" modified to run under autovacuum mode. # the point is to stress the autovacuum logic and try to get it to fail. # -# $Id: avtrans.test,v 1.5 2007/08/12 20:07:59 drh Exp $ +# $Id: avtrans.test,v 1.6 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] @@ -909,7 +909,7 @@ } [expr {$i%2==0}] } else { do_test avtrans-9.$i.5-$cnt { - expr {$sqlite_fullsync_count>0} + expr {$sqlite_fullsync_count==0} } {1} } } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/backup2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/backup2.test --- sqlite3-3.4.2/test/backup2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/backup2.test 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,187 @@ +# 2009 February 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the "backup" and "restore" methods +# of the TCL interface - methods which are based on the +# sqlite3_backup_XXX API. +# +# $Id: backup2.test,v 1.4 2009/04/07 14:14:23 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !trigger||!view { finish_test ; return } + +# Fill a database with test data. 
+# +do_test backup2-1 { + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(randstr(8000,8000)); + INSERT INTO t1 VALUES(randstr(8000,8000)); + INSERT INTO t1 VALUES(randstr(8000,8000)); + INSERT INTO t1 VALUES(randstr(8000,8000)); + INSERT INTO t1 VALUES(randstr(8000,8000)); + CREATE VIEW v1 AS SELECT substr(x,10,10) FROM t1; + CREATE TABLE t2(a,b); + INSERT INTO t2 VALUES(1,2); + INSERT INTO t2 VALUES(2,4); + INSERT INTO t2 SELECT a+2, (a+2)*2 FROM t2; + INSERT INTO t2 SELECT a+4, (a+4)*2 FROM t2; + INSERT INTO t2 SELECT a+8, (a+8)*2 FROM t2; + INSERT INTO t2 SELECT a+16, (a+16)*2 FROM t2; + INSERT INTO t2 SELECT a+32, (a+32)*2 FROM t2; + INSERT INTO t2 SELECT a+64, (a+64)*2 FROM t2; + INSERT INTO t2 SELECT a+128, (a+128)*2 FROM t2; + CREATE INDEX t2i1 ON t2(a,b); + CREATE TRIGGER r1 AFTER INSERT ON t2 BEGIN + SELECT 'hello'; + END; + ANALYZE; + PRAGMA integrity_check; + } +} {ok} + +# Remember a check-sum on the database file. +# +unset -nocomplain cksum +set cksum [dbcksum db main] + +# Make a backup of the test data. Verify that the backup copy +# is identical to the original. +# +do_test backup2-2 { + file delete -force bu1.db + db backup bu1.db + sqlite3 db2 bu1.db + dbcksum db2 main +} $cksum + +# Delete the original. Restore from backup. Verify the content is +# unchanged. +# +do_test backup2-3.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + db2 eval {BEGIN EXCLUSIVE} + set rc [catch {db restore bu1.db} res] + lappend rc $res + db2 eval {ROLLBACK} + set rc +} {1 {restore failed: source database busy}} +do_test backup2-3.2 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + db restore bu1.db + dbcksum db main +} $cksum + +# Use alternative databases - other than "main". +# +do_test backup2-4 { + db restore temp bu1.db + dbcksum db temp +} $cksum +do_test backup2-5 { + db2 close + file delete -force bu1.db bu2.db + db backup temp bu2.db + sqlite3 db2 bu2.db + dbcksum db2 main +} $cksum + +# Try to backup to a readonly file. +# +do_test backup2-6 { + db2 close + catch {file attributes bu2.db -permissions r--------} + catch {file attributes bu2.db -readonly 1} + set rc [catch {db backup temp bu2.db} res] + lappend rc $res +} {1 {backup failed: attempt to write a readonly database}} + +# Try to backup to something that is not a database file. +# +do_test backup2-7 { + catch {file attributes bu2.db -readonly 0} + catch {file attributes bu2.db -permissions rw-------} + set out [open bu2.db w] + puts $out "This is not a valid database file" + close $out + set rc [catch {db backup temp bu2.db} res] + lappend rc $res +} {1 {backup failed: file is encrypted or is not a database}} + +# Try to backup database that does not exist +# +do_test backup2-8 { + file delete -force bu1.db + set rc [catch {db backup aux1 bu1.db} res] + lappend rc $res +} {1 {backup failed: unknown database aux1}} + +# Invalid syntax on the backup method +# +do_test backup2-9 { + set rc [catch {db backup} res] + lappend rc $res +} {1 {wrong # args: should be "db backup ?DATABASE? FILENAME"}} + +# Try to restore from an unreadable file. 
+# +if {$tcl_platform(platform)=="windows"} { + do_test backup2-10 { + file delete -force bu3.db + file mkdir bu3.db + set rc [catch {db restore temp bu3.db} res] + lappend rc $res + } {1 {cannot open source database: unable to open database file}} +} +if {$tcl_platform(platform)!="windows"} { + do_test backup2-10 { + file delete -force bu3.db + file mkdir bu3.db + set rc [catch {db restore temp bu3.db} res] + lappend rc $res + } {1 {cannot open source database: disk I/O error}} +} + +# Try to restore from something that is not a database file. +# +do_test backup2-11 { + set rc [catch {db restore temp bu2.db} res] + lappend rc $res +} {1 {restore failed: file is encrypted or is not a database}} + +# Try to restore a database that does not exist +# +do_test backup2-12 { + set rc [catch {db restore aux1 bu2.db} res] + lappend rc $res +} {1 {restore failed: unknown database aux1}} +do_test backup2-13 { + file delete -force bu4.db + set rc [catch {db restore bu4.db} res] + lappend rc $res +} {1 {cannot open source database: unable to open database file}} + +# Invalid syntax on the restore method +# +do_test backup2-14 { + set rc [catch {db restore} res] + lappend rc $res +} {1 {wrong # args: should be "db restore ?DATABASE? FILENAME"}} + +file delete -force bu1.db bu2.db bu3.db bu4.db + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/backup_ioerr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/backup_ioerr.test --- sqlite3-3.4.2/test/backup_ioerr.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/backup_ioerr.test 2009-06-25 12:23:18.000000000 +0100 @@ -0,0 +1,286 @@ +# 2009 January 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the handling of IO errors by the +# sqlite3_backup_XXX APIs. +# +# $Id: backup_ioerr.test,v 1.3 2009/04/10 18:41:01 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc data_checksum {db file} { + $db one "SELECT md5sum(a, b) FROM ${file}.t1" +} +proc test_contents {name db1 file1 db2 file2} { + $db2 eval {select * from sqlite_master} + $db1 eval {select * from sqlite_master} + set checksum [data_checksum $db2 $file2] + uplevel [list do_test $name [list data_checksum $db1 $file1] $checksum] +} + +#-------------------------------------------------------------------- +# This proc creates a database of approximately 290 pages. Depending +# on whether or not auto-vacuum is configured. Test cases backup_ioerr-1.* +# verify nothing more than this assumption. 
+# +proc populate_database {db {xtra_large 0}} { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1; + CREATE INDEX i1 ON t1(b); + COMMIT; + } $db + if {$xtra_large} { + execsql { INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1 } $db + } +} +do_test backup_ioerr-1.1 { + populate_database db + set nPage [expr {[file size test.db] / 1024}] + expr {$nPage>130 && $nPage<160} +} {1} +do_test backup_ioerr-1.2 { + expr {[file size test.db] > $sqlite_pending_byte} +} {1} +do_test backup_ioerr-1.3 { + db close + file delete -force test.db +} {} + +# Turn off IO error simulation. +# +proc clear_ioerr_simulation {} { + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_hardhit 0 + set ::sqlite_io_error_pending 0 + set ::sqlite_io_error_persist 0 +} + +#-------------------------------------------------------------------- +# The following procedure runs with SQLite's IO error simulation +# enabled. +# +# 1) Start with a reasonably sized database. One that includes the +# pending-byte (locking) page. +# +# 2) Open a backup process. Set the cache-size for the destination +# database to 10 pages only. +# +# 3) Step the backup process N times to partially backup the database +# file. If an IO error is reported, then the backup process is +# concluded with a call to backup_finish(). +# +# If an IO error occurs, verify that: +# +# * the call to backup_step() returns an SQLITE_IOERR_XXX error code. +# +# * after the failed call to backup_step() but before the call to +# backup_finish() the destination database handle error code and +# error message remain unchanged. +# +# * the call to backup_finish() returns an SQLITE_IOERR_XXX error code. +# +# * following the call to backup_finish(), the destination database +# handle has been populated with an error code and error message. +# +# 4) Write to the database via the source database connection. Check +# that: +# +# * If an IO error occurs while writing the source database, the +# write operation should report an IO error. The backup should +# proceed as normal. +# +# * If an IO error occurs while updating the backup, the write +# operation should proceed normally. The error should be reported +# from the next call to backup_step() (in step 5 of this test +# procedure). +# +# 5) Step the backup process to finish the backup. If an IO error is +# reported, then the backup process is concluded with a call to +# backup_finish(). +# +# Test that if an IO error occurs, or if one occured while updating +# the backup database during step 4, then the conditions listed +# under step 3 are all true. +# +# 6) Finish the backup process. +# +# * If the backup succeeds (backup_finish() returns SQLITE_OK), then +# the contents of the backup database should match that of the +# source database. +# +# * If the backup fails (backup_finish() returns other than SQLITE_OK), +# then the contents of the backup database should be as they were +# before the operation was started. +# +# The following factors are varied: +# +# * Destination database is initially larger than the source database, OR +# * Destination database is initially smaller than the source database. 
+# +# * IO errors are transient, OR +# * IO errors are persistent. +# +# * Destination page-size is smaller than the source. +# * Destination page-size is the same as the source. +# * Destination page-size is larger than the source. +# + +set iTest 1 +foreach bPersist {0 1} { +foreach iDestPagesize {512 1024 4096} { +foreach zSetupBak [list "" {populate_database ddb 1}] { + + incr iTest + set bStop 0 +for {set iError 1} {$bStop == 0} {incr iError} { + # Disable IO error simulation. + clear_ioerr_simulation + + catch { ddb close } + catch { sdb close } + catch { file delete -force test.db } + catch { file delete -force bak.db } + + # Open the source and destination databases. + sqlite3 sdb test.db + sqlite3 ddb bak.db + + # Step 1: Populate the source and destination databases. + populate_database sdb + ddb eval "PRAGMA page_size = $iDestPagesize" + ddb eval "PRAGMA cache_size = 10" + eval $zSetupBak + + # Step 2: Open the backup process. + sqlite3_backup B ddb main sdb main + + # Enable IO error simulation. + set ::sqlite_io_error_pending $iError + set ::sqlite_io_error_persist $bPersist + + # Step 3: Partially backup the database. If an IO error occurs, check + # a few things then skip to the next iteration of the loop. + # + set rc [B step 100] + if {$::sqlite_io_error_hardhit} { + + do_test backup_ioerr-$iTest.$iError.1 { + string match SQLITE_IOERR* $rc + } {1} + do_test backup_ioerr-$iTest.$iError.2 { + list [sqlite3_errcode ddb] [sqlite3_errmsg ddb] + } {SQLITE_OK {not an error}} + + set rc [B finish] + do_test backup_ioerr-$iTest.$iError.3 { + string match SQLITE_IOERR* $rc + } {1} + + do_test backup_ioerr-$iTest.$iError.4 { + sqlite3_errmsg ddb + } {disk I/O error} + + clear_ioerr_simulation + sqlite3 ddb bak.db + integrity_check backup_ioerr-$iTest.$iError.5 ddb + + continue + } + + # No IO error was encountered during step 3. Check that backup_step() + # returned SQLITE_OK before proceding. + do_test backup_ioerr-$iTest.$iError.6 { + expr {$rc eq "SQLITE_OK"} + } {1} + + # Step 4: Write to the source database. + set rc [catchsql { UPDATE t1 SET b = randstr(1000,1000) WHERE a < 50 } sdb] + + if {[lindex $rc 0] && $::sqlite_io_error_persist==0} { + # The IO error occured while updating the source database. In this + # case the backup should be able to continue. + set rc [B step 5000] + if { $rc != "SQLITE_IOERR_UNLOCK" } { + do_test backup_ioerr-$iTest.$iError.7 { + list [B step 5000] [B finish] + } {SQLITE_DONE SQLITE_OK} + + clear_ioerr_simulation + test_contents backup_ioerr-$iTest.$iError.8 ddb main sdb main + integrity_check backup_ioerr-$iTest.$iError.9 ddb + } else { + do_test backup_ioerr-$iTest.$iError.10 { + B finish + } {SQLITE_IOERR_UNLOCK} + } + + clear_ioerr_simulation + sqlite3 ddb bak.db + integrity_check backup_ioerr-$iTest.$iError.11 ddb + + continue + } + + # Step 5: Finish the backup operation. If an IO error occurs, check that + # it is reported correctly and skip to the next iteration of the loop. 
+ # + set rc [B step 5000] + if {$rc != "SQLITE_DONE"} { + do_test backup_ioerr-$iTest.$iError.12 { + string match SQLITE_IOERR* $rc + } {1} + do_test backup_ioerr-$iTest.$iError.13 { + list [sqlite3_errcode ddb] [sqlite3_errmsg ddb] + } {SQLITE_OK {not an error}} + + set rc [B finish] + do_test backup_ioerr-$iTest.$iError.14 { + string match SQLITE_IOERR* $rc + } {1} + do_test backup_ioerr-$iTest.$iError.15 { + sqlite3_errmsg ddb + } {disk I/O error} + + clear_ioerr_simulation + sqlite3 ddb bak.db + integrity_check backup_ioerr-$iTest.$iError.16 ddb + + continue + } + + # The backup was successfully completed. + # + do_test backup_ioerr-$iTest.$iError.17 { + list [set rc] [B finish] + } {SQLITE_DONE SQLITE_OK} + + clear_ioerr_simulation + sqlite3 sdb test.db + sqlite3 ddb bak.db + + test_contents backup_ioerr-$iTest.$iError.18 ddb main sdb main + integrity_check backup_ioerr-$iTest.$iError.19 ddb + + set bStop [expr $::sqlite_io_error_pending<=0] +}}}} + +catch { sdb close } +catch { ddb close } +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/backup_malloc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/backup_malloc.test --- sqlite3-3.4.2/test/backup_malloc.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/backup_malloc.test 2009-02-04 22:46:47.000000000 +0000 @@ -0,0 +1,86 @@ +# 2009 January 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the handling of OOM errors by the +# sqlite3_backup_XXX APIs. +# +# $Id: backup_malloc.test,v 1.2 2009/02/04 22:46:47 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +source $testdir/malloc_common.tcl + +do_malloc_test backup_malloc-1 -tclprep { + execsql { + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1; + CREATE INDEX i1 ON t1(b); + COMMIT; + } + sqlite3 db2 test2.db + execsql { PRAGMA cache_size = 10 } db2 +} -tclbody { + + # Create a backup object. + # + set rc [catch {sqlite3_backup B db2 main db main}] + if {$rc && [sqlite3_errcode db2] == "SQLITE_NOMEM"} { + error "out of memory" + } + + # Run the backup process some. + # + set rc [B step 50] + if {$rc == "SQLITE_NOMEM" || $rc == "SQLITE_IOERR_NOMEM"} { + error "out of memory" + } + + # Update the database. + # + execsql { UPDATE t1 SET a = a + 1 } + + # Finish doing the backup. + # + set rc [B step 5000] + if {$rc == "SQLITE_NOMEM" || $rc == "SQLITE_IOERR_NOMEM"} { + error "out of memory" + } + + # Finalize the backup. 
+ B finish +} -cleanup { + catch { B finish } +} + +do_malloc_test backup_malloc-1 -tclprep { + sqlite3 db2 test2.db +} -tclbody { + set rc [catch {sqlite3_backup B db2 temp db main}] + set errcode [sqlite3_errcode db2] + if {$rc && ($errcode == "SQLITE_NOMEM" || $errcode == "SQLITE_IOERR_NOMEM")} { + error "out of memory" + } +} -cleanup { + catch { B finish } + db2 close +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/backup.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/backup.test --- sqlite3-3.4.2/test/backup.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/backup.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,907 @@ +# 2009 January 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite3_backup_XXX API. +# +# $Id: backup.test,v 1.11 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +#--------------------------------------------------------------------- +# Test organization: +# +# backup-1.*: Warm-body tests. +# +# backup-2.*: Test backup under various conditions. To and from in-memory +# databases. To and from empty/populated databases. etc. +# +# backup-3.*: Verify that the locking-page (pending byte page) is handled. +# +# backup-4.*: Test various error conditions. +# +# backup-5.*: Test the source database being modified during a backup. +# +# backup-6.*: Test the backup_remaining() and backup_pagecount() APIs. +# +# backup-7.*: Test SQLITE_BUSY and SQLITE_LOCKED errors. +# +# backup-8.*: Test multiple simultaneous backup operations. +# +# backup-9.*: Test that passing a negative argument to backup_step() is +# interpreted as "copy the whole file". +# + +proc data_checksum {db file} { $db one "SELECT md5sum(a, b) FROM ${file}.t1" } +proc test_contents {name db1 file1 db2 file2} { + $db2 eval {select * from sqlite_master} + $db1 eval {select * from sqlite_master} + set checksum [data_checksum $db2 $file2] + uplevel [list do_test $name [list data_checksum $db1 $file1] $checksum] +} + +do_test backup-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + COMMIT; + } +} {} + +# Sanity check to verify that the [test_contents] proc works. +# +test_contents backup-1.2 db main db main + +# Check that it is possible to create and finish backup operations. +# +do_test backup-1.3.1 { + file delete test2.db + sqlite3 db2 test2.db + sqlite3_backup B db2 main db main +} {B} +do_test backup-1.3.2 { + B finish +} {SQLITE_OK} +do_test backup-1.3.3 { + info commands B +} {} + +# Simplest backup operation. Backup test.db to test2.db. test2.db is +# initially empty. test.db uses the default page size. 
+# +do_test backup-1.4.1 { + sqlite3_backup B db2 main db main +} {B} +do_test backup-1.4.2 { + B step 200 +} {SQLITE_DONE} +do_test backup-1.4.3 { + B finish +} {SQLITE_OK} +do_test backup-1.4.4 { + info commands B +} {} +test_contents backup-1.4.5 db2 main db main +db close +db2 close +# +# End of backup-1.* tests. +#--------------------------------------------------------------------- + + +#--------------------------------------------------------------------- +# The following tests, backup-2.*, are based on the following procedure: +# +# 1) Populate the source database. +# 2) Populate the destination database. +# 3) Run the backup to completion. (backup-2.*.1) +# 4) Integrity check the destination db. (backup-2.*.2) +# 5) Check that the contents of the destination db is the same as that +# of the source db. (backup-2.*.3) +# +# The test is run with all possible combinations of the following +# input parameters, except that if the destination is an in-memory +# database, the only page size tested is 1024 bytes (the same as the +# source page-size). +# +# * Source database is an in-memory database, OR +# * Source database is a file-backed database. +# +# * Target database is an in-memory database, OR +# * Target database is a file-backed database. +# +# * Destination database is a main file, OR +# * Destination database is an attached file, OR +# * Destination database is a temp database. +# +# * Target database is empty (zero bytes), OR +# * Target database is larger than the source, OR +# * Target database is smaller than the source. +# +# * Target database page-size is the same as the source, OR +# * Target database page-size is larger than the source, OR +# * Target database page-size is smaller than the source. +# +# * Each call to step copies a single page, OR +# * A single call to step copies the entire source database. +# +set iTest 1 +foreach zSrcFile {test.db :memory:} { +foreach zDestFile {test2.db :memory:} { +foreach zOpenScript [list { + sqlite3 db $zSrcFile + sqlite3 db2 $zSrcFile + db2 eval "ATTACH '$zDestFile' AS bak" + set db_dest db2 + set file_dest bak +} { + sqlite3 db $zSrcFile + sqlite3 db2 $zDestFile + set db_dest db2 + set file_dest main +} { + sqlite3 db $zSrcFile + sqlite3 db2 $zDestFile + set db_dest db2 + set file_dest temp +}] { +foreach rows_dest {0 3 10} { +foreach pgsz_dest {512 1024 2048} { +foreach nPagePerStep {1 200} { + + # Open the databases. + catch { file delete test.db } + catch { file delete test2.db } + eval $zOpenScript + + # Set to true if copying to an in-memory destination. Copying to an + # in-memory destination is only possible if the initial destination + # page size is the same as the source page size (in this case 1024 bytes). + # + set isMemDest [expr { + $zDestFile eq ":memory:" || $file_dest eq "temp" && $TEMP_STORE==3 + }] + + if { $isMemDest==0 || $pgsz_dest == 1024 } { + if 0 { + puts -nonewline "Test $iTest: src=$zSrcFile dest=$zDestFile" + puts -nonewline " (as $db_dest.$file_dest)" + puts -nonewline " rows_dest=$rows_dest pgsz_dest=$pgsz_dest" + puts "" + } + + # Set up the content of the source database. + execsql { + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + COMMIT; + } + + + + # Set up the content of the target database. 
+ execsql "PRAGMA ${file_dest}.page_size = ${pgsz_dest}" $db_dest + if {$rows_dest != 0} { + execsql " + BEGIN; + CREATE TABLE ${file_dest}.t1(a, b); + CREATE INDEX ${file_dest}.i1 ON t1(a, b); + " $db_dest + for {set ii 0} {$ii < $rows_dest} {incr ii} { + execsql " + INSERT INTO ${file_dest}.t1 VALUES(1, randstr(1000,1000)) + " $db_dest + } + } + + # Backup the source database. + do_test backup-2.$iTest.1 { + sqlite3_backup B $db_dest $file_dest db main + while {[B step $nPagePerStep]=="SQLITE_OK"} {} + B finish + } {SQLITE_OK} + + # Run integrity check on the backup. + do_test backup-2.$iTest.2 { + execsql "PRAGMA ${file_dest}.integrity_check" $db_dest + } {ok} + + test_contents backup-2.$iTest.3 db main $db_dest $file_dest + + } + + db close + catch {db2 close} + incr iTest + +} } } } } } +# +# End of backup-2.* tests. +#--------------------------------------------------------------------- + +#--------------------------------------------------------------------- +# These tests, backup-3.*, ensure that nothing goes wrong if either +# the source or destination database are large enough to include the +# the locking-page (the page that contains the range of bytes that +# the locks are applied to). These tests assume that the pending +# byte is at offset 0x00010000 (64KB offset), as set by tester.tcl, +# not at the 1GB offset as it usually is. +# +# The test procedure is as follows (same procedure as used for +# the backup-2.* tests): +# +# 1) Populate the source database. +# 2) Populate the destination database. +# 3) Run the backup to completion. (backup-3.*.1) +# 4) Integrity check the destination db. (backup-3.*.2) +# 5) Check that the contents of the destination db is the same as that +# of the source db. (backup-3.*.3) +# +# The test procedure is run with the following parameters varied: +# +# * Source database includes pending-byte page. +# * Source database does not include pending-byte page. +# +# * Target database includes pending-byte page. +# * Target database does not include pending-byte page. +# +# * Target database page-size is the same as the source, OR +# * Target database page-size is larger than the source, OR +# * Target database page-size is smaller than the source. +# +set iTest 1 +foreach nSrcPg {10 64 65 66 100} { +foreach nDestRow {10 100} { +foreach nDestPgsz {512 1024 2048 4096} { + + catch { file delete test.db } + catch { file delete test2.db } + sqlite3 db test.db + sqlite3 db2 test2.db + + # Set up the content of the two databases. + # + execsql { PRAGMA page_size = 1024 } + execsql "PRAGMA page_size = $nDestPgsz" db2 + foreach db {db db2} { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + COMMIT; + } $db + } + while {[file size test.db]/1024 < $nSrcPg} { + execsql { INSERT INTO t1 VALUES($ii, randstr(200,200)) } + } + + for {set ii 0} {$ii < $nDestRow} {incr ii} { + execsql { INSERT INTO t1 VALUES($ii, randstr(1000,1000)) } db2 + } + + # Backup the source database. + do_test backup-3.$iTest.1 { + sqlite3_backup B db main db2 main + while {[B step 10]=="SQLITE_OK"} {} + B finish + } {SQLITE_OK} + + # Run integrity check on the backup. 
+ do_test backup-3.$iTest.2 { + execsql "PRAGMA integrity_check" db2 + } {ok} + + test_contents backup-3.$iTest.3 db main db2 main + + db close + db2 close + incr iTest +} +} +} + +#-------------------------------------------------------------------- +do_test backup-3.$iTest.1 { + catch { file delete -force test.db } + catch { file delete -force test2.db } + sqlite3 db test.db + set iTab 1 + + db eval { PRAGMA page_size = 512 } + while {[file size test.db] <= $::sqlite_pending_byte} { + db eval "CREATE TABLE t${iTab}(a, b, c)" + incr iTab + } + + sqlite3 db2 test2.db + db2 eval { PRAGMA page_size = 4096 } + while {[file size test2.db] < $::sqlite_pending_byte} { + db2 eval "CREATE TABLE t${iTab}(a, b, c)" + incr iTab + } + + sqlite3_backup B db2 main db main + B step -1 +} {SQLITE_DONE} + +do_test backup-3.$iTest.2 { + B finish +} {SQLITE_OK} + +# +# End of backup-3.* tests. +#--------------------------------------------------------------------- + + +#--------------------------------------------------------------------- +# The following tests, backup-4.*, test various error conditions: +# +# backup-4.1.*: Test invalid database names. +# +# backup-4.2.*: Test that the source database cannot be detached while +# a backup is in progress. +# +# backup-4.3.*: Test that the source database handle cannot be closed +# while a backup is in progress. +# +# backup-4.4.*: Test an attempt to specify the same handle for the +# source and destination databases. +# +# backup-4.5.*: Test that an in-memory destination with a different +# page-size to the source database is an error. +# +sqlite3 db test.db +sqlite3 db2 test2.db + +do_test backup-4.1.1 { + catch { sqlite3_backup B db aux db2 main } +} {1} +do_test backup-4.1.2 { + sqlite3_errmsg db +} {unknown database aux} +do_test backup-4.1.3 { + catch { sqlite3_backup B db main db2 aux } +} {1} +do_test backup-4.1.4 { + sqlite3_errmsg db +} {unknown database aux} + +do_test backup-4.2.1 { + catch { file delete -force test3.db } + catch { file delete -force test4.db } + execsql { + ATTACH 'test3.db' AS aux1; + CREATE TABLE aux1.t1(a, b); + } + execsql { + ATTACH 'test4.db' AS aux2; + CREATE TABLE aux2.t2(a, b); + } db2 + sqlite3_backup B db aux1 db2 aux2 +} {B} +do_test backup-4.2.2 { + catchsql { DETACH aux2 } db2 +} {1 {database aux2 is locked}} +do_test backup-4.2.3 { + B step 50 +} {SQLITE_DONE} +do_test backup-4.2.4 { + B finish +} {SQLITE_OK} + +do_test backup-4.3.1 { + sqlite3_backup B db aux1 db2 aux2 +} {B} +do_test backup-4.3.2 { + db2 cache flush + sqlite3_close db2 +} {SQLITE_BUSY} +do_test backup-4.3.3 { + sqlite3_errmsg db2 +} {unable to close due to unfinished backup operation} +do_test backup-4.3.4 { + B step 50 +} {SQLITE_DONE} +do_test backup-4.3.5 { + B finish +} {SQLITE_OK} + +do_test backup-4.4.1 { + set rc [catch {sqlite3_backup B db main db aux1}] + list $rc [sqlite3_errcode db] [sqlite3_errmsg db] +} {1 SQLITE_ERROR {source and destination must be distinct}} +db close +db2 close + +do_test backup-4.5.1 { + catch { file delete -force test.db } + sqlite3 db test.db + sqlite3 db2 :memory: + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } + execsql { + PRAGMA page_size = 4096; + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES(3, 4); + } db2 + sqlite3_backup B db2 main db main +} {B} +do_test backup-4.5.2 { + B step 5000 +} {SQLITE_READONLY} +do_test backup-4.5.3 { + B finish +} {SQLITE_READONLY} + +db close +db2 close +# +# End of backup-5.* tests. 
+#--------------------------------------------------------------------- + +#--------------------------------------------------------------------- +# The following tests, backup-5.*, test that the backup works properly +# when the source database is modified during the backup. Test cases +# are organized as follows: +# +# backup-5.x.1.*: Nothing special. Modify the database mid-backup. +# +# backup-5.x.2.*: Modify the database mid-backup so that one or more +# pages are written out due to cache stress. Then +# rollback the transaction. +# +# backup-5.x.3.*: Database is vacuumed. +# +# backup-5.x.4.*: Database is vacuumed and the page-size modified. +# +# backup-5.x.5.*: Database is shrunk via incr-vacuum. +# +# Each test is run three times, in the following configurations: +# +# 1) Backing up file-to-file. The writer writes via an external pager. +# 2) Backing up file-to-file. The writer writes via the same pager as +# is used by the backup operation. +# 3) Backing up memory-to-file. +# +set iTest 0 +foreach {writer file} {db test.db db3 test.db db :memory:} { + incr iTest + catch { file delete bak.db } + sqlite3 db2 bak.db + catch { file delete $file } + sqlite3 db $file + sqlite3 db3 $file + + do_test backup-5.$iTest.1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + COMMIT; + } + expr {[execsql {PRAGMA page_count}] > 10} + } {1} + do_test backup-5.$iTest.1.2 { + sqlite3_backup B db2 main db main + B step 5 + } {SQLITE_OK} + do_test backup-5.$iTest.1.3 { + execsql { UPDATE t1 SET a = a + 1 } $writer + B step 50 + } {SQLITE_DONE} + do_test backup-5.$iTest.1.4 { + B finish + } {SQLITE_OK} + integrity_check backup-5.$iTest.1.5 db2 + test_contents backup-5.$iTest.1.6 db main db2 main + + do_test backup-5.$iTest.2.1 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT '', randstr(1000,1000) FROM t1; + COMMIT; + } + } {} + do_test backup-5.$iTest.2.2 { + sqlite3_backup B db2 main db main + B step 50 + } {SQLITE_OK} + do_test backup-5.$iTest.2.3 { + execsql { + BEGIN; + UPDATE t1 SET a = a + 1; + ROLLBACK; + } $writer + B step 5000 + } {SQLITE_DONE} + do_test backup-5.$iTest.2.4 { + B finish + } {SQLITE_OK} + integrity_check backup-5.$iTest.2.5 db2 + test_contents backup-5.$iTest.2.6 db main db2 main + + do_test backup-5.$iTest.3.1 { + execsql { UPDATE t1 SET b = randstr(1000,1000) } + } {} + do_test backup-5.$iTest.3.2 { + sqlite3_backup B db2 main db main + B step 50 + } {SQLITE_OK} + do_test backup-5.$iTest.3.3 { + execsql { VACUUM } $writer + B step 5000 + } {SQLITE_DONE} + do_test backup-5.$iTest.3.4 { + B finish + } {SQLITE_OK} + integrity_check backup-5.$iTest.3.5 db2 + test_contents backup-5.$iTest.3.6 db main db2 main + + do_test backup-5.$iTest.4.1 { + execsql { UPDATE t1 SET b = randstr(1000,1000) } + } {} + do_test backup-5.$iTest.4.2 { + sqlite3_backup B db2 main db main + B step 50 + } {SQLITE_OK} + do_test backup-5.$iTest.4.3 { + execsql { + PRAGMA page_size = 2048; + VACUUM; + } $writer + B step 5000 + } {SQLITE_DONE} + do_test backup-5.$iTest.4.4 { + B finish + } {SQLITE_OK} + integrity_check backup-5.$iTest.4.5 db2 + 
test_contents backup-5.$iTest.4.6 db main db2 main + + catch {db close} + catch {db2 close} + catch {db3 close} + catch { file delete bak.db } + sqlite3 db2 bak.db + catch { file delete $file } + sqlite3 db $file + sqlite3 db3 $file + do_test backup-5.$iTest.5.1 { + execsql { + PRAGMA auto_vacuum = incremental; + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + COMMIT; + } + } {} + do_test backup-5.$iTest.5.2 { + sqlite3_backup B db2 main db main + B step 8 + } {SQLITE_OK} + do_test backup-5.$iTest.5.3 { + execsql { + DELETE FROM t1; + PRAGMA incremental_vacuum; + } $writer + B step 50 + } {SQLITE_DONE} + do_test backup-5.$iTest.5.4 { + B finish + } {SQLITE_OK} + integrity_check backup-5.$iTest.5.5 db2 + test_contents backup-5.$iTest.5.6 db main db2 main + catch {db close} + catch {db2 close} + catch {db3 close} +} +# +# End of backup-5.* tests. +#--------------------------------------------------------------------- + +#--------------------------------------------------------------------- +# Test the sqlite3_backup_remaining() and backup_pagecount() APIs. +# +do_test backup-6.1 { + catch { file delete -force test.db } + catch { file delete -force test2.db } + sqlite3 db test.db + sqlite3 db2 test2.db + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + COMMIT; + } +} {} +do_test backup-6.2 { + set nTotal [expr {[file size test.db]/1024}] + sqlite3_backup B db2 main db main + B step 1 +} {SQLITE_OK} +do_test backup-6.3 { + B pagecount +} $nTotal +do_test backup-6.4 { + B remaining +} [expr $nTotal-1] +do_test backup-6.5 { + B step 5 + list [B remaining] [B pagecount] +} [list [expr $nTotal-6] $nTotal] +do_test backup-6.6 { + execsql { CREATE TABLE t2(a PRIMARY KEY, b) } + B step 1 + list [B remaining] [B pagecount] +} [list [expr $nTotal-5] [expr $nTotal+2]] + +do_test backup-6.X { + B finish +} {SQLITE_OK} + +catch {db close} +catch {db2 close} + +#--------------------------------------------------------------------- +# Test cases backup-7.* test that SQLITE_BUSY and SQLITE_LOCKED errors +# are returned correctly: +# +# backup-7.1.*: Source database is externally locked (return SQLITE_BUSY). +# +# backup-7.2.*: Attempt to step the backup process while a +# write-transaction is underway on the source pager (return +# SQLITE_LOCKED). +# +# backup-7.3.*: Destination database is externally locked (return SQLITE_BUSY). 
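# The idiom that backup-6.* and backup-7.* exercise is the incremental
# copy loop sketched here. This is a minimal sketch only, assuming the
# tester.tcl wrapper commands used throughout this file (sqlite3_backup,
# [B step], [B remaining], [B pagecount], [B finish]); the handles
# "src"/"dst", the file names, the 5-page step size and the 250ms retry
# delay are illustrative, not part of the tests.
#
sqlite3 src src.db
sqlite3 dst dst.db
sqlite3_backup B dst main src main
while {1} {
  set rc [B step 5]                     ;# copy up to 5 pages per call
  puts "[B remaining] of [B pagecount] pages still to copy"
  if {$rc eq "SQLITE_OK"} continue      ;# more pages remain
  if {$rc eq "SQLITE_BUSY" || $rc eq "SQLITE_LOCKED"} {
    after 250                           ;# a writer holds a lock
    continue                            ;# retry the same step later
  }
  break                                 ;# SQLITE_DONE or a hard error
}
B finish                                ;# SQLITE_OK if the copy succeeded
src close
dst close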
+# +do_test backup-7.0 { + catch { file delete -force test.db } + catch { file delete -force test2.db } + sqlite3 db2 test2.db + sqlite3 db test.db + execsql { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 SELECT a+ 1, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 2, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 4, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+ 8, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+16, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+32, randstr(1000,1000) FROM t1; + INSERT INTO t1 SELECT a+64, randstr(1000,1000) FROM t1; + } +} {} + +do_test backup-7.1.1 { + sqlite3_backup B db2 main db main + B step 5 +} {SQLITE_OK} +do_test backup-7.1.2 { + sqlite3 db3 test.db + execsql { BEGIN EXCLUSIVE } db3 + B step 5 +} {SQLITE_BUSY} +do_test backup-7.1.3 { + execsql { ROLLBACK } db3 + B step 5 +} {SQLITE_OK} +do_test backup-7.2.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 4); + } +} {} +do_test backup-7.2.2 { + B step 5000 +} {SQLITE_BUSY} +do_test backup-7.2.3 { + execsql { ROLLBACK } + B step 5000 +} {SQLITE_DONE} +do_test backup-7.2.4 { + B finish +} {SQLITE_OK} +test_contents backup-7.2.5 db main db2 main +integrity_check backup-7.3.6 db2 + +do_test backup-7.3.1 { + db2 close + db3 close + file delete -force test2.db + sqlite3 db2 test2.db + sqlite3 db3 test2.db + + sqlite3_backup B db2 main db main + execsql { BEGIN ; CREATE TABLE t2(a, b); } db3 + + B step 5 +} {SQLITE_BUSY} +do_test backup-7.3.2 { + execsql { COMMIT } db3 + B step 5000 +} {SQLITE_DONE} +do_test backup-7.3.3 { + B finish +} {SQLITE_OK} +test_contents backup-7.3.4 db main db2 main +integrity_check backup-7.3.5 db2 +catch { db2 close } +catch { db3 close } + +#----------------------------------------------------------------------- +# The following tests, backup-8.*, test attaching multiple backup +# processes to the same source database. Also, reading from the source +# database while a read transaction is active. +# +# These tests reuse the database "test.db" left over from backup-7.*. 
+# +do_test backup-8.1 { + catch { file delete -force test2.db } + catch { file delete -force test3.db } + sqlite3 db2 test2.db + sqlite3 db3 test3.db + + sqlite3_backup B2 db2 main db main + sqlite3_backup B3 db3 main db main + list [B2 finish] [B3 finish] +} {SQLITE_OK SQLITE_OK} +do_test backup-8.2 { + sqlite3_backup B3 db3 main db main + sqlite3_backup B2 db2 main db main + list [B2 finish] [B3 finish] +} {SQLITE_OK SQLITE_OK} +do_test backup-8.3 { + sqlite3_backup B2 db2 main db main + sqlite3_backup B3 db3 main db main + B2 step 5 +} {SQLITE_OK} +do_test backup-8.4 { + execsql { + BEGIN; + SELECT * FROM sqlite_master; + } + B3 step 5 +} {SQLITE_OK} +do_test backup-8.5 { + list [B3 step 5000] [B3 finish] +} {SQLITE_DONE SQLITE_OK} +do_test backup-8.6 { + list [B2 step 5000] [B2 finish] +} {SQLITE_DONE SQLITE_OK} +test_contents backup-8.7 db main db2 main +test_contents backup-8.8 db main db3 main +do_test backup-8.9 { + execsql { PRAGMA lock_status } +} {main shared temp closed} +do_test backup-8.10 { + execsql COMMIT +} {} +catch { db2 close } +catch { db3 close } + +#----------------------------------------------------------------------- +# The following tests, backup-9.*, test that: +# +# * Passing 0 as an argument to sqlite3_backup_step() means no pages +# are backed up (backup-9.1.*), and +# * Passing a negative value as an argument to sqlite3_backup_step() means +# all pages are backed up (backup-9.2.*). +# +# These tests reuse the database "test.db" left over from backup-7.*. +# +do_test backup-9.1.1 { + sqlite3 db2 test2.db + sqlite3_backup B db2 main db main + B step 1 +} {SQLITE_OK} +do_test backup-9.1.2 { + set nRemaining [B remaining] + expr {$nRemaining>100} +} {1} +do_test backup-9.1.3 { + B step 0 +} {SQLITE_OK} +do_test backup-9.1.4 { + B remaining +} $nRemaining + +do_test backup-9.2.1 { + B step -1 +} {SQLITE_DONE} +do_test backup-9.2.2 { + B remaining +} {0} +do_test backup-9.2.3 { + B finish +} {SQLITE_OK} +catch {db2 close} + +ifcapable memorymanage { + db close + file delete -force test.db + file delete -force bak.db + + sqlite3 db test.db + sqlite3 db2 test.db + sqlite3 db3 bak.db + + do_test backup-10.1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + CREATE INDEX i1 ON t1(a, b); + COMMIT; + } + } {} + do_test backup-10.1.2 { + sqlite3_backup B db3 main db2 main + B step 5 + } {SQLITE_OK} + do_test backup-10.1.3 { + execsql { + UPDATE t1 SET b = randstr(500,500); + } + } {} + sqlite3_release_memory [expr 1024*1024] + do_test backup-10.1.3 { + B step 50 + } {SQLITE_DONE} + do_test backup-10.1.4 { + B finish + } {SQLITE_OK} + do_test backup-10.1.5 { + execsql { PRAGMA integrity_check } db3 + } {ok} + + db2 close + db3 close +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/badutf.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/badutf.test --- sqlite3-3.4.2/test/badutf.test 2007-05-15 19:35:21.000000000 +0100 +++ sqlite3-3.6.16/test/badutf.test 2009-06-05 18:02:57.000000000 +0100 @@ -13,7 +13,7 @@ # This file checks to make sure SQLite is able to gracefully # handle malformed UTF-8. 
# -# $Id: badutf.test,v 1.1 2007/05/15 18:35:21 drh Exp $ +# $Id: badutf.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -41,49 +41,53 @@ sqlite3_exec db {SELECT hex('%ff') AS x} } {0 {x FF}} -do_test badutf-1.10 { - sqlite3 db2 {} - db2 eval {PRAGMA encoding=UTF16be} - sqlite3_exec db2 {SELECT hex('%80') AS x} -} {0 {x 0080}} -do_test badutf-1.11 { - sqlite3_exec db2 {SELECT hex('%81') AS x} -} {0 {x 0081}} -do_test badutf-1.12 { - sqlite3_exec db2 {SELECT hex('%bf') AS x} -} {0 {x 00BF}} -do_test badutf-1.13 { - sqlite3_exec db2 {SELECT hex('%c0') AS x} -} {0 {x FFFD}} -do_test badutf-1.14 { - sqlite3_exec db2 {SELECT hex('%c1') AS x} -} {0 {x FFFD}} -do_test badutf-1.15 { - sqlite3_exec db2 {SELECT hex('%c0%bf') AS x} -} {0 {x FFFD}} -do_test badutf-1.16 { - sqlite3_exec db2 {SELECT hex('%c1%bf') AS x} -} {0 {x FFFD}} -do_test badutf-1.17 { - sqlite3_exec db2 {SELECT hex('%c3%bf') AS x} -} {0 {x 00FF}} -do_test badutf-1.18 { - sqlite3_exec db2 {SELECT hex('%e0') AS x} -} {0 {x FFFD}} -do_test badutf-1.19 { - sqlite3_exec db2 {SELECT hex('%f0') AS x} -} {0 {x FFFD}} -do_test badutf-1.20 { - sqlite3_exec db2 {SELECT hex('%ff') AS x} -} {0 {x FFFD}} - - -do_test badutf-2.1 { - sqlite3_exec db {SELECT '%80'=CAST(x'80' AS text) AS x} -} {0 {x 1}} -do_test badutf-2.2 { - sqlite3_exec db {SELECT CAST('%80' AS blob)=x'80' AS x} -} {0 {x 1}} +sqlite3 db2 {} +ifcapable utf16 { + do_test badutf-1.10 { + db2 eval {PRAGMA encoding=UTF16be} + sqlite3_exec db2 {SELECT hex('%80') AS x} + } {0 {x 0080}} + do_test badutf-1.11 { + sqlite3_exec db2 {SELECT hex('%81') AS x} + } {0 {x 0081}} + do_test badutf-1.12 { + sqlite3_exec db2 {SELECT hex('%bf') AS x} + } {0 {x 00BF}} + do_test badutf-1.13 { + sqlite3_exec db2 {SELECT hex('%c0') AS x} + } {0 {x FFFD}} + do_test badutf-1.14 { + sqlite3_exec db2 {SELECT hex('%c1') AS x} + } {0 {x FFFD}} + do_test badutf-1.15 { + sqlite3_exec db2 {SELECT hex('%c0%bf') AS x} + } {0 {x FFFD}} + do_test badutf-1.16 { + sqlite3_exec db2 {SELECT hex('%c1%bf') AS x} + } {0 {x FFFD}} + do_test badutf-1.17 { + sqlite3_exec db2 {SELECT hex('%c3%bf') AS x} + } {0 {x 00FF}} + do_test badutf-1.18 { + sqlite3_exec db2 {SELECT hex('%e0') AS x} + } {0 {x FFFD}} + do_test badutf-1.19 { + sqlite3_exec db2 {SELECT hex('%f0') AS x} + } {0 {x FFFD}} + do_test badutf-1.20 { + sqlite3_exec db2 {SELECT hex('%ff') AS x} + } {0 {x FFFD}} +} + + +ifcapable bloblit { + do_test badutf-2.1 { + sqlite3_exec db {SELECT '%80'=CAST(x'80' AS text) AS x} + } {0 {x 1}} + do_test badutf-2.2 { + sqlite3_exec db {SELECT CAST('%80' AS blob)=x'80' AS x} + } {0 {x 1}} +} do_test badutf-3.1 { sqlite3_exec db {SELECT length('%80') AS x} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/bigfile.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/bigfile.test --- sqlite3-3.4.2/test/bigfile.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/bigfile.test 2009-06-12 03:37:50.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this script testing the ability of SQLite to handle database # files larger than 4GB. # -# $Id: bigfile.test,v 1.9 2005/11/25 09:01:24 danielk1977 Exp $ +# $Id: bigfile.test,v 1.12 2009/03/05 04:27:08 shane Exp $ # set testdir [file dirname $argv0] @@ -64,7 +64,7 @@ # large files. So skip all of the remaining tests in this file. # db close -if {[catch {fake_big_file 4096 test.db}]} { +if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} { puts "**** Unable to create a file larger than 4096 MB. 
*****" finish_test return @@ -82,7 +82,11 @@ # tests. We will know the above test failed because the "db" command # does not exist. # -if {[llength [info command db]]>0} { +if {[llength [info command db]]<=0} { + puts "**** Large file support appears to be broken. *****" + finish_test + return +} do_test bigfile-1.3 { execsql { @@ -97,25 +101,26 @@ SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM -do_test bigfile-1.5 { - execsql { - SELECT md5sum(x) FROM t2; - } -} $::MAGIC_SUM db close -if {[catch {fake_big_file 8192 test.db}]} { +if {[catch {fake_big_file 8192 [pwd]/test.db}]} { puts "**** Unable to create a file larger than 8192 MB. *****" finish_test return } -do_test bigfile-1.6 { +do_test bigfile-1.5 { sqlite3 db test.db execsql { SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM +do_test bigfile-1.6 { + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM do_test bigfile-1.7 { execsql { CREATE TABLE t3 AS SELECT * FROM t1; @@ -134,46 +139,48 @@ SELECT md5sum(x) FROM t2; } } $::MAGIC_SUM -do_test bigfile-1.10 { - execsql { - SELECT md5sum(x) FROM t3; - } -} $::MAGIC_SUM db close -if {[catch {fake_big_file 16384 test.db}]} { +if {[catch {fake_big_file 16384 [pwd]/test.db}]} { puts "**** Unable to create a file larger than 16384 MB. *****" finish_test return } -do_test bigfile-1.11 { +do_test bigfile-1.10 { sqlite3 db test.db execsql { SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM +do_test bigfile-1.11 { + sqlite3 db test.db + execsql { + SELECT md5sum(x) FROM t2; + } +} $::MAGIC_SUM do_test bigfile-1.12 { + sqlite3 db test.db execsql { - CREATE TABLE t4 AS SELECT * FROM t1; - SELECT md5sum(x) FROM t4; + SELECT md5sum(x) FROM t3; } } $::MAGIC_SUM do_test bigfile-1.13 { - db close - sqlite3 db test.db execsql { - SELECT md5sum(x) FROM t1; + CREATE TABLE t4 AS SELECT * FROM t1; + SELECT md5sum(x) FROM t4; } } $::MAGIC_SUM do_test bigfile-1.14 { + db close + sqlite3 db test.db execsql { - SELECT md5sum(x) FROM t2; + SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM do_test bigfile-1.15 { execsql { - SELECT md5sum(x) FROM t3; + SELECT md5sum(x) FROM t2; } } $::MAGIC_SUM do_test bigfile-1.16 { @@ -181,12 +188,5 @@ SELECT md5sum(x) FROM t3; } } $::MAGIC_SUM -do_test bigfile-1.17 { - execsql { - SELECT md5sum(x) FROM t4; - } -} $::MAGIC_SUM - -} ;# End of the "if( db command exists )" finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/bind.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/bind.test --- sqlite3-3.4.2/test/bind.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/bind.test 2009-06-12 03:37:50.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script testing the sqlite_bind API. # -# $Id: bind.test,v 1.40 2007/05/10 17:23:12 drh Exp $ +# $Id: bind.test,v 1.47 2009/02/20 03:55:05 drh Exp $ # set testdir [file dirname $argv0] @@ -91,6 +91,19 @@ execsql {SELECT rowid, * FROM t1} } {1 123 abcdefg {} 2 456 abcdefg {}} +do_test bind-1.10 { + set rc [catch { + sqlite3_prepare db {INSERT INTO t1 VALUES($abc:123,?,:abc)} -1 TAIL + } msg] + lappend rc $msg +} {1 {(1) near ":123": syntax error}} +do_test bind-1.11 { + set rc [catch { + sqlite3_prepare db {INSERT INTO t1 VALUES(@abc:xyz,?,:abc)} -1 TAIL + } msg] + lappend rc $msg +} {1 {(1) near ":xyz": syntax error}} + do_test bind-1.99 { sqlite3_finalize $VM } SQLITE_OK @@ -252,7 +265,7 @@ do_test bind-6.1 { sqlite3_bind_text $VM 1 hellothere 5 sqlite3_bind_text $VM 2 ".." 
1 - sqlite3_bind_text $VM 3 world -1 + sqlite3_bind_text $VM 3 world\000 -1 sqlite_step $VM N VALUES COLNAMES sqlite3_reset $VM execsql {SELECT rowid, * FROM t1} @@ -266,6 +279,44 @@ } } {} +# Make sure zeros in a string work. +# +do_test bind-6.4 { + db eval {DELETE FROM t1} + sqlite3_bind_text $VM 1 hello\000there\000 12 + sqlite3_bind_text $VM 2 hello\000there\000 11 + sqlite3_bind_text $VM 3 hello\000there\000 -1 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT * FROM t1} +} {hello hello hello} +set enc [db eval {PRAGMA encoding}] +if {$enc=="UTF-8"} { + do_test bind-6.5 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {68656C6C6F00746865726500 68656C6C6F007468657265 68656C6C6F} +} elseif {$enc=="UTF-16le"} { + do_test bind-6.5 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {680065006C006C006F000000740068006500720065000000 680065006C006C006F00000074006800650072006500 680065006C006C006F00} +} elseif {$enc=="UTF-16be"} { + do_test bind-6.5 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {00680065006C006C006F0000007400680065007200650000 00680065006C006C006F000000740068006500720065 00680065006C006C006F} +} else { + do_test bind-6.5 { + set "Unknown database encoding: $::enc" + } {} +} +do_test bind-6.6 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} +} {text text text} +do_test bind-6.7 { + execsql { + DELETE FROM t1; + } +} {} + # UTF-16 text ifcapable {utf16} { do_test bind-7.1 { @@ -279,11 +330,34 @@ do_test bind-7.2 { execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} } {text text text} -} -do_test bind-7.3 { - execsql { - DELETE FROM t1; + do_test bind-7.3 { + db eval {DELETE FROM t1} + sqlite3_bind_text16 $VM 1 [encoding convertto unicode hi\000yall\000] 16 + sqlite3_bind_text16 $VM 2 [encoding convertto unicode hi\000yall\000] 14 + sqlite3_bind_text16 $VM 3 [encoding convertto unicode hi\000yall\000] -1 + sqlite_step $VM N VALUES COLNAMES + sqlite3_reset $VM + execsql {SELECT * FROM t1} + } {hi hi hi} + if {$enc=="UTF-8"} { + do_test bind-7.4 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {68690079616C6C00 68690079616C6C 6869} + } elseif {$enc=="UTF-16le"} { + do_test bind-7.4 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {680069000000790061006C006C000000 680069000000790061006C006C00 68006900} + } elseif {$enc=="UTF-16be"} { + do_test bind-7.4 { + execsql {SELECT hex(a), hex(b), hex(c) FROM t1} + } {00680069000000790061006C006C0000 00680069000000790061006C006C 00680069} } + do_test bind-7.5 { + execsql {SELECT typeof(a), typeof(b), typeof(c) FROM t1} + } {text text text} +} +do_test bind-7.99 { + execsql {DELETE FROM t1;} } {} # Test that the 'out of range' error works. 
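# The "out of range" checks that follow are driven by the compile-time
# limit SQLITE_MAX_VARIABLE_NUMBER (999 in a default build). A minimal
# stand-alone sketch of the same failure that bind-9.2 below verifies,
# assuming the tester.tcl harness and its $SQLITE_MAX_VARIABLE_NUMBER
# variable; the statement text is illustrative only:
#
set iMax $SQLITE_MAX_VARIABLE_NUMBER
set rc [catch {
  sqlite3_prepare db "SELECT ?[expr {$iMax+1}]" -1 TAIL
} msg]
# $rc is 1 and $msg reads "(1) variable number must be between ?1 and ?N",
# where N is SQLITE_MAX_VARIABLE_NUMBER; parameters ?1..?N prepare fine.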
@@ -345,6 +419,8 @@ sqlite3_finalize $VM } SQLITE_OK +set iMaxVar $SQLITE_MAX_VARIABLE_NUMBER +set zError "(1) variable number must be between ?1 and ?$iMaxVar" do_test bind-9.1 { execsql { CREATE TABLE t2(a,b,c,d,e,f); @@ -355,37 +431,44 @@ } -1 TAIL } msg] lappend rc $msg -} {1 {(1) variable number must be between ?1 and ?999}} +} [list 1 $zError] do_test bind-9.2 { set rc [catch { - sqlite3_prepare $DB { - INSERT INTO t2(a) VALUES(?1000) - } -1 TAIL + sqlite3_prepare $DB "INSERT INTO t2(a) VALUES(?[expr $iMaxVar+1])" -1 TAIL } msg] lappend rc $msg -} {1 {(1) variable number must be between ?1 and ?999}} -do_test bind-9.3 { +} [list 1 $zError] +do_test bind-9.3.1 { set VM [ - sqlite3_prepare $DB { - INSERT INTO t2(a,b) VALUES(?1,?999) - } -1 TAIL + sqlite3_prepare $DB " + INSERT INTO t2(a,b) VALUES(?1,?$iMaxVar) + " -1 TAIL ] sqlite3_bind_parameter_count $VM -} {999} +} $iMaxVar +catch {sqlite3_finalize $VM} +do_test bind-9.3.2 { + set VM [ + sqlite3_prepare $DB " + INSERT INTO t2(a,b) VALUES(?2,?[expr $iMaxVar - 1]) + " -1 TAIL + ] + sqlite3_bind_parameter_count $VM +} [expr {$iMaxVar - 1}] catch {sqlite3_finalize $VM} do_test bind-9.4 { set VM [ - sqlite3_prepare $DB { - INSERT INTO t2(a,b,c,d) VALUES(?1,?997,?,?) - } -1 TAIL + sqlite3_prepare $DB " + INSERT INTO t2(a,b,c,d) VALUES(?1,?[expr $iMaxVar - 2],?,?) + " -1 TAIL ] sqlite3_bind_parameter_count $VM -} {999} +} $iMaxVar do_test bind-9.5 { sqlite3_bind_int $VM 1 1 - sqlite3_bind_int $VM 997 999 - sqlite3_bind_int $VM 998 1000 - sqlite3_bind_int $VM 999 1001 + sqlite3_bind_int $VM [expr $iMaxVar - 2] 999 + sqlite3_bind_int $VM [expr $iMaxVar - 1] 1000 + sqlite3_bind_int $VM $iMaxVar 1001 sqlite3_step $VM } SQLITE_DONE do_test bind-9.6 { @@ -574,4 +657,102 @@ } {NULL NULL NULL} sqlite3_finalize $VM +#-------------------------------------------------------------------- +# These tests attempt to reproduce bug #3463. +# +proc param_names {db zSql} { + set ret [list] + set VM [sqlite3_prepare db $zSql -1 TAIL] + for {set ii 1} {$ii <= [sqlite3_bind_parameter_count $VM]} {incr ii} { + lappend ret [sqlite3_bind_parameter_name $VM $ii] + } + sqlite3_finalize $VM + set ret +} + +do_test bind-14.1 { + param_names db { SELECT @a, @b } +} {@a @b} +do_test bind-14.2 { + param_names db { SELECT NULL FROM (SELECT NULL) WHERE @a = @b } +} {@a @b} +do_test bind-14.3 { + param_names db { SELECT @a FROM (SELECT NULL) WHERE 1 = @b } +} {@a @b} +do_test bind-14.4 { + param_names db { SELECT @a, @b FROM (SELECT NULL) } +} {@a @b} + +#-------------------------------------------------------------------------- +# Tests of the OP_Variable opcode where P3>1 +# +do_test bind-15.1 { + db eval {CREATE TABLE t4(a,b,c,d,e,f,g,h);} + set VM [sqlite3_prepare db { + INSERT INTO t4(a,b,c,d,f,g,h,e) VALUES(?,?,?,?,?,?,?,?) + } -1 TAIL] + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_bind_int $VM 4 4 + sqlite3_bind_int $VM 5 5 + sqlite3_bind_int $VM 6 6 + sqlite3_bind_int $VM 7 7 + sqlite3_bind_int $VM 8 8 + sqlite3_step $VM + sqlite3_finalize $VM + db eval {SELECT * FROM t4} +} {1 2 3 4 8 5 6 7} +do_test bind-15.2 { + db eval {DELETE FROM t4} + set VM [sqlite3_prepare db { + INSERT INTO t4(a,b,c,d,e,f,g,h) VALUES(?,?,?,?,?,?,?,?) 
+ } -1 TAIL] + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_bind_int $VM 4 4 + sqlite3_bind_int $VM 5 5 + sqlite3_bind_int $VM 6 6 + sqlite3_bind_int $VM 7 7 + sqlite3_bind_int $VM 8 8 + sqlite3_step $VM + sqlite3_finalize $VM + db eval {SELECT * FROM t4} +} {1 2 3 4 5 6 7 8} +do_test bind-15.3 { + db eval {DELETE FROM t4} + set VM [sqlite3_prepare db { + INSERT INTO t4(h,g,f,e,d,c,b,a) VALUES(?,?,?,?,?,?,?,?) + } -1 TAIL] + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_bind_int $VM 4 4 + sqlite3_bind_int $VM 5 5 + sqlite3_bind_int $VM 6 6 + sqlite3_bind_int $VM 7 7 + sqlite3_bind_int $VM 8 8 + sqlite3_step $VM + sqlite3_finalize $VM + db eval {SELECT * FROM t4} +} {8 7 6 5 4 3 2 1} +do_test bind-15.4 { + db eval {DELETE FROM t4} + set VM [sqlite3_prepare db { + INSERT INTO t4(a,b,c,d,e,f,g,h) VALUES(?,?,?,?4,?,?6,?,?) + } -1 TAIL] + sqlite3_bind_int $VM 1 1 + sqlite3_bind_int $VM 2 2 + sqlite3_bind_int $VM 3 3 + sqlite3_bind_int $VM 4 4 + sqlite3_bind_int $VM 5 5 + sqlite3_bind_int $VM 6 6 + sqlite3_bind_int $VM 7 7 + sqlite3_bind_int $VM 8 8 + sqlite3_step $VM + sqlite3_finalize $VM + db eval {SELECT * FROM t4} +} {1 2 3 4 5 6 7 8} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/bindxfer.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/bindxfer.test --- sqlite3-3.4.2/test/bindxfer.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/bindxfer.test 2009-06-25 12:24:38.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script testing the sqlite_transfer_bindings() API. # -# $Id: bindxfer.test,v 1.4 2007/04/05 11:25:59 drh Exp $ +# $Id: bindxfer.test,v 1.9 2009/04/17 11:56:28 drh Exp $ # set testdir [file dirname $argv0] @@ -50,33 +50,25 @@ do_test bindxfer-1.4 { sqlite3_bind_parameter_count $VM2 } 3 -do_test bindxfer-1.5 { - sqlite_bind $VM1 1 one normal - set sqlite_static_bind_value two - sqlite_bind $VM1 2 {} static - sqlite_bind $VM1 3 {} null - sqlite3_transfer_bindings $VM1 $VM2 - sqlite_step $VM1 VALUES COLNAMES -} SQLITE_ROW -do_test bindxfer-1.6 { - set VALUES -} {{} {} {}} -do_test bindxfer-1.7 { - sqlite_step $VM2 VALUES COLNAMES -} SQLITE_ROW -do_test bindxfer-1.8 { - set VALUES -} {one two {}} -do_test bindxfer-1.9 { - catch {sqlite3_finalize $VM1} - catch {sqlite3_finalize $VM2} - sqlite3_transfer_bindings $VM1 $VM2 -} 21 ;# SQLITE_MISUSE -do_test bindxfer-1.10 { - set VM1 [sqlite3_prepare $DB {SELECT ?, ?, ?} -1 TAIL] - set VM2 [sqlite3_prepare $DB {SELECT ?, ?, ?, ?} -1 TAIL] - sqlite3_transfer_bindings $VM1 $VM2 -} 1 ;# SQLITE_ERROR +ifcapable deprecated { + do_test bindxfer-1.5 { + sqlite_bind $VM1 1 one normal + set sqlite_static_bind_value two + sqlite_bind $VM1 2 {} static + sqlite_bind $VM1 3 {} null + sqlite3_transfer_bindings $VM1 $VM2 + sqlite_step $VM1 VALUES COLNAMES + } SQLITE_ROW + do_test bindxfer-1.6 { + set VALUES + } {{} {} {}} + do_test bindxfer-1.7 { + sqlite_step $VM2 VALUES COLNAMES + } SQLITE_ROW + do_test bindxfer-1.8 { + set VALUES + } {one two {}} +} catch {sqlite3_finalize $VM1} catch {sqlite3_finalize $VM2} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/bitvec.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/bitvec.test --- sqlite3-3.4.2/test/bitvec.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/bitvec.test 2009-06-25 12:22:33.000000000 +0100 @@ -0,0 +1,195 @@ +# 2008 February 18 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Unit testing of the Bitvec object. +# +# $Id: bitvec.test,v 1.4 2009/04/01 23:49:04 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# The built-in test logic must be operational in order for +# this test to work. +ifcapable !builtin_test { + finish_test + return +} + +# Test that sqlite3BitvecBuiltinTest correctly reports errors +# that are deliberately introduced. +# +do_test bitvec-1.0.1 { + sqlite3BitvecBuiltinTest 400 {5 1 1 1 0} +} 1 +do_test bitvec-1.0.2 { + sqlite3BitvecBuiltinTest 400 {5 1 234 1 0} +} 234 + +# Run test cases that set every bit in vectors of various sizes. +# for larger cases, this should cycle the bit vector representation +# from hashing into subbitmaps. The subbitmaps should start as +# hashes then change to either subbitmaps or linear maps, depending +# on their size. +# +do_test bitvec-1.1 { + sqlite3BitvecBuiltinTest 400 {1 400 1 1 0} +} 0 +do_test bitvec-1.2 { + sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 0} +} 0 +do_test bitvec-1.3 { + sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 0} +} 0 +do_test bitvec-1.4 { + sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 0} +} 0 + +# By specifying a larger increments, we spread the load around. +# +do_test bitvec-1.5 { + sqlite3BitvecBuiltinTest 400 {1 400 1 7 0} +} 0 +do_test bitvec-1.6 { + sqlite3BitvecBuiltinTest 4000 {1 4000 1 7 0} +} 0 +do_test bitvec-1.7 { + sqlite3BitvecBuiltinTest 40000 {1 40000 1 7 0} +} 0 +do_test bitvec-1.8 { + sqlite3BitvecBuiltinTest 400000 {1 400000 1 7 0} +} 0 + +# First fill up the bitmap with ones, then go through and +# clear all the bits. This will stress the clearing mechanism. +# +do_test bitvec-1.9 { + sqlite3BitvecBuiltinTest 400 {1 400 1 1 2 400 1 1 0} +} 0 +do_test bitvec-1.10 { + sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 2 4000 1 1 0} +} 0 +do_test bitvec-1.11 { + sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 2 40000 1 1 0} +} 0 +do_test bitvec-1.12 { + sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 2 400000 1 1 0} +} 0 + +do_test bitvec-1.13 { + sqlite3BitvecBuiltinTest 400 {1 400 1 1 2 400 1 7 0} +} 0 +do_test bitvec-1.15 { + sqlite3BitvecBuiltinTest 4000 {1 4000 1 1 2 4000 1 7 0} +} 0 +do_test bitvec-1.16 { + sqlite3BitvecBuiltinTest 40000 {1 40000 1 1 2 40000 1 77 0} +} 0 +do_test bitvec-1.17 { + sqlite3BitvecBuiltinTest 400000 {1 400000 1 1 2 400000 1 777 0} +} 0 + +do_test bitvec-1.18 { + sqlite3BitvecBuiltinTest 400000 {1 5000 100000 1 2 400000 1 37 0} +} 0 + +# Attempt to induce hash collisions. +# +unset -nocomplain start +unset -nocomplain incr +foreach start {1 2 3 4 5 6 7 8} { + foreach incr {124 125} { + do_test bitvec-1.20.$start.$incr { + set prog [list 1 60 $::start $::incr 2 5000 1 1 0] + sqlite3BitvecBuiltinTest 5000 $prog + } 0 + } +} + +do_test bitvec-1.30.big_and_slow { + sqlite3BitvecBuiltinTest 17000000 {1 17000000 1 1 2 17000000 1 1 0} +} 0 + + +# Test setting and clearing a random subset of bits. 
+# +do_test bitvec-2.1 { + sqlite3BitvecBuiltinTest 4000 {3 2000 4 2000 0} +} 0 +do_test bitvec-2.2 { + sqlite3BitvecBuiltinTest 4000 {3 1000 4 1000 3 1000 4 1000 3 1000 4 1000 + 3 1000 4 1000 3 1000 4 1000 3 1000 4 1000 0} +} 0 +do_test bitvec-2.3 { + sqlite3BitvecBuiltinTest 400000 {3 10 0} +} 0 +do_test bitvec-2.4 { + sqlite3BitvecBuiltinTest 4000 {3 10 2 4000 1 1 0} +} 0 +do_test bitvec-2.5 { + sqlite3BitvecBuiltinTest 5000 {3 20 2 5000 1 1 0} +} 0 +do_test bitvec-2.6 { + sqlite3BitvecBuiltinTest 50000 {3 60 2 50000 1 1 0} +} 0 +do_test bitvec-2.7 { + sqlite3BitvecBuiltinTest 5000 { + 1 25 121 125 + 1 50 121 125 + 2 25 121 125 + 0 + } +} 0 + +# This procedure runs sqlite3BitvecBuiltinTest with argments "n" and +# "program". But it also causes a malloc error to occur after the +# "failcnt"-th malloc. The result should be "0" if no malloc failure +# occurs or "-1" if there is a malloc failure. +# +proc bitvec_malloc_test {label failcnt n program} { + do_test $label [subst { + sqlite3_memdebug_fail $failcnt + set x \[sqlite3BitvecBuiltinTest $n [list $program]\] + set nFail \[sqlite3_memdebug_fail -1\] + if {\$nFail==0} { + set ::go 0 + set x -1 + } + set x + }] -1 +} + +# Make sure malloc failures are handled sanily. +# +unset -nocomplain n +unset -nocomplain go +set go 1 +save_prng_state +for {set n 0} {$go} {incr n} { + restore_prng_state + bitvec_malloc_test bitvec-3.1.$n $n 5000 { + 3 60 2 5000 1 1 3 60 2 5000 1 1 3 60 2 5000 1 1 0 + } +} +set go 1 +for {set n 0} {$go} {incr n} { + restore_prng_state + bitvec_malloc_test bitvec-3.2.$n $n 5000 { + 3 600 2 5000 1 1 3 600 2 5000 1 1 3 600 2 5000 1 1 0 + } +} +set go 1 +for {set n 1} {$go} {incr n} { + bitvec_malloc_test bitvec-3.3.$n $n 50000 {1 50000 1 1 0} +} + +finish_test +return diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/blob.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/blob.test --- sqlite3-3.4.2/test/blob.test 2007-03-29 19:39:33.000000000 +0100 +++ sqlite3-3.6.16/test/blob.test 2009-06-25 12:24:38.000000000 +0100 @@ -10,7 +10,7 @@ #*********************************************************************** # This file implements regression tests for SQLite library. # -# $Id: blob.test,v 1.5 2006/01/03 00:33:50 drh Exp $ +# $Id: blob.test,v 1.8 2009/04/28 18:00:27 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -45,20 +45,43 @@ set blob [execsql {SELECT x'abcdEF12';}] bin_to_hex [lindex $blob 0] } {ABCDEF12} +do_test blob-1.3.2 { + set blob [execsql {SELECT x'0123456789abcdefABCDEF';}] + bin_to_hex [lindex $blob 0] +} {0123456789ABCDEFABCDEF} # Try some syntax errors in blob literals. 
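# A well-formed blob literal is the letter x or X followed by an even
# number of hexadecimal digits inside single quotes; anything else is
# rejected as a single "unrecognized token", as blob-1.4 through
# blob-1.13 below demonstrate. A minimal illustration, assuming the open
# handle db provided by tester.tcl:
#
db eval {SELECT x'53514C697465'}    ;# six bytes spelling "SQLite"
catchsql {SELECT x'012'}            ;# odd number of digits  -> parse error
catchsql {SELECT x'01G4'}           ;# non-hex character 'G' -> parse error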
do_test blob-1.4 { catchsql {SELECT X'01020k304', 100} -} {1 {unrecognized token: "X'01020"}} +} {1 {unrecognized token: "X'01020k304'"}} do_test blob-1.5 { catchsql {SELECT X'01020, 100} -} {1 {unrecognized token: "X'01020"}} +} {1 {unrecognized token: "X'01020, 100"}} do_test blob-1.6 { catchsql {SELECT X'01020 100'} -} {1 {unrecognized token: "X'01020"}} +} {1 {unrecognized token: "X'01020 100'"}} do_test blob-1.7 { catchsql {SELECT X'01001'} } {1 {unrecognized token: "X'01001'"}} +do_test blob-1.8 { + catchsql {SELECT x'012/45'} +} {1 {unrecognized token: "x'012/45'"}} +do_test blob-1.9 { + catchsql {SELECT x'012:45'} +} {1 {unrecognized token: "x'012:45'"}} +do_test blob-1.10 { + catchsql {SELECT x'012@45'} +} {1 {unrecognized token: "x'012@45'"}} +do_test blob-1.11 { + catchsql {SELECT x'012G45'} +} {1 {unrecognized token: "x'012G45'"}} +do_test blob-1.12 { + catchsql {SELECT x'012`45'} +} {1 {unrecognized token: "x'012`45'"}} +do_test blob-1.13 { + catchsql {SELECT x'012g45'} +} {1 {unrecognized token: "x'012g45'"}} + # Insert a blob into a table and retrieve it. do_test blob-2.0 { @@ -114,7 +137,7 @@ sqlite3_finalize $STMT db2 close } {} -do_test blob-2.3 { +do_test blob-3.2 { set blobs [execsql {SELECT * FROM t1}] set blobs2 [list] foreach b $blobs {lappend blobs2 [bin_to_hex $b]} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary1.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary1.tcl --- sqlite3-3.4.2/test/boundary1.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary1.tcl 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,289 @@ +puts {# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary1.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. +if {![working_64bit_int]} { finish_test; return } +} + +expr srand(0) + +# Generate interesting boundary numbers +# +foreach x { + 0 + 1 + 0x7f + 0x7fff + 0x7fffff + 0x7fffffff + 0x7fffffffff + 0x7fffffffffff + 0x7fffffffffffff + 0x7fffffffffffffff +} { + set x [expr {wide($x)}] + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set boundarynum([expr {-($x+1)}]) 1 + set boundarynum([expr {-($x+2)}]) 1 + set boundarynum([expr {$x+$x+1}]) 1 + set boundarynum([expr {$x+$x+2}]) 1 +} +set x [expr {wide(127)}] +for {set i 1} {$i<=9} {incr i} { + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set x [expr {wide($x*128 + 127)}] +} + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# A simple selection sort. Not trying to be efficient. 
+# +proc sort {inlist} { + set outlist {} + set mn [lindex $inlist 0] + foreach x $inlist { + if {$x<$mn} {set mn $x} + } + set outlist $mn + set mx $mn + while {1} { + set valid 0 + foreach x $inlist { + if {$x>$mx && (!$valid || $mn>$x)} { + set mn $x + set valid 1 + } + } + if {!$valid} break + lappend outlist $mn + set mx $mn + } + return $outlist +} + +# Reverse the order of a list +# +proc reverse {inlist} { + set i [llength $inlist] + set outlist {} + for {incr i -1} {$i>=0} {incr i -1} { + lappend outlist [lindex $inlist $i] + } + return $outlist +} + +set nums1 [scramble [array names boundarynum]] +set nums2 [scramble [array names boundarynum]] + +set tname boundary1 +puts "do_test $tname-1.1 \173" +puts " db eval \173" +puts " CREATE TABLE t1(a,x);" +set a 0 +foreach r $nums1 { + incr a + set t1ra($r) $a + set t1ar($a) $r + set x [format %08x%08x [expr {wide($r)>>32}] $r] + set t1rx($r) $x + set t1xr($x) $r + puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');" +} +puts " CREATE INDEX t1i1 ON t1(a);" +puts " CREATE INDEX t1i2 ON t1(x);" +puts " \175" +puts "\175 {}" + +puts "do_test $tname-1.2 \173" +puts " db eval \173" +puts " SELECT count(*) FROM t1" +puts " \175" +puts "\175 {64}" + +set nums3 $nums2 +lappend nums3 9.22337303685477580800e+18 +lappend nums3 -9.22337303685477580800e+18 + +set i 0 +foreach r $nums3 { + incr i + + if {abs($r)<9.22337203685477580800e+18} { + set x $t1rx($r) + set a $t1ra($r) + set r5 $r.5 + set r0 $r.0 + puts "do_test $tname-2.$i.1 \173" + puts " db eval \173" + puts " SELECT * FROM t1 WHERE rowid=$r" + puts " \175" + puts "\175 {$a $x}" + puts "do_test $tname-2.$i.2 \173" + puts " db eval \173" + puts " SELECT rowid, a FROM t1 WHERE x='$x'" + puts " \175" + puts "\175 {$r $a}" + puts "do_test $tname-2.$i.3 \173" + puts " db eval \173" + puts " SELECT rowid, x FROM t1 WHERE a=$a" + puts " \175" + puts "\175 {$r $x}" + } + + foreach op {> >= < <=} subno {gt ge lt le} { + + ################################################################ 2.x.y.1 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r" { + lappend rset $rx + lappend aset $t1ra($rx) + } + } + puts "do_test $tname-2.$i.$subno.1 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY a" + puts " \175" + puts "\175 {[sort $aset]}" + + ################################################################ 2.x.y.2 + puts "do_test $tname-2.$i.$subno.2 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY a DESC" + puts " \175" + puts "\175 {[reverse [sort $aset]]}" + + ################################################################ 2.x.y.3 + set aset {} + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.3 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY rowid" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.4 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.4 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r ORDER BY rowid DESC" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.5 + set aset {} + set xset {} + foreach rx $rset { + lappend xset $t1rx($rx) + } + foreach x [sort $xset] { + set rx $t1xr($x) + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.5 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r 
ORDER BY x" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.10 + if {abs($r)>9223372036854775808 || [string length $r5]>15} continue + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r0" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.10 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r0 ORDER BY rowid" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.11 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.11 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r0 ORDER BY rowid DESC" + puts " \175" + puts "\175 {$aset}" + + + ################################################################ 2.x.y.12 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r5" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.12 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r5 ORDER BY rowid" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.13 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.13 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE rowid $op $r5 ORDER BY rowid DESC" + puts " \175" + puts "\175 {$aset}" + } + +} + + +puts {finish_test} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary1.test --- sqlite3-3.4.2/test/boundary1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary1.test 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,7645 @@ +# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary1.test,v 1.2 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. 
+if {![working_64bit_int]} { finish_test; return } + +do_test boundary1-1.1 { + db eval { + CREATE TABLE t1(a,x); + INSERT INTO t1(oid,a,x) VALUES(-8388609,1,'ffffffffff7fffff'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963969,2,'ff7fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(9223372036854775807,3,'7fffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(127,4,'000000000000007f'); + INSERT INTO t1(oid,a,x) VALUES(3,5,'0000000000000003'); + INSERT INTO t1(oid,a,x) VALUES(16777216,6,'0000000001000000'); + INSERT INTO t1(oid,a,x) VALUES(4398046511103,7,'000003ffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(16383,8,'0000000000003fff'); + INSERT INTO t1(oid,a,x) VALUES(16777215,9,'0000000000ffffff'); + INSERT INTO t1(oid,a,x) VALUES(281474976710655,10,'0000ffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483648,11,'ffffffff80000000'); + INSERT INTO t1(oid,a,x) VALUES(268435455,12,'000000000fffffff'); + INSERT INTO t1(oid,a,x) VALUES(562949953421311,13,'0001ffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(4294967295,14,'00000000ffffffff'); + INSERT INTO t1(oid,a,x) VALUES(2097151,15,'00000000001fffff'); + INSERT INTO t1(oid,a,x) VALUES(16384,16,'0000000000004000'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927935,17,'00ffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(8388607,18,'00000000007fffff'); + INSERT INTO t1(oid,a,x) VALUES(1099511627776,19,'0000010000000000'); + INSERT INTO t1(oid,a,x) VALUES(2147483647,20,'000000007fffffff'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355329,21,'ffff7fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(34359738368,22,'0000000800000000'); + INSERT INTO t1(oid,a,x) VALUES(32767,23,'0000000000007fff'); + INSERT INTO t1(oid,a,x) VALUES(8388608,24,'0000000000800000'); + INSERT INTO t1(oid,a,x) VALUES(140737488355327,25,'00007fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(281474976710656,26,'0001000000000000'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963967,27,'007fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927936,28,'0100000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-32769,29,'ffffffffffff7fff'); + INSERT INTO t1(oid,a,x) VALUES(255,30,'00000000000000ff'); + INSERT INTO t1(oid,a,x) VALUES(4,31,'0000000000000004'); + INSERT INTO t1(oid,a,x) VALUES(-32768,32,'ffffffffffff8000'); + INSERT INTO t1(oid,a,x) VALUES(-2,33,'fffffffffffffffe'); + INSERT INTO t1(oid,a,x) VALUES(140737488355328,34,'0000800000000000'); + INSERT INTO t1(oid,a,x) VALUES(549755813888,35,'0000008000000000'); + INSERT INTO t1(oid,a,x) VALUES(4294967296,36,'0000000100000000'); + INSERT INTO t1(oid,a,x) VALUES(-8388608,37,'ffffffffff800000'); + INSERT INTO t1(oid,a,x) VALUES(-1,38,'ffffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(34359738367,39,'00000007ffffffff'); + INSERT INTO t1(oid,a,x) VALUES(268435456,40,'0000000010000000'); + INSERT INTO t1(oid,a,x) VALUES(2,41,'0000000000000002'); + INSERT INTO t1(oid,a,x) VALUES(2097152,42,'0000000000200000'); + INSERT INTO t1(oid,a,x) VALUES(562949953421312,43,'0002000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355328,44,'ffff800000000000'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963968,45,'0080000000000000'); + INSERT INTO t1(oid,a,x) VALUES(549755813887,46,'0000007fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483649,47,'ffffffff7fffffff'); + INSERT INTO t1(oid,a,x) VALUES(65535,48,'000000000000ffff'); + INSERT INTO t1(oid,a,x) VALUES(128,49,'0000000000000080'); + INSERT INTO t1(oid,a,x) VALUES(32768,50,'0000000000008000'); + INSERT INTO t1(oid,a,x) VALUES(2147483648,51,'0000000080000000'); 
+ INSERT INTO t1(oid,a,x) VALUES(-3,52,'fffffffffffffffd'); + INSERT INTO t1(oid,a,x) VALUES(-128,53,'ffffffffffffff80'); + INSERT INTO t1(oid,a,x) VALUES(-129,54,'ffffffffffffff7f'); + INSERT INTO t1(oid,a,x) VALUES(-9223372036854775808,55,'8000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(4398046511104,56,'0000040000000000'); + INSERT INTO t1(oid,a,x) VALUES(1099511627775,57,'000000ffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-549755813889,58,'ffffff7fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(0,59,'0000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(1,60,'0000000000000001'); + INSERT INTO t1(oid,a,x) VALUES(256,61,'0000000000000100'); + INSERT INTO t1(oid,a,x) VALUES(65536,62,'0000000000010000'); + INSERT INTO t1(oid,a,x) VALUES(-549755813888,63,'ffffff8000000000'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963968,64,'ff80000000000000'); + CREATE INDEX t1i1 ON t1(a); + CREATE INDEX t1i2 ON t1(x); + } +} {} +do_test boundary1-1.2 { + db eval { + SELECT count(*) FROM t1 + } +} {64} +do_test boundary1-2.1.1 { + db eval { + SELECT * FROM t1 WHERE rowid=72057594037927935 + } +} {17 00ffffffffffffff} +do_test boundary1-2.1.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00ffffffffffffff' + } +} {72057594037927935 17} +do_test boundary1-2.1.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=17 + } +} {72057594037927935 00ffffffffffffff} +do_test boundary1-2.1.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927935 ORDER BY a + } +} {3 28} +do_test boundary1-2.1.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927935 ORDER BY a DESC + } +} {28 3} +do_test boundary1-2.1.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927935 ORDER BY rowid + } +} {28 3} +do_test boundary1-2.1.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927935 ORDER BY rowid DESC + } +} {3 28} +do_test boundary1-2.1.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927935 ORDER BY x + } +} {28 3} +do_test boundary1-2.1.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927935 ORDER BY a + } +} {3 17 28} +do_test boundary1-2.1.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927935 ORDER BY a DESC + } +} {28 17 3} +do_test boundary1-2.1.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927935 ORDER BY rowid + } +} {17 28 3} +do_test boundary1-2.1.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927935 ORDER BY rowid DESC + } +} {3 28 17} +do_test boundary1-2.1.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927935 ORDER BY x + } +} {17 28 3} +do_test boundary1-2.1.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927935 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.1.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927935 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.1.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927935 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary1-2.1.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 
72057594037927935 ORDER BY rowid DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.1.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.1.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927935 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.1.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927935 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.1.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927935 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary1-2.1.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927935 ORDER BY rowid DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.1.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.2.1 { + db eval { + SELECT * FROM t1 WHERE rowid=16384 + } +} {16 0000000000004000} +do_test boundary1-2.2.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000004000' + } +} {16384 16} +do_test boundary1-2.2.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=16 + } +} {16384 0000000000004000} +do_test boundary1-2.2.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.2.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.2.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 16384 ORDER BY rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.2.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 16384 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary1-2.2.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 16384 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.2.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test 
boundary1-2.2.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.2.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16384 ORDER BY rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.2.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16384 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary1-2.2.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16384 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.2.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 16384 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.2.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary1-2.2.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 16384 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary1-2.2.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 16384 ORDER BY rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.2.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.2.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16384 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.2.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary1-2.2.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16384 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary1-2.2.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16384 ORDER BY rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.2.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.3.1 { + db eval { + SELECT * FROM t1 WHERE rowid=4294967296 + } +} {36 0000000100000000} +do_test boundary1-2.3.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000100000000' + } +} {4294967296 36} +do_test boundary1-2.3.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=36 + } +} {4294967296 0000000100000000} +do_test boundary1-2.3.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary1-2.3.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.3.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967296 ORDER BY rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.3.gt.4 { + db 
eval { + SELECT a FROM t1 WHERE rowid > 4294967296 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary1-2.3.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967296 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.3.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary1-2.3.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.3.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967296 ORDER BY rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.3.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967296 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary1-2.3.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967296 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.3.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.3.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.3.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967296 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary1-2.3.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967296 ORDER BY rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.3.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.3.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.3.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.3.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967296 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary1-2.3.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967296 ORDER BY rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.3.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.4.1 { + db eval { + SELECT * 
FROM t1 WHERE rowid=16777216 + } +} {6 0000000001000000} +do_test boundary1-2.4.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000001000000' + } +} {16777216 6} +do_test boundary1-2.4.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=6 + } +} {16777216 0000000001000000} +do_test boundary1-2.4.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777216 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.4.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary1-2.4.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777216 ORDER BY rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.4.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777216 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary1-2.4.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777216 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.4.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777216 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.4.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary1-2.4.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777216 ORDER BY rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.4.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777216 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary1-2.4.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777216 ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.4.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777216 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.4.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary1-2.4.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777216 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary1-2.4.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777216 ORDER BY rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.4.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.4.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777216 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.4.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 
41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary1-2.4.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777216 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary1-2.4.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777216 ORDER BY rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.4.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.5.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-32769 + } +} {29 ffffffffffff7fff} +do_test boundary1-2.5.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffffff7fff' + } +} {-32769 29} +do_test boundary1-2.5.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=29 + } +} {-32769 ffffffffffff7fff} +do_test boundary1-2.5.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.5.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.5.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -32769 ORDER BY rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.5.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -32769 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary1-2.5.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary1-2.5.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.5.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.5.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32769 ORDER BY rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.5.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32769 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary1-2.5.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 
56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary1-2.5.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -32769 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary1-2.5.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary1-2.5.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -32769 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary1-2.5.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -32769 ORDER BY rowid DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.5.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary1-2.5.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32769 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary1-2.5.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary1-2.5.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32769 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary1-2.5.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32769 ORDER BY rowid DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.5.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary1-2.6.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-140737488355329 + } +} {21 ffff7fffffffffff} +do_test boundary1-2.6.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffff7fffffffffff' + } +} {-140737488355329 21} +do_test boundary1-2.6.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=21 + } +} {-140737488355329 ffff7fffffffffff} +do_test boundary1-2.6.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.6.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.6.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355329 ORDER BY rowid + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.6.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355329 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary1-2.6.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.6.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.6.ge.2 { 
+ db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.6.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355329 ORDER BY rowid + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.6.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355329 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary1-2.6.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.6.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355329 ORDER BY a + } +} {2 55 64} +do_test boundary1-2.6.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355329 ORDER BY a DESC + } +} {64 55 2} +do_test boundary1-2.6.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355329 ORDER BY rowid + } +} {55 2 64} +do_test boundary1-2.6.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355329 ORDER BY rowid DESC + } +} {64 2 55} +do_test boundary1-2.6.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355329 ORDER BY x + } +} {55 2 64} +do_test boundary1-2.6.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355329 ORDER BY a + } +} {2 21 55 64} +do_test boundary1-2.6.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355329 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary1-2.6.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355329 ORDER BY rowid + } +} {55 2 64 21} +do_test boundary1-2.6.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355329 ORDER BY rowid DESC + } +} {21 64 2 55} +do_test boundary1-2.6.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355329 ORDER BY x + } +} {55 2 64 21} +do_test boundary1-2.7.1 { + db eval { + SELECT * FROM t1 WHERE rowid=2 + } +} {41 0000000000000002} +do_test boundary1-2.7.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000002' + } +} {2 41} +do_test boundary1-2.7.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=41 + } +} {2 0000000000000002} +do_test boundary1-2.7.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.7.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.7.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 2 ORDER BY rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.7.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 2 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 
24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary1-2.7.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 2 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.7.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.7.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.7.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2 ORDER BY rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.7.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary1-2.7.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.7.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.7.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.7.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 2 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary1-2.7.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 2 ORDER BY rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.7.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 2 ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.7.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.7.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.7.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary1-2.7.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2 ORDER BY rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.7.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.8.1 { + db eval { + SELECT * FROM t1 WHERE rowid=4 + } +} {31 0000000000000004} +do_test boundary1-2.8.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000004' + } +} {4 31} +do_test boundary1-2.8.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=31 + } +} {4 0000000000000004} +do_test boundary1-2.8.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 4 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test 
boundary1-2.8.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary1-2.8.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 4 ORDER BY rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.8.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 4 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary1-2.8.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 4 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.8.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.8.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary1-2.8.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4 ORDER BY rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.8.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary1-2.8.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.8.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 4 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.8.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary1-2.8.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 4 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary1-2.8.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 4 ORDER BY rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.8.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 4 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.8.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4 ORDER BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.8.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary1-2.8.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary1-2.8.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4 ORDER BY rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.8.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4 ORDER BY x 
+ } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.9.1 { + db eval { + SELECT * FROM t1 WHERE rowid=562949953421311 + } +} {13 0001ffffffffffff} +do_test boundary1-2.9.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0001ffffffffffff' + } +} {562949953421311 13} +do_test boundary1-2.9.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=13 + } +} {562949953421311 0001ffffffffffff} +do_test boundary1-2.9.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421311 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary1-2.9.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary1-2.9.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421311 ORDER BY rowid + } +} {43 27 45 17 28 3} +do_test boundary1-2.9.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421311 ORDER BY rowid DESC + } +} {3 28 17 45 27 43} +do_test boundary1-2.9.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421311 ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary1-2.9.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421311 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary1-2.9.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary1-2.9.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421311 ORDER BY rowid + } +} {13 43 27 45 17 28 3} +do_test boundary1-2.9.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421311 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13} +do_test boundary1-2.9.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421311 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary1-2.9.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.9.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421311 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.9.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421311 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary1-2.9.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421311 ORDER BY rowid DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.9.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.9.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.9.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421311 ORDER BY a DESC + } +} {64 63 62 
61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.9.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421311 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary1-2.9.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421311 ORDER BY rowid DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.9.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.10.1 { + db eval { + SELECT * FROM t1 WHERE rowid=256 + } +} {61 0000000000000100} +do_test boundary1-2.10.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000100' + } +} {256 61} +do_test boundary1-2.10.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=61 + } +} {256 0000000000000100} +do_test boundary1-2.10.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.10.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 256 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.10.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 256 ORDER BY rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.10.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 256 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary1-2.10.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 256 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.10.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary1-2.10.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 256 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.10.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 256 ORDER BY rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.10.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 256 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary1-2.10.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 256 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.10.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 256 ORDER BY a + } +} {1 2 4 5 11 21 
29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.10.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 256 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary1-2.10.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 256 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary1-2.10.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 256 ORDER BY rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.10.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.10.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 256 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.10.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 256 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary1-2.10.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 256 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary1-2.10.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 256 ORDER BY rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.10.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.11.1 { + db eval { + SELECT * FROM t1 WHERE rowid=34359738368 + } +} {22 0000000800000000} +do_test boundary1-2.11.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000800000000' + } +} {34359738368 22} +do_test boundary1-2.11.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=22 + } +} {34359738368 0000000800000000} +do_test boundary1-2.11.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary1-2.11.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.11.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738368 ORDER BY rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.11.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738368 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary1-2.11.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738368 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.11.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary1-2.11.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.11.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738368 ORDER BY rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.11.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738368 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary1-2.11.ge.5 { + db eval { + SELECT a FROM t1 
WHERE rowid >= 34359738368 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.11.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.11.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.11.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738368 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary1-2.11.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738368 ORDER BY rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.11.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.11.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.11.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.11.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738368 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary1-2.11.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738368 ORDER BY rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.11.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.12.1 { + db eval { + SELECT * FROM t1 WHERE rowid=65536 + } +} {62 0000000000010000} +do_test boundary1-2.12.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000010000' + } +} {65536 62} +do_test boundary1-2.12.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=62 + } +} {65536 0000000000010000} +do_test boundary1-2.12.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary1-2.12.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 65536 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.12.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 65536 ORDER BY rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.12.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 65536 ORDER BY rowid DESC 
+ } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary1-2.12.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 65536 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.12.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary1-2.12.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65536 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.12.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65536 ORDER BY rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.12.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65536 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary1-2.12.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65536 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.12.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.12.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 65536 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.12.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 65536 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary1-2.12.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 65536 ORDER BY rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.12.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.12.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.12.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65536 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.12.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65536 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary1-2.12.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65536 ORDER BY rowid DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.12.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.13.1 { + db eval { + SELECT * FROM t1 WHERE rowid=268435456 + } +} {40 0000000010000000} +do_test boundary1-2.13.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000010000000' + } +} {268435456 40} +do_test boundary1-2.13.3 { + db eval { + SELECT rowid, x 
FROM t1 WHERE a=40 + } +} {268435456 0000000010000000} +do_test boundary1-2.13.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary1-2.13.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary1-2.13.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435456 ORDER BY rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.13.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435456 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary1-2.13.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435456 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.13.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.13.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary1-2.13.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435456 ORDER BY rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.13.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435456 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary1-2.13.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435456 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.13.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435456 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.13.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.13.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435456 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary1-2.13.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435456 ORDER BY rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.13.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.13.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435456 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.13.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.13.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435456 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 
53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary1-2.13.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435456 ORDER BY rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.13.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.14.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-140737488355328 + } +} {44 ffff800000000000} +do_test boundary1-2.14.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffff800000000000' + } +} {-140737488355328 44} +do_test boundary1-2.14.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=44 + } +} {-140737488355328 ffff800000000000} +do_test boundary1-2.14.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.14.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.14.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355328 ORDER BY rowid + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.14.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355328 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary1-2.14.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.14.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.14.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.14.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355328 ORDER BY rowid + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.14.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -140737488355328 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary1-2.14.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid 
>= -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.14.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355328 ORDER BY a + } +} {2 21 55 64} +do_test boundary1-2.14.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355328 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary1-2.14.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355328 ORDER BY rowid + } +} {55 2 64 21} +do_test boundary1-2.14.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355328 ORDER BY rowid DESC + } +} {21 64 2 55} +do_test boundary1-2.14.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -140737488355328 ORDER BY x + } +} {55 2 64 21} +do_test boundary1-2.14.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355328 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary1-2.14.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355328 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary1-2.14.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355328 ORDER BY rowid + } +} {55 2 64 21 44} +do_test boundary1-2.14.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355328 ORDER BY rowid DESC + } +} {44 21 64 2 55} +do_test boundary1-2.14.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -140737488355328 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary1-2.15.1 { + db eval { + SELECT * FROM t1 WHERE rowid=1099511627776 + } +} {19 0000010000000000} +do_test boundary1-2.15.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000010000000000' + } +} {1099511627776 19} +do_test boundary1-2.15.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=19 + } +} {1099511627776 0000010000000000} +do_test boundary1-2.15.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627776 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary1-2.15.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627776 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary1-2.15.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627776 ORDER BY rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.15.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627776 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary1-2.15.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627776 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.15.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627776 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary1-2.15.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627776 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.15.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627776 ORDER BY rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.15.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627776 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary1-2.15.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627776 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.15.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 
35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.15.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.15.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627776 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary1-2.15.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627776 ORDER BY rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.15.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.15.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.15.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.15.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627776 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary1-2.15.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627776 ORDER BY rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.15.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.16.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 9223372036854775807 ORDER BY a + } +} {} +do_test boundary1-2.16.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 9223372036854775807 ORDER BY a DESC + } +} {} +do_test boundary1-2.16.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 9223372036854775807 ORDER BY rowid + } +} {} +do_test boundary1-2.16.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 9223372036854775807 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.16.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 9223372036854775807 ORDER BY x + } +} {} +do_test boundary1-2.16.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9223372036854775807 ORDER BY a + } +} {3} +do_test boundary1-2.16.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9223372036854775807 ORDER BY a DESC + } +} {3} +do_test boundary1-2.16.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9223372036854775807 ORDER BY rowid + } +} {3} +do_test boundary1-2.16.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9223372036854775807 ORDER BY rowid DESC + } +} {3} +do_test boundary1-2.16.ge.5 { + db eval { 
+ SELECT a FROM t1 WHERE rowid >= 9223372036854775807 ORDER BY x + } +} {3} +do_test boundary1-2.16.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 9223372036854775807 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.16.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.16.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 9223372036854775807 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary1-2.16.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 9223372036854775807 ORDER BY rowid DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.16.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.16.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9223372036854775807 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.16.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.16.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9223372036854775807 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.16.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9223372036854775807 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.16.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.17.1 { + db eval { + SELECT * FROM t1 WHERE rowid=32768 + } +} {50 0000000000008000} +do_test boundary1-2.17.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000008000' + } +} {32768 50} +do_test boundary1-2.17.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=50 + } +} {32768 0000000000008000} +do_test boundary1-2.17.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 
19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary1-2.17.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 32768 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.17.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 32768 ORDER BY rowid + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.17.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 32768 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary1-2.17.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 32768 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.17.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.17.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32768 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.17.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32768 ORDER BY rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.17.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32768 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary1-2.17.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32768 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.17.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 32768 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.17.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.17.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 32768 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary1-2.17.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 32768 ORDER BY rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.17.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.17.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32768 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.17.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.17.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32768 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary1-2.17.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32768 ORDER BY rowid DESC + } +} {50 23 16 8 
61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.17.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.18.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-36028797018963968 + } +} {64 ff80000000000000} +do_test boundary1-2.18.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ff80000000000000' + } +} {-36028797018963968 64} +do_test boundary1-2.18.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=64 + } +} {-36028797018963968 ff80000000000000} +do_test boundary1-2.18.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.18.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963968 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.18.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963968 ORDER BY rowid + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.18.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963968 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary1-2.18.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.18.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.18.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.18.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963968 ORDER BY rowid + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.18.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963968 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary1-2.18.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 
63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.18.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963968 ORDER BY a + } +} {2 55} +do_test boundary1-2.18.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963968 ORDER BY a DESC + } +} {55 2} +do_test boundary1-2.18.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963968 ORDER BY rowid + } +} {55 2} +do_test boundary1-2.18.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963968 ORDER BY rowid DESC + } +} {2 55} +do_test boundary1-2.18.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963968 ORDER BY x + } +} {55 2} +do_test boundary1-2.18.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963968 ORDER BY a + } +} {2 55 64} +do_test boundary1-2.18.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963968 ORDER BY a DESC + } +} {64 55 2} +do_test boundary1-2.18.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963968 ORDER BY rowid + } +} {55 2 64} +do_test boundary1-2.18.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963968 ORDER BY rowid DESC + } +} {64 2 55} +do_test boundary1-2.18.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963968 ORDER BY x + } +} {55 2 64} +do_test boundary1-2.19.1 { + db eval { + SELECT * FROM t1 WHERE rowid=65535 + } +} {48 000000000000ffff} +do_test boundary1-2.19.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='000000000000ffff' + } +} {65535 48} +do_test boundary1-2.19.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=48 + } +} {65535 000000000000ffff} +do_test boundary1-2.19.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary1-2.19.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 65535 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.19.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 65535 ORDER BY rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.19.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 65535 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary1-2.19.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 65535 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.19.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary1-2.19.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65535 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.19.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65535 ORDER BY rowid + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.19.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65535 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary1-2.19.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 65535 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 
36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.19.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.19.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.19.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 65535 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary1-2.19.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 65535 ORDER BY rowid DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.19.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.19.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.19.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.19.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65535 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary1-2.19.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65535 ORDER BY rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.19.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.20.1 { + db eval { + SELECT * FROM t1 WHERE rowid=4294967295 + } +} {14 00000000ffffffff} +do_test boundary1-2.20.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00000000ffffffff' + } +} {4294967295 14} +do_test boundary1-2.20.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=14 + } +} {4294967295 00000000ffffffff} +do_test boundary1-2.20.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967295 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary1-2.20.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.20.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967295 ORDER BY rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.20.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967295 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary1-2.20.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 4294967295 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.20.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967295 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary1-2.20.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test 
boundary1-2.20.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967295 ORDER BY rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.20.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967295 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary1-2.20.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4294967295 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.20.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.20.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.20.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967295 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary1-2.20.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967295 ORDER BY rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.20.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.20.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.20.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.20.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967295 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary1-2.20.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967295 ORDER BY rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.20.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.21.1 { + db eval { + SELECT * FROM t1 WHERE rowid=1099511627775 + } +} {57 000000ffffffffff} +do_test boundary1-2.21.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='000000ffffffffff' + } +} {1099511627775 57} +do_test boundary1-2.21.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=57 + } +} {1099511627775 000000ffffffffff} +do_test boundary1-2.21.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary1-2.21.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627775 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.21.gt.3 { + db eval { 
+ SELECT a FROM t1 WHERE rowid > 1099511627775 ORDER BY rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.21.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627775 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary1-2.21.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 1099511627775 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.21.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary1-2.21.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627775 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.21.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627775 ORDER BY rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.21.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627775 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary1-2.21.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1099511627775 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.21.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.21.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.21.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627775 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary1-2.21.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627775 ORDER BY rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.21.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.21.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.21.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.21.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627775 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary1-2.21.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1099511627775 ORDER BY rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.21.le.5 { + db eval { + 
SELECT a FROM t1 WHERE rowid <= 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.22.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-8388608 + } +} {37 ffffffffff800000} +do_test boundary1-2.22.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffff800000' + } +} {-8388608 37} +do_test boundary1-2.22.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=37 + } +} {-8388608 ffffffffff800000} +do_test boundary1-2.22.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.22.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.22.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388608 ORDER BY rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.22.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388608 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary1-2.22.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary1-2.22.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.22.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.22.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388608 ORDER BY rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.22.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388608 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary1-2.22.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary1-2.22.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388608 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary1-2.22.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary1-2.22.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388608 ORDER BY rowid + } +} {55 2 
64 21 44 58 63 47 11 1} +do_test boundary1-2.22.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388608 ORDER BY rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.22.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary1-2.22.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388608 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary1-2.22.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary1-2.22.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388608 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary1-2.22.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388608 ORDER BY rowid DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.22.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary1-2.23.1 { + db eval { + SELECT * FROM t1 WHERE rowid=549755813888 + } +} {35 0000008000000000} +do_test boundary1-2.23.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000008000000000' + } +} {549755813888 35} +do_test boundary1-2.23.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=35 + } +} {549755813888 0000008000000000} +do_test boundary1-2.23.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary1-2.23.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813888 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.23.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813888 ORDER BY rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.23.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813888 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary1-2.23.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813888 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.23.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary1-2.23.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813888 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.23.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813888 ORDER BY rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.23.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813888 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary1-2.23.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813888 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.23.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813888 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.23.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.23.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813888 ORDER BY rowid + } +} {55 2 64 21 44 58 63 
47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary1-2.23.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813888 ORDER BY rowid DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.23.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.23.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813888 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.23.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.23.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813888 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary1-2.23.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813888 ORDER BY rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.23.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.24.1 { + db eval { + SELECT * FROM t1 WHERE rowid=8388607 + } +} {18 00000000007fffff} +do_test boundary1-2.24.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00000000007fffff' + } +} {8388607 18} +do_test boundary1-2.24.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=18 + } +} {8388607 00000000007fffff} +do_test boundary1-2.24.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.24.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.24.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388607 ORDER BY rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.24.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388607 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary1-2.24.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388607 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.24.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.24.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test 
boundary1-2.24.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388607 ORDER BY rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.24.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388607 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary1-2.24.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388607 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.24.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.24.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary1-2.24.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388607 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary1-2.24.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388607 ORDER BY rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.24.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.24.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.24.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary1-2.24.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388607 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary1-2.24.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388607 ORDER BY rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.24.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.25.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-3 + } +} {52 fffffffffffffffd} +do_test boundary1-2.25.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='fffffffffffffffd' + } +} {-3 52} +do_test boundary1-2.25.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=52 + } +} {-3 fffffffffffffffd} +do_test boundary1-2.25.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.25.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.25.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -3 ORDER BY rowid + 
} +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.25.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -3 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary1-2.25.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary1-2.25.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary1-2.25.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.25.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -3 ORDER BY rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.25.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -3 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary1-2.25.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary1-2.25.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary1-2.25.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.25.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -3 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary1-2.25.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -3 ORDER BY rowid DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.25.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary1-2.25.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.25.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.25.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -3 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary1-2.25.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -3 ORDER BY rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.25.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary1-2.26.1 { + db eval { + SELECT * FROM t1 WHERE rowid=0 + } +} {59 0000000000000000} +do_test boundary1-2.26.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000000' + } +} {0 59} +do_test boundary1-2.26.3 
{ + db eval { + SELECT rowid, x FROM t1 WHERE a=59 + } +} {0 0000000000000000} +do_test boundary1-2.26.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary1-2.26.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 0 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.26.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 0 ORDER BY rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.26.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 0 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary1-2.26.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 0 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.26.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.26.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 0 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.26.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 0 ORDER BY rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.26.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 0 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary1-2.26.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 0 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.26.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.26.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 0 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.26.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 0 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.26.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 0 ORDER BY rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.26.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 0 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.26.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary1-2.26.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 0 ORDER BY a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.26.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 0 
ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary1-2.26.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 0 ORDER BY rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.26.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 0 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.27.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-1 + } +} {38 ffffffffffffffff} +do_test boundary1-2.27.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffffffffff' + } +} {-1 38} +do_test boundary1-2.27.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=38 + } +} {-1 ffffffffffffffff} +do_test boundary1-2.27.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.27.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.27.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -1 ORDER BY rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.27.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -1 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary1-2.27.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.27.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.27.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.27.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -1 ORDER BY rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.27.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -1 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary1-2.27.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary1-2.27.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.27.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary1-2.27.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -1 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary1-2.27.lt.4 { + 
db eval { + SELECT a FROM t1 WHERE rowid < -1 ORDER BY rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.27.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary1-2.27.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.27.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.27.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -1 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.27.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -1 ORDER BY rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.27.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.28.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-2 + } +} {33 fffffffffffffffe} +do_test boundary1-2.28.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='fffffffffffffffe' + } +} {-2 33} +do_test boundary1-2.28.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=33 + } +} {-2 fffffffffffffffe} +do_test boundary1-2.28.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.28.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.28.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -2 ORDER BY rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.28.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -2 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary1-2.28.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -2 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary1-2.28.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary1-2.28.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.28.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2 ORDER BY rowid + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.28.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary1-2.28.ge.5 { + db 
eval { + SELECT a FROM t1 WHERE rowid >= -2 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary1-2.28.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -2 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.28.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.28.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -2 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary1-2.28.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -2 ORDER BY rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.28.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary1-2.28.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary1-2.28.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary1-2.28.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary1-2.28.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2 ORDER BY rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.28.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary1-2.29.1 { + db eval { + SELECT * FROM t1 WHERE rowid=2097152 + } +} {42 0000000000200000} +do_test boundary1-2.29.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000200000' + } +} {2097152 42} +do_test boundary1-2.29.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=42 + } +} {2097152 0000000000200000} +do_test boundary1-2.29.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.29.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.29.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097152 ORDER BY rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.29.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097152 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary1-2.29.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097152 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.29.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary1-2.29.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.29.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097152 ORDER BY rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 
13 43 27 45 17 28 3} +do_test boundary1-2.29.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097152 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary1-2.29.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097152 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.29.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.29.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary1-2.29.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097152 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary1-2.29.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097152 ORDER BY rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.29.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.29.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.29.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary1-2.29.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097152 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary1-2.29.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097152 ORDER BY rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.29.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.30.1 { + db eval { + SELECT * FROM t1 WHERE rowid=128 + } +} {49 0000000000000080} +do_test boundary1-2.30.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000080' + } +} {128 49} +do_test boundary1-2.30.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=49 + } +} {128 0000000000000080} +do_test boundary1-2.30.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary1-2.30.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.30.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 128 ORDER BY rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.30.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 128 ORDER BY rowid DESC + } +} {3 
28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary1-2.30.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 128 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.30.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.30.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.30.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 128 ORDER BY rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.30.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 128 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary1-2.30.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 128 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.30.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.30.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary1-2.30.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 128 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary1-2.30.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 128 ORDER BY rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.30.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 128 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.30.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.30.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary1-2.30.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 128 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary1-2.30.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 128 ORDER BY rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.30.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 128 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.31.1 { + db eval { + SELECT * FROM t1 WHERE rowid=255 + } +} {30 00000000000000ff} +do_test boundary1-2.31.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00000000000000ff' + } +} {255 30} +do_test boundary1-2.31.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=30 + } +} {255 00000000000000ff} +do_test boundary1-2.31.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 
255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary1-2.31.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.31.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 255 ORDER BY rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.31.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 255 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary1-2.31.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 255 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.31.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary1-2.31.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.31.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 255 ORDER BY rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.31.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 255 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary1-2.31.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 255 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.31.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 255 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.31.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary1-2.31.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 255 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary1-2.31.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 255 ORDER BY rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.31.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 255 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.31.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 255 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.31.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary1-2.31.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 255 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary1-2.31.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 
255 ORDER BY rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.31.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 255 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.32.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-2147483648 + } +} {11 ffffffff80000000} +do_test boundary1-2.32.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffff80000000' + } +} {-2147483648 11} +do_test boundary1-2.32.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=11 + } +} {-2147483648 ffffffff80000000} +do_test boundary1-2.32.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.32.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.32.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483648 ORDER BY rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.32.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483648 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary1-2.32.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.32.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.32.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.32.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483648 ORDER BY rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.32.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483648 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary1-2.32.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.32.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483648 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary1-2.32.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483648 
ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary1-2.32.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483648 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47} +do_test boundary1-2.32.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483648 ORDER BY rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary1-2.32.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary1-2.32.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483648 ORDER BY a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary1-2.32.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483648 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary1-2.32.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483648 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary1-2.32.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483648 ORDER BY rowid DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary1-2.32.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary1-2.33.1 { + db eval { + SELECT * FROM t1 WHERE rowid=34359738367 + } +} {39 00000007ffffffff} +do_test boundary1-2.33.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00000007ffffffff' + } +} {34359738367 39} +do_test boundary1-2.33.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=39 + } +} {34359738367 00000007ffffffff} +do_test boundary1-2.33.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary1-2.33.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.33.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738367 ORDER BY rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.33.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738367 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary1-2.33.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 34359738367 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.33.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary1-2.33.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary1-2.33.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738367 ORDER BY rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.33.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738367 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary1-2.33.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 34359738367 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.33.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.33.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 
38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.33.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738367 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary1-2.33.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738367 ORDER BY rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.33.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 34359738367 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.33.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.33.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.33.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738367 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary1-2.33.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738367 ORDER BY rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.33.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 34359738367 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.34.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-549755813889 + } +} {58 ffffff7fffffffff} +do_test boundary1-2.34.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffff7fffffffff' + } +} {-549755813889 58} +do_test boundary1-2.34.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=58 + } +} {-549755813889 ffffff7fffffffff} +do_test boundary1-2.34.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary1-2.34.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.34.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813889 ORDER BY rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.34.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813889 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary1-2.34.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid 
> -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.34.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary1-2.34.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.34.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813889 ORDER BY rowid + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.34.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813889 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary1-2.34.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.34.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813889 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary1-2.34.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813889 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary1-2.34.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813889 ORDER BY rowid + } +} {55 2 64 21 44} +do_test boundary1-2.34.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813889 ORDER BY rowid DESC + } +} {44 21 64 2 55} +do_test boundary1-2.34.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813889 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary1-2.34.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813889 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary1-2.34.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813889 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary1-2.34.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813889 ORDER BY rowid + } +} {55 2 64 21 44 58} +do_test boundary1-2.34.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813889 ORDER BY rowid DESC + } +} {58 44 21 64 2 55} +do_test boundary1-2.34.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813889 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary1-2.35.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-32768 + } +} {32 ffffffffffff8000} +do_test boundary1-2.35.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffffff8000' + } +} {-32768 32} +do_test boundary1-2.35.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=32 + } +} {-32768 ffffffffffff8000} +do_test boundary1-2.35.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.35.gt.2 { + db eval { + SELECT a FROM 
t1 WHERE rowid > -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.35.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -32768 ORDER BY rowid + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.35.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -32768 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary1-2.35.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary1-2.35.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.35.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.35.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32768 ORDER BY rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.35.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32768 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary1-2.35.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary1-2.35.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -32768 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary1-2.35.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary1-2.35.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -32768 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary1-2.35.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -32768 ORDER BY rowid DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.35.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary1-2.35.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32768 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary1-2.35.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.35.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32768 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary1-2.35.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -32768 ORDER BY rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.35.le.5 { 
+ db eval { + SELECT a FROM t1 WHERE rowid <= -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary1-2.36.1 { + db eval { + SELECT * FROM t1 WHERE rowid=2147483647 + } +} {20 000000007fffffff} +do_test boundary1-2.36.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='000000007fffffff' + } +} {2147483647 20} +do_test boundary1-2.36.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=20 + } +} {2147483647 000000007fffffff} +do_test boundary1-2.36.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483647 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary1-2.36.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary1-2.36.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483647 ORDER BY rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.36.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483647 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary1-2.36.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483647 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.36.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483647 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary1-2.36.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary1-2.36.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483647 ORDER BY rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.36.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483647 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary1-2.36.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483647 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.36.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.36.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.36.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483647 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary1-2.36.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483647 ORDER BY rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.36.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.36.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 
58 59 60 61 62 63 64} +do_test boundary1-2.36.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.36.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483647 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary1-2.36.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483647 ORDER BY rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.36.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.37.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-129 + } +} {54 ffffffffffffff7f} +do_test boundary1-2.37.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffffffff7f' + } +} {-129 54} +do_test boundary1-2.37.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=54 + } +} {-129 ffffffffffffff7f} +do_test boundary1-2.37.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary1-2.37.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.37.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -129 ORDER BY rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.37.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -129 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary1-2.37.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary1-2.37.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.37.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.37.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -129 ORDER BY rowid + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.37.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -129 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary1-2.37.ge.5 { + db 
eval { + SELECT a FROM t1 WHERE rowid >= -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary1-2.37.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary1-2.37.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -129 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.37.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -129 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary1-2.37.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -129 ORDER BY rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.37.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary1-2.37.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary1-2.37.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -129 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.37.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -129 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary1-2.37.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -129 ORDER BY rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.37.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary1-2.38.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-128 + } +} {53 ffffffffffffff80} +do_test boundary1-2.38.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffffffff80' + } +} {-128 53} +do_test boundary1-2.38.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=53 + } +} {-128 ffffffffffffff80} +do_test boundary1-2.38.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary1-2.38.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -128 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.38.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -128 ORDER BY rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.38.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -128 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary1-2.38.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary1-2.38.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary1-2.38.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -128 ORDER BY a DESC + } +} {62 61 60 
59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.38.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -128 ORDER BY rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.38.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -128 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary1-2.38.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary1-2.38.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary1-2.38.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -128 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.38.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -128 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary1-2.38.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -128 ORDER BY rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.38.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary1-2.38.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary1-2.38.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -128 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary1-2.38.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -128 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary1-2.38.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -128 ORDER BY rowid DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.38.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary1-2.39.1 { + db eval { + SELECT * FROM t1 WHERE rowid=72057594037927936 + } +} {28 0100000000000000} +do_test boundary1-2.39.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0100000000000000' + } +} {72057594037927936 28} +do_test boundary1-2.39.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=28 + } +} {72057594037927936 0100000000000000} +do_test boundary1-2.39.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927936 ORDER BY a + } +} {3} +do_test boundary1-2.39.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927936 ORDER BY a DESC + } +} {3} +do_test boundary1-2.39.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927936 ORDER BY rowid + } +} {3} +do_test boundary1-2.39.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927936 ORDER BY rowid DESC + } +} {3} +do_test boundary1-2.39.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 72057594037927936 ORDER BY x + } +} {3} +do_test boundary1-2.39.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927936 ORDER BY a + } +} {3 28} +do_test boundary1-2.39.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927936 ORDER BY a DESC + } +} {28 3} 
+do_test boundary1-2.39.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927936 ORDER BY rowid + } +} {28 3} +do_test boundary1-2.39.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927936 ORDER BY rowid DESC + } +} {3 28} +do_test boundary1-2.39.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 72057594037927936 ORDER BY x + } +} {28 3} +do_test boundary1-2.39.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.39.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.39.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927936 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary1-2.39.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927936 ORDER BY rowid DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.39.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.39.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.39.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.39.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927936 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary1-2.39.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927936 ORDER BY rowid DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.39.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.40.1 { + db eval { + SELECT * FROM t1 WHERE rowid=2147483648 + } +} {51 0000000080000000} +do_test boundary1-2.40.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000080000000' + 
} +} {2147483648 51} +do_test boundary1-2.40.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=51 + } +} {2147483648 0000000080000000} +do_test boundary1-2.40.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary1-2.40.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483648 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary1-2.40.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483648 ORDER BY rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.40.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483648 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary1-2.40.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 2147483648 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.40.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary1-2.40.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483648 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary1-2.40.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483648 ORDER BY rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.40.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483648 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary1-2.40.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2147483648 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.40.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.40.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.40.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483648 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary1-2.40.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483648 ORDER BY rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.40.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.40.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.40.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.40.le.3 { + db eval { + SELECT a FROM t1 WHERE 
rowid <= 2147483648 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary1-2.40.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483648 ORDER BY rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.40.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.41.1 { + db eval { + SELECT * FROM t1 WHERE rowid=549755813887 + } +} {46 0000007fffffffff} +do_test boundary1-2.41.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000007fffffffff' + } +} {549755813887 46} +do_test boundary1-2.41.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=46 + } +} {549755813887 0000007fffffffff} +do_test boundary1-2.41.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary1-2.41.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813887 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.41.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813887 ORDER BY rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.41.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813887 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary1-2.41.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 549755813887 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.41.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary1-2.41.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813887 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary1-2.41.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813887 ORDER BY rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.41.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813887 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary1-2.41.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 549755813887 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.41.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.41.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.41.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813887 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary1-2.41.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813887 ORDER BY rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 
47 63 58 44 21 64 2 55} +do_test boundary1-2.41.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.41.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.41.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.41.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813887 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary1-2.41.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813887 ORDER BY rowid DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.41.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.42.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-549755813888 + } +} {63 ffffff8000000000} +do_test boundary1-2.42.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffff8000000000' + } +} {-549755813888 63} +do_test boundary1-2.42.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=63 + } +} {-549755813888 ffffff8000000000} +do_test boundary1-2.42.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.42.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813888 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.42.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813888 ORDER BY rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.42.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813888 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary1-2.42.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.42.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test 
boundary1-2.42.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813888 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.42.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813888 ORDER BY rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.42.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813888 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary1-2.42.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.42.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813888 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary1-2.42.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813888 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary1-2.42.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813888 ORDER BY rowid + } +} {55 2 64 21 44 58} +do_test boundary1-2.42.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813888 ORDER BY rowid DESC + } +} {58 44 21 64 2 55} +do_test boundary1-2.42.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -549755813888 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary1-2.42.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813888 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary1-2.42.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813888 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary1-2.42.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813888 ORDER BY rowid + } +} {55 2 64 21 44 58 63} +do_test boundary1-2.42.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813888 ORDER BY rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary1-2.42.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -549755813888 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary1-2.43.1 { + db eval { + SELECT * FROM t1 WHERE rowid=281474976710655 + } +} {10 0000ffffffffffff} +do_test boundary1-2.43.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000ffffffffffff' + } +} {281474976710655 10} +do_test boundary1-2.43.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=10 + } +} {281474976710655 0000ffffffffffff} +do_test boundary1-2.43.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710655 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary1-2.43.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary1-2.43.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710655 ORDER BY rowid + } +} {26 13 43 27 45 17 28 3} +do_test boundary1-2.43.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710655 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary1-2.43.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710655 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary1-2.43.ge.1 { + db eval { + SELECT a FROM t1 
WHERE rowid >= 281474976710655 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary1-2.43.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary1-2.43.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710655 ORDER BY rowid + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary1-2.43.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710655 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary1-2.43.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710655 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary1-2.43.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.43.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.43.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710655 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary1-2.43.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710655 ORDER BY rowid DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.43.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.43.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.43.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.43.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710655 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary1-2.43.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710655 ORDER BY rowid DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.43.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.44.1 { + db eval { + SELECT * FROM t1 WHERE rowid=4398046511103 + } +} {7 000003ffffffffff} +do_test boundary1-2.44.2 { + db eval { + 
SELECT rowid, a FROM t1 WHERE x='000003ffffffffff' + } +} {4398046511103 7} +do_test boundary1-2.44.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=7 + } +} {4398046511103 000003ffffffffff} +do_test boundary1-2.44.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511103 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary1-2.44.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary1-2.44.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511103 ORDER BY rowid + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.44.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511103 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary1-2.44.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511103 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.44.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511103 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary1-2.44.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary1-2.44.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511103 ORDER BY rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.44.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511103 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary1-2.44.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511103 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.44.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511103 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.44.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.44.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511103 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary1-2.44.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511103 ORDER BY rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.44.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.44.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511103 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.44.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.44.le.3 { + db eval 
{ + SELECT a FROM t1 WHERE rowid <= 4398046511103 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary1-2.44.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511103 ORDER BY rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.44.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.45.1 { + db eval { + SELECT * FROM t1 WHERE rowid=268435455 + } +} {12 000000000fffffff} +do_test boundary1-2.45.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='000000000fffffff' + } +} {268435455 12} +do_test boundary1-2.45.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=12 + } +} {268435455 000000000fffffff} +do_test boundary1-2.45.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435455 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.45.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary1-2.45.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435455 ORDER BY rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.45.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435455 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary1-2.45.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 268435455 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.45.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435455 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.45.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary1-2.45.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435455 ORDER BY rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.45.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435455 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary1-2.45.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 268435455 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.45.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.45.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary1-2.45.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435455 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 
9 6} +do_test boundary1-2.45.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435455 ORDER BY rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.45.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.45.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.45.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary1-2.45.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435455 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary1-2.45.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435455 ORDER BY rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.45.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.46.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-9223372036854775808 + } +} {55 8000000000000000} +do_test boundary1-2.46.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='8000000000000000' + } +} {-9223372036854775808 55} +do_test boundary1-2.46.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=55 + } +} {-9223372036854775808 8000000000000000} +do_test boundary1-2.46.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -9223372036854775808 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.46.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.46.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -9223372036854775808 ORDER BY rowid + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.46.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -9223372036854775808 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary1-2.46.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.46.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9223372036854775808 ORDER BY a + } +} 
{1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.46.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.46.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9223372036854775808 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.46.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9223372036854775808 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.46.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.46.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -9223372036854775808 ORDER BY a + } +} {} +do_test boundary1-2.46.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -9223372036854775808 ORDER BY a DESC + } +} {} +do_test boundary1-2.46.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -9223372036854775808 ORDER BY rowid + } +} {} +do_test boundary1-2.46.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -9223372036854775808 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.46.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -9223372036854775808 ORDER BY x + } +} {} +do_test boundary1-2.46.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9223372036854775808 ORDER BY a + } +} {55} +do_test boundary1-2.46.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9223372036854775808 ORDER BY a DESC + } +} {55} +do_test boundary1-2.46.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9223372036854775808 ORDER BY rowid + } +} {55} +do_test boundary1-2.46.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9223372036854775808 ORDER BY rowid DESC + } +} {55} +do_test boundary1-2.46.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9223372036854775808 ORDER BY x + } +} {55} +do_test boundary1-2.47.1 { + db eval { + SELECT * FROM t1 WHERE rowid=562949953421312 + } +} {43 0002000000000000} +do_test boundary1-2.47.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0002000000000000' + } +} {562949953421312 43} +do_test boundary1-2.47.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=43 + } +} {562949953421312 0002000000000000} +do_test boundary1-2.47.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421312 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary1-2.47.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421312 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary1-2.47.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421312 ORDER BY rowid + } +} {27 45 17 28 3} +do_test boundary1-2.47.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 562949953421312 ORDER BY rowid DESC + } +} {3 28 17 45 27} +do_test boundary1-2.47.gt.5 { + db 
eval { + SELECT a FROM t1 WHERE rowid > 562949953421312 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary1-2.47.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421312 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary1-2.47.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421312 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary1-2.47.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421312 ORDER BY rowid + } +} {43 27 45 17 28 3} +do_test boundary1-2.47.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421312 ORDER BY rowid DESC + } +} {3 28 17 45 27 43} +do_test boundary1-2.47.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 562949953421312 ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary1-2.47.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.47.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.47.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421312 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary1-2.47.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421312 ORDER BY rowid DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.47.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.47.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.47.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.47.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421312 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary1-2.47.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421312 ORDER BY rowid DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.47.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 
58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.48.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-8388609 + } +} {1 ffffffffff7fffff} +do_test boundary1-2.48.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffffff7fffff' + } +} {-8388609 1} +do_test boundary1-2.48.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=1 + } +} {-8388609 ffffffffff7fffff} +do_test boundary1-2.48.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388609 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.48.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.48.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388609 ORDER BY rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.48.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388609 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary1-2.48.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary1-2.48.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388609 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.48.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.48.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388609 ORDER BY rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.48.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388609 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary1-2.48.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.48.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388609 ORDER BY a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary1-2.48.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary1-2.48.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388609 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary1-2.48.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388609 ORDER BY rowid DESC + } +} {11 47 63 58 44 21 
64 2 55} +do_test boundary1-2.48.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary1-2.48.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388609 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary1-2.48.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary1-2.48.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388609 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary1-2.48.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388609 ORDER BY rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.48.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary1-2.49.1 { + db eval { + SELECT * FROM t1 WHERE rowid=16777215 + } +} {9 0000000000ffffff} +do_test boundary1-2.49.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000ffffff' + } +} {16777215 9} +do_test boundary1-2.49.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=9 + } +} {16777215 0000000000ffffff} +do_test boundary1-2.49.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777215 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.49.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary1-2.49.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777215 ORDER BY rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.49.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777215 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary1-2.49.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 16777215 ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.49.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777215 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.49.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.49.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777215 ORDER BY rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.49.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777215 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary1-2.49.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16777215 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.49.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777215 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.49.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary1-2.49.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777215 ORDER BY rowid + } +} {55 2 64 
21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary1-2.49.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777215 ORDER BY rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.49.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.49.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777215 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.49.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary1-2.49.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777215 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary1-2.49.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777215 ORDER BY rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.49.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.50.1 { + db eval { + SELECT * FROM t1 WHERE rowid=8388608 + } +} {24 0000000000800000} +do_test boundary1-2.50.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000800000' + } +} {8388608 24} +do_test boundary1-2.50.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=24 + } +} {8388608 0000000000800000} +do_test boundary1-2.50.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.50.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.50.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388608 ORDER BY rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.50.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388608 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary1-2.50.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 8388608 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.50.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary1-2.50.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.50.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388608 ORDER BY rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.50.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388608 ORDER BY rowid DESC + } +} {3 28 
17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary1-2.50.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 8388608 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.50.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.50.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary1-2.50.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388608 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary1-2.50.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388608 ORDER BY rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.50.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.50.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.50.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary1-2.50.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388608 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary1-2.50.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388608 ORDER BY rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.50.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.51.1 { + db eval { + SELECT * FROM t1 WHERE rowid=16383 + } +} {8 0000000000003fff} +do_test boundary1-2.51.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000003fff' + } +} {16383 8} +do_test boundary1-2.51.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=8 + } +} {16383 0000000000003fff} +do_test boundary1-2.51.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 16383 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.51.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.51.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 16383 ORDER BY rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.51.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 16383 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} 
+do_test boundary1-2.51.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 16383 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.51.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16383 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.51.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.51.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16383 ORDER BY rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.51.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16383 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary1-2.51.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 16383 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.51.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 16383 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.51.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary1-2.51.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 16383 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary1-2.51.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 16383 ORDER BY rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.51.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.51.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16383 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.51.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary1-2.51.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16383 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary1-2.51.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16383 ORDER BY rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.51.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.52.1 { + db eval { + SELECT * FROM t1 WHERE rowid=140737488355328 + } +} {34 0000800000000000} +do_test boundary1-2.52.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000800000000000' + } +} {140737488355328 34} +do_test boundary1-2.52.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=34 + } +} {140737488355328 0000800000000000} +do_test boundary1-2.52.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 
140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary1-2.52.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355328 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary1-2.52.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355328 ORDER BY rowid + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary1-2.52.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355328 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary1-2.52.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355328 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary1-2.52.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary1-2.52.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355328 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary1-2.52.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355328 ORDER BY rowid + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.52.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355328 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary1-2.52.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355328 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.52.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355328 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.52.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.52.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355328 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary1-2.52.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355328 ORDER BY rowid DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.52.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.52.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355328 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.52.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.52.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355328 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test 
boundary1-2.52.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355328 ORDER BY rowid DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.52.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.53.1 { + db eval { + SELECT * FROM t1 WHERE rowid=2097151 + } +} {15 00000000001fffff} +do_test boundary1-2.53.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00000000001fffff' + } +} {2097151 15} +do_test boundary1-2.53.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=15 + } +} {2097151 00000000001fffff} +do_test boundary1-2.53.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary1-2.53.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary1-2.53.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097151 ORDER BY rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.53.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097151 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary1-2.53.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 2097151 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.53.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary1-2.53.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.53.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097151 ORDER BY rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.53.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097151 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary1-2.53.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 2097151 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.53.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097151 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.53.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.53.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097151 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary1-2.53.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097151 ORDER BY rowid DESC + } +} {62 48 50 23 
16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.53.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.53.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097151 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary1-2.53.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary1-2.53.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097151 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary1-2.53.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097151 ORDER BY rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.53.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.54.1 { + db eval { + SELECT * FROM t1 WHERE rowid=140737488355327 + } +} {25 00007fffffffffff} +do_test boundary1-2.54.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='00007fffffffffff' + } +} {140737488355327 25} +do_test boundary1-2.54.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=25 + } +} {140737488355327 00007fffffffffff} +do_test boundary1-2.54.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355327 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary1-2.54.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355327 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary1-2.54.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355327 ORDER BY rowid + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.54.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355327 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary1-2.54.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 140737488355327 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.54.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355327 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary1-2.54.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355327 ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary1-2.54.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355327 ORDER BY rowid + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.54.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355327 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary1-2.54.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 140737488355327 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.54.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.54.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 
49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.54.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355327 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary1-2.54.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355327 ORDER BY rowid DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.54.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.54.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.54.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.54.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355327 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary1-2.54.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355327 ORDER BY rowid DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.54.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.55.1 { + db eval { + SELECT * FROM t1 WHERE rowid=281474976710656 + } +} {26 0001000000000000} +do_test boundary1-2.55.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0001000000000000' + } +} {281474976710656 26} +do_test boundary1-2.55.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=26 + } +} {281474976710656 0001000000000000} +do_test boundary1-2.55.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710656 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary1-2.55.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710656 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary1-2.55.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710656 ORDER BY rowid + } +} {13 43 27 45 17 28 3} +do_test boundary1-2.55.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710656 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13} +do_test boundary1-2.55.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 281474976710656 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary1-2.55.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710656 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary1-2.55.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710656 ORDER BY a DESC + } +} {45 43 
28 27 26 17 13 3} +do_test boundary1-2.55.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710656 ORDER BY rowid + } +} {26 13 43 27 45 17 28 3} +do_test boundary1-2.55.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710656 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary1-2.55.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 281474976710656 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary1-2.55.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.55.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.55.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710656 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary1-2.55.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710656 ORDER BY rowid DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.55.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.55.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.55.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.55.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710656 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary1-2.55.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710656 ORDER BY rowid DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.55.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.56.1 { + db eval { + SELECT * FROM t1 WHERE rowid=32767 + } +} {23 0000000000007fff} +do_test boundary1-2.56.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000007fff' + } +} {32767 23} +do_test boundary1-2.56.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=23 + } +} {32767 0000000000007fff} +do_test 
boundary1-2.56.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.56.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.56.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 32767 ORDER BY rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.56.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 32767 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary1-2.56.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 32767 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.56.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary1-2.56.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary1-2.56.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32767 ORDER BY rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.56.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32767 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary1-2.56.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 32767 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.56.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.56.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 32767 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary1-2.56.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 32767 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary1-2.56.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 32767 ORDER BY rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.56.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 32767 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.56.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary1-2.56.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32767 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary1-2.56.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32767 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 
23} +do_test boundary1-2.56.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32767 ORDER BY rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.56.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 32767 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.57.1 { + db eval { + SELECT * FROM t1 WHERE rowid=127 + } +} {4 000000000000007f} +do_test boundary1-2.57.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='000000000000007f' + } +} {127 4} +do_test boundary1-2.57.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=4 + } +} {127 000000000000007f} +do_test boundary1-2.57.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 127 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.57.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary1-2.57.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 127 ORDER BY rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.57.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 127 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary1-2.57.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 127 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.57.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 127 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.57.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary1-2.57.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 127 ORDER BY rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.57.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 127 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary1-2.57.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 127 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.57.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 127 ORDER BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.57.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary1-2.57.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 127 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary1-2.57.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 127 ORDER BY rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 
29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.57.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 127 ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.57.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 127 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.57.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary1-2.57.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 127 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary1-2.57.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 127 ORDER BY rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.57.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 127 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.58.1 { + db eval { + SELECT * FROM t1 WHERE rowid=36028797018963967 + } +} {27 007fffffffffffff} +do_test boundary1-2.58.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='007fffffffffffff' + } +} {36028797018963967 27} +do_test boundary1-2.58.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=27 + } +} {36028797018963967 007fffffffffffff} +do_test boundary1-2.58.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963967 ORDER BY a + } +} {3 17 28 45} +do_test boundary1-2.58.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963967 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary1-2.58.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963967 ORDER BY rowid + } +} {45 17 28 3} +do_test boundary1-2.58.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963967 ORDER BY rowid DESC + } +} {3 28 17 45} +do_test boundary1-2.58.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963967 ORDER BY x + } +} {45 17 28 3} +do_test boundary1-2.58.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963967 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary1-2.58.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963967 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary1-2.58.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963967 ORDER BY rowid + } +} {27 45 17 28 3} +do_test boundary1-2.58.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963967 ORDER BY rowid DESC + } +} {3 28 17 45 27} +do_test boundary1-2.58.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963967 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary1-2.58.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.58.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.58.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963967 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 
46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary1-2.58.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963967 ORDER BY rowid DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.58.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.58.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.58.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.58.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963967 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary1-2.58.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963967 ORDER BY rowid DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.58.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.59.1 { + db eval { + SELECT * FROM t1 WHERE rowid=4398046511104 + } +} {56 0000040000000000} +do_test boundary1-2.59.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000040000000000' + } +} {4398046511104 56} +do_test boundary1-2.59.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=56 + } +} {4398046511104 0000040000000000} +do_test boundary1-2.59.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary1-2.59.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511104 ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary1-2.59.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511104 ORDER BY rowid + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.59.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511104 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary1-2.59.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 4398046511104 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.59.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary1-2.59.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511104 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary1-2.59.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511104 ORDER BY rowid + } +} 
{56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.59.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511104 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary1-2.59.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 4398046511104 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.59.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary1-2.59.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.59.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511104 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary1-2.59.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511104 ORDER BY rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.59.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.59.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.59.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary1-2.59.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511104 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary1-2.59.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511104 ORDER BY rowid DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.59.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.60.1 { + db eval { + SELECT * FROM t1 WHERE rowid=1 + } +} {60 0000000000000001} +do_test boundary1-2.60.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000001' + } +} {1 60} +do_test boundary1-2.60.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=60 + } +} {1 0000000000000001} +do_test boundary1-2.60.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.60.gt.2 { + db eval { 
+ SELECT a FROM t1 WHERE rowid > 1 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.60.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 1 ORDER BY rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.60.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 1 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary1-2.60.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 1 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.60.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary1-2.60.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.60.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1 ORDER BY rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.60.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary1-2.60.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 1 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.60.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary1-2.60.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 1 ORDER BY a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.60.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 1 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary1-2.60.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 1 ORDER BY rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.60.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 1 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.60.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.60.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.60.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary1-2.60.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1 ORDER BY rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.60.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 1 ORDER BY x + } +} {59 60 
55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.61.1 { + db eval { + SELECT * FROM t1 WHERE rowid=36028797018963968 + } +} {45 0080000000000000} +do_test boundary1-2.61.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0080000000000000' + } +} {36028797018963968 45} +do_test boundary1-2.61.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=45 + } +} {36028797018963968 0080000000000000} +do_test boundary1-2.61.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963968 ORDER BY a + } +} {3 17 28} +do_test boundary1-2.61.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963968 ORDER BY a DESC + } +} {28 17 3} +do_test boundary1-2.61.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963968 ORDER BY rowid + } +} {17 28 3} +do_test boundary1-2.61.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963968 ORDER BY rowid DESC + } +} {3 28 17} +do_test boundary1-2.61.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 36028797018963968 ORDER BY x + } +} {17 28 3} +do_test boundary1-2.61.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963968 ORDER BY a + } +} {3 17 28 45} +do_test boundary1-2.61.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963968 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary1-2.61.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963968 ORDER BY rowid + } +} {45 17 28 3} +do_test boundary1-2.61.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963968 ORDER BY rowid DESC + } +} {3 28 17 45} +do_test boundary1-2.61.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 36028797018963968 ORDER BY x + } +} {45 17 28 3} +do_test boundary1-2.61.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.61.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.61.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963968 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary1-2.61.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963968 ORDER BY rowid DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.61.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.61.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.61.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963968 ORDER BY a DESC + } +} {64 63 
62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary1-2.61.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963968 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary1-2.61.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963968 ORDER BY rowid DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.61.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.62.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-2147483649 + } +} {47 ffffffff7fffffff} +do_test boundary1-2.62.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ffffffff7fffffff' + } +} {-2147483649 47} +do_test boundary1-2.62.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=47 + } +} {-2147483649 ffffffff7fffffff} +do_test boundary1-2.62.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.62.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.62.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483649 ORDER BY rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.62.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483649 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary1-2.62.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.62.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary1-2.62.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.62.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483649 ORDER BY rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 
10 26 13 43 27 45 17 28 3} +do_test boundary1-2.62.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483649 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary1-2.62.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.62.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483649 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary1-2.62.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483649 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary1-2.62.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483649 ORDER BY rowid + } +} {55 2 64 21 44 58 63} +do_test boundary1-2.62.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483649 ORDER BY rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary1-2.62.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary1-2.62.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483649 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary1-2.62.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483649 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary1-2.62.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483649 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47} +do_test boundary1-2.62.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483649 ORDER BY rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary1-2.62.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary1-2.63.1 { + db eval { + SELECT * FROM t1 WHERE rowid=-36028797018963969 + } +} {2 ff7fffffffffffff} +do_test boundary1-2.63.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='ff7fffffffffffff' + } +} {-36028797018963969 2} +do_test boundary1-2.63.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=2 + } +} {-36028797018963969 ff7fffffffffffff} +do_test boundary1-2.63.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963969 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.63.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary1-2.63.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963969 ORDER BY rowid + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.63.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -36028797018963969 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary1-2.63.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 
-36028797018963969 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.63.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963969 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.63.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.63.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963969 ORDER BY rowid + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.63.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963969 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary1-2.63.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -36028797018963969 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.63.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963969 ORDER BY a + } +} {55} +do_test boundary1-2.63.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963969 ORDER BY a DESC + } +} {55} +do_test boundary1-2.63.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963969 ORDER BY rowid + } +} {55} +do_test boundary1-2.63.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963969 ORDER BY rowid DESC + } +} {55} +do_test boundary1-2.63.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -36028797018963969 ORDER BY x + } +} {55} +do_test boundary1-2.63.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963969 ORDER BY a + } +} {2 55} +do_test boundary1-2.63.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963969 ORDER BY a DESC + } +} {55 2} +do_test boundary1-2.63.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963969 ORDER BY rowid + } +} {55 2} +do_test boundary1-2.63.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963969 ORDER BY rowid DESC + } +} {2 55} +do_test boundary1-2.63.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -36028797018963969 ORDER BY x + } +} {55 2} +do_test boundary1-2.64.1 { + db eval { + SELECT * FROM t1 WHERE rowid=3 + } +} {5 0000000000000003} +do_test boundary1-2.64.2 { + db eval { + SELECT rowid, a FROM t1 WHERE x='0000000000000003' + } +} {3 5} +do_test boundary1-2.64.3 { + db eval { + SELECT rowid, x FROM t1 WHERE a=5 + } +} {3 0000000000000003} +do_test boundary1-2.64.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 3 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.64.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 3 
ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary1-2.64.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 3 ORDER BY rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.64.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 3 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary1-2.64.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 3 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.64.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary1-2.64.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 3 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary1-2.64.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 3 ORDER BY rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.64.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 3 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary1-2.64.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 3 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.64.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 3 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.64.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary1-2.64.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 3 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary1-2.64.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 3 ORDER BY rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.64.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 3 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.64.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 3 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary1-2.64.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary1-2.64.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 3 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary1-2.64.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 3 ORDER BY rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.64.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 3 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 
29 32 54 53 52 33 38} +do_test boundary1-2.65.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary1-2.65.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > 9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary1-2.65.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > 9.22337303685477580800e+18 ORDER BY rowid + } +} {} +do_test boundary1-2.65.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > 9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.65.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary1-2.65.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary1-2.65.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary1-2.65.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9.22337303685477580800e+18 ORDER BY rowid + } +} {} +do_test boundary1-2.65.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.65.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary1-2.65.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.65.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.65.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < 9.22337303685477580800e+18 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.65.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < 9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.65.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.65.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.65.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.65.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= 
9.22337303685477580800e+18 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.65.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.65.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.66.gt.1 { + db eval { + SELECT a FROM t1 WHERE rowid > -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.66.gt.2 { + db eval { + SELECT a FROM t1 WHERE rowid > -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.66.gt.3 { + db eval { + SELECT a FROM t1 WHERE rowid > -9.22337303685477580800e+18 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.66.gt.4 { + db eval { + SELECT a FROM t1 WHERE rowid > -9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.66.gt.5 { + db eval { + SELECT a FROM t1 WHERE rowid > -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.66.ge.1 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary1-2.66.ge.2 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary1-2.66.ge.3 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9.22337303685477580800e+18 ORDER BY rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary1-2.66.ge.4 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 
24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary1-2.66.ge.5 { + db eval { + SELECT a FROM t1 WHERE rowid >= -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary1-2.66.lt.1 { + db eval { + SELECT a FROM t1 WHERE rowid < -9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary1-2.66.lt.2 { + db eval { + SELECT a FROM t1 WHERE rowid < -9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary1-2.66.lt.3 { + db eval { + SELECT a FROM t1 WHERE rowid < -9.22337303685477580800e+18 ORDER BY rowid + } +} {} +do_test boundary1-2.66.lt.4 { + db eval { + SELECT a FROM t1 WHERE rowid < -9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.66.lt.5 { + db eval { + SELECT a FROM t1 WHERE rowid < -9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary1-2.66.le.1 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary1-2.66.le.2 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary1-2.66.le.3 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9.22337303685477580800e+18 ORDER BY rowid + } +} {} +do_test boundary1-2.66.le.4 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9.22337303685477580800e+18 ORDER BY rowid DESC + } +} {} +do_test boundary1-2.66.le.5 { + db eval { + SELECT a FROM t1 WHERE rowid <= -9.22337303685477580800e+18 ORDER BY x + } +} {} +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary2.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary2.tcl --- sqlite3-3.4.2/test/boundary2.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary2.tcl 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,445 @@ +puts {# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary2.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. +if {![working_64bit_int]} { finish_test; return } +} + +expr srand(0) + +# Generate interesting boundary numbers +# +foreach x { + 0 + 1 + 0x7f + 0x7fff + 0x7fffff + 0x7fffffff + 0x7fffffffff + 0x7fffffffffff + 0x7fffffffffffff + 0x7fffffffffffffff +} { + set x [expr {wide($x)}] + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set boundarynum([expr {-($x+1)}]) 1 + set boundarynum([expr {-($x+2)}]) 1 + set boundarynum([expr {$x+$x+1}]) 1 + set boundarynum([expr {$x+$x+2}]) 1 +} +set x [expr {wide(127)}] +for {set i 1} {$i<=9} {incr i} { + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set x [expr {wide($x*128 + 127)}] +} + +# Scramble the $inlist into a random order. 
+# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# A simple selection sort. Not trying to be efficient. +# +proc sort {inlist} { + set outlist {} + set mn [lindex $inlist 0] + foreach x $inlist { + if {$x<$mn} {set mn $x} + } + set outlist $mn + set mx $mn + while {1} { + set valid 0 + foreach x $inlist { + if {$x>$mx && (!$valid || $mn>$x)} { + set mn $x + set valid 1 + } + } + if {!$valid} break + lappend outlist $mn + set mx $mn + } + return $outlist +} + +# Reverse the order of a list +# +proc reverse {inlist} { + set i [llength $inlist] + set outlist {} + for {incr i -1} {$i>=0} {incr i -1} { + lappend outlist [lindex $inlist $i] + } + return $outlist +} + +set nums1 [scramble [array names boundarynum]] +set nums2 [scramble [array names boundarynum]] + +set tname boundary2 +puts "do_test $tname-1.1 \173" +puts " db eval \173" +puts " CREATE TABLE t1(r INTEGER, a INTEGER, x TEXT);" +set a 0 +foreach r $nums1 { + incr a + set t1ra($r) $a + set t1ar($a) $r + set x [format %08x%08x [expr {wide($r)>>32}] $r] + set t1rx($r) $x + set t1xr($x) $r + puts " INSERT INTO t1 VALUES($r,$a,'$x');" +} +puts " CREATE INDEX t1i1 ON t1(r);" +puts " CREATE INDEX t1i2 ON t1(a);" +puts " CREATE INDEX t1i3 ON t1(x);" +puts " \175" +puts "\175 {}" + +puts "do_test $tname-1.2 \173" +puts " db eval \173" +puts " SELECT count(*) FROM t1" +puts " \175" +puts "\175 {64}" + +set nums3 $nums2 +lappend nums3 9.22337303685477580800e+18 +lappend nums3 -9.22337303685477580800e+18 + +set i 0 +foreach r $nums3 { + incr i + + if {abs($r)<9.22337203685477580800e+18} { + set x $t1rx($r) + set a $t1ra($r) + set r5 $r.5 + set r0 $r.0 + puts "do_test $tname-2.$i.1 \173" + puts " db eval \173" + puts " SELECT * FROM t1 WHERE r=$r" + puts " \175" + puts "\175 {$r $a $x}" + puts "do_test $tname-2.$i.2 \173" + puts " db eval \173" + puts " SELECT r, a FROM t1 WHERE x='$x'" + puts " \175" + puts "\175 {$r $a}" + puts "do_test $tname-2.$i.3 \173" + puts " db eval \173" + puts " SELECT r, x FROM t1 WHERE a=$a" + puts " \175" + puts "\175 {$r $x}" + } + + foreach op {> >= < <=} subno {gt ge lt le} { + + ################################################################ 2.x.y.1 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r" { + lappend rset $rx + lappend aset $t1ra($rx) + } + } + puts "do_test $tname-2.$i.$subno.1 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a" + puts " \175" + puts "\175 {[sort $aset]}" + + ################################################################ 2.x.y.2 + puts "do_test $tname-2.$i.$subno.2 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a DESC" + puts " \175" + puts "\175 {[reverse [sort $aset]]}" + + ################################################################ 2.x.y.3 + set aset {} + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.3 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.4 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.4 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + + 
################################################################ 2.x.y.5 + set aset {} + set xset {} + foreach rx $rset { + lappend xset $t1rx($rx) + } + foreach x [sort $xset] { + set rx $t1xr($x) + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.5 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY x" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.10 + if {abs($r)>9223372036854775808 || [string length $r5]>15} continue + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r0" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.10 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.11 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.11 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + + + ################################################################ 2.x.y.12 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r5" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.12 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.13 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.13 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + } +} + +puts "do_test $tname-3.1 \173" +puts " db eval \173" +puts " DROP INDEX t1i1;" +puts " DROP INDEX t1i2;" +puts " DROP INDEX t1i3;" +puts " \175" +puts "\175 {}" + +set i 0 +foreach r $nums3 { + incr i + + if {abs($r)<9.22337203685477580800e+18} { + set x $t1rx($r) + set a $t1ra($r) + set r5 $r.5 + set r0 $r.0 + puts "do_test $tname-4.$i.1 \173" + puts " db eval \173" + puts " SELECT * FROM t1 WHERE r=$r" + puts " \175" + puts "\175 {$r $a $x}" + puts "do_test $tname-4.$i.2 \173" + puts " db eval \173" + puts " SELECT r, a FROM t1 WHERE x='$x'" + puts " \175" + puts "\175 {$r $a}" + puts "do_test $tname-4.$i.3 \173" + puts " db eval \173" + puts " SELECT r, x FROM t1 WHERE a=$a" + puts " \175" + puts "\175 {$r $x}" + } + + foreach op {> >= < <=} subno {gt ge lt le} { + + ################################################################ 2.x.y.1 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r" { + lappend rset $rx + lappend aset $t1ra($rx) + } + } + puts "do_test $tname-4.$i.$subno.1 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a" + puts " \175" + puts "\175 {[sort $aset]}" + + ################################################################ 2.x.y.2 + puts "do_test $tname-4.$i.$subno.2 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY a DESC" + puts " \175" + puts "\175 {[reverse [sort $aset]]}" + + ################################################################ 2.x.y.3 + set aset {} + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.3 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r 
ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.4 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.4 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.5 + set aset {} + set xset {} + foreach rx $rset { + lappend xset $t1rx($rx) + } + foreach x [sort $xset] { + set rx $t1xr($x) + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.5 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r ORDER BY x" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.10 + if {abs($r)>9223372036854775808 || [string length $r5]>15} continue + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r0" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.10 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.11 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.11 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r0 ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + + + ################################################################ 2.x.y.12 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r5" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.12 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.13 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-4.$i.$subno.13 \173" + puts " db eval \173" + puts " SELECT a FROM t1 WHERE r $op $r5 ORDER BY r DESC" + puts " \175" + puts "\175 {$aset}" + } +} + + +puts {finish_test} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary2.test --- sqlite3-3.4.2/test/boundary2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary2.test 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,15198 @@ +# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary2.test,v 1.2 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. 
+if {![working_64bit_int]} { finish_test; return } + +do_test boundary2-1.1 { + db eval { + CREATE TABLE t1(r INTEGER, a INTEGER, x TEXT); + INSERT INTO t1 VALUES(-8388609,1,'ffffffffff7fffff'); + INSERT INTO t1 VALUES(-36028797018963969,2,'ff7fffffffffffff'); + INSERT INTO t1 VALUES(9223372036854775807,3,'7fffffffffffffff'); + INSERT INTO t1 VALUES(127,4,'000000000000007f'); + INSERT INTO t1 VALUES(3,5,'0000000000000003'); + INSERT INTO t1 VALUES(16777216,6,'0000000001000000'); + INSERT INTO t1 VALUES(4398046511103,7,'000003ffffffffff'); + INSERT INTO t1 VALUES(16383,8,'0000000000003fff'); + INSERT INTO t1 VALUES(16777215,9,'0000000000ffffff'); + INSERT INTO t1 VALUES(281474976710655,10,'0000ffffffffffff'); + INSERT INTO t1 VALUES(-2147483648,11,'ffffffff80000000'); + INSERT INTO t1 VALUES(268435455,12,'000000000fffffff'); + INSERT INTO t1 VALUES(562949953421311,13,'0001ffffffffffff'); + INSERT INTO t1 VALUES(4294967295,14,'00000000ffffffff'); + INSERT INTO t1 VALUES(2097151,15,'00000000001fffff'); + INSERT INTO t1 VALUES(16384,16,'0000000000004000'); + INSERT INTO t1 VALUES(72057594037927935,17,'00ffffffffffffff'); + INSERT INTO t1 VALUES(8388607,18,'00000000007fffff'); + INSERT INTO t1 VALUES(1099511627776,19,'0000010000000000'); + INSERT INTO t1 VALUES(2147483647,20,'000000007fffffff'); + INSERT INTO t1 VALUES(-140737488355329,21,'ffff7fffffffffff'); + INSERT INTO t1 VALUES(34359738368,22,'0000000800000000'); + INSERT INTO t1 VALUES(32767,23,'0000000000007fff'); + INSERT INTO t1 VALUES(8388608,24,'0000000000800000'); + INSERT INTO t1 VALUES(140737488355327,25,'00007fffffffffff'); + INSERT INTO t1 VALUES(281474976710656,26,'0001000000000000'); + INSERT INTO t1 VALUES(36028797018963967,27,'007fffffffffffff'); + INSERT INTO t1 VALUES(72057594037927936,28,'0100000000000000'); + INSERT INTO t1 VALUES(-32769,29,'ffffffffffff7fff'); + INSERT INTO t1 VALUES(255,30,'00000000000000ff'); + INSERT INTO t1 VALUES(4,31,'0000000000000004'); + INSERT INTO t1 VALUES(-32768,32,'ffffffffffff8000'); + INSERT INTO t1 VALUES(-2,33,'fffffffffffffffe'); + INSERT INTO t1 VALUES(140737488355328,34,'0000800000000000'); + INSERT INTO t1 VALUES(549755813888,35,'0000008000000000'); + INSERT INTO t1 VALUES(4294967296,36,'0000000100000000'); + INSERT INTO t1 VALUES(-8388608,37,'ffffffffff800000'); + INSERT INTO t1 VALUES(-1,38,'ffffffffffffffff'); + INSERT INTO t1 VALUES(34359738367,39,'00000007ffffffff'); + INSERT INTO t1 VALUES(268435456,40,'0000000010000000'); + INSERT INTO t1 VALUES(2,41,'0000000000000002'); + INSERT INTO t1 VALUES(2097152,42,'0000000000200000'); + INSERT INTO t1 VALUES(562949953421312,43,'0002000000000000'); + INSERT INTO t1 VALUES(-140737488355328,44,'ffff800000000000'); + INSERT INTO t1 VALUES(36028797018963968,45,'0080000000000000'); + INSERT INTO t1 VALUES(549755813887,46,'0000007fffffffff'); + INSERT INTO t1 VALUES(-2147483649,47,'ffffffff7fffffff'); + INSERT INTO t1 VALUES(65535,48,'000000000000ffff'); + INSERT INTO t1 VALUES(128,49,'0000000000000080'); + INSERT INTO t1 VALUES(32768,50,'0000000000008000'); + INSERT INTO t1 VALUES(2147483648,51,'0000000080000000'); + INSERT INTO t1 VALUES(-3,52,'fffffffffffffffd'); + INSERT INTO t1 VALUES(-128,53,'ffffffffffffff80'); + INSERT INTO t1 VALUES(-129,54,'ffffffffffffff7f'); + INSERT INTO t1 VALUES(-9223372036854775808,55,'8000000000000000'); + INSERT INTO t1 VALUES(4398046511104,56,'0000040000000000'); + INSERT INTO t1 VALUES(1099511627775,57,'000000ffffffffff'); + INSERT INTO t1 VALUES(-549755813889,58,'ffffff7fffffffff'); + INSERT INTO t1 
VALUES(0,59,'0000000000000000'); + INSERT INTO t1 VALUES(1,60,'0000000000000001'); + INSERT INTO t1 VALUES(256,61,'0000000000000100'); + INSERT INTO t1 VALUES(65536,62,'0000000000010000'); + INSERT INTO t1 VALUES(-549755813888,63,'ffffff8000000000'); + INSERT INTO t1 VALUES(-36028797018963968,64,'ff80000000000000'); + CREATE INDEX t1i1 ON t1(r); + CREATE INDEX t1i2 ON t1(a); + CREATE INDEX t1i3 ON t1(x); + } +} {} +do_test boundary2-1.2 { + db eval { + SELECT count(*) FROM t1 + } +} {64} +do_test boundary2-2.1.1 { + db eval { + SELECT * FROM t1 WHERE r=72057594037927935 + } +} {72057594037927935 17 00ffffffffffffff} +do_test boundary2-2.1.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00ffffffffffffff' + } +} {72057594037927935 17} +do_test boundary2-2.1.3 { + db eval { + SELECT r, x FROM t1 WHERE a=17 + } +} {72057594037927935 00ffffffffffffff} +do_test boundary2-2.1.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY a + } +} {3 28} +do_test boundary2-2.1.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY a DESC + } +} {28 3} +do_test boundary2-2.1.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY r + } +} {28 3} +do_test boundary2-2.1.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY r DESC + } +} {3 28} +do_test boundary2-2.1.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY x + } +} {28 3} +do_test boundary2-2.1.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY a + } +} {3 17 28} +do_test boundary2-2.1.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY a DESC + } +} {28 17 3} +do_test boundary2-2.1.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY r + } +} {17 28 3} +do_test boundary2-2.1.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY r DESC + } +} {3 28 17} +do_test boundary2-2.1.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY x + } +} {17 28 3} +do_test boundary2-2.1.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.1.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.1.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary2-2.1.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY r DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.1.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.1.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 
72057594037927935 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.1.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.1.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary2-2.1.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY r DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.1.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.2.1 { + db eval { + SELECT * FROM t1 WHERE r=16384 + } +} {16384 16 0000000000004000} +do_test boundary2-2.2.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000004000' + } +} {16384 16} +do_test boundary2-2.2.3 { + db eval { + SELECT r, x FROM t1 WHERE a=16 + } +} {16384 0000000000004000} +do_test boundary2-2.2.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.2.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.2.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY r + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.2.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary2-2.2.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.2.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.2.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.2.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY r + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.2.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary2-2.2.ge.5 { + db eval { 
+ SELECT a FROM t1 WHERE r >= 16384 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.2.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.2.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary2-2.2.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary2-2.2.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY r DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.2.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.2.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.2.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary2-2.2.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary2-2.2.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY r DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.2.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.3.1 { + db eval { + SELECT * FROM t1 WHERE r=4294967296 + } +} {4294967296 36 0000000100000000} +do_test boundary2-2.3.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000100000000' + } +} {4294967296 36} +do_test boundary2-2.3.3 { + db eval { + SELECT r, x FROM t1 WHERE a=36 + } +} {4294967296 0000000100000000} +do_test boundary2-2.3.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary2-2.3.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.3.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY r + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.3.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary2-2.3.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.3.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-2.3.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.3.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY r + } +} {36 39 22 46 35 57 19 7 56 25 34 
10 26 13 43 27 45 17 28 3} +do_test boundary2-2.3.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary2-2.3.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.3.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.3.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.3.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary2-2.3.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY r DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.3.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.3.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.3.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.3.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary2-2.3.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY r DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.3.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.4.1 { + db eval { + SELECT * FROM t1 WHERE r=16777216 + } +} {16777216 6 0000000001000000} +do_test boundary2-2.4.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000001000000' + } +} {16777216 6} +do_test boundary2-2.4.3 { + db eval { + SELECT r, x FROM t1 WHERE a=6 + } +} {16777216 0000000001000000} +do_test boundary2-2.4.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.4.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary2-2.4.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY r + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.4.gt.4 { + db eval 
{ + SELECT a FROM t1 WHERE r > 16777216 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary2-2.4.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.4.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.4.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary2-2.4.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY r + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.4.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary2-2.4.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.4.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.4.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary2-2.4.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary2-2.4.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY r DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.4.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.4.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.4.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary2-2.4.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary2-2.4.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY r DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.4.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.5.1 { + db eval { + SELECT * FROM t1 WHERE r=-32769 + } +} {-32769 29 ffffffffffff7fff} +do_test boundary2-2.5.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffff7fff' + } +} {-32769 29} +do_test 
boundary2-2.5.3 { + db eval { + SELECT r, x FROM t1 WHERE a=29 + } +} {-32769 ffffffffffff7fff} +do_test boundary2-2.5.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.5.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.5.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY r + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.5.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary2-2.5.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary2-2.5.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.5.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.5.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY r + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.5.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary2-2.5.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary2-2.5.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary2-2.5.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary2-2.5.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-2.5.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY r DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.5.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-2.5.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary2-2.5.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test 
boundary2-2.5.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-2.5.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY r DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.5.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-2.6.1 { + db eval { + SELECT * FROM t1 WHERE r=-140737488355329 + } +} {-140737488355329 21 ffff7fffffffffff} +do_test boundary2-2.6.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffff7fffffffffff' + } +} {-140737488355329 21} +do_test boundary2-2.6.3 { + db eval { + SELECT r, x FROM t1 WHERE a=21 + } +} {-140737488355329 ffff7fffffffffff} +do_test boundary2-2.6.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.6.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.6.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY r + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.6.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary2-2.6.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.6.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.6.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.6.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY r + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.6.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary2-2.6.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 
44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.6.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY a + } +} {2 55 64} +do_test boundary2-2.6.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY a DESC + } +} {64 55 2} +do_test boundary2-2.6.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY r + } +} {55 2 64} +do_test boundary2-2.6.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY r DESC + } +} {64 2 55} +do_test boundary2-2.6.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY x + } +} {55 2 64} +do_test boundary2-2.6.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY a + } +} {2 21 55 64} +do_test boundary2-2.6.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary2-2.6.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY r + } +} {55 2 64 21} +do_test boundary2-2.6.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY r DESC + } +} {21 64 2 55} +do_test boundary2-2.6.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY x + } +} {55 2 64 21} +do_test boundary2-2.7.1 { + db eval { + SELECT * FROM t1 WHERE r=2 + } +} {2 41 0000000000000002} +do_test boundary2-2.7.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000002' + } +} {2 41} +do_test boundary2-2.7.3 { + db eval { + SELECT r, x FROM t1 WHERE a=41 + } +} {2 0000000000000002} +do_test boundary2-2.7.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.7.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.7.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY r + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.7.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary2-2.7.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.7.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.7.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.7.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY r + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.7.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary2-2.7.ge.5 { + db eval 
{ + SELECT a FROM t1 WHERE r >= 2 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.7.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.7.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.7.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary2-2.7.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY r DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.7.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.7.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.7.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.7.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary2-2.7.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY r DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.7.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.8.1 { + db eval { + SELECT * FROM t1 WHERE r=4 + } +} {4 31 0000000000000004} +do_test boundary2-2.8.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000004' + } +} {4 31} +do_test boundary2-2.8.3 { + db eval { + SELECT r, x FROM t1 WHERE a=31 + } +} {4 0000000000000004} +do_test boundary2-2.8.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.8.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-2.8.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY r + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.8.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary2-2.8.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.8.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.8.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-2.8.ge.3 { + 
db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY r + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.8.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary2-2.8.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.8.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.8.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary2-2.8.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary2-2.8.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY r DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.8.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.8.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.8.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary2-2.8.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary2-2.8.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY r DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.8.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.9.1 { + db eval { + SELECT * FROM t1 WHERE r=562949953421311 + } +} {562949953421311 13 0001ffffffffffff} +do_test boundary2-2.9.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0001ffffffffffff' + } +} {562949953421311 13} +do_test boundary2-2.9.3 { + db eval { + SELECT r, x FROM t1 WHERE a=13 + } +} {562949953421311 0001ffffffffffff} +do_test boundary2-2.9.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary2-2.9.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary2-2.9.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY r + } +} {43 27 45 17 28 3} +do_test boundary2-2.9.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY r DESC + } +} {3 28 17 45 27 43} +do_test boundary2-2.9.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary2-2.9.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary2-2.9.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary2-2.9.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 
ORDER BY r + } +} {13 43 27 45 17 28 3} +do_test boundary2-2.9.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY r DESC + } +} {3 28 17 45 27 43 13} +do_test boundary2-2.9.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary2-2.9.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.9.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.9.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary2-2.9.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY r DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.9.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.9.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.9.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.9.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary2-2.9.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY r DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.9.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.10.1 { + db eval { + SELECT * FROM t1 WHERE r=256 + } +} {256 61 0000000000000100} +do_test boundary2-2.10.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000100' + } +} {256 61} +do_test boundary2-2.10.3 { + db eval { + SELECT r, x FROM t1 WHERE a=61 + } +} {256 0000000000000100} +do_test boundary2-2.10.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} 
+do_test boundary2-2.10.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.10.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY r + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.10.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary2-2.10.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.10.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-2.10.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.10.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY r + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.10.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary2-2.10.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.10.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.10.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-2.10.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary2-2.10.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY r DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.10.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.10.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.10.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-2.10.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary2-2.10.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY r DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.10.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 
21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.11.1 { + db eval { + SELECT * FROM t1 WHERE r=34359738368 + } +} {34359738368 22 0000000800000000} +do_test boundary2-2.11.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000800000000' + } +} {34359738368 22} +do_test boundary2-2.11.3 { + db eval { + SELECT r, x FROM t1 WHERE a=22 + } +} {34359738368 0000000800000000} +do_test boundary2-2.11.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-2.11.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.11.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY r + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.11.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary2-2.11.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.11.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-2.11.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.11.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY r + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.11.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary2-2.11.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.11.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.11.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.11.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary2-2.11.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY r DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.11.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.11.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.11.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 
38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.11.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary2-2.11.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY r DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.11.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.12.1 { + db eval { + SELECT * FROM t1 WHERE r=65536 + } +} {65536 62 0000000000010000} +do_test boundary2-2.12.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000010000' + } +} {65536 62} +do_test boundary2-2.12.3 { + db eval { + SELECT r, x FROM t1 WHERE a=62 + } +} {65536 0000000000010000} +do_test boundary2-2.12.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-2.12.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.12.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY r + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.12.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary2-2.12.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.12.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary2-2.12.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.12.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY r + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.12.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary2-2.12.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.12.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.12.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.12.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 
38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary2-2.12.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY r DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.12.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.12.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.12.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.12.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary2-2.12.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY r DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.12.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.13.1 { + db eval { + SELECT * FROM t1 WHERE r=268435456 + } +} {268435456 40 0000000010000000} +do_test boundary2-2.13.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000010000000' + } +} {268435456 40} +do_test boundary2-2.13.3 { + db eval { + SELECT r, x FROM t1 WHERE a=40 + } +} {268435456 0000000010000000} +do_test boundary2-2.13.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-2.13.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-2.13.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY r + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.13.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary2-2.13.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.13.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.13.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-2.13.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY r + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.13.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary2-2.13.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.13.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER 
BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.13.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.13.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary2-2.13.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY r DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.13.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.13.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.13.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.13.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary2-2.13.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY r DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.13.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.14.1 { + db eval { + SELECT * FROM t1 WHERE r=-140737488355328 + } +} {-140737488355328 44 ffff800000000000} +do_test boundary2-2.14.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffff800000000000' + } +} {-140737488355328 44} +do_test boundary2-2.14.3 { + db eval { + SELECT r, x FROM t1 WHERE a=44 + } +} {-140737488355328 ffff800000000000} +do_test boundary2-2.14.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.14.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.14.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY r + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.14.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 
52 53 54 32 29 37 1 11 47 63 58} +do_test boundary2-2.14.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.14.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.14.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.14.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY r + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.14.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary2-2.14.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.14.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY a + } +} {2 21 55 64} +do_test boundary2-2.14.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary2-2.14.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY r + } +} {55 2 64 21} +do_test boundary2-2.14.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY r DESC + } +} {21 64 2 55} +do_test boundary2-2.14.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY x + } +} {55 2 64 21} +do_test boundary2-2.14.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary2-2.14.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary2-2.14.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY r + } +} {55 2 64 21 44} +do_test boundary2-2.14.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY r DESC + } +} {44 21 64 2 55} +do_test boundary2-2.14.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary2-2.15.1 { + db eval { + SELECT * FROM t1 WHERE r=1099511627776 + } +} {1099511627776 19 0000010000000000} +do_test boundary2-2.15.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000010000000000' + } +} {1099511627776 19} +do_test boundary2-2.15.3 { + db eval { + SELECT r, x FROM t1 WHERE a=19 + } +} {1099511627776 0000010000000000} +do_test boundary2-2.15.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-2.15.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 
ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary2-2.15.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY r + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.15.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary2-2.15.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.15.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary2-2.15.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.15.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY r + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.15.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary2-2.15.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.15.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.15.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.15.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary2-2.15.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY r DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.15.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.15.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.15.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.15.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary2-2.15.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY r DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.15.le.5 { + db 
eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.16.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY a + } +} {} +do_test boundary2-2.16.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY a DESC + } +} {} +do_test boundary2-2.16.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY r + } +} {} +do_test boundary2-2.16.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY r DESC + } +} {} +do_test boundary2-2.16.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY x + } +} {} +do_test boundary2-2.16.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY a + } +} {3} +do_test boundary2-2.16.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY a DESC + } +} {3} +do_test boundary2-2.16.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY r + } +} {3} +do_test boundary2-2.16.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY r DESC + } +} {3} +do_test boundary2-2.16.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY x + } +} {3} +do_test boundary2-2.16.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.16.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.16.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary2-2.16.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY r DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.16.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.16.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.16.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.16.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY r + 
} +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.16.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.16.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.17.1 { + db eval { + SELECT * FROM t1 WHERE r=32768 + } +} {32768 50 0000000000008000} +do_test boundary2-2.17.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000008000' + } +} {32768 50} +do_test boundary2-2.17.3 { + db eval { + SELECT r, x FROM t1 WHERE a=50 + } +} {32768 0000000000008000} +do_test boundary2-2.17.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary2-2.17.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.17.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY r + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.17.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary2-2.17.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.17.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.17.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.17.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY r + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.17.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary2-2.17.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.17.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.17.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.17.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY r + } +} {55 2 64 
21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary2-2.17.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY r DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.17.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.17.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.17.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.17.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary2-2.17.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY r DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.17.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.18.1 { + db eval { + SELECT * FROM t1 WHERE r=-36028797018963968 + } +} {-36028797018963968 64 ff80000000000000} +do_test boundary2-2.18.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ff80000000000000' + } +} {-36028797018963968 64} +do_test boundary2-2.18.3 { + db eval { + SELECT r, x FROM t1 WHERE a=64 + } +} {-36028797018963968 ff80000000000000} +do_test boundary2-2.18.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.18.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.18.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY r + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.18.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary2-2.18.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.18.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.18.ge.2 { + db eval { + SELECT a FROM 
t1 WHERE r >= -36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.18.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY r + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.18.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary2-2.18.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.18.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY a + } +} {2 55} +do_test boundary2-2.18.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY a DESC + } +} {55 2} +do_test boundary2-2.18.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY r + } +} {55 2} +do_test boundary2-2.18.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY r DESC + } +} {2 55} +do_test boundary2-2.18.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY x + } +} {55 2} +do_test boundary2-2.18.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY a + } +} {2 55 64} +do_test boundary2-2.18.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY a DESC + } +} {64 55 2} +do_test boundary2-2.18.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY r + } +} {55 2 64} +do_test boundary2-2.18.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY r DESC + } +} {64 2 55} +do_test boundary2-2.18.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY x + } +} {55 2 64} +do_test boundary2-2.19.1 { + db eval { + SELECT * FROM t1 WHERE r=65535 + } +} {65535 48 000000000000ffff} +do_test boundary2-2.19.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000000ffff' + } +} {65535 48} +do_test boundary2-2.19.3 { + db eval { + SELECT r, x FROM t1 WHERE a=48 + } +} {65535 000000000000ffff} +do_test boundary2-2.19.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary2-2.19.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.19.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY r + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.19.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary2-2.19.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 
56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.19.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary2-2.19.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.19.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY r + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.19.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary2-2.19.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.19.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.19.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.19.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary2-2.19.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY r DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.19.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.19.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.19.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.19.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary2-2.19.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY r DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.19.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.20.1 { + db eval { + SELECT * FROM t1 WHERE r=4294967295 + } +} {4294967295 14 00000000ffffffff} +do_test boundary2-2.20.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000ffffffff' + } +} {4294967295 14} +do_test boundary2-2.20.3 { + db eval { + SELECT r, x FROM t1 WHERE a=14 + } +} {4294967295 00000000ffffffff} +do_test boundary2-2.20.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-2.20.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 
27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.20.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY r + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.20.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary2-2.20.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.20.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-2.20.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-2.20.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY r + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.20.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary2-2.20.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.20.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.20.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.20.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary2-2.20.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY r DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.20.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.20.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.20.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.20.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary2-2.20.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY r DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.20.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 
62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.21.1 { + db eval { + SELECT * FROM t1 WHERE r=1099511627775 + } +} {1099511627775 57 000000ffffffffff} +do_test boundary2-2.21.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000ffffffffff' + } +} {1099511627775 57} +do_test boundary2-2.21.3 { + db eval { + SELECT r, x FROM t1 WHERE a=57 + } +} {1099511627775 000000ffffffffff} +do_test boundary2-2.21.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary2-2.21.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.21.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY r + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.21.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary2-2.21.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.21.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary2-2.21.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.21.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY r + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.21.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary2-2.21.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.21.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.21.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.21.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary2-2.21.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY r DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.21.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.21.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.21.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 
59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.21.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary2-2.21.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY r DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.21.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.22.1 { + db eval { + SELECT * FROM t1 WHERE r=-8388608 + } +} {-8388608 37 ffffffffff800000} +do_test boundary2-2.22.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffff800000' + } +} {-8388608 37} +do_test boundary2-2.22.3 { + db eval { + SELECT r, x FROM t1 WHERE a=37 + } +} {-8388608 ffffffffff800000} +do_test boundary2-2.22.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.22.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.22.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY r + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.22.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary2-2.22.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary2-2.22.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.22.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.22.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY r + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.22.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test 
boundary2-2.22.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary2-2.22.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary2-2.22.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary2-2.22.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-2.22.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY r DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.22.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-2.22.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary2-2.22.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary2-2.22.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-2.22.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY r DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.22.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-2.23.1 { + db eval { + SELECT * FROM t1 WHERE r=549755813888 + } +} {549755813888 35 0000008000000000} +do_test boundary2-2.23.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000008000000000' + } +} {549755813888 35} +do_test boundary2-2.23.3 { + db eval { + SELECT r, x FROM t1 WHERE a=35 + } +} {549755813888 0000008000000000} +do_test boundary2-2.23.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary2-2.23.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.23.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY r + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.23.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary2-2.23.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.23.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary2-2.23.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.23.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY r + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.23.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary2-2.23.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.23.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY a + } +} {1 2 
4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.23.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.23.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary2-2.23.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY r DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.23.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.23.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.23.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.23.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary2-2.23.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY r DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.23.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.24.1 { + db eval { + SELECT * FROM t1 WHERE r=8388607 + } +} {8388607 18 00000000007fffff} +do_test boundary2-2.24.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000007fffff' + } +} {8388607 18} +do_test boundary2-2.24.3 { + db eval { + SELECT r, x FROM t1 WHERE a=18 + } +} {8388607 00000000007fffff} +do_test boundary2-2.24.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.24.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.24.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY r + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.24.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary2-2.24.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 
56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.24.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.24.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.24.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY r + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.24.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary2-2.24.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.24.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.24.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-2.24.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary2-2.24.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY r DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.24.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.24.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.24.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-2.24.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary2-2.24.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY r DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.24.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.25.1 { + db eval { + SELECT * FROM t1 WHERE r=-3 + } +} {-3 52 fffffffffffffffd} +do_test boundary2-2.25.2 { + db eval { + SELECT r, a FROM t1 WHERE x='fffffffffffffffd' + } +} {-3 52} +do_test boundary2-2.25.3 { + db eval { + SELECT r, x FROM t1 WHERE a=52 + } +} {-3 fffffffffffffffd} +do_test boundary2-2.25.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test 
boundary2-2.25.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.25.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY r + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.25.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary2-2.25.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary2-2.25.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary2-2.25.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.25.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY r + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.25.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary2-2.25.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary2-2.25.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary2-2.25.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.25.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-2.25.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY r DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.25.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-2.25.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.25.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.25.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-2.25.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY r DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.25.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test 
boundary2-2.26.1 { + db eval { + SELECT * FROM t1 WHERE r=0 + } +} {0 59 0000000000000000} +do_test boundary2-2.26.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000000' + } +} {0 59} +do_test boundary2-2.26.3 { + db eval { + SELECT r, x FROM t1 WHERE a=59 + } +} {0 0000000000000000} +do_test boundary2-2.26.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary2-2.26.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.26.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY r + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.26.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary2-2.26.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.26.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-2.26.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.26.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY r + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.26.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary2-2.26.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.26.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.26.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.26.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.26.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY r DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.26.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.26.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary2-2.26.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY a DESC + } +} {64 63 59 
58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.26.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary2-2.26.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY r DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.26.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.27.1 { + db eval { + SELECT * FROM t1 WHERE r=-1 + } +} {-1 38 ffffffffffffffff} +do_test boundary2-2.27.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffffff' + } +} {-1 38} +do_test boundary2-2.27.3 { + db eval { + SELECT r, x FROM t1 WHERE a=38 + } +} {-1 ffffffffffffffff} +do_test boundary2-2.27.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-2.27.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.27.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY r + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.27.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary2-2.27.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.27.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-2.27.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.27.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY r + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.27.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary2-2.27.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary2-2.27.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.27.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary2-2.27.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} 
+do_test boundary2-2.27.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY r DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.27.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-2.27.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.27.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.27.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.27.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY r DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.27.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.28.1 { + db eval { + SELECT * FROM t1 WHERE r=-2 + } +} {-2 33 fffffffffffffffe} +do_test boundary2-2.28.2 { + db eval { + SELECT r, a FROM t1 WHERE x='fffffffffffffffe' + } +} {-2 33} +do_test boundary2-2.28.3 { + db eval { + SELECT r, x FROM t1 WHERE a=33 + } +} {-2 fffffffffffffffe} +do_test boundary2-2.28.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-2.28.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.28.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY r + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.28.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary2-2.28.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary2-2.28.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-2.28.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.28.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY r + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.28.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary2-2.28.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY x + } +} {59 60 41 5 
31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary2-2.28.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.28.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.28.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-2.28.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY r DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.28.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-2.28.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-2.28.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary2-2.28.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-2.28.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY r DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.28.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-2.29.1 { + db eval { + SELECT * FROM t1 WHERE r=2097152 + } +} {2097152 42 0000000000200000} +do_test boundary2-2.29.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000200000' + } +} {2097152 42} +do_test boundary2-2.29.3 { + db eval { + SELECT r, x FROM t1 WHERE a=42 + } +} {2097152 0000000000200000} +do_test boundary2-2.29.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.29.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.29.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY r + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.29.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary2-2.29.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.29.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-2.29.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.29.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY r + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.29.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 
39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary2-2.29.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.29.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.29.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-2.29.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary2-2.29.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY r DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.29.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.29.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.29.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-2.29.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary2-2.29.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY r DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.29.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.30.1 { + db eval { + SELECT * FROM t1 WHERE r=128 + } +} {128 49 0000000000000080} +do_test boundary2-2.30.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000080' + } +} {128 49} +do_test boundary2-2.30.3 { + db eval { + SELECT r, x FROM t1 WHERE a=49 + } +} {128 0000000000000080} +do_test boundary2-2.30.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-2.30.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.30.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY r + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.30.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary2-2.30.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 
19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.30.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.30.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.30.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY r + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.30.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary2-2.30.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.30.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.30.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-2.30.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary2-2.30.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY r DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.30.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.30.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.30.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-2.30.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary2-2.30.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY r DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.30.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.31.1 { + db eval { + SELECT * FROM t1 WHERE r=255 + } +} {255 30 00000000000000ff} +do_test boundary2-2.31.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000000000ff' + } +} {255 30} +do_test boundary2-2.31.3 { + db eval { + SELECT r, x FROM t1 WHERE a=30 + } +} {255 00000000000000ff} +do_test boundary2-2.31.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-2.31.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.31.gt.3 { + db 
eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY r + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.31.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary2-2.31.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.31.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-2.31.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.31.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY r + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.31.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary2-2.31.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.31.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.31.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-2.31.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary2-2.31.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY r DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.31.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.31.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.31.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-2.31.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary2-2.31.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY r DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.31.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.32.1 { + db eval { + SELECT * FROM t1 WHERE r=-2147483648 + } +} {-2147483648 11 ffffffff80000000} +do_test boundary2-2.32.2 { + db eval { + SELECT r, a FROM t1 WHERE 
x='ffffffff80000000' + } +} {-2147483648 11} +do_test boundary2-2.32.3 { + db eval { + SELECT r, x FROM t1 WHERE a=11 + } +} {-2147483648 ffffffff80000000} +do_test boundary2-2.32.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.32.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.32.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY r + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.32.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary2-2.32.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.32.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.32.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.32.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY r + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.32.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary2-2.32.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.32.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary2-2.32.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary2-2.32.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-2.32.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY r DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary2-2.32.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-2.32.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY a + } +} 
{2 11 21 44 47 55 58 63 64} +do_test boundary2-2.32.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary2-2.32.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-2.32.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY r DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary2-2.32.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-2.33.1 { + db eval { + SELECT * FROM t1 WHERE r=34359738367 + } +} {34359738367 39 00000007ffffffff} +do_test boundary2-2.33.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000007ffffffff' + } +} {34359738367 39} +do_test boundary2-2.33.3 { + db eval { + SELECT r, x FROM t1 WHERE a=39 + } +} {34359738367 00000007ffffffff} +do_test boundary2-2.33.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-2.33.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.33.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY r + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.33.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary2-2.33.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.33.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary2-2.33.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-2.33.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY r + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.33.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary2-2.33.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.33.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.33.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.33.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary2-2.33.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY r DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.33.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY x + } +} {59 60 
41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.33.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.33.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.33.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary2-2.33.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY r DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.33.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.34.1 { + db eval { + SELECT * FROM t1 WHERE r=-549755813889 + } +} {-549755813889 58 ffffff7fffffffff} +do_test boundary2-2.34.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffff7fffffffff' + } +} {-549755813889 58} +do_test boundary2-2.34.3 { + db eval { + SELECT r, x FROM t1 WHERE a=58 + } +} {-549755813889 ffffff7fffffffff} +do_test boundary2-2.34.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary2-2.34.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.34.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY r + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.34.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary2-2.34.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.34.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-2.34.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 
16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.34.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY r + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.34.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary2-2.34.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.34.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary2-2.34.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary2-2.34.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY r + } +} {55 2 64 21 44} +do_test boundary2-2.34.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY r DESC + } +} {44 21 64 2 55} +do_test boundary2-2.34.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary2-2.34.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary2-2.34.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary2-2.34.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY r + } +} {55 2 64 21 44 58} +do_test boundary2-2.34.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY r DESC + } +} {58 44 21 64 2 55} +do_test boundary2-2.34.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary2-2.35.1 { + db eval { + SELECT * FROM t1 WHERE r=-32768 + } +} {-32768 32 ffffffffffff8000} +do_test boundary2-2.35.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffff8000' + } +} {-32768 32} +do_test boundary2-2.35.3 { + db eval { + SELECT r, x FROM t1 WHERE a=32 + } +} {-32768 ffffffffffff8000} +do_test boundary2-2.35.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.35.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.35.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY r + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.35.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary2-2.35.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 
23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary2-2.35.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.35.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.35.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY r + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.35.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary2-2.35.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary2-2.35.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary2-2.35.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary2-2.35.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-2.35.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY r DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.35.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-2.35.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary2-2.35.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.35.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-2.35.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY r DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.35.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-2.36.1 { + db eval { + SELECT * FROM t1 WHERE r=2147483647 + } +} {2147483647 20 000000007fffffff} +do_test boundary2-2.36.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000007fffffff' + } +} {2147483647 20} +do_test boundary2-2.36.3 { + db eval { + SELECT r, x FROM t1 WHERE a=20 + } +} {2147483647 000000007fffffff} +do_test boundary2-2.36.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-2.36.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-2.36.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY r + } +} {51 14 36 39 22 46 35 57 19 7 56 
25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.36.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary2-2.36.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.36.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-2.36.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-2.36.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY r + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.36.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary2-2.36.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.36.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.36.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.36.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary2-2.36.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY r DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.36.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.36.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.36.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.36.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary2-2.36.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY r DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.36.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.37.1 { + db eval { + SELECT * FROM t1 WHERE r=-129 + } +} {-129 54 
ffffffffffffff7f} +do_test boundary2-2.37.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffff7f' + } +} {-129 54} +do_test boundary2-2.37.3 { + db eval { + SELECT r, x FROM t1 WHERE a=54 + } +} {-129 ffffffffffffff7f} +do_test boundary2-2.37.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary2-2.37.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.37.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY r + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.37.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary2-2.37.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary2-2.37.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.37.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.37.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY r + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.37.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary2-2.37.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary2-2.37.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary2-2.37.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.37.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-2.37.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY r DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.37.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-2.37.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary2-2.37.le.2 { + db eval { + SELECT a FROM t1 WHERE 
r <= -129 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.37.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-2.37.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY r DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.37.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-2.38.1 { + db eval { + SELECT * FROM t1 WHERE r=-128 + } +} {-128 53 ffffffffffffff80} +do_test boundary2-2.38.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffff80' + } +} {-128 53} +do_test boundary2-2.38.3 { + db eval { + SELECT r, x FROM t1 WHERE a=53 + } +} {-128 ffffffffffffff80} +do_test boundary2-2.38.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary2-2.38.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.38.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY r + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.38.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary2-2.38.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary2-2.38.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary2-2.38.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.38.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY r + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.38.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary2-2.38.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary2-2.38.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary2-2.38.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.38.lt.3 { + db eval { + SELECT a 
FROM t1 WHERE r < -128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-2.38.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY r DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.38.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-2.38.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary2-2.38.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary2-2.38.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-2.38.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY r DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.38.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-2.39.1 { + db eval { + SELECT * FROM t1 WHERE r=72057594037927936 + } +} {72057594037927936 28 0100000000000000} +do_test boundary2-2.39.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0100000000000000' + } +} {72057594037927936 28} +do_test boundary2-2.39.3 { + db eval { + SELECT r, x FROM t1 WHERE a=28 + } +} {72057594037927936 0100000000000000} +do_test boundary2-2.39.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY a + } +} {3} +do_test boundary2-2.39.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY a DESC + } +} {3} +do_test boundary2-2.39.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY r + } +} {3} +do_test boundary2-2.39.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY r DESC + } +} {3} +do_test boundary2-2.39.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY x + } +} {3} +do_test boundary2-2.39.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY a + } +} {3 28} +do_test boundary2-2.39.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY a DESC + } +} {28 3} +do_test boundary2-2.39.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY r + } +} {28 3} +do_test boundary2-2.39.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY r DESC + } +} {3 28} +do_test boundary2-2.39.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY x + } +} {28 3} +do_test boundary2-2.39.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.39.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.39.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary2-2.39.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 
72057594037927936 ORDER BY r DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.39.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.39.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.39.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.39.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary2-2.39.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY r DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.39.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.40.1 { + db eval { + SELECT * FROM t1 WHERE r=2147483648 + } +} {2147483648 51 0000000080000000} +do_test boundary2-2.40.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000080000000' + } +} {2147483648 51} +do_test boundary2-2.40.3 { + db eval { + SELECT r, x FROM t1 WHERE a=51 + } +} {2147483648 0000000080000000} +do_test boundary2-2.40.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-2.40.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-2.40.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY r + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.40.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary2-2.40.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.40.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-2.40.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-2.40.ge.3 { + db eval { + SELECT a FROM t1 
WHERE r >= 2147483648 ORDER BY r + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.40.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary2-2.40.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.40.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.40.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.40.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary2-2.40.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY r DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.40.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.40.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.40.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.40.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary2-2.40.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY r DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.40.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.41.1 { + db eval { + SELECT * FROM t1 WHERE r=549755813887 + } +} {549755813887 46 0000007fffffffff} +do_test boundary2-2.41.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000007fffffffff' + } +} {549755813887 46} +do_test boundary2-2.41.3 { + db eval { + SELECT r, x FROM t1 WHERE a=46 + } +} {549755813887 0000007fffffffff} +do_test boundary2-2.41.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary2-2.41.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.41.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY r + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.41.gt.4 { + db eval 
{ + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary2-2.41.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.41.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-2.41.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-2.41.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY r + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.41.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary2-2.41.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.41.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.41.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.41.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary2-2.41.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY r DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.41.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.41.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.41.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.41.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary2-2.41.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY r DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.41.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.42.1 { + db eval { + SELECT * FROM t1 WHERE 
r=-549755813888 + } +} {-549755813888 63 ffffff8000000000} +do_test boundary2-2.42.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffff8000000000' + } +} {-549755813888 63} +do_test boundary2-2.42.3 { + db eval { + SELECT r, x FROM t1 WHERE a=63 + } +} {-549755813888 ffffff8000000000} +do_test boundary2-2.42.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.42.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.42.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY r + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.42.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary2-2.42.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.42.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary2-2.42.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.42.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY r + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.42.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary2-2.42.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.42.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary2-2.42.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary2-2.42.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY r + } +} {55 2 64 21 44 58} +do_test boundary2-2.42.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY r DESC + } +} {58 44 21 64 2 55} +do_test boundary2-2.42.lt.5 { + db 
eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary2-2.42.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary2-2.42.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary2-2.42.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63} +do_test boundary2-2.42.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY r DESC + } +} {63 58 44 21 64 2 55} +do_test boundary2-2.42.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary2-2.43.1 { + db eval { + SELECT * FROM t1 WHERE r=281474976710655 + } +} {281474976710655 10 0000ffffffffffff} +do_test boundary2-2.43.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000ffffffffffff' + } +} {281474976710655 10} +do_test boundary2-2.43.3 { + db eval { + SELECT r, x FROM t1 WHERE a=10 + } +} {281474976710655 0000ffffffffffff} +do_test boundary2-2.43.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary2-2.43.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary2-2.43.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY r + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-2.43.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary2-2.43.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-2.43.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary2-2.43.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary2-2.43.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY r + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-2.43.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary2-2.43.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-2.43.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.43.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.43.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary2-2.43.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY r DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.43.lt.5 { + db eval { 
+ SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.43.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.43.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.43.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary2-2.43.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY r DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.43.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.44.1 { + db eval { + SELECT * FROM t1 WHERE r=4398046511103 + } +} {4398046511103 7 000003ffffffffff} +do_test boundary2-2.44.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000003ffffffffff' + } +} {4398046511103 7} +do_test boundary2-2.44.3 { + db eval { + SELECT r, x FROM t1 WHERE a=7 + } +} {4398046511103 000003ffffffffff} +do_test boundary2-2.44.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-2.44.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-2.44.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY r + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.44.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary2-2.44.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.44.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-2.44.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary2-2.44.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY r + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.44.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary2-2.44.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.44.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 
39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.44.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.44.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary2-2.44.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY r DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.44.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.44.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.44.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.44.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary2-2.44.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY r DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.44.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.45.1 { + db eval { + SELECT * FROM t1 WHERE r=268435455 + } +} {268435455 12 000000000fffffff} +do_test boundary2-2.45.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000fffffff' + } +} {268435455 12} +do_test boundary2-2.45.3 { + db eval { + SELECT r, x FROM t1 WHERE a=12 + } +} {268435455 000000000fffffff} +do_test boundary2-2.45.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.45.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-2.45.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY r + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.45.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary2-2.45.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 
26 13 43 27 45 17 28 3} +do_test boundary2-2.45.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.45.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary2-2.45.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY r + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.45.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary2-2.45.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.45.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.45.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary2-2.45.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary2-2.45.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY r DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.45.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.45.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.45.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-2.45.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary2-2.45.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY r DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.45.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.46.1 { + db eval { + SELECT * FROM t1 WHERE r=-9223372036854775808 + } +} {-9223372036854775808 55 8000000000000000} +do_test boundary2-2.46.2 { + db eval { + SELECT r, a FROM t1 WHERE x='8000000000000000' + } +} {-9223372036854775808 55} +do_test boundary2-2.46.3 { + db eval { + SELECT r, x FROM t1 WHERE a=55 + } +} {-9223372036854775808 8000000000000000} +do_test boundary2-2.46.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER 
BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.46.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.46.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY r + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.46.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary2-2.46.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.46.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.46.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.46.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.46.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.46.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.46.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY a + } +} {} +do_test boundary2-2.46.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY a DESC + } +} {} +do_test boundary2-2.46.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY r + } +} {} +do_test boundary2-2.46.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY r DESC + } +} {} +do_test boundary2-2.46.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY x + } +} {} +do_test boundary2-2.46.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY 
a + } +} {55} +do_test boundary2-2.46.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY a DESC + } +} {55} +do_test boundary2-2.46.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY r + } +} {55} +do_test boundary2-2.46.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY r DESC + } +} {55} +do_test boundary2-2.46.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY x + } +} {55} +do_test boundary2-2.47.1 { + db eval { + SELECT * FROM t1 WHERE r=562949953421312 + } +} {562949953421312 43 0002000000000000} +do_test boundary2-2.47.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0002000000000000' + } +} {562949953421312 43} +do_test boundary2-2.47.3 { + db eval { + SELECT r, x FROM t1 WHERE a=43 + } +} {562949953421312 0002000000000000} +do_test boundary2-2.47.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary2-2.47.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary2-2.47.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY r + } +} {27 45 17 28 3} +do_test boundary2-2.47.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY r DESC + } +} {3 28 17 45 27} +do_test boundary2-2.47.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary2-2.47.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary2-2.47.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary2-2.47.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY r + } +} {43 27 45 17 28 3} +do_test boundary2-2.47.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY r DESC + } +} {3 28 17 45 27 43} +do_test boundary2-2.47.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary2-2.47.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.47.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.47.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary2-2.47.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY r DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.47.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.47.le.1 { + db eval { + SELECT a FROM t1 
WHERE r <= 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.47.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.47.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary2-2.47.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY r DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.47.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.48.1 { + db eval { + SELECT * FROM t1 WHERE r=-8388609 + } +} {-8388609 1 ffffffffff7fffff} +do_test boundary2-2.48.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffff7fffff' + } +} {-8388609 1} +do_test boundary2-2.48.3 { + db eval { + SELECT r, x FROM t1 WHERE a=1 + } +} {-8388609 ffffffffff7fffff} +do_test boundary2-2.48.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.48.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.48.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY r + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.48.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary2-2.48.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary2-2.48.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.48.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.48.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 
-8388609 ORDER BY r + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.48.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary2-2.48.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.48.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary2-2.48.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary2-2.48.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-2.48.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY r DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary2-2.48.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-2.48.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary2-2.48.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary2-2.48.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-2.48.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY r DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.48.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-2.49.1 { + db eval { + SELECT * FROM t1 WHERE r=16777215 + } +} {16777215 9 0000000000ffffff} +do_test boundary2-2.49.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000ffffff' + } +} {16777215 9} +do_test boundary2-2.49.3 { + db eval { + SELECT r, x FROM t1 WHERE a=9 + } +} {16777215 0000000000ffffff} +do_test boundary2-2.49.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.49.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary2-2.49.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY r + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.49.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary2-2.49.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.49.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.49.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 
27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.49.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY r + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.49.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary2-2.49.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.49.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.49.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-2.49.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary2-2.49.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY r DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.49.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.49.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.49.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary2-2.49.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary2-2.49.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY r DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.49.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.50.1 { + db eval { + SELECT * FROM t1 WHERE r=8388608 + } +} {8388608 24 0000000000800000} +do_test boundary2-2.50.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000800000' + } +} {8388608 24} +do_test boundary2-2.50.3 { + db eval { + SELECT r, x FROM t1 WHERE a=24 + } +} {8388608 0000000000800000} +do_test boundary2-2.50.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.50.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.50.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY r + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} 
+do_test boundary2-2.50.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary2-2.50.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.50.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-2.50.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.50.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY r + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.50.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary2-2.50.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.50.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.50.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-2.50.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary2-2.50.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY r DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.50.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.50.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.50.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-2.50.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary2-2.50.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY r DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.50.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.51.1 { + db eval { + SELECT * FROM t1 WHERE r=16383 + } +} {16383 8 0000000000003fff} +do_test boundary2-2.51.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000003fff' + } +} {16383 
8} +do_test boundary2-2.51.3 { + db eval { + SELECT r, x FROM t1 WHERE a=8 + } +} {16383 0000000000003fff} +do_test boundary2-2.51.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.51.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.51.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY r + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.51.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary2-2.51.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.51.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.51.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.51.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY r + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.51.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary2-2.51.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.51.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.51.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-2.51.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary2-2.51.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY r DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.51.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.51.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.51.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary2-2.51.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 
38 59 60 41 5 31 4 49 30 61 8} +do_test boundary2-2.51.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY r DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.51.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.52.1 { + db eval { + SELECT * FROM t1 WHERE r=140737488355328 + } +} {140737488355328 34 0000800000000000} +do_test boundary2-2.52.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000800000000000' + } +} {140737488355328 34} +do_test boundary2-2.52.3 { + db eval { + SELECT r, x FROM t1 WHERE a=34 + } +} {140737488355328 0000800000000000} +do_test boundary2-2.52.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary2-2.52.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary2-2.52.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY r + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-2.52.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary2-2.52.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-2.52.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary2-2.52.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary2-2.52.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY r + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.52.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary2-2.52.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.52.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.52.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.52.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary2-2.52.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY r DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.52.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.52.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY a + 
} +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.52.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.52.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary2-2.52.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY r DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.52.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.53.1 { + db eval { + SELECT * FROM t1 WHERE r=2097151 + } +} {2097151 15 00000000001fffff} +do_test boundary2-2.53.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000001fffff' + } +} {2097151 15} +do_test boundary2-2.53.3 { + db eval { + SELECT r, x FROM t1 WHERE a=15 + } +} {2097151 00000000001fffff} +do_test boundary2-2.53.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-2.53.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-2.53.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY r + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.53.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary2-2.53.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.53.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-2.53.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.53.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY r + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.53.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary2-2.53.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.53.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY 
a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.53.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.53.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary2-2.53.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY r DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.53.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.53.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-2.53.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-2.53.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary2-2.53.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY r DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.53.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.54.1 { + db eval { + SELECT * FROM t1 WHERE r=140737488355327 + } +} {140737488355327 25 00007fffffffffff} +do_test boundary2-2.54.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00007fffffffffff' + } +} {140737488355327 25} +do_test boundary2-2.54.3 { + db eval { + SELECT r, x FROM t1 WHERE a=25 + } +} {140737488355327 00007fffffffffff} +do_test boundary2-2.54.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary2-2.54.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary2-2.54.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY r + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.54.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary2-2.54.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.54.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary2-2.54.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-2.54.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY r + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.54.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary2-2.54.ge.5 { + 
db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.54.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.54.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.54.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary2-2.54.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY r DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.54.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.54.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.54.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.54.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary2-2.54.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY r DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.54.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.55.1 { + db eval { + SELECT * FROM t1 WHERE r=281474976710656 + } +} {281474976710656 26 0001000000000000} +do_test boundary2-2.55.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0001000000000000' + } +} {281474976710656 26} +do_test boundary2-2.55.3 { + db eval { + SELECT r, x FROM t1 WHERE a=26 + } +} {281474976710656 0001000000000000} +do_test boundary2-2.55.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary2-2.55.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary2-2.55.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY r + } +} {13 43 27 45 17 28 3} +do_test boundary2-2.55.gt.4 { + db eval { + SELECT a FROM t1 WHERE 
r > 281474976710656 ORDER BY r DESC + } +} {3 28 17 45 27 43 13} +do_test boundary2-2.55.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary2-2.55.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary2-2.55.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary2-2.55.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY r + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-2.55.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary2-2.55.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-2.55.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.55.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.55.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary2-2.55.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY r DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.55.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.55.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.55.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.55.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary2-2.55.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY r DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.55.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 
11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.56.1 { + db eval { + SELECT * FROM t1 WHERE r=32767 + } +} {32767 23 0000000000007fff} +do_test boundary2-2.56.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000007fff' + } +} {32767 23} +do_test boundary2-2.56.3 { + db eval { + SELECT r, x FROM t1 WHERE a=23 + } +} {32767 0000000000007fff} +do_test boundary2-2.56.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.56.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.56.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY r + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.56.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary2-2.56.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.56.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-2.56.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-2.56.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY r + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.56.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary2-2.56.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.56.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.56.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary2-2.56.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary2-2.56.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY r DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.56.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.56.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-2.56.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY a DESC + } +} {64 63 
61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-2.56.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary2-2.56.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY r DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.56.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.57.1 { + db eval { + SELECT * FROM t1 WHERE r=127 + } +} {127 4 000000000000007f} +do_test boundary2-2.57.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000000007f' + } +} {127 4} +do_test boundary2-2.57.3 { + db eval { + SELECT r, x FROM t1 WHERE a=4 + } +} {127 000000000000007f} +do_test boundary2-2.57.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.57.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-2.57.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY r + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.57.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary2-2.57.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.57.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.57.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-2.57.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY r + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.57.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary2-2.57.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.57.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.57.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary2-2.57.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 
53 52 33 38 59 60 41 5 31} +do_test boundary2-2.57.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY r DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.57.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.57.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.57.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-2.57.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary2-2.57.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY r DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.57.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.58.1 { + db eval { + SELECT * FROM t1 WHERE r=36028797018963967 + } +} {36028797018963967 27 007fffffffffffff} +do_test boundary2-2.58.2 { + db eval { + SELECT r, a FROM t1 WHERE x='007fffffffffffff' + } +} {36028797018963967 27} +do_test boundary2-2.58.3 { + db eval { + SELECT r, x FROM t1 WHERE a=27 + } +} {36028797018963967 007fffffffffffff} +do_test boundary2-2.58.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY a + } +} {3 17 28 45} +do_test boundary2-2.58.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary2-2.58.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY r + } +} {45 17 28 3} +do_test boundary2-2.58.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY r DESC + } +} {3 28 17 45} +do_test boundary2-2.58.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY x + } +} {45 17 28 3} +do_test boundary2-2.58.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary2-2.58.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary2-2.58.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY r + } +} {27 45 17 28 3} +do_test boundary2-2.58.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY r DESC + } +} {3 28 17 45 27} +do_test boundary2-2.58.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary2-2.58.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.58.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.58.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 
61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary2-2.58.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY r DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.58.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.58.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.58.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.58.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary2-2.58.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY r DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.58.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.59.1 { + db eval { + SELECT * FROM t1 WHERE r=4398046511104 + } +} {4398046511104 56 0000040000000000} +do_test boundary2-2.59.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000040000000000' + } +} {4398046511104 56} +do_test boundary2-2.59.3 { + db eval { + SELECT r, x FROM t1 WHERE a=56 + } +} {4398046511104 0000040000000000} +do_test boundary2-2.59.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary2-2.59.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-2.59.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY r + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.59.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary2-2.59.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.59.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-2.59.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-2.59.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY r + } +} {56 25 34 10 26 13 43 27 
45 17 28 3} +do_test boundary2-2.59.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary2-2.59.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.59.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-2.59.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.59.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary2-2.59.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY r DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.59.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.59.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.59.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-2.59.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary2-2.59.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY r DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.59.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.60.1 { + db eval { + SELECT * FROM t1 WHERE r=1 + } +} {1 60 0000000000000001} +do_test boundary2-2.60.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000001' + } +} {1 60} +do_test boundary2-2.60.3 { + db eval { + SELECT r, x FROM t1 WHERE a=60 + } +} {1 0000000000000001} +do_test boundary2-2.60.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.60.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 
34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.60.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY r + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.60.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary2-2.60.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.60.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary2-2.60.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.60.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY r + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.60.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary2-2.60.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.60.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary2-2.60.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.60.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary2-2.60.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY r DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.60.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.60.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.60.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.60.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary2-2.60.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY r DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.60.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.61.1 { + db eval { + SELECT * FROM t1 WHERE r=36028797018963968 + } +} {36028797018963968 45 0080000000000000} +do_test boundary2-2.61.2 { + 
db eval { + SELECT r, a FROM t1 WHERE x='0080000000000000' + } +} {36028797018963968 45} +do_test boundary2-2.61.3 { + db eval { + SELECT r, x FROM t1 WHERE a=45 + } +} {36028797018963968 0080000000000000} +do_test boundary2-2.61.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY a + } +} {3 17 28} +do_test boundary2-2.61.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY a DESC + } +} {28 17 3} +do_test boundary2-2.61.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY r + } +} {17 28 3} +do_test boundary2-2.61.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY r DESC + } +} {3 28 17} +do_test boundary2-2.61.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY x + } +} {17 28 3} +do_test boundary2-2.61.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY a + } +} {3 17 28 45} +do_test boundary2-2.61.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary2-2.61.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY r + } +} {45 17 28 3} +do_test boundary2-2.61.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY r DESC + } +} {3 28 17 45} +do_test boundary2-2.61.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY x + } +} {45 17 28 3} +do_test boundary2-2.61.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.61.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.61.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary2-2.61.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY r DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.61.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.61.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.61.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-2.61.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY r + } +} {55 2 64 21 44 58 63 
47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary2-2.61.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY r DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.61.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.62.1 { + db eval { + SELECT * FROM t1 WHERE r=-2147483649 + } +} {-2147483649 47 ffffffff7fffffff} +do_test boundary2-2.62.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffff7fffffff' + } +} {-2147483649 47} +do_test boundary2-2.62.3 { + db eval { + SELECT r, x FROM t1 WHERE a=47 + } +} {-2147483649 ffffffff7fffffff} +do_test boundary2-2.62.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.62.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.62.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY r + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.62.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary2-2.62.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.62.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-2.62.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.62.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY r + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.62.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary2-2.62.ge.5 { + db eval { + SELECT a FROM t1 
WHERE r >= -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.62.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary2-2.62.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary2-2.62.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY r + } +} {55 2 64 21 44 58 63} +do_test boundary2-2.62.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY r DESC + } +} {63 58 44 21 64 2 55} +do_test boundary2-2.62.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary2-2.62.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary2-2.62.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary2-2.62.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY r + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-2.62.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY r DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary2-2.62.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-2.63.1 { + db eval { + SELECT * FROM t1 WHERE r=-36028797018963969 + } +} {-36028797018963969 2 ff7fffffffffffff} +do_test boundary2-2.63.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ff7fffffffffffff' + } +} {-36028797018963969 2} +do_test boundary2-2.63.3 { + db eval { + SELECT r, x FROM t1 WHERE a=2 + } +} {-36028797018963969 ff7fffffffffffff} +do_test boundary2-2.63.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.63.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-2.63.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY r + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.63.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary2-2.63.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.63.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 
48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.63.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.63.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY r + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.63.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary2-2.63.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.63.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY a + } +} {55} +do_test boundary2-2.63.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY a DESC + } +} {55} +do_test boundary2-2.63.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY r + } +} {55} +do_test boundary2-2.63.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY r DESC + } +} {55} +do_test boundary2-2.63.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY x + } +} {55} +do_test boundary2-2.63.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY a + } +} {2 55} +do_test boundary2-2.63.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY a DESC + } +} {55 2} +do_test boundary2-2.63.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY r + } +} {55 2} +do_test boundary2-2.63.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY r DESC + } +} {2 55} +do_test boundary2-2.63.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY x + } +} {55 2} +do_test boundary2-2.64.1 { + db eval { + SELECT * FROM t1 WHERE r=3 + } +} {3 5 0000000000000003} +do_test boundary2-2.64.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000003' + } +} {3 5} +do_test boundary2-2.64.3 { + db eval { + SELECT r, x FROM t1 WHERE a=5 + } +} {3 0000000000000003} +do_test boundary2-2.64.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.64.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-2.64.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY r + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.64.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 
61 30 49 4 31} +do_test boundary2-2.64.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.64.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-2.64.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-2.64.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY r + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.64.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary2-2.64.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.64.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.64.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary2-2.64.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary2-2.64.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY r DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.64.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.64.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-2.64.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary2-2.64.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary2-2.64.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY r DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.64.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.65.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-2.65.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-2.65.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-2.65.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-2.65.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary2-2.65.ge.1 { + db 
eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-2.65.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-2.65.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-2.65.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-2.65.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary2-2.65.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.65.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.65.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.65.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.65.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.65.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.65.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.65.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.65.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.65.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 
43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.66.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.66.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.66.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.66.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.66.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.66.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-2.66.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-2.66.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-2.66.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-2.66.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-2.66.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-2.66.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-2.66.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-2.66.lt.4 { + db eval { + SELECT a FROM t1 
WHERE r < -9.22337303685477580800e+18 ORDER BY r DESC
+ }
+} {}
+do_test boundary2-2.66.lt.5 {
+ db eval {
+ SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY x
+ }
+} {}
+do_test boundary2-2.66.le.1 {
+ db eval {
+ SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY a
+ }
+} {}
+do_test boundary2-2.66.le.2 {
+ db eval {
+ SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY a DESC
+ }
+} {}
+do_test boundary2-2.66.le.3 {
+ db eval {
+ SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY r
+ }
+} {}
+do_test boundary2-2.66.le.4 {
+ db eval {
+ SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY r DESC
+ }
+} {}
+do_test boundary2-2.66.le.5 {
+ db eval {
+ SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY x
+ }
+} {}
+do_test boundary2-3.1 {
+ db eval {
+ DROP INDEX t1i1;
+ DROP INDEX t1i2;
+ DROP INDEX t1i3;
+ }
+} {}
+do_test boundary2-4.1.1 {
+ db eval {
+ SELECT * FROM t1 WHERE r=72057594037927935
+ }
+} {72057594037927935 17 00ffffffffffffff}
+do_test boundary2-4.1.2 {
+ db eval {
+ SELECT r, a FROM t1 WHERE x='00ffffffffffffff'
+ }
+} {72057594037927935 17}
+do_test boundary2-4.1.3 {
+ db eval {
+ SELECT r, x FROM t1 WHERE a=17
+ }
+} {72057594037927935 00ffffffffffffff}
+do_test boundary2-4.1.gt.1 {
+ db eval {
+ SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY a
+ }
+} {3 28}
+do_test boundary2-4.1.gt.2 {
+ db eval {
+ SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY a DESC
+ }
+} {28 3}
+do_test boundary2-4.1.gt.3 {
+ db eval {
+ SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY r
+ }
+} {28 3}
+do_test boundary2-4.1.gt.4 {
+ db eval {
+ SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY r DESC
+ }
+} {3 28}
+do_test boundary2-4.1.gt.5 {
+ db eval {
+ SELECT a FROM t1 WHERE r > 72057594037927935 ORDER BY x
+ }
+} {28 3}
+do_test boundary2-4.1.ge.1 {
+ db eval {
+ SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY a
+ }
+} {3 17 28}
+do_test boundary2-4.1.ge.2 {
+ db eval {
+ SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY a DESC
+ }
+} {28 17 3}
+do_test boundary2-4.1.ge.3 {
+ db eval {
+ SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY r
+ }
+} {17 28 3}
+do_test boundary2-4.1.ge.4 {
+ db eval {
+ SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY r DESC
+ }
+} {3 28 17}
+do_test boundary2-4.1.ge.5 {
+ db eval {
+ SELECT a FROM t1 WHERE r >= 72057594037927935 ORDER BY x
+ }
+} {17 28 3}
+do_test boundary2-4.1.lt.1 {
+ db eval {
+ SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY a
+ }
+} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64}
+do_test boundary2-4.1.lt.2 {
+ db eval {
+ SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY a DESC
+ }
+} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1}
+do_test boundary2-4.1.lt.3 {
+ db eval {
+ SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY r
+ }
+} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45}
+do_test boundary2-4.1.lt.4 {
+ db eval {
+ SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY r DESC
+ }
+} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33
52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.1.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.1.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.1.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.1.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary2-4.1.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY r DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.1.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927935 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.2.1 { + db eval { + SELECT * FROM t1 WHERE r=16384 + } +} {16384 16 0000000000004000} +do_test boundary2-4.2.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000004000' + } +} {16384 16} +do_test boundary2-4.2.3 { + db eval { + SELECT r, x FROM t1 WHERE a=16 + } +} {16384 0000000000004000} +do_test boundary2-4.2.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.2.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.2.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY r + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.2.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary2-4.2.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16384 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.2.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.2.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.2.ge.3 { + db eval { + 
SELECT a FROM t1 WHERE r >= 16384 ORDER BY r + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.2.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary2-4.2.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16384 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.2.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.2.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary2-4.2.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary2-4.2.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY r DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.2.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.2.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.2.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary2-4.2.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary2-4.2.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY r DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.2.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16384 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.3.1 { + db eval { + SELECT * FROM t1 WHERE r=4294967296 + } +} {4294967296 36 0000000100000000} +do_test boundary2-4.3.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000100000000' + } +} {4294967296 36} +do_test boundary2-4.3.3 { + db eval { + SELECT r, x FROM t1 WHERE a=36 + } +} {4294967296 0000000100000000} +do_test boundary2-4.3.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary2-4.3.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.3.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY r + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.3.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary2-4.3.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4294967296 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.3.ge.1 { + db eval { + SELECT a 
FROM t1 WHERE r >= 4294967296 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-4.3.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.3.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY r + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.3.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary2-4.3.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967296 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.3.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.3.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.3.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary2-4.3.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY r DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.3.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.3.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.3.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.3.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary2-4.3.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY r DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.3.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967296 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.4.1 { + db eval { + SELECT * FROM t1 WHERE r=16777216 + } +} {16777216 6 0000000001000000} +do_test boundary2-4.4.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000001000000' + } +} {16777216 6} +do_test boundary2-4.4.3 { + db eval { + SELECT r, x FROM t1 WHERE a=6 + } +} {16777216 0000000001000000} +do_test boundary2-4.4.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 
45 46 51 56 57} +do_test boundary2-4.4.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary2-4.4.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY r + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.4.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary2-4.4.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16777216 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.4.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.4.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary2-4.4.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY r + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.4.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary2-4.4.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16777216 ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.4.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.4.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary2-4.4.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary2-4.4.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY r DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.4.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.4.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.4.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary2-4.4.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary2-4.4.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16777216 ORDER BY r DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.4.le.5 { + db eval { 
+ SELECT a FROM t1 WHERE r <= 16777216 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.5.1 { + db eval { + SELECT * FROM t1 WHERE r=-32769 + } +} {-32769 29 ffffffffffff7fff} +do_test boundary2-4.5.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffff7fff' + } +} {-32769 29} +do_test boundary2-4.5.3 { + db eval { + SELECT r, x FROM t1 WHERE a=29 + } +} {-32769 ffffffffffff7fff} +do_test boundary2-4.5.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.5.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.5.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY r + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.5.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary2-4.5.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary2-4.5.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.5.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.5.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY r + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.5.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary2-4.5.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -32769 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary2-4.5.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary2-4.5.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary2-4.5.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-4.5.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY r DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.5.lt.5 
{ + db eval { + SELECT a FROM t1 WHERE r < -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-4.5.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary2-4.5.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary2-4.5.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-4.5.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY r DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.5.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -32769 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-4.6.1 { + db eval { + SELECT * FROM t1 WHERE r=-140737488355329 + } +} {-140737488355329 21 ffff7fffffffffff} +do_test boundary2-4.6.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffff7fffffffffff' + } +} {-140737488355329 21} +do_test boundary2-4.6.3 { + db eval { + SELECT r, x FROM t1 WHERE a=21 + } +} {-140737488355329 ffff7fffffffffff} +do_test boundary2-4.6.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.6.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.6.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY r + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.6.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary2-4.6.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.6.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.6.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.6.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY r + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.6.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 
19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary2-4.6.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355329 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.6.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY a + } +} {2 55 64} +do_test boundary2-4.6.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY a DESC + } +} {64 55 2} +do_test boundary2-4.6.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY r + } +} {55 2 64} +do_test boundary2-4.6.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY r DESC + } +} {64 2 55} +do_test boundary2-4.6.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355329 ORDER BY x + } +} {55 2 64} +do_test boundary2-4.6.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY a + } +} {2 21 55 64} +do_test boundary2-4.6.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary2-4.6.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY r + } +} {55 2 64 21} +do_test boundary2-4.6.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY r DESC + } +} {21 64 2 55} +do_test boundary2-4.6.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355329 ORDER BY x + } +} {55 2 64 21} +do_test boundary2-4.7.1 { + db eval { + SELECT * FROM t1 WHERE r=2 + } +} {2 41 0000000000000002} +do_test boundary2-4.7.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000002' + } +} {2 41} +do_test boundary2-4.7.3 { + db eval { + SELECT r, x FROM t1 WHERE a=41 + } +} {2 0000000000000002} +do_test boundary2-4.7.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.7.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.7.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY r + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.7.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary2-4.7.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.7.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.7.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.7.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY r + } +} {41 5 31 4 
49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.7.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary2-4.7.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.7.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.7.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.7.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary2-4.7.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY r DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.7.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2 ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.7.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.7.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.7.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary2-4.7.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY r DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.7.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.8.1 { + db eval { + SELECT * FROM t1 WHERE r=4 + } +} {4 31 0000000000000004} +do_test boundary2-4.8.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000004' + } +} {4 31} +do_test boundary2-4.8.3 { + db eval { + SELECT r, x FROM t1 WHERE a=31 + } +} {4 0000000000000004} +do_test boundary2-4.8.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.8.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-4.8.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY r + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.8.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary2-4.8.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.8.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY a 
+ } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.8.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-4.8.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY r + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.8.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary2-4.8.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.8.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.8.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary2-4.8.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary2-4.8.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY r DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.8.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.8.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.8.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary2-4.8.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary2-4.8.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY r DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.8.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4 ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.9.1 { + db eval { + SELECT * FROM t1 WHERE r=562949953421311 + } +} {562949953421311 13 0001ffffffffffff} +do_test boundary2-4.9.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0001ffffffffffff' + } +} {562949953421311 13} +do_test boundary2-4.9.3 { + db eval { + SELECT r, x FROM t1 WHERE a=13 + } +} {562949953421311 0001ffffffffffff} +do_test boundary2-4.9.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary2-4.9.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary2-4.9.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY r + } +} {43 27 45 17 28 3} +do_test boundary2-4.9.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY r DESC + } +} {3 28 17 45 27 43} +do_test boundary2-4.9.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421311 ORDER BY 
x + } +} {43 27 45 17 28 3} +do_test boundary2-4.9.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary2-4.9.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary2-4.9.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY r + } +} {13 43 27 45 17 28 3} +do_test boundary2-4.9.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY r DESC + } +} {3 28 17 45 27 43 13} +do_test boundary2-4.9.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421311 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary2-4.9.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.9.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.9.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary2-4.9.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY r DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.9.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.9.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.9.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.9.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary2-4.9.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY r DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.9.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421311 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.10.1 { + db eval { + SELECT * FROM t1 WHERE r=256 + } +} {256 61 0000000000000100} +do_test boundary2-4.10.2 { + db 
eval { + SELECT r, a FROM t1 WHERE x='0000000000000100' + } +} {256 61} +do_test boundary2-4.10.3 { + db eval { + SELECT r, x FROM t1 WHERE a=61 + } +} {256 0000000000000100} +do_test boundary2-4.10.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.10.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.10.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY r + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.10.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary2-4.10.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 256 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.10.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-4.10.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.10.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY r + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.10.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary2-4.10.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 256 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.10.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.10.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-4.10.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary2-4.10.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY r DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.10.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.10.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.10.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-4.10.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY r + } +} {55 2 64 21 
44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary2-4.10.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY r DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.10.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 256 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.11.1 { + db eval { + SELECT * FROM t1 WHERE r=34359738368 + } +} {34359738368 22 0000000800000000} +do_test boundary2-4.11.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000800000000' + } +} {34359738368 22} +do_test boundary2-4.11.3 { + db eval { + SELECT r, x FROM t1 WHERE a=22 + } +} {34359738368 0000000800000000} +do_test boundary2-4.11.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-4.11.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.11.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY r + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.11.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary2-4.11.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 34359738368 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.11.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-4.11.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.11.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY r + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.11.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary2-4.11.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738368 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.11.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.11.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.11.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary2-4.11.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY r DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.11.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.11.le.1 { + db 
eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.11.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.11.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary2-4.11.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY r DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.11.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738368 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.12.1 { + db eval { + SELECT * FROM t1 WHERE r=65536 + } +} {65536 62 0000000000010000} +do_test boundary2-4.12.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000010000' + } +} {65536 62} +do_test boundary2-4.12.3 { + db eval { + SELECT r, x FROM t1 WHERE a=62 + } +} {65536 0000000000010000} +do_test boundary2-4.12.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-4.12.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.12.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY r + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.12.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary2-4.12.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 65536 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.12.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary2-4.12.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.12.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY r + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.12.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary2-4.12.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 65536 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.12.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 
50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.12.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.12.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary2-4.12.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY r DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.12.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.12.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.12.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.12.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary2-4.12.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY r DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.12.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 65536 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.13.1 { + db eval { + SELECT * FROM t1 WHERE r=268435456 + } +} {268435456 40 0000000010000000} +do_test boundary2-4.13.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000010000000' + } +} {268435456 40} +do_test boundary2-4.13.3 { + db eval { + SELECT r, x FROM t1 WHERE a=40 + } +} {268435456 0000000010000000} +do_test boundary2-4.13.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-4.13.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-4.13.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY r + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.13.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary2-4.13.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 268435456 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.13.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.13.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-4.13.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY r + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.13.ge.4 { + db eval { + SELECT a FROM t1 
WHERE r >= 268435456 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary2-4.13.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 268435456 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.13.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.13.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.13.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary2-4.13.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY r DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.13.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.13.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.13.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.13.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary2-4.13.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY r DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.13.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 268435456 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.14.1 { + db eval { + SELECT * FROM t1 WHERE r=-140737488355328 + } +} {-140737488355328 44 ffff800000000000} +do_test boundary2-4.14.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffff800000000000' + } +} {-140737488355328 44} +do_test boundary2-4.14.3 { + db eval { + SELECT r, x FROM t1 WHERE a=44 + } +} {-140737488355328 ffff800000000000} +do_test boundary2-4.14.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.14.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.14.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY r + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 
60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.14.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary2-4.14.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.14.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.14.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.14.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY r + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.14.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary2-4.14.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.14.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY a + } +} {2 21 55 64} +do_test boundary2-4.14.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY a DESC + } +} {64 55 21 2} +do_test boundary2-4.14.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY r + } +} {55 2 64 21} +do_test boundary2-4.14.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY r DESC + } +} {21 64 2 55} +do_test boundary2-4.14.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -140737488355328 ORDER BY x + } +} {55 2 64 21} +do_test boundary2-4.14.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary2-4.14.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary2-4.14.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY r + } +} {55 2 64 21 44} +do_test boundary2-4.14.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY r DESC + } +} {44 21 64 2 55} +do_test boundary2-4.14.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -140737488355328 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary2-4.15.1 { + db eval { + SELECT * FROM t1 WHERE r=1099511627776 + } +} {1099511627776 19 0000010000000000} +do_test boundary2-4.15.2 { + db eval { + SELECT r, a FROM t1 WHERE 
x='0000010000000000' + } +} {1099511627776 19} +do_test boundary2-4.15.3 { + db eval { + SELECT r, x FROM t1 WHERE a=19 + } +} {1099511627776 0000010000000000} +do_test boundary2-4.15.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-4.15.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary2-4.15.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY r + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.15.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary2-4.15.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627776 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.15.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary2-4.15.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.15.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY r + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.15.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary2-4.15.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627776 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.15.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.15.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.15.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary2-4.15.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY r DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.15.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.15.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.15.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.15.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 
59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary2-4.15.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY r DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.15.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627776 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.16.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY a + } +} {} +do_test boundary2-4.16.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY a DESC + } +} {} +do_test boundary2-4.16.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY r + } +} {} +do_test boundary2-4.16.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY r DESC + } +} {} +do_test boundary2-4.16.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 9223372036854775807 ORDER BY x + } +} {} +do_test boundary2-4.16.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY a + } +} {3} +do_test boundary2-4.16.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY a DESC + } +} {3} +do_test boundary2-4.16.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY r + } +} {3} +do_test boundary2-4.16.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY r DESC + } +} {3} +do_test boundary2-4.16.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 9223372036854775807 ORDER BY x + } +} {3} +do_test boundary2-4.16.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.16.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.16.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary2-4.16.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY r DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.16.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.16.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test 
boundary2-4.16.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.16.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.16.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.16.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 9223372036854775807 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.17.1 { + db eval { + SELECT * FROM t1 WHERE r=32768 + } +} {32768 50 0000000000008000} +do_test boundary2-4.17.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000008000' + } +} {32768 50} +do_test boundary2-4.17.3 { + db eval { + SELECT r, x FROM t1 WHERE a=50 + } +} {32768 0000000000008000} +do_test boundary2-4.17.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary2-4.17.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.17.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY r + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.17.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary2-4.17.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 32768 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.17.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.17.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.17.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY r + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.17.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary2-4.17.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 32768 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.17.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 32768 
ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.17.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.17.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary2-4.17.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY r DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.17.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.17.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.17.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.17.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary2-4.17.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY r DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.17.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.18.1 { + db eval { + SELECT * FROM t1 WHERE r=-36028797018963968 + } +} {-36028797018963968 64 ff80000000000000} +do_test boundary2-4.18.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ff80000000000000' + } +} {-36028797018963968 64} +do_test boundary2-4.18.3 { + db eval { + SELECT r, x FROM t1 WHERE a=64 + } +} {-36028797018963968 ff80000000000000} +do_test boundary2-4.18.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.18.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.18.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY r + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.18.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary2-4.18.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 
28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.18.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.18.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.18.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY r + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.18.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary2-4.18.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.18.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY a + } +} {2 55} +do_test boundary2-4.18.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY a DESC + } +} {55 2} +do_test boundary2-4.18.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY r + } +} {55 2} +do_test boundary2-4.18.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY r DESC + } +} {2 55} +do_test boundary2-4.18.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963968 ORDER BY x + } +} {55 2} +do_test boundary2-4.18.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY a + } +} {2 55 64} +do_test boundary2-4.18.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY a DESC + } +} {64 55 2} +do_test boundary2-4.18.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY r + } +} {55 2 64} +do_test boundary2-4.18.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY r DESC + } +} {64 2 55} +do_test boundary2-4.18.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963968 ORDER BY x + } +} {55 2 64} +do_test boundary2-4.19.1 { + db eval { + SELECT * FROM t1 WHERE r=65535 + } +} {65535 48 000000000000ffff} +do_test boundary2-4.19.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000000ffff' + } +} {65535 48} +do_test boundary2-4.19.3 { + db eval { + SELECT r, x FROM t1 WHERE a=48 + } +} {65535 000000000000ffff} +do_test boundary2-4.19.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary2-4.19.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.19.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY r + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 
46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.19.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary2-4.19.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 65535 ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.19.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary2-4.19.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.19.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY r + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.19.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary2-4.19.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 65535 ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.19.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.19.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.19.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary2-4.19.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY r DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.19.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.19.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.19.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.19.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary2-4.19.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY r DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.19.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 65535 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.20.1 { + db eval { + SELECT * FROM t1 WHERE r=4294967295 + } +} {4294967295 14 00000000ffffffff} +do_test boundary2-4.20.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000ffffffff' + } +} {4294967295 14} +do_test 
boundary2-4.20.3 { + db eval { + SELECT r, x FROM t1 WHERE a=14 + } +} {4294967295 00000000ffffffff} +do_test boundary2-4.20.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-4.20.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.20.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY r + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.20.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary2-4.20.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4294967295 ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.20.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-4.20.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-4.20.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY r + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.20.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary2-4.20.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4294967295 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.20.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.20.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.20.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary2-4.20.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY r DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.20.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.20.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.20.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.20.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 
24 9 6 12 40 20 51 14} +do_test boundary2-4.20.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY r DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.20.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4294967295 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.21.1 { + db eval { + SELECT * FROM t1 WHERE r=1099511627775 + } +} {1099511627775 57 000000ffffffffff} +do_test boundary2-4.21.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000ffffffffff' + } +} {1099511627775 57} +do_test boundary2-4.21.3 { + db eval { + SELECT r, x FROM t1 WHERE a=57 + } +} {1099511627775 000000ffffffffff} +do_test boundary2-4.21.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary2-4.21.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.21.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY r + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.21.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary2-4.21.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1099511627775 ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.21.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary2-4.21.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.21.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY r + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.21.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary2-4.21.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1099511627775 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.21.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.21.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.21.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary2-4.21.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY r DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.21.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 
37 29 32 54 53 52 33 38} +do_test boundary2-4.21.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.21.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.21.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary2-4.21.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY r DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.21.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 1099511627775 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.22.1 { + db eval { + SELECT * FROM t1 WHERE r=-8388608 + } +} {-8388608 37 ffffffffff800000} +do_test boundary2-4.22.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffff800000' + } +} {-8388608 37} +do_test boundary2-4.22.3 { + db eval { + SELECT r, x FROM t1 WHERE a=37 + } +} {-8388608 ffffffffff800000} +do_test boundary2-4.22.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.22.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.22.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY r + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.22.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary2-4.22.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary2-4.22.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.22.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.22.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY r + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 
30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.22.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary2-4.22.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary2-4.22.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary2-4.22.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary2-4.22.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-4.22.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY r DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.22.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-4.22.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary2-4.22.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary2-4.22.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-4.22.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY r DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.22.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -8388608 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary2-4.23.1 { + db eval { + SELECT * FROM t1 WHERE r=549755813888 + } +} {549755813888 35 0000008000000000} +do_test boundary2-4.23.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000008000000000' + } +} {549755813888 35} +do_test boundary2-4.23.3 { + db eval { + SELECT r, x FROM t1 WHERE a=35 + } +} {549755813888 0000008000000000} +do_test boundary2-4.23.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary2-4.23.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.23.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY r + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.23.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary2-4.23.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 549755813888 ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.23.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary2-4.23.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.23.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY r + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.23.ge.4 { + 
db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary2-4.23.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813888 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.23.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.23.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.23.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary2-4.23.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY r DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.23.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.23.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.23.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.23.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary2-4.23.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY r DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.23.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.24.1 { + db eval { + SELECT * FROM t1 WHERE r=8388607 + } +} {8388607 18 00000000007fffff} +do_test boundary2-4.24.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000007fffff' + } +} {8388607 18} +do_test boundary2-4.24.3 { + db eval { + SELECT r, x FROM t1 WHERE a=18 + } +} {8388607 00000000007fffff} +do_test boundary2-4.24.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.24.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.24.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY r + } +} {24 9 6 12 40 20 51 14 36 
39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.24.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary2-4.24.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 8388607 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.24.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.24.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.24.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY r + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.24.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary2-4.24.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 8388607 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.24.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.24.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-4.24.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary2-4.24.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY r DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.24.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.24.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.24.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-4.24.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary2-4.24.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY r DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.24.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 8388607 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.25.1 { + db eval { + SELECT * FROM t1 WHERE r=-3 + } +} {-3 52 fffffffffffffffd} +do_test boundary2-4.25.2 { + db eval { + SELECT r, a FROM t1 
WHERE x='fffffffffffffffd' + } +} {-3 52} +do_test boundary2-4.25.3 { + db eval { + SELECT r, x FROM t1 WHERE a=52 + } +} {-3 fffffffffffffffd} +do_test boundary2-4.25.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.25.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.25.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY r + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.25.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary2-4.25.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary2-4.25.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary2-4.25.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.25.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY r + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.25.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary2-4.25.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -3 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary2-4.25.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary2-4.25.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.25.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-4.25.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY r DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.25.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-4.25.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.25.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.25.le.3 { + db eval { + SELECT a FROM t1 
WHERE r <= -3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-4.25.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY r DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.25.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -3 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-4.26.1 { + db eval { + SELECT * FROM t1 WHERE r=0 + } +} {0 59 0000000000000000} +do_test boundary2-4.26.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000000' + } +} {0 59} +do_test boundary2-4.26.3 { + db eval { + SELECT r, x FROM t1 WHERE a=59 + } +} {0 0000000000000000} +do_test boundary2-4.26.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary2-4.26.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.26.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY r + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.26.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary2-4.26.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 0 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.26.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.26.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.26.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY r + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.26.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary2-4.26.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 0 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.26.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.26.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.26.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.26.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY r DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test 
boundary2-4.26.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 0 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.26.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary2-4.26.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.26.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary2-4.26.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY r DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.26.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 0 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.27.1 { + db eval { + SELECT * FROM t1 WHERE r=-1 + } +} {-1 38 ffffffffffffffff} +do_test boundary2-4.27.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffffff' + } +} {-1 38} +do_test boundary2-4.27.3 { + db eval { + SELECT r, x FROM t1 WHERE a=38 + } +} {-1 ffffffffffffffff} +do_test boundary2-4.27.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.27.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.27.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY r + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.27.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary2-4.27.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.27.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.27.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.27.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY r + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.27.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary2-4.27.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -1 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary2-4.27.lt.1 { + db eval { + 
SELECT a FROM t1 WHERE r < -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.27.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary2-4.27.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-4.27.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY r DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.27.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-4.27.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.27.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.27.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.27.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY r DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.27.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -1 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.28.1 { + db eval { + SELECT * FROM t1 WHERE r=-2 + } +} {-2 33 fffffffffffffffe} +do_test boundary2-4.28.2 { + db eval { + SELECT r, a FROM t1 WHERE x='fffffffffffffffe' + } +} {-2 33} +do_test boundary2-4.28.3 { + db eval { + SELECT r, x FROM t1 WHERE a=33 + } +} {-2 fffffffffffffffe} +do_test boundary2-4.28.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.28.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.28.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY r + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.28.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary2-4.28.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary2-4.28.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary2-4.28.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.28.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY r + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 
35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.28.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary2-4.28.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -2 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary2-4.28.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.28.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.28.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-4.28.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY r DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.28.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary2-4.28.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary2-4.28.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary2-4.28.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-4.28.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY r DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.28.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary2-4.29.1 { + db eval { + SELECT * FROM t1 WHERE r=2097152 + } +} {2097152 42 0000000000200000} +do_test boundary2-4.29.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000200000' + } +} {2097152 42} +do_test boundary2-4.29.3 { + db eval { + SELECT r, x FROM t1 WHERE a=42 + } +} {2097152 0000000000200000} +do_test boundary2-4.29.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.29.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.29.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY r + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.29.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary2-4.29.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2097152 ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.29.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-4.29.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 
26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.29.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY r + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.29.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary2-4.29.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2097152 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.29.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.29.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-4.29.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary2-4.29.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY r DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.29.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.29.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.29.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-4.29.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary2-4.29.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY r DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.29.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2097152 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.30.1 { + db eval { + SELECT * FROM t1 WHERE r=128 + } +} {128 49 0000000000000080} +do_test boundary2-4.30.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000080' + } +} {128 49} +do_test boundary2-4.30.3 { + db eval { + SELECT r, x FROM t1 WHERE a=49 + } +} {128 0000000000000080} +do_test boundary2-4.30.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-4.30.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.30.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY r + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 
17 28 3} +do_test boundary2-4.30.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary2-4.30.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 128 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.30.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.30.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.30.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY r + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.30.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary2-4.30.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 128 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.30.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.30.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-4.30.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary2-4.30.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY r DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.30.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 128 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.30.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.30.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-4.30.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary2-4.30.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY r DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.30.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 128 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.31.1 { + db eval { + SELECT * FROM t1 WHERE r=255 + } +} {255 30 00000000000000ff} +do_test boundary2-4.31.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000000000ff' + } +} {255 30} +do_test boundary2-4.31.3 { + db eval { + SELECT r, x FROM t1 WHERE a=30 + } +} {255 00000000000000ff} +do_test boundary2-4.31.gt.1 { + db eval { + SELECT a FROM t1 
WHERE r > 255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-4.31.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.31.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY r + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.31.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary2-4.31.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 255 ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.31.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary2-4.31.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.31.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY r + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.31.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary2-4.31.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 255 ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.31.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.31.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-4.31.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary2-4.31.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY r DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.31.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 255 ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.31.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.31.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-4.31.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary2-4.31.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY r DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 
21 64 2 55} +do_test boundary2-4.31.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 255 ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.32.1 { + db eval { + SELECT * FROM t1 WHERE r=-2147483648 + } +} {-2147483648 11 ffffffff80000000} +do_test boundary2-4.32.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffff80000000' + } +} {-2147483648 11} +do_test boundary2-4.32.3 { + db eval { + SELECT r, x FROM t1 WHERE a=11 + } +} {-2147483648 ffffffff80000000} +do_test boundary2-4.32.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.32.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.32.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY r + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.32.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary2-4.32.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.32.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.32.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.32.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY r + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.32.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary2-4.32.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.32.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary2-4.32.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary2-4.32.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY r + } +} {55 2 64 21 44 
58 63 47} +do_test boundary2-4.32.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY r DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary2-4.32.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-4.32.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary2-4.32.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary2-4.32.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-4.32.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY r DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary2-4.32.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483648 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-4.33.1 { + db eval { + SELECT * FROM t1 WHERE r=34359738367 + } +} {34359738367 39 00000007ffffffff} +do_test boundary2-4.33.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000007ffffffff' + } +} {34359738367 39} +do_test boundary2-4.33.3 { + db eval { + SELECT r, x FROM t1 WHERE a=39 + } +} {34359738367 00000007ffffffff} +do_test boundary2-4.33.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-4.33.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.33.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY r + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.33.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary2-4.33.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 34359738367 ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.33.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary2-4.33.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary2-4.33.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY r + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.33.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary2-4.33.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 34359738367 ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.33.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.33.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.33.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 
24 9 6 12 40 20 51 14 36} +do_test boundary2-4.33.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY r DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.33.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 34359738367 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.33.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.33.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.33.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary2-4.33.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY r DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.33.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 34359738367 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.34.1 { + db eval { + SELECT * FROM t1 WHERE r=-549755813889 + } +} {-549755813889 58 ffffff7fffffffff} +do_test boundary2-4.34.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffff7fffffffff' + } +} {-549755813889 58} +do_test boundary2-4.34.3 { + db eval { + SELECT r, x FROM t1 WHERE a=58 + } +} {-549755813889 ffffff7fffffffff} +do_test boundary2-4.34.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary2-4.34.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.34.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY r + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.34.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary2-4.34.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.34.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 
18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary2-4.34.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.34.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY r + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.34.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary2-4.34.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813889 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.34.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY a + } +} {2 21 44 55 64} +do_test boundary2-4.34.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY a DESC + } +} {64 55 44 21 2} +do_test boundary2-4.34.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY r + } +} {55 2 64 21 44} +do_test boundary2-4.34.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY r DESC + } +} {44 21 64 2 55} +do_test boundary2-4.34.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -549755813889 ORDER BY x + } +} {55 2 64 21 44} +do_test boundary2-4.34.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary2-4.34.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary2-4.34.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY r + } +} {55 2 64 21 44 58} +do_test boundary2-4.34.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY r DESC + } +} {58 44 21 64 2 55} +do_test boundary2-4.34.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813889 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary2-4.35.1 { + db eval { + SELECT * FROM t1 WHERE r=-32768 + } +} {-32768 32 ffffffffffff8000} +do_test boundary2-4.35.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffff8000' + } +} {-32768 32} +do_test boundary2-4.35.3 { + db eval { + SELECT r, x FROM t1 WHERE a=32 + } +} {-32768 ffffffffffff8000} +do_test boundary2-4.35.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.35.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.35.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY r + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} 
+do_test boundary2-4.35.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary2-4.35.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary2-4.35.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.35.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.35.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY r + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.35.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary2-4.35.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -32768 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary2-4.35.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary2-4.35.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary2-4.35.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-4.35.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY r DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.35.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary2-4.35.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary2-4.35.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.35.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-4.35.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY r DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.35.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -32768 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-4.36.1 { + db eval { + SELECT * FROM t1 WHERE r=2147483647 + } +} {2147483647 20 000000007fffffff} +do_test boundary2-4.36.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000007fffffff' + } +} {2147483647 20} +do_test boundary2-4.36.3 { + db eval { + SELECT r, x FROM t1 WHERE a=20 + } +} {2147483647 000000007fffffff} +do_test boundary2-4.36.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY a 
+ } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-4.36.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-4.36.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY r + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.36.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary2-4.36.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2147483647 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.36.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-4.36.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-4.36.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY r + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.36.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary2-4.36.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483647 ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.36.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.36.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.36.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary2-4.36.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY r DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.36.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.36.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.36.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.36.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary2-4.36.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY r DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 
4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.36.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483647 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.37.1 { + db eval { + SELECT * FROM t1 WHERE r=-129 + } +} {-129 54 ffffffffffffff7f} +do_test boundary2-4.37.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffff7f' + } +} {-129 54} +do_test boundary2-4.37.3 { + db eval { + SELECT r, x FROM t1 WHERE a=54 + } +} {-129 ffffffffffffff7f} +do_test boundary2-4.37.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary2-4.37.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.37.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY r + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.37.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary2-4.37.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary2-4.37.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.37.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.37.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY r + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.37.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary2-4.37.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -129 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary2-4.37.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary2-4.37.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.37.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-4.37.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY r DESC + } +} 
{32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.37.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary2-4.37.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary2-4.37.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.37.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-4.37.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY r DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.37.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -129 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-4.38.1 { + db eval { + SELECT * FROM t1 WHERE r=-128 + } +} {-128 53 ffffffffffffff80} +do_test boundary2-4.38.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffffffff80' + } +} {-128 53} +do_test boundary2-4.38.3 { + db eval { + SELECT r, x FROM t1 WHERE a=53 + } +} {-128 ffffffffffffff80} +do_test boundary2-4.38.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary2-4.38.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.38.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY r + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.38.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary2-4.38.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary2-4.38.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary2-4.38.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.38.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY r + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.38.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary2-4.38.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -128 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 
25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary2-4.38.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary2-4.38.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.38.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-4.38.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY r DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.38.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary2-4.38.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary2-4.38.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary2-4.38.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-4.38.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY r DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.38.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -128 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary2-4.39.1 { + db eval { + SELECT * FROM t1 WHERE r=72057594037927936 + } +} {72057594037927936 28 0100000000000000} +do_test boundary2-4.39.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0100000000000000' + } +} {72057594037927936 28} +do_test boundary2-4.39.3 { + db eval { + SELECT r, x FROM t1 WHERE a=28 + } +} {72057594037927936 0100000000000000} +do_test boundary2-4.39.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY a + } +} {3} +do_test boundary2-4.39.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY a DESC + } +} {3} +do_test boundary2-4.39.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY r + } +} {3} +do_test boundary2-4.39.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY r DESC + } +} {3} +do_test boundary2-4.39.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 72057594037927936 ORDER BY x + } +} {3} +do_test boundary2-4.39.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY a + } +} {3 28} +do_test boundary2-4.39.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY a DESC + } +} {28 3} +do_test boundary2-4.39.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY r + } +} {28 3} +do_test boundary2-4.39.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY r DESC + } +} {3 28} +do_test boundary2-4.39.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 72057594037927936 ORDER BY x + } +} {28 3} +do_test boundary2-4.39.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.39.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 
5 4 2 1} +do_test boundary2-4.39.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary2-4.39.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY r DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.39.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.39.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.39.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.39.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary2-4.39.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY r DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.39.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 72057594037927936 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.40.1 { + db eval { + SELECT * FROM t1 WHERE r=2147483648 + } +} {2147483648 51 0000000080000000} +do_test boundary2-4.40.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000080000000' + } +} {2147483648 51} +do_test boundary2-4.40.3 { + db eval { + SELECT r, x FROM t1 WHERE a=51 + } +} {2147483648 0000000080000000} +do_test boundary2-4.40.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary2-4.40.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-4.40.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY r + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.40.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary2-4.40.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2147483648 ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.40.ge.1 { + db 
eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary2-4.40.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary2-4.40.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY r + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.40.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary2-4.40.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2147483648 ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.40.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.40.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.40.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary2-4.40.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY r DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.40.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.40.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.40.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.40.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary2-4.40.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY r DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.40.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2147483648 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.41.1 { + db eval { + SELECT * FROM t1 WHERE r=549755813887 + } +} {549755813887 46 0000007fffffffff} +do_test boundary2-4.41.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000007fffffffff' + } +} {549755813887 46} +do_test boundary2-4.41.3 { + db eval { + SELECT r, x FROM t1 WHERE a=46 + } +} {549755813887 0000007fffffffff} +do_test boundary2-4.41.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 
34 35 43 45 56 57} +do_test boundary2-4.41.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.41.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY r + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.41.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary2-4.41.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 549755813887 ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.41.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary2-4.41.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary2-4.41.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY r + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.41.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary2-4.41.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 549755813887 ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.41.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.41.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.41.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary2-4.41.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY r DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.41.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.41.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.41.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.41.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary2-4.41.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY r DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 
33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.41.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 549755813887 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.42.1 { + db eval { + SELECT * FROM t1 WHERE r=-549755813888 + } +} {-549755813888 63 ffffff8000000000} +do_test boundary2-4.42.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffff8000000000' + } +} {-549755813888 63} +do_test boundary2-4.42.3 { + db eval { + SELECT r, x FROM t1 WHERE a=63 + } +} {-549755813888 ffffff8000000000} +do_test boundary2-4.42.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.42.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.42.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY r + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.42.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary2-4.42.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.42.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary2-4.42.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.42.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY r + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.42.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary2-4.42.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -549755813888 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.42.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY a + } +} {2 21 44 55 58 64} +do_test boundary2-4.42.lt.2 { + db eval { + SELECT a 
FROM t1 WHERE r < -549755813888 ORDER BY a DESC + } +} {64 58 55 44 21 2} +do_test boundary2-4.42.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY r + } +} {55 2 64 21 44 58} +do_test boundary2-4.42.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY r DESC + } +} {58 44 21 64 2 55} +do_test boundary2-4.42.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -549755813888 ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary2-4.42.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary2-4.42.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary2-4.42.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY r + } +} {55 2 64 21 44 58 63} +do_test boundary2-4.42.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY r DESC + } +} {63 58 44 21 64 2 55} +do_test boundary2-4.42.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -549755813888 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary2-4.43.1 { + db eval { + SELECT * FROM t1 WHERE r=281474976710655 + } +} {281474976710655 10 0000ffffffffffff} +do_test boundary2-4.43.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000ffffffffffff' + } +} {281474976710655 10} +do_test boundary2-4.43.3 { + db eval { + SELECT r, x FROM t1 WHERE a=10 + } +} {281474976710655 0000ffffffffffff} +do_test boundary2-4.43.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary2-4.43.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary2-4.43.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY r + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-4.43.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary2-4.43.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710655 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-4.43.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary2-4.43.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary2-4.43.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY r + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-4.43.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary2-4.43.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710655 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-4.43.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.43.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.43.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 
12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary2-4.43.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY r DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.43.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.43.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.43.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.43.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary2-4.43.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY r DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.43.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710655 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.44.1 { + db eval { + SELECT * FROM t1 WHERE r=4398046511103 + } +} {4398046511103 7 000003ffffffffff} +do_test boundary2-4.44.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000003ffffffffff' + } +} {4398046511103 7} +do_test boundary2-4.44.3 { + db eval { + SELECT r, x FROM t1 WHERE a=7 + } +} {4398046511103 000003ffffffffff} +do_test boundary2-4.44.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-4.44.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-4.44.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY r + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.44.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary2-4.44.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511103 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.44.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-4.44.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary2-4.44.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY r + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.44.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY r DESC + } +} {3 28 
17 45 27 43 13 26 10 34 25 56 7} +do_test boundary2-4.44.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511103 ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.44.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.44.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.44.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary2-4.44.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY r DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.44.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.44.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.44.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.44.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary2-4.44.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY r DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.44.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511103 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.45.1 { + db eval { + SELECT * FROM t1 WHERE r=268435455 + } +} {268435455 12 000000000fffffff} +do_test boundary2-4.45.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000fffffff' + } +} {268435455 12} +do_test boundary2-4.45.3 { + db eval { + SELECT r, x FROM t1 WHERE a=12 + } +} {268435455 000000000fffffff} +do_test boundary2-4.45.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.45.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary2-4.45.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY r + } +} {40 20 51 14 36 39 22 46 35 57 19 7 
56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.45.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary2-4.45.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 268435455 ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.45.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.45.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary2-4.45.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY r + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.45.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary2-4.45.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 268435455 ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.45.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.45.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary2-4.45.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary2-4.45.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY r DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.45.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.45.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.45.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary2-4.45.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary2-4.45.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY r DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.45.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 268435455 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.46.1 { + db eval { + SELECT * FROM t1 WHERE r=-9223372036854775808 + } +} {-9223372036854775808 55 
8000000000000000} +do_test boundary2-4.46.2 { + db eval { + SELECT r, a FROM t1 WHERE x='8000000000000000' + } +} {-9223372036854775808 55} +do_test boundary2-4.46.3 { + db eval { + SELECT r, x FROM t1 WHERE a=55 + } +} {-9223372036854775808 8000000000000000} +do_test boundary2-4.46.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.46.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.46.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY r + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.46.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary2-4.46.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.46.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.46.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.46.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.46.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.46.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -9223372036854775808 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.46.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY a + } +} {} +do_test boundary2-4.46.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY a DESC + } +} {} +do_test boundary2-4.46.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 
-9223372036854775808 ORDER BY r + } +} {} +do_test boundary2-4.46.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY r DESC + } +} {} +do_test boundary2-4.46.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -9223372036854775808 ORDER BY x + } +} {} +do_test boundary2-4.46.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY a + } +} {55} +do_test boundary2-4.46.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY a DESC + } +} {55} +do_test boundary2-4.46.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY r + } +} {55} +do_test boundary2-4.46.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY r DESC + } +} {55} +do_test boundary2-4.46.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -9223372036854775808 ORDER BY x + } +} {55} +do_test boundary2-4.47.1 { + db eval { + SELECT * FROM t1 WHERE r=562949953421312 + } +} {562949953421312 43 0002000000000000} +do_test boundary2-4.47.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0002000000000000' + } +} {562949953421312 43} +do_test boundary2-4.47.3 { + db eval { + SELECT r, x FROM t1 WHERE a=43 + } +} {562949953421312 0002000000000000} +do_test boundary2-4.47.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary2-4.47.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary2-4.47.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY r + } +} {27 45 17 28 3} +do_test boundary2-4.47.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY r DESC + } +} {3 28 17 45 27} +do_test boundary2-4.47.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 562949953421312 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary2-4.47.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY a + } +} {3 17 27 28 43 45} +do_test boundary2-4.47.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY a DESC + } +} {45 43 28 27 17 3} +do_test boundary2-4.47.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY r + } +} {43 27 45 17 28 3} +do_test boundary2-4.47.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY r DESC + } +} {3 28 17 45 27 43} +do_test boundary2-4.47.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 562949953421312 ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary2-4.47.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.47.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.47.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary2-4.47.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY r DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 
54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.47.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.47.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.47.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.47.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary2-4.47.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY r DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.47.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 562949953421312 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.48.1 { + db eval { + SELECT * FROM t1 WHERE r=-8388609 + } +} {-8388609 1 ffffffffff7fffff} +do_test boundary2-4.48.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffffff7fffff' + } +} {-8388609 1} +do_test boundary2-4.48.3 { + db eval { + SELECT r, x FROM t1 WHERE a=1 + } +} {-8388609 ffffffffff7fffff} +do_test boundary2-4.48.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.48.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.48.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY r + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.48.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary2-4.48.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary2-4.48.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 
48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.48.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.48.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY r + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.48.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary2-4.48.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -8388609 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.48.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary2-4.48.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary2-4.48.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-4.48.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY r DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary2-4.48.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary2-4.48.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary2-4.48.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary2-4.48.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-4.48.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY r DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.48.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -8388609 ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary2-4.49.1 { + db eval { + SELECT * FROM t1 WHERE r=16777215 + } +} {16777215 9 0000000000ffffff} +do_test boundary2-4.49.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000ffffff' + } +} {16777215 9} +do_test boundary2-4.49.3 { + db eval { + SELECT r, x FROM t1 WHERE a=9 + } +} {16777215 0000000000ffffff} +do_test boundary2-4.49.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.49.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary2-4.49.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY r + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.49.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary2-4.49.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16777215 ORDER BY x + } +} {6 12 40 20 51 14 36 
39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.49.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.49.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.49.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY r + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.49.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary2-4.49.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16777215 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.49.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.49.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-4.49.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary2-4.49.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY r DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.49.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.49.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.49.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary2-4.49.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary2-4.49.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY r DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.49.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16777215 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.50.1 { + db eval { + SELECT * FROM t1 WHERE r=8388608 + } +} {8388608 24 0000000000800000} +do_test boundary2-4.50.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000800000' + } +} {8388608 24} +do_test boundary2-4.50.3 { + db eval { + SELECT r, x FROM t1 WHERE a=24 + } +} {8388608 0000000000800000} +do_test boundary2-4.50.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 
45 46 51 56 57} +do_test boundary2-4.50.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.50.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY r + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.50.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary2-4.50.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 8388608 ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.50.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary2-4.50.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.50.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY r + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.50.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary2-4.50.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 8388608 ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.50.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.50.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-4.50.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary2-4.50.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY r DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.50.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.50.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.50.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary2-4.50.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary2-4.50.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 8388608 ORDER BY r DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.50.le.5 { + db eval { 
+ SELECT a FROM t1 WHERE r <= 8388608 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.51.1 { + db eval { + SELECT * FROM t1 WHERE r=16383 + } +} {16383 8 0000000000003fff} +do_test boundary2-4.51.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000003fff' + } +} {16383 8} +do_test boundary2-4.51.3 { + db eval { + SELECT r, x FROM t1 WHERE a=8 + } +} {16383 0000000000003fff} +do_test boundary2-4.51.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.51.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.51.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY r + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.51.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary2-4.51.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 16383 ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.51.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.51.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.51.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY r + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.51.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary2-4.51.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 16383 ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.51.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.51.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary2-4.51.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary2-4.51.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY r DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.51.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.51.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 
38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.51.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary2-4.51.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary2-4.51.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY r DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.51.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 16383 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.52.1 { + db eval { + SELECT * FROM t1 WHERE r=140737488355328 + } +} {140737488355328 34 0000800000000000} +do_test boundary2-4.52.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000800000000000' + } +} {140737488355328 34} +do_test boundary2-4.52.3 { + db eval { + SELECT r, x FROM t1 WHERE a=34 + } +} {140737488355328 0000800000000000} +do_test boundary2-4.52.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary2-4.52.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary2-4.52.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY r + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-4.52.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary2-4.52.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355328 ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary2-4.52.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary2-4.52.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary2-4.52.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY r + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.52.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary2-4.52.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355328 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.52.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.52.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.52.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary2-4.52.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY r DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 
21 64 2 55} +do_test boundary2-4.52.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.52.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.52.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.52.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary2-4.52.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY r DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.52.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355328 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.53.1 { + db eval { + SELECT * FROM t1 WHERE r=2097151 + } +} {2097151 15 00000000001fffff} +do_test boundary2-4.53.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00000000001fffff' + } +} {2097151 15} +do_test boundary2-4.53.3 { + db eval { + SELECT r, x FROM t1 WHERE a=15 + } +} {2097151 00000000001fffff} +do_test boundary2-4.53.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-4.53.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary2-4.53.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY r + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.53.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary2-4.53.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 2097151 ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.53.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary2-4.53.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.53.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY r + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.53.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY r DESC 
+ } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary2-4.53.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 2097151 ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.53.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.53.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.53.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary2-4.53.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY r DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.53.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.53.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary2-4.53.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary2-4.53.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary2-4.53.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY r DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.53.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 2097151 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.54.1 { + db eval { + SELECT * FROM t1 WHERE r=140737488355327 + } +} {140737488355327 25 00007fffffffffff} +do_test boundary2-4.54.2 { + db eval { + SELECT r, a FROM t1 WHERE x='00007fffffffffff' + } +} {140737488355327 25} +do_test boundary2-4.54.3 { + db eval { + SELECT r, x FROM t1 WHERE a=25 + } +} {140737488355327 00007fffffffffff} +do_test boundary2-4.54.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary2-4.54.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary2-4.54.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY r + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.54.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary2-4.54.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 140737488355327 ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.54.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary2-4.54.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 
ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-4.54.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY r + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.54.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary2-4.54.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 140737488355327 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.54.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.54.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.54.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary2-4.54.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY r DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.54.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.54.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.54.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.54.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary2-4.54.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY r DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.54.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 140737488355327 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.55.1 { + db eval { + SELECT * FROM t1 WHERE r=281474976710656 + } +} {281474976710656 26 0001000000000000} +do_test boundary2-4.55.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0001000000000000' + } +} {281474976710656 26} +do_test boundary2-4.55.3 { + db eval { + SELECT r, x FROM t1 WHERE a=26 + } +} {281474976710656 0001000000000000} +do_test boundary2-4.55.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 
281474976710656 ORDER BY a + } +} {3 13 17 27 28 43 45} +do_test boundary2-4.55.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary2-4.55.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY r + } +} {13 43 27 45 17 28 3} +do_test boundary2-4.55.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY r DESC + } +} {3 28 17 45 27 43 13} +do_test boundary2-4.55.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 281474976710656 ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary2-4.55.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY a + } +} {3 13 17 26 27 28 43 45} +do_test boundary2-4.55.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary2-4.55.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY r + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-4.55.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary2-4.55.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 281474976710656 ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary2-4.55.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.55.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.55.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary2-4.55.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY r DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.55.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.55.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.55.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.55.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary2-4.55.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY r DESC + } +} {26 10 34 25 56 7 19 57 
35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.55.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 281474976710656 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.56.1 { + db eval { + SELECT * FROM t1 WHERE r=32767 + } +} {32767 23 0000000000007fff} +do_test boundary2-4.56.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000007fff' + } +} {32767 23} +do_test boundary2-4.56.3 { + db eval { + SELECT r, x FROM t1 WHERE a=23 + } +} {32767 0000000000007fff} +do_test boundary2-4.56.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.56.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.56.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY r + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.56.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary2-4.56.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 32767 ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.56.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary2-4.56.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary2-4.56.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY r + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.56.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary2-4.56.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 32767 ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.56.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.56.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary2-4.56.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary2-4.56.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY r DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.56.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 32767 ORDER BY x + 
} +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.56.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary2-4.56.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary2-4.56.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary2-4.56.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY r DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.56.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 32767 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.57.1 { + db eval { + SELECT * FROM t1 WHERE r=127 + } +} {127 4 000000000000007f} +do_test boundary2-4.57.2 { + db eval { + SELECT r, a FROM t1 WHERE x='000000000000007f' + } +} {127 4} +do_test boundary2-4.57.3 { + db eval { + SELECT r, x FROM t1 WHERE a=4 + } +} {127 000000000000007f} +do_test boundary2-4.57.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.57.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary2-4.57.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY r + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.57.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary2-4.57.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 127 ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.57.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.57.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary2-4.57.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY r + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.57.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary2-4.57.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 127 ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.57.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER 
BY a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.57.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary2-4.57.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary2-4.57.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY r DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.57.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 127 ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.57.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.57.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary2-4.57.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary2-4.57.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY r DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.57.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 127 ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.58.1 { + db eval { + SELECT * FROM t1 WHERE r=36028797018963967 + } +} {36028797018963967 27 007fffffffffffff} +do_test boundary2-4.58.2 { + db eval { + SELECT r, a FROM t1 WHERE x='007fffffffffffff' + } +} {36028797018963967 27} +do_test boundary2-4.58.3 { + db eval { + SELECT r, x FROM t1 WHERE a=27 + } +} {36028797018963967 007fffffffffffff} +do_test boundary2-4.58.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY a + } +} {3 17 28 45} +do_test boundary2-4.58.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary2-4.58.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY r + } +} {45 17 28 3} +do_test boundary2-4.58.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY r DESC + } +} {3 28 17 45} +do_test boundary2-4.58.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963967 ORDER BY x + } +} {45 17 28 3} +do_test boundary2-4.58.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY a + } +} {3 17 27 28 45} +do_test boundary2-4.58.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY a DESC + } +} {45 28 27 17 3} +do_test boundary2-4.58.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY r + } +} {27 45 17 28 3} +do_test boundary2-4.58.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY r DESC + } +} {3 28 17 45 27} +do_test boundary2-4.58.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963967 ORDER BY x + } +} {27 45 17 28 3} +do_test boundary2-4.58.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.58.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 
36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.58.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary2-4.58.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY r DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.58.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.58.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.58.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.58.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary2-4.58.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY r DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.58.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963967 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.59.1 { + db eval { + SELECT * FROM t1 WHERE r=4398046511104 + } +} {4398046511104 56 0000040000000000} +do_test boundary2-4.59.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000040000000000' + } +} {4398046511104 56} +do_test boundary2-4.59.3 { + db eval { + SELECT r, x FROM t1 WHERE a=56 + } +} {4398046511104 0000040000000000} +do_test boundary2-4.59.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary2-4.59.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-4.59.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY r + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.59.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary2-4.59.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 4398046511104 ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test 
boundary2-4.59.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary2-4.59.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary2-4.59.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY r + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.59.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary2-4.59.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 4398046511104 ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.59.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary2-4.59.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.59.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary2-4.59.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY r DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.59.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.59.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.59.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary2-4.59.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary2-4.59.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY r DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.59.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 4398046511104 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.60.1 { + db eval { + SELECT * FROM t1 WHERE r=1 + } +} {1 60 0000000000000001} +do_test boundary2-4.60.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000001' + } +} {1 60} +do_test boundary2-4.60.3 { + db eval { + SELECT r, x FROM 
t1 WHERE a=60 + } +} {1 0000000000000001} +do_test boundary2-4.60.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.60.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.60.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY r + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.60.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary2-4.60.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 1 ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.60.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary2-4.60.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.60.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY r + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.60.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary2-4.60.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 1 ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.60.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary2-4.60.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.60.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary2-4.60.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY r DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.60.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 1 ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.60.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.60.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.60.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary2-4.60.le.4 { + db eval { + SELECT a FROM t1 
WHERE r <= 1 ORDER BY r DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.60.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 1 ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.61.1 { + db eval { + SELECT * FROM t1 WHERE r=36028797018963968 + } +} {36028797018963968 45 0080000000000000} +do_test boundary2-4.61.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0080000000000000' + } +} {36028797018963968 45} +do_test boundary2-4.61.3 { + db eval { + SELECT r, x FROM t1 WHERE a=45 + } +} {36028797018963968 0080000000000000} +do_test boundary2-4.61.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY a + } +} {3 17 28} +do_test boundary2-4.61.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY a DESC + } +} {28 17 3} +do_test boundary2-4.61.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY r + } +} {17 28 3} +do_test boundary2-4.61.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY r DESC + } +} {3 28 17} +do_test boundary2-4.61.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 36028797018963968 ORDER BY x + } +} {17 28 3} +do_test boundary2-4.61.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY a + } +} {3 17 28 45} +do_test boundary2-4.61.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY a DESC + } +} {45 28 17 3} +do_test boundary2-4.61.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY r + } +} {45 17 28 3} +do_test boundary2-4.61.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY r DESC + } +} {3 28 17 45} +do_test boundary2-4.61.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 36028797018963968 ORDER BY x + } +} {45 17 28 3} +do_test boundary2-4.61.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.61.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.61.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary2-4.61.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY r DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.61.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.61.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test 
boundary2-4.61.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary2-4.61.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary2-4.61.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY r DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.61.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 36028797018963968 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.62.1 { + db eval { + SELECT * FROM t1 WHERE r=-2147483649 + } +} {-2147483649 47 ffffffff7fffffff} +do_test boundary2-4.62.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ffffffff7fffffff' + } +} {-2147483649 47} +do_test boundary2-4.62.3 { + db eval { + SELECT r, x FROM t1 WHERE a=47 + } +} {-2147483649 ffffffff7fffffff} +do_test boundary2-4.62.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.62.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.62.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY r + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.62.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary2-4.62.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.62.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary2-4.62.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.62.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY r + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 
12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.62.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary2-4.62.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -2147483649 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.62.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY a + } +} {2 21 44 55 58 63 64} +do_test boundary2-4.62.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary2-4.62.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY r + } +} {55 2 64 21 44 58 63} +do_test boundary2-4.62.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY r DESC + } +} {63 58 44 21 64 2 55} +do_test boundary2-4.62.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary2-4.62.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY a + } +} {2 21 44 47 55 58 63 64} +do_test boundary2-4.62.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary2-4.62.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY r + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-4.62.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY r DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary2-4.62.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -2147483649 ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary2-4.63.1 { + db eval { + SELECT * FROM t1 WHERE r=-36028797018963969 + } +} {-36028797018963969 2 ff7fffffffffffff} +do_test boundary2-4.63.2 { + db eval { + SELECT r, a FROM t1 WHERE x='ff7fffffffffffff' + } +} {-36028797018963969 2} +do_test boundary2-4.63.3 { + db eval { + SELECT r, x FROM t1 WHERE a=2 + } +} {-36028797018963969 ff7fffffffffffff} +do_test boundary2-4.63.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.63.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary2-4.63.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY r + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.63.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary2-4.63.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -36028797018963969 ORDER BY x + } +} {59 60 41 5 31 
4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.63.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.63.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.63.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY r + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.63.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary2-4.63.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -36028797018963969 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.63.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY a + } +} {55} +do_test boundary2-4.63.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY a DESC + } +} {55} +do_test boundary2-4.63.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY r + } +} {55} +do_test boundary2-4.63.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY r DESC + } +} {55} +do_test boundary2-4.63.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -36028797018963969 ORDER BY x + } +} {55} +do_test boundary2-4.63.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY a + } +} {2 55} +do_test boundary2-4.63.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY a DESC + } +} {55 2} +do_test boundary2-4.63.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY r + } +} {55 2} +do_test boundary2-4.63.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY r DESC + } +} {2 55} +do_test boundary2-4.63.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -36028797018963969 ORDER BY x + } +} {55 2} +do_test boundary2-4.64.1 { + db eval { + SELECT * FROM t1 WHERE r=3 + } +} {3 5 0000000000000003} +do_test boundary2-4.64.2 { + db eval { + SELECT r, a FROM t1 WHERE x='0000000000000003' + } +} {3 5} +do_test boundary2-4.64.3 { + db eval { + SELECT r, x FROM t1 WHERE a=5 + } +} {3 0000000000000003} +do_test boundary2-4.64.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.64.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test 
boundary2-4.64.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY r + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.64.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary2-4.64.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 3 ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.64.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary2-4.64.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary2-4.64.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY r + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.64.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary2-4.64.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 3 ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.64.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.64.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary2-4.64.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary2-4.64.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY r DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.64.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 3 ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.64.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary2-4.64.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary2-4.64.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary2-4.64.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY r DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.64.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 3 ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.65.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-4.65.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY a DESC + 
} +} {} +do_test boundary2-4.65.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-4.65.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-4.65.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary2-4.65.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-4.65.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-4.65.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-4.65.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-4.65.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= 9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary2-4.65.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.65.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.65.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.65.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.65.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.65.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.65.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.65.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.65.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 
45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.65.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= 9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.66.gt.1 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.66.gt.2 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.66.gt.3 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.66.gt.4 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.66.gt.5 { + db eval { + SELECT a FROM t1 WHERE r > -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary2-4.66.ge.1 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary2-4.66.ge.2 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary2-4.66.ge.3 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY r + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary2-4.66.ge.4 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY r DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary2-4.66.ge.5 { + db eval { + SELECT a FROM t1 WHERE r >= -9.22337303685477580800e+18 ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test 
boundary2-4.66.lt.1 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-4.66.lt.2 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-4.66.lt.3 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-4.66.lt.4 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-4.66.lt.5 { + db eval { + SELECT a FROM t1 WHERE r < -9.22337303685477580800e+18 ORDER BY x + } +} {} +do_test boundary2-4.66.le.1 { + db eval { + SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY a + } +} {} +do_test boundary2-4.66.le.2 { + db eval { + SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY a DESC + } +} {} +do_test boundary2-4.66.le.3 { + db eval { + SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY r + } +} {} +do_test boundary2-4.66.le.4 { + db eval { + SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY r DESC + } +} {} +do_test boundary2-4.66.le.5 { + db eval { + SELECT a FROM t1 WHERE r <= -9.22337303685477580800e+18 ORDER BY x + } +} {} +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary3.tcl --- sqlite3-3.4.2/test/boundary3.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary3.tcl 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,290 @@ +puts {# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary3.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. +if {![working_64bit_int]} { finish_test; return } +} + +expr srand(0) + +# Generate interesting boundary numbers +# +foreach x { + 0 + 1 + 0x7f + 0x7fff + 0x7fffff + 0x7fffffff + 0x7fffffffff + 0x7fffffffffff + 0x7fffffffffffff + 0x7fffffffffffffff +} { + set x [expr {wide($x)}] + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set boundarynum([expr {-($x+1)}]) 1 + set boundarynum([expr {-($x+2)}]) 1 + set boundarynum([expr {$x+$x+1}]) 1 + set boundarynum([expr {$x+$x+2}]) 1 +} +set x [expr {wide(127)}] +for {set i 1} {$i<=9} {incr i} { + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set x [expr {wide($x*128 + 127)}] +} + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# A simple selection sort. Not trying to be efficient. 
+# +proc sort {inlist} { + set outlist {} + set mn [lindex $inlist 0] + foreach x $inlist { + if {$x<$mn} {set mn $x} + } + set outlist $mn + set mx $mn + while {1} { + set valid 0 + foreach x $inlist { + if {$x>$mx && (!$valid || $mn>$x)} { + set mn $x + set valid 1 + } + } + if {!$valid} break + lappend outlist $mn + set mx $mn + } + return $outlist +} + +# Reverse the order of a list +# +proc reverse {inlist} { + set i [llength $inlist] + set outlist {} + for {incr i -1} {$i>=0} {incr i -1} { + lappend outlist [lindex $inlist $i] + } + return $outlist +} + +set nums1 [scramble [array names boundarynum]] +set nums2 [scramble [array names boundarynum]] + +set tname boundary3 +puts "do_test $tname-1.1 \173" +puts " db eval \173" +puts " CREATE TABLE t1(a,x);" +set a 0 +foreach r $nums1 { + incr a + set t1ra($r) $a + set t1ar($a) $r + set x [format %08x%08x [expr {wide($r)>>32}] $r] + set t1rx($r) $x + set t1xr($x) $r + puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');" +} +puts " CREATE INDEX t1i1 ON t1(a);" +puts " CREATE INDEX t1i2 ON t1(x);" +puts " \175" +puts "\175 {}" + +puts "do_test $tname-1.2 \173" +puts " db eval \173" +puts " SELECT count(*) FROM t1" +puts " \175" +puts "\175 {64}" + +puts "do_test $tname-1.3 \173" +puts " db eval \173" +puts " CREATE TABLE t2(r,a);" +puts " INSERT INTO t2 SELECT rowid, a FROM t1;" +puts " CREATE INDEX t2i1 ON t2(r);" +puts " CREATE INDEX t2i2 ON t2(a);" +puts " INSERT INTO t2 VALUES(9.22337303685477580800e+18,65);" +set t1ra(9.22337303685477580800e+18) 65 +set t1ar(65) 9.22337303685477580800e+18) +puts " INSERT INTO t2 VALUES(-9.22337303685477580800e+18,66);" +set t1ra(-9.22337303685477580800e+18) 66 +set t1ar(66) -9.22337303685477580800e+18) +puts " SELECT count(*) FROM t2;" +puts " \175" +puts "\175 {66}" + +set nums3 $nums2 +lappend nums3 9.22337303685477580800e+18 +lappend nums3 -9.22337303685477580800e+18 + +set i 0 +foreach r $nums3 { + incr i + + set r5 $r.5 + set r0 $r.0 + if {abs($r)<9.22337203685477580800e+18} { + set x $t1rx($r) + set a $t1ra($r) + puts "do_test $tname-2.$i.1 \173" + puts " db eval \173" + puts " SELECT t1.* FROM t1, t2 WHERE t1.rowid=$r AND t2.a=t1.a" + puts " \175" + puts "\175 {$a $x}" + puts "do_test $tname-2.$i.2 \173" + puts " db eval \173" + puts " SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='$x'" + puts " \175" + puts "\175 {$r $a}" + puts "do_test $tname-2.$i.3 \173" + puts " db eval \173" + puts " SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=$a" + puts " \175" + puts "\175 {$r $x}" + } + + foreach op {> >= < <=} subno {gt ge lt le} { + + ################################################################ 2.x.y.1 + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r" { + lappend rset $rx + lappend aset $t1ra($rx) + } + } + puts "do_test $tname-2.$i.$subno.1 \173" + puts " db eval \173" + puts " SELECT t2.a FROM t1 JOIN t2 USING(a)" + puts " WHERE t1.rowid $op $r ORDER BY t2.a" + puts " \175" + puts "\175 {[sort $aset]}" + + ################################################################ 2.x.y.2 + puts "do_test $tname-2.$i.$subno.2 \173" + puts " db eval \173" + puts " SELECT t2.a FROM t2 NATURAL JOIN t1" + puts " WHERE t1.rowid $op $r ORDER BY t1.a DESC" + puts " \175" + puts "\175 {[reverse [sort $aset]]}" + + + ################################################################ 2.x.y.3 + set ax $t1ra($r) + set aset {} + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.3 \173" + puts " db eval \173" + puts " SELECT t1.a FROM t1 JOIN t2 
ON t1.rowid $op t2.r" + puts " WHERE t2.a=$ax" + puts " ORDER BY t1.rowid" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.4 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.4 \173" + puts " db eval \173" + puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op t2.r" + puts " WHERE t2.a=$ax" + puts " ORDER BY t1.rowid DESC" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.5 + set aset {} + set xset {} + foreach rx $rset { + lappend xset $t1rx($rx) + } + foreach x [sort $xset] { + set rx $t1xr($x) + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.5 \173" + puts " db eval \173" + puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op t2.r" + puts " WHERE t2.a=$ax" + puts " ORDER BY x" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.10 + if {[string length $r5]>15} continue + set rset {} + set aset {} + foreach rx $nums2 { + if "\$rx $op \$r0" { + lappend rset $rx + } + } + foreach rx [sort $rset] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.10 \173" + puts " db eval \173" + puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op CAST(t2.r AS real)" + puts " WHERE t2.a=$ax" + puts " ORDER BY t1.rowid" + puts " \175" + puts "\175 {$aset}" + + ################################################################ 2.x.y.11 + set aset {} + foreach rx [reverse [sort $rset]] { + lappend aset $t1ra($rx) + } + puts "do_test $tname-2.$i.$subno.11 \173" + puts " db eval \173" + puts " SELECT t1.a FROM t1 JOIN t2 ON t1.rowid $op CAST(t2.r AS real)" + puts " WHERE t2.a=$ax" + puts " ORDER BY t1.rowid DESC" + puts " \175" + puts "\175 {$aset}" + } + +} + + +puts {finish_test} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary3.test --- sqlite3-3.4.2/test/boundary3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary3.test 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,12456 @@ +# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary3.test,v 1.2 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. 
+if {![working_64bit_int]} { finish_test; return } + +do_test boundary3-1.1 { + db eval { + CREATE TABLE t1(a,x); + INSERT INTO t1(oid,a,x) VALUES(-8388609,1,'ffffffffff7fffff'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963969,2,'ff7fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(9223372036854775807,3,'7fffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(127,4,'000000000000007f'); + INSERT INTO t1(oid,a,x) VALUES(3,5,'0000000000000003'); + INSERT INTO t1(oid,a,x) VALUES(16777216,6,'0000000001000000'); + INSERT INTO t1(oid,a,x) VALUES(4398046511103,7,'000003ffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(16383,8,'0000000000003fff'); + INSERT INTO t1(oid,a,x) VALUES(16777215,9,'0000000000ffffff'); + INSERT INTO t1(oid,a,x) VALUES(281474976710655,10,'0000ffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483648,11,'ffffffff80000000'); + INSERT INTO t1(oid,a,x) VALUES(268435455,12,'000000000fffffff'); + INSERT INTO t1(oid,a,x) VALUES(562949953421311,13,'0001ffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(4294967295,14,'00000000ffffffff'); + INSERT INTO t1(oid,a,x) VALUES(2097151,15,'00000000001fffff'); + INSERT INTO t1(oid,a,x) VALUES(16384,16,'0000000000004000'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927935,17,'00ffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(8388607,18,'00000000007fffff'); + INSERT INTO t1(oid,a,x) VALUES(1099511627776,19,'0000010000000000'); + INSERT INTO t1(oid,a,x) VALUES(2147483647,20,'000000007fffffff'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355329,21,'ffff7fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(34359738368,22,'0000000800000000'); + INSERT INTO t1(oid,a,x) VALUES(32767,23,'0000000000007fff'); + INSERT INTO t1(oid,a,x) VALUES(8388608,24,'0000000000800000'); + INSERT INTO t1(oid,a,x) VALUES(140737488355327,25,'00007fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(281474976710656,26,'0001000000000000'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963967,27,'007fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927936,28,'0100000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-32769,29,'ffffffffffff7fff'); + INSERT INTO t1(oid,a,x) VALUES(255,30,'00000000000000ff'); + INSERT INTO t1(oid,a,x) VALUES(4,31,'0000000000000004'); + INSERT INTO t1(oid,a,x) VALUES(-32768,32,'ffffffffffff8000'); + INSERT INTO t1(oid,a,x) VALUES(-2,33,'fffffffffffffffe'); + INSERT INTO t1(oid,a,x) VALUES(140737488355328,34,'0000800000000000'); + INSERT INTO t1(oid,a,x) VALUES(549755813888,35,'0000008000000000'); + INSERT INTO t1(oid,a,x) VALUES(4294967296,36,'0000000100000000'); + INSERT INTO t1(oid,a,x) VALUES(-8388608,37,'ffffffffff800000'); + INSERT INTO t1(oid,a,x) VALUES(-1,38,'ffffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(34359738367,39,'00000007ffffffff'); + INSERT INTO t1(oid,a,x) VALUES(268435456,40,'0000000010000000'); + INSERT INTO t1(oid,a,x) VALUES(2,41,'0000000000000002'); + INSERT INTO t1(oid,a,x) VALUES(2097152,42,'0000000000200000'); + INSERT INTO t1(oid,a,x) VALUES(562949953421312,43,'0002000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355328,44,'ffff800000000000'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963968,45,'0080000000000000'); + INSERT INTO t1(oid,a,x) VALUES(549755813887,46,'0000007fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483649,47,'ffffffff7fffffff'); + INSERT INTO t1(oid,a,x) VALUES(65535,48,'000000000000ffff'); + INSERT INTO t1(oid,a,x) VALUES(128,49,'0000000000000080'); + INSERT INTO t1(oid,a,x) VALUES(32768,50,'0000000000008000'); + INSERT INTO t1(oid,a,x) VALUES(2147483648,51,'0000000080000000'); 
+ INSERT INTO t1(oid,a,x) VALUES(-3,52,'fffffffffffffffd'); + INSERT INTO t1(oid,a,x) VALUES(-128,53,'ffffffffffffff80'); + INSERT INTO t1(oid,a,x) VALUES(-129,54,'ffffffffffffff7f'); + INSERT INTO t1(oid,a,x) VALUES(-9223372036854775808,55,'8000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(4398046511104,56,'0000040000000000'); + INSERT INTO t1(oid,a,x) VALUES(1099511627775,57,'000000ffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-549755813889,58,'ffffff7fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(0,59,'0000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(1,60,'0000000000000001'); + INSERT INTO t1(oid,a,x) VALUES(256,61,'0000000000000100'); + INSERT INTO t1(oid,a,x) VALUES(65536,62,'0000000000010000'); + INSERT INTO t1(oid,a,x) VALUES(-549755813888,63,'ffffff8000000000'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963968,64,'ff80000000000000'); + CREATE INDEX t1i1 ON t1(a); + CREATE INDEX t1i2 ON t1(x); + } +} {} +do_test boundary3-1.2 { + db eval { + SELECT count(*) FROM t1 + } +} {64} +do_test boundary3-1.3 { + db eval { + CREATE TABLE t2(r,a); + INSERT INTO t2 SELECT rowid, a FROM t1; + CREATE INDEX t2i1 ON t2(r); + CREATE INDEX t2i2 ON t2(a); + INSERT INTO t2 VALUES(9.22337303685477580800e+18,65); + INSERT INTO t2 VALUES(-9.22337303685477580800e+18,66); + SELECT count(*) FROM t2; + } +} {66} +do_test boundary3-2.1.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=72057594037927935 AND t2.a=t1.a + } +} {17 00ffffffffffffff} +do_test boundary3-2.1.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00ffffffffffffff' + } +} {72057594037927935 17} +do_test boundary3-2.1.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=17 + } +} {72057594037927935 00ffffffffffffff} +do_test boundary3-2.1.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 72057594037927935 ORDER BY t2.a + } +} {3 28} +do_test boundary3-2.1.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 72057594037927935 ORDER BY t1.a DESC + } +} {28 3} +do_test boundary3-2.1.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=17 + ORDER BY t1.rowid + } +} {28 3} +do_test boundary3-2.1.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=17 + ORDER BY t1.rowid DESC + } +} {3 28} +do_test boundary3-2.1.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=17 + ORDER BY x + } +} {28 3} +do_test boundary3-2.1.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 72057594037927935 ORDER BY t2.a + } +} {3 17 28} +do_test boundary3-2.1.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 72057594037927935 ORDER BY t1.a DESC + } +} {28 17 3} +do_test boundary3-2.1.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=17 + ORDER BY t1.rowid + } +} {17 28 3} +do_test boundary3-2.1.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=17 + ORDER BY t1.rowid DESC + } +} {3 28 17} +do_test boundary3-2.1.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=17 + ORDER BY x + } +} {17 28 3} +do_test boundary3-2.1.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 72057594037927935 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.1.lt.2 { + 
db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 72057594037927935 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.1.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=17 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary3-2.1.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=17 + ORDER BY t1.rowid DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.1.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=17 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.1.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 72057594037927935 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.1.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 72057594037927935 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.1.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=17 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary3-2.1.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=17 + ORDER BY t1.rowid DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.1.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=17 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.2.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=16384 AND t2.a=t1.a + } +} {16 0000000000004000} +do_test boundary3-2.2.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000004000' + } +} {16384 16} +do_test boundary3-2.2.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=16 + } +} {16384 0000000000004000} +do_test boundary3-2.2.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 16384 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.2.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 16384 ORDER BY t1.a DESC + } 
+} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.2.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary3-2.2.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=16 + ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary3-2.2.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 16384 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.2.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 16384 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.2.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary3-2.2.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=16 + ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.2.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary3-2.2.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 16384 ORDER BY t2.a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.2.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 16384 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary3-2.2.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE 
t2.a=16 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary3-2.2.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.2.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=16 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.2.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary3-2.2.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.2.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 16384 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.2.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 16384 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary3-2.2.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary3-2.2.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.2.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=16 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.2.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary3-2.2.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=16 + ORDER BY t1.rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.3.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=4294967296 AND t2.a=t1.a + } +} {36 0000000100000000} +do_test boundary3-2.3.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000100000000' + } +} {4294967296 36} +do_test boundary3-2.3.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=36 + } +} {4294967296 0000000100000000} +do_test boundary3-2.3.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 4294967296 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary3-2.3.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 4294967296 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.3.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test 
boundary3-2.3.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary3-2.3.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=36 + ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.3.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.3.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary3-2.3.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 4294967296 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary3-2.3.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 4294967296 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.3.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.3.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary3-2.3.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=36 + ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.3.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.3.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary3-2.3.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 4294967296 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.3.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 4294967296 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.3.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary3-2.3.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.3.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=36 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.3.lt.10 { + db eval { + SELECT t1.a FROM t1 
JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary3-2.3.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.3.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 4294967296 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.3.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 4294967296 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.3.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary3-2.3.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.3.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=36 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.3.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary3-2.3.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=36 + ORDER BY t1.rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.4.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=16777216 AND t2.a=t1.a + } +} {6 0000000001000000} +do_test boundary3-2.4.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000001000000' + } +} {16777216 6} +do_test boundary3-2.4.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=6 + } +} {16777216 0000000001000000} +do_test boundary3-2.4.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 16777216 ORDER BY t2.a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.4.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 16777216 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary3-2.4.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 
13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary3-2.4.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=6 + ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary3-2.4.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 16777216 ORDER BY t2.a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.4.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 16777216 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary3-2.4.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary3-2.4.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=6 + ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.4.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary3-2.4.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 16777216 ORDER BY t2.a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.4.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 16777216 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary3-2.4.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary3-2.4.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.4.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=6 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.4.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=6 + 
ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary3-2.4.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.4.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 16777216 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.4.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 16777216 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary3-2.4.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary3-2.4.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.4.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=6 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.4.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary3-2.4.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=6 + ORDER BY t1.rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.5.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-32769 AND t2.a=t1.a + } +} {29 ffffffffffff7fff} +do_test boundary3-2.5.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffffff7fff' + } +} {-32769 29} +do_test boundary3-2.5.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=29 + } +} {-32769 ffffffffffff7fff} +do_test boundary3-2.5.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -32769 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.5.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -32769 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.5.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=29 + ORDER BY t1.rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.5.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=29 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 
10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary3-2.5.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=29 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary3-2.5.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=29 + ORDER BY t1.rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.5.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=29 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary3-2.5.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -32769 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.5.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -32769 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.5.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=29 + ORDER BY t1.rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.5.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=29 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary3-2.5.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=29 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary3-2.5.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=29 + ORDER BY t1.rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.5.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=29 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary3-2.5.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -32769 ORDER BY t2.a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary3-2.5.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -32769 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary3-2.5.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=29 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37} 
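Throughout these generated cases, the "ORDER BY x" results list the rows whose rowids are nonnegative (x beginning 00..7f) before the rows whose rowids are negative (x beginning 80..ff): column x holds the big-endian two's-complement hex rendering of the 64-bit rowid and is compared as text. The short Tcl sketch below shows that encoding, using the same format idiom as the generator in boundary3.tcl; the helper name rowid_to_hex is illustrative only and is not part of the test suite.

    # Encode a 64-bit rowid the way boundary3.tcl builds column x:
    # the big-endian two's-complement hex string of the value.
    # %08x keeps only the low 32 bits of each argument, which is what
    # the generator relies on to produce 16 hex digits total.
    proc rowid_to_hex {r} {
        format %08x%08x [expr {wide($r)>>32}] $r
    }

For example, rowid_to_hex -32769 yields ffffffffffff7fff (row a=29 inserted above) and rowid_to_hex 127 yields 000000000000007f (row a=4), so sorting on x keeps same-sign rowids in numeric order while placing every negative rowid after every nonnegative one, which is exactly the grouping visible in the ORDER BY x result lists.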
+do_test boundary3-2.5.lt.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r
+     WHERE t2.a=29
+     ORDER BY t1.rowid DESC
+  }
+} {37 1 11 47 63 58 44 21 64 2 55}
+do_test boundary3-2.5.lt.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r
+     WHERE t2.a=29
+     ORDER BY x
+  }
+} {55 2 64 21 44 58 63 47 11 1 37}
+do_test boundary3-2.5.lt.10 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real)
+     WHERE t2.a=29
+     ORDER BY t1.rowid
+  }
+} {55 2 64 21 44 58 63 47 11 1 37}
+do_test boundary3-2.5.lt.11 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real)
+     WHERE t2.a=29
+     ORDER BY t1.rowid DESC
+  }
+} {37 1 11 47 63 58 44 21 64 2 55}
+do_test boundary3-2.5.le.1 {
+  db eval {
+    SELECT t2.a FROM t1 JOIN t2 USING(a)
+     WHERE t1.rowid <= -32769 ORDER BY t2.a
+  }
+} {1 2 11 21 29 37 44 47 55 58 63 64}
+do_test boundary3-2.5.le.2 {
+  db eval {
+    SELECT t2.a FROM t2 NATURAL JOIN t1
+     WHERE t1.rowid <= -32769 ORDER BY t1.a DESC
+  }
+} {64 63 58 55 47 44 37 29 21 11 2 1}
+do_test boundary3-2.5.le.3 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=29
+     ORDER BY t1.rowid
+  }
+} {55 2 64 21 44 58 63 47 11 1 37 29}
+do_test boundary3-2.5.le.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=29
+     ORDER BY t1.rowid DESC
+  }
+} {29 37 1 11 47 63 58 44 21 64 2 55}
+do_test boundary3-2.5.le.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=29
+     ORDER BY x
+  }
+} {55 2 64 21 44 58 63 47 11 1 37 29}
+do_test boundary3-2.5.le.10 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real)
+     WHERE t2.a=29
+     ORDER BY t1.rowid
+  }
+} {55 2 64 21 44 58 63 47 11 1 37 29}
+do_test boundary3-2.5.le.11 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real)
+     WHERE t2.a=29
+     ORDER BY t1.rowid DESC
+  }
+} {29 37 1 11 47 63 58 44 21 64 2 55}
+do_test boundary3-2.6.1 {
+  db eval {
+    SELECT t1.* FROM t1, t2 WHERE t1.rowid=-140737488355329 AND t2.a=t1.a
+  }
+} {21 ffff7fffffffffff}
+do_test boundary3-2.6.2 {
+  db eval {
+    SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffff7fffffffffff'
+  }
+} {-140737488355329 21}
+do_test boundary3-2.6.3 {
+  db eval {
+    SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=21
+  }
+} {-140737488355329 ffff7fffffffffff}
+do_test boundary3-2.6.gt.1 {
+  db eval {
+    SELECT t2.a FROM t1 JOIN t2 USING(a)
+     WHERE t1.rowid > -140737488355329 ORDER BY t2.a
+  }
+} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63}
+do_test boundary3-2.6.gt.2 {
+  db eval {
+    SELECT t2.a FROM t2 NATURAL JOIN t1
+     WHERE t1.rowid > -140737488355329 ORDER BY t1.a DESC
+  }
+} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1}
+do_test boundary3-2.6.gt.3 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid
+  }
+} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3}
+do_test boundary3-2.6.gt.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid DESC
+  }
+} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44}
+do_test boundary3-2.6.gt.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r
+     WHERE t2.a=21
+     ORDER BY x
+  }
+} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38}
+do_test boundary3-2.6.ge.1 {
+  db eval {
+    SELECT t2.a FROM t1 JOIN t2 USING(a)
+     WHERE t1.rowid >= -140737488355329 ORDER BY t2.a
+  }
+} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63}
+do_test boundary3-2.6.ge.2 {
+  db eval {
+    SELECT t2.a FROM t2 NATURAL JOIN t1
+     WHERE t1.rowid >= -140737488355329 ORDER BY t1.a DESC
+  }
+} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1}
+do_test boundary3-2.6.ge.3 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid
+  }
+} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3}
+do_test boundary3-2.6.ge.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid DESC
+  }
+} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21}
+do_test boundary3-2.6.ge.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r
+     WHERE t2.a=21
+     ORDER BY x
+  }
+} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38}
+do_test boundary3-2.6.lt.1 {
+  db eval {
+    SELECT t2.a FROM t1 JOIN t2 USING(a)
+     WHERE t1.rowid < -140737488355329 ORDER BY t2.a
+  }
+} {2 55 64}
+do_test boundary3-2.6.lt.2 {
+  db eval {
+    SELECT t2.a FROM t2 NATURAL JOIN t1
+     WHERE t1.rowid < -140737488355329 ORDER BY t1.a DESC
+  }
+} {64 55 2}
+do_test boundary3-2.6.lt.3 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid
+  }
+} {55 2 64}
+do_test boundary3-2.6.lt.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid DESC
+  }
+} {64 2 55}
+do_test boundary3-2.6.lt.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r
+     WHERE t2.a=21
+     ORDER BY x
+  }
+} {55 2 64}
+do_test boundary3-2.6.le.1 {
+  db eval {
+    SELECT t2.a FROM t1 JOIN t2 USING(a)
+     WHERE t1.rowid <= -140737488355329 ORDER BY t2.a
+  }
+} {2 21 55 64}
+do_test boundary3-2.6.le.2 {
+  db eval {
+    SELECT t2.a FROM t2 NATURAL JOIN t1
+     WHERE t1.rowid <= -140737488355329 ORDER BY t1.a DESC
+  }
+} {64 55 21 2}
+do_test boundary3-2.6.le.3 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid
+  }
+} {55 2 64 21}
+do_test boundary3-2.6.le.4 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=21
+     ORDER BY t1.rowid DESC
+  }
+} {21 64 2 55}
+do_test boundary3-2.6.le.5 {
+  db eval {
+    SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r
+     WHERE t2.a=21
+     ORDER BY x
+  }
+} {55 2 64 21}
+do_test boundary3-2.7.1 {
+  db eval {
+    SELECT t1.* FROM t1, t2 WHERE t1.rowid=2 AND t2.a=t1.a
+  }
+} {41 0000000000000002}
+do_test boundary3-2.7.2 {
+  db eval
{ + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000002' + } +} {2 41} +do_test boundary3-2.7.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=41 + } +} {2 0000000000000002} +do_test boundary3-2.7.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 2 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.7.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 2 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.7.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary3-2.7.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=41 + ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary3-2.7.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 2 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.7.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 2 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.7.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary3-2.7.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=41 + ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 
46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.7.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary3-2.7.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 2 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.7.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 2 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.7.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary3-2.7.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.7.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=41 + ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.7.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary3-2.7.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.7.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 2 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.7.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 2 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.7.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary3-2.7.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.7.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=41 + ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.7.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary3-2.7.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=41 + ORDER BY t1.rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.8.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=4 AND t2.a=t1.a + } +} {31 0000000000000004} +do_test boundary3-2.8.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000004' + } +} {4 31} +do_test boundary3-2.8.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=31 + } +} {4 0000000000000004} +do_test boundary3-2.8.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN 
t2 USING(a) + WHERE t1.rowid > 4 ORDER BY t2.a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.8.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 4 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary3-2.8.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary3-2.8.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=31 + ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary3-2.8.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 4 ORDER BY t2.a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.8.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 4 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary3-2.8.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary3-2.8.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=31 + ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.8.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test 
boundary3-2.8.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 4 ORDER BY t2.a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.8.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 4 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary3-2.8.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary3-2.8.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.8.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=31 + ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.8.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary3-2.8.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.8.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 4 ORDER BY t2.a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.8.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 4 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary3-2.8.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary3-2.8.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.8.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=31 + ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.8.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary3-2.8.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=31 + ORDER BY t1.rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.9.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=562949953421311 AND t2.a=t1.a + } +} {13 0001ffffffffffff} +do_test boundary3-2.9.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0001ffffffffffff' + } +} {562949953421311 13} +do_test boundary3-2.9.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=13 + } +} {562949953421311 0001ffffffffffff} +do_test boundary3-2.9.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 562949953421311 ORDER BY t2.a + } +} {3 17 27 28 43 45} +do_test boundary3-2.9.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 562949953421311 ORDER BY t1.a DESC + } +} 
{45 43 28 27 17 3} +do_test boundary3-2.9.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=13 + ORDER BY t1.rowid + } +} {43 27 45 17 28 3} +do_test boundary3-2.9.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=13 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43} +do_test boundary3-2.9.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=13 + ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary3-2.9.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 562949953421311 ORDER BY t2.a + } +} {3 13 17 27 28 43 45} +do_test boundary3-2.9.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 562949953421311 ORDER BY t1.a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary3-2.9.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=13 + ORDER BY t1.rowid + } +} {13 43 27 45 17 28 3} +do_test boundary3-2.9.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=13 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13} +do_test boundary3-2.9.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=13 + ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary3-2.9.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 562949953421311 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.9.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 562949953421311 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.9.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=13 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary3-2.9.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=13 + ORDER BY t1.rowid DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.9.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=13 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.9.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 562949953421311 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.9.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 562949953421311 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.9.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=13 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 
29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary3-2.9.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=13 + ORDER BY t1.rowid DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.9.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=13 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.10.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=256 AND t2.a=t1.a + } +} {61 0000000000000100} +do_test boundary3-2.10.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000100' + } +} {256 61} +do_test boundary3-2.10.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=61 + } +} {256 0000000000000100} +do_test boundary3-2.10.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 256 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.10.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 256 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.10.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.10.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary3-2.10.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=61 + ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.10.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.10.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary3-2.10.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 256 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary3-2.10.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 256 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.10.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 
13 43 27 45 17 28 3} +do_test boundary3-2.10.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary3-2.10.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=61 + ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.10.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.10.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary3-2.10.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 256 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.10.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 256 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary3-2.10.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary3-2.10.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.10.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=61 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.10.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary3-2.10.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.10.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 256 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.10.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 256 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary3-2.10.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary3-2.10.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.10.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=61 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 
44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.10.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary3-2.10.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=61 + ORDER BY t1.rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.11.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=34359738368 AND t2.a=t1.a + } +} {22 0000000800000000} +do_test boundary3-2.11.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000800000000' + } +} {34359738368 22} +do_test boundary3-2.11.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=22 + } +} {34359738368 0000000800000000} +do_test boundary3-2.11.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 34359738368 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary3-2.11.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 34359738368 ORDER BY t1.a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.11.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary3-2.11.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=22 + ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary3-2.11.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 34359738368 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary3-2.11.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 34359738368 ORDER BY t1.a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.11.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary3-2.11.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=22 + ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.11.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid 
DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary3-2.11.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 34359738368 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.11.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 34359738368 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.11.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary3-2.11.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.11.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=22 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.11.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary3-2.11.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.11.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 34359738368 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.11.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 34359738368 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.11.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary3-2.11.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.11.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=22 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.11.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 
4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary3-2.11.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=22 + ORDER BY t1.rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.12.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=65536 AND t2.a=t1.a + } +} {62 0000000000010000} +do_test boundary3-2.12.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000010000' + } +} {65536 62} +do_test boundary3-2.12.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=62 + } +} {65536 0000000000010000} +do_test boundary3-2.12.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 65536 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary3-2.12.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 65536 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.12.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary3-2.12.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=62 + ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary3-2.12.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 65536 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary3-2.12.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 65536 ORDER BY t1.a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.12.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary3-2.12.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=62 + ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.ge.10 { + db eval { + SELECT 
t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.12.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary3-2.12.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 65536 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.12.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 65536 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.12.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary3-2.12.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.12.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=62 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.12.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary3-2.12.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.12.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 65536 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.12.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 65536 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.12.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary3-2.12.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.12.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=62 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.12.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=62 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary3-2.12.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r 
AS real) + WHERE t2.a=62 + ORDER BY t1.rowid DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.13.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=268435456 AND t2.a=t1.a + } +} {40 0000000010000000} +do_test boundary3-2.13.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000010000000' + } +} {268435456 40} +do_test boundary3-2.13.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=40 + } +} {268435456 0000000010000000} +do_test boundary3-2.13.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 268435456 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary3-2.13.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 268435456 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary3-2.13.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary3-2.13.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=40 + ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary3-2.13.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 268435456 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.13.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 268435456 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary3-2.13.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary3-2.13.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=40 + ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.13.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary3-2.13.lt.1 { + db eval { + 
SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 268435456 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.13.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 268435456 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.13.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary3-2.13.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.13.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=40 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.13.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary3-2.13.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.13.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 268435456 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.13.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 268435456 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.13.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary3-2.13.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.13.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=40 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.13.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary3-2.13.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=40 + ORDER BY t1.rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test 
boundary3-2.14.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-140737488355328 AND t2.a=t1.a + } +} {44 ffff800000000000} +do_test boundary3-2.14.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffff800000000000' + } +} {-140737488355328 44} +do_test boundary3-2.14.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=44 + } +} {-140737488355328 ffff800000000000} +do_test boundary3-2.14.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -140737488355328 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary3-2.14.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -140737488355328 ORDER BY t1.a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.14.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=44 + ORDER BY t1.rowid + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.14.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=44 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary3-2.14.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=44 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.14.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -140737488355328 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary3-2.14.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -140737488355328 ORDER BY t1.a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.14.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=44 + ORDER BY t1.rowid + } +} {44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.14.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=44 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44} +do_test boundary3-2.14.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=44 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.14.lt.1 { + db 
eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -140737488355328 ORDER BY t2.a + } +} {2 21 55 64} +do_test boundary3-2.14.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -140737488355328 ORDER BY t1.a DESC + } +} {64 55 21 2} +do_test boundary3-2.14.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=44 + ORDER BY t1.rowid + } +} {55 2 64 21} +do_test boundary3-2.14.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=44 + ORDER BY t1.rowid DESC + } +} {21 64 2 55} +do_test boundary3-2.14.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=44 + ORDER BY x + } +} {55 2 64 21} +do_test boundary3-2.14.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -140737488355328 ORDER BY t2.a + } +} {2 21 44 55 64} +do_test boundary3-2.14.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -140737488355328 ORDER BY t1.a DESC + } +} {64 55 44 21 2} +do_test boundary3-2.14.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=44 + ORDER BY t1.rowid + } +} {55 2 64 21 44} +do_test boundary3-2.14.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=44 + ORDER BY t1.rowid DESC + } +} {44 21 64 2 55} +do_test boundary3-2.14.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=44 + ORDER BY x + } +} {55 2 64 21 44} +do_test boundary3-2.15.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=1099511627776 AND t2.a=t1.a + } +} {19 0000010000000000} +do_test boundary3-2.15.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000010000000000' + } +} {1099511627776 19} +do_test boundary3-2.15.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=19 + } +} {1099511627776 0000010000000000} +do_test boundary3-2.15.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 1099511627776 ORDER BY t2.a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary3-2.15.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 1099511627776 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary3-2.15.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary3-2.15.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=19 + ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary3-2.15.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 1099511627776 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary3-2.15.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 1099511627776 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.15.ge.3 { + 
db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary3-2.15.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=19 + ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.15.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary3-2.15.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 1099511627776 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.15.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 1099511627776 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.15.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary3-2.15.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.15.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=19 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.15.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary3-2.15.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.15.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 1099511627776 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.15.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 1099511627776 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.15.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= 
t2.r + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary3-2.15.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.15.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=19 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.15.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary3-2.15.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=19 + ORDER BY t1.rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.16.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 9223372036854775807 ORDER BY t2.a + } +} {} +do_test boundary3-2.16.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 9223372036854775807 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.16.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=3 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.16.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=3 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.16.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=3 + ORDER BY x + } +} {} +do_test boundary3-2.16.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 9223372036854775807 ORDER BY t2.a + } +} {3} +do_test boundary3-2.16.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 9223372036854775807 ORDER BY t1.a DESC + } +} {3} +do_test boundary3-2.16.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=3 + ORDER BY t1.rowid + } +} {3} +do_test boundary3-2.16.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=3 + ORDER BY t1.rowid DESC + } +} {3} +do_test boundary3-2.16.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=3 + ORDER BY x + } +} {3} +do_test boundary3-2.16.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 9223372036854775807 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.16.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 9223372036854775807 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.16.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=3 + ORDER BY 
t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary3-2.16.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=3 + ORDER BY t1.rowid DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.16.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=3 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.16.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 9223372036854775807 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.16.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 9223372036854775807 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.16.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=3 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.16.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=3 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.16.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=3 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.17.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=32768 AND t2.a=t1.a + } +} {50 0000000000008000} +do_test boundary3-2.17.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000008000' + } +} {32768 50} +do_test boundary3-2.17.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=50 + } +} {32768 0000000000008000} +do_test boundary3-2.17.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 32768 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary3-2.17.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 32768 ORDER BY t1.a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.17.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test 
boundary3-2.17.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary3-2.17.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=50 + ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.17.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.17.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary3-2.17.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 32768 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.17.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 32768 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.17.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.17.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary3-2.17.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=50 + ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.17.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.17.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary3-2.17.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 32768 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.17.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 32768 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.17.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary3-2.17.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.17.lt.5 { + db eval { + 
SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=50 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.17.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary3-2.17.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.17.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 32768 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.17.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 32768 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.17.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary3-2.17.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.17.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=50 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.17.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary3-2.17.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=50 + ORDER BY t1.rowid DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.18.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-36028797018963968 AND t2.a=t1.a + } +} {64 ff80000000000000} +do_test boundary3-2.18.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ff80000000000000' + } +} {-36028797018963968 64} +do_test boundary3-2.18.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=64 + } +} {-36028797018963968 ff80000000000000} +do_test boundary3-2.18.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -36028797018963968 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary3-2.18.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -36028797018963968 ORDER BY t1.a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.18.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=64 + ORDER BY t1.rowid + } +} {21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 
18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.18.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=64 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21} +do_test boundary3-2.18.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=64 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.18.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -36028797018963968 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.18.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -36028797018963968 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.18.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=64 + ORDER BY t1.rowid + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.18.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=64 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary3-2.18.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=64 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.18.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -36028797018963968 ORDER BY t2.a + } +} {2 55} +do_test boundary3-2.18.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -36028797018963968 ORDER BY t1.a DESC + } +} {55 2} +do_test boundary3-2.18.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=64 + ORDER BY t1.rowid + } +} {55 2} +do_test boundary3-2.18.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=64 + ORDER BY t1.rowid DESC + } +} {2 55} +do_test boundary3-2.18.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=64 + ORDER BY x + } +} {55 2} +do_test boundary3-2.18.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -36028797018963968 ORDER BY t2.a + } +} {2 55 64} +do_test boundary3-2.18.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -36028797018963968 ORDER BY t1.a DESC + } +} {64 55 2} +do_test boundary3-2.18.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=64 + ORDER BY t1.rowid + } +} {55 2 64} +do_test boundary3-2.18.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON 
t1.rowid <= t2.r + WHERE t2.a=64 + ORDER BY t1.rowid DESC + } +} {64 2 55} +do_test boundary3-2.18.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=64 + ORDER BY x + } +} {55 2 64} +do_test boundary3-2.19.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=65535 AND t2.a=t1.a + } +} {48 000000000000ffff} +do_test boundary3-2.19.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000000000000ffff' + } +} {65535 48} +do_test boundary3-2.19.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=48 + } +} {65535 000000000000ffff} +do_test boundary3-2.19.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 65535 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57 62} +do_test boundary3-2.19.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 65535 ORDER BY t1.a DESC + } +} {62 57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.19.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary3-2.19.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=48 + ORDER BY x + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62} +do_test boundary3-2.19.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 65535 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 51 56 57 62} +do_test boundary3-2.19.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 65535 ORDER BY t1.a DESC + } +} {62 57 56 51 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.19.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary3-2.19.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=48 + ORDER BY x + } +} {48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {48 62 15 42 18 
24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.19.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48} +do_test boundary3-2.19.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 65535 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.19.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 65535 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.19.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary3-2.19.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.19.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=48 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.19.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50} +do_test boundary3-2.19.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.19.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 65535 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.19.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 65535 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.19.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary3-2.19.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.19.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=48 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.19.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48} +do_test boundary3-2.19.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=48 + ORDER BY t1.rowid DESC + } +} {48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 
55} +do_test boundary3-2.20.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=4294967295 AND t2.a=t1.a + } +} {14 00000000ffffffff} +do_test boundary3-2.20.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00000000ffffffff' + } +} {4294967295 14} +do_test boundary3-2.20.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=14 + } +} {4294967295 00000000ffffffff} +do_test boundary3-2.20.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 4294967295 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary3-2.20.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 4294967295 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.20.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary3-2.20.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=14 + ORDER BY x + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36} +do_test boundary3-2.20.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 4294967295 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary3-2.20.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 4294967295 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary3-2.20.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary3-2.20.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=14 + ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.20.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary3-2.20.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 4294967295 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.20.lt.2 { + db eval { + SELECT t2.a 
FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 4294967295 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.20.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary3-2.20.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.20.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=14 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.20.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary3-2.20.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.20.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 4294967295 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.20.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 4294967295 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.20.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary3-2.20.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.20.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=14 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.20.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14} +do_test boundary3-2.20.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=14 + ORDER BY t1.rowid DESC + } +} {14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.21.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=1099511627775 AND t2.a=t1.a + } +} {57 000000ffffffffff} +do_test 
boundary3-2.21.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000000ffffffffff' + } +} {1099511627775 57} +do_test boundary3-2.21.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=57 + } +} {1099511627775 000000ffffffffff} +do_test boundary3-2.21.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 1099511627775 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56} +do_test boundary3-2.21.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 1099511627775 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.21.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary3-2.21.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=57 + ORDER BY x + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19} +do_test boundary3-2.21.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 1099511627775 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary3-2.21.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 1099511627775 ORDER BY t1.a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.21.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary3-2.21.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=57 + ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.21.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary3-2.21.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 1099511627775 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.21.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 1099511627775 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.21.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {55 
2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary3-2.21.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.21.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=57 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.21.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary3-2.21.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.21.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 1099511627775 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.21.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 1099511627775 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.21.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary3-2.21.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.21.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=57 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.21.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57} +do_test boundary3-2.21.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=57 + ORDER BY t1.rowid DESC + } +} {57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.22.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-8388608 AND t2.a=t1.a + } +} {37 ffffffffff800000} +do_test boundary3-2.22.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffff800000' + } +} {-8388608 37} +do_test 
boundary3-2.22.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=37 + } +} {-8388608 ffffffffff800000} +do_test boundary3-2.22.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -8388608 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.22.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -8388608 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.22.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.22.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary3-2.22.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=37 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 29 32 54 53 52 33 38} +do_test boundary3-2.22.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.22.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29} +do_test boundary3-2.22.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -8388608 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.22.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -8388608 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.22.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.22.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary3-2.22.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=37 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 
20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary3-2.22.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.22.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary3-2.22.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -8388608 ORDER BY t2.a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary3-2.22.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -8388608 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary3-2.22.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.22.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.22.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=37 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.22.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.22.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.22.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -8388608 ORDER BY t2.a + } +} {1 2 11 21 37 44 47 55 58 63 64} +do_test boundary3-2.22.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -8388608 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 37 21 11 2 1} +do_test boundary3-2.22.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary3-2.22.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.22.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=37 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary3-2.22.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37} +do_test boundary3-2.22.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=37 + ORDER BY t1.rowid DESC + } +} {37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.23.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=549755813888 AND t2.a=t1.a + } +} {35 0000008000000000} +do_test boundary3-2.23.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000008000000000' + } +} {549755813888 35} +do_test boundary3-2.23.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=35 + } +} {549755813888 0000008000000000} +do_test boundary3-2.23.gt.1 { 
+ db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 549755813888 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 43 45 56 57} +do_test boundary3-2.23.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 549755813888 ORDER BY t1.a DESC + } +} {57 56 45 43 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.23.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary3-2.23.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=35 + ORDER BY x + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57} +do_test boundary3-2.23.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 549755813888 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary3-2.23.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 549755813888 ORDER BY t1.a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.23.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary3-2.23.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=35 + ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.23.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary3-2.23.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 549755813888 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.23.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 549755813888 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.23.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary3-2.23.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } 
+} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.23.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=35 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.23.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary3-2.23.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.23.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 549755813888 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.23.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 549755813888 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.23.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary3-2.23.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.23.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=35 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.23.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35} +do_test boundary3-2.23.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=35 + ORDER BY t1.rowid DESC + } +} {35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.24.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=8388607 AND t2.a=t1.a + } +} {18 00000000007fffff} +do_test boundary3-2.24.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00000000007fffff' + } +} {8388607 18} +do_test boundary3-2.24.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=18 + } +} {8388607 00000000007fffff} +do_test boundary3-2.24.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 8388607 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 
34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.24.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 8388607 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.24.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary3-2.24.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=18 + ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary3-2.24.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 8388607 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.24.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 8388607 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.24.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary3-2.24.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=18 + ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.24.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary3-2.24.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 8388607 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.24.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 8388607 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary3-2.24.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {55 2 64 
21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary3-2.24.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.24.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=18 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.24.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary3-2.24.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.24.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 8388607 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.24.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 8388607 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary3-2.24.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary3-2.24.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.24.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=18 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.24.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary3-2.24.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=18 + ORDER BY t1.rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.25.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-3 AND t2.a=t1.a + } +} {52 fffffffffffffffd} +do_test boundary3-2.25.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='fffffffffffffffd' + } +} {-3 52} +do_test boundary3-2.25.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=52 + } +} {-3 fffffffffffffffd} +do_test boundary3-2.25.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -3 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.25.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -3 ORDER BY t1.a 
DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.25.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.25.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary3-2.25.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=52 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary3-2.25.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.25.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary3-2.25.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -3 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary3-2.25.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -3 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.25.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.25.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary3-2.25.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=52 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary3-2.25.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.25.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary3-2.25.lt.1 { + db 
eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -3 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary3-2.25.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -3 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.25.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.25.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.25.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=52 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.25.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.25.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.25.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -3 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.25.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -3 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.25.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.25.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.25.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=52 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.25.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.25.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=52 + ORDER BY t1.rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.26.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=0 AND t2.a=t1.a + } +} {59 0000000000000000} +do_test boundary3-2.26.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000000' + } +} {0 59} +do_test boundary3-2.26.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=59 + } +} {0 0000000000000000} +do_test boundary3-2.26.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 0 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary3-2.26.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 0 ORDER BY t1.a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.26.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=59 + 
ORDER BY t1.rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary3-2.26.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=59 + ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary3-2.26.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 0 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.26.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 0 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.26.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary3-2.26.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=59 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.26.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary3-2.26.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 0 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.26.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 0 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.26.lt.3 { + db eval { + 
SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.26.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.26.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=59 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.26.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.26.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.26.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 0 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary3-2.26.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 0 ORDER BY t1.a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.26.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary3-2.26.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.26.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=59 + ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.26.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary3-2.26.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=59 + ORDER BY t1.rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.27.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-1 AND t2.a=t1.a + } +} {38 ffffffffffffffff} +do_test boundary3-2.27.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffffffffff' + } +} {-1 38} +do_test boundary3-2.27.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=38 + } +} {-1 ffffffffffffffff} +do_test boundary3-2.27.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -1 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.27.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -1 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.27.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.27.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON 
t1.rowid > t2.r + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary3-2.27.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=38 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.27.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.27.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59} +do_test boundary3-2.27.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -1 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.27.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -1 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.27.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.27.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary3-2.27.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=38 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary3-2.27.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.27.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary3-2.27.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -1 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.27.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -1 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary3-2.27.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.27.lt.4 { + db eval { + SELECT t1.a FROM 
t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.27.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=38 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.27.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.27.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.27.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -1 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.27.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -1 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.27.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.27.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.27.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=38 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.27.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.27.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=38 + ORDER BY t1.rowid DESC + } +} {38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.28.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-2 AND t2.a=t1.a + } +} {33 fffffffffffffffe} +do_test boundary3-2.28.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='fffffffffffffffe' + } +} {-2 33} +do_test boundary3-2.28.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=33 + } +} {-2 fffffffffffffffe} +do_test boundary3-2.28.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -2 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.28.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -2 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.28.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.28.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary3-2.28.gt.5 { + 
db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=33 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 38} +do_test boundary3-2.28.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.28.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38} +do_test boundary3-2.28.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -2 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 56 57 59 60 61 62} +do_test boundary3-2.28.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -2 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.28.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.28.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary3-2.28.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=33 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 33 38} +do_test boundary3-2.28.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.28.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33} +do_test boundary3-2.28.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -2 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.28.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -2 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.28.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.28.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.28.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON 
t1.rowid < t2.r + WHERE t2.a=33 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.28.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52} +do_test boundary3-2.28.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.28.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -2 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 44 47 52 53 54 55 58 63 64} +do_test boundary3-2.28.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -2 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 52 47 44 37 33 32 29 21 11 2 1} +do_test boundary3-2.28.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.28.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.28.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=33 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.28.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33} +do_test boundary3-2.28.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=33 + ORDER BY t1.rowid DESC + } +} {33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.29.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=2097152 AND t2.a=t1.a + } +} {42 0000000000200000} +do_test boundary3-2.29.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000200000' + } +} {2097152 42} +do_test boundary3-2.29.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=42 + } +} {2097152 0000000000200000} +do_test boundary3-2.29.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 2097152 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.29.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 2097152 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.29.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.29.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary3-2.29.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=42 + ORDER BY x + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.29.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} 
+do_test boundary3-2.29.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18} +do_test boundary3-2.29.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 2097152 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary3-2.29.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 2097152 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.29.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.29.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary3-2.29.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=42 + ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.29.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.29.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary3-2.29.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 2097152 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.29.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 2097152 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary3-2.29.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary3-2.29.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.29.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=42 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.29.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary3-2.29.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.29.le.1 { + db eval { + SELECT 
t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 2097152 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.29.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 2097152 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary3-2.29.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary3-2.29.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.29.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=42 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.29.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42} +do_test boundary3-2.29.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=42 + ORDER BY t1.rowid DESC + } +} {42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.30.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=128 AND t2.a=t1.a + } +} {49 0000000000000080} +do_test boundary3-2.30.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000080' + } +} {128 49} +do_test boundary3-2.30.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=49 + } +} {128 0000000000000080} +do_test boundary3-2.30.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 128 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary3-2.30.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 128 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.30.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary3-2.30.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=49 + ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.gt.11 { 
+ db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary3-2.30.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 128 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.30.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 128 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.30.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary3-2.30.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=49 + ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.30.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary3-2.30.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 128 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.30.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 128 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary3-2.30.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary3-2.30.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.30.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=49 + ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.30.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary3-2.30.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.30.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 
USING(a) + WHERE t1.rowid <= 128 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.30.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 128 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary3-2.30.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary3-2.30.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.30.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=49 + ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.30.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary3-2.30.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=49 + ORDER BY t1.rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.31.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=255 AND t2.a=t1.a + } +} {30 00000000000000ff} +do_test boundary3-2.31.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00000000000000ff' + } +} {255 30} +do_test boundary3-2.31.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=30 + } +} {255 00000000000000ff} +do_test boundary3-2.31.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 255 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary3-2.31.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 255 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.31.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test boundary3-2.31.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=30 + ORDER BY x + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61} +do_test 
boundary3-2.31.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 255 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 50 51 56 57 61 62} +do_test boundary3-2.31.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 255 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.31.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary3-2.31.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=30 + ORDER BY x + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.31.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30} +do_test boundary3-2.31.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 255 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.31.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 255 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary3-2.31.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary3-2.31.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.31.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=30 + ORDER BY x + } +} {59 60 41 5 31 4 49 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.31.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49} +do_test boundary3-2.31.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.31.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 255 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.31.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 255 ORDER BY 
t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary3-2.31.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary3-2.31.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.31.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=30 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.31.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30} +do_test boundary3-2.31.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=30 + ORDER BY t1.rowid DESC + } +} {30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.32.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-2147483648 AND t2.a=t1.a + } +} {11 ffffffff80000000} +do_test boundary3-2.32.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffff80000000' + } +} {-2147483648 11} +do_test boundary3-2.32.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=11 + } +} {-2147483648 ffffffff80000000} +do_test boundary3-2.32.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -2147483648 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.32.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -2147483648 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.32.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.32.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary3-2.32.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=11 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.32.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.32.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 
26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary3-2.32.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -2147483648 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.32.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -2147483648 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.32.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.32.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary3-2.32.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=11 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.32.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.32.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary3-2.32.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -2147483648 ORDER BY t2.a + } +} {2 21 44 47 55 58 63 64} +do_test boundary3-2.32.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -2147483648 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary3-2.32.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47} +do_test boundary3-2.32.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary3-2.32.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=11 + ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary3-2.32.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47} +do_test boundary3-2.32.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary3-2.32.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -2147483648 ORDER BY t2.a + } +} {2 11 
21 44 47 55 58 63 64} +do_test boundary3-2.32.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -2147483648 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary3-2.32.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.32.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary3-2.32.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=11 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.32.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.32.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=11 + ORDER BY t1.rowid DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary3-2.33.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=34359738367 AND t2.a=t1.a + } +} {39 00000007ffffffff} +do_test boundary3-2.33.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00000007ffffffff' + } +} {34359738367 39} +do_test boundary3-2.33.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=39 + } +} {34359738367 00000007ffffffff} +do_test boundary3-2.33.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 34359738367 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary3-2.33.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 34359738367 ORDER BY t1.a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.33.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary3-2.33.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=39 + ORDER BY x + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22} +do_test boundary3-2.33.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 34359738367 ORDER BY t2.a + } +} {3 7 10 13 17 19 22 25 26 27 28 34 35 39 43 45 46 56 57} +do_test boundary3-2.33.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 34359738367 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 35 34 28 27 26 25 22 19 17 13 10 7 3} +do_test boundary3-2.33.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 
26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary3-2.33.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=39 + ORDER BY x + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.33.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39} +do_test boundary3-2.33.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 34359738367 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.33.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 34359738367 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.33.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary3-2.33.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.33.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=39 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.33.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36} +do_test boundary3-2.33.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.33.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 34359738367 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.33.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 34359738367 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.33.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary3-2.33.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 
41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.33.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=39 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.33.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39} +do_test boundary3-2.33.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=39 + ORDER BY t1.rowid DESC + } +} {39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.34.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-549755813889 AND t2.a=t1.a + } +} {58 ffffff7fffffffff} +do_test boundary3-2.34.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffff7fffffffff' + } +} {-549755813889 58} +do_test boundary3-2.34.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=58 + } +} {-549755813889 ffffff7fffffffff} +do_test boundary3-2.34.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -549755813889 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary3-2.34.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -549755813889 ORDER BY t1.a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.34.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.34.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary3-2.34.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=58 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.34.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.34.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary3-2.34.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 
USING(a) + WHERE t1.rowid >= -549755813889 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63} +do_test boundary3-2.34.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -549755813889 ORDER BY t1.a DESC + } +} {63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.34.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.34.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary3-2.34.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=58 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.34.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.34.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58} +do_test boundary3-2.34.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -549755813889 ORDER BY t2.a + } +} {2 21 44 55 64} +do_test boundary3-2.34.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -549755813889 ORDER BY t1.a DESC + } +} {64 55 44 21 2} +do_test boundary3-2.34.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {55 2 64 21 44} +do_test boundary3-2.34.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {44 21 64 2 55} +do_test boundary3-2.34.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=58 + ORDER BY x + } +} {55 2 64 21 44} +do_test boundary3-2.34.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {55 2 64 21 44} +do_test boundary3-2.34.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {44 21 64 2 55} +do_test boundary3-2.34.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -549755813889 ORDER BY t2.a + } +} {2 21 44 55 58 64} +do_test boundary3-2.34.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -549755813889 ORDER BY t1.a DESC + } +} {64 58 55 44 21 2} +do_test 
boundary3-2.34.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58} +do_test boundary3-2.34.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {58 44 21 64 2 55} +do_test boundary3-2.34.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=58 + ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary3-2.34.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58} +do_test boundary3-2.34.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=58 + ORDER BY t1.rowid DESC + } +} {58 44 21 64 2 55} +do_test boundary3-2.35.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-32768 AND t2.a=t1.a + } +} {32 ffffffffffff8000} +do_test boundary3-2.35.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffffff8000' + } +} {-32768 32} +do_test boundary3-2.35.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=32 + } +} {-32768 ffffffffffff8000} +do_test boundary3-2.35.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -32768 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.35.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -32768 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.35.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.35.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary3-2.35.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=32 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary3-2.35.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.35.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary3-2.35.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -32768 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 32 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.35.ge.2 { + db eval { + SELECT t2.a FROM t2 
NATURAL JOIN t1 + WHERE t1.rowid >= -32768 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 32 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.35.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.35.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary3-2.35.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=32 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 32 54 53 52 33 38} +do_test boundary3-2.35.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.35.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32} +do_test boundary3-2.35.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -32768 ORDER BY t2.a + } +} {1 2 11 21 29 37 44 47 55 58 63 64} +do_test boundary3-2.35.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -32768 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 37 29 21 11 2 1} +do_test boundary3-2.35.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary3-2.35.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.35.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=32 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary3-2.35.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29} +do_test boundary3-2.35.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.35.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -32768 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary3-2.35.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -32768 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.35.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.35.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=32 + ORDER BY 
t1.rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.35.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=32 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.35.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.35.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=32 + ORDER BY t1.rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.36.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=2147483647 AND t2.a=t1.a + } +} {20 000000007fffffff} +do_test boundary3-2.36.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000000007fffffff' + } +} {2147483647 20} +do_test boundary3-2.36.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=20 + } +} {2147483647 000000007fffffff} +do_test boundary3-2.36.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 2147483647 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary3-2.36.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 2147483647 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary3-2.36.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary3-2.36.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=20 + ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary3-2.36.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 2147483647 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} +do_test boundary3-2.36.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 2147483647 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary3-2.36.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary3-2.36.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=20 + ORDER BY x + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.ge.10 { + db eval { + SELECT t1.a FROM t1 
JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.36.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20} +do_test boundary3-2.36.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 2147483647 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.36.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 2147483647 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.36.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary3-2.36.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.36.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=20 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.36.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40} +do_test boundary3-2.36.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.36.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 2147483647 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.36.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 2147483647 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.36.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary3-2.36.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.36.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=20 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.36.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= 
CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary3-2.36.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=20 + ORDER BY t1.rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.37.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-129 AND t2.a=t1.a + } +} {54 ffffffffffffff7f} +do_test boundary3-2.37.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffffffff7f' + } +} {-129 54} +do_test boundary3-2.37.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=54 + } +} {-129 ffffffffffffff7f} +do_test boundary3-2.37.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -129 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary3-2.37.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -129 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.37.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.37.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary3-2.37.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=54 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary3-2.37.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.37.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary3-2.37.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -129 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.37.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -129 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.37.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {54 53 52 33 38 59 60 41 5 31 
4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.37.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary3-2.37.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=54 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 54 53 52 33 38} +do_test boundary3-2.37.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.37.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54} +do_test boundary3-2.37.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -129 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 55 58 63 64} +do_test boundary3-2.37.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -129 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.37.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.37.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.37.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=54 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.37.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32} +do_test boundary3-2.37.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.37.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -129 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary3-2.37.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -129 ORDER BY t1.a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.37.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=54 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.37.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.37.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=54 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.37.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid + } +} 
{55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.37.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=54 + ORDER BY t1.rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.38.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-128 AND t2.a=t1.a + } +} {53 ffffffffffffff80} +do_test boundary3-2.38.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffffffff80' + } +} {-128 53} +do_test boundary3-2.38.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=53 + } +} {-128 ffffffffffffff80} +do_test boundary3-2.38.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -128 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 56 57 59 60 61 62} +do_test boundary3-2.38.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -128 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.38.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.38.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary3-2.38.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=53 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 52 33 38} +do_test boundary3-2.38.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.38.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52} +do_test boundary3-2.38.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -128 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 33 34 35 36 38 39 40 41 42 43 45 46 48 49 50 51 52 53 56 57 59 60 61 62} +do_test boundary3-2.38.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -128 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 53 52 51 50 49 48 46 45 43 42 41 40 39 38 36 35 34 33 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.38.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.38.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=53 + ORDER BY t1.rowid 
DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary3-2.38.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=53 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 53 52 33 38} +do_test boundary3-2.38.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.38.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53} +do_test boundary3-2.38.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -128 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 54 55 58 63 64} +do_test boundary3-2.38.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -128 ORDER BY t1.a DESC + } +} {64 63 58 55 54 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.38.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.38.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.38.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=53 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.38.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54} +do_test boundary3-2.38.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.38.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -128 ORDER BY t2.a + } +} {1 2 11 21 29 32 37 44 47 53 54 55 58 63 64} +do_test boundary3-2.38.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -128 ORDER BY t1.a DESC + } +} {64 63 58 55 54 53 47 44 37 32 29 21 11 2 1} +do_test boundary3-2.38.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.38.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.38.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=53 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.38.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53} +do_test boundary3-2.38.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=53 + ORDER BY t1.rowid DESC + } +} {53 54 32 29 37 1 11 47 
63 58 44 21 64 2 55} +do_test boundary3-2.39.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=72057594037927936 AND t2.a=t1.a + } +} {28 0100000000000000} +do_test boundary3-2.39.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0100000000000000' + } +} {72057594037927936 28} +do_test boundary3-2.39.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=28 + } +} {72057594037927936 0100000000000000} +do_test boundary3-2.39.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 72057594037927936 ORDER BY t2.a + } +} {3} +do_test boundary3-2.39.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 72057594037927936 ORDER BY t1.a DESC + } +} {3} +do_test boundary3-2.39.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=28 + ORDER BY t1.rowid + } +} {3} +do_test boundary3-2.39.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=28 + ORDER BY t1.rowid DESC + } +} {3} +do_test boundary3-2.39.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=28 + ORDER BY x + } +} {3} +do_test boundary3-2.39.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 72057594037927936 ORDER BY t2.a + } +} {3 28} +do_test boundary3-2.39.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 72057594037927936 ORDER BY t1.a DESC + } +} {28 3} +do_test boundary3-2.39.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=28 + ORDER BY t1.rowid + } +} {28 3} +do_test boundary3-2.39.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=28 + ORDER BY t1.rowid DESC + } +} {3 28} +do_test boundary3-2.39.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=28 + ORDER BY x + } +} {28 3} +do_test boundary3-2.39.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 72057594037927936 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.39.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 72057594037927936 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.39.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=28 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17} +do_test boundary3-2.39.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=28 + ORDER BY t1.rowid DESC + } +} {17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.39.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=28 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.39.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) 
+ WHERE t1.rowid <= 72057594037927936 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.39.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 72057594037927936 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.39.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=28 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28} +do_test boundary3-2.39.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=28 + ORDER BY t1.rowid DESC + } +} {28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.39.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=28 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.40.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=2147483648 AND t2.a=t1.a + } +} {51 0000000080000000} +do_test boundary3-2.40.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000080000000' + } +} {2147483648 51} +do_test boundary3-2.40.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=51 + } +} {2147483648 0000000080000000} +do_test boundary3-2.40.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 2147483648 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 56 57} +do_test boundary3-2.40.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 2147483648 ORDER BY t1.a DESC + } +} {57 56 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary3-2.40.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary3-2.40.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=51 + ORDER BY x + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14} +do_test boundary3-2.40.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 2147483648 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 22 25 26 27 28 34 35 36 39 43 45 46 51 56 57} 
+do_test boundary3-2.40.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 2147483648 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 39 36 35 34 28 27 26 25 22 19 17 14 13 10 7 3} +do_test boundary3-2.40.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary3-2.40.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=51 + ORDER BY x + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.40.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51} +do_test boundary3-2.40.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 2147483648 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.40.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 2147483648 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.40.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary3-2.40.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.40.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=51 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.40.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20} +do_test boundary3-2.40.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.40.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 2147483648 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 20 21 23 24 29 30 31 32 33 37 38 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.40.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 2147483648 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 38 37 33 32 31 30 29 24 
23 21 20 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.40.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary3-2.40.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.40.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=51 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.40.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51} +do_test boundary3-2.40.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=51 + ORDER BY t1.rowid DESC + } +} {51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.41.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=549755813887 AND t2.a=t1.a + } +} {46 0000007fffffffff} +do_test boundary3-2.41.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000007fffffffff' + } +} {549755813887 46} +do_test boundary3-2.41.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=46 + } +} {549755813887 0000007fffffffff} +do_test boundary3-2.41.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 549755813887 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 56 57} +do_test boundary3-2.41.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 549755813887 ORDER BY t1.a DESC + } +} {57 56 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.41.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary3-2.41.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=46 + ORDER BY x + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35} +do_test boundary3-2.41.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 549755813887 ORDER BY t2.a + } +} {3 7 10 13 17 19 25 26 27 28 34 35 43 45 46 56 57} +do_test boundary3-2.41.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 549755813887 ORDER BY t1.a DESC + } +} {57 56 46 45 43 35 34 28 27 26 25 19 17 13 10 7 3} +do_test boundary3-2.41.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN 
t2 ON t1.rowid >= t2.r + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary3-2.41.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=46 + ORDER BY x + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.41.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46} +do_test boundary3-2.41.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 549755813887 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.41.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 549755813887 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.41.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary3-2.41.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.41.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=46 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.41.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22} +do_test boundary3-2.41.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.41.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 549755813887 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 20 21 22 23 24 29 30 31 32 33 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.41.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 549755813887 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 33 32 31 30 29 24 23 22 21 20 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.41.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 
29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary3-2.41.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.41.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=46 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.41.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46} +do_test boundary3-2.41.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=46 + ORDER BY t1.rowid DESC + } +} {46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.42.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-549755813888 AND t2.a=t1.a + } +} {63 ffffff8000000000} +do_test boundary3-2.42.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffff8000000000' + } +} {-549755813888 63} +do_test boundary3-2.42.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=63 + } +} {-549755813888 ffffff8000000000} +do_test boundary3-2.42.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -549755813888 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.42.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -549755813888 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.42.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.42.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary3-2.42.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=63 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.42.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.42.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON 
t1.rowid > CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary3-2.42.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -549755813888 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62 63} +do_test boundary3-2.42.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -549755813888 ORDER BY t1.a DESC + } +} {63 62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.42.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.42.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary3-2.42.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=63 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.42.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.42.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63} +do_test boundary3-2.42.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -549755813888 ORDER BY t2.a + } +} {2 21 44 55 58 64} +do_test boundary3-2.42.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -549755813888 ORDER BY t1.a DESC + } +} {64 58 55 44 21 2} +do_test boundary3-2.42.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58} +do_test boundary3-2.42.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {58 44 21 64 2 55} +do_test boundary3-2.42.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=63 + ORDER BY x + } +} {55 2 64 21 44 58} +do_test boundary3-2.42.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58} +do_test boundary3-2.42.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {58 44 21 64 2 55} +do_test 
boundary3-2.42.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -549755813888 ORDER BY t2.a + } +} {2 21 44 55 58 63 64} +do_test boundary3-2.42.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -549755813888 ORDER BY t1.a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary3-2.42.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.42.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary3-2.42.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=63 + ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.42.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.42.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=63 + ORDER BY t1.rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary3-2.43.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=281474976710655 AND t2.a=t1.a + } +} {10 0000ffffffffffff} +do_test boundary3-2.43.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000ffffffffffff' + } +} {281474976710655 10} +do_test boundary3-2.43.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=10 + } +} {281474976710655 0000ffffffffffff} +do_test boundary3-2.43.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 281474976710655 ORDER BY t2.a + } +} {3 13 17 26 27 28 43 45} +do_test boundary3-2.43.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 281474976710655 ORDER BY t1.a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary3-2.43.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=10 + ORDER BY t1.rowid + } +} {26 13 43 27 45 17 28 3} +do_test boundary3-2.43.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=10 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary3-2.43.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=10 + ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary3-2.43.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 281474976710655 ORDER BY t2.a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary3-2.43.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 281474976710655 ORDER BY t1.a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary3-2.43.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=10 + ORDER BY t1.rowid + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary3-2.43.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=10 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary3-2.43.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=10 + ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary3-2.43.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 281474976710655 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.43.lt.2 { + db eval { + SELECT t2.a FROM 
t2 NATURAL JOIN t1 + WHERE t1.rowid < 281474976710655 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.43.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=10 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary3-2.43.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=10 + ORDER BY t1.rowid DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.43.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=10 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.43.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 281474976710655 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.43.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 281474976710655 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.43.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=10 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary3-2.43.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=10 + ORDER BY t1.rowid DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.43.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=10 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.44.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=4398046511103 AND t2.a=t1.a + } +} {7 000003ffffffffff} +do_test boundary3-2.44.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000003ffffffffff' + } +} {4398046511103 7} +do_test boundary3-2.44.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=7 + } +} {4398046511103 000003ffffffffff} +do_test boundary3-2.44.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 4398046511103 ORDER BY t2.a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary3-2.44.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 4398046511103 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary3-2.44.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {56 25 34 10 26 13 43 27 45 17 28 3} 
+do_test boundary3-2.44.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary3-2.44.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=7 + ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.44.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.44.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary3-2.44.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 4398046511103 ORDER BY t2.a + } +} {3 7 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary3-2.44.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 4398046511103 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 7 3} +do_test boundary3-2.44.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.44.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary3-2.44.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=7 + ORDER BY x + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.44.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.44.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7} +do_test boundary3-2.44.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 4398046511103 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.44.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 4398046511103 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.44.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary3-2.44.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.44.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=7 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.44.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {55 2 64 21 
44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19} +do_test boundary3-2.44.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.44.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 4398046511103 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.44.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 4398046511103 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.44.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary3-2.44.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.44.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=7 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.44.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary3-2.44.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=7 + ORDER BY t1.rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.45.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=268435455 AND t2.a=t1.a + } +} {12 000000000fffffff} +do_test boundary3-2.45.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000000000fffffff' + } +} {268435455 12} +do_test boundary3-2.45.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=12 + } +} {268435455 000000000fffffff} +do_test boundary3-2.45.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 268435455 ORDER BY t2.a + } +} {3 7 10 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.45.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 268435455 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 10 7 3} +do_test boundary3-2.45.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.gt.4 { + db eval { + SELECT 
t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary3-2.45.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=12 + ORDER BY x + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40} +do_test boundary3-2.45.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 268435455 ORDER BY t2.a + } +} {3 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.45.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 268435455 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 3} +do_test boundary3-2.45.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary3-2.45.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=12 + ORDER BY x + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.45.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12} +do_test boundary3-2.45.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 268435455 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.45.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 268435455 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 6 5 4 2 1} +do_test boundary3-2.45.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary3-2.45.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.45.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=12 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} 
+do_test boundary3-2.45.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6} +do_test boundary3-2.45.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.45.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 268435455 ORDER BY t2.a + } +} {1 2 4 5 6 8 9 11 12 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.45.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 268435455 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 12 11 9 8 6 5 4 2 1} +do_test boundary3-2.45.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary3-2.45.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.45.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=12 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.45.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12} +do_test boundary3-2.45.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=12 + ORDER BY t1.rowid DESC + } +} {12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.46.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-9223372036854775808 AND t2.a=t1.a + } +} {55 8000000000000000} +do_test boundary3-2.46.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='8000000000000000' + } +} {-9223372036854775808 55} +do_test boundary3-2.46.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=55 + } +} {-9223372036854775808 8000000000000000} +do_test boundary3-2.46.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -9223372036854775808 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.46.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -9223372036854775808 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.46.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=55 + ORDER BY t1.rowid + } +} {2 64 21 
44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.46.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=55 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary3-2.46.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=55 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.46.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -9223372036854775808 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.46.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -9223372036854775808 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.46.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=55 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.46.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=55 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.46.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=55 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.46.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -9223372036854775808 ORDER BY t2.a + } +} {} +do_test boundary3-2.46.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -9223372036854775808 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.46.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=55 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.46.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=55 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.46.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=55 + ORDER BY x + } +} {} +do_test boundary3-2.46.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -9223372036854775808 ORDER BY t2.a + } +} {55} +do_test boundary3-2.46.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -9223372036854775808 ORDER BY t1.a DESC + } +} {55} +do_test boundary3-2.46.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=55 + ORDER 
BY t1.rowid + } +} {55} +do_test boundary3-2.46.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=55 + ORDER BY t1.rowid DESC + } +} {55} +do_test boundary3-2.46.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=55 + ORDER BY x + } +} {55} +do_test boundary3-2.47.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=562949953421312 AND t2.a=t1.a + } +} {43 0002000000000000} +do_test boundary3-2.47.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0002000000000000' + } +} {562949953421312 43} +do_test boundary3-2.47.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=43 + } +} {562949953421312 0002000000000000} +do_test boundary3-2.47.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 562949953421312 ORDER BY t2.a + } +} {3 17 27 28 45} +do_test boundary3-2.47.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 562949953421312 ORDER BY t1.a DESC + } +} {45 28 27 17 3} +do_test boundary3-2.47.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=43 + ORDER BY t1.rowid + } +} {27 45 17 28 3} +do_test boundary3-2.47.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=43 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27} +do_test boundary3-2.47.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=43 + ORDER BY x + } +} {27 45 17 28 3} +do_test boundary3-2.47.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 562949953421312 ORDER BY t2.a + } +} {3 17 27 28 43 45} +do_test boundary3-2.47.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 562949953421312 ORDER BY t1.a DESC + } +} {45 43 28 27 17 3} +do_test boundary3-2.47.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=43 + ORDER BY t1.rowid + } +} {43 27 45 17 28 3} +do_test boundary3-2.47.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=43 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43} +do_test boundary3-2.47.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=43 + ORDER BY x + } +} {43 27 45 17 28 3} +do_test boundary3-2.47.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 562949953421312 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.47.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 562949953421312 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.47.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=43 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13} +do_test boundary3-2.47.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=43 + ORDER BY t1.rowid DESC + } +} {13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.47.lt.5 { + db eval { + SELECT 
t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=43 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.47.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 562949953421312 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.47.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 562949953421312 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.47.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=43 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary3-2.47.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=43 + ORDER BY t1.rowid DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.47.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=43 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.48.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-8388609 AND t2.a=t1.a + } +} {1 ffffffffff7fffff} +do_test boundary3-2.48.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffffff7fffff' + } +} {-8388609 1} +do_test boundary3-2.48.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=1 + } +} {-8388609 ffffffffff7fffff} +do_test boundary3-2.48.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -8388609 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.48.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -8388609 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.48.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.48.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary3-2.48.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=1 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 
36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 37 29 32 54 53 52 33 38} +do_test boundary3-2.48.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.48.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37} +do_test boundary3-2.48.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -8388609 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.48.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -8388609 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.48.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.48.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary3-2.48.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=1 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.48.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.48.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1} +do_test boundary3-2.48.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -8388609 ORDER BY t2.a + } +} {2 11 21 44 47 55 58 63 64} +do_test boundary3-2.48.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -8388609 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 11 2} +do_test boundary3-2.48.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.48.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary3-2.48.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=1 + ORDER 
BY x + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.48.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11} +do_test boundary3-2.48.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {11 47 63 58 44 21 64 2 55} +do_test boundary3-2.48.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -8388609 ORDER BY t2.a + } +} {1 2 11 21 44 47 55 58 63 64} +do_test boundary3-2.48.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -8388609 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 11 2 1} +do_test boundary3-2.48.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.48.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.48.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=1 + ORDER BY x + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.48.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1} +do_test boundary3-2.48.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=1 + ORDER BY t1.rowid DESC + } +} {1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.49.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=16777215 AND t2.a=t1.a + } +} {9 0000000000ffffff} +do_test boundary3-2.49.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000ffffff' + } +} {16777215 9} +do_test boundary3-2.49.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=9 + } +} {16777215 0000000000ffffff} +do_test boundary3-2.49.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 16777215 ORDER BY t2.a + } +} {3 6 7 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.49.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 16777215 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 7 6 3} +do_test boundary3-2.49.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary3-2.49.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=9 + ORDER BY x + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6} +do_test boundary3-2.49.ge.1 { + db eval { + SELECT t2.a FROM 
t1 JOIN t2 USING(a) + WHERE t1.rowid >= 16777215 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.49.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 16777215 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.49.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary3-2.49.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=9 + ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.49.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary3-2.49.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 16777215 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.49.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 16777215 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary3-2.49.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary3-2.49.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.49.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=9 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.49.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary3-2.49.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.49.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 16777215 ORDER BY t2.a + } +} {1 2 4 5 8 9 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.49.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 16777215 ORDER BY 
t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 9 8 5 4 2 1} +do_test boundary3-2.49.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary3-2.49.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.49.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=9 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.49.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9} +do_test boundary3-2.49.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=9 + ORDER BY t1.rowid DESC + } +} {9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.50.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=8388608 AND t2.a=t1.a + } +} {24 0000000000800000} +do_test boundary3-2.50.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000800000' + } +} {8388608 24} +do_test boundary3-2.50.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=24 + } +} {8388608 0000000000800000} +do_test boundary3-2.50.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 8388608 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.50.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 8388608 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.50.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary3-2.50.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=24 + ORDER BY x + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9} +do_test boundary3-2.50.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 8388608 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 19 20 22 24 25 26 27 28 34 35 36 39 40 43 45 46 51 56 57} +do_test boundary3-2.50.ge.2 { 
+ db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 8388608 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 40 39 36 35 34 28 27 26 25 24 22 20 19 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.50.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary3-2.50.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=24 + ORDER BY x + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.50.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24} +do_test boundary3-2.50.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 8388608 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 18 21 23 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.50.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 8388608 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary3-2.50.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary3-2.50.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.50.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=24 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.50.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18} +do_test boundary3-2.50.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.50.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 8388608 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 18 21 23 24 29 30 31 32 33 37 38 41 42 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.50.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 8388608 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 42 41 38 37 33 32 31 30 29 24 23 21 18 16 15 11 8 5 4 2 1} +do_test boundary3-2.50.le.3 { + db eval { + SELECT t1.a 
FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary3-2.50.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.50.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=24 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.50.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24} +do_test boundary3-2.50.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=24 + ORDER BY t1.rowid DESC + } +} {24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.51.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=16383 AND t2.a=t1.a + } +} {8 0000000000003fff} +do_test boundary3-2.51.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000003fff' + } +} {16383 8} +do_test boundary3-2.51.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=8 + } +} {16383 0000000000003fff} +do_test boundary3-2.51.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 16383 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.51.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 16383 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.51.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary3-2.51.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=8 + ORDER BY x + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16} +do_test boundary3-2.51.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 16383 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test 
boundary3-2.51.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 16383 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.51.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary3-2.51.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=8 + ORDER BY x + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.51.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8} +do_test boundary3-2.51.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 16383 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.51.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 16383 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 5 4 2 1} +do_test boundary3-2.51.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary3-2.51.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.51.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=8 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.51.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61} +do_test boundary3-2.51.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.51.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 16383 ORDER BY t2.a + } +} {1 2 4 5 8 11 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.51.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 16383 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 11 8 5 4 2 1} +do_test boundary3-2.51.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=8 + ORDER BY t1.rowid + } +} 
{55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary3-2.51.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.51.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=8 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.51.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8} +do_test boundary3-2.51.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=8 + ORDER BY t1.rowid DESC + } +} {8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.52.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=140737488355328 AND t2.a=t1.a + } +} {34 0000800000000000} +do_test boundary3-2.52.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000800000000000' + } +} {140737488355328 34} +do_test boundary3-2.52.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=34 + } +} {140737488355328 0000800000000000} +do_test boundary3-2.52.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 140737488355328 ORDER BY t2.a + } +} {3 10 13 17 26 27 28 43 45} +do_test boundary3-2.52.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 140737488355328 ORDER BY t1.a DESC + } +} {45 43 28 27 26 17 13 10 3} +do_test boundary3-2.52.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=34 + ORDER BY t1.rowid + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary3-2.52.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=34 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10} +do_test boundary3-2.52.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=34 + ORDER BY x + } +} {10 26 13 43 27 45 17 28 3} +do_test boundary3-2.52.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 140737488355328 ORDER BY t2.a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary3-2.52.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 140737488355328 ORDER BY t1.a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary3-2.52.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=34 + ORDER BY t1.rowid + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.52.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=34 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary3-2.52.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=34 + ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.52.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 140737488355328 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.52.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 140737488355328 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 
48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.52.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=34 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary3-2.52.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=34 + ORDER BY t1.rowid DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.52.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=34 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.52.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 140737488355328 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.52.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 140737488355328 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.52.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=34 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34} +do_test boundary3-2.52.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=34 + ORDER BY t1.rowid DESC + } +} {34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.52.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=34 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.53.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=2097151 AND t2.a=t1.a + } +} {15 00000000001fffff} +do_test boundary3-2.53.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00000000001fffff' + } +} {2097151 15} +do_test boundary3-2.53.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=15 + } +} {2097151 00000000001fffff} +do_test boundary3-2.53.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 2097151 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary3-2.53.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 2097151 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 14 13 12 10 9 7 6 3} +do_test boundary3-2.53.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.gt.4 { 
+ db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary3-2.53.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=15 + ORDER BY x + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42} +do_test boundary3-2.53.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 2097151 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 51 56 57} +do_test boundary3-2.53.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 2097151 ORDER BY t1.a DESC + } +} {57 56 51 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.53.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary3-2.53.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=15 + ORDER BY x + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.53.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15} +do_test boundary3-2.53.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 2097151 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.53.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 2097151 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.53.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary3-2.53.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.53.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=15 + ORDER BY x 
+ } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.53.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62} +do_test boundary3-2.53.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.53.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 2097151 ORDER BY t2.a + } +} {1 2 4 5 8 11 15 16 21 23 29 30 31 32 33 37 38 41 44 47 48 49 50 52 53 54 55 58 59 60 61 62 63 64} +do_test boundary3-2.53.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 2097151 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 55 54 53 52 50 49 48 47 44 41 38 37 33 32 31 30 29 23 21 16 15 11 8 5 4 2 1} +do_test boundary3-2.53.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary3-2.53.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.53.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=15 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.53.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15} +do_test boundary3-2.53.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=15 + ORDER BY t1.rowid DESC + } +} {15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.54.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=140737488355327 AND t2.a=t1.a + } +} {25 00007fffffffffff} +do_test boundary3-2.54.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='00007fffffffffff' + } +} {140737488355327 25} +do_test boundary3-2.54.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=25 + } +} {140737488355327 00007fffffffffff} +do_test boundary3-2.54.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 140737488355327 ORDER BY t2.a + } +} {3 10 13 17 26 27 28 34 43 45} +do_test boundary3-2.54.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 140737488355327 ORDER BY t1.a DESC + } +} {45 43 34 28 27 26 17 13 10 3} +do_test boundary3-2.54.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=25 + ORDER BY t1.rowid + } +} {34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.54.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=25 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34} +do_test boundary3-2.54.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=25 + ORDER BY x + } +} {34 10 26 13 43 27 45 17 28 3} +do_test 
boundary3-2.54.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 140737488355327 ORDER BY t2.a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary3-2.54.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 140737488355327 ORDER BY t1.a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary3-2.54.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=25 + ORDER BY t1.rowid + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.54.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=25 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary3-2.54.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=25 + ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.54.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 140737488355327 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.54.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 140737488355327 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.54.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=25 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary3-2.54.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=25 + ORDER BY t1.rowid DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.54.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=25 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.54.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 140737488355327 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.54.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 140737488355327 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.54.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=25 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25} +do_test boundary3-2.54.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=25 + ORDER BY t1.rowid DESC + } +} {25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.54.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 
ON t1.rowid <= t2.r + WHERE t2.a=25 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.55.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=281474976710656 AND t2.a=t1.a + } +} {26 0001000000000000} +do_test boundary3-2.55.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0001000000000000' + } +} {281474976710656 26} +do_test boundary3-2.55.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=26 + } +} {281474976710656 0001000000000000} +do_test boundary3-2.55.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 281474976710656 ORDER BY t2.a + } +} {3 13 17 27 28 43 45} +do_test boundary3-2.55.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 281474976710656 ORDER BY t1.a DESC + } +} {45 43 28 27 17 13 3} +do_test boundary3-2.55.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=26 + ORDER BY t1.rowid + } +} {13 43 27 45 17 28 3} +do_test boundary3-2.55.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=26 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13} +do_test boundary3-2.55.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=26 + ORDER BY x + } +} {13 43 27 45 17 28 3} +do_test boundary3-2.55.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 281474976710656 ORDER BY t2.a + } +} {3 13 17 26 27 28 43 45} +do_test boundary3-2.55.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 281474976710656 ORDER BY t1.a DESC + } +} {45 43 28 27 26 17 13 3} +do_test boundary3-2.55.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=26 + ORDER BY t1.rowid + } +} {26 13 43 27 45 17 28 3} +do_test boundary3-2.55.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=26 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26} +do_test boundary3-2.55.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=26 + ORDER BY x + } +} {26 13 43 27 45 17 28 3} +do_test boundary3-2.55.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 281474976710656 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.55.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 281474976710656 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.55.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=26 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10} +do_test boundary3-2.55.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=26 + ORDER BY t1.rowid DESC + } +} {10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.55.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=26 + 
ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.55.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 281474976710656 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.55.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 281474976710656 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.55.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=26 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26} +do_test boundary3-2.55.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=26 + ORDER BY t1.rowid DESC + } +} {26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.55.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=26 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.56.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=32767 AND t2.a=t1.a + } +} {23 0000000000007fff} +do_test boundary3-2.56.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000007fff' + } +} {32767 23} +do_test boundary3-2.56.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=23 + } +} {32767 0000000000007fff} +do_test boundary3-2.56.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 32767 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.56.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 32767 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.56.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary3-2.56.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=23 + ORDER BY x + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.gt.11 { + db eval { + SELECT 
t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50} +do_test boundary3-2.56.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 32767 ORDER BY t2.a + } +} {3 6 7 9 10 12 13 14 15 17 18 19 20 22 23 24 25 26 27 28 34 35 36 39 40 42 43 45 46 48 50 51 56 57 62} +do_test boundary3-2.56.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 32767 ORDER BY t1.a DESC + } +} {62 57 56 51 50 48 46 45 43 42 40 39 36 35 34 28 27 26 25 24 23 22 20 19 18 17 15 14 13 12 10 9 7 6 3} +do_test boundary3-2.56.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary3-2.56.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=23 + ORDER BY x + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.56.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23} +do_test boundary3-2.56.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 32767 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.56.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 32767 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 21 16 11 8 5 4 2 1} +do_test boundary3-2.56.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary3-2.56.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.56.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=23 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.56.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16} +do_test boundary3-2.56.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.56.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid 
<= 32767 ORDER BY t2.a + } +} {1 2 4 5 8 11 16 21 23 29 30 31 32 33 37 38 41 44 47 49 52 53 54 55 58 59 60 61 63 64} +do_test boundary3-2.56.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 32767 ORDER BY t1.a DESC + } +} {64 63 61 60 59 58 55 54 53 52 49 47 44 41 38 37 33 32 31 30 29 23 21 16 11 8 5 4 2 1} +do_test boundary3-2.56.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary3-2.56.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.56.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=23 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.56.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23} +do_test boundary3-2.56.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=23 + ORDER BY t1.rowid DESC + } +} {23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.57.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=127 AND t2.a=t1.a + } +} {4 000000000000007f} +do_test boundary3-2.57.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='000000000000007f' + } +} {127 4} +do_test boundary3-2.57.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=4 + } +} {127 000000000000007f} +do_test boundary3-2.57.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 127 ORDER BY t2.a + } +} {3 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.57.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 127 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 3} +do_test boundary3-2.57.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary3-2.57.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=4 + ORDER BY x + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 
10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49} +do_test boundary3-2.57.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 127 ORDER BY t2.a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.57.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 127 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary3-2.57.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary3-2.57.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=4 + ORDER BY x + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.57.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4} +do_test boundary3-2.57.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 127 ORDER BY t2.a + } +} {1 2 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.57.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 127 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 2 1} +do_test boundary3-2.57.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary3-2.57.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.57.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=4 + ORDER BY x + } +} {59 60 41 5 31 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.57.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31} +do_test boundary3-2.57.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.57.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 127 ORDER BY t2.a + } +} {1 2 4 5 11 21 29 31 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.57.le.2 { 
+ db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 127 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 31 29 21 11 5 4 2 1} +do_test boundary3-2.57.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary3-2.57.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.57.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=4 + ORDER BY x + } +} {59 60 41 5 31 4 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.57.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4} +do_test boundary3-2.57.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=4 + ORDER BY t1.rowid DESC + } +} {4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.58.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=36028797018963967 AND t2.a=t1.a + } +} {27 007fffffffffffff} +do_test boundary3-2.58.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='007fffffffffffff' + } +} {36028797018963967 27} +do_test boundary3-2.58.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=27 + } +} {36028797018963967 007fffffffffffff} +do_test boundary3-2.58.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 36028797018963967 ORDER BY t2.a + } +} {3 17 28 45} +do_test boundary3-2.58.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 36028797018963967 ORDER BY t1.a DESC + } +} {45 28 17 3} +do_test boundary3-2.58.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=27 + ORDER BY t1.rowid + } +} {45 17 28 3} +do_test boundary3-2.58.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=27 + ORDER BY t1.rowid DESC + } +} {3 28 17 45} +do_test boundary3-2.58.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=27 + ORDER BY x + } +} {45 17 28 3} +do_test boundary3-2.58.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 36028797018963967 ORDER BY t2.a + } +} {3 17 27 28 45} +do_test boundary3-2.58.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 36028797018963967 ORDER BY t1.a DESC + } +} {45 28 27 17 3} +do_test boundary3-2.58.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=27 + ORDER BY t1.rowid + } +} {27 45 17 28 3} +do_test boundary3-2.58.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=27 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27} +do_test boundary3-2.58.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=27 + ORDER BY x + } +} {27 45 17 28 3} +do_test boundary3-2.58.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 36028797018963967 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.58.lt.2 { + db eval { + SELECT t2.a FROM 
t2 NATURAL JOIN t1 + WHERE t1.rowid < 36028797018963967 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.58.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=27 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43} +do_test boundary3-2.58.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=27 + ORDER BY t1.rowid DESC + } +} {43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.58.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=27 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.58.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 36028797018963967 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.58.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 36028797018963967 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.58.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=27 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary3-2.58.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=27 + ORDER BY t1.rowid DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.58.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=27 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.59.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=4398046511104 AND t2.a=t1.a + } +} {56 0000040000000000} +do_test boundary3-2.59.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000040000000000' + } +} {4398046511104 56} +do_test boundary3-2.59.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=56 + } +} {4398046511104 0000040000000000} +do_test boundary3-2.59.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 4398046511104 ORDER BY t2.a + } +} {3 10 13 17 25 26 27 28 34 43 45} +do_test boundary3-2.59.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 4398046511104 ORDER BY t1.a DESC + } +} {45 43 34 28 27 26 25 17 13 10 3} +do_test boundary3-2.59.gt.3 { + db eval { + SELECT t1.a FROM 
t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary3-2.59.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=56 + ORDER BY x + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25} +do_test boundary3-2.59.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 4398046511104 ORDER BY t2.a + } +} {3 10 13 17 25 26 27 28 34 43 45 56} +do_test boundary3-2.59.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 4398046511104 ORDER BY t1.a DESC + } +} {56 45 43 34 28 27 26 25 17 13 10 3} +do_test boundary3-2.59.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary3-2.59.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=56 + ORDER BY x + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.59.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56} +do_test boundary3-2.59.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 4398046511104 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 62 63 64} +do_test boundary3-2.59.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 4398046511104 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.59.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary3-2.59.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.59.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=56 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.59.lt.10 { + db eval { + SELECT t1.a 
FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7} +do_test boundary3-2.59.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.59.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 4398046511104 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 11 12 14 15 16 18 19 20 21 22 23 24 29 30 31 32 33 35 36 37 38 39 40 41 42 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.59.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 4398046511104 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 42 41 40 39 38 37 36 35 33 32 31 30 29 24 23 22 21 20 19 18 16 15 14 12 11 9 8 7 6 5 4 2 1} +do_test boundary3-2.59.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary3-2.59.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.59.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=56 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.59.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56} +do_test boundary3-2.59.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=56 + ORDER BY t1.rowid DESC + } +} {56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.60.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=1 AND t2.a=t1.a + } +} {60 0000000000000001} +do_test boundary3-2.60.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000001' + } +} {1 60} +do_test boundary3-2.60.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=60 + } +} {1 0000000000000001} +do_test boundary3-2.60.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 1 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.60.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 1 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.60.gt.3 { + db eval { + SELECT t1.a 
FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary3-2.60.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=60 + ORDER BY x + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41} +do_test boundary3-2.60.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 1 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 41 42 43 45 46 48 49 50 51 56 57 60 61 62} +do_test boundary3-2.60.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 1 ORDER BY t1.a DESC + } +} {62 61 60 57 56 51 50 49 48 46 45 43 42 41 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.60.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary3-2.60.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=60 + ORDER BY x + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.60.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60} +do_test boundary3-2.60.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 1 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 63 64} +do_test boundary3-2.60.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 1 ORDER BY t1.a DESC + } +} {64 63 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test 
boundary3-2.60.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary3-2.60.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.60.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=60 + ORDER BY x + } +} {59 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.60.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59} +do_test boundary3-2.60.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.60.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 1 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.60.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 1 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.60.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary3-2.60.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.60.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=60 + ORDER BY x + } +} {59 60 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.60.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60} +do_test boundary3-2.60.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=60 + ORDER BY t1.rowid DESC + } +} {60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.61.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=36028797018963968 AND t2.a=t1.a + } +} {45 0080000000000000} +do_test boundary3-2.61.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0080000000000000' + } +} {36028797018963968 45} +do_test boundary3-2.61.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=45 + } +} {36028797018963968 0080000000000000} +do_test boundary3-2.61.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 36028797018963968 ORDER BY t2.a + } +} {3 17 28} +do_test boundary3-2.61.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 36028797018963968 ORDER BY t1.a DESC + } +} {28 17 3} +do_test boundary3-2.61.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=45 + ORDER BY t1.rowid + } +} {17 28 3} +do_test boundary3-2.61.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=45 + ORDER BY t1.rowid DESC + } +} {3 28 17} +do_test boundary3-2.61.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=45 + ORDER BY x + } +} {17 28 3} +do_test 
boundary3-2.61.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 36028797018963968 ORDER BY t2.a + } +} {3 17 28 45} +do_test boundary3-2.61.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 36028797018963968 ORDER BY t1.a DESC + } +} {45 28 17 3} +do_test boundary3-2.61.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=45 + ORDER BY t1.rowid + } +} {45 17 28 3} +do_test boundary3-2.61.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=45 + ORDER BY t1.rowid DESC + } +} {3 28 17 45} +do_test boundary3-2.61.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=45 + ORDER BY x + } +} {45 17 28 3} +do_test boundary3-2.61.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 36028797018963968 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.61.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 36028797018963968 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.61.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=45 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27} +do_test boundary3-2.61.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=45 + ORDER BY t1.rowid DESC + } +} {27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.61.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=45 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.61.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 36028797018963968 ORDER BY t2.a + } +} {1 2 4 5 6 7 8 9 10 11 12 13 14 15 16 18 19 20 21 22 23 24 25 26 27 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.61.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 36028797018963968 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 27 26 25 24 23 22 21 20 19 18 16 15 14 13 12 11 10 9 8 7 6 5 4 2 1} +do_test boundary3-2.61.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=45 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45} +do_test boundary3-2.61.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=45 + ORDER BY t1.rowid DESC + } +} {45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 
63 58 44 21 64 2 55} +do_test boundary3-2.61.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=45 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.62.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-2147483649 AND t2.a=t1.a + } +} {47 ffffffff7fffffff} +do_test boundary3-2.62.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ffffffff7fffffff' + } +} {-2147483649 47} +do_test boundary3-2.62.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=47 + } +} {-2147483649 ffffffff7fffffff} +do_test boundary3-2.62.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -2147483649 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.62.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -2147483649 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.62.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.62.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary3-2.62.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=47 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.62.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.62.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11} +do_test boundary3-2.62.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -2147483649 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 45 46 47 48 49 50 51 52 53 54 56 57 59 60 61 62} +do_test boundary3-2.62.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -2147483649 ORDER BY t1.a DESC + } +} {62 61 60 59 57 56 54 53 52 51 50 49 48 47 46 45 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.62.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + 
WHERE t2.a=47 + ORDER BY t1.rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.62.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary3-2.62.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=47 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.62.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.62.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47} +do_test boundary3-2.62.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -2147483649 ORDER BY t2.a + } +} {2 21 44 55 58 63 64} +do_test boundary3-2.62.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -2147483649 ORDER BY t1.a DESC + } +} {64 63 58 55 44 21 2} +do_test boundary3-2.62.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.62.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary3-2.62.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=47 + ORDER BY x + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.62.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63} +do_test boundary3-2.62.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {63 58 44 21 64 2 55} +do_test boundary3-2.62.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -2147483649 ORDER BY t2.a + } +} {2 21 44 47 55 58 63 64} +do_test boundary3-2.62.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -2147483649 ORDER BY t1.a DESC + } +} {64 63 58 55 47 44 21 2} +do_test boundary3-2.62.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47} +do_test boundary3-2.62.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary3-2.62.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=47 + ORDER BY x + } +} {55 2 64 21 44 58 63 47} +do_test boundary3-2.62.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 
47} +do_test boundary3-2.62.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=47 + ORDER BY t1.rowid DESC + } +} {47 63 58 44 21 64 2 55} +do_test boundary3-2.63.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=-36028797018963969 AND t2.a=t1.a + } +} {2 ff7fffffffffffff} +do_test boundary3-2.63.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='ff7fffffffffffff' + } +} {-36028797018963969 2} +do_test boundary3-2.63.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=2 + } +} {-36028797018963969 ff7fffffffffffff} +do_test boundary3-2.63.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -36028797018963969 ORDER BY t2.a + } +} {1 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.63.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -36028797018963969 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 1} +do_test boundary3-2.63.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=2 + ORDER BY t1.rowid + } +} {64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.63.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=2 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64} +do_test boundary3-2.63.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=2 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.63.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -36028797018963969 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.63.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -36028797018963969 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.63.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=2 + ORDER BY t1.rowid + } +} {2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.63.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=2 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2} +do_test boundary3-2.63.ge.5 { + db eval { + SELECT t1.a FROM 
t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=2 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.63.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -36028797018963969 ORDER BY t2.a + } +} {55} +do_test boundary3-2.63.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -36028797018963969 ORDER BY t1.a DESC + } +} {55} +do_test boundary3-2.63.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=2 + ORDER BY t1.rowid + } +} {55} +do_test boundary3-2.63.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=2 + ORDER BY t1.rowid DESC + } +} {55} +do_test boundary3-2.63.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=2 + ORDER BY x + } +} {55} +do_test boundary3-2.63.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -36028797018963969 ORDER BY t2.a + } +} {2 55} +do_test boundary3-2.63.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -36028797018963969 ORDER BY t1.a DESC + } +} {55 2} +do_test boundary3-2.63.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=2 + ORDER BY t1.rowid + } +} {55 2} +do_test boundary3-2.63.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=2 + ORDER BY t1.rowid DESC + } +} {2 55} +do_test boundary3-2.63.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=2 + ORDER BY x + } +} {55 2} +do_test boundary3-2.64.1 { + db eval { + SELECT t1.* FROM t1, t2 WHERE t1.rowid=3 AND t2.a=t1.a + } +} {5 0000000000000003} +do_test boundary3-2.64.2 { + db eval { + SELECT t2.* FROM t1 JOIN t2 USING(a) WHERE x='0000000000000003' + } +} {3 5} +do_test boundary3-2.64.3 { + db eval { + SELECT t1.rowid, x FROM t1 JOIN t2 ON t2.r=t1.rowid WHERE t2.a=5 + } +} {3 0000000000000003} +do_test boundary3-2.64.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 3 ORDER BY t2.a + } +} {3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.64.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 3 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 4 3} +do_test boundary3-2.64.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary3-2.64.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=5 + ORDER BY x + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.gt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 
57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.gt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31} +do_test boundary3-2.64.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 3 ORDER BY t2.a + } +} {3 4 5 6 7 8 9 10 12 13 14 15 16 17 18 19 20 22 23 24 25 26 27 28 30 31 34 35 36 39 40 42 43 45 46 48 49 50 51 56 57 61 62} +do_test boundary3-2.64.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 3 ORDER BY t1.a DESC + } +} {62 61 57 56 51 50 49 48 46 45 43 42 40 39 36 35 34 31 30 28 27 26 25 24 23 22 20 19 18 17 16 15 14 13 12 10 9 8 7 6 5 4 3} +do_test boundary3-2.64.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary3-2.64.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=5 + ORDER BY x + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.ge.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.64.ge.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5} +do_test boundary3-2.64.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 3 ORDER BY t2.a + } +} {1 2 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.64.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 3 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 2 1} +do_test boundary3-2.64.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary3-2.64.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.64.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=5 + ORDER BY x + } +} {59 60 41 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.64.lt.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41} +do_test boundary3-2.64.lt.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test 
boundary3-2.64.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 3 ORDER BY t2.a + } +} {1 2 5 11 21 29 32 33 37 38 41 44 47 52 53 54 55 58 59 60 63 64} +do_test boundary3-2.64.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 3 ORDER BY t1.a DESC + } +} {64 63 60 59 58 55 54 53 52 47 44 41 38 37 33 32 29 21 11 5 2 1} +do_test boundary3-2.64.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary3-2.64.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.64.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=5 + ORDER BY x + } +} {59 60 41 5 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.64.le.10 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5} +do_test boundary3-2.64.le.11 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= CAST(t2.r AS real) + WHERE t2.a=5 + ORDER BY t1.rowid DESC + } +} {5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.65.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > 9.22337303685477580800e+18 ORDER BY t2.a + } +} {} +do_test boundary3-2.65.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > 9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.65.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=65 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.65.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=65 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.65.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=65 + ORDER BY x + } +} {} +do_test boundary3-2.65.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= 9.22337303685477580800e+18 ORDER BY t2.a + } +} {} +do_test boundary3-2.65.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= 9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.65.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=65 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.65.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=65 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.65.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=65 + ORDER BY x + } +} {} +do_test boundary3-2.65.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < 9.22337303685477580800e+18 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.65.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < 9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.65.lt.3 
{ + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=65 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.65.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=65 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.65.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=65 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.65.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= 9.22337303685477580800e+18 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.65.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= 9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.65.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=65 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.65.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=65 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.65.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=65 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.66.gt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid > -9.22337303685477580800e+18 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.66.gt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid > -9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.66.gt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=66 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 
3} +do_test boundary3-2.66.gt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=66 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.66.gt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid > t2.r + WHERE t2.a=66 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.66.ge.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid >= -9.22337303685477580800e+18 ORDER BY t2.a + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64} +do_test boundary3-2.66.ge.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid >= -9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1} +do_test boundary3-2.66.ge.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=66 + ORDER BY t1.rowid + } +} {55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38 59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3} +do_test boundary3-2.66.ge.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=66 + ORDER BY t1.rowid DESC + } +} {3 28 17 45 27 43 13 26 10 34 25 56 7 19 57 35 46 22 39 36 14 51 20 40 12 6 9 24 18 42 15 62 48 50 23 16 8 61 30 49 4 31 5 41 60 59 38 33 52 53 54 32 29 37 1 11 47 63 58 44 21 64 2 55} +do_test boundary3-2.66.ge.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid >= t2.r + WHERE t2.a=66 + ORDER BY x + } +} {59 60 41 5 31 4 49 30 61 8 16 23 50 48 62 15 42 18 24 9 6 12 40 20 51 14 36 39 22 46 35 57 19 7 56 25 34 10 26 13 43 27 45 17 28 3 55 2 64 21 44 58 63 47 11 1 37 29 32 54 53 52 33 38} +do_test boundary3-2.66.lt.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid < -9.22337303685477580800e+18 ORDER BY t2.a + } +} {} +do_test boundary3-2.66.lt.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid < -9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.66.lt.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=66 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.66.lt.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=66 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.66.lt.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid < t2.r + WHERE t2.a=66 + ORDER BY x + } +} {} +do_test boundary3-2.66.le.1 { + db eval { + SELECT t2.a FROM t1 JOIN t2 USING(a) + WHERE t1.rowid <= -9.22337303685477580800e+18 ORDER BY t2.a + } +} {} +do_test boundary3-2.66.le.2 { + db eval { + SELECT t2.a FROM t2 NATURAL JOIN t1 + WHERE t1.rowid <= -9.22337303685477580800e+18 ORDER BY t1.a DESC + } +} {} +do_test boundary3-2.66.le.3 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=66 + ORDER BY t1.rowid + } +} {} +do_test boundary3-2.66.le.4 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE 
t2.a=66 + ORDER BY t1.rowid DESC + } +} {} +do_test boundary3-2.66.le.5 { + db eval { + SELECT t1.a FROM t1 JOIN t2 ON t1.rowid <= t2.r + WHERE t2.a=66 + ORDER BY x + } +} {} +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary4.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary4.tcl --- sqlite3-3.4.2/test/boundary4.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary4.tcl 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,339 @@ +puts {# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary4.tcl,v 1.3 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. +if {![working_64bit_int]} { finish_test; return } +} + +expr srand(0) + +# Generate interesting boundary numbers +# +foreach x { + 0x7f + 0x7fff + 0x7fffff + 0x7fffffff + 0x7fffffffff + 0x7fffffffffff + 0x7fffffffffffff + 0x7fffffffffffffff +} { + set x [expr {wide($x)}] + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set boundarynum([expr {-($x+1)}]) 1 + set boundarynum([expr {-($x+2)}]) 1 + set boundarynum([expr {$x+$x+1}]) 1 + set boundarynum([expr {$x+$x+2}]) 1 +} +set x [expr {wide(127)}] +for {set i 127} {$i<=9} {incr i} { + set boundarynum($x) 1 + set boundarynum([expr {$x+1}]) 1 + set x [expr {wide($x*128 + 127)}] +} + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# A simple selection sort. Not trying to be efficient. 
+# +proc sort {inlist} { + set outlist {} + set mn [lindex $inlist 0] + foreach x $inlist { + if {$x<$mn} {set mn $x} + } + set outlist $mn + set mx $mn + while {1} { + set valid 0 + foreach x $inlist { + if {$x>$mx && (!$valid || $mn>$x)} { + set mn $x + set valid 1 + } + } + if {!$valid} break + lappend outlist $mn + set mx $mn + } + return $outlist +} + +# Reverse the order of a list +# +proc reverse {inlist} { + set i [llength $inlist] + set outlist {} + for {incr i -1} {$i>=0} {incr i -1} { + lappend outlist [lindex $inlist $i] + } + return $outlist +} + +set nums1 [scramble [array names boundarynum]] +set nums2 [scramble [array names boundarynum]] + +set tname boundary4 +puts "do_test $tname-1.1 \173" +puts " db eval \173" +puts " CREATE TABLE t1(a,x);" +set a 0 +set all_rowid {} +set all_a {} +set all_x {} +foreach r $nums1 { + incr a + set t1ra($r) $a + set t1ar($a) $r + set x [format %08x%08x [expr {wide($r)>>32}] $r] + set t1rx($r) $x + set t1xr($x) $r + puts " INSERT INTO t1(oid,a,x) VALUES($r,$a,'$x');" + lappend all_rowid $r + lappend all_a $a + lappend all_x $x +} +puts " CREATE INDEX t1i1 ON t1(a);" +puts " CREATE INDEX t1i2 ON t1(x);" +puts " \175" +puts "\175 {}" + +puts "do_test $tname-1.2 \173" +puts " db eval \173" +puts " SELECT count(*) FROM t1" +puts " \175" +puts "\175 {[llength $nums1]}" + +proc maketest {tnum sql answer} { + puts "do_test $::tname-$tnum \173" + puts " db eval \173" + puts " $sql" + puts " \175" + puts "\175 {$answer}" +} + +set ans {} +foreach r [sort $all_rowid] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 1.3 {SELECT rowid, a, x FROM t1 ORDER BY +rowid} $ans +maketest 1.4 {SELECT rowid, a, x FROM t1 ORDER BY rowid} $ans + +set ans {} +foreach r [reverse [sort $all_rowid]] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 1.5 {SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC} $ans +maketest 1.6 {SELECT rowid, a, x FROM t1 ORDER BY rowid DESC} $ans + +set ans {} +foreach a [sort $all_a] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 1.7 {SELECT rowid, a, x FROM t1 ORDER BY +a} $ans +maketest 1.8 {SELECT rowid, a, x FROM t1 ORDER BY a} $ans + +set ans {} +foreach a [reverse [sort $all_a]] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 1.9 {SELECT rowid, a, x FROM t1 ORDER BY +a DESC} $ans +maketest 1.10 {SELECT rowid, a, x FROM t1 ORDER BY a DESC} $ans + +set ans {} +foreach x [sort $all_x] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 1.11 {SELECT rowid, a, x FROM t1 ORDER BY +x} $ans +maketest 1.12 {SELECT rowid, a, x FROM t1 ORDER BY x} $ans + +set ans {} +foreach x [reverse [sort $all_x]] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 1.13 {SELECT rowid, a, x FROM t1 ORDER BY +x DESC} $ans +maketest 1.14 {SELECT rowid, a, x FROM t1 ORDER BY x DESC} $ans + +maketest 2.1 {UPDATE t1 SET rowid=a, a=rowid} {} + +set ans {} +foreach r [sort $all_rowid] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 2.3 {SELECT a, rowid, x FROM t1 ORDER BY +a} $ans +maketest 2.4 {SELECT a, rowid, x FROM t1 ORDER BY a} $ans + +set ans {} +foreach r [reverse [sort $all_rowid]] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 2.5 {SELECT a, rowid, x FROM t1 ORDER BY +a DESC} $ans +maketest 2.6 {SELECT a, rowid, x FROM t1 ORDER BY a DESC} $ans + +set ans {} +foreach a [sort $all_a] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 2.7 {SELECT a, rowid, x FROM t1 ORDER BY +rowid} $ans +maketest 2.8 {SELECT a, rowid, x FROM t1 ORDER BY rowid} $ans + +set ans {} +foreach a [reverse 
[sort $all_a]] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 2.9 {SELECT a, rowid, x FROM t1 ORDER BY +rowid DESC} $ans +maketest 2.10 {SELECT a, rowid, x FROM t1 ORDER BY rowid DESC} $ans + +set ans {} +foreach x [sort $all_x] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 2.11 {SELECT a, rowid, x FROM t1 ORDER BY +x} $ans +maketest 2.12 {SELECT a, rowid, x FROM t1 ORDER BY x} $ans + +set ans {} +foreach x [reverse [sort $all_x]] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 2.13 {SELECT a, rowid, x FROM t1 ORDER BY +x DESC} $ans +maketest 2.14 {SELECT a, rowid, x FROM t1 ORDER BY x DESC} $ans + +maketest 3.1 {UPDATE t1 SET rowid=a, a=rowid} {} +maketest 3.2 {ALTER TABLE t1 ADD COLUMN z; UPDATE t1 SET z=zeroblob(600)} {} + +set ans {} +foreach r [sort $all_rowid] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 3.3 {SELECT rowid, a, x FROM t1 ORDER BY +rowid} $ans +maketest 3.4 {SELECT rowid, a, x FROM t1 ORDER BY rowid} $ans + +set ans {} +foreach r [reverse [sort $all_rowid]] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 3.5 {SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC} $ans +maketest 3.6 {SELECT rowid, a, x FROM t1 ORDER BY rowid DESC} $ans + +set ans {} +foreach a [sort $all_a] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 3.7 {SELECT rowid, a, x FROM t1 ORDER BY +a} $ans +maketest 3.8 {SELECT rowid, a, x FROM t1 ORDER BY a} $ans + +set ans {} +foreach a [reverse [sort $all_a]] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 3.9 {SELECT rowid, a, x FROM t1 ORDER BY +a DESC} $ans +maketest 3.10 {SELECT rowid, a, x FROM t1 ORDER BY a DESC} $ans + +set ans {} +foreach x [sort $all_x] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 3.11 {SELECT rowid, a, x FROM t1 ORDER BY +x} $ans +maketest 3.12 {SELECT rowid, a, x FROM t1 ORDER BY x} $ans + +set ans {} +foreach x [reverse [sort $all_x]] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 3.13 {SELECT rowid, a, x FROM t1 ORDER BY +x DESC} $ans +maketest 3.14 {SELECT rowid, a, x FROM t1 ORDER BY x DESC} $ans + + +maketest 4.1 {UPDATE t1 SET rowid=a, a=rowid, x=z, z=x} {} + +set ans {} +foreach r [sort $all_rowid] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 4.3 {SELECT a, rowid, z FROM t1 ORDER BY +a} $ans +maketest 4.4 {SELECT a, rowid, z FROM t1 ORDER BY a} $ans + +set ans {} +foreach r [reverse [sort $all_rowid]] { + lappend ans $r $t1ra($r) $t1rx($r) +} +maketest 4.5 {SELECT a, rowid, z FROM t1 ORDER BY +a DESC} $ans +maketest 4.6 {SELECT a, rowid, z FROM t1 ORDER BY a DESC} $ans + +set ans {} +foreach a [sort $all_a] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 4.7 {SELECT a, rowid, z FROM t1 ORDER BY +rowid} $ans +maketest 4.8 {SELECT a, rowid, z FROM t1 ORDER BY rowid} $ans + +set ans {} +foreach a [reverse [sort $all_a]] { + set r $t1ar($a) + lappend ans $r $a $t1rx($r) +} +maketest 4.9 {SELECT a, rowid, z FROM t1 ORDER BY +rowid DESC} $ans +maketest 4.10 {SELECT a, rowid, z FROM t1 ORDER BY rowid DESC} $ans + +set ans {} +foreach x [sort $all_x] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 4.11 {SELECT a, rowid, z FROM t1 ORDER BY +z} $ans +maketest 4.12 {SELECT a, rowid, z FROM t1 ORDER BY z} $ans + +set ans {} +foreach x [reverse [sort $all_x]] { + set r $t1xr($x) + lappend ans $r $t1ra($r) $x +} +maketest 4.13 {SELECT a, rowid, z FROM t1 ORDER BY +z DESC} $ans +maketest 4.14 {SELECT a, rowid, z FROM t1 ORDER BY z DESC} $ans + +puts {finish_test} diff -Nru 
/tmp/wcSK23PnjP/sqlite3-3.4.2/test/boundary4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/boundary4.test --- sqlite3-3.4.2/test/boundary4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/boundary4.test 2009-01-02 15:45:48.000000000 +0000 @@ -0,0 +1,342 @@ +# 2008 December 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is automatically generated from a separate TCL script. +# This file seeks to exercise integer boundary values. +# +# $Id: boundary4.test,v 1.2 2009/01/02 15:45:48 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Many of the boundary tests depend on a working 64-bit implementation. +if {![working_64bit_int]} { finish_test; return } + +do_test boundary4-1.1 { + db eval { + CREATE TABLE t1(a,x); + INSERT INTO t1(oid,a,x) VALUES(549755813887,1,'0000007fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-8388608,2,'ffffffffff800000'); + INSERT INTO t1(oid,a,x) VALUES(0,3,'0000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-129,4,'ffffffffffffff7f'); + INSERT INTO t1(oid,a,x) VALUES(8388608,5,'0000000000800000'); + INSERT INTO t1(oid,a,x) VALUES(65535,6,'000000000000ffff'); + INSERT INTO t1(oid,a,x) VALUES(8388607,7,'00000000007fffff'); + INSERT INTO t1(oid,a,x) VALUES(1099511627776,8,'0000010000000000'); + INSERT INTO t1(oid,a,x) VALUES(16777215,9,'0000000000ffffff'); + INSERT INTO t1(oid,a,x) VALUES(32767,10,'0000000000007fff'); + INSERT INTO t1(oid,a,x) VALUES(4294967296,11,'0000000100000000'); + INSERT INTO t1(oid,a,x) VALUES(-549755813888,12,'ffffff8000000000'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355328,13,'ffff800000000000'); + INSERT INTO t1(oid,a,x) VALUES(256,14,'0000000000000100'); + INSERT INTO t1(oid,a,x) VALUES(16777216,15,'0000000001000000'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927936,16,'0100000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-1,17,'ffffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(9223372036854775807,18,'7fffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(281474976710655,19,'0000ffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(1099511627775,20,'000000ffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-8388609,21,'ffffffffff7fffff'); + INSERT INTO t1(oid,a,x) VALUES(32768,22,'0000000000008000'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963968,23,'0080000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-32769,24,'ffffffffffff7fff'); + INSERT INTO t1(oid,a,x) VALUES(127,25,'000000000000007f'); + INSERT INTO t1(oid,a,x) VALUES(-9223372036854775808,26,'8000000000000000'); + INSERT INTO t1(oid,a,x) VALUES(72057594037927935,27,'00ffffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-549755813889,28,'ffffff7fffffffff'); + INSERT INTO t1(oid,a,x) VALUES(255,29,'00000000000000ff'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963969,30,'ff7fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483648,31,'ffffffff80000000'); + INSERT INTO t1(oid,a,x) VALUES(281474976710656,32,'0001000000000000'); + INSERT INTO t1(oid,a,x) VALUES(65536,33,'0000000000010000'); + INSERT INTO t1(oid,a,x) VALUES(140737488355328,34,'0000800000000000'); + INSERT INTO t1(oid,a,x) VALUES(549755813888,35,'0000008000000000'); + INSERT INTO t1(oid,a,x) 
VALUES(2147483648,36,'0000000080000000'); + INSERT INTO t1(oid,a,x) VALUES(4294967295,37,'00000000ffffffff'); + INSERT INTO t1(oid,a,x) VALUES(140737488355327,38,'00007fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-2147483649,39,'ffffffff7fffffff'); + INSERT INTO t1(oid,a,x) VALUES(36028797018963967,40,'007fffffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(128,41,'0000000000000080'); + INSERT INTO t1(oid,a,x) VALUES(-32768,42,'ffffffffffff8000'); + INSERT INTO t1(oid,a,x) VALUES(-36028797018963968,43,'ff80000000000000'); + INSERT INTO t1(oid,a,x) VALUES(-140737488355329,44,'ffff7fffffffffff'); + INSERT INTO t1(oid,a,x) VALUES(-128,45,'ffffffffffffff80'); + INSERT INTO t1(oid,a,x) VALUES(2147483647,46,'000000007fffffff'); + CREATE INDEX t1i1 ON t1(a); + CREATE INDEX t1i2 ON t1(x); + } +} {} +do_test boundary4-1.2 { + db eval { + SELECT count(*) FROM t1 + } +} {46} +do_test boundary4-1.3 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +rowid + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-1.4 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY rowid + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 
1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-1.5 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-1.6 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY rowid DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-1.7 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +a + } +} {549755813887 1 0000007fffffffff -8388608 2 
ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-1.8 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY a + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-1.9 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +a DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 
29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-1.10 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY a DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-1.11 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 
ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-1.12 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-1.13 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-1.14 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 
-129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-2.1 { + db eval { + UPDATE t1 SET rowid=a, a=rowid + } +} {} +do_test boundary4-2.3 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY +a + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-2.4 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY a + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 
17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-2.5 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY +a DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-2.6 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY a DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f 
-32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-2.7 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY +rowid + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-2.8 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY rowid + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-2.9 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY 
+rowid DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-2.10 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY rowid DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-2.11 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY +x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 
549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-2.12 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-2.13 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY +x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 
0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-2.14 { + db eval { + SELECT a, rowid, x FROM t1 ORDER BY x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-3.1 { + db eval { + UPDATE t1 SET rowid=a, a=rowid + } +} {} +do_test boundary4-3.2 { + db eval { + ALTER TABLE t1 ADD COLUMN z; UPDATE t1 SET z=zeroblob(600) + } +} {} +do_test boundary4-3.3 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +rowid + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 
0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-3.4 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY rowid + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-3.5 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +rowid DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-3.6 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY rowid DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 
140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-3.7 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +a + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-3.8 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY a + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 
72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-3.9 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +a DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-3.10 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY a DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 
000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-3.11 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-3.12 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY x + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-3.13 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY +x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 
ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-3.14 { + db eval { + SELECT rowid, a, x FROM t1 ORDER BY x DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-4.1 { + db eval { + UPDATE t1 SET rowid=a, a=rowid, x=z, z=x + } +} {} +do_test boundary4-4.3 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +a + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 
000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-4.4 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY a + } +} {-9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff 0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff} +do_test boundary4-4.5 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +a DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 
ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-4.6 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY a DESC + } +} {9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000 -1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000} +do_test boundary4-4.7 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +rowid + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-4.8 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY rowid + } +} {549755813887 1 0000007fffffffff -8388608 2 ffffffffff800000 0 3 0000000000000000 -129 4 ffffffffffffff7f 8388608 5 0000000000800000 65535 6 000000000000ffff 8388607 7 
00000000007fffff 1099511627776 8 0000010000000000 16777215 9 0000000000ffffff 32767 10 0000000000007fff 4294967296 11 0000000100000000 -549755813888 12 ffffff8000000000 -140737488355328 13 ffff800000000000 256 14 0000000000000100 16777216 15 0000000001000000 72057594037927936 16 0100000000000000 -1 17 ffffffffffffffff 9223372036854775807 18 7fffffffffffffff 281474976710655 19 0000ffffffffffff 1099511627775 20 000000ffffffffff -8388609 21 ffffffffff7fffff 32768 22 0000000000008000 36028797018963968 23 0080000000000000 -32769 24 ffffffffffff7fff 127 25 000000000000007f -9223372036854775808 26 8000000000000000 72057594037927935 27 00ffffffffffffff -549755813889 28 ffffff7fffffffff 255 29 00000000000000ff -36028797018963969 30 ff7fffffffffffff -2147483648 31 ffffffff80000000 281474976710656 32 0001000000000000 65536 33 0000000000010000 140737488355328 34 0000800000000000 549755813888 35 0000008000000000 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 140737488355327 38 00007fffffffffff -2147483649 39 ffffffff7fffffff 36028797018963967 40 007fffffffffffff 128 41 0000000000000080 -32768 42 ffffffffffff8000 -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -128 45 ffffffffffffff80 2147483647 46 000000007fffffff} +do_test boundary4-4.9 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +rowid DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff -9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-4.10 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY rowid DESC + } +} {2147483647 46 000000007fffffff -128 45 ffffffffffffff80 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -32768 42 ffffffffffff8000 128 41 0000000000000080 36028797018963967 40 007fffffffffffff -2147483649 39 ffffffff7fffffff 140737488355327 38 00007fffffffffff 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 549755813888 35 0000008000000000 140737488355328 34 0000800000000000 65536 33 0000000000010000 281474976710656 32 0001000000000000 -2147483648 31 ffffffff80000000 -36028797018963969 30 ff7fffffffffffff 255 29 00000000000000ff -549755813889 28 ffffff7fffffffff 72057594037927935 27 00ffffffffffffff 
-9223372036854775808 26 8000000000000000 127 25 000000000000007f -32769 24 ffffffffffff7fff 36028797018963968 23 0080000000000000 32768 22 0000000000008000 -8388609 21 ffffffffff7fffff 1099511627775 20 000000ffffffffff 281474976710655 19 0000ffffffffffff 9223372036854775807 18 7fffffffffffffff -1 17 ffffffffffffffff 72057594037927936 16 0100000000000000 16777216 15 0000000001000000 256 14 0000000000000100 -140737488355328 13 ffff800000000000 -549755813888 12 ffffff8000000000 4294967296 11 0000000100000000 32767 10 0000000000007fff 16777215 9 0000000000ffffff 1099511627776 8 0000010000000000 8388607 7 00000000007fffff 65535 6 000000000000ffff 8388608 5 0000000000800000 -129 4 ffffffffffffff7f 0 3 0000000000000000 -8388608 2 ffffffffff800000 549755813887 1 0000007fffffffff} +do_test boundary4-4.11 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +z + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-4.12 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY z + } +} {0 3 0000000000000000 127 25 000000000000007f 128 41 0000000000000080 255 29 00000000000000ff 256 14 0000000000000100 32767 10 0000000000007fff 32768 22 0000000000008000 65535 6 000000000000ffff 65536 33 0000000000010000 8388607 7 00000000007fffff 8388608 5 0000000000800000 16777215 9 0000000000ffffff 16777216 15 0000000001000000 2147483647 46 000000007fffffff 2147483648 36 0000000080000000 4294967295 37 00000000ffffffff 4294967296 11 0000000100000000 549755813887 1 0000007fffffffff 549755813888 35 0000008000000000 1099511627775 20 000000ffffffffff 1099511627776 8 0000010000000000 140737488355327 38 00007fffffffffff 140737488355328 34 0000800000000000 281474976710655 19 0000ffffffffffff 281474976710656 32 0001000000000000 36028797018963967 40 007fffffffffffff 36028797018963968 23 0080000000000000 72057594037927935 27 00ffffffffffffff 72057594037927936 16 0100000000000000 9223372036854775807 18 7fffffffffffffff -9223372036854775808 26 8000000000000000 -36028797018963969 30 ff7fffffffffffff -36028797018963968 43 ff80000000000000 -140737488355329 44 ffff7fffffffffff -140737488355328 13 ffff800000000000 -549755813889 28 ffffff7fffffffff -549755813888 12 
ffffff8000000000 -2147483649 39 ffffffff7fffffff -2147483648 31 ffffffff80000000 -8388609 21 ffffffffff7fffff -8388608 2 ffffffffff800000 -32769 24 ffffffffffff7fff -32768 42 ffffffffffff8000 -129 4 ffffffffffffff7f -128 45 ffffffffffffff80 -1 17 ffffffffffffffff} +do_test boundary4-4.13 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY +z DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +do_test boundary4-4.14 { + db eval { + SELECT a, rowid, z FROM t1 ORDER BY z DESC + } +} {-1 17 ffffffffffffffff -128 45 ffffffffffffff80 -129 4 ffffffffffffff7f -32768 42 ffffffffffff8000 -32769 24 ffffffffffff7fff -8388608 2 ffffffffff800000 -8388609 21 ffffffffff7fffff -2147483648 31 ffffffff80000000 -2147483649 39 ffffffff7fffffff -549755813888 12 ffffff8000000000 -549755813889 28 ffffff7fffffffff -140737488355328 13 ffff800000000000 -140737488355329 44 ffff7fffffffffff -36028797018963968 43 ff80000000000000 -36028797018963969 30 ff7fffffffffffff -9223372036854775808 26 8000000000000000 9223372036854775807 18 7fffffffffffffff 72057594037927936 16 0100000000000000 72057594037927935 27 00ffffffffffffff 36028797018963968 23 0080000000000000 36028797018963967 40 007fffffffffffff 281474976710656 32 0001000000000000 281474976710655 19 0000ffffffffffff 140737488355328 34 0000800000000000 140737488355327 38 00007fffffffffff 1099511627776 8 0000010000000000 1099511627775 20 000000ffffffffff 549755813888 35 0000008000000000 549755813887 1 0000007fffffffff 4294967296 11 0000000100000000 4294967295 37 00000000ffffffff 2147483648 36 0000000080000000 2147483647 46 000000007fffffff 16777216 15 0000000001000000 16777215 9 0000000000ffffff 8388608 5 0000000000800000 8388607 7 00000000007fffff 65536 33 0000000000010000 65535 6 000000000000ffff 32768 22 0000000000008000 32767 10 0000000000007fff 256 14 0000000000000100 255 29 00000000000000ff 128 41 0000000000000080 127 25 000000000000007f 0 3 0000000000000000} +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree2.test --- sqlite3-3.4.2/test/btree2.test 2007-04-13 04:11:16.000000000 +0100 +++ sqlite3-3.6.16/test/btree2.test 1970-01-01 
01:00:00.000000000 +0100 @@ -1,502 +0,0 @@ -# 2001 September 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend -# -# $Id: btree2.test,v 1.15 2006/03/19 13:00:25 drh Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -if {[info commands btree_open]!=""} { - -# Create a new database file containing no entries. The database should -# contain 5 tables: -# -# 2 The descriptor table -# 3 The foreground table -# 4 The background table -# 5 The long key table -# 6 The long data table -# -# An explanation for what all these tables are used for is provided below. -# -do_test btree2-1.1 { - expr srand(1) - file delete -force test2.bt - file delete -force test2.bt-journal - set ::b [btree_open test2.bt 2000 0] - btree_begin_transaction $::b - btree_create_table $::b 0 -} {2} -do_test btree2-1.2 { - btree_create_table $::b 0 -} {3} -do_test btree2-1.3 { - btree_create_table $::b 0 -} {4} -do_test btree2-1.4 { - btree_create_table $::b 0 -} {5} -do_test btree2-1.5 { - btree_create_table $::b 0 -} {6} -do_test btree2-1.6 { - set ::c2 [btree_cursor $::b 2 1] - btree_insert $::c2 {one} {1} - btree_move_to $::c2 {one} - btree_delete $::c2 - btree_close_cursor $::c2 - btree_commit $::b - btree_integrity_check $::b 1 2 3 4 5 6 -} {} - -# This test module works by making lots of pseudo-random changes to a -# database while simultaneously maintaining an invariant on that database. -# Periodically, the script does a sanity check on the database and verifies -# that the invariant is satisfied. -# -# The invariant is as follows: -# -# 1. The descriptor table always contains 2 entries. An entry keyed by -# "N" is the number of elements in the foreground and background tables -# combined. The entry keyed by "L" is the number of digits in the keys -# for foreground and background tables. -# -# 2. The union of the foreground and background tables consists of N entries -# where each entry has an L-digit key. (Actually, some keys can be longer -# than L characters, but they always start with L digits.) The keys -# cover all integers between 1 and N. Whenever an entry is added to -# the foreground it is removed from the background and vice versa. -# -# 3. Some entries in the foreground and background tables have keys that -# begin with an L-digit number but are followed by additional characters. -# For each such entry there is a corresponding entry in the long key -# table. The long key table entry has a key which is just the L-digit -# number and data which is the length of the key in the foreground and -# background tables. -# -# 4. The data for both foreground and background entries is usually a -# short string. But some entries have long data strings. For each -# such entry there is an entry in the long data table. The key to -# the long data table is an L-digit number. (The extension on long keys -# is omitted.) The data is the number of characters in the data of the -# foreground or background entry. -# -# The following function builds a database that satisfies all of the above invariants.
-# -proc build_db {N L} { - for {set i 2} {$i<=6} {incr i} { - catch {btree_close_cursor [set ::c$i]} - btree_clear_table $::b $i - set ::c$i [btree_cursor $::b $i 1] - } - btree_insert $::c2 N $N - btree_insert $::c2 L $L - set format %0${L}d - for {set i 1} {$i<=$N} {incr i} { - set key [format $format $i] - set data $key - btree_insert $::c3 $key $data - } -} - -# Given a base key number and a length, construct the full text of the key -# or data. -# -proc make_payload {keynum L len} { - set key [format %0${L}d $keynum] - set r $key - set i 1 - while {[string length $r]<$len} { - append r " ($i) $key" - incr i - } - return [string range $r 0 [expr {$len-1}]] -} - -# Verify the invariants on the database. Return an empty string on -# success or an error message if something is amiss. -# -proc check_invariants {} { - set ck [btree_integrity_check $::b 1 2 3 4 5 6] - if {$ck!=""} { - puts "\n*** SANITY:\n$ck" - exit - return $ck - } - btree_move_to $::c3 {} - btree_move_to $::c4 {} - btree_move_to $::c2 N - set N [btree_data $::c2] - btree_move_to $::c2 L - set L [btree_data $::c2] - set LM1 [expr {$L-1}] - for {set i 1} {$i<=$N} {incr i} { - set key {} - if {![btree_eof $::c3]} { - set key [btree_key $::c3] - } - if {[scan $key %d k]<1} {set k 0} - if {$k!=$i} { - set key {} - if {![btree_eof $::c4]} { - set key [btree_key $::c4] - } - if {[scan $key %d k]<1} {set k 0} - if {$k!=$i} { - return "Key $i is missing from both foreground and background" - } - set data [btree_data $::c4] - btree_next $::c4 - } else { - set data [btree_data $::c3] - btree_next $::c3 - } - set skey [string range $key 0 $LM1] - if {[btree_move_to $::c5 $skey]==0} { - set keylen [btree_data $::c5] - } else { - set keylen $L - } - if {[string length $key]!=$keylen} { - return "Key $i is the wrong size.\ - Is \"$key\" but should be \"[make_payload $k $L $keylen]\"" - } - if {[make_payload $k $L $keylen]!=$key} { - return "Key $i has an invalid extension" - } - if {[btree_move_to $::c6 $skey]==0} { - set datalen [btree_data $::c6] - } else { - set datalen $L - } - if {[string length $data]!=$datalen} { - return "Data for $i is the wrong size.\ - Is [string length $data] but should be $datalen" - } - if {[make_payload $k $L $datalen]!=$data} { - return "Entry $i has an incorrect data" - } - } -} - -# Look at all elements in both the foreground and background tables. -# Make sure the key is always the same as the prefix of the data. -# -# This routine was used for hunting bugs. It is not a part of standard -# tests. -# -proc check_data {n key} { - global c3 c4 - incr n -1 - foreach c [list $c3 $c4] { - btree_first $c ;# move_to $c $key - set cnt 0 - while {![btree_eof $c]} { - set key [btree_key $c] - set data [btree_data $c] - if {[string range $key 0 $n] ne [string range $data 0 $n]} { - puts "key=[list $key] data=[list $data] n=$n" - puts "cursor info = [btree_cursor_info $c]" - btree_page_dump $::b [lindex [btree_cursor_info $c] 0] - exit - } - btree_next $c - } - } -} - -# Make random changes to the database such that each change preserves -# the invariants. The number of changes is $n*N where N is the parameter -# from the descriptor table. Each changes begins with a random key. -# the entry with that key is put in the foreground table with probability -# $I and it is put in background with probability (1.0-$I). It gets -# a long key with probability $K and long data with probability $D. 
-# -set chngcnt 0 -proc random_changes {n I K D} { - global chngcnt - btree_move_to $::c2 N - set N [btree_data $::c2] - btree_move_to $::c2 L - set L [btree_data $::c2] - set LM1 [expr {$L-1}] - set total [expr {int($N*$n)}] - set format %0${L}d - for {set i 0} {$i<$total} {incr i} { - set k [expr {int(rand()*$N)+1}] - set insert [expr {rand()<=$I}] - set longkey [expr {rand()<=$K}] - set longdata [expr {rand()<=$D}] - if {$longkey} { - set x [expr {rand()}] - set keylen [expr {int($x*$x*$x*$x*3000)+10}] - } else { - set keylen $L - } - set key [make_payload $k $L $keylen] - if {$longdata} { - set x [expr {rand()}] - set datalen [expr {int($x*$x*$x*$x*3000)+10}] - } else { - set datalen $L - } - set data [make_payload $k $L $datalen] - set basekey [format $format $k] - if {[set c [btree_move_to $::c3 $basekey]]==0} { - btree_delete $::c3 - } else { - if {$c<0} {btree_next $::c3} - if {![btree_eof $::c3]} { - if {[string match $basekey* [btree_key $::c3]]} { - btree_delete $::c3 - } - } - } - if {[set c [btree_move_to $::c4 $basekey]]==0} { - btree_delete $::c4 - } else { - if {$c<0} {btree_next $::c4} - if {![btree_eof $::c4]} { - if {[string match $basekey* [btree_key $::c4]]} { - btree_delete $::c4 - } - } - } - set kx -1 - if {![btree_eof $::c4]} { - if {[scan [btree_key $::c4] %d kx]<1} {set kx -1} - } - if {$kx==$k} { - btree_delete $::c4 - } - # For debugging - change the "0" to "1" to integrity check after - # every change. - if 0 { - incr chngcnt - puts check----$chngcnt - set ck [btree_integrity_check $::b 1 2 3 4 5 6] - if {$ck!=""} { - puts "\nSANITY CHECK FAILED!\n$ck" - exit - } - } - if {$insert} { - btree_insert $::c3 $key $data - } else { - btree_insert $::c4 $key $data - } - if {$longkey} { - btree_insert $::c5 $basekey $keylen - } elseif {[btree_move_to $::c5 $basekey]==0} { - btree_delete $::c5 - } - if {$longdata} { - btree_insert $::c6 $basekey $datalen - } elseif {[btree_move_to $::c6 $basekey]==0} { - btree_delete $::c6 - } - # For debugging - change the "0" to "1" to integrity check after - # every change. 
- if 0 { - incr chngcnt - puts check----$chngcnt - set ck [btree_integrity_check $::b 1 2 3 4 5 6] - if {$ck!=""} { - puts "\nSANITY CHECK FAILED!\n$ck" - exit - } - } - } -} -set btree_trace 0 - -# Repeat this test sequence on database of various sizes -# -set testno 2 -foreach {N L} { - 10 2 - 50 2 - 200 3 - 2000 5 -} { - puts "**** N=$N L=$L ****" - set hash [md5file test2.bt] - do_test btree2-$testno.1 [subst -nocommands { - set ::c2 [btree_cursor $::b 2 1] - set ::c3 [btree_cursor $::b 3 1] - set ::c4 [btree_cursor $::b 4 1] - set ::c5 [btree_cursor $::b 5 1] - set ::c6 [btree_cursor $::b 6 1] - btree_begin_transaction $::b - build_db $N $L - check_invariants - }] {} - do_test btree2-$testno.2 { - btree_close_cursor $::c2 - btree_close_cursor $::c3 - btree_close_cursor $::c4 - btree_close_cursor $::c5 - btree_close_cursor $::c6 - btree_rollback $::b - md5file test2.bt - } $hash - do_test btree2-$testno.3 [subst -nocommands { - btree_begin_transaction $::b - set ::c2 [btree_cursor $::b 2 1] - set ::c3 [btree_cursor $::b 3 1] - set ::c4 [btree_cursor $::b 4 1] - set ::c5 [btree_cursor $::b 5 1] - set ::c6 [btree_cursor $::b 6 1] - build_db $N $L - check_invariants - }] {} - do_test btree2-$testno.4 { - btree_commit $::b - check_invariants - } {} - do_test btree2-$testno.5 { - lindex [btree_pager_stats $::b] 1 - } {6} - do_test btree2-$testno.6 { - btree_cursor_info $::c2 - btree_cursor_info $::c3 - btree_cursor_info $::c4 - btree_cursor_info $::c5 - btree_cursor_info $::c6 - btree_close_cursor $::c2 - btree_close_cursor $::c3 - btree_close_cursor $::c4 - btree_close_cursor $::c5 - btree_close_cursor $::c6 - lindex [btree_pager_stats $::b] 1 - } {0} - do_test btree2-$testno.7 { - btree_close $::b - } {} - - # For each database size, run various changes tests. 
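The rollback tests in the loops above and below rely on whole-file MD5 hashes: rolling back a transaction must leave the database file byte-for-byte identical to its state before the transaction began. Reduced to a minimal sketch using the same test commands:

  set before [md5file test2.bt]
  btree_begin_transaction $::b
  # ... make arbitrary changes through cursors here ...
  btree_rollback $::b
  expr {[md5file test2.bt] eq $before}   ;# expected to be 1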
- # - set num2 1 - foreach {n I K D} { - 0.5 0.5 0.1 0.1 - 1.0 0.2 0.1 0.1 - 1.0 0.8 0.1 0.1 - 2.0 0.0 0.1 0.1 - 2.0 1.0 0.1 0.1 - 2.0 0.0 0.0 0.0 - 2.0 1.0 0.0 0.0 - } { - set testid btree2-$testno.8.$num2 - set hash [md5file test2.bt] - do_test $testid.0 { - set ::b [btree_open test2.bt 2000 0] - set ::c2 [btree_cursor $::b 2 1] - set ::c3 [btree_cursor $::b 3 1] - set ::c4 [btree_cursor $::b 4 1] - set ::c5 [btree_cursor $::b 5 1] - set ::c6 [btree_cursor $::b 6 1] - check_invariants - } {} - set cnt 6 - for {set i 2} {$i<=6} {incr i} { - if {[lindex [btree_cursor_info [set ::c$i]] 0]!=$i} {incr cnt} - } - do_test $testid.1 { - btree_begin_transaction $::b - lindex [btree_pager_stats $::b] 1 - } $cnt - do_test $testid.2 [subst { - random_changes $n $I $K $D - }] {} - do_test $testid.3 { - check_invariants - } {} - do_test $testid.4 { - btree_close_cursor $::c2 - btree_close_cursor $::c3 - btree_close_cursor $::c4 - btree_close_cursor $::c5 - btree_close_cursor $::c6 - btree_rollback $::b - md5file test2.bt - } $hash - btree_begin_transaction $::b - set ::c2 [btree_cursor $::b 2 1] - set ::c3 [btree_cursor $::b 3 1] - set ::c4 [btree_cursor $::b 4 1] - set ::c5 [btree_cursor $::b 5 1] - set ::c6 [btree_cursor $::b 6 1] - do_test $testid.5 [subst { - random_changes $n $I $K $D - }] {} - do_test $testid.6 { - check_invariants - } {} - do_test $testid.7 { - btree_commit $::b - check_invariants - } {} - set hash [md5file test2.bt] - do_test $testid.8 { - btree_close_cursor $::c2 - btree_close_cursor $::c3 - btree_close_cursor $::c4 - btree_close_cursor $::c5 - btree_close_cursor $::c6 - lindex [btree_pager_stats $::b] 1 - } {0} - do_test $testid.9 { - btree_close $::b - set ::b [btree_open test2.bt 2000 0] - set ::c2 [btree_cursor $::b 2 1] - set ::c3 [btree_cursor $::b 3 1] - set ::c4 [btree_cursor $::b 4 1] - set ::c5 [btree_cursor $::b 5 1] - set ::c6 [btree_cursor $::b 6 1] - check_invariants - } {} - do_test $testid.10 { - btree_close_cursor $::c2 - btree_close_cursor $::c3 - btree_close_cursor $::c4 - btree_close_cursor $::c5 - btree_close_cursor $::c6 - lindex [btree_pager_stats $::b] 1 - } {0} - do_test $testid.11 { - btree_close $::b - } {} - incr num2 - } - incr testno - set ::b [btree_open test2.bt 2000 0] -} - -# Testing is complete. Shut everything down. -# -do_test btree-999.1 { - lindex [btree_pager_stats $::b] 1 -} {0} -do_test btree-999.2 { - btree_close $::b -} {} -do_test btree-999.3 { - file delete -force test2.bt - file exists test2.bt-journal -} {0} - -} ;# end if( not mem: and has pager_open command ); - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree4.test --- sqlite3-3.4.2/test/btree4.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/btree4.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,101 +0,0 @@ -# 2002 December 03 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend -# -# This file focuses on testing the sqliteBtreeNext() and -# sqliteBtreePrevious() procedures and making sure they are able -# to step through an entire table from either direction. 
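The stepping idiom that the btree4 tests below repeat many times can be condensed into one small helper (a sketch built from the same test commands; walking backward is the mirror image using btree_last and btree_prev):

  proc walk_forward {cur} {
    set keys {}
    btree_first $cur
    while {![btree_eof $cur]} {
      lappend keys [btree_key $cur]
      btree_next $cur
    }
    return $keys
  }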
-# -# $Id: btree4.test,v 1.2 2004/05/09 20:40:12 drh Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -if {[info commands btree_open]!=""} { - -# Open a test database. -# -file delete -force test1.bt -file delete -force test1.bt-journal -set b1 [btree_open test1.bt 2000 0] -btree_begin_transaction $b1 -do_test btree4-0.1 { - btree_create_table $b1 0 -} 2 - -set data {abcdefghijklmnopqrstuvwxyz0123456789} -append data $data -append data $data -append data $data -append data $data - -foreach N {10 100 1000} { - btree_clear_table $::b1 2 - set ::c1 [btree_cursor $::b1 2 1] - do_test btree4-$N.1 { - for {set i 1} {$i<=$N} {incr i} { - btree_insert $::c1 [format k-%05d $i] $::data-$i - } - btree_first $::c1 - btree_key $::c1 - } {k-00001} - do_test btree4-$N.2 { - btree_data $::c1 - } $::data-1 - for {set i 2} {$i<=$N} {incr i} { - do_test btree-$N.3.$i.1 { - btree_next $::c1 - } 0 - do_test btree-$N.3.$i.2 { - btree_key $::c1 - } [format k-%05d $i] - do_test btree-$N.3.$i.3 { - btree_data $::c1 - } $::data-$i - } - do_test btree4-$N.4 { - btree_next $::c1 - } 1 - do_test btree4-$N.5 { - btree_last $::c1 - } 0 - do_test btree4-$N.6 { - btree_key $::c1 - } [format k-%05d $N] - do_test btree4-$N.7 { - btree_data $::c1 - } $::data-$N - for {set i [expr {$N-1}]} {$i>=1} {incr i -1} { - do_test btree4-$N.8.$i.1 { - btree_prev $::c1 - } 0 - do_test btree4-$N.8.$i.2 { - btree_key $::c1 - } [format k-%05d $i] - do_test btree4-$N.8.$i.3 { - btree_data $::c1 - } $::data-$i - } - do_test btree4-$N.9 { - btree_prev $::c1 - } 1 - btree_close_cursor $::c1 -} - -btree_rollback $::b1 -btree_pager_ref_dump $::b1 -btree_close $::b1 - -} ;# end if( not mem: and has pager_open command ); - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree5.test --- sqlite3-3.4.2/test/btree5.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/btree5.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,292 +0,0 @@ -# 2004 May 10 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend -# -# $Id: btree5.test,v 1.5 2004/05/14 12:17:46 drh Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# Attempting to read table 1 of an empty file gives an SQLITE_EMPTY -# error. -# -do_test btree5-1.1 { - file delete -force test1.bt - file delete -force test1.bt-journal - set rc [catch {btree_open test1.bt 2000 0} ::b1] -} {0} -do_test btree5-1.2 { - set rc [catch {btree_cursor $::b1 1 0} ::c1] -} {1} -do_test btree5-1.3 { - set ::c1 -} {SQLITE_EMPTY} -do_test btree5-1.4 { - set rc [catch {btree_cursor $::b1 1 1} ::c1] -} {1} -do_test btree5-1.5 { - set ::c1 -} {SQLITE_EMPTY} - -# Starting a transaction initializes the first page of the database -# and the error goes away. 
-# -do_test btree5-1.6 { - btree_begin_transaction $b1 - set rc [catch {btree_cursor $b1 1 0} c1] -} {0} -do_test btree5-1.7 { - btree_first $c1 -} {1} -do_test btree5-1.8 { - btree_close_cursor $c1 - btree_rollback $b1 - set rc [catch {btree_cursor $b1 1 0} c1] -} {1} -do_test btree5-1.9 { - set c1 -} {SQLITE_EMPTY} -do_test btree5-1.10 { - btree_begin_transaction $b1 - set rc [catch {btree_cursor $b1 1 0} c1] -} {0} -do_test btree5-1.11 { - btree_first $c1 -} {1} -do_test btree5-1.12 { - btree_close_cursor $c1 - btree_commit $b1 - set rc [catch {btree_cursor $b1 1 0} c1] -} {0} -do_test btree5-1.13 { - btree_first $c1 -} {1} -do_test btree5-1.14 { - btree_close_cursor $c1 - btree_integrity_check $b1 1 -} {} - -# Insert many entries into table 1. This is designed to test the -# virtual-root logic that comes into play for page one. It is also -# a good test of INTKEY tables. -# -# Stagger the inserts. After the inserts complete, go back and do -# deletes. Stagger the deletes too. Repeat this several times. -# - -# Do N inserts into table 1 using random keys between 0 and 1000000 -# -proc random_inserts {N} { - global c1 - while {$N>0} { - set k [expr {int(rand()*1000000)}] - if {[btree_move_to $c1 $k]==0} continue; # entry already exists - btree_insert $c1 $k data-for-$k - incr N -1 - } -} - -# Do N delete from table 1 -# -proc random_deletes {N} { - global c1 - while {$N>0} { - set k [expr {int(rand()*1000000)}] - btree_move_to $c1 $k - btree_delete $c1 - incr N -1 - } -} - -# Make sure the table has exactly N entries. Make sure the data for -# each entry agrees with its key. -# -proc check_table {N} { - global c1 - btree_first $c1 - set cnt 0 - while {![btree_eof $c1]} { - if {[set data [btree_data $c1]] ne "data-for-[btree_key $c1]"} { - return "wrong data for entry $cnt" - } - set n [string length $data] - set fdata1 [btree_fetch_data $c1 $n] - set fdata2 [btree_fetch_data $c1 -1] - if {$fdata1 ne "" && $fdata1 ne $data} { - return "DataFetch returned the wrong value with amt=$n" - } - if {$fdata1 ne $fdata2} { - return "DataFetch returned the wrong value when amt=-1" - } - if {$n>10} { - set fdata3 [btree_fetch_data $c1 10] - if {$fdata3 ne [string range $data 0 9]} { - return "DataFetch returned the wrong value when amt=10" - } - } - incr cnt - btree_next $c1 - } - if {$cnt!=$N} { - return "wrong number of entries" - } - return {} -} - -# Initialize the database -# -btree_begin_transaction $b1 -set c1 [btree_cursor $b1 1 1] -set btree_trace 0 - -# Do the tests. -# -set cnt 0 -for {set i 1} {$i<=100} {incr i} { - do_test btree5-2.$i.1 { - random_inserts 200 - incr cnt 200 - check_table $cnt - } {} - do_test btree5-2.$i.2 { - btree_integrity_check $b1 1 - } {} - do_test btree5-2.$i.3 { - random_deletes 190 - incr cnt -190 - check_table $cnt - } {} - do_test btree5-2.$i.4 { - btree_integrity_check $b1 1 - } {} -} - -#btree_tree_dump $b1 1 -btree_close_cursor $c1 -btree_commit $b1 -btree_begin_transaction $b1 - -# This procedure converts an integer into a variable-length text key. -# The conversion is reversible. -# -# The first two characters of the string are alphabetics derived from -# the least significant bits of the number. Because they are derived -# from least significant bits, the sort order of the resulting string -# is different from numeric order. After the alphabetic prefix comes -# the original number. A variable-length suffix follows. The length -# of the suffix is based on a hash of the original number. 
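A worked example of the encoding described above, using the charset and suffix defined below: for n = 5 the character indices are 5%52 = 5 ("f") and (5/52)%52 = 0 ("a"), and the suffix length index is (5*211)%593 = 462, so num_to_key returns "fa-5-" followed by the first 463 characters of the suffix string, 468 characters in all. The string sort order of such keys really does differ from numeric order: n = 52 maps to a key beginning "ab-52-", which sorts before the "ba-1-" key produced by n = 1.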
-# -proc num_to_key {n} { - global charset ncharset suffix - set c1 [string index $charset [expr {$n%$ncharset}]] - set c2 [string index $charset [expr {($n/$ncharset)%$ncharset}]] - set nsuf [expr {($n*211)%593}] - return $c1$c2-$n-[string range $suffix 0 $nsuf] -} -set charset {abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ} -set ncharset [string length $charset] -set suffix $charset$charset -while {[string length $suffix]<1000} {append suffix $suffix} - -# This procedures extracts the original integer used to create -# a key by num_to_key -# -proc key_to_num {key} { - regexp {^..-([0-9]+)} $key all n - return $n -} - -# Insert into table $tab keys corresponding to all values between -# $start and $end, inclusive. -# -proc insert_range {tab start end} { - for {set i $start} {$i<=$end} {incr i} { - btree_insert $tab [num_to_key $i] {} - } -} - -# Delete from table $tab keys corresponding to all values between -# $start and $end, inclusive. -# -proc delete_range {tab start end} { - for {set i $start} {$i<=$end} {incr i} { - if {[btree_move_to $tab [num_to_key $i]]==0} { - btree_delete $tab - } - } -} - -# Make sure table $tab contains exactly those keys corresponding -# to values between $start and $end -# -proc check_range {tab start end} { - btree_first $tab - while {![btree_eof $tab]} { - set key [btree_key $tab] - set i [key_to_num $key] - if {[num_to_key $i] ne $key} { - return "malformed key: $key" - } - set got($i) 1 - btree_next $tab - } - set all [lsort -integer [array names got]] - if {[llength $all]!=$end+1-$start} { - return "table contains wrong number of values" - } - if {[lindex $all 0]!=$start} { - return "wrong starting value" - } - if {[lindex $all end]!=$end} { - return "wrong ending value" - } - return {} -} - -# Create a zero-data table and test it out. -# -do_test btree5-3.1 { - set rc [catch {btree_create_table $b1 2} t2] -} {0} -do_test btree5-3.2 { - set rc [catch {btree_cursor $b1 $t2 1} c2] -} {0} -set start 1 -set end 100 -for {set i 1} {$i<=100} {incr i} { - do_test btree5-3.3.$i.1 { - insert_range $c2 $start $end - btree_integrity_check $b1 1 $t2 - } {} - do_test btree5-3.3.$i.2 { - check_range $c2 $start $end - } {} - set nstart $start - incr nstart 89 - do_test btree5-3.3.$i.3 { - delete_range $c2 $start $nstart - btree_integrity_check $b1 1 $t2 - } {} - incr start 90 - do_test btree5-3.3.$i.4 { - check_range $c2 $start $end - } {} - incr end 100 -} - - -btree_close_cursor $c2 -btree_commit $b1 -btree_close $b1 - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree6.test --- sqlite3-3.4.2/test/btree6.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/btree6.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -# 2004 May 10 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend - specifically -# the B+tree tables. B+trees store all data on the leaves rather -# that storing data with keys on interior nodes. 
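The B+tree variant is requested below with btree_create_table $b1 5. Reading that flag value as BTREE_INTKEY|BTREE_LEAFDATA from the btree.h of this era (an assumption, since that header is not part of this diff), the call asks for a table with integer keys whose data is kept entirely on leaf pages. A condensed sketch of how it is used below:

  set tab [btree_create_table $b1 5]     ;# intkey + leafdata table
  set cur [btree_cursor $b1 $tab 1]      ;# writable cursor on that table
  btree_insert $cur 42 data-for-42       ;# integer key, arbitrary data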
-# -# $Id: btree6.test,v 1.4 2004/05/20 22:16:31 drh Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - - -# Insert many entries into the table that cursor $cur points to. -# The table should be an INTKEY table. -# -# Stagger the inserts. After the inserts complete, go back and do -# deletes. Stagger the deletes too. Repeat this several times. -# - -# Do N inserts into table $tab using random keys between 0 and 1000000 -# -proc random_inserts {cur N} { - global inscnt - while {$N>0} { - set k [expr {int(rand()*1000000)}] - if {[btree_move_to $cur $k]==0} { - continue; # entry already exists - } - incr inscnt - btree_insert $cur $k data-for-$k - incr N -1 - } -} -set inscnt 0 - -# Do N delete from the table that $cur points to. -# -proc random_deletes {cur N} { - while {$N>0} { - set k [expr {int(rand()*1000000)}] - btree_move_to $cur $k - btree_delete $cur - incr N -1 - } -} - -# Make sure the table that $cur points to has exactly N entries. -# Make sure the data for each entry agrees with its key. -# -proc check_table {cur N} { - btree_first $cur - set cnt 0 - while {![btree_eof $cur]} { - if {[set data [btree_data $cur]] ne "data-for-[btree_key $cur]"} { - return "wrong data for entry $cnt" - } - set n [string length $data] - set fdata1 [btree_fetch_data $cur $n] - set fdata2 [btree_fetch_data $cur -1] - if {$fdata1 ne "" && $fdata1 ne $data} { - return "DataFetch returned the wrong value with amt=$n" - } - if {$fdata1 ne $fdata2} { - return "DataFetch returned the wrong value when amt=-1" - } - if {$n>10} { - set fdata3 [btree_fetch_data $cur 10] - if {$fdata3 ne [string range $data 0 9]} { - return "DataFetch returned the wrong value when amt=10" - } - } - incr cnt - btree_next $cur - } - if {$cnt!=$N} { - return "wrong number of entries. Got $cnt. Looking for $N" - } - return {} -} - -# Initialize the database -# -file delete -force test1.bt -file delete -force test1.bt-journal -set b1 [btree_open test1.bt 2000 0] -btree_begin_transaction $b1 -set tab [btree_create_table $b1 5] -set cur [btree_cursor $b1 $tab 1] -set btree_trace 0 -expr srand(1) - -# Do the tests. -# -set cnt 0 -for {set i 1} {$i<=40} {incr i} { - do_test btree6-1.$i.1 { - random_inserts $cur 200 - incr cnt 200 - check_table $cur $cnt - } {} - do_test btree6-1.$i.2 { - btree_integrity_check $b1 1 $tab - } {} - do_test btree6-1.$i.3 { - random_deletes $cur 90 - incr cnt -90 - check_table $cur $cnt - } {} - do_test btree6-1.$i.4 { - btree_integrity_check $b1 1 $tab - } {} -} - -btree_close_cursor $cur -btree_commit $b1 -btree_close $b1 - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree7.test --- sqlite3-3.4.2/test/btree7.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/btree7.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,50 +0,0 @@ -# 2004 Jun 4 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend. 
-# -# $Id: btree7.test,v 1.2 2004/11/04 14:47:13 drh Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# Stress the balance routine by trying to create situations where -# 3 neighboring nodes split into 5. -# -set bigdata _123456789 ;# 10 -append bigdata $bigdata ;# 20 -append bigdata $bigdata ;# 40 -append bigdata $bigdata ;# 80 -append bigdata $bigdata ;# 160 -append bigdata $bigdata ;# 320 -append bigdata $bigdata ;# 640 -set data450 [string range $bigdata 0 449] -do_test btree7-1.1 { - execsql " - CREATE TABLE t1(x INTEGER PRIMARY KEY, y TEXT); - INSERT INTO t1 VALUES(1, '$bigdata'); - INSERT INTO t1 VALUES(2, '$bigdata'); - INSERT INTO t1 VALUES(3, '$data450'); - INSERT INTO t1 VALUES(5, '$data450'); - INSERT INTO t1 VALUES(8, '$bigdata'); - INSERT INTO t1 VALUES(9, '$bigdata'); - " -} {} -integrity_check btree7-1.2 -do_test btree7-1.3 { - execsql " - INSERT INTO t1 VALUES(4, '$bigdata'); - " -} {} -integrity_check btree7-1.4 - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree8.test --- sqlite3-3.4.2/test/btree8.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/btree8.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -# 2005 August 2 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend. -# -# $Id: btree8.test,v 1.6 2005/08/02 17:13:12 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# Ticket #1346: If the table rooted on page 1 contains a single entry -# and that single entries has to flow out into another page because -# page 1 is 100-bytes smaller than most other pages, then you delete that -# one entry, everything should still work. -# -do_test btree8-1.1 { - execsql { -CREATE TABLE t1(x - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- - ---------------------------------------------------------------------------- -); -DROP table t1; - } -} {} -integrity_check btree8-1.2 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree9.test --- sqlite3-3.4.2/test/btree9.test 2007-05-02 02:34:32.000000000 +0100 +++ sqlite3-3.6.16/test/btree9.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -# 2007 May 01 -# -# The author disclaims copyright to this source code. 
In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend. -# -# $Id: btree9.test,v 1.1 2007/05/02 01:34:32 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# The sqlite3BtreeInsert() API now has an additional "nZero" parameter -# which specifies the number of zero bytes to append to the end of the -# data. This feature allows large zero-filled BLOBs to be created without -# having to allocate a big chunk of memory to instantiate the blob. -# -# The following code tests the new feature. -# - -# Create the database -# -do_test btree9-1.1 { - file delete -force test1.bt - file delete -force test1.bt-journal - set b1 [btree_open test1.bt 2000 0] - btree_begin_transaction $b1 - set t1 [btree_create_table $b1 5] - set c1 [btree_cursor $b1 $t1 1] - btree_insert $c1 1 data-for-1 20000 - btree_move_to $c1 1 - btree_key $c1 -} {1} -do_test btree9-1.2 { - btree_payload_size $c1 -} {20010} - - -btree_close_cursor $c1 -btree_commit $b1 -btree_close $b1 - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/btree.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/btree.test --- sqlite3-3.4.2/test/btree.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/btree.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,1071 +0,0 @@ -# 2001 September 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file implements regression tests for SQLite library. The -# focus of this script is btree database backend -# -# $Id: btree.test,v 1.40 2007/06/25 08:16:58 danielk1977 Exp $ - - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -ifcapable default_autovacuum { - finish_test - return -} - -# Basic functionality. Open and close a database. -# -do_test btree-1.1 { - file delete -force test1.bt - file delete -force test1.bt-journal - set rc [catch {btree_open test1.bt 2000 0} ::b1] -} {0} - -# The second element of the list returned by btree_pager_stats is the -# number of pages currently checked out. We'll be checking this value -# frequently during this test script, to make sure the btree library -# is properly releasing the pages it checks out, and thus avoiding -# page leaks. -# -do_test btree-1.1.1 { - lindex [btree_pager_stats $::b1] 1 -} {0} -do_test btree-1.2 { - set rc [catch {btree_open test1.bt 2000 0} ::b2] -} {0} -do_test btree-1.3 { - set rc [catch {btree_close $::b2} msg] - lappend rc $msg -} {0 {}} - -# Do an insert and verify that the database file grows in size. 
-# -do_test btree-1.4 { - set rc [catch {btree_begin_transaction $::b1} msg] - lappend rc $msg -} {0 {}} -do_test btree-1.4.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-1.5 { - set rc [catch {btree_cursor $::b1 1 1} ::c1] - if {$rc} {lappend rc $::c1} - set rc -} {0} -do_test btree-1.6 { - set rc [catch {btree_insert $::c1 100 1.00} msg] - lappend rc $msg -} {0 {}} -do_test btree-1.7 { - btree_move_to $::c1 100 - btree_key $::c1 -} {100} -do_test btree-1.8 { - btree_data $::c1 -} {1.00} -do_test btree-1.9 { - set rc [catch {btree_close_cursor $::c1} msg] - lappend rc $msg -} {0 {}} -do_test btree-1.10 { - set rc [catch {btree_commit $::b1} msg] - lappend rc $msg -} {0 {}} -do_test btree-1.11 { - file size test1.bt -} {1024} -do_test btree-1.12 { - lindex [btree_pager_stats $::b1] 1 -} {0} - -# Reopen the database and attempt to read the record that we wrote. -# -do_test btree-2.1 { - set rc [catch {btree_cursor $::b1 1 1} ::c1] - if {$rc} {lappend rc $::c1} - set rc -} {0} -do_test btree-2.1.1 { - btree_cursor_list $::b1 -} {} -do_test btree-2.2 { - btree_move_to $::c1 99 -} {1} -do_test btree-2.3 { - btree_move_to $::c1 101 -} {-1} -do_test btree-2.4 { - btree_move_to $::c1 100 -} {0} -do_test btree-2.5 { - btree_key $::c1 -} {100} -do_test btree-2.6 { - btree_data $::c1 -} {1.00} -do_test btree-2.7 { - lindex [btree_pager_stats $::b1] 1 -} {1} - -# Do some additional inserts -# -do_test btree-3.1 { - btree_begin_transaction $::b1 - btree_insert $::c1 200 2.00 - btree_move_to $::c1 200 - btree_key $::c1 -} {200} -do_test btree-3.1.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-3.2 { - btree_insert $::c1 300 3.00 - btree_move_to $::c1 300 - btree_key $::c1 -} {300} -do_test btree-3.4 { - btree_insert $::c1 400 4.00 - btree_move_to $::c1 400 - btree_key $::c1 -} {400} -do_test btree-3.5 { - btree_insert $::c1 500 5.00 - btree_move_to $::c1 500 - btree_key $::c1 -} {500} -do_test btree-3.6 { - btree_insert $::c1 600 6.00 - btree_move_to $::c1 600 - btree_key $::c1 -} {600} -#btree_page_dump $::b1 2 -do_test btree-3.7 { - set rc [btree_move_to $::c1 0] - expr {$rc>0} -} {1} -do_test btree-3.8 { - btree_key $::c1 -} {100} -do_test btree-3.9 { - btree_data $::c1 -} {1.00} -do_test btree-3.10 { - btree_next $::c1 - btree_key $::c1 -} {200} -do_test btree-3.11 { - btree_data $::c1 -} {2.00} -do_test btree-3.12 { - btree_next $::c1 - btree_key $::c1 -} {300} -do_test btree-3.13 { - btree_data $::c1 -} {3.00} -do_test btree-3.14 { - btree_next $::c1 - btree_key $::c1 -} {400} -do_test btree-3.15 { - btree_data $::c1 -} {4.00} -do_test btree-3.16 { - btree_next $::c1 - btree_key $::c1 -} {500} -do_test btree-3.17 { - btree_data $::c1 -} {5.00} -do_test btree-3.18 { - btree_next $::c1 - btree_key $::c1 -} {600} -do_test btree-3.19 { - btree_data $::c1 -} {6.00} -do_test btree-3.20.1 { - btree_next $::c1 - btree_key $::c1 -} {0} -do_test btree-3.20.2 { - btree_eof $::c1 -} {1} -# This test case used to test that one couldn't request data from an -# invalid cursor. That is now an assert()ed condition. 
-# -# do_test btree-3.21 { -# set rc [catch {btree_data $::c1} res] -# lappend rc $res -# } {1 SQLITE_INTERNAL} - -# Commit the changes, reopen and reread the data -# -do_test btree-3.22 { - set rc [catch {btree_close_cursor $::c1} msg] - lappend rc $msg -} {0 {}} -do_test btree-3.22.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-3.23 { - set rc [catch {btree_commit $::b1} msg] - lappend rc $msg -} {0 {}} -do_test btree-3.23.1 { - lindex [btree_pager_stats $::b1] 1 -} {0} -do_test btree-3.24 { - file size test1.bt -} {1024} -do_test btree-3.25 { - set rc [catch {btree_cursor $::b1 1 1} ::c1] - if {$rc} {lappend rc $::c1} - set rc -} {0} -do_test btree-3.25.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-3.26 { - set rc [btree_move_to $::c1 0] - expr {$rc>0} -} {1} -do_test btree-3.27 { - btree_key $::c1 -} {100} -do_test btree-3.28 { - btree_data $::c1 -} {1.00} -do_test btree-3.29 { - btree_next $::c1 - btree_key $::c1 -} {200} -do_test btree-3.30 { - btree_data $::c1 -} {2.00} -do_test btree-3.31 { - btree_next $::c1 - btree_key $::c1 -} {300} -do_test btree-3.32 { - btree_data $::c1 -} {3.00} -do_test btree-3.33 { - btree_next $::c1 - btree_key $::c1 -} {400} -do_test btree-3.34 { - btree_data $::c1 -} {4.00} -do_test btree-3.35 { - btree_next $::c1 - btree_key $::c1 -} {500} -do_test btree-3.36 { - btree_data $::c1 -} {5.00} -do_test btree-3.37 { - btree_next $::c1 - btree_key $::c1 -} {600} -do_test btree-3.38 { - btree_data $::c1 -} {6.00} -do_test btree-3.39 { - btree_next $::c1 - btree_key $::c1 -} {0} -# This test case used to test that requesting data from an invalid cursor -# returned SQLITE_INTERNAL. That is now an assert()ed condition. -# -# do_test btree-3.40 { -# set rc [catch {btree_data $::c1} res] -# lappend rc $res -# } {1 SQLITE_INTERNAL} -do_test btree-3.41 { - lindex [btree_pager_stats $::b1] 1 -} {1} - - -# Now try a delete -# -do_test btree-4.1 { - btree_begin_transaction $::b1 - btree_move_to $::c1 100 - btree_key $::c1 -} {100} -do_test btree-4.1.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-4.2 { - btree_delete $::c1 -} {} -do_test btree-4.3 { - btree_move_to $::c1 100 - btree_key $::c1 -} {200} -do_test btree-4.4 { - btree_next $::c1 - btree_key $::c1 -} {300} -do_test btree-4.5 { - btree_next $::c1 - btree_key $::c1 -} {400} -do_test btree-4.4 { - btree_move_to $::c1 0 - set r {} - while 1 { - set key [btree_key $::c1] - if {[btree_eof $::c1]} break - lappend r $key - lappend r [btree_data $::c1] - btree_next $::c1 - } - set r -} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} - -# Commit and make sure the delete is still there. -# -do_test btree-4.5 { - btree_commit $::b1 - btree_move_to $::c1 0 - set r {} - while 1 { - set key [btree_key $::c1] - if {[btree_eof $::c1]} break - lappend r $key - lappend r [btree_data $::c1] - btree_next $::c1 - } - set r -} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} - -# Completely close the database and reopen it. Then check -# the data again. 
-# -do_test btree-4.6 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-4.7 { - btree_close_cursor $::c1 - lindex [btree_pager_stats $::b1] 1 -} {0} -do_test btree-4.8 { - btree_close $::b1 - set ::b1 [btree_open test1.bt 2000 0] - set ::c1 [btree_cursor $::b1 1 1] - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-4.9 { - set r {} - btree_first $::c1 - while 1 { - set key [btree_key $::c1] - if {[btree_eof $::c1]} break - lappend r $key - lappend r [btree_data $::c1] - btree_next $::c1 - } - set r -} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} - -# Try to read and write meta data -# -do_test btree-5.1 { - btree_get_meta $::b1 -} {0 0 0 0 0 0 0 0 0 0} -do_test btree-5.2 { - set rc [catch { - btree_update_meta $::b1 0 1 2 3 4 5 6 7 8 9 - } msg] - lappend rc $msg -} {1 SQLITE_ERROR} -do_test btree-5.3 { - btree_begin_transaction $::b1 - set rc [catch { - btree_update_meta $::b1 0 1 2 3 0 5 6 0 8 9 - } msg] - lappend rc $msg -} {0 {}} -do_test btree-5.4 { - btree_get_meta $::b1 -} {0 1 2 3 0 5 6 0 8 9} -do_test btree-5.5 { - btree_close_cursor $::c1 - btree_rollback $::b1 - btree_get_meta $::b1 -} {0 0 0 0 0 0 0 0 0 0} -do_test btree-5.6 { - btree_begin_transaction $::b1 - btree_update_meta $::b1 0 10 20 30 0 50 60 0 80 90 - btree_commit $::b1 - btree_get_meta $::b1 -} {0 10 20 30 0 50 60 0 80 90} - -proc select_all {cursor} { - set r {} - btree_first $cursor - while {![btree_eof $cursor]} { - set key [btree_key $cursor] - lappend r $key - lappend r [btree_data $cursor] - btree_next $cursor - } - return $r -} -proc select_keys {cursor} { - set r {} - btree_first $cursor - while {![btree_eof $cursor]} { - set key [btree_key $cursor] - lappend r $key - btree_next $cursor - } - return $r -} - -# Try to create a new table in the database file -# -do_test btree-6.1 { - set rc [catch {btree_create_table $::b1 0} msg] - lappend rc $msg -} {1 SQLITE_ERROR} -do_test btree-6.2 { - btree_begin_transaction $::b1 - set ::t2 [btree_create_table $::b1 0] -} {2} -do_test btree-6.2.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-6.2.2 { - set ::c2 [btree_cursor $::b1 $::t2 1] - lindex [btree_pager_stats $::b1] 1 -} {2} -do_test btree-6.2.3 { - btree_insert $::c2 ten 10 - btree_move_to $::c2 ten - btree_key $::c2 -} {ten} -do_test btree-6.3 { - btree_commit $::b1 - set ::c1 [btree_cursor $::b1 1 1] - lindex [btree_pager_stats $::b1] 1 -} {2} -do_test btree-6.3.1 { - select_all $::c1 -} {200 2.00 300 3.00 400 4.00 500 5.00 600 6.00} -#btree_page_dump $::b1 3 -do_test btree-6.4 { - select_all $::c2 -} {ten 10} - -# Drop the new table, then create it again anew. -# -do_test btree-6.5 { - btree_begin_transaction $::b1 -} {} -do_test btree-6.6 { - btree_close_cursor $::c2 -} {} -do_test btree-6.6.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-6.7 { - btree_close_cursor $::c1 - btree_drop_table $::b1 $::t2 -} {} -do_test btree-6.7.1 { - lindex [btree_get_meta $::b1] 0 -} {1} -do_test btree-6.8 { - set ::t2 [btree_create_table $::b1 0] -} {2} -do_test btree-6.8.1 { - lindex [btree_get_meta $::b1] 0 -} {0} -do_test btree-6.9 { - set ::c2 [btree_cursor $::b1 $::t2 1] - lindex [btree_pager_stats $::b1] 1 -} {2} - -# This test case used to test that requesting the key from an invalid cursor -# returned an empty string. But that is now an assert()ed condition. -# -# do_test btree-6.9.1 { -# btree_move_to $::c2 {} -# btree_key $::c2 -# } {} - -# If we drop table 1 it just clears the table. Table 1 always exists. 
-# -do_test btree-6.10 { - btree_close_cursor $::c2 - btree_drop_table $::b1 1 - set ::c2 [btree_cursor $::b1 $::t2 1] - set ::c1 [btree_cursor $::b1 1 1] - btree_first $::c1 - btree_eof $::c1 -} {1} -do_test btree-6.11 { - btree_commit $::b1 - select_all $::c1 -} {} -do_test btree-6.12 { - select_all $::c2 -} {} -do_test btree-6.13 { - btree_close_cursor $::c2 - lindex [btree_pager_stats $::b1] 1 -} {1} - -# Check to see that pages defragment properly. To do this test we will -# -# 1. Fill the first page of table 1 with data. -# 2. Delete every other entry of table 1. -# 3. Insert a single entry that requires more contiguous -# space than is available. -# -do_test btree-7.1 { - btree_begin_transaction $::b1 -} {} -catch {unset key} -catch {unset data} - -# Check to see that data on overflow pages work correctly. -# -do_test btree-8.1 { - set data "*** This is a very long key " - while {[string length $data]<1234} {append data $data} - set ::data $data - btree_insert $::c1 2020 $data -} {} -btree_page_dump $::b1 1 -btree_page_dump $::b1 2 -do_test btree-8.1.1 { - lindex [btree_pager_stats $::b1] 1 -} {1} -#btree_pager_ref_dump $::b1 -do_test btree-8.2 { - btree_move_to $::c1 2020 - string length [btree_data $::c1] -} [string length $::data] -do_test btree-8.3 { - btree_data $::c1 -} $::data -do_test btree-8.4 { - btree_delete $::c1 -} {} -do_test btree-8.4.1 { - lindex [btree_get_meta $::b1] 0 -} [expr {int(([string length $::data]-238+1019)/1020)}] -do_test btree-8.4.2 { - btree_integrity_check $::b1 1 2 -} {} -do_test btree-8.5 { - set data "*** This is an even longer key " - while {[string length $data]<2000} {append data $data} - append data END - set ::data $data - btree_insert $::c1 2030 $data -} {} -do_test btree-8.6 { - btree_move_to $::c1 2030 - string length [btree_data $::c1] -} [string length $::data] -do_test btree-8.7 { - btree_data $::c1 -} $::data -do_test btree-8.8 { - btree_commit $::b1 - btree_data $::c1 -} $::data -do_test btree-8.9.1 { - btree_close_cursor $::c1 - btree_close $::b1 - set ::b1 [btree_open test1.bt 2000 0] - set ::c1 [btree_cursor $::b1 1 1] - btree_move_to $::c1 2030 - btree_data $::c1 -} $::data -do_test btree-8.9.2 { - btree_integrity_check $::b1 1 2 -} {} -do_test btree-8.10 { - btree_begin_transaction $::b1 - btree_delete $::c1 -} {} -do_test btree-8.11 { - lindex [btree_get_meta $::b1] 0 -} {4} - -# Now check out keys on overflow pages. 
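The arithmetic behind the btree-8.4.1 check earlier in this hunk, worked through: the test data starts as the 28-character string "*** This is a very long key " and is doubled until its length reaches at least 1234, ending at 1792 characters. With 1024-byte pages the formula treats 238 bytes of the payload as staying on the B-tree page itself and each overflow page as carrying 1020 usable bytes (4 bytes go to the next-overflow-page pointer), so the entry needs int((1792-238+1019)/1020) = int(2573/1020) = 2 overflow pages. Deleting it should therefore put exactly 2 pages on the free list, which is the free-page count reported by the first btree_get_meta value.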
-# -do_test btree-8.12.1 { - set ::keyprefix "This is a long prefix to a key " - while {[string length $::keyprefix]<256} {append ::keyprefix $::keyprefix} - btree_close_cursor $::c1 - btree_clear_table $::b1 2 - lindex [btree_get_meta $::b1] 0 -} {4} -do_test btree-8.12.2 { - btree_integrity_check $::b1 1 2 -} {} -do_test btree-8.12.3 { - set ::c1 [btree_cursor $::b1 2 1] - btree_insert $::c1 ${::keyprefix}1 1 - btree_first $::c1 - btree_data $::c1 -} {1} -do_test btree-8.13 { - btree_key $::c1 -} ${keyprefix}1 -do_test btree-8.14 { - btree_insert $::c1 ${::keyprefix}2 2 - btree_insert $::c1 ${::keyprefix}3 3 - btree_last $::c1 - btree_key $::c1 -} ${keyprefix}3 -do_test btree-8.15 { - btree_move_to $::c1 ${::keyprefix}2 - btree_data $::c1 -} {2} -do_test btree-8.16 { - btree_move_to $::c1 ${::keyprefix}1 - btree_data $::c1 -} {1} -do_test btree-8.17 { - btree_move_to $::c1 ${::keyprefix}3 - btree_data $::c1 -} {3} -do_test btree-8.18 { - lindex [btree_get_meta $::b1] 0 -} {1} -do_test btree-8.19 { - btree_move_to $::c1 ${::keyprefix}2 - btree_key $::c1 -} ${::keyprefix}2 -#btree_page_dump $::b1 2 -do_test btree-8.20 { - btree_delete $::c1 - btree_next $::c1 - btree_key $::c1 -} ${::keyprefix}3 -#btree_page_dump $::b1 2 -do_test btree-8.21 { - lindex [btree_get_meta $::b1] 0 -} {2} -do_test btree-8.22 { - lindex [btree_pager_stats $::b1] 1 -} {2} -do_test btree-8.23.1 { - btree_close_cursor $::c1 - btree_drop_table $::b1 2 - btree_integrity_check $::b1 1 -} {} -do_test btree-8.23.2 { - btree_create_table $::b1 0 -} {2} -do_test btree-8.23.3 { - set ::c1 [btree_cursor $::b1 2 1] - lindex [btree_get_meta $::b1] 0 -} {4} -do_test btree-8.24 { - lindex [btree_pager_stats $::b1] 1 -} {2} -#btree_pager_ref_dump $::b1 -do_test btree-8.25 { - btree_integrity_check $::b1 1 2 -} {} - -# Check page splitting logic -# -do_test btree-9.1 { - for {set i 1} {$i<=19} {incr i} { - set key [format %03d $i] - set data "*** $key *** $key *** $key *** $key ***" - btree_insert $::c1 $key $data - } -} {} -#btree_tree_dump $::b1 2 -#btree_pager_ref_dump $::b1 -#set pager_refinfo_enable 1 -do_test btree-9.2 { - btree_insert $::c1 020 {*** 020 *** 020 *** 020 *** 020 ***} - select_keys $::c1 -} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020} -#btree_page_dump $::b1 2 -#btree_pager_ref_dump $::b1 -#set pager_refinfo_enable 0 - -# The previous "select_keys" command left the cursor pointing at the root -# page. So there should only be two pages checked out. 2 (the root) and -# page 1. -do_test btree-9.2.1 { - lindex [btree_pager_stats $::b1] 1 -} {2} -for {set i 1} {$i<=20} {incr i} { - do_test btree-9.3.$i.1 [subst { - btree_move_to $::c1 [format %03d $i] - btree_key $::c1 - }] [format %03d $i] - do_test btree-9.3.$i.2 [subst { - btree_move_to $::c1 [format %03d $i] - string range \[btree_data $::c1\] 0 10 - }] "*** [format %03d $i] ***" -} -do_test btree-9.4.1 { - lindex [btree_pager_stats $::b1] 1 -} {2} - -# Check the page joining logic. 
-# -#btree_page_dump $::b1 2 -#btree_pager_ref_dump $::b1 -do_test btree-9.4.2 { - btree_move_to $::c1 005 - btree_delete $::c1 -} {} -#btree_page_dump $::b1 2 -for {set i 1} {$i<=19} {incr i} { - if {$i==5} continue - do_test btree-9.5.$i.1 [subst { - btree_move_to $::c1 [format %03d $i] - btree_key $::c1 - }] [format %03d $i] - do_test btree-9.5.$i.2 [subst { - btree_move_to $::c1 [format %03d $i] - string range \[btree_data $::c1\] 0 10 - }] "*** [format %03d $i] ***" -} -#btree_pager_ref_dump $::b1 -do_test btree-9.6 { - btree_close_cursor $::c1 - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-9.7 { - btree_integrity_check $::b1 1 2 -} {} -do_test btree-9.8 { - btree_rollback $::b1 - lindex [btree_pager_stats $::b1] 1 -} {0} -do_test btree-9.9 { - btree_integrity_check $::b1 1 2 -} {} -do_test btree-9.10 { - btree_close $::b1 - set ::b1 [btree_open test1.bt 2000 0] - btree_integrity_check $::b1 1 2 -} {} - -# Create a tree of depth two. That is, there is a single divider entry -# on the root pages and two leaf pages. Then delete the divider entry -# see what happens. -# -do_test btree-10.1 { - btree_begin_transaction $::b1 - btree_clear_table $::b1 2 - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-10.2 { - set ::c1 [btree_cursor $::b1 2 1] - lindex [btree_pager_stats $::b1] 1 -} {2} -do_test btree-10.3 { - for {set i 1} {$i<=30} {incr i} { - set key [format %03d $i] - set data "*** $key *** $key *** $key *** $key ***" - btree_insert $::c1 $key $data - } - select_keys $::c1 -} {001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030} -#btree_tree_dump $::b1 2 -do_test btree-10.4 { - # The divider entry is 012. This is found by uncommenting the - # btree_tree_dump call above and looking at the tree. If the page size - # changes, this test will no longer work. - btree_move_to $::c1 012 - btree_delete $::c1 - select_keys $::c1 -} {001 002 003 004 005 006 007 008 009 010 011 013 014 015 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030} -#btree_pager_ref_dump $::b1 -#btree_tree_dump $::b1 2 -for {set i 1} {$i<=30} {incr i} { - # Check the number of unreference pages. This should be 3 in most cases, - # but 2 when the cursor is pointing to the divider entry which is now 013. - do_test btree-10.5.$i { - btree_move_to $::c1 [format %03d $i] - lindex [btree_pager_stats $::b1] 1 - } [expr {$i==13?2:3}] - #btree_pager_ref_dump $::b1 - #btree_tree_dump $::b1 2 -} - -# Create a tree with lots more pages -# -catch {unset ::data} -catch {unset ::key} -for {set i 31} {$i<=2000} {incr i} { - do_test btree-11.1.$i.1 { - set key [format %03d $i] - set ::data "*** $key *** $key *** $key *** $key ***" - btree_insert $::c1 $key $data - btree_move_to $::c1 $key - btree_key $::c1 - } [format %03d $i] - do_test btree-11.1.$i.2 { - btree_data $::c1 - } $::data - set ::key [format %03d [expr {$i/2}]] - if {$::key=="012"} {set ::key 013} - do_test btree-11.1.$i.3 { - btree_move_to $::c1 $::key - btree_key $::c1 - } $::key -} -catch {unset ::data} -catch {unset ::key} - -# Make sure our reference count is still correct. 
-# -do_test btree-11.2 { - btree_close_cursor $::c1 - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-11.3 { - set ::c1 [btree_cursor $::b1 2 1] - lindex [btree_pager_stats $::b1] 1 -} {2} - -# Delete the dividers on the root page -# -#btree_page_dump $::b1 2 -do_test btree-11.4 { - btree_move_to $::c1 1667 - btree_delete $::c1 - btree_move_to $::c1 1667 - set k [btree_key $::c1] - if {$k==1666} { - set k [btree_next $::c1] - } - btree_key $::c1 -} {1668} -#btree_page_dump $::b1 2 - -# Change the data on an intermediate node such that the node becomes overfull -# and has to split. We happen to know that intermediate nodes exist on -# 337, 401 and 465 by the btree_page_dumps above -# -catch {unset ::data} -set ::data {This is going to be a very long data segment} -append ::data $::data -append ::data $::data -do_test btree-12.1 { - btree_insert $::c1 337 $::data - btree_move_to $::c1 337 - btree_data $::c1 -} $::data -do_test btree-12.2 { - btree_insert $::c1 401 $::data - btree_move_to $::c1 401 - btree_data $::c1 -} $::data -do_test btree-12.3 { - btree_insert $::c1 465 $::data - btree_move_to $::c1 465 - btree_data $::c1 -} $::data -do_test btree-12.4 { - btree_move_to $::c1 337 - btree_key $::c1 -} {337} -do_test btree-12.5 { - btree_data $::c1 -} $::data -do_test btree-12.6 { - btree_next $::c1 - btree_key $::c1 -} {338} -do_test btree-12.7 { - btree_move_to $::c1 464 - btree_key $::c1 -} {464} -do_test btree-12.8 { - btree_next $::c1 - btree_data $::c1 -} $::data -do_test btree-12.9 { - btree_next $::c1 - btree_key $::c1 -} {466} -do_test btree-12.10 { - btree_move_to $::c1 400 - btree_key $::c1 -} {400} -do_test btree-12.11 { - btree_next $::c1 - btree_data $::c1 -} $::data -do_test btree-12.12 { - btree_next $::c1 - btree_key $::c1 -} {402} -# btree_commit $::b1 -# btree_tree_dump $::b1 1 -do_test btree-13.1 { - btree_integrity_check $::b1 1 2 -} {} - -# To Do: -# -# 1. Do some deletes from the 3-layer tree -# 2. Commit and reopen the database -# 3. Read every 15th entry and make sure it works -# 4. Implement btree_sanity and put it throughout this script -# - -do_test btree-15.98 { - btree_close_cursor $::c1 - lindex [btree_pager_stats $::b1] 1 -} {1} -do_test btree-15.99 { - btree_rollback $::b1 - lindex [btree_pager_stats $::b1] 1 -} {0} -btree_pager_ref_dump $::b1 - -# Miscellaneous tests. -# -# btree-16.1 - Check that a statement cannot be started if a transaction -# is not active. -# btree-16.2 - Check that it is an error to request more payload from a -# btree entry than the entry contains. 
-do_test btree-16.1 { - catch {btree_begin_statement $::b1} msg - set msg -} SQLITE_ERROR - -do_test btree-16.2 { - btree_begin_transaction $::b1 - set ::c1 [btree_cursor $::b1 2 1] - btree_insert $::c1 1 helloworld - btree_close_cursor $::c1 - btree_commit $::b1 -} {} -do_test btree-16.3 { - set ::c1 [btree_cursor $::b1 2 1] - btree_first $::c1 -} 0 -do_test btree-16.4 { - catch {btree_data $::c1 [expr [btree_payload_size $::c1] + 10]} msg - set msg -} SQLITE_ERROR - -if {$tcl_platform(platform)=="unix"} { - do_test btree-16.5 { - btree_close $::b1 - set ::origperm [file attributes test1.bt -permissions] - file attributes test1.bt -permissions o-w,g-w,a-w - set ::b1 [btree_open test1.bt 2000 0] - catch {btree_cursor $::b1 2 1} msg - file attributes test1.bt -permissions $::origperm - btree_close $::b1 - set ::b1 [btree_open test1.bt 2000 0] - set msg - } {SQLITE_READONLY} -} - -do_test btree-16.6 { - set ::c1 [btree_cursor $::b1 2 1] - set ::c2 [btree_cursor $::b1 2 1] - btree_begin_transaction $::b1 - for {set i 0} {$i<100} {incr i} { - btree_insert $::c1 $i [string repeat helloworld 10] - } - btree_last $::c2 - btree_insert $::c1 100 [string repeat helloworld 10] -} {} - -do_test btree-16.7 { - btree_close_cursor $::c1 - btree_close_cursor $::c2 - btree_commit $::b1 - set ::c1 [btree_cursor $::b1 2 1] - catch {btree_insert $::c1 101 helloworld} msg - set msg -} {SQLITE_ERROR} -do_test btree-16.8 { - btree_first $::c1 - catch {btree_delete $::c1} msg - set msg -} {SQLITE_ERROR} -do_test btree-16.9 { - btree_close_cursor $::c1 - btree_begin_transaction $::b1 - set ::c1 [btree_cursor $::b1 2 0] - catch {btree_insert $::c1 101 helloworld} msg - set msg -} {SQLITE_PERM} -do_test btree-16.10 { - catch {btree_delete $::c1} msg - set msg -} {SQLITE_PERM} - -# As of 2006-08-16 (version 3.3.7+) a read cursor will no -# longer block a write cursor from the same database -# connectiin. The following three tests uses to return -# the SQLITE_LOCK error, but no more. 
-# -do_test btree-16.11 { - btree_close_cursor $::c1 - set ::c2 [btree_cursor $::b1 2 1] - set ::c1 [btree_cursor $::b1 2 0] - catch {btree_insert $::c2 101 helloworld} msg - set msg -} {} -do_test btree-16.12 { - btree_first $::c2 - catch {btree_delete $::c2} msg - set msg -} {} -do_test btree-16.13 { - catch {btree_clear_table $::b1 2} msg - set msg -} {} - - -do_test btree-16.14 { - btree_close_cursor $::c1 - btree_close_cursor $::c2 - btree_commit $::b1 - catch {btree_clear_table $::b1 2} msg - set msg -} {SQLITE_ERROR} -do_test btree-16.15 { - catch {btree_drop_table $::b1 2} msg - set msg -} {SQLITE_ERROR} -do_test btree-16.16 { - btree_begin_transaction $::b1 - set ::c1 [btree_cursor $::b1 2 0] - catch {btree_drop_table $::b1 2} msg - set msg -} {SQLITE_LOCKED} - -do_test btree-99.1 { - btree_close $::b1 -} {} -catch {unset data} -catch {unset key} - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/busy.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/busy.test --- sqlite3-3.4.2/test/busy.test 2007-03-27 15:43:01.000000000 +0100 +++ sqlite3-3.6.16/test/busy.test 2009-06-05 18:02:57.000000000 +0100 @@ -10,7 +10,7 @@ #*********************************************************************** # This file test the busy handler # -# $Id: busy.test,v 1.2 2005/09/17 18:02:37 drh Exp $ +# $Id: busy.test,v 1.3 2008/03/15 02:09:22 drh Exp $ set testdir [file dirname $argv0] @@ -32,12 +32,29 @@ set busyargs {} do_test busy-1.2 { db busy busy - db2 eval {begin exclusive} - catchsql {begin immediate} + db2 eval {BEGIN EXCLUSIVE} + catchsql {BEGIN IMMEDIATE} } {1 {database is locked}} do_test busy-1.3 { set busyargs } {0 1 2 3} +do_test busy-1.4 { + set busyargs {} + catchsql {BEGIN IMMEDIATE} + set busyargs +} {0 1 2 3} + +do_test busy-2.1 { + db2 eval {COMMIT} + db eval {BEGIN; INSERT INTO t1 VALUES(5)} + db2 eval {BEGIN; SELECT * FROM t1} + set busyargs {} + catchsql COMMIT +} {1 {database is locked}} +do_test busy-2.2 { + set busyargs +} {0 1 2 3} + db2 close diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/cache.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/cache.test --- sqlite3-3.4.2/test/cache.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/cache.test 2009-06-05 18:02:57.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: cache.test,v 1.3 2007/08/12 20:07:59 drh Exp $ +# $Id: cache.test,v 1.4 2007/08/22 02:56:44 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -22,7 +22,9 @@ proc pager_cache_size {db} { set bt [btree_from_db $db] + db_enter $db array set stats [btree_pager_stats $bt] + db_leave $db return $stats(page) } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/capi2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/capi2.test --- sqlite3-3.4.2/test/capi2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/capi2.test 2009-06-12 03:37:53.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script testing the callback-free C/C++ API. 
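In the busy.test hunk above, the handler installed with "db busy busy" is defined outside the changed lines. A plausible shape for it, consistent with the recorded busyargs of {0 1 2 3} and with the Tcl interface treating a true return value as "give up" (both assumptions here), might be:

  proc busy {n} {
    # n is the number of prior invocations for this lock attempt
    lappend ::busyargs $n
    # a non-zero return stops the retries so SQLITE_BUSY surfaces
    return [expr {$n>=3}]
  }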
# -# $Id: capi2.test,v 1.33 2007/01/03 23:37:29 drh Exp $ +# $Id: capi2.test,v 1.37 2008/12/30 17:55:00 drh Exp $ # set testdir [file dirname $argv0] @@ -208,9 +208,9 @@ sqlite3_finalize $VM } {SQLITE_OK} do_test capi2-3.11b {db changes} {1} -do_test capi2-3.12-misuse { - sqlite3_finalize $VM -} {SQLITE_MISUSE} +#do_test capi2-3.12-misuse { +# sqlite3_finalize $VM +#} {SQLITE_MISUSE} do_test capi2-3.13 { set VM [sqlite3_prepare $DB {INSERT INTO t1 VALUES(1,3,4)} -1 TAIL] list [sqlite3_step $VM] \ @@ -618,7 +618,6 @@ ifcapable {explain} { do_test capi2-7.12 { -btree_breakpoint set x [stepsql $DB {EXPLAIN SELECT * FROM t1}] lindex $x 0 } {0} @@ -750,7 +749,6 @@ CREATE VIEW view2 AS SELECT * FROM tab1 limit 10 offset 10; } } {} - breakpoint do_test capi2-13.2 { check_origins {SELECT col2, col1 FROM view2} } [list {main tab1 col2} {main tab1 col1}] diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/capi3c.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/capi3c.test --- sqlite3-3.4.2/test/capi3c.test 2007-07-19 23:30:19.000000000 +0100 +++ sqlite3-3.6.16/test/capi3c.test 2009-06-12 03:37:53.000000000 +0100 @@ -13,7 +13,7 @@ # This is a copy of the capi3.test file that has been adapted to # test the new sqlite3_prepare_v2 interface. # -# $Id: capi3c.test,v 1.9 2007/07/19 22:30:19 drh Exp $ +# $Id: capi3c.test,v 1.22 2008/11/05 16:37:35 drh Exp $ # set testdir [file dirname $argv0] @@ -63,9 +63,12 @@ sqlite3_finalize $STMT set TAIL } {} -do_test capi3c-1.2 { +do_test capi3c-1.2.1 { sqlite3_errcode $DB } {SQLITE_OK} +do_test capi3c-1.2.2 { + sqlite3_extended_errcode $DB +} {SQLITE_OK} do_test capi3c-1.3 { sqlite3_errmsg $DB } {not an error} @@ -81,13 +84,17 @@ set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] } } {1} -do_test capi3c-1.6 { +do_test capi3c-1.6.1 { sqlite3_errcode $DB } {SQLITE_ERROR} +do_test capi3c-1.6.2 { + sqlite3_extended_errcode $DB +} {SQLITE_ERROR} do_test capi3c-1.7 { sqlite3_errmsg $DB } {no such column: namex} + ifcapable {utf16} { do_test capi3c-2.1 { set sql16 [utf16 {SELECT name FROM sqlite_master}] @@ -107,9 +114,12 @@ set STMT [sqlite3_prepare16_v2 $DB $sql -1 TAIL] } } {1} - do_test capi3c-2.4 { + do_test capi3c-2.4.1 { sqlite3_errcode $DB } {SQLITE_ERROR} + do_test capi3c-2.4.2 { + sqlite3_extended_errcode $DB + } {SQLITE_ERROR} do_test capi3c-2.5 { sqlite3_errmsg $DB } {no such column: namex} @@ -594,10 +604,8 @@ db close do_test capi3c-6.0 { -btree_breakpoint sqlite3 db test.db set DB [sqlite3_connection_pointer db] -btree_breakpoint sqlite3_key $DB xyzzy set sql {SELECT a FROM t1 order by rowid} set STMT [sqlite3_prepare_v2 $DB $sql -1 TAIL] @@ -622,17 +630,27 @@ db close } {} +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + return [hexio_get_int [hexio_read $fname 44 4]] +} + if {![sqlite3 -has-codec]} { # Test what happens when the library encounters a newer file format. - # Do this by updating the file format via the btree layer. 
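In the new version the file format is poked directly into the database header: byte offset 44 of an SQLite database file holds the schema format number and offset 40 holds the schema cookie, so set_file_format rewrites the former and bumps the latter to make connections with a cached schema re-read it and notice the change. A usage sketch (hypothetical session; the exact error text is an assumption):

  set_file_format 5                        ;# claim a newer, unsupported format
  sqlite3 db test.db
  catchsql { SELECT * FROM sqlite_master } ;# should fail, e.g. "unsupported file format"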
do_test capi3c-7.1 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set meta [btree_get_meta $::bt] - lset meta 2 5 - eval [concat btree_update_meta $::bt [lrange $meta 0 end]] - btree_commit $::bt - btree_close $::bt + set_file_format 5 } {} do_test capi3c-7.2 { sqlite3 db test.db @@ -647,8 +665,7 @@ # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3c-8.1 { - file delete -force test.db - file delete -force test.db-journal + file delete -force test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); @@ -656,54 +673,38 @@ db close } {} do_test capi3c-8.2 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set ::bc [btree_cursor $::bt 1 1] - - # Build a 5-field row record consisting of 5 null records. This is - # officially black magic. - catch {unset data} - set data [binary format c6 {6 0 0 0 0 0}] - btree_insert $::bc 5 $data - - btree_close_cursor $::bc - btree_commit $::bt - btree_close $::bt + sqlite3 db test.db + execsql { + PRAGMA writable_schema=ON; + INSERT INTO sqlite_master VALUES(NULL,NULL,NULL,NULL,NULL); + } + db close } {} do_test capi3c-8.3 { sqlite3 db test.db catchsql { SELECT * FROM sqlite_master; } - } {1 {malformed database schema}} + } {1 {malformed database schema (?)}} do_test capi3c-8.4 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set ::bc [btree_cursor $::bt 1 1] - # Build a 5-field row record. The first field is a string 'table', and - # subsequent fields are all NULL. Replace the other broken record with - # this one and try to read the schema again. The broken record uses - # either UTF-8 or native UTF-16 (if this file is being run by - # utf16.test). - if { [string match UTF-16* $::ENC] } { - set data [binary format c6a10 {6 33 0 0 0 0} [utf16 table]] - } else { - set data [binary format c6a5 {6 23 0 0 0 0} table] + # subsequent fields are all NULL. + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + CREATE TABLE t1(a); + PRAGMA writable_schema=ON; + INSERT INTO sqlite_master VALUES('table',NULL,NULL,NULL,NULL); } - btree_insert $::bc 5 $data - - btree_close_cursor $::bc - btree_commit $::bt - btree_close $::bt + db close } {}; do_test capi3c-8.5 { - db close sqlite3 db test.db catchsql { SELECT * FROM sqlite_master; } - } {1 {malformed database schema}} + } {1 {malformed database schema (?)}} db close } file delete -force test.db @@ -730,7 +731,7 @@ SQLITE_CONSTRAINT {constraint failed} \ SQLITE_MISMATCH {datatype mismatch} \ SQLITE_MISUSE {library routine called out of sequence} \ -SQLITE_NOLFS {kernel lacks large file support} \ +SQLITE_NOLFS {large file support is disabled} \ SQLITE_AUTH {authorization denied} \ SQLITE_FORMAT {auxiliary database format error} \ SQLITE_RANGE {bind or column index out of range} \ @@ -745,26 +746,25 @@ } # Test the error message when a "real" out of memory occurs. 
-if {[info command sqlite_malloc_stat]!=""} { -set sqlite_malloc_fail 1 -do_test capi3c-10-1 { - sqlite3 db test.db - set DB [sqlite3_connection_pointer db] - sqlite_malloc_fail 1 - catchsql { - select * from sqlite_master; - } -} {1 {out of memory}} -do_test capi3c-10-2 { - sqlite3_errmsg $::DB -} {out of memory} -ifcapable {utf16} { - do_test capi3c-10-3 { - utf8 [sqlite3_errmsg16 $::DB] +ifcapable memdebug { + do_test capi3c-10-1 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_memdebug_fail 0 + catchsql { + select * from sqlite_master; + } + } {1 {out of memory}} + do_test capi3c-10-2 { + sqlite3_errmsg $::DB } {out of memory} -} -db close -sqlite_malloc_fail 0 + ifcapable {utf16} { + do_test capi3c-10-3 { + utf8 [sqlite3_errmsg16 $::DB] + } {out of memory} + } + db close + sqlite3_memdebug_fail -1 } # The following tests - capi3c-11.* - test that a COMMIT or ROLLBACK @@ -788,14 +788,25 @@ set STMT [sqlite3_prepare_v2 $DB "SELECT func(b, a) FROM t1" -1 TAIL] sqlite3_step $STMT } {SQLITE_ROW} -do_test capi3c-11.3 { + +# As of 3.6.5 a COMMIT is OK during while a query is still running - +# as long as it is a read-only query and not an incremental BLOB write. +# +do_test capi3-11.3.1 { catchsql { COMMIT; } -} {1 {cannot commit transaction - SQL statements in progress}} -do_test capi3c-11.3.1 { +} {0 {}} +do_test capi3-11.3.2 { + sqlite3_extended_errcode $DB +} {SQLITE_OK} +do_test capi3-11.3.3 { sqlite3_get_autocommit $DB -} 0 +} 1 +do_test capi3-11.3.4 { + db eval {PRAGMA lock_status} +} {main shared temp closed} + do_test capi3c-11.4 { sqlite3_step $STMT } {SQLITE_ERROR} @@ -807,15 +818,7 @@ SELECT * FROM t1; } } {0 {1 int 2 notatype}} -do_test capi3c-11.6.1 { - sqlite3_get_autocommit $DB -} 0 do_test capi3c-11.7 { - catchsql { - COMMIT; - } -} {0 {}} -do_test capi3c-11.7.1 { sqlite3_get_autocommit $DB } 1 do_test capi3c-11.8 { @@ -902,19 +905,19 @@ BEGIN; COMMIT; } -} {1 {cannot commit transaction - SQL statements in progress}} +} {0 {}} do_test capi3c-11.20 { sqlite3_reset $STMT catchsql { COMMIT; } -} {0 {}} +} {1 {cannot commit - no transaction is active}} do_test capi3c-11.21 { sqlite3_finalize $STMT } {SQLITE_OK} -# The following tests - capi3c-12.* - check that it's Ok to start a -# transaction while other VMs are active, and that it's Ok to execute +# The following tests - capi3c-12.* - check that its Ok to start a +# transaction while other VMs are active, and that its Ok to execute # atomic updates in the same situation # do_test capi3c-12.1 { @@ -1174,9 +1177,22 @@ db eval {DROP TABLE t3} sqlite3_step $STMT } SQLITE_SCHEMA -do_test capi3c-19.4.2 { +do_test capi3c-19.4.1 { sqlite3_errmsg $DB } {no such table: t3} +ifcapable deprecated { + do_test capi3c-19.4.2 { + sqlite3_expired $STMT + } 1 +} +do_test capi3c-19.4.3 { + sqlite3_errmsg $DB +} {no such table: t3} +ifcapable deprecated { + do_test capi3c-19.4.4 { + sqlite3_expired 0 + } 1 +} do_test capi3c-19.5 { sqlite3_reset $STMT db eval { @@ -1185,6 +1201,11 @@ } sqlite3_step $STMT } SQLITE_ROW +ifcapable deprecated { + do_test capi3c-19.5.2 { + sqlite3_expired $STMT + } 0 +} do_test capi3c-19.6 { sqlite3_column_int $STMT 1 } 2 @@ -1222,7 +1243,7 @@ sqlite3_step $STMT } {SQLITE_INTERRUPT} do_test capi3c-21.2 { - sqlite3_errcode $DB + sqlite3_extended_errcode $DB } {SQLITE_INTERRUPT} do_test capi3c-21.3 { sqlite3_finalize $STMT @@ -1241,6 +1262,61 @@ do_test capi3c-21.7 { sqlite3_errcode $DB } {SQLITE_INTERRUPT} -} + do_test capi3c-21.8 { + sqlite3_extended_errcode $DB + } {SQLITE_INTERRUPT} +} + +# 
Make sure sqlite3_result_error_code() returns the correct error code. +# See ticket #2940 +# +do_test capi3c-22.1 { + db progress 0 {} + set STMT [sqlite3_prepare_v2 db {SELECT test_error('the message',3)} -1 TAIL] + sqlite3_step $STMT +} {SQLITE_PERM} +sqlite3_finalize $STMT +do_test capi3c-22.2 { + set STMT [sqlite3_prepare_v2 db {SELECT test_error('the message',4)} -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ABORT} +sqlite3_finalize $STMT +do_test capi3c-22.3 { + set STMT [sqlite3_prepare_v2 db {SELECT test_error('the message',16)} -1 TAIL] + sqlite3_step $STMT +} {SQLITE_EMPTY} +sqlite3_finalize $STMT + +# For a multi-column result set where the same table column is repeated +# in multiple columns of the output, verify that doing a UTF-8 to UTF-16 +# conversion (or vice versa) on one column does not change the value of +# the second. +# +do_test capi3c-23.1 { + set STMT [sqlite3_prepare_v2 db {SELECT b,b,b,b FROM t1} -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-23.2 { + sqlite3_column_text16 $STMT 0 + sqlite3_column_text $STMT 1 +} {one} +do_test capi3c-23.3 { + sqlite3_column_text16 $STMT 2 + sqlite3_column_text $STMT 3 +} {one} +sqlite3_finalize $STMT +do_test capi3c-23.4 { + set STMT [sqlite3_prepare_v2 db {SELECT b||'x',b,b,b FROM t1} -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test capi3c-23.5 { + sqlite3_column_text16 $STMT 0 + sqlite3_column_text $STMT 1 +} {one} +do_test capi3c-23.6 { + sqlite3_column_text16 $STMT 2 + sqlite3_column_text $STMT 3 +} {one} +sqlite3_finalize $STMT finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/capi3d.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/capi3d.test --- sqlite3-3.4.2/test/capi3d.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/capi3d.test 2009-06-05 18:02:57.000000000 +0100 @@ -0,0 +1,93 @@ +# 2008 June 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is devoted to testing the sqlite3_next_stmt interface. +# +# $Id: capi3d.test,v 1.2 2008/07/14 15:11:20 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create N prepared statements against database connection db +# and return a list of all the generated prepared statements. +# +proc make_prepared_statements {N} { + set plist {} + for {set i 0} {$i<$N} {incr i} { + set sql "SELECT $i FROM sqlite_master WHERE name LIKE '%$i%'" + if {rand()<0.33} { + set s [sqlite3_prepare_v2 db $sql -1 notused] + } else { + ifcapable utf16 { + if {rand()<0.5} { + set sql [encoding convertto unicode $sql]\x00\x00 + set s [sqlite3_prepare16 db $sql -1 notused] + } else { + set s [sqlite3_prepare db $sql -1 notused] + } + } + ifcapable !utf16 { + set s [sqlite3_prepare db $sql -1 notused] + } + } + lappend plist $s + } + return $plist +} + + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# Database initially has no prepared statements. 
+# +do_test capi3d-1.1 { + db cache flush + sqlite3_next_stmt db 0 +} {} + +# Run the following tests for between 1 and 100 prepared statements. +# +for {set i 1} {$i<=100} {incr i} { + set stmtlist [make_prepared_statements $i] + do_test capi3d-1.2.$i.1 { + set p [sqlite3_next_stmt db 0] + set x {} + while {$p!=""} { + lappend x $p + set p [sqlite3_next_stmt db $p] + } + lsort $x + } [lsort $stmtlist] + do_test capi3-1.2.$i.2 { + foreach p [scramble $::stmtlist] { + sqlite3_finalize $p + } + sqlite3_next_stmt db 0 + } {} +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/capi3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/capi3.test --- sqlite3-3.4.2/test/capi3.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/capi3.test 2009-06-12 03:37:53.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script testing the callback-free C/C++ API. # -# $Id: capi3.test,v 1.51 2007/08/12 20:07:59 drh Exp $ +# $Id: capi3.test,v 1.70 2009/01/09 02:49:32 drh Exp $ # set testdir [file dirname $argv0] @@ -61,9 +61,12 @@ sqlite3_finalize $STMT set TAIL } {} -do_test capi3-1.2 { +do_test capi3-1.2.1 { sqlite3_errcode $DB } {SQLITE_OK} +do_test capi3-1.2.2 { + sqlite3_extended_errcode $DB +} {SQLITE_OK} do_test capi3-1.3 { sqlite3_errmsg $DB } {not an error} @@ -74,15 +77,31 @@ set TAIL } {SELECT 10} do_test capi3-1.5 { + set sql {SELECT name FROM sqlite_master;SELECT 10} + set STMT [sqlite3_prepare $DB $sql [string length $sql] TAIL] + sqlite3_finalize $STMT + set TAIL +} {SELECT 10} +do_test capi3-1.6 { + set sql {SELECT name FROM sqlite_master;SELECT 10} + set STMT [sqlite3_prepare $DB $sql [expr [string length $sql]+1] TAIL] + sqlite3_finalize $STMT + set TAIL +} {SELECT 10} + +do_test capi3-1.7 { set sql {SELECT namex FROM sqlite_master} catch { set STMT [sqlite3_prepare $DB $sql -1 TAIL] } } {1} -do_test capi3-1.6 { +do_test capi3-1.8.1 { sqlite3_errcode $DB } {SQLITE_ERROR} -do_test capi3-1.7 { +do_test capi3-1.8.2 { + sqlite3_extended_errcode $DB +} {SQLITE_ERROR} +do_test capi3-1.9 { sqlite3_errmsg $DB } {no such column: namex} @@ -102,12 +121,15 @@ do_test capi3-2.3 { set sql [utf16 {SELECT namex FROM sqlite_master}] catch { - set STMT [sqlite3_prepare16 $DB $sql -1 TAIL] + set STMT [sqlite3_prepare16 $DB $sql -1] } } {1} - do_test capi3-2.4 { + do_test capi3-2.4.1 { sqlite3_errcode $DB } {SQLITE_ERROR} + do_test capi3-2.4.2 { + sqlite3_extended_errcode $DB + } {SQLITE_ERROR} do_test capi3-2.5 { sqlite3_errmsg $DB } {no such column: namex} @@ -115,8 +137,8 @@ ifcapable schema_pragmas { do_test capi3-2.6 { execsql {CREATE TABLE tablename(x)} - set sql16 [utf16 {PRAGMA table_info("TableName")}] - set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + set sql16 [utf16 {PRAGMA table_info("TableName"); --excess text}] + set STMT [sqlite3_prepare16 $DB $sql16 -1] sqlite3_step $STMT } SQLITE_ROW do_test capi3-2.7 { @@ -144,7 +166,7 @@ catch { set db2 [sqlite3_open /bogus/path/test.db {}] } - sqlite3_errcode $db2 + sqlite3_extended_errcode $db2 } {SQLITE_CANTOPEN} do_test capi3-3.4 { sqlite3_errmsg $db2 @@ -164,6 +186,14 @@ } {library routine called out of sequence} } +do_test capi3-3.7 { + set db2 [sqlite3_open] + sqlite3_errcode $db2 +} {SQLITE_OK} +do_test capi3-3.8 { + sqlite3_close $db2 +} {SQLITE_OK} + # rename sqlite3_open "" # rename sqlite3_open_old sqlite3_open @@ -603,10 +633,8 @@ db close do_test capi3-6.0 { -btree_breakpoint sqlite3 db test.db set DB [sqlite3_connection_pointer db] -btree_breakpoint sqlite3_key $DB 
xyzzy set sql {SELECT a FROM t1 order by rowid} set STMT [sqlite3_prepare $DB $sql -1 TAIL] @@ -629,17 +657,27 @@ } {SQLITE_OK} db close +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} +} + +# This procedure returns the value of the file-format in file 'test.db'. +# +proc get_file_format {{fname test.db}} { + return [hexio_get_int [hexio_read $fname 44 4]] +} + if {![sqlite3 -has-codec]} { # Test what happens when the library encounters a newer file format. - # Do this by updating the file format via the btree layer. do_test capi3-7.1 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set meta [btree_get_meta $::bt] - lset meta 2 5 - eval [concat btree_update_meta $::bt [lrange $meta 0 end]] - btree_commit $::bt - btree_close $::bt + set_file_format 5 } {} do_test capi3-7.2 { sqlite3 db test.db @@ -654,8 +692,7 @@ # Now test that the library correctly handles bogus entries in the # sqlite_master table (schema corruption). do_test capi3-8.1 { - file delete -force test.db - file delete -force test.db-journal + file delete -force test.db test.db-journal sqlite3 db test.db execsql { CREATE TABLE t1(a); @@ -663,54 +700,38 @@ db close } {} do_test capi3-8.2 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set ::bc [btree_cursor $::bt 1 1] - - # Build a 5-field row record consisting of 5 null records. This is - # officially black magic. - catch {unset data} - set data [binary format c6 {6 0 0 0 0 0}] - btree_insert $::bc 5 $data - - btree_close_cursor $::bc - btree_commit $::bt - btree_close $::bt + sqlite3 db test.db + execsql { + PRAGMA writable_schema=ON; + INSERT INTO sqlite_master VALUES(NULL,NULL,NULL,NULL,NULL); + } + db close } {} do_test capi3-8.3 { sqlite3 db test.db catchsql { SELECT * FROM sqlite_master; } - } {1 {malformed database schema}} + } {1 {malformed database schema (?)}} do_test capi3-8.4 { - set ::bt [btree_open test.db 10 0] - btree_begin_transaction $::bt - set ::bc [btree_cursor $::bt 1 1] - # Build a 5-field row record. The first field is a string 'table', and - # subsequent fields are all NULL. Replace the other broken record with - # this one and try to read the schema again. The broken record uses - # either UTF-8 or native UTF-16 (if this file is being run by - # utf16.test). - if { [string match UTF-16* $::ENC] } { - set data [binary format c6a10 {6 33 0 0 0 0} [utf16 table]] - } else { - set data [binary format c6a5 {6 23 0 0 0 0} table] + # subsequent fields are all NULL. 
+ db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + CREATE TABLE t1(a); + PRAGMA writable_schema=ON; + INSERT INTO sqlite_master VALUES('table',NULL,NULL,NULL,NULL); } - btree_insert $::bc 5 $data - - btree_close_cursor $::bc - btree_commit $::bt - btree_close $::bt + db close } {}; do_test capi3-8.5 { - db close sqlite3 db test.db catchsql { SELECT * FROM sqlite_master; } - } {1 {malformed database schema}} + } {1 {malformed database schema (?)}} db close } file delete -force test.db @@ -737,7 +758,7 @@ SQLITE_CONSTRAINT {constraint failed} \ SQLITE_MISMATCH {datatype mismatch} \ SQLITE_MISUSE {library routine called out of sequence} \ -SQLITE_NOLFS {kernel lacks large file support} \ +SQLITE_NOLFS {large file support is disabled} \ SQLITE_AUTH {authorization denied} \ SQLITE_FORMAT {auxiliary database format error} \ SQLITE_RANGE {bind or column index out of range} \ @@ -752,26 +773,43 @@ } # Test the error message when a "real" out of memory occurs. -if {[info command sqlite_malloc_stat]!=""} { -set sqlite_malloc_fail 1 -do_test capi3-10-1 { - sqlite3 db test.db - set DB [sqlite3_connection_pointer db] - sqlite_malloc_fail 1 - catchsql { - select * from sqlite_master; +ifcapable memdebug { + do_test capi3-10-1 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_memdebug_fail 1 + catchsql { + select * from sqlite_master; + } + } {1 {out of memory}} + do_test capi3-10-2 { + sqlite3_errmsg $::DB + } {out of memory} + ifcapable {utf16} { + do_test capi3-10-3 { + utf8 [sqlite3_errmsg16 $::DB] + } {out of memory} } -} {1 {out of memory}} -do_test capi3-10-2 { - sqlite3_errmsg $::DB -} {out of memory} -ifcapable {utf16} { - do_test capi3-10-3 { - utf8 [sqlite3_errmsg16 $::DB] + db close + sqlite3_memdebug_fail -1 + do_test capi3-10-4 { + sqlite3 db test.db + set DB [sqlite3_connection_pointer db] + sqlite3_memdebug_fail 1 + catchsql { + select * from sqlite_master where rowid>5; + } + } {1 {out of memory}} + do_test capi3-10-5 { + sqlite3_errmsg $::DB } {out of memory} -} -db close -sqlite_malloc_fail 0 + ifcapable {utf16} { + do_test capi3-10-6 { + utf8 [sqlite3_errmsg16 $::DB] + } {out of memory} + } + db close + sqlite3_memdebug_fail -1 } # The following tests - capi3-11.* - test that a COMMIT or ROLLBACK @@ -795,14 +833,25 @@ set STMT [sqlite3_prepare $DB "SELECT func(b, a) FROM t1" -1 TAIL] sqlite3_step $STMT } {SQLITE_ROW} -do_test capi3-11.3 { + +# As of 3.6.5 a COMMIT is OK during while a query is still running - +# as long as it is a read-only query and not an incremental BLOB write. 
+# +do_test capi3-11.3.1 { catchsql { COMMIT; } -} {1 {cannot commit transaction - SQL statements in progress}} -do_test capi3-11.3.1 { +} {0 {}} +do_test capi3-11.3.2 { + sqlite3_extended_errcode $DB +} {SQLITE_OK} +do_test capi3-11.3.3 { sqlite3_get_autocommit $DB -} 0 +} 1 +do_test capi3-11.3.4 { + db eval {PRAGMA lock_status} +} {main shared temp closed} + do_test capi3-11.4 { sqlite3_step $STMT } {SQLITE_ERROR} @@ -814,15 +863,7 @@ SELECT * FROM t1; } } {0 {1 int 2 notatype}} -do_test capi3-11.6.1 { - sqlite3_get_autocommit $DB -} 0 do_test capi3-11.7 { - catchsql { - COMMIT; - } -} {0 {}} -do_test capi3-11.7.1 { sqlite3_get_autocommit $DB } 1 do_test capi3-11.8 { @@ -909,19 +950,19 @@ BEGIN; COMMIT; } -} {1 {cannot commit transaction - SQL statements in progress}} +} {0 {}} do_test capi3-11.20 { sqlite3_reset $STMT catchsql { COMMIT; } -} {0 {}} +} {1 {cannot commit - no transaction is active}} do_test capi3-11.21 { sqlite3_finalize $STMT } {SQLITE_OK} -# The following tests - capi3-12.* - check that it's Ok to start a -# transaction while other VMs are active, and that it's Ok to execute +# The following tests - capi3-12.* - check that its Ok to start a +# transaction while other VMs are active, and that its Ok to execute # atomic updates in the same situation # do_test capi3-12.1 { @@ -1023,6 +1064,51 @@ do_test capi3-15.3 { sqlite3_finalize $STMT } {SQLITE_OK} +do_test capi3-15.4 { + # 123456789 1234567 + set sql {SELECT 1234567890} + set STMT [sqlite3_prepare $DB $sql 8 TAIL] + sqlite3_step $STMT + set v1 [sqlite3_column_int $STMT 0] + sqlite3_finalize $STMT + set v1 +} {1} +do_test capi3-15.5 { + # 123456789 1234567 + set sql {SELECT 1234567890} + set STMT [sqlite3_prepare $DB $sql 9 TAIL] + sqlite3_step $STMT + set v1 [sqlite3_column_int $STMT 0] + sqlite3_finalize $STMT + set v1 +} {12} +do_test capi3-15.6 { + # 123456789 1234567 + set sql {SELECT 1234567890} + set STMT [sqlite3_prepare $DB $sql 12 TAIL] + sqlite3_step $STMT + set v1 [sqlite3_column_int $STMT 0] + sqlite3_finalize $STMT + set v1 +} {12345} +do_test capi3-15.7 { + # 123456789 1234567 + set sql {SELECT 12.34567890} + set STMT [sqlite3_prepare $DB $sql 12 TAIL] + sqlite3_step $STMT + set v1 [sqlite3_column_double $STMT 0] + sqlite3_finalize $STMT + set v1 +} {12.34} +do_test capi3-15.8 { + # 123456789 1234567 + set sql {SELECT 12.34567890} + set STMT [sqlite3_prepare $DB $sql 14 TAIL] + sqlite3_step $STMT + set v1 [sqlite3_column_double $STMT 0] + sqlite3_finalize $STMT + set v1 +} {12.3456} # Make sure code is always generated even if an IF EXISTS or # IF NOT EXISTS clause is present that the table does not or @@ -1071,4 +1157,54 @@ sqlite3_finalize $STMT } {SQLITE_OK} +# Verify that sqlite3_step() fails with an SQLITE_SCHEMA error +# when the statement is prepared with sqlite3_prepare() (not +# sqlite3_prepare_v2()) and the schema has changed. +# +do_test capi3-18.1 { + set STMT [sqlite3_prepare db {SELECT * FROM t2} -1 TAIL] + sqlite3 db2 test.db + db2 eval {CREATE TABLE t3(x)} + db2 close + sqlite3_step $STMT +} {SQLITE_ERROR} +do_test capi3-18.2 { + sqlite3_reset $STMT + sqlite3_errcode db +} {SQLITE_SCHEMA} +do_test capi3-18.3 { + sqlite3_errmsg db +} {database schema has changed} +# The error persist on retry when sqlite3_prepare() has been used. +do_test capi3-18.4 { + sqlite3_step $STMT +} {SQLITE_ERROR} +do_test capi3-18.5 { + sqlite3_reset $STMT + sqlite3_errcode db +} {SQLITE_SCHEMA} +do_test capi3-18.6 { + sqlite3_errmsg db +} {database schema has changed} +sqlite3_finalize $STMT + +# Ticket #3134. 
Prepare a statement with an nBytes parameter of 0. +# Make sure this works correctly and does not reference memory out of +# range. +# +do_test capi3-19.1 { + sqlite3_prepare_tkt3134 db +} {} + +# Tests of the interface when no VFS is registered. +# +if {![info exists tester_do_binarylog]} { + db close + vfs_unregister_all + do_test capi3-20.1 { + sqlite3_sleep 100 + } {0} + vfs_reregister_all +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/cast.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/cast.test --- sqlite3-3.4.2/test/cast.test 2007-08-13 16:18:28.000000000 +0100 +++ sqlite3-3.6.16/test/cast.test 2009-06-12 03:37:53.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the CAST operator. # -# $Id: cast.test,v 1.8 2007/08/13 15:18:28 drh Exp $ +# $Id: cast.test,v 1.10 2008/11/06 15:33:04 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -183,6 +183,38 @@ execsql {SELECT CAST('123.5abc' AS integer)} } 123 +do_test case-1.60 { + execsql {SELECT CAST(null AS REAL)} +} {{}} +do_test case-1.61 { + execsql {SELECT typeof(CAST(null AS REAL))} +} {null} +do_test case-1.62 { + execsql {SELECT CAST(1 AS REAL)} +} {1.0} +do_test case-1.63 { + execsql {SELECT typeof(CAST(1 AS REAL))} +} {real} +do_test case-1.64 { + execsql {SELECT CAST('1' AS REAL)} +} {1.0} +do_test case-1.65 { + execsql {SELECT typeof(CAST('1' AS REAL))} +} {real} +do_test case-1.66 { + execsql {SELECT CAST('abc' AS REAL)} +} {0.0} +do_test case-1.67 { + execsql {SELECT typeof(CAST('abc' AS REAL))} +} {real} +do_test case-1.68 { + execsql {SELECT CAST(x'31' AS REAL)} +} {1.0} +do_test case-1.69 { + execsql {SELECT typeof(CAST(x'31' AS REAL))} +} {real} + + # Ticket #1662. Ignore leading spaces in numbers when casting. # do_test cast-2.1 { @@ -287,4 +319,28 @@ sqlite3_finalize $::STMT } {SQLITE_OK} + +do_test cast-4.1 { + db eval { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES('abc'); + SELECT a, CAST(a AS integer) FROM t1; + } +} {abc 0} +do_test cast-4.2 { + db eval { + SELECT CAST(a AS integer), a FROM t1; + } +} {0 abc} +do_test cast-4.3 { + db eval { + SELECT a, CAST(a AS integer), a FROM t1; + } +} {abc 0 abc} +do_test cast-4.4 { + db eval { + SELECT CAST(a AS integer), a, CAST(a AS real), a FROM t1; + } +} {0 abc 0.0 abc} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/check.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/check.test --- sqlite3-3.4.2/test/check.test 2007-07-23 20:39:47.000000000 +0100 +++ sqlite3-3.6.16/test/check.test 2009-06-25 12:35:51.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. 
The # focus of this file is testing CHECK constraints # -# $Id: check.test,v 1.11 2007/07/23 19:39:47 drh Exp $ +# $Id: check.test,v 1.13 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -118,8 +118,8 @@ execsql { CREATE TABLE t2( x INTEGER CHECK( typeof(coalesce(x,0))=="integer" ), - y REAL CHECK( typeof(coalesce(y,0.1))=="real" ), - z TEXT CHECK( typeof(coalesce(z,''))=="text" ) + y REAL CHECK( typeof(coalesce(y,0.1))=='real' ), + z TEXT CHECK( typeof(coalesce(z,''))=='text' ) ); } } {} @@ -129,6 +129,8 @@ SELECT * FROM t2; } } {1 2.2 three} +db close +sqlite3 db test.db do_test check-2.3 { execsql { INSERT INTO t2 VALUES(NULL, NULL, NULL); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate2.test --- sqlite3-3.4.2/test/collate2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/collate2.test 2009-06-05 18:03:02.000000000 +0100 @@ -12,7 +12,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is page cache subsystem. # -# $Id: collate2.test,v 1.5 2007/02/01 23:02:46 drh Exp $ +# $Id: collate2.test,v 1.6 2008/08/20 16:35:10 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -136,6 +136,26 @@ ORDER BY 1, oid; } } {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.4 { + execsql { + SELECT b FROM collate2t1 WHERE b > 'aa' ORDER BY +b; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.5 { + execsql { + SELECT b FROM collate2t1 WHERE a COLLATE nocase > 'aa' ORDER BY +b; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.6 { + execsql { + SELECT b FROM collate2t1 WHERE b COLLATE nocase > 'aa' ORDER BY +b; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.2.7 { + execsql { + SELECT b FROM collate2t1 WHERE c COLLATE nocase > 'aa' ORDER BY +b; + } +} {ab aB Ab AB ba bA Ba BA bb bB Bb BB} do_test collate2-1.3 { execsql { SELECT c FROM collate2t1 WHERE c > 'aa' ORDER BY 1; @@ -169,6 +189,11 @@ SELECT b FROM collate2t1 WHERE b < 'aa' ORDER BY 1, oid; } } {} +do_test collate2-1.5.1 { + execsql { + SELECT b FROM collate2t1 WHERE b < 'aa' ORDER BY +b; + } +} {} do_test collate2-1.6 { execsql { SELECT c FROM collate2t1 WHERE c < 'aa' ORDER BY 1; @@ -229,6 +254,11 @@ SELECT b FROM collate2t1 WHERE b BETWEEN 'Aa' AND 'Bb' ORDER BY 1, oid; } } {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB} +do_test collate2-1.17.1 { + execsql { + SELECT b FROM collate2t1 WHERE b BETWEEN 'Aa' AND 'Bb' ORDER BY +b; + } +} {aa aA Aa AA ab aB Ab AB ba bA Ba BA bb bB Bb BB} do_test collate2-1.18 { execsql { SELECT c FROM collate2t1 WHERE c BETWEEN 'Aa' AND 'Bb' ORDER BY 1; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate3.test --- sqlite3-3.4.2/test/collate3.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/collate3.test 2009-06-05 18:03:02.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is page cache subsystem. 
# -# $Id: collate3.test,v 1.11 2005/09/08 01:58:43 drh Exp $ +# $Id: collate3.test,v 1.13 2008/08/20 16:35:10 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -152,7 +152,6 @@ } } {0 {}} do_test collate3-2.13 { -btree_breakpoint catchsql { SELECT 10 UNION ALL SELECT 20 ORDER BY 1 COLLATE string_compare; } @@ -293,19 +292,19 @@ SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined; } } {1 {no such collation sequence: user_defined}} -do_test collate3-4.8 { +do_test collate3-4.8.1 { db collate user_defined "string compare" catchsql { SELECT * FROM collate3t1 ORDER BY a COLLATE user_defined; } } {0 {hello {}}} -do_test collate3-4.8 { +do_test collate3-4.8.2 { db close lindex [catch { sqlite3 db test.db }] 0 } {0} -do_test collate3-4.8 { +do_test collate3-4.8.3 { execsql { DROP TABLE collate3t1; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate4.test --- sqlite3-3.4.2/test/collate4.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/collate4.test 2009-06-05 18:03:02.000000000 +0100 @@ -12,7 +12,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is page cache subsystem. # -# $Id: collate4.test,v 1.8 2005/04/01 10:47:40 drh Exp $ +# $Id: collate4.test,v 1.9 2008/01/05 17:39:30 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -591,12 +591,12 @@ count { SELECT min(a) FROM collate4t1; } -} {10 2} +} {10 1} do_test collate4-4.4 { count { SELECT max(a) FROM collate4t1; } -} {20 1} +} {20 0} do_test collate4-4.5 { # Test that the index with collation type NUMERIC is not used. execsql { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate5.test --- sqlite3-3.4.2/test/collate5.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/collate5.test 2009-06-05 18:03:02.000000000 +0100 @@ -14,7 +14,7 @@ # SELECT statements that use user-defined collation sequences. Also # GROUP BY clauses that use user-defined collation sequences. 
# -# $Id: collate5.test,v 1.5 2005/09/07 22:48:16 drh Exp $ +# $Id: collate5.test,v 1.7 2008/09/16 11:58:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -57,17 +57,39 @@ execsql { SELECT DISTINCT a FROM collate5t1; } -} {a b n} +} {A B N} do_test collate5-1.2 { execsql { SELECT DISTINCT b FROM collate5t1; } -} {apple Apple banana {}} +} {{} Apple apple banana} do_test collate5-1.3 { execsql { SELECT DISTINCT a, b FROM collate5t1; } -} {a apple A Apple b banana n {}} +} {A Apple a apple B banana N {}} + +# Ticket #3376 +# +do_test collate5-1.11 { + execsql { + CREATE TABLE tkt3376(a COLLATE nocase PRIMARY KEY); + INSERT INTO tkt3376 VALUES('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'); + INSERT INTO tkt3376 VALUES('ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789'); + SELECT DISTINCT a FROM tkt3376; + } +} {abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789ABXYZ012234567890123456789} +do_test collate5-1.12 { + sqlite3 db2 :memory: + db2 eval { + PRAGMA encoding=UTF16le; + CREATE TABLE tkt3376(a COLLATE nocase PRIMARY KEY); + INSERT INTO tkt3376 VALUES('abc'); + INSERT INTO tkt3376 VALUES('ABX'); + SELECT DISTINCT a FROM tkt3376; + } +} {abc ABX} +catch {db2 close} # The remainder of this file tests compound SELECT statements. # Omit it if the library is compiled such that they are omitted. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate7.test --- sqlite3-3.4.2/test/collate7.test 2007-05-07 15:58:53.000000000 +0100 +++ sqlite3-3.6.16/test/collate7.test 2009-06-05 18:03:02.000000000 +0100 @@ -13,7 +13,7 @@ # focus of this script is the experimental sqlite3_create_collation_v2() # API. # -# $Id: collate7.test,v 1.1 2007/05/07 14:58:53 danielk1977 Exp $ +# $Id: collate7.test,v 1.2 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -70,4 +70,3 @@ } {1 {no such collation sequence: CASELESS}} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate8.test --- sqlite3-3.4.2/test/collate8.test 2007-06-20 17:13:23.000000000 +0100 +++ sqlite3-3.6.16/test/collate8.test 2009-06-05 18:03:02.000000000 +0100 @@ -13,7 +13,7 @@ # focus of this script is making sure collations pass through the # unary + operator. 
# -# $Id: collate8.test,v 1.1 2007/06/20 16:13:23 drh Exp $ +# $Id: collate8.test,v 1.2 2008/08/25 12:14:09 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -48,5 +48,78 @@ SELECT a FROM t1 ORDER BY +a } } {aaa BBB ccc DDD} +do_test collate8-1.11 { + execsql { + SELECT a AS x FROM t1 ORDER BY "x"; + } +} {aaa BBB ccc DDD} +do_test collate8-1.12 { + execsql { + SELECT a AS x FROM t1 WHERE x<'ccc' ORDER BY 1 + } +} {aaa BBB} +do_test collate8-1.13 { + execsql { + SELECT a AS x FROM t1 WHERE x<'ccc' COLLATE binary ORDER BY [x] + } +} {aaa BBB DDD} +do_test collate8-1.14 { + execsql { + SELECT a AS x FROM t1 WHERE +x<'ccc' ORDER BY 1 + } +} {aaa BBB} +do_test collate8-1.15 { + execsql { + SELECT a AS x FROM t1 ORDER BY +x + } +} {aaa BBB ccc DDD} + + +# When a result-set column is aliased into a WHERE clause, make sure the +# collating sequence logic works correctly. +# +do_test collate8-2.1 { + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES('abc'); + INSERT INTO t2 VALUES('ABC'); + SELECT a AS x FROM t2 WHERE x='abc'; + } +} {abc} +do_test collate8-2.2 { + execsql { + SELECT a AS x FROM t2 WHERE x='abc' COLLATE nocase; + } +} {abc ABC} +do_test collate8-2.3 { + execsql { + SELECT a AS x FROM t2 WHERE (x COLLATE nocase)='abc'; + } +} {abc ABC} +do_test collate8-2.4 { + execsql { + SELECT a COLLATE nocase AS x FROM t2 WHERE x='abc'; + } +} {abc ABC} +do_test collate8-2.5 { + execsql { + SELECT a COLLATE nocase AS x FROM t2 WHERE (x COLLATE binary)='abc'; + } +} {abc} +do_test collate8-2.6 { + execsql { + SELECT a COLLATE nocase AS x FROM t2 WHERE x='abc' COLLATE binary; + } +} {abc ABC} +do_test collate8-2.7 { + execsql { + SELECT * FROM t2 WHERE (a COLLATE nocase)='abc' COLLATE binary; + } +} {abc ABC} +do_test collate8-2.8 { + execsql { + SELECT a COLLATE nocase AS x FROM t2 WHERE 'abc'=x COLLATE binary; + } +} {abc} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collate9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collate9.test --- sqlite3-3.4.2/test/collate9.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/collate9.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,178 @@ +# +# 2007 November 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is making sure that the names of collation +# sequences may be quoted using double quotes in SQL statements. +# +# $Id: collate9.test,v 1.2 2008/07/10 00:32:42 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc reverse_sort {lhs rhs} { + return [string compare $rhs $lhs] +} +db collate "reverse sort" reverse_sort + +# This procedure executes the SQL. Then it checks to see if the OP_Sort +# opcode was executed. If an OP_Sort did occur, then "sort" is appended +# to the result. If no OP_Sort happened, then "nosort" is appended. +# +# This procedure is used to check to make sure sorting is or is not +# occurring as expected. 
+# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# Test plan: +# +# collate9-1.* - Test collation sequences attached to table columns +# collate9-2.* - Test collation sequences attached to expressions +# collate9-3.* - Test collation sequences attached to an index +# collate9-4.* - Test collation sequences as an argument to REINDEX +# + +do_test collate9-1.1 { + execsql { + CREATE TABLE xy(x COLLATE "reverse sort", y COLLATE binary); + INSERT INTO xy VALUES('one', 'one'); + INSERT INTO xy VALUES('two', 'two'); + INSERT INTO xy VALUES('three', 'three'); + } +} {} +do_test collate9-1.2 { + execsql { + SELECT x FROM xy ORDER BY x + } +} {two three one} +do_test collate9-1.3 { + execsql { + SELECT y FROM xy ORDER BY y + } +} {one three two} +do_test collate9-1.4 { + cksort { + SELECT x FROM xy ORDER BY x + } +} {two three one sort} +do_test collate9-1.5 { + execsql { + CREATE INDEX xy_i ON xy(x) + } +} {} +do_test collate9-1.6 { + cksort { + SELECT x FROM xy ORDER BY x + } +} {two three one nosort} + +do_test collate9-2.1 { + execsql { + SELECT x, x < 'seven' FROM xy ORDER BY x + } +} {two 1 three 1 one 0} +do_test collate9-2.2 { + execsql { + SELECT y, y < 'seven' FROM xy ORDER BY x + } +} {two 0 three 0 one 1} +do_test collate9-2.3 { + execsql { + SELECT y, y COLLATE "reverse sort" < 'seven' FROM xy ORDER BY x + } +} {two 1 three 1 one 0} +do_test collate9-2.4 { + execsql { + SELECT y FROM xy ORDER BY y + } +} {one three two} +do_test collate9-2.5 { + execsql { + SELECT y FROM xy ORDER BY y COLLATE "reverse sort" + } +} {two three one} +do_test collate9-2.6 { + execsql { + SELECT y COLLATE "reverse sort" AS aaa FROM xy ORDER BY aaa + } +} {two three one} + +do_test collate9-3.1 { + execsql { + CREATE INDEX xy_i2 ON xy(y COLLATE "reverse sort"); + } +} {} +do_test collate9-3.2 { + cksort { + SELECT y FROM xy ORDER BY y + } +} {one three two sort} +do_test collate9-3.3 { + cksort { + SELECT y FROM xy ORDER BY y COLLATE "reverse sort" + } +} {two three one nosort} +do_test collate9-3.4 { + cksort { + SELECT y AS aaa FROM xy ORDER BY aaa + } +} {one three two sort} +do_test collate9-3.5 { + cksort { + SELECT y COLLATE "reverse sort" AS aaa FROM xy ORDER BY aaa + } +} {two three one nosort} + +ifcapable reindex { + do_test collate9-4.1 { + execsql { + REINDEX "reverse sort" + } + } {} + + # Modify the "reverse sort" collation so that it now sorts in the same + # order as binary. + proc reverse_sort {lhs rhs} { + return [string compare $lhs $rhs] + } + + # The integrity check should now fail because the indexes created using + # "reverse sort" are no longer in sync with the collation sequence + # implementation. + do_test collate9-4.2 { + expr {"ok" eq [execsql { PRAGMA integrity_check }]} + } {0} + + do_test collate9-4.3 { + execsql { + REINDEX "reverse sort" + } + } {} + + # Integrity check should now pass. 
+ do_test collate9-4.4 { + expr {"ok" eq [execsql { PRAGMA integrity_check }]} + } {1} + + do_test collate9-4.5 { + cksort { + SELECT x FROM xy ORDER BY x COLLATE "reverse sort" + } + } {one three two nosort} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/collateA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/collateA.test --- sqlite3-3.4.2/test/collateA.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/collateA.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,217 @@ +# +# 2008 January 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is the built-in RTRIM collating +# API. +# +# $Id: collateA.test,v 1.3 2008/04/15 04:02:41 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test collateA-1.1 { + execsql { + CREATE TABLE t1( + a INTEGER PRIMARY KEY, + b TEXT COLLATE BINARY, + c TEXT COLLATE RTRIM + ); + INSERT INTO t1 VALUES(1, 'abcde','abcde'); + INSERT INTO t1 VALUES(2, 'xyzzy ','xyzzy '); + INSERT INTO t1 VALUES(3, 'xyzzy ','xyzzy '); + INSERT INTO t1 VALUES(4, 'xyzzy ','xyzzy '); + INSERT INTO t1 VALUES(5, ' ', ' '); + INSERT INTO t1 VALUES(6, '', ''); + SELECT count(*) FROM t1; + } +} {6} +do_test collateA-1.2 { + execsql {SELECT a FROM t1 WHERE b='abcde '} +} {} +do_test collateA-1.3 { + execsql {SELECT a FROM t1 WHERE c='abcde '} +} {1} +do_test collateA-1.4 { + execsql {SELECT a FROM t1 WHERE b='xyzzy'} +} {} +do_test collateA-1.5 { + execsql {SELECT a FROM t1 WHERE c='xyzzy'} +} {2 3 4} +do_test collateA-1.6 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-1.7 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-1.8 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-1.9 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-1.10 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-1.11 { + execsql {SELECT 'abc123'='abc123 ' COLLATE RTRIM;} +} {1} +do_test collateA-1.12 { + execsql {SELECT 'abc123 '='abc123' COLLATE RTRIM;} +} {1} +do_test collateA-1.13 { + execsql {SELECT ' '='' COLLATE RTRIM, ' '='' COLLATE BINARY, ' '=''} +} {1 0 0} +do_test collateA-1.14 { + execsql {SELECT ''=' ' COLLATE RTRIM, ''=' ' COLLATE BINARY, ''=' '} +} {1 0 0} +do_test collateA-1.15 { + execsql {SELECT ' '=' ' COLLATE RTRIM, ' '=' '} +} {1 0} +do_test collateA-1.16 { + execsql {SELECT ''<>' ' COLLATE RTRIM, ''<>' ' COLLATE BINARY, ''<>' '} +} {0 1 1} +do_test collateA-1.17 { + execsql {SELECT a FROM t1 WHERE c='xyzz'} +} {} +do_test collateA-1.18 { + execsql {SELECT a FROM t1 WHERE c='xyzzyy '} +} {} +do_test collateA-1.19 { + execsql {SELECT a FROM t1 WHERE c='xyzz '} +} {} +do_test collateA-1.20 { + execsql {SELECT a FROM t1 WHERE c='abcd '} +} {} +do_test collateA-1.21 { + execsql {SELECT a FROM t1 WHERE c='abcd'} +} {} +do_test collateA-1.22 { + execsql {SELECT a FROM t1 WHERE c='abc'} +} {} +do_test collateA-1.23 { + execsql {SELECT a FROM t1 WHERE c='abcdef '} +} {} +do_test collateA-1.24 { + execsql {SELECT a FROM t1 WHERE c=''} +} {5 6} +do_test collateA-1.25 { + execsql {SELECT a FROM t1 WHERE c=' '} +} {5 6} +do_test collateA-1.26 { + 
execsql {SELECT a FROM t1 WHERE c=' '} +} {5 6} + + +do_test collateA-2.1 { + execsql { + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); + PRAGMA integrity_check; + } +} {ok} +do_test collateA-2.2 { + execsql {SELECT a FROM t1 WHERE b='abcde '} +} {} +do_test collateA-2.3 { + execsql {SELECT a FROM t1 WHERE c='abcde '} +} {1} +do_test collateA-2.4 { + execsql {SELECT a FROM t1 WHERE b='xyzzy'} +} {} +do_test collateA-2.5 { + execsql {SELECT a FROM t1 WHERE c='xyzzy'} +} {2 3 4} +do_test collateA-2.6 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-2.7 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-2.8 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-2.9 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-2.10 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-2.17 { + execsql {SELECT a FROM t1 WHERE c='xyzz'} +} {} +do_test collateA-2.18 { + execsql {SELECT a FROM t1 WHERE c='xyzzyy '} +} {} +do_test collateA-2.19 { + execsql {SELECT a FROM t1 WHERE c='xyzz '} +} {} +do_test collateA-2.20 { + execsql {SELECT a FROM t1 WHERE c='abcd '} +} {} +do_test collateA-2.21 { + execsql {SELECT a FROM t1 WHERE c='abcd'} +} {} +do_test collateA-2.22 { + execsql {SELECT a FROM t1 WHERE c='abc'} +} {} +do_test collateA-2.23 { + execsql {SELECT a FROM t1 WHERE c='abcdef '} +} {} +do_test collateA-2.24 { + execsql {SELECT a FROM t1 WHERE c=''} +} {5 6} +do_test collateA-2.25 { + execsql {SELECT a FROM t1 WHERE c=' '} +} {5 6} +do_test collateA-2.26 { + execsql {SELECT a FROM t1 WHERE c=' '} +} {5 6} + + +do_test collateA-3.1 { + db close + sqlite3 db test.db + execsql { + REINDEX; + PRAGMA integrity_check; + } +} {ok} +do_test collateA-3.2 { + execsql {SELECT a FROM t1 WHERE b='abcde '} +} {} +do_test collateA-3.3 { + execsql {SELECT a FROM t1 WHERE c='abcde '} +} {1} +do_test collateA-3.4 { + execsql {SELECT a FROM t1 WHERE b='xyzzy'} +} {} +do_test collateA-3.5 { + execsql {SELECT a FROM t1 WHERE c='xyzzy'} +} {2 3 4} +do_test collateA-3.6 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-3.7 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-3.8 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-3.9 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} +do_test collateA-3.10 { + execsql {SELECT a FROM t1 WHERE c='xyzzy '} +} {2 3 4} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/colmeta.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/colmeta.test --- sqlite3-3.4.2/test/colmeta.test 2006-02-10 13:33:31.000000000 +0000 +++ sqlite3-3.6.16/test/colmeta.test 2009-06-05 18:03:02.000000000 +0100 @@ -12,7 +12,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is the sqlite3_table_column_metadata() API. 
# -# $Id: colmeta.test,v 1.3 2006/02/10 13:33:31 danielk1977 Exp $ +# $Id: colmeta.test,v 1.4 2008/01/23 12:52:41 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -91,13 +91,4 @@ } $results } -do_test colmeta-misuse.1 { - db close - set rc [catch { - sqlite3_table_column_metadata $::DB a b c - } msg] - list $rc $msg -} {1 {library routine called out of sequence}} - finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/colname.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/colname.test --- sqlite3-3.4.2/test/colname.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/colname.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,329 @@ +# 2008 July 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing how SQLite generates the names +# of columns in a result set. +# +# $Id: colname.test,v 1.7 2009/06/02 15:47:38 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Rules (applied in order): +# +# (1) If there is an AS clause, use it. +# +# (2) A non-trival expression (not a table column name) then the name is +# a copy of the expression text. +# +# (3) If short_column_names=ON, then just the abbreviated column name without +# the table name. +# +# (4) When short_column_names=OFF and full_column_names=OFF then +# use case (2) for simple queries and case (5) for joins. +# +# (5) When short_column_names=OFF and full_column_names=ON then +# use the form: TABLE.COLUMN +# + + +# Verify the default settings for short_column_name and full_column_name +# +do_test colname-1.1 { + db eval {PRAGMA short_column_names} +} {1} +do_test colname-1.2 { + db eval {PRAGMA full_column_names} +} {0} + +# Tests for then short=ON and full=any +# +do_test colname-2.1 { + db eval { + CREATE TABLE tabc(a,b,c); + INSERT INTO tabc VALUES(1,2,3); + CREATE TABLE txyz(x,y,z); + INSERT INTO txyz VALUES(4,5,6); + CREATE TABLE tboth(a,b,c,x,y,z); + INSERT INTO tboth VALUES(11,12,13,14,15,16); + CREATE VIEW v1 AS SELECT tabC.a, txyZ.x, * + FROM tabc, txyz ORDER BY 1 LIMIT 1; + CREATE VIEW v2 AS SELECT tabC.a, txyZ.x, tboTh.a, tbotH.x, * + FROM tabc, txyz, tboth ORDER BY 1 LIMIT 1; + } + execsql2 { + SELECT * FROM tabc; + } +} {a 1 b 2 c 3} +do_test colname-2.2 { + execsql2 { + SELECT Tabc.a, tAbc.b, taBc.c, * FROM tabc + } +} {a 1 b 2 c 3 a 1 b 2 c 3} +do_test colname-2.3 { + execsql2 { + SELECT +tabc.a, -tabc.b, tabc.c, * FROM tabc + } +} {+tabc.a 1 -tabc.b -2 c 3 a 1 b 2 c 3} +do_test colname-2.4 { + execsql2 { + SELECT +tabc.a AS AAA, -tabc.b AS BBB, tabc.c CCC, * FROM tabc + } +} {AAA 1 BBB -2 CCC 3 a 1 b 2 c 3} +do_test colname-2.5 { + execsql2 { + SELECT tabc.a, txyz.x, * FROM tabc, txyz; + } +} {a 1 x 4 a 1 b 2 c 3 x 4 y 5 z 6} +do_test colname-2.6 { + execsql2 { + SELECT tabc.a, txyz.x, tabc.*, txyz.* FROM tabc, txyz; + } +} {a 1 x 4 a 1 b 2 c 3 x 4 y 5 z 6} +do_test colname-2.7 { + execsql2 { + SELECT tabc.a, txyz.x, tboth.a, tboth.x, * FROM tabc, txyz, tboth; + } +} {a 11 x 14 a 11 x 14 a 11 b 12 c 13 x 14 y 15 z 16 a 11 b 12 c 13 x 14 y 15 z 16} +do_test colname-2.8 { + execsql2 { + SELECT * FROM v1 ORDER BY 2; + } +} {a 1 x 4 a:1 1 b 2 c 3 x:1 4 y 5 z 6} +do_test 
colname-2.9 { + execsql2 { + SELECT * FROM v2 ORDER BY 2; + } +} {a 1 x 4 a:1 11 x:1 14 a:2 1 b 2 c 3 x:2 4 y 5 z 6 a:3 11 b:1 12 c:1 13 x:3 14 y:1 15 z:1 16} + + +# Tests for short=OFF and full=OFF +# +do_test colname-3.1 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=OFF; + CREATE VIEW v3 AS SELECT tabC.a, txyZ.x, * + FROM tabc, txyz ORDER BY 1 LIMIT 1; + CREATE VIEW v4 AS SELECT tabC.a, txyZ.x, tboTh.a, tbotH.x, * + FROM tabc, txyz, tboth ORDER BY 1 LIMIT 1; + } + execsql2 { + SELECT * FROM tabc; + } +} {a 1 b 2 c 3} +do_test colname-3.2 { + execsql2 { + SELECT Tabc.a, tAbc.b, taBc.c FROM tabc + } +} {Tabc.a 1 tAbc.b 2 taBc.c 3} +do_test colname-3.3 { + execsql2 { + SELECT +tabc.a, -tabc.b, tabc.c FROM tabc + } +} {+tabc.a 1 -tabc.b -2 tabc.c 3} +do_test colname-3.4 { + execsql2 { + SELECT +tabc.a AS AAA, -tabc.b AS BBB, tabc.c CCC FROM tabc + } +} {AAA 1 BBB -2 CCC 3} +do_test colname-3.5 { + execsql2 { + SELECT Tabc.a, Txyz.x, * FROM tabc, txyz; + } +} {Tabc.a 1 Txyz.x 4 a 1 b 2 c 3 x 4 y 5 z 6} +do_test colname-3.6 { + execsql2 { + SELECT tabc.*, txyz.* FROM tabc, txyz; + } +} {a 1 b 2 c 3 x 4 y 5 z 6} +do_test colname-3.7 { + execsql2 { + SELECT * FROM tabc, txyz, tboth; + } +} {a 11 b 12 c 13 x 14 y 15 z 16 a 11 b 12 c 13 x 14 y 15 z 16} +do_test colname-3.8 { + execsql2 { + SELECT v1.a, * FROM v1 ORDER BY 2; + } +} {v1.a 1 a 1 x 4 a:1 1 b 2 c 3 x:1 4 y 5 z 6} +do_test colname-3.9 { + execsql2 { + SELECT * FROM v2 ORDER BY 2; + } +} {a 1 x 4 a:1 11 x:1 14 a:2 1 b 2 c 3 x:2 4 y 5 z 6 a:3 11 b:1 12 c:1 13 x:3 14 y:1 15 z:1 16} +do_test colname-3.10 { + execsql2 { + SELECT * FROM v3 ORDER BY 2; + } +} {a 1 x 4 a:1 1 b 2 c 3 x:1 4 y 5 z 6} +do_test colname-3.11 { + execsql2 { + SELECT * FROM v4 ORDER BY 2; + } +} {a 1 x 4 a:1 11 x:1 14 a:2 1 b 2 c 3 x:2 4 y 5 z 6 a:3 11 b:1 12 c:1 13 x:3 14 y:1 15 z:1 16} + +# Test for short=OFF and full=ON +# +do_test colname-4.1 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=ON; + CREATE VIEW v5 AS SELECT tabC.a, txyZ.x, * + FROM tabc, txyz ORDER BY 1 LIMIT 1; + CREATE VIEW v6 AS SELECT tabC.a, txyZ.x, tboTh.a, tbotH.x, * + FROM tabc, txyz, tboth ORDER BY 1 LIMIT 1; + } + execsql2 { + SELECT * FROM tabc; + } +} {tabc.a 1 tabc.b 2 tabc.c 3} +do_test colname-4.2 { + execsql2 { + SELECT Tabc.a, tAbc.b, taBc.c FROM tabc + } +} {tabc.a 1 tabc.b 2 tabc.c 3} +do_test colname-4.3 { + execsql2 { + SELECT +tabc.a, -tabc.b, tabc.c FROM tabc + } +} {+tabc.a 1 -tabc.b -2 tabc.c 3} +do_test colname-4.4 { + execsql2 { + SELECT +tabc.a AS AAA, -tabc.b AS BBB, tabc.c CCC FROM tabc + } +} {AAA 1 BBB -2 CCC 3} +do_test colname-4.5 { + execsql2 { + SELECT Tabc.a, Txyz.x, * FROM tabc, txyz; + } +} {tabc.a 1 txyz.x 4 tabc.a 1 tabc.b 2 tabc.c 3 txyz.x 4 txyz.y 5 txyz.z 6} +do_test colname-4.6 { + execsql2 { + SELECT tabc.*, txyz.* FROM tabc, txyz; + } +} {tabc.a 1 tabc.b 2 tabc.c 3 txyz.x 4 txyz.y 5 txyz.z 6} +do_test colname-4.7 { + execsql2 { + SELECT * FROM tabc, txyz, tboth; + } +} {tabc.a 1 tabc.b 2 tabc.c 3 txyz.x 4 txyz.y 5 txyz.z 6 tboth.a 11 tboth.b 12 tboth.c 13 tboth.x 14 tboth.y 15 tboth.z 16} +do_test colname-4.8 { + execsql2 { + SELECT * FROM v1 ORDER BY 2; + } +} {v1.a 1 v1.x 4 v1.a:1 1 v1.b 2 v1.c 3 v1.x:1 4 v1.y 5 v1.z 6} +do_test colname-4.9 { + execsql2 { + SELECT * FROM v2 ORDER BY 2; + } +} {v2.a 1 v2.x 4 v2.a:1 11 v2.x:1 14 v2.a:2 1 v2.b 2 v2.c 3 v2.x:2 4 v2.y 5 v2.z 6 v2.a:3 11 v2.b:1 12 v2.c:1 13 v2.x:3 14 v2.y:1 15 v2.z:1 16} +do_test colname-4.10 { + execsql2 { + SELECT * FROM v3 ORDER BY 2; + 
} +} {v3.a 1 v3.x 4 v3.a:1 1 v3.b 2 v3.c 3 v3.x:1 4 v3.y 5 v3.z 6} +do_test colname-4.11 { + execsql2 { + SELECT * FROM v4 ORDER BY 2; + } +} {v4.a 1 v4.x 4 v4.a:1 11 v4.x:1 14 v4.a:2 1 v4.b 2 v4.c 3 v4.x:2 4 v4.y 5 v4.z 6 v4.a:3 11 v4.b:1 12 v4.c:1 13 v4.x:3 14 v4.y:1 15 v4.z:1 16} +do_test colname-4.12 { + execsql2 { + SELECT * FROM v5 ORDER BY 2; + } +} {v5.a 1 v5.x 4 v5.a:1 1 v5.b 2 v5.c 3 v5.x:1 4 v5.y 5 v5.z 6} +do_test colname-4.13 { + execsql2 { + SELECT * FROM v6 ORDER BY 2; + } +} {v6.a 1 v6.x 4 v6.a:1 11 v6.x:1 14 v6.a:2 1 v6.b 2 v6.c 3 v6.x:2 4 v6.y 5 v6.z 6 v6.a:3 11 v6.b:1 12 v6.c:1 13 v6.x:3 14 v6.y:1 15 v6.z:1 16} + +# ticket #3229 +do_test colname-5.1 { + lreplace [db eval { + SELECT x.* FROM sqlite_master X LIMIT 1; + }] 3 3 x +} {table tabc tabc x {CREATE TABLE tabc(a,b,c)}} + +# ticket #3370, #3371, #3372 +# +do_test colname-6.1 { + db close + sqlite3 db test.db + db eval { + CREATE TABLE t6(a, ['a'], ["a"], "[a]", [`a`]); + INSERT INTO t6 VALUES(1,2,3,4,5); + } + execsql2 {SELECT * FROM t6} +} {a 1 'a' 2 {"a"} 3 {[a]} 4 `a` 5} +do_test colname-6.2 { + execsql2 {SELECT ['a'], [`a`], "[a]", [a], ["a"] FROM t6} +} {'a' 2 `a` 5 {[a]} 4 a 1 {"a"} 3} +do_test colname-6.3 { + execsql2 {SELECT "'a'", "`a`", "[a]", "a", """a""" FROM t6} +} {'a' 2 `a` 5 {[a]} 4 a 1 {"a"} 3} +do_test colname-6.4 { + execsql2 {SELECT `'a'`, ```a```, `[a]`, `a`, `"a"` FROM t6} +} {'a' 2 `a` 5 {[a]} 4 a 1 {"a"} 3} +do_test colname-6.11 { + execsql2 {SELECT a, max(a) AS m FROM t6} +} {a 1 m 1} +do_test colname-6.12 { + execsql2 {SELECT `a`, max(a) AS m FROM t6} +} {a 1 m 1} +do_test colname-6.13 { + execsql2 {SELECT "a", max(a) AS m FROM t6} +} {a 1 m 1} +do_test colname-6.14 { + execsql2 {SELECT [a], max(a) AS m FROM t6} +} {a 1 m 1} +do_test colname-6.15 { + execsql2 {SELECT t6.a, max(a) AS m FROM t6} +} {a 1 m 1} +do_test colname-6.16 { + execsql2 {SELECT ['a'], max(['a']) AS m FROM t6} +} {'a' 2 m 2} +do_test colname-6.17 { + execsql2 {SELECT ["a"], max(["a"]) AS m FROM t6} +} {{"a"} 3 m 3} +do_test colname-6.18 { + execsql2 {SELECT "[a]", max("[a]") AS m FROM t6} +} {{[a]} 4 m 4} +do_test colname-6.19 { + execsql2 {SELECT "`a`", max([`a`]) AS m FROM t6} +} {`a` 5 m 5} + + +# Ticket #3429 +# We cannot find anything wrong, but it never hurts to add another +# test case. +# +do_test colname-7.1 { + db eval { + CREATE TABLE t7(x INTEGER PRIMARY KEY, y); + INSERT INTO t7 VALUES(1,2); + } + execsql2 {SELECT rowid, * FROM t7} +} {x 1 x 1 y 2} + +# Tickets #3893 and #3984. (Same problem; independently reported) +# +do_test colname-8.1 { + db eval { + CREATE TABLE "t3893"("x"); + INSERT INTO t3893 VALUES(123); + SELECT "y"."x" FROM (SELECT "x" FROM "t3893") AS "y"; + } +} {123} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/conflict.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/conflict.test --- sqlite3-3.4.2/test/conflict.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/conflict.test 2009-06-25 12:24:39.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for the conflict resolution extension # to SQLite. # -# $Id: conflict.test,v 1.29 2007/04/06 21:42:22 drh Exp $ +# $Id: conflict.test,v 1.32 2009/04/30 09:10:38 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -285,27 +285,30 @@ # t3 Number of temporary files for tables # t4 Number of temporary files for statement journals # +# Update: Since temporary table files are now opened lazily, and none +# of the following tests use large quantities of data, t3 is always 0. 
+# foreach {i conf1 cmd t0 t1 t2 t3 t4} { 1 {} UPDATE 1 {6 7 8 9} 1 0 1 - 2 REPLACE UPDATE 0 {7 6 9} 1 1 0 - 3 IGNORE UPDATE 0 {6 7 3 9} 1 1 0 + 2 REPLACE UPDATE 0 {7 6 9} 1 0 0 + 3 IGNORE UPDATE 0 {6 7 3 9} 1 0 0 4 FAIL UPDATE 1 {6 7 3 4} 1 0 0 5 ABORT UPDATE 1 {1 2 3 4} 1 0 1 6 ROLLBACK UPDATE 1 {1 2 3 4} 0 0 0 - 7 REPLACE {UPDATE OR IGNORE} 0 {6 7 3 9} 1 1 0 - 8 IGNORE {UPDATE OR REPLACE} 0 {7 6 9} 1 1 0 - 9 FAIL {UPDATE OR IGNORE} 0 {6 7 3 9} 1 1 0 - 10 ABORT {UPDATE OR REPLACE} 0 {7 6 9} 1 1 0 - 11 ROLLBACK {UPDATE OR IGNORE} 0 {6 7 3 9} 1 1 0 - 12 {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 1 0 - 13 {} {UPDATE OR REPLACE} 0 {7 6 9} 1 1 0 + 7 REPLACE {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 8 IGNORE {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 + 9 FAIL {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 10 ABORT {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 + 11 ROLLBACK {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 12 {} {UPDATE OR IGNORE} 0 {6 7 3 9} 1 0 0 + 13 {} {UPDATE OR REPLACE} 0 {7 6 9} 1 0 0 14 {} {UPDATE OR FAIL} 1 {6 7 3 4} 1 0 0 15 {} {UPDATE OR ABORT} 1 {1 2 3 4} 1 0 1 16 {} {UPDATE OR ROLLBACK} 1 {1 2 3 4} 0 0 0 } { if {$t0} {set t1 {column a is not unique}} if {[info exists TEMP_STORE] && $TEMP_STORE>=2} { - set t3 $t4 + set t3 0 } else { set t3 [expr {$t3+$t4}] } @@ -756,5 +759,34 @@ } {1 2 3 7 8 9} integrity_check conflict-11.6 +# Make sure UPDATE OR REPLACE works on tables that have only +# an INTEGER PRIMARY KEY. +# +do_test conflict-12.1 { + execsql { + CREATE TABLE t5(a INTEGER PRIMARY KEY, b text); + INSERT INTO t5 VALUES(1,'one'); + INSERT INTO t5 VALUES(2,'two'); + SELECT * FROM t5 + } +} {1 one 2 two} +do_test conflict-12.2 { + execsql { + UPDATE OR IGNORE t5 SET a=a+1 WHERE a=1; + SELECT * FROM t5; + } +} {1 one 2 two} +do_test conflict-12.3 { + catchsql { + UPDATE t5 SET a=a+1 WHERE a=1; + } +} {1 {PRIMARY KEY must be unique}} +do_test conflict-12.4 { + execsql { + UPDATE OR REPLACE t5 SET a=a+1 WHERE a=1; + SELECT * FROM t5; + } +} {2 one} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt2.test --- sqlite3-3.4.2/test/corrupt2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt2.test 2009-06-25 12:22:33.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. # -# $Id: corrupt2.test,v 1.4 2007/03/13 16:32:25 danielk1977 Exp $ +# $Id: corrupt2.test,v 1.20 2009/04/06 17:50:03 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -23,6 +23,8 @@ # do_test corrupt2-1.1 { execsql { + PRAGMA auto_vacuum=0; + PRAGMA page_size=1024; CREATE TABLE abc(a, b, c); } } {} @@ -128,8 +130,406 @@ catchsql { SELECT * FROM sqlite_master; } db2 -} {1 {malformed database schema - index a3 already exists}} +} {1 {malformed database schema (a3) - index a3 already exists}} db2 close +do_test corrupt2-3.1 { + file delete -force corrupt.db + file delete -force corrupt.db-journal + sqlite3 db2 corrupt.db + + execsql { + PRAGMA auto_vacuum = 1; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + INSERT INTO t2 VALUES(randomblob(100), randomblob(100), randomblob(100)); + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + } db2 + + db2 close + + # On the root page of table t2 (page 4), set one of the child page-numbers + # to 0. 
This corruption will be detected when SQLite attempts to update + # the pointer-map after moving the content of page 4 to page 3 as part + # of the DROP TABLE operation below. + # + set fd [open corrupt.db r+] + fconfigure $fd -encoding binary -translation binary + seek $fd [expr 1024*3 + 12] + set zCelloffset [read $fd 2] + binary scan $zCelloffset S iCelloffset + seek $fd [expr 1024*3 + $iCelloffset] + puts -nonewline $fd "\00\00\00\00" + close $fd + + sqlite3 db2 corrupt.db + catchsql { + DROP TABLE t1; + } db2 +} {1 {database disk image is malformed}} + +do_test corrupt2-4.1 { + catchsql { + SELECT * FROM t2; + } db2 +} {1 {database disk image is malformed}} + +db2 close + +unset -nocomplain result +do_test corrupt2-5.1 { + file delete -force corrupt.db + file delete -force corrupt.db-journal + sqlite3 db2 corrupt.db + + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + INSERT INTO t2 VALUES(randomblob(100), randomblob(100), randomblob(100)); + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + INSERT INTO t1 SELECT * FROM t2; + } db2 + + db2 close + + # This block links a page from table t2 into the t1 table structure. + # + set fd [open corrupt.db r+] + fconfigure $fd -encoding binary -translation binary + seek $fd [expr 1024 + 12] + set zCelloffset [read $fd 2] + binary scan $zCelloffset S iCelloffset + seek $fd [expr 1024 + $iCelloffset] + set zChildPage [read $fd 4] + seek $fd [expr 2*1024 + 12] + set zCelloffset [read $fd 2] + binary scan $zCelloffset S iCelloffset + seek $fd [expr 2*1024 + $iCelloffset] + puts -nonewline $fd $zChildPage + close $fd + + sqlite3 db2 corrupt.db + db2 eval {SELECT rowid FROM t1} { + set result [db2 eval {pragma integrity_check}] + break + } + set result +} {{*** in database main *** +On tree page 2 cell 0: 2nd reference to page 10 +On tree page 2 cell 1: Child page depth differs +Page 4 is never used}} + +db2 close + +proc corruption_test {args} { + set A(-corrupt) {} + set A(-sqlprep) {} + set A(-tclprep) {} + array set A $args + + catch {db close} + file delete -force corrupt.db + file delete -force corrupt.db-journal + + sqlite3 db corrupt.db + eval $A(-tclprep) + db eval $A(-sqlprep) + db close + + eval $A(-corrupt) + + sqlite3 db corrupt.db + eval $A(-test) +} + +ifcapable autovacuum { + # The tests within this block - corrupt2-6.* - aim to test corruption + # detection within an incremental-vacuum. When an incremental-vacuum + # step is executed, the last non-free page of the database file is + # moved into a free space in the body of the file. After doing so, + # the page reference in the parent page must be updated to refer + # to the new location. These tests test the outcome of corrupting + # that page reference before performing the incremental vacuum. + # + + # The last page in the database page is the second page + # in an overflow chain. + # + corruption_test -sqlprep { + PRAGMA auto_vacuum = incremental; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randomblob(2500)); + INSERT INTO t1 VALUES(2, randomblob(2500)); + DELETE FROM t1 WHERE a = 1; + } -corrupt { + hexio_write corrupt.db [expr 1024*5] 00000008 + } -test { + do_test corrupt2-6.1 { + catchsql { pragma incremental_vacuum = 1 } + } {1 {database disk image is malformed}} + } + + # The last page in the database page is a non-root b-tree page. 
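# Editorial sketch (not invoked by any test): the corrupt2-6.* cases above
# corrupt the page that an incremental-vacuum step is about to relocate.
# For reference, the proc below shows the normal, uncorrupted behaviour of
# PRAGMA incremental_vacuum; the proc name and the file name ivac.db are
# assumptions, and the sqlite3 Tcl bindings are assumed to be loaded.
proc demo_incremental_vacuum {{filename ivac.db}} {
  file delete -force $filename
  sqlite3 dbiv $filename
  dbiv eval {
    PRAGMA auto_vacuum = incremental;
    PRAGMA page_size = 1024;
    CREATE TABLE t(a, b);
    INSERT INTO t VALUES(1, randomblob(5000));
    DELETE FROM t;                      -- leaves pages on the freelist
  }
  set before [dbiv eval {PRAGMA freelist_count}]
  dbiv eval {PRAGMA incremental_vacuum}  ;# relocate and truncate free pages
  set after [dbiv eval {PRAGMA freelist_count}]
  dbiv close
  file delete -force $filename
  return [list $before $after]           ;# e.g. {6 0}
}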
+ # + corruption_test -sqlprep { + PRAGMA auto_vacuum = incremental; + PRAGMA page_size = 1024; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, randomblob(2500)); + INSERT INTO t1 VALUES(2, randomblob(50)); + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + DELETE FROM t1 WHERE a = 1; + } -corrupt { + hexio_write corrupt.db [expr 1024*2 + 8] 00000009 + } -test { + do_test corrupt2-6.2 { + catchsql { pragma incremental_vacuum = 1 } + } {1 {database disk image is malformed}} + } + + # Set up a pointer-map entry so that the last page of the database + # file appears to be a b-tree root page. This should be detected + # as corruption. + # + corruption_test -sqlprep { + PRAGMA auto_vacuum = incremental; + PRAGMA page_size = 1024; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, randomblob(2500)); + INSERT INTO t1 VALUES(2, randomblob(2500)); + INSERT INTO t1 VALUES(3, randomblob(2500)); + DELETE FROM t1 WHERE a = 1; + } -corrupt { + set nPage [expr [file size corrupt.db] / 1024] + hexio_write corrupt.db [expr 1024 + ($nPage-3)*5] 010000000 + } -test { + do_test corrupt2-6.3 { + catchsql { pragma incremental_vacuum = 1 } + } {1 {database disk image is malformed}} + } + + corruption_test -sqlprep { + PRAGMA auto_vacuum = 1; + PRAGMA page_size = 1024; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, randomblob(2500)); + DELETE FROM t1 WHERE a = 1; + } -corrupt { + set nAppend [expr 1024*207 - [file size corrupt.db]] + set fd [open corrupt.db r+] + seek $fd 0 end + puts -nonewline $fd [string repeat x $nAppend] + close $fd + } -test { + do_test corrupt2-6.4 { + catchsql { + BEGIN EXCLUSIVE; + COMMIT; + } + } {1 {database disk image is malformed}} + } +} + + +set sqlprep { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + CREATE INDEX i1 ON t1(b); + INSERT INTO t1 VALUES(1, randomblob(50)); + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; + INSERT INTO t1 SELECT NULL, randomblob(50) FROM t1; +} + +corruption_test -sqlprep $sqlprep -corrupt { + # Set the page-flags of one of the leaf pages of the index B-Tree to + # 0x0D (interpreted by SQLite as "leaf page of a table B-Tree"). + # + set fd [open corrupt.db r+] + fconfigure $fd -translation binary -encoding binary + seek $fd [expr 1024*2 + 8] + set zRightChild [read $fd 4] + binary scan $zRightChild I iRightChild + seek $fd [expr 1024*($iRightChild-1)] + puts -nonewline $fd "\x0D" + close $fd +} -test { + do_test corrupt2-7.1 { + catchsql { SELECT b FROM t1 ORDER BY b ASC } + } {1 {database disk image is malformed}} +} + +corruption_test -sqlprep $sqlprep -corrupt { + # Mess up the page-header of one of the leaf pages of the index B-Tree. + # The corruption is detected as part of an OP_Prev opcode. 
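# Editorial sketch (never called by the tests): the "page-flags" byte that
# corrupt2-7.* and corrupt2-8.* overwrite is byte 0 of a b-tree page --
# 0x02/0x05 mark interior index/table pages and 0x0A/0x0D mark index/table
# leaves; page 1 carries the 100-byte file header before its flag byte.
# The proc name and the 1024-byte default page size are assumptions.
proc demo_page_flag {filename pgno {pagesize 1024}} {
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd [expr {($pgno-1)*$pagesize + ($pgno==1 ? 100 : 0)}]
  binary scan [read $fd 1] c flag
  close $fd
  return [format 0x%02X [expr {$flag & 0xff}]]
}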
+ # + set fd [open corrupt.db r+] + fconfigure $fd -translation binary -encoding binary + seek $fd [expr 1024*2 + 12] + set zCellOffset [read $fd 2] + binary scan $zCellOffset S iCellOffset + seek $fd [expr 1024*2 + $iCellOffset] + set zChild [read $fd 4] + binary scan $zChild I iChild + seek $fd [expr 1024*($iChild-1)+3] + puts -nonewline $fd "\xFFFF" + close $fd +} -test { + do_test corrupt2-7.1 { + catchsql { SELECT b FROM t1 ORDER BY b DESC } + } {1 {database disk image is malformed}} +} + +corruption_test -sqlprep $sqlprep -corrupt { + # Set the page-flags of one of the leaf pages of the table B-Tree to + # 0x0A (interpreted by SQLite as "leaf page of an index B-Tree"). + # + set fd [open corrupt.db r+] + fconfigure $fd -translation binary -encoding binary + seek $fd [expr 1024*1 + 8] + set zRightChild [read $fd 4] + binary scan $zRightChild I iRightChild + seek $fd [expr 1024*($iRightChild-1)] + puts -nonewline $fd "\x0A" + close $fd +} -test { + do_test corrupt2-8.1 { + catchsql { SELECT * FROM t1 WHERE rowid=1000 } + } {1 {database disk image is malformed}} +} + +corruption_test -sqlprep { + CREATE TABLE t1(a, b, c); CREATE TABLE t8(a, b, c); CREATE TABLE tE(a, b, c); + CREATE TABLE t2(a, b, c); CREATE TABLE t9(a, b, c); CREATE TABLE tF(a, b, c); + CREATE TABLE t3(a, b, c); CREATE TABLE tA(a, b, c); CREATE TABLE tG(a, b, c); + CREATE TABLE t4(a, b, c); CREATE TABLE tB(a, b, c); CREATE TABLE tH(a, b, c); + CREATE TABLE t5(a, b, c); CREATE TABLE tC(a, b, c); CREATE TABLE tI(a, b, c); + CREATE TABLE t6(a, b, c); CREATE TABLE tD(a, b, c); CREATE TABLE tJ(a, b, c); + CREATE TABLE x1(a, b, c); CREATE TABLE x8(a, b, c); CREATE TABLE xE(a, b, c); + CREATE TABLE x2(a, b, c); CREATE TABLE x9(a, b, c); CREATE TABLE xF(a, b, c); + CREATE TABLE x3(a, b, c); CREATE TABLE xA(a, b, c); CREATE TABLE xG(a, b, c); + CREATE TABLE x4(a, b, c); CREATE TABLE xB(a, b, c); CREATE TABLE xH(a, b, c); + CREATE TABLE x5(a, b, c); CREATE TABLE xC(a, b, c); CREATE TABLE xI(a, b, c); + CREATE TABLE x6(a, b, c); CREATE TABLE xD(a, b, c); CREATE TABLE xJ(a, b, c); +} -corrupt { + set fd [open corrupt.db r+] + fconfigure $fd -translation binary -encoding binary + seek $fd 108 + set zRightChild [read $fd 4] + binary scan $zRightChild I iRightChild + seek $fd [expr 1024*($iRightChild-1)+3] + puts -nonewline $fd "\x00\x00" + close $fd +} -test { + do_test corrupt2-9.1 { + catchsql { SELECT sql FROM sqlite_master } + } {1 {database disk image is malformed}} +} + +corruption_test -sqlprep { + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + PRAGMA writable_schema = 1; + UPDATE sqlite_master SET rootpage = NULL WHERE name = 't2'; +} -test { + do_test corrupt2-10.1 { + catchsql { SELECT * FROM t2 } + } {1 {malformed database schema (t2)}} + do_test corrupt2-10.2 { + sqlite3_errcode db + } {SQLITE_CORRUPT} +} + +corruption_test -sqlprep { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, randstr(100,100)); + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t2 SELECT * FROM t1; + DELETE FROM t1; +} -corrupt { + set offset [expr [file size corrupt.db] - 1024] + hexio_write corrupt.db $offset FF + hexio_write corrupt.db 24 12345678 +} -test { + do_test corrupt2-11.1 { + catchsql { 
PRAGMA incremental_vacuum } + } {1 {database disk image is malformed}} +} +corruption_test -sqlprep { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, randstr(100,100)); + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t1 SELECT NULL, randstr(100,100) FROM t1; + INSERT INTO t2 SELECT * FROM t1; + DELETE FROM t1; +} -corrupt { + set pgno [expr [file size corrupt.db] / 1024] + hexio_write corrupt.db [expr 1024+5*($pgno-3)] 03 + hexio_write corrupt.db 24 12345678 +} -test { + do_test corrupt2-12.1 { + catchsql { PRAGMA incremental_vacuum } + } {1 {database disk image is malformed}} +} + +ifcapable autovacuum { + # It is not possible for the last page in a database file to be the + # pending-byte page (AKA the locking page). This test verifies that if + # an attempt is made to commit a transaction to such an auto-vacuum + # database SQLITE_CORRUPT is returned. + # + corruption_test -tclprep { + db eval { + PRAGMA auto_vacuum = full; + PRAGMA page_size = 1024; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(NULL, randstr(50,50)); + } + for {set ii 0} {$ii < 10} {incr ii} { + db eval { INSERT INTO t1 SELECT NULL, randstr(50,50) FROM t1 } + } + } -corrupt { + do_test corrupt2-13.1 { + file size corrupt.db + } $::sqlite_pending_byte + hexio_write corrupt.db [expr $::sqlite_pending_byte+1023] 00 + } -test { + do_test corrupt2-13.2 { + file size corrupt.db + } [expr $::sqlite_pending_byte + 1024] + do_test corrupt2-13.3 { + catchsql { DELETE FROM t1 WHERE rowid < 30; } + } {1 {database disk image is malformed}} + } +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt4.test --- sqlite3-3.4.2/test/corrupt4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt4.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,74 @@ +# 2007 Sept 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. +# +# $Id: corrupt4.test,v 1.1 2007/09/07 14:32:07 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a database with a freelist containing at least two pages. +# +do_test corrupt4-1.1 { + set bigstring [string repeat 0123456789 200] + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES($bigstring); + CREATE TABLE t2(y); + INSERT INTO t2 VALUES(1); + DROP TABLE t1; + } + file size test.db +} [expr {1024*4}] + +# Verify that there are two pages on the freelist. +# +do_test corrupt4-1.2 { + execsql {PRAGMA freelist_count} +} {2} + +# Get the page number for the trunk of the freelist. 
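# Editorial sketch (not used by the tests): the comment above and the code
# below rely on the database-header layout -- byte offset 32 holds the page
# number of the first freelist trunk page and offset 36 the total number of
# free pages, while a trunk page itself begins with a 4-byte next-trunk
# pointer, a 4-byte leaf count, and then the leaf page numbers.  The proc
# name and the 1024-byte default page size are assumptions.
proc demo_freelist_summary {filename {pagesize 1024}} {
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd 32
  binary scan [read $fd 8] II trunkpgno nfree
  set nleaf 0
  if {$trunkpgno != 0} {
    seek $fd [expr {($trunkpgno-1)*$pagesize + 4}]
    binary scan [read $fd 4] I nleaf
  }
  close $fd
  return [list $trunkpgno $nfree $nleaf]  ;# first trunk, free pages, leaves
}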
+# +set trunkpgno [hexio_get_int [hexio_read test.db 32 4]] +set baseaddr [expr {($trunkpgno-1)*1024}] + +# Verify that the trunk of the freelist has exactly one +# leaf. +# +do_test corrupt4-1.3 { + hexio_get_int [hexio_read test.db [expr {$::baseaddr+4}] 4] +} {1} + +# Insert a negative number as the number of leaves on the trunk. +# Then try to add a new element to the freelist. +# +do_test corrupt4-1.4 { + hexio_write test.db [expr {$::baseaddr+4}] [hexio_render_int32 -100000000] + db close + sqlite3 db test.db + catchsql { + DROP TABLE t2 + } +} {1 {database disk image is malformed}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt5.test --- sqlite3-3.4.2/test/corrupt5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt5.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,45 @@ +# 2008 Jan 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. Checks for +# malformed schema. +# +# $Id: corrupt5.test,v 1.3 2009/06/04 02:47:04 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a database with a freelist containing at least two pages. +# +do_test corrupt5-1.1 { + execsql { + CREATE TABLE t1(a,b,c); + CREATE INDEX i1 ON t1(a,b); + PRAGMA writable_schema=ON; + UPDATE sqlite_master SET name=NULL, sql=NULL WHERE name='i1'; + } + db close + sqlite3 db test.db + catchsql { + SELECT * FROM t1 + } +} {1 {malformed database schema (?)}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt6.test --- sqlite3-3.4.2/test/corrupt6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt6.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,251 @@ +# 2008 May 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on corrupt SerialTypeLen values. +# +# $Id: corrupt6.test,v 1.2 2008/05/19 15:37:10 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a simple, small database. 
+# +do_test corrupt6-1.1 { + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1(x) VALUES('varint32-01234567890123456789012345678901234567890123456789'); + INSERT INTO t1(x) VALUES('varint32-01234567890123456789012345678901234567890123456789'); + } + file size test.db +} [expr {1024*2}] + +# Verify that the file format is as we expect. The page size +# should be 1024 bytes. +# +do_test corrupt6-1.2 { + hexio_get_int [hexio_read test.db 16 2] +} 1024 ;# The page size is 1024 +do_test corrupt6-1.3 { + hexio_get_int [hexio_read test.db 20 1] +} 0 ;# Unused bytes per page is 0 + +integrity_check corrupt6-1.4 + +# Verify SerialTypeLen for first field of two records as we expect. +# SerialTypeLen = (len*2+12) = 60*2+12 = 132 +do_test corrupt6-1.5.1 { + hexio_read test.db 1923 2 +} 8103 ;# First text field size is 81 03 == 131 +do_test corrupt6-1.5.2 { + hexio_read test.db 1987 2 +} 8103 ;# Second text field size is 81 03 == 131 + +# Verify simple query results as expected. +do_test corrupt6-1.6 { + db close + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 0 {varint32 varint32} ] +integrity_check corrupt6-1.7 + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Increase SerialTypeLen by 2. +do_test corrupt6-1.8.1 { + db close + hexio_write test.db 1923 8105 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Decrease SerialTypeLen by 2. +do_test corrupt6-1.8.2 { + db close + hexio_write test.db 1923 8101 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Put value of record 1 / field 1 SerialTypeLen back. +do_test corrupt6-1.8.3 { + db close + hexio_write test.db 1923 8103 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 0 {varint32 varint32} ] +integrity_check corrupt6-1.8.4 + +# Adjust value of record 2 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Increase SerialTypeLen by 2. +do_test corrupt6-1.9.1 { + db close + hexio_write test.db 1987 8105 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 2 / field 2 SerialTypeLen and see if the +# corruption is detected. +# Decrease SerialTypeLen by 2. +do_test corrupt6-1.9.2 { + db close + hexio_write test.db 1987 8101 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Put value of record 1 / field 2 SerialTypeLen back. +do_test corrupt6-1.9.3 { + db close + hexio_write test.db 1987 8103 + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 0 {varint32 varint32} ] +integrity_check corrupt6-1.9.4 + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF 7F (2 bytes) +do_test corrupt6-1.10.1 { + db close + hexio_write test.db 1923 FF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. 
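# Editorial note (illustrative only): a TEXT value of N bytes has serial
# type 2*N+13, and serial types of 128 or more are stored as multi-byte
# varints.  The strings inserted above are 59 bytes long, so the serial
# type checked by corrupt6-1.5.* and restored by corrupt6-1.8.3/1.9.3 is
# 2*59+13 = 131, which encodes as the two bytes 81 03.  The proc below
# (name assumed, not used by the tests) renders a small integer in that
# varint form:
proc demo_varint {n} {
  set groups [list [expr {$n & 0x7f}]]
  while {[set n [expr {$n >> 7}]] > 0} {
    lappend groups [expr {$n & 0x7f}]
  }
  set hex {}
  for {set i [expr {[llength $groups]-1}]} {$i > 0} {incr i -1} {
    append hex [format %02X [expr {[lindex $groups $i] | 0x80}]]
  }
  append hex [format %02X [lindex $groups 0]]
  return $hex
}
# demo_varint 131  => 8103        demo_varint 3  => 03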
+# Set SerialTypeLen to FF FF 7F (3 bytes) +do_test corrupt6-1.10.2 { + db close + hexio_write test.db 1923 FFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF 7F (4 bytes) +do_test corrupt6-1.10.3 { + db close + hexio_write test.db 1923 FFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF FF 7F (5 bytes) +do_test corrupt6-1.10.4 { + db close + hexio_write test.db 1923 FFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF FF FF 7F (6 bytes, and overflows). +do_test corrupt6-1.10.5 { + db close + hexio_write test.db 1923 FFFFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF FF FF FF 7F (7 bytes, and overflows). +do_test corrupt6-1.10.6 { + db close + hexio_write test.db 1923 FFFFFFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF FF FF FF FF 7F (8 bytes, and overflows). +do_test corrupt6-1.10.7 { + db close + hexio_write test.db 1923 FFFFFFFFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FF FF FF FF FF FF FF FF 7F (9 bytes, and overflows). +do_test corrupt6-1.10.8 { + db close + hexio_write test.db 1923 FFFFFFFFFFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +# Adjust value of record 1 / field 1 SerialTypeLen and see if the +# corruption is detected. +# Set SerialTypeLen to FFFF FF FF FF FF FF FF FF 7F (10 bytes, and overflows). +do_test corrupt6-1.10.9 { + db close + hexio_write test.db 1923 FFFFFFFFFFFFFFFFFF7F + sqlite3 db test.db + catchsql { + SELECT substr(x,1,8) FROM t1 + } +} [list 1 {database disk image is malformed}] + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt7.test --- sqlite3-3.4.2/test/corrupt7.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt7.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,125 @@ +# 2008 June 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on corrupt cell offsets in a btree page. +# +# $Id: corrupt7.test,v 1.7 2009/06/09 13:42:25 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Create a simple, small database. +# +do_test corrupt7-1.1 { + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1(x) VALUES(1); + INSERT INTO t1(x) VALUES(2); + INSERT INTO t1(x) SELECT x+2 FROM t1; + INSERT INTO t1(x) SELECT x+4 FROM t1; + INSERT INTO t1(x) SELECT x+8 FROM t1; + } + file size test.db +} [expr {1024*2}] + +# Verify that the file format is as we expect. The page size +# should be 1024 bytes. +# +do_test corrupt7-1.2 { + hexio_get_int [hexio_read test.db 16 2] +} 1024 ;# The page size is 1024 +do_test corrupt7-1.3 { + hexio_get_int [hexio_read test.db 20 1] +} 0 ;# Unused bytes per page is 0 + +integrity_check corrupt7-1.4 + +# Deliberately corrupt some of the cell offsets in the btree page +# on page 2 of the database. +# +# The error message is different depending on whether or not the +# SQLITE_ENABLE_OVERSIZE_CELL_CHECK compile-time option is engaged. +# +ifcapable oversize_cell_check { + do_test corrupt7-2.1 { + db close + hexio_write test.db 1062 FF + sqlite3 db test.db + db eval {PRAGMA integrity_check(1)} + } {{*** in database main *** +Page 2: sqlite3BtreeInitPage() returns error code 11}} + do_test corrupt7-2.2 { + db close + hexio_write test.db 1062 04 + sqlite3 db test.db + db eval {PRAGMA integrity_check(1)} + } {{*** in database main *** +Page 2: sqlite3BtreeInitPage() returns error code 11}} +} else { + do_test corrupt7-2.1 { + db close + hexio_write test.db 1062 FF + sqlite3 db test.db + db eval {PRAGMA integrity_check(1)} + } {{*** in database main *** +Corruption detected in cell 15 on page 2}} + do_test corrupt7-2.2 { + db close + hexio_write test.db 1062 04 + sqlite3 db test.db + db eval {PRAGMA integrity_check(1)} + } {{*** in database main *** +Corruption detected in cell 15 on page 2}} +} + +# The code path that was causing the buffer overrun that this test +# case was checking for was removed. +# +#do_test corrupt7-3.1 { +# execsql { +# DROP TABLE t1; +# CREATE TABLE t1(a, b); +# INSERT INTO t1 VALUES(1, 'one'); +# INSERT INTO t1 VALUES(100, 'one hundred'); +# INSERT INTO t1 VALUES(100000, 'one hundred thousand'); +# CREATE INDEX i1 ON t1(b); +# } +# db close +# +# # Locate the 3rd cell in the index. +# set cell_offset [hexio_get_int [hexio_read test.db [expr 1024*2 + 12] 2]] +# incr cell_offset [expr 1024*2] +# incr cell_offset 1 +# +# # This write corrupts the "header-size" field of the database record +# # stored in the index cell. At one point this was causing sqlite to +# # reference invalid memory. +# hexio_write test.db $cell_offset FFFF7F +# +# sqlite3 db test.db +# catchsql { +# SELECT b FROM t1 WHERE b > 'o' AND b < 'p'; +# } +#} {1 {database disk image is malformed}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt8.test --- sqlite3-3.4.2/test/corrupt8.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt8.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,99 @@ +# 2008 July 9 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on corrupt pointer map pages. +# +# $Id: corrupt8.test,v 1.2 2008/07/11 03:34:10 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas||!autovacuum { + finish_test + return +} + +# Create a database to work with. +# +do_test corrupt8-1.1 { + execsql { + PRAGMA auto_vacuum=1; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1(x) VALUES(1); + INSERT INTO t1(x) VALUES(2); + INSERT INTO t1(x) SELECT x+2 FROM t1; + INSERT INTO t1(x) SELECT x+4 FROM t1; + INSERT INTO t1(x) SELECT x+8 FROM t1; + INSERT INTO t1(x) SELECT x+16 FROM t1; + INSERT INTO t1(x) SELECT x+32 FROM t1; + INSERT INTO t1(x) SELECT x+64 FROM t1; + INSERT INTO t1(x) SELECT x+128 FROM t1; + INSERT INTO t1(x) SELECT x+256 FROM t1; + CREATE TABLE t2(a,b); + INSERT INTO t2 SELECT x, x*x FROM t1; + } + expr {[file size test.db]>1024*12} +} {1} +integrity_check corrupt8-1.2 + +# Loop through each ptrmap entry. Corrupt the entry and make sure the +# corruption is detected by the integrity_check. +# +for {set i 1024} {$i<2048} {incr i 5} { + set oldval [hexio_read test.db $i 1] + if {$oldval==0} break + hexio_write test.db $i 00 + do_test corrupt8-2.$i.0 { + db close + sqlite3 db test.db + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} + } {1} + for {set k 1} {$k<=5} {incr k} { + if {$k==$oldval} continue + hexio_write test.db $i 0$k + do_test corrupt8-2.$i.$k { + db close + sqlite3 db test.db + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} + } {1} + } + hexio_write test.db $i 06 + do_test corrupt8-2.$i.6 { + db close + sqlite3 db test.db + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} + } {1} + hexio_write test.db $i $oldval + if {$oldval>2} { + set i2 [expr {$i+1+$i%4}] + set oldval [hexio_read test.db $i2 1] + hexio_write test.db $i2 [format %02x [expr {($oldval+1)&0xff}]] + do_test corrupt8-2.$i.7 { + db close + sqlite3 db test.db + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} + } {1} + hexio_write test.db $i2 $oldval + } +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt9.test --- sqlite3-3.4.2/test/corrupt9.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt9.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,131 @@ +# 2008 July 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on corruption in the form of duplicate entries on the freelist. 
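# Editorial sketch (not used by the tests): with auto_vacuum enabled and
# 1024-byte pages, page 2 is the pointer map.  It holds one 5-byte entry
# per database page starting at page 3 -- a type byte (only values 1-5 are
# valid, which is why the corrupt8-2.* loop above steps by 5 and tries
# 00 through 06) followed by a big-endian parent page number.  The proc
# name and the 1024-byte default page size are assumptions.
proc demo_ptrmap_entry {filename pgno {pagesize 1024}} {
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd [expr {$pagesize + ($pgno - 3)*5}]
  binary scan [read $fd 5] cI type parent
  close $fd
  return [list [expr {$type & 0xff}] $parent]
}
# demo_ptrmap_entry test.db 3  => e.g. {1 0} for a b-tree root page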
+# +# $Id: corrupt9.test,v 1.3 2009/06/04 02:47:04 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# We must have the page_size pragma for these tests to work. +# +ifcapable !pager_pragmas { + finish_test + return +} + +# Return the offset to the first (trunk) page of the freelist. Return +# zero of the freelist is empty. +# +proc freelist_trunk_offset {filename} { + if {[hexio_read $filename 36 4]==0} {return 0} + set pgno [hexio_get_int [hexio_read $filename 32 4]] + return [expr {($pgno-1)*[hexio_get_int [hexio_read $filename 16 2]]}] +} + +# This procedure looks at the first trunk page of the freelist and +# corrupts that page by overwriting up to N entries with duplicates +# of the first entry. +# +proc corrupt_freelist {filename N} { + set offset [freelist_trunk_offset $filename] + if {$offset==0} {error "Freelist is empty"} + set cnt [hexio_get_int [hexio_read $filename [expr {$offset+4}] 4]] + set pgno [hexio_read $filename [expr {$offset+8}] 4] + for {set i 12} {$N>0 && $i<8+4*$cnt} {incr i 4; incr N -1} { + hexio_write $filename [expr {$offset+$i}] $pgno + } +} + +# Create a database to work with. Make sure there are plenty of +# entries on the freelist. +# +do_test corrupt9-1.1 { + execsql { + PRAGMA auto_vacuum=NONE; + PRAGMA page_size=1024; + CREATE TABLE t1(x); + INSERT INTO t1(x) VALUES(1); + INSERT INTO t1(x) VALUES(2); + INSERT INTO t1(x) SELECT x+2 FROM t1; + INSERT INTO t1(x) SELECT x+4 FROM t1; + INSERT INTO t1(x) SELECT x+8 FROM t1; + INSERT INTO t1(x) SELECT x+16 FROM t1; + INSERT INTO t1(x) SELECT x+32 FROM t1; + INSERT INTO t1(x) SELECT x+64 FROM t1; + INSERT INTO t1(x) SELECT x+128 FROM t1; + INSERT INTO t1(x) SELECT x+256 FROM t1; + CREATE TABLE t2(a,b); + INSERT INTO t2 SELECT x, x*x FROM t1; + CREATE INDEX i1 ON t1(x); + CREATE INDEX i2 ON t2(b,a); + DROP INDEX i2; + } + expr {[file size test.db]>1024*24} +} {1} +integrity_check corrupt9-1.2 + +# Corrupt the freelist by adding duplicate entries to the freelist. +# Make sure the corruption is detected. +# +db close +file copy -force test.db test.db-template + +corrupt_freelist test.db 1 +sqlite3 db test.db +do_test corrupt9-2.1 { + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} +} {1} +do_test corrupt9-2.2 { + catchsql { + CREATE INDEX i2 ON t2(b,a); + REINDEX; + } +} {1 {database disk image is malformed}} + + +db close +file copy -force test.db-template test.db +corrupt_freelist test.db 2 +sqlite3 db test.db +do_test corrupt9-3.1 { + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} +} {1} +do_test corrupt9-3.2 { + catchsql { + CREATE INDEX i2 ON t2(b,a); + REINDEX; + } +} {1 {database disk image is malformed}} + +db close +file copy -force test.db-template test.db +corrupt_freelist test.db 3 +sqlite3 db test.db +do_test corrupt9-4.1 { + set x [db eval {PRAGMA integrity_check}] + expr {$x!="ok"} +} {1} +do_test corrupt9-4.2 { + catchsql { + CREATE INDEX i2 ON t2(b,a); + REINDEX; + } +} {1 {database disk image is malformed}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corruptA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corruptA.test --- sqlite3-3.4.2/test/corruptA.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corruptA.test 2009-06-05 18:03:02.000000000 +0100 @@ -0,0 +1,72 @@ +# 2008 July 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on corrupt database headers. +# +# $Id: corruptA.test,v 1.1 2008/07/11 16:39:23 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Create a database to work with. +# +do_test corruptA-1.1 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1(x) VALUES(1); + } + expr {[file size test.db]>=1024} +} {1} +integrity_check corruptA-1.2 + +# Corrupt the file header in various ways and make sure the corruption +# is detected when opening the database file. +# +db close +file copy -force test.db test.db-template + +do_test corruptA-2.1 { + file copy -force test.db-template test.db + hexio_write test.db 19 02 ;# the read format number + sqlite3 db test.db + catchsql {SELECT * FROM t1} +} {1 {file is encrypted or is not a database}} + +do_test corruptA-2.2 { + db close + file copy -force test.db-template test.db + hexio_write test.db 21 41 ;# max embedded payload fraction + sqlite3 db test.db + catchsql {SELECT * FROM t1} +} {1 {file is encrypted or is not a database}} + +do_test corruptA-2.3 { + db close + file copy -force test.db-template test.db + hexio_write test.db 22 1f ;# min embedded payload fraction + sqlite3 db test.db + catchsql {SELECT * FROM t1} +} {1 {file is encrypted or is not a database}} + +do_test corruptA-2.4 { + db close + file copy -force test.db-template test.db + hexio_write test.db 23 21 ;# min leaf payload fraction + sqlite3 db test.db + catchsql {SELECT * FROM t1} +} {1 {file is encrypted or is not a database}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corruptB.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corruptB.test --- sqlite3-3.4.2/test/corruptB.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corruptB.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,184 @@ +# 2008 Sep 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It specifically focuses +# on loops in the B-Tree structure. A loop is formed in a B-Tree structure +# when there exists a page that is both an a descendent or ancestor of +# itself. +# +# Also test that an SQLITE_CORRUPT error is returned if a B-Tree page +# contains a (corrupt) reference to a page greater than the configured +# maximum page number. 
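# Editorial sketch (not used by the tests): the corruptA-2.* cases above
# poke the fixed-value bytes of the 100-byte file header -- offsets 18/19
# are the write/read format versions (1 or 2), offset 21 must be 64 (max
# embedded payload fraction), offset 22 must be 32 (min embedded payload
# fraction) and offset 23 must be 32 (leaf payload fraction).  The proc
# below (name assumed) dumps those six bytes for inspection:
proc demo_header_bytes {filename} {
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd 18
  binary scan [read $fd 6] c6 raw
  close $fd
  set out {}
  foreach b $raw { lappend out [expr {$b & 0xff}] }
  return $out          ;# typically 1 1 0 64 32 32 for a healthy database
}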
+# +# $Id: corruptB.test,v 1.3 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +do_test corruptB-1.1 { + execsql { + PRAGMA auto_vacuum = 1; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(randomblob(200)); + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + } + expr {[file size test.db] > (1024*9)} +} {1} +integrity_check corruptB-1.2 + +file copy -force test.db bak.db + +# Set the right-child of a B-Tree rootpage to refer to the root-page itself. +# +do_test corruptB-1.3.1 { + set ::root [execsql {SELECT rootpage FROM sqlite_master}] + set ::offset [expr {($::root-1)*1024}] + hexio_write test.db [expr $offset+8] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.3.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +# Set the left-child of a cell in a B-Tree rootpage to refer to the +# root-page itself. +# +do_test corruptB-1.4.1 { + db close + file copy -force bak.db test.db + set cell_offset [hexio_get_int [hexio_read test.db [expr $offset+12] 2]] + hexio_write test.db [expr $offset+$cell_offset] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.4.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +# Now grow the table B-Tree so that it is more than 2 levels high. +# +do_test corruptB-1.5.1 { + db close + file copy -force bak.db test.db + sqlite3 db test.db + execsql { + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + INSERT INTO t1 SELECT randomblob(200) FROM t1; + } +} {} + +file copy -force test.db bak.db + +# Set the right-child pointer of the right-child of the root page to point +# back to the root page. +# +do_test corruptB-1.6.1 { + db close + set iRightChild [hexio_get_int [hexio_read test.db [expr $offset+8] 4]] + set c_offset [expr ($iRightChild-1)*1024] + hexio_write test.db [expr $c_offset+8] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.6.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +# Set the left-child pointer of a cell of the right-child of the root page to +# point back to the root page. +# +do_test corruptB-1.7.1 { + db close + file copy -force bak.db test.db + set cell_offset [hexio_get_int [hexio_read test.db [expr $c_offset+12] 2]] + hexio_write test.db [expr $c_offset+$cell_offset] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.7.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +do_test corruptB-1.8.1 { + db close + set cell_offset [hexio_get_int [hexio_read test.db [expr $offset+12] 2]] + set iLeftChild [ + hexio_get_int [hexio_read test.db [expr $offset+$cell_offset] 4] + ] + set c_offset [expr ($iLeftChild-1)*1024] + hexio_write test.db [expr $c_offset+8] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.8.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +# Set the left-child pointer of a cell of the right-child of the root page to +# point back to the root page. 
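# Editorial sketch (not used by the tests): the offsets used throughout
# corruptB-1.* come from the interior-page layout -- bytes 8-11 of the
# page header hold the right-most child pointer, the cell pointer array
# starts at byte 12, and each interior table-b-tree cell begins with its
# 4-byte left-child page number.  The proc below (name assumed) fetches
# the first left child and the right-most child of an interior page:
proc demo_interior_children {filename pgno {pagesize 1024}} {
  set off [expr {($pgno - 1)*$pagesize}]
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd [expr {$off + 8}]
  binary scan [read $fd 4] I right
  seek $fd [expr {$off + 12}]
  binary scan [read $fd 2] S cell0
  seek $fd [expr {$off + ($cell0 & 0xffff)}]
  binary scan [read $fd 4] I left0
  close $fd
  return [list $left0 $right]
}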
+# +do_test corruptB-1.9.1 { + db close + file copy -force bak.db test.db + set cell_offset [hexio_get_int [hexio_read test.db [expr $c_offset+12] 2]] + hexio_write test.db [expr $c_offset+$cell_offset] [hexio_render_int32 $::root] +} {4} +do_test corruptB-1.9.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +#--------------------------------------------------------------------------- + +do_test corruptB-2.1.1 { + db close + file copy -force bak.db test.db + hexio_write test.db [expr $offset+8] [hexio_render_int32 0x6FFFFFFF] +} {4} +do_test corruptB-2.1.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +#--------------------------------------------------------------------------- + +# Corrupt the header-size field of a database record. +# +do_test corruptB-3.1.1 { + db close + file copy -force bak.db test.db + sqlite3 db test.db + set v [string repeat abcdefghij 200] + execsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES($v); + } + set t2_root [execsql {SELECT rootpage FROM sqlite_master WHERE name = 't2'}] + set iPage [expr ($t2_root-1)*1024] + set iCellarray [expr $iPage + 8] + set iRecord [hexio_get_int [hexio_read test.db $iCellarray 2]] + db close + hexio_write test.db [expr $iPage+$iRecord+3] FF00 +} {2} +do_test corruptB-3.1.2 { + sqlite3 db test.db + catchsql { SELECT * FROM t2 } +} {1 {database disk image is malformed}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corruptC.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corruptC.test --- sqlite3-3.4.2/test/corruptC.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corruptC.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,394 @@ +# 2004 August 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to make sure SQLite does not crash or +# segfault if it sees a corrupt database file. It creates a base +# data base file, then tests that single byte corruptions in +# increasingly larger quantities are handled gracefully. +# +# $Id: corruptC.test,v 1.13 2009/06/06 19:21:13 drh Exp $ + +catch {file delete -force test.db test.db-journal test.bu} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Construct a compact, dense database for testing. +# +do_test corruptC-1.1 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA legacy_file_format=1; + BEGIN; + CREATE TABLE t1(x,y); + INSERT INTO t1 VALUES(1,1); + INSERT OR IGNORE INTO t1 SELECT x*2,y FROM t1; + INSERT OR IGNORE INTO t1 SELECT x*3,y FROM t1; + INSERT OR IGNORE INTO t1 SELECT x*5,y FROM t1; + INSERT OR IGNORE INTO t1 SELECT x*7,y FROM t1; + INSERT OR IGNORE INTO t1 SELECT x*11,y FROM t1; + INSERT OR IGNORE INTO t1 SELECT x*13,y FROM t1; + CREATE INDEX t1i1 ON t1(x); + CREATE TABLE t2 AS SELECT x,2 as y FROM t1 WHERE rowid%5!=0; + COMMIT; + } +} {} + +ifcapable {integrityck} { + integrity_check corruptC-1.2 +} + +# Generate random integer +# +proc random {range} { + return [expr {round(rand()*$range)}] +} + +# Copy file $from into $to +# +proc copy_file {from to} { + file copy -force $from $to +} + +# Setup for the tests. 
Make a backup copy of the good database in test.bu. +# +db close +copy_file test.db test.bu +sqlite3 db test.db +set fsize [file size test.db] + +# Set a quasi-random random seed. +if {[info exists SOAKTEST]} { + # If we are doing SOAK tests, we want a different + # random seed for each run. Ideally we would like + # to use [clock clicks] or something like that here. + set qseed [file mtime test.db] +} else { + # If we are not doing soak tests, + # make it repeatable. + set qseed 0 +} +expr srand($qseed) + +# +# First test some specific corruption tests found from earlier runs +# with specific seeds. +# + +# test that a corrupt content offset size is handled (seed 5577) +do_test corruptC-2.1 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 2053 [format %02x 0x04] + + sqlite3 db test.db + catchsql {PRAGMA integrity_check} +} {1 {database disk image is malformed}} + +# test that a corrupt content offset size is handled (seed 5649) +do_test corruptC-2.2 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 27 [format %02x 0x08] + hexio_write test.db 233 [format %02x 0x6a] + hexio_write test.db 328 [format %02x 0x67] + hexio_write test.db 750 [format %02x 0x1f] + hexio_write test.db 1132 [format %02x 0x52] + hexio_write test.db 1133 [format %02x 0x84] + hexio_write test.db 1220 [format %02x 0x01] + hexio_write test.db 3688 [format %02x 0xc1] + hexio_write test.db 3714 [format %02x 0x58] + hexio_write test.db 3746 [format %02x 0x9a] + + sqlite3 db test.db + catchsql {UPDATE t1 SET y=1} +} {1 {database disk image is malformed}} + +# test that a corrupt free cell size is handled (seed 13329) +do_test corruptC-2.3 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 1094 [format %02x 0x76] + + sqlite3 db test.db + catchsql {UPDATE t1 SET y=1} +} {1 {database disk image is malformed}} + +# test that a corrupt free cell size is handled (seed 169571) +do_test corruptC-2.4 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 3119 [format %02x 0xdf] + + sqlite3 db test.db + catchsql {UPDATE t2 SET y='abcdef-uvwxyz'} +} {1 {database disk image is malformed}} + +# test that a corrupt free cell size is handled (seed 169571) +do_test corruptC-2.5 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 3119 [format %02x 0xdf] + hexio_write test.db 4073 [format %02x 0xbf] + + sqlite3 db test.db + catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} + catchsql {PRAGMA integrity_check} +} {0 {{*** in database main *** +Corruption detected in cell 710 on page 4 +Multiple uses for byte 661 of page 4 +Fragmented space is 249 byte reported as 21 on page 4}}} + +# test that a corrupt free cell size is handled (seed 169595) +do_test corruptC-2.6 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 619 [format %02x 0xe2] + hexio_write test.db 3150 [format %02x 0xa8] + + sqlite3 db test.db + catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 178692) +do_test corruptC-2.7 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 3074 [format %02x 0xa0] + + sqlite3 db test.db + catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 179069) +do_test corruptC-2.8 { + db close + copy_file test.bu test.db + + # 
insert corrupt byte(s) + hexio_write test.db 1393 [format %02x 0x7d] + hexio_write test.db 84 [format %02x 0x19] + hexio_write test.db 3287 [format %02x 0x3b] + hexio_write test.db 2564 [format %02x 0xed] + hexio_write test.db 2139 [format %02x 0x55] + + sqlite3 db test.db + catchsql {BEGIN; DELETE FROM t1 WHERE x>13; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 170434) +do_test corruptC-2.9 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 2095 [format %02x 0xd6] + + sqlite3 db test.db + catchsql {BEGIN; DELETE FROM t1 WHERE x>13; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 186504) +do_test corruptC-2.10 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 3130 [format %02x 0x02] + + sqlite3 db test.db + catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 1589) +do_test corruptC-2.11 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 55 [format %02x 0xa7] + + sqlite3 db test.db + catchsql {BEGIN; CREATE TABLE t3 AS SELECT x,3 as y FROM t2 WHERE rowid%5!=0; ROLLBACK;} +} {1 {database disk image is malformed}} + +# corruption (seed 14166) +do_test corruptC-2.12 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 974 [format %02x 0x2e] + + sqlite3 db test.db + catchsql {SELECT count(*) FROM sqlite_master;} +} {1 {malformed database schema (t1i1) - corrupt database}} + +# corruption (seed 218803) +do_test corruptC-2.13 { + db close + copy_file test.bu test.db + + # insert corrupt byte(s) + hexio_write test.db 102 [format %02x 0x12] + + sqlite3 db test.db + catchsql {BEGIN; CREATE TABLE t3 AS SELECT x,3 as y FROM t2 WHERE rowid%5!=0; ROLLBACK;} +} {1 {database disk image is malformed}} + +do_test corruptC-2.14 { + db close + copy_file test.bu test.db + + sqlite3 db test.db + set blob [string repeat abcdefghij 10000] + execsql { INSERT INTO t1 VALUES (1, $blob) } + + sqlite3 db test.db + set filesize [file size test.db] + hexio_write test.db [expr $filesize-2048] 00000001 + catchsql {DELETE FROM t1 WHERE rowid = (SELECT max(rowid) FROM t1)} +} {1 {database disk image is malformed}} + +# +# Now test for a series of quasi-random seeds. +# We loop over the entire file size and touch +# each byte at least once. +for {set tn 0} {$tn<$fsize} {incr tn 1} { + + # setup for test + db close + copy_file test.bu test.db + sqlite3 db test.db + + # Seek to a random location in the file, and write a random single byte + # value. Then do various operations on the file to make sure that + # the database engine can handle the corruption gracefully. + # + set last 0 + for {set i 1} {$i<=512 && !$last} {incr i 1} { + + db close + if {$i==1} { + # on the first corrupt value, use location $tn + # this ensures that we touch each location in the + # file at least once. + set roffset $tn + } else { + # insert random byte at random location + set roffset [random $fsize] + } + set rbyte [format %02x [random 255]] + + # You can uncomment the following to have it trace + # exactly how it's corrupting the file. This is + # useful for generating the "seed specific" tests + # above. + # set rline "$roffset $rbyte" + # puts stdout $rline + + hexio_write test.db $roffset $rbyte + sqlite3 db test.db + + # do a few random operations to make sure that if + # they error, they error gracefully instead of crashing. 
+ do_test corruptC-3.$tn.($qseed).$i.1 { + catchsql {SELECT count(*) FROM sqlite_master} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.2 { + catchsql {SELECT count(*) FROM t1} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.3 { + catchsql {SELECT count(*) FROM t1 WHERE x>13} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.4 { + catchsql {SELECT count(*) FROM t2} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.5 { + catchsql {SELECT count(*) FROM t2 WHERE x<13} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.6 { + catchsql {BEGIN; UPDATE t1 SET y=1; ROLLBACK;} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.7 { + catchsql {BEGIN; UPDATE t2 SET y='abcdef-uvwxyz'; ROLLBACK;} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.8 { + catchsql {BEGIN; DELETE FROM t1 WHERE x>13; ROLLBACK;} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.9 { + catchsql {BEGIN; DELETE FROM t2 WHERE x<13; ROLLBACK;} + set x {} + } {} + do_test corruptC-3.$tn.($qseed).$i.10 { + catchsql {BEGIN; CREATE TABLE t3 AS SELECT x,3 as y FROM t2 WHERE rowid%5!=0; ROLLBACK;} + set x {} + } {} + + # check the integrity of the database. + # once the corruption is detected, we can stop. + ifcapable {integrityck} { + set res [ catchsql {PRAGMA integrity_check} ] + set ans [lindex $res 1] + if { [ string compare $ans "ok" ] != 0 } { + set last -1 + } + } + # if we are not capable of doing an integrity check, + # stop after corrupting 5 bytes. + ifcapable {!integrityck} { + if { $i > 5 } { + set last -1 + } + } + + # Check that no page references were leaked. + # TBD: need to figure out why this doesn't work + # work with ROLLBACKs... + if {0} { + do_test corruptC-3.$tn.($qseed).$i.11 { + set bt [btree_from_db db] + db_enter db + array set stats [btree_pager_stats $bt] + db_leave db + set stats(ref) + } {0} + } + } + # end for i + +} +# end for tn + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corruptD.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corruptD.test --- sqlite3-3.4.2/test/corruptD.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/corruptD.test 2009-06-05 18:09:12.000000000 +0100 @@ -0,0 +1,135 @@ +# 2009 June 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: corruptD.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +#-------------------------------------------------------------------------- +# OVERVIEW +# +# This test file attempts to verify that SQLite does not read past the +# end of any in-memory buffers as a result of corrupted database page +# images. Usually this happens because a field within a database page +# that contains an offset to some other structure within the same page +# is set to too large a value. A database page contains the following +# such fields: +# +# 1. The page header field that contains the offset to the first +# free block of space. +# +# 2. The first two bytes of all but the last free block on the free-block +# list (the offset to the next free block). +# +# 3. The page header field containing the number of cells on the page +# (implicitly defines the offset to the final element in the cell offset +# array, which could potentially be off the end of the page). 
+# +# 4. The page header field containing the offset to the start of the cell +# content area. +# +# 5. The contents of the cell offset array. +# +# 6. The first few bytes of each cell determine the size of the cell +# stored within the page, and hence the offset to the final byte of +# the cell. +# +# If any of the above fields are set to too large a value, then a buffer +# overread may occur. This test script creates and operates on various +# strategically corrupted database files to attempt to provoke such buffer +# overreads. +# +# Very often, a buffer overread passes unnoticed, particularly in workstation +# environments. For this reason, this test script should be run using valgrind +# (or similar) in order to verify that no overreads occur. +# +# TEST PLAN +# +# Test cases corruptD-1.* are white-box tests. They attempt to corrupt +# one of the above fields, then exercise each part of the code in btree.c +# that uses said field. +# +# Offset variables 1, 2, 3 and 4 are all checked to make sure they +# will not result in buffer overruns as part of page initialization in +# sqlite3BtreeInitPage(). Offsets 5 and 6 cannot be tested as part of +# page initialization, as trying to do so causes a performance hit. +# + +do_test corruptD-1.0 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + } + for {set ii 1} {$ii < 50} {incr ii} { + execsql { INSERT INTO t1 VALUES($ii, $ii * $ii) } + } + execsql { + DELETE FROM t1 WHERE a = 10; + DELETE FROM t1 WHERE a = 20; + DELETE FROM t1 WHERE a = 30; + DELETE FROM t1 WHERE a = 40; + } + copy_file test.db test.bu +} {} + +proc incr_change_counter {} { + hexio_write test.db 24 [ + hexio_render_int32 [expr [hexio_get_int [hexio_read test.db 24 4]] + 1] + ] +} + +proc restore_file {} { + db close + copy_file test.bu test.db + sqlite3 db test.db +} + +#------------------------------------------------------------------------- +# The following tests, corruptD-1.1.*, focus on the page header field +# containing the offset of the first free block in a page. +# +do_test corruptD-1.1.1 { + incr_change_counter + hexio_write test.db [expr 1024+1] FFFF + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} +do_test corruptD-1.1.2 { + incr_change_counter + hexio_write test.db [expr 1024+1] [hexio_render_int32 1021] + catchsql { SELECT * FROM t1 } +} {1 {database disk image is malformed}} + +#------------------------------------------------------------------------- +# The following tests, corruptD-1.2.*, focus on the offsets contained +# in the first 2 byte of each free-block on the free-list. +# +do_test corruptD-1.2.1 { + restore_file +} {} +do_test corruptD-1.2.2 { +} {} + +#------------------------------------------------------------------------- +# The following tests, corruptD-1.4.*, ... +# + + +#------------------------------------------------------------------------- +# The following tests, corruptD-1.5.*, focus on the offsets contained +# in the cell offset array. +# +# defragmentPage +# + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/corrupt.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/corrupt.test --- sqlite3-3.4.2/test/corrupt.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/corrupt.test 2009-06-05 18:03:02.000000000 +0100 @@ -13,10 +13,9 @@ # This file implements tests to make sure SQLite does not crash or # segfault if it sees a corrupt database file. 
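# Editorial sketch (not used by the tests): items 1, 3 and 4 of the
# corruptD overview above live in the b-tree page header -- after the flag
# byte, bytes 1-2 hold the offset of the first freeblock (the field that
# corruptD-1.1.* sets to FFFF), bytes 3-4 the number of cells, bytes 5-6
# the start of the cell content area and byte 7 the count of fragmented
# free bytes.  The proc below (name assumed) decodes those fields:
proc demo_page_header {filename pgno {pagesize 1024}} {
  set off [expr {($pgno - 1)*$pagesize + ($pgno == 1 ? 100 : 0)}]
  set fd [open $filename r]
  fconfigure $fd -translation binary -encoding binary
  seek $fd $off
  binary scan [read $fd 8] cSSSc flag freeblock ncell content frag
  close $fd
  return [list flags     [format 0x%02X [expr {$flag & 0xff}]] \
               freeblock [expr {$freeblock & 0xffff}] \
               ncell     [expr {$ncell & 0xffff}] \
               content   [expr {$content & 0xffff}] \
               frag      [expr {$frag & 0xff}]]
}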
# -# $Id: corrupt.test,v 1.8 2005/02/19 08:18:06 danielk1977 Exp $ +# $Id: corrupt.test,v 1.10 2008/08/25 12:14:09 drh Exp $ -catch {file delete -force test.db} -catch {file delete -force test.db-journal} +catch {file delete -force test.db test.db-journal test.bu} set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -110,6 +109,15 @@ catchsql {PRAGMA integrity_check} set x {} } {} + + # Check that no page references were leaked. + do_test corrupt-2.$tn.8 { + set bt [btree_from_db db] + db_enter db + array set stats [btree_pager_stats $bt] + db_leave db + set stats(ref) + } {0} } #------------------------------------------------------------------------ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/count.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/count.test --- sqlite3-3.4.2/test/count.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/count.test 2009-06-25 12:35:51.000000000 +0100 @@ -0,0 +1,192 @@ +# 2009 February 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing "SELECT count(*)" statements. +# +# $Id: count.test,v 1.6 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test plan: +# +# count-0.*: Make sure count(*) works on an empty database. (Ticket #3774) +# +# count-1.*: Test that the OP_Count instruction appears to work on both +# tables and indexes. Test both when they contain 0 entries, +# when all entries are on the root page, and when the b-tree +# forms a structure 2 and 3 levels deep. 
+# +# count-2.*: Test that +# +# + +do_test count-0.1 { + db eval { + SELECT count(*) FROM sqlite_master; + } +} {0} + +set iTest 0 +foreach zIndex [list { + /* no-op */ +} { + CREATE INDEX i1 ON t1(a); +}] { + incr iTest + do_test count-1.$iTest.1 { + execsql { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(a, b); + } + execsql $zIndex + execsql { SELECT count(*) FROM t1 } + } {0} + + do_test count-1.$iTest.2 { + execsql { + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + SELECT count(*) FROM t1; + } + } {2} + + do_test count-1.$iTest.3 { + execsql { + INSERT INTO t1 SELECT * FROM t1; -- 4 + INSERT INTO t1 SELECT * FROM t1; -- 8 + INSERT INTO t1 SELECT * FROM t1; -- 16 + INSERT INTO t1 SELECT * FROM t1; -- 32 + INSERT INTO t1 SELECT * FROM t1; -- 64 + INSERT INTO t1 SELECT * FROM t1; -- 128 + INSERT INTO t1 SELECT * FROM t1; -- 256 + SELECT count(*) FROM t1; + } + } {256} + + do_test count-1.$iTest.4 { + execsql { + INSERT INTO t1 SELECT * FROM t1; -- 512 + INSERT INTO t1 SELECT * FROM t1; -- 1024 + INSERT INTO t1 SELECT * FROM t1; -- 2048 + INSERT INTO t1 SELECT * FROM t1; -- 4096 + SELECT count(*) FROM t1; + } + } {4096} + + do_test count-1.$iTest.5 { + execsql { + BEGIN; + INSERT INTO t1 SELECT * FROM t1; -- 8192 + INSERT INTO t1 SELECT * FROM t1; -- 16384 + INSERT INTO t1 SELECT * FROM t1; -- 32768 + INSERT INTO t1 SELECT * FROM t1; -- 65536 + COMMIT; + SELECT count(*) FROM t1; + } + } {65536} +} + +proc uses_op_count {sql} { + if {[lsearch [execsql "EXPLAIN $sql"] Count]>=0} { + return 1; + } + return 0 +} + +do_test count-2.1 { + execsql { + CREATE TABLE t2(a, b); + } + uses_op_count {SELECT count(*) FROM t2} +} {1} +do_test count-2.2 { + catchsql {SELECT count(DISTINCT *) FROM t2} +} {1 {near "*": syntax error}} +do_test count-2.3 { + uses_op_count {SELECT count(DISTINCT a) FROM t2} +} {0} +do_test count-2.4 { + uses_op_count {SELECT count(a) FROM t2} +} {0} +do_test count-2.5 { + uses_op_count {SELECT count() FROM t2} +} {1} +do_test count-2.6 { + catchsql {SELECT count(DISTINCT) FROM t2} +} {1 {DISTINCT aggregates must have exactly one argument}} +do_test count-2.7 { + uses_op_count {SELECT count(*)+1 FROM t2} +} {0} +do_test count-2.8 { + uses_op_count {SELECT count(*) FROM t2 WHERE a IS NOT NULL} +} {0} +do_test count-2.9 { + catchsql {SELECT count(*) FROM t2 HAVING count(*)>1} +} {1 {a GROUP BY clause is required before HAVING}} +do_test count-2.10 { + uses_op_count {SELECT count(*) FROM (SELECT 1)} +} {0} +do_test count-2.11 { + execsql { CREATE VIEW v1 AS SELECT 1 AS a } + uses_op_count {SELECT count(*) FROM v1} +} {0} +do_test count-2.12 { + uses_op_count {SELECT count(*), max(a) FROM t2} +} {0} +do_test count-2.13 { + uses_op_count {SELECT count(*) FROM t1, t2} +} {0} + +ifcapable vtab { + register_echo_module [sqlite3_connection_pointer db] + do_test count-2.14 { + execsql { CREATE VIRTUAL TABLE techo USING echo(t1); } + uses_op_count {SELECT count(*) FROM techo} + } {0} +} + +do_test count-3.1 { + execsql { + CREATE TABLE t3(a, b); + SELECT a FROM (SELECT count(*) AS a FROM t3) WHERE a==0; + } +} {0} +do_test count-3.2 { + execsql { + SELECT a FROM (SELECT count(*) AS a FROM t3) WHERE a==1; + } +} {} + +do_test count-4.1 { + execsql { + CREATE TABLE t4(a, b); + INSERT INTO t4 VALUES('a', 'b'); + CREATE INDEX t4i1 ON t4(b, a); + SELECT count(*) FROM t4; + } +} {1} +do_test count-4.2 { + execsql { + CREATE INDEX t4i2 ON t4(b); + SELECT count(*) FROM t4; + } +} {1} +do_test count-4.3 { + execsql { + DROP INDEX t4i1; + CREATE INDEX t4i1 ON t4(b, a); + SELECT 
count(*) FROM t4; + } +} {1} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash2.test --- sqlite3-3.4.2/test/crash2.test 2007-04-06 22:42:22.000000000 +0100 +++ sqlite3-3.6.16/test/crash2.test 2009-06-05 18:03:04.000000000 +0100 @@ -16,7 +16,7 @@ # specifically, the tests in this file verify this functionality # for storage mediums with various sector sizes. # -# $Id: crash2.test,v 1.4 2007/04/06 21:42:22 drh Exp $ +# $Id: crash2.test,v 1.6 2008/08/25 07:12:29 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -26,6 +26,8 @@ return } +db close + # This test is designed to check that the crash-test infrastructure # can create files that do not consist of an integer number of # simulated disk blocks (i.e. 3KB file using 2KB disk blocks). @@ -33,6 +35,7 @@ do_test crash2-1.1 { crashsql -delay 500 -file test.db -blocksize 2048 { PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; BEGIN; CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; CREATE TABLE def AS SELECT 1 AS d, 2 AS e, 3 AS f; @@ -53,10 +56,11 @@ # of different seeds for the random number generator. # do_test crash2-1.2.$ii { - crashsql -file test.db -blocksize 2048 " + crashsql -file test.db -blocksize 2048 [subst { [string repeat {SELECT random();} $ii] CREATE TABLE hij(h, i, j); - " + }] + sqlite3 db test.db db eval {PRAGMA integrity_check} } {ok} } @@ -86,8 +90,10 @@ for {set i 1} {$i < 30} {incr i} { set sig [signature] set sector [expr 1024 * 1<<($i%4)] - do_test crash-2.$i.1 { + db close + do_test crash2-2.$i.1 { crashsql -blocksize $sector -delay [expr $i%5 + 1] -file test.db-journal " + PRAGMA temp_store = memory; BEGIN; SELECT random() FROM abc LIMIT $i; INSERT INTO abc SELECT randstr(10,10), 0, 0 FROM abc WHERE random()%2==0; @@ -95,7 +101,8 @@ COMMIT; " } {1 {child process exited abnormally}} - do_test crash-2.$i.2 { + do_test crash2-2.$i.2 { + sqlite3 db test.db signature } $sig } @@ -107,7 +114,8 @@ for {set i 1} {$i < 10} {incr i} { set sig [signature] set sector [expr 1024 * 1<<($i%4)] - do_test crash-3.$i.1 { + db close + do_test crash2-3.$i.1 { crashsql -blocksize $sector -file test.db " BEGIN; SELECT random() FROM abc LIMIT $i; @@ -116,7 +124,8 @@ COMMIT; " } {1 {child process exited abnormally}} - do_test crash-3.$i.2 { + do_test crash2-3.$i.2 { + sqlite3 db test.db signature } $sig } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash3.test --- sqlite3-3.4.2/test/crash3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash3.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,190 @@ +# 2007 August 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests that verify that SQLite can correctly rollback +# databases after crashes when using the special IO modes triggered +# by device IOCAP flags. 
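The crashsql helper used by these tests accepts a -char option (seen below with values such as atomic and {sequential atomic}) that makes the simulated device advertise the corresponding IOCAP characteristic. The following is a minimal sketch of that calling pattern, written in the style of the tests that follow; the table, data and test name are illustrative only.

# Hypothetical sketch: build a tiny database, then run one more
# transaction against a simulated "atomic" device, scheduling a crash
# for the first sync of the journal file (if any).  After reopening,
# the database must still pass an integrity check.
catch {db close}
file delete -force test.db test.db-journal
sqlite3 db test.db
do_test crash3-sketch.1 {
  execsql {
    CREATE TABLE abc(a, b, c);
    INSERT INTO abc VALUES(1, 2, 3);
  }
  db close
  crashsql -file test.db-journal -char atomic {
    INSERT INTO abc VALUES(4, 5, 6);
  }
  sqlite3 db test.db
  execsql { PRAGMA integrity_check }
} {ok}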
+# +# $Id: crash3.test,v 1.4 2008/07/12 14:52:20 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +proc do_test2 {name tcl res1 res2} { + set script [subst -nocommands { + do_test $name { + set res1 {$res1} + set res2 {$res2} + set res [eval {$tcl}] + if {[set res] eq [set res1] || [set res] eq [set res2]} { + set res "{[set res1]} or {[set res2]}" + } + set res + } {{$res1} or {$res2}} + }] + uplevel $script +} + +# This block tests crash-recovery when the IOCAP_ATOMIC flags is set. +# +# Each iteration of the following loop sets up the database to contain +# the following schema and data: +# +# CREATE TABLE abc(a, b, c); +# INSERT INTO abc VALUES(1, 2, 3); +# +# Then execute the SQL statement, scheduling a crash for part-way through +# the first sync() of either the database file or the journal file (often +# the journal file is not required - meaning no crash occurs). +# +# After the crash (or absence of a crash), open the database and +# verify that: +# +# * The integrity check passes, and +# * The contents of table abc is either {1 2 3} or the value specified +# to the right of the SQL statement below. +# +# The procedure is repeated 10 times for each SQL statement. Five times +# with the crash scheduled for midway through the first journal sync (if +# any), and five times with the crash midway through the database sync. +# +set tn 1 +foreach {sql res2} [list \ + {INSERT INTO abc VALUES(4, 5, 6)} {1 2 3 4 5 6} \ + {DELETE FROM abc} {} \ + {INSERT INTO abc SELECT * FROM abc} {1 2 3 1 2 3} \ + {UPDATE abc SET a = 2} {2 2 3} \ + {INSERT INTO abc VALUES(4, 5, randstr(1000,1000))} {n/a} \ + {CREATE TABLE def(d, e, f)} {n/a} \ +] { + for {set ii 0} {$ii < 10} {incr ii} { + + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + do_test crash3-1.$tn.1 { + execsql { + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + COMMIT; + } + } {} + db close + + set crashfile test.db + if {($ii%2)==0} { append crashfile -journal } + set rand "SELECT randstr($tn,$tn);" + do_test crash3-1.$tn.2 [subst { + crashsql -file $crashfile -char atomic {$rand $sql} + sqlite3 db test.db + execsql { PRAGMA integrity_check; } + }] {ok} + + do_test2 crash3-1.$tn.3 { + execsql { SELECT * FROM abc } + } {1 2 3} $res2 + + incr tn + } +} + +# This block tests both the IOCAP_SEQUENTIAL and IOCAP_SAFE_APPEND flags. 
+# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db +do_test crash3-2.0 { + execsql { + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + CREATE TABLE def(d PRIMARY KEY, e, f); + PRAGMA default_cache_size = 10; + INSERT INTO abc VALUES(randstr(10,1000),randstr(10,1000),randstr(10,1000)); + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) FROM abc; + COMMIT; + } +} {} + +set tn 1 +foreach {::crashfile ::delay ::char} { + test.db 1 sequential + test.db 1 safe_append + test.db-journal 1 sequential + test.db-journal 1 safe_append + test.db-journal 2 safe_append + test.db-journal 2 sequential + test.db-journal 3 sequential + test.db-journal 3 safe_append +} { + for {set ii 0} {$ii < 100} {incr ii} { + set ::SQL [subst { + SELECT randstr($ii,$ii+10); + BEGIN; + DELETE FROM abc WHERE random()%5; + INSERT INTO abc + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) + FROM abc + WHERE (random()%5)==0; + DELETE FROM def WHERE random()%5; + INSERT INTO def + SELECT randstr(10,1000),randstr(10,1000),randstr(10,1000) + FROM def + WHERE (random()%5)==0; + COMMIT; + }] + + do_test crash3-2.$tn.$ii { + crashsql -file $::crashfile -delay $::delay -char $::char $::SQL + db close + sqlite3 db test.db + execsql {PRAGMA integrity_check} + } {ok} + } + incr tn +} + +# The following block tests an interaction between IOCAP_ATOMIC and +# IOCAP_SEQUENTIAL. At one point, if both flags were set, small +# journal files that contained only a single page, but were required +# for some other reason (i.e. nTrunk) were not being written to +# disk. +# +for {set ii 0} {$ii < 10} {incr ii} { + db close + file delete -force test.db test.db-journal + crashsql -file test.db -char {sequential atomic} { + CREATE TABLE abc(a, b, c); + } + sqlite3 db test.db + do_test crash3-3.$ii { + execsql {PRAGMA integrity_check} + } {ok} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash4.test --- sqlite3-3.4.2/test/crash4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash4.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,102 @@ +# 2008 January 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains additional tests to verify that SQLite database +# file survive a power loss or OS crash. 
+# +# $Id: crash4.test,v 1.3 2008/01/16 17:46:38 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + + +# A sequence of SQL commands: +# +set sql_cmd_list { + {CREATE TABLE a(id INTEGER, name CHAR(50))} + {INSERT INTO a(id,name) VALUES(1,'one')} + {INSERT INTO a(id,name) VALUES(2,'two')} + {INSERT INTO a(id,name) VALUES(3,'three')} + {INSERT INTO a(id,name) VALUES(4,'four')} + {INSERT INTO a(id,name) VALUES(5,'five')} + {INSERT INTO a(id,name) VALUES(6,'six')} + {INSERT INTO a(id,name) VALUES(7,'seven')} + {INSERT INTO a(id,name) VALUES(8,'eight')} + {INSERT INTO a(id,name) VALUES(9,'nine')} + {INSERT INTO a(id,name) VALUES(10,'ten')} + {UPDATE A SET name='new text for row 3' WHERE id=3} +} + +# Assume that a database is created by evaluating the SQL statements +# in $sql_cmd_list. Compute a set of checksums that capture the state +# of the database after each statement. Also include a checksum for +# the state of the database prior to any of these statements. +# +set crash4_cksum_set {} +lappend crash4_cksum_set [allcksum db] +foreach cmd $sql_cmd_list { + db eval $cmd + lappend crash4_cksum_set [allcksum db] +} + +# Run the sequence of SQL statements shown above repeatedly. +# Close and reopen the database right before the UPDATE statement. +# On each repetition, introduce database corruption typical of +# what might be seen in a power loss or OS crash. +# +# Slowly increase the delay before the crash, repeating the test +# over and over. Stop testing when the entire sequence of SQL +# statements runs to completing without hitting the crash. +# +for {set cnt 1; set fin 0} {!$fin} {incr cnt} { + db close + file delete -force test.db test.db-journal + do_test crash4-1.$cnt.1 { + set seed [expr {int(abs(rand()*10000))}] + set delay [expr {int($cnt/50)+1}] + set file [expr {($cnt&1)?"test.db":"test.db-journal"}] + set c [crashsql -delay $delay -file $file -seed $seed -tclbody { + db eval {CREATE TABLE a(id INTEGER, name CHAR(50))} + db eval {INSERT INTO a(id,name) VALUES(1,'one')} + db eval {INSERT INTO a(id,name) VALUES(2,'two')} + db eval {INSERT INTO a(id,name) VALUES(3,'three')} + db eval {INSERT INTO a(id,name) VALUES(4,'four')} + db eval {INSERT INTO a(id,name) VALUES(5,'five')} + db eval {INSERT INTO a(id,name) VALUES(6,'six')} + db eval {INSERT INTO a(id,name) VALUES(7,'seven')} + db eval {INSERT INTO a(id,name) VALUES(8,'eight')} + db eval {INSERT INTO a(id,name) VALUES(9,'nine')} + db eval {INSERT INTO a(id,name) VALUES(10,'ten')} + db close + sqlite3 db test.db + db eval {UPDATE A SET name='new text for row 3' WHERE id=3} + db close + } {}] + if {$c==[list 0 {}]} { + set ::fin 1 + set c [list 1 {child process exited abnormally}] + } + set c + } {1 {child process exited abnormally}} + sqlite3 db test.db + integrity_check crash4-1.$cnt.2 + do_test crash4-1.$cnt.3 { + set x [lsearch $::crash4_cksum_set [allcksum db]] + expr {$x>=0} + } {1} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash5.test --- sqlite3-3.4.2/test/crash5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash5.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,110 @@ + +# 2007 Aug 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# This file tests aspects of recovery from a malloc() failure +# in a CREATE INDEX statement. +# +# $Id: crash5.test,v 1.3 2008/07/12 14:52:20 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Only run these tests if memory debugging is turned on. +# +ifcapable !memdebug||!crashtest||!memorymanage { + puts "Skipping crash5 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +db close + +for {set ii 0} {$ii < 10} {incr ii} { + for {set jj 50} {$jj < 100} {incr jj} { + + # Set up the database so that it is an auto-vacuum database + # containing a single table (root page 3) with a single row. + # The row has an overflow page (page 4). + file delete -force test.db test.db-journal + sqlite3 db test.db + set c [string repeat 3 1500] + db eval { + pragma auto_vacuum = 1; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES('1111111111', '2222222222', $c); + } + db close + + do_test crash5-$ii.$jj.1 { + crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \ + [list set iFail $jj] { + sqlite3_crashparams 0 [file join [pwd] test.db-journal] + + # Begin a transaction and evaluate a "CREATE INDEX" statement + # with the iFail'th malloc() set to fail. This operation will + # have to move the current contents of page 4 (the overflow + # page) to make room for the new root page. The bug is that + # if malloc() fails at a particular point in sqlite3PagerMovepage(), + # sqlite mistakenly thinks that the page being moved (page 4) has + # been safely synced into the journal. If the page is written + # to later in the transaction, it may be written out to the database + # before the relevant part of the journal has been synced. + # + db eval BEGIN + sqlite3_memdebug_fail $iFail -repeat 0 + catch {db eval { CREATE UNIQUE INDEX i1 ON t1(a); }} msg + # puts "$n $msg ac=[sqlite3_get_autocommit db]" + + # If the transaction is still active (it may not be if the malloc() + # failure occured in the OS layer), write to the database. Make sure + # page 4 is among those written. + # + if {![sqlite3_get_autocommit db]} { + db eval { + DELETE FROM t1; -- This will put page 4 on the free list. + INSERT INTO t1 VALUES('111111111', '2222222222', '33333333'); + INSERT INTO t1 SELECT * FROM t1; -- 2 + INSERT INTO t1 SELECT * FROM t1; -- 4 + INSERT INTO t1 SELECT * FROM t1; -- 8 + INSERT INTO t1 SELECT * FROM t1; -- 16 + INSERT INTO t1 SELECT * FROM t1; -- 32 + INSERT INTO t1 SELECT * FROM t1 WHERE rowid%2; -- 48 + } + } + + # If the right malloc() failed during the 'CREATE INDEX' above and + # the transaction was not rolled back, then the sqlite cache now + # has a dirty page 4 that it incorrectly believes is already safely + # in the synced part of the journal file. When + # sqlite3_release_memory() is called sqlite tries to free memory + # by writing page 4 out to the db file. If it crashes later on, + # before syncing the journal... Corruption! 
+ # + sqlite3_crashparams 1 [file join [pwd] test.db-journal] + sqlite3_release_memory 8092 + }]] {} + expr 1 + } {1} + + sqlite3 db test.db + do_test crash5-$ii.$jj.2 { + db eval {pragma integrity_check} + } {ok} + do_test crash5-$ii.$jj.3 { + db eval {SELECT * FROM t1} + } [list 1111111111 2222222222 $::c] + db close + } +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash6.test --- sqlite3-3.4.2/test/crash6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash6.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,118 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file tests that rollback journals for databases that use a +# page-size other than the default page-size can be rolled back Ok. +# +# $Id: crash6.test,v 1.2 2008/04/14 15:27:19 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +for {set ii 0} {$ii < 10} {incr ii} { + catch {db close} + file delete -force test.db test.db-journal + crashsql -delay 2 -file test.db { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=4096; + BEGIN; + CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; + COMMIT; + BEGIN; + CREATE TABLE def AS SELECT 1 AS d, 2 AS e, 3 AS f; + COMMIT; + } + sqlite3 db test.db + integrity_check crash6-1.$ii +} + +for {set ii 0} {$ii < 10} {incr ii} { + catch {db close} + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=2048; + BEGIN; + CREATE TABLE abc AS SELECT 1 AS a, 2 AS b, 3 AS c; + COMMIT; + } + db close + crashsql -delay 1 -file test.db { + INSERT INTO abc VALUES(5, 6, 7); + } + sqlite3 db test.db + integrity_check crash6-2.$ii +} + +proc signature {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] +} + +# Test case for crashing during database sync with page-size values +# from 1024 to 8192. 
+# +for {set ii 0} {$ii < 30} {incr ii} { + db close + file delete -force test.db + sqlite3 db test.db + + set pagesize [expr 1024 << ($ii % 4)] + if {$pagesize>$::SQLITE_MAX_PAGE_SIZE} { + set pagesize $::SQLITE_MAX_PAGE_SIZE + } + do_test crash6-3.$ii.0 { + execsql "pragma page_size = $pagesize" + execsql "pragma page_size" + } $pagesize + + do_test crash6-3.$ii.1 { + + execsql BEGIN + execsql {CREATE TABLE abc(a, b, c)} + for {set n 0} {$n < 1000} {incr n} { + execsql "INSERT INTO abc VALUES($n, [expr 2*$n], [expr 3*$n])" + } + execsql { + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + execsql COMMIT + expr ([file size test.db] / 1024) > 450 + } {1} + + set sig [signature] + db close + + do_test crash6-3.$ii.2 { + crashsql -file test.db " + BEGIN; + SELECT random() FROM abc LIMIT $ii; + INSERT INTO abc SELECT randstr(10,10), 0, 0 FROM abc WHERE random()%2==0; + DELETE FROM abc WHERE random()%2!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + + do_test crash6-3.$ii.3 { + sqlite3 db test.db + signature + } $sig +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash7.test --- sqlite3-3.4.2/test/crash7.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash7.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,82 @@ +# 2008 March 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: crash7.test,v 1.1 2008/04/03 14:36:26 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +proc signature {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] +} + +foreach f [list test.db test.db-journal] { + for {set ii 1} {$ii < 64} {incr ii} { + db close + file delete test.db + sqlite3 db test.db + + set from_size [expr 1024 << ($ii&3)] + set to_size [expr 1024 << (($ii>>2)&3)] + + execsql " + PRAGMA page_size = $from_size; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc VALUES(randomblob(100), randomblob(200), randomblob(1000)); + INSERT INTO abc + SELECT randomblob(1000), randomblob(200), randomblob(100) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc WHERE [expr $ii&16]; + INSERT INTO abc + SELECT randomblob(25), randomblob(45), randomblob(9456) + FROM abc WHERE [expr $ii&32]; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc WHERE [expr $ii&8]; + INSERT INTO abc + SELECT randomblob(25), randomblob(45), randomblob(9456) + FROM abc WHERE [expr $ii&4]; + COMMIT; + " + + set sig [signature] + db close + + do_test crash7-1.$ii.crash { + crashsql -file $f " + PRAGMA page_size = $to_size; + VACUUM; + " + } {1 {child process exited abnormally}} + + sqlite3 db test.db + integrity_check 
crash7-1.$ii.integrity + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash8.test --- sqlite3-3.4.2/test/crash8.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crash8.test 2009-01-11 00:44:48.000000000 +0000 @@ -0,0 +1,343 @@ +# 2009 January 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test verifies a couple of specific potential data corruption +# scenarios involving crashes or power failures. +# +# Later: Also, some other specific scenarios required for coverage +# testing that do not lead to corruption. +# +# $Id: crash8.test,v 1.4 2009/01/11 00:44:48 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +do_test crash8-1.1 { + execsql { + PRAGMA auto_vacuum=OFF; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + INSERT INTO t1 VALUES(3, randstr(1000,1000)); + INSERT INTO t1 VALUES(4, randstr(1000,1000)); + INSERT INTO t1 VALUES(5, randstr(1000,1000)); + INSERT INTO t1 VALUES(6, randstr(1000,1000)); + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); + CREATE TABLE t4(a, b); + CREATE TABLE t5(a, b); + CREATE TABLE t6(a, b); + CREATE TABLE t7(a, b); + CREATE TABLE t8(a, b); + CREATE TABLE t9(a, b); + CREATE TABLE t10(a, b); + PRAGMA integrity_check + } +} {ok} + + +# Potential corruption scenario 1. A second process opens the database +# and modifies a large portion of it. It then opens a second transaction +# and modifies a small part of the database, but crashes before it commits +# the transaction. +# +# When the first process accessed the database again, it was rolling back +# the aborted transaction, but was not purging its in-memory cache (which +# was loaded before the second process made its first, successful, +# modification). Producing an inconsistent cache. +# +do_test crash8-1.2 { + crashsql -delay 2 -file test.db { + PRAGMA cache_size = 10; + UPDATE t1 SET b = randstr(1000,1000); + INSERT INTO t9 VALUES(1, 2); + } +} {1 {child process exited abnormally}} +do_test crash8-1.3 { + execsql {PRAGMA integrity_check} +} {ok} + +# Potential corruption scenario 2. The second process, operating in +# persistent-journal mode, makes a large change to the database file +# with a small in-memory cache. Such that more than one journal-header +# was written to the file. It then opens a second transaction and makes +# a smaller change that requires only a single journal-header to be +# written to the journal file. The second change is such that the +# journal content written to the persistent journal file exactly overwrites +# the first journal-header and set of subsequent records written by the +# first, successful, change. The second process crashes before it can +# commit its second change. +# +# When the first process accessed the database again, it was rolling back +# the second aborted transaction, then continuing to rollback the second +# and subsequent journal-headers written by the first, successful, change. +# Database corruption. 
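One detail that scenario 2 relies on, stated only implicitly above: in persistent-journal mode a successful commit leaves the journal file on disk (only its header is invalidated), so the larger journal written by the first change is still present to be partially overwritten by the second. A small sketch of that precondition follows, assuming the t2 table created in crash8-1.1; it is illustrative only and not part of the original test sequence.

# Hypothetical sketch: after a committed transaction in
# journal_mode=persist, the rollback journal file still exists on disk.
do_test crash8-sketch.1 {
  execsql {
    PRAGMA journal_mode = persist;
    BEGIN;
    INSERT INTO t2 VALUES(1, 2);
    COMMIT;
  }
  file exists test.db-journal
} {1}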
+# +do_test crash8.2.1 { + crashsql -delay 2 -file test.db { + PRAGMA journal_mode = persist; + PRAGMA cache_size = 10; + UPDATE t1 SET b = randstr(1000,1000); + PRAGMA cache_size = 100; + BEGIN; + INSERT INTO t2 VALUES('a', 'b'); + INSERT INTO t3 VALUES('a', 'b'); + INSERT INTO t4 VALUES('a', 'b'); + INSERT INTO t5 VALUES('a', 'b'); + INSERT INTO t6 VALUES('a', 'b'); + INSERT INTO t7 VALUES('a', 'b'); + INSERT INTO t8 VALUES('a', 'b'); + INSERT INTO t9 VALUES('a', 'b'); + INSERT INTO t10 VALUES('a', 'b'); + COMMIT; + } +} {1 {child process exited abnormally}} + +do_test crash8-2.3 { + execsql {PRAGMA integrity_check} +} {ok} + +proc read_file {zFile} { + set fd [open $zFile] + fconfigure $fd -translation binary + set zData [read $fd] + close $fd + return $zData +} +proc write_file {zFile zData} { + set fd [open $zFile w] + fconfigure $fd -translation binary + puts -nonewline $fd $zData + close $fd +} + +# The following tests check that SQLite will not roll back a hot-journal +# file if the sector-size field in the first journal file header is +# suspect. Definition of suspect: +# +# a) Not a power of 2, or (crash8-3.5) +# b) Greater than 0x01000000 (16MB), or (crash8-3.6) +# c) Less than 512. (crash8-3.7) +# +# Also test that SQLite will not rollback a hot-journal file with a +# suspect page-size. In this case "suspect" means: +# +# a) Not a power of 2, or +# b) Less than 512, or +# c) Greater than SQLITE_MAX_PAGE_SIZE +# +do_test crash8-3.1 { + list [file exists test.db-joural] [file exists test.db] +} {0 1} +do_test crash8-3.2 { + execsql { + PRAGMA synchronous = off; + BEGIN; + DELETE FROM t1; + SELECT count(*) FROM t1; + } +} {0} +do_test crash8-3.3 { + set zJournal [read_file test.db-journal] + execsql { + COMMIT; + SELECT count(*) FROM t1; + } +} {0} +do_test crash8-3.4 { + binary scan [string range $zJournal 20 23] I nSector + set nSector +} {512} + +do_test crash8-3.5 { + set zJournal2 [string replace $zJournal 20 23 [binary format I 513]] + write_file test.db-journal $zJournal2 + + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} +do_test crash8-3.6 { + set zJournal2 [string replace $zJournal 20 23 [binary format I 0x2000000]] + write_file test.db-journal $zJournal2 + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} +do_test crash8-3.7 { + set zJournal2 [string replace $zJournal 20 23 [binary format I 256]] + write_file test.db-journal $zJournal2 + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} + +do_test crash8-3.8 { + set zJournal2 [string replace $zJournal 24 27 [binary format I 513]] + write_file test.db-journal $zJournal2 + + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} +do_test crash8-3.9 { + set big [expr $SQLITE_MAX_PAGE_SIZE * 2] + set zJournal2 [string replace $zJournal 24 27 [binary format I $big]] + write_file test.db-journal $zJournal2 + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} +do_test crash8-3.10 { + set zJournal2 [string replace $zJournal 24 27 [binary format I 256]] + write_file test.db-journal $zJournal2 + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {0 ok} + +do_test crash8-3.11 { + set fd [open test.db-journal w] + fconfigure $fd -translation binary + puts -nonewline $fd $zJournal + close $fd + execsql { + SELECT count(*) FROM t1; + PRAGMA integrity_check + } +} {6 ok} + + +# If a connection running in persistent-journal mode is part of a +# multi-file transaction, it must ensure that the 
master-journal name +# appended to the journal file contents during the commit is located +# at the end of the physical journal file. If there was already a +# large journal file allocated at the start of the transaction, this +# may mean truncating the file so that the master journal name really +# is at the physical end of the file. +# +# This block of tests test that SQLite correctly truncates such +# journal files, and that the results behave correctly if a hot-journal +# rollback occurs. +# +ifcapable pragma { + reset_db + file delete -force test2.db + + do_test crash8-4.1 { + execsql { + PRAGMA journal_mode = persist; + CREATE TABLE ab(a, b); + INSERT INTO ab VALUES(0, 'abc'); + INSERT INTO ab VALUES(1, NULL); + INSERT INTO ab VALUES(2, NULL); + INSERT INTO ab VALUES(3, NULL); + INSERT INTO ab VALUES(4, NULL); + INSERT INTO ab VALUES(5, NULL); + INSERT INTO ab VALUES(6, NULL); + UPDATE ab SET b = randstr(1000,1000); + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.ab(a, b); + INSERT INTO aux.ab SELECT * FROM main.ab; + + UPDATE aux.ab SET b = randstr(1000,1000) WHERE a>=1; + UPDATE ab SET b = randstr(1000,1000) WHERE a>=1; + } + list [file exists test.db-journal] [file exists test2.db-journal] + } {1 1} + + do_test crash8-4.2 { + execsql { + BEGIN; + UPDATE aux.ab SET b = 'def' WHERE a = 0; + UPDATE main.ab SET b = 'def' WHERE a = 0; + COMMIT; + } + } {} + + do_test crash8-4.3 { + execsql { + UPDATE aux.ab SET b = randstr(1000,1000) WHERE a>=1; + UPDATE ab SET b = randstr(1000,1000) WHERE a>=1; + } + } {} + + set contents_main [db eval {SELECT b FROM main.ab WHERE a = 1}] + set contents_aux [db eval {SELECT b FROM aux.ab WHERE a = 1}] + + do_test crash8-4.4 { + crashsql -file test2.db -delay 1 { + ATTACH 'test2.db' AS aux; + BEGIN; + UPDATE aux.ab SET b = 'ghi' WHERE a = 0; + UPDATE main.ab SET b = 'ghi' WHERE a = 0; + COMMIT; + } + } {1 {child process exited abnormally}} + + do_test crash8-4.5 { + list [file exists test.db-journal] [file exists test2.db-journal] + } {1 1} + + do_test crash8-4.6 { + execsql { + SELECT b FROM main.ab WHERE a = 0; + SELECT b FROM aux.ab WHERE a = 0; + } + } {def def} + + do_test crash8-4.7 { + crashsql -file test2.db -delay 1 { + ATTACH 'test2.db' AS aux; + BEGIN; + UPDATE aux.ab SET b = 'jkl' WHERE a = 0; + UPDATE main.ab SET b = 'jkl' WHERE a = 0; + COMMIT; + } + } {1 {child process exited abnormally}} + + do_test crash8-4.8 { + set fd [open test.db-journal] + fconfigure $fd -translation binary + seek $fd -16 end + binary scan [read $fd 4] I len + + seek $fd [expr {-1 * ($len + 16)}] end + set zMasterJournal [read $fd $len] + close $fd + + file exists $zMasterJournal + } {1} + + do_test crash8-4.9 { + execsql { SELECT b FROM aux.ab WHERE a = 0 } + } {def} + + do_test crash8-4.10 { + file delete $zMasterJournal + execsql { SELECT b FROM main.ab WHERE a = 0 } + } {jkl} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crash.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crash.test --- sqlite3-3.4.2/test/crash.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/crash.test 2009-06-05 18:03:04.000000000 +0100 @@ -17,7 +17,7 @@ # These routines allow us to simulate the kind of file damage that # occurs after a power failure. # -# $Id: crash.test,v 1.24 2007/05/23 06:25:13 danielk1977 Exp $ +# $Id: crash.test,v 1.27 2008/01/08 15:18:52 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -28,7 +28,7 @@ } set repeats 100 -# set repeats 10 +#set repeats 10 # The following procedure computes a "signature" for table "abc". 
If # abc changes in any way, the signature should change. @@ -67,14 +67,17 @@ set ::sig [signature] expr 0 } {0} -do_test crash-1.2 { - crashsql -delay 1 -file test.db-journal { - DELETE FROM abc WHERE a = 1; - } -} {1 {child process exited abnormally}} -do_test crash-1.3 { - signature -} $::sig +for {set i 0} {$i<10} {incr i} { + set seed [expr {int(abs(rand()*10000))}] + do_test crash-1.2.$i { + crashsql -delay 1 -file test.db-journal -seed $seed { + DELETE FROM abc WHERE a = 1; + } + } {1 {child process exited abnormally}} + do_test crash-1.3.$i { + signature + } $::sig +} do_test crash-1.4 { crashsql -delay 1 -file test.db { DELETE FROM abc WHERE a = 1; @@ -185,7 +188,8 @@ for {set i 1} {$i < $repeats} {incr i} { set sig [signature] do_test crash-3.$i.1 { - crashsql -delay [expr $i%5 + 1] -file test.db-journal " + set seed [expr {int(abs(rand()*10000))}] + crashsql -delay [expr $i%5 + 1] -file test.db-journal -seed $seed " BEGIN; SELECT random() FROM abc LIMIT $i; INSERT INTO abc VALUES(randstr(10,10), 0, 0); @@ -209,98 +213,102 @@ # crash-4.3.*: Test recovery when crash occurs during sync() of the master # journal file. # -do_test crash-4.0 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - ATTACH 'test2.db' AS aux; - PRAGMA aux.default_cache_size = 10; - CREATE TABLE aux.abc2 AS SELECT 2*a as a, 2*b as b, 2*c as c FROM abc; +ifcapable attach { + do_test crash-4.0 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.default_cache_size = 10; + CREATE TABLE aux.abc2 AS SELECT 2*a as a, 2*b as b, 2*c as c FROM abc; + } + expr ([file size test2.db] / 1024) > 450 + } {1} + + set fin 0 + for {set i 1} {$i<$repeats} {incr i} { + set seed [expr {int(abs(rand()*10000))}] + set sig [signature] + set sig2 [signature2] + do_test crash-4.1.$i.1 { + set c [crashsql -delay $i -file test.db-journal -seed $::seed " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT randstr($i,$i) FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + "] + if { $c == {0 {}} } { + set ::fin 1 + set c {1 {child process exited abnormally}} + } + set c + } {1 {child process exited abnormally}} + if {$::fin} break + do_test crash-4.1.$i.2 { + signature + } $sig + do_test crash-4.1.$i.3 { + signature2 + } $sig2 + } + set i 0 + set fin 0 + while {[incr i]} { + set seed [expr {int(abs(rand()*10000))}] + set sig [signature] + set sig2 [signature2] + set ::fin 0 + do_test crash-4.2.$i.1 { + set c [crashsql -delay $i -file test2.db-journal -seed $::seed " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT randstr($i,$i) FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + "] + if { $c == {0 {}} } { + set ::fin 1 + set c {1 {child process exited abnormally}} + } + set c + } {1 {child process exited abnormally}} + if { $::fin } break + do_test crash-4.2.$i.2 { + signature + } $sig + do_test crash-4.2.$i.3 { + signature2 + } $sig2 + } + for {set i 1} {$i < 5} {incr i} { + set sig [signature] + set sig2 [signature2] + do_test crash-4.3.$i.1 { + crashsql -delay 1 -file test.db-mj* " + ATTACH 'test2.db' AS aux; + BEGIN; + SELECT random() FROM abc LIMIT $i; + INSERT INTO abc VALUES(randstr(10,10), 0, 0); + DELETE FROM abc WHERE 
random()%10!=0; + INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); + DELETE FROM abc2 WHERE random()%10!=0; + COMMIT; + " + } {1 {child process exited abnormally}} + do_test crash-4.3.$i.2 { + signature + } $sig + do_test crash-4.3.$i.3 { + signature2 + } $sig2 } - expr ([file size test2.db] / 1024) > 450 -} {1} - -set fin 0 -for {set i 1} {$i<$repeats} {incr i} { - set sig [signature] - set sig2 [signature2] - do_test crash-4.1.$i.1 { - set c [crashsql -delay $i -file test.db-journal " - ATTACH 'test2.db' AS aux; - BEGIN; - SELECT randstr($i,$i) FROM abc LIMIT $i; - INSERT INTO abc VALUES(randstr(10,10), 0, 0); - DELETE FROM abc WHERE random()%10!=0; - INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); - DELETE FROM abc2 WHERE random()%10!=0; - COMMIT; - "] - if { $c == {0 {}} } { - set ::fin 1 - set c {1 {child process exited abnormally}} - } - set c - } {1 {child process exited abnormally}} - if {$::fin} break - do_test crash-4.1.$i.2 { - signature - } $sig - do_test crash-4.1.$i.3 { - signature2 - } $sig2 -} -set i 0 -set fin 0 -while {[incr i]} { - set sig [signature] - set sig2 [signature2] - set ::fin 0 - do_test crash-4.2.$i.1 { - set c [crashsql -delay $i -file test2.db-journal " - ATTACH 'test2.db' AS aux; - BEGIN; - SELECT randstr($i,$i) FROM abc LIMIT $i; - INSERT INTO abc VALUES(randstr(10,10), 0, 0); - DELETE FROM abc WHERE random()%10!=0; - INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); - DELETE FROM abc2 WHERE random()%10!=0; - COMMIT; - "] - if { $c == {0 {}} } { - set ::fin 1 - set c {1 {child process exited abnormally}} - } - set c - } {1 {child process exited abnormally}} - if { $::fin } break - do_test crash-4.2.$i.2 { - signature - } $sig - do_test crash-4.2.$i.3 { - signature2 - } $sig2 -} -for {set i 1} {$i < 5} {incr i} { - set sig [signature] - set sig2 [signature2] - do_test crash-4.3.$i.1 { - crashsql -delay 1 -file test.db-mj* " - ATTACH 'test2.db' AS aux; - BEGIN; - SELECT random() FROM abc LIMIT $i; - INSERT INTO abc VALUES(randstr(10,10), 0, 0); - DELETE FROM abc WHERE random()%10!=0; - INSERT INTO abc2 VALUES(randstr(10,10), 0, 0); - DELETE FROM abc2 WHERE random()%10!=0; - COMMIT; - " - } {1 {child process exited abnormally}} - do_test crash-4.3.$i.2 { - signature - } $sig - do_test crash-4.3.$i.3 { - signature2 - } $sig2 } #-------------------------------------------------------------------------- diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/crashtest1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/test/crashtest1.c --- sqlite3-3.4.2/test/crashtest1.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/crashtest1.c 2004-02-08 06:06:37.000000000 +0000 @@ -0,0 +1,96 @@ +/* +** This program tests the ability of SQLite database to recover from a crash. +** This program runs under Unix only, but the results are applicable to all +** systems. +** +** The main process first constructs a test database, then starts creating +** subprocesses that write to that database. Each subprocess is killed off, +** without a chance to clean up its database connection, after a random +** delay. This killing of the subprocesses simulates a crash or power +** failure. The next subprocess to open the database should rollback +** whatever operation was in process at the time of the simulated crash. +** +** If any problems are encountered, an error is reported and the test stops. +** If no problems are seen after a large number of tests, we assume that +** the rollback mechanism is working. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "sqlite.h" + +static void do_some_sql(int parent){ + char *zErr; + int rc = SQLITE_OK; + sqlite *db; + int cnt = 0; + static char zBig[] = + "-abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + "-abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + + if( access("./test.db-journal",0)==0 ){ + /*printf("pid %d: journal exists. rollback will be required\n",getpid());*/ unlink("test.db-saved"); + system("cp test.db test.db-saved"); + unlink("test.db-journal-saved"); + system("cp test.db-journal test.db-journal-saved"); + } + db = sqlite_open("./test.db", 0, &zErr); + if( db==0 ){ + printf("ERROR: %s\n", zErr); + if( strcmp(zErr,"database disk image is malformed")==0 ){ + kill(parent, SIGKILL); + } + exit(1); + } + srand(getpid()); + while( rc==SQLITE_OK ){ + cnt++; + rc = sqlite_exec_printf(db, + "INSERT INTO t1 VALUES(%d,'%d%s')", 0, 0, &zErr, + rand(), rand(), zBig); + } + if( rc!=SQLITE_OK ){ + printf("ERROR #%d: %s\n", rc, zErr); + if( rc==SQLITE_CORRUPT ){ + kill(parent, SIGKILL); + } + } + printf("pid %d: cnt=%d\n", getpid(), cnt); +} + + +int main(int argc, char **argv){ + int i; + sqlite *db; + char *zErr; + int status; + int parent = getpid(); + + unlink("test.db"); + unlink("test.db-journal"); + db = sqlite_open("test.db", 0, &zErr); + if( db==0 ){ + printf("Cannot initialize: %s\n", zErr); + return 1; + } + sqlite_exec(db, "CREATE TABLE t1(a,b)", 0, 0, 0); + sqlite_close(db); + for(i=0; i<10000; i++){ + int pid = fork(); + if( pid==0 ){ + sched_yield(); + do_some_sql(parent); + return 0; + } + printf("test %d, pid=%d\n", i, pid); + usleep(rand()%10000 + 1000); + kill(pid, SIGKILL); + waitpid(pid, &status, 0); + } + return 0; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/createtab.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/createtab.test --- sqlite3-3.4.2/test/createtab.test 2007-05-04 15:36:22.000000000 +0100 +++ sqlite3-3.6.16/test/createtab.test 2009-06-05 18:03:04.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing that it is OK to create new tables # and indices while creating existing tables and indices. # -# $Id: createtab.test,v 1.2 2007/05/04 14:36:22 drh Exp $ +# $Id: createtab.test,v 1.3 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -45,9 +45,15 @@ SELECT count(*) FROM t1; } } {4} + + set isUtf16 0 + ifcapable utf16 { + set isUtf16 [expr {[execsql {PRAGMA encoding}] != "UTF-8"}] + } + do_test createtab-$av.2 { file size test.db - } [expr {1024*(4+($av!=0)+([execsql {PRAGMA encoding}]!="UTF-8")*2)}] + } [expr {1024*(4+($av!=0)+(${isUtf16}*2))}] # Start reading the table # diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/cse.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/cse.test --- sqlite3-3.4.2/test/cse.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/cse.test 2009-06-05 18:03:04.000000000 +0100 @@ -0,0 +1,160 @@ +# 2008 April 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test cases designed to exercise and verify the logic for +# factoring constant expressions out of loops and for +# common subexpression eliminations. 
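Common subexpression elimination, as exercised below, means that when the same expression appears more than once in a result set the generated code should evaluate it only once and reuse the value, without changing the query results. A tiny sketch of the observable behaviour, in the style of the tests that follow; the table name and test number are invented for illustration.

# Hypothetical sketch: duplicated expressions must still produce
# identical, correct values whether or not they are factored out.
do_test cse-sketch.1 {
  execsql {
    CREATE TABLE tsketch(a INTEGER PRIMARY KEY, b);
    INSERT INTO tsketch VALUES(1, 11);
    SELECT b+b, b+b, abs(b), abs(b) FROM tsketch;
  }
} {22 22 11 11}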
+# +# $Id: cse.test,v 1.6 2008/08/04 03:51:24 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test cse-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, d, e, f); + INSERT INTO t1 VALUES(1,11,12,13,14,15); + INSERT INTO t1 VALUES(2,21,22,23,24,25); + } + execsql { + SELECT b, -b, ~b, NOT b, NOT NOT b, b-b, b+b, b*b, b/b, b FROM t1 + } +} {11 -11 -12 0 1 0 22 121 1 11 21 -21 -22 0 1 0 42 441 1 21} +do_test cse-1.2 { + execsql { + SELECT b, b%b, b==b, b!=b, b49} {set r [expr {99-$r}]} + lappend colset a$j a$r + lappend answer $j $r + } + set sql "SELECT [join $colset ,] FROM t2" + do_test cse-2.2.$i { + # explain $::sql + execsql $::sql + } $answer +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/date.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/date.test --- sqlite3-3.4.2/test/date.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/date.test 2009-06-25 12:23:18.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing date and time functions. # -# $Id: date.test,v 1.22 2007/05/04 13:15:57 drh Exp $ +# $Id: date.test,v 1.34 2009/04/16 12:58:03 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -57,6 +57,7 @@ datetest 1.21 {julianday('2000-01-01 12:00:00.001')} 2451545.00000001 datetest 1.22 {julianday('2000-01-01 12:00:00.')} NULL datetest 1.23 julianday(12345.6) 12345.6 +datetest 1.23b julianday('12345.6') 12345.6 datetest 1.24 {julianday('2001-01-01 12:00:00 bogus')} NULL datetest 1.25 {julianday('2001-01-01 bogus')} NULL datetest 1.26 {julianday('2001-01-01 12:60:00')} NULL @@ -69,6 +70,12 @@ datetest 2.1c datetime(0,'unixepochx') NULL datetest 2.1d datetime('2003-10-22','unixepoch') NULL datetest 2.2 datetime(946684800,'unixepoch') {2000-01-01 00:00:00} +datetest 2.2b datetime('946684800','unixepoch') {2000-01-01 00:00:00} +for {set i 0} {$i<1000} {incr i} { + set sql [format {strftime('%%H:%%M:%%f',1237962480.%03d,'unixepoch')} $i] + set res [format {06:28:00.%03d} $i] + datetest 2.2c-$i $sql $res +} datetest 2.3 {date('2003-10-22','weekday 0')} 2003-10-26 datetest 2.4 {date('2003-10-22','weekday 1')} 2003-10-27 datetest 2.4a {date('2003-10-22','weekday 1')} 2003-10-27 @@ -119,24 +126,45 @@ datetest 2.37 {datetime('2003-10-22 12:24','+1 abcdefg')} NULL datetest 2.38 {datetime('2003-10-22 12:24','+1 abcdefgh')} NULL datetest 2.39 {datetime('2003-10-22 12:24','+1 abcdefghi')} NULL -datetest 2.40 {datetime()} NULL - +set sqlite_current_time 1199243045 +datetest 2.40 {datetime()} {2008-01-02 03:04:05} +set sqlite_current_time 0 +datetest 2.41 {datetime('2003-10-22 12:24','23 seconds')} {2003-10-22 12:24:23} +datetest 2.42 {datetime('2003-10-22 12:24','345 second')} {2003-10-22 12:29:45} +datetest 2.43 {datetime('2003-10-22 12:24','4 second')} {2003-10-22 12:24:04} +datetest 2.44 {datetime('2003-10-22 12:24','56 second')} {2003-10-22 12:24:56} +datetest 2.45 {datetime('2003-10-22 12:24','60 second')} {2003-10-22 12:25:00} +datetest 2.46 {datetime('2003-10-22 12:24','70 second')} {2003-10-22 12:25:10} +datetest 2.47 {datetime('2003-10-22 12:24','8.6 seconds')} {2003-10-22 12:24:08} +datetest 2.48 {datetime('2003-10-22 12:24','9.4 second')} {2003-10-22 12:24:09} +datetest 2.49 {datetime('2003-10-22 12:24','0000 second')} {2003-10-22 12:24:00} +datetest 2.50 {datetime('2003-10-22 12:24','0001 second')} {2003-10-22 12:24:01} +datetest 2.51 {datetime('2003-10-22 12:24','nonsense')} NULL datetest 3.1 
{strftime('%d','2003-10-31 12:34:56.432')} 31 -datetest 3.2 {strftime('%f','2003-10-31 12:34:56.432')} 56.432 +datetest 3.2.1 {strftime('pre%fpost','2003-10-31 12:34:56.432')} pre56.432post +datetest 3.2.2 {strftime('%f','2003-10-31 12:34:59.9999999')} 59.999 datetest 3.3 {strftime('%H','2003-10-31 12:34:56.432')} 12 datetest 3.4 {strftime('%j','2003-10-31 12:34:56.432')} 304 datetest 3.5 {strftime('%J','2003-10-31 12:34:56.432')} 2452944.02426426 datetest 3.6 {strftime('%m','2003-10-31 12:34:56.432')} 10 datetest 3.7 {strftime('%M','2003-10-31 12:34:56.432')} 34 -datetest 3.8 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696 +datetest 3.8.1 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696 +datetest 3.8.2 {strftime('%s','2038-01-19 03:14:07')} 2147483647 +datetest 3.8.3 {strftime('%s','2038-01-19 03:14:08')} 2147483648 +datetest 3.8.4 {strftime('%s','2201-04-09 12:00:00')} 7298164800 +datetest 3.8.5 {strftime('%s','9999-12-31 23:59:59')} 253402300799 +datetest 3.8.6 {strftime('%s','1969-12-31 23:59:59')} -1 +datetest 3.8.7 {strftime('%s','1901-12-13 20:45:52')} -2147483648 +datetest 3.8.8 {strftime('%s','1901-12-13 20:45:51')} -2147483649 +datetest 3.8.9 {strftime('%s','1776-07-04 00:00:00')} -6106060800 datetest 3.9 {strftime('%S','2003-10-31 12:34:56.432')} 56 datetest 3.10 {strftime('%w','2003-10-31 12:34:56.432')} 5 datetest 3.11.1 {strftime('%W','2003-10-31 12:34:56.432')} 43 datetest 3.11.2 {strftime('%W','2004-01-01')} 00 datetest 3.11.3 {strftime('%W','2004-01-02')} 00 datetest 3.11.4 {strftime('%W','2004-01-03')} 00 -datetest 3.11.5 {strftime('%W','2004-01-04')} 00 +datetest 3.11.5 {strftime('abc%Wxyz','2004-01-04')} abc00xyz datetest 3.11.6 {strftime('%W','2004-01-05')} 01 datetest 3.11.7 {strftime('%W','2004-01-06')} 01 datetest 3.11.8 {strftime('%W','2004-01-07')} 01 @@ -154,9 +182,10 @@ datetest 3.11.20 {strftime('%W %j',2454109.04140975)} {02 008} datetest 3.11.21 {strftime('%W %j',2454109.04140976)} {02 008} datetest 3.11.22 {strftime('%W %j',2454109.04140977)} {02 008} -datetest 3.11.22 {strftime('%W %j',2454109.04140978)} {02 008} -datetest 3.11.22 {strftime('%W %j',2454109.04140979)} {02 008} -datetest 3.11.22 {strftime('%W %j',2454109.04140980)} {02 008} +datetest 3.11.23 {strftime('%W %j',2454109.04140978)} {02 008} +datetest 3.11.24 {strftime('%W %j',2454109.04140979)} {02 008} +datetest 3.11.25 {strftime('%W %j',2454109.04140980)} {02 008} +datetest 3.11.99 {strftime('%W %j','2454109.04140970')} {02 008} datetest 3.12 {strftime('%Y','2003-10-31 12:34:56.432')} 2003 datetest 3.13 {strftime('%%','2003-10-31 12:34:56.432')} % datetest 3.14 {strftime('%_','2003-10-31 12:34:56.432')} NULL @@ -173,6 +202,12 @@ datetest 3.17 "strftime('[repeat 200 abc%m123]','2003-10-31')" \ [repeat 200 abc10123] +foreach c {a b c e g h i k l n o p q r t v x y z + A B C D E F G I K L N O P Q R T U V Z + 0 1 2 3 4 5 6 6 7 9 _} { + datetest 3.18.$c "strftime('%$c','2003-10-31')" NULL +} + # Ticket #2276. Make sure leading zeros are inserted where appropriate. 
# datetest 3.20 \ @@ -188,31 +223,113 @@ datetest 5.3 {datetime('1994-04-16 05:00:00 +08:30')} {1994-04-15 20:30:00} datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55')} {1994-04-17 01:55:00} datetest 5.5 {datetime('1994-04-16 14:00:00 -11:60')} NULL -datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55 ')} {1994-04-17 01:55:00} -datetest 5.4 {datetime('1994-04-16 14:00:00 -11:55 x')} NULL +datetest 5.6 {datetime('1994-04-16 14:00:00 -11:55 ')} {1994-04-17 01:55:00} +datetest 5.7 {datetime('1994-04-16 14:00:00 -11:55 x')} NULL +datetest 5.8 {datetime('1994-04-16T14:00:00Z')} {1994-04-16 14:00:00} +datetest 5.9 {datetime('1994-04-16 14:00:00z')} {1994-04-16 14:00:00} +datetest 5.10 {datetime('1994-04-16 14:00:00 Z')} {1994-04-16 14:00:00} +datetest 5.11 {datetime('1994-04-16 14:00:00z ')} {1994-04-16 14:00:00} +datetest 5.12 {datetime('1994-04-16 14:00:00 z ')} {1994-04-16 14:00:00} +datetest 5.13 {datetime('1994-04-16 14:00:00Zulu')} NULL +datetest 5.14 {datetime('1994-04-16 14:00:00Z +05:00')} NULL +datetest 5.15 {datetime('1994-04-16 14:00:00 +05:00 Z')} NULL # localtime->utc and utc->localtime conversions. These tests only work # if the localtime is in the US Eastern Time (the time in Charlotte, NC # and in New York.) # -set tzoffset [db one { - SELECT CAST(24*(julianday('2006-09-01') - - julianday('2006-09-01','localtime'))+0.5 +# On non-Vista Windows platform, '2006-03-31' is treated incorrectly as being +# in DST giving a 4 hour offset instead of 5. In 2007, DST was extended to +# start three weeks earlier (second Sunday in March) and end one week +# later (first Sunday in November). Older Windows systems apply this +# new rule incorrectly to dates prior to 2007. +# +# It might be argued that this is masking a problem on non-Vista Windows +# platform. A ticket has already been opened for this issue +# (http://www.sqlite.org/cvstrac/tktview?tn=2322). This is just to prevent +# more confusion/reports of the issue. +# + +# $tzoffset_old should be 5 if DST is working correctly. +set tzoffset_old [db one { + SELECT CAST(24*(julianday('2006-03-31') - + julianday('2006-03-31','localtime'))+0.5 AS INT) }] -if {$tzoffset==4} { + +# $tzoffset_new should be 4 if DST is working correctly. +set tzoffset_new [db one { + SELECT CAST(24*(julianday('2007-03-31') - + julianday('2007-03-31','localtime'))+0.5 + AS INT) +}] + +# Warn about possibly broken Windows DST implementations. +if {$::tcl_platform(platform)=="windows" && $tzoffset_new==4 && $tzoffset_old==4} { + puts "******************************************************************" + puts "N.B.: The DST support provided by your current O/S seems to be" + puts "suspect in that it is reporting incorrect DST values for dates" + puts "prior to 2007. This is the known case for most (all?) non-Vista" + puts "Windows versions. Please see ticket #2322 for more information." + puts "******************************************************************" +} + +if {$tzoffset_new==4} { datetest 6.1 {datetime('2000-10-29 05:59:00','localtime')}\ {2000-10-29 01:59:00} - datetest 6.2 {datetime('2000-10-29 06:00:00','localtime')}\ - {2000-10-29 01:00:00} - datetest 6.3 {datetime('2000-04-02 06:59:00','localtime')}\ - {2000-04-02 01:59:00} + datetest 6.1.1 {datetime('2006-10-29 05:59:00','localtime')}\ + {2006-10-29 01:59:00} + datetest 6.1.2 {datetime('2007-11-04 05:59:00','localtime')}\ + {2007-11-04 01:59:00} + + # If the new and old DST rules seem to be working correctly... 
+ if {$tzoffset_new==4 && $tzoffset_old==5} { + datetest 6.2 {datetime('2000-10-29 06:00:00','localtime')}\ + {2000-10-29 01:00:00} + datetest 6.2.1 {datetime('2006-10-29 06:00:00','localtime')}\ + {2006-10-29 01:00:00} + } + datetest 6.2.2 {datetime('2007-11-04 06:00:00','localtime')}\ + {2007-11-04 01:00:00} + + # If the new and old DST rules seem to be working correctly... + if {$tzoffset_new==4 && $tzoffset_old==5} { + datetest 6.3 {datetime('2000-04-02 06:59:00','localtime')}\ + {2000-04-02 01:59:00} + datetest 6.3.1 {datetime('2006-04-02 06:59:00','localtime')}\ + {2006-04-02 01:59:00} + } + datetest 6.3.2 {datetime('2007-03-11 07:00:00','localtime')}\ + {2007-03-11 03:00:00} + datetest 6.4 {datetime('2000-04-02 07:00:00','localtime')}\ {2000-04-02 03:00:00} + datetest 6.4.1 {datetime('2006-04-02 07:00:00','localtime')}\ + {2006-04-02 03:00:00} + datetest 6.4.2 {datetime('2007-03-11 07:00:00','localtime')}\ + {2007-03-11 03:00:00} + datetest 6.5 {datetime('2000-10-29 01:59:00','utc')} {2000-10-29 05:59:00} - datetest 6.6 {datetime('2000-10-29 02:00:00','utc')} {2000-10-29 07:00:00} - datetest 6.7 {datetime('2000-04-02 01:59:00','utc')} {2000-04-02 06:59:00} + datetest 6.5.1 {datetime('2006-10-29 01:59:00','utc')} {2006-10-29 05:59:00} + datetest 6.5.2 {datetime('2007-11-04 01:59:00','utc')} {2007-11-04 05:59:00} + + # If the new and old DST rules seem to be working correctly... + if {$tzoffset_new==4 && $tzoffset_old==5} { + datetest 6.6 {datetime('2000-10-29 02:00:00','utc')} {2000-10-29 07:00:00} + datetest 6.6.1 {datetime('2006-10-29 02:00:00','utc')} {2006-10-29 07:00:00} + } + datetest 6.6.2 {datetime('2007-11-04 02:00:00','utc')} {2007-11-04 07:00:00} + + # If the new and old DST rules seem to be working correctly... + if {$tzoffset_new==4 && $tzoffset_old==5} { + datetest 6.7 {datetime('2000-04-02 01:59:00','utc')} {2000-04-02 06:59:00} + datetest 6.7.1 {datetime('2006-04-02 01:59:00','utc')} {2006-04-02 06:59:00} + } + datetest 6.7.2 {datetime('2007-03-11 01:59:00','utc')} {2007-03-11 06:59:00} + datetest 6.8 {datetime('2000-04-02 02:00:00','utc')} {2000-04-02 06:00:00} + datetest 6.8.1 {datetime('2006-04-02 02:00:00','utc')} {2006-04-02 06:00:00} + datetest 6.8.2 {datetime('2007-03-11 02:00:00','utc')} {2007-03-11 06:00:00} datetest 6.10 {datetime('2000-01-01 12:00:00','localtime')} \ {2000-01-01 07:00:00} @@ -234,6 +351,13 @@ set sqlite_current_time 0 } +# These two are a bit of a scam. They are added to ensure that 100% of +# the date.c file is covered by testing, even when the time-zone +# is not -0400 (the condition for running of the block of tests above). +# +datetest 6.19 {datetime('2039-07-01 12:00:00','localtime',null)} NULL +datetest 6.20 {datetime('2039-07-01 12:00:00','utc',null)} NULL + # Date-time functions that contain NULL arguments return a NULL # result. 
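#
# [editor sketch -- not part of the upstream patch] The NULL rule stated
# above can be shown directly with the datetest helper: a NULL anywhere in
# the argument list makes the whole call return NULL (the test numbers
# below are hypothetical):
#
datetest 7.0.ed1 {datetime(NULL)} NULL
datetest 7.0.ed2 {datetime('2003-10-31', NULL)} NULL
datetest 7.0.ed3 {strftime(NULL, '2003-10-31')} NULL
#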
# @@ -343,5 +467,58 @@ datetest 13.7 {strftime('%Y-%m-%d %H:%M:%f', '2007-01-01 23:59:59.6')} \ {2007-01-01 23:59:59.600} +# Ticket #3618 +datetest 13.11 {julianday(2454832.5,'-1 day')} {2454831.5} +datetest 13.12 {julianday(2454832.5,'+1 day')} {2454833.5} +datetest 13.13 {julianday(2454832.5,'-1.5 day')} {2454831.0} +datetest 13.14 {julianday(2454832.5,'+1.5 day')} {2454834.0} +datetest 13.15 {julianday(2454832.5,'-3 hours')} {2454832.375} +datetest 13.16 {julianday(2454832.5,'+3 hours')} {2454832.625} +datetest 13.17 {julianday(2454832.5,'-45 minutes')} {2454832.46875} +datetest 13.18 {julianday(2454832.5,'+45 minutes')} {2454832.53125} +datetest 13.19 {julianday(2454832.5,'-675 seconds')} {2454832.4921875} +datetest 13.20 {julianday(2454832.5,'+675 seconds')} {2454832.5078125} +datetest 13.21 {julianday(2454832.5,'-1.5 months')} {2454786.5} +datetest 13.22 {julianday(2454832.5,'+1.5 months')} {2454878.5} +datetest 13.23 {julianday(2454832.5,'-1.5 years')} {2454284.0} +datetest 13.24 {julianday(2454832.5,'+1.5 years')} {2455380.0} + +datetest 13.30 {date('2000-01-01','+1.5 years')} {2001-07-02} +datetest 13.31 {date('2001-01-01','+1.5 years')} {2002-07-02} +datetest 13.32 {date('2002-01-01','+1.5 years')} {2003-07-02} +datetest 13.33 {date('2002-01-01','-1.5 years')} {2000-07-02} +datetest 13.34 {date('2001-01-01','-1.5 years')} {1999-07-02} +# Test for issues reported by BareFeet (list.sql at tandb.com.au) +# on mailing list on 2008-06-12. +# +# Put a floating point number in the database so that we can manipulate +# raw bits using the hexio interface. +# +do_test date-14.1 { + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size = 1024; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1.1); + } + db close + hexio_write test.db 2040 4142ba32bffffff9 + sqlite3 db test.db + db eval {SELECT * FROM t1} +} {2454629.5} + +# Changing the least significant byte of the floating point value between +# 00 and FF should always generate a time of either 23:59:59 or 00:00:00, +# never 24:00:00 +# +for {set i 0} {$i<=255} {incr i} { + db close + hexio_write test.db 2047 [format %02x $i] + sqlite3 db test.db + do_test date-14.2.$i { + set date [db one {SELECT datetime(x) FROM t1}] + expr {$date eq "2008-06-12 00:00:00" || $date eq "2008-06-11 23:59:59"} + } {1} +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/default.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/default.test --- sqlite3-3.4.2/test/default.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/default.test 2009-06-12 03:37:53.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing corner cases of the DEFAULT syntax # on table definitions. 
# -# $Id: default.test,v 1.2 2005/08/20 03:03:04 drh Exp $ +# $Id: default.test,v 1.3 2009/02/19 14:39:25 danielk1977 Exp $ # set testdir [file dirname $argv0] @@ -49,4 +49,19 @@ } } {1 {default value of column [y] is not constant}} +ifcapable pragma { + do_test default-2.1 { + execsql { + CREATE TABLE t4(c DEFAULT 'abc'); + PRAGMA table_info(t4); + } + } {0 c {} 0 'abc' 0} + do_test default-2.2 { + execsql { + INSERT INTO t4 DEFAULT VALUES; + PRAGMA table_info(t4); + } + } {0 c {} 0 'abc' 0} +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/delete2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/delete2.test --- sqlite3-3.4.2/test/delete2.test 2007-03-29 19:39:34.000000000 +0100 +++ sqlite3-3.6.16/test/delete2.test 2009-06-05 18:03:25.000000000 +0100 @@ -29,7 +29,7 @@ # The solution to the problem was to detect that the table is locked # before the index entry is deleted. # -# $Id: delete2.test,v 1.7 2006/08/16 16:42:48 drh Exp $ +# $Id: delete2.test,v 1.8 2008/07/08 15:59:52 danielk1977 Exp $ # set testdir [file dirname $argv0] @@ -96,4 +96,24 @@ } } {goodbye id.2 again id.3} +do_test delete2-2.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(c, d); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t2 VALUES(3, 4); + INSERT INTO t2 VALUES(5, 6); + } +} {} +do_test delete2-2.2 { + set res [list] + db eval { + SELECT CASE WHEN c = 5 THEN b ELSE NULL END AS b, c, d FROM t1, t2 + } { + db eval {DELETE FROM t1} + lappend res $b $c $d + } + set res +} {{} 3 4 {} 5 6} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/delete.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/delete.test --- sqlite3-3.4.2/test/delete.test 2007-03-29 19:39:34.000000000 +0100 +++ sqlite3-3.6.16/test/delete.test 2009-06-25 12:35:51.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the DELETE FROM statement. # -# $Id: delete.test,v 1.21 2006/01/03 00:33:50 drh Exp $ +# $Id: delete.test,v 1.26 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -275,6 +275,7 @@ } } {123} db close +catch {file delete -force test.db-journal} catch {file attributes test.db -permissions 0444} catch {file attributes test.db -readonly 1} sqlite3 db test.db @@ -310,4 +311,84 @@ } {123} integrity_check delete-8.7 +# Need to do the following for tcl 8.5 on mac. On that configuration, the +# -readonly flag is taken so seriously that a subsequent [file delete -force] +# (required before the next test file can be executed) will fail. +# +catch {file attributes test.db -readonly 0} +db close +file delete -force test.db test.db-journal + +# The following tests verify that SQLite correctly handles the case +# where an index B-Tree is being scanned, the rowid column being read +# from each index entry and another statement deletes some rows from +# the index B-Tree. At one point this (obscure) scenario was causing +# SQLite to return spurious SQLITE_CORRUPT errors and arguably incorrect +# query results. 
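#
# [editor sketch -- not part of the upstream patch] In outline, the tests
# below walk an index with one statement while a second statement deletes
# rows from the same table, roughly:
#
#   db eval { SELECT t5.rowid AS r, c, d FROM t5, t6 ORDER BY a } {
#     db eval { DELETE FROM t5 }   ;# rows vanish under the open cursor
#   }
#
# The rowid is read back out of each index entry, so the expectation is
# NULL columns for the deleted rows, never an SQLITE_CORRUPT error.
#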
+# +do_test delete-9.1 { + sqlite3 db test.db + execsql { + CREATE TABLE t5(a, b); + CREATE TABLE t6(c, d); + INSERT INTO t5 VALUES(1, 2); + INSERT INTO t5 VALUES(3, 4); + INSERT INTO t5 VALUES(5, 6); + INSERT INTO t6 VALUES('a', 'b'); + INSERT INTO t6 VALUES('c', 'd'); + CREATE INDEX i5 ON t5(a); + CREATE INDEX i6 ON t6(c); + } +} {} +do_test delete-9.2 { + set res [list] + db eval { SELECT t5.rowid AS r, c, d FROM t5, t6 ORDER BY a } { + if {$r==2} { db eval { DELETE FROM t5 } } + lappend res $r $c $d + } + set res +} {1 a b 1 c d 2 a b {} c d} +do_test delete-9.3 { + execsql { + INSERT INTO t5 VALUES(1, 2); + INSERT INTO t5 VALUES(3, 4); + INSERT INTO t5 VALUES(5, 6); + } + set res [list] + db eval { SELECT t5.rowid AS r, c, d FROM t5, t6 ORDER BY a } { + if {$r==2} { db eval { DELETE FROM t5 WHERE rowid = 2 } } + lappend res $r $c $d + } + set res +} {1 a b 1 c d 2 a b {} c d 3 a b 3 c d} +do_test delete-9.4 { + execsql { + DELETE FROM t5; + INSERT INTO t5 VALUES(1, 2); + INSERT INTO t5 VALUES(3, 4); + INSERT INTO t5 VALUES(5, 6); + } + set res [list] + db eval { SELECT t5.rowid AS r, c, d FROM t5, t6 ORDER BY a } { + if {$r==2} { db eval { DELETE FROM t5 WHERE rowid = 1 } } + lappend res $r $c $d + } + set res +} {1 a b 1 c d 2 a b 2 c d 3 a b 3 c d} +do_test delete-9.5 { + execsql { + DELETE FROM t5; + INSERT INTO t5 VALUES(1, 2); + INSERT INTO t5 VALUES(3, 4); + INSERT INTO t5 VALUES(5, 6); + } + set res [list] + db eval { SELECT t5.rowid AS r, c, d FROM t5, t6 ORDER BY a } { + if {$r==2} { db eval { DELETE FROM t5 WHERE rowid = 3 } } + lappend res $r $c $d + } + set res +} {1 a b 1 c d 2 a b 2 c d} + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/descidx1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/descidx1.test --- sqlite3-3.4.2/test/descidx1.test 2007-03-29 19:39:34.000000000 +0100 +++ sqlite3-3.6.16/test/descidx1.test 2009-06-05 18:03:25.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is descending indices. # -# $Id: descidx1.test,v 1.7 2006/07/11 14:17:52 drh Exp $ +# $Id: descidx1.test,v 1.10 2008/03/19 00:21:31 drh Exp $ # set testdir [file dirname $argv0] @@ -23,25 +23,20 @@ # to $newval. Also, the schema cookie is incremented. # proc set_file_format {newval} { - set bt [btree_open test.db 10 0] - btree_begin_transaction $bt - set meta [btree_get_meta $bt] - lset meta 2 $newval ;# File format - lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie - eval "btree_update_meta $bt $meta" - btree_commit $bt - btree_close $bt + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} } # This procedure returns the value of the file-format in file 'test.db'. # proc get_file_format {{fname test.db}} { - set bt [btree_open $fname 10 0] - set meta [btree_get_meta $bt] - btree_close $bt - lindex $meta 2 + return [hexio_get_int [hexio_read $fname 44 4]] } + # Verify that the file format starts as 4. # do_test descidx1-1.1 { @@ -319,6 +314,13 @@ } get_file_format } {1} +ifcapable vacuum { + # Verify that the file format is preserved across a vacuum. 
+ do_test descidx1-6.3.1 { + execsql {VACUUM} + get_file_format + } {1} +} do_test descidx1-6.4 { db close file delete -force test.db test.db-journal @@ -329,9 +331,29 @@ do_test descidx1-6.5 { execsql { CREATE TABLE t1(a,b,c); + CREATE INDEX i1 ON t1(a ASC, b DESC, c ASC); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(1,1,0); + INSERT INTO t1 VALUES(1,2,1); + INSERT INTO t1 VALUES(1,3,4); } get_file_format } {4} +ifcapable vacuum { + # Verify that the file format is preserved across a vacuum. + do_test descidx1-6.6 { + execsql {VACUUM} + get_file_format + } {4} + do_test descidx1-6.7 { + execsql { + PRAGMA legacy_file_format=ON; + VACUUM; + } + get_file_format + } {4} +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/descidx2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/descidx2.test --- sqlite3-3.4.2/test/descidx2.test 2007-03-29 19:39:34.000000000 +0100 +++ sqlite3-3.6.16/test/descidx2.test 2009-06-05 18:03:25.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is descending indices. # -# $Id: descidx2.test,v 1.4 2006/07/11 14:17:52 drh Exp $ +# $Id: descidx2.test,v 1.5 2008/03/19 00:21:31 drh Exp $ # set testdir [file dirname $argv0] @@ -23,25 +23,20 @@ # to $newval. Also, the schema cookie is incremented. # proc set_file_format {newval} { - set bt [btree_open test.db 10 0] - btree_begin_transaction $bt - set meta [btree_get_meta $bt] - lset meta 2 $newval ;# File format - lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie - eval "btree_update_meta $bt $meta" - btree_commit $bt - btree_close $bt + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} } # This procedure returns the value of the file-format in file 'test.db'. # proc get_file_format {{fname test.db}} { - set bt [btree_open $fname 10 0] - set meta [btree_get_meta $bt] - btree_close $bt - lindex $meta 2 + return [hexio_get_int [hexio_read $fname 44 4]] } + # Verify that the file format starts as 4 # do_test descidx2-1.1 { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/descidx3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/descidx3.test --- sqlite3-3.4.2/test/descidx3.test 2006-07-11 15:17:52.000000000 +0100 +++ sqlite3-3.6.16/test/descidx3.test 2009-06-05 18:03:25.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is descending indices. # -# $Id: descidx3.test,v 1.5 2006/07/11 14:17:52 drh Exp $ +# $Id: descidx3.test,v 1.6 2008/03/19 00:21:31 drh Exp $ # set testdir [file dirname $argv0] @@ -27,23 +27,17 @@ # to $newval. Also, the schema cookie is incremented. # proc set_file_format {newval} { - set bt [btree_open test.db 10 0] - btree_begin_transaction $bt - set meta [btree_get_meta $bt] - lset meta 2 $newval ;# File format - lset meta 1 [expr [lindex $meta 1]+1] ;# Schema cookie - eval "btree_update_meta $bt $meta" - btree_commit $bt - btree_close $bt + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} } # This procedure returns the value of the file-format in file 'test.db'. 
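#
# [editor note -- not part of the upstream patch] The hexio-based helpers
# above poke the 100-byte database file header directly: the schema format
# number (1 through 4) is the 4-byte big-endian integer at byte offset 44,
# and the schema cookie, which is bumped so that open connections reload
# the schema, is the 4-byte integer at offset 40.  A minimal stand-alone
# read, assuming the hexio helpers from tester.tcl:
#
#   set fmt    [hexio_get_int [hexio_read test.db 44 4]]  ;# schema format
#   set cookie [hexio_get_int [hexio_read test.db 40 4]]  ;# schema cookie
#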
# proc get_file_format {{fname test.db}} { - set bt [btree_open $fname 10 0] - set meta [btree_get_meta $bt] - btree_close $bt - lindex $meta 2 + return [hexio_get_int [hexio_read $fname 44 4]] } # Verify that the file format starts as 4. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/diskfull.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/diskfull.test --- sqlite3-3.4.2/test/diskfull.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/diskfull.test 2009-06-05 18:03:25.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing for correct handling of disk full # errors. # -# $Id: diskfull.test,v 1.6 2007/04/05 17:15:53 danielk1977 Exp $ +# $Id: diskfull.test,v 1.8 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -104,7 +104,7 @@ # PRAGMA cache_size; # } # } {10} -# breakpoint +# # do_diskfull_test diskfull-3.2 { # BEGIN; # INSERT INTO t3 VALUES( randstr(100, 100), randstr(100, 100) ); @@ -113,4 +113,3 @@ # } finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/distinctagg.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/distinctagg.test --- sqlite3-3.4.2/test/distinctagg.test 2007-03-27 15:43:02.000000000 +0100 +++ sqlite3-3.6.16/test/distinctagg.test 2009-06-12 03:37:53.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is the DISTINCT modifier on aggregate functions. # -# $Id: distinctagg.test,v 1.2 2005/09/12 23:03:17 drh Exp $ +# $Id: distinctagg.test,v 1.3 2009/02/09 13:19:28 drh Exp $ set testdir [file dirname $argv0] @@ -52,6 +52,11 @@ catchsql { SELECT count(distinct) FROM t1; } -} {1 {DISTINCT in aggregate must be followed by an expression}} +} {1 {DISTINCT aggregates must have exactly one argument}} +do_test distinctagg-2.2 { + catchsql { + SELECT group_concat(distinct a,b) FROM t1; + } +} {1 {DISTINCT aggregates must have exactly one argument}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/enc2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/enc2.test --- sqlite3-3.4.2/test/enc2.test 2007-03-29 19:39:34.000000000 +0100 +++ sqlite3-3.6.16/test/enc2.test 2009-06-05 18:03:25.000000000 +0100 @@ -13,7 +13,7 @@ # various suported unicode encodings (UTF-8, UTF-16, UTF-16le and # UTF-16be). # -# $Id: enc2.test,v 1.28 2006/09/23 20:36:03 drh Exp $ +# $Id: enc2.test,v 1.29 2007/10/09 08:29:32 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -170,26 +170,27 @@ # Test that it is an error to try to attach a database with a different # encoding to the main database. 
-do_test enc2-4.1 { - file delete -force test.db - sqlite3 db test.db - db eval "PRAGMA encoding = 'UTF-8'" - db eval "CREATE TABLE abc(a, b, c);" -} {} -do_test enc2-4.2 { - file delete -force test2.db - sqlite3 db2 test2.db - db2 eval "PRAGMA encoding = 'UTF-16'" - db2 eval "CREATE TABLE abc(a, b, c);" -} {} -do_test enc2-4.3 { - catchsql { - ATTACH 'test2.db' as aux; - } -} {1 {attached databases must use the same text encoding as main database}} - -db2 close -db close +ifcapable attach { + do_test enc2-4.1 { + file delete -force test.db + sqlite3 db test.db + db eval "PRAGMA encoding = 'UTF-8'" + db eval "CREATE TABLE abc(a, b, c);" + } {} + do_test enc2-4.2 { + file delete -force test2.db + sqlite3 db2 test2.db + db2 eval "PRAGMA encoding = 'UTF-16'" + db2 eval "CREATE TABLE abc(a, b, c);" + } {} + do_test enc2-4.3 { + catchsql { + ATTACH 'test2.db' as aux; + } + } {1 {attached databases must use the same text encoding as main database}} + db2 close + db close +} # The following tests - enc2-5.* - test that SQLite selects the correct # collation sequence when more than one is available. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/enc3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/enc3.test --- sqlite3-3.4.2/test/enc3.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/enc3.test 2009-06-05 18:03:25.000000000 +0100 @@ -13,7 +13,7 @@ # The focus of this file is testing of the proper handling of conversions # to the native text representation. # -# $Id: enc3.test,v 1.6 2007/05/10 21:14:03 drh Exp $ +# $Id: enc3.test,v 1.8 2008/01/22 01:48:09 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -77,5 +77,31 @@ } {1} } +# Try to attach a database with a different encoding. +# +ifcapable {utf16 && shared_cache} { + db close + file delete -force test8.db test8.db-journal + set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + sqlite3 dbaux test8.db + sqlite3 db test.db + db eval {SELECT 1 FROM sqlite_master LIMIT 1} + do_test enc3-3.1 { + dbaux eval { + PRAGMA encoding='utf8'; + CREATE TABLE t1(x); + PRAGMA encoding + } + } {UTF-8} + do_test enc3-3.2 { + catchsql { + ATTACH 'test.db' AS utf16; + SELECT 1 FROM utf16.sqlite_master LIMIT 1; + } dbaux + } {1 {attached databases must use the same text encoding as main database}} + dbaux close + file delete -force test8.db test8.db-journal + sqlite3_enable_shared_cache $::enable_shared_cache +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/eval.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/eval.test --- sqlite3-3.4.2/test/eval.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/eval.test 2009-06-12 03:37:53.000000000 +0100 @@ -0,0 +1,75 @@ +# 2008 July 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file experiments with recursion using the "test_eval()" SQL function +# in order to make sure that SQLite is reentrant. +# +# $Id: eval.test,v 1.2 2008/10/13 10:37:50 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a table to work with. 
+# +do_test eval-1.1 { + execsql { + CREATE TABLE t1(x INTEGER PRIMARY KEY); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 SELECT x+2 FROM t1; + INSERT INTO t1 SELECT x+4 FROM t1; + INSERT INTO t1 SELECT x+8 FROM t1; + INSERT INTO t1 SELECT x+16 FROM t1; + INSERT INTO t1 SELECT x+32 FROM t1; + INSERT INTO t1 SELECT x+64 FROM t1; + INSERT INTO t1 SELECT x+128 FROM t1; + INSERT INTO t1 SELECT x+256 FROM t1; + SELECT count(*), max(x) FROM t1; + } +} {512 512} +do_test eval-1.2 { + execsql { + SELECT x, test_eval('SELECT max(x) FROM t1 WHERE x<' || x) FROM t1 LIMIT 5 + } +} {1 {} 2 1 3 2 4 3 5 4} + +# Delete a row out from under a read cursor in the middle of +# collecting the arguments for a single row in a result set. +# Verify that subsequent rows come out as NULL. +# +do_test eval-2.1 { + execsql { + CREATE TABLE t2(x,y); + INSERT INTO t2 SELECT x, x+1 FROM t1 WHERE x<5; + SELECT x, test_eval('DELETE FROM t2 WHERE x='||x), y FROM t2; + } +} {1 {} {} 2 {} {} 3 {} {} 4 {} {}} +do_test eval-2.2 { + execsql { + SELECT * FROM t2 + } +} {} + +# Modify a row while it is being read. +# +do_test eval-3.1 { + execsql { + INSERT INTO t2 SELECT x, x+1 FROM t1 WHERE x<5; + SELECT x, test_eval('UPDATE t2 SET y=y+100 WHERE x='||x), y FROM t2; + } +} {1 {} 102 2 {} 103 3 {} 104 4 {} 105} + +do_test eval-4.1 { + execsql { SELECT test_eval('SELECT "abcdefghij"') } +} {abcdefghij} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/exclusive2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/exclusive2.test --- sqlite3-3.4.2/test/exclusive2.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/exclusive2.test 2009-06-12 03:37:53.000000000 +0100 @@ -10,7 +10,7 @@ #*********************************************************************** # This file implements regression tests for SQLite library. # -# $Id: exclusive2.test,v 1.8 2007/08/12 20:07:59 drh Exp $ +# $Id: exclusive2.test,v 1.10 2008/11/27 02:22:11 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -25,9 +25,14 @@ # sqlite3_soft_heap_limit 0 -proc pagerChangeCounter {filename {new ""}} { - set fd [open $filename RDWR] - fconfigure $fd -translation binary -encoding binary +proc pagerChangeCounter {filename new {fd ""}} { + if {$fd==""} { + set fd [open $filename RDWR] + fconfigure $fd -translation binary -encoding binary + set needClose 1 + } else { + set needClose 0 + } if {$new ne ""} { seek $fd 24 set a [expr {($new&0xFF000000)>>24}] @@ -46,7 +51,7 @@ incr ret [expr ($c&0x000000FF)<<8] incr ret [expr ($d&0x000000FF)<<0] - close $fd + if {$needClose} {close $fd} return $ret } @@ -174,6 +179,7 @@ # to prevent memory-induced cache spills. 
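#
# [editor note -- not part of the upstream patch] pagerChangeCounter above
# reads and writes the 4-byte big-endian "file change counter" at byte
# offset 24 of the database header, which SQLite increments when the
# database file is modified so that other connections discard stale cached
# pages.  The shift-and-add sequence in the proc is plain big-endian byte
# packing, equivalent to:
#
#   set ret [expr {($a<<24) | ($b<<16) | ($c<<8) | $d}]
#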
# do_test exclusive2-2.1 { + execsql {PRAGMA cache_size=1000;} execsql {PRAGMA locking_mode = exclusive;} execsql { BEGIN; @@ -207,16 +213,16 @@ } $::sig do_test exclusive2-2.4 { - set fd [open test.db RDWR] - seek $fd 1024 - puts -nonewline $fd [string repeat [binary format c 0] 10000] - flush $fd - close $fd + set ::fd [open test.db RDWR] + fconfigure $::fd -translation binary + seek $::fd 1024 + puts -nonewline $::fd [string repeat [binary format c 0] 10000] + flush $::fd t1sig } $::sig do_test exclusive2-2.5 { - pagerChangeCounter test.db 5 + pagerChangeCounter test.db 5 $::fd } {5} do_test exclusive2-2.6 { t1sig @@ -240,6 +246,7 @@ db close db2 close +catch {close $::fd} file delete -force test.db file delete -force test.db-journal diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/exclusive3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/exclusive3.test --- sqlite3-3.4.2/test/exclusive3.test 2007-03-30 17:01:55.000000000 +0100 +++ sqlite3-3.6.16/test/exclusive3.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -# 2007 March 26 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# -# This file runs the tests in the file ioerr.test with -# exclusive access mode enabled. -# -# $Id: exclusive3.test,v 1.3 2007/03/30 16:01:55 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -ifcapable {!pager_pragmas} { - finish_test - return -} - -rename finish_test really_finish_test2 -proc finish_test {} {} -set ISQUICK 1 - -rename sqlite3 real_sqlite3 -proc sqlite3 {args} { - set r [eval "real_sqlite3 $args"] - if { [llength $args] == 2 } { - [lindex $args 0] eval {pragma locking_mode = exclusive} - } - set r -} - -rename do_test really_do_test -proc do_test {args} { - set sc [concat really_do_test "exclusive-[lindex $args 0]" \ - [lrange $args 1 end]] - eval $sc -} - -#source $testdir/rollback.test -#source $testdir/select1.test -#source $testdir/select2.test - -source $testdir/malloc.test -source $testdir/ioerr.test - - -rename sqlite3 "" -rename real_sqlite3 sqlite3 -rename finish_test "" -rename really_finish_test2 finish_test -rename do_test "" -rename really_do_test do_test -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/exclusive.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/exclusive.test --- sqlite3-3.4.2/test/exclusive.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/exclusive.test 2009-06-26 15:19:54.000000000 +0100 @@ -12,7 +12,7 @@ # of these tests is exclusive access mode (i.e. the thing activated by # "PRAGMA locking_mode = EXCLUSIVE"). # -# $Id: exclusive.test,v 1.6 2007/08/12 20:07:59 drh Exp $ +# $Id: exclusive.test,v 1.15 2009/06/26 12:30:40 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -29,15 +29,6 @@ file delete -force test4.db-journal file delete -force test4.db -# The locking mode for the TEMP table is always "exclusive" for -# on-disk tables and "normal" for in-memory tables. -# -if {[info exists TEMP_STORE] && $TEMP_STORE>=2} { - set temp_mode normal -} else { - set temp_mode exclusive -} - #---------------------------------------------------------------------- # Test cases exclusive-1.X test the PRAGMA logic. 
# @@ -47,7 +38,7 @@ pragma main.locking_mode; pragma temp.locking_mode; } -} [list normal normal $temp_mode] +} [list normal normal exclusive] do_test exclusive-1.1 { execsql { pragma locking_mode = exclusive; @@ -59,7 +50,7 @@ pragma main.locking_mode; pragma temp.locking_mode; } -} [list exclusive exclusive $temp_mode] +} [list exclusive exclusive exclusive] do_test exclusive-1.3 { execsql { pragma locking_mode = normal; @@ -71,7 +62,7 @@ pragma main.locking_mode; pragma temp.locking_mode; } -} [list normal normal $temp_mode] +} [list normal normal exclusive] do_test exclusive-1.5 { execsql { pragma locking_mode = invalid; @@ -83,83 +74,85 @@ pragma main.locking_mode; pragma temp.locking_mode; } -} [list normal normal $temp_mode] -do_test exclusive-1.7 { - execsql { - pragma locking_mode = exclusive; - ATTACH 'test2.db' as aux; - } - execsql { - pragma main.locking_mode; - pragma aux.locking_mode; - } -} {exclusive exclusive} -do_test exclusive-1.8 { - execsql { - pragma main.locking_mode = normal; - } - execsql { - pragma main.locking_mode; - pragma temp.locking_mode; - pragma aux.locking_mode; - } -} [list normal $temp_mode exclusive] -do_test exclusive-1.9 { - execsql { - pragma locking_mode; - } -} {exclusive} -do_test exclusive-1.10 { - execsql { - ATTACH 'test3.db' as aux2; - } - execsql { - pragma main.locking_mode; - pragma aux.locking_mode; - pragma aux2.locking_mode; - } -} {normal exclusive exclusive} -do_test exclusive-1.11 { - execsql { - pragma aux.locking_mode = normal; - } - execsql { - pragma main.locking_mode; - pragma aux.locking_mode; - pragma aux2.locking_mode; - } -} {normal normal exclusive} -do_test exclusive-1.12 { - execsql { - pragma locking_mode = normal; - } - execsql { - pragma main.locking_mode; - pragma temp.locking_mode; - pragma aux.locking_mode; - pragma aux2.locking_mode; - } -} [list normal $temp_mode normal normal] -do_test exclusive-1.13 { - execsql { - ATTACH 'test4.db' as aux3; - } - execsql { - pragma main.locking_mode; - pragma temp.locking_mode; - pragma aux.locking_mode; - pragma aux2.locking_mode; - pragma aux3.locking_mode; - } -} [list normal $temp_mode normal normal normal] - -do_test exclusive-1.99 { - execsql { - DETACH aux; - DETACH aux2; - DETACH aux3; - } -} {} +} [list normal normal exclusive] +ifcapable attach { + do_test exclusive-1.7 { + execsql { + pragma locking_mode = exclusive; + ATTACH 'test2.db' as aux; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + } + } {exclusive exclusive} + do_test exclusive-1.8 { + execsql { + pragma main.locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + } + } [list normal exclusive exclusive] + do_test exclusive-1.9 { + execsql { + pragma locking_mode; + } + } {exclusive} + do_test exclusive-1.10 { + execsql { + ATTACH 'test3.db' as aux2; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } + } {normal exclusive exclusive} + do_test exclusive-1.11 { + execsql { + pragma aux.locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } + } {normal normal exclusive} + do_test exclusive-1.12 { + execsql { + pragma locking_mode = normal; + } + execsql { + pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + } + } [list normal exclusive normal normal] + do_test exclusive-1.13 { + execsql { + ATTACH 'test4.db' as aux3; + } + execsql { + 
pragma main.locking_mode; + pragma temp.locking_mode; + pragma aux.locking_mode; + pragma aux2.locking_mode; + pragma aux3.locking_mode; + } + } [list normal exclusive normal normal normal] + + do_test exclusive-1.99 { + execsql { + DETACH aux; + DETACH aux2; + DETACH aux3; + } + } {} +} #---------------------------------------------------------------------- # Test cases exclusive-2.X verify that connections in exclusive @@ -255,56 +248,63 @@ # truncates instead of deletes the journal file when committing # a transaction. # -proc filestate {fname} { - set exists 0 - set content 0 - if {[file exists $fname]} { - set exists 1 - set content [expr {[file size $fname] > 0}] - } - list $exists $content +# These tests are not run on windows because the windows backend +# opens the journal file for exclusive access, preventing its contents +# from being inspected externally. +# +if {$tcl_platform(platform) != "windows"} { + proc filestate {fname} { + set exists 0 + set content 0 + if {[file exists $fname]} { + set exists 1 + set hdr [hexio_read $fname 0 28] + set content [expr {0==[string match $hdr [string repeat 0 56]]}] + } + list $exists $content + } + do_test exclusive-3.0 { + filestate test.db-journal + } {0 0} + do_test exclusive-3.1 { + execsql { + PRAGMA locking_mode = exclusive; + BEGIN; + DELETE FROM abc; + } + filestate test.db-journal + } {1 1} + do_test exclusive-3.2 { + execsql { + COMMIT; + } + filestate test.db-journal + } {1 0} + do_test exclusive-3.3 { + execsql { + INSERT INTO abc VALUES('A', 'B', 'C'); + SELECT * FROM abc; + } + } {A B C} + do_test exclusive-3.4 { + execsql { + BEGIN; + UPDATE abc SET a = 1, b = 2, c = 3; + ROLLBACK; + SELECT * FROM abc; + } + } {A B C} + do_test exclusive-3.5 { + filestate test.db-journal + } {1 0} + do_test exclusive-3.6 { + execsql { + PRAGMA locking_mode = normal; + SELECT * FROM abc; + } + filestate test.db-journal + } {0 0} } -do_test exclusive-3.0 { - filestate test.db-journal -} {0 0} -do_test exclusive-3.1 { - execsql { - PRAGMA locking_mode = exclusive; - BEGIN; - DELETE FROM abc; - } - filestate test.db-journal -} {1 1} -do_test exclusive-3.2 { - execsql { - COMMIT; - } - filestate test.db-journal -} {1 0} -do_test exclusive-3.3 { - execsql { - INSERT INTO abc VALUES('A', 'B', 'C'); - SELECT * FROM abc; - } -} {A B C} -do_test exclusive-3.4 { - execsql { - BEGIN; - UPDATE abc SET a = 1, b = 2, c = 3; - ROLLBACK; - SELECT * FROM abc; - } -} {A B C} -do_test exclusive-3.5 { - filestate test.db-journal -} {1 0} -do_test exclusive-3.6 { - execsql { - PRAGMA locking_mode = normal; - SELECT * FROM abc; - } - filestate test.db-journal -} {0 0} #---------------------------------------------------------------------- # Tests exclusive-4.X - test that rollback works correctly when @@ -392,6 +392,17 @@ db close sqlite db test.db +# if we're using proxy locks, we use 3 filedescriptors for a db +# that is open but NOT writing changes, normally +# sqlite uses 1 (proxy locking adds the conch and the local lock) +set using_proxy 0 +foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set using_proxy $value +} +set extrafds 0 +if {$using_proxy!=0} { + set extrafds 2 +} do_test exclusive-5.0 { execsql { @@ -404,13 +415,15 @@ do_test exclusive-5.1 { # Three files are open: The db, journal and statement-journal. set sqlite_open_file_count -} {3} + expr $sqlite_open_file_count-$extrafds +} [expr 3 - ($TEMP_STORE>=2)] do_test exclusive-5.2 { execsql { COMMIT; } # One file open: the db. 
set sqlite_open_file_count + expr $sqlite_open_file_count-$extrafds } {1} do_test exclusive-5.3 { execsql { @@ -420,6 +433,7 @@ } # Two files open: the db and journal. set sqlite_open_file_count + expr $sqlite_open_file_count-$extrafds } {2} do_test exclusive-5.4 { execsql { @@ -427,14 +441,16 @@ } # Three files are open: The db, journal and statement-journal. set sqlite_open_file_count -} {3} + expr $sqlite_open_file_count-$extrafds +} [expr 3 - ($TEMP_STORE>=2)] do_test exclusive-5.5 { execsql { COMMIT; } # Three files are still open: The db, journal and statement-journal. set sqlite_open_file_count -} {3} + expr $sqlite_open_file_count-$extrafds +} [expr 3 - ($TEMP_STORE>=2)] do_test exclusive-5.6 { execsql { PRAGMA locking_mode = normal; @@ -444,6 +460,7 @@ do_test exclusive-5.7 { # Just the db open. set sqlite_open_file_count + expr $sqlite_open_file_count-$extrafds } {1} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/exec.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/exec.test --- sqlite3-3.4.2/test/exec.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/exec.test 2009-06-05 18:03:25.000000000 +0100 @@ -0,0 +1,37 @@ +# 2008 Jan 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for the sqlite3_exec interface +# +# $Id: exec.test,v 1.1 2008/01/21 16:22:46 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test exec-1.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + SELECT * FROM t1; + } +} {1 2} +do_test exec-1.2 { + sqlite3_exec db {/* comment */;;; SELECT * FROM t1; /* comment */} +} {0 {a b 1 2}} +do_test exec-1.3 { + sqlite3 db2 test.db + db2 eval {CREATE TABLE t2(x, y);} + db2 close + sqlite3_exec db {SELECT * FROM t1} +} {0 {a b 1 2}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/expr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/expr.test --- sqlite3-3.4.2/test/expr.test 2007-07-23 19:57:54.000000000 +0100 +++ sqlite3-3.6.16/test/expr.test 2009-06-12 03:37:53.000000000 +0100 @@ -11,15 +11,22 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing expressions. # -# $Id: expr.test,v 1.57 2007/06/26 11:13:27 danielk1977 Exp $ +# $Id: expr.test,v 1.67 2009/02/04 03:59:25 shane Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Create a table to work with. 
# -execsql {CREATE TABLE test1(i1 int, i2 int, r1 real, r2 real, t1 text, t2 text)} -execsql {INSERT INTO test1 VALUES(1,2,1.1,2.2,'hello','world')} +ifcapable floatingpoint { + execsql {CREATE TABLE test1(i1 int, i2 int, r1 real, r2 real, t1 text, t2 text)} + execsql {INSERT INTO test1 VALUES(1,2,1.1,2.2,'hello','world')} +} +ifcapable !floatingpoint { + execsql {CREATE TABLE test1(i1 int, i2 int, t1 text, t2 text)} + execsql {INSERT INTO test1 VALUES(1,2,'hello','world')} +} + proc test_expr {name settings expr result} { do_test $name [format { execsql {BEGIN; UPDATE test1 SET %s; SELECT %s FROM test1; ROLLBACK;} @@ -47,8 +54,10 @@ test_expr expr-1.19 {i1=20, i2=20} {i2=i1} 1 test_expr expr-1.20 {i1=20, i2=20} {i2<>i1} 0 test_expr expr-1.21 {i1=20, i2=20} {i2==i1} 1 -test_expr expr-1.22 {i1=1, i2=2, r1=3.0} {i1+i2*r1} {7.0} -test_expr expr-1.23 {i1=1, i2=2, r1=3.0} {(i1+i2)*r1} {9.0} +ifcapable floatingpoint { + test_expr expr-1.22 {i1=1, i2=2, r1=3.0} {i1+i2*r1} {7.0} + test_expr expr-1.23 {i1=1, i2=2, r1=3.0} {(i1+i2)*r1} {9.0} +} test_expr expr-1.24 {i1=1, i2=2} {min(i1,i2,i1+i2,i1-i2)} {-1} test_expr expr-1.25 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3} test_expr expr-1.26 {i1=1, i2=2} {max(i1,i2,i1+i2,i1-i2)} {3} @@ -72,6 +81,7 @@ test_expr expr-1.43 {i1=1, i2=2} {i1&i2} {0} test_expr expr-1.43b {i1=1, i2=2} {4&5} {4} test_expr expr-1.44 {i1=1} {~i1} {-2} +test_expr expr-1.44b {i1=NULL} {~i1} {{}} test_expr expr-1.45 {i1=1, i2=3} {i1<>i2} {4} test_expr expr-1.47 {i1=9999999999, i2=8888888888} {i11} 1 +ifcapable floatingpoint { + test_expr expr-1.103 {i1=0} {(-2147483648.0 % -1)} 0.0 + test_expr expr-1.104 {i1=0} {(-9223372036854775808.0 % -1)} 0.0 + test_expr expr-1.105 {i1=0} {(-9223372036854775808.0 / -1)>1} 1 +} + +if {[working_64bit_int]} { + test_expr expr-1.106 {i1=0} {(1<<63)/-1} -9223372036854775808 +} -test_expr expr-1.106 {i1=0} {(1<<63)/-1} -9223372036854775808 test_expr expr-1.107 {i1=0} {(1<<63)%-1} 0 +test_expr expr-1.108 {i1=0} {1%0} {{}} +test_expr expr-1.109 {i1=0} {1/0} {{}} + +if {[working_64bit_int]} { + test_expr expr-1.110 {i1=0} {-9223372036854775807/-1} 9223372036854775807 +} -test_expr expr-2.1 {r1=1.23, r2=2.34} {r1+r2} 3.57 -test_expr expr-2.2 {r1=1.23, r2=2.34} {r1-r2} -1.11 -test_expr expr-2.3 {r1=1.23, r2=2.34} {r1*r2} 2.8782 +ifcapable floatingpoint { + test_expr expr-2.1 {r1=1.23, r2=2.34} {r1+r2} 3.57 + test_expr expr-2.2 {r1=1.23, r2=2.34} {r1-r2} -1.11 + test_expr expr-2.3 {r1=1.23, r2=2.34} {r1*r2} 2.8782 +} set tcl_precision 15 -test_expr expr-2.4 {r1=1.23, r2=2.34} {r1/r2} 0.525641025641026 -test_expr expr-2.5 {r1=1.23, r2=2.34} {r2/r1} 1.90243902439024 -test_expr expr-2.6 {r1=1.23, r2=2.34} {r2r1} 1 -test_expr expr-2.9 {r1=1.23, r2=2.34} {r2>=r1} 1 -test_expr expr-2.10 {r1=1.23, r2=2.34} {r2!=r1} 1 -test_expr expr-2.11 {r1=1.23, r2=2.34} {r2=r1} 0 -test_expr expr-2.12 {r1=1.23, r2=2.34} {r2<>r1} 1 -test_expr expr-2.13 {r1=1.23, r2=2.34} {r2==r1} 0 -test_expr expr-2.14 {r1=2.34, r2=2.34} {r2r1} 0 -test_expr expr-2.17 {r1=2.34, r2=2.34} {r2>=r1} 1 -test_expr expr-2.18 {r1=2.34, r2=2.34} {r2!=r1} 0 -test_expr expr-2.19 {r1=2.34, r2=2.34} {r2=r1} 1 -test_expr expr-2.20 {r1=2.34, r2=2.34} {r2<>r1} 0 -test_expr expr-2.21 {r1=2.34, r2=2.34} {r2==r1} 1 -test_expr expr-2.22 {r1=1.23, r2=2.34} {min(r1,r2,r1+r2,r1-r2)} {-1.11} -test_expr expr-2.23 {r1=1.23, r2=2.34} {max(r1,r2,r1+r2,r1-r2)} {3.57} -test_expr expr-2.24 {r1=25.0, r2=11.0} {r1%r2} 3.0 -test_expr expr-2.25 {r1=1.23, r2=NULL} {coalesce(r1+r2,99.0)} 99.0 -test_expr expr-2.26 {r1=1e300, r2=1e300} 
{coalesce((r1*r2)*0.0,99.0)} 99.0 +ifcapable floatingpoint { + test_expr expr-2.4 {r1=1.23, r2=2.34} {r1/r2} 0.525641025641026 + test_expr expr-2.5 {r1=1.23, r2=2.34} {r2/r1} 1.90243902439024 + test_expr expr-2.6 {r1=1.23, r2=2.34} {r2r1} 1 + test_expr expr-2.9 {r1=1.23, r2=2.34} {r2>=r1} 1 + test_expr expr-2.10 {r1=1.23, r2=2.34} {r2!=r1} 1 + test_expr expr-2.11 {r1=1.23, r2=2.34} {r2=r1} 0 + test_expr expr-2.12 {r1=1.23, r2=2.34} {r2<>r1} 1 + test_expr expr-2.13 {r1=1.23, r2=2.34} {r2==r1} 0 + test_expr expr-2.14 {r1=2.34, r2=2.34} {r2r1} 0 + test_expr expr-2.17 {r1=2.34, r2=2.34} {r2>=r1} 1 + test_expr expr-2.18 {r1=2.34, r2=2.34} {r2!=r1} 0 + test_expr expr-2.19 {r1=2.34, r2=2.34} {r2=r1} 1 + test_expr expr-2.20 {r1=2.34, r2=2.34} {r2<>r1} 0 + test_expr expr-2.21 {r1=2.34, r2=2.34} {r2==r1} 1 + test_expr expr-2.22 {r1=1.23, r2=2.34} {min(r1,r2,r1+r2,r1-r2)} {-1.11} + test_expr expr-2.23 {r1=1.23, r2=2.34} {max(r1,r2,r1+r2,r1-r2)} {3.57} + test_expr expr-2.24 {r1=25.0, r2=11.0} {r1%r2} 3.0 + test_expr expr-2.25 {r1=1.23, r2=NULL} {coalesce(r1+r2,99.0)} 99.0 + test_expr expr-2.26 {r1=1e300, r2=1e300} {coalesce((r1*r2)*0.0,99.0)} 99.0 + test_expr expr-2.26b {r1=1e300, r2=-1e300} {coalesce((r1*r2)*0.0,99.0)} 99.0 + test_expr expr-2.27 {r1=1.1, r2=0.0} {r1/r2} {{}} + test_expr expr-2.28 {r1=1.1, r2=0.0} {r1%r2} {{}} +} test_expr expr-3.1 {t1='abc', t2='xyz'} {t1r2} 0 -test_expr expr-4.11 {r1='abc', r2='Abc'} {r1r2} 1 -test_expr expr-4.13 {r1='abc', r2='Bbc'} {r1r2} 1 -test_expr expr-4.15 {r1='0', r2='0.0'} {r1==r2} 1 -test_expr expr-4.16 {r1='0.000', r2='0.0'} {r1==r2} 1 -test_expr expr-4.17 {r1=' 0.000', r2=' 0.0'} {r1==r2} 0 -test_expr expr-4.18 {r1='0.0', r2='abc'} {r1r2} 0 + +ifcapable floatingpoint { + test_expr expr-4.10 {r1='0.0', r2='abc'} {r1>r2} 0 + test_expr expr-4.11 {r1='abc', r2='Abc'} {r1r2} 1 + test_expr expr-4.13 {r1='abc', r2='Bbc'} {r1r2} 1 + test_expr expr-4.15 {r1='0', r2='0.0'} {r1==r2} 1 + test_expr expr-4.16 {r1='0.000', r2='0.0'} {r1==r2} 1 + test_expr expr-4.17 {r1=' 0.000', r2=' 0.0'} {r1==r2} 0 + test_expr expr-4.18 {r1='0.0', r2='abc'} {r1r2} 0 +} # CSL is true if LIKE is case sensitive and false if not. # NCSL is the opposite. 
Use these variables as the result @@ -272,6 +305,8 @@ test_expr expr-5.11 {t1='abc', t2='xyz'} {t1 NOT LIKE t2} 1 test_expr expr-5.12a {t1='abc', t2='abc'} {t1 NOT LIKE t2} 0 test_expr expr-5.12b {t1='abc', t2='ABC'} {t1 NOT LIKE t2} $CSL +test_expr expr-5.13 {t1='A'} {t1 LIKE 'A%_'} 0 +test_expr expr-5.14 {t1='AB'} {t1 LIKE 'A%b' ESCAPE 'b'} 0 # The following tests only work on versions of TCL that support Unicode # @@ -509,7 +544,7 @@ execsql {DROP TABLE test1} execsql {CREATE TABLE test1(a int, b int);} for {set i 1} {$i<=20} {incr i} { - execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + execsql "INSERT INTO test1 VALUES($i,[expr {1<<$i}])" } execsql "INSERT INTO test1 VALUES(NULL,0)" do_test expr-7.1 { @@ -566,7 +601,9 @@ test_expr2 expr-7.37 {a<2 OR (b=0 OR a<0)} {{} 1} test_expr2 expr-7.38 {a<2 OR (a<0 AND b=0)} {1} test_expr2 expr-7.39 {a<2 OR (b=0 AND a<0)} {1} -test_expr2 expr-7.40 {((a<2 OR a IS NULL) AND b<3) OR b>1e10} {{} 1} +ifcapable floatingpoint { + test_expr2 expr-7.40 {((a<2 OR a IS NULL) AND b<3) OR b>1e10} {{} 1} +} test_expr2 expr-7.41 {a BETWEEN -1 AND 1} {1} test_expr2 expr-7.42 {a NOT BETWEEN 2 AND 100} {1} test_expr2 expr-7.43 {(b+1234)||'this is a string that is at least 32 characters long' BETWEEN 1 AND 2} {} @@ -597,19 +634,36 @@ test_expr2 expr-7.61 {GLOB('1?',a)} {10 11 12 13 14 15 16 17 18 19} test_expr2 expr-7.62 {GLOB('1*4',b)} {10 14} test_expr2 expr-7.63 {GLOB('*1[456]',b)} {4} +test_expr2 expr-7.64 {b = abs(-2)} {1} +test_expr2 expr-7.65 {b = abs(+-2)} {1} +test_expr2 expr-7.66 {b = abs(++-2)} {1} +test_expr2 expr-7.67 {b = abs(+-+-2)} {1} +test_expr2 expr-7.68 {b = abs(+-++-2)} {1} +test_expr2 expr-7.69 {b = abs(++++-2)} {1} +test_expr2 expr-7.70 {b = 5 - abs(+3)} {1} +test_expr2 expr-7.71 {b = 5 - abs(-3)} {1} +ifcapable floatingpoint { + test_expr2 expr-7.72 {b = abs(-2.0)} {1} +} +test_expr2 expr-7.73 {b = 6 - abs(-a)} {2} +ifcapable floatingpoint { + test_expr2 expr-7.74 {b = abs(8.0)} {3} +} # Test the CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP expressions. 
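#
# [editor note -- not part of the upstream patch] These tests pin the clock
# by setting the test-only global sqlite_current_time (seconds since the
# Unix epoch) so that CURRENT_TIME/CURRENT_DATE/CURRENT_TIMESTAMP are
# deterministic; 1157124849 is 2006-09-01 15:34:09 UTC.  In outline:
#
#   set sqlite_current_time 1157124849
#   db eval {SELECT CURRENT_TIMESTAMP}   ;# {2006-09-01 15:34:09}
#   set sqlite_current_time 0            ;# back to the real clock
#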
# -set sqlite_current_time 1157124849 -do_test expr-8.1 { - execsql {SELECT CURRENT_TIME} -} {15:34:09} -do_test expr-8.2 { - execsql {SELECT CURRENT_DATE} -} {2006-09-01} -do_test expr-8.3 { - execsql {SELECT CURRENT_TIMESTAMP} -} {{2006-09-01 15:34:09}} +ifcapable {floatingpoint} { + set sqlite_current_time 1157124849 + do_test expr-8.1 { + execsql {SELECT CURRENT_TIME} + } {15:34:09} + do_test expr-8.2 { + execsql {SELECT CURRENT_DATE} + } {2006-09-01} + do_test expr-8.3 { + execsql {SELECT CURRENT_TIMESTAMP} + } {{2006-09-01 15:34:09}} +} ifcapable datetime { do_test expr-8.4 { execsql {SELECT CURRENT_TIME==time('now');} @@ -623,9 +677,11 @@ } set sqlite_current_time 0 -do_test expr-9.1 { - execsql {SELECT round(-('-'||'123'))} -} 123.0 +ifcapable floatingpoint { + do_test expr-9.1 { + execsql {SELECT round(-('-'||'123'))} + } 123.0 +} # Test an error message that can be generated by the LIKE expression do_test expr-10.1 { @@ -668,12 +724,14 @@ do_test expr-11.12 { execsql {SELECT typeof(-00000009223372036854775808)} } {integer} -do_test expr-11.13 { - execsql {SELECT typeof(-9223372036854775809)} -} {real} -do_test expr-11.14 { - execsql {SELECT typeof(-00000009223372036854775809)} -} {real} +ifcapable floatingpoint { + do_test expr-11.13 { + execsql {SELECT typeof(-9223372036854775809)} + } {real} + do_test expr-11.14 { + execsql {SELECT typeof(-00000009223372036854775809)} + } {real} +} # These two statements used to leak memory (because of missing %destructor # directives in parse.y). @@ -688,4 +746,57 @@ } } {1 {near ")": syntax error}} +ifcapable floatingpoint { + do_test expr-13.1 { + execsql { + SELECT 12345678901234567890; + } + } {1.23456789012346e+19} +} + +# Implicit String->Integer conversion is used when possible. +# +if {[working_64bit_int]} { + do_test expr-13.2 { + execsql { + SELECT 0+'9223372036854775807' + } + } {9223372036854775807} + do_test expr-13.3 { + execsql { + SELECT '9223372036854775807'+0 + } + } {9223372036854775807} +} + +# If the value is too large, use String->Float conversion. +# +ifcapable floatingpoint { + do_test expr-13.4 { + execsql { + SELECT 0+'9223372036854775808' + } + } {9.22337203685478e+18} + do_test expr-13.5 { + execsql { + SELECT '9223372036854775808'+0 + } + } {9.22337203685478e+18} +} + +# Use String->float conversion if the value is explicitly a floating +# point value. +# +do_test expr-13.6 { + execsql { + SELECT 0+'9223372036854775807.0' + } +} {9.22337203685478e+18} +do_test expr-13.7 { + execsql { + SELECT '9223372036854775807.0'+0 + } +} {9.22337203685478e+18} + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/filectrl.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/filectrl.test --- sqlite3-3.4.2/test/filectrl.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/filectrl.test 2009-06-12 03:37:53.000000000 +0100 @@ -0,0 +1,41 @@ +# 2008 Jan 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: filectrl.test,v 1.2 2008/11/21 00:10:35 aswift Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +do_test filectrl-1.1 { + file_control_test db +} {} +do_test filectrl-1.2 { + db eval {CREATE TEMP TABLE x(y);} + file_control_test db +} {} +do_test filectrl-1.3 { + db close + sqlite3 db :memory: + file_control_test db +} {} +do_test filectrl-1.4 { + sqlite3 db test.db + file_control_lasterrno_test db +} {} +do_test filectrl-1.5 { + db close + sqlite3 db test_control_lockproxy.db + file_control_lockproxy_test db +} {} +db close +file delete -force .test_control_lockproxy.db-conch test.proxy +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/filefmt.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/filefmt.test --- sqlite3-3.4.2/test/filefmt.test 2007-04-06 22:42:22.000000000 +0100 +++ sqlite3-3.6.16/test/filefmt.test 2009-06-25 12:45:58.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests to verify database file format. # -# $Id: filefmt.test,v 1.2 2007/04/06 21:42:22 drh Exp $ +# $Id: filefmt.test,v 1.3 2009/06/18 11:34:43 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -94,7 +94,7 @@ } {1 {file is encrypted or is not a database}} # Usable space per page (page-size minus unused space per page) -# must be at least 500 bytes +# must be at least 480 bytes # ifcapable pager_pragmas { do_test filefmt-1.8 { @@ -103,7 +103,7 @@ sqlite3 db test.db db eval {PRAGMA page_size=512; CREATE TABLE t1(x)} db close - hexio_write test.db 20 10 + hexio_write test.db 20 21 sqlite3 db test.db catchsql { SELECT count(*) FROM sqlite_master diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fkey1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fkey1.test --- sqlite3-3.4.2/test/fkey1.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/fkey1.test 2009-06-12 03:37:53.000000000 +0100 @@ -71,7 +71,51 @@ } } {} - - +do_test fkey1-3.1 { + execsql { + CREATE TABLE t5(a PRIMARY KEY, b, c); + CREATE TABLE t6( + d REFERENCES t5, + e REFERENCES t5(c) + ); + PRAGMA foreign_key_list(t6); + } +} [concat \ + {0 0 t5 e c RESTRICT RESTRICT NONE} \ + {1 0 t5 d {} RESTRICT RESTRICT NONE} \ +] +do_test fkey1-3.2 { + execsql { + CREATE TABLE t7(d, e, f, + FOREIGN KEY (d, e) REFERENCES t5(a, b) + ); + PRAGMA foreign_key_list(t7); + } +} [concat \ + {0 0 t5 d a RESTRICT RESTRICT NONE} \ + {0 1 t5 e b RESTRICT RESTRICT NONE} \ +] +do_test fkey1-3.3 { + execsql { + CREATE TABLE t8(d, e, f, + FOREIGN KEY (d, e) REFERENCES t5 ON DELETE CASCADE ON UPDATE SET NULL + ); + PRAGMA foreign_key_list(t8); + } +} [concat \ + {0 0 t5 d {} {SET NULL} CASCADE NONE} \ + {0 1 t5 e {} {SET NULL} CASCADE NONE} \ +] +do_test fkey1-3.4 { + execsql { + CREATE TABLE t9(d, e, f, + FOREIGN KEY (d, e) REFERENCES t5 ON DELETE CASCADE ON UPDATE SET DEFAULT + ); + PRAGMA foreign_key_list(t9); + } +} [concat \ + {0 0 t5 d {} {SET DEFAULT} CASCADE NONE} \ + {0 1 t5 e {} {SET DEFAULT} CASCADE NONE} \ +] finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts1k.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts1k.test --- sqlite3-3.4.2/test/fts1k.test 2007-03-29 17:30:41.000000000 +0100 +++ sqlite3-3.6.16/test/fts1k.test 2009-06-05 18:03:28.000000000 +0100 @@ -6,10 +6,10 @@ # This file implements regression tests for SQLite library. The focus # of this script is testing isspace/isalnum/tolower problems with the # FTS1 module. 
Unfortunately, this code isn't a really principled set -# of tests, because it's impossible to know where new uses of these +# of tests, because it is impossible to know where new uses of these # functions might appear. # -# $Id: fts1k.test,v 1.1 2007/03/29 16:30:41 shess Exp $ +# $Id: fts1k.test,v 1.2 2007/12/13 21:54:11 drh Exp $ # set testdir [file dirname $argv0] diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts1o.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts1o.test --- sqlite3-3.4.2/test/fts1o.test 2007-07-25 01:56:10.000000000 +0100 +++ sqlite3-3.6.16/test/fts1o.test 2009-06-05 18:03:28.000000000 +0100 @@ -12,7 +12,7 @@ # of this script is testing the FTS1 module rename functionality. Mostly # copied from fts2o.test. # -# $Id: fts1o.test,v 1.1 2007/07/25 00:56:10 shess Exp $ +# $Id: fts1o.test,v 1.2 2007/08/30 20:01:33 shess Exp $ # set testdir [file dirname $argv0] @@ -25,7 +25,7 @@ } db eval { - CREATE VIRTUAL TABLE t1 USING fts2(a, b, c); + CREATE VIRTUAL TABLE t1 USING fts1(a, b, c); INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); } @@ -34,7 +34,7 @@ # do_test fts1o-1.1 { execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} -} {t1 t1_content t1_segments t1_segdir} +} {t1 t1_content t1_term} do_test fts1o-1.2 { execsql { ALTER TABLE t1 RENAME to fts_t1; } } {} @@ -43,13 +43,13 @@ } {1 {one three four}} do_test fts1o-1.4 { execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} -} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir} +} {fts_t1 fts_t1_content fts_t1_term} # See what happens when renaming the fts1 table fails. # do_test fts1o-2.1 { catchsql { - CREATE TABLE t1_segdir(a, b, c); + CREATE TABLE t1_term(a, b, c); ALTER TABLE fts_t1 RENAME to t1; } } {1 {SQL logic error or missing database}} @@ -58,7 +58,7 @@ } {1 {one three four}} do_test fts1o-2.3 { execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} -} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} +} {fts_t1 fts_t1_content fts_t1_term t1_term} # See what happens when renaming the fts1 table fails inside a transaction. # @@ -73,12 +73,16 @@ ALTER TABLE fts_t1 RENAME to t1; } } {1 {SQL logic error or missing database}} +# NOTE(shess) rowid AS rowid to defeat caching. Otherwise, this +# seg-faults, I suspect that there's something up with a stale +# virtual-table reference, but I'm not quite sure how it happens here +# but not for fts2o.test. do_test fts1o-3.3 { - execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } + execsql { SELECT rowid AS rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } } {1 {one three four}} do_test fts1o-3.4 { execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} -} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} +} {fts_t1 fts_t1_content fts_t1_term t1_term} do_test fts1o-3.5 { execsql COMMIT execsql {SELECT a FROM fts_t1} @@ -95,7 +99,7 @@ do_test fts1o-4.1 { execsql { - DROP TABLE t1_segdir; + DROP TABLE t1_term; ALTER TABLE fts_t1 RENAME to t1; SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2g.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2g.test --- sqlite3-3.4.2/test/fts2g.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/fts2g.test 2009-06-05 18:03:28.000000000 +0100 @@ -7,7 +7,7 @@ # of this script is testing handling of edge cases for various doclist # merging functions in the FTS2 module query logic. 
# -# $Id: fts2g.test,v 1.2 2007/04/19 18:36:32 shess Exp $ +# $Id: fts2g.test,v 1.3 2007/11/16 00:23:08 shess Exp $ # set testdir [file dirname $argv0] @@ -84,4 +84,10 @@ execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'also OR this'} } {1 2} +# Empty left and right in docListOrMerge(). Each term matches neither +# row, and when combined there was an assertion failure. +do_test fts2g-1.13 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something OR nothing'} +} {} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2l.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2l.test --- sqlite3-3.4.2/test/fts2l.test 2007-03-29 17:30:41.000000000 +0100 +++ sqlite3-3.6.16/test/fts2l.test 2009-06-05 18:03:29.000000000 +0100 @@ -6,10 +6,10 @@ # This file implements regression tests for SQLite library. The focus # of this script is testing isspace/isalnum/tolower problems with the # FTS2 module. Unfortunately, this code isn't a really principled set -# of tests, because it's impossible to know where new uses of these +# of tests, because it is impossible to know where new uses of these # functions might appear. # -# $Id: fts2l.test,v 1.1 2007/03/29 16:30:41 shess Exp $ +# $Id: fts2l.test,v 1.2 2007/12/13 21:54:11 drh Exp $ # set testdir [file dirname $argv0] diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2n.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2n.test --- sqlite3-3.4.2/test/fts2n.test 2007-05-01 19:25:53.000000000 +0100 +++ sqlite3-3.6.16/test/fts2n.test 2009-06-05 18:03:29.000000000 +0100 @@ -6,7 +6,7 @@ # This file implements tests for prefix-searching in the fts2 # component of the SQLite library. # -# $Id: fts2n.test,v 1.1 2007/05/01 18:25:53 shess Exp $ +# $Id: fts2n.test,v 1.2 2007/12/13 21:54:11 drh Exp $ # set testdir [file dirname $argv0] @@ -190,7 +190,7 @@ # time. Prefix hits can cross leaves, which the code above _should_ # hit by virtue of size. There are two variations on this. If the # tree is 2 levels high, the code will find the leaf-node extent -# directly, but if it's higher, the code will have to follow two +# directly, but if its higher, the code will have to follow two # separate interior branches down the tree. Both should be tested. finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2p.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2p.test --- sqlite3-3.4.2/test/fts2p.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts2p.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,357 @@ +# 2008 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file exercises some new testing functions in the FTS2 module, +# and then uses them to do some basic tests that FTS2 is internally +# working as expected. +# +# $Id: fts2p.test,v 1.1 2008/07/22 23:32:28 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is not defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +#************************************************************************* +# Probe to see if support for these functions is compiled in. +# TODO(shess): Change main.mk to do the right thing and remove this test. 
+db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'x'); +} + +set s {SELECT dump_terms(t1, 1) FROM t1 LIMIT 1} +set r {1 {unable to use function dump_terms in the requested context}} +if {[catchsql $s]==$r} { + finish_test + return +} + +#************************************************************************* +# Test that the new functions give appropriate errors. +do_test fts2p-0.0 { + catchsql { + SELECT dump_terms(t1, 1) FROM t1 LIMIT 1; + } +} {1 {dump_terms: incorrect arguments}} + +do_test fts2p-0.1 { + catchsql { + SELECT dump_terms(t1, 0, 0, 0) FROM t1 LIMIT 1; + } +} {1 {dump_terms: incorrect arguments}} + +do_test fts2p-0.2 { + catchsql { + SELECT dump_terms(1, t1) FROM t1 LIMIT 1; + } +} {1 {unable to use function dump_terms in the requested context}} + +do_test fts2p-0.3 { + catchsql { + SELECT dump_terms(t1, 16, 16) FROM t1 LIMIT 1; + } +} {1 {dump_terms: segment not found}} + +do_test fts2p-0.4 { + catchsql { + SELECT dump_doclist(t1) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts2p-0.5 { + catchsql { + SELECT dump_doclist(t1, NULL) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: empty second argument}} + +do_test fts2p-0.6 { + catchsql { + SELECT dump_doclist(t1, '') FROM t1 LIMIT 1; + } +} {1 {dump_doclist: empty second argument}} + +do_test fts2p-0.7 { + catchsql { + SELECT dump_doclist(t1, 'a', 0) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts2p-0.8 { + catchsql { + SELECT dump_doclist(t1, 'a', 0, 0, 0) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts2p-0.9 { + catchsql { + SELECT dump_doclist(t1, 'a', 16, 16) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: segment not found}} + +#************************************************************************* +# Utility function to check for the expected terms in the segment +# level/index. _all version does same but for entire index. +proc check_terms {test level index terms} { + # TODO(shess): Figure out why uplevel in do_test can't catch + # $level and $index directly. + set ::level $level + set ::index $index + do_test $test.terms { + execsql { + SELECT dump_terms(t1, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $terms] +} +proc check_terms_all {test terms} { + do_test $test.terms { + execsql { + SELECT dump_terms(t1) FROM t1 LIMIT 1; + } + } [list $terms] +} + +# Utility function to check for the expected doclist for the term in +# segment level/index. _all version does same for entire index. +proc check_doclist {test level index term doclist} { + # TODO(shess): Again, why can't the non-:: versions work? + set ::term $term + set ::level $level + set ::index $index + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $doclist] +} +proc check_doclist_all {test term doclist} { + set ::term $term + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term) FROM t1 LIMIT 1; + } + } [list $doclist] +} + +#************************************************************************* +# Test the segments resulting from straight-forward inserts. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); +} + +# Check for expected segments and expected matches. 
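# (Editorial note) Two points worth spelling out before the checks:
#
# 1. The three INSERTs above run outside any transaction, so each
#    statement flushes its own level-0 segment and three segdir rows,
#    (0,0), (0,1) and (0,2), are expected.  (fts2q.test below makes the
#    same point: "Since there's no transaction, each of the
#    INSERT/UPDATE statements generates a segment.")
#
# 2. The doclist notation used with check_doclist / check_doclist_all
#    reads [<docid> <column>[<position> ...]]: for rowid 1,
#    c = 'This is a test', the term 'a' is token 2 of column 0, so its
#    posting is written [1 0[2]].  As a worked example, the following
#    raw query is equivalent to the fts2p-1.0.1.1 check further down:
do_test fts2p-note-0 {
  execsql { SELECT dump_doclist(t1, 'a') FROM t1 LIMIT 1 }
} {{[1 0[2]] [2 0[2]] [3 0[2]]}}
# With that in mind, the segment and match checks follow.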
+do_test fts2p-1.0.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2} +do_test fts2p-1.0.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +# Check the specifics of the segments constructed. +# Logical view of entire index. +check_terms_all fts2p-1.0.1 {a is test that this was} +check_doclist_all fts2p-1.0.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts2p-1.0.1.2 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts2p-1.0.1.3 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts2p-1.0.1.4 that {[2 0[0]]} +check_doclist_all fts2p-1.0.1.5 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts2p-1.0.1.6 was {[2 0[1]]} + +# Segment 0,0 +check_terms fts2p-1.0.2 0 0 {a is test this} +check_doclist fts2p-1.0.2.1 0 0 a {[1 0[2]]} +check_doclist fts2p-1.0.2.2 0 0 is {[1 0[1]]} +check_doclist fts2p-1.0.2.3 0 0 test {[1 0[3]]} +check_doclist fts2p-1.0.2.4 0 0 this {[1 0[0]]} + +# Segment 0,1 +check_terms fts2p-1.0.3 0 1 {a test that was} +check_doclist fts2p-1.0.3.1 0 1 a {[2 0[2]]} +check_doclist fts2p-1.0.3.2 0 1 test {[2 0[3]]} +check_doclist fts2p-1.0.3.3 0 1 that {[2 0[0]]} +check_doclist fts2p-1.0.3.4 0 1 was {[2 0[1]]} + +# Segment 0,2 +check_terms fts2p-1.0.4 0 2 {a is test this} +check_doclist fts2p-1.0.4.1 0 2 a {[3 0[2]]} +check_doclist fts2p-1.0.4.2 0 2 is {[3 0[1]]} +check_doclist fts2p-1.0.4.3 0 2 test {[3 0[3]]} +check_doclist fts2p-1.0.4.4 0 2 this {[3 0[0]]} + +#************************************************************************* +# Test the segments resulting from inserts followed by a delete. 
+db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE rowid = 1; +} + +do_test fts2p-1.1.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2 0 3} +do_test fts2p-1.1.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}} + +check_terms_all fts2p-1.1.1 {a is test that this was} +check_doclist_all fts2p-1.1.1.1 a {[2 0[2]] [3 0[2]]} +check_doclist_all fts2p-1.1.1.2 is {[3 0[1]]} +check_doclist_all fts2p-1.1.1.3 test {[2 0[3]] [3 0[3]]} +check_doclist_all fts2p-1.1.1.4 that {[2 0[0]]} +check_doclist_all fts2p-1.1.1.5 this {[3 0[0]]} +check_doclist_all fts2p-1.1.1.6 was {[2 0[1]]} + +check_terms fts2p-1.1.2 0 0 {a is test this} +check_doclist fts2p-1.1.2.1 0 0 a {[1 0[2]]} +check_doclist fts2p-1.1.2.2 0 0 is {[1 0[1]]} +check_doclist fts2p-1.1.2.3 0 0 test {[1 0[3]]} +check_doclist fts2p-1.1.2.4 0 0 this {[1 0[0]]} + +check_terms fts2p-1.1.3 0 1 {a test that was} +check_doclist fts2p-1.1.3.1 0 1 a {[2 0[2]]} +check_doclist fts2p-1.1.3.2 0 1 test {[2 0[3]]} +check_doclist fts2p-1.1.3.3 0 1 that {[2 0[0]]} +check_doclist fts2p-1.1.3.4 0 1 was {[2 0[1]]} + +check_terms fts2p-1.1.4 0 2 {a is test this} +check_doclist fts2p-1.1.4.1 0 2 a {[3 0[2]]} +check_doclist fts2p-1.1.4.2 0 2 is {[3 0[1]]} +check_doclist fts2p-1.1.4.3 0 2 test {[3 0[3]]} +check_doclist fts2p-1.1.4.4 0 2 this {[3 0[0]]} + +check_terms fts2p-1.1.5 0 3 {a is test this} +check_doclist fts2p-1.1.5.1 0 3 a {[1]} +check_doclist fts2p-1.1.5.2 0 3 is {[1]} +check_doclist fts2p-1.1.5.3 0 3 test {[1]} +check_doclist fts2p-1.1.5.4 0 3 this {[1]} + +#************************************************************************* +# Test results when all references to certain tokens are deleted. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE rowid IN (1,3); +} + +# Still 4 segments because 0,3 will contain deletes for rowid 1 and 3. 
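# (Editorial note) A bare docid with no position list -- the {[1]}
# entries asserted for segment 0,3 in fts2p-1.1.5 above -- is how these
# dumps represent a delete marker.  Rowids 1 and 3 were both deleted
# this time, so the newest segment's entries should read {[1] [3]}
# (asserted by fts2p-1.2.5 below), and the directory should again hold
# four level-0 segments, one per statement:
do_test fts2p-note-1 {
  execsql { SELECT count(*) FROM t1_segdir }
} {4}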
+do_test fts2p-1.2.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2 0 3} +do_test fts2p-1.2.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts2p-1.2.1 {a is test that this was} +check_doclist_all fts2p-1.2.1.1 a {[2 0[2]]} +check_doclist_all fts2p-1.2.1.2 is {} +check_doclist_all fts2p-1.2.1.3 test {[2 0[3]]} +check_doclist_all fts2p-1.2.1.4 that {[2 0[0]]} +check_doclist_all fts2p-1.2.1.5 this {} +check_doclist_all fts2p-1.2.1.6 was {[2 0[1]]} + +check_terms fts2p-1.2.2 0 0 {a is test this} +check_doclist fts2p-1.2.2.1 0 0 a {[1 0[2]]} +check_doclist fts2p-1.2.2.2 0 0 is {[1 0[1]]} +check_doclist fts2p-1.2.2.3 0 0 test {[1 0[3]]} +check_doclist fts2p-1.2.2.4 0 0 this {[1 0[0]]} + +check_terms fts2p-1.2.3 0 1 {a test that was} +check_doclist fts2p-1.2.3.1 0 1 a {[2 0[2]]} +check_doclist fts2p-1.2.3.2 0 1 test {[2 0[3]]} +check_doclist fts2p-1.2.3.3 0 1 that {[2 0[0]]} +check_doclist fts2p-1.2.3.4 0 1 was {[2 0[1]]} + +check_terms fts2p-1.2.4 0 2 {a is test this} +check_doclist fts2p-1.2.4.1 0 2 a {[3 0[2]]} +check_doclist fts2p-1.2.4.2 0 2 is {[3 0[1]]} +check_doclist fts2p-1.2.4.3 0 2 test {[3 0[3]]} +check_doclist fts2p-1.2.4.4 0 2 this {[3 0[0]]} + +check_terms fts2p-1.2.5 0 3 {a is test this} +check_doclist fts2p-1.2.5.1 0 3 a {[1] [3]} +check_doclist fts2p-1.2.5.2 0 3 is {[1] [3]} +check_doclist fts2p-1.2.5.3 0 3 test {[1] [3]} +check_doclist fts2p-1.2.5.4 0 3 this {[1] [3]} + +#************************************************************************* +# Test results when everything is optimized manually. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE rowid IN (1,3); + DROP TABLE IF EXISTS t1old; + ALTER TABLE t1 RENAME TO t1old; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) SELECT rowid, c FROM t1old; + DROP TABLE t1old; +} + +# Should be a single optimal segment with the same logical results. +do_test fts2p-1.3.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts2p-1.3.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts2p-1.3.1 {a test that was} +check_doclist_all fts2p-1.3.1.1 a {[2 0[2]]} +check_doclist_all fts2p-1.3.1.2 test {[2 0[3]]} +check_doclist_all fts2p-1.3.1.3 that {[2 0[0]]} +check_doclist_all fts2p-1.3.1.4 was {[2 0[1]]} + +check_terms fts2p-1.3.2 0 0 {a test that was} +check_doclist fts2p-1.3.2.1 0 0 a {[2 0[2]]} +check_doclist fts2p-1.3.2.2 0 0 test {[2 0[3]]} +check_doclist fts2p-1.3.2.3 0 0 that {[2 0[0]]} +check_doclist fts2p-1.3.2.4 0 0 was {[2 0[1]]} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2q.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2q.test --- sqlite3-3.4.2/test/fts2q.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts2q.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,346 @@ +# 2008 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the FTS2 module's optimize() function. +# +# $Id: fts2q.test,v 1.2 2008/07/22 23:49:44 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is not defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +#************************************************************************* +# Probe to see if support for the FTS2 dump_* functions is compiled in. +# TODO(shess): Change main.mk to do the right thing and remove this test. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'x'); +} + +set s {SELECT dump_terms(t1, 1) FROM t1 LIMIT 1} +set r {1 {unable to use function dump_terms in the requested context}} +if {[catchsql $s]==$r} { + finish_test + return +} + +#************************************************************************* +# Utility function to check for the expected terms in the segment +# level/index. _all version does same but for entire index. +proc check_terms {test level index terms} { + # TODO(shess): Figure out why uplevel in do_test can't catch + # $level and $index directly. + set ::level $level + set ::index $index + do_test $test.terms { + execsql { + SELECT dump_terms(t1, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $terms] +} +proc check_terms_all {test terms} { + do_test $test.terms { + execsql { + SELECT dump_terms(t1) FROM t1 LIMIT 1; + } + } [list $terms] +} + +# Utility function to check for the expected doclist for the term in +# segment level/index. _all version does same for entire index. +proc check_doclist {test level index term doclist} { + # TODO(shess): Again, why can't the non-:: versions work? + set ::term $term + set ::level $level + set ::index $index + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $doclist] +} +proc check_doclist_all {test term doclist} { + set ::term $term + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term) FROM t1 LIMIT 1; + } + } [list $doclist] +} + +#************************************************************************* +# Test results when all rows are deleted and one is added back. +# Previously older segments would continue to exist, but now the index +# should be dropped when the table is empty. The results should look +# exactly like we never added the earlier rows in the first place. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE 1=1; -- Delete each row rather than dropping table. + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); +} + +# Should be a single initial segment. 
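# (Editorial note) "A single initial segment" means exactly one row in
# the t1_segdir shadow table, at level 0, idx 0.  An equivalent probe,
# using only values that the checks below already assert:
do_test fts2q-note-1 {
  execsql { SELECT count(*), max(level), max(idx) FROM t1_segdir }
} {1 0 0}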
+do_test fts2q-1.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts2q-1.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}} + +check_terms_all fts2q-1.1 {a is test this} +check_doclist_all fts2q-1.1.1 a {[1 0[2]]} +check_doclist_all fts2q-1.1.2 is {[1 0[1]]} +check_doclist_all fts2q-1.1.3 test {[1 0[3]]} +check_doclist_all fts2q-1.1.4 this {[1 0[0]]} + +check_terms fts2q-1.2 0 0 {a is test this} +check_doclist fts2q-1.2.1 0 0 a {[1 0[2]]} +check_doclist fts2q-1.2.2 0 0 is {[1 0[1]]} +check_doclist fts2q-1.2.3 0 0 test {[1 0[3]]} +check_doclist fts2q-1.2.4 0 0 this {[1 0[0]]} + +#************************************************************************* +# Test results when everything is optimized manually. +# NOTE(shess): This is a copy of fts2c-1.3. I've pulled a copy here +# because fts2q-2 and fts2q-3 should have identical results. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE rowid IN (1,3); + DROP TABLE IF EXISTS t1old; + ALTER TABLE t1 RENAME TO t1old; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) SELECT rowid, c FROM t1old; + DROP TABLE t1old; +} + +# Should be a single optimal segment with the same logical results. +do_test fts2q-2.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts2q-2.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts2q-2.1 {a test that was} +check_doclist_all fts2q-2.1.1 a {[2 0[2]]} +check_doclist_all fts2q-2.1.2 test {[2 0[3]]} +check_doclist_all fts2q-2.1.3 that {[2 0[0]]} +check_doclist_all fts2q-2.1.4 was {[2 0[1]]} + +check_terms fts2q-2.2 0 0 {a test that was} +check_doclist fts2q-2.2.1 0 0 a {[2 0[2]]} +check_doclist fts2q-2.2.2 0 0 test {[2 0[3]]} +check_doclist fts2q-2.2.3 0 0 that {[2 0[0]]} +check_doclist fts2q-2.2.4 0 0 was {[2 0[1]]} + +#************************************************************************* +# Test results when everything is optimized via optimize(). +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE rowid IN (1,3); + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; +} + +# Should be a single optimal segment with the same logical results. 
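# (Editorial note) optimize() is invoked the same way as the other fts2
# table functions: the table itself is the first argument and the
# statement is limited to a single row, i.e.
#
#   SELECT optimize(t1) FROM t1 LIMIT 1;
#
# It returns 'Index optimized' when segments were merged and 'Index
# already optimal' when there was nothing to do (both strings are
# asserted by fts2q-4.5 and fts2q-5 below).  The db eval above has
# already run it, so only one segdir row should remain:
do_test fts2q-note-2 {
  execsql { SELECT count(*) FROM t1_segdir }
} {1}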
+do_test fts2q-3.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts2q-3.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts2q-3.1 {a test that was} +check_doclist_all fts2q-3.1.1 a {[2 0[2]]} +check_doclist_all fts2q-3.1.2 test {[2 0[3]]} +check_doclist_all fts2q-3.1.3 that {[2 0[0]]} +check_doclist_all fts2q-3.1.4 was {[2 0[1]]} + +check_terms fts2q-3.2 0 0 {a test that was} +check_doclist fts2q-3.2.1 0 0 a {[2 0[2]]} +check_doclist fts2q-3.2.2 0 0 test {[2 0[3]]} +check_doclist fts2q-3.2.3 0 0 that {[2 0[0]]} +check_doclist fts2q-3.2.4 0 0 was {[2 0[1]]} + +#************************************************************************* +# Test optimize() against a table involving segment merges. +# NOTE(shess): Since there's no transaction, each of the INSERT/UPDATE +# statements generates a segment. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + + UPDATE t1 SET c = 'This is a test one' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test one' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test one' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test two' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test two' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test two' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test three' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test three' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test three' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test four' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test four' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test four' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test' WHERE rowid = 3; +} + +# 2 segments in level 0, 1 in level 1 (18 segments created, 16 +# merged). 
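# (Editorial note on the arithmetic above, assuming the 16-way merge
# rule that the "18 created, 16 merged" comment implies) each of the 18
# INSERT/UPDATE statements flushes one level-0 segment; once 16 of them
# have accumulated they are merged into a single level-1 segment, and
# the two most recent statements are left behind as fresh level-0
# segments.  That gives three segdir rows in total:
do_test fts2q-note-3 {
  execsql { SELECT count(*) FROM t1_segdir }
} {3}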
+do_test fts2q-4.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 1 0} + +do_test fts2q-4.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +check_terms_all fts2q-4.1 {a four is one test that this three two was} +check_doclist_all fts2q-4.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts2q-4.1.2 four {} +check_doclist_all fts2q-4.1.3 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts2q-4.1.4 one {} +check_doclist_all fts2q-4.1.5 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts2q-4.1.6 that {[2 0[0]]} +check_doclist_all fts2q-4.1.7 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts2q-4.1.8 three {} +check_doclist_all fts2q-4.1.9 two {} +check_doclist_all fts2q-4.1.10 was {[2 0[1]]} + +check_terms fts2q-4.2 0 0 {a four test that was} +check_doclist fts2q-4.2.1 0 0 a {[2 0[2]]} +check_doclist fts2q-4.2.2 0 0 four {[2]} +check_doclist fts2q-4.2.3 0 0 test {[2 0[3]]} +check_doclist fts2q-4.2.4 0 0 that {[2 0[0]]} +check_doclist fts2q-4.2.5 0 0 was {[2 0[1]]} + +check_terms fts2q-4.3 0 1 {a four is test this} +check_doclist fts2q-4.3.1 0 1 a {[3 0[2]]} +check_doclist fts2q-4.3.2 0 1 four {[3]} +check_doclist fts2q-4.3.3 0 1 is {[3 0[1]]} +check_doclist fts2q-4.3.4 0 1 test {[3 0[3]]} +check_doclist fts2q-4.3.5 0 1 this {[3 0[0]]} + +check_terms fts2q-4.4 1 0 {a four is one test that this three two was} +check_doclist fts2q-4.4.1 1 0 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist fts2q-4.4.2 1 0 four {[1] [2 0[4]] [3 0[4]]} +check_doclist fts2q-4.4.3 1 0 is {[1 0[1]] [3 0[1]]} +check_doclist fts2q-4.4.4 1 0 one {[1] [2] [3]} +check_doclist fts2q-4.4.5 1 0 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist fts2q-4.4.6 1 0 that {[2 0[0]]} +check_doclist fts2q-4.4.7 1 0 this {[1 0[0]] [3 0[0]]} +check_doclist fts2q-4.4.8 1 0 three {[1] [2] [3]} +check_doclist fts2q-4.4.9 1 0 two {[1] [2] [3]} +check_doclist fts2q-4.4.10 1 0 was {[2 0[1]]} + +# Optimize should leave the result in the level of the highest-level +# prior segment. +do_test fts2q-4.5 { + execsql { + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index optimized} 1 0} + +# Identical to fts2q-4.matches. +do_test fts2q-4.5.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY rowid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +check_terms_all fts2q-4.5.1 {a is test that this was} +check_doclist_all fts2q-4.5.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts2q-4.5.1.2 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts2q-4.5.1.3 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts2q-4.5.1.4 that {[2 0[0]]} +check_doclist_all fts2q-4.5.1.5 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts2q-4.5.1.6 was {[2 0[1]]} + +check_terms fts2q-4.5.2 1 0 {a is test that this was} +check_doclist fts2q-4.5.2.1 1 0 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist fts2q-4.5.2.2 1 0 is {[1 0[1]] [3 0[1]]} +check_doclist fts2q-4.5.2.3 1 0 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist fts2q-4.5.2.4 1 0 that {[2 0[0]]} +check_doclist fts2q-4.5.2.5 1 0 this {[1 0[0]] [3 0[0]]} +check_doclist fts2q-4.5.2.6 1 0 was {[2 0[1]]} + +# Re-optimizing does nothing. 
+do_test fts2q-5.0 { + execsql { + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index already optimal} 1 0} + +# Even if we move things around, still does nothing. +do_test fts2q-5.1 { + execsql { + UPDATE t1_segdir SET level = 2 WHERE level = 1 AND idx = 0; + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index already optimal} 2 0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2r.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2r.test --- sqlite3-3.4.2/test/fts2r.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts2r.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,121 @@ +# 2008 July 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# These tests exercise the various types of fts2 cursors. +# +# $Id: fts2r.test,v 1.1 2008/07/29 20:38:18 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS2 is not defined, omit this file. +ifcapable !fts2 { + finish_test + return +} + +#************************************************************************* +# Test table scan (QUERY_GENERIC). This kind of query happens for +# queries with no WHERE clause, or for WHERE clauses which cannot be +# satisfied by an index. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts2(c); + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); +} + +do_test fts2e-1.1 { + execsql { + SELECT rowid FROM t1 ORDER BY rowid; + } +} {1 2 3} + +do_test fts2e-1.2 { + execsql { + SELECT rowid FROM t1 WHERE c LIKE '%test' ORDER BY rowid; + } +} {1 2 3} + +do_test fts2e-1.3 { + execsql { + SELECT rowid FROM t1 WHERE c LIKE 'That%' ORDER BY rowid; + } +} {2} + +#************************************************************************* +# Test lookup by rowid (QUERY_ROWID). This kind of query happens for +# queries which select by the rowid implicit index. +db eval { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + CREATE VIRTUAL TABLE t1 USING fts2(c); + CREATE TABLE t2(id INTEGER PRIMARY KEY AUTOINCREMENT, weight INTEGER UNIQUE); + INSERT INTO t2 VALUES (null, 10); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'This is a test'); + INSERT INTO t2 VALUES (null, 5); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'That was a test'); + INSERT INTO t2 VALUES (null, 20); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'This is a test'); +} + +# TODO(shess): This actually is doing QUERY_GENERIC? I'd have +# expected QUERY_ROWID in this case, as for a very large table the +# full scan is less efficient. 
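# (Editorial sketch, not part of the original test) One way to see which
# of the QUERY_* strategies the rowid-IN query above really gets would
# be to inspect the plan rather than the result rows, e.g.
#
#   db eval { EXPLAIN QUERY PLAN
#             SELECT rowid FROM t1 WHERE rowid IN (1, 2, 10) }
#
# The plan text for virtual tables is version-dependent, so no result is
# asserted here; the checks below only look at the rows returned.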
+do_test fts2e-2.1 { + execsql { + SELECT rowid FROM t1 WHERE rowid in (1, 2, 10); + } +} {1 2} + +do_test fts2e-2.2 { + execsql { + SELECT t1.rowid, weight FROM t1, t2 WHERE t2.id = t1.rowid ORDER BY weight; + } +} {2 5 1 10 3 20} + +do_test fts2e-2.3 { + execsql { + SELECT t1.rowid, weight FROM t1, t2 + WHERE t2.weight>5 AND t2.id = t1.rowid ORDER BY weight; + } +} {1 10 3 20} + +#************************************************************************* +# Test lookup by MATCH (QUERY_FULLTEXT). This is the fulltext index. +db eval { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + CREATE VIRTUAL TABLE t1 USING fts2(c); + CREATE TABLE t2(id INTEGER PRIMARY KEY AUTOINCREMENT, weight INTEGER UNIQUE); + INSERT INTO t2 VALUES (null, 10); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'This is a test'); + INSERT INTO t2 VALUES (null, 5); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'That was a test'); + INSERT INTO t2 VALUES (null, 20); + INSERT INTO t1 (rowid, c) VALUES (last_insert_rowid(), 'This is a test'); +} + +do_test fts2e-3.1 { + execsql { + SELECT rowid FROM t1 WHERE t1 MATCH 'this' ORDER BY rowid; + } +} {1 3} + +do_test fts2e-3.2 { + execsql { + SELECT t1.rowid, weight FROM t1, t2 + WHERE t1 MATCH 'this' AND t1.rowid = t2.id ORDER BY weight; + } +} {1 10 3 20} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts2.test --- sqlite3-3.4.2/test/fts2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts2.test 2009-06-05 18:03:28.000000000 +0100 @@ -0,0 +1,68 @@ +# 2008 July 22 +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. +# +# $Id: fts2.test,v 1.2 2008/07/23 18:17:32 drh Exp $ + +proc lshift {lvar} { + upvar $lvar l + set ret [lindex $l 0] + set l [lrange $l 1 end] + return $ret +} +while {[set arg [lshift argv]] != ""} { + switch -- $arg { + -sharedpagercache { + sqlite3_enable_shared_cache 1 + } + -soak { + set SOAKTEST 1 + } + default { + set argv [linsert $argv 0 $arg] + break + } + } +} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +# If SQLITE_ENABLE_FTS2 is defined, omit this file. +ifcapable !fts2 { + return +} +rename finish_test really_finish_test +proc finish_test {} {} +set ISQUICK 1 + +set EXCLUDE { + fts2.test +} + +# Files to include in the test. If this list is empty then everything +# that is not in the EXCLUDE list is run. +# +set INCLUDE { +} + +foreach testfile [lsort -dictionary [glob $testdir/fts2*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } +} + +set sqlite_open_file_count 0 +really_finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3aa.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3aa.test --- sqlite3-3.4.2/test/fts3aa.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3aa.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,202 @@ +# 2006 September 9 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3aa.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1(content) VALUES('one'); + INSERT INTO t1(content) VALUES('two'); + INSERT INTO t1(content) VALUES('one two'); + INSERT INTO t1(content) VALUES('three'); + INSERT INTO t1(content) VALUES('one three'); + INSERT INTO t1(content) VALUES('two three'); + INSERT INTO t1(content) VALUES('one two three'); + INSERT INTO t1(content) VALUES('four'); + INSERT INTO t1(content) VALUES('one four'); + INSERT INTO t1(content) VALUES('two four'); + INSERT INTO t1(content) VALUES('one two four'); + INSERT INTO t1(content) VALUES('three four'); + INSERT INTO t1(content) VALUES('one three four'); + INSERT INTO t1(content) VALUES('two three four'); + INSERT INTO t1(content) VALUES('one two three four'); + INSERT INTO t1(content) VALUES('five'); + INSERT INTO t1(content) VALUES('one five'); + INSERT INTO t1(content) VALUES('two five'); + INSERT INTO t1(content) VALUES('one two five'); + INSERT INTO t1(content) VALUES('three five'); + INSERT INTO t1(content) VALUES('one three five'); + INSERT INTO t1(content) VALUES('two three five'); + INSERT INTO t1(content) VALUES('one two three five'); + INSERT INTO t1(content) VALUES('four five'); + INSERT INTO t1(content) VALUES('one four five'); + INSERT INTO t1(content) VALUES('two four five'); + INSERT INTO t1(content) VALUES('one two four five'); + INSERT INTO t1(content) VALUES('three four five'); + INSERT INTO t1(content) VALUES('one three four five'); + INSERT INTO t1(content) VALUES('two three four five'); + INSERT INTO t1(content) VALUES('one two three four five'); +} + +do_test fts3aa-1.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-1.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-1.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-1.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two three'} +} {7 15 23 31} +do_test fts3aa-1.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one three two'} +} {7 15 23 31} +do_test fts3aa-1.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two three one'} +} {7 15 23 31} +do_test fts3aa-1.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two one three'} +} {7 15 23 31} +do_test fts3aa-1.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three one two'} +} {7 15 23 31} +do_test fts3aa-1.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three two one'} +} {7 15 23 31} +do_test fts3aa-1.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two THREE'} +} {7 15 23 31} +do_test fts3aa-1.11 { + 
execsql {SELECT rowid FROM t1 WHERE content MATCH ' ONE Two three '} +} {7 15 23 31} + +do_test fts3aa-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one"'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two"'} +} {3 7 11 15 19 23 27 31} +do_test fts3aa-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"two one"'} +} {} +do_test fts3aa-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three"'} +} {7 15 23 31} +do_test fts3aa-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two"'} +} {} +do_test fts3aa-2.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two three four"'} +} {15 31} +do_test fts3aa-2.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three two four"'} +} {} +do_test fts3aa-2.8 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three five"'} +} {21} +do_test fts3aa-2.9 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" five'} +} {21 29} +do_test fts3aa-2.10 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three"'} +} {21 29} +do_test fts3aa-2.11 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five "one three" four'} +} {29} +do_test fts3aa-2.12 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five four "one three"'} +} {29} +do_test fts3aa-2.13 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one three" four five'} +} {29} + +do_test fts3aa-3.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3aa-3.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one -two'} +} {1 5 9 13 17 21 25 29} +do_test fts3aa-3.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '-two one'} +} {1 5 9 13 17 21 25 29} + +do_test fts3aa-4.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one OR two'} +} {1 2 3 5 6 7 9 10 11 13 14 15 17 18 19 21 22 23 25 26 27 29 30 31} +do_test fts3aa-4.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH '"one two" OR three'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts3aa-4.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR "one two"'} +} {3 4 5 6 7 11 12 13 14 15 19 20 21 22 23 27 28 29 30 31} +do_test fts3aa-4.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts3aa-4.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three OR two one'} +} {3 5 7 11 13 15 19 21 23 27 29 31} +do_test fts3aa-4.6 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one two OR three OR four'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} +do_test fts3aa-4.7 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two OR three OR four one'} +} {3 5 7 9 11 13 15 19 21 23 25 27 29 31} + +# Test the ability to handle NULL content +# +do_test fts3aa-5.1 { + execsql {INSERT INTO t1(content) VALUES(NULL)} +} {} +do_test fts3aa-5.2 { + set rowid [db last_insert_rowid] + execsql {SELECT content FROM t1 WHERE rowid=$rowid} +} {{}} +do_test fts3aa-5.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH NULL} +} {} + +# Test the ability to handle non-positive rowids +# +do_test fts3aa-6.0 { + execsql {INSERT INTO t1(rowid, content) VALUES(0, 'four five')} +} {} +do_test fts3aa-6.1 { + execsql {SELECT content FROM t1 WHERE rowid = 0} +} {{four five}} +do_test fts3aa-6.2 { + execsql {INSERT INTO t1(rowid, content) VALUES(-1, 'three four')} +} {} +do_test fts3aa-6.3 { + 
execsql {SELECT content FROM t1 WHERE rowid = -1} +} {{three four}} +do_test fts3aa-6.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'four'} +} {-1 0 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ab.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ab.test --- sqlite3-3.4.2/test/fts3ab.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ab.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,147 @@ +# 2006 September 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ab.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Fill the full-text index "t1" with phrases in english, spanish, +# and german. For the i-th row, fill in the names for the bits +# that are set in the value of i. The least significant bit is +# 1. For example, the value 5 is 101 in binary which will be +# converted to "one three" in english. +# +proc fill_multilanguage_fulltext_t1 {} { + set english {one two three four five} + set spanish {un dos tres cuatro cinco} + set german {eine zwei drei vier funf} + + for {set i 1} {$i<=31} {incr i} { + set cmd "INSERT INTO t1 VALUES" + set vset {} + foreach lang {english spanish german} { + set words {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend words [lindex [set $lang] $j]} + } + lappend vset "'$words'" + } + set sql "INSERT INTO t1(english,spanish,german) VALUES([join $vset ,])" + # puts $sql + db eval $sql + } +} + +# Construct a full-text search table containing five keywords: +# one, two, three, four, and five, in various combinations. The +# rowid for each will be a bitmask for the elements it contains. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(english,spanish,german); +} +fill_multilanguage_fulltext_t1 + +do_test fts3ab-1.1 { + execsql {SELECT rowid FROM t1 WHERE english MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3ab-1.2 { + execsql {SELECT rowid FROM t1 WHERE spanish MATCH 'one'} +} {} +do_test fts3ab-1.3 { + execsql {SELECT rowid FROM t1 WHERE german MATCH 'one'} +} {} +do_test fts3ab-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one'} +} {1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31} +do_test fts3ab-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'one dos drei'} +} {7 15 23 31} +do_test fts3ab-1.6 { + execsql {SELECT english, spanish, german FROM t1 WHERE rowid=1} +} {one un eine} +do_test fts3ab-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"one un"'} +} {} + +do_test fts3ab-2.1 { + execsql { + CREATE VIRTUAL TABLE t2 USING fts3(from,to); + INSERT INTO t2([from],[to]) VALUES ('one two three', 'four five six'); + SELECT [from], [to] FROM t2 + } +} {{one two three} {four five six}} + + +# Compute an SQL string that contains the words one, two, three,... to +# describe bits set in the value $i. Only the lower 5 bits are examined. 
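# (Editorial worked example) 9 is binary 01001, so [wordset 9] returns
# the quoted string 'one four', while [wordset [expr {~9}]] keeps only
# the complement's low five bits, 10110, and returns 'two three five'.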
+# +proc wordset {i} { + set x {} + for {set j 0; set k 1} {$j<5} {incr j; incr k $k} { + if {$k&$i} {lappend x [lindex {one two three four five} $j]} + } + return '$x' +} + +# Create a new FTS table with three columns: +# +# norm: words for the bits of rowid +# plusone: words for the bits of rowid+1 +# invert: words for the bits of ~rowid +# +db eval { + CREATE VIRTUAL TABLE t4 USING fts3([norm],'plusone',"invert"); +} +for {set i 1} {$i<=15} {incr i} { + set vset [list [wordset $i] [wordset [expr {$i+1}]] [wordset [expr {~$i}]]] + db eval "INSERT INTO t4(norm,plusone,invert) VALUES([join $vset ,]);" +} + +do_test fts3ab-4.1 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.2 { + execsql {SELECT rowid FROM t4 WHERE norm MATCH 'one'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.3 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'one'} +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15} +do_test fts3ab-4.4 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:one'} +} {2 4 6 8 10 12 14} +do_test fts3ab-4.5 { + execsql {SELECT rowid FROM t4 WHERE plusone MATCH 'one'} +} {2 4 6 8 10 12 14} +do_test fts3ab-4.6 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one plusone:two'} +} {1 5 9 13} +do_test fts3ab-4.7 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'norm:one two'} +} {1 3 5 7 9 11 13 15} +do_test fts3ab-4.8 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'plusone:two norm:one'} +} {1 5 9 13} +do_test fts3ab-4.9 { + execsql {SELECT rowid FROM t4 WHERE t4 MATCH 'two norm:one'} +} {1 3 5 7 9 11 13 15} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ac.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ac.test --- sqlite3-3.4.2/test/fts3ac.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ac.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,1213 @@ +# 2006 September 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ac.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Create a table of sample email data. The data comes from email +# archives of Enron executives that was published as part of the +# litigation against that company. +# +do_test fts3ac-1.1 { + db eval { + CREATE VIRTUAL TABLE email USING fts3([from],[to],subject,body); + BEGIN TRANSACTION; +INSERT INTO email([from],[to],subject,body) VALUES('savita.puthigai@enron.com', 'traders.eol@enron.com, traders.eol@enron.com', 'EnronOnline- Change to Autohedge', 'Effective Monday, October 22, 2001 the following changes will be made to the Autohedge functionality on EnronOnline. + +The volume on the hedge will now respect the minimum volume and volume increment settings on the parent product. See rules below: + +? If the transaction volume on the child is less than half of the parent''s minimum volume no hedge will occur. +? 
If the transaction volume on the child is more than half the parent''s minimum volume but less than half the volume increment on the parent, the hedge will volume will be the parent''s minimum volume. +? For all other volumes, the same rounding rules will apply based on the volume increment on the parent product. + +Please see example below: + +Parent''s Settings: +Minimum: 5000 +Increment: 1000 + +Volume on Autohedge transaction Volume Hedged +1 - 2499 0 +2500 - 5499 5000 +5500 - 6499 6000'); +INSERT INTO email([from],[to],subject,body) VALUES('dana.davis@enron.com', 'laynie.east@enron.com, lisa.king@enron.com, lisa.best@enron.com,', 'Leaving Early', 'FYI: +If it''s ok with everyone''s needs, I would like to leave @4pm. If you think +you will need my assistance past the 4 o''clock hour just let me know; I''ll +be more than willing to stay.'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'louise.kitchen@enron.com', '<> - CC02.06.02', 'The following expense report is ready for approval: + +Employee Name: Christopher F. Calger +Status last changed by: Mollie E. Gustafson Ms +Expense Report Name: CC02.06.02 +Report Total: $3,972.93 +Amount Due Employee: $3,972.93 + + +To approve this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('jeff.duff@enron.com', 'julie.johnson@enron.com', 'Work request', 'Julie, + +Could you print off the current work request report by 1:30 today? + +Gentlemen, + +I''d like to review this today at 1:30 in our office. Also, could you provide +me with your activity reports so I can have Julie enter this information. + +JD'); +INSERT INTO email([from],[to],subject,body) VALUES('v.weldon@enron.com', 'gary.l.carrier@usa.dupont.com, scott.joyce@bankofamerica.com', 'Enron News', 'This could turn into something big.... +http://biz.yahoo.com/rf/010129/n29305829.html'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.haedicke@enron.com', 'paul.simons@enron.com', 'Re: First Polish Deal!', 'Congrats! Things seem to be building rapidly now on the Continent. Mark'); +INSERT INTO email([from],[to],subject,body) VALUES('e..carter@enron.com', 't..robinson@enron.com', 'FW: Producers Newsletter 9-24-2001', ' +The producer lumber pricing sheet. + -----Original Message----- +From: Johnson, Jay +Sent: Tuesday, October 16, 2001 3:42 PM +To: Carter, Karen E. +Subject: FW: Producers Newsletter 9-24-2001 + + + + -----Original Message----- +From: Daigre, Sergai +Sent: Friday, September 21, 2001 8:33 PM +Subject: Producers Newsletter 9-24-2001 + + '); +INSERT INTO email([from],[to],subject,body) VALUES('david.delainey@enron.com', 'kenneth.lay@enron.com', 'Greater Houston Partnership', 'Ken, in response to the letter from Mr Miguel San Juan, my suggestion would +be to offer up the Falcon for their use; however, given the tight time frame +and your recent visit with Mr. Fox that it would be difficult for either you +or me to participate. + +I spoke to Max and he agrees with this approach. + +I hope this meets with your approval. + +Regards +Delainey'); +INSERT INTO email([from],[to],subject,body) VALUES('lachandra.fenceroy@enron.com', 'lindy.donoho@enron.com', 'FW: Bus Applications Meeting Follow Up', 'Lindy, + +Here is the original memo we discussed earlier. Please provide any information that you may have. + +Your cooperation is greatly appreciated. 
+ +Thanks, + +lachandra.fenceroy@enron.com +713.853.3884 +877.498.3401 Pager + + -----Original Message----- +From: Bisbee, Joanne +Sent: Wednesday, September 26, 2001 7:50 AM +To: Fenceroy, LaChandra +Subject: FW: Bus Applications Meeting Follow Up + +Lachandra, Please get with David Duff today and see what this is about. Who are our TW accounting business users? + + -----Original Message----- +From: Koh, Wendy +Sent: Tuesday, September 25, 2001 2:41 PM +To: Bisbee, Joanne +Subject: Bus Applications Meeting Follow Up + +Lisa brought up a TW change effective Nov 1. It involves eliminating a turnback surcharge. I have no other information, but you might check with the business folks for any system changes required. + +Wendy'); +INSERT INTO email([from],[to],subject,body) VALUES('danny.mccarty@enron.com', 'fran.fagan@enron.com', 'RE: worksheets', 'Fran, + If Julie''s merit needs to be lump sum, just move it over to that column. Also, send me Eric Gadd''s sheets as well. Thanks. +Dan + + -----Original Message----- +From: Fagan, Fran +Sent: Thursday, December 20, 2001 11:10 AM +To: McCarty, Danny +Subject: worksheets + +As discussed, attached are your sheets for bonus and merit. + +Thanks, + +Fran Fagan +Sr. HR Rep +713.853.5219 + + + << File: McCartyMerit.xls >> << File: mccartyBonusCommercial_UnP.xls >> + +'); +INSERT INTO email([from],[to],subject,body) VALUES('bert.meyers@enron.com', 'shift.dl-portland@enron.com', 'OCTOBER SCHEDULE', 'TEAM, + +PLEASE SEND ME ANY REQUESTS THAT YOU HAVE FOR OCTOBER. SO FAR I HAVE THEM FOR LEAF. I WOULD LIKE TO HAVE IT DONE BY THE 15TH OF THE MONTH. ANY QUESTIONS PLEASE GIVE ME A CALL. + +BERT'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'john.arnold@enron.com, bilal.bajwa@enron.com, john.griffith@enron.com,', 'TRV Notification: (NG - PROPT P/L - 09/27/2001)', 'The report named: NG - PROPT P/L , published as of 09/27/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('patrice.mims@enron.com', 'calvin.eakins@enron.com', 'Re: Small business supply assistance', 'Hi Calvin + + +I spoke with Rickey (boy, is he long-winded!!). Gave him the name of our +credit guy, Russell Diamond. + +Thank for your help!'); +INSERT INTO email([from],[to],subject,body) VALUES('legal <.hall@enron.com>', 'stephanie.panus@enron.com', 'Termination update', 'City of Vernon and Salt River Project terminated their contracts. I will fax these notices to you.'); +INSERT INTO email([from],[to],subject,body) VALUES('d..steffes@enron.com', 'richard.shapiro@enron.com', 'EES / ENA Government Affairs Staffing & Outside Services', 'Rick -- + +Here is the information on staffing and outside services. Call if you need anything else. + +Jim + + '); +INSERT INTO email([from],[to],subject,body) VALUES('gelliott@industrialinfo.com', 'pcopello@industrialinfo.com', 'ECAAR (Gavin), WSCC (Diablo Canyon), & NPCC (Seabrook)', 'Dear Power Outage Database Customer, +Attached you will find an excel document. The outages contained within are forced or rescheduled outages. Your daily delivery will still contain these outages. +In addition to the two excel documents, there is a dbf file that is formatted like your daily deliveries you receive nightly. This will enable you to load the data into your regular database. Any questions please let me know. Thanks. +Greg Elliott +IIR, Inc. 
+713-783-5147 x 3481 +outages@industrialinfo.com +THE INFORMATION CONTAINED IN THIS E-MAIL IS LEGALLY PRIVILEGED AND CONFIDENTIAL INFORMATION INTENDED ONLY FOR THE USE OF THE INDIVIDUAL OR ENTITY NAMED ABOVE. YOU ARE HEREBY NOTIFIED THAT ANY DISSEMINATION, DISTRIBUTION, OR COPY OF THIS E-MAIL TO UNAUTHORIZED ENTITIES IS STRICTLY PROHIBITED. IF YOU HAVE RECEIVED THIS +E-MAIL IN ERROR, PLEASE DELETE IT. + - OUTAGE.dbf + - 111201R.xls + - 111201.xls '); +INSERT INTO email([from],[to],subject,body) VALUES('enron.announcements@enron.com', 'all_ena_egm_eim@enron.com', 'EWS Brown Bag', 'MARK YOUR LUNCH CALENDARS NOW ! + +You are invited to attend the EWS Brown Bag Lunch Series + +Featuring: RAY BOWEN, COO + +Topic: Enron Industrial Markets + +Thursday, March 15, 2001 +11:30 am - 12:30 pm +EB 5 C2 + + +You bring your lunch, Limited Seating +We provide drinks and dessert. RSVP x 3-9610'); +INSERT INTO email([from],[to],subject,body) VALUES('chris.germany@enron.com', 'ingrid.immer@williams.com', 'Re: About St Pauls', 'Sounds good to me. I bet this is next to the Warick?? Hotel. + + + + +"Immer, Ingrid" on 12/21/2000 11:48:47 AM +To: "''chris.germany@enron.com''" +cc: +Subject: About St Pauls + + + + + <> +? +?http://www.stpaulshouston.org/about.html + +Chris, + +I like the looks of this place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. + +Let me know.?? ii + + - About St Pauls.url + +'); +INSERT INTO email([from],[to],subject,body) VALUES('nas@cpuc.ca.gov', 'skatz@sempratrading.com, kmccrea@sablaw.com, thompson@wrightlaw.com,', 'Reply Brief filed July 31, 2000', ' - CPUC01-#76371-v1-Revised_Reply_Brief__Due_today_7_31_.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('gascontrol@aglresources.com', 'dscott4@enron.com, lcampbel@enron.com', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder', 'Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder +As discussed in the Winter Operations Meeting on Sept.29,2000, +E-Gas(Emergency Gas) will not be offered this winter as a service from AGLC. +Marketers and Poolers can receive gas via Peaking and IBSS nominations(daisy +chain) from other marketers up to the 6 p.m. Same Day 2 nomination cycle. +'); +INSERT INTO email([from],[to],subject,body) VALUES('dutch.quigley@enron.com', 'rwolkwitz@powermerchants.com', '', ' + +Here is a goody for you'); +INSERT INTO email([from],[to],subject,body) VALUES('ryan.o''rourke@enron.com', 'k..allen@enron.com, randy.bhatia@enron.com, frank.ermis@enron.com,', 'TRV Notification: (West VaR - 11/07/2001)', 'The report named: West VaR , published as of 11/07/2001 is now available for viewing on the website.'); +INSERT INTO email([from],[to],subject,body) VALUES('mjones7@txu.com', 'cstone1@txu.com, ggreen2@txu.com, timpowell@txu.com,', 'Enron / HPL Actuals for July 10, 2000', 'Teco Tap 10.000 / Enron ; 110.000 / HPL IFERC + +LS HPL LSK IC 30.000 / Enron +'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.pereira@enron.com', 'kkw816@aol.com', 'soccer practice', 'Kathy- + +Is it safe to assume that practice is cancelled for tonight?? + +Susan Pereira'); +INSERT INTO email([from],[to],subject,body) VALUES('mark.whitt@enron.com', 'barry.tycholiz@enron.com', 'Huber Internal Memo', 'Please look at this. I didn''t know how deep to go with the desk. Do you think this works. 
+ + '); +INSERT INTO email([from],[to],subject,body) VALUES('m..forney@enron.com', 'george.phillips@enron.com', '', 'George, +Give me a call and we will further discuss opportunities on the 13st floor. + +Thanks, +JMForney +3-7160'); +INSERT INTO email([from],[to],subject,body) VALUES('brad.mckay@enron.com', 'angusmcka@aol.com', 'Re: (no subject)', 'not yet'); +INSERT INTO email([from],[to],subject,body) VALUES('adam.bayer@enron.com', 'jonathan.mckay@enron.com', 'FW: Curve Fetch File', 'Here is the curve fetch file sent to me. It has plenty of points in it. If you give me a list of which ones you need we may be able to construct a secondary worksheet to vlookup the values. + +adam +35227 + + + -----Original Message----- +From: Royed, Jeff +Sent: Tuesday, September 25, 2001 11:37 AM +To: Bayer, Adam +Subject: Curve Fetch File + +Let me know if it works. It may be required to have a certain version of Oracle for it to work properly. + + + +Jeff Royed +Enron +Energy Operations +Phone: 713-853-5295'); +INSERT INTO email([from],[to],subject,body) VALUES('matt.smith@enron.com', 'yan.wang@enron.com', 'Report Formats', 'Yan, + +The merged reports look great. I believe the only orientation changes are to +"unmerge" the following six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 +23 West_3 +25 CIG_WIC + +The orientation of the individual reports should be correct. Thanks. + +Mat + +PS. Just a reminder to add the "*" by the title of calculated points.'); +INSERT INTO email([from],[to],subject,body) VALUES('michelle.lokay@enron.com', 'jimboman@bigfoot.com', 'Egyptian Festival', '---------------------- Forwarded by Michelle Lokay/ET&S/Enron on 09/07/2000 +10:08 AM --------------------------- + + +"Karkour, Randa" on 09/07/2000 09:01:04 AM +To: "''Agheb (E-mail)" , "Leila Mankarious (E-mail)" +, "''Marymankarious (E-mail)" +, "Michelle lokay (E-mail)" , "Ramy +Mankarious (E-mail)" +cc: + +Subject: Egyptian Festival + + + <> + + http://www.egyptianfestival.com/ + + - Egyptian Festival.url +'); +INSERT INTO email([from],[to],subject,body) VALUES('errol.mclaughlin@enron.com', 'sherry.dawson@enron.com', 'Urgent!!! --- New EAST books', 'This has to be done.................................. + +Thanks +---------------------- Forwarded by Errol McLaughlin/Corp/Enron on 12/20/2000 +08:39 AM --------------------------- + + + + From: William Kelly @ ECT 12/20/2000 08:31 AM + + +To: Kam Keiser/HOU/ECT@ECT, Darron C Giron/HOU/ECT@ECT, David +Baumbach/HOU/ECT@ECT, Errol McLaughlin/Corp/Enron@ENRON +cc: Kimat Singla/HOU/ECT@ECT, Kulvinder Fowler/NA/Enron@ENRON, Kyle R +Lilly/HOU/ECT@ECT, Jeff Royed/Corp/Enron@ENRON, Alejandra +Chavez/NA/Enron@ENRON, Crystal Hyde/HOU/ECT@ECT + +Subject: New EAST books + +We have new book names in TAGG for our intramonth portfolios and it is +extremely important that any deal booked to the East is communicated quickly +to someone on my team. I know it will take some time for the new names to +sink in and I do not want us to miss any positions or P&L. + +Thanks for your help on this. + +New: +Scott Neal : East Northeast +Dick Jenkins: East Marketeast + +WK +'); +INSERT INTO email([from],[to],subject,body) VALUES('david.forster@enron.com', 'eol.wide@enron.com', 'Change to Stack Manager', 'Effective immediately, there is a change to the Stack Manager which will +affect any Inactive Child. + +An inactive Child with links to Parent products will not have their +calculated prices updated until the Child product is Activated. 
+ +When the Child Product is activated, the price will be recalculated and +updated BEFORE it is displayed on the web. + +This means that if you are inputting a basis price on a Child product, you +will not see the final, calculated price until you Activate the product, at +which time the customer will also see it. + +If you have any questions, please contact the Help Desk on: + +Americas: 713 853 4357 +Europe: + 44 (0) 20 7783 7783 +Asia/Australia: +61 2 9229 2300 + +Dave'); +INSERT INTO email([from],[to],subject,body) VALUES('vince.kaminski@enron.com', 'jhh1@email.msn.com', 'Re: Light reading - see pieces beginning on page 7', 'John, + +I saw it. Very interesting. + +Vince + + + + + +"John H Herbert" on 07/28/2000 08:38:08 AM +To: "Vince J Kaminski" +cc: +Subject: Light reading - see pieces beginning on page 7 + + +Cheers and have a nice weekend, + + +JHHerbert + + + + + - gd000728.pdf + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('matthew.lenhart@enron.com', 'mmmarcantel@equiva.com', 'RE:', 'i will try to line up a pig for you '); +INSERT INTO email([from],[to],subject,body) VALUES('jae.black@enron.com', 'claudette.harvey@enron.com, chaun.roberts@enron.com, judy.martinez@enron.com,', 'Disaster Recovery Equipment', 'As a reminder...there are several pieces of equipment that are set up on the 30th Floor, as well as on our floor, for the Disaster Recovery Team. PLEASE DO NOT TAKE, BORROW OR USE this equipment. Should you need to use another computer system, other than yours, or make conference calls please work with your Assistant to help find or set up equipment for you to use. + +Thanks for your understanding in this matter. + +T.Jae Black +East Power Trading +Assistant to Kevin Presto +off. 713-853-5800 +fax 713-646-8272 +cell 713-539-4760'); +INSERT INTO email([from],[to],subject,body) VALUES('eric.bass@enron.com', 'dale.neuner@enron.com', '5 X 24', 'Dale, + +Have you heard anything more on the 5 X 24s? We would like to get this +product out ASAP. + + +Thanks, + +Eric'); +INSERT INTO email([from],[to],subject,body) VALUES('messenger@smartreminders.com', 'm..tholt@enron.com', '10% Coupon - PrintPal Printer Cartridges - 100% Guaranteed', '[IMAGE] +[IMAGE][IMAGE][IMAGE] +Dear SmartReminders Member, + [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] [IMAGE] + + + + + + + + + + + + + + + + + + + + + +We respect your privacy and are a Certified Participant of the BBBOnLine + Privacy Program. To be removed from future offers,click here. +SmartReminders.com is a permission based service. To unsubscribe click here . '); +INSERT INTO email([from],[to],subject,body) VALUES('benjamin.rogers@enron.com', 'mark.bernstein@enron.com', '', 'The guy you are talking about left CIN under a "cloud of suspicion" sort of +speak. He was the one who got into several bad deals and PPA''s in California +for CIN, thus he left on a bad note. Let me know if you need more detail +than that, I felt this was the type of info you were looking for. Thanks! +Ben'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'michelle.cash@enron.com', 'Expense Report Receipts Not Received', 'Employee Name: Michelle Cash +Report Name: Houston Cellular 8-11-01 +Report Date: 12/13/01 +Report ID: 594D37C9ED2111D5B452 +Submitted On: 12/13/01 + +You are only allowed 2 reports with receipts outstanding. 
Your expense reports will not be paid until you meet this requirement.'); +INSERT INTO email([from],[to],subject,body) VALUES('susan.mara@enron.com', 'ray.alvarez@enron.com, mark.palmer@enron.com, karen.denne@enron.com,', 'CAISO Emergency Motion -- to discontinue market-based rates for', 'FYI. the latest broadside against the generators. + +Sue Mara +Enron Corp. +Tel: (415) 782-7802 +Fax:(415) 782-7854 +----- Forwarded by Susan J Mara/NA/Enron on 06/08/2001 12:24 PM ----- + + + "Milner, Marcie" 06/08/2001 11:13 AM To: "''smara@enron.com''" cc: Subject: CAISO Emergency Motion + + +Sue, did you see this emergency motion the CAISO filed today? Apparently +they are requesting that FERC discontinue market-based rates immediately and +grant refunds plus interest on the difference between cost-based rates and +market revenues received back to May 2000. They are requesting the +commission act within 14 days. Have you heard anything about what they are +doing? + +Marcie + +http://www.caiso.com/docs/2001/06/08/200106081005526469.pdf +'); +INSERT INTO email([from],[to],subject,body) VALUES('fletcher.sturm@enron.com', 'eloy.escobar@enron.com', 'Re: General Brinks Position Meeting', 'Eloy, + +Who is General Brinks? + +Fletch'); +INSERT INTO email([from],[to],subject,body) VALUES('nailia.dindarova@enron.com', 'richard.shapiro@enron.com', 'Documents for Mark Frevert (on EU developments and lessons from', 'Rick, + +Here are the documents that Peter has prepared for Mark Frevert. + +Nailia +---------------------- Forwarded by Nailia Dindarova/LON/ECT on 25/06/2001 +16:36 --------------------------- + + +Nailia Dindarova +25/06/2001 15:36 +To: Michael Brown/Enron@EUEnronXGate +cc: Ross Sankey/Enron@EUEnronXGate, Eric Shaw/ENRON@EUEnronXGate, Peter +Styles/LON/ECT@ECT + +Subject: Documents for Mark Frevert (on EU developments and lessons from +California) + +Michael, + + +These are the documents that Peter promised to give to you for Mark Frevert. +He has now handed them to him in person but asked me to transmit them +electronically to you, as well as Eric and Ross. + +Nailia + + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('peggy.a.kostial@accenture.com', 'dave.samuels@enron.com', 'EOL-Accenture Deal Sheet', 'Dave - + +Attached are our comments and suggested changes. Please call to review. + +On the time line for completion, we have four critical steps to complete: + Finalize market analysis to refine business case, specifically + projected revenue stream + Complete counterparty surveying, including targeting 3 CPs for letters + of intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations + +Joe will come back to us with an updated time line, but it is my +expectation that we are still on the same schedule (we just begun week +three) with possibly a week or so slippage.....contract negotiations will +probably be the critical path. + +We will send our cut at the actual time line here shortly. 
Thanks, + +Peggy + +(See attached file: accenture-dealpoints v2.doc) + - accenture-dealpoints v2.doc '); +INSERT INTO email([from],[to],subject,body) VALUES('thomas.martin@enron.com', 'thomas.martin@enron.com', 'Re: Guadalupe Power Partners LP', '---------------------- Forwarded by Thomas A Martin/HOU/ECT on 03/20/2001 +03:49 PM --------------------------- + + +Thomas A Martin +10/11/2000 03:55 PM +To: Patrick Wade/HOU/ECT@ECT +cc: +Subject: Re: Guadalupe Power Partners LP + +The deal is physically served at Oasis Waha or Oasis Katy and is priced at +either HSC, Waha or Katytailgate GD at buyers option three days prior to +NYMEX close. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('judy.townsend@enron.com', 'dan.junek@enron.com, chris.germany@enron.com', 'Columbia Distribution''s Capacity Available for Release - Sum', '---------------------- Forwarded by Judy Townsend/HOU/ECT on 03/09/2001 11:04 +AM --------------------------- + + +agoddard@nisource.com on 03/08/2001 09:16:57 AM +To: " - *Koch, Kent" , " - +*Millar, Debra" , " - *Burke, Lynn" + +cc: " - *Heckathorn, Tom" +Subject: Columbia Distribution''s Capacity Available for Release - Sum + + +Attached is Columbia Distribution''s notice of capacity available for release +for +the summer of 2001 (Apr. 2001 through Oct. 2001). + +Please note that the deadline for bids is 3:00pm EST on March 20, 2001. + +If you have any questions, feel free to contact any of the representatives +listed +at the bottom of the attachment. + +Aaron Goddard + + + + + - 2001Summer.doc +'); +INSERT INTO email([from],[to],subject,body) VALUES('rhonda.denton@enron.com', 'tim.belden@enron.com, dana.davis@enron.com, genia.fitzgerald@enron.com,', 'Split Rock Energy LLC', 'We have received the executed EEI contract from this CP dated 12/12/2000. +Copies will be distributed to Legal and Credit.'); +INSERT INTO email([from],[to],subject,body) VALUES('kerrymcelroy@dwt.com', 'jack.speer@alcoa.com, crow@millernash.com, michaelearly@earthlink.net,', 'Oral Argument Request', ' - Oral Argument Request.doc'); +INSERT INTO email([from],[to],subject,body) VALUES('mike.carson@enron.com', 'rlmichaelis@hormel.com', '', 'Did you come in town this wk end..... My new number at our house is : +713-668-3712...... my cell # is 281-381-7332 + +the kid'); +INSERT INTO email([from],[to],subject,body) VALUES('cooper.richey@enron.com', 'trycooper@hotmail.com', 'FW: Contact Info', ' + +-----Original Message----- +From: Punja, Karim +Sent: Thursday, December 13, 2001 2:35 PM +To: Richey, Cooper +Subject: Contact Info + + +Cooper, + +Its been a real pleasure working with you (even though it was for only a small amount of time) +I hope we can stay in touch. + +Home# 234-0249 +email: kpunja@hotmail.com + +Take Care, + +Karim. + '); +INSERT INTO email([from],[to],subject,body) VALUES('bjm30@earthlink.net', 'mcguinn.k@enron.com, mcguinn.ian@enron.com, mcguinn.stephen@enron.com,', 'email address change', 'Hello all. + +I haven''t talked to many of you via email recently but I do want to give you +my new address for your email file: + + bjm30@earthlink.net + +I hope all is well. + +Brian McGuinn'); +INSERT INTO email([from],[to],subject,body) VALUES('shelley.corman@enron.com', 'steve.hotte@enron.com', 'Flat Panels', 'Can you please advise what is going on with the flat panels that we had planned to distribute to our gas logistics team. It was in the budget and we had the okay, but now I''m hearing there is some hold-up & the units are stored on 44. 
+ +Shelley'); +INSERT INTO email([from],[to],subject,body) VALUES('sara.davidson@enron.com', 'john.schwartzenburg@enron.com, scott.dieball@enron.com, recipients@enron.com,', '2001 Enron Law Conference (Distribution List 2)', ' Enron Law Conference + +San Antonio, Texas May 2-4, 2001 Westin Riverwalk + + See attached memo for more details!! + + +? Registration for the law conference this year will be handled through an +Online RSVP Form on the Enron Law Conference Website at +http://lawconference.corp.enron.com. The website is still under construction +and will not be available until Thursday, March 15, 2001. + +? We will send you another e-mail to confirm when the Law Conference Website +is operational. + +? Please complete the Online RSVP Form as soon as it is available and submit +it no later than Friday, March 30th. + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('tori.kuykendall@enron.com', 'heath.b.taylor@accenture.com', 'Re:', 'hey - thats funny about john - he definitely remembers him - i''ll call pat +and let him know - we are coming on saturday - i just havent had a chance to +call you guys back -- looking forward to it -- i probably need the +directions again though'); +INSERT INTO email([from],[to],subject,body) VALUES('darron.giron@enron.com', 'bryce.baxter@enron.com', 'Re: Feedback for Audrey Cook', 'Bryce, + +I''ll get it done today. + +DG 3-9573 + + + + + + From: Bryce Baxter 06/12/2000 07:15 PM + + +To: Darron C Giron/HOU/ECT@ECT +cc: +Subject: Feedback for Audrey Cook + +You were identified as a reviewer for Audrey Cook. If possible, could you +complete her feedback by end of business Wednesday? It will really help me +in the PRC process to have your input. Thanks. + +'); +INSERT INTO email([from],[to],subject,body) VALUES('casey.evans@enron.com', 'stephanie.sever@enron.com', 'Gas EOL ID', 'Stephanie, + +In conjunction with the recent movement of several power traders, they are changing the names of their gas books as well. The names of the new gas books and traders are as follows: + +PWR-NG-LT-SPP: Mike Carson +PWR-NG-LT-SERC: Jeff King + +If you need to know their power desk to map their ID to their gas books, those desks are as follows: + +EPMI-LT-SPP: Mike Carson +EPMI-LT-SERC: Jeff King + +I will be in training this afternoon, but will be back when class is over. Let me know if you have any questions. + +Thanks for your help! +Casey'); +INSERT INTO email([from],[to],subject,body) VALUES('darrell.schoolcraft@enron.com', 'david.roensch@enron.com, kimberly.watson@enron.com, michelle.lokay@enron.com,', 'Postings', 'Please see the attached. + + +ds + + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('mcominsky@aol.com', 'cpatman@bracepatt.com, james_derrick@enron.com', 'Jurisprudence Luncheon', 'Carrin & Jim -- + +It was an honor and a pleasure to meet both of you yesterday. I know we will +have fun working together on this very special event. + +Jeff left the jurisprudence luncheon lists for me before he left on vacation. + I wasn''t sure whether he transmitted them to you as well. Would you please +advise me if you would like them sent to you? I can email the MS Excel files +or I can fax the hard copies to you. Please advise what is most convenient. + +I plan to be in town through the holidays and can be reached by phone, email, +or cell phone at any time. My cell phone number is 713/705-4829. + +Thanks again for your interest in the ADL''s work. Martin. + +Martin B. 
Cominsky +Director, Southwest Region +Anti-Defamation League +713/627-3490, ext. 122 +713/627-2011 (fax) +MCominsky@aol.com'); +INSERT INTO email([from],[to],subject,body) VALUES('phillip.love@enron.com', 'todagost@utmb.edu, gbsonnta@utmb.edu', 'New President', 'I had a little bird put a word in my ear. Is there any possibility for Ben +Raimer to be Bush''s secretary of HHS? Just curious about that infamous UTMB +rumor mill. Hope things are well, happy holidays. +PL'); +INSERT INTO email([from],[to],subject,body) VALUES('marie.heard@enron.com', 'ehamilton@fna.com', 'ISDA Master Agreement', 'Erin: + +Pursuant to your request, attached are the Schedule to the ISDA Master Agreement, together with Paragraph 13 to the ISDA Credit Support Annex. Please let me know if you need anything else. We look forward to hearing your comments. + +Marie + +Marie Heard +Senior Legal Specialist +Enron North America Corp. +Phone: (713) 853-3907 +Fax: (713) 646-3490 +marie.heard@enron.com + + '); +INSERT INTO email([from],[to],subject,body) VALUES('andrea.ring@enron.com', 'beverly.beaty@enron.com', 'Re: Tennessee Buy - Louis Dreyfus', 'Beverly - once again thanks so much for your help on this. + + + + '); +INSERT INTO email([from],[to],subject,body) VALUES('karolyn.criado@enron.com', 'j..bonin@enron.com, felicia.case@enron.com, b..clapp@enron.com,', 'Price List week of Oct. 8-9, 2001', ' +Please contact me if you have any questions regarding last weeks prices. + +Thank you, +Karolyn Criado +3-9441 + + + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.presto@enron.com', 'edward.baughman@enron.com, billy.braddock@enron.com', 'Associated', 'Please begin working on filling our Associated short position in 02. I would like to take this risk off the books. + +In addition, please find out what a buy-out of VEPCO would cost us. With Rogers transitioning to run our retail risk management, I would like to clean up our customer positions. + +We also need to continue to explore a JEA buy-out. + +Thanks.'); +INSERT INTO email([from],[to],subject,body) VALUES('stacy.dickson@enron.com', 'gregg.penman@enron.com', 'RE: Constellation TC 5-7-01', 'Gregg, + +I am at home with a sick baby. (Lots of fun!) I will call you about this +tomorrow. + +Stacy'); +INSERT INTO email([from],[to],subject,body) VALUES('joe.quenet@enron.com', 'dfincher@utilicorp.com', '', 'hey big guy.....check this out..... + + w ww.gorelieberman-2000.com/'); +INSERT INTO email([from],[to],subject,body) VALUES('k..allen@enron.com', 'jacqestc@aol.com', '', 'Jacques, + +I sent you a fax of Kevin Kolb''s comments on the release. The payoff on the note would be $36,248 ($36090(principal) + $158 (accrued interest)). +This is assuming we wrap this up on Tuesday. + +Please email to confirm that their changes are ok so I can set up a meeting on Tuesday to reach closure. + +Phillip'); +INSERT INTO email([from],[to],subject,body) VALUES('kourtney.nelson@enron.com', 'mike.swerzbin@enron.com', 'Adjusted L/R Balance', 'Mike, + +I placed the adjusted L/R Balance on the Enronwest site. It is under the "Staff/Kourtney Nelson". There are two links: + +1) "Adj L_R" is the same data/format from the weekly strategy meeting. +2) "New Gen 2001_2002" link has all of the supply side info that is used to calculate the L/R balance + -Please note the Data Flag column, a value of "3" indicates the project was cancelled, on hold, etc and is not included in the calc. 
+ +Both of these sheets are interactive Excel spreadsheets and thus you can play around with the data as you please. Also, James Bruce is working to get his gen report on the web. That will help with your access to information on new gen. + +Please let me know if you have any questions or feedback, + +Kourtney + + + +Kourtney Nelson +Fundamental Analysis +Enron North America +(503) 464-8280 +kourtney.nelson@enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('d..thomas@enron.com', 'naveed.ahmed@enron.com', 'FW: Current Enron TCC Portfolio', ' + +-----Original Message----- +From: Grace, Rebecca M. +Sent: Monday, December 17, 2001 9:44 AM +To: Thomas, Paul D. +Cc: Cashion, Jim; Allen, Thresa A.; May, Tom +Subject: RE: Current Enron TCC Portfolio + + +Paul, + +I reviewed NY''s list. I agree with all of their contracts numbers and mw amounts. + +Call if you have any more questions. + +Rebecca + + + + -----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:08 AM +To: Grace, Rebecca M. +Subject: FW: Current Enron TCC Portfolio + + << File: enrontccs.xls >> +Rebecca, +Let me know if you see any differences. + +Paul +X 3-0403 +-----Original Message----- +From: Thomas, Paul D. +Sent: Monday, December 17, 2001 9:04 AM +To: Ahmed, Naveed +Subject: FW: Current Enron TCC Portfolio + + + + +-----Original Message----- +From: Thomas, Paul D. +Sent: Thursday, December 13, 2001 10:01 AM +To: Baughman, Edward D. +Subject: Current Enron TCC Portfolio + + +'); +INSERT INTO email([from],[to],subject,body) VALUES('stephanie.panus@enron.com', 'william.bradford@enron.com, debbie.brackett@enron.com,', 'Coastal Merchant Energy/El Paso Merchant Energy', 'Coastal Merchant Energy, L.P. merged with and into El Paso Merchant Energy, +L.P., effective February 1, 2001, with the surviving entity being El Paso +Merchant Energy, L.P. We currently have ISDA Master Agreements with both +counterparties. Please see the attached memo regarding the existing Masters +and let us know which agreement should be terminated. + +Thanks, +Stephanie +'); +INSERT INTO email([from],[to],subject,body) VALUES('kam.keiser@enron.com', 'c..kenne@enron.com', 'RE: What about this too???', ' + + -----Original Message----- +From: Kenne, Dawn C. +Sent: Wednesday, February 06, 2002 11:50 AM +To: Keiser, Kam +Subject: What about this too??? + + + << File: Netco Trader Matrix.xls >> + '); +INSERT INTO email([from],[to],subject,body) VALUES('chris.meyer@enron.com', 'joe.parks@enron.com', 'Centana', 'Talked to Chip. We do need Cash Committe approval given the netting feature of your deal, which means Batch Funding Request. Please update per my previous e-mail and forward. + +Thanks + +chris +x31666'); +INSERT INTO email([from],[to],subject,body) VALUES('debra.perlingiere@enron.com', 'jworman@academyofhealth.com', '', 'Have a great weekend! Happy Fathers Day! + + +Debra Perlingiere +Enron North America Corp. +1400 Smith Street, EB 3885 +Houston, Texas 77002 +dperlin@enron.com +Phone 713-853-7658 +Fax 713-646-3490'); +INSERT INTO email([from],[to],subject,body) VALUES('outlook.team@enron.com', '', 'Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia &', 'CALENDAR ENTRY: APPOINTMENT + +Description: + Demo by Martha Janousek of Dashboard & Pipeline Profile / Julia & Dir Rpts. 
- 4102 + +Date: 1/5/2001 +Time: 9:00 AM - 10:00 AM (Central Standard Time) + +Chairperson: Outlook Migration Team + +Detailed Description:'); +INSERT INTO email([from],[to],subject,body) VALUES('diana.seifert@enron.com', 'mark.taylor@enron.com', 'Guest access Chile', 'Hello Mark, + +Justin Boyd told me that your can help me with questions regarding Chile. +We got a request for guest access through MG. +The company is called Escondida and is a subsidiary of BHP Australia. + +Please advise if I can set up a guest account or not. +F.Y.I.: MG is planning to put a "in w/h Chile" contract for Copper on-line as +soon as Enron has done the due diligence for this country. +Thanks ! + + +Best regards + +Diana Seifert +EOL PCG'); +INSERT INTO email([from],[to],subject,body) VALUES('enron_update@concureworkplace.com', 'mark.whitt@enron.com', '<> - 121001', 'The Approval status has changed on the following report: + +Status last changed by: Barry L. Tycholiz +Expense Report Name: 121001 +Report Total: $198.98 +Amount Due Employee: $198.98 +Amount Approved: $198.98 +Amount Paid: $0.00 +Approval Status: Approved +Payment Status: Pending + + +To review this expense report, click on the following link for Concur Expense. +http://expensexms.enron.com'); +INSERT INTO email([from],[to],subject,body) VALUES('kevin.hyatt@enron.com', '', 'Technical Support', 'Outside the U.S., please refer to the list below: + +Australia: +1800 678-515 +support@palm-au.com + +Canada: +1905 305-6530 +support@palm.com + +New Zealand: +0800 446-398 +support@palm-nz.com + +U.K.: +0171 867 0108 +eurosupport@palm.3com.com + +Please refer to the Worldwide Customer Support card for a complete technical support contact list.'); +INSERT INTO email([from],[to],subject,body) VALUES('geoff.storey@enron.com', 'dutch.quigley@enron.com', 'RE:', 'duke contact? + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 10:14 AM +To: Storey, Geoff +Subject: RE: + +bp corp Albert LaMore 281-366-4962 + +running the reports now + + + -----Original Message----- +From: Storey, Geoff +Sent: Wednesday, October 31, 2001 10:10 AM +To: Quigley, Dutch +Subject: RE: + +give me a contact over there too +BP + + + -----Original Message----- +From: Quigley, Dutch +Sent: Wednesday, October 31, 2001 9:42 AM +To: Storey, Geoff +Subject: + +Coral Jeff Whitnah 713-767-5374 +Relaint Steve McGinn 713-207-4000'); +INSERT INTO email([from],[to],subject,body) VALUES('pete.davis@enron.com', 'pete.davis@enron.com', 'Start Date: 4/22/01; HourAhead hour: 3; ', 'Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING FILE -->> O:\Portland\WestDesk\California Scheduling\ISO Final +Schedules\2001042203.txt + +---- Load Schedule ---- +$$$ Variance found in table tblLoads. + Details: (Hour: 3 / Preferred: 1.92 / Final: 1.89) + TRANS_TYPE: FINAL + LOAD_ID: PGE4 + MKT_TYPE: 2 + TRANS_DATE: 4/22/01 + SC_ID: EPMI + +'); +INSERT INTO email([from],[to],subject,body) VALUES('john.postlethwaite@enron.com', 'john.zufferli@enron.com', 'Reference', 'John, hope things are going well up there for you. The big day is almost here for you and Jessica. I was wondering if I could use your name as a job reference if need be. I am just trying to get everything in order just in case something happens. + +John'); +INSERT INTO email([from],[to],subject,body) VALUES('jeffrey.shankman@enron.com', 'lschiffm@jonesday.com', 'Re:', 'I saw you called on the cell this a.m. 
Sorry I missed you. (I was in the +shower). I have had a shitty week--I suspect my silence (not only to you, +but others) after our phone call is a result of the week. I''m seeing Glen at +11:15....talk to you'); +INSERT INTO email([from],[to],subject,body) VALUES('litebytz@enron.com', '', 'Lite Bytz RSVP', ' +This week''s Lite Bytz presentation will feature the following TOOLZ speaker: + +Richard McDougall +Solaris 8 +Thursday, June 7, 2001 + +If you have not already signed up, please RSVP via email to litebytz@enron.com by the end of the day Tuesday, June 5, 2001. + +*Remember: this is now a Brown Bag Event--so bring your lunch and we will provide cookies and drinks. + +Click below for more details. + +http://home.enron.com:84/messaging/litebytztoolzprint.jpg'); + COMMIT; + } +} {} + +############################################################################### +# Everything above just builds an interesting test database. The actual +# tests come after this comment. +############################################################################### + +do_test fts3ac-1.2 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark' + } +} {6 17 25 38 40 42 73 74} +do_test fts3ac-1.3 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan' + } +} {24 40} +do_test fts3ac-1.4 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark susan' + } +} {40} +do_test fts3ac-1.5 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'susan mark' + } +} {40} +do_test fts3ac-1.6 { + execsql { + SELECT rowid FROM email WHERE email MATCH '"mark susan"' + } +} {} +do_test fts3ac-1.7 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark -susan' + } +} {6 17 25 38 42 73 74} +do_test fts3ac-1.8 { + execsql { + SELECT rowid FROM email WHERE email MATCH '-mark susan' + } +} {24} +do_test fts3ac-1.9 { + execsql { + SELECT rowid FROM email WHERE email MATCH 'mark OR susan' + } +} {6 17 24 25 38 40 42 73 74} + +# Some simple tests of the automatic "offsets(email)" column. In the sample +# data set above, only one message, number 20, contains the words +# "gas" and "reminder" in both body and subject. +# +do_test fts3ac-2.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts3ac-2.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {20 {2 0 42 3 2 1 54 8 3 1 54 8}} +do_test fts3ac-2.3 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE email MATCH 'body:gas reminder' + } +} {20 {2 1 54 8 3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} +do_test fts3ac-2.4 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE subject MATCH 'gas reminder' + } +} {20 {2 0 42 3 2 1 54 8}} +do_test fts3ac-2.5 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'gas reminder' + } +} {20 {3 0 42 3 3 1 54 8 3 0 129 3 3 0 143 3 3 0 240 3}} + +# Document 32 contains 5 instances of the world "child". But only +# 3 of them are paired with "product". Make sure only those instances +# that match the phrase appear in the offsets(email) list. 
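+#
+# For reference when reading these results: offsets() reports one group of
+# four space-separated integers per matching term, in the documented FTS3
+# format <column number> <query term number> <byte offset> <size in bytes>.
+# In the fts3ac-2.2 result above, for example, "2 0 42 3" decodes as column 2
+# (subject), query term 0 ('gas'), 3 bytes starting at byte offset 42 -- the
+# "GAS" in "E-GAS" -- and "2 1 54 8" is 'reminder' at byte 54 of the same
+# subject.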
+# +do_test fts3ac-3.1 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH 'child product' AND +rowid=32 + } +} {32 {3 0 94 5 3 0 114 5 3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7 3 1 493 7}} +do_test fts3ac-3.2 { + execsql { + SELECT rowid, offsets(email) FROM email + WHERE body MATCH '"child product"' + } +} {32 {3 0 207 5 3 1 213 7 3 0 245 5 3 1 251 7 3 0 409 5 3 1 415 7}} + +# Snippet generator tests +# +do_test fts3ac-4.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'subject:gas reminder' + } +} {{Alert Posted 10:00 AM November 20,2000: E-GAS Request Reminder}} +do_test fts3ac-4.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'christmas candlelight' + } +} {{... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. service and a candlelight service at 5:00 p.m., +among others. ...}} + +do_test fts3ac-4.3 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture Deal Sheet ... intent + Review Enron asset base for potential reuse/ licensing + Contract negotiations ...}} +do_test fts3ac-4.4 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'deal sheet potential reuse' + } +} {{EOL-Accenture <<>> <<>> intent + Review Enron asset base for <<>> <<>>/ licensing + Contract negotiations }} +do_test fts3ac-4.5 { + execsql { + SELECT snippet(email,'<<<','>>>',' ') FROM email + WHERE email MATCH 'first things' + } +} {{Re: <<>> Polish Deal! Congrats! <<>> seem to be building rapidly now on the }} +do_test fts3ac-4.6 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'chris is here' + } +} {{chris.germany@enron.com ... Sounds good to me. I bet this is next to the Warick?? Hotel. ... place.? What do you think about going here Christmas +eve?? They have an 11:00 a.m. ...}} +do_test fts3ac-4.7 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH '"pursuant to"' + } +} {{Erin: + +Pursuant to your request, attached are the Schedule to ...}} +do_test fts3ac-4.8 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'ancillary load davis' + } +} {{pete.davis@enron.com ... Start Date: 4/22/01; HourAhead hour: 3; No ancillary schedules awarded. +Variances detected. +Variances detected in Load schedule. + + LOG MESSAGES: + +PARSING ...}} + +# Combinations of AND and OR operators: +# +do_test fts3ac-5.1 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'questar enron OR com' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} +do_test fts3ac-5.2 { + execsql { + SELECT snippet(email) FROM email + WHERE email MATCH 'enron OR com questar' + } +} {{matt.smith@enron.com ... six reports: + +31 Keystone Receipts +15 Questar Pipeline +40 Rockies Production +22 West_2 ...}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ad.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ad.test --- sqlite3-3.4.2/test/fts3ad.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ad.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,65 @@ +# 2006 October 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module, and in particular +# the Porter stemmer. +# +# $Id: fts3ad.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +do_test fts3ad-1.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize porter); + INSERT INTO t1(rowid, content) VALUES(1, 'running and jumping'); + SELECT rowid FROM t1 WHERE content MATCH 'run jump'; + } +} {1} +do_test fts3ad-1.2 { + execsql { + SELECT snippet(t1) FROM t1 WHERE t1 MATCH 'run jump'; + } +} {{running and jumping}} +do_test fts3ad-1.3 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(2, 'abcdefghijklmnopqrstuvwyxz'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts3ad-1.4 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH 'abcdefghijXXXXqrstuvwyxz' + } +} {2 abcdefghijklmnopqrstuvwyxz} +do_test fts3ad-1.5 { + execsql { + INSERT INTO t1(rowid, content) + VALUES(3, 'The value is 123456789'); + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123789' + } +} {3 {The value is 123456789}} +do_test fts3ad-1.6 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE t1 MATCH '123000000789' + } +} {3 {The value is 123456789}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ae.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ae.test --- sqlite3-3.4.2/test/fts3ae.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ae.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,85 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing deletions in the FTS3 module. +# +# $Id: fts3ae.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 30 INSERT and +# DELETE statements, so that we'll test both the segmentMerge() merge +# (over the first 16) and the termSelect() merge (over the level-1 +# segment and 14 level-0 segments). 
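+#
+# In other words, the content of rowid N names the set bits of N: rowid 3
+# (binary 011) holds 'one two', rowid 5 (binary 101) holds 'one three',
+# rowid 7 (binary 111) holds 'one two three', and so on.  The expected
+# rowid lists below can be re-derived by hand from the bit patterns of the
+# rowids that survive the DELETEs, e.g. for 'one' (bit 0):
+#
+#   foreach r {2 3 5 6 8 9 11 12 14 15 17 18 20 21} {
+#     if {$r & 1} { lappend hits $r }    ;# surviving rowids with bit 0 set
+#   }
+#
+# which yields {3 5 9 11 15 17 21}, the result expected by fts3ae-2.1 below.
+# (Illustrative sketch only; it is not executed as part of the test.)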
+db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + DELETE FROM t1 WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + DELETE FROM t1 WHERE rowid = 22; +} + +do_test fts3af-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {14} + +do_test fts3ae-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {3 5 9 11 15 17 21} + +do_test fts3ae-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 11 14 15 18} + +do_test fts3ae-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {5 6 12 14 15 20 21} + +do_test fts3ae-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {8 9 11 12 14 15} + +do_test fts3ae-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {17 18 20 21} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3af.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3af.test --- sqlite3-3.4.2/test/fts3af.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3af.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,90 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing updates in the FTS3 module. +# +# $Id: fts3af.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Construct a full-text search table containing keywords which are the +# ordinal numbers of the bit positions set for a sequence of integers, +# which are used for the rowid. There are a total of 31 INSERT, +# UPDATE, and DELETE statements, so that we'll test both the +# segmentMerge() merge (over the first 16) and the termSelect() merge +# (over the level-1 segment and 15 level-0 segments). 
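+#
+# Unlike fts3ae, three of the rows here (1, 8 and 15) are later rewritten by
+# UPDATE statements, so their content no longer follows the bit-position
+# scheme; fts3af-2.0 below checks that exactly those three rowids match
+# 'update'.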
+db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'one'); + INSERT INTO t1 (rowid, content) VALUES(2, 'two'); + INSERT INTO t1 (rowid, content) VALUES(3, 'one two'); + INSERT INTO t1 (rowid, content) VALUES(4, 'three'); + INSERT INTO t1 (rowid, content) VALUES(5, 'one three'); + INSERT INTO t1 (rowid, content) VALUES(6, 'two three'); + INSERT INTO t1 (rowid, content) VALUES(7, 'one two three'); + DELETE FROM t1 WHERE rowid = 4; + INSERT INTO t1 (rowid, content) VALUES(8, 'four'); + UPDATE t1 SET content = 'update one three' WHERE rowid = 1; + INSERT INTO t1 (rowid, content) VALUES(9, 'one four'); + INSERT INTO t1 (rowid, content) VALUES(10, 'two four'); + DELETE FROM t1 WHERE rowid = 7; + INSERT INTO t1 (rowid, content) VALUES(11, 'one two four'); + INSERT INTO t1 (rowid, content) VALUES(12, 'three four'); + INSERT INTO t1 (rowid, content) VALUES(13, 'one three four'); + DELETE FROM t1 WHERE rowid = 10; + INSERT INTO t1 (rowid, content) VALUES(14, 'two three four'); + INSERT INTO t1 (rowid, content) VALUES(15, 'one two three four'); + UPDATE t1 SET content = 'update two five' WHERE rowid = 8; + INSERT INTO t1 (rowid, content) VALUES(16, 'five'); + DELETE FROM t1 WHERE rowid = 13; + INSERT INTO t1 (rowid, content) VALUES(17, 'one five'); + INSERT INTO t1 (rowid, content) VALUES(18, 'two five'); + INSERT INTO t1 (rowid, content) VALUES(19, 'one two five'); + DELETE FROM t1 WHERE rowid = 16; + INSERT INTO t1 (rowid, content) VALUES(20, 'three five'); + INSERT INTO t1 (rowid, content) VALUES(21, 'one three five'); + INSERT INTO t1 (rowid, content) VALUES(22, 'two three five'); + DELETE FROM t1 WHERE rowid = 19; + UPDATE t1 SET content = 'update' WHERE rowid = 15; +} + +do_test fts3af-1.1 { + execsql {SELECT COUNT(*) FROM t1} +} {16} + +do_test fts3af-2.0 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'update'} +} {1 8 15} + +do_test fts3af-2.1 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'one'} +} {1 3 5 9 11 17 21} + +do_test fts3af-2.2 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'two'} +} {2 3 6 8 11 14 18 22} + +do_test fts3af-2.3 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'three'} +} {1 5 6 12 14 20 21 22} + +do_test fts3af-2.4 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'four'} +} {9 11 12 14} + +do_test fts3af-2.5 { + execsql {SELECT rowid FROM t1 WHERE content MATCH 'five'} +} {8 17 18 20 21 22} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ag.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ag.test --- sqlite3-3.4.2/test/fts3ag.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ag.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,93 @@ +# 2006 October 19 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing handling of edge cases for various doclist +# merging functions in the FTS3 module query logic. +# +# $Id: fts3ag.test,v 1.2 2007/11/16 00:23:08 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, 'this is a test'); + INSERT INTO t1 (rowid, content) VALUES(2, 'also a test'); +} + +# No hits at all. Returns empty doclists from termSelect(). 
+do_test fts3ag-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +# Empty left in docListExceptMerge(). +do_test fts3ag-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this something'} +} {} + +# Empty right in docListExceptMerge(). +do_test fts3ag-1.3 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this -something'} +} {1} + +# Empty left in docListPhraseMerge(). +do_test fts3ag-1.4 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"this something"'} +} {} + +# Empty right in docListPhraseMerge(). +do_test fts3ag-1.5 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH '"something is"'} +} {} + +# Empty left in docListOrMerge(). +do_test fts3ag-1.6 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something OR this'} +} {1} + +# Empty right in docListOrMerge(). +do_test fts3ag-1.7 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR something'} +} {1} + +# Empty left in docListAndMerge(). +do_test fts3ag-1.8 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something this'} +} {} + +# Empty right in docListAndMerge(). +do_test fts3ag-1.9 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this something'} +} {} + +# No support for all-except queries. +do_test fts3ag-1.10 { + catchsql {SELECT rowid FROM t1 WHERE t1 MATCH '-this -something'} +} {1 {SQL logic error or missing database}} + +# Test that docListOrMerge() correctly handles reaching the end of one +# doclist before it reaches the end of the other. +do_test fts3ag-1.11 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'this OR also'} +} {1 2} +do_test fts3ag-1.12 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'also OR this'} +} {1 2} + +# Empty left and right in docListOrMerge(). Each term matches neither +# row, and when combined there was an assertion failure. +do_test fts3ag-1.13 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something OR nothing'} +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ah.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ah.test --- sqlite3-3.4.2/test/fts3ah.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ah.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,76 @@ +# 2006 October 31 (scaaarey) +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# here is testing correct handling of excessively long terms. +# +# $Id: fts3ah.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Generate a term of len copies of char. +proc bigterm {char len} { + for {set term ""} {$len>0} {incr len -1} { + append term $char + } + return $term +} + +# Generate a document of bigterms based on characters from the list +# chars. +proc bigtermdoc {chars len} { + set doc "" + foreach char $chars { + append doc " " [bigterm $char $len] + } + return $doc +} + +set len 5000 +set doc1 [bigtermdoc {a b c d} $len] +set doc2 [bigtermdoc {b d e f} $len] +set doc3 [bigtermdoc {a c e} $len] + +set aterm [bigterm a $len] +set bterm [bigterm b $len] +set xterm [bigterm x $len] + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, $doc1); + INSERT INTO t1 (rowid, content) VALUES(2, $doc2); + INSERT INTO t1 (rowid, content) VALUES(3, $doc3); +} + +# No hits at all. 
Returns empty doclists from termSelect(). +do_test fts3ah-1.1 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'something'} +} {} + +do_test fts3ah-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $aterm} +} {1 3} + +do_test fts3ah-1.2 { + execsql {SELECT rowid FROM t1 WHERE t1 MATCH $xterm} +} {} + +do_test fts3ah-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '$aterm -$xterm'" +} {1 3} + +do_test fts3ah-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"$aterm $bterm\"'" +} {1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ai.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ai.test --- sqlite3-3.4.2/test/fts3ai.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ai.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,87 @@ +# 2007 January 17 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite fts3 library. The +# focus here is testing handling of UPDATE when using UTF-16-encoded +# databases. +# +# $Id: fts3ai.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Return the UTF-16 representation of the supplied UTF-8 string $str. +# If $nt is true, append two 0x00 bytes as a nul terminator. +# NOTE(shess) Copied from capi3.test. +proc utf16 {str {nt 1}} { + set r [encoding convertto unicode $str] + if {$nt} { + append r "\x00\x00" + } + return $r +} + +db eval { + PRAGMA encoding = "UTF-16le"; + CREATE VIRTUAL TABLE t1 USING fts3(content); +} + +do_test fts3ai-1.0 { + execsql {PRAGMA encoding} +} {UTF-16le} + +do_test fts3ai-1.1 { + execsql {INSERT INTO t1 (rowid, content) VALUES(1, 'one')} + execsql {SELECT content FROM t1 WHERE rowid = 1} +} {one} + +do_test fts3ai-1.2 { + set sql "INSERT INTO t1 (rowid, content) VALUES(2, 'two')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 2} +} {two} + +do_test fts3ai-1.3 { + set sql "INSERT INTO t1 (rowid, content) VALUES(3, 'three')" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'trois' WHERE rowid = 3" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 3} +} {trois} + +do_test fts3ai-1.4 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(4, 'four')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 4} +} {four} + +do_test fts3ai-1.5 { + set sql16 [utf16 {INSERT INTO t1 (rowid, content) VALUES(5, 'five')}] + set STMT [sqlite3_prepare16 $DB $sql16 -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + set sql "UPDATE t1 SET content = 'cinq' WHERE rowid = 5" + set STMT [sqlite3_prepare $DB $sql -1 TAIL] + sqlite3_step $STMT + sqlite3_finalize $STMT + execsql {SELECT content FROM t1 WHERE rowid = 5} +} {cinq} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3aj.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3aj.test --- sqlite3-3.4.2/test/fts3aj.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3aj.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,89 @@ +# 2007 February 6 +# +# The author disclaims copyright to this 
source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# tests creating fts3 tables in an attached database. +# +# $Id: fts3aj.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Clean up anything left over from a previous pass. +file delete -force test2.db +file delete -force test2.db-journal +sqlite3 db2 test2.db + +db eval { + CREATE VIRTUAL TABLE t3 USING fts3(content); + INSERT INTO t3 (rowid, content) VALUES(1, "hello world"); +} + +db2 eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# This has always worked because the t1_* tables used by fts3 will be +# the defaults. +do_test fts3aj-1.1 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + SELECT rowid FROM t1 WHERE t1 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +# Make certain we're detached if there was an error. +catch {db eval {DETACH DATABASE two}} + +# In older code, this appears to work fine, but the t2_* tables used +# by fts3 will be created in database 'main' instead of database +# 'two'. It appears to work fine because the tables end up being the +# defaults, but obviously is badly broken if you hope to use things +# other than in the exact same ATTACH setup. +do_test fts3aj-1.2 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + CREATE VIRTUAL TABLE two.t2 USING fts3(content); + INSERT INTO t2 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t2 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t2 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM t2 WHERE t2 MATCH 'hello'; + DETACH DATABASE two; + } +} {1 2} +catch {db eval {DETACH DATABASE two}} + +# In older code, this broke because the fts3 code attempted to create +# t3_* tables in database 'main', but they already existed. Normally +# this wouldn't happen without t3 itself existing, in which case the +# fts3 code would never be called in the first place. +do_test fts3aj-1.3 { + execsql { + ATTACH DATABASE 'test2.db' AS two; + + CREATE VIRTUAL TABLE two.t3 USING fts3(content); + INSERT INTO two.t3 (rowid, content) VALUES(2, "hello there"); + INSERT INTO two.t3 (rowid, content) VALUES(3, "cruel world"); + SELECT rowid FROM two.t3 WHERE t3 MATCH 'hello'; + + DETACH DATABASE two; + } db2 +} {2} +catch {db eval {DETACH DATABASE two}} + +catch {db2 close} +file delete -force test2.db + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ak.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ak.test --- sqlite3-3.4.2/test/fts3ak.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ak.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,105 @@ +# 2007 March 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. These +# make sure that fts3 insertion buffering is fully transparent when +# using transactions. +# +# $Id: fts3ak.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. 
+ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1 (rowid, content) VALUES(1, "hello world"); + INSERT INTO t1 (rowid, content) VALUES(2, "hello there"); + INSERT INTO t1 (rowid, content) VALUES(3, "cruel world"); +} + +# Test that possibly-buffered inserts went through after commit. +do_test fts3ak-1.1 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(4, "false world"); + INSERT INTO t1 (rowid, content) VALUES(5, "false door"); + COMMIT TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4} + +# Test that buffered inserts are seen by selects in the same +# transaction. +do_test fts3ak-1.2 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(6, "another world"); + INSERT INTO t1 (rowid, content) VALUES(7, "another test"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + COMMIT TRANSACTION; + } +} {1 3 4 6} + +# Test that buffered inserts are seen within a transaction. This is +# really the same test as 1.2. +do_test fts3ak-1.3 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(8, "second world"); + INSERT INTO t1 (rowid, content) VALUES(9, "second sight"); + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + ROLLBACK TRANSACTION; + } +} {1 3 4 6 8} + +# Double-check that the previous result doesn't persist past the +# rollback! +do_test fts3ak-1.4 { + execsql { + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test it all together. +do_test fts3ak-1.5 { + execsql { + BEGIN TRANSACTION; + INSERT INTO t1 (rowid, content) VALUES(10, "second world"); + INSERT INTO t1 (rowid, content) VALUES(11, "second sight"); + ROLLBACK TRANSACTION; + SELECT rowid FROM t1 WHERE t1 MATCH 'world'; + } +} {1 3 4 6} + +# Test that the obvious case works. +do_test fts3ak-1.6 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(12, "third world"); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'third'; + } +} {12} + +# This is exactly the same as the previous test, except that older +# code loses the INSERT due to an SQLITE_SCHEMA error. +do_test fts3ak-1.7 { + execsql { + BEGIN; + INSERT INTO t1 (rowid, content) VALUES(13, "third dimension"); + CREATE TABLE x (c); + COMMIT; + SELECT rowid FROM t1 WHERE t1 MATCH 'dimension'; + } +} {13} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3al.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3al.test --- sqlite3-3.4.2/test/fts3al.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3al.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,69 @@ +# 2007 March 28 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing isspace/isalnum/tolower problems with the +# FTS3 module. Unfortunately, this code isn't a really principled set +# of tests, because it is impossible to know where new uses of these +# functions might appear. +# +# $Id: fts3al.test,v 1.2 2007/12/13 21:54:11 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Tests that startsWith() (calls isspace, tolower, isalnum) can handle +# hi-bit chars. parseSpec() also calls isalnum here. 
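+#
+# The "\x80" arguments below are simply characters outside the ASCII range;
+# the point is to push high-bit bytes through those isspace()/isalnum()/
+# tolower() calls, which is where trouble has historically appeared when
+# such bytes are passed as a (signed, hence negative) plain char.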
+do_test fts3al-1.1 { + execsql "CREATE VIRTUAL TABLE t1 USING fts3(content, \x80)" +} {} + +# Additionally tests isspace() call in getToken(), and isalnum() call +# in tokenListToIdList(). +do_test fts3al-1.2 { + catch { + execsql "CREATE VIRTUAL TABLE t2 USING fts3(content, tokenize \x80)" + } + sqlite3_errmsg $DB +} "unknown tokenizer: \x80" + +# Additionally test final isalnum() in startsWith(). +do_test fts3al-1.3 { + execsql "CREATE VIRTUAL TABLE t3 USING fts3(content, tokenize\x80)" +} {} + +# The snippet-generation code has calls to isspace() which are sort of +# hard to get to. It finds convenient breakpoints by starting ~40 +# chars before and after the matched term, and scanning ~10 chars +# around that position for isspace() characters. The long word with +# embedded hi-bit chars causes one of these isspace() calls to be +# exercised. The version with a couple extra spaces should cause the +# other isspace() call to be exercised. [Both cases have been tested +# in the debugger, but I'm hoping to continue to catch it if simple +# constant changes change things slightly. +# +# The trailing and leading hi-bit chars help with code which tests for +# isspace() to coalesce multiple spaces. + +set word "\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80xxxxx\x80" +set phrase1 "$word $word $word target $word $word $word" +set phrase2 "$word $word $word target $word $word $word" + +db eval {CREATE VIRTUAL TABLE t4 USING fts3(content)} +db eval "INSERT INTO t4 (content) VALUES ('$phrase1')" +db eval "INSERT INTO t4 (content) VALUES ('$phrase2')" + +do_test fts3al-1.4 { + execsql {SELECT rowid, length(snippet(t4)) FROM t4 WHERE t4 MATCH 'target'} +} {1 111 2 117} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3am.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3am.test --- sqlite3-3.4.2/test/fts3am.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3am.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,65 @@ +# 2007 April 9 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements regression tests for SQLite library. fts3 +# DELETE handling assumed all fields were non-null. This was not +# the intention at all. +# +# $Id: fts3am.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. 
+ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(col_a, col_b); + + INSERT INTO t1(rowid, col_a, col_b) VALUES(1, 'testing', 'testing'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(2, 'only a', null); + INSERT INTO t1(rowid, col_a, col_b) VALUES(3, null, 'only b'); + INSERT INTO t1(rowid, col_a, col_b) VALUES(4, null, null); +} + +do_test fts3am-1.0 { + execsql { + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {2 2 4} + +do_test fts3am-1.1 { + execsql { + DELETE FROM t1 WHERE rowid = 1; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {1 1 3} + +do_test fts3am-1.2 { + execsql { + DELETE FROM t1 WHERE rowid = 2; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 1 2} + +do_test fts3am-1.3 { + execsql { + DELETE FROM t1 WHERE rowid = 3; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 1} + +do_test fts3am-1.4 { + execsql { + DELETE FROM t1 WHERE rowid = 4; + SELECT COUNT(col_a), COUNT(col_b), COUNT(*) FROM t1; + } +} {0 0 0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3an.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3an.test --- sqlite3-3.4.2/test/fts3an.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3an.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,196 @@ +# 2007 April 26 +# +# The author disclaims copyright to this source code. +# +#************************************************************************* +# This file implements tests for prefix-searching in the fts3 +# component of the SQLite library. +# +# $Id: fts3an.test,v 1.2 2007/12/13 21:54:11 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# A large string to prime the pump with. +set text { + Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas + iaculis mollis ipsum. Praesent rhoncus placerat justo. Duis non quam + sed turpis posuere placerat. Curabitur et lorem in lorem porttitor + aliquet. Pellentesque bibendum tincidunt diam. Vestibulum blandit + ante nec elit. In sapien diam, facilisis eget, dictum sed, viverra + at, felis. Vestibulum magna. Sed magna dolor, vestibulum rhoncus, + ornare vel, vulputate sit amet, felis. Integer malesuada, tellus at + luctus gravida, diam nunc porta nibh, nec imperdiet massa metus eu + lectus. Aliquam nisi. Nunc fringilla nulla at lectus. Suspendisse + potenti. Cum sociis natoque penatibus et magnis dis parturient + montes, nascetur ridiculus mus. Pellentesque odio nulla, feugiat eu, + suscipit nec, consequat quis, risus. +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(c); + + INSERT INTO t1(rowid, c) VALUES(1, $text); + INSERT INTO t1(rowid, c) VALUES(2, 'Another lovely row'); +} + +# Exact match +do_test fts3an-1.1 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem'" +} {1} + +# And a prefix +do_test fts3an-1.2 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore*'" +} {1} + +# Prefix includes exact match +do_test fts3an-1.3 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lorem*'" +} {1} + +# Make certain everything isn't considered a prefix! +do_test fts3an-1.4 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lore'" +} {} + +# Prefix across multiple rows. +do_test fts3an-1.5 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo*'" +} {1 2} + +# Likewise, with multiple hits in one document. 
+do_test fts3an-1.6 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts3an-1.7 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lov*'" +} {2} + +# * not at end is dropped. +do_test fts3an-1.8 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH 'lo *'" +} {} + +# Stand-alone * is dropped. +do_test fts3an-1.9 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '*'" +} {} + +# Phrase-query prefix. +do_test fts3an-1.10 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r*\"'" +} {2} +do_test fts3an-1.11 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"lovely r\"'" +} {} + +# Phrase query with multiple prefix matches. +do_test fts3an-1.12 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l*\"'" +} {1 2} + +# Phrase query with multiple prefix matches. +do_test fts3an-1.13 { + execsql "SELECT rowid FROM t1 WHERE t1 MATCH '\"a* l* row\"'" +} {2} + + + + +# Test across updates (and, by implication, deletes). + +# Version of text without "lorem". +regsub -all {[Ll]orem} $text '' ntext + +db eval { + CREATE VIRTUAL TABLE t2 USING fts3(c); + + INSERT INTO t2(rowid, c) VALUES(1, $text); + INSERT INTO t2(rowid, c) VALUES(2, 'Another lovely row'); + UPDATE t2 SET c = $ntext WHERE rowid = 1; +} + +# Can't see lorem as an exact match. +do_test fts3an-2.1 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lorem'" +} {} + +# Can't see a prefix of lorem, either. +do_test fts3an-2.2 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lore*'" +} {} + +# Can see lovely in the other document. +do_test fts3an-2.3 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lo*'" +} {2} + +# Can still see other hits. +do_test fts3an-2.4 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'l*'" +} {1 2} + +# Prefix which should only hit one document. +do_test fts3an-2.5 { + execsql "SELECT rowid FROM t2 WHERE t2 MATCH 'lov*'" +} {2} + + + +# Test with a segment which will have multiple levels in the tree. + +# Build a big document with lots of unique terms. +set bigtext $text +foreach c {a b c d e} { + regsub -all {[A-Za-z]+} $bigtext "&$c" t + append bigtext $t +} + +# Populate a table with many copies of the big document, so that we +# can test the number of hits found. Populate $ret with the expected +# hit counts for each row. offsets() returns 4 elements for every +# hit. We'll have 6 hits for row 1, 1 for row 2, and 6*(2^5)==192 for +# $bigtext. +set ret {6 1} +db eval { + BEGIN; + CREATE VIRTUAL TABLE t3 USING fts3(c); + + INSERT INTO t3(rowid, c) VALUES(1, $text); + INSERT INTO t3(rowid, c) VALUES(2, 'Another lovely row'); +} +for {set i 0} {$i<100} {incr i} { + db eval {INSERT INTO t3(rowid, c) VALUES(3+$i, $bigtext)} + lappend ret 192 +} +db eval {COMMIT;} + +# Test that we get the expected number of hits. +do_test fts3an-3.1 { + set t {} + db eval {SELECT offsets(t3) as o FROM t3 WHERE t3 MATCH 'l*'} { + set l [llength $o] + lappend t [expr {$l/4}] + } + set t +} $ret + +# TODO(shess) It would be useful to test a couple edge cases, but I +# don't know if we have the precision to manage it from here at this +# time. Prefix hits can cross leaves, which the code above _should_ +# hit by virtue of size. There are two variations on this. If the +# tree is 2 levels high, the code will find the leaf-node extent +# directly, but if it is higher, the code will have to follow two +# separate interior branches down the tree. Both should be tested. 
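+#
+# A rough sketch of how the taller-tree variation might be forced, left
+# here as a comment rather than a live test (the 5000-row count is only
+# a guess and would need tuning before the single optimized segment's
+# interior tree is actually more than two levels deep):
+#
+#   db eval {CREATE VIRTUAL TABLE t5 USING fts3(c)}
+#   db eval {BEGIN}
+#   for {set i 0} {$i<5000} {incr i} {
+#     db eval {INSERT INTO t5(c) VALUES ($bigtext)}
+#   }
+#   db eval {COMMIT}
+#   db eval {SELECT OPTIMIZE(t5) FROM t5 LIMIT 1}
+#   execsql {SELECT count(*) FROM t5 WHERE t5 MATCH 'l*'}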
+ +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3ao.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3ao.test --- sqlite3-3.4.2/test/fts3ao.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3ao.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,169 @@ +# 2007 June 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. +# +# $Id: fts3ao.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +#--------------------------------------------------------------------- +# These tests, fts3ao-1.*, test that ticket #2429 is fixed. +# +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one four two'); +} +do_test fts3ao-1.1 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE c MATCH 'four'; + } +} {1 {one four two}} +do_test fts3ao-1.2 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE b MATCH 'four'; + } +} {1 {one four}} +do_test fts3ao-1.3 { + execsql { + SELECT rowid, snippet(t1) FROM t1 WHERE a MATCH 'four'; + } +} {1 {one three four}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts3 table. +# +do_test fts3ao-2.1 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {t1 t1_content t1_segments t1_segdir} +do_test fts3ao-2.2 { + execsql { ALTER TABLE t1 RENAME to fts_t1; } +} {} +do_test fts3ao-2.3 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.4 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir} + +# See what happens when renaming the fts3 table fails. +# +do_test fts3ao-2.5 { + catchsql { + CREATE TABLE t1_segdir(a, b, c); + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts3ao-2.6 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.7 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} + +# See what happens when renaming the fts3 table fails inside a transaction. 
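+# (As in fts3ao-2.5, the rename should fail because the ordinary t1_segdir
+# table created there still blocks renaming the fts_t1_segdir shadow
+# table; the failed ALTER must be undone without losing either the open
+# transaction or the contents of fts_t1, which is what 2.10 through 2.12
+# verify.)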
+# +do_test fts3ao-2.8 { + execsql { + BEGIN; + INSERT INTO fts_t1(a, b, c) VALUES('one two three', 'one four', 'one two'); + } +} {} +do_test fts3ao-2.9 { + catchsql { + ALTER TABLE fts_t1 RENAME to t1; + } +} {1 {SQL logic error or missing database}} +do_test fts3ao-2.10 { + execsql { SELECT rowid, snippet(fts_t1) FROM fts_t1 WHERE a MATCH 'four'; } +} {1 {one three four}} +do_test fts3ao-2.11 { + execsql { SELECT tbl_name FROM sqlite_master WHERE type = 'table'} +} {fts_t1 fts_t1_content fts_t1_segments fts_t1_segdir t1_segdir} +do_test fts3ao-2.12 { + execsql COMMIT + execsql {SELECT a FROM fts_t1} +} {{one three four} {one two three}} +do_test fts3ao-2.12 { + execsql { SELECT a, b, c FROM fts_t1 WHERE c MATCH 'four'; } +} {{one three four} {one four} {one four two}} + +#------------------------------------------------------------------- +# Close, delete and reopen the database. The following test should +# be run on an initially empty db. +# +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test fts3ao-3.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts3(a, b, c); + INSERT INTO t1(a, b, c) VALUES('one three four', 'one four', 'one two'); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two}} + +# This test was crashing at one point. +# +do_test fts3ao-3.2 { + execsql { + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + CREATE TABLE t3(a, b, c); + SELECT a, b, c FROM t1 WHERE c MATCH 'two'; + } +} {{one three four} {one four} {one two} {one three four} {one four} {one two}} + +#--------------------------------------------------------------------- +# Test that it is possible to rename an fts3 table in an attached +# database. +# +file delete -force test2.db test2.db-journal + +do_test fts3ao-3.1 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE VIRTUAL TABLE aux.t1 USING fts3(a, b, c); + INSERT INTO aux.t1(a, b, c) VALUES( + 'neung song sahm', 'neung see', 'neung see song' + ); + } +} {} + +do_test fts3ao-3.2 { + execsql { SELECT a, b, c FROM aux.t1 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts3ao-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +do_test fts3ao-3.4 { + execsql { ALTER TABLE aux.t1 RENAME TO t2 } +} {} + +do_test fts3ao-3.2 { + execsql { SELECT a, b, c FROM t2 WHERE a MATCH 'song'; } +} {{neung song sahm} {neung see} {neung see song}} + +do_test fts3ao-3.3 { + execsql { SELECT a, b, c FROM t1 WHERE c MATCH 'two'; } +} {{one three four} {one four} {one two}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3atoken.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3atoken.test --- sqlite3-3.4.2/test/fts3atoken.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3atoken.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,174 @@ +# 2007 June 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the pluggable tokeniser feature of the +# FTS3 module. 
+# +# $Id: fts3atoken.test,v 1.1 2007/08/20 17:38:42 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +proc escape_string {str} { + set out "" + foreach char [split $str ""] { + scan $char %c i + if {$i<=127} { + append out $char + } else { + append out [format {\x%.4x} $i] + } + } + set out +} + +#-------------------------------------------------------------------------- +# Test cases fts3token-1.* are the warm-body test for the SQL scalar +# function fts3_tokenizer(). The procedure is as follows: +# +# 1: Verify that there is no such fts3 tokenizer as 'blah'. +# +# 2: Query for the built-in tokenizer 'simple'. Insert a copy of the +# retrieved value as tokenizer 'blah'. +# +# 3: Test that the value returned for tokenizer 'blah' is now the +# same as that retrieved for 'simple'. +# +# 4: Test that it is now possible to create an fts3 table using +# tokenizer 'blah' (it was not possible in step 1). +# +# 5: Test that the table created to use tokenizer 'blah' is usable. +# +do_test fts3token-1.1 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize blah); + } +} {1 {unknown tokenizer: blah}} +do_test fts3token-1.2 { + execsql { + SELECT fts3_tokenizer('blah', fts3_tokenizer('simple')) IS NULL; + } +} {0} +do_test fts3token-1.3 { + execsql { + SELECT fts3_tokenizer('blah') == fts3_tokenizer('simple'); + } +} {1} +do_test fts3token-1.4 { + catchsql { + CREATE VIRTUAL TABLE t1 USING fts3(content, tokenize blah); + } +} {0 {}} +do_test fts3token-1.5 { + execsql { + INSERT INTO t1(content) VALUES('There was movement at the station'); + INSERT INTO t1(content) VALUES('For the word has passed around'); + INSERT INTO t1(content) VALUES('That the colt from ol regret had got away'); + SELECT content FROM t1 WHERE content MATCH 'movement' + } +} {{There was movement at the station}} + +#-------------------------------------------------------------------------- +# Test cases fts3token-2.* test error cases in the scalar function based +# API for getting and setting tokenizers. +# +do_test fts3token-2.1 { + catchsql { + SELECT fts3_tokenizer('nosuchtokenizer'); + } +} {1 {unknown tokenizer: nosuchtokenizer}} + +#-------------------------------------------------------------------------- +# Test cases fts3token-3.* test the three built-in tokenizers with a +# simple input string via the built-in test function. This is as much +# to test the test function as the tokenizer implementations. +# +do_test fts3token-3.1 { + execsql { + SELECT fts3_tokenizer_test('simple', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +do_test fts3token-3.2 { + execsql { + SELECT fts3_tokenizer_test('porter', 'I don''t see how'); + } +} {{0 i I 1 don don 2 t t 3 see see 4 how how}} +ifcapable icu { + do_test fts3token-3.3 { + execsql { + SELECT fts3_tokenizer_test('icu', 'I don''t see how'); + } + } {{0 i I 1 don't don't 2 see see 3 how how}} +} + +#-------------------------------------------------------------------------- +# Test cases fts3token-4.* test the ICU tokenizer. In practice, this +# tokenizer only has two modes - "thai" and "everybody else". Some other +# Asian languages (Lao, Khmer etc.) require the same special treatment as +# Thai, but ICU doesn't support them yet. 
+# +ifcapable icu { + + proc do_icu_test {name locale input output} { + set ::out [db eval { SELECT fts3_tokenizer_test('icu', $locale, $input) }] + do_test $name { + lindex $::out 0 + } $output + } + + do_icu_test fts3token-4.1 en_US {} {} + do_icu_test fts3token-4.2 en_US {Test cases fts3} [list \ + 0 test Test 1 cases cases 2 fts3 fts3 + ] + + # The following test shows that ICU is smart enough to recognise + # Thai chararacters, even when the locale is set to English/United + # States. + # + set input "\u0e2d\u0e30\u0e44\u0e23\u0e19\u0e30\u0e04\u0e23\u0e31\u0e1a" + set output "0 \u0e2d\u0e30\u0e44\u0e23 \u0e2d\u0e30\u0e44\u0e23 " + append output "1 \u0e19\u0e30 \u0e19\u0e30 " + append output "2 \u0e04\u0e23\u0e31\u0e1a \u0e04\u0e23\u0e31\u0e1a" + + do_icu_test fts3token-4.3 th_TH $input $output + do_icu_test fts3token-4.4 en_US $input $output + + # ICU handles an unknown locale by falling back to the default. + # So this is not an error. + do_icu_test fts3token-4.5 MiddleOfTheOcean $input $output + + set longtoken "AReallyReallyLongTokenOneThatWillSurelyRequire" + append longtoken "AReallocInTheIcuTokenizerCode" + + set input "short tokens then " + append input $longtoken + set output "0 short short " + append output "1 tokens tokens " + append output "2 then then " + append output "3 [string tolower $longtoken] $longtoken" + + do_icu_test fts3token-4.6 MiddleOfTheOcean $input $output + do_icu_test fts3token-4.7 th_TH $input $output + do_icu_test fts3token-4.8 en_US $input $output +} + +do_test fts3token-internal { + execsql { SELECT fts3_tokenizer_internal_test() } +} {ok} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3b.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3b.test --- sqlite3-3.4.2/test/fts3b.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3b.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,218 @@ +# 2007 August 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. This +# script tests for the fts2 rowid-versus-vacuum problem (ticket #2566). +# +# $Id: fts3b.test,v 1.3 2007/09/13 18:14:49 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (c) VALUES('this is a test'); + INSERT INTO t1 (c) VALUES('that was a test'); + INSERT INTO t1 (c) VALUES('this is fun'); + DELETE FROM t1 WHERE c = 'that was a test'; +} + +# Baseline test. +do_test fts3b-1.1 { + execsql { + SELECT rowid FROM t1 WHERE c MATCH 'this'; + } +} {1 3} + +db eval {VACUUM} + +# The VACUUM renumbered the t1_content table in fts2, which breaks +# this. +do_test fts3b-1.2 { + execsql { + SELECT rowid FROM t1 WHERE c MATCH 'this'; + } +} {1 3} + +# The t2 table is unfortunately pretty contrived. We need documents +# that are bigger than ROOT_MAX (1024) to force segments out of the +# segdir and into %_segments. We also need to force segment merging +# to generate a hole in the %_segments table, which needs more than 16 +# docs. 
Beyond that, to test correct operation of BLOCK_SELECT_STMT, +# we need to merge a mult-level tree, which is where the 10,000 comes +# from. Which is slow, thus the set of transactions, with the 500 +# being a number such that 10,000/500 > 16. +set text { + Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Maecenas + iaculis mollis ipsum. Praesent rhoncus placerat justo. Duis non quam + sed turpis posuere placerat. Curabitur et lorem in lorem porttitor + aliquet. Pellentesque bibendum tincidunt diam. Vestibulum blandit + ante nec elit. In sapien diam, facilisis eget, dictum sed, viverra + at, felis. Vestibulum magna. Sed magna dolor, vestibulum rhoncus, + ornare vel, vulputate sit amet, felis. Integer malesuada, tellus at + luctus gravida, diam nunc porta nibh, nec imperdiet massa metus eu + lectus. Aliquam nisi. Nunc fringilla nulla at lectus. Suspendisse + potenti. Cum sociis natoque penatibus et magnis dis parturient + montes, nascetur ridiculus mus. Pellentesque odio nulla, feugiat eu, + suscipit nec, consequat quis, risus. +} +append text $text + +db eval {CREATE VIRTUAL TABLE t2 USING fts3(c)} +set res {} +db eval {BEGIN} +for {set ii 0} {$ii<10000} {incr ii} { + db eval {INSERT INTO t2 (c) VALUES ($text)} + lappend res [expr {$ii+1}] + if {($ii%500)==0} { + db eval { + COMMIT; + BEGIN; + } + } +} +db eval {COMMIT} + +do_test fts3b-2.1 { + execsql { + SELECT rowid FROM t2 WHERE c MATCH 'lorem'; + } +} $res + +db eval {VACUUM} + +# The VACUUM renumbered the t2_segment table in fts2, which would +# break the following. +do_test fts3b-2.2 { + execsql { + SELECT rowid FROM t2 WHERE c MATCH 'lorem'; + } +} $res + +# Since fts3 is already an API break, I've marked the table-named +# column HIDDEN. + +db eval { + CREATE VIRTUAL TABLE t3 USING fts3(c); + INSERT INTO t3 (c) VALUES('this is a test'); + INSERT INTO t3 (c) VALUES('that was a test'); + INSERT INTO t3 (c) VALUES('this is fun'); + DELETE FROM t3 WHERE c = 'that was a test'; +} + +# Test that the table-named column still works. +do_test fts3b-3.1 { + execsql { + SELECT snippet(t3) FROM t3 WHERE t3 MATCH 'test'; + } +} {{this is a test}} + +# Test that the column doesn't appear when selecting all columns. +do_test fts3b-3.2 { + execsql { + SELECT * FROM t3 WHERE rowid = 1; + } +} {{this is a test}} + +# Test that the column doesn't conflict with inserts that don't name +# columns. +do_test fts3b-3.3 { + execsql { + INSERT INTO t3 VALUES ('another test'); + } +} {} + +# fts3 adds a new implicit column, docid, which acts as an alias for +# rowid. + +db eval { + CREATE VIRTUAL TABLE t4 USING fts3(c); + INSERT INTO t4 (c) VALUES('this is a test'); + INSERT INTO t4 (c) VALUES('that was a test'); + INSERT INTO t4 (c) VALUES('this is fun'); + DELETE FROM t4 WHERE c = 'that was a test'; +} + +# Test that docid is present and identical to rowid. +do_test fts3b-4.1 { + execsql { + SELECT rowid FROM t4 WHERE rowid <> docid; + } +} {} + +# Test that docid is hidden. +do_test fts3b-4.2 { + execsql { + SELECT * FROM t4 WHERE rowid = 1; + } +} {{this is a test}} + +# Test that docid can be selected. +do_test fts3b-4.3 { + execsql { + SELECT docid, * FROM t4 WHERE rowid = 1; + } +} {1 {this is a test}} + +# Test that docid can be used in WHERE. +do_test fts3b-4.4 { + execsql { + SELECT docid, * FROM t4 WHERE docid = 1; + } +} {1 {this is a test}} + +# Test that the column doesn't conflict with inserts that don't name +# columns. [Yes, this is the same as fts3b-3.3, here just in case the +# goals of that test change.] 
+do_test fts3b-4.5 { + execsql { + INSERT INTO t4 VALUES ('another test'); + } +} {} + +# Test that the docid can be forced on insert. +do_test fts3b-4.6 { + execsql { + INSERT INTO t4 (docid, c) VALUES (10, 'yet another test'); + SELECT * FROM t4 WHERE docid = 10; + } +} {{yet another test}} + +# Test that rowid can also be forced. +do_test fts3b-4.7 { + execsql { + INSERT INTO t4 (docid, c) VALUES (12, 'still testing'); + SELECT * FROM t4 WHERE docid = 12; + } +} {{still testing}} + +# If an insert tries to set both docid and rowid, require an error. +do_test fts3b-4.8 { + catchsql { + INSERT INTO t4 (rowid, docid, c) VALUES (14, 15, 'bad test'); + SELECT * FROM t4 WHERE docid = 14; + } +} {1 {SQL logic error or missing database}} + +# Don't allow update of docid, to match rowid behaviour. +do_test fts3b-4.9 { + catchsql { + UPDATE t4 SET docid = 14 WHERE docid = 12; + } +} {1 {SQL logic error or missing database}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3c.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3c.test --- sqlite3-3.4.2/test/fts3c.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3c.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,357 @@ +# 2008 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file exercises some new testing functions in the FTS3 module, +# and then uses them to do some basic tests that FTS3 is internally +# working as expected. +# +# $Id: fts3c.test,v 1.1 2008/07/03 19:53:22 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +#************************************************************************* +# Probe to see if support for these functions is compiled in. +# TODO(shess): Change main.mk to do the right thing and remove this test. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'x'); +} + +set s {SELECT dump_terms(t1, 1) FROM t1 LIMIT 1} +set r {1 {unable to use function dump_terms in the requested context}} +if {[catchsql $s]==$r} { + finish_test + return +} + +#************************************************************************* +# Test that the new functions give appropriate errors. 
+do_test fts3c-0.0 { + catchsql { + SELECT dump_terms(t1, 1) FROM t1 LIMIT 1; + } +} {1 {dump_terms: incorrect arguments}} + +do_test fts3c-0.1 { + catchsql { + SELECT dump_terms(t1, 0, 0, 0) FROM t1 LIMIT 1; + } +} {1 {dump_terms: incorrect arguments}} + +do_test fts3c-0.2 { + catchsql { + SELECT dump_terms(1, t1) FROM t1 LIMIT 1; + } +} {1 {unable to use function dump_terms in the requested context}} + +do_test fts3c-0.3 { + catchsql { + SELECT dump_terms(t1, 16, 16) FROM t1 LIMIT 1; + } +} {1 {dump_terms: segment not found}} + +do_test fts3c-0.4 { + catchsql { + SELECT dump_doclist(t1) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts3c-0.5 { + catchsql { + SELECT dump_doclist(t1, NULL) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: empty second argument}} + +do_test fts3c-0.6 { + catchsql { + SELECT dump_doclist(t1, '') FROM t1 LIMIT 1; + } +} {1 {dump_doclist: empty second argument}} + +do_test fts3c-0.7 { + catchsql { + SELECT dump_doclist(t1, 'a', 0) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts3c-0.8 { + catchsql { + SELECT dump_doclist(t1, 'a', 0, 0, 0) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: incorrect arguments}} + +do_test fts3c-0.9 { + catchsql { + SELECT dump_doclist(t1, 'a', 16, 16) FROM t1 LIMIT 1; + } +} {1 {dump_doclist: segment not found}} + +#************************************************************************* +# Utility function to check for the expected terms in the segment +# level/index. _all version does same but for entire index. +proc check_terms {test level index terms} { + # TODO(shess): Figure out why uplevel in do_test can't catch + # $level and $index directly. + set ::level $level + set ::index $index + do_test $test.terms { + execsql { + SELECT dump_terms(t1, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $terms] +} +proc check_terms_all {test terms} { + do_test $test.terms { + execsql { + SELECT dump_terms(t1) FROM t1 LIMIT 1; + } + } [list $terms] +} + +# Utility function to check for the expected doclist for the term in +# segment level/index. _all version does same for entire index. +proc check_doclist {test level index term doclist} { + # TODO(shess): Again, why can't the non-:: versions work? + set ::term $term + set ::level $level + set ::index $index + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $doclist] +} +proc check_doclist_all {test term doclist} { + set ::term $term + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term) FROM t1 LIMIT 1; + } + } [list $doclist] +} + +#************************************************************************* +# Test the segments resulting from straight-forward inserts. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); +} + +# Check for expected segments and expected matches. +do_test fts3c-1.0.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2} +do_test fts3c-1.0.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +# Check the specifics of the segments constructed. +# Logical view of entire index. 
+check_terms_all fts3c-1.0.1 {a is test that this was} +check_doclist_all fts3c-1.0.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts3c-1.0.1.2 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts3c-1.0.1.3 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts3c-1.0.1.4 that {[2 0[0]]} +check_doclist_all fts3c-1.0.1.5 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts3c-1.0.1.6 was {[2 0[1]]} + +# Segment 0,0 +check_terms fts3c-1.0.2 0 0 {a is test this} +check_doclist fts3c-1.0.2.1 0 0 a {[1 0[2]]} +check_doclist fts3c-1.0.2.2 0 0 is {[1 0[1]]} +check_doclist fts3c-1.0.2.3 0 0 test {[1 0[3]]} +check_doclist fts3c-1.0.2.4 0 0 this {[1 0[0]]} + +# Segment 0,1 +check_terms fts3c-1.0.3 0 1 {a test that was} +check_doclist fts3c-1.0.3.1 0 1 a {[2 0[2]]} +check_doclist fts3c-1.0.3.2 0 1 test {[2 0[3]]} +check_doclist fts3c-1.0.3.3 0 1 that {[2 0[0]]} +check_doclist fts3c-1.0.3.4 0 1 was {[2 0[1]]} + +# Segment 0,2 +check_terms fts3c-1.0.4 0 2 {a is test this} +check_doclist fts3c-1.0.4.1 0 2 a {[3 0[2]]} +check_doclist fts3c-1.0.4.2 0 2 is {[3 0[1]]} +check_doclist fts3c-1.0.4.3 0 2 test {[3 0[3]]} +check_doclist fts3c-1.0.4.4 0 2 this {[3 0[0]]} + +#************************************************************************* +# Test the segments resulting from inserts followed by a delete. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE docid = 1; +} + +do_test fts3c-1.1.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2 0 3} +do_test fts3c-1.1.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}} + +check_terms_all fts3c-1.1.1 {a is test that this was} +check_doclist_all fts3c-1.1.1.1 a {[2 0[2]] [3 0[2]]} +check_doclist_all fts3c-1.1.1.2 is {[3 0[1]]} +check_doclist_all fts3c-1.1.1.3 test {[2 0[3]] [3 0[3]]} +check_doclist_all fts3c-1.1.1.4 that {[2 0[0]]} +check_doclist_all fts3c-1.1.1.5 this {[3 0[0]]} +check_doclist_all fts3c-1.1.1.6 was {[2 0[1]]} + +check_terms fts3c-1.1.2 0 0 {a is test this} +check_doclist fts3c-1.1.2.1 0 0 a {[1 0[2]]} +check_doclist fts3c-1.1.2.2 0 0 is {[1 0[1]]} +check_doclist fts3c-1.1.2.3 0 0 test {[1 0[3]]} +check_doclist fts3c-1.1.2.4 0 0 this {[1 0[0]]} + +check_terms fts3c-1.1.3 0 1 {a test that was} +check_doclist fts3c-1.1.3.1 0 1 a {[2 0[2]]} +check_doclist fts3c-1.1.3.2 0 1 test {[2 0[3]]} +check_doclist fts3c-1.1.3.3 0 1 that {[2 0[0]]} +check_doclist fts3c-1.1.3.4 0 1 was {[2 0[1]]} + +check_terms fts3c-1.1.4 0 2 {a is test this} +check_doclist fts3c-1.1.4.1 0 2 a {[3 0[2]]} +check_doclist fts3c-1.1.4.2 0 2 is {[3 0[1]]} +check_doclist fts3c-1.1.4.3 0 2 test {[3 0[3]]} +check_doclist fts3c-1.1.4.4 0 2 this {[3 0[0]]} + +check_terms fts3c-1.1.5 0 3 {a is test this} +check_doclist fts3c-1.1.5.1 0 3 a {[1]} +check_doclist fts3c-1.1.5.2 0 3 is {[1]} +check_doclist fts3c-1.1.5.3 0 3 test {[1]} +check_doclist fts3c-1.1.5.4 0 3 this {[1]} + +#************************************************************************* +# Test results when all references to certain tokens are deleted. 
+db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE docid IN (1,3); +} + +# Still 4 segments because 0,3 will contain deletes for docid 1 and 3. +do_test fts3c-1.2.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 0 2 0 3} +do_test fts3c-1.2.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts3c-1.2.1 {a is test that this was} +check_doclist_all fts3c-1.2.1.1 a {[2 0[2]]} +check_doclist_all fts3c-1.2.1.2 is {} +check_doclist_all fts3c-1.2.1.3 test {[2 0[3]]} +check_doclist_all fts3c-1.2.1.4 that {[2 0[0]]} +check_doclist_all fts3c-1.2.1.5 this {} +check_doclist_all fts3c-1.2.1.6 was {[2 0[1]]} + +check_terms fts3c-1.2.2 0 0 {a is test this} +check_doclist fts3c-1.2.2.1 0 0 a {[1 0[2]]} +check_doclist fts3c-1.2.2.2 0 0 is {[1 0[1]]} +check_doclist fts3c-1.2.2.3 0 0 test {[1 0[3]]} +check_doclist fts3c-1.2.2.4 0 0 this {[1 0[0]]} + +check_terms fts3c-1.2.3 0 1 {a test that was} +check_doclist fts3c-1.2.3.1 0 1 a {[2 0[2]]} +check_doclist fts3c-1.2.3.2 0 1 test {[2 0[3]]} +check_doclist fts3c-1.2.3.3 0 1 that {[2 0[0]]} +check_doclist fts3c-1.2.3.4 0 1 was {[2 0[1]]} + +check_terms fts3c-1.2.4 0 2 {a is test this} +check_doclist fts3c-1.2.4.1 0 2 a {[3 0[2]]} +check_doclist fts3c-1.2.4.2 0 2 is {[3 0[1]]} +check_doclist fts3c-1.2.4.3 0 2 test {[3 0[3]]} +check_doclist fts3c-1.2.4.4 0 2 this {[3 0[0]]} + +check_terms fts3c-1.2.5 0 3 {a is test this} +check_doclist fts3c-1.2.5.1 0 3 a {[1] [3]} +check_doclist fts3c-1.2.5.2 0 3 is {[1] [3]} +check_doclist fts3c-1.2.5.3 0 3 test {[1] [3]} +check_doclist fts3c-1.2.5.4 0 3 this {[1] [3]} + +#************************************************************************* +# Test results when everything is optimized manually. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE docid IN (1,3); + DROP TABLE IF EXISTS t1old; + ALTER TABLE t1 RENAME TO t1old; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) SELECT docid, c FROM t1old; + DROP TABLE t1old; +} + +# Should be a single optimal segment with the same logical results. 
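+# (Only docid 2 survives the DELETE, so the INSERT ... SELECT into the
+# rebuilt table writes a single level-0 segment and carries across none
+# of the deletes that segment 0,3 held in fts3c-1.2.)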
+do_test fts3c-1.3.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts3c-1.3.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts3c-1.3.1 {a test that was} +check_doclist_all fts3c-1.3.1.1 a {[2 0[2]]} +check_doclist_all fts3c-1.3.1.2 test {[2 0[3]]} +check_doclist_all fts3c-1.3.1.3 that {[2 0[0]]} +check_doclist_all fts3c-1.3.1.4 was {[2 0[1]]} + +check_terms fts3c-1.3.2 0 0 {a test that was} +check_doclist fts3c-1.3.2.1 0 0 a {[2 0[2]]} +check_doclist fts3c-1.3.2.2 0 0 test {[2 0[3]]} +check_doclist fts3c-1.3.2.3 0 0 that {[2 0[0]]} +check_doclist fts3c-1.3.2.4 0 0 was {[2 0[1]]} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3d.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3d.test --- sqlite3-3.4.2/test/fts3d.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3d.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,346 @@ +# 2008 June 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The focus +# of this script is testing the FTS3 module's optimize() function. +# +# $Id: fts3d.test,v 1.2 2008/07/15 21:32:07 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +#************************************************************************* +# Probe to see if support for the FTS3 dump_* functions is compiled in. +# TODO(shess): Change main.mk to do the right thing and remove this test. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'x'); +} + +set s {SELECT dump_terms(t1, 1) FROM t1 LIMIT 1} +set r {1 {unable to use function dump_terms in the requested context}} +if {[catchsql $s]==$r} { + finish_test + return +} + +#************************************************************************* +# Utility function to check for the expected terms in the segment +# level/index. _all version does same but for entire index. +proc check_terms {test level index terms} { + # TODO(shess): Figure out why uplevel in do_test can't catch + # $level and $index directly. + set ::level $level + set ::index $index + do_test $test.terms { + execsql { + SELECT dump_terms(t1, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $terms] +} +proc check_terms_all {test terms} { + do_test $test.terms { + execsql { + SELECT dump_terms(t1) FROM t1 LIMIT 1; + } + } [list $terms] +} + +# Utility function to check for the expected doclist for the term in +# segment level/index. _all version does same for entire index. +proc check_doclist {test level index term doclist} { + # TODO(shess): Again, why can't the non-:: versions work? 
+ set ::term $term + set ::level $level + set ::index $index + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term, $::level, $::index) FROM t1 LIMIT 1; + } + } [list $doclist] +} +proc check_doclist_all {test term doclist} { + set ::term $term + do_test $test { + execsql { + SELECT dump_doclist(t1, $::term) FROM t1 LIMIT 1; + } + } [list $doclist] +} + +#************************************************************************* +# Test results when all rows are deleted and one is added back. +# Previously older segments would continue to exist, but now the index +# should be dropped when the table is empty. The results should look +# exactly like we never added the earlier rows in the first place. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE 1=1; -- Delete each row rather than dropping table. + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); +} + +# Should be a single initial segment. +do_test fts3d-1.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts3d-1.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}} + +check_terms_all fts3d-1.1 {a is test this} +check_doclist_all fts3d-1.1.1 a {[1 0[2]]} +check_doclist_all fts3d-1.1.2 is {[1 0[1]]} +check_doclist_all fts3d-1.1.3 test {[1 0[3]]} +check_doclist_all fts3d-1.1.4 this {[1 0[0]]} + +check_terms fts3d-1.2 0 0 {a is test this} +check_doclist fts3d-1.2.1 0 0 a {[1 0[2]]} +check_doclist fts3d-1.2.2 0 0 is {[1 0[1]]} +check_doclist fts3d-1.2.3 0 0 test {[1 0[3]]} +check_doclist fts3d-1.2.4 0 0 this {[1 0[0]]} + +#************************************************************************* +# Test results when everything is optimized manually. +# NOTE(shess): This is a copy of fts3c-1.3. I've pulled a copy here +# because fts3d-2 and fts3d-3 should have identical results. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE docid IN (1,3); + DROP TABLE IF EXISTS t1old; + ALTER TABLE t1 RENAME TO t1old; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) SELECT docid, c FROM t1old; + DROP TABLE t1old; +} + +# Should be a single optimal segment with the same logical results. 
+do_test fts3d-2.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts3d-2.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts3d-2.1 {a test that was} +check_doclist_all fts3d-2.1.1 a {[2 0[2]]} +check_doclist_all fts3d-2.1.2 test {[2 0[3]]} +check_doclist_all fts3d-2.1.3 that {[2 0[0]]} +check_doclist_all fts3d-2.1.4 was {[2 0[1]]} + +check_terms fts3d-2.2 0 0 {a test that was} +check_doclist fts3d-2.2.1 0 0 a {[2 0[2]]} +check_doclist fts3d-2.2.2 0 0 test {[2 0[3]]} +check_doclist fts3d-2.2.3 0 0 that {[2 0[0]]} +check_doclist fts3d-2.2.4 0 0 was {[2 0[1]]} + +#************************************************************************* +# Test results when everything is optimized via optimize(). +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); + DELETE FROM t1 WHERE docid IN (1,3); + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; +} + +# Should be a single optimal segment with the same logical results. +do_test fts3d-3.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0} +do_test fts3d-3.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} {{0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4}} + +check_terms_all fts3d-3.1 {a test that was} +check_doclist_all fts3d-3.1.1 a {[2 0[2]]} +check_doclist_all fts3d-3.1.2 test {[2 0[3]]} +check_doclist_all fts3d-3.1.3 that {[2 0[0]]} +check_doclist_all fts3d-3.1.4 was {[2 0[1]]} + +check_terms fts3d-3.2 0 0 {a test that was} +check_doclist fts3d-3.2.1 0 0 a {[2 0[2]]} +check_doclist fts3d-3.2.2 0 0 test {[2 0[3]]} +check_doclist fts3d-3.2.3 0 0 that {[2 0[0]]} +check_doclist fts3d-3.2.4 0 0 was {[2 0[1]]} + +#************************************************************************* +# Test optimize() against a table involving segment merges. +# NOTE(shess): Since there's no transaction, each of the INSERT/UPDATE +# statements generates a segment. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + + INSERT INTO t1 (rowid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (rowid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (rowid, c) VALUES (3, 'This is a test'); + + UPDATE t1 SET c = 'This is a test one' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test one' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test one' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test two' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test two' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test two' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test three' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test three' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test three' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test four' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test four' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test four' WHERE rowid = 3; + + UPDATE t1 SET c = 'This is a test' WHERE rowid = 1; + UPDATE t1 SET c = 'That was a test' WHERE rowid = 2; + UPDATE t1 SET c = 'This is a test' WHERE rowid = 3; +} + +# 2 segments in level 0, 1 in level 1 (18 segments created, 16 +# merged). 
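+# (Each of the 18 statements above writes one level-0 segment; once 16
+# of them have accumulated at level 0 they are merged into a single
+# level-1 segment, which leaves 2 of the 18 still at level 0. This is
+# the same 16-segment merge threshold that the "more than 16 docs"
+# comment in fts3b.test is working around.)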
+do_test fts3d-4.segments { + execsql { + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {0 0 0 1 1 0} + +do_test fts3d-4.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +check_terms_all fts3d-4.1 {a four is one test that this three two was} +check_doclist_all fts3d-4.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts3d-4.1.2 four {} +check_doclist_all fts3d-4.1.3 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts3d-4.1.4 one {} +check_doclist_all fts3d-4.1.5 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts3d-4.1.6 that {[2 0[0]]} +check_doclist_all fts3d-4.1.7 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts3d-4.1.8 three {} +check_doclist_all fts3d-4.1.9 two {} +check_doclist_all fts3d-4.1.10 was {[2 0[1]]} + +check_terms fts3d-4.2 0 0 {a four test that was} +check_doclist fts3d-4.2.1 0 0 a {[2 0[2]]} +check_doclist fts3d-4.2.2 0 0 four {[2]} +check_doclist fts3d-4.2.3 0 0 test {[2 0[3]]} +check_doclist fts3d-4.2.4 0 0 that {[2 0[0]]} +check_doclist fts3d-4.2.5 0 0 was {[2 0[1]]} + +check_terms fts3d-4.3 0 1 {a four is test this} +check_doclist fts3d-4.3.1 0 1 a {[3 0[2]]} +check_doclist fts3d-4.3.2 0 1 four {[3]} +check_doclist fts3d-4.3.3 0 1 is {[3 0[1]]} +check_doclist fts3d-4.3.4 0 1 test {[3 0[3]]} +check_doclist fts3d-4.3.5 0 1 this {[3 0[0]]} + +check_terms fts3d-4.4 1 0 {a four is one test that this three two was} +check_doclist fts3d-4.4.1 1 0 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist fts3d-4.4.2 1 0 four {[1] [2 0[4]] [3 0[4]]} +check_doclist fts3d-4.4.3 1 0 is {[1 0[1]] [3 0[1]]} +check_doclist fts3d-4.4.4 1 0 one {[1] [2] [3]} +check_doclist fts3d-4.4.5 1 0 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist fts3d-4.4.6 1 0 that {[2 0[0]]} +check_doclist fts3d-4.4.7 1 0 this {[1 0[0]] [3 0[0]]} +check_doclist fts3d-4.4.8 1 0 three {[1] [2] [3]} +check_doclist fts3d-4.4.9 1 0 two {[1] [2] [3]} +check_doclist fts3d-4.4.10 1 0 was {[2 0[1]]} + +# Optimize should leave the result in the level of the highest-level +# prior segment. +do_test fts3d-4.5 { + execsql { + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index optimized} 1 0} + +# Identical to fts3d-4.matches. +do_test fts3d-4.5.matches { + execsql { + SELECT OFFSETS(t1) FROM t1 + WHERE t1 MATCH 'this OR that OR was OR a OR is OR test' ORDER BY docid; + } +} [list {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4} \ + {0 1 0 4 0 2 5 3 0 3 9 1 0 5 11 4} \ + {0 0 0 4 0 4 5 2 0 3 8 1 0 5 10 4}] + +check_terms_all fts3d-4.5.1 {a is test that this was} +check_doclist_all fts3d-4.5.1.1 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist_all fts3d-4.5.1.2 is {[1 0[1]] [3 0[1]]} +check_doclist_all fts3d-4.5.1.3 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist_all fts3d-4.5.1.4 that {[2 0[0]]} +check_doclist_all fts3d-4.5.1.5 this {[1 0[0]] [3 0[0]]} +check_doclist_all fts3d-4.5.1.6 was {[2 0[1]]} + +check_terms fts3d-4.5.2 1 0 {a is test that this was} +check_doclist fts3d-4.5.2.1 1 0 a {[1 0[2]] [2 0[2]] [3 0[2]]} +check_doclist fts3d-4.5.2.2 1 0 is {[1 0[1]] [3 0[1]]} +check_doclist fts3d-4.5.2.3 1 0 test {[1 0[3]] [2 0[3]] [3 0[3]]} +check_doclist fts3d-4.5.2.4 1 0 that {[2 0[0]]} +check_doclist fts3d-4.5.2.5 1 0 this {[1 0[0]] [3 0[0]]} +check_doclist fts3d-4.5.2.6 1 0 was {[2 0[1]]} + +# Re-optimizing does nothing. 
+do_test fts3d-5.0 { + execsql { + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index already optimal} 1 0} + +# Even if we move things around, still does nothing. +do_test fts3d-5.1 { + execsql { + UPDATE t1_segdir SET level = 2 WHERE level = 1 AND idx = 0; + SELECT OPTIMIZE(t1) FROM t1 LIMIT 1; + SELECT level, idx FROM t1_segdir ORDER BY level, idx; + } +} {{Index already optimal} 2 0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3e.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3e.test --- sqlite3-3.4.2/test/fts3e.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3e.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,125 @@ +# 2008 July 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# These tests exercise the various types of fts3 cursors. +# +# $Id: fts3e.test,v 1.1 2008/07/29 20:24:46 shess Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is not defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +#************************************************************************* +# Test table scan (QUERY_GENERIC). This kind of query happens for +# queries with no WHERE clause, or for WHERE clauses which cannot be +# satisfied by an index. +db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts3(c); + INSERT INTO t1 (docid, c) VALUES (1, 'This is a test'); + INSERT INTO t1 (docid, c) VALUES (2, 'That was a test'); + INSERT INTO t1 (docid, c) VALUES (3, 'This is a test'); +} + +do_test fts3e-1.1 { + execsql { + SELECT docid FROM t1 ORDER BY docid; + } +} {1 2 3} + +do_test fts3e-1.2 { + execsql { + SELECT docid FROM t1 WHERE c LIKE '%test' ORDER BY docid; + } +} {1 2 3} + +do_test fts3e-1.3 { + execsql { + SELECT docid FROM t1 WHERE c LIKE 'That%' ORDER BY docid; + } +} {2} + +#************************************************************************* +# Test lookup by docid (QUERY_DOCID). This kind of query happens for +# queries which select by the docid/rowid implicit index. +db eval { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + CREATE VIRTUAL TABLE t1 USING fts3(c); + CREATE TABLE t2(id INTEGER PRIMARY KEY AUTOINCREMENT, weight INTEGER UNIQUE); + INSERT INTO t2 VALUES (null, 10); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'This is a test'); + INSERT INTO t2 VALUES (null, 5); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'That was a test'); + INSERT INTO t2 VALUES (null, 20); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'This is a test'); +} + +# TODO(shess): This actually is doing QUERY_GENERIC? I'd have +# expected QUERY_DOCID in this case, as for a very large table the +# full scan is less efficient. 
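+# One way to see which strategy the planner really picked (not relied on
+# by this test, and the exact text is version-dependent) is EXPLAIN QUERY
+# PLAN, e.g.:
+#
+#   db eval {EXPLAIN QUERY PLAN
+#            SELECT docid FROM t1 WHERE docid IN (1, 2, 10)} {
+#     puts $detail
+#   }
+#
+# For a virtual table the detail column reports the index number returned
+# by fts3's xBestIndex, which maps onto the QUERY_* cursor types above.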
+do_test fts3e-2.1 { + execsql { + SELECT docid FROM t1 WHERE docid in (1, 2, 10); + SELECT rowid FROM t1 WHERE rowid in (1, 2, 10); + } +} {1 2 1 2} + +do_test fts3e-2.2 { + execsql { + SELECT docid, weight FROM t1, t2 WHERE t2.id = t1.docid ORDER BY weight; + SELECT t1.rowid, weight FROM t1, t2 WHERE t2.id = t1.rowid ORDER BY weight; + } +} {2 5 1 10 3 20 2 5 1 10 3 20} + +do_test fts3e-2.3 { + execsql { + SELECT docid, weight FROM t1, t2 + WHERE t2.weight>5 AND t2.id = t1.docid ORDER BY weight; + SELECT t1.rowid, weight FROM t1, t2 + WHERE t2.weight>5 AND t2.id = t1.rowid ORDER BY weight; + } +} {1 10 3 20 1 10 3 20} + +#************************************************************************* +# Test lookup by MATCH (QUERY_FULLTEXT). This is the fulltext index. +db eval { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + CREATE VIRTUAL TABLE t1 USING fts3(c); + CREATE TABLE t2(id INTEGER PRIMARY KEY AUTOINCREMENT, weight INTEGER UNIQUE); + INSERT INTO t2 VALUES (null, 10); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'This is a test'); + INSERT INTO t2 VALUES (null, 5); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'That was a test'); + INSERT INTO t2 VALUES (null, 20); + INSERT INTO t1 (docid, c) VALUES (last_insert_rowid(), 'This is a test'); +} + +do_test fts3e-3.1 { + execsql { + SELECT docid FROM t1 WHERE t1 MATCH 'this' ORDER BY docid; + } +} {1 3} + +do_test fts3e-3.2 { + execsql { + SELECT docid, weight FROM t1, t2 + WHERE t1 MATCH 'this' AND t1.docid = t2.id ORDER BY weight; + } +} {1 10 3 20} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3expr2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3expr2.test --- sqlite3-3.4.2/test/fts3expr2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3expr2.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,166 @@ +# 2009 January 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module syntax parser. +# +# $Id: fts3expr2.test,v 1.2 2009/06/05 17:09:12 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +# Test overview: +# +# The tests in this file are pseudo-randomly generated. They test +# the fts3 match expression parser via the test interface +# SQL function "fts3_exprtest" (see comments in fts3_expr.c). +# +# Each test case works as follows: +# +# 1. A random expression tree is generated using proc [random_expr_tree]. +# 2. The expression tree is converted to the text of an equivalent +# fts3 expression using proc [tree_to_expr]. +# 3. The test SQL function "fts3_exprtest" is used to parse the +# expression text generated in step (2), returning a parsed expression +# tree. +# 4. Test that the tree returned in step (3) matches that generated in +# step (1). +# +# In step (2), 4 different fts3 expressions are created from each +# expression tree by varying the following boolean properties: +# +# * Whether or not superflous parenthesis are included. i.e. if +# "a OR b AND (c OR d)" or "a OR (b AND (c OR d))" is generated. 
+# +# * Whether or not explict AND operators are used. i.e. if +# "a OR b AND c" or "a OR b c" is generated. +# + +set sqlite_fts3_enable_parentheses 1 + +proc strip_phrase_data {L} { + if {[lindex $L 0] eq "PHRASE"} { + return [list P [lrange $L 3 end]] + } + return [list \ + [lindex $L 0] \ + [strip_phrase_data [lindex $L 1]] \ + [strip_phrase_data [lindex $L 2]] \ + ] +} +proc test_fts3expr2 {expr} { + strip_phrase_data [ + db one {SELECT fts3_exprtest('simple', $expr, 'a', 'b', 'c')} + ] +} + +proc rnd {nMax} { expr {int(rand()*$nMax)} } + +proc random_phrase {} { + set phrases [list one two three four "one two" "three four"] + list P [lindex $phrases [rnd [llength $phrases]]] +} + +# Generate and return a pseudo-random expression tree. Using the same +# format returned by the [test_fts3expr2] proc. +# +proc random_expr_tree {iHeight} { + if {$iHeight==0 || [rnd 3]==0} { + return [random_phrase] + } + + set operators [list NEAR NOT AND OR] + set op [lindex $operators [rnd 4]] + + if {$op eq "NEAR"} { + set iDistance [rnd 15] + return [list $op/$iDistance [random_phrase] [random_phrase]] + } + + set iNH [expr {$iHeight - 1}] + return [list $op [random_expr_tree $iNH] [random_expr_tree $iNH]] +} + +# Given an expression tree, generate a corresponding expression. +# +proc tree_to_expr {tree all_brackets implicit_and} { + set prec(NOT) 2 + set prec(AND) 3 + set prec() 3 + set prec(OR) 4 + + set op [lindex $tree 0] + + if {$op eq "P"} { + set phrase [lindex $tree 1] + if {[llength $phrase]>1} { + return "\"$phrase\"" + } else { + return $phrase + } + } + + if {$op eq "NEAR/10"} { + set op "NEAR" + } + if {$op eq "AND" && $implicit_and} { + set op "" + } + + set lhs [lindex $tree 1] + set rhs [lindex $tree 2] + set zLeft [tree_to_expr $lhs $all_brackets $implicit_and] + set zRight [tree_to_expr $rhs $all_brackets $implicit_and] + + set iPrec 5 + set iLeftPrec 0 + set iRightPrec 0 + + catch {set iPrec $prec($op)} + catch {set iLeftPrec $prec([lindex $lhs 0])} + catch {set iRightPrec $prec([lindex $rhs 0])} + + if {$iLeftPrec > $iPrec || $all_brackets} { + set zLeft "($zLeft)" + } + if {$iRightPrec >= $iPrec || $all_brackets} { + set zRight "($zRight)" + } + + return "$zLeft $op $zRight" +} + +proc do_exprparse_test {name expr tree} { + uplevel do_test $name [list "test_fts3expr2 {$expr}"] [list $tree] +} + +for {set iTest 1} {$iTest<500} {incr iTest} { + set t [random_expr_tree 4] + + set e1 [tree_to_expr $t 0 0] + set e2 [tree_to_expr $t 0 1] + set e3 [tree_to_expr $t 1 0] + set e4 [tree_to_expr $t 1 1] + + do_exprparse_test fts3expr2-$iTest.1 $e1 $t + do_exprparse_test fts3expr2-$iTest.2 $e2 $t + do_exprparse_test fts3expr2-$iTest.3 $e3 $t + do_exprparse_test fts3expr2-$iTest.4 $e4 $t +} + +set sqlite_fts3_enable_parentheses 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3expr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3expr.test --- sqlite3-3.4.2/test/fts3expr.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3expr.test 2009-03-12 15:43:48.000000000 +0000 @@ -0,0 +1,480 @@ +# 2006 September 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS3 module. 
+# +# $Id: fts3expr.test,v 1.7 2009/03/12 15:43:48 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + finish_test + return +} + +set sqlite_fts3_enable_parentheses 1 + +proc test_fts3expr {expr} { + db one {SELECT fts3_exprtest('simple', $expr, 'a', 'b', 'c')} +} +do_test fts3expr-1.0 { + test_fts3expr "abcd" +} {PHRASE 3 0 abcd} +do_test fts3expr-1.1 { + test_fts3expr " tag " +} {PHRASE 3 0 tag} + +do_test fts3expr-1.2 { + test_fts3expr "ab AND cd" +} {AND {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.2.1 { + test_fts3expr "ab cd" +} {AND {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.3 { + test_fts3expr "ab OR cd" +} {OR {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.4 { + test_fts3expr "ab NOT cd" +} {NOT {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.5 { + test_fts3expr "ab NEAR cd" +} {NEAR/10 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.1 { + test_fts3expr "ab NEAR/5 cd" +} {NEAR/5 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.2 { + test_fts3expr "ab NEAR/87654321 cd" +} {NEAR/87654321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.3 { + test_fts3expr "ab NEAR/7654321 cd" +} {NEAR/7654321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.4 { + test_fts3expr "ab NEAR/654321 cd" +} {NEAR/654321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.5 { + test_fts3expr "ab NEAR/54321 cd" +} {NEAR/54321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.6 { + test_fts3expr "ab NEAR/4321 cd" +} {NEAR/4321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.7 { + test_fts3expr "ab NEAR/321 cd" +} {NEAR/321 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} +do_test fts3expr-1.6.8 { + test_fts3expr "ab NEAR/21 cd" +} {NEAR/21 {PHRASE 3 0 ab} {PHRASE 3 0 cd}} + +do_test fts3expr-1.7 { + test_fts3expr {"one two three"} +} {PHRASE 3 0 one two three} +do_test fts3expr-1.8.1 { + test_fts3expr {zero "one two three" four} +} {AND {AND {PHRASE 3 0 zero} {PHRASE 3 0 one two three}} {PHRASE 3 0 four}} +do_test fts3expr-1.8.2 { + test_fts3expr {zero AND "one two three" four} +} {AND {AND {PHRASE 3 0 zero} {PHRASE 3 0 one two three}} {PHRASE 3 0 four}} +do_test fts3expr-1.8.3 { + test_fts3expr {zero "one two three" AND four} +} {AND {AND {PHRASE 3 0 zero} {PHRASE 3 0 one two three}} {PHRASE 3 0 four}} +do_test fts3expr-1.8.4 { + test_fts3expr {zero AND "one two three" AND four} +} {AND {AND {PHRASE 3 0 zero} {PHRASE 3 0 one two three}} {PHRASE 3 0 four}} +do_test fts3expr-1.9.1 { + test_fts3expr {"one* two three"} +} {PHRASE 3 0 one+ two three} +do_test fts3expr-1.9.2 { + test_fts3expr {"one two* three"} +} {PHRASE 3 0 one two+ three} +do_test fts3expr-1.9.3 { + test_fts3expr {"one* two* three"} +} {PHRASE 3 0 one+ two+ three} +do_test fts3expr-1.9.4 { + test_fts3expr {"one two three*"} +} {PHRASE 3 0 one two three+} +do_test fts3expr-1.9.5 { + test_fts3expr {"one* two three*"} +} {PHRASE 3 0 one+ two three+} +do_test fts3expr-1.9.6 { + test_fts3expr {"one two* three*"} +} {PHRASE 3 0 one two+ three+} +do_test fts3expr-1.9.7 { + test_fts3expr {"one* two* three*"} +} {PHRASE 3 0 one+ two+ three+} + +do_test fts3expr-1.10 { + test_fts3expr {one* two} +} {AND {PHRASE 3 0 one+} {PHRASE 3 0 two}} +do_test fts3expr-1.11 { + test_fts3expr {one two*} +} {AND {PHRASE 3 0 one} {PHRASE 3 0 two+}} + +do_test fts3expr-1.14 { + test_fts3expr {a:one two} +} {AND {PHRASE 0 0 one} {PHRASE 3 0 two}} +do_test fts3expr-1.15 { + test_fts3expr {one b:two} +} {AND 
{PHRASE 3 0 one} {PHRASE 1 0 two}} + +do_test fts3expr-1.16 { + test_fts3expr {one AND two AND three AND four AND five} +} [list AND \ + [list AND \ + [list AND \ + [list AND {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + {PHRASE 3 0 three} \ + ] \ + {PHRASE 3 0 four} \ + ] \ + {PHRASE 3 0 five} \ + ] +do_test fts3expr-1.17 { + test_fts3expr {(one AND two) AND ((three AND four) AND five)} +} [list AND \ + [list AND {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list AND {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.18 { + test_fts3expr {(one AND two) OR ((three AND four) AND five)} +} [list OR \ + [list AND {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list AND {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.19 { + test_fts3expr {(one AND two) AND ((three AND four) OR five)} +} [list AND \ + [list AND {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list OR \ + [list AND {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.20 { + test_fts3expr {(one OR two) AND ((three OR four) AND five)} +} [list AND \ + [list OR {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list OR {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.21 { + test_fts3expr {(one OR two) AND ((three NOT four) AND five)} +} [list AND \ + [list OR {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list NOT {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.22 { + test_fts3expr {(one OR two) NOT ((three OR four) AND five)} +} [list NOT \ + [list OR {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list OR {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.23 { + test_fts3expr {(((((one OR two))))) NOT (((((three OR four))) AND five))} +} [list NOT \ + [list OR {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list OR {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.24 { + test_fts3expr {one NEAR two} +} [list NEAR/10 {PHRASE 3 0 one} {PHRASE 3 0 two}] +do_test fts3expr-1.25 { + test_fts3expr {(one NEAR two)} +} [list NEAR/10 {PHRASE 3 0 one} {PHRASE 3 0 two}] +do_test fts3expr-1.26 { + test_fts3expr {((((((one NEAR two))))))} +} [list NEAR/10 {PHRASE 3 0 one} {PHRASE 3 0 two}] +do_test fts3expr-1.27 { + test_fts3expr {(one NEAR two) OR ((three OR four) AND five)} +} [list OR \ + [list NEAR/10 {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list OR {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] +do_test fts3expr-1.28 { + test_fts3expr {(one NEAR/321 two) OR ((three OR four) AND five)} +} [list OR \ + [list NEAR/321 {PHRASE 3 0 one} {PHRASE 3 0 two}] \ + [list AND \ + [list OR {PHRASE 3 0 three} {PHRASE 3 0 four}] \ + {PHRASE 3 0 five} \ + ] \ + ] + +proc strip_phrase_data {L} { + if {[lindex $L 0] eq "PHRASE"} { + return [lrange $L 3 end] + } + return [list \ + [lindex $L 0] \ + [strip_phrase_data [lindex $L 1]] \ + [strip_phrase_data [lindex $L 2]] \ + ] +} +proc test_fts3expr2 {expr} { + strip_phrase_data [ + db one {SELECT fts3_exprtest('simple', $expr, 'a', 'b', 'c')} + ] +} +do_test fts3expr-2.1 { + test_fts3expr2 "ab OR cd AND ef" +} {OR ab {AND cd ef}} +do_test fts3expr-2.2 { + test_fts3expr2 "cd AND ef OR ab" +} {OR {AND cd ef} ab} +do_test fts3expr-2.3 { + test_fts3expr2 "ab AND cd AND ef OR gh" +} {OR {AND {AND ab cd} ef} gh} +do_test fts3expr-2.4 { + test_fts3expr2 "ab AND cd OR ef 
AND gh" +} {OR {AND ab cd} {AND ef gh}} +do_test fts3expr-2.5 { + test_fts3expr2 "ab cd" +} {AND ab cd} + +do_test fts3expr-3.1 { + test_fts3expr2 "(ab OR cd) AND ef" +} {AND {OR ab cd} ef} +do_test fts3expr-3.2 { + test_fts3expr2 "ef AND (ab OR cd)" +} {AND ef {OR ab cd}} +do_test fts3expr-3.3 { + test_fts3expr2 "(ab OR cd)" +} {OR ab cd} +do_test fts3expr-3.4 { + test_fts3expr2 "(((ab OR cd)))" +} {OR ab cd} + +do_test fts3expr-3.5 { + test_fts3expr2 "one AND (two NEAR three)" +} {AND one {NEAR/10 two three}} +do_test fts3expr-3.6 { + test_fts3expr2 "one (two NEAR three)" +} {AND one {NEAR/10 two three}} +do_test fts3expr-3.7 { + test_fts3expr2 "(two NEAR three) one" +} {AND {NEAR/10 two three} one} +do_test fts3expr-3.8 { + test_fts3expr2 "(two NEAR three) AND one" +} {AND {NEAR/10 two three} one} +do_test fts3expr-3.9 { + test_fts3expr2 "(two NEAR three) (four five)" +} {AND {NEAR/10 two three} {AND four five}} +do_test fts3expr-3.10 { + test_fts3expr2 "(two NEAR three) AND (four five)" +} {AND {NEAR/10 two three} {AND four five}} +do_test fts3expr-3.11 { + test_fts3expr2 "(two NEAR three) (four NEAR five)" +} {AND {NEAR/10 two three} {NEAR/10 four five}} +do_test fts3expr-3.12 { + test_fts3expr2 "(two NEAR three) OR (four NEAR five)" +} {OR {NEAR/10 two three} {NEAR/10 four five}} + +do_test fts3expr-3.13 { + test_fts3expr2 "(two NEAR/1a three)" +} {AND {AND {AND two near} 1a} three} + +do_test fts3expr-3.14 { + test_fts3expr2 "(two NEAR// three)" +} {AND {AND two near} three} +do_test fts3expr-3.15 { + test_fts3expr2 "(two NEAR/: three)" +} {AND {AND two near} three} + +do_test fts3expr-3.16 { + test_fts3expr2 "(two NEAR three)OR(four NEAR five)" +} {OR {NEAR/10 two three} {NEAR/10 four five}} +do_test fts3expr-3.17 { + test_fts3expr2 "(two NEAR three)OR\"four five\"" +} {OR {NEAR/10 two three} {four five}} +do_test fts3expr-3.18 { + test_fts3expr2 "one \u0080wo" +} "AND one \u0080wo" + + + +#------------------------------------------------------------------------ +# The following tests, fts3expr-4.*, test the parsers response to syntax +# errors in query expressions. This is done using a real fts3 table and +# MATCH clauses, not the parser test interface. +# +do_test fts3expr-4.1 { + execsql { CREATE VIRTUAL TABLE t1 USING fts3(a, b, c) } +} {} + +# Mismatched parenthesis: +do_test fts3expr-4.2.1 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'example AND (hello OR world))' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.2.2 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'example AND (hello OR world' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.2.3 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH '(hello' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.2.4 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH '(' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.2.5 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH ')' } +} {1 {SQL logic error or missing database}} + +do_test fts3expr-4.2.6 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'example (hello world' } +} {1 {SQL logic error or missing database}} + +# Unterminated quotation marks: +do_test fts3expr-4.3.1 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'example OR "hello world' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.3.2 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'example OR hello world"' } +} {1 {SQL logic error or missing database}} + +# Binary operators without the required operands. 
+do_test fts3expr-4.4.1 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'OR hello world' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.4.2 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'hello world OR' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.4.3 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'one (hello world OR) two' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.4.4 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'one (OR hello world) two' } +} {1 {SQL logic error or missing database}} + +# NEAR operators with something other than phrases as arguments. +do_test fts3expr-4.5.1 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH '(hello OR world) NEAR one' } +} {1 {SQL logic error or missing database}} +do_test fts3expr-4.5.2 { + catchsql { SELECT * FROM t1 WHERE t1 MATCH 'one NEAR (hello OR world)' } +} {1 {SQL logic error or missing database}} + +#------------------------------------------------------------------------ +# The following OOM tests are designed to cover cases in fts3_expr.c. +# +source $testdir/malloc_common.tcl +do_malloc_test fts3expr-malloc-1 -sqlbody { + SELECT fts3_exprtest('simple', 'a b c "d e f"', 'a', 'b', 'c') +} +do_malloc_test fts3expr-malloc-2 -tclprep { + set sqlite_fts3_enable_parentheses 0 +} -sqlbody { + SELECT fts3_exprtest('simple', 'a -b', 'a', 'b', 'c') +} -cleanup { + set sqlite_fts3_enable_parentheses 1 +} + +#------------------------------------------------------------------------ +# The following tests are not very important. They cover error handling +# cases in the test code, which makes test coverage easier to measure. +# +do_test fts3expr-5.1 { + catchsql { SELECT fts3_exprtest('simple', 'a b') } +} {1 {Usage: fts3_exprtest(tokenizer, expr, col1, ...}} +do_test fts3expr-5.2 { + catchsql { SELECT fts3_exprtest('doesnotexist', 'a b', 'c') } +} {1 {No such tokenizer module}} +do_test fts3expr-5.3 { + catchsql { SELECT fts3_exprtest('simple', 'a b OR', 'c') } +} {1 {Error parsing expression}} + +#------------------------------------------------------------------------ +# The next set of tests verifies that things actually work as they are +# supposed to when using the new syntax. 
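As the expected results suggest, each row that test fts3expr-6.1 below inserts into t1 ends up with the rowid whose bits select exactly the words from {one two three four five} it contains, so every expected rowid list in the query table can be recomputed independently of FTS3. A minimal cross-check sketch (not part of the patch; [expected_rowids] is a hypothetical helper, not something defined by this file):

  proc expected_rowids {pred} {
    # pred is a Tcl boolean expression over $one .. $five
    set res [list]
    for {set ii 1} {$ii < 32} {incr ii} {
      set one   [expr {($ii & 1)  != 0}]
      set two   [expr {($ii & 2)  != 0}]
      set three [expr {($ii & 4)  != 0}]
      set four  [expr {($ii & 8)  != 0}]
      set five  [expr {($ii & 16) != 0}]
      if {[expr $pred]} { lappend res $ii }
    }
    return $res
  }

  # MATCH 'five NOT (four OR one)' should therefore return rowids 16 18 20 22,
  # which is the expected result of query 5 in the list below:
  expected_rowids {$five && !($four || $one)}
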
+# +do_test fts3expr-6.1 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts3(a); + } + for {set ii 1} {$ii < 32} {incr ii} { + set v [list] + if {$ii & 1} { lappend v one } + if {$ii & 2} { lappend v two } + if {$ii & 4} { lappend v three } + if {$ii & 8} { lappend v four } + if {$ii & 16} { lappend v five } + execsql { INSERT INTO t1 VALUES($v) } + } + + execsql {SELECT rowid FROM t1 WHERE t1 MATCH 'five four one' ORDER BY rowid} +} {25 27 29 31} + +foreach {id expr res} { + + 2 "five four NOT one" {24 26 28 30} + + 3 "five AND four OR one" + {1 3 5 7 9 11 13 15 17 19 21 23 24 25 26 27 28 29 30 31} + + 4 "five AND (four OR one)" {17 19 21 23 24 25 26 27 28 29 30 31} + + 5 "five NOT (four OR one)" {16 18 20 22} + + 6 "(five NOT (four OR one)) OR (five AND (four OR one))" + {16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} + + 7 "(five OR one) AND two AND three" {7 15 22 23 30 31} + + 8 "five OR one AND two AND three" + {7 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} + + 9 "five OR one two three" + {7 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} + + 10 "five OR \"one two three\"" + {7 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31} + + 11 "one two OR four five NOT three" {3 7 11 15 19 23 24 25 26 27 31} + + 12 "(one two OR four five) NOT three" {3 11 19 24 25 26 27} + + 13 "((((((one two OR four five)))))) NOT three" {3 11 19 24 25 26 27} + +} { + do_test fts3expr-6.$id { + execsql { SELECT rowid FROM t1 WHERE t1 MATCH $expr ORDER BY rowid } + } $res +} + +do_test fts3expr-7.1 { + execsql { + CREATE VIRTUAL TABLE test USING fts3 (keyword); + INSERT INTO test VALUES ('abc'); + SELECT * FROM test WHERE keyword MATCH '""'; + } +} {} + +set sqlite_fts3_enable_parentheses 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3near.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3near.test --- sqlite3-3.4.2/test/fts3near.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3near.test 2009-06-12 03:37:53.000000000 +0100 @@ -0,0 +1,572 @@ + +# 2007 October 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# +# $Id: fts3near.test,v 1.3 2009/01/02 17:33:46 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If SQLITE_ENABLE_FTS3 is defined, omit this file. 
+ifcapable !fts3 { + finish_test + return +} + +db eval { + CREATE VIRTUAL TABLE t1 USING fts3(content); + INSERT INTO t1(content) VALUES('one three four five'); + INSERT INTO t1(content) VALUES('two three four five'); + INSERT INTO t1(content) VALUES('one two three four five'); +} + +do_test fts3near-1.1 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR/0 three'} +} {1} +do_test fts3near-1.2 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR/1 two'} +} {3} +do_test fts3near-1.3 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR/1 three'} +} {1 3} +do_test fts3near-1.4 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'three NEAR/1 one'} +} {1 3} +do_test fts3near-1.5 { + execsql {SELECT docid FROM t1 WHERE content MATCH '"one two" NEAR/1 five'} +} {} +do_test fts3near-1.6 { + execsql {SELECT docid FROM t1 WHERE content MATCH '"one two" NEAR/2 five'} +} {3} +do_test fts3near-1.7 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR four'} +} {1 3} +do_test fts3near-1.8 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'four NEAR three'} +} {1 2 3} +do_test fts3near-1.9 { + execsql {SELECT docid FROM t1 WHERE content MATCH '"four five" NEAR/0 three'} +} {1 2 3} +do_test fts3near-1.10 { + execsql {SELECT docid FROM t1 WHERE content MATCH '"four five" NEAR/2 one'} +} {1 3} +do_test fts3near-1.11 { + execsql {SELECT docid FROM t1 WHERE content MATCH '"four five" NEAR/1 one'} +} {1} +do_test fts3near-1.12 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'five NEAR/1 "two three"'} +} {2 3} +do_test fts3near-1.13 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR five'} +} {1 3} + +do_test fts3near-1.14 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'four NEAR four'} +} {} +do_test fts3near-1.15 { + execsql {SELECT docid FROM t1 WHERE content MATCH 'one NEAR two NEAR one'} +} {3} + + +# Output format of the offsets() function: +# +# +# +db eval { + INSERT INTO t1(content) VALUES('A X B C D A B'); +} +do_test fts3near-2.1 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH 'A NEAR/0 B' + } +} {{0 0 10 1 0 1 12 1}} +do_test fts3near-2.2 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH 'B NEAR/0 A' + } +} {{0 1 10 1 0 0 12 1}} +do_test fts3near-2.3 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH '"C D" NEAR/0 A' + } +} {{0 0 6 1 0 1 8 1 0 2 10 1}} +do_test fts3near-2.4 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH 'A NEAR/0 "C D"' + } +} {{0 1 6 1 0 2 8 1 0 0 10 1}} +do_test fts3near-2.5 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH 'A NEAR A' + } +} {{0 0 0 1 0 1 0 1 0 0 10 1 0 1 10 1}} +do_test fts3near-2.6 { + execsql { + INSERT INTO t1 VALUES('A A A'); + SELECT offsets(t1) FROM t1 WHERE content MATCH 'A NEAR/2 A'; + } +} [list [list 0 0 0 1 0 1 0 1 0 0 2 1 0 1 2 1 0 0 4 1 0 1 4 1]] +do_test fts3near-2.7 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('A A A A'); + SELECT offsets(t1) FROM t1 WHERE content MATCH 'A NEAR A NEAR A'; + } +} [list [list \ + 0 0 0 1 0 1 0 1 0 2 0 1 0 0 2 1 \ + 0 1 2 1 0 2 2 1 0 0 4 1 0 1 4 1 \ + 0 2 4 1 0 0 6 1 0 1 6 1 0 2 6 1 \ +]] + +db eval { + DELETE FROM t1; + INSERT INTO t1(content) VALUES( + 'one two three two four six three six nine four eight twelve' + ); +} + +do_test fts3near-3.1 { + execsql {SELECT offsets(t1) FROM t1 WHERE content MATCH 'three NEAR/1 one'} +} {{0 1 0 3 0 0 8 5}} +do_test fts3near-3.2 { + execsql {SELECT offsets(t1) FROM t1 WHERE content MATCH 'one NEAR/1 three'} +} {{0 0 0 3 0 
1 8 5}} +do_test fts3near-3.3 { + execsql {SELECT offsets(t1) FROM t1 WHERE content MATCH 'three NEAR/1 two'} +} {{0 1 4 3 0 0 8 5 0 1 14 3}} +do_test fts3near-3.4 { + execsql {SELECT offsets(t1) FROM t1 WHERE content MATCH 'three NEAR/2 two'} +} {{0 1 4 3 0 0 8 5 0 1 14 3 0 0 27 5}} +do_test fts3near-3.5 { + execsql {SELECT offsets(t1) FROM t1 WHERE content MATCH 'two NEAR/2 three'} +} {{0 0 4 3 0 1 8 5 0 0 14 3 0 1 27 5}} +do_test fts3near-3.6 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH 'three NEAR/0 "two four"' + } +} {{0 0 8 5 0 1 14 3 0 2 18 4}} +do_test fts3near-3.7 { + execsql { + SELECT offsets(t1) FROM t1 WHERE content MATCH '"two four" NEAR/0 three'} +} {{0 2 8 5 0 0 14 3 0 1 18 4}} + +db eval { + INSERT INTO t1(content) VALUES(' + This specification defines Cascading Style Sheets, level 2 (CSS2). CSS2 is a style sheet language that allows authors and users to attach style (e.g., fonts, spacing, and aural cues) to structured documents (e.g., HTML documents and XML applications). By separating the presentation style of documents from the content of documents, CSS2 simplifies Web authoring and site maintenance. + + CSS2 builds on CSS1 (see [CSS1]) and, with very few exceptions, all valid CSS1 style sheets are valid CSS2 style sheets. CSS2 supports media-specific style sheets so that authors may tailor the presentation of their documents to visual browsers, aural devices, printers, braille devices, handheld devices, etc. This specification also supports content positioning, downloadable fonts, table layout, features for internationalization, automatic counters and numbering, and some properties related to user interface. + ') +} +do_test fts3near-4.1 { + execsql { + SELECT snippet(t1) FROM t1 WHERE content MATCH 'specification NEAR supports' + } +} {{... devices, handheld devices, etc. 
This specification also supports content positioning, downloadable fonts, ...}} + +do_test fts3near-5.1 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification attach' + } +} {2} +do_test fts3near-5.2 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification NEAR attach' + } +} {} +do_test fts3near-5.3 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification NEAR/18 attach' + } +} {} +do_test fts3near-5.4 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification NEAR/19 attach' + } +} {2} +do_test fts3near-5.5 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification NEAR/000018 attach' + } +} {} +do_test fts3near-5.6 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'specification NEAR/000019 attach' + } +} {2} + +db eval { + INSERT INTO t1 VALUES(' + abbrev aberrations abjurations aboding abr abscesses absolutistic + abstention abuses acanthuses acceptance acclaimers accomplish + accoutring accusation acetonic acid acolytes acquitting acrylonitrile + actives acyclic addicted adenoid adjacently adjusting admissible + adoption adulated advantaging advertisers aedes aerogramme aetiology + affiliative afforest afterclap agamogenesis aggrade agings agonize + agron ailurophile airfreight airspeed alarmists alchemizing + alexandrines alien aliped all allergenic allocator allowances almost + alphabetizes altho alvine amaurosis ambles ameliorate amicability amnio + amour ampicillin amusement anadromous analogues anarchy anchormen + anecdota aneurin angst animating anlage announcements anodized + answerable antemeridian anthracene antiabortionist anticlimaxes + antifriction antimitotic antiphon antiques antithetic anviled + apatosaurus aphrodisia apodal aposiopesis apparatus appendectomies + applications appraisingly appropriate apteryx arabinose + arboricultural archdeaconates archipelago ardently arguers armadillo + arnicas arrayed arrowy arthroscope artisans ascensive ashier + aspersorium assail assentor assignees assonants astereognosis + astringency astutest atheistical atomize attachment attenuates + attrahent audibility augite auricle auteurists autobus autolysis + autosome avenge avidest aw awl ayes babirusa backbeats backgrounder + backseat backswings baddie bagnios baked balefuller ballista balmily + bandbox bandylegged bankruptcy baptism barbering bargain barneys + barracuda barterer bashes bassists bathers batterer bavardage + beachfront beanstalk beauteous become bedim bedtimes beermats begat + begun belabors bellarmine belongings bending benthos bereavements + besieger bestialized betide bevels biases bicarbonates bidentate bigger + bile billow bine biodynamics biomedicine biotites birding bisection + bitingly bkg blackheads blaeberry blanking blatherer bleeper blindage + blithefulness blockish bloodstreams bloused blubbing bluestocking + blurted boatbill bobtailed boffo bold boltrope bondservant bonks + bookbinding bookworm booting borating boscages botchers bougainvillea + bounty bowlegged boyhood bracketed brainstorm brandishes + braunschweigers brazilin breakneck breathlessness brewage bridesmaids + brighter brisker broader brokerages bronziest browband brunets bryology + bucking budlike bugleweed bulkily bulling bummer bunglers bureau burgs + burrito bushfire buss butlery buttressing bylines cabdriver cached + cadaverousnesses cafeterias cakewalk calcifies calendula callboy calms + calyptra camisoles camps candelabrum caned cannolis canoodling cantors + cape caponize capsuling caracoled carbolics 
carcase carditis caretakers + carnallite carousel carrageenan cartels carves cashbook castanets + casuistry catalyzer catchers categorizations cathexis caucuses + causeway cavetto cede cella cementite centenary centrals ceramics ceria + cervixes chafferer chalcopyrites chamfers change chaotically + characteristically charivari chases chatterer cheats cheeks chef + chemurgy chetah chickaree chigoes chillies chinning chirp chive + chloroforms chokebore choplogic chorioids chromatic chronically + chubbiest chunder chutzpah cimetidine cinque circulated circumscribe + cirrose citrin claddagh clamorousness clapperboards classicalism + clauses cleanse clemency clicker clinchers cliquiest clods closeting + cloudscape clucking cnidarian coalfish coatrack coca cockfights coddled + coeducation coexistence cognitively coiffed colatitude collage + collections collinear colonelcy colorimetric columelliform combos + comforters commence commercialist commit commorancy communized compar + compendiously complainers compliance composition comprised comradery + concelebrants concerted conciliation concourses condensate + condonations confab confessionals confirmed conforming congeal + congregant conjectured conjurers connoisseurs conscripting + conservator consolable conspired constricting consuls contagious + contemporaneity contesters continuities contractors contrarian + contrive convalescents convents convexly convulsed cooncan coparcenary + coprolite copyreader cordially corklike cornflour coroner corralling + corrigible corsages cosies cosmonauts costumer cottontails counselings + counterclaim counterpane countertenors courageously couth coveting + coworker cozier cracklings crampon crappies craved cream credenzas + crematoriums cresol cricoid crinkle criterion crocodile crore crossover + crowded cruelest crunch cruzeiros cryptomeria cubism cuesta culprit + cumquat cupped curdle curly cursoring curvy customized cutting cyclamens + cylindrical cytaster dachshund daikon damages damselfly dangling + darkest databanks dauphine dazzling deadpanned deathday debauchers + debunking decameter decedents decibel decisions declinations + decomposition decoratively decretive deduct deescalated defecating + deferentially definiendum defluxion defrocks degrade deice dekaliters + deli delinquencies deludedly demarcates demineralizers demodulating + demonstrabilities demurred deniabilities denouncement denudation + departure deplorable deposing depredatory deputizes derivational + desalinization descriptors desexes desisted despising destitute + detectability determiner detoxifying devalued devilries devotions + dextrous diagenesis dialling diaphoresis diazonium dickeys diddums + differencing dig dignified dildo dimetric dineric dinosaurs diplodocus + directer dirty disagrees disassembler disburses disclosures + disconcerts discountability discrete disembarrass disenthrone + disgruntled dishpans disintegrators dislodged disobedient + dispassionate dispiritednesses dispraised disqualifying + dissatisfying dissidence dissolvers distich distracting distrusts + ditto diverse divineness dizzily dockyard dodgers doggish doited dom + dominium doohickey doozie dorsum doubleheaders dourer downbeats + downshifted doyennes draftsman dramatic drawling dredge drifter + drivelines droopier drowsed drunkards dubiosities duding dulcifying + dumpcart duodecillion durable duteous dyed dysgenic eagles earplugs + earwitness ebonite echoers economical ectothermous edibility educates + effected effigies eggbeaters egresses ejaculates 
elasticize elector + electrodynamometer electrophorus elem eligibly eloped emaciating + embarcaderos embezzlers embosses embryectomy emfs emotionalizing + empiricist emu enamels enchained encoded encrusts endeavored endogamous + endothelioma energizes engager engrosses enl enologist enrolls ensphere + enters entirety entrap entryways envies eosinophil epicentral + epigrammatized episodic epochs equestrian equitably erect ernes + errorless escalated eschatology espaliers essonite estop eternity + ethnologically eudemonics euphonious euthenist evangelizations + eventuality evilest evulsion examinee exceptionably exciter + excremental execrably exemplars exhalant exhorter exocrine exothermic + expected expends explainable exploratory expostulatory expunges + extends externals extorts extrapolative extrorse eyebolt eyra + facetiously factor faeries fairings fallacies falsities fancifulness + fantasticalness farmhouse fascinate fatalistically fattener fave + fearlessly featly federates feints fellowman fencers ferny + fertilenesses feta feudality fibers fictionalize fiefs fightback + filefish filmier finaglers fingerboards finochio firefly firmament + fishmeal fitted fjords flagitiousnesses flamen flaps flatfooting + flauntier fleapit fleshes flickertail flints floaty floorboards + floristic flow fluffily fluorescein flutes flyspecks foetal folderols + followable foolhardier footlockers foppish forceless foredo foreknows + foreseeing foretaste forgather forlorn formidableness fortalice + forwarding founding foxhunting fragmentarily frangipani fray freeform + freezable freshening fridges frilliest frizzed frontbench frottages + fruitcake fryable fugleman fulminated functionalists fungoid furfuran + furtive fussy fwd gadolinium galabias gallinaceous galvanism gamers + gangland gaoling garganey garrisoning gasp gate gauger gayety geed + geminately generalissimos genii gentled geochronology geomorphic + geriatricians gesellschaft ghat gibbeting giggles gimps girdlers + glabella glaive glassfuls gleefully glistered globetrotted glorifier + gloving glutathione glyptodont goaled gobsmacked goggliest golliwog + goobers gooseberries gormandizer gouramis grabbier gradually grampuses + grandmothers granulated graptolite gratuitously gravitates greaten + greenmailer greys grills grippers groan gropingly grounding groveling + grueled grunter guardroom guggle guineas gummed gunnysacks gushingly + gutturals gynecoid gyrostabilizer habitudes haemophilia hailer hairs + halest hallow halters hamsters handhelds handsaw hangup haranguer + hardheartedness harlotry harps hashing hated hauntingly hayrack + headcases headphone headword heartbreakers heaters hebephrenia + hedonist heightening heliozoan helots hemelytron hemorrhagic hent + herbicides hereunto heroines heteroclitics heterotrophs hexers + hidebound hies hightails hindmost hippopotomonstrosesquipedalian + histologist hittable hobbledehoys hogans holdings holocrine homegirls + homesteader homogeneousness homopolar honeys hoodwinks hoovered + horizontally horridness horseshoers hospitalization hotdogging houri + housemate howitzers huffier humanist humid humors huntress husbandmen + hyaenas hydride hydrokinetics hydroponically hygrothermograph + hyperbolically hypersensitiveness hypnogogic hypodermically + hypothermia iatrochemistry ichthyological idealist ideograms idling + igniting illegal illuminatingly ilmenite imbibing immateriality + immigrating immortalizes immures imparts impeder imperfection + impersonated implant implying imposition imprecating 
imprimis + improvising impv inanenesses inaugurate incapably incentivize + incineration incloses incomparableness inconsequential incorporate + incrementing incumbered indecorous indentation indicative indignities + indistinguishably indoors indulges ineducation inerrable + inexperienced infants infestations infirmnesses inflicting + infracostal ingathered ingressions inheritances iniquity + injuriousnesses innervated inoculates inquisitionist insectile + insiders insolate inspirers instatement instr insulates intactness + intellects intensifies intercalations intercontinental interferon + interlarded intermarrying internalizing interpersonally + interrelatednesses intersperse interviewees intolerance + intransigents introducing intubates invades inventing inveterate + invocate iodides irenicism ironsmith irreducibly irresistibility + irriguous isobarisms isometrically issuable itineracies jackdaws + jaggery jangling javelins jeeringly jeremiad jeweler jigsawing jitter + jocosity jokester jot jowls judicative juicy jungly jurists juxtaposed + kalpa karstify keddah kendo kermesses keynote kibbutznik kidnaper + kilogram kindred kingpins kissers klatch kneads knobbed knowingest + kookaburras kruller labefaction labyrinths lacquer laddered lagoons + lambency laminates lancinate landscapist lankiness lapse larked lasso + laterite laudableness laundrywomen lawgiver laypersons leafhoppers + leapfrogs leaven leeches legated legislature leitmotifs lenients + leprous letterheads levelling lexicographically liberalists + librettist licorice lifesaving lightheadedly likelier limekiln limped + lines linkers lipoma liquidator listeners litharge litmus + liverishnesses loamier lobeline locative locutionary loggier loiterer + longevity loomed loping lotion louts lowboys luaus lucrativeness lulus + lumpier lungi lush luthern lymphangial lythraceous machinists maculate + maggot magnetochemistry maharani maimers majored malaprops malignants + maloti mammary manchineel manfully manicotti manipulativenesses + mansards manufactories maraschino margin markdown marooning marshland + mascaraing massaging masticate matchmark matings mattes mausoleum + mayflies mealworm meataxe medevaced medievalist meetings megavitamin + melded melodramatic memorableness mendaciousnesses mensurable + mercenaries mere meronymous mesmerizes mestee metallurgical + metastasize meterages meticulosity mewed microbe microcrystalline + micromanager microsporophyll midiron miffed milder militiamen + millesimal milometer mincing mingily minims minstrelsy mires + misanthropic miscalculate miscomprehended misdefines misery mishears + misled mispickel misrepresent misspending mistranslate miswriting + mixologists mobilizers moderators modulate mojo mollies momentum monde + monied monocles monographs monophyletic monotonousness moocher + moorages morality morion mortally moseyed motherly motorboat mouldering + mousers moveables mucky mudslides mulatto multicellularity + multipartite multivalences mundanities murkiest mushed muskiness + mutability mutisms mycelia myosotis mythicist nacred namable napkin + narghile nastiness nattering nauseations nearliest necessitate + necrophobia neg negotiators neologizes nephrotomy netiquette + neurophysiology newbie newspaper niccolite nielsbohriums nightlong + nincompoops nitpicked nix noddling nomadize nonadhesive noncandidates + nonconducting nondigestible nones nongreasy nonjoinder nonoccurrence + nonporousness nonrestrictive nonstaining nonuniform nooses northwards + nostalgic notepaper nourishment 
noyades nuclides numberless numskulls + nutmegged nymphaea oatmeal obis objurgators oblivious obsequiousness + obsoletism obtruding occlusions ocher octettes odeums offcuts + officiation ogival oilstone olestras omikron oncogenesis onsetting + oomphs openly ophthalmoscope opposites optimum orangutans + orchestrations ordn organophosphates origin ornithosis orthognathous + oscillatory ossuaries ostracized ounce outbreaks outearning outgrows + outlived outpoints outrunning outspends outwearing overabound + overbalance overcautious overcrowds overdubbing overexpanding + overgraze overindustrialize overlearning overoptimism overproducing + overripe overshadowing overspreading overstuff overtones overwind ow + oxidizing pacer packs paganish painstakingly palate palette pally + palsying pandemic panhandled pantheism papaws papped parading + parallelize paranoia parasitically pardners parietal parodied pars + participator partridgeberry passerines password pastors + paterfamiliases patination patrolman paunch pawnshops peacekeeper + peatbog peculator pedestrianism peduncles pegboard pellucidnesses + pendency penitentiary penstock pentylenetetrazol peptidase perched + perennial performing perigynous peripheralize perjurer permissively + perpetuals persistency perspicuously perturbingly pesky petcock + petrologists pfennige pharmacies phenformin philanderers + philosophically phonecards phosgenes photocomposer photogenic photons + phototype phylloid physiotherapeutics picadores pickup pieces pigging + pilaster pillion pimples pinioned pinpricks pipers pirogi pit + pitifullest pizza placental plainly planing plasmin platforming + playacts playwrights plectra pleurisy plopped plug plumule plussed + poaches poetasters pointless polarize policyholder polkaed + polyadelphous polygraphing polyphonous pomace ponderers pooch poplar + porcelains portableness portly positioning postage posthumously + postponed potages potholed poulard powdering practised pranksters + preadapt preassigning precentors precipitous preconditions predefined + predictors preengage prefers prehumans premedical prenotification + preplanning prepuberty presbytery presentation presidia prestissimo + preterites prevailer prewarmed priding primitively principalships + prisage privileged probed prochurch proctoscope products proficients + prognathism prohibiting proletarianisms prominence promulgates + proofreading property proportions prorate proselytize prosthesis + proteins prototypic provenances provitamin prudish pseudonymities + psychoanalysts psychoneuroses psychrometer publishable pufferies + pullet pulses punchy punkins purchased purities pursers pushover + putridity pylons pyrogenous pzazz quadricepses quaff qualmish quarriers + quasilinear queerness questionnaires quieten quintals quislings quoits + rabidness racketeers radiative radioisotope radiotherapists ragingly + rainband rakishness rampagers rands raped rare raspy ratiocinator + rattlebrain ravening razz reactivation readoption realm reapportioning + reasoning reattempts rebidding rebuts recapitulatory receptiveness + recipes reckonings recognizee recommendatory reconciled reconnoiters + recontaminated recoupments recruits recumbently redact redefine + redheaded redistributable redraw redwing reeled reenlistment reexports + refiles reflate reflowing refortified refried refuses regelate + registrant regretting rehabilitative reigning reinduced reinstalled + reinvesting rejoining relations relegates religiosities reluctivity + remastered reminisce remodifying remounted 
rends renovate reordered + repartee repel rephrase replicate repossessing reprint reprogramed + repugnantly requiter rescheduling resegregate resettled residually + resold resourcefulness respondent restating restrainedly resubmission + resurveyed retaliating retiarius retorsion retreated retrofitting + returning revanchism reverberated reverted revitalization + revolutionize rewind rhapsodizing rhizogenic rhythms ricketinesses + ridicule righteous rilles rinks rippliest ritualize riyals roast rockery + roguish romanizations rookiest roquelaure rotation rotundity rounder + routinizing rubberize rubricated ruefully ruining rummaged runic + russets ruttish sackers sacrosanctly safeguarding said salaciousness + salinity salsas salutatorians sampan sandbag saned santonin + saprophagous sarnies satem saturant savaged sawbucks scablike scalp + scant scared scatter schedulers schizophrenics schnauzers schoolmarms + scintillae scleroses scoped scotched scram scratchiness screwball + scripting scrubwomen scrutinizing scumbled scuttled seals seasickness + seccos secretions secularizing seditiousnesses seeking segregators + seize selfish semeiology seminarian semitropical sensate sensors + sentimo septicemic sequentially serener serine serums + sesquicentennials seventeen sexiest sforzandos shadowing shallot + shampooing sharking shearer sheered shelters shifter shiner shipper + shitted shoaled shofroth shorebirds shortsightedly showboated shrank + shrines shucking shuttlecocks sickeningly sideling sidewise sigil + signifiers siliceous silty simony simulative singled sinkings sirrah + situps skateboarder sketchpad skim skirmished skulkers skywalk slander + slating sleaziest sleepyheads slicking slink slitting slot slub + slumlords smallest smattered smilier smokers smriti snailfish snatch + snides snitching snooze snowblowers snub soapboxing socialite sockeyes + softest sold solicitings solleret sombreros somnolencies sons sopor + sorites soubrette soupspoon southpaw spaces spandex sparkers spatially + speccing specking spectroscopists speedsters spermatics sphincter + spiffied spindlings spirals spitball splayfeet splitter spokeswomen + spooled sportily spousals sprightliness sprogs spurner squalene + squattered squelches squirms stablish staggerings stalactitic stamp + stands starflower starwort stations stayed steamroll steeplebush + stemmatics stepfathers stereos steroid sticks stillage stinker + stirringly stockpiling stomaching stopcock stormers strabismuses + strainer strappado strawberries streetwise striae strikeouts strives + stroppiest stubbed study stunting style suavity subchloride subdeb + subfields subjoin sublittoral subnotebooks subprograms subside + substantial subtenants subtreasuries succeeding sucked sufferers + sugarier sulfaguanidine sulphating summerhouse sunbonnets sunned + superagency supercontinent superheroes supernatural superscribing + superthin supplest suppositive surcease surfs surprise survey + suspiration svelte swamplands swashes sweatshop swellhead swindling + switching sworn syllabuses sympathetics synchrocyclotron syndic + synonymously syringed tablatures tabulation tackling taiga takas talker + tamarisks tangential tans taproom tarpapers taskmaster tattiest + tautologically taxied teacup tearjerkers technocracies teepee + telegenic telephony telexed temperaments temptress tenderizing tensed + tenuring tergal terned terror testatrices tetherball textile thatched + their theorem thereof thermometers thewy thimerosal thirsty + thoroughwort threateningly thrived 
through thumbnails thwacks + ticketing tie til timekeepers timorousness tinkers tippers tisane + titrating toastmaster toff toking tomb tongs toolmakings topes topple + torose tortilla totalizing touchlines tousling townsmen trachea + tradeable tragedienne traitorous trances transcendentalists + transferrable tranship translating transmogrifying transportable + transvestism traumatize treachery treed trenail tressing tribeswoman + trichromatism triennials trikes trims triplicate tristich trivializes + trombonist trots trouts trued trunnion tryster tubes tulle tundras turban + turgescence turnround tutelar tweedinesses twill twit tympanum typists + tzarists ulcered ultramodern umbles unaccountability unamended + unassertivenesses unbanned unblocked unbundled uncertified unclaimed + uncoated unconcerns unconvinced uncrossing undefined underbodice + underemphasize undergrowth underpayment undershirts understudy + underwritten undissolved unearthed unentered unexpended unfeeling + unforeseen unfussy unhair unhinges unifilar unimproved uninvitingly + universalization unknowns unlimbering unman unmet unnaturalness + unornament unperturbed unprecedentedly unproportionate unread + unreflecting unreproducible unripe unsatisfying unseaworthiness + unsharable unsociable unstacking unsubtly untactfully untied untruest + unveils unwilled unyokes upheave upraised upstart upwind urethrae + urtexts usurers uvula vacillators vailed validation valvule vanities + varia variously vassaled vav veggies velours venerator ventrals + verbalizes verification vernacularized verticality vestigially via + vicariously victoriousness viewpoint villainies vines violoncellist + virtual viscus vital vitrify viviparous vocalizers voidable volleys + volutes vouches vulcanology wackos waggery wainwrights waling wallowing + wanking wardroom warmup wartiest washwoman watchman watermarks waverer + wayzgoose weariest weatherstripped weediness weevil welcomed + wentletrap whackers wheatworm whelp whf whinged whirl whistles whithers + wholesomeness whosoever widows wikiup willowier windburned windsail + wingspread winterkilled wisecracking witchgrass witling wobbliest + womanliness woodcut woodworking woozy working worldwide worthiest + wrappings wretched writhe wynd xylophone yardarm yea yelped yippee yoni + yuks zealotry zigzagger zitherists zoologists zygosis'); +} + +do_test fts3near-6.1 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'abbrev zygosis' + } +} {3} +do_test fts3near-6.2 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'abbrev NEAR zygosis' + } +} {} +do_test fts3near-6.3 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'abbrev NEAR/100 zygosis' + } +} {} +do_test fts3near-6.4 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'abbrev NEAR/1000 zygosis' + } +} {} +do_test fts3near-6.5 { + execsql { + SELECT docid FROM t1 WHERE content MATCH 'abbrev NEAR/10000 zygosis' + } +} {3} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fts3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fts3.test --- sqlite3-3.4.2/test/fts3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fts3.test 2009-06-05 18:03:29.000000000 +0100 @@ -0,0 +1,68 @@ +# 2007 November 23 +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all tests. 
+# +# $Id: fts3.test,v 1.2 2008/07/23 18:17:32 drh Exp $ + +proc lshift {lvar} { + upvar $lvar l + set ret [lindex $l 0] + set l [lrange $l 1 end] + return $ret +} +while {[set arg [lshift argv]] != ""} { + switch -- $arg { + -sharedpagercache { + sqlite3_enable_shared_cache 1 + } + -soak { + set SOAKTEST 1 + } + default { + set argv [linsert $argv 0 $arg] + break + } + } +} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +# If SQLITE_ENABLE_FTS3 is defined, omit this file. +ifcapable !fts3 { + return +} +rename finish_test really_finish_test +proc finish_test {} {} +set ISQUICK 1 + +set EXCLUDE { + fts3.test +} + +# Files to include in the test. If this list is empty then everything +# that is not in the EXCLUDE list is run. +# +set INCLUDE { +} + +foreach testfile [lsort -dictionary [glob $testdir/fts3*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } +} + +set sqlite_open_file_count 0 +really_finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/func.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/func.test --- sqlite3-3.4.2/test/func.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/func.test 2009-06-25 12:45:58.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing built-in functions. # -# $Id: func.test,v 1.67 2007/05/15 18:35:21 drh Exp $ +# $Id: func.test,v 1.93 2009/06/19 16:44:41 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -149,24 +149,49 @@ # Test the abs() and round() functions. 
# -do_test func-4.1 { - execsql { - CREATE TABLE t1(a,b,c); - INSERT INTO t1 VALUES(1,2,3); - INSERT INTO t1 VALUES(2,1.2345678901234,-12345.67890); - INSERT INTO t1 VALUES(3,-2,-5); - } - catchsql {SELECT abs(a,b) FROM t1} -} {1 {wrong number of arguments to function abs()}} +ifcapable !floatingpoint { + do_test func-4.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,12345678901234,-1234567890); + INSERT INTO t1 VALUES(3,-2,-5); + } + catchsql {SELECT abs(a,b) FROM t1} + } {1 {wrong number of arguments to function abs()}} +} +ifcapable floatingpoint { + do_test func-4.1 { + execsql { + CREATE TABLE t1(a,b,c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,1.2345678901234,-12345.67890); + INSERT INTO t1 VALUES(3,-2,-5); + } + catchsql {SELECT abs(a,b) FROM t1} + } {1 {wrong number of arguments to function abs()}} +} do_test func-4.2 { catchsql {SELECT abs() FROM t1} } {1 {wrong number of arguments to function abs()}} -do_test func-4.3 { - catchsql {SELECT abs(b) FROM t1 ORDER BY a} -} {0 {2 1.2345678901234 2}} -do_test func-4.4 { - catchsql {SELECT abs(c) FROM t1 ORDER BY a} -} {0 {3 12345.6789 5}} +ifcapable floatingpoint { + do_test func-4.3 { + catchsql {SELECT abs(b) FROM t1 ORDER BY a} + } {0 {2 1.2345678901234 2}} + do_test func-4.4 { + catchsql {SELECT abs(c) FROM t1 ORDER BY a} + } {0 {3 12345.6789 5}} +} +ifcapable !floatingpoint { + if {[working_64bit_int]} { + do_test func-4.3 { + catchsql {SELECT abs(b) FROM t1 ORDER BY a} + } {0 {2 12345678901234 2}} + } + do_test func-4.4 { + catchsql {SELECT abs(c) FROM t1 ORDER BY a} + } {0 {3 1234567890 5}} +} do_test func-4.4.1 { execsql {SELECT abs(a) FROM t2} } {1 {} 345 {} 67890} @@ -174,40 +199,44 @@ execsql {SELECT abs(t1) FROM tbl1} } {0.0 0.0 0.0 0.0 0.0} -do_test func-4.5 { - catchsql {SELECT round(a,b,c) FROM t1} -} {1 {wrong number of arguments to function round()}} -do_test func-4.6 { - catchsql {SELECT round(b,2) FROM t1 ORDER BY b} -} {0 {-2.0 1.23 2.0}} -do_test func-4.7 { - catchsql {SELECT round(b,0) FROM t1 ORDER BY a} -} {0 {2.0 1.0 -2.0}} -do_test func-4.8 { - catchsql {SELECT round(c) FROM t1 ORDER BY a} -} {0 {3.0 -12346.0 -5.0}} -do_test func-4.9 { - catchsql {SELECT round(c,a) FROM t1 ORDER BY a} -} {0 {3.0 -12345.68 -5.0}} -do_test func-4.10 { - catchsql {SELECT 'x' || round(c,a) || 'y' FROM t1 ORDER BY a} -} {0 {x3.0y x-12345.68y x-5.0y}} -do_test func-4.11 { - catchsql {SELECT round() FROM t1 ORDER BY a} -} {1 {wrong number of arguments to function round()}} -do_test func-4.12 { - execsql {SELECT coalesce(round(a,2),'nil') FROM t2} -} {1.0 nil 345.0 nil 67890.0} -do_test func-4.13 { - execsql {SELECT round(t1,2) FROM tbl1} -} {0.0 0.0 0.0 0.0 0.0} -do_test func-4.14 { - execsql {SELECT typeof(round(5.1,1));} -} {real} -do_test func-4.15 { - execsql {SELECT typeof(round(5.1));} -} {real} - +ifcapable floatingpoint { + do_test func-4.5 { + catchsql {SELECT round(a,b,c) FROM t1} + } {1 {wrong number of arguments to function round()}} + do_test func-4.6 { + catchsql {SELECT round(b,2) FROM t1 ORDER BY b} + } {0 {-2.0 1.23 2.0}} + do_test func-4.7 { + catchsql {SELECT round(b,0) FROM t1 ORDER BY a} + } {0 {2.0 1.0 -2.0}} + do_test func-4.8 { + catchsql {SELECT round(c) FROM t1 ORDER BY a} + } {0 {3.0 -12346.0 -5.0}} + do_test func-4.9 { + catchsql {SELECT round(c,a) FROM t1 ORDER BY a} + } {0 {3.0 -12345.68 -5.0}} + do_test func-4.10 { + catchsql {SELECT 'x' || round(c,a) || 'y' FROM t1 ORDER BY a} + } {0 {x3.0y x-12345.68y x-5.0y}} + do_test func-4.11 { + 
catchsql {SELECT round() FROM t1 ORDER BY a} + } {1 {wrong number of arguments to function round()}} + do_test func-4.12 { + execsql {SELECT coalesce(round(a,2),'nil') FROM t2} + } {1.0 nil 345.0 nil 67890.0} + do_test func-4.13 { + execsql {SELECT round(t1,2) FROM tbl1} + } {0.0 0.0 0.0 0.0 0.0} + do_test func-4.14 { + execsql {SELECT typeof(round(5.1,1));} + } {real} + do_test func-4.15 { + execsql {SELECT typeof(round(5.1));} + } {real} + do_test func-4.16 { + catchsql {SELECT round(b,2.0) FROM t1 ORDER BY b} + } {0 {-2.0 1.23 2.0}} +} # Test the upper() and lower() functions # @@ -256,14 +285,26 @@ # Tests for aggregate functions and how they handle NULLs. # -do_test func-8.1 { - ifcapable explain { - execsql {EXPLAIN SELECT sum(a) FROM t2;} - } - execsql { - SELECT sum(a), count(a), round(avg(a),2), min(a), max(a), count(*) FROM t2; - } -} {68236 3 22745.33 1 67890 5} +ifcapable floatingpoint { + do_test func-8.1 { + ifcapable explain { + execsql {EXPLAIN SELECT sum(a) FROM t2;} + } + execsql { + SELECT sum(a), count(a), round(avg(a),2), min(a), max(a), count(*) FROM t2; + } + } {68236 3 22745.33 1 67890 5} +} +ifcapable !floatingpoint { + do_test func-8.1 { + ifcapable explain { + execsql {EXPLAIN SELECT sum(a) FROM t2;} + } + execsql { + SELECT sum(a), count(a), avg(a), min(a), max(a), count(*) FROM t2; + } + } {68236 3 22745.0 1 67890 5} +} do_test func-8.2 { execsql { SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t2; @@ -290,6 +331,42 @@ SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; } } {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} +ifcapable compound { + do_test func-8.5 { + execsql { + SELECT sum(x) FROM (SELECT '9223372036' || '854775807' AS x + UNION ALL SELECT -9223372036854775807) + } + } {0} + do_test func-8.6 { + execsql { + SELECT typeof(sum(x)) FROM (SELECT '9223372036' || '854775807' AS x + UNION ALL SELECT -9223372036854775807) + } + } {integer} + do_test func-8.7 { + execsql { + SELECT typeof(sum(x)) FROM (SELECT '9223372036' || '854775808' AS x + UNION ALL SELECT -9223372036854775807) + } + } {real} +ifcapable floatingpoint { + do_test func-8.8 { + execsql { + SELECT sum(x)>0.0 FROM (SELECT '9223372036' || '854775808' AS x + UNION ALL SELECT -9223372036850000000) + } + } {1} +} +ifcapable !floatingpoint { + do_test func-8.8 { + execsql { + SELECT sum(x)>0 FROM (SELECT '9223372036' || '854775808' AS x + UNION ALL SELECT -9223372036850000000) + } + } {1} +} +} # How do you test the random() function in a meaningful, deterministic way? # @@ -324,9 +401,11 @@ # generated by randomblob(). So this seems like a good place to test # hex(). 
# -do_test func-9.10 { - execsql {SELECT hex(x'00112233445566778899aAbBcCdDeEfF')} -} {00112233445566778899AABBCCDDEEFF} +ifcapable bloblit { + do_test func-9.10 { + execsql {SELECT hex(x'00112233445566778899aAbBcCdDeEfF')} + } {00112233445566778899AABBCCDDEEFF} +} set encoding [db one {PRAGMA encoding}] if {$encoding=="UTF-16le"} { do_test func-9.11-utf16le { @@ -334,8 +413,7 @@ } {6100620063006400310032006700} do_test func-9.12-utf16le { execsql {SELECT hex(replace('abcdefg','','12'))} - } {{}} - breakpoint + } {6100620063006400650066006700} do_test func-9.13-utf16le { execsql {SELECT hex(replace('aabcdefg','a','aaa'))} } {610061006100610061006100620063006400650066006700} @@ -345,8 +423,7 @@ } {61626364313267} do_test func-9.12-utf8 { execsql {SELECT hex(replace('abcdefg','','12'))} - } {{}} - breakpoint + } {61626364656667} do_test func-9.13-utf8 { execsql {SELECT hex(replace('aabcdefg','a','aaa'))} } {616161616161626364656667} @@ -380,32 +457,35 @@ ); } } {{}} -do_test func-10.4 { - execsql { - SELECT testfunc( - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'double', 1.234 - ); - } -} {1.234} -do_test func-10.5 { - execsql { - SELECT testfunc( - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'int', 1234, - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'string', NULL, - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'double', 1.234, - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'int', 1234, - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'string', NULL, - 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', - 'double', 1.234 - ); - } -} {1.234} + +ifcapable floatingpoint { + do_test func-10.4 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234 + ); + } + } {1.234} + do_test func-10.5 { + execsql { + SELECT testfunc( + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'int', 1234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'string', NULL, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'int', 1234, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'string', NULL, + 'string', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 'double', 1.234 + ); + } + } {1.234} +} # Test the built-in sqlite_version(*) SQL function. # @@ -424,11 +504,21 @@ # value. Function test_destructor_count() returns the number of outstanding # destructor calls for values returned by test_destructor(). 
# -do_test func-12.1 { - execsql { - SELECT test_destructor('hello world'), test_destructor_count(); - } -} {{hello world} 1} +if {[db eval {PRAGMA encoding}]=="UTF-8"} { + do_test func-12.1-utf8 { + execsql { + SELECT test_destructor('hello world'), test_destructor_count(); + } + } {{hello world} 1} +} else { + ifcapable {utf16} { + do_test func-12.1-utf16 { + execsql { + SELECT test_destructor16('hello world'), test_destructor_count(); + } + } {{hello world} 1} + } +} do_test func-12.2 { execsql { SELECT test_destructor_count(); @@ -436,9 +526,9 @@ } {0} do_test func-12.3 { execsql { - SELECT test_destructor('hello')||' world', test_destructor_count(); + SELECT test_destructor('hello')||' world' } -} {{hello world} 0} +} {{hello world}} do_test func-12.4 { execsql { SELECT test_destructor_count(); @@ -463,6 +553,7 @@ } } {} + # Test that the auxdata API for scalar functions works. This test uses # a special user-defined function only available in test builds, # test_auxdata(). Function test_auxdata() takes any number of arguments. @@ -505,7 +596,7 @@ set DB [sqlite3_connection_pointer db] set sql "SELECT test_auxdata( ? , a ) FROM t4;" set STMT [sqlite3_prepare $DB $sql -1 TAIL] - sqlite3_bind_text $STMT 1 hello -1 + sqlite3_bind_text $STMT 1 hello\000 -1 set res [list] while { "SQLITE_ROW"==[sqlite3_step $STMT] } { lappend res [sqlite3_column_text $STMT 0] @@ -526,10 +617,17 @@ } {1} do_test func-15.1 { - catchsql { - select test_error(NULL); - } + catchsql {select test_error(NULL)} } {1 {}} +do_test func-15.2 { + catchsql {select test_error('this is the error message')} +} {1 {this is the error message}} +do_test func-15.3 { + catchsql {select test_error('this is the error message',12)} +} {1 {this is the error message}} +do_test func-15.4 { + db errorcode +} {12} # Test the quote function for BLOB and NULL values. do_test func-16.1 { @@ -566,12 +664,14 @@ SELECT sum(x) FROM t5; } } {9902} -do_test func-18.2 { - execsql { - INSERT INTO t5 VALUES(0.0); - SELECT sum(x) FROM t5; - } -} {9902.0} +ifcapable floatingpoint { + do_test func-18.2 { + execsql { + INSERT INTO t5 VALUES(0.0); + SELECT sum(x) FROM t5; + } + } {9902.0} +} # The sum of nothing is NULL. But the sum of all NULLs is NULL. 
# @@ -619,23 +719,39 @@ SELECT typeof(sum(x)) FROM t6 } } integer -do_test func-18.12 { - catchsql { - INSERT INTO t6 VALUES(1<<62); - SELECT sum(x) - ((1<<62)*2.0+1) from t6; - } -} {1 {integer overflow}} -do_test func-18.13 { - execsql { - SELECT total(x) - ((1<<62)*2.0+1) FROM t6 - } -} 0.0 -do_test func-18.14 { - execsql { - SELECT sum(-9223372036854775805); - } -} -9223372036854775805 - +ifcapable floatingpoint { + do_test func-18.12 { + catchsql { + INSERT INTO t6 VALUES(1<<62); + SELECT sum(x) - ((1<<62)*2.0+1) from t6; + } + } {1 {integer overflow}} + do_test func-18.13 { + execsql { + SELECT total(x) - ((1<<62)*2.0+1) FROM t6 + } + } 0.0 +} +ifcapable !floatingpoint { + do_test func-18.12 { + catchsql { + INSERT INTO t6 VALUES(1<<62); + SELECT sum(x) - ((1<<62)*2+1) from t6; + } + } {1 {integer overflow}} + do_test func-18.13 { + execsql { + SELECT total(x) - ((1<<62)*2+1) FROM t6 + } + } 0.0 +} +if {[working_64bit_int]} { + do_test func-18.14 { + execsql { + SELECT sum(-9223372036854775805); + } + } -9223372036854775805 +} ifcapable compound&&subquery { do_test func-18.15 { @@ -645,20 +761,22 @@ SELECT 10 AS x); } } {1 {integer overflow}} -do_test func-18.16 { - catchsql { - SELECT sum(x) FROM - (SELECT 9223372036854775807 AS x UNION ALL - SELECT -10 AS x); - } -} {0 9223372036854775797} -do_test func-18.17 { - catchsql { - SELECT sum(x) FROM - (SELECT -9223372036854775807 AS x UNION ALL - SELECT 10 AS x); - } -} {0 -9223372036854775797} +if {[working_64bit_int]} { + do_test func-18.16 { + catchsql { + SELECT sum(x) FROM + (SELECT 9223372036854775807 AS x UNION ALL + SELECT -10 AS x); + } + } {0 9223372036854775797} + do_test func-18.17 { + catchsql { + SELECT sum(x) FROM + (SELECT -9223372036854775807 AS x UNION ALL + SELECT 10 AS x); + } + } {0 -9223372036854775797} +} do_test func-18.18 { catchsql { SELECT sum(x) FROM @@ -691,11 +809,13 @@ # Integer overflow on abs() # -do_test func-18.31 { - catchsql { - SELECT abs(-9223372036854775807); - } -} {0 9223372036854775807} +if {[working_64bit_int]} { + do_test func-18.31 { + catchsql { + SELECT abs(-9223372036854775807); + } + } {0 9223372036854775807} +} do_test func-18.32 { catchsql { SELECT abs(-9223372036854775807-1); @@ -797,15 +917,17 @@ } } {0123456789012345678901234567890123456789012345678901234567890123456789} -do_test func-21.9 { - # Attempt to exploit a buffer-overflow that at one time existed - # in the REPLACE function. - set ::str "[string repeat A 29998]CC[string repeat A 35537]" - set ::rep [string repeat B 65536] - execsql { - SELECT LENGTH(REPLACE($::str, 'C', $::rep)); - } -} [expr 29998 + 2*65536 + 35537] +ifcapable tclvar { + do_test func-21.9 { + # Attempt to exploit a buffer-overflow that at one time existed + # in the REPLACE function. + set ::str "[string repeat A 29998]CC[string repeat A 35537]" + set ::rep [string repeat B 65536] + execsql { + SELECT LENGTH(REPLACE($::str, 'C', $::rep)); + } + } [expr 29998 + 2*65536 + 35537] +} # Tests for the TRIM, LTRIM and RTRIM functions. # @@ -870,4 +992,169 @@ execsql {SELECT typeof(trim('hello',NULL));} } {null} +# This is to test the deprecated sqlite3_aggregate_count() API. +# +ifcapable deprecated { + do_test func-23.1 { + sqlite3_create_aggregate db + execsql { + SELECT legacy_count() FROM t6; + } + } {3} +} + +# The group_concat() function. 
+# +do_test func-24.1 { + execsql { + SELECT group_concat(t1) FROM tbl1 + } +} {this,program,is,free,software} +do_test func-24.2 { + execsql { + SELECT group_concat(t1,' ') FROM tbl1 + } +} {{this program is free software}} +do_test func-24.3 { + execsql { + SELECT group_concat(t1,' ' || rowid || ' ') FROM tbl1 + } +} {{this 2 program 3 is 4 free 5 software}} +do_test func-24.4 { + execsql { + SELECT group_concat(NULL,t1) FROM tbl1 + } +} {{}} +do_test func-24.5 { + execsql { + SELECT group_concat(t1,NULL) FROM tbl1 + } +} {thisprogramisfreesoftware} +do_test func-24.6 { + execsql { + SELECT 'BEGIN-'||group_concat(t1) FROM tbl1 + } +} {BEGIN-this,program,is,free,software} + +# Ticket #3179: Make sure aggregate functions can take many arguments. +# None of the built-in aggregates do this, so use the md5sum() from the +# test extensions. +# +unset -nocomplain midargs +set midargs {} +unset -nocomplain midres +set midres {} +unset -nocomplain result +for {set i 1} {$i<[sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG -1]} {incr i} { + append midargs ,'/$i' + append midres /$i + set result [md5 \ + "this${midres}program${midres}is${midres}free${midres}software${midres}"] + set sql "SELECT md5sum(t1$midargs) FROM tbl1" + do_test func-24.7.$i { + db eval $::sql + } $result +} + +# Ticket #3806. If the initial string in a group_concat is an empty +# string, the separator that follows should still be present. +# +do_test func-24.8 { + execsql { + SELECT group_concat(CASE t1 WHEN 'this' THEN '' ELSE t1 END) FROM tbl1 + } +} {,program,is,free,software} +do_test func-24.9 { + execsql { + SELECT group_concat(CASE WHEN t1!='software' THEN '' ELSE t1 END) FROM tbl1 + } +} {,,,,software} + +# Ticket #3923. Initial empty strings have a separator. But initial +# NULLs do not. +# +do_test func-24.10 { + execsql { + SELECT group_concat(CASE t1 WHEN 'this' THEN null ELSE t1 END) FROM tbl1 + } +} {program,is,free,software} +do_test func-24.11 { + execsql { + SELECT group_concat(CASE WHEN t1!='software' THEN null ELSE t1 END) FROM tbl1 + } +} {software} +do_test func-24.12 { + execsql { + SELECT group_concat(CASE t1 WHEN 'this' THEN '' + WHEN 'program' THEN null ELSE t1 END) FROM tbl1 + } +} {,is,free,software} + + +# Use the test_isolation function to make sure that type conversions +# on function arguments do not effect subsequent arguments. +# +do_test func-25.1 { + execsql {SELECT test_isolation(t1,t1) FROM tbl1} +} {this program is free software} + +# Try to misuse the sqlite3_create_function() interface. Verify that +# errors are returned. +# +do_test func-26.1 { + abuse_create_function db +} {} + +# The previous test (func-26.1) registered a function with a very long +# function name that takes many arguments and always returns NULL. Verify +# that this function works correctly. 
+# +do_test func-26.2 { + set a {} + for {set i 1} {$i<=$::SQLITE_MAX_FUNCTION_ARG} {incr i} { + lappend a $i + } + db eval " + SELECT nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789([join $a ,]); + " +} {{}} +do_test func-26.3 { + set a {} + for {set i 1} {$i<=$::SQLITE_MAX_FUNCTION_ARG+1} {incr i} { + lappend a $i + } + catchsql " + SELECT nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789([join $a ,]); + " +} {1 {too many arguments on function nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789}} +do_test func-26.4 { + set a {} + for {set i 1} {$i<=$::SQLITE_MAX_FUNCTION_ARG-1} {incr i} { + lappend a $i + } + catchsql " + SELECT nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789([join $a ,]); + " +} {1 {wrong number of arguments to function nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789()}} +do_test func-26.5 { + catchsql " + SELECT nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_12345678a(0); + " +} {1 {no such function: nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_12345678a}} +do_test func-26.6 { + catchsql " + SELECT nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789a(0); + " +} {1 {no such function: nullx_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789_123456789a}} + +do_test func-27.1 { + catchsql {SELECT coalesce()} +} {1 {wrong number of arguments to function coalesce()}} +do_test func-27.2 { + catchsql {SELECT coalesce(1)} +} {1 {wrong number of arguments to function coalesce()}} +do_test func-27.3 { + catchsql {SELECT coalesce(1,2)} +} {0 1} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fuzz3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fuzz3.test --- sqlite3-3.4.2/test/fuzz3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/fuzz3.test 2009-06-12 
03:37:53.000000000 +0100 @@ -0,0 +1,169 @@ +# 2007 May 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of this file is checking the libraries response to subtly corrupting +# the database file by changing the values of pseudo-randomly selected +# bytes. +# +# $Id: fuzz3.test,v 1.3 2009/01/05 17:19:03 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +expr srand(123) + +proc rstring {n} { + set str s + while {[string length $str] < $n} { + append str [expr rand()] + } + return [string range $str 0 $n] +} + +# Return a randomly generated SQL literal. +# +proc rvalue {} { + switch -- [expr int(rand()*5)] { + 0 { # SQL NULL value. + return NULL + } + 1 { # Integer value. + return [expr int(rand()*1024)] + } + 2 { # Real value. + return [expr rand()] + } + 3 { # String value. + set n [expr int(rand()*2500)] + return "'[rstring $n]'" + } + 4 { # Blob value. + set n [expr int(rand()*2500)] + return "CAST('[rstring $n]' AS BLOB)" + } + } +} + +proc db_checksum {} { + set cksum [execsql { SELECT md5sum(a, b, c) FROM t1 }] + append cksum [execsql { SELECT md5sum(d, e, f) FROM t2 }] + set cksum +} + +# Modify a single byte in the file 'test.db' using tcl IO commands. The +# argument value, which must be an integer, determines both the offset of +# the byte that is modified, and the value that it is set to. The lower +# 8 bits of iMod determine the new byte value. The offset of the byte +# modified is the value of ($iMod >> 8). +# +# The return value is the iMod value required to restore the file +# to its original state. The command: +# +# modify_database [modify_database $x] +# +# leaves the file in the same state as it was in at the start of the +# command (assuming that the file is at least ($x>>8) bytes in size). +# +proc modify_database {iMod} { + set blob [binary format c [expr {$iMod&0xFF}]] + set offset [expr {$iMod>>8}] + + set fd [open test.db r+] + fconfigure $fd -encoding binary -translation binary + seek $fd $offset + set old_blob [read $fd 1] + seek $fd $offset + puts -nonewline $fd $blob + close $fd + + binary scan $old_blob c iOld + return [expr {($offset<<8) + ($iOld&0xFF)}] +} + +proc purge_pcache {} { + ifcapable !memorymanage { + db close + sqlite3 db test.db + } else { + sqlite3_release_memory 10000000 + } + if {[lindex [pcache_stats] 1] != 0} { + error "purge_pcache failed: [pcache_stats]" + } +} + +# This block creates a database to work with. +# +do_test fuzz3-1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(d, e, f); + CREATE INDEX i1 ON t1(a, b, c); + CREATE INDEX i2 ON t2(d, e, f); + } + for {set i 0} {$i < 50} {incr i} { + execsql "INSERT INTO t1 VALUES([rvalue], [rvalue], [rvalue])" + execsql "INSERT INTO t2 VALUES([rvalue], [rvalue], [rvalue])" + } + execsql COMMIT +} {} + +set ::cksum [db_checksum] +do_test fuzz3-2 { + db_checksum +} $::cksum + +for {set ii 0} {$ii < 5000} {incr ii} { + purge_pcache + + # Randomly modify a single byte of the database file somewhere within + # the first 100KB of the file. 
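Editorial note, not part of the patch: a single iMod integer carries both the byte offset and the replacement byte, which is what lets modify_database return the value needed to undo its own change. A minimal round-trip sketch of that encoding (the offset and byte values below are arbitrary):

    set offset 4096                                   ;# byte to corrupt
    set newByte 0xAB                                  ;# value to write there
    set iMod [expr {($offset << 8) | ($newByte & 0xFF)}]
    puts [expr {$iMod >> 8}]                          ;# -> 4096 (offset)
    puts [expr {$iMod & 0xFF}]                        ;# -> 171  (byte value)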
+ set iNew [expr int(rand()*5*1024*256)] + set iOld [modify_database $iNew] + + set iTest 0 + foreach sql { + {SELECT * FROM t2 ORDER BY d} + {SELECT * FROM t1} + {SELECT * FROM t2} + {SELECT * FROM t1 ORDER BY a} + {SELECT * FROM t1 WHERE a = (SELECT a FROM t1 WHERE rowid=25)} + {SELECT * FROM t2 WHERE d = (SELECT d FROM t2 WHERE rowid=1)} + {SELECT * FROM t2 WHERE d = (SELECT d FROM t2 WHERE rowid=50)} + {PRAGMA integrity_check} + } { + do_test fuzz3-$ii.$iNew.[incr iTest] { + foreach {rc msg} [catchsql $sql] {} + if {$rc == 0 + || $msg eq "database or disk is full" + || $msg eq "database disk image is malformed" + || $msg eq "file is encrypted or is not a database" + || [string match "malformed database schema*" $msg] + } { + set msg ok + } + set msg + } {ok} + } + + # Restore the original database file content. Test that the correct + # checksum is now returned. + # + purge_pcache + modify_database $iOld + do_test fuzz3-$ii.$iNew.[incr iTest] { + db_checksum + } $::cksum +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fuzz_common.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fuzz_common.tcl --- sqlite3-3.4.2/test/fuzz_common.tcl 2007-05-30 11:36:47.000000000 +0100 +++ sqlite3-3.6.16/test/fuzz_common.tcl 2009-06-12 03:37:53.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: fuzz_common.tcl,v 1.1 2007/05/30 10:36:47 danielk1977 Exp $ +# $Id: fuzz_common.tcl,v 1.2 2009/01/05 19:36:30 drh Exp $ proc fuzz {TemplateList} { set n [llength $TemplateList] @@ -374,7 +374,7 @@ if {$rc} { set e 0 foreach error $::fuzzyopts(-errorlist) { - if {0 == [string first $error $msg]} { + if {[string first $error $msg]>=0} { set e 1 break } @@ -389,4 +389,3 @@ } {1} } } - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fuzz_malloc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fuzz_malloc.test --- sqlite3-3.4.2/test/fuzz_malloc.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/fuzz_malloc.test 2009-06-05 18:03:30.000000000 +0100 @@ -12,21 +12,18 @@ # # This file tests malloc failures in concert with fuzzy SQL generation. # -# $Id: fuzz_malloc.test,v 1.5 2007/06/18 12:22:43 drh Exp $ +# $Id: fuzz_malloc.test,v 1.10 2008/08/20 16:35:10 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping fuzz_malloc tests: not compiled with -DSQLITE_MEMDEBUG=1" +ifcapable !memdebug { finish_test return } -source $testdir/fuzz_common.tcl source $testdir/malloc_common.tcl +source $testdir/fuzz_common.tcl if {[info exists ISQUICK]} { set ::REPEATS 20 @@ -48,7 +45,7 @@ set ::fuzzyopts(-sqlprep) {} array set ::fuzzyopts $args - sqlite_malloc_fail 0 + sqlite3_memdebug_fail -1 db close file delete test.db test.db-journal sqlite3 db test.db @@ -59,6 +56,7 @@ expr srand($jj) incr jj set ::sql [subst $::fuzzyopts(-template)] + # puts fuzyy-sql=\[$::sql\]; flush stdout foreach {rc res} [catchsql "$::sql"] {} if {$rc==0} { do_malloc_test $testname-$ii -sqlbody $::sql -sqlprep $::prep @@ -93,5 +91,4 @@ -template {[Select]} \ -sqlprep $::SQLPREP -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/fuzz.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/fuzz.test --- sqlite3-3.4.2/test/fuzz.test 2007-05-30 11:36:47.000000000 +0100 +++ sqlite3-3.6.16/test/fuzz.test 2009-06-25 12:24:39.000000000 +0100 @@ -19,7 +19,7 @@ # # The most complicated trees are for SELECT statements. 
# -# $Id: fuzz.test,v 1.14 2007/05/30 10:36:47 danielk1977 Exp $ +# $Id: fuzz.test,v 1.19 2009/04/28 11:10:39 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -32,6 +32,7 @@ } source $testdir/fuzz_common.tcl +expr srand(0) #---------------------------------------------------------------- # These tests caused errors that were first caught by the tests @@ -173,6 +174,133 @@ } } {} +# Making sure previously discovered errors have been fixed. +# +do_test fuzz-1.15 { + execsql { + SELECT hex(CAST(zeroblob(1000) AS integer)) + } +} {30} + +do_test fuzz-1.16.1 { + execsql { + CREATE TABLE abc(a, b, c); + CREATE TABLE def(a, b, c); + CREATE TABLE ghi(a, b, c); + } +} {} +do_test fuzz-1.16.2 { + catchsql { + SELECT DISTINCT EXISTS( + SELECT 1 + FROM ( + SELECT C FROM (SELECT 1) + ) + WHERE (SELECT c) + ) + FROM abc + } +} {0 {}} +do_test fuzz-1.16.3 { + catchsql { + SELECT DISTINCT substr(-456 ISNULL,zeroblob(1000), EXISTS( + SELECT DISTINCT EXISTS( + SELECT DISTINCT b FROM abc + ORDER BY EXISTS ( + SELECT DISTINCT 2147483647 UNION ALL SELECT -2147483648 + ) ASC + ) + FROM ( + SELECT c, c FROM ( + SELECT 456, 'injection' ORDER BY 56.1 ASC, -56.1 DESC + ) + ) + GROUP BY (SELECT ALL (SELECT DISTINCT 'hardware')) + HAVING ( + SELECT DISTINCT c + FROM ( + SELECT ALL -2147483648, 'experiments' + ORDER BY -56.1 ASC, -56.1 DESC + ) + GROUP BY (SELECT DISTINCT 456) IN + (SELECT DISTINCT 'injection') NOT IN (SELECT ALL -456) + HAVING EXISTS ( + SELECT ALL 'injection' + ) + ) + UNION ALL + SELECT a IN ( + SELECT -2147483647 + UNION ALL + SELECT ALL 'injection' + ) + FROM sqlite_master + ) -- end EXISTS + ) /* end SUBSTR() */, c NOTNULL ISNULL + FROM abc + ORDER BY CAST(-56.1 AS blob) ASC + } +} {0 {}} +do_test fuzz-1.16.4 { + execsql { + DROP TABLE abc; DROP TABLE def; DROP TABLE ghi; + } +} {} + +do_test fuzz-1.17 { + catchsql { + SELECT 'hardware', 56.1 NOTNULL, random()&0 + FROM ( + SELECT ALL lower(~ EXISTS ( + SELECT 1 NOT IN (SELECT ALL 1) + )), CAST(456 AS integer), -2147483647 + FROM ( + SELECT DISTINCT -456, CAST(1 AS integer) ISNULL + FROM (SELECT ALL 2147483647, typeof(2147483649)) + ) + ) + GROUP BY CAST(CAST('experiments' AS blob) AS blob) + HAVING random() + } +} {0 {hardware 1 0}} + +do_test fuzz-1.18 { + catchsql { + SELECT -2147483649 << upper('fault' NOT IN ( + SELECT ALL ( + SELECT ALL -1 + ORDER BY -2147483649 + LIMIT ( + SELECT ALL ( + SELECT 0 EXCEPT SELECT DISTINCT 'experiments' ORDER BY 1 ASC + ) + ) + OFFSET EXISTS ( + SELECT ALL + (SELECT ALL -2147483648) NOT IN ( + SELECT ALL 123456789.1234567899 + ) IN (SELECT 2147483649) + FROM sqlite_master + ) NOT IN (SELECT ALL 'The') + ) + )) + } +} {0 -4294967298} + +# At one point the following INSERT statement caused an assert() to fail. +# +do_test fuzz-1.19 { + execsql { CREATE TABLE t1(a) } + catchsql { + INSERT INTO t1 VALUES( + CASE WHEN NULL THEN NULL ELSE ( SELECT 0 ORDER BY 456 ) END + ) + } +} {1 {1st ORDER BY term out of range - should be between 1 and 1}} +do_test fuzz-1.20 { + execsql { DROP TABLE t1 } +} {} + #---------------------------------------------------------------- # Test some fuzzily generated expressions. 
# diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/hook.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/hook.test --- sqlite3-3.4.2/test/hook.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/hook.test 2009-06-25 12:22:34.000000000 +0100 @@ -17,7 +17,7 @@ # sqlite_update_hook (tests hook-4-*) # sqlite_rollback_hook (tests hook-5.*) # -# $Id: hook.test,v 1.11 2006/01/17 09:35:02 danielk1977 Exp $ +# $Id: hook.test,v 1.15 2009/04/07 14:14:23 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -92,6 +92,27 @@ set ::commit_cnt } {} +# Ticket #3564. +# +do_test hook-3.10 { + file delete -force test2.db test2.db-journal + sqlite3 db2 test2.db + proc commit_hook {} { + set y [db2 one {SELECT y FROM t3 WHERE y>10}] + return [expr {$y>10}] + } + db2 eval {CREATE TABLE t3(x,y)} + db2 commit_hook commit_hook + catchsql {INSERT INTO t3 VALUES(1,2)} db2 + catchsql {INSERT INTO t3 VALUES(11,12)} db2 + catchsql {INSERT INTO t3 VALUES(3,4)} db2 + db2 eval { + SELECT * FROM t3 ORDER BY x; + } +} {1 2 3 4} +db2 close + + #---------------------------------------------------------------------------- # Tests for the update-hook. # @@ -137,8 +158,25 @@ DELETE main t1 4 \ ] -set ::update_hook {} ifcapable trigger { + # Update hook is not invoked for changes to sqlite_master + # + do_test hook-4.1.3 { + set ::update_hook {} + execsql { + CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN SELECT RAISE(IGNORE); END; + } + set ::update_hook + } {} + do_test hook-4.1.4 { + set ::update_hook {} + execsql { + DROP TRIGGER r1; + } + set ::update_hook + } {} + + set ::update_hook {} do_test hook-4.2.1 { catchsql { DROP TABLE t2; @@ -177,23 +215,25 @@ # Update-hook + ATTACH set ::update_hook {} -do_test hook-4.2.3 { - file delete -force test2.db - execsql { - ATTACH 'test2.db' AS aux; - CREATE TABLE aux.t3(a INTEGER PRIMARY KEY, b); - INSERT INTO aux.t3 SELECT * FROM t1; - UPDATE t3 SET b = 'two or so' WHERE a = 2; - DELETE FROM t3 WHERE 1; -- Avoid the truncate optimization (for now) - } - set ::update_hook -} [list \ - INSERT aux t3 1 \ - INSERT aux t3 2 \ - UPDATE aux t3 2 \ - DELETE aux t3 1 \ - DELETE aux t3 2 \ -] +ifcapable attach { + do_test hook-4.2.3 { + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t3(a INTEGER PRIMARY KEY, b); + INSERT INTO aux.t3 SELECT * FROM t1; + UPDATE t3 SET b = 'two or so' WHERE a = 2; + DELETE FROM t3 WHERE 1; -- Avoid the truncate optimization (for now) + } + set ::update_hook + } [list \ + INSERT aux t3 1 \ + INSERT aux t3 2 \ + UPDATE aux t3 2 \ + DELETE aux t3 1 \ + DELETE aux t3 2 \ + ] +} ifcapable trigger { execsql { @@ -221,7 +261,7 @@ DELETE main t1 3 \ ] set ::update_hook {} -ifcapable compound { +ifcapable compound&&attach { do_test hook-4.3.2 { execsql { SELECT * FROM t1 UNION SELECT * FROM t3; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/icu.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/icu.test --- sqlite3-3.4.2/test/icu.test 2007-05-07 12:53:14.000000000 +0100 +++ sqlite3-3.6.16/test/icu.test 2009-06-05 18:03:30.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: icu.test,v 1.1 2007/05/07 11:53:14 danielk1977 Exp $ +# $Id: icu.test,v 1.2 2008/07/12 14:52:20 drh Exp $ # set testdir [file dirname $argv0] @@ -115,4 +115,3 @@ } {apricot cherry chokecherry yamot peach plum} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/in2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/in2.test --- sqlite3-3.4.2/test/in2.test 2007-05-12 11:41:48.000000000 +0100 
+++ sqlite3-3.6.16/test/in2.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,7 +11,7 @@ # This file tests a special case in the b-tree code that can be # hit by the "IN" operator (or EXISTS, NOT IN, etc.). # -# $Id: in2.test,v 1.2 2007/05/12 10:41:48 danielk1977 Exp $ +# $Id: in2.test,v 1.3 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -65,4 +65,3 @@ } finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/in3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/in3.test --- sqlite3-3.4.2/test/in3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/in3.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,289 @@ +# 2007 November 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file tests the optimisations made in November 2007 of expressions +# of the following form: +# +# IN (SELECT FROM ) +# +# $Id: in3.test,v 1.5 2008/08/04 03:51:24 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +# Return the number of OpenEphemeral instructions used in the +# implementation of the sql statement passed as a an argument. +# +proc nEphemeral {sql} { + set nEph 0 + foreach op [execsql "EXPLAIN $sql"] { + if {$op eq "OpenEphemeral"} {incr nEph} + } + set nEph +} + +# This proc works the same way as execsql, except that the number +# of OpenEphemeral instructions used in the implementation of the +# statement is inserted into the start of the returned list. +# +proc exec_neph {sql} { + return [concat [nEphemeral $sql] [execsql $sql]] +} + +do_test in3-1.1 { + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 VALUES(5, 6); + } +} {} + +# All of these queries should avoid using a temp-table: +# +do_test in3-1.2 { + exec_neph { SELECT rowid FROM t1 WHERE rowid IN (SELECT rowid FROM t1); } +} {0 1 2 3} +do_test in3-1.3 { + exec_neph { SELECT a FROM t1 WHERE a IN (SELECT a FROM t1); } +} {0 1 3 5} +do_test in3-1.4 { + exec_neph { SELECT rowid FROM t1 WHERE rowid+0 IN (SELECT rowid FROM t1); } +} {0 1 2 3} +do_test in3-1.5 { + exec_neph { SELECT a FROM t1 WHERE a+0 IN (SELECT a FROM t1); } +} {0 1 3 5} + +# Because none of the sub-select queries in the following statements +# match the pattern ("SELECT FROM
    "), the following do +# require a temp table. +# +do_test in3-1.6 { + exec_neph { SELECT rowid FROM t1 WHERE rowid IN (SELECT rowid+0 FROM t1); } +} {1 1 2 3} +do_test in3-1.7 { + exec_neph { SELECT a FROM t1 WHERE a IN (SELECT a+0 FROM t1); } +} {1 1 3 5} +do_test in3-1.8 { + exec_neph { SELECT a FROM t1 WHERE a IN (SELECT a FROM t1 WHERE 1); } +} {1 1 3 5} +do_test in3-1.9 { + exec_neph { SELECT a FROM t1 WHERE a IN (SELECT a FROM t1 GROUP BY a); } +} {1 1 3 5} + +# This should not use a temp-table. Even though the sub-select does +# not exactly match the pattern "SELECT FROM
    ", in +# this case the ORDER BY is a no-op and can be ignored. +do_test in3-1.10 { + exec_neph { SELECT a FROM t1 WHERE a IN (SELECT a FROM t1 ORDER BY a); } +} {0 1 3 5} + +# These do use the temp-table. Adding the LIMIT clause means the +# ORDER BY cannot be ignored. +do_test in3-1.11 { + exec_neph {SELECT a FROM t1 WHERE a IN (SELECT a FROM t1 ORDER BY a LIMIT 1)} +} {1 1} +do_test in3-1.12 { + exec_neph { + SELECT a FROM t1 WHERE a IN (SELECT a FROM t1 ORDER BY a LIMIT 1 OFFSET 1) + } +} {1 3} + +# Has to use a temp-table because of the compound sub-select. +# +ifcapable compound { + do_test in3-1.13 { + exec_neph { + SELECT a FROM t1 WHERE a IN ( + SELECT a FROM t1 UNION ALL SELECT a FROM t1 + ) + } + } {1 1 3 5} +} + +# The first of these queries has to use the temp-table, because the +# collation sequence used for the index on "t1.a" does not match the +# collation sequence used by the "IN" comparison. The second does not +# require a temp-table, because the collation sequences match. +# +do_test in3-1.14 { + exec_neph { SELECT a FROM t1 WHERE a COLLATE nocase IN (SELECT a FROM t1) } +} {1 1 3 5} +do_test in3-1.15 { + exec_neph { SELECT a FROM t1 WHERE a COLLATE binary IN (SELECT a FROM t1) } +} {0 1 3 5} + +# Neither of these queries require a temp-table. The collation sequence +# makes no difference when using a rowid. +# +do_test in3-1.16 { + exec_neph {SELECT a FROM t1 WHERE a COLLATE nocase IN (SELECT rowid FROM t1)} +} {0 1 3} +do_test in3-1.17 { + exec_neph {SELECT a FROM t1 WHERE a COLLATE binary IN (SELECT rowid FROM t1)} +} {0 1 3} + +# The following tests - in3.2.* - test a bug that was difficult to track +# down during development. They are not particularly well focused. +# +do_test in3-2.1 { + execsql { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(w int, x int, y int); + CREATE TABLE t2(p int, q int, r int, s int); + } + for {set i 1} {$i<=100} {incr i} { + set w $i + set x [expr {int(log($i)/log(2))}] + set y [expr {$i*$i + 2*$i + 1}] + execsql "INSERT INTO t1 VALUES($w,$x,$y)" + } + set maxy [execsql {select max(y) from t1}] + db eval { INSERT INTO t2 SELECT 101-w, x, $maxy+1-y, y FROM t1 } +} {} +do_test in3-2.2 { + execsql { + SELECT rowid + FROM t1 + WHERE rowid IN (SELECT rowid FROM t1 WHERE rowid IN (1, 2)); + } +} {1 2} +do_test in3-2.3 { + execsql { + select rowid from t1 where rowid IN (-1,2,4) + } +} {2 4} +do_test in3-2.4 { + execsql { + SELECT rowid FROM t1 WHERE rowid IN + (select rowid from t1 where rowid IN (-1,2,4)) + } +} {2 4} + +#------------------------------------------------------------------------- +# This next block of tests - in3-3.* - verify that column affinity is +# correctly handled in cases where an index might be used to optimise +# an IN (SELECT) expression. +# +do_test in3-3.1 { + catch {execsql { + DROP TABLE t1; + DROP TABLE t2; + }} + + execsql { + + CREATE TABLE t1(a BLOB, b NUMBER ,c TEXT); + CREATE UNIQUE INDEX t1_i1 ON t1(a); /* no affinity */ + CREATE UNIQUE INDEX t1_i2 ON t1(b); /* numeric affinity */ + CREATE UNIQUE INDEX t1_i3 ON t1(c); /* text affinity */ + + CREATE TABLE t2(x BLOB, y NUMBER, z TEXT); + CREATE UNIQUE INDEX t2_i1 ON t2(x); /* no affinity */ + CREATE UNIQUE INDEX t2_i2 ON t2(y); /* numeric affinity */ + CREATE UNIQUE INDEX t2_i3 ON t2(z); /* text affinity */ + + INSERT INTO t1 VALUES(1, 1, 1); + INSERT INTO t2 VALUES('1', '1', '1'); + } +} {} + +do_test in3-3.2 { + # No affinity is applied before comparing "x" and "a". Therefore + # the index can be used (the comparison is false, text!=number). 
+ exec_neph { SELECT x IN (SELECT a FROM t1) FROM t2 } +} {0 0} +do_test in3-3.3 { + # Logically, numeric affinity is applied to both sides before + # the comparison. Therefore it is possible to use index t1_i2. + exec_neph { SELECT x IN (SELECT b FROM t1) FROM t2 } +} {0 1} +do_test in3-3.4 { + # No affinity is applied before the comparison takes place. Making + # it possible to use index t1_i3. + exec_neph { SELECT x IN (SELECT c FROM t1) FROM t2 } +} {0 1} + +do_test in3-3.5 { + # Numeric affinity should be applied to each side before the comparison + # takes place. Therefore we cannot use index t1_i1, which has no affinity. + exec_neph { SELECT y IN (SELECT a FROM t1) FROM t2 } +} {1 1} +do_test in3-3.6 { + # Numeric affinity is applied to both sides before + # the comparison. Therefore it is possible to use index t1_i2. + exec_neph { SELECT y IN (SELECT b FROM t1) FROM t2 } +} {0 1} +do_test in3-3.7 { + # Numeric affinity is applied before the comparison takes place. + # Making it impossible to use index t1_i3. + exec_neph { SELECT y IN (SELECT c FROM t1) FROM t2 } +} {1 1} + +#--------------------------------------------------------------------- +# +# Test using a multi-column index. +# +do_test in3-4.1 { + execsql { + CREATE TABLE t3(a, b, c); + CREATE UNIQUE INDEX t3_i ON t3(b, a); + } + + execsql { + INSERT INTO t3 VALUES(1, 'numeric', 2); + INSERT INTO t3 VALUES(2, 'text', 2); + INSERT INTO t3 VALUES(3, 'real', 2); + INSERT INTO t3 VALUES(4, 'none', 2); + } +} {} +do_test in3-4.2 { + exec_neph { SELECT 'text' IN (SELECT b FROM t3) } +} {0 1} +do_test in3-4.3 { + exec_neph { SELECT 'TEXT' COLLATE nocase IN (SELECT b FROM t3) } +} {1 1} +do_test in3-4.4 { + # A temp table must be used because t3_i.b is not guaranteed to be unique. + exec_neph { SELECT b FROM t3 WHERE b IN (SELECT b FROM t3) } +} {1 none numeric real text} +do_test in3-4.5 { + execsql { CREATE UNIQUE INDEX t3_i2 ON t3(b) } + exec_neph { SELECT b FROM t3 WHERE b IN (SELECT b FROM t3) } +} {0 none numeric real text} +do_test in3-4.6 { + execsql { DROP INDEX t3_i2 } +} {} + +# The following two test cases verify that ticket #2991 has been fixed. +# +do_test in3-5.1 { + execsql { + CREATE TABLE Folders( + folderid INTEGER PRIMARY KEY, + parentid INTEGER, + rootid INTEGER, + path VARCHAR(255) + ); + } +} {} +do_test in3-5.2 { + catchsql { + DELETE FROM Folders WHERE folderid IN + (SELECT folderid FROM Folder WHERE path LIKE 'C:\MP3\Albums\' || '%'); + } +} {1 {no such table: Folder}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/in4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/in4.test --- sqlite3-3.4.2/test/in4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/in4.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,162 @@ +# 2008 September 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: in4.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test in4-1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + } +} {} +do_test in4-1.2 { + execsql { + SELECT * FROM t1 WHERE a IN ('aaa', 'bbb', 'ccc'); + } +} {} +do_test in4-1.3 { + execsql { + INSERT INTO t1 VALUES('aaa', 1); + INSERT INTO t1 VALUES('ddd', 2); + INSERT INTO t1 VALUES('ccc', 3); + INSERT INTO t1 VALUES('eee', 4); + SELECT b FROM t1 WHERE a IN ('aaa', 'bbb', 'ccc'); + } +} {1 3} +do_test in4-1.4 { + execsql { + SELECT a FROM t1 WHERE rowid IN (1, 3); + } +} {aaa ccc} +do_test in4-1.5 { + execsql { + SELECT a FROM t1 WHERE rowid IN (); + } +} {} +do_test in4-1.6 { + execsql { + SELECT a FROM t1 WHERE a IN ('ddd'); + } +} {ddd} + +do_test in4-2.1 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t2 VALUES(-1, '-one'); + INSERT INTO t2 VALUES(0, 'zero'); + INSERT INTO t2 VALUES(1, 'one'); + INSERT INTO t2 VALUES(2, 'two'); + INSERT INTO t2 VALUES(3, 'three'); + } +} {} + +do_test in4-2.2 { + execsql { SELECT b FROM t2 WHERE a IN (0, 2) } +} {zero two} + +do_test in4-2.3 { + execsql { SELECT b FROM t2 WHERE a IN (2, 0) } +} {zero two} + +do_test in4-2.4 { + execsql { SELECT b FROM t2 WHERE a IN (2, -1) } +} {-one two} + +do_test in4-2.5 { + execsql { SELECT b FROM t2 WHERE a IN (NULL, 3) } +} {three} + +do_test in4-2.6 { + execsql { SELECT b FROM t2 WHERE a IN (1.0, 2.1) } +} {one} + +do_test in4-2.7 { + execsql { SELECT b FROM t2 WHERE a IN ('1', '2') } +} {one two} + +do_test in4-2.8 { + execsql { SELECT b FROM t2 WHERE a IN ('', '0.0.0', '2') } +} {two} + +# The following block of tests test expressions of the form: +# +# IN () +# +# i.e. IN expressions with a literal empty set. +# +# This has led to crashes on more than one occasion. Test case in4-3.2 +# was added in reponse to a bug reported on the mailing list on 11/7/2008. +# See also tickets #3602 and #185. 
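Editorial illustration, not part of the patch: SQLite accepts a literal empty set on the right-hand side of IN, and the in4-3.* tests below rely on such an expression evaluating to false rather than failing to parse. Assuming an open connection named db:

    db eval { SELECT 5 IN (), 5 NOT IN () }   ;# -> 0 1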
+# +do_test in4-3.1 { + execsql { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + CREATE TABLE t1(x, id); + CREATE TABLE t2(x, id); + INSERT INTO t1 VALUES(NULL, NULL); + INSERT INTO t1 VALUES(0, NULL); + INSERT INTO t1 VALUES(1, 3); + INSERT INTO t1 VALUES(2, 4); + INSERT INTO t1 VALUES(3, 5); + INSERT INTO t1 VALUES(4, 6); + INSERT INTO t2 VALUES(0, NULL); + INSERT INTO t2 VALUES(4, 1); + INSERT INTO t2 VALUES(NULL, 1); + INSERT INTO t2 VALUES(NULL, NULL); + } +} {} +do_test in4-3.2 { + execsql { + SELECT x FROM t1 WHERE id IN () AND x IN (SELECT x FROM t2 WHERE id=1) + } +} {} +do_test in4-3.3 { + execsql { + CREATE TABLE t3(x, y, z); + CREATE INDEX t3i1 ON t3(x, y); + INSERT INTO t3 VALUES(1, 1, 1); + INSERT INTO t3 VALUES(10, 10, 10); + } + execsql { SELECT * FROM t3 WHERE x IN () } +} {} +do_test in4-3.4 { + execsql { SELECT * FROM t3 WHERE x = 10 AND y IN () } +} {} +do_test in4-3.5 { + execsql { SELECT * FROM t3 WHERE x IN () AND y = 10 } +} {} +do_test in4-3.6 { + execsql { SELECT * FROM t3 WHERE x IN () OR x = 10 } +} {10 10 10} +do_test in4-3.7 { + execsql { SELECT * FROM t3 WHERE y IN () } +} {} +do_test in4-3.8 { + execsql { SELECT x IN() AS a FROM t3 WHERE a } +} {} +do_test in4-3.9 { + execsql { SELECT x IN() AS a FROM t3 WHERE NOT a } +} {0 0} +do_test in4-3.10 { + execsql { SELECT * FROM t3 WHERE oid IN () } +} {} +do_test in4-3.11 { + execsql { SELECT * FROM t3 WHERE x IN (1, 2) OR y IN ()} +} {1 1 1} +do_test in4-3.12 { + execsql { SELECT * FROM t3 WHERE x IN (1, 2) AND y IN ()} +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrblob2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrblob2.test --- sqlite3-3.4.2/test/incrblob2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/incrblob2.test 2009-06-12 03:37:53.000000000 +0100 @@ -0,0 +1,409 @@ +# 2008 June 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test that it is possible to have two open blob handles on a single +# blob object. 
+# +# $Id: incrblob2.test,v 1.10 2009/03/16 13:19:36 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!autovacuum || !pragma || !incrblob} { + finish_test + return +} + +do_test incrblob2-1.0 { + execsql { + CREATE TABLE blobs(id INTEGER PRIMARY KEY, data BLOB); + INSERT INTO blobs VALUES(NULL, zeroblob(5000)); + INSERT INTO blobs VALUES(NULL, zeroblob(5000)); + INSERT INTO blobs VALUES(NULL, zeroblob(5000)); + INSERT INTO blobs VALUES(NULL, zeroblob(5000)); + } +} {} + +foreach iOffset [list 0 256 4094] { + do_test incrblob2-1.$iOffset.1 { + set fd [db incrblob blobs data 1] + puts $fd "[string repeat x $iOffset]SQLite version 3.6.0" + close $fd + } {} + + do_test incrblob2-1.$iOffset.2 { + set fd1 [db incrblob blobs data 1] + set fd2 [db incrblob blobs data 1] + fconfigure $fd1 -buffering none + fconfigure $fd2 -buffering none + if {$iOffset != 0} { + seek $fd2 $iOffset start + seek $fd1 $iOffset start + } + read $fd1 6 + } {SQLite} + + do_test incrblob2-1.$iOffset.3 { + read $fd2 6 + } {SQLite} + + do_test incrblob2-1.$iOffset.4 { + seek $fd2 $iOffset start + seek $fd1 $iOffset start + puts -nonewline $fd2 "etiLQS" + } {} + + + do_test incrblob2-1.$iOffset.5 { + seek $fd1 $iOffset start + read $fd1 6 + } {etiLQS} + + do_test incrblob2-1.$iOffset.6 { + seek $fd2 $iOffset start + read $fd2 6 + } {etiLQS} + + do_test incrblob2-1.$iOffset.7 { + seek $fd1 $iOffset start + read $fd1 6 + } {etiLQS} + + do_test incrblob2-1.$iOffset.8 { + close $fd1 + close $fd2 + } {} +} + +#-------------------------------------------------------------------------- + +foreach iOffset [list 0 256 4094] { + + do_test incrblob2-2.$iOffset.1 { + set fd1 [db incrblob blobs data 1] + seek $fd1 [expr $iOffset - 5000] end + fconfigure $fd1 -buffering none + + set fd2 [db incrblob blobs data 1] + seek $fd2 [expr $iOffset - 5000] end + fconfigure $fd2 -buffering none + + puts -nonewline $fd1 "123456" + } {} + + do_test incrblob2-2.$iOffset.2 { + read $fd2 6 + } {123456} + + do_test incrblob2-2.$iOffset.3 { + close $fd1 + close $fd2 + } {} +} + +do_test incrblob2-3.1 { + set fd1 [db incrblob blobs data 1] + fconfigure $fd1 -buffering none +} {} +do_test incrblob2-3.2 { + execsql { + INSERT INTO blobs VALUES(5, zeroblob(10240)); + } +} {} +do_test incrblob2-3.3 { + set rc [catch { read $fd1 6 } msg] + list $rc $msg +} {0 123456} +do_test incrblob2-3.4 { + close $fd1 +} {} + +#-------------------------------------------------------------------------- +# The following tests - incrblob2-4.* - test that blob handles are +# invalidated at the correct times. +# +do_test incrblob2-4.1 { + unset -nocomplain data + db eval BEGIN + db eval { CREATE TABLE t1(id INTEGER PRIMARY KEY, data BLOB); } + for {set ii 1} {$ii < 100} {incr ii} { + set data [string repeat "blob$ii" 500] + db eval { INSERT INTO t1 VALUES($ii, $data) } + } + db eval COMMIT +} {} + +proc aborted_handles {} { + global handles + + set aborted {} + for {set ii 1} {$ii < 100} {incr ii} { + set str "blob$ii" + set nByte [string length $str] + set iOffset [expr $nByte * $ii * 2] + + set rc [catch {sqlite3_blob_read $handles($ii) $iOffset $nByte} msg] + if {$rc && $msg eq "SQLITE_ABORT"} { + lappend aborted $ii + } else { + if {$rc || $msg ne $str} { + error "blob $ii: $msg" + } + } + } + set aborted +} + +do_test incrblob2-4.2 { + for {set ii 1} {$ii < 100} {incr ii} { + set handles($ii) [db incrblob t1 data $ii] + } + aborted_handles +} {} + +# Update row 3. This should abort handle 3 but leave all others untouched. 
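Editorial sketch, not part of the patch: the rule these tests exercise is that an open incremental-blob handle is expired once the row it points at is written, after which reads and writes through the handle return SQLITE_ABORT. A minimal illustration against the t1(id, data) table created above; sqlite3_blob_read is a testfixture-only command, so it is shown commented out:

    set h [db incrblob t1 data 3]                        ;# handle on row 3
    db eval {UPDATE t1 SET data = data || '' WHERE id = 3}
    # sqlite3_blob_read $h 0 5    ;# would now fail with SQLITE_ABORT
    close $h                                             ;# closing an expired handle still works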
+# +do_test incrblob2-4.3 { + db eval {UPDATE t1 SET data = data || '' WHERE id = 3} + aborted_handles +} {3} + +# Test that a write to handle 3 also returns SQLITE_ABORT. +# +do_test incrblob2-4.3.1 { + set rc [catch {sqlite3_blob_write $::handles(3) 10 HELLO} msg] + list $rc $msg +} {1 SQLITE_ABORT} + +# Delete row 14. This should abort handle 6 but leave all others untouched. +# +do_test incrblob2-4.4 { + db eval {DELETE FROM t1 WHERE id = 14} + aborted_handles +} {3 14} + +# Change the rowid of row 15 to 102. Should abort handle 15. +# +do_test incrblob2-4.5 { + db eval {UPDATE t1 SET id = 102 WHERE id = 15} + aborted_handles +} {3 14 15} + +# Clobber row 92 using INSERT OR REPLACE. +# +do_test incrblob2-4.6 { + db eval {INSERT OR REPLACE INTO t1 VALUES(92, zeroblob(1000))} + aborted_handles +} {3 14 15 92} + +# Clobber row 65 using UPDATE OR REPLACE on row 35. This should abort +# handles 35 and 65. +# +do_test incrblob2-4.7 { + db eval {UPDATE OR REPLACE t1 SET id = 65 WHERE id = 35} + aborted_handles +} {3 14 15 35 65 92} + +# Insert a couple of new rows. This should not invalidate any handles. +# +do_test incrblob2-4.9 { + db eval {INSERT INTO t1 SELECT NULL, data FROM t1} + aborted_handles +} {3 14 15 35 65 92} + +# Delete all rows from 1 to 25. This should abort all handles up to 25. +# +do_test incrblob2-4.9 { + db eval {DELETE FROM t1 WHERE id >=1 AND id <= 25} + aborted_handles +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 35 65 92} + +# Delete the whole table (this will use sqlite3BtreeClearTable()). All handles +# should now be aborted. +# +do_test incrblob2-4.10 { + db eval {DELETE FROM t1} + aborted_handles +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99} + +do_test incrblob2-4.1.X { + for {set ii 1} {$ii < 100} {incr ii} { + close $handles($ii) + } +} {} + +#-------------------------------------------------------------------------- +# The following tests - incrblob2-5.* - test that in shared cache an open +# blob handle counts as a read-lock on its table. +# +ifcapable shared_cache { + db close + set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + + do_test incrblob2-5.1 { + sqlite3 db test.db + sqlite3 db2 test.db + + execsql { + INSERT INTO t1 VALUES(1, 'abcde'); + } + } {} + + do_test incrblob2-5.2 { + catchsql { INSERT INTO t1 VALUES(2, 'fghij') } db2 + } {0 {}} + + do_test incrblob2-5.3 { + set blob [db incrblob t1 data 1] + catchsql { INSERT INTO t1 VALUES(3, 'klmno') } db2 + } {1 {database table is locked}} + + do_test incrblob2-5.4 { + close $blob + execsql BEGIN db2 + catchsql { INSERT INTO t1 VALUES(4, 'pqrst') } db2 + } {0 {}} + + do_test incrblob2-5.5 { + set blob [db incrblob -readonly t1 data 1] + catchsql { INSERT INTO t1 VALUES(5, 'uvwxy') } db2 + } {1 {database table is locked}} + + do_test incrblob2-5.6 { + close $blob + catchsql { INSERT INTO t1 VALUES(3, 'klmno') } db2 + } {0 {}} + + db2 close + db close + sqlite3_enable_shared_cache $::enable_shared_cache +} + +#-------------------------------------------------------------------------- +# The following tests - incrblob2-6.* - test a specific scenario that might +# be causing an error. 
+# +sqlite3 db test.db +do_test incrblob2-6.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1, zeroblob(100)); + } + + set rdHandle [db incrblob -readonly t1 data 1] + set wrHandle [db incrblob t1 data 1] + + sqlite3_blob_read $rdHandle 0 100 + + sqlite3_blob_write $wrHandle 0 ABCDEF + + close $wrHandle + close $rdHandle +} {} + +do_test incrblob2-6.2 { + set rdHandle [db incrblob -readonly t1 data 1] + sqlite3_blob_read $rdHandle 0 2 +} {AB} + +do_test incrblob2-6.3 { + set wrHandle [db incrblob t1 data 1] + sqlite3_blob_write $wrHandle 0 ZZZZZZZZZZ + sqlite3_blob_read $rdHandle 2 4 +} {ZZZZ} + +do_test incrblob2-6.4 { + close $wrHandle + close $rdHandle +} {} + +sqlite3_memory_highwater 1 +do_test incrblob2-7.1 { + db eval { + CREATE TABLE t2(B BLOB); + INSERT INTO t2 VALUES(zeroblob(10 * 1024 * 1024)); + } + expr {[sqlite3_memory_highwater]<(5 * 1024 * 1024)} +} {1} + +do_test incrblob2-7.2 { + set h [db incrblob t2 B 1] + expr {[sqlite3_memory_highwater]<(5 * 1024 * 1024)} +} {1} + +do_test incrblob2-7.3 { + seek $h 0 end + tell $h +} [expr 10 * 1024 * 1024] + +do_test incrblob2-7.4 { + expr {[sqlite3_memory_highwater]<(5 * 1024 * 1024)} +} {1} + +do_test incrblob2-7.5 { + close $h +} {} + +#--------------------------------------------------------------------------- +# The following tests, incrblob2-8.*, test that nothing terrible happens +# when a statement transaction is rolled back while there are open +# incremental-blob handles. At one point an assert() was failing when +# this was attempted. +# +do_test incrblob2-8.1 { + execsql BEGIN + set h [db incrblob t2 B 1] + set rc [catch { + db eval {SELECT rowid FROM t2} { execsql "DROP TABLE t2" } + } msg] + list $rc $msg +} {1 {database table is locked}} +do_test incrblob2-8.2 { + close $h + execsql COMMIT +} {} +do_test incrblob2-8.3 { + execsql { + CREATE TABLE t3(a INTEGER UNIQUE, b TEXT); + INSERT INTO t3 VALUES(1, 'aaaaaaaaaaaaaaaaaaaa'); + INSERT INTO t3 VALUES(2, 'bbbbbbbbbbbbbbbbbbbb'); + INSERT INTO t3 VALUES(3, 'cccccccccccccccccccc'); + INSERT INTO t3 VALUES(4, 'dddddddddddddddddddd'); + INSERT INTO t3 VALUES(5, 'eeeeeeeeeeeeeeeeeeee'); + } +} {} +do_test incrblob2-8.4 { + execsql BEGIN + set h [db incrblob t3 b 3] + sqlite3_blob_read $h 0 20 +} {cccccccccccccccccccc} +do_test incrblob2-8.5 { + catchsql {UPDATE t3 SET a = 6 WHERE a > 3} +} {1 {column a is not unique}} +do_test incrblob2-8.6 { + catchsql {UPDATE t3 SET a = 6 WHERE a > 3} +} {1 {column a is not unique}} +do_test incrblob2-8.7 { + sqlite3_blob_read $h 0 20 +} {cccccccccccccccccccc} +do_test incrblob2-8.8 { + catchsql {UPDATE t3 SET a = 6 WHERE a = 3 OR a = 5} +} {1 {column a is not unique}} +do_test incrblob2-8.9 { + set rc [catch {sqlite3_blob_read $h 0 20} msg] + list $rc $msg +} {1 SQLITE_ABORT} +do_test incrblob2-8.X { + close $h +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrblob_err.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrblob_err.test --- sqlite3-3.4.2/test/incrblob_err.test 2007-05-17 17:38:30.000000000 +0100 +++ sqlite3-3.6.16/test/incrblob_err.test 2009-06-05 18:03:30.000000000 +0100 @@ -9,121 +9,20 @@ # #*********************************************************************** # -# $Id: incrblob_err.test,v 1.3 2007/05/17 16:38:30 danielk1977 Exp $ +# $Id: incrblob_err.test,v 1.14 2008/07/18 17:16:27 drh Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl -ifcapable {!incrblob} { +ifcapable {!incrblob || !memdebug || !tclvar} { finish_test return } -# Only run these tests if memory debugging is 
turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping incrblob_err tests: not compiled with -DSQLITE_MEMDEBUG..." - finish_test - return -} - -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test incrblob_err-$tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. 
- if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} +source $testdir/malloc_common.tcl +unset -nocomplain ::fd ::data set ::fd [open [info script]] set ::data [read $::fd] close $::fd @@ -173,7 +72,7 @@ error "out of memory" } } -sqlite_malloc_fail 0 + do_ioerr_test incrblob_err-4 -cksum 1 -sqlprep { CREATE TABLE blobs(k, v BLOB); @@ -202,4 +101,37 @@ close $::blob } +do_ioerr_test incrblob_err-7 -cksum 1 -sqlprep { + PRAGMA auto_vacuum = 1; + CREATE TABLE blobs(k INTEGER PRIMARY KEY, v BLOB); + INSERT INTO blobs VALUES(1, zeroblob(500 * 1020)); +} -tclbody { + # Read some data from the end of the large blob inserted into table + # "blobs". This forces the IO error to occur while reading a pointer + # map page for the purposes of seeking to the end of the blob. + # + sqlite3 db2 test.db + set ::blob [db2 incrblob blobs v 1] + sqlite3_blob_read $::blob [expr 500*1020-20] 20 + close $::blob +} +catch {db2 close} + +do_ioerr_test incrblob_err-8 -cksum 1 -sqlprep { + PRAGMA auto_vacuum = 1; + CREATE TABLE blobs(k INTEGER PRIMARY KEY, v BLOB); + INSERT INTO blobs VALUES(1, zeroblob(500 * 1020)); +} -tclbody { + # Read some data from the end of the large blob inserted into table + # "blobs". This forces the IO error to occur while reading a pointer + # map page for the purposes of seeking to the end of the blob. + # + sqlite3 db2 test.db + set ::blob [db2 incrblob blobs v 1] + sqlite3_blob_write $::blob [expr 500*1020-20] 12345678900987654321 + close $::blob +} + +catch {db2 close} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrblob.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrblob.test --- sqlite3-3.4.2/test/incrblob.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/incrblob.test 2009-06-25 12:45:58.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: incrblob.test,v 1.13 2007/08/12 20:07:59 drh Exp $ +# $Id: incrblob.test,v 1.24 2009/06/19 22:23:42 drh Exp $ # set testdir [file dirname $argv0] @@ -32,6 +32,7 @@ set ::blob [db incrblob blobs v 1] string match incrblob_* $::blob } {1} +unset -nocomplain data do_test incrblob-1.2.2 { binary scan [read $::blob] c* data set data @@ -83,7 +84,6 @@ close $::blob } {} - #------------------------------------------------------------------------ # incrblob-2.*: # @@ -96,12 +96,16 @@ # proc nRead {db} { set bt [btree_from_db $db] + db_enter $db array set stats [btree_pager_stats $bt] + db_leave $db return $stats(read) } proc nWrite {db} { set bt [btree_from_db $db] + db_enter $db array set stats [btree_pager_stats $bt] + db_leave $db return $stats(write) } @@ -255,6 +259,9 @@ # 4.8 - Attempt to open an indexed column for writing # 4.9 - Attempt to open an indexed column for reading (this works) # +# 4.11 - Attempt to open a column of a view. +# 4.12 - Attempt to open a column of a virtual table. 
+# do_test incrblob-4.1 { set rc [catch { set ::blob [db incrblob blobs v 2] @@ -323,37 +330,68 @@ set c } {1 2 3 4 5 6 7 8 9} +do_test incrblob-4.10 { + set ::blob [db incrblob -readonly blobs k 3] + set rc [catch { sqlite3_blob_read $::blob 10 100 } msg] + list $rc $msg +} {1 SQLITE_ERROR} +do_test incrblob-4.10.2 { + close $::blob +} {} + +ifcapable view { + do_test incrblob-4.11 { + execsql { CREATE VIEW blobs_view AS SELECT k, v, i FROM blobs } + set rc [catch { db incrblob blobs_view v 3 } msg] + list $rc $msg + } {1 {cannot open view: blobs_view}} +} +ifcapable vtab { + register_echo_module [sqlite3_connection_pointer db] + do_test incrblob-4.12 { + execsql { CREATE VIRTUAL TABLE blobs_echo USING echo(blobs) } + set rc [catch { db incrblob blobs_echo v 3 } msg] + list $rc $msg + } {1 {cannot open virtual table: blobs_echo}} +} + + #------------------------------------------------------------------------ # incrblob-5.*: # # Test that opening a blob in an attached database works. # -do_test incrblob-5.1 { - file delete -force test2.db test2.db-journal - set ::size [expr [file size [info script]]] - execsql { - ATTACH 'test2.db' AS aux; - CREATE TABLE aux.files(name, text); - INSERT INTO aux.files VALUES('this one', zeroblob($::size)); - } - set fd [db incrblob aux files text 1] - set fd2 [open [info script]] - puts -nonewline $fd [read $fd2] - close $fd - close $fd2 - set ::text [db one {select text from aux.files}] - string length $::text -} [file size [info script]] -do_test incrblob-5.2 { - set fd2 [open [info script]] - set ::data [read $fd2] - close $fd2 - set ::data -} $::text +ifcapable attach { + do_test incrblob-5.1 { + file delete -force test2.db test2.db-journal + set ::size [expr [file size [info script]]] + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.files(name, text); + INSERT INTO aux.files VALUES('this one', zeroblob($::size)); + } + set fd [db incrblob aux files text 1] + fconfigure $fd -translation binary + set fd2 [open [info script]] + fconfigure $fd2 -translation binary + puts -nonewline $fd [read $fd2] + close $fd + close $fd2 + set ::text [db one {select text from aux.files}] + string length $::text + } [file size [info script]] + do_test incrblob-5.2 { + set fd2 [open [info script]] + fconfigure $fd2 -translation binary + set ::data [read $fd2] + close $fd2 + set ::data + } $::text +} # free memory -unset ::data -unset ::text +unset -nocomplain ::data +unset -nocomplain ::text #------------------------------------------------------------------------ # incrblob-6.*: @@ -412,8 +450,10 @@ flush $::blob } {} -# At this point rollback or commit should be illegal (because -# there is an open blob channel). +# At this point rollback should be illegal (because +# there is an open blob channel). But commit is also illegal because +# the open blob is read-write. 
+# do_test incrblob-6.10 { catchsql { ROLLBACK; @@ -432,11 +472,13 @@ } {} do_test incrblob-6.13 { close $::blob - execsql { - COMMIT; - } db2 } {} do_test incrblob-6.14 { + catchsql { + COMMIT; + } db2 +} {0 {}} +do_test incrblob-6.15 { execsql { SELECT * FROM blobs WHERE rowid = 4; } @@ -513,7 +555,8 @@ } set fd [open [info script]] -set ::data [read $fd] +fconfigure $fd -translation binary +set ::data [read $fd 14000] close $fd db close @@ -527,6 +570,7 @@ INSERT INTO t1 VALUES(123, $::data); } set ::b [db incrblob -readonly t1 b 123] + fconfigure $::b -translation binary read $::b } $::data do_test incrblob-7.2.2 { @@ -549,6 +593,7 @@ INSERT INTO t2 VALUES(456, $::otherdata); } set ::b [db incrblob -readonly t2 b 456] + fconfigure $::b -translation binary read $::b } $::otherdata do_test incrblob-7.3.2 { @@ -577,4 +622,36 @@ sqlite3_errmsg db } {attempt to write a readonly database} +# Test that if either the "offset" or "amount" arguments to +# sqlite3_blob_write() are less than zero, SQLITE_ERROR is returned. +# +do_test incrblob-8.1 { + execsql { INSERT INTO t1 VALUES(314159, 'sqlite') } + set ::b [db incrblob t1 b 314159] + fconfigure $::b -translation binary + set rc [catch {sqlite3_blob_write $::b 10 HELLO -1} msg] + lappend rc $msg +} {1 SQLITE_ERROR} +do_test incrblob-8.2 { + sqlite3_errcode db +} {SQLITE_ERROR} +do_test incrblob-8.3 { + set rc [catch {sqlite3_blob_write $::b -1 HELLO 5} msg] + lappend rc $msg +} {1 SQLITE_ERROR} +do_test incrblob-8.4 { + sqlite3_errcode db +} {SQLITE_ERROR} +do_test incrblob-8.5 { + execsql {SELECT b FROM t1 WHERE a = 314159} +} {sqlite} +do_test incrblob-8.6 { + set rc [catch {sqlite3_blob_write $::b 0 etilqs 6} msg] + lappend rc $msg +} {0 {}} +do_test incrblob-8.7 { + execsql {SELECT b FROM t1 WHERE a = 314159} +} {etilqs} + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrvacuum2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrvacuum2.test --- sqlite3-3.4.2/test/incrvacuum2.test 2007-05-17 07:44:28.000000000 +0100 +++ sqlite3-3.6.16/test/incrvacuum2.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the incremental vacuum feature. # -# $Id: incrvacuum2.test,v 1.3 2007/05/17 06:44:28 danielk1977 Exp $ +# $Id: incrvacuum2.test,v 1.5 2008/05/07 07:13:16 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -33,6 +33,7 @@ db function zeroblob zeroblob } + # Create a database in incremental vacuum mode that has many # pages on the freelist. # @@ -76,50 +77,70 @@ # Make sure incremental vacuum works on attached databases. 
# -do_test incrvacuum2-2.1 { - file delete -force test2.db test2.db-journal - execsql { - ATTACH DATABASE 'test2.db' AS aux; - PRAGMA aux.auto_vacuum=incremental; - CREATE TABLE aux.t2(x); - INSERT INTO t2 VALUES(zeroblob(30000)); - INSERT INTO t1 SELECT * FROM t2; - DELETE FROM t2; - DELETE FROM t1; - } - list [file size test.db] [file size test2.db] -} {32768 32768} -do_test incrvacuum2-2.2 { - execsql { - PRAGMA aux.incremental_vacuum(1) - } - list [file size test.db] [file size test2.db] -} {32768 31744} -do_test incrvacuum2-2.3 { - execsql { - PRAGMA aux.incremental_vacuum(5) - } - list [file size test.db] [file size test2.db] -} {32768 26624} -do_test incrvacuum2-2.4 { - execsql { - PRAGMA main.incremental_vacuum(5) - } - list [file size test.db] [file size test2.db] -} {27648 26624} -do_test incrvacuum2-2.5 { +ifcapable attach { + do_test incrvacuum2-2.1 { + file delete -force test2.db test2.db-journal + execsql { + ATTACH DATABASE 'test2.db' AS aux; + PRAGMA aux.auto_vacuum=incremental; + CREATE TABLE aux.t2(x); + INSERT INTO t2 VALUES(zeroblob(30000)); + INSERT INTO t1 SELECT * FROM t2; + DELETE FROM t2; + DELETE FROM t1; + } + list [file size test.db] [file size test2.db] + } {32768 32768} + do_test incrvacuum2-2.2 { + execsql { + PRAGMA aux.incremental_vacuum(1) + } + list [file size test.db] [file size test2.db] + } {32768 31744} + do_test incrvacuum2-2.3 { + execsql { + PRAGMA aux.incremental_vacuum(5) + } + list [file size test.db] [file size test2.db] + } {32768 26624} + do_test incrvacuum2-2.4 { + execsql { + PRAGMA main.incremental_vacuum(5) + } + list [file size test.db] [file size test2.db] + } {27648 26624} + do_test incrvacuum2-2.5 { + execsql { + PRAGMA aux.incremental_vacuum + } + list [file size test.db] [file size test2.db] + } {27648 3072} + do_test incrvacuum2-2.6 { + execsql { + PRAGMA incremental_vacuum(1) + } + list [file size test.db] [file size test2.db] + } {26624 3072} +} + +do_test incrvacuum2-3.1 { execsql { - PRAGMA aux.incremental_vacuum + PRAGMA auto_vacuum = 'full'; + BEGIN; + CREATE TABLE abc(a); + INSERT INTO abc VALUES(randstr(1500,1500)); + COMMIT; } - list [file size test.db] [file size test2.db] -} {27648 3072} -do_test incrvacuum2-2.6 { +} {} +do_test incrvacuum2-3.2 { execsql { - PRAGMA incremental_vacuum(1) + BEGIN; + DELETE FROM abc; + PRAGMA incremental_vacuum; + COMMIT; } - list [file size test.db] [file size test2.db] -} {26624 3072} +} {} - +integrity_check incremental2-3.3 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrvacuum_ioerr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrvacuum_ioerr.test --- sqlite3-3.4.2/test/incrvacuum_ioerr.test 2007-05-04 19:30:41.000000000 +0100 +++ sqlite3-3.6.16/test/incrvacuum_ioerr.test 2009-06-05 18:03:30.000000000 +0100 @@ -15,7 +15,7 @@ # The tests in this file use special facilities that are only # available in the SQLite test fixture. 
# -# $Id: incrvacuum_ioerr.test,v 1.2 2007/05/04 18:30:41 drh Exp $ +# $Id: incrvacuum_ioerr.test,v 1.6 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -37,7 +37,7 @@ DELETE FROM abc; PRAGMA incremental_vacuum; COMMIT; -} +} # do_ioerr_test incrvacuum-ioerr-3 -start 1 -cksum 1 -tclprep { # db eval { @@ -60,7 +60,6 @@ # COMMIT; # } - do_ioerr_test incrvacuum-ioerr-2 -start 1 -cksum 1 -tclprep { db eval { PRAGMA auto_vacuum = 'full'; @@ -86,4 +85,97 @@ PRAGMA incremental_vacuum; COMMIT; } + +do_ioerr_test incrvacuum-ioerr-3 -start 1 -cksum 1 -tclprep { + db eval { + PRAGMA auto_vacuum = 'incremental'; + BEGIN; + CREATE TABLE a(i integer, b blob); + INSERT INTO a VALUES(1, randstr(1500,1500)); + INSERT INTO a VALUES(2, randstr(1500,1500)); + } + db eval COMMIT + db eval {DELETE FROM a WHERE oid} +} -sqlbody { + PRAGMA incremental_vacuum(5); +} -cleanup { + sqlite3 db test.db + integrity_check incrvacuum-ioerr-2.$n.integritycheck + db close +} + + +ifcapable shared_cache { + + catch { db close } + file delete -force test.db + set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + + # Create two connections to a single shared-cache: + # + sqlite3 db1 test.db + sqlite3 db2 test.db + + # Create a database with around 20 free pages. + # + do_test incrvacuum-ioerr-4.0 { + execsql { + PRAGMA page_size = 1024; + PRAGMA locking_mode = exclusive; + PRAGMA auto_vacuum = 'incremental'; + BEGIN; + CREATE TABLE a(i integer, b blob); + } db1 + for {set ii 0} {$ii < 20} {incr ii} { + execsql { INSERT INTO a VALUES($ii, randstr(800,1500)); } db1 + } + execsql COMMIT db1 + execsql {DELETE FROM a WHERE oid} db1 + } {} + + set ::rc 1 + for {set iTest 1} {$::rc && $iTest<2000} {incr iTest} { + + # Figure out how big the database is and how many free pages it + # has before running incremental-vacuum. + # + set nPage [expr {[file size test.db]/1024}] + set nFree [execsql {pragma freelist_count} db1] + + # Now run incremental-vacuum to vacuum 5 pages from the db file. + # The iTest'th I/O call is set to fail. + # + set ::sqlite_io_error_pending $iTest + set ::sqlite_io_error_persist 1 + do_test incrvacuum-ioerr-4.$iTest.1 { + set ::rc [catch {execsql {pragma incremental_vacuum(5)} db1} msg] + expr {$::rc==0 || $msg eq "disk I/O error"} + } {1} + + set ::sqlite_io_error_pending 0 + set ::sqlite_io_error_persist 0 + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_hardhit 0 + + set nFree2 [execsql {pragma freelist_count} db1] + set nPage2 [expr {[file size test.db]/1024}] + + do_test incrvacuum-ioerr-4.$iTest.2 { + set shrink [expr {$nPage-$nPage2}] + expr {$shrink==0 || $shrink==5} + } {1} + + do_test incrvacuum-ioerr-4.$iTest.3 { + expr {$nPage - $nPage2} + } [expr {$nFree - $nFree2}] + } + + # Close the two database connections and restore the default + # shared-cache mode setting. + # + db1 close + db2 close + sqlite3_enable_shared_cache $::enable_shared_cache +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/incrvacuum.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/incrvacuum.test --- sqlite3-3.4.2/test/incrvacuum.test 2007-08-10 17:41:09.000000000 +0100 +++ sqlite3-3.6.16/test/incrvacuum.test 2009-06-12 03:37:53.000000000 +0100 @@ -14,7 +14,7 @@ # Note: There are also some tests for incremental vacuum and IO # errors in incrvacuum_ioerr.test. 
# -# $Id: incrvacuum.test,v 1.13 2007/08/10 16:41:09 drh Exp $ +# $Id: incrvacuum.test,v 1.23 2009/02/18 20:31:18 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -35,7 +35,10 @@ } } $sqlite_options(default_autovacuum) do_test incrvacuum-1.2.0 { - expr {[file size test.db] > 0} + # File size is sometimes 1 instead of 0 due to the hack we put in + # to work around ticket #3260. Search for comments on #3260 in + # os_unix.c. + expr {[file size test.db] > 1} } {0} do_test incrvacuum-1.2 { # This command will create the database. @@ -298,6 +301,11 @@ } ] +# If this build omits subqueries, step 2 in the above list will not +# work. Replace it with "" in this case. +# +ifcapable !subquery { lset TestScriptList 2 "" } + # Compare the contents of databases $A and $B. # proc compare_dbs {A B tname} { @@ -545,7 +553,7 @@ do_test incrvacuum-10.5 { execsql { - PRAGMA incremental_vacuum("3"); + PRAGMA incremental_vacuum("+3"); } expr [file size test.db] / 1024 } {19} @@ -558,8 +566,23 @@ } {18} do_test incrvacuum-10.7 { + # Use a really big number as an argument to incremetal_vacuum. Should + # be interpreted as "free all possible space". execsql { - PRAGMA incremental_vacuum(0); + PRAGMA incremental_vacuum(2147483649); + } + expr [file size test.db] / 1024 +} {1} + +do_test incrvacuum-10.8 { + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(hex(randomblob(1000))); + DROP TABLE t1; + } + # A negative number means free all possible space. + execsql { + PRAGMA incremental_vacuum=-1; } expr [file size test.db] / 1024 } {1} @@ -578,7 +601,7 @@ execsql { PRAGMA auto_vacuum; } - } {1} + } $AUTOVACUUM } else { do_test incrvacuum-11.1-av-dflt-off { execsql { @@ -650,9 +673,14 @@ do_test incrvacuum-12.3 { execsql { ROLLBACK; } db2 execsql { PRAGMA auto_vacuum } -} {1} +} {2} ;# Still 2 because PRAGMA auto_vacuum setting held in case of vacuum +do_test incrvacuum-12.4 { + db close + sqlite3 db test.db + execsql { PRAGMA auto_vacuum } +} {1} ;# Revert to 1 because the database file did not change -do_test incrvacuum-12.3 { +do_test incrvacuum-12.5 { execsql { SELECT * FROM sqlite_master } execsql { PRAGMA auto_vacuum } } {1} @@ -669,7 +697,10 @@ sqlite3 db2 test.db do_test incrvacuum-13.1 { - expr {[file size test.db]>0} + # File size is sometimes 1 instead of 0 due to the hack we put in + # to work around ticket #3260. Search for comments on #3260 in + # os_unix.c. + expr {[file size test.db]>1} } {0} do_test incrvacuum-13.2 { set ::STMT [sqlite3_prepare $::DB {PRAGMA auto_vacuum = 2} -1 DUMMY] @@ -692,5 +723,62 @@ } } {0} -db2 close + +# Verify that the incremental_vacuum pragma fails gracefully if it +# is used against an invalid database file. 
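The incremental-vacuum behaviour exercised above can also be driven from an ordinary application script. A minimal sketch, assuming only the standard sqlite3 Tcl package and a database that was already created with auto_vacuum set to incremental; the file name demo.db and the 4-page step size are hypothetical, not taken from the tests:

    package require sqlite3
    sqlite3 dbh demo.db
    # Free pages a few at a time; PRAGMA freelist_count reports how many remain.
    while {[dbh eval {PRAGMA freelist_count}] > 0} {
        # Per the tests above, a very large or a negative argument instead asks
        # SQLite to free every page it can in one step.
        dbh eval {PRAGMA incremental_vacuum(4)}
    }
    dbh close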
+# +do_test incrvacuum-14.1 { + set out [open invalid.db w] + puts $out "This is not an SQLite database file" + close $out + sqlite3 db3 invalid.db + catchsql { + PRAGMA incremental_vacuum(10); + } db3 +} {1 {file is encrypted or is not a database}} + +do_test incrvacuum-15.1 { + db close + db2 close + file delete -force test.db + sqlite3 db test.db + + set str [string repeat "abcdefghij" 500] + + execsql { + PRAGMA cache_size = 10; + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(x, y); + INSERT INTO t1 VALUES('a', $str); + INSERT INTO t1 VALUES('b', $str); + INSERT INTO t1 VALUES('c', $str); + INSERT INTO t1 VALUES('d', $str); + INSERT INTO t1 VALUES('e', $str); + INSERT INTO t1 VALUES('f', $str); + INSERT INTO t1 VALUES('g', $str); + INSERT INTO t1 VALUES('h', $str); + INSERT INTO t1 VALUES('i', $str); + INSERT INTO t1 VALUES('j', $str); + INSERT INTO t1 VALUES('j', $str); + + CREATE TABLE t2(x PRIMARY KEY, y); + INSERT INTO t2 VALUES('a', $str); + INSERT INTO t2 VALUES('b', $str); + INSERT INTO t2 VALUES('c', $str); + INSERT INTO t2 VALUES('d', $str); + + BEGIN; + DELETE FROM t2; + PRAGMA incremental_vacuum; + } + + catchsql {INSERT INTO t2 SELECT * FROM t1} + + execsql { + COMMIT; + PRAGMA integrity_check; + } +} {ok} + +db3 close finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/index3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/index3.test --- sqlite3-3.4.2/test/index3.test 2007-03-27 15:43:05.000000000 +0100 +++ sqlite3-3.6.16/test/index3.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the CREATE INDEX statement. # -# $Id: index3.test,v 1.2 2005/08/20 03:03:04 drh Exp $ +# $Id: index3.test,v 1.3 2008/03/19 13:03:34 drh Exp $ set testdir [file dirname $argv0] @@ -53,6 +53,6 @@ catchsql { DROP INDEX i1; } -} {1 {malformed database schema - near "nonsense": syntax error}} +} {1 {malformed database schema (t1) - near "nonsense": syntax error}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/indexedby.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/indexedby.test --- sqlite3-3.4.2/test/indexedby.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/indexedby.test 2009-06-12 03:37:53.000000000 +0100 @@ -0,0 +1,254 @@ +# 2008 October 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: indexedby.test,v 1.5 2009/03/22 20:36:19 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a schema with some indexes. +# +do_test indexedby-1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + + CREATE TABLE t2(c, d); + CREATE INDEX i3 ON t2(c); + CREATE INDEX i4 ON t2(d); + + CREATE TABLE t3(e PRIMARY KEY, f); + + CREATE VIEW v1 AS SELECT * FROM t1; + } +} {} + +# Explain Query Plan +# +proc EQP {sql} { + uplevel "execsql {EXPLAIN QUERY PLAN $sql}" +} + +# These tests are to check that "EXPLAIN QUERY PLAN" is working as expected. 
+# +do_test indexedby-1.2 { + EQP { select * from t1 WHERE a = 10; } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-1.3 { + EQP { select * from t1 ; } +} {0 0 {TABLE t1}} +do_test indexedby-1.4 { + EQP { select * from t1, t2 WHERE c = 10; } +} {0 1 {TABLE t2 WITH INDEX i3} 1 0 {TABLE t1}} + +# Parser tests. Test that an INDEXED BY or NOT INDEX clause can be +# attached to a table in the FROM clause, but not to a sub-select or +# SQL view. Also test that specifying an index that does not exist or +# is attached to a different table is detected as an error. +# +do_test indexedby-2.1 { + execsql { SELECT * FROM t1 NOT INDEXED WHERE a = 'one' AND b = 'two'} +} {} +do_test indexedby-2.2 { + execsql { SELECT * FROM t1 INDEXED BY i1 WHERE a = 'one' AND b = 'two'} +} {} +do_test indexedby-2.3 { + execsql { SELECT * FROM t1 INDEXED BY i2 WHERE a = 'one' AND b = 'two'} +} {} + +do_test indexedby-2.4 { + catchsql { SELECT * FROM t1 INDEXED BY i3 WHERE a = 'one' AND b = 'two'} +} {1 {no such index: i3}} +do_test indexedby-2.5 { + catchsql { SELECT * FROM t1 INDEXED BY i5 WHERE a = 'one' AND b = 'two'} +} {1 {no such index: i5}} +do_test indexedby-2.6 { + catchsql { SELECT * FROM t1 INDEXED BY WHERE a = 'one' AND b = 'two'} +} {1 {near "WHERE": syntax error}} +do_test indexedby-2.7 { + catchsql { SELECT * FROM v1 INDEXED BY i1 WHERE a = 'one' } +} {1 {no such index: i1}} + +# Tests for single table cases. +# +do_test indexedby-3.1 { + EQP { SELECT * FROM t1 NOT INDEXED WHERE a = 'one' AND b = 'two'} +} {0 0 {TABLE t1}} +do_test indexedby-3.2 { + EQP { SELECT * FROM t1 INDEXED BY i1 WHERE a = 'one' AND b = 'two'} +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-3.3 { + EQP { SELECT * FROM t1 INDEXED BY i2 WHERE a = 'one' AND b = 'two'} +} {0 0 {TABLE t1 WITH INDEX i2}} +do_test indexedby-3.4 { + catchsql { SELECT * FROM t1 INDEXED BY i2 WHERE a = 'one' } +} {1 {cannot use index: i2}} +do_test indexedby-3.5 { + catchsql { SELECT * FROM t1 INDEXED BY i2 ORDER BY a } +} {1 {cannot use index: i2}} +do_test indexedby-3.6 { + catchsql { SELECT * FROM t1 INDEXED BY i1 WHERE a = 'one' } +} {0 {}} +do_test indexedby-3.7 { + catchsql { SELECT * FROM t1 INDEXED BY i1 ORDER BY a } +} {0 {}} + +do_test indexedby-3.8 { + EQP { SELECT * FROM t3 INDEXED BY sqlite_autoindex_t3_1 ORDER BY e } +} {0 0 {TABLE t3 WITH INDEX sqlite_autoindex_t3_1 ORDER BY}} +do_test indexedby-3.9 { + EQP { SELECT * FROM t3 INDEXED BY sqlite_autoindex_t3_1 WHERE e = 10 } +} {0 0 {TABLE t3 WITH INDEX sqlite_autoindex_t3_1}} +do_test indexedby-3.10 { + catchsql { SELECT * FROM t3 INDEXED BY sqlite_autoindex_t3_1 WHERE f = 10 } +} {1 {cannot use index: sqlite_autoindex_t3_1}} +do_test indexedby-3.11 { + catchsql { SELECT * FROM t3 INDEXED BY sqlite_autoindex_t3_2 WHERE f = 10 } +} {1 {no such index: sqlite_autoindex_t3_2}} + +# Tests for multiple table cases. +# +do_test indexedby-4.1 { + EQP { SELECT * FROM t1, t2 WHERE a = c } +} {0 0 {TABLE t1} 1 1 {TABLE t2 WITH INDEX i3}} +do_test indexedby-4.2 { + EQP { SELECT * FROM t1 INDEXED BY i1, t2 WHERE a = c } +} {0 1 {TABLE t2} 1 0 {TABLE t1 WITH INDEX i1}} + +# Test embedding an INDEXED BY in a CREATE VIEW statement. This block +# also tests that nothing bad happens if an index refered to by +# a CREATE VIEW statement is dropped and recreated. 
+# +do_test indexedby-5.1 { + execsql { + CREATE VIEW v2 AS SELECT * FROM t1 INDEXED BY i1 WHERE a > 5; + } + EQP { SELECT * FROM v2 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-5.2 { + EQP { SELECT * FROM v2 WHERE b = 10 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-5.3 { + execsql { DROP INDEX i1 } + catchsql { SELECT * FROM v2 } +} {1 {no such index: i1}} +do_test indexedby-5.4 { + # Recreate index i1 in such a way as it cannot be used by the view query. + execsql { CREATE INDEX i1 ON t1(b) } + catchsql { SELECT * FROM v2 } +} {1 {cannot use index: i1}} +do_test indexedby-5.5 { + # Drop and recreate index i1 again. This time, create it so that it can + # be used by the query. + execsql { DROP INDEX i1 ; CREATE INDEX i1 ON t1(a) } + catchsql { SELECT * FROM v2 } +} {0 {}} + +# Test that "NOT INDEXED" may use the rowid index, but not others. +# +do_test indexedby-6.1 { + EQP { SELECT * FROM t1 WHERE b = 10 ORDER BY rowid } +} {0 0 {TABLE t1 WITH INDEX i2 ORDER BY}} +do_test indexedby-6.2 { + EQP { SELECT * FROM t1 NOT INDEXED WHERE b = 10 ORDER BY rowid } +} {0 0 {TABLE t1 USING PRIMARY KEY ORDER BY}} + +# Test that "INDEXED BY" can be used in a DELETE statement. +# +do_test indexedby-7.1 { + EQP { DELETE FROM t1 WHERE a = 5 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-7.2 { + EQP { DELETE FROM t1 NOT INDEXED WHERE a = 5 } +} {0 0 {TABLE t1}} +do_test indexedby-7.3 { + EQP { DELETE FROM t1 INDEXED BY i1 WHERE a = 5 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-7.4 { + EQP { DELETE FROM t1 INDEXED BY i1 WHERE a = 5 AND b = 10} +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-7.5 { + EQP { DELETE FROM t1 INDEXED BY i2 WHERE a = 5 AND b = 10} +} {0 0 {TABLE t1 WITH INDEX i2}} +do_test indexedby-7.6 { + catchsql { DELETE FROM t1 INDEXED BY i2 WHERE a = 5} +} {1 {cannot use index: i2}} + +# Test that "INDEXED BY" can be used in an UPDATE statement. +# +do_test indexedby-8.1 { + EQP { UPDATE t1 SET rowid=rowid+1 WHERE a = 5 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-8.2 { + EQP { UPDATE t1 NOT INDEXED SET rowid=rowid+1 WHERE a = 5 } +} {0 0 {TABLE t1}} +do_test indexedby-8.3 { + EQP { UPDATE t1 INDEXED BY i1 SET rowid=rowid+1 WHERE a = 5 } +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-8.4 { + EQP { UPDATE t1 INDEXED BY i1 SET rowid=rowid+1 WHERE a = 5 AND b = 10} +} {0 0 {TABLE t1 WITH INDEX i1}} +do_test indexedby-8.5 { + EQP { UPDATE t1 INDEXED BY i2 SET rowid=rowid+1 WHERE a = 5 AND b = 10} +} {0 0 {TABLE t1 WITH INDEX i2}} +do_test indexedby-8.6 { + catchsql { UPDATE t1 INDEXED BY i2 SET rowid=rowid+1 WHERE a = 5} +} {1 {cannot use index: i2}} + +# Test that bug #3560 is fixed. +# +do_test indexedby-9.1 { + execsql { + CREATE TABLE maintable( id integer); + CREATE TABLE joinme(id_int integer, id_text text); + CREATE INDEX joinme_id_text_idx on joinme(id_text); + CREATE INDEX joinme_id_int_idx on joinme(id_int); + } +} {} +do_test indexedby-9.2 { + catchsql { + select * from maintable as m inner join + joinme as j indexed by joinme_id_text_idx + on ( m.id = j.id_int) + } +} {1 {cannot use index: joinme_id_text_idx}} +do_test indexedby-9.3 { + catchsql { select * from maintable, joinme INDEXED by joinme_id_text_idx } +} {1 {cannot use index: joinme_id_text_idx}} + +# Make sure we can still create tables, indices, and columns whose name +# is "indexed". 
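For reference, a minimal sketch of the syntax these tests exercise, written against hypothetical table and index names rather than the schema above. An INDEXED BY clause only constrains the query planner - it fails with "no such index" or "cannot use index" as shown above - and never changes which rows are returned, while NOT INDEXED still permits rowid / INTEGER PRIMARY KEY lookups:

    -- Require the planner to use idx_logs_user for this query, or fail outright.
    SELECT * FROM logs INDEXED BY idx_logs_user WHERE user_id = 42;
    -- Forbid the named indexes on the table for this query.
    SELECT * FROM logs NOT INDEXED WHERE user_id = 42;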
+# +do_test indexedby-10.1 { + execsql { + CREATE TABLE indexed(x,y); + INSERT INTO indexed VALUES(1,2); + SELECT * FROM indexed; + } +} {1 2} +do_test indexedby-10.2 { + execsql { + CREATE INDEX i10 ON indexed(x); + SELECT * FROM indexed indexed by i10 where x>0; + } +} {1 2} +do_test indexedby-10.3 { + execsql { + DROP TABLE indexed; + CREATE TABLE t10(indexed INTEGER); + INSERT INTO t10 VALUES(1); + CREATE INDEX indexed ON t10(indexed); + SELECT * FROM t10 indexed by indexed WHERE indexed>0 + } +} {1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/index.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/index.test --- sqlite3-3.4.2/test/index.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/index.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the CREATE INDEX statement. # -# $Id: index.test,v 1.42 2006/03/29 00:24:07 drh Exp $ +# $Id: index.test,v 1.43 2008/01/16 18:20:42 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -114,7 +114,7 @@ do_test index-4.1 { execsql {CREATE TABLE test1(cnt int, power int)} for {set i 1} {$i<20} {incr i} { - execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + execsql "INSERT INTO test1 VALUES($i,[expr {1<<$i}])" } execsql {CREATE INDEX index9 ON test1(cnt)} execsql {CREATE INDEX indext ON test1(power)} @@ -221,7 +221,7 @@ do_test index-7.1 { execsql {CREATE TABLE test1(f1 int, f2 int primary key)} for {set i 1} {$i<20} {incr i} { - execsql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + execsql "INSERT INTO test1 VALUES($i,[expr {1<<$i}])" } execsql {SELECT count(*) FROM test1} } {19} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/insert2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/insert2.test --- sqlite3-3.4.2/test/insert2.test 2007-03-27 15:43:05.000000000 +0100 +++ sqlite3-3.6.16/test/insert2.test 2009-06-05 18:03:30.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the INSERT statement that takes is # result from a SELECT. # -# $Id: insert2.test,v 1.18 2005/10/05 11:35:09 drh Exp $ +# $Id: insert2.test,v 1.19 2008/01/16 18:20:42 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -22,7 +22,7 @@ do_test insert2-1.0 { execsql {CREATE TABLE d1(n int, log int);} for {set i 1} {$i<=20} {incr i} { - for {set j 0} {pow(2,$j)<$i} {incr j} {} + for {set j 0} {(1<<$j)<$i} {incr j} {} execsql "INSERT INTO d1 VALUES($i,$j)" } execsql {SELECT * FROM d1 ORDER BY n} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/insert3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/insert3.test --- sqlite3-3.4.2/test/insert3.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/insert3.test 2009-06-25 12:24:39.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing corner cases of the INSERT statement. 
# -# $Id: insert3.test,v 1.6 2007/04/16 17:07:55 drh Exp $ +# $Id: insert3.test,v 1.9 2009/04/23 14:58:40 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -156,13 +156,50 @@ SELECT * FROM t5; } } {1 xyz 2 xyz} -do_test insert3-3.7 { + +ifcapable bloblit { + do_test insert3-3.7 { + execsql { + CREATE TABLE t6(x,y DEFAULT 4.3, z DEFAULT x'6869'); + INSERT INTO t6 DEFAULT VALUES; + SELECT * FROM t6; + } + } {{} 4.3 hi} +} + +foreach tab [db eval {SELECT name FROM sqlite_master WHERE type = 'table'}] { + db eval "DROP TABLE $tab" +} +db close +sqlite3 db test.db + +#------------------------------------------------------------------------- +# While developing tests for a different feature (savepoint) the following +# sequence was found to cause an assert() in btree.c to fail. These +# tests are included to ensure that that bug is fixed. +# +do_test insert3-4.1 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(a, b); + BEGIN; + INSERT INTO t1 VALUES(randstr(10,400),randstr(10,400),randstr(10,400)); + } + set r "randstr(10,400)" + for {set ii 0} {$ii < 10} {incr ii} { + execsql "INSERT INTO t1 SELECT $r, $r, $r FROM t1" + } + execsql { COMMIT } +} {} +do_test insert3-4.2 { execsql { - CREATE TABLE t6(x,y DEFAULT 4.3, z DEFAULT x'6869'); - INSERT INTO t6 DEFAULT VALUES; - SELECT * FROM t6; + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET a = randstr(10,10) WHERE (rowid%4)==0; + DELETE FROM t1 WHERE rowid%2; + INSERT INTO t1 SELECT randstr(10,400), randstr(10,400), c FROM t1; + COMMIT; } -} {{} 4.3 hi} -db close +} {} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/insert4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/insert4.test --- sqlite3-3.4.2/test/insert4.test 2007-04-12 22:25:02.000000000 +0100 +++ sqlite3-3.6.16/test/insert4.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,11 +11,16 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the INSERT transfer optimization. # -# $Id: insert4.test,v 1.5 2007/04/12 21:25:02 drh Exp $ +# $Id: insert4.test,v 1.10 2008/01/21 16:22:46 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !view||!subquery { + finish_test + return +} + # The sqlite3_xferopt_count variable is incremented whenever the # insert transfer optimization applies. # @@ -29,6 +34,7 @@ # Create tables used for testing. # execsql { + PRAGMA legacy_file_format = 0; CREATE TABLE t1(a int, b int, check(b>a)); CREATE TABLE t2(x int, y int); CREATE VIEW v2 AS SELECT y, x FROM t2; @@ -106,7 +112,7 @@ INSERT INTO t3 SELECT DISTINCT * FROM t2; SELECT * FROM t3; } -} {9 1 1 9} +} {1 9 9 1} xferopt_test insert4-2.4.2 0 do_test insert4-2.4.3 { catchsql { @@ -226,17 +232,75 @@ {a int, b int} \ {x integer, b int} - # Ticket #2291. # -do_test insert4-4.1 { + +do_test insert4-4.1a { + execsql {CREATE TABLE t4(a, b, UNIQUE(a,b))} +} {} +ifcapable vacuum { + do_test insert4-4.1b { + execsql { + INSERT INTO t4 VALUES(NULL,0); + INSERT INTO t4 VALUES(NULL,1); + INSERT INTO t4 VALUES(NULL,1); + VACUUM; + } + } {} +} + +# Check some error conditions: +# +do_test insert4-5.1 { + # Table does not exist. + catchsql { INSERT INTO t2 SELECT * FROM nosuchtable } +} {1 {no such table: nosuchtable}} +do_test insert4-5.2 { + # Number of columns does not match. 
+ catchsql { + CREATE TABLE t5(a, b, c); + INSERT INTO t4 SELECT * FROM t5; + } +} {1 {table t4 has 2 columns but 3 values were supplied}} + +do_test insert4-6.1 { + set ::sqlite3_xferopt_count 0 + execsql { + CREATE INDEX t2_i2 ON t2(x, y COLLATE nocase); + CREATE INDEX t2_i1 ON t2(x ASC, y DESC); + CREATE INDEX t3_i1 ON t3(a, b); + INSERT INTO t2 SELECT * FROM t3; + } + set ::sqlite3_xferopt_count +} {0} +do_test insert4-6.2 { + set ::sqlite3_xferopt_count 0 execsql { - CREATE TABLE t4(a, b, UNIQUE(a,b)); - INSERT INTO t4 VALUES(NULL,0); - INSERT INTO t4 VALUES(NULL,1); - INSERT INTO t4 VALUES(NULL,1); - VACUUM; + DROP INDEX t2_i2; + INSERT INTO t2 SELECT * FROM t3; } -} {} + set ::sqlite3_xferopt_count +} {0} +do_test insert4-6.3 { + set ::sqlite3_xferopt_count 0 + execsql { + DROP INDEX t2_i1; + CREATE INDEX t2_i1 ON t2(x ASC, y ASC); + INSERT INTO t2 SELECT * FROM t3; + } + set ::sqlite3_xferopt_count +} {1} +do_test insert4-6.4 { + set ::sqlite3_xferopt_count 0 + execsql { + DROP INDEX t2_i1; + CREATE INDEX t2_i1 ON t2(x ASC, y COLLATE RTRIM); + INSERT INTO t2 SELECT * FROM t3; + } + set ::sqlite3_xferopt_count +} {0} + + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/insert5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/insert5.test --- sqlite3-3.4.2/test/insert5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/insert5.test 2009-06-05 18:03:30.000000000 +0100 @@ -0,0 +1,117 @@ +# 2007 November 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The tests in this file ensure that a temporary table is used +# when required by an "INSERT INTO ... SELECT ..." statement. +# +# $Id: insert5.test,v 1.5 2008/08/04 03:51:24 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +# Return true if the compilation of the sql passed as an argument +# includes the opcode OpenEphemeral. An "INSERT INTO ... SELECT" +# statement includes such an opcode if a temp-table is used +# to store intermediate results. +# +proc uses_temp_table {sql} { + return [expr {[lsearch [execsql "EXPLAIN $sql"] OpenEphemeral]>=0}] +} + +# Construct the sample database. +# +do_test insert5-1.0 { + file delete -force test2.db test2.db-journal + execsql { + CREATE TABLE MAIN(Id INTEGER, Id1 INTEGER); + CREATE TABLE B(Id INTEGER, Id1 INTEGER); + CREATE VIEW v1 AS SELECT * FROM B; + CREATE VIEW v2 AS SELECT * FROM MAIN; + INSERT INTO MAIN(Id,Id1) VALUES(2,3); + INSERT INTO B(Id,Id1) VALUES(2,3); + } +} {} + +# Run the query. 
+# +ifcapable compound { + do_test insert5-1.1 { + execsql { + INSERT INTO B + SELECT * FROM B UNION ALL + SELECT * FROM MAIN WHERE exists (select * FROM B WHERE B.Id = MAIN.Id); + SELECT * FROM B; + } + } {2 3 2 3 2 3} +} else { + do_test insert5-1.1 { + execsql { + INSERT INTO B SELECT * FROM B; + INSERT INTO B + SELECT * FROM MAIN WHERE exists (select * FROM B WHERE B.Id = MAIN.Id); + SELECT * FROM B; + } + } {2 3 2 3 2 3} +} +do_test insert5-2.1 { + uses_temp_table { INSERT INTO b SELECT * FROM main } +} {0} +do_test insert5-2.2 { + uses_temp_table { INSERT INTO b SELECT * FROM b } +} {1} +do_test insert5-2.3 { + uses_temp_table { INSERT INTO b SELECT (SELECT id FROM b), id1 FROM main } +} {1} +do_test insert5-2.4 { + uses_temp_table { INSERT INTO b SELECT id1, (SELECT id FROM b) FROM main } +} {1} +do_test insert5-2.5 { + uses_temp_table { + INSERT INTO b + SELECT * FROM main WHERE id = (SELECT id1 FROM b WHERE main.id = b.id) } +} {1} +do_test insert5-2.6 { + uses_temp_table { INSERT INTO b SELECT * FROM v1 } +} {1} +do_test insert5-2.7 { + uses_temp_table { INSERT INTO b SELECT * FROM v2 } +} {0} +do_test insert5-2.8 { + uses_temp_table { + INSERT INTO b + SELECT * FROM main WHERE id > 10 AND max(id1, (SELECT id FROM b)) > 10; + } +} {1} + +# UPDATE: Using a column from the outer query (main.id) in the GROUP BY +# or ORDER BY of a sub-query is no longer supported. +# +# do_test insert5-2.9 { +# uses_temp_table { +# INSERT INTO b +# SELECT * FROM main +# WHERE id > 10 AND (SELECT count(*) FROM v2 GROUP BY main.id) +# } +# } {} +do_test insert5-2.9 { + catchsql { + INSERT INTO b + SELECT * FROM main + WHERE id > 10 AND (SELECT count(*) FROM v2 GROUP BY main.id) + } +} {1 {no such column: main.id}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/interrupt.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/interrupt.test --- sqlite3-3.4.2/test/interrupt.test 2007-06-13 18:58:52.000000000 +0100 +++ sqlite3-3.6.16/test/interrupt.test 2009-06-05 19:43:17.000000000 +0100 @@ -11,28 +11,13 @@ # This file implements regression tests for SQLite library. The # focus of this script is the sqlite_interrupt() API. # -# $Id: interrupt.test,v 1.15 2007/06/13 16:49:49 danielk1977 Exp $ +# $Id: interrupt.test,v 1.16 2008/01/16 17:46:38 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl set DB [sqlite3_connection_pointer db] -# Compute a checksum on the entire database. -# -proc cksum {{db db}} { - set txt [$db eval {SELECT name, type, sql FROM sqlite_master}]\n - foreach tbl [$db eval {SELECT name FROM sqlite_master WHERE type='table'}] { - append txt [$db eval "SELECT * FROM $tbl"]\n - } - foreach prag {default_synchronous default_cache_size} { - append txt $prag-[$db eval "PRAGMA $prag"]\n - } - set cksum [string length $txt]-[md5 $txt] - # puts $cksum-[file size test.db] - return $cksum -} - # This routine attempts to execute the sql in $sql. It triggers an # interrupt at progressively later and later points during the processing # and checks to make sure SQLITE_INTERRUPT is returned. Eventually, diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/in.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/in.test --- sqlite3-3.4.2/test/in.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/in.test 2009-06-05 18:03:30.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the IN and BETWEEN operator. 
# -# $Id: in.test,v 1.17 2006/05/23 23:25:10 drh Exp $ +# $Id: in.test,v 1.22 2008/08/04 03:51:24 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -24,7 +24,7 @@ CREATE TABLE t1(a int, b int); } for {set i 1} {$i<=10} {incr i} { - execsql "INSERT INTO t1 VALUES($i,[expr {int(pow(2,$i))}])" + execsql "INSERT INTO t1 VALUES($i,[expr {1<<$i}])" } execsql { COMMIT; @@ -364,4 +364,217 @@ } } {} +# Test error conditions with expressions of the form IN(). +# +ifcapable compound { +do_test in-12.1 { + execsql { + CREATE TABLE t2(a, b, c); + CREATE TABLE t3(a, b, c); + } +} {} +do_test in-12.2 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a, b FROM t3 UNION ALL SELECT a, b FROM t2 + ); + } +} {1 {only a single result allowed for a SELECT that is part of an expression}} +do_test in-12.3 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a, b FROM t3 UNION SELECT a, b FROM t2 + ); + } +} {1 {only a single result allowed for a SELECT that is part of an expression}} +do_test in-12.4 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a, b FROM t3 EXCEPT SELECT a, b FROM t2 + ); + } +} {1 {only a single result allowed for a SELECT that is part of an expression}} +do_test in-12.5 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a, b FROM t3 INTERSECT SELECT a, b FROM t2 + ); + } +} {1 {only a single result allowed for a SELECT that is part of an expression}} +do_test in-12.6 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a FROM t3 UNION ALL SELECT a, b FROM t2 + ); + } +} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test in-12.7 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a FROM t3 UNION SELECT a, b FROM t2 + ); + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} +do_test in-12.8 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a FROM t3 EXCEPT SELECT a, b FROM t2 + ); + } +} {1 {SELECTs to the left and right of EXCEPT do not have the same number of result columns}} +do_test in-12.9 { + catchsql { + SELECT * FROM t2 WHERE a IN ( + SELECT a FROM t3 INTERSECT SELECT a, b FROM t2 + ); + } +} {1 {SELECTs to the left and right of INTERSECT do not have the same number of result columns}} +} + + +#------------------------------------------------------------------------ +# The following tests check that NULL is handled correctly when it +# appears as part of a set of values on the right-hand side of an +# IN or NOT IN operator. +# +# When it appears in such a set, NULL is handled as an "unknown value". +# If, because of the unknown value in the set, the result of the expression +# cannot be determined, then it itself evaluates to NULL. +# + +# Warm body test to demonstrate the principles being tested: +# +do_test in-13.1 { + db nullvalue "null" + execsql { SELECT + 1 IN (NULL, 1, 2), -- The value 1 is a member of the set, return true. + 3 IN (NULL, 1, 2), -- Ambiguous, return NULL. + 1 NOT IN (NULL, 1, 2), -- The value 1 is a member of the set, return false. + 3 NOT IN (NULL, 1, 2) -- Ambiguous, return NULL. 
+ } +} {1 null 0 null} + +do_test in-13.2 { + execsql { + CREATE TABLE t7(a, b, c NOT NULL); + INSERT INTO t7 VALUES(1, 1, 1); + INSERT INTO t7 VALUES(2, 2, 2); + INSERT INTO t7 VALUES(3, 3, 3); + INSERT INTO t7 VALUES(NULL, 4, 4); + INSERT INTO t7 VALUES(NULL, 5, 5); + } +} {} + +do_test in-13.3 { + execsql { SELECT 2 IN (SELECT a FROM t7) } +} {1} +do_test in-13.4 { + execsql { SELECT 6 IN (SELECT a FROM t7) } +} {null} + +do_test in-13.5 { + execsql { SELECT 2 IN (SELECT b FROM t7) } +} {1} +do_test in-13.6 { + execsql { SELECT 6 IN (SELECT b FROM t7) } +} {0} + +do_test in-13.7 { + execsql { SELECT 2 IN (SELECT c FROM t7) } +} {1} +do_test in-13.8 { + execsql { SELECT 6 IN (SELECT c FROM t7) } +} {0} + +do_test in-13.9 { + execsql { + SELECT + 2 NOT IN (SELECT a FROM t7), + 6 NOT IN (SELECT a FROM t7), + 2 NOT IN (SELECT b FROM t7), + 6 NOT IN (SELECT b FROM t7), + 2 NOT IN (SELECT c FROM t7), + 6 NOT IN (SELECT c FROM t7) + } +} {0 null 0 1 0 1} + +do_test in-13.10 { + execsql { + SELECT b IN ( + SELECT inside.a + FROM t7 AS inside + WHERE inside.b BETWEEN outside.b+1 AND outside.b+2 + ) + FROM t7 AS outside ORDER BY b; + } +} {0 null null null 0} + +do_test in-13.11 { + execsql { + SELECT b NOT IN ( + SELECT inside.a + FROM t7 AS inside + WHERE inside.b BETWEEN outside.b+1 AND outside.b+2 + ) + FROM t7 AS outside ORDER BY b; + } +} {1 null null null 1} + +do_test in-13.12 { + execsql { + CREATE INDEX i1 ON t7(a); + CREATE INDEX i2 ON t7(b); + CREATE INDEX i3 ON t7(c); + } + execsql { + SELECT + 2 IN (SELECT a FROM t7), + 6 IN (SELECT a FROM t7), + 2 IN (SELECT b FROM t7), + 6 IN (SELECT b FROM t7), + 2 IN (SELECT c FROM t7), + 6 IN (SELECT c FROM t7) + } +} {1 null 1 0 1 0} + +do_test in-13.13 { + execsql { + SELECT + 2 NOT IN (SELECT a FROM t7), + 6 NOT IN (SELECT a FROM t7), + 2 NOT IN (SELECT b FROM t7), + 6 NOT IN (SELECT b FROM t7), + 2 NOT IN (SELECT c FROM t7), + 6 NOT IN (SELECT c FROM t7) + } +} {0 null 0 1 0 1} + +do_test in-13.14 { + execsql { + BEGIN TRANSACTION; + CREATE TABLE a(id INTEGER); + INSERT INTO a VALUES(1); + INSERT INTO a VALUES(2); + INSERT INTO a VALUES(3); + CREATE TABLE b(id INTEGER); + INSERT INTO b VALUES(NULL); + INSERT INTO b VALUES(3); + INSERT INTO b VALUES(4); + INSERT INTO b VALUES(5); + COMMIT; + SELECT * FROM a WHERE id NOT IN (SELECT id FROM b); + } +} {} +do_test in-13.14 { + execsql { + CREATE INDEX i5 ON b(id); + SELECT * FROM a WHERE id NOT IN (SELECT id FROM b); + } +} {} + + +do_test in-13.X { + db nullvalue "" +} {} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/intpkey.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/intpkey.test --- sqlite3-3.4.2/test/intpkey.test 2007-03-27 15:43:05.000000000 +0100 +++ sqlite3-3.6.16/test/intpkey.test 2009-06-05 18:03:30.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for the special processing associated # with INTEGER PRIMARY KEY columns. # -# $Id: intpkey.test,v 1.23 2005/07/21 03:48:20 drh Exp $ +# $Id: intpkey.test,v 1.24 2007/11/29 17:43:28 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -293,7 +293,7 @@ count { SELECT * FROM t1 WHERE c=='world' AND a>7; } -} {11 hello world 5} +} {11 hello world 4} do_test intpkey-3.9 { count { SELECT * FROM t1 WHERE 7 85; + COMMIT; + } +} -sqlbody { + BEGIN; + INSERT INTO t2 VALUES(randstr(22000,22000)); + DELETE FROM t1 WHERE oid = 83; + COMMIT; +} + +# This test verifies that IO errors that occur within the obscure branch +# of code executed by tkt3762.test are correctly reported. 
+# +ifcapable vacuum&&autovacuum&&pragma { + do_ioerr_test ioerr-16 -erc 1 -ckrefcount 1 -sqlprep { + PRAGMA auto_vacuum=INCREMENTAL; + PRAGMA page_size=1024; + BEGIN; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + DELETE FROM t1 WHERE rowid>202; + COMMIT; + VACUUM; + PRAGMA cache_size = 10; + BEGIN; + DELETE FROM t1 WHERE rowid IN (10,11,12) ; + } -sqlbody { + PRAGMA incremental_vacuum(10); + COMMIT; + } +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/io.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/io.test --- sqlite3-3.4.2/test/io.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/io.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,555 @@ +# 2007 August 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing some specific characteristics of the +# IO traffic generated by SQLite (making sure SQLite is not writing out +# more database pages than it has to, stuff like that). +# +# $Id: io.test,v 1.21 2009/03/28 07:03:42 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db close +sqlite3_simulate_device +sqlite3 db test.db -vfs devsym + +# Test summary: +# +# io-1.* - Test that quick-balance does not journal pages unnecessarily. +# +# io-2.* - Test the "atomic-write optimization". +# +# io-3.* - Test the IO traffic enhancements triggered when the +# IOCAP_SEQUENTIAL device capability flag is set (no +# fsync() calls on the journal file). +# +# io-4.* - Test the IO traffic enhancements triggered when the +# IOCAP_SAFE_APPEND device capability flag is set (fewer +# fsync() calls on the journal file, no need to set nRec +# field in the single journal header). +# +# io-5.* - Test that the default page size is selected and used +# correctly. +# + +set ::nWrite 0 +proc nWrite {db} { + set bt [btree_from_db $db] + db_enter $db + array set stats [btree_pager_stats $bt] + db_leave $db + set res [expr $stats(write) - $::nWrite] + set ::nWrite $stats(write) + set res +} + +set ::nSync 0 +proc nSync {} { + set res [expr {$::sqlite_sync_count - $::nSync}] + set ::nSync $::sqlite_sync_count + set res +} + +do_test io-1.1 { + execsql { + PRAGMA auto_vacuum = OFF; + PRAGMA page_size = 1024; + CREATE TABLE abc(a,b); + } + nWrite db +} {2} + +# Insert into the table 4 records of aproximately 240 bytes each. +# This should completely fill the root-page of the table. Each +# INSERT causes 2 db pages to be written - the root-page of "abc" +# and page 1 (db change-counter page). +do_test io-1.2 { + set ret [list] + execsql { INSERT INTO abc VALUES(1,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(2,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(3,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(4,randstr(230,230)); } + lappend ret [nWrite db] +} {2 2 2 2} + +# Insert another 240 byte record. 
This causes two leaf pages +# to be added to the root page of abc. 4 pages in total +# are written to the db file - the two leaf pages, the root +# of abc and the change-counter page. +do_test io-1.3 { + execsql { INSERT INTO abc VALUES(5,randstr(230,230)); } + nWrite db +} {4} + +# Insert another 3 240 byte records. After this, the tree consists of +# the root-node, which is close to empty, and two leaf pages, both of +# which are full. +do_test io-1.4 { + set ret [list] + execsql { INSERT INTO abc VALUES(6,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(7,randstr(230,230)); } + lappend ret [nWrite db] + execsql { INSERT INTO abc VALUES(8,randstr(230,230)); } + lappend ret [nWrite db] +} {2 2 2} + +# This insert should use the quick-balance trick to add a third leaf +# to the b-tree used to store table abc. It should only be necessary to +# write to 3 pages to do this: the change-counter, the root-page and +# the new leaf page. +do_test io-1.5 { + execsql { INSERT INTO abc VALUES(9,randstr(230,230)); } + nWrite db +} {3} + +ifcapable atomicwrite { + +#---------------------------------------------------------------------- +# Test cases io-2.* test the atomic-write optimization. +# +do_test io-2.1 { + execsql { DELETE FROM abc; VACUUM; } +} {} + +# Clear the write and sync counts. +nWrite db ; nSync + +# The following INSERT updates 2 pages and requires 4 calls to fsync(): +# +# 1) The directory in which the journal file is created, +# 2) The journal file (to sync the page data), +# 3) The journal file (to sync the journal file header), +# 4) The database file. +# +do_test io-2.2 { + execsql { INSERT INTO abc VALUES(1, 2) } + list [nWrite db] [nSync] +} {2 4} + +# Set the device-characteristic mask to include the SQLITE_IOCAP_ATOMIC, +# then do another INSERT similar to the one in io-2.2. This should +# only write 1 page and require a single fsync(). +# +# The single fsync() is the database file. Only one page is reported as +# written because page 1 - the change-counter page - is written using +# an out-of-band method that bypasses the write counter. +# +sqlite3_simulate_device -char atomic +do_test io-2.3 { + execsql { INSERT INTO abc VALUES(3, 4) } + list [nWrite db] [nSync] +} {1 1} + +# Test that the journal file is not created and the change-counter is +# updated when the atomic-write optimization is used. +# +do_test io-2.4.1 { + execsql { + BEGIN; + INSERT INTO abc VALUES(5, 6); + } + sqlite3 db2 test.db -vfs devsym + execsql { SELECT * FROM abc } db2 +} {1 2 3 4} +do_test io-2.4.2 { + file exists test.db-journal +} {0} +do_test io-2.4.3 { + execsql { COMMIT } + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} +db2 close + +# Test that the journal file is created and sync()d if the transaction +# modifies more than one database page, even if the IOCAP_ATOMIC flag +# is set. +# +do_test io-2.5.1 { + execsql { CREATE TABLE def(d, e) } + nWrite db ; nSync + execsql { + BEGIN; + INSERT INTO abc VALUES(7, 8); + } + file exists test.db-journal +} {0} +do_test io-2.5.2 { + execsql { INSERT INTO def VALUES('a', 'b'); } + file exists test.db-journal +} {1} +do_test io-2.5.3 { + execsql { COMMIT } + list [nWrite db] [nSync] +} {3 4} + +# Test that the journal file is created and sync()d if the transaction +# modifies a single database page and also appends a page to the file. +# Internally, this case is handled differently to the one above. The +# journal file is not actually created until the 'COMMIT' statement +# is executed. 
+# +do_test io-2.6.1 { + execsql { + BEGIN; + INSERT INTO abc VALUES(9, randstr(1000,1000)); + } + file exists test.db-journal +} {0} +do_test io-2.6.2 { + # Create a file at "test.db-journal". This will prevent SQLite from + # opening the journal for exclusive access. As a result, the COMMIT + # should fail with SQLITE_CANTOPEN and the transaction rolled back. + # + file mkdir test.db-journal + catchsql { COMMIT } +} {1 {unable to open database file}} +do_test io-2.6.3 { + file delete -force test.db-journal + catchsql { COMMIT } +} {1 {cannot commit - no transaction is active}} +do_test io-2.6.4 { + execsql { SELECT * FROM abc } +} {1 2 3 4 5 6 7 8} + +# Test that if the database modification is part of multi-file commit, +# the journal file is always created. In this case, the journal file +# is created during execution of the COMMIT statement, so we have to +# use the same technique to check that it is created as in the above +# block. +file delete -force test2.db test2.db-journal +ifcapable attach { + do_test io-2.7.1 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.page_size = 1024; + CREATE TABLE aux.abc2(a, b); + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal + } {0} + do_test io-2.7.2 { + execsql { INSERT INTO abc2 SELECT * FROM abc } + file exists test2.db-journal + } {0} + do_test io-2.7.3 { + execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } + } {1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10} + do_test io-2.7.4 { + file mkdir test2.db-journal + catchsql { COMMIT } + } {1 {unable to open database file}} + do_test io-2.7.5 { + file delete -force test2.db-journal + catchsql { COMMIT } + } {1 {cannot commit - no transaction is active}} + do_test io-2.7.6 { + execsql { SELECT * FROM abc UNION ALL SELECT * FROM abc2 } + } {1 2 3 4 5 6 7 8} +} + +# Try an explicit ROLLBACK before the journal file is created. +# +do_test io-2.8.1 { + execsql { + BEGIN; + DELETE FROM abc; + } + file exists test.db-journal +} {0} +do_test io-2.8.2 { + execsql { SELECT * FROM abc } +} {} +do_test io-2.8.3 { + execsql { + ROLLBACK; + SELECT * FROM abc; + } +} {1 2 3 4 5 6 7 8} + +# Test that the atomic write optimisation is not enabled if the sector +# size is larger than the page-size. +# +do_test io-2.9.1 { + db close + sqlite3 db test.db + sqlite3_simulate_device -char atomic -sectorsize 2048 + execsql { + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal +} {1} +do_test io-2.9.2 { + execsql { ROLLBACK; } + db close + file delete -force test.db test.db-journal + sqlite3 db test.db -vfs devsym + execsql { + PRAGMA auto_vacuum = OFF; + PRAGMA page_size = 2048; + CREATE TABLE abc(a, b); + } + execsql { + BEGIN; + INSERT INTO abc VALUES(9, 10); + } + file exists test.db-journal +} {0} +do_test io-2.9.3 { + execsql { COMMIT } +} {} + +# Test a couple of the more specific IOCAP_ATOMIC flags +# (i.e IOCAP_ATOMIC2K etc.). 
+# +do_test io-2.10.1 { + sqlite3_simulate_device -char atomic1k + execsql { + BEGIN; + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {1} +do_test io-2.10.2 { + execsql { ROLLBACK } + sqlite3_simulate_device -char atomic2k + execsql { + BEGIN; + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {0} +do_test io-2.10.3 { + execsql { ROLLBACK } +} {} + +do_test io-2.11.0 { + execsql { + PRAGMA locking_mode = exclusive; + PRAGMA locking_mode; + } +} {exclusive exclusive} +do_test io-2.11.1 { + execsql { + INSERT INTO abc VALUES(11, 12); + } + file exists test.db-journal +} {0} + +do_test io-2.11.2 { + execsql { + PRAGMA locking_mode = normal; + INSERT INTO abc VALUES(13, 14); + } + file exists test.db-journal +} {0} + +} ;# /* ifcapable atomicwrite */ + +#---------------------------------------------------------------------- +# Test cases io-3.* test the IOCAP_SEQUENTIAL optimization. +# +sqlite3_simulate_device -char sequential -sectorsize 0 +ifcapable pager_pragmas { + do_test io-3.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db -vfs devsym + db eval { + PRAGMA auto_vacuum=OFF; + } + # File size might be 1 due to the hack to work around ticket #3260. + # Search for #3260 in os_unix.c for additional information. + expr {[file size test.db]>1} + } {0} + do_test io-3.2 { + execsql { CREATE TABLE abc(a, b) } + nSync + execsql { + PRAGMA temp_store = memory; + PRAGMA cache_size = 10; + BEGIN; + INSERT INTO abc VALUES('hello', 'world'); + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + # File has grown - showing there was a cache-spill - but there + # have been no calls to fsync(). The file is probably about 30KB. + # But some VFS implementations (symbian) buffer writes so the actual + # size may be a little less than that. So this test case just tests + # that the file is now greater than 20000 bytes in size. + list [expr [file size test.db]>20000] [nSync] + } {1 0} + do_test io-3.3 { + # The COMMIT requires a single fsync() - to the database file. + execsql { COMMIT } + list [file size test.db] [nSync] + } {39936 1} +} + +#---------------------------------------------------------------------- +# Test cases io-4.* test the IOCAP_SAFE_APPEND optimization. +# +sqlite3_simulate_device -char safe_append + +# With the SAFE_APPEND flag set, simple transactions require 3, rather +# than 4, calls to fsync(). The fsync() calls are on: +# +# 1) The directory in which the journal file is created, (unix only) +# 2) The journal file (to sync the page data), +# 3) The database file. +# +# Normally, when the SAFE_APPEND flag is not set, there is another fsync() +# on the journal file between steps (2) and (3) above. +# +if {$::tcl_platform(platform)=="unix"} { + set expected_sync_count 3 +} else { + set expected_sync_count 2 +} +do_test io-4.1 { + execsql { DELETE FROM abc } + nSync + execsql { INSERT INTO abc VALUES('a', 'b') } + nSync +} $expected_sync_count + +# With SAFE_APPEND set, the nRec field of the journal file header should +# be set to 0xFFFFFFFF before the first journal sync. The nRec field +# occupies bytes 8-11 of the journal file. 
+# +do_test io-4.2.1 { + execsql { BEGIN } + execsql { INSERT INTO abc VALUES('c', 'd') } + file exists test.db-journal +} {1} +if {$::tcl_platform(platform)=="unix"} { + do_test io-4.2.2 { + hexio_read test.db-journal 8 4 + } {FFFFFFFF} +} +do_test io-4.2.3 { + execsql { COMMIT } + nSync +} $expected_sync_count +sqlite3_simulate_device -char safe_append + +# With SAFE_APPEND set, there should only ever be one journal-header +# written to the database, even though the sync-mode is "full". +# +do_test io-4.3.1 { + execsql { + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + INSERT INTO abc SELECT * FROM abc; + } + expr {[file size test.db]/1024} +} {43} +ifcapable pager_pragmas { + do_test io-4.3.2 { + execsql { + PRAGMA synchronous = full; + PRAGMA cache_size = 10; + PRAGMA synchronous; + } + } {2} +} +do_test io-4.3.3 { + execsql { + BEGIN; + UPDATE abc SET a = 'x'; + } + file exists test.db-journal +} {1} +if {$tcl_platform(platform) != "symbian"} { + # This test is not run on symbian because the file-buffer makes it + # difficult to predict the exact size of the file as reported by + # [file size]. + do_test io-4.3.4 { + # The UPDATE statement in the statement above modifies 41 pages + # (all pages in the database except page 1 and the root page of + # abc). Because the cache_size is set to 10, this must have required + # at least 4 cache-spills. If there were no journal headers written + # to the journal file after the cache-spill, then the size of the + # journal file is given by: + # + # journal file size = journal header size + nPage * (page-size + 8) + # + # If the journal file contains additional headers, this formula + # will not predict the size of the journal file. + # + file size test.db-journal + } [expr 512 + (1024+8)*41] +} + +#---------------------------------------------------------------------- +# Test cases io-5.* test that the default page size is selected and +# used correctly. +# +set tn 0 +foreach {char sectorsize pgsize} { + {} 512 1024 + {} 1024 1024 + {} 2048 2048 + {} 8192 8192 + {} 16384 8192 + {atomic} 512 8192 + {atomic512} 512 1024 + {atomic2K} 512 2048 + {atomic2K} 4096 4096 + {atomic2K atomic} 512 8192 + {atomic64K} 512 1024 +} { + incr tn + if {$pgsize>$::SQLITE_MAX_PAGE_SIZE} continue + db close + file delete -force test.db test.db-journal + sqlite3_simulate_device -char $char -sectorsize $sectorsize + sqlite3 db test.db -vfs devsym + db eval { + PRAGMA auto_vacuum=OFF; + } + ifcapable !atomicwrite { + if {[regexp {^atomic} $char]} continue + } + do_test io-5.$tn { + execsql { + CREATE TABLE abc(a, b, c); + } + expr {[file size test.db]/2} + } $pgsize +} + +sqlite3_simulate_device -char {} -sectorsize 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/join.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/join.test --- sqlite3-3.4.2/test/join.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/join.test 2009-06-12 03:37:54.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests for joins, including outer joins.
# -# $Id: join.test,v 1.22 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: join.test,v 1.26 2008/12/05 00:00:07 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -61,6 +61,40 @@ SELECT b FROM t1 NATURAL JOIN t2; } } {2 3} + +# ticket #3522 +do_test join-1.3.5 { + execsql2 { + SELECT t2.* FROM t2 NATURAL JOIN t1 + } +} {b 2 c 3 d 4 b 3 c 4 d 5} +do_test join-1.3.6 { + execsql2 { + SELECT xyzzy.* FROM t2 AS xyzzy NATURAL JOIN t1 + } +} {b 2 c 3 d 4 b 3 c 4 d 5} +do_test join-1.3.7 { + execsql2 { + SELECT t1.* FROM t2 NATURAL JOIN t1 + } +} {a 1 b 2 c 3 a 2 b 3 c 4} +do_test join-1.3.8 { + execsql2 { + SELECT xyzzy.* FROM t2 NATURAL JOIN t1 AS xyzzy + } +} {a 1 b 2 c 3 a 2 b 3 c 4} +do_test join-1.3.9 { + execsql2 { + SELECT aaa.*, bbb.* FROM t2 AS aaa NATURAL JOIN t1 AS bbb + } +} {b 2 c 3 d 4 a 1 b 2 c 3 b 3 c 4 d 5 a 2 b 3 c 4} +do_test join-1.3.10 { + execsql2 { + SELECT t1.*, t2.* FROM t2 NATURAL JOIN t1 + } +} {a 1 b 2 c 3 b 2 c 3 d 4 a 2 b 3 c 4 b 3 c 4 d 5} + + do_test join-1.4.1 { execsql2 { SELECT * FROM t1 INNER JOIN t2 USING(b,c); @@ -86,6 +120,19 @@ SELECT b FROM t1 JOIN t2 USING(b); } } {2 3} + +# Ticket #3522 +do_test join-1.4.6 { + execsql2 { + SELECT t1.* FROM t1 JOIN t2 USING(b); + } +} {a 1 b 2 c 3 a 2 b 3 c 4} +do_test join-1.4.7 { + execsql2 { + SELECT t2.* FROM t1 JOIN t2 USING(b); + } +} {b 2 c 3 d 4 b 3 c 4 d 5} + do_test join-1.5 { execsql2 { SELECT * FROM t1 INNER JOIN t2 USING(b); @@ -192,6 +239,24 @@ SELECT * FROM t1 NATURAL LEFT JOIN t2; } } {1 2 3 4 2 3 4 5 3 4 5 {}} + +# ticket #3522 +do_test join-2.1.1 { + execsql2 { + SELECT * FROM t1 NATURAL LEFT JOIN t2; + } +} {a 1 b 2 c 3 d 4 a 2 b 3 c 4 d 5 a 3 b 4 c 5 d {}} +do_test join-2.1.2 { + execsql2 { + SELECT t1.* FROM t1 NATURAL LEFT JOIN t2; + } +} {a 1 b 2 c 3 a 2 b 3 c 4 a 3 b 4 c 5} +do_test join-2.1.3 { + execsql2 { + SELECT t2.* FROM t1 NATURAL LEFT JOIN t2; + } +} {b 2 c 3 d 4 b 3 c 4 d 5 b {} c {} d {}} + do_test join-2.2 { execsql { SELECT * FROM t2 NATURAL LEFT OUTER JOIN t1; @@ -233,11 +298,16 @@ SELECT * FROM t1 JOIN t2 ON t1.a=t2.b USING(b); } } {1 {cannot have both ON and USING clauses in the same join}} -do_test join-3.4 { +do_test join-3.4.1 { catchsql { SELECT * FROM t1 JOIN t2 USING(a); } } {1 {cannot join using column a - column not present in both tables}} +do_test join-3.4.2 { + catchsql { + SELECT * FROM t1 JOIN t2 USING(d); + } +} {1 {cannot join using column d - column not present in both tables}} do_test join-3.5 { catchsql { SELECT * FROM t1 USING(a); @@ -253,11 +323,31 @@ SELECT * FROM t1 INNER OUTER JOIN t2; } } {1 {unknown or unsupported join type: INNER OUTER}} -do_test join-3.7 { +do_test join-3.8 { + catchsql { + SELECT * FROM t1 INNER OUTER CROSS JOIN t2; + } +} {1 {unknown or unsupported join type: INNER OUTER CROSS}} +do_test join-3.9 { + catchsql { + SELECT * FROM t1 OUTER NATURAL INNER JOIN t2; + } +} {1 {unknown or unsupported join type: OUTER NATURAL INNER}} +do_test join-3.10 { catchsql { SELECT * FROM t1 LEFT BOGUS JOIN t2; } } {1 {unknown or unsupported join type: LEFT BOGUS}} +do_test join-3.11 { + catchsql { + SELECT * FROM t1 INNER BOGUS CROSS JOIN t2; + } +} {1 {unknown or unsupported join type: INNER BOGUS CROSS}} +do_test join-3.12 { + catchsql { + SELECT * FROM t1 NATURAL AWK SED JOIN t2; + } +} {1 {unknown or unsupported join type: NATURAL AWK SED}} do_test join-4.1 { execsql { @@ -397,6 +487,16 @@ SELECT * FROM v10_11 LEFT JOIN t9 ON( a=x ); } } {1 111 1 11 3 333 {} {}} +ifcapable subquery { + # Constant expressions in a subquery that is 
the right element of a + # LEFT JOIN evaluate to NULL for rows where the LEFT JOIN does not + # match. Ticket #3300 + do_test join-8.4 { + execsql { + SELECT * FROM t9 LEFT JOIN (SELECT 44, p, q FROM t11) AS sub1 ON p=a + } + } {1 11 {} {} {} 2 22 44 2 111} +} } ;# ifcapable view # Ticket #350 describes a scenario where LEFT OUTER JOIN does not @@ -443,19 +543,39 @@ } ;# ifcapable view } ;# ifcapable compound -# Ticket #1697: Left Join WHERE clause terms that contain an -# aggregate subquery. -# ifcapable subquery { -do_test join-10.1 { - execsql { - CREATE TABLE t21(a,b,c); - CREATE TABLE t22(p,q); - CREATE INDEX i22 ON t22(q); - SELECT a FROM t21 LEFT JOIN t22 ON b=p WHERE q= - (SELECT max(m.q) FROM t22 m JOIN t21 n ON n.b=m.p WHERE n.c=1); - } -} {} + # Ticket #1697: Left Join WHERE clause terms that contain an + # aggregate subquery. + # + do_test join-10.1 { + execsql { + CREATE TABLE t21(a,b,c); + CREATE TABLE t22(p,q); + CREATE INDEX i22 ON t22(q); + SELECT a FROM t21 LEFT JOIN t22 ON b=p WHERE q= + (SELECT max(m.q) FROM t22 m JOIN t21 n ON n.b=m.p WHERE n.c=1); + } + } {} + + # Test a LEFT JOIN when the right-hand side of hte join is an empty + # sub-query. Seems fine. + # + do_test join-10.2 { + execsql { + CREATE TABLE t23(a, b, c); + CREATE TABLE t24(a, b, c); + INSERT INTO t23 VALUES(1, 2, 3); + } + execsql { + SELECT * FROM t23 LEFT JOIN t24; + } + } {1 2 3 {} {} {}} + do_test join-10.3 { + execsql { + SELECT * FROM t23 LEFT JOIN (SELECT * FROM t24); + } + } {1 2 3 {} {} {}} + } ;# ifcapable subquery finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/jrnlmode2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/jrnlmode2.test --- sqlite3-3.4.2/test/jrnlmode2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/jrnlmode2.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,99 @@ +# 2009 March 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: jrnlmode2.test,v 1.6 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +#------------------------------------------------------------------------- +# Test overview: +# +# jrnlmode2-1.*: Demonstrate bug #3745 +# jrnlmode2-2.*: Demonstrate bug #3751 +# + +do_test jrnlmode2-1.1 { + execsql { + PRAGMA journal_mode = persist; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } +} {persist} + +do_test jrnlmode2-1.2 { + file exists test.db-journal +} {1} + +do_test jrnlmode2-1.3 { + sqlite3 db2 test.db + execsql { SELECT * FROM t1 } db2 +} {1 2} + +do_test jrnlmode2-1.4 { + execsql { + INSERT INTO t1 VALUES(3, 4); + BEGIN; + SELECT * FROM t1; + } + execsql { PRAGMA lock_status } +} {main shared temp closed} + +do_test jrnlmode2-1.5 { + file exists test.db-journal +} {1} + +do_test jrnlmode2-1.6 { + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2 3 4}} + +do_test jrnlmode2-1.7 { + execsql { COMMIT } + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2 3 4}} + + + +do_test jrnlmode2-2.1 { + db2 close + execsql { PRAGMA journal_mode = truncate } + execsql { INSERT INTO t1 VALUES(5, 6) } +} {} + +do_test jrnlmode2-2.2 { + file exists test.db-journal +} {1} + +do_test jrnlmode2-2.3 { + file size test.db-journal +} {0} + +do_test jrnlmode2-2.4 { + sqlite3 db2 test.db -readonly 1 + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2 3 4 5 6}} + +do_test jrnlmode2-2.5 { + file delete test.db-journal +} {} + +do_test jrnlmode2-2.6 { + sqlite3 db2 test.db -readonly 1 + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2 3 4 5 6}} + +catch { db2 close } +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/jrnlmode3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/jrnlmode3.test --- sqlite3-3.4.2/test/jrnlmode3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/jrnlmode3.test 2009-04-20 18:43:03.000000000 +0100 @@ -0,0 +1,150 @@ +# 2009 April 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test cases inspired by ticket #3811. Tests to make sure that +# the journal_mode can only be changed at appropriate times and that +# all reported changes are effective. +# +# $Id: jrnlmode3.test,v 1.5 2009/04/20 17:43:03 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +# +# Verify that journal_mode=OFF works as long as it occurs before the first +# transaction, even if locking_mode=EXCLUSIVE is enabled. The behavior if +# journal_mode is changed after the first transaction is undefined and hence +# untested. 
+# +do_test jrnlmode3-1.1 { + db eval { + PRAGMA journal_mode=OFF; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1; + } +} {off exclusive 1} +do_test jrnlmode3-1.2 { + db eval { + BEGIN; + INSERT INTO t1 VALUES(2); + ROLLBACK; + SELECT * FROM t1; + } +} {1 2} + +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +do_test jrnlmode3-2.1 { + db eval { + PRAGMA locking_mode=EXCLUSIVE; + PRAGMA journal_mode=OFF; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1; + } +} {exclusive off 1} +do_test jrnlmode3-2.2 { + db eval { + BEGIN; + INSERT INTO t1 VALUES(2); + ROLLBACK; + SELECT * FROM t1; + } +} {1 2} + +# Test cases to verify that we can move from any journal_mode +# to any other, as long as we are not in a transaction. Verify +# that we cannot change journal_mode while a transaction is active. +# +set all_journal_modes {delete persist truncate memory off} +set cnt 0 +foreach fromjmode $all_journal_modes { + foreach tojmode $all_journal_modes { + + # Skip the no-change cases + if {$fromjmode==$tojmode} continue + incr cnt + + # Start with a fresh database connection and an empty database file. + # + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + + # Initialize the journal mode. + # + do_test jrnlmode3-3.$cnt.1-($fromjmode-to-$tojmode) { + db eval "PRAGMA journal_mode = $fromjmode;" + } $fromjmode + + # Verify that the initial journal mode takes. + # + do_test jrnlmode3-3.$cnt.2 { + db eval {PRAGMA main.journal_mode} + } $fromjmode + + # Start a transaction and try to change the journal mode within + # the transaction. This should fail. + # + do_test jrnlmode3-3.$cnt.3 { + db eval { + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES($cnt); + } + db eval "PRAGMA journal_mode=$tojmode" + } $fromjmode + + # Rollback the transaction. Verify that the rollback occurred + # if journal_mode!=OFF. + # + do_test jrnlmode3-3.$cnt.4 { + db eval { + ROLLBACK; + SELECT * FROM t1; + } + } [expr {$fromjmode=="off"?$cnt:""}] + + # Now change the journal mode again. This time the new mode + # should take. + # + do_test jrnlmode3-3.$cnt.5 { + db eval "PRAGMA journal_mode=$tojmode" + } $tojmode + + # Do a transaction and roll it back. Verify that the rollback occurred + # if journal_mode!=OFF. + # + do_test jrnlmode3-3.$cnt.6 { + db eval { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(x); + BEGIN; + INSERT INTO t1 VALUES(1); + } + db eval ROLLBACK + db eval { + SELECT * FROM t1; + } + } [expr {$tojmode=="off"?"1":""}] + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/jrnlmode.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/jrnlmode.test --- sqlite3-3.4.2/test/jrnlmode.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/jrnlmode.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,507 @@ +# 2008 April 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# of these tests is the journal mode pragma.
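For orientation, journal_mode selects how SQLite manages the rollback journal around each transaction. The five modes exercised by the tests that follow behave roughly as sketched here (illustrative SQL only; the tests themselves are the authoritative statement of the behavior):

    PRAGMA journal_mode;               -- query the current mode; DELETE is the default
    PRAGMA journal_mode = persist;     -- keep the journal file after COMMIT, invalidating its header
    PRAGMA journal_mode = truncate;    -- keep the journal file but truncate it to zero bytes at COMMIT
    PRAGMA journal_mode = memory;      -- hold the rollback journal only in RAM
    PRAGMA journal_mode = off;         -- no rollback journal at all, so ROLLBACK cannot undo changes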
+# +# $Id: jrnlmode.test,v 1.16 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!pager_pragmas} { + finish_test + return +} + +if {[info exists TEMP_STORE] && $TEMP_STORE>=2} { + set temp_persist memory + set temp_delete memory + set temp_truncate memory + set temp_off off +} else { + set temp_persist persist + set temp_delete delete + set temp_truncate truncate + set temp_off off +} + +proc temp_journal_mode {newmode} { + if {[info exists ::TEMP_STORE] && $::TEMP_STORE>=2} { + if {$newmode ne "off" && $newmode ne "memory"} { + execsql {PRAGMA temp.journal_mode} + set newmode [db one {PRAGMA temp.journal_mode}] + } + } + set newmode +} + +#---------------------------------------------------------------------- +# Test cases jrnlmode-1.X test the PRAGMA logic. +# +do_test jrnlmode-1.0 { + execsql { + PRAGMA journal_mode; + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + } +} [list delete delete [temp_journal_mode delete]] +do_test jrnlmode-1.1 { + execsql { + PRAGMA journal_mode = persist; + } +} {persist} +do_test jrnlmode-1.2 { + execsql { + PRAGMA journal_mode; + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + } +} [list persist persist [temp_journal_mode persist]] +do_test jrnlmode-1.4 { + execsql { + PRAGMA journal_mode = off; + } +} {off} +do_test jrnlmode-1.5 { + execsql { + PRAGMA journal_mode; + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + } +} [list off off [temp_journal_mode off]] +do_test jrnlmode-1.6 { + execsql { + PRAGMA journal_mode = delete; + } +} {delete} +do_test jrnlmode-1.7 { + execsql { + PRAGMA journal_mode; + PRAGMA main.journal_mode; + PRAGMA Temp.journal_mode; + } +} [list delete delete [temp_journal_mode delete]] +do_test jrnlmode-1.7.1 { + execsql { + PRAGMA journal_mode = truncate; + } +} {truncate} +do_test jrnlmode-1.7.2 { + execsql { + PRAGMA journal_mode; + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + } +} [list truncate truncate [temp_journal_mode truncate]] +do_test jrnlmode-1.8 { + execsql { + PRAGMA journal_mode = off; + PRAGMA journal_mode = invalid; + } +} {off off} +ifcapable attach { + do_test jrnlmode-1.9 { + execsql { + PRAGMA journal_mode = PERSIST; + ATTACH ':memory:' as aux1; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA aux1.journal_mode; + } + } {persist memory} + do_test jrnlmode-1.10 { + execsql { + PRAGMA main.journal_mode = OFF; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + PRAGMA aux1.journal_mode; + } + } [list off [temp_journal_mode persist] memory] + do_test jrnlmode-1.11 { + execsql { + PRAGMA journal_mode; + } + } {persist} + do_test jrnlmode-1.12 { + execsql { + ATTACH ':memory:' as aux2; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA aux1.journal_mode; + PRAGMA aux2.journal_mode; + } + } {off memory memory} + do_test jrnlmode-1.13 { + # The journal-mode used by in-memory databases cannot be changed. 
+ execsql { + PRAGMA aux1.journal_mode = DELETE; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA aux1.journal_mode; + PRAGMA aux2.journal_mode; + } + } {off memory memory} + do_test jrnlmode-1.14 { + execsql { + PRAGMA journal_mode = delete; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + PRAGMA aux1.journal_mode; + PRAGMA aux2.journal_mode; + } + } [list delete [temp_journal_mode delete] memory memory] + do_test jrnlmode-1.15 { + execsql { + ATTACH ':memory:' as aux3; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + PRAGMA aux1.journal_mode; + PRAGMA aux2.journal_mode; + PRAGMA aux3.journal_mode; + } + } [list delete [temp_journal_mode delete] memory memory memory] + do_test jrnlmode-1.16 { + execsql { + PRAGMA journal_mode = TRUNCATE; + } + execsql { + PRAGMA main.journal_mode; + PRAGMA temp.journal_mode; + PRAGMA aux1.journal_mode; + PRAGMA aux2.journal_mode; + PRAGMA aux3.journal_mode; + } + } [list truncate [temp_journal_mode truncate] memory memory memory] + + do_test jrnlmode-1.99 { + execsql { + DETACH aux1; + DETACH aux2; + DETACH aux3; + } + } {} +} + +ifcapable attach { + file delete -force test2.db + do_test jrnlmode-2.1 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA main.journal_mode = persist; + PRAGMA aux.journal_mode = persist; + CREATE TABLE abc(a, b, c); + CREATE TABLE aux.def(d, e, f); + } + execsql { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO def VALUES(4, 5, 6); + COMMIT; + } + list [file exists test.db-journal] [file exists test2.db-journal] + } {1 1} + + do_test jrnlmode-2.2 { + file size test.db-journal + } {0} + + do_test jrnlmode-2.3 { + execsql { + SELECT * FROM abc; + } + } {1 2 3} + + do_test jrnlmode-2.4 { + file size test.db-journal + } {0} + + do_test jrnlmode-2.5 { + execsql { + SELECT * FROM def; + } + } {4 5 6} + +#---------------------------------------------------------------------- +# Test cases jrnlmode-3.X verify that ticket #3127 has been fixed. +# + db close + file delete -force test2.db + file delete -force test.db + sqlite3 db test.db + + do_test jrnlmode-3.1 { + execsql { + CREATE TABLE x(n INTEGER); + ATTACH 'test2.db' AS a; + create table a.x ( n integer ); + insert into a.x values(1); + insert into a.x values (2); + insert into a.x values (3); + insert into a.x values (4); + } + } {} + + do_test jrnlmode-3.2 { + execsql { PRAGMA journal_mode=off; } + execsql { + BEGIN IMMEDIATE; + INSERT OR IGNORE INTO main.x SELECT * FROM a.x; + COMMIT; + } + } {} +} + +ifcapable autovacuum&&pragma { + db close + file delete -force test.db + sqlite3 db test.db + do_test jrnlmode-4.1 { + execsql { + PRAGMA cache_size = 1; + PRAGMA auto_vacuum = 1; + CREATE TABLE abc(a, b, c); + } + execsql { PRAGMA page_count } + } {3} + + do_test jrnlmode-4.2 { + execsql { PRAGMA journal_mode = off } + } {off} + + do_test jrnlmode-4.3 { + execsql { INSERT INTO abc VALUES(1, 2, randomblob(2000)) } + } {} + + # This will attempt to truncate the database file. Check that this + # is not a problem when journal_mode=off. + do_test jrnlmode-4.4 { + execsql { DELETE FROM abc } + } {} + + integrity_check jrnlmode-4.5 +} + +#------------------------------------------------------------------------ +# The following test cases, jrnlmode-5.*, test the journal_size_limit +# pragma.
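The journal_size_limit pragma caps how large a journal file may remain on disk after a transaction commits in the persist or truncate journal modes, and the limit is tracked separately for each attached database. A rough sketch of the behavior the jrnlmode-5.* tests depend on (illustrative SQL only):

    PRAGMA journal_size_limit;            -- query the limit; a negative value (default -1) means no limit
    PRAGMA journal_size_limit = 10240;    -- leftover journal is truncated to at most 10240 bytes at COMMIT
    PRAGMA aux.journal_size_limit = 0;    -- per-database setting; 0 shrinks the leftover journal to zero bytes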
+ifcapable pragma { + db close + file delete -force test.db test2.db test3.db + sqlite3 db test.db + + do_test jrnlmode-5.1 { + execsql {pragma page_size=1024} + execsql {pragma journal_mode=persist} + } {persist} + + do_test jrnlmode-5.2 { + execsql { PRAGMA journal_size_limit } + } {-1} + do_test jrnlmode-5.3 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.journal_size_limit; + } + } {-1} + do_test jrnlmode-5.4.1 { + execsql { PRAGMA aux.journal_size_limit = 999999999999 } + } {999999999999} + do_test jrnlmode-5.4.2 { + execsql { PRAGMA aux.journal_size_limit = 10240 } + } {10240} + do_test jrnlmode-5.5 { + execsql { PRAGMA main.journal_size_limit = 20480 } + } {20480} + do_test jrnlmode-5.6 { + execsql { PRAGMA journal_size_limit } + } {20480} + do_test jrnlmode-5.7 { + execsql { PRAGMA aux.journal_size_limit } + } {10240} + + do_test jrnlmode-5.8 { + execsql { ATTACH 'test3.db' AS aux2 } + } {} + + do_test jrnlmode-5.9 { + execsql { + CREATE TABLE main.t1(a, b, c); + CREATE TABLE aux.t2(a, b, c); + CREATE TABLE aux2.t3(a, b, c); + } + } {} + do_test jrnlmode-5.10 { + list \ + [file exists test.db-journal] \ + [file exists test2.db-journal] \ + [file exists test3.db-journal] + } {1 1 1} + do_test jrnlmode-5.11 { + execsql { + BEGIN; + INSERT INTO t3 VALUES(randomblob(1000),randomblob(1000),randomblob(1000)); + INSERT INTO t3 + SELECT randomblob(1000),randomblob(1000),randomblob(1000) FROM t3; + INSERT INTO t3 + SELECT randomblob(1000),randomblob(1000),randomblob(1000) FROM t3; + INSERT INTO t3 + SELECT randomblob(1000),randomblob(1000),randomblob(1000) FROM t3; + INSERT INTO t3 + SELECT randomblob(1000),randomblob(1000),randomblob(1000) FROM t3; + INSERT INTO t3 + SELECT randomblob(1000),randomblob(1000),randomblob(1000) FROM t3; + INSERT INTO t2 SELECT * FROM t3; + INSERT INTO t1 SELECT * FROM t2; + COMMIT; + } + list \ + [file exists test.db-journal] \ + [file exists test2.db-journal] \ + [file exists test3.db-journal] \ + [file size test.db-journal] \ + [file size test2.db-journal] \ + [file size test3.db-journal] + } {1 1 1 0 0 0} + + do_test jrnlmode-5.12 { + execsql { + BEGIN; + UPDATE t1 SET a = randomblob(1000); + } + expr {[file size test.db-journal]>30000} + } {1} + do_test jrnlmode-5.13 { + execsql COMMIT + file size test.db-journal + } {20480} + + do_test jrnlmode-5.14 { + execsql { + BEGIN; + UPDATE t2 SET a = randomblob(1000); + } + expr {[file size test2.db-journal]>30000} + } {1} + do_test jrnlmode-5.15 { + execsql COMMIT + file size test2.db-journal + } {10240} + + do_test jrnlmode-5.16 { + execsql { + BEGIN; + UPDATE t3 SET a = randomblob(1000); + } + set journalsize [file size test3.db-journal] + expr {$journalsize>30000} + } {1} + do_test jrnlmode-5.17 { + execsql COMMIT + set sz [file size test3.db-journal] + expr {$sz>=$journalsize} + } {1} + + do_test jrnlmode-5.18 { + execsql { + PRAGMA journal_size_limit = -4; + BEGIN; + UPDATE t1 SET a = randomblob(1000); + } + set journalsize [file size test.db-journal] + expr {$journalsize>30000} + } {1} + do_test jrnlmode-5.19 { + execsql COMMIT + set sz [file size test.db-journal] + expr {$sz>=$journalsize} + } {1} + + # Test a size-limit of 0. 
+ # + do_test jrnlmode-5.20 { + execsql { + PRAGMA journal_size_limit = 0; + BEGIN; + UPDATE t1 SET a = randomblob(1000); + } + } {0} + do_test jrnlmode-5.21 { + expr {[file size test.db-journal] > 1024} + } {1} + do_test jrnlmode-5.22 { + execsql COMMIT + list [file exists test.db-journal] [file size test.db-journal] + } {1 0} +} + +ifcapable pragma { + # These tests are not run as part of the "journaltest" permutation, + # as the test_journal.c layer is incompatible with in-memory journaling. + if {[catch {set ::permutations_test_prefix} z] || $z ne "journaltest"} { + + do_test jrnlmode-6.1 { + execsql { + PRAGMA journal_mode = truncate; + CREATE TABLE t4(a, b); + BEGIN; + INSERT INTO t4 VALUES(1, 2); + PRAGMA journal_mode = memory; + } + } {truncate truncate} + do_test jrnlmode-6.2 { + file exists test.db-journal + } {1} + do_test jrnlmode-6.3 { + execsql { + COMMIT; + SELECT * FROM t4; + } + } {1 2} + do_test jrnlmode-6.4 { + file exists test.db-journal + } {1} + do_test jrnlmode-6.5 { + execsql { + PRAGMA journal_mode = MEMORY; + BEGIN; + INSERT INTO t4 VALUES(3, 4); + } + file exists test.db-journal + } {1} + do_test jrnlmode-6.7 { + execsql { + COMMIT; + SELECT * FROM t4; + } + } {1 2 3 4} + do_test jrnlmode-6.8 { + file exists test.db-journal + } {1} + do_test jrnlmode-6.9 { + execsql { + PRAGMA journal_mode = DELETE; + BEGIN IMMEDIATE; COMMIT; + } + file exists test.db-journal + } {0} + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/keyword1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/keyword1.test --- sqlite3-3.4.2/test/keyword1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/keyword1.test 2009-01-29 19:27:47.000000000 +0000 @@ -0,0 +1,118 @@ +# 2009 January 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Verify that certain keywords can be used as identifiers. 
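Each keyword is checked by generating SQL in which the keyword is used simultaneously as a table name, a column name, and a type name, and later as an index name. For the keyword abort, for example, the generated statements amount roughly to the following sketch:

    CREATE TABLE abort(abort abort);           -- keyword as table, column, and type name
    INSERT INTO abort VALUES(99);
    INSERT INTO abort SELECT a FROM t1;
    SELECT * FROM abort ORDER BY abort ASC;
    DROP TABLE abort;
    CREATE INDEX abort ON t1(a);               -- keyword as an index name
    SELECT b FROM t1 INDEXED BY abort WHERE a=2;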
+# +# $Id: keyword1.test,v 1.1 2009/01/29 19:27:47 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db eval { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + INSERT INTO t1 VALUES(3, 'three'); +} + +set kwlist { + abort + after + analyze + asc + attach + before + begin + by + cascade + cast + column + conflict + current_date + current_time + current_timestamp + database + deferred + desc + detach + end + each + exclusive + explain + fail + for + glob + if + ignore + immediate + initially + instead + key + like + match + of + offset + plan + pragma + query + raise + regexp + reindex + release + rename + replace + restrict + rollback + row + savepoint + temp + temporary + trigger + vacuum + view + virtual +}; +set exprkw { + cast + current_date + current_time + current_timestamp + raise +} +foreach kw $kwlist { + do_test keyword1-$kw.1 { + if {$kw=="if"} { + db eval "CREATE TABLE \"$kw\"($kw $kw)" + } else { + db eval "CREATE TABLE ${kw}($kw $kw)" + } + db eval "INSERT INTO $kw VALUES(99)" + db eval "INSERT INTO $kw SELECT a FROM t1" + if {[lsearch $exprkw $kw]<0} { + db eval "SELECT * FROM $kw ORDER BY $kw ASC" + } else { + db eval "SELECT * FROM $kw ORDER BY \"$kw\" ASC" + } + } {1 2 3 99} + do_test keyword1-$kw.2 { + if {$kw=="if"} { + db eval "DROP TABLE \"$kw\"" + db eval "CREATE INDEX \"$kw\" ON t1(a)" + } else { + db eval "DROP TABLE $kw" + db eval "CREATE INDEX $kw ON t1(a)" + } + db eval "SELECT b FROM t1 INDEXED BY $kw WHERE a=2" + } {two} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/laststmtchanges.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/laststmtchanges.test --- sqlite3-3.4.2/test/laststmtchanges.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/laststmtchanges.test 2009-06-12 03:37:54.000000000 +0100 @@ -1,3 +1,4 @@ +# # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # @@ -19,6 +20,7 @@ # Note 3: changes() is not changed by a change to a view (since everything # is done within instead of trigger context). # +# $Id: laststmtchanges.test,v 1.7 2008/10/27 13:59:34 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -278,4 +280,52 @@ } ;# ifcapable view + +# ---------------------------------------------------------------------------- +# 6.x - Test "DELETE FROM
    " in the absence of triggers +# +do_test laststmtchanges-6.1 { + execsql { + CREATE TABLE t3(a, b, c); + INSERT INTO t3 VALUES(1, 2, 3); + INSERT INTO t3 VALUES(4, 5, 6); + } +} {} +do_test laststmtchanges-6.2 { + execsql { + BEGIN; + DELETE FROM t3; + SELECT changes(); + } +} {2} +do_test laststmtchanges-6.3 { + execsql { + ROLLBACK; + BEGIN; + DELETE FROM t3 WHERE a IS NOT NULL; + SELECT changes(); + } +} {2} +do_test laststmtchanges-6.4 { + execsql { + ROLLBACK; + CREATE INDEX t3_i1 ON t3(a); + BEGIN; + DELETE FROM t3; + SELECT changes(); + } +} {2} +do_test laststmtchanges-6.5 { + execsql { ROLLBACK } + set nTotalChange [execsql {SELECT total_changes()}] + expr 0 +} {0} +do_test laststmtchanges-6.6 { + execsql { + SELECT total_changes(); + DELETE FROM t3; + SELECT total_changes(); + } +} [list $nTotalChange [expr $nTotalChange+2]] + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/like2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/like2.test --- sqlite3-3.4.2/test/like2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/like2.test 2009-06-05 18:03:32.000000000 +0100 @@ -0,0 +1,1009 @@ +# 2008 May 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the LIKE and GLOB operators and +# in particular the optimizations that occur to help those operators +# run faster. +# +# $Id: like2.test,v 1.1 2008/05/26 18:33:41 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test like2-1.1 { + db eval { + CREATE TABLE t1(x INT, y COLLATE NOCASE); + INSERT INTO t1(x,y) VALUES(1,CAST(x'01' AS TEXT)); + INSERT INTO t1(x,y) VALUES(2,CAST(x'02' AS TEXT)); + INSERT INTO t1(x,y) VALUES(3,CAST(x'03' AS TEXT)); + INSERT INTO t1(x,y) VALUES(4,CAST(x'04' AS TEXT)); + INSERT INTO t1(x,y) VALUES(5,CAST(x'05' AS TEXT)); + INSERT INTO t1(x,y) VALUES(6,CAST(x'06' AS TEXT)); + INSERT INTO t1(x,y) VALUES(7,CAST(x'07' AS TEXT)); + INSERT INTO t1(x,y) VALUES(8,CAST(x'08' AS TEXT)); + INSERT INTO t1(x,y) VALUES(9,CAST(x'09' AS TEXT)); + INSERT INTO t1(x,y) VALUES(10,CAST(x'0a' AS TEXT)); + INSERT INTO t1(x,y) VALUES(11,CAST(x'0b' AS TEXT)); + INSERT INTO t1(x,y) VALUES(12,CAST(x'0c' AS TEXT)); + INSERT INTO t1(x,y) VALUES(13,CAST(x'0d' AS TEXT)); + INSERT INTO t1(x,y) VALUES(14,CAST(x'0e' AS TEXT)); + INSERT INTO t1(x,y) VALUES(15,CAST(x'0f' AS TEXT)); + INSERT INTO t1(x,y) VALUES(16,CAST(x'10' AS TEXT)); + INSERT INTO t1(x,y) VALUES(17,CAST(x'11' AS TEXT)); + INSERT INTO t1(x,y) VALUES(18,CAST(x'12' AS TEXT)); + INSERT INTO t1(x,y) VALUES(19,CAST(x'13' AS TEXT)); + INSERT INTO t1(x,y) VALUES(20,CAST(x'14' AS TEXT)); + INSERT INTO t1(x,y) VALUES(21,CAST(x'15' AS TEXT)); + INSERT INTO t1(x,y) VALUES(22,CAST(x'16' AS TEXT)); + INSERT INTO t1(x,y) VALUES(23,CAST(x'17' AS TEXT)); + INSERT INTO t1(x,y) VALUES(24,CAST(x'18' AS TEXT)); + INSERT INTO t1(x,y) VALUES(25,CAST(x'19' AS TEXT)); + INSERT INTO t1(x,y) VALUES(26,CAST(x'1a' AS TEXT)); + INSERT INTO t1(x,y) VALUES(27,CAST(x'1b' AS TEXT)); + INSERT INTO t1(x,y) VALUES(28,CAST(x'1c' AS TEXT)); + INSERT INTO t1(x,y) VALUES(29,CAST(x'1d' AS TEXT)); + INSERT INTO t1(x,y) VALUES(30,CAST(x'1e' AS TEXT)); + INSERT INTO t1(x,y) VALUES(31,CAST(x'1f' 
AS TEXT)); + INSERT INTO t1(x,y) VALUES(32,' '); + INSERT INTO t1(x,y) VALUES(33,'!'); + INSERT INTO t1(x,y) VALUES(34,'"'); + INSERT INTO t1(x,y) VALUES(35,'#'); + INSERT INTO t1(x,y) VALUES(36,'$'); + INSERT INTO t1(x,y) VALUES(37,'%'); + INSERT INTO t1(x,y) VALUES(38,'&'); + INSERT INTO t1(x,y) VALUES(39,''''); + INSERT INTO t1(x,y) VALUES(40,'('); + INSERT INTO t1(x,y) VALUES(41,')'); + INSERT INTO t1(x,y) VALUES(42,'*'); + INSERT INTO t1(x,y) VALUES(43,'+'); + INSERT INTO t1(x,y) VALUES(44,','); + INSERT INTO t1(x,y) VALUES(45,'-'); + INSERT INTO t1(x,y) VALUES(46,'.'); + INSERT INTO t1(x,y) VALUES(47,'/'); + INSERT INTO t1(x,y) VALUES(48,'0'); + INSERT INTO t1(x,y) VALUES(49,'1'); + INSERT INTO t1(x,y) VALUES(50,'2'); + INSERT INTO t1(x,y) VALUES(51,'3'); + INSERT INTO t1(x,y) VALUES(52,'4'); + INSERT INTO t1(x,y) VALUES(53,'5'); + INSERT INTO t1(x,y) VALUES(54,'6'); + INSERT INTO t1(x,y) VALUES(55,'7'); + INSERT INTO t1(x,y) VALUES(56,'8'); + INSERT INTO t1(x,y) VALUES(57,'9'); + INSERT INTO t1(x,y) VALUES(58,':'); + INSERT INTO t1(x,y) VALUES(59,';'); + INSERT INTO t1(x,y) VALUES(60,'<'); + INSERT INTO t1(x,y) VALUES(61,'='); + INSERT INTO t1(x,y) VALUES(62,'>'); + INSERT INTO t1(x,y) VALUES(63,'?'); + INSERT INTO t1(x,y) VALUES(64,'@'); + INSERT INTO t1(x,y) VALUES(65,'A'); + INSERT INTO t1(x,y) VALUES(66,'B'); + INSERT INTO t1(x,y) VALUES(67,'C'); + INSERT INTO t1(x,y) VALUES(68,'D'); + INSERT INTO t1(x,y) VALUES(69,'E'); + INSERT INTO t1(x,y) VALUES(70,'F'); + INSERT INTO t1(x,y) VALUES(71,'G'); + INSERT INTO t1(x,y) VALUES(72,'H'); + INSERT INTO t1(x,y) VALUES(73,'I'); + INSERT INTO t1(x,y) VALUES(74,'J'); + INSERT INTO t1(x,y) VALUES(75,'K'); + INSERT INTO t1(x,y) VALUES(76,'L'); + INSERT INTO t1(x,y) VALUES(77,'M'); + INSERT INTO t1(x,y) VALUES(78,'N'); + INSERT INTO t1(x,y) VALUES(79,'O'); + INSERT INTO t1(x,y) VALUES(80,'P'); + INSERT INTO t1(x,y) VALUES(81,'Q'); + INSERT INTO t1(x,y) VALUES(82,'R'); + INSERT INTO t1(x,y) VALUES(83,'S'); + INSERT INTO t1(x,y) VALUES(84,'T'); + INSERT INTO t1(x,y) VALUES(85,'U'); + INSERT INTO t1(x,y) VALUES(86,'V'); + INSERT INTO t1(x,y) VALUES(87,'W'); + INSERT INTO t1(x,y) VALUES(88,'X'); + INSERT INTO t1(x,y) VALUES(89,'Y'); + INSERT INTO t1(x,y) VALUES(90,'Z'); + INSERT INTO t1(x,y) VALUES(91,'['); + INSERT INTO t1(x,y) VALUES(92,'\'); + INSERT INTO t1(x,y) VALUES(93,']'); + INSERT INTO t1(x,y) VALUES(94,'^'); + INSERT INTO t1(x,y) VALUES(95,'_'); + INSERT INTO t1(x,y) VALUES(96,'`'); + INSERT INTO t1(x,y) VALUES(97,'a'); + INSERT INTO t1(x,y) VALUES(98,'b'); + INSERT INTO t1(x,y) VALUES(99,'c'); + INSERT INTO t1(x,y) VALUES(100,'d'); + INSERT INTO t1(x,y) VALUES(101,'e'); + INSERT INTO t1(x,y) VALUES(102,'f'); + INSERT INTO t1(x,y) VALUES(103,'g'); + INSERT INTO t1(x,y) VALUES(104,'h'); + INSERT INTO t1(x,y) VALUES(105,'i'); + INSERT INTO t1(x,y) VALUES(106,'j'); + INSERT INTO t1(x,y) VALUES(107,'k'); + INSERT INTO t1(x,y) VALUES(108,'l'); + INSERT INTO t1(x,y) VALUES(109,'m'); + INSERT INTO t1(x,y) VALUES(110,'n'); + INSERT INTO t1(x,y) VALUES(111,'o'); + INSERT INTO t1(x,y) VALUES(112,'p'); + INSERT INTO t1(x,y) VALUES(113,'q'); + INSERT INTO t1(x,y) VALUES(114,'r'); + INSERT INTO t1(x,y) VALUES(115,'s'); + INSERT INTO t1(x,y) VALUES(116,'t'); + INSERT INTO t1(x,y) VALUES(117,'u'); + INSERT INTO t1(x,y) VALUES(118,'v'); + INSERT INTO t1(x,y) VALUES(119,'w'); + INSERT INTO t1(x,y) VALUES(120,'x'); + INSERT INTO t1(x,y) VALUES(121,'y'); + INSERT INTO t1(x,y) VALUES(122,'z'); + INSERT INTO t1(x,y) VALUES(123,'{'); + INSERT INTO t1(x,y) 
VALUES(124,'|'); + INSERT INTO t1(x,y) VALUES(125,'}'); + INSERT INTO t1(x,y) VALUES(126,'~'); + INSERT INTO t1(x,y) VALUES(127,CAST(x'7f' AS TEXT)); + SELECT count(*) FROM t1; + } +} {127} +do_test like2-1.2 { + db eval { + CREATE TABLE t2(x INT, y COLLATE NOCASE); + INSERT INTO t2 SELECT * FROM t1; + CREATE INDEX i2 ON t2(y); + SELECT count(*) FROM t2; + } +} {127} +do_test like2-1.3 { + db eval { + CREATE TABLE t3(x INT, y COLLATE NOCASE); + INSERT INTO t3 SELECT x, 'abc' || y || 'xyz' FROM t1; + CREATE INDEX i3 ON t3(y); + SELECT count(*) FROM t2; + } +} {127} +do_test like-2.32.1 { + db eval "SELECT x FROM t1 WHERE y LIKE ' %'" +} {32} +do_test like-2.32.2 { + db eval "SELECT x FROM t2 WHERE y LIKE ' %'" +} {32} +do_test like-2.32.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc %'" +} {32} +do_test like-2.33.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '!%'" +} {33} +do_test like-2.33.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '!%'" +} {33} +do_test like-2.33.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc!%'" +} {33} +do_test like-2.34.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\"%'" +} {34} +do_test like-2.34.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\"%'" +} {34} +do_test like-2.34.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\"%'" +} {34} +do_test like-2.35.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '#%'" +} {35} +do_test like-2.35.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '#%'" +} {35} +do_test like-2.35.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc#%'" +} {35} +do_test like-2.36.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\$%'" +} {36} +do_test like-2.36.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\$%'" +} {36} +do_test like-2.36.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\$%'" +} {36} +do_test like-2.38.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '&%'" +} {38} +do_test like-2.38.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '&%'" +} {38} +do_test like-2.38.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc&%'" +} {38} +do_test like-2.39.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '''%'" +} {39} +do_test like-2.39.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '''%'" +} {39} +do_test like-2.39.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc''%'" +} {39} +do_test like-2.40.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '(%'" +} {40} +do_test like-2.40.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '(%'" +} {40} +do_test like-2.40.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc(%'" +} {40} +do_test like-2.41.1 { + db eval "SELECT x FROM t1 WHERE y LIKE ')%'" +} {41} +do_test like-2.41.2 { + db eval "SELECT x FROM t2 WHERE y LIKE ')%'" +} {41} +do_test like-2.41.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc)%'" +} {41} +do_test like-2.42.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '*%'" +} {42} +do_test like-2.42.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '*%'" +} {42} +do_test like-2.42.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc*%'" +} {42} +do_test like-2.43.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '+%'" +} {43} +do_test like-2.43.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '+%'" +} {43} +do_test like-2.43.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc+%'" +} {43} +do_test like-2.44.1 { + db eval "SELECT x FROM t1 WHERE y LIKE ',%'" +} {44} +do_test like-2.44.2 { + db eval "SELECT x FROM t2 WHERE y LIKE ',%'" +} {44} +do_test like-2.44.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc,%'" +} {44} +do_test like-2.45.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '-%'" +} {45} +do_test like-2.45.2 { + db eval "SELECT x FROM t2 
WHERE y LIKE '-%'" +} {45} +do_test like-2.45.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc-%'" +} {45} +do_test like-2.46.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '.%'" +} {46} +do_test like-2.46.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '.%'" +} {46} +do_test like-2.46.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc.%'" +} {46} +do_test like-2.47.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '/%'" +} {47} +do_test like-2.47.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '/%'" +} {47} +do_test like-2.47.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc/%'" +} {47} +do_test like-2.48.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '0%'" +} {48} +do_test like-2.48.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '0%'" +} {48} +do_test like-2.48.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc0%'" +} {48} +do_test like-2.49.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '1%'" +} {49} +do_test like-2.49.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '1%'" +} {49} +do_test like-2.49.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc1%'" +} {49} +do_test like-2.50.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '2%'" +} {50} +do_test like-2.50.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '2%'" +} {50} +do_test like-2.50.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc2%'" +} {50} +do_test like-2.51.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '3%'" +} {51} +do_test like-2.51.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '3%'" +} {51} +do_test like-2.51.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc3%'" +} {51} +do_test like-2.52.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '4%'" +} {52} +do_test like-2.52.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '4%'" +} {52} +do_test like-2.52.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc4%'" +} {52} +do_test like-2.53.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '5%'" +} {53} +do_test like-2.53.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '5%'" +} {53} +do_test like-2.53.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc5%'" +} {53} +do_test like-2.54.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '6%'" +} {54} +do_test like-2.54.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '6%'" +} {54} +do_test like-2.54.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc6%'" +} {54} +do_test like-2.55.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '7%'" +} {55} +do_test like-2.55.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '7%'" +} {55} +do_test like-2.55.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc7%'" +} {55} +do_test like-2.56.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '8%'" +} {56} +do_test like-2.56.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '8%'" +} {56} +do_test like-2.56.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc8%'" +} {56} +do_test like-2.57.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '9%'" +} {57} +do_test like-2.57.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '9%'" +} {57} +do_test like-2.57.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc9%'" +} {57} +do_test like-2.58.1 { + db eval "SELECT x FROM t1 WHERE y LIKE ':%'" +} {58} +do_test like-2.58.2 { + db eval "SELECT x FROM t2 WHERE y LIKE ':%'" +} {58} +do_test like-2.58.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc:%'" +} {58} +do_test like-2.59.1 { + db eval "SELECT x FROM t1 WHERE y LIKE ';%'" +} {59} +do_test like-2.59.2 { + db eval "SELECT x FROM t2 WHERE y LIKE ';%'" +} {59} +do_test like-2.59.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc;%'" +} {59} +do_test like-2.60.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '<%'" +} {60} +do_test like-2.60.2 { + db eval "SELECT x FROM t2 
WHERE y LIKE '<%'" +} {60} +do_test like-2.60.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc<%'" +} {60} +do_test like-2.61.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '=%'" +} {61} +do_test like-2.61.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '=%'" +} {61} +do_test like-2.61.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc=%'" +} {61} +do_test like-2.62.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '>%'" +} {62} +do_test like-2.62.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '>%'" +} {62} +do_test like-2.62.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc>%'" +} {62} +do_test like-2.63.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '?%'" +} {63} +do_test like-2.63.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '?%'" +} {63} +do_test like-2.63.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc?%'" +} {63} +do_test like-2.64.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '@%'" +} {64} +do_test like-2.64.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '@%'" +} {64} +do_test like-2.64.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc@%'" +} {64} +do_test like-2.65.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'A%'" +} {65 97} +do_test like-2.65.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'A%'" +} {65 97} +do_test like-2.65.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcA%'" +} {65 97} +do_test like-2.66.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'B%'" +} {66 98} +do_test like-2.66.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'B%'" +} {66 98} +do_test like-2.66.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcB%'" +} {66 98} +do_test like-2.67.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'C%'" +} {67 99} +do_test like-2.67.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'C%'" +} {67 99} +do_test like-2.67.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcC%'" +} {67 99} +do_test like-2.68.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'D%'" +} {68 100} +do_test like-2.68.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'D%'" +} {68 100} +do_test like-2.68.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcD%'" +} {68 100} +do_test like-2.69.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'E%'" +} {69 101} +do_test like-2.69.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'E%'" +} {69 101} +do_test like-2.69.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcE%'" +} {69 101} +do_test like-2.70.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'F%'" +} {70 102} +do_test like-2.70.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'F%'" +} {70 102} +do_test like-2.70.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcF%'" +} {70 102} +do_test like-2.71.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'G%'" +} {71 103} +do_test like-2.71.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'G%'" +} {71 103} +do_test like-2.71.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcG%'" +} {71 103} +do_test like-2.72.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'H%'" +} {72 104} +do_test like-2.72.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'H%'" +} {72 104} +do_test like-2.72.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcH%'" +} {72 104} +do_test like-2.73.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'I%'" +} {73 105} +do_test like-2.73.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'I%'" +} {73 105} +do_test like-2.73.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcI%'" +} {73 105} +do_test like-2.74.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'J%'" +} {74 106} +do_test like-2.74.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'J%'" +} {74 106} +do_test like-2.74.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcJ%'" +} {74 106} +do_test 
like-2.75.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'K%'" +} {75 107} +do_test like-2.75.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'K%'" +} {75 107} +do_test like-2.75.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcK%'" +} {75 107} +do_test like-2.76.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'L%'" +} {76 108} +do_test like-2.76.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'L%'" +} {76 108} +do_test like-2.76.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcL%'" +} {76 108} +do_test like-2.77.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'M%'" +} {77 109} +do_test like-2.77.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'M%'" +} {77 109} +do_test like-2.77.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcM%'" +} {77 109} +do_test like-2.78.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'N%'" +} {78 110} +do_test like-2.78.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'N%'" +} {78 110} +do_test like-2.78.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcN%'" +} {78 110} +do_test like-2.79.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'O%'" +} {79 111} +do_test like-2.79.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'O%'" +} {79 111} +do_test like-2.79.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcO%'" +} {79 111} +do_test like-2.80.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'P%'" +} {80 112} +do_test like-2.80.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'P%'" +} {80 112} +do_test like-2.80.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcP%'" +} {80 112} +do_test like-2.81.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'Q%'" +} {81 113} +do_test like-2.81.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'Q%'" +} {81 113} +do_test like-2.81.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcQ%'" +} {81 113} +do_test like-2.82.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'R%'" +} {82 114} +do_test like-2.82.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'R%'" +} {82 114} +do_test like-2.82.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcR%'" +} {82 114} +do_test like-2.83.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'S%'" +} {83 115} +do_test like-2.83.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'S%'" +} {83 115} +do_test like-2.83.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcS%'" +} {83 115} +do_test like-2.84.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'T%'" +} {84 116} +do_test like-2.84.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'T%'" +} {84 116} +do_test like-2.84.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcT%'" +} {84 116} +do_test like-2.85.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'U%'" +} {85 117} +do_test like-2.85.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'U%'" +} {85 117} +do_test like-2.85.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcU%'" +} {85 117} +do_test like-2.86.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'V%'" +} {86 118} +do_test like-2.86.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'V%'" +} {86 118} +do_test like-2.86.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcV%'" +} {86 118} +do_test like-2.87.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'W%'" +} {87 119} +do_test like-2.87.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'W%'" +} {87 119} +do_test like-2.87.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcW%'" +} {87 119} +do_test like-2.88.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'X%'" +} {88 120} +do_test like-2.88.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'X%'" +} {88 120} +do_test like-2.88.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcX%'" +} {88 120} +do_test like-2.89.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'Y%'" +} {89 
121} +do_test like-2.89.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'Y%'" +} {89 121} +do_test like-2.89.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcY%'" +} {89 121} +do_test like-2.90.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'Z%'" +} {90 122} +do_test like-2.90.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'Z%'" +} {90 122} +do_test like-2.90.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcZ%'" +} {90 122} +do_test like-2.91.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\[%'" +} {91} +do_test like-2.91.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\[%'" +} {91} +do_test like-2.91.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\[%'" +} {91} +do_test like-2.92.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\\%'" +} {92} +do_test like-2.92.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\\%'" +} {92} +do_test like-2.92.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\\%'" +} {92} +do_test like-2.93.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\]%'" +} {93} +do_test like-2.93.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\]%'" +} {93} +do_test like-2.93.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\]%'" +} {93} +do_test like-2.94.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '^%'" +} {94} +do_test like-2.94.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '^%'" +} {94} +do_test like-2.94.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc^%'" +} {94} +do_test like-2.96.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '`%'" +} {96} +do_test like-2.96.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '`%'" +} {96} +do_test like-2.96.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc`%'" +} {96} +do_test like-2.97.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'a%'" +} {65 97} +do_test like-2.97.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'a%'" +} {65 97} +do_test like-2.97.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abca%'" +} {65 97} +do_test like-2.98.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'b%'" +} {66 98} +do_test like-2.98.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'b%'" +} {66 98} +do_test like-2.98.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcb%'" +} {66 98} +do_test like-2.99.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'c%'" +} {67 99} +do_test like-2.99.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'c%'" +} {67 99} +do_test like-2.99.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcc%'" +} {67 99} +do_test like-2.100.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'd%'" +} {68 100} +do_test like-2.100.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'd%'" +} {68 100} +do_test like-2.100.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcd%'" +} {68 100} +do_test like-2.101.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'e%'" +} {69 101} +do_test like-2.101.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'e%'" +} {69 101} +do_test like-2.101.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abce%'" +} {69 101} +do_test like-2.102.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'f%'" +} {70 102} +do_test like-2.102.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'f%'" +} {70 102} +do_test like-2.102.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcf%'" +} {70 102} +do_test like-2.103.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'g%'" +} {71 103} +do_test like-2.103.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'g%'" +} {71 103} +do_test like-2.103.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcg%'" +} {71 103} +do_test like-2.104.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'h%'" +} {72 104} +do_test like-2.104.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'h%'" +} {72 104} +do_test like-2.104.3 { + 
db eval "SELECT x FROM t3 WHERE y LIKE 'abch%'" +} {72 104} +do_test like-2.105.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'i%'" +} {73 105} +do_test like-2.105.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'i%'" +} {73 105} +do_test like-2.105.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abci%'" +} {73 105} +do_test like-2.106.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'j%'" +} {74 106} +do_test like-2.106.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'j%'" +} {74 106} +do_test like-2.106.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcj%'" +} {74 106} +do_test like-2.107.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'k%'" +} {75 107} +do_test like-2.107.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'k%'" +} {75 107} +do_test like-2.107.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abck%'" +} {75 107} +do_test like-2.108.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'l%'" +} {76 108} +do_test like-2.108.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'l%'" +} {76 108} +do_test like-2.108.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcl%'" +} {76 108} +do_test like-2.109.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'm%'" +} {77 109} +do_test like-2.109.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'm%'" +} {77 109} +do_test like-2.109.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcm%'" +} {77 109} +do_test like-2.110.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'n%'" +} {78 110} +do_test like-2.110.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'n%'" +} {78 110} +do_test like-2.110.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcn%'" +} {78 110} +do_test like-2.111.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'o%'" +} {79 111} +do_test like-2.111.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'o%'" +} {79 111} +do_test like-2.111.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abco%'" +} {79 111} +do_test like-2.112.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'p%'" +} {80 112} +do_test like-2.112.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'p%'" +} {80 112} +do_test like-2.112.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcp%'" +} {80 112} +do_test like-2.113.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'q%'" +} {81 113} +do_test like-2.113.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'q%'" +} {81 113} +do_test like-2.113.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcq%'" +} {81 113} +do_test like-2.114.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'r%'" +} {82 114} +do_test like-2.114.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'r%'" +} {82 114} +do_test like-2.114.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcr%'" +} {82 114} +do_test like-2.115.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 's%'" +} {83 115} +do_test like-2.115.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 's%'" +} {83 115} +do_test like-2.115.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcs%'" +} {83 115} +do_test like-2.116.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 't%'" +} {84 116} +do_test like-2.116.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 't%'" +} {84 116} +do_test like-2.116.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abct%'" +} {84 116} +do_test like-2.117.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'u%'" +} {85 117} +do_test like-2.117.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'u%'" +} {85 117} +do_test like-2.117.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcu%'" +} {85 117} +do_test like-2.118.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'v%'" +} {86 118} +do_test like-2.118.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'v%'" +} {86 118} +do_test like-2.118.3 { + db eval "SELECT x FROM t3 
WHERE y LIKE 'abcv%'" +} {86 118} +do_test like-2.119.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'w%'" +} {87 119} +do_test like-2.119.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'w%'" +} {87 119} +do_test like-2.119.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcw%'" +} {87 119} +do_test like-2.120.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'x%'" +} {88 120} +do_test like-2.120.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'x%'" +} {88 120} +do_test like-2.120.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcx%'" +} {88 120} +do_test like-2.121.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'y%'" +} {89 121} +do_test like-2.121.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'y%'" +} {89 121} +do_test like-2.121.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcy%'" +} {89 121} +do_test like-2.122.1 { + db eval "SELECT x FROM t1 WHERE y LIKE 'z%'" +} {90 122} +do_test like-2.122.2 { + db eval "SELECT x FROM t2 WHERE y LIKE 'z%'" +} {90 122} +do_test like-2.122.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abcz%'" +} {90 122} +do_test like-2.123.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\173%'" +} {123} +do_test like-2.123.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\173%'" +} {123} +do_test like-2.123.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\173%'" +} {123} +do_test like-2.124.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '|%'" +} {124} +do_test like-2.124.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '|%'" +} {124} +do_test like-2.124.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc|%'" +} {124} +do_test like-2.125.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '\175%'" +} {125} +do_test like-2.125.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '\175%'" +} {125} +do_test like-2.125.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc\175%'" +} {125} +do_test like-2.126.1 { + db eval "SELECT x FROM t1 WHERE y LIKE '~%'" +} {126} +do_test like-2.126.2 { + db eval "SELECT x FROM t2 WHERE y LIKE '~%'" +} {126} +do_test like-2.126.3 { + db eval "SELECT x FROM t3 WHERE y LIKE 'abc~%'" +} {126} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/like.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/like.test --- sqlite3-3.4.2/test/like.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/like.test 2009-06-25 12:35:52.000000000 +0100 @@ -13,7 +13,7 @@ # in particular the optimizations that occur to help those operators # run faster. # -# $Id: like.test,v 1.6 2007/06/11 12:56:15 drh Exp $ +# $Id: like.test,v 1.13 2009/06/07 23:45:11 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -69,12 +69,18 @@ SELECT x FROM t1 WHERE x LIKE 'aBc' ORDER BY 1; } } {ABC abc} -do_test like-1.5 { +do_test like-1.5.1 { execsql { PRAGMA case_sensitive_like=on; SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; } } {abc} +do_test like-1.5.2 { + execsql { + PRAGMA case_sensitive_like; -- no argument; does not change setting + SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; + } +} {abc} do_test like-1.6 { execsql { SELECT x FROM t1 WHERE x GLOB 'abc' ORDER BY 1; @@ -96,6 +102,12 @@ SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; } } {ABC abc} +do_test like-1.10 { + execsql { + PRAGMA case_sensitive_like; -- No argument, does not change setting. 
+ SELECT x FROM t1 WHERE x LIKE 'abc' ORDER BY 1; + } +} {ABC abc} # Tests of the REGEXP operator # @@ -120,7 +132,7 @@ proc test_match {a b} { return [string match $a $b] } - db function match test_match + db function match -argcount 2 test_match execsql { SELECT x FROM t1 WHERE x MATCH '*abc*' ORDER BY 1; } @@ -345,7 +357,7 @@ } 12 do_test like-5.3 { execsql { - CREATE TABLE t2(x COLLATE NOCASE); + CREATE TABLE t2(x TEXT COLLATE NOCASE); INSERT INTO t2 SELECT * FROM t1; CREATE INDEX i2 ON t2(x COLLATE NOCASE); } @@ -381,6 +393,114 @@ do_test like-5.8 { set sqlite_like_count } 12 +do_test like-5.11 { + execsql {PRAGMA case_sensitive_like=off} + set sqlite_like_count 0 + queryplan { + SELECT x FROM t1 WHERE x LIKE 'ABC%' ORDER BY 1 + } +} {ABC {ABC abc xyz} abc abcd nosort {} i1} +do_test like-5.12 { + set sqlite_like_count +} 12 +do_test like-5.13 { + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x LIKE 'ABC%' ORDER BY 1 + } +} {abc ABC {ABC abc xyz} abcd nosort {} i2} +do_test like-5.14 { + set sqlite_like_count +} 0 +do_test like-5.15 { + execsql { + PRAGMA case_sensitive_like=on; + } + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x LIKE 'ABC%' ORDER BY 1 + } +} {ABC {ABC abc xyz} nosort {} i2} +do_test like-5.16 { + set sqlite_like_count +} 12 +do_test like-5.17 { + execsql { + PRAGMA case_sensitive_like=off; + } + set sqlite_like_count 0 + queryplan { + SELECT x FROM t2 WHERE x GLOB 'ABC*' ORDER BY 1 + } +} {ABC {ABC abc xyz} nosort {} i2} +do_test like-5.18 { + set sqlite_like_count +} 12 + +# Boundary case. The prefix for a LIKE comparison is rounded up +# when constructing the comparison. Example: "ab" becomes "ac". +# In other words, the last character is increased by one. +# +# Make sure this happens correctly when the last character is a +# "z" and we are doing case-insensitive comparisons. 
+# +# Ticket #2959 +# +do_test like-5.21 { + execsql { + PRAGMA case_sensitive_like=off; + INSERT INTO t2 VALUES('ZZ-upper-upper'); + INSERT INTO t2 VALUES('zZ-lower-upper'); + INSERT INTO t2 VALUES('Zz-upper-lower'); + INSERT INTO t2 VALUES('zz-lower-lower'); + } + queryplan { + SELECT x FROM t2 WHERE x LIKE 'zz%'; + } +} {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} +do_test like-5.22 { + queryplan { + SELECT x FROM t2 WHERE x LIKE 'zZ%'; + } +} {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} +do_test like-5.23 { + queryplan { + SELECT x FROM t2 WHERE x LIKE 'Zz%'; + } +} {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} +do_test like-5.24 { + queryplan { + SELECT x FROM t2 WHERE x LIKE 'ZZ%'; + } +} {zz-lower-lower zZ-lower-upper Zz-upper-lower ZZ-upper-upper nosort {} i2} +do_test like-5.25 { + queryplan { + PRAGMA case_sensitive_like=on; + CREATE TABLE t3(x TEXT); + CREATE INDEX i3 ON t3(x); + INSERT INTO t3 VALUES('ZZ-upper-upper'); + INSERT INTO t3 VALUES('zZ-lower-upper'); + INSERT INTO t3 VALUES('Zz-upper-lower'); + INSERT INTO t3 VALUES('zz-lower-lower'); + SELECT x FROM t3 WHERE x LIKE 'zz%'; + } +} {zz-lower-lower nosort {} i3} +do_test like-5.26 { + queryplan { + SELECT x FROM t3 WHERE x LIKE 'zZ%'; + } +} {zZ-lower-upper nosort {} i3} +do_test like-5.27 { + queryplan { + SELECT x FROM t3 WHERE x LIKE 'Zz%'; + } +} {Zz-upper-lower nosort {} i3} +do_test like-5.28 { + queryplan { + SELECT x FROM t3 WHERE x LIKE 'ZZ%'; + } +} {ZZ-upper-upper nosort {} i3} + # ticket #2407 # @@ -389,11 +509,229 @@ # do_test like-6.1 { foreach x { 'abc 'bcd 'def 'ax } { - db eval {INSERT INTO t2 VALUES($x)} + set x2 '[string map {' ''} $x]' + db eval "INSERT INTO t2 VALUES($x2)" } execsql { SELECT * FROM t2 WHERE x LIKE '''a%' } } {'abc 'ax} +do_test like-7.1 { + execsql { + SELECT rowid, * FROM t1 WHERE rowid GLOB '1*' ORDER BY rowid; + } +} {1 a 10 ABC 11 CDE 12 {ABC abc xyz}} + +# ticket #3345. +# +# Overloading the LIKE function with -1 for the number of arguments +# will overload both the 2-argument and the 3-argument LIKE. +# +do_test like-8.1 { + db eval { + CREATE TABLE t8(x); + INSERT INTO t8 VALUES('abcdef'); + INSERT INTO t8 VALUES('ghijkl'); + INSERT INTO t8 VALUES('mnopqr'); + SELECT 1, x FROM t8 WHERE x LIKE '%h%'; + SELECT 2, x FROM t8 WHERE x LIKE '%h%' ESCAPE 'x'; + } +} {1 ghijkl 2 ghijkl} +do_test like-8.2 { + proc newlike {args} {return 1} ;# Alternative LIKE always returns TRUE + db function like newlike ;# Uses -1 for nArg in sqlite3_create_function + db cache flush + db eval { + SELECT 1, x FROM t8 WHERE x LIKE '%h%'; + SELECT 2, x FROM t8 WHERE x LIKE '%h%' ESCAPE 'x'; + } +} {1 ghijkl 2 ghijkl} +do_test like-8.3 { + db function like -argcount 2 newlike + db eval { + SELECT 1, x FROM t8 WHERE x LIKE '%h%'; + SELECT 2, x FROM t8 WHERE x LIKE '%h%' ESCAPE 'x'; + } +} {1 abcdef 1 ghijkl 1 mnopqr 2 ghijkl} +do_test like-8.4 { + db function like -argcount 3 newlike + db eval { + SELECT 1, x FROM t8 WHERE x LIKE '%h%'; + SELECT 2, x FROM t8 WHERE x LIKE '%h%' ESCAPE 'x'; + } +} {1 abcdef 1 ghijkl 1 mnopqr 2 abcdef 2 ghijkl 2 mnopqr} + + +ifcapable like_opt { + # Evaluate SQL. Return the result set followed by the number of + # full-scan steps and the number of sorts.
+ # + db close + sqlite3 db test.db + proc count_steps {sql} { + set r [db eval $sql] + lappend r scan [db status step] sort [db status sort] + } + do_test like-9.1 { + count_steps { + SELECT x FROM t2 WHERE x LIKE 'x%' + } + } {xyz scan 0 sort 0} + do_test like-9.2 { + count_steps { + SELECT x FROM t2 WHERE x LIKE '_y%' + } + } {xyz scan 19 sort 0} + do_test like-9.3.1 { + set res [sqlite3_exec_hex db { + SELECT x FROM t2 WHERE x LIKE '%78%25' + }] + } {0 {x xyz}} + ifcapable explain { + do_test like-9.3.2 { + set res [sqlite3_exec_hex db { + EXPLAIN QUERY PLAN SELECT x FROM t2 WHERE x LIKE '%78%25' + }] + regexp {INDEX i2} $res + } {1} + } + do_test like-9.4.1 { + sqlite3_exec_hex db {INSERT INTO t2 VALUES('%ffhello')} + set res [sqlite3_exec_hex db { + SELECT substr(x,2) AS x FROM t2 WHERE +x LIKE '%ff%25' + }] + } {0 {x hello}} + do_test like-9.4.2 { + set res [sqlite3_exec_hex db { + SELECT substr(x,2) AS x FROM t2 WHERE x LIKE '%ff%25' + }] + } {0 {x hello}} + ifcapable explain { + do_test like-9.4.3 { + set res [sqlite3_exec_hex db { + EXPLAIN QUERY PLAN SELECT x FROM t2 WHERE x LIKE '%ff%25' + }] + regexp {INDEX i2} $res + } {0} + } + do_test like-9.5.1 { + set res [sqlite3_exec_hex db { + SELECT x FROM t2 WHERE x LIKE '%fe%25' + }] + } {0 {}} + ifcapable explain { + do_test like-9.5.2 { + set res [sqlite3_exec_hex db { + EXPLAIN QUERY PLAN SELECT x FROM t2 WHERE x LIKE '%fe%25' + }] + regexp {INDEX i2} $res + } {1} + } +} + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + set ::sqlite_like_count 0 + return [concat [execsql $sql] scan $::sqlite_search_count \ + like $::sqlite_like_count] +} + +# The LIKE and GLOB optimizations do not work on columns with +# affinity other than TEXT. 
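One way to see the affinity restriction stated above is to compare query plans for a TEXT column against a numeric one. The table and index names in this sketch are hypothetical, and the NOCASE collation is used because case_sensitive_like defaults to off.

    # Illustrative sketch only -- hypothetical table and index names.
    # The prefix LIKE can use the index on the TEXT column; on the
    # INTEGER column the pattern is evaluated row by row via like().
    db eval {
      CREATE TABLE affdemo(i INTEGER, t TEXT COLLATE nocase);
      CREATE INDEX affdemo_t ON affdemo(t);
    }
    db eval { EXPLAIN QUERY PLAN SELECT i FROM affdemo WHERE t LIKE '12%' }
    db eval { EXPLAIN QUERY PLAN SELECT i FROM affdemo WHERE i LIKE '12%' }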
+# Ticket #3901 +# +do_test like-10.1 { + db close + sqlite3 db test.db + execsql { + CREATE TABLE t10( + a INTEGER PRIMARY KEY, + b INTEGER COLLATE nocase UNIQUE, + c NUMBER COLLATE nocase UNIQUE, + d BLOB COLLATE nocase UNIQUE, + e COLLATE nocase UNIQUE, + f TEXT COLLATE nocase UNIQUE + ); + INSERT INTO t10 VALUES(1,1,1,1,1,1); + INSERT INTO t10 VALUES(12,12,12,12,12,12); + INSERT INTO t10 VALUES(123,123,123,123,123,123); + INSERT INTO t10 VALUES(234,234,234,234,234,234); + INSERT INTO t10 VALUES(345,345,345,345,345,345); + INSERT INTO t10 VALUES(45,45,45,45,45,45); + } + count { + SELECT a FROM t10 WHERE b LIKE '12%' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.2 { + count { + SELECT a FROM t10 WHERE c LIKE '12%' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.3 { + count { + SELECT a FROM t10 WHERE d LIKE '12%' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.4 { + count { + SELECT a FROM t10 WHERE e LIKE '12%' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.5 { + count { + SELECT a FROM t10 WHERE f LIKE '12%' ORDER BY a; + } +} {12 123 scan 3 like 0} +do_test like-10.6 { + count { + SELECT a FROM t10 WHERE a LIKE '12%' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.10 { + execsql { + CREATE TABLE t10b( + a INTEGER PRIMARY KEY, + b INTEGER UNIQUE, + c NUMBER UNIQUE, + d BLOB UNIQUE, + e UNIQUE, + f TEXT UNIQUE + ); + INSERT INTO t10b SELECT * FROM t10; + } + count { + SELECT a FROM t10b WHERE b GLOB '12*' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.11 { + count { + SELECT a FROM t10b WHERE c GLOB '12*' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.12 { + count { + SELECT a FROM t10b WHERE d GLOB '12*' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.13 { + count { + SELECT a FROM t10b WHERE e GLOB '12*' ORDER BY a; + } +} {12 123 scan 5 like 6} +do_test like-10.14 { + count { + SELECT a FROM t10b WHERE f GLOB '12*' ORDER BY a; + } +} {12 123 scan 3 like 0} +do_test like-10.15 { + count { + SELECT a FROM t10b WHERE a GLOB '12*' ORDER BY a; + } +} {12 123 scan 5 like 6} + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/limit.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/limit.test --- sqlite3-3.4.2/test/limit.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/limit.test 2009-06-05 18:03:32.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the LIMIT ... OFFSET ... clause # of SELECT statements. # -# $Id: limit.test,v 1.30 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: limit.test,v 1.32 2008/08/02 03:50:39 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -24,7 +24,7 @@ BEGIN; } for {set i 1} {$i<=32} {incr i} { - for {set j 0} {pow(2,$j)<$i} {incr j} {} + for {set j 0} {(1<<$j)<$i} {incr j} {} execsql "INSERT INTO t1 VALUES([expr {32-$i}],[expr {10-$j}])" } execsql { @@ -445,4 +445,28 @@ } {} } ;# ifcapable subquery +# Test error processing. 
+# +do_test limit-12.1 { + catchsql { + SELECT * FROM t1 LIMIT replace(1) + } +} {1 {wrong number of arguments to function replace()}} +do_test limit-12.2 { + catchsql { + SELECT * FROM t1 LIMIT 5 OFFSET replace(1) + } +} {1 {wrong number of arguments to function replace()}} +do_test limit-12.3 { + catchsql { + SELECT * FROM t1 LIMIT x + } +} {1 {no such column: x}} +do_test limit-12.4 { + catchsql { + SELECT * FROM t1 LIMIT 1 OFFSET x + } +} {1 {no such column: x}} + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/loadext2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/loadext2.test --- sqlite3-3.4.2/test/loadext2.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/loadext2.test 2009-06-05 18:03:32.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this script is automatic extension loading and the # sqlite3_auto_extension() API. # -# $Id: loadext2.test,v 1.2 2007/04/06 21:42:22 drh Exp $ +# $Id: loadext2.test,v 1.3 2008/03/19 16:08:54 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -140,4 +140,5 @@ sqlite3_reset_auto_extension +autoinstall_test_functions finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/loadext.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/loadext.test --- sqlite3-3.4.2/test/loadext.test 2007-06-12 13:18:02.000000000 +0100 +++ sqlite3-3.6.16/test/loadext.test 2009-06-12 03:37:54.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is extension loading. # -# $Id: loadext.test,v 1.9 2007/04/06 21:42:22 drh Exp $ +# $Id: loadext.test,v 1.17 2009/03/20 09:09:37 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -23,11 +23,48 @@ # The name of the test extension varies by operating system. # -if {$::tcl_platform(platform) eq "windows"} { +if {$::tcl_platform(platform) eq "windows" || $::tcl_platform(platform) eq "os2"} { set testextension ./testloadext.dll } else { set testextension ./libtestloadext.so } +set gcc_shared "-shared -fPIC" +if {$::tcl_platform(os) eq "Darwin"} { + set gcc_shared -dynamiclib +} + +# The error messages tested by this file are operating system dependent +# (because they are returned by sqlite3OsDlError()). For now, they only +# work with UNIX (and probably only certain kinds of UNIX). +# +# When a shared-object cannot be opened because it does not exist, the +# format of the message returned is: +# +# [format $dlerror_nosuchfile ] +# +# When a shared-object cannot be opened because it consists of the 4 +# characters "blah" only, we expect the error message to be: +# +# [format $dlerror_notadll ] +# +# When a symbol cannot be found within an open shared-object, the error +# message should be: +# +# [format $dlerror_nosymbol ] +# +# The exact error messages are not important. The important bit is +# that SQLite is correctly copying the message from xDlError(). +# +set dlerror_nosuchfile \ + {%s: cannot open shared object file: No such file or directory} +set dlerror_notadll {%s: file too short} +set dlerror_nosymbol {%s: undefined symbol: %s} + +if {$::tcl_platform(os) eq "Darwin"} { + set dlerror_nosuchfile {dlopen(%s, 10): image not found} + set dlerror_notadll {dlopen(%1$s, 10): no suitable image found.*} + set dlerror_nosymbol {dlsym(XXX, %2$s): symbol not found} +} # Make sure the test extension actually exists. If it does not # exist, try to create it. 
If unable to create it, then skip this @@ -36,9 +73,11 @@ if {![file exists $testextension]} { set srcdir [file dir $testdir]/src set testextsrc $srcdir/test_loadext.c - if {[catch { - exec gcc -Wall -I$srcdir -I. -g -shared $testextsrc -o $testextension - } msg]} { + + set cmdline [concat exec gcc $gcc_shared] + lappend cmdline -Wall -I$srcdir -I. -g $testextsrc -o $testextension + + if {[catch $cmdline msg]} { puts "Skipping loadext tests: Test extension not built..." puts $msg finish_test @@ -95,13 +134,12 @@ # Try to load an extension for which the file does not exist. # do_test loadext-2.1 { + file delete -force ${testextension}xx set rc [catch { sqlite3_load_extension db "${testextension}xx" } msg] list $rc $msg -} [list 1 [subst -nocommands \ - {unable to open shared library [${testextension}xx]} -]] +} [list 1 [format $dlerror_nosuchfile ${testextension}xx]] # Try to load an extension for which the file is not a shared object # @@ -112,10 +150,9 @@ set rc [catch { sqlite3_load_extension db "${testextension}xx" } msg] - list $rc $msg -} [list 1 [subst -nocommands \ - {unable to open shared library [${testextension}xx]} -]] + set expected_error_pattern [format $dlerror_notadll ${testextension}xx] + list $rc [string match $expected_error_pattern $msg] +} [list 1 1] # Try to load an extension for which the file is present but the # entry point is not. @@ -124,10 +161,11 @@ set rc [catch { sqlite3_load_extension db $testextension icecream } msg] + if {$::tcl_platform(os) eq "Darwin"} { + regsub {0x[1234567890abcdefABCDEF]*} $msg XXX msg + } list $rc $msg -} [list 1 [subst -nocommands \ - {no entry point [icecream] in shared library [$testextension]} -]] +} [list 1 [format $dlerror_nosymbol $testextension icecream]] # Try to load an extension for which the entry point fails (returns non-zero) # @@ -151,11 +189,14 @@ } } {1 {no such function: half}} do_test loadext-3.2 { - catchsql { + set res [catchsql { SELECT load_extension($::testextension) + }] + if {$::tcl_platform(os) eq "Darwin"} { + regsub {0x[1234567890abcdefABCDEF]*} $res XXX res } -} [list 1 "no entry point \[sqlite3_extension_init\]\ - in shared library \[$testextension\]"] + set res +} [list 1 [format $dlerror_nosymbol $testextension sqlite3_extension_init]] do_test loadext-3.3 { catchsql { SELECT load_extension($::testextension,'testloadext_init') @@ -166,6 +207,28 @@ SELECT half(5); } } {0 2.5} +do_test loadext-3.5 { + db eval { + SELECT sqlite3_status('MEMORY_USED') AS mused + } break + puts -nonewline " (memory_used=$mused) " + expr {$mused>0} +} {1} +do_test loadext-3.6 { + catchsql { + SELECT sqlite3_status('MEMORY_USED_X') AS mused + } +} {1 {unknown status property: MEMORY_USED_X}} +do_test loadext-3.7 { + catchsql { + SELECT sqlite3_status(4.53) AS mused + } +} {1 {unknown status type}} +do_test loadext-3.8 { + catchsql { + SELECT sqlite3_status(23) AS mused + } +} {1 {sqlite3_status(23,...) 
returns 21}} # Ticket #1863 # Make sure the extension loading mechanism will not work unless it @@ -192,6 +255,20 @@ } } {1 {not authorized}} +source $testdir/malloc_common.tcl + +# Malloc failure in sqlite3_auto_extension and sqlite3_load_extension +# +do_malloc_test loadext-5 -tclprep { + sqlite3_reset_auto_extension +} -tclbody { + if {[autoinstall_test_functions]==7} {error "out of memory"} +} +do_malloc_test loadext-6 -tclbody { + db enable_load_extension 1 + sqlite3_load_extension db $::testextension testloadext_init +} +autoinstall_test_functions finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock2.test --- sqlite3-3.4.2/test/lock2.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/lock2.test 2009-06-25 12:24:39.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is database locks between competing processes. # -# $Id: lock2.test,v 1.8 2007/08/12 20:07:59 drh Exp $ +# $Id: lock2.test,v 1.11 2009/05/01 10:55:34 danielk1977 Exp $ set testdir [file dirname $argv0] @@ -42,6 +42,9 @@ if { $line == "OVER" } { return $r } + if {[eof $chan]} { + return "ERROR: Child process hung up" + } append r $line } } @@ -49,7 +52,7 @@ # Write the main loop for the child testfixture processes into file # tf_main.tcl. The parent (this script) interacts with the child processes # via a two way pipe. The parent writes a script to the stdin of the child -# process, followed by the word "OVER" on a line of it's own. The child +# process, followed by the word "OVER" on a line of its own. The child # process evaluates the script and writes the results to stdout, followed # by an "OVER" of its own. set f [open tf_main.tcl w] @@ -92,7 +95,7 @@ # do_test lock2-1.1 { set ::tf1 [launch_testfixture] - testfixture $::tf1 "set sqlite_pending_byte $::sqlite_pending_byte" + testfixture $::tf1 "sqlite3_test_control_pending_byte $::sqlite_pending_byte" testfixture $::tf1 { sqlite3 db test.db -key xyzzy db eval {select * from sqlite_master} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock3.test --- sqlite3-3.4.2/test/lock3.test 2007-03-27 15:43:05.000000000 +0100 +++ sqlite3-3.6.16/test/lock3.test 2009-06-25 12:22:34.000000000 +0100 @@ -13,7 +13,7 @@ # DEFERRED, IMMEDIATE, and EXCLUSIVE keywords as modifiers to the # BEGIN command. # -# $Id: lock3.test,v 1.1 2004/10/05 02:41:43 drh Exp $ +# $Id: lock3.test,v 1.4 2009/03/28 15:04:24 drh Exp $ set testdir [file dirname $argv0] @@ -23,7 +23,8 @@ # sample data into the database. # do_test lock3-1.1 { - sqlite3 db2 test.db + file mkdir tempdir/t1/t2/t3 + sqlite3 db2 ./tempdir/t1//t2/./t3//./../..//./../../tempdir/..//test.db// execsql { CREATE TABLE t1(a); INSERT INTO t1 VALUES(1); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock4.test --- sqlite3-3.4.2/test/lock4.test 2007-04-06 22:42:22.000000000 +0100 +++ sqlite3-3.6.16/test/lock4.test 2009-06-25 12:24:39.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is database locks. # -# $Id: lock4.test,v 1.4 2007/04/06 21:42:22 drh Exp $ +# $Id: lock4.test,v 1.10 2009/05/06 00:52:41 drh Exp $ set testdir [file dirname $argv0] @@ -49,8 +49,11 @@ # to continue. # do_test lock4-1.2 { + + # Create a script for the second process to run. 
+ # set out [open test2-script.tcl w] - puts $out "set sqlite_pending_byte [set sqlite_pending_byte]" + puts $out "sqlite3_test_control_pending_byte [set sqlite_pending_byte]" puts $out { sqlite3 db2 test2.db db2 eval { @@ -62,18 +65,27 @@ db eval { INSERT INTO t1 VALUES(2); } + db close db2 eval COMMIT exit } close $out + + # Begin a transaction on test.db. db eval { - BEGIN; + BEGIN EXCLUSIVE; INSERT INTO t1 VALUES(1); } + + # Kick off the second process. exec [info nameofexec] ./test2-script.tcl & + + # Wait until the second process has started its transaction on test2.db. while {![file exists test2.db-journal]} { after 10 } + + # Try to write to test2.db. We are locked out. sqlite3 db2 test2.db catchsql { INSERT INTO t2 VALUES(1) @@ -86,6 +98,11 @@ while {[file exists test2.db-journal]} { after 10 } + # The other process has committed its transaction on test2.db by + # deleting the journal file. But it might retain the lock for a + # fraction longer + # + after 25 db2 eval { SELECT * FROM t2 } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock5.test --- sqlite3-3.4.2/test/lock5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/lock5.test 2009-06-12 03:37:54.000000000 +0100 @@ -0,0 +1,195 @@ +# 2008 June 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks. +# +# $Id: lock5.test,v 1.6 2008/12/04 12:34:16 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# This file is only run if using the unix backend compiled with the +# SQLITE_ENABLE_LOCKING_STYLE macro. 
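A named VFS is requested when the connection is opened; if that VFS was not compiled in or registered, the open fails and a test file can skip itself. A minimal sketch, using a hypothetical handle name dbcheck:

    # Illustrative sketch only -- "dbcheck" is a hypothetical handle.
    if {[catch {sqlite3 dbcheck test.db -vfs unix-dotfile} msg]} {
      puts "alternative locking styles unavailable: $msg"
    } else {
      dbcheck close
    }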
+db close +if {[catch {sqlite3 db test.db -vfs unix-none} msg]} { + finish_test + return +} +db close +file delete -force test.db.lock + +ifcapable lock_proxy_pragmas { + set ::using_proxy 0 + foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set ::using_proxy $value + } + # Disable the proxy locking for these tests + set env(SQLITE_FORCE_PROXY_LOCKING) "0" +} + + +do_test lock5-dotfile.1 { + sqlite3 db test.db -vfs unix-dotfile + execsql { + BEGIN; + CREATE TABLE t1(a, b); + } +} {} + +do_test lock5-dotfile.2 { + file exists test.db.lock +} {1} + +do_test lock5-dotfile.3 { + execsql COMMIT + file exists test.db.lock +} {0} + +do_test lock5-dotfile.4 { + sqlite3 db2 test.db -vfs unix-dotfile + execsql { + INSERT INTO t1 VALUES('a', 'b'); + SELECT * FROM t1; + } db2 +} {a b} + +do_test lock5-dotfile.5 { + execsql { + BEGIN; + SELECT * FROM t1; + } db2 +} {a b} + +do_test lock5-dotfile.6 { + file exists test.db.lock +} {1} + +do_test lock5-dotfile.7 { + catchsql { SELECT * FROM t1; } +} {1 {database is locked}} + +do_test lock5-dotfile.8 { + execsql { + SELECT * FROM t1; + ROLLBACK; + } db2 +} {a b} + +do_test lock5-dotfile.9 { + catchsql { SELECT * FROM t1; } +} {0 {a b}} + +do_test lock5-dotfile.10 { + file exists test.db.lock +} {0} + +do_test lock5-dotfile.X { + db2 close + execsql {BEGIN EXCLUSIVE} + db close + file exists test.db.lock +} {0} + +##################################################################### + +file delete -force test.db +if {[catch {sqlite3 db test.db -vfs unix-flock} msg]} { + finish_test + return +} + +do_test lock5-flock.1 { + sqlite3 db test.db -vfs unix-flock + execsql { + CREATE TABLE t1(a, b); + BEGIN; + INSERT INTO t1 VALUES(1, 2); + } +} {} + +# Make sure we are not accidentally using the dotfile locking scheme. 
+do_test lock5-flock.2 { + file exists test.db.lock +} {0} + +do_test lock5-flock.3 { + sqlite3 db2 test.db -vfs unix-flock + catchsql { SELECT * FROM t1 } db2 +} {1 {database is locked}} + +do_test lock5-flock.4 { + execsql COMMIT + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2}} + +do_test lock5-flock.5 { + execsql BEGIN + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2}} + +do_test lock5-flock.6 { + execsql {SELECT * FROM t1} + catchsql { SELECT * FROM t1 } db2 +} {1 {database is locked}} + +do_test lock5-flock.7 { + db close + catchsql { SELECT * FROM t1 } db2 +} {0 {1 2}} + +do_test lock5-flock.8 { + db2 close +} {} + +##################################################################### + +do_test lock5-none.1 { + sqlite3 db test.db -vfs unix-none + sqlite3 db2 test.db -vfs unix-none + execsql { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + } +} {} +do_test lock5-none.2 { + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test lock5-flock.3 { + execsql { SELECT * FROM t1 } db2 +} {1 2} +do_test lock5-none.4 { + execsql { + BEGIN; + SELECT * FROM t1; + } db2 +} {1 2} +do_test lock5-none.5 { + execsql COMMIT + execsql {SELECT * FROM t1} db2 +} {1 2} + +ifcapable memorymanage { + do_test lock5-none.6 { + sqlite3_release_memory 1000000 + execsql {SELECT * FROM t1} db2 + } {1 2 3 4} +} + +do_test lock5-flock.X { + db close + db2 close +} {} + +ifcapable lock_proxy_pragmas { + set env(SQLITE_FORCE_PROXY_LOCKING) $::using_proxy +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock6.test --- sqlite3-3.4.2/test/lock6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/lock6.test 2009-02-05 16:31:46.000000000 +0000 @@ -0,0 +1,168 @@ +# 2008 October 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this script is database locks. +# +# $Id: lock6.test,v 1.3 2009/02/05 16:31:46 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Launch another testfixture process to be controlled by this one. A +# channel name is returned that may be passed as the first argument to proc +# 'testfixture' to execute a command. The child testfixture process is shut +# down by closing the channel. +proc launch_testfixture {} { + set prg [info nameofexec] + if {$prg eq ""} { + set prg [file join . testfixture] + } + set chan [open "|$prg tf_main2.tcl" r+] + fconfigure $chan -buffering line + return $chan +} + +# Execute a command in a child testfixture process, connected by two-way +# channel $chan. Return the result of the command, or an error message. +proc testfixture {chan cmd} { + puts $chan $cmd + puts $chan OVER + set r "" + while { 1 } { + set line [gets $chan] + if { $line == "OVER" } { + return $r + } + append r $line + } +} + +# Write the main loop for the child testfixture processes into file +# tf_main2.tcl. The parent (this script) interacts with the child processes +# via a two way pipe. The parent writes a script to the stdin of the child +# process, followed by the word "OVER" on a line of its own. The child +# process evaluates the script and writes the results to stdout, followed +# by an "OVER" of its own. 
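Typical use of the two procs defined above: start a child testfixture, evaluate a script in it over the pipe, then shut it down by closing the channel. A minimal sketch:

    # Illustrative sketch only.  The child evaluates the script and the
    # parent reads everything up to the terminating "OVER" line.
    set chan [launch_testfixture]
    testfixture $chan {
      sqlite3 db test.db
      db eval {SELECT count(*) FROM sqlite_master}
    }
    close $chan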
+set f [open tf_main2.tcl w] +puts $f { + set l [open log w] + set script "" + while {![eof stdin]} { + flush stdout + set line [gets stdin] + puts $l "READ $line" + if { $line == "OVER" } { + catch {eval $script} result + puts $result + puts $l "WRITE $result" + puts OVER + puts $l "WRITE OVER" + flush stdout + set script "" + } else { + append script $line + append script " ; " + } + } + close $l +} +close $f + + +ifcapable lock_proxy_pragmas&&prefer_proxy_locking { + set sqlite_hostid_num 1 + + set using_proxy 0 + foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set using_proxy $value + } + + # Test the lock_proxy_file pragmas. + # + set env(SQLITE_FORCE_PROXY_LOCKING) "1" + + do_test lock6-1.1 { + set ::tf1 [launch_testfixture] + testfixture $::tf1 "sqlite3_test_control_pending_byte $::sqlite_pending_byte" + testfixture $::tf1 { + set sqlite_hostid_num 2 + sqlite3 db test.db -key xyzzy + set lockpath [db eval { + PRAGMA lock_proxy_file=":auto:"; + select * from sqlite_master; + PRAGMA lock_proxy_file; + }] + string match "*test.db:auto:" $lockpath + } + } {1} + + set sqlite_hostid_num 3 + do_test lock6-1.2 { + execsql {pragma lock_status} + } {main unlocked temp closed} + + sqlite3_soft_heap_limit 0 + do_test lock6-1.3 { + sqlite3 db test.db + catchsql { + select * from sqlite_master; + } + } {1 {database is locked}} + + do_test lock6-1.4 { + set lockpath [execsql { + PRAGMA lock_proxy_file=":auto:"; + PRAGMA lock_proxy_file; + } db] + set lockpath + } {{:auto: (not held)}} + + do_test lock6-1.4.1 { + catchsql { + PRAGMA lock_proxy_file="notmine"; + select * from sqlite_master; + } db + } {1 {database is locked}} + + do_test lock6-1.4.2 { + execsql { + PRAGMA lock_proxy_file; + } db + } {notmine} + + do_test lock6-1.5 { + testfixture $::tf1 { + db eval { + BEGIN; + SELECT * FROM sqlite_master; + } + } + } {} + + catch {testfixture $::tf1 {db close}} + + do_test lock6-1.6 { + execsql { + PRAGMA lock_proxy_file="mine"; + select * from sqlite_master; + } db + } {} + + catch {close $::tf1} + set env(SQLITE_FORCE_PROXY_LOCKING) $using_proxy + set sqlite_hostid_num 0 + + sqlite3_soft_heap_limit $soft_limit + +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lock.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lock.test --- sqlite3-3.4.2/test/lock.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/lock.test 2009-06-25 12:44:46.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is database locks. # -# $Id: lock.test,v 1.33 2006/08/16 16:42:48 drh Exp $ +# $Id: lock.test,v 1.40 2009/06/16 17:49:36 drh Exp $ set testdir [file dirname $argv0] @@ -20,7 +20,10 @@ # Create an alternative connection to the database # do_test lock-1.0 { - sqlite3 db2 ./test.db + # Give a complex pathname to stress the path simplification logic in + # the vxworks driver and in test_async. 
+ file mkdir tempdir/t1/t2 + sqlite3 db2 ./tempdir/../tempdir/t1/.//t2/../../..//test.db set dummy {} } {} do_test lock-1.1 { @@ -77,7 +80,7 @@ } {8 9} do_test lock-1.14.1 { catchsql {SELECT * FROM t2} db2 -} {1 {no such table: t2}} +} {0 {8 9}} do_test lock-1.14.2 { catchsql {SELECT * FROM t1} db2 } {0 {2 1}} @@ -347,6 +350,75 @@ } {9} } +do_test lock-6.1 { + execsql { + CREATE TABLE t4(a PRIMARY KEY, b); + INSERT INTO t4 VALUES(1, 'one'); + INSERT INTO t4 VALUES(2, 'two'); + INSERT INTO t4 VALUES(3, 'three'); + } + + set STMT [sqlite3_prepare $DB "SELECT * FROM sqlite_master" -1 TAIL] + sqlite3_step $STMT + + execsql { DELETE FROM t4 } + execsql { SELECT * FROM sqlite_master } db2 + execsql { SELECT * FROM t4 } db2 +} {} + +do_test lock-6.2 { + execsql { + BEGIN; + INSERT INTO t4 VALUES(1, 'one'); + INSERT INTO t4 VALUES(2, 'two'); + INSERT INTO t4 VALUES(3, 'three'); + COMMIT; + } + + execsql { SELECT * FROM t4 } db2 +} {1 one 2 two 3 three} + +do_test lock-6.3 { + execsql { SELECT a FROM t4 ORDER BY a } db2 +} {1 2 3} + +do_test lock-6.4 { + execsql { PRAGMA integrity_check } db2 +} {ok} + +do_test lock-6.5 { + sqlite3_finalize $STMT +} {SQLITE_OK} + +# At one point the following set of conditions would cause SQLite to +# retain a RESERVED or EXCLUSIVE lock after the transaction was committed: +# +# * The journal-mode is set to something other than 'delete', and +# * there exists one or more active read-only statements, and +# * a transaction that modified zero database pages is committed. +# +set temp_status unlocked +if {$TEMP_STORE==3} {set temp_status unknown} +do_test lock-7.1 { + set STMT [sqlite3_prepare $DB "SELECT * FROM sqlite_master" -1 TAIL] + sqlite3_step $STMT +} {SQLITE_ROW} +do_test lock-7.2 { + execsql { PRAGMA lock_status } +} [list main shared temp $temp_status] +do_test lock-7.3 { + execsql { + PRAGMA journal_mode = truncate; + BEGIN; + UPDATE t4 SET a = 10 WHERE 0; + COMMIT; + } + execsql { PRAGMA lock_status } +} [list main shared temp $temp_status] +do_test lock-7.4 { + sqlite3_finalize $STMT +} {SQLITE_OK} + do_test lock-999.1 { rename db2 {} } {} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/lookaside.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/lookaside.test --- sqlite3-3.4.2/test/lookaside.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/lookaside.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,110 @@ +# 2008 August 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests for the lookaside memory allocator. +# +# $Id: lookaside.test,v 1.10 2009/04/09 01:23:49 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !lookaside { + finish_test + return +} + +catch {db close} +sqlite3_shutdown +sqlite3_config_pagecache 0 0 +sqlite3_config_scratch 0 0 +sqlite3_initialize +autoinstall_test_functions +sqlite3 db test.db + +# Make sure sqlite3_db_config() and sqlite3_db_status are working. 
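The lookaside tests below decode the three-element list returned by the test-harness sqlite3_db_status command; based on how the tests use it, the elements are the return code, the current number of lookaside slots in use, and the high-water mark, with the trailing argument acting as a reset flag. A minimal sketch:

    # Illustrative sketch only.
    foreach {rc cur hw} \
        [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break
    puts "lookaside slots in use: $cur (peak $hw, rc=$rc)"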
+# +do_test lookaside-1.1 { + catch {sqlite3_config_error db} +} {0} +do_test lookaside-1.2 { + sqlite3_db_config_lookaside db 1 18 18 +} {0} +do_test lookaside-1.3 { + sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0 +} {0 0 0} +do_test lookaside-1.4 { + db eval {CREATE TABLE t1(w,x,y,z);} + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break + expr {$x==0 && $y<$z && $z==18} +} {0} +do_test lookaside-1.5 { + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 1] break + expr {$x==0 && $y<$z && $z==18} +} {0} +do_test lookaside-1.6 { + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break + expr {$x==0 && $y==$z && $y<18} +} {1} +do_test lookaside-1.7 { + db cache flush + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break + expr {$x==0 && $y==0 && $z<18} +} {1} +do_test lookaside-1.8 { + db cache flush + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 1] break + expr {$x==0 && $y==0 && $z<18} +} {1} +do_test lookaside-1.9 { + db cache flush + sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0 +} {0 0 0} + +do_test lookaside-2.1 { + sqlite3_db_config_lookaside db 0 100 1000 +} {0} +do_test lookaside-2.2 { + db eval {CREATE TABLE t2(x);} + foreach {x y z} [sqlite3_db_status db SQLITE_DBSTATUS_LOOKASIDE_USED 0] break + expr {$x==0 && $y<$z && $z>10 && $z<100} +} {1} +do_test lookaside-2.3 { + sqlite3_db_config_lookaside db 0 50 50 +} {5} ;# SQLITE_BUSY +do_test lookaside-2.4 { + db cache flush + sqlite3_db_config_lookaside db 0 50 50 +} {0} ;# SQLITE_OK +do_test lookaside-2.5 { + sqlite3_db_config_lookaside db 0 -1 50 +} {0} ;# SQLITE_OK +do_test lookaside-2.6 { + sqlite3_db_config_lookaside db 0 50 -1 +} {0} ;# SQLITE_OK + +# sqlite3_db_status() with an invalid verb returns an error. +# +do_test lookaside-3.1 { + sqlite3_db_status db 99999 0 +} {1 0 0} + +# Test that an invalid verb on sqlite3_config() is detected and +# reported as an error. +# +do_test lookaside-4.1 { + db close + sqlite3_shutdown + catch sqlite3_config_error +} {0} +sqlite3_initialize +autoinstall_test_functions + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/main.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/main.test --- sqlite3-3.4.2/test/main.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/main.test 2009-06-25 12:24:39.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is exercising the code in main.c. 
# -# $Id: main.test,v 1.25 2006/02/09 22:24:41 drh Exp $ +# $Id: main.test,v 1.32 2009/04/28 04:51:29 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -85,6 +85,16 @@ CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; } } {0} + do_test main-1.17.2 { + db complete { + EXPLAIN CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; + } + } {0} + do_test main-1.17.3 { + db complete { + EXPLAIN QUERY PLAN CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; + } + } {0} do_test main-1.18 { db complete { CREATE TRIGGER xyz AFTER DELETE abc BEGIN UPDATE pqr; END; @@ -133,26 +143,26 @@ do_test main-1.25 { db complete { CREATE TRIGGER xyz AFTER DELETE backend BEGIN - UPDATE pqr SET a=[;end;];;; + UPDATE cantor SET a=[;end;];;; } } {0} do_test main-1.26 { db complete { CREATE -- a comment - TRIGGER xyz AFTER DELETE backend BEGIN + TRIGGER exy AFTER DELETE backend BEGIN UPDATE pqr SET a=5; } } {0} do_test main-1.27.1 { db complete { CREATE -- a comment - TRIGGERX xyz AFTER DELETE backend BEGIN + TRIGGERX tangentxx AFTER DELETE backend BEGIN UPDATE pqr SET a=5; } } {1} do_test main-1.27.2 { db complete { - CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN + CREATE/**/TRIGGER tiger00 AFTER DELETE backend BEGIN UPDATE pqr SET a=5; } } {0} @@ -160,7 +170,7 @@ do_test main-1.27.3 { db complete { /* */ EXPLAIN -- A comment - CREATE/**/TRIGGER xyz AFTER DELETE backend BEGIN + CREATE/**/TRIGGER ezxyz12 AFTER DELETE backend BEGIN UPDATE pqr SET a=5; } } {0} @@ -194,6 +204,9 @@ EXPLAIN select * from xyz; } } {0} + +} ;# end ifcapable {complete} + } do_test main-1.30 { db complete { @@ -242,8 +255,26 @@ do_test main-1.36 { db complete {hi there/***/;} } {1} +do_test main-1.37 { + db complete {hi there/**} +} {0} +do_test main-1.38 { + db complete {hi [there} +} {0} -} ;# end ifcapable {complete} +ifcapable {trigger} { + # Characters less than \040 can never be part of an identifier. + # Characters greater than \u177 are always identifier characters. + do_test main-1.100 { + db complete "create \037\036\035\034trigger\001\002;" + } {1} + do_test main-1.101 { + db complete "create trigger\200;" + } {1} + do_test main-1.102 { + db complete "create \200trigger;" + } {1} +} # Try to open a database with a corrupt database file. 
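The main-1.x cases above all exercise [db complete], which wraps sqlite3_complete(): it reports whether a string ends in a complete SQL statement, which is how an interactive shell decides whether to keep prompting for input. A minimal sketch:

    # Illustrative sketch only.
    db complete {SELECT * FROM t1}     ;# 0 -- no terminating semicolon
    db complete {SELECT * FROM t1;}    ;# 1 -- complete statement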
@@ -282,6 +313,111 @@ do_test main-3.2.3 { catchsql {select "abc} } {1 {unrecognized token: ""abc"}} +do_test main-3.2.4 { + catchsql {select [abc} +} {1 {unrecognized token: "[abc"}} +do_test main-3.2.5 { + catchsql {select x'4869} +} {1 {unrecognized token: "x'4869"}} +do_test main-3.2.6 { + catchsql {select x'4869'} +} {0 Hi} +do_test main-3.2.7 { + catchsql {select x'48695'} +} {1 {unrecognized token: "x'48695'"}} +do_test main-3.2.8 { + catchsql {select x'486x'} +} {1 {unrecognized token: "x'486x'"}} +do_test main-3.2.9 { + catchsql {select $abc(} +} {1 {unrecognized token: "$abc("}} +do_test main-3.2.10 { + catchsql {select $abc(x} +} {1 {unrecognized token: "$abc(x"}} +set xyz 123 +do_test main-3.2.11 { + catchsql {select $::xyz} +} {0 123} +namespace eval ::testnamespace { + variable xyz 321 +} +do_test main-3.2.12 { + catchsql {select $testnamespace::xyz} +} {0 321} +do_test main-3.2.13 { + catchsql {select $(abc)} +} {1 {unrecognized token: "$"}} +do_test main-3.2.14 { + set hi\u1234x 987 + db eval "select \$hi\u1234x" +} {987} +do_test main-3.2.15 { + catchsql "select 456\u1234" +} [list 1 "unrecognized token: \"456\u1234\""] +do_test main-3.2.16 { + catchsql {select cast(3.14e+4 AS integer)} +} {0 31400} +do_test main-3.2.17 { + catchsql {select cast(3.14e+04 AS integer)} +} {0 31400} +do_test main-3.2.18 { + catchsql {select cast(3.14e+004 AS integer)} +} {0 31400} +do_test main-3.2.19 { + catchsql {select cast(3.14e4 AS integer)} +} {0 31400} +do_test main-3.2.20 { + catchsql {select cast(3.14e04 AS integer)} +} {0 31400} +do_test main-3.2.21 { + catchsql {select cast(3.14e004 AS integer)} +} {0 31400} +do_test main-3.2.16 { + catchsql {select cast(3.14E+4 AS integer)} +} {0 31400} +do_test main-3.2.17 { + catchsql {select cast(3.14E+04 AS integer)} +} {0 31400} +do_test main-3.2.18 { + catchsql {select cast(3.14E+004 AS integer)} +} {0 31400} +do_test main-3.2.19 { + catchsql {select cast(3.14E4 AS integer)} +} {0 31400} +do_test main-3.2.20 { + catchsql {select cast(3.14E04 AS integer)} +} {0 31400} +do_test main-3.2.21 { + catchsql {select cast(3.14E004 AS integer)} +} {0 31400} +do_test main-3.2.22 { + catchsql {select cast(3.14e-4 * 1e8 AS integer)} +} {0 31400} +do_test main-3.2.23 { + catchsql {select cast(3.14E-04 * 1E08 AS integer)} +} {0 31400} +do_test main-3.2.24 { + catchsql {select cast(3.14e-004 * 01.0E+8 AS integer)} +} {0 31400} +do_test main-3.2.25 { + catchsql {select 123/*abc} +} {0 123} +do_test main-3.2.26 { + catchsql {select 123/***abc} +} {0 123} +do_test main-3.2.27 { + catchsql {select 123/*/*2} +} {0 123} +do_test main-3.2.28 { + catchsql {select 123/**/*2} +} {0 246} +do_test main-3.2.29 { + catchsql {select 123/} +} {1 {near "/": syntax error}} +do_test main-3.2.30 { + catchsql {select 123--5} +} {0 123} + do_test main-3.3 { catch {db close} @@ -315,5 +451,47 @@ catchsql {SELECT 'abc' + #9} } {1 {near "#9": syntax error}} - +# The following test-case tests the linked list code used to manage +# sqlite3_vfs structures. 
+if {$::tcl_platform(platform)=="unix" + && [info command sqlite3async_initialize]!=""} { + ifcapable threadsafe { + do_test main-4.1 { + sqlite3_crash_enable 1 + sqlite3_crash_enable 0 + + sqlite3async_initialize "" 1 + sqlite3async_shutdown + + sqlite3_crash_enable 1 + sqlite3async_initialize "" 1 + sqlite3_crash_enable 0 + sqlite3async_shutdown + + sqlite3_crash_enable 1 + sqlite3async_initialize "" 1 + sqlite3async_shutdown + sqlite3_crash_enable 0 + + sqlite3async_initialize "" 1 + sqlite3_crash_enable 1 + sqlite3_crash_enable 0 + sqlite3async_shutdown + + sqlite3async_initialize "" 1 + sqlite3_crash_enable 1 + sqlite3async_shutdown + sqlite3_crash_enable 0 + } {} + do_test main-4.2 { + set rc [catch {sqlite3 db test.db -vfs crash} msg] + list $rc $msg + } {1 {no such vfs: crash}} + do_test main-4.3 { + set rc [catch {sqlite3 db test.db -vfs async} msg] + list $rc $msg + } {1 {no such vfs: async}} + } +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/make-where7.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/make-where7.tcl --- sqlite3-3.4.2/test/make-where7.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/make-where7.tcl 2009-06-25 12:24:39.000000000 +0100 @@ -0,0 +1,120 @@ +#!/usr/bin/tclsh +# +# Run this script to generate randomized test cases for the where7.test +# script. The output will need to be manually copied and pasted into +# the where7.test script. +# +puts "do_test where7-2.1 \173" +puts " db eval \173" +puts " CREATE TABLE t2(a INTEGER PRIMARY KEY,b,c,d,e,f,g);" +set NA 100 +for {set a 1} {$a<=$NA} {incr a} { + set b [expr {$a*11}] + set div3 [expr {int(($a+2)/3)}] + set c [expr {$div3*1001}] + set d [expr {$a*1.001}] + set e [expr {$div3*100.1}] + set x [expr {$a%26}] + set f [string range {abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz} \ + $x [expr {$x+8}]] + set div5 [expr {int(($a+7)/5)}] + set x [expr {$div5%26}] + set g [string range {zyxwvutsrqponmlkjihgfedcbazyxwvutsrqponmlkjihgfedcba} \ + $x [expr {$x+6}]] + puts " INSERT INTO t2 VALUES($a,$b,$c,$d,$e,'$f','$g');" + lappend fidx($f) $a + lappend gidx($g) $a + set gof($a) $g + set fof($a) $f + + set expr "a=$a" + set term($expr) $a + set expr "((a BETWEEN [expr {$a-1}] AND [expr {$a+1}]) AND a!=$a)" + set x {} + if {$a>1} {set x [expr {$a-1}]} + if {$a<$NA} {lappend x [expr {$a+1}]} + set term($expr) $x + set expr "b=$b" + set term($expr) $a + set expr "b=[expr {$a*11+3}]" + set term($expr) {} + set expr "c=$c" + lappend term($expr) $a + set expr "(d>=$a.0 AND d<[expr {$a+1.0}] AND d NOT NULL)" + lappend term($expr) $a + set expr "f='$f'" + lappend term($expr) $a + set expr \ + "(f GLOB '?[string range $f 1 4]*' AND f GLOB '[string range $f 0 3]*')" + lappend term($expr) $a + set expr "(g='$g' AND f GLOB '[string range $f 0 4]*')" + lappend term($expr) $a +} +puts " CREATE INDEX t2b ON t2(b);" +puts " CREATE INDEX t2c ON t2(c);" +puts " CREATE INDEX t2d ON t2(d);" +puts " CREATE INDEX t2e ON t2(e);" +puts " CREATE INDEX t2f ON t2(f);" +puts " CREATE INDEX t2g ON t2(g);" +puts " CREATE TABLE t3(a INTEGER PRIMARY KEY,b,c,d,e,f,g);" +puts " INSERT INTO t3 SELECT * FROM t2;" +puts " CREATE INDEX t3b ON t3(b,c);" +puts " CREATE INDEX t3c ON t3(c,e);" +puts " CREATE INDEX t3d ON t3(d,g);" +puts " CREATE INDEX t3e ON t3(e,f,g);" +puts " CREATE INDEX t3f ON t3(f,b,d,c);" +puts " CREATE INDEX t3g ON t3(g,f);" + +puts " \175" +puts "\175 {}" + +set term(b<0) {} +set term(1000000=[expr {int(($NA+2)/3)*1001+1}]) {} +set term(d<0.0) {} +set term(d>1e10) {} +set expr {e IS NULL} +set term($expr) {} 
+set expr {f IS NULL} +set term($expr) {} +set expr {g IS NULL} +set term($expr) {} + +set NT 1000 +set termlist [array names term] +set nterm [llength $termlist] +for {set i 2} {$i<=$NT+1} {incr i} { + set n [expr {int(rand()*10)+2}] + set w {} + unset -nocomplain r + for {set j 0} {$j<$n} {incr j} { + set k [expr {int(rand()*$nterm)}] + set t [lindex $termlist $k] + lappend w $t + foreach a $term($t) { + set r($a) 1 + } + } + if {[info exists seen($w)]} { + incr i -1 + continue + } + set seen($w) 1 + set result [lsort -int [array names r]] + puts "do_test where7-2.$i.1 \173" + puts " count_steps_sort \173" + puts " SELECT a FROM t2" + set wc [join $w "\n OR "] + puts " WHERE $wc" + puts " \175" + puts "\175 {$result scan 0 sort 0}" + puts "do_test where7-2.$i.2 \173" + puts " count_steps_sort \173" + puts " SELECT a FROM t3" + set wc [join $w "\n OR "] + puts " WHERE $wc" + puts " \175" + puts "\175 {$result scan 0 sort 0}" +} +puts "finish_test" diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc2.test --- sqlite3-3.4.2/test/malloc2.test 2007-08-13 13:05:12.000000000 +0100 +++ sqlite3-3.6.16/test/malloc2.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,359 +0,0 @@ -# 2005 March 18 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file attempts to check that the library can recover from a malloc() -# failure when sqlite3_global_recover() is invoked. -# -# $Id: malloc2.test,v 1.5 2006/09/04 18:54:14 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl - -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG=1" - finish_test - return -} - -ifcapable !globalrecover { - finish_test - return -} - -# Generate a checksum based on the contents of the database. If the -# checksum of two databases is the same, and the integrity-check passes -# for both, the two databases are identical. -# -proc cksum {db} { - set ret [list] - ifcapable tempdb { - set sql { - SELECT name FROM sqlite_master WHERE type = 'table' UNION - SELECT name FROM sqlite_temp_master WHERE type = 'table' UNION - SELECT 'sqlite_master' UNION - SELECT 'sqlite_temp_master' - } - } else { - set sql { - SELECT name FROM sqlite_master WHERE type = 'table' UNION - SELECT 'sqlite_master' - } - } - set tbllist [$db eval $sql] - set txt {} - foreach tbl $tbllist { - append txt [$db eval "SELECT * FROM $tbl"] - } - # puts txt=$txt - return [md5 $txt] -} - -proc do_malloc2_test {tn args} { - array set ::mallocopts $args - set sum [cksum db] - - for {set ::n 1} {true} {incr ::n} { - - # Run the SQL. Malloc number $::n is set to fail. A malloc() failure - # may or may not be reported. - sqlite_malloc_fail $::n - do_test malloc2-$tn.$::n.2 { - set res [catchsql [string trim $::mallocopts(-sql)]] - set rc [expr { - 0==[string compare $res {1 {out of memory}}] || - 0==[lindex $res 0] - }] - if {$rc!=1} { - puts "Error: $res" - } - set rc - } {1} - - # If $::n is greater than the number of malloc() calls required to - # execute the SQL, then this test is finished. Break out of the loop. 
- if {[lindex [sqlite_malloc_stat] 2]>0} { - sqlite_malloc_fail -1 - break - } - - # Nothing should work now, because the allocator should refuse to - # allocate any memory. - # - # Update: SQLite now automatically recovers from a malloc() failure. - # So the statement in the test below would work. -if 0 { - do_test malloc2-$tn.$::n.3 { - catchsql {SELECT 'nothing should work'} - } {1 {out of memory}} -} - - # Recover from the malloc failure. - # - # Update: The new malloc() failure handling means that a transaction may - # still be active even if a malloc() has failed. But when these tests were - # written this was not the case. So do a manual ROLLBACK here so that the - # tests pass. - do_test malloc2-$tn.$::n.4 { - sqlite3_global_recover - catch { - execsql { - ROLLBACK; - } - } - expr 0 - } {0} - - # Checksum the database. - do_test malloc2-$tn.$::n.5 { - cksum db - } $sum - - integrity_check malloc2-$tn.$::n.6 - if {$::nErr>1} return - } - unset ::mallocopts -} - -do_test malloc2.1.setup { - execsql { - CREATE TABLE abc(a, b, c); - INSERT INTO abc VALUES(10, 20, 30); - INSERT INTO abc VALUES(40, 50, 60); - CREATE INDEX abc_i ON abc(a, b, c); - } -} {} -do_malloc2_test 1.1 -sql { - SELECT * FROM abc; -} -do_malloc2_test 1.2 -sql { - UPDATE abc SET c = c+10; -} -do_malloc2_test 1.3 -sql { - INSERT INTO abc VALUES(70, 80, 90); -} -do_malloc2_test 1.4 -sql { - DELETE FROM abc; -} -do_test malloc2.1.5 { - execsql { - SELECT * FROM abc; - } -} {} - -do_test malloc2.2.setup { - execsql { - CREATE TABLE def(a, b, c); - CREATE INDEX def_i1 ON def(a); - CREATE INDEX def_i2 ON def(c); - BEGIN; - } - for {set i 0} {$i<20} {incr i} { - execsql { - INSERT INTO def VALUES(randstr(300,300),randstr(300,300),randstr(300,300)); - } - } - execsql { - COMMIT; - } -} {} -do_malloc2_test 2 -sql { - BEGIN; - UPDATE def SET a = randstr(100,100) WHERE (oid%9)==0; - INSERT INTO def SELECT * FROM def WHERE (oid%13)==0; - - CREATE INDEX def_i3 ON def(b); - - UPDATE def SET a = randstr(100,100) WHERE (oid%9)==1; - INSERT INTO def SELECT * FROM def WHERE (oid%13)==1; - - CREATE TABLE def2 AS SELECT * FROM def; - DROP TABLE def; - CREATE TABLE def AS SELECT * FROM def2; - DROP TABLE def2; - - DELETE FROM def WHERE (oid%9)==2; - INSERT INTO def SELECT * FROM def WHERE (oid%13)==2; - COMMIT; -} - -ifcapable tempdb { - do_test malloc2.3.setup { - execsql { - CREATE TEMP TABLE ghi(a, b, c); - BEGIN; - } - for {set i 0} {$i<20} {incr i} { - execsql { - INSERT INTO ghi VALUES(randstr(300,300),randstr(300,300),randstr(300,300)); - } - } - execsql { - COMMIT; - } - } {} - do_malloc2_test 3 -sql { - BEGIN; - CREATE INDEX ghi_i1 ON ghi(a); - UPDATE def SET a = randstr(100,100) WHERE (oid%2)==0; - UPDATE ghi SET a = randstr(100,100) WHERE (oid%2)==0; - COMMIT; - } -} - -############################################################################ -# The test cases below are to increase the code coverage in btree.c and -# pager.c of this test file. The idea is that each malloc() that occurs in -# these two source files should be made to fail at least once. -# -catchsql { - DROP TABLE ghi; -} -do_malloc2_test 4.1 -sql { - SELECT * FROM def ORDER BY oid ASC; - SELECT * FROM def ORDER BY oid DESC; -} -do_malloc2_test 4.2 -sql { - PRAGMA cache_size = 10; - BEGIN; - - -- This will put about 25 pages on the free list. - DELETE FROM def WHERE 1; - - -- Allocate 32 new root pages. This will exercise the 'extract specific - -- page from the freelist' code when in auto-vacuum mode (see the - -- allocatePage() routine in btree.c). 
- CREATE TABLE t1(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t2(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t3(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t4(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t5(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t6(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t7(a UNIQUE, b UNIQUE, c UNIQUE); - CREATE TABLE t8(a UNIQUE, b UNIQUE, c UNIQUE); - - ROLLBACK; -} - -######################################################################## -# Test that the global linked list of database handles works. An assert() -# will fail if there is some problem. -do_test malloc2-5 { - sqlite3 db1 test.db - sqlite3 db2 test.db - sqlite3 db3 test.db - sqlite3 db4 test.db - sqlite3 db5 test.db - - # Close the head of the list: - db5 close - - # Close the end of the list: - db1 close - - # Close a handle from the middle of the list: - db3 close - - # Close the other two. Then open and close one more database, to make - # sure the head of the list was set back to NULL. - db2 close - db4 close - sqlite db1 test.db - db1 close -} {} - -######################################################################## -# Check that if a statement is active sqlite3_global_recover doesn't reset -# the sqlite3_malloc_failed variable. -# -# Update: There is now no sqlite3_malloc_failed variable, so these tests -# are not run. -# -# do_test malloc2-6.1 { -# set ::STMT [sqlite3_prepare $::DB {SELECT * FROM def} -1 DUMMY] -# sqlite3_step $::STMT -# } {SQLITE_ROW} -# do_test malloc2-6.2 { -# sqlite3 db1 test.db -# sqlite_malloc_fail 100 -# catchsql { -# SELECT * FROM def; -# } db1 -# } {1 {out of memory}} -# do_test malloc2-6.3 { -# sqlite3_global_recover -# } {SQLITE_BUSY} -# do_test malloc2-6.4 { -# catchsql { -# SELECT 'hello'; -# } -# } {1 {out of memory}} -# do_test malloc2-6.5 { -# sqlite3_reset $::STMT -# } {SQLITE_OK} -# do_test malloc2-6.6 { -# sqlite3_global_recover -# } {SQLITE_OK} -# do_test malloc2-6.7 { -# catchsql { -# SELECT 'hello'; -# } -# } {0 hello} -# do_test malloc2-6.8 { -# sqlite3_step $::STMT -# } {SQLITE_ERROR} -# do_test malloc2-6.9 { -# sqlite3_finalize $::STMT -# } {SQLITE_SCHEMA} -# do_test malloc2-6.10 { -# db1 close -# } {} - -######################################################################## -# Check that if an in-memory database is being used it is not possible -# to recover from a malloc() failure. -# -# Update: An in-memory database can now survive a malloc() failure, so these -# tests are not run. -# -# ifcapable memorydb { -# do_test malloc2-7.1 { -# sqlite3 db1 :memory: -# list -# } {} -# do_test malloc2-7.2 { -# sqlite_malloc_fail 100 -# catchsql { -# SELECT * FROM def; -# } -# } {1 {out of memory}} -# do_test malloc2-7.3 { -# sqlite3_global_recover -# } {SQLITE_ERROR} -# do_test malloc2-7.4 { -# catchsql { -# SELECT 'hello'; -# } -# } {1 {out of memory}} -# do_test malloc2-7.5 { -# db1 close -# } {} -# do_test malloc2-7.6 { -# sqlite3_global_recover -# } {SQLITE_OK} -# do_test malloc2-7.7 { -# catchsql { -# SELECT 'hello'; -# } -# } {0 hello} -# } - -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc3.test --- sqlite3-3.4.2/test/malloc3.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/malloc3.test 2009-06-12 03:37:54.000000000 +0100 @@ -13,14 +13,16 @@ # correctly. The emphasis of these tests are the _prepare(), _step() and # _finalize() calls. 
# -# $Id: malloc3.test,v 1.10 2007/03/28 01:59:34 drh Exp $ +# $Id: malloc3.test,v 1.24 2008/10/14 15:54:08 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +# +if {!$MEMDEBUG} { + puts "Skipping malloc3 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } @@ -103,16 +105,16 @@ # TEST. Each primitive has a single argument. Primitives are processed in # the order they are specified in the file. # -# A TEST primitive specifies a TCL script as it's argument. When a TEST +# A TEST primitive specifies a TCL script as its argument. When a TEST # directive is encountered the Tcl script is evaluated. Usually, this Tcl # script contains one or more calls to [do_test]. # -# A PREP primitive specifies an SQL script as it's argument. When a PREP +# A PREP primitive specifies an SQL script as its argument. When a PREP # directive is encountered the SQL is evaluated using database connection # [db]. # # The SQL primitives are where the action happens. An SQL primitive must -# contain a single, valid SQL statement as it's argument. When an SQL +# contain a single, valid SQL statement as its argument. When an SQL # primitive is encountered, it is evaluated one or more times to test the # behaviour of the system when malloc() fails during preparation or # execution of said statement. The Nth time the statement is executed, @@ -148,6 +150,7 @@ set ::run_test_script [list] proc TEST {id t} {lappend ::run_test_script -test [list $id $t]} proc PREP {p} {lappend ::run_test_script -prep [string trim $p]} +proc DEBUG {s} {lappend ::run_test_script -debug $s} # SQL -- # @@ -176,7 +179,7 @@ # argument is either 1 or 0, the expected value of the auto-commit flag. # proc TEST_AUTOCOMMIT {id a} { - TEST $id "do_test \$testid { sqlite3_get_autocommit $::DB } {$a}" + TEST $id "do_test \$testid { sqlite3_get_autocommit \$::DB } {$a}" } #-------------------------------------------------------------------------- @@ -194,7 +197,7 @@ } {} } SQL { - CREATE TABLE abc(a, b, c); + CREATE TABLE IF NOT EXISTS abc(a, b, c); } TEST 2 { do_test $testid.1 { @@ -202,7 +205,7 @@ } {abc} } -# Insert a couple of rows into the table. each insert is in it's own +# Insert a couple of rows into the table. each insert is in its own # transaction. test that the table is unpopulated before running the inserts # (and hence after each failure of the first insert), and that it has been # populated correctly after the final insert succeeds. 
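The PREP/SQL/TEST primitives documented in the hunk above form a small declarative language for describing malloc-failure tests. A minimal, hypothetical declaration in that style (test id 99 is made up) might read:

    # Illustrative sketch only -- hypothetical test id.
    PREP {
      CREATE TABLE IF NOT EXISTS abc(a, b, c);
    }
    SQL {
      INSERT INTO abc VALUES(1, 2, 3);
    }
    TEST 99 {
      do_test $testid {
        execsql {SELECT count(*)>0 FROM abc}
      } {1}
    }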
@@ -255,7 +258,7 @@ set sql { BEGIN;DELETE FROM abc; } -for {set i 1} {$i < 100} {incr i} { +for {set i 1} {$i < 15} {incr i} { set a $i set b "String value $i" set c [string repeat X $i] @@ -461,25 +464,27 @@ # Test a simple multi-file transaction # file delete -force test2.db -SQL {ATTACH 'test2.db' AS aux;} -SQL {BEGIN} -SQL {CREATE TABLE aux.tbl2(x, y, z)} -SQL {INSERT INTO tbl2 VALUES(1, 2, 3)} -SQL {INSERT INTO def VALUES(4, 5, 6)} -TEST 30 { - do_test $testid { - execsql { - SELECT * FROM tbl2, def WHERE d = x; - } - } {1 2 3 1 2 3} -} -SQL {COMMIT} -TEST 31 { - do_test $testid { - execsql { - SELECT * FROM tbl2, def WHERE d = x; - } - } {1 2 3 1 2 3} +ifcapable attach { + SQL {ATTACH 'test2.db' AS aux;} + SQL {BEGIN} + SQL {CREATE TABLE aux.tbl2(x, y, z)} + SQL {INSERT INTO tbl2 VALUES(1, 2, 3)} + SQL {INSERT INTO def VALUES(4, 5, 6)} + TEST 30 { + do_test $testid { + execsql { + SELECT * FROM tbl2, def WHERE d = x; + } + } {1 2 3 1 2 3} + } + SQL {COMMIT} + TEST 31 { + do_test $testid { + execsql { + SELECT * FROM tbl2, def WHERE d = x; + } + } {1 2 3 1 2 3} + } } # Test what happens when a malloc() fails while there are other active @@ -518,7 +523,7 @@ # End of test program declaration #-------------------------------------------------------------------------- -proc run_test {arglist {pcstart 0} {iFailStart 1}} { +proc run_test {arglist iRepeat {pcstart 0} {iFailStart 1}} { if {[llength $arglist] %2} { error "Uneven number of arguments to TEST" } @@ -542,16 +547,14 @@ while {$pc*2 < [llength $arglist]} { # Id of this iteration: - set iterid "(pc $pc).(iFail $iFail)" set k [lindex $arglist [expr 2 * $pc]] + set iterid "pc=$pc.iFail=$iFail$k" set v [lindex $arglist [expr 2 * $pc + 1]] switch -- $k { -test { foreach {id script} $v {} - set testid "malloc3-(test $id).$iterid" - eval $script incr pc } @@ -559,26 +562,27 @@ set ::rollback_hook_count 0 set ac [sqlite3_get_autocommit $::DB] ;# Auto-Commit - sqlite_malloc_fail $iFail + sqlite3_memdebug_fail $iFail -repeat 0 set rc [catch {db eval [lindex $v 1]} msg] ;# True error occurs set nac [sqlite3_get_autocommit $::DB] ;# New Auto-Commit - if {$rc != 0 && $nac && !$ac} { # Before [db eval] the auto-commit flag was clear. Now it # is set. Since an error occured we assume this was not a - # commit - therefore a rollback occured. Check that the - # rollback-hook was invoked. + # commit - therefore a rollback occured. Check that the + # rollback-hook was invoked. do_test malloc3-rollback_hook.$iterid { set ::rollback_hook_count } {1} } + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] if {$rc == 0} { - # Successful execution of sql. Our "mallocs-until-failure" - # count should be greater than 0. Otherwise a malloc() failed - # and the error was not reported. - if {[lindex [sqlite_malloc_stat] 2] <= 0} { + # Successful execution of sql. The number of failed malloc() + # calls should be equal to the number of benign failures. + # Otherwise a malloc() failed and the error was not reported. + # + if {$nFail!=$nBenign} { error "Unreported malloc() failure" } @@ -591,16 +595,16 @@ incr pc set iFail 1 - sqlite_malloc_fail 0 integrity_check "malloc3-(integrity).$iterid" - } elseif {[regexp {.*out of memory} $msg]} { - # Out of memory error, as expected + } elseif {[regexp {.*out of memory} $msg] || [db errorcode] == 3082} { + # Out of memory error, as expected. 
+ # integrity_check "malloc3-(integrity).$iterid" incr iFail if {$nac && !$ac} { - if {![lindex $v 0]} { - error "Statement \"[lindex $v 1]\" caused a rollback" + if {![lindex $v 0] && [db errorcode] != 3082} { + # error "Statement \"[lindex $v 1]\" caused a rollback" } for {set i $begin_pc} {$i < $pc} {incr i} { @@ -628,19 +632,33 @@ incr pc } + -debug { + eval $v + incr pc + } + default { error "Unknown switch: $k" } } } } -# Turn of the Tcl interface's prepared statement caching facility. +# Turn of the Tcl interface's prepared statement caching facility. Then +# run the tests with "persistent" malloc failures. +sqlite3_extended_result_codes db 1 db cache size 0 +run_test $::run_test_script 1 -run_test $::run_test_script 9 1 -# run_test [lrange $::run_test_script 0 3] 0 63 -sqlite_malloc_fail 0 +# Close and reopen the db. db close +file delete -force test.db test.db-journal test2.db test2.db-journal +sqlite3 db test.db +sqlite3_extended_result_codes db 1 +set ::DB [sqlite3_connection_pointer db] -pp_check_for_leaks +# Turn of the Tcl interface's prepared statement caching facility in +# the new connnection. Then run the tests with "transient" malloc failures. +db cache size 0 +run_test $::run_test_script 0 +sqlite3_memdebug_fail -1 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc4.test --- sqlite3-3.4.2/test/malloc4.test 2007-04-25 19:22:53.000000000 +0100 +++ sqlite3-3.6.16/test/malloc4.test 2009-06-05 18:03:33.000000000 +0100 @@ -12,7 +12,7 @@ # This file contains tests to ensure that the library handles malloc() failures # correctly. The emphasis in this file is on sqlite3_column_XXX() APIs. # -# $Id: malloc4.test,v 1.3 2006/01/23 07:52:41 danielk1977 Exp $ +# $Id: malloc4.test,v 1.10 2008/02/18 22:24:58 drh Exp $ #--------------------------------------------------------------------------- # NOTES ON EXPECTED BEHAVIOUR @@ -25,10 +25,11 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping malloc4 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } @@ -41,8 +42,8 @@ proc do_stmt_test {id sql} { set ::sql $sql set go 1 - for {set n 1} {$go} {incr n} { - set testid "malloc4-$id.(iFail $n)" + for {set n 0} {$go} {incr n} { + set testid "malloc4-$id.$n" # Prepare the statement do_test ${testid}.1 { @@ -51,7 +52,7 @@ } {1} # Set the Nth malloc() to fail. - sqlite_malloc_fail $n + sqlite3_memdebug_fail $n -repeat 0 # Test malloc failure in the _name(), _name16(), decltype() and # decltype16() APIs. Calls that occur after the malloc() failure should @@ -69,26 +70,26 @@ # about explicitly testing them. 
# do_test ${testid}.2.1 { - set mf1 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf1 [expr [sqlite3_memdebug_pending] < 0] set ::name8 [sqlite3_column_name $::STMT 0] - set mf2 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf2 [expr [sqlite3_memdebug_pending] < 0] expr {$mf1 == $mf2 || $::name8 == ""} } {1} do_test ${testid}.2.2 { - set mf1 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf1 [expr [sqlite3_memdebug_pending] < 0] set ::name16 [sqlite3_column_name16 $::STMT 0] set ::name16 [encoding convertfrom unicode $::name16] set ::name16 [string range $::name16 0 end-1] - set mf2 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf2 [expr [sqlite3_memdebug_pending] < 0] expr {$mf1 == $mf2 || $::name16 == ""} } {1} do_test ${testid}.2.3 { - set mf1 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf1 [expr [sqlite3_memdebug_pending] < 0] set ::name8_2 [sqlite3_column_name $::STMT 0] - set mf2 [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set mf2 [expr [sqlite3_memdebug_pending] < 0] expr {$mf1 == $mf2 || $::name8_2 == ""} } {1} - set ::mallocFailed [expr [lindex [sqlite_malloc_stat] 2] <= 0] + set ::mallocFailed [expr [sqlite3_memdebug_pending] < 0] do_test ${testid}.2.4 { expr { $::name8 == $::name8_2 && $::name16 == $::name8 && !$::mallocFailed || @@ -102,30 +103,30 @@ # running sqlite3_step(), make sure that malloc() is not about to fail. # Memory allocation failures that occur within sqlite3_step() are tested # elsewhere. - set mf [lindex [sqlite_malloc_stat] 2] - sqlite_malloc_fail 0 + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 do_test ${testid}.3 { sqlite3_step $::STMT } {SQLITE_ROW} - sqlite_malloc_fail $mf + sqlite3_memdebug_fail $mf # Test for malloc() failures within _text() and _text16(). # do_test ${testid}.4.1 { set ::text8 [sqlite3_column_text $::STMT 0] - set mf [expr [lindex [sqlite_malloc_stat] 2] <= 0 && !$::mallocFailed] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] expr {$mf==0 || $::text8 == ""} } {1} do_test ${testid}.4.2 { set ::text16 [sqlite3_column_text16 $::STMT 0] set ::text16 [encoding convertfrom unicode $::text16] set ::text16 [string range $::text16 0 end-1] - set mf [expr [lindex [sqlite_malloc_stat] 2] <= 0 && !$::mallocFailed] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] expr {$mf==0 || $::text16 == ""} } {1} do_test ${testid}.4.3 { set ::text8_2 [sqlite3_column_text $::STMT 0] - set mf [expr [lindex [sqlite_malloc_stat] 2] <= 0 && !$::mallocFailed] + set mf [expr [sqlite3_memdebug_pending] < 0 && !$::mallocFailed] expr {$mf==0 || $::text8_2 == "" || ($::text16 == "" && $::text8 != "")} } {1} @@ -133,33 +134,33 @@ # way this can occur is if the string has to be translated from UTF-16 to # UTF-8 before being converted to a numeric value. 
do_test ${testid}.4.4.1 { - set mf [lindex [sqlite_malloc_stat] 2] - sqlite_malloc_fail 0 + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 sqlite3_column_text16 $::STMT 0 - sqlite_malloc_fail $mf + sqlite3_memdebug_fail $mf sqlite3_column_int $::STMT 0 } {0} do_test ${testid}.4.5 { - set mf [lindex [sqlite_malloc_stat] 2] - sqlite_malloc_fail 0 + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 sqlite3_column_text16 $::STMT 0 - sqlite_malloc_fail $mf + sqlite3_memdebug_fail $mf sqlite3_column_int64 $::STMT 0 } {0} do_test ${testid}.4.6 { - set mf [lindex [sqlite_malloc_stat] 2] - sqlite_malloc_fail 0 + set mf [sqlite3_memdebug_pending] + sqlite3_memdebug_fail -1 sqlite3_column_text16 $::STMT 0 - sqlite_malloc_fail $mf + sqlite3_memdebug_fail $mf sqlite3_column_double $::STMT 0 } {0.0} set mallocFailedAfterStep [expr \ - [lindex [sqlite_malloc_stat] 2] <= 0 && !$::mallocFailed + [sqlite3_memdebug_pending] < 0 && !$::mallocFailed ] - sqlite_malloc_fail 0 + sqlite3_memdebug_fail -1 # Test that if a malloc() failed the next call to sqlite3_step() returns # SQLITE_ERROR. If malloc() did not fail, it should return SQLITE_DONE. # @@ -172,7 +173,7 @@ } [expr {$mallocFailedAfterStep ? "SQLITE_NOMEM" : "SQLITE_OK"}] if {$::mallocFailed == 0 && $mallocFailedAfterStep == 0} { - sqlite_malloc_fail 0 + sqlite3_memdebug_fail -1 set go 0 } } @@ -189,6 +190,5 @@ do_stmt_test 1 "SELECT * FROM tbl" -sqlite_malloc_fail 0 +sqlite3_memdebug_fail -1 finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc5.test --- sqlite3-3.4.2/test/malloc5.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/malloc5.test 2009-06-25 12:23:19.000000000 +0100 @@ -12,21 +12,23 @@ # This file contains test cases focused on the two memory-management APIs, # sqlite3_soft_heap_limit() and sqlite3_release_memory(). # -# $Id: malloc5.test,v 1.12 2007/08/12 20:07:59 drh Exp $ - -#--------------------------------------------------------------------------- -# NOTES ON EXPECTED BEHAVIOUR +# Prior to version 3.6.2, calling sqlite3_release_memory() or exceeding +# the configured soft heap limit could cause sqlite to upgrade database +# locks and flush dirty pages to the file system. As of 3.6.2, this is +# no longer the case. In version 3.6.2, sqlite3_release_memory() only +# reclaims clean pages. This test file has been updated accordingly. # -#--------------------------------------------------------------------------- - +# $Id: malloc5.test,v 1.22 2009/04/11 19:09:54 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl db close # Only run these tests if memory debugging is turned on. -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +# +if {!$MEMDEBUG} { + puts "Skipping malloc5 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } @@ -42,35 +44,38 @@ do_test malloc5-1.1 { # Simplest possible test. Call sqlite3_release_memory when there is exactly - # one unused page in a single pager cache. This test case set's the - # value of the ::pgalloc variable, which is used in subsequent tests. + # one unused page in a single pager cache. The page cannot be freed, as + # it is dirty. So sqlite3_release_memory() returns 0. 
# - # Note: Even though executing this statement on an empty database - # modifies 2 pages (the root of sqlite_master and the new root page), - # the sqlite_master root (page 1) is never freed because the btree layer - # retains a reference to it for the entire transaction. execsql { PRAGMA auto_vacuum=OFF; BEGIN; CREATE TABLE abc(a, b, c); } - set ::pgalloc [sqlite3_release_memory] - expr $::pgalloc > 0 -} {1} + sqlite3_release_memory +} {0} + do_test malloc5-1.2 { # Test that the transaction started in the above test is still active. - # Because the page freed had been written to, freeing it required a - # journal sync and exclusive lock on the database file. Test the file - # appears to be locked. + # The lock on the database file should not have been upgraded (this was + # not the case before version 3.6.2). + # sqlite3 db2 test.db - catchsql { - SELECT * FROM abc; - } db2 -} {1 {database is locked}} + execsql { SELECT * FROM sqlite_master } db2 +} {} do_test malloc5-1.3 { - # Again call [sqlite3_release_memory] when there is exactly one unused page - # in the cache. The same amount of memory is required, but no journal-sync - # or exclusive lock should be established. + # Call [sqlite3_release_memory] when there is exactly one unused page + # in the cache belonging to db2. + # + set ::pgalloc [sqlite3_release_memory] + expr $::pgalloc > 0 +} {1} + +do_test malloc5-1.4 { + # Commit the transaction and open a new one. Read 1 page into the cache. + # Because the page is not dirty, it is eligible for collection even + # before the transaction is concluded. + # execsql { COMMIT; BEGIN; @@ -78,43 +83,44 @@ } sqlite3_release_memory } $::pgalloc -do_test malloc5-1.4 { - # Database should not be locked this time. - catchsql { - SELECT * FROM abc; - } db2 -} {0 {}} + do_test malloc5-1.5 { + # Conclude the transaction opened in the previous [do_test] block. This + # causes another page (page 1) to become eligible for recycling. + # + execsql { COMMIT } + sqlite3_release_memory +} $::pgalloc + +do_test malloc5-1.6 { # Manipulate the cache so that it contains two unused pages. One requires # a journal-sync to free, the other does not. db2 close execsql { + BEGIN; SELECT * FROM abc; CREATE TABLE def(d, e, f); } sqlite3_release_memory 500 } $::pgalloc -do_test malloc5-1.6 { - # Database should not be locked this time. The above test case only - # requested 500 bytes of memory, which can be obtained by freeing the page - # that does not require an fsync(). + +do_test malloc5-1.7 { + # Database should not be locked this time. sqlite3 db2 test.db - catchsql { - SELECT * FROM abc; - } db2 + catchsql { SELECT * FROM abc } db2 } {0 {}} -do_test malloc5-1.7 { - # Release another 500 bytes of memory. This time we require a sync(), - # so the database file will be locked afterwards. +do_test malloc5-1.8 { + # Try to release another block of memory. This will fail as the only + # pages currently in the cache are dirty (page 3) or pinned (page 1). db2 close sqlite3_release_memory 500 -} $::pgalloc +} 0 do_test malloc5-1.8 { + # Database is still not locked. + # sqlite3 db2 test.db - catchsql { - SELECT * FROM abc; - } db2 -} {1 {database is locked}} + catchsql { SELECT * FROM abc } db2 +} {0 {}} do_test malloc5-1.9 { execsql { COMMIT; @@ -136,6 +142,7 @@ # Halfway through the query call sqlite3_release_memory(). The goal of this # test is to make sure we don't free pages that are in use (specifically, # the root of table abc). 
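The malloc5-1.* rewrites above encode the post-3.6.2 rule stated in the file header: sqlite3_release_memory() reclaims only clean pages and no longer forces journal syncs or lock upgrades. Condensed into a minimal sketch (same idea as the tests, not their literal text):

    execsql {
      PRAGMA auto_vacuum=OFF;
      BEGIN;
      CREATE TABLE abc(a, b, c);   -- the only unused page is now dirty
    }
    sqlite3_release_memory         ;# returns 0: dirty pages are not reclaimed

    execsql { COMMIT; BEGIN; SELECT * FROM abc }
    sqlite3_release_memory         ;# > 0: the page read by the SELECT is clean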
+ sqlite3_release_memory set nRelease 0 execsql { BEGIN; @@ -174,7 +181,7 @@ } {1 2 3 4 5 6 7 8 9 10 11 12} db2 close -sqlite_malloc_outstanding -clearmaxbytes +puts "Highwater mark: [sqlite3_memory_highwater]" # The following two test cases each execute a transaction in which # 10000 rows are inserted into table abc. The first test case is used @@ -198,29 +205,28 @@ execsql "INSERT INTO abc VALUES($i, $i, '[string repeat X 100]');" } execsql {COMMIT;} - set ::nMaxBytes [sqlite_malloc_outstanding -maxbytes] - if {$::nMaxBytes==""} {set ::nMaxBytes 1000001} - expr $::nMaxBytes > 1000000 + sqlite3_release_memory + sqlite3_memory_highwater 1 + execsql {SELECT * FROM abc} + set nMaxBytes [sqlite3_memory_highwater 1] + puts -nonewline " (Highwater mark: $nMaxBytes) " + expr $nMaxBytes > 1000000 } {1} do_test malloc5-4.2 { sqlite3_release_memory - sqlite_malloc_outstanding -clearmaxbytes sqlite3_soft_heap_limit 100000 - execsql {BEGIN;} - for {set i 0} {$i < 10000} {incr i} { - execsql "INSERT INTO abc VALUES($i, $i, '[string repeat X 100]');" - } - execsql {COMMIT;} - set ::nMaxBytes [sqlite_malloc_outstanding -maxbytes] - if {$::nMaxBytes==""} {set ::nMaxBytes 0} - expr $::nMaxBytes <= 100000 + sqlite3_memory_highwater 1 + execsql {SELECT * FROM abc} + set nMaxBytes [sqlite3_memory_highwater 1] + puts -nonewline " (Highwater mark: $nMaxBytes) " + expr $nMaxBytes <= 100000 } {1} do_test malloc5-4.3 { # Check that the content of table abc is at least roughly as expected. execsql { SELECT count(*), sum(a), sum(b) FROM abc; } -} [list 20000 [expr int(20000.0 * 4999.5)] [expr int(20000.0 * 4999.5)]] +} [list 10000 [expr int(10000.0 * 4999.5)] [expr int(10000.0 * 4999.5)]] # Restore the soft heap limit. sqlite3_soft_heap_limit $::soft_limit @@ -247,7 +253,7 @@ } sqlite3_release_memory } 0 -do_test malloc5-5.1 { +do_test malloc5-5.2 { sqlite3_soft_heap_limit 5000 execsql { COMMIT; @@ -256,6 +262,131 @@ } expr 1 } {1} +sqlite3_soft_heap_limit $::soft_limit + +#------------------------------------------------------------------------- +# The following test cases (malloc5-6.*) test the new global LRU list +# used to determine the pages to recycle when sqlite3_release_memory is +# called and there is more than one pager open. +# +proc nPage {db} { + set bt [btree_from_db $db] + array set stats [btree_pager_stats $bt] + set stats(page) +} +db close +file delete -force test.db test.db-journal test2.db test2.db-journal + +# This block of test-cases (malloc5-6.1.*) prepares two database files +# for the subsequent tests. 
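The nPage helper added above reduces the btree_pager_stats output to the number of pages a connection currently holds in its pager cache; the malloc5-6.* cases then watch how that count changes across two handles as memory is reclaimed from the global LRU list. Roughly, assuming handles db and db2 are open as in these tests:

    # btree_pager_stats returns a flat key/value list; the "page" entry
    # is the number of pages currently held in that pager's cache.
    array set stats [btree_pager_stats [btree_from_db db]]
    puts "pages cached by db: $stats(page)"

    # The combined footprint checked by the tests is simply:
    set total [expr {[nPage db] + [nPage db2]}]

    # Reclaiming about three pages' worth of memory should lower the
    # total by 3, taken from whichever cache holds the least-recently
    # used (and clean) pages.
    sqlite3_release_memory 3000
    expr {[nPage db] + [nPage db2]}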
+do_test malloc5-6.1.1 { + sqlite3 db test.db + execsql { + PRAGMA page_size=1024; + PRAGMA default_cache_size=10; + } + execsql { + PRAGMA temp_store = memory; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc VALUES(randstr(50,50), randstr(75,75), randstr(100,100)); + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(50,50), randstr(75,75), randstr(100,100) FROM abc; + COMMIT; + } + copy_file test.db test2.db + sqlite3 db2 test2.db + list \ + [expr ([file size test.db]/1024)>20] [expr ([file size test2.db]/1024)>20] +} {1 1} +do_test malloc5-6.1.2 { + list [execsql {PRAGMA cache_size}] [execsql {PRAGMA cache_size} db2] +} {10 10} + +do_test malloc5-6.2.1 { + execsql {SELECT * FROM abc} db2 + execsql {SELECT * FROM abc} db + expr [nPage db] + [nPage db2] +} {20} + +do_test malloc5-6.2.2 { + # If we now try to reclaim some memory, it should come from the db2 cache. + sqlite3_release_memory 3000 + expr [nPage db] + [nPage db2] +} {17} +do_test malloc5-6.2.3 { + # Access the db2 cache again, so that all the db2 pages have been used + # more recently than all the db pages. Then try to reclaim 3000 bytes. + # This time, 3 pages should be pulled from the db cache. + execsql { SELECT * FROM abc } db2 + sqlite3_release_memory 3000 + expr [nPage db] + [nPage db2] +} {17} + +do_test malloc5-6.3.1 { + # Now open a transaction and update 2 pages in the db2 cache. Then + # do a SELECT on the db cache so that all the db pages are more recently + # used than the db2 pages. When we try to free memory, SQLite should + # free the non-dirty db2 pages, then the db pages, then finally use + # sync() to free up the dirty db2 pages. The only page that cannot be + # freed is page1 of db2. Because there is an open transaction, the + # btree layer holds a reference to page 1 in the db2 cache. + execsql { + BEGIN; + UPDATE abc SET c = randstr(100,100) + WHERE rowid = 1 OR rowid = (SELECT max(rowid) FROM abc); + } db2 + execsql { SELECT * FROM abc } db + expr [nPage db] + [nPage db2] +} {20} +do_test malloc5-6.3.2 { + # Try to release 7700 bytes. This should release all the + # non-dirty pages held by db2. + sqlite3_release_memory [expr 7*1100] + list [nPage db] [nPage db2] +} {10 3} +do_test malloc5-6.3.3 { + # Try to release another 1000 bytes. This should come fromt the db + # cache, since all three pages held by db2 are either in-use or diry. + sqlite3_release_memory 1000 + list [nPage db] [nPage db2] +} {9 3} +do_test malloc5-6.3.4 { + # Now release 9900 more (about 9 pages worth). This should expunge + # the rest of the db cache. But the db2 cache remains intact, because + # SQLite tries to avoid calling sync(). + if {$::tcl_platform(wordSize)==8} { + sqlite3_release_memory 10177 + } else { + sqlite3_release_memory 9900 + } + list [nPage db] [nPage db2] +} {0 3} +do_test malloc5-6.3.5 { + # But if we are really insistent, SQLite will consent to call sync() + # if there is no other option. UPDATE: As of 3.6.2, SQLite will not + # call sync() in this scenario. So no further memory can be reclaimed. 
+ sqlite3_release_memory 1000 + list [nPage db] [nPage db2] +} {0 3} +do_test malloc5-6.3.6 { + # The referenced page (page 1 of the db2 cache) will not be freed no + # matter how much memory we ask for: + sqlite3_release_memory 31459 + list [nPage db] [nPage db2] +} {0 3} + +db2 close sqlite3_soft_heap_limit $::soft_limit finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc6.test --- sqlite3-3.4.2/test/malloc6.test 2006-06-26 13:50:09.000000000 +0100 +++ sqlite3-3.6.16/test/malloc6.test 2009-06-05 18:03:33.000000000 +0100 @@ -9,127 +9,30 @@ # #*********************************************************************** # This file attempts to check the library in an out-of-memory situation. -# When compiled with -DSQLITE_DEBUG=1, the SQLite library accepts a special -# command (sqlite_malloc_fail N) which causes the N-th malloc to fail. This -# special feature is used to see what happens in the library if a malloc -# were to really fail due to an out-of-memory situation. # -# $Id: malloc6.test,v 1.1 2006/06/26 12:50:09 drh Exp $ +# $Id: malloc6.test,v 1.5 2008/02/18 22:24:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping malloc6 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test malloc6-$tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. 
- # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} set sqlite_os_trace 0 -do_malloc_test 1 -tclprep { +do_malloc_test malloc6-1 -tclprep { db close } -tclbody { if {[catch {sqlite3 db test.db}]} { error "out of memory" } + sqlite3_extended_result_codes db 1 } -sqlbody { DROP TABLE IF EXISTS t1; CREATE TABLE IF NOT EXISTS t1( @@ -149,5 +52,4 @@ set sqlite_open_file_count } {0} -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc7.test --- sqlite3-3.4.2/test/malloc7.test 2006-07-26 15:57:30.000000000 +0100 +++ sqlite3-3.6.16/test/malloc7.test 2009-06-05 18:03:33.000000000 +0100 @@ -11,130 +11,33 @@ # This file contains additional out-of-memory checks (see malloc.tcl) # added to expose a bug in out-of-memory handling for sqlite3_prepare16(). # -# $Id: malloc7.test,v 1.2 2006/07/26 14:57:30 drh Exp $ +# $Id: malloc7.test,v 1.5 2008/02/18 22:24:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping malloc7 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. 
Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test malloc7-$tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} -db eval { +do_malloc_test malloc7-1 -sqlprep { CREATE TABLE t1(a,b,c,d); CREATE INDEX i1 ON t1(b,c); -} - -do_malloc_test 1 -tclbody { +} -tclbody { set sql16 [encoding convertto unicode "SELECT * FROM sqlite_master"] append sql16 "\00\00" set nbyte [string length $sql16] - set ::STMT [sqlite3_prepare16 $::DB $sql16 $nbyte DUMMY] + set ::STMT [sqlite3_prepare16 db $sql16 $nbyte DUMMY] sqlite3_finalize $::STMT } - # Ensure that no file descriptors were leaked. do_test malloc-99.X { catch {db close} @@ -142,5 +45,4 @@ } {0} puts open-file-count=$sqlite_open_file_count -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc8.test --- sqlite3-3.4.2/test/malloc8.test 2007-05-07 20:31:17.000000000 +0100 +++ sqlite3-3.6.16/test/malloc8.test 2009-06-05 18:03:33.000000000 +0100 @@ -11,107 +11,20 @@ # This file contains additional out-of-memory checks (see malloc.tcl) # added to expose a bug in out-of-memory handling for sqlite3_value_text() # -# $Id: malloc8.test,v 1.3 2007/05/07 19:31:17 drh Exp $ +# $Id: malloc8.test,v 1.7 2008/02/18 22:24:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. 
# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping malloc8 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test malloc8-$tn.$::n { - - sqlite_malloc_fail 0 - catch {db close} - sqlite3 db test.db - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} # The setup is a database with UTF-16 encoding that contains a single # large string. We will be running lots of queries against this @@ -120,34 +33,56 @@ # to fail and for sqlite3_value_text() to return 0 even though # sqlite3_value_type() returns SQLITE_TEXT. 
# -db close -file delete -force test.db test.db-journal -sqlite3 db test.db -db eval { + +do_malloc_test malloc8-1 -sqlprep { PRAGMA encoding='UTF-16'; CREATE TABLE t1(a); INSERT INTO t1 VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); -} - - -do_malloc_test 1 -sqlbody { +} -sqlbody { SELECT lower(a), upper(a), quote(a), trim(a), trim('x',a) FROM t1; } -do_malloc_test 2 -sqlbody { +do_malloc_test malloc8-2 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { SELECT replace(a,'x','y'), replace('x',a,'y'), replace('x','y',a) FROM t1; } -do_malloc_test 3 -sqlbody { +do_malloc_test malloc8-3 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { SELECT length(a), substr(a, 4, 4) FROM t1; } -do_malloc_test 4 -sqlbody { - SELECT julianday(a,a) FROM t1; +ifcapable datetime { + do_malloc_test malloc8-4 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); + } -sqlbody { + SELECT julianday(a,a) FROM t1; + } } -do_malloc_test 5 -sqlbody { +do_malloc_test malloc8-5 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { SELECT 1 FROM t1 WHERE a LIKE 'hello' ESCAPE NULL; } -do_malloc_test 6 -sqlbody { +do_malloc_test malloc8-6 -sqlprep { + PRAGMA encoding='UTF-16'; + CREATE TABLE t1(a); + INSERT INTO t1 + VALUES('0123456789aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'); +} -sqlbody { SELECT hex(randomblob(100)); } @@ -157,5 +92,4 @@ set sqlite_open_file_count } {0} -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc9.test --- sqlite3-3.4.2/test/malloc9.test 2007-04-30 22:39:16.000000000 +0100 +++ sqlite3-3.6.16/test/malloc9.test 2009-06-05 18:03:33.000000000 +0100 @@ -11,112 +11,22 @@ # This file contains additional out-of-memory checks (see malloc.tcl) # added to expose a bug in out-of-memory handling for sqlite3_prepare(). # -# $Id: malloc9.test,v 1.1 2007/04/30 21:39:16 drh Exp $ +# $Id: malloc9.test,v 1.5 2008/04/04 12:21:26 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping malloc9 tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). 
-# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test malloc9-$tn.$::n { - - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - sqlite3 db test.db - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || [regexp {out of memory} $msg]}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} - -do_malloc_test 1 -tclprep { +do_malloc_test malloc-9.1 -tclprep { set sql {CREATE TABLE t1(x)} set sqlbytes [string length $sql] append sql {; INSERT INTO t1 VALUES(1)} @@ -133,10 +43,9 @@ } # Ensure that no file descriptors were leaked. -do_test malloc-99.X { +do_test malloc9-99.X { catch {db close} set sqlite_open_file_count } {0} -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocAll.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocAll.test --- sqlite3-3.4.2/test/mallocAll.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocAll.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,67 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all out-of-memory tests. 
+# +# $Id: mallocAll.test,v 1.1 2007/11/26 13:36:00 drh Exp $ + +proc lshift {lvar} { + upvar $lvar l + set ret [lindex $l 0] + set l [lrange $l 1 end] + return $ret +} +while {[set arg [lshift argv]] != ""} { + switch -- $arg { + -sharedpagercache { + sqlite3_enable_shared_cache 1 + } + default { + set argv [linsert $argv 0 $arg] + break + } + } +} + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +rename finish_test really_finish_test +proc finish_test {} {} +set ISQUICK 1 + +set EXCLUDE { + mallocAll.test +} + +if {[sqlite3 -has-codec]} { + # lappend EXCLUDE \ + # conflict.test +} + + +# Files to include in the test. If this list is empty then everything +# that is not in the EXCLUDE list is run. +# +set INCLUDE { +} + +foreach testfile [lsort -dictionary [glob $testdir/*malloc*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } +} +source $testdir/misuse.test + +set sqlite_open_file_count 0 +really_finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocA.test --- sqlite3-3.4.2/test/mallocA.test 2007-05-12 16:00:15.000000000 +0100 +++ sqlite3-3.6.16/test/mallocA.test 2009-06-05 18:03:33.000000000 +0100 @@ -10,109 +10,20 @@ #*********************************************************************** # This file contains additional out-of-memory checks (see malloc.tcl). # -# $Id: mallocA.test,v 1.2 2007/05/12 15:00:15 drh Exp $ +# $Id: mallocA.test,v 1.8 2008/02/18 22:24:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." +if {!$MEMDEBUG} { + puts "Skipping mallocA tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. 
-# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test mallocA-$tn.$::n { - - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db test.db-journal} - catch {file copy test.db.bu test.db} - sqlite3 db test.db - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || [regexp {out of memory} $msg]}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} # Construct a test database # @@ -128,23 +39,33 @@ } db close file copy test.db test.db.bu -sqlite3 db test.db -do_malloc_test 1 -sqlbody { +do_malloc_test mallocA-1 -testdb test.db.bu -sqlbody { ANALYZE } -do_malloc_test 2 -sqlbody { - REINDEX; +do_malloc_test mallocA-1.1 -testdb test.db.bu -sqlbody { + ANALYZE t1 } -do_malloc_test 3 -sqlbody { - REINDEX t1; +do_malloc_test mallocA-1.2 -testdb test.db.bu -sqlbody { + ANALYZE main } -do_malloc_test 4 -sqlbody { - REINDEX main.t1; +do_malloc_test mallocA-1.3 -testdb test.db.bu -sqlbody { + ANALYZE main.t1 } -do_malloc_test 5 -sqlbody { - REINDEX nocase; +ifcapable reindex { + do_malloc_test mallocA-2 -testdb test.db.bu -sqlbody { + REINDEX; + } + do_malloc_test mallocA-3 -testdb test.db.bu -sqlbody { + REINDEX t1; + } + do_malloc_test mallocA-4 -testdb test.db.bu -sqlbody { + REINDEX main.t1; + } + do_malloc_test mallocA-5 -testdb test.db.bu -sqlbody { + REINDEX nocase; + } } # Ensure that no file descriptors were leaked. @@ -154,5 +75,4 @@ } {0} file delete -force test.db.bu -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocB.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocB.test --- sqlite3-3.4.2/test/mallocB.test 2007-07-30 21:36:33.000000000 +0100 +++ sqlite3-3.6.16/test/mallocB.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # that they have little in common. # # -# $Id: mallocB.test,v 1.3 2007/07/26 06:50:06 danielk1977 Exp $ +# $Id: mallocB.test,v 1.9 2008/02/18 22:24:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -21,17 +21,20 @@ # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." 
+if {!$MEMDEBUG} { + puts "Skipping mallocB tests: not compiled with -DSQLITE_MEMDEBUG..." finish_test return } +source $testdir/malloc_common.tcl do_malloc_test mallocB-1 -sqlbody {SELECT - 456} do_malloc_test mallocB-2 -sqlbody {SELECT - 456.1} do_malloc_test mallocB-3 -sqlbody {SELECT random()} -do_malloc_test mallocB-4 -sqlbody {SELECT zeroblob(1000)} -do_malloc_test mallocB-5 -sqlbody {SELECT * FROM (SELECT 1) GROUP BY 1;} +do_malloc_test mallocB-4 -sqlbody {SELECT length(zeroblob(1000))} +ifcapable subquery { + do_malloc_test mallocB-5 -sqlbody {SELECT * FROM (SELECT 1) GROUP BY 1;} +} # The following test checks that there are no resource leaks following a # malloc() failure in sqlite3_set_auxdata(). @@ -41,5 +44,8 @@ # do_malloc_test mallocB-6 -sqlbody { SELECT test_auxdata('hello world'); } -sqlite_malloc_fail 0 +do_malloc_test mallocB-7 -sqlbody { + SELECT strftime(hex(randomblob(50)) || '%Y', 'now') +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc_common.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc_common.tcl --- sqlite3-3.4.2/test/malloc_common.tcl 2007-06-15 18:02:57.000000000 +0100 +++ sqlite3-3.6.16/test/malloc_common.tcl 2009-05-05 04:40:07.000000000 +0100 @@ -1,3 +1,27 @@ +# 2007 May 05 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains common code used by many different malloc tests +# within the test suite. +# +# $Id: malloc_common.tcl,v 1.22 2008/09/23 16:41:30 danielk1977 Exp $ + +# If we did not compile with malloc testing enabled, then do nothing. +# +ifcapable builtin_test { + set MEMDEBUG 1 +} else { + set MEMDEBUG 0 + return 0 +} # Usage: do_malloc_test # @@ -30,72 +54,114 @@ if {[string is integer $tn]} { set tn malloc-$tn } + if {[info exists ::mallocopts(-start)]} { + set start $::mallocopts(-start) + } else { + set start 0 + } + if {[info exists ::mallocopts(-end)]} { + set end $::mallocopts(-end) + } else { + set end 50000 + } + save_prng_state - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test $tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. + foreach ::iRepeat {0 10000000} { + set ::go 1 + for {set ::n $start} {$::go && $::n <= $end} {incr ::n} { + + # If $::iRepeat is 0, then the malloc() failure is transient - it + # fails and then subsequent calls succeed. If $::iRepeat is 1, + # then the failure is persistent - once malloc() fails it keeps + # failing. 
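The -repeat flag passed to sqlite3_memdebug_fail further down is what realizes the two schedules described in the comment above. From an individual test file's point of view the reworked harness is driven declaratively; a typical caller (the table and statements here are placeholders, not taken from the patch) looks like:

    # do_malloc_test runs the body once per failure schedule (transient,
    # then persistent), advancing the index of the failing allocation
    # from -start to -end until a run completes with no failures.
    do_malloc_test example-1 -sqlprep {
      CREATE TABLE t1(a, b);
      INSERT INTO t1 VALUES(1, 2);
    } -sqlbody {
      SELECT a, b FROM t1 ORDER BY a;
    }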
# - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v && [info command db]=="db" && [info exists ::mallocopts(-sqlbody)] - && [db errorcode]!=7} { - set v 999 - } + set zRepeat "transient" + if {$::iRepeat} {set zRepeat "persistent"} + restore_prng_state + foreach file [glob -nocomplain test.db-mj*] {file delete -force $file} + + do_test ${tn}.${zRepeat}.${::n} { + + # Remove all traces of database files test.db and test2.db + # from the file-system. Then open (empty database) "test.db" + # with the handle [db]. + # + catch {db close} + catch {file delete -force test.db} + catch {file delete -force test.db-journal} + catch {file delete -force test2.db} + catch {file delete -force test2.db-journal} + if {[info exists ::mallocopts(-testdb)]} { + file copy $::mallocopts(-testdb) test.db + } + catch { sqlite3 db test.db } + if {[info commands db] ne ""} { + sqlite3_extended_result_codes db 1 + } + sqlite3_db_config_lookaside db 0 0 0 + + # Execute any -tclprep and -sqlprep scripts. + # + if {[info exists ::mallocopts(-tclprep)]} { + eval $::mallocopts(-tclprep) + } + if {[info exists ::mallocopts(-sqlprep)]} { + execsql $::mallocopts(-sqlprep) + } + + # Now set the ${::n}th malloc() to fail and execute the -tclbody + # and -sqlbody scripts. + # + sqlite3_memdebug_fail $::n -repeat $::iRepeat + set ::mallocbody {} + if {[info exists ::mallocopts(-tclbody)]} { + append ::mallocbody "$::mallocopts(-tclbody)\n" + } + if {[info exists ::mallocopts(-sqlbody)]} { + append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" + } - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" + # The following block sets local variables as follows: + # + # isFail - True if an error (any error) was reported by sqlite. + # nFail - The total number of simulated malloc() failures. + # nBenign - The number of benign simulated malloc() failures. + # + set isFail [catch $::mallocbody msg] + set nFail [sqlite3_memdebug_fail -1 -benigncnt nBenign] + # puts -nonewline " (isFail=$isFail nFail=$nFail nBenign=$nBenign) " + + # If one or more mallocs failed, run this loop body again. 
+ # + set go [expr {$nFail>0}] + + if {($nFail-$nBenign)==0} { + if {$isFail} { + set v2 $msg + } else { + set isFail 1 + set v2 1 + } + } elseif {!$isFail} { + set v2 $msg + } elseif { + [info command db]=="" || + [db errorcode]==7 || + $msg=="out of memory" + } { + set v2 1 } else { - set v {1 1} + set v2 $msg + puts [db errorcode] } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 + lappend isFail $v2 + } {1 1} + + if {[info exists ::mallocopts(-cleanup)]} { + catch [list uplevel #0 $::mallocopts(-cleanup)] msg } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg } } unset ::mallocopts + sqlite3_memdebug_fail -1 } - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocC.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocC.test --- sqlite3-3.4.2/test/mallocC.test 2007-08-13 15:44:08.000000000 +0100 +++ sqlite3-3.6.16/test/mallocC.test 2009-06-25 12:23:19.000000000 +0100 @@ -12,60 +12,34 @@ # This file tests aspects of the malloc failure while parsing # CREATE TABLE statements in auto_vacuum mode. # -# $Id: mallocC.test,v 1.2 2007/08/13 12:58:18 drh Exp $ +# $Id: mallocC.test,v 1.10 2009/04/11 16:27:50 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG=1" - finish_test - return -} - -# Generate a checksum based on the contents of the database. If the -# checksum of two databases is the same, and the integrity-check passes -# for both, the two databases are identical. -# -proc cksum {db} { - set ret [list] - ifcapable tempdb { - set sql { - SELECT name FROM sqlite_master WHERE type = 'table' UNION - SELECT name FROM sqlite_temp_master WHERE type = 'table' UNION - SELECT 'sqlite_master' UNION - SELECT 'sqlite_temp_master' - } - } else { - set sql { - SELECT name FROM sqlite_master WHERE type = 'table' UNION - SELECT 'sqlite_master' - } - } - set tbllist [$db eval $sql] - set txt {} - foreach tbl $tbllist { - append txt [$db eval "SELECT * FROM $tbl"] - } - # puts txt=$txt - return [md5 $txt] +if {!$MEMDEBUG} { + puts "Skipping mallocC tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return } proc do_mallocC_test {tn args} { array set ::mallocopts $args - set sum [cksum db] + #set sum [allcksum db] for {set ::n 1} {true} {incr ::n} { # Run the SQL. Malloc number $::n is set to fail. A malloc() failure # may or may not be reported. - sqlite_malloc_fail $::n + sqlite3_memdebug_fail $::n -repeat 1 do_test mallocC-$tn.$::n.1 { set res [catchsql [string trim $::mallocopts(-sql)]] set rc [expr { 0==[string compare $res {1 {out of memory}}] || + [db errorcode] == 3082 || 0==[lindex $res 0] }] if {$rc!=1} { @@ -76,8 +50,8 @@ # If $::n is greater than the number of malloc() calls required to # execute the SQL, then this test is finished. Break out of the loop. - if {[lindex [sqlite_malloc_stat] 2]>0} { - sqlite_malloc_fail -1 + set nFail [sqlite3_memdebug_fail -1] + if {$nFail==0} { break } @@ -98,7 +72,7 @@ # Checksum the database. 
#do_test mallocC-$tn.$::n.3 { - # cksum db + # allcksum db #} $sum #integrity_check mallocC-$tn.$::n.4 @@ -107,10 +81,28 @@ unset ::mallocopts } +sqlite3_extended_result_codes db 1 + execsql { PRAGMA auto_vacuum=1; CREATE TABLE t0(a, b, c); } + +# The number of memory allocation failures is different on 64-bit +# and 32-bit systems due to larger structures on 64-bit systems +# overflowing the lookaside more often. To debug problems, it is +# sometimes helpful to reduce the size of the lookaside allocation +# blocks. But this is normally disabled. +# +if {0} { + db close + sqlite3_shutdown + sqlite3_config_lookaside 50 500 + sqlite3_initialize + autoinstall_test_functions + sqlite3 db test.db +} + do_mallocC_test 1 -sql { BEGIN; -- Allocate 32 new root pages. This will exercise the 'extract specific diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocD.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocD.test --- sqlite3-3.4.2/test/mallocD.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocD.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,65 @@ +# 2007 Aug 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: mallocD.test,v 1.6 2008/02/18 22:24:58 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Only run these tests if memory debugging is turned on. +# +if {!$MEMDEBUG} { + puts "Skipping mallocD tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +db close +sqlite3_simulate_device -char atomic +sqlite3 db test.db -vfs devsym + +set PREP { + PRAGMA page_size = 1024; + CREATE TABLE abc(a, b, c); +} + +do_malloc_test mallocD-1 -sqlprep $PREP -sqlbody { + INSERT INTO abc VALUES(1, 2, 3); +} + +do_malloc_test mallocD-2 -sqlprep $PREP -sqlbody { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + ROLLBACK; +} + +do_malloc_test mallocD-3 -sqlprep $PREP -sqlbody { + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, randstr(1500,1500)); + COMMIT; +} + +ifcapable attach { + do_malloc_test mallocD-4 -sqlprep $PREP -sqlbody { + ATTACH 'test2.db' AS aux; + BEGIN; + CREATE TABLE aux.def(d, e, f); + INSERT INTO abc VALUES(4, 5, 6); + COMMIT; + } +} + +sqlite3_simulate_device -char {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocE.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocE.test --- sqlite3-3.4.2/test/mallocE.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocE.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,48 @@ +# 2007 Aug 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks that tickets #2784 and #2789 have been fixed. +# +# $Id: mallocE.test,v 1.3 2008/02/18 22:24:58 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Only run these tests if memory debugging is turned on. 
+# +if {!$MEMDEBUG} { + puts "Skipping mallocE tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +# ticket #2784 +# +set PREP { + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(x, y, z); +} +do_malloc_test mallocE-1 -sqlprep $PREP -sqlbody { + SELECT p, q FROM (SELECT a+b AS p, b+c AS q FROM t1, t2 WHERE c>5) + LEFT JOIN t2 ON p=x; +} + +# Ticket #2789 +# +do_malloc_test mallocE-2 -sqlprep $PREP -sqlbody { + SELECT x, y2 FROM (SELECT a+b AS x, b+c AS y2 FROM t1, t2 WHERE c>5) + LEFT JOIN t2 USING(x) WHERE y2>11; +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocF.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocF.test --- sqlite3-3.4.2/test/mallocF.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocF.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,76 @@ +# 2007 Aug 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks that tickets #2794, #2795, #2796, and #2797 +# have been fixed. +# +# $Id: mallocF.test,v 1.4 2008/02/18 22:24:58 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Only run these tests if memory debugging is turned on. +# +if {!$MEMDEBUG} { + puts "Skipping mallocF tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +# tickets #2794 and #2795 and #2797 +# +set PREP { + CREATE TABLE t1(x,y); + INSERT INTO t1 VALUES('abc123', 5); + INSERT INTO t1 VALUES('xyz987', 42); +} +do_malloc_test malloeF-1 -sqlprep $PREP -sqlbody { + SELECT * FROM t1 WHERE x GLOB 'abc*' +} + +# ticket #2796 +# +set PREP { + CREATE TABLE t1(x PRIMARY KEY,y UNIQUE); + INSERT INTO t1 VALUES('abc123', 5); + INSERT INTO t1 VALUES('xyz987', 42); +} +do_malloc_test malloeF-2 -sqlprep $PREP -sqlbody { + SELECT x FROM t1 + WHERE y=1 OR y=2 OR y=3 OR y=4 OR y=5 + OR y=6 OR y=7 OR y=8 OR y=9 OR y=10 + OR y=11 OR y=12 OR y=13 OR y=14 OR y=15 + OR y=x +} + +set PREP { + CREATE TABLE t1(x PRIMARY KEY,y UNIQUE); + INSERT INTO t1 VALUES('abc123', 5); + INSERT INTO t1 VALUES('xyz987', 42); +} +do_malloc_test malloeF-3 -sqlprep $PREP -sqlbody { + SELECT x FROM t1 WHERE y BETWEEN 10 AND 29 +} + +# Ticket #2843 +# +set PREP { + CREATE TABLE t1(x); + CREATE TRIGGER r1 BEFORE INSERT ON t1 BEGIN + SELECT 'hello'; + END; +} +do_malloc_test mallocF-4 -sqlprep $PREP -sqlbody { + INSERT INTO t1 VALUES(random()); +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocG.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocG.test --- sqlite3-3.4.2/test/mallocG.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocG.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,68 @@ +# 2007 Aug 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks malloc failures in various obscure operations. 
+# +# $Id: mallocG.test,v 1.5 2008/08/01 18:47:02 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Only run these tests if memory debugging is turned on. +# +if {!$MEMDEBUG} { + puts "Skipping mallocG tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +# Malloc failures while opening a database connection. +# +do_malloc_test mallocG-1 -tclbody { + db close + sqlite3 db test.db +} + +do_malloc_test mallocG-2 -sqlprep { + CREATE TABLE t1(x, y); + CREATE TABLE t2(x INTEGER PRIMARY KEY); +} -sqlbody { + SELECT y FROM t1 WHERE x IN t2; +} + +do_malloc_test mallocG-3 -sqlprep { + CREATE TABLE t1(x UNIQUE); + INSERT INTO t1 VALUES ('hello'); + INSERT INTO t1 VALUES ('out there'); +} -sqlbody { + SELECT * FROM t1 + WHERE x BETWEEN 'a' AND 'z' + AND x BETWEEN 'c' AND 'w' + AND x BETWEEN 'e' AND 'u' + AND x BETWEEN 'g' AND 'r' + AND x BETWEEN 'i' AND 'q' + AND x BETWEEN 'i' AND 'm' +} + +proc utf16 {utf8} { + set utf16 [encoding convertto unicode $utf8] + append utf16 "\x00\x00" + return $utf16 +} + +do_malloc_test mallocG-4 -tclbody { + set rc [sqlite3_complete16 [utf16 "SELECT * FROM t1;"]] + if {$rc==1} {set rc 0} {error "out of memory"} + set rc +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocH.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocH.test --- sqlite3-3.4.2/test/mallocH.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocH.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,73 @@ +# 2008 August 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks malloc failures in various obscure operations. +# +# $Id: mallocH.test,v 1.2 2008/08/01 20:10:09 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Malloc failures in journaling of in-memory databases. +# +do_malloc_test mallocH-1 -tclprep { + db close + sqlite3 db :memory: + db eval { + CREATE TABLE t1(x UNIQUE, y); + INSERT INTO t1 VALUES(1,2); + } +} -sqlbody { + INSERT INTO t1 SELECT x+1, y+100 FROM t1; +} + +# Malloc failures while parsing a CASE expression. +# +do_malloc_test mallocH-2 -sqlbody { + SELECT CASE WHEN 1 THEN 1 END; +} + +# Malloc failures while parsing a EXISTS(SELECT ...) +# +do_malloc_test mallocH-3 -sqlbody { + SELECT 3+EXISTS(SELECT * FROM sqlite_master); +} + +# Malloc failures within the replace() function. +# +do_malloc_test mallocH-3 -sqlbody { + SELECT replace('ababa','a','xyzzy'); +} + +# Malloc failures during EXPLAIN. +# +ifcapable explain { + do_malloc_test mallocH-4 -sqlprep { + CREATE TABLE abc(a PRIMARY KEY, b, c); + } -sqlbody { + EXPLAIN SELECT * FROM abc AS t2 WHERE rowid=1; + EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE rowid=1; + } +} + +# Malloc failure during integrity_check pragma. 
+# +do_malloc_test mallocH-5 -sqlprep { + CREATE TABLE t1(a PRIMARY KEY, b UNIQUE); + CREATE TABLE t2(x,y); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t2 SELECT * FROM t1; +} -sqlbody { + PRAGMA integrity_check; +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocI.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocI.test --- sqlite3-3.4.2/test/mallocI.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocI.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,43 @@ +# 2008 August 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks malloc failures in various obscure operations. +# +# $Id: mallocI.test,v 1.1 2008/08/02 03:50:39 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +# Malloc failures in a view. +# +do_malloc_test mallocI-1 -sqlprep { + CREATE TABLE t1(a,b,c,d); + CREATE VIEW v1 AS SELECT a*b, c*d FROM t1 ORDER BY b-d; +} -sqlbody { + SELECT * FROM v1 +} + +# Malloc failure while trying to service a pragma on a TEMP database. +# +do_malloc_test mallocI-2 -sqlbody { + PRAGMA temp.page_size +} + +# Malloc failure while creating a table from a SELECT statement. +# +do_malloc_test mallocI-3 -sqlprep { + CREATE TABLE t1(a,b,c); +} -sqlbody { + CREATE TABLE t2 AS SELECT b,c FROM t1; +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocJ.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocJ.test --- sqlite3-3.4.2/test/mallocJ.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocJ.test 2009-06-12 03:37:54.000000000 +0100 @@ -0,0 +1,73 @@ +# 2008 August 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks malloc failures in LIMIT operations for +# UPDATE/DELETE statements. 
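The LIMIT and ORDER BY forms of UPDATE and DELETE exercised here are an optional build feature (typically enabled with SQLITE_ENABLE_UPDATE_DELETE_LIMIT), so the cases below sit inside an ifcapable guard and are skipped silently on builds without it. A minimal hypothetical sketch of that guard; the names mallocX-limit and tx are not part of the suite:

    ifcapable {update_delete_limit} {
      do_malloc_test mallocX-limit -sqlprep {
        CREATE TABLE tx(x int, y int);
        INSERT INTO tx VALUES(1, 1);
        INSERT INTO tx VALUES(2, 2);
      } -sqlbody {
        DELETE FROM tx ORDER BY y LIMIT 1;
      }
    }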
+# +# $Id: mallocJ.test,v 1.6 2009/01/09 02:49:32 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +ifcapable {update_delete_limit} { + + do_malloc_test mallocJ-1 -sqlprep { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(x int, y int); + INSERT INTO t1 VALUES(1,1); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(2,1); + INSERT INTO t1 VALUES(2,2); + INSERT INTO t1 VALUES(2,3); + } -sqlbody { + UPDATE t1 SET x=1 ORDER BY y LIMIT 2 OFFSET 2; + UPDATE t1 SET x=2 WHERE y=1 ORDER BY y LIMIT 2 OFFSET 2; + DELETE FROM t1 WHERE x=1 ORDER BY y LIMIT 2 OFFSET 2; + DELETE FROM t1 ORDER BY y LIMIT 2 OFFSET 2; + } + +} + +# ticket #3467 +do_malloc_test mallocJ-2 -sqlprep { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + PRAGMA vdbe_trace=ON; +} -sqlbody { + SELECT a, b, 'abc' FROM t1 + UNION + SELECT b, a, 'xyz' FROM t1 + ORDER BY 2, 3; +} + +# ticket #3478 +do_malloc_test mallocJ-3 -sqlbody { + EXPLAIN COMMIT +} + +# ticket #3485 +do_malloc_test mallocJ-4 -sqlprep { + CREATE TABLE t1(a,b,c); + CREATE TABLE t2(x,y,z); +} -sqlbody { + SELECT * FROM (SELECT a,b FROM t1 UNION ALL SELECT x, y FROM t2) ORDER BY 1 +} + +# coverage testing +do_malloc_test mallocJ-5 -sqlprep { + CREATE TABLE t1(["a"]); +} -sqlbody { + SELECT * FROM t1 +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mallocK.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mallocK.test --- sqlite3-3.4.2/test/mallocK.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mallocK.test 2009-01-08 21:00:03.000000000 +0000 @@ -0,0 +1,72 @@ +# 2008 August 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test script checks malloc failures in WHERE clause analysis. 
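The cases below are generated in a loop rather than written out by hand: each pass appends one more term to a shared SQL string and registers another do_malloc_test, so allocation failures are probed against WHERE clauses of every length up to the final one. A condensed sketch of that idiom, with hypothetical names (mallocX-grow, tx):

    set sql {SELECT * FROM tx WHERE a=1}
    for {set x 1} {$x<5} {incr x} {
      append sql " AND b!=$x"
      do_malloc_test mallocX-grow.$x -sqlbody $sql -sqlprep {
        CREATE TABLE tx(a,b);
      }
    }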
+# +# $Id: mallocK.test,v 1.3 2009/01/08 21:00:03 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +source $testdir/malloc_common.tcl + +set sql {SELECT * FROM t1, t2 WHERE (a=1 OR a=2)} +for {set x 1} {$x<5} {incr x} { + append sql " AND b=y" + do_malloc_test mallocK-1.$x -sqlbody $sql -sqlprep { + CREATE TABLE t1(a,b); + CREATE TABLE t2(x,y); + } +} + +set sql {SELECT * FROM t1 WHERE a GLOB 'xyz*' AND (a=1 OR a=2)} +for {set x 1} {$x<5} {incr x} { + append sql " AND b!=$x" + do_malloc_test mallocK-2.$x -sqlbody $sql -sqlprep { + CREATE TABLE t1(a,b); + } +} + +set sql {SELECT * FROM t1 WHERE a BETWEEN 5 AND 10} +for {set x 1} {$x<5} {incr x} { + append sql " AND b=$x" + do_malloc_test mallocK-3.$x -sqlbody $sql -sqlprep { + CREATE TABLE t1(a,b); + } +} + +set sql {SELECT * FROM t1 WHERE b=0} +for {set x 1} {$x<5} {incr x} { + set term "(b=$x" + for {set y 0} {$y<$x} {incr y} { + append term " AND a!=$y" + } + append sql " OR $term)" + do_malloc_test mallocK-4.$x -sqlbody $sql -sqlprep { + CREATE TABLE t1(a,b); + } +} + +ifcapable vtab { + set sql {SELECT * FROM t2 WHERE a MATCH 'xyz'} + for {set x 1} {$x<5} {incr x} { + append sql " AND b!=$x" + do_malloc_test mallocK-5.$x -sqlbody $sql -tclprep { + register_echo_module [sqlite3_connection_pointer db] + db eval { + CREATE TABLE t1(a,b); + CREATE VIRTUAL TABLE t2 USING echo(t1); + } + } + } +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/malloc.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/malloc.test --- sqlite3-3.4.2/test/malloc.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/malloc.test 2009-06-25 12:45:59.000000000 +0100 @@ -8,47 +8,61 @@ # May you share freely, never taking more than you give. # #*********************************************************************** -# This file attempts to check the library in an out-of-memory situation. -# When compiled with -DSQLITE_DEBUG=1, the SQLite library accepts a special -# command (sqlite_malloc_fail N) which causes the N-th malloc to fail. This -# special feature is used to see what happens in the library if a malloc -# were to really fail due to an out-of-memory situation. # -# $Id: malloc.test,v 1.42 2007/05/30 10:36:47 danielk1977 Exp $ +# This file attempts to check the behavior of the SQLite library in +# an out-of-memory situation. When compiled with -DSQLITE_DEBUG=1, +# the SQLite library accepts a special command (sqlite3_memdebug_fail N C) +# which causes the N-th malloc to fail. This special feature is used +# to see what happens in the library if a malloc were to really fail +# due to an out-of-memory situation. +# +# $Id: malloc.test,v 1.81 2009/06/24 13:13:45 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl + # Only run these tests if memory debugging is turned on. # -if {[info command sqlite_malloc_stat]==""} { +source $testdir/malloc_common.tcl +if {!$MEMDEBUG} { puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." 
finish_test return } -source $testdir/malloc_common.tcl - -do_malloc_test 1 -tclprep { - db close -} -tclbody { - if {[catch {sqlite3 db test.db}]} { - error "out of memory" - } -} -sqlbody { - DROP TABLE IF EXISTS t1; - CREATE TABLE t1( - a int, b float, c double, d text, e varchar(20), - primary key(a,b,c) - ); - CREATE INDEX i1 ON t1(a,b); - INSERT INTO t1 VALUES(1,2.3,4.5,'hi',x'746865726500'); - INSERT INTO t1 VALUES(6,7.0,0.8,'hello','out yonder'); - SELECT * FROM t1; - SELECT avg(b) FROM t1 GROUP BY a HAVING b>20.0; - DELETE FROM t1 WHERE a IN (SELECT min(a) FROM t1); - SELECT count(*) FROM t1; -} +# Do a couple of memory dumps just to exercise the memory dump logic +# that that we can say that we have. +# +puts stderr "This is a test. Ignore the error that follows:" +sqlite3_memdebug_dump $testdir +puts "Memory dump to file memdump.txt..." +sqlite3_memdebug_dump memdump.txt + +ifcapable bloblit&&subquery { + do_malloc_test 1 -tclprep { + db close + } -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 + } -sqlbody { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1( + a int, b float, c double, d text, e varchar(20), + primary key(a,b,c) + ); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,2.3,4.5,'hi',x'746865726500'); + INSERT INTO t1 VALUES(6,7.0,0.8,'hello','out yonder'); + SELECT * FROM t1; + SELECT avg(b) FROM t1 GROUP BY a HAVING b>20.0; + DELETE FROM t1 WHERE a IN (SELECT min(a) FROM t1); + SELECT count(*), group_concat(e) FROM t1; + SELECT b FROM t1 ORDER BY 1 COLLATE nocase; + } +} # Ensure that no file descriptors were leaked. do_test malloc-1.X { @@ -56,25 +70,27 @@ set sqlite_open_file_count } {0} -do_malloc_test 2 -sqlbody { - CREATE TABLE t1(a int, b int default 'abc', c int default 1); - CREATE INDEX i1 ON t1(a,b); - INSERT INTO t1 VALUES(1,1,'99 abcdefghijklmnopqrstuvwxyz'); - INSERT INTO t1 VALUES(2,4,'98 abcdefghijklmnopqrstuvwxyz'); - INSERT INTO t1 VALUES(3,9,'97 abcdefghijklmnopqrstuvwxyz'); - INSERT INTO t1 VALUES(4,16,'96 abcdefghijklmnopqrstuvwxyz'); - INSERT INTO t1 VALUES(5,25,'95 abcdefghijklmnopqrstuvwxyz'); - INSERT INTO t1 VALUES(6,36,'94 abcdefghijklmnopqrstuvwxyz'); - SELECT 'stuff', count(*) as 'other stuff', max(a+10) FROM t1; - UPDATE t1 SET b=b||b||b||b; - UPDATE t1 SET b=a WHERE a in (10,12,22); - INSERT INTO t1(c,b,a) VALUES(20,10,5); - INSERT INTO t1 SELECT * FROM t1 - WHERE a IN (SELECT a FROM t1 WHERE a<10); - DELETE FROM t1 WHERE a>=10; - DROP INDEX i1; - DELETE FROM t1; -} +ifcapable subquery { + do_malloc_test 2 -sqlbody { + CREATE TABLE t1(a int, b int default 'abc', c int default 1); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,1,'99 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(2,4,'98 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(3,9,'97 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(4,16,'96 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(5,25,'95 abcdefghijklmnopqrstuvwxyz'); + INSERT INTO t1 VALUES(6,36,'94 abcdefghijklmnopqrstuvwxyz'); + SELECT 'stuff', count(*) as 'other stuff', max(a+10) FROM t1; + UPDATE t1 SET b=b||b||b||b; + UPDATE t1 SET b=a WHERE a in (10,12,22); + INSERT INTO t1(c,b,a) VALUES(20,10,5); + INSERT INTO t1 SELECT * FROM t1 + WHERE a IN (SELECT a FROM t1 WHERE a<10); + DELETE FROM t1 WHERE a>=10; + DROP INDEX i1; + DELETE FROM t1; + } +} # Ensure that no file descriptors were leaked. 
do_test malloc-2.X { @@ -106,23 +122,25 @@ set sqlite_open_file_count } {0} -do_malloc_test 4 -sqlbody { - BEGIN TRANSACTION; - CREATE TABLE t1(a int, b int, c int); - CREATE INDEX i1 ON t1(a,b); - INSERT INTO t1 VALUES(1,1,99); - INSERT INTO t1 VALUES(2,4,98); - INSERT INTO t1 VALUES(3,9,97); - INSERT INTO t1 VALUES(4,16,96); - INSERT INTO t1 VALUES(5,25,95); - INSERT INTO t1 VALUES(6,36,94); - UPDATE t1 SET b=a WHERE a in (10,12,22); - INSERT INTO t1 SELECT * FROM t1 - WHERE a IN (SELECT a FROM t1 WHERE a<10); - DROP INDEX i1; - DELETE FROM t1; - COMMIT; -} +ifcapable subquery { + do_malloc_test 4 -sqlbody { + BEGIN TRANSACTION; + CREATE TABLE t1(a int, b int, c int); + CREATE INDEX i1 ON t1(a,b); + INSERT INTO t1 VALUES(1,1,99); + INSERT INTO t1 VALUES(2,4,98); + INSERT INTO t1 VALUES(3,9,97); + INSERT INTO t1 VALUES(4,16,96); + INSERT INTO t1 VALUES(5,25,95); + INSERT INTO t1 VALUES(6,36,94); + UPDATE t1 SET b=a WHERE a in (10,12,22); + INSERT INTO t1 SELECT * FROM t1 + WHERE a IN (SELECT a FROM t1 WHERE a<10); + DROP INDEX i1; + DELETE FROM t1; + COMMIT; + } +} # Ensure that no file descriptors were leaked. do_test malloc-4.X { @@ -130,19 +148,23 @@ set sqlite_open_file_count } {0} -do_malloc_test 5 -sqlbody { - BEGIN TRANSACTION; - CREATE TABLE t1(a,b); - CREATE TABLE t2(x,y); - CREATE TRIGGER r1 AFTER INSERT ON t1 BEGIN - INSERT INTO t2(x,y) VALUES(new.rowid,1); - UPDATE t2 SET y=y+1 WHERE x=new.rowid; - SELECT 123; - DELETE FROM t2 WHERE x=new.rowid; - END; - INSERT INTO t1(a,b) VALUES(2,3); - COMMIT; -} +ifcapable trigger { + do_malloc_test 5 -sqlbody { + BEGIN TRANSACTION; + CREATE TABLE t1(a,b); + CREATE TABLE t2(x,y); + CREATE TRIGGER r1 AFTER INSERT ON t1 WHEN new.a = 2 BEGIN + INSERT INTO t2(x,y) VALUES(new.rowid,1); + INSERT INTO t2(x,y) SELECT * FROM t2; + INSERT INTO t2 SELECT * FROM t2; + UPDATE t2 SET y=y+1 WHERE x=new.rowid; + SELECT 123; + DELETE FROM t2 WHERE x=new.rowid; + END; + INSERT INTO t1(a,b) VALUES(2,3); + COMMIT; + } +} # Ensure that no file descriptors were leaked. 
do_test malloc-5.X { @@ -150,26 +172,29 @@ set sqlite_open_file_count } {0} -do_malloc_test 6 -sqlprep { - BEGIN TRANSACTION; - CREATE TABLE t1(a); - INSERT INTO t1 VALUES(1); - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - INSERT INTO t1 SELECT a*2 FROM t1; - DELETE FROM t1 where rowid%5 = 0; - COMMIT; -} -sqlbody { - VACUUM; -} +ifcapable vacuum { + do_malloc_test 6 -sqlprep { + BEGIN TRANSACTION; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + INSERT INTO t1 SELECT a*2 FROM t1; + DELETE FROM t1 where rowid%5 = 0; + COMMIT; + } -sqlbody { + VACUUM; + } +} +autoinstall_test_functions do_malloc_test 7 -sqlprep { CREATE TABLE t1(a, b); INSERT INTO t1 VALUES(1, 2); @@ -180,7 +205,7 @@ SELECT min(a) FROM t1 WHERE a<6 GROUP BY b; SELECT a FROM t1 WHERE a<6 ORDER BY a; SELECT b FROM t1 WHERE a>6; -} +} # This block is designed to test that some malloc failures that may # occur in vdbeapi.c. Specifically, if a malloc failure that occurs @@ -194,30 +219,35 @@ # # These tests only run if UTF-16 support is compiled in. # -if {$::sqlite_options(utf16)} { +ifcapable utf16 { + set ::STMT {} do_malloc_test 8 -tclprep { set sql "SELECT '[string repeat abc 20]', '[string repeat def 20]', ?" - set ::STMT [sqlite3_prepare $::DB $sql -1 X] + set ::STMT [sqlite3_prepare db $sql -1 X] sqlite3_step $::STMT if { $::tcl_platform(byteOrder)=="littleEndian" } { set ::bomstr "\xFF\xFE" } else { set ::bomstr "\xFE\xFF" } - append ::bomstr [encoding convertto unicode "123456789_123456789_12345678"] + append ::bomstr [encoding convertto unicode "123456789_123456789_123456789"] } -tclbody { sqlite3_column_text16 $::STMT 0 sqlite3_column_int $::STMT 0 sqlite3_column_text16 $::STMT 1 sqlite3_column_double $::STMT 1 - sqlite3_reset $::STMT + set rc [sqlite3_reset $::STMT] + if {$rc eq "SQLITE_NOMEM"} {error "out of memory"} sqlite3_bind_text16 $::STMT 1 $::bomstr 60 - catch {sqlite3_finalize $::STMT} - if {[lindex [sqlite_malloc_stat] 2]<=0} { - error "out of memory" - } + #catch {sqlite3_finalize $::STMT} + #if {[lindex [sqlite_malloc_stat] 2]<=0} { + # error "out of memory" + #} } -cleanup { - sqlite3_finalize $::STMT + if {$::STMT!=""} { + sqlite3_finalize $::STMT + set ::STMT {} + } } } @@ -237,19 +267,28 @@ # This block tests malloc() failures that occur while opening a # connection to a database. -do_malloc_test 10 -sqlprep { - CREATE TABLE abc(a, b, c); +do_malloc_test 10 -tclprep { + catch {db2 close} + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + sqlite3_extended_result_codes db 1 + db eval {CREATE TABLE abc(a, b, c)} } -tclbody { db close sqlite3 db2 test.db + sqlite3_extended_result_codes db2 1 db2 eval {SELECT * FROM sqlite_master} db2 close -} +} # This block tests malloc() failures that occur within calls to # sqlite3_create_function(). 
-do_malloc_test 11 -tclbody { - set rc [sqlite3_create_function $::DB] +do_malloc_test 11 -tclbody { + set rc [sqlite3_create_function db] + if {[string match $rc SQLITE_OK]} { + set rc [sqlite3_create_aggregate db] + } if {[string match $rc SQLITE_NOMEM]} { error "out of memory" } @@ -258,13 +297,13 @@ do_malloc_test 12 -tclbody { set sql16 [encoding convertto unicode "SELECT * FROM sqlite_master"] append sql16 "\00\00" - set ::STMT [sqlite3_prepare16 $::DB $sql16 -1 DUMMY] + set ::STMT [sqlite3_prepare16 db $sql16 -1 DUMMY] sqlite3_finalize $::STMT } # Test malloc errors when replaying two hot journals from a 2-file # transaction. -ifcapable crashtest { +ifcapable crashtest&&attach { do_malloc_test 13 -tclprep { set rc [crashsql -delay 1 -file test2.db { ATTACH 'test2.db' as aux; @@ -293,6 +332,7 @@ do_malloc_test 14 -tclprep { catch {db close} sqlite3 db2 test2.db + sqlite3_extended_result_codes db2 1 db2 eval { PRAGMA synchronous = 0; CREATE TABLE t1(a, b); @@ -305,9 +345,15 @@ db2 close } -tclbody { sqlite3 db test.db - db eval { - SELECT * FROM t1; - } + sqlite3_extended_result_codes db 1 + + # If an out-of-memory occurs within a call to a VFS layer function during + # hot-journal rollback, sqlite will report SQLITE_CORRUPT. See commit + # [5668] for details. + set rc [catch {db eval { SELECT * FROM t1 }} msg] + if {$msg eq "database disk image is malformed"} { set msg "out of memory" } + if {$rc} { error $msg } + set msg } } @@ -318,20 +364,22 @@ # Test for malloc() failures in sqlite3_create_collation() and # sqlite3_create_collation16(). # -do_malloc_test 15 -tclbody { - db collate string_compare string_compare - if {[catch {add_test_collate $::DB 1 1 1} msg]} { - if {$msg=="SQLITE_NOMEM"} {set msg "out of memory"} - error $msg - } - - db complete {SELECT "hello """||'world"' [microsoft], * FROM anicetable;} - db complete {-- Useful comment} - - execsql { - CREATE TABLE t1(a, b COLLATE string_compare); - INSERT INTO t1 VALUES(10, 'string'); - INSERT INTO t1 VALUES(10, 'string2'); +ifcapable utf16 { + do_malloc_test 15 -start 4 -tclbody { + db collate string_compare string_compare + if {[catch {add_test_collate db 1 1 1} msg]} { + if {$msg=="SQLITE_NOMEM"} {set msg "out of memory"} + error $msg + } + + db complete {SELECT "hello """||'world"' [microsoft], * FROM anicetable;} + db complete {-- Useful comment} + + execsql { + CREATE TABLE t1(a, b COLLATE string_compare); + INSERT INTO t1 VALUES(10, 'string'); + INSERT INTO t1 VALUES(10, 'string2'); + } } } @@ -348,58 +396,69 @@ # Test handling of malloc() failures in sqlite3_open16(). 
# -do_malloc_test 17 -tclbody { - set DB2 0 - set STMT 0 - - # open database using sqlite3_open16() - set filename [encoding convertto unicode test.db] - append filename "\x00\x00" - set DB2 [sqlite3_open16 $filename -unused] - if {0==$DB2} { - error "out of memory" - } - - # Prepare statement - set rc [catch {sqlite3_prepare $DB2 {SELECT * FROM sqlite_master} -1 X} msg] - if {$rc} { - error [string range $msg 4 end] - } - set STMT $msg - - # Finalize statement - set rc [sqlite3_finalize $STMT] - if {$rc!="SQLITE_OK"} { - error [sqlite3_errmsg $DB2] - } - set STMT 0 - - # Close database - set rc [sqlite3_close $DB2] - if {$rc!="SQLITE_OK"} { - error [sqlite3_errmsg $DB2] - } - set DB2 0 -} -cleanup { - if {$STMT!="0"} { - sqlite3_finalize $STMT - } - if {$DB2!="0"} { +ifcapable utf16 { + do_malloc_test 17 -tclbody { + set DB2 0 + set STMT 0 + + # open database using sqlite3_open16() + set filename [encoding convertto unicode test.db] + append filename "\x00\x00" + set DB2 [sqlite3_open16 $filename -unused] + if {0==$DB2} { + error "out of memory" + } + sqlite3_extended_result_codes $DB2 1 + + # Prepare statement + set rc [catch {sqlite3_prepare $DB2 {SELECT * FROM sqlite_master} -1 X} msg] + if {[sqlite3_errcode $DB2] eq "SQLITE_IOERR+12"} { + error "out of memory" + } + if {[regexp ".*automatic extension loading.*" [sqlite3_errmsg $DB2]]} { + error "out of memory" + } + if {$rc} { + error [string range $msg 4 end] + } + set STMT $msg + + # Finalize statement + set rc [sqlite3_finalize $STMT] + if {$rc!="SQLITE_OK"} { + error [sqlite3_errmsg $DB2] + } + set STMT 0 + + # Close database set rc [sqlite3_close $DB2] + if {$rc!="SQLITE_OK"} { + error [sqlite3_errmsg $DB2] + } + set DB2 0 + } -cleanup { + if {$STMT!="0"} { + sqlite3_finalize $STMT + } + if {$DB2!="0"} { + set rc [sqlite3_close $DB2] + } } } # Test handling of malloc() failures in sqlite3_errmsg16(). # -do_malloc_test 18 -tclbody { - catch { - db eval "SELECT [string repeat longcolumnname 10] FROM sqlite_master" - } msg - if {$msg=="out of memory"} {error $msg} - set utf16 [sqlite3_errmsg16 [sqlite3_connection_pointer db]] - binary scan $utf16 c* bytes - if {[llength $bytes]==0} { - error "out of memory" +ifcapable utf16 { + do_malloc_test 18 -tclprep { + catch { + db eval "SELECT [string repeat longcolumnname 10] FROM sqlite_master" + } + } -tclbody { + set utf16 [sqlite3_errmsg16 [sqlite3_connection_pointer db]] + binary scan $utf16 c* bytes + if {[llength $bytes]==0} { + error "out of memory" + } } } @@ -421,7 +480,7 @@ } -tclbody { unset -nocomplain ::STMT set r [catch { - set ::STMT [sqlite3_prepare $::DB {SELECT ?} -1 DUMMY] + set ::STMT [sqlite3_prepare db {SELECT ?} -1 DUMMY] sqlite3_bind_text16 -static $::STMT 1 $static_string 112 } msg] if {$r} {error [string range $msg 4 end]} @@ -436,27 +495,33 @@ # Make sure SQLITE_NOMEM is reported out on an ATTACH failure even # when the malloc failure occurs within the nested parse. 
# -do_malloc_test 20 -tclprep { - db close - file delete -force test2.db test2.db-journal - sqlite3 db test2.db - db eval {CREATE TABLE t1(x);} - db close -} -tclbody { - if {[catch {sqlite3 db test.db}]} { - error "out of memory" - } -} -sqlbody { - ATTACH DATABASE 'test2.db' AS t2; - SELECT * FROM t1; - DETACH DATABASE t2; -} +ifcapable attach { + do_malloc_test 20 -tclprep { + db close + file delete -force test2.db test2.db-journal + sqlite3 db test2.db + sqlite3_extended_result_codes db 1 + db eval {CREATE TABLE t1(x);} + db close + } -tclbody { + if {[catch {sqlite3 db test.db}]} { + error "out of memory" + } + sqlite3_extended_result_codes db 1 + } -sqlbody { + ATTACH DATABASE 'test2.db' AS t2; + SELECT * FROM t1; + DETACH DATABASE t2; + } +} # Test malloc failure whilst installing a foreign key. # -do_malloc_test 21 -sqlbody { - CREATE TABLE abc(a, b, c, FOREIGN KEY(a) REFERENCES abc(b)) -} +ifcapable foreignkey { + do_malloc_test 21 -sqlbody { + CREATE TABLE abc(a, b, c, FOREIGN KEY(a) REFERENCES abc(b)) + } +} # Test malloc failure in an sqlite3_prepare_v2() call. # @@ -464,7 +529,7 @@ set ::STMT "" set r [catch { set ::STMT [ - sqlite3_prepare_v2 $::DB "SELECT * FROM sqlite_master" -1 DUMMY + sqlite3_prepare_v2 db "SELECT * FROM sqlite_master" -1 DUMMY ] } msg] if {$r} {error [string range $msg 4 end]} @@ -475,6 +540,332 @@ } } +ifcapable {pager_pragmas} { + # This tests a special case - that an error that occurs while the pager + # is trying to recover from error-state in exclusive-access mode works. + # + do_malloc_test 23 -tclprep { + db eval { + PRAGMA cache_size = 10; + PRAGMA locking_mode = exclusive; + BEGIN; + CREATE TABLE abc(a, b, c); + CREATE INDEX abc_i ON abc(a, b, c); + INSERT INTO abc + VALUES(randstr(100,100), randstr(100,100), randstr(100,100)); + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + INSERT INTO abc + SELECT randstr(100,100), randstr(100,100), randstr(100,100) FROM abc; + COMMIT; + } + + # This puts the pager into error state. + # + db eval BEGIN + db eval {UPDATE abc SET a = 0 WHERE oid%2} + set ::sqlite_io_error_pending 10 + catch {db eval {ROLLBACK}} msg + + } -tclbody { + # If an out-of-memory occurs within a call to a VFS layer function during + # hot-journal rollback, sqlite will report SQLITE_CORRUPT. See commit + # [5668] for details. 
+ set rc [catch {db eval { SELECT * FROM abc LIMIT 10 }} msg] + if {$msg eq "database disk image is malformed"} { set msg "out of memory" } + if {$rc} { error $msg } + set msg + } -cleanup { + set e [db eval {PRAGMA integrity_check}] + if {$e ne "ok"} {error $e} + } +} + +ifcapable compound { + do_malloc_test 24 -sqlprep { + CREATE TABLE t1(a, b, c) + } -sqlbody { + SELECT 1 FROM t1 UNION SELECT 2 FROM t1 ORDER BY 1 + } +} + +ifcapable view&&trigger { + do_malloc_test 25 -sqlprep { + CREATE TABLE t1(a, b, c); + CREATE VIEW v1 AS SELECT * FROM t1; + CREATE TRIGGER v1t1 INSTEAD OF DELETE ON v1 BEGIN SELECT 1; END; + CREATE TRIGGER v1t2 INSTEAD OF INSERT ON v1 BEGIN SELECT 1; END; + CREATE TRIGGER v1t3 INSTEAD OF UPDATE ON v1 BEGIN SELECT 1; END; + } -sqlbody { + DELETE FROM v1 WHERE a = 1; + INSERT INTO v1 VALUES(1, 2, 3); + UPDATE v1 SET a = 1 WHERE b = 2; + } +} + +do_malloc_test 25 -sqlprep { + CREATE TABLE abc(a, b, c); + CREATE INDEX i1 ON abc(a, b); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); +} -tclbody { + # For each UPDATE executed, the cursor used for the SELECT statement + # must be "saved". Because the cursor is open on an index, this requires + # a malloc() to allocate space to save the index key. This test case is + # aimed at testing the response of the library to a failure in that + # particular malloc() call. + db eval {SELECT a FROM abc ORDER BY a} { + db eval {UPDATE abc SET b = b - 1 WHERE a = $a} + } +} + +# This test is designed to test a specific juncture in the sqlite code. +# The database set up by -sqlprep script contains a single table B-Tree +# of height 2. In the -tclbody script, the existing database connection +# is closed and a new one opened and used to insert a new row into the +# table B-Tree. By using a new connection, the outcome of a malloc() +# failure while seeking to the right-hand side of the B-Tree to insert +# a new record can be tested. +# +do_malloc_test 26 -sqlprep { + BEGIN; + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, randomblob(210)); + INSERT INTO t1 VALUES(1, randomblob(210)); + INSERT INTO t1 VALUES(1, randomblob(210)); + INSERT INTO t1 VALUES(1, randomblob(210)); + INSERT INTO t1 VALUES(1, randomblob(210)); + COMMIT; +} -tclbody { + db close + sqlite3 db test.db + db eval { INSERT INTO t1 VALUES(1, randomblob(210)) } +} + +# Test that no memory is leaked following a malloc() failure in +# sqlite3_initialize(). +# +do_malloc_test 27 -tclprep { + db close + sqlite3_shutdown +} -tclbody { + set rc [sqlite3_initialize] + if {$rc == "SQLITE_NOMEM"} { + error "out of memory" + } +} +autoinstall_test_functions + +# Test that malloc failures that occur while processing INDEXED BY +# clauses are handled correctly. +do_malloc_test 28 -sqlprep { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + CREATE VIEW v1 AS SELECT * FROM t1 INDEXED BY i1 WHERE a = 10; +} -sqlbody { + SELECT * FROM t1 INDEXED BY i1 ORDER BY a; + SELECT * FROM v1; +} + +do_malloc_test 29 -sqlprep { + CREATE TABLE t1(a TEXT, b TEXT); +} -sqlbody { + INSERT INTO t1 VALUES(1, -234); + INSERT INTO t1 SELECT * FROM t1 UNION ALL SELECT * FROM t1; +} + +do_malloc_test 30 -tclprep { + db eval { + CREATE TABLE t1(x PRIMARY KEY); + INSERT INTO t1 VALUES(randstr(500,500)); + INSERT INTO t1 VALUES(randstr(500,500)); + INSERT INTO t1 VALUES(randstr(500,500)); + } + db close + sqlite3 db test.db + + # The DELETE command in the following block moves the overflow pages that + # are part of the primary key index to the free-list. 
But it does not + # actually load the content of the pages. This leads to the peculiar + # situation where cache entries exist, but are not populated with data. + # They are populated next time they are requested by the b-tree layer. + # + db eval { + BEGIN; + DELETE FROM t1; + ROLLBACK; + } +} -sqlbody { + -- This statement requires the 'no-content' pages loaded by the DELETE + -- statement above. When requesting the pages, the content is loaded + -- from the database file. The point of this test case is to test handling + -- of malloc errors (including SQLITE_IOERR_NOMEM errors) when loading + -- the content. + SELECT * FROM t1 ORDER BY x; +} + +# After committing a transaction in persistent-journal mode, if a journal +# size limit is configured SQLite may attempt to truncate the journal file. +# This test verifies the libraries response to a malloc() failure during +# this operation. +# +do_malloc_test 31 -sqlprep { + PRAGMA journal_mode = persist; + PRAGMA journal_size_limit = 1024; + CREATE TABLE t1(a PRIMARY KEY, b); +} -sqlbody { + INSERT INTO t1 VALUES(1, 2); +} + +# When written, this test provoked an obscure change-counter bug. +# +# If, when running in exclusive mode, a malloc() failure occurs +# after the database file change-counter has been written but +# before the transaction has been committed, then the transaction +# is automatically rolled back. However, internally the +# Pager.changeCounterDone flag was being left set. This means +# that if the same connection attempts another transaction following +# the malloc failure and rollback, the change counter will not +# be updated. This could corrupt another processes cache. +# +do_malloc_test 32 -tclprep { + # Build a small database containing an indexed table. + # + db eval { + PRAGMA locking_mode = normal; + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + INSERT INTO t1 VALUES(3, 'three'); + COMMIT; + PRAGMA locking_mode = exclusive; + } + + # Open a second database connection. Load the table (but not index) + # into the second connections pager cache. + # + sqlite3 db2 test.db + db2 eval { + PRAGMA locking_mode = normal; + SELECT b FROM t1; + } + +} -tclbody { + # Running in exclusive mode, perform a database transaction that + # modifies both the database table and index. For iterations where + # the malloc failure occurs after updating the change counter but + # before committing the transaction, this should result in the + # transaction being rolled back but the changeCounterDone flag + # left set. + # + db eval { UPDATE t1 SET a = a + 3 } +} -cleanup { + + # Perform another transaction using the first connection. Unlock + # the database after doing so. If this is one of the right iterations, + # then this should result in the database contents being updated but + # the change-counter left as it is. + # + db eval { + PRAGMA locking_mode = normal; + UPDATE t1 SET a = a + 3; + } + + # Now do an integrity check with the second connection. The second + # connection still has the database table in its cache. If this is + # one of the magic iterations and the change counter was not modified, + # then it won't realize that the cached data is out of date. Since + # the cached data won't match the up to date index data read from + # the database file, the integrity check should fail. 
+ # + set zRepeat "transient" + if {$::iRepeat} {set zRepeat "persistent"} + do_test malloc-32.$zRepeat.${::n}.integrity { + execsql {PRAGMA integrity_check} db2 + } {ok} + db2 close +} + +# The following two OOM tests verify that OOM handling works in the +# code used to optimize "SELECT count(*) FROM ". +# +do_malloc_test 33 -tclprep { + db eval { PRAGMA cache_size = 10 } + db transaction { + db eval { CREATE TABLE abc(a, b) } + for {set i 0} {$i<500} {incr i} { + db eval {INSERT INTO abc VALUES(randstr(100,100), randstr(1000,1000))} + } + } +} -sqlbody { + SELECT count(*) FROM abc; +} +do_malloc_test 34 -tclprep { + db eval { PRAGMA cache_size = 10 } + db transaction { + db eval { CREATE TABLE abc(a PRIMARY KEY, b) } + for {set i 0} {$i<500} {incr i} { + db eval {INSERT INTO abc VALUES(randstr(100,100), randstr(1000,1000))} + } + } +} -sqlbody { + SELECT count(*) FROM abc; +} + +proc f {args} { error "Quite a long error!" } +do_malloc_test 35 -tclprep { + db func f f + set ::STMT [sqlite3_prepare db "SELECT f()" -1 DUMMY] + sqlite3_step $::STMT +} -tclbody { + sqlite3_finalize $::STMT +} -cleanup { + # At one point an assert( !db->mallocFailed ) could fail in the following + # call to sqlite3_errmsg(). Because sqlite3_finalize() had failed to clear + # the flag before returning. + sqlite3_errmsg16 db +} + +do_malloc_test 36 -sqlprep { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + INSERT INTO t1 VALUES(3, 4); +} -sqlbody { + SELECT test_agg_errmsg16(), group_concat(a) FROM t1 +} + +# At one point, if an OOM occured immediately after obtaining a shared lock +# on the database file, the file remained locked. This test case ensures +# that bug has been fixed.i +if {[db eval {PRAGMA locking_mode}]!="exclusive"} { + do_malloc_test 37 -tclprep { + sqlite3 db2 test.db + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } db2 + } -sqlbody { + SELECT * FROM t1; + } -cleanup { + # Try to write to the database using connection [db2]. If connection [db] + # has correctly released the shared lock, this write attempt should + # succeed. If [db] has not released the lock, this should hit an + # SQLITE_BUSY error. + do_test malloc-36.$zRepeat.${::n}.unlocked { + execsql {INSERT INTO t1 VALUES(3, 4)} db2 + } {} + db2 close + } + catch { db2 close } +} + # Ensure that no file descriptors were leaked. do_test malloc-99.X { catch {db close} @@ -482,5 +873,4 @@ } {0} puts open-file-count=$sqlite_open_file_count -sqlite_malloc_fail 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/manydb.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/manydb.test --- sqlite3-3.4.2/test/manydb.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/manydb.test 2009-06-12 03:37:54.000000000 +0100 @@ -13,12 +13,23 @@ # This file implements tests the ability of the library to open # many different databases at the same time without leaking memory. # -# $Id: manydb.test,v 1.3 2006/01/11 01:08:34 drh Exp $ +# $Id: manydb.test,v 1.4 2008/11/21 00:10:35 aswift Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl set N 300 +# if we're using proxy locks, we use 5 filedescriptors for a db +# that is open and in the middle of writing changes, normally +# sqlite uses 3 (proxy locking adds the conch and the local lock) +set using_proxy 0 +foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set using_proxy value +} +set num_fd_per_openwrite_db 3 +if {$using_proxy>0} { + set num_fd_per_openwrite_db 5 +} # First test how many file descriptors are available for use. 
To open a # database for writing SQLite requires 3 file descriptors (the database, the @@ -35,7 +46,7 @@ catch { file delete -force testfile.1 } -set N [expr $i / 3] +set N [expr $i / $num_fd_per_openwrite_db] # Create a bunch of random database names # diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/memdb.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/memdb.test --- sqlite3-3.4.2/test/memdb.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/memdb.test 2009-06-25 12:35:52.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is in-memory database backend. # -# $Id: memdb.test,v 1.15 2006/01/30 22:48:44 drh Exp $ +# $Id: memdb.test,v 1.19 2009/05/18 16:04:38 danielk1977 Exp $ set testdir [file dirname $argv0] @@ -244,7 +244,7 @@ do_test memdb-5.$i { if {$conf1!=""} {set conf1 "ON CONFLICT $conf1"} if {$conf2!=""} {set conf2 "ON CONFLICT $conf2"} - set r0 [catch {execsql [subst { + set r0 [catch {execsql " DROP TABLE t1; CREATE TABLE t1(a,b,c, UNIQUE(a) $conf1); INSERT INTO t1 SELECT * FROM t2; @@ -253,7 +253,7 @@ $cmd t3 SET x=1; $cmd t1 SET b=b*2; $cmd t1 SET a=c+5; - }]} r1] + "} r1] catch {execsql {COMMIT}} if {!$r0} {set r1 [execsql {SELECT a FROM t1 ORDER BY b}]} set r2 [execsql {SELECT x FROM t3}] @@ -411,6 +411,27 @@ } } 0 +# Test that auto-vacuum works with in-memory databases. +# +ifcapable autovacuum { + do_test memdb-9.1 { + db close + sqlite3 db test.db + db cache size 0 + execsql { + PRAGMA auto_vacuum = full; + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(randstr(1000,1000)); + INSERT INTO t1 VALUES(randstr(1000,1000)); + INSERT INTO t1 VALUES(randstr(1000,1000)); + } + set memused [lindex [sqlite3_status SQLITE_STATUS_MEMORY_USED 0] 1] + set pgovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 1] + execsql { DELETE FROM t1 } + set memused2 [lindex [sqlite3_status SQLITE_STATUS_MEMORY_USED 0] 1] + expr {($memused2 + 2048 < $memused) || $pgovfl==0} + } {1} +} } ;# ifcapable memorydb diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/memsubsys1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/memsubsys1.test --- sqlite3-3.4.2/test/memsubsys1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/memsubsys1.test 2009-06-25 12:23:19.000000000 +0100 @@ -0,0 +1,309 @@ +# 2008 June 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests of the memory allocation subsystem +# +# $Id: memsubsys1.test,v 1.15 2009/04/11 14:46:43 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +sqlite3_reset_auto_extension + +# This procedure constructs a new database in test.db. It fills +# this database with many small records (enough to force multiple +# rebalance operations in the btree-layer and to require a large +# page cache), verifies correct results, then returns. 
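The doubling loop inside build_test_db (defined just below) is what produces the 8192-row table that every memsubsys1 test checks: each pass copies t1 into t2 and re-inserts it, doubling t1, while the loop variable itself doubles from 2 up to 4096, giving twelve passes over the two seed rows. The arithmetic as a stand-alone sketch:

    # i takes the values 2, 4, 8, ..., 4096: twelve passes in all
    set rows 2                        ;# the two seed rows inserted before the loop
    for {set i 2} {$i<5000} {incr i $i} {
      set rows [expr {$rows * 2}]     ;# each pass doubles the size of t1
    }
    puts $rows                        ;# prints 8192, the value checked by do_test $testname.1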
+# +proc build_test_db {testname pragmas} { + catch {db close} + file delete -force test.db test.db-journal + sqlite3 db test.db + sqlite3_db_config_lookaside db 0 0 0 + db eval $pragmas + db eval { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b); + CREATE INDEX i1 ON t1(x,y); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 200); + } + for {set i 2} {$i<5000} {incr i $i} { + db eval {INSERT INTO t2 SELECT * FROM t1} + db eval {INSERT INTO t1 SELECT a+$i, a+b*100 FROM t2} + db eval {DELETE FROM t2} + } + do_test $testname.1 { + db eval {SELECT count(*) FROM t1} + } 8192 + integrity_check $testname.2 +} + +# Reset all of the highwater marks. +# +proc reset_highwater_marks {} { + sqlite3_status SQLITE_STATUS_MEMORY_USED 1 + sqlite3_status SQLITE_STATUS_MALLOC_SIZE 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_USED 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_SIZE 1 + sqlite3_status SQLITE_STATUS_SCRATCH_USED 1 + sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 1 + sqlite3_status SQLITE_STATUS_SCRATCH_SIZE 1 + sqlite3_status SQLITE_STATUS_PARSER_STACK 1 +} + +set xtra_size 256 + +# Test 1: Both PAGECACHE and SCRATCH are shut down. +# +db close +sqlite3_shutdown +sqlite3_config_lookaside 0 0 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-1 {PRAGMA page_size=1024} +do_test memsubsys1-1.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 0 +do_test memsubsys1-1.4 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 +set max_pagecache [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] +#show_memstats + +# Test 2: Activate PAGECACHE with 20 pages +# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 1024+$xtra_size] 20 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-2 {PRAGMA page_size=1024} +#show_memstats +do_test memsubsys1-2.3 { + set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] + expr { + ($pg_used*1024 + $pg_ovfl) < $max_pagecache && + ($pg_used*(1024+$xtra_size) + $pg_ovfl) >= $max_pagecache + } +} 1 +do_test memsubsys1-2.4 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 19 +do_test memsubsys1-2.5 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 + +# Test 3: Activate PAGECACHE with 20 pages but use the wrong page size +# so that PAGECACHE is not used. 
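As the slot sizes used throughout this file suggest, a PAGECACHE slot is only usable when it can hold a full database page plus some per-page overhead (the extra $xtra_size bytes, 256 in this script). Test 3 sizes the slots for 512-byte pages while the database uses 1024-byte pages, so no page ever fits: the expectation, read from the checks below, is that PAGECACHE_USED stays at zero and every page allocation is counted by PAGECACHE_OVERFLOW instead. The counters are read the same way everywhere in this file; the sqlite3_status wrapper returns a three-element list whose element 2 is, as used here, the high-water mark:

    set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2]
    set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2]
    # with 512-byte slots and 1024-byte pages, expect pg_used == 0 and all
    # page memory accounted for in pg_ovfl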
+# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 512+$xtra_size] 20 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-3.1 {PRAGMA page_size=1024} +#show_memstats +do_test memsubsys1-3.1.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 0 +do_test memsubsys1-3.1.4 { + set overflow [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] +} $max_pagecache +do_test memsubsys1-3.1.5 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 2048+$xtra_size] 20 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-3.2 {PRAGMA page_size=2048} +#show_memstats +do_test memsubsys1-3.2.3 { + db eval {PRAGMA page_size} +} 2048 +do_test memsubsys1-3.2.4 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 19 +do_test memsubsys1-3.2.5 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 + +# Test 4: Activate both PAGECACHE and SCRATCH. +# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 1024+$xtra_size] 50 +sqlite3_config_scratch 6000 2 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-4 {PRAGMA page_size=1024} +#show_memstats +do_test memsubsys1-4.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 49 +do_test memsubsys1-4.4 { + set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] + expr { + ($pg_used*1024 + $pg_ovfl) < $max_pagecache && + ($pg_used*(1024+$xtra_size) + $pg_ovfl) >= $max_pagecache + } +} 1 +do_test memsubsys1-4.5 { + set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] + expr {$maxreq<7000} +} 1 +do_test memsubsys1-4.6 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 1 + +# Test 5: Activate both PAGECACHE and SCRATCH. But make the page size +# such that the SCRATCH allocations are too small. +# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 4096+$xtra_size] 24 +sqlite3_config_scratch 6000 2 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-5 {PRAGMA page_size=4096} +#show_memstats +do_test memsubsys1-5.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 23 +do_test memsubsys1-5.4 { + set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] + expr {$maxreq>4096} +} 1 +do_test memsubsys1-5.5 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 +do_test memsubsys1-5.6 { + set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2] + expr {$s_ovfl>6000} +} 1 + +# Test 6: Activate both PAGECACHE and SCRATCH with a 4k page size. 
+# Make it so that SCRATCH is large enough +# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 4096+$xtra_size] 24 +sqlite3_config_scratch 25300 1 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-6 {PRAGMA page_size=4096} +#show_memstats +do_test memsubsys1-6.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] +} 23 +do_test memsubsys1-6.4 { + set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] + expr {$maxreq>4096 && $maxreq<=(4096+$xtra_size)} +} 1 +do_test memsubsys1-6.5 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 1 +do_test memsubsys1-6.6 { + set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2] +} 0 + +# Test 7: Activate both PAGECACHE and SCRATCH with a 4k page size. +# Set cache_size small so that no PAGECACHE overflow occurs. Verify +# that maximum allocation size is small. +# +db close +sqlite3_shutdown +sqlite3_config_pagecache [expr 4096+$xtra_size] 24 +sqlite3_config_scratch 25300 1 +sqlite3_initialize +reset_highwater_marks +build_test_db memsubsys1-7 { + PRAGMA page_size=4096; + PRAGMA cache_size=10; + PRAGMA temp_store=memory; +} +#show_memstats +do_test memsubsys1-7.3 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2] + expr {$pg_used<24} +} 1 +do_test memsubsys1-7.4 { + set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] +} 0 +do_test memsubsys1-7.5 { + set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] + expr {$maxreq<4100} +} 1 +do_test memsubsys1-7.6 { + set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 1 +do_test memsubsys1-7.7 { + set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2] +} 0 + +# Test 8: Disable PAGECACHE. Make available SCRATCH zero. Verify that +# the SCRATCH overflow logic works. +# +db close +sqlite3_shutdown +sqlite3_config_pagecache 0 0 +sqlite3_config_scratch 25000 0 +sqlite3_initialize +reset_highwater_marks +do_test memsubsys1-8.1 { + set pg_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] +} 0 +do_test memsubsys1-8.2 { + set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2] +} 0 +do_test memsubsys1-8.3 { + sqlite3 db :memory: + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(400)); + INSERT INTO t1 VALUES(zeroblob(400)); + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + } + expr {[lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2]>0} +} 1 +db close +sqlite3_shutdown +sqlite3_config_memstatus 0 +sqlite3_initialize +do_test memsubsys1-8.4 { + sqlite3 db :memory: + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(400)); + INSERT INTO t1 VALUES(zeroblob(400)); + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 SELECT * FROM t1; + SELECT rowid FROM t1; + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} + + +db close +sqlite3_shutdown +sqlite3_config_memstatus 1 +sqlite3_config_pagecache 0 0 +sqlite3_config_scratch 0 0 +sqlite3_config_lookaside 100 500 +sqlite3_initialize +autoinstall_test_functions +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/memsubsys2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/memsubsys2.test --- sqlite3-3.4.2/test/memsubsys2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/memsubsys2.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,173 @@ +# 2008 June 18 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests of the memory allocation subsystem. +# +# $Id: memsubsys2.test,v 1.2 2008/08/12 15:21:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +sqlite3_reset_auto_extension + +# This procedure constructs a new database in test.db. It fills +# this database with many small records (enough to force multiple +# rebalance operations in the btree-layer and to require a large +# page cache), verifies correct results, then returns. +# +proc build_test_db {testname pragmas} { + catch {db close} + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval $pragmas + db eval { + CREATE TABLE t1(x, y); + CREATE TABLE t2(a, b); + CREATE INDEX i1 ON t1(x,y); + INSERT INTO t1 VALUES(1, 100); + INSERT INTO t1 VALUES(2, 200); + } + for {set i 2} {$i<5000} {incr i $i} { + db eval {INSERT INTO t2 SELECT * FROM t1} + db eval {INSERT INTO t1 SELECT a+$i, a+b*100 FROM t2} + db eval {DELETE FROM t2} + } + do_test $testname.1 { + db eval {SELECT count(*) FROM t1} + } 8192 + integrity_check $testname.2 +} + +# Reset all of the highwater marks. +# +proc reset_highwater_marks {} { + sqlite3_status SQLITE_STATUS_MEMORY_USED 1 + sqlite3_status SQLITE_STATUS_MALLOC_SIZE 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_USED 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_SIZE 1 + sqlite3_status SQLITE_STATUS_SCRATCH_USED 1 + sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 1 + sqlite3_status SQLITE_STATUS_SCRATCH_SIZE 1 + sqlite3_status SQLITE_STATUS_PARSER_STACK 1 +} + +# Test 1: Verify that calling sqlite3_malloc(0) returns a NULL +# pointer. +# +set highwater [sqlite3_memory_highwater 0] +do_test memsubsys2-1.1 { + sqlite3_malloc 0 +} {0} +do_test memsubsys2-1.2 { + sqlite3_memory_highwater 0 +} $highwater + + +# Test 2: Verify that the highwater mark increases after a large +# allocation. +# +sqlite3_memory_highwater 1 +set highwater [sqlite3_memory_highwater 0] +do_test memsubsys2-2.1 { + sqlite3_free [set x [sqlite3_malloc 100000]] + expr {$x!="0"} +} {1} +do_test memsubsys2-2.2 { + expr {[sqlite3_memory_highwater 0]>=[sqlite3_memory_used]+$highwater} +} {1} + +# Test 3: Verify that turning of memstatus disables the statistics +# tracking. +# +db close +sqlite3_shutdown +sqlite3_config_memstatus 0 +sqlite3_initialize +reset_highwater_marks +set highwater [sqlite3_memory_highwater 0] +do_test memsubsys2-3.1 { + set highwater +} {0} +do_test memsubsys2-3.2 { + sqlite3_malloc 0 +} {0} +do_test memsubsys2-3.3 { + sqlite3_memory_highwater 0 +} {0} +do_test memsubsys2-3.4 { + sqlite3_memory_used +} {0} +do_test memsubsys2-3.5 { + set ::allocation [sqlite3_malloc 100000] + expr {$::allocation!="0"} +} {1} +do_test memsubsys2-3.6 { + sqlite3_memory_highwater 0 +} {0} +do_test memsubsys2-3.7 { + sqlite3_memory_used +} {0} +do_test memsubsys2-3.8 { + sqlite3_free $::allocation +} {} +do_test memsubsys2-3.9 { + sqlite3_free 0 +} {} + + +# Test 4: Verify that turning on memstatus reenables the statistics +# tracking. 
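With statistics enabled, sqlite3_memory_used reports the total of all outstanding allocations and sqlite3_memory_highwater reports the peak, with an argument of 1 resetting the peak; those are the properties the checks below rely on. A small stand-alone sketch, assuming the harness wrappers used in this file:

    sqlite3_memory_highwater 1            ;# reset the high-water mark
    set p [sqlite3_malloc 100000]         ;# a large allocation, as in test 4.5
    puts [sqlite3_memory_used]            ;# at least 100000 while $p is outstanding
    sqlite3_free $p
    puts [sqlite3_memory_highwater 0]     ;# the peak stays at or above 100000 after the free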
+# +sqlite3_shutdown +sqlite3_config_memstatus 1 +sqlite3_initialize +reset_highwater_marks +set highwater [sqlite3_memory_highwater 0] +do_test memsubsys2-4.1 { + set highwater +} {0} +do_test memsubsys2-4.2 { + sqlite3_malloc 0 +} {0} +do_test memsubsys2-4.3 { + sqlite3_memory_highwater 0 +} {0} +do_test memsubsys2-4.4 { + sqlite3_memory_used +} {0} +do_test memsubsys2-4.5 { + set ::allocation [sqlite3_malloc 100000] + expr {$::allocation!="0"} +} {1} +do_test memsubsys2-4.6 { + expr {[sqlite3_memory_highwater 0]>=100000} +} {1} +do_test memsubsys2-4.7 { + expr {[sqlite3_memory_used]>=100000} +} {1} +do_test memsubsys2-4.8 { + sqlite3_free $::allocation +} {} +do_test memsubsys2-4.9 { + sqlite3_free 0 +} {} +do_test memsubsys2-4.10 { + expr {[sqlite3_memory_highwater 0]>=100000} +} {1} +do_test memsubsys2-4.11 { + sqlite3_memory_used +} {0} + + + + +autoinstall_test_functions +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/minmax2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/minmax2.test --- sqlite3-3.4.2/test/minmax2.test 2007-07-18 19:17:12.000000000 +0100 +++ sqlite3-3.6.16/test/minmax2.test 2009-06-05 18:03:33.000000000 +0100 @@ -15,7 +15,7 @@ # optimization works right in the presence of descending # indices. Ticket #2514. # -# $Id: minmax2.test,v 1.1 2007/07/18 18:17:12 drh Exp $ +# $Id: minmax2.test,v 1.2 2008/01/05 17:39:30 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -71,14 +71,14 @@ } {1} do_test minmax2-1.6 { set sqlite_search_count -} {2} +} {1} do_test minmax2-1.7 { set sqlite_search_count 0 execsql {SELECT max(x) FROM t1} } {20} do_test minmax2-1.8 { set sqlite_search_count -} {1} +} {0} do_test minmax2-1.9 { set sqlite_search_count 0 execsql {SELECT max(y) FROM t1} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/minmax3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/minmax3.test --- sqlite3-3.4.2/test/minmax3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/minmax3.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,217 @@ +# 2008 January 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# $Id: minmax3.test,v 1.5 2008/07/12 14:52:20 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Do an SQL statement. Append the search count to the end of the result. +# +proc count sql { + set ::sqlite_search_count 0 + return [concat [execsql $sql] $::sqlite_search_count] +} + +# This procedure sets the value of the file-format in file 'test.db' +# to $newval. Also, the schema cookie is incremented. +# +proc set_file_format {newval} { + hexio_write test.db 44 [hexio_render_int32 $newval] + set schemacookie [hexio_get_int [hexio_read test.db 40 4]] + incr schemacookie + hexio_write test.db 40 [hexio_render_int32 $schemacookie] + return {} +} + +do_test minmax3-1.0 { + execsql { + CREATE TABLE t1(x, y, z); + } + db close + set_file_format 4 + sqlite3 db test.db + execsql { + BEGIN; + INSERT INTO t1 VALUES('1', 'I', 'one'); + INSERT INTO t1 VALUES('2', 'IV', 'four'); + INSERT INTO t1 VALUES('2', NULL, 'three'); + INSERT INTO t1 VALUES('2', 'II', 'two'); + INSERT INTO t1 VALUES('2', 'V', 'five'); + INSERT INTO t1 VALUES('3', 'VI', 'six'); + COMMIT; + } +} {} +do_test minmax3-1.1.1 { + # Linear scan. 
+ count { SELECT max(y) FROM t1 WHERE x = '2'; } +} {V 5} +do_test minmax3-1.1.2 { + # Index optimizes the WHERE x='2' constraint. + execsql { CREATE INDEX i1 ON t1(x) } + count { SELECT max(y) FROM t1 WHERE x = '2'; } +} {V 9} +do_test minmax3-1.1.3 { + # Index optimizes the WHERE x='2' constraint and the MAX(y). + execsql { CREATE INDEX i2 ON t1(x,y) } + count { SELECT max(y) FROM t1 WHERE x = '2'; } +} {V 1} +do_test minmax3-1.1.4 { + # Index optimizes the WHERE x='2' constraint and the MAX(y). + execsql { DROP INDEX i2 ; CREATE INDEX i2 ON t1(x, y DESC) } + count { SELECT max(y) FROM t1 WHERE x = '2'; } +} {V 1} +do_test minmax3-1.1.5 { + count { SELECT max(y) FROM t1 WHERE x = '2' AND y != 'V'; } +} {IV 2} +do_test minmax3-1.1.6 { + count { SELECT max(y) FROM t1 WHERE x = '2' AND y < 'V'; } +} {IV 1} +do_test minmax3-1.1.6 { + count { SELECT max(y) FROM t1 WHERE x = '2' AND z != 'five'; } +} {IV 4} + +do_test minmax3-1.2.1 { + # Linear scan of t1. + execsql { DROP INDEX i1 ; DROP INDEX i2 } + count { SELECT min(y) FROM t1 WHERE x = '2'; } +} {II 5} +do_test minmax3-1.2.2 { + # Index i1 optimizes the WHERE x='2' constraint. + execsql { CREATE INDEX i1 ON t1(x) } + count { SELECT min(y) FROM t1 WHERE x = '2'; } +} {II 9} +do_test minmax3-1.2.3 { + # Index i2 optimizes the WHERE x='2' constraint and the min(y). + execsql { CREATE INDEX i2 ON t1(x,y) } + count { SELECT min(y) FROM t1 WHERE x = '2'; } +} {II 1} +do_test minmax3-1.2.4 { + # Index optimizes the WHERE x='2' constraint and the MAX(y). + execsql { DROP INDEX i2 ; CREATE INDEX i2 ON t1(x, y DESC) } + count { SELECT min(y) FROM t1 WHERE x = '2'; } +} {II 1} + +do_test minmax3-1.3.1 { + # Linear scan + execsql { DROP INDEX i1 ; DROP INDEX i2 } + count { SELECT min(y) FROM t1; } +} {I 5} +do_test minmax3-1.3.2 { + # Index i1 optimizes the min(y) + execsql { CREATE INDEX i1 ON t1(y) } + count { SELECT min(y) FROM t1; } +} {I 1} +do_test minmax3-1.3.3 { + # Index i1 optimizes the min(y) + execsql { DROP INDEX i1 ; CREATE INDEX i1 ON t1(y DESC) } + count { SELECT min(y) FROM t1; } +} {I 1} + +do_test minmax3-1.4.1 { + # Linear scan + execsql { DROP INDEX i1 } + count { SELECT max(y) FROM t1; } +} {VI 5} +do_test minmax3-1.4.2 { + # Index i1 optimizes the max(y) + execsql { CREATE INDEX i1 ON t1(y) } + count { SELECT max(y) FROM t1; } +} {VI 0} +do_test minmax3-1.4.3 { + # Index i1 optimizes the max(y) + execsql { DROP INDEX i1 ; CREATE INDEX i1 ON t1(y DESC) } + execsql { SELECT y from t1} + count { SELECT max(y) FROM t1; } +} {VI 0} +do_test minmax3-1.4.4 { + execsql { DROP INDEX i1 } +} {} + +do_test minmax3-2.1 { + execsql { + CREATE TABLE t2(a, b); + CREATE INDEX i3 ON t2(a, b); + INSERT INTO t2 VALUES(1, NULL); + INSERT INTO t2 VALUES(1, 1); + INSERT INTO t2 VALUES(1, 2); + INSERT INTO t2 VALUES(1, 3); + INSERT INTO t2 VALUES(2, NULL); + INSERT INTO t2 VALUES(2, 1); + INSERT INTO t2 VALUES(2, 2); + INSERT INTO t2 VALUES(2, 3); + INSERT INTO t2 VALUES(3, 1); + INSERT INTO t2 VALUES(3, 2); + INSERT INTO t2 VALUES(3, 3); + } +} {} +do_test minmax3-2.2 { + execsql { SELECT min(b) FROM t2 WHERE a = 1; } +} {1} +do_test minmax3-2.3 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b>1; } +} {2} +do_test minmax3-2.4 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b>-1; } +} {1} +do_test minmax3-2.5 { + execsql { SELECT min(b) FROM t2 WHERE a = 1; } +} {1} +do_test minmax3-2.6 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b<2; } +} {1} +do_test minmax3-2.7 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b<1; } +} {{}} 
+do_test minmax3-2.8 { + execsql { SELECT min(b) FROM t2 WHERE a = 3 AND b<1; } +} {{}} + +do_test minmax3-2.1 { + execsql { + DROP TABLE t2; + CREATE TABLE t2(a, b); + CREATE INDEX i3 ON t2(a, b DESC); + INSERT INTO t2 VALUES(1, NULL); + INSERT INTO t2 VALUES(1, 1); + INSERT INTO t2 VALUES(1, 2); + INSERT INTO t2 VALUES(1, 3); + INSERT INTO t2 VALUES(2, NULL); + INSERT INTO t2 VALUES(2, 1); + INSERT INTO t2 VALUES(2, 2); + INSERT INTO t2 VALUES(2, 3); + INSERT INTO t2 VALUES(3, 1); + INSERT INTO t2 VALUES(3, 2); + INSERT INTO t2 VALUES(3, 3); + } +} {} +do_test minmax3-2.2 { + execsql { SELECT min(b) FROM t2 WHERE a = 1; } +} {1} +do_test minmax3-2.3 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b>1; } +} {2} +do_test minmax3-2.4 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b>-1; } +} {1} +do_test minmax3-2.5 { + execsql { SELECT min(b) FROM t2 WHERE a = 1; } +} {1} +do_test minmax3-2.6 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b<2; } +} {1} +do_test minmax3-2.7 { + execsql { SELECT min(b) FROM t2 WHERE a = 1 AND b<1; } +} {{}} +do_test minmax3-2.8 { + execsql { SELECT min(b) FROM t2 WHERE a = 3 AND b<1; } +} {{}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/minmax.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/minmax.test --- sqlite3-3.4.2/test/minmax.test 2007-07-18 19:04:27.000000000 +0100 +++ sqlite3-3.6.16/test/minmax.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # aggregate min() and max() functions and which are handled as # as a special case. # -# $Id: minmax.test,v 1.19 2006/03/26 01:21:23 drh Exp $ +# $Id: minmax.test,v 1.21 2008/07/08 18:05:26 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -68,14 +68,14 @@ } {1} do_test minmax-1.6 { set sqlite_search_count -} {2} +} {1} do_test minmax-1.7 { set sqlite_search_count 0 execsql {SELECT max(x) FROM t1} } {20} do_test minmax-1.8 { set sqlite_search_count -} {1} +} {0} do_test minmax-1.9 { set sqlite_search_count 0 execsql {SELECT max(y) FROM t1} @@ -84,6 +84,19 @@ set sqlite_search_count } {19} +do_test minmax-1.21 { + execsql {SELECT min(x) FROM t1 WHERE x=5} +} {5} +do_test minmax-1.22 { + execsql {SELECT min(x) FROM t1 WHERE x>=5} +} {5} +do_test minmax-1.23 { + execsql {SELECT min(x) FROM t1 WHERE x>=4.5} +} {5} +do_test minmax-1.24 { + execsql {SELECT min(x) FROM t1 WHERE x<4.5} +} {1} + do_test minmax-2.0 { execsql { CREATE TABLE t2(a INTEGER PRIMARY KEY, b); @@ -381,4 +394,149 @@ } {{} {}} +do_test minmax-11.1 { + execsql { + CREATE INDEX t1i2 ON t1(y,x); + SELECT min(x) FROM t1 WHERE y=5; + } +} {16} +do_test minmax-11.2 { + execsql { + SELECT max(x) FROM t1 WHERE y=5; + } +} {20} +do_test minmax-11.3 { + execsql { + SELECT min(x) FROM t1 WHERE y=6; + } +} {{}} +do_test minmax-11.4 { + execsql { + SELECT max(x) FROM t1 WHERE y=6; + } +} {{}} +do_test minmax-11.5 { + execsql { + SELECT min(x) FROM t1 WHERE y=1; + } +} {1} +do_test minmax-11.6 { + execsql { + SELECT max(x) FROM t1 WHERE y=1; + } +} {1} +do_test minmax-11.7 { + execsql { + SELECT min(x) FROM t1 WHERE y=0; + } +} {{}} +do_test minmax-11.8 { + execsql { + SELECT max(x) FROM t1 WHERE y=0; + } +} {{}} +do_test minmax-11.9 { + execsql { + SELECT min(x) FROM t1 WHERE y=5 AND x>=17.5; + } +} {18} +do_test minmax-11.10 { + execsql { + SELECT max(x) FROM t1 WHERE y=5 AND x>=17.5; + } +} {20} + +do_test minmax-12.1 { + execsql { + CREATE TABLE t7(a,b,c); + INSERT INTO t7 SELECT y, x, x*y FROM t1; + INSERT INTO t7 SELECT y, x, x*y+1000 FROM t1; + CREATE INDEX t7i1 ON t7(a,b,c); + SELECT min(a) FROM t7; 
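      -- (Editorial note) t7i1 spans (a,b,c), so the aggregates over the
      -- indexed columns in the tests that follow can be answered from one
      -- end of an index range: min(a)/max(a) from the ends of the whole
      -- index, min(b)/max(b) within a fixed value of a, and so on, without
      -- scanning the table itself.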
+ } +} {1} +do_test minmax-12.2 { + execsql { + SELECT max(a) FROM t7; + } +} {5} +do_test minmax-12.3 { + execsql { + SELECT max(a) FROM t7 WHERE a=5; + } +} {5} +do_test minmax-12.4 { + execsql { + SELECT min(b) FROM t7 WHERE a=5; + } +} {16} +do_test minmax-12.5 { + execsql { + SELECT max(b) FROM t7 WHERE a=5; + } +} {20} +do_test minmax-12.6 { + execsql { + SELECT min(b) FROM t7 WHERE a=4; + } +} {8} +do_test minmax-12.7 { + execsql { + SELECT max(b) FROM t7 WHERE a=4; + } +} {15} +do_test minmax-12.8 { + execsql { + SELECT min(c) FROM t7 WHERE a=4 AND b=10; + } +} {40} +do_test minmax-12.9 { + execsql { + SELECT max(c) FROM t7 WHERE a=4 AND b=10; + } +} {1040} +do_test minmax-12.10 { + execsql { + SELECT min(rowid) FROM t7; + } +} {1} +do_test minmax-12.11 { + execsql { + SELECT max(rowid) FROM t7; + } +} {40} +do_test minmax-12.12 { + execsql { + SELECT min(rowid) FROM t7 WHERE a=3; + } +} {4} +do_test minmax-12.13 { + execsql { + SELECT max(rowid) FROM t7 WHERE a=3; + } +} {27} +do_test minmax-12.14 { + execsql { + SELECT min(rowid) FROM t7 WHERE a=3 AND b=5; + } +} {5} +do_test minmax-12.15 { + execsql { + SELECT max(rowid) FROM t7 WHERE a=3 AND b=5; + } +} {25} +do_test minmax-12.16 { + execsql { + SELECT min(rowid) FROM t7 WHERE a=3 AND b=5 AND c=1015; + } +} {25} +do_test minmax-12.17 { + execsql { + SELECT max(rowid) FROM t7 WHERE a=3 AND b=5 AND c=15; + } +} {5} + + + + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc1.test --- sqlite3-3.4.2/test/misc1.test 2007-05-15 15:58:41.000000000 +0100 +++ sqlite3-3.6.16/test/misc1.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for miscellanous features that were # left out of other test files. # -# $Id: misc1.test,v 1.41 2006/06/27 20:06:45 drh Exp $ +# $Id: misc1.test,v 1.42 2007/11/05 14:58:23 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -487,7 +487,7 @@ } {1} do_test misc1-14.3 { cd .. - file delete tempdir + file delete -force tempdir execsql {COMMIT} file exists ./test.db-journal } {0} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc2.test --- sqlite3-3.4.2/test/misc2.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/misc2.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for miscellanous features that were # left out of other test files. # -# $Id: misc2.test,v 1.27 2007/04/06 01:03:34 drh Exp $ +# $Id: misc2.test,v 1.28 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -157,201 +157,203 @@ # 2006-08-16: This has changed. It is now permitted to update # the table being SELECTed from within the callback of the query. 
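In Tcl terms, the behaviour verified below is the db eval {SQL} {script} form, where the script runs once per result row and may issue further statements on the same connection, including writes to the very table being scanned. A minimal sketch of that shape (the demo table is hypothetical; the real tests follow):

  db eval {SELECT rowid, x FROM demo} {
    # The callback body may modify the table currently being read.
    if {$x & 1} { db eval {DELETE FROM demo WHERE rowid=$rowid} }
  }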
# -do_test misc2-7.1 { - db close - file delete -force test.db - sqlite3 db test.db - execsql { - CREATE TABLE t1(x); - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - SELECT * FROM t1; - } -} {1 2 3} -do_test misc2-7.2 { - set rc [catch { - db eval {SELECT rowid FROM t1} {} { - db eval "DELETE FROM t1 WHERE rowid=$rowid" - } - } msg] - lappend rc $msg -} {0 {}} -do_test misc2-7.3 { - execsql {SELECT * FROM t1} -} {} -do_test misc2-7.4 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1} { - if {$x & 1} { - db eval {DELETE FROM t1 WHERE rowid=$rowid} - } - } - execsql {SELECT * FROM t1} -} {2 4} -do_test misc2-7.5 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1} { - if {$x & 1} { - db eval {DELETE FROM t1 WHERE rowid=$rowid+1} - } - } - execsql {SELECT * FROM t1} -} {1 3} -do_test misc2-7.6 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1} { - if {$x & 1} { - db eval {DELETE FROM t1} - } - } - execsql {SELECT * FROM t1} -} {} -do_test misc2-7.7 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1} { - if {$x & 1} { - db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} - } - } - execsql {SELECT * FROM t1} -} {101 2 103 4} -do_test misc2-7.8 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - } - db eval {SELECT rowid, x FROM t1} { - if {$x<10} { - db eval {INSERT INTO t1 VALUES($x+1)} - } - } - execsql {SELECT * FROM t1} -} {1 2 3 4 5 6 7 8 9 10} - -# Repeat the tests 7.1 through 7.8 about but this time do the SELECTs -# in reverse order so that we exercise the sqlite3BtreePrev() routine -# instead of sqlite3BtreeNext() -# -do_test misc2-7.11 { - db close - file delete -force test.db - sqlite3 db test.db - execsql { - CREATE TABLE t1(x); - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - SELECT * FROM t1; - } -} {1 2 3} -do_test misc2-7.12 { - set rc [catch { - db eval {SELECT rowid FROM t1 ORDER BY rowid DESC} {} { - db eval "DELETE FROM t1 WHERE rowid=$rowid" +ifcapable tclvar { + do_test misc2-7.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + SELECT * FROM t1; + } + } {1 2 3} + do_test misc2-7.2 { + set rc [catch { + db eval {SELECT rowid FROM t1} {} { + db eval "DELETE FROM t1 WHERE rowid=$rowid" + } + } msg] + lappend rc $msg + } {0 {}} + do_test misc2-7.3 { + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.4 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {2 4} + do_test misc2-7.5 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE 
rowid=$rowid+1} + } + } + execsql {SELECT * FROM t1} + } {1 3} + do_test misc2-7.6 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1} + } } - } msg] - lappend rc $msg -} {0 {}} -do_test misc2-7.13 { - execsql {SELECT * FROM t1} -} {} -do_test misc2-7.14 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { - if {$x & 1} { - db eval {DELETE FROM t1 WHERE rowid=$rowid} + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.7 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {101 2 103 4} + do_test misc2-7.8 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); } - } - execsql {SELECT * FROM t1} -} {2 4} -do_test misc2-7.15 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1} { - if {$x & 1} { - db eval {DELETE FROM t1 WHERE rowid=$rowid+1} + db eval {SELECT rowid, x FROM t1} { + if {$x<10} { + db eval {INSERT INTO t1 VALUES($x+1)} + } } - } - execsql {SELECT * FROM t1} -} {1 3} -do_test misc2-7.16 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { - if {$x & 1} { - db eval {DELETE FROM t1} + execsql {SELECT * FROM t1} + } {1 2 3 4 5 6 7 8 9 10} + + # Repeat the tests 7.1 through 7.8 about but this time do the SELECTs + # in reverse order so that we exercise the sqlite3BtreePrev() routine + # instead of sqlite3BtreeNext() + # + do_test misc2-7.11 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + SELECT * FROM t1; + } + } {1 2 3} + do_test misc2-7.12 { + set rc [catch { + db eval {SELECT rowid FROM t1 ORDER BY rowid DESC} {} { + db eval "DELETE FROM t1 WHERE rowid=$rowid" + } + } msg] + lappend rc $msg + } {0 {}} + do_test misc2-7.13 { + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.14 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {2 4} + do_test misc2-7.15 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1} { + if {$x & 1} { + db eval {DELETE FROM t1 WHERE rowid=$rowid+1} + } + } + execsql {SELECT * FROM t1} + } {1 3} + do_test misc2-7.16 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval {DELETE FROM t1} + } } - } - execsql {SELECT * FROM 
t1} -} {} -do_test misc2-7.17 { - execsql { - DELETE FROM t1; - INSERT INTO t1 VALUES(1); - INSERT INTO t1 VALUES(2); - INSERT INTO t1 VALUES(3); - INSERT INTO t1 VALUES(4); - } - db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { - if {$x & 1} { - db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} + execsql {SELECT * FROM t1} + } {} + do_test misc2-7.17 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(4); + } + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x & 1} { + db eval {UPDATE t1 SET x=x+100 WHERE rowid=$rowid} + } + } + execsql {SELECT * FROM t1} + } {101 2 103 4} + do_test misc2-7.18 { + execsql { + DELETE FROM t1; + INSERT INTO t1(rowid,x) VALUES(10,10); } - } - execsql {SELECT * FROM t1} -} {101 2 103 4} -do_test misc2-7.18 { - execsql { - DELETE FROM t1; - INSERT INTO t1(rowid,x) VALUES(10,10); - } - db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { - if {$x>1} { - db eval {INSERT INTO t1(rowid,x) VALUES($x-1,$x-1)} + db eval {SELECT rowid, x FROM t1 ORDER BY rowid DESC} { + if {$x>1} { + db eval {INSERT INTO t1(rowid,x) VALUES($x-1,$x-1)} + } } - } - execsql {SELECT * FROM t1} -} {1 2 3 4 5 6 7 8 9 10} + execsql {SELECT * FROM t1} + } {1 2 3 4 5 6 7 8 9 10} +} db close file delete -force test.db diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc3.test --- sqlite3-3.4.2/test/misc3.test 2007-03-27 15:43:05.000000000 +0100 +++ sqlite3-3.6.16/test/misc3.test 2009-06-25 12:24:39.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for miscellanous features that were # left out of other test files. # -# $Id: misc3.test,v 1.16 2005/01/21 03:12:16 danielk1977 Exp $ +# $Id: misc3.test,v 1.20 2009/05/06 00:49:01 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -258,6 +258,40 @@ execsql {BEGIN; EXPLAIN ROLLBACK} catchsql {ROLLBACK} } {0 {}} + + # Do some additional EXPLAIN operations to exercise the displayP4 logic. + do_test misc3-6.10 { + set x [execsql { + CREATE TABLE ex1( + a INTEGER DEFAULT 54321, + b TEXT DEFAULT "hello", + c REAL DEFAULT 3.1415926 + ); + CREATE UNIQUE INDEX ex1i1 ON ex1(a); + EXPLAIN REINDEX; + }] + regexp { IsUnique \d+ \d+ \d+ \d+ } $x + } {1} + if {[regexp {16} [db one {PRAGMA encoding}]]} { + do_test misc3-6.11-utf16 { + set x [execsql { + EXPLAIN SELECT a+123456789012, b*4.5678, c FROM ex1 ORDER BY +a, b DESC + }] + set y [regexp { 123456789012 } $x] + lappend y [regexp { 4.5678 } $x] + lappend y [regexp {,-BINARY} $x] + } {1 1 1} + } else { + do_test misc3-6.11-utf8 { + set x [execsql { + EXPLAIN SELECT a+123456789012, b*4.5678, c FROM ex1 ORDER BY +a, b DESC + }] + set y [regexp { 123456789012 } $x] + lappend y [regexp { 4.5678 } $x] + lappend y [regexp { hello } $x] + lappend y [regexp {,-BINARY} $x] + } {1 1 1 1} + } } ifcapable {trigger} { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc4.test --- sqlite3-3.4.2/test/misc4.test 2007-08-13 18:30:11.000000000 +0100 +++ sqlite3-3.6.16/test/misc4.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for miscellanous features that were # left out of other test files. 
# -# $Id: misc4.test,v 1.22 2007/08/13 15:28:35 danielk1977 Exp $ +# $Id: misc4.test,v 1.23 2007/12/08 18:01:31 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -98,7 +98,6 @@ # Ticket #966 # -ifcapable compound { do_test misc4-3.1 { execsql { CREATE TABLE Table1(ID integer primary key, Value TEXT); @@ -106,18 +105,33 @@ CREATE TABLE Table2(ID integer NOT NULL, Value TEXT); INSERT INTO Table2 VALUES(1, 'z'); INSERT INTO Table2 VALUES (1, 'a'); - SELECT ID, Value FROM Table1 - UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1 - ORDER BY 1, 2; } -} {1 x 1 z} -do_test misc4-3.2 { catchsql { - SELECT ID, Value FROM Table1 - UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1, 2 - ORDER BY 1, 2; + SELECT ID, max(Value) FROM Table2 GROUP BY 1, 2 ORDER BY 1, 2; } } {1 {aggregate functions are not allowed in the GROUP BY clause}} +ifcapable compound { + do_test misc4-3.2 { + execsql { + SELECT ID, Value FROM Table1 + UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1 + ORDER BY 1, 2; + } + } {1 x 1 z} + do_test misc4-3.3 { + catchsql { + SELECT ID, Value FROM Table1 + UNION SELECT ID, max(Value) FROM Table2 GROUP BY 1, 2 + ORDER BY 1, 2; + } + } {1 {aggregate functions are not allowed in the GROUP BY clause}} + do_test misc4-3.4 { + catchsql { + SELECT ID, max(Value) FROM Table2 GROUP BY 1, 2 + UNION SELECT ID, Value FROM Table1 + ORDER BY 1, 2; + } + } {1 {aggregate functions are not allowed in the GROUP BY clause}} } ;# ifcapable compound # Ticket #1047. Make sure column types are preserved in subqueries. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc5.test --- sqlite3-3.4.2/test/misc5.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/misc5.test 2009-06-05 18:03:33.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests for miscellanous features that were # left out of other test files. # -# $Id: misc5.test,v 1.16 2007/01/03 23:37:29 drh Exp $ +# $Id: misc5.test,v 1.22 2008/07/29 10:26:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -481,8 +481,18 @@ SELECT songid FROM songs WHERE LOWER(artist) = ( + -- This sub-query is indeterminate. Because there is no ORDER BY, + -- it may return 'one', 'two' or 'three'. Because of this, the + -- outermost parent query may correctly return any of 'one', 'two' + -- or 'three' as well. SELECT DISTINCT LOWER(artist) FROM ( + -- This sub-query returns the table: + -- + -- two 14 + -- one 10 + -- three 5 + -- SELECT DISTINCT artist,sum(timesplayed) AS total FROM songs GROUP BY LOWER(artist) @@ -495,7 +505,7 @@ ) ORDER BY LOWER(artist) ASC; } - } {two} + } {one} } # Ticket #1370. 
Do not overwrite small files (less than 1024 bytes) @@ -548,12 +558,12 @@ CREATE TABLE logs(msg TEXT, timestamp INTEGER, dbtime TEXT); } catchsql { - SELECT * FROM logs WHERE logs.id >= (SELECT head FROM logs_base) + SELECT * FROM logs WHERE logs.oid >= (SELECT head FROM logs_base) UNION ALL SELECT * FROM logs LIMIT (SELECT lmt FROM logs_base) ; } - } {1 {no such column: logs.id}} + } {1 {no such table: logs_base}} } # Overflow the lemon parser stack by providing an overly complex @@ -571,33 +581,26 @@ catchsql $sql } {1 {parser stack overflow}} -# Check the MISUSE return from sqlitee3_busy_timeout -# -do_test misc5-8.1-misuse { - set DB [sqlite3_connection_pointer db] - db close - sqlite3_busy_timeout $DB 1000 -} SQLITE_MISUSE -sqlite3 db test.db - # Ticket #1911 # -do_test misc5-9.1 { - execsql { - SELECT name, type FROM sqlite_master WHERE name IS NULL - UNION - SELECT type, name FROM sqlite_master WHERE type IS NULL - ORDER BY 1, 2, 1, 2, 1, 2 - } -} {} -do_test misc5-9.2 { - execsql { - SELECT name, type FROM sqlite_master WHERE name IS NULL - UNION - SELECT type, name FROM sqlite_master WHERE type IS NULL - ORDER BY 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2 - } -} {} +ifcapable compound { + do_test misc5-9.1 { + execsql { + SELECT name, type FROM sqlite_master WHERE name IS NULL + UNION + SELECT type, name FROM sqlite_master WHERE type IS NULL + ORDER BY 1, 2, 1, 2, 1, 2 + } + } {} + do_test misc5-9.2 { + execsql { + SELECT name, type FROM sqlite_master WHERE name IS NULL + UNION + SELECT type, name FROM sqlite_master WHERE type IS NULL + ORDER BY 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2 + } + } {} +} # Ticket #1912. Make the tokenizer require a space after a numeric # literal. @@ -614,5 +617,1256 @@ } {1 {unrecognized token: "123.4e5ghi"}} +# Additional integer encoding tests. 
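The long INSERT sequence below is systematic rather than arbitrary: besides the small values -18..18, it visits every power of two from 2^5 up to 2^63 and inserts a couple of values on each side of it, positive and negative, so that the cut-over points of SQLite's 1-, 2-, 3-, 4-, 6- and 8-byte integer serial types are all stored and read back in sorted order. A rough sketch of a generator for values of that shape, assuming Tcl 8.5+ arbitrary-precision arithmetic (this helper is illustrative only and is not part of the test):

  proc boundary_values {} {
    # Small values, then a window of +/-2 around each power of two.
    set vals {}
    for {set v -18} {$v <= 18} {incr v} { lappend vals $v }
    for {set p 5} {$p <= 63} {incr p} {
      set c [expr {2 ** $p}]
      foreach d {-2 -1 0 1 2} {
        lappend vals [expr {$c + $d}] [expr {-($c + $d)}]
      }
    }
    # Values outside the signed 64-bit range would not round-trip as
    # integers, so drop them before use.
    set keep {}
    foreach v $vals {
      if {$v >= -9223372036854775808 && $v <= 9223372036854775807} {
        lappend keep $v
      }
    }
    return [lsort -integer -unique $keep]
  }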
+# +do_test misc5-11.1 { + db eval { + CREATE TABLE t3(x); + INSERT INTO t3 VALUES(-18); + INSERT INTO t3 VALUES(-17); + INSERT INTO t3 VALUES(-16); + INSERT INTO t3 VALUES(-15); + INSERT INTO t3 VALUES(-14); + INSERT INTO t3 VALUES(-13); + INSERT INTO t3 VALUES(-12); + INSERT INTO t3 VALUES(-11); + INSERT INTO t3 VALUES(-10); + INSERT INTO t3 VALUES(-9); + INSERT INTO t3 VALUES(-8); + INSERT INTO t3 VALUES(-7); + INSERT INTO t3 VALUES(-6); + INSERT INTO t3 VALUES(-5); + INSERT INTO t3 VALUES(-4); + INSERT INTO t3 VALUES(-3); + INSERT INTO t3 VALUES(-2); + INSERT INTO t3 VALUES(-1); + INSERT INTO t3 VALUES(0); + INSERT INTO t3 VALUES(1); + INSERT INTO t3 VALUES(2); + INSERT INTO t3 VALUES(3); + INSERT INTO t3 VALUES(4); + INSERT INTO t3 VALUES(5); + INSERT INTO t3 VALUES(6); + INSERT INTO t3 VALUES(7); + INSERT INTO t3 VALUES(8); + INSERT INTO t3 VALUES(9); + INSERT INTO t3 VALUES(10); + INSERT INTO t3 VALUES(11); + INSERT INTO t3 VALUES(12); + INSERT INTO t3 VALUES(13); + INSERT INTO t3 VALUES(14); + INSERT INTO t3 VALUES(15); + INSERT INTO t3 VALUES(16); + INSERT INTO t3 VALUES(17); + INSERT INTO t3 VALUES(18); + INSERT INTO t3 VALUES(30); + INSERT INTO t3 VALUES(31); + INSERT INTO t3 VALUES(32); + INSERT INTO t3 VALUES(33); + INSERT INTO t3 VALUES(34); + INSERT INTO t3 VALUES(-30); + INSERT INTO t3 VALUES(-31); + INSERT INTO t3 VALUES(-32); + INSERT INTO t3 VALUES(-33); + INSERT INTO t3 VALUES(-34); + INSERT INTO t3 VALUES(62); + INSERT INTO t3 VALUES(63); + INSERT INTO t3 VALUES(64); + INSERT INTO t3 VALUES(65); + INSERT INTO t3 VALUES(66); + INSERT INTO t3 VALUES(-62); + INSERT INTO t3 VALUES(-63); + INSERT INTO t3 VALUES(-64); + INSERT INTO t3 VALUES(-65); + INSERT INTO t3 VALUES(-66); + INSERT INTO t3 VALUES(126); + INSERT INTO t3 VALUES(127); + INSERT INTO t3 VALUES(128); + INSERT INTO t3 VALUES(129); + INSERT INTO t3 VALUES(130); + INSERT INTO t3 VALUES(-126); + INSERT INTO t3 VALUES(-127); + INSERT INTO t3 VALUES(-128); + INSERT INTO t3 VALUES(-129); + INSERT INTO t3 VALUES(-130); + INSERT INTO t3 VALUES(254); + INSERT INTO t3 VALUES(255); + INSERT INTO t3 VALUES(256); + INSERT INTO t3 VALUES(257); + INSERT INTO t3 VALUES(258); + INSERT INTO t3 VALUES(-254); + INSERT INTO t3 VALUES(-255); + INSERT INTO t3 VALUES(-256); + INSERT INTO t3 VALUES(-257); + INSERT INTO t3 VALUES(-258); + INSERT INTO t3 VALUES(510); + INSERT INTO t3 VALUES(511); + INSERT INTO t3 VALUES(512); + INSERT INTO t3 VALUES(513); + INSERT INTO t3 VALUES(514); + INSERT INTO t3 VALUES(-510); + INSERT INTO t3 VALUES(-511); + INSERT INTO t3 VALUES(-512); + INSERT INTO t3 VALUES(-513); + INSERT INTO t3 VALUES(-514); + INSERT INTO t3 VALUES(1022); + INSERT INTO t3 VALUES(1023); + INSERT INTO t3 VALUES(1024); + INSERT INTO t3 VALUES(1025); + INSERT INTO t3 VALUES(1026); + INSERT INTO t3 VALUES(-1022); + INSERT INTO t3 VALUES(-1023); + INSERT INTO t3 VALUES(-1024); + INSERT INTO t3 VALUES(-1025); + INSERT INTO t3 VALUES(-1026); + INSERT INTO t3 VALUES(2046); + INSERT INTO t3 VALUES(2047); + INSERT INTO t3 VALUES(2048); + INSERT INTO t3 VALUES(2049); + INSERT INTO t3 VALUES(2050); + INSERT INTO t3 VALUES(-2046); + INSERT INTO t3 VALUES(-2047); + INSERT INTO t3 VALUES(-2048); + INSERT INTO t3 VALUES(-2049); + INSERT INTO t3 VALUES(-2050); + INSERT INTO t3 VALUES(4094); + INSERT INTO t3 VALUES(4095); + INSERT INTO t3 VALUES(4096); + INSERT INTO t3 VALUES(4097); + INSERT INTO t3 VALUES(4098); + INSERT INTO t3 VALUES(-4094); + INSERT INTO t3 VALUES(-4095); + INSERT INTO t3 VALUES(-4096); + INSERT INTO t3 VALUES(-4097); + 
INSERT INTO t3 VALUES(-4098); + INSERT INTO t3 VALUES(8190); + INSERT INTO t3 VALUES(8191); + INSERT INTO t3 VALUES(8192); + INSERT INTO t3 VALUES(8193); + INSERT INTO t3 VALUES(8194); + INSERT INTO t3 VALUES(-8190); + INSERT INTO t3 VALUES(-8191); + INSERT INTO t3 VALUES(-8192); + INSERT INTO t3 VALUES(-8193); + INSERT INTO t3 VALUES(-8194); + INSERT INTO t3 VALUES(16382); + INSERT INTO t3 VALUES(16383); + INSERT INTO t3 VALUES(16384); + INSERT INTO t3 VALUES(16385); + INSERT INTO t3 VALUES(16386); + INSERT INTO t3 VALUES(-16382); + INSERT INTO t3 VALUES(-16383); + INSERT INTO t3 VALUES(-16384); + INSERT INTO t3 VALUES(-16385); + INSERT INTO t3 VALUES(-16386); + INSERT INTO t3 VALUES(32766); + INSERT INTO t3 VALUES(32767); + INSERT INTO t3 VALUES(32768); + INSERT INTO t3 VALUES(32769); + INSERT INTO t3 VALUES(32770); + INSERT INTO t3 VALUES(-32766); + INSERT INTO t3 VALUES(-32767); + INSERT INTO t3 VALUES(-32768); + INSERT INTO t3 VALUES(-32769); + INSERT INTO t3 VALUES(-32770); + INSERT INTO t3 VALUES(65534); + INSERT INTO t3 VALUES(65535); + INSERT INTO t3 VALUES(65536); + INSERT INTO t3 VALUES(65537); + INSERT INTO t3 VALUES(65538); + INSERT INTO t3 VALUES(-65534); + INSERT INTO t3 VALUES(-65535); + INSERT INTO t3 VALUES(-65536); + INSERT INTO t3 VALUES(-65537); + INSERT INTO t3 VALUES(-65538); + INSERT INTO t3 VALUES(131070); + INSERT INTO t3 VALUES(131071); + INSERT INTO t3 VALUES(131072); + INSERT INTO t3 VALUES(131073); + INSERT INTO t3 VALUES(131074); + INSERT INTO t3 VALUES(-131070); + INSERT INTO t3 VALUES(-131071); + INSERT INTO t3 VALUES(-131072); + INSERT INTO t3 VALUES(-131073); + INSERT INTO t3 VALUES(-131074); + INSERT INTO t3 VALUES(262142); + INSERT INTO t3 VALUES(262143); + INSERT INTO t3 VALUES(262144); + INSERT INTO t3 VALUES(262145); + INSERT INTO t3 VALUES(262146); + INSERT INTO t3 VALUES(-262142); + INSERT INTO t3 VALUES(-262143); + INSERT INTO t3 VALUES(-262144); + INSERT INTO t3 VALUES(-262145); + INSERT INTO t3 VALUES(-262146); + INSERT INTO t3 VALUES(524286); + INSERT INTO t3 VALUES(524287); + INSERT INTO t3 VALUES(524288); + INSERT INTO t3 VALUES(524289); + INSERT INTO t3 VALUES(524290); + INSERT INTO t3 VALUES(-524286); + INSERT INTO t3 VALUES(-524287); + INSERT INTO t3 VALUES(-524288); + INSERT INTO t3 VALUES(-524289); + INSERT INTO t3 VALUES(-524290); + INSERT INTO t3 VALUES(1048574); + INSERT INTO t3 VALUES(1048575); + INSERT INTO t3 VALUES(1048576); + INSERT INTO t3 VALUES(1048577); + INSERT INTO t3 VALUES(1048578); + INSERT INTO t3 VALUES(-1048574); + INSERT INTO t3 VALUES(-1048575); + INSERT INTO t3 VALUES(-1048576); + INSERT INTO t3 VALUES(-1048577); + INSERT INTO t3 VALUES(-1048578); + INSERT INTO t3 VALUES(2097150); + INSERT INTO t3 VALUES(2097151); + INSERT INTO t3 VALUES(2097152); + INSERT INTO t3 VALUES(2097153); + INSERT INTO t3 VALUES(2097154); + INSERT INTO t3 VALUES(-2097150); + INSERT INTO t3 VALUES(-2097151); + INSERT INTO t3 VALUES(-2097152); + INSERT INTO t3 VALUES(-2097153); + INSERT INTO t3 VALUES(-2097154); + INSERT INTO t3 VALUES(4194302); + INSERT INTO t3 VALUES(4194303); + INSERT INTO t3 VALUES(4194304); + INSERT INTO t3 VALUES(4194305); + INSERT INTO t3 VALUES(4194306); + INSERT INTO t3 VALUES(-4194302); + INSERT INTO t3 VALUES(-4194303); + INSERT INTO t3 VALUES(-4194304); + INSERT INTO t3 VALUES(-4194305); + INSERT INTO t3 VALUES(-4194306); + INSERT INTO t3 VALUES(8388606); + INSERT INTO t3 VALUES(8388607); + INSERT INTO t3 VALUES(8388608); + INSERT INTO t3 VALUES(8388609); + INSERT INTO t3 VALUES(8388610); + INSERT INTO t3 
VALUES(-8388606); + INSERT INTO t3 VALUES(-8388607); + INSERT INTO t3 VALUES(-8388608); + INSERT INTO t3 VALUES(-8388609); + INSERT INTO t3 VALUES(-8388610); + INSERT INTO t3 VALUES(16777214); + INSERT INTO t3 VALUES(16777215); + INSERT INTO t3 VALUES(16777216); + INSERT INTO t3 VALUES(16777217); + INSERT INTO t3 VALUES(16777218); + INSERT INTO t3 VALUES(-16777214); + INSERT INTO t3 VALUES(-16777215); + INSERT INTO t3 VALUES(-16777216); + INSERT INTO t3 VALUES(-16777217); + INSERT INTO t3 VALUES(-16777218); + INSERT INTO t3 VALUES(33554430); + INSERT INTO t3 VALUES(33554431); + INSERT INTO t3 VALUES(33554432); + INSERT INTO t3 VALUES(33554433); + INSERT INTO t3 VALUES(33554434); + INSERT INTO t3 VALUES(-33554430); + INSERT INTO t3 VALUES(-33554431); + INSERT INTO t3 VALUES(-33554432); + INSERT INTO t3 VALUES(-33554433); + INSERT INTO t3 VALUES(-33554434); + INSERT INTO t3 VALUES(67108862); + INSERT INTO t3 VALUES(67108863); + INSERT INTO t3 VALUES(67108864); + INSERT INTO t3 VALUES(67108865); + INSERT INTO t3 VALUES(67108866); + INSERT INTO t3 VALUES(-67108862); + INSERT INTO t3 VALUES(-67108863); + INSERT INTO t3 VALUES(-67108864); + INSERT INTO t3 VALUES(-67108865); + INSERT INTO t3 VALUES(-67108866); + INSERT INTO t3 VALUES(134217726); + INSERT INTO t3 VALUES(134217727); + INSERT INTO t3 VALUES(134217728); + INSERT INTO t3 VALUES(134217729); + INSERT INTO t3 VALUES(134217730); + INSERT INTO t3 VALUES(-134217726); + INSERT INTO t3 VALUES(-134217727); + INSERT INTO t3 VALUES(-134217728); + INSERT INTO t3 VALUES(-134217729); + INSERT INTO t3 VALUES(-134217730); + INSERT INTO t3 VALUES(268435454); + INSERT INTO t3 VALUES(268435455); + INSERT INTO t3 VALUES(268435456); + INSERT INTO t3 VALUES(268435457); + INSERT INTO t3 VALUES(268435458); + INSERT INTO t3 VALUES(-268435454); + INSERT INTO t3 VALUES(-268435455); + INSERT INTO t3 VALUES(-268435456); + INSERT INTO t3 VALUES(-268435457); + INSERT INTO t3 VALUES(-268435458); + INSERT INTO t3 VALUES(536870910); + INSERT INTO t3 VALUES(536870911); + INSERT INTO t3 VALUES(536870912); + INSERT INTO t3 VALUES(536870913); + INSERT INTO t3 VALUES(536870914); + INSERT INTO t3 VALUES(-536870910); + INSERT INTO t3 VALUES(-536870911); + INSERT INTO t3 VALUES(-536870912); + INSERT INTO t3 VALUES(-536870913); + INSERT INTO t3 VALUES(-536870914); + INSERT INTO t3 VALUES(1073741822); + INSERT INTO t3 VALUES(1073741823); + INSERT INTO t3 VALUES(1073741824); + INSERT INTO t3 VALUES(1073741825); + INSERT INTO t3 VALUES(1073741826); + INSERT INTO t3 VALUES(-1073741822); + INSERT INTO t3 VALUES(-1073741823); + INSERT INTO t3 VALUES(-1073741824); + INSERT INTO t3 VALUES(-1073741825); + INSERT INTO t3 VALUES(-1073741826); + INSERT INTO t3 VALUES(2147483646); + INSERT INTO t3 VALUES(2147483647); + INSERT INTO t3 VALUES(2147483648); + INSERT INTO t3 VALUES(2147483649); + INSERT INTO t3 VALUES(2147483650); + INSERT INTO t3 VALUES(-2147483646); + INSERT INTO t3 VALUES(-2147483647); + INSERT INTO t3 VALUES(-2147483648); + INSERT INTO t3 VALUES(-2147483649); + INSERT INTO t3 VALUES(-2147483650); + INSERT INTO t3 VALUES(4294967294); + INSERT INTO t3 VALUES(4294967295); + INSERT INTO t3 VALUES(4294967296); + INSERT INTO t3 VALUES(4294967297); + INSERT INTO t3 VALUES(4294967298); + INSERT INTO t3 VALUES(-4294967294); + INSERT INTO t3 VALUES(-4294967295); + INSERT INTO t3 VALUES(-4294967296); + INSERT INTO t3 VALUES(-4294967297); + INSERT INTO t3 VALUES(-4294967298); + INSERT INTO t3 VALUES(8589934590); + INSERT INTO t3 VALUES(8589934591); + INSERT INTO t3 VALUES(8589934592); 
+ INSERT INTO t3 VALUES(8589934593); + INSERT INTO t3 VALUES(8589934594); + INSERT INTO t3 VALUES(-8589934590); + INSERT INTO t3 VALUES(-8589934591); + INSERT INTO t3 VALUES(-8589934592); + INSERT INTO t3 VALUES(-8589934593); + INSERT INTO t3 VALUES(-8589934594); + INSERT INTO t3 VALUES(17179869182); + INSERT INTO t3 VALUES(17179869183); + INSERT INTO t3 VALUES(17179869184); + INSERT INTO t3 VALUES(17179869185); + INSERT INTO t3 VALUES(17179869186); + INSERT INTO t3 VALUES(-17179869182); + INSERT INTO t3 VALUES(-17179869183); + INSERT INTO t3 VALUES(-17179869184); + INSERT INTO t3 VALUES(-17179869185); + INSERT INTO t3 VALUES(-17179869186); + INSERT INTO t3 VALUES(34359738366); + INSERT INTO t3 VALUES(34359738367); + INSERT INTO t3 VALUES(34359738368); + INSERT INTO t3 VALUES(34359738369); + INSERT INTO t3 VALUES(34359738370); + INSERT INTO t3 VALUES(-34359738366); + INSERT INTO t3 VALUES(-34359738367); + INSERT INTO t3 VALUES(-34359738368); + INSERT INTO t3 VALUES(-34359738369); + INSERT INTO t3 VALUES(-34359738370); + INSERT INTO t3 VALUES(68719476734); + INSERT INTO t3 VALUES(68719476735); + INSERT INTO t3 VALUES(68719476736); + INSERT INTO t3 VALUES(68719476737); + INSERT INTO t3 VALUES(68719476738); + INSERT INTO t3 VALUES(-68719476734); + INSERT INTO t3 VALUES(-68719476735); + INSERT INTO t3 VALUES(-68719476736); + INSERT INTO t3 VALUES(-68719476737); + INSERT INTO t3 VALUES(-68719476738); + INSERT INTO t3 VALUES(137438953470); + INSERT INTO t3 VALUES(137438953471); + INSERT INTO t3 VALUES(137438953472); + INSERT INTO t3 VALUES(137438953473); + INSERT INTO t3 VALUES(137438953474); + INSERT INTO t3 VALUES(-137438953470); + INSERT INTO t3 VALUES(-137438953471); + INSERT INTO t3 VALUES(-137438953472); + INSERT INTO t3 VALUES(-137438953473); + INSERT INTO t3 VALUES(-137438953474); + INSERT INTO t3 VALUES(274877906942); + INSERT INTO t3 VALUES(274877906943); + INSERT INTO t3 VALUES(274877906944); + INSERT INTO t3 VALUES(274877906945); + INSERT INTO t3 VALUES(274877906946); + INSERT INTO t3 VALUES(-274877906942); + INSERT INTO t3 VALUES(-274877906943); + INSERT INTO t3 VALUES(-274877906944); + INSERT INTO t3 VALUES(-274877906945); + INSERT INTO t3 VALUES(-274877906946); + INSERT INTO t3 VALUES(549755813886); + INSERT INTO t3 VALUES(549755813887); + INSERT INTO t3 VALUES(549755813888); + INSERT INTO t3 VALUES(549755813889); + INSERT INTO t3 VALUES(549755813890); + INSERT INTO t3 VALUES(-549755813886); + INSERT INTO t3 VALUES(-549755813887); + INSERT INTO t3 VALUES(-549755813888); + INSERT INTO t3 VALUES(-549755813889); + INSERT INTO t3 VALUES(-549755813890); + INSERT INTO t3 VALUES(1099511627774); + INSERT INTO t3 VALUES(1099511627775); + INSERT INTO t3 VALUES(1099511627776); + INSERT INTO t3 VALUES(1099511627777); + INSERT INTO t3 VALUES(1099511627778); + INSERT INTO t3 VALUES(-1099511627774); + INSERT INTO t3 VALUES(-1099511627775); + INSERT INTO t3 VALUES(-1099511627776); + INSERT INTO t3 VALUES(-1099511627777); + INSERT INTO t3 VALUES(-1099511627778); + INSERT INTO t3 VALUES(2199023255550); + INSERT INTO t3 VALUES(2199023255551); + INSERT INTO t3 VALUES(2199023255552); + INSERT INTO t3 VALUES(2199023255553); + INSERT INTO t3 VALUES(2199023255554); + INSERT INTO t3 VALUES(-2199023255550); + INSERT INTO t3 VALUES(-2199023255551); + INSERT INTO t3 VALUES(-2199023255552); + INSERT INTO t3 VALUES(-2199023255553); + INSERT INTO t3 VALUES(-2199023255554); + INSERT INTO t3 VALUES(4398046511102); + INSERT INTO t3 VALUES(4398046511103); + INSERT INTO t3 VALUES(4398046511104); + INSERT INTO t3 
VALUES(4398046511105); + INSERT INTO t3 VALUES(4398046511106); + INSERT INTO t3 VALUES(-4398046511102); + INSERT INTO t3 VALUES(-4398046511103); + INSERT INTO t3 VALUES(-4398046511104); + INSERT INTO t3 VALUES(-4398046511105); + INSERT INTO t3 VALUES(-4398046511106); + INSERT INTO t3 VALUES(8796093022206); + INSERT INTO t3 VALUES(8796093022207); + INSERT INTO t3 VALUES(8796093022208); + INSERT INTO t3 VALUES(8796093022209); + INSERT INTO t3 VALUES(8796093022210); + INSERT INTO t3 VALUES(-8796093022206); + INSERT INTO t3 VALUES(-8796093022207); + INSERT INTO t3 VALUES(-8796093022208); + INSERT INTO t3 VALUES(-8796093022209); + INSERT INTO t3 VALUES(-8796093022210); + INSERT INTO t3 VALUES(17592186044414); + INSERT INTO t3 VALUES(17592186044415); + INSERT INTO t3 VALUES(17592186044416); + INSERT INTO t3 VALUES(17592186044417); + INSERT INTO t3 VALUES(17592186044418); + INSERT INTO t3 VALUES(-17592186044414); + INSERT INTO t3 VALUES(-17592186044415); + INSERT INTO t3 VALUES(-17592186044416); + INSERT INTO t3 VALUES(-17592186044417); + INSERT INTO t3 VALUES(-17592186044418); + INSERT INTO t3 VALUES(35184372088830); + INSERT INTO t3 VALUES(35184372088831); + INSERT INTO t3 VALUES(35184372088832); + INSERT INTO t3 VALUES(35184372088833); + INSERT INTO t3 VALUES(35184372088834); + INSERT INTO t3 VALUES(-35184372088830); + INSERT INTO t3 VALUES(-35184372088831); + INSERT INTO t3 VALUES(-35184372088832); + INSERT INTO t3 VALUES(-35184372088833); + INSERT INTO t3 VALUES(-35184372088834); + INSERT INTO t3 VALUES(70368744177662); + INSERT INTO t3 VALUES(70368744177663); + INSERT INTO t3 VALUES(70368744177664); + INSERT INTO t3 VALUES(70368744177665); + INSERT INTO t3 VALUES(70368744177666); + INSERT INTO t3 VALUES(-70368744177662); + INSERT INTO t3 VALUES(-70368744177663); + INSERT INTO t3 VALUES(-70368744177664); + INSERT INTO t3 VALUES(-70368744177665); + INSERT INTO t3 VALUES(-70368744177666); + INSERT INTO t3 VALUES(140737488355326); + INSERT INTO t3 VALUES(140737488355327); + INSERT INTO t3 VALUES(140737488355328); + INSERT INTO t3 VALUES(140737488355329); + INSERT INTO t3 VALUES(140737488355330); + INSERT INTO t3 VALUES(-140737488355326); + INSERT INTO t3 VALUES(-140737488355327); + INSERT INTO t3 VALUES(-140737488355328); + INSERT INTO t3 VALUES(-140737488355329); + INSERT INTO t3 VALUES(-140737488355330); + INSERT INTO t3 VALUES(281474976710654); + INSERT INTO t3 VALUES(281474976710655); + INSERT INTO t3 VALUES(281474976710656); + INSERT INTO t3 VALUES(281474976710657); + INSERT INTO t3 VALUES(281474976710658); + INSERT INTO t3 VALUES(-281474976710654); + INSERT INTO t3 VALUES(-281474976710655); + INSERT INTO t3 VALUES(-281474976710656); + INSERT INTO t3 VALUES(-281474976710657); + INSERT INTO t3 VALUES(-281474976710658); + INSERT INTO t3 VALUES(562949953421310); + INSERT INTO t3 VALUES(562949953421311); + INSERT INTO t3 VALUES(562949953421312); + INSERT INTO t3 VALUES(562949953421313); + INSERT INTO t3 VALUES(562949953421314); + INSERT INTO t3 VALUES(-562949953421310); + INSERT INTO t3 VALUES(-562949953421311); + INSERT INTO t3 VALUES(-562949953421312); + INSERT INTO t3 VALUES(-562949953421313); + INSERT INTO t3 VALUES(-562949953421314); + INSERT INTO t3 VALUES(1125899906842622); + INSERT INTO t3 VALUES(1125899906842623); + INSERT INTO t3 VALUES(1125899906842624); + INSERT INTO t3 VALUES(1125899906842625); + INSERT INTO t3 VALUES(1125899906842626); + INSERT INTO t3 VALUES(-1125899906842622); + INSERT INTO t3 VALUES(-1125899906842623); + INSERT INTO t3 VALUES(-1125899906842624); + INSERT INTO 
t3 VALUES(-1125899906842625); + INSERT INTO t3 VALUES(-1125899906842626); + INSERT INTO t3 VALUES(2251799813685246); + INSERT INTO t3 VALUES(2251799813685247); + INSERT INTO t3 VALUES(2251799813685248); + INSERT INTO t3 VALUES(2251799813685249); + INSERT INTO t3 VALUES(2251799813685250); + INSERT INTO t3 VALUES(-2251799813685246); + INSERT INTO t3 VALUES(-2251799813685247); + INSERT INTO t3 VALUES(-2251799813685248); + INSERT INTO t3 VALUES(-2251799813685249); + INSERT INTO t3 VALUES(-2251799813685250); + INSERT INTO t3 VALUES(4503599627370494); + INSERT INTO t3 VALUES(4503599627370495); + INSERT INTO t3 VALUES(4503599627370496); + INSERT INTO t3 VALUES(4503599627370497); + INSERT INTO t3 VALUES(4503599627370498); + INSERT INTO t3 VALUES(-4503599627370494); + INSERT INTO t3 VALUES(-4503599627370495); + INSERT INTO t3 VALUES(-4503599627370496); + INSERT INTO t3 VALUES(-4503599627370497); + INSERT INTO t3 VALUES(-4503599627370498); + INSERT INTO t3 VALUES(9007199254740990); + INSERT INTO t3 VALUES(9007199254740991); + INSERT INTO t3 VALUES(9007199254740992); + INSERT INTO t3 VALUES(9007199254740993); + INSERT INTO t3 VALUES(9007199254740994); + INSERT INTO t3 VALUES(-9007199254740990); + INSERT INTO t3 VALUES(-9007199254740991); + INSERT INTO t3 VALUES(-9007199254740992); + INSERT INTO t3 VALUES(-9007199254740993); + INSERT INTO t3 VALUES(-9007199254740994); + INSERT INTO t3 VALUES(18014398509481982); + INSERT INTO t3 VALUES(18014398509481983); + INSERT INTO t3 VALUES(18014398509481984); + INSERT INTO t3 VALUES(18014398509481985); + INSERT INTO t3 VALUES(18014398509481986); + INSERT INTO t3 VALUES(-18014398509481982); + INSERT INTO t3 VALUES(-18014398509481983); + INSERT INTO t3 VALUES(-18014398509481984); + INSERT INTO t3 VALUES(-18014398509481985); + INSERT INTO t3 VALUES(-18014398509481986); + INSERT INTO t3 VALUES(36028797018963966); + INSERT INTO t3 VALUES(36028797018963967); + INSERT INTO t3 VALUES(36028797018963968); + INSERT INTO t3 VALUES(36028797018963969); + INSERT INTO t3 VALUES(36028797018963970); + INSERT INTO t3 VALUES(-36028797018963966); + INSERT INTO t3 VALUES(-36028797018963967); + INSERT INTO t3 VALUES(-36028797018963968); + INSERT INTO t3 VALUES(-36028797018963969); + INSERT INTO t3 VALUES(-36028797018963970); + INSERT INTO t3 VALUES(72057594037927934); + INSERT INTO t3 VALUES(72057594037927935); + INSERT INTO t3 VALUES(72057594037927936); + INSERT INTO t3 VALUES(72057594037927937); + INSERT INTO t3 VALUES(72057594037927938); + INSERT INTO t3 VALUES(-72057594037927934); + INSERT INTO t3 VALUES(-72057594037927935); + INSERT INTO t3 VALUES(-72057594037927936); + INSERT INTO t3 VALUES(-72057594037927937); + INSERT INTO t3 VALUES(-72057594037927938); + INSERT INTO t3 VALUES(144115188075855870); + INSERT INTO t3 VALUES(144115188075855871); + INSERT INTO t3 VALUES(144115188075855872); + INSERT INTO t3 VALUES(144115188075855873); + INSERT INTO t3 VALUES(144115188075855874); + INSERT INTO t3 VALUES(-144115188075855870); + INSERT INTO t3 VALUES(-144115188075855871); + INSERT INTO t3 VALUES(-144115188075855872); + INSERT INTO t3 VALUES(-144115188075855873); + INSERT INTO t3 VALUES(-144115188075855874); + INSERT INTO t3 VALUES(288230376151711742); + INSERT INTO t3 VALUES(288230376151711743); + INSERT INTO t3 VALUES(288230376151711744); + INSERT INTO t3 VALUES(288230376151711745); + INSERT INTO t3 VALUES(288230376151711746); + INSERT INTO t3 VALUES(-288230376151711742); + INSERT INTO t3 VALUES(-288230376151711743); + INSERT INTO t3 VALUES(-288230376151711744); + INSERT INTO t3 
VALUES(-288230376151711745); + INSERT INTO t3 VALUES(-288230376151711746); + INSERT INTO t3 VALUES(576460752303423486); + INSERT INTO t3 VALUES(576460752303423487); + INSERT INTO t3 VALUES(576460752303423488); + INSERT INTO t3 VALUES(576460752303423489); + INSERT INTO t3 VALUES(576460752303423490); + INSERT INTO t3 VALUES(-576460752303423486); + INSERT INTO t3 VALUES(-576460752303423487); + INSERT INTO t3 VALUES(-576460752303423488); + INSERT INTO t3 VALUES(-576460752303423489); + INSERT INTO t3 VALUES(-576460752303423490); + INSERT INTO t3 VALUES(1152921504606846974); + INSERT INTO t3 VALUES(1152921504606846975); + INSERT INTO t3 VALUES(1152921504606846976); + INSERT INTO t3 VALUES(1152921504606846977); + INSERT INTO t3 VALUES(1152921504606846978); + INSERT INTO t3 VALUES(-1152921504606846974); + INSERT INTO t3 VALUES(-1152921504606846975); + INSERT INTO t3 VALUES(-1152921504606846976); + INSERT INTO t3 VALUES(-1152921504606846977); + INSERT INTO t3 VALUES(-1152921504606846978); + INSERT INTO t3 VALUES(2305843009213693950); + INSERT INTO t3 VALUES(2305843009213693951); + INSERT INTO t3 VALUES(2305843009213693952); + INSERT INTO t3 VALUES(2305843009213693953); + INSERT INTO t3 VALUES(2305843009213693954); + INSERT INTO t3 VALUES(-2305843009213693950); + INSERT INTO t3 VALUES(-2305843009213693951); + INSERT INTO t3 VALUES(-2305843009213693952); + INSERT INTO t3 VALUES(-2305843009213693953); + INSERT INTO t3 VALUES(-2305843009213693954); + INSERT INTO t3 VALUES(4611686018427387902); + INSERT INTO t3 VALUES(4611686018427387903); + INSERT INTO t3 VALUES(4611686018427387904); + INSERT INTO t3 VALUES(4611686018427387905); + INSERT INTO t3 VALUES(4611686018427387906); + INSERT INTO t3 VALUES(-4611686018427387902); + INSERT INTO t3 VALUES(-4611686018427387903); + INSERT INTO t3 VALUES(-4611686018427387904); + INSERT INTO t3 VALUES(-4611686018427387905); + INSERT INTO t3 VALUES(-4611686018427387906); + INSERT INTO t3 VALUES(9223372036854775806); + INSERT INTO t3 VALUES(9223372036854775807); + INSERT INTO t3 VALUES(-9223372036854775806); + INSERT INTO t3 VALUES(-9223372036854775807); + INSERT INTO t3 VALUES(-9223372036854775808); + SELECT x FROM t3 ORDER BY x; + } +} {-9223372036854775808\ +-9223372036854775807\ +-9223372036854775806\ +-4611686018427387906\ +-4611686018427387905\ +-4611686018427387904\ +-4611686018427387903\ +-4611686018427387902\ +-2305843009213693954\ +-2305843009213693953\ +-2305843009213693952\ +-2305843009213693951\ +-2305843009213693950\ +-1152921504606846978\ +-1152921504606846977\ +-1152921504606846976\ +-1152921504606846975\ +-1152921504606846974\ +-576460752303423490\ +-576460752303423489\ +-576460752303423488\ +-576460752303423487\ +-576460752303423486\ +-288230376151711746\ +-288230376151711745\ +-288230376151711744\ +-288230376151711743\ +-288230376151711742\ +-144115188075855874\ +-144115188075855873\ +-144115188075855872\ +-144115188075855871\ +-144115188075855870\ +-72057594037927938\ +-72057594037927937\ +-72057594037927936\ +-72057594037927935\ +-72057594037927934\ +-36028797018963970\ +-36028797018963969\ +-36028797018963968\ +-36028797018963967\ +-36028797018963966\ +-18014398509481986\ +-18014398509481985\ +-18014398509481984\ +-18014398509481983\ +-18014398509481982\ +-9007199254740994\ +-9007199254740993\ +-9007199254740992\ +-9007199254740991\ +-9007199254740990\ +-4503599627370498\ +-4503599627370497\ +-4503599627370496\ +-4503599627370495\ +-4503599627370494\ +-2251799813685250\ +-2251799813685249\ +-2251799813685248\ +-2251799813685247\ +-2251799813685246\ 
+-1125899906842626\ +-1125899906842625\ +-1125899906842624\ +-1125899906842623\ +-1125899906842622\ +-562949953421314\ +-562949953421313\ +-562949953421312\ +-562949953421311\ +-562949953421310\ +-281474976710658\ +-281474976710657\ +-281474976710656\ +-281474976710655\ +-281474976710654\ +-140737488355330\ +-140737488355329\ +-140737488355328\ +-140737488355327\ +-140737488355326\ +-70368744177666\ +-70368744177665\ +-70368744177664\ +-70368744177663\ +-70368744177662\ +-35184372088834\ +-35184372088833\ +-35184372088832\ +-35184372088831\ +-35184372088830\ +-17592186044418\ +-17592186044417\ +-17592186044416\ +-17592186044415\ +-17592186044414\ +-8796093022210\ +-8796093022209\ +-8796093022208\ +-8796093022207\ +-8796093022206\ +-4398046511106\ +-4398046511105\ +-4398046511104\ +-4398046511103\ +-4398046511102\ +-2199023255554\ +-2199023255553\ +-2199023255552\ +-2199023255551\ +-2199023255550\ +-1099511627778\ +-1099511627777\ +-1099511627776\ +-1099511627775\ +-1099511627774\ +-549755813890\ +-549755813889\ +-549755813888\ +-549755813887\ +-549755813886\ +-274877906946\ +-274877906945\ +-274877906944\ +-274877906943\ +-274877906942\ +-137438953474\ +-137438953473\ +-137438953472\ +-137438953471\ +-137438953470\ +-68719476738\ +-68719476737\ +-68719476736\ +-68719476735\ +-68719476734\ +-34359738370\ +-34359738369\ +-34359738368\ +-34359738367\ +-34359738366\ +-17179869186\ +-17179869185\ +-17179869184\ +-17179869183\ +-17179869182\ +-8589934594\ +-8589934593\ +-8589934592\ +-8589934591\ +-8589934590\ +-4294967298\ +-4294967297\ +-4294967296\ +-4294967295\ +-4294967294\ +-2147483650\ +-2147483649\ +-2147483648\ +-2147483647\ +-2147483646\ +-1073741826\ +-1073741825\ +-1073741824\ +-1073741823\ +-1073741822\ +-536870914\ +-536870913\ +-536870912\ +-536870911\ +-536870910\ +-268435458\ +-268435457\ +-268435456\ +-268435455\ +-268435454\ +-134217730\ +-134217729\ +-134217728\ +-134217727\ +-134217726\ +-67108866\ +-67108865\ +-67108864\ +-67108863\ +-67108862\ +-33554434\ +-33554433\ +-33554432\ +-33554431\ +-33554430\ +-16777218\ +-16777217\ +-16777216\ +-16777215\ +-16777214\ +-8388610\ +-8388609\ +-8388608\ +-8388607\ +-8388606\ +-4194306\ +-4194305\ +-4194304\ +-4194303\ +-4194302\ +-2097154\ +-2097153\ +-2097152\ +-2097151\ +-2097150\ +-1048578\ +-1048577\ +-1048576\ +-1048575\ +-1048574\ +-524290\ +-524289\ +-524288\ +-524287\ +-524286\ +-262146\ +-262145\ +-262144\ +-262143\ +-262142\ +-131074\ +-131073\ +-131072\ +-131071\ +-131070\ +-65538\ +-65537\ +-65536\ +-65535\ +-65534\ +-32770\ +-32769\ +-32768\ +-32767\ +-32766\ +-16386\ +-16385\ +-16384\ +-16383\ +-16382\ +-8194\ +-8193\ +-8192\ +-8191\ +-8190\ +-4098\ +-4097\ +-4096\ +-4095\ +-4094\ +-2050\ +-2049\ +-2048\ +-2047\ +-2046\ +-1026\ +-1025\ +-1024\ +-1023\ +-1022\ +-514\ +-513\ +-512\ +-511\ +-510\ +-258\ +-257\ +-256\ +-255\ +-254\ +-130\ +-129\ +-128\ +-127\ +-126\ +-66\ +-65\ +-64\ +-63\ +-62\ +-34\ +-33\ +-32\ +-31\ +-30\ +-18\ +-17\ +-16\ +-15\ +-14\ +-13\ +-12\ +-11\ +-10\ +-9\ +-8\ +-7\ +-6\ +-5\ +-4\ +-3\ +-2\ +-1\ +0\ +1\ +2\ +3\ +4\ +5\ +6\ +7\ +8\ +9\ +10\ +11\ +12\ +13\ +14\ +15\ +16\ +17\ +18\ +30\ +31\ +32\ +33\ +34\ +62\ +63\ +64\ +65\ +66\ +126\ +127\ +128\ +129\ +130\ +254\ +255\ +256\ +257\ +258\ +510\ +511\ +512\ +513\ +514\ +1022\ +1023\ +1024\ +1025\ +1026\ +2046\ +2047\ +2048\ +2049\ +2050\ +4094\ +4095\ +4096\ +4097\ +4098\ +8190\ +8191\ +8192\ +8193\ +8194\ +16382\ +16383\ +16384\ +16385\ +16386\ +32766\ +32767\ +32768\ +32769\ +32770\ +65534\ +65535\ +65536\ +65537\ +65538\ +131070\ +131071\ 
+131072\ +131073\ +131074\ +262142\ +262143\ +262144\ +262145\ +262146\ +524286\ +524287\ +524288\ +524289\ +524290\ +1048574\ +1048575\ +1048576\ +1048577\ +1048578\ +2097150\ +2097151\ +2097152\ +2097153\ +2097154\ +4194302\ +4194303\ +4194304\ +4194305\ +4194306\ +8388606\ +8388607\ +8388608\ +8388609\ +8388610\ +16777214\ +16777215\ +16777216\ +16777217\ +16777218\ +33554430\ +33554431\ +33554432\ +33554433\ +33554434\ +67108862\ +67108863\ +67108864\ +67108865\ +67108866\ +134217726\ +134217727\ +134217728\ +134217729\ +134217730\ +268435454\ +268435455\ +268435456\ +268435457\ +268435458\ +536870910\ +536870911\ +536870912\ +536870913\ +536870914\ +1073741822\ +1073741823\ +1073741824\ +1073741825\ +1073741826\ +2147483646\ +2147483647\ +2147483648\ +2147483649\ +2147483650\ +4294967294\ +4294967295\ +4294967296\ +4294967297\ +4294967298\ +8589934590\ +8589934591\ +8589934592\ +8589934593\ +8589934594\ +17179869182\ +17179869183\ +17179869184\ +17179869185\ +17179869186\ +34359738366\ +34359738367\ +34359738368\ +34359738369\ +34359738370\ +68719476734\ +68719476735\ +68719476736\ +68719476737\ +68719476738\ +137438953470\ +137438953471\ +137438953472\ +137438953473\ +137438953474\ +274877906942\ +274877906943\ +274877906944\ +274877906945\ +274877906946\ +549755813886\ +549755813887\ +549755813888\ +549755813889\ +549755813890\ +1099511627774\ +1099511627775\ +1099511627776\ +1099511627777\ +1099511627778\ +2199023255550\ +2199023255551\ +2199023255552\ +2199023255553\ +2199023255554\ +4398046511102\ +4398046511103\ +4398046511104\ +4398046511105\ +4398046511106\ +8796093022206\ +8796093022207\ +8796093022208\ +8796093022209\ +8796093022210\ +17592186044414\ +17592186044415\ +17592186044416\ +17592186044417\ +17592186044418\ +35184372088830\ +35184372088831\ +35184372088832\ +35184372088833\ +35184372088834\ +70368744177662\ +70368744177663\ +70368744177664\ +70368744177665\ +70368744177666\ +140737488355326\ +140737488355327\ +140737488355328\ +140737488355329\ +140737488355330\ +281474976710654\ +281474976710655\ +281474976710656\ +281474976710657\ +281474976710658\ +562949953421310\ +562949953421311\ +562949953421312\ +562949953421313\ +562949953421314\ +1125899906842622\ +1125899906842623\ +1125899906842624\ +1125899906842625\ +1125899906842626\ +2251799813685246\ +2251799813685247\ +2251799813685248\ +2251799813685249\ +2251799813685250\ +4503599627370494\ +4503599627370495\ +4503599627370496\ +4503599627370497\ +4503599627370498\ +9007199254740990\ +9007199254740991\ +9007199254740992\ +9007199254740993\ +9007199254740994\ +18014398509481982\ +18014398509481983\ +18014398509481984\ +18014398509481985\ +18014398509481986\ +36028797018963966\ +36028797018963967\ +36028797018963968\ +36028797018963969\ +36028797018963970\ +72057594037927934\ +72057594037927935\ +72057594037927936\ +72057594037927937\ +72057594037927938\ +144115188075855870\ +144115188075855871\ +144115188075855872\ +144115188075855873\ +144115188075855874\ +288230376151711742\ +288230376151711743\ +288230376151711744\ +288230376151711745\ +288230376151711746\ +576460752303423486\ +576460752303423487\ +576460752303423488\ +576460752303423489\ +576460752303423490\ +1152921504606846974\ +1152921504606846975\ +1152921504606846976\ +1152921504606846977\ +1152921504606846978\ +2305843009213693950\ +2305843009213693951\ +2305843009213693952\ +2305843009213693953\ +2305843009213693954\ +4611686018427387902\ +4611686018427387903\ +4611686018427387904\ +4611686018427387905\ +4611686018427387906\ +9223372036854775806\ 
+9223372036854775807} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/misc7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/misc7.test --- sqlite3-3.4.2/test/misc7.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/misc7.test 2009-06-12 03:37:54.000000000 +0100 @@ -10,12 +10,12 @@ #*********************************************************************** # This file implements regression tests for SQLite library. # -# $Id: misc7.test,v 1.14 2007/06/27 23:52:18 drh Exp $ +# $Id: misc7.test,v 1.28 2009/02/10 05:45:42 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -do_test misc7-1 { +do_test misc7-1-misuse { c_misuse_test } {} @@ -38,7 +38,7 @@ list $rc $msg } {1 {unable to open database file}} -# Try to open a file with a directory where it's journal file should be. +# Try to open a file with a directory where its journal file should be. # do_test misc7-5 { file delete mydir @@ -166,6 +166,22 @@ DETACH aux; } } {} +do_test misc7-7.3 { + db close + sqlite3 db test.db -readonly 1 + execsql { + PRAGMA omit_readlock = 1; + ATTACH 'test2.db' AS aux; + SELECT name FROM aux.sqlite_master; + SELECT name FROM aux.sqlite_master; + } +} {hello hello} +do_test misc7-7.3 { + db close + sqlite3 db test.db + set ::DB [sqlite3_connection_pointer db] + list +} {} # Test the UTF-16 version of the "out of memory" message (used when # malloc fails during sqlite3_open() ). @@ -243,7 +259,7 @@ sqlite3 db test.db ifcapable explain { - do_test misc7-14 { + do_test misc7-14.1 { execsql { CREATE TABLE abc(a PRIMARY KEY, b, c); } @@ -251,11 +267,16 @@ EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE rowid = 1; } } {0 0 {TABLE abc AS t2 USING PRIMARY KEY}} - do_test misc7-15 { + do_test misc7-14.2 { execsql { EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 WHERE a = 1; } } {0 0 {TABLE abc AS t2 WITH INDEX sqlite_autoindex_abc_1}} + do_test misc7-14.3 { + execsql { + EXPLAIN QUERY PLAN SELECT * FROM abc AS t2 ORDER BY a; + } + } {0 0 {TABLE abc AS t2 WITH INDEX sqlite_autoindex_abc_1 ORDER BY}} } db close @@ -362,57 +383,72 @@ # These tests do not work on windows due to restrictions in the # windows file system. # -if {$tcl_platform(platform)!="windows"} { -do_test misc7-17.1 { - execsql { - BEGIN; - DELETE FROM t3 WHERE (oid%3)==0; - } - copy_file test.db bak.db - copy_file test.db-journal bak.db-journal - execsql { - COMMIT; - } - - db close - copy_file bak.db test.db - copy_file bak.db-journal test.db-journal - sqlite3 db test.db +if {$tcl_platform(platform)!="windows" && $tcl_platform(platform)!="os2"} { - catch {file attributes test.db-journal -permissions r--------} - catch {file attributes test.db-journal -readonly 1} - catchsql { - SELECT count(*) FROM t3; - } -} {1 {database is locked}} -do_test misc7-17.2 { - catch {file attributes test.db-journal -permissions rw-------} - catch {file attributes test.db-journal -readonly 0} - catchsql { - SELECT count(*) FROM t3; - } -} {0 32} + # Some network filesystems (ex: AFP) do not support setting read-only + # permissions. Only run these tests if full unix permission setting + # capabilities are supported. 
+ # + file attributes test.db -permissions rw-r--r-- + if {[file attributes test.db -permissions]==0644} { -set ::pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1] -do_test misc7-17.3 { - db eval { - pragma writable_schema = true; - UPDATE sqlite_master - SET rootpage = $pending_byte_page - WHERE type = 'table' AND name = 't3'; - } - execsql { - SELECT rootpage FROM sqlite_master WHERE type = 'table' AND name = 't3'; + do_test misc7-17.1 { + execsql { + BEGIN; + DELETE FROM t3 WHERE (oid%3)==0; + } + copy_file test.db bak.db + copy_file test.db-journal bak.db-journal + execsql { + COMMIT; + } + + db close + copy_file bak.db test.db + copy_file bak.db-journal test.db-journal + sqlite3 db test.db + + catch {file attributes test.db-journal -permissions r--------} + catch {file attributes test.db-journal -readonly 1} + catchsql { + SELECT count(*) FROM t3; + } + } {1 {unable to open database file}} + do_test misc7-17.2 { + # Note that the -readonly flag must be cleared before the -permissions + # are set. Otherwise, when using tcl 8.5 on mac, the fact that the + # -readonly flag is set causes the attempt to set the permissions + # to fail. + catch {file attributes test.db-journal -readonly 0} + catch {file attributes test.db-journal -permissions rw-------} + catchsql { + SELECT count(*) FROM t3; + } + } {0 32} + + # sqlite3_test_control_pending_page [expr ($::sqlite_pending_byte / 1024) + 1] + set ::pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1] + sqlite3_test_control_pending_byte $::sqlite_pending_byte + do_test misc7-17.3 { + db eval { + pragma writable_schema = true; + UPDATE sqlite_master + SET rootpage = $pending_byte_page + WHERE type = 'table' AND name = 't3'; + } + execsql { + SELECT rootpage FROM sqlite_master WHERE type = 'table' AND name = 't3'; + } + } $::pending_byte_page + + do_test misc7-17.4 { + db close + sqlite3 db test.db + catchsql { + SELECT count(*) FROM t3; + } + } {1 {database disk image is malformed}} } -} $::pending_byte_page - -do_test misc7-17.4 { - db close - sqlite3 db test.db - catchsql { - SELECT count(*) FROM t3; - } -} {1 {database disk image is malformed}} } # Ticket #2470 @@ -424,15 +460,41 @@ col_1, col_2, col_3, col_4, col_5, col_6, col_7, col_8, col_9, col_10 ); - SELECT col_10 + SELECT a.col_10 FROM - (SELECT table_1.col_10 AS col_10 FROM table_1), + (SELECT table_1.col_10 AS col_10 FROM table_1) a, (SELECT table_1.col_10, table_2.col_9 AS qcol_9 FROM table_1, table_2 GROUP BY table_1.col_10, qcol_9); } } {} +# Testing boundary conditions on sqlite3_status() +# +do_test misc7-19.1 { + sqlite3_status -1 0 +} {21 0 0} +do_test misc7-19.2 { + sqlite3_status 1000 0 +} {21 0 0} + + +# sqlite3_global_recover() is a no-op. But we might as well test it +# if only to get the test coverage. +# +do_test misc7-20.1 { + sqlite3_global_recover +} {SQLITE_OK} + +# Try to open a really long file name. +# +do_test misc7-21.1 { + set zFile [file join [pwd] "[string repeat abcde 104].db"] + set rc [catch {sqlite3 db2 $zFile} msg] + list $rc $msg +} {1 {unable to open database file}} + + db close file delete -force test.db diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mutex1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mutex1.test --- sqlite3-3.4.2/test/mutex1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mutex1.test 2009-06-25 12:24:39.000000000 +0100 @@ -0,0 +1,184 @@ +# 2008 June 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: mutex1.test,v 1.20 2009/04/23 14:58:40 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !mutex { + finish_test + return +} +if {[info exists tester_do_binarylog]} { + finish_test + return +} + +sqlite3_reset_auto_extension +clear_mutex_counters + +proc mutex_counters {varname} { + upvar $varname var + set var(total) 0 + foreach {name value} [read_mutex_counters] { + set var($name) $value + incr var(total) $value + } +} + +#------------------------------------------------------------------------- +# Tests mutex1-1.* test that sqlite3_config() returns SQLITE_MISUSE if +# is called at the wrong time. And that the first time sqlite3_initialize +# is called it obtains the 'static_master' mutex 3 times and a recursive +# mutex (sqlite3Config.pInitMutex) twice. Subsequent calls are no-ops +# that do not require any mutexes. +# +do_test mutex1-1.0 { + install_mutex_counters 1 +} {SQLITE_MISUSE} + +do_test mutex1-1.1 { + db close + install_mutex_counters 1 +} {SQLITE_MISUSE} + +do_test mutex1-1.2 { + sqlite3_shutdown + install_mutex_counters 1 +} {SQLITE_OK} + +do_test mutex1-1.3 { + install_mutex_counters 0 +} {SQLITE_OK} + +do_test mutex1-1.4 { + install_mutex_counters 1 +} {SQLITE_OK} + +do_test mutex1-1.5 { + mutex_counters counters + set counters(total) +} {0} + +do_test mutex1-1.6 { + sqlite3_initialize +} {SQLITE_OK} + +do_test mutex1-1.7 { + mutex_counters counters + # list $counters(total) $counters(static_master) + expr {$counters(total)>0} +} {1} + +do_test mutex1-1.8 { + clear_mutex_counters + sqlite3_initialize +} {SQLITE_OK} + +do_test mutex1-1.9 { + mutex_counters counters + list $counters(total) $counters(static_master) +} {0 0} + +#------------------------------------------------------------------------- +# Tests mutex1-2.* test the three thread-safety related modes that +# can be selected using sqlite3_config: +# +# * Serialized mode, +# * Multi-threaded mode, +# * Single-threaded mode. +# +ifcapable threadsafe&&shared_cache { + set enable_shared_cache [sqlite3_enable_shared_cache 1] + foreach {mode mutexes} { + singlethread {} + multithread {fast static_lru static_master static_mem static_open static_prng } + serialized {fast recursive static_lru static_master static_mem static_open static_prng} + } { + + do_test mutex1.2.$mode.1 { + catch {db close} + sqlite3_shutdown + sqlite3_config $mode + } SQLITE_OK + + do_test mutex1.2.$mode.2 { + sqlite3_initialize + clear_mutex_counters + sqlite3 db test.db -nomutex 0 -fullmutex 0 + catchsql { CREATE TABLE abc(a, b, c) } + db eval { + INSERT INTO abc VALUES(1, 2, 3); + } + } {} + + do_test mutex1.2.$mode.3 { + mutex_counters counters + + set res [list] + foreach {key value} [array get counters] { + if {$key ne "total" && $value > 0} { + lappend res $key + } + } + lsort $res + } [lsort $mutexes] + } + sqlite3_enable_shared_cache $enable_shared_cache + + # Open and use a connection in "nomutex" mode. Test that no recursive + # mutexes are obtained. + do_test mutex1.3.1 { + catch {db close} + clear_mutex_counters + sqlite3 db test.db -nomutex 1 + execsql { SELECT * FROM abc } + } {1 2 3 1 2 3 1 2 3} + do_test mutex1.3.2 { + mutex_counters counters + set counters(recursive) + } {0} +} + +# Test the sqlite3_db_mutex() function. 
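The mutex1-2.* block above selects each of the three threading modes from Tcl with sqlite3_config while the library is shut down. In C the same selection is made with the SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_MULTITHREAD and SQLITE_CONFIG_SERIALIZED verbs, and it must happen before sqlite3_initialize() or after sqlite3_shutdown(), which is exactly why tests mutex1-1.0 and mutex1-1.1 expect SQLITE_MISUSE. A minimal sketch (the choice of SERIALIZED is only illustrative):

    #include <sqlite3.h>
    #include <stdio.h>

    int main(void){
      int rc;

      /* sqlite3_config() is only legal while the library is uninitialized;
      ** otherwise it returns SQLITE_MISUSE. */
      rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "sqlite3_config failed: %d (already initialized?)\n", rc);
        return 1;
      }

      rc = sqlite3_initialize();   /* first call sets up the static mutexes */
      if( rc!=SQLITE_OK ) return 1;

      /* ... open connections and do work ... */

      sqlite3_shutdown();          /* allows sqlite3_config() to be used again */
      return 0;
    }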
+# +do_test mutex1.4.1 { + catch {db close} + sqlite3 db test.db + enter_db_mutex db + db eval {SELECT 1, 2, 3} +} {1 2 3} +do_test mutex1.4.2 { + leave_db_mutex db + db eval {SELECT 1, 2, 3} +} {1 2 3} +do_test mutex1.4.3 { + catch {db close} + sqlite3 db test.db -nomutex 1 + enter_db_mutex db + db eval {SELECT 1, 2, 3} +} {1 2 3} +do_test mutex1.4.4 { + leave_db_mutex db + db eval {SELECT 1, 2, 3} +} {1 2 3} + +do_test mutex1-X { + catch {db close} + sqlite3_shutdown + clear_mutex_counters + install_mutex_counters 0 + sqlite3_initialize +} {SQLITE_OK} + +autoinstall_test_functions +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/mutex2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/mutex2.test --- sqlite3-3.4.2/test/mutex2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/mutex2.test 2009-06-12 03:28:39.000000000 +0100 @@ -0,0 +1,105 @@ +# 2008 July 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test scripts for deliberate failures of mutex routines. +# +# $Id: mutex2.test,v 1.9 2008/10/07 15:25:49 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable !mutex { + finish_test + return +} + +# deinitialize +# +catch {db close} +sqlite3_reset_auto_extension +sqlite3_shutdown +install_mutex_counters 1 + +# Fix the mutex subsystem so that it will not initialize. In other words, +# make it so that sqlite3_initialize() always fails. +# +do_test mutex2-1.1 { + set ::disable_mutex_init 10 + sqlite3_initialize +} {SQLITE_IOERR} +do_test mutex2-1.1 { + set ::disable_mutex_init 7 + sqlite3_initialize +} {SQLITE_NOMEM} + +proc utf16 {str} { + set r [encoding convertto unicode $str] + append r "\x00\x00" + return $r +} + +# Now that sqlite3_initialize() is failing, try to run various APIs that +# require that SQLite be initialized. Verify that they fail. +# +do_test mutex2-2.1 { + set ::disable_mutex_init 7 + set rc [catch {sqlite db test.db} msg] + lappend rc $msg +} {1 {}} +ifcapable utf16 { + do_test mutex2-2.2 { + set db2 [sqlite3_open16 [utf16 test.db] {}] + } {0} + do_test mutex2-2.3 { + sqlite3_complete16 [utf16 {SELECT * FROM t1;}] + } {7} +} +do_test mutex2-2.4 { + sqlite3_mprintf_int {This is a test %d,%d,%d} 1 2 3 +} {} +ifcapable load_ext { + do_test mutex2-2.5 { + sqlite3_auto_extension_sqr + } {7} +} +do_test mutex2-2.6 { + sqlite3_reset_auto_extension +} {} +do_test mutex2-2.7 { + sqlite3_malloc 10000 +} {0} +do_test mutex2-2.8 { + sqlite3_realloc 0 10000 +} {0} +ifcapable threadsafe { + do_test mutex2-2.9 { + alloc_dealloc_mutex + } {0} +} +do_test mutex2-2.10 { + vfs_initfail_test +} {} + +# Restore the system to a functional state +# +install_mutex_counters 0 +set disable_mutex_init 0 +autoinstall_test_functions + +# Mutex allocation works now. +# + +do_test mutex2-3.1 { + set ptr [alloc_dealloc_mutex] + expr {$ptr!=0} +} {1} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/nan.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/nan.test --- sqlite3-3.4.2/test/nan.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/nan.test 2009-06-05 18:03:33.000000000 +0100 @@ -0,0 +1,320 @@ +# 2008 April 28 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3060 +# +# Make sure IEEE floating point NaN values are handled properly. +# SQLite should always convert NaN into NULL. +# +# Also verify that the decimal to IEEE754 binary conversion routines +# correctly generate 0.0, +Inf, and -Inf as appropriate for numbers +# out of range. +# +# $Id: nan.test,v 1.5 2008/09/18 11:30:13 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test nan-1.1.1 { + db eval { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + CREATE TABLE t1(x FLOAT); + } + set ::STMT [sqlite3_prepare db "INSERT INTO t1 VALUES(?)" -1 TAIL] + sqlite3_bind_double $::STMT 1 NaN + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} +if {$tcl_platform(platform) != "symbian"} { + do_test nan-1.1.2 { + sqlite3_bind_double $::STMT 1 +Inf + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} + } {{} null inf real} + do_test nan-1.1.3 { + sqlite3_bind_double $::STMT 1 -Inf + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} + } {{} null inf real -inf real} + do_test nan-1.1.4 { + sqlite3_bind_double $::STMT 1 -NaN + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} + } {{} null inf real -inf real {} null} + do_test nan-1.1.5 { + sqlite3_bind_double $::STMT 1 NaN0 + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} + } {{} null inf real -inf real {} null {} null} + do_test nan-1.1.5 { + sqlite3_bind_double $::STMT 1 -NaN0 + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} + } {{} null inf real -inf real {} null {} null {} null} + do_test nan-1.1.6 { + db eval { + UPDATE t1 SET x=x-x; + SELECT x, typeof(x) FROM t1; + } + } {{} null {} null {} null {} null {} null {} null} +} + +# The following block of tests, nan-1.2.*, are the same as the nan-1.1.* +# tests above, except that the SELECT queries used to validate data +# convert floating point values to text internally before returning them +# to Tcl. This allows the tests to be run on platforms where Tcl has +# problems converting "inf" and "-inf" from floating point to text format. +# It also tests the internal float->text conversion routines a bit. 
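The nan-1.* and nan-2.* tests above feed NaN and infinities to sqlite3_bind_double() from Tcl. A rough C equivalent of the NaN case, assuming a table t1(x FLOAT) already exists, is sketched below; the point being demonstrated is that a bound NaN is stored and returned as an SQL NULL rather than a REAL.

    #include <sqlite3.h>
    #include <math.h>
    #include <stdio.h>

    /* Insert a NaN and read it back; SQLite converts it to NULL. */
    static int nan_roundtrip(sqlite3 *db){
      sqlite3_stmt *pIns = 0, *pSel = 0;
      int rc;

      rc = sqlite3_prepare_v2(db, "INSERT INTO t1 VALUES(?)", -1, &pIns, 0);
      if( rc!=SQLITE_OK ) return rc;
      sqlite3_bind_double(pIns, 1, nan(""));   /* an IEEE 754 quiet NaN */
      sqlite3_step(pIns);
      sqlite3_finalize(pIns);

      rc = sqlite3_prepare_v2(db, "SELECT x, typeof(x) FROM t1", -1, &pSel, 0);
      if( rc!=SQLITE_OK ) return rc;
      while( sqlite3_step(pSel)==SQLITE_ROW ){
        /* Expect SQLITE_NULL / "null" here, matching the {{} null} results. */
        printf("type=%d typeof=%s\n",
               sqlite3_column_type(pSel, 0),
               sqlite3_column_text(pSel, 1));
      }
      return sqlite3_finalize(pSel);
    }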
+# +do_test nan-1.2.1 { + db eval { + DELETE FROM T1; + } + sqlite3_bind_double $::STMT 1 NaN + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null} +do_test nan-1.2.2 { + sqlite3_bind_double $::STMT 1 +Inf + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null Inf real} +do_test nan-1.2.3 { + sqlite3_bind_double $::STMT 1 -Inf + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null Inf real -Inf real} +do_test nan-1.2.4 { + sqlite3_bind_double $::STMT 1 -NaN + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null Inf real -Inf real {} null} +do_test nan-1.2.5 { + sqlite3_bind_double $::STMT 1 NaN0 + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null Inf real -Inf real {} null {} null} +do_test nan-1.2.5 { + sqlite3_bind_double $::STMT 1 -NaN0 + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {{} null Inf real -Inf real {} null {} null {} null} +do_test nan-1.2.6 { + db eval { + UPDATE t1 SET x=x-x; + SELECT CAST(x AS text), typeof(x) FROM t1; + } +} {{} null {} null {} null {} null {} null {} null} + +do_test nan-2.1 { + db eval { + DELETE FROM T1; + } + sqlite3_bind_double $::STMT 1 NaN + sqlite3_step $::STMT + sqlite3_reset $::STMT + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} +sqlite3_finalize $::STMT + +# SQLite always converts NaN into NULL so it is not possible to write +# a NaN value into the database file using SQLite. The following series +# of tests writes a normal floating point value (0.5) into the database, +# then writes directly into the database file to change the 0.5 into NaN. +# Then it reads the value of the database to verify it is converted into +# NULL. +# +do_test nan-3.1 { + db eval { + DELETE FROM t1; + INSERT INTO t1 VALUES(0.5); + PRAGMA auto_vacuum=OFF; + PRAGMA page_size=1024; + VACUUM; + } + hexio_read test.db 2040 8 +} {3FE0000000000000} +do_test nan-3.2 { + db eval { + SELECT x, typeof(x) FROM t1 + } +} {0.5 real} +do_test nan-3.3 { + db close + hexio_write test.db 2040 FFF8000000000000 + sqlite3 db test.db + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} +do_test nan-3.4 { + db close + hexio_write test.db 2040 7FF8000000000000 + sqlite3 db test.db + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} +do_test nan-3.5 { + db close + hexio_write test.db 2040 FFFFFFFFFFFFFFFF + sqlite3 db test.db + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} +do_test nan-3.6 { + db close + hexio_write test.db 2040 7FFFFFFFFFFFFFFF + sqlite3 db test.db + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} + +# Verify that the sqlite3AtoF routine is able to handle extreme +# numbers. 
+# +do_test nan-4.1 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES([string repeat 9 307].0)" + db eval {SELECT x, typeof(x) FROM t1} +} {1e+307 real} +do_test nan-4.2 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES([string repeat 9 308].0)" + db eval {SELECT x, typeof(x) FROM t1} +} {1e+308 real} +do_test nan-4.3 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(-[string repeat 9 307].0)" + db eval {SELECT x, typeof(x) FROM t1} +} {-1e+307 real} +do_test nan-4.4 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(-[string repeat 9 308].0)" + db eval {SELECT x, typeof(x) FROM t1} +} {-1e+308 real} +do_test nan-4.5 { + db eval {DELETE FROM t1} + set big -[string repeat 0 10000][string repeat 9 308].[string repeat 0 10000] + db eval "INSERT INTO t1 VALUES($big)" + db eval {SELECT x, typeof(x) FROM t1} +} {-1e+308 real} +do_test nan-4.6 { + db eval {DELETE FROM t1} + set big [string repeat 0 10000][string repeat 9 308].[string repeat 0 10000] + db eval "INSERT INTO t1 VALUES($big)" + db eval {SELECT x, typeof(x) FROM t1} +} {1e+308 real} + +if {$tcl_platform(platform) != "symbian"} { + # Do not run these tests on Symbian, as the Tcl port doesn't like to + # convert from floating point value "-inf" to a string. + # + do_test nan-4.7 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES([string repeat 9 309].0)" + db eval {SELECT x, typeof(x) FROM t1} + } {inf real} + do_test nan-4.8 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(-[string repeat 9 309].0)" + db eval {SELECT x, typeof(x) FROM t1} + } {-inf real} +} +do_test nan-4.9 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES([string repeat 9 309].0)" + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {Inf real} +do_test nan-4.10 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(-[string repeat 9 309].0)" + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {-Inf real} + +do_test nan-4.10 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(1234.5[string repeat 0 10000]12345)" + db eval {SELECT x, typeof(x) FROM t1} +} {1234.5 real} +do_test nan-4.11 { + db eval {DELETE FROM t1} + db eval "INSERT INTO t1 VALUES(-1234.5[string repeat 0 10000]12345)" + db eval {SELECT x, typeof(x) FROM t1} +} {-1234.5 real} +do_test nan-4.12 { + db eval {DELETE FROM t1} + set small [string repeat 0 10000].[string repeat 0 324][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT x, typeof(x) FROM t1} +} {0.0 real} +do_test nan-4.13 { + db eval {DELETE FROM t1} + set small \ + -[string repeat 0 10000].[string repeat 0 324][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT x, typeof(x) FROM t1} +} {0.0 real} + +# These tests test some really, really small floating point numbers. +# +if {$tcl_platform(platform) != "symbian"} { + # These two are not run on symbian because tcl has trouble converting + # the very small numbers back to text form (probably due to a difference + # in the sprintf() implementation). 
+ # + do_test nan-4.14 { + db eval {DELETE FROM t1} + set small \ + [string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT x, typeof(x) FROM t1} + } {9.88131291682493e-324 real} + do_test nan-4.15 { + db eval {DELETE FROM t1} + set small \ + -[string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT x, typeof(x) FROM t1} + } {-9.88131291682493e-324 real} +} +do_test nan-4.16 { + db eval {DELETE FROM t1} + set small [string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {9.88131291682493e-324 real} +do_test nan-4.17 { + db eval {DELETE FROM t1} + set small \ + -[string repeat 0 10000].[string repeat 0 323][string repeat 9 10000] + db eval "INSERT INTO t1 VALUES($small)" + db eval {SELECT CAST(x AS text), typeof(x) FROM t1} +} {-9.88131291682493e-324 real} + +do_test nan-4.20 { + db eval {DELETE FROM t1} + set big [string repeat 9 10000].0e-9000 + db eval "INSERT INTO t1 VALUES($big)" + db eval {SELECT x, typeof(x) FROM t1} +} {{} null} + + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/notify1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/notify1.test --- sqlite3-3.4.2/test/notify1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/notify1.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,500 @@ +# 2009 March 04 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the sqlite3_unlock_notify() API. +# +# $Id: notify1.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !unlock_notify||!shared_cache { + finish_test + return +} +db close +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +#------------------------------------------------------------------------- +# Warm body test. Test that an unlock-notify callback can be registered +# and that it is invoked. +# +do_test notify1-1.1 { + sqlite3 db test.db + sqlite3 db2 test.db + execsql { CREATE TABLE t1(a, b) } +} {} +do_test notify1-1.2 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 2); + } + catchsql { INSERT INTO t1 VALUES(3, 4) } db2 +} {1 {database table is locked}} +do_test notify1-1.3 { + set zScript "" + db2 unlock_notify { + set zScript "db2 eval { INSERT INTO t1 VALUES(3, 4) }" + } + execsql { SELECT * FROM t1 } +} {1 2} +do_test notify1-1.4 { + set zScript +} {} +do_test notify1-1.5 { + execsql { COMMIT } + eval $zScript + execsql { SELECT * FROM t1 } +} {1 2 3 4} + +#------------------------------------------------------------------------- +# Verify that invoking the "unlock_notify" method with no arguments +# (which is the equivalent of invoking sqlite3_unlock_notify() with +# a NULL xNotify argument) cancels a pending notify callback. 
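Tests notify1-1.11 through notify1-1.15 below confirm that invoking the Tcl "unlock_notify" method with no arguments cancels a pending callback. In the C API that corresponds to calling sqlite3_unlock_notify() with a NULL xNotify pointer. A short sketch of register-then-cancel (the callback name and its body are only placeholders):

    #include <sqlite3.h>

    /* apArg[] holds the pNotifyArg values of every blocked connection
    ** being released by the same unlock event. */
    static void on_unlock(void **apArg, int nArg){
      (void)apArg; (void)nArg;
      /* ... resume the blocked work ... */
    }

    static void register_then_cancel(sqlite3 *dbBlocked){
      /* Register: fires once the blocking connection ends its transaction. */
      sqlite3_unlock_notify(dbBlocked, on_unlock, dbBlocked);

      /* Cancel: a NULL callback clears the pending registration, so the
      ** other connection's COMMIT no longer invokes on_unlock, which is
      ** what notify1-1.15 verifies. */
      sqlite3_unlock_notify(dbBlocked, 0, 0);
    }

Note that sqlite3_unlock_notify() refuses the registration and returns SQLITE_LOCKED when waiting would close a cycle of blocked connections; the notify1-2.* tests below provoke exactly that "database is deadlocked" case.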
+# +do_test notify1-1.11 { + execsql { DROP TABLE t1; CREATE TABLE t1(a, b) } +} {} +do_test notify1-1.12 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 2); + } + catchsql { INSERT INTO t1 VALUES(3, 4) } db2 +} {1 {database table is locked}} +do_test notify1-1.13 { + set zScript "" + db2 unlock_notify { + set zScript "db2 eval { INSERT INTO t1 VALUES(3, 4) }" + } + execsql { SELECT * FROM t1 } +} {1 2} +do_test notify1-1.14 { + set zScript +} {} +do_test notify1-1.15 { + db2 unlock_notify + execsql { COMMIT } + eval $zScript + execsql { SELECT * FROM t1 } +} {1 2} + +#------------------------------------------------------------------------- +# The following tests, notify1-2.*, test that deadlock is detected +# correctly. +# +do_test notify1-2.1 { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES('I', 'II'); + } +} {} + +# +# Test for simple deadlock involving two database connections. +# +# 1. Grab a write-lock on t1 with [db]. Then grab a read-lock on t2 with [db2]. +# 2. Try to grab a read-lock on t1 with [db2] (fails). +# 3. Have [db2] wait on the read-lock it failed to obtain in step 2. +# 4. Try to grab a write-lock on t2 with [db] (fails). +# 5. Try to have [db] wait on the lock from step 4. Fails, as the system +# would be deadlocked (since [db2] is already waiting on [db], and this +# operation would have [db] wait on [db2]). +# +do_test notify1-2.2.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(5, 6); + } + execsql { + BEGIN; + SELECT * FROM t2; + } db2 +} {I II} +do_test notify1-2.2.2 { + catchsql { SELECT * FROM t1 } db2 +} {1 {database table is locked: t1}} +do_test notify1-2.2.3 { + db2 unlock_notify {lappend unlock_notify db2} +} {} +do_test notify1-2.2.4 { + catchsql { INSERT INTO t2 VALUES('III', 'IV') } +} {1 {database table is locked: t2}} +do_test notify1-2.2.5 { + set rc [catch { db unlock_notify {lappend unlock_notify db} } msg] + list $rc $msg +} {1 {database is deadlocked}} + +# +# Test for slightly more complex deadlock involving three database +# connections: db, db2 and db3. 
+# +do_test notify1-2.3.1 { + db close + db2 close + file delete -force test.db test2.db test3.db + foreach con {db db2 db3} { + sqlite3 $con test.db + $con eval { ATTACH 'test2.db' AS aux2 } + $con eval { ATTACH 'test3.db' AS aux3 } + } + execsql { + CREATE TABLE main.t1(a, b); + CREATE TABLE aux2.t2(a, b); + CREATE TABLE aux3.t3(a, b); + } +} {} +do_test notify1-2.3.2 { + execsql { BEGIN ; INSERT INTO t1 VALUES(1, 2) } db + execsql { BEGIN ; INSERT INTO t2 VALUES(1, 2) } db2 + execsql { BEGIN ; INSERT INTO t3 VALUES(1, 2) } db3 +} {} +do_test notify1-2.3.3 { + catchsql { SELECT * FROM t2 } db +} {1 {database table is locked: t2}} +do_test notify1-2.3.4 { + catchsql { SELECT * FROM t3 } db2 +} {1 {database table is locked: t3}} +do_test notify1-2.3.5 { + catchsql { SELECT * FROM t1 } db3 +} {1 {database table is locked: t1}} +do_test notify1-2.3.6 { + set lUnlock [list] + db unlock_notify {lappend lUnlock db} + db2 unlock_notify {lappend lUnlock db2} +} {} +do_test notify1-2.3.7 { + set rc [catch { db3 unlock_notify {lappend lUnlock db3} } msg] + list $rc $msg +} {1 {database is deadlocked}} +do_test notify1-2.3.8 { + execsql { COMMIT } + set lUnlock +} {} +do_test notify1-2.3.9 { + db3 unlock_notify {lappend lUnlock db3} + set lUnlock +} {db3} +do_test notify1-2.3.10 { + execsql { COMMIT } db2 + set lUnlock +} {db3 db} +do_test notify1-2.3.11 { + execsql { COMMIT } db3 + set lUnlock +} {db3 db db2} +catch { db3 close } +catch { db2 close } +catch { db close } + +#------------------------------------------------------------------------- +# The following tests, notify1-3.* and notify1-4.*, test that callbacks +# can be issued when there are many (>16) connections waiting on a single +# unlock event. +# +foreach {tn nConn} {3 20 4 76} { + do_test notify1-$tn.1 { + sqlite3 db test.db + execsql { + BEGIN; + INSERT INTO t1 VALUES('a', 'b'); + } + } {} + set lUnlock [list] + set lUnlockFinal [list] + for {set ii 1} {$ii <= $nConn} {incr ii} { + do_test notify1-$tn.2.$ii.1 { + set cmd "db$ii" + sqlite3 $cmd test.db + catchsql { SELECT * FROM t1 } $cmd + } {1 {database table is locked: t1}} + do_test notify1-$tn.2.$ii.2 { + $cmd unlock_notify "lappend lUnlock $ii" + } {} + lappend lUnlockFinal $ii + } + do_test notify1-$tn.3 { + set lUnlock + } {} + do_test notify1-$tn.4 { + execsql {COMMIT} + lsort -integer $lUnlock + } $lUnlockFinal + do_test notify1-$tn.5 { + for {set ii 1} {$ii <= $nConn} {incr ii} { + "db$ii" close + } + } {} +} +db close + +#------------------------------------------------------------------------- +# These tests, notify1-5.*, test that a malloc() failure that occurs while +# allocating an array to use as an argument to an unlock-notify callback +# is handled correctly. +# +source $testdir/malloc_common.tcl +do_malloc_test notify1-5 -tclprep { + set ::lUnlock [list] + execsql { + CREATE TABLE t1(a, b); + BEGIN; + INSERT INTO t1 VALUES('a', 'b'); + } + for {set ii 1} {$ii <= 60} {incr ii} { + set cmd "db$ii" + sqlite3 $cmd test.db + catchsql { SELECT * FROM t1 } $cmd + $cmd unlock_notify "lappend ::lUnlock $ii" + } +} -sqlbody { + COMMIT; +} -cleanup { + # One of two things should have happened: + # + # 1) The transaction opened by [db] was not committed. No unlock-notify + # callbacks were invoked, OR + # 2) The transaction opened by [db] was committed and 60 unlock-notify + # callbacks were invoked. 
+ # + do_test notify1-5.systemstate { + expr { ([llength $::lUnlock]==0 && [sqlite3_get_autocommit db]==0) + || ([llength $::lUnlock]==60 && [sqlite3_get_autocommit db]==1) + } + } {1} + for {set ii 1} {$ii <= 60} {incr ii} { "db$ii" close } +} + +#------------------------------------------------------------------------- +# Test cases notify1-6.* test cases where the following occur: +# +# notify1-6.1.*: Test encountering an SQLITE_LOCKED error when the +# "blocking connection" has already been set by a previous +# SQLITE_LOCKED. +# +# notify1-6.2.*: Test encountering an SQLITE_LOCKED error when already +# waiting on an unlock-notify callback. +# +# notify1-6.3.*: Test that if an SQLITE_LOCKED error is encountered while +# already waiting on an unlock-notify callback, and then +# the blocker that caused the SQLITE_LOCKED commits its +# transaction, the unlock-notify callback is not invoked. +# +# notify1-6.4.*: Like 6.3.*, except that instead of the second blocker +# committing its transaction, the first does. The +# unlock-notify callback is therefore invoked. +# +db close +do_test notify1-6.1.1 { + file delete -force test.db test2.db + foreach conn {db db2 db3} { + sqlite3 $conn test.db + execsql { ATTACH 'test2.db' AS two } $conn + } + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE two.t2(a, b); + } + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 2); + } db2 + execsql { + BEGIN; + INSERT INTO t2 VALUES(1, 2); + } db3 +} {} +do_test notify1-6.1.2 { + catchsql { SELECT * FROM t2 } +} {1 {database table is locked: t2}} +do_test notify1-6.1.3 { + catchsql { SELECT * FROM t1 } +} {1 {database table is locked: t1}} + +do_test notify1-6.2.1 { + set unlocked 0 + db unlock_notify {set unlocked 1} + set unlocked +} {0} +do_test notify1-6.2.2 { + catchsql { SELECT * FROM t2 } +} {1 {database table is locked: t2}} +do_test notify1-6.2.3 { + execsql { COMMIT } db2 + set unlocked +} {1} + +do_test notify1-6.3.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + } db2 +} {} +do_test notify1-6.3.2 { + catchsql { SELECT * FROM t1 } +} {1 {database table is locked: t1}} +do_test notify1-6.3.3 { + set unlocked 0 + db unlock_notify {set unlocked 1} + set unlocked +} {0} +do_test notify1-6.3.4 { + catchsql { SELECT * FROM t2 } +} {1 {database table is locked: t2}} +do_test notify1-6.3.5 { + execsql { COMMIT } db3 + set unlocked +} {0} + +do_test notify1-6.4.1 { + execsql { + BEGIN; + INSERT INTO t2 VALUES(3, 4); + } db3 + catchsql { SELECT * FROM t2 } +} {1 {database table is locked: t2}} +do_test notify1-6.4.2 { + execsql { COMMIT } db2 + set unlocked +} {1} +do_test notify1-6.4.3 { + execsql { COMMIT } db3 +} {} +db close +db2 close +db3 close + +#------------------------------------------------------------------------- +# Test cases notify1-7.* tests that when more than one distinct +# unlock-notify function is registered, all are invoked correctly. 
+# +proc unlock_notify {} { + incr ::unlock_notify +} +do_test notify1-7.1 { + foreach conn {db db2 db3} { + sqlite3 $conn test.db + } + execsql { + BEGIN; + INSERT INTO t1 VALUES(5, 6); + } +} {} +do_test notify1-7.2 { + catchsql { SELECT * FROM t1 } db2 +} {1 {database table is locked: t1}} +do_test notify1-7.3 { + catchsql { SELECT * FROM t1 } db3 +} {1 {database table is locked: t1}} +do_test notify1-7.4 { + set unlock_notify 0 + db2 unlock_notify unlock_notify + sqlite3_unlock_notify db3 +} {SQLITE_OK} +do_test notify1-7.5 { + set unlock_notify +} {0} +do_test notify1-7.6 { + execsql { COMMIT } + set unlock_notify +} {2} + +#------------------------------------------------------------------------- +# Test cases notify1-8.* tests that the correct SQLITE_LOCKED extended +# error code is returned in various scenarios. +# +do_test notify1-8.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(7, 8); + } + catchsql { SELECT * FROM t1 } db2 +} {1 {database table is locked: t1}} +do_test notify1-8.2 { + sqlite3_extended_errcode db2 +} {SQLITE_LOCKED_SHAREDCACHE} + +do_test notify1-8.3 { + execsql { + COMMIT; + BEGIN EXCLUSIVE; + } + catchsql { SELECT * FROM t1 } db2 +} {1 {database schema is locked: main}} +do_test notify1-8.4 { + sqlite3_extended_errcode db2 +} {SQLITE_LOCKED_SHAREDCACHE} + +do_test notify1-8.X { + execsql { COMMIT } +} {} + +#------------------------------------------------------------------------- +# Test cases notify1-9.* test the shared-cache 'pending-lock' feature. +# +do_test notify1-9.1 { + execsql { + CREATE TABLE t2(a, b); + BEGIN; + SELECT * FROM t1; + } db2 +} {1 2 3 4 5 6 7 8} +do_test notify1-9.2 { + execsql { SELECT * FROM t1 } db3 +} {1 2 3 4 5 6 7 8} +do_test notify1-9.3 { + catchsql { + BEGIN; + INSERT INTO t1 VALUES(9, 10); + } +} {1 {database table is locked: t1}} +do_test notify1-9.4 { + catchsql { SELECT * FROM t2 } db3 +} {1 {database table is locked}} +do_test notify1-9.5 { + execsql { COMMIT } db2 + execsql { SELECT * FROM t2 } db3 +} {} +do_test notify1-9.6 { + execsql { COMMIT } +} {} + +do_test notify1-9.7 { + execsql { + BEGIN; + SELECT * FROM t1; + } db2 +} {1 2 3 4 5 6 7 8} +do_test notify1-9.8 { + execsql { SELECT * FROM t1 } db3 +} {1 2 3 4 5 6 7 8} +do_test notify1-9.9 { + catchsql { + BEGIN; + INSERT INTO t1 VALUES(9, 10); + } +} {1 {database table is locked: t1}} +do_test notify1-9.10 { + catchsql { SELECT * FROM t2 } db3 +} {1 {database table is locked}} +do_test notify1-9.11 { + execsql { COMMIT } + execsql { SELECT * FROM t2 } db3 +} {} +do_test notify1-9.12 { + execsql { COMMIT } db2 +} {} + +db close +db2 close +db3 close +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/notify2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/notify2.test --- sqlite3-3.4.2/test/notify2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/notify2.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,242 @@ +# 2009 March 04 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: notify2.test,v 1.7 2009/03/30 11:59:31 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } +ifcapable !unlock_notify||!shared_cache { finish_test ; return } + +# The tests in this file test the sqlite3_blocking_step() function in +# test_thread.c. sqlite3_blocking_step() is not an SQLite API function, +# it is just a demonstration of how the sqlite3_unlock_notify() function +# can be used to synchronize multi-threaded access to SQLite databases +# in shared-cache mode. +# +# Since the implementation of sqlite3_blocking_step() is included on the +# website as example code, it is important to test that it works. +# +# notify2-1.*: +# +# This test uses $nThread threads. Each thread opens the main database +# and attaches two other databases. Each database contains a single table. +# +# Each thread repeats transactions over and over for 20 seconds. Each +# transaction consists of 3 operations. Each operation is either a read +# or a write of one of the tables. The read operations verify an invariant +# to make sure that things are working as expected. If an SQLITE_LOCKED +# error is returned the current transaction is rolled back immediately. +# +# This exercise is repeated twice, once using sqlite3_step(), and the +# other using sqlite3_blocking_step(). The results are compared to ensure +# that sqlite3_blocking_step() resulted in higher transaction throughput. +# + +db close +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +# Number of threads to run simultaneously. +# +set nThread 6 +set nSecond 5 + +# The Tcl script executed by each of the $nThread threads used by this test. +# +set ThreadProgram { + + # Proc used by threads to execute SQL. + # + proc execsql_blocking {db zSql} { + set lRes [list] + set rc SQLITE_OK + +set sql $zSql + + while {$rc=="SQLITE_OK" && $zSql ne ""} { + set STMT [$::xPrepare $db $zSql -1 zSql] + while {[set rc [$::xStep $STMT]] eq "SQLITE_ROW"} { + for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} { + lappend lRes [sqlite3_column_text $STMT 0] + } + } + set rc [sqlite3_finalize $STMT] + } + + if {$rc != "SQLITE_OK"} { error "$rc $sql [sqlite3_errmsg $db]" } + return $lRes + } + + proc execsql_retry {db sql} { + set msg "SQLITE_LOCKED blah..." + while { [string match SQLITE_LOCKED* $msg] } { + catch { execsql_blocking $db $sql } msg + } + } + + proc select_one {args} { + set n [llength $args] + lindex $args [expr int($n*rand())] + } + + proc opendb {} { + # Open a database connection. Attach the two auxillary databases. + set ::DB [sqlite3_open test.db] + execsql_retry $::DB { ATTACH 'test2.db' AS aux2; } + execsql_retry $::DB { ATTACH 'test3.db' AS aux3; } + } + + opendb + + #after 2000 + + # This loop runs for ~20 seconds. + # + set iStart [clock_seconds] + while { ([clock_seconds]-$iStart) < $nSecond } { + + # Each transaction does 3 operations. Each operation is either a read + # or write of a randomly selected table (t1, t2 or t3). Set the variables + # $SQL(1), $SQL(2) and $SQL(3) to the SQL commands used to implement + # each operation. 
+ # + for {set ii 1} {$ii <= 3} {incr ii} { + foreach {tbl database} [select_one {t1 main} {t2 aux2} {t3 aux3}] {} + + set SQL($ii) [string map [list xxx $tbl yyy $database] [select_one { + SELECT + (SELECT b FROM xxx WHERE a=(SELECT max(a) FROM xxx))==total(a) + FROM xxx WHERE a!=(SELECT max(a) FROM xxx); + } { + DELETE FROM xxx WHERE a<(SELECT max(a)-100 FROM xxx); + INSERT INTO xxx SELECT NULL, total(a) FROM xxx; + } { + CREATE INDEX IF NOT EXISTS yyy.xxx_i ON xxx(b); + } { + DROP INDEX IF EXISTS yyy.xxx_i; + } + ]] + } + + # Execute the SQL transaction. + # + set rc [catch { execsql_blocking $::DB " + BEGIN; + $SQL(1); + $SQL(2); + $SQL(3); + COMMIT; + " + } msg] + + if {$rc && [string match "SQLITE_LOCKED*" $msg] + || [string match "SQLITE_SCHEMA*" $msg] + } { + # Hit an SQLITE_LOCKED error. Rollback the current transaction. + set rc [catch { execsql_blocking $::DB ROLLBACK } msg] + if {$rc && [string match "SQLITE_LOCKED*" $msg]} { + sqlite3_close $::DB + opendb + } + } elseif {$rc} { + # Hit some other kind of error. This is a malfunction. + error $msg + } else { + # No error occured. Check that any SELECT statements in the transaction + # returned "1". Otherwise, the invariant was false, indicating that + # some malfunction has occured. + foreach r $msg { if {$r != 1} { puts "Invariant check failed: $msg" } } + } + } + + # Close the database connection and return 0. + # + sqlite3_close $::DB + expr 0 +} + +foreach {iTest xStep xPrepare} { + 1 sqlite3_blocking_step sqlite3_blocking_prepare_v2 + 2 sqlite3_step sqlite3_nonblocking_prepare_v2 +} { + file delete -force test.db test2.db test3.db + + set ThreadSetup "set xStep $xStep;set xPrepare $xPrepare;set nSecond $nSecond" + + # Set up the database schema used by this test. Each thread opens file + # test.db as the main database, then attaches files test2.db and test3.db + # as auxillary databases. Each file contains a single table (t1, t2 and t3, in + # files test.db, test2.db and test3.db, respectively). + # + do_test notify2-$iTest.1.1 { + sqlite3 db test.db + execsql { + ATTACH 'test2.db' AS aux2; + ATTACH 'test3.db' AS aux3; + CREATE TABLE main.t1(a INTEGER PRIMARY KEY, b); + CREATE TABLE aux2.t2(a INTEGER PRIMARY KEY, b); + CREATE TABLE aux3.t3(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 SELECT NULL, 0; + INSERT INTO t2 SELECT NULL, 0; + INSERT INTO t3 SELECT NULL, 0; + } + } {} + do_test notify2-$iTest.1.2 { + db close + } {} + + + # Launch $nThread threads. Then wait for them to finish. + # + puts "Running $xStep test for $nSecond seconds" + unset -nocomplain finished + for {set ii 0} {$ii < $nThread} {incr ii} { + thread_spawn finished($ii) $ThreadSetup $ThreadProgram + } + for {set ii 0} {$ii < $nThread} {incr ii} { + do_test notify2-$iTest.2.$ii { + if {![info exists finished($ii)]} { vwait finished($ii) } + set finished($ii) + } {0} + } + + # Count the total number of succesful writes. + do_test notify2-$iTest.3.1 { + sqlite3 db test.db + execsql { + ATTACH 'test2.db' AS aux2; + ATTACH 'test3.db' AS aux3; + } + set anWrite($xStep) [execsql { + SELECT (SELECT max(a) FROM t1) + + (SELECT max(a) FROM t2) + + (SELECT max(a) FROM t3) + }] + db close + } {} +} + +# The following tests checks to make sure sqlite3_blocking_step() is +# faster than sqlite3_step(). blocking_step() is always faster on +# multi-core and is usually faster on single-core. But sometimes, by +# chance, step() will be faster on a single core, in which case the +# following test will fail. 
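The comment block above describes sqlite3_blocking_step() from test_thread.c, which is not part of the SQLite API but a demonstration of building a blocking step on top of sqlite3_unlock_notify(). A condensed sketch of that idea, assuming POSIX threads and omitting the details of the published example (such as distinguishing the SQLITE_LOCKED_SHAREDCACHE extended code):

    #include <sqlite3.h>
    #include <pthread.h>

    typedef struct UnlockNotification {
      int fired;                   /* Set to 1 when the callback runs */
      pthread_cond_t cond;
      pthread_mutex_t mutex;
    } UnlockNotification;

    /* Invoked by SQLite when the blocking connection ends its transaction. */
    static void unlock_notify_cb(void **apArg, int nArg){
      int i;
      for(i=0; i<nArg; i++){
        UnlockNotification *p = (UnlockNotification *)apArg[i];
        pthread_mutex_lock(&p->mutex);
        p->fired = 1;
        pthread_cond_signal(&p->cond);
        pthread_mutex_unlock(&p->mutex);
      }
    }

    /* Wait until db's blocker finishes, or return SQLITE_LOCKED on deadlock. */
    static int wait_for_unlock_notify(sqlite3 *db){
      UnlockNotification un;
      int rc;
      un.fired = 0;
      pthread_mutex_init(&un.mutex, 0);
      pthread_cond_init(&un.cond, 0);

      rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un);
      if( rc==SQLITE_OK ){
        pthread_mutex_lock(&un.mutex);
        while( !un.fired ) pthread_cond_wait(&un.cond, &un.mutex);
        pthread_mutex_unlock(&un.mutex);
      }

      pthread_cond_destroy(&un.cond);
      pthread_mutex_destroy(&un.mutex);
      return rc;                   /* SQLITE_OK or SQLITE_LOCKED (deadlock) */
    }

    /* Like sqlite3_step(), but waits instead of failing with SQLITE_LOCKED. */
    static int blocking_step(sqlite3_stmt *pStmt){
      int rc;
      while( SQLITE_LOCKED==(rc = sqlite3_step(pStmt)) ){
        rc = wait_for_unlock_notify(sqlite3_db_handle(pStmt));
        if( rc!=SQLITE_OK ) break;
        sqlite3_reset(pStmt);
      }
      return rc;
    }

The throughput comparison in notify2-3 below exists precisely because this pattern lets a blocked thread sleep until the lock clears instead of retrying in a busy loop.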
+# +puts "The following test seeks to demonstrate that the sqlite3_unlock_notify()" +puts "interface helps multi-core systems to run faster. This test sometimes" +puts "fails on single-core machines." +puts [array get anWrite] +do_test notify2-3 { + expr {$anWrite(sqlite3_blocking_step) > $anWrite(sqlite3_step)} +} {1} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/null.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/null.test --- sqlite3-3.4.2/test/null.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/null.test 2009-06-05 18:03:35.000000000 +0100 @@ -150,14 +150,45 @@ } {{} 0 1} # A UNION to two queries should treat NULL values -# as distinct +# as distinct. +# +# (Later:) We also take this opportunity to test the ability +# of an ORDER BY clause to bind to either SELECT of a UNION. +# The left-most SELECT is preferred. In standard SQL, only +# the left SELECT can be used. The ability to match an ORDER +# BY term to the right SELECT is an SQLite extension. # ifcapable compound { -do_test null-6.1 { - execsql { - select b from t1 union select c from t1 order by c; - } -} {{} 0 1} + do_test null-6.1 { + execsql { + select b from t1 union select c from t1 order by b; + } + } {{} 0 1} + do_test null-6.2 { + execsql { + select b from t1 union select c from t1 order by 1; + } + } {{} 0 1} + do_test null-6.3 { + execsql { + select b from t1 union select c from t1 order by t1.b; + } + } {{} 0 1} + do_test null-6.4 { + execsql { + select b from t1 union select c from t1 order by main.t1.b; + } + } {{} 0 1} + do_test null-6.5 { + catchsql { + select b from t1 union select c from t1 order by t1.a; + } + } {1 {1st ORDER BY term does not match any column in the result set}} + do_test null-6.6 { + catchsql { + select b from t1 union select c from t1 order by main.t1.a; + } + } {1 {1st ORDER BY term does not match any column in the result set}} } ;# ifcapable compound # The UNIQUE constraint only applies to non-null values diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/openv2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/openv2.test --- sqlite3-3.4.2/test/openv2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/openv2.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,55 @@ +# 2007 Sep 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests on the sqlite3_open_v2() interface. +# +# $Id: openv2.test,v 1.2 2009/06/11 17:32:45 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +db close +file delete -force test.db test.db-journal +do_test openv2-1.1 { + set rc [catch {sqlite3 db test.db -create 0} msg] + lappend rc $msg +} {1 {unable to open database file}} +do_test openv2-1.2 { + info commands db +} {} +do_test openv2-1.3 { + sqlite3 db test.db + db eval {CREATE TABLE t1(x)} + db close + sqlite3 db test.db -readonly 1 + db eval {SELECT name FROM sqlite_master} +} {t1} +do_test openv2-1.4 { + catchsql { + INSERT INTO t1 VALUES(123) + } +} {1 {attempt to write a readonly database}} + +# Ticket #3908 +# Honor SQLITE_OPEN_READONLY even on an in-memory database, even though +# this is pointless. 
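The openv2 tests above drive the Tcl options -create 0 and -readonly 1, which map onto the flags argument of sqlite3_open_v2(). A brief sketch of the corresponding C calls (the file name is only an example):

    #include <sqlite3.h>
    #include <stdio.h>

    static void open_examples(void){
      sqlite3 *db = 0;
      int rc;

      /* Without SQLITE_OPEN_CREATE the open fails if test.db does not
      ** already exist, as in openv2-1.1. */
      rc = sqlite3_open_v2("test.db", &db, SQLITE_OPEN_READWRITE, 0);
      if( rc!=SQLITE_OK ){
        printf("open failed: %s\n", sqlite3_errmsg(db));
      }
      sqlite3_close(db);

      /* Read-only open: later writes fail with "attempt to write a
      ** readonly database", as in openv2-1.4 and openv2-2.2. */
      rc = sqlite3_open_v2("test.db", &db, SQLITE_OPEN_READONLY, 0);
      if( rc!=SQLITE_OK ) printf("readonly open failed: %d\n", rc);
      sqlite3_close(db);
    }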
+# +do_test openv2-2.1 { + db close + sqlite3 db :memory: -readonly 1 + db eval {SELECT * FROM sqlite_master} +} {} +do_test openv2-2.2 { + catchsql {CREATE TABLE t1(x)} +} {1 {attempt to write a readonly database}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pager2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pager2.test --- sqlite3-3.4.2/test/pager2.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/pager2.test 2009-06-12 03:37:54.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is page cache subsystem. # -# $Id: pager2.test,v 1.6 2007/03/23 18:12:07 danielk1977 Exp $ +# $Id: pager2.test,v 1.9 2008/12/30 17:55:00 drh Exp $ set testdir [file dirname $argv0] @@ -33,13 +33,13 @@ } {0} do_test pager2-1.1 { pager_stats $::p1 -} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +} {ref 0 page 0 max 10 size 0 state 4 err 0 hit 0 miss 0 ovfl 0} do_test pager2-1.2 { pager_pagecount $::p1 } {0} do_test pager2-1.3 { pager_stats $::p1 -} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +} {ref 0 page 0 max 10 size 0 state 4 err 0 hit 0 miss 0 ovfl 0} do_test pager2-1.4 { pager_close $::p1 } {} @@ -62,7 +62,7 @@ } {} do_test pager2-2.3.2 { pager_stats $::p1 -} {ref 0 page 0 max 10 size 0 state 0 err 0 hit 0 miss 0 ovfl 0} +} {ref 0 page 0 max 10 size 0 state 4 err 0 hit 0 miss 0 ovfl 0} do_test pager2-2.3.3 { set v [catch { set ::g1 [page_get $::p1 1] @@ -72,30 +72,30 @@ } {0} do_test pager2-2.3.3 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.3.4 { set ::gx [page_lookup $::p1 1] + page_unref $::gx expr {$::gx!=""} } {1} do_test pager2-2.3.5 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.3.6 { expr {$::g1==$::gx} } {1} do_test pager2-2.3.7 { - page_unref $::gx pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.4 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.5 { pager_pagecount $::p1 } {0} do_test pager2-2.6 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.7 { page_number $::g1 } {1} @@ -107,7 +107,7 @@ } {} do_test pager2-2.10 { pager_stats $::p1 -} {ref 0 page 1 max 10 size 0 state 0 err 0 hit 0 miss 1 ovfl 0} +} {ref 0 page 1 max 10 size 0 state 4 err 0 hit 0 miss 1 ovfl 0} do_test pager2-2.11 { set ::g1 [page_get $::p1 1] expr {$::g1!=0} @@ -117,7 +117,7 @@ } {1} do_test pager2-2.13 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 0 state 1 err 0 hit 1 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 0 state 4 err 0 hit 1 miss 1 ovfl 0} do_test pager2-2.14 { set v [catch { page_write $::g1 "Page-One" @@ -138,19 +138,19 @@ } {0 {}} do_test pager2-2.20 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 1 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 1 state 4 err 0 hit 1 miss 1 ovfl 0} do_test pager2-2.19 { pager_pagecount $::p1 } {1} do_test pager2-2.21 { pager_stats $::p1 -} {ref 1 page 1 max 10 size 1 state 1 err 0 hit 1 miss 1 ovfl 0} +} {ref 1 page 1 max 10 size 1 
state 4 err 0 hit 1 miss 1 ovfl 0} do_test pager2-2.22 { page_unref $::g1 } {} do_test pager2-2.23 { pager_stats $::p1 -} {ref 0 page 1 max 10 size 1 state 0 err 0 hit 1 miss 1 ovfl 0} +} {ref 0 page 1 max 10 size 1 state 4 err 0 hit 1 miss 1 ovfl 0} do_test pager2-2.24 { set v [catch { page_get $::p1 1 @@ -181,9 +181,9 @@ set ::g1 [page_get $::p1 1] page_read $::g1 } {Page-One} -#do_test pager2-2.99 { -# pager_close $::p1 -#} {} +do_test pager2-2.99 { + page_unref $::g1 +} {} #do_test pager2-3.1 { # set v [catch { @@ -212,16 +212,20 @@ page_unref $gx } pager_commit $::p1 + page_unref $::g(1) } {} for {set i 2} {$i<=20} {incr i} { + set page1 [page_get $::p1 1] do_test pager2-3.6.[expr {$i-1}] [subst { set gx \[page_get $::p1 $i\] set v \[page_read \$gx\] page_unref \$gx set v }] "Page-$i" + page_unref $page1 } for {set i 1} {$i<=20} {incr i} { + set page1 [page_get $::p1 1] regsub -all CNT { set ::g1 [page_get $::p1 CNT] set ::g2 [page_get $::p1 CNT] @@ -243,6 +247,7 @@ expr {$vy==$::vx} } $i body; do_test pager2-3.7.$i.3 $body {1} + page_unref $page1 } do_test pager2-3.99 { pager_close $::p1 @@ -280,7 +285,7 @@ } {ref 1} do_test pager2-4.4 { lrange [pager_stats $::p1] 8 9 -} {state 1} +} {state 4} for {set i 1} {$i<20} {incr i} { do_test pager2-4.5.$i.0 { @@ -394,10 +399,11 @@ do_test pager2-4.5.$i.10 { pager_commit $p1 lrange [pager_stats $p1] 8 9 - } {state 1} + } {state 4} } do_test pager2-4.99 { + page_unref $::g1 pager_close $::p1 } {} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pageropt.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pageropt.test --- sqlite3-3.4.2/test/pageropt.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/pageropt.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,12 +12,12 @@ # The focus of the tests in this file are to verify that the # pager optimizations implemented in version 3.3.14 work. # -# $Id: pageropt.test,v 1.3 2007/08/12 20:07:59 drh Exp $ +# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -ifcapable {!pager_pragmas} { +ifcapable {!pager_pragmas||secure_delete} { finish_test return } @@ -39,12 +39,10 @@ set sqlite3_pager_readdb_count 0 set sqlite3_pager_writedb_count 0 set sqlite3_pager_writej_count 0 - set sqlite3_pager_pgfree_count 0 set r [$db eval $sql] set cnt [list $sqlite3_pager_readdb_count \ $sqlite3_pager_writedb_count \ - $sqlite3_pager_writej_count \ - $sqlite3_pager_pgfree_count] + $sqlite3_pager_writej_count ] return [concat $cnt $r] } @@ -59,12 +57,12 @@ pagercount_sql { CREATE TABLE t1(x); } -} {0 2 0 0} +} {0 2 0} do_test pageropt-1.2 { pagercount_sql { INSERT INTO t1 VALUES(randomblob(5000)); } -} {0 6 2 0} +} {0 6 2} # Verify that values remain in cache on for subsequent reads. # We should not have to go back to disk. @@ -73,7 +71,7 @@ pagercount_sql { SELECT length(x) FROM t1 } -} {0 0 0 0 5000} +} {0 0 0 5000} # If another thread reads the database, the original cache # remains valid. @@ -84,7 +82,7 @@ pagercount_sql { SELECT hex(x) FROM t1 } -} [list 0 0 0 0 $blobcontent] +} [list 0 0 0 $blobcontent] # But if the other thread modifies the database, then the cache # must refill. @@ -94,12 +92,12 @@ pagercount_sql { SELECT hex(x) FROM t1 } -} [list 6 0 0 6 $blobcontent] +} [list 6 0 0 $blobcontent] do_test pageropt-1.6 { pagercount_sql { SELECT hex(x) FROM t1 } -} [list 0 0 0 0 $blobcontent] +} [list 0 0 0 $blobcontent] # Verify that the last page of an overflow chain is not read from # disk when deleting a row. 
The one row of t1(x) has four pages @@ -117,7 +115,7 @@ pagercount_sql { DELETE FROM t1 WHERE rowid=1 } -} {5 3 3 0} +} {5 3 3} # When pulling pages off of the freelist, there is no reason # to actually bring in the old content. @@ -128,12 +126,12 @@ pagercount_sql { INSERT INTO t1 VALUES(randomblob(1500)); } -} {3 4 3 0} +} {3 4 3} do_test pageropt-2.3 { pagercount_sql { INSERT INTO t1 VALUES(randomblob(1500)); } -} {0 4 3 0} +} {0 4 3} # Note the new optimization that when pulling the very last page off of the # freelist we do not read the content of that page. @@ -142,7 +140,7 @@ pagercount_sql { INSERT INTO t1 VALUES(randomblob(1500)); } -} {0 5 3 0} +} {0 5 3} # Appending a large quantity of data does not involve writing much # to the journal file. @@ -151,7 +149,7 @@ pagercount_sql { INSERT INTO t2 SELECT * FROM t1; } -} {1 7 2 0} +} {1 7 2} # Once again, we do not need to read the last page of an overflow chain # while deleting. @@ -160,12 +158,12 @@ pagercount_sql { DROP TABLE t2; } -} {0 2 3 0} +} {0 2 3} do_test pageropt-3.3 { pagercount_sql { DELETE FROM t1; } -} {0 3 3 0} +} {0 3 3} # There are now 11 pages on the freelist. Move them all into an # overflow chain by inserting a single large record. Starting from @@ -180,7 +178,7 @@ pagercount_sql { INSERT INTO t1 VALUES(randomblob(11300)) } -} {3 13 3 0} +} {3 13 3} # Now we delete that big entries starting from a cold cache and an # empty freelist. The first 10 of the 11 pages overflow chain have @@ -188,13 +186,13 @@ # reads total. But only page1, the t1 root, and the trunk of the # freelist need to be journalled and written back. # -do_test pageroot-4.2 { +do_test pageropt-4.2 { db close sqlite3 db test.db pagercount_sql { DELETE FROM t1 } -} {12 3 3 0} +} {12 3 3} sqlite3_soft_heap_limit $soft_limit catch {db2 close} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pager.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pager.test --- sqlite3-3.4.2/test/pager.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/pager.test 2009-06-25 12:35:52.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this script is page cache subsystem. 
# -# $Id: pager.test,v 1.28 2007/04/05 17:15:53 danielk1977 Exp $ +# $Id: pager.test,v 1.35 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] @@ -76,13 +76,13 @@ expr {$::gx!=""} } {1} do_test pager-2.3.5 { + page_unref $::gx pager_stats $::p1 } {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} do_test pager-2.3.6 { expr {$::g1==$::gx} } {1} do_test pager-2.3.7 { - page_unref $::gx pager_stats $::p1 } {ref 1 page 1 max 10 size 0 state 1 err 0 hit 0 miss 1 ovfl 0} do_test pager-2.4 { @@ -184,6 +184,7 @@ page_read $::g1 } {Page-One} do_test pager-2.99 { + page_unref $::g1 pager_close $::p1 } {} @@ -214,6 +215,7 @@ page_unref $gx } pager_commit $::p1 + page_unref $::g(1) } {} for {set i 2} {$i<=20} {incr i} { do_test pager-3.6.[expr {$i-1}] [subst { @@ -410,16 +412,18 @@ ifcapable memorydb { do_test pager-4.6.2 { set ::p2 [pager_open :memory: 10] - pager_truncate $::p2 5 + pager_truncate $::p2 0 } {} do_test pager-4.6.3 { + set page1 [page_get $::p2 1] for {set i 1} {$i<5} {incr i} { set p [page_get $::p2 $i] page_write $p "Page $i" pager_commit $::p2 page_unref $p } - # pager_truncate $::p2 3 + page_unref $page1 + pager_truncate $::p2 3 } {} do_test pager-4.6.4 { pager_close $::p2 @@ -427,6 +431,7 @@ } do_test pager-4.99 { + page_unref $::g1 pager_close $::p1 } {} @@ -454,8 +459,14 @@ # The following tests cover rolling back hot journal files. # They can't be run on windows because the windows version of # SQLite holds a mandatory exclusive lock on journal files it has open. +# +# They cannot be run during the journaltest permutation because +# "PRAGMA synchronous = 0" is used. # -if {$tcl_platform(platform)!="windows"} { +if {$tcl_platform(platform)!="windows" && ( + 0 == [info exists ::permutations_test_prefix] + || $::permutations_test_prefix ne "journaltest" +)} { do_test pager-6.1 { file delete -force test2.db file delete -force test2.db-journal @@ -569,6 +580,3 @@ } {} } finish_test - - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pagesize.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pagesize.test --- sqlite3-3.4.2/test/pagesize.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/pagesize.test 2009-06-05 18:03:40.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. # This file implements tests for the page_size PRAGMA. # -# $Id: pagesize.test,v 1.12 2007/04/06 21:42:22 drh Exp $ +# $Id: pagesize.test,v 1.13 2008/08/26 21:07:27 drh Exp $ set testdir [file dirname $argv0] @@ -79,12 +79,23 @@ if {[info exists SQLITE_MAX_PAGE_SIZE] && $SQLITE_MAX_PAGE_SIZE<$PGSZ} continue ifcapable memorydb { - do_test pagesize-2.$PGSZ.0 { + do_test pagesize-2.$PGSZ.0.1 { db close sqlite3 db :memory: execsql "PRAGMA page_size=$PGSZ;" execsql {PRAGMA page_size} - } 1024 + } $PGSZ + do_test pagesize-2.$PGSZ.0.2 { + execsql {CREATE TABLE t1(x UNIQUE, y UNIQUE, z UNIQUE)} + execsql {PRAGMA page_size} + } $PGSZ + do_test pagesize-2.$PGSZ.0.3 { + execsql { + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,3,4); + SELECT * FROM t1; + } + } {1 2 3 2 3 4} } do_test pagesize-2.$PGSZ.1 { db close diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pcache2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pcache2.test --- sqlite3-3.4.2/test/pcache2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/pcache2.test 2009-06-12 03:37:54.000000000 +0100 @@ -0,0 +1,79 @@ +# 2008 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is focused on testing the pcache module. +# +# $Id: pcache2.test,v 1.3 2008/11/13 16:21:50 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Set up a pcache memory pool so that we can easily track how many +# pages are being used for cache. +# +do_test pcache2-1.1 { + db close + sqlite3_reset_auto_extension + sqlite3_shutdown + sqlite3_config_pagecache 6000 100 + sqlite3_initialize + autoinstall_test_functions + sqlite3_status SQLITE_STATUS_PAGECACHE_USED 1 + sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0 +} {0 0 0} + +# Open up two database connections to separate files. +# +do_test pcache2-1.2 { + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval {PRAGMA cache_size=10} + lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 1 +} {2} +do_test pcache2-1.3 { + file delete -force test2.db test2.db-journal + sqlite3 db2 test2.db + db2 eval {PRAGMA cache_size=50} + lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 1 +} {4} + +# Make lots of changes on the first connection. Verify that the +# page cache usage does not grow to consume the page space set aside +# for the second connection. +# +do_test pcache2-1.4 { + db eval { + CREATE TABLE t1(a,b); + CREATE TABLE t2(x,y); + INSERT INTO t1 VALUES(1, zeroblob(800)); + INSERT INTO t1 VALUES(2, zeroblob(800)); + INSERT INTO t2 SELECT * FROM t1; + INSERT INTO t1 SELECT x+2, y FROM t2; + INSERT INTO t2 SELECT a+10, b FROM t1; + INSERT INTO t1 SELECT x+10, y FROM t2; + INSERT INTO t2 SELECT a+100, b FROM t1; + INSERT INTO t1 SELECT x+100, y FROM t2; + INSERT INTO t2 SELECT a+1000, b FROM t1; + INSERT INTO t1 SELECT x+1000, y FROM t2; + } + sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0 +} {0 13 13} + +db close +catch {db2 close} +sqlite3_reset_auto_extension +sqlite3_shutdown +sqlite3_config_pagecache 0 0 +sqlite3_initialize +autoinstall_test_functions + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pcache.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pcache.test --- sqlite3-3.4.2/test/pcache.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/pcache.test 2009-06-25 12:31:30.000000000 +0100 @@ -0,0 +1,169 @@ +# 2008 August 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is focused on testing the pcache module. +# +# $Id: pcache.test,v 1.5 2009/05/08 06:52:48 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# The pcache module limits the number of pages available to purgeable +# caches to the sum of the 'cache_size' values for the set of open +# caches. This block of tests, pcache-1.*, test that the library behaves +# corrctly when it is forced to exceed this limit. 
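The pcache2-1.1 setup above uses the Tcl wrapper sqlite3_config_pagecache, i.e. the SQLITE_CONFIG_PAGECACHE option of sqlite3_config(), and then polls sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, ...). A rough C sketch of the same configuration; the 6000-byte slot size and 100-slot count simply mirror the test, and the buffer here is an arbitrary static array supplied by the application:

    #include <sqlite3.h>
    #include <stdio.h>

    /* 100 slots of 6000 bytes each; each slot must be big enough for one
    ** database page plus SQLite's per-page bookkeeping. */
    static char aPageCache[6000 * 100];

    int main(void){
      int cur = 0, hi = 0;

      /* Must be configured while the library is shut down. */
      sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageCache, 6000, 100);
      sqlite3_initialize();

      /* ... open connections, run queries ... */

      /* How many of the 100 slots are currently checked out, and the
      ** highwater mark, matching the numbers pcache2-1.2 and later check. */
      sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &cur, &hi, 0);
      printf("pagecache used: current=%d highwater=%d\n", cur, hi);

      sqlite3_shutdown();
      return 0;
    }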
+# +do_test pcache-1.1 { + db close + pcache_stats +} {current 0 max 0 min 0 recyclable 0} + +do_test pcache-1.2 { + sqlite3 db test.db + execsql { + PRAGMA cache_size=12; + PRAGMA auto_vacuum=0; + } + pcache_stats +} {current 1 max 12 min 10 recyclable 1} + +do_test pcache-1.3 { + execsql { + BEGIN; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + CREATE TABLE t3(a, b, c); + CREATE TABLE t4(a, b, c); + CREATE TABLE t5(a, b, c); + } + pcache_stats +} {current 6 max 12 min 10 recyclable 0} + +do_test pcache-1.4 { + execsql { + CREATE TABLE t6(a, b, c); + CREATE TABLE t7(a, b, c); + CREATE TABLE t8(a, b, c); + CREATE TABLE t9(a, b, c); + } + pcache_stats +} {current 10 max 12 min 10 recyclable 0} + +do_test pcache-1.5 { + sqlite3 db2 test.db + execsql "PRAGMA cache_size=10" db2 + pcache_stats +} {current 11 max 22 min 20 recyclable 1} + +do_test pcache-1.6.1 { + execsql { + BEGIN; + SELECT * FROM sqlite_master; + } db2 + pcache_stats +} {current 11 max 22 min 20 recyclable 0} + +# At this point connection db2 has a read lock on the database file and a +# single pinned page in its cache. Connection [db] is holding 10 dirty +# pages. It cannot recycle them because of the read lock held by db2. +# +do_test pcache-1.6.2 { + execsql { + CREATE INDEX i1 ON t1(a, b); + CREATE INDEX i2 ON t2(a, b); + CREATE INDEX i3 ON t3(a, b); + CREATE INDEX i4 ON t4(a, b); + CREATE INDEX i5 ON t5(a, b); + CREATE INDEX i6 ON t6(a, b); + CREATE INDEX i7 ON t7(a, b); + CREATE INDEX i8 ON t8(a, b); + CREATE INDEX i9 ON t9(a, b); + CREATE INDEX i10 ON t9(a, b); + CREATE INDEX i11 ON t9(a, b); + } + pcache_stats +} {current 23 max 22 min 20 recyclable 0} + +do_test pcache-1.7 { + execsql { + CREATE TABLE t10(a, b, c); + } + pcache_stats +} {current 24 max 22 min 20 recyclable 0} + +# Rolling back the transaction held by db2 at this point releases a pinned +# page. Because the number of allocated pages is greater than the +# configured maximum, this page should be freed immediately instead of +# recycled. 
+# +do_test pcache-1.8 { + execsql {ROLLBACK} db2 + pcache_stats +} {current 23 max 22 min 20 recyclable 0} + +do_test pcache-1.9 { + execsql COMMIT + pcache_stats +} {current 22 max 22 min 20 recyclable 22} + +do_test pcache-1.10 { + db2 close + pcache_stats +} {current 12 max 12 min 10 recyclable 12} + +do_test pcache-1.11 { + execsql { PRAGMA cache_size = 20 } + pcache_stats +} {current 12 max 20 min 10 recyclable 12} + +do_test pcache-1.12 { + execsql { + SELECT * FROM t1 ORDER BY a; SELECT * FROM t1; + SELECT * FROM t2 ORDER BY a; SELECT * FROM t2; + SELECT * FROM t3 ORDER BY a; SELECT * FROM t3; + SELECT * FROM t4 ORDER BY a; SELECT * FROM t4; + SELECT * FROM t5 ORDER BY a; SELECT * FROM t5; + SELECT * FROM t6 ORDER BY a; SELECT * FROM t6; + SELECT * FROM t7 ORDER BY a; SELECT * FROM t7; + SELECT * FROM t8 ORDER BY a; SELECT * FROM t8; + SELECT * FROM t9 ORDER BY a; SELECT * FROM t9; + } + pcache_stats +} {current 19 max 20 min 10 recyclable 19} + +do_test pcache-1.13 { + execsql { PRAGMA cache_size = 15 } + pcache_stats +} {current 15 max 15 min 10 recyclable 15} + +do_test pcache-1.14 { + hexio_write test.db 24 [hexio_render_int32 1000] + execsql { SELECT * FROM sqlite_master } + pcache_stats +} {current 2 max 15 min 10 recyclable 2} + +do_test pcache-1.15 { + execsql { + SELECT * FROM t1 ORDER BY a; SELECT * FROM t1; + SELECT * FROM t2 ORDER BY a; SELECT * FROM t2; + SELECT * FROM t3 ORDER BY a; SELECT * FROM t3; + SELECT * FROM t4 ORDER BY a; SELECT * FROM t4; + SELECT * FROM t5 ORDER BY a; SELECT * FROM t5; + SELECT * FROM t6 ORDER BY a; SELECT * FROM t6; + SELECT * FROM t7 ORDER BY a; SELECT * FROM t7; + SELECT * FROM t8 ORDER BY a; SELECT * FROM t8; + SELECT * FROM t9 ORDER BY a; SELECT * FROM t9; + } + pcache_stats +} {current 14 max 15 min 10 recyclable 14} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/permutations.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/permutations.test --- sqlite3-3.4.2/test/permutations.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/permutations.test 2009-06-25 12:34:54.000000000 +0100 @@ -0,0 +1,753 @@ +# 2008 June 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: permutations.test,v 1.50 2009/05/13 14:46:10 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Argument processing. 
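The block that follows consumes the first two command-line arguments as the permutation name and an optional single test file, and leaves the rest of $argv for the scripts it sources. The exact invocation is not spelled out in this file, so the command lines below are illustrative, assuming the usual testfixture binary built from this tree:

# List the available permutations:
#   ./testfixture test/permutations.test targets
# Run one permutation over every non-excluded test file:
#   ./testfixture test/permutations.test persistent_journal
# Run a single file under one permutation (".test" may be omitted,
# since run_tests also matches "$testfile.test"):
#   ./testfixture test/permutations.test persistent_journal select1.test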
+# +#puts "PERM-DEBUG: argv=$argv" +namespace eval ::perm { + variable testmode [lindex $::argv 0] + variable testfile [lindex $::argv 1] +} +set argv [lrange $argv 2 end] +#puts "PERM-DEBUG: testmode=$::perm::testmode tstfile=$::perm::testfile" + +set ::permutations_presql "" +set ::permutations_test_prefix "" + +if {$::perm::testmode eq "veryquick"} { + set ::perm::testmode [list persistent_journal no_journal] + set ISQUICK 1 +} +if {$::perm::testmode eq "quick"} { + set ::perm::testmode [list persistent_journal no_journal autovacuum_ioerr] + set ISQUICK 1 +} +if {$::perm::testmode eq "all" || $::perm::testmode eq ""} { + set ::perm::testmode { + memsubsys1 memsubsys2 singlethread multithread onefile utf16 exclusive + persistent_journal persistent_journal_error no_journal no_journal_error + autovacuum_ioerr no_mutex_try fullmutex journaltest inmemory_journal + pcache0 pcache10 pcache50 pcache90 pcache100 + } +} +if {$::perm::testmode eq "targets"} { + puts "" + puts -nonewline "veryquick " + puts "Same as persistent_journal and no_journal" + puts -nonewline "quick " + puts "Same as persistent_journal, no_journal and autovacuum_ioerr" + puts -nonewline "all " + puts "Everything except autovacuum_crash" +} +#puts "PERM-DEBUG: testmode=$::perm::testmode" + +set EXCLUDE { + all.test in2.test onefile.test + async2.test incrvacuum_ioerr.test permutations.test + async.test jrnlmode2.test quick.test + autovacuum_crash.test jrnlmode3.test shared_err.test + autovacuum_ioerr.test jrnlmode4.test soak.test + btree8.test loadext.test speed1p.test + corrupt.test malloc2.test speed1.test + crash2.test malloc3.test speed2.test + crash3.test malloc4.test speed3.test + crash4.test mallocAll.test speed4p.test + crash6.test malloc.test speed4.test + crash7.test memleak.test sqllimits1.test + crash.test memsubsys1.test thread001.test + exclusive3.test memsubsys2.test thread002.test + fts3.test misc7.test utf16.test + fuzz_malloc.test misuse.test veryquick.test + fuzz.test mutex2.test vtab_err.test + lookaside.test fuzz3.test savepoint4.test + savepoint6.test +} +set ALLTESTS [list] +foreach filename [glob $testdir/*.test] { + set filename [file tail $filename] + if {[lsearch $EXCLUDE $filename] < 0} { lappend ALLTESTS $filename } +} +set ALLTESTS [lsort $ALLTESTS] + +rename finish_test really_finish_test2 +proc finish_test {} {} + +rename do_test really_do_test + +proc do_test {name args} { + eval really_do_test [list "perm-$::permutations_test_prefix.$name"] $args +} + +# Overload the [sqlite3] command +rename sqlite3 really_sqlite3 +proc sqlite3 {args} { + set r [eval really_sqlite3 $args] + if { [llength $args] == 2 && $::permutations_presql ne "" } { + [lindex $args 0] eval $::permutations_presql + } + set r +} + +# run_tests OPTIONS +# +# where available options are: +# +# -initialize SCRIPT (default "") +# -shutdown SCRIPT (default "") +# -include LIST-OF-FILES (default $::ALLTESTS) +# -exclude LIST-OF-FILES (default "") +# -presql SQL (default "") +# -description TITLE (default "") +# +proc run_tests {name args} { + set ::permutations_test_prefix $name + set options(-shutdown) "" + set options(-initialize) "" + set options(-exclude) "" + set options(-include) $::ALLTESTS + set options(-presql) "" + set options(-description) "no description supplied (fixme)" + array set options $args + #puts "PERM-DEBUG: name=$name testfile=$::perm::testfile" + #puts "PERM-DEBUG: [array get options]" + + if {$::perm::testmode eq "targets"} { + puts [format "% -20s %s" $name [string trim $options(-description)]] + return 
+ } + if {$::perm::testmode ne "" && [lsearch $::perm::testmode $name]<0} { + puts "skipping permutation test $name..." + return + } + + uplevel $options(-initialize) + set ::permutations_presql $options(-presql) + + foreach file [lsort $options(-include)] { + if {[lsearch $options(-exclude) $file] < 0 && + ( $::perm::testfile eq "" || + $::perm::testfile eq $file || + "$::perm::testfile.test" eq $file ) + } { + set ::perm::shared_cache_setting [shared_cache_setting] + uplevel source $::testdir/$file + if {$::perm::shared_cache_setting ne [shared_cache_setting]} { + error "File $::testdir/$file changed the shared cache setting from $::perm::shared_cache_setting to [shared_cache_setting]" + } + } else { + # puts "skipping file $file" + } + } + + uplevel $options(-shutdown) + set ::permutations_test_prefix "" +} + +proc shared_cache_setting {} { + set ret 0 + catch { + set ret [sqlite3_enable_shared_cache] + } + return $ret +} + +############################################################################# +# Start of tests + +# Run some tests using pre-allocated page and scratch blocks. +# +run_tests "memsubsys1" -description { + Tests using pre-allocated page and scratch blocks +} -exclude { + ioerr5.test + malloc5.test +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_pagecache 4096 24 + sqlite3_config_scratch 25000 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_pagecache 0 0 + sqlite3_config_scratch 0 0 + sqlite3_initialize + autoinstall_test_functions +} + +# Run some tests using pre-allocated page and scratch blocks. This time +# the allocations are too small to use in most cases. +# +# Both ioerr5.test and malloc5.test are excluded because they test the +# sqlite3_soft_heap_limit() and sqlite3_release_memory() functionality. +# This functionality is disabled if a pre-allocated page block is provided. +# +run_tests "memsubsys2" -description { + Tests using small pre-allocated page and scratch blocks +} -exclude { + ioerr5.test + malloc5.test +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_pagecache 512 5 + sqlite3_config_scratch 1000 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_pagecache 0 0 + sqlite3_config_scratch 0 0 + sqlite3_initialize + autoinstall_test_functions +} + +# Run all tests with the lookaside allocator disabled. +# +run_tests "nolookaside" -description { + OOM tests with lookaside disabled +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_lookaside 0 0 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_lookaside 100 500 + sqlite3_initialize + autoinstall_test_functions +} + +# Run some tests in SQLITE_CONFIG_SINGLETHREAD mode. +# +run_tests "singlethread" -description { + Tests run in SQLITE_CONFIG_SINGLETHREAD mode +} -initialize { + catch {db close} + sqlite3_shutdown + catch {sqlite3_config singlethread} + sqlite3_initialize + autoinstall_test_functions +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test types.test + types2.test types3.test +} -shutdown { + catch {db close} + sqlite3_shutdown + catch {sqlite3_config serialized} + sqlite3_initialize + autoinstall_test_functions +} + +run_tests "nomutex" -description { + Tests run with the SQLITE_OPEN_MULTITHREADED flag passed to sqlite3_open(). 
+} -initialize { + rename sqlite3 sqlite3_nomutex + proc sqlite3 {args} { + if {[string range [lindex $args 0] 0 0] ne "-"} { + lappend args -fullmutex 0 -nomutex 1 + } + uplevel [concat sqlite3_nomutex $args] + } +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test types.test + types2.test types3.test +} -shutdown { + rename sqlite3 {} + rename sqlite3_nomutex sqlite3 +} + +# Run some tests in SQLITE_CONFIG_MULTITHREAD mode. +# +run_tests "multithread" -description { + Tests run in SQLITE_CONFIG_MULTITHREAD mode +} -initialize { + catch {db close} + sqlite3_shutdown + catch {sqlite3_config multithread} + sqlite3_initialize + autoinstall_test_functions +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test types.test + types2.test types3.test +} -shutdown { + catch {db close} + sqlite3_shutdown + catch {sqlite3_config serialized} + sqlite3_initialize + autoinstall_test_functions +} + +# Run some tests in SQLITE_OPEN_FULLMUTEX mode. +# +run_tests "fullmutex" -description { + Tests run in SQLITE_OPEN_FULLMUTEX mode +} -initialize { + rename sqlite3 sqlite3_fullmutex + proc sqlite3 {args} { + if {[string range [lindex $args 0] 0 0] ne "-"} { + lappend args -nomutex 0 -fullmutex 1 + } + uplevel [concat sqlite3_fullmutex $args] + } +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test types.test + types2.test types3.test +} -shutdown { + rename sqlite3 {} + rename sqlite3_fullmutex sqlite3 +} + +# Run some tests using the "onefile" demo. +# +run_tests "onefile" -description { + Run some tests using the "test_onefile.c" demo +} -initialize { + rename sqlite3 sqlite3_onefile + proc sqlite3 {args} { + if {[string range [lindex $args 0] 0 0] ne "-"} { + lappend args -vfs fs + } + uplevel [concat sqlite3_onefile $args] + } +} -include { + conflict.test insert.test insert2.test insert3.test + rollback.test select1.test select2.test select3.test +} -shutdown { + rename sqlite3 {} + rename sqlite3_onefile sqlite3 +} + +# Run some tests using UTF-16 databases. +# +run_tests "utf16" -description { + Run tests using UTF-16 databases +} -presql { + pragma encoding = 'UTF-16' +} -include { + alter.test alter3.test + auth.test bind.test blob.test capi2.test capi3.test collate1.test + collate2.test collate3.test collate4.test collate5.test collate6.test + conflict.test date.test delete.test expr.test fkey1.test func.test + hook.test index.test insert2.test insert.test interrupt.test in.test + intpkey.test ioerr.test join2.test join.test lastinsert.test + laststmtchanges.test limit.test lock2.test lock.test main.test + memdb.test minmax.test misc1.test misc2.test misc3.test notnull.test + null.test progress.test quote.test rowid.test select1.test select2.test + select3.test select4.test select5.test select6.test sort.test + subselect.test tableapi.test table.test temptable.test + trace.test trigger1.test trigger2.test trigger3.test + trigger4.test types2.test types.test unique.test update.test + vacuum.test view.test where.test +} + +# Run some tests in exclusive locking mode. +# +run_tests "exclusive" -description { + Run tests in exclusive locking mode. +} -presql { + pragma locking_mode = 'exclusive' +} -include { + rollback.test select1.test select2.test + malloc.test ioerr.test +} + +# Run some tests in exclusive locking mode with truncated journals. 
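Several of the journal-related permutations below differ only in their -presql script, which the overridden [sqlite3] command replays against every connection a test opens. The journal_mode values involved (WAL does not exist in this release) behave as follows, so a test running under one of these permutations is equivalent to it having issued the pragma itself:

# DELETE   - default: the rollback journal is deleted at commit
# PERSIST  - the journal is left on disk with its header zeroed
# TRUNCATE - the journal is truncated to zero bytes at commit
# MEMORY   - the rollback journal is kept only in memory
# OFF      - no rollback journal at all
sqlite3 db test.db
db eval {PRAGMA journal_mode = persist}   ;# what the presql hook injects
db eval {PRAGMA journal_mode}             ;# reports the mode now in force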
+# +run_tests "exclusive-truncate" -description { + Run tests in exclusive locking mode and truncate journal mode. +} -presql { + pragma locking_mode = 'exclusive'; + pragma journal_mode = TRUNCATE; +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test update.test malloc.test ioerr.test +} + +# Run some tests in persistent journal mode. +# +run_tests "persistent_journal" -description { + Run tests in persistent-journal mode. +} -presql { + pragma journal_mode = persist +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test +} + +# Run some tests in truncating journal mode. +# +run_tests "truncate_journal" -description { + Run tests in persistent-journal mode. +} -presql { + pragma journal_mode = truncate +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test + malloc.test ioerr.test +} + +# Run some error tests in persistent journal mode. +# +run_tests "persistent_journal_error" -description { + Run malloc.test and ioerr.test in persistent-journal mode. +} -presql { + pragma journal_mode = persist +} -include { + malloc.test ioerr.test +} + +# Run some tests in no journal mode. +# +run_tests "no_journal" -description { + Run tests in no-journal mode. +} -presql { + pragma journal_mode = persist +} -include { + delete.test delete2.test insert.test rollback.test select1.test + select2.test trans.test update.test vacuum.test +} + +# Run some error tests in no journal mode. +# +run_tests "no_journal_error" -description { + Run malloc.test and ioerr.test in no-journal mode. +} -presql { + pragma journal_mode = persist +} -include { + malloc.test ioerr.test +} + +# Run some crash-tests in autovacuum mode. +# +run_tests "autovacuum_crash" -description { + Run crash.test in autovacuum mode. +} -presql { + pragma auto_vacuum = 1 +} -include crash.test + +# Run some ioerr-tests in autovacuum mode. +# +run_tests "autovacuum_ioerr" -description { + Run ioerr.test in autovacuum mode. +} -presql { + pragma auto_vacuum = 1 +} -include ioerr.test + +# Run tests with an in-memory journal. +# +run_tests "inmemory_journal" -description { + Run tests with an in-memory journal file. +} -presql { + pragma journal_mode = 'memory' +} -exclude { + # Exclude all tests that simulate IO errors. + autovacuum_ioerr2.test incrvacuum_ioerr.test ioerr.test + ioerr.test ioerr2.test ioerr3.test ioerr4.test ioerr5.test + vacuum3.test incrblob_err.test diskfull.test backup_ioerr.test + + # Exclude test scripts that use tcl IO to access journal files or count + # the number of fsync() calls. + pager.test exclusive.test jrnlmode.test sync.test misc1.test + journal1.test conflict.test crash8.test tkt3457.test io.test +} + +ifcapable mem3 { + run_tests "memsys3" -description { + Run tests using the allocator in mem3.c. 
+ } -exclude { + autovacuum.test delete3.test manydb.test + bigrow.test incrblob2.test memdb.test + bitvec.test index2.test memsubsys1.test + capi3c.test ioerr.test memsubsys2.test + capi3.test join3.test pagesize.test + collate5.test limit.test + } -initialize { + catch {db close} + sqlite3_reset_auto_extension + sqlite3_shutdown + sqlite3_config_heap 25000000 0 + sqlite3_config_lookaside 0 0 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_heap 0 0 + sqlite3_config_lookaside 100 500 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } +} + +ifcapable mem5 { + run_tests "memsys5" -description { + Run tests using the allocator in mem5.c. + } -exclude { + autovacuum.test delete3.test manydb.test + bigrow.test incrblob2.test memdb.test + bitvec.test index2.test memsubsys1.test + capi3c.test ioerr.test memsubsys2.test + capi3.test join3.test pagesize.test + collate5.test limit.test zeroblob.test + } -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_heap 25000000 64 + sqlite3_config_lookaside 0 0 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_heap 0 0 + sqlite3_config_lookaside 100 500 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } + + run_tests "memsys5-2" -description { + Run tests using the allocator in mem5.c in a different configuration. + } -include { + select1.test + } -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_heap 40000000 16 + sqlite3_config_lookaside 0 0 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_heap 0 0 + sqlite3_config_lookaside 100 500 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + } +} + +ifcapable threadsafe { + run_tests "no_mutex_try" -description { + The sqlite3_mutex_try() interface always fails + } -exclude [concat $EXCLUDE mutex1.test mutex2.test] \ + -initialize { + catch {db close} + sqlite3_shutdown + install_mutex_counters 1 + set ::disable_mutex_try 1 + sqlite3_initialize + autoinstall_test_functions + } -shutdown { + catch {db close} + sqlite3_shutdown + install_mutex_counters 0 + sqlite3_initialize + autoinstall_test_functions + } +} + +# run_tests "crash_safe_append" -description { +# Run crash.test with persistent journals on a SAFE_APPEND file-system. +# } -initialize { +# rename crashsql sa_crashsql +# proc crashsql {args} { +# set options [lrange $args 0 [expr {[llength $args]-2}]] +# lappend options -char safe_append +# set sql [lindex $args end] +# lappend options " +# PRAGMA journal_mode=persistent; +# $sql +# " +# set fd [open test.db-journal w] +# puts $fd [string repeat 1234567890 100000] +# close $fd +# eval sa_crashsql $options +# } +# } -shutdown { +# rename crashsql {} +# rename sa_crashsql crashsql +# } -include crash.test + +run_tests "safe_append" -description { + Run some tests on a SAFE_APPEND file-system. 
+} -initialize { + rename sqlite3 sqlite3_safeappend + proc sqlite3 {args} { + if {[string range [lindex $args 0] 0 0] ne "-"} { + lappend args -vfs devsym + } + uplevel [concat sqlite3_safeappend $args] + } + sqlite3_simulate_device -char safe_append +} -shutdown { + rename sqlite3 {} + rename sqlite3_shutdown sqlite3 +} -include [lsort [concat shared_err.test $ALLTESTS]] \ + -exclude async3.test + +# The set of tests to run on the alternative-pcache +set perm-alt-pcache-testset { + async.test + attach.test + delete.test delete2.test + index.test + insert.test insert2.test + join.test join2.test + rollback.test + select1.test select2.test + trans.test + update.test +} + +run_tests "pcache0" -description { + Alternative pcache implementation without random discard +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 0 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_config_lookaside 100 500 + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions +} -include ${perm-alt-pcache-testset} + +run_tests "pcache10" -description { + Alternative pcache implementation without 10% random discard +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 50 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_initialize + autoinstall_test_functions +} -include ${perm-alt-pcache-testset} + +run_tests "pcache50" -description { + Alternative pcache implementation without 50% random discard +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 50 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_initialize + autoinstall_test_functions +} -include ${perm-alt-pcache-testset} + +run_tests "pcache90" -description { + Alternative pcache implementation without 90% random discard +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 50 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_initialize + autoinstall_test_functions +} -include ${perm-alt-pcache-testset} + +run_tests "pcache100" -description { + Alternative pcache implementation that always discards when unpinning +} -initialize { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 1 100 1 + sqlite3_initialize + autoinstall_test_functions +} -shutdown { + catch {db close} + sqlite3_shutdown + sqlite3_config_alt_pcache 0 0 0 + sqlite3_initialize + autoinstall_test_functions +} -include ${perm-alt-pcache-testset} + +run_tests "journaltest" -description { + Check that pages are synced before being written (test_journal.c). +} -initialize { + set ISQUICK 1 + catch {db close} + register_jt_vfs -default "" + #sqlite3_instvfs binarylog -default binarylog ostrace.bin +} -shutdown { + #sqlite3_instvfs destroy binarylog + unregister_jt_vfs +} -include [concat $::ALLTESTS savepoint6.test +] -exclude { + incrvacuum.test + ioerr.test + corrupt4.test + io.test + crash8.test + async4.test +} + +# End of tests +############################################################################# + +if {$::perm::testmode eq "targets"} { puts "" ; exit } + +# Restore the [sqlite3] command. 
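The teardown at the end of this script undoes the wrappers installed near the top using the standard Tcl rename idiom: stash the original command under a new name, define a wrapper in its place, and later delete the wrapper and rename the original back. End to end, for [sqlite3], mirroring the code in this file:

rename sqlite3 really_sqlite3          ;# stash the original command
proc sqlite3 {args} {
  set r [eval really_sqlite3 $args]    ;# delegate to the saved original
  # ...permutation-specific extra work (e.g. running the presql)...
  set r
}
# ...and when the permutation run is finished:
rename sqlite3 {}
rename really_sqlite3 sqlite3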
+# +rename sqlite3 {} +rename really_sqlite3 sqlite3 + +# Restore the [finish_test] command. +# +rename finish_test "" +rename really_finish_test2 finish_test + +# Restore the [do_test] command. +# +rename do_test "" +rename really_do_test do_test + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pragma2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pragma2.test --- sqlite3-3.4.2/test/pragma2.test 2007-06-27 11:20:01.000000000 +0100 +++ sqlite3-3.6.16/test/pragma2.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests for the PRAGMA command. # -# $Id: pragma2.test,v 1.2 2007/06/27 10:20:01 drh Exp $ +# $Id: pragma2.test,v 1.4 2007/10/09 08:29:33 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -24,7 +24,7 @@ # pragma2-3.*: Test trying to write to the freelist_count is a no-op. # -ifcapable !pragma { +ifcapable !pragma||!schema_pragmas { finish_test return } @@ -64,54 +64,56 @@ file delete -force test2.db file delete -force test2.db-journal -do_test pragma2-2.1 { - execsql { - ATTACH 'test2.db' AS aux; - PRAGMA aux.auto_vacuum=OFF; - PRAGMA aux.freelist_count; - } -} {0} -do_test pragma2-2.2 { - execsql { - CREATE TABLE aux.abc(a, b, c); - PRAGMA aux.freelist_count; - } -} {0} -do_test pragma2-2.3 { - set ::val [string repeat 0123456789 1000] - execsql { - INSERT INTO aux.abc VALUES(1, 2, $::val); - PRAGMA aux.freelist_count; - } -} {0} -do_test pragma2-2.4 { - expr {[file size test2.db] / 1024} -} {11} -do_test pragma2-2.5 { - execsql { - DELETE FROM aux.abc; - PRAGMA aux.freelist_count; - } -} {9} - -do_test pragma2-3.1 { - execsql { - PRAGMA aux.freelist_count; - PRAGMA main.freelist_count; - PRAGMA freelist_count; - } -} {9 1 1} -do_test pragma2-3.2 { - execsql { - PRAGMA freelist_count = 500; - PRAGMA freelist_count; - } -} {1 1} -do_test pragma2-3.3 { - execsql { - PRAGMA aux.freelist_count = 500; - PRAGMA aux.freelist_count; - } -} {9 9} +ifcapable attach { + do_test pragma2-2.1 { + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.auto_vacuum=OFF; + PRAGMA aux.freelist_count; + } + } {0} + do_test pragma2-2.2 { + execsql { + CREATE TABLE aux.abc(a, b, c); + PRAGMA aux.freelist_count; + } + } {0} + do_test pragma2-2.3 { + set ::val [string repeat 0123456789 1000] + execsql { + INSERT INTO aux.abc VALUES(1, 2, $::val); + PRAGMA aux.freelist_count; + } + } {0} + do_test pragma2-2.4 { + expr {[file size test2.db] / 1024} + } {11} + do_test pragma2-2.5 { + execsql { + DELETE FROM aux.abc; + PRAGMA aux.freelist_count; + } + } {9} + + do_test pragma2-3.1 { + execsql { + PRAGMA aux.freelist_count; + PRAGMA main.freelist_count; + PRAGMA freelist_count; + } + } {9 1 1} + do_test pragma2-3.2 { + execsql { + PRAGMA freelist_count = 500; + PRAGMA freelist_count; + } + } {1 1} + do_test pragma2-3.3 { + execsql { + PRAGMA aux.freelist_count = 500; + PRAGMA aux.freelist_count; + } + } {9 9} +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/pragma.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/pragma.test --- sqlite3-3.4.2/test/pragma.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/pragma.test 2009-06-20 00:28:24.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests for the PRAGMA command. # -# $Id: pragma.test,v 1.54 2007/05/17 16:38:30 danielk1977 Exp $ +# $Id: pragma.test,v 1.73 2009/01/12 14:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -31,6 +31,10 @@ # pragma-9.*: Test temp_store and temp_store_directory. 
# pragma-10.*: Test the count_changes pragma in the presence of triggers. # pragma-11.*: Test the collation_list pragma. +# pragma-14.*: Test the page_count pragma. +# pragma-15.*: Test that the value set using the cache_size pragma is not +# reset when the schema is reloaded. +# pragma-16.*: Test proxy locking # ifcapable !pragma { @@ -85,7 +89,7 @@ } [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 0] do_test pragma-1.5 { execsql { - PRAGMA cache_size=4321; + PRAGMA cache_size=-4321; PRAGMA cache_size; PRAGMA default_cache_size; PRAGMA synchronous; @@ -110,7 +114,7 @@ } [list $DFLT_CACHE_SZ $DFLT_CACHE_SZ 2] do_test pragma-1.8 { execsql { - PRAGMA default_cache_size=123; + PRAGMA default_cache_size=-123; PRAGMA cache_size; PRAGMA default_cache_size; PRAGMA synchronous; @@ -210,33 +214,33 @@ } {} # Test modifying the safety_level of an attached database. -do_test pragma-2.1 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - ATTACH 'test2.db' AS aux; - } -} {} -ifcapable pager_pragmas { -do_test pragma-2.2 { - execsql { - pragma aux.synchronous; - } -} {2} -do_test pragma-2.3 { - execsql { - pragma aux.synchronous = OFF; - pragma aux.synchronous; - pragma synchronous; - } -} {0 2} -do_test pragma-2.4 { - execsql { - pragma aux.synchronous = ON; - pragma synchronous; - pragma aux.synchronous; - } -} {2 1} +ifcapable pager_pragmas&&attach { + do_test pragma-2.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS aux; + } + } {} + do_test pragma-2.2 { + execsql { + pragma aux.synchronous; + } + } {2} + do_test pragma-2.3 { + execsql { + pragma aux.synchronous = OFF; + pragma aux.synchronous; + pragma synchronous; + } + } {0 2} + do_test pragma-2.4 { + execsql { + pragma aux.synchronous = ON; + pragma synchronous; + pragma aux.synchronous; + } + } {2 1} } ;# ifcapable pager_pragmas # Construct a corrupted index and make sure the integrity_check @@ -259,158 +263,165 @@ SELECT rowid, * from t2; } } {1 11 2 3 2 22 3 4} -if {![sqlite3 -has-codec] && $sqlite_options(integrityck)} { - do_test pragma-3.2 { - set rootpage [execsql {SELECT rootpage FROM sqlite_master WHERE name='i2'}] - set db [btree_open test.db 100 0] - btree_begin_transaction $db - set c [btree_cursor $db $rootpage 1] - btree_first $c - btree_delete $c - btree_commit $db - btree_close $db - execsql {PRAGMA integrity_check} - } {{rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.3 { - execsql {PRAGMA integrity_check=1} - } {{rowid 1 missing from index i2}} - do_test pragma-3.4 { - execsql { - ATTACH DATABASE 'test.db' AS t2; - PRAGMA integrity_check - } - } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.5 { - execsql { - PRAGMA integrity_check=3 - } - } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2}} - do_test pragma-3.6 { - execsql { - PRAGMA integrity_check=xyz - } - } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.7 { - execsql { - PRAGMA integrity_check=0 - } - } {{rowid 1 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - - # Add additional corruption by appending unused pages to the end of - # the database file testerr.db - # - do_test pragma-3.8 { - execsql {DETACH t2} - file delete -force 
testerr.db testerr.db-journal - set out [open testerr.db w] - fconfigure $out -translation binary - set in [open test.db r] - fconfigure $in -translation binary - puts -nonewline $out [read $in] - seek $in 0 - puts -nonewline $out [read $in] - close $in - close $out - execsql {REINDEX t2} - execsql {PRAGMA integrity_check} - } {ok} - do_test pragma-3.9 { - execsql { - ATTACH 'testerr.db' AS t2; - PRAGMA integrity_check - } - } {{*** in database t2 *** +ifcapable attach { + if {![sqlite3 -has-codec] && $sqlite_options(integrityck)} { + do_test pragma-3.2 { + db eval {SELECT rootpage FROM sqlite_master WHERE name='i2'} break + set pgsz [db eval {PRAGMA page_size}] + # overwrite the header on the rootpage of the index in order to + # make the index appear to be empty. + # + set offset [expr {$pgsz*($rootpage-1)}] + hexio_write test.db $offset 0a00000000040000000000 + db close + sqlite3 db test.db + execsql {PRAGMA integrity_check} + } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.3 { + execsql {PRAGMA integrity_check=1} + } {{rowid 1 missing from index i2}} + do_test pragma-3.4 { + execsql { + ATTACH DATABASE 'test.db' AS t2; + PRAGMA integrity_check + } + } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.5 { + execsql { + PRAGMA integrity_check=4 + } + } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2}} + do_test pragma-3.6 { + execsql { + PRAGMA integrity_check=xyz + } + } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.7 { + execsql { + PRAGMA integrity_check=0 + } + } {{rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + + # Add additional corruption by appending unused pages to the end of + # the database file testerr.db + # + do_test pragma-3.8 { + execsql {DETACH t2} + file delete -force testerr.db testerr.db-journal + set out [open testerr.db w] + fconfigure $out -translation binary + set in [open test.db r] + fconfigure $in -translation binary + puts -nonewline $out [read $in] + seek $in 0 + puts -nonewline $out [read $in] + close $in + close $out + execsql {REINDEX t2} + execsql {PRAGMA integrity_check} + } {ok} + do_test pragma-3.8.1 { + execsql {PRAGMA quick_check} + } {ok} + do_test pragma-3.9 { + execsql { + ATTACH 'testerr.db' AS t2; + PRAGMA integrity_check + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.10 { - execsql { - PRAGMA integrity_check=1 - } - } {{*** in database t2 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.10 { + execsql { + PRAGMA integrity_check=1 + } + } {{*** in database t2 *** Page 4 is never used}} - do_test pragma-3.11 { - execsql { - PRAGMA integrity_check=5 - } - } {{*** in database t2 *** + do_test pragma-3.11 { + execsql { + PRAGMA integrity_check=5 + } + } {{*** in database t2 *** Page 4 is never used 
Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.12 { - execsql { - PRAGMA integrity_check=4 - } - } {{*** in database t2 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2}} + do_test pragma-3.12 { + execsql { + PRAGMA integrity_check=4 + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used Page 6 is never used} {rowid 1 missing from index i2}} - do_test pragma-3.13 { - execsql { - PRAGMA integrity_check=3 - } - } {{*** in database t2 *** + do_test pragma-3.13 { + execsql { + PRAGMA integrity_check=3 + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used Page 6 is never used}} - do_test pragma-3.14 { - execsql { - PRAGMA integrity_check(2) - } - } {{*** in database t2 *** + do_test pragma-3.14 { + execsql { + PRAGMA integrity_check(2) + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used}} - do_test pragma-3.15 { - execsql { - ATTACH 'testerr.db' AS t3; - PRAGMA integrity_check - } - } {{*** in database t2 *** + do_test pragma-3.15 { + execsql { + ATTACH 'testerr.db' AS t3; + PRAGMA integrity_check + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** Page 4 is never used Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2}} - do_test pragma-3.16 { - execsql { - PRAGMA integrity_check(9) - } - } {{*** in database t2 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2}} + do_test pragma-3.16 { + execsql { + PRAGMA integrity_check(10) + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** Page 4 is never used Page 5 is never used Page 6 is never used} {rowid 1 missing from index i2}} - do_test pragma-3.17 { - execsql { - PRAGMA integrity_check=7 - } - } {{*** in database t2 *** + do_test pragma-3.17 { + execsql { + PRAGMA integrity_check=8 + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used -Page 6 is never used} {rowid 1 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** +Page 6 is never used} {rowid 1 missing from index i2} {rowid 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 *** Page 4 is never used Page 5 is never used}} - do_test pragma-3.18 { - execsql { - PRAGMA integrity_check=4 - } - } {{*** in database t2 *** + do_test pragma-3.18 { + execsql { + PRAGMA integrity_check=4 + } + } {{*** in database t2 *** Page 4 is never used Page 5 is never used Page 6 is never used} {rowid 1 missing from index i2}} + } + do_test pragma-3.19 { + catch {db close} + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval {PRAGMA integrity_check} + } {ok} } -do_test pragma-3.99 { - catchsql {DETACH t3} - catchsql {DETACH t2} - file delete -force testerr.db testerr.db-journal - catchsql {DROP INDEX i2} -} {0 {}} +#exit # Test modifying the cache_size of an 
attached database. -ifcapable pager_pragmas { +ifcapable pager_pragmas&&attach { do_test pragma-4.1 { execsql { ATTACH 'test2.db' AS aux; @@ -481,7 +492,7 @@ # Test schema-query pragmas # ifcapable schema_pragmas { -ifcapable tempdb { +ifcapable tempdb&&attach { do_test pragma-6.1 { set res {} execsql {SELECT * FROM sqlite_temp_master} @@ -493,9 +504,15 @@ } do_test pragma-6.2 { execsql { + CREATE TABLE t2(a,b,c); pragma table_info(t2) } } {0 a {} 0 {} 0 1 b {} 0 {} 0 2 c {} 0 {} 0} +do_test pragma-6.2.1 { + execsql { + pragma table_info; + } +} {} db nullvalue <> do_test pragma-6.2.2 { execsql { @@ -511,12 +528,27 @@ } {0 a TEXT 0 CURRENT_TIMESTAMP 0 1 b {} 0 5+3 0 2 c TEXT 0 <> 0 3 d INTEGER 0 NULL 0 4 e TEXT 0 '' 0} db nullvalue {} ifcapable {foreignkey} { - do_test pragma-6.3 { + do_test pragma-6.3.1 { execsql { CREATE TABLE t3(a int references t2(b), b UNIQUE); pragma foreign_key_list(t3); } - } {0 0 t2 a b} + } {0 0 t2 a b RESTRICT RESTRICT NONE} + do_test pragma-6.3.2 { + execsql { + pragma foreign_key_list; + } + } {} + do_test pragma-6.3.3 { + execsql { + pragma foreign_key_list(t3_bogus); + } + } {} + do_test pragma-6.3.4 { + execsql { + pragma foreign_key_list(t5); + } + } {} do_test pragma-6.4 { execsql { pragma index_list(t3); @@ -526,17 +558,68 @@ ifcapable {!foreignkey} { execsql {CREATE TABLE t3(a,b UNIQUE)} } -do_test pragma-6.5 { +do_test pragma-6.5.1 { execsql { CREATE INDEX t3i1 ON t3(a,b); pragma index_info(t3i1); } } {0 0 a 1 1 b} +do_test pragma-6.5.2 { + execsql { + pragma index_info(t3i1_bogus); + } +} {} + +ifcapable tempdb { + # Test for ticket #3320. When a temp table of the same name exists, make + # sure the schema of the main table can still be queried using + # "pragma table_info": + do_test pragma-6.6.1 { + execsql { + CREATE TABLE trial(col_main); + CREATE TEMP TABLE trial(col_temp); + } + } {} + do_test pragma-6.6.2 { + execsql { + PRAGMA table_info(trial); + } + } {0 col_temp {} 0 {} 0} + do_test pragma-6.6.3 { + execsql { + PRAGMA temp.table_info(trial); + } + } {0 col_temp {} 0 {} 0} + do_test pragma-6.6.4 { + execsql { + PRAGMA main.table_info(trial); + } + } {0 col_main {} 0 {} 0} +} + +do_test pragma-6.7 { + execsql { + CREATE TABLE test_table( + one INT NOT NULL DEFAULT -1, + two text, + three VARCHAR(45, 65) DEFAULT 'abcde', + four REAL DEFAULT X'abcdef', + five DEFAULT CURRENT_TIME + ); + PRAGMA table_info(test_table); + } +} [concat \ + {0 one INT 1 -1 0} \ + {1 two text 0 {} 0} \ + {2 three {VARCHAR(45, 65)} 0 'abcde' 0} \ + {3 four REAL 0 X'abcdef' 0} \ + {4 five {} 0 CURRENT_TIME 0} \ +] } ;# ifcapable schema_pragmas # Miscellaneous tests # ifcapable schema_pragmas { -do_test pragma-7.1 { +do_test pragma-7.1.1 { # Make sure a pragma knows to read the schema if it needs to db close sqlite3 db test.db @@ -544,6 +627,11 @@ pragma index_list(t3); } } {0 t3i1 0 1 sqlite_autoindex_t3_1 1} +do_test pragma-7.1.2 { + execsql { + pragma index_list(t3_bogus); + } +} {} } ;# ifcapable schema_pragmas ifcapable {utf16} { do_test pragma-7.2 { @@ -647,18 +735,20 @@ # Make sure the schema-version can be manipulated in an attached database. 
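The schema version touched by the next few tests is the 32-bit schema cookie stored at byte offset 40 of the database header. SQLite bumps it automatically on every schema change, and a connection whose cached schema carries a stale cookie receives SQLITE_SCHEMA the next time one of its prepared statements runs, which is exactly what pragma-8.1.14 through 8.1.17 below rely on. A small sketch of reading and bumping it by hand (safe only in tests, since it forces every other connection to reload its schema):

sqlite3 db test.db
set v [db one {PRAGMA schema_version}]
db eval "PRAGMA schema_version = [expr {$v+1}]"
db one {PRAGMA schema_version}        ;# now reports the bumped value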
file delete -force test2.db file delete -force test2.db-journal -do_test pragma-8.1.11 { - execsql { - ATTACH 'test2.db' AS aux; - CREATE TABLE aux.t1(a, b, c); - PRAGMA aux.schema_version = 205; - } -} {} -do_test pragma-8.1.12 { - execsql { - PRAGMA aux.schema_version; - } -} 205 +ifcapable attach { + do_test pragma-8.1.11 { + execsql { + ATTACH 'test2.db' AS aux; + CREATE TABLE aux.t1(a, b, c); + PRAGMA aux.schema_version = 205; + } + } {} + do_test pragma-8.1.12 { + execsql { + PRAGMA aux.schema_version; + } + } 205 +} do_test pragma-8.1.13 { execsql { PRAGMA schema_version; @@ -667,28 +757,30 @@ # And check that modifying the schema-version in an attached database # forces the second connection to reload the schema. -do_test pragma-8.1.14 { - sqlite3 db2 test.db; set ::DB2 [sqlite3_connection_pointer db2] - execsql { - ATTACH 'test2.db' AS aux; - SELECT * FROM aux.t1; - } db2 -} {} -do_test pragma-8.1.15 { - execsql { - PRAGMA aux.schema_version = 206; - } -} {} -do_test pragma-8.1.16 { - set ::STMT [sqlite3_prepare $::DB2 "SELECT * FROM aux.t1" -1 DUMMY] - sqlite3_step $::STMT -} SQLITE_ERROR -do_test pragma-8.1.17 { - sqlite3_finalize $::STMT -} SQLITE_SCHEMA -do_test pragma-8.1.18 { - db2 close -} {} +ifcapable attach { + do_test pragma-8.1.14 { + sqlite3 db2 test.db; set ::DB2 [sqlite3_connection_pointer db2] + execsql { + ATTACH 'test2.db' AS aux; + SELECT * FROM aux.t1; + } db2 + } {} + do_test pragma-8.1.15 { + execsql { + PRAGMA aux.schema_version = 206; + } + } {} + do_test pragma-8.1.16 { + set ::STMT [sqlite3_prepare $::DB2 "SELECT * FROM aux.t1" -1 DUMMY] + sqlite3_step $::STMT + } SQLITE_ERROR + do_test pragma-8.1.17 { + sqlite3_finalize $::STMT + } SQLITE_SCHEMA + do_test pragma-8.1.18 { + db2 close + } {} +} # Now test that the user-version can be read and written (and that we aren't # accidentally manipulating the schema-version instead). @@ -732,61 +824,64 @@ } } {109} } -db eval {ATTACH 'test2.db' AS aux} -# Check that the user-version in the auxilary database can be manipulated ( -# and that we aren't accidentally manipulating the same in the main db). -do_test pragma-8.2.5 { - execsql { - PRAGMA aux.user_version; - } -} {0} -do_test pragma-8.2.6 { - execsql { - PRAGMA aux.user_version = 3; - } -} {} -do_test pragma-8.2.7 { - execsql { - PRAGMA aux.user_version; - } -} {3} -do_test pragma-8.2.8 { - execsql { - PRAGMA main.user_version; - } -} {2} - -# Now check that a ROLLBACK resets the user-version if it has been modified -# within a transaction. -do_test pragma-8.2.9 { - execsql { - BEGIN; - PRAGMA aux.user_version = 10; - PRAGMA user_version = 11; - } -} {} -do_test pragma-8.2.10 { - execsql { - PRAGMA aux.user_version; - } -} {10} -do_test pragma-8.2.11 { - execsql { - PRAGMA main.user_version; - } -} {11} -do_test pragma-8.2.12 { - execsql { - ROLLBACK; - PRAGMA aux.user_version; - } -} {3} -do_test pragma-8.2.13 { - execsql { - PRAGMA main.user_version; - } -} {2} +ifcapable attach { + db eval {ATTACH 'test2.db' AS aux} + + # Check that the user-version in the auxilary database can be manipulated ( + # and that we aren't accidentally manipulating the same in the main db). 
+ do_test pragma-8.2.5 { + execsql { + PRAGMA aux.user_version; + } + } {0} + do_test pragma-8.2.6 { + execsql { + PRAGMA aux.user_version = 3; + } + } {} + do_test pragma-8.2.7 { + execsql { + PRAGMA aux.user_version; + } + } {3} + do_test pragma-8.2.8 { + execsql { + PRAGMA main.user_version; + } + } {2} + + # Now check that a ROLLBACK resets the user-version if it has been modified + # within a transaction. + do_test pragma-8.2.9 { + execsql { + BEGIN; + PRAGMA aux.user_version = 10; + PRAGMA user_version = 11; + } + } {} + do_test pragma-8.2.10 { + execsql { + PRAGMA aux.user_version; + } + } {10} + do_test pragma-8.2.11 { + execsql { + PRAGMA main.user_version; + } + } {11} + do_test pragma-8.2.12 { + execsql { + ROLLBACK; + PRAGMA aux.user_version; + } + } {3} + do_test pragma-8.2.13 { + execsql { + PRAGMA main.user_version; + } + } {2} +} # Try a negative value for the user-version do_test pragma-8.2.14 { @@ -801,6 +896,23 @@ } {-450} } ; # ifcapable schema_version +# Check to see if TEMP_STORE is memory or disk. Return strings +# "memory" or "disk" as appropriate. +# +proc check_temp_store {} { + db eval {CREATE TEMP TABLE IF NOT EXISTS a(b)} + db eval {PRAGMA database_list} { + if {$name=="temp"} { + set bt [btree_from_db db 1] + if {[btree_ismemdb $bt]} { + return "memory" + } + return "disk" + } + } + return "unknown" +} + # Test temp_store and temp_store_directory pragmas # @@ -812,63 +924,101 @@ PRAGMA temp_store; } } {0} +if {$TEMP_STORE<=1} { + do_test pragma-9.1.1 { + check_temp_store + } {disk} +} else { + do_test pragma-9.1.1 { + check_temp_store + } {memory} +} + do_test pragma-9.2 { + db close + sqlite3 db test.db execsql { PRAGMA temp_store=file; PRAGMA temp_store; } } {1} +if {$TEMP_STORE==3} { + # When TEMP_STORE is 3, always use memory regardless of pragma settings. + do_test pragma-9.2.1 { + check_temp_store + } {memory} +} else { + do_test pragma-9.2.1 { + check_temp_store + } {disk} +} + do_test pragma-9.3 { + db close + sqlite3 db test.db execsql { PRAGMA temp_store=memory; PRAGMA temp_store; } } {2} +if {$TEMP_STORE==0} { + # When TEMP_STORE is 0, always use the disk regardless of pragma settings. 
+ do_test pragma-9.3.1 { + check_temp_store + } {disk} +} else { + do_test pragma-9.3.1 { + check_temp_store + } {memory} +} + do_test pragma-9.4 { execsql { PRAGMA temp_store_directory; } } {} -do_test pragma-9.5 { - set pwd [string map {' ''} [pwd]] - execsql " - PRAGMA temp_store_directory='$pwd'; - " -} {} -do_test pragma-9.6 { - execsql { - PRAGMA temp_store_directory; - } -} [list [pwd]] -do_test pragma-9.7 { - catchsql { - PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR'; - } -} {1 {not a writable directory}} -do_test pragma-9.8 { - execsql { - PRAGMA temp_store_directory=''; - } -} {} -if {![info exists TEMP_STORE] || $TEMP_STORE<=1} { - ifcapable tempdb { - do_test pragma-9.9 { - execsql { - PRAGMA temp_store_directory; - PRAGMA temp_store=FILE; - CREATE TEMP TABLE temp_store_directory_test(a integer); - INSERT INTO temp_store_directory_test values (2); - SELECT * FROM temp_store_directory_test; - } - } {2} - do_test pragma-9.10 { - catchsql " - PRAGMA temp_store_directory='$pwd'; - SELECT * FROM temp_store_directory_test; - " - } {1 {no such table: temp_store_directory_test}} +ifcapable wsd { + do_test pragma-9.5 { + set pwd [string map {' ''} [file nativename [pwd]]] + execsql " + PRAGMA temp_store_directory='$pwd'; + " + } {} + do_test pragma-9.6 { + execsql { + PRAGMA temp_store_directory; + } + } [list [file nativename [pwd]]] + do_test pragma-9.7 { + catchsql { + PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR'; + } + } {1 {not a writable directory}} + do_test pragma-9.8 { + execsql { + PRAGMA temp_store_directory=''; + } + } {} + if {![info exists TEMP_STORE] || $TEMP_STORE<=1} { + ifcapable tempdb { + do_test pragma-9.9 { + execsql { + PRAGMA temp_store_directory; + PRAGMA temp_store=FILE; + CREATE TEMP TABLE temp_store_directory_test(a integer); + INSERT INTO temp_store_directory_test values (2); + SELECT * FROM temp_store_directory_test; + } + } {2} + do_test pragma-9.10 { + catchsql " + PRAGMA temp_store_directory='$pwd'; + SELECT * FROM temp_store_directory_test; + " + } {1 {no such table: temp_store_directory_test}} + } } -} +} do_test pragma-9.11 { execsql { PRAGMA temp_store = 0; @@ -893,7 +1043,6 @@ PRAGMA temp_store; } } {0} -breakpoint do_test pragma-9.15 { catchsql { BEGIN EXCLUSIVE; @@ -908,6 +1057,23 @@ COMMIT; } } {{valuable data}} + +do_test pragma-9.17 { + execsql { + INSERT INTO temp_table VALUES('valuable data II'); + SELECT * FROM temp_table; + } +} {{valuable data} {valuable data II}} + +do_test pragma-9.18 { + set rc [catch { + db eval {SELECT t FROM temp_table} { + execsql {pragma temp_store = 1} + } + } msg] + list $rc $msg +} {1 {temporary storage cannot be changed from within a transaction}} + } ;# ifcapable pager_pragmas ifcapable trigger { @@ -966,13 +1132,13 @@ execsql2 { pragma collation_list; } - } {seq 0 name NOCASE seq 1 name BINARY} + } {seq 0 name NOCASE seq 1 name RTRIM seq 2 name BINARY} do_test pragma-11.2 { db collate New_Collation blah... 
execsql { pragma collation_list; } - } {0 New_Collation 1 NOCASE 2 BINARY} + } {0 New_Collation 1 NOCASE 2 RTRIM 3 BINARY} } ifcapable schema_pragmas&&tempdb { @@ -1029,9 +1195,278 @@ } ;# ifcapable bloblit +ifcapable pager_pragmas { + db close + file delete -force test.db + sqlite3 db test.db + + do_test pragma-14.1 { + execsql { pragma auto_vacuum = 0 } + execsql { pragma page_count } + } {0} + + do_test pragma-14.2 { + execsql { + CREATE TABLE abc(a, b, c); + PRAGMA page_count; + } + } {2} + + do_test pragma-14.3 { + execsql { + BEGIN; + CREATE TABLE def(a, b, c); + PRAGMA page_count; + } + } {3} + + do_test pragma-14.4 { + set page_size [db one {pragma page_size}] + expr [file size test.db] / $page_size + } {2} + + do_test pragma-14.5 { + execsql { + ROLLBACK; + PRAGMA page_count; + } + } {2} + + do_test pragma-14.6 { + file delete -force test2.db + sqlite3 db2 test2.db + execsql { + PRAGMA auto_vacuum = 0; + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + CREATE TABLE t3(a, b, c); + CREATE TABLE t4(a, b, c); + } db2 + db2 close + execsql { + ATTACH 'test2.db' AS aux; + PRAGMA aux.page_count; + } + } {5} +} + +# Test that the value set using the cache_size pragma is not reset when the +# schema is reloaded. +# +ifcapable pager_pragmas { + db close + sqlite3 db test.db + do_test pragma-15.1 { + execsql { + PRAGMA cache_size=59; + PRAGMA cache_size; + } + } {59} + do_test pragma-15.2 { + sqlite3 db2 test.db + execsql { + CREATE TABLE newtable(a, b, c); + } db2 + db2 close + } {} + do_test pragma-15.3 { + # Evaluating this statement will cause the schema to be reloaded (because + # the schema was changed by another connection in pragma-15.2). At one + # point there was a bug that reset the cache_size to its default value + # when this happened. + execsql { SELECT * FROM sqlite_master } + execsql { PRAGMA cache_size } + } {59} +} + # Reset the sqlite3_temp_directory variable for the next run of tests: sqlite3 dbX :memory: dbX eval {PRAGMA temp_store_directory = ""} dbX close +ifcapable lock_proxy_pragmas&&prefer_proxy_locking { + set sqlite_hostid_num 1 + + set using_proxy 0 + foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set using_proxy $value + } + + # Test the lock_proxy_file pragmas. 
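Proxy locking is an OS X-specific scheme in which SQLite takes its locks on a separate proxy file instead of on the database itself, intended for databases on filesystems whose byte-range locking is unreliable. The pragma accepts an explicit path or ":auto:", in which case the reported proxy path ends with "<database>:auto:" (see pragma-16.6 below), and setting the SQLITE_FORCE_PROXY_LOCKING environment variable turns the behaviour on for new connections. A minimal sketch, reusing the proxy file name from pragma-16.1:

sqlite3 db test.db
db eval {PRAGMA lock_proxy_file = "mylittleproxy"}  ;# explicit proxy path
db eval {PRAGMA lock_proxy_file}                    ;# echoes the path back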
+ # + db close + set env(SQLITE_FORCE_PROXY_LOCKING) "0" + + sqlite3 db test.db + do_test pragma-16.1 { + execsql { + PRAGMA lock_proxy_file="mylittleproxy"; + select * from sqlite_master; + } + execsql { + PRAGMA lock_proxy_file; + } + } {mylittleproxy} + + do_test pragma-16.2 { + sqlite3 db2 test.db + execsql { + PRAGMA lock_proxy_file="mylittleproxy"; + } db2 + } {} + + db2 close + do_test pragma-16.2.1 { + sqlite3 db2 test.db + execsql { + PRAGMA lock_proxy_file=":auto:"; + select * from sqlite_master; + } db2 + execsql { + PRAGMA lock_proxy_file; + } db2 + } {mylittleproxy} + + db2 close + do_test pragma-16.3 { + sqlite3 db2 test.db + execsql { + PRAGMA lock_proxy_file="myotherproxy"; + } db2 + catchsql { + select * from sqlite_master; + } db2 + } {1 {database is locked}} + + do_test pragma-16.4 { + db2 close + db close + sqlite3 db2 test.db + execsql { + PRAGMA lock_proxy_file="myoriginalproxy"; + PRAGMA lock_proxy_file="myotherproxy"; + PRAGMA lock_proxy_file; + } db2 + } {myotherproxy} + + db2 close + set env(SQLITE_FORCE_PROXY_LOCKING) "1" + do_test pragma-16.5 { + sqlite3 db2 test.db + execsql { + PRAGMA lock_proxy_file=":auto:"; + PRAGMA lock_proxy_file; + } db2 + } {myotherproxy} + + do_test pragma-16.6 { + db2 close + sqlite3 db2 test2.db + set lockpath [execsql { + PRAGMA lock_proxy_file=":auto:"; + PRAGMA lock_proxy_file; + } db2] + string match "*test2.db:auto:" $lockpath + } {1} + + set sqlite_hostid_num 2 + do_test pragma-16.7 { + sqlite3 db test2.db + execsql { + PRAGMA lock_proxy_file=":auto:"; + } + catchsql { + select * from sqlite_master; + } + } {1 {database is locked}} + db close + + do_test pragma-16.8 { + sqlite3 db test2.db + catchsql { + select * from sqlite_master; + } + } {1 {database is locked}} + + db2 close + do_test pragma-16.8.1 { + execsql { + PRAGMA lock_proxy_file="yetanotherproxy"; + PRAGMA lock_proxy_file; + } + } {yetanotherproxy} + do_test pragma-16.8.2 { + execsql { + create table mine(x); + } + } {} + + db close + do_test pragma-16.9 { + sqlite3 db proxytest.db + set lockpath2 [execsql { + PRAGMA lock_proxy_file=":auto:"; + PRAGMA lock_proxy_file; + } db] + string match "*proxytest.db:auto:" $lockpath2 + } {1} + + set env(SQLITE_FORCE_PROXY_LOCKING) $using_proxy + set sqlite_hostid_num 0 +} + +# Parsing of auto_vacuum settings. +# +foreach {autovac_setting val} { + 0 0 + 1 1 + 2 2 + 3 0 + -1 0 + none 0 + NONE 0 + NoNe 0 + full 1 + FULL 1 + incremental 2 + INCREMENTAL 2 + -1234 0 + 1234 0 +} { + do_test pragma-17.1.$autovac_setting { + catch {db close} + sqlite3 db :memory: + execsql " + PRAGMA auto_vacuum=$::autovac_setting; + PRAGMA auto_vacuum; + " + } $val +} + +# Parsing of temp_store settings. +# +foreach {temp_setting val} { + 0 0 + 1 1 + 2 2 + 3 0 + -1 0 + file 1 + FILE 1 + fIlE 1 + memory 2 + MEMORY 2 + MeMoRy 2 +} { + do_test pragma-18.1.$temp_setting { + catch {db close} + sqlite3 db :memory: + execsql " + PRAGMA temp_store=$::temp_setting; + PRAGMA temp_store=$::temp_setting; + PRAGMA temp_store; + " + } $val +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/printf.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/printf.test --- sqlite3-3.4.2/test/printf.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/printf.test 2009-06-12 03:37:54.000000000 +0100 @@ -11,69 +11,3442 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the sqlite_*_printf() interface. 
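sqlite3_mprintf() follows the usual C printf rules for widths, precisions and the -, +, 0, space and # flags, and adds SQLite-specific conversions such as %q (escapes ' by doubling it, for building SQL literals), %Q (like %q but wraps the result in single quotes and renders a NULL pointer as NULL) and %z (like %s, but sqlite3_free() is invoked on the argument afterwards). The harness wrappers used below simply forward a format string and three values, for example:

# sqlite3_mprintf_int FORMAT A B C - formats three integers through
# sqlite3_mprintf() and returns the resulting string.
sqlite3_mprintf_int {abc: %d %x %o :xyz} 99 99 99
# -> abc: 99 63 143 :xyz   (decimal, hex and octal, as in printf-1.5.1)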
# -# $Id: printf.test,v 1.24 2007/05/10 17:23:12 drh Exp $ +# $Id: printf.test,v 1.31 2009/02/01 00:21:10 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -set n 1 -foreach v {1 2 5 10 99 100 1000000 999999999 0 -1 -2 -5 -10 -99 -100 -9999999} { - set v32 [expr {$v&0xffffffff}] - do_test printf-1.$n.1 [subst { - sqlite3_mprintf_int {Three integers: %d %x %o} $v $v $v - }] [format {Three integers: %d %x %o} $v $v32 $v32] - do_test printf-1.$n.2 [subst { - sqlite3_mprintf_int {Three integers: (%6d) (%6x) (%6o)} $v $v $v - }] [format {Three integers: (%6d) (%6x) (%6o)} $v $v32 $v32] - do_test printf-1.$n.3 [subst { - sqlite3_mprintf_int {Three integers: (%-6d) (%-6x) (%-6o)} $v $v $v - }] [format {Three integers: (%-6d) (%-6x) (%-6o)} $v $v32 $v32] - do_test printf-1.$n.4 [subst { - sqlite3_mprintf_int {Three integers: (%+6d) (%+6x) (%+6o)} $v $v $v - }] [format {Three integers: (%+6d) (%+6x) (%+6o)} $v $v32 $v32] - do_test printf-1.$n.5 [subst { - sqlite3_mprintf_int {Three integers: (%06d) (%06x) (%06o)} $v $v $v - }] [format {Three integers: (%06d) (%06x) (%06o)} $v $v32 $v32] - do_test printf-1.$n.6 [subst { - sqlite3_mprintf_int {Three integers: (% 6d) (% 6x) (% 6o)} $v $v $v - }] [format {Three integers: (% 6d) (% 6x) (% 6o)} $v $v32 $v32] - do_test printf-1.$n.7 [subst { - sqlite3_mprintf_int {Three integers: (%#6d) (%#6x) (%#6o)} $v $v $v - }] [format {Three integers: (%#6d) (%#6x) (%#6o)} $v $v32 $v32] - incr n -} - - -if {$::tcl_platform(platform)!="windows"} { - -set m 1 -foreach {a b} {1 1 5 5 10 10 10 5} { - set n 1 - foreach x {0.001 1.0e-20 1.0 0.0 100.0 9.99999 -0.00543 -1.0 -99.99999} { - do_test printf-2.$m.$n.1 [subst { - sqlite3_mprintf_double {A double: %*.*f} $a $b $x - }] [format {A double: %*.*f} $a $b $x] - do_test printf-2.$m.$n.2 [subst { - sqlite3_mprintf_double {A double: %*.*e} $a $b $x - }] [format {A double: %*.*e} $a $b $x] - do_test printf-2.$m.$n.3 [subst { - sqlite3_mprintf_double {A double: %*.*g} $a $b $x - }] [format {A double: %*.*g} $a $b $x] - do_test printf-2.$m.$n.4 [subst { - sqlite3_mprintf_double {A double: %d %d %g} $a $b $x - }] [format {A double: %d %d %g} $a $b $x] - do_test printf-2.$m.$n.5 [subst { - sqlite3_mprintf_double {A double: %d %d %#g} $a $b $x - }] [format {A double: %d %d %#g} $a $b $x] - do_test printf-2.$m.$n.6 [subst { - sqlite3_mprintf_double {A double: %d %d %010g} $a $b $x - }] [format {A double: %d %d %010g} $a $b $x] - incr n - } - incr m -} -} ;# endif not windows +do_test printf-1.1.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 1 1 1 +} {abc: 1 1 1 :xyz} +do_test printf-1.1.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 1 1 1 +} {abc: ( 1) ( 1) ( 1) :xyz} +do_test printf-1.1.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 1 1 1 +} {abc: (1 ) (1 ) (1 ) :xyz} +do_test printf-1.1.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 1 1 1 +} {abc: ( +1) ( 1) ( 1) :xyz} +do_test printf-1.1.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 1 1 1 +} {abc: (000001) (000001) (000001) :xyz} +do_test printf-1.1.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 1 1 1 +} {abc: ( 1) ( 1) ( 1) :xyz} +do_test printf-1.1.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 1 1 1 +} {abc: ( 1) ( 0x1) ( 01) :xyz} +do_test printf-1.2.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 2 2 2 +} {abc: 2 2 2 :xyz} +do_test printf-1.2.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 2 2 2 +} {abc: ( 2) ( 2) ( 2) :xyz} +do_test printf-1.2.3 { + 
sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 2 2 2 +} {abc: (2 ) (2 ) (2 ) :xyz} +do_test printf-1.2.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 2 2 2 +} {abc: ( +2) ( 2) ( 2) :xyz} +do_test printf-1.2.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 2 2 2 +} {abc: (000002) (000002) (000002) :xyz} +do_test printf-1.2.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 2 2 2 +} {abc: ( 2) ( 2) ( 2) :xyz} +do_test printf-1.2.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 2 2 2 +} {abc: ( 2) ( 0x2) ( 02) :xyz} +do_test printf-1.3.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 5 5 5 +} {abc: 5 5 5 :xyz} +do_test printf-1.3.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 5 5 5 +} {abc: ( 5) ( 5) ( 5) :xyz} +do_test printf-1.3.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 5 5 5 +} {abc: (5 ) (5 ) (5 ) :xyz} +do_test printf-1.3.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 5 5 5 +} {abc: ( +5) ( 5) ( 5) :xyz} +do_test printf-1.3.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 5 5 5 +} {abc: (000005) (000005) (000005) :xyz} +do_test printf-1.3.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 5 5 5 +} {abc: ( 5) ( 5) ( 5) :xyz} +do_test printf-1.3.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 5 5 5 +} {abc: ( 5) ( 0x5) ( 05) :xyz} +do_test printf-1.4.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 10 10 10 +} {abc: 10 a 12 :xyz} +do_test printf-1.4.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 10 10 10 +} {abc: ( 10) ( a) ( 12) :xyz} +do_test printf-1.4.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 10 10 10 +} {abc: (10 ) (a ) (12 ) :xyz} +do_test printf-1.4.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 10 10 10 +} {abc: ( +10) ( a) ( 12) :xyz} +do_test printf-1.4.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 10 10 10 +} {abc: (000010) (00000a) (000012) :xyz} +do_test printf-1.4.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 10 10 10 +} {abc: ( 10) ( a) ( 12) :xyz} +do_test printf-1.4.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 10 10 10 +} {abc: ( 10) ( 0xa) ( 012) :xyz} +do_test printf-1.5.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 99 99 99 +} {abc: 99 63 143 :xyz} +do_test printf-1.5.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 99 99 99 +} {abc: ( 99) ( 63) ( 143) :xyz} +do_test printf-1.5.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 99 99 99 +} {abc: (99 ) (63 ) (143 ) :xyz} +do_test printf-1.5.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 99 99 99 +} {abc: ( +99) ( 63) ( 143) :xyz} +do_test printf-1.5.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 99 99 99 +} {abc: (000099) (000063) (000143) :xyz} +do_test printf-1.5.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 99 99 99 +} {abc: ( 99) ( 63) ( 143) :xyz} +do_test printf-1.5.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 99 99 99 +} {abc: ( 99) ( 0x63) ( 0143) :xyz} +do_test printf-1.6.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 100 100 100 +} {abc: 100 64 144 :xyz} +do_test printf-1.6.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 100 100 100 +} {abc: ( 100) ( 64) ( 144) :xyz} +do_test printf-1.6.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 100 100 100 +} {abc: (100 ) (64 ) (144 ) :xyz} +do_test printf-1.6.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ 
+ 100 100 100 +} {abc: ( +100) ( 64) ( 144) :xyz} +do_test printf-1.6.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 100 100 100 +} {abc: (000100) (000064) (000144) :xyz} +do_test printf-1.6.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 100 100 100 +} {abc: ( 100) ( 64) ( 144) :xyz} +do_test printf-1.6.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 100 100 100 +} {abc: ( 100) ( 0x64) ( 0144) :xyz} +do_test printf-1.7.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 1000000 1000000 1000000 +} {abc: 1000000 f4240 3641100 :xyz} +do_test printf-1.7.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 1000000 1000000 1000000 +} {abc: (1000000) ( f4240) (3641100) :xyz} +do_test printf-1.7.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 1000000 1000000 1000000 +} {abc: (1000000) (f4240 ) (3641100) :xyz} +do_test printf-1.7.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 1000000 1000000 1000000 +} {abc: (+1000000) ( f4240) (3641100) :xyz} +do_test printf-1.7.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 1000000 1000000 1000000 +} {abc: (1000000) (0f4240) (3641100) :xyz} +do_test printf-1.7.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 1000000 1000000 1000000 +} {abc: ( 1000000) ( f4240) (3641100) :xyz} +do_test printf-1.7.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 1000000 1000000 1000000 +} {abc: (1000000) (0xf4240) (03641100) :xyz} +do_test printf-1.8.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 999999999 999999999 999999999 +} {abc: 999999999 3b9ac9ff 7346544777 :xyz} +do_test printf-1.8.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 999999999 999999999 999999999 +} {abc: (999999999) (3b9ac9ff) (7346544777) :xyz} +do_test printf-1.8.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 999999999 999999999 999999999 +} {abc: (999999999) (3b9ac9ff) (7346544777) :xyz} +do_test printf-1.8.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 999999999 999999999 999999999 +} {abc: (+999999999) (3b9ac9ff) (7346544777) :xyz} +do_test printf-1.8.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 999999999 999999999 999999999 +} {abc: (999999999) (3b9ac9ff) (7346544777) :xyz} +do_test printf-1.8.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 999999999 999999999 999999999 +} {abc: ( 999999999) (3b9ac9ff) (7346544777) :xyz} +do_test printf-1.8.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 999999999 999999999 999999999 +} {abc: (999999999) (0x3b9ac9ff) (07346544777) :xyz} +do_test printf-1.9.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0 0 0 +} {abc: 0 0 0 :xyz} +do_test printf-1.9.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0 0 0 +} {abc: ( 0) ( 0) ( 0) :xyz} +do_test printf-1.9.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0 0 0 +} {abc: (0 ) (0 ) (0 ) :xyz} +do_test printf-1.9.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0 0 0 +} {abc: ( +0) ( 0) ( 0) :xyz} +do_test printf-1.9.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0 0 0 +} {abc: (000000) (000000) (000000) :xyz} +do_test printf-1.9.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0 0 0 +} {abc: ( 0) ( 0) ( 0) :xyz} +do_test printf-1.9.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0 0 0 +} {abc: ( 0) ( 0) ( 0) :xyz} +# 0xffffffff == -1 +do_test printf-1.10.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: -1 
ffffffff 37777777777 :xyz} +do_test printf-1.10.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: ( -1) (ffffffff) (37777777777) :xyz} +do_test printf-1.10.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: (-1 ) (ffffffff) (37777777777) :xyz} +do_test printf-1.10.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: ( -1) (ffffffff) (37777777777) :xyz} +do_test printf-1.10.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: (-00001) (ffffffff) (37777777777) :xyz} +do_test printf-1.10.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: ( -1) (ffffffff) (37777777777) :xyz} +do_test printf-1.10.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xffffffff 0xffffffff 0xffffffff +} {abc: ( -1) (0xffffffff) (037777777777) :xyz} +# 0xfffffffe == -2 +do_test printf-1.11.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: -2 fffffffe 37777777776 :xyz} +do_test printf-1.11.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: ( -2) (fffffffe) (37777777776) :xyz} +do_test printf-1.11.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: (-2 ) (fffffffe) (37777777776) :xyz} +do_test printf-1.11.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: ( -2) (fffffffe) (37777777776) :xyz} +do_test printf-1.11.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: (-00002) (fffffffe) (37777777776) :xyz} +do_test printf-1.11.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: ( -2) (fffffffe) (37777777776) :xyz} +do_test printf-1.11.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xfffffffe 0xfffffffe 0xfffffffe +} {abc: ( -2) (0xfffffffe) (037777777776) :xyz} +# 0xfffffffb == -5 +do_test printf-1.12.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: -5 fffffffb 37777777773 :xyz} +do_test printf-1.12.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: ( -5) (fffffffb) (37777777773) :xyz} +do_test printf-1.12.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: (-5 ) (fffffffb) (37777777773) :xyz} +do_test printf-1.12.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: ( -5) (fffffffb) (37777777773) :xyz} +do_test printf-1.12.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: (-00005) (fffffffb) (37777777773) :xyz} +do_test printf-1.12.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: ( -5) (fffffffb) (37777777773) :xyz} +do_test printf-1.12.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xfffffffb 0xfffffffb 0xfffffffb +} {abc: ( -5) (0xfffffffb) (037777777773) :xyz} +# 0xfffffff6 == -10 +do_test printf-1.13.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: -10 fffffff6 37777777766 :xyz} +do_test printf-1.13.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xfffffff6 0xfffffff6 
0xfffffff6 +} {abc: ( -10) (fffffff6) (37777777766) :xyz} +do_test printf-1.13.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: (-10 ) (fffffff6) (37777777766) :xyz} +do_test printf-1.13.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: ( -10) (fffffff6) (37777777766) :xyz} +do_test printf-1.13.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: (-00010) (fffffff6) (37777777766) :xyz} +do_test printf-1.13.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: ( -10) (fffffff6) (37777777766) :xyz} +do_test printf-1.13.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xfffffff6 0xfffffff6 0xfffffff6 +} {abc: ( -10) (0xfffffff6) (037777777766) :xyz} +# 0xffffff9d == -99 +do_test printf-1.14.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: -99 ffffff9d 37777777635 :xyz} +do_test printf-1.14.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: ( -99) (ffffff9d) (37777777635) :xyz} +do_test printf-1.14.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: (-99 ) (ffffff9d) (37777777635) :xyz} +do_test printf-1.14.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: ( -99) (ffffff9d) (37777777635) :xyz} +do_test printf-1.14.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: (-00099) (ffffff9d) (37777777635) :xyz} +do_test printf-1.14.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: ( -99) (ffffff9d) (37777777635) :xyz} +do_test printf-1.14.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xffffff9d 0xffffff9d 0xffffff9d +} {abc: ( -99) (0xffffff9d) (037777777635) :xyz} +# 0xffffff9c == -100 +do_test printf-1.15.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: -100 ffffff9c 37777777634 :xyz} +do_test printf-1.15.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: ( -100) (ffffff9c) (37777777634) :xyz} +do_test printf-1.15.3 { + sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: (-100 ) (ffffff9c) (37777777634) :xyz} +do_test printf-1.15.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: ( -100) (ffffff9c) (37777777634) :xyz} +do_test printf-1.15.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: (-00100) (ffffff9c) (37777777634) :xyz} +do_test printf-1.15.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: ( -100) (ffffff9c) (37777777634) :xyz} +do_test printf-1.15.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xffffff9c 0xffffff9c 0xffffff9c +} {abc: ( -100) (0xffffff9c) (037777777634) :xyz} +# 0xff676981 == -9999999 +do_test printf-1.16.1 { + sqlite3_mprintf_int {abc: %d %x %o :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: -9999999 ff676981 37731664601 :xyz} +do_test printf-1.16.2 { + sqlite3_mprintf_int {abc: (%6d) (%6x) (%6o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (ff676981) (37731664601) :xyz} +do_test printf-1.16.3 { + 
sqlite3_mprintf_int {abc: (%-6d) (%-6x) (%-6o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (ff676981) (37731664601) :xyz} +do_test printf-1.16.4 { + sqlite3_mprintf_int {abc: (%+6d) (%+6x) (%+6o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (ff676981) (37731664601) :xyz} +do_test printf-1.16.5 { + sqlite3_mprintf_int {abc: (%06d) (%06x) (%06o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (ff676981) (37731664601) :xyz} +do_test printf-1.16.6 { + sqlite3_mprintf_int {abc: (% 6d) (% 6x) (% 6o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (ff676981) (37731664601) :xyz} +do_test printf-1.16.7 { + sqlite3_mprintf_int {abc: (%#6d) (%#6x) (%#6o) :xyz}\ + 0xff676981 0xff676981 0xff676981 +} {abc: (-9999999) (0xff676981) (037731664601) :xyz} +do_test printf-2.1.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 0.001 +} {abc: (0.0) :xyz} +do_test printf-2.1.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 0.001 +} {abc: (1.0e-03) :xyz} +do_test printf-2.1.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.1.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 0.001 +} {abc: 1 1 (0.001) :xyz} +do_test printf-2.1.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 0.001 +} {abc: 1 1 (0.00100000) :xyz} +do_test printf-2.1.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 0.001 +} {abc: 1 1 (000000.001) :xyz} +do_test printf-2.1.1.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 0.001 +} {abc: 1 1 (0.0) :xyz} +do_test printf-2.1.1.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 0.001 +} {abc: 1 1 (1.0e-03) :xyz} +do_test printf-2.1.1.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 0.001 +} {abc: 1 1 (0.001) :xyz} +do_test printf-2.1.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 1.0e-20 +} {abc: (0.0) :xyz} +do_test printf-2.1.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 1.0e-20 +} {abc: (1.0e-20) :xyz} +do_test printf-2.1.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.1.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (1e-20) :xyz} +do_test printf-2.1.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (1.00000e-20) :xyz} +do_test printf-2.1.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (000001e-20) :xyz} +do_test printf-2.1.2.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (0.0) :xyz} +do_test printf-2.1.2.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (1.0e-20) :xyz} +do_test printf-2.1.2.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 1.0e-20 +} {abc: 1 1 (1e-20) :xyz} +do_test printf-2.1.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 1.0 +} {abc: (1.0) :xyz} +do_test printf-2.1.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 1.0 +} {abc: (1.0e+00) :xyz} +do_test printf-2.1.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 1.0 +} {abc: (1) :xyz} +do_test printf-2.1.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 1.0 +} {abc: 1 1 (1) :xyz} +do_test printf-2.1.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 1.0 +} {abc: 1 1 (1.00000) :xyz} +do_test printf-2.1.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 1.0 +} {abc: 1 1 (0000000001) :xyz} +do_test printf-2.1.3.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 1.0 
+} {abc: 1 1 (1.0) :xyz} +do_test printf-2.1.3.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 1.0 +} {abc: 1 1 (1.0e+00) :xyz} +do_test printf-2.1.3.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 1.0 +} {abc: 1 1 (1) :xyz} +do_test printf-2.1.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 0.0 +} {abc: (0.0) :xyz} +do_test printf-2.1.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 0.0 +} {abc: (0.0e+00) :xyz} +do_test printf-2.1.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 0.0 +} {abc: (0) :xyz} +do_test printf-2.1.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 0.0 +} {abc: 1 1 (0) :xyz} +do_test printf-2.1.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 0.0 +} {abc: 1 1 (0.00000) :xyz} +do_test printf-2.1.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 0.0 +} {abc: 1 1 (0000000000) :xyz} +do_test printf-2.1.4.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 0.0 +} {abc: 1 1 (0.0) :xyz} +do_test printf-2.1.4.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 0.0 +} {abc: 1 1 (0.0e+00) :xyz} +do_test printf-2.1.4.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 0.0 +} {abc: 1 1 (0) :xyz} +do_test printf-2.1.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 100.0 +} {abc: (100.0) :xyz} +do_test printf-2.1.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 100.0 +} {abc: (1.0e+02) :xyz} +do_test printf-2.1.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 100.0 +} {abc: (1e+02) :xyz} +do_test printf-2.1.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 100.0 +} {abc: 1 1 (100) :xyz} +do_test printf-2.1.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 100.0 +} {abc: 1 1 (100.000) :xyz} +do_test printf-2.1.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 100.0 +} {abc: 1 1 (0000000100) :xyz} +do_test printf-2.1.5.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 100.0 +} {abc: 1 1 (100.0) :xyz} +do_test printf-2.1.5.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 100.0 +} {abc: 1 1 (1.0e+02) :xyz} +do_test printf-2.1.5.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 100.0 +} {abc: 1 1 (1e+02) :xyz} +do_test printf-2.1.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 9.99999 +} {abc: (10.0) :xyz} +do_test printf-2.1.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 9.99999 +} {abc: (1.0e+01) :xyz} +do_test printf-2.1.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 9.99999 +} {abc: (1e+01) :xyz} +do_test printf-2.1.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 9.99999 +} {abc: 1 1 (9.99999) :xyz} +do_test printf-2.1.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 9.99999 +} {abc: 1 1 (9.99999) :xyz} +do_test printf-2.1.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 9.99999 +} {abc: 1 1 (0009.99999) :xyz} +do_test printf-2.1.6.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 9.99999 +} {abc: 1 1 (10.0) :xyz} +do_test printf-2.1.6.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 9.99999 +} {abc: 1 1 (1.0e+01) :xyz} +do_test printf-2.1.6.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 9.99999 +} {abc: 1 1 (1e+01) :xyz} +do_test printf-2.1.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 -0.00543 +} {abc: (-0.0) :xyz} +do_test printf-2.1.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 -0.00543 +} {abc: (-5.4e-03) :xyz} +do_test printf-2.1.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 -0.00543 +} {abc: (-0.005) 
:xyz} +do_test printf-2.1.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-0.00543) :xyz} +do_test printf-2.1.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-0.00543000) :xyz} +do_test printf-2.1.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-000.00543) :xyz} +do_test printf-2.1.7.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-0.0) :xyz} +do_test printf-2.1.7.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-5.4e-03) :xyz} +do_test printf-2.1.7.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 -0.00543 +} {abc: 1 1 (-0.005) :xyz} +do_test printf-2.1.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 -1.0 +} {abc: (-1.0) :xyz} +do_test printf-2.1.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 -1.0 +} {abc: (-1.0e+00) :xyz} +do_test printf-2.1.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 -1.0 +} {abc: (-1) :xyz} +do_test printf-2.1.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 -1.0 +} {abc: 1 1 (-1) :xyz} +do_test printf-2.1.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 -1.0 +} {abc: 1 1 (-1.00000) :xyz} +do_test printf-2.1.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 -1.0 +} {abc: 1 1 (-000000001) :xyz} +do_test printf-2.1.8.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 -1.0 +} {abc: 1 1 (-1.0) :xyz} +do_test printf-2.1.8.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 -1.0 +} {abc: 1 1 (-1.0e+00) :xyz} +do_test printf-2.1.8.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 -1.0 +} {abc: 1 1 (-1) :xyz} +do_test printf-2.1.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 -99.99999 +} {abc: (-100.0) :xyz} +do_test printf-2.1.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 -99.99999 +} {abc: (-1.0e+02) :xyz} +do_test printf-2.1.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 -99.99999 +} {abc: (-1e+02) :xyz} +do_test printf-2.1.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-100) :xyz} +do_test printf-2.1.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-100.000) :xyz} +do_test printf-2.1.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-000000100) :xyz} +do_test printf-2.1.9.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-100.0) :xyz} +do_test printf-2.1.9.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-1.0e+02) :xyz} +do_test printf-2.1.9.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 -99.99999 +} {abc: 1 1 (-1e+02) :xyz} +do_test printf-2.1.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 3.14e+9 +} {abc: (3140000000.0) :xyz} +do_test printf-2.1.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 3.14e+9 +} {abc: (3.1e+09) :xyz} +do_test printf-2.1.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 3.14e+9 +} {abc: (3e+09) :xyz} +do_test printf-2.1.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 3.14e+9 +} {abc: 1 1 (3.14e+09) :xyz} +do_test printf-2.1.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 3.14e+9 +} {abc: 1 1 (3.14000e+09) :xyz} +do_test printf-2.1.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 3.14e+9 +} {abc: 1 1 (003.14e+09) :xyz} +do_test printf-2.1.10.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 3.14e+9 +} {abc: 1 1 
(3140000000.0) :xyz} +do_test printf-2.1.10.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 3.14e+9 +} {abc: 1 1 (3.1e+09) :xyz} +do_test printf-2.1.10.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 3.14e+9 +} {abc: 1 1 (3e+09) :xyz} +do_test printf-2.1.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 -4.72732e+88 +} {abc: (-4.7e+88) :xyz} +do_test printf-2.1.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 -4.72732e+88 +} {abc: (-5e+88) :xyz} +do_test printf-2.1.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 -4.72732e+88 +} {abc: 1 1 (-4.72732e+88) :xyz} +do_test printf-2.1.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 -4.72732e+88 +} {abc: 1 1 (-4.72732e+88) :xyz} +do_test printf-2.1.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 -4.72732e+88 +} {abc: 1 1 (-4.72732e+88) :xyz} +do_test printf-2.1.11.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 -4.72732e+88 +} {abc: 1 1 (-4.7e+88) :xyz} +do_test printf-2.1.11.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 -4.72732e+88 +} {abc: 1 1 (-5e+88) :xyz} +do_test printf-2.1.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 9.87991e+143 +} {abc: (9.9e+143) :xyz} +do_test printf-2.1.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 9.87991e+143 +} {abc: (1e+144) :xyz} +do_test printf-2.1.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 9.87991e+143 +} {abc: 1 1 (9.87991e+143) :xyz} +do_test printf-2.1.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 9.87991e+143 +} {abc: 1 1 (9.87991e+143) :xyz} +do_test printf-2.1.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 9.87991e+143 +} {abc: 1 1 (9.87991e+143) :xyz} +do_test printf-2.1.12.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 9.87991e+143 +} {abc: 1 1 (9.9e+143) :xyz} +do_test printf-2.1.12.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 9.87991e+143 +} {abc: 1 1 (1e+144) :xyz} +do_test printf-2.1.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 -6.287291e-9 +} {abc: (-0.0) :xyz} +do_test printf-2.1.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 -6.287291e-9 +} {abc: (-6.3e-09) :xyz} +do_test printf-2.1.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 -6.287291e-9 +} {abc: (-6e-09) :xyz} +do_test printf-2.1.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-6.28729e-09) :xyz} +do_test printf-2.1.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-6.28729e-09) :xyz} +do_test printf-2.1.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-6.28729e-09) :xyz} +do_test printf-2.1.13.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-0.0) :xyz} +do_test printf-2.1.13.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-6.3e-09) :xyz} +do_test printf-2.1.13.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 -6.287291e-9 +} {abc: 1 1 (-6e-09) :xyz} +do_test printf-2.1.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 1 1 3.38826392e-110 +} {abc: (0.0) :xyz} +do_test printf-2.1.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 1 1 3.38826392e-110 +} {abc: (3.4e-110) :xyz} +do_test printf-2.1.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 1 1 3.38826392e-110 +} {abc: (3e-110) :xyz} +do_test printf-2.1.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (3.38826e-110) :xyz} +do_test 
printf-2.1.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (3.38826e-110) :xyz} +do_test printf-2.1.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (3.38826e-110) :xyz} +do_test printf-2.1.14.7 { + sqlite3_mprintf_double {abc: %d %d (%1.1f) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (0.0) :xyz} +do_test printf-2.1.14.8 { + sqlite3_mprintf_double {abc: %d %d (%1.1e) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (3.4e-110) :xyz} +do_test printf-2.1.14.9 { + sqlite3_mprintf_double {abc: %d %d (%1.1g) :xyz} 1 1 3.38826392e-110 +} {abc: 1 1 (3e-110) :xyz} +do_test printf-2.2.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 0.001 +} {abc: (0.00100) :xyz} +do_test printf-2.2.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 0.001 +} {abc: (1.00000e-03) :xyz} +do_test printf-2.2.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.2.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 0.001 +} {abc: 5 5 (0.001) :xyz} +do_test printf-2.2.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 0.001 +} {abc: 5 5 (0.00100000) :xyz} +do_test printf-2.2.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 0.001 +} {abc: 5 5 (000000.001) :xyz} +do_test printf-2.2.1.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 0.001 +} {abc: 5 5 (0.00100) :xyz} +do_test printf-2.2.1.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 0.001 +} {abc: 5 5 (1.00000e-03) :xyz} +do_test printf-2.2.1.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 0.001 +} {abc: 5 5 (0.001) :xyz} +do_test printf-2.2.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 1.0e-20 +} {abc: (0.00000) :xyz} +do_test printf-2.2.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 1.0e-20 +} {abc: (1.00000e-20) :xyz} +do_test printf-2.2.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.2.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (1e-20) :xyz} +do_test printf-2.2.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (1.00000e-20) :xyz} +do_test printf-2.2.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (000001e-20) :xyz} +do_test printf-2.2.2.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (0.00000) :xyz} +do_test printf-2.2.2.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (1.00000e-20) :xyz} +do_test printf-2.2.2.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 1.0e-20 +} {abc: 5 5 (1e-20) :xyz} +do_test printf-2.2.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 1.0 +} {abc: (1.00000) :xyz} +do_test printf-2.2.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 1.0 +} {abc: (1.00000e+00) :xyz} +do_test printf-2.2.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.2.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 1.0 +} {abc: 5 5 (1) :xyz} +do_test printf-2.2.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 1.0 +} {abc: 5 5 (1.00000) :xyz} +do_test printf-2.2.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 1.0 +} {abc: 5 5 (0000000001) :xyz} +do_test printf-2.2.3.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 1.0 +} {abc: 5 5 (1.00000) :xyz} +do_test printf-2.2.3.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 1.0 +} {abc: 5 5 (1.00000e+00) 
:xyz} +do_test printf-2.2.3.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 1.0 +} {abc: 5 5 ( 1) :xyz} +do_test printf-2.2.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 0.0 +} {abc: (0.00000) :xyz} +do_test printf-2.2.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 0.0 +} {abc: (0.00000e+00) :xyz} +do_test printf-2.2.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.2.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 0.0 +} {abc: 5 5 (0) :xyz} +do_test printf-2.2.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 0.0 +} {abc: 5 5 (0.00000) :xyz} +do_test printf-2.2.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 0.0 +} {abc: 5 5 (0000000000) :xyz} +do_test printf-2.2.4.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 0.0 +} {abc: 5 5 (0.00000) :xyz} +do_test printf-2.2.4.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 0.0 +} {abc: 5 5 (0.00000e+00) :xyz} +do_test printf-2.2.4.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 0.0 +} {abc: 5 5 ( 0) :xyz} +do_test printf-2.2.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 100.0 +} {abc: (100.00000) :xyz} +do_test printf-2.2.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 100.0 +} {abc: (1.00000e+02) :xyz} +do_test printf-2.2.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 100.0 +} {abc: ( 100) :xyz} +do_test printf-2.2.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 100.0 +} {abc: 5 5 (100) :xyz} +do_test printf-2.2.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 100.0 +} {abc: 5 5 (100.000) :xyz} +do_test printf-2.2.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 100.0 +} {abc: 5 5 (0000000100) :xyz} +do_test printf-2.2.5.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 100.0 +} {abc: 5 5 (100.00000) :xyz} +do_test printf-2.2.5.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 100.0 +} {abc: 5 5 (1.00000e+02) :xyz} +do_test printf-2.2.5.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 100.0 +} {abc: 5 5 ( 100) :xyz} +do_test printf-2.2.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 9.99999 +} {abc: (9.99999) :xyz} +do_test printf-2.2.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 9.99999 +} {abc: (9.99999e+00) :xyz} +do_test printf-2.2.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 9.99999 +} {abc: ( 10) :xyz} +do_test printf-2.2.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 9.99999 +} {abc: 5 5 (9.99999) :xyz} +do_test printf-2.2.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 9.99999 +} {abc: 5 5 (9.99999) :xyz} +do_test printf-2.2.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 9.99999 +} {abc: 5 5 (0009.99999) :xyz} +do_test printf-2.2.6.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 9.99999 +} {abc: 5 5 (9.99999) :xyz} +do_test printf-2.2.6.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 9.99999 +} {abc: 5 5 (9.99999e+00) :xyz} +do_test printf-2.2.6.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 9.99999 +} {abc: 5 5 ( 10) :xyz} +do_test printf-2.2.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 -0.00543 +} {abc: (-0.00543) :xyz} +do_test printf-2.2.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 -0.00543 +} {abc: (-5.43000e-03) :xyz} +do_test printf-2.2.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 -0.00543 +} {abc: (-0.00543) :xyz} +do_test printf-2.2.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 
5 -0.00543 +} {abc: 5 5 (-0.00543) :xyz} +do_test printf-2.2.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 -0.00543 +} {abc: 5 5 (-0.00543000) :xyz} +do_test printf-2.2.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 -0.00543 +} {abc: 5 5 (-000.00543) :xyz} +do_test printf-2.2.7.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 -0.00543 +} {abc: 5 5 (-0.00543) :xyz} +do_test printf-2.2.7.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 -0.00543 +} {abc: 5 5 (-5.43000e-03) :xyz} +do_test printf-2.2.7.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 -0.00543 +} {abc: 5 5 (-0.00543) :xyz} +do_test printf-2.2.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 -1.0 +} {abc: (-1.00000) :xyz} +do_test printf-2.2.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 -1.0 +} {abc: (-1.00000e+00) :xyz} +do_test printf-2.2.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 -1.0 +} {abc: ( -1) :xyz} +do_test printf-2.2.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 -1.0 +} {abc: 5 5 (-1) :xyz} +do_test printf-2.2.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 -1.0 +} {abc: 5 5 (-1.00000) :xyz} +do_test printf-2.2.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 -1.0 +} {abc: 5 5 (-000000001) :xyz} +do_test printf-2.2.8.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 -1.0 +} {abc: 5 5 (-1.00000) :xyz} +do_test printf-2.2.8.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 -1.0 +} {abc: 5 5 (-1.00000e+00) :xyz} +do_test printf-2.2.8.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 -1.0 +} {abc: 5 5 ( -1) :xyz} +do_test printf-2.2.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 -99.99999 +} {abc: (-99.99999) :xyz} +do_test printf-2.2.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 -99.99999 +} {abc: (-1.00000e+02) :xyz} +do_test printf-2.2.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 -99.99999 +} {abc: ( -100) :xyz} +do_test printf-2.2.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 -99.99999 +} {abc: 5 5 (-100) :xyz} +do_test printf-2.2.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 -99.99999 +} {abc: 5 5 (-100.000) :xyz} +do_test printf-2.2.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 -99.99999 +} {abc: 5 5 (-000000100) :xyz} +do_test printf-2.2.9.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 -99.99999 +} {abc: 5 5 (-99.99999) :xyz} +do_test printf-2.2.9.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 -99.99999 +} {abc: 5 5 (-1.00000e+02) :xyz} +do_test printf-2.2.9.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 -99.99999 +} {abc: 5 5 ( -100) :xyz} +do_test printf-2.2.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 3.14e+9 +} {abc: (3140000000.00000) :xyz} +do_test printf-2.2.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 3.14e+9 +} {abc: (3.14000e+09) :xyz} +do_test printf-2.2.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 3.14e+9 +} {abc: (3.14e+09) :xyz} +do_test printf-2.2.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (3.14e+09) :xyz} +do_test printf-2.2.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (3.14000e+09) :xyz} +do_test printf-2.2.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (003.14e+09) :xyz} +do_test printf-2.2.10.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (3140000000.00000) :xyz} +do_test 
printf-2.2.10.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (3.14000e+09) :xyz} +do_test printf-2.2.10.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 3.14e+9 +} {abc: 5 5 (3.14e+09) :xyz} +do_test printf-2.2.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 -4.72732e+88 +} {abc: (-4.72732e+88) :xyz} +do_test printf-2.2.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 -4.72732e+88 +} {abc: (-4.7273e+88) :xyz} +do_test printf-2.2.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 -4.72732e+88 +} {abc: 5 5 (-4.72732e+88) :xyz} +do_test printf-2.2.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 -4.72732e+88 +} {abc: 5 5 (-4.72732e+88) :xyz} +do_test printf-2.2.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 -4.72732e+88 +} {abc: 5 5 (-4.72732e+88) :xyz} +do_test printf-2.2.11.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 -4.72732e+88 +} {abc: 5 5 (-4.72732e+88) :xyz} +do_test printf-2.2.11.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 -4.72732e+88 +} {abc: 5 5 (-4.7273e+88) :xyz} +do_test printf-2.2.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 9.87991e+143 +} {abc: (9.87991e+143) :xyz} +do_test printf-2.2.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 9.87991e+143 +} {abc: (9.8799e+143) :xyz} +do_test printf-2.2.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 9.87991e+143 +} {abc: 5 5 (9.87991e+143) :xyz} +do_test printf-2.2.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 9.87991e+143 +} {abc: 5 5 (9.87991e+143) :xyz} +do_test printf-2.2.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 9.87991e+143 +} {abc: 5 5 (9.87991e+143) :xyz} +do_test printf-2.2.12.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 9.87991e+143 +} {abc: 5 5 (9.87991e+143) :xyz} +do_test printf-2.2.12.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 9.87991e+143 +} {abc: 5 5 (9.8799e+143) :xyz} +do_test printf-2.2.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 -6.287291e-9 +} {abc: (-0.00000) :xyz} +do_test printf-2.2.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 -6.287291e-9 +} {abc: (-6.28729e-09) :xyz} +do_test printf-2.2.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 -6.287291e-9 +} {abc: (-6.2873e-09) :xyz} +do_test printf-2.2.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-6.28729e-09) :xyz} +do_test printf-2.2.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-6.28729e-09) :xyz} +do_test printf-2.2.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-6.28729e-09) :xyz} +do_test printf-2.2.13.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-0.00000) :xyz} +do_test printf-2.2.13.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-6.28729e-09) :xyz} +do_test printf-2.2.13.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 -6.287291e-9 +} {abc: 5 5 (-6.2873e-09) :xyz} +do_test printf-2.2.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 5 5 3.38826392e-110 +} {abc: (0.00000) :xyz} +do_test printf-2.2.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 5 5 3.38826392e-110 +} {abc: (3.38826e-110) :xyz} +do_test printf-2.2.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 5 5 3.38826392e-110 +} {abc: (3.3883e-110) :xyz} +do_test printf-2.2.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 5 5 
3.38826392e-110 +} {abc: 5 5 (3.38826e-110) :xyz} +do_test printf-2.2.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 5 5 3.38826392e-110 +} {abc: 5 5 (3.38826e-110) :xyz} +do_test printf-2.2.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 5 5 3.38826392e-110 +} {abc: 5 5 (3.38826e-110) :xyz} +do_test printf-2.2.14.7 { + sqlite3_mprintf_double {abc: %d %d (%5.5f) :xyz} 5 5 3.38826392e-110 +} {abc: 5 5 (0.00000) :xyz} +do_test printf-2.2.14.8 { + sqlite3_mprintf_double {abc: %d %d (%5.5e) :xyz} 5 5 3.38826392e-110 +} {abc: 5 5 (3.38826e-110) :xyz} +do_test printf-2.2.14.9 { + sqlite3_mprintf_double {abc: %d %d (%5.5g) :xyz} 5 5 3.38826392e-110 +} {abc: 5 5 (3.3883e-110) :xyz} +do_test printf-2.3.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 0.001 +} {abc: (0.0010000000) :xyz} +do_test printf-2.3.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 0.001 +} {abc: (1.0000000000e-03) :xyz} +do_test printf-2.3.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 0.001 +} {abc: ( 0.001) :xyz} +do_test printf-2.3.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 0.001 +} {abc: 10 10 (0.001) :xyz} +do_test printf-2.3.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 0.001 +} {abc: 10 10 (0.00100000) :xyz} +do_test printf-2.3.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 0.001 +} {abc: 10 10 (000000.001) :xyz} +do_test printf-2.3.1.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 0.001 +} {abc: 10 10 (0.0010000000) :xyz} +do_test printf-2.3.1.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 0.001 +} {abc: 10 10 (1.0000000000e-03) :xyz} +do_test printf-2.3.1.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 0.001 +} {abc: 10 10 ( 0.001) :xyz} +do_test printf-2.3.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 1.0e-20 +} {abc: (0.0000000000) :xyz} +do_test printf-2.3.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 1.0e-20 +} {abc: (1.0000000000e-20) :xyz} +do_test printf-2.3.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 1.0e-20 +} {abc: ( 1e-20) :xyz} +do_test printf-2.3.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 1.0e-20 +} {abc: 10 10 (1e-20) :xyz} +do_test printf-2.3.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 1.0e-20 +} {abc: 10 10 (1.00000e-20) :xyz} +do_test printf-2.3.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 1.0e-20 +} {abc: 10 10 (000001e-20) :xyz} +do_test printf-2.3.2.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 1.0e-20 +} {abc: 10 10 (0.0000000000) :xyz} +do_test printf-2.3.2.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 1.0e-20 +} {abc: 10 10 (1.0000000000e-20) :xyz} +do_test printf-2.3.2.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 1.0e-20 +} {abc: 10 10 ( 1e-20) :xyz} +do_test printf-2.3.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 1.0 +} {abc: (1.0000000000) :xyz} +do_test printf-2.3.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 1.0 +} {abc: (1.0000000000e+00) :xyz} +do_test printf-2.3.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.3.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 1.0 +} {abc: 10 10 (1) :xyz} +do_test printf-2.3.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 1.0 +} {abc: 10 10 (1.00000) :xyz} +do_test printf-2.3.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 1.0 +} {abc: 10 10 (0000000001) :xyz} +do_test 
printf-2.3.3.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 1.0 +} {abc: 10 10 (1.0000000000) :xyz} +do_test printf-2.3.3.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 1.0 +} {abc: 10 10 (1.0000000000e+00) :xyz} +do_test printf-2.3.3.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 1.0 +} {abc: 10 10 ( 1) :xyz} +do_test printf-2.3.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 0.0 +} {abc: (0.0000000000) :xyz} +do_test printf-2.3.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 0.0 +} {abc: (0.0000000000e+00) :xyz} +do_test printf-2.3.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.3.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 0.0 +} {abc: 10 10 (0) :xyz} +do_test printf-2.3.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 0.0 +} {abc: 10 10 (0.00000) :xyz} +do_test printf-2.3.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 0.0 +} {abc: 10 10 (0000000000) :xyz} +do_test printf-2.3.4.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 0.0 +} {abc: 10 10 (0.0000000000) :xyz} +do_test printf-2.3.4.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 0.0 +} {abc: 10 10 (0.0000000000e+00) :xyz} +do_test printf-2.3.4.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 0.0 +} {abc: 10 10 ( 0) :xyz} +do_test printf-2.3.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 100.0 +} {abc: (100.0000000000) :xyz} +do_test printf-2.3.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 100.0 +} {abc: (1.0000000000e+02) :xyz} +do_test printf-2.3.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 100.0 +} {abc: ( 100) :xyz} +do_test printf-2.3.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 100.0 +} {abc: 10 10 (100) :xyz} +do_test printf-2.3.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 100.0 +} {abc: 10 10 (100.000) :xyz} +do_test printf-2.3.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 100.0 +} {abc: 10 10 (0000000100) :xyz} +do_test printf-2.3.5.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 100.0 +} {abc: 10 10 (100.0000000000) :xyz} +do_test printf-2.3.5.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 100.0 +} {abc: 10 10 (1.0000000000e+02) :xyz} +do_test printf-2.3.5.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 100.0 +} {abc: 10 10 ( 100) :xyz} +do_test printf-2.3.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 9.99999 +} {abc: (9.9999900000) :xyz} +do_test printf-2.3.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 9.99999 +} {abc: (9.9999900000e+00) :xyz} +do_test printf-2.3.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 9.99999 +} {abc: ( 9.99999) :xyz} +do_test printf-2.3.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 9.99999 +} {abc: 10 10 (9.99999) :xyz} +do_test printf-2.3.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 9.99999 +} {abc: 10 10 (9.99999) :xyz} +do_test printf-2.3.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 9.99999 +} {abc: 10 10 (0009.99999) :xyz} +do_test printf-2.3.6.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 9.99999 +} {abc: 10 10 (9.9999900000) :xyz} +do_test printf-2.3.6.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 9.99999 +} {abc: 10 10 (9.9999900000e+00) :xyz} +do_test printf-2.3.6.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 9.99999 +} {abc: 10 10 ( 
9.99999) :xyz} +do_test printf-2.3.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 -0.00543 +} {abc: (-0.0054300000) :xyz} +do_test printf-2.3.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 -0.00543 +} {abc: (-5.4300000000e-03) :xyz} +do_test printf-2.3.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 -0.00543 +} {abc: ( -0.00543) :xyz} +do_test printf-2.3.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 -0.00543 +} {abc: 10 10 (-0.00543) :xyz} +do_test printf-2.3.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 -0.00543 +} {abc: 10 10 (-0.00543000) :xyz} +do_test printf-2.3.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 -0.00543 +} {abc: 10 10 (-000.00543) :xyz} +do_test printf-2.3.7.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 -0.00543 +} {abc: 10 10 (-0.0054300000) :xyz} +do_test printf-2.3.7.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 -0.00543 +} {abc: 10 10 (-5.4300000000e-03) :xyz} +do_test printf-2.3.7.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 -0.00543 +} {abc: 10 10 ( -0.00543) :xyz} +do_test printf-2.3.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 -1.0 +} {abc: (-1.0000000000) :xyz} +do_test printf-2.3.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 -1.0 +} {abc: (-1.0000000000e+00) :xyz} +do_test printf-2.3.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 -1.0 +} {abc: ( -1) :xyz} +do_test printf-2.3.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 -1.0 +} {abc: 10 10 (-1) :xyz} +do_test printf-2.3.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 -1.0 +} {abc: 10 10 (-1.00000) :xyz} +do_test printf-2.3.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 -1.0 +} {abc: 10 10 (-000000001) :xyz} +do_test printf-2.3.8.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 -1.0 +} {abc: 10 10 (-1.0000000000) :xyz} +do_test printf-2.3.8.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 -1.0 +} {abc: 10 10 (-1.0000000000e+00) :xyz} +do_test printf-2.3.8.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 -1.0 +} {abc: 10 10 ( -1) :xyz} +do_test printf-2.3.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 -99.99999 +} {abc: (-99.9999900000) :xyz} +do_test printf-2.3.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 -99.99999 +} {abc: (-9.9999990000e+01) :xyz} +do_test printf-2.3.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 -99.99999 +} {abc: ( -99.99999) :xyz} +do_test printf-2.3.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 -99.99999 +} {abc: 10 10 (-100) :xyz} +do_test printf-2.3.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 -99.99999 +} {abc: 10 10 (-100.000) :xyz} +do_test printf-2.3.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 -99.99999 +} {abc: 10 10 (-000000100) :xyz} +do_test printf-2.3.9.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 -99.99999 +} {abc: 10 10 (-99.9999900000) :xyz} +do_test printf-2.3.9.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 -99.99999 +} {abc: 10 10 (-9.9999990000e+01) :xyz} +do_test printf-2.3.9.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 -99.99999 +} {abc: 10 10 ( -99.99999) :xyz} +do_test printf-2.3.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 3.14e+9 +} {abc: (3140000000.0000000000) :xyz} +do_test printf-2.3.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 3.14e+9 +} {abc: (3.1400000000e+09) 
:xyz} +do_test printf-2.3.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 3.14e+9 +} {abc: (3140000000) :xyz} +do_test printf-2.3.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (3.14e+09) :xyz} +do_test printf-2.3.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (3.14000e+09) :xyz} +do_test printf-2.3.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (003.14e+09) :xyz} +do_test printf-2.3.10.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (3140000000.0000000000) :xyz} +do_test printf-2.3.10.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (3.1400000000e+09) :xyz} +do_test printf-2.3.10.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 3.14e+9 +} {abc: 10 10 (3140000000) :xyz} +do_test printf-2.3.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 -4.72732e+88 +} {abc: (-4.7273200000e+88) :xyz} +do_test printf-2.3.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 -4.72732e+88 +} {abc: (-4.72732e+88) :xyz} +do_test printf-2.3.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 -4.72732e+88 +} {abc: 10 10 (-4.72732e+88) :xyz} +do_test printf-2.3.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 -4.72732e+88 +} {abc: 10 10 (-4.72732e+88) :xyz} +do_test printf-2.3.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 -4.72732e+88 +} {abc: 10 10 (-4.72732e+88) :xyz} +do_test printf-2.3.11.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 -4.72732e+88 +} {abc: 10 10 (-4.7273200000e+88) :xyz} +do_test printf-2.3.11.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 -4.72732e+88 +} {abc: 10 10 (-4.72732e+88) :xyz} +do_test printf-2.3.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 9.87991e+143 +} {abc: (9.8799100000e+143) :xyz} +do_test printf-2.3.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 9.87991e+143 +} {abc: (9.87991e+143) :xyz} +do_test printf-2.3.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 9.87991e+143 +} {abc: 10 10 (9.87991e+143) :xyz} +do_test printf-2.3.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 9.87991e+143 +} {abc: 10 10 (9.87991e+143) :xyz} +do_test printf-2.3.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 9.87991e+143 +} {abc: 10 10 (9.87991e+143) :xyz} +do_test printf-2.3.12.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 9.87991e+143 +} {abc: 10 10 (9.8799100000e+143) :xyz} +do_test printf-2.3.12.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 9.87991e+143 +} {abc: 10 10 (9.87991e+143) :xyz} +do_test printf-2.3.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 -6.287291e-9 +} {abc: (-0.0000000063) :xyz} +do_test printf-2.3.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 -6.287291e-9 +} {abc: (-6.2872910000e-09) :xyz} +do_test printf-2.3.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 -6.287291e-9 +} {abc: (-6.287291e-09) :xyz} +do_test printf-2.3.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-6.28729e-09) :xyz} +do_test printf-2.3.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-6.28729e-09) :xyz} +do_test printf-2.3.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-6.28729e-09) :xyz} +do_test printf-2.3.13.7 { + sqlite3_mprintf_double {abc: %d %d 
(%10.10f) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-0.0000000063) :xyz} +do_test printf-2.3.13.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-6.2872910000e-09) :xyz} +do_test printf-2.3.13.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 -6.287291e-9 +} {abc: 10 10 (-6.287291e-09) :xyz} +do_test printf-2.3.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 10 3.38826392e-110 +} {abc: (0.0000000000) :xyz} +do_test printf-2.3.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 10 3.38826392e-110 +} {abc: (3.3882639200e-110) :xyz} +do_test printf-2.3.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 10 3.38826392e-110 +} {abc: (3.38826392e-110) :xyz} +do_test printf-2.3.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (3.38826e-110) :xyz} +do_test printf-2.3.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (3.38826e-110) :xyz} +do_test printf-2.3.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (3.38826e-110) :xyz} +do_test printf-2.3.14.7 { + sqlite3_mprintf_double {abc: %d %d (%10.10f) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (0.0000000000) :xyz} +do_test printf-2.3.14.8 { + sqlite3_mprintf_double {abc: %d %d (%10.10e) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (3.3882639200e-110) :xyz} +do_test printf-2.3.14.9 { + sqlite3_mprintf_double {abc: %d %d (%10.10g) :xyz} 10 10 3.38826392e-110 +} {abc: 10 10 (3.38826392e-110) :xyz} +do_test printf-2.4.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 0.001 +} {abc: ( 0.00100) :xyz} +do_test printf-2.4.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 0.001 +} {abc: (1.00000e-03) :xyz} +do_test printf-2.4.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 0.001 +} {abc: ( 0.001) :xyz} +do_test printf-2.4.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 0.001 +} {abc: 10 5 (0.001) :xyz} +do_test printf-2.4.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 0.001 +} {abc: 10 5 (0.00100000) :xyz} +do_test printf-2.4.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 0.001 +} {abc: 10 5 (000000.001) :xyz} +do_test printf-2.4.1.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 0.001 +} {abc: 10 5 ( 0.00100) :xyz} +do_test printf-2.4.1.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 0.001 +} {abc: 10 5 (1.00000e-03) :xyz} +do_test printf-2.4.1.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 0.001 +} {abc: 10 5 ( 0.001) :xyz} +do_test printf-2.4.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 1.0e-20 +} {abc: ( 0.00000) :xyz} +do_test printf-2.4.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 1.0e-20 +} {abc: (1.00000e-20) :xyz} +do_test printf-2.4.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 1.0e-20 +} {abc: ( 1e-20) :xyz} +do_test printf-2.4.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 1.0e-20 +} {abc: 10 5 (1e-20) :xyz} +do_test printf-2.4.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 1.0e-20 +} {abc: 10 5 (1.00000e-20) :xyz} +do_test printf-2.4.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 1.0e-20 +} {abc: 10 5 (000001e-20) :xyz} +do_test printf-2.4.2.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 1.0e-20 +} {abc: 10 5 ( 0.00000) :xyz} +do_test printf-2.4.2.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 1.0e-20 +} {abc: 10 5 (1.00000e-20) :xyz} +do_test printf-2.4.2.9 
{ + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 1.0e-20 +} {abc: 10 5 ( 1e-20) :xyz} +do_test printf-2.4.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 1.0 +} {abc: ( 1.00000) :xyz} +do_test printf-2.4.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 1.0 +} {abc: (1.00000e+00) :xyz} +do_test printf-2.4.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.4.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 1.0 +} {abc: 10 5 (1) :xyz} +do_test printf-2.4.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 1.0 +} {abc: 10 5 (1.00000) :xyz} +do_test printf-2.4.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 1.0 +} {abc: 10 5 (0000000001) :xyz} +do_test printf-2.4.3.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 1.0 +} {abc: 10 5 ( 1.00000) :xyz} +do_test printf-2.4.3.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 1.0 +} {abc: 10 5 (1.00000e+00) :xyz} +do_test printf-2.4.3.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 1.0 +} {abc: 10 5 ( 1) :xyz} +do_test printf-2.4.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 0.0 +} {abc: ( 0.00000) :xyz} +do_test printf-2.4.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 0.0 +} {abc: (0.00000e+00) :xyz} +do_test printf-2.4.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.4.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 0.0 +} {abc: 10 5 (0) :xyz} +do_test printf-2.4.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 0.0 +} {abc: 10 5 (0.00000) :xyz} +do_test printf-2.4.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 0.0 +} {abc: 10 5 (0000000000) :xyz} +do_test printf-2.4.4.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 0.0 +} {abc: 10 5 ( 0.00000) :xyz} +do_test printf-2.4.4.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 0.0 +} {abc: 10 5 (0.00000e+00) :xyz} +do_test printf-2.4.4.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 0.0 +} {abc: 10 5 ( 0) :xyz} +do_test printf-2.4.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 100.0 +} {abc: ( 100.00000) :xyz} +do_test printf-2.4.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 100.0 +} {abc: (1.00000e+02) :xyz} +do_test printf-2.4.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 100.0 +} {abc: ( 100) :xyz} +do_test printf-2.4.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 100.0 +} {abc: 10 5 (100) :xyz} +do_test printf-2.4.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 100.0 +} {abc: 10 5 (100.000) :xyz} +do_test printf-2.4.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 100.0 +} {abc: 10 5 (0000000100) :xyz} +do_test printf-2.4.5.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 100.0 +} {abc: 10 5 ( 100.00000) :xyz} +do_test printf-2.4.5.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 100.0 +} {abc: 10 5 (1.00000e+02) :xyz} +do_test printf-2.4.5.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 100.0 +} {abc: 10 5 ( 100) :xyz} +do_test printf-2.4.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 9.99999 +} {abc: ( 9.99999) :xyz} +do_test printf-2.4.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 9.99999 +} {abc: (9.99999e+00) :xyz} +do_test printf-2.4.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 9.99999 +} {abc: ( 10) :xyz} +do_test printf-2.4.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 
9.99999 +} {abc: 10 5 (9.99999) :xyz} +do_test printf-2.4.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 9.99999 +} {abc: 10 5 (9.99999) :xyz} +do_test printf-2.4.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 9.99999 +} {abc: 10 5 (0009.99999) :xyz} +do_test printf-2.4.6.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 9.99999 +} {abc: 10 5 ( 9.99999) :xyz} +do_test printf-2.4.6.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 9.99999 +} {abc: 10 5 (9.99999e+00) :xyz} +do_test printf-2.4.6.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 9.99999 +} {abc: 10 5 ( 10) :xyz} +do_test printf-2.4.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 -0.00543 +} {abc: ( -0.00543) :xyz} +do_test printf-2.4.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 -0.00543 +} {abc: (-5.43000e-03) :xyz} +do_test printf-2.4.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 -0.00543 +} {abc: ( -0.00543) :xyz} +do_test printf-2.4.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 -0.00543 +} {abc: 10 5 (-0.00543) :xyz} +do_test printf-2.4.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 -0.00543 +} {abc: 10 5 (-0.00543000) :xyz} +do_test printf-2.4.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 -0.00543 +} {abc: 10 5 (-000.00543) :xyz} +do_test printf-2.4.7.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 -0.00543 +} {abc: 10 5 ( -0.00543) :xyz} +do_test printf-2.4.7.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 -0.00543 +} {abc: 10 5 (-5.43000e-03) :xyz} +do_test printf-2.4.7.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 -0.00543 +} {abc: 10 5 ( -0.00543) :xyz} +do_test printf-2.4.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 -1.0 +} {abc: ( -1.00000) :xyz} +do_test printf-2.4.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 -1.0 +} {abc: (-1.00000e+00) :xyz} +do_test printf-2.4.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 -1.0 +} {abc: ( -1) :xyz} +do_test printf-2.4.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 -1.0 +} {abc: 10 5 (-1) :xyz} +do_test printf-2.4.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 -1.0 +} {abc: 10 5 (-1.00000) :xyz} +do_test printf-2.4.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 -1.0 +} {abc: 10 5 (-000000001) :xyz} +do_test printf-2.4.8.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 -1.0 +} {abc: 10 5 ( -1.00000) :xyz} +do_test printf-2.4.8.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 -1.0 +} {abc: 10 5 (-1.00000e+00) :xyz} +do_test printf-2.4.8.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 -1.0 +} {abc: 10 5 ( -1) :xyz} +do_test printf-2.4.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 -99.99999 +} {abc: ( -99.99999) :xyz} +do_test printf-2.4.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 -99.99999 +} {abc: (-1.00000e+02) :xyz} +do_test printf-2.4.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 -99.99999 +} {abc: ( -100) :xyz} +do_test printf-2.4.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 -99.99999 +} {abc: 10 5 (-100) :xyz} +do_test printf-2.4.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 -99.99999 +} {abc: 10 5 (-100.000) :xyz} +do_test printf-2.4.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 -99.99999 +} {abc: 10 5 (-000000100) :xyz} +do_test printf-2.4.9.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 -99.99999 +} {abc: 10 5 
( -99.99999) :xyz} +do_test printf-2.4.9.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 -99.99999 +} {abc: 10 5 (-1.00000e+02) :xyz} +do_test printf-2.4.9.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 -99.99999 +} {abc: 10 5 ( -100) :xyz} +do_test printf-2.4.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 3.14e+9 +} {abc: (3140000000.00000) :xyz} +do_test printf-2.4.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 3.14e+9 +} {abc: (3.14000e+09) :xyz} +do_test printf-2.4.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 3.14e+9 +} {abc: ( 3.14e+09) :xyz} +do_test printf-2.4.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 3.14e+9 +} {abc: 10 5 (3.14e+09) :xyz} +do_test printf-2.4.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 3.14e+9 +} {abc: 10 5 (3.14000e+09) :xyz} +do_test printf-2.4.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 3.14e+9 +} {abc: 10 5 (003.14e+09) :xyz} +do_test printf-2.4.10.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 3.14e+9 +} {abc: 10 5 (3140000000.00000) :xyz} +do_test printf-2.4.10.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 3.14e+9 +} {abc: 10 5 (3.14000e+09) :xyz} +do_test printf-2.4.10.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 3.14e+9 +} {abc: 10 5 ( 3.14e+09) :xyz} +do_test printf-2.4.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 -4.72732e+88 +} {abc: (-4.72732e+88) :xyz} +do_test printf-2.4.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 -4.72732e+88 +} {abc: (-4.7273e+88) :xyz} +do_test printf-2.4.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 -4.72732e+88 +} {abc: 10 5 (-4.72732e+88) :xyz} +do_test printf-2.4.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 -4.72732e+88 +} {abc: 10 5 (-4.72732e+88) :xyz} +do_test printf-2.4.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 -4.72732e+88 +} {abc: 10 5 (-4.72732e+88) :xyz} +do_test printf-2.4.11.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 -4.72732e+88 +} {abc: 10 5 (-4.72732e+88) :xyz} +do_test printf-2.4.11.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 -4.72732e+88 +} {abc: 10 5 (-4.7273e+88) :xyz} +do_test printf-2.4.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 9.87991e+143 +} {abc: (9.87991e+143) :xyz} +do_test printf-2.4.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 9.87991e+143 +} {abc: (9.8799e+143) :xyz} +do_test printf-2.4.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 9.87991e+143 +} {abc: 10 5 (9.87991e+143) :xyz} +do_test printf-2.4.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 9.87991e+143 +} {abc: 10 5 (9.87991e+143) :xyz} +do_test printf-2.4.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 9.87991e+143 +} {abc: 10 5 (9.87991e+143) :xyz} +do_test printf-2.4.12.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 9.87991e+143 +} {abc: 10 5 (9.87991e+143) :xyz} +do_test printf-2.4.12.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 9.87991e+143 +} {abc: 10 5 (9.8799e+143) :xyz} +do_test printf-2.4.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 -6.287291e-9 +} {abc: ( -0.00000) :xyz} +do_test printf-2.4.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 -6.287291e-9 +} {abc: (-6.28729e-09) :xyz} +do_test printf-2.4.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 -6.287291e-9 +} {abc: (-6.2873e-09) :xyz} +do_test printf-2.4.13.4 { + sqlite3_mprintf_double {abc: 
%d %d (%g) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 (-6.28729e-09) :xyz} +do_test printf-2.4.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 (-6.28729e-09) :xyz} +do_test printf-2.4.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 (-6.28729e-09) :xyz} +do_test printf-2.4.13.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 ( -0.00000) :xyz} +do_test printf-2.4.13.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 (-6.28729e-09) :xyz} +do_test printf-2.4.13.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 -6.287291e-9 +} {abc: 10 5 (-6.2873e-09) :xyz} +do_test printf-2.4.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 10 5 3.38826392e-110 +} {abc: ( 0.00000) :xyz} +do_test printf-2.4.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 10 5 3.38826392e-110 +} {abc: (3.38826e-110) :xyz} +do_test printf-2.4.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 10 5 3.38826392e-110 +} {abc: (3.3883e-110) :xyz} +do_test printf-2.4.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 (3.38826e-110) :xyz} +do_test printf-2.4.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 (3.38826e-110) :xyz} +do_test printf-2.4.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 (3.38826e-110) :xyz} +do_test printf-2.4.14.7 { + sqlite3_mprintf_double {abc: %d %d (%10.5f) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 ( 0.00000) :xyz} +do_test printf-2.4.14.8 { + sqlite3_mprintf_double {abc: %d %d (%10.5e) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 (3.38826e-110) :xyz} +do_test printf-2.4.14.9 { + sqlite3_mprintf_double {abc: %d %d (%10.5g) :xyz} 10 5 3.38826392e-110 +} {abc: 10 5 (3.3883e-110) :xyz} +do_test printf-2.5.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 0.001 +} {abc: (0.00) :xyz} +do_test printf-2.5.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 0.001 +} {abc: (1.00e-03) :xyz} +do_test printf-2.5.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.5.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 0.001 +} {abc: 2 2 (0.001) :xyz} +do_test printf-2.5.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 0.001 +} {abc: 2 2 (0.00100000) :xyz} +do_test printf-2.5.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 0.001 +} {abc: 2 2 (000000.001) :xyz} +do_test printf-2.5.1.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 0.001 +} {abc: 2 2 (0.00) :xyz} +do_test printf-2.5.1.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 0.001 +} {abc: 2 2 (1.00e-03) :xyz} +do_test printf-2.5.1.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 0.001 +} {abc: 2 2 (0.001) :xyz} +do_test printf-2.5.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 1.0e-20 +} {abc: (0.00) :xyz} +do_test printf-2.5.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 1.0e-20 +} {abc: (1.00e-20) :xyz} +do_test printf-2.5.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.5.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 1.0e-20 +} {abc: 2 2 (1e-20) :xyz} +do_test printf-2.5.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 1.0e-20 +} {abc: 2 2 (1.00000e-20) :xyz} +do_test printf-2.5.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 1.0e-20 +} {abc: 2 2 
(000001e-20) :xyz} +do_test printf-2.5.2.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 1.0e-20 +} {abc: 2 2 (0.00) :xyz} +do_test printf-2.5.2.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 1.0e-20 +} {abc: 2 2 (1.00e-20) :xyz} +do_test printf-2.5.2.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 1.0e-20 +} {abc: 2 2 (1e-20) :xyz} +do_test printf-2.5.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 1.0 +} {abc: (1.00) :xyz} +do_test printf-2.5.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 1.0 +} {abc: (1.00e+00) :xyz} +do_test printf-2.5.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.5.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 1.0 +} {abc: 2 2 (1) :xyz} +do_test printf-2.5.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 1.0 +} {abc: 2 2 (1.00000) :xyz} +do_test printf-2.5.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 1.0 +} {abc: 2 2 (0000000001) :xyz} +do_test printf-2.5.3.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 1.0 +} {abc: 2 2 (1.00) :xyz} +do_test printf-2.5.3.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 1.0 +} {abc: 2 2 (1.00e+00) :xyz} +do_test printf-2.5.3.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 1.0 +} {abc: 2 2 ( 1) :xyz} +do_test printf-2.5.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 0.0 +} {abc: (0.00) :xyz} +do_test printf-2.5.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 0.0 +} {abc: (0.00e+00) :xyz} +do_test printf-2.5.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.5.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 0.0 +} {abc: 2 2 (0) :xyz} +do_test printf-2.5.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 0.0 +} {abc: 2 2 (0.00000) :xyz} +do_test printf-2.5.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 0.0 +} {abc: 2 2 (0000000000) :xyz} +do_test printf-2.5.4.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 0.0 +} {abc: 2 2 (0.00) :xyz} +do_test printf-2.5.4.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 0.0 +} {abc: 2 2 (0.00e+00) :xyz} +do_test printf-2.5.4.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 0.0 +} {abc: 2 2 ( 0) :xyz} +do_test printf-2.5.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 100.0 +} {abc: (100.00) :xyz} +do_test printf-2.5.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 100.0 +} {abc: (1.00e+02) :xyz} +do_test printf-2.5.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 100.0 +} {abc: (1e+02) :xyz} +do_test printf-2.5.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 100.0 +} {abc: 2 2 (100) :xyz} +do_test printf-2.5.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 100.0 +} {abc: 2 2 (100.000) :xyz} +do_test printf-2.5.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 100.0 +} {abc: 2 2 (0000000100) :xyz} +do_test printf-2.5.5.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 100.0 +} {abc: 2 2 (100.00) :xyz} +do_test printf-2.5.5.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 100.0 +} {abc: 2 2 (1.00e+02) :xyz} +do_test printf-2.5.5.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 100.0 +} {abc: 2 2 (1e+02) :xyz} +do_test printf-2.5.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 9.99999 +} {abc: (10.00) :xyz} +do_test printf-2.5.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 9.99999 +} {abc: (1.00e+01) :xyz} +do_test 
printf-2.5.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 9.99999 +} {abc: (10) :xyz} +do_test printf-2.5.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 9.99999 +} {abc: 2 2 (9.99999) :xyz} +do_test printf-2.5.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 9.99999 +} {abc: 2 2 (9.99999) :xyz} +do_test printf-2.5.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 9.99999 +} {abc: 2 2 (0009.99999) :xyz} +do_test printf-2.5.6.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 9.99999 +} {abc: 2 2 (10.00) :xyz} +do_test printf-2.5.6.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 9.99999 +} {abc: 2 2 (1.00e+01) :xyz} +do_test printf-2.5.6.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 9.99999 +} {abc: 2 2 (10) :xyz} +do_test printf-2.5.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 -0.00543 +} {abc: (-0.01) :xyz} +do_test printf-2.5.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 -0.00543 +} {abc: (-5.43e-03) :xyz} +do_test printf-2.5.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 -0.00543 +} {abc: (-0.0054) :xyz} +do_test printf-2.5.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-0.00543) :xyz} +do_test printf-2.5.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-0.00543000) :xyz} +do_test printf-2.5.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-000.00543) :xyz} +do_test printf-2.5.7.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-0.01) :xyz} +do_test printf-2.5.7.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-5.43e-03) :xyz} +do_test printf-2.5.7.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 -0.00543 +} {abc: 2 2 (-0.0054) :xyz} +do_test printf-2.5.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 -1.0 +} {abc: (-1.00) :xyz} +do_test printf-2.5.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 -1.0 +} {abc: (-1.00e+00) :xyz} +do_test printf-2.5.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 -1.0 +} {abc: (-1) :xyz} +do_test printf-2.5.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 -1.0 +} {abc: 2 2 (-1) :xyz} +do_test printf-2.5.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 -1.0 +} {abc: 2 2 (-1.00000) :xyz} +do_test printf-2.5.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 -1.0 +} {abc: 2 2 (-000000001) :xyz} +do_test printf-2.5.8.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 -1.0 +} {abc: 2 2 (-1.00) :xyz} +do_test printf-2.5.8.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 -1.0 +} {abc: 2 2 (-1.00e+00) :xyz} +do_test printf-2.5.8.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 -1.0 +} {abc: 2 2 (-1) :xyz} +do_test printf-2.5.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 -99.99999 +} {abc: (-100.00) :xyz} +do_test printf-2.5.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 -99.99999 +} {abc: (-1.00e+02) :xyz} +do_test printf-2.5.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 -99.99999 +} {abc: (-1e+02) :xyz} +do_test printf-2.5.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-100) :xyz} +do_test printf-2.5.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-100.000) :xyz} +do_test printf-2.5.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-000000100) :xyz} +do_test printf-2.5.9.7 { + 
sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-100.00) :xyz} +do_test printf-2.5.9.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-1.00e+02) :xyz} +do_test printf-2.5.9.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 -99.99999 +} {abc: 2 2 (-1e+02) :xyz} +do_test printf-2.5.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 3.14e+9 +} {abc: (3140000000.00) :xyz} +do_test printf-2.5.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 3.14e+9 +} {abc: (3.14e+09) :xyz} +do_test printf-2.5.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 3.14e+9 +} {abc: (3.1e+09) :xyz} +do_test printf-2.5.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (3.14e+09) :xyz} +do_test printf-2.5.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (3.14000e+09) :xyz} +do_test printf-2.5.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (003.14e+09) :xyz} +do_test printf-2.5.10.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (3140000000.00) :xyz} +do_test printf-2.5.10.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (3.14e+09) :xyz} +do_test printf-2.5.10.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 3.14e+9 +} {abc: 2 2 (3.1e+09) :xyz} +do_test printf-2.5.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 -4.72732e+88 +} {abc: (-4.73e+88) :xyz} +do_test printf-2.5.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 -4.72732e+88 +} {abc: (-4.7e+88) :xyz} +do_test printf-2.5.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 -4.72732e+88 +} {abc: 2 2 (-4.72732e+88) :xyz} +do_test printf-2.5.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 -4.72732e+88 +} {abc: 2 2 (-4.72732e+88) :xyz} +do_test printf-2.5.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 -4.72732e+88 +} {abc: 2 2 (-4.72732e+88) :xyz} +do_test printf-2.5.11.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 -4.72732e+88 +} {abc: 2 2 (-4.73e+88) :xyz} +do_test printf-2.5.11.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 -4.72732e+88 +} {abc: 2 2 (-4.7e+88) :xyz} +do_test printf-2.5.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 9.87991e+143 +} {abc: (9.88e+143) :xyz} +do_test printf-2.5.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 9.87991e+143 +} {abc: (9.9e+143) :xyz} +do_test printf-2.5.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 9.87991e+143 +} {abc: 2 2 (9.87991e+143) :xyz} +do_test printf-2.5.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 9.87991e+143 +} {abc: 2 2 (9.87991e+143) :xyz} +do_test printf-2.5.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 9.87991e+143 +} {abc: 2 2 (9.87991e+143) :xyz} +do_test printf-2.5.12.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 9.87991e+143 +} {abc: 2 2 (9.88e+143) :xyz} +do_test printf-2.5.12.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 9.87991e+143 +} {abc: 2 2 (9.9e+143) :xyz} +do_test printf-2.5.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 -6.287291e-9 +} {abc: (-0.00) :xyz} +do_test printf-2.5.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 -6.287291e-9 +} {abc: (-6.29e-09) :xyz} +do_test printf-2.5.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 -6.287291e-9 +} {abc: (-6.3e-09) :xyz} +do_test printf-2.5.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 -6.287291e-9 
+} {abc: 2 2 (-6.28729e-09) :xyz} +do_test printf-2.5.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 -6.287291e-9 +} {abc: 2 2 (-6.28729e-09) :xyz} +do_test printf-2.5.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 -6.287291e-9 +} {abc: 2 2 (-6.28729e-09) :xyz} +do_test printf-2.5.13.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 -6.287291e-9 +} {abc: 2 2 (-0.00) :xyz} +do_test printf-2.5.13.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 -6.287291e-9 +} {abc: 2 2 (-6.29e-09) :xyz} +do_test printf-2.5.13.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 -6.287291e-9 +} {abc: 2 2 (-6.3e-09) :xyz} +do_test printf-2.5.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 2 3.38826392e-110 +} {abc: (0.00) :xyz} +do_test printf-2.5.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 2 3.38826392e-110 +} {abc: (3.39e-110) :xyz} +do_test printf-2.5.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 2 3.38826392e-110 +} {abc: (3.4e-110) :xyz} +do_test printf-2.5.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (3.38826e-110) :xyz} +do_test printf-2.5.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (3.38826e-110) :xyz} +do_test printf-2.5.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (3.38826e-110) :xyz} +do_test printf-2.5.14.7 { + sqlite3_mprintf_double {abc: %d %d (%2.2f) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (0.00) :xyz} +do_test printf-2.5.14.8 { + sqlite3_mprintf_double {abc: %d %d (%2.2e) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (3.39e-110) :xyz} +do_test printf-2.5.14.9 { + sqlite3_mprintf_double {abc: %d %d (%2.2g) :xyz} 2 2 3.38826392e-110 +} {abc: 2 2 (3.4e-110) :xyz} +do_test printf-2.6.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.6.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 0.001 +} {abc: (1.000e-03) :xyz} +do_test printf-2.6.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.6.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 0.001 +} {abc: 2 3 (0.001) :xyz} +do_test printf-2.6.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 0.001 +} {abc: 2 3 (0.00100000) :xyz} +do_test printf-2.6.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 0.001 +} {abc: 2 3 (000000.001) :xyz} +do_test printf-2.6.1.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 0.001 +} {abc: 2 3 (0.001) :xyz} +do_test printf-2.6.1.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 0.001 +} {abc: 2 3 (1.000e-03) :xyz} +do_test printf-2.6.1.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 0.001 +} {abc: 2 3 (0.001) :xyz} +do_test printf-2.6.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 1.0e-20 +} {abc: (0.000) :xyz} +do_test printf-2.6.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 1.0e-20 +} {abc: (1.000e-20) :xyz} +do_test printf-2.6.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.6.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 1.0e-20 +} {abc: 2 3 (1e-20) :xyz} +do_test printf-2.6.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 1.0e-20 +} {abc: 2 3 (1.00000e-20) :xyz} +do_test printf-2.6.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 1.0e-20 +} {abc: 2 3 (000001e-20) :xyz} +do_test printf-2.6.2.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 
1.0e-20 +} {abc: 2 3 (0.000) :xyz} +do_test printf-2.6.2.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 1.0e-20 +} {abc: 2 3 (1.000e-20) :xyz} +do_test printf-2.6.2.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 1.0e-20 +} {abc: 2 3 (1e-20) :xyz} +do_test printf-2.6.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 1.0 +} {abc: (1.000) :xyz} +do_test printf-2.6.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 1.0 +} {abc: (1.000e+00) :xyz} +do_test printf-2.6.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.6.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 1.0 +} {abc: 2 3 (1) :xyz} +do_test printf-2.6.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 1.0 +} {abc: 2 3 (1.00000) :xyz} +do_test printf-2.6.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 1.0 +} {abc: 2 3 (0000000001) :xyz} +do_test printf-2.6.3.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 1.0 +} {abc: 2 3 (1.000) :xyz} +do_test printf-2.6.3.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 1.0 +} {abc: 2 3 (1.000e+00) :xyz} +do_test printf-2.6.3.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 1.0 +} {abc: 2 3 ( 1) :xyz} +do_test printf-2.6.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 0.0 +} {abc: (0.000) :xyz} +do_test printf-2.6.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 0.0 +} {abc: (0.000e+00) :xyz} +do_test printf-2.6.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.6.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 0.0 +} {abc: 2 3 (0) :xyz} +do_test printf-2.6.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 0.0 +} {abc: 2 3 (0.00000) :xyz} +do_test printf-2.6.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 0.0 +} {abc: 2 3 (0000000000) :xyz} +do_test printf-2.6.4.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 0.0 +} {abc: 2 3 (0.000) :xyz} +do_test printf-2.6.4.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 0.0 +} {abc: 2 3 (0.000e+00) :xyz} +do_test printf-2.6.4.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 0.0 +} {abc: 2 3 ( 0) :xyz} +do_test printf-2.6.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 100.0 +} {abc: (100.000) :xyz} +do_test printf-2.6.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 100.0 +} {abc: (1.000e+02) :xyz} +do_test printf-2.6.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 100.0 +} {abc: (100) :xyz} +do_test printf-2.6.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 100.0 +} {abc: 2 3 (100) :xyz} +do_test printf-2.6.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 100.0 +} {abc: 2 3 (100.000) :xyz} +do_test printf-2.6.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 100.0 +} {abc: 2 3 (0000000100) :xyz} +do_test printf-2.6.5.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 100.0 +} {abc: 2 3 (100.000) :xyz} +do_test printf-2.6.5.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 100.0 +} {abc: 2 3 (1.000e+02) :xyz} +do_test printf-2.6.5.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 100.0 +} {abc: 2 3 (100) :xyz} +do_test printf-2.6.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 9.99999 +} {abc: (10.000) :xyz} +do_test printf-2.6.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 9.99999 +} {abc: (1.000e+01) :xyz} +do_test printf-2.6.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 9.99999 +} {abc: (10) :xyz} 
+do_test printf-2.6.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 9.99999 +} {abc: 2 3 (9.99999) :xyz} +do_test printf-2.6.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 9.99999 +} {abc: 2 3 (9.99999) :xyz} +do_test printf-2.6.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 9.99999 +} {abc: 2 3 (0009.99999) :xyz} +do_test printf-2.6.6.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 9.99999 +} {abc: 2 3 (10.000) :xyz} +do_test printf-2.6.6.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 9.99999 +} {abc: 2 3 (1.000e+01) :xyz} +do_test printf-2.6.6.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 9.99999 +} {abc: 2 3 (10) :xyz} +do_test printf-2.6.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 -0.00543 +} {abc: (-0.005) :xyz} +do_test printf-2.6.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 -0.00543 +} {abc: (-5.430e-03) :xyz} +do_test printf-2.6.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 -0.00543 +} {abc: (-0.00543) :xyz} +do_test printf-2.6.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-0.00543) :xyz} +do_test printf-2.6.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-0.00543000) :xyz} +do_test printf-2.6.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-000.00543) :xyz} +do_test printf-2.6.7.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-0.005) :xyz} +do_test printf-2.6.7.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-5.430e-03) :xyz} +do_test printf-2.6.7.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 -0.00543 +} {abc: 2 3 (-0.00543) :xyz} +do_test printf-2.6.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 -1.0 +} {abc: (-1.000) :xyz} +do_test printf-2.6.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 -1.0 +} {abc: (-1.000e+00) :xyz} +do_test printf-2.6.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 -1.0 +} {abc: (-1) :xyz} +do_test printf-2.6.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 -1.0 +} {abc: 2 3 (-1) :xyz} +do_test printf-2.6.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 -1.0 +} {abc: 2 3 (-1.00000) :xyz} +do_test printf-2.6.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 -1.0 +} {abc: 2 3 (-000000001) :xyz} +do_test printf-2.6.8.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 -1.0 +} {abc: 2 3 (-1.000) :xyz} +do_test printf-2.6.8.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 -1.0 +} {abc: 2 3 (-1.000e+00) :xyz} +do_test printf-2.6.8.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 -1.0 +} {abc: 2 3 (-1) :xyz} +do_test printf-2.6.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 -99.99999 +} {abc: (-100.000) :xyz} +do_test printf-2.6.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 -99.99999 +} {abc: (-1.000e+02) :xyz} +do_test printf-2.6.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 -99.99999 +} {abc: (-100) :xyz} +do_test printf-2.6.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-100) :xyz} +do_test printf-2.6.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-100.000) :xyz} +do_test printf-2.6.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-000000100) :xyz} +do_test printf-2.6.9.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-100.000) :xyz} 
+do_test printf-2.6.9.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-1.000e+02) :xyz} +do_test printf-2.6.9.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 -99.99999 +} {abc: 2 3 (-100) :xyz} +do_test printf-2.6.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 3.14e+9 +} {abc: (3140000000.000) :xyz} +do_test printf-2.6.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 3.14e+9 +} {abc: (3.140e+09) :xyz} +do_test printf-2.6.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 3.14e+9 +} {abc: (3.14e+09) :xyz} +do_test printf-2.6.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (3.14e+09) :xyz} +do_test printf-2.6.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (3.14000e+09) :xyz} +do_test printf-2.6.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (003.14e+09) :xyz} +do_test printf-2.6.10.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (3140000000.000) :xyz} +do_test printf-2.6.10.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (3.140e+09) :xyz} +do_test printf-2.6.10.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 3.14e+9 +} {abc: 2 3 (3.14e+09) :xyz} +do_test printf-2.6.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 -4.72732e+88 +} {abc: (-4.727e+88) :xyz} +do_test printf-2.6.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 -4.72732e+88 +} {abc: (-4.73e+88) :xyz} +do_test printf-2.6.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 -4.72732e+88 +} {abc: 2 3 (-4.72732e+88) :xyz} +do_test printf-2.6.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 -4.72732e+88 +} {abc: 2 3 (-4.72732e+88) :xyz} +do_test printf-2.6.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 -4.72732e+88 +} {abc: 2 3 (-4.72732e+88) :xyz} +do_test printf-2.6.11.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 -4.72732e+88 +} {abc: 2 3 (-4.727e+88) :xyz} +do_test printf-2.6.11.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 -4.72732e+88 +} {abc: 2 3 (-4.73e+88) :xyz} +do_test printf-2.6.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 9.87991e+143 +} {abc: (9.880e+143) :xyz} +do_test printf-2.6.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 9.87991e+143 +} {abc: (9.88e+143) :xyz} +do_test printf-2.6.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 9.87991e+143 +} {abc: 2 3 (9.87991e+143) :xyz} +do_test printf-2.6.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 9.87991e+143 +} {abc: 2 3 (9.87991e+143) :xyz} +do_test printf-2.6.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 9.87991e+143 +} {abc: 2 3 (9.87991e+143) :xyz} +do_test printf-2.6.12.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 9.87991e+143 +} {abc: 2 3 (9.880e+143) :xyz} +do_test printf-2.6.12.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 9.87991e+143 +} {abc: 2 3 (9.88e+143) :xyz} +do_test printf-2.6.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 -6.287291e-9 +} {abc: (-0.000) :xyz} +do_test printf-2.6.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 -6.287291e-9 +} {abc: (-6.287e-09) :xyz} +do_test printf-2.6.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 -6.287291e-9 +} {abc: (-6.29e-09) :xyz} +do_test printf-2.6.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-6.28729e-09) :xyz} +do_test printf-2.6.13.5 { + 
sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-6.28729e-09) :xyz} +do_test printf-2.6.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-6.28729e-09) :xyz} +do_test printf-2.6.13.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-0.000) :xyz} +do_test printf-2.6.13.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-6.287e-09) :xyz} +do_test printf-2.6.13.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 -6.287291e-9 +} {abc: 2 3 (-6.29e-09) :xyz} +do_test printf-2.6.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 2 3 3.38826392e-110 +} {abc: (0.000) :xyz} +do_test printf-2.6.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 2 3 3.38826392e-110 +} {abc: (3.388e-110) :xyz} +do_test printf-2.6.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 2 3 3.38826392e-110 +} {abc: (3.39e-110) :xyz} +do_test printf-2.6.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (3.38826e-110) :xyz} +do_test printf-2.6.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (3.38826e-110) :xyz} +do_test printf-2.6.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (3.38826e-110) :xyz} +do_test printf-2.6.14.7 { + sqlite3_mprintf_double {abc: %d %d (%2.3f) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (0.000) :xyz} +do_test printf-2.6.14.8 { + sqlite3_mprintf_double {abc: %d %d (%2.3e) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (3.388e-110) :xyz} +do_test printf-2.6.14.9 { + sqlite3_mprintf_double {abc: %d %d (%2.3g) :xyz} 2 3 3.38826392e-110 +} {abc: 2 3 (3.39e-110) :xyz} +do_test printf-2.7.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.7.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 0.001 +} {abc: (1.000e-03) :xyz} +do_test printf-2.7.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.7.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 0.001 +} {abc: 3 3 (0.001) :xyz} +do_test printf-2.7.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 0.001 +} {abc: 3 3 (0.00100000) :xyz} +do_test printf-2.7.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 0.001 +} {abc: 3 3 (000000.001) :xyz} +do_test printf-2.7.1.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 0.001 +} {abc: 3 3 (0.001) :xyz} +do_test printf-2.7.1.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 0.001 +} {abc: 3 3 (1.000e-03) :xyz} +do_test printf-2.7.1.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 0.001 +} {abc: 3 3 (0.001) :xyz} +do_test printf-2.7.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 1.0e-20 +} {abc: (0.000) :xyz} +do_test printf-2.7.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 1.0e-20 +} {abc: (1.000e-20) :xyz} +do_test printf-2.7.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.7.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (1e-20) :xyz} +do_test printf-2.7.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (1.00000e-20) :xyz} +do_test printf-2.7.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (000001e-20) :xyz} +do_test printf-2.7.2.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (0.000) :xyz} +do_test 
printf-2.7.2.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (1.000e-20) :xyz} +do_test printf-2.7.2.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 1.0e-20 +} {abc: 3 3 (1e-20) :xyz} +do_test printf-2.7.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 1.0 +} {abc: (1.000) :xyz} +do_test printf-2.7.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 1.0 +} {abc: (1.000e+00) :xyz} +do_test printf-2.7.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.7.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 1.0 +} {abc: 3 3 (1) :xyz} +do_test printf-2.7.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 1.0 +} {abc: 3 3 (1.00000) :xyz} +do_test printf-2.7.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 1.0 +} {abc: 3 3 (0000000001) :xyz} +do_test printf-2.7.3.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 1.0 +} {abc: 3 3 (1.000) :xyz} +do_test printf-2.7.3.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 1.0 +} {abc: 3 3 (1.000e+00) :xyz} +do_test printf-2.7.3.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 1.0 +} {abc: 3 3 ( 1) :xyz} +do_test printf-2.7.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 0.0 +} {abc: (0.000) :xyz} +do_test printf-2.7.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 0.0 +} {abc: (0.000e+00) :xyz} +do_test printf-2.7.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.7.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 0.0 +} {abc: 3 3 (0) :xyz} +do_test printf-2.7.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 0.0 +} {abc: 3 3 (0.00000) :xyz} +do_test printf-2.7.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 0.0 +} {abc: 3 3 (0000000000) :xyz} +do_test printf-2.7.4.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 0.0 +} {abc: 3 3 (0.000) :xyz} +do_test printf-2.7.4.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 0.0 +} {abc: 3 3 (0.000e+00) :xyz} +do_test printf-2.7.4.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 0.0 +} {abc: 3 3 ( 0) :xyz} +do_test printf-2.7.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 100.0 +} {abc: (100.000) :xyz} +do_test printf-2.7.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 100.0 +} {abc: (1.000e+02) :xyz} +do_test printf-2.7.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 100.0 +} {abc: (100) :xyz} +do_test printf-2.7.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 100.0 +} {abc: 3 3 (100) :xyz} +do_test printf-2.7.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 100.0 +} {abc: 3 3 (100.000) :xyz} +do_test printf-2.7.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 100.0 +} {abc: 3 3 (0000000100) :xyz} +do_test printf-2.7.5.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 100.0 +} {abc: 3 3 (100.000) :xyz} +do_test printf-2.7.5.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 100.0 +} {abc: 3 3 (1.000e+02) :xyz} +do_test printf-2.7.5.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 100.0 +} {abc: 3 3 (100) :xyz} +do_test printf-2.7.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 9.99999 +} {abc: (10.000) :xyz} +do_test printf-2.7.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 9.99999 +} {abc: (1.000e+01) :xyz} +do_test printf-2.7.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 9.99999 +} {abc: ( 10) :xyz} +do_test printf-2.7.6.4 { + 
sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 9.99999 +} {abc: 3 3 (9.99999) :xyz} +do_test printf-2.7.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 9.99999 +} {abc: 3 3 (9.99999) :xyz} +do_test printf-2.7.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 9.99999 +} {abc: 3 3 (0009.99999) :xyz} +do_test printf-2.7.6.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 9.99999 +} {abc: 3 3 (10.000) :xyz} +do_test printf-2.7.6.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 9.99999 +} {abc: 3 3 (1.000e+01) :xyz} +do_test printf-2.7.6.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 9.99999 +} {abc: 3 3 ( 10) :xyz} +do_test printf-2.7.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 -0.00543 +} {abc: (-0.005) :xyz} +do_test printf-2.7.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 -0.00543 +} {abc: (-5.430e-03) :xyz} +do_test printf-2.7.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 -0.00543 +} {abc: (-0.00543) :xyz} +do_test printf-2.7.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-0.00543) :xyz} +do_test printf-2.7.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-0.00543000) :xyz} +do_test printf-2.7.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-000.00543) :xyz} +do_test printf-2.7.7.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-0.005) :xyz} +do_test printf-2.7.7.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-5.430e-03) :xyz} +do_test printf-2.7.7.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 -0.00543 +} {abc: 3 3 (-0.00543) :xyz} +do_test printf-2.7.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 -1.0 +} {abc: (-1.000) :xyz} +do_test printf-2.7.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 -1.0 +} {abc: (-1.000e+00) :xyz} +do_test printf-2.7.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 -1.0 +} {abc: ( -1) :xyz} +do_test printf-2.7.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 -1.0 +} {abc: 3 3 (-1) :xyz} +do_test printf-2.7.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 -1.0 +} {abc: 3 3 (-1.00000) :xyz} +do_test printf-2.7.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 -1.0 +} {abc: 3 3 (-000000001) :xyz} +do_test printf-2.7.8.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 -1.0 +} {abc: 3 3 (-1.000) :xyz} +do_test printf-2.7.8.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 -1.0 +} {abc: 3 3 (-1.000e+00) :xyz} +do_test printf-2.7.8.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 -1.0 +} {abc: 3 3 ( -1) :xyz} +do_test printf-2.7.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 -99.99999 +} {abc: (-100.000) :xyz} +do_test printf-2.7.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 -99.99999 +} {abc: (-1.000e+02) :xyz} +do_test printf-2.7.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 -99.99999 +} {abc: (-100) :xyz} +do_test printf-2.7.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-100) :xyz} +do_test printf-2.7.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-100.000) :xyz} +do_test printf-2.7.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-000000100) :xyz} +do_test printf-2.7.9.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-100.000) :xyz} +do_test printf-2.7.9.8 { 
+ sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-1.000e+02) :xyz} +do_test printf-2.7.9.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 -99.99999 +} {abc: 3 3 (-100) :xyz} +do_test printf-2.7.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 3.14e+9 +} {abc: (3140000000.000) :xyz} +do_test printf-2.7.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 3.14e+9 +} {abc: (3.140e+09) :xyz} +do_test printf-2.7.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 3.14e+9 +} {abc: (3.14e+09) :xyz} +do_test printf-2.7.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (3.14e+09) :xyz} +do_test printf-2.7.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (3.14000e+09) :xyz} +do_test printf-2.7.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (003.14e+09) :xyz} +do_test printf-2.7.10.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (3140000000.000) :xyz} +do_test printf-2.7.10.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (3.140e+09) :xyz} +do_test printf-2.7.10.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 3.14e+9 +} {abc: 3 3 (3.14e+09) :xyz} +do_test printf-2.7.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 -4.72732e+88 +} {abc: (-4.727e+88) :xyz} +do_test printf-2.7.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 -4.72732e+88 +} {abc: (-4.73e+88) :xyz} +do_test printf-2.7.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 -4.72732e+88 +} {abc: 3 3 (-4.72732e+88) :xyz} +do_test printf-2.7.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 -4.72732e+88 +} {abc: 3 3 (-4.72732e+88) :xyz} +do_test printf-2.7.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 -4.72732e+88 +} {abc: 3 3 (-4.72732e+88) :xyz} +do_test printf-2.7.11.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 -4.72732e+88 +} {abc: 3 3 (-4.727e+88) :xyz} +do_test printf-2.7.11.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 -4.72732e+88 +} {abc: 3 3 (-4.73e+88) :xyz} +do_test printf-2.7.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 9.87991e+143 +} {abc: (9.880e+143) :xyz} +do_test printf-2.7.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 9.87991e+143 +} {abc: (9.88e+143) :xyz} +do_test printf-2.7.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 9.87991e+143 +} {abc: 3 3 (9.87991e+143) :xyz} +do_test printf-2.7.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 9.87991e+143 +} {abc: 3 3 (9.87991e+143) :xyz} +do_test printf-2.7.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 9.87991e+143 +} {abc: 3 3 (9.87991e+143) :xyz} +do_test printf-2.7.12.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 9.87991e+143 +} {abc: 3 3 (9.880e+143) :xyz} +do_test printf-2.7.12.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 9.87991e+143 +} {abc: 3 3 (9.88e+143) :xyz} +do_test printf-2.7.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 -6.287291e-9 +} {abc: (-0.000) :xyz} +do_test printf-2.7.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 -6.287291e-9 +} {abc: (-6.287e-09) :xyz} +do_test printf-2.7.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 -6.287291e-9 +} {abc: (-6.29e-09) :xyz} +do_test printf-2.7.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-6.28729e-09) :xyz} +do_test printf-2.7.13.5 { + sqlite3_mprintf_double {abc: %d %d 
(%#g) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-6.28729e-09) :xyz} +do_test printf-2.7.13.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-6.28729e-09) :xyz} +do_test printf-2.7.13.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-0.000) :xyz} +do_test printf-2.7.13.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-6.287e-09) :xyz} +do_test printf-2.7.13.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 -6.287291e-9 +} {abc: 3 3 (-6.29e-09) :xyz} +do_test printf-2.7.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 3 3.38826392e-110 +} {abc: (0.000) :xyz} +do_test printf-2.7.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 3 3.38826392e-110 +} {abc: (3.388e-110) :xyz} +do_test printf-2.7.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 3 3.38826392e-110 +} {abc: (3.39e-110) :xyz} +do_test printf-2.7.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (3.38826e-110) :xyz} +do_test printf-2.7.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (3.38826e-110) :xyz} +do_test printf-2.7.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (3.38826e-110) :xyz} +do_test printf-2.7.14.7 { + sqlite3_mprintf_double {abc: %d %d (%3.3f) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (0.000) :xyz} +do_test printf-2.7.14.8 { + sqlite3_mprintf_double {abc: %d %d (%3.3e) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (3.388e-110) :xyz} +do_test printf-2.7.14.9 { + sqlite3_mprintf_double {abc: %d %d (%3.3g) :xyz} 3 3 3.38826392e-110 +} {abc: 3 3 (3.39e-110) :xyz} +do_test printf-2.8.1.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 0.001 +} {abc: (0.00) :xyz} +do_test printf-2.8.1.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 0.001 +} {abc: (1.00e-03) :xyz} +do_test printf-2.8.1.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 0.001 +} {abc: (0.001) :xyz} +do_test printf-2.8.1.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 0.001 +} {abc: 3 2 (0.001) :xyz} +do_test printf-2.8.1.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 0.001 +} {abc: 3 2 (0.00100000) :xyz} +do_test printf-2.8.1.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 0.001 +} {abc: 3 2 (000000.001) :xyz} +do_test printf-2.8.1.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 0.001 +} {abc: 3 2 (0.00) :xyz} +do_test printf-2.8.1.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 0.001 +} {abc: 3 2 (1.00e-03) :xyz} +do_test printf-2.8.1.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 0.001 +} {abc: 3 2 (0.001) :xyz} +do_test printf-2.8.2.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 1.0e-20 +} {abc: (0.00) :xyz} +do_test printf-2.8.2.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 1.0e-20 +} {abc: (1.00e-20) :xyz} +do_test printf-2.8.2.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 1.0e-20 +} {abc: (1e-20) :xyz} +do_test printf-2.8.2.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (1e-20) :xyz} +do_test printf-2.8.2.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (1.00000e-20) :xyz} +do_test printf-2.8.2.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (000001e-20) :xyz} +do_test printf-2.8.2.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (0.00) :xyz} +do_test printf-2.8.2.8 { + sqlite3_mprintf_double {abc: %d %d 
(%3.2e) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (1.00e-20) :xyz} +do_test printf-2.8.2.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 1.0e-20 +} {abc: 3 2 (1e-20) :xyz} +do_test printf-2.8.3.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 1.0 +} {abc: (1.00) :xyz} +do_test printf-2.8.3.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 1.0 +} {abc: (1.00e+00) :xyz} +do_test printf-2.8.3.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 1.0 +} {abc: ( 1) :xyz} +do_test printf-2.8.3.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 1.0 +} {abc: 3 2 (1) :xyz} +do_test printf-2.8.3.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 1.0 +} {abc: 3 2 (1.00000) :xyz} +do_test printf-2.8.3.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 1.0 +} {abc: 3 2 (0000000001) :xyz} +do_test printf-2.8.3.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 1.0 +} {abc: 3 2 (1.00) :xyz} +do_test printf-2.8.3.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 1.0 +} {abc: 3 2 (1.00e+00) :xyz} +do_test printf-2.8.3.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 1.0 +} {abc: 3 2 ( 1) :xyz} +do_test printf-2.8.4.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 0.0 +} {abc: (0.00) :xyz} +do_test printf-2.8.4.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 0.0 +} {abc: (0.00e+00) :xyz} +do_test printf-2.8.4.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 0.0 +} {abc: ( 0) :xyz} +do_test printf-2.8.4.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 0.0 +} {abc: 3 2 (0) :xyz} +do_test printf-2.8.4.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 0.0 +} {abc: 3 2 (0.00000) :xyz} +do_test printf-2.8.4.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 0.0 +} {abc: 3 2 (0000000000) :xyz} +do_test printf-2.8.4.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 0.0 +} {abc: 3 2 (0.00) :xyz} +do_test printf-2.8.4.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 0.0 +} {abc: 3 2 (0.00e+00) :xyz} +do_test printf-2.8.4.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 0.0 +} {abc: 3 2 ( 0) :xyz} +do_test printf-2.8.5.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 100.0 +} {abc: (100.00) :xyz} +do_test printf-2.8.5.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 100.0 +} {abc: (1.00e+02) :xyz} +do_test printf-2.8.5.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 100.0 +} {abc: (1e+02) :xyz} +do_test printf-2.8.5.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 100.0 +} {abc: 3 2 (100) :xyz} +do_test printf-2.8.5.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 100.0 +} {abc: 3 2 (100.000) :xyz} +do_test printf-2.8.5.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 100.0 +} {abc: 3 2 (0000000100) :xyz} +do_test printf-2.8.5.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 100.0 +} {abc: 3 2 (100.00) :xyz} +do_test printf-2.8.5.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 100.0 +} {abc: 3 2 (1.00e+02) :xyz} +do_test printf-2.8.5.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 100.0 +} {abc: 3 2 (1e+02) :xyz} +do_test printf-2.8.6.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 9.99999 +} {abc: (10.00) :xyz} +do_test printf-2.8.6.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 9.99999 +} {abc: (1.00e+01) :xyz} +do_test printf-2.8.6.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 9.99999 +} {abc: ( 10) :xyz} +do_test printf-2.8.6.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 9.99999 +} {abc: 3 2 (9.99999) 
:xyz} +do_test printf-2.8.6.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 9.99999 +} {abc: 3 2 (9.99999) :xyz} +do_test printf-2.8.6.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 9.99999 +} {abc: 3 2 (0009.99999) :xyz} +do_test printf-2.8.6.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 9.99999 +} {abc: 3 2 (10.00) :xyz} +do_test printf-2.8.6.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 9.99999 +} {abc: 3 2 (1.00e+01) :xyz} +do_test printf-2.8.6.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 9.99999 +} {abc: 3 2 ( 10) :xyz} +do_test printf-2.8.7.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 -0.00543 +} {abc: (-0.01) :xyz} +do_test printf-2.8.7.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 -0.00543 +} {abc: (-5.43e-03) :xyz} +do_test printf-2.8.7.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 -0.00543 +} {abc: (-0.0054) :xyz} +do_test printf-2.8.7.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-0.00543) :xyz} +do_test printf-2.8.7.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-0.00543000) :xyz} +do_test printf-2.8.7.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-000.00543) :xyz} +do_test printf-2.8.7.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-0.01) :xyz} +do_test printf-2.8.7.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-5.43e-03) :xyz} +do_test printf-2.8.7.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 -0.00543 +} {abc: 3 2 (-0.0054) :xyz} +do_test printf-2.8.8.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 -1.0 +} {abc: (-1.00) :xyz} +do_test printf-2.8.8.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 -1.0 +} {abc: (-1.00e+00) :xyz} +do_test printf-2.8.8.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 -1.0 +} {abc: ( -1) :xyz} +do_test printf-2.8.8.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 -1.0 +} {abc: 3 2 (-1) :xyz} +do_test printf-2.8.8.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 -1.0 +} {abc: 3 2 (-1.00000) :xyz} +do_test printf-2.8.8.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 -1.0 +} {abc: 3 2 (-000000001) :xyz} +do_test printf-2.8.8.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 -1.0 +} {abc: 3 2 (-1.00) :xyz} +do_test printf-2.8.8.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 -1.0 +} {abc: 3 2 (-1.00e+00) :xyz} +do_test printf-2.8.8.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 -1.0 +} {abc: 3 2 ( -1) :xyz} +do_test printf-2.8.9.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 -99.99999 +} {abc: (-100.00) :xyz} +do_test printf-2.8.9.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 -99.99999 +} {abc: (-1.00e+02) :xyz} +do_test printf-2.8.9.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 -99.99999 +} {abc: (-1e+02) :xyz} +do_test printf-2.8.9.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-100) :xyz} +do_test printf-2.8.9.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-100.000) :xyz} +do_test printf-2.8.9.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-000000100) :xyz} +do_test printf-2.8.9.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-100.00) :xyz} +do_test printf-2.8.9.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-1.00e+02) 
:xyz} +do_test printf-2.8.9.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 -99.99999 +} {abc: 3 2 (-1e+02) :xyz} +do_test printf-2.8.10.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 3.14e+9 +} {abc: (3140000000.00) :xyz} +do_test printf-2.8.10.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 3.14e+9 +} {abc: (3.14e+09) :xyz} +do_test printf-2.8.10.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 3.14e+9 +} {abc: (3.1e+09) :xyz} +do_test printf-2.8.10.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (3.14e+09) :xyz} +do_test printf-2.8.10.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (3.14000e+09) :xyz} +do_test printf-2.8.10.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (003.14e+09) :xyz} +do_test printf-2.8.10.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (3140000000.00) :xyz} +do_test printf-2.8.10.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (3.14e+09) :xyz} +do_test printf-2.8.10.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 3.14e+9 +} {abc: 3 2 (3.1e+09) :xyz} +do_test printf-2.8.11.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 -4.72732e+88 +} {abc: (-4.73e+88) :xyz} +do_test printf-2.8.11.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 -4.72732e+88 +} {abc: (-4.7e+88) :xyz} +do_test printf-2.8.11.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 -4.72732e+88 +} {abc: 3 2 (-4.72732e+88) :xyz} +do_test printf-2.8.11.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 -4.72732e+88 +} {abc: 3 2 (-4.72732e+88) :xyz} +do_test printf-2.8.11.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 -4.72732e+88 +} {abc: 3 2 (-4.72732e+88) :xyz} +do_test printf-2.8.11.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 -4.72732e+88 +} {abc: 3 2 (-4.73e+88) :xyz} +do_test printf-2.8.11.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 -4.72732e+88 +} {abc: 3 2 (-4.7e+88) :xyz} +do_test printf-2.8.12.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 9.87991e+143 +} {abc: (9.88e+143) :xyz} +do_test printf-2.8.12.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 9.87991e+143 +} {abc: (9.9e+143) :xyz} +do_test printf-2.8.12.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 9.87991e+143 +} {abc: 3 2 (9.87991e+143) :xyz} +do_test printf-2.8.12.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 9.87991e+143 +} {abc: 3 2 (9.87991e+143) :xyz} +do_test printf-2.8.12.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 9.87991e+143 +} {abc: 3 2 (9.87991e+143) :xyz} +do_test printf-2.8.12.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 9.87991e+143 +} {abc: 3 2 (9.88e+143) :xyz} +do_test printf-2.8.12.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 9.87991e+143 +} {abc: 3 2 (9.9e+143) :xyz} +do_test printf-2.8.13.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 -6.287291e-9 +} {abc: (-0.00) :xyz} +do_test printf-2.8.13.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 -6.287291e-9 +} {abc: (-6.29e-09) :xyz} +do_test printf-2.8.13.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 -6.287291e-9 +} {abc: (-6.3e-09) :xyz} +do_test printf-2.8.13.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-6.28729e-09) :xyz} +do_test printf-2.8.13.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-6.28729e-09) :xyz} +do_test printf-2.8.13.6 { + 
sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-6.28729e-09) :xyz} +do_test printf-2.8.13.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-0.00) :xyz} +do_test printf-2.8.13.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-6.29e-09) :xyz} +do_test printf-2.8.13.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 -6.287291e-9 +} {abc: 3 2 (-6.3e-09) :xyz} +do_test printf-2.8.14.1 { + sqlite3_mprintf_double {abc: (%*.*f) :xyz} 3 2 3.38826392e-110 +} {abc: (0.00) :xyz} +do_test printf-2.8.14.2 { + sqlite3_mprintf_double {abc: (%*.*e) :xyz} 3 2 3.38826392e-110 +} {abc: (3.39e-110) :xyz} +do_test printf-2.8.14.3 { + sqlite3_mprintf_double {abc: (%*.*g) :xyz} 3 2 3.38826392e-110 +} {abc: (3.4e-110) :xyz} +do_test printf-2.8.14.4 { + sqlite3_mprintf_double {abc: %d %d (%g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (3.38826e-110) :xyz} +do_test printf-2.8.14.5 { + sqlite3_mprintf_double {abc: %d %d (%#g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (3.38826e-110) :xyz} +do_test printf-2.8.14.6 { + sqlite3_mprintf_double {abc: %d %d (%010g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (3.38826e-110) :xyz} +do_test printf-2.8.14.7 { + sqlite3_mprintf_double {abc: %d %d (%3.2f) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (0.00) :xyz} +do_test printf-2.8.14.8 { + sqlite3_mprintf_double {abc: %d %d (%3.2e) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (3.39e-110) :xyz} +do_test printf-2.8.14.9 { + sqlite3_mprintf_double {abc: %d %d (%3.2g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 (3.4e-110) :xyz} +do_test printf-2.8.15.1 { + sqlite3_mprintf_double {abc: (% *.*f) :xyz} 3 2 3.38826392e-110 +} {abc: ( 0.00) :xyz} +do_test printf-2.8.15.2 { + sqlite3_mprintf_double {abc: (% *.*e) :xyz} 3 2 3.38826392e-110 +} {abc: ( 3.39e-110) :xyz} +do_test printf-2.8.15.3 { + sqlite3_mprintf_double {abc: (% *.*g) :xyz} 3 2 3.38826392e-110 +} {abc: ( 3.4e-110) :xyz} +do_test printf-2.8.15.4 { + sqlite3_mprintf_double {abc: %d %d (% g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 3.38826e-110) :xyz} +do_test printf-2.8.15.5 { + sqlite3_mprintf_double {abc: %d %d (% #g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 3.38826e-110) :xyz} +do_test printf-2.8.15.6 { + sqlite3_mprintf_double {abc: %d %d (%0 10g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 3.38826e-110) :xyz} +do_test printf-2.8.15.7 { + sqlite3_mprintf_double {abc: %d %d (% 3.2f) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 0.00) :xyz} +do_test printf-2.8.15.8 { + sqlite3_mprintf_double {abc: %d %d (% 3.2e) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 3.39e-110) :xyz} +do_test printf-2.8.15.9 { + sqlite3_mprintf_double {abc: %d %d (% 3.2g) :xyz} 3 2 3.38826392e-110 +} {abc: 3 2 ( 3.4e-110) :xyz} + +do_test printf-2.9.1 { + sqlite3_mprintf_double {abc: %d %d (%5.0g) :xyz} 0 0 1.234 +} {abc: 0 0 ( 1) :xyz} +do_test printf-2.9.2 { + sqlite3_mprintf_double {abc: %d %d (%+5.0g) :xyz} 0 0 1.234 +} {abc: 0 0 ( +1) :xyz} +do_test printf-2.9.3 { + sqlite3_mprintf_double {abc: %d %d (%+-5.0g) :xyz} 0 0 1.234 +} {abc: 0 0 (+1 ) :xyz} + +do_test printf-2.10.1 { + sqlite3_mprintf_double {abc: %d %d (%-010.5f) :xyz} 0 0 1.234 +} {abc: 0 0 (1.23400 ) :xyz} +do_test printf-2.10.2 { + sqlite3_mprintf_double {abc: %d %d (%010.5f) :xyz} 0 0 1.234 +} {abc: 0 0 (0001.23400) :xyz} +do_test printf-2.10.3 { + sqlite3_mprintf_double {abc: %d %d (%+010.5f) :xyz} 0 0 1.234 +} {abc: 0 0 (+001.23400) :xyz} do_test printf-3.1 { sqlite3_mprintf_str {A String: (%*.*s)} 10 10 {This is the string} @@ -167,7 +3540,7 @@ 
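The printf-2.x cases above drive SQLite's formatter through the test-harness command sqlite3_mprintf_double, which always passes two integers followed by one double; with a %*.* conversion those two integers supply the field width and precision. The same width/precision substitution can be illustrated with plain Tcl's format command (Tcl's own formatter, not SQLite's, so the outputs are for comparison only):

    # Width 3 and precision 2 are taken from the argument list, as in the
    # printf-2.8.3.x cases above.
    puts [format {abc: (%*.*f) :xyz} 3 2 1.0]   ;# abc: (1.00) :xyz
    puts [format {abc: (%*.*e) :xyz} 3 2 1.0]   ;# abc: (1.00e+00) :xyz
    puts [format {abc: (%*.*g) :xyz} 3 2 1.0]   ;# %g drops trailing zeros, then pads to width 3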
sqlite3_mprintf_int {%u %u %u} 0x7fffffff 0x80000000 0xffffffff } {2147483647 2147483648 4294967295} do_test printf-8.2 { - sqlite3_mprintf_int {%lu %lu %lu} 0x7fffffff 0x80000000 0xffffffff + sqlite3_mprintf_long {%lu %lu %lu} 0x7fffffff 0x80000000 0xffffffff } {2147483647 2147483648 4294967295} do_test printf-8.3 { sqlite3_mprintf_int64 {%llu %llu %llu} 2147483647 2147483648 4294967296 @@ -181,6 +3554,9 @@ do_test printf-8.6 { sqlite3_mprintf_int64 {%llx %llo %lld} -1 -1 -1 } {ffffffffffffffff 1777777777777777777777 -1} +do_test printf-8.7 { + sqlite3_mprintf_int64 {%llx %llx %llx} +2147483647 +2147483648 +4294967296 +} {7fffffff 80000000 100000000} do_test printf-9.1 { sqlite3_mprintf_int {%*.*c} 4 4 65 @@ -282,6 +3658,28 @@ do_test printf-14.3 { sqlite3_mprintf_str {abc-%T-123} 0 0 {not used} } {abc-} +do_test printf-14.4 { + sqlite3_mprintf_str {abc-%#} 0 0 {not used} +} {abc-} +do_test printf-14.5 { + sqlite3_mprintf_str {abc-%*.*s-xyz} 10 -10 {a_very_long_string} +} {abc-a_very_lon-xyz} +do_test printf-14.6 { + sqlite3_mprintf_str {abc-%5.10/} 0 0 {not used} +} {abc-} +do_test printf-14.7 { + sqlite3_mprintf_str {abc-%05.5d} 123 0 {not used} +} {abc-00123} +do_test printf-14.8 { + sqlite3_mprintf_str {abc-%05.5d} 1234567 0 {not used} +} {abc-1234567} + +for {set i 2} {$i<200} {incr i} { + set res [string repeat { } [expr {$i-1}]]x + do_test printf-14.90.$i " + sqlite3_mprintf_str {%*.*s} $i 500 x + " $res +} do_test printf-15.1 { sqlite3_snprintf_int 5 {12345} 0 @@ -293,4 +3691,28 @@ sqlite3_snprintf_int 0 {} 0 } {abcdefghijklmnopqrstuvwxyz} +# Now test malloc() failure within a sqlite3_mprintf(): +# +ifcapable memdebug { + foreach var {a b c d} { + set $var [string repeat $var 400] + } + set str1 "[string repeat A 360]%d%d%s" + set str2 [string repeat B 5000] + set zSuccess "[string repeat A 360]11[string repeat B 5000]" + foreach ::iRepeat {0 1} { + set nTestNum 1 + while {1} { + sqlite3_memdebug_fail $nTestNum -repeat $::iRepeat + set z [sqlite3_mprintf_str $str1 1 1 $str2] + set nFail [sqlite3_memdebug_fail -1 -benign nBenign] + do_test printf-malloc-$::iRepeat.$nTestNum { + expr {($nFail>0 && $z eq "") || ($nFail==$nBenign && $z eq $zSuccess)} + } {1} + if {$nFail == 0} break + incr nTestNum + } + } +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/ptrchng.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/ptrchng.test --- sqlite3-3.4.2/test/ptrchng.test 2007-04-27 18:16:22.000000000 +0100 +++ sqlite3-3.6.16/test/ptrchng.test 2009-06-05 18:03:40.000000000 +0100 @@ -21,11 +21,16 @@ # sqlite3_value_bytes() # sqlite3_value_bytes16() # -# $Id: ptrchng.test,v 1.1 2007/04/27 17:16:22 drh Exp $ +# $Id: ptrchng.test,v 1.5 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !bloblit { + finish_test + return +} + # Register the "pointer_change" SQL function. # sqlite3_create_function db @@ -47,6 +52,8 @@ # For the short entries that fit in the Mem.zBuf[], the pointer should # never change regardless of what type conversions occur. # +# UPDATE: No longer true, as Mem.zBuf[] has been removed. 
+# do_test ptrchng-2.1 { execsql { SELECT pointer_change(y, 'text', 'noop', 'blob') FROM t1 WHERE x=1 @@ -62,12 +69,12 @@ execsql { SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=1 } - } {0} + } {1} do_test ptrchng-2.4 { execsql { SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=1 } - } {0} + } {1} do_test ptrchng-2.5 { execsql { SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=1 @@ -77,7 +84,7 @@ execsql { SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=1 } - } {0} + } {1} } do_test ptrchng-2.11 { execsql { @@ -94,23 +101,22 @@ execsql { SELECT pointer_change(y, 'text', 'noop', 'text16') FROM t1 WHERE x=3 } - } {0} + } {1} do_test ptrchng-2.14 { execsql { SELECT pointer_change(y, 'blob', 'noop', 'text16') FROM t1 WHERE x=3 } - } {0} + } {1} do_test ptrchng-2.15 { execsql { SELECT pointer_change(y, 'text16', 'noop', 'blob') FROM t1 WHERE x=3 } } {0} do_test ptrchng-2.16 { -btree_breakpoint execsql { SELECT pointer_change(y, 'text16', 'noop', 'text') FROM t1 WHERE x=3 } - } {0} + } {1} } # For the long entries that do not fit in the Mem.zBuf[], the pointer diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/quick.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/quick.test --- sqlite3-3.4.2/test/quick.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/quick.test 2009-06-12 03:37:54.000000000 +0100 @@ -6,7 +6,7 @@ #*********************************************************************** # This file runs all tests. # -# $Id: quick.test,v 1.59 2007/06/18 12:22:43 drh Exp $ +# $Id: quick.test,v 1.95 2009/03/16 14:48:19 danielk1977 Exp $ proc lshift {lvar} { upvar $lvar l @@ -22,6 +22,9 @@ -soak { set SOAKTEST 1 } + -start { + set STARTAT "[lshift argv]*" + } default { set argv [linsert $argv 0 $arg] break @@ -32,44 +35,69 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl rename finish_test really_finish_test -proc finish_test {} {} +proc finish_test {} { + catch {db close} + show_memstats +} set ISQUICK 1 set EXCLUDE { all.test async.test async2.test - btree2.test - btree3.test - btree4.test - btree5.test - btree6.test + async3.test + backup_ioerr.test corrupt.test + corruptC.test crash.test crash2.test - exclusive3.test + crash3.test + crash4.test + crash5.test + crash6.test + crash7.test + delete3.test + fts3.test fuzz.test + fuzz3.test fuzz_malloc.test in2.test loadext.test - malloc.test - malloc2.test - malloc3.test memleak.test misc7.test misuse.test + mutex2.test + notify2.test + onefile.test + permutations.test quick.test + savepoint4.test + savepoint6.test + select9.test soak.test speed1.test + speed1p.test speed2.test + speed3.test + speed4.test + speed4p.test sqllimits1.test + tkt2686.test + thread001.test + thread002.test + thread003.test + thread004.test + thread005.test + trans2.test + vacuum3.test incrvacuum_ioerr.test autovacuum_crash.test btree8.test - utf16.test shared_err.test vtab_err.test + veryquick.test + mallocAll.test } if {[sqlite3 -has-codec]} { @@ -84,10 +112,29 @@ set INCLUDE { } +# If the QUICKTEST_INCLUDE environment variable is set, then interpret +# it as a list of test files. Always run these files, even if they +# begin with "malloc*" or "ioerr*" or are part of the EXCLUDE list +# defined above. 
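The QUICKTEST_INCLUDE hook described just above forces the named test files to run even when they match the EXCLUDE list or the malloc*/ioerr* patterns. A minimal sketch of using it, assuming the usual testfixture binary built next to the source tree (the path and the chosen file names are illustrative):

    # Force malloc.test and ioerr.test to run during a quick.test pass.
    set env(QUICKTEST_INCLUDE) {malloc.test ioerr.test}
    exec ./testfixture test/quick.test >@ stdout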
+# +set QUICKTEST_INCLUDE {} +catch { set QUICKTEST_INCLUDE $env(QUICKTEST_INCLUDE) } + foreach testfile [lsort -dictionary [glob $testdir/*.test]] { set tail [file tail $testfile] - if {[lsearch -exact $EXCLUDE $tail]>=0} continue + if { [lsearch $QUICKTEST_INCLUDE $tail]<0 } { + # If this is "veryquick.test", do not run any of the malloc or + # IO error simulations. + if {[info exists ISVERYQUICK] && ( + [string match *malloc* $testfile] || [string match *ioerr* $testfile] + ) } { + continue + } + if {[lsearch -exact $EXCLUDE $tail]>=0} continue + } if {[llength $INCLUDE]>0 && [lsearch -exact $INCLUDE $tail]<0} continue + if {[info exists STARTAT] && [string match $STARTAT $tail]} {unset STARTAT} + if {[info exists STARTAT]} continue source $testfile catch {db close} if {$sqlite_open_file_count>0} { @@ -96,13 +143,10 @@ lappend ::failList $tail set sqlite_open_file_count 0 } - if {$::sqlite3_tsd_count} { - puts "Thread-specific data leak: $::sqlite3_tsd_count instances" - incr nErr - lappend ::failList $tail - set ::sqlite3_tsd_count 0 - } } +#set argv quick +#source $testdir/permutations.test +#set argv "" source $testdir/misuse.test set sqlite_open_file_count 0 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/randexpr1.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/randexpr1.tcl --- sqlite3-3.4.2/test/randexpr1.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/randexpr1.tcl 2008-12-15 16:33:30.000000000 +0000 @@ -0,0 +1,342 @@ +# Run this TCL script to generate thousands of test cases containing +# complicated expressions. +# +# The generated tests are intended to verify expression evaluation +# in SQLite against expression evaluation TCL. +# + +# Terms of the $intexpr list each contain two sub-terms. +# +# * An SQL expression template +# * The equivalent TCL expression +# +# EXPR is replaced by an integer subexpression. BOOL is replaced +# by a boolean subexpression. +# +set intexpr { + {11 wide(11)} + {13 wide(13)} + {17 wide(17)} + {19 wide(19)} + {a $a} + {b $b} + {c $c} + {d $d} + {e $e} + {f $f} + {t1.a $a} + {t1.b $b} + {t1.c $c} + {t1.d $d} + {t1.e $e} + {t1.f $f} + {(EXPR) (EXPR)} + {{ -EXPR} {-EXPR}} + {+EXPR +EXPR} + {~EXPR ~EXPR} + {EXPR+EXPR EXPR+EXPR} + {EXPR-EXPR EXPR-EXPR} + {EXPR*EXPR EXPR*EXPR} + {EXPR+EXPR EXPR+EXPR} + {EXPR-EXPR EXPR-EXPR} + {EXPR*EXPR EXPR*EXPR} + {EXPR+EXPR EXPR+EXPR} + {EXPR-EXPR EXPR-EXPR} + {EXPR*EXPR EXPR*EXPR} + {{EXPR | EXPR} {EXPR | EXPR}} + {(abs(EXPR)/abs(EXPR)) (abs(EXPR)/abs(EXPR))} + { + {case when BOOL then EXPR else EXPR end} + {((BOOL)?EXPR:EXPR)} + } + { + {case when BOOL then EXPR when BOOL then EXPR else EXPR end} + {((BOOL)?EXPR:((BOOL)?EXPR:EXPR))} + } + { + {case EXPR when EXPR then EXPR else EXPR end} + {(((EXPR)==(EXPR))?EXPR:EXPR)} + } + { + {(select AGG from t1)} + {(AGG)} + } + { + {coalesce((select max(EXPR) from t1 where BOOL),EXPR)} + {[coalesce_subquery [expr {EXPR}] [expr {BOOL}] [expr {EXPR}]]} + } + { + {coalesce((select EXPR from t1 where BOOL),EXPR)} + {[coalesce_subquery [expr {EXPR}] [expr {BOOL}] [expr {EXPR}]]} + } +} + +# The $boolexpr list contains terms that show both an SQL boolean +# expression and its equivalent TCL. 
+# +set boolexpr { + {EXPR=EXPR ((EXPR)==(EXPR))} + {EXPREXPR ((EXPR)>(EXPR))} + {EXPR<=EXPR ((EXPR)<=(EXPR))} + {EXPR>=EXPR ((EXPR)>=(EXPR))} + {EXPR<>EXPR ((EXPR)!=(EXPR))} + { + {EXPR between EXPR and EXPR} + {[betweenop [expr {EXPR}] [expr {EXPR}] [expr {EXPR}]]} + } + { + {EXPR not between EXPR and EXPR} + {(![betweenop [expr {EXPR}] [expr {EXPR}] [expr {EXPR}]])} + } + { + {EXPR in (EXPR,EXPR,EXPR)} + {([inop [expr {EXPR}] [expr {EXPR}] [expr {EXPR}] [expr {EXPR}]])} + } + { + {EXPR not in (EXPR,EXPR,EXPR)} + {(![inop [expr {EXPR}] [expr {EXPR}] [expr {EXPR}] [expr {EXPR}]])} + } + { + {EXPR in (select EXPR from t1 union select EXPR from t1)} + {[inop [expr {EXPR}] [expr {EXPR}] [expr {EXPR}]]} + } + { + {EXPR in (select AGG from t1 union select AGG from t1)} + {[inop [expr {EXPR}] [expr {AGG}] [expr {AGG}]]} + } + { + {exists(select 1 from t1 where BOOL)} + {(BOOL)} + } + { + {not exists(select 1 from t1 where BOOL)} + {!(BOOL)} + } + {{not BOOL} !BOOL} + {{BOOL and BOOL} {BOOL tcland BOOL}} + {{BOOL or BOOL} {BOOL || BOOL}} + {{BOOL and BOOL} {BOOL tcland BOOL}} + {{BOOL or BOOL} {BOOL || BOOL}} + {(BOOL) (BOOL)} + {(BOOL) (BOOL)} +} + +# Aggregate expressions +# +set aggexpr { + {count(*) wide(1)} + {{count(distinct EXPR)} {[one {EXPR}]}} + {{cast(avg(EXPR) AS integer)} (EXPR)} + {min(EXPR) (EXPR)} + {max(EXPR) (EXPR)} + {(AGG) (AGG)} + {{ -AGG} {-AGG}} + {+AGG +AGG} + {~AGG ~AGG} + {abs(AGG) abs(AGG)} + {AGG+AGG AGG+AGG} + {AGG-AGG AGG-AGG} + {AGG*AGG AGG*AGG} + {{AGG | AGG} {AGG | AGG}} + { + {case AGG when AGG then AGG else AGG end} + {(((AGG)==(AGG))?AGG:AGG)} + } +} + +# Convert a string containing EXPR, AGG, and BOOL into a string +# that contains nothing but X, Y, and Z. +# +proc extract_vars {a} { + regsub -all {EXPR} $a X a + regsub -all {AGG} $a Y a + regsub -all {BOOL} $a Z a + regsub -all {[^XYZ]} $a {} a + return $a +} + + +# Test all templates to make sure the number of EXPR, AGG, and BOOL +# expressions match. +# +foreach term [concat $aggexpr $intexpr $boolexpr] { + foreach {a b} $term break + if {[extract_vars $a]!=[extract_vars $b]} { + error "mismatch: $term" + } +} + +# Generate a random expression according to the templates given above. +# If the argument is EXPR or omitted, then an integer expression is +# generated. If the argument is BOOL then a boolean expression is +# produced. +# +proc generate_expr {{e EXPR}} { + set tcle $e + set ne [llength $::intexpr] + set nb [llength $::boolexpr] + set na [llength $::aggexpr] + set div 2 + set mx 50 + set i 0 + while {1} { + set cnt 0 + set re [lindex $::intexpr [expr {int(rand()*$ne)}]] + incr cnt [regsub {EXPR} $e [lindex $re 0] e] + regsub {EXPR} $tcle [lindex $re 1] tcle + set rb [lindex $::boolexpr [expr {int(rand()*$nb)}]] + incr cnt [regsub {BOOL} $e [lindex $rb 0] e] + regsub {BOOL} $tcle [lindex $rb 1] tcle + set ra [lindex $::aggexpr [expr {int(rand()*$na)}]] + incr cnt [regsub {AGG} $e [lindex $ra 0] e] + regsub {AGG} $tcle [lindex $ra 1] tcle + + if {$cnt==0} break + incr i $cnt + + set v1 [extract_vars $e] + if {$v1!=[extract_vars $tcle]} { + exit + } + + if {$i+[string length $v1]>=$mx} { + set ne [expr {$ne/$div}] + set nb [expr {$nb/$div}] + set na [expr {$na/$div}] + set div 1 + set mx [expr {$mx*1000}] + } + } + regsub -all { tcland } $tcle { \&\& } tcle + return [list $e $tcle] +} + +# Implementation of routines used to implement the IN and BETWEEN +# operators. 
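generate_expr, defined above, grows an expression by repeatedly picking a template pair and splicing its SQL half into the working SQL string and its TCL half into the parallel TCL string, until no EXPR/BOOL/AGG placeholders remain. A minimal sketch of a single substitution step under that scheme, using one entry of the $intexpr list shown earlier:

    # One manual substitution step, mirroring the regsub calls inside
    # generate_expr's loop.
    set e    {EXPR+EXPR}           ;# working SQL form
    set tcle {EXPR+EXPR}           ;# parallel TCL form
    set pair {t1.a $a}             ;# template: SQL sub-term and its TCL equivalent
    regsub {EXPR} $e    [lindex $pair 0] e
    regsub {EXPR} $tcle [lindex $pair 1] tcle
    puts "$e   <=>   $tcle"        ;# t1.a+EXPR   <=>   $a+EXPR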
+proc inop {lhs args} { + foreach a $args { + if {$a==$lhs} {return 1} + } + return 0 +} +proc betweenop {lhs first second} { + return [expr {$lhs>=$first && $lhs<=$second}] +} +proc coalesce_subquery {a b e} { + if {$b} { + return $a + } else { + return $e + } +} +proc one {args} { + return 1 +} + +# Begin generating the test script: +# +puts {# 2008 December 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file tests randomly generated SQL expressions. The expressions +# are generated by a TCL script. The same TCL script also computes the +# correct value of the expression. So, from one point of view, this +# file verifies the expression evaluation logic of SQLite against the +# expression evaluation logic of TCL. +# +# An early version of this script is how bug #3541 was detected. +# +# $Id: randexpr1.tcl,v 1.1 2008/12/15 16:33:30 drh Exp $ +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create test data +# +do_test randexpr1-1.1 { + db eval { + CREATE TABLE t1(a,b,c,d,e,f); + INSERT INTO t1 VALUES(100,200,300,400,500,600); + SELECT * FROM t1 + } +} {100 200 300 400 500 600} +} + +# Test data for TCL evaluation. +# +set a [expr {wide(100)}] +set b [expr {wide(200)}] +set c [expr {wide(300)}] +set d [expr {wide(400)}] +set e [expr {wide(500)}] +set f [expr {wide(600)}] + +# A procedure to generate a test case. +# +set tn 0 +proc make_test_case {sql result} { + global tn + incr tn + puts "do_test randexpr-2.$tn {\n db eval {$sql}\n} {$result}" +} + +# Generate many random test cases. +# +expr srand(0) +for {set i 0} {$i<1000} {incr i} { + while {1} { + foreach {sqle tcle} [generate_expr EXPR] break; + if {[catch {expr $tcle} ans]} { + #puts stderr [list $tcle] + #puts stderr ans=$ans + if {![regexp {divide by zero} $ans]} exit + continue + } + set len [string length $sqle] + if {$len<100 || $len>2000} continue + if {[info exists seen($sqle)]} continue + set seen($sqle) 1 + break + } + while {1} { + foreach {sqlb tclb} [generate_expr BOOL] break; + if {[catch {expr $tclb} bans]} { + #puts stderr [list $tclb] + #puts stderr bans=$bans + if {![regexp {divide by zero} $bans]} exit + continue + } + break + } + if {$bans} { + make_test_case "SELECT $sqle FROM t1 WHERE $sqlb" $ans + make_test_case "SELECT $sqle FROM t1 WHERE NOT ($sqlb)" {} + } else { + make_test_case "SELECT $sqle FROM t1 WHERE $sqlb" {} + make_test_case "SELECT $sqle FROM t1 WHERE NOT ($sqlb)" $ans + } + if {[regexp { \| } $sqle]} { + regsub -all { \| } $sqle { \& } sqle + regsub -all { \| } $tcle { \& } tcle + if {[catch {expr $tcle} ans]==0} { + if {$bans} { + make_test_case "SELECT $sqle FROM t1 WHERE $sqlb" $ans + } else { + make_test_case "SELECT $sqle FROM t1 WHERE NOT ($sqlb)" $ans + } + } + } +} + +# Terminate the test script +# +puts {finish_test} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/randexpr1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/randexpr1.test --- sqlite3-3.4.2/test/randexpr1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/randexpr1.test 2008-12-15 16:33:30.000000000 +0000 @@ -0,0 +1,7832 @@ +# 2008 December 16 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file tests randomly generated SQL expressions. The expressions +# are generated by a TCL script. The same TCL script also computes the +# correct value of the expression. So, from one point of view, this +# file verifies the expression evaluation logic of SQLite against the +# expression evaluation logic of TCL. +# +# An early version of this script is how bug #3541 was detected. +# +# $Id: randexpr1.test,v 1.1 2008/12/15 16:33:30 drh Exp $ +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create test data +# +do_test randexpr1-1.1 { + db eval { + CREATE TABLE t1(a,b,c,d,e,f); + INSERT INTO t1 VALUES(100,200,300,400,500,600); + SELECT * FROM t1 + } +} {100 200 300 400 500 600} + +do_test randexpr-2.1 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (t1.b,+11,coalesce((select max((abs(17)/abs(t1.f))) from t1 where ((abs(t1.f)/abs(t1.b)) in (select case (min(t1.a | d*d)+(abs(count(*)-count(*)+ -count(*)*max( -t1.c))-max(f))) when -count(distinct 19) then ((count(*))) else max(13) end from t1 union select count(distinct b) from t1)) or 19 in (t1.a,t1.c,17)),17) | 17)),13) FROM t1 WHERE not not c=a-+(select case ~case -~+count(distinct (select count(distinct t1.a)*max(13) from t1))+max( -19*f)*max(f)*max(f)* -count(distinct d)-(count(distinct 11)) | max(t1.f)*count(*) when count(distinct b) then count(distinct t1.b) else -min(t1.f) end*cast(avg(11) AS integer) when max(t1.f) then max(c) else count(*) end from t1)+d} +} {} +do_test randexpr-2.2 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (t1.b,+11,coalesce((select max((abs(17)/abs(t1.f))) from t1 where ((abs(t1.f)/abs(t1.b)) in (select case (min(t1.a | d*d)+(abs(count(*)-count(*)+ -count(*)*max( -t1.c))-max(f))) when -count(distinct 19) then ((count(*))) else max(13) end from t1 union select count(distinct b) from t1)) or 19 in (t1.a,t1.c,17)),17) | 17)),13) FROM t1 WHERE NOT (not not c=a-+(select case ~case -~+count(distinct (select count(distinct t1.a)*max(13) from t1))+max( -19*f)*max(f)*max(f)* -count(distinct d)-(count(distinct 11)) | max(t1.f)*count(*) when count(distinct b) then count(distinct t1.b) else -min(t1.f) end*cast(avg(11) AS integer) when max(t1.f) then max(c) else count(*) end from t1)+d)} +} {13} +do_test randexpr-2.3 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (t1.b,+11,coalesce((select max((abs(17)/abs(t1.f))) from t1 where ((abs(t1.f)/abs(t1.b)) in (select case (min(t1.a & d*d)+(abs(count(*)-count(*)+ -count(*)*max( -t1.c))-max(f))) when -count(distinct 19) then ((count(*))) else max(13) end from t1 union select count(distinct b) from t1)) or 19 in (t1.a,t1.c,17)),17) & 17)),13) FROM t1 WHERE NOT (not not c=a-+(select case ~case -~+count(distinct (select count(distinct t1.a)*max(13) from t1))+max( -19*f)*max(f)*max(f)* -count(distinct d)-(count(distinct 11)) | max(t1.f)*count(*) when count(distinct b) then count(distinct t1.b) else -min(t1.f) end*cast(avg(11) AS integer) when max(t1.f) then max(c) else count(*) end from t1)+d)} +} {13} +do_test randexpr-2.4 { + db eval {SELECT t1.c*19-t1.f*19+coalesce((select 17 from t1 where e in (f,case (select cast(avg(t1.a) AS integer) from t1)-t1.d when coalesce((select +case 
when not exists(select 1 from t1 where 17<>t1.f and ((a))=a) and t1.b between f and 13),t1.a))+case abs(+(max(19))) | count(distinct t1.e) | count(distinct 19) when count(*) then (max(a)) else count(distinct d) end from t1 union select (count(*)) from t1) and t1.b>=19 then d when t1.b in (select d from t1 union select c from t1) then t1.e else a end} +} {-2900} +do_test randexpr-2.5 { + db eval {SELECT t1.c*19-t1.f*19+coalesce((select 17 from t1 where e in (f,case (select cast(avg(t1.a) AS integer) from t1)-t1.d when coalesce((select +case when not exists(select 1 from t1 where 17<>t1.f and ((a))=a) and t1.b between f and 13),t1.a))+case abs(+(max(19))) | count(distinct t1.e) | count(distinct 19) when count(*) then (max(a)) else count(distinct d) end from t1 union select (count(*)) from t1) and t1.b>=19 then d when t1.b in (select d from t1 union select c from t1) then t1.e else a end)} +} {} +do_test randexpr-2.6 { + db eval {SELECT case when case when not exists(select 1 from t1 where e not in (~t1.d,+case when t1.cd) then 17 else t1.e end in (a,t1.e,17) or (not exists(select 1 from t1 where a in (f,t1.c,t1.a))) then 11 else 13 end FROM t1 WHERE t1.bd) then 17 else t1.e end in (a,t1.e,17) or (not exists(select 1 from t1 where a in (f,t1.c,t1.a))) then 11 else 13 end FROM t1 WHERE NOT (t1.bd) then 17 else t1.e end in (a,t1.e,17) or (not exists(select 1 from t1 where a in (f,t1.c,t1.a))) then 11 else 13 end FROM t1 WHERE t1.bb), -11) | f+t1.f not in (((c)),b,13))),f) FROM t1 WHERE case when 19+c>=t1.a then t1.c when not case when not exists(select 1 from t1 where +f | b*b*19+19*13-a | case when t1.e not in (t1.f,t1.c,b) then 11 when 17>t1.c then a else e end<>e) then b when 17=t1.e then b else e end<>t1.b then a else d end-t1.b=(13)} +} {} +do_test randexpr-2.10 { + db eval {SELECT coalesce((select max(11- -19-f-t1.b+a) from t1 where exists(select 1 from t1 where 11-~(d)-c*a*~t1.a-t1.e-t1.e+coalesce((select coalesce((select t1.c from t1 where case (c) when d then e else 11 end=t1.f),t1.d) from t1 where (t1.d)>b), -11) | f+t1.f not in (((c)),b,13))),f) FROM t1 WHERE NOT (case when 19+c>=t1.a then t1.c when not case when not exists(select 1 from t1 where +f | b*b*19+19*13-a | case when t1.e not in (t1.f,t1.c,b) then 11 when 17>t1.c then a else e end<>e) then b when 17=t1.e then b else e end<>t1.b then a else d end-t1.b=(13))} +} {-670} +do_test randexpr-2.11 { + db eval {SELECT coalesce((select max(11- -19-f-t1.b+a) from t1 where exists(select 1 from t1 where 11-~(d)-c*a*~t1.a-t1.e-t1.e+coalesce((select coalesce((select t1.c from t1 where case (c) when d then e else 11 end=t1.f),t1.d) from t1 where (t1.d)>b), -11) & f+t1.f not in (((c)),b,13))),f) FROM t1 WHERE NOT (case when 19+c>=t1.a then t1.c when not case when not exists(select 1 from t1 where +f | b*b*19+19*13-a | case when t1.e not in (t1.f,t1.c,b) then 11 when 17>t1.c then a else e end<>e) then b when 17=t1.e then b else e end<>t1.b then a else d end-t1.b=(13))} +} {-670} +do_test randexpr-2.12 { + db eval {SELECT (abs(17)/abs(~case when (abs(t1.c* -(abs(case b | coalesce((select max(17*e*case when 11 in (select t1.b from t1 union select t1.c from t1) then 19 when d between f and t1.e then t1.e else t1.e end | b) from t1 where (13 not in (11,t1.d,e))),e)*a*d when 11 then e else d end*17)/abs(19)))/abs(t1.e))<>e then t1.b else t1.d end*(19))) FROM t1 WHERE exists(select 1 from t1 where t1.d+11 in (case t1.b++c when +a*(+case (select count(*) from t1) when 19 then t1.c else a end) then t1.a else case when (t1.b> -d) and not 
exists(select 1 from t1 where not exists(select 1 from t1 where (abs(f)/abs( -f*a+c*11))<>a)) then 13 else 19 end end,17,a)) or c=t1.d} +} {} +do_test randexpr-2.13 { + db eval {SELECT (abs(17)/abs(~case when (abs(t1.c* -(abs(case b | coalesce((select max(17*e*case when 11 in (select t1.b from t1 union select t1.c from t1) then 19 when d between f and t1.e then t1.e else t1.e end | b) from t1 where (13 not in (11,t1.d,e))),e)*a*d when 11 then e else d end*17)/abs(19)))/abs(t1.e))<>e then t1.b else t1.d end*(19))) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.d+11 in (case t1.b++c when +a*(+case (select count(*) from t1) when 19 then t1.c else a end) then t1.a else case when (t1.b> -d) and not exists(select 1 from t1 where not exists(select 1 from t1 where (abs(f)/abs( -f*a+c*11))<>a)) then 13 else 19 end end,17,a)) or c=t1.d)} +} {0} +do_test randexpr-2.14 { + db eval {SELECT (abs(17)/abs(~case when (abs(t1.c* -(abs(case b & coalesce((select max(17*e*case when 11 in (select t1.b from t1 union select t1.c from t1) then 19 when d between f and t1.e then t1.e else t1.e end & b) from t1 where (13 not in (11,t1.d,e))),e)*a*d when 11 then e else d end*17)/abs(19)))/abs(t1.e))<>e then t1.b else t1.d end*(19))) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.d+11 in (case t1.b++c when +a*(+case (select count(*) from t1) when 19 then t1.c else a end) then t1.a else case when (t1.b> -d) and not exists(select 1 from t1 where not exists(select 1 from t1 where (abs(f)/abs( -f*a+c*11))<>a)) then 13 else 19 end end,17,a)) or c=t1.d)} +} {0} +do_test randexpr-2.15 { + db eval {SELECT coalesce((select t1.c from t1 where e not between 19 and b*d or not exists(select 1 from t1 where t1.a not in (case when d not between b+~(t1.d)-case when ((t1.f<=t1.e) and -11e),17))/abs( - -17)) then 17 else t1.d end-t1.b-d) | -f,c)) then 11-t1.f when ( -1313 or t1.e in (select (count(*)) from t1 union select cast(avg(t1.e) AS integer) from t1) or 19>17 and 13 not between 19 and 17 then (t1.f) when not t1.b<>t1.f or d<>b then t1.e else a end and c} +} {} +do_test randexpr-2.20 { + db eval {SELECT case when (a not in (t1.e,(+case when t1.b<=(abs(coalesce((select max(a+(select count(distinct (select max(((abs( -t1.d)/abs(a))) | t1.c)*min(t1.e) from t1)) from t1)) from t1 where +t1.f>e),17))/abs( - -17)) then 17 else t1.d end-t1.b-d) | -f,c)) then 11-t1.f when ( -1313 or t1.e in (select (count(*)) from t1 union select cast(avg(t1.e) AS integer) from t1) or 19>17 and 13 not between 19 and 17 then (t1.f) when not t1.b<>t1.f or d<>b then t1.e else a end and c)} +} {-589} +do_test randexpr-2.21 { + db eval {SELECT case when (a not in (t1.e,(+case when t1.b<=(abs(coalesce((select max(a+(select count(distinct (select max(((abs( -t1.d)/abs(a))) & t1.c)*min(t1.e) from t1)) from t1)) from t1 where +t1.f>e),17))/abs( - -17)) then 17 else t1.d end-t1.b-d) & -f,c)) then 11-t1.f when ( -1313 or t1.e in (select (count(*)) from t1 union select cast(avg(t1.e) AS integer) from t1) or 19>17 and 13 not between 19 and 17 then (t1.f) when not t1.b<>t1.f or d<>b then t1.e else a end and c)} +} {-589} +do_test randexpr-2.22 { + db eval {SELECT t1.a | case when e in (select (13) from t1 union select d from t1) then (17) when c*~case (17) when coalesce((select max(case when case when 19 in (13,t1.f,11) then t1.b else t1.b end | 11>t1.a then 11 else t1.b end-b) from t1 where (13 not in (a,19,b)) and t1.a not between -11 and t1.f),f) then e else t1.c end | a in (select 11 from t1 union select t1.d from t1) then 17 else t1.a end | f FROM t1 
WHERE coalesce((select t1.a*t1.a from t1 where ~ -(abs((select abs(count(*))-count(distinct t1.d*11) from t1)*+b)/abs(~t1.c))+c not in (coalesce((select ~13*t1.a from t1 where f+(abs(d* -t1.f)/abs(t1.c))<=t1.b or not t1.a<=t1.c),t1.c),e,(t1.e))),(a))+11<>11} +} {636} +do_test randexpr-2.23 { + db eval {SELECT t1.a | case when e in (select (13) from t1 union select d from t1) then (17) when c*~case (17) when coalesce((select max(case when case when 19 in (13,t1.f,11) then t1.b else t1.b end | 11>t1.a then 11 else t1.b end-b) from t1 where (13 not in (a,19,b)) and t1.a not between -11 and t1.f),f) then e else t1.c end | a in (select 11 from t1 union select t1.d from t1) then 17 else t1.a end | f FROM t1 WHERE NOT (coalesce((select t1.a*t1.a from t1 where ~ -(abs((select abs(count(*))-count(distinct t1.d*11) from t1)*+b)/abs(~t1.c))+c not in (coalesce((select ~13*t1.a from t1 where f+(abs(d* -t1.f)/abs(t1.c))<=t1.b or not t1.a<=t1.c),t1.c),e,(t1.e))),(a))+11<>11)} +} {} +do_test randexpr-2.24 { + db eval {SELECT t1.a & case when e in (select (13) from t1 union select d from t1) then (17) when c*~case (17) when coalesce((select max(case when case when 19 in (13,t1.f,11) then t1.b else t1.b end & 11>t1.a then 11 else t1.b end-b) from t1 where (13 not in (a,19,b)) and t1.a not between -11 and t1.f),f) then e else t1.c end & a in (select 11 from t1 union select t1.d from t1) then 17 else t1.a end & f FROM t1 WHERE coalesce((select t1.a*t1.a from t1 where ~ -(abs((select abs(count(*))-count(distinct t1.d*11) from t1)*+b)/abs(~t1.c))+c not in (coalesce((select ~13*t1.a from t1 where f+(abs(d* -t1.f)/abs(t1.c))<=t1.b or not t1.a<=t1.c),t1.c),e,(t1.e))),(a))+11<>11} +} {64} +do_test randexpr-2.25 { + db eval {SELECT c*coalesce((select ~19-e from t1 where not exists(select 1 from t1 where case when t1.b<>(d) then case when exists(select 1 from t1 where not exists(select 1 from t1 where case t1.e when e*(11) then 13 else 11 end in (select t1.a from t1 union select 13 from t1))) then coalesce((select case when f in (select t1.c from t1 union select (t1.b) from t1) then f else a end from t1 where t1.f not between 17 and f), -f) else 11 end else (a) end*d not in (e,t1.c,t1.e))),t1.e)-a FROM t1 WHERE 19+(select cast(avg((abs(t1.d)/abs(coalesce((select max(+b-(t1.e+b+ -c)*19) from t1 where 11 not between t1.b and f), -17)))*(11)) AS integer) from t1)*f in (select count(*) from t1 union select case max(t1.d)-( -min(t1.d)*(+max(e))) when max(t1.a) then cast(avg(t1.d) AS integer)-(count(distinct t1.f))-( -cast(avg(f) AS integer)) else max(t1.e) end from t1)} +} {} +do_test randexpr-2.26 { + db eval {SELECT c*coalesce((select ~19-e from t1 where not exists(select 1 from t1 where case when t1.b<>(d) then case when exists(select 1 from t1 where not exists(select 1 from t1 where case t1.e when e*(11) then 13 else 11 end in (select t1.a from t1 union select 13 from t1))) then coalesce((select case when f in (select t1.c from t1 union select (t1.b) from t1) then f else a end from t1 where t1.f not between 17 and f), -f) else 11 end else (a) end*d not in (e,t1.c,t1.e))),t1.e)-a FROM t1 WHERE NOT (19+(select cast(avg((abs(t1.d)/abs(coalesce((select max(+b-(t1.e+b+ -c)*19) from t1 where 11 not between t1.b and f), -17)))*(11)) AS integer) from t1)*f in (select count(*) from t1 union select case max(t1.d)-( -min(t1.d)*(+max(e))) when max(t1.a) then cast(avg(t1.d) AS integer)-(count(distinct t1.f))-( -cast(avg(f) AS integer)) else max(t1.e) end from t1))} +} {149900} +do_test randexpr-2.27 { + db eval {SELECT 
(abs((select (case abs(min(t1.f)) | count(distinct e) when (~+count(*)+max(e)++abs(cast(avg(~19) AS integer))) then +cast(avg(c) AS integer) else ~ -max(+t1.b)+~max(19)*count(distinct a)*min(17) | max(t1.d) end) from t1))/abs(~c+(select -(count(distinct t1.e)) from t1)-11+t1.e)) FROM t1 WHERE 11>=t1.f} +} {} +do_test randexpr-2.28 { + db eval {SELECT (abs((select (case abs(min(t1.f)) | count(distinct e) when (~+count(*)+max(e)++abs(cast(avg(~19) AS integer))) then +cast(avg(c) AS integer) else ~ -max(+t1.b)+~max(19)*count(distinct a)*min(17) | max(t1.d) end) from t1))/abs(~c+(select -(count(distinct t1.e)) from t1)-11+t1.e)) FROM t1 WHERE NOT (11>=t1.f)} +} {0} +do_test randexpr-2.29 { + db eval {SELECT (abs((select (case abs(min(t1.f)) & count(distinct e) when (~+count(*)+max(e)++abs(cast(avg(~19) AS integer))) then +cast(avg(c) AS integer) else ~ -max(+t1.b)+~max(19)*count(distinct a)*min(17) & max(t1.d) end) from t1))/abs(~c+(select -(count(distinct t1.e)) from t1)-11+t1.e)) FROM t1 WHERE NOT (11>=t1.f)} +} {1} +do_test randexpr-2.30 { + db eval {SELECT (select +count(distinct case when 11=a or e<+t1.f+t1.c and not exists(select 1 from t1 where not t1.a-t1.b=~a and f<=coalesce((select max((e)) from t1 where +t1.f>=b),t1.d)*13) and t1.c>t1.e and t1.a<17 then f when a=b),t1.d)*13) and t1.c>t1.e and t1.a<17 then f when a(c)),b)*t1.b-t1.b) then d else t1.e end | f* -13-c) from t1) from t1 where not ~aa} +} {} +do_test randexpr-2.33 { + db eval {SELECT coalesce((select (select max(t1.a)*(max(a))*max(t1.f*~11*case when b not in ((select min(19) from t1),b,coalesce((select max((t1.f)) from t1 where t1.c in (c,t1.b,(t1.f)) or t1.a<>(c)),b)*t1.b-t1.b) then d else t1.e end | f* -13-c) from t1) from t1 where not ~aa)} +} {581} +do_test randexpr-2.34 { + db eval {SELECT coalesce((select (select max(t1.a)*(max(a))*max(t1.f*~11*case when b not in ((select min(19) from t1),b,coalesce((select max((t1.f)) from t1 where t1.c in (c,t1.b,(t1.f)) or t1.a<>(c)),b)*t1.b-t1.b) then d else t1.e end & f* -13-c) from t1) from t1 where not ~aa)} +} {581} +do_test randexpr-2.35 { + db eval {SELECT case when t1.d not in (t1.d,coalesce((select max(11) from t1 where f not between 13 and c-coalesce((select max(d) from t1 where 19 not in ((t1.f),f,11)),t1.d)*t1.e | a),13), -(t1.e)) then t1.b when t1.f=t1.a and exists(select 1 from t1 where b in (select c from t1 union select 11 from t1) and t1.c=b and not -d in (t1.d,t1.d,t1.b) and d between 11 and c) or d<=17 or a not in (e,19,(t1.b)) then 17 else 11 end FROM t1 WHERE a<>f+f} +} {17} +do_test randexpr-2.36 { + db eval {SELECT case when t1.d not in (t1.d,coalesce((select max(11) from t1 where f not between 13 and c-coalesce((select max(d) from t1 where 19 not in ((t1.f),f,11)),t1.d)*t1.e | a),13), -(t1.e)) then t1.b when t1.f=t1.a and exists(select 1 from t1 where b in (select c from t1 union select 11 from t1) and t1.c=b and not -d in (t1.d,t1.d,t1.b) and d between 11 and c) or d<=17 or a not in (e,19,(t1.b)) then 17 else 11 end FROM t1 WHERE NOT (a<>f+f)} +} {} +do_test randexpr-2.37 { + db eval {SELECT case when t1.d not in (t1.d,coalesce((select max(11) from t1 where f not between 13 and c-coalesce((select max(d) from t1 where 19 not in ((t1.f),f,11)),t1.d)*t1.e & a),13), -(t1.e)) then t1.b when t1.f=t1.a and exists(select 1 from t1 where b in (select c from t1 union select 11 from t1) and t1.c=b and not -d in (t1.d,t1.d,t1.b) and d between 11 and c) or d<=17 or a not in (e,19,(t1.b)) then 17 else 11 end FROM t1 WHERE a<>f+f} +} {17} +do_test randexpr-2.38 { + db 
eval {SELECT case 13 when coalesce((select t1.a*d+++b | b+(abs(t1.e)/abs(c))+(select count(*) from t1) from t1 where not exists(select 1 from t1 where 17 in (select d from t1 union select t1.f from t1))),t1.b) then ~t1.c-(select case min(case when 17 not in (19,19,t1.c) then c when 13 between (19) and t1.e then e else 19 end-19) when cast(avg( -11) AS integer) then max(t1.c)* -min(t1.d) else (count(distinct t1.f)) end from t1) | +t1.f else t1.c end FROM t1 WHERE coalesce((select max(t1.c) from t1 where (abs(t1.b)/abs(a))>=(e)),+ -case when (11<17) then t1.a when t1.a*e=19* -11 and not exists(select 1 from t1 where -t1.d not between coalesce((select a from t1 where -t1.c>t1.e), -19) and t1.d and not exists(select 1 from t1 where (t1.e) in (select d from t1 union select t1.c from t1))) then c else a end*a)-t1.b* -(b) in (e,d,b)} +} {} +do_test randexpr-2.39 { + db eval {SELECT case 13 when coalesce((select t1.a*d+++b | b+(abs(t1.e)/abs(c))+(select count(*) from t1) from t1 where not exists(select 1 from t1 where 17 in (select d from t1 union select t1.f from t1))),t1.b) then ~t1.c-(select case min(case when 17 not in (19,19,t1.c) then c when 13 between (19) and t1.e then e else 19 end-19) when cast(avg( -11) AS integer) then max(t1.c)* -min(t1.d) else (count(distinct t1.f)) end from t1) | +t1.f else t1.c end FROM t1 WHERE NOT (coalesce((select max(t1.c) from t1 where (abs(t1.b)/abs(a))>=(e)),+ -case when (11<17) then t1.a when t1.a*e=19* -11 and not exists(select 1 from t1 where -t1.d not between coalesce((select a from t1 where -t1.c>t1.e), -19) and t1.d and not exists(select 1 from t1 where (t1.e) in (select d from t1 union select t1.c from t1))) then c else a end*a)-t1.b* -(b) in (e,d,b))} +} {300} +do_test randexpr-2.40 { + db eval {SELECT case 13 when coalesce((select t1.a*d+++b & b+(abs(t1.e)/abs(c))+(select count(*) from t1) from t1 where not exists(select 1 from t1 where 17 in (select d from t1 union select t1.f from t1))),t1.b) then ~t1.c-(select case min(case when 17 not in (19,19,t1.c) then c when 13 between (19) and t1.e then e else 19 end-19) when cast(avg( -11) AS integer) then max(t1.c)* -min(t1.d) else (count(distinct t1.f)) end from t1) & +t1.f else t1.c end FROM t1 WHERE NOT (coalesce((select max(t1.c) from t1 where (abs(t1.b)/abs(a))>=(e)),+ -case when (11<17) then t1.a when t1.a*e=19* -11 and not exists(select 1 from t1 where -t1.d not between coalesce((select a from t1 where -t1.c>t1.e), -19) and t1.d and not exists(select 1 from t1 where (t1.e) in (select d from t1 union select t1.c from t1))) then c else a end*a)-t1.b* -(b) in (e,d,b))} +} {300} +do_test randexpr-2.41 { + db eval {SELECT case when not exists(select 1 from t1 where ( -case when b in ((11),coalesce((select f from t1 where (not exists(select 1 from t1 where coalesce((select (t1.a) from t1 where e<=a),11) in (select t1.f from t1 union select 13 from t1)) or t1.a>d)),t1.a),t1.e) or not not exists(select 1 from t1 where (t1.f<=(t1.a))) then e*t1.a else t1.c end)<=d and not (t1.e)>=t1.e) then coalesce((select max( -t1.e) from t1 where t1.d between t1.b and t1.c),t1.d) else t1.c end+13*13 FROM t1 WHERE t1.b=t1.f | 19+coalesce((select max(a) from t1 where case t1.e when case when (11<=11 | t1.f and b>=b or t1.f<=a) then case when c between t1.e and b then e else +t1.f end else -t1.c end then t1.b else 13 end*13 between t1.f and c and exists(select 1 from t1 where (b not between 11 and a)) and f>=t1.b), -t1.b)-c} +} {} +do_test randexpr-2.42 { + db eval {SELECT case when not exists(select 1 from t1 where ( -case 
when b in ((11),coalesce((select f from t1 where (not exists(select 1 from t1 where coalesce((select (t1.a) from t1 where e<=a),11) in (select t1.f from t1 union select 13 from t1)) or t1.a>d)),t1.a),t1.e) or not not exists(select 1 from t1 where (t1.f<=(t1.a))) then e*t1.a else t1.c end)<=d and not (t1.e)>=t1.e) then coalesce((select max( -t1.e) from t1 where t1.d between t1.b and t1.c),t1.d) else t1.c end+13*13 FROM t1 WHERE NOT (t1.b=t1.f | 19+coalesce((select max(a) from t1 where case t1.e when case when (11<=11 | t1.f and b>=b or t1.f<=a) then case when c between t1.e and b then e else +t1.f end else -t1.c end then t1.b else 13 end*13 between t1.f and c and exists(select 1 from t1 where (b not between 11 and a)) and f>=t1.b), -t1.b)-c)} +} {569} +do_test randexpr-2.43 { + db eval {SELECT coalesce((select c from t1 where t1.a=case when a=t1.b+~(select case cast(avg( -b*13) AS integer) when max(f*13* -c) then (cast(avg(t1.d) AS integer)) else count(distinct t1.e*c) end from t1) then t1.d | t1.a else case when t1.a-b not between e and -t1.c* -13 then 19 else d end end),d) | t1.d FROM t1 WHERE +13 in (f,t1.f+t1.f, -coalesce((select max(t1.f) from t1 where (t1.a-13)<=(case (select count(*) from t1) when 13 then -+t1.e | -(coalesce((select max(~coalesce((select ( -13) from t1 where -c<(11) or 11 between a and t1.d),13)*t1.d) from t1 where d not between b and 13),19))*t1.a else 13 end-b-c)),t1.e)-19)} +} {} +do_test randexpr-2.44 { + db eval {SELECT coalesce((select c from t1 where t1.a=case when a=t1.b+~(select case cast(avg( -b*13) AS integer) when max(f*13* -c) then (cast(avg(t1.d) AS integer)) else count(distinct t1.e*c) end from t1) then t1.d | t1.a else case when t1.a-b not between e and -t1.c* -13 then 19 else d end end),d) | t1.d FROM t1 WHERE NOT (+13 in (f,t1.f+t1.f, -coalesce((select max(t1.f) from t1 where (t1.a-13)<=(case (select count(*) from t1) when 13 then -+t1.e | -(coalesce((select max(~coalesce((select ( -13) from t1 where -c<(11) or 11 between a and t1.d),13)*t1.d) from t1 where d not between b and 13),19))*t1.a else 13 end-b-c)),t1.e)-19))} +} {400} +do_test randexpr-2.45 { + db eval {SELECT coalesce((select c from t1 where t1.a=case when a=t1.b+~(select case cast(avg( -b*13) AS integer) when max(f*13* -c) then (cast(avg(t1.d) AS integer)) else count(distinct t1.e*c) end from t1) then t1.d & t1.a else case when t1.a-b not between e and -t1.c* -13 then 19 else d end end),d) & t1.d FROM t1 WHERE NOT (+13 in (f,t1.f+t1.f, -coalesce((select max(t1.f) from t1 where (t1.a-13)<=(case (select count(*) from t1) when 13 then -+t1.e | -(coalesce((select max(~coalesce((select ( -13) from t1 where -c<(11) or 11 between a and t1.d),13)*t1.d) from t1 where d not between b and 13),19))*t1.a else 13 end-b-c)),t1.e)-19))} +} {400} +do_test randexpr-2.46 { + db eval {SELECT case ~11-13 when 11-d-case when t1.f*+a-t1.a-t1.f=t1.a then a when t1.f between 19-t1.a+coalesce((select a from t1 where e in (select case -max(t1.c) when count(*) then max(t1.d) else -(cast(avg((t1.e)) AS integer)) end from t1 union select (max(t1.b)) from t1)), -b) and e or ((11=t1.d then t1.b else a end+13+(t1.c) then a else -d end FROM t1 WHERE exists(select 1 from t1 where not exists(select 1 from t1 where (not +t1.b-~a*(d*t1.e | + -coalesce((select t1.c from t1 where t1.b*case t1.b when f then -a*b else e end<>a+t1.a),17))+e*t1.e*11 | t1.a not between 13 and -t1.a and b between d and -f)))} +} {-400} +do_test randexpr-2.47 { + db eval {SELECT case ~11-13 when 11-d-case when t1.f*+a-t1.a-t1.f=t1.a then a when t1.f 
between 19-t1.a+coalesce((select a from t1 where e in (select case -max(t1.c) when count(*) then max(t1.d) else -(cast(avg((t1.e)) AS integer)) end from t1 union select (max(t1.b)) from t1)), -b) and e or ((11=t1.d then t1.b else a end+13+(t1.c) then a else -d end FROM t1 WHERE NOT (exists(select 1 from t1 where not exists(select 1 from t1 where (not +t1.b-~a*(d*t1.e | + -coalesce((select t1.c from t1 where t1.b*case t1.b when f then -a*b else e end<>a+t1.a),17))+e*t1.e*11 | t1.a not between 13 and -t1.a and b between d and -f))))} +} {} +do_test randexpr-2.48 { + db eval {SELECT 11++(case 19 when t1.c-19-coalesce((select t1.b from t1 where e*19-t1.e<=+case when c in (select 19 | 17 from t1 union select 11 from t1) then (abs(a)/abs(19)) when a*t1.d>19 or (11) not in (b,t1.a,t1.a) then b else b end-11),f) then 13 else 11 end-t1.e*a) FROM t1 WHERE f in (17,t1.f | coalesce((select max(t1.e) from t1 where not 17 in (select f from t1 union select +17 from t1)),b+coalesce((select max(b) from t1 where 19 in (select case min(t1.d) when (count(*)) then min(d) else -min(d) end from t1 union select max(d) from t1)),f)*f-17+19),b) or d<=f or t1.f>=11 or t1.b<=11 or t1.c<=11 or t1.b<=b} +} {-49978} +do_test randexpr-2.49 { + db eval {SELECT 11++(case 19 when t1.c-19-coalesce((select t1.b from t1 where e*19-t1.e<=+case when c in (select 19 | 17 from t1 union select 11 from t1) then (abs(a)/abs(19)) when a*t1.d>19 or (11) not in (b,t1.a,t1.a) then b else b end-11),f) then 13 else 11 end-t1.e*a) FROM t1 WHERE NOT (f in (17,t1.f | coalesce((select max(t1.e) from t1 where not 17 in (select f from t1 union select +17 from t1)),b+coalesce((select max(b) from t1 where 19 in (select case min(t1.d) when (count(*)) then min(d) else -min(d) end from t1 union select max(d) from t1)),f)*f-17+19),b) or d<=f or t1.f>=11 or t1.b<=11 or t1.c<=11 or t1.b<=b)} +} {} +do_test randexpr-2.50 { + db eval {SELECT 11++(case 19 when t1.c-19-coalesce((select t1.b from t1 where e*19-t1.e<=+case when c in (select 19 & 17 from t1 union select 11 from t1) then (abs(a)/abs(19)) when a*t1.d>19 or (11) not in (b,t1.a,t1.a) then b else b end-11),f) then 13 else 11 end-t1.e*a) FROM t1 WHERE f in (17,t1.f | coalesce((select max(t1.e) from t1 where not 17 in (select f from t1 union select +17 from t1)),b+coalesce((select max(b) from t1 where 19 in (select case min(t1.d) when (count(*)) then min(d) else -min(d) end from t1 union select max(d) from t1)),f)*f-17+19),b) or d<=f or t1.f>=11 or t1.b<=11 or t1.c<=11 or t1.b<=b} +} {-49978} +do_test randexpr-2.51 { + db eval {SELECT -c-coalesce((select max(t1.a) from t1 where (a>17-t1.a) and (select abs(min(t1.a))*case abs(case (((min((t1.c))))+min(a)) when count(distinct f) then min(11) else count(distinct b) end) when min(t1.f) then (count(distinct t1.b)) else -count(*) end from t1)<>case d when +(select ((min(t1.a))) from t1)-(abs(c)/abs(f)) then b else 17 end+t1.c),coalesce((select t1.d from t1 where d>19),t1.a)) FROM t1 WHERE b+(coalesce((select max(a) from t1 where exists(select 1 from t1 where t1.b<=c-(select ~~max((abs(e)/abs(13 | (t1.e)-c-19))) from t1) or d in (select t1.b from t1 union select d from t1)) or 13 in (select cast(avg(19) AS integer) from t1 union select ( -cast(avg(19) AS integer) | count(*)*min(19)) from t1) or 19<=11),t1.c))-t1.d-c>f} +} {} +do_test randexpr-2.52 { + db eval {SELECT -c-coalesce((select max(t1.a) from t1 where (a>17-t1.a) and (select abs(min(t1.a))*case abs(case (((min((t1.c))))+min(a)) when count(distinct f) then min(11) else count(distinct b) end) when 
min(t1.f) then (count(distinct t1.b)) else -count(*) end from t1)<>case d when +(select ((min(t1.a))) from t1)-(abs(c)/abs(f)) then b else 17 end+t1.c),coalesce((select t1.d from t1 where d>19),t1.a)) FROM t1 WHERE NOT (b+(coalesce((select max(a) from t1 where exists(select 1 from t1 where t1.b<=c-(select ~~max((abs(e)/abs(13 | (t1.e)-c-19))) from t1) or d in (select t1.b from t1 union select d from t1)) or 13 in (select cast(avg(19) AS integer) from t1 union select ( -cast(avg(19) AS integer) | count(*)*min(19)) from t1) or 19<=11),t1.c))-t1.d-c>f)} +} {-400} +do_test randexpr-2.53 { + db eval {SELECT case when t1.b+coalesce((select max(a) from t1 where exists(select 1 from t1 where not not t1.d>19)),13) | case when f in (select a from t1 union select case when (a)<>t1.c then c when not exists(select 1 from t1 where t1.c not between 17 and -(t1.d)) then f else (11) end from t1) then t1.c when (not exists(select 1 from t1 where 17 not between t1.f and a)) and d=b then t1.c else d end<=19 and (t1.c<>c and (13) not between t1.d and t1.c) and a>= -b or 11>13 and c<>13 then -f else d end FROM t1 WHERE (case when (17 not in (f*11,+coalesce((select max(b-b) from t1 where t1.c+f not between 11 and t1.d),13),c)) or (t1.e in (select ~min(t1.f) from t1 union select ~count(*) from t1) and exists(select 1 from t1 where (not f>=19 and 17 not between a and t1.e)) or (e) not in (e,d,13) or t1.d>=f) then e when 17 between t1.c and 13 then case when 11<>19 then t1.d else t1.a end else b end in (select -t1.a from t1 union select a from t1))} +} {} +do_test randexpr-2.54 { + db eval {SELECT case when t1.b+coalesce((select max(a) from t1 where exists(select 1 from t1 where not not t1.d>19)),13) | case when f in (select a from t1 union select case when (a)<>t1.c then c when not exists(select 1 from t1 where t1.c not between 17 and -(t1.d)) then f else (11) end from t1) then t1.c when (not exists(select 1 from t1 where 17 not between t1.f and a)) and d=b then t1.c else d end<=19 and (t1.c<>c and (13) not between t1.d and t1.c) and a>= -b or 11>13 and c<>13 then -f else d end FROM t1 WHERE NOT ((case when (17 not in (f*11,+coalesce((select max(b-b) from t1 where t1.c+f not between 11 and t1.d),13),c)) or (t1.e in (select ~min(t1.f) from t1 union select ~count(*) from t1) and exists(select 1 from t1 where (not f>=19 and 17 not between a and t1.e)) or (e) not in (e,d,13) or t1.d>=f) then e when 17 between t1.c and 13 then case when 11<>19 then t1.d else t1.a end else b end in (select -t1.a from t1 union select a from t1)))} +} {400} +do_test randexpr-2.55 { + db eval {SELECT case when t1.b+coalesce((select max(a) from t1 where exists(select 1 from t1 where not not t1.d>19)),13) & case when f in (select a from t1 union select case when (a)<>t1.c then c when not exists(select 1 from t1 where t1.c not between 17 and -(t1.d)) then f else (11) end from t1) then t1.c when (not exists(select 1 from t1 where 17 not between t1.f and a)) and d=b then t1.c else d end<=19 and (t1.c<>c and (13) not between t1.d and t1.c) and a>= -b or 11>13 and c<>13 then -f else d end FROM t1 WHERE NOT ((case when (17 not in (f*11,+coalesce((select max(b-b) from t1 where t1.c+f not between 11 and t1.d),13),c)) or (t1.e in (select ~min(t1.f) from t1 union select ~count(*) from t1) and exists(select 1 from t1 where (not f>=19 and 17 not between a and t1.e)) or (e) not in (e,d,13) or t1.d>=f) then e when 17 between t1.c and 13 then case when 11<>19 then t1.d else t1.a end else b end in (select -t1.a from t1 union select a from t1)))} +} {400} 
+do_test randexpr-2.56 { + db eval {SELECT t1.e+case when d not between f and case when a in (select t1.c from t1 union select t1.e from t1) or coalesce((select 19 from t1 where 17<=t1.d or 19*t1.f*t1.b<>t1.d | t1.d | t1.a),t1.b)*case when (t1.a) in (t1.d,t1.a,a) then -c when 17<13 then t1.f else 17 end-t1.f*e not in (19,t1.d,17) then 19 else t1.e end then -b else t1.b end FROM t1 WHERE (coalesce((select t1.d-coalesce((select max(coalesce((select max(c+13) from t1 where ~13*t1.b in (select abs(abs(abs((cast(avg( -e) AS integer)))))- -count(distinct (13))*max(13) from t1 union select count(distinct 19) from t1)), -t1.f)) from t1 where exists(select 1 from t1 where 17 in (t1.b,13,b))),b)+d+t1.a from t1 where d not in (13,t1.b,b)),17) not between 11 and e or not 17 between d and d)} +} {300} +do_test randexpr-2.57 { + db eval {SELECT t1.e+case when d not between f and case when a in (select t1.c from t1 union select t1.e from t1) or coalesce((select 19 from t1 where 17<=t1.d or 19*t1.f*t1.b<>t1.d | t1.d | t1.a),t1.b)*case when (t1.a) in (t1.d,t1.a,a) then -c when 17<13 then t1.f else 17 end-t1.f*e not in (19,t1.d,17) then 19 else t1.e end then -b else t1.b end FROM t1 WHERE NOT ((coalesce((select t1.d-coalesce((select max(coalesce((select max(c+13) from t1 where ~13*t1.b in (select abs(abs(abs((cast(avg( -e) AS integer)))))- -count(distinct (13))*max(13) from t1 union select count(distinct 19) from t1)), -t1.f)) from t1 where exists(select 1 from t1 where 17 in (t1.b,13,b))),b)+d+t1.a from t1 where d not in (13,t1.b,b)),17) not between 11 and e or not 17 between d and d))} +} {} +do_test randexpr-2.58 { + db eval {SELECT t1.e+case when d not between f and case when a in (select t1.c from t1 union select t1.e from t1) or coalesce((select 19 from t1 where 17<=t1.d or 19*t1.f*t1.b<>t1.d & t1.d & t1.a),t1.b)*case when (t1.a) in (t1.d,t1.a,a) then -c when 17<13 then t1.f else 17 end-t1.f*e not in (19,t1.d,17) then 19 else t1.e end then -b else t1.b end FROM t1 WHERE (coalesce((select t1.d-coalesce((select max(coalesce((select max(c+13) from t1 where ~13*t1.b in (select abs(abs(abs((cast(avg( -e) AS integer)))))- -count(distinct (13))*max(13) from t1 union select count(distinct 19) from t1)), -t1.f)) from t1 where exists(select 1 from t1 where 17 in (t1.b,13,b))),b)+d+t1.a from t1 where d not in (13,t1.b,b)),17) not between 11 and e or not 17 between d and d)} +} {300} +do_test randexpr-2.59 { + db eval {SELECT coalesce((select max(t1.f*case when not exists(select 1 from t1 where exists(select 1 from t1 where (select count(distinct 11) from t1) between coalesce((select max(c) from t1 where t1.e=t1.a+17),a) and t1.b) or c not in (t1.e,19,t1.e)) then 13 when exists(select 1 from t1 where t1.a between 19 and -c) then coalesce((select coalesce((select max((( -d))) from t1 where 17 not in (t1.d,e,t1.e)),e) from t1 where c= -f),( -c)) else t1.f end) from t1 where not (11 between a and t1.e)),19) FROM t1 WHERE case c when case when (exists(select 1 from t1 where t1.d<>coalesce((select 13 from t1 where (t1.b>+e)),(17))+coalesce((select 13 from t1 where exists(select 1 from t1 where 13 in (t1.b+c,case when f between t1.f and b then t1.b else f end,(11)))),f)-17-17)) then -c-(c) when 11 in (b,f,(t1.d)) then t1.c else t1.b end then t1.d else 11 end in (select -count(*) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.60 { + db eval {SELECT coalesce((select max(t1.f*case when not exists(select 1 from t1 where exists(select 1 from t1 where (select count(distinct 11) from t1) between 
coalesce((select max(c) from t1 where t1.e=t1.a+17),a) and t1.b) or c not in (t1.e,19,t1.e)) then 13 when exists(select 1 from t1 where t1.a between 19 and -c) then coalesce((select coalesce((select max((( -d))) from t1 where 17 not in (t1.d,e,t1.e)),e) from t1 where c= -f),( -c)) else t1.f end) from t1 where not (11 between a and t1.e)),19) FROM t1 WHERE NOT (case c when case when (exists(select 1 from t1 where t1.d<>coalesce((select 13 from t1 where (t1.b>+e)),(17))+coalesce((select 13 from t1 where exists(select 1 from t1 where 13 in (t1.b+c,case when f between t1.f and b then t1.b else f end,(11)))),f)-17-17)) then -c-(c) when 11 in (b,f,(t1.d)) then t1.c else t1.b end then t1.d else 11 end in (select -count(*) from t1 union select count(*) from t1))} +} {360000} +do_test randexpr-2.61 { + db eval {SELECT case when t1.a-coalesce((select t1.b from t1 where (d<=~a)),t1.b+t1.d | 19+13)-t1.a | ~coalesce((select ~17 from t1 where not exists(select 1 from t1 where ~case when 13 in (select +(cast(avg(b) AS integer)) from t1 union select count(*) from t1) then 17 when 11 not in (c,t1.a,t1.a) then e else e end*a not between 11 and c)),t1.c) in (11,t1.a,e) then t1.f when 19 in (select ~count(distinct d) from t1 union select cast(avg(19) AS integer) from t1) then t1.d else 19 end FROM t1 WHERE not exists(select 1 from t1 where (select count(*) from t1)+coalesce((select max(a) from t1 where t1.a between (abs(case when exists(select 1 from t1 where case when t1.e=case when 19 between 11 and e then t1.f else a end then t1.e else t1.b end in (select count(distinct d) | max(11)*abs(case count(*) when max(f) then cast(avg(e) AS integer) else count(distinct t1.e) end) from t1 union select -cast(avg(c) AS integer) from t1)) then (select -min(e) from t1) when (c>e) then t1.d else f end)/abs(b)) and (17)),e) not between t1.a and d)} +} {} +do_test randexpr-2.62 { + db eval {SELECT case when t1.a-coalesce((select t1.b from t1 where (d<=~a)),t1.b+t1.d | 19+13)-t1.a | ~coalesce((select ~17 from t1 where not exists(select 1 from t1 where ~case when 13 in (select +(cast(avg(b) AS integer)) from t1 union select count(*) from t1) then 17 when 11 not in (c,t1.a,t1.a) then e else e end*a not between 11 and c)),t1.c) in (11,t1.a,e) then t1.f when 19 in (select ~count(distinct d) from t1 union select cast(avg(19) AS integer) from t1) then t1.d else 19 end FROM t1 WHERE NOT (not exists(select 1 from t1 where (select count(*) from t1)+coalesce((select max(a) from t1 where t1.a between (abs(case when exists(select 1 from t1 where case when t1.e=case when 19 between 11 and e then t1.f else a end then t1.e else t1.b end in (select count(distinct d) | max(11)*abs(case count(*) when max(f) then cast(avg(e) AS integer) else count(distinct t1.e) end) from t1 union select -cast(avg(c) AS integer) from t1)) then (select -min(e) from t1) when (c>e) then t1.d else f end)/abs(b)) and (17)),e) not between t1.a and d))} +} {400} +do_test randexpr-2.63 { + db eval {SELECT case when t1.a-coalesce((select t1.b from t1 where (d<=~a)),t1.b+t1.d & 19+13)-t1.a & ~coalesce((select ~17 from t1 where not exists(select 1 from t1 where ~case when 13 in (select +(cast(avg(b) AS integer)) from t1 union select count(*) from t1) then 17 when 11 not in (c,t1.a,t1.a) then e else e end*a not between 11 and c)),t1.c) in (11,t1.a,e) then t1.f when 19 in (select ~count(distinct d) from t1 union select cast(avg(19) AS integer) from t1) then t1.d else 19 end FROM t1 WHERE NOT (not exists(select 1 from t1 where (select count(*) from t1)+coalesce((select 
max(a) from t1 where t1.a between (abs(case when exists(select 1 from t1 where case when t1.e=case when 19 between 11 and e then t1.f else a end then t1.e else t1.b end in (select count(distinct d) | max(11)*abs(case count(*) when max(f) then cast(avg(e) AS integer) else count(distinct t1.e) end) from t1 union select -cast(avg(c) AS integer) from t1)) then (select -min(e) from t1) when (c>e) then t1.d else f end)/abs(b)) and (17)),e) not between t1.a and d))} +} {400} +do_test randexpr-2.64 { + db eval {SELECT case when 11 in (select max(f) from t1 union select min(19++13) | min(t1.c) from t1) and (e in (select t1.d from t1 union select t1.a | 17*case e when coalesce((select max(a) from t1 where b not in (b,f,d) or t1.a<=f),11) then t1.a else t1.b end from t1) and -a not between b and d) then d when -t1.f=b or t1.b between -f and -d then 19 else d end+d FROM t1 WHERE case when (coalesce((select case (select max(t1.b) from t1) when (b* -t1.e+e*e-t1.a) then c else a end from t1 where 17 in (select min(t1.d)-abs(count(*)) from t1 union select count(distinct c) from t1)),t1.d)=(t1.d)) and e in (select -min(t1.b) from t1 union select count(distinct 11) from t1) and a< - -17 then t1.d | 11 else 19 end in (select 17 from t1 union select d from t1)} +} {} +do_test randexpr-2.65 { + db eval {SELECT case when 11 in (select max(f) from t1 union select min(19++13) | min(t1.c) from t1) and (e in (select t1.d from t1 union select t1.a | 17*case e when coalesce((select max(a) from t1 where b not in (b,f,d) or t1.a<=f),11) then t1.a else t1.b end from t1) and -a not between b and d) then d when -t1.f=b or t1.b between -f and -d then 19 else d end+d FROM t1 WHERE NOT (case when (coalesce((select case (select max(t1.b) from t1) when (b* -t1.e+e*e-t1.a) then c else a end from t1 where 17 in (select min(t1.d)-abs(count(*)) from t1 union select count(distinct c) from t1)),t1.d)=(t1.d)) and e in (select -min(t1.b) from t1 union select count(distinct 11) from t1) and a< - -17 then t1.d | 11 else 19 end in (select 17 from t1 union select d from t1))} +} {800} +do_test randexpr-2.66 { + db eval {SELECT case when 11 in (select max(f) from t1 union select min(19++13) & min(t1.c) from t1) and (e in (select t1.d from t1 union select t1.a & 17*case e when coalesce((select max(a) from t1 where b not in (b,f,d) or t1.a<=f),11) then t1.a else t1.b end from t1) and -a not between b and d) then d when -t1.f=b or t1.b between -f and -d then 19 else d end+d FROM t1 WHERE NOT (case when (coalesce((select case (select max(t1.b) from t1) when (b* -t1.e+e*e-t1.a) then c else a end from t1 where 17 in (select min(t1.d)-abs(count(*)) from t1 union select count(distinct c) from t1)),t1.d)=(t1.d)) and e in (select -min(t1.b) from t1 union select count(distinct 11) from t1) and a< - -17 then t1.d | 11 else 19 end in (select 17 from t1 union select d from t1))} +} {800} +do_test randexpr-2.67 { + db eval {SELECT (abs((select min(13) from t1))/abs(coalesce((select t1.e from t1 where coalesce((select t1.f from t1 where -11<=b-t1.c*case when (d)-t1.f+t1.e-13<>t1.b then 11 when exists(select 1 from t1 where t1.d<>((t1.b)) or t1.a not between d and 13) or t1.b<>t1.c then t1.e else -t1.d end+t1.a or t1.c not between f and d),e)<>e), -f))) FROM t1 WHERE t1.c<>(select min((abs(( -19-t1.b*13- -++~t1.f*d-17*19*11))/abs(17))) from t1)} +} {0} +do_test randexpr-2.68 { + db eval {SELECT (abs((select min(13) from t1))/abs(coalesce((select t1.e from t1 where coalesce((select t1.f from t1 where -11<=b-t1.c*case when (d)-t1.f+t1.e-13<>t1.b then 11 when 
exists(select 1 from t1 where t1.d<>((t1.b)) or t1.a not between d and 13) or t1.b<>t1.c then t1.e else -t1.d end+t1.a or t1.c not between f and d),e)<>e), -f))) FROM t1 WHERE NOT (t1.c<>(select min((abs(( -19-t1.b*13- -++~t1.f*d-17*19*11))/abs(17))) from t1))} +} {} +do_test randexpr-2.69 { + db eval {SELECT case d-case when c< -case when b<=f or coalesce((select t1.e from t1 where 11e then 19 when not exists(select 1 from t1 where not exists(select 1 from t1 where t1.d>=d)) then t1.f else 17 end then e when not exists(select 1 from t1 where d=b) then t1.c else f end when 17 then 19 else t1.f end FROM t1 WHERE coalesce((select max(+t1.d) from t1 where e<>e or not exists(select 1 from t1 where not exists(select 1 from t1 where (abs(f)/abs(f))++t1.b in (select case abs(cast(avg(e) AS integer)) when cast(avg(17) AS integer) | max(d) | cast(avg(t1.e) AS integer) | min(t1.e) then (cast(avg(19) AS integer)) else cast(avg(c) AS integer) end from t1 union select cast(avg(t1.f) AS integer) from t1))) and t1.a between t1.f and b or not 17<13 or -11= -t1.f or t1.f between c and 19),d)+(t1.d) not between b and e} +} {600} +do_test randexpr-2.70 { + db eval {SELECT case d-case when c< -case when b<=f or coalesce((select t1.e from t1 where 11e then 19 when not exists(select 1 from t1 where not exists(select 1 from t1 where t1.d>=d)) then t1.f else 17 end then e when not exists(select 1 from t1 where d=b) then t1.c else f end when 17 then 19 else t1.f end FROM t1 WHERE NOT (coalesce((select max(+t1.d) from t1 where e<>e or not exists(select 1 from t1 where not exists(select 1 from t1 where (abs(f)/abs(f))++t1.b in (select case abs(cast(avg(e) AS integer)) when cast(avg(17) AS integer) | max(d) | cast(avg(t1.e) AS integer) | min(t1.e) then (cast(avg(19) AS integer)) else cast(avg(c) AS integer) end from t1 union select cast(avg(t1.f) AS integer) from t1))) and t1.a between t1.f and b or not 17<13 or -11= -t1.f or t1.f between c and 19),d)+(t1.d) not between b and e)} +} {} +do_test randexpr-2.71 { + db eval {SELECT case when t1.e in (select abs(max(case a+case when t1.a>=e+ -c then a else t1.b end+t1.a when t1.f then 19 else -19 end)+abs( -count(distinct t1.c))+~(count(distinct t1.b)))-min(b) from t1 union select max(f) from t1) and case t1.e when -t1.c then 19 else b end=t1.e))) or c in (select f from t1 union select 13 from t1) and (b)< -t1.c and f in (select max(13) from t1 union select ~+abs(count(distinct e)*abs(((count(*)))*((max(f))))) from t1)} +} {} +do_test randexpr-2.72 { + db eval {SELECT case when t1.e in (select abs(max(case a+case when t1.a>=e+ -c then a else t1.b end+t1.a when t1.f then 19 else -19 end)+abs( -count(distinct t1.c))+~(count(distinct t1.b)))-min(b) from t1 union select max(f) from t1) and case t1.e when -t1.c then 19 else b end=t1.e))) or c in (select f from t1 union select 13 from t1) and (b)< -t1.c and f in (select max(13) from t1 union select ~+abs(count(distinct e)*abs(((count(*)))*((max(f))))) from t1))} +} {-19} +do_test randexpr-2.73 { + db eval {SELECT c*case when t1.a in (select ~count(*) from t1 union select -count(distinct ~case (abs(11)/abs(b-(coalesce((select 17-t1.a from t1 where not t1.d=t1.d),(abs(11)/abs(a)))))) when case when t1.d between 19 and c and t1.b between 11 and t1.a then 13 else (13) end then (13) else 19 end-19*13+11) from t1) then t1.a when t1.f<=t1.e then f else b end-19-c+f FROM t1 WHERE t1.b>=c+case when exists(select 1 from t1 where a>=+f- -c*e+coalesce((select max(b) from t1 where (b not between (t1.b) and 19) or t1.f<11),d)*f | t1.e+t1.b) 
then ( -t1.e)-19 when ((t1.a in (select 19 from t1 union select t1.f from t1) or b -17)) then f else f end | d} +} {} +do_test randexpr-2.74 { + db eval {SELECT c*case when t1.a in (select ~count(*) from t1 union select -count(distinct ~case (abs(11)/abs(b-(coalesce((select 17-t1.a from t1 where not t1.d=t1.d),(abs(11)/abs(a)))))) when case when t1.d between 19 and c and t1.b between 11 and t1.a then 13 else (13) end then (13) else 19 end-19*13+11) from t1) then t1.a when t1.f<=t1.e then f else b end-19-c+f FROM t1 WHERE NOT (t1.b>=c+case when exists(select 1 from t1 where a>=+f- -c*e+coalesce((select max(b) from t1 where (b not between (t1.b) and 19) or t1.f<11),d)*f | t1.e+t1.b) then ( -t1.e)-19 when ((t1.a in (select 19 from t1 union select t1.f from t1) or b -17)) then f else f end | d)} +} {60281} +do_test randexpr-2.75 { + db eval {SELECT (abs(case when a>=b then t1.c-case when not exists(select 1 from t1 where case d when +17 then 19+c else 11 end-(abs((select - -cast(avg(case when (11- -(abs(e)/abs(13))-t1.f11} +} {0} +do_test randexpr-2.76 { + db eval {SELECT (abs(case when a>=b then t1.c-case when not exists(select 1 from t1 where case d when +17 then 19+c else 11 end-(abs((select - -cast(avg(case when (11- -(abs(e)/abs(13))-t1.f11)} +} {} +do_test randexpr-2.77 { + db eval {SELECT -coalesce((select max(e-t1.b) from t1 where t1.e<11 and t1.b | e in ((abs(13)/abs(t1.b)),(abs((+17-case when t1.c | 13*(d) | b<>a or ((t1.e)>=a) then t1.b-t1.e when not exists(select 1 from t1 where 17<13) then -(13) else 17 end*13))/abs(13)),t1.c)),t1.e) | c FROM t1 WHERE not exists(select 1 from t1 where 17<=(select (case (~+abs(cast(avg((t1.e)-17 | b) AS integer))-((case -cast(avg(case when ~t1.b in (select d from t1 union select 19 from t1) then b when 13 in (t1.f,c,t1.b) then c else f end) AS integer)+count(distinct t1.f)+count(distinct t1.d) when count(*) then ((max(c))) else count(*) end)*cast(avg(t1.e) AS integer))) when ((cast(avg(a) AS integer))) then -max(d) else ((count(*))) end) from t1))} +} {-212} +do_test randexpr-2.78 { + db eval {SELECT -coalesce((select max(e-t1.b) from t1 where t1.e<11 and t1.b | e in ((abs(13)/abs(t1.b)),(abs((+17-case when t1.c | 13*(d) | b<>a or ((t1.e)>=a) then t1.b-t1.e when not exists(select 1 from t1 where 17<13) then -(13) else 17 end*13))/abs(13)),t1.c)),t1.e) | c FROM t1 WHERE NOT (not exists(select 1 from t1 where 17<=(select (case (~+abs(cast(avg((t1.e)-17 | b) AS integer))-((case -cast(avg(case when ~t1.b in (select d from t1 union select 19 from t1) then b when 13 in (t1.f,c,t1.b) then c else f end) AS integer)+count(distinct t1.f)+count(distinct t1.d) when count(*) then ((max(c))) else count(*) end)*cast(avg(t1.e) AS integer))) when ((cast(avg(a) AS integer))) then -max(d) else ((count(*))) end) from t1)))} +} {} +do_test randexpr-2.79 { + db eval {SELECT -coalesce((select max(e-t1.b) from t1 where t1.e<11 and t1.b & e in ((abs(13)/abs(t1.b)),(abs((+17-case when t1.c & 13*(d) & b<>a or ((t1.e)>=a) then t1.b-t1.e when not exists(select 1 from t1 where 17<13) then -(13) else 17 end*13))/abs(13)),t1.c)),t1.e) & c FROM t1 WHERE not exists(select 1 from t1 where 17<=(select (case (~+abs(cast(avg((t1.e)-17 | b) AS integer))-((case -cast(avg(case when ~t1.b in (select d from t1 union select 19 from t1) then b when 13 in (t1.f,c,t1.b) then c else f end) AS integer)+count(distinct t1.f)+count(distinct t1.d) when count(*) then ((max(c))) else count(*) end)*cast(avg(t1.e) AS integer))) when ((cast(avg(a) AS integer))) then -max(d) else ((count(*))) end) from 
t1))} +} {12} +do_test randexpr-2.80 { + db eval {SELECT 17-coalesce((select c+t1.f from t1 where (select count(distinct d)-abs(min((select case - -count(*) when -max(19) then -count(*) else count(*) end from t1))-count(*)) from t1)-t1.a+case when (abs(t1.e)/abs(t1.e)) not in (a,t1.a,19) and (b between 19 and -t1.c) then 17 else c end-19+t1.e*13<= -(t1.a)),11)-t1.d-c FROM t1 WHERE ((t1.d in ((select +cast(avg(coalesce((select max(coalesce((select max(a) from t1 where not a in (b,~(select ~count(*) from t1),case (select +max(11) | -count(*) from t1) when 17*t1.f*17 then b else (t1.b) end+f | 17)),e)) from t1 where not exists(select 1 from t1 where 17 between a and a)),(19))) AS integer) from t1),coalesce((select max(e) from t1 where t1.e<>11),b),t1.d)))} +} {-694} +do_test randexpr-2.81 { + db eval {SELECT 17-coalesce((select c+t1.f from t1 where (select count(distinct d)-abs(min((select case - -count(*) when -max(19) then -count(*) else count(*) end from t1))-count(*)) from t1)-t1.a+case when (abs(t1.e)/abs(t1.e)) not in (a,t1.a,19) and (b between 19 and -t1.c) then 17 else c end-19+t1.e*13<= -(t1.a)),11)-t1.d-c FROM t1 WHERE NOT (((t1.d in ((select +cast(avg(coalesce((select max(coalesce((select max(a) from t1 where not a in (b,~(select ~count(*) from t1),case (select +max(11) | -count(*) from t1) when 17*t1.f*17 then b else (t1.b) end+f | 17)),e)) from t1 where not exists(select 1 from t1 where 17 between a and a)),(19))) AS integer) from t1),coalesce((select max(e) from t1 where t1.e<>11),b),t1.d))))} +} {} +do_test randexpr-2.82 { + db eval {SELECT case (t1.a) when e then t1.a else t1.d end+(b)*t1.c | (select ~count(distinct (select count(distinct t1.f) from t1)) from t1)* -case c*t1.b when 19 then t1.d else (t1.b) | case when not (e)*f in (select count(distinct a)-count(distinct c) | min(a) from t1 union select -count(*) from t1) and (t1.f in (d, -t1.c,f)) then e when t1.c not in (t1.b,t1.e,b) then t1.e else f end end FROM t1 WHERE t1.d<>coalesce((select max(case when exists(select 1 from t1 where f<=e | b) then (select min(+t1.f) from t1) else 11 end) from t1 where +case when t1.e=c then -t1.c else t1.a end-t1.a-11-f in (select (case -min(a) when count(*)*(( -+abs(cast(avg(a) AS integer))*max((17))) | max(19)) then min(11) else min(a) end | count(distinct -d)) from t1 union select min(t1.e) from t1)),t1.c)} +} {60408} +do_test randexpr-2.83 { + db eval {SELECT case (t1.a) when e then t1.a else t1.d end+(b)*t1.c | (select ~count(distinct (select count(distinct t1.f) from t1)) from t1)* -case c*t1.b when 19 then t1.d else (t1.b) | case when not (e)*f in (select count(distinct a)-count(distinct c) | min(a) from t1 union select -count(*) from t1) and (t1.f in (d, -t1.c,f)) then e when t1.c not in (t1.b,t1.e,b) then t1.e else f end end FROM t1 WHERE NOT (t1.d<>coalesce((select max(case when exists(select 1 from t1 where f<=e | b) then (select min(+t1.f) from t1) else 11 end) from t1 where +case when t1.e=c then -t1.c else t1.a end-t1.a-11-f in (select (case -min(a) when count(*)*(( -+abs(cast(avg(a) AS integer))*max((17))) | max(19)) then min(11) else min(a) end | count(distinct -d)) from t1 union select min(t1.e) from t1)),t1.c))} +} {} +do_test randexpr-2.84 { + db eval {SELECT case (t1.a) when e then t1.a else t1.d end+(b)*t1.c & (select ~count(distinct (select count(distinct t1.f) from t1)) from t1)* -case c*t1.b when 19 then t1.d else (t1.b) & case when not (e)*f in (select count(distinct a)-count(distinct c) & min(a) from t1 union select -count(*) from t1) and (t1.f in (d, -t1.c,f)) 
then e when t1.c not in (t1.b,t1.e,b) then t1.e else f end end FROM t1 WHERE t1.d<>coalesce((select max(case when exists(select 1 from t1 where f<=e | b) then (select min(+t1.f) from t1) else 11 end) from t1 where +case when t1.e=c then -t1.c else t1.a end-t1.a-11-f in (select (case -min(a) when count(*)*(( -+abs(cast(avg(a) AS integer))*max((17))) | max(19)) then min(11) else min(a) end | count(distinct -d)) from t1 union select min(t1.e) from t1)),t1.c)} +} {384} +do_test randexpr-2.85 { + db eval {SELECT (select abs(max((select (case cast(avg(t1.a) AS integer) when +~abs(cast(avg( -17+case when t1.a in (c,(e),t1.f) and f in (c,t1.d,t1.a) then d when d between c and t1.d then f else e end) AS integer))-(abs(~max((b)))-min(t1.f)) then cast(avg(t1.f) AS integer) else count(*) end) from t1)+case when (exists(select 1 from t1 where t1.a in (select -max(e) from t1 union select -count(*) from t1)) or e in (t1.b*d,a, - -d)) then 17*t1.c else c end)) from t1) FROM t1 WHERE ~t1.e>case 13 when e then a else coalesce((select ~t1.d from t1 where exists(select 1 from t1 where t1.f between coalesce((select t1.a from t1 where ((select min(d-d- -t1.d+a) from t1)) not between f and t1.c or exists(select 1 from t1 where b<>17)),(a)) and -d) or not exists(select 1 from t1 where e in (select min(19)*(count(*))*cast(avg(11) AS integer) | min(b) from t1 union select (cast(avg(t1.e) AS integer)) from t1))),c) end-t1.c} +} {} +do_test randexpr-2.86 { + db eval {SELECT (select abs(max((select (case cast(avg(t1.a) AS integer) when +~abs(cast(avg( -17+case when t1.a in (c,(e),t1.f) and f in (c,t1.d,t1.a) then d when d between c and t1.d then f else e end) AS integer))-(abs(~max((b)))-min(t1.f)) then cast(avg(t1.f) AS integer) else count(*) end) from t1)+case when (exists(select 1 from t1 where t1.a in (select -max(e) from t1 union select -count(*) from t1)) or e in (t1.b*d,a, - -d)) then 17*t1.c else c end)) from t1) FROM t1 WHERE NOT (~t1.e>case 13 when e then a else coalesce((select ~t1.d from t1 where exists(select 1 from t1 where t1.f between coalesce((select t1.a from t1 where ((select min(d-d- -t1.d+a) from t1)) not between f and t1.c or exists(select 1 from t1 where b<>17)),(a)) and -d) or not exists(select 1 from t1 where e in (select min(19)*(count(*))*cast(avg(11) AS integer) | min(b) from t1 union select (cast(avg(t1.e) AS integer)) from t1))),c) end-t1.c)} +} {301} +do_test randexpr-2.87 { + db eval {SELECT t1.c+(abs(t1.b*t1.b)/abs(++t1.a-t1.e-(abs(t1.b)/abs(d))*11*t1.f)) | 19-b-t1.e | t1.e-+~t1.d*13+a+case case when (d)+17-c=d then a else t1.b end when e then t1.a else 19 end FROM t1 WHERE not coalesce((select (abs(case t1.b when coalesce((select case when exists(select 1 from t1 where f between 17 and f) then 11 else f end from t1 where not exists(select 1 from t1 where c>=t1.d)),17)*t1.a then t1.e else 13 end)/abs(17)) from t1 where ((13 in (select abs(~min(d)*count(distinct -(f)) | count(*)*cast(avg(11) AS integer)) from t1 union select count(*) from t1) and a<=13) and c<> -(t1.c))), -t1.a)*t1.e*f+d not in (11,d,t1.c)} +} {} +do_test randexpr-2.88 { + db eval {SELECT t1.c+(abs(t1.b*t1.b)/abs(++t1.a-t1.e-(abs(t1.b)/abs(d))*11*t1.f)) | 19-b-t1.e | t1.e-+~t1.d*13+a+case case when (d)+17-c=d then a else t1.b end when e then t1.a else 19 end FROM t1 WHERE NOT (not coalesce((select (abs(case t1.b when coalesce((select case when exists(select 1 from t1 where f between 17 and f) then 11 else f end from t1 where not exists(select 1 from t1 where c>=t1.d)),17)*t1.a then t1.e else 13 end)/abs(17)) from t1 
where ((13 in (select abs(~min(d)*count(distinct -(f)) | count(*)*cast(avg(11) AS integer)) from t1 union select count(*) from t1) and a<=13) and c<> -(t1.c))), -t1.a)*t1.e*f+d not in (11,d,t1.c))} +} {-33} +do_test randexpr-2.89 { + db eval {SELECT t1.c+(abs(t1.b*t1.b)/abs(++t1.a-t1.e-(abs(t1.b)/abs(d))*11*t1.f)) & 19-b-t1.e & t1.e-+~t1.d*13+a+case case when (d)+17-c=d then a else t1.b end when e then t1.a else 19 end FROM t1 WHERE NOT (not coalesce((select (abs(case t1.b when coalesce((select case when exists(select 1 from t1 where f between 17 and f) then 11 else f end from t1 where not exists(select 1 from t1 where c>=t1.d)),17)*t1.a then t1.e else 13 end)/abs(17)) from t1 where ((13 in (select abs(~min(d)*count(distinct -(f)) | count(*)*cast(avg(11) AS integer)) from t1 union select count(*) from t1) and a<=13) and c<> -(t1.c))), -t1.a)*t1.e*f+d not in (11,d,t1.c))} +} {0} +do_test randexpr-2.90 { + db eval {SELECT case when t1.f>t1.d then (select abs(+count(distinct c)+(abs(min((abs(+11)/abs(b)))* -count(*)))) from t1)-t1.a when d+case when (select cast(avg(t1.f) AS integer)-count(distinct 19) from t1)<>17 then c when t1.e in (select cast(avg(t1.a) AS integer) from t1 union select count(distinct f) from t1) and d>t1.c and t1.f not in (11,19,t1.b) then a else (c) end+(17)=c then t1.d else t1.c end-c FROM t1 WHERE 13 not in (e,t1.b,+coalesce((select max(b) from t1 where 13*b-t1.d-case when 11>=a then t1.a-t1.d else (t1.d)*e+19 end*t1.a+19+(11) | t1.c*t1.a-17t1.d then (select abs(+count(distinct c)+(abs(min((abs(+11)/abs(b)))* -count(*)))) from t1)-t1.a when d+case when (select cast(avg(t1.f) AS integer)-count(distinct 19) from t1)<>17 then c when t1.e in (select cast(avg(t1.a) AS integer) from t1 union select count(distinct f) from t1) and d>t1.c and t1.f not in (11,19,t1.b) then a else (c) end+(17)=c then t1.d else t1.c end-c FROM t1 WHERE NOT (13 not in (e,t1.b,+coalesce((select max(b) from t1 where 13*b-t1.d-case when 11>=a then t1.a-t1.d else (t1.d)*e+19 end*t1.a+19+(11) | t1.c*t1.a-17(abs(t1.a)/abs(e)) then d else ~(t1.e)-e+d+13-t1.b+b*19 end) from t1 where (select max(19)-cast(avg(a) AS integer) from t1)> -t1.e),17)+e+t1.e-t1.c+(c))*17) from t1) between -17 and t1.c} +} {} +do_test randexpr-2.93 { + db eval {SELECT coalesce((select +19 from t1 where 19-c+t1.c-f not in (coalesce((select 19 from t1 where (t1.b between t1.c*t1.a*t1.b | a-e | 13-(abs(19)/abs((select max(t1.d | coalesce((select max( -t1.c) from t1 where b in (select e from t1 union select (f) from t1)),t1.d)+(e)) from t1)+c)) and f)),11),f,17)),f) FROM t1 WHERE NOT ((select count(distinct 11)+min((coalesce((select max(case when t1.d<>(abs(t1.a)/abs(e)) then d else ~(t1.e)-e+d+13-t1.b+b*19 end) from t1 where (select max(19)-cast(avg(a) AS integer) from t1)> -t1.e),17)+e+t1.e-t1.c+(c))*17) from t1) between -17 and t1.c)} +} {19} +do_test randexpr-2.94 { + db eval {SELECT coalesce((select +19 from t1 where 19-c+t1.c-f not in (coalesce((select 19 from t1 where (t1.b between t1.c*t1.a*t1.b & a-e & 13-(abs(19)/abs((select max(t1.d & coalesce((select max( -t1.c) from t1 where b in (select e from t1 union select (f) from t1)),t1.d)+(e)) from t1)+c)) and f)),11),f,17)),f) FROM t1 WHERE NOT ((select count(distinct 11)+min((coalesce((select max(case when t1.d<>(abs(t1.a)/abs(e)) then d else ~(t1.e)-e+d+13-t1.b+b*19 end) from t1 where (select max(19)-cast(avg(a) AS integer) from t1)> -t1.e),17)+e+t1.e-t1.c+(c))*17) from t1) between -17 and t1.c)} +} {19} +do_test randexpr-2.95 { + db eval {SELECT (case t1.a when (select 
(count(distinct t1.f)) from t1)-e then 11 else t1.c+17-f*~f | f+~e-a*t1.b+13 end | 13) FROM t1 WHERE (coalesce((select max(f) from t1 where (c+t1.f | (select count(distinct t1.d) from t1)>=coalesce((select b+(abs(case when t1.f>=(select case max(f) when max(19) then cast(avg(t1.a) AS integer) else count(distinct 13) end from t1) | t1.b and (a in (select (b) from t1 union select t1.b from t1)) then (a)-b when not exists(select 1 from t1 where c between t1.c and t1.a) then a else t1.a end)/abs(t1.b))- -f from t1 where b not in (t1.b,t1.f,c)),t1.d)-t1.d)), -17) between 13 and t1.f)} +} {-19491} +do_test randexpr-2.96 { + db eval {SELECT (case t1.a when (select (count(distinct t1.f)) from t1)-e then 11 else t1.c+17-f*~f | f+~e-a*t1.b+13 end | 13) FROM t1 WHERE NOT ((coalesce((select max(f) from t1 where (c+t1.f | (select count(distinct t1.d) from t1)>=coalesce((select b+(abs(case when t1.f>=(select case max(f) when max(19) then cast(avg(t1.a) AS integer) else count(distinct 13) end from t1) | t1.b and (a in (select (b) from t1 union select t1.b from t1)) then (a)-b when not exists(select 1 from t1 where c between t1.c and t1.a) then a else t1.a end)/abs(t1.b))- -f from t1 where b not in (t1.b,t1.f,c)),t1.d)-t1.d)), -17) between 13 and t1.f))} +} {} +do_test randexpr-2.97 { + db eval {SELECT (case t1.a when (select (count(distinct t1.f)) from t1)-e then 11 else t1.c+17-f*~f & f+~e-a*t1.b+13 end & 13) FROM t1 WHERE (coalesce((select max(f) from t1 where (c+t1.f | (select count(distinct t1.d) from t1)>=coalesce((select b+(abs(case when t1.f>=(select case max(f) when max(19) then cast(avg(t1.a) AS integer) else count(distinct 13) end from t1) | t1.b and (a in (select (b) from t1 union select t1.b from t1)) then (a)-b when not exists(select 1 from t1 where c between t1.c and t1.a) then a else t1.a end)/abs(t1.b))- -f from t1 where b not in (t1.b,t1.f,c)),t1.d)-t1.d)), -17) between 13 and t1.f)} +} {0} +do_test randexpr-2.98 { + db eval {SELECT case when ((select -abs(min(coalesce((select max(11 | t1.f) from t1 where coalesce((select f from t1 where e+c in (select 19 from t1 union select f from t1)),b) not in (t1.a,+case when 13>=t1.e then t1.a when 17 not in (t1.d,b,t1.e) then 13 else t1.b end-t1.e,t1.f)),t1.f))*count(*)*min(t1.c)) from t1)<17) and 13* -f+a not between (a) and f then c-11+b else 19 end FROM t1 WHERE e+case case when a<>c then (abs(case when coalesce((select (abs( -19+11)/abs(t1.c)) from t1 where (17) in (a,13,11) and 17 in (select 11 from t1 union select t1.f from t1)),t1.a) between t1.f and c then t1.d when not exists(select 1 from t1 where not exists(select 1 from t1 where 19=f)) then f else (b) end-19)/abs(e)) else 11 end when 17 then a else t1.f end-13 | e between -t1.e and 13} +} {} +do_test randexpr-2.99 { + db eval {SELECT case when ((select -abs(min(coalesce((select max(11 | t1.f) from t1 where coalesce((select f from t1 where e+c in (select 19 from t1 union select f from t1)),b) not in (t1.a,+case when 13>=t1.e then t1.a when 17 not in (t1.d,b,t1.e) then 13 else t1.b end-t1.e,t1.f)),t1.f))*count(*)*min(t1.c)) from t1)<17) and 13* -f+a not between (a) and f then c-11+b else 19 end FROM t1 WHERE NOT (e+case case when a<>c then (abs(case when coalesce((select (abs( -19+11)/abs(t1.c)) from t1 where (17) in (a,13,11) and 17 in (select 11 from t1 union select t1.f from t1)),t1.a) between t1.f and c then t1.d when not exists(select 1 from t1 where not exists(select 1 from t1 where 19=f)) then f else (b) end-19)/abs(e)) else 11 end when 17 then a else t1.f end-13 | e between -t1.e 
and 13)} +} {489} +do_test randexpr-2.100 { + db eval {SELECT case when ((select -abs(min(coalesce((select max(11 & t1.f) from t1 where coalesce((select f from t1 where e+c in (select 19 from t1 union select f from t1)),b) not in (t1.a,+case when 13>=t1.e then t1.a when 17 not in (t1.d,b,t1.e) then 13 else t1.b end-t1.e,t1.f)),t1.f))*count(*)*min(t1.c)) from t1)<17) and 13* -f+a not between (a) and f then c-11+b else 19 end FROM t1 WHERE NOT (e+case case when a<>c then (abs(case when coalesce((select (abs( -19+11)/abs(t1.c)) from t1 where (17) in (a,13,11) and 17 in (select 11 from t1 union select t1.f from t1)),t1.a) between t1.f and c then t1.d when not exists(select 1 from t1 where not exists(select 1 from t1 where 19=f)) then f else (b) end-19)/abs(e)) else 11 end when 17 then a else t1.f end-13 | e between -t1.e and 13)} +} {489} +do_test randexpr-2.101 { + db eval {SELECT case when t1.b-t1.e*c<>c then coalesce((select -17 from t1 where (t1.f+((select case count(*) when -cast(avg(19) AS integer)+count(distinct (t1.c)) | (cast(avg(19) AS integer))*max(t1.c)*count(*)-((count(*))) then cast(avg(b) AS integer) else (count(*)) end-min(a) from t1)) | case when t1.b | a in (select t1.a from t1 union select (select count(distinct e) from t1) from t1) then t1.e else b end+t1.a)c then coalesce((select -17 from t1 where (t1.f+((select case count(*) when -cast(avg(19) AS integer)+count(distinct (t1.c)) | (cast(avg(19) AS integer))*max(t1.c)*count(*)-((count(*))) then cast(avg(b) AS integer) else (count(*)) end-min(a) from t1)) | case when t1.b | a in (select t1.a from t1 union select (select count(distinct e) from t1) from t1) then t1.e else b end+t1.a)c then coalesce((select -17 from t1 where (t1.f+((select case count(*) when -cast(avg(19) AS integer)+count(distinct (t1.c)) & (cast(avg(19) AS integer))*max(t1.c)*count(*)-((count(*))) then cast(avg(b) AS integer) else (count(*)) end-min(a) from t1)) & case when t1.b & a in (select t1.a from t1 union select (select count(distinct e) from t1) from t1) then t1.e else b end+t1.a)~t1.a then f else +~11 end) from t1 where ((select case ~abs(count(distinct t1.e)+(count(*)+count(*)+(abs( -~count(*) | max((t1.b))))))*(cast(avg(t1.a) AS integer)) when (count(*)) then min(19) else -(min(b)) end from t1))+11<>t1.f), -c-f) FROM t1 WHERE ~e>coalesce((select max(case case case coalesce((select max(coalesce((select c from t1 where t1.a-t1.c>13),13)) from t1 where -b not in (case d when ~t1.f | t1.c then 13 else 11 end,19,19)),t1.d) when c then t1.a else t1.b end+f-e*b when f then 19 else -t1.b end when t1.b then 11 else 13 end) from t1 where c in (select 19 from t1 union select e from t1)),t1.d)} +} {} +do_test randexpr-2.105 { + db eval {SELECT coalesce((select max(case when (select ~cast(avg(a) AS integer) from t1)>~t1.a then f else +~11 end) from t1 where ((select case ~abs(count(distinct t1.e)+(count(*)+count(*)+(abs( -~count(*) | max((t1.b))))))*(cast(avg(t1.a) AS integer)) when (count(*)) then min(19) else -(min(b)) end from t1))+11<>t1.f), -c-f) FROM t1 WHERE NOT (~e>coalesce((select max(case case case coalesce((select max(coalesce((select c from t1 where t1.a-t1.c>13),13)) from t1 where -b not in (case d when ~t1.f | t1.c then 13 else 11 end,19,19)),t1.d) when c then t1.a else t1.b end+f-e*b when f then 19 else -t1.b end when t1.b then 11 else 13 end) from t1 where c in (select 19 from t1 union select e from t1)),t1.d))} +} {-12} +do_test randexpr-2.106 { + db eval {SELECT coalesce((select max(case when (select ~cast(avg(a) AS integer) from t1)>~t1.a 
then f else +~11 end) from t1 where ((select case ~abs(count(distinct t1.e)+(count(*)+count(*)+(abs( -~count(*) & max((t1.b))))))*(cast(avg(t1.a) AS integer)) when (count(*)) then min(19) else -(min(b)) end from t1))+11<>t1.f), -c-f) FROM t1 WHERE NOT (~e>coalesce((select max(case case case coalesce((select max(coalesce((select c from t1 where t1.a-t1.c>13),13)) from t1 where -b not in (case d when ~t1.f | t1.c then 13 else 11 end,19,19)),t1.d) when c then t1.a else t1.b end+f-e*b when f then 19 else -t1.b end when t1.b then 11 else 13 end) from t1 where c in (select 19 from t1 union select e from t1)),t1.d))} +} {-12} +do_test randexpr-2.107 { + db eval {SELECT coalesce((select a from t1 where b>=~11-coalesce((select max((abs(19)/abs(t1.e))+19+t1.d) from t1 where (select abs(count(*) | count(*) | case count(*) when cast(avg(17) AS integer) then -cast(avg(b) AS integer) else count(*) end*min(b))*(min(t1.e)) from t1) in (11*(select count(*) from t1)*17*t1.e-e*t1.b,t1.e,t1.f)),a)*t1.d),13)-f FROM t1 WHERE ~((case when not 13-+t1.c>=17 then -~ -b+t1.f+case when t1.ct1.d then 11 else t1.e end)) else t1.b end when d>a then (a) else t1.b end))+t1.c in (13,t1.a,13) or t1.e>c} +} {-500} +do_test randexpr-2.108 { + db eval {SELECT coalesce((select a from t1 where b>=~11-coalesce((select max((abs(19)/abs(t1.e))+19+t1.d) from t1 where (select abs(count(*) | count(*) | case count(*) when cast(avg(17) AS integer) then -cast(avg(b) AS integer) else count(*) end*min(b))*(min(t1.e)) from t1) in (11*(select count(*) from t1)*17*t1.e-e*t1.b,t1.e,t1.f)),a)*t1.d),13)-f FROM t1 WHERE NOT (~((case when not 13-+t1.c>=17 then -~ -b+t1.f+case when t1.ct1.d then 11 else t1.e end)) else t1.b end when d>a then (a) else t1.b end))+t1.c in (13,t1.a,13) or t1.e>c)} +} {} +do_test randexpr-2.109 { + db eval {SELECT coalesce((select a from t1 where b>=~11-coalesce((select max((abs(19)/abs(t1.e))+19+t1.d) from t1 where (select abs(count(*) & count(*) & case count(*) when cast(avg(17) AS integer) then -cast(avg(b) AS integer) else count(*) end*min(b))*(min(t1.e)) from t1) in (11*(select count(*) from t1)*17*t1.e-e*t1.b,t1.e,t1.f)),a)*t1.d),13)-f FROM t1 WHERE ~((case when not 13-+t1.c>=17 then -~ -b+t1.f+case when t1.ct1.d then 11 else t1.e end)) else t1.b end when d>a then (a) else t1.b end))+t1.c in (13,t1.a,13) or t1.e>c} +} {-500} +do_test randexpr-2.110 { + db eval {SELECT case when 13>=~t1.a or (exists(select 1 from t1 where 11 not in (17,t1.f,17))) then f when not exists(select 1 from t1 where exists(select 1 from t1 where 13 not between e and ~+case b when t1.f then case -case f when 11 then -17 else 19 end+17 when b then b else 13 end else 13 end- -t1.a and t1.c not in (t1.f,a,c) or t1.f<11)) then t1.c else 11 end FROM t1 WHERE +t1.e in (select cast(avg(t1.c*t1.a*(abs(13)/abs(t1.e))) AS integer) from t1 union select min(c+13) from t1)} +} {} +do_test randexpr-2.111 { + db eval {SELECT case when 13>=~t1.a or (exists(select 1 from t1 where 11 not in (17,t1.f,17))) then f when not exists(select 1 from t1 where exists(select 1 from t1 where 13 not between e and ~+case b when t1.f then case -case f when 11 then -17 else 19 end+17 when b then b else 13 end else 13 end- -t1.a and t1.c not in (t1.f,a,c) or t1.f<11)) then t1.c else 11 end FROM t1 WHERE NOT (+t1.e in (select cast(avg(t1.c*t1.a*(abs(13)/abs(t1.e))) AS integer) from t1 union select min(c+13) from t1))} +} {600} +do_test randexpr-2.112 { + db eval {SELECT case when coalesce((select max(13) from t1 where coalesce((select max(c) from t1 where coalesce((select 
max(17) from t1 where 11 in (select ~d | case when t1.a>+19 then 11 when t1.d not in ( -e,e,c) or 13d or t1.e<>b),t1.b)*13<=f then -t1.e when 19 between -t1.f and t1.a then -t1.b else t1.f end FROM t1 WHERE exists(select 1 from t1 where t1.f between a and coalesce((select max(t1.f) from t1 where exists(select 1 from t1 where coalesce((select 19 from t1 where t1.c=b),a)= -t1.b and not exists(select 1 from t1 where b between e and b) and a<= -17 or exists(select 1 from t1 where 17<=f)) or t1.a>=t1.c),13)*~13*t1.c*13 or t1.d>13 and 11>=t1.d and 11 not between c and c and t1.f>=17 and (d)=f)} +} {} +do_test randexpr-2.113 { + db eval {SELECT case when coalesce((select max(13) from t1 where coalesce((select max(c) from t1 where coalesce((select max(17) from t1 where 11 in (select ~d | case when t1.a>+19 then 11 when t1.d not in ( -e,e,c) or 13d or t1.e<>b),t1.b)*13<=f then -t1.e when 19 between -t1.f and t1.a then -t1.b else t1.f end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.f between a and coalesce((select max(t1.f) from t1 where exists(select 1 from t1 where coalesce((select 19 from t1 where t1.c=b),a)= -t1.b and not exists(select 1 from t1 where b between e and b) and a<= -17 or exists(select 1 from t1 where 17<=f)) or t1.a>=t1.c),13)*~13*t1.c*13 or t1.d>13 and 11>=t1.d and 11 not between c and c and t1.f>=17 and (d)=f))} +} {-500} +do_test randexpr-2.114 { + db eval {SELECT case when coalesce((select max(13) from t1 where coalesce((select max(c) from t1 where coalesce((select max(17) from t1 where 11 in (select ~d & case when t1.a>+19 then 11 when t1.d not in ( -e,e,c) or 13d or t1.e<>b),t1.b)*13<=f then -t1.e when 19 between -t1.f and t1.a then -t1.b else t1.f end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.f between a and coalesce((select max(t1.f) from t1 where exists(select 1 from t1 where coalesce((select 19 from t1 where t1.c=b),a)= -t1.b and not exists(select 1 from t1 where b between e and b) and a<= -17 or exists(select 1 from t1 where 17<=f)) or t1.a>=t1.c),13)*~13*t1.c*13 or t1.d>13 and 11>=t1.d and 11 not between c and c and t1.f>=17 and (d)=f))} +} {-500} +do_test randexpr-2.115 { + db eval {SELECT case when coalesce((select +case when not exists(select 1 from t1 where 19*t1.e< -~11*a or t1.b>=t1.c and t1.a<>e) then coalesce((select max(13) from t1 where 11>11), -(e)) | t1.a when (t1.d) in (select f from t1 union select b from t1) then a else e end from t1 where t1.f<> -t1.d),t1.b) not in (d,t1.d,t1.c) then 19 when (t1.b<>t1.e or -t1.b>=t1.b) then (f) else d end FROM t1 WHERE t1.e=a and b>=(abs(case when 19 not between ~+17 and +case when ((b+e-f=t1.d or b not in (t1.a,t1.f,t1.b) or t1.a not between 19 and e or b>=(t1.d) or t1.b not in (d,t1.a,t1.b))) then e-f else t1.e*t1.f end | f then d else -d end)/abs(11))} +} {} +do_test randexpr-2.116 { + db eval {SELECT case when coalesce((select +case when not exists(select 1 from t1 where 19*t1.e< -~11*a or t1.b>=t1.c and t1.a<>e) then coalesce((select max(13) from t1 where 11>11), -(e)) | t1.a when (t1.d) in (select f from t1 union select b from t1) then a else e end from t1 where t1.f<> -t1.d),t1.b) not in (d,t1.d,t1.c) then 19 when (t1.b<>t1.e or -t1.b>=t1.b) then (f) else d end FROM t1 WHERE NOT (t1.e=a and b>=(abs(case when 19 not between ~+17 and +case when ((b+e-f=t1.d or b not in (t1.a,t1.f,t1.b) or t1.a not between 19 and e or b>=(t1.d) or t1.b not in (d,t1.a,t1.b))) then e-f else t1.e*t1.f end | f then d else -d end)/abs(11)))} +} {19} +do_test randexpr-2.117 { + db eval {SELECT case when coalesce((select 
+case when not exists(select 1 from t1 where 19*t1.e< -~11*a or t1.b>=t1.c and t1.a<>e) then coalesce((select max(13) from t1 where 11>11), -(e)) & t1.a when (t1.d) in (select f from t1 union select b from t1) then a else e end from t1 where t1.f<> -t1.d),t1.b) not in (d,t1.d,t1.c) then 19 when (t1.b<>t1.e or -t1.b>=t1.b) then (f) else d end FROM t1 WHERE NOT (t1.e=a and b>=(abs(case when 19 not between ~+17 and +case when ((b+e-f=t1.d or b not in (t1.a,t1.f,t1.b) or t1.a not between 19 and e or b>=(t1.d) or t1.b not in (d,t1.a,t1.b))) then e-f else t1.e*t1.f end | f then d else -d end)/abs(11)))} +} {19} +do_test randexpr-2.118 { + db eval {SELECT (select (cast(avg(b) AS integer) | count(distinct case when coalesce((select 13 from t1 where (+(abs(e)/abs(t1.b))) in ( -b, -(a),13)),t1.e)>=b then 11 else - -19 end-17+19) | abs(~case count(distinct t1.b) when +min(e) then +abs(~ - -count(distinct 11)-count(distinct t1.b))-count(distinct a) else -min(b) end)*count(distinct 19)-count(distinct t1.b)) from t1) FROM t1 WHERE 11<=t1.a-t1.f-19*t1.b+19+coalesce((select e from t1 where d not between (select count(distinct coalesce((select t1.a from t1 where (select cast(avg(t1.e) AS integer)-+count(distinct c)+max((abs(t1.c)/abs(t1.f))-a)-(count(*)-cast(avg((17)) AS integer)) from t1) in (t1.e, -e | d,b)),c)) from t1) and t1.a),t1.d)-t1.d} +} {} +do_test randexpr-2.119 { + db eval {SELECT (select (cast(avg(b) AS integer) | count(distinct case when coalesce((select 13 from t1 where (+(abs(e)/abs(t1.b))) in ( -b, -(a),13)),t1.e)>=b then 11 else - -19 end-17+19) | abs(~case count(distinct t1.b) when +min(e) then +abs(~ - -count(distinct 11)-count(distinct t1.b))-count(distinct a) else -min(b) end)*count(distinct 19)-count(distinct t1.b)) from t1) FROM t1 WHERE NOT (11<=t1.a-t1.f-19*t1.b+19+coalesce((select e from t1 where d not between (select count(distinct coalesce((select t1.a from t1 where (select cast(avg(t1.e) AS integer)-+count(distinct c)+max((abs(t1.c)/abs(t1.f))-a)-(count(*)-cast(avg((17)) AS integer)) from t1) in (t1.e, -e | d,b)),c)) from t1) and t1.a),t1.d)-t1.d)} +} {207} +do_test randexpr-2.120 { + db eval {SELECT (select (cast(avg(b) AS integer) & count(distinct case when coalesce((select 13 from t1 where (+(abs(e)/abs(t1.b))) in ( -b, -(a),13)),t1.e)>=b then 11 else - -19 end-17+19) & abs(~case count(distinct t1.b) when +min(e) then +abs(~ - -count(distinct 11)-count(distinct t1.b))-count(distinct a) else -min(b) end)*count(distinct 19)-count(distinct t1.b)) from t1) FROM t1 WHERE NOT (11<=t1.a-t1.f-19*t1.b+19+coalesce((select e from t1 where d not between (select count(distinct coalesce((select t1.a from t1 where (select cast(avg(t1.e) AS integer)-+count(distinct c)+max((abs(t1.c)/abs(t1.f))-a)-(count(*)-cast(avg((17)) AS integer)) from t1) in (t1.e, -e | d,b)),c)) from t1) and t1.a),t1.d)-t1.d)} +} {0} +do_test randexpr-2.121 { + db eval {SELECT 11+a*case t1.a*t1.e when 17 then coalesce((select +t1.b from t1 where t1.c<>t1.c and e between +(abs(case b+f*coalesce((select c from t1 where t1.d>17), -t1.c)+(19) when b then t1.f else t1.d end)/abs(t1.d)) and t1.c and t1.a>b or t1.fc or t1.d | 11 in (select t1.e from t1 union select d from t1))) then 17 else (abs(coalesce((select max(f) from t1 where not (c)t1.c and e between +(abs(case b+f*coalesce((select c from t1 where t1.d>17), -t1.c)+(19) when b then t1.f else t1.d end)/abs(t1.d)) and t1.c and t1.a>b or t1.fc or t1.d | 11 in (select t1.e from t1 union select d from t1))) then 17 else (abs(coalesce((select max(f) from t1 where not (c)e 
then t1.f else t1.b+case when b between coalesce((select max(t1.d-~(abs(a)/abs(c))+t1.c) from t1 where f in (select -min(t1.f)-+cast(avg(e) AS integer) from t1 union select -(cast(avg(13) AS integer)) from t1) and f<=a and not exists(select 1 from t1 where f not between t1.d and t1.d or (f)<=a) and (t1.f) between 11 and d),t1.a) and t1.f then e when (1113),case when c<=11 then t1.e when t1.e<=b then t1.d else t1.e end)-11,11,17)} +} {} +do_test randexpr-2.124 { + db eval {SELECT (e)+case when t1.a>e then t1.f else t1.b+case when b between coalesce((select max(t1.d-~(abs(a)/abs(c))+t1.c) from t1 where f in (select -min(t1.f)-+cast(avg(e) AS integer) from t1 union select -(cast(avg(13) AS integer)) from t1) and f<=a and not exists(select 1 from t1 where f not between t1.d and t1.d or (f)<=a) and (t1.f) between 11 and d),t1.a) and t1.f then e when (1113),case when c<=11 then t1.e when t1.e<=b then t1.d else t1.e end)-11,11,17))} +} {1524} +do_test randexpr-2.125 { + db eval {SELECT (e)+case when t1.a>e then t1.f else t1.b+case when b between coalesce((select max(t1.d-~(abs(a)/abs(c))+t1.c) from t1 where f in (select -min(t1.f)-+cast(avg(e) AS integer) from t1 union select -(cast(avg(13) AS integer)) from t1) and f<=a and not exists(select 1 from t1 where f not between t1.d and t1.d or (f)<=a) and (t1.f) between 11 and d),t1.a) and t1.f then e when (1113),case when c<=11 then t1.e when t1.e<=b then t1.d else t1.e end)-11,11,17))} +} {176} +do_test randexpr-2.126 { + db eval {SELECT coalesce((select max(~d-t1.f+b-13) from t1 where t1.c not between case when +f=t1.b then 13 when ~f not in (~t1.a,coalesce((select max(t1.b) from t1 where b between c-t1.c+e and (t1.b)),coalesce((select max(t1.d) from t1 where not e in (select 17 from t1 union select t1.c+e | a from t1)),b)),t1.e) then t1.b else a end and c),t1.a) FROM t1 WHERE (select (++ -count(*) | cast(avg(t1.b) AS integer) | ++(max(t1.d))+count(*)+max(t1.b)-count(distinct t1.e)-+abs( -+~max((abs(t1.c)/abs((select min(a) from t1)))))+count(distinct t1.a-t1.b)) from t1) between e and c} +} {} +do_test randexpr-2.127 { + db eval {SELECT coalesce((select max(~d-t1.f+b-13) from t1 where t1.c not between case when +f=t1.b then 13 when ~f not in (~t1.a,coalesce((select max(t1.b) from t1 where b between c-t1.c+e and (t1.b)),coalesce((select max(t1.d) from t1 where not e in (select 17 from t1 union select t1.c+e | a from t1)),b)),t1.e) then t1.b else a end and c),t1.a) FROM t1 WHERE NOT ((select (++ -count(*) | cast(avg(t1.b) AS integer) | ++(max(t1.d))+count(*)+max(t1.b)-count(distinct t1.e)-+abs( -+~max((abs(t1.c)/abs((select min(a) from t1)))))+count(distinct t1.a-t1.b)) from t1) between e and c)} +} {100} +do_test randexpr-2.128 { + db eval {SELECT coalesce((select max(~d-t1.f+b-13) from t1 where t1.c not between case when +f=t1.b then 13 when ~f not in (~t1.a,coalesce((select max(t1.b) from t1 where b between c-t1.c+e and (t1.b)),coalesce((select max(t1.d) from t1 where not e in (select 17 from t1 union select t1.c+e & a from t1)),b)),t1.e) then t1.b else a end and c),t1.a) FROM t1 WHERE NOT ((select (++ -count(*) | cast(avg(t1.b) AS integer) | ++(max(t1.d))+count(*)+max(t1.b)-count(distinct t1.e)-+abs( -+~max((abs(t1.c)/abs((select min(a) from t1)))))+count(distinct t1.a-t1.b)) from t1) between e and c)} +} {100} +do_test randexpr-2.129 { + db eval {SELECT a+case when exists(select 1 from t1 where 19>=e) then b*b+coalesce((select max( -coalesce((select max(19 | t1.e) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where +case 
t1.a when a then a else (17) end*19 in (select max(d) from t1 union select max(t1.a) from t1) and not exists(select 1 from t1 where t1.a<=11)) or 11>(19))),11* -a)) from t1 where t1.e<>17),f) else t1.e end-11 FROM t1 WHERE (t1.a>=(abs(coalesce((select +13*t1.a | c from t1 where 11+case when exists(select 1 from t1 where case when +t1.d in (select case +count(*) when count(distinct 11) then count(*) else (min(11)) end from t1 union select max( -11) from t1) or 11<> -t1.f then t1.d when t1.b in (13,a,d) then c else c end+t1.b=b) then e-c else (t1.d) end>t1.d),d))/abs(t1.f)) and 11 in (select -t1.a from t1 union select 13 from t1))} +} {} +do_test randexpr-2.130 { + db eval {SELECT a+case when exists(select 1 from t1 where 19>=e) then b*b+coalesce((select max( -coalesce((select max(19 | t1.e) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where +case t1.a when a then a else (17) end*19 in (select max(d) from t1 union select max(t1.a) from t1) and not exists(select 1 from t1 where t1.a<=11)) or 11>(19))),11* -a)) from t1 where t1.e<>17),f) else t1.e end-11 FROM t1 WHERE NOT ((t1.a>=(abs(coalesce((select +13*t1.a | c from t1 where 11+case when exists(select 1 from t1 where case when +t1.d in (select case +count(*) when count(distinct 11) then count(*) else (min(11)) end from t1 union select max( -11) from t1) or 11<> -t1.f then t1.d when t1.b in (13,a,d) then c else c end+t1.b=b) then e-c else (t1.d) end>t1.d),d))/abs(t1.f)) and 11 in (select -t1.a from t1 union select 13 from t1)))} +} {589} +do_test randexpr-2.131 { + db eval {SELECT a+case when exists(select 1 from t1 where 19>=e) then b*b+coalesce((select max( -coalesce((select max(19 & t1.e) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where +case t1.a when a then a else (17) end*19 in (select max(d) from t1 union select max(t1.a) from t1) and not exists(select 1 from t1 where t1.a<=11)) or 11>(19))),11* -a)) from t1 where t1.e<>17),f) else t1.e end-11 FROM t1 WHERE NOT ((t1.a>=(abs(coalesce((select +13*t1.a | c from t1 where 11+case when exists(select 1 from t1 where case when +t1.d in (select case +count(*) when count(distinct 11) then count(*) else (min(11)) end from t1 union select max( -11) from t1) or 11<> -t1.f then t1.d when t1.b in (13,a,d) then c else c end+t1.b=b) then e-c else (t1.d) end>t1.d),d))/abs(t1.f)) and 11 in (select -t1.a from t1 union select 13 from t1)))} +} {589} +do_test randexpr-2.132 { + db eval {SELECT case ~13 when t1.e then e else (select case case (abs(count(*))-~count(*)+count(*) | count(*)-count(*)+max(t1.b)* -(cast(avg(13) AS integer)) | -(count(distinct t1.c))) when min(e) then (min(13)) else count(*) end*count(*) when max(e) then count(distinct -d) else count(*) end from t1) end-t1.d*case (abs(e | t1.b)/abs((select cast(avg(t1.c) AS integer) from t1))) when (select cast(avg(a) AS integer) from t1) then t1.c else f+13 end FROM t1 WHERE not exists(select 1 from t1 where coalesce((select case when t1.a>t1.f-~case when ~t1.d+c+t1.f>11 then t1.a when (f between t1.f and t1.c) then t1.f else 13 end*t1.a then t1.d when c19 then c else t1.e end from t1 where e<>a and 19<>t1.f),t1.b)+t1.f-t1.et1.f-~case when ~t1.d+c+t1.f>11 then t1.a when (f between t1.f and t1.c) then t1.f else 13 end*t1.a then t1.d when c19 then c else t1.e end from t1 where e<>a and 19<>t1.f),t1.b)+t1.f-t1.et1.f-~case when ~t1.d+c+t1.f>11 then t1.a when (f between t1.f and t1.c) then t1.f else 13 end*t1.a then t1.d when c19 then c else t1.e end from t1 where e<>a and 19<>t1.f),t1.b)+t1.f-t1.et1.c 
and 11<>e then 13 else a end-(a))-max(t1.e)*count(distinct t1.f) | case max((f)) when (count(distinct f)) then count(*) else min(t1.c) end+min(19) from t1) not in (11,19,t1.e)),t1.f*17))/abs(a)))/abs(c)) in (select t1.c from t1 union select t1.a from t1)) or ( -t1.f=e)} +} {} +do_test randexpr-2.136 { + db eval {SELECT 11-case when 17<=c and (select ( -abs(case count(*)+~case -min(e) when count(distinct t1.a) then -count(*) else min(a) end+max(t1.c) when count(*) then max(b) else -count(distinct 19) end)*cast(avg(17) AS integer)) from t1) not in (case f when e then 19 | 11 else (t1.d) end,d, -t1.b) then +t1.c when t1.d not in (11,c,t1.a) then 13 else a end-t1.d*t1.d FROM t1 WHERE NOT (((abs((abs(coalesce((select a from t1 where (select max(case when t1.f=b then e when 11>t1.c and 11<>e then 13 else a end-(a))-max(t1.e)*count(distinct t1.f) | case max((f)) when (count(distinct f)) then count(*) else min(t1.c) end+min(19) from t1) not in (11,19,t1.e)),t1.f*17))/abs(a)))/abs(c)) in (select t1.c from t1 union select t1.a from t1)) or ( -t1.f=e))} +} {-160289} +do_test randexpr-2.137 { + db eval {SELECT 11-case when 17<=c and (select ( -abs(case count(*)+~case -min(e) when count(distinct t1.a) then -count(*) else min(a) end+max(t1.c) when count(*) then max(b) else -count(distinct 19) end)*cast(avg(17) AS integer)) from t1) not in (case f when e then 19 & 11 else (t1.d) end,d, -t1.b) then +t1.c when t1.d not in (11,c,t1.a) then 13 else a end-t1.d*t1.d FROM t1 WHERE NOT (((abs((abs(coalesce((select a from t1 where (select max(case when t1.f=b then e when 11>t1.c and 11<>e then 13 else a end-(a))-max(t1.e)*count(distinct t1.f) | case max((f)) when (count(distinct f)) then count(*) else min(t1.c) end+min(19) from t1) not in (11,19,t1.e)),t1.f*17))/abs(a)))/abs(c)) in (select t1.c from t1 union select t1.a from t1)) or ( -t1.f=e))} +} {-160289} +do_test randexpr-2.138 { + db eval {SELECT (f+case +t1.e when t1.d then 17 else f*19 end | -(abs( -coalesce((select 17* -c+19 from t1 where (abs(case 19*e*d when (select +(cast(avg( -11) AS integer)) | -count(*)*max( -13) from t1) then 11 | case t1.b when a then a else t1.f end else t1.a end)/abs(a)) in (select t1.b from t1 union select (a) from t1)),t1.b))/abs( -13))) FROM t1 WHERE +b in (select case case case when not ((~coalesce((select t1.e+d from t1 where (19 between f and t1.b)),(select cast(avg(c+t1.b) AS integer)++cast(avg(b) AS integer) from t1))) not in (t1.c+c,b,e)) or (13 not between d and t1.d) then 13 else b end when -11 then 13 else 19 end*t1.e when (f) then 19 else 19 end from t1 union select f from t1)} +} {} +do_test randexpr-2.139 { + db eval {SELECT (f+case +t1.e when t1.d then 17 else f*19 end | -(abs( -coalesce((select 17* -c+19 from t1 where (abs(case 19*e*d when (select +(cast(avg( -11) AS integer)) | -count(*)*max( -13) from t1) then 11 | case t1.b when a then a else t1.f end else t1.a end)/abs(a)) in (select t1.b from t1 union select (a) from t1)),t1.b))/abs( -13))) FROM t1 WHERE NOT (+b in (select case case case when not ((~coalesce((select t1.e+d from t1 where (19 between f and t1.b)),(select cast(avg(c+t1.b) AS integer)++cast(avg(b) AS integer) from t1))) not in (t1.c+c,b,e)) or (13 not between d and t1.d) then 13 else b end when -11 then 13 else 19 end*t1.e when (f) then 19 else 19 end from t1 union select f from t1))} +} {-15} +do_test randexpr-2.140 { + db eval {SELECT (f+case +t1.e when t1.d then 17 else f*19 end & -(abs( -coalesce((select 17* -c+19 from t1 where (abs(case 19*e*d when (select +(cast(avg( -11) AS integer)) & 
-count(*)*max( -13) from t1) then 11 & case t1.b when a then a else t1.f end else t1.a end)/abs(a)) in (select t1.b from t1 union select (a) from t1)),t1.b))/abs( -13))) FROM t1 WHERE NOT (+b in (select case case case when not ((~coalesce((select t1.e+d from t1 where (19 between f and t1.b)),(select cast(avg(c+t1.b) AS integer)++cast(avg(b) AS integer) from t1))) not in (t1.c+c,b,e)) or (13 not between d and t1.d) then 13 else b end when -11 then 13 else 19 end*t1.e when (f) then 19 else 19 end from t1 union select f from t1))} +} {12000} +do_test randexpr-2.141 { + db eval {SELECT 11*coalesce((select max(t1.f) from t1 where a-case t1.c when +coalesce((select t1.a-t1.b from t1 where (abs(19)/abs(b)) in (17-case when a17),d)*(t1.e) FROM t1 WHERE (coalesce((select max(~t1.b) from t1 where t1.c in (select ~+case count(distinct a) when (max(t1.a)) then (count(*)) else ~+abs(cast(avg(d | t1.a) AS integer)+case (cast(avg(13) AS integer)) when cast(avg(b) AS integer) then cast(avg(t1.f) AS integer) else min(t1.a) end) | count(distinct c)*( -( -max(t1.f)))*count(*) end from t1 union select count(distinct -e) from t1)),~a-11)<19 or t1.a*13<>t1.d and not exists(select 1 from t1 where t1.e=e))} +} {3300000} +do_test randexpr-2.142 { + db eval {SELECT 11*coalesce((select max(t1.f) from t1 where a-case t1.c when +coalesce((select t1.a-t1.b from t1 where (abs(19)/abs(b)) in (17-case when a17),d)*(t1.e) FROM t1 WHERE NOT ((coalesce((select max(~t1.b) from t1 where t1.c in (select ~+case count(distinct a) when (max(t1.a)) then (count(*)) else ~+abs(cast(avg(d | t1.a) AS integer)+case (cast(avg(13) AS integer)) when cast(avg(b) AS integer) then cast(avg(t1.f) AS integer) else min(t1.a) end) | count(distinct c)*( -( -max(t1.f)))*count(*) end from t1 union select count(distinct -e) from t1)),~a-11)<19 or t1.a*13<>t1.d and not exists(select 1 from t1 where t1.e=e)))} +} {} +do_test randexpr-2.143 { + db eval {SELECT 11+t1.b+17+case when 13 not between t1.d*(17)+d-t1.f and 13 then t1.d when case when case when t1.f not in (t1.b,c,t1.f) and -d between d and 11 then d else 19 end not between a and e then t1.b when t1.d in (17,13,11) then 19 else t1.e end*a>e then t1.a else t1.d end*f*e FROM t1 WHERE not exists(select 1 from t1 where (t1.f) in (b-b,(select +(max( -13*t1.c*c+t1.e+case t1.a when +t1.d-(c) then -t1.a else t1.c end | 19* -17)*count(distinct a) | (count(distinct t1.f)))++cast(avg(f) AS integer)-max(13) | cast(avg(b) AS integer) from t1),t1.f))} +} {} +do_test randexpr-2.144 { + db eval {SELECT 11+t1.b+17+case when 13 not between t1.d*(17)+d-t1.f and 13 then t1.d when case when case when t1.f not in (t1.b,c,t1.f) and -d between d and 11 then d else 19 end not between a and e then t1.b when t1.d in (17,13,11) then 19 else t1.e end*a>e then t1.a else t1.d end*f*e FROM t1 WHERE NOT (not exists(select 1 from t1 where (t1.f) in (b-b,(select +(max( -13*t1.c*c+t1.e+case t1.a when +t1.d-(c) then -t1.a else t1.c end | 19* -17)*count(distinct a) | (count(distinct t1.f)))++cast(avg(f) AS integer)-max(13) | cast(avg(b) AS integer) from t1),t1.f)))} +} {120000228} +do_test randexpr-2.145 { + db eval {SELECT ((abs(+((select count(*)*abs(min((abs(case (abs(c)/abs(f)) | e when case when 19>=d then 13 when a=f then 13 else f end then b else a end+t1.a)/abs(11))*t1.c*17))-cast(avg(e) AS integer)*count(*) from t1)))/abs(coalesce((select max(case when t1.a in (select t1.c from t1 union select 19 from t1) then e else t1.e end) from t1 where (t1.a<= -t1.e)),17)+c*13))) FROM t1 WHERE exists(select 1 from t1 where 
~coalesce((select max(c) from t1 where exists(select 1 from t1 where case when not case coalesce((select max(coalesce((select d from t1 where exists(select 1 from t1 where a<(t1.c))),19)) from t1 where e=t1.e),13) when t1.e then t1.b else f end in (select abs((min(e))*(max(t1.d))+min(t1.e)*(( -((max(t1.a)))))) from t1 union select count(distinct t1.a) from t1) then coalesce((select t1.c from t1 where t1.a< -t1.c and a in (t1.b,t1.f,t1.c)),17) else 17 end>=t1.f)),t1.f)+t1.a*b in (select c from t1 union select 19 from t1)) or exists(select 1 from t1 where not t1.f not in (t1.f,11,t1.d) and 19<=19)} +} {23} +do_test randexpr-2.146 { + db eval {SELECT ((abs(+((select count(*)*abs(min((abs(case (abs(c)/abs(f)) | e when case when 19>=d then 13 when a=f then 13 else f end then b else a end+t1.a)/abs(11))*t1.c*17))-cast(avg(e) AS integer)*count(*) from t1)))/abs(coalesce((select max(case when t1.a in (select t1.c from t1 union select 19 from t1) then e else t1.e end) from t1 where (t1.a<= -t1.e)),17)+c*13))) FROM t1 WHERE NOT (exists(select 1 from t1 where ~coalesce((select max(c) from t1 where exists(select 1 from t1 where case when not case coalesce((select max(coalesce((select d from t1 where exists(select 1 from t1 where a<(t1.c))),19)) from t1 where e=t1.e),13) when t1.e then t1.b else f end in (select abs((min(e))*(max(t1.d))+min(t1.e)*(( -((max(t1.a)))))) from t1 union select count(distinct t1.a) from t1) then coalesce((select t1.c from t1 where t1.a< -t1.c and a in (t1.b,t1.f,t1.c)),17) else 17 end>=t1.f)),t1.f)+t1.a*b in (select c from t1 union select 19 from t1)) or exists(select 1 from t1 where not t1.f not in (t1.f,11,t1.d) and 19<=19))} +} {} +do_test randexpr-2.147 { + db eval {SELECT ((abs(+((select count(*)*abs(min((abs(case (abs(c)/abs(f)) & e when case when 19>=d then 13 when a=f then 13 else f end then b else a end+t1.a)/abs(11))*t1.c*17))-cast(avg(e) AS integer)*count(*) from t1)))/abs(coalesce((select max(case when t1.a in (select t1.c from t1 union select 19 from t1) then e else t1.e end) from t1 where (t1.a<= -t1.e)),17)+c*13))) FROM t1 WHERE exists(select 1 from t1 where ~coalesce((select max(c) from t1 where exists(select 1 from t1 where case when not case coalesce((select max(coalesce((select d from t1 where exists(select 1 from t1 where a<(t1.c))),19)) from t1 where e=t1.e),13) when t1.e then t1.b else f end in (select abs((min(e))*(max(t1.d))+min(t1.e)*(( -((max(t1.a)))))) from t1 union select count(distinct t1.a) from t1) then coalesce((select t1.c from t1 where t1.a< -t1.c and a in (t1.b,t1.f,t1.c)),17) else 17 end>=t1.f)),t1.f)+t1.a*b in (select c from t1 union select 19 from t1)) or exists(select 1 from t1 where not t1.f not in (t1.f,11,t1.d) and 19<=19)} +} {23} +do_test randexpr-2.148 { + db eval {SELECT coalesce((select max(case when t1.f between 17 and 11+case when not coalesce((select max(11) from t1 where t1.f in (select max(t1.d) from t1 union select -count(*) from t1) or 17=(13) or f in (( -t1.b),13,a)),case when d>13 then 13 else 11 end) between 19 and 19 then 19 when t1.c=d then e else 17 end then t1.e else t1.d end) from t1 where e in (select -(+max(11)-max((t1.a))+ -count(distinct d) | cast(avg(t1.a) AS integer)*count(distinct b))-count(*)* -count(distinct t1.e) from t1 union select cast(avg(d) AS integer) from t1)),d)+b FROM t1 WHERE (select +~abs( -min(t1.a)-abs(count(*)))+ -abs(+ -+case (cast(avg(a) AS integer)) when min(e+t1.d) then +case cast(avg(coalesce((select max(t1.f+ -t1.e) from t1 where t1.c between 13 and t1.b),19)) AS integer) when 
~~min(t1.f) then count(distinct 13) else cast(avg(t1.b) AS integer) end+count(distinct t1.e) else -count(*) end | max(a)+max(t1.e)) from t1)+e13 then 13 else 11 end) between 19 and 19 then 19 when t1.c=d then e else 17 end then t1.e else t1.d end) from t1 where e in (select -(+max(11)-max((t1.a))+ -count(distinct d) | cast(avg(t1.a) AS integer)*count(distinct b))-count(*)* -count(distinct t1.e) from t1 union select cast(avg(d) AS integer) from t1)),d)+b FROM t1 WHERE NOT ((select +~abs( -min(t1.a)-abs(count(*)))+ -abs(+ -+case (cast(avg(a) AS integer)) when min(e+t1.d) then +case cast(avg(coalesce((select max(t1.f+ -t1.e) from t1 where t1.c between 13 and t1.b),19)) AS integer) when ~~min(t1.f) then count(distinct 13) else cast(avg(t1.b) AS integer) end+count(distinct t1.e) else -count(*) end | max(a)+max(t1.e)) from t1)+e13 then 13 else 11 end) between 19 and 19 then 19 when t1.c=d then e else 17 end then t1.e else t1.d end) from t1 where e in (select -(+max(11)-max((t1.a))+ -count(distinct d) & cast(avg(t1.a) AS integer)*count(distinct b))-count(*)* -count(distinct t1.e) from t1 union select cast(avg(d) AS integer) from t1)),d)+b FROM t1 WHERE (select +~abs( -min(t1.a)-abs(count(*)))+ -abs(+ -+case (cast(avg(a) AS integer)) when min(e+t1.d) then +case cast(avg(coalesce((select max(t1.f+ -t1.e) from t1 where t1.c between 13 and t1.b),19)) AS integer) when ~~min(t1.f) then count(distinct 13) else cast(avg(t1.b) AS integer) end+count(distinct t1.e) else -count(*) end | max(a)+max(t1.e)) from t1)+ed or t1.a not between b and 13 or f<> -19 and t1.b>a} +} {11} +do_test randexpr-2.152 { + db eval {SELECT case when ((+b in (select min(coalesce((select c from t1 where t1.b-t1.c+coalesce((select max(case when 13 in (select -count(distinct b)*max(c) from t1 union select (( - -cast(avg(d) AS integer))) from t1) then b else t1.f end) from t1 where t1.fd or t1.a not between b and 13 or f<> -19 and t1.b>a)} +} {} +do_test randexpr-2.153 { + db eval {SELECT case when ((+b in (select min(coalesce((select c from t1 where t1.b-t1.c+coalesce((select max(case when 13 in (select -count(distinct b)*max(c) from t1 union select (( - -cast(avg(d) AS integer))) from t1) then b else t1.f end) from t1 where t1.fd or t1.a not between b and 13 or f<> -19 and t1.b>a} +} {11} +do_test randexpr-2.154 { + db eval {SELECT case when t1.a | (abs(case when -t1.e in (17,11,(abs(t1.b)/abs(b+case when (17 in (select f-c from t1 union select -a from t1)) or t1.b<=a then d when t1.e in (select -(count(*)) from t1 union select max(t1.b) from t1) then c else c end*a-t1.f))) then t1.b else t1.e end)/abs(t1.f))>=11 and not exists(select 1 from t1 where exists(select 1 from t1 where t1.d in (select t1.d from t1 union select 17 from t1))) then c else t1.c end FROM t1 WHERE (coalesce((select f from t1 where case a-e+a when (abs(t1.d)/abs((coalesce((select ~t1.e*+t1.a+b*a*(abs(19)/abs(a))*a+e from t1 where 13=13 or t1.b<= -19),t1.f)+17* -t1.c=11 and not exists(select 1 from t1 where exists(select 1 from t1 where t1.d in (select t1.d from t1 union select 17 from t1))) then c else t1.c end FROM t1 WHERE NOT ((coalesce((select f from t1 where case a-e+a when (abs(t1.d)/abs((coalesce((select ~t1.e*+t1.a+b*a*(abs(19)/abs(a))*a+e from t1 where 13=13 or t1.b<= -19),t1.f)+17* -t1.c=11 and not exists(select 1 from t1 where exists(select 1 from t1 where t1.d in (select t1.d from t1 union select 17 from t1))) then c else t1.c end FROM t1 WHERE (coalesce((select f from t1 where case a-e+a when (abs(t1.d)/abs((coalesce((select 
~t1.e*+t1.a+b*a*(abs(19)/abs(a))*a+e from t1 where 13=13 or t1.b<= -19),t1.f)+17* -t1.c~a-11),t1.f) end)} +} {100} +do_test randexpr-2.158 { + db eval {SELECT coalesce((select max(~t1.b) from t1 where b in (select count(distinct b) from t1 union select max(case coalesce((select max(b) from t1 where d in (select t1.f from t1 union select d from t1) and t1.a=d),t1.a) when -t1.a then d*t1.e+t1.c-11+b | e-t1.a+e+t1.f else c end) from t1)),t1.a) FROM t1 WHERE NOT ((e~a-11),t1.f) end))} +} {} +do_test randexpr-2.159 { + db eval {SELECT coalesce((select max(~t1.b) from t1 where b in (select count(distinct b) from t1 union select max(case coalesce((select max(b) from t1 where d in (select t1.f from t1 union select d from t1) and t1.a=d),t1.a) when -t1.a then d*t1.e+t1.c-11+b & e-t1.a+e+t1.f else c end) from t1)),t1.a) FROM t1 WHERE (e~a-11),t1.f) end)} +} {100} +do_test randexpr-2.160 { + db eval {SELECT case when not t1.c not in (13,d,~case when coalesce((select max((abs(t1.c)/abs(t1.e))) from t1 where b-t1.d+b+t1.f>a),t1.a) in (select 19 from t1 union select t1.a from t1) then -b else a end*b | d) and -(t1.b)>=e and 11 -t1.c then t1.f else t1.f end+(t1.b)+a FROM t1 WHERE e>e} +} {} +do_test randexpr-2.161 { + db eval {SELECT case when not t1.c not in (13,d,~case when coalesce((select max((abs(t1.c)/abs(t1.e))) from t1 where b-t1.d+b+t1.f>a),t1.a) in (select 19 from t1 union select t1.a from t1) then -b else a end*b | d) and -(t1.b)>=e and 11 -t1.c then t1.f else t1.f end+(t1.b)+a FROM t1 WHERE NOT (e>e)} +} {900} +do_test randexpr-2.162 { + db eval {SELECT case when not t1.c not in (13,d,~case when coalesce((select max((abs(t1.c)/abs(t1.e))) from t1 where b-t1.d+b+t1.f>a),t1.a) in (select 19 from t1 union select t1.a from t1) then -b else a end*b & d) and -(t1.b)>=e and 11 -t1.c then t1.f else t1.f end+(t1.b)+a FROM t1 WHERE NOT (e>e)} +} {900} +do_test randexpr-2.163 { + db eval {SELECT coalesce((select t1.c*c from t1 where (select (abs(cast(avg(b*13) AS integer))) from t1) in (select 11 from t1 union select e | a-17 from t1)), -case t1.c when t1.b++(select cast(avg(e) AS integer) from t1) then 17 else t1.b end)-d FROM t1 WHERE b<=t1.d} +} {-600} +do_test randexpr-2.164 { + db eval {SELECT coalesce((select t1.c*c from t1 where (select (abs(cast(avg(b*13) AS integer))) from t1) in (select 11 from t1 union select e | a-17 from t1)), -case t1.c when t1.b++(select cast(avg(e) AS integer) from t1) then 17 else t1.b end)-d FROM t1 WHERE NOT (b<=t1.d)} +} {} +do_test randexpr-2.165 { + db eval {SELECT coalesce((select t1.c*c from t1 where (select (abs(cast(avg(b*13) AS integer))) from t1) in (select 11 from t1 union select e & a-17 from t1)), -case t1.c when t1.b++(select cast(avg(e) AS integer) from t1) then 17 else t1.b end)-d FROM t1 WHERE b<=t1.d} +} {-600} +do_test randexpr-2.166 { + db eval {SELECT coalesce((select max((case (abs(t1.b)/abs(e)) when 13 then ((select +case count(*)*count(distinct coalesce((select max((11)+b) from t1 where exists(select 1 from t1 where t1.e=(t1.b))), -13)) when count(distinct -c) then count(*)*max( -d) else max((a)) end+min(c) from t1)) else t1.a end-(abs(case when a=17 and e<=b then a when a>=e then t1.c else t1.c end+13)/abs(e))*(e))) from t1 where t1.a in (select d from t1 union select -t1.e from t1)),13) FROM t1 WHERE not coalesce((select max(case when f<=d | b | case when case when (t1.c in (f,19,c)) then t1.f when d>t1.b then t1.f else t1.f end<=c then 11 else c end- -t1.c-13 and t1.c in (select b from t1 union select -17 from t1) then t1.b else a end) from 
t1 where 11 between d and a), -17) not between t1.f and t1.f and t1.f>11 or (not exists(select 1 from t1 where t1.b in (select min(t1.d) from t1 union select min(c) from t1)))} +} {13} +do_test randexpr-2.167 { + db eval {SELECT coalesce((select max((case (abs(t1.b)/abs(e)) when 13 then ((select +case count(*)*count(distinct coalesce((select max((11)+b) from t1 where exists(select 1 from t1 where t1.e=(t1.b))), -13)) when count(distinct -c) then count(*)*max( -d) else max((a)) end+min(c) from t1)) else t1.a end-(abs(case when a=17 and e<=b then a when a>=e then t1.c else t1.c end+13)/abs(e))*(e))) from t1 where t1.a in (select d from t1 union select -t1.e from t1)),13) FROM t1 WHERE NOT (not coalesce((select max(case when f<=d | b | case when case when (t1.c in (f,19,c)) then t1.f when d>t1.b then t1.f else t1.f end<=c then 11 else c end- -t1.c-13 and t1.c in (select b from t1 union select -17 from t1) then t1.b else a end) from t1 where 11 between d and a), -17) not between t1.f and t1.f and t1.f>11 or (not exists(select 1 from t1 where t1.b in (select min(t1.d) from t1 union select min(c) from t1))))} +} {} +do_test randexpr-2.168 { + db eval {SELECT case when ((t1.e | (select min( -t1.f) from t1)*((abs(b)/abs(19)))-b<=t1.b and not e>13 or -f in (select count(distinct e)-min(t1.a) from t1 union select count(distinct f) from t1))) then 13 when 17 between f and 19 or 11 between b and 19 or not t1.f>e then t1.d else d | f end FROM t1 WHERE not case when t1.a=t1.a then c when ~case when case when t1.e in (select + -count(distinct t1.d) from t1 union select cast(avg(t1.a) AS integer) from t1) then (t1.e) when 17 in (13, -f,a) then t1.d else d end<17 then e else f end+17+a* -e+b+b>t1.e then t1.e else t1.a end13 or -f in (select count(distinct e)-min(t1.a) from t1 union select count(distinct f) from t1))) then 13 when 17 between f and 19 or 11 between b and 19 or not t1.f>e then t1.d else d | f end FROM t1 WHERE NOT (not case when t1.a=t1.a then c when ~case when case when t1.e in (select + -count(distinct t1.d) from t1 union select cast(avg(t1.a) AS integer) from t1) then (t1.e) when 17 in (13, -f,a) then t1.d else d end<17 then e else f end+17+a* -e+b+b>t1.e then t1.e else t1.a end13 or -f in (select count(distinct e)-min(t1.a) from t1 union select count(distinct f) from t1))) then 13 when 17 between f and 19 or 11 between b and 19 or not t1.f>e then t1.d else d & f end FROM t1 WHERE NOT (not case when t1.a=t1.a then c when ~case when case when t1.e in (select + -count(distinct t1.d) from t1 union select cast(avg(t1.a) AS integer) from t1) then (t1.e) when 17 in (13, -f,a) then t1.d else d end<17 then e else f end+17+a* -e+b+b>t1.e then t1.e else t1.a enda and 19 between 11 and b and 11 between a and b)),t1.f) | c in ( -t1.c,13,b) then 11 when f in (select min(t1.d) from t1 union select cast(avg(f) AS integer) from t1) then t1.b else a end and t1.a),t1.c))/abs(11))+ -f} +} {} +do_test randexpr-2.172 { + db eval {SELECT coalesce((select (select cast(avg(~13) AS integer)-count(distinct ~19-coalesce((select d-17 from t1 where case when (case 17 when b then d else (select min(11) | min(17) from t1)++t1.a end in (d, -c,t1.d)) then b else t1.c end between t1.a and (t1.e)),19)-t1.f) from t1) | (abs(t1.e)/abs(t1.e)) from t1 where (t1.c not in (( -t1.d),(t1.b),(d)))),t1.e) FROM t1 WHERE NOT (13 between 19 and ~ -(abs( -t1.d | coalesce((select t1.f from t1 where 19 not between + -case when ~~b+coalesce((select max(t1.d) from t1 where exists(select 1 from t1 where (select cast(avg(19) AS 
integer)-cast(avg(b) AS integer) from t1)<>a and 19 between 11 and b and 11 between a and b)),t1.f) | c in ( -t1.c,13,b) then 11 when f in (select min(t1.d) from t1 union select cast(avg(f) AS integer) from t1) then t1.b else a end and t1.a),t1.c))/abs(11))+ -f)} +} {-15} +do_test randexpr-2.173 { + db eval {SELECT coalesce((select (select cast(avg(~13) AS integer)-count(distinct ~19-coalesce((select d-17 from t1 where case when (case 17 when b then d else (select min(11) & min(17) from t1)++t1.a end in (d, -c,t1.d)) then b else t1.c end between t1.a and (t1.e)),19)-t1.f) from t1) & (abs(t1.e)/abs(t1.e)) from t1 where (t1.c not in (( -t1.d),(t1.b),(d)))),t1.e) FROM t1 WHERE NOT (13 between 19 and ~ -(abs( -t1.d | coalesce((select t1.f from t1 where 19 not between + -case when ~~b+coalesce((select max(t1.d) from t1 where exists(select 1 from t1 where (select cast(avg(19) AS integer)-cast(avg(b) AS integer) from t1)<>a and 19 between 11 and b and 11 between a and b)),t1.f) | c in ( -t1.c,13,b) then 11 when f in (select min(t1.d) from t1 union select cast(avg(f) AS integer) from t1) then t1.b else a end and t1.a),t1.c))/abs(11))+ -f)} +} {1} +do_test randexpr-2.174 { + db eval {SELECT coalesce((select max(+(d+a)*b) from t1 where (case e-t1.b when 17+11*13 then t1.a else 17 end-(17)<>(select cast(avg(t1.f) AS integer) from t1))),case when case when 11+13<>19 then 13 else t1.f end*e in (a,t1.f,t1.c) and not exists(select 1 from t1 where t1.b in (select - -count(distinct t1.b)*( -count(*)) from t1 union select count(distinct t1.e) from t1)) then t1.c else a end) FROM t1 WHERE case when t1.d in (select count(*)-count(*)+ -min(case d when t1.c then 19 else 11 end)*count(*) | min(19)-(max((e))) from t1 union select count(*) from t1) then c when exists(select 1 from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (abs(b)/abs(t1.c))>coalesce((select max(f) from t1 where f<=t1.a),t1.d)))) then f*t1.c+11 else t1.f end+e-e*t1.f<=17} +} {100000} +do_test randexpr-2.175 { + db eval {SELECT coalesce((select max(+(d+a)*b) from t1 where (case e-t1.b when 17+11*13 then t1.a else 17 end-(17)<>(select cast(avg(t1.f) AS integer) from t1))),case when case when 11+13<>19 then 13 else t1.f end*e in (a,t1.f,t1.c) and not exists(select 1 from t1 where t1.b in (select - -count(distinct t1.b)*( -count(*)) from t1 union select count(distinct t1.e) from t1)) then t1.c else a end) FROM t1 WHERE NOT (case when t1.d in (select count(*)-count(*)+ -min(case d when t1.c then 19 else 11 end)*count(*) | min(19)-(max((e))) from t1 union select count(*) from t1) then c when exists(select 1 from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (abs(b)/abs(t1.c))>coalesce((select max(f) from t1 where f<=t1.a),t1.d)))) then f*t1.c+11 else t1.f end+e-e*t1.f<=17)} +} {} +do_test randexpr-2.176 { + db eval {SELECT t1.c*case when t1.e>(select +cast(avg(13) AS integer)*min((11)* - -d | - -coalesce((select max(b) from t1 where (select cast(avg((abs(a)/abs(b-(e)))) AS integer) from t1)+11*c=a then t1.b else t1.b end | 19+a-c)*count(distinct t1.d)-+count(distinct t1.d) from t1) not in (19,d,b) and t1.f not between b and t1.f)))))} +} {} +do_test randexpr-2.177 { + db eval {SELECT t1.c*case when t1.e>(select +cast(avg(13) AS integer)*min((11)* - -d | - -coalesce((select max(b) from t1 where (select cast(avg((abs(a)/abs(b-(e)))) AS integer) from t1)+11*c=a then t1.b else t1.b end | 19+a-c)*count(distinct t1.d)-+count(distinct t1.d) from t1) not in (19,d,b) and t1.f not between b and t1.f))))))} 
+} {90000} +do_test randexpr-2.178 { + db eval {SELECT t1.c*case when t1.e>(select +cast(avg(13) AS integer)*min((11)* - -d & - -coalesce((select max(b) from t1 where (select cast(avg((abs(a)/abs(b-(e)))) AS integer) from t1)+11*c=a then t1.b else t1.b end | 19+a-c)*count(distinct t1.d)-+count(distinct t1.d) from t1) not in (19,d,b) and t1.f not between b and t1.f))))))} +} {120000} +do_test randexpr-2.179 { + db eval {SELECT case when not exists(select 1 from t1 where (not exists(select 1 from t1 where not exists(select 1 from t1 where not d*19=b))) or t1.a-a-t1.d*t1.f in (select ~case cast(avg(t1.d) AS integer) | count(*)+max(t1.c) when max(17) then cast(avg( -f) AS integer) else -min(e) end from t1 union select max( -13) from t1)) then (select abs((+ -min(a)) | min(t1.a)-cast(avg(t1.f) AS integer)) | max(e) from t1) else 11 end*a | t1.a*f FROM t1 WHERE t1.c in (c,t1.b,~e) and 13+19-t1.b | coalesce((select max(case d when +t1.e+13*13 then coalesce((select case when f=e or -f<=e and t1.d<>19),d) else d end) from t1 where t1.d=e or -f<=e and t1.d<>19),d) else d end) from t1 where t1.d=e or -f<=e and t1.d<>19),d) else d end) from t1 where t1.dt1.b then d*t1.f*17 else (b) end)+t1.f*13 when c then t1.a else t1.e end- -t1.f FROM t1 WHERE b-case when t1.b=t1.b and (coalesce((select max(t1.e) from t1 where t1.b in (select ~cast(avg(19) AS integer)+max(t1.b)+min(t1.e) from t1 union select max(t1.a) from t1) or exists(select 1 from t1 where 11 in (select max(t1.e) from t1 union select max(t1.f) from t1))),(abs(17)/abs(17)))) not between c and t1.a then 13 else 13 end | t1.c in (select ++cast(avg(f) AS integer)-(+(count(distinct 13) | max(19))-count(*) | -max(t1.e)) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.185 { + db eval {SELECT ~case +~c+(case when not exists(select 1 from t1 where t1.d in (select abs( -abs(max(t1.f)-count(distinct e)* -cast(avg(11) AS integer) | count(*))+min(f)*min( -17)*cast(avg(19) AS integer)) from t1 union select count(*) from t1)) or (t1.f)<>t1.b then d*t1.f*17 else (b) end)+t1.f*13 when c then t1.a else t1.e end- -t1.f FROM t1 WHERE NOT (b-case when t1.b=t1.b and (coalesce((select max(t1.e) from t1 where t1.b in (select ~cast(avg(19) AS integer)+max(t1.b)+min(t1.e) from t1 union select max(t1.a) from t1) or exists(select 1 from t1 where 11 in (select max(t1.e) from t1 union select max(t1.f) from t1))),(abs(17)/abs(17)))) not between c and t1.a then 13 else 13 end | t1.c in (select ++cast(avg(f) AS integer)-(+(count(distinct 13) | max(19))-count(*) | -max(t1.e)) from t1 union select count(*) from t1))} +} {99} +do_test randexpr-2.186 { + db eval {SELECT ~case +~c+(case when not exists(select 1 from t1 where t1.d in (select abs( -abs(max(t1.f)-count(distinct e)* -cast(avg(11) AS integer) & count(*))+min(f)*min( -17)*cast(avg(19) AS integer)) from t1 union select count(*) from t1)) or (t1.f)<>t1.b then d*t1.f*17 else (b) end)+t1.f*13 when c then t1.a else t1.e end- -t1.f FROM t1 WHERE NOT (b-case when t1.b=t1.b and (coalesce((select max(t1.e) from t1 where t1.b in (select ~cast(avg(19) AS integer)+max(t1.b)+min(t1.e) from t1 union select max(t1.a) from t1) or exists(select 1 from t1 where 11 in (select max(t1.e) from t1 union select max(t1.f) from t1))),(abs(17)/abs(17)))) not between c and t1.a then 13 else 13 end | t1.c in (select ++cast(avg(f) AS integer)-(+(count(distinct 13) | max(19))-count(*) | -max(t1.e)) from t1 union select count(*) from t1))} +} {99} +do_test randexpr-2.187 { + db eval {SELECT case when t1.c in (select c from t1 union 
select a from t1) then 11 when ((select max(19) from t1) | t1.a+e not between (case -19 when b*e-b-case when b<>~d*t1.d+t1.c*d then t1.d when t1.c=19 then f else d end-f*t1.d then t1.d else 13 end) and 13) then 11 else t1.b end FROM t1 WHERE t1.b*coalesce((select max(t1.d) from t1 where -f<=(select max(t1.e) from t1)*e),case when case when t1.e- -13>t1.b+t1.f+c then 13 else -d end in (select 19 from t1 union select t1.f from t1) then e when t1.b in (select ~count(distinct t1.a)+count(*) from t1 union select abs( - -((cast(avg(c) AS integer)))*(cast(avg(t1.b) AS integer))) from t1) then 17 else d end)+ -17 | t1.a-t1.d in (t1.e,f,f)} +} {} +do_test randexpr-2.188 { + db eval {SELECT case when t1.c in (select c from t1 union select a from t1) then 11 when ((select max(19) from t1) | t1.a+e not between (case -19 when b*e-b-case when b<>~d*t1.d+t1.c*d then t1.d when t1.c=19 then f else d end-f*t1.d then t1.d else 13 end) and 13) then 11 else t1.b end FROM t1 WHERE NOT (t1.b*coalesce((select max(t1.d) from t1 where -f<=(select max(t1.e) from t1)*e),case when case when t1.e- -13>t1.b+t1.f+c then 13 else -d end in (select 19 from t1 union select t1.f from t1) then e when t1.b in (select ~count(distinct t1.a)+count(*) from t1 union select abs( - -((cast(avg(c) AS integer)))*(cast(avg(t1.b) AS integer))) from t1) then 17 else d end)+ -17 | t1.a-t1.d in (t1.e,f,f))} +} {11} +do_test randexpr-2.189 { + db eval {SELECT case when t1.c in (select c from t1 union select a from t1) then 11 when ((select max(19) from t1) & t1.a+e not between (case -19 when b*e-b-case when b<>~d*t1.d+t1.c*d then t1.d when t1.c=19 then f else d end-f*t1.d then t1.d else 13 end) and 13) then 11 else t1.b end FROM t1 WHERE NOT (t1.b*coalesce((select max(t1.d) from t1 where -f<=(select max(t1.e) from t1)*e),case when case when t1.e- -13>t1.b+t1.f+c then 13 else -d end in (select 19 from t1 union select t1.f from t1) then e when t1.b in (select ~count(distinct t1.a)+count(*) from t1 union select abs( - -((cast(avg(c) AS integer)))*(cast(avg(t1.b) AS integer))) from t1) then 17 else d end)+ -17 | t1.a-t1.d in (t1.e,f,f))} +} {11} +do_test randexpr-2.190 { + db eval {SELECT ~(d)-(select case case cast(avg(t1.b- -~b) AS integer) when max(coalesce((select e from t1 where 11<>e),coalesce((select c from t1 where c not in (t1.e,a,(17)) or t1.b in ((b),t1.c,t1.f)),13))) then +cast(avg(d) AS integer) | case max(t1.c) when min((e)) then max(e)+cast(avg(t1.d) AS integer) else min(t1.f) end+max(d) else count(*) end | -cast(avg(t1.f) AS integer) | - -count(distinct c)*max(b) when count(distinct t1.d) then - -min(a) else count(*) end from t1) FROM t1 WHERE t1.c+a=t1.e+t1.c*t1.e | case t1.d when case when 13 in (select min(c) from t1 union select max(d) from t1) or t1.f>=13 then 19 when t1.c<=11 then -t1.b else 13 end then 13 else t1.e end or f in (select t1.a from t1 union select t1.f from t1) or (t1.at1.b and (((t1.d not between -a and t1.f))) or t1.a not in ( -t1.d,13,a) and de),coalesce((select c from t1 where c not in (t1.e,a,(17)) or t1.b in ((b),t1.c,t1.f)),13))) then +cast(avg(d) AS integer) | case max(t1.c) when min((e)) then max(e)+cast(avg(t1.d) AS integer) else min(t1.f) end+max(d) else count(*) end | -cast(avg(t1.f) AS integer) | - -count(distinct c)*max(b) when count(distinct t1.d) then - -min(a) else count(*) end from t1) FROM t1 WHERE NOT (t1.c+a=t1.e+t1.c*t1.e | case t1.d when case when 13 in (select min(c) from t1 union select max(d) from t1) or t1.f>=13 then 19 when t1.c<=11 then -t1.b else 13 end then 13 else t1.e end or f 
in (select t1.a from t1 union select t1.f from t1) or (t1.at1.b and (((t1.d not between -a and t1.f))) or t1.a not in ( -t1.d,13,a) and de),coalesce((select c from t1 where c not in (t1.e,a,(17)) or t1.b in ((b),t1.c,t1.f)),13))) then +cast(avg(d) AS integer) & case max(t1.c) when min((e)) then max(e)+cast(avg(t1.d) AS integer) else min(t1.f) end+max(d) else count(*) end & -cast(avg(t1.f) AS integer) & - -count(distinct c)*max(b) when count(distinct t1.d) then - -min(a) else count(*) end from t1) FROM t1 WHERE t1.c+a=t1.e+t1.c*t1.e | case t1.d when case when 13 in (select min(c) from t1 union select max(d) from t1) or t1.f>=13 then 19 when t1.c<=11 then -t1.b else 13 end then 13 else t1.e end or f in (select t1.a from t1 union select t1.f from t1) or (t1.at1.b and (((t1.d not between -a and t1.f))) or t1.a not in ( -t1.d,13,a) and dt1.c or ((t1.f>e)) and t1.e-19+c+ -d*t1.d+11<>17+coalesce((select max(t1.d) from t1 where 17+t1.c11))),11) | case e when c then a else t1.b end+a FROM t1 WHERE case when d not in (a,11,(13)) then a when exists(select 1 from t1 where case when case when case when (t1.c) in (c,e,b) then f else (19) end-t1.f=t1.a then 11 else t1.c end in (select abs(~~max(t1.c)) from t1 union select cast(avg(c) AS integer) from t1)) then 11 else t1.f end in (select (~~count(*)*cast(avg(19) AS integer)) from t1 union select min(t1.d)+ -count(distinct f) from t1)} +} {} +do_test randexpr-2.197 { + db eval {SELECT coalesce((select max(~t1.d) from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where c>t1.c or ((t1.f>e)) and t1.e-19+c+ -d*t1.d+11<>17+coalesce((select max(t1.d) from t1 where 17+t1.c11))),11) | case e when c then a else t1.b end+a FROM t1 WHERE NOT (case when d not in (a,11,(13)) then a when exists(select 1 from t1 where case when case when case when (t1.c) in (c,e,b) then f else (19) end-t1.f=t1.a then 11 else t1.c end in (select abs(~~max(t1.c)) from t1 union select cast(avg(c) AS integer) from t1)) then 11 else t1.f end in (select (~~count(*)*cast(avg(19) AS integer)) from t1 union select min(t1.d)+ -count(distinct f) from t1))} +} {303} +do_test randexpr-2.198 { + db eval {SELECT coalesce((select max(~t1.d) from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where c>t1.c or ((t1.f>e)) and t1.e-19+c+ -d*t1.d+11<>17+coalesce((select max(t1.d) from t1 where 17+t1.c11))),11) & case e when c then a else t1.b end+a FROM t1 WHERE NOT (case when d not in (a,11,(13)) then a when exists(select 1 from t1 where case when case when case when (t1.c) in (c,e,b) then f else (19) end-t1.f=t1.a then 11 else t1.c end in (select abs(~~max(t1.c)) from t1 union select cast(avg(c) AS integer) from t1)) then 11 else t1.f end in (select (~~count(*)*cast(avg(19) AS integer)) from t1 union select min(t1.d)+ -count(distinct f) from t1))} +} {8} +do_test randexpr-2.199 { + db eval {SELECT case when t1.e*t1.b<>e then 19 else -11-b+coalesce((select max(t1.e+t1.f) from t1 where 11+f*t1.c<>t1.a*(~f+case when t1.f in (t1.f, -((13)),19) then t1.a when t1.e>17 then a else a end+b-t1.e-(f))),13)+d end+t1.b* -c FROM t1 WHERE exists(select 1 from t1 where coalesce((select max(t1.a*t1.e+13) from t1 where 13 in ((abs(e)/abs(t1.a)),a+t1.c,17*t1.c)),(select -case max(c) | cast(avg(e) AS integer) when ~+ -count(distinct coalesce((select d from t1 where exists(select 1 from t1 where -17e then 19 else -11-b+coalesce((select max(t1.e+t1.f) from t1 where 11+f*t1.c<>t1.a*(~f+case when t1.f in (t1.f, -((13)),19) then t1.a when t1.e>17 then a else a end+b-t1.e-(f))),13)+d 
end+t1.b* -c FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select max(t1.a*t1.e+13) from t1 where 13 in ((abs(e)/abs(t1.a)),a+t1.c,17*t1.c)),(select -case max(c) | cast(avg(e) AS integer) when ~+ -count(distinct coalesce((select d from t1 where exists(select 1 from t1 where -17c then t1.b when 17 not between t1.e and c then 11 else f end in (select a from t1 union select 13 from t1))),11)) from t1 where not exists(select 1 from t1 where f=17)),e) FROM t1 WHERE ( -13 in (13,b,case when 19=11 then t1.d else case when (d | b-case when t1.f-(select +abs(cast(avg( -17* -17) AS integer)*count(*)+max(t1.e)) from t1)*t1.f not in (f,a,c) then t1.a else t1.b end between 19 and e) then a else 13 end end-t1.a) or t1.c<19)} +} {} +do_test randexpr-2.202 { + db eval {SELECT coalesce((select max(coalesce((select 19 from t1 where (case when ((abs(coalesce((select max(case when t1.a between (t1.c)+ -b and d then e else d end) from t1 where t1.e in (select (count(*)) from t1 union select max( -a)-count(distinct t1.c)-((min(13))) | max((t1.f)) from t1)),11))/abs(19)))+d>c then t1.b when 17 not between t1.e and c then 11 else f end in (select a from t1 union select 13 from t1))),11)) from t1 where not exists(select 1 from t1 where f=17)),e) FROM t1 WHERE NOT (( -13 in (13,b,case when 19=11 then t1.d else case when (d | b-case when t1.f-(select +abs(cast(avg( -17* -17) AS integer)*count(*)+max(t1.e)) from t1)*t1.f not in (f,a,c) then t1.a else t1.b end between 19 and e) then a else 13 end end-t1.a) or t1.c<19))} +} {11} +do_test randexpr-2.203 { + db eval {SELECT coalesce((select max(coalesce((select 19 from t1 where (case when ((abs(coalesce((select max(case when t1.a between (t1.c)+ -b and d then e else d end) from t1 where t1.e in (select (count(*)) from t1 union select max( -a)-count(distinct t1.c)-((min(13))) & max((t1.f)) from t1)),11))/abs(19)))+d>c then t1.b when 17 not between t1.e and c then 11 else f end in (select a from t1 union select 13 from t1))),11)) from t1 where not exists(select 1 from t1 where f=17)),e) FROM t1 WHERE NOT (( -13 in (13,b,case when 19=11 then t1.d else case when (d | b-case when t1.f-(select +abs(cast(avg( -17* -17) AS integer)*count(*)+max(t1.e)) from t1)*t1.f not in (f,a,c) then t1.a else t1.b end between 19 and e) then a else 13 end end-t1.a) or t1.c<19))} +} {11} +do_test randexpr-2.204 { + db eval {SELECT -~d*19-a-c | t1.c+(abs(case -19 | ~case t1.d when c then t1.f else c end when +t1.d then 17 else case case when b>case case when t1.c=d or -t1.a=17 then ((a)) when t1.a<>d then (t1.c) else a end+19+11 when 19 then t1.a else b end-(t1.e) then f else t1.c end when 17 then t1.b else a end end)/abs(d))+t1.b FROM t1 WHERE t1.f+13<>19} +} {7671} +do_test randexpr-2.205 { + db eval {SELECT -~d*19-a-c | t1.c+(abs(case -19 | ~case t1.d when c then t1.f else c end when +t1.d then 17 else case case when b>case case when t1.c=d or -t1.a=17 then ((a)) when t1.a<>d then (t1.c) else a end+19+11 when 19 then t1.a else b end-(t1.e) then f else t1.c end when 17 then t1.b else a end end)/abs(d))+t1.b FROM t1 WHERE NOT (t1.f+13<>19)} +} {} +do_test randexpr-2.206 { + db eval {SELECT -~d*19-a-c & t1.c+(abs(case -19 & ~case t1.d when c then t1.f else c end when +t1.d then 17 else case case when b>case case when t1.c=d or -t1.a=17 then ((a)) when t1.a<>d then (t1.c) else a end+19+11 when 19 then t1.a else b end-(t1.e) then f else t1.c end when 17 then t1.b else a end end)/abs(d))+t1.b FROM t1 WHERE t1.f+13<>19} +} {48} +do_test randexpr-2.207 { + db eval {SELECT 
(abs(coalesce((select b from t1 where t1.a>(abs(t1.f)/abs(coalesce((select 13 from t1 where not coalesce((select (select cast(avg(case when case when t1.e not between t1.c and 19 then t1.d when d in (11,t1.b,13) then e else 17 end*b between 13 and (t1.b) then t1.f when a<>19 and t1.c not in (a,t1.f,b) then t1.e else t1.f end) AS integer) from t1)+t1.c-b* -a*f from t1 where (t1.d in (select c from t1 union select t1.b from t1))),t1.e)-c<>c),t1.c)))),e))/abs(t1.e)) FROM t1 WHERE (t1.a-t1.c-case when eb then f else d end | t1.d) AS integer) from t1 union select count(distinct 11) from t1)),c) else t1.f end when f then 19 else t1.b end(abs(t1.f)/abs(coalesce((select 13 from t1 where not coalesce((select (select cast(avg(case when case when t1.e not between t1.c and 19 then t1.d when d in (11,t1.b,13) then e else 17 end*b between 13 and (t1.b) then t1.f when a<>19 and t1.c not in (a,t1.f,b) then t1.e else t1.f end) AS integer) from t1)+t1.c-b* -a*f from t1 where (t1.d in (select c from t1 union select t1.b from t1))),t1.e)-c<>c),t1.c)))),e))/abs(t1.e)) FROM t1 WHERE NOT ((t1.a-t1.c-case when eb then f else d end | t1.d) AS integer) from t1 union select count(distinct 11) from t1)),c) else t1.f end when f then 19 else t1.b endd then case 11 when ~+case t1.a when (abs(t1.a)/abs( -c))*t1.c+ -a+coalesce((select t1.c from t1 where ((abs(e)/abs((case when (11>=e) then -17 when (11)<=a then e else b end)+t1.f))>19)),b)-d then c else 17 end+ -b then ( -t1.a) else c end when 11<=t1.f then 11 else 17 end FROM t1 WHERE (coalesce((select max(17) from t1 where ~case ((select min( -e-a)*+cast(avg(11) AS integer)*count(*)-(count(*))*max(b)*min( -13)+count(*)-min( -13) from t1)+19)+a when e then (select min(f) from t1) else coalesce((select t1.f from t1 where 17>t1.c),19) end=11 and f=t1.b),19) not between -a and t1.a)} +} {} +do_test randexpr-2.210 { + db eval {SELECT case when t1.d<>d then case 11 when ~+case t1.a when (abs(t1.a)/abs( -c))*t1.c+ -a+coalesce((select t1.c from t1 where ((abs(e)/abs((case when (11>=e) then -17 when (11)<=a then e else b end)+t1.f))>19)),b)-d then c else 17 end+ -b then ( -t1.a) else c end when 11<=t1.f then 11 else 17 end FROM t1 WHERE NOT ((coalesce((select max(17) from t1 where ~case ((select min( -e-a)*+cast(avg(11) AS integer)*count(*)-(count(*))*max(b)*min( -13)+count(*)-min( -13) from t1)+19)+a when e then (select min(f) from t1) else coalesce((select t1.f from t1 where 17>t1.c),19) end=11 and f=t1.b),19) not between -a and t1.a))} +} {11} +do_test randexpr-2.211 { + db eval {SELECT coalesce((select max(t1.e) from t1 where not d in (select (abs(t1.f)/abs(~case when (abs(t1.e)/abs( -t1.a))+t1.b-t1.e*t1.a-t1.c between b and t1.e then f else a end+19)) from t1 union select 13 from t1) or (t1.e=(a)) and -11 in (select t1.f from t1 union select c from t1)),a)*coalesce((select max(f) from t1 where d=f),f)*c+(t1.c) FROM t1 WHERE case when +(abs(19)/abs(13*t1.e))<>11 then 19 when coalesce((select max(c) from t1 where t1.e+t1.a between t1.f and f),t1.a)=(a) then d else -19 end<=f or (((e in (b,t1.b,13)))) or b<(f) and t1.a<>11 or exists(select 1 from t1 where t1.d=d) or (t1.a)<>a or t1.a<>e} +} {90000300} +do_test randexpr-2.212 { + db eval {SELECT coalesce((select max(t1.e) from t1 where not d in (select (abs(t1.f)/abs(~case when (abs(t1.e)/abs( -t1.a))+t1.b-t1.e*t1.a-t1.c between b and t1.e then f else a end+19)) from t1 union select 13 from t1) or (t1.e=(a)) and -11 in (select t1.f from t1 union select c from t1)),a)*coalesce((select max(f) from t1 where d=f),f)*c+(t1.c) FROM 
t1 WHERE NOT (case when +(abs(19)/abs(13*t1.e))<>11 then 19 when coalesce((select max(c) from t1 where t1.e+t1.a between t1.f and f),t1.a)=(a) then d else -19 end<=f or (((e in (b,t1.b,13)))) or b<(f) and t1.a<>11 or exists(select 1 from t1 where t1.d=d) or (t1.a)<>a or t1.a<>e)} +} {} +do_test randexpr-2.213 { + db eval {SELECT case (select abs(count(distinct -17+(abs(c)/abs(case when exists(select 1 from t1 where (17<>(f))) then e when t1.c not between 11 and t1.d then +11 else t1.e end | b))+d)*+ -(min(f)*max(t1.e))*+cast(avg(t1.f) AS integer) | count(distinct (f))- - -count(*)-cast(avg((e)) AS integer)) from t1) when (abs(11)/abs(t1.e)) then 13 else b end FROM t1 WHERE case when t1.e not between coalesce((select (select count(*)+case max(17) when count(distinct 19-t1.c*t1.c-e) then -min(b)-cast(avg((13)) AS integer)*count(*) else min(19) end from t1)*a*t1.d-17*a+17 from t1 where t1.d not in (13,t1.f, -c)),t1.c) and e then ( -b) when (13)>=(e) then t1.c else e end in (t1.d,t1.c, -11)} +} {} +do_test randexpr-2.214 { + db eval {SELECT case (select abs(count(distinct -17+(abs(c)/abs(case when exists(select 1 from t1 where (17<>(f))) then e when t1.c not between 11 and t1.d then +11 else t1.e end | b))+d)*+ -(min(f)*max(t1.e))*+cast(avg(t1.f) AS integer) | count(distinct (f))- - -count(*)-cast(avg((e)) AS integer)) from t1) when (abs(11)/abs(t1.e)) then 13 else b end FROM t1 WHERE NOT (case when t1.e not between coalesce((select (select count(*)+case max(17) when count(distinct 19-t1.c*t1.c-e) then -min(b)-cast(avg((13)) AS integer)*count(*) else min(19) end from t1)*a*t1.d-17*a+17 from t1 where t1.d not in (13,t1.f, -c)),t1.c) and e then ( -b) when (13)>=(e) then t1.c else e end in (t1.d,t1.c, -11))} +} {200} +do_test randexpr-2.215 { + db eval {SELECT case (select abs(count(distinct -17+(abs(c)/abs(case when exists(select 1 from t1 where (17<>(f))) then e when t1.c not between 11 and t1.d then +11 else t1.e end & b))+d)*+ -(min(f)*max(t1.e))*+cast(avg(t1.f) AS integer) & count(distinct (f))- - -count(*)-cast(avg((e)) AS integer)) from t1) when (abs(11)/abs(t1.e)) then 13 else b end FROM t1 WHERE NOT (case when t1.e not between coalesce((select (select count(*)+case max(17) when count(distinct 19-t1.c*t1.c-e) then -min(b)-cast(avg((13)) AS integer)*count(*) else min(19) end from t1)*a*t1.d-17*a+17 from t1 where t1.d not in (13,t1.f, -c)),t1.c) and e then ( -b) when (13)>=(e) then t1.c else e end in (t1.d,t1.c, -11))} +} {200} +do_test randexpr-2.216 { + db eval {SELECT -case when dt1.d))) then case +coalesce((select case when 13>t1.b then 17+t1.d+d when 17 in (t1.b,b,b) and 17<=t1.e then (c) else t1.d end from t1 where 17b+t1.a*t1.a-coalesce((select max(d) from t1 where (17 between (t1.d) and (f))),t1.e)-t1.c and t1.d>11)} +} {} +do_test randexpr-2.217 { + db eval {SELECT -case when dt1.d))) then case +coalesce((select case when 13>t1.b then 17+t1.d+d when 17 in (t1.b,b,b) and 17<=t1.e then (c) else t1.d end from t1 where 17b+t1.a*t1.a-coalesce((select max(d) from t1 where (17 between (t1.d) and (f))),t1.e)-t1.c and t1.d>11))} +} {-447} +do_test randexpr-2.218 { + db eval {SELECT (select + -max(t1.c)*abs(min(t1.b) | case (abs(min(13)))+count(*)-max(t1.a) when max(~13) then abs( -~min(t1.b)+abs(count(distinct (select +++cast(avg(t1.b) AS integer)-case -count(*) when -count(*) then -count(distinct b) else count(distinct c) end from t1)))) else count(*) end-max((e))) | -cast(avg(t1.e) AS integer) from t1) FROM t1 WHERE exists(select 1 from t1 where -f-d*t1.f-c-coalesce((select max(t1.a) from 
t1 where 11 not in ((select count(*) from t1),coalesce((select max(t1.f+e) from t1 where t1.b in (select count(distinct t1.c) from t1 union select -case abs(abs(count(distinct 13))) when max(t1.b) then abs(cast(avg(11) AS integer)) else -cast(avg(17) AS integer) end from t1)),case t1.f when -b then 17 else 17 end)- -t1.f,b)),a)+f+c-t1.ce then 19 else t1.c-t1.d end else b end),(e))-t1.a+(13)-19)) FROM t1 WHERE (not t1.e not in (case when exists(select 1 from t1 where 17<=t1.f*17*t1.b) then f+coalesce((select max(t1.b-c-11+t1.c-a) from t1 where 13 in (select t1.e from t1 union select case when a not between t1.f and d or fe then 19 else t1.c-t1.d end else b end),(e))-t1.a+(13)-19)) FROM t1 WHERE NOT ((not t1.e not in (case when exists(select 1 from t1 where 17<=t1.f*17*t1.b) then f+coalesce((select max(t1.b-c-11+t1.c-a) from t1 where 13 in (select t1.e from t1 union select case when a not between t1.f and d or f=t1.b) and (t1.e<>( -t1.d)) and (t1.c<=13)) then c else d+11 end<=case when (t1.c)>=19 then (t1.e) when (f) not between 11 and f then coalesce((select 13 from t1 where -t1.b>=d),13) else b end} +} {203} +do_test randexpr-2.227 { + db eval {SELECT (select min(b) | case case cast(avg(c) AS integer) when ~+~count(distinct -19*f*f) then count(distinct (select cast(avg(d-case when t1.a<=19 then 19 else 11 end+f+a) AS integer) from t1))+count(distinct t1.c)+ -+ -count(*) | - -max((17))*min(t1.b)*min((e)) else max(t1.c) end when ( - - -min(a)) then ( -min(c)) else max((11)) end from t1) FROM t1 WHERE NOT (case when (((exists(select 1 from t1 where ~t1.d-t1.f between -+t1.f*c and f and not t1.e in (19,a,13))) or -e>=t1.b) and (t1.e<>( -t1.d)) and (t1.c<=13)) then c else d+11 end<=case when (t1.c)>=19 then (t1.e) when (f) not between 11 and f then coalesce((select 13 from t1 where -t1.b>=d),13) else b end)} +} {} +do_test randexpr-2.228 { + db eval {SELECT (select min(b) & case case cast(avg(c) AS integer) when ~+~count(distinct -19*f*f) then count(distinct (select cast(avg(d-case when t1.a<=19 then 19 else 11 end+f+a) AS integer) from t1))+count(distinct t1.c)+ -+ -count(*) & - -max((17))*min(t1.b)*min((e)) else max(t1.c) end when ( - - -min(a)) then ( -min(c)) else max((11)) end from t1) FROM t1 WHERE case when (((exists(select 1 from t1 where ~t1.d-t1.f between -+t1.f*c and f and not t1.e in (19,a,13))) or -e>=t1.b) and (t1.e<>( -t1.d)) and (t1.c<=13)) then c else d+11 end<=case when (t1.c)>=19 then (t1.e) when (f) not between 11 and f then coalesce((select 13 from t1 where -t1.b>=d),13) else b end} +} {8} +do_test randexpr-2.229 { + db eval {SELECT case when 19-b in (select (min(case coalesce((select e from t1 where (c not between c and e)),(select abs(count(distinct coalesce((select max( -coalesce((select max(17) from t1 where (d)>17 and c not between d and b),11)-d) from t1 where ((11)) not in (d,d,e)), - -e))) from t1)+t1.e+t1.a) when f then t1.e else t1.a end)) from t1 union select max(11) from t1) then c else f end+t1.e FROM t1 WHERE coalesce((select max(d) from t1 where coalesce((select max(case when (a++19=case t1.d when t1.e then t1.b else t1.a end*f) and -13=a then case t1.d+13 when t1.f then 17 else t1.f end when (11 not in (d,17,t1.f)) then e else 11 end+b*t1.b) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where t1.f in (select t1.e from t1 union select b from t1)))),(d))<=c),a) between t1.f and d} +} {} +do_test randexpr-2.230 { + db eval {SELECT case when 19-b in (select (min(case coalesce((select e from t1 where (c not between c and e)),(select 
abs(count(distinct coalesce((select max( -coalesce((select max(17) from t1 where (d)>17 and c not between d and b),11)-d) from t1 where ((11)) not in (d,d,e)), - -e))) from t1)+t1.e+t1.a) when f then t1.e else t1.a end)) from t1 union select max(11) from t1) then c else f end+t1.e FROM t1 WHERE NOT (coalesce((select max(d) from t1 where coalesce((select max(case when (a++19=case t1.d when t1.e then t1.b else t1.a end*f) and -13=a then case t1.d+13 when t1.f then 17 else t1.f end when (11 not in (d,17,t1.f)) then e else 11 end+b*t1.b) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where t1.f in (select t1.e from t1 union select b from t1)))),(d))<=c),a) between t1.f and d)} +} {1100} +do_test randexpr-2.231 { + db eval {SELECT -t1.e*coalesce((select max(17) from t1 where (11*(select case -abs( -+count(*)) when -case +(cast(avg( -f) AS integer)) | (abs(cast(avg(c) AS integer))) when min(d) then -max(t1.f) else count(*) end then cast(avg( -t1.f) AS integer) else max(b) end from t1)*~t1.e*case (select min(e) from t1) when 17-t1.b then (e) else t1.f+t1.f end) in (select t1.d from t1 union select 19 from t1)), -t1.a) FROM t1 WHERE (select min(19) from t1) in (select 17 from t1 union select -11+t1.d+t1.c from t1)} +} {} +do_test randexpr-2.232 { + db eval {SELECT -t1.e*coalesce((select max(17) from t1 where (11*(select case -abs( -+count(*)) when -case +(cast(avg( -f) AS integer)) | (abs(cast(avg(c) AS integer))) when min(d) then -max(t1.f) else count(*) end then cast(avg( -t1.f) AS integer) else max(b) end from t1)*~t1.e*case (select min(e) from t1) when 17-t1.b then (e) else t1.f+t1.f end) in (select t1.d from t1 union select 19 from t1)), -t1.a) FROM t1 WHERE NOT ((select min(19) from t1) in (select 17 from t1 union select -11+t1.d+t1.c from t1))} +} {50000} +do_test randexpr-2.233 { + db eval {SELECT -t1.e*coalesce((select max(17) from t1 where (11*(select case -abs( -+count(*)) when -case +(cast(avg( -f) AS integer)) & (abs(cast(avg(c) AS integer))) when min(d) then -max(t1.f) else count(*) end then cast(avg( -t1.f) AS integer) else max(b) end from t1)*~t1.e*case (select min(e) from t1) when 17-t1.b then (e) else t1.f+t1.f end) in (select t1.d from t1 union select 19 from t1)), -t1.a) FROM t1 WHERE NOT ((select min(19) from t1) in (select 17 from t1 union select -11+t1.d+t1.c from t1))} +} {50000} +do_test randexpr-2.234 { + db eval {SELECT (coalesce((select max(case when t1.e>= -e | 13-19*d then case when c not in (+(select count(*) from t1),coalesce((select max(t1.a) from t1 where 13 in (select case count(distinct t1.a) | count(*) when cast(avg(t1.b) AS integer) then count(distinct a) else min(11) end from t1 union select count(*) from t1)),19),t1.a) then t1.c else c end when t1.f in (select t1.f from t1 union select t1.e from t1) then t1.e else -t1.e end) from t1 where (17 in (e,d,a))),t1.d)* -19) FROM t1 WHERE (((abs(t1.f)/abs(d)) in (select case (case max(coalesce((select max(e*(t1.d*13+t1.b)-t1.b) from t1 where t1.a=13),(t1.c))) when (case max(t1.a) when -abs(max(17))-count(distinct 19) then cast(avg(t1.d) AS integer) else max(13) end)*count(distinct t1.c) then (max((t1.b))) else min(19) end) when max(f) then -(count(distinct t1.f)) else min(t1.b) end from t1 union select cast(avg(13) AS integer) from t1) and (13 in (select ((min(b))) from t1 union select - -count(*) from t1))))} +} {} +do_test randexpr-2.235 { + db eval {SELECT (coalesce((select max(case when t1.e>= -e | 13-19*d then case when c not in (+(select count(*) from t1),coalesce((select max(t1.a) from t1 
where 13 in (select case count(distinct t1.a) | count(*) when cast(avg(t1.b) AS integer) then count(distinct a) else min(11) end from t1 union select count(*) from t1)),19),t1.a) then t1.c else c end when t1.f in (select t1.f from t1 union select t1.e from t1) then t1.e else -t1.e end) from t1 where (17 in (e,d,a))),t1.d)* -19) FROM t1 WHERE NOT ((((abs(t1.f)/abs(d)) in (select case (case max(coalesce((select max(e*(t1.d*13+t1.b)-t1.b) from t1 where t1.a=13),(t1.c))) when (case max(t1.a) when -abs(max(17))-count(distinct 19) then cast(avg(t1.d) AS integer) else max(13) end)*count(distinct t1.c) then (max((t1.b))) else min(19) end) when max(f) then -(count(distinct t1.f)) else min(t1.b) end from t1 union select cast(avg(13) AS integer) from t1) and (13 in (select ((min(b))) from t1 union select - -count(*) from t1)))))} +} {-7600} +do_test randexpr-2.236 { + db eval {SELECT (coalesce((select max(case when t1.e>= -e & 13-19*d then case when c not in (+(select count(*) from t1),coalesce((select max(t1.a) from t1 where 13 in (select case count(distinct t1.a) & count(*) when cast(avg(t1.b) AS integer) then count(distinct a) else min(11) end from t1 union select count(*) from t1)),19),t1.a) then t1.c else c end when t1.f in (select t1.f from t1 union select t1.e from t1) then t1.e else -t1.e end) from t1 where (17 in (e,d,a))),t1.d)* -19) FROM t1 WHERE NOT ((((abs(t1.f)/abs(d)) in (select case (case max(coalesce((select max(e*(t1.d*13+t1.b)-t1.b) from t1 where t1.a=13),(t1.c))) when (case max(t1.a) when -abs(max(17))-count(distinct 19) then cast(avg(t1.d) AS integer) else max(13) end)*count(distinct t1.c) then (max((t1.b))) else min(19) end) when max(f) then -(count(distinct t1.f)) else min(t1.b) end from t1 union select cast(avg(13) AS integer) from t1) and (13 in (select ((min(b))) from t1 union select - -count(*) from t1)))))} +} {-7600} +do_test randexpr-2.237 { + db eval {SELECT case when case when b in (select abs(abs( -max(case c+d when f then f else 19 end | c)-( -case min(c) when -(cast(avg((t1.f)) AS integer)) then count(*) else ((cast(avg(t1.b) AS integer))) end))+count(*)+(max(17))) from t1 union select count(*) from t1) then t1.a when ~13*t1.a- -c not in (b,e,19) then t1.c else (t1.d) end<=(b) and 13 between t1.f and 11 then t1.f else -19 end FROM t1 WHERE d>=case when t1.d+case when ( -17)-c>11 or b in ((select min(13)+ -cast(avg(11) AS integer) from t1)+~t1.f, -c,(13)) and e in (select (d) from t1 union select f from t1) then t1.a when d= -t1.d then a- -17 else 17 end-t1.c>d then 11 when exists(select 1 from t1 where 17 not between t1.b and t1.b) then t1.b else (t1.a) end} +} {-19} +do_test randexpr-2.238 { + db eval {SELECT case when case when b in (select abs(abs( -max(case c+d when f then f else 19 end | c)-( -case min(c) when -(cast(avg((t1.f)) AS integer)) then count(*) else ((cast(avg(t1.b) AS integer))) end))+count(*)+(max(17))) from t1 union select count(*) from t1) then t1.a when ~13*t1.a- -c not in (b,e,19) then t1.c else (t1.d) end<=(b) and 13 between t1.f and 11 then t1.f else -19 end FROM t1 WHERE NOT (d>=case when t1.d+case when ( -17)-c>11 or b in ((select min(13)+ -cast(avg(11) AS integer) from t1)+~t1.f, -c,(13)) and e in (select (d) from t1 union select f from t1) then t1.a when d= -t1.d then a- -17 else 17 end-t1.c>d then 11 when exists(select 1 from t1 where 17 not between t1.b and t1.b) then t1.b else (t1.a) end)} +} {} +do_test randexpr-2.239 { + db eval {SELECT case when case when b in (select abs(abs( -max(case c+d when f then f else 19 end & c)-( -case 
min(c) when -(cast(avg((t1.f)) AS integer)) then count(*) else ((cast(avg(t1.b) AS integer))) end))+count(*)+(max(17))) from t1 union select count(*) from t1) then t1.a when ~13*t1.a- -c not in (b,e,19) then t1.c else (t1.d) end<=(b) and 13 between t1.f and 11 then t1.f else -19 end FROM t1 WHERE d>=case when t1.d+case when ( -17)-c>11 or b in ((select min(13)+ -cast(avg(11) AS integer) from t1)+~t1.f, -c,(13)) and e in (select (d) from t1 union select f from t1) then t1.a when d= -t1.d then a- -17 else 17 end-t1.c>d then 11 when exists(select 1 from t1 where 17 not between t1.b and t1.b) then t1.b else (t1.a) end} +} {-19} +do_test randexpr-2.240 { + db eval {SELECT e-coalesce((select coalesce((select max(19) from t1 where (coalesce((select max(case 11 when b then 13 else t1.a end) from t1 where 19 not in (c,t1.f,b)),t1.a)+t1.d in (select 17 from t1 union select 19 from t1) and 11 in (13,t1.c,17) or 19 not in (t1.f,a,17) or t1.e not between 13 and f and a>=d)),(a)-17+t1.f)+t1.a+17 from t1 where not exists(select 1 from t1 where a>t1.f)),t1.b) FROM t1 WHERE t1.a-17 in (select (abs(count(*)))-case case case abs(abs(count(*))) when min(t1.f) then count(*) else cast(avg(+t1.a+(e) | 19*e+t1.f) AS integer) end when abs(+cast(avg(b) AS integer) | count(distinct (13))*+~ -(min(e)) | cast(avg(19) AS integer)) then cast(avg(19) AS integer) else count(distinct t1.c) end | min(t1.d) when count(*) then (count(distinct d)) else (min(11)) end from t1 union select (cast(avg(t1.a) AS integer)) from t1)} +} {} +do_test randexpr-2.241 { + db eval {SELECT e-coalesce((select coalesce((select max(19) from t1 where (coalesce((select max(case 11 when b then 13 else t1.a end) from t1 where 19 not in (c,t1.f,b)),t1.a)+t1.d in (select 17 from t1 union select 19 from t1) and 11 in (13,t1.c,17) or 19 not in (t1.f,a,17) or t1.e not between 13 and f and a>=d)),(a)-17+t1.f)+t1.a+17 from t1 where not exists(select 1 from t1 where a>t1.f)),t1.b) FROM t1 WHERE NOT (t1.a-17 in (select (abs(count(*)))-case case case abs(abs(count(*))) when min(t1.f) then count(*) else cast(avg(+t1.a+(e) | 19*e+t1.f) AS integer) end when abs(+cast(avg(b) AS integer) | count(distinct (13))*+~ -(min(e)) | cast(avg(19) AS integer)) then cast(avg(19) AS integer) else count(distinct t1.c) end | min(t1.d) when count(*) then (count(distinct d)) else (min(11)) end from t1 union select (cast(avg(t1.a) AS integer)) from t1))} +} {364} +do_test randexpr-2.242 { + db eval {SELECT -coalesce((select case when b>19 | f-(select abs(max(t1.b+case when e>17 then 11 else f end)+count(*)) from t1) and case when (19<11*t1.d*t1.b) then 13 else t1.e end not between c and b then d+17 else c end-f from t1 where t1.b>=f),t1.c)+17-a FROM t1 WHERE +c in (+~a+case when ((case t1.d when coalesce((select f from t1 where e in (select count(distinct t1.e) from t1 union select min((abs(t1.c)/abs(e | b)) | d* -t1.d) from t1)),t1.e)+a then c else b end in (select -cast(avg(t1.c) AS integer)-~cast(avg(11) AS integer) from t1 union select -count(distinct 19) from t1))) then -e*19 else 13 end,t1.a,d)} +} {} +do_test randexpr-2.243 { + db eval {SELECT -coalesce((select case when b>19 | f-(select abs(max(t1.b+case when e>17 then 11 else f end)+count(*)) from t1) and case when (19<11*t1.d*t1.b) then 13 else t1.e end not between c and b then d+17 else c end-f from t1 where t1.b>=f),t1.c)+17-a FROM t1 WHERE NOT (+c in (+~a+case when ((case t1.d when coalesce((select f from t1 where e in (select count(distinct t1.e) from t1 union select min((abs(t1.c)/abs(e | b)) | d* -t1.d) from 
t1)),t1.e)+a then c else b end in (select -cast(avg(t1.c) AS integer)-~cast(avg(11) AS integer) from t1 union select -count(distinct 19) from t1))) then -e*19 else 13 end,t1.a,d))} +} {-383} +do_test randexpr-2.244 { + db eval {SELECT -coalesce((select case when b>19 & f-(select abs(max(t1.b+case when e>17 then 11 else f end)+count(*)) from t1) and case when (19<11*t1.d*t1.b) then 13 else t1.e end not between c and b then d+17 else c end-f from t1 where t1.b>=f),t1.c)+17-a FROM t1 WHERE NOT (+c in (+~a+case when ((case t1.d when coalesce((select f from t1 where e in (select count(distinct t1.e) from t1 union select min((abs(t1.c)/abs(e | b)) | d* -t1.d) from t1)),t1.e)+a then c else b end in (select -cast(avg(t1.c) AS integer)-~cast(avg(11) AS integer) from t1 union select -count(distinct 19) from t1))) then -e*19 else 13 end,t1.a,d))} +} {-383} +do_test randexpr-2.245 { + db eval {SELECT case when t1.b>=+(abs(t1.e)/abs(coalesce((select c from t1 where 17<=+(e)),(e)-+~11*17+t1.d-e | +coalesce((select max(t1.d) from t1 where f>e),t1.a)*b | (e))))-t1.a and c>=t1.d or (17 not between e and b) then t1.e else d end FROM t1 WHERE exists(select 1 from t1 where (19-((17))+(b) in (select d*e*a from t1 union select ~19 from t1)) and case b-(abs(case a when coalesce((select max(19) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where b not between e and (t1.f) or a not in (t1.c,17,t1.d)))),case t1.b when -d then 13 else 13 end)*a+t1.c then e else a end)/abs(t1.d))*t1.e when -13 then t1.c else 19 end in (select +count(distinct e) from t1 union select count(*) from t1))} +} {} +do_test randexpr-2.246 { + db eval {SELECT case when t1.b>=+(abs(t1.e)/abs(coalesce((select c from t1 where 17<=+(e)),(e)-+~11*17+t1.d-e | +coalesce((select max(t1.d) from t1 where f>e),t1.a)*b | (e))))-t1.a and c>=t1.d or (17 not between e and b) then t1.e else d end FROM t1 WHERE NOT (exists(select 1 from t1 where (19-((17))+(b) in (select d*e*a from t1 union select ~19 from t1)) and case b-(abs(case a when coalesce((select max(19) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where b not between e and (t1.f) or a not in (t1.c,17,t1.d)))),case t1.b when -d then 13 else 13 end)*a+t1.c then e else a end)/abs(t1.d))*t1.e when -13 then t1.c else 19 end in (select +count(distinct e) from t1 union select count(*) from t1)))} +} {500} +do_test randexpr-2.247 { + db eval {SELECT case when t1.b>=+(abs(t1.e)/abs(coalesce((select c from t1 where 17<=+(e)),(e)-+~11*17+t1.d-e & +coalesce((select max(t1.d) from t1 where f>e),t1.a)*b & (e))))-t1.a and c>=t1.d or (17 not between e and b) then t1.e else d end FROM t1 WHERE NOT (exists(select 1 from t1 where (19-((17))+(b) in (select d*e*a from t1 union select ~19 from t1)) and case b-(abs(case a when coalesce((select max(19) from t1 where exists(select 1 from t1 where exists(select 1 from t1 where b not between e and (t1.f) or a not in (t1.c,17,t1.d)))),case t1.b when -d then 13 else 13 end)*a+t1.c then e else a end)/abs(t1.d))*t1.e when -13 then t1.c else 19 end in (select +count(distinct e) from t1 union select count(*) from t1)))} +} {500} +do_test randexpr-2.248 { + db eval {SELECT case when 11 in (select min(+t1.e)* -max(a)+count(distinct d*11*c)*(count(distinct (e))) | ( -min(t1.b)) | count(distinct 19) | (max(t1.f))-max(t1.b) from t1 union select (min(19)) from t1) then coalesce((select max(17) from t1 where -17=13 or c in (select t1.d from t1 union select d from t1)),b)*t1.b-a+19 when t1.d<(d) then (d) else 19 end FROM t1 WHERE case when case 
when t1.c not in (f,19,b) then t1.d-t1.c-case when 17<>t1.a then -(abs((select cast(avg(b) AS integer)- -max(t1.c) from t1))/abs(t1.c)) else t1.f end-17 | 19 when exists(select 1 from t1 where exists(select 1 from t1 where (e between 13 and (t1.a))) and e<=t1.c) then f else t1.a end>=11 then c when ft1.a then -(abs((select cast(avg(b) AS integer)- -max(t1.c) from t1))/abs(t1.c)) else t1.f end-17 | 19 when exists(select 1 from t1 where exists(select 1 from t1 where (e between 13 and (t1.a))) and e<=t1.c) then f else t1.a end>=11 then c when ft1.a then -(abs((select cast(avg(b) AS integer)- -max(t1.c) from t1))/abs(t1.c)) else t1.f end-17 | 19 when exists(select 1 from t1 where exists(select 1 from t1 where (e between 13 and (t1.a))) and e<=t1.c) then f else t1.a end>=11 then c when f=c then t1.b else t1.f end)) and 17<>f) then case when (e) not in (t1.a,t1.f,e) then case case t1.c when 13 then 19 else t1.d end-19 when t1.d then -t1.b else t1.f end when t1.d not in (17,t1.a,t1.d) or c in (d,(19),t1.b) then -t1.b else c end else 19 end+t1.b-f+t1.e FROM t1 WHERE t1.f-case when (coalesce((select 13*(abs(~11)/abs(t1.b)) from t1 where ~a between +f+~+17 and t1.e),e) not between coalesce((select 19-(abs(11)/abs(t1.b)) from t1 where f<>c),t1.b) and t1.f or t1.d between (select case min(a) when cast(avg(e) AS integer) then -((count(*))) else cast(avg(t1.a) AS integer) end | count(*) from t1) and (t1.a)) then 19 else f end=f} +} {} +do_test randexpr-2.252 { + db eval {SELECT c*~17+~case when (exists(select 1 from t1 where not exists(select 1 from t1 where (abs(e)/abs(c))=case when t1.c not in (a,13,11) and t1.b>=c then t1.b else t1.f end)) and 17<>f) then case when (e) not in (t1.a,t1.f,e) then case case t1.c when 13 then 19 else t1.d end-19 when t1.d then -t1.b else t1.f end when t1.d not in (17,t1.a,t1.d) or c in (d,(19),t1.b) then -t1.b else c end else 19 end+t1.b-f+t1.e FROM t1 WHERE NOT (t1.f-case when (coalesce((select 13*(abs(~11)/abs(t1.b)) from t1 where ~a between +f+~+17 and t1.e),e) not between coalesce((select 19-(abs(11)/abs(t1.b)) from t1 where f<>c),t1.b) and t1.f or t1.d between (select case min(a) when cast(avg(e) AS integer) then -((count(*))) else cast(avg(t1.a) AS integer) end | count(*) from t1) and (t1.a)) then 19 else f end=f)} +} {-5601} +do_test randexpr-2.253 { + db eval {SELECT coalesce((select c-t1.a | 17 | b-~t1.e-11 from t1 where not exists(select 1 from t1 where t1.c<=t1.b)),17) FROM t1 WHERE (((coalesce((select max((abs(b-t1.d-19*t1.a*e+~case when t1.e=t1.f-e then (select abs(abs(cast(avg((f)) AS integer))) from t1) else ((abs(t1.b)/abs(t1.a))) end-t1.b-t1.c)/abs(t1.b))) from t1 where not t1.c in (select count(distinct c) from t1 union select count(*) from t1) and 17< -t1.e),(c)) | -(c))<>e))} +} {763} +do_test randexpr-2.254 { + db eval {SELECT coalesce((select c-t1.a | 17 | b-~t1.e-11 from t1 where not exists(select 1 from t1 where t1.c<=t1.b)),17) FROM t1 WHERE NOT ((((coalesce((select max((abs(b-t1.d-19*t1.a*e+~case when t1.e=t1.f-e then (select abs(abs(cast(avg((f)) AS integer))) from t1) else ((abs(t1.b)/abs(t1.a))) end-t1.b-t1.c)/abs(t1.b))) from t1 where not t1.c in (select count(distinct c) from t1 union select count(*) from t1) and 17< -t1.e),(c)) | -(c))<>e)))} +} {} +do_test randexpr-2.255 { + db eval {SELECT coalesce((select c-t1.a & 17 & b-~t1.e-11 from t1 where not exists(select 1 from t1 where t1.c<=t1.b)),17) FROM t1 WHERE (((coalesce((select max((abs(b-t1.d-19*t1.a*e+~case when t1.e=t1.f-e then (select abs(abs(cast(avg((f)) AS integer))) from t1) else 
((abs(t1.b)/abs(t1.a))) end-t1.b-t1.c)/abs(t1.b))) from t1 where not t1.c in (select count(distinct c) from t1 union select count(*) from t1) and 17< -t1.e),(c)) | -(c))<>e))} +} {0} +do_test randexpr-2.256 { + db eval {SELECT case case +(select case count(distinct case when case when (abs(f | t1.d)/abs(13))<= -c then t1.a else t1.b end+t1.b<=19 then c when 19=a then 17 else 11 end+13) when ~+case (cast(avg(t1.b) AS integer) | ((count(*)))-max(t1.d)) when count(distinct d) then count(*) else cast(avg(t1.c) AS integer) end then count(*) else count(*) end from t1) when c*t1.a then 13 else t1.a end when a then -f else t1.a end+d FROM t1 WHERE case when coalesce((select t1.c | c*t1.e-11 from t1 where exists(select 1 from t1 where t1.d-t1.d*d between - -t1.e and a)), -13)<=e and 11 not in (d,t1.a,f) and t1.a not between 11 and 19 or t1.e<>t1.e and t1.e=t1.a or -t1.e>=b then b when 17 not between t1.f and t1.b then a else b end in (select 11 from t1 union select f from t1)} +} {} +do_test randexpr-2.257 { + db eval {SELECT case case +(select case count(distinct case when case when (abs(f | t1.d)/abs(13))<= -c then t1.a else t1.b end+t1.b<=19 then c when 19=a then 17 else 11 end+13) when ~+case (cast(avg(t1.b) AS integer) | ((count(*)))-max(t1.d)) when count(distinct d) then count(*) else cast(avg(t1.c) AS integer) end then count(*) else count(*) end from t1) when c*t1.a then 13 else t1.a end when a then -f else t1.a end+d FROM t1 WHERE NOT (case when coalesce((select t1.c | c*t1.e-11 from t1 where exists(select 1 from t1 where t1.d-t1.d*d between - -t1.e and a)), -13)<=e and 11 not in (d,t1.a,f) and t1.a not between 11 and 19 or t1.e<>t1.e and t1.e=t1.a or -t1.e>=b then b when 17 not between t1.f and t1.b then a else b end in (select 11 from t1 union select f from t1))} +} {-200} +do_test randexpr-2.258 { + db eval {SELECT case case +(select case count(distinct case when case when (abs(f & t1.d)/abs(13))<= -c then t1.a else t1.b end+t1.b<=19 then c when 19=a then 17 else 11 end+13) when ~+case (cast(avg(t1.b) AS integer) & ((count(*)))-max(t1.d)) when count(distinct d) then count(*) else cast(avg(t1.c) AS integer) end then count(*) else count(*) end from t1) when c*t1.a then 13 else t1.a end when a then -f else t1.a end+d FROM t1 WHERE NOT (case when coalesce((select t1.c | c*t1.e-11 from t1 where exists(select 1 from t1 where t1.d-t1.d*d between - -t1.e and a)), -13)<=e and 11 not in (d,t1.a,f) and t1.a not between 11 and 19 or t1.e<>t1.e and t1.e=t1.a or -t1.e>=b then b when 17 not between t1.f and t1.b then a else b end in (select 11 from t1 union select f from t1))} +} {-200} +do_test randexpr-2.259 { + db eval {SELECT f*coalesce((select max((select count(*) from t1)) from t1 where ~case when t1.e<=case when 17>=+case when c*a in (c,t1.b,13) then t1.e when a>t1.b and a<=19 then c else t1.e end then t1.c else 17 end-f*17 or a between b and 19 then t1.c when -f<=11 then c else -11 end*t1.c+d>=e),17) FROM t1 WHERE (19 not between t1.b and ~e)} +} {10200} +do_test randexpr-2.260 { + db eval {SELECT f*coalesce((select max((select count(*) from t1)) from t1 where ~case when t1.e<=case when 17>=+case when c*a in (c,t1.b,13) then t1.e when a>t1.b and a<=19 then c else t1.e end then t1.c else 17 end-f*17 or a between b and 19 then t1.c when -f<=11 then c else -11 end*t1.c+d>=e),17) FROM t1 WHERE NOT ((19 not between t1.b and ~e))} +} {} +do_test randexpr-2.261 { + db eval {SELECT coalesce((select max(t1.c- -case when (~(select max(b*t1.a)*~((min(t1.e))-min((a))) | -cast(avg(t1.e+t1.a) AS integer) 
from t1))c or (t1.b)> -(11)),11)) from t1 where -11 in ((d),f,t1.d)),t1.a) FROM t1 WHERE c<>e+t1.c} +} {100} +do_test randexpr-2.262 { + db eval {SELECT coalesce((select max(t1.c- -case when (~(select max(b*t1.a)*~((min(t1.e))-min((a))) | -cast(avg(t1.e+t1.a) AS integer) from t1))c or (t1.b)> -(11)),11)) from t1 where -11 in ((d),f,t1.d)),t1.a) FROM t1 WHERE NOT (c<>e+t1.c)} +} {} +do_test randexpr-2.263 { + db eval {SELECT coalesce((select max(t1.c- -case when (~(select max(b*t1.a)*~((min(t1.e))-min((a))) & -cast(avg(t1.e+t1.a) AS integer) from t1))c or (t1.b)> -(11)),11)) from t1 where -11 in ((d),f,t1.d)),t1.a) FROM t1 WHERE c<>e+t1.c} +} {100} +do_test randexpr-2.264 { + db eval {SELECT -case when (not exists(select 1 from t1 where e*d*case 17 when case t1.c-t1.f when t1.a then t1.a else 11 end then t1.c else t1.b end-e+(t1.a)-e not in (e,e,t1.a) and (t1.f not between 19 and t1.e))) and t1.be),c) between t1.e and f then t1.e when -f> - -b then c else t1.c end) from t1 where not 11 in (select f from t1 union select a from t1)),t1.d)-t1.a-13 from t1 where t1.d between -t1.e and t1.a),f) FROM t1 WHERE exists(select 1 from t1 where not exists(select 1 from t1 where c in (select (abs(f)/abs(13)) from t1 union select t1.a+b from t1)))} +} {} +do_test randexpr-2.267 { + db eval {SELECT coalesce((select 17+coalesce((select max(case when -coalesce((select max((abs( -(abs(t1.a)/abs(t1.d))+11)/abs(e))-b) from t1 where t1.b not between f and t1.c and t1.f<>e),c) between t1.e and f then t1.e when -f> - -b then c else t1.c end) from t1 where not 11 in (select f from t1 union select a from t1)),t1.d)-t1.a-13 from t1 where t1.d between -t1.e and t1.a),f) FROM t1 WHERE NOT (exists(select 1 from t1 where not exists(select 1 from t1 where c in (select (abs(f)/abs(13)) from t1 union select t1.a+b from t1))))} +} {600} +do_test randexpr-2.268 { + db eval {SELECT coalesce((select max(t1.d) from t1 where exists(select 1 from t1 where t1.d in (select case (abs(((count(distinct t1.d-c))* -cast(avg(11) AS integer)+max(t1.c)))) when (cast(avg(t1.c+a) AS integer)*+min(a)*max(c)-max(t1.a)+cast(avg(f) AS integer))+cast(avg(t1.e) AS integer) then min(11) else (((min(11)))) end from t1 union select count(*) from t1))),coalesce((select max(b+13) from t1 where exists(select 1 from t1 where f in (select 11 from t1 union select t1.b from t1))),t1.e)) FROM t1 WHERE b>11} +} {500} +do_test randexpr-2.269 { + db eval {SELECT coalesce((select max(t1.d) from t1 where exists(select 1 from t1 where t1.d in (select case (abs(((count(distinct t1.d-c))* -cast(avg(11) AS integer)+max(t1.c)))) when (cast(avg(t1.c+a) AS integer)*+min(a)*max(c)-max(t1.a)+cast(avg(f) AS integer))+cast(avg(t1.e) AS integer) then min(11) else (((min(11)))) end from t1 union select count(*) from t1))),coalesce((select max(b+13) from t1 where exists(select 1 from t1 where f in (select 11 from t1 union select t1.b from t1))),t1.e)) FROM t1 WHERE NOT (b>11)} +} {} +do_test randexpr-2.270 { + db eval {SELECT coalesce((select max( -(select count(*)-case min(19)-abs(count(*))+(max( -b)-+count(distinct (t1.e))) when max(a) then (count(distinct 19)) else count(*) end from t1)) from t1 where exists(select 1 from t1 where a<(abs(case when a-t1.c+ -19+t1.d between 11 and f then t1.f else d end+t1.a+d)/abs(c))) or t1.c>t1.c),17)*f FROM t1 WHERE not f<>t1.b} +} {} +do_test randexpr-2.271 { + db eval {SELECT coalesce((select max( -(select count(*)-case min(19)-abs(count(*))+(max( -b)-+count(distinct (t1.e))) when max(a) then (count(distinct 19)) else count(*) end from 
t1)) from t1 where exists(select 1 from t1 where a<(abs(case when a-t1.c+ -19+t1.d between 11 and f then t1.f else d end+t1.a+d)/abs(c))) or t1.c>t1.c),17)*f FROM t1 WHERE NOT (not f<>t1.b)} +} {10200} +do_test randexpr-2.272 { + db eval {SELECT case when coalesce((select f-coalesce((select +coalesce((select t1.c from t1 where (t1.c=(abs(t1.e)/abs(+t1.c+f+(f))))),13+17)+t1.b | t1.c from t1 where case when t1.a not in (d,17,t1.c) then f else f end | -13 in (select cast(avg(t1.e) AS integer) from t1 union select min(t1.a) from t1)),c) from t1 where 17>c), -t1.b)>t1.c then 17 when t1.b<>11 then e else t1.f end FROM t1 WHERE case when t1.f in (t1.a+(abs(17)/abs((t1.d))),coalesce((select max(c) from t1 where (not (d- -case f when 17 then d else (17) end+e*t1.d) not in ((t1.d),t1.f,f))),coalesce((select c from t1 where 13 not in (19,d,b)),f)),17) then t1.b when e<=f then 17 else c end -19} +} {500} +do_test randexpr-2.273 { + db eval {SELECT case when coalesce((select f-coalesce((select +coalesce((select t1.c from t1 where (t1.c=(abs(t1.e)/abs(+t1.c+f+(f))))),13+17)+t1.b | t1.c from t1 where case when t1.a not in (d,17,t1.c) then f else f end | -13 in (select cast(avg(t1.e) AS integer) from t1 union select min(t1.a) from t1)),c) from t1 where 17>c), -t1.b)>t1.c then 17 when t1.b<>11 then e else t1.f end FROM t1 WHERE NOT (case when t1.f in (t1.a+(abs(17)/abs((t1.d))),coalesce((select max(c) from t1 where (not (d- -case f when 17 then d else (17) end+e*t1.d) not in ((t1.d),t1.f,f))),coalesce((select c from t1 where 13 not in (19,d,b)),f)),17) then t1.b when e<=f then 17 else c end -19)} +} {} +do_test randexpr-2.274 { + db eval {SELECT case when coalesce((select f-coalesce((select +coalesce((select t1.c from t1 where (t1.c=(abs(t1.e)/abs(+t1.c+f+(f))))),13+17)+t1.b & t1.c from t1 where case when t1.a not in (d,17,t1.c) then f else f end & -13 in (select cast(avg(t1.e) AS integer) from t1 union select min(t1.a) from t1)),c) from t1 where 17>c), -t1.b)>t1.c then 17 when t1.b<>11 then e else t1.f end FROM t1 WHERE case when t1.f in (t1.a+(abs(17)/abs((t1.d))),coalesce((select max(c) from t1 where (not (d- -case f when 17 then d else (17) end+e*t1.d) not in ((t1.d),t1.f,f))),coalesce((select c from t1 where 13 not in (19,d,b)),f)),17) then t1.b when e<=f then 17 else c end -19} +} {500} +do_test randexpr-2.275 { + db eval {SELECT case when d*t1.c*a<>t1.b*~t1.b*t1.f-b then e else 17 end*t1.e+case when a not between t1.a and t1.d or (select - -cast(avg(c) AS integer) from t1)<>17+c then case 11 when t1.f then -t1.b else t1.b end when 13 not in (c,t1.f,19) or not exists(select 1 from t1 where 13=t1.d or t1.e<>a) then (17) else -t1.f end FROM t1 WHERE (not (case e when e then +case when e>=a and 13<>e*t1.e or t1.d<>11 then t1.c else t1.b end else 17 end) in (select cast(avg( -t1.c) AS integer) from t1 union select case min(13) when -+count(distinct 13) then case (+(cast(avg(e) AS integer))+((cast(avg(13) AS integer)))* - -count(distinct c)*count(*)) | count(*) when min(t1.c) then (min(11)) else max(f) end else cast(avg(17) AS integer) end from t1))} +} {250200} +do_test randexpr-2.276 { + db eval {SELECT case when d*t1.c*a<>t1.b*~t1.b*t1.f-b then e else 17 end*t1.e+case when a not between t1.a and t1.d or (select - -cast(avg(c) AS integer) from t1)<>17+c then case 11 when t1.f then -t1.b else t1.b end when 13 not in (c,t1.f,19) or not exists(select 1 from t1 where 13=t1.d or t1.e<>a) then (17) else -t1.f end FROM t1 WHERE NOT ((not (case e when e then +case when e>=a and 13<>e*t1.e or t1.d<>11 then t1.c 
else t1.b end else 17 end) in (select cast(avg( -t1.c) AS integer) from t1 union select case min(13) when -+count(distinct 13) then case (+(cast(avg(e) AS integer))+((cast(avg(13) AS integer)))* - -count(distinct c)*count(*)) | count(*) when min(t1.c) then (min(11)) else max(f) end else cast(avg(17) AS integer) end from t1)))} +} {} +do_test randexpr-2.277 { + db eval {SELECT (select max(t1.a)*abs(count(distinct f-t1.a-(abs(t1.a)/abs(case e when case t1.f when e*(b)-19 then 11 else t1.d end then t1.f else 11 end)))) | case +~cast(avg(t1.a) AS integer)-+abs(count(distinct 17)) when case count(distinct t1.b)*cast(avg(t1.a) AS integer)+count(distinct t1.c) when count(*) then min(t1.f) else count(distinct e) end then max((t1.e)) else max(d) end-count(*) from t1) FROM t1 WHERE e not between (select min(t1.a) from t1) and d+t1.d} +} {} +do_test randexpr-2.278 { + db eval {SELECT (select max(t1.a)*abs(count(distinct f-t1.a-(abs(t1.a)/abs(case e when case t1.f when e*(b)-19 then 11 else t1.d end then t1.f else 11 end)))) | case +~cast(avg(t1.a) AS integer)-+abs(count(distinct 17)) when case count(distinct t1.b)*cast(avg(t1.a) AS integer)+count(distinct t1.c) when count(*) then min(t1.f) else count(distinct e) end then max((t1.e)) else max(d) end-count(*) from t1) FROM t1 WHERE NOT (e not between (select min(t1.a) from t1) and d+t1.d)} +} {495} +do_test randexpr-2.279 { + db eval {SELECT (select max(t1.a)*abs(count(distinct f-t1.a-(abs(t1.a)/abs(case e when case t1.f when e*(b)-19 then 11 else t1.d end then t1.f else 11 end)))) & case +~cast(avg(t1.a) AS integer)-+abs(count(distinct 17)) when case count(distinct t1.b)*cast(avg(t1.a) AS integer)+count(distinct t1.c) when count(*) then min(t1.f) else count(distinct e) end then max((t1.e)) else max(d) end-count(*) from t1) FROM t1 WHERE NOT (e not between (select min(t1.a) from t1) and d+t1.d)} +} {4} +do_test randexpr-2.280 { + db eval {SELECT e*c-t1.e*t1.c++(select abs(case abs(max(coalesce((select f from t1 where (b*coalesce((select max(b) from t1 where 17>f),t1.e) not between c and f or t1.a>d and t1.a<11 or a<> -f)),(t1.a)))) when -+count(distinct e) then -+(count(distinct 19))+min(t1.c) else -min(t1.f) end) from t1)+t1.b-19 FROM t1 WHERE d-coalesce((select max(t1.d*c+c) from t1 where t1.f between (select count(distinct t1.b-e)+abs(~case case ~cast(avg(t1.f) AS integer)-count(distinct t1.a) when count(*) then count(distinct e) else count(*) end when min(e) then min( -t1.c) else max(a) end) from t1)+17 and -case when d>=d-11 then t1.c else t1.a end),t1.a)*t1.a not in (t1.f,t1.f,d)} +} {781} +do_test randexpr-2.281 { + db eval {SELECT e*c-t1.e*t1.c++(select abs(case abs(max(coalesce((select f from t1 where (b*coalesce((select max(b) from t1 where 17>f),t1.e) not between c and f or t1.a>d and t1.a<11 or a<> -f)),(t1.a)))) when -+count(distinct e) then -+(count(distinct 19))+min(t1.c) else -min(t1.f) end) from t1)+t1.b-19 FROM t1 WHERE NOT (d-coalesce((select max(t1.d*c+c) from t1 where t1.f between (select count(distinct t1.b-e)+abs(~case case ~cast(avg(t1.f) AS integer)-count(distinct t1.a) when count(*) then count(distinct e) else count(*) end when min(e) then min( -t1.c) else max(a) end) from t1)+17 and -case when d>=d-11 then t1.c else t1.a end),t1.a)*t1.a not in (t1.f,t1.f,d))} +} {} +do_test randexpr-2.282 { + db eval {SELECT coalesce((select a from t1 where (exists(select 1 from t1 where 11-case when ((select count(distinct t1.e+d) from t1))<>t1.d*f then -13 when t1.b in (select abs(cast(avg(c) AS integer)) from t1 union select count(distinct 
e)*count(distinct c)-cast(avg(e) AS integer) from t1) or c in (select t1.d from t1 union select 17 from t1) then a else t1.b end>=t1.c) and f>=d and d>=t1.f)),case when 11=19 and t1.a between t1.d and e then t1.d-b else t1.b end) FROM t1 WHERE 19 not in ( -t1.f,+19,t1.b)} +} {} +do_test randexpr-2.283 { + db eval {SELECT coalesce((select a from t1 where (exists(select 1 from t1 where 11-case when ((select count(distinct t1.e+d) from t1))<>t1.d*f then -13 when t1.b in (select abs(cast(avg(c) AS integer)) from t1 union select count(distinct e)*count(distinct c)-cast(avg(e) AS integer) from t1) or c in (select t1.d from t1 union select 17 from t1) then a else t1.b end>=t1.c) and f>=d and d>=t1.f)),case when 11=19 and t1.a between t1.d and e then t1.d-b else t1.b end) FROM t1 WHERE NOT (19 not in ( -t1.f,+19,t1.b))} +} {200} +do_test randexpr-2.284 { + db eval {SELECT (~case when t1.f>(abs(t1.c)/abs(13-coalesce((select max(t1.a) from t1 where case when e not in (c,t1.a,~11) then 19+( -c) else a end in (select (19) from t1 union select t1.c from t1)),t1.b)+a)) and e>d and (t1.a)=e then t1.e when (not exists(select 1 from t1 where -t1.d=e) and -d in (select d from t1 union select e from t1)) then t1.b else a end) FROM t1 WHERE (t1.f in (select coalesce((select max(coalesce((select c from t1 where coalesce((select max(coalesce((select max(++13*t1.d) from t1 where case when case when e<>13 or b<= -t1.a then t1.c when t1.a not in ( - -13,t1.a,13) then a else c end not in ( -e,d,(11)) or t1.d -b then t1.e else t1.d endt1.b),c) not in (t1.e,c,b)),13)) from t1 where ((t1.f) in (select max(a) from t1 union select abs(abs(min(13))) from t1))),11) from t1 union select b from t1))} +} {} +do_test randexpr-2.285 { + db eval {SELECT (~case when t1.f>(abs(t1.c)/abs(13-coalesce((select max(t1.a) from t1 where case when e not in (c,t1.a,~11) then 19+( -c) else a end in (select (19) from t1 union select t1.c from t1)),t1.b)+a)) and e>d and (t1.a)=e then t1.e when (not exists(select 1 from t1 where -t1.d=e) and -d in (select d from t1 union select e from t1)) then t1.b else a end) FROM t1 WHERE NOT ((t1.f in (select coalesce((select max(coalesce((select c from t1 where coalesce((select max(coalesce((select max(++13*t1.d) from t1 where case when case when e<>13 or b<= -t1.a then t1.c when t1.a not in ( - -13,t1.a,13) then a else c end not in ( -e,d,(11)) or t1.d -b then t1.e else t1.d endt1.b),c) not in (t1.e,c,b)),13)) from t1 where ((t1.f) in (select max(a) from t1 union select abs(abs(min(13))) from t1))),11) from t1 union select b from t1)))} +} {-101} +do_test randexpr-2.286 { + db eval {SELECT (select count(distinct case b when 13 then (select (+(cast(avg(+(d)*case t1.f-c*(abs(case when case when not exists(select 1 from t1 where not exists(select 1 from t1 where (d<=t1.e) or d between c and t1.b)) then 11 else 19 end in ((t1.d),t1.a,t1.f) then t1.f when c<=t1.e then t1.c else b end | a)/abs(c))+t1.b when t1.c then a else d end) AS integer))) from t1) else t1.b*(e) end) from t1) FROM t1 WHERE not t1.a+t1.b | t1.f-11*(select max(+case when case ~e-(abs(d*t1.d+t1.d)/abs( -f)) when t1.a then t1.e else t1.c end in (select cast(avg((f)) AS integer) from t1 union select +~(min(t1.d)) from t1) then (t1.d) when not exists(select 1 from t1 where a not in (17,a,d)) then 19 else -t1.a end* -t1.d) from t1)* -17>13} +} {1} +do_test randexpr-2.287 { + db eval {SELECT (select count(distinct case b when 13 then (select (+(cast(avg(+(d)*case t1.f-c*(abs(case when case when not exists(select 1 from t1 where not exists(select 
1 from t1 where (d<=t1.e) or d between c and t1.b)) then 11 else 19 end in ((t1.d),t1.a,t1.f) then t1.f when c<=t1.e then t1.c else b end | a)/abs(c))+t1.b when t1.c then a else d end) AS integer))) from t1) else t1.b*(e) end) from t1) FROM t1 WHERE NOT (not t1.a+t1.b | t1.f-11*(select max(+case when case ~e-(abs(d*t1.d+t1.d)/abs( -f)) when t1.a then t1.e else t1.c end in (select cast(avg((f)) AS integer) from t1 union select +~(min(t1.d)) from t1) then (t1.d) when not exists(select 1 from t1 where a not in (17,a,d)) then 19 else -t1.a end* -t1.d) from t1)* -17>13)} +} {} +do_test randexpr-2.288 { + db eval {SELECT (select count(distinct case b when 13 then (select (+(cast(avg(+(d)*case t1.f-c*(abs(case when case when not exists(select 1 from t1 where not exists(select 1 from t1 where (d<=t1.e) or d between c and t1.b)) then 11 else 19 end in ((t1.d),t1.a,t1.f) then t1.f when c<=t1.e then t1.c else b end & a)/abs(c))+t1.b when t1.c then a else d end) AS integer))) from t1) else t1.b*(e) end) from t1) FROM t1 WHERE not t1.a+t1.b | t1.f-11*(select max(+case when case ~e-(abs(d*t1.d+t1.d)/abs( -f)) when t1.a then t1.e else t1.c end in (select cast(avg((f)) AS integer) from t1 union select +~(min(t1.d)) from t1) then (t1.d) when not exists(select 1 from t1 where a not in (17,a,d)) then 19 else -t1.a end* -t1.d) from t1)* -17>13} +} {1} +do_test randexpr-2.289 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where ((~t1.f) between b and (abs(e)/abs(f)))),13+(select case abs((+count(*)*max(19))) when (~min(b)) then -min(a) | min( -a) else (max(t1.c)) end from t1)+17*f+11)) from t1 where not 19<=coalesce((select e from t1 where t1.d-d not between 19 and 19),11)),13) FROM t1 WHERE t1.f+t1.d=t1.f+17-case when (abs(t1.a*(case when not exists(select 1 from t1 where not exists(select 1 from t1 where (b) between d and t1.f)) then (t1.b | -(t1.c)) | 19 when d=t1.a then 13 else d end)+ -t1.d)/abs(d)) not in (d,e,t1.d) then t1.f when 19 in (select t1.f from t1 union select t1.e from t1) then e else -t1.a end- - -t1.e*17-t1.d-t1.d} +} {} +do_test randexpr-2.290 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where ((~t1.f) between b and (abs(e)/abs(f)))),13+(select case abs((+count(*)*max(19))) when (~min(b)) then -min(a) | min( -a) else (max(t1.c)) end from t1)+17*f+11)) from t1 where not 19<=coalesce((select e from t1 where t1.d-d not between 19 and 19),11)),13) FROM t1 WHERE NOT (t1.f+t1.d=t1.f+17-case when (abs(t1.a*(case when not exists(select 1 from t1 where not exists(select 1 from t1 where (b) between d and t1.f)) then (t1.b | -(t1.c)) | 19 when d=t1.a then 13 else d end)+ -t1.d)/abs(d)) not in (d,e,t1.d) then t1.f when 19 in (select t1.f from t1 union select t1.e from t1) then e else -t1.a end- - -t1.e*17-t1.d-t1.d)} +} {13} +do_test randexpr-2.291 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where ((~t1.f) between b and (abs(e)/abs(f)))),13+(select case abs((+count(*)*max(19))) when (~min(b)) then -min(a) & min( -a) else (max(t1.c)) end from t1)+17*f+11)) from t1 where not 19<=coalesce((select e from t1 where t1.d-d not between 19 and 19),11)),13) FROM t1 WHERE NOT (t1.f+t1.d=t1.f+17-case when (abs(t1.a*(case when not exists(select 1 from t1 where not exists(select 1 from t1 where (b) between d and t1.f)) then (t1.b | -(t1.c)) | 19 when d=t1.a then 13 else d end)+ -t1.d)/abs(d)) not in (d,e,t1.d) then t1.f when 19 in (select t1.f from t1 union select t1.e from t1) then e else -t1.a end- - -t1.e*17-t1.d-t1.d)} +} {13} 
+do_test randexpr-2.292 { + db eval {SELECT case when -t1.b=c and ((t1.c>t1.b)) and not exists(select 1 from t1 where (abs((select max(t1.d) from t1))/abs((abs(c+t1.b)/abs(t1.d))-t1.e))+c in (select cast(avg((t1.a)) AS integer) | abs(count(distinct t1.d))+count(*) | count(*)+count(*) from t1 union select (min(t1.d)) from t1)) then (abs(t1.b)/abs(t1.f)) when t1.b not in (a,t1.f,(a)) then t1.b else t1.d end-11 FROM t1 WHERE case when ~e in (coalesce((select 13 from t1 where 17 between t1.a and b or not exists(select 1 from t1 where (c in (11,e,a)))),t1.e),c,e) then e else t1.f end in (select ++abs(case case count(*) when (max(t1.d)) then cast(avg(t1.c) AS integer) else count(distinct b) end | count(*) when max(e) then min(c) else max(13) end | count(*)*count(*)) | max(t1.a) from t1 union select count(distinct t1.c) from t1) or b in (select cast(avg(t1.b) AS integer) from t1 union select cast(avg( -t1.a) AS integer) from t1)} +} {189} +do_test randexpr-2.293 { + db eval {SELECT case when -t1.b=c and ((t1.c>t1.b)) and not exists(select 1 from t1 where (abs((select max(t1.d) from t1))/abs((abs(c+t1.b)/abs(t1.d))-t1.e))+c in (select cast(avg((t1.a)) AS integer) | abs(count(distinct t1.d))+count(*) | count(*)+count(*) from t1 union select (min(t1.d)) from t1)) then (abs(t1.b)/abs(t1.f)) when t1.b not in (a,t1.f,(a)) then t1.b else t1.d end-11 FROM t1 WHERE NOT (case when ~e in (coalesce((select 13 from t1 where 17 between t1.a and b or not exists(select 1 from t1 where (c in (11,e,a)))),t1.e),c,e) then e else t1.f end in (select ++abs(case case count(*) when (max(t1.d)) then cast(avg(t1.c) AS integer) else count(distinct b) end | count(*) when max(e) then min(c) else max(13) end | count(*)*count(*)) | max(t1.a) from t1 union select count(distinct t1.c) from t1) or b in (select cast(avg(t1.b) AS integer) from t1 union select cast(avg( -t1.a) AS integer) from t1))} +} {} +do_test randexpr-2.294 { + db eval {SELECT case when -t1.b=c and ((t1.c>t1.b)) and not exists(select 1 from t1 where (abs((select max(t1.d) from t1))/abs((abs(c+t1.b)/abs(t1.d))-t1.e))+c in (select cast(avg((t1.a)) AS integer) & abs(count(distinct t1.d))+count(*) & count(*)+count(*) from t1 union select (min(t1.d)) from t1)) then (abs(t1.b)/abs(t1.f)) when t1.b not in (a,t1.f,(a)) then t1.b else t1.d end-11 FROM t1 WHERE case when ~e in (coalesce((select 13 from t1 where 17 between t1.a and b or not exists(select 1 from t1 where (c in (11,e,a)))),t1.e),c,e) then e else t1.f end in (select ++abs(case case count(*) when (max(t1.d)) then cast(avg(t1.c) AS integer) else count(distinct b) end | count(*) when max(e) then min(c) else max(13) end | count(*)*count(*)) | max(t1.a) from t1 union select count(distinct t1.c) from t1) or b in (select cast(avg(t1.b) AS integer) from t1 union select cast(avg( -t1.a) AS integer) from t1)} +} {189} +do_test randexpr-2.295 { + db eval {SELECT case when (((abs(11 | e)/abs(17-d))+t1.b | a<>11)) and t1.d in (13,t1.c,11) or 17 between t1.f and b or not f in (select 19 from t1 union select t1.e from t1) and b<>19 or e between 19 and t1.d or b<=d or d<17 or t1.a<>t1.f then 17 when t1.e=t1.b then 11 else coalesce((select t1.e from t1 where (b) between - -d and e),t1.b) end FROM t1 WHERE t1.a between d and +17-17*coalesce((select ~ -19-13-19 from t1 where 19>(select abs(count(distinct t1.d-17))+count(distinct case when 19>17 then t1.f else t1.e end+b) | max(t1.b)+count(distinct 13) from t1)-coalesce((select 13 from t1 where t1.e not in (19,17,e) and 11<=t1.d),c)),t1.f)} +} {} +do_test randexpr-2.296 { + db 
eval {SELECT case when (((abs(11 | e)/abs(17-d))+t1.b | a<>11)) and t1.d in (13,t1.c,11) or 17 between t1.f and b or not f in (select 19 from t1 union select t1.e from t1) and b<>19 or e between 19 and t1.d or b<=d or d<17 or t1.a<>t1.f then 17 when t1.e=t1.b then 11 else coalesce((select t1.e from t1 where (b) between - -d and e),t1.b) end FROM t1 WHERE NOT (t1.a between d and +17-17*coalesce((select ~ -19-13-19 from t1 where 19>(select abs(count(distinct t1.d-17))+count(distinct case when 19>17 then t1.f else t1.e end+b) | max(t1.b)+count(distinct 13) from t1)-coalesce((select 13 from t1 where t1.e not in (19,17,e) and 11<=t1.d),c)),t1.f))} +} {17} +do_test randexpr-2.297 { + db eval {SELECT case when (((abs(11 & e)/abs(17-d))+t1.b & a<>11)) and t1.d in (13,t1.c,11) or 17 between t1.f and b or not f in (select 19 from t1 union select t1.e from t1) and b<>19 or e between 19 and t1.d or b<=d or d<17 or t1.a<>t1.f then 17 when t1.e=t1.b then 11 else coalesce((select t1.e from t1 where (b) between - -d and e),t1.b) end FROM t1 WHERE NOT (t1.a between d and +17-17*coalesce((select ~ -19-13-19 from t1 where 19>(select abs(count(distinct t1.d-17))+count(distinct case when 19>17 then t1.f else t1.e end+b) | max(t1.b)+count(distinct 13) from t1)-coalesce((select 13 from t1 where t1.e not in (19,17,e) and 11<=t1.d),c)),t1.f))} +} {17} +do_test randexpr-2.298 { + db eval {SELECT case when 19=13 or 13+a | 19*11+b<>b and ((19<=b+(select count(*) from t1))) then t1.d when t1.e<= -19 then d-t1.e+(select ~max(19)*+(count(distinct b)) from t1)+t1.d*t1.d+f else t1.c end FROM t1 WHERE (13*case when 19*case when 19 not between 17 and t1.e then t1.e when ((case when case case when e<>11 or t1.b not in (t1.a, -c,17) then b else a end when -e then b else a end*a in (select 19 from t1 union select 13 from t1) then e else t1.c end in (select -abs(+count(*)) from t1 union select count(distinct f) from t1))) then 19 else d end>=f then t1.a when t1.e>=b then a else c end>=t1.d)} +} {400} +do_test randexpr-2.299 { + db eval {SELECT case when 19=13 or 13+a | 19*11+b<>b and ((19<=b+(select count(*) from t1))) then t1.d when t1.e<= -19 then d-t1.e+(select ~max(19)*+(count(distinct b)) from t1)+t1.d*t1.d+f else t1.c end FROM t1 WHERE NOT ((13*case when 19*case when 19 not between 17 and t1.e then t1.e when ((case when case case when e<>11 or t1.b not in (t1.a, -c,17) then b else a end when -e then b else a end*a in (select 19 from t1 union select 13 from t1) then e else t1.c end in (select -abs(+count(*)) from t1 union select count(distinct f) from t1))) then 19 else d end>=f then t1.a when t1.e>=b then a else c end>=t1.d))} +} {} +do_test randexpr-2.300 { + db eval {SELECT case when 19=13 or 13+a & 19*11+b<>b and ((19<=b+(select count(*) from t1))) then t1.d when t1.e<= -19 then d-t1.e+(select ~max(19)*+(count(distinct b)) from t1)+t1.d*t1.d+f else t1.c end FROM t1 WHERE (13*case when 19*case when 19 not between 17 and t1.e then t1.e when ((case when case case when e<>11 or t1.b not in (t1.a, -c,17) then b else a end when -e then b else a end*a in (select 19 from t1 union select 13 from t1) then e else t1.c end in (select -abs(+count(*)) from t1 union select count(distinct f) from t1))) then 19 else d end>=f then t1.a when t1.e>=b then a else c end>=t1.d)} +} {400} +do_test randexpr-2.301 { + db eval {SELECT +coalesce((select t1.d from t1 where coalesce((select t1.c+t1.b-case (19) when t1.c then 13+13+17 else t1.e end from t1 where (not exists(select 1 from t1 where t1.f<13)) and d not in (t1.e,11,19) and not 19>=t1.f 
or e in (select min(t1.c) from t1 union select cast(avg(t1.c) AS integer) from t1)), -a)-b*(13)(+b))} +} {500} +do_test randexpr-2.302 { + db eval {SELECT +coalesce((select t1.d from t1 where coalesce((select t1.c+t1.b-case (19) when t1.c then 13+13+17 else t1.e end from t1 where (not exists(select 1 from t1 where t1.f<13)) and d not in (t1.e,11,19) and not 19>=t1.f or e in (select min(t1.c) from t1 union select cast(avg(t1.c) AS integer) from t1)), -a)-b*(13)(+b)))} +} {} +do_test randexpr-2.303 { + db eval {SELECT +coalesce((select t1.d from t1 where coalesce((select t1.c+t1.b-case (19) when t1.c then 13+13+17 else t1.e end from t1 where (not exists(select 1 from t1 where t1.f<13)) and d not in (t1.e,11,19) and not 19>=t1.f or e in (select min(t1.c) from t1 union select cast(avg(t1.c) AS integer) from t1)), -a)-b*(13)(+b))} +} {400} +do_test randexpr-2.304 { + db eval {SELECT case when (((t1.c)<=a-11*t1.e)) then t1.b else a end | ((abs(b*(abs(t1.e | +c)/abs(11)))/abs(t1.a))) FROM t1 WHERE case when t1.e in (select t1.a-e from t1 union select t1.e from t1) then 19*coalesce((select a*t1.c-19+f+t1.d-case case when 11>=d then f when not f<>17 then t1.d else t1.f end+t1.e-t1.c when d then b else t1.f end from t1 where t1.f>a), -t1.a)- -(t1.c)-t1.e+b else 11 end=d then f when not f<>17 then t1.d else t1.f end+t1.e-t1.c when d then b else t1.f end from t1 where t1.f>a), -t1.a)- -(t1.c)-t1.e+b else 11 end=d then f when not f<>17 then t1.d else t1.f end+t1.e-t1.c when d then b else t1.f end from t1 where t1.f>a), -t1.a)- -(t1.c)-t1.e+b else 11 enda),13)*11)-t1.c from t1 where 11 between t1.a and -13+t1.f*~coalesce((select max(b-f) from t1 where t1.a<=13 or a not in (13,f,17)),b) or (t1.d<19 or b> -t1.c)),b) FROM t1 WHERE 11 not between d and coalesce((select -(select (cast(avg(d) AS integer)) from t1) from t1 where case when 1713 then e when 19 between t1.a and t1.b then b else b end end*13<=c),t1.c) end in (select t1.e from t1 union select d from t1)),t1.b)} +} {300} +do_test randexpr-2.311 { + db eval {SELECT t1.f+coalesce((select t1.b-13-(coalesce((select 17 from t1 where b>a),13)*11)-t1.c from t1 where 11 between t1.a and -13+t1.f*~coalesce((select max(b-f) from t1 where t1.a<=13 or a not in (13,f,17)),b) or (t1.d<19 or b> -t1.c)),b) FROM t1 WHERE NOT (11 not between d and coalesce((select -(select (cast(avg(d) AS integer)) from t1) from t1 where case when 1713 then e when 19 between t1.a and t1.b then b else b end end*13<=c),t1.c) end in (select t1.e from t1 union select d from t1)),t1.b))} +} {} +do_test randexpr-2.312 { + db eval {SELECT (abs(13)/abs((select min(case when case when exists(select 1 from t1 where not exists(select 1 from t1 where (select count(distinct (t1.e)) from t1)+t1.f>t1.e) and t1.a>=13 or f in (select t1.e from t1 union select t1.e from t1)) then b when f not in (13,13,13) then coalesce((select max(d*f) from t1 where t1.e in (t1.d,t1.e,(e))),t1.e) else a end>=t1.f then b when t1.c in (e,f,t1.e) then b else t1.c end)*abs(count(*))-abs(~count(*)-min(e))-cast(avg(17) AS integer) from t1))) FROM t1 WHERE t1.b not in (a*17-17-case when -17-t1.f in (select coalesce((select t1.a from t1 where (d)=t1.a),d) from t1 union select 19 from t1) or 13 not between 17 and 13 and (19 between (t1.e) and t1.f) or ( -t1.e)>=17 then b when t1.d not in (19,f,t1.f) then a else case when t1.a=t1.d then e when 17 in (t1.e,t1.b,t1.d) then t1.a else a end end,e,11)} +} {0} +do_test randexpr-2.313 { + db eval {SELECT (abs(13)/abs((select min(case when case when exists(select 1 from t1 where not 
exists(select 1 from t1 where (select count(distinct (t1.e)) from t1)+t1.f>t1.e) and t1.a>=13 or f in (select t1.e from t1 union select t1.e from t1)) then b when f not in (13,13,13) then coalesce((select max(d*f) from t1 where t1.e in (t1.d,t1.e,(e))),t1.e) else a end>=t1.f then b when t1.c in (e,f,t1.e) then b else t1.c end)*abs(count(*))-abs(~count(*)-min(e))-cast(avg(17) AS integer) from t1))) FROM t1 WHERE NOT (t1.b not in (a*17-17-case when -17-t1.f in (select coalesce((select t1.a from t1 where (d)=t1.a),d) from t1 union select 19 from t1) or 13 not between 17 and 13 and (19 between (t1.e) and t1.f) or ( -t1.e)>=17 then b when t1.d not in (19,f,t1.f) then a else case when t1.a=t1.d then e when 17 in (t1.e,t1.b,t1.d) then t1.a else a end end,e,11))} +} {} +do_test randexpr-2.314 { + db eval {SELECT (abs(case coalesce((select t1.d* -case when d in (select cast(avg(13) AS integer)*abs(((cast(avg(d) AS integer)))+ -count(distinct c))-max(t1.c) from t1 union select min(e) from t1) then (abs(19)/abs(case when b not in (19,d,13) or t1.b not in (b,17,17) then e else b end)) else 19 end from t1 where a>=t1.c and 17 not in (t1.c,17,t1.f)),13)*t1.f when e then (11) else 17 end+19 | b)/abs(f))-t1.e FROM t1 WHERE not exists(select 1 from t1 where case a when coalesce((select max(case 19 when 17 then b-d else t1.e end) from t1 where ( -coalesce((select max(t1.a) from t1 where e between t1.b and (abs(e)/abs(17))),(abs(case when case when 13 between e and (t1.b) and 11 not in (t1.f,t1.e,c) then (t1.d) else -c end< -e then 11 when 13>t1.b then 11 else t1.a end*13)/abs((f)))-t1.a) not in (e,(b),t1.c))),19) then t1.b else 13 end not between t1.b and 11)} +} {} +do_test randexpr-2.315 { + db eval {SELECT (abs(case coalesce((select t1.d* -case when d in (select cast(avg(13) AS integer)*abs(((cast(avg(d) AS integer)))+ -count(distinct c))-max(t1.c) from t1 union select min(e) from t1) then (abs(19)/abs(case when b not in (19,d,13) or t1.b not in (b,17,17) then e else b end)) else 19 end from t1 where a>=t1.c and 17 not in (t1.c,17,t1.f)),13)*t1.f when e then (11) else 17 end+19 | b)/abs(f))-t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where case a when coalesce((select max(case 19 when 17 then b-d else t1.e end) from t1 where ( -coalesce((select max(t1.a) from t1 where e between t1.b and (abs(e)/abs(17))),(abs(case when case when 13 between e and (t1.b) and 11 not in (t1.f,t1.e,c) then (t1.d) else -c end< -e then 11 when 13>t1.b then 11 else t1.a end*13)/abs((f)))-t1.a) not in (e,(b),t1.c))),19) then t1.b else 13 end not between t1.b and 11))} +} {-500} +do_test randexpr-2.316 { + db eval {SELECT (abs(case coalesce((select t1.d* -case when d in (select cast(avg(13) AS integer)*abs(((cast(avg(d) AS integer)))+ -count(distinct c))-max(t1.c) from t1 union select min(e) from t1) then (abs(19)/abs(case when b not in (19,d,13) or t1.b not in (b,17,17) then e else b end)) else 19 end from t1 where a>=t1.c and 17 not in (t1.c,17,t1.f)),13)*t1.f when e then (11) else 17 end+19 & b)/abs(f))-t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where case a when coalesce((select max(case 19 when 17 then b-d else t1.e end) from t1 where ( -coalesce((select max(t1.a) from t1 where e between t1.b and (abs(e)/abs(17))),(abs(case when case when 13 between e and (t1.b) and 11 not in (t1.f,t1.e,c) then (t1.d) else -c end< -e then 11 when 13>t1.b then 11 else t1.a end*13)/abs((f)))-t1.a) not in (e,(b),t1.c))),19) then t1.b else 13 end not between t1.b and 11))} +} {-500} +do_test randexpr-2.317 { + db eval {SELECT 
-(+case when ~t1.d-f-case coalesce((select max((select (min((abs(t1.f)/abs(t1.b))-17)) from t1)) from t1 where e<=17),f) when t1.f then (select case -( -count(distinct b)) when cast(avg( -13) AS integer) then cast(avg(13) AS integer) else max(19) end from t1)-t1.e else 13 end*b between t1.c and t1.d then t1.d when et1.f),d)*t1.e)+max(t1.f)+cast(avg(e) AS integer) | (abs((count(distinct 17)))) from t1 union select max(t1.b) from t1) or 17 in (select -t1.b from t1 union select t1.a from t1) and exists(select 1 from t1 where t1.e=t1.c))),f+(t1.e)) FROM t1 WHERE exists(select 1 from t1 where +t1.d between (abs((abs(t1.c)/abs(case when not exists(select 1 from t1 where d*~(select abs(min(13))-( -(max(t1.f))*( -(count(distinct a)))-max(17)*min(t1.a) | (cast(avg(t1.b) AS integer))) from t1)>case when a in (select max(c) from t1 union select count(distinct -t1.a) from t1) or +17 in (select -count(distinct t1.f) from t1 union select cast(avg(t1.f) AS integer) from t1) then c else t1.a end) then -t1.f else t1.f end)))/abs(c+a)) and d)} +} {17} +do_test randexpr-2.323 { + db eval {SELECT coalesce((select 17 from t1 where not exists(select 1 from t1 where 11 in (select count(distinct d-coalesce((select t1.c-t1.f from t1 where 13 in (19,e,t1.b) and e in (select ( -count(*)) from t1 union select max(e) from t1) or t1.a>t1.f),d)*t1.e)+max(t1.f)+cast(avg(e) AS integer) | (abs((count(distinct 17)))) from t1 union select max(t1.b) from t1) or 17 in (select -t1.b from t1 union select t1.a from t1) and exists(select 1 from t1 where t1.e=t1.c))),f+(t1.e)) FROM t1 WHERE NOT (exists(select 1 from t1 where +t1.d between (abs((abs(t1.c)/abs(case when not exists(select 1 from t1 where d*~(select abs(min(13))-( -(max(t1.f))*( -(count(distinct a)))-max(17)*min(t1.a) | (cast(avg(t1.b) AS integer))) from t1)>case when a in (select max(c) from t1 union select count(distinct -t1.a) from t1) or +17 in (select -count(distinct t1.f) from t1 union select cast(avg(t1.f) AS integer) from t1) then c else t1.a end) then -t1.f else t1.f end)))/abs(c+a)) and d))} +} {} +do_test randexpr-2.324 { + db eval {SELECT coalesce((select 17 from t1 where not exists(select 1 from t1 where 11 in (select count(distinct d-coalesce((select t1.c-t1.f from t1 where 13 in (19,e,t1.b) and e in (select ( -count(*)) from t1 union select max(e) from t1) or t1.a>t1.f),d)*t1.e)+max(t1.f)+cast(avg(e) AS integer) & (abs((count(distinct 17)))) from t1 union select max(t1.b) from t1) or 17 in (select -t1.b from t1 union select t1.a from t1) and exists(select 1 from t1 where t1.e=t1.c))),f+(t1.e)) FROM t1 WHERE exists(select 1 from t1 where +t1.d between (abs((abs(t1.c)/abs(case when not exists(select 1 from t1 where d*~(select abs(min(13))-( -(max(t1.f))*( -(count(distinct a)))-max(17)*min(t1.a) | (cast(avg(t1.b) AS integer))) from t1)>case when a in (select max(c) from t1 union select count(distinct -t1.a) from t1) or +17 in (select -count(distinct t1.f) from t1 union select cast(avg(t1.f) AS integer) from t1) then c else t1.a end) then -t1.f else t1.f end)))/abs(c+a)) and d)} +} {17} +do_test randexpr-2.325 { + db eval {SELECT t1.d*17- -case when case (b) when t1.c then (abs(t1.f)/abs((select + -~abs(min(f) | +cast(avg(++t1.e+t1.d) AS integer) | ((count(distinct t1.a)-count(*)))) from t1))) else c*case t1.b when t1.d then (t1.c) else t1.c end*(b)-c end=t1.d then t1.c when -17=c then t1.e else t1.c end FROM t1 WHERE t1.b in (f,b,a)} +} {7100} +do_test randexpr-2.326 { + db eval {SELECT t1.d*17- -case when case (b) when t1.c then (abs(t1.f)/abs((select + 
-~abs(min(f) | +cast(avg(++t1.e+t1.d) AS integer) | ((count(distinct t1.a)-count(*)))) from t1))) else c*case t1.b when t1.d then (t1.c) else t1.c end*(b)-c end=t1.d then t1.c when -17=c then t1.e else t1.c end FROM t1 WHERE NOT (t1.b in (f,b,a))} +} {} +do_test randexpr-2.327 { + db eval {SELECT t1.d*17- -case when case (b) when t1.c then (abs(t1.f)/abs((select + -~abs(min(f) & +cast(avg(++t1.e+t1.d) AS integer) & ((count(distinct t1.a)-count(*)))) from t1))) else c*case t1.b when t1.d then (t1.c) else t1.c end*(b)-c end=t1.d then t1.c when -17=c then t1.e else t1.c end FROM t1 WHERE t1.b in (f,b,a)} +} {7100} +do_test randexpr-2.328 { + db eval {SELECT coalesce((select e-t1.d-+b*coalesce((select 11 from t1 where (t1.a<=coalesce((select - -coalesce((select max(case case 19 when t1.d then t1.e else t1.a end when b then 13 else t1.e end) from t1 where (c)>=(b)),t1.c)+t1.b from t1 where t1.e>e and exists(select 1 from t1 where b in (t1.b,e,t1.c) or e not between a and d) and 13(coalesce((select (f)+t1.c from t1 where a>t1.c),b))),t1.e)) then 19 else e end when t1.d then t1.e else b end from t1 where t1.b>=f),13)-b*17) from t1 where 11<>e),t1.b)<(c))} +} {500} +do_test randexpr-2.329 { + db eval {SELECT coalesce((select e-t1.d-+b*coalesce((select 11 from t1 where (t1.a<=coalesce((select - -coalesce((select max(case case 19 when t1.d then t1.e else t1.a end when b then 13 else t1.e end) from t1 where (c)>=(b)),t1.c)+t1.b from t1 where t1.e>e and exists(select 1 from t1 where b in (t1.b,e,t1.c) or e not between a and d) and 13(coalesce((select (f)+t1.c from t1 where a>t1.c),b))),t1.e)) then 19 else e end when t1.d then t1.e else b end from t1 where t1.b>=f),13)-b*17) from t1 where 11<>e),t1.b)<(c)))} +} {} +do_test randexpr-2.330 { + db eval {SELECT (abs(case when ++e not in (~c-(19)*+case t1.c when t1.d-coalesce((select max(t1.f) from t1 where (select count(*) from t1)>case when exists(select 1 from t1 where not exists(select 1 from t1 where (not not exists(select 1 from t1 where 19e),t1.e) when 13 then b else t1.c end end+f),c)*t1.c then t1.e else t1.b end,11,b) then -(t1.b) else a end)/abs(13)) FROM t1 WHERE a*t1.a<=17 and exists(select 1 from t1 where (+case coalesce((select max(f) from t1 where d in (select ((+count(*)-( -max(b))))*min(t1.c)* -max(11) from t1 union select count(distinct (19)) from t1)),t1.c) when 13 then 19 else coalesce((select t1.a from t1 where t1.c not in (t1.c,f,13)),b) end)>t1.c) and ( -( -19) between 13 and b or b in (select 11 from t1 union select a from t1) and 19 in (t1.d,t1.d,19))} +} {} +do_test randexpr-2.331 { + db eval {SELECT (abs(case when ++e not in (~c-(19)*+case t1.c when t1.d-coalesce((select max(t1.f) from t1 where (select count(*) from t1)>case when exists(select 1 from t1 where not exists(select 1 from t1 where (not not exists(select 1 from t1 where 19e),t1.e) when 13 then b else t1.c end end+f),c)*t1.c then t1.e else t1.b end,11,b) then -(t1.b) else a end)/abs(13)) FROM t1 WHERE NOT (a*t1.a<=17 and exists(select 1 from t1 where (+case coalesce((select max(f) from t1 where d in (select ((+count(*)-( -max(b))))*min(t1.c)* -max(11) from t1 union select count(distinct (19)) from t1)),t1.c) when 13 then 19 else coalesce((select t1.a from t1 where t1.c not in (t1.c,f,13)),b) end)>t1.c) and ( -( -19) between 13 and b or b in (select 11 from t1 union select a from t1) and 19 in (t1.d,t1.d,19)))} +} {15} +do_test randexpr-2.332 { + db eval {SELECT case when a>=f then -t1.a-11*t1.c when ((t1.e-t1.e+17 between (select max(c) from t1)-13 and coalesce((select 
t1.d from t1 where ((coalesce((select max(case when (not b>17) then t1.b-b when -e in (c,a,13) then 11 else t1.a end) from t1 where 19<=11),t1.e)>=e))),t1.a))) then c else t1.b end | 17 FROM t1 WHERE 11-t1.d+d-13+(t1.f*case when exists(select 1 from t1 where ( -coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where 19 in (select d from t1 union select t1.f from t1)))),t1.d)<=case when 13*11+11 in (select abs(+~abs( -min(c))) from t1 union select (min(t1.e)) from t1) then t1.f else 19 end)) then t1.c else (abs(e)/abs(19)) end*t1.f*f)<(t1.d)} +} {} +do_test randexpr-2.333 { + db eval {SELECT case when a>=f then -t1.a-11*t1.c when ((t1.e-t1.e+17 between (select max(c) from t1)-13 and coalesce((select t1.d from t1 where ((coalesce((select max(case when (not b>17) then t1.b-b when -e in (c,a,13) then 11 else t1.a end) from t1 where 19<=11),t1.e)>=e))),t1.a))) then c else t1.b end | 17 FROM t1 WHERE NOT (11-t1.d+d-13+(t1.f*case when exists(select 1 from t1 where ( -coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where 19 in (select d from t1 union select t1.f from t1)))),t1.d)<=case when 13*11+11 in (select abs(+~abs( -min(c))) from t1 union select (min(t1.e)) from t1) then t1.f else 19 end)) then t1.c else (abs(e)/abs(19)) end*t1.f*f)<(t1.d))} +} {217} +do_test randexpr-2.334 { + db eval {SELECT case when a>=f then -t1.a-11*t1.c when ((t1.e-t1.e+17 between (select max(c) from t1)-13 and coalesce((select t1.d from t1 where ((coalesce((select max(case when (not b>17) then t1.b-b when -e in (c,a,13) then 11 else t1.a end) from t1 where 19<=11),t1.e)>=e))),t1.a))) then c else t1.b end & 17 FROM t1 WHERE NOT (11-t1.d+d-13+(t1.f*case when exists(select 1 from t1 where ( -coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where 19 in (select d from t1 union select t1.f from t1)))),t1.d)<=case when 13*11+11 in (select abs(+~abs( -min(c))) from t1 union select (min(t1.e)) from t1) then t1.f else 19 end)) then t1.c else (abs(e)/abs(19)) end*t1.f*f)<(t1.d))} +} {0} +do_test randexpr-2.335 { + db eval {SELECT case ~13*c-t1.f- -t1.f when coalesce((select max(coalesce((select max(t1.c) from t1 where coalesce((select t1.d from t1 where 11*f-a=a),t1.e) in (select case ~count(distinct 11) | min(c) when (count(*))-count(*) then max( -17) else (min(13)) end from t1 union select - -cast(avg(b) AS integer) from t1)),19)-t1.c) from t1 where exists(select 1 from t1 where t1.b not between t1.e and 11 and ((a<>t1.f)) and (t1.a)=t1.d)),17) then t1.d else -b end FROM t1 WHERE case when ~13+t1.e-19>=13 then 17 else t1.c end>=a*19 and (f in (select case min(t1.e*+(abs(11)/abs(13))) when count(distinct t1.c) | abs((min(t1.b)-(max(t1.d)))+abs(max(a))+count(*)) | cast(avg(e) AS integer) then max(c) else max(c) end from t1 union select min(t1.c) from t1))} +} {} +do_test randexpr-2.336 { + db eval {SELECT case ~13*c-t1.f- -t1.f when coalesce((select max(coalesce((select max(t1.c) from t1 where coalesce((select t1.d from t1 where 11*f-a=a),t1.e) in (select case ~count(distinct 11) | min(c) when (count(*))-count(*) then max( -17) else (min(13)) end from t1 union select - -cast(avg(b) AS integer) from t1)),19)-t1.c) from t1 where exists(select 1 from t1 where t1.b not between t1.e and 11 and ((a<>t1.f)) and (t1.a)=t1.d)),17) then t1.d else -b end FROM t1 WHERE NOT (case when ~13+t1.e-19>=13 then 17 else t1.c end>=a*19 and (f in (select case min(t1.e*+(abs(11)/abs(13))) when count(distinct t1.c) | abs((min(t1.b)-(max(t1.d)))+abs(max(a))+count(*)) | cast(avg(e) AS integer) then 
max(c) else max(c) end from t1 union select min(t1.c) from t1)))} +} {-200} +do_test randexpr-2.337 { + db eval {SELECT case ~13*c-t1.f- -t1.f when coalesce((select max(coalesce((select max(t1.c) from t1 where coalesce((select t1.d from t1 where 11*f-a=a),t1.e) in (select case ~count(distinct 11) & min(c) when (count(*))-count(*) then max( -17) else (min(13)) end from t1 union select - -cast(avg(b) AS integer) from t1)),19)-t1.c) from t1 where exists(select 1 from t1 where t1.b not between t1.e and 11 and ((a<>t1.f)) and (t1.a)=t1.d)),17) then t1.d else -b end FROM t1 WHERE NOT (case when ~13+t1.e-19>=13 then 17 else t1.c end>=a*19 and (f in (select case min(t1.e*+(abs(11)/abs(13))) when count(distinct t1.c) | abs((min(t1.b)-(max(t1.d)))+abs(max(a))+count(*)) | cast(avg(e) AS integer) then max(c) else max(c) end from t1 union select min(t1.c) from t1)))} +} {-200} +do_test randexpr-2.338 { + db eval {SELECT f*13-t1.f*(abs(19*t1.a*c-(select ~max(t1.c)- -( -cast(avg(f) AS integer))+(max(d))*cast(avg(t1.d) AS integer)*(count(*)) from t1))/abs(case when coalesce((select (coalesce((select max(t1.c) from t1 where -(b)<> -t1.a),b)+11) from t1 where not (11)<=t1.e and c not between 17 and t1.e and b not in (f,(t1.a),t1.e)),t1.f) in (select t1.d from t1 union select 19 from t1) then t1.a when f>13 then t1.e else a end)) FROM t1 WHERE -b<>b*t1.e} +} {-484800} +do_test randexpr-2.339 { + db eval {SELECT f*13-t1.f*(abs(19*t1.a*c-(select ~max(t1.c)- -( -cast(avg(f) AS integer))+(max(d))*cast(avg(t1.d) AS integer)*(count(*)) from t1))/abs(case when coalesce((select (coalesce((select max(t1.c) from t1 where -(b)<> -t1.a),b)+11) from t1 where not (11)<=t1.e and c not between 17 and t1.e and b not in (f,(t1.a),t1.e)),t1.f) in (select t1.d from t1 union select 19 from t1) then t1.a when f>13 then t1.e else a end)) FROM t1 WHERE NOT ( -b<>b*t1.e)} +} {} +do_test randexpr-2.340 { + db eval {SELECT case when coalesce((select c from t1 where (13-(11-t1.e | t1.b) in (select +min(c)*+case +min(t1.e) when min(coalesce((select t1.e*t1.a from t1 where t1.f in (t1.d,11,t1.d) or t1.e>11 or t1.d>=c),17))+max(11)-count(distinct -a) | max(11)+min(t1.c)-max(13) then count(distinct c) else (max(13)) end from t1 union select (( -cast(avg(11) AS integer))) from t1))),d) in (select f from t1 union select t1.a from t1) then 17 else 19 end FROM t1 WHERE not exists(select 1 from t1 where (select +min(case when not (f*e between (select min(~~t1.d) from t1)*(abs(c)/abs(d)) and case when f=b then t1.f when not c>=17 then ~t1.b else 11 end else 19 end) from t1)-19 not in (d,t1.a,d))} +} {} +do_test randexpr-2.341 { + db eval {SELECT case when coalesce((select c from t1 where (13-(11-t1.e | t1.b) in (select +min(c)*+case +min(t1.e) when min(coalesce((select t1.e*t1.a from t1 where t1.f in (t1.d,11,t1.d) or t1.e>11 or t1.d>=c),17))+max(11)-count(distinct -a) | max(11)+min(t1.c)-max(13) then count(distinct c) else (max(13)) end from t1 union select (( -cast(avg(11) AS integer))) from t1))),d) in (select f from t1 union select t1.a from t1) then 17 else 19 end FROM t1 WHERE NOT (not exists(select 1 from t1 where (select +min(case when not (f*e between (select min(~~t1.d) from t1)*(abs(c)/abs(d)) and case when f=b then t1.f when not c>=17 then ~t1.b else 11 end else 19 end) from t1)-19 not in (d,t1.a,d)))} +} {19} +do_test randexpr-2.342 { + db eval {SELECT case when coalesce((select c from t1 where (13-(11-t1.e & t1.b) in (select +min(c)*+case +min(t1.e) when min(coalesce((select t1.e*t1.a from t1 where t1.f in (t1.d,11,t1.d) or t1.e>11 
or t1.d>=c),17))+max(11)-count(distinct -a) & max(11)+min(t1.c)-max(13) then count(distinct c) else (max(13)) end from t1 union select (( -cast(avg(11) AS integer))) from t1))),d) in (select f from t1 union select t1.a from t1) then 17 else 19 end FROM t1 WHERE NOT (not exists(select 1 from t1 where (select +min(case when not (f*e between (select min(~~t1.d) from t1)*(abs(c)/abs(d)) and case when f=b then t1.f when not c>=17 then ~t1.b else 11 end else 19 end) from t1)-19 not in (d,t1.a,d)))} +} {19} +do_test randexpr-2.343 { + db eval {SELECT t1.c*a+(case when t1.b+11 | case when exists(select 1 from t1 where b<>17 or not exists(select 1 from t1 where t1.f in (19,t1.f,c)) or (t1.d)<>b) then e else d end+t1.b+t1.a*b=t1.d or d=b then t1.e when not exists(select 1 from t1 where b>t1.c or 17 between t1.f and e and d>=t1.c) then t1.d else c end) FROM t1 WHERE exists(select 1 from t1 where not exists(select 1 from t1 where (abs(c)/abs(b)) in (t1.a, -e*~e*17,17)))} +} {30400} +do_test randexpr-2.344 { + db eval {SELECT t1.c*a+(case when t1.b+11 | case when exists(select 1 from t1 where b<>17 or not exists(select 1 from t1 where t1.f in (19,t1.f,c)) or (t1.d)<>b) then e else d end+t1.b+t1.a*b=t1.d or d=b then t1.e when not exists(select 1 from t1 where b>t1.c or 17 between t1.f and e and d>=t1.c) then t1.d else c end) FROM t1 WHERE NOT (exists(select 1 from t1 where not exists(select 1 from t1 where (abs(c)/abs(b)) in (t1.a, -e*~e*17,17))))} +} {} +do_test randexpr-2.345 { + db eval {SELECT t1.c*a+(case when t1.b+11 & case when exists(select 1 from t1 where b<>17 or not exists(select 1 from t1 where t1.f in (19,t1.f,c)) or (t1.d)<>b) then e else d end+t1.b+t1.a*b=t1.d or d=b then t1.e when not exists(select 1 from t1 where b>t1.c or 17 between t1.f and e and d>=t1.c) then t1.d else c end) FROM t1 WHERE exists(select 1 from t1 where not exists(select 1 from t1 where (abs(c)/abs(b)) in (t1.a, -e*~e*17,17)))} +} {30400} +do_test randexpr-2.346 { + db eval {SELECT coalesce((select -b from t1 where t1.f=d),case when case -17-t1.b*t1.d+d when b then t1.a else t1.e end not in (t1.c,13,19) and e>f and 13 not between 17 and -f and e not between d and a then 13 when c<=a then 13 else 19 | e end)*a-d*t1.e FROM t1 WHERE (abs(+t1.f)/abs(coalesce((select max(coalesce((select max(e) from t1 where t1.a in (select (abs( -t1.b)/abs(a)) from t1 union select e from t1)),(abs(case t1.f when c then (select (~cast(avg(b*d) AS integer))*abs(min(11)) from t1) else coalesce((select f from t1 where d in (select ( -min(e)) from t1 union select -count(*) from t1)),t1.c) end*t1.b)/abs(c))+17)) from t1 where exists(select 1 from t1 where d not between t1.f and t1.b)),(t1.c))))*t1.d between t1.f and t1.a} +} {} +do_test randexpr-2.347 { + db eval {SELECT coalesce((select -b from t1 where t1.f=d),case when case -17-t1.b*t1.d+d when b then t1.a else t1.e end not in (t1.c,13,19) and e>f and 13 not between 17 and -f and e not between d and a then 13 when c<=a then 13 else 19 | e end)*a-d*t1.e FROM t1 WHERE NOT ((abs(+t1.f)/abs(coalesce((select max(coalesce((select max(e) from t1 where t1.a in (select (abs( -t1.b)/abs(a)) from t1 union select e from t1)),(abs(case t1.f when c then (select (~cast(avg(b*d) AS integer))*abs(min(11)) from t1) else coalesce((select f from t1 where d in (select ( -min(e)) from t1 union select -count(*) from t1)),t1.c) end*t1.b)/abs(c))+17)) from t1 where exists(select 1 from t1 where d not between t1.f and t1.b)),(t1.c))))*t1.d between t1.f and t1.a)} +} {-149700} +do_test randexpr-2.348 { + db eval 
{SELECT coalesce((select -b from t1 where t1.f=d),case when case -17-t1.b*t1.d+d when b then t1.a else t1.e end not in (t1.c,13,19) and e>f and 13 not between 17 and -f and e not between d and a then 13 when c<=a then 13 else 19 & e end)*a-d*t1.e FROM t1 WHERE NOT ((abs(+t1.f)/abs(coalesce((select max(coalesce((select max(e) from t1 where t1.a in (select (abs( -t1.b)/abs(a)) from t1 union select e from t1)),(abs(case t1.f when c then (select (~cast(avg(b*d) AS integer))*abs(min(11)) from t1) else coalesce((select f from t1 where d in (select ( -min(e)) from t1 union select -count(*) from t1)),t1.c) end*t1.b)/abs(c))+17)) from t1 where exists(select 1 from t1 where d not between t1.f and t1.b)),(t1.c))))*t1.d between t1.f and t1.a)} +} {-198400} +do_test randexpr-2.349 { + db eval {SELECT coalesce((select t1.b*19 from t1 where ~f in (select 19 from t1 union select ~b-+case 13 when coalesce((select max(a) from t1 where (abs(d)/abs(t1.d)) | t1.e-c-t1.d-t1.c+11*f-c+t1.a>17), -a) then f else t1.d end+17 | a*f | t1.c-d from t1)),19) FROM t1 WHERE t1.e*t1.c>coalesce((select 13 from t1 where ~t1.a<>case when exists(select 1 from t1 where e<>~d and not +17 not between a+case 13 when d then case t1.d when t1.a then b else 13 end else 13 end and 13) or 13 not in (t1.e,t1.f,t1.c) then 19-e+19 when f<=t1.f then b else 11 end),e)-13} +} {19} +do_test randexpr-2.350 { + db eval {SELECT coalesce((select t1.b*19 from t1 where ~f in (select 19 from t1 union select ~b-+case 13 when coalesce((select max(a) from t1 where (abs(d)/abs(t1.d)) | t1.e-c-t1.d-t1.c+11*f-c+t1.a>17), -a) then f else t1.d end+17 | a*f | t1.c-d from t1)),19) FROM t1 WHERE NOT (t1.e*t1.c>coalesce((select 13 from t1 where ~t1.a<>case when exists(select 1 from t1 where e<>~d and not +17 not between a+case 13 when d then case t1.d when t1.a then b else 13 end else 13 end and 13) or 13 not in (t1.e,t1.f,t1.c) then 19-e+19 when f<=t1.f then b else 11 end),e)-13)} +} {} +do_test randexpr-2.351 { + db eval {SELECT coalesce((select t1.b*19 from t1 where ~f in (select 19 from t1 union select ~b-+case 13 when coalesce((select max(a) from t1 where (abs(d)/abs(t1.d)) & t1.e-c-t1.d-t1.c+11*f-c+t1.a>17), -a) then f else t1.d end+17 & a*f & t1.c-d from t1)),19) FROM t1 WHERE t1.e*t1.c>coalesce((select 13 from t1 where ~t1.a<>case when exists(select 1 from t1 where e<>~d and not +17 not between a+case 13 when d then case t1.d when t1.a then b else 13 end else 13 end and 13) or 13 not in (t1.e,t1.f,t1.c) then 19-e+19 when f<=t1.f then b else 11 end),e)-13} +} {19} +do_test randexpr-2.352 { + db eval {SELECT (abs( -coalesce((select max((t1.f)) from t1 where not exists(select 1 from t1 where -b-~b not between t1.c and f and t1.d*( -(select abs(+count(*)) from t1))-d>=a) and case when +case when ((f)) in (select -count(distinct t1.a) from t1 union select count(*) from t1) then t1.b else d end in (select min(17) from t1 union select min(t1.e) from t1) then d else 17 end+c not between 11 and a),f)*11)/abs(a)) FROM t1 WHERE t1.f*t1.a*(+c)+case case when t1.d=f and (t1.a<>~+a*a*e*t1.e+case when t1.c between 11 and b and t1.a<>e then 13 when ( -t1.e)<>(t1.a) then (t1.b) else 13 end-t1.b-e+f) then t1.f+a else 17 end when d then 17 else d end-t1.e not in (13,t1.a,11)} +} {66} +do_test randexpr-2.353 { + db eval {SELECT (abs( -coalesce((select max((t1.f)) from t1 where not exists(select 1 from t1 where -b-~b not between t1.c and f and t1.d*( -(select abs(+count(*)) from t1))-d>=a) and case when +case when ((f)) in (select -count(distinct t1.a) from t1 union select 
count(*) from t1) then t1.b else d end in (select min(17) from t1 union select min(t1.e) from t1) then d else 17 end+c not between 11 and a),f)*11)/abs(a)) FROM t1 WHERE NOT (t1.f*t1.a*(+c)+case case when t1.d=f and (t1.a<>~+a*a*e*t1.e+case when t1.c between 11 and b and t1.a<>e then 13 when ( -t1.e)<>(t1.a) then (t1.b) else 13 end-t1.b-e+f) then t1.f+a else 17 end when d then 17 else d end-t1.e not in (13,t1.a,11))} +} {} +do_test randexpr-2.354 { + db eval {SELECT case t1.b- -t1.d+t1.d+t1.f when t1.f then 19+~13 else d end-case when not exists(select 1 from t1 where (f not between coalesce((select b from t1 where d in (17,13,t1.a) and 13 between c and 13 and 19>=f),13) and e or d not between 13 and f) or f>=d) then t1.f else coalesce((select max(~e-11) from t1 where t1.b in (f,13,13)),t1.a) end FROM t1 WHERE a=f),13) and e or d not between 13 and f) or f>=d) then t1.f else coalesce((select max(~e-11) from t1 where t1.b in (f,13,13)),t1.a) end FROM t1 WHERE NOT (a=t1.c and exists(select 1 from t1 where t1.f>=13) then t1.d when d between t1.d and t1.a then 19 else a end | e when (a) then e else a end between b and t1.c and not exists(select 1 from t1 where 13 in (t1.d,t1.a,13)) or t1.a>= -13),c) FROM t1 WHERE t1.e not in ( -coalesce((select max(13) from t1 where exists(select 1 from t1 where not t1.d>a) and t1.a not in (d*+case when t1.b in (select max(e*11+t1.a)+ -count(*)-( -max(t1.e))-min(11)-count(distinct t1.a) from t1 union select - -min(t1.b) from t1) then t1.e when e in (select count(distinct t1.a) from t1 union select (max(d)) from t1) then e else -(19) end+ -t1.d, -17,f)),case c when a then f else 17 end),d,11)} +} {-13} +do_test randexpr-2.357 { + db eval {SELECT -coalesce((select max(13) from t1 where case case when f in (select e from t1 union select coalesce((select max(e) from t1 where (select -max(e) from t1)*t1.e=t1.c and exists(select 1 from t1 where t1.f>=13) then t1.d when d between t1.d and t1.a then 19 else a end | e when (a) then e else a end between b and t1.c and not exists(select 1 from t1 where 13 in (t1.d,t1.a,13)) or t1.a>= -13),c) FROM t1 WHERE NOT (t1.e not in ( -coalesce((select max(13) from t1 where exists(select 1 from t1 where not t1.d>a) and t1.a not in (d*+case when t1.b in (select max(e*11+t1.a)+ -count(*)-( -max(t1.e))-min(11)-count(distinct t1.a) from t1 union select - -min(t1.b) from t1) then t1.e when e in (select count(distinct t1.a) from t1 union select (max(d)) from t1) then e else -(19) end+ -t1.d, -17,f)),case c when a then f else 17 end),d,11))} +} {} +do_test randexpr-2.358 { + db eval {SELECT -coalesce((select max(13) from t1 where case case when f in (select e from t1 union select coalesce((select max(e) from t1 where (select -max(e) from t1)*t1.e=t1.c and exists(select 1 from t1 where t1.f>=13) then t1.d when d between t1.d and t1.a then 19 else a end & e when (a) then e else a end between b and t1.c and not exists(select 1 from t1 where 13 in (t1.d,t1.a,13)) or t1.a>= -13),c) FROM t1 WHERE t1.e not in ( -coalesce((select max(13) from t1 where exists(select 1 from t1 where not t1.d>a) and t1.a not in (d*+case when t1.b in (select max(e*11+t1.a)+ -count(*)-( -max(t1.e))-min(11)-count(distinct t1.a) from t1 union select - -min(t1.b) from t1) then t1.e when e in (select count(distinct t1.a) from t1 union select (max(d)) from t1) then e else -(19) end+ -t1.d, -17,f)),case c when a then f else 17 end),d,11)} +} {-13} +do_test randexpr-2.359 { + db eval {SELECT case d when -e+coalesce((select max(case (select +abs((~case case count(distinct b) 
when -count(*) then count(*) else max(t1.e) end when max(e) then min(t1.c) else count(*) end-min(c))) from t1) when t1.c then t1.a else -11 end) from t1 where case ~t1.b when e then d else 11 end in (b,t1.e,t1.b) or not t1.f>=b or (t1.f in (a,e,t1.e)) or f=b or (t1.f in (a,e,t1.e)) or ft1.b) or t1.a not in (f,t1.b,t1.e) or t1.d<=t1.b and 19 not in (t1.d,11,t1.f) then t1.d else 19 end+e+17 end | -t1.c*d end-13 FROM t1 WHERE e not in (e,t1.b,t1.d)} +} {} +do_test randexpr-2.362 { + db eval {SELECT case a when ~c then e+17-d else case t1.a when c then t1.b else case when ((case t1.e when c then d-11 else t1.f end not in (e,(17),t1.d))) or not exists(select 1 from t1 where e not in (d,t1.f,11) and -t1.a>t1.b) or t1.a not in (f,t1.b,t1.e) or t1.d<=t1.b and 19 not in (t1.d,11,t1.f) then t1.d else 19 end+e+17 end | -t1.c*d end-13 FROM t1 WHERE NOT (e not in (e,t1.b,t1.d))} +} {-119864} +do_test randexpr-2.363 { + db eval {SELECT case a when ~c then e+17-d else case t1.a when c then t1.b else case when ((case t1.e when c then d-11 else t1.f end not in (e,(17),t1.d))) or not exists(select 1 from t1 where e not in (d,t1.f,11) and -t1.a>t1.b) or t1.a not in (f,t1.b,t1.e) or t1.d<=t1.b and 19 not in (t1.d,11,t1.f) then t1.d else 19 end+e+17 end & -t1.c*d end-13 FROM t1 WHERE NOT (e not in (e,t1.b,t1.d))} +} {755} +do_test randexpr-2.364 { + db eval {SELECT coalesce((select max(case t1.a when d then ~11 else t1.e end) from t1 where (t1.f*17=a or t1.e=t1.a and -c>=t1.b)),t1.b) FROM t1 WHERE a+( -t1.c) between case when case a when t1.f then ~t1.b*t1.c+17 else -case when f not in (e,f,17) then a when 11=13 then t1.c else t1.d end end>=t1.b then b when t1.d in (select +max(19) from t1 union select abs(cast(avg(t1.f) AS integer))+case ~count(*)-max(t1.a)+max(11) when (max(a)) then (max(t1.d)) else count(distinct 11) end from t1) and e in (t1.d,17,11) then (11) else b end and a} +} {} +do_test randexpr-2.365 { + db eval {SELECT coalesce((select max(case t1.a when d then ~11 else t1.e end) from t1 where (t1.f*17=a or t1.e=t1.a and -c>=t1.b)),t1.b) FROM t1 WHERE NOT (a+( -t1.c) between case when case a when t1.f then ~t1.b*t1.c+17 else -case when f not in (e,f,17) then a when 11=13 then t1.c else t1.d end end>=t1.b then b when t1.d in (select +max(19) from t1 union select abs(cast(avg(t1.f) AS integer))+case ~count(*)-max(t1.a)+max(11) when (max(a)) then (max(t1.d)) else count(distinct 11) end from t1) and e in (t1.d,17,11) then (11) else b end and a)} +} {200} +do_test randexpr-2.366 { + db eval {SELECT t1.f+case when c*f | (t1.a) in (select b*f from t1 union select -case when d not between ~13 | 13+case a when a then e else a end and f then 11 else e-t1.d end from t1) then -(abs(case when ((case t1.f when t1.b then t1.f else a end*f+t1.c)) in (select d from t1 union select (t1.e) from t1) then d else (t1.f) end)/abs(b)) else t1.a end FROM t1 WHERE ~(f+17-(select +max(t1.f)-case count(distinct d-19) when ~count(distinct (abs(b)/abs(a))) then cast(avg(17) AS integer) else ~count(distinct e)*count(*) end from t1)-f-coalesce((select t1.f from t1 where t1.a>=t1.c or t1.b between -t1.a and 19 or exists(select 1 from t1 where t1.e not in (13,t1.c,e)) and d>a),t1.d)) not between d and c} +} {700} +do_test randexpr-2.367 { + db eval {SELECT t1.f+case when c*f | (t1.a) in (select b*f from t1 union select -case when d not between ~13 | 13+case a when a then e else a end and f then 11 else e-t1.d end from t1) then -(abs(case when ((case t1.f when t1.b then t1.f else a end*f+t1.c)) in (select d from t1 union select 
(t1.e) from t1) then d else (t1.f) end)/abs(b)) else t1.a end FROM t1 WHERE NOT (~(f+17-(select +max(t1.f)-case count(distinct d-19) when ~count(distinct (abs(b)/abs(a))) then cast(avg(17) AS integer) else ~count(distinct e)*count(*) end from t1)-f-coalesce((select t1.f from t1 where t1.a>=t1.c or t1.b between -t1.a and 19 or exists(select 1 from t1 where t1.e not in (13,t1.c,e)) and d>a),t1.d)) not between d and c)} +} {} +do_test randexpr-2.368 { + db eval {SELECT t1.f+case when c*f & (t1.a) in (select b*f from t1 union select -case when d not between ~13 & 13+case a when a then e else a end and f then 11 else e-t1.d end from t1) then -(abs(case when ((case t1.f when t1.b then t1.f else a end*f+t1.c)) in (select d from t1 union select (t1.e) from t1) then d else (t1.f) end)/abs(b)) else t1.a end FROM t1 WHERE ~(f+17-(select +max(t1.f)-case count(distinct d-19) when ~count(distinct (abs(b)/abs(a))) then cast(avg(17) AS integer) else ~count(distinct e)*count(*) end from t1)-f-coalesce((select t1.f from t1 where t1.a>=t1.c or t1.b between -t1.a and 19 or exists(select 1 from t1 where t1.e not in (13,t1.c,e)) and d>a),t1.d)) not between d and c} +} {700} +do_test randexpr-2.369 { + db eval {SELECT case when coalesce((select d from t1 where not coalesce((select f*t1.b from t1 where exists(select 1 from t1 where (t1.f*t1.a)-19-13 in (select ~(cast(avg(t1.b) AS integer)) from t1 union select min(t1.a)+case min( -t1.a) when cast(avg(19) AS integer) then cast(avg(19) AS integer) else min(13) end+( -min(a)) from t1))),(t1.f)) between 17 and d),11)=19 then t1.b when 17<11 or exists(select 1 from t1 where d in (select 13 from t1 union select f from t1)) then 11 else t1.c end FROM t1 WHERE case c when 13 then +t1.d else t1.a end in (select abs(case + -count(*)+count(distinct coalesce((select (abs(~case when t1.c=19 | 13 then t1.c when not e>(13) then f else d end)/abs(a)) from t1 where 17>=t1.b),t1.b))+min(t1.a)*count(*) | (((max(a)))-cast(avg((17)) AS integer)) when max(d) then count(*) else cast(avg(t1.e) AS integer) end) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.370 { + db eval {SELECT case when coalesce((select d from t1 where not coalesce((select f*t1.b from t1 where exists(select 1 from t1 where (t1.f*t1.a)-19-13 in (select ~(cast(avg(t1.b) AS integer)) from t1 union select min(t1.a)+case min( -t1.a) when cast(avg(19) AS integer) then cast(avg(19) AS integer) else min(13) end+( -min(a)) from t1))),(t1.f)) between 17 and d),11)=19 then t1.b when 17<11 or exists(select 1 from t1 where d in (select 13 from t1 union select f from t1)) then 11 else t1.c end FROM t1 WHERE NOT (case c when 13 then +t1.d else t1.a end in (select abs(case + -count(*)+count(distinct coalesce((select (abs(~case when t1.c=19 | 13 then t1.c when not e>(13) then f else d end)/abs(a)) from t1 where 17>=t1.b),t1.b))+min(t1.a)*count(*) | (((max(a)))-cast(avg((17)) AS integer)) when max(d) then count(*) else cast(avg(t1.e) AS integer) end) from t1 union select count(*) from t1))} +} {300} +do_test randexpr-2.371 { + db eval {SELECT -(select count(*) from t1)*(select (+(cast(avg(+f) AS integer))+++case +max(t1.b-case when t1.e+t1.e>=11 then -a else t1.e end)-abs(min(e)+ -max(c) | ((max(11)))) when max(11) then (min(b)) else cast(avg( -b) AS integer) end-cast(avg(b) AS integer)-count(*)+count(*)) from t1)+~17- -t1.a FROM t1 WHERE f between 19*t1.d and 19} +} {} +do_test randexpr-2.372 { + db eval {SELECT -(select count(*) from t1)*(select (+(cast(avg(+f) AS integer))+++case +max(t1.b-case when t1.e+t1.e>=11 
then -a else t1.e end)-abs(min(e)+ -max(c) | ((max(11)))) when max(11) then (min(b)) else cast(avg( -b) AS integer) end-cast(avg(b) AS integer)-count(*)+count(*)) from t1)+~17- -t1.a FROM t1 WHERE NOT (f between 19*t1.d and 19)} +} {-118} +do_test randexpr-2.373 { + db eval {SELECT -(select count(*) from t1)*(select (+(cast(avg(+f) AS integer))+++case +max(t1.b-case when t1.e+t1.e>=11 then -a else t1.e end)-abs(min(e)+ -max(c) & ((max(11)))) when max(11) then (min(b)) else cast(avg( -b) AS integer) end-cast(avg(b) AS integer)-count(*)+count(*)) from t1)+~17- -t1.a FROM t1 WHERE NOT (f between 19*t1.d and 19)} +} {-118} +do_test randexpr-2.374 { + db eval {SELECT case when not t1.a< -~~e then b when c*(select +case max((abs( -a)/abs(a))) when min(t1.b) then case ~min(t1.d)-cast(avg(e) AS integer) | max(e) when count(distinct 11) then count(*) else max(b) end else count(*) end from t1) not between 13 and case when exists(select 1 from t1 where exists(select 1 from t1 where f not in (19,t1.f,19)) and 11 between t1.b and t1.b) then (13) else -t1.d end then f else e end FROM t1 WHERE d>11} +} {200} +do_test randexpr-2.375 { + db eval {SELECT case when not t1.a< -~~e then b when c*(select +case max((abs( -a)/abs(a))) when min(t1.b) then case ~min(t1.d)-cast(avg(e) AS integer) | max(e) when count(distinct 11) then count(*) else max(b) end else count(*) end from t1) not between 13 and case when exists(select 1 from t1 where exists(select 1 from t1 where f not in (19,t1.f,19)) and 11 between t1.b and t1.b) then (13) else -t1.d end then f else e end FROM t1 WHERE NOT (d>11)} +} {} +do_test randexpr-2.376 { + db eval {SELECT case when not t1.a< -~~e then b when c*(select +case max((abs( -a)/abs(a))) when min(t1.b) then case ~min(t1.d)-cast(avg(e) AS integer) & max(e) when count(distinct 11) then count(*) else max(b) end else count(*) end from t1) not between 13 and case when exists(select 1 from t1 where exists(select 1 from t1 where f not in (19,t1.f,19)) and 11 between t1.b and t1.b) then (13) else -t1.d end then f else e end FROM t1 WHERE d>11} +} {200} +do_test randexpr-2.377 { + db eval {SELECT coalesce((select max(t1.c | ~17) from t1 where exists(select 1 from t1 where t1.f<>b-(abs(13)/abs(e)))),13) FROM t1 WHERE case when case (abs(c+a+19+coalesce((select max(t1.a) from t1 where coalesce((select coalesce((select (f) from t1 where t1.d between 19 and b),t1.a) from t1 where 13=(c)),f) in (select ~case count(*) when max(t1.b) then ((count(distinct 11))) else count(*) end- -count(distinct a) from t1 union select count(distinct 13) from t1)),t1.c) | t1.b+t1.d)/abs(e)) when t1.d then a else 17 end not between 19 and t1.f then t1.b else d end in (select t1.c from t1 union select (t1.a) from t1)} +} {} +do_test randexpr-2.378 { + db eval {SELECT coalesce((select max(t1.c | ~17) from t1 where exists(select 1 from t1 where t1.f<>b-(abs(13)/abs(e)))),13) FROM t1 WHERE NOT (case when case (abs(c+a+19+coalesce((select max(t1.a) from t1 where coalesce((select coalesce((select (f) from t1 where t1.d between 19 and b),t1.a) from t1 where 13=(c)),f) in (select ~case count(*) when max(t1.b) then ((count(distinct 11))) else count(*) end- -count(distinct a) from t1 union select count(distinct 13) from t1)),t1.c) | t1.b+t1.d)/abs(e)) when t1.d then a else 17 end not between 19 and t1.f then t1.b else d end in (select t1.c from t1 union select (t1.a) from t1))} +} {-18} +do_test randexpr-2.379 { + db eval {SELECT coalesce((select max(t1.c & ~17) from t1 where exists(select 1 from t1 where 
t1.f<>b-(abs(13)/abs(e)))),13) FROM t1 WHERE NOT (case when case (abs(c+a+19+coalesce((select max(t1.a) from t1 where coalesce((select coalesce((select (f) from t1 where t1.d between 19 and b),t1.a) from t1 where 13=(c)),f) in (select ~case count(*) when max(t1.b) then ((count(distinct 11))) else count(*) end- -count(distinct a) from t1 union select count(distinct 13) from t1)),t1.c) | t1.b+t1.d)/abs(e)) when t1.d then a else 17 end not between 19 and t1.f then t1.b else d end in (select t1.c from t1 union select (t1.a) from t1))} +} {300} +do_test randexpr-2.380 { + db eval {SELECT (coalesce((select (coalesce((select d from t1 where not exists(select 1 from t1 where 17>t1.c)),d))*c from t1 where t1.b between t1.d | (select count(distinct coalesce((select max(t1.e) from t1 where case when -e>t1.b then f when t1.a(abs(19*case when coalesce((select b*case when 11 not between 19 and (abs(13)/abs(t1.b-t1.d-e+b-f+19*t1.a)) then -t1.a else f end from t1 where (b=17)),f)= -t1.d then 17 when 11<>19 then f else t1.d end)/abs(t1.b)) then d else 19 end} +} {} +do_test randexpr-2.381 { + db eval {SELECT (coalesce((select (coalesce((select d from t1 where not exists(select 1 from t1 where 17>t1.c)),d))*c from t1 where t1.b between t1.d | (select count(distinct coalesce((select max(t1.e) from t1 where case when -e>t1.b then f when t1.a(abs(19*case when coalesce((select b*case when 11 not between 19 and (abs(13)/abs(t1.b-t1.d-e+b-f+19*t1.a)) then -t1.a else f end from t1 where (b=17)),f)= -t1.d then 17 when 11<>19 then f else t1.d end)/abs(t1.b)) then d else 19 end)} +} {-120289} +do_test randexpr-2.382 { + db eval {SELECT (coalesce((select (coalesce((select d from t1 where not exists(select 1 from t1 where 17>t1.c)),d))*c from t1 where t1.b between t1.d & (select count(distinct coalesce((select max(t1.e) from t1 where case when -e>t1.b then f when t1.a(abs(19*case when coalesce((select b*case when 11 not between 19 and (abs(13)/abs(t1.b-t1.d-e+b-f+19*t1.a)) then -t1.a else f end from t1 where (b=17)),f)= -t1.d then 17 when 11<>19 then f else t1.d end)/abs(t1.b)) then d else 19 end)} +} {-489} +do_test randexpr-2.383 { + db eval {SELECT 19*case when coalesce((select max(t1.e-c) from t1 where f=(abs(coalesce((select t1.c from t1 where not (not exists(select 1 from t1 where a in (select b from t1 union select c from t1)))),13))/abs(coalesce((select 13 from t1 where t1.b in (d,t1.d,a)), -t1.b)))),c) in (select ~min(t1.c) from t1 union select count(*) from t1) and -f<>t1.c then t1.a-t1.e when not t1.a in (select cast(avg(a) AS integer) from t1 union select min(13) from t1) then -19 else f end FROM t1 WHERE 11<>13} +} {11400} +do_test randexpr-2.384 { + db eval {SELECT 19*case when coalesce((select max(t1.e-c) from t1 where f=(abs(coalesce((select t1.c from t1 where not (not exists(select 1 from t1 where a in (select b from t1 union select c from t1)))),13))/abs(coalesce((select 13 from t1 where t1.b in (d,t1.d,a)), -t1.b)))),c) in (select ~min(t1.c) from t1 union select count(*) from t1) and -f<>t1.c then t1.a-t1.e when not t1.a in (select cast(avg(a) AS integer) from t1 union select min(13) from t1) then -19 else f end FROM t1 WHERE NOT (11<>13)} +} {} +do_test randexpr-2.385 { + db eval {SELECT t1.b*(select max(t1.b*coalesce((select max(t1.f*(t1.b)) from t1 where 17 not in (d,(a),(t1.b)+13)),t1.c)-t1.a) | cast(avg(t1.d) AS integer) | ~+case abs(abs(count(distinct t1.b))) when ~count(distinct e)-(count(*)) then max(t1.e) else max(d) end | max((19)) | min(19) from t1)-t1.d-t1.d*17 FROM t1 WHERE t1.f-case 
t1.d+19+13*case when (abs(e)/abs((select count(*)+cast(avg(a) AS integer)*count(*) from t1)-c*(abs(d)/abs(c*t1.e-19)))) not in (17,17,t1.a) then t1.f when t1.d<>t1.f then b else d end+ - -f*t1.b*d when b then 11 else -13 end<=(t1.b)} +} {} +do_test randexpr-2.386 { + db eval {SELECT t1.b*(select max(t1.b*coalesce((select max(t1.f*(t1.b)) from t1 where 17 not in (d,(a),(t1.b)+13)),t1.c)-t1.a) | cast(avg(t1.d) AS integer) | ~+case abs(abs(count(distinct t1.b))) when ~count(distinct e)-(count(*)) then max(t1.e) else max(d) end | max((19)) | min(19) from t1)-t1.d-t1.d*17 FROM t1 WHERE NOT (t1.f-case t1.d+19+13*case when (abs(e)/abs((select count(*)+cast(avg(a) AS integer)*count(*) from t1)-c*(abs(d)/abs(c*t1.e-19)))) not in (17,17,t1.a) then t1.f when t1.d<>t1.f then b else d end+ - -f*t1.b*d when b then 11 else -13 end<=(t1.b))} +} {-7400} +do_test randexpr-2.387 { + db eval {SELECT t1.b*(select max(t1.b*coalesce((select max(t1.f*(t1.b)) from t1 where 17 not in (d,(a),(t1.b)+13)),t1.c)-t1.a) & cast(avg(t1.d) AS integer) & ~+case abs(abs(count(distinct t1.b))) when ~count(distinct e)-(count(*)) then max(t1.e) else max(d) end & max((19)) & min(19) from t1)-t1.d-t1.d*17 FROM t1 WHERE NOT (t1.f-case t1.d+19+13*case when (abs(e)/abs((select count(*)+cast(avg(a) AS integer)*count(*) from t1)-c*(abs(d)/abs(c*t1.e-19)))) not in (17,17,t1.a) then t1.f when t1.d<>t1.f then b else d end+ - -f*t1.b*d when b then 11 else -13 end<=(t1.b))} +} {-7200} +do_test randexpr-2.388 { + db eval {SELECT (select abs(count(distinct t1.e) | min(b))-cast(avg(c) AS integer) | case +~+max(t1.a)*abs(+~ - -+cast(avg(19*f) AS integer) | count(*) | ~max(b) | ((max(t1.e)))-count(distinct t1.d)+ -(count(distinct t1.c)))* -count(distinct 17) | count(distinct a) when ((count(distinct t1.d))) then -min(t1.c) else count(distinct t1.c) end- -min(d) from t1) FROM t1 WHERE ((not exists(select 1 from t1 where c=f)) and case when not 19+t1.c- -11+11+e>=a or t1.d>b then f else t1.e end<> -t1.c) or (t1.a in (select +count(*) from t1 union select abs((case max(d) when count(distinct d) then min( -t1.e) else count(distinct a) end-count(distinct -t1.f)))*count(*) from t1) or not exists(select 1 from t1 where t1.d not in (c,17,e)))} +} {-99} +do_test randexpr-2.389 { + db eval {SELECT (select abs(count(distinct t1.e) | min(b))-cast(avg(c) AS integer) | case +~+max(t1.a)*abs(+~ - -+cast(avg(19*f) AS integer) | count(*) | ~max(b) | ((max(t1.e)))-count(distinct t1.d)+ -(count(distinct t1.c)))* -count(distinct 17) | count(distinct a) when ((count(distinct t1.d))) then -min(t1.c) else count(distinct t1.c) end- -min(d) from t1) FROM t1 WHERE NOT (((not exists(select 1 from t1 where c=f)) and case when not 19+t1.c- -11+11+e>=a or t1.d>b then f else t1.e end<> -t1.c) or (t1.a in (select +count(*) from t1 union select abs((case max(d) when count(distinct d) then min( -t1.e) else count(distinct a) end-count(distinct -t1.f)))*count(*) from t1) or not exists(select 1 from t1 where t1.d not in (c,17,e))))} +} {} +do_test randexpr-2.390 { + db eval {SELECT (select abs(count(distinct t1.e) & min(b))-cast(avg(c) AS integer) & case +~+max(t1.a)*abs(+~ - -+cast(avg(19*f) AS integer) & count(*) & ~max(b) & ((max(t1.e)))-count(distinct t1.d)+ -(count(distinct t1.c)))* -count(distinct 17) & count(distinct a) when ((count(distinct t1.d))) then -min(t1.c) else count(distinct t1.c) end- -min(d) from t1) FROM t1 WHERE ((not exists(select 1 from t1 where c=f)) and case when not 19+t1.c- -11+11+e>=a or t1.d>b then f else t1.e end<> -t1.c) or (t1.a in (select +count(*) 
from t1 union select abs((case max(d) when count(distinct d) then min( -t1.e) else count(distinct a) end-count(distinct -t1.f)))*count(*) from t1) or not exists(select 1 from t1 where t1.d not in (c,17,e)))} +} {144} +do_test randexpr-2.391 { + db eval {SELECT case when exists(select 1 from t1 where coalesce((select b from t1 where case when (b -11 then -t1.c else -b end then d else t1.a end) not in (t1.b,d,t1.b)} +} {300} +do_test randexpr-2.392 { + db eval {SELECT case when exists(select 1 from t1 where coalesce((select b from t1 where case when (b -11 then -t1.c else -b end then d else t1.a end) not in (t1.b,d,t1.b))} +} {} +do_test randexpr-2.393 { + db eval {SELECT t1.f+coalesce((select t1.a-t1.d*f-coalesce((select max(coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where a=~t1.d+case when (c in (select abs(~( -cast(avg(e) AS integer))) from t1 union select -min(19) from t1)) and 19<>t1.e or t1.e<11 and c in (t1.b,t1.b,t1.d) or 17 between -t1.d and t1.d then 11 else d end)),c-f)) from t1 where t1.a<=t1.c),11)-t1.c from t1 where f between t1.a and d),19) FROM t1 WHERE exists(select 1 from t1 where case +t1.d when d then f else t1.e end in (t1.e,17,11) or 13*e-t1.b in (select t1.e from t1 union select 17 from t1)) and 11<=case -case when not exists(select 1 from t1 where (17 | t1.e in (11,11,11) and b<(19))) then case t1.c+t1.f+c when t1.b then c else 19 end else 17 end when d then t1.b else t1.a end} +} {} +do_test randexpr-2.394 { + db eval {SELECT t1.f+coalesce((select t1.a-t1.d*f-coalesce((select max(coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where a=~t1.d+case when (c in (select abs(~( -cast(avg(e) AS integer))) from t1 union select -min(19) from t1)) and 19<>t1.e or t1.e<11 and c in (t1.b,t1.b,t1.d) or 17 between -t1.d and t1.d then 11 else d end)),c-f)) from t1 where t1.a<=t1.c),11)-t1.c from t1 where f between t1.a and d),19) FROM t1 WHERE NOT (exists(select 1 from t1 where case +t1.d when d then f else t1.e end in (t1.e,17,11) or 13*e-t1.b in (select t1.e from t1 union select 17 from t1)) and 11<=case -case when not exists(select 1 from t1 where (17 | t1.e in (11,11,11) and b<(19))) then case t1.c+t1.f+c when t1.b then c else 19 end else 17 end when d then t1.b else t1.a end)} +} {619} +do_test randexpr-2.395 { + db eval {SELECT case when (coalesce((select coalesce((select max( -11) from t1 where not case t1.c*case when exists(select 1 from t1 where (t1.d) in (select case count(*) when count(distinct t1.c) then count(*) else count(*) end from t1 union select cast(avg(t1.c) AS integer) from t1)) then +e when f=19 then 17 else t1.e end when t1.a then 19 else c end=e),t1.a)- -d*f-a from t1 where (t1.e)<>13),19)*(a)=19) then (c) when (exists(select 1 from t1 where 17=t1.d)) then 19 else c end FROM t1 WHERE t1.d*c*(select cast(avg(((abs(t1.d*13)/abs(f)))) AS integer) from t1)+t1.d=case when +(coalesce((select t1.a from t1 where case when not exists(select 1 from t1 where not exists(select 1 from t1 where c=t1.e)) then 19 | -17 else 13 end not in (t1.f,b,t1.b)),t1.f))*t1.a-t1.c-13 in (e,11,t1.c) then t1.a when (d between -t1.f and f) then t1.d else t1.a end} +} {} +do_test randexpr-2.396 { + db eval {SELECT case when (coalesce((select coalesce((select max( -11) from t1 where not case t1.c*case when exists(select 1 from t1 where (t1.d) in (select case count(*) when count(distinct t1.c) then count(*) else count(*) end from t1 union select cast(avg(t1.c) AS integer) from t1)) then +e when f=19 then 17 else t1.e end when t1.a then 19 else c 
end=e),t1.a)- -d*f-a from t1 where (t1.e)<>13),19)*(a)=19) then (c) when (exists(select 1 from t1 where 17=t1.d)) then 19 else c end FROM t1 WHERE NOT (t1.d*c*(select cast(avg(((abs(t1.d*13)/abs(f)))) AS integer) from t1)+t1.d=case when +(coalesce((select t1.a from t1 where case when not exists(select 1 from t1 where not exists(select 1 from t1 where c=t1.e)) then 19 | -17 else 13 end not in (t1.f,b,t1.b)),t1.f))*t1.a-t1.c-13 in (e,11,t1.c) then t1.a when (d between -t1.f and f) then t1.d else t1.a end)} +} {300} +do_test randexpr-2.397 { + db eval {SELECT coalesce((select d from t1 where f in (select max(19) from t1 union select ~+max(t1.e) from t1)),b-case -11 when 13 then t1.a-e else case when not exists(select 1 from t1 where c not in ( -t1.c,(t1.e),19)) and d+t1.b-t1.b in (select t1.a from t1 union select t1.d from t1) then 19+19*(t1.f) when t1.b<=a then 17 else t1.a end end+t1.b)*t1.a FROM t1 WHERE (select +cast(avg(~13+(select count(*) from t1)) AS integer)++min(case when (not d<>a) then coalesce((select t1.b from t1 where t1.f not between e and t1.d),13)+13 when t1.d not between 11 and t1.d then 17 else f end) | count(*) | ~count(distinct t1.e) | cast(avg(e) AS integer)-max(t1.d) from t1) not between coalesce((select case t1.a when a then e else 17 end from t1 where t1.a in (t1.e,11,( -t1.d))), -(b)) and t1.a} +} {} +do_test randexpr-2.398 { + db eval {SELECT coalesce((select d from t1 where f in (select max(19) from t1 union select ~+max(t1.e) from t1)),b-case -11 when 13 then t1.a-e else case when not exists(select 1 from t1 where c not in ( -t1.c,(t1.e),19)) and d+t1.b-t1.b in (select t1.a from t1 union select t1.d from t1) then 19+19*(t1.f) when t1.b<=a then 17 else t1.a end end+t1.b)*t1.a FROM t1 WHERE NOT ((select +cast(avg(~13+(select count(*) from t1)) AS integer)++min(case when (not d<>a) then coalesce((select t1.b from t1 where t1.f not between e and t1.d),13)+13 when t1.d not between 11 and t1.d then 17 else f end) | count(*) | ~count(distinct t1.e) | cast(avg(e) AS integer)-max(t1.d) from t1) not between coalesce((select case t1.a when a then e else 17 end from t1 where t1.a in (t1.e,11,( -t1.d))), -(b)) and t1.a)} +} {30000} +do_test randexpr-2.399 { + db eval {SELECT (abs(c)/abs((coalesce((select t1.d from t1 where ((19) in (select count(distinct - -(abs(coalesce((select max(t1.e) from t1 where t1.c*t1.a | t1.d=t1.d or t1.f in (select min(t1.f)-cast(avg(e) AS integer) from t1 union select -count(*) from t1)),c))/abs(a))) from t1 union select case cast(avg(c) AS integer)-abs( -((count(*))))*(count(*))*min(d) when cast(avg(a) AS integer) then count(distinct 11) else count(*) end-(count(distinct 17)) from t1))),a))))*t1.e FROM t1 WHERE case when a between -t1.a+(t1.c) and c-case when (~+t1.b-~case when t1.a not between 19*11 and 11 then t1.a when 11=f and -t1.d not between t1.a and d then e else b end in (select min(19) from t1 union select case count(distinct 17) when count(*) then count(distinct t1.f) else min(c) end from t1)) then t1.f else 17 end then 17 else 13 end in (t1.a,t1.d,a)} +} {} +do_test randexpr-2.400 { + db eval {SELECT (abs(c)/abs((coalesce((select t1.d from t1 where ((19) in (select count(distinct - -(abs(coalesce((select max(t1.e) from t1 where t1.c*t1.a | t1.d=t1.d or t1.f in (select min(t1.f)-cast(avg(e) AS integer) from t1 union select -count(*) from t1)),c))/abs(a))) from t1 union select case cast(avg(c) AS integer)-abs( -((count(*))))*(count(*))*min(d) when cast(avg(a) AS integer) then count(distinct 11) else count(*) end-(count(distinct 17)) 
from t1))),a))))*t1.e FROM t1 WHERE NOT (case when a between -t1.a+(t1.c) and c-case when (~+t1.b-~case when t1.a not between 19*11 and 11 then t1.a when 11=f and -t1.d not between t1.a and d then e else b end in (select min(19) from t1 union select case count(distinct 17) when count(*) then count(distinct t1.f) else min(c) end from t1)) then t1.f else 17 end then 17 else 13 end in (t1.a,t1.d,a))} +} {1500} +do_test randexpr-2.401 { + db eval {SELECT (abs(c)/abs((coalesce((select t1.d from t1 where ((19) in (select count(distinct - -(abs(coalesce((select max(t1.e) from t1 where t1.c*t1.a & t1.d=t1.d or t1.f in (select min(t1.f)-cast(avg(e) AS integer) from t1 union select -count(*) from t1)),c))/abs(a))) from t1 union select case cast(avg(c) AS integer)-abs( -((count(*))))*(count(*))*min(d) when cast(avg(a) AS integer) then count(distinct 11) else count(*) end-(count(distinct 17)) from t1))),a))))*t1.e FROM t1 WHERE NOT (case when a between -t1.a+(t1.c) and c-case when (~+t1.b-~case when t1.a not between 19*11 and 11 then t1.a when 11=f and -t1.d not between t1.a and d then e else b end in (select min(19) from t1 union select case count(distinct 17) when count(*) then count(distinct t1.f) else min(c) end from t1)) then t1.f else 17 end then 17 else 13 end in (t1.a,t1.d,a))} +} {1500} +do_test randexpr-2.402 { + db eval {SELECT case when d=c-(select (~max(t1.f*t1.c-11)) from t1) then coalesce((select max(19) from t1 where 13d then t1.e when not not exists(select 1 from t1 where t1.c not in (a,a,t1.d) and d not between b and 19) then t1.a else 11 end*f-f from t1 where t1.f between e and e and d between 19 and t1.e or 11<>t1.a),t1.c)),d) else 17 end FROM t1 WHERE (t1.e=11)} +} {} +do_test randexpr-2.403 { + db eval {SELECT case when d=c-(select (~max(t1.f*t1.c-11)) from t1) then coalesce((select max(19) from t1 where 13d then t1.e when not not exists(select 1 from t1 where t1.c not in (a,a,t1.d) and d not between b and 19) then t1.a else 11 end*f-f from t1 where t1.f between e and e and d between 19 and t1.e or 11<>t1.a),t1.c)),d) else 17 end FROM t1 WHERE NOT ((t1.e=11))} +} {17} +do_test randexpr-2.404 { + db eval {SELECT case when t1.a-t1.a | case when (abs(t1.d)/abs(t1.c | t1.a))<>t1.e then e*+t1.c-t1.d*coalesce((select t1.b from t1 where b>=+d+ -e*11),11)+t1.b | t1.f when not exists(select 1 from t1 where d not in (17,e,d)) then 11 else t1.d end>=13 then 11 when d=c then a else f end FROM t1 WHERE (abs(case when not t1.d<=t1.f and not t1.d not between -f and -17-11*t1.d and (13d then t1.d*coalesce((select max(t1.a) from t1 where (17) not between (t1.b) and 11),d) else t1.d end)/abs(f)) not between 13 and 19} +} {11} +do_test randexpr-2.405 { + db eval {SELECT case when t1.a-t1.a | case when (abs(t1.d)/abs(t1.c | t1.a))<>t1.e then e*+t1.c-t1.d*coalesce((select t1.b from t1 where b>=+d+ -e*11),11)+t1.b | t1.f when not exists(select 1 from t1 where d not in (17,e,d)) then 11 else t1.d end>=13 then 11 when d=c then a else f end FROM t1 WHERE NOT ((abs(case when not t1.d<=t1.f and not t1.d not between -f and -17-11*t1.d and (13d then t1.d*coalesce((select max(t1.a) from t1 where (17) not between (t1.b) and 11),d) else t1.d end)/abs(f)) not between 13 and 19)} +} {} +do_test randexpr-2.406 { + db eval {SELECT case when t1.a-t1.a & case when (abs(t1.d)/abs(t1.c & t1.a))<>t1.e then e*+t1.c-t1.d*coalesce((select t1.b from t1 where b>=+d+ -e*11),11)+t1.b & t1.f when not exists(select 1 from t1 where d not in (17,e,d)) then 11 else t1.d end>=13 then 11 when d=c then a else f end FROM t1 WHERE 
(abs(case when not t1.d<=t1.f and not t1.d not between -f and -17-11*t1.d and (13d then t1.d*coalesce((select max(t1.a) from t1 where (17) not between (t1.b) and 11),d) else t1.d end)/abs(f)) not between 13 and 19} +} {600} +do_test randexpr-2.407 { + db eval {SELECT coalesce((select t1.f*13 from t1 where exists(select 1 from t1 where 13<=t1.e)),(abs(+ -17)/abs(c))) FROM t1 WHERE coalesce((select 17 from t1 where 11>++a-~t1.a+d | (19)),(abs(t1.c)/abs(case when case t1.c*17-case when b not between t1.b and f then t1.a when f>=a then 19 else 13 end when 19 then 13 else t1.f end not between c and a then c else 13 end)))> -e or 13 between f and c and t1.e>=b} +} {7800} +do_test randexpr-2.408 { + db eval {SELECT coalesce((select t1.f*13 from t1 where exists(select 1 from t1 where 13<=t1.e)),(abs(+ -17)/abs(c))) FROM t1 WHERE NOT (coalesce((select 17 from t1 where 11>++a-~t1.a+d | (19)),(abs(t1.c)/abs(case when case t1.c*17-case when b not between t1.b and f then t1.a when f>=a then 19 else 13 end when 19 then 13 else t1.f end not between c and a then c else 13 end)))> -e or 13 between f and c and t1.e>=b)} +} {} +do_test randexpr-2.409 { + db eval {SELECT ~+coalesce((select (select count(distinct +coalesce((select c from t1 where e>=13),+c)-t1.c) from t1) from t1 where case a when t1.d*(select cast(avg(case when t1.b*coalesce((select 17 from t1 where case -a when t1.b then d else (t1.f) end not in ((t1.e),a,t1.f)),t1.c)-c in (d,a,t1.c) then c else b end) AS integer) from t1)+t1.d then 13 else t1.e end not in (19,a,17)),13) FROM t1 WHERE +c in (a,17,t1.d)} +} {} +do_test randexpr-2.410 { + db eval {SELECT ~+coalesce((select (select count(distinct +coalesce((select c from t1 where e>=13),+c)-t1.c) from t1) from t1 where case a when t1.d*(select cast(avg(case when t1.b*coalesce((select 17 from t1 where case -a when t1.b then d else (t1.f) end not in ((t1.e),a,t1.f)),t1.c)-c in (d,a,t1.c) then c else b end) AS integer) from t1)+t1.d then 13 else t1.e end not in (19,a,17)),13) FROM t1 WHERE NOT (+c in (a,17,t1.d))} +} {-2} +do_test randexpr-2.411 { + db eval {SELECT f-coalesce((select t1.d from t1 where t1.a>11+t1.d),coalesce((select coalesce((select t1.d from t1 where 17>=d or (case c when t1.c then (t1.c) else t1.e end in (b,17,17) or t1.b>=t1.a) or t1.b<=e),case d when (19) then t1.d else t1.a end)-t1.d from t1 where 13 in (select ( -count(distinct (e)) | cast(avg(t1.d) AS integer)*count(*))+max( -11) | max( -c) from t1 union select count(distinct 11) from t1)),a)) FROM t1 WHERE not b<=13 or (coalesce((select max(t1.b) from t1 where b in (case when not a<>13 then 17 when -(select ~abs( -(max(f))+count(distinct t1.a)) from t1)+13>=t1.b then case c when t1.a then d else f end-c else t1.d end-19, -t1.d,f)),19)) not in (t1.a,c,t1.d) or t1.e>t1.f} +} {500} +do_test randexpr-2.412 { + db eval {SELECT f-coalesce((select t1.d from t1 where t1.a>11+t1.d),coalesce((select coalesce((select t1.d from t1 where 17>=d or (case c when t1.c then (t1.c) else t1.e end in (b,17,17) or t1.b>=t1.a) or t1.b<=e),case d when (19) then t1.d else t1.a end)-t1.d from t1 where 13 in (select ( -count(distinct (e)) | cast(avg(t1.d) AS integer)*count(*))+max( -11) | max( -c) from t1 union select count(distinct 11) from t1)),a)) FROM t1 WHERE NOT (not b<=13 or (coalesce((select max(t1.b) from t1 where b in (case when not a<>13 then 17 when -(select ~abs( -(max(f))+count(distinct t1.a)) from t1)+13>=t1.b then case c when t1.a then d else f end-c else t1.d end-19, -t1.d,f)),19)) not in (t1.a,c,t1.d) or t1.e>t1.f)} +} {} +do_test 
randexpr-2.413 { + db eval {SELECT f-coalesce((select t1.d from t1 where t1.a>11+t1.d),coalesce((select coalesce((select t1.d from t1 where 17>=d or (case c when t1.c then (t1.c) else t1.e end in (b,17,17) or t1.b>=t1.a) or t1.b<=e),case d when (19) then t1.d else t1.a end)-t1.d from t1 where 13 in (select ( -count(distinct (e)) & cast(avg(t1.d) AS integer)*count(*))+max( -11) & max( -c) from t1 union select count(distinct 11) from t1)),a)) FROM t1 WHERE not b<=13 or (coalesce((select max(t1.b) from t1 where b in (case when not a<>13 then 17 when -(select ~abs( -(max(f))+count(distinct t1.a)) from t1)+13>=t1.b then case c when t1.a then d else f end-c else t1.d end-19, -t1.d,f)),19)) not in (t1.a,c,t1.d) or t1.e>t1.f} +} {500} +do_test randexpr-2.414 { + db eval {SELECT coalesce((select e from t1 where (coalesce((select max(+e+e*11*t1.c-e) from t1 where exists(select 1 from t1 where (19<=~t1.f))),case (abs(t1.c)/abs(case t1.c when 19 then f*~case when t1.e+t1.d between -t1.f and (d) then b when (d=a) then t1.e else e end else c end*t1.a)) | f when (d) then 17 else t1.f end)coalesce((select coalesce((select e from t1 where case when -d+t1.e>e then coalesce((select d+b from t1 where exists(select 1 from t1 where not 13-b not in (f,a,d)) and (t1.b between (a) and t1.c)),c) when a=13 then t1.e else a end in (select t1.d from t1 union select t1.d from t1) and e not in (d, -19,11)),d) from t1 where not f in (t1.a,19,t1.d)),t1.d)} +} {} +do_test randexpr-2.415 { + db eval {SELECT coalesce((select e from t1 where (coalesce((select max(+e+e*11*t1.c-e) from t1 where exists(select 1 from t1 where (19<=~t1.f))),case (abs(t1.c)/abs(case t1.c when 19 then f*~case when t1.e+t1.d between -t1.f and (d) then b when (d=a) then t1.e else e end else c end*t1.a)) | f when (d) then 17 else t1.f end)coalesce((select coalesce((select e from t1 where case when -d+t1.e>e then coalesce((select d+b from t1 where exists(select 1 from t1 where not 13-b not in (f,a,d)) and (t1.b between (a) and t1.c)),c) when a=13 then t1.e else a end in (select t1.d from t1 union select t1.d from t1) and e not in (d, -19,11)),d) from t1 where not f in (t1.a,19,t1.d)),t1.d))} +} {13} +do_test randexpr-2.416 { + db eval {SELECT coalesce((select e from t1 where (coalesce((select max(+e+e*11*t1.c-e) from t1 where exists(select 1 from t1 where (19<=~t1.f))),case (abs(t1.c)/abs(case t1.c when 19 then f*~case when t1.e+t1.d between -t1.f and (d) then b when (d=a) then t1.e else e end else c end*t1.a)) & f when (d) then 17 else t1.f end)coalesce((select coalesce((select e from t1 where case when -d+t1.e>e then coalesce((select d+b from t1 where exists(select 1 from t1 where not 13-b not in (f,a,d)) and (t1.b between (a) and t1.c)),c) when a=13 then t1.e else a end in (select t1.d from t1 union select t1.d from t1) and e not in (d, -19,11)),d) from t1 where not f in (t1.a,19,t1.d)),t1.d))} +} {13} +do_test randexpr-2.417 { + db eval {SELECT (select count(distinct 11-17-11) from t1)-coalesce((select (abs(13)/abs(t1.b-c*11+coalesce((select max(f) from t1 where t1.a>t1.a),(e))*case when (abs(e)/abs(t1.b)) not between ~f and t1.e then c else (abs(t1.b)/abs(t1.d)) end | 13+b | b+t1.e)) from t1 where t1.c in (select t1.c from t1 union select (a) from t1)),19) FROM t1 WHERE t1.f>=19} +} {1} +do_test randexpr-2.418 { + db eval {SELECT (select count(distinct 11-17-11) from t1)-coalesce((select (abs(13)/abs(t1.b-c*11+coalesce((select max(f) from t1 where t1.a>t1.a),(e))*case when (abs(e)/abs(t1.b)) not between ~f and t1.e then c else (abs(t1.b)/abs(t1.d)) 
end | 13+b | b+t1.e)) from t1 where t1.c in (select t1.c from t1 union select (a) from t1)),19) FROM t1 WHERE NOT (t1.f>=19)} +} {} +do_test randexpr-2.419 { + db eval {SELECT (select count(distinct 11-17-11) from t1)-coalesce((select (abs(13)/abs(t1.b-c*11+coalesce((select max(f) from t1 where t1.a>t1.a),(e))*case when (abs(e)/abs(t1.b)) not between ~f and t1.e then c else (abs(t1.b)/abs(t1.d)) end & 13+b & b+t1.e)) from t1 where t1.c in (select t1.c from t1 union select (a) from t1)),19) FROM t1 WHERE t1.f>=19} +} {1} +do_test randexpr-2.420 { + db eval {SELECT coalesce((select max(case when 11+(select +abs(count(*)) from t1) not in (case when (case when not exists(select 1 from t1 where t1.d=f) then a else a end+11 in (select ((13)) from t1 union select t1.d from t1)) then (abs(t1.c)/abs(t1.b)) when a>=t1.c then b else f end,t1.a,c) then t1.a when b in (select cast(avg(t1.a) AS integer) | ~ -count(distinct 17) from t1 union select min(c) from t1) then b else -t1.e end) from t1 where d not between c and -t1.b and b=d and b between 13 and t1.c),(17)) FROM t1 WHERE t1.a in (coalesce((select max(e) from t1 where case case (select count(*) from t1)-~19 when t1.f*t1.d+coalesce((select max((select + -(((min(t1.b)))) | -(count(distinct t1.c)) | count(*) from t1)*11) from t1 where f>=case when d in ((d),t1.d,13) then 19 when 17=17 then 11 else a end),t1.e) then c else t1.f end when t1.e then f else t1.d end in (13,(13),c)),t1.d),t1.e,a)} +} {17} +do_test randexpr-2.421 { + db eval {SELECT coalesce((select max(case when 11+(select +abs(count(*)) from t1) not in (case when (case when not exists(select 1 from t1 where t1.d=f) then a else a end+11 in (select ((13)) from t1 union select t1.d from t1)) then (abs(t1.c)/abs(t1.b)) when a>=t1.c then b else f end,t1.a,c) then t1.a when b in (select cast(avg(t1.a) AS integer) | ~ -count(distinct 17) from t1 union select min(c) from t1) then b else -t1.e end) from t1 where d not between c and -t1.b and b=d and b between 13 and t1.c),(17)) FROM t1 WHERE NOT (t1.a in (coalesce((select max(e) from t1 where case case (select count(*) from t1)-~19 when t1.f*t1.d+coalesce((select max((select + -(((min(t1.b)))) | -(count(distinct t1.c)) | count(*) from t1)*11) from t1 where f>=case when d in ((d),t1.d,13) then 19 when 17=17 then 11 else a end),t1.e) then c else t1.f end when t1.e then f else t1.d end in (13,(13),c)),t1.d),t1.e,a))} +} {} +do_test randexpr-2.422 { + db eval {SELECT coalesce((select max(case when 11+(select +abs(count(*)) from t1) not in (case when (case when not exists(select 1 from t1 where t1.d=f) then a else a end+11 in (select ((13)) from t1 union select t1.d from t1)) then (abs(t1.c)/abs(t1.b)) when a>=t1.c then b else f end,t1.a,c) then t1.a when b in (select cast(avg(t1.a) AS integer) & ~ -count(distinct 17) from t1 union select min(c) from t1) then b else -t1.e end) from t1 where d not between c and -t1.b and b=d and b between 13 and t1.c),(17)) FROM t1 WHERE t1.a in (coalesce((select max(e) from t1 where case case (select count(*) from t1)-~19 when t1.f*t1.d+coalesce((select max((select + -(((min(t1.b)))) | -(count(distinct t1.c)) | count(*) from t1)*11) from t1 where f>=case when d in ((d),t1.d,13) then 19 when 17=17 then 11 else a end),t1.e) then c else t1.f end when t1.e then f else t1.d end in (13,(13),c)),t1.d),t1.e,a)} +} {17} +do_test randexpr-2.423 { + db eval {SELECT coalesce((select t1.a-case t1.a when c then coalesce((select case (abs(11-(abs(t1.b)/abs(t1.a)))/abs((abs(t1.c)/abs(t1.a))))-t1.f when t1.d then f else 19 end from t1 
where +t1.b*a in (select t1.e from t1 union select c from t1) and e between d and 11),t1.b) else a end from t1 where exists(select 1 from t1 where ((a in (13,c,d)))) and -t1.e in ((t1.d),t1.a,t1.b)),t1.c) FROM t1 WHERE ++e*19 in (case when case when not (d<>case when -case when (t1.a not between b and -e) then 17 when f<>a then -t1.d else t1.a end not in (a,a,c) then b when b<=c then e else b end) then 19 when (t1.b not in ( -13,t1.a,a)) then t1.b | 13 else e end* -19*t1.acase when -case when (t1.a not between b and -e) then 17 when f<>a then -t1.d else t1.a end not in (a,a,c) then b when b<=c then e else b end) then 19 when (t1.b not in ( -13,t1.a,a)) then t1.b | 13 else e end* -19*t1.a~19)),f) when a- -t1.a | t1.f then t1.a*11-t1.e else coalesce((select max(t1.b) from t1 where not exists(select 1 from t1 where 17=t1.a or b>=f)),c) | t1.b end>19),t1.a))+c)-t1.c*e)/abs(t1.d)) FROM t1 WHERE a-t1.b not in (a,a-19-c | t1.d-d | e*t1.c*b,t1.e+t1.a)} +} {375} +do_test randexpr-2.426 { + db eval {SELECT (abs(e-((coalesce((select max(t1.c) from t1 where t1.f-a-case coalesce((select max(d) from t1 where (13<>~19)),f) when a- -t1.a | t1.f then t1.a*11-t1.e else coalesce((select max(t1.b) from t1 where not exists(select 1 from t1 where 17=t1.a or b>=f)),c) | t1.b end>19),t1.a))+c)-t1.c*e)/abs(t1.d)) FROM t1 WHERE NOT (a-t1.b not in (a,a-19-c | t1.d-d | e*t1.c*b,t1.e+t1.a))} +} {} +do_test randexpr-2.427 { + db eval {SELECT (abs(e-((coalesce((select max(t1.c) from t1 where t1.f-a-case coalesce((select max(d) from t1 where (13<>~19)),f) when a- -t1.a & t1.f then t1.a*11-t1.e else coalesce((select max(t1.b) from t1 where not exists(select 1 from t1 where 17=t1.a or b>=f)),c) & t1.b end>19),t1.a))+c)-t1.c*e)/abs(t1.d)) FROM t1 WHERE a-t1.b not in (a,a-19-c | t1.d-d | e*t1.c*b,t1.e+t1.a)} +} {375} +do_test randexpr-2.428 { + db eval {SELECT t1.b-coalesce((select t1.a from t1 where (d<=f)),~~case b when coalesce((select coalesce((select t1.c from t1 where a+~+~(abs(t1.d)/abs(17)) | -d*a*(b++ -13)*b>=b),17) | a from t1 where a=coalesce((select t1.d+(select case min(a) | max(t1.b) when max(e) then count(*) else -count(distinct f) end from t1)-t1.f+( - - -19) from t1 where t1.c in (select 19 from t1 union select 17 from t1)),11))),19)+11 not in ((b),e,e) or t1.e in (select min(t1.f) from t1 union select count(*)- -(cast(avg(17) AS integer)) from t1) or e>=t1.c} +} {100} +do_test randexpr-2.429 { + db eval {SELECT t1.b-coalesce((select t1.a from t1 where (d<=f)),~~case b when coalesce((select coalesce((select t1.c from t1 where a+~+~(abs(t1.d)/abs(17)) | -d*a*(b++ -13)*b>=b),17) | a from t1 where a=coalesce((select t1.d+(select case min(a) | max(t1.b) when max(e) then count(*) else -count(distinct f) end from t1)-t1.f+( - - -19) from t1 where t1.c in (select 19 from t1 union select 17 from t1)),11))),19)+11 not in ((b),e,e) or t1.e in (select min(t1.f) from t1 union select count(*)- -(cast(avg(17) AS integer)) from t1) or e>=t1.c)} +} {} +do_test randexpr-2.430 { + db eval {SELECT t1.b-coalesce((select t1.a from t1 where (d<=f)),~~case b when coalesce((select coalesce((select t1.c from t1 where a+~+~(abs(t1.d)/abs(17)) & -d*a*(b++ -13)*b>=b),17) & a from t1 where a=coalesce((select t1.d+(select case min(a) | max(t1.b) when max(e) then count(*) else -count(distinct f) end from t1)-t1.f+( - - -19) from t1 where t1.c in (select 19 from t1 union select 17 from t1)),11))),19)+11 not in ((b),e,e) or t1.e in (select min(t1.f) from t1 union select count(*)- -(cast(avg(17) AS integer)) from t1) or e>=t1.c} +} {100} 
+do_test randexpr-2.431 { + db eval {SELECT coalesce((select max(case when e<=t1.d then f*+case 17 when 17 then (abs((select abs(cast(avg(13) AS integer)+min(t1.b)) from t1))/abs(t1.c)) else d end*t1.c else a end) from t1 where coalesce((select t1.c from t1 where a<>t1.e+f-t1.c-a),+t1.a)+t1.d*case when d in (select t1.c from t1 union select 11 from t1) then a when 19 not in (d,e,b) then 11 else 19 end | t1.b in (select 19 from t1 union select t1.a from t1)),17)*11 FROM t1 WHERE 17>=t1.b} +} {} +do_test randexpr-2.432 { + db eval {SELECT coalesce((select max(case when e<=t1.d then f*+case 17 when 17 then (abs((select abs(cast(avg(13) AS integer)+min(t1.b)) from t1))/abs(t1.c)) else d end*t1.c else a end) from t1 where coalesce((select t1.c from t1 where a<>t1.e+f-t1.c-a),+t1.a)+t1.d*case when d in (select t1.c from t1 union select 11 from t1) then a when 19 not in (d,e,b) then 11 else 19 end | t1.b in (select 19 from t1 union select t1.a from t1)),17)*11 FROM t1 WHERE NOT (17>=t1.b)} +} {187} +do_test randexpr-2.433 { + db eval {SELECT coalesce((select max(case when e<=t1.d then f*+case 17 when 17 then (abs((select abs(cast(avg(13) AS integer)+min(t1.b)) from t1))/abs(t1.c)) else d end*t1.c else a end) from t1 where coalesce((select t1.c from t1 where a<>t1.e+f-t1.c-a),+t1.a)+t1.d*case when d in (select t1.c from t1 union select 11 from t1) then a when 19 not in (d,e,b) then 11 else 19 end & t1.b in (select 19 from t1 union select t1.a from t1)),17)*11 FROM t1 WHERE NOT (17>=t1.b)} +} {187} +do_test randexpr-2.434 { + db eval {SELECT case when coalesce((select max(case f when (abs(t1.d)/abs(case when +~e not in (t1.d,c,t1.a) or 17 in (select count(distinct -b) from t1 union select count(distinct d) from t1) and d<=t1.b or b>(f) then coalesce((select c from t1 where t1.ff or t1.b<> -t1.c), -t1.d)<11 then c else f end FROM t1 WHERE not exists(select 1 from t1 where exists(select 1 from t1 where case -case when case t1.c-t1.b-e when t1.b then t1.e else -t1.c+~17 | coalesce((select max(t1.c) from t1 where 19 not between -(a)-t1.c and 17),t1.c) end in (select (t1.f) from t1 union select b from t1) then e when c<>19 then t1.b else f end when -11 then 11 else 19 end in ( -19,t1.a,d)) and not 17(f) then coalesce((select c from t1 where t1.ff or t1.b<> -t1.c), -t1.d)<11 then c else f end FROM t1 WHERE NOT (not exists(select 1 from t1 where exists(select 1 from t1 where case -case when case t1.c-t1.b-e when t1.b then t1.e else -t1.c+~17 | coalesce((select max(t1.c) from t1 where 19 not between -(a)-t1.c and 17),t1.c) end in (select (t1.f) from t1 union select b from t1) then e when c<>19 then t1.b else f end when -11 then 11 else 19 end in ( -19,t1.a,d)) and not 17a) then t1.f else t1.c end FROM t1 WHERE case when (((case coalesce((select -t1.b-t1.b*19*c | 19 from t1 where (exists(select 1 from t1 where e>=t1.a))),t1.a)+11 when d then t1.e else t1.c end=t1.d))) then t1.b when not not exists(select 1 from t1 where t1.f<(t1.a)) and t1.d>=f or t1.c>t1.a or t1.b between t1.f and 19 then t1.c else ~t1.c end=11 or (t1.e) not between 13 and t1.c} +} {600} +do_test randexpr-2.440 { + db eval {SELECT case when (ba) then t1.f else t1.c end FROM t1 WHERE NOT (case when (((case coalesce((select -t1.b-t1.b*19*c | 19 from t1 where (exists(select 1 from t1 where e>=t1.a))),t1.a)+11 when d then t1.e else t1.c end=t1.d))) then t1.b when not not exists(select 1 from t1 where t1.f<(t1.a)) and t1.d>=f or t1.c>t1.a or t1.b between t1.f and 19 then t1.c else ~t1.c end=11 or (t1.e) not between 13 and t1.c)} +} {} +do_test 
randexpr-2.441 { + db eval {SELECT case when (ba) then t1.f else t1.c end FROM t1 WHERE case when (((case coalesce((select -t1.b-t1.b*19*c | 19 from t1 where (exists(select 1 from t1 where e>=t1.a))),t1.a)+11 when d then t1.e else t1.c end=t1.d))) then t1.b when not not exists(select 1 from t1 where t1.f<(t1.a)) and t1.d>=f or t1.c>t1.a or t1.b between t1.f and 19 then t1.c else ~t1.c end=11 or (t1.e) not between 13 and t1.c} +} {300} +do_test randexpr-2.442 { + db eval {SELECT case when not t1.d not in (coalesce((select 17 | -coalesce((select (select min(a-t1.a+t1.a-t1.c*t1.e) from t1) from t1 where t1.d<=a),11) | 19 from t1 where not ( -13 in ( -t1.d,t1.d,19))),e)+11,11,13) then e when ((t1.bt1.f and a>=17))} +} {} +do_test randexpr-2.443 { + db eval {SELECT case when not t1.d not in (coalesce((select 17 | -coalesce((select (select min(a-t1.a+t1.a-t1.c*t1.e) from t1) from t1 where t1.d<=a),11) | 19 from t1 where not ( -13 in ( -t1.d,t1.d,19))),e)+11,11,13) then e when ((t1.bt1.f and a>=17)))} +} {100} +do_test randexpr-2.444 { + db eval {SELECT case when not t1.d not in (coalesce((select 17 & -coalesce((select (select min(a-t1.a+t1.a-t1.c*t1.e) from t1) from t1 where t1.d<=a),11) & 19 from t1 where not ( -13 in ( -t1.d,t1.d,19))),e)+11,11,13) then e when ((t1.bt1.f and a>=17)))} +} {100} +do_test randexpr-2.445 { + db eval {SELECT e+c-(c)+t1.e*(abs(case when 13 in (select abs(max(17)*case -count(*) when cast(avg(a) AS integer) then count(distinct b) else -max(f) end | (cast(avg(t1.e) AS integer)) | -min(f)) from t1 union select count(distinct d) from t1) and case when (t1.e+a> -(a)) then e when not c=e)),coalesce((select max(case t1.e when t1.c then t1.a else a end) from t1 where b>=t1.c and b<>t1.c),13)) then t1.b else a end<11 then b when (11 in (select a from t1 union select t1.b from t1)) or t1.b=e or a between a and 13 then t1.b else b end>19} +} {1500} +do_test randexpr-2.446 { + db eval {SELECT e+c-(c)+t1.e*(abs(case when 13 in (select abs(max(17)*case -count(*) when cast(avg(a) AS integer) then count(distinct b) else -max(f) end | (cast(avg(t1.e) AS integer)) | -min(f)) from t1 union select count(distinct d) from t1) and case when (t1.e+a> -(a)) then e when not c=e)),coalesce((select max(case t1.e when t1.c then t1.a else a end) from t1 where b>=t1.c and b<>t1.c),13)) then t1.b else a end<11 then b when (11 in (select a from t1 union select t1.b from t1)) or t1.b=e or a between a and 13 then t1.b else b end>19)} +} {} +do_test randexpr-2.447 { + db eval {SELECT e+c-(c)+t1.e*(abs(case when 13 in (select abs(max(17)*case -count(*) when cast(avg(a) AS integer) then count(distinct b) else -max(f) end & (cast(avg(t1.e) AS integer)) & -min(f)) from t1 union select count(distinct d) from t1) and case when (t1.e+a> -(a)) then e when not c=e)),coalesce((select max(case t1.e when t1.c then t1.a else a end) from t1 where b>=t1.c and b<>t1.c),13)) then t1.b else a end<11 then b when (11 in (select a from t1 union select t1.b from t1)) or t1.b=e or a between a and 13 then t1.b else b end>19} +} {1500} +do_test randexpr-2.448 { + db eval {SELECT 13-coalesce((select t1.a*a from t1 where not exists(select 1 from t1 where 17 not between case +coalesce((select -case 19 when a*b then 19 else f end from t1 where (t1.d-t1.e between case f when (select abs(case abs( -min(13)) when min(t1.b) then -(count(distinct f)) else min(t1.b) end) from t1) then 17*( -13) | f else t1.e end and -t1.a)),d)*c when 13 then 11 else t1.a end and c)),t1.a)*19 FROM t1 WHERE ((not f-c<=(abs(a)/abs(+t1.a)) or t1.e not 
between coalesce((select 11 from t1 where - -a*t1.d*case coalesce((select max(d) from t1 where ((t1.b<=t1.c))),case when f>t1.d or t1.d=t1.a or t1.c<=c then a else -c end*t1.b) when (f) then 11 else 19 end-t1.a<=b),c) and 13))} +} {-1887} +do_test randexpr-2.449 { + db eval {SELECT 13-coalesce((select t1.a*a from t1 where not exists(select 1 from t1 where 17 not between case +coalesce((select -case 19 when a*b then 19 else f end from t1 where (t1.d-t1.e between case f when (select abs(case abs( -min(13)) when min(t1.b) then -(count(distinct f)) else min(t1.b) end) from t1) then 17*( -13) | f else t1.e end and -t1.a)),d)*c when 13 then 11 else t1.a end and c)),t1.a)*19 FROM t1 WHERE NOT (((not f-c<=(abs(a)/abs(+t1.a)) or t1.e not between coalesce((select 11 from t1 where - -a*t1.d*case coalesce((select max(d) from t1 where ((t1.b<=t1.c))),case when f>t1.d or t1.d=t1.a or t1.c<=c then a else -c end*t1.b) when (f) then 11 else 19 end-t1.a<=b),c) and 13)))} +} {} +do_test randexpr-2.450 { + db eval {SELECT 13-coalesce((select t1.a*a from t1 where not exists(select 1 from t1 where 17 not between case +coalesce((select -case 19 when a*b then 19 else f end from t1 where (t1.d-t1.e between case f when (select abs(case abs( -min(13)) when min(t1.b) then -(count(distinct f)) else min(t1.b) end) from t1) then 17*( -13) & f else t1.e end and -t1.a)),d)*c when 13 then 11 else t1.a end and c)),t1.a)*19 FROM t1 WHERE ((not f-c<=(abs(a)/abs(+t1.a)) or t1.e not between coalesce((select 11 from t1 where - -a*t1.d*case coalesce((select max(d) from t1 where ((t1.b<=t1.c))),case when f>t1.d or t1.d=t1.a or t1.c<=c then a else -c end*t1.b) when (f) then 11 else 19 end-t1.a<=b),c) and 13))} +} {-1887} +do_test randexpr-2.451 { + db eval {SELECT ~case when (t1.d | 17 in (b*t1.e,19*case when t1.d<= -a then t1.d*~t1.c*e*t1.c-19 else ~t1.b+17 end-d,(t1.c))) then +t1.d else (abs( -case t1.e when t1.c then (t1.b) else t1.f end-11+ -a)/abs(f))+a end FROM t1 WHERE not exists(select 1 from t1 where not c in (select case count(distinct (f)-19) when ~( -+~cast(avg(c) AS integer) | count(distinct 11)-max(d)+min(t1.a)+ -( -count(distinct b))+(count(*)))-count(*)+cast(avg(c) AS integer) then count(distinct d) else ((max(t1.b))) end-min(b) from t1 union select min(t1.c) from t1)) and t1.c in (select +d from t1 union select case when c+b=17 then a else 13 end+11 from t1)} +} {} +do_test randexpr-2.452 { + db eval {SELECT ~case when (t1.d | 17 in (b*t1.e,19*case when t1.d<= -a then t1.d*~t1.c*e*t1.c-19 else ~t1.b+17 end-d,(t1.c))) then +t1.d else (abs( -case t1.e when t1.c then (t1.b) else t1.f end-11+ -a)/abs(f))+a end FROM t1 WHERE NOT (not exists(select 1 from t1 where not c in (select case count(distinct (f)-19) when ~( -+~cast(avg(c) AS integer) | count(distinct 11)-max(d)+min(t1.a)+ -( -count(distinct b))+(count(*)))-count(*)+cast(avg(c) AS integer) then count(distinct d) else ((max(t1.b))) end-min(b) from t1 union select min(t1.c) from t1)) and t1.c in (select +d from t1 union select case when c+b=17 then a else 13 end+11 from t1))} +} {-102} +do_test randexpr-2.453 { + db eval {SELECT ~case when (t1.d & 17 in (b*t1.e,19*case when t1.d<= -a then t1.d*~t1.c*e*t1.c-19 else ~t1.b+17 end-d,(t1.c))) then +t1.d else (abs( -case t1.e when t1.c then (t1.b) else t1.f end-11+ -a)/abs(f))+a end FROM t1 WHERE NOT (not exists(select 1 from t1 where not c in (select case count(distinct (f)-19) when ~( -+~cast(avg(c) AS integer) | count(distinct 11)-max(d)+min(t1.a)+ -( -count(distinct b))+(count(*)))-count(*)+cast(avg(c) AS integer) 
then count(distinct d) else ((max(t1.b))) end-min(b) from t1 union select min(t1.c) from t1)) and t1.c in (select +d from t1 union select case when c+b=17 then a else 13 end+11 from t1))} +} {-102} +do_test randexpr-2.454 { + db eval {SELECT (select case case min(d) when abs(cast(avg((abs(a)/abs(coalesce((select (select abs(cast(avg(t1.f) AS integer)) from t1) from t1 where (case when t1.e in (select 11 from t1 union select 17 from t1) then -t1.e- -t1.e else a end<>a) and not not exists(select 1 from t1 where t1.b in (select ~+max( -t1.b) | max(t1.c) from t1 union select count(distinct -t1.a) from t1))),t1.a)))) AS integer)) then (count(*) | ((count(distinct b)))-count(*)) else min(11) end when max(a) then count(distinct f) else (min(17)) end from t1) FROM t1 WHERE coalesce((select max(13) from t1 where t1.b not in ( -case when 13 not between 17 and b then f when t1.e+~+e between coalesce((select max(case 19 when e then 17 else coalesce((select coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.b not between t1.a and a)),e)*b*t1.f from t1 where t1.f>=t1.c),c) end) from t1 where e>b),t1.f) | f and f then c else 11 end,(c),t1.e)),19)-t1.c in (select t1.a from t1 union select e from t1)} +} {} +do_test randexpr-2.455 { + db eval {SELECT (select case case min(d) when abs(cast(avg((abs(a)/abs(coalesce((select (select abs(cast(avg(t1.f) AS integer)) from t1) from t1 where (case when t1.e in (select 11 from t1 union select 17 from t1) then -t1.e- -t1.e else a end<>a) and not not exists(select 1 from t1 where t1.b in (select ~+max( -t1.b) | max(t1.c) from t1 union select count(distinct -t1.a) from t1))),t1.a)))) AS integer)) then (count(*) | ((count(distinct b)))-count(*)) else min(11) end when max(a) then count(distinct f) else (min(17)) end from t1) FROM t1 WHERE NOT (coalesce((select max(13) from t1 where t1.b not in ( -case when 13 not between 17 and b then f when t1.e+~+e between coalesce((select max(case 19 when e then 17 else coalesce((select coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.b not between t1.a and a)),e)*b*t1.f from t1 where t1.f>=t1.c),c) end) from t1 where e>b),t1.f) | f and f then c else 11 end,(c),t1.e)),19)-t1.c in (select t1.a from t1 union select e from t1))} +} {17} +do_test randexpr-2.456 { + db eval {SELECT (select case case min(d) when abs(cast(avg((abs(a)/abs(coalesce((select (select abs(cast(avg(t1.f) AS integer)) from t1) from t1 where (case when t1.e in (select 11 from t1 union select 17 from t1) then -t1.e- -t1.e else a end<>a) and not not exists(select 1 from t1 where t1.b in (select ~+max( -t1.b) & max(t1.c) from t1 union select count(distinct -t1.a) from t1))),t1.a)))) AS integer)) then (count(*) & ((count(distinct b)))-count(*)) else min(11) end when max(a) then count(distinct f) else (min(17)) end from t1) FROM t1 WHERE NOT (coalesce((select max(13) from t1 where t1.b not in ( -case when 13 not between 17 and b then f when t1.e+~+e between coalesce((select max(case 19 when e then 17 else coalesce((select coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.b not between t1.a and a)),e)*b*t1.f from t1 where t1.f>=t1.c),c) end) from t1 where e>b),t1.f) | f and f then c else 11 end,(c),t1.e)),19)-t1.c in (select t1.a from t1 union select e from t1))} +} {17} +do_test randexpr-2.457 { + db eval {SELECT (a*f+c | ~b*(select cast(avg(19) AS integer) from t1)+17*(select max(a*b-d) from t1)+(abs((select count(distinct b) from t1))/abs(case when -13*c between t1.f and t1.c then c when not 17 between 17 
and 11 and 17<=((t1.c)) then c else a end*t1.c))-a*a)-c FROM t1 WHERE exists(select 1 from t1 where t1.e-11+t1.d*e+(abs(coalesce((select max(t1.b) from t1 where 19-13- -coalesce((select c*t1.e+t1.f*f from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where (17<=d or -11 in (select cast(avg(c) AS integer) from t1 union select +count(*) from t1))))),t1.b)=t1.c or t1.b>t1.c or 19 not between c and t1.e then 17 else t1.b end) from t1 where (e<=t1.b)),11) FROM t1 WHERE t1.a in (f,t1.d*+a,t1.c* -(select (abs(case +(max(coalesce((select max((select count(*)-cast(avg(b) AS integer) from t1)) from t1 where (c=t1.c or t1.b>t1.c or 19 not between c and t1.e then 17 else t1.b end) from t1 where (e<=t1.b)),11) FROM t1 WHERE NOT (t1.a in (f,t1.d*+a,t1.c* -(select (abs(case +(max(coalesce((select max((select count(*)-cast(avg(b) AS integer) from t1)) from t1 where (ce then case t1.d when 13 then t1.c else 19 end-13 else t1.e+coalesce((select max(19) from t1 where case when e in (+b,coalesce((select a+(11) from t1 where b not between c and d or t1.b not in (a,11,17)),b),11) then 19 when (c) not in (t1.c,t1.b,d) then t1.c else t1.d end*f not between t1.a and d),13) end when c then 11 else c end) FROM t1 WHERE 19 not between coalesce((select max(d*t1.c) from t1 where (select cast(avg((b)) AS integer) from t1) in (select ( - -(abs(case max(b) when cast(avg( -13-11) AS integer) then count(distinct a-e) else ~ -cast(avg((c)) AS integer)-min(17)+count(*) end) | (min(e)))-min((19))) from t1 union select count(distinct 13) from t1)),case when -e | 19 between -11 and f then 19 else 19 end) and (f)} +} {} +do_test randexpr-2.463 { + db eval {SELECT (case case when +c>e then case t1.d when 13 then t1.c else 19 end-13 else t1.e+coalesce((select max(19) from t1 where case when e in (+b,coalesce((select a+(11) from t1 where b not between c and d or t1.b not in (a,11,17)),b),11) then 19 when (c) not in (t1.c,t1.b,d) then t1.c else t1.d end*f not between t1.a and d),13) end when c then 11 else c end) FROM t1 WHERE NOT (19 not between coalesce((select max(d*t1.c) from t1 where (select cast(avg((b)) AS integer) from t1) in (select ( - -(abs(case max(b) when cast(avg( -13-11) AS integer) then count(distinct a-e) else ~ -cast(avg((c)) AS integer)-min(17)+count(*) end) | (min(e)))-min((19))) from t1 union select count(distinct 13) from t1)),case when -e | 19 between -11 and f then 19 else 19 end) and (f))} +} {300} +do_test randexpr-2.464 { + db eval {SELECT (abs((select abs(case -max(e)+max(t1.f)+(count(*)* -count(distinct case e when 19 then c else 13 end))*~min(t1.a) when cast(avg(t1.a) AS integer) then -min(e) else -count(*) end) from t1)*coalesce((select (t1.a) from t1 where case when c+t1.b-b | d in (select max(d) from t1 union select -max(19) from t1) then t1.b else f end=t1.d),t1.b)*(t1.d) | e+t1.a)/abs(t1.d)) FROM t1 WHERE t1.f in (19,d*t1.d,(select min((select count(*)*min((17)+(abs(11)/abs(+~coalesce((select max(f) from t1 where 13 not between t1.c and t1.a),t1.e)-11))) from t1)) from t1)+17)} +} {} +do_test randexpr-2.465 { + db eval {SELECT (abs((select abs(case -max(e)+max(t1.f)+(count(*)* -count(distinct case e when 19 then c else 13 end))*~min(t1.a) when cast(avg(t1.a) AS integer) then -min(e) else -count(*) end) from t1)*coalesce((select (t1.a) from t1 where case when c+t1.b-b | d in (select max(d) from t1 union select -max(19) from t1) then t1.b else f end=t1.d),t1.b)*(t1.d) | e+t1.a)/abs(t1.d)) FROM t1 WHERE NOT (t1.f in (19,d*t1.d,(select min((select 
count(*)*min((17)+(abs(11)/abs(+~coalesce((select max(f) from t1 where 13 not between t1.c and t1.a),t1.e)-11))) from t1)) from t1)+17))} +} {201} +do_test randexpr-2.466 { + db eval {SELECT (abs((select abs(case -max(e)+max(t1.f)+(count(*)* -count(distinct case e when 19 then c else 13 end))*~min(t1.a) when cast(avg(t1.a) AS integer) then -min(e) else -count(*) end) from t1)*coalesce((select (t1.a) from t1 where case when c+t1.b-b & d in (select max(d) from t1 union select -max(19) from t1) then t1.b else f end=t1.d),t1.b)*(t1.d) & e+t1.a)/abs(t1.d)) FROM t1 WHERE NOT (t1.f in (19,d*t1.d,(select min((select count(*)*min((17)+(abs(11)/abs(+~coalesce((select max(f) from t1 where 13 not between t1.c and t1.a),t1.e)-11))) from t1)) from t1)+17))} +} {0} +do_test randexpr-2.467 { + db eval {SELECT d+case when (coalesce((select max(t1.a) from t1 where t1.a not in (t1.d,f*(select count(distinct t1.d) from t1)+t1.f,c)),b)-c-17-t1.c)-case when 13*t1.a-19*e<>11 then f when t1.f not between 11 and t1.d then b else f end=t1.d then t1.f when not e>t1.f then 17 else b end FROM t1 WHERE exists(select 1 from t1 where 11* -d>=e) or t1.c<+ -e*11 or (17 not between t1.d and case t1.a when t1.a then t1.d+f-b else 11 end | t1.e)} +} {417} +do_test randexpr-2.468 { + db eval {SELECT d+case when (coalesce((select max(t1.a) from t1 where t1.a not in (t1.d,f*(select count(distinct t1.d) from t1)+t1.f,c)),b)-c-17-t1.c)-case when 13*t1.a-19*e<>11 then f when t1.f not between 11 and t1.d then b else f end=t1.d then t1.f when not e>t1.f then 17 else b end FROM t1 WHERE NOT (exists(select 1 from t1 where 11* -d>=e) or t1.c<+ -e*11 or (17 not between t1.d and case t1.a when t1.a then t1.d+f-b else 11 end | t1.e))} +} {} +do_test randexpr-2.469 { + db eval {SELECT t1.f-coalesce((select max(+e | b) from t1 where case when -(abs(11)/abs(~(19 | t1.d)))-~c*e not in (19,b*t1.b,b) then 11 when f<=13 then d else t1.d end in (select (count(*))-cast(avg(11 | 17*t1.b) AS integer) from t1 union select count(distinct a-17) from t1)),t1.c)-c FROM t1 WHERE case when (t1.f*a<=11+d+coalesce((select max(19+19) from t1 where 13 in (select min(f)+cast(avg((abs(t1.e)/abs((select count(distinct c) from t1)))) AS integer)+(~cast(avg(e) AS integer))-min(c) from t1 union select count(*) from t1)),b)) then (a) else t1.b end-t1.b<=t1.b and 13 in (select t1.e from t1 union select t1.e from t1) and a=t1.d or (exists(select 1 from t1 where (t1.e=t1.c)))),t1.c)-11 FROM t1 WHERE (not exists(select 1 from t1 where 11+t1.c-13>=t1.c or not exists(select 1 from t1 where a in (select b from t1 union select 19-d from t1) and ((abs(a+(abs(coalesce((select t1.d from t1 where exists(select 1 from t1 where case when t1.e>= -13 or -e<>f then d when t1.c=13 then (b) else t1.c end not between 13 and b)),d))/abs(t1.d))-b)/abs(t1.a))<> -13 and exists(select 1 from t1 where 19<>e) or 13>=19))))} +} {} +do_test randexpr-2.473 { + db eval {SELECT coalesce((select max(t1.c) from t1 where case when (t1.e*19*f-t1.b in (select cast(avg(d-~d) AS integer) from t1 union select +( -case +~count(*)*count(distinct (c)) when cast(avg(19) AS integer) then min(c) else count(distinct 17) end*cast(avg((13)) AS integer)-max(11)+(count(distinct b))) from t1)) then +11 else t1.b end>=t1.d or (exists(select 1 from t1 where (t1.e=t1.c)))),t1.c)-11 FROM t1 WHERE NOT ((not exists(select 1 from t1 where 11+t1.c-13>=t1.c or not exists(select 1 from t1 where a in (select b from t1 union select 19-d from t1) and ((abs(a+(abs(coalesce((select t1.d from t1 where exists(select 1 from t1 where 
case when t1.e>= -13 or -e<>f then d when t1.c=13 then (b) else t1.c end not between 13 and b)),d))/abs(t1.d))-b)/abs(t1.a))<> -13 and exists(select 1 from t1 where 19<>e) or 13>=19)))))} +} {289} +do_test randexpr-2.474 { + db eval {SELECT +coalesce((select max(11) from t1 where not case d+t1.c*coalesce((select max(coalesce((select d | e from t1 where 17 not between coalesce((select (t1.d) from t1 where t1.a between (select max(t1.b+d+t1.a) from t1)-t1.e and 17),t1.a) and 11),19)) from t1 where t1.dt1.d*a) then (abs(~a)/abs(+11))*f when (c-(coalesce((select 11 from t1 where t1.d in (case when (19)+11>=c then 19 when e<>b then 13 else c end,a,13)),17))+13) not between 11 and t1.c then (t1.b) else 17 end+b FROM t1 WHERE a*b*11 in (select case when not coalesce((select max(~13 | (select abs((count(distinct t1.f))*cast(avg(t1.f) AS integer)+(max(t1.e))) from t1)) from t1 where coalesce((select max(11) from t1 where t1.b=19),b) in (f,11,19) and t1.f between a and t1.c or 19=t1.a and d=17 or c>=t1.f or t1.d<>t1.c),11)*c in (select f from t1 union select -t1.b from t1) then t1.c when c not between f and 13 then e else -e end from t1 union select e from t1)} +} {} +do_test randexpr-2.478 { + db eval {SELECT case when ((select -min(e)+cast(avg(t1.a) AS integer) from t1)>t1.d*a) then (abs(~a)/abs(+11))*f when (c-(coalesce((select 11 from t1 where t1.d in (case when (19)+11>=c then 19 when e<>b then 13 else c end,a,13)),17))+13) not between 11 and t1.c then (t1.b) else 17 end+b FROM t1 WHERE NOT (a*b*11 in (select case when not coalesce((select max(~13 | (select abs((count(distinct t1.f))*cast(avg(t1.f) AS integer)+(max(t1.e))) from t1)) from t1 where coalesce((select max(11) from t1 where t1.b=19),b) in (f,11,19) and t1.f between a and t1.c or 19=t1.a and d=17 or c>=t1.f or t1.d<>t1.c),11)*c in (select f from t1 union select -t1.b from t1) then t1.c when c not between f and 13 then e else -e end from t1 union select e from t1))} +} {217} +do_test randexpr-2.479 { + db eval {SELECT t1.d-coalesce((select -t1.c from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where t1.d<=case t1.d+e when case when d>19 then 11+t1.c else t1.b end-t1.f-t1.e then (abs(t1.d-19*t1.b)/abs(13+case when t1.f in (select count(*)-count(distinct t1.b) from t1 union select (max((t1.f))) from t1) then (((t1.d))) when t1.b not in (t1.d,13,a) then t1.d else 19 end*t1.c))-t1.d else e end))),13)+c*t1.a FROM t1 WHERE exists(select 1 from t1 where ~coalesce((select a+t1.a from t1 where ~(abs(c)/abs(17 | +13))-t1.f in (select f from t1 union select case when t1.b between 11*e and f then e when a in (e* -t1.f-f,coalesce((select 17 from t1 where t1.e in (select (cast(avg(e) AS integer)) from t1 union select max(b) from t1)),a),d) then t1.c else (f) end from t1)),f) between -19 and (13) or t1.f<>t1.a)} +} {30700} +do_test randexpr-2.480 { + db eval {SELECT t1.d-coalesce((select -t1.c from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where t1.d<=case t1.d+e when case when d>19 then 11+t1.c else t1.b end-t1.f-t1.e then (abs(t1.d-19*t1.b)/abs(13+case when t1.f in (select count(*)-count(distinct t1.b) from t1 union select (max((t1.f))) from t1) then (((t1.d))) when t1.b not in (t1.d,13,a) then t1.d else 19 end*t1.c))-t1.d else e end))),13)+c*t1.a FROM t1 WHERE NOT (exists(select 1 from t1 where ~coalesce((select a+t1.a from t1 where ~(abs(c)/abs(17 | +13))-t1.f in (select f from t1 union select case when t1.b between 11*e and f then e when a in (e* -t1.f-f,coalesce((select 17 from t1 where 
t1.e in (select (cast(avg(e) AS integer)) from t1 union select max(b) from t1)),a),d) then t1.c else (f) end from t1)),f) between -19 and (13) or t1.f<>t1.a))} +} {} +do_test randexpr-2.481 { + db eval {SELECT (abs((abs(t1.b*t1.b*t1.f+a)/abs(t1.f))*t1.a-case -(abs(coalesce((select (case when 11*19 in (select count(*) from t1 union select -min(13)-(max(t1.a)) from t1) then 19 else -17 end) from t1 where t1.e between 13 and (a)),t1.b)+d)/abs(t1.f))-t1.b-t1.c | d when -t1.f then c else 17 end)/abs(13))-b*t1.d FROM t1 WHERE 13 in (t1.b,17*e+c*a,coalesce((select max(d) from t1 where +f+b not in (a,t1.a*~t1.c | t1.b-case when t1.a<=case when t1.f in (b,11,e) then d else t1.e end+f then 19 else (f) end-b*13- -t1.f-17,t1.c)),17))} +} {} +do_test randexpr-2.482 { + db eval {SELECT (abs((abs(t1.b*t1.b*t1.f+a)/abs(t1.f))*t1.a-case -(abs(coalesce((select (case when 11*19 in (select count(*) from t1 union select -min(13)-(max(t1.a)) from t1) then 19 else -17 end) from t1 where t1.e between 13 and (a)),t1.b)+d)/abs(t1.f))-t1.b-t1.c | d when -t1.f then c else 17 end)/abs(13))-b*t1.d FROM t1 WHERE NOT (13 in (t1.b,17*e+c*a,coalesce((select max(d) from t1 where +f+b not in (a,t1.a*~t1.c | t1.b-case when t1.a<=case when t1.f in (b,11,e) then d else t1.e end+f then 19 else (f) end-b*13- -t1.f-17,t1.c)),17)))} +} {227691} +do_test randexpr-2.483 { + db eval {SELECT (abs((abs(t1.b*t1.b*t1.f+a)/abs(t1.f))*t1.a-case -(abs(coalesce((select (case when 11*19 in (select count(*) from t1 union select -min(13)-(max(t1.a)) from t1) then 19 else -17 end) from t1 where t1.e between 13 and (a)),t1.b)+d)/abs(t1.f))-t1.b-t1.c & d when -t1.f then c else 17 end)/abs(13))-b*t1.d FROM t1 WHERE NOT (13 in (t1.b,17*e+c*a,coalesce((select max(d) from t1 where +f+b not in (a,t1.a*~t1.c | t1.b-case when t1.a<=case when t1.f in (b,11,e) then d else t1.e end+f then 19 else (f) end-b*13- -t1.f-17,t1.c)),17)))} +} {227691} +do_test randexpr-2.484 { + db eval {SELECT coalesce((select t1.c*f from t1 where t1.f-+coalesce((select max(t1.d) from t1 where - -t1.e in (select f from t1 union select t1.e from t1)),a)+case when 19 not in (t1.e,case when ~t1.a<>e+a then -t1.d else t1.e end,f) then t1.b when -(t1.e) in (e,(19),t1.c) then t1.f else e end+17*t1.c+d>b),t1.c)+a FROM t1 WHERE t1.b>=~b} +} {180100} +do_test randexpr-2.485 { + db eval {SELECT coalesce((select t1.c*f from t1 where t1.f-+coalesce((select max(t1.d) from t1 where - -t1.e in (select f from t1 union select t1.e from t1)),a)+case when 19 not in (t1.e,case when ~t1.a<>e+a then -t1.d else t1.e end,f) then t1.b when -(t1.e) in (e,(19),t1.c) then t1.f else e end+17*t1.c+d>b),t1.c)+a FROM t1 WHERE NOT (t1.b>=~b)} +} {} +do_test randexpr-2.486 { + db eval {SELECT case case when case when 13 between coalesce((select max(t1.f) from t1 where b+f in (b+(coalesce((select t1.f from t1 where (t1.c)>=17 or 19 not in (11,t1.e,c)),13)),b,t1.e) and c<=e and not t1.b<>13 and 19 not between t1.c and a or c between f and f),t1.c) and b then t1.b when 13<>t1.a then 11 else d end in (select 13 from t1 union select 19 from t1) then t1.c when t1.f not in (17,b,b) then t1.f else e end when t1.e then t1.e else -d end FROM t1 WHERE case when not exists(select 1 from t1 where t1.f not between coalesce((select coalesce((select max(+case t1.a when f then t1.a else t1.e end+case when t1.f>t1.d or 11 in (t1.e,11,b) then b else a end) from t1 where t1.f in (select a from t1 union select d from t1)),t1.d) from t1 where t1.a=b),(t1.c)) and d) then (select case max(t1.a) when ~min((11)) | min(e) then max(t1.e) 
else abs(max(t1.c)*count(distinct 19)) end from t1) else t1.b end<>b} +} {} +do_test randexpr-2.487 { + db eval {SELECT case case when case when 13 between coalesce((select max(t1.f) from t1 where b+f in (b+(coalesce((select t1.f from t1 where (t1.c)>=17 or 19 not in (11,t1.e,c)),13)),b,t1.e) and c<=e and not t1.b<>13 and 19 not between t1.c and a or c between f and f),t1.c) and b then t1.b when 13<>t1.a then 11 else d end in (select 13 from t1 union select 19 from t1) then t1.c when t1.f not in (17,b,b) then t1.f else e end when t1.e then t1.e else -d end FROM t1 WHERE NOT (case when not exists(select 1 from t1 where t1.f not between coalesce((select coalesce((select max(+case t1.a when f then t1.a else t1.e end+case when t1.f>t1.d or 11 in (t1.e,11,b) then b else a end) from t1 where t1.f in (select a from t1 union select d from t1)),t1.d) from t1 where t1.a=b),(t1.c)) and d) then (select case max(t1.a) when ~min((11)) | min(e) then max(t1.e) else abs(max(t1.c)*count(distinct 19)) end from t1) else t1.b end<>b)} +} {-400} +do_test randexpr-2.488 { + db eval {SELECT case -coalesce((select max(case when case when t1.e in (11*t1.e*17,(select (min(b) | - -+cast(avg(17) AS integer)*max((t1.c))+cast(avg(t1.a) AS integer))+(( -(min(t1.c))))*count(*) from t1), -~b*t1.d+t1.d) then d else t1.a end<(t1.c) then t1.f else e end) from t1 where 13<>c),e)-(t1.b) when t1.a then 17 else t1.f end FROM t1 WHERE coalesce((select max(t1.e) from t1 where (not exists(select 1 from t1 where b<+c))),case when b | ~case t1.a when -t1.b then -t1.f else t1.f end*f+f in (select t1.b from t1 union select t1.c from t1) then -(t1.f) when a<>11 then (d) else 11 end) in (select abs(~(abs(case (min(t1.b)*abs(count(distinct a)+count(*) | -count(distinct a)))- -max(19) when max(t1.e) then min(d) else (max(a)) end))) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.489 { + db eval {SELECT case -coalesce((select max(case when case when t1.e in (11*t1.e*17,(select (min(b) | - -+cast(avg(17) AS integer)*max((t1.c))+cast(avg(t1.a) AS integer))+(( -(min(t1.c))))*count(*) from t1), -~b*t1.d+t1.d) then d else t1.a end<(t1.c) then t1.f else e end) from t1 where 13<>c),e)-(t1.b) when t1.a then 17 else t1.f end FROM t1 WHERE NOT (coalesce((select max(t1.e) from t1 where (not exists(select 1 from t1 where b<+c))),case when b | ~case t1.a when -t1.b then -t1.f else t1.f end*f+f in (select t1.b from t1 union select t1.c from t1) then -(t1.f) when a<>11 then (d) else 11 end) in (select abs(~(abs(case (min(t1.b)*abs(count(distinct a)+count(*) | -count(distinct a)))- -max(19) when max(t1.e) then min(d) else (max(a)) end))) from t1 union select count(*) from t1))} +} {600} +do_test randexpr-2.490 { + db eval {SELECT case -coalesce((select max(case when case when t1.e in (11*t1.e*17,(select (min(b) & - -+cast(avg(17) AS integer)*max((t1.c))+cast(avg(t1.a) AS integer))+(( -(min(t1.c))))*count(*) from t1), -~b*t1.d+t1.d) then d else t1.a end<(t1.c) then t1.f else e end) from t1 where 13<>c),e)-(t1.b) when t1.a then 17 else t1.f end FROM t1 WHERE NOT (coalesce((select max(t1.e) from t1 where (not exists(select 1 from t1 where b<+c))),case when b | ~case t1.a when -t1.b then -t1.f else t1.f end*f+f in (select t1.b from t1 union select t1.c from t1) then -(t1.f) when a<>11 then (d) else 11 end) in (select abs(~(abs(case (min(t1.b)*abs(count(distinct a)+count(*) | -count(distinct a)))- -max(19) when max(t1.e) then min(d) else (max(a)) end))) from t1 union select count(*) from t1))} +} {600} +do_test randexpr-2.491 { + db eval 
{SELECT +case when case when ~t1.b<=case when ~17*19 | 11*11*f between t1.c and b then t1.a when e in (t1.a,t1.e,e) then t1.c else 19 end-t1.c-17+a then (t1.d) when 13 not between 13 and 19 then c else c end=(f) or d -coalesce((select t1.b from t1 where c*13+c<>t1.f),a)),t1.e)+t1.f=t1.f then - - -f else b end then -11 else -d end from t1 where ( -t1.c -coalesce((select t1.b from t1 where c*13+c<>t1.f),a)),t1.e)+t1.f=t1.f then - - -f else b end then -11 else -d end from t1 where ( -t1.c -coalesce((select t1.b from t1 where c*13+c<>t1.f),a)),t1.e)+t1.f=t1.f then - - -f else b end then -11 else -d end from t1 where ( -t1.ccase when b between ~11 and (abs(case when (coalesce((select max(+13) from t1 where not not exists(select 1 from t1 where t1.b+e in (b,t1.b,t1.d)) and 13 not in (d,f,17)),t1.a) not in (t1.f,17,17)) then coalesce((select max(19) from t1 where (17) in (t1.a,17,a)),t1.b) when b in (13,t1.a,b) then t1.c else d end)/abs(11)) then t1.f when t1.d>19 then d else d end)} +} {39800} +do_test randexpr-2.495 { + db eval {SELECT case when case when 19 in (select abs(abs(count(distinct f))*~~~count(*)-abs(+abs(count(*))* -cast(avg(17) AS integer) | cast(avg(t1.d) AS integer))+count(distinct -t1.a)+count(*)) from t1 union select -cast(avg(19) AS integer) from t1) then b* -b-c else t1.e*c end=17 then d else coalesce((select -11 from t1 where ecase when b between ~11 and (abs(case when (coalesce((select max(+13) from t1 where not not exists(select 1 from t1 where t1.b+e in (b,t1.b,t1.d)) and 13 not in (d,f,17)),t1.a) not in (t1.f,17,17)) then coalesce((select max(19) from t1 where (17) in (t1.a,17,a)),t1.b) when b in (13,t1.a,b) then t1.c else d end)/abs(11)) then t1.f when t1.d>19 then d else d end))} +} {} +do_test randexpr-2.496 { + db eval {SELECT case when case when 19 in (select abs(abs(count(distinct f))*~~~count(*)-abs(+abs(count(*))* -cast(avg(17) AS integer) & cast(avg(t1.d) AS integer))+count(distinct -t1.a)+count(*)) from t1 union select -cast(avg(19) AS integer) from t1) then b* -b-c else t1.e*c end=17 then d else coalesce((select -11 from t1 where ecase when b between ~11 and (abs(case when (coalesce((select max(+13) from t1 where not not exists(select 1 from t1 where t1.b+e in (b,t1.b,t1.d)) and 13 not in (d,f,17)),t1.a) not in (t1.f,17,17)) then coalesce((select max(19) from t1 where (17) in (t1.a,17,a)),t1.b) when b in (13,t1.a,b) then t1.c else d end)/abs(11)) then t1.f when t1.d>19 then d else d end)} +} {39800} +do_test randexpr-2.497 { + db eval {SELECT t1.a-coalesce((select t1.e from t1 where not exists(select 1 from t1 where a in (select min(d-11) | cast(avg((select -case count(*) when count(*) then +abs(+case ~min((abs(13- -a)/abs(t1.c))) when ~~count(*)+min((13)) then max(t1.e) else cast(avg(t1.a) AS integer) end | max(c))+count(*) else count(*) end*max(t1.d) from t1)) AS integer)+max(t1.c)*count(distinct f) from t1 union select -count(*) from t1))),~17) FROM t1 WHERE case 11 when coalesce((select coalesce((select 19 from t1 where t1.b not between t1.e and e and not not exists(select 1 from t1 where (e in ((abs(c)/abs(17)),(d) | e,(select count(distinct t1.b) from t1)-b)))),case d when a then f else t1.c end) | (t1.d) | b from t1 where t1.c not in (t1.a,19,11)),t1.b)-d then t1.e else t1.d end*t1.d<>f} +} {-400} +do_test randexpr-2.498 { + db eval {SELECT t1.a-coalesce((select t1.e from t1 where not exists(select 1 from t1 where a in (select min(d-11) | cast(avg((select -case count(*) when count(*) then +abs(+case ~min((abs(13- -a)/abs(t1.c))) when 
~~count(*)+min((13)) then max(t1.e) else cast(avg(t1.a) AS integer) end | max(c))+count(*) else count(*) end*max(t1.d) from t1)) AS integer)+max(t1.c)*count(distinct f) from t1 union select -count(*) from t1))),~17) FROM t1 WHERE NOT (case 11 when coalesce((select coalesce((select 19 from t1 where t1.b not between t1.e and e and not not exists(select 1 from t1 where (e in ((abs(c)/abs(17)),(d) | e,(select count(distinct t1.b) from t1)-b)))),case d when a then f else t1.c end) | (t1.d) | b from t1 where t1.c not in (t1.a,19,11)),t1.b)-d then t1.e else t1.d end*t1.d<>f)} +} {} +do_test randexpr-2.499 { + db eval {SELECT t1.a-coalesce((select t1.e from t1 where not exists(select 1 from t1 where a in (select min(d-11) & cast(avg((select -case count(*) when count(*) then +abs(+case ~min((abs(13- -a)/abs(t1.c))) when ~~count(*)+min((13)) then max(t1.e) else cast(avg(t1.a) AS integer) end & max(c))+count(*) else count(*) end*max(t1.d) from t1)) AS integer)+max(t1.c)*count(distinct f) from t1 union select -count(*) from t1))),~17) FROM t1 WHERE case 11 when coalesce((select coalesce((select 19 from t1 where t1.b not between t1.e and e and not not exists(select 1 from t1 where (e in ((abs(c)/abs(17)),(d) | e,(select count(distinct t1.b) from t1)-b)))),case d when a then f else t1.c end) | (t1.d) | b from t1 where t1.c not in (t1.a,19,11)),t1.b)-d then t1.e else t1.d end*t1.d<>f} +} {-400} +do_test randexpr-2.500 { + db eval {SELECT case b*coalesce((select coalesce((select max(coalesce((select t1.b from t1 where t1.a<>11),17+~19)) from t1 where (t1.e<>e)),case 13 when 17 then 13 else c-coalesce((select max(case when (select min(d) from t1) in (select max(13) from t1 union select min(t1.f) from t1) then 13 else t1.a end*c) from t1 where t1.b<(t1.b)),f) end) from t1 where 19<=t1.e),(t1.b)) when t1.b then e else t1.d end+t1.c FROM t1 WHERE b+coalesce((select max((d)) from t1 where e=b),t1.c)*((select -(min(coalesce((select 19+(abs((abs((t1.a)*case f when c then t1.b else t1.d end-e)/abs((t1.f))))/abs( -t1.e))+b from t1 where 13 not between d and t1.c),t1.f))) from t1))*case e when 19 then t1.d else t1.b end+t1.e not in (t1.d,t1.a,17)} +} {700} +do_test randexpr-2.501 { + db eval {SELECT case b*coalesce((select coalesce((select max(coalesce((select t1.b from t1 where t1.a<>11),17+~19)) from t1 where (t1.e<>e)),case 13 when 17 then 13 else c-coalesce((select max(case when (select min(d) from t1) in (select max(13) from t1 union select min(t1.f) from t1) then 13 else t1.a end*c) from t1 where t1.b<(t1.b)),f) end) from t1 where 19<=t1.e),(t1.b)) when t1.b then e else t1.d end+t1.c FROM t1 WHERE NOT (b+coalesce((select max((d)) from t1 where e=b),t1.c)*((select -(min(coalesce((select 19+(abs((abs((t1.a)*case f when c then t1.b else t1.d end-e)/abs((t1.f))))/abs( -t1.e))+b from t1 where 13 not between d and t1.c),t1.f))) from t1))*case e when 19 then t1.d else t1.b end+t1.e not in (t1.d,t1.a,17))} +} {} +do_test randexpr-2.502 { + db eval {SELECT case when case when exists(select 1 from t1 where 17 in (select b from t1 union select 13 from t1)) and 19+a in (select count(distinct -(select min(t1.a-f) from t1)) from t1 union select min(case when 11 between coalesce((select max(t1.b) from t1 where case when t1.d<>c or t1.b>=t1.a then ( -11) else t1.d end>=t1.a),t1.c) and -17 then -t1.f else e end) from t1) then a else 19 end+t1.e in (select e from t1 union select -t1.b from t1) then t1.b when not (f) between f and t1.c then t1.e else e end FROM t1 WHERE 17>coalesce((select t1.b from t1 where t1.a+case when b in 
(19,t1.c,case t1.e when d-(b) then t1.f else 13 end) then 13 when d in (select count(distinct t1.a) from t1 union select case ~max(f) when cast(avg(b) AS integer) then max(d) else min(t1.f) end*min(e) from t1) or (t1.c=t1.e} +} {} +do_test randexpr-2.503 { + db eval {SELECT case when case when exists(select 1 from t1 where 17 in (select b from t1 union select 13 from t1)) and 19+a in (select count(distinct -(select min(t1.a-f) from t1)) from t1 union select min(case when 11 between coalesce((select max(t1.b) from t1 where case when t1.d<>c or t1.b>=t1.a then ( -11) else t1.d end>=t1.a),t1.c) and -17 then -t1.f else e end) from t1) then a else 19 end+t1.e in (select e from t1 union select -t1.b from t1) then t1.b when not (f) between f and t1.c then t1.e else e end FROM t1 WHERE NOT (17>coalesce((select t1.b from t1 where t1.a+case when b in (19,t1.c,case t1.e when d-(b) then t1.f else 13 end) then 13 when d in (select count(distinct t1.a) from t1 union select case ~max(f) when cast(avg(b) AS integer) then max(d) else min(t1.f) end*min(e) from t1) or (t1.c=t1.e)} +} {500} +do_test randexpr-2.504 { + db eval {SELECT c*(select (count(distinct (abs(case when t1.c<>e then e else 19 end)/abs(case when case when +case when a | t1.e+t1.e in (select -count(*) from t1 union select (max(case when 11 not in (t1.f,11,f) then c when c not in (d,t1.f,t1.f) then -t1.a else 13 end)) from t1) then 17 else 13 end>=t1.e then -11 when (t1.f)>c then 11 else 11 end<=11 and not exists(select 1 from t1 where t1.a between t1.e and e) then 19 else (17) end)))) from t1) FROM t1 WHERE ((select -case min(a-13*17+c*coalesce((select 19 from t1 where 19>=t1.d),(t1.b))+t1.e) when +(min(17)) then min(17) else +min(b) end* -cast(avg(19) AS integer)*count(*) from t1) not between a and d) and +c in (c,t1.d,d) or d<>t1.f} +} {300} +do_test randexpr-2.505 { + db eval {SELECT c*(select (count(distinct (abs(case when t1.c<>e then e else 19 end)/abs(case when case when +case when a | t1.e+t1.e in (select -count(*) from t1 union select (max(case when 11 not in (t1.f,11,f) then c when c not in (d,t1.f,t1.f) then -t1.a else 13 end)) from t1) then 17 else 13 end>=t1.e then -11 when (t1.f)>c then 11 else 11 end<=11 and not exists(select 1 from t1 where t1.a between t1.e and e) then 19 else (17) end)))) from t1) FROM t1 WHERE NOT (((select -case min(a-13*17+c*coalesce((select 19 from t1 where 19>=t1.d),(t1.b))+t1.e) when +(min(17)) then min(17) else +min(b) end* -cast(avg(19) AS integer)*count(*) from t1) not between a and d) and +c in (c,t1.d,d) or d<>t1.f)} +} {} +do_test randexpr-2.506 { + db eval {SELECT c*(select (count(distinct (abs(case when t1.c<>e then e else 19 end)/abs(case when case when +case when a & t1.e+t1.e in (select -count(*) from t1 union select (max(case when 11 not in (t1.f,11,f) then c when c not in (d,t1.f,t1.f) then -t1.a else 13 end)) from t1) then 17 else 13 end>=t1.e then -11 when (t1.f)>c then 11 else 11 end<=11 and not exists(select 1 from t1 where t1.a between t1.e and e) then 19 else (17) end)))) from t1) FROM t1 WHERE ((select -case min(a-13*17+c*coalesce((select 19 from t1 where 19>=t1.d),(t1.b))+t1.e) when +(min(17)) then min(17) else +min(b) end* -cast(avg(19) AS integer)*count(*) from t1) not between a and d) and +c in (c,t1.d,d) or d<>t1.f} +} {300} +do_test randexpr-2.507 { + db eval {SELECT a*coalesce((select t1.d+17 from t1 where 13*case a when t1.a then 11 else -t1.a-d-11-coalesce((select max(e-coalesce((select d from t1 where a<=t1.d*(coalesce((select max( -t1.d) from t1 where t1.f not in (11, 
-t1.d,17) and t1.a between (13) and c and e not in (t1.b,t1.b,b)),b))), -17)) from t1 where t1.b<>a),19)+t1.d+t1.c end<13),t1.e) FROM t1 WHERE t1.e not between f and 19} +} {50000} +do_test randexpr-2.508 { + db eval {SELECT a*coalesce((select t1.d+17 from t1 where 13*case a when t1.a then 11 else -t1.a-d-11-coalesce((select max(e-coalesce((select d from t1 where a<=t1.d*(coalesce((select max( -t1.d) from t1 where t1.f not in (11, -t1.d,17) and t1.a between (13) and c and e not in (t1.b,t1.b,b)),b))), -17)) from t1 where t1.b<>a),19)+t1.d+t1.c end<13),t1.e) FROM t1 WHERE NOT (t1.e not between f and 19)} +} {} +do_test randexpr-2.509 { + db eval {SELECT 17 | case case when exists(select 1 from t1 where (not t1.d between a and f)) then t1.e when not (b=coalesce((select ~t1.c-case when (select (count(*))*(count(distinct t1.f)) from t1)>=11 then b else 19 end from t1 where not not (13<=e) and t1.b=t1.b),13)) then coalesce((select max((abs(11)/abs(t1.b))) from t1 where t1.a not between t1.e and t1.b),(t1.a)) else t1.a end when c then 13 else 19 end+b FROM t1 WHERE ~t1.a+t1.d+c*17 | (select max(t1.d)*abs(case (max(t1.f)+count(distinct t1.b))*(count(distinct f)) when count(distinct e) then -min(t1.e) else min(t1.f) end)+max(b)+count(distinct t1.d) from t1) in (select max(t1.d*(19)) from t1 union select count(distinct case when t1.c+b in (select max(e) from t1 union select cast(avg(f) AS integer) from t1) then (17) when e not in (c,d,19) or 11=19 then (19) else t1.e end+t1.a) from t1)} +} {} +do_test randexpr-2.510 { + db eval {SELECT 17 | case case when exists(select 1 from t1 where (not t1.d between a and f)) then t1.e when not (b=coalesce((select ~t1.c-case when (select (count(*))*(count(distinct t1.f)) from t1)>=11 then b else 19 end from t1 where not not (13<=e) and t1.b=t1.b),13)) then coalesce((select max((abs(11)/abs(t1.b))) from t1 where t1.a not between t1.e and t1.b),(t1.a)) else t1.a end when c then 13 else 19 end+b FROM t1 WHERE NOT (~t1.a+t1.d+c*17 | (select max(t1.d)*abs(case (max(t1.f)+count(distinct t1.b))*(count(distinct f)) when count(distinct e) then -min(t1.e) else min(t1.f) end)+max(b)+count(distinct t1.d) from t1) in (select max(t1.d*(19)) from t1 union select count(distinct case when t1.c+b in (select max(e) from t1 union select cast(avg(f) AS integer) from t1) then (17) when e not in (c,d,19) or 11=19 then (19) else t1.e end+t1.a) from t1))} +} {219} +do_test randexpr-2.511 { + db eval {SELECT 17 & case case when exists(select 1 from t1 where (not t1.d between a and f)) then t1.e when not (b=coalesce((select ~t1.c-case when (select (count(*))*(count(distinct t1.f)) from t1)>=11 then b else 19 end from t1 where not not (13<=e) and t1.b=t1.b),13)) then coalesce((select max((abs(11)/abs(t1.b))) from t1 where t1.a not between t1.e and t1.b),(t1.a)) else t1.a end when c then 13 else 19 end+b FROM t1 WHERE NOT (~t1.a+t1.d+c*17 | (select max(t1.d)*abs(case (max(t1.f)+count(distinct t1.b))*(count(distinct f)) when count(distinct e) then -min(t1.e) else min(t1.f) end)+max(b)+count(distinct t1.d) from t1) in (select max(t1.d*(19)) from t1 union select count(distinct case when t1.c+b in (select max(e) from t1 union select cast(avg(f) AS integer) from t1) then (17) when e not in (c,d,19) or 11=19 then (19) else t1.e end+t1.a) from t1))} +} {17} +do_test randexpr-2.512 { + db eval {SELECT case when coalesce((select max((+t1.a+17+11+case when (abs(19)/abs(t1.a)) in (select ~(case count(*) when min( -c) then count(distinct b) else count(*) end) from t1 union select -min(t1.e) from t1) or 
13>=t1.a then case c when 13 then t1.d else e end when -t1.e<=t1.a then t1.c else t1.c end)) from t1 where not t1.c= -t1.f),b)=f and t1.e<=(t1.f) then ~(t1.a) when 17=e then 19 else t1.b end FROM t1 WHERE case when -t1.f in (select (13) from t1 union select coalesce((select max(coalesce((select (~13) from t1 where t1.b>(11)),e*t1.c)) from t1 where (not (abs(t1.c)/abs(a))>=c)),t1.d) from t1) then -t1.e+ -a-coalesce((select t1.d from t1 where not exists(select 1 from t1 where -b=t1.f-f)),t1.c)*17 when 19<=11 then 19 else t1.d end>=b} +} {200} +do_test randexpr-2.513 { + db eval {SELECT case when coalesce((select max((+t1.a+17+11+case when (abs(19)/abs(t1.a)) in (select ~(case count(*) when min( -c) then count(distinct b) else count(*) end) from t1 union select -min(t1.e) from t1) or 13>=t1.a then case c when 13 then t1.d else e end when -t1.e<=t1.a then t1.c else t1.c end)) from t1 where not t1.c= -t1.f),b)=f and t1.e<=(t1.f) then ~(t1.a) when 17=e then 19 else t1.b end FROM t1 WHERE NOT (case when -t1.f in (select (13) from t1 union select coalesce((select max(coalesce((select (~13) from t1 where t1.b>(11)),e*t1.c)) from t1 where (not (abs(t1.c)/abs(a))>=c)),t1.d) from t1) then -t1.e+ -a-coalesce((select t1.d from t1 where not exists(select 1 from t1 where -b=t1.f-f)),t1.c)*17 when 19<=11 then 19 else t1.d end>=b)} +} {} +do_test randexpr-2.514 { + db eval {SELECT +(case when t1.a<=a then t1.d when ((abs(t1.a)/abs((coalesce((select t1.e from t1 where t1.f>=t1.c),e))))) between coalesce((select case when not 19> -11 then (case when t1.d in (select e from t1 union select f from t1) then -d when t1.b not between 11 and t1.d then (19) else c end) else t1.e end from t1 where not exists(select 1 from t1 where e not in (t1.d,a, -a)) and t1.d>t1.b),a) and t1.a then d else b end)-17 FROM t1 WHERE (+e*t1.d in (select (+abs(abs(min(~+b)-min(a)-case -case abs(max(t1.c)) when max(e) then count(*) else count(distinct 19) end- -max(t1.a) when count(distinct t1.a) then (count(*)) else min(t1.f) end)))-(count(*)) from t1 union select max(t1.f) from t1) or ~c*case when not exists(select 1 from t1 where t1.f in (t1.f,t1.a,d)) then t1.c when t1.c=t1.c),e))))) between coalesce((select case when not 19> -11 then (case when t1.d in (select e from t1 union select f from t1) then -d when t1.b not between 11 and t1.d then (19) else c end) else t1.e end from t1 where not exists(select 1 from t1 where e not in (t1.d,a, -a)) and t1.d>t1.b),a) and t1.a then d else b end)-17 FROM t1 WHERE NOT ((+e*t1.d in (select (+abs(abs(min(~+b)-min(a)-case -case abs(max(t1.c)) when max(e) then count(*) else count(distinct 19) end- -max(t1.a) when count(distinct t1.a) then (count(*)) else min(t1.f) end)))-(count(*)) from t1 union select max(t1.f) from t1) or ~c*case when not exists(select 1 from t1 where t1.f in (t1.f,t1.a,d)) then t1.c when t1.ct1.e then t1.a when -d=t1.e or t1.c not between t1.c and e then c else t1.d end)- -17 in (select a from t1 union select -t1.e from t1) then 19 else a end) from t1 where t1.a in (select -a from t1 union select 19 from t1)),t1.a)<>t1.d then 13 when a in (19,d,c) and a<11 then t1.a else b end+t1.a-t1.b FROM t1 WHERE c>=t1.e} +} {} +do_test randexpr-2.517 { + db eval {SELECT -case when coalesce((select max(case when ~(+case when t1.b*t1.b>t1.e then t1.a when -d=t1.e or t1.c not between t1.c and e then c else t1.d end)- -17 in (select a from t1 union select -t1.e from t1) then 19 else a end) from t1 where t1.a in (select -a from t1 union select 19 from t1)),t1.a)<>t1.d then 13 when a in (19,d,c) 
and a<11 then t1.a else b end+t1.a-t1.b FROM t1 WHERE NOT (c>=t1.e)} +} {-113} +do_test randexpr-2.518 { + db eval {SELECT coalesce((select case coalesce((select max(19) from t1 where +case when exists(select 1 from t1 where not exists(select 1 from t1 where not c in (case when e>=c then b else t1.b end,t1.b,t1.e))) then a-f+13-t1.f+19 when f>=t1.e then t1.e else t1.c end+17 in (select count(distinct 19) from t1 union select cast(avg(b) AS integer) | count(distinct 11)-count(*) from t1)),a) when b then 17 else c end from t1 where t1.f<=t1.e),t1.e) FROM t1 WHERE ~~+d-d*19*case ~(select count(*) from t1)-11 when 19 then -+(abs(t1.a)/abs(a)) | (f)-(abs(+t1.c)/abs((select ~+max(13*e)+min(f)*count(distinct 13) from t1)))*t1.a else -t1.f-t1.b end*t1.a not between -e and t1.e} +} {500} +do_test randexpr-2.519 { + db eval {SELECT coalesce((select case coalesce((select max(19) from t1 where +case when exists(select 1 from t1 where not exists(select 1 from t1 where not c in (case when e>=c then b else t1.b end,t1.b,t1.e))) then a-f+13-t1.f+19 when f>=t1.e then t1.e else t1.c end+17 in (select count(distinct 19) from t1 union select cast(avg(b) AS integer) | count(distinct 11)-count(*) from t1)),a) when b then 17 else c end from t1 where t1.f<=t1.e),t1.e) FROM t1 WHERE NOT (~~+d-d*19*case ~(select count(*) from t1)-11 when 19 then -+(abs(t1.a)/abs(a)) | (f)-(abs(+t1.c)/abs((select ~+max(13*e)+min(f)*count(distinct 13) from t1)))*t1.a else -t1.f-t1.b end*t1.a not between -e and t1.e)} +} {} +do_test randexpr-2.520 { + db eval {SELECT coalesce((select case coalesce((select max(19) from t1 where +case when exists(select 1 from t1 where not exists(select 1 from t1 where not c in (case when e>=c then b else t1.b end,t1.b,t1.e))) then a-f+13-t1.f+19 when f>=t1.e then t1.e else t1.c end+17 in (select count(distinct 19) from t1 union select cast(avg(b) AS integer) & count(distinct 11)-count(*) from t1)),a) when b then 17 else c end from t1 where t1.f<=t1.e),t1.e) FROM t1 WHERE ~~+d-d*19*case ~(select count(*) from t1)-11 when 19 then -+(abs(t1.a)/abs(a)) | (f)-(abs(+t1.c)/abs((select ~+max(13*e)+min(f)*count(distinct 13) from t1)))*t1.a else -t1.f-t1.b end*t1.a not between -e and t1.e} +} {500} +do_test randexpr-2.521 { + db eval {SELECT a*17+ -case when not exists(select 1 from t1 where 11=t1.d) then 11-b when case when t1.c<>t1.c++t1.a or t1.c*t1.a+d-f between 19 and case when not t1.e>=11 and t1.b<=t1.c then coalesce((select max(a) from t1 where 19=11),f) when f not in (t1.b,13,b) then f else -d end then d else t1.c end in (select 17 from t1 union select d from t1) then 17 else b end FROM t1 WHERE -(+~ -d++c+b)>=t1.a-case when coalesce((select t1.a-(select max(t1.e) from t1)-(abs(11)/abs(17))+t1.d from t1 where not (not exists(select 1 from t1 where c=t1.c)) or t1.b in (select abs(max(11)+count(*)) from t1 union select ((max(a))) from t1)),f)>=a then t1.f when d not in (11,c,17) then a else 19 end} +} {} +do_test randexpr-2.522 { + db eval {SELECT a*17+ -case when not exists(select 1 from t1 where 11=t1.d) then 11-b when case when t1.c<>t1.c++t1.a or t1.c*t1.a+d-f between 19 and case when not t1.e>=11 and t1.b<=t1.c then coalesce((select max(a) from t1 where 19=11),f) when f not in (t1.b,13,b) then f else -d end then d else t1.c end in (select 17 from t1 union select d from t1) then 17 else b end FROM t1 WHERE NOT ( -(+~ -d++c+b)>=t1.a-case when coalesce((select t1.a-(select max(t1.e) from t1)-(abs(11)/abs(17))+t1.d from t1 where not (not exists(select 1 from t1 where c=t1.c)) or t1.b in (select 
abs(max(11)+count(*)) from t1 union select ((max(a))) from t1)),f)>=a then t1.f when d not in (11,c,17) then a else 19 end)} +} {1889} +do_test randexpr-2.523 { + db eval {SELECT coalesce((select max(t1.b+a) from t1 where case (select count(distinct b*case when t1.f*+~a | b+17*13+a+11 in (select c from t1 union select d from t1) then t1.f else t1.b end) from t1) when t1.a then f else t1.c end+ -f between 17 and d and b in (13,t1.e,t1.e) and d not between t1.f and d),13) FROM t1 WHERE coalesce((select max(t1.d) from t1 where (coalesce((select ~t1.c from t1 where -t1.a<=c*+d or not (case when (t1.b)>=t1.d then t1.e when 17a and a<>e and t1.e=19)),19)<>t1.c} +} {13} +do_test randexpr-2.524 { + db eval {SELECT coalesce((select max(t1.b+a) from t1 where case (select count(distinct b*case when t1.f*+~a | b+17*13+a+11 in (select c from t1 union select d from t1) then t1.f else t1.b end) from t1) when t1.a then f else t1.c end+ -f between 17 and d and b in (13,t1.e,t1.e) and d not between t1.f and d),13) FROM t1 WHERE NOT (coalesce((select max(t1.d) from t1 where (coalesce((select ~t1.c from t1 where -t1.a<=c*+d or not (case when (t1.b)>=t1.d then t1.e when 17a and a<>e and t1.e=19)),19)<>t1.c)} +} {} +do_test randexpr-2.525 { + db eval {SELECT coalesce((select max(t1.b+a) from t1 where case (select count(distinct b*case when t1.f*+~a & b+17*13+a+11 in (select c from t1 union select d from t1) then t1.f else t1.b end) from t1) when t1.a then f else t1.c end+ -f between 17 and d and b in (13,t1.e,t1.e) and d not between t1.f and d),13) FROM t1 WHERE coalesce((select max(t1.d) from t1 where (coalesce((select ~t1.c from t1 where -t1.a<=c*+d or not (case when (t1.b)>=t1.d then t1.e when 17a and a<>e and t1.e=19)),19)<>t1.c} +} {13} +do_test randexpr-2.526 { + db eval {SELECT coalesce((select (t1.b-(abs(t1.b-~17 | 13+13)/abs(t1.e))* -t1.a- -19+t1.d) from t1 where exists(select 1 from t1 where 19 in (select t1.a from t1 union select -19 from t1) and a in (c,e,13) and 19 in (select t1.a from t1 union select b from t1) or 13 in (select 19 from t1 union select d from t1) and not t1.c=t1.f or ( -t1.d between e and 19) or -((17))<>c)),e) FROM t1 WHERE ((abs(a)/abs(f-a)) between -(abs(++t1.b)/abs(13)) and t1.c)} +} {619} +do_test randexpr-2.527 { + db eval {SELECT coalesce((select (t1.b-(abs(t1.b-~17 | 13+13)/abs(t1.e))* -t1.a- -19+t1.d) from t1 where exists(select 1 from t1 where 19 in (select t1.a from t1 union select -19 from t1) and a in (c,e,13) and 19 in (select t1.a from t1 union select b from t1) or 13 in (select 19 from t1 union select d from t1) and not t1.c=t1.f or ( -t1.d between e and 19) or -((17))<>c)),e) FROM t1 WHERE NOT (((abs(a)/abs(f-a)) between -(abs(++t1.b)/abs(13)) and t1.c))} +} {} +do_test randexpr-2.528 { + db eval {SELECT coalesce((select (t1.b-(abs(t1.b-~17 & 13+13)/abs(t1.e))* -t1.a- -19+t1.d) from t1 where exists(select 1 from t1 where 19 in (select t1.a from t1 union select -19 from t1) and a in (c,e,13) and 19 in (select t1.a from t1 union select b from t1) or 13 in (select 19 from t1 union select d from t1) and not t1.c=t1.f or ( -t1.d between e and 19) or -((17))<>c)),e) FROM t1 WHERE ((abs(a)/abs(f-a)) between -(abs(++t1.b)/abs(13)) and t1.c)} +} {619} +do_test randexpr-2.529 { + db eval {SELECT case when coalesce((select max(c- -b-t1.c) from t1 where t1.d between 11 and case when b | ~t1.e*(t1.f)>t1.f then a when (19<13) and not exists(select 1 from t1 where f not in (t1.a,((b)),17)) or d in ((a),t1.c,19) or (f)>13 then t1.e else f end*t1.a),t1.a)>=e then t1.d when t1.e>=c 
then 13 else f end FROM t1 WHERE +11 between case when t1.f>t1.c then t1.b else t1.b end and ((select count(distinct case when t1.a between (abs(coalesce((select t1.b from t1 where case when t1.d<=(abs((c))/abs(coalesce((select max(17) from t1 where 11=b),13))) then 17 when b=t1.e then d else e end-13>=(e)), -d))/abs(b)) and b then t1.d when (b) in (13,t1.b,t1.a) then f else 19 end) from t1))-d} +} {} +do_test randexpr-2.530 { + db eval {SELECT case when coalesce((select max(c- -b-t1.c) from t1 where t1.d between 11 and case when b | ~t1.e*(t1.f)>t1.f then a when (19<13) and not exists(select 1 from t1 where f not in (t1.a,((b)),17)) or d in ((a),t1.c,19) or (f)>13 then t1.e else f end*t1.a),t1.a)>=e then t1.d when t1.e>=c then 13 else f end FROM t1 WHERE NOT (+11 between case when t1.f>t1.c then t1.b else t1.b end and ((select count(distinct case when t1.a between (abs(coalesce((select t1.b from t1 where case when t1.d<=(abs((c))/abs(coalesce((select max(17) from t1 where 11=b),13))) then 17 when b=t1.e then d else e end-13>=(e)), -d))/abs(b)) and b then t1.d when (b) in (13,t1.b,t1.a) then f else 19 end) from t1))-d)} +} {13} +do_test randexpr-2.531 { + db eval {SELECT case when coalesce((select max(c- -b-t1.c) from t1 where t1.d between 11 and case when b & ~t1.e*(t1.f)>t1.f then a when (19<13) and not exists(select 1 from t1 where f not in (t1.a,((b)),17)) or d in ((a),t1.c,19) or (f)>13 then t1.e else f end*t1.a),t1.a)>=e then t1.d when t1.e>=c then 13 else f end FROM t1 WHERE NOT (+11 between case when t1.f>t1.c then t1.b else t1.b end and ((select count(distinct case when t1.a between (abs(coalesce((select t1.b from t1 where case when t1.d<=(abs((c))/abs(coalesce((select max(17) from t1 where 11=b),13))) then 17 when b=t1.e then d else e end-13>=(e)), -d))/abs(b)) and b then t1.d when (b) in (13,t1.b,t1.a) then f else 19 end) from t1))-d)} +} {13} +do_test randexpr-2.532 { + db eval {SELECT (select ~abs(max(case when (case when ( -t1.a>e) then t1.a else (select (~count(*) | max(case when t1.d<>c then -+13 else case when b between 19 and 17 or (t1.a)>=13 then 19 else t1.c end end)) from t1) end<>t1.e) then 19 when c*e+t1.b<=19 then f else (t1.d) end | b)) from t1)+c FROM t1 WHERE f | t1.a+b>=t1.d} +} {80} +do_test randexpr-2.533 { + db eval {SELECT (select ~abs(max(case when (case when ( -t1.a>e) then t1.a else (select (~count(*) | max(case when t1.d<>c then -+13 else case when b between 19 and 17 or (t1.a)>=13 then 19 else t1.c end end)) from t1) end<>t1.e) then 19 when c*e+t1.b<=19 then f else (t1.d) end | b)) from t1)+c FROM t1 WHERE NOT (f | t1.a+b>=t1.d)} +} {} +do_test randexpr-2.534 { + db eval {SELECT (select ~abs(max(case when (case when ( -t1.a>e) then t1.a else (select (~count(*) & max(case when t1.d<>c then -+13 else case when b between 19 and 17 or (t1.a)>=13 then 19 else t1.c end end)) from t1) end<>t1.e) then 19 when c*e+t1.b<=19 then f else (t1.d) end & b)) from t1)+c FROM t1 WHERE f | t1.a+b>=t1.d} +} {299} +do_test randexpr-2.535 { + db eval {SELECT -case when ~case t1.f+13+t1.a when coalesce((select max(17) from t1 where coalesce((select max(( -11)) from t1 where not exists(select 1 from t1 where case case when not exists(select 1 from t1 where (a in (select f from t1 union select b from t1))) then d when (a) in (f,t1.a, -f) then t1.a else b end when t1.d then c else t1.b end<>a and 11= -t1.a and 19=a)),11) in (select c from t1 union select 17 from t1)),t1.c) then e else 11 end=t1.d then 17 else 17 end FROM t1 WHERE not exists(select 1 from t1 where fa and 11= 
-t1.a and 19=a)),11) in (select c from t1 union select 17 from t1)),t1.c) then e else 11 end=t1.d then 17 else 17 end FROM t1 WHERE NOT (not exists(select 1 from t1 where fcoalesce((select max(case when (17> -t1.e) then f when 19=t1.a then t1.a else a end) from t1 where t1.c<=t1.e or (t1.d)>=t1.d),t1.a)+a then f when b not in (e,t1.e,t1.b) or t1.f not in (f,t1.c, -d) or e<=13 then f else (17) end-t1.d+t1.f*13 when (t1.c) then b else -t1.e end) from t1)) from t1 where t1.b in (t1.b,t1.f,(t1.e))),t1.e)*(a) FROM t1 WHERE t1.f<=a} +} {} +do_test randexpr-2.538 { + db eval {SELECT t1.f-t1.b | coalesce((select max((select max(case -case when t1.f>coalesce((select max(case when (17> -t1.e) then f when 19=t1.a then t1.a else a end) from t1 where t1.c<=t1.e or (t1.d)>=t1.d),t1.a)+a then f when b not in (e,t1.e,t1.b) or t1.f not in (f,t1.c, -d) or e<=13 then f else (17) end-t1.d+t1.f*13 when (t1.c) then b else -t1.e end) from t1)) from t1 where t1.b in (t1.b,t1.f,(t1.e))),t1.e)*(a) FROM t1 WHERE NOT (t1.f<=a)} +} {-49744} +do_test randexpr-2.539 { + db eval {SELECT t1.f-t1.b & coalesce((select max((select max(case -case when t1.f>coalesce((select max(case when (17> -t1.e) then f when 19=t1.a then t1.a else a end) from t1 where t1.c<=t1.e or (t1.d)>=t1.d),t1.a)+a then f when b not in (e,t1.e,t1.b) or t1.f not in (f,t1.c, -d) or e<=13 then f else (17) end-t1.d+t1.f*13 when (t1.c) then b else -t1.e end) from t1)) from t1 where t1.b in (t1.b,t1.f,(t1.e))),t1.e)*(a) FROM t1 WHERE NOT (t1.f<=a)} +} {144} +do_test randexpr-2.540 { + db eval {SELECT t1.e+t1.a*case when not t1.f<=d-(c)*13 then t1.e+13*(abs(case when exists(select 1 from t1 where (select -count(*)* -max(t1.a) | ((((cast(avg((e)) AS integer))))) from t1)>=+c+b) then +11 when t1.a not between t1.c and c then t1.b else -19 end)/abs(b)) | (19) when not exists(select 1 from t1 where t1.c between t1.e and t1.c) then d else t1.f end FROM t1 WHERE b in (+c,case b when f*~19-t1.b*t1.f then t1.f else a end,a | t1.a)} +} {} +do_test randexpr-2.541 { + db eval {SELECT t1.e+t1.a*case when not t1.f<=d-(c)*13 then t1.e+13*(abs(case when exists(select 1 from t1 where (select -count(*)* -max(t1.a) | ((((cast(avg((e)) AS integer))))) from t1)>=+c+b) then +11 when t1.a not between t1.c and c then t1.b else -19 end)/abs(b)) | (19) when not exists(select 1 from t1 where t1.c between t1.e and t1.c) then d else t1.f end FROM t1 WHERE NOT (b in (+c,case b when f*~19-t1.b*t1.f then t1.f else a end,a | t1.a))} +} {50800} +do_test randexpr-2.542 { + db eval {SELECT t1.e+t1.a*case when not t1.f<=d-(c)*13 then t1.e+13*(abs(case when exists(select 1 from t1 where (select -count(*)* -max(t1.a) & ((((cast(avg((e)) AS integer))))) from t1)>=+c+b) then +11 when t1.a not between t1.c and c then t1.b else -19 end)/abs(b)) & (19) when not exists(select 1 from t1 where t1.c between t1.e and t1.c) then d else t1.f end FROM t1 WHERE NOT (b in (+c,case b when f*~19-t1.b*t1.f then t1.f else a end,a | t1.a))} +} {600} +do_test randexpr-2.543 { + db eval {SELECT t1.e++a-c-a+coalesce((select t1.e from t1 where 11=case when t1.f not between 13 and t1.b+a*t1.c then 17 when (select min((select cast(avg(17) AS integer)*max(d)-(cast(avg(c) AS integer))*count(distinct t1.b) from t1)) from t1)>=case coalesce((select t1.b | e from t1 where t1.d in (t1.c,17,t1.c)),a) when 13 then t1.a else 19 end then t1.a else e end),t1.d) FROM t1 WHERE +t1.a*t1.c between case when case coalesce((select max(+t1.f-t1.b*(select min((b))+count(distinct c) from t1)) from t1 where -b between 11 and t1.b or b in 
(select -max(19) from t1 union select -count(*)+ -count(*) from t1) and 17=d),13)-t1.b*a when t1.b then (17) else c end>b then e else 11 end and ( -b) or t1.e not between a and 11} +} {600} +do_test randexpr-2.544 { + db eval {SELECT t1.e++a-c-a+coalesce((select t1.e from t1 where 11=case when t1.f not between 13 and t1.b+a*t1.c then 17 when (select min((select cast(avg(17) AS integer)*max(d)-(cast(avg(c) AS integer))*count(distinct t1.b) from t1)) from t1)>=case coalesce((select t1.b | e from t1 where t1.d in (t1.c,17,t1.c)),a) when 13 then t1.a else 19 end then t1.a else e end),t1.d) FROM t1 WHERE NOT (+t1.a*t1.c between case when case coalesce((select max(+t1.f-t1.b*(select min((b))+count(distinct c) from t1)) from t1 where -b between 11 and t1.b or b in (select -max(19) from t1 union select -count(*)+ -count(*) from t1) and 17=d),13)-t1.b*a when t1.b then (17) else c end>b then e else 11 end and ( -b) or t1.e not between a and 11)} +} {} +do_test randexpr-2.545 { + db eval {SELECT t1.e++a-c-a+coalesce((select t1.e from t1 where 11=case when t1.f not between 13 and t1.b+a*t1.c then 17 when (select min((select cast(avg(17) AS integer)*max(d)-(cast(avg(c) AS integer))*count(distinct t1.b) from t1)) from t1)>=case coalesce((select t1.b & e from t1 where t1.d in (t1.c,17,t1.c)),a) when 13 then t1.a else 19 end then t1.a else e end),t1.d) FROM t1 WHERE +t1.a*t1.c between case when case coalesce((select max(+t1.f-t1.b*(select min((b))+count(distinct c) from t1)) from t1 where -b between 11 and t1.b or b in (select -max(19) from t1 union select -count(*)+ -count(*) from t1) and 17=d),13)-t1.b*a when t1.b then (17) else c end>b then e else 11 end and ( -b) or t1.e not between a and 11} +} {600} +do_test randexpr-2.546 { + db eval {SELECT (select case ~case count(*) when (count(distinct d)*min(17)+min(13)+min(11))-max(t1.a) then count(distinct -13) else min(t1.c) end-max( - -t1.d)*count(*) when -count(*) then count(*) else (cast(avg(e) AS integer)) end from t1)-case when a>b then t1.d+case when (e= -t1.a) and c=(13) and t1.e<=c then 11 else t1.f end | t1.a)))-abs(abs(count(*)))-+~min(t1.e)+~count(distinct 11)* -count(*) | max(t1.d)-( -cast(avg(13) AS integer))*(min(e)) else -min(t1.a) end from t1 union select max(t1.f) from t1)} +} {} +do_test randexpr-2.547 { + db eval {SELECT (select case ~case count(*) when (count(distinct d)*min(17)+min(13)+min(11))-max(t1.a) then count(distinct -13) else min(t1.c) end-max( - -t1.d)*count(*) when -count(*) then count(*) else (cast(avg(e) AS integer)) end from t1)-case when a>b then t1.d+case when (e= -t1.a) and c=(13) and t1.e<=c then 11 else t1.f end | t1.a)))-abs(abs(count(*)))-+~min(t1.e)+~count(distinct 11)* -count(*) | max(t1.d)-( -cast(avg(13) AS integer))*(min(e)) else -min(t1.a) end from t1 union select max(t1.f) from t1))} +} {481} +do_test randexpr-2.548 { + db eval {SELECT case ~case 19 when t1.e then f else 19*f end*case when exists(select 1 from t1 where t1.e not between t1.f-~case case -e when f then t1.d else case when t1.b in (t1.e,f,11) then e when 11<=d then -t1.d else 17 end end when 13 then 19 else c end and 19) then coalesce((select t1.b from t1 where not exists(select 1 from t1 where (a<>b))),b) else t1.c end | e+t1.b when a then f else 19 end FROM t1 WHERE 17 in (select count(distinct t1.f)+~min(coalesce((select max(e*f-case when case when 11<>e and t1.e<=t1.d or (t1.d)<=t1.e then d when ( -17) not between -t1.c and t1.a then coalesce((select max((19)) from t1 where -b between e and 11), -a) else d end in (b,t1.d,a) and 19 in 
(13,13,t1.c) or b not in (f,(t1.e),19) then t1.c when 17 not in ((19),(t1.b),f) then +t1.d else f end) from t1 where (not a between 19 and d and (d>a))),d)) from t1 union select cast(avg(f) AS integer) from t1)} +} {} +do_test randexpr-2.549 { + db eval {SELECT case ~case 19 when t1.e then f else 19*f end*case when exists(select 1 from t1 where t1.e not between t1.f-~case case -e when f then t1.d else case when t1.b in (t1.e,f,11) then e when 11<=d then -t1.d else 17 end end when 13 then 19 else c end and 19) then coalesce((select t1.b from t1 where not exists(select 1 from t1 where (a<>b))),b) else t1.c end | e+t1.b when a then f else 19 end FROM t1 WHERE NOT (17 in (select count(distinct t1.f)+~min(coalesce((select max(e*f-case when case when 11<>e and t1.e<=t1.d or (t1.d)<=t1.e then d when ( -17) not between -t1.c and t1.a then coalesce((select max((19)) from t1 where -b between e and 11), -a) else d end in (b,t1.d,a) and 19 in (13,13,t1.c) or b not in (f,(t1.e),19) then t1.c when 17 not in ((19),(t1.b),f) then +t1.d else f end) from t1 where (not a between 19 and d and (d>a))),d)) from t1 union select cast(avg(f) AS integer) from t1))} +} {19} +do_test randexpr-2.550 { + db eval {SELECT case ~case 19 when t1.e then f else 19*f end*case when exists(select 1 from t1 where t1.e not between t1.f-~case case -e when f then t1.d else case when t1.b in (t1.e,f,11) then e when 11<=d then -t1.d else 17 end end when 13 then 19 else c end and 19) then coalesce((select t1.b from t1 where not exists(select 1 from t1 where (a<>b))),b) else t1.c end & e+t1.b when a then f else 19 end FROM t1 WHERE NOT (17 in (select count(distinct t1.f)+~min(coalesce((select max(e*f-case when case when 11<>e and t1.e<=t1.d or (t1.d)<=t1.e then d when ( -17) not between -t1.c and t1.a then coalesce((select max((19)) from t1 where -b between e and 11), -a) else d end in (b,t1.d,a) and 19 in (13,13,t1.c) or b not in (f,(t1.e),19) then t1.c when 17 not in ((19),(t1.b),f) then +t1.d else f end) from t1 where (not a between 19 and d and (d>a))),d)) from t1 union select cast(avg(f) AS integer) from t1))} +} {19} +do_test randexpr-2.551 { + db eval {SELECT (abs(11*coalesce((select max(t1.a) from t1 where d>=case when coalesce((select case when not exists(select 1 from t1 where -d in (select b-(abs(13)/abs(t1.e)) from t1 union select e from t1)) then ~c when not exists(select 1 from t1 where not exists(select 1 from t1 where -t1.d in (select cast(avg(c) AS integer) | count(distinct e)+max( -t1.d)-(max(f)) from t1 union select count(*) from t1))) then t1.a else e end from t1 where exists(select 1 from t1 where t1.f between d and t1.b) or 17 in (11,17,d)),11) not between t1.c and t1.d then 17 when 13=t1.a then 19 else (13) end), -t1.b)-e)/abs(a)) FROM t1 WHERE t1.c+t1.c=t1.f} +} {6} +do_test randexpr-2.552 { + db eval {SELECT (abs(11*coalesce((select max(t1.a) from t1 where d>=case when coalesce((select case when not exists(select 1 from t1 where -d in (select b-(abs(13)/abs(t1.e)) from t1 union select e from t1)) then ~c when not exists(select 1 from t1 where not exists(select 1 from t1 where -t1.d in (select cast(avg(c) AS integer) | count(distinct e)+max( -t1.d)-(max(f)) from t1 union select count(*) from t1))) then t1.a else e end from t1 where exists(select 1 from t1 where t1.f between d and t1.b) or 17 in (11,17,d)),11) not between t1.c and t1.d then 17 when 13=t1.a then 19 else (13) end), -t1.b)-e)/abs(a)) FROM t1 WHERE NOT (t1.c+t1.c=t1.f)} +} {} +do_test randexpr-2.553 { + db eval {SELECT (abs(11*coalesce((select 
max(t1.a) from t1 where d>=case when coalesce((select case when not exists(select 1 from t1 where -d in (select b-(abs(13)/abs(t1.e)) from t1 union select e from t1)) then ~c when not exists(select 1 from t1 where not exists(select 1 from t1 where -t1.d in (select cast(avg(c) AS integer) & count(distinct e)+max( -t1.d)-(max(f)) from t1 union select count(*) from t1))) then t1.a else e end from t1 where exists(select 1 from t1 where t1.f between d and t1.b) or 17 in (11,17,d)),11) not between t1.c and t1.d then 17 when 13=t1.a then 19 else (13) end), -t1.b)-e)/abs(a)) FROM t1 WHERE t1.c+t1.c=t1.f} +} {6} +do_test randexpr-2.554 { + db eval {SELECT -t1.a*coalesce((select max(c) from t1 where -(17 | case when (select cast(avg(17+coalesce((select max(c) from t1 where exists(select 1 from t1 where (coalesce((select max( -f) from t1 where (t1.b<=t1.b)),t1.e)<>e))),f | -t1.a)) AS integer) from t1) in (13,e,t1.c) then f when (f>e) then b else t1.a end)*c*e not between 19 and t1.b),11)+13-19*(19) FROM t1 WHERE (c<=t1.e)} +} {-30348} +do_test randexpr-2.555 { + db eval {SELECT -t1.a*coalesce((select max(c) from t1 where -(17 | case when (select cast(avg(17+coalesce((select max(c) from t1 where exists(select 1 from t1 where (coalesce((select max( -f) from t1 where (t1.b<=t1.b)),t1.e)<>e))),f | -t1.a)) AS integer) from t1) in (13,e,t1.c) then f when (f>e) then b else t1.a end)*c*e not between 19 and t1.b),11)+13-19*(19) FROM t1 WHERE NOT ((c<=t1.e))} +} {} +do_test randexpr-2.556 { + db eval {SELECT -t1.a*coalesce((select max(c) from t1 where -(17 & case when (select cast(avg(17+coalesce((select max(c) from t1 where exists(select 1 from t1 where (coalesce((select max( -f) from t1 where (t1.b<=t1.b)),t1.e)<>e))),f & -t1.a)) AS integer) from t1) in (13,e,t1.c) then f when (f>e) then b else t1.a end)*c*e not between 19 and t1.b),11)+13-19*(19) FROM t1 WHERE (c<=t1.e)} +} {-30348} +do_test randexpr-2.557 { + db eval {SELECT 19-coalesce((select max(case when 17 | t1.a in (select case case coalesce((select max(t1.b) from t1 where t1.f not between case when -b in (13,f,a) then a else a end and t1.f or (a not in (d,d,t1.c))),c) when a then t1.e else f end when 13 then 13 else b end from t1 union select 17 from t1) then c when not b not in (d, -t1.e,c) then 17 else b end) from t1 where e not in (17,t1.e,b)),t1.e) FROM t1 WHERE t1.a in (select -count(*) from t1 union select min((select (cast(avg((select +max(t1.b) from t1)) AS integer)) from t1)+17*c) from t1)} +} {} +do_test randexpr-2.558 { + db eval {SELECT 19-coalesce((select max(case when 17 | t1.a in (select case case coalesce((select max(t1.b) from t1 where t1.f not between case when -b in (13,f,a) then a else a end and t1.f or (a not in (d,d,t1.c))),c) when a then t1.e else f end when 13 then 13 else b end from t1 union select 17 from t1) then c when not b not in (d, -t1.e,c) then 17 else b end) from t1 where e not in (17,t1.e,b)),t1.e) FROM t1 WHERE NOT (t1.a in (select -count(*) from t1 union select min((select (cast(avg((select +max(t1.b) from t1)) AS integer)) from t1)+17*c) from t1))} +} {-481} +do_test randexpr-2.559 { + db eval {SELECT 19-coalesce((select max(case when 17 & t1.a in (select case case coalesce((select max(t1.b) from t1 where t1.f not between case when -b in (13,f,a) then a else a end and t1.f or (a not in (d,d,t1.c))),c) when a then t1.e else f end when 13 then 13 else b end from t1 union select 17 from t1) then c when not b not in (d, -t1.e,c) then 17 else b end) from t1 where e not in (17,t1.e,b)),t1.e) FROM t1 WHERE NOT (t1.a in 
(select -count(*) from t1 union select min((select (cast(avg((select +max(t1.b) from t1)) AS integer)) from t1)+17*c) from t1))} +} {-481} +do_test randexpr-2.560 { + db eval {SELECT -f-coalesce((select max(case a-t1.a when t1.f then 13 else +t1.a-t1.f end) from t1 where (select count(distinct a) from t1)>d),( -~t1.a+17+case when (not exists(select 1 from t1 where c=t1.b and c>t1.d)) then ~coalesce((select t1.f from t1 where t1.e not between t1.b and t1.e),11)+a when b<=t1.d then t1.c else 11 end+t1.b)*d)*17+d FROM t1 WHERE +t1.c in (select max(~case when 17>t1.f then 19 else -case when d=11 or t1.b> -t1.e or d not in (b,t1.a,t1.e) then e-( -13)+d else 11 end end-t1.d) from t1 union select max(17)+case min(t1.a) | case + -case abs(cast(avg(t1.d) AS integer)) when -count(*) then count(*) else max(19) end when min(e) then count(*) else cast(avg(t1.c) AS integer) end when min(t1.f) then count(*) else min(b) end from t1)} +} {} +do_test randexpr-2.561 { + db eval {SELECT -f-coalesce((select max(case a-t1.a when t1.f then 13 else +t1.a-t1.f end) from t1 where (select count(distinct a) from t1)>d),( -~t1.a+17+case when (not exists(select 1 from t1 where c=t1.b and c>t1.d)) then ~coalesce((select t1.f from t1 where t1.e not between t1.b and t1.e),11)+a when b<=t1.d then t1.c else 11 end+t1.b)*d)*17+d FROM t1 WHERE NOT (+t1.c in (select max(~case when 17>t1.f then 19 else -case when d=11 or t1.b> -t1.e or d not in (b,t1.a,t1.e) then e-( -13)+d else 11 end end-t1.d) from t1 union select max(17)+case min(t1.a) | case + -case abs(cast(avg(t1.d) AS integer)) when -count(*) then count(*) else max(19) end when min(e) then count(*) else cast(avg(t1.c) AS integer) end when min(t1.f) then count(*) else min(b) end from t1))} +} {-2761000} +do_test randexpr-2.562 { + db eval {SELECT coalesce((select 11 from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not (select +count(*)* -count(distinct case when exists(select 1 from t1 where 13+a=t1.b) then d else 13 end-t1.b) from t1)<=17 | e-13)) or not exists(select 1 from t1 where not t1.e in ((13),17,19) and not t1.d>a and t1.e not between t1.e and b)),coalesce((select max(coalesce((select max(b) from t1 where (b)>11),d)) from t1 where 11> -19),f)-c) FROM t1 WHERE case when -13 not in (b,(abs(coalesce((select max(case when -17<>t1.f then b when coalesce((select t1.b from t1 where t1.c in (select (a) from t1 union select c from t1)),17)<=t1.e then b else f end) from t1 where b<=11),t1.c))/abs((d))),19) then t1.b when (exists(select 1 from t1 where not exists(select 1 from t1 where ((13)=e and t1.f between 17 and t1.a) or t1.f between (a) and -(t1.f)))) and t1.e between b and 13 or b<>(f) then t1.f else d end>=a or d>=13} +} {11} +do_test randexpr-2.563 { + db eval {SELECT coalesce((select 11 from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not (select +count(*)* -count(distinct case when exists(select 1 from t1 where 13+a=t1.b) then d else 13 end-t1.b) from t1)<=17 | e-13)) or not exists(select 1 from t1 where not t1.e in ((13),17,19) and not t1.d>a and t1.e not between t1.e and b)),coalesce((select max(coalesce((select max(b) from t1 where (b)>11),d)) from t1 where 11> -19),f)-c) FROM t1 WHERE NOT (case when -13 not in (b,(abs(coalesce((select max(case when -17<>t1.f then b when coalesce((select t1.b from t1 where t1.c in (select (a) from t1 union select c from t1)),17)<=t1.e then b else f end) from t1 where b<=11),t1.c))/abs((d))),19) then t1.b when (exists(select 1 from t1 where not exists(select 1 from t1 
where ((13)=e and t1.f between 17 and t1.a) or t1.f between (a) and -(t1.f)))) and t1.e between b and 13 or b<>(f) then t1.f else d end>=a or d>=13)} +} {} +do_test randexpr-2.564 { + db eval {SELECT coalesce((select 11 from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not (select +count(*)* -count(distinct case when exists(select 1 from t1 where 13+a=t1.b) then d else 13 end-t1.b) from t1)<=17 & e-13)) or not exists(select 1 from t1 where not t1.e in ((13),17,19) and not t1.d>a and t1.e not between t1.e and b)),coalesce((select max(coalesce((select max(b) from t1 where (b)>11),d)) from t1 where 11> -19),f)-c) FROM t1 WHERE case when -13 not in (b,(abs(coalesce((select max(case when -17<>t1.f then b when coalesce((select t1.b from t1 where t1.c in (select (a) from t1 union select c from t1)),17)<=t1.e then b else f end) from t1 where b<=11),t1.c))/abs((d))),19) then t1.b when (exists(select 1 from t1 where not exists(select 1 from t1 where ((13)=e and t1.f between 17 and t1.a) or t1.f between (a) and -(t1.f)))) and t1.e between b and 13 or b<>(f) then t1.f else d end>=a or d>=13} +} {11} +do_test randexpr-2.565 { + db eval {SELECT coalesce((select max(coalesce((select max(13+f | c+a) from t1 where 17d then 13 else c end-c then e else t1.e end),c) FROM t1 WHERE exists(select 1 from t1 where t1.a+( -case when exists(select 1 from t1 where exists(select 1 from t1 where b | c-c+t1.d not between 19 and 11)) then coalesce((select t1.c from t1 where c+17 in (f,13,c) or t1.e*coalesce((select case when (not exists(select 1 from t1 where 17>e)) then t1.d else ~t1.e end from t1 where (t1.f not in (19,a,t1.b) or c=f)),c)=c), -11) else e end)<=t1.b)} +} {300} +do_test randexpr-2.566 { + db eval {SELECT coalesce((select max(coalesce((select max(13+f | c+a) from t1 where 17d then 13 else c end-c then e else t1.e end),c) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.a+( -case when exists(select 1 from t1 where exists(select 1 from t1 where b | c-c+t1.d not between 19 and 11)) then coalesce((select t1.c from t1 where c+17 in (f,13,c) or t1.e*coalesce((select case when (not exists(select 1 from t1 where 17>e)) then t1.d else ~t1.e end from t1 where (t1.f not in (19,a,t1.b) or c=f)),c)=c), -11) else e end)<=t1.b))} +} {} +do_test randexpr-2.567 { + db eval {SELECT coalesce((select max(coalesce((select max(13+f & c+a) from t1 where 17d then 13 else c end-c then e else t1.e end),c) FROM t1 WHERE exists(select 1 from t1 where t1.a+( -case when exists(select 1 from t1 where exists(select 1 from t1 where b | c-c+t1.d not between 19 and 11)) then coalesce((select t1.c from t1 where c+17 in (f,13,c) or t1.e*coalesce((select case when (not exists(select 1 from t1 where 17>e)) then t1.d else ~t1.e end from t1 where (t1.f not in (19,a,t1.b) or c=f)),c)=c), -11) else e end)<=t1.b)} +} {300} +do_test randexpr-2.568 { + db eval {SELECT case when ((coalesce((select max((select ~cast(avg((~d*f)) AS integer) from t1)) from t1 where - -t1.f<>d),e) in (select cast(avg(+13) AS integer)*~count(*) | case count(*) when -abs(abs(max(f))) then cast(avg(e) AS integer) else (count(distinct b)) end from t1 union select count(*) from t1) and b>= -t1.d)) then (17) when b between 13 and t1.d then -e+t1.a else t1.f end FROM t1 WHERE (case when +t1.f-(select abs(count(distinct d)) from t1)*case when c in (select min(b) from t1 union select max(c+f*case 11*t1.b when b then t1.c else c end | t1.a) from t1) then d else f end | t1.a*b in (t1.d,17,a) then -t1.a else t1.d end) in (e,t1.c,f) and -((t1.c)) in 
(11,t1.e,t1.c)} +} {} +do_test randexpr-2.569 { + db eval {SELECT case when ((coalesce((select max((select ~cast(avg((~d*f)) AS integer) from t1)) from t1 where - -t1.f<>d),e) in (select cast(avg(+13) AS integer)*~count(*) | case count(*) when -abs(abs(max(f))) then cast(avg(e) AS integer) else (count(distinct b)) end from t1 union select count(*) from t1) and b>= -t1.d)) then (17) when b between 13 and t1.d then -e+t1.a else t1.f end FROM t1 WHERE NOT ((case when +t1.f-(select abs(count(distinct d)) from t1)*case when c in (select min(b) from t1 union select max(c+f*case 11*t1.b when b then t1.c else c end | t1.a) from t1) then d else f end | t1.a*b in (t1.d,17,a) then -t1.a else t1.d end) in (e,t1.c,f) and -((t1.c)) in (11,t1.e,t1.c))} +} {-400} +do_test randexpr-2.570 { + db eval {SELECT case when ((coalesce((select max((select ~cast(avg((~d*f)) AS integer) from t1)) from t1 where - -t1.f<>d),e) in (select cast(avg(+13) AS integer)*~count(*) & case count(*) when -abs(abs(max(f))) then cast(avg(e) AS integer) else (count(distinct b)) end from t1 union select count(*) from t1) and b>= -t1.d)) then (17) when b between 13 and t1.d then -e+t1.a else t1.f end FROM t1 WHERE NOT ((case when +t1.f-(select abs(count(distinct d)) from t1)*case when c in (select min(b) from t1 union select max(c+f*case 11*t1.b when b then t1.c else c end | t1.a) from t1) then d else f end | t1.a*b in (t1.d,17,a) then -t1.a else t1.d end) in (e,t1.c,f) and -((t1.c)) in (11,t1.e,t1.c))} +} {-400} +do_test randexpr-2.571 { + db eval {SELECT t1.d-case when e in (17,coalesce((select max((select count(*) from t1)) from t1 where (~t1.f++19*t1.f+b in (a,t1.b, -t1.e)) and b in (select count(distinct t1.a) from t1 union select (abs(abs(abs(max(t1.e)*max(t1.d))-min(11)* -count(*)))) from t1)),17),13) and t1.f in (select t1.b from t1 union select f from t1) then t1.c else c end+f FROM t1 WHERE not (select (abs(abs((+count(*)*~ -cast(avg(+t1.f-c) AS integer)*count(distinct +coalesce((select max((abs(c)/abs(t1.e))) from t1 where coalesce((select coalesce((select max(f) from t1 where t1.d<=t1.d),e)*c from t1 where t1.f<>f),c) not between t1.a and c),t1.e)+13)-count(distinct f) | abs(cast(avg(t1.b) AS integer)))))) from t1)-13f),c) not between t1.a and c),t1.e)+13)-count(distinct f) | abs(cast(avg(t1.b) AS integer)))))) from t1)-13=c or f<>e),+d))) | t1.c*b+t1.d)- -f)/abs(13))<>t1.e then -t1.e when t1.e not between f and t1.a then t1.c else t1.c end)/abs(t1.e)) FROM t1 WHERE (abs(b*t1.e+t1.a)/abs(t1.c))=c or f<>e),+d))) | t1.c*b+t1.d)- -f)/abs(13))<>t1.e then -t1.e when t1.e not between f and t1.a then t1.c else t1.c end)/abs(t1.e)) FROM t1 WHERE NOT ((abs(b*t1.e+t1.a)/abs(t1.c))=c or f<>e),+d))) & t1.c*b+t1.d)- -f)/abs(13))<>t1.e then -t1.e when t1.e not between f and t1.a then t1.c else t1.c end)/abs(t1.e)) FROM t1 WHERE NOT ((abs(b*t1.e+t1.a)/abs(t1.c))case when (17) in (select + -abs(count(distinct t1.e*17*c+f)-max(19)) from t1 union select count(*)-max(case t1.f when a then (11+19*t1.b+t1.f) else (19) end*t1.c) from t1) then (abs(b- -f)/abs(f)) else t1.f end} +} {0} +do_test randexpr-2.577 { + db eval {SELECT (abs(t1.c-case when +coalesce((select max(coalesce((select max(a-a*t1.f) from t1 where ~t1.a*t1.a-t1.b+f* -19 not in (t1.d,e,b)),11)*t1.a) from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where (c) in (b,c,t1.c))) or t1.e between -b and b),a)+a<=e then t1.d else 17 end)/abs(f)) FROM t1 WHERE NOT (t1.d-t1.c+~t1.c<>case when (17) in (select + -abs(count(distinct t1.e*17*c+f)-max(19)) from t1 
union select count(*)-max(case t1.f when a then (11+19*t1.b+t1.f) else (19) end*t1.c) from t1) then (abs(b- -f)/abs(f)) else t1.f end)} +} {} +do_test randexpr-2.578 { + db eval {SELECT (abs( -coalesce((select 11 from t1 where (case f when (19+ -(select count(*) from t1)) then (select ++~count(distinct 17+t1.f*(17)) | cast(avg(c) AS integer) from t1) else 13-t1.b*t1.b end*t1.c*(select max(e)*cast(avg(t1.b) AS integer) from t1) | 13) | 17 between 11 and 17),(e))+d)/abs(t1.b))-t1.d FROM t1 WHERE 11<>t1.b} +} {-400} +do_test randexpr-2.579 { + db eval {SELECT (abs( -coalesce((select 11 from t1 where (case f when (19+ -(select count(*) from t1)) then (select ++~count(distinct 17+t1.f*(17)) | cast(avg(c) AS integer) from t1) else 13-t1.b*t1.b end*t1.c*(select max(e)*cast(avg(t1.b) AS integer) from t1) | 13) | 17 between 11 and 17),(e))+d)/abs(t1.b))-t1.d FROM t1 WHERE NOT (11<>t1.b)} +} {} +do_test randexpr-2.580 { + db eval {SELECT (abs( -coalesce((select 11 from t1 where (case f when (19+ -(select count(*) from t1)) then (select ++~count(distinct 17+t1.f*(17)) & cast(avg(c) AS integer) from t1) else 13-t1.b*t1.b end*t1.c*(select max(e)*cast(avg(t1.b) AS integer) from t1) & 13) & 17 between 11 and 17),(e))+d)/abs(t1.b))-t1.d FROM t1 WHERE 11<>t1.b} +} {-400} +do_test randexpr-2.581 { + db eval {SELECT ( -13)-case when (13)+19 between (select cast(avg(11+coalesce((select d from t1 where t1.a in (select b from t1 union select c from t1)),11)+t1.e*d) AS integer)*+(max(t1.c))+cast(avg((c)) AS integer)*count(*) | max( -t1.b) from t1)-t1.d and t1.a then t1.b when b<>t1.f then t1.c else a end-b-t1.b FROM t1 WHERE 11<=t1.e+19*+t1.d} +} {-613} +do_test randexpr-2.582 { + db eval {SELECT ( -13)-case when (13)+19 between (select cast(avg(11+coalesce((select d from t1 where t1.a in (select b from t1 union select c from t1)),11)+t1.e*d) AS integer)*+(max(t1.c))+cast(avg((c)) AS integer)*count(*) | max( -t1.b) from t1)-t1.d and t1.a then t1.b when b<>t1.f then t1.c else a end-b-t1.b FROM t1 WHERE NOT (11<=t1.e+19*+t1.d)} +} {} +do_test randexpr-2.583 { + db eval {SELECT ( -13)-case when (13)+19 between (select cast(avg(11+coalesce((select d from t1 where t1.a in (select b from t1 union select c from t1)),11)+t1.e*d) AS integer)*+(max(t1.c))+cast(avg((c)) AS integer)*count(*) & max( -t1.b) from t1)-t1.d and t1.a then t1.b when b<>t1.f then t1.c else a end-b-t1.b FROM t1 WHERE 11<=t1.e+19*+t1.d} +} {-713} +do_test randexpr-2.584 { + db eval {SELECT coalesce((select max(t1.e++case ~t1.c when t1.f-c then t1.b*+coalesce((select max(+t1.c) from t1 where 19-19-13 | t1.d+(select ~min(17) from t1) not in (case when t1.a<>b then e else -e end,(t1.d),t1.a)),b)+((e))* -t1.e else t1.d end | b) from t1 where a not between 13 and c),t1.e) FROM t1 WHERE not case when (exists(select 1 from t1 where (((abs(19)/abs((case when exists(select 1 from t1 where +t1.f>c) then 19 when f in (t1.d,t1.a,13) and b=t1.e then t1.f else t1.a end))) in (e, -t1.c,t1.b))))) then -t1.f when b in (d,11, -d) then t1.e*t1.a else a end+d<>t1.f and (t1.f=( -17))} +} {} +do_test randexpr-2.585 { + db eval {SELECT coalesce((select max(t1.e++case ~t1.c when t1.f-c then t1.b*+coalesce((select max(+t1.c) from t1 where 19-19-13 | t1.d+(select ~min(17) from t1) not in (case when t1.a<>b then e else -e end,(t1.d),t1.a)),b)+((e))* -t1.e else t1.d end | b) from t1 where a not between 13 and c),t1.e) FROM t1 WHERE NOT (not case when (exists(select 1 from t1 where (((abs(19)/abs((case when exists(select 1 from t1 where +t1.f>c) then 19 when f in (t1.d,t1.a,13) 
and b=t1.e then t1.f else t1.a end))) in (e, -t1.c,t1.b))))) then -t1.f when b in (d,11, -d) then t1.e*t1.a else a end+d<>t1.f and (t1.f=( -17)))} +} {500} +do_test randexpr-2.586 { + db eval {SELECT coalesce((select max(t1.e++case ~t1.c when t1.f-c then t1.b*+coalesce((select max(+t1.c) from t1 where 19-19-13 & t1.d+(select ~min(17) from t1) not in (case when t1.a<>b then e else -e end,(t1.d),t1.a)),b)+((e))* -t1.e else t1.d end & b) from t1 where a not between 13 and c),t1.e) FROM t1 WHERE NOT (not case when (exists(select 1 from t1 where (((abs(19)/abs((case when exists(select 1 from t1 where +t1.f>c) then 19 when f in (t1.d,t1.a,13) and b=t1.e then t1.f else t1.a end))) in (e, -t1.c,t1.b))))) then -t1.f when b in (d,11, -d) then t1.e*t1.a else a end+d<>t1.f and (t1.f=( -17)))} +} {500} +do_test randexpr-2.587 { + db eval {SELECT case when t1.d-~case t1.b when coalesce((select max(b) from t1 where exists(select 1 from t1 where t1.c<>b)),case when 17 in (select min(c+17) from t1 union select ~ -count(distinct 11) | max(d) from t1) then d when ((t1.a not between t1.f and t1.f)) then t1.a else t1.b end) then (c) else (t1.c) end=d then -13 when b in (select cast(avg((a)) AS integer) from t1 union select -~abs(count(*))*min(t1.f) from t1) then d else 19 end)/abs(19)) when 11 then 11 else t1.f end+13-t1.d else t1.b end+b>=d and not exists(select 1 from t1 where e>=19)} +} {} +do_test randexpr-2.588 { + db eval {SELECT case when t1.d-~case t1.b when coalesce((select max(b) from t1 where exists(select 1 from t1 where t1.c<>b)),case when 17 in (select min(c+17) from t1 union select ~ -count(distinct 11) | max(d) from t1) then d when ((t1.a not between t1.f and t1.f)) then t1.a else t1.b end) then (c) else (t1.c) end=d then -13 when b in (select cast(avg((a)) AS integer) from t1 union select -~abs(count(*))*min(t1.f) from t1) then d else 19 end)/abs(19)) when 11 then 11 else t1.f end+13-t1.d else t1.b end+b>=d and not exists(select 1 from t1 where e>=19))} +} {-5489} +do_test randexpr-2.589 { + db eval {SELECT case when t1.d-~case t1.b when coalesce((select max(b) from t1 where exists(select 1 from t1 where t1.c<>b)),case when 17 in (select min(c+17) from t1 union select ~ -count(distinct 11) & max(d) from t1) then d when ((t1.a not between t1.f and t1.f)) then t1.a else t1.b end) then (c) else (t1.c) end=d then -13 when b in (select cast(avg((a)) AS integer) from t1 union select -~abs(count(*))*min(t1.f) from t1) then d else 19 end)/abs(19)) when 11 then 11 else t1.f end+13-t1.d else t1.b end+b>=d and not exists(select 1 from t1 where e>=19))} +} {-5489} +do_test randexpr-2.590 { + db eval {SELECT b+coalesce((select (t1.e) from t1 where coalesce((select max((select abs(cast(avg( -t1.f*17) AS integer))+count(*) from t1)-19*coalesce((select b*+~(t1.c)*(abs(c)/abs(a))+t1.e from t1 where 11 in (t1.a,17,b* -t1.f)),t1.e)*t1.b) from t1 where t1.f<>17),17) in (11,11, -t1.f)),t1.e) FROM t1 WHERE coalesce((select max(coalesce((select 19-e from t1 where t1.b not in (coalesce((select max(t1.e+coalesce((select 11+ -coalesce((select max(t1.a) from t1 where t1.a in (select cast(avg( -11) AS integer) from t1 union select -max(c) from t1)),11)*t1.a from t1 where exists(select 1 from t1 where a= -e)),e)-t1.c | t1.d) from t1 where t1.e=t1.a),17),(t1.e),c)),t1.b)) from t1 where not exists(select 1 from t1 where (13>=a) or exists(select 1 from t1 where d<>11))), -19)<=t1.b} +} {700} +do_test randexpr-2.591 { + db eval {SELECT b+coalesce((select (t1.e) from t1 where coalesce((select max((select abs(cast(avg( 
-t1.f*17) AS integer))+count(*) from t1)-19*coalesce((select b*+~(t1.c)*(abs(c)/abs(a))+t1.e from t1 where 11 in (t1.a,17,b* -t1.f)),t1.e)*t1.b) from t1 where t1.f<>17),17) in (11,11, -t1.f)),t1.e) FROM t1 WHERE NOT (coalesce((select max(coalesce((select 19-e from t1 where t1.b not in (coalesce((select max(t1.e+coalesce((select 11+ -coalesce((select max(t1.a) from t1 where t1.a in (select cast(avg( -11) AS integer) from t1 union select -max(c) from t1)),11)*t1.a from t1 where exists(select 1 from t1 where a= -e)),e)-t1.c | t1.d) from t1 where t1.e=t1.a),17),(t1.e),c)),t1.b)) from t1 where not exists(select 1 from t1 where (13>=a) or exists(select 1 from t1 where d<>11))), -19)<=t1.b)} +} {} +do_test randexpr-2.592 { + db eval {SELECT (select count(distinct t1.a)-cast(avg(~c+coalesce((select e from t1 where e in (select t1.f from t1 union select d from t1) or t1.d not in (case when a not in (t1.d,t1.c,t1.b) then a-case when ~t1.b+t1.e | -t1.e not in (a,(b),19) then t1.d else 11 end*b when 13 in (c,13,t1.a) then -t1.a else 17 end,c,d)),t1.e)) AS integer) from t1) FROM t1 WHERE ~e<=(a-(select count(*) from t1)+c+(select -abs(abs( -(max((abs(coalesce((select a*(abs(b-b)/abs((c*a*b)-11+coalesce((select max(b) from t1 where ae),e))/abs(11))*t1.e)))) | abs((count(*))) from t1))} +} {-198} +do_test randexpr-2.593 { + db eval {SELECT (select count(distinct t1.a)-cast(avg(~c+coalesce((select e from t1 where e in (select t1.f from t1 union select d from t1) or t1.d not in (case when a not in (t1.d,t1.c,t1.b) then a-case when ~t1.b+t1.e | -t1.e not in (a,(b),19) then t1.d else 11 end*b when 13 in (c,13,t1.a) then -t1.a else 17 end,c,d)),t1.e)) AS integer) from t1) FROM t1 WHERE NOT (~e<=(a-(select count(*) from t1)+c+(select -abs(abs( -(max((abs(coalesce((select a*(abs(b-b)/abs((c*a*b)-11+coalesce((select max(b) from t1 where ae),e))/abs(11))*t1.e)))) | abs((count(*))) from t1)))} +} {} +do_test randexpr-2.594 { + db eval {SELECT (select count(distinct t1.a)-cast(avg(~c+coalesce((select e from t1 where e in (select t1.f from t1 union select d from t1) or t1.d not in (case when a not in (t1.d,t1.c,t1.b) then a-case when ~t1.b+t1.e & -t1.e not in (a,(b),19) then t1.d else 11 end*b when 13 in (c,13,t1.a) then -t1.a else 17 end,c,d)),t1.e)) AS integer) from t1) FROM t1 WHERE ~e<=(a-(select count(*) from t1)+c+(select -abs(abs( -(max((abs(coalesce((select a*(abs(b-b)/abs((c*a*b)-11+coalesce((select max(b) from t1 where ae),e))/abs(11))*t1.e)))) | abs((count(*))) from t1))} +} {-198} +do_test randexpr-2.595 { + db eval {SELECT 13-(select cast(avg(t1.c-13) AS integer) from t1) | 11+t1.b | coalesce((select max(case when b in (select case -case max(17) when ~cast(avg(13-t1.a) AS integer) then count(distinct 13) else - -((cast(avg(e) AS integer))) end when cast(avg(t1.c) AS integer) then count(distinct d) else count(*) end from t1 union select count(*) from t1) then case when t1.d<>~13 then b when t1.c<>13 then t1.a else a end else t1.a end) from t1 where t1.f<>( -(17))),t1.e)+c FROM t1 WHERE (~coalesce((select (11+t1.d) from t1 where 17 in (select f+a from t1 union select case when 11>coalesce((select -case when (11 not in (t1.f,(abs(t1.a)/abs((select count(distinct -case when 13<=e then c when d between (t1.d) and t1.d then a else 11 end) from t1)-f)),c)) then f when (t1.f>t1.d) then 17 else 13 end from t1 where t1.e>a),17) then f else t1.b end from t1)),b)*13>e)} +} {} +do_test randexpr-2.596 { + db eval {SELECT 13-(select cast(avg(t1.c-13) AS integer) from t1) | 11+t1.b | coalesce((select max(case when b in 
(select case -case max(17) when ~cast(avg(13-t1.a) AS integer) then count(distinct 13) else - -((cast(avg(e) AS integer))) end when cast(avg(t1.c) AS integer) then count(distinct d) else count(*) end from t1 union select count(*) from t1) then case when t1.d<>~13 then b when t1.c<>13 then t1.a else a end else t1.a end) from t1 where t1.f<>( -(17))),t1.e)+c FROM t1 WHERE NOT ((~coalesce((select (11+t1.d) from t1 where 17 in (select f+a from t1 union select case when 11>coalesce((select -case when (11 not in (t1.f,(abs(t1.a)/abs((select count(distinct -case when 13<=e then c when d between (t1.d) and t1.d then a else 11 end) from t1)-f)),c)) then f when (t1.f>t1.d) then 17 else 13 end from t1 where t1.e>a),17) then f else t1.b end from t1)),b)*13>e))} +} {-1} +do_test randexpr-2.597 { + db eval {SELECT 13-(select cast(avg(t1.c-13) AS integer) from t1) & 11+t1.b & coalesce((select max(case when b in (select case -case max(17) when ~cast(avg(13-t1.a) AS integer) then count(distinct 13) else - -((cast(avg(e) AS integer))) end when cast(avg(t1.c) AS integer) then count(distinct d) else count(*) end from t1 union select count(*) from t1) then case when t1.d<>~13 then b when t1.c<>13 then t1.a else a end else t1.a end) from t1 where t1.f<>( -(17))),t1.e)+c FROM t1 WHERE NOT ((~coalesce((select (11+t1.d) from t1 where 17 in (select f+a from t1 union select case when 11>coalesce((select -case when (11 not in (t1.f,(abs(t1.a)/abs((select count(distinct -case when 13<=e then c when d between (t1.d) and t1.d then a else 11 end) from t1)-f)),c)) then f when (t1.f>t1.d) then 17 else 13 end from t1 where t1.e>a),17) then f else t1.b end from t1)),b)*13>e))} +} {128} +do_test randexpr-2.598 { + db eval {SELECT f+coalesce((select case when ~case when coalesce((select max(b) from t1 where not exists(select 1 from t1 where 11*t1.b>case t1.b when 13 then 11 else b end)),t1.b) not in (a,b,+t1.e | case when t1.a<>17 then 17 else t1.e end) then c when (t1.e)<>d then 11 else f end*(e) not between d and e then 17 else 17 end from t1 where 13 not between 19 and f),t1.b)*11 FROM t1 WHERE not exists(select 1 from t1 where 11 between c and b and -case when 13 between -f | case when t1.b<+c then b when t1.f>13 then a else (d) end and d then t1.d when t1.a in (select 19 from t1 union select 11 from t1) or not exists(select 1 from t1 where exists(select 1 from t1 where not b=t1.b)) then -19 else t1.b end in (select max(17) from t1 union select count(distinct a)+count(distinct 11) from t1) and t1.fcase t1.b when 13 then 11 else b end)),t1.b) not in (a,b,+t1.e | case when t1.a<>17 then 17 else t1.e end) then c when (t1.e)<>d then 11 else f end*(e) not between d and e then 17 else 17 end from t1 where 13 not between 19 and f),t1.b)*11 FROM t1 WHERE NOT (not exists(select 1 from t1 where 11 between c and b and -case when 13 between -f | case when t1.b<+c then b when t1.f>13 then a else (d) end and d then t1.d when t1.a in (select 19 from t1 union select 11 from t1) or not exists(select 1 from t1 where exists(select 1 from t1 where not b=t1.b)) then -19 else t1.b end in (select max(17) from t1 union select count(distinct a)+count(distinct 11) from t1) and t1.fcase t1.b when 13 then 11 else b end)),t1.b) not in (a,b,+t1.e & case when t1.a<>17 then 17 else t1.e end) then c when (t1.e)<>d then 11 else f end*(e) not between d and e then 17 else 17 end from t1 where 13 not between 19 and f),t1.b)*11 FROM t1 WHERE NOT (not exists(select 1 from t1 where 11 between c and b and -case when 13 between -f | case when t1.b<+c then b when 
t1.f>13 then a else (d) end and d then t1.d when t1.a in (select 19 from t1 union select 11 from t1) or not exists(select 1 from t1 where exists(select 1 from t1 where not b=t1.b)) then -19 else t1.b end in (select max(17) from t1 union select count(distinct a)+count(distinct 11) from t1) and t1.f=t1.a-11),11))/abs(17))=t1.f and ((t1.f)=t1.e) then t1.a+t1.d when not exists(select 1 from t1 where -b>c) or t1.a>=11 then t1.e else e end*13*t1.f | d>=19),t1.b)+t1.a FROM t1 WHERE t1.e not in (t1.f+case when f-d not between coalesce((select max(d) from t1 where (coalesce((select max(17) from t1 where not 19 in (select cast(avg(c) AS integer)-~count(distinct f- -e)+cast(avg(19) AS integer) from t1 union select cast(avg(d) AS integer) from t1)),b) not in (13,t1.c,13))),17)+ -e+t1.f and -(t1.b) then (t1.d) when t1.b in (select min(a) from t1 union select min(17) from t1) then c else 11 end,c, -13)} +} {600} +do_test randexpr-2.602 { + db eval {SELECT coalesce((select max(e) from t1 where e*~t1.b*e+c*case when +(abs(coalesce((select max(t1.b) from t1 where t1.b>=t1.a-11),11))/abs(17))=t1.f and ((t1.f)=t1.e) then t1.a+t1.d when not exists(select 1 from t1 where -b>c) or t1.a>=11 then t1.e else e end*13*t1.f | d>=19),t1.b)+t1.a FROM t1 WHERE NOT (t1.e not in (t1.f+case when f-d not between coalesce((select max(d) from t1 where (coalesce((select max(17) from t1 where not 19 in (select cast(avg(c) AS integer)-~count(distinct f- -e)+cast(avg(19) AS integer) from t1 union select cast(avg(d) AS integer) from t1)),b) not in (13,t1.c,13))),17)+ -e+t1.f and -(t1.b) then (t1.d) when t1.b in (select min(a) from t1 union select min(17) from t1) then c else 11 end,c, -13))} +} {} +do_test randexpr-2.603 { + db eval {SELECT coalesce((select max(e) from t1 where e*~t1.b*e+c*case when +(abs(coalesce((select max(t1.b) from t1 where t1.b>=t1.a-11),11))/abs(17))=t1.f and ((t1.f)=t1.e) then t1.a+t1.d when not exists(select 1 from t1 where -b>c) or t1.a>=11 then t1.e else e end*13*t1.f & d>=19),t1.b)+t1.a FROM t1 WHERE t1.e not in (t1.f+case when f-d not between coalesce((select max(d) from t1 where (coalesce((select max(17) from t1 where not 19 in (select cast(avg(c) AS integer)-~count(distinct f- -e)+cast(avg(19) AS integer) from t1 union select cast(avg(d) AS integer) from t1)),b) not in (13,t1.c,13))),17)+ -e+t1.f and -(t1.b) then (t1.d) when t1.b in (select min(a) from t1 union select min(17) from t1) then c else 11 end,c, -13)} +} {600} +do_test randexpr-2.604 { + db eval {SELECT case when exists(select 1 from t1 where f in (select cast(avg(t1.c) AS integer) from t1 union select min(t1.b) from t1)) then c+d-d when (coalesce((select a from t1 where a<(select abs(count(*)) from t1)),(select count(distinct t1.e) | abs(+ -count(distinct t1.e)*min(t1.b) | count(distinct 11)) from t1))) not in (17,(abs(t1.d)/abs(coalesce((select max(t1.f) from t1 where t1.a*t1.f>=t1.c),t1.f))),t1.e) then t1.c else t1.e end-f FROM t1 WHERE 13*~~(abs(b)/abs(coalesce((select max(t1.a) from t1 where t1.e in ( -(13-17)-b+(e | (abs(t1.e)/abs(case when ~t1.d between t1.e and c and t1.b in (select t1.c from t1 union select b from t1) then 11+t1.b when a=t1.d then b else c end)))*17,t1.c, -17)),13)))-e+b-19>19} +} {} +do_test randexpr-2.605 { + db eval {SELECT case when exists(select 1 from t1 where f in (select cast(avg(t1.c) AS integer) from t1 union select min(t1.b) from t1)) then c+d-d when (coalesce((select a from t1 where a<(select abs(count(*)) from t1)),(select count(distinct t1.e) | abs(+ -count(distinct t1.e)*min(t1.b) | count(distinct 
11)) from t1))) not in (17,(abs(t1.d)/abs(coalesce((select max(t1.f) from t1 where t1.a*t1.f>=t1.c),t1.f))),t1.e) then t1.c else t1.e end-f FROM t1 WHERE NOT (13*~~(abs(b)/abs(coalesce((select max(t1.a) from t1 where t1.e in ( -(13-17)-b+(e | (abs(t1.e)/abs(case when ~t1.d between t1.e and c and t1.b in (select t1.c from t1 union select b from t1) then 11+t1.b when a=t1.d then b else c end)))*17,t1.c, -17)),13)))-e+b-19>19)} +} {-300} +do_test randexpr-2.606 { + db eval {SELECT case when exists(select 1 from t1 where f in (select cast(avg(t1.c) AS integer) from t1 union select min(t1.b) from t1)) then c+d-d when (coalesce((select a from t1 where a<(select abs(count(*)) from t1)),(select count(distinct t1.e) & abs(+ -count(distinct t1.e)*min(t1.b) & count(distinct 11)) from t1))) not in (17,(abs(t1.d)/abs(coalesce((select max(t1.f) from t1 where t1.a*t1.f>=t1.c),t1.f))),t1.e) then t1.c else t1.e end-f FROM t1 WHERE NOT (13*~~(abs(b)/abs(coalesce((select max(t1.a) from t1 where t1.e in ( -(13-17)-b+(e | (abs(t1.e)/abs(case when ~t1.d between t1.e and c and t1.b in (select t1.c from t1 union select b from t1) then 11+t1.b when a=t1.d then b else c end)))*17,t1.c, -17)),13)))-e+b-19>19)} +} {-100} +do_test randexpr-2.607 { + db eval {SELECT case when t1.f=17 then t1.d-19-11*f*t1.a*t1.b-case (select +cast(avg(a) AS integer) from t1) when 19 then 17 else a end when t1.a between case t1.c when t1.c*t1.c then (d) else t1.c end and c then e else t1.a end FROM t1 WHERE not exists(select 1 from t1 where exists(select 1 from t1 where (abs(+f-( -~(select count(distinct ~a*e | (select max(f)*count(distinct t1.f) from t1)) | -max(17)+(count(*))+max(11)+count(distinct t1.e)- - -count(distinct f) from t1)+b*b-t1.c) | t1.e)/abs(17))*c*b in (17,19,t1.e)))} +} {-131999719} +do_test randexpr-2.608 { + db eval {SELECT case when t1.f=17 then t1.d-19-11*f*t1.a*t1.b-case (select +cast(avg(a) AS integer) from t1) when 19 then 17 else a end when t1.a between case t1.c when t1.c*t1.c then (d) else t1.c end and c then e else t1.a end FROM t1 WHERE NOT (not exists(select 1 from t1 where exists(select 1 from t1 where (abs(+f-( -~(select count(distinct ~a*e | (select max(f)*count(distinct t1.f) from t1)) | -max(17)+(count(*))+max(11)+count(distinct t1.e)- - -count(distinct f) from t1)+b*b-t1.c) | t1.e)/abs(17))*c*b in (17,19,t1.e))))} +} {} +do_test randexpr-2.609 { + db eval {SELECT f-~+17*(abs(t1.f)/abs(a*case when case when (case when d>=t1.b or t1.b>=11 then t1.c when t1.b>t1.d then t1.d else t1.a end+17) not in (11,t1.e,17) then c when (t1.b>=a and b>b) then t1.d else d end in (select +abs( -max(t1.e)) from t1 union select count(*) | max(17) from t1) then (t1.a) else a end+(t1.e)))*(11)-f | t1.a-f FROM t1 WHERE ((t1.d>=t1.c))} +} {-500} +do_test randexpr-2.610 { + db eval {SELECT f-~+17*(abs(t1.f)/abs(a*case when case when (case when d>=t1.b or t1.b>=11 then t1.c when t1.b>t1.d then t1.d else t1.a end+17) not in (11,t1.e,17) then c when (t1.b>=a and b>b) then t1.d else d end in (select +abs( -max(t1.e)) from t1 union select count(*) | max(17) from t1) then (t1.a) else a end+(t1.e)))*(11)-f | t1.a-f FROM t1 WHERE NOT (((t1.d>=t1.c)))} +} {} +do_test randexpr-2.611 { + db eval {SELECT f-~+17*(abs(t1.f)/abs(a*case when case when (case when d>=t1.b or t1.b>=11 then t1.c when t1.b>t1.d then t1.d else t1.a end+17) not in (11,t1.e,17) then c when (t1.b>=a and b>b) then t1.d else d end in (select +abs( -max(t1.e)) from t1 union select count(*) & max(17) from t1) then (t1.a) else a end+(t1.e)))*(11)-f & t1.a-f FROM t1 WHERE 
((t1.d>=t1.c))} +} {0} +do_test randexpr-2.612 { + db eval {SELECT e+ -((abs(f)/abs(17))) | t1.e*11-coalesce((select max(f) from t1 where ((abs(~+t1.b-coalesce((select coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where ((f) not in (11,19,t1.b)))),11* -t1.b) from t1 where -a<=c),t1.f))/abs( -b)) between t1.c and e and not exists(select 1 from t1 where f not in ( -t1.f,19,t1.e)))),case t1.c when 19 then (11) else (t1.a) end) | (13) FROM t1 WHERE d<>13 or t1.c in (select count(distinct -c*11-( -e)-t1.e)-( -case +max(t1.c)-min((select +cast(avg(19*17) AS integer) from t1)* -t1.f)+~(~count(distinct f))+count(*)-cast(avg(t1.c) AS integer) when count(distinct t1.f) then count(distinct e) else -count(*) end-count(*)) from t1 union select count(distinct 11) from t1)} +} {5597} +do_test randexpr-2.613 { + db eval {SELECT e+ -((abs(f)/abs(17))) | t1.e*11-coalesce((select max(f) from t1 where ((abs(~+t1.b-coalesce((select coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where ((f) not in (11,19,t1.b)))),11* -t1.b) from t1 where -a<=c),t1.f))/abs( -b)) between t1.c and e and not exists(select 1 from t1 where f not in ( -t1.f,19,t1.e)))),case t1.c when 19 then (11) else (t1.a) end) | (13) FROM t1 WHERE NOT (d<>13 or t1.c in (select count(distinct -c*11-( -e)-t1.e)-( -case +max(t1.c)-min((select +cast(avg(19*17) AS integer) from t1)* -t1.f)+~(~count(distinct f))+count(*)-cast(avg(t1.c) AS integer) when count(distinct t1.f) then count(distinct e) else -count(*) end-count(*)) from t1 union select count(distinct 11) from t1))} +} {} +do_test randexpr-2.614 { + db eval {SELECT e+ -((abs(f)/abs(17))) & t1.e*11-coalesce((select max(f) from t1 where ((abs(~+t1.b-coalesce((select coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where ((f) not in (11,19,t1.b)))),11* -t1.b) from t1 where -a<=c),t1.f))/abs( -b)) between t1.c and e and not exists(select 1 from t1 where f not in ( -t1.f,19,t1.e)))),case t1.c when 19 then (11) else (t1.a) end) & (13) FROM t1 WHERE d<>13 or t1.c in (select count(distinct -c*11-( -e)-t1.e)-( -case +max(t1.c)-min((select +cast(avg(19*17) AS integer) from t1)* -t1.f)+~(~count(distinct f))+count(*)-cast(avg(t1.c) AS integer) when count(distinct t1.f) then count(distinct e) else -count(*) end-count(*)) from t1 union select count(distinct 11) from t1)} +} {0} +do_test randexpr-2.615 { + db eval {SELECT coalesce((select case t1.e when t1.d then -t1.b else b | t1.f end from t1 where case e when case 11 when t1.f then 13 else c end-19 then case -(select count(distinct 19+ - -d*t1.f+d) from t1)*(abs(c)/abs(coalesce((select max(e) from t1 where not t1.c>t1.d),(17-t1.a)))) when 11 then t1.f else t1.a end else a end<>t1.c),t1.e) FROM t1 WHERE (select case count(*)* -+abs(+count(*)+min(t1.d)) | max(+(17)) when ~count(*)*(count(*))-(count(distinct t1.b)) then count(distinct 11) else max(t1.e) end from t1)>=(abs(coalesce((select t1.b from t1 where (at1.d),(17-t1.a)))) when 11 then t1.f else t1.a end else a end<>t1.c),t1.e) FROM t1 WHERE NOT ((select case count(*)* -+abs(+count(*)+min(t1.d)) | max(+(17)) when ~count(*)*(count(*))-(count(distinct t1.b)) then count(distinct 11) else max(t1.e) end from t1)>=(abs(coalesce((select t1.b from t1 where (at1.d),(17-t1.a)))) when 11 then t1.f else t1.a end else a end<>t1.c),t1.e) FROM t1 WHERE (select case count(*)* -+abs(+count(*)+min(t1.d)) | max(+(17)) when ~count(*)*(count(*))-(count(distinct t1.b)) then count(distinct 11) else max(t1.e) end from t1)>=(abs(coalesce((select t1.b from t1 where 
(a(t1.c) then t1.d else t1.d end FROM t1 WHERE case when 17-t1.f in (select case -~max(c-~e+19)*max(c)*count(*)+~count(distinct f)+case count(distinct d) when max(t1.b) then cast(avg(t1.d) AS integer) else count(distinct d) end-count(distinct d)*max(f)+ -count(distinct 13) when count(*) then ( -min(t1.c)) else min(( -d)) end from t1 union select -min(c) from t1) then 13 else case t1.c when t1.c then f else d end end-e-(17) in (select c from t1 union select c from t1)} +} {} +do_test randexpr-2.619 { + db eval {SELECT case when 13<11 then 19 when -(abs(f)/abs(13))*17-13 | case t1.b when ~((select (count(*)-abs(case cast(avg(19) AS integer) when count(distinct 19)*(count(*))-count(*) then count(distinct t1.d) else -count(distinct 17) end)+max(17)) from t1)) then t1.c else d end+11*t1.e*t1.a | e>(t1.c) then t1.d else t1.d end FROM t1 WHERE NOT (case when 17-t1.f in (select case -~max(c-~e+19)*max(c)*count(*)+~count(distinct f)+case count(distinct d) when max(t1.b) then cast(avg(t1.d) AS integer) else count(distinct d) end-count(distinct d)*max(f)+ -count(distinct 13) when count(*) then ( -min(t1.c)) else min(( -d)) end from t1 union select -min(c) from t1) then 13 else case t1.c when t1.c then f else d end end-e-(17) in (select c from t1 union select c from t1))} +} {400} +do_test randexpr-2.620 { + db eval {SELECT case when 13<11 then 19 when -(abs(f)/abs(13))*17-13 & case t1.b when ~((select (count(*)-abs(case cast(avg(19) AS integer) when count(distinct 19)*(count(*))-count(*) then count(distinct t1.d) else -count(distinct 17) end)+max(17)) from t1)) then t1.c else d end+11*t1.e*t1.a & e>(t1.c) then t1.d else t1.d end FROM t1 WHERE NOT (case when 17-t1.f in (select case -~max(c-~e+19)*max(c)*count(*)+~count(distinct f)+case count(distinct d) when max(t1.b) then cast(avg(t1.d) AS integer) else count(distinct d) end-count(distinct d)*max(f)+ -count(distinct 13) when count(*) then ( -min(t1.c)) else min(( -d)) end from t1 union select -min(c) from t1) then 13 else case t1.c when t1.c then f else d end end-e-(17) in (select c from t1 union select c from t1))} +} {400} +do_test randexpr-2.621 { + db eval {SELECT coalesce((select case ~(case when -f between t1.e and f then coalesce((select max(t1.a-t1.d) from t1 where d=b),case when (f*11-a-t1.d-c>t1.e and b>=19) then -+d when t1.b>= -13 then 17 else t1.f end) else (19) end) when 11 then b else f end from t1 where -t1.e<=t1.d),a) FROM t1 WHERE (+coalesce((select max((abs(19*a-t1.b+t1.a*a)/abs(19))) from t1 where not t1.f in (select case -count(*)-cast(avg(c) AS integer)+cast(avg(e) AS integer)*count(*) when min(t1.d) then -(cast(avg(t1.f) AS integer)) else max(13) end+min(t1.d)+count(*) | count(*) | min(11) | count(distinct f) from t1 union select -count(*) from t1)),e))-t1.e*d | f in (t1.d,e,e)} +} {} +do_test randexpr-2.622 { + db eval {SELECT coalesce((select case ~(case when -f between t1.e and f then coalesce((select max(t1.a-t1.d) from t1 where d=b),case when (f*11-a-t1.d-c>t1.e and b>=19) then -+d when t1.b>= -13 then 17 else t1.f end) else (19) end) when 11 then b else f end from t1 where -t1.e<=t1.d),a) FROM t1 WHERE NOT ((+coalesce((select max((abs(19*a-t1.b+t1.a*a)/abs(19))) from t1 where not t1.f in (select case -count(*)-cast(avg(c) AS integer)+cast(avg(e) AS integer)*count(*) when min(t1.d) then -(cast(avg(t1.f) AS integer)) else max(13) end+min(t1.d)+count(*) | count(*) | min(11) | count(distinct f) from t1 union select -count(*) from t1)),e))-t1.e*d | f in (t1.d,e,e))} +} {600} +do_test randexpr-2.623 { + db eval {SELECT case 
d*t1.f when -t1.a then t1.c+(coalesce((select max(13+case when -t1.d*13*c*d | case t1.e when t1.e then t1.b+t1.d else (abs(t1.b)/abs(case when c>b or d in (t1.e,a,d) then d else e end))+f end=t1.f then a when t1.a<=19 then 17 else d end) from t1 where (t1.f>t1.b)),(f))) else f end+t1.a FROM t1 WHERE t1.f+case d when 19 then +t1.c else 19-(abs((t1.b)*~coalesce((select f from t1 where (case when +case when t1.d>13 then t1.e else (t1.a) end<13 then f when t1.c<=e then d else t1.a end*t1.d in (select +case ~min(b) when -max(13) then cast(avg( -(11)) AS integer) else -cast(avg(17) AS integer) end+count(distinct t1.a) from t1 union select min(t1.f) from t1))),t1.a))/abs(f)) end+(a) in (b,t1.c,t1.a)} +} {} +do_test randexpr-2.624 { + db eval {SELECT case d*t1.f when -t1.a then t1.c+(coalesce((select max(13+case when -t1.d*13*c*d | case t1.e when t1.e then t1.b+t1.d else (abs(t1.b)/abs(case when c>b or d in (t1.e,a,d) then d else e end))+f end=t1.f then a when t1.a<=19 then 17 else d end) from t1 where (t1.f>t1.b)),(f))) else f end+t1.a FROM t1 WHERE NOT (t1.f+case d when 19 then +t1.c else 19-(abs((t1.b)*~coalesce((select f from t1 where (case when +case when t1.d>13 then t1.e else (t1.a) end<13 then f when t1.c<=e then d else t1.a end*t1.d in (select +case ~min(b) when -max(13) then cast(avg( -(11)) AS integer) else -cast(avg(17) AS integer) end+count(distinct t1.a) from t1 union select min(t1.f) from t1))),t1.a))/abs(f)) end+(a) in (b,t1.c,t1.a))} +} {700} +do_test randexpr-2.625 { + db eval {SELECT case d*t1.f when -t1.a then t1.c+(coalesce((select max(13+case when -t1.d*13*c*d & case t1.e when t1.e then t1.b+t1.d else (abs(t1.b)/abs(case when c>b or d in (t1.e,a,d) then d else e end))+f end=t1.f then a when t1.a<=19 then 17 else d end) from t1 where (t1.f>t1.b)),(f))) else f end+t1.a FROM t1 WHERE NOT (t1.f+case d when 19 then +t1.c else 19-(abs((t1.b)*~coalesce((select f from t1 where (case when +case when t1.d>13 then t1.e else (t1.a) end<13 then f when t1.c<=e then d else t1.a end*t1.d in (select +case ~min(b) when -max(13) then cast(avg( -(11)) AS integer) else -cast(avg(17) AS integer) end+count(distinct t1.a) from t1 union select min(t1.f) from t1))),t1.a))/abs(f)) end+(a) in (b,t1.c,t1.a))} +} {700} +do_test randexpr-2.626 { + db eval {SELECT case case when (not exists(select 1 from t1 where -e in (select t1.c from t1 union select b from t1))) then coalesce((select max(t1.d) from t1 where (17+~t1.c in (select min(t1.b) from t1 union select min(13) from t1))),t1.d)*t1.a+d when not a=a and t1.b not in (b,13,d) or d<13 or t1.f not between t1.c and f or t1.c<>c then t1.e else t1.d end when t1.c then (t1.d) else -f end FROM t1 WHERE (abs(case when (t1.e in (17,19+t1.b,t1.a)) or (t1.d-19)+(t1.d)+t1.d in (select t1.f from t1 union select t1.e from t1) and 13 in (select t1.c from t1 union select c from t1) or 11 not between e and 17 then -17-a else d end*13)/abs((b)))*d-f-13 between t1.d and (b)} +} {} +do_test randexpr-2.627 { + db eval {SELECT case case when (not exists(select 1 from t1 where -e in (select t1.c from t1 union select b from t1))) then coalesce((select max(t1.d) from t1 where (17+~t1.c in (select min(t1.b) from t1 union select min(13) from t1))),t1.d)*t1.a+d when not a=a and t1.b not in (b,13,d) or d<13 or t1.f not between t1.c and f or t1.c<>c then t1.e else t1.d end when t1.c then (t1.d) else -f end FROM t1 WHERE NOT ((abs(case when (t1.e in (17,19+t1.b,t1.a)) or (t1.d-19)+(t1.d)+t1.d in (select t1.f from t1 union select t1.e from t1) and 13 in (select t1.c from t1 union 
select c from t1) or 11 not between e and 17 then -17-a else d end*13)/abs((b)))*d-f-13 between t1.d and (b))} +} {-600} +do_test randexpr-2.628 { + db eval {SELECT a+a+(f)-(t1.f+d+t1.e | case when case (13) when t1.c then case coalesce((select -t1.b-t1.f-13 from t1 where t1.f between t1.d and (a)),t1.b) when b then 13 else t1.f end-f else t1.d end>=e then 19 else t1.c end*t1.c)*f-(t1.d)-b FROM t1 WHERE (t1.b)=t1.a+t1.d-13 | +~f+13 | e+t1.c+coalesce((select t1.a+e from t1 where b in (select (~max(b-t1.b-coalesce((select ((a)) from t1 where e=d or b not between b and t1.e),t1.b)))++~case case count(*) when -max(t1.c) then min(b) else min(17) end when max( -t1.a) then (min(t1.f)) else max((a)) end*cast(avg(f) AS integer)+max(19) from t1 union select count(distinct f) from t1)),(t1.e)) | t1.b} +} {} +do_test randexpr-2.629 { + db eval {SELECT a+a+(f)-(t1.f+d+t1.e | case when case (13) when t1.c then case coalesce((select -t1.b-t1.f-13 from t1 where t1.f between t1.d and (a)),t1.b) when b then 13 else t1.f end-f else t1.d end>=e then 19 else t1.c end*t1.c)*f-(t1.d)-b FROM t1 WHERE NOT ((t1.b)=t1.a+t1.d-13 | +~f+13 | e+t1.c+coalesce((select t1.a+e from t1 where b in (select (~max(b-t1.b-coalesce((select ((a)) from t1 where e=d or b not between b and t1.e),t1.b)))++~case case count(*) when -max(t1.c) then min(b) else min(17) end when max( -t1.a) then (min(t1.f)) else max((a)) end*cast(avg(f) AS integer)+max(19) from t1 union select count(distinct f) from t1)),(t1.e)) | t1.b)} +} {-54045400} +do_test randexpr-2.630 { + db eval {SELECT a+a+(f)-(t1.f+d+t1.e & case when case (13) when t1.c then case coalesce((select -t1.b-t1.f-13 from t1 where t1.f between t1.d and (a)),t1.b) when b then 13 else t1.f end-f else t1.d end>=e then 19 else t1.c end*t1.c)*f-(t1.d)-b FROM t1 WHERE NOT ((t1.b)=t1.a+t1.d-13 | +~f+13 | e+t1.c+coalesce((select t1.a+e from t1 where b in (select (~max(b-t1.b-coalesce((select ((a)) from t1 where e=d or b not between b and t1.e),t1.b)))++~case case count(*) when -max(t1.c) then min(b) else min(17) end when max( -t1.a) then (min(t1.f)) else max((a)) end*cast(avg(f) AS integer)+max(19) from t1 union select count(distinct f) from t1)),(t1.e)) | t1.b)} +} {-854200} +do_test randexpr-2.631 { + db eval {SELECT 19-e+t1.d | 19-(abs(11)/abs(t1.b-(select case cast(avg(t1.f) AS integer)-count(*)-count(distinct t1.c)-abs(count(*))-(cast(avg(case when 11b),t1.d) else ~11 end+t1.b,11,19) then t1.a else -t1.d-case d*a when t1.d then t1.e else 13 end end+e+b>19 then t1.a else t1.f end not between t1.d and e} +} {-1} +do_test randexpr-2.632 { + db eval {SELECT 19-e+t1.d | 19-(abs(11)/abs(t1.b-(select case cast(avg(t1.f) AS integer)-count(*)-count(distinct t1.c)-abs(count(*))-(cast(avg(case when 11b),t1.d) else ~11 end+t1.b,11,19) then t1.a else -t1.d-case d*a when t1.d then t1.e else 13 end end+e+b>19 then t1.a else t1.f end not between t1.d and e)} +} {} +do_test randexpr-2.633 { + db eval {SELECT 19-e+t1.d & 19-(abs(11)/abs(t1.b-(select case cast(avg(t1.f) AS integer)-count(*)-count(distinct t1.c)-abs(count(*))-(cast(avg(case when 11b),t1.d) else ~11 end+t1.b,11,19) then t1.a else -t1.d-case d*a when t1.d then t1.e else 13 end end+e+b>19 then t1.a else t1.f end not between t1.d and e} +} {-348} +do_test randexpr-2.634 { + db eval {SELECT (select abs(abs(~ -min(case t1.a-d when (abs(f)/abs(f+t1.b)) then ~e-11-+ -13*t1.a else 13 end)-count(*)*case + -count(*) when (count(distinct t1.e)) then max(t1.f) else (count(*)* - -+count(*) | min(17)) end+(cast(avg(t1.f) AS integer))+min(d))) from t1) FROM 
t1 WHERE not t1.f>=t1.a or not exists(select 1 from t1 where 19=17) and 13 in (select abs(count(*) | ((count(*)))) from t1 union select count(distinct -t1.c | t1.a) | abs(abs(min(d+c))) from t1)} +} {} +do_test randexpr-2.635 { + db eval {SELECT (select abs(abs(~ -min(case t1.a-d when (abs(f)/abs(f+t1.b)) then ~e-11-+ -13*t1.a else 13 end)-count(*)*case + -count(*) when (count(distinct t1.e)) then max(t1.f) else (count(*)* - -+count(*) | min(17)) end+(cast(avg(t1.f) AS integer))+min(d))) from t1) FROM t1 WHERE NOT (not t1.f>=t1.a or not exists(select 1 from t1 where 19=17) and 13 in (select abs(count(*) | ((count(*)))) from t1 union select count(distinct -t1.c | t1.a) | abs(abs(min(d+c))) from t1))} +} {995} +do_test randexpr-2.636 { + db eval {SELECT (select abs(abs(~ -min(case t1.a-d when (abs(f)/abs(f+t1.b)) then ~e-11-+ -13*t1.a else 13 end)-count(*)*case + -count(*) when (count(distinct t1.e)) then max(t1.f) else (count(*)* - -+count(*) & min(17)) end+(cast(avg(t1.f) AS integer))+min(d))) from t1) FROM t1 WHERE NOT (not t1.f>=t1.a or not exists(select 1 from t1 where 19=17) and 13 in (select abs(count(*) | ((count(*)))) from t1 union select count(distinct -t1.c | t1.a) | abs(abs(min(d+c))) from t1))} +} {1011} +do_test randexpr-2.637 { + db eval {SELECT (case when not exists(select 1 from t1 where ((not t1.f in (b,17-d,t1.a) and f=t1.a))} +} {} +do_test randexpr-2.638 { + db eval {SELECT (case when not exists(select 1 from t1 where ((not t1.f in (b,17-d,t1.a) and f=t1.a)))} +} {-12} +do_test randexpr-2.639 { + db eval {SELECT (case when not exists(select 1 from t1 where ((not t1.f in (b,17-d,t1.a) and f=t1.a)))} +} {6212} +do_test randexpr-2.640 { + db eval {SELECT coalesce((select t1.a from t1 where case when +(abs(e)/abs(19))+(abs(coalesce((select max(b) from t1 where 19 not in (f,(c),11) or -c not in (t1.f,e,d)),13)*e+t1.f)/abs((d)))<>t1.b then t1.e when t1.d not in (t1.f,17,t1.b) or t1.f not between t1.d and 13 then t1.a else t1.e end>e or not exists(select 1 from t1 where a>=e or not exists(select 1 from t1 where d<>t1.b))),f)+19 FROM t1 WHERE 13 in (case when (select +max( -t1.b+a*case when not 13*13t1.f and ~e>= -t1.b then t1.c when 11 not between t1.d and e and (dt1.b then t1.e when t1.d not in (t1.f,17,t1.b) or t1.f not between t1.d and 13 then t1.a else t1.e end>e or not exists(select 1 from t1 where a>=e or not exists(select 1 from t1 where d<>t1.b))),f)+19 FROM t1 WHERE NOT (13 in (case when (select +max( -t1.b+a*case when not 13*13t1.f and ~e>= -t1.b then t1.c when 11 not between t1.d and e and (d+(t1.e*(d))-t1.d then t1.f else 11 end,17,f)),e) | 19-13=t1.b) then (select max(11)-(count(distinct f)) from t1) else t1.a end+c in (t1.d,e,f) or e in (11,11,11)))),b)*t1.b*11))) and t1.b>=a} +} {} +do_test randexpr-2.643 { + db eval {SELECT +coalesce((select case when coalesce((select f+t1.e from t1 where b not in (case when not exists(select 1 from t1 where (f not in (t1.f-t1.b,t1.d,t1.d))) then t1.a when t1.f*t1.c*17>+(t1.e*(d))-t1.d then t1.f else 11 end,17,f)),e) | 19-13=t1.b) then (select max(11)-(count(distinct f)) from t1) else t1.a end+c in (t1.d,e,f) or e in (11,11,11)))),b)*t1.b*11))) and t1.b>=a)} +} {19} +do_test randexpr-2.644 { + db eval {SELECT +coalesce((select case when coalesce((select f+t1.e from t1 where b not in (case when not exists(select 1 from t1 where (f not in (t1.f-t1.b,t1.d,t1.d))) then t1.a when t1.f*t1.c*17>+(t1.e*(d))-t1.d then t1.f else 11 end,17,f)),e) & 19-13=t1.b) then (select max(11)-(count(distinct f)) from t1) else t1.a end+c in (t1.d,e,f) 
or e in (11,11,11)))),b)*t1.b*11))) and t1.b>=a)} +} {19} +do_test randexpr-2.645 { + db eval {SELECT (case when case 19 when b then 13 else t1.e end in (select (c) from t1 union select a from t1) then 11 else (d*coalesce((select t1.b from t1 where 19 | (t1.c)+case when -17>t1.a then t1.a when (19>=13) then t1.a else t1.e end=f and (e)>a or 11 in (select e from t1 union select t1.b from t1)),t1.d*t1.e))+c+a end) FROM t1 WHERE case case when case when not (abs(t1.d | f)/abs(a)) in (d,a,t1.c) then -d when (not exists(select 1 from t1 where f>=c)) then t1.a else e end+t1.d not in (t1.b,(t1.f),11) or not exists(select 1 from t1 where not exists(select 1 from t1 where t1.b in (19,t1.c,13))) then b+t1.c when t1.d>=t1.a then t1.c else (c) end when (13) then t1.e else b end in (t1.a,t1.b,13)} +} {80000400} +do_test randexpr-2.646 { + db eval {SELECT (case when case 19 when b then 13 else t1.e end in (select (c) from t1 union select a from t1) then 11 else (d*coalesce((select t1.b from t1 where 19 | (t1.c)+case when -17>t1.a then t1.a when (19>=13) then t1.a else t1.e end=f and (e)>a or 11 in (select e from t1 union select t1.b from t1)),t1.d*t1.e))+c+a end) FROM t1 WHERE NOT (case case when case when not (abs(t1.d | f)/abs(a)) in (d,a,t1.c) then -d when (not exists(select 1 from t1 where f>=c)) then t1.a else e end+t1.d not in (t1.b,(t1.f),11) or not exists(select 1 from t1 where not exists(select 1 from t1 where t1.b in (19,t1.c,13))) then b+t1.c when t1.d>=t1.a then t1.c else (c) end when (13) then t1.e else b end in (t1.a,t1.b,13))} +} {} +do_test randexpr-2.647 { + db eval {SELECT (case when case 19 when b then 13 else t1.e end in (select (c) from t1 union select a from t1) then 11 else (d*coalesce((select t1.b from t1 where 19 & (t1.c)+case when -17>t1.a then t1.a when (19>=13) then t1.a else t1.e end=f and (e)>a or 11 in (select e from t1 union select t1.b from t1)),t1.d*t1.e))+c+a end) FROM t1 WHERE case case when case when not (abs(t1.d | f)/abs(a)) in (d,a,t1.c) then -d when (not exists(select 1 from t1 where f>=c)) then t1.a else e end+t1.d not in (t1.b,(t1.f),11) or not exists(select 1 from t1 where not exists(select 1 from t1 where t1.b in (19,t1.c,13))) then b+t1.c when t1.d>=t1.a then t1.c else (c) end when (13) then t1.e else b end in (t1.a,t1.b,13)} +} {80000400} +do_test randexpr-2.648 { + db eval {SELECT case when 19<=19 then d when not exists(select 1 from t1 where case case when case when (select -min(((t1.f)))*((cast(avg(t1.d) AS integer))) from t1) in (select t1.a-b from t1 union select 17 from t1) then f when (19 not between t1.c and 13 and f>13) then d else a end | b-t1.b in (t1.e,t1.f, -d) then t1.e when c=17) then t1.e else t1.e end FROM t1 WHERE coalesce((select +a- -t1.d*b*coalesce((select t1.c from t1 where t1.a in (select (abs(11)/abs(case when not exists(select 1 from t1 where +e=t1.e) then case t1.b when a-c then b else b end when (not exists(select 1 from t1 where 19 in (select 13 from t1 union select t1.b from t1))) and e in (t1.e, -f,e) then 19 else t1.a end)) from t1 union select b from t1)),17)*d from t1 where t1.f not in (t1.a,(t1.b),t1.d)),13)<=(13)} +} {} +do_test randexpr-2.649 { + db eval {SELECT case when 19<=19 then d when not exists(select 1 from t1 where case case when case when (select -min(((t1.f)))*((cast(avg(t1.d) AS integer))) from t1) in (select t1.a-b from t1 union select 17 from t1) then f when (19 not between t1.c and 13 and f>13) then d else a end | b-t1.b in (t1.e,t1.f, -d) then t1.e when c=17) then t1.e else t1.e end FROM t1 WHERE NOT 
(coalesce((select +a- -t1.d*b*coalesce((select t1.c from t1 where t1.a in (select (abs(11)/abs(case when not exists(select 1 from t1 where +e=t1.e) then case t1.b when a-c then b else b end when (not exists(select 1 from t1 where 19 in (select 13 from t1 union select t1.b from t1))) and e in (t1.e, -f,e) then 19 else t1.a end)) from t1 union select b from t1)),17)*d from t1 where t1.f not in (t1.a,(t1.b),t1.d)),13)<=(13))} +} {400} +do_test randexpr-2.650 { + db eval {SELECT case when 19<=19 then d when not exists(select 1 from t1 where case case when case when (select -min(((t1.f)))*((cast(avg(t1.d) AS integer))) from t1) in (select t1.a-b from t1 union select 17 from t1) then f when (19 not between t1.c and 13 and f>13) then d else a end & b-t1.b in (t1.e,t1.f, -d) then t1.e when c=17) then t1.e else t1.e end FROM t1 WHERE NOT (coalesce((select +a- -t1.d*b*coalesce((select t1.c from t1 where t1.a in (select (abs(11)/abs(case when not exists(select 1 from t1 where +e=t1.e) then case t1.b when a-c then b else b end when (not exists(select 1 from t1 where 19 in (select 13 from t1 union select t1.b from t1))) and e in (t1.e, -f,e) then 19 else t1.a end)) from t1 union select b from t1)),17)*d from t1 where t1.f not in (t1.a,(t1.b),t1.d)),13)<=(13))} +} {400} +do_test randexpr-2.651 { + db eval {SELECT case when c<=t1.b*e+case when not exists(select 1 from t1 where a+ -t1.f= -t1.e*(17)+f and 19 in (select -13 from t1 union select t1.f from t1) and 19>=t1.a or -11 in (select a from t1 union select 17 from t1)) then t1.f else 19 end+t1.d or ((c))<>a then t1.f-t1.c when c not between ((t1.b)) and f then a else d end FROM t1 WHERE coalesce((select t1.e from t1 where t1.c in (t1.a,f,coalesce((select max( -f) from t1 where t1.f=(abs(13)/abs(~19+(abs((select count(distinct t1.f) from t1))/abs(coalesce((select 13 from t1 where d | 17*c*a*b+d<>t1.a),b)-(b)))))),b)),19))),t1.e)*c<>t1.e} +} {300} +do_test randexpr-2.652 { + db eval {SELECT case when c<=t1.b*e+case when not exists(select 1 from t1 where a+ -t1.f= -t1.e*(17)+f and 19 in (select -13 from t1 union select t1.f from t1) and 19>=t1.a or -11 in (select a from t1 union select 17 from t1)) then t1.f else 19 end+t1.d or ((c))<>a then t1.f-t1.c when c not between ((t1.b)) and f then a else d end FROM t1 WHERE NOT (coalesce((select t1.e from t1 where t1.c in (t1.a,f,coalesce((select max( -f) from t1 where t1.f=(abs(13)/abs(~19+(abs((select count(distinct t1.f) from t1))/abs(coalesce((select 13 from t1 where d | 17*c*a*b+d<>t1.a),b)-(b)))))),b)),19))),t1.e)*c<>t1.e)} +} {} +do_test randexpr-2.653 { + db eval {SELECT case when t1.c>=13 or d in (select case ((case min(t1.f)+min(f) when max( -11) then min(19) else cast(avg( -t1.d) AS integer) end | count(*))) when count(distinct 13) then -cast(avg( -t1.e) AS integer) else min(e) end from t1 union select max( -a) from t1) then case when +c-11+(t1.d)13 then d when not exists(select 1 from t1 where t1.c in (f,t1.b,t1.b)) then a else 19 end*t1.c and t1.d>=19)} +} {600} +do_test randexpr-2.654 { + db eval {SELECT case when t1.c>=13 or d in (select case ((case min(t1.f)+min(f) when max( -11) then min(19) else cast(avg( -t1.d) AS integer) end | count(*))) when count(distinct 13) then -cast(avg( -t1.e) AS integer) else min(e) end from t1 union select max( -a) from t1) then case when +c-11+(t1.d)13 then d when not exists(select 1 from t1 where t1.c in (f,t1.b,t1.b)) then a else 19 end*t1.c and t1.d>=19))} +} {} +do_test randexpr-2.655 { + db eval {SELECT case when t1.c>=13 or d in (select case ((case 
min(t1.f)+min(f) when max( -11) then min(19) else cast(avg( -t1.d) AS integer) end & count(*))) when count(distinct 13) then -cast(avg( -t1.e) AS integer) else min(e) end from t1 union select max( -a) from t1) then case when +c-11+(t1.d)13 then d when not exists(select 1 from t1 where t1.c in (f,t1.b,t1.b)) then a else 19 end*t1.c and t1.d>=19)} +} {600} +do_test randexpr-2.656 { + db eval {SELECT t1.a+(e*a)*t1.d+(abs(d)/abs(case when case f when t1.a then t1.f else d end*t1.f*t1.e*c*17+coalesce((select t1.d from t1 where c<=~t1.f | 13),e)*t1.a=t1.d then t1.d when (t1.b)<>17 or t1.b<=t1.c then a else (e) end in (select min(t1.e) from t1 union select abs( -max((17))) from t1) then c when 19<>17 then 13 else -d end>t1.d))) then 17 when t1.a<=13 then coalesce((select (17) | t1.c from t1 where t1.e>d),b) else 13 end in (select min(f) from t1 union select count(*) from t1) then 19 else t1.f end)) then t1.f else f end FROM t1 WHERE coalesce((select max(11 | e+case c*t1.b+13 when 19 | ~t1.f+case when d>11 then t1.f-(case t1.d when a then 19+t1.f else t1.b end) else d-case e when c+t1.a then 13 else case when not exists(select 1 from t1 where (t1.d)<>17) then d when b in ( -f,11,17) then 11 else t1.f end end end+d then e else t1.a end) from t1 where t1.a>= -11),b)>t1.c} +} {600} +do_test randexpr-2.660 { + db eval {SELECT case f when (abs(t1.f)/abs(case when d+case when exists(select 1 from t1 where exists(select 1 from t1 where exists(select 1 from t1 where case when not case when +11>=t1.d then t1.d when (t1.b)<>17 or t1.b<=t1.c then a else (e) end in (select min(t1.e) from t1 union select abs( -max((17))) from t1) then c when 19<>17 then 13 else -d end>t1.d))) then 17 when t1.a<=13 then coalesce((select (17) | t1.c from t1 where t1.e>d),b) else 13 end in (select min(f) from t1 union select count(*) from t1) then 19 else t1.f end)) then t1.f else f end FROM t1 WHERE NOT (coalesce((select max(11 | e+case c*t1.b+13 when 19 | ~t1.f+case when d>11 then t1.f-(case t1.d when a then 19+t1.f else t1.b end) else d-case e when c+t1.a then 13 else case when not exists(select 1 from t1 where (t1.d)<>17) then d when b in ( -f,11,17) then 11 else t1.f end end end+d then e else t1.a end) from t1 where t1.a>= -11),b)>t1.c)} +} {} +do_test randexpr-2.661 { + db eval {SELECT case f when (abs(t1.f)/abs(case when d+case when exists(select 1 from t1 where exists(select 1 from t1 where exists(select 1 from t1 where case when not case when +11>=t1.d then t1.d when (t1.b)<>17 or t1.b<=t1.c then a else (e) end in (select min(t1.e) from t1 union select abs( -max((17))) from t1) then c when 19<>17 then 13 else -d end>t1.d))) then 17 when t1.a<=13 then coalesce((select (17) & t1.c from t1 where t1.e>d),b) else 13 end in (select min(f) from t1 union select count(*) from t1) then 19 else t1.f end)) then t1.f else f end FROM t1 WHERE coalesce((select max(11 | e+case c*t1.b+13 when 19 | ~t1.f+case when d>11 then t1.f-(case t1.d when a then 19+t1.f else t1.b end) else d-case e when c+t1.a then 13 else case when not exists(select 1 from t1 where (t1.d)<>17) then d when b in ( -f,11,17) then 11 else t1.f end end end+d then e else t1.a end) from t1 where t1.a>= -11),b)>t1.c} +} {600} +do_test randexpr-2.662 { + db eval {SELECT (select abs(case max(t1.b)-(min(17))* -count(distinct case when case t1.f when t1.b then case e when c then t1.c else c end else t1.b end>=t1.e then d else e end) | ~+count(distinct t1.c)-(abs( -count(*))- -max(c)) | count(distinct t1.c) when count(distinct ( -t1.c)) then count(*) else max(t1.b) end* 
-count(*))-cast(avg( -11) AS integer)*min(19)+max(e) from t1) FROM t1 WHERE t1.a+t1.a>13} +} {909} +do_test randexpr-2.663 { + db eval {SELECT (select abs(case max(t1.b)-(min(17))* -count(distinct case when case t1.f when t1.b then case e when c then t1.c else c end else t1.b end>=t1.e then d else e end) | ~+count(distinct t1.c)-(abs( -count(*))- -max(c)) | count(distinct t1.c) when count(distinct ( -t1.c)) then count(*) else max(t1.b) end* -count(*))-cast(avg( -11) AS integer)*min(19)+max(e) from t1) FROM t1 WHERE NOT (t1.a+t1.a>13)} +} {} +do_test randexpr-2.664 { + db eval {SELECT (select abs(case max(t1.b)-(min(17))* -count(distinct case when case t1.f when t1.b then case e when c then t1.c else c end else t1.b end>=t1.e then d else e end) & ~+count(distinct t1.c)-(abs( -count(*))- -max(c)) & count(distinct t1.c) when count(distinct ( -t1.c)) then count(*) else max(t1.b) end* -count(*))-cast(avg( -11) AS integer)*min(19)+max(e) from t1) FROM t1 WHERE t1.a+t1.a>13} +} {710} +do_test randexpr-2.665 { + db eval {SELECT (t1.d- -c+e-f++case when a between case f when case when ~~~11-coalesce((select b from t1 where coalesce((select max(e) from t1 where not t1.d>= -11),a)+a between 11 and t1.a), -d)*17-a-13 | 13>=17 then a else 11 end then t1.a else t1.d end and t1.f then 19 else t1.a end) FROM t1 WHERE t1.f<> -13} +} {700} +do_test randexpr-2.666 { + db eval {SELECT (t1.d- -c+e-f++case when a between case f when case when ~~~11-coalesce((select b from t1 where coalesce((select max(e) from t1 where not t1.d>= -11),a)+a between 11 and t1.a), -d)*17-a-13 | 13>=17 then a else 11 end then t1.a else t1.d end and t1.f then 19 else t1.a end) FROM t1 WHERE NOT (t1.f<> -13)} +} {} +do_test randexpr-2.667 { + db eval {SELECT (t1.d- -c+e-f++case when a between case f when case when ~~~11-coalesce((select b from t1 where coalesce((select max(e) from t1 where not t1.d>= -11),a)+a between 11 and t1.a), -d)*17-a-13 & 13>=17 then a else 11 end then t1.a else t1.d end and t1.f then 19 else t1.a end) FROM t1 WHERE t1.f<> -13} +} {700} +do_test randexpr-2.668 { + db eval {SELECT (abs(case when (c<=b+t1.b) then -11+t1.a-d when b*(select count(distinct e)*+max(~t1.a*case when (select count(*)*+min(t1.b)+max(t1.a) from t1) in (c,t1.d,t1.d) then (select -(count(*)) from t1) else e end) from t1)<=a-11 then 11 else t1.e end)/abs(t1.e))+c FROM t1 WHERE a<+coalesce((select t1.f from t1 where exists(select 1 from t1 where ~case when (e not between t1.a-~case d when b then -c else 13 end and d) then 11 else a end+17-f-a*19*t1.f not in (f, -t1.d,11))),c)-t1.f and (t1.d<17) and t1.f=c or f<= -e} +} {} +do_test randexpr-2.669 { + db eval {SELECT (abs(case when (c<=b+t1.b) then -11+t1.a-d when b*(select count(distinct e)*+max(~t1.a*case when (select count(*)*+min(t1.b)+max(t1.a) from t1) in (c,t1.d,t1.d) then (select -(count(*)) from t1) else e end) from t1)<=a-11 then 11 else t1.e end)/abs(t1.e))+c FROM t1 WHERE NOT (a<+coalesce((select t1.f from t1 where exists(select 1 from t1 where ~case when (e not between t1.a-~case d when b then -c else 13 end and d) then 11 else a end+17-f-a*19*t1.f not in (f, -t1.d,11))),c)-t1.f and (t1.d<17) and t1.f=c or f<= -e)} +} {300} +do_test randexpr-2.670 { + db eval {SELECT case when case when c in (select abs((max(d*case t1.b when t1.b+t1.f then d else b end+d))- - -~count(*)*cast(avg(t1.b) AS integer)-min(d)*cast(avg(b) AS integer)*count(*)) from t1 union select -count(distinct t1.f) from t1) then case when 17 between t1.f and 11 then c else d end when t1.c>=13 then a else a end<=c then 
t1.d else 13 end+e FROM t1 WHERE t1.f between -11 and b} +} {} +do_test randexpr-2.671 { + db eval {SELECT case when case when c in (select abs((max(d*case t1.b when t1.b+t1.f then d else b end+d))- - -~count(*)*cast(avg(t1.b) AS integer)-min(d)*cast(avg(b) AS integer)*count(*)) from t1 union select -count(distinct t1.f) from t1) then case when 17 between t1.f and 11 then c else d end when t1.c>=13 then a else a end<=c then t1.d else 13 end+e FROM t1 WHERE NOT (t1.f between -11 and b)} +} {900} +do_test randexpr-2.672 { + db eval {SELECT b*~~t1.b*(a-t1.c+t1.b)+13-+~case t1.b when d then case 17*13 when t1.e then case when case when t1.c<>~(11) then t1.e else coalesce((select max( -c) from t1 where (f<>e)),t1.f) end<=a then (t1.c) else t1.c end else -f end else 13 end*t1.e-t1.f | 13 FROM t1 WHERE b between (d) and t1.c*(abs(t1.c-13 | t1.d+t1.a)/abs(13))} +} {} +do_test randexpr-2.673 { + db eval {SELECT b*~~t1.b*(a-t1.c+t1.b)+13-+~case t1.b when d then case 17*13 when t1.e then case when case when t1.c<>~(11) then t1.e else coalesce((select max( -c) from t1 where (f<>e)),t1.f) end<=a then (t1.c) else t1.c end else -f end else 13 end*t1.e-t1.f | 13 FROM t1 WHERE NOT (b between (d) and t1.c*(abs(t1.c-13 | t1.d+t1.a)/abs(13)))} +} {6413} +do_test randexpr-2.674 { + db eval {SELECT b*~~t1.b*(a-t1.c+t1.b)+13-+~case t1.b when d then case 17*13 when t1.e then case when case when t1.c<>~(11) then t1.e else coalesce((select max( -c) from t1 where (f<>e)),t1.f) end<=a then (t1.c) else t1.c end else -f end else 13 end*t1.e-t1.f & 13 FROM t1 WHERE NOT (b between (d) and t1.c*(abs(t1.c-13 | t1.d+t1.a)/abs(13)))} +} {13} +do_test randexpr-2.675 { + db eval {SELECT case when t1.b in (select case 13 when 13 | t1.e then case +(select (min((e)))-case count(*) | max(t1.b*d+17+e) when abs(count(distinct (11))*count(*)-cast(avg(a) AS integer) | max( -t1.a)) then (count(distinct 13)) else -(cast(avg(t1.b) AS integer)) end from t1) when (select count(distinct e) from t1) then t1.e else a+(t1.b) end-d-t1.d else t1.e end from t1 union select (f) from t1) then t1.b else (b) end FROM t1 WHERE b in (select a from t1 union select 11+t1.e from t1)} +} {} +do_test randexpr-2.676 { + db eval {SELECT case when t1.b in (select case 13 when 13 | t1.e then case +(select (min((e)))-case count(*) | max(t1.b*d+17+e) when abs(count(distinct (11))*count(*)-cast(avg(a) AS integer) | max( -t1.a)) then (count(distinct 13)) else -(cast(avg(t1.b) AS integer)) end from t1) when (select count(distinct e) from t1) then t1.e else a+(t1.b) end-d-t1.d else t1.e end from t1 union select (f) from t1) then t1.b else (b) end FROM t1 WHERE NOT (b in (select a from t1 union select 11+t1.e from t1))} +} {200} +do_test randexpr-2.677 { + db eval {SELECT case when t1.b in (select case 13 when 13 & t1.e then case +(select (min((e)))-case count(*) & max(t1.b*d+17+e) when abs(count(distinct (11))*count(*)-cast(avg(a) AS integer) & max( -t1.a)) then (count(distinct 13)) else -(cast(avg(t1.b) AS integer)) end from t1) when (select count(distinct e) from t1) then t1.e else a+(t1.b) end-d-t1.d else t1.e end from t1 union select (f) from t1) then t1.b else (b) end FROM t1 WHERE NOT (b in (select a from t1 union select 11+t1.e from t1))} +} {200} +do_test randexpr-2.678 { + db eval {SELECT coalesce((select 17 from t1 where d between 17 and 13),case when t1.f in (select c+a from t1 union select a from t1) and (abs(f)/abs(t1.b))-t1.e>=case when not exists(select 1 from t1 where t1.e>(select count(distinct t1.f) from t1)) then t1.f when (11)-~t1.fe),b)) from t1 
where (t1.f) in (13,e,t1.c) or t1.c>13),t1.b))/abs(13)) then t1.b else d end)/abs(13)) in (t1.f,t1.e,t1.a) or 17 between 13 and f or (f>11 and t1.f=t1.e} +} {} +do_test randexpr-2.679 { + db eval {SELECT coalesce((select 17 from t1 where d between 17 and 13),case when t1.f in (select c+a from t1 union select a from t1) and (abs(f)/abs(t1.b))-t1.e>=case when not exists(select 1 from t1 where t1.e>(select count(distinct t1.f) from t1)) then t1.f when (11)-~t1.fe),b)) from t1 where (t1.f) in (13,e,t1.c) or t1.c>13),t1.b))/abs(13)) then t1.b else d end)/abs(13)) in (t1.f,t1.e,t1.a) or 17 between 13 and f or (f>11 and t1.f=t1.e)} +} {200} +do_test randexpr-2.680 { + db eval {SELECT (select min(19-case 19 when -t1.a+13 then coalesce((select c from t1 where (abs(13)/abs(11))>=coalesce((select f+b*+t1.c from t1 where t1.c=+11+t1.e),11) or (e in (a,a,13) and t1.a=f or t1.a=t1.e),t1.f)*t1.a+t1.f)*19)) end*f-t1.f in (select d from t1 union select t1.c from t1) and b in (t1.f,t1.d,c)} +} {} +do_test randexpr-2.681 { + db eval {SELECT (select min(19-case 19 when -t1.a+13 then coalesce((select c from t1 where (abs(13)/abs(11))>=coalesce((select f+b*+t1.c from t1 where t1.c=+11+t1.e),11) or (e in (a,a,13) and t1.a=f or t1.a=t1.e),t1.f)*t1.a+t1.f)*19)) end*f-t1.f in (select d from t1 union select t1.c from t1) and b in (t1.f,t1.d,c))} +} {-165} +do_test randexpr-2.682 { + db eval {SELECT (select min(19-case 19 when -t1.a+13 then coalesce((select c from t1 where (abs(13)/abs(11))>=coalesce((select f+b*+t1.c from t1 where t1.c=+11+t1.e),11) or (e in (a,a,13) and t1.a=f or t1.a=t1.e),t1.f)*t1.a+t1.f)*19)) end*f-t1.f in (select d from t1 union select t1.c from t1) and b in (t1.f,t1.d,c))} +} {584} +do_test randexpr-2.683 { + db eval {SELECT ~case when exists(select 1 from t1 where t1.c not between case case ~t1.b*19 when b then t1.c*t1.e else t1.d end when c then case ++13 when case when a+d+~~11 not between e and -19 then 19 else b end then t1.d else 19 end else t1.a end and d) then 19 when c in (select abs(count(distinct 17)) from t1 union select max(t1.b) from t1) then (d) else t1.e end | 19 FROM t1 WHERE t1.d between 11 and ~19-coalesce((select d from t1 where (d>=c)),t1.d)*+t1.e+t1.e} +} {} +do_test randexpr-2.684 { + db eval {SELECT ~case when exists(select 1 from t1 where t1.c not between case case ~t1.b*19 when b then t1.c*t1.e else t1.d end when c then case ++13 when case when a+d+~~11 not between e and -19 then 19 else b end then t1.d else 19 end else t1.a end and d) then 19 when c in (select abs(count(distinct 17)) from t1 union select max(t1.b) from t1) then (d) else t1.e end | 19 FROM t1 WHERE NOT (t1.d between 11 and ~19-coalesce((select d from t1 where (d>=c)),t1.d)*+t1.e+t1.e)} +} {-485} +do_test randexpr-2.685 { + db eval {SELECT ~case when exists(select 1 from t1 where t1.c not between case case ~t1.b*19 when b then t1.c*t1.e else t1.d end when c then case ++13 when case when a+d+~~11 not between e and -19 then 19 else b end then t1.d else 19 end else t1.a end and d) then 19 when c in (select abs(count(distinct 17)) from t1 union select max(t1.b) from t1) then (d) else t1.e end & 19 FROM t1 WHERE NOT (t1.d between 11 and ~19-coalesce((select d from t1 where (d>=c)),t1.d)*+t1.e+t1.e)} +} {3} +do_test randexpr-2.686 { + db eval {SELECT +t1.b-17*11-coalesce((select coalesce((select max(t1.f+a-t1.c+b) from t1 where 17* -b+t1.e=13),coalesce((select +t1.f*coalesce((select b from t1 where 19 between -a and c),t1.d) from t1 where 17<>b),17)*11) from t1 where fb),17)*11) from t1 where fb),17)*11) 
from t1 where f=case when b not between -t1.f and c then e else f end or a<=11 or t1.e between a and 11 or t1.d not in (b,d,d) then case when f>=c then t1.c else 19 end when t1.e=a then e else t1.e end) | f)/abs(t1.a))) from t1)+e* -t1.a+f+d FROM t1 WHERE t1.b<=t1.c+ -(abs(t1.b)/abs(case t1.b+t1.e when 11 then t1.c else 17 end))*coalesce((select coalesce((select coalesce((select 13 from t1 where b=t1.b),f) | t1.f-11 from t1 where (t1.c) in (select +min(19)*(~max(b)-max(11))*max(11) from t1 union select min(f) from t1)),t1.f) from t1 where f in (select case count(distinct 19) when max(t1.b) then -cast(avg(t1.e) AS integer) else cast(avg(b) AS integer) end from t1 union select count(*) from t1)),t1.a)} +} {} +do_test randexpr-2.690 { + db eval {SELECT -d+t1.d-t1.a-+t1.c-(select +min((abs(+(case when b>=case when b not between -t1.f and c then e else f end or a<=11 or t1.e between a and 11 or t1.d not in (b,d,d) then case when f>=c then t1.c else 19 end when t1.e=a then e else t1.e end) | f)/abs(t1.a))) from t1)+e* -t1.a+f+d FROM t1 WHERE NOT (t1.b<=t1.c+ -(abs(t1.b)/abs(case t1.b+t1.e when 11 then t1.c else 17 end))*coalesce((select coalesce((select coalesce((select 13 from t1 where b=t1.b),f) | t1.f-11 from t1 where (t1.c) in (select +min(19)*(~max(b)-max(11))*max(11) from t1 union select min(f) from t1)),t1.f) from t1 where f in (select case count(distinct 19) when max(t1.b) then -cast(avg(t1.e) AS integer) else cast(avg(b) AS integer) end from t1 union select count(*) from t1)),t1.a))} +} {-49410} +do_test randexpr-2.691 { + db eval {SELECT -d+t1.d-t1.a-+t1.c-(select +min((abs(+(case when b>=case when b not between -t1.f and c then e else f end or a<=11 or t1.e between a and 11 or t1.d not in (b,d,d) then case when f>=c then t1.c else 19 end when t1.e=a then e else t1.e end) & f)/abs(t1.a))) from t1)+e* -t1.a+f+d FROM t1 WHERE NOT (t1.b<=t1.c+ -(abs(t1.b)/abs(case t1.b+t1.e when 11 then t1.c else 17 end))*coalesce((select coalesce((select coalesce((select 13 from t1 where b=t1.b),f) | t1.f-11 from t1 where (t1.c) in (select +min(19)*(~max(b)-max(11))*max(11) from t1 union select min(f) from t1)),t1.f) from t1 where f in (select case count(distinct 19) when max(t1.b) then -cast(avg(t1.e) AS integer) else cast(avg(b) AS integer) end from t1 union select count(*) from t1)),t1.a))} +} {-49400} +do_test randexpr-2.692 { + db eval {SELECT ~case +t1.a when t1.a then 19 else coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where t1.b*f<>e and e not in (17,t1.c-case when t1.e<19 then t1.c else (abs(t1.c)/abs(d*coalesce((select t1.e from t1 where exists(select 1 from t1 where t1.c in (select t1.f from t1 union select 11 from t1)) or t1.a<=t1.d),a))) end*t1.e,t1.e))),coalesce((select max(a) from t1 where (b) in ((11),e,b)),17)) end FROM t1 WHERE not (c)+c*d>t1.e and exists(select 1 from t1 where d*coalesce((select +case when ((select cast(avg((c)) AS integer)- -cast(avg(a) AS integer) from t1) in (select min(b)-count(*) from t1 union select count(distinct 19) from t1)) or not t1.b between 19 and 19 then t1.c else coalesce((select e from t1 where t1.e>=e),c) end from t1 where ( -f)19) and 17<> -t1.e and b>=a} +} {} +do_test randexpr-2.693 { + db eval {SELECT ~case +t1.a when t1.a then 19 else coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where t1.b*f<>e and e not in (17,t1.c-case when t1.e<19 then t1.c else (abs(t1.c)/abs(d*coalesce((select t1.e from t1 where exists(select 1 from t1 where t1.c in (select t1.f from t1 union select 11 from t1)) or 
t1.a<=t1.d),a))) end*t1.e,t1.e))),coalesce((select max(a) from t1 where (b) in ((11),e,b)),17)) end FROM t1 WHERE NOT (not (c)+c*d>t1.e and exists(select 1 from t1 where d*coalesce((select +case when ((select cast(avg((c)) AS integer)- -cast(avg(a) AS integer) from t1) in (select min(b)-count(*) from t1 union select count(distinct 19) from t1)) or not t1.b between 19 and 19 then t1.c else coalesce((select e from t1 where t1.e>=e),c) end from t1 where ( -f)19) and 17<> -t1.e and b>=a)} +} {-20} +do_test randexpr-2.694 { + db eval {SELECT coalesce((select max(t1.d-t1.e) from t1 where not exists(select 1 from t1 where c*+19<=(d)) or c+e not between a and ~f*t1.f*case when t1.e<> -coalesce((select case when ((e between t1.d and f)) then -(17) | b else t1.d end from t1 where b in (select t1.c from t1 union select 11 from t1)),d) then t1.d when e not in ( -t1.b,t1.c,(t1.e)) then t1.c else d end), -b) FROM t1 WHERE t1.d<>d} +} {} +do_test randexpr-2.695 { + db eval {SELECT coalesce((select max(t1.d-t1.e) from t1 where not exists(select 1 from t1 where c*+19<=(d)) or c+e not between a and ~f*t1.f*case when t1.e<> -coalesce((select case when ((e between t1.d and f)) then -(17) | b else t1.d end from t1 where b in (select t1.c from t1 union select 11 from t1)),d) then t1.d when e not in ( -t1.b,t1.c,(t1.e)) then t1.c else d end), -b) FROM t1 WHERE NOT (t1.d<>d)} +} {-100} +do_test randexpr-2.696 { + db eval {SELECT coalesce((select max(t1.d-t1.e) from t1 where not exists(select 1 from t1 where c*+19<=(d)) or c+e not between a and ~f*t1.f*case when t1.e<> -coalesce((select case when ((e between t1.d and f)) then -(17) & b else t1.d end from t1 where b in (select t1.c from t1 union select 11 from t1)),d) then t1.d when e not in ( -t1.b,t1.c,(t1.e)) then t1.c else d end), -b) FROM t1 WHERE NOT (t1.d<>d)} +} {-100} +do_test randexpr-2.697 { + db eval {SELECT case when a in (select max(17) from t1 union select abs(count(*)) from t1) then t1.a- -e-t1.d | e- -t1.f*19+(select -abs(max(a))*count(distinct e)*max(19) from t1)-11-13-d*11+t1.a else d end-b FROM t1 WHERE t1.e in (select (abs(case when (abs(f)/abs((select ~min(a) from t1)+coalesce((select t1.f from t1 where 11<>t1.b),19)))*t1.e+ -t1.d | e<=t1.f then -e when (b)>t1.a then t1.d else t1.a end)/abs(11)) from t1 union select 13 from t1) and f not between t1.b and e or not exists(select 1 from t1 where not exists(select 1 from t1 where 19t1.b),19)))*t1.e+ -t1.d | e<=t1.f then -e when (b)>t1.a then t1.d else t1.a end)/abs(11)) from t1 union select 13 from t1) and f not between t1.b and e or not exists(select 1 from t1 where not exists(select 1 from t1 where 19t1.b),19)))*t1.e+ -t1.d | e<=t1.f then -e when (b)>t1.a then t1.d else t1.a end)/abs(11)) from t1 union select 13 from t1) and f not between t1.b and e or not exists(select 1 from t1 where not exists(select 1 from t1 where 19t1.d),c) then 19 else 17 end between t1.b and t1.f and t1.d not in (c,e,(17)))),11) FROM t1 WHERE (((select min(t1.f) from t1)<=e))} +} {} +do_test randexpr-2.701 { + db eval {SELECT coalesce((select f from t1 where (case t1.a*t1.b*c when t1.d*coalesce((select max(coalesce((select 19 from t1 where t1.b in (select max(~case -e when c+coalesce((select max(e) from t1 where (t1.d) not between c and t1.f),17) then c else 13 end) from t1 union select count(distinct a) from t1)),t1.f)) from t1 where f>t1.d),c) then 19 else 17 end between t1.b and t1.f and t1.d not in (c,e,(17)))),11) FROM t1 WHERE NOT ((((select min(t1.f) from t1)<=e)))} +} {11} +do_test randexpr-2.702 { + db eval 
{SELECT case t1.a when e*t1.e then case when exists(select 1 from t1 where (coalesce((select max(17) from t1 where (not (case when not a>=13*c then t1.f else (11)*t1.b end between 11 and t1.f or t1.c>(t1.c)))),t1.b*~t1.c)*b*d>=13)) then (abs(17)/abs(e)) else b end else t1.f end | t1.c+t1.f FROM t1 WHERE coalesce((select max(11) from t1 where c-d not between ~case when (select +~count(distinct t1.f)*cast(avg(t1.e) AS integer) from t1)+19 not in (t1.e,t1.f,t1.a) then t1.a when a in (e,t1.d,d) then t1.b else t1.e end and t1.b and c>=e or (t1.d<>b) and not exists(select 1 from t1 where (t1.f between 13 and a))),t1.a)-e<=t1.e} +} {988} +do_test randexpr-2.703 { + db eval {SELECT case t1.a when e*t1.e then case when exists(select 1 from t1 where (coalesce((select max(17) from t1 where (not (case when not a>=13*c then t1.f else (11)*t1.b end between 11 and t1.f or t1.c>(t1.c)))),t1.b*~t1.c)*b*d>=13)) then (abs(17)/abs(e)) else b end else t1.f end | t1.c+t1.f FROM t1 WHERE NOT (coalesce((select max(11) from t1 where c-d not between ~case when (select +~count(distinct t1.f)*cast(avg(t1.e) AS integer) from t1)+19 not in (t1.e,t1.f,t1.a) then t1.a when a in (e,t1.d,d) then t1.b else t1.e end and t1.b and c>=e or (t1.d<>b) and not exists(select 1 from t1 where (t1.f between 13 and a))),t1.a)-e<=t1.e)} +} {} +do_test randexpr-2.704 { + db eval {SELECT case t1.a when e*t1.e then case when exists(select 1 from t1 where (coalesce((select max(17) from t1 where (not (case when not a>=13*c then t1.f else (11)*t1.b end between 11 and t1.f or t1.c>(t1.c)))),t1.b*~t1.c)*b*d>=13)) then (abs(17)/abs(e)) else b end else t1.f end & t1.c+t1.f FROM t1 WHERE coalesce((select max(11) from t1 where c-d not between ~case when (select +~count(distinct t1.f)*cast(avg(t1.e) AS integer) from t1)+19 not in (t1.e,t1.f,t1.a) then t1.a when a in (e,t1.d,d) then t1.b else t1.e end and t1.b and c>=e or (t1.d<>b) and not exists(select 1 from t1 where (t1.f between 13 and a))),t1.a)-e<=t1.e} +} {512} +do_test randexpr-2.705 { + db eval {SELECT coalesce((select max(t1.e+(t1.b)) from t1 where 17<=t1.d-(select ~+cast(avg(b) AS integer) | case -case cast(avg(t1.e) AS integer) when min((abs((a))/abs(t1.e))) then cast(avg(t1.c) AS integer)+count(*) else min(d) end when max(t1.e) then count(*) else ( -count(*)) end-min(t1.b)-count(distinct -a)*count(*) from t1) | c+coalesce((select f from t1 where (19=t1.c-t1.b)),t1.f+17)),f) FROM t1 WHERE c not between (abs(t1.f)/abs(case when t1.f+e<=t1.c+t1.d then case when d in (select +count(distinct case when -t1.a*19-t1.c not between 11 and c then t1.e when (19<>19) then t1.d else d end) from t1 union select cast(avg(e) AS integer) from t1) then t1.e when c in (select count(*) from t1 union select cast(avg(f) AS integer)* - - -cast(avg(t1.e) AS integer) from t1) then b else (t1.b) end when not (11>=11) then -t1.e else b end)) and t1.f} +} {} +do_test randexpr-2.706 { + db eval {SELECT coalesce((select max(t1.e+(t1.b)) from t1 where 17<=t1.d-(select ~+cast(avg(b) AS integer) | case -case cast(avg(t1.e) AS integer) when min((abs((a))/abs(t1.e))) then cast(avg(t1.c) AS integer)+count(*) else min(d) end when max(t1.e) then count(*) else ( -count(*)) end-min(t1.b)-count(distinct -a)*count(*) from t1) | c+coalesce((select f from t1 where (19=t1.c-t1.b)),t1.f+17)),f) FROM t1 WHERE NOT (c not between (abs(t1.f)/abs(case when t1.f+e<=t1.c+t1.d then case when d in (select +count(distinct case when -t1.a*19-t1.c not between 11 and c then t1.e when (19<>19) then t1.d else d end) from t1 union select cast(avg(e) 
AS integer) from t1) then t1.e when c in (select count(*) from t1 union select cast(avg(f) AS integer)* - - -cast(avg(t1.e) AS integer) from t1) then b else (t1.b) end when not (11>=11) then -t1.e else b end)) and t1.f)} +} {700} +do_test randexpr-2.707 { + db eval {SELECT coalesce((select max(t1.e+(t1.b)) from t1 where 17<=t1.d-(select ~+cast(avg(b) AS integer) & case -case cast(avg(t1.e) AS integer) when min((abs((a))/abs(t1.e))) then cast(avg(t1.c) AS integer)+count(*) else min(d) end when max(t1.e) then count(*) else ( -count(*)) end-min(t1.b)-count(distinct -a)*count(*) from t1) & c+coalesce((select f from t1 where (19=t1.c-t1.b)),t1.f+17)),f) FROM t1 WHERE NOT (c not between (abs(t1.f)/abs(case when t1.f+e<=t1.c+t1.d then case when d in (select +count(distinct case when -t1.a*19-t1.c not between 11 and c then t1.e when (19<>19) then t1.d else d end) from t1 union select cast(avg(e) AS integer) from t1) then t1.e when c in (select count(*) from t1 union select cast(avg(f) AS integer)* - - -cast(avg(t1.e) AS integer) from t1) then b else (t1.b) end when not (11>=11) then -t1.e else b end)) and t1.f)} +} {700} +do_test randexpr-2.708 { + db eval {SELECT 17-(abs(case when t1.e<+(select count(distinct -f) from t1)+t1.b then +case when t1.b not in (coalesce((select max(coalesce((select case when 17=a and 13 not in (b,t1.f,d)),b),t1.d, -13) then (e) else (13) end-19 when not exists(select 1 from t1 where 19<>f) then c else t1.e end | t1.d)/abs((13)))*f FROM t1 WHERE coalesce((select f from t1 where coalesce((select ~d*19-d*~case c when case a when t1.e*19 then (select - -( -max(t1.f))*cast(avg(t1.c) AS integer) from t1) else 13 end*13-11 then -11 else t1.f end | t1.e | t1.f*19 from t1 where t1.f>t1.a),b)*b not in (17,17,t1.e)),t1.a)-b*d>=11} +} {} +do_test randexpr-2.709 { + db eval {SELECT 17-(abs(case when t1.e<+(select count(distinct -f) from t1)+t1.b then +case when t1.b not in (coalesce((select max(coalesce((select case when 17=a and 13 not in (b,t1.f,d)),b),t1.d, -13) then (e) else (13) end-19 when not exists(select 1 from t1 where 19<>f) then c else t1.e end | t1.d)/abs((13)))*f FROM t1 WHERE NOT (coalesce((select f from t1 where coalesce((select ~d*19-d*~case c when case a when t1.e*19 then (select - -( -max(t1.f))*cast(avg(t1.c) AS integer) from t1) else 13 end*13-11 then -11 else t1.f end | t1.e | t1.f*19 from t1 where t1.f>t1.a),b)*b not in (17,17,t1.e)),t1.a)-b*d>=11)} +} {-22783} +do_test randexpr-2.710 { + db eval {SELECT 17-(abs(case when t1.e<+(select count(distinct -f) from t1)+t1.b then +case when t1.b not in (coalesce((select max(coalesce((select case when 17=a and 13 not in (b,t1.f,d)),b),t1.d, -13) then (e) else (13) end-19 when not exists(select 1 from t1 where 19<>f) then c else t1.e end & t1.d)/abs((13)))*f FROM t1 WHERE NOT (coalesce((select f from t1 where coalesce((select ~d*19-d*~case c when case a when t1.e*19 then (select - -( -max(t1.f))*cast(avg(t1.c) AS integer) from t1) else 13 end*13-11 then -11 else t1.f end | t1.e | t1.f*19 from t1 where t1.f>t1.a),b)*b not in (17,17,t1.e)),t1.a)-b*d>=11)} +} {-17983} +do_test randexpr-2.711 { + db eval {SELECT (select cast(avg(case when (e=case when e>d and case coalesce((select ((t1.b)) from t1 where 17<=t1.c),a) when t1.b then f else d end*b>t1.d then (t1.a) else 11 end or 17 not in (19,f,d) or not not exists(select 1 from t1 where - -e>=13)) and 17 in ( -t1.b,c,t1.f) then t1.c when t1.c not between t1.f and 17 then t1.c else d end) AS integer) from t1)+b+a FROM t1 WHERE (((coalesce((select coalesce((select max(19) 
from t1 where d between ~t1.a and 11),c+d-coalesce((select e from t1 where (abs(b)/abs(coalesce((select 13 from t1 where case when (17) not in (19,t1.c,t1.b) or 17 not in (t1.b,t1.f,t1.e) then 11 else 19 end<>t1.c),11)*17))=t1.b),d)) from t1 where d not in (b,b,d)),t1.a) not between b and b) and d>=t1.d) and t1.d in (13,c,t1.b))} +} {} +do_test randexpr-2.712 { + db eval {SELECT (select cast(avg(case when (e=case when e>d and case coalesce((select ((t1.b)) from t1 where 17<=t1.c),a) when t1.b then f else d end*b>t1.d then (t1.a) else 11 end or 17 not in (19,f,d) or not not exists(select 1 from t1 where - -e>=13)) and 17 in ( -t1.b,c,t1.f) then t1.c when t1.c not between t1.f and 17 then t1.c else d end) AS integer) from t1)+b+a FROM t1 WHERE NOT ((((coalesce((select coalesce((select max(19) from t1 where d between ~t1.a and 11),c+d-coalesce((select e from t1 where (abs(b)/abs(coalesce((select 13 from t1 where case when (17) not in (19,t1.c,t1.b) or 17 not in (t1.b,t1.f,t1.e) then 11 else 19 end<>t1.c),11)*17))=t1.b),d)) from t1 where d not in (b,b,d)),t1.a) not between b and b) and d>=t1.d) and t1.d in (13,c,t1.b)))} +} {600} +do_test randexpr-2.713 { + db eval {SELECT (abs((select +abs(+max(b) | abs(~~count(distinct -19))) from t1))/abs(coalesce((select max(19*11*d) from t1 where (t1.e<=t1.e)),t1.f)))+case e when (t1.e) then d else c*13 | 11 end FROM t1 WHERE t1.d>=(case when (select count(*) from t1)*t1.c*t1.e+e<>t1.e*13 | f+(select (min(13+t1.b) | ~+~count(*)) from t1)*e*coalesce((select max(19+t1.a) from t1 where t1.e in (select +(count(distinct b))+count(*) from t1 union select ( -min(17)) from t1)),t1.a)-t1.d* -t1.e then e else -a end)} +} {} +do_test randexpr-2.714 { + db eval {SELECT (abs((select +abs(+max(b) | abs(~~count(distinct -19))) from t1))/abs(coalesce((select max(19*11*d) from t1 where (t1.e<=t1.e)),t1.f)))+case e when (t1.e) then d else c*13 | 11 end FROM t1 WHERE NOT (t1.d>=(case when (select count(*) from t1)*t1.c*t1.e+e<>t1.e*13 | f+(select (min(13+t1.b) | ~+~count(*)) from t1)*e*coalesce((select max(19+t1.a) from t1 where t1.e in (select +(count(distinct b))+count(*) from t1 union select ( -min(17)) from t1)),t1.a)-t1.d* -t1.e then e else -a end))} +} {400} +do_test randexpr-2.715 { + db eval {SELECT (abs((select +abs(+max(b) & abs(~~count(distinct -19))) from t1))/abs(coalesce((select max(19*11*d) from t1 where (t1.e<=t1.e)),t1.f)))+case e when (t1.e) then d else c*13 & 11 end FROM t1 WHERE NOT (t1.d>=(case when (select count(*) from t1)*t1.c*t1.e+e<>t1.e*13 | f+(select (min(13+t1.b) | ~+~count(*)) from t1)*e*coalesce((select max(19+t1.a) from t1 where t1.e in (select +(count(distinct b))+count(*) from t1 union select ( -min(17)) from t1)),t1.a)-t1.d* -t1.e then e else -a end))} +} {400} +do_test randexpr-2.716 { + db eval {SELECT -17+c-(abs(t1.a)/abs(t1.c))-case when t1.a<>c then case when exists(select 1 from t1 where 11 not between + -d and d and 13 not between 13 and 17 or not -f<=19 or (t1.a)(11)) and 11=t1.f then b else a end from t1 where t1.e in (e,t1.c,b)),11) end else -17 end FROM t1 WHERE not exists(select 1 from t1 where t1.a>=coalesce((select t1.f from t1 where + -19*t1.c<~+t1.a*13),a))} +} {-217} +do_test randexpr-2.717 { + db eval {SELECT -17+c-(abs(t1.a)/abs(t1.c))-case when t1.a<>c then case when exists(select 1 from t1 where 11 not between + -d and d and 13 not between 13 and 17 or not -f<=19 or (t1.a)(11)) and 11=t1.f then b else a end from t1 where t1.e in (e,t1.c,b)),11) end else -17 end FROM t1 WHERE NOT (not exists(select 1 from t1 where 
t1.a>=coalesce((select t1.f from t1 where + -19*t1.c<~+t1.a*13),a)))} +} {} +do_test randexpr-2.718 { + db eval {SELECT f*19+17*(select max(b)+count(*)- -~count(*)-min(case when (e<=b-t1.b) then case when t1.b>t1.a or f not in (c, -c,f) or f in (e,13,13) then t1.b else t1.b end else a end*a) from t1)+t1.f*(abs(case b | t1.c+t1.a when ((f)) then f else c end+b)/abs(17))-(c) FROM t1 WHERE t1.e>11} +} {-138117} +do_test randexpr-2.719 { + db eval {SELECT f*19+17*(select max(b)+count(*)- -~count(*)-min(case when (e<=b-t1.b) then case when t1.b>t1.a or f not in (c, -c,f) or f in (e,13,13) then t1.b else t1.b end else a end*a) from t1)+t1.f*(abs(case b | t1.c+t1.a when ((f)) then f else c end+b)/abs(17))-(c) FROM t1 WHERE NOT (t1.e>11)} +} {} +do_test randexpr-2.720 { + db eval {SELECT f*19+17*(select max(b)+count(*)- -~count(*)-min(case when (e<=b-t1.b) then case when t1.b>t1.a or f not in (c, -c,f) or f in (e,13,13) then t1.b else t1.b end else a end*a) from t1)+t1.f*(abs(case b & t1.c+t1.a when ((f)) then f else c end+b)/abs(17))-(c) FROM t1 WHERE t1.e>11} +} {-138117} +do_test randexpr-2.721 { + db eval {SELECT -case when case when (case (abs(13)/abs(t1.e-13*t1.f)) when b- -13+t1.d then 11 else a end) not between t1.b and b then t1.c when d not between t1.e and t1.c or t1.f in (select 11 from t1 union select f from t1) and 17=d then 13 else t1.a end+11>=f then a when f>b then t1.d else 17 end-t1.e FROM t1 WHERE (case when not exists(select 1 from t1 where exists(select 1 from t1 where (abs(b*d-t1.e)/abs(t1.b))>c)) and t1.d in (13,t1.f,17) then e else ( - -t1.a)+11+c end*t1.b in (select (case -+min(17) when +count(*) | min(t1.a) | cast(avg(11) AS integer) then cast(avg(11) AS integer) else cast(avg(a) AS integer) end)-cast(avg(t1.a) AS integer) from t1 union select (min(t1.e)) from t1) or not t1.c in ((f),t1.c,(t1.e)))} +} {} +do_test randexpr-2.722 { + db eval {SELECT -case when case when (case (abs(13)/abs(t1.e-13*t1.f)) when b- -13+t1.d then 11 else a end) not between t1.b and b then t1.c when d not between t1.e and t1.c or t1.f in (select 11 from t1 union select f from t1) and 17=d then 13 else t1.a end+11>=f then a when f>b then t1.d else 17 end-t1.e FROM t1 WHERE NOT ((case when not exists(select 1 from t1 where exists(select 1 from t1 where (abs(b*d-t1.e)/abs(t1.b))>c)) and t1.d in (13,t1.f,17) then e else ( - -t1.a)+11+c end*t1.b in (select (case -+min(17) when +count(*) | min(t1.a) | cast(avg(11) AS integer) then cast(avg(11) AS integer) else cast(avg(a) AS integer) end)-cast(avg(t1.a) AS integer) from t1 union select (min(t1.e)) from t1) or not t1.c in ((f),t1.c,(t1.e))))} +} {-900} +do_test randexpr-2.723 { + db eval {SELECT case t1.a when a then t1.c else -case coalesce((select t1.e from t1 where (abs(case when not (a*case when f>=f*13-19 then t1.e else +t1.a end=t1.a) then (select min(a)+min(+19) from t1) else case when t1.a=(f) then c else c end+17 end)/abs(t1.e)) between 17 and 13),e) when 17 then 13 else t1.d end end FROM t1 WHERE 17 in (select case cast(avg(b+b) AS integer) when abs(abs(count(*))) then min(case t1.c+t1.b+t1.e+17+case case when t1.b<>17 and t1.e>=t1.b then b else -t1.f end when 11 then 13 else f end*t1.f*b when 19 then -11 else t1.d end- -t1.f) else min(t1.c) end from t1 union select abs(abs(count(distinct b)*max(t1.d))+count(distinct 13)-count(*)) from t1)} +} {} +do_test randexpr-2.724 { + db eval {SELECT case t1.a when a then t1.c else -case coalesce((select t1.e from t1 where (abs(case when not (a*case when f>=f*13-19 then t1.e else +t1.a end=t1.a) then 
(select min(a)+min(+19) from t1) else case when t1.a=(f) then c else c end+17 end)/abs(t1.e)) between 17 and 13),e) when 17 then 13 else t1.d end end FROM t1 WHERE NOT (17 in (select case cast(avg(b+b) AS integer) when abs(abs(count(*))) then min(case t1.c+t1.b+t1.e+17+case case when t1.b<>17 and t1.e>=t1.b then b else -t1.f end when 11 then 13 else f end*t1.f*b when 19 then -11 else t1.d end- -t1.f) else min(t1.c) end from t1 union select abs(abs(count(distinct b)*max(t1.d))+count(distinct 13)-count(*)) from t1))} +} {300} +do_test randexpr-2.725 { + db eval {SELECT case when coalesce((select e from t1 where coalesce((select coalesce((select max( -coalesce((select max(case when exists(select 1 from t1 where not exists(select 1 from t1 where b in (select e+d from t1 union select t1.e from t1))) then t1.d else t1.f end) from t1 where exists(select 1 from t1 where f not in ((c),a,11))),e)) from t1 where t1.b=t1.f and et1.b),t1.b) from t1 where t1.a in (select a from t1 union select -f from t1)),17)>=17),((b))) not in (t1.e,b,e) then 19 when not -a<=f then t1.f else t1.b end FROM t1 WHERE 13=t1.d} +} {} +do_test randexpr-2.726 { + db eval {SELECT case when coalesce((select e from t1 where coalesce((select coalesce((select max( -coalesce((select max(case when exists(select 1 from t1 where not exists(select 1 from t1 where b in (select e+d from t1 union select t1.e from t1))) then t1.d else t1.f end) from t1 where exists(select 1 from t1 where f not in ((c),a,11))),e)) from t1 where t1.b=t1.f and et1.b),t1.b) from t1 where t1.a in (select a from t1 union select -f from t1)),17)>=17),((b))) not in (t1.e,b,e) then 19 when not -a<=f then t1.f else t1.b end FROM t1 WHERE NOT (13=t1.d)} +} {200} +do_test randexpr-2.727 { + db eval {SELECT 19*13+((abs(case when 17*d in ((abs((case (abs((abs((select abs(min(d))+cast(avg(19) AS integer) from t1) | t1.d | 11 | b)/abs(19)))/abs(b)) when 11 then a else e end))/abs(13)),t1.f,b) then t1.b when t1.c>=t1.d then (t1.c) else c end*d)/abs(f))) | t1.a-a-17 FROM t1 WHERE ( -t1.d+coalesce((select max(11) from t1 where t1.c between 13 and t1.d and case d when b then 13+ -~t1.a else +case when coalesce((select max((select +case (cast(avg(e) AS integer)) when count(distinct t1.a) then count(*) else min(( -d)) end from t1)-+a+d) from t1 where (e in (17,t1.e, -a))),b)<>f then a else 11 end end<(t1.e)),t1.d) in (select t1.a from t1 union select b from t1))} +} {} +do_test randexpr-2.728 { + db eval {SELECT 19*13+((abs(case when 17*d in ((abs((case (abs((abs((select abs(min(d))+cast(avg(19) AS integer) from t1) | t1.d | 11 | b)/abs(19)))/abs(b)) when 11 then a else e end))/abs(13)),t1.f,b) then t1.b when t1.c>=t1.d then (t1.c) else c end*d)/abs(f))) | t1.a-a-17 FROM t1 WHERE NOT (( -t1.d+coalesce((select max(11) from t1 where t1.c between 13 and t1.d and case d when b then 13+ -~t1.a else +case when coalesce((select max((select +case (cast(avg(e) AS integer)) when count(distinct t1.a) then count(*) else min(( -d)) end from t1)-+a+d) from t1 where (e in (17,t1.e, -a))),b)<>f then a else 11 end end<(t1.e)),t1.d) in (select t1.a from t1 union select b from t1)))} +} {-1} +do_test randexpr-2.729 { + db eval {SELECT 19*13+((abs(case when 17*d in ((abs((case (abs((abs((select abs(min(d))+cast(avg(19) AS integer) from t1) & t1.d & 11 & b)/abs(19)))/abs(b)) when 11 then a else e end))/abs(13)),t1.f,b) then t1.b when t1.c>=t1.d then (t1.c) else c end*d)/abs(f))) & t1.a-a-17 FROM t1 WHERE NOT (( -t1.d+coalesce((select max(11) from t1 where t1.c between 13 and t1.d and case d when b 
then 13+ -~t1.a else +case when coalesce((select max((select +case (cast(avg(e) AS integer)) when count(distinct t1.a) then count(*) else min(( -d)) end from t1)-+a+d) from t1 where (e in (17,t1.e, -a))),b)<>f then a else 11 end end<(t1.e)),t1.d) in (select t1.a from t1 union select b from t1)))} +} {431} +do_test randexpr-2.730 { + db eval {SELECT case when (19*b)+11*~f-coalesce((select max(11) from t1 where f<>case when case 17 when 11 then 17 else c end-11 in (t1.c,c,t1.e) and 19=t1.a or 17<=e or e<=19 then 19 | c when t1.b=t1.f then d else 19 end or t1.e not in (13,19,c)),b) | -c not between -e and -a then (t1.c) else t1.d end FROM t1 WHERE c in (select case abs((count(*))) when -max(t1.c) | abs(abs(+count(*)))-~case (case -max(e) when max(11)- -count(*) then ((max(t1.e))) else count(*) end) when count(distinct t1.e) then cast(avg(f) AS integer) else cast(avg(t1.c) AS integer) end* -cast(avg(f) AS integer) then max(f) else (max(b)) end from t1 union select (max(13)) from t1) and not exists(select 1 from t1 where case when t1.e<=19-b then c else c end in (select c from t1 union select (select -((count(distinct t1.d))) from t1)-t1.b from t1))} +} {} +do_test randexpr-2.731 { + db eval {SELECT case when (19*b)+11*~f-coalesce((select max(11) from t1 where f<>case when case 17 when 11 then 17 else c end-11 in (t1.c,c,t1.e) and 19=t1.a or 17<=e or e<=19 then 19 | c when t1.b=t1.f then d else 19 end or t1.e not in (13,19,c)),b) | -c not between -e and -a then (t1.c) else t1.d end FROM t1 WHERE NOT (c in (select case abs((count(*))) when -max(t1.c) | abs(abs(+count(*)))-~case (case -max(e) when max(11)- -count(*) then ((max(t1.e))) else count(*) end) when count(distinct t1.e) then cast(avg(f) AS integer) else cast(avg(t1.c) AS integer) end* -cast(avg(f) AS integer) then max(f) else (max(b)) end from t1 union select (max(13)) from t1) and not exists(select 1 from t1 where case when t1.e<=19-b then c else c end in (select c from t1 union select (select -((count(distinct t1.d))) from t1)-t1.b from t1)))} +} {400} +do_test randexpr-2.732 { + db eval {SELECT case when (19*b)+11*~f-coalesce((select max(11) from t1 where f<>case when case 17 when 11 then 17 else c end-11 in (t1.c,c,t1.e) and 19=t1.a or 17<=e or e<=19 then 19 & c when t1.b=t1.f then d else 19 end or t1.e not in (13,19,c)),b) & -c not between -e and -a then (t1.c) else t1.d end FROM t1 WHERE NOT (c in (select case abs((count(*))) when -max(t1.c) | abs(abs(+count(*)))-~case (case -max(e) when max(11)- -count(*) then ((max(t1.e))) else count(*) end) when count(distinct t1.e) then cast(avg(f) AS integer) else cast(avg(t1.c) AS integer) end* -cast(avg(f) AS integer) then max(f) else (max(b)) end from t1 union select (max(13)) from t1) and not exists(select 1 from t1 where case when t1.e<=19-b then c else c end in (select c from t1 union select (select -((count(distinct t1.d))) from t1)-t1.b from t1)))} +} {300} +do_test randexpr-2.733 { + db eval {SELECT case when t1.e not between e and 13 then (select count(*) from t1) when f>=case when c in (select a from t1 union select t1.e+coalesce((select 11 from t1 where (select count(distinct t1.b) from t1)+17 not in (b,t1.c,t1.f-(select ~count(distinct 17) from t1)+ -e)),13) from t1) or 19 between d and t1.f then (select min(19) from t1) when not (e not between c and a) then d else a end then d else (t1.c) end FROM t1 WHERE f not in (17,f+d | f+case when -f=19 then (select max((select +abs(abs(max(((select ((max(t1.d))+count(distinct t1.e))-min(t1.a) from t1))+t1.e)-abs(count(distinct 
-t1.b)-count(distinct 11)))*min(a)-count(*))-min(t1.a) from t1)) from t1) else t1.f end,c-a) and c<>11} +} {1} +do_test randexpr-2.734 { + db eval {SELECT case when t1.e not between e and 13 then (select count(*) from t1) when f>=case when c in (select a from t1 union select t1.e+coalesce((select 11 from t1 where (select count(distinct t1.b) from t1)+17 not in (b,t1.c,t1.f-(select ~count(distinct 17) from t1)+ -e)),13) from t1) or 19 between d and t1.f then (select min(19) from t1) when not (e not between c and a) then d else a end then d else (t1.c) end FROM t1 WHERE NOT (f not in (17,f+d | f+case when -f=19 then (select max((select +abs(abs(max(((select ((max(t1.d))+count(distinct t1.e))-min(t1.a) from t1))+t1.e)-abs(count(distinct -t1.b)-count(distinct 11)))*min(a)-count(*))-min(t1.a) from t1)) from t1) else t1.f end,c-a) and c<>11)} +} {} +do_test randexpr-2.735 { + db eval {SELECT case c-+case when t1.f in (t1.a,t1.e,19) then t1.b else case when a<>t1.a then case ~(t1.d) when t1.e then (select max(t1.f) from t1)-(select +(cast(avg(b) AS integer))*cast(avg(t1.a) AS integer)+max(d)*count(*)+count(*) from t1)* -t1.f else 13 end else t1.a+a end*t1.f-t1.b*(e) end when d then d else 17 end FROM t1 WHERE case when not exists(select 1 from t1 where not exists(select 1 from t1 where c in (select t1.a*t1.b from t1 union select t1.d | e from t1))) and (coalesce((select (abs(11)/abs(13-17*c)) from t1 where b in (select 13 from t1 union select (t1.c) from t1)),11) not in (17,b,b)) then (abs(+t1.f)/abs(c)) when exists(select 1 from t1 where not exists(select 1 from t1 where not (not exists(select 1 from t1 where e<=19 and e>19)))) then c else t1.e end not between t1.f and c} +} {17} +do_test randexpr-2.736 { + db eval {SELECT case c-+case when t1.f in (t1.a,t1.e,19) then t1.b else case when a<>t1.a then case ~(t1.d) when t1.e then (select max(t1.f) from t1)-(select +(cast(avg(b) AS integer))*cast(avg(t1.a) AS integer)+max(d)*count(*)+count(*) from t1)* -t1.f else 13 end else t1.a+a end*t1.f-t1.b*(e) end when d then d else 17 end FROM t1 WHERE NOT (case when not exists(select 1 from t1 where not exists(select 1 from t1 where c in (select t1.a*t1.b from t1 union select t1.d | e from t1))) and (coalesce((select (abs(11)/abs(13-17*c)) from t1 where b in (select 13 from t1 union select (t1.c) from t1)),11) not in (17,b,b)) then (abs(+t1.f)/abs(c)) when exists(select 1 from t1 where not exists(select 1 from t1 where not (not exists(select 1 from t1 where e<=19 and e>19)))) then c else t1.e end not between t1.f and c)} +} {} +do_test randexpr-2.737 { + db eval {SELECT 11+a+(select count(distinct coalesce((select -coalesce((select case when (t1.a<=a) then (case when 17=t1.c then (select count(distinct e)-min(11) from t1) else f end) when f>=t1.f and t1.c not in (t1.e,a,17) and 17=t1.f then -t1.d else 11 end*t1.d from t1 where t1.a<=t1.b),t1.b) from t1 where t1.e not between d and 13),t1.a)) | +max(t1.c) from t1) FROM t1 WHERE 11 in (t1.a,+t1.c* -t1.f | t1.d*+t1.c-b-(abs((select min(case when 19 in (select abs(count(distinct c)) from t1 union select ~ -min(t1.c) from t1) then case (11) when a then t1.c else t1.d end when (f)<=t1.b then t1.b else t1.a end)+min((t1.d)) | min(b) from t1))/abs(t1.e))+case 13 when t1.b then b else - -t1.b end*b-11, -f)} +} {} +do_test randexpr-2.738 { + db eval {SELECT 11+a+(select count(distinct coalesce((select -coalesce((select case when (t1.a<=a) then (case when 17=t1.c then (select count(distinct e)-min(11) from t1) else f end) when f>=t1.f and t1.c not in (t1.e,a,17) and 
17=t1.f then -t1.d else 11 end*t1.d from t1 where t1.a<=t1.b),t1.b) from t1 where t1.e not between d and 13),t1.a)) | +max(t1.c) from t1) FROM t1 WHERE NOT (11 in (t1.a,+t1.c* -t1.f | t1.d*+t1.c-b-(abs((select min(case when 19 in (select abs(count(distinct c)) from t1 union select ~ -min(t1.c) from t1) then case (11) when a then t1.c else t1.d end when (f)<=t1.b then t1.b else t1.a end)+min((t1.d)) | min(b) from t1))/abs(t1.e))+case 13 when t1.b then b else - -t1.b end*b-11, -f))} +} {412} +do_test randexpr-2.739 { + db eval {SELECT 11+a+(select count(distinct coalesce((select -coalesce((select case when (t1.a<=a) then (case when 17=t1.c then (select count(distinct e)-min(11) from t1) else f end) when f>=t1.f and t1.c not in (t1.e,a,17) and 17=t1.f then -t1.d else 11 end*t1.d from t1 where t1.a<=t1.b),t1.b) from t1 where t1.e not between d and 13),t1.a)) & +max(t1.c) from t1) FROM t1 WHERE NOT (11 in (t1.a,+t1.c* -t1.f | t1.d*+t1.c-b-(abs((select min(case when 19 in (select abs(count(distinct c)) from t1 union select ~ -min(t1.c) from t1) then case (11) when a then t1.c else t1.d end when (f)<=t1.b then t1.b else t1.a end)+min((t1.d)) | min(b) from t1))/abs(t1.e))+case 13 when t1.b then b else - -t1.b end*b-11, -f))} +} {111} +do_test randexpr-2.740 { + db eval {SELECT -c-case 11 when ++e*(select cast(avg( -t1.f+case when case when 19=t1.e+((select min(17 | a) from t1)) then t1.f else t1.b end>=t1.e then t1.e when (((exists(select 1 from t1 where b in (select case -count(*) when (count(distinct d)) then count(*) else (cast(avg(t1.b) AS integer)) end from t1 union select -count(*) from t1))))) then t1.a else a end+t1.a*d) AS integer) from t1) then 13 else f end*e FROM t1 WHERE (select cast(avg(case when +d>=e+17 and (not case when 13>=(select case -count(distinct t1.e) when cast(avg(c) AS integer) then cast(avg(19) AS integer) else count(distinct t1.a) end from t1) then t1.e else t1.f end in (select t1.b from t1 union select 13 from t1)) or t1.a<=e and e in (select count(*) from t1 union select -count(*) from t1) and b>=t1.b or t1.e between c and 19 then c when f between 19 and b then coalesce((select max(t1.d) from t1 where a<>t1.b),13)* -(d) else t1.e end) AS integer) from t1)<=19} +} {} +do_test randexpr-2.741 { + db eval {SELECT -c-case 11 when ++e*(select cast(avg( -t1.f+case when case when 19=t1.e+((select min(17 | a) from t1)) then t1.f else t1.b end>=t1.e then t1.e when (((exists(select 1 from t1 where b in (select case -count(*) when (count(distinct d)) then count(*) else (cast(avg(t1.b) AS integer)) end from t1 union select -count(*) from t1))))) then t1.a else a end+t1.a*d) AS integer) from t1) then 13 else f end*e FROM t1 WHERE NOT ((select cast(avg(case when +d>=e+17 and (not case when 13>=(select case -count(distinct t1.e) when cast(avg(c) AS integer) then cast(avg(19) AS integer) else count(distinct t1.a) end from t1) then t1.e else t1.f end in (select t1.b from t1 union select 13 from t1)) or t1.a<=e and e in (select count(*) from t1 union select -count(*) from t1) and b>=t1.b or t1.e between c and 19 then c when f between 19 and b then coalesce((select max(t1.d) from t1 where a<>t1.b),13)* -(d) else t1.e end) AS integer) from t1)<=19)} +} {-300300} +do_test randexpr-2.742 { + db eval {SELECT -c-case 11 when ++e*(select cast(avg( -t1.f+case when case when 19=t1.e+((select min(17 & a) from t1)) then t1.f else t1.b end>=t1.e then t1.e when (((exists(select 1 from t1 where b in (select case -count(*) when (count(distinct d)) then count(*) else (cast(avg(t1.b) AS integer)) end 
from t1 union select -count(*) from t1))))) then t1.a else a end+t1.a*d) AS integer) from t1) then 13 else f end*e FROM t1 WHERE NOT ((select cast(avg(case when +d>=e+17 and (not case when 13>=(select case -count(distinct t1.e) when cast(avg(c) AS integer) then cast(avg(19) AS integer) else count(distinct t1.a) end from t1) then t1.e else t1.f end in (select t1.b from t1 union select 13 from t1)) or t1.a<=e and e in (select count(*) from t1 union select -count(*) from t1) and b>=t1.b or t1.e between c and 19 then c when f between 19 and b then coalesce((select max(t1.d) from t1 where a<>t1.b),13)* -(d) else t1.e end) AS integer) from t1)<=19)} +} {-300300} +do_test randexpr-2.743 { + db eval {SELECT case d when 11 then t1.d-(abs(t1.d)/abs(19))+coalesce((select d*d from t1 where (select cast(avg(case when f | c+case when e in (select min(coalesce((select t1.f from t1 where 11 not between t1.b and 19),t1.f)) from t1 union select (max(t1.b)) from t1) then 19 when t1.b=b or t1.c>=c then 13 else e end*19 in (select 17 from t1 union select 11 from t1) then -c else t1.d end) AS integer) from t1)<13),d) else 11 end FROM t1 WHERE (b>~t1.c-+(abs(case when 11>(c) and t1.a>13 then t1.d else (b) end)/abs(t1.d))-13 or t1.f between t1.a and 19 and 11 in (select (min(t1.e))-~( -+count(*)) from t1 union select count(distinct t1.c) from t1)) and 17 not in (f,(t1.b),t1.c) and (t1.f<>t1.e) or d in (f,t1.a,t1.d) or (d)<=t1.e} +} {11} +do_test randexpr-2.744 { + db eval {SELECT case d when 11 then t1.d-(abs(t1.d)/abs(19))+coalesce((select d*d from t1 where (select cast(avg(case when f | c+case when e in (select min(coalesce((select t1.f from t1 where 11 not between t1.b and 19),t1.f)) from t1 union select (max(t1.b)) from t1) then 19 when t1.b=b or t1.c>=c then 13 else e end*19 in (select 17 from t1 union select 11 from t1) then -c else t1.d end) AS integer) from t1)<13),d) else 11 end FROM t1 WHERE NOT ((b>~t1.c-+(abs(case when 11>(c) and t1.a>13 then t1.d else (b) end)/abs(t1.d))-13 or t1.f between t1.a and 19 and 11 in (select (min(t1.e))-~( -+count(*)) from t1 union select count(distinct t1.c) from t1)) and 17 not in (f,(t1.b),t1.c) and (t1.f<>t1.e) or d in (f,t1.a,t1.d) or (d)<=t1.e)} +} {} +do_test randexpr-2.745 { + db eval {SELECT case d when 11 then t1.d-(abs(t1.d)/abs(19))+coalesce((select d*d from t1 where (select cast(avg(case when f & c+case when e in (select min(coalesce((select t1.f from t1 where 11 not between t1.b and 19),t1.f)) from t1 union select (max(t1.b)) from t1) then 19 when t1.b=b or t1.c>=c then 13 else e end*19 in (select 17 from t1 union select 11 from t1) then -c else t1.d end) AS integer) from t1)<13),d) else 11 end FROM t1 WHERE (b>~t1.c-+(abs(case when 11>(c) and t1.a>13 then t1.d else (b) end)/abs(t1.d))-13 or t1.f between t1.a and 19 and 11 in (select (min(t1.e))-~( -+count(*)) from t1 union select count(distinct t1.c) from t1)) and 17 not in (f,(t1.b),t1.c) and (t1.f<>t1.e) or d in (f,t1.a,t1.d) or (d)<=t1.e} +} {11} +do_test randexpr-2.746 { + db eval {SELECT (select -~count(distinct ~+case when c<>coalesce((select max(case when ~13 in (t1.a,t1.f,13+t1.d+(19)*t1.d*f-e) then 13 else a end) from t1 where -t1.d<>19),(a))*d+a then t1.d when exists(select 1 from t1 where b=d) then t1.f else 17 end*11) | count(*)-max(t1.e) from t1) FROM t1 WHERE 19 in (select max(b) from t1 union select count(*) from t1) and +t1.b+c*d+coalesce((select max(17) from t1 where case when t1.a=coalesce((select max((select (min(t1.f)) from t1)) from t1 where not case when not exists(select 1 from t1 
where ( -t1.b)=13 and d in (f,c,e)) or 17 between 17 and e then t1.e-t1.c else 11 end>=d),19)-11 then 11 else 17 end<=t1.a),t1.f)+t1.f-f<>17} +} {} +do_test randexpr-2.747 { + db eval {SELECT (select -~count(distinct ~+case when c<>coalesce((select max(case when ~13 in (t1.a,t1.f,13+t1.d+(19)*t1.d*f-e) then 13 else a end) from t1 where -t1.d<>19),(a))*d+a then t1.d when exists(select 1 from t1 where b=d) then t1.f else 17 end*11) | count(*)-max(t1.e) from t1) FROM t1 WHERE NOT (19 in (select max(b) from t1 union select count(*) from t1) and +t1.b+c*d+coalesce((select max(17) from t1 where case when t1.a=coalesce((select max((select (min(t1.f)) from t1)) from t1 where not case when not exists(select 1 from t1 where ( -t1.b)=13 and d in (f,c,e)) or 17 between 17 and e then t1.e-t1.c else 11 end>=d),19)-11 then 11 else 17 end<=t1.a),t1.f)+t1.f-f<>17)} +} {-497} +do_test randexpr-2.748 { + db eval {SELECT (select -~count(distinct ~+case when c<>coalesce((select max(case when ~13 in (t1.a,t1.f,13+t1.d+(19)*t1.d*f-e) then 13 else a end) from t1 where -t1.d<>19),(a))*d+a then t1.d when exists(select 1 from t1 where b=d) then t1.f else 17 end*11) & count(*)-max(t1.e) from t1) FROM t1 WHERE NOT (19 in (select max(b) from t1 union select count(*) from t1) and +t1.b+c*d+coalesce((select max(17) from t1 where case when t1.a=coalesce((select max((select (min(t1.f)) from t1)) from t1 where not case when not exists(select 1 from t1 where ( -t1.b)=13 and d in (f,c,e)) or 17 between 17 and e then t1.e-t1.c else 11 end>=d),19)-11 then 11 else 17 end<=t1.a),t1.f)+t1.f-f<>17)} +} {0} +do_test randexpr-2.749 { + db eval {SELECT coalesce((select max(a) from t1 where exists(select 1 from t1 where (abs(t1.f)/abs(case when (t1.a+case when exists(select 1 from t1 where t1.e>=(abs(coalesce((select f from t1 where (((b>=t1.d) or t1.e= -a) or 17=a)),case when t1.d between t1.a and b then t1.b when e in (e,t1.a,t1.e) then 13 else (f) end))/abs(13))) then 11 else t1.d end* - -t1.f)<=c then d else t1.d end))-t1.c-19>=t1.a)),((t1.b))) FROM t1 WHERE case when t1.f in (e,19,(abs(t1.e)/abs(t1.e))) and (17 between 11+(select +max(case case when f=(t1.b) then e when t1.e in (13,19,c) then c else -t1.b end- -c when (13) then d else t1.f end) from t1) and 17 and (19 in (b,t1.d,11))) and f not between 19 and b then e when 13 not between (t1.e) and t1.a then 11 else 11 end not between d and t1.a} +} {200} +do_test randexpr-2.750 { + db eval {SELECT coalesce((select max(a) from t1 where exists(select 1 from t1 where (abs(t1.f)/abs(case when (t1.a+case when exists(select 1 from t1 where t1.e>=(abs(coalesce((select f from t1 where (((b>=t1.d) or t1.e= -a) or 17=a)),case when t1.d between t1.a and b then t1.b when e in (e,t1.a,t1.e) then 13 else (f) end))/abs(13))) then 11 else t1.d end* - -t1.f)<=c then d else t1.d end))-t1.c-19>=t1.a)),((t1.b))) FROM t1 WHERE NOT (case when t1.f in (e,19,(abs(t1.e)/abs(t1.e))) and (17 between 11+(select +max(case case when f=(t1.b) then e when t1.e in (13,19,c) then c else -t1.b end- -c when (13) then d else t1.f end) from t1) and 17 and (19 in (b,t1.d,11))) and f not between 19 and b then e when 13 not between (t1.e) and t1.a then 11 else 11 end not between d and t1.a)} +} {} +do_test randexpr-2.751 { + db eval {SELECT 11-17 | case when 11=c or 11 in (select cast(avg(t1.c) AS integer)-case cast(avg(t1.b) AS integer) when count(distinct e) then min(t1.a) else -count(distinct 19) end from t1 union select count(*) from t1) or e in (t1.e,t1.b,b) or 13=t1.f and e>=t1.a then e else e end when ( -19<=e) 
then 13 else e end+a FROM t1 WHERE t1.c*d not in ( -t1.a,coalesce((select d from t1 where coalesce((select c from t1 where ((d<=11))),(17)+(abs(coalesce((select (select ( -+count(*))+count(distinct d) from t1) from t1 where (coalesce((select b from t1 where ~a<>13),(t1.c))) not between d and t1.d),17))/abs(d)))*f17),e),t1.b)} +} {-1} +do_test randexpr-2.752 { + db eval {SELECT 11-17 | case when 11=c or 11 in (select cast(avg(t1.c) AS integer)-case cast(avg(t1.b) AS integer) when count(distinct e) then min(t1.a) else -count(distinct 19) end from t1 union select count(*) from t1) or e in (t1.e,t1.b,b) or 13=t1.f and e>=t1.a then e else e end when ( -19<=e) then 13 else e end+a FROM t1 WHERE NOT (t1.c*d not in ( -t1.a,coalesce((select d from t1 where coalesce((select c from t1 where ((d<=11))),(17)+(abs(coalesce((select (select ( -+count(*))+count(distinct d) from t1) from t1 where (coalesce((select b from t1 where ~a<>13),(t1.c))) not between d and t1.d),17))/abs(d)))*f17),e),t1.b))} +} {} +do_test randexpr-2.753 { + db eval {SELECT 11-17 & case when 11=c or 11 in (select cast(avg(t1.c) AS integer)-case cast(avg(t1.b) AS integer) when count(distinct e) then min(t1.a) else -count(distinct 19) end from t1 union select count(*) from t1) or e in (t1.e,t1.b,b) or 13=t1.f and e>=t1.a then e else e end when ( -19<=e) then 13 else e end+a FROM t1 WHERE t1.c*d not in ( -t1.a,coalesce((select d from t1 where coalesce((select c from t1 where ((d<=11))),(17)+(abs(coalesce((select (select ( -+count(*))+count(distinct d) from t1) from t1 where (coalesce((select b from t1 where ~a<>13),(t1.c))) not between d and t1.d),17))/abs(d)))*f17),e),t1.b)} +} {112} +do_test randexpr-2.754 { + db eval {SELECT (abs(f-case when +(t1.f)-c13 and -ft1.c then 11 else -11 end<>t1.c and t1.f not between (11) and (t1.f) then b else 11 end)*b+t1.f,a,t1.d)) then 11 else t1.d end=e) or e<(t1.f)} +} {99} +do_test randexpr-2.755 { + db eval {SELECT (abs(f-case when +(t1.f)-c13 and -ft1.c then 11 else -11 end<>t1.c and t1.f not between (11) and (t1.f) then b else 11 end)*b+t1.f,a,t1.d)) then 11 else t1.d end=e) or e<(t1.f))} +} {} +do_test randexpr-2.756 { + db eval {SELECT (abs(f-case when +(t1.f)-c13 and -ft1.c then 11 else -11 end<>t1.c and t1.f not between (11) and (t1.f) then b else 11 end)*b+t1.f,a,t1.d)) then 11 else t1.d end=e) or e<(t1.f)} +} {99} +do_test randexpr-2.757 { + db eval {SELECT +(select (max(case when not (((exists(select 1 from t1 where b not between t1.a and t1.d) or t1.a>=coalesce((select max(17) from t1 where -t1.e between t1.e and a),f)-11) or 11<=t1.c)) then b when 19 between 19 and b then t1.f else +d+ -t1.a-t1.f-e end | f-t1.b+ -t1.b)+min(t1.b)) from t1)*t1.c FROM t1 WHERE b in (t1.b-b,coalesce((select max(e) from t1 where t1.d not between 11 and case when d>=+13 then case when t1.a between e and a then t1.c else (t1.f)+(t1.b) end else case (d)*e when b then f else a end end or t1.d in (select cast(avg(d) AS integer) from t1 union select +abs( -max(e))*max(19) | max(d) from t1)),11),t1.c)} +} {} +do_test randexpr-2.758 { + db eval {SELECT +(select (max(case when not (((exists(select 1 from t1 where b not between t1.a and t1.d) or t1.a>=coalesce((select max(17) from t1 where -t1.e between t1.e and a),f)-11) or 11<=t1.c)) then b when 19 between 19 and b then t1.f else +d+ -t1.a-t1.f-e end | f-t1.b+ -t1.b)+min(t1.b)) from t1)*t1.c FROM t1 WHERE NOT (b in (t1.b-b,coalesce((select max(e) from t1 where t1.d not between 11 and case when d>=+13 then case when t1.a between e and a then t1.c else (t1.f)+(t1.b) 
end else case (d)*e when b then f else a end end or t1.d in (select cast(avg(d) AS integer) from t1 union select +abs( -max(e))*max(19) | max(d) from t1)),11),t1.c))} +} {278400} +do_test randexpr-2.759 { + db eval {SELECT +(select (max(case when not (((exists(select 1 from t1 where b not between t1.a and t1.d) or t1.a>=coalesce((select max(17) from t1 where -t1.e between t1.e and a),f)-11) or 11<=t1.c)) then b when 19 between 19 and b then t1.f else +d+ -t1.a-t1.f-e end & f-t1.b+ -t1.b)+min(t1.b)) from t1)*t1.c FROM t1 WHERE NOT (b in (t1.b-b,coalesce((select max(e) from t1 where t1.d not between 11 and case when d>=+13 then case when t1.a between e and a then t1.c else (t1.f)+(t1.b) end else case (d)*e when b then f else a end end or t1.d in (select cast(avg(d) AS integer) from t1 union select +abs( -max(e))*max(19) | max(d) from t1)),11),t1.c))} +} {81600} +do_test randexpr-2.760 { + db eval {SELECT t1.e*t1.c*(select count(*)+abs(cast(avg(case t1.a when (select -count(*)-(min(t1.a)) | +(abs(case max(a) when abs(min(17)) then +cast(avg(a) AS integer) | count(distinct d) else count(*) end)) from t1) then a else -+b-coalesce((select max(f*19) from t1 where ~~c in (a,13,t1.a)),t1.b) end) AS integer)) from t1) FROM t1 WHERE 19-(abs(b)/abs(t1.b)) | case when t1.e in (select case (+cast(avg(t1.c+a) AS integer) | (cast(avg(t1.d) AS integer) | -cast(avg( -d) AS integer)* -( -count(*))+min(17))) when min(13) then max( -11) else count(*) end from t1 union select min(17) from t1) or 13 in (case when t1.e between e and coalesce((select c from t1 where not exists(select 1 from t1 where (b)>=t1.e)),t1.b) then 19 else 11 end,17,f) then 11 else b end in (select c from t1 union select 19 from t1)} +} {} +do_test randexpr-2.761 { + db eval {SELECT t1.e*t1.c*(select count(*)+abs(cast(avg(case t1.a when (select -count(*)-(min(t1.a)) | +(abs(case max(a) when abs(min(17)) then +cast(avg(a) AS integer) | count(distinct d) else count(*) end)) from t1) then a else -+b-coalesce((select max(f*19) from t1 where ~~c in (a,13,t1.a)),t1.b) end) AS integer)) from t1) FROM t1 WHERE NOT (19-(abs(b)/abs(t1.b)) | case when t1.e in (select case (+cast(avg(t1.c+a) AS integer) | (cast(avg(t1.d) AS integer) | -cast(avg( -d) AS integer)* -( -count(*))+min(17))) when min(13) then max( -11) else count(*) end from t1 union select min(17) from t1) or 13 in (case when t1.e between e and coalesce((select c from t1 where not exists(select 1 from t1 where (b)>=t1.e)),t1.b) then 19 else 11 end,17,f) then 11 else b end in (select c from t1 union select 19 from t1))} +} {60150000} +do_test randexpr-2.762 { + db eval {SELECT t1.e*t1.c*(select count(*)+abs(cast(avg(case t1.a when (select -count(*)-(min(t1.a)) & +(abs(case max(a) when abs(min(17)) then +cast(avg(a) AS integer) & count(distinct d) else count(*) end)) from t1) then a else -+b-coalesce((select max(f*19) from t1 where ~~c in (a,13,t1.a)),t1.b) end) AS integer)) from t1) FROM t1 WHERE NOT (19-(abs(b)/abs(t1.b)) | case when t1.e in (select case (+cast(avg(t1.c+a) AS integer) | (cast(avg(t1.d) AS integer) | -cast(avg( -d) AS integer)* -( -count(*))+min(17))) when min(13) then max( -11) else count(*) end from t1 union select min(17) from t1) or 13 in (case when t1.e between e and coalesce((select c from t1 where not exists(select 1 from t1 where (b)>=t1.e)),t1.b) then 19 else 11 end,17,f) then 11 else b end in (select c from t1 union select 19 from t1))} +} {60150000} +do_test randexpr-2.763 { + db eval {SELECT t1.b+case when e+(select abs(abs(min(d+case 11 when t1.d+19 | b | case 
when e<=b+coalesce((select 11 from t1 where 11<>13),17)-17 then t1.d else d end+19 then 11 else d end))) from t1)- -d-(t1.e) in (select 17 from t1 union select 11 from t1) then t1.e when exists(select 1 from t1 where e>t1.c) then t1.b else 13 end FROM t1 WHERE e in (select count(*) from t1 union select min(case when (select abs(cast(avg(+17) AS integer)) from t1) not between t1.e and 17+t1.f then f when ((case when ((select cast(avg(t1.d) AS integer) from t1)=17) then 19 else t1.f end)<>t1.c or t1.a in (select -+abs(cast(avg(t1.b) AS integer)) from t1 union select +((cast(avg(11) AS integer)))+ -min(b) from t1)) or not exists(select 1 from t1 where ((t1.d in (b,a,17)) and f<=t1.a)) and t1.f<> - -t1.a then d else - -t1.d end) from t1)} +} {} +do_test randexpr-2.764 { + db eval {SELECT t1.b+case when e+(select abs(abs(min(d+case 11 when t1.d+19 | b | case when e<=b+coalesce((select 11 from t1 where 11<>13),17)-17 then t1.d else d end+19 then 11 else d end))) from t1)- -d-(t1.e) in (select 17 from t1 union select 11 from t1) then t1.e when exists(select 1 from t1 where e>t1.c) then t1.b else 13 end FROM t1 WHERE NOT (e in (select count(*) from t1 union select min(case when (select abs(cast(avg(+17) AS integer)) from t1) not between t1.e and 17+t1.f then f when ((case when ((select cast(avg(t1.d) AS integer) from t1)=17) then 19 else t1.f end)<>t1.c or t1.a in (select -+abs(cast(avg(t1.b) AS integer)) from t1 union select +((cast(avg(11) AS integer)))+ -min(b) from t1)) or not exists(select 1 from t1 where ((t1.d in (b,a,17)) and f<=t1.a)) and t1.f<> - -t1.a then d else - -t1.d end) from t1))} +} {400} +do_test randexpr-2.765 { + db eval {SELECT t1.b+case when e+(select abs(abs(min(d+case 11 when t1.d+19 & b & case when e<=b+coalesce((select 11 from t1 where 11<>13),17)-17 then t1.d else d end+19 then 11 else d end))) from t1)- -d-(t1.e) in (select 17 from t1 union select 11 from t1) then t1.e when exists(select 1 from t1 where e>t1.c) then t1.b else 13 end FROM t1 WHERE NOT (e in (select count(*) from t1 union select min(case when (select abs(cast(avg(+17) AS integer)) from t1) not between t1.e and 17+t1.f then f when ((case when ((select cast(avg(t1.d) AS integer) from t1)=17) then 19 else t1.f end)<>t1.c or t1.a in (select -+abs(cast(avg(t1.b) AS integer)) from t1 union select +((cast(avg(11) AS integer)))+ -min(b) from t1)) or not exists(select 1 from t1 where ((t1.d in (b,a,17)) and f<=t1.a)) and t1.f<> - -t1.a then d else - -t1.d end) from t1))} +} {400} +do_test randexpr-2.766 { + db eval {SELECT (select cast(avg( -coalesce((select max(+(abs(t1.d)/abs(~d))*+c*t1.c) from t1 where e in (select coalesce((select f from t1 where t1.f<> -11),17)+c*19 from t1 union select f from t1)),t1.c)) AS integer)-~case ~count(distinct 17)*count(distinct 19) when count(distinct t1.c)*count(*)-count(distinct 19) then +(max(a)+case - - - -min(t1.f) when max(c) then count(distinct (f)) else min(13) end) else ( -cast(avg( -t1.b) AS integer)) end from t1) FROM t1 WHERE exists(select 1 from t1 where (abs(e*coalesce((select (abs(b*(t1.b)+f)/abs(d)) from t1 where t1.c in (select min(a | t1.b) from t1 union select abs(case -case ~ -~cast(avg(e) AS integer) when ++min(17)+( -cast(avg(a) AS integer))-max(t1.c) then cast(avg(t1.b) AS integer) else cast(avg(f) AS integer) end when - -max((t1.d)) then -min(t1.b) else cast(avg(t1.c) AS integer) end) from t1)),b))/abs(t1.d)) in (select min(t1.c)-cast(avg( -e) AS integer)- -cast(avg(t1.f) AS integer) from t1 union select (max((19))) from t1))} +} {} +do_test 
randexpr-2.767 { + db eval {SELECT (select cast(avg( -coalesce((select max(+(abs(t1.d)/abs(~d))*+c*t1.c) from t1 where e in (select coalesce((select f from t1 where t1.f<> -11),17)+c*19 from t1 union select f from t1)),t1.c)) AS integer)-~case ~count(distinct 17)*count(distinct 19) when count(distinct t1.c)*count(*)-count(distinct 19) then +(max(a)+case - - - -min(t1.f) when max(c) then count(distinct (f)) else min(13) end) else ( -cast(avg( -t1.b) AS integer)) end from t1) FROM t1 WHERE NOT (exists(select 1 from t1 where (abs(e*coalesce((select (abs(b*(t1.b)+f)/abs(d)) from t1 where t1.c in (select min(a | t1.b) from t1 union select abs(case -case ~ -~cast(avg(e) AS integer) when ++min(17)+( -cast(avg(a) AS integer))-max(t1.c) then cast(avg(t1.b) AS integer) else cast(avg(f) AS integer) end when - -max((t1.d)) then -min(t1.b) else cast(avg(t1.c) AS integer) end) from t1)),b))/abs(t1.d)) in (select min(t1.c)-cast(avg( -e) AS integer)- -cast(avg(t1.f) AS integer) from t1 union select (max((19))) from t1)))} +} {-99} +do_test randexpr-2.768 { + db eval {SELECT t1.a+11*coalesce((select max( -t1.e-coalesce((select max((abs(t1.c)/abs((13*e+t1.c)-t1.f))) from t1 where exists(select 1 from t1 where d<>a) or ~(select case abs(count(distinct f)) when (count(*)) then count(*) else min(t1.f) end from t1)+coalesce((select f from t1 where d in (select min(t1.d) from t1 union select count(*) from t1)),c) in (11,t1.a,t1.b)),11)- -11) from t1 where 13<11),t1.b) FROM t1 WHERE not ++coalesce((select t1.f from t1 where not exists(select 1 from t1 where ((select min(b) from t1)<>case t1.d when coalesce((select max(case e-t1.c when 19-t1.c+f+(t1.c*coalesce((select max(17) from t1 where 17 between 11 and t1.a),t1.d)+b) then -17 else d end-a) from t1 where t1.d>11),b) then 11 else c end))),b | t1.d)*17 not between t1.e and 17} +} {} +do_test randexpr-2.769 { + db eval {SELECT t1.a+11*coalesce((select max( -t1.e-coalesce((select max((abs(t1.c)/abs((13*e+t1.c)-t1.f))) from t1 where exists(select 1 from t1 where d<>a) or ~(select case abs(count(distinct f)) when (count(*)) then count(*) else min(t1.f) end from t1)+coalesce((select f from t1 where d in (select min(t1.d) from t1 union select count(*) from t1)),c) in (11,t1.a,t1.b)),11)- -11) from t1 where 13<11),t1.b) FROM t1 WHERE NOT (not ++coalesce((select t1.f from t1 where not exists(select 1 from t1 where ((select min(b) from t1)<>case t1.d when coalesce((select max(case e-t1.c when 19-t1.c+f+(t1.c*coalesce((select max(17) from t1 where 17 between 11 and t1.a),t1.d)+b) then -17 else d end-a) from t1 where t1.d>11),b) then 11 else c end))),b | t1.d)*17 not between t1.e and 17)} +} {2300} +do_test randexpr-2.770 { + db eval {SELECT coalesce((select max(coalesce((select ((abs(coalesce((select max((select ~(count(distinct e-11)-cast(avg(t1.a) AS integer) | count(*)) from t1)) from t1 where t1.fb or t1.c in (select c from t1 union select 19 from t1)),e | b))/abs(t1.f))) from t1 where e in (select -(b) from t1 union select 13 from t1)),t1.a)) from t1 where f<>11),t1.e) FROM t1 WHERE -11 in (a,case when exists(select 1 from t1 where 11 in (select 13 from t1 union select t1.d from t1)) and exists(select 1 from t1 where exists(select 1 from t1 where (t1.a) not between t1.c and t1.c)) then ~case when ~t1.f not between +t1.b-11 and e or not b in (c,17,e) and (not exists(select 1 from t1 where b in (( -t1.e), -t1.c,t1.c))) or 17 not in (t1.e,t1.c,t1.f) then c when 19= - -11 then t1.b else t1.c end when (13) between t1.f and ( -t1.e) then t1.d else -t1.d end*c,d)} +} {} 
+do_test randexpr-2.771 { + db eval {SELECT coalesce((select max(coalesce((select ((abs(coalesce((select max((select ~(count(distinct e-11)-cast(avg(t1.a) AS integer) | count(*)) from t1)) from t1 where t1.fb or t1.c in (select c from t1 union select 19 from t1)),e | b))/abs(t1.f))) from t1 where e in (select -(b) from t1 union select 13 from t1)),t1.a)) from t1 where f<>11),t1.e) FROM t1 WHERE NOT ( -11 in (a,case when exists(select 1 from t1 where 11 in (select 13 from t1 union select t1.d from t1)) and exists(select 1 from t1 where exists(select 1 from t1 where (t1.a) not between t1.c and t1.c)) then ~case when ~t1.f not between +t1.b-11 and e or not b in (c,17,e) and (not exists(select 1 from t1 where b in (( -t1.e), -t1.c,t1.c))) or 17 not in (t1.e,t1.c,t1.f) then c when 19= - -11 then t1.b else t1.c end when (13) between t1.f and ( -t1.e) then t1.d else -t1.d end*c,d))} +} {100} +do_test randexpr-2.772 { + db eval {SELECT coalesce((select max(coalesce((select ((abs(coalesce((select max((select ~(count(distinct e-11)-cast(avg(t1.a) AS integer) & count(*)) from t1)) from t1 where t1.fb or t1.c in (select c from t1 union select 19 from t1)),e & b))/abs(t1.f))) from t1 where e in (select -(b) from t1 union select 13 from t1)),t1.a)) from t1 where f<>11),t1.e) FROM t1 WHERE NOT ( -11 in (a,case when exists(select 1 from t1 where 11 in (select 13 from t1 union select t1.d from t1)) and exists(select 1 from t1 where exists(select 1 from t1 where (t1.a) not between t1.c and t1.c)) then ~case when ~t1.f not between +t1.b-11 and e or not b in (c,17,e) and (not exists(select 1 from t1 where b in (( -t1.e), -t1.c,t1.c))) or 17 not in (t1.e,t1.c,t1.f) then c when 19= - -11 then t1.b else t1.c end when (13) between t1.f and ( -t1.e) then t1.d else -t1.d end*c,d))} +} {100} +do_test randexpr-2.773 { + db eval {SELECT 17-13-case 19 when 17+case case t1.b when t1.b then coalesce((select coalesce((select 11 from t1 where (coalesce((select t1.a from t1 where (abs(f)/abs(case t1.e when 11 then a else t1.c end))19 or d not between f and t1.e),(t1.c)) else 11 end when -a then 13 else e end then t1.b else b end+t1.c FROM t1 WHERE not exists(select 1 from t1 where coalesce((select e from t1 where case when not exists(select 1 from t1 where not not exists(select 1 from t1 where f<> -t1.e-t1.c)) or coalesce((select max(17+t1.c) from t1 where t1.f in (select cast(avg(17) AS integer) from t1 union select cast(avg(t1.c) AS integer) from t1)),t1.d)<>19 then 19 else t1.a+ -(b)+t1.c end<=t1.c),t1.b)>t1.d) or (e) in (t1.c,f,13) or d in (select +count(*) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.774 { + db eval {SELECT 17-13-case 19 when 17+case case t1.b when t1.b then coalesce((select coalesce((select 11 from t1 where (coalesce((select t1.a from t1 where (abs(f)/abs(case t1.e when 11 then a else t1.c end))19 or d not between f and t1.e),(t1.c)) else 11 end when -a then 13 else e end then t1.b else b end+t1.c FROM t1 WHERE NOT (not exists(select 1 from t1 where coalesce((select e from t1 where case when not exists(select 1 from t1 where not not exists(select 1 from t1 where f<> -t1.e-t1.c)) or coalesce((select max(17+t1.c) from t1 where t1.f in (select cast(avg(17) AS integer) from t1 union select cast(avg(t1.c) AS integer) from t1)),t1.d)<>19 then 19 else t1.a+ -(b)+t1.c end<=t1.c),t1.b)>t1.d) or (e) in (t1.c,f,13) or d in (select +count(*) from t1 union select count(*) from t1))} +} {104} +do_test randexpr-2.775 { + db eval {SELECT t1.e*(abs(coalesce((select max(case when t1.c in (select 
(case when d>13 then t1.c else 11 end+e) from t1 union select t1.a from t1) or t1.a between 17 and c then c when t1.d>=a then t1.c else t1.e end-t1.e*t1.b-d) from t1 where b<>(t1.c) or not exists(select 1 from t1 where b in (t1.d,13,a))),11)*t1.a)/abs(t1.a))+t1.c FROM t1 WHERE a>c} +} {} +do_test randexpr-2.776 { + db eval {SELECT t1.e*(abs(coalesce((select max(case when t1.c in (select (case when d>13 then t1.c else 11 end+e) from t1 union select t1.a from t1) or t1.a between 17 and c then c when t1.d>=a then t1.c else t1.e end-t1.e*t1.b-d) from t1 where b<>(t1.c) or not exists(select 1 from t1 where b in (t1.d,13,a))),11)*t1.a)/abs(t1.a))+t1.c FROM t1 WHERE NOT (a>c)} +} {50050300} +do_test randexpr-2.777 { + db eval {SELECT t1.d*(+coalesce((select max(case when coalesce((select (abs(f)/abs((abs(d)/abs((select cast(avg(19+11) AS integer) from t1)))+t1.b)) from t1 where exists(select 1 from t1 where t1.d not between t1.f and t1.e)),17) not in (17,t1.f, -t1.b) then 19 else -19 end) from t1 where exists(select 1 from t1 where 11 in (select abs(count(distinct t1.f)) from t1 union select cast(avg(t1.e) AS integer) from t1) and exists(select 1 from t1 where t1.f -11),(17)) | b-19*d FROM t1 WHERE b>coalesce((select max(13+d) from t1 where 17>=11),t1.b)} +} {} +do_test randexpr-2.780 { + db eval {SELECT coalesce((select max(coalesce((select (select cast(avg(d) AS integer) | case max(~f) | count(distinct t1.d) when count(*) then count(*) else -min(17) end from t1) from t1 where b=t1.e+ -e or t1.a not in (t1.f,t1.d,t1.d) or f not in (t1.a, -t1.b,f)),t1.c)+b | 13-d) from t1 where t1.b> -11),(17)) | b-19*d FROM t1 WHERE NOT (b>coalesce((select max(13+d) from t1 where 17>=11),t1.b))} +} {-1} +do_test randexpr-2.781 { + db eval {SELECT coalesce((select max(coalesce((select (select cast(avg(d) AS integer) & case max(~f) & count(distinct t1.d) when count(*) then count(*) else -min(17) end from t1) from t1 where b=t1.e+ -e or t1.a not in (t1.f,t1.d,t1.d) or f not in (t1.a, -t1.b,f)),t1.c)+b & 13-d) from t1 where t1.b> -11),(17)) & b-19*d FROM t1 WHERE NOT (b>coalesce((select max(13+d) from t1 where 17>=11),t1.b))} +} {8} +do_test randexpr-2.782 { + db eval {SELECT (d+f*t1.c-t1.e+coalesce((select c from t1 where c>=a),case when (c in (select e from t1 union select 13 from t1)) then 11+a else f end))-13 FROM t1 WHERE c not between c and b+coalesce((select c from t1 where t1.d=a),case when (c in (select e from t1 union select 13 from t1)) then 11+a else f end))-13 FROM t1 WHERE NOT (c not between c and b+coalesce((select c from t1 where t1.d=d then (abs(13)/abs(t1.f)) when (d in ((select abs(min(19)) from t1), -(select count(*)-abs( -count(*)) from t1),t1.e)) then coalesce((select max(19) from t1 where (c)>t1.f),17) else 13 end<>a then 11 when b<(d) then b else c end,d) then 17 when (17 in (13,t1.a,t1.c)) then e else c end+t1.c FROM t1 WHERE t1.a+t1.e in (select 19 from t1 union select t1.c from t1)} +} {} +do_test randexpr-2.785 { + db eval {SELECT case when t1.e not in (t1.a,~case when case when 17>=d then (abs(13)/abs(t1.f)) when (d in ((select abs(min(19)) from t1), -(select count(*)-abs( -count(*)) from t1),t1.e)) then coalesce((select max(19) from t1 where (c)>t1.f),17) else 13 end<>a then 11 when b<(d) then b else c end,d) then 17 when (17 in (13,t1.a,t1.c)) then e else c end+t1.c FROM t1 WHERE NOT (t1.a+t1.e in (select 19 from t1 union select t1.c from t1))} +} {317} +do_test randexpr-2.786 { + db eval {SELECT (select ~cast(avg(t1.c) AS integer)*case count(*) | +min(case +case when a<13 or case t1.f 
when c then 17 else t1.d end in (11,a,(t1.a)) and a<>a then t1.b+f when f not in (d,t1.a,c) then t1.f else t1.e end when t1.f then 17 else f end)+abs(++count(distinct 19))*(count(distinct 13))-count(distinct t1.e)-(count(*))*max(19) when max(11) then cast(avg(a) AS integer) else min(t1.f) end from t1) FROM t1 WHERE t1.a not between 17 and d} +} {} +do_test randexpr-2.787 { + db eval {SELECT (select ~cast(avg(t1.c) AS integer)*case count(*) | +min(case +case when a<13 or case t1.f when c then 17 else t1.d end in (11,a,(t1.a)) and a<>a then t1.b+f when f not in (d,t1.a,c) then t1.f else t1.e end when t1.f then 17 else f end)+abs(++count(distinct 19))*(count(distinct 13))-count(distinct t1.e)-(count(*))*max(19) when max(11) then cast(avg(a) AS integer) else min(t1.f) end from t1) FROM t1 WHERE NOT (t1.a not between 17 and d)} +} {-180600} +do_test randexpr-2.788 { + db eval {SELECT (select ~cast(avg(t1.c) AS integer)*case count(*) & +min(case +case when a<13 or case t1.f when c then 17 else t1.d end in (11,a,(t1.a)) and a<>a then t1.b+f when f not in (d,t1.a,c) then t1.f else t1.e end when t1.f then 17 else f end)+abs(++count(distinct 19))*(count(distinct 13))-count(distinct t1.e)-(count(*))*max(19) when max(11) then cast(avg(a) AS integer) else min(t1.f) end from t1) FROM t1 WHERE NOT (t1.a not between 17 and d)} +} {-180600} +do_test randexpr-2.789 { + db eval {SELECT case t1.a when c then -~t1.d*t1.e-coalesce((select max((abs(d+e+d)/abs(case when 11-t1.d in (select t1.d from t1 union select coalesce((select t1.c from t1 where 17<=t1.d and e between 11 and a),c) from t1) or 11 in (select ( -+(count(*))) from t1 union select count(distinct -t1.a) from t1) then (f) when b=a then c else t1.b end))) from t1 where t1.c not between 17 and t1.b),13)+t1.f else d end FROM t1 WHERE not exists(select 1 from t1 where f<=a)} +} {400} +do_test randexpr-2.790 { + db eval {SELECT case t1.a when c then -~t1.d*t1.e-coalesce((select max((abs(d+e+d)/abs(case when 11-t1.d in (select t1.d from t1 union select coalesce((select t1.c from t1 where 17<=t1.d and e between 11 and a),c) from t1) or 11 in (select ( -+(count(*))) from t1 union select count(distinct -t1.a) from t1) then (f) when b=a then c else t1.b end))) from t1 where t1.c not between 17 and t1.b),13)+t1.f else d end FROM t1 WHERE NOT (not exists(select 1 from t1 where f<=a))} +} {} +do_test randexpr-2.791 { + db eval {SELECT (select -max(d) from t1)+coalesce((select max(t1.e*t1.d*f) from t1 where t1.b-coalesce((select -a*d from t1 where t1.c not in (t1.f,coalesce((select b+t1.b from t1 where t1.a not between f and t1.b),coalesce((select -b from t1 where (t1.d>=t1.c) and 19<>t1.f),t1.a))-e*a,e)),(t1.a))+t1.f between 19 and e),t1.a)*a FROM t1 WHERE exists(select 1 from t1 where not exists(select 1 from t1 where case when t1.d-t1.b-t1.d*(case when t1.b>= -t1.d then c when t1.d not between c and t1.d then (19) else d end*e)+t1.d | 19>t1.c then e when exists(select 1 from t1 where t1.e not between d and t1.b or (17 in (t1.e, - -a,t1.d))) then t1.f else (e) end between 13 and e or 11>t1.b))} +} {9600} +do_test randexpr-2.792 { + db eval {SELECT (select -max(d) from t1)+coalesce((select max(t1.e*t1.d*f) from t1 where t1.b-coalesce((select -a*d from t1 where t1.c not in (t1.f,coalesce((select b+t1.b from t1 where t1.a not between f and t1.b),coalesce((select -b from t1 where (t1.d>=t1.c) and 19<>t1.f),t1.a))-e*a,e)),(t1.a))+t1.f between 19 and e),t1.a)*a FROM t1 WHERE NOT (exists(select 1 from t1 where not exists(select 1 from t1 where case when 
t1.d-t1.b-t1.d*(case when t1.b>= -t1.d then c when t1.d not between c and t1.d then (19) else d end*e)+t1.d | 19>t1.c then e when exists(select 1 from t1 where t1.e not between d and t1.b or (17 in (t1.e, - -a,t1.d))) then t1.f else (e) end between 13 and e or 11>t1.b)))} +} {} +do_test randexpr-2.793 { + db eval {SELECT 11-t1.d-t1.e-case when (select count(distinct t1.b+coalesce((select max((abs(19)/abs(case when coalesce((select t1.f from t1 where e not between t1.f and t1.f),f)*( -11)=((e)) then 11 else d end))) from t1 where e in (17,t1.b,t1.b)), -t1.a)-(t1.c)) from t1)>t1.f then t1.b when 11>13 and a>=f then 11 else d end-t1.f FROM t1 WHERE (select min(d)-case cast(avg(case (d-~t1.c+~(select max(+d-coalesce((select max(t1.f) from t1 where c<>e),t1.a))-(( - -(cast(avg(13) AS integer)))-count(distinct e))*max(17) from t1) | 19-13*b) when -t1.d then 13 else -13 end) AS integer) when min(t1.c) then cast(avg(e) AS integer) else ~count(*) end from t1)t1.f then t1.b when 11>13 and a>=f then 11 else d end-t1.f FROM t1 WHERE NOT ((select min(d)-case cast(avg(case (d-~t1.c+~(select max(+d-coalesce((select max(t1.f) from t1 where c<>e),t1.a))-(( - -(cast(avg(13) AS integer)))-count(distinct e))*max(17) from t1) | 19-13*b) when -t1.d then 13 else -13 end) AS integer) when min(t1.c) then cast(avg(e) AS integer) else ~count(*) end from t1)(c) then t1.d else 13 end end FROM t1 WHERE not ((abs(f)/abs(t1.b))<=(select min(+t1.d)+~+case count(distinct (abs(case d when 11 then coalesce((select coalesce((select max(t1.a) from t1 where t1.b=t1.e),t1.f) | a from t1 where 13<>(a)),d) else 17 end*f)/abs(t1.f))) when cast(avg(t1.c) AS integer) then case count(*) when min(t1.b) | cast(avg(t1.c) AS integer) then count(distinct f) else max(17) end else cast(avg((c)) AS integer) end from t1))} +} {} +do_test randexpr-2.796 { + db eval {SELECT case f when 13 then 19 else case when (case when c not between b and d then t1.c when +~t1.d+(select -+cast(avg(t1.a) AS integer) | ((max(t1.d)))*(max(e)) from t1)-19+t1.a*t1.d in (select ~max(c) from t1 union select count(distinct d)-count(distinct b) from t1) then f else -11 end not between 11 and 11) or 17>(c) then t1.d else 13 end end FROM t1 WHERE NOT (not ((abs(f)/abs(t1.b))<=(select min(+t1.d)+~+case count(distinct (abs(case d when 11 then coalesce((select coalesce((select max(t1.a) from t1 where t1.b=t1.e),t1.f) | a from t1 where 13<>(a)),d) else 17 end*f)/abs(t1.f))) when cast(avg(t1.c) AS integer) then case count(*) when min(t1.b) | cast(avg(t1.c) AS integer) then count(distinct f) else max(17) end else cast(avg((c)) AS integer) end from t1)))} +} {400} +do_test randexpr-2.797 { + db eval {SELECT case f when 13 then 19 else case when (case when c not between b and d then t1.c when +~t1.d+(select -+cast(avg(t1.a) AS integer) & ((max(t1.d)))*(max(e)) from t1)-19+t1.a*t1.d in (select ~max(c) from t1 union select count(distinct d)-count(distinct b) from t1) then f else -11 end not between 11 and 11) or 17>(c) then t1.d else 13 end end FROM t1 WHERE NOT (not ((abs(f)/abs(t1.b))<=(select min(+t1.d)+~+case count(distinct (abs(case d when 11 then coalesce((select coalesce((select max(t1.a) from t1 where t1.b=t1.e),t1.f) | a from t1 where 13<>(a)),d) else 17 end*f)/abs(t1.f))) when cast(avg(t1.c) AS integer) then case count(*) when min(t1.b) | cast(avg(t1.c) AS integer) then count(distinct f) else max(17) end else cast(avg((c)) AS integer) end from t1)))} +} {400} +do_test randexpr-2.798 { + db eval {SELECT f+case when (not exists(select 1 from t1 where ((17<>+t1.b+t1.f or b 
between t1.e and t1.e)))) then (abs( -(abs(case when exists(select 1 from t1 where not exists(select 1 from t1 where d in (select (select cast(avg((t1.b)) AS integer)-count(*) from t1) from t1 union select t1.e+(11) from t1))) then c when not t1.d between f and 11 or d>a then (a) else 13 end+t1.e+e)/abs(d)))/abs(t1.b)) else f end-13 FROM t1 WHERE case when 17< - -t1.b-case when not exists(select 1 from t1 where 19<>t1.d) then 13+coalesce((select max((abs((case case when - -t1.c*11 in (select +max(t1.d)*count(distinct -t1.c) from t1 union select max(t1.b) from t1) then 13 else 19 end when f then 19 else (t1.d) end)+e)/abs(f))) from t1 where (13 between t1.d and 19)),t1.b) else 13 end then a else t1.a end-13 not in (t1.d,d,(t1.f))} +} {1187} +do_test randexpr-2.799 { + db eval {SELECT f+case when (not exists(select 1 from t1 where ((17<>+t1.b+t1.f or b between t1.e and t1.e)))) then (abs( -(abs(case when exists(select 1 from t1 where not exists(select 1 from t1 where d in (select (select cast(avg((t1.b)) AS integer)-count(*) from t1) from t1 union select t1.e+(11) from t1))) then c when not t1.d between f and 11 or d>a then (a) else 13 end+t1.e+e)/abs(d)))/abs(t1.b)) else f end-13 FROM t1 WHERE NOT (case when 17< - -t1.b-case when not exists(select 1 from t1 where 19<>t1.d) then 13+coalesce((select max((abs((case case when - -t1.c*11 in (select +max(t1.d)*count(distinct -t1.c) from t1 union select max(t1.b) from t1) then 13 else 19 end when f then 19 else (t1.d) end)+e)/abs(f))) from t1 where (13 between t1.d and 19)),t1.b) else 13 end then a else t1.a end-13 not in (t1.d,d,(t1.f)))} +} {} +do_test randexpr-2.800 { + db eval {SELECT +~t1.a-(abs(+case when (((t1.d*t1.c+11 in (select cast(avg(17) AS integer) from t1 union select -(count(*)) from t1)))) then -coalesce((select t1.e-b from t1 where not exists(select 1 from t1 where t1.c in (select max(f) from t1 union select max(e) | max( -t1.e)-count(*) from t1))),f) else f end)/abs(t1.f))-e+11+t1.b+t1.a- -17 FROM t1 WHERE +b between case when c<=t1.a then t1.b when case when -t1.a*d-17+17*~e- -13 in (select + -max(19) | +cast(avg(19) AS integer) from t1 union select (count(distinct -19)*case max((13)) when max(e)*count(*) then min(t1.d) else count(*) end) from t1) then t1.f else (d) end in (select e from t1 union select d from t1) then f else -t1.b end and b} +} {} +do_test randexpr-2.801 { + db eval {SELECT +~t1.a-(abs(+case when (((t1.d*t1.c+11 in (select cast(avg(17) AS integer) from t1 union select -(count(*)) from t1)))) then -coalesce((select t1.e-b from t1 where not exists(select 1 from t1 where t1.c in (select max(f) from t1 union select max(e) | max( -t1.e)-count(*) from t1))),f) else f end)/abs(t1.f))-e+11+t1.b+t1.a- -17 FROM t1 WHERE NOT (+b between case when c<=t1.a then t1.b when case when -t1.a*d-17+17*~e- -13 in (select + -max(19) | +cast(avg(19) AS integer) from t1 union select (count(distinct -19)*case max((13)) when max(e)*count(*) then min(t1.d) else count(*) end) from t1) then t1.f else (d) end in (select e from t1 union select d from t1) then f else -t1.b end and b)} +} {-274} +do_test randexpr-2.802 { + db eval {SELECT +~t1.a-(abs(+case when (((t1.d*t1.c+11 in (select cast(avg(17) AS integer) from t1 union select -(count(*)) from t1)))) then -coalesce((select t1.e-b from t1 where not exists(select 1 from t1 where t1.c in (select max(f) from t1 union select max(e) & max( -t1.e)-count(*) from t1))),f) else f end)/abs(t1.f))-e+11+t1.b+t1.a- -17 FROM t1 WHERE NOT (+b between case when c<=t1.a then t1.b when case when 
-t1.a*d-17+17*~e- -13 in (select + -max(19) | +cast(avg(19) AS integer) from t1 union select (count(distinct -19)*case max((13)) when max(e)*count(*) then min(t1.d) else count(*) end) from t1) then t1.f else (d) end in (select e from t1 union select d from t1) then f else -t1.b end and b)} +} {-274} +do_test randexpr-2.803 { + db eval {SELECT case when 19=(select ( -count(*)* -(cast(avg(t1.e) AS integer))+count(distinct 13)*min(t1.f)) | cast(avg(t1.e) AS integer) | -cast(avg(13) AS integer)-(min(d)) from t1) then 13*a when t1.a+((select min((t1.d)) from t1)) in (select t1.b from t1 union select 11 from t1) or not exists(select 1 from t1 where not exists(select 1 from t1 where - -e in (select t1.e from t1 union select t1.d from t1) or t1.d in (select count(distinct t1.a) from t1 union select count(*) from t1)) or -b=(19) and 1117 and t1.a>= -t1.b),t1.f) when t1.d then c else t1.d end end in (select count(*) from t1 union select ~abs(min(t1.a)) | count(*) from t1) then 17 when t1.f not between (e) and t1.c then f else b end+c+a end FROM t1 WHERE t1.f in (case when ~(select max(d) from t1)*t1.f>=case when 17*d17 and t1.a>= -t1.b),t1.f) when t1.d then c else t1.d end end in (select count(*) from t1 union select ~abs(min(t1.a)) | count(*) from t1) then 17 when t1.f not between (e) and t1.c then f else b end+c+a end FROM t1 WHERE NOT (t1.f in (case when ~(select max(d) from t1)*t1.f>=case when 17*d17 and t1.a>= -t1.b),t1.f) when t1.d then c else t1.d end end in (select count(*) from t1 union select ~abs(min(t1.a)) & count(*) from t1) then 17 when t1.f not between (e) and t1.c then f else b end+c+a end FROM t1 WHERE NOT (t1.f in (case when ~(select max(d) from t1)*t1.f>=case when 17*d=(select cast(avg(13) AS integer) from t1)) and (exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select t1.c from t1 where (b between d and 13)),t1.e)>13))) then case when b not between 19 and t1.c then a else 13 end+t1.b when t1.c>11 then c else b end then 13 else t1.d end*t1.f)/abs(17)) FROM t1 WHERE c not in (coalesce((select max(d) from t1 where (select min(t1.e) from t1)-17<=coalesce((select t1.c from t1 where exists(select 1 from t1 where 17<19 or coalesce((select 13 from t1 where t1.e not in (a+t1.c,t1.e*t1.a+t1.d* -a*(a)*f,((b)))),19) not in (c,t1.f,t1.b))),(select abs( -max(19)) from t1))),t1.f)-t1.b, -f,17)} +} {458} +do_test randexpr-2.812 { + db eval {SELECT (abs(case when a*case when e<+(t1.e) then e else d end+t1.f=(select cast(avg(13) AS integer) from t1)) and (exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select t1.c from t1 where (b between d and 13)),t1.e)>13))) then case when b not between 19 and t1.c then a else 13 end+t1.b when t1.c>11 then c else b end then 13 else t1.d end*t1.f)/abs(17)) FROM t1 WHERE NOT (c not in (coalesce((select max(d) from t1 where (select min(t1.e) from t1)-17<=coalesce((select t1.c from t1 where exists(select 1 from t1 where 17<19 or coalesce((select 13 from t1 where t1.e not in (a+t1.c,t1.e*t1.a+t1.d* -a*(a)*f,((b)))),19) not in (c,t1.f,t1.b))),(select abs( -max(19)) from t1))),t1.f)-t1.b, -f,17))} +} {} +do_test randexpr-2.813 { + db eval {SELECT t1.c*case when (t1.f in (select case (coalesce((select t1.d from t1 where not (coalesce((select d from t1 where f<=(11)),19)*e in (select ((min((17)) | -min(t1.b))) from t1 union select -cast(avg(t1.f) AS integer) from t1))),19)+11)*t1.a when t1.d then t1.a else f end-e-t1.a*( -17) from t1 union select c from t1) or 13 not between t1.e and b) then d+19-t1.f else t1.c end-e 
FROM t1 WHERE t1.f in (~t1.b,13,+e) and 11<>d-case when t1.e<>t1.b or f>t1.f and 19 not between 17 and case when (b not in (b,t1.d,a)) then e else (abs(11)/abs(d)) end and not exists(select 1 from t1 where t1.a<>c) then t1.f when not (t1.b) in (select t1.d from t1 union select t1.c from t1) then 13 else t1.b+a end} +} {} +do_test randexpr-2.814 { + db eval {SELECT t1.c*case when (t1.f in (select case (coalesce((select t1.d from t1 where not (coalesce((select d from t1 where f<=(11)),19)*e in (select ((min((17)) | -min(t1.b))) from t1 union select -cast(avg(t1.f) AS integer) from t1))),19)+11)*t1.a when t1.d then t1.a else f end-e-t1.a*( -17) from t1 union select c from t1) or 13 not between t1.e and b) then d+19-t1.f else t1.c end-e FROM t1 WHERE NOT (t1.f in (~t1.b,13,+e) and 11<>d-case when t1.e<>t1.b or f>t1.f and 19 not between 17 and case when (b not in (b,t1.d,a)) then e else (abs(11)/abs(d)) end and not exists(select 1 from t1 where t1.a<>c) then t1.f when not (t1.b) in (select t1.d from t1 union select t1.c from t1) then 13 else t1.b+a end)} +} {-54800} +do_test randexpr-2.815 { + db eval {SELECT t1.c*case when (t1.f in (select case (coalesce((select t1.d from t1 where not (coalesce((select d from t1 where f<=(11)),19)*e in (select ((min((17)) & -min(t1.b))) from t1 union select -cast(avg(t1.f) AS integer) from t1))),19)+11)*t1.a when t1.d then t1.a else f end-e-t1.a*( -17) from t1 union select c from t1) or 13 not between t1.e and b) then d+19-t1.f else t1.c end-e FROM t1 WHERE NOT (t1.f in (~t1.b,13,+e) and 11<>d-case when t1.e<>t1.b or f>t1.f and 19 not between 17 and case when (b not in (b,t1.d,a)) then e else (abs(11)/abs(d)) end and not exists(select 1 from t1 where t1.a<>c) then t1.f when not (t1.b) in (select t1.d from t1 union select t1.c from t1) then 13 else t1.b+a end)} +} {-54800} +do_test randexpr-2.816 { + db eval {SELECT e-t1.e+a*f*11*c*case when ~f*t1.d-t1.c*b-13*19+(abs(t1.d)/abs(case when t1.c=13 then 17 else 19 end))*a between b*(select abs(cast(avg(( -13)) AS integer)) from t1)-19+19 and t1.b then t1.d else t1.d end+13 FROM t1 WHERE t1.b not between a and e} +} {} +do_test randexpr-2.817 { + db eval {SELECT e-t1.e+a*f*11*c*case when ~f*t1.d-t1.c*b-13*19+(abs(t1.d)/abs(case when t1.c=13 then 17 else 19 end))*a between b*(select abs(cast(avg(( -13)) AS integer)) from t1)-19+19 and t1.b then t1.d else t1.d end+13 FROM t1 WHERE NOT (t1.b not between a and e)} +} {79200000013} +do_test randexpr-2.818 { + db eval {SELECT case case when coalesce((select b from t1 where not exists(select 1 from t1 where d-e+coalesce((select max(b) from t1 where case case when t1.d-t1.e not between t1.f and e then 11 else t1.d end when t1.e then f else b end+t1.e=13),t1.c) not in (( -c),c,t1.d))),t1.f)<>a and c not in (t1.b,c,t1.a) then (select count(*) from t1) else c end-11 when t1.d then t1.e else c end FROM t1 WHERE ~b | coalesce((select case (select +count(distinct b) from t1) when coalesce((select case when coalesce((select e from t1 where c>b),t1.c) between 11 and (b) then t1.d else t1.b end-e from t1 where t1.d in (13,t1.a,f)),17) then e else 17 end from t1 where not b=t1.c or t1.d in (select abs(abs( -min(13)*(cast(avg(t1.c) AS integer)))) from t1 union select (max(c)) from t1) and b>t1.a),t1.f) in (select 11 from t1 union select c from t1)} +} {} +do_test randexpr-2.819 { + db eval {SELECT case case when coalesce((select b from t1 where not exists(select 1 from t1 where d-e+coalesce((select max(b) from t1 where case case when t1.d-t1.e not between t1.f and e then 11 else t1.d 
end when t1.e then f else b end+t1.e=13),t1.c) not in (( -c),c,t1.d))),t1.f)<>a and c not in (t1.b,c,t1.a) then (select count(*) from t1) else c end-11 when t1.d then t1.e else c end FROM t1 WHERE NOT (~b | coalesce((select case (select +count(distinct b) from t1) when coalesce((select case when coalesce((select e from t1 where c>b),t1.c) between 11 and (b) then t1.d else t1.b end-e from t1 where t1.d in (13,t1.a,f)),17) then e else 17 end from t1 where not b=t1.c or t1.d in (select abs(abs( -min(13)*(cast(avg(t1.c) AS integer)))) from t1 union select (max(c)) from t1) and b>t1.a),t1.f) in (select 11 from t1 union select c from t1))} +} {300} +do_test randexpr-2.820 { + db eval {SELECT case t1.c when t1.e then 19 else -case when not exists(select 1 from t1 where t1.b in (select (t1.b) from t1 union select +13 from t1)) then coalesce((select max(c*t1.b) from t1 where 17>=19*e),coalesce((select (select abs(~min(coalesce((select max(t1.a) from t1 where -19 in (f,a,13)),f)))+min(e) | ( -(min(13))) from t1) from t1 where f between -a and t1.c),t1.e)) when 19<>a then 13 else c end end FROM t1 WHERE t1.e<>f} +} {-13} +do_test randexpr-2.821 { + db eval {SELECT case t1.c when t1.e then 19 else -case when not exists(select 1 from t1 where t1.b in (select (t1.b) from t1 union select +13 from t1)) then coalesce((select max(c*t1.b) from t1 where 17>=19*e),coalesce((select (select abs(~min(coalesce((select max(t1.a) from t1 where -19 in (f,a,13)),f)))+min(e) | ( -(min(13))) from t1) from t1 where f between -a and t1.c),t1.e)) when 19<>a then 13 else c end end FROM t1 WHERE NOT (t1.e<>f)} +} {} +do_test randexpr-2.822 { + db eval {SELECT case t1.c when t1.e then 19 else -case when not exists(select 1 from t1 where t1.b in (select (t1.b) from t1 union select +13 from t1)) then coalesce((select max(c*t1.b) from t1 where 17>=19*e),coalesce((select (select abs(~min(coalesce((select max(t1.a) from t1 where -19 in (f,a,13)),f)))+min(e) & ( -(min(13))) from t1) from t1 where f between -a and t1.c),t1.e)) when 19<>a then 13 else c end end FROM t1 WHERE t1.e<>f} +} {-13} +do_test randexpr-2.823 { + db eval {SELECT 13- -(abs(coalesce((select max(17) from t1 where f*b in (select count(*)*case -min(~t1.f)+min(coalesce((select -(select min(17) from t1) from t1 where (t1.b in (select abs(min(t1.f)) from t1 union select -max(t1.e) from t1))),17)+d) when count(distinct a) then abs(count(*)) else count(*) end from t1 union select min(t1.c) from t1)),coalesce((select max(f) from t1 where d in (select t1.b from t1 union select t1.e from t1)),17)) | t1.a)/abs(c)) | t1.f FROM t1 WHERE (abs(t1.f*11++ -f)/abs(19 | (abs(((abs(f)/abs(17))))/abs(case when a not in (t1.d,(c)-~c*f | -13-t1.b+ -d,13) then t1.f when 11=11 then c else d end))*d+t1.c))+e+ -(t1.d) in (select t1.a from t1 union select 19 from t1)} +} {} +do_test randexpr-2.824 { + db eval {SELECT 13- -(abs(coalesce((select max(17) from t1 where f*b in (select count(*)*case -min(~t1.f)+min(coalesce((select -(select min(17) from t1) from t1 where (t1.b in (select abs(min(t1.f)) from t1 union select -max(t1.e) from t1))),17)+d) when count(distinct a) then abs(count(*)) else count(*) end from t1 union select min(t1.c) from t1)),coalesce((select max(f) from t1 where d in (select t1.b from t1 union select t1.e from t1)),17)) | t1.a)/abs(c)) | t1.f FROM t1 WHERE NOT ((abs(t1.f*11++ -f)/abs(19 | (abs(((abs(f)/abs(17))))/abs(case when a not in (t1.d,(c)-~c*f | -13-t1.b+ -d,13) then t1.f when 11=11 then c else d end))*d+t1.c))+e+ -(t1.d) in (select t1.a from t1 union select 19 
from t1))} +} {605} +do_test randexpr-2.825 { + db eval {SELECT 13- -(abs(coalesce((select max(17) from t1 where f*b in (select count(*)*case -min(~t1.f)+min(coalesce((select -(select min(17) from t1) from t1 where (t1.b in (select abs(min(t1.f)) from t1 union select -max(t1.e) from t1))),17)+d) when count(distinct a) then abs(count(*)) else count(*) end from t1 union select min(t1.c) from t1)),coalesce((select max(f) from t1 where d in (select t1.b from t1 union select t1.e from t1)),17)) & t1.a)/abs(c)) & t1.f FROM t1 WHERE NOT ((abs(t1.f*11++ -f)/abs(19 | (abs(((abs(f)/abs(17))))/abs(case when a not in (t1.d,(c)-~c*f | -13-t1.b+ -d,13) then t1.f when 11=11 then c else d end))*d+t1.c))+e+ -(t1.d) in (select t1.a from t1 union select 19 from t1))} +} {8} +do_test randexpr-2.826 { + db eval {SELECT (a+case when (exists(select 1 from t1 where not exists(select 1 from t1 where not t1.e*13=13-f or (e)=t1.c or -d not between t1.b and t1.a or e not between 17 and (a) and 11 not in (a,(t1.f),a) or t1.e<>13 or t1.d between t1.e and t1.c or t1.d=17 or a>=b))) and a between t1.e and t1.b then case when e not between (17) and t1.d then ~c*coalesce((select t1.a | 13+d from t1 where t1.e not between b and 11),t1.a) else t1.f end else t1.f end-11) FROM t1 WHERE case when case case when case when -(select +min(11) from t1)*(13)>11 then t1.e when 13t1.a then t1.d else -t1.e end when (a) then a else t1.e end>t1.a then 13 when t1.a not in (t1.f,t1.b,t1.a) then a else t1.c end in (select cast(avg(13) AS integer) from t1 union select (max(t1.e)+min(17)) from t1)} +} {689} +do_test randexpr-2.827 { + db eval {SELECT (a+case when (exists(select 1 from t1 where not exists(select 1 from t1 where not t1.e*13=13-f or (e)=t1.c or -d not between t1.b and t1.a or e not between 17 and (a) and 11 not in (a,(t1.f),a) or t1.e<>13 or t1.d between t1.e and t1.c or t1.d=17 or a>=b))) and a between t1.e and t1.b then case when e not between (17) and t1.d then ~c*coalesce((select t1.a | 13+d from t1 where t1.e not between b and 11),t1.a) else t1.f end else t1.f end-11) FROM t1 WHERE NOT (case when case case when case when -(select +min(11) from t1)*(13)>11 then t1.e when 13t1.a then t1.d else -t1.e end when (a) then a else t1.e end>t1.a then 13 when t1.a not in (t1.f,t1.b,t1.a) then a else t1.c end in (select cast(avg(13) AS integer) from t1 union select (max(t1.e)+min(17)) from t1))} +} {} +do_test randexpr-2.828 { + db eval {SELECT (a+case when (exists(select 1 from t1 where not exists(select 1 from t1 where not t1.e*13=13-f or (e)=t1.c or -d not between t1.b and t1.a or e not between 17 and (a) and 11 not in (a,(t1.f),a) or t1.e<>13 or t1.d between t1.e and t1.c or t1.d=17 or a>=b))) and a between t1.e and t1.b then case when e not between (17) and t1.d then ~c*coalesce((select t1.a & 13+d from t1 where t1.e not between b and 11),t1.a) else t1.f end else t1.f end-11) FROM t1 WHERE case when case case when case when -(select +min(11) from t1)*(13)>11 then t1.e when 13t1.a then t1.d else -t1.e end when (a) then a else t1.e end>t1.a then 13 when t1.a not in (t1.f,t1.b,t1.a) then a else t1.c end in (select cast(avg(13) AS integer) from t1 union select (max(t1.e)+min(17)) from t1)} +} {689} +do_test randexpr-2.829 { + db eval {SELECT ~b-t1.a-f*+case when exists(select 1 from t1 where 17<>t1.d-f or not exists(select 1 from t1 where (select (min(a)+count(distinct d)) from t1)+( -f)- -cf and not exists(select 1 from t1 where t1.ct1.d-f or not exists(select 1 from t1 where (select (min(a)+count(distinct d)) from t1)+( -f)- -cf and 
not exists(select 1 from t1 where t1.c19 then 13 else c end*d+t1.c then f else 17 end) from t1 where d<=e), -d) then 19 else 17 end in (t1.b,19,c) then 19 when t1.a in (select a from t1 union select 13 from t1) then t1.d else t1.d end FROM t1 WHERE (select abs(cast(avg(t1.d*t1.f) AS integer))-cast(avg(b) AS integer) from t1)-~coalesce((select max(t1.a+t1.d-coalesce((select max(a*case when t1.e<=e then f when t1.d<>a then 11 else 11 end*t1.a*11) from t1 where t1.d>=d),t1.f)) from t1 where 11 not between (13) and 17),f)*t1.b+17-e not in (t1.d,t1.f,t1.f)} +} {400} +do_test randexpr-2.834 { + db eval {SELECT case when case ~( -t1.e)-a when coalesce((select max(case +t1.e when t1.b*~19 | t1.a*case when c-c<>19 then 13 else c end*d+t1.c then f else 17 end) from t1 where d<=e), -d) then 19 else 17 end in (t1.b,19,c) then 19 when t1.a in (select a from t1 union select 13 from t1) then t1.d else t1.d end FROM t1 WHERE NOT ((select abs(cast(avg(t1.d*t1.f) AS integer))-cast(avg(b) AS integer) from t1)-~coalesce((select max(t1.a+t1.d-coalesce((select max(a*case when t1.e<=e then f when t1.d<>a then 11 else 11 end*t1.a*11) from t1 where t1.d>=d),t1.f)) from t1 where 11 not between (13) and 17),f)*t1.b+17-e not in (t1.d,t1.f,t1.f))} +} {} +do_test randexpr-2.835 { + db eval {SELECT case when case ~( -t1.e)-a when coalesce((select max(case +t1.e when t1.b*~19 & t1.a*case when c-c<>19 then 13 else c end*d+t1.c then f else 17 end) from t1 where d<=e), -d) then 19 else 17 end in (t1.b,19,c) then 19 when t1.a in (select a from t1 union select 13 from t1) then t1.d else t1.d end FROM t1 WHERE (select abs(cast(avg(t1.d*t1.f) AS integer))-cast(avg(b) AS integer) from t1)-~coalesce((select max(t1.a+t1.d-coalesce((select max(a*case when t1.e<=e then f when t1.d<>a then 11 else 11 end*t1.a*11) from t1 where t1.d>=d),t1.f)) from t1 where 11 not between (13) and 17),f)*t1.b+17-e not in (t1.d,t1.f,t1.f)} +} {400} +do_test randexpr-2.836 { + db eval {SELECT t1.d+case when case when case when (19 not between ~(t1.a)*b and t1.c) then (select (abs(case count(distinct t1.e) when min( -f) then -max(t1.d) else cast(avg(19) AS integer) end-(count(distinct t1.e)))) from t1) when a<>t1.f then -19 else 11 end>f or b in ((13),f,11) then t1.a when t1.e in (e,f,t1.f) then (t1.d) else t1.d end in (17,t1.c,17) then 13 when b not between t1.a and (19) then t1.e else -11 end+a+((f)) FROM t1 WHERE t1.f in (select case +cast(avg(t1.f+c) AS integer)++count(*) when count(*) then ~~(count(distinct 11+t1.a)) else +(case max(e-f) | ~ -max(t1.f) | -abs(abs(min(e-19+e))-abs(count(*)))-abs(cast(avg(f) AS integer)) when (min(f)) then cast(avg(a) AS integer) else min(13) end) end from t1 union select -count(distinct 13) from t1)} +} {} +do_test randexpr-2.837 { + db eval {SELECT t1.d+case when case when case when (19 not between ~(t1.a)*b and t1.c) then (select (abs(case count(distinct t1.e) when min( -f) then -max(t1.d) else cast(avg(19) AS integer) end-(count(distinct t1.e)))) from t1) when a<>t1.f then -19 else 11 end>f or b in ((13),f,11) then t1.a when t1.e in (e,f,t1.f) then (t1.d) else t1.d end in (17,t1.c,17) then 13 when b not between t1.a and (19) then t1.e else -11 end+a+((f)) FROM t1 WHERE NOT (t1.f in (select case +cast(avg(t1.f+c) AS integer)++count(*) when count(*) then ~~(count(distinct 11+t1.a)) else +(case max(e-f) | ~ -max(t1.f) | -abs(abs(min(e-19+e))-abs(count(*)))-abs(cast(avg(f) AS integer)) when (min(f)) then cast(avg(a) AS integer) else min(13) end) end from t1 union select -count(distinct 13) from t1))} +} {1600} 
+do_test randexpr-2.838 { + db eval {SELECT (select (min(case when b+b*(select cast(avg(e+19-t1.f*case when case when not (c) in (select (e) from t1 union select f from t1) or t1.b not in (t1.e,t1.b,11) or 19= -t1.a then 17 else -d end13 then 13 else t1.f end+11*19) AS integer) from t1)+d not between 11 and c then t1.c else t1.c end)) from t1) FROM t1 WHERE 13 not between +b* -f and coalesce((select f*19-case when not a=~f+t1.d then case when d*b not in (case when d not between -c and t1.d then e else 13 end+17,c,t1.a) then b when t1.e>t1.f then b else -d end else f end-e+(d) from t1 where 17 between t1.a and t1.e),t1.d) | t1.b} +} {} +do_test randexpr-2.839 { + db eval {SELECT (select (min(case when b+b*(select cast(avg(e+19-t1.f*case when case when not (c) in (select (e) from t1 union select f from t1) or t1.b not in (t1.e,t1.b,11) or 19= -t1.a then 17 else -d end13 then 13 else t1.f end+11*19) AS integer) from t1)+d not between 11 and c then t1.c else t1.c end)) from t1) FROM t1 WHERE NOT (13 not between +b* -f and coalesce((select f*19-case when not a=~f+t1.d then case when d*b not in (case when d not between -c and t1.d then e else 13 end+17,c,t1.a) then b when t1.e>t1.f then b else -d end else f end-e+(d) from t1 where 17 between t1.a and t1.e),t1.d) | t1.b)} +} {300} +do_test randexpr-2.840 { + db eval {SELECT t1.c-(abs((select (case abs(count(distinct 11-c*11)) when count(*) then count(*) else -min(+e+~e+coalesce((select d-a*t1.d+t1.e from t1 where (t1.f>=17) and t1.e in (11,a,e)),13)*t1.d) end) from t1)-(a-t1.f)-t1.d)/abs(d)) FROM t1 WHERE t1.c in (select d from t1 union select ( -t1.e) from t1)} +} {} +do_test randexpr-2.841 { + db eval {SELECT t1.c-(abs((select (case abs(count(distinct 11-c*11)) when count(*) then count(*) else -min(+e+~e+coalesce((select d-a*t1.d+t1.e from t1 where (t1.f>=17) and t1.e in (11,a,e)),13)*t1.d) end) from t1)-(a-t1.f)-t1.d)/abs(d)) FROM t1 WHERE NOT (t1.c in (select d from t1 union select ( -t1.e) from t1))} +} {300} +do_test randexpr-2.842 { + db eval {SELECT case when coalesce((select max(t1.a-case when not (coalesce((select d+c from t1 where c between ( -(a)) and b or 17 between 11 and (c)),(t1.f)))=f then b | d when f not in (t1.b,f,a) then t1.a else f end-t1.f) from t1 where 19<=11 and t1.c between 19 and t1.a or t1.f>11), -t1.e)*e between b and 17 then -t1.f when -a<=19 or -11<>t1.f then -19 else d end FROM t1 WHERE ((13 in (t1.b,f,b)))} +} {} +do_test randexpr-2.843 { + db eval {SELECT case when coalesce((select max(t1.a-case when not (coalesce((select d+c from t1 where c between ( -(a)) and b or 17 between 11 and (c)),(t1.f)))=f then b | d when f not in (t1.b,f,a) then t1.a else f end-t1.f) from t1 where 19<=11 and t1.c between 19 and t1.a or t1.f>11), -t1.e)*e between b and 17 then -t1.f when -a<=19 or -11<>t1.f then -19 else d end FROM t1 WHERE NOT (((13 in (t1.b,f,b))))} +} {-19} +do_test randexpr-2.844 { + db eval {SELECT case when coalesce((select max(t1.a-case when not (coalesce((select d+c from t1 where c between ( -(a)) and b or 17 between 11 and (c)),(t1.f)))=f then b & d when f not in (t1.b,f,a) then t1.a else f end-t1.f) from t1 where 19<=11 and t1.c between 19 and t1.a or t1.f>11), -t1.e)*e between b and 17 then -t1.f when -a<=19 or -11<>t1.f then -19 else d end FROM t1 WHERE NOT (((13 in (t1.b,f,b))))} +} {-19} +do_test randexpr-2.845 { + db eval {SELECT case -(abs(t1.b-13*d*b*t1.e- -11)/abs(+d-t1.f*(11+ -13)-t1.c))+c+(e*t1.c)+f*17*a | t1.e-13+f when t1.d then t1.e else f end-t1.f FROM t1 WHERE -b*t1.c>13} +} {} +do_test 
randexpr-2.846 { + db eval {SELECT case -(abs(t1.b-13*d*b*t1.e- -11)/abs(+d-t1.f*(11+ -13)-t1.c))+c+(e*t1.c)+f*17*a | t1.e-13+f when t1.d then t1.e else f end-t1.f FROM t1 WHERE NOT ( -b*t1.c>13)} +} {0} +do_test randexpr-2.847 { + db eval {SELECT case -(abs(t1.b-13*d*b*t1.e- -11)/abs(+d-t1.f*(11+ -13)-t1.c))+c+(e*t1.c)+f*17*a & t1.e-13+f when t1.d then t1.e else f end-t1.f FROM t1 WHERE NOT ( -b*t1.c>13)} +} {0} +do_test randexpr-2.848 { + db eval {SELECT case when t1.c between t1.e and ~t1.d+(abs(~d*case t1.b-19 when t1.e then e else e end)/abs(case case t1.b when c then t1.d else 13+17 | e end when (select abs(abs(min(11) | max((a)))) from t1) then d else ~t1.f end)) | t1.b then t1.c when (not (11>=b)) then 13 else e end-a FROM t1 WHERE +13*case when +11*t1.f=f+t1.b then t1.f else t1.b end not between coalesce((select max(f) from t1 where (not exists(select 1 from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where t1.c in (select abs(min(e)) from t1 union select ~ -case cast(avg(~17) AS integer) when min(e | 17) then min(e) else cast(avg(c) AS integer) end from t1)))) or f>e*t1.a+e)),t1.c) and 17} +} {-87} +do_test randexpr-2.849 { + db eval {SELECT case when t1.c between t1.e and ~t1.d+(abs(~d*case t1.b-19 when t1.e then e else e end)/abs(case case t1.b when c then t1.d else 13+17 | e end when (select abs(abs(min(11) | max((a)))) from t1) then d else ~t1.f end)) | t1.b then t1.c when (not (11>=b)) then 13 else e end-a FROM t1 WHERE NOT (+13*case when +11*t1.f=f+t1.b then t1.f else t1.b end not between coalesce((select max(f) from t1 where (not exists(select 1 from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where t1.c in (select abs(min(e)) from t1 union select ~ -case cast(avg(~17) AS integer) when min(e | 17) then min(e) else cast(avg(c) AS integer) end from t1)))) or f>e*t1.a+e)),t1.c) and 17)} +} {} +do_test randexpr-2.850 { + db eval {SELECT case when t1.c between t1.e and ~t1.d+(abs(~d*case t1.b-19 when t1.e then e else e end)/abs(case case t1.b when c then t1.d else 13+17 & e end when (select abs(abs(min(11) & max((a)))) from t1) then d else ~t1.f end)) & t1.b then t1.c when (not (11>=b)) then 13 else e end-a FROM t1 WHERE +13*case when +11*t1.f=f+t1.b then t1.f else t1.b end not between coalesce((select max(f) from t1 where (not exists(select 1 from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where t1.c in (select abs(min(e)) from t1 union select ~ -case cast(avg(~17) AS integer) when min(e | 17) then min(e) else cast(avg(c) AS integer) end from t1)))) or f>e*t1.a+e)),t1.c) and 17} +} {-87} +do_test randexpr-2.851 { + db eval {SELECT coalesce((select +case when case d when b then e else (select min(case b when 17 then b else t1.c end)+ -+ -(abs(max(t1.e))-min(t1.d)+max( -d)) | count(distinct t1.e)+( -cast(avg(13) AS integer)) | max(t1.f)+min(e)+count(*) from t1) end<>b then t1.a else 19*~t1.e-c | t1.c end from t1 where b in ( -c,t1.f,t1.c)),19) FROM t1 WHERE (abs(d)/abs(coalesce((select max(coalesce((select d*c from t1 where ((abs(t1.b)/abs(t1.d))-(abs(t1.f)/abs(t1.c))+(t1.a)*13-t1.e not between t1.e and ~f*11+t1.f-11 | t1.a+13+d)),19)) from t1 where c not in (+ -11,a,c)),t1.c))) in (b,b,a)} +} {} +do_test randexpr-2.852 { + db eval {SELECT coalesce((select +case when case d when b then e else (select min(case b when 17 then b else t1.c end)+ -+ -(abs(max(t1.e))-min(t1.d)+max( -d)) | count(distinct t1.e)+( -cast(avg(13) AS integer)) | max(t1.f)+min(e)+count(*) from t1) end<>b then t1.a else 
19*~t1.e-c | t1.c end from t1 where b in ( -c,t1.f,t1.c)),19) FROM t1 WHERE NOT ((abs(d)/abs(coalesce((select max(coalesce((select d*c from t1 where ((abs(t1.b)/abs(t1.d))-(abs(t1.f)/abs(t1.c))+(t1.a)*13-t1.e not between t1.e and ~f*11+t1.f-11 | t1.a+13+d)),19)) from t1 where c not in (+ -11,a,c)),t1.c))) in (b,b,a))} +} {19} +do_test randexpr-2.853 { + db eval {SELECT coalesce((select +case when case d when b then e else (select min(case b when 17 then b else t1.c end)+ -+ -(abs(max(t1.e))-min(t1.d)+max( -d)) & count(distinct t1.e)+( -cast(avg(13) AS integer)) & max(t1.f)+min(e)+count(*) from t1) end<>b then t1.a else 19*~t1.e-c & t1.c end from t1 where b in ( -c,t1.f,t1.c)),19) FROM t1 WHERE NOT ((abs(d)/abs(coalesce((select max(coalesce((select d*c from t1 where ((abs(t1.b)/abs(t1.d))-(abs(t1.f)/abs(t1.c))+(t1.a)*13-t1.e not between t1.e and ~f*11+t1.f-11 | t1.a+13+d)),19)) from t1 where c not in (+ -11,a,c)),t1.c))) in (b,b,a))} +} {19} +do_test randexpr-2.854 { + db eval {SELECT a-a-(abs(f)/abs(coalesce((select 17+19 from t1 where (select min(t1.e*e) from t1)>t1.b+case when not exists(select 1 from t1 where e not between 11 and t1.f-e) then coalesce((select max(f*case when t1.f not in (19,t1.c,(t1.a)) and c<=17 then 11 when 17 not between 11 and 17 then c else 17 end) from t1 where d not between t1.a and 13),t1.c) else t1.b end-13 and a=t1.e),17)+t1.c))-f FROM t1 WHERE +(select cast(avg(13-t1.a-t1.b++t1.a-(select count(*) from t1)+case b-(select +~ -min(case when exists(select 1 from t1 where ((select count(*)+ -(min(13)) from t1)) not between 13 and 13) then d else e end) from t1) when f*t1.b*t1.b then t1.a else b end*t1.a) AS integer) from t1)*t1.b in (t1.f,17,f)} +} {} +do_test randexpr-2.855 { + db eval {SELECT a-a-(abs(f)/abs(coalesce((select 17+19 from t1 where (select min(t1.e*e) from t1)>t1.b+case when not exists(select 1 from t1 where e not between 11 and t1.f-e) then coalesce((select max(f*case when t1.f not in (19,t1.c,(t1.a)) and c<=17 then 11 when 17 not between 11 and 17 then c else 17 end) from t1 where d not between t1.a and 13),t1.c) else t1.b end-13 and a=t1.e),17)+t1.c))-f FROM t1 WHERE NOT (+(select cast(avg(13-t1.a-t1.b++t1.a-(select count(*) from t1)+case b-(select +~ -min(case when exists(select 1 from t1 where ((select count(*)+ -(min(13)) from t1)) not between 13 and 13) then d else e end) from t1) when f*t1.b*t1.b then t1.a else b end*t1.a) AS integer) from t1)*t1.b in (t1.f,17,f))} +} {-601} +do_test randexpr-2.856 { + db eval {SELECT case when (11+19 | t1.e-e*17=t1.e*~d+t1.a+ -d | a-~(abs((abs((case when 13 not in (b,((t1.a)),f) then t1.d else t1.a end+e))/abs(t1.f))*b+t1.b)/abs(17))+t1.e+t1.a) then 11 else t1.a end FROM t1 WHERE case (case when (select +~ -~ -cast(avg(b*e*case when t1.a between t1.c and a and d<13 then b else 19 end* -f*d) AS integer) | count(*)++~+min((11))*max(19)-min((b)) from t1) in (select 13 from t1 union select (select min(19) from t1) from t1) then t1.f+t1.d else 13 end-t1.e) when t1.e then t1.f else a end -t1.b then t1.f else t1.e end)*min(t1.a)+ -count(*) | +max(e)*(min(t1.e)) | count(distinct t1.d) | (count(*)) when count(distinct ((19))) then count(*) else max(t1.f) end from t1)} +} {} +do_test randexpr-2.860 { + db eval {SELECT coalesce((select max(+t1.a) from t1 where c in (select -max(d | coalesce((select max(t1.b) from t1 where (abs(case d when t1.f then t1.e else t1.c end-t1.f)/abs(13))<=11), -a)) | min(t1.c) | (max(f))++abs(case max(b) when (count(distinct e)) then count(*) else min(17) end) from t1 union select (min(19)) 
from t1) or not exists(select 1 from t1 where coalesce((select c from t1 where b in (select - - - -count(*) from t1 union select -min((a)) from t1)),13) -t1.b then t1.f else t1.e end)*min(t1.a)+ -count(*) | +max(e)*(min(t1.e)) | count(distinct t1.d) | (count(*)) when count(distinct ((19))) then count(*) else max(t1.f) end from t1))} +} {17} +do_test randexpr-2.861 { + db eval {SELECT coalesce((select max(+t1.a) from t1 where c in (select -max(d & coalesce((select max(t1.b) from t1 where (abs(case d when t1.f then t1.e else t1.c end-t1.f)/abs(13))<=11), -a)) & min(t1.c) & (max(f))++abs(case max(b) when (count(distinct e)) then count(*) else min(17) end) from t1 union select (min(19)) from t1) or not exists(select 1 from t1 where coalesce((select c from t1 where b in (select - - - -count(*) from t1 union select -min((a)) from t1)),13) -t1.b then t1.f else t1.e end)*min(t1.a)+ -count(*) | +max(e)*(min(t1.e)) | count(distinct t1.d) | (count(*)) when count(distinct ((19))) then count(*) else max(t1.f) end from t1))} +} {17} +do_test randexpr-2.862 { + db eval {SELECT case 17 when (select ~count(distinct t1.d) from t1) then e else case when not not exists(select 1 from t1 where (abs(~coalesce((select (coalesce((select max(13) from t1 where e>a),19)) from t1 where (not exists(select 1 from t1 where t1.c in (f,b,c)))),c)*t1.c | 17-(e))/abs(t1.f)) | 17*17*a in (select e from t1 union select f from t1)) then 13 when t1.a not between (b) and t1.c then (abs(17)/abs(17)) else t1.d end*f end FROM t1 WHERE t1.a>t1.c-19} +} {} +do_test randexpr-2.863 { + db eval {SELECT case 17 when (select ~count(distinct t1.d) from t1) then e else case when not not exists(select 1 from t1 where (abs(~coalesce((select (coalesce((select max(13) from t1 where e>a),19)) from t1 where (not exists(select 1 from t1 where t1.c in (f,b,c)))),c)*t1.c | 17-(e))/abs(t1.f)) | 17*17*a in (select e from t1 union select f from t1)) then 13 when t1.a not between (b) and t1.c then (abs(17)/abs(17)) else t1.d end*f end FROM t1 WHERE NOT (t1.a>t1.c-19)} +} {600} +do_test randexpr-2.864 { + db eval {SELECT case 17 when (select ~count(distinct t1.d) from t1) then e else case when not not exists(select 1 from t1 where (abs(~coalesce((select (coalesce((select max(13) from t1 where e>a),19)) from t1 where (not exists(select 1 from t1 where t1.c in (f,b,c)))),c)*t1.c & 17-(e))/abs(t1.f)) & 17*17*a in (select e from t1 union select f from t1)) then 13 when t1.a not between (b) and t1.c then (abs(17)/abs(17)) else t1.d end*f end FROM t1 WHERE NOT (t1.a>t1.c-19)} +} {600} +do_test randexpr-2.865 { + db eval {SELECT case when f>=t1.c then t1.d when coalesce((select max(+11-b) from t1 where a-t1.d in (select +case when f in (13,b | 11,t1.b) then -t1.d when f in (select ~count(distinct 17) from t1 union select count(distinct t1.a) from t1) then 19 else c end-d+b from t1 union select a from t1) and (17) in (t1.f,11,t1.c)),f)< -a then d else t1.c end-11 FROM t1 WHERE a<(select count(distinct +11*coalesce((select max(coalesce((select max(t1.d) from t1 where t1.e>t1.b),b)*t1.f) from t1 where case 11 when t1.a then -t1.e else a end<=17),e)-11+c-19) | count(distinct 19)*count(*)* -(count(distinct t1.f))+cast(avg(17) AS integer)-(~~(count(*)))-cast(avg(11) AS integer) | cast(avg((19)) AS integer) from t1)-t1.c} +} {} +do_test randexpr-2.866 { + db eval {SELECT case when f>=t1.c then t1.d when coalesce((select max(+11-b) from t1 where a-t1.d in (select +case when f in (13,b | 11,t1.b) then -t1.d when f in (select ~count(distinct 17) from t1 union select 
count(distinct t1.a) from t1) then 19 else c end-d+b from t1 union select a from t1) and (17) in (t1.f,11,t1.c)),f)< -a then d else t1.c end-11 FROM t1 WHERE NOT (a<(select count(distinct +11*coalesce((select max(coalesce((select max(t1.d) from t1 where t1.e>t1.b),b)*t1.f) from t1 where case 11 when t1.a then -t1.e else a end<=17),e)-11+c-19) | count(distinct 19)*count(*)* -(count(distinct t1.f))+cast(avg(17) AS integer)-(~~(count(*)))-cast(avg(11) AS integer) | cast(avg((19)) AS integer) from t1)-t1.c)} +} {389} +do_test randexpr-2.867 { + db eval {SELECT case when f>=t1.c then t1.d when coalesce((select max(+11-b) from t1 where a-t1.d in (select +case when f in (13,b & 11,t1.b) then -t1.d when f in (select ~count(distinct 17) from t1 union select count(distinct t1.a) from t1) then 19 else c end-d+b from t1 union select a from t1) and (17) in (t1.f,11,t1.c)),f)< -a then d else t1.c end-11 FROM t1 WHERE NOT (a<(select count(distinct +11*coalesce((select max(coalesce((select max(t1.d) from t1 where t1.e>t1.b),b)*t1.f) from t1 where case 11 when t1.a then -t1.e else a end<=17),e)-11+c-19) | count(distinct 19)*count(*)* -(count(distinct t1.f))+cast(avg(17) AS integer)-(~~(count(*)))-cast(avg(11) AS integer) | cast(avg((19)) AS integer) from t1)-t1.c)} +} {389} +do_test randexpr-2.868 { + db eval {SELECT coalesce((select max(17) from t1 where -coalesce((select t1.b+coalesce((select case when case t1.f when c then (e) else a end+t1.c+(f)>17 and e>=f and (13>t1.f) then (select ~(cast(avg(t1.e) AS integer)) | min(t1.a) from t1) when -c>c then 19 else t1.f end from t1 where (1717 and e>=f and (13>t1.f) then (select ~(cast(avg(t1.e) AS integer)) | min(t1.a) from t1) when -c>c then 19 else t1.f end from t1 where (1717 and e>=f and (13>t1.f) then (select ~(cast(avg(t1.e) AS integer)) & min(t1.a) from t1) when -c>c then 19 else t1.f end from t1 where (17=a then 13 when ((case when t1.d=a)) then 19 else t1.b end-t1.f) when 19 then - -13 else 17 end from t1 union select -17 from t1) and 19 in (select d from t1 union select e from t1) then (select abs(min(t1.f)) from t1) else c end FROM t1 WHERE case coalesce((select 19 from t1 where 13=e then d when (case when not exists(select 1 from t1 where (a>d and d not between e and ((19)) and c in (e,t1.b,(b)) and e not in (t1.f,t1.a,t1.b))) then t1.b when f not between 11 and d then t1.d else d end<=11) then (abs(13)/abs(11)) else -11 end from t1 where (t1.b)=a then 13 when ((case when t1.d=a)) then 19 else t1.b end-t1.f) when 19 then - -13 else 17 end from t1 union select -17 from t1) and 19 in (select d from t1 union select e from t1) then (select abs(min(t1.f)) from t1) else c end FROM t1 WHERE NOT (case coalesce((select 19 from t1 where 13=e then d when (case when not exists(select 1 from t1 where (a>d and d not between e and ((19)) and c in (e,t1.b,(b)) and e not in (t1.f,t1.a,t1.b))) then t1.b when f not between 11 and d then t1.d else d end<=11) then (abs(13)/abs(11)) else -11 end from t1 where (t1.b)t1.c and e not between 13 and t1.d and t1.e in (t1.d,17,t1.c) then c+t1.e*t1.d when t1.f>=t1.d then t1.a else t1.c end else t1.e end-e) from t1 where t1.b not in (t1.b,t1.d,d)), -c)*13 FROM t1 WHERE (abs(e+17)/abs((select count(distinct 11-t1.b)*cast(avg(case c when coalesce((select max(19-coalesce((select max(f) from t1 where 19 in (17,t1.f,11)),t1.d)) from t1 where c in (t1.a,11,13) and t1.a<=t1.c),t1.c) then t1.c else e end) AS integer) | max(e) | (+~case case cast(avg(t1.d) AS integer) when -min(13) then count(*) else count(*) end when max(t1.c) then 
count(distinct f) else count(*) end-count(distinct 19)) from t1)))>=t1.e | f} +} {} +do_test randexpr-2.876 { + db eval {SELECT +17+(11)-coalesce((select max(case (11) when f then +e+case when +11< -13-f or b between (b) and t1.b and e<>t1.c and e not between 13 and t1.d and t1.e in (t1.d,17,t1.c) then c+t1.e*t1.d when t1.f>=t1.d then t1.a else t1.c end else t1.e end-e) from t1 where t1.b not in (t1.b,t1.d,d)), -c)*13 FROM t1 WHERE NOT ((abs(e+17)/abs((select count(distinct 11-t1.b)*cast(avg(case c when coalesce((select max(19-coalesce((select max(f) from t1 where 19 in (17,t1.f,11)),t1.d)) from t1 where c in (t1.a,11,13) and t1.a<=t1.c),t1.c) then t1.c else e end) AS integer) | max(e) | (+~case case cast(avg(t1.d) AS integer) when -min(13) then count(*) else count(*) end when max(t1.c) then count(distinct f) else count(*) end-count(distinct 19)) from t1)))>=t1.e | f)} +} {3928} +do_test randexpr-2.877 { + db eval {SELECT case when case when d*c not in (13,a,d) then (t1.b-f)+coalesce((select max((select count(distinct 13)-case max(11) when count(*) then count(*) else min(t1.b) end*min(17)+(min(t1.c)) from t1)) from t1 where not exists(select 1 from t1 where t1.c between 17-d | d and t1.e)),b) else t1.e end<=d then e when f in (a,13,d) or t1.b between f and -b then c else t1.a end FROM t1 WHERE t1.d<>t1.a} +} {500} +do_test randexpr-2.878 { + db eval {SELECT case when case when d*c not in (13,a,d) then (t1.b-f)+coalesce((select max((select count(distinct 13)-case max(11) when count(*) then count(*) else min(t1.b) end*min(17)+(min(t1.c)) from t1)) from t1 where not exists(select 1 from t1 where t1.c between 17-d | d and t1.e)),b) else t1.e end<=d then e when f in (a,13,d) or t1.b between f and -b then c else t1.a end FROM t1 WHERE NOT (t1.d<>t1.a)} +} {} +do_test randexpr-2.879 { + db eval {SELECT case when case when d*c not in (13,a,d) then (t1.b-f)+coalesce((select max((select count(distinct 13)-case max(11) when count(*) then count(*) else min(t1.b) end*min(17)+(min(t1.c)) from t1)) from t1 where not exists(select 1 from t1 where t1.c between 17-d & d and t1.e)),b) else t1.e end<=d then e when f in (a,13,d) or t1.b between f and -b then c else t1.a end FROM t1 WHERE t1.d<>t1.a} +} {500} +do_test randexpr-2.880 { + db eval {SELECT case case c when -b then (abs(b)/abs(c)) else (select count(distinct f) | (~abs(min(c-+case 11 when case when t1.a between b and t1.e-case when (13<=b) then (select -count(*) from t1) else a end then t1.e when e>=11 then (19) else b end then d else 17 end | e))) from t1) end when -13 then t1.f else +17 end FROM t1 WHERE 17 in (select +(count(*) | case abs( -abs(abs( -min(d)))) | max(+11) when -+~count(distinct -(abs(coalesce((select max(f) from t1 where t1.e in (select e from t1 union select t1.a from t1)),f) | t1.c-t1.b)/abs(13))) | +abs(case max(b) when count(*) then -min(t1.e) else (cast(avg(t1.f) AS integer)) end)- -count(*) then -count(*) else max(19) end* -cast(avg(t1.b) AS integer))+(max(t1.f)) from t1 union select count(distinct b) from t1)} +} {} +do_test randexpr-2.881 { + db eval {SELECT case case c when -b then (abs(b)/abs(c)) else (select count(distinct f) | (~abs(min(c-+case 11 when case when t1.a between b and t1.e-case when (13<=b) then (select -count(*) from t1) else a end then t1.e when e>=11 then (19) else b end then d else 17 end | e))) from t1) end when -13 then t1.f else +17 end FROM t1 WHERE NOT (17 in (select +(count(*) | case abs( -abs(abs( -min(d)))) | max(+11) when -+~count(distinct -(abs(coalesce((select max(f) from t1 where t1.e in 
(select e from t1 union select t1.a from t1)),f) | t1.c-t1.b)/abs(13))) | +abs(case max(b) when count(*) then -min(t1.e) else (cast(avg(t1.f) AS integer)) end)- -count(*) then -count(*) else max(19) end* -cast(avg(t1.b) AS integer))+(max(t1.f)) from t1 union select count(distinct b) from t1))} +} {17} +do_test randexpr-2.882 { + db eval {SELECT case case c when -b then (abs(b)/abs(c)) else (select count(distinct f) & (~abs(min(c-+case 11 when case when t1.a between b and t1.e-case when (13<=b) then (select -count(*) from t1) else a end then t1.e when e>=11 then (19) else b end then d else 17 end & e))) from t1) end when -13 then t1.f else +17 end FROM t1 WHERE NOT (17 in (select +(count(*) | case abs( -abs(abs( -min(d)))) | max(+11) when -+~count(distinct -(abs(coalesce((select max(f) from t1 where t1.e in (select e from t1 union select t1.a from t1)),f) | t1.c-t1.b)/abs(13))) | +abs(case max(b) when count(*) then -min(t1.e) else (cast(avg(t1.f) AS integer)) end)- -count(*) then -count(*) else max(19) end* -cast(avg(t1.b) AS integer))+(max(t1.f)) from t1 union select count(distinct b) from t1))} +} {17} +do_test randexpr-2.883 { + db eval {SELECT case 11 when (case when t1.e in (select +cast(avg(coalesce((select max(13) from t1 where case when exists(select 1 from t1 where t1.b in ((a),d,(t1.e))) then case when 19 in (select -max(17) from t1 union select min(t1.f) from t1) then t1.b else 17 end when a not in ((t1.f),11,t1.e) then 17 else t1.c end(19)),c)) and c and (11 in (select t1.b from t1 union select 13 from t1) and ( -t1.d)=d)),e | 19*t1.a))) and t1.a then t1.a when 19>=t1.a then e else c end> -e} +} {500} +do_test randexpr-2.884 { + db eval {SELECT case 11 when (case when t1.e in (select +cast(avg(coalesce((select max(13) from t1 where case when exists(select 1 from t1 where t1.b in ((a),d,(t1.e))) then case when 19 in (select -max(17) from t1 union select min(t1.f) from t1) then t1.b else 17 end when a not in ((t1.f),11,t1.e) then 17 else t1.c end(19)),c)) and c and (11 in (select t1.b from t1 union select 13 from t1) and ( -t1.d)=d)),e | 19*t1.a))) and t1.a then t1.a when 19>=t1.a then e else c end> -e)} +} {} +do_test randexpr-2.885 { + db eval {SELECT case 11 when (case when t1.e in (select +cast(avg(coalesce((select max(13) from t1 where case when exists(select 1 from t1 where t1.b in ((a),d,(t1.e))) then case when 19 in (select -max(17) from t1 union select min(t1.f) from t1) then t1.b else 17 end when a not in ((t1.f),11,t1.e) then 17 else t1.c end(19)),c)) and c and (11 in (select t1.b from t1 union select 13 from t1) and ( -t1.d)=d)),e | 19*t1.a))) and t1.a then t1.a when 19>=t1.a then e else c end> -e} +} {500} +do_test randexpr-2.886 { + db eval {SELECT case a*t1.a when ~++a*(abs(e)/abs(19)) | t1.d+(select count(*) from t1) then b else t1.a end*coalesce((select max(f) from t1 where f<>t1.d),b) FROM t1 WHERE (abs(c-~t1.b)/abs(d))-((+coalesce((select max(c) from t1 where -+(select ~ -(cast(avg((13+case 11*t1.c when t1.b then f else ~e-coalesce((select max((f)) from t1 where t1.ft1.d),b) FROM t1 WHERE NOT ((abs(c-~t1.b)/abs(d))-((+coalesce((select max(c) from t1 where -+(select ~ -(cast(avg((13+case 11*t1.c when t1.b then f else ~e-coalesce((select max((f)) from t1 where t1.ft1.d),b) FROM t1 WHERE NOT ((abs(c-~t1.b)/abs(d))-((+coalesce((select max(c) from t1 where -+(select ~ -(cast(avg((13+case 11*t1.c when t1.b then f else ~e-coalesce((select max((f)) from t1 where t1.f=+t1.b then f when t1.c+f13 then t1.d else t1.a end)/abs(t1.d))) from t1 where t1.c not between t1.c and 
11 and e>d),f)*t1.e+d,a,13)),e) | t1.b FROM t1 WHERE (abs(+(+t1.a)+case a-case when c>=d then 13 when t1.c not in (17,17,e) then a else t1.d end when c then -f else b end*d)/abs(17))<=11 and 11 in (select cast(avg(17) AS integer) from t1 union select min( -19)+(case max(a) when abs(count(*)) then -abs(count(*)*min(t1.e)+max(11)) else min(c) end) from t1)} +} {} +do_test randexpr-2.890 { + db eval {SELECT t1.c-t1.f-coalesce((select max(a) from t1 where e in ((abs(a)/abs(t1.d))*coalesce((select max((abs(case when e>=+t1.b then f when t1.c+f13 then t1.d else t1.a end)/abs(t1.d))) from t1 where t1.c not between t1.c and 11 and e>d),f)*t1.e+d,a,13)),e) | t1.b FROM t1 WHERE NOT ((abs(+(+t1.a)+case a-case when c>=d then 13 when t1.c not in (17,17,e) then a else t1.d end when c then -f else b end*d)/abs(17))<=11 and 11 in (select cast(avg(17) AS integer) from t1 union select min( -19)+(case max(a) when abs(count(*)) then -abs(count(*)*min(t1.e)+max(11)) else min(c) end) from t1))} +} {-792} +do_test randexpr-2.891 { + db eval {SELECT t1.c-t1.f-coalesce((select max(a) from t1 where e in ((abs(a)/abs(t1.d))*coalesce((select max((abs(case when e>=+t1.b then f when t1.c+f13 then t1.d else t1.a end)/abs(t1.d))) from t1 where t1.c not between t1.c and 11 and e>d),f)*t1.e+d,a,13)),e) & t1.b FROM t1 WHERE NOT ((abs(+(+t1.a)+case a-case when c>=d then 13 when t1.c not in (17,17,e) then a else t1.d end when c then -f else b end*d)/abs(17))<=11 and 11 in (select cast(avg(17) AS integer) from t1 union select min( -19)+(case max(a) when abs(count(*)) then -abs(count(*)*min(t1.e)+max(11)) else min(c) end) from t1))} +} {192} +do_test randexpr-2.892 { + db eval {SELECT coalesce((select (select count(*) from t1)*a*b from t1 where ((~b*a-t1.d+11*e-(select -count(*) | count(*) | count(*) from t1)*(select min((abs(coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where -17 in (t1.b,t1.f,t1.d))),11))/abs(t1.d))) from t1)-17+(abs(11)/abs(11)) | b+d | 11 in (select t1.a from t1 union select 13 from t1)))),t1.a) FROM t1 WHERE f>case when f-case when not exists(select 1 from t1 where case when b in (select abs(max(coalesce((select max(~t1.c) from t1 where t1.f<=13),17))) from t1 union select cast(avg(t1.c) AS integer) from t1) then t1.e else (13) end> -t1.e) or d<>19 then coalesce((select max((select abs(max(d)+cast(avg(d) AS integer)) from t1)) from t1 where t1.e19 then b when t1.a<=11 then 11 else t1.d end} +} {100} +do_test randexpr-2.893 { + db eval {SELECT coalesce((select (select count(*) from t1)*a*b from t1 where ((~b*a-t1.d+11*e-(select -count(*) | count(*) | count(*) from t1)*(select min((abs(coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where -17 in (t1.b,t1.f,t1.d))),11))/abs(t1.d))) from t1)-17+(abs(11)/abs(11)) | b+d | 11 in (select t1.a from t1 union select 13 from t1)))),t1.a) FROM t1 WHERE NOT (f>case when f-case when not exists(select 1 from t1 where case when b in (select abs(max(coalesce((select max(~t1.c) from t1 where t1.f<=13),17))) from t1 union select cast(avg(t1.c) AS integer) from t1) then t1.e else (13) end> -t1.e) or d<>19 then coalesce((select max((select abs(max(d)+cast(avg(d) AS integer)) from t1)) from t1 where t1.e19 then b when t1.a<=11 then 11 else t1.d end)} +} {} +do_test randexpr-2.894 { + db eval {SELECT coalesce((select (select count(*) from t1)*a*b from t1 where ((~b*a-t1.d+11*e-(select -count(*) & count(*) & count(*) from t1)*(select min((abs(coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where -17 in 
(t1.b,t1.f,t1.d))),11))/abs(t1.d))) from t1)-17+(abs(11)/abs(11)) & b+d & 11 in (select t1.a from t1 union select 13 from t1)))),t1.a) FROM t1 WHERE f>case when f-case when not exists(select 1 from t1 where case when b in (select abs(max(coalesce((select max(~t1.c) from t1 where t1.f<=13),17))) from t1 union select cast(avg(t1.c) AS integer) from t1) then t1.e else (13) end> -t1.e) or d<>19 then coalesce((select max((select abs(max(d)+cast(avg(d) AS integer)) from t1)) from t1 where t1.e19 then b when t1.a<=11 then 11 else t1.d end} +} {100} +do_test randexpr-2.895 { + db eval {SELECT coalesce((select e from t1 where ((abs((select case (~max(f) | ~max(19-t1.d) | cast(avg(11) AS integer)) when min(+t1.d*c) then max(case when 19+13 not between 13 and t1.c or t1.c not in ((b),11,(e)) then d else t1.d end+ -t1.d) else min(a) end from t1))/abs(11)))<17),t1.b)+17 FROM t1 WHERE (13 in (a,t1.a,~+(e)+t1.c*t1.c | t1.c- -t1.b-t1.b+e*b | t1.e-t1.e-e*coalesce((select max(t1.b) from t1 where t1.a*d<>coalesce((select max(t1.e-t1.b) from t1 where exists(select 1 from t1 where coalesce((select e from t1 where (17 not between 13 and t1.c)),t1.c)>=t1.c)),b)),t1.f)+17))} +} {} +do_test randexpr-2.896 { + db eval {SELECT coalesce((select e from t1 where ((abs((select case (~max(f) | ~max(19-t1.d) | cast(avg(11) AS integer)) when min(+t1.d*c) then max(case when 19+13 not between 13 and t1.c or t1.c not in ((b),11,(e)) then d else t1.d end+ -t1.d) else min(a) end from t1))/abs(11)))<17),t1.b)+17 FROM t1 WHERE NOT ((13 in (a,t1.a,~+(e)+t1.c*t1.c | t1.c- -t1.b-t1.b+e*b | t1.e-t1.e-e*coalesce((select max(t1.b) from t1 where t1.a*d<>coalesce((select max(t1.e-t1.b) from t1 where exists(select 1 from t1 where coalesce((select e from t1 where (17 not between 13 and t1.c)),t1.c)>=t1.c)),b)),t1.f)+17)))} +} {517} +do_test randexpr-2.897 { + db eval {SELECT coalesce((select e from t1 where ((abs((select case (~max(f) & ~max(19-t1.d) & cast(avg(11) AS integer)) when min(+t1.d*c) then max(case when 19+13 not between 13 and t1.c or t1.c not in ((b),11,(e)) then d else t1.d end+ -t1.d) else min(a) end from t1))/abs(11)))<17),t1.b)+17 FROM t1 WHERE NOT ((13 in (a,t1.a,~+(e)+t1.c*t1.c | t1.c- -t1.b-t1.b+e*b | t1.e-t1.e-e*coalesce((select max(t1.b) from t1 where t1.a*d<>coalesce((select max(t1.e-t1.b) from t1 where exists(select 1 from t1 where coalesce((select e from t1 where (17 not between 13 and t1.c)),t1.c)>=t1.c)),b)),t1.f)+17)))} +} {517} +do_test randexpr-2.898 { + db eval {SELECT c | coalesce((select max(~b*case +coalesce((select max( -(abs(c)/abs(11))+case when t1.c*17 not in (a,a,c) then t1.e when t1.b in (select 13 from t1 union select (c) from t1) then -(t1.c) else d end* -t1.c+a) from t1 where t1.f>=11),17) | t1.b when t1.b then c else t1.a end) from t1 where t1.f>t1.a),t1.c)+13*17 FROM t1 WHERE t1.e=11),17) | t1.b when t1.b then c else t1.a end) from t1 where t1.f>t1.a),t1.c)+13*17 FROM t1 WHERE NOT (t1.e=11),17) & t1.b when t1.b then c else t1.a end) from t1 where t1.f>t1.a),t1.c)+13*17 FROM t1 WHERE t1.ee) and not exists(select 1 from t1 where (19)=t1.e))),19*t1.e)-t1.c)) | 17))*t1.b)/abs(17))) from t1 where (t1.b) between d and t1.b),t1.c)) | t1.f-c-d) FROM t1 WHERE 13 not in (f,t1.c+11*c,coalesce((select e from t1 where case t1.b when c- -case (select cast(avg(+t1.c*t1.f) AS integer) from t1) when 13*case when not case when -17 in (select (t1.c) from t1 union select t1.c from t1) or t1.f in (t1.b,t1.d,t1.b) then t1.a when 11<>t1.c then e else a end>=(11) and a=d or -t1.f between c and b then e else t1.f end 
then 19 else t1.d end then t1.c else t1.b end<>13),d))} +} {-35} +do_test randexpr-2.902 { + db eval {SELECT coalesce((select max(a) from t1 where t1.be) and not exists(select 1 from t1 where (19)=t1.e))),19*t1.e)-t1.c)) | 17))*t1.b)/abs(17))) from t1 where (t1.b) between d and t1.b),t1.c)) | t1.f-c-d) FROM t1 WHERE NOT (13 not in (f,t1.c+11*c,coalesce((select e from t1 where case t1.b when c- -case (select cast(avg(+t1.c*t1.f) AS integer) from t1) when 13*case when not case when -17 in (select (t1.c) from t1 union select t1.c from t1) or t1.f in (t1.b,t1.d,t1.b) then t1.a when 11<>t1.c then e else a end>=(11) and a=d or -t1.f between c and b then e else t1.f end then 19 else t1.d end then t1.c else t1.b end<>13),d)))} +} {} +do_test randexpr-2.903 { + db eval {SELECT f+b+(t1.a+c)+(case when (select (cast(avg(coalesce((select max(t1.b) from t1 where case -(select +cast(avg(b-t1.e) AS integer)-abs(cast(avg(~(abs(f)/abs(f))) AS integer)) from t1)+d when 11 then d else 13 end+t1.e<>t1.e),(t1.e))-13+13) AS integer)) from t1)-t1.b<>a then t1.e else e end) FROM t1 WHERE c in (select abs((max(t1.d))) from t1 union select count(*) from t1) and (coalesce((select max(+ -(t1.f-d)* -b) from t1 where not exists(select 1 from t1 where (b not in (coalesce((select t1.e-a from t1 where not t1.a between 13 and f or t1.b in (t1.e,e,17)),11),17,t1.a) and (b<>d)))),b) in (select -min(17) from t1 union select abs(max(e)) from t1))} +} {} +do_test randexpr-2.904 { + db eval {SELECT f+b+(t1.a+c)+(case when (select (cast(avg(coalesce((select max(t1.b) from t1 where case -(select +cast(avg(b-t1.e) AS integer)-abs(cast(avg(~(abs(f)/abs(f))) AS integer)) from t1)+d when 11 then d else 13 end+t1.e<>t1.e),(t1.e))-13+13) AS integer)) from t1)-t1.b<>a then t1.e else e end) FROM t1 WHERE NOT (c in (select abs((max(t1.d))) from t1 union select count(*) from t1) and (coalesce((select max(+ -(t1.f-d)* -b) from t1 where not exists(select 1 from t1 where (b not in (coalesce((select t1.e-a from t1 where not t1.a between 13 and f or t1.b in (t1.e,e,17)),11),17,t1.a) and (b<>d)))),b) in (select -min(17) from t1 union select abs(max(e)) from t1)))} +} {1700} +do_test randexpr-2.905 { + db eval {SELECT case when case when not exists(select 1 from t1 where not exists(select 1 from t1 where (case when (select -min(t1.e) from t1) in (select f from t1 union select d from t1) then +e else d end | t1.e not between t1.a and t1.b))) and not exists(select 1 from t1 where a>=t1.e) and b=e then 13 else coalesce((select 19 from t1 where t1.d>=t1.e),13) end<11*t1.b then 17 when 17 in (19,t1.e, -t1.c) or 17=t1.e) and b=e then 13 else coalesce((select 19 from t1 where t1.d>=t1.e),13) end<11*t1.b then 17 when 17 in (19,t1.e, -t1.c) or 17=t1.e) and b=e then 13 else coalesce((select 19 from t1 where t1.d>=t1.e),13) end<11*t1.b then 17 when 17 in (19,t1.e, -t1.c) or 17t1.a then c else 19 end when t1.c then e else t1.d end when 17 then t1.d else -d end between e and t1.e then 13 when (13<>13) then t1.c else 17 end when exists(select 1 from t1 where (t1.c between b and 11)) then e else f end FROM t1 WHERE ~a* -b+11-case when coalesce((select max(case when (t1.c in (case t1.b when t1.c then 13 else 17 end,17,19)) then case e when a then c else 13 end else c end) from t1 where a between b and t1.c),t1.a)=c or (17>d) then case when f= -t1.a then t1.b when (t1.d)t1.a then c else 19 end when t1.c then e else t1.d end when 17 then t1.d else -d end between e and t1.e then 13 when (13<>13) then t1.c else 17 end when exists(select 1 from t1 where (t1.c between 
b and 11)) then e else f end FROM t1 WHERE NOT (~a* -b+11-case when coalesce((select max(case when (t1.c in (case t1.b when t1.c then 13 else 17 end,17,19)) then case e when a then c else 13 end else c end) from t1 where a between b and t1.c),t1.a)=c or (17>d) then case when f= -t1.a then t1.b when (t1.d)=t1.f)) and t1.f>t1.a or e<>c then 17 else d end)/abs(a))) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (t1.f) in (t1.c,(t1.e),17)))),t1.a) end+e FROM t1 WHERE exists(select 1 from t1 where 17>(case coalesce((select max((abs(+t1.f-case when 11=(abs((select count(*) from t1)-13*13)/abs(t1.d)) then coalesce((select max(t1.c) from t1 where 19 in (select ((max(19))) from t1 union select min(t1.d) from t1) or -t1.a=t1.a and f<17),t1.e-d) when not exists(select 1 from t1 where t1.b not between (d) and -t1.f) then 11 else t1.a end)/abs(t1.a))) from t1 where t1.e not in ((f),11,t1.f)),t1.c) when b then -t1.a else 13 end)-t1.c)} +} {947} +do_test randexpr-2.911 { + db eval {SELECT +case when e in (13, -b,11) then d else coalesce((select max(13 | d | d+19+(abs(case when (19) between (abs(case f when t1.f then (13) else 17 end)/abs(a)) and d then 19 when (not exists(select 1 from t1 where t1.a>=t1.f)) and t1.f>t1.a or e<>c then 17 else d end)/abs(a))) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (t1.f) in (t1.c,(t1.e),17)))),t1.a) end+e FROM t1 WHERE NOT (exists(select 1 from t1 where 17>(case coalesce((select max((abs(+t1.f-case when 11=(abs((select count(*) from t1)-13*13)/abs(t1.d)) then coalesce((select max(t1.c) from t1 where 19 in (select ((max(19))) from t1 union select min(t1.d) from t1) or -t1.a=t1.a and f<17),t1.e-d) when not exists(select 1 from t1 where t1.b not between (d) and -t1.f) then 11 else t1.a end)/abs(t1.a))) from t1 where t1.e not in ((f),11,t1.f)),t1.c) when b then -t1.a else 13 end)-t1.c))} +} {} +do_test randexpr-2.912 { + db eval {SELECT +case when e in (13, -b,11) then d else coalesce((select max(13 & d & d+19+(abs(case when (19) between (abs(case f when t1.f then (13) else 17 end)/abs(a)) and d then 19 when (not exists(select 1 from t1 where t1.a>=t1.f)) and t1.f>t1.a or e<>c then 17 else d end)/abs(a))) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (t1.f) in (t1.c,(t1.e),17)))),t1.a) end+e FROM t1 WHERE exists(select 1 from t1 where 17>(case coalesce((select max((abs(+t1.f-case when 11=(abs((select count(*) from t1)-13*13)/abs(t1.d)) then coalesce((select max(t1.c) from t1 where 19 in (select ((max(19))) from t1 union select min(t1.d) from t1) or -t1.a=t1.a and f<17),t1.e-d) when not exists(select 1 from t1 where t1.b not between (d) and -t1.f) then 11 else t1.a end)/abs(t1.a))) from t1 where t1.e not in ((f),11,t1.f)),t1.c) when b then -t1.a else 13 end)-t1.c)} +} {500} +do_test randexpr-2.913 { + db eval {SELECT (13*case when exists(select 1 from t1 where exists(select 1 from t1 where coalesce((select max(b) from t1 where t1.b=e),a)<(b-a)+13)) then 11 else t1.e end*coalesce((select 17 from t1 where not ((select cast(avg(13) AS integer) from t1)< -t1.c) and 17 between 13 and 13),case when f=case when exists(select 1 from t1 where (13 in ( -(b)*(select count(*)* -cast(avg(case t1.e when t1.b then (f)-f-f else e end) AS integer)*cast(avg(17) AS integer) | cast(avg((17)) AS integer) | min(d) | count(distinct e) from t1),19+(abs(19)/abs(17))*f,t1.b))) then 11 when 17>t1.f then +a else t1.d end)} +} {28600411} +do_test randexpr-2.914 { + db eval {SELECT (13*case when 
exists(select 1 from t1 where exists(select 1 from t1 where coalesce((select max(b) from t1 where t1.b=e),a)<(b-a)+13)) then 11 else t1.e end*coalesce((select 17 from t1 where not ((select cast(avg(13) AS integer) from t1)< -t1.c) and 17 between 13 and 13),case when f=case when exists(select 1 from t1 where (13 in ( -(b)*(select count(*)* -cast(avg(case t1.e when t1.b then (f)-f-f else e end) AS integer)*cast(avg(17) AS integer) | cast(avg((17)) AS integer) | min(d) | count(distinct e) from t1),19+(abs(19)/abs(17))*f,t1.b))) then 11 when 17>t1.f then +a else t1.d end))} +} {} +do_test randexpr-2.915 { + db eval {SELECT case when +11 | (select min(case 13+a when e then d else 17-t1.b-~b*a end) from t1)-f*t1.f=(coalesce((select e from t1 where a<=f and not exists(select 1 from t1 where d-case 19 when 11 then (b) else t1.e end not between -t1.a and (t1.c))),e)) then t1.e when -17>=d then t1.f else t1.b end*t1.a FROM t1 WHERE e<=19} +} {} +do_test randexpr-2.916 { + db eval {SELECT case when +11 | (select min(case 13+a when e then d else 17-t1.b-~b*a end) from t1)-f*t1.f=(coalesce((select e from t1 where a<=f and not exists(select 1 from t1 where d-case 19 when 11 then (b) else t1.e end not between -t1.a and (t1.c))),e)) then t1.e when -17>=d then t1.f else t1.b end*t1.a FROM t1 WHERE NOT (e<=19)} +} {20000} +do_test randexpr-2.917 { + db eval {SELECT case when +11 & (select min(case 13+a when e then d else 17-t1.b-~b*a end) from t1)-f*t1.f=(coalesce((select e from t1 where a<=f and not exists(select 1 from t1 where d-case 19 when 11 then (b) else t1.e end not between -t1.a and (t1.c))),e)) then t1.e when -17>=d then t1.f else t1.b end*t1.a FROM t1 WHERE NOT (e<=19)} +} {20000} +do_test randexpr-2.918 { + db eval {SELECT 11-coalesce((select max(coalesce((select max((select abs(case +max((e)) when abs(abs( -count(distinct e))) then +cast(avg(f) AS integer) else count(*) end+count(*)+min( -e)) from t1)) from t1 where t1.b in (select count(distinct (17 | case case when e in (13,t1.a,e) or t1.d>=t1.e then 17 else a end when t1.e then 17 else t1.d end*t1.c)) from t1 union select cast(avg(t1.a) AS integer) from t1)),17)) from t1 where t1.c in (select ~cast(avg(t1.b) AS integer) from t1 union select -(cast(avg(t1.c) AS integer)) from t1)),(t1.c)) FROM t1 WHERE t1.b not in (case when 11 in (select case +min(t1.f) when (+cast(avg((case t1.c when a then f else a end-f)) AS integer)) then +count(distinct (t1.f))-count(distinct t1.f) else count(*) end-count(*) | count(*) from t1 union select min(e) from t1) then (abs(b | 13-f | c)/abs(t1.f)) else t1.c end,f,t1.b) or not -t1.a=11} +} {-289} +do_test randexpr-2.919 { + db eval {SELECT 11-coalesce((select max(coalesce((select max((select abs(case +max((e)) when abs(abs( -count(distinct e))) then +cast(avg(f) AS integer) else count(*) end+count(*)+min( -e)) from t1)) from t1 where t1.b in (select count(distinct (17 | case case when e in (13,t1.a,e) or t1.d>=t1.e then 17 else a end when t1.e then 17 else t1.d end*t1.c)) from t1 union select cast(avg(t1.a) AS integer) from t1)),17)) from t1 where t1.c in (select ~cast(avg(t1.b) AS integer) from t1 union select -(cast(avg(t1.c) AS integer)) from t1)),(t1.c)) FROM t1 WHERE NOT (t1.b not in (case when 11 in (select case +min(t1.f) when (+cast(avg((case t1.c when a then f else a end-f)) AS integer)) then +count(distinct (t1.f))-count(distinct t1.f) else count(*) end-count(*) | count(*) from t1 union select min(e) from t1) then (abs(b | 13-f | c)/abs(t1.f)) else t1.c end,f,t1.b) or not -t1.a=11)} +} {} +do_test 
randexpr-2.920 { + db eval {SELECT 11-coalesce((select max(coalesce((select max((select abs(case +max((e)) when abs(abs( -count(distinct e))) then +cast(avg(f) AS integer) else count(*) end+count(*)+min( -e)) from t1)) from t1 where t1.b in (select count(distinct (17 & case case when e in (13,t1.a,e) or t1.d>=t1.e then 17 else a end when t1.e then 17 else t1.d end*t1.c)) from t1 union select cast(avg(t1.a) AS integer) from t1)),17)) from t1 where t1.c in (select ~cast(avg(t1.b) AS integer) from t1 union select -(cast(avg(t1.c) AS integer)) from t1)),(t1.c)) FROM t1 WHERE t1.b not in (case when 11 in (select case +min(t1.f) when (+cast(avg((case t1.c when a then f else a end-f)) AS integer)) then +count(distinct (t1.f))-count(distinct t1.f) else count(*) end-count(*) | count(*) from t1 union select min(e) from t1) then (abs(b | 13-f | c)/abs(t1.f)) else t1.c end,f,t1.b) or not -t1.a=11} +} {-289} +do_test randexpr-2.921 { + db eval {SELECT (select -abs(max(+(select +count(*) from t1)))-max(case when b in (select (abs(13)/abs(b)) from t1 union select case when exists(select 1 from t1 where not exists(select 1 from t1 where (abs(17)/abs(coalesce((select max(+19*t1.e+case t1.f when (abs(f)/abs(t1.f)) then t1.f else e end) from t1 where 19<17),t1.a)))+11 in (select b from t1 union select t1.e from t1))) then d else t1.c end from t1) then f when 13=d then -a else t1.a end) from t1) FROM t1 WHERE ~(select ~ -(min(f)- -cast(avg(f) AS integer)-abs(count(distinct case when t1.d=b then d when e in (select +cast(avg(e) AS integer) from t1 union select count(*) from t1) then t1.f else c end*17*f))) | (count(*)) from t1)-f<>case when (select + -max(t1.e) from t1)+(select -min(17) from t1)*13 between 17 and 19 then t1.b else t1.d end} +} {-101} +do_test randexpr-2.922 { + db eval {SELECT (select -abs(max(+(select +count(*) from t1)))-max(case when b in (select (abs(13)/abs(b)) from t1 union select case when exists(select 1 from t1 where not exists(select 1 from t1 where (abs(17)/abs(coalesce((select max(+19*t1.e+case t1.f when (abs(f)/abs(t1.f)) then t1.f else e end) from t1 where 19<17),t1.a)))+11 in (select b from t1 union select t1.e from t1))) then d else t1.c end from t1) then f when 13=d then -a else t1.a end) from t1) FROM t1 WHERE NOT (~(select ~ -(min(f)- -cast(avg(f) AS integer)-abs(count(distinct case when t1.d=b then d when e in (select +cast(avg(e) AS integer) from t1 union select count(*) from t1) then t1.f else c end*17*f))) | (count(*)) from t1)-f<>case when (select + -max(t1.e) from t1)+(select -min(17) from t1)*13 between 17 and 19 then t1.b else t1.d end)} +} {} +do_test randexpr-2.923 { + db eval {SELECT case (select ( - - -(+max(c))+min(e)-(count(*))+cast(avg(t1.e) AS integer)-min(t1.a)*(cast(avg(11) AS integer))) from t1) when t1.d then t1.c else case (select max(a) from t1) when -t1.b then case when not 17<>case when t1.e<=t1.d or ((t1.e))=t1.e then t1.f when e>=t1.b then 11 else t1.d end or d in (t1.f,t1.a,t1.a) then c | 17 when t1.f between t1.a and -t1.e then d else t1.c end else t1.c end end-t1.c FROM t1 WHERE not (exists(select 1 from t1 where a<>11+t1.d-t1.a+(select count(distinct t1.d)++max(case e when coalesce((select 11 from t1 where a>=case when coalesce((select f from t1 where exists(select 1 from t1 where 13 in (t1.c,13,t1.a))),(d))+f not between d and t1.e then d else (t1.c) end-13),(d))-t1.d then t1.a else 17 end) from t1)) and not a<(d)*19)} +} {0} +do_test randexpr-2.924 { + db eval {SELECT case (select ( - - -(+max(c))+min(e)-(count(*))+cast(avg(t1.e) AS 
integer)-min(t1.a)*(cast(avg(11) AS integer))) from t1) when t1.d then t1.c else case (select max(a) from t1) when -t1.b then case when not 17<>case when t1.e<=t1.d or ((t1.e))=t1.e then t1.f when e>=t1.b then 11 else t1.d end or d in (t1.f,t1.a,t1.a) then c | 17 when t1.f between t1.a and -t1.e then d else t1.c end else t1.c end end-t1.c FROM t1 WHERE NOT (not (exists(select 1 from t1 where a<>11+t1.d-t1.a+(select count(distinct t1.d)++max(case e when coalesce((select 11 from t1 where a>=case when coalesce((select f from t1 where exists(select 1 from t1 where 13 in (t1.c,13,t1.a))),(d))+f not between d and t1.e then d else (t1.c) end-13),(d))-t1.d then t1.a else 17 end) from t1)) and not a<(d)*19))} +} {} +do_test randexpr-2.925 { + db eval {SELECT case (select ( - - -(+max(c))+min(e)-(count(*))+cast(avg(t1.e) AS integer)-min(t1.a)*(cast(avg(11) AS integer))) from t1) when t1.d then t1.c else case (select max(a) from t1) when -t1.b then case when not 17<>case when t1.e<=t1.d or ((t1.e))=t1.e then t1.f when e>=t1.b then 11 else t1.d end or d in (t1.f,t1.a,t1.a) then c & 17 when t1.f between t1.a and -t1.e then d else t1.c end else t1.c end end-t1.c FROM t1 WHERE not (exists(select 1 from t1 where a<>11+t1.d-t1.a+(select count(distinct t1.d)++max(case e when coalesce((select 11 from t1 where a>=case when coalesce((select f from t1 where exists(select 1 from t1 where 13 in (t1.c,13,t1.a))),(d))+f not between d and t1.e then d else (t1.c) end-13),(d))-t1.d then t1.a else 17 end) from t1)) and not a<(d)*19)} +} {0} +do_test randexpr-2.926 { + db eval {SELECT case t1.b when c*(case when ((select ++case abs(cast(avg(a) AS integer))*min(c) when min(t1.c) then cast(avg(t1.b) AS integer) else count(*) end | (count(distinct t1.c)) | ((max(t1.a))) | count(distinct b) from t1) not between t1.e and 11 and coalesce((select max(e) from t1 where a between t1.c and c+b),e) in (select t1.d from t1 union select t1.b from t1) or (not exists(select 1 from t1 where 13<>t1.a))) then 11*b else t1.b end) then t1.c else e end FROM t1 WHERE exists(select 1 from t1 where t1.d>=f)} +} {} +do_test randexpr-2.927 { + db eval {SELECT case t1.b when c*(case when ((select ++case abs(cast(avg(a) AS integer))*min(c) when min(t1.c) then cast(avg(t1.b) AS integer) else count(*) end | (count(distinct t1.c)) | ((max(t1.a))) | count(distinct b) from t1) not between t1.e and 11 and coalesce((select max(e) from t1 where a between t1.c and c+b),e) in (select t1.d from t1 union select t1.b from t1) or (not exists(select 1 from t1 where 13<>t1.a))) then 11*b else t1.b end) then t1.c else e end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.d>=f))} +} {500} +do_test randexpr-2.928 { + db eval {SELECT case t1.b when c*(case when ((select ++case abs(cast(avg(a) AS integer))*min(c) when min(t1.c) then cast(avg(t1.b) AS integer) else count(*) end & (count(distinct t1.c)) & ((max(t1.a))) & count(distinct b) from t1) not between t1.e and 11 and coalesce((select max(e) from t1 where a between t1.c and c+b),e) in (select t1.d from t1 union select t1.b from t1) or (not exists(select 1 from t1 where 13<>t1.a))) then 11*b else t1.b end) then t1.c else e end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.d>=f))} +} {500} +do_test randexpr-2.929 { + db eval {SELECT d-coalesce((select d from t1 where case when ~11*a*c>=17 then coalesce((select (abs(t1.e+c+(abs(f*(select abs((min(e))) from t1)-t1.f)/abs( - -a))-a)/abs(t1.d)) from t1 where f not in (e,b,t1.f) and c<>f),f)*19 else f end in (b,e,17)),13)-19 FROM t1 WHERE not f>=11} +} {} 
+do_test randexpr-2.930 { + db eval {SELECT d-coalesce((select d from t1 where case when ~11*a*c>=17 then coalesce((select (abs(t1.e+c+(abs(f*(select abs((min(e))) from t1)-t1.f)/abs( - -a))-a)/abs(t1.d)) from t1 where f not in (e,b,t1.f) and c<>f),f)*19 else f end in (b,e,17)),13)-19 FROM t1 WHERE NOT (not f>=11)} +} {368} +do_test randexpr-2.931 { + db eval {SELECT f*t1.d+coalesce((select a+17-t1.e from t1 where 13=t1.f and not (t1.f | b*t1.d-t1.c*coalesce((select max(coalesce((select t1.f from t1 where f not in (13,a,d)),d)) from t1 where t1.a in (select cast(avg(c) AS integer) from t1 union select count(distinct b)*count(distinct t1.c) from t1)),((17)))+ -b+19+13)=t1.f and 19>=t1.a or - -a>=t1.b),a) FROM t1 WHERE f not in (coalesce((select max(19) from t1 where (t1.e<=+t1.f*case when ~(abs(b)/abs(b)) in (f,t1.d,17) then (abs(t1.e-11 | d)/abs(coalesce((select d from t1 where not coalesce((select t1.a from t1 where not exists(select 1 from t1 where c>=t1.c)),d) in (select count(distinct f)*cast(avg(19) AS integer) from t1 union select count(distinct d) from t1)),13))) when not 11<=t1.a then t1.f else 19 end-19)),f)-17, -a,e)} +} {240100} +do_test randexpr-2.932 { + db eval {SELECT f*t1.d+coalesce((select a+17-t1.e from t1 where 13=t1.f and not (t1.f | b*t1.d-t1.c*coalesce((select max(coalesce((select t1.f from t1 where f not in (13,a,d)),d)) from t1 where t1.a in (select cast(avg(c) AS integer) from t1 union select count(distinct b)*count(distinct t1.c) from t1)),((17)))+ -b+19+13)=t1.f and 19>=t1.a or - -a>=t1.b),a) FROM t1 WHERE NOT (f not in (coalesce((select max(19) from t1 where (t1.e<=+t1.f*case when ~(abs(b)/abs(b)) in (f,t1.d,17) then (abs(t1.e-11 | d)/abs(coalesce((select d from t1 where not coalesce((select t1.a from t1 where not exists(select 1 from t1 where c>=t1.c)),d) in (select count(distinct f)*cast(avg(19) AS integer) from t1 union select count(distinct d) from t1)),13))) when not 11<=t1.a then t1.f else 19 end-19)),f)-17, -a,e))} +} {} +do_test randexpr-2.933 { + db eval {SELECT f*t1.d+coalesce((select a+17-t1.e from t1 where 13=t1.f and not (t1.f & b*t1.d-t1.c*coalesce((select max(coalesce((select t1.f from t1 where f not in (13,a,d)),d)) from t1 where t1.a in (select cast(avg(c) AS integer) from t1 union select count(distinct b)*count(distinct t1.c) from t1)),((17)))+ -b+19+13)=t1.f and 19>=t1.a or - -a>=t1.b),a) FROM t1 WHERE f not in (coalesce((select max(19) from t1 where (t1.e<=+t1.f*case when ~(abs(b)/abs(b)) in (f,t1.d,17) then (abs(t1.e-11 | d)/abs(coalesce((select d from t1 where not coalesce((select t1.a from t1 where not exists(select 1 from t1 where c>=t1.c)),d) in (select count(distinct f)*cast(avg(19) AS integer) from t1 union select count(distinct d) from t1)),13))) when not 11<=t1.a then t1.f else 19 end-19)),f)-17, -a,e)} +} {240100} +do_test randexpr-2.934 { + db eval {SELECT coalesce((select case e*+(abs(t1.f-t1.b)/abs(e))-d when coalesce((select t1.e from t1 where exists(select 1 from t1 where +d+ -t1.e not between case when (abs(b)/abs(t1.b)) between a and 17 then t1.b when not 11 not between b and d then 19 else 19 end and t1.f and t1.a between 17 and b)), -13) then (t1.a) else 13 end from t1 where 17 between b and 13), -19) | c FROM t1 WHERE case when t1.e*t1.b | f+d | f+11+(a)-+a+ -t1.c-t1.f | coalesce((select case t1.e when 17 then e else t1.a end from t1 where t1.ab then e else t1.d end end-11>=13),e) | 13 FROM t1 WHERE not exists(select 1 from t1 where t1.c< -d or t1.e between c and -t1.d or d<=17)} +} {10143} +do_test randexpr-2.940 { + db 
eval {SELECT d*(select - - -max( -(abs(e)/abs(~t1.c))) from t1)-13 | coalesce((select max(+t1.a*a) from t1 where t1.e+case e when a then t1.f+(select case ~ -max(d)* - -cast(avg(c) AS integer) when -min( -t1.b) then min(t1.b) else count(distinct 13) end from t1)-a else ~case when t1.f not in (b,b,a) or c between d and t1.a then c when t1.b>b then e else t1.d end end-11>=13),e) | 13 FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.c< -d or t1.e between c and -t1.d or d<=17))} +} {} +do_test randexpr-2.941 { + db eval {SELECT d*(select - - -max( -(abs(e)/abs(~t1.c))) from t1)-13 & coalesce((select max(+t1.a*a) from t1 where t1.e+case e when a then t1.f+(select case ~ -max(d)* - -cast(avg(c) AS integer) when -min( -t1.b) then min(t1.b) else count(distinct 13) end from t1)-a else ~case when t1.f not in (b,b,a) or c between d and t1.a then c when t1.b>b then e else t1.d end end-11>=13),e) & 13 FROM t1 WHERE not exists(select 1 from t1 where t1.c< -d or t1.e between c and -t1.d or d<=17)} +} {0} +do_test randexpr-2.942 { + db eval {SELECT t1.c*+case when a<>+case when b>t1.e or exists(select 1 from t1 where not exists(select 1 from t1 where t1.e in (select coalesce((select b from t1 where (d( -t1.d)),(( -b))) from t1 union select t1.d from t1))) then (select -max(t1.e)-count(*) from t1) else (t1.c) end+b+(t1.f) | f | e then t1.d when t1.f not in (19,13,e) then 13 else 11 end FROM t1 WHERE (f<>coalesce((select max(a) from t1 where (exists(select 1 from t1 where c between t1.b and case when (abs(19)/abs(t1.f)) in (t1.f,17,t1.a) then t1.b when t1.d<=t1.a then t1.a else c end or c=b and c>=t1.d and t1.d between c and t1.c)),e+case (select count(distinct case 19 when 13 then t1.b else 11 end) from t1) when 13 then b else t1.b end)-17-17)} +} {120000} +do_test randexpr-2.943 { + db eval {SELECT t1.c*+case when a<>+case when b>t1.e or exists(select 1 from t1 where not exists(select 1 from t1 where t1.e in (select coalesce((select b from t1 where (d( -t1.d)),(( -b))) from t1 union select t1.d from t1))) then (select -max(t1.e)-count(*) from t1) else (t1.c) end+b+(t1.f) | f | e then t1.d when t1.f not in (19,13,e) then 13 else 11 end FROM t1 WHERE NOT ((f<>coalesce((select max(a) from t1 where (exists(select 1 from t1 where c between t1.b and case when (abs(19)/abs(t1.f)) in (t1.f,17,t1.a) then t1.b when t1.d<=t1.a then t1.a else c end or c=b and c>=t1.d and t1.d between c and t1.c)),e+case (select count(distinct case 19 when 13 then t1.b else 11 end) from t1) when 13 then b else t1.b end)-17-17))} +} {} +do_test randexpr-2.944 { + db eval {SELECT t1.c*+case when a<>+case when b>t1.e or exists(select 1 from t1 where not exists(select 1 from t1 where t1.e in (select coalesce((select b from t1 where (d( -t1.d)),(( -b))) from t1 union select t1.d from t1))) then (select -max(t1.e)-count(*) from t1) else (t1.c) end+b+(t1.f) & f & e then t1.d when t1.f not in (19,13,e) then 13 else 11 end FROM t1 WHERE (f<>coalesce((select max(a) from t1 where (exists(select 1 from t1 where c between t1.b and case when (abs(19)/abs(t1.f)) in (t1.f,17,t1.a) then t1.b when t1.d<=t1.a then t1.a else c end or c=b and c>=t1.d and t1.d between c and t1.c)),e+case (select count(distinct case 19 when 13 then t1.b else 11 end) from t1) when 13 then b else t1.b end)-17-17)} +} {120000} +do_test randexpr-2.945 { + db eval {SELECT case when (e)- -((select case min(case when e in (select coalesce((select max(d) from t1 where 13<>(t1.d-e)),e) from t1 union select c from t1) then d else -11 end+a) when count(distinct -f) then 
count(*) else -(case cast(avg(17) AS integer)- -count(distinct t1.c)+cast(avg(b) AS integer) when -max(11) then count(*) else cast(avg(t1.e) AS integer) end+max(t1.f)) end from t1))<=t1.b | t1.e then t1.d when not exists(select 1 from t1 where c<>13) then 11 else d end FROM t1 WHERE (select max(t1.a*case f when b then ~case when not exists(select 1 from t1 where f-d>(select abs(cast(avg(~17+b+d) AS integer)) from t1) or f-e in (select b from t1 union select t1.d from t1) or exists(select 1 from t1 where 13<>b)) then (abs(a)/abs( -d)) when (d(t1.d-e)),e) from t1 union select c from t1) then d else -11 end+a) when count(distinct -f) then count(*) else -(case cast(avg(17) AS integer)- -count(distinct t1.c)+cast(avg(b) AS integer) when -max(11) then count(*) else cast(avg(t1.e) AS integer) end+max(t1.f)) end from t1))<=t1.b | t1.e then t1.d when not exists(select 1 from t1 where c<>13) then 11 else d end FROM t1 WHERE NOT ((select max(t1.a*case f when b then ~case when not exists(select 1 from t1 where f-d>(select abs(cast(avg(~17+b+d) AS integer)) from t1) or f-e in (select b from t1 union select t1.d from t1) or exists(select 1 from t1 where 13<>b)) then (abs(a)/abs( -d)) when (d(t1.d-e)),e) from t1 union select c from t1) then d else -11 end+a) when count(distinct -f) then count(*) else -(case cast(avg(17) AS integer)- -count(distinct t1.c)+cast(avg(b) AS integer) when -max(11) then count(*) else cast(avg(t1.e) AS integer) end+max(t1.f)) end from t1))<=t1.b & t1.e then t1.d when not exists(select 1 from t1 where c<>13) then 11 else d end FROM t1 WHERE NOT ((select max(t1.a*case f when b then ~case when not exists(select 1 from t1 where f-d>(select abs(cast(avg(~17+b+d) AS integer)) from t1) or f-e in (select b from t1 union select t1.d from t1) or exists(select 1 from t1 where 13<>b)) then (abs(a)/abs( -d)) when (d=e and d between d and a then t1.c when (t1.c>19) then -11 else a end from t1 where ac then t1.e else -d end-t1.e) between b and 11 then b when exists(select 1 from t1 where a<=d) then 11 else 13 end) then cast(avg(t1.b) AS integer)-(cast(avg(11) AS integer)-min(a))-count(distinct -e) else cast(avg(13) AS integer) end*max(b) from t1) or (t1.f not between t1.d and 17 | 19*e)} +} {} +do_test randexpr-2.949 { + db eval {SELECT (t1.b+coalesce((select case when e-13+11 | f+case when not t1.b not between t1.f and t1.f then e else (t1.e) end-c-b+c>=e and d between d and a then t1.c when (t1.c>19) then -11 else a end from t1 where ac then t1.e else -d end-t1.e) between b and 11 then b when exists(select 1 from t1 where a<=d) then 11 else 13 end) then cast(avg(t1.b) AS integer)-(cast(avg(11) AS integer)-min(a))-count(distinct -e) else cast(avg(13) AS integer) end*max(b) from t1) or (t1.f not between t1.d and 17 | 19*e))} +} {3413413} +do_test randexpr-2.950 { + db eval {SELECT (t1.b+coalesce((select case when e-13+11 & f+case when not t1.b not between t1.f and t1.f then e else (t1.e) end-c-b+c>=e and d between d and a then t1.c when (t1.c>19) then -11 else a end from t1 where ac then t1.e else -d end-t1.e) between b and 11 then b when exists(select 1 from t1 where a<=d) then 11 else 13 end) then cast(avg(t1.b) AS integer)-(cast(avg(11) AS integer)-min(a))-count(distinct -e) else cast(avg(13) AS integer) end*max(b) from t1) or (t1.f not between t1.d and 17 | 19*e))} +} {3413413} +do_test randexpr-2.951 { + db eval {SELECT coalesce((select max(+19) from t1 where t1.f>=(abs(19)/abs(t1.c*(select count(distinct (abs(t1.f)/abs(t1.c))*coalesce((select max(case when (select count(*) from t1)*t1.f 
in (t1.b,17,t1.f) then d when t1.e not in (t1.b,t1.a,t1.b) then -17 else t1.d end) from t1 where (11 not in (t1.b,(f),t1.c)) and t1.c>=t1.f),e)) from t1))) or ((t1.d<=t1.b))),13) FROM t1 WHERE t1.a<>(coalesce((select coalesce((select max(t1.a) from t1 where case -t1.c when t1.b then coalesce((select t1.b from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where ((select count(*)++count(distinct a) from t1) not between b and +(17))))),19)*t1.e else f end<=(t1.b)),t1.c) from t1 where d<>b),13)-17+a)+d+(19)+a or d between 11 and -11} +} {19} +do_test randexpr-2.952 { + db eval {SELECT coalesce((select max(+19) from t1 where t1.f>=(abs(19)/abs(t1.c*(select count(distinct (abs(t1.f)/abs(t1.c))*coalesce((select max(case when (select count(*) from t1)*t1.f in (t1.b,17,t1.f) then d when t1.e not in (t1.b,t1.a,t1.b) then -17 else t1.d end) from t1 where (11 not in (t1.b,(f),t1.c)) and t1.c>=t1.f),e)) from t1))) or ((t1.d<=t1.b))),13) FROM t1 WHERE NOT (t1.a<>(coalesce((select coalesce((select max(t1.a) from t1 where case -t1.c when t1.b then coalesce((select t1.b from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where ((select count(*)++count(distinct a) from t1) not between b and +(17))))),19)*t1.e else f end<=(t1.b)),t1.c) from t1 where d<>b),13)-17+a)+d+(19)+a or d between 11 and -11)} +} {} +do_test randexpr-2.953 { + db eval {SELECT t1.b-coalesce((select max(~t1.a) from t1 where coalesce((select max(t1.d) from t1 where coalesce((select max(coalesce((select t1.c*t1.d from t1 where not f>=a-t1.c+ -coalesce((select t1.f*e from t1 where (not exists(select 1 from t1 where d<=19))),b)-d),13)* -e) from t1 where (d<19 or 13 not between t1.f and t1.d)),11) between 17 and 19),t1.c)< -b),d) FROM t1 WHERE +case when b in (b+~b+t1.f,coalesce((select max(13) from t1 where exists(select 1 from t1 where 17<>+a or not exists(select 1 from t1 where e+19=f))),(e)),case when c>case when coalesce((select 13 from t1 where f>=t1.b and c not between t1.e and t1.d),11) in (select (case max(11) when (min(b)) then min( -d) else cast(avg(t1.e) AS integer) end) from t1 union select max(19) from t1) then t1.d else 13 end then t1.f else t1.e end) or b>=(t1.f) then t1.e else t1.e end*b>f} +} {-200} +do_test randexpr-2.954 { + db eval {SELECT t1.b-coalesce((select max(~t1.a) from t1 where coalesce((select max(t1.d) from t1 where coalesce((select max(coalesce((select t1.c*t1.d from t1 where not f>=a-t1.c+ -coalesce((select t1.f*e from t1 where (not exists(select 1 from t1 where d<=19))),b)-d),13)* -e) from t1 where (d<19 or 13 not between t1.f and t1.d)),11) between 17 and 19),t1.c)< -b),d) FROM t1 WHERE NOT (+case when b in (b+~b+t1.f,coalesce((select max(13) from t1 where exists(select 1 from t1 where 17<>+a or not exists(select 1 from t1 where e+19=f))),(e)),case when c>case when coalesce((select 13 from t1 where f>=t1.b and c not between t1.e and t1.d),11) in (select (case max(11) when (min(b)) then min( -d) else cast(avg(t1.e) AS integer) end) from t1 union select max(19) from t1) then t1.d else 13 end then t1.f else t1.e end) or b>=(t1.f) then t1.e else t1.e end*b>f)} +} {} +do_test randexpr-2.955 { + db eval {SELECT (select count(distinct + - -f-c-17*a+(t1.d)-+((select abs( - - -count(distinct 13)-count(*)*count(distinct t1.d)*count(*))-cast(avg(f) AS integer) from t1))-~19+13- -d*t1.e*(19)+ -f- -b) from t1)-e FROM t1 WHERE not t1.c between t1.f and coalesce((select coalesce((select case when exists(select 1 from t1 where +~f=case when d<>t1.d then t1.f else -d end and d 
between 17 and t1.c) then ~case when a in (d,c,t1.d) then 17 else t1.f end when f=f then d else -b end from t1 where c>f),e) from t1 where not exists(select 1 from t1 where t1.d<=c) and 19=(11) and t1.e<>19),a)} +} {-499} +do_test randexpr-2.956 { + db eval {SELECT (select count(distinct + - -f-c-17*a+(t1.d)-+((select abs( - - -count(distinct 13)-count(*)*count(distinct t1.d)*count(*))-cast(avg(f) AS integer) from t1))-~19+13- -d*t1.e*(19)+ -f- -b) from t1)-e FROM t1 WHERE NOT (not t1.c between t1.f and coalesce((select coalesce((select case when exists(select 1 from t1 where +~f=case when d<>t1.d then t1.f else -d end and d between 17 and t1.c) then ~case when a in (d,c,t1.d) then 17 else t1.f end when f=f then d else -b end from t1 where c>f),e) from t1 where not exists(select 1 from t1 where t1.d<=c) and 19=(11) and t1.e<>19),a))} +} {} +do_test randexpr-2.957 { + db eval {SELECT case when f-t1.b*coalesce((select max(11) from t1 where exists(select 1 from t1 where (abs(t1.b)/abs(13-11))=(19+c))),coalesce((select t1.c | f from t1 where (abs(c*19)/abs(17))=t1.e),19))-e between t1.f and -((19)) and 13 not between t1.c and 11 and not exists(select 1 from t1 where t1.b>b) then t1.d when t1.c>=13 then c else f end FROM t1 WHERE e-t1.c=+e*~coalesce((select max(t1.f) from t1 where (t1.f in (coalesce((select max(11) from t1 where -13<>(13)-(select (count(*)) from t1)*t1.f and ( -11+t1.b+b*b-(select max(13) from t1)) in (t1.f,a,f)),d),17,13))),t1.f)-e} +} {} +do_test randexpr-2.958 { + db eval {SELECT case when f-t1.b*coalesce((select max(11) from t1 where exists(select 1 from t1 where (abs(t1.b)/abs(13-11))=(19+c))),coalesce((select t1.c | f from t1 where (abs(c*19)/abs(17))=t1.e),19))-e between t1.f and -((19)) and 13 not between t1.c and 11 and not exists(select 1 from t1 where t1.b>b) then t1.d when t1.c>=13 then c else f end FROM t1 WHERE NOT (e-t1.c=+e*~coalesce((select max(t1.f) from t1 where (t1.f in (coalesce((select max(11) from t1 where -13<>(13)-(select (count(*)) from t1)*t1.f and ( -11+t1.b+b*b-(select max(13) from t1)) in (t1.f,a,f)),d),17,13))),t1.f)-e)} +} {300} +do_test randexpr-2.959 { + db eval {SELECT case when f-t1.b*coalesce((select max(11) from t1 where exists(select 1 from t1 where (abs(t1.b)/abs(13-11))=(19+c))),coalesce((select t1.c & f from t1 where (abs(c*19)/abs(17))=t1.e),19))-e between t1.f and -((19)) and 13 not between t1.c and 11 and not exists(select 1 from t1 where t1.b>b) then t1.d when t1.c>=13 then c else f end FROM t1 WHERE NOT (e-t1.c=+e*~coalesce((select max(t1.f) from t1 where (t1.f in (coalesce((select max(11) from t1 where -13<>(13)-(select (count(*)) from t1)*t1.f and ( -11+t1.b+b*b-(select max(13) from t1)) in (t1.f,a,f)),d),17,13))),t1.f)-e)} +} {300} +do_test randexpr-2.960 { + db eval {SELECT t1.e+case when coalesce((select -(abs(t1.e)/abs(t1.c-t1.e)) from t1 where d | d between -t1.d and ~case t1.d when t1.f then ((abs((select ((min(b)+abs(cast(avg(t1.a) AS integer)*(max(19))))-(cast(avg(t1.d) AS integer))) from t1)*~f)/abs(t1.a*t1.c)))*17 else 19 end),f)- -c<>t1.c then f else t1.f end FROM t1 WHERE 19*coalesce((select max(a) from t1 where not (select count(distinct ~t1.b | t1.d) from t1)t1.e then c when -b<11 then t1.c else -t1.e end and not d in ( -t1.d,t1.a,f) and c=t1.f and f<>t1.a),t1.b) between t1.a and b} +} {} +do_test randexpr-2.961 { + db eval {SELECT t1.e+case when coalesce((select -(abs(t1.e)/abs(t1.c-t1.e)) from t1 where d | d between -t1.d and ~case t1.d when t1.f then ((abs((select ((min(b)+abs(cast(avg(t1.a) AS 
integer)*(max(19))))-(cast(avg(t1.d) AS integer))) from t1)*~f)/abs(t1.a*t1.c)))*17 else 19 end),f)- -c<>t1.c then f else t1.f end FROM t1 WHERE NOT (19*coalesce((select max(a) from t1 where not (select count(distinct ~t1.b | t1.d) from t1)t1.e then c when -b<11 then t1.c else -t1.e end and not d in ( -t1.d,t1.a,f) and c=t1.f and f<>t1.a),t1.b) between t1.a and b)} +} {1100} +do_test randexpr-2.962 { + db eval {SELECT t1.e+case when coalesce((select -(abs(t1.e)/abs(t1.c-t1.e)) from t1 where d & d between -t1.d and ~case t1.d when t1.f then ((abs((select ((min(b)+abs(cast(avg(t1.a) AS integer)*(max(19))))-(cast(avg(t1.d) AS integer))) from t1)*~f)/abs(t1.a*t1.c)))*17 else 19 end),f)- -c<>t1.c then f else t1.f end FROM t1 WHERE NOT (19*coalesce((select max(a) from t1 where not (select count(distinct ~t1.b | t1.d) from t1)t1.e then c when -b<11 then t1.c else -t1.e end and not d in ( -t1.d,t1.a,f) and c=t1.f and f<>t1.a),t1.b) between t1.a and b)} +} {1100} +do_test randexpr-2.963 { + db eval {SELECT t1.b*coalesce((select max(e) from t1 where ((t1.d))<=case case when +coalesce((select (abs(t1.a)/abs(c))-(abs( -e)/abs(11* -t1.b+t1.d* -c))-t1.c from t1 where not exists(select 1 from t1 where t1.c=(11))), -11) between 17 and t1.a then t1.a when t1.b>t1.e then (b) else b end-c when d then d else t1.f end),((t1.e))) FROM t1 WHERE ~ -case when not exists(select 1 from t1 where t1.b between b and 11 | d) then -(abs(17+d)/abs(e)) else case when not (t1.d>=case when case a when case t1.f when (e) then 17 else 19 end then 13 else 19 end in (t1.e,b,((t1.b))) then d when t1.d not in (19,t1.a, -c) then 11 else -a end) then 11 else c end end*17*t1.b not in (a,b,13)} +} {100000} +do_test randexpr-2.964 { + db eval {SELECT t1.b*coalesce((select max(e) from t1 where ((t1.d))<=case case when +coalesce((select (abs(t1.a)/abs(c))-(abs( -e)/abs(11* -t1.b+t1.d* -c))-t1.c from t1 where not exists(select 1 from t1 where t1.c=(11))), -11) between 17 and t1.a then t1.a when t1.b>t1.e then (b) else b end-c when d then d else t1.f end),((t1.e))) FROM t1 WHERE NOT (~ -case when not exists(select 1 from t1 where t1.b between b and 11 | d) then -(abs(17+d)/abs(e)) else case when not (t1.d>=case when case a when case t1.f when (e) then 17 else 19 end then 13 else 19 end in (t1.e,b,((t1.b))) then d when t1.d not in (19,t1.a, -c) then 11 else -a end) then 11 else c end end*17*t1.b not in (a,b,13))} +} {} +do_test randexpr-2.965 { + db eval {SELECT e*case +case when not (select max(17) from t1)>=case when not d=c+t1.c then 11 else 11 end or (c in (select t1.c from t1 union select c from t1) or 11<=t1.a) or d<=11 or 19 in ( -13,d,19) or a not between 13 and (t1.c) then case when t1.f<=(t1.a) then t1.e+13*e when (t1.b) not between f and a then c else 11 end*11 else a end when 19 then t1.a else 17 end FROM t1 WHERE b<>17} +} {8500} +do_test randexpr-2.966 { + db eval {SELECT e*case +case when not (select max(17) from t1)>=case when not d=c+t1.c then 11 else 11 end or (c in (select t1.c from t1 union select c from t1) or 11<=t1.a) or d<=11 or 19 in ( -13,d,19) or a not between 13 and (t1.c) then case when t1.f<=(t1.a) then t1.e+13*e when (t1.b) not between f and a then c else 11 end*11 else a end when 19 then t1.a else 17 end FROM t1 WHERE NOT (b<>17)} +} {} +do_test randexpr-2.967 { + db eval {SELECT (abs(case when (13+e in (select min(a) | case abs(min(case when t1.e in (select b from t1 union select a from t1) then t1.e when t1.b=t1.c then t1.b else c end-f)) when ~(cast(avg(c) AS integer)) then min(t1.e) else count(*) end from 
t1 union select min(f) from t1)) and t1.d not between c and 13 then coalesce((select t1.d from t1 where t1.e<>a),19)-t1.d else 19 end)/abs(c))-c+11+c FROM t1 WHERE t1.ea),19)-t1.d else 19 end)/abs(c))-c+11+c FROM t1 WHERE NOT (t1.ea),19)-t1.d else 19 end)/abs(c))-c+11+c FROM t1 WHERE NOT (t1.e=t1.f)} +} {600} +do_test randexpr-2.971 { + db eval {SELECT case when case when not exists(select 1 from t1 where (11 not between ~~t1.d-case c when coalesce((select case f when coalesce((select max(t1.f) from t1 where 11 not in (((13)),d,t1.d)),11) then f else a end from t1 where 11 not between (t1.a) and 17),t1.c)-c*17 then (17) else t1.b end+t1.d+t1.a and d)) then t1.e-11+19 else (d) end*d<=11 then t1.b else f end FROM t1 WHERE NOT (not exists(select 1 from t1 where d>=t1.f))} +} {} +do_test randexpr-2.972 { + db eval {SELECT 13 | t1.c+case when (not t1.d-c between +t1.b and -coalesce((select (select count(distinct 17) from t1)*a from t1 where -case when not t1.f in (select count(distinct 13) from t1 union select min( -t1.a) from t1) then case when (t1.e)< -11 then -c else t1.a end else d end-c*t1.a-d+ -e-t1.a not in (17,e,t1.a)),b) | t1.f) then c else 11 end FROM t1 WHERE case when (+e | (t1.e) | 11)<>b then t1.b when t1.e<>~(case when (t1.a*b) in (select d from t1 union select b from t1) or t1.c not in (t1.a,t1.d,13) and t1.f in (select case min(19) when min(17) then min(19) else count(distinct t1.e) end from t1 union select (count(*)) from t1) then t1.f else b end)+11 then b else 13 end+t1.d between b and 17} +} {} +do_test randexpr-2.973 { + db eval {SELECT 13 | t1.c+case when (not t1.d-c between +t1.b and -coalesce((select (select count(distinct 17) from t1)*a from t1 where -case when not t1.f in (select count(distinct 13) from t1 union select min( -t1.a) from t1) then case when (t1.e)< -11 then -c else t1.a end else d end-c*t1.a-d+ -e-t1.a not in (17,e,t1.a)),b) | t1.f) then c else 11 end FROM t1 WHERE NOT (case when (+e | (t1.e) | 11)<>b then t1.b when t1.e<>~(case when (t1.a*b) in (select d from t1 union select b from t1) or t1.c not in (t1.a,t1.d,13) and t1.f in (select case min(19) when min(17) then min(19) else count(distinct t1.e) end from t1 union select (count(*)) from t1) then t1.f else b end)+11 then b else 13 end+t1.d between b and 17)} +} {605} +do_test randexpr-2.974 { + db eval {SELECT 13 & t1.c+case when (not t1.d-c between +t1.b and -coalesce((select (select count(distinct 17) from t1)*a from t1 where -case when not t1.f in (select count(distinct 13) from t1 union select min( -t1.a) from t1) then case when (t1.e)< -11 then -c else t1.a end else d end-c*t1.a-d+ -e-t1.a not in (17,e,t1.a)),b) & t1.f) then c else 11 end FROM t1 WHERE NOT (case when (+e | (t1.e) | 11)<>b then t1.b when t1.e<>~(case when (t1.a*b) in (select d from t1 union select b from t1) or t1.c not in (t1.a,t1.d,13) and t1.f in (select case min(19) when min(17) then min(19) else count(distinct t1.e) end from t1 union select (count(*)) from t1) then t1.f else b end)+11 then b else 13 end+t1.d between b and 17)} +} {8} +do_test randexpr-2.975 { + db eval {SELECT case when case when t1.a<>t1.d then d*t1.a*11-e-(+11+d)-e else (select case -count(distinct t1.e) when cast(avg(b) AS integer) then count(distinct t1.d) else cast(avg(13) AS integer) end*cast(avg(19) AS integer) from t1) end<>case e when e then t1.e else (t1.f) end or not exists(select 1 from t1 where 19=f) and -t1.d>t1.e then t1.e+e*e when 11=t1.c then e else c end FROM t1 WHERE ~c>(case when t1.e<=e then case when case when 17+f in (d,t1.d | t1.c,t1.b) 
and 13<>19 or not t1.ct1.d then d*t1.a*11-e-(+11+d)-e else (select case -count(distinct t1.e) when cast(avg(b) AS integer) then count(distinct t1.d) else cast(avg(13) AS integer) end*cast(avg(19) AS integer) from t1) end<>case e when e then t1.e else (t1.f) end or not exists(select 1 from t1 where 19=f) and -t1.d>t1.e then t1.e+e*e when 11=t1.c then e else c end FROM t1 WHERE NOT (~c>(case when t1.e<=e then case when case when 17+f in (d,t1.d | t1.c,t1.b) and 13<>19 or not t1.c=t1.f))) and t1.c not in (17,b,e) then a else (select count(distinct t1.d) from t1) end FROM t1 WHERE (select (count(*)) from t1) between 11 and t1.d} +} {} +do_test randexpr-2.981 { + db eval {SELECT ~case when exists(select 1 from t1 where ((coalesce((select d from t1 where t1.e-f | +t1.d between t1.c and t1.a or exists(select 1 from t1 where 19 in (11,11,13)) and 17 in (select case min(d) when count(*) then (min(13)) else -(cast(avg( -c) AS integer)) end*count(distinct 19) from t1 union select count(distinct e) from t1)),f) | c*b<= -b or not not t1.c<13 or (t1.f)>=t1.f))) and t1.c not in (17,b,e) then a else (select count(distinct t1.d) from t1) end FROM t1 WHERE NOT ((select (count(*)) from t1) between 11 and t1.d)} +} {-101} +do_test randexpr-2.982 { + db eval {SELECT ~case when exists(select 1 from t1 where ((coalesce((select d from t1 where t1.e-f & +t1.d between t1.c and t1.a or exists(select 1 from t1 where 19 in (11,11,13)) and 17 in (select case min(d) when count(*) then (min(13)) else -(cast(avg( -c) AS integer)) end*count(distinct 19) from t1 union select count(distinct e) from t1)),f) & c*b<= -b or not not t1.c<13 or (t1.f)>=t1.f))) and t1.c not in (17,b,e) then a else (select count(distinct t1.d) from t1) end FROM t1 WHERE NOT ((select (count(*)) from t1) between 11 and t1.d)} +} {-101} +do_test randexpr-2.983 { + db eval {SELECT coalesce((select max(b) from t1 where not not c*t1.e>=19 or case when 17<=t1.b and case when 13 not between d and -f then case e when 11 then case when e in (select ( -d) from t1 union select e from t1) and 19<=(a) then t1.a else (19) end else f end else t1.a end+t1.a<19 then ~d when e in (a,11, -b) then b else e end not between f and 13),a) FROM t1 WHERE t1.a>=+a} +} {200} +do_test randexpr-2.984 { + db eval {SELECT coalesce((select max(b) from t1 where not not c*t1.e>=19 or case when 17<=t1.b and case when 13 not between d and -f then case e when 11 then case when e in (select ( -d) from t1 union select e from t1) and 19<=(a) then t1.a else (19) end else f end else t1.a end+t1.a<19 then ~d when e in (a,11, -b) then b else e end not between f and 13),a) FROM t1 WHERE NOT (t1.a>=+a)} +} {} +do_test randexpr-2.985 { + db eval {SELECT case when case when 11 between a+a+t1.b and b then c*e-t1.c*t1.a when +b<= -t1.c-t1.e-19*t1.b+c*t1.b then e else d end-(11)-e=11) or t1.c not between t1.d and - -11 then t1.a when 11 not between -t1.e and t1.a then 11 else t1.b end when (d not in (a,c,19)) then f else c end*17 from t1 where -11<=t1.c),t1.c)+f not in (t1.f, -d,19))} +} {} +do_test randexpr-2.988 { + db eval {SELECT c-case 17 when c+b-t1.a-a*coalesce((select max(t1.c) from t1 where t1.b- -(abs(13)/abs(t1.e)) in (case t1.f when f then coalesce((select max(coalesce((select 13 from t1 where exists(select 1 from t1 where f not between e and c)),e)) from t1 where d not between t1.a and 13),d)-b else a end, -13,(t1.f)) and 13 between e and 17),a) | a then t1.d else e end+ -t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where coalesce((select -+t1.c | case when a in (select case 
max(+b) when ~count(*) then count(*) else (abs((max(f)))) end from t1 union select -max(e) from t1) then case when (t1.d>=11) or t1.c not between t1.d and - -11 then t1.a when 11 not between -t1.e and t1.a then 11 else t1.b end when (d not in (a,c,19)) then f else c end*17 from t1 where -11<=t1.c),t1.c)+f not in (t1.f, -d,19)))} +} {-700} +do_test randexpr-2.989 { + db eval {SELECT c-case 17 when c+b-t1.a-a*coalesce((select max(t1.c) from t1 where t1.b- -(abs(13)/abs(t1.e)) in (case t1.f when f then coalesce((select max(coalesce((select 13 from t1 where exists(select 1 from t1 where f not between e and c)),e)) from t1 where d not between t1.a and 13),d)-b else a end, -13,(t1.f)) and 13 between e and 17),a) & a then t1.d else e end+ -t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where coalesce((select -+t1.c | case when a in (select case max(+b) when ~count(*) then count(*) else (abs((max(f)))) end from t1 union select -max(e) from t1) then case when (t1.d>=11) or t1.c not between t1.d and - -11 then t1.a when 11 not between -t1.e and t1.a then 11 else t1.b end when (d not in (a,c,19)) then f else c end*17 from t1 where -11<=t1.c),t1.c)+f not in (t1.f, -d,19)))} +} {-700} +do_test randexpr-2.990 { + db eval {SELECT (select ( -count(distinct case case a when e then c*case when e<=t1.d+b then a when (abs(t1.f)/abs(case when b-coalesce((select max(t1.f-t1.b) from t1 where not exists(select 1 from t1 where 17 in (b,13,(b)) or b in (d,c,t1.e))),t1.b) not in (d,((c)),t1.b) then -19 when c between a and 13 then 19 else t1.f end))=13 then f else 13 end else 17 end when 13 then t1.f else t1.b end-t1.c)) from t1) FROM t1 WHERE coalesce((select max(t1.d) from t1 where t1.c<>t1.c-13),t1.a)>=d or b*coalesce((select max((d)) from t1 where c not between +coalesce((select max(13) from t1 where e between -(abs(( -coalesce((select max(case f when t1.f then e else -t1.c end) from t1 where 11>e and t1.bt1.c-13),t1.a)>=d or b*coalesce((select max((d)) from t1 where c not between +coalesce((select max(13) from t1 where e between -(abs(( -coalesce((select max(case f when t1.f then e else -t1.c end) from t1 where 11>e and t1.bb then t1.a else e end | f from t1 where 13 in (select abs(~max((17))) from t1 union select cast(avg(t1.b) AS integer)-min(19) from t1)), -t1.a) when c+case ~e+~t1.d | 13 when case when t1.c<=t1.c or (11)=t1.a then 11 when -11 in (17,11, -d) then t1.a else t1.b end then 19 else 13 end* -t1.c} +} {100500} +do_test randexpr-2.993 { + db eval {SELECT case when (11=(t1.b)) then t1.c-coalesce((select case when t1.f*c-b<>b then t1.a else e end | f from t1 where 13 in (select abs(~max((17))) from t1 union select cast(avg(t1.b) AS integer)-min(19) from t1)), -t1.a) when c+case ~e+~t1.d | 13 when case when t1.c<=t1.c or (11)=t1.a then 11 when -11 in (17,11, -d) then t1.a else t1.b end then 19 else 13 end* -t1.c)} +} {} +do_test randexpr-2.994 { + db eval {SELECT case when (11=(t1.b)) then t1.c-coalesce((select case when t1.f*c-b<>b then t1.a else e end & f from t1 where 13 in (select abs(~max((17))) from t1 union select cast(avg(t1.b) AS integer)-min(19) from t1)), -t1.a) when c+case ~e+~t1.d | 13 when case when t1.c<=t1.c or (11)=t1.a then 11 when -11 in (17,11, -d) then t1.a else t1.b end then 19 else 13 end* -t1.c} +} {100500} +do_test randexpr-2.995 { + db eval {SELECT case when (f in (select count(distinct 17) from t1 union select count(distinct t1.b) from t1)) then d when e in (coalesce((select max(e+e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where 11-t1.a 
in (select case -max(11)-max(b) when min(t1.f) then count(distinct f) else count(distinct t1.c) end | count(*) from t1 union select min( -a) from t1)) and t1.e in (select count(distinct -t1.e) from t1 union select count(distinct a) from t1) and d not in (b,f,t1.d)) and a between t1.c and t1.e),f),c,11) then c else t1.f end | a FROM t1 WHERE coalesce((select a from t1 where t1.a | (17-coalesce((select t1.b from t1 where ~t1.f | t1.e=d and a<=19+(select (cast(avg(c) AS integer)) from t1)),t1.c))+t1.e*t1.d+t1.c not between t1.d and 19), -t1.d) in (select (count(*)) from t1 union select (++~min(c)*+max(t1.a)+cast(avg( -(t1.e)) AS integer)) from t1)} +} {} +do_test randexpr-2.996 { + db eval {SELECT case when (f in (select count(distinct 17) from t1 union select count(distinct t1.b) from t1)) then d when e in (coalesce((select max(e+e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where 11-t1.a in (select case -max(11)-max(b) when min(t1.f) then count(distinct f) else count(distinct t1.c) end | count(*) from t1 union select min( -a) from t1)) and t1.e in (select count(distinct -t1.e) from t1 union select count(distinct a) from t1) and d not in (b,f,t1.d)) and a between t1.c and t1.e),f),c,11) then c else t1.f end | a FROM t1 WHERE NOT (coalesce((select a from t1 where t1.a | (17-coalesce((select t1.b from t1 where ~t1.f | t1.e=d and a<=19+(select (cast(avg(c) AS integer)) from t1)),t1.c))+t1.e*t1.d+t1.c not between t1.d and 19), -t1.d) in (select (count(*)) from t1 union select (++~min(c)*+max(t1.a)+cast(avg( -(t1.e)) AS integer)) from t1))} +} {636} +do_test randexpr-2.997 { + db eval {SELECT case when (f in (select count(distinct 17) from t1 union select count(distinct t1.b) from t1)) then d when e in (coalesce((select max(e+e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where 11-t1.a in (select case -max(11)-max(b) when min(t1.f) then count(distinct f) else count(distinct t1.c) end & count(*) from t1 union select min( -a) from t1)) and t1.e in (select count(distinct -t1.e) from t1 union select count(distinct a) from t1) and d not in (b,f,t1.d)) and a between t1.c and t1.e),f),c,11) then c else t1.f end & a FROM t1 WHERE NOT (coalesce((select a from t1 where t1.a | (17-coalesce((select t1.b from t1 where ~t1.f | t1.e=d and a<=19+(select (cast(avg(c) AS integer)) from t1)),t1.c))+t1.e*t1.d+t1.c not between t1.d and 19), -t1.d) in (select (count(*)) from t1 union select (++~min(c)*+max(t1.a)+cast(avg( -(t1.e)) AS integer)) from t1))} +} {64} +do_test randexpr-2.998 { + db eval {SELECT case when coalesce((select t1.b from t1 where a not in (19,coalesce((select max(t1.f) from t1 where (select count(distinct t1.b) from t1) in (t1.e,t1.f*13-11,d)),(t1.f))-17*c,c) or t1.c<=11),19)>13 and f not in (t1.a,11,t1.d) then (((e))) when f=11 or f not in (a,19,17) then 13 else e end FROM t1 WHERE c in (select min(t1.a*a+(select count(distinct t1.b+(t1.a)- - -c) from t1)-t1.e) from t1 union select count(*) from t1) and c in (select min(t1.d) from t1 union select cast(avg(d) AS integer)+abs(case count(*) when count(distinct 11) then -count(distinct 19) else count(distinct (t1.b)) end) | cast(avg((t1.c)) AS integer)+count(*) from t1) and e=13 and t1.d>c or 13 not in (17,(t1.f),t1.a) or (11)=t1.b and t1.e>=a or 11<=t1.b or d<=17} +} {500} +do_test randexpr-2.999 { + db eval {SELECT case when coalesce((select t1.b from t1 where a not in (19,coalesce((select max(t1.f) from t1 where (select count(distinct t1.b) from t1) in (t1.e,t1.f*13-11,d)),(t1.f))-17*c,c) or 
t1.c<=11),19)>13 and f not in (t1.a,11,t1.d) then (((e))) when f=11 or f not in (a,19,17) then 13 else e end FROM t1 WHERE NOT (c in (select min(t1.a*a+(select count(distinct t1.b+(t1.a)- - -c) from t1)-t1.e) from t1 union select count(*) from t1) and c in (select min(t1.d) from t1 union select cast(avg(d) AS integer)+abs(case count(*) when count(distinct 11) then -count(distinct 19) else count(distinct (t1.b)) end) | cast(avg((t1.c)) AS integer)+count(*) from t1) and e=13 and t1.d>c or 13 not in (17,(t1.f),t1.a) or (11)=t1.b and t1.e>=a or 11<=t1.b or d<=17)} +} {} +do_test randexpr-2.1000 { + db eval {SELECT case when t1.d not between (abs(d)/abs(t1.e))+t1.f*t1.a*a and 17*e then case when 11 between a and coalesce((select max(t1.b) from t1 where (case when t1.e+(t1.b) not in (t1.e,d,f) or 13<>17 then 17 when a>e then 11 else a end in (select count(*) from t1 union select min(a) from t1))),17) then 19 when -11 in (select t1.c from t1 union select -19 from t1) or - - -t1.e=t1.a then 17 else t1.a end else t1.e end FROM t1 WHERE (select +count(distinct f) from t1)-t1.f between t1.f and c} +} {} +do_test randexpr-2.1001 { + db eval {SELECT case when t1.d not between (abs(d)/abs(t1.e))+t1.f*t1.a*a and 17*e then case when 11 between a and coalesce((select max(t1.b) from t1 where (case when t1.e+(t1.b) not in (t1.e,d,f) or 13<>17 then 17 when a>e then 11 else a end in (select count(*) from t1 union select min(a) from t1))),17) then 19 when -11 in (select t1.c from t1 union select -19 from t1) or - - -t1.e=t1.a then 17 else t1.a end else t1.e end FROM t1 WHERE NOT ((select +count(distinct f) from t1)-t1.f between t1.f and c)} +} {100} +do_test randexpr-2.1002 { + db eval {SELECT case when (case when e between t1.b and c+(select cast(avg( -case c when a then t1.f+c else 17 end) AS integer) from t1) then t1.e when not 19>=t1.a then 17 else d end in (select cast(avg(13) AS integer)-max(11) | +count(*) from t1 union select case (cast(avg(d) AS integer)) when min(a) then abs( -max(19)) else count(*) end from t1)) then (t1.a) else c end | -e*a FROM t1 WHERE f*t1.d*(abs(case d when coalesce((select max((abs(t1.e)/abs(t1.f))) from t1 where ((e between (b) and 13) or t1.d=(c)) and t1.f between 17 and t1.e),a) then -b else -e end+ -t1.e)/abs(t1.c))+f<=17 or t1.f not between f and t1.e or (t1.c in (select 17 from t1 union select f from t1)) or exists(select 1 from t1 where (a not in (d,t1.a,t1.e) and t1.a in (t1.a,c,19))) and f not in (t1.d,t1.c,c)} +} {-49732} +do_test randexpr-2.1003 { + db eval {SELECT case when (case when e between t1.b and c+(select cast(avg( -case c when a then t1.f+c else 17 end) AS integer) from t1) then t1.e when not 19>=t1.a then 17 else d end in (select cast(avg(13) AS integer)-max(11) | +count(*) from t1 union select case (cast(avg(d) AS integer)) when min(a) then abs( -max(19)) else count(*) end from t1)) then (t1.a) else c end | -e*a FROM t1 WHERE NOT (f*t1.d*(abs(case d when coalesce((select max((abs(t1.e)/abs(t1.f))) from t1 where ((e between (b) and 13) or t1.d=(c)) and t1.f between 17 and t1.e),a) then -b else -e end+ -t1.e)/abs(t1.c))+f<=17 or t1.f not between f and t1.e or (t1.c in (select 17 from t1 union select f from t1)) or exists(select 1 from t1 where (a not in (d,t1.a,t1.e) and t1.a in (t1.a,c,19))) and f not in (t1.d,t1.c,c))} +} {} +do_test randexpr-2.1004 { + db eval {SELECT case when (case when e between t1.b and c+(select cast(avg( -case c when a then t1.f+c else 17 end) AS integer) from t1) then t1.e when not 19>=t1.a then 17 else d end in (select 
cast(avg(13) AS integer)-max(11) & +count(*) from t1 union select case (cast(avg(d) AS integer)) when min(a) then abs( -max(19)) else count(*) end from t1)) then (t1.a) else c end & -e*a FROM t1 WHERE f*t1.d*(abs(case d when coalesce((select max((abs(t1.e)/abs(t1.f))) from t1 where ((e between (b) and 13) or t1.d=(c)) and t1.f between 17 and t1.e),a) then -b else -e end+ -t1.e)/abs(t1.c))+f<=17 or t1.f not between f and t1.e or (t1.c in (select 17 from t1 union select f from t1)) or exists(select 1 from t1 where (a not in (d,t1.a,t1.e) and t1.a in (t1.a,c,19))) and f not in (t1.d,t1.c,c)} +} {32} +do_test randexpr-2.1005 { + db eval {SELECT -case coalesce((select case when f<>t1.d | c-(e)*t1.b and 17 in (select t1.b from t1 union select -(t1.c) from t1) or t1.e>=11 and t1.f not between b and f or e not in (11,19,t1.e) and t1.c<=a then coalesce((select max(t1.f+t1.f) from t1 where -t1.a=13),e) when t1.e then c else t1.a end FROM t1 WHERE coalesce((select t1.f from t1 where (t1.e>=case (abs(11)/abs(t1.a)) when 11 then case t1.c when t1.a then t1.e else a end else t1.f end+t1.d) and (exists(select 1 from t1 where t1.b not in (t1.b,a,19))) or t1.a>=11),(select abs((~cast(avg(t1.b) AS integer)-~cast(avg((t1.b)) AS integer)* -count(*))+ -count(distinct t1.b)) from t1)*19)<=f and b in (select count(*) from t1 union select -min(d) from t1)} +} {} +do_test randexpr-2.1006 { + db eval {SELECT -case coalesce((select case when f<>t1.d | c-(e)*t1.b and 17 in (select t1.b from t1 union select -(t1.c) from t1) or t1.e>=11 and t1.f not between b and f or e not in (11,19,t1.e) and t1.c<=a then coalesce((select max(t1.f+t1.f) from t1 where -t1.a=13),e) when t1.e then c else t1.a end FROM t1 WHERE NOT (coalesce((select t1.f from t1 where (t1.e>=case (abs(11)/abs(t1.a)) when 11 then case t1.c when t1.a then t1.e else a end else t1.f end+t1.d) and (exists(select 1 from t1 where t1.b not in (t1.b,a,19))) or t1.a>=11),(select abs((~cast(avg(t1.b) AS integer)-~cast(avg((t1.b)) AS integer)* -count(*))+ -count(distinct t1.b)) from t1)*19)<=f and b in (select count(*) from t1 union select -min(d) from t1))} +} {-100} +do_test randexpr-2.1007 { + db eval {SELECT -case coalesce((select case when f<>t1.d & c-(e)*t1.b and 17 in (select t1.b from t1 union select -(t1.c) from t1) or t1.e>=11 and t1.f not between b and f or e not in (11,19,t1.e) and t1.c<=a then coalesce((select max(t1.f+t1.f) from t1 where -t1.a=13),e) when t1.e then c else t1.a end FROM t1 WHERE NOT (coalesce((select t1.f from t1 where (t1.e>=case (abs(11)/abs(t1.a)) when 11 then case t1.c when t1.a then t1.e else a end else t1.f end+t1.d) and (exists(select 1 from t1 where t1.b not in (t1.b,a,19))) or t1.a>=11),(select abs((~cast(avg(t1.b) AS integer)-~cast(avg((t1.b)) AS integer)* -count(*))+ -count(distinct t1.b)) from t1)*19)<=f and b in (select count(*) from t1 union select -min(d) from t1))} +} {-100} +do_test randexpr-2.1008 { + db eval {SELECT case when exists(select 1 from t1 where exists(select 1 from t1 where -case when t1.d<=coalesce((select t1.d | (f)*t1.a from t1 where not exists(select 1 from t1 where t1.f not in ((e),t1.e,13) or t1.a not between a and -t1.d or (f) not between f and 13)),19) then e when (t1.f)<19 or e<=d then b else d end in (select case ~ -(cast(avg(b) AS integer)*cast(avg(t1.b) AS integer)*min(t1.f)) | count(*) when min(t1.c) then ( - -count(distinct -d)) else count(*) end from t1 union select cast(avg(f) AS integer) from t1) and bt1.b or 19<>t1.f} +} {400} +do_test randexpr-2.1009 { + db eval {SELECT case when 
exists(select 1 from t1 where exists(select 1 from t1 where -case when t1.d<=coalesce((select t1.d | (f)*t1.a from t1 where not exists(select 1 from t1 where t1.f not in ((e),t1.e,13) or t1.a not between a and -t1.d or (f) not between f and 13)),19) then e when (t1.f)<19 or e<=d then b else d end in (select case ~ -(cast(avg(b) AS integer)*cast(avg(t1.b) AS integer)*min(t1.f)) | count(*) when min(t1.c) then ( - -count(distinct -d)) else count(*) end from t1 union select cast(avg(f) AS integer) from t1) and bt1.b or 19<>t1.f)} +} {} +do_test randexpr-2.1010 { + db eval {SELECT case when exists(select 1 from t1 where exists(select 1 from t1 where -case when t1.d<=coalesce((select t1.d & (f)*t1.a from t1 where not exists(select 1 from t1 where t1.f not in ((e),t1.e,13) or t1.a not between a and -t1.d or (f) not between f and 13)),19) then e when (t1.f)<19 or e<=d then b else d end in (select case ~ -(cast(avg(b) AS integer)*cast(avg(t1.b) AS integer)*min(t1.f)) & count(*) when min(t1.c) then ( - -count(distinct -d)) else count(*) end from t1 union select cast(avg(f) AS integer) from t1) and bt1.b or 19<>t1.f} +} {400} +do_test randexpr-2.1011 { + db eval {SELECT case when ~+11<>f then case e when 11 then coalesce((select case d when t1.e-coalesce((select a+t1.c from t1 where (d) in (select +min(t1.f)+count(*) from t1 union select count(distinct t1.e) from t1)),t1.f) then t1.c else d end from t1 where t1.f in (select t1.f from t1 union select b from t1) or (t1.a not in ( -a,t1.a,t1.b))),t1.c) else t1.f end-t1.f when not exists(select 1 from t1 where b between f and t1.a) then t1.c else t1.f end FROM t1 WHERE t1.a<=coalesce((select max(b) from t1 where coalesce((select t1.c*case when coalesce((select max((select count(*) from t1)) from t1 where not not exists(select 1 from t1 where case t1.b | a when t1.f then c else 11 end<>f and 13>=13 and f between t1.b and c)),13)=case e when a then 19 else d end then t1.b when t1.a not in (a,t1.b,a) then e else b end-d from t1 where t1.f between t1.d and t1.e),e)>t1.f),17)+t1.f} +} {0} +do_test randexpr-2.1012 { + db eval {SELECT case when ~+11<>f then case e when 11 then coalesce((select case d when t1.e-coalesce((select a+t1.c from t1 where (d) in (select +min(t1.f)+count(*) from t1 union select count(distinct t1.e) from t1)),t1.f) then t1.c else d end from t1 where t1.f in (select t1.f from t1 union select b from t1) or (t1.a not in ( -a,t1.a,t1.b))),t1.c) else t1.f end-t1.f when not exists(select 1 from t1 where b between f and t1.a) then t1.c else t1.f end FROM t1 WHERE NOT (t1.a<=coalesce((select max(b) from t1 where coalesce((select t1.c*case when coalesce((select max((select count(*) from t1)) from t1 where not not exists(select 1 from t1 where case t1.b | a when t1.f then c else 11 end<>f and 13>=13 and f between t1.b and c)),13)=case e when a then 19 else d end then t1.b when t1.a not in (a,t1.b,a) then e else b end-d from t1 where t1.f between t1.d and t1.e),e)>t1.f),17)+t1.f)} +} {} +do_test randexpr-2.1013 { + db eval {SELECT (abs(d)/abs(t1.f++(select cast(avg(coalesce((select max(b) from t1 where case when t1.a*+(abs( -t1.b)/abs((select cast(avg(13) AS integer) from t1)))<=(t1.c-t1.d- -t1.c) then case d when 17 then -t1.d else f end when t1.b between t1.b and t1.b then a else t1.d end in (select count(*) from t1 union select abs(cast(avg(13) AS integer)) | min(t1.c) from t1) or ((13))>=b),17)) AS integer) from t1)+13)) FROM t1 WHERE coalesce((select +t1.a from t1 where not exists(select 1 from t1 where (select cast(avg(13) AS integer) from 
t1)<=(abs( -11)/abs(t1.f))*coalesce((select t1.e from t1 where not t1.f in (select max(+b-t1.d) from t1 union select + -(abs(cast(avg((t1.b)) AS integer)))-+count(*)-(min(13)) from t1)),d+t1.d*t1.d) | (17)-t1.b)),f) in (select max(t1.c) from t1 union select count(distinct f) from t1)} +} {} +do_test randexpr-2.1014 { + db eval {SELECT (abs(d)/abs(t1.f++(select cast(avg(coalesce((select max(b) from t1 where case when t1.a*+(abs( -t1.b)/abs((select cast(avg(13) AS integer) from t1)))<=(t1.c-t1.d- -t1.c) then case d when 17 then -t1.d else f end when t1.b between t1.b and t1.b then a else t1.d end in (select count(*) from t1 union select abs(cast(avg(13) AS integer)) | min(t1.c) from t1) or ((13))>=b),17)) AS integer) from t1)+13)) FROM t1 WHERE NOT (coalesce((select +t1.a from t1 where not exists(select 1 from t1 where (select cast(avg(13) AS integer) from t1)<=(abs( -11)/abs(t1.f))*coalesce((select t1.e from t1 where not t1.f in (select max(+b-t1.d) from t1 union select + -(abs(cast(avg((t1.b)) AS integer)))-+count(*)-(min(13)) from t1)),d+t1.d*t1.d) | (17)-t1.b)),f) in (select max(t1.c) from t1 union select count(distinct f) from t1))} +} {0} +do_test randexpr-2.1015 { + db eval {SELECT (abs(d)/abs(t1.f++(select cast(avg(coalesce((select max(b) from t1 where case when t1.a*+(abs( -t1.b)/abs((select cast(avg(13) AS integer) from t1)))<=(t1.c-t1.d- -t1.c) then case d when 17 then -t1.d else f end when t1.b between t1.b and t1.b then a else t1.d end in (select count(*) from t1 union select abs(cast(avg(13) AS integer)) & min(t1.c) from t1) or ((13))>=b),17)) AS integer) from t1)+13)) FROM t1 WHERE NOT (coalesce((select +t1.a from t1 where not exists(select 1 from t1 where (select cast(avg(13) AS integer) from t1)<=(abs( -11)/abs(t1.f))*coalesce((select t1.e from t1 where not t1.f in (select max(+b-t1.d) from t1 union select + -(abs(cast(avg((t1.b)) AS integer)))-+count(*)-(min(13)) from t1)),d+t1.d*t1.d) | (17)-t1.b)),f) in (select max(t1.c) from t1 union select count(distinct f) from t1))} +} {0} +do_test randexpr-2.1016 { + db eval {SELECT case t1.a when (select ~max(coalesce((select (t1.b-a*+t1.c-t1.e) from t1 where not 11*t1.f++t1.f-t1.a*f*+t1.e*t1.b>17 and 19+e not in (t1.c,t1.b,a)),19)) from t1) then 19*11 else d end | (17) | t1.d FROM t1 WHERE exists(select 1 from t1 where coalesce((select max(13) from t1 where t1.e not in (d,13-19,17) or e*19=d or ((t1.a< -t1.a or 11- -t1.d-case when 13<( -t1.e) then a else e end*t1.b+t1.e in (select t1.d from t1 union select t1.e from t1) and 19<>d))),b)+11*11=t1.e)} +} {} +do_test randexpr-2.1017 { + db eval {SELECT case t1.a when (select ~max(coalesce((select (t1.b-a*+t1.c-t1.e) from t1 where not 11*t1.f++t1.f-t1.a*f*+t1.e*t1.b>17 and 19+e not in (t1.c,t1.b,a)),19)) from t1) then 19*11 else d end | (17) | t1.d FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select max(13) from t1 where t1.e not in (d,13-19,17) or e*19=d or ((t1.a< -t1.a or 11- -t1.d-case when 13<( -t1.e) then a else e end*t1.b+t1.e in (select t1.d from t1 union select t1.e from t1) and 19<>d))),b)+11*11=t1.e))} +} {401} +do_test randexpr-2.1018 { + db eval {SELECT case t1.a when (select ~max(coalesce((select (t1.b-a*+t1.c-t1.e) from t1 where not 11*t1.f++t1.f-t1.a*f*+t1.e*t1.b>17 and 19+e not in (t1.c,t1.b,a)),19)) from t1) then 19*11 else d end & (17) & t1.d FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select max(13) from t1 where t1.e not in (d,13-19,17) or e*19=d or ((t1.a< -t1.a or 11- -t1.d-case when 13<( -t1.e) then a else e end*t1.b+t1.e in (select 
t1.d from t1 union select t1.e from t1) and 19<>d))),b)+11*11=t1.e))} +} {16} +do_test randexpr-2.1019 { + db eval {SELECT t1.a-coalesce((select 11-case when 11<19-f+t1.a then a | (t1.a+++(select cast(avg((abs(t1.d | e)/abs((abs(t1.a)/abs(13))))) AS integer)-max(a) from t1)+e+13*19+t1.a-c) else 13 end+t1.e-19 from t1 where 13>t1.f),e) FROM t1 WHERE ~(abs((select max( -case when d+(11)*d between coalesce((select 11 from t1 where (t1.f)=e),d*f) and t1.e then d-case when a<> -17+(abs(t1.d)/abs(t1.f)) then t1.f else t1.e end*11+c-(c) else d end) from t1)*t1.e)/abs((t1.f)))-f<>11} +} {-400} +do_test randexpr-2.1020 { + db eval {SELECT t1.a-coalesce((select 11-case when 11<19-f+t1.a then a | (t1.a+++(select cast(avg((abs(t1.d | e)/abs((abs(t1.a)/abs(13))))) AS integer)-max(a) from t1)+e+13*19+t1.a-c) else 13 end+t1.e-19 from t1 where 13>t1.f),e) FROM t1 WHERE NOT (~(abs((select max( -case when d+(11)*d between coalesce((select 11 from t1 where (t1.f)=e),d*f) and t1.e then d-case when a<> -17+(abs(t1.d)/abs(t1.f)) then t1.f else t1.e end*11+c-(c) else d end) from t1)*t1.e)/abs((t1.f)))-f<>11)} +} {} +do_test randexpr-2.1021 { + db eval {SELECT t1.a-coalesce((select 11-case when 11<19-f+t1.a then a & (t1.a+++(select cast(avg((abs(t1.d & e)/abs((abs(t1.a)/abs(13))))) AS integer)-max(a) from t1)+e+13*19+t1.a-c) else 13 end+t1.e-19 from t1 where 13>t1.f),e) FROM t1 WHERE ~(abs((select max( -case when d+(11)*d between coalesce((select 11 from t1 where (t1.f)=e),d*f) and t1.e then d-case when a<> -17+(abs(t1.d)/abs(t1.f)) then t1.f else t1.e end*11+c-(c) else d end) from t1)*t1.e)/abs((t1.f)))-f<>11} +} {-400} +do_test randexpr-2.1022 { + db eval {SELECT d | (select (cast(avg((abs(case when b*(abs((abs(case f when 17 then t1.e else t1.c end)/abs( -d)))/abs(t1.c)) in (d,a,a) then 19 when (exists(select 1 from t1 where t1.c=t1.a) or a<>d) then a else c end)/abs(t1.e))-17) AS integer)* -min(19)-count(distinct a)-+count(distinct 17)*min(t1.a)+ -cast(avg(t1.a) AS integer)- -max(17) | max(e)) from t1) FROM t1 WHERE -f-t1.f++e+case when not (((select abs(count(*)) from t1)) not in ((case when (t1.b>t1.c) then t1.b else f end),e,13)) and not exists(select 1 from t1 where 11 in (select t1.f from t1 union select ((t1.f)) from t1)) then (t1.e)+c-c+c when -b in (13,19,t1.c) then 17 else 13 end-d | t1.a not in (a,b,b)} +} {511} +do_test randexpr-2.1023 { + db eval {SELECT d | (select (cast(avg((abs(case when b*(abs((abs(case f when 17 then t1.e else t1.c end)/abs( -d)))/abs(t1.c)) in (d,a,a) then 19 when (exists(select 1 from t1 where t1.c=t1.a) or a<>d) then a else c end)/abs(t1.e))-17) AS integer)* -min(19)-count(distinct a)-+count(distinct 17)*min(t1.a)+ -cast(avg(t1.a) AS integer)- -max(17) | max(e)) from t1) FROM t1 WHERE NOT ( -f-t1.f++e+case when not (((select abs(count(*)) from t1)) not in ((case when (t1.b>t1.c) then t1.b else f end),e,13)) and not exists(select 1 from t1 where 11 in (select t1.f from t1 union select ((t1.f)) from t1)) then (t1.e)+c-c+c when -b in (13,19,t1.c) then 17 else 13 end-d | t1.a not in (a,b,b))} +} {} +do_test randexpr-2.1024 { + db eval {SELECT d & (select (cast(avg((abs(case when b*(abs((abs(case f when 17 then t1.e else t1.c end)/abs( -d)))/abs(t1.c)) in (d,a,a) then 19 when (exists(select 1 from t1 where t1.c=t1.a) or a<>d) then a else c end)/abs(t1.e))-17) AS integer)* -min(19)-count(distinct a)-+count(distinct 17)*min(t1.a)+ -cast(avg(t1.a) AS integer)- -max(17) & max(e)) from t1) FROM t1 WHERE -f-t1.f++e+case when not (((select abs(count(*)) from t1)) not in ((case when 
(t1.b>t1.c) then t1.b else f end),e,13)) and not exists(select 1 from t1 where 11 in (select t1.f from t1 union select ((t1.f)) from t1)) then (t1.e)+c-c+c when -b in (13,19,t1.c) then 17 else 13 end-d | t1.a not in (a,b,b)} +} {128} +do_test randexpr-2.1025 { + db eval {SELECT 11-(select min((abs(e)/abs(f*t1.e*c+17+~case when f | 11 between (select case -max(19) when (cast(avg(t1.a) AS integer)) then count(distinct t1.f) else count(*) end from t1) and coalesce((select 13 from t1 where a<=(t1.d)),f)-a then t1.a when f in (select 17 from t1 union select -b from t1) and t1.d(17) or 19>=d then (f) else 11 end))) from t1) FROM t1 WHERE e in (e, -17*t1.c*17,e)} +} {11} +do_test randexpr-2.1026 { + db eval {SELECT 11-(select min((abs(e)/abs(f*t1.e*c+17+~case when f | 11 between (select case -max(19) when (cast(avg(t1.a) AS integer)) then count(distinct t1.f) else count(*) end from t1) and coalesce((select 13 from t1 where a<=(t1.d)),f)-a then t1.a when f in (select 17 from t1 union select -b from t1) and t1.d(17) or 19>=d then (f) else 11 end))) from t1) FROM t1 WHERE NOT (e in (e, -17*t1.c*17,e))} +} {} +do_test randexpr-2.1027 { + db eval {SELECT 11-(select min((abs(e)/abs(f*t1.e*c+17+~case when f & 11 between (select case -max(19) when (cast(avg(t1.a) AS integer)) then count(distinct t1.f) else count(*) end from t1) and coalesce((select 13 from t1 where a<=(t1.d)),f)-a then t1.a when f in (select 17 from t1 union select -b from t1) and t1.d(17) or 19>=d then (f) else 11 end))) from t1) FROM t1 WHERE e in (e, -17*t1.c*17,e)} +} {11} +do_test randexpr-2.1028 { + db eval {SELECT coalesce((select max( -t1.d-f+~t1.d*t1.b) from t1 where (select ~+min(t1.f)+abs(min(+(select cast(avg((b)) AS integer) from t1)-d*d))-max(f)-count(distinct 19) | count(*)*count(*) from t1)>c),case 13 when +t1.c-t1.c-t1.a then c else c end*d) FROM t1 WHERE ((t1.b>=case when not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where e>=t1.a or (coalesce((select max(coalesce((select case when a in (select t1.d from t1 union select t1.c from t1) then a when -b in (17,c,t1.f) then f else b end from t1 where not exists(select 1 from t1 where f<=13)),17)) from t1 where t1.f>=t1.b),d) between t1.f and 11) or t1.b between 13 and t1.b))) then a when e<=17 then t1.b+ -~11 else a end and not e>= - -b or 13 not in (c,f,11)) and d not between 11 and t1.a)} +} {-81200} +do_test randexpr-2.1029 { + db eval {SELECT coalesce((select max( -t1.d-f+~t1.d*t1.b) from t1 where (select ~+min(t1.f)+abs(min(+(select cast(avg((b)) AS integer) from t1)-d*d))-max(f)-count(distinct 19) | count(*)*count(*) from t1)>c),case 13 when +t1.c-t1.c-t1.a then c else c end*d) FROM t1 WHERE NOT (((t1.b>=case when not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where e>=t1.a or (coalesce((select max(coalesce((select case when a in (select t1.d from t1 union select t1.c from t1) then a when -b in (17,c,t1.f) then f else b end from t1 where not exists(select 1 from t1 where f<=13)),17)) from t1 where t1.f>=t1.b),d) between t1.f and 11) or t1.b between 13 and t1.b))) then a when e<=17 then t1.b+ -~11 else a end and not e>= - -b or 13 not in (c,f,11)) and d not between 11 and t1.a))} +} {} +do_test randexpr-2.1030 { + db eval {SELECT coalesce((select max( -t1.d-f+~t1.d*t1.b) from t1 where (select ~+min(t1.f)+abs(min(+(select cast(avg((b)) AS integer) from t1)-d*d))-max(f)-count(distinct 19) & count(*)*count(*) from t1)>c),case 13 when +t1.c-t1.c-t1.a then c else c end*d) FROM 
t1 WHERE ((t1.b>=case when not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where e>=t1.a or (coalesce((select max(coalesce((select case when a in (select t1.d from t1 union select t1.c from t1) then a when -b in (17,c,t1.f) then f else b end from t1 where not exists(select 1 from t1 where f<=13)),17)) from t1 where t1.f>=t1.b),d) between t1.f and 11) or t1.b between 13 and t1.b))) then a when e<=17 then t1.b+ -~11 else a end and not e>= - -b or 13 not in (c,f,11)) and d not between 11 and t1.a)} +} {120000} +do_test randexpr-2.1031 { + db eval {SELECT case when t1.d<>coalesce((select 13 from t1 where (d in (select case when (13<>t1.a*d-19-+11*13-t1.c) then case when t1.c=d then e else t1.b end when not exists(select 1 from t1 where 17<>t1.b) then t1.e else t1.e end | t1.a+t1.e from t1 union select t1.d from t1))),t1.f) or a> -t1.e then 11 else 17 end*d FROM t1 WHERE t1.a-d<=d-(abs(coalesce((select b from t1 where ~e=13 then t1.d else (t1.c) end not between t1.a and 13 then a when (17) not between 11 and t1.c then a else a end) not between 13 and t1.d then 13 else a end),19))/abs(t1.e))+t1.c+t1.e*b+c or ((17 in (select f from t1 union select t1.e from t1)))} +} {4400} +do_test randexpr-2.1032 { + db eval {SELECT case when t1.d<>coalesce((select 13 from t1 where (d in (select case when (13<>t1.a*d-19-+11*13-t1.c) then case when t1.c=d then e else t1.b end when not exists(select 1 from t1 where 17<>t1.b) then t1.e else t1.e end | t1.a+t1.e from t1 union select t1.d from t1))),t1.f) or a> -t1.e then 11 else 17 end*d FROM t1 WHERE NOT (t1.a-d<=d-(abs(coalesce((select b from t1 where ~e=13 then t1.d else (t1.c) end not between t1.a and 13 then a when (17) not between 11 and t1.c then a else a end) not between 13 and t1.d then 13 else a end),19))/abs(t1.e))+t1.c+t1.e*b+c or ((17 in (select f from t1 union select t1.e from t1))))} +} {} +do_test randexpr-2.1033 { + db eval {SELECT case when t1.d<>coalesce((select 13 from t1 where (d in (select case when (13<>t1.a*d-19-+11*13-t1.c) then case when t1.c=d then e else t1.b end when not exists(select 1 from t1 where 17<>t1.b) then t1.e else t1.e end & t1.a+t1.e from t1 union select t1.d from t1))),t1.f) or a> -t1.e then 11 else 17 end*d FROM t1 WHERE t1.a-d<=d-(abs(coalesce((select b from t1 where ~e=13 then t1.d else (t1.c) end not between t1.a and 13 then a when (17) not between 11 and t1.c then a else a end) not between 13 and t1.d then 13 else a end),19))/abs(t1.e))+t1.c+t1.e*b+c or ((17 in (select f from t1 union select t1.e from t1)))} +} {4400} +do_test randexpr-2.1034 { + db eval {SELECT coalesce((select (abs(c)/abs(case when 13<=11 then t1.a when case t1.d when (11) then coalesce((select coalesce((select d-17 from t1 where t1.e=a),11) from t1 where 13 in (select t1.e from t1 union select c from t1)),19) else d end>=t1.f or 11 in (select cast(avg(t1.f) AS integer) from t1 union select ( - -count(*)) from t1) and t1.c between 13 and -b then f else e end)) from t1 where d<=(19)),d)-t1.b FROM t1 WHERE 17>=case when t1.c not in (f,t1.a,c*17*a | t1.c-~t1.d+(+coalesce((select max(case when exists(select 1 from t1 where (t1.e in (select -t1.e from t1 union select t1.c from t1))) then t1.b else t1.b*t1.b end) from t1 where not exists(select 1 from t1 where 17=t1.a)),d))+t1.e | (13)*t1.e+c) and e in (select -c from t1 union select 17 from t1) then t1.d*13 else c end- -e} +} {} +do_test randexpr-2.1035 { + db eval {SELECT coalesce((select (abs(c)/abs(case when 13<=11 then t1.a when case t1.d when (11) then 
coalesce((select coalesce((select d-17 from t1 where t1.e=a),11) from t1 where 13 in (select t1.e from t1 union select c from t1)),19) else d end>=t1.f or 11 in (select cast(avg(t1.f) AS integer) from t1 union select ( - -count(*)) from t1) and t1.c between 13 and -b then f else e end)) from t1 where d<=(19)),d)-t1.b FROM t1 WHERE NOT (17>=case when t1.c not in (f,t1.a,c*17*a | t1.c-~t1.d+(+coalesce((select max(case when exists(select 1 from t1 where (t1.e in (select -t1.e from t1 union select t1.c from t1))) then t1.b else t1.b*t1.b end) from t1 where not exists(select 1 from t1 where 17=t1.a)),d))+t1.e | (13)*t1.e+c) and e in (select -c from t1 union select 17 from t1) then t1.d*13 else c end- -e)} +} {200} +do_test randexpr-2.1036 { + db eval {SELECT (abs(case when t1.a in (select case when d=(select ~cast(avg(t1.a) AS integer) | ~+min(17*(abs(19)/abs(t1.a))) from t1) or 13 in (select count(distinct t1.b | 11) from t1 union select count(*) | count(distinct c) | count(distinct 17) from t1) and not b<=17 then t1.a when 11=t1.f then (abs(t1.c)/abs(11)) else f end from t1 union select c from t1) then t1.c else a end)/abs(t1.d))-t1.d FROM t1 WHERE ((abs(e)/abs(t1.a* -(coalesce((select max(t1.b-t1.d) from t1 where f>=13),~a+ -a)) | case when not exists(select 1 from t1 where coalesce((select max(c) from t1 where 19-t1.a | e<=17),~( -t1.a)) in (select f from t1 union select t1.d from t1)) then t1.d else (t1.c) end)) in (17,f,t1.a)) or 19=13 or 19=17} +} {} +do_test randexpr-2.1037 { + db eval {SELECT (abs(case when t1.a in (select case when d=(select ~cast(avg(t1.a) AS integer) | ~+min(17*(abs(19)/abs(t1.a))) from t1) or 13 in (select count(distinct t1.b | 11) from t1 union select count(*) | count(distinct c) | count(distinct 17) from t1) and not b<=17 then t1.a when 11=t1.f then (abs(t1.c)/abs(11)) else f end from t1 union select c from t1) then t1.c else a end)/abs(t1.d))-t1.d FROM t1 WHERE NOT (((abs(e)/abs(t1.a* -(coalesce((select max(t1.b-t1.d) from t1 where f>=13),~a+ -a)) | case when not exists(select 1 from t1 where coalesce((select max(c) from t1 where 19-t1.a | e<=17),~( -t1.a)) in (select f from t1 union select t1.d from t1)) then t1.d else (t1.c) end)) in (17,f,t1.a)) or 19=13 or 19=17)} +} {-400} +do_test randexpr-2.1038 { + db eval {SELECT (abs(case when t1.a in (select case when d=(select ~cast(avg(t1.a) AS integer) & ~+min(17*(abs(19)/abs(t1.a))) from t1) or 13 in (select count(distinct t1.b & 11) from t1 union select count(*) & count(distinct c) & count(distinct 17) from t1) and not b<=17 then t1.a when 11=t1.f then (abs(t1.c)/abs(11)) else f end from t1 union select c from t1) then t1.c else a end)/abs(t1.d))-t1.d FROM t1 WHERE NOT (((abs(e)/abs(t1.a* -(coalesce((select max(t1.b-t1.d) from t1 where f>=13),~a+ -a)) | case when not exists(select 1 from t1 where coalesce((select max(c) from t1 where 19-t1.a | e<=17),~( -t1.a)) in (select f from t1 union select t1.d from t1)) then t1.d else (t1.c) end)) in (17,f,t1.a)) or 19=13 or 19=17)} +} {-400} +do_test randexpr-2.1039 { + db eval {SELECT case when f>coalesce((select c from t1 where coalesce((select max(c+c) from t1 where t1.f in (select - -abs(max(19))+(min(a)) from t1 union select count(distinct -t1.d) from t1)),e)>=e),t1.f) | t1.a | 19 or (t1.c) not in (19, - -t1.f,t1.c) or exists(select 1 from t1 where t1.b not between t1.a and -11) then coalesce((select -b from t1 where -t1.a in (select c from t1 union select 17 from t1)),t1.d) when t1.a=c then c else t1.e end FROM t1 WHERE t1.e in (select +cast(avg(f) AS integer) from 
t1 union select case +max(19) | count(*) when count(*)*+count(*) then ~+count(*) else count(*) end from t1)} +} {} +do_test randexpr-2.1040 { + db eval {SELECT case when f>coalesce((select c from t1 where coalesce((select max(c+c) from t1 where t1.f in (select - -abs(max(19))+(min(a)) from t1 union select count(distinct -t1.d) from t1)),e)>=e),t1.f) | t1.a | 19 or (t1.c) not in (19, - -t1.f,t1.c) or exists(select 1 from t1 where t1.b not between t1.a and -11) then coalesce((select -b from t1 where -t1.a in (select c from t1 union select 17 from t1)),t1.d) when t1.a=c then c else t1.e end FROM t1 WHERE NOT (t1.e in (select +cast(avg(f) AS integer) from t1 union select case +max(19) | count(*) when count(*)*+count(*) then ~+count(*) else count(*) end from t1))} +} {400} +do_test randexpr-2.1041 { + db eval {SELECT case when f>coalesce((select c from t1 where coalesce((select max(c+c) from t1 where t1.f in (select - -abs(max(19))+(min(a)) from t1 union select count(distinct -t1.d) from t1)),e)>=e),t1.f) & t1.a & 19 or (t1.c) not in (19, - -t1.f,t1.c) or exists(select 1 from t1 where t1.b not between t1.a and -11) then coalesce((select -b from t1 where -t1.a in (select c from t1 union select 17 from t1)),t1.d) when t1.a=c then c else t1.e end FROM t1 WHERE NOT (t1.e in (select +cast(avg(f) AS integer) from t1 union select case +max(19) | count(*) when count(*)*+count(*) then ~+count(*) else count(*) end from t1))} +} {400} +do_test randexpr-2.1042 { + db eval {SELECT +case t1.d when d then t1.f else +t1.b+(select -count(distinct coalesce((select max(c) from t1 where exists(select 1 from t1 where 17 not between d and e)),case when t1.f+f-~17<>d then case when exists(select 1 from t1 where t1.d=(13)) then t1.e when c=t1.e then -a else f end when not 17>11 then t1.b else e end))-max(t1.d) from t1)+ -e-(t1.d) end-11-b FROM t1 WHERE e-case +t1.e when e*e then t1.d else case (abs(case when (abs(case when t1.a+11+t1.a>11-t1.e then +t1.f when e<=a then (c) else b end)/abs(13))*b between b and t1.f then -a else t1.c end+d)/abs(a)) when d then 13 else t1.e end end+e+t1.d<=c} +} {} +do_test randexpr-2.1043 { + db eval {SELECT +case t1.d when d then t1.f else +t1.b+(select -count(distinct coalesce((select max(c) from t1 where exists(select 1 from t1 where 17 not between d and e)),case when t1.f+f-~17<>d then case when exists(select 1 from t1 where t1.d=(13)) then t1.e when c=t1.e then -a else f end when not 17>11 then t1.b else e end))-max(t1.d) from t1)+ -e-(t1.d) end-11-b FROM t1 WHERE NOT (e-case +t1.e when e*e then t1.d else case (abs(case when (abs(case when t1.a+11+t1.a>11-t1.e then +t1.f when e<=a then (c) else b end)/abs(13))*b between b and t1.f then -a else t1.c end+d)/abs(a)) when d then 13 else t1.e end end+e+t1.d<=c)} +} {389} +do_test randexpr-2.1044 { + db eval {SELECT + -(select count(*) from t1)*(t1.b)-a-t1.e+17*coalesce((select max(13) from t1 where not exists(select 1 from t1 where case when t1.ft1.d then 17 else -17 end* -(t1.f)=(t1.a))), -d)*t1.f*t1.a* -e FROM t1 WHERE not exists(select 1 from t1 where ~case when d+ -t1.c+11*t1.e-t1.c*a*(t1.b)<=~~t1.b then f else case b when t1.b*t1.c-t1.d then c-b else -t1.c end end-13+f>=case when d not between t1.c and (t1.b) then t1.e else t1.c end) or t1.d in (d,t1.c,t1.c)} +} {-6630000800} +do_test randexpr-2.1045 { + db eval {SELECT + -(select count(*) from t1)*(t1.b)-a-t1.e+17*coalesce((select max(13) from t1 where not exists(select 1 from t1 where case when t1.ft1.d then 17 else -17 end* -(t1.f)=(t1.a))), -d)*t1.f*t1.a* -e FROM t1 WHERE NOT 
(not exists(select 1 from t1 where ~case when d+ -t1.c+11*t1.e-t1.c*a*(t1.b)<=~~t1.b then f else case b when t1.b*t1.c-t1.d then c-b else -t1.c end end-13+f>=case when d not between t1.c and (t1.b) then t1.e else t1.c end) or t1.d in (d,t1.c,t1.c))} +} {} +do_test randexpr-2.1046 { + db eval {SELECT (abs((select -~~ -count(*)+++count(distinct c) from t1))/abs(coalesce((select e from t1 where (case when case when b not between case t1.d when f then -t1.c else t1.e end and c then d when b=19 then t1.c else d end in (select max(13) from t1 union select count(distinct b) from t1) and t1.d not in (a,t1.b,a) or -t1.d between 13 and e then 17+t1.e when 19 not between t1.f and 17 then t1.b else t1.a end in (17,(t1.e),t1.a))),b) | 17)) FROM t1 WHERE 11>t1.b} +} {} +do_test randexpr-2.1047 { + db eval {SELECT (abs((select -~~ -count(*)+++count(distinct c) from t1))/abs(coalesce((select e from t1 where (case when case when b not between case t1.d when f then -t1.c else t1.e end and c then d when b=19 then t1.c else d end in (select max(13) from t1 union select count(distinct b) from t1) and t1.d not in (a,t1.b,a) or -t1.d between 13 and e then 17+t1.e when 19 not between t1.f and 17 then t1.b else t1.a end in (17,(t1.e),t1.a))),b) | 17)) FROM t1 WHERE NOT (11>t1.b)} +} {0} +do_test randexpr-2.1048 { + db eval {SELECT coalesce((select max(t1.d) from t1 where coalesce((select e from t1 where case when (c)<>11+case f when t1.a then (select abs( -cast(avg(t1.c) AS integer)) | max((17)) | cast(avg(t1.d) AS integer)-count(distinct e) from t1) else 17*(abs(case t1.d when t1.b then a else b end)/abs(b)) end- -c-17+c*t1.c- -t1.d then t1.c else e end>=f),d)<>t1.a), -t1.f) FROM t1 WHERE not a<=17-t1.f} +} {400} +do_test randexpr-2.1049 { + db eval {SELECT coalesce((select max(t1.d) from t1 where coalesce((select e from t1 where case when (c)<>11+case f when t1.a then (select abs( -cast(avg(t1.c) AS integer)) | max((17)) | cast(avg(t1.d) AS integer)-count(distinct e) from t1) else 17*(abs(case t1.d when t1.b then a else b end)/abs(b)) end- -c-17+c*t1.c- -t1.d then t1.c else e end>=f),d)<>t1.a), -t1.f) FROM t1 WHERE NOT (not a<=17-t1.f)} +} {} +do_test randexpr-2.1050 { + db eval {SELECT coalesce((select max(t1.d) from t1 where coalesce((select e from t1 where case when (c)<>11+case f when t1.a then (select abs( -cast(avg(t1.c) AS integer)) & max((17)) & cast(avg(t1.d) AS integer)-count(distinct e) from t1) else 17*(abs(case t1.d when t1.b then a else b end)/abs(b)) end- -c-17+c*t1.c- -t1.d then t1.c else e end>=f),d)<>t1.a), -t1.f) FROM t1 WHERE not a<=17-t1.f} +} {400} +do_test randexpr-2.1051 { + db eval {SELECT coalesce((select coalesce((select ~d from t1 where t1.b-t1.c+13 not between t1.b and (abs(t1.a-case t1.c when t1.c | (select max((abs(case when (select count(*) from t1)<>(~t1.e-13*+d+d) then -t1.e else -t1.f end*13)/abs(f))-t1.f) from t1) then t1.f else t1.c end)/abs(c))),t1.e) from t1 where t1.f=f), - -11) FROM t1 WHERE case ((abs(t1.e)/abs(case b when ~17 then 19+(t1.d)+13 else case t1.a*17 when c then 19*case when c<>coalesce((select max(~a) from t1 where 19<>case case when t1.d in (select +count(distinct 11) from t1 union select ( -max(a)) from t1) then t1.f else b end when t1.f then d else t1.a end-t1.b),11) then t1.b else t1.d end else 11 end end))) when t1.b then 11 else t1.d end*t1.c= -t1.f} +} {} +do_test randexpr-2.1052 { + db eval {SELECT coalesce((select coalesce((select ~d from t1 where t1.b-t1.c+13 not between t1.b and (abs(t1.a-case t1.c when t1.c | (select max((abs(case when (select 
count(*) from t1)<>(~t1.e-13*+d+d) then -t1.e else -t1.f end*13)/abs(f))-t1.f) from t1) then t1.f else t1.c end)/abs(c))),t1.e) from t1 where t1.f=f), - -11) FROM t1 WHERE NOT (case ((abs(t1.e)/abs(case b when ~17 then 19+(t1.d)+13 else case t1.a*17 when c then 19*case when c<>coalesce((select max(~a) from t1 where 19<>case case when t1.d in (select +count(distinct 11) from t1 union select ( -max(a)) from t1) then t1.f else b end when t1.f then d else t1.a end-t1.b),11) then t1.b else t1.d end else 11 end end))) when t1.b then 11 else t1.d end*t1.c= -t1.f)} +} {-401} +do_test randexpr-2.1053 { + db eval {SELECT coalesce((select coalesce((select ~d from t1 where t1.b-t1.c+13 not between t1.b and (abs(t1.a-case t1.c when t1.c & (select max((abs(case when (select count(*) from t1)<>(~t1.e-13*+d+d) then -t1.e else -t1.f end*13)/abs(f))-t1.f) from t1) then t1.f else t1.c end)/abs(c))),t1.e) from t1 where t1.f=f), - -11) FROM t1 WHERE NOT (case ((abs(t1.e)/abs(case b when ~17 then 19+(t1.d)+13 else case t1.a*17 when c then 19*case when c<>coalesce((select max(~a) from t1 where 19<>case case when t1.d in (select +count(distinct 11) from t1 union select ( -max(a)) from t1) then t1.f else b end when t1.f then d else t1.a end-t1.b),11) then t1.b else t1.d end else 11 end end))) when t1.b then 11 else t1.d end*t1.c= -t1.f)} +} {-401} +do_test randexpr-2.1054 { + db eval {SELECT -13-case when (not d<=11-t1.c*t1.e+t1.b) then case when t1.f | coalesce((select +d from t1 where t1.d= -c),(t1.a)) not between b and t1.a then t1.f+a when t1.a<11 and exists(select 1 from t1 where t1.f in (t1.b,17,11)) then b else 19 end when (t1.b between t1.d and e) then b else -t1.c end FROM t1 WHERE case when b between case when coalesce((select t1.c-t1.c from t1 where 13*(select case min(11) when cast(avg(t1.c) AS integer) then (count(*)) else count(distinct 11) end+cast(avg( -f) AS integer) from t1)>=t1.a),t1.c) in ( -t1.c,d,b) then t1.e when f in (select e from t1 union select t1.d from t1) then f else c end and a and 13<>c and exists(select 1 from t1 where f<>c) then case when c in (11,b, -11) then t1.e else e end else 13 end<=t1.c} +} {-713} +do_test randexpr-2.1055 { + db eval {SELECT -13-case when (not d<=11-t1.c*t1.e+t1.b) then case when t1.f | coalesce((select +d from t1 where t1.d= -c),(t1.a)) not between b and t1.a then t1.f+a when t1.a<11 and exists(select 1 from t1 where t1.f in (t1.b,17,11)) then b else 19 end when (t1.b between t1.d and e) then b else -t1.c end FROM t1 WHERE NOT (case when b between case when coalesce((select t1.c-t1.c from t1 where 13*(select case min(11) when cast(avg(t1.c) AS integer) then (count(*)) else count(distinct 11) end+cast(avg( -f) AS integer) from t1)>=t1.a),t1.c) in ( -t1.c,d,b) then t1.e when f in (select e from t1 union select t1.d from t1) then f else c end and a and 13<>c and exists(select 1 from t1 where f<>c) then case when c in (11,b, -11) then t1.e else e end else 13 end<=t1.c)} +} {} +do_test randexpr-2.1056 { + db eval {SELECT -13-case when (not d<=11-t1.c*t1.e+t1.b) then case when t1.f & coalesce((select +d from t1 where t1.d= -c),(t1.a)) not between b and t1.a then t1.f+a when t1.a<11 and exists(select 1 from t1 where t1.f in (t1.b,17,11)) then b else 19 end when (t1.b between t1.d and e) then b else -t1.c end FROM t1 WHERE case when b between case when coalesce((select t1.c-t1.c from t1 where 13*(select case min(11) when cast(avg(t1.c) AS integer) then (count(*)) else count(distinct 11) end+cast(avg( -f) AS integer) from t1)>=t1.a),t1.c) in ( -t1.c,d,b) then t1.e 
when f in (select e from t1 union select t1.d from t1) then f else c end and a and 13<>c and exists(select 1 from t1 where f<>c) then case when c in (11,b, -11) then t1.e else e end else 13 end<=t1.c} +} {-713} +do_test randexpr-2.1057 { + db eval {SELECT t1.b | (abs( -(select ~ -~abs(case ~min((c* -b+c))+min(f) when ~abs(count(distinct 13))+(min(13)) then max(11) else count(*) end) from t1)-11 | +(abs(t1.d-t1.e)/abs(t1.b))-t1.f*19+t1.c*17)/abs(f))+t1.b FROM t1 WHERE (a between t1.e and -13)} +} {} +do_test randexpr-2.1058 { + db eval {SELECT t1.b | (abs( -(select ~ -~abs(case ~min((c* -b+c))+min(f) when ~abs(count(distinct 13))+(min(13)) then max(11) else count(*) end) from t1)-11 | +(abs(t1.d-t1.e)/abs(t1.b))-t1.f*19+t1.c*17)/abs(f))+t1.b FROM t1 WHERE NOT ((a between t1.e and -13))} +} {200} +do_test randexpr-2.1059 { + db eval {SELECT t1.b & (abs( -(select ~ -~abs(case ~min((c* -b+c))+min(f) when ~abs(count(distinct 13))+(min(13)) then max(11) else count(*) end) from t1)-11 & +(abs(t1.d-t1.e)/abs(t1.b))-t1.f*19+t1.c*17)/abs(f))+t1.b FROM t1 WHERE NOT ((a between t1.e and -13))} +} {192} +do_test randexpr-2.1060 { + db eval {SELECT t1.c-case t1.b when t1.f-t1.b-f then +coalesce((select b from t1 where (t1.d in (select (select count(*)+abs(count(*)) | ~cast(avg(b | case when 11 between b and (f) then b when (19)>19 then 17 else t1.e end) AS integer) | min(d) from t1) from t1 union select 19 from t1))),(abs(c*c)/abs(11))-a-t1.c+t1.f-t1.c)+b else b end FROM t1 WHERE (t1.d)+t1.b<( -t1.b)} +} {} +do_test randexpr-2.1061 { + db eval {SELECT t1.c-case t1.b when t1.f-t1.b-f then +coalesce((select b from t1 where (t1.d in (select (select count(*)+abs(count(*)) | ~cast(avg(b | case when 11 between b and (f) then b when (19)>19 then 17 else t1.e end) AS integer) | min(d) from t1) from t1 union select 19 from t1))),(abs(c*c)/abs(11))-a-t1.c+t1.f-t1.c)+b else b end FROM t1 WHERE NOT ((t1.d)+t1.b<( -t1.b))} +} {100} +do_test randexpr-2.1062 { + db eval {SELECT t1.c-case t1.b when t1.f-t1.b-f then +coalesce((select b from t1 where (t1.d in (select (select count(*)+abs(count(*)) & ~cast(avg(b & case when 11 between b and (f) then b when (19)>19 then 17 else t1.e end) AS integer) & min(d) from t1) from t1 union select 19 from t1))),(abs(c*c)/abs(11))-a-t1.c+t1.f-t1.c)+b else b end FROM t1 WHERE NOT ((t1.d)+t1.b<( -t1.b))} +} {100} +do_test randexpr-2.1063 { + db eval {SELECT 17*(select (case case min(t1.d+( -d | case 11* -11*11 when 13 then t1.c else -17 end)-d*t1.d)-count(*)-abs( -max(17)+~max(t1.b)) | +abs(cast(avg((a)) AS integer))+(count(*)) when -max(17) then -max(19) else (cast(avg(d) AS integer)) end when count(distinct t1.c) then min(b) else count(*) end) from t1) FROM t1 WHERE d>=(abs(coalesce((select max(d) from t1 where not not case when a in (select count(*)* -case +min(case when t1.e=(abs(coalesce((select max(d) from t1 where not not case when a in (select count(*)* -case +min(case when t1.e=(abs(coalesce((select max(d) from t1 where not not case when a in (select count(*)* -case +min(case when t1.ef),t1.f) and 11 not between 13 and 13 and ( -11<>c and t1.b>c) or 19<>t1.e and t1.b<=t1.f then +f*b when t1.d>=e then -13 else t1.c end when b then (17) else t1.d end+a+f*a FROM t1 WHERE not exists(select 1 from t1 where a between t1.f and 17 or e>=(d)*19+t1.e*c)} +} {59707} +do_test randexpr-2.1067 { + db eval {SELECT 11 | ~case case when e between 17 and coalesce((select max(((f-t1.f))) from t1 where d<>f),t1.f) and 11 not between 13 and 13 and ( -11<>c and t1.b>c) or 19<>t1.e and t1.b<=t1.f 
then +f*b when t1.d>=e then -13 else t1.c end when b then (17) else t1.d end+a+f*a FROM t1 WHERE NOT (not exists(select 1 from t1 where a between t1.f and 17 or e>=(d)*19+t1.e*c))} +} {} +do_test randexpr-2.1068 { + db eval {SELECT 11 & ~case case when e between 17 and coalesce((select max(((f-t1.f))) from t1 where d<>f),t1.f) and 11 not between 13 and 13 and ( -11<>c and t1.b>c) or 19<>t1.e and t1.b<=t1.f then +f*b when t1.d>=e then -13 else t1.c end when b then (17) else t1.d end+a+f*a FROM t1 WHERE not exists(select 1 from t1 where a between t1.f and 17 or e>=(d)*19+t1.e*c)} +} {3} +do_test randexpr-2.1069 { + db eval {SELECT -case when t1.d | ~b<>coalesce((select max(t1.c) from t1 where t1.c not between coalesce((select max(coalesce((select max(e) from t1 where 11 not in (case when 13 not between d and c then 17 when (t1.a) not in (e,b,b) then 19 else c end,a,t1.d)),b)+f) from t1 where t1.d not in (11,f,t1.e)),t1.b) and f),11) or 11<>(d) then -f* -d else (e) end*f FROM t1 WHERE case f when 11 then coalesce((select b from t1 where 19+case when 17*t1.d-t1.f+t1.f between t1.d-case when +d=coalesce((select max(f) from t1 where e>t1.f),17) and c=t1.e and b not in (17,11,(t1.f)) then -17 else t1.c end and c then -t1.a else t1.d end+11*e not between t1.f and d),13)+t1.d else -13 end=c} +} {} +do_test randexpr-2.1070 { + db eval {SELECT -case when t1.d | ~b<>coalesce((select max(t1.c) from t1 where t1.c not between coalesce((select max(coalesce((select max(e) from t1 where 11 not in (case when 13 not between d and c then 17 when (t1.a) not in (e,b,b) then 19 else c end,a,t1.d)),b)+f) from t1 where t1.d not in (11,f,t1.e)),t1.b) and f),11) or 11<>(d) then -f* -d else (e) end*f FROM t1 WHERE NOT (case f when 11 then coalesce((select b from t1 where 19+case when 17*t1.d-t1.f+t1.f between t1.d-case when +d=coalesce((select max(f) from t1 where e>t1.f),17) and c=t1.e and b not in (17,11,(t1.f)) then -17 else t1.c end and c then -t1.a else t1.d end+11*e not between t1.f and d),13)+t1.d else -13 end=c)} +} {-144000000} +do_test randexpr-2.1071 { + db eval {SELECT -case when t1.d & ~b<>coalesce((select max(t1.c) from t1 where t1.c not between coalesce((select max(coalesce((select max(e) from t1 where 11 not in (case when 13 not between d and c then 17 when (t1.a) not in (e,b,b) then 19 else c end,a,t1.d)),b)+f) from t1 where t1.d not in (11,f,t1.e)),t1.b) and f),11) or 11<>(d) then -f* -d else (e) end*f FROM t1 WHERE NOT (case f when 11 then coalesce((select b from t1 where 19+case when 17*t1.d-t1.f+t1.f between t1.d-case when +d=coalesce((select max(f) from t1 where e>t1.f),17) and c=t1.e and b not in (17,11,(t1.f)) then -17 else t1.c end and c then -t1.a else t1.d end+11*e not between t1.f and d),13)+t1.d else -13 end=c)} +} {-144000000} +do_test randexpr-2.1072 { + db eval {SELECT coalesce((select t1.b from t1 where not (t1.c>19) and 11<=+(abs(t1.b-19)/abs(a))+case when ((coalesce((select 13 from t1 where c in (b,t1.b,t1.d)),e))<>13) or f in ((t1.b),t1.e, -b) then 11+(c) else b end or b<>d),t1.f+t1.f+13) | - -a FROM t1 WHERE 13 between t1.c and 13+~19+c+(select min(f-(abs(17)/abs((t1.e)))) from t1)} +} {} +do_test randexpr-2.1073 { + db eval {SELECT coalesce((select t1.b from t1 where not (t1.c>19) and 11<=+(abs(t1.b-19)/abs(a))+case when ((coalesce((select 13 from t1 where c in (b,t1.b,t1.d)),e))<>13) or f in ((t1.b),t1.e, -b) then 11+(c) else b end or b<>d),t1.f+t1.f+13) | - -a FROM t1 WHERE NOT (13 between t1.c and 13+~19+c+(select min(f-(abs(17)/abs((t1.e)))) from t1))} +} {236} +do_test 
randexpr-2.1074 { + db eval {SELECT coalesce((select t1.b from t1 where not (t1.c>19) and 11<=+(abs(t1.b-19)/abs(a))+case when ((coalesce((select 13 from t1 where c in (b,t1.b,t1.d)),e))<>13) or f in ((t1.b),t1.e, -b) then 11+(c) else b end or b<>d),t1.f+t1.f+13) & - -a FROM t1 WHERE NOT (13 between t1.c and 13+~19+c+(select min(f-(abs(17)/abs((t1.e)))) from t1))} +} {64} +do_test randexpr-2.1075 { + db eval {SELECT -(coalesce((select max(coalesce((select t1.f from t1 where not exists(select 1 from t1 where (abs(+b+19+19)/abs(t1.b))=(select -max(19) from t1)+t1.f),t1.e+t1.f)* -t1.e+19-f) from t1 where (( -t1.c<19) or d>=c)),t1.c))*c*((t1.b))-17 FROM t1 WHERE t1.e not between t1.f*c and t1.f+ -d} +} {18034859983} +do_test randexpr-2.1076 { + db eval {SELECT -(coalesce((select max(coalesce((select t1.f from t1 where not exists(select 1 from t1 where (abs(+b+19+19)/abs(t1.b))=(select -max(19) from t1)+t1.f),t1.e+t1.f)* -t1.e+19-f) from t1 where (( -t1.c<19) or d>=c)),t1.c))*c*((t1.b))-17 FROM t1 WHERE NOT (t1.e not between t1.f*c and t1.f+ -d)} +} {} +do_test randexpr-2.1077 { + db eval {SELECT case 17 when (t1.d)*coalesce((select (select case (cast(avg(c) AS integer)+max(case when (not d+ -t1.e not between 11 and -f) then f when b<(11) then t1.b else 11 end))+count(distinct t1.c)*max(b) when -min(e) | -count(distinct t1.c) then cast(avg(e) AS integer) else count(*) end from t1) from t1 where case c when a then a else t1.d end-11<>19),t1.a)+t1.b then f else t1.f end FROM t1 WHERE not exists(select 1 from t1 where exists(select 1 from t1 where (abs(d)/abs((f*+coalesce((select -~case when exists(select 1 from t1 where bt1.c or ((t1.c))<>e),(t1.e)))) from t1)*17 when e then a else t1.e end-t1.b-19 from t1 where t1.e in (select abs(+min(f)+cast(avg( -a) AS integer)) from t1 union select count(distinct c) from t1)),d))))<>f))} +} {} +do_test randexpr-2.1078 { + db eval {SELECT case 17 when (t1.d)*coalesce((select (select case (cast(avg(c) AS integer)+max(case when (not d+ -t1.e not between 11 and -f) then f when b<(11) then t1.b else 11 end))+count(distinct t1.c)*max(b) when -min(e) | -count(distinct t1.c) then cast(avg(e) AS integer) else count(*) end from t1) from t1 where case c when a then a else t1.d end-11<>19),t1.a)+t1.b then f else t1.f end FROM t1 WHERE NOT (not exists(select 1 from t1 where exists(select 1 from t1 where (abs(d)/abs((f*+coalesce((select -~case when exists(select 1 from t1 where bt1.c or ((t1.c))<>e),(t1.e)))) from t1)*17 when e then a else t1.e end-t1.b-19 from t1 where t1.e in (select abs(+min(f)+cast(avg( -a) AS integer)) from t1 union select count(distinct c) from t1)),d))))<>f)))} +} {600} +do_test randexpr-2.1079 { + db eval {SELECT case 17 when (t1.d)*coalesce((select (select case (cast(avg(c) AS integer)+max(case when (not d+ -t1.e not between 11 and -f) then f when b<(11) then t1.b else 11 end))+count(distinct t1.c)*max(b) when -min(e) & -count(distinct t1.c) then cast(avg(e) AS integer) else count(*) end from t1) from t1 where case c when a then a else t1.d end-11<>19),t1.a)+t1.b then f else t1.f end FROM t1 WHERE NOT (not exists(select 1 from t1 where exists(select 1 from t1 where (abs(d)/abs((f*+coalesce((select -~case when exists(select 1 from t1 where bt1.c or ((t1.c))<>e),(t1.e)))) from t1)*17 when e then a else t1.e end-t1.b-19 from t1 where t1.e in (select abs(+min(f)+cast(avg( -a) AS integer)) from t1 union select count(distinct c) from t1)),d))))<>f)))} +} {600} +do_test randexpr-2.1080 { + db eval {SELECT ((select -case min(17 | (abs(a)/abs(case (select 
max(a) from t1)+13+t1.c when c then (abs(case when 17<>a and t1.c=d then ((17)) else e end)/abs(t1.b)) else 17 end)) | f*a) when abs(~(count(distinct t1.e))-abs((~max(c)))-count(*) | min(t1.f)+max(11)) then -cast(avg((b)) AS integer) else min(b) end from t1))*a FROM t1 WHERE (13)-(t1.a*17-t1.b)-coalesce((select (select -cast(avg(t1.c) AS integer)+case - -count(*) | (count(distinct t1.a)) when -min(t1.e) then (max(t1.d)) else count(distinct t1.b) end from t1)-e+(abs(17)/abs(e)) from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where (d)>=13 and (t1.b not in (c,a, -t1.d) or t1.e in (select c from t1 union select t1.c from t1)) or 11<=t1.d))),17) in (select e from t1 union select e from t1)} +} {} +do_test randexpr-2.1081 { + db eval {SELECT ((select -case min(17 | (abs(a)/abs(case (select max(a) from t1)+13+t1.c when c then (abs(case when 17<>a and t1.c=d then ((17)) else e end)/abs(t1.b)) else 17 end)) | f*a) when abs(~(count(distinct t1.e))-abs((~max(c)))-count(*) | min(t1.f)+max(11)) then -cast(avg((b)) AS integer) else min(b) end from t1))*a FROM t1 WHERE NOT ((13)-(t1.a*17-t1.b)-coalesce((select (select -cast(avg(t1.c) AS integer)+case - -count(*) | (count(distinct t1.a)) when -min(t1.e) then (max(t1.d)) else count(distinct t1.b) end from t1)-e+(abs(17)/abs(e)) from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where (d)>=13 and (t1.b not in (c,a, -t1.d) or t1.e in (select c from t1 union select t1.c from t1)) or 11<=t1.d))),17) in (select e from t1 union select e from t1))} +} {-20000} +do_test randexpr-2.1082 { + db eval {SELECT ((select -case min(17 & (abs(a)/abs(case (select max(a) from t1)+13+t1.c when c then (abs(case when 17<>a and t1.c=d then ((17)) else e end)/abs(t1.b)) else 17 end)) & f*a) when abs(~(count(distinct t1.e))-abs((~max(c)))-count(*) & min(t1.f)+max(11)) then -cast(avg((b)) AS integer) else min(b) end from t1))*a FROM t1 WHERE NOT ((13)-(t1.a*17-t1.b)-coalesce((select (select -cast(avg(t1.c) AS integer)+case - -count(*) | (count(distinct t1.a)) when -min(t1.e) then (max(t1.d)) else count(distinct t1.b) end from t1)-e+(abs(17)/abs(e)) from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where (d)>=13 and (t1.b not in (c,a, -t1.d) or t1.e in (select c from t1 union select t1.c from t1)) or 11<=t1.d))),17) in (select e from t1 union select e from t1))} +} {-20000} +do_test randexpr-2.1083 { + db eval {SELECT case when (select -min(11) from t1)<>(abs(17)/abs(t1.d | e)) then case 11+case 11 when b | e-e then 19+e else b end*(abs(t1.a)/abs(b | a)) | coalesce((select case when a in (select cast(avg(d) AS integer) from t1 union select - - -max(c) from t1) or e not in (c,d,a) then +13 when t1.a>a then 19 else f end from t1 where t1.c=13 or f<>t1.e),a) when f then f else c end else t1.e end FROM t1 WHERE ~17 in (11,t1.c,19)} +} {} +do_test randexpr-2.1084 { + db eval {SELECT case when (select -min(11) from t1)<>(abs(17)/abs(t1.d | e)) then case 11+case 11 when b | e-e then 19+e else b end*(abs(t1.a)/abs(b | a)) | coalesce((select case when a in (select cast(avg(d) AS integer) from t1 union select - - -max(c) from t1) or e not in (c,d,a) then +13 when t1.a>a then 19 else f end from t1 where t1.c=13 or f<>t1.e),a) when f then f else c end else t1.e end FROM t1 WHERE NOT (~17 in (11,t1.c,19))} +} {300} +do_test randexpr-2.1085 { + db eval {SELECT case when (select -min(11) from t1)<>(abs(17)/abs(t1.d & e)) then case 11+case 11 when b & e-e then 19+e else b end*(abs(t1.a)/abs(b & a)) & coalesce((select 
case when a in (select cast(avg(d) AS integer) from t1 union select - - -max(c) from t1) or e not in (c,d,a) then +13 when t1.a>a then 19 else f end from t1 where t1.c=13 or f<>t1.e),a) when f then f else c end else t1.e end FROM t1 WHERE NOT (~17 in (11,t1.c,19))} +} {300} +do_test randexpr-2.1086 { + db eval {SELECT (select case +cast(avg(+19) AS integer) when (abs(count(*))) then (cast(avg(e*t1.b) AS integer)) else cast(avg(t1.a) AS integer) end+count(*) | +max(f-t1.e*t1.e | (f)* -19+ -b*d) | count(*)*count(*)* -count(*) | ~count(distinct -b)*cast(avg(t1.f) AS integer)+count(*) from t1)-t1.c+t1.b FROM t1 WHERE ((select -count(*)+min(case ~coalesce((select max(d) from t1 where (not not exists(select 1 from t1 where t1.b not in ((select -max(a)+(count(*)) from t1) | c+case when t1.e not in (13,e,c) then (t1.f) else t1.c end+17*c+19,c,e)))),t1.f) when (f) then t1.a else t1.f end | b | t1.a) from t1) in (c,t1.c+ -a,f))} +} {} +do_test randexpr-2.1087 { + db eval {SELECT (select case +cast(avg(+19) AS integer) when (abs(count(*))) then (cast(avg(e*t1.b) AS integer)) else cast(avg(t1.a) AS integer) end+count(*) | +max(f-t1.e*t1.e | (f)* -19+ -b*d) | count(*)*count(*)* -count(*) | ~count(distinct -b)*cast(avg(t1.f) AS integer)+count(*) from t1)-t1.c+t1.b FROM t1 WHERE NOT (((select -count(*)+min(case ~coalesce((select max(d) from t1 where (not not exists(select 1 from t1 where t1.b not in ((select -max(a)+(count(*)) from t1) | c+case when t1.e not in (13,e,c) then (t1.f) else t1.c end+17*c+19,c,e)))),t1.f) when (f) then t1.a else t1.f end | b | t1.a) from t1) in (c,t1.c+ -a,f)))} +} {-101} +do_test randexpr-2.1088 { + db eval {SELECT (select case +cast(avg(+19) AS integer) when (abs(count(*))) then (cast(avg(e*t1.b) AS integer)) else cast(avg(t1.a) AS integer) end+count(*) & +max(f-t1.e*t1.e & (f)* -19+ -b*d) & count(*)*count(*)* -count(*) & ~count(distinct -b)*cast(avg(t1.f) AS integer)+count(*) from t1)-t1.c+t1.b FROM t1 WHERE NOT (((select -count(*)+min(case ~coalesce((select max(d) from t1 where (not not exists(select 1 from t1 where t1.b not in ((select -max(a)+(count(*)) from t1) | c+case when t1.e not in (13,e,c) then (t1.f) else t1.c end+17*c+19,c,e)))),t1.f) when (f) then t1.a else t1.f end | b | t1.a) from t1) in (c,t1.c+ -a,f)))} +} {-36} +do_test randexpr-2.1089 { + db eval {SELECT (coalesce((select (select -~+ -abs(case cast(avg(a*t1.d) AS integer) when case ~case count(distinct ((abs(t1.a)/abs(d)))) when case cast(avg(17) AS integer)-count(*) when -count(*) then count(distinct d) else (max(t1.d)) end*count(*) then count(distinct t1.c) else min(19) end-count(*) when cast(avg(t1.a) AS integer) then ((count(*))) else min(a) end then (count(*)) else max(e) end)*(count(*))*max(a) from t1) from t1 where (19>=(17))),d)) FROM t1 WHERE exists(select 1 from t1 where 19 in (t1.a*coalesce((select a from t1 where (11t1.b),t1.e)) from t1 where e not between 13 and t1.c),(a))-t1.d and 13),13*b)-17, -b,17))} +} {} +do_test randexpr-2.1090 { + db eval {SELECT (coalesce((select (select -~+ -abs(case cast(avg(a*t1.d) AS integer) when case ~case count(distinct ((abs(t1.a)/abs(d)))) when case cast(avg(17) AS integer)-count(*) when -count(*) then count(distinct d) else (max(t1.d)) end*count(*) then count(distinct t1.c) else min(19) end-count(*) when cast(avg(t1.a) AS integer) then ((count(*))) else min(a) end then (count(*)) else max(e) end)*(count(*))*max(a) from t1) from t1 where (19>=(17))),d)) FROM t1 WHERE NOT (exists(select 1 from t1 where 19 in (t1.a*coalesce((select a from t1 where 
(11t1.b),t1.e)) from t1 where e not between 13 and t1.c),(a))-t1.d and 13),13*b)-17, -b,17)))} +} {-49900} +do_test randexpr-2.1091 { + db eval {SELECT case when (coalesce((select max((select +max(c) from t1)) from t1 where t1.c<>t1.a),d) not in (t1.d,+case when 17>19 then e when t1.b<>+(select count(*) from t1) then t1.e else t1.d end,t1.c)) then (select count(*)-count(distinct t1.d)+ -max( -t1.c-a)*case cast(avg((f)) AS integer) when count(distinct d) then max((13)) else max(t1.d) end from t1)*t1.b else t1.c end | -13*t1.d*t1.c FROM t1 WHERE c in (select case count(distinct ~(abs( -a)/abs(d))) when ~count(*) then ~min(coalesce((select max((abs(e)/abs(d))) from t1 where not exists(select 1 from t1 where t1.e>( -+t1.a-t1.e*17))),19))+count(*) else case ~ -cast(avg((11)) AS integer) | +cast(avg(d) AS integer) | (cast(avg(c) AS integer))+min(t1.e) when min(t1.c) then max(11) else -count(*) end end+max(t1.c) from t1 union select -count(*) from t1)} +} {} +do_test randexpr-2.1092 { + db eval {SELECT case when (coalesce((select max((select +max(c) from t1)) from t1 where t1.c<>t1.a),d) not in (t1.d,+case when 17>19 then e when t1.b<>+(select count(*) from t1) then t1.e else t1.d end,t1.c)) then (select count(*)-count(distinct t1.d)+ -max( -t1.c-a)*case cast(avg((f)) AS integer) when count(distinct d) then max((13)) else max(t1.d) end from t1)*t1.b else t1.c end | -13*t1.d*t1.c FROM t1 WHERE NOT (c in (select case count(distinct ~(abs( -a)/abs(d))) when ~count(*) then ~min(coalesce((select max((abs(e)/abs(d))) from t1 where not exists(select 1 from t1 where t1.e>( -+t1.a-t1.e*17))),19))+count(*) else case ~ -cast(avg((11)) AS integer) | +cast(avg(d) AS integer) | (cast(avg(c) AS integer))+min(t1.e) when min(t1.c) then max(11) else -count(*) end end+max(t1.c) from t1 union select -count(*) from t1))} +} {-1559700} +do_test randexpr-2.1093 { + db eval {SELECT case when (coalesce((select max((select +max(c) from t1)) from t1 where t1.c<>t1.a),d) not in (t1.d,+case when 17>19 then e when t1.b<>+(select count(*) from t1) then t1.e else t1.d end,t1.c)) then (select count(*)-count(distinct t1.d)+ -max( -t1.c-a)*case cast(avg((f)) AS integer) when count(distinct d) then max((13)) else max(t1.d) end from t1)*t1.b else t1.c end & -13*t1.d*t1.c FROM t1 WHERE NOT (c in (select case count(distinct ~(abs( -a)/abs(d))) when ~count(*) then ~min(coalesce((select max((abs(e)/abs(d))) from t1 where not exists(select 1 from t1 where t1.e>( -+t1.a-t1.e*17))),19))+count(*) else case ~ -cast(avg((11)) AS integer) | +cast(avg(d) AS integer) | (cast(avg(c) AS integer))+min(t1.e) when min(t1.c) then max(11) else -count(*) end end+max(t1.c) from t1 union select -count(*) from t1))} +} {0} +do_test randexpr-2.1094 { + db eval {SELECT coalesce((select max(t1.c+t1.b-t1.b+(abs(d++t1.f)/abs(t1.a)) | 13*17) from t1 where coalesce((select 11 from t1 where case case case c when d then t1.d else 13 end when b then -11 else t1.b end when 13 then (t1.a) else d end not in (c,13,a)),t1.c) | f not in (t1.a,t1.c,13) and not exists(select 1 from t1 where t1.c in (select e from t1 union select -11 from t1))),t1.f) FROM t1 WHERE (((13+coalesce((select max(13) from t1 where not exists(select 1 from t1 where ((abs(c-case when not exists(select 1 from t1 where ((f) between c and 13)) then (select case min(f) when max(17) then min(13) else min(b) end from t1) when 17 in (t1.e,13,f) then -13 else 11 end)/abs(t1.f)) in (select min(c) from t1 union select -count(*) | + -min(19) from t1))) and f<13),t1.e+t1.c) in (select count(distinct c)-count(*) 
from t1 union select -case min((e))+cast(avg(t1.a) AS integer) when ((min(t1.c))) then count(distinct e) else -cast(avg(t1.d) AS integer) end from t1))))} +} {} +do_test randexpr-2.1095 { + db eval {SELECT coalesce((select max(t1.c+t1.b-t1.b+(abs(d++t1.f)/abs(t1.a)) | 13*17) from t1 where coalesce((select 11 from t1 where case case case c when d then t1.d else 13 end when b then -11 else t1.b end when 13 then (t1.a) else d end not in (c,13,a)),t1.c) | f not in (t1.a,t1.c,13) and not exists(select 1 from t1 where t1.c in (select e from t1 union select -11 from t1))),t1.f) FROM t1 WHERE NOT ((((13+coalesce((select max(13) from t1 where not exists(select 1 from t1 where ((abs(c-case when not exists(select 1 from t1 where ((f) between c and 13)) then (select case min(f) when max(17) then min(13) else min(b) end from t1) when 17 in (t1.e,13,f) then -13 else 11 end)/abs(t1.f)) in (select min(c) from t1 union select -count(*) | + -min(19) from t1))) and f<13),t1.e+t1.c) in (select count(distinct c)-count(*) from t1 union select -case min((e))+cast(avg(t1.a) AS integer) when ((min(t1.c))) then count(distinct e) else -cast(avg(t1.d) AS integer) end from t1)))))} +} {511} +do_test randexpr-2.1096 { + db eval {SELECT coalesce((select max(t1.c+t1.b-t1.b+(abs(d++t1.f)/abs(t1.a)) & 13*17) from t1 where coalesce((select 11 from t1 where case case case c when d then t1.d else 13 end when b then -11 else t1.b end when 13 then (t1.a) else d end not in (c,13,a)),t1.c) & f not in (t1.a,t1.c,13) and not exists(select 1 from t1 where t1.c in (select e from t1 union select -11 from t1))),t1.f) FROM t1 WHERE NOT ((((13+coalesce((select max(13) from t1 where not exists(select 1 from t1 where ((abs(c-case when not exists(select 1 from t1 where ((f) between c and 13)) then (select case min(f) when max(17) then min(13) else min(b) end from t1) when 17 in (t1.e,13,f) then -13 else 11 end)/abs(t1.f)) in (select min(c) from t1 union select -count(*) | + -min(19) from t1))) and f<13),t1.e+t1.c) in (select count(distinct c)-count(*) from t1 union select -case min((e))+cast(avg(t1.a) AS integer) when ((min(t1.c))) then count(distinct e) else -cast(avg(t1.d) AS integer) end from t1)))))} +} {20} +do_test randexpr-2.1097 { + db eval {SELECT case when t1.a in (select t1.e*case when t1.b between t1.d and 11 or c-case when (t1.e=13) then ~e when e<>a then t1.d else t1.f end in (11,t1.e,t1.c) and 11 not between t1.a and 13 and 19<11 then 11 else t1.e end*t1.d from t1 union select t1.c from t1) then a when e between e and b then ((19)) else 11 end FROM t1 WHERE coalesce((select (select count(distinct t1.f) from t1) from t1 where case coalesce((select max(t1.d) from t1 where 17<>19),t1.d) when a then coalesce((select ~t1.f- - -c-c*17-f-c-t1.a from t1 where (t1.f) in (select max(11) from t1 union select ~cast(avg(17) AS integer) | ~count(*)+max(f)- -cast(avg(11) AS integer) from t1)),13) else e end in (a,t1.c,11)),c)>=t1.e} +} {} +do_test randexpr-2.1098 { + db eval {SELECT case when t1.a in (select t1.e*case when t1.b between t1.d and 11 or c-case when (t1.e=13) then ~e when e<>a then t1.d else t1.f end in (11,t1.e,t1.c) and 11 not between t1.a and 13 and 19<11 then 11 else t1.e end*t1.d from t1 union select t1.c from t1) then a when e between e and b then ((19)) else 11 end FROM t1 WHERE NOT (coalesce((select (select count(distinct t1.f) from t1) from t1 where case coalesce((select max(t1.d) from t1 where 17<>19),t1.d) when a then coalesce((select ~t1.f- - -c-c*17-f-c-t1.a from t1 where (t1.f) in (select max(11) from t1 union 
select ~cast(avg(17) AS integer) | ~count(*)+max(f)- -cast(avg(11) AS integer) from t1)),13) else e end in (a,t1.c,11)),c)>=t1.e)} +} {11} +do_test randexpr-2.1099 { + db eval {SELECT coalesce((select max(case t1.b+(select ~((cast(avg(17) AS integer)))*min(a)+ -count(distinct b)*cast(avg(t1.b) AS integer)-count(*)+max(19) from t1)-(e) when d then 13 else 13*c end+t1.c) from t1 where t1.b in (select +cast(avg(e) AS integer)*case count(*) when cast(avg(13) AS integer) then count(distinct t1.b) else count(*) end-(count(distinct 19))*min(t1.d) from t1 union select max(a) from t1)),(f))*e+t1.f*19 FROM t1 WHERE 17 not between -t1.e and d} +} {} +do_test randexpr-2.1100 { + db eval {SELECT coalesce((select max(case t1.b+(select ~((cast(avg(17) AS integer)))*min(a)+ -count(distinct b)*cast(avg(t1.b) AS integer)-count(*)+max(19) from t1)-(e) when d then 13 else 13*c end+t1.c) from t1 where t1.b in (select +cast(avg(e) AS integer)*case count(*) when cast(avg(13) AS integer) then count(distinct t1.b) else count(*) end-(count(distinct 19))*min(t1.d) from t1 union select max(a) from t1)),(f))*e+t1.f*19 FROM t1 WHERE NOT (17 not between -t1.e and d)} +} {311400} +do_test randexpr-2.1101 { + db eval {SELECT coalesce((select max(~19) from t1 where 11 in (select t1.b-11*coalesce((select max(b) from t1 where coalesce((select max(e) from t1 where c*case 19*19+coalesce((select 19 from t1 where (11 not in (13+t1.d,b,(11)))),13)*d when t1.e then t1.b else t1.b end+t1.e between d and t1.b),t1.b) | (f)<>13), - -17)+c from t1 union select -19 from t1)),17) FROM t1 WHERE ~t1.d not between 11*13-+t1.d-11-f and t1.d} +} {} +do_test randexpr-2.1102 { + db eval {SELECT coalesce((select max(~19) from t1 where 11 in (select t1.b-11*coalesce((select max(b) from t1 where coalesce((select max(e) from t1 where c*case 19*19+coalesce((select 19 from t1 where (11 not in (13+t1.d,b,(11)))),13)*d when t1.e then t1.b else t1.b end+t1.e between d and t1.b),t1.b) | (f)<>13), - -17)+c from t1 union select -19 from t1)),17) FROM t1 WHERE NOT (~t1.d not between 11*13-+t1.d-11-f and t1.d)} +} {17} +do_test randexpr-2.1103 { + db eval {SELECT coalesce((select max(~19) from t1 where 11 in (select t1.b-11*coalesce((select max(b) from t1 where coalesce((select max(e) from t1 where c*case 19*19+coalesce((select 19 from t1 where (11 not in (13+t1.d,b,(11)))),13)*d when t1.e then t1.b else t1.b end+t1.e between d and t1.b),t1.b) & (f)<>13), - -17)+c from t1 union select -19 from t1)),17) FROM t1 WHERE NOT (~t1.d not between 11*13-+t1.d-11-f and t1.d)} +} {17} +do_test randexpr-2.1104 { + db eval {SELECT case t1.b when coalesce((select max(17+coalesce((select max(coalesce((select (select count(distinct case f when 19 then coalesce((select coalesce((select max(11-(abs(d)/abs(11))) from t1 where not -a in (19,t1.b,17)),17) from t1 where (f in (b,f,t1.a))),b) else b end) from t1) from t1 where 17 between e and t1.f),t1.a)) from t1 where 13>=e),b)) from t1 where t1.f>t1.f),t1.b) then 17 else t1.f end FROM t1 WHERE not exists(select 1 from t1 where 13 between a+17 and (t1.c))} +} {17} +do_test randexpr-2.1105 { + db eval {SELECT case t1.b when coalesce((select max(17+coalesce((select max(coalesce((select (select count(distinct case f when 19 then coalesce((select coalesce((select max(11-(abs(d)/abs(11))) from t1 where not -a in (19,t1.b,17)),17) from t1 where (f in (b,f,t1.a))),b) else b end) from t1) from t1 where 17 between e and t1.f),t1.a)) from t1 where 13>=e),b)) from t1 where t1.f>t1.f),t1.b) then 17 else t1.f end FROM t1 WHERE NOT (not 
exists(select 1 from t1 where 13 between a+17 and (t1.c)))} +} {} +do_test randexpr-2.1106 { + db eval {SELECT case when coalesce((select max(d) from t1 where t1.a in (select a-a from t1 union select a from t1)),t1.b)*(select max(t1.b) from t1)>=case when t1.b=t1.a} +} {300} +do_test randexpr-2.1107 { + db eval {SELECT case when coalesce((select max(d) from t1 where t1.a in (select a-a from t1 union select a from t1)),t1.b)*(select max(t1.b) from t1)>=case when t1.b=t1.a)} +} {} +do_test randexpr-2.1108 { + db eval {SELECT case when case when f*~case when t1.c*t1.f in (c,t1.b,(17)) then 17 when 11>t1.c then 13 else t1.e end+t1.f in (select +max(e)+case (count(distinct a)*count(*))+max(19) when -max(11) then count(distinct a) else ((count(distinct t1.d))) end from t1 union select count(distinct c) from t1) then 17 else d end+(t1.c)<17 then b when not exists(select 1 from t1 where 11>t1.b) then t1.d else t1.a end FROM t1 WHERE t1.b< -f} +} {} +do_test randexpr-2.1109 { + db eval {SELECT case when case when f*~case when t1.c*t1.f in (c,t1.b,(17)) then 17 when 11>t1.c then 13 else t1.e end+t1.f in (select +max(e)+case (count(distinct a)*count(*))+max(19) when -max(11) then count(distinct a) else ((count(distinct t1.d))) end from t1 union select count(distinct c) from t1) then 17 else d end+(t1.c)<17 then b when not exists(select 1 from t1 where 11>t1.b) then t1.d else t1.a end FROM t1 WHERE NOT (t1.b< -f)} +} {400} +do_test randexpr-2.1110 { + db eval {SELECT (select cast(avg(f) AS integer) | ~++cast(avg((abs(17)/abs( -f+(c-coalesce((select case when c=d and t1.e=t1.b then e when e in (t1.b,19,t1.c) then 17 else d end from t1 where e between t1.a and t1.c),a))+a))) AS integer)-abs( -min(t1.c))-abs(max((t1.b)) | -~(count(*))+max(17))+cast(avg(t1.a) AS integer) | min(a)*(count(*))*min(17) from t1) FROM t1 WHERE b-(t1.e*t1.e)-t1.d=t1.c} +} {} +do_test randexpr-2.1111 { + db eval {SELECT (select cast(avg(f) AS integer) | ~++cast(avg((abs(17)/abs( -f+(c-coalesce((select case when c=d and t1.e=t1.b then e when e in (t1.b,19,t1.c) then 17 else d end from t1 where e between t1.a and t1.c),a))+a))) AS integer)-abs( -min(t1.c))-abs(max((t1.b)) | -~(count(*))+max(17))+cast(avg(t1.a) AS integer) | min(a)*(count(*))*min(17) from t1) FROM t1 WHERE NOT (b-(t1.e*t1.e)-t1.d=t1.c)} +} {-260} +do_test randexpr-2.1112 { + db eval {SELECT (select cast(avg(f) AS integer) & ~++cast(avg((abs(17)/abs( -f+(c-coalesce((select case when c=d and t1.e=t1.b then e when e in (t1.b,19,t1.c) then 17 else d end from t1 where e between t1.a and t1.c),a))+a))) AS integer)-abs( -min(t1.c))-abs(max((t1.b)) & -~(count(*))+max(17))+cast(avg(t1.a) AS integer) & min(a)*(count(*))*min(17) from t1) FROM t1 WHERE NOT (b-(t1.e*t1.e)-t1.d=t1.c)} +} {512} +do_test randexpr-2.1113 { + db eval {SELECT ~+coalesce((select case when t1.f>d then b when (abs(t1.a)/abs((abs(f)/abs(t1.c))))*case ~b when e then (select cast(avg( -+t1.f) AS integer) from t1) else c end+coalesce((select max(17*case t1.a+b when t1.b then t1.d else t1.f end) from t1 where d=e),t1.f)+13>=17 then 19 else f end from t1 where t1.a in (select d from t1 union select e from t1)),b) FROM t1 WHERE 17<>f-~t1.a-case when -b in (t1.b,(select min( -coalesce((select max(coalesce((select a- -t1.b from t1 where 11 in (t1.b,13, -t1.e) and f not between -13 and t1.b),c)) from t1 where b not in (a,11,17)),t1.e)) from t1),e) or not exists(select 1 from t1 where t1.c in (select t1.b from t1 union select t1.b from t1)) then coalesce((select -t1.e from t1 where e<>t1.e),f) when not t1.e<>t1.c 
then 19 else e end} +} {-201} +do_test randexpr-2.1114 { + db eval {SELECT ~+coalesce((select case when t1.f>d then b when (abs(t1.a)/abs((abs(f)/abs(t1.c))))*case ~b when e then (select cast(avg( -+t1.f) AS integer) from t1) else c end+coalesce((select max(17*case t1.a+b when t1.b then t1.d else t1.f end) from t1 where d=e),t1.f)+13>=17 then 19 else f end from t1 where t1.a in (select d from t1 union select e from t1)),b) FROM t1 WHERE NOT (17<>f-~t1.a-case when -b in (t1.b,(select min( -coalesce((select max(coalesce((select a- -t1.b from t1 where 11 in (t1.b,13, -t1.e) and f not between -13 and t1.b),c)) from t1 where b not in (a,11,17)),t1.e)) from t1),e) or not exists(select 1 from t1 where t1.c in (select t1.b from t1 union select t1.b from t1)) then coalesce((select -t1.e from t1 where e<>t1.e),f) when not t1.e<>t1.c then 19 else e end)} +} {} +do_test randexpr-2.1115 { + db eval {SELECT d- -e*(abs(t1.c)/abs(d)) | t1.a+ -coalesce((select max(t1.e) from t1 where case when case when (a in (13,b,t1.b)) and d in (t1.c,17,a) then 17+t1.e when 19>=t1.b then a else 11 end=t1.f and d between c and (d) then (abs(17)/abs(d)) else t1.d end*f>e and not f<=13 and -t1.f>=t1.e and t1.be then t1.b else c end FROM t1 WHERE b-coalesce((select (coalesce((select max((coalesce((select d from t1 where not -case when case 13 when +~(select case count(*) when -max(t1.b) then max(a) else min(c) end from t1) then c else -a end in (17,t1.b,(b)) then f else b end-f+17<=t1.a),t1.f)-d)) from t1 where t1.e=b),t1.b)) from t1 where (t1.a=t1.c)), -t1.a) not between 19 and 19} +} {-584} +do_test randexpr-2.1116 { + db eval {SELECT d- -e*(abs(t1.c)/abs(d)) | t1.a+ -coalesce((select max(t1.e) from t1 where case when case when (a in (13,b,t1.b)) and d in (t1.c,17,a) then 17+t1.e when 19>=t1.b then a else 11 end=t1.f and d between c and (d) then (abs(17)/abs(d)) else t1.d end*f>e and not f<=13 and -t1.f>=t1.e and t1.be then t1.b else c end FROM t1 WHERE NOT (b-coalesce((select (coalesce((select max((coalesce((select d from t1 where not -case when case 13 when +~(select case count(*) when -max(t1.b) then max(a) else min(c) end from t1) then c else -a end in (17,t1.b,(b)) then f else b end-f+17<=t1.a),t1.f)-d)) from t1 where t1.e=b),t1.b)) from t1 where (t1.a=t1.c)), -t1.a) not between 19 and 19)} +} {} +do_test randexpr-2.1117 { + db eval {SELECT d- -e*(abs(t1.c)/abs(d)) & t1.a+ -coalesce((select max(t1.e) from t1 where case when case when (a in (13,b,t1.b)) and d in (t1.c,17,a) then 17+t1.e when 19>=t1.b then a else 11 end=t1.f and d between c and (d) then (abs(17)/abs(d)) else t1.d end*f>e and not f<=13 and -t1.f>=t1.e and t1.be then t1.b else c end FROM t1 WHERE b-coalesce((select (coalesce((select max((coalesce((select d from t1 where not -case when case 13 when +~(select case count(*) when -max(t1.b) then max(a) else min(c) end from t1) then c else -a end in (17,t1.b,(b)) then f else b end-f+17<=t1.a),t1.f)-d)) from t1 where t1.e=b),t1.b)) from t1 where (t1.a=t1.c)), -t1.a) not between 19 and 19} +} {384} +do_test randexpr-2.1118 { + db eval {SELECT 19-case 13-f when b then t1.a*11-t1.d-+17*t1.b-coalesce((select 17 from t1 where 13<=f),+coalesce((select (abs(17+~coalesce((select a from t1 where 11<>t1.b and 11 not between t1.e and 19),17)+t1.e+t1.d | b)/abs(c))+f from t1 where 19 not between 19 and c),d)) else 19 end*t1.e+b FROM t1 WHERE 11+11 not in (t1.c,t1.e,19) and f+19*19*case when ~t1.a+19*b between t1.a-case case when f=(t1.d) then ((t1.d)) else -d end*e when c then e else t1.a end*t1.e and c then t1.e else b 
end in (select d from t1 union select t1.a from t1) or t1.d>=t1.f or not exists(select 1 from t1 where a<=13)} +} {-9281} +do_test randexpr-2.1119 { + db eval {SELECT 19-case 13-f when b then t1.a*11-t1.d-+17*t1.b-coalesce((select 17 from t1 where 13<=f),+coalesce((select (abs(17+~coalesce((select a from t1 where 11<>t1.b and 11 not between t1.e and 19),17)+t1.e+t1.d | b)/abs(c))+f from t1 where 19 not between 19 and c),d)) else 19 end*t1.e+b FROM t1 WHERE NOT (11+11 not in (t1.c,t1.e,19) and f+19*19*case when ~t1.a+19*b between t1.a-case case when f=(t1.d) then ((t1.d)) else -d end*e when c then e else t1.a end*t1.e and c then t1.e else b end in (select d from t1 union select t1.a from t1) or t1.d>=t1.f or not exists(select 1 from t1 where a<=13))} +} {} +do_test randexpr-2.1120 { + db eval {SELECT 19-case 13-f when b then t1.a*11-t1.d-+17*t1.b-coalesce((select 17 from t1 where 13<=f),+coalesce((select (abs(17+~coalesce((select a from t1 where 11<>t1.b and 11 not between t1.e and 19),17)+t1.e+t1.d & b)/abs(c))+f from t1 where 19 not between 19 and c),d)) else 19 end*t1.e+b FROM t1 WHERE 11+11 not in (t1.c,t1.e,19) and f+19*19*case when ~t1.a+19*b between t1.a-case case when f=(t1.d) then ((t1.d)) else -d end*e when c then e else t1.a end*t1.e and c then t1.e else b end in (select d from t1 union select t1.a from t1) or t1.d>=t1.f or not exists(select 1 from t1 where a<=13)} +} {-9281} +do_test randexpr-2.1121 { + db eval {SELECT case 19 when e then t1.b-t1.d else 11+coalesce((select 19 from t1 where not (t1.a)+(abs(case coalesce((select coalesce((select t1.e from t1 where 11=d),a) from t1 where t1.d<=13),a) when 17 then 13 else t1.a end)/abs(t1.b))-t1.d*e+t1.d not between b and b or e in (select -min(c) from t1 union select min(t1.b) from t1)),(abs(e)/abs(t1.b)))+t1.c end FROM t1 WHERE exists(select 1 from t1 where c in (select t1.a from t1 union select f from t1))} +} {} +do_test randexpr-2.1122 { + db eval {SELECT case 19 when e then t1.b-t1.d else 11+coalesce((select 19 from t1 where not (t1.a)+(abs(case coalesce((select coalesce((select t1.e from t1 where 11=d),a) from t1 where t1.d<=13),a) when 17 then 13 else t1.a end)/abs(t1.b))-t1.d*e+t1.d not between b and b or e in (select -min(c) from t1 union select min(t1.b) from t1)),(abs(e)/abs(t1.b)))+t1.c end FROM t1 WHERE NOT (exists(select 1 from t1 where c in (select t1.a from t1 union select f from t1)))} +} {313} +do_test randexpr-2.1123 { + db eval {SELECT 17 | t1.d-(select +max(case when not exists(select 1 from t1 where (a<=t1.f)) then -19 else -t1.d end)- -min(t1.f+t1.f-(select (count(*)) from t1)) from t1)- -19 | b-d+coalesce((select max(d) from t1 where e-t1.a in (19,b,11) or e in (19,t1.b,c)),11) FROM t1 WHERE (11<=11 and case coalesce((select max((f)) from t1 where t1.b | case 11 when t1.e*t1.e then e else case when ~b-case when 19 not in (t1.d,13,e) then 13 when c between t1.e and b then 19 else t1.c end-(c) in (select -t1.f from t1 union select d from t1) then c else t1.b end end*t1.a in (19,e,c)),t1.a) when t1.d then t1.a else -a end not in (t1.c,b,t1.f))} +} {-41} +do_test randexpr-2.1124 { + db eval {SELECT 17 | t1.d-(select +max(case when not exists(select 1 from t1 where (a<=t1.f)) then -19 else -t1.d end)- -min(t1.f+t1.f-(select (count(*)) from t1)) from t1)- -19 | b-d+coalesce((select max(d) from t1 where e-t1.a in (19,b,11) or e in (19,t1.b,c)),11) FROM t1 WHERE NOT ((11<=11 and case coalesce((select max((f)) from t1 where t1.b | case 11 when t1.e*t1.e then e else case when ~b-case when 19 not in (t1.d,13,e) then 13 
when c between t1.e and b then 19 else t1.c end-(c) in (select -t1.f from t1 union select d from t1) then c else t1.b end end*t1.a in (19,e,c)),t1.a) when t1.d then t1.a else -a end not in (t1.c,b,t1.f)))} +} {} +do_test randexpr-2.1125 { + db eval {SELECT 17 & t1.d-(select +max(case when not exists(select 1 from t1 where (a<=t1.f)) then -19 else -t1.d end)- -min(t1.f+t1.f-(select (count(*)) from t1)) from t1)- -19 & b-d+coalesce((select max(d) from t1 where e-t1.a in (19,b,11) or e in (19,t1.b,c)),11) FROM t1 WHERE (11<=11 and case coalesce((select max((f)) from t1 where t1.b | case 11 when t1.e*t1.e then e else case when ~b-case when 19 not in (t1.d,13,e) then 13 when c between t1.e and b then 19 else t1.c end-(c) in (select -t1.f from t1 union select d from t1) then c else t1.b end end*t1.a in (19,e,c)),t1.a) when t1.d then t1.a else -a end not in (t1.c,b,t1.f))} +} {0} +do_test randexpr-2.1126 { + db eval {SELECT case t1.e-d+b when coalesce((select (select max(t1.a | a | -(select (count(*))+( - - -count(*)) from t1)*19+ -f+t1.e) from t1) from t1 where not exists(select 1 from t1 where (d not between a and 11) or not d>=11 and exists(select 1 from t1 where not t1.a<>13)) or 19 not in (17,t1.c,t1.c)),19)*t1.c+d then b else c end-b FROM t1 WHERE c<17} +} {} +do_test randexpr-2.1127 { + db eval {SELECT case t1.e-d+b when coalesce((select (select max(t1.a | a | -(select (count(*))+( - - -count(*)) from t1)*19+ -f+t1.e) from t1) from t1 where not exists(select 1 from t1 where (d not between a and 11) or not d>=11 and exists(select 1 from t1 where not t1.a<>13)) or 19 not in (17,t1.c,t1.c)),19)*t1.c+d then b else c end-b FROM t1 WHERE NOT (c<17)} +} {100} +do_test randexpr-2.1128 { + db eval {SELECT case t1.e-d+b when coalesce((select (select max(t1.a & a & -(select (count(*))+( - - -count(*)) from t1)*19+ -f+t1.e) from t1) from t1 where not exists(select 1 from t1 where (d not between a and 11) or not d>=11 and exists(select 1 from t1 where not t1.a<>13)) or 19 not in (17,t1.c,t1.c)),19)*t1.c+d then b else c end-b FROM t1 WHERE NOT (c<17)} +} {100} +do_test randexpr-2.1129 { + db eval {SELECT f*f*(abs(case when (17-coalesce((select max(11) from t1 where coalesce((select max(t1.c-11*t1.a) from t1 where +e=(abs(d)/abs(11))+ -13),t1.b)t1.e) then e else 17 end)/abs(c)) FROM t1 WHERE (t1.a in ((t1.f-t1.c)+b,11,t1.a+f))} +} {} +do_test randexpr-2.1130 { + db eval {SELECT f*f*(abs(case when (17-coalesce((select max(11) from t1 where coalesce((select max(t1.c-11*t1.a) from t1 where +e=(abs(d)/abs(11))+ -13),t1.b)t1.e) then e else 17 end)/abs(c)) FROM t1 WHERE NOT ((t1.a in ((t1.f-t1.c)+b,11,t1.a+f)))} +} {0} +do_test randexpr-2.1131 { + db eval {SELECT f*f*(abs(case when (17-coalesce((select max(11) from t1 where coalesce((select max(t1.c-11*t1.a) from t1 where +e=(abs(d)/abs(11))+ -13),t1.b)t1.e) then e else 17 end)/abs(c)) FROM t1 WHERE NOT ((t1.a in ((t1.f-t1.c)+b,11,t1.a+f)))} +} {0} +do_test randexpr-2.1132 { + db eval {SELECT ( -19-f+coalesce((select max( -~case when case 11 when ~coalesce((select max(13) from t1 where not (e)<>(t1.c)),19)+11 | -19-f then -13 else t1.c end-e<=t1.a then 11 when not exists(select 1 from t1 where (not t1.c<19) and not (13<>17)) then -e else f end) from t1 where (b)<=t1.e),t1.d)+ -t1.c) | b FROM t1 WHERE (~(select (max(f)-count(distinct c)) from t1) | case when e>=t1.c then t1.b+(select abs(count(*)+max(c)*abs(count(distinct e)+max(t1.f))+cast(avg(c) AS integer)) | cast(avg(t1.d) AS integer)*min(11) from t1)-t1.c else c end in (select case -t1.b when 17 then 17 
else -case when not t1.c not in (17,t1.d,17) then 19 else a end end from t1 union select a from t1)) or (11)<=t1.b} +} {-771} +do_test randexpr-2.1133 { + db eval {SELECT ( -19-f+coalesce((select max( -~case when case 11 when ~coalesce((select max(13) from t1 where not (e)<>(t1.c)),19)+11 | -19-f then -13 else t1.c end-e<=t1.a then 11 when not exists(select 1 from t1 where (not t1.c<19) and not (13<>17)) then -e else f end) from t1 where (b)<=t1.e),t1.d)+ -t1.c) | b FROM t1 WHERE NOT ((~(select (max(f)-count(distinct c)) from t1) | case when e>=t1.c then t1.b+(select abs(count(*)+max(c)*abs(count(distinct e)+max(t1.f))+cast(avg(c) AS integer)) | cast(avg(t1.d) AS integer)*min(11) from t1)-t1.c else c end in (select case -t1.b when 17 then 17 else -case when not t1.c not in (17,t1.d,17) then 19 else a end end from t1 union select a from t1)) or (11)<=t1.b)} +} {} +do_test randexpr-2.1134 { + db eval {SELECT ( -19-f+coalesce((select max( -~case when case 11 when ~coalesce((select max(13) from t1 where not (e)<>(t1.c)),19)+11 & -19-f then -13 else t1.c end-e<=t1.a then 11 when not exists(select 1 from t1 where (not t1.c<19) and not (13<>17)) then -e else f end) from t1 where (b)<=t1.e),t1.d)+ -t1.c) & b FROM t1 WHERE (~(select (max(f)-count(distinct c)) from t1) | case when e>=t1.c then t1.b+(select abs(count(*)+max(c)*abs(count(distinct e)+max(t1.f))+cast(avg(c) AS integer)) | cast(avg(t1.d) AS integer)*min(11) from t1)-t1.c else c end in (select case -t1.b when 17 then 17 else -case when not t1.c not in (17,t1.d,17) then 19 else a end end from t1 union select a from t1)) or (11)<=t1.b} +} {64} +do_test randexpr-2.1135 { + db eval {SELECT +case when (not exists(select 1 from t1 where (abs(t1.a+coalesce((select max(b+ -case when e=t1.f then (abs(d)/abs(t1.e)) when not exists(select 1 from t1 where 17t1.c) then e else e end FROM t1 WHERE not t1.d-d+case b when t1.a+t1.a then t1.f+ -t1.e else 11+b*coalesce((select max(t1.f) from t1 where 11>= -f+t1.d),t1.c)*19 end not between d and t1.c-(11)+17} +} {} +do_test randexpr-2.1136 { + db eval {SELECT +case when (not exists(select 1 from t1 where (abs(t1.a+coalesce((select max(b+ -case when e=t1.f then (abs(d)/abs(t1.e)) when not exists(select 1 from t1 where 17t1.c) then e else e end FROM t1 WHERE NOT (not t1.d-d+case b when t1.a+t1.a then t1.f+ -t1.e else 11+b*coalesce((select max(t1.f) from t1 where 11>= -f+t1.d),t1.c)*19 end not between d and t1.c-(11)+17)} +} {500} +do_test randexpr-2.1137 { + db eval {SELECT +case when (not exists(select 1 from t1 where (abs(t1.a+coalesce((select max(b+ -case when e=t1.f then (abs(d)/abs(t1.e)) when not exists(select 1 from t1 where 17t1.c) then e else e end FROM t1 WHERE NOT (not t1.d-d+case b when t1.a+t1.a then t1.f+ -t1.e else 11+b*coalesce((select max(t1.f) from t1 where 11>= -f+t1.d),t1.c)*19 end not between d and t1.c-(11)+17)} +} {500} +do_test randexpr-2.1138 { + db eval {SELECT t1.d+case when (t1.a)<=11 then ~t1.a else ~19 end-+~ -coalesce((select t1.d from t1 where ~t1.f in (select (cast(avg(t1.e+c) AS integer)) from t1 union select count(distinct (abs(coalesce((select max(19) from t1 where not ~11-t1.a=t1.c+case when t1.d between c and t1.d then 11 when t1.f>=17 then f else 19 end*t1.c),17)+f)/abs(f))) from t1)),11) FROM t1 WHERE e>c} +} {370} +do_test randexpr-2.1139 { + db eval {SELECT t1.d+case when (t1.a)<=11 then ~t1.a else ~19 end-+~ -coalesce((select t1.d from t1 where ~t1.f in (select (cast(avg(t1.e+c) AS integer)) from t1 union select count(distinct (abs(coalesce((select max(19) from t1 
where not ~11-t1.a=t1.c+case when t1.d between c and t1.d then 11 when t1.f>=17 then f else 19 end*t1.c),17)+f)/abs(f))) from t1)),11) FROM t1 WHERE NOT (e>c)} +} {} +do_test randexpr-2.1140 { + db eval {SELECT case when coalesce((select max(case b | d*(abs(t1.e)/abs(a))-f | t1.b when t1.e then coalesce((select max(t1.e) from t1 where t1.e between (select ~count(*)*cast(avg(19) AS integer) from t1) and case c when a then 13 else a end),t1.f)+e*c else t1.d end) from t1 where t1.a in (select t1.d from t1 union select 17 from t1)),e)+17<19 then t1.a else 19 end | 11+e-17*19 FROM t1 WHERE d*t1.c-case when case when ((13 in (17+e,b,11)) or 19>(13)) then (abs(coalesce((select t1.a from t1 where t1.a not in (13,c,t1.f)),11))/abs(t1.e))-f when t1.b not between t1.b and f then t1.d else t1.a end in (a,e,t1.d) then t1.f else t1.f end+f+t1.e in (t1.d,e,b)} +} {} +do_test randexpr-2.1141 { + db eval {SELECT case when coalesce((select max(case b | d*(abs(t1.e)/abs(a))-f | t1.b when t1.e then coalesce((select max(t1.e) from t1 where t1.e between (select ~count(*)*cast(avg(19) AS integer) from t1) and case c when a then 13 else a end),t1.f)+e*c else t1.d end) from t1 where t1.a in (select t1.d from t1 union select 17 from t1)),e)+17<19 then t1.a else 19 end | 11+e-17*19 FROM t1 WHERE NOT (d*t1.c-case when case when ((13 in (17+e,b,11)) or 19>(13)) then (abs(coalesce((select t1.a from t1 where t1.a not in (13,c,t1.f)),11))/abs(t1.e))-f when t1.b not between t1.b and f then t1.d else t1.a end in (a,e,t1.d) then t1.f else t1.f end+f+t1.e in (t1.d,e,b))} +} {191} +do_test randexpr-2.1142 { + db eval {SELECT case when coalesce((select max(case b & d*(abs(t1.e)/abs(a))-f & t1.b when t1.e then coalesce((select max(t1.e) from t1 where t1.e between (select ~count(*)*cast(avg(19) AS integer) from t1) and case c when a then 13 else a end),t1.f)+e*c else t1.d end) from t1 where t1.a in (select t1.d from t1 union select 17 from t1)),e)+17<19 then t1.a else 19 end & 11+e-17*19 FROM t1 WHERE NOT (d*t1.c-case when case when ((13 in (17+e,b,11)) or 19>(13)) then (abs(coalesce((select t1.a from t1 where t1.a not in (13,c,t1.f)),11))/abs(t1.e))-f when t1.b not between t1.b and f then t1.d else t1.a end in (a,e,t1.d) then t1.f else t1.f end+f+t1.e in (t1.d,e,b))} +} {16} +do_test randexpr-2.1143 { + db eval {SELECT ((select (count(*) | +~(case ++(min(t1.f+17+(11-case when b between d and d and t1.a not in (13,c,t1.c) and e< -t1.b then 17+(11) else t1.e end+ -b))) when case min(e) when min( -13) | ~max(t1.f) then count(distinct t1.f) | ~count(distinct t1.d) else count(*) end then min(t1.c) else -cast(avg(11) AS integer) end)+count(distinct (f))) from t1)+e) FROM t1 WHERE (a in (11*a-t1.a | t1.b+e*13,(f | a | (select min((select min(13)-min(17)*((count(distinct t1.d))) from t1)) from t1)+t1.a- -11)+(11),t1.c) and (a) between t1.d and t1.f or t1.d not between t1.c and f or d not in (t1.c,t1.f,c))} +} {511} +do_test randexpr-2.1144 { + db eval {SELECT ((select (count(*) | +~(case ++(min(t1.f+17+(11-case when b between d and d and t1.a not in (13,c,t1.c) and e< -t1.b then 17+(11) else t1.e end+ -b))) when case min(e) when min( -13) | ~max(t1.f) then count(distinct t1.f) | ~count(distinct t1.d) else count(*) end then min(t1.c) else -cast(avg(11) AS integer) end)+count(distinct (f))) from t1)+e) FROM t1 WHERE NOT ((a in (11*a-t1.a | t1.b+e*13,(f | a | (select min((select min(13)-min(17)*((count(distinct t1.d))) from t1)) from t1)+t1.a- -11)+(11),t1.c) and (a) between t1.d and t1.f or t1.d not between t1.c and f or d not in 
(t1.c,t1.f,c)))} +} {} +do_test randexpr-2.1145 { + db eval {SELECT ((select (count(*) & +~(case ++(min(t1.f+17+(11-case when b between d and d and t1.a not in (13,c,t1.c) and e< -t1.b then 17+(11) else t1.e end+ -b))) when case min(e) when min( -13) & ~max(t1.f) then count(distinct t1.f) & ~count(distinct t1.d) else count(*) end then min(t1.c) else -cast(avg(11) AS integer) end)+count(distinct (f))) from t1)+e) FROM t1 WHERE (a in (11*a-t1.a | t1.b+e*13,(f | a | (select min((select min(13)-min(17)*((count(distinct t1.d))) from t1)) from t1)+t1.a- -11)+(11),t1.c) and (a) between t1.d and t1.f or t1.d not between t1.c and f or d not in (t1.c,t1.f,c))} +} {501} +do_test randexpr-2.1146 { + db eval {SELECT coalesce((select max(13) from t1 where d in (select (max(19)*count(distinct case when t1.f between ~e and -b-t1.f then b else d end)-count(distinct f)) from t1 union select cast(avg(case e when case t1.b when -a | t1.e*19 then t1.a else -t1.c end then -d else t1.e end) AS integer) from t1)),case when t1.d in (select t1.e from t1 union select b from t1) then t1.f when t1.f between 13 and f then f else f end) FROM t1 WHERE c in (select (t1.a) from t1 union select e from t1)} +} {} +do_test randexpr-2.1147 { + db eval {SELECT coalesce((select max(13) from t1 where d in (select (max(19)*count(distinct case when t1.f between ~e and -b-t1.f then b else d end)-count(distinct f)) from t1 union select cast(avg(case e when case t1.b when -a | t1.e*19 then t1.a else -t1.c end then -d else t1.e end) AS integer) from t1)),case when t1.d in (select t1.e from t1 union select b from t1) then t1.f when t1.f between 13 and f then f else f end) FROM t1 WHERE NOT (c in (select (t1.a) from t1 union select e from t1))} +} {600} +do_test randexpr-2.1148 { + db eval {SELECT coalesce((select max(13) from t1 where d in (select (max(19)*count(distinct case when t1.f between ~e and -b-t1.f then b else d end)-count(distinct f)) from t1 union select cast(avg(case e when case t1.b when -a & t1.e*19 then t1.a else -t1.c end then -d else t1.e end) AS integer) from t1)),case when t1.d in (select t1.e from t1 union select b from t1) then t1.f when t1.f between 13 and f then f else f end) FROM t1 WHERE NOT (c in (select (t1.a) from t1 union select e from t1))} +} {600} +do_test randexpr-2.1149 { + db eval {SELECT (abs(t1.d+b)/abs(~t1.f+case when t1.f=t1.c or exists(select 1 from t1 where c>=+~t1.c*17) or exists(select 1 from t1 where (select -cast(avg(case when 19<>t1.f then -c when c<>b then t1.e else t1.d end) AS integer) from t1) in (d-t1.f,b,19)) then d+b when 11(19) then c else -case when 11>b or t1.c<=c then f when t1.f>t1.a then t1.a else t1.e end*d-11 end+d*t1.b>t1.e),d)>t1.b and c<>d and not exists(select 1 from t1 where 17>=(t1.f)) and t1.e not in (f,t1.b,t1.b))} +} {} +do_test randexpr-2.1150 { + db eval {SELECT (abs(t1.d+b)/abs(~t1.f+case when t1.f=t1.c or exists(select 1 from t1 where c>=+~t1.c*17) or exists(select 1 from t1 where (select -cast(avg(case when 19<>t1.f then -c when c<>b then t1.e else t1.d end) AS integer) from t1) in (d-t1.f,b,19)) then d+b when 11(19) then c else -case when 11>b or t1.c<=c then f when t1.f>t1.a then t1.a else t1.e end*d-11 end+d*t1.b>t1.e),d)>t1.b and c<>d and not exists(select 1 from t1 where 17>=(t1.f)) and t1.e not in (f,t1.b,t1.b)))} +} {0} +do_test randexpr-2.1151 { + db eval {SELECT t1.b-~case when +a in (select ~min(11) from t1 union select -count(*)+count(distinct ~coalesce((select coalesce((select t1.b-19+c from t1 where a<>13 or 13 in (t1.b,d,t1.e)),11) from t1 where 19 
in (19,e,17)),e)) from t1) then (select -min(19)+cast(avg(t1.d) AS integer) from t1) else d end | 17 | t1.e FROM t1 WHERE c-b+e*11+(c+13+11)+(13)+(select cast(avg((abs(e-d+a)/abs(t1.f))) AS integer) from t1)-t1.d+~13<=t1.e-c-17} +} {} +do_test randexpr-2.1152 { + db eval {SELECT t1.b-~case when +a in (select ~min(11) from t1 union select -count(*)+count(distinct ~coalesce((select coalesce((select t1.b-19+c from t1 where a<>13 or 13 in (t1.b,d,t1.e)),11) from t1 where 19 in (19,e,17)),e)) from t1) then (select -min(19)+cast(avg(t1.d) AS integer) from t1) else d end | 17 | t1.e FROM t1 WHERE NOT (c-b+e*11+(c+13+11)+(13)+(select cast(avg((abs(e-d+a)/abs(t1.f))) AS integer) from t1)-t1.d+~13<=t1.e-c-17)} +} {1021} +do_test randexpr-2.1153 { + db eval {SELECT t1.b-~case when +a in (select ~min(11) from t1 union select -count(*)+count(distinct ~coalesce((select coalesce((select t1.b-19+c from t1 where a<>13 or 13 in (t1.b,d,t1.e)),11) from t1 where 19 in (19,e,17)),e)) from t1) then (select -min(19)+cast(avg(t1.d) AS integer) from t1) else d end & 17 & t1.e FROM t1 WHERE NOT (c-b+e*11+(c+13+11)+(13)+(select cast(avg((abs(e-d+a)/abs(t1.f))) AS integer) from t1)-t1.d+~13<=t1.e-c-17)} +} {16} +do_test randexpr-2.1154 { + db eval {SELECT coalesce((select max(b) from t1 where t1.et1.e then 17 when 19 not between 11 and b then t1.e else 19 end*t1.c),t1.c) FROM t1 WHERE not t1.f*(abs(case when ~19-17*e>=t1.d then t1.b when (select cast(avg(t1.e) AS integer) from t1) in ((abs(t1.e)/abs(t1.e))-case when f not in (t1.d,13,c) then c else t1.e end-t1.b-t1.a | b-(t1.c), -a,e) then t1.e else e end+a)/abs(t1.d)) | b not between b and t1.d} +} {} +do_test randexpr-2.1155 { + db eval {SELECT coalesce((select max(b) from t1 where t1.et1.e then 17 when 19 not between 11 and b then t1.e else 19 end*t1.c),t1.c) FROM t1 WHERE NOT (not t1.f*(abs(case when ~19-17*e>=t1.d then t1.b when (select cast(avg(t1.e) AS integer) from t1) in ((abs(t1.e)/abs(t1.e))-case when f not in (t1.d,13,c) then c else t1.e end-t1.b-t1.a | b-(t1.c), -a,e) then t1.e else e end+a)/abs(t1.d)) | b not between b and t1.d)} +} {300} +do_test randexpr-2.1156 { + db eval {SELECT -f+( - -case when 13-b not between case when (a<>t1.f or case when not exists(select 1 from t1 where 11<>t1.a) then b when c<=t1.b then t1.f else 11 end in (select 17 from t1 union select t1.b from t1)) then b-c-b when t1.f=13 then a else 19 end | t1.d and t1.e and not exists(select 1 from t1 where 19=t1.f) then -c else 19 end+17) FROM t1 WHERE not not exists(select 1 from t1 where ~(select cast(avg(t1.d) AS integer) from t1) | +~~case case when f-case when exists(select 1 from t1 where 13 not in (d,11,case when not exists(select 1 from t1 where f between t1.d and e) then 13 when t1.e<>19 then f else 11 end) or b in ( -d,t1.c,17)) then b else f end-(t1.d) in (t1.c,t1.c,b) then t1.b else e end when 11 then t1.e else -c end-c<>t1.b)} +} {-883} +do_test randexpr-2.1157 { + db eval {SELECT -f+( - -case when 13-b not between case when (a<>t1.f or case when not exists(select 1 from t1 where 11<>t1.a) then b when c<=t1.b then t1.f else 11 end in (select 17 from t1 union select t1.b from t1)) then b-c-b when t1.f=13 then a else 19 end | t1.d and t1.e and not exists(select 1 from t1 where 19=t1.f) then -c else 19 end+17) FROM t1 WHERE NOT (not not exists(select 1 from t1 where ~(select cast(avg(t1.d) AS integer) from t1) | +~~case case when f-case when exists(select 1 from t1 where 13 not in (d,11,case when not exists(select 1 from t1 where f between t1.d and e) then 13 when t1.e<>19 
then f else 11 end) or b in ( -d,t1.c,17)) then b else f end-(t1.d) in (t1.c,t1.c,b) then t1.b else e end when 11 then t1.e else -c end-c<>t1.b))} +} {} +do_test randexpr-2.1158 { + db eval {SELECT -f+( - -case when 13-b not between case when (a<>t1.f or case when not exists(select 1 from t1 where 11<>t1.a) then b when c<=t1.b then t1.f else 11 end in (select 17 from t1 union select t1.b from t1)) then b-c-b when t1.f=13 then a else 19 end & t1.d and t1.e and not exists(select 1 from t1 where 19=t1.f) then -c else 19 end+17) FROM t1 WHERE not not exists(select 1 from t1 where ~(select cast(avg(t1.d) AS integer) from t1) | +~~case case when f-case when exists(select 1 from t1 where 13 not in (d,11,case when not exists(select 1 from t1 where f between t1.d and e) then 13 when t1.e<>19 then f else 11 end) or b in ( -d,t1.c,17)) then b else f end-(t1.d) in (t1.c,t1.c,b) then t1.b else e end when 11 then t1.e else -c end-c<>t1.b)} +} {-883} +do_test randexpr-2.1159 { + db eval {SELECT +(abs(d+d+a+b+t1.d)/abs(coalesce((select (select abs(cast(avg(17-11+e*t1.d) AS integer)++cast(avg(coalesce((select max(11-+t1.f- -e-a* -19+t1.a) from t1 where -a in (select 11 from t1 union select -11 from t1)),e)) AS integer)) from t1) from t1 where f=17),t1.b)*a)) FROM t1 WHERE t1.b in (select t1.e from t1 union select coalesce((select max(11) from t1 where dt1.e then t1.a+e-a when 19>t1.c then c else t1.c end+c not between b and t1.d),t1.f) and -(t1.b) between t1.e and -t1.d or b<>19 then (select ~(min(13))+max(f) from t1) else (t1.b) end*t1.b FROM t1 WHERE b in (b,e,case coalesce((select max(t1.b) from t1 where e>13 or ( -(select case -~count(distinct 17)-cast(avg((t1.f)) AS integer)*cast(avg(d) AS integer) when -cast(avg(f) AS integer) then max(17) else -(count(*)) end from t1)-(coalesce((select (b) from t1 where t1.b>=t1.e),t1.c)) in (17,t1.e,b)) or (exists(select 1 from t1 where (t1.d=(d))))),t1.c) when b then c else (e) end)} +} {117200} +do_test randexpr-2.1162 { + db eval {SELECT case when t1.b*t1.c=coalesce((select t1.f from t1 where case when exists(select 1 from t1 where t1.d in (select e from t1 union select 19 from t1)) or t1.e>t1.e then t1.a+e-a when 19>t1.c then c else t1.c end+c not between b and t1.d),t1.f) and -(t1.b) between t1.e and -t1.d or b<>19 then (select ~(min(13))+max(f) from t1) else (t1.b) end*t1.b FROM t1 WHERE NOT (b in (b,e,case coalesce((select max(t1.b) from t1 where e>13 or ( -(select case -~count(distinct 17)-cast(avg((t1.f)) AS integer)*cast(avg(d) AS integer) when -cast(avg(f) AS integer) then max(17) else -(count(*)) end from t1)-(coalesce((select (b) from t1 where t1.b>=t1.e),t1.c)) in (17,t1.e,b)) or (exists(select 1 from t1 where (t1.d=(d))))),t1.c) when b then c else (e) end))} +} {} +do_test randexpr-2.1163 { + db eval {SELECT +t1.c+case when t1.e not in ((e)-(case when 17<11 then 19 when (t1.d in (d,coalesce((select max(t1.a) from t1 where not b<>t1.b),case t1.b when coalesce((select max(t1.a) from t1 where exists(select 1 from t1 where e not in (t1.f,11,17))),(19))*t1.f then c else t1.c end+ -t1.d)-t1.b+t1.e,d)) then 19 else b end)-e,t1.c, -13) then e else 17 end FROM t1 WHERE t1.e*t1.b | t1.b<>17*+coalesce((select max(t1.b) from t1 where case when coalesce((select max(b) from t1 where case when (b between 17 and d) then t1.f when t1.c between b and t1.a then t1.e else t1.a end not in (11,19,13)),11) in (t1.d,17,17) then t1.f when d<>13 then 11 else t1.a end+a<17),t1.a)-17+t1.f*d-a} +} {800} +do_test randexpr-2.1164 { + db eval {SELECT +t1.c+case when t1.e not in 
((e)-(case when 17<11 then 19 when (t1.d in (d,coalesce((select max(t1.a) from t1 where not b<>t1.b),case t1.b when coalesce((select max(t1.a) from t1 where exists(select 1 from t1 where e not in (t1.f,11,17))),(19))*t1.f then c else t1.c end+ -t1.d)-t1.b+t1.e,d)) then 19 else b end)-e,t1.c, -13) then e else 17 end FROM t1 WHERE NOT (t1.e*t1.b | t1.b<>17*+coalesce((select max(t1.b) from t1 where case when coalesce((select max(b) from t1 where case when (b between 17 and d) then t1.f when t1.c between b and t1.a then t1.e else t1.a end not in (11,19,13)),11) in (t1.d,17,17) then t1.f when d<>13 then 11 else t1.a end+a<17),t1.a)-17+t1.f*d-a)} +} {} +do_test randexpr-2.1165 { + db eval {SELECT 11+case when case case f when (abs(t1.a)/abs(19))+t1.e then (abs(19)/abs(b+e)) else 13 end | 11 when (select ~~+min(t1.e) from t1) then t1.c else -e end<>case t1.a when 17 then case when (c not between 19 and t1.b) and t1.b<>t1.d then coalesce((select a from t1 where t1.d in (t1.e,t1.f,f)),t1.e) else 13 end else d end or t1.cf)} +} {} +do_test randexpr-2.1166 { + db eval {SELECT 11+case when case case f when (abs(t1.a)/abs(19))+t1.e then (abs(19)/abs(b+e)) else 13 end | 11 when (select ~~+min(t1.e) from t1) then t1.c else -e end<>case t1.a when 17 then case when (c not between 19 and t1.b) and t1.b<>t1.d then coalesce((select a from t1 where t1.d in (t1.e,t1.f,f)),t1.e) else 13 end else d end or t1.cf))} +} {11} +do_test randexpr-2.1167 { + db eval {SELECT 11+case when case case f when (abs(t1.a)/abs(19))+t1.e then (abs(19)/abs(b+e)) else 13 end & 11 when (select ~~+min(t1.e) from t1) then t1.c else -e end<>case t1.a when 17 then case when (c not between 19 and t1.b) and t1.b<>t1.d then coalesce((select a from t1 where t1.d in (t1.e,t1.f,f)),t1.e) else 13 end else d end or t1.cf))} +} {11} +do_test randexpr-2.1168 { + db eval {SELECT d-case when ~c<>case when not exists(select 1 from t1 where -t1.b<>19) then coalesce((select max(t1.f) from t1 where exists(select 1 from t1 where (t1.c in (select ~~+~max(~case when 13 not between t1.d and t1.d or c between c and f then t1.f else t1.a end)*abs(count(*)+(min( -t1.e))) from t1 union select cast(avg(e) AS integer) from t1) and (abs(t1.f*f-t1.a)/abs(d))>t1.b)) and (17) not in ( -t1.f,13,17)),19) else a end then b else f end FROM t1 WHERE f=(select case ~ -~count(distinct t1.b)+count(distinct d) | -count(distinct t1.e)*count(*)* -cast(avg(b) AS integer)*(cast(avg(13) AS integer)) when -min(19) then min(t1.e) else ((cast(avg(17) AS integer))) end from t1) or 19>=(19) and not exists(select 1 from t1 where (select count(*) from t1)*t1.d not between 19+11 and d) and -t1.e not in (e,t1.a, -a) and t1.e between t1.f and (t1.b) or c in ( -11,17,t1.c) or t1.a in (a,11,t1.d)} +} {200} +do_test randexpr-2.1169 { + db eval {SELECT d-case when ~c<>case when not exists(select 1 from t1 where -t1.b<>19) then coalesce((select max(t1.f) from t1 where exists(select 1 from t1 where (t1.c in (select ~~+~max(~case when 13 not between t1.d and t1.d or c between c and f then t1.f else t1.a end)*abs(count(*)+(min( -t1.e))) from t1 union select cast(avg(e) AS integer) from t1) and (abs(t1.f*f-t1.a)/abs(d))>t1.b)) and (17) not in ( -t1.f,13,17)),19) else a end then b else f end FROM t1 WHERE NOT (f=(select case ~ -~count(distinct t1.b)+count(distinct d) | -count(distinct t1.e)*count(*)* -cast(avg(b) AS integer)*(cast(avg(13) AS integer)) when -min(19) then min(t1.e) else ((cast(avg(17) AS integer))) end from t1) or 19>=(19) and not exists(select 1 from t1 where (select count(*) from 
t1)*t1.d not between 19+11 and d) and -t1.e not in (e,t1.a, -a) and t1.e between t1.f and (t1.b) or c in ( -11,17,t1.c) or t1.a in (a,11,t1.d))} +} {} +do_test randexpr-2.1170 { + db eval {SELECT case when a in (select t1.b from t1 union select coalesce((select ~19 from t1 where e>13*t1.d or c in (select b from t1 union select case a*t1.f when e then (t1.c) else (t1.b) end from t1)),t1.a) from t1) and t1.d>b or not exists(select 1 from t1 where t1.e in (select count(*)*min(t1.b)*~count(*) from t1 union select case -min((19)) when -max(b) then (min(t1.a)) else (count(distinct t1.d)) end from t1)) then (t1.c) when exists(select 1 from t1 where f not between 19 and f) then 19 else t1.c end-a FROM t1 WHERE a=coalesce((select f from t1 where t1.c in (11,(abs(coalesce((select max(t1.d) from t1 where (abs(t1.c | 13)/abs(11)) not in (b+case when coalesce((select max(b) from t1 where 11 between d and t1.a),b)=t1.b))),c) and f between 19 and t1.d or c>(f)} +} {} +do_test randexpr-2.1171 { + db eval {SELECT case when a in (select t1.b from t1 union select coalesce((select ~19 from t1 where e>13*t1.d or c in (select b from t1 union select case a*t1.f when e then (t1.c) else (t1.b) end from t1)),t1.a) from t1) and t1.d>b or not exists(select 1 from t1 where t1.e in (select count(*)*min(t1.b)*~count(*) from t1 union select case -min((19)) when -max(b) then (min(t1.a)) else (count(distinct t1.d)) end from t1)) then (t1.c) when exists(select 1 from t1 where f not between 19 and f) then 19 else t1.c end-a FROM t1 WHERE NOT (a=coalesce((select f from t1 where t1.c in (11,(abs(coalesce((select max(t1.d) from t1 where (abs(t1.c | 13)/abs(11)) not in (b+case when coalesce((select max(b) from t1 where 11 between d and t1.a),b)=t1.b))),c) and f between 19 and t1.d or c>(f))} +} {200} +do_test randexpr-2.1172 { + db eval {SELECT case when -(t1.f)- -case when t1.e not in ((t1.d)+a-t1.a,t1.c, - -a) then t1.b when (not exists(select 1 from t1 where t1.d>= -t1.f)) then -19 else f end-t1.f in (select cast(avg(19) AS integer)*case count(*) when abs((++min( -11))*max(13)-count(distinct 13)) then min(t1.a) else count(distinct 17) end from t1 union select count(distinct t1.a) from t1) then t1.c when b not between t1.f and 13 then e else t1.a end FROM t1 WHERE 11 not between ~17+d and ~case when t1.b in (c*((abs(case 13 when -(select abs(count(distinct coalesce((select max(coalesce((select t1.b from t1 where not exists(select 1 from t1 where (t1.c) in (t1.a,e,13))),t1.d)) from t1 where not t1.d>c),b))) from t1) then t1.f else t1.b end- -t1.c)/abs(c))+a),t1.d,t1.a) then (t1.c) else (d) end-13 or -t1.a in (17,a,f)} +} {500} +do_test randexpr-2.1173 { + db eval {SELECT case when -(t1.f)- -case when t1.e not in ((t1.d)+a-t1.a,t1.c, - -a) then t1.b when (not exists(select 1 from t1 where t1.d>= -t1.f)) then -19 else f end-t1.f in (select cast(avg(19) AS integer)*case count(*) when abs((++min( -11))*max(13)-count(distinct 13)) then min(t1.a) else count(distinct 17) end from t1 union select count(distinct t1.a) from t1) then t1.c when b not between t1.f and 13 then e else t1.a end FROM t1 WHERE NOT (11 not between ~17+d and ~case when t1.b in (c*((abs(case 13 when -(select abs(count(distinct coalesce((select max(coalesce((select t1.b from t1 where not exists(select 1 from t1 where (t1.c) in (t1.a,e,13))),t1.d)) from t1 where not t1.d>c),b))) from t1) then t1.f else t1.b end- -t1.c)/abs(c))+a),t1.d,t1.a) then (t1.c) else (d) end-13 or -t1.a in (17,a,f))} +} {} +do_test randexpr-2.1174 { + db eval {SELECT coalesce((select 17 from 
t1 where not exists(select 1 from t1 where (t1.b- -b+(select +case max(b)-~count(distinct e*(select case max(c) when abs((count(*))) then max(t1.f) else max(t1.c) end+cast(avg(t1.b) AS integer) from t1)) when abs(count(distinct e*19)) then max(b) else max(t1.c) end | count(distinct t1.e) from t1)-b*+t1.b not in ((11),17,13)))),t1.e) FROM t1 WHERE t1.c in (select min(coalesce((select 17 from t1 where t1.c | 17>=(abs(t1.a)/abs(case when t1.e not in (19,t1.d,19) then a when 13<=case when not exists(select 1 from t1 where (abs(e)/abs(t1.c)) in (select 19 from t1 union select d from t1) and t1.c>=17 or 19=t1.a) then f else a end then t1.d else e end))-f),e))*min(f) | ++min(a) from t1 union select cast(avg(f) AS integer) from t1)} +} {} +do_test randexpr-2.1175 { + db eval {SELECT coalesce((select 17 from t1 where not exists(select 1 from t1 where (t1.b- -b+(select +case max(b)-~count(distinct e*(select case max(c) when abs((count(*))) then max(t1.f) else max(t1.c) end+cast(avg(t1.b) AS integer) from t1)) when abs(count(distinct e*19)) then max(b) else max(t1.c) end | count(distinct t1.e) from t1)-b*+t1.b not in ((11),17,13)))),t1.e) FROM t1 WHERE NOT (t1.c in (select min(coalesce((select 17 from t1 where t1.c | 17>=(abs(t1.a)/abs(case when t1.e not in (19,t1.d,19) then a when 13<=case when not exists(select 1 from t1 where (abs(e)/abs(t1.c)) in (select 19 from t1 union select d from t1) and t1.c>=17 or 19=t1.a) then f else a end then t1.d else e end))-f),e))*min(f) | ++min(a) from t1 union select cast(avg(f) AS integer) from t1))} +} {500} +do_test randexpr-2.1176 { + db eval {SELECT coalesce((select 17 from t1 where not exists(select 1 from t1 where (t1.b- -b+(select +case max(b)-~count(distinct e*(select case max(c) when abs((count(*))) then max(t1.f) else max(t1.c) end+cast(avg(t1.b) AS integer) from t1)) when abs(count(distinct e*19)) then max(b) else max(t1.c) end & count(distinct t1.e) from t1)-b*+t1.b not in ((11),17,13)))),t1.e) FROM t1 WHERE NOT (t1.c in (select min(coalesce((select 17 from t1 where t1.c | 17>=(abs(t1.a)/abs(case when t1.e not in (19,t1.d,19) then a when 13<=case when not exists(select 1 from t1 where (abs(e)/abs(t1.c)) in (select 19 from t1 union select d from t1) and t1.c>=17 or 19=t1.a) then f else a end then t1.d else e end))-f),e))*min(f) | ++min(a) from t1 union select cast(avg(f) AS integer) from t1))} +} {500} +do_test randexpr-2.1177 { + db eval {SELECT case coalesce((select t1.e from t1 where +coalesce((select a from t1 where 13 | b | t1.b<=11*a*case when exists(select 1 from t1 where (a-t1.d+t1.e-f<17 and -17 between 19 and a)) then c when 17 between f and t1.c then a*11 else t1.d end),t1.e) not in (b,t1.f,t1.d)),a) when c then -c else t1.b end FROM t1 WHERE (exists(select 1 from t1 where case when not exists(select 1 from t1 where (select cast(avg(d) AS integer) from t1) in (select a from t1 union select case case when t1.e>t1.d or t1.b in (select c from t1 union select e from t1) or t1.a>=f and b>=11 then t1.c-(t1.c)*19 when b not in (e,c,t1.d) then t1.b else t1.c end when 13 then f else -t1.f end from t1)) then 17 when not exists(select 1 from t1 where not exists(select 1 from t1 where f not in (t1.d,f, -t1.b)) or -19<=t1.c) or t1.e between 13 and t1.a then f else d end<>t1.f) and f between 11 and t1.c and t1.e not in (t1.d,c,t1.e)) or d not between t1.b and t1.d} +} {} +do_test randexpr-2.1178 { + db eval {SELECT case coalesce((select t1.e from t1 where +coalesce((select a from t1 where 13 | b | t1.b<=11*a*case when exists(select 1 from t1 where 
(a-t1.d+t1.e-f<17 and -17 between 19 and a)) then c when 17 between f and t1.c then a*11 else t1.d end),t1.e) not in (b,t1.f,t1.d)),a) when c then -c else t1.b end FROM t1 WHERE NOT ((exists(select 1 from t1 where case when not exists(select 1 from t1 where (select cast(avg(d) AS integer) from t1) in (select a from t1 union select case case when t1.e>t1.d or t1.b in (select c from t1 union select e from t1) or t1.a>=f and b>=11 then t1.c-(t1.c)*19 when b not in (e,c,t1.d) then t1.b else t1.c end when 13 then f else -t1.f end from t1)) then 17 when not exists(select 1 from t1 where not exists(select 1 from t1 where f not in (t1.d,f, -t1.b)) or -19<=t1.c) or t1.e between 13 and t1.a then f else d end<>t1.f) and f between 11 and t1.c and t1.e not in (t1.d,c,t1.e)) or d not between t1.b and t1.d)} +} {200} +do_test randexpr-2.1179 { + db eval {SELECT case coalesce((select t1.e from t1 where +coalesce((select a from t1 where 13 & b & t1.b<=11*a*case when exists(select 1 from t1 where (a-t1.d+t1.e-f<17 and -17 between 19 and a)) then c when 17 between f and t1.c then a*11 else t1.d end),t1.e) not in (b,t1.f,t1.d)),a) when c then -c else t1.b end FROM t1 WHERE NOT ((exists(select 1 from t1 where case when not exists(select 1 from t1 where (select cast(avg(d) AS integer) from t1) in (select a from t1 union select case case when t1.e>t1.d or t1.b in (select c from t1 union select e from t1) or t1.a>=f and b>=11 then t1.c-(t1.c)*19 when b not in (e,c,t1.d) then t1.b else t1.c end when 13 then f else -t1.f end from t1)) then 17 when not exists(select 1 from t1 where not exists(select 1 from t1 where f not in (t1.d,f, -t1.b)) or -19<=t1.c) or t1.e between 13 and t1.a then f else d end<>t1.f) and f between 11 and t1.c and t1.e not in (t1.d,c,t1.e)) or d not between t1.b and t1.d)} +} {200} +do_test randexpr-2.1180 { + db eval {SELECT case when exists(select 1 from t1 where b<>coalesce((select (select + - -count(*)-~count(*)+~+case - -count(distinct (t1.d)) when max(t1.d) then count(*) else max(a) end+(cast(avg(17) AS integer)) from t1) | d from t1 where not exists(select 1 from t1 where (abs(f)/abs(t1.c)) between 13+a and -t1.d)),19) or (coalesce((select t1.d from t1 where (( - -19)>a)),t1.f) in (e,e,t1.e))) then t1.b*t1.f when t1.d not between (a) and a then e else b end FROM t1 WHERE not exists(select 1 from t1 where a in (select 13 from t1 union select 17 from t1))} +} {120000} +do_test randexpr-2.1181 { + db eval {SELECT case when exists(select 1 from t1 where b<>coalesce((select (select + - -count(*)-~count(*)+~+case - -count(distinct (t1.d)) when max(t1.d) then count(*) else max(a) end+(cast(avg(17) AS integer)) from t1) | d from t1 where not exists(select 1 from t1 where (abs(f)/abs(t1.c)) between 13+a and -t1.d)),19) or (coalesce((select t1.d from t1 where (( - -19)>a)),t1.f) in (e,e,t1.e))) then t1.b*t1.f when t1.d not between (a) and a then e else b end FROM t1 WHERE NOT (not exists(select 1 from t1 where a in (select 13 from t1 union select 17 from t1)))} +} {} +do_test randexpr-2.1182 { + db eval {SELECT case when exists(select 1 from t1 where b<>coalesce((select (select + - -count(*)-~count(*)+~+case - -count(distinct (t1.d)) when max(t1.d) then count(*) else max(a) end+(cast(avg(17) AS integer)) from t1) & d from t1 where not exists(select 1 from t1 where (abs(f)/abs(t1.c)) between 13+a and -t1.d)),19) or (coalesce((select t1.d from t1 where (( - -19)>a)),t1.f) in (e,e,t1.e))) then t1.b*t1.f when t1.d not between (a) and a then e else b end FROM t1 WHERE not exists(select 1 from t1 where 
a in (select 13 from t1 union select 17 from t1))} +} {120000} +do_test randexpr-2.1183 { + db eval {SELECT coalesce((select case when f>17-t1.f then (select +max(coalesce((select +11 from t1 where coalesce((select max(d) from t1 where +t1.c between (f) and c),t1.f) not in (c, -f,t1.e)),d)) from t1) else 17+19 end from t1 where not not exists(select 1 from t1 where t1.a in (select a from t1 union select t1.a from t1) and 17=d or (17)<=(( -19)) and t1.f in (select d from t1 union select 13 from t1) or 19>13 or t1.b not in (t1.b,17,d))), -(t1.c)) FROM t1 WHERE +t1.a-13+(select count(distinct a) from t1)-e*e*case when coalesce((select max(coalesce((select a from t1 where ((case b+t1.c-t1.c*d*17 when t1.a then t1.a else f end<>c))),t1.b+13)) from t1 where t1.f between 17 and t1.d),t1.c)<=(b) then f else t1.e end-13<>t1.f} +} {11} +do_test randexpr-2.1184 { + db eval {SELECT coalesce((select case when f>17-t1.f then (select +max(coalesce((select +11 from t1 where coalesce((select max(d) from t1 where +t1.c between (f) and c),t1.f) not in (c, -f,t1.e)),d)) from t1) else 17+19 end from t1 where not not exists(select 1 from t1 where t1.a in (select a from t1 union select t1.a from t1) and 17=d or (17)<=(( -19)) and t1.f in (select d from t1 union select 13 from t1) or 19>13 or t1.b not in (t1.b,17,d))), -(t1.c)) FROM t1 WHERE NOT (+t1.a-13+(select count(distinct a) from t1)-e*e*case when coalesce((select max(coalesce((select a from t1 where ((case b+t1.c-t1.c*d*17 when t1.a then t1.a else f end<>c))),t1.b+13)) from t1 where t1.f between 17 and t1.d),t1.c)<=(b) then f else t1.e end-13<>t1.f)} +} {} +do_test randexpr-2.1185 { + db eval {SELECT coalesce((select 13 from t1 where ~(select max(b) from t1)<= -+d*f+t1.b*f or coalesce((select max(case when d=(abs(19)/abs(t1.f)) then case b+d when (abs(coalesce((select 11 from t1 where t1.f between -d and c),t1.f))/abs(t1.a)) then b else t1.d end when a=t1.e),17)+ -c FROM t1 WHERE t1.b-+(t1.b) in (select a from t1 union select 19 from t1)} +} {} +do_test randexpr-2.1186 { + db eval {SELECT coalesce((select 13 from t1 where ~(select max(b) from t1)<= -+d*f+t1.b*f or coalesce((select max(case when d=(abs(19)/abs(t1.f)) then case b+d when (abs(coalesce((select 11 from t1 where t1.f between -d and c),t1.f))/abs(t1.a)) then b else t1.d end when a=t1.e),17)+ -c FROM t1 WHERE NOT (t1.b-+(t1.b) in (select a from t1 union select 19 from t1))} +} {-283} +do_test randexpr-2.1187 { + db eval {SELECT case when (abs((abs(d)/abs(t1.f-c)))/abs( -t1.f))*case when (t1.c not in (case when t1.b>case t1.d when t1.b then e else 11 end then t1.a when ( -t1.d in (select -count(*)-cast(avg(f) AS integer) from t1 union select count(*) from t1)) or 11 in (e,t1.e,c) then a else b end,t1.a,11) or t1.a in (17,t1.a,t1.d) and b>(t1.a)) then 11 when ct1.b then t1.c else 17 end FROM t1 WHERE t1.b>f} +} {} +do_test randexpr-2.1188 { + db eval {SELECT case when (abs((abs(d)/abs(t1.f-c)))/abs( -t1.f))*case when (t1.c not in (case when t1.b>case t1.d when t1.b then e else 11 end then t1.a when ( -t1.d in (select -count(*)-cast(avg(f) AS integer) from t1 union select count(*) from t1)) or 11 in (e,t1.e,c) then a else b end,t1.a,11) or t1.a in (17,t1.a,t1.d) and b>(t1.a)) then 11 when ct1.b then t1.c else 17 end FROM t1 WHERE NOT (t1.b>f)} +} {17} +do_test randexpr-2.1189 { + db eval {SELECT t1.e-case when case when ((exists(select 1 from t1 where not exists(select 1 from t1 where case when not 13<=13 or (t1.c<>t1.e) then t1.d else +13 end+13 in (select max(13) | max((t1.b)) from t1 union select 
max(f) from t1))))) then t1.f else ~t1.e | t1.d*t1.b end+11 not between t1.c and 13 then b when (t1.f not in (19,t1.f,t1.d)) then c else -b end FROM t1 WHERE ( -(select -min(13-t1.b) from t1)-coalesce((select case when -d>=t1.e or 11 between 17 and -t1.e then 19+f when t1.f=11 then -t1.c else t1.b end from t1 where t1.a not in (11,t1.e,t1.a)),t1.c)*11 in (select -case abs((abs(min(t1.d)))) when max(19) then cast(avg(f) AS integer) else case count(*) when count(*) then (count(distinct t1.f)) else cast(avg(11) AS integer) end end from t1 union select min(d) from t1)) or b in (select (count(*)) from t1 union select max(t1.a) from t1)} +} {} +do_test randexpr-2.1190 { + db eval {SELECT t1.e-case when case when ((exists(select 1 from t1 where not exists(select 1 from t1 where case when not 13<=13 or (t1.c<>t1.e) then t1.d else +13 end+13 in (select max(13) | max((t1.b)) from t1 union select max(f) from t1))))) then t1.f else ~t1.e | t1.d*t1.b end+11 not between t1.c and 13 then b when (t1.f not in (19,t1.f,t1.d)) then c else -b end FROM t1 WHERE NOT (( -(select -min(13-t1.b) from t1)-coalesce((select case when -d>=t1.e or 11 between 17 and -t1.e then 19+f when t1.f=11 then -t1.c else t1.b end from t1 where t1.a not in (11,t1.e,t1.a)),t1.c)*11 in (select -case abs((abs(min(t1.d)))) when max(19) then cast(avg(f) AS integer) else case count(*) when count(*) then (count(distinct t1.f)) else cast(avg(11) AS integer) end end from t1 union select min(d) from t1)) or b in (select (count(*)) from t1 union select max(t1.a) from t1))} +} {300} +do_test randexpr-2.1191 { + db eval {SELECT t1.e-case when case when ((exists(select 1 from t1 where not exists(select 1 from t1 where case when not 13<=13 or (t1.c<>t1.e) then t1.d else +13 end+13 in (select max(13) & max((t1.b)) from t1 union select max(f) from t1))))) then t1.f else ~t1.e & t1.d*t1.b end+11 not between t1.c and 13 then b when (t1.f not in (19,t1.f,t1.d)) then c else -b end FROM t1 WHERE NOT (( -(select -min(13-t1.b) from t1)-coalesce((select case when -d>=t1.e or 11 between 17 and -t1.e then 19+f when t1.f=11 then -t1.c else t1.b end from t1 where t1.a not in (11,t1.e,t1.a)),t1.c)*11 in (select -case abs((abs(min(t1.d)))) when max(19) then cast(avg(f) AS integer) else case count(*) when count(*) then (count(distinct t1.f)) else cast(avg(11) AS integer) end end from t1 union select min(d) from t1)) or b in (select (count(*)) from t1 union select max(t1.a) from t1))} +} {300} +do_test randexpr-2.1192 { + db eval {SELECT (coalesce((select c*(select +~count(distinct 19)-cast(avg(~ -c) AS integer)-max( -f)-cast(avg(t1.d) AS integer) from t1)+t1.a*t1.f+t1.e-a*b from t1 where not t1.ed and 11<=t1.e and (not exists(select 1 from t1 where exists(select 1 from t1 where (13=a)) or t1.e>t1.b))),19)) FROM t1 WHERE coalesce((select max(t1.f*11) from t1 where not exists(select 1 from t1 where t1.a<>17) and t1.b>case (abs(b* -t1.a)/abs(11)) when b*case case when f>=t1.e then 11 when t1.b<=d then 17 else a end* -a when e then 11 else c end then (t1.b) else 19 end or t1.f<13), -b)>=e and ((d in (select f from t1 union select f from t1)))} +} {} +do_test randexpr-2.1193 { + db eval {SELECT (coalesce((select c*(select +~count(distinct 19)-cast(avg(~ -c) AS integer)-max( -f)-cast(avg(t1.d) AS integer) from t1)+t1.a*t1.f+t1.e-a*b from t1 where not t1.ed and 11<=t1.e and (not exists(select 1 from t1 where exists(select 1 from t1 where (13=a)) or t1.e>t1.b))),19)) FROM t1 WHERE NOT (coalesce((select max(t1.f*11) from t1 where not exists(select 1 from t1 where t1.a<>17) 
and t1.b>case (abs(b* -t1.a)/abs(11)) when b*case case when f>=t1.e then 11 when t1.b<=d then 17 else a end* -a when e then 11 else c end then (t1.b) else 19 end or t1.f<13), -b)>=e and ((d in (select f from t1 union select f from t1))))} +} {10200} +do_test randexpr-2.1194 { + db eval {SELECT b-b+case when d+e between 17 and case when exists(select 1 from t1 where coalesce((select max(coalesce((select 11 from t1 where t1.e not between 13 and case when t1.e<>c then c | 19+t1.a else 17 end),t1.a)) from t1 where t1.d<=17 or a not in (b,11,a)),(t1.b))+b>=t1.a) then 11 else a end then d else a end+11*f FROM t1 WHERE (t1.b*19<=17)} +} {} +do_test randexpr-2.1195 { + db eval {SELECT b-b+case when d+e between 17 and case when exists(select 1 from t1 where coalesce((select max(coalesce((select 11 from t1 where t1.e not between 13 and case when t1.e<>c then c | 19+t1.a else 17 end),t1.a)) from t1 where t1.d<=17 or a not in (b,11,a)),(t1.b))+b>=t1.a) then 11 else a end then d else a end+11*f FROM t1 WHERE NOT ((t1.b*19<=17))} +} {6700} +do_test randexpr-2.1196 { + db eval {SELECT b-b+case when d+e between 17 and case when exists(select 1 from t1 where coalesce((select max(coalesce((select 11 from t1 where t1.e not between 13 and case when t1.e<>c then c & 19+t1.a else 17 end),t1.a)) from t1 where t1.d<=17 or a not in (b,11,a)),(t1.b))+b>=t1.a) then 11 else a end then d else a end+11*f FROM t1 WHERE NOT ((t1.b*19<=17))} +} {6700} +do_test randexpr-2.1197 { + db eval {SELECT +(abs(13)/abs(+~~case t1.e | +11-b-coalesce((select max(c) from t1 where 19 in (select cast(avg(coalesce((select max(case when t1.ae*c then 11 else 13 end from t1 where -t1.c not between t1.f and 11),t1.e)) AS integer) when case max( -d) when ((min(17))) then cast(avg(t1.a) AS integer) else min(e) end+cast(avg((t1.b)) AS integer) then cast(avg(t1.d) AS integer) else -(max(c)) end from t1)-f | b*13+e when t1.b then (f) else t1.a end in (select d from t1 union select 17 from t1)),d) FROM t1 WHERE t1.e*c>13-+t1.f | (abs((select abs(min(b)-abs( - -count(distinct 17-d-13+ -e-(t1.d)*e)+max(t1.e)) | (~abs((max(c)))+cast(avg(17) AS integer)-count(*)*cast(avg(11) AS integer))) from t1))/abs(t1.f))} +} {400} +do_test randexpr-2.1201 { + db eval {SELECT coalesce((select t1.f from t1 where case d-(select case cast(avg(coalesce((select case when not 11<>e*c then 11 else 13 end from t1 where -t1.c not between t1.f and 11),t1.e)) AS integer) when case max( -d) when ((min(17))) then cast(avg(t1.a) AS integer) else min(e) end+cast(avg((t1.b)) AS integer) then cast(avg(t1.d) AS integer) else -(max(c)) end from t1)-f | b*13+e when t1.b then (f) else t1.a end in (select d from t1 union select 17 from t1)),d) FROM t1 WHERE NOT (t1.e*c>13-+t1.f | (abs((select abs(min(b)-abs( - -count(distinct 17-d-13+ -e-(t1.d)*e)+max(t1.e)) | (~abs((max(c)))+cast(avg(17) AS integer)-count(*)*cast(avg(11) AS integer))) from t1))/abs(t1.f)))} +} {} +do_test randexpr-2.1202 { + db eval {SELECT coalesce((select t1.f from t1 where case d-(select case cast(avg(coalesce((select case when not 11<>e*c then 11 else 13 end from t1 where -t1.c not between t1.f and 11),t1.e)) AS integer) when case max( -d) when ((min(17))) then cast(avg(t1.a) AS integer) else min(e) end+cast(avg((t1.b)) AS integer) then cast(avg(t1.d) AS integer) else -(max(c)) end from t1)-f & b*13+e when t1.b then (f) else t1.a end in (select d from t1 union select 17 from t1)),d) FROM t1 WHERE t1.e*c>13-+t1.f | (abs((select abs(min(b)-abs( - -count(distinct 17-d-13+ -e-(t1.d)*e)+max(t1.e)) | 
(~abs((max(c)))+cast(avg(17) AS integer)-count(*)*cast(avg(11) AS integer))) from t1))/abs(t1.f))} +} {400} +do_test randexpr-2.1203 { + db eval {SELECT t1.b-~case when ~(abs(17)/abs(t1.a))+17+t1.b in (select count(distinct d) | count(*) from t1 union select case count(*) | count(distinct e) when count(*) then -count(*) else cast(avg(11) AS integer) end from t1) and t1.e>b or e not between (e) and c and 17 in (t1.c,b,b) and t1.a not between a and (t1.f) then coalesce((select t1.a*t1.c from t1 where -11 in (t1.a,(t1.f), -f)),13)*t1.c else t1.f end FROM t1 WHERE (t1.e<>t1.a+~~c-t1.a+13*e-17+t1.b)} +} {801} +do_test randexpr-2.1204 { + db eval {SELECT t1.b-~case when ~(abs(17)/abs(t1.a))+17+t1.b in (select count(distinct d) | count(*) from t1 union select case count(*) | count(distinct e) when count(*) then -count(*) else cast(avg(11) AS integer) end from t1) and t1.e>b or e not between (e) and c and 17 in (t1.c,b,b) and t1.a not between a and (t1.f) then coalesce((select t1.a*t1.c from t1 where -11 in (t1.a,(t1.f), -f)),13)*t1.c else t1.f end FROM t1 WHERE NOT ((t1.e<>t1.a+~~c-t1.a+13*e-17+t1.b))} +} {} +do_test randexpr-2.1205 { + db eval {SELECT t1.b-~case when ~(abs(17)/abs(t1.a))+17+t1.b in (select count(distinct d) & count(*) from t1 union select case count(*) & count(distinct e) when count(*) then -count(*) else cast(avg(11) AS integer) end from t1) and t1.e>b or e not between (e) and c and 17 in (t1.c,b,b) and t1.a not between a and (t1.f) then coalesce((select t1.a*t1.c from t1 where -11 in (t1.a,(t1.f), -f)),13)*t1.c else t1.f end FROM t1 WHERE (t1.e<>t1.a+~~c-t1.a+13*e-17+t1.b)} +} {801} +do_test randexpr-2.1206 { + db eval {SELECT ((abs(b)/abs(t1.e+coalesce((select max((abs(coalesce((select max(11) from t1 where t1.a<=coalesce((select case when t1.a in (case t1.b when t1.d then t1.d else a end,17,11) then 13 when t1.a>b then 19 else d end from t1 where c in (select (~case count(*) when min(f) then -min(d) else count(distinct (t1.f)) end)*(count(*)) from t1 union select -min((t1.c)) from t1)),11) and (b<=t1.c)),t1.f))/abs(e))-13) from t1 where b not in (d,( -e),e)),c)-t1.c))) FROM t1 WHERE (exists(select 1 from t1 where b in (select +( -abs(max(19+((select case -+cast(avg(e) AS integer) when -max(19)-min(t1.c) | count(*) then count(distinct t1.e) else min(f) end-cast(avg( -19) AS integer)*count(*) | count(*) from t1))-17)) | +cast(avg((select cast(avg(a) AS integer) from t1)*f) AS integer)-cast(avg(13-t1.a+b) AS integer))+ -max(t1.a) from t1 union select cast(avg( -f) AS integer) from t1)))} +} {} +do_test randexpr-2.1207 { + db eval {SELECT ((abs(b)/abs(t1.e+coalesce((select max((abs(coalesce((select max(11) from t1 where t1.a<=coalesce((select case when t1.a in (case t1.b when t1.d then t1.d else a end,17,11) then 13 when t1.a>b then 19 else d end from t1 where c in (select (~case count(*) when min(f) then -min(d) else count(distinct (t1.f)) end)*(count(*)) from t1 union select -min((t1.c)) from t1)),11) and (b<=t1.c)),t1.f))/abs(e))-13) from t1 where b not in (d,( -e),e)),c)-t1.c))) FROM t1 WHERE NOT ((exists(select 1 from t1 where b in (select +( -abs(max(19+((select case -+cast(avg(e) AS integer) when -max(19)-min(t1.c) | count(*) then count(distinct t1.e) else min(f) end-cast(avg( -19) AS integer)*count(*) | count(*) from t1))-17)) | +cast(avg((select cast(avg(a) AS integer) from t1)*f) AS integer)-cast(avg(13-t1.a+b) AS integer))+ -max(t1.a) from t1 union select cast(avg( -f) AS integer) from t1))))} +} {1} +do_test randexpr-2.1208 { + db eval {SELECT t1.f-17+(abs(case when 
~e*coalesce((select max(coalesce((select d from t1 where not -(19*t1.d*17) not in (t1.a*t1.c,b,b)),f)) from t1 where 13 between t1.d and 19),11* -t1.e)-(t1.a)>d then 17 when (e in (select -(t1.d) from t1 union select -(t1.b) from t1)) then e else -t1.d end)/abs(17))*a FROM t1 WHERE 17 in (select 19 from t1 union select -17 | d from t1)} +} {} +do_test randexpr-2.1209 { + db eval {SELECT t1.f-17+(abs(case when ~e*coalesce((select max(coalesce((select d from t1 where not -(19*t1.d*17) not in (t1.a*t1.c,b,b)),f)) from t1 where 13 between t1.d and 19),11* -t1.e)-(t1.a)>d then 17 when (e in (select -(t1.d) from t1 union select -(t1.b) from t1)) then e else -t1.d end)/abs(17))*a FROM t1 WHERE NOT (17 in (select 19 from t1 union select -17 | d from t1))} +} {683} +do_test randexpr-2.1210 { + db eval {SELECT case t1.a when case when 13+( -f-b)*c-t1.d-t1.c | c+d>=d*case when not ++c+t1.d<>+b- -a*t1.e then -f else 17 end+t1.f then t1.e when 11<>19 then c else 11 end then e else f end FROM t1 WHERE (case when (abs(t1.f+case f-case when exists(select 1 from t1 where t1.c>13) then t1.e when t1.f>= -f then b else e end when 13 then t1.e else t1.c end)/abs(c)) in (select max(f)*max(f)*( -max( -17)) | min(t1.a) | cast(avg(13) AS integer)+count(*) from t1 union select ( -max(e)) from t1) then ( -d) when c not between f and ( -c) then 13 else b end between t1.a and e) and 19 not between 19 and c} +} {} +do_test randexpr-2.1211 { + db eval {SELECT case t1.a when case when 13+( -f-b)*c-t1.d-t1.c | c+d>=d*case when not ++c+t1.d<>+b- -a*t1.e then -f else 17 end+t1.f then t1.e when 11<>19 then c else 11 end then e else f end FROM t1 WHERE NOT ((case when (abs(t1.f+case f-case when exists(select 1 from t1 where t1.c>13) then t1.e when t1.f>= -f then b else e end when 13 then t1.e else t1.c end)/abs(c)) in (select max(f)*max(f)*( -max( -17)) | min(t1.a) | cast(avg(13) AS integer)+count(*) from t1 union select ( -max(e)) from t1) then ( -d) when c not between f and ( -c) then 13 else b end between t1.a and e) and 19 not between 19 and c)} +} {600} +do_test randexpr-2.1212 { + db eval {SELECT case t1.a when case when 13+( -f-b)*c-t1.d-t1.c & c+d>=d*case when not ++c+t1.d<>+b- -a*t1.e then -f else 17 end+t1.f then t1.e when 11<>19 then c else 11 end then e else f end FROM t1 WHERE NOT ((case when (abs(t1.f+case f-case when exists(select 1 from t1 where t1.c>13) then t1.e when t1.f>= -f then b else e end when 13 then t1.e else t1.c end)/abs(c)) in (select max(f)*max(f)*( -max( -17)) | min(t1.a) | cast(avg(13) AS integer)+count(*) from t1 union select ( -max(e)) from t1) then ( -d) when c not between f and ( -c) then 13 else b end between t1.a and e) and 19 not between 19 and c)} +} {600} +do_test randexpr-2.1213 { + db eval {SELECT case when 19=t1.e or (abs(t1.f)/abs(((select -count(distinct f+~t1.e-t1.b-coalesce((select max(b*c) from t1 where 17<>b),b))+~cast(avg(d) AS integer) from t1)))) not between f and d then coalesce((select max( -a) from t1 where 13<=11 or t1.d>=f),t1.d) when 17 in (select 11 from t1 union select t1.a from t1) then t1.d else t1.c end | 19 FROM t1 WHERE (case t1.d when ~a then coalesce((select max((abs( -~+b)/abs(t1.c))) from t1 where case t1.e when (abs((abs(case t1.a++t1.d | 19-coalesce((select e from t1 where b not between t1.f+b+c and t1.b), -t1.f) when t1.e then t1.a else f end)/abs(t1.b)))/abs(t1.e)) then t1.d else t1.d end>t1.f),t1.d)*t1.b else t1.f end not between a and e)} +} {403} +do_test randexpr-2.1214 { + db eval {SELECT case when 19=t1.e or (abs(t1.f)/abs(((select -count(distinct 
f+~t1.e-t1.b-coalesce((select max(b*c) from t1 where 17<>b),b))+~cast(avg(d) AS integer) from t1)))) not between f and d then coalesce((select max( -a) from t1 where 13<=11 or t1.d>=f),t1.d) when 17 in (select 11 from t1 union select t1.a from t1) then t1.d else t1.c end | 19 FROM t1 WHERE NOT ((case t1.d when ~a then coalesce((select max((abs( -~+b)/abs(t1.c))) from t1 where case t1.e when (abs((abs(case t1.a++t1.d | 19-coalesce((select e from t1 where b not between t1.f+b+c and t1.b), -t1.f) when t1.e then t1.a else f end)/abs(t1.b)))/abs(t1.e)) then t1.d else t1.d end>t1.f),t1.d)*t1.b else t1.f end not between a and e))} +} {} +do_test randexpr-2.1215 { + db eval {SELECT case when 19=t1.e or (abs(t1.f)/abs(((select -count(distinct f+~t1.e-t1.b-coalesce((select max(b*c) from t1 where 17<>b),b))+~cast(avg(d) AS integer) from t1)))) not between f and d then coalesce((select max( -a) from t1 where 13<=11 or t1.d>=f),t1.d) when 17 in (select 11 from t1 union select t1.a from t1) then t1.d else t1.c end & 19 FROM t1 WHERE (case t1.d when ~a then coalesce((select max((abs( -~+b)/abs(t1.c))) from t1 where case t1.e when (abs((abs(case t1.a++t1.d | 19-coalesce((select e from t1 where b not between t1.f+b+c and t1.b), -t1.f) when t1.e then t1.a else f end)/abs(t1.b)))/abs(t1.e)) then t1.d else t1.d end>t1.f),t1.d)*t1.b else t1.f end not between a and e)} +} {16} +do_test randexpr-2.1216 { + db eval {SELECT coalesce((select t1.c from t1 where -a in (select e from t1 union select 13 from t1)),(select count(distinct coalesce((select max((17)) from t1 where c not between case 19-t1.b when a then ~+(abs((select count(distinct (t1.a)) from t1)+(t1.b))/abs((t1.a)*a+13 | t1.d | t1.a)) else a end and 13 or d in (a,a,t1.f)),d)) from t1) | t1.d) FROM t1 WHERE et1.a then (select +abs(count(distinct t1.f)*max(13)) from t1) else +d end>=b+t1.c and b not in (f,13,b) then t1.d when b>b then d else t1.d end*11 FROM t1 WHERE d<>b} +} {4400} +do_test randexpr-2.1220 { + db eval {SELECT case when case when t1.b<=f+coalesce((select max(11) from t1 where not e in (select count(distinct f) from t1 union select count(*) from t1)),t1.d) or 19 between (e) and 19 and f not in (t1.d,(19),t1.e) and exists(select 1 from t1 where t1.e<=19) or t1.e>t1.a then (select +abs(count(distinct t1.f)*max(13)) from t1) else +d end>=b+t1.c and b not in (f,13,b) then t1.d when b>b then d else t1.d end*11 FROM t1 WHERE NOT (d<>b)} +} {} +do_test randexpr-2.1221 { + db eval {SELECT ~(abs(case when t1.a not between -t1.d*(b) and 17 then f when e<=t1.f then b*t1.d else 13*(a) end)/abs((coalesce((select d from t1 where +(abs((select (min(a)) from t1))/abs((select count(distinct a) from t1)))- -t1.a in (select -case max(e)+count(distinct d) when cast(avg(( -t1.e)) AS integer) then ( -(count(*))) else -min(d) end+max(t1.f) from t1 union select max(t1.a) from t1)),t1.d)))) FROM t1 WHERE (t1.a=13 then 11 else a end then -17 else a end and (t1.c) and (b=17)) then (+(t1.c)) when t1.a<=t1.f then t1.d else 11 end FROM t1 WHERE c<>~13} +} {1000} +do_test randexpr-2.1224 { + db eval {SELECT f+case when (~t1.c between case when +d between (select -abs(count(distinct t1.a+17+t1.f))+case min(19)*count(distinct -t1.a) when (count(*)) then ((count(*))) else ((max(t1.e))) end from t1) and case when -t1.b>=13 then 11 else a end then -17 else a end and (t1.c) and (b=17)) then (+(t1.c)) when t1.a<=t1.f then t1.d else 11 end FROM t1 WHERE NOT (c<>~13)} +} {} +do_test randexpr-2.1225 { + db eval {SELECT case when ~(abs((select count(*) from t1))/abs(c))<>case when 
17*case when coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.f=t1.e)),t1.a) in (select min(t1.f) from t1 union select -min(t1.d) from t1) then 11 when 11<=(a) then f else -e end | -t1.b< -c then d else f end then ((t1.a)) when t1.f in (select ~(++(count(*))+cast(avg(t1.f) AS integer))+max(( -t1.d)) from t1 union select cast(avg(e) AS integer) from t1) then 11 else d end FROM t1 WHERE t1.a between case when t1.d=t1.a*11 | (select (cast(avg(t1.a) AS integer)+abs( -count(distinct e)) | ~count(distinct coalesce((select max(b) from t1 where not exists(select 1 from t1 where -e>=13)),(t1.e)))- -( -max(d))* -count(*)) from t1)+(abs(t1.f)/abs((select cast(avg(11) AS integer) from t1))) then t1.a else coalesce((select max(c) from t1 where 11>=t1.e or 11 in (t1.b,t1.b,t1.d) or t1.a>=t1.a),13)*a end and t1.c} +} {} +do_test randexpr-2.1226 { + db eval {SELECT case when ~(abs((select count(*) from t1))/abs(c))<>case when 17*case when coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.f=t1.e)),t1.a) in (select min(t1.f) from t1 union select -min(t1.d) from t1) then 11 when 11<=(a) then f else -e end | -t1.b< -c then d else f end then ((t1.a)) when t1.f in (select ~(++(count(*))+cast(avg(t1.f) AS integer))+max(( -t1.d)) from t1 union select cast(avg(e) AS integer) from t1) then 11 else d end FROM t1 WHERE NOT (t1.a between case when t1.d=t1.a*11 | (select (cast(avg(t1.a) AS integer)+abs( -count(distinct e)) | ~count(distinct coalesce((select max(b) from t1 where not exists(select 1 from t1 where -e>=13)),(t1.e)))- -( -max(d))* -count(*)) from t1)+(abs(t1.f)/abs((select cast(avg(11) AS integer) from t1))) then t1.a else coalesce((select max(c) from t1 where 11>=t1.e or 11 in (t1.b,t1.b,t1.d) or t1.a>=t1.a),13)*a end and t1.c)} +} {100} +do_test randexpr-2.1227 { + db eval {SELECT case when ~(abs((select count(*) from t1))/abs(c))<>case when 17*case when coalesce((select max(f) from t1 where exists(select 1 from t1 where t1.f=t1.e)),t1.a) in (select min(t1.f) from t1 union select -min(t1.d) from t1) then 11 when 11<=(a) then f else -e end & -t1.b< -c then d else f end then ((t1.a)) when t1.f in (select ~(++(count(*))+cast(avg(t1.f) AS integer))+max(( -t1.d)) from t1 union select cast(avg(e) AS integer) from t1) then 11 else d end FROM t1 WHERE NOT (t1.a between case when t1.d=t1.a*11 | (select (cast(avg(t1.a) AS integer)+abs( -count(distinct e)) | ~count(distinct coalesce((select max(b) from t1 where not exists(select 1 from t1 where -e>=13)),(t1.e)))- -( -max(d))* -count(*)) from t1)+(abs(t1.f)/abs((select cast(avg(11) AS integer) from t1))) then t1.a else coalesce((select max(c) from t1 where 11>=t1.e or 11 in (t1.b,t1.b,t1.d) or t1.a>=t1.a),13)*a end and t1.c)} +} {100} +do_test randexpr-2.1228 { + db eval {SELECT (coalesce((select 19 from t1 where - -t1.d | t1.c+~(select max(11) | -~count(*)+count(*)-cast(avg(case when t1.e in (select a from t1 union select f from t1) then c else 11 end) AS integer) from t1)*coalesce((select t1.b from t1 where ((abs(t1.b)/abs(coalesce((select (b)-f from t1 where t1.f not in ( -e, -t1.b, -t1.d) and t1.d>=a or 19 not in (a,13,t1.f)),b))))=a or 19 not in (a,13,t1.f)),b))))=a or 19 not in (a,13,t1.f)),b))))t1.d then (17) else e end+t1.d-19 FROM t1 WHERE not -t1.e+t1.c not between case when -case case when exists(select 1 from t1 where 17 in (select min(19) from t1 union select -min(19) from t1) and 13 not in (17,a,t1.e)) then case when f>=19 then (c) when t1.f>13 then 19 else 19 end | t1.a else e end when b then t1.c else b end 
in (select -max(13) from t1 union select count(distinct 17) from t1) and 11 not in (11,t1.a,f) and t1.c=f then ~(abs(t1.e)/abs(t1.d)) when f<13 then 17 else 19 end*b and t1.e} +} {} +do_test randexpr-2.1234 { + db eval {SELECT ~case when exists(select 1 from t1 where t1.c in (t1.a,e+t1.b+e-19 | coalesce((select max(t1.f | t1.a+11*a) from t1 where (abs((abs(f)/abs(d)))/abs(coalesce((select max(t1.a) from t1 where (13 between (f) and f and t1.e between e and t1.b)),t1.c)))<=(f)),e)+f+13, -t1.c)) then t1.b when t1.d<>t1.d then (17) else e end+t1.d-19 FROM t1 WHERE NOT (not -t1.e+t1.c not between case when -case case when exists(select 1 from t1 where 17 in (select min(19) from t1 union select -min(19) from t1) and 13 not in (17,a,t1.e)) then case when f>=19 then (c) when t1.f>13 then 19 else 19 end | t1.a else e end when b then t1.c else b end in (select -max(13) from t1 union select count(distinct 17) from t1) and 11 not in (11,t1.a,f) and t1.c=f then ~(abs(t1.e)/abs(t1.d)) when f<13 then 17 else 19 end*b and t1.e)} +} {-120} +do_test randexpr-2.1235 { + db eval {SELECT ~case when exists(select 1 from t1 where t1.c in (t1.a,e+t1.b+e-19 & coalesce((select max(t1.f & t1.a+11*a) from t1 where (abs((abs(f)/abs(d)))/abs(coalesce((select max(t1.a) from t1 where (13 between (f) and f and t1.e between e and t1.b)),t1.c)))<=(f)),e)+f+13, -t1.c)) then t1.b when t1.d<>t1.d then (17) else e end+t1.d-19 FROM t1 WHERE NOT (not -t1.e+t1.c not between case when -case case when exists(select 1 from t1 where 17 in (select min(19) from t1 union select -min(19) from t1) and 13 not in (17,a,t1.e)) then case when f>=19 then (c) when t1.f>13 then 19 else 19 end | t1.a else e end when b then t1.c else b end in (select -max(13) from t1 union select count(distinct 17) from t1) and 11 not in (11,t1.a,f) and t1.c=f then ~(abs(t1.e)/abs(t1.d)) when f<13 then 17 else 19 end*b and t1.e)} +} {-120} +do_test randexpr-2.1236 { + db eval {SELECT ((select min(e+case when b not between coalesce((select t1.d | b from t1 where not exists(select 1 from t1 where 17>11)), -c) and (t1.a) or ed then a when 19>11 then -13 else t1.b end+13)+count(distinct d)+count(*)* -count(distinct t1.f)-((count(distinct t1.c)))+min(a) | max(c) from t1)*t1.e*19*11) FROM t1 WHERE b<>(select count(distinct f) from t1)} +} {104918000} +do_test randexpr-2.1237 { + db eval {SELECT ((select min(e+case when b not between coalesce((select t1.d | b from t1 where not exists(select 1 from t1 where 17>11)), -c) and (t1.a) or ed then a when 19>11 then -13 else t1.b end+13)+count(distinct d)+count(*)* -count(distinct t1.f)-((count(distinct t1.c)))+min(a) | max(c) from t1)*t1.e*19*11) FROM t1 WHERE NOT (b<>(select count(distinct f) from t1))} +} {} +do_test randexpr-2.1238 { + db eval {SELECT ((select min(e+case when b not between coalesce((select t1.d & b from t1 where not exists(select 1 from t1 where 17>11)), -c) and (t1.a) or ed then a when 19>11 then -13 else t1.b end+13)+count(distinct d)+count(*)* -count(distinct t1.f)-((count(distinct t1.c)))+min(a) & max(c) from t1)*t1.e*19*11) FROM t1 WHERE b<>(select count(distinct f) from t1)} +} {836000} +do_test randexpr-2.1239 { + db eval {SELECT coalesce((select max(t1.a) from t1 where +b-b+t1.e<>(case when (case when ((select case -cast(avg(17) AS integer) when max(17) then count(*) else count(distinct t1.d) end from t1))-case 19 when 11 then e else t1.f end<=e then 19 when b not between t1.c and 13 then e else 17 end+t1.c in (select 19 from t1 union select a from t1)) then e-t1.c else t1.f end*t1.b)-e),a) FROM t1 
WHERE coalesce((select 13 from t1 where t1.c<>t1.b | t1.e*19-e),f+t1.d) not in (13,(c)*a,e)} +} {} +do_test randexpr-2.1240 { + db eval {SELECT coalesce((select max(t1.a) from t1 where +b-b+t1.e<>(case when (case when ((select case -cast(avg(17) AS integer) when max(17) then count(*) else count(distinct t1.d) end from t1))-case 19 when 11 then e else t1.f end<=e then 19 when b not between t1.c and 13 then e else 17 end+t1.c in (select 19 from t1 union select a from t1)) then e-t1.c else t1.f end*t1.b)-e),a) FROM t1 WHERE NOT (coalesce((select 13 from t1 where t1.c<>t1.b | t1.e*19-e),f+t1.d) not in (13,(c)*a,e))} +} {100} +do_test randexpr-2.1241 { + db eval {SELECT t1.d-case when (11) not between coalesce((select max(a) from t1 where f in (select cast(avg(t1.c+(select case case case max(a) when -count(*) then min(b) else - -(min((t1.a))) end when (max((19))) then max(t1.f) else count(*) end when max(t1.a) then -min(t1.c) else count(*) end from t1)+c+t1.d) AS integer)-min(a) | -cast(avg(f) AS integer)*(cast(avg(f) AS integer))-( -count(*)) from t1 union select min( -c) from t1)),t1.a) and -c then (d) when t1.f<=((t1.c)) then - -13 else c end FROM t1 WHERE 13-e>(abs(t1.f)/abs((t1.d)*d))} +} {} +do_test randexpr-2.1242 { + db eval {SELECT t1.d-case when (11) not between coalesce((select max(a) from t1 where f in (select cast(avg(t1.c+(select case case case max(a) when -count(*) then min(b) else - -(min((t1.a))) end when (max((19))) then max(t1.f) else count(*) end when max(t1.a) then -min(t1.c) else count(*) end from t1)+c+t1.d) AS integer)-min(a) | -cast(avg(f) AS integer)*(cast(avg(f) AS integer))-( -count(*)) from t1 union select min( -c) from t1)),t1.a) and -c then (d) when t1.f<=((t1.c)) then - -13 else c end FROM t1 WHERE NOT (13-e>(abs(t1.f)/abs((t1.d)*d)))} +} {0} +do_test randexpr-2.1243 { + db eval {SELECT t1.d-case when (11) not between coalesce((select max(a) from t1 where f in (select cast(avg(t1.c+(select case case case max(a) when -count(*) then min(b) else - -(min((t1.a))) end when (max((19))) then max(t1.f) else count(*) end when max(t1.a) then -min(t1.c) else count(*) end from t1)+c+t1.d) AS integer)-min(a) & -cast(avg(f) AS integer)*(cast(avg(f) AS integer))-( -count(*)) from t1 union select min( -c) from t1)),t1.a) and -c then (d) when t1.f<=((t1.c)) then - -13 else c end FROM t1 WHERE NOT (13-e>(abs(t1.f)/abs((t1.d)*d)))} +} {0} +do_test randexpr-2.1244 { + db eval {SELECT (abs(coalesce((select max(b) from t1 where not exists(select 1 from t1 where 19>(t1.c))),(11+(abs(c)/abs(19)))+b)+coalesce((select max(t1.c) from t1 where t1.a<=13),t1.d+t1.b+case when exists(select 1 from t1 where -t1.b in (select min(t1.a-+13) from t1 union select max(t1.c) from t1)) then (select ++(cast(avg(11) AS integer)) | cast(avg(b) AS integer)*cast(avg(d) AS integer) from t1) when f>=b then 19 else 19 end))/abs(b)) FROM t1 WHERE not b in (select case when t1.e in (select +cast(avg(d) AS integer)-count(*)*count(*)-(case ~count(distinct d)*cast(avg(t1.b) AS integer) when count(distinct t1.e) then max(19) else cast(avg(t1.b) AS integer) end+(count(distinct t1.c))) from t1 union select -min(t1.f) from t1) or 19<=+11 then ((select (min(t1.b)) from t1)*a-19+17*13) | a when (11 in (select d from t1 union select t1.e from t1)) then 19 else t1.b end from t1 union select t1.c from t1)} +} {} +do_test randexpr-2.1245 { + db eval {SELECT (abs(coalesce((select max(b) from t1 where not exists(select 1 from t1 where 19>(t1.c))),(11+(abs(c)/abs(19)))+b)+coalesce((select max(t1.c) from t1 where 
t1.a<=13),t1.d+t1.b+case when exists(select 1 from t1 where -t1.b in (select min(t1.a-+13) from t1 union select max(t1.c) from t1)) then (select ++(cast(avg(11) AS integer)) | cast(avg(b) AS integer)*cast(avg(d) AS integer) from t1) when f>=b then 19 else 19 end))/abs(b)) FROM t1 WHERE NOT (not b in (select case when t1.e in (select +cast(avg(d) AS integer)-count(*)*count(*)-(case ~count(distinct d)*cast(avg(t1.b) AS integer) when count(distinct t1.e) then max(19) else cast(avg(t1.b) AS integer) end+(count(distinct t1.c))) from t1 union select -min(t1.f) from t1) or 19<=+11 then ((select (min(t1.b)) from t1)*a-19+17*13) | a when (11 in (select d from t1 union select t1.e from t1)) then 19 else t1.b end from t1 union select t1.c from t1))} +} {4} +do_test randexpr-2.1246 { + db eval {SELECT (abs(coalesce((select max(b) from t1 where not exists(select 1 from t1 where 19>(t1.c))),(11+(abs(c)/abs(19)))+b)+coalesce((select max(t1.c) from t1 where t1.a<=13),t1.d+t1.b+case when exists(select 1 from t1 where -t1.b in (select min(t1.a-+13) from t1 union select max(t1.c) from t1)) then (select ++(cast(avg(11) AS integer)) & cast(avg(b) AS integer)*cast(avg(d) AS integer) from t1) when f>=b then 19 else 19 end))/abs(b)) FROM t1 WHERE NOT (not b in (select case when t1.e in (select +cast(avg(d) AS integer)-count(*)*count(*)-(case ~count(distinct d)*cast(avg(t1.b) AS integer) when count(distinct t1.e) then max(19) else cast(avg(t1.b) AS integer) end+(count(distinct t1.c))) from t1 union select -min(t1.f) from t1) or 19<=+11 then ((select (min(t1.b)) from t1)*a-19+17*13) | a when (11 in (select d from t1 union select t1.e from t1)) then 19 else t1.b end from t1 union select t1.c from t1))} +} {4} +do_test randexpr-2.1247 { + db eval {SELECT +case ~t1.d when case when b*f in (select b from t1 union select 17 from t1) and ((case when t1.f | e in (select ~abs(~(case cast(avg(t1.f) AS integer) when min(b) then min(t1.d) else - -count(distinct e) end | count(*)*( -cast(avg(t1.b) AS integer)))) from t1 union select (count(distinct t1.b)) from t1) then t1.f else t1.f*17+a end-13+e<>t1.c)) then d else t1.c end then t1.f else t1.e end+19 FROM t1 WHERE b>=+t1.e or f<=t1.b and -c*17-t1.d*19 not in (c+coalesce((select (abs(~+d*t1.f)/abs(11)) from t1 where +case when t1.d+ -t1.e in (select max(t1.d)*count(distinct b) from t1 union select count(distinct 11) from t1) then (17) when (t1.e>t1.a) then e else e end not in (e,f,t1.a)),t1.f),t1.a,b)} +} {} +do_test randexpr-2.1248 { + db eval {SELECT +case ~t1.d when case when b*f in (select b from t1 union select 17 from t1) and ((case when t1.f | e in (select ~abs(~(case cast(avg(t1.f) AS integer) when min(b) then min(t1.d) else - -count(distinct e) end | count(*)*( -cast(avg(t1.b) AS integer)))) from t1 union select (count(distinct t1.b)) from t1) then t1.f else t1.f*17+a end-13+e<>t1.c)) then d else t1.c end then t1.f else t1.e end+19 FROM t1 WHERE NOT (b>=+t1.e or f<=t1.b and -c*17-t1.d*19 not in (c+coalesce((select (abs(~+d*t1.f)/abs(11)) from t1 where +case when t1.d+ -t1.e in (select max(t1.d)*count(distinct b) from t1 union select count(distinct 11) from t1) then (17) when (t1.e>t1.a) then e else e end not in (e,f,t1.a)),t1.f),t1.a,b))} +} {519} +do_test randexpr-2.1249 { + db eval {SELECT +case ~t1.d when case when b*f in (select b from t1 union select 17 from t1) and ((case when t1.f & e in (select ~abs(~(case cast(avg(t1.f) AS integer) when min(b) then min(t1.d) else - -count(distinct e) end & count(*)*( -cast(avg(t1.b) AS integer)))) from t1 union select 
(count(distinct t1.b)) from t1) then t1.f else t1.f*17+a end-13+e<>t1.c)) then d else t1.c end then t1.f else t1.e end+19 FROM t1 WHERE NOT (b>=+t1.e or f<=t1.b and -c*17-t1.d*19 not in (c+coalesce((select (abs(~+d*t1.f)/abs(11)) from t1 where +case when t1.d+ -t1.e in (select max(t1.d)*count(distinct b) from t1 union select count(distinct 11) from t1) then (17) when (t1.e>t1.a) then e else e end not in (e,f,t1.a)),t1.f),t1.a,b))} +} {519} +do_test randexpr-2.1250 { + db eval {SELECT (abs((abs(t1.a)/abs(~17 | coalesce((select ~coalesce((select (d) from t1 where t1.b>=t1.a),case when t1.e-(select ~max( -19*c | e+t1.c) from t1) not between (abs(c)/abs(e)) and t1.e then a else 17 end*17)+13 from t1 where a in (select max(t1.f) from t1 union select ~count(distinct b) from t1)),b) | 13)))/abs(e)) FROM t1 WHERE exists(select 1 from t1 where t1.a+t1.d-11<=t1.e)} +} {0} +do_test randexpr-2.1251 { + db eval {SELECT (abs((abs(t1.a)/abs(~17 | coalesce((select ~coalesce((select (d) from t1 where t1.b>=t1.a),case when t1.e-(select ~max( -19*c | e+t1.c) from t1) not between (abs(c)/abs(e)) and t1.e then a else 17 end*17)+13 from t1 where a in (select max(t1.f) from t1 union select ~count(distinct b) from t1)),b) | 13)))/abs(e)) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.a+t1.d-11<=t1.e))} +} {} +do_test randexpr-2.1252 { + db eval {SELECT (abs((abs(t1.a)/abs(~17 & coalesce((select ~coalesce((select (d) from t1 where t1.b>=t1.a),case when t1.e-(select ~max( -19*c & e+t1.c) from t1) not between (abs(c)/abs(e)) and t1.e then a else 17 end*17)+13 from t1 where a in (select max(t1.f) from t1 union select ~count(distinct b) from t1)),b) & 13)))/abs(e)) FROM t1 WHERE exists(select 1 from t1 where t1.a+t1.d-11<=t1.e)} +} {0} +do_test randexpr-2.1253 { + db eval {SELECT coalesce((select max(d) from t1 where not exists(select 1 from t1 where t1.e not in (13,e,coalesce((select f from t1 where (abs(case -t1.e when ~f then (t1.c)*13-t1.b+a-d+ -t1.a else a end)/abs(11))*19*t1.d>=a),(t1.b)))) or d not in (f,e,19)),coalesce((select max(17) from t1 where e in (select t1.e from t1 union select 17 from t1)),t1.c)) | t1.f FROM t1 WHERE t1.e not in (13,(select +count(*)+abs(cast(avg(case t1.d when 17 then -t1.e+13 else (abs( -c)/abs(case when t1.e*+coalesce((select max(13) from t1 where a>t1.b),t1.f)>=(13) then -f else t1.a end)) end) AS integer)) from t1)*(13),(select ((max(t1.d))) from t1)) and not exists(select 1 from t1 where e+b not in (b,t1.c,t1.e))} +} {} +do_test randexpr-2.1254 { + db eval {SELECT coalesce((select max(d) from t1 where not exists(select 1 from t1 where t1.e not in (13,e,coalesce((select f from t1 where (abs(case -t1.e when ~f then (t1.c)*13-t1.b+a-d+ -t1.a else a end)/abs(11))*19*t1.d>=a),(t1.b)))) or d not in (f,e,19)),coalesce((select max(17) from t1 where e in (select t1.e from t1 union select 17 from t1)),t1.c)) | t1.f FROM t1 WHERE NOT (t1.e not in (13,(select +count(*)+abs(cast(avg(case t1.d when 17 then -t1.e+13 else (abs( -c)/abs(case when t1.e*+coalesce((select max(13) from t1 where a>t1.b),t1.f)>=(13) then -f else t1.a end)) end) AS integer)) from t1)*(13),(select ((max(t1.d))) from t1)) and not exists(select 1 from t1 where e+b not in (b,t1.c,t1.e)))} +} {984} +do_test randexpr-2.1255 { + db eval {SELECT coalesce((select max(d) from t1 where not exists(select 1 from t1 where t1.e not in (13,e,coalesce((select f from t1 where (abs(case -t1.e when ~f then (t1.c)*13-t1.b+a-d+ -t1.a else a end)/abs(11))*19*t1.d>=a),(t1.b)))) or d not in (f,e,19)),coalesce((select max(17) from t1 
where e in (select t1.e from t1 union select 17 from t1)),t1.c)) & t1.f FROM t1 WHERE NOT (t1.e not in (13,(select +count(*)+abs(cast(avg(case t1.d when 17 then -t1.e+13 else (abs( -c)/abs(case when t1.e*+coalesce((select max(13) from t1 where a>t1.b),t1.f)>=(13) then -f else t1.a end)) end) AS integer)) from t1)*(13),(select ((max(t1.d))) from t1)) and not exists(select 1 from t1 where e+b not in (b,t1.c,t1.e)))} +} {16} +do_test randexpr-2.1256 { + db eval {SELECT (abs(case when (t1.d*coalesce((select max(case coalesce((select max(e) from t1 where t1.a not in (coalesce((select (select count(distinct t1.e) from t1) from t1 where c>=t1.c),f),19,a)),t1.a) when t1.c then t1.a else b end-a) from t1 where c>t1.d),b)<= -11) then t1.a when not exists(select 1 from t1 where not (t1.d=e) or t1.f between t1.a and -e and (t1.c)=f and a not in (t1.c,a,c)) then t1.a else t1.a end)/abs( -13))+e-d FROM t1 WHERE 17 not in (13,19,t1.c)} +} {107} +do_test randexpr-2.1257 { + db eval {SELECT (abs(case when (t1.d*coalesce((select max(case coalesce((select max(e) from t1 where t1.a not in (coalesce((select (select count(distinct t1.e) from t1) from t1 where c>=t1.c),f),19,a)),t1.a) when t1.c then t1.a else b end-a) from t1 where c>t1.d),b)<= -11) then t1.a when not exists(select 1 from t1 where not (t1.d=e) or t1.f between t1.a and -e and (t1.c)=f and a not in (t1.c,a,c)) then t1.a else t1.a end)/abs( -13))+e-d FROM t1 WHERE NOT (17 not in (13,19,t1.c))} +} {} +do_test randexpr-2.1258 { + db eval {SELECT ~ -coalesce((select c from t1 where (case when b<= -e*t1.f then +coalesce((select case when (select count(*) from t1)=e then t1.f | c else f end from t1 where e>=(select ~case count(*) when -abs(max(11)-count(distinct -t1.d)) then count(distinct e) else max(f) end*count(*) from t1)),13) else 19 end>=t1.e)),case when t1.c=(select ~case count(*) when -abs(max(11)-count(distinct -t1.d)) then count(distinct e) else max(f) end*count(*) from t1)),13) else 19 end>=t1.e)),case when t1.c=(select ~case count(*) when -abs(max(11)-count(distinct -t1.d)) then count(distinct e) else max(f) end*count(*) from t1)),13) else 19 end>=t1.e)),case when t1.ct1.e | case when exists(select 1 from t1 where not ~+11<=coalesce((select max(a) from t1 where ~17*case when a=+(select cast(avg(t1.a*e) AS integer) from t1) then t1.e when c<11 then b else f end+c not between t1.e and -17), -t1.c)) then e+c when not exists(select 1 from t1 where t1.c not between 13 and -f) then 17 else -t1.d end or (f)<=c} +} {300} +do_test randexpr-2.1262 { + db eval {SELECT case t1.b when coalesce((select t1.b from t1 where not exists(select 1 from t1 where +case ( -case f when 13 then (abs(t1.b+f)/abs(c)) else t1.d end) when f*t1.f+t1.d then e else coalesce((select coalesce((select max(b) from t1 where t1.a<=t1.a or t1.d+f<=19 or t1.b in (select (11) from t1 union select (19) from t1)),t1.d) from t1 where not exists(select 1 from t1 where 17=b)),c)+e end between t1.c and b)),d) then c else t1.d end FROM t1 WHERE NOT (b>t1.e | case when exists(select 1 from t1 where not ~+11<=coalesce((select max(a) from t1 where ~17*case when a=+(select cast(avg(t1.a*e) AS integer) from t1) then t1.e when c<11 then b else f end+c not between t1.e and -17), -t1.c)) then e+c when not exists(select 1 from t1 where t1.c not between 13 and -f) then 17 else -t1.d end or (f)<=c)} +} {} +do_test randexpr-2.1263 { + db eval {SELECT case when case when case when exists(select 1 from t1 where exists(select 1 from t1 where t1.a+t1.a | case when t1.b between f and 19 and t1.c=t1.e then 
t1.e else 19 end | a FROM t1 WHERE t1.b not in ((a),t1.d,b)} +} {} +do_test randexpr-2.1264 { + db eval {SELECT case when case when case when exists(select 1 from t1 where exists(select 1 from t1 where t1.a+t1.a | case when t1.b between f and 19 and t1.c=t1.e then t1.e else 19 end | a FROM t1 WHERE NOT (t1.b not in ((a),t1.d,b))} +} {500} +do_test randexpr-2.1265 { + db eval {SELECT case when case when case when exists(select 1 from t1 where exists(select 1 from t1 where t1.a+t1.a & case when t1.b between f and 19 and t1.c=t1.e then t1.e else 19 end & a FROM t1 WHERE NOT (t1.b not in ((a),t1.d,b))} +} {100} +do_test randexpr-2.1266 { + db eval {SELECT case when exists(select 1 from t1 where a between 13+t1.e and t1.e and (abs((abs(case when not 19 between t1.e and b and not exists(select 1 from t1 where (abs(17)/abs(f-c)) not between (d) and t1.e) then t1.e else 19 end)/abs(11)))/abs(19))=f) or t1.e<= -a then t1.b when exists(select 1 from t1 where t1.d>t1.c) and t1.b<19 then a else 11 end*d FROM t1 WHERE exists(select 1 from t1 where t1.e<>(select count(distinct ((coalesce((select coalesce((select max( -coalesce((select max( -e) from t1 where ((abs(t1.a)/abs(d)) in (select ~~count(*) from t1 union select count(*) from t1) or a in (d,t1.f,d) and -c in (b,e,t1.d))),t1.c)-19 | 11-t1.e) from t1 where exists(select 1 from t1 where a<=e) and f not in (17,t1.b,t1.c) or (t1.a)>=t1.d),e)-t1.c from t1 where bt1.c) and t1.b<19 then a else 11 end*d FROM t1 WHERE NOT (exists(select 1 from t1 where t1.e<>(select count(distinct ((coalesce((select coalesce((select max( -coalesce((select max( -e) from t1 where ((abs(t1.a)/abs(d)) in (select ~~count(*) from t1 union select count(*) from t1) or a in (d,t1.f,d) and -c in (b,e,t1.d))),t1.c)-19 | 11-t1.e) from t1 where exists(select 1 from t1 where a<=e) and f not in (17,t1.b,t1.c) or (t1.a)>=t1.d),e)-t1.c from t1 where b=t1.f) or 11+t1.a between (select -cast(avg( -t1.b | 19) AS integer)+case min(t1.d) when -max(t1.e) then -count(distinct 11) else - -max((t1.c)) end from t1) and f),(e)),13) then t1.a else d end* -t1.f,13)),d)>=e then 17 else -11 end FROM t1 WHERE t1.d*b<=+~t1.c-t1.d+d*~b | e} +} {} +do_test randexpr-2.1272 { + db eval {SELECT case when coalesce((select b from t1 where t1.d not in (t1.c,case when t1.d not in (d,coalesce((select (select count(distinct d) from t1)*e from t1 where not exists(select 1 from t1 where t1.d>=t1.f) or 11+t1.a between (select -cast(avg( -t1.b | 19) AS integer)+case min(t1.d) when -max(t1.e) then -count(distinct 11) else - -max((t1.c)) end from t1) and f),(e)),13) then t1.a else d end* -t1.f,13)),d)>=e then 17 else -11 end FROM t1 WHERE NOT (t1.d*b<=+~t1.c-t1.d+d*~b | e)} +} {-11} +do_test randexpr-2.1273 { + db eval {SELECT case when coalesce((select b from t1 where t1.d not in (t1.c,case when t1.d not in (d,coalesce((select (select count(distinct d) from t1)*e from t1 where not exists(select 1 from t1 where t1.d>=t1.f) or 11+t1.a between (select -cast(avg( -t1.b & 19) AS integer)+case min(t1.d) when -max(t1.e) then -count(distinct 11) else - -max((t1.c)) end from t1) and f),(e)),13) then t1.a else d end* -t1.f,13)),d)>=e then 17 else -11 end FROM t1 WHERE NOT (t1.d*b<=+~t1.c-t1.d+d*~b | e)} +} {-11} +do_test randexpr-2.1274 { + db eval {SELECT t1.a*a+11-case when exists(select 1 from t1 where (t1.d)b) and (19 not in ((t1.a),11,a)) then ~t1.b when t1.c -coalesce((select (select max(e) | min(coalesce((select coalesce((select t1.d from t1 where ab) and (19 not in ((t1.a),11,a)) then ~t1.b when t1.c -coalesce((select 
(select max(e) | min(coalesce((select coalesce((select t1.d from t1 where ac then 19 else b end- -11 in (select case case count(distinct t1.f) | (max(17)) when (count(*)) then count(distinct b) else count(distinct 19) end when min(t1.f) then (count(distinct t1.f)) else count(*) end-min(13) from t1 union select cast(avg( -t1.c) AS integer) from t1) or f<>t1.a then coalesce((select max(t1.b) from t1 where exists(select 1 from t1 where t1.e>=t1.e)), -t1.a) when exists(select 1 from t1 where 17 between t1.f and t1.d) then t1.c else c end FROM t1 WHERE t1.b=t1.b} +} {200} +do_test randexpr-2.1277 { + db eval {SELECT case when case when case (abs(t1.b)/abs(t1.a)) when e then f else t1.a end>c then 19 else b end- -11 in (select case case count(distinct t1.f) | (max(17)) when (count(*)) then count(distinct b) else count(distinct 19) end when min(t1.f) then (count(distinct t1.f)) else count(*) end-min(13) from t1 union select cast(avg( -t1.c) AS integer) from t1) or f<>t1.a then coalesce((select max(t1.b) from t1 where exists(select 1 from t1 where t1.e>=t1.e)), -t1.a) when exists(select 1 from t1 where 17 between t1.f and t1.d) then t1.c else c end FROM t1 WHERE NOT (t1.b=t1.b)} +} {} +do_test randexpr-2.1278 { + db eval {SELECT case when case when case (abs(t1.b)/abs(t1.a)) when e then f else t1.a end>c then 19 else b end- -11 in (select case case count(distinct t1.f) & (max(17)) when (count(*)) then count(distinct b) else count(distinct 19) end when min(t1.f) then (count(distinct t1.f)) else count(*) end-min(13) from t1 union select cast(avg( -t1.c) AS integer) from t1) or f<>t1.a then coalesce((select max(t1.b) from t1 where exists(select 1 from t1 where t1.e>=t1.e)), -t1.a) when exists(select 1 from t1 where 17 between t1.f and t1.d) then t1.c else c end FROM t1 WHERE t1.b=t1.b} +} {200} +do_test randexpr-2.1279 { + db eval {SELECT (abs(b)/abs(case 19 when case e+13*~t1.c*11 when case when (select ~min(t1.a) from t1)>t1.c then (case when 11 between (a) and (19) then d else d+t1.c end) else f end then (abs((e+t1.d)*e*19-t1.d)/abs(c)) else t1.b end then 11 else ( -t1.b) end*13)) FROM t1 WHERE case when ((select min( -case when b in (d-t1.e,case when e in (select d from t1 union select 17 from t1) then 11 else c end,( -t1.d)) then t1.b when b>e then c else -t1.f end)+ -((min(t1.f)))-+min(f) | ( -(cast(avg(a) AS integer))) from t1)) in (select 11 from t1 union select 19 from t1) then 19 when not exists(select 1 from t1 where -17>t1.f and t1.e in (select t1.e from t1 union select d from t1)) then 11 else d end=t1.d} +} {} +do_test randexpr-2.1280 { + db eval {SELECT (abs(b)/abs(case 19 when case e+13*~t1.c*11 when case when (select ~min(t1.a) from t1)>t1.c then (case when 11 between (a) and (19) then d else d+t1.c end) else f end then (abs((e+t1.d)*e*19-t1.d)/abs(c)) else t1.b end then 11 else ( -t1.b) end*13)) FROM t1 WHERE NOT (case when ((select min( -case when b in (d-t1.e,case when e in (select d from t1 union select 17 from t1) then 11 else c end,( -t1.d)) then t1.b when b>e then c else -t1.f end)+ -((min(t1.f)))-+min(f) | ( -(cast(avg(a) AS integer))) from t1)) in (select 11 from t1 union select 19 from t1) then 19 when not exists(select 1 from t1 where -17>t1.f and t1.e in (select t1.e from t1 union select d from t1)) then 11 else d end=t1.d)} +} {0} +do_test randexpr-2.1281 { + db eval {SELECT coalesce((select max(11) from t1 where not not exists(select 1 from t1 where ((abs(b)/abs(t1.f))-19+f-case ~11 when 19 then c else t1.a end)> -c+t1.a)),(11))- -b FROM t1 WHERE b*case when 19 not in 
(t1.e,a*+t1.d,(abs(+c)/abs(case 11 when b then +case when (not exists(select 1 from t1 where d<=c or c -c+t1.a)),(11))- -b FROM t1 WHERE NOT (b*case when 19 not in (t1.e,a*+t1.d,(abs(+c)/abs(case 11 when b then +case when (not exists(select 1 from t1 where d<=c or c=11*t1.d} +} {} +do_test randexpr-2.1287 { + db eval {SELECT case coalesce((select t1.e from t1 where 19 in (select 13 from t1 union select 11 | coalesce((select -(t1.c) from t1 where t1.a<=t1.c),case when ~case when t1.e not between 11 and t1.e then d else 19 end+t1.a+f in (select -(count(distinct -c))-max(d)+count(distinct t1.d) from t1 union select cast(avg((b)) AS integer) from t1) then (t1.d) when not (t1.a not in ( -t1.e,t1.b,17) and 11=11*t1.d)} +} {-100} +do_test randexpr-2.1288 { + db eval {SELECT case coalesce((select t1.e from t1 where 19 in (select 13 from t1 union select 11 & coalesce((select -(t1.c) from t1 where t1.a<=t1.c),case when ~case when t1.e not between 11 and t1.e then d else 19 end+t1.a+f in (select -(count(distinct -c))-max(d)+count(distinct t1.d) from t1 union select cast(avg((b)) AS integer) from t1) then (t1.d) when not (t1.a not in ( -t1.e,t1.b,17) and 11=11*t1.d)} +} {-100} +do_test randexpr-2.1289 { + db eval {SELECT (select min(coalesce((select max(case (b | case -e when 11 then +(abs(17)/abs((19))) else 13 end) when t1.f then 17 else a end) from t1 where 17 not between e and -a and 19=13 or t1.f=19 and t1.ct1.b} +} {149900} +do_test randexpr-2.1290 { + db eval {SELECT (select min(coalesce((select max(case (b | case -e when 11 then +(abs(17)/abs((19))) else 13 end) when t1.f then 17 else a end) from t1 where 17 not between e and -a and 19=13 or t1.f=19 and t1.ct1.b)} +} {} +do_test randexpr-2.1291 { + db eval {SELECT (select min(coalesce((select max(case (b & case -e when 11 then +(abs(17)/abs((19))) else 13 end) when t1.f then 17 else a end) from t1 where 17 not between e and -a and 19=13 or t1.f=19 and t1.ct1.b} +} {149900} +do_test randexpr-2.1292 { + db eval {SELECT t1.f*b | coalesce((select max(t1.f) from t1 where (case when (select cast(avg((select (count(distinct e+t1.c)) from t1)) AS integer) from t1)=t1.d then case when not exists(select 1 from t1 where (b)+t1.e+t1.f in (select min(e) from t1 union select case count(distinct t1.c) when count(distinct f) then cast(avg((t1.f)) AS integer) else (cast(avg(13) AS integer)) end from t1)) and not exists(select 1 from t1 where (d=b)) then t1.d when 13 in (f,c,c) then case t1.c when 11 then -a else t1.b end else 13 end else 19 end)<17),19)-t1.a FROM t1 WHERE c>=case when d=case when d=case when d=t1.a),t1.e))/abs(13)) when t1.b then b else -a end=11 then 13 else a end<>t1.f or 19 not in (b,(t1.b), -19) then b else e end<>e) then t1.f else c end | t1.a+d- -f+f FROM t1 WHERE a in (13+17,case when (t1.a>t1.e) then ~t1.c+d*case coalesce((select coalesce((select d from t1 where 11 not between t1.e and coalesce((select t1.b from t1 where -coalesce((select max(t1.b) from t1 where t1.d in (a,11,d) and b=c),c) between b and d),(t1.e))),t1.f) from t1 where t1.e=a),t1.a) when 17 then (t1.f) else t1.d end+b+c when a>=t1.d then t1.a else f end,b)} +} {} +do_test randexpr-2.1296 { + db eval {SELECT +case when b in (13,(t1.f),19+a) and not exists(select 1 from t1 where -case when case when case (abs(coalesce((select b from t1 where t1.b>=t1.a),t1.e))/abs(13)) when t1.b then b else -a end=11 then 13 else a end<>t1.f or 19 not in (b,(t1.b), -19) then b else e end<>e) then t1.f else c end | t1.a+d- -f+f FROM t1 WHERE NOT (a in (13+17,case when (t1.a>t1.e) then 
~t1.c+d*case coalesce((select coalesce((select d from t1 where 11 not between t1.e and coalesce((select t1.b from t1 where -coalesce((select max(t1.b) from t1 where t1.d in (a,11,d) and b=c),c) between b and d),(t1.e))),t1.f) from t1 where t1.e=a),t1.a) when 17 then (t1.f) else t1.d end+b+c when a>=t1.d then t1.a else f end,b))} +} {1964} +do_test randexpr-2.1297 { + db eval {SELECT +case when b in (13,(t1.f),19+a) and not exists(select 1 from t1 where -case when case when case (abs(coalesce((select b from t1 where t1.b>=t1.a),t1.e))/abs(13)) when t1.b then b else -a end=11 then 13 else a end<>t1.f or 19 not in (b,(t1.b), -19) then b else e end<>e) then t1.f else c end & t1.a+d- -f+f FROM t1 WHERE NOT (a in (13+17,case when (t1.a>t1.e) then ~t1.c+d*case coalesce((select coalesce((select d from t1 where 11 not between t1.e and coalesce((select t1.b from t1 where -coalesce((select max(t1.b) from t1 where t1.d in (a,11,d) and b=c),c) between b and d),(t1.e))),t1.f) from t1 where t1.e=a),t1.a) when 17 then (t1.f) else t1.d end+b+c when a>=t1.d then t1.a else f end,b))} +} {36} +do_test randexpr-2.1298 { + db eval {SELECT coalesce((select t1.b from t1 where 17+13+t1.b in (e,19,case when 17 in (select case max(b*17-d*t1.b*t1.d)+case max((19)) when ~ -max(t1.f) then +count(*) else cast(avg(13) AS integer) end when count(distinct 11) then min(e) else max(t1.c) end from t1 union select min(d) from t1) then t1.a* -(t1.a) when t1.c=19 then (t1.c) else t1.f end) and t1.f<=13),17) FROM t1 WHERE t1.e<>~t1.b} +} {17} +do_test randexpr-2.1299 { + db eval {SELECT coalesce((select t1.b from t1 where 17+13+t1.b in (e,19,case when 17 in (select case max(b*17-d*t1.b*t1.d)+case max((19)) when ~ -max(t1.f) then +count(*) else cast(avg(13) AS integer) end when count(distinct 11) then min(e) else max(t1.c) end from t1 union select min(d) from t1) then t1.a* -(t1.a) when t1.c=19 then (t1.c) else t1.f end) and t1.f<=13),17) FROM t1 WHERE NOT (t1.e<>~t1.b)} +} {} +do_test randexpr-2.1300 { + db eval {SELECT (abs(coalesce((select -~(abs(t1.d)/abs(a)) from t1 where case when f in (select ~~(+abs( - -count(*)) | cast(avg(d) AS integer)) from t1 union select (min(f)) from t1) then (abs(t1.c)/abs( -13)) | f-13+d*17 when a in (t1.f,t1.c,11) then f else -(f) end*c not in (13,d,d) and 19 in (t1.d,t1.d,b)),13))/abs(17)) FROM t1 WHERE exists(select 1 from t1 where t1.d<>case case when not exists(select 1 from t1 where t1.b(abs(c)/abs(t1.a))) then t1.b else 17 end when +~(e- -coalesce((select e from t1 where not (b) in (select max(t1.b) from t1 union select case max(t1.e)-max(17)-count(distinct (13)) when -cast(avg( -t1.b) AS integer) then count(distinct d) else min(t1.f) end from t1)),c*d))*a*13 | f then (13) else c end)} +} {0} +do_test randexpr-2.1301 { + db eval {SELECT (abs(coalesce((select -~(abs(t1.d)/abs(a)) from t1 where case when f in (select ~~(+abs( - -count(*)) | cast(avg(d) AS integer)) from t1 union select (min(f)) from t1) then (abs(t1.c)/abs( -13)) | f-13+d*17 when a in (t1.f,t1.c,11) then f else -(f) end*c not in (13,d,d) and 19 in (t1.d,t1.d,b)),13))/abs(17)) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.d<>case case when not exists(select 1 from t1 where t1.b(abs(c)/abs(t1.a))) then t1.b else 17 end when +~(e- -coalesce((select e from t1 where not (b) in (select max(t1.b) from t1 union select case max(t1.e)-max(17)-count(distinct (13)) when -cast(avg( -t1.b) AS integer) then count(distinct d) else min(t1.f) end from t1)),c*d))*a*13 | f then (13) else c end))} +} {} +do_test randexpr-2.1302 { + db eval 
{SELECT (abs(coalesce((select -~(abs(t1.d)/abs(a)) from t1 where case when f in (select ~~(+abs( - -count(*)) & cast(avg(d) AS integer)) from t1 union select (min(f)) from t1) then (abs(t1.c)/abs( -13)) & f-13+d*17 when a in (t1.f,t1.c,11) then f else -(f) end*c not in (13,d,d) and 19 in (t1.d,t1.d,b)),13))/abs(17)) FROM t1 WHERE exists(select 1 from t1 where t1.d<>case case when not exists(select 1 from t1 where t1.b(abs(c)/abs(t1.a))) then t1.b else 17 end when +~(e- -coalesce((select e from t1 where not (b) in (select max(t1.b) from t1 union select case max(t1.e)-max(17)-count(distinct (13)) when -cast(avg( -t1.b) AS integer) then count(distinct d) else min(t1.f) end from t1)),c*d))*a*13 | f then (13) else c end)} +} {0} +do_test randexpr-2.1303 { + db eval {SELECT t1.f*+case when ( -17-a*t1.f | case when (coalesce((select max((case a when +13*19* -b then a else 17 end)) from t1 where (t1.b) not between 13 and t1.a),e))-t1.b=t1.f then f else 17 end-b*a*d)<>11 then f else t1.c end-t1.c*e FROM t1 WHERE (select +cast(avg(case when 13*a in (select 11 from t1 union select b from t1) then e when t1.f in (select ~count(distinct f) | count(*) from t1 union select count(distinct 13*t1.a+coalesce((select max(e) from t1 where ~(abs(t1.e)/abs(a)) not in (t1.a+e-c,t1.e,case when t1.f>t1.c or f>t1.c then 13 when t1.c>c then t1.a else -t1.d end)),f)*19) from t1) then d else d end) AS integer) from t1)=d} +} {210000} +do_test randexpr-2.1304 { + db eval {SELECT t1.f*+case when ( -17-a*t1.f | case when (coalesce((select max((case a when +13*19* -b then a else 17 end)) from t1 where (t1.b) not between 13 and t1.a),e))-t1.b=t1.f then f else 17 end-b*a*d)<>11 then f else t1.c end-t1.c*e FROM t1 WHERE NOT ((select +cast(avg(case when 13*a in (select 11 from t1 union select b from t1) then e when t1.f in (select ~count(distinct f) | count(*) from t1 union select count(distinct 13*t1.a+coalesce((select max(e) from t1 where ~(abs(t1.e)/abs(a)) not in (t1.a+e-c,t1.e,case when t1.f>t1.c or f>t1.c then 13 when t1.c>c then t1.a else -t1.d end)),f)*19) from t1) then d else d end) AS integer) from t1)=d)} +} {} +do_test randexpr-2.1305 { + db eval {SELECT t1.f*+case when ( -17-a*t1.f & case when (coalesce((select max((case a when +13*19* -b then a else 17 end)) from t1 where (t1.b) not between 13 and t1.a),e))-t1.b=t1.f then f else 17 end-b*a*d)<>11 then f else t1.c end-t1.c*e FROM t1 WHERE (select +cast(avg(case when 13*a in (select 11 from t1 union select b from t1) then e when t1.f in (select ~count(distinct f) | count(*) from t1 union select count(distinct 13*t1.a+coalesce((select max(e) from t1 where ~(abs(t1.e)/abs(a)) not in (t1.a+e-c,t1.e,case when t1.f>t1.c or f>t1.c then 13 when t1.c>c then t1.a else -t1.d end)),f)*19) from t1) then d else d end) AS integer) from t1)=d} +} {210000} +do_test randexpr-2.1306 { + db eval {SELECT t1.c+case when t1.a in (select max(coalesce((select coalesce((select coalesce((select max(19+case t1.f when a then e else t1.d end) from t1 where t1.d> -c),b) from t1 where (t1.c=e) and t1.f=17), -a) from t1 where (e not between e and 11)),t1.f)) | cast(avg(t1.a) AS integer)*max(t1.c) from t1 union select count(distinct c)-max(a) | min(t1.b) | (count(*))* -min(f) from t1) then t1.a when exists(select 1 from t1 where not exists(select 1 from t1 where -17>11)) then (13) else 13 end FROM t1 WHERE 19=case when exists(select 1 from t1 where t1.f not between 11 and ~c+b-t1.c*case when t1.f<13+(t1.d) and -t1.f<17 or (b<>b) then ~d else t1.b end) or exists(select 1 from t1 where not 
exists(select 1 from t1 where t1.e=19)) then c+f*17 else t1.a end and t1.e<13} +} {} +do_test randexpr-2.1307 { + db eval {SELECT t1.c+case when t1.a in (select max(coalesce((select coalesce((select coalesce((select max(19+case t1.f when a then e else t1.d end) from t1 where t1.d> -c),b) from t1 where (t1.c=e) and t1.f=17), -a) from t1 where (e not between e and 11)),t1.f)) | cast(avg(t1.a) AS integer)*max(t1.c) from t1 union select count(distinct c)-max(a) | min(t1.b) | (count(*))* -min(f) from t1) then t1.a when exists(select 1 from t1 where not exists(select 1 from t1 where -17>11)) then (13) else 13 end FROM t1 WHERE NOT (19=case when exists(select 1 from t1 where t1.f not between 11 and ~c+b-t1.c*case when t1.f<13+(t1.d) and -t1.f<17 or (b<>b) then ~d else t1.b end) or exists(select 1 from t1 where not exists(select 1 from t1 where t1.e=19)) then c+f*17 else t1.a end and t1.e<13)} +} {313} +do_test randexpr-2.1308 { + db eval {SELECT t1.c+case when t1.a in (select max(coalesce((select coalesce((select coalesce((select max(19+case t1.f when a then e else t1.d end) from t1 where t1.d> -c),b) from t1 where (t1.c=e) and t1.f=17), -a) from t1 where (e not between e and 11)),t1.f)) & cast(avg(t1.a) AS integer)*max(t1.c) from t1 union select count(distinct c)-max(a) & min(t1.b) & (count(*))* -min(f) from t1) then t1.a when exists(select 1 from t1 where not exists(select 1 from t1 where -17>11)) then (13) else 13 end FROM t1 WHERE NOT (19=case when exists(select 1 from t1 where t1.f not between 11 and ~c+b-t1.c*case when t1.f<13+(t1.d) and -t1.f<17 or (b<>b) then ~d else t1.b end) or exists(select 1 from t1 where not exists(select 1 from t1 where t1.e=19)) then c+f*17 else t1.a end and t1.e<13)} +} {313} +do_test randexpr-2.1309 { + db eval {SELECT 13-case when exists(select 1 from t1 where f+b>t1.f+t1.c) or +17 | -coalesce((select case coalesce((select 13-(select (cast(avg(13) AS integer))-max(t1.b) from t1) from t1 where -c in (select max(17) from t1 union select max(t1.b) from t1)),t1.a) when t1.d then 19 else d end from t1 where d>=11),b) between 13 and 11 and 17<=13 then a | t1.c+t1.f else c end FROM t1 WHERE (case when a not in (t1.d+t1.f,b,t1.b) then case when case f | t1.c when t1.e then e else (t1.a)-d | 13 end<>f then t1.e when (((13) in (select min(t1.a) from t1 union select max( -e) from t1) or t1.f between 13 and -t1.b or (f>=f) or 19t1.f+t1.c) or +17 | -coalesce((select case coalesce((select 13-(select (cast(avg(13) AS integer))-max(t1.b) from t1) from t1 where -c in (select max(17) from t1 union select max(t1.b) from t1)),t1.a) when t1.d then 19 else d end from t1 where d>=11),b) between 13 and 11 and 17<=13 then a | t1.c+t1.f else c end FROM t1 WHERE NOT ((case when a not in (t1.d+t1.f,b,t1.b) then case when case f | t1.c when t1.e then e else (t1.a)-d | 13 end<>f then t1.e when (((13) in (select min(t1.a) from t1 union select max( -e) from t1) or t1.f between 13 and -t1.b or (f>=f) or 19t1.f+t1.c) or +17 & -coalesce((select case coalesce((select 13-(select (cast(avg(13) AS integer))-max(t1.b) from t1) from t1 where -c in (select max(17) from t1 union select max(t1.b) from t1)),t1.a) when t1.d then 19 else d end from t1 where d>=11),b) between 13 and 11 and 17<=13 then a & t1.c+t1.f else c end FROM t1 WHERE NOT ((case when a not in (t1.d+t1.f,b,t1.b) then case when case f | t1.c when t1.e then e else (t1.a)-d | 13 end<>f then t1.e when (((13) in (select min(t1.a) from t1 union select max( -e) from t1) or t1.f between 13 and -t1.b or (f>=f) or 19t1.e or f between b and 13 then 
t1.c else c end | -b from t1 where 17<=17),t1.e) from t1 where 11=d) and t1.e>17))) then t1.a | (select ~ - -cast(avg( -c) AS integer) from t1) else c end FROM t1 WHERE t1.f>=e and t1.f in (select 17 from t1 union select case when (not exists(select 1 from t1 where case when coalesce((select max(19+t1.c) from t1 where 17>=t1.f),c)+19+t1.c< -d then t1.d when t1.c not in (c,t1.f,e) then 17 else t1.d end11 then d else 13 end from t1)} +} {} +do_test randexpr-2.1313 { + db eval {SELECT a+case when ((~t1.d*13<=e or (coalesce((select coalesce((select case when -t1.e<>t1.e or f between b and 13 then t1.c else c end | -b from t1 where 17<=17),t1.e) from t1 where 11=d) and t1.e>17))) then t1.a | (select ~ - -cast(avg( -c) AS integer) from t1) else c end FROM t1 WHERE NOT (t1.f>=e and t1.f in (select 17 from t1 union select case when (not exists(select 1 from t1 where case when coalesce((select max(19+t1.c) from t1 where 17>=t1.f),c)+19+t1.c< -d then t1.d when t1.c not in (c,t1.f,e) then 17 else t1.d end11 then d else 13 end from t1))} +} {467} +do_test randexpr-2.1314 { + db eval {SELECT a+case when ((~t1.d*13<=e or (coalesce((select coalesce((select case when -t1.e<>t1.e or f between b and 13 then t1.c else c end & -b from t1 where 17<=17),t1.e) from t1 where 11=d) and t1.e>17))) then t1.a & (select ~ - -cast(avg( -c) AS integer) from t1) else c end FROM t1 WHERE NOT (t1.f>=e and t1.f in (select 17 from t1 union select case when (not exists(select 1 from t1 where case when coalesce((select max(19+t1.c) from t1 where 17>=t1.f),c)+19+t1.c< -d then t1.d when t1.c not in (c,t1.f,e) then 17 else t1.d end11 then d else 13 end from t1))} +} {132} +do_test randexpr-2.1315 { + db eval {SELECT case when t1.b not in (17 | t1.e*t1.a,11,(select +case max((select -+case (max(t1.f)) when count(*) then count(distinct d) else -( -(cast(avg(f) AS integer))) end-count(distinct t1.a) from t1)) | count(distinct (t1.e)) when -min(c) then count(*) else -count(distinct e) end | cast(avg(17) AS integer) from t1)) then case when d<=t1.c and (exists(select 1 from t1 where (select count(distinct (e)) from t1)=(abs(17)/abs(d)))) then (abs(t1.f)/abs(t1.c)) when t1.f<>e or t1.c<=d and t1.e<=t1.d then 19 else - -d end-t1.e else (t1.f) end FROM t1 WHERE not t1.b=coalesce((select (abs(c)/abs(11)) from t1 where exists(select 1 from t1 where -coalesce((select max(((abs(coalesce((select max(case (select min((abs(19)/abs(19))+19)*count(*)- -cast(avg(t1.d) AS integer)-max(17)+min( -11) from t1) when e then t1.e else f end) from t1 where t1.d<=t1.f), -f)- -t1.e)/abs(d))-t1.b)) from t1 where c<11),t1.b)*a not in (d,d,f))),c)} +} {-481} +do_test randexpr-2.1316 { + db eval {SELECT case when t1.b not in (17 | t1.e*t1.a,11,(select +case max((select -+case (max(t1.f)) when count(*) then count(distinct d) else -( -(cast(avg(f) AS integer))) end-count(distinct t1.a) from t1)) | count(distinct (t1.e)) when -min(c) then count(*) else -count(distinct e) end | cast(avg(17) AS integer) from t1)) then case when d<=t1.c and (exists(select 1 from t1 where (select count(distinct (e)) from t1)=(abs(17)/abs(d)))) then (abs(t1.f)/abs(t1.c)) when t1.f<>e or t1.c<=d and t1.e<=t1.d then 19 else - -d end-t1.e else (t1.f) end FROM t1 WHERE NOT (not t1.b=coalesce((select (abs(c)/abs(11)) from t1 where exists(select 1 from t1 where -coalesce((select max(((abs(coalesce((select max(case (select min((abs(19)/abs(19))+19)*count(*)- -cast(avg(t1.d) AS integer)-max(17)+min( -11) from t1) when e then t1.e else f end) from t1 where t1.d<=t1.f), -f)- 
-t1.e)/abs(d))-t1.b)) from t1 where c<11),t1.b)*a not in (d,d,f))),c))} +} {} +do_test randexpr-2.1317 { + db eval {SELECT case when t1.b not in (17 & t1.e*t1.a,11,(select +case max((select -+case (max(t1.f)) when count(*) then count(distinct d) else -( -(cast(avg(f) AS integer))) end-count(distinct t1.a) from t1)) & count(distinct (t1.e)) when -min(c) then count(*) else -count(distinct e) end & cast(avg(17) AS integer) from t1)) then case when d<=t1.c and (exists(select 1 from t1 where (select count(distinct (e)) from t1)=(abs(17)/abs(d)))) then (abs(t1.f)/abs(t1.c)) when t1.f<>e or t1.c<=d and t1.e<=t1.d then 19 else - -d end-t1.e else (t1.f) end FROM t1 WHERE not t1.b=coalesce((select (abs(c)/abs(11)) from t1 where exists(select 1 from t1 where -coalesce((select max(((abs(coalesce((select max(case (select min((abs(19)/abs(19))+19)*count(*)- -cast(avg(t1.d) AS integer)-max(17)+min( -11) from t1) when e then t1.e else f end) from t1 where t1.d<=t1.f), -f)- -t1.e)/abs(d))-t1.b)) from t1 where c<11),t1.b)*a not in (d,d,f))),c)} +} {-481} +do_test randexpr-2.1318 { + db eval {SELECT -t1.a*(select (cast(avg((select count(*)-abs(count(*)*count(*)) from t1)) AS integer)++max(t1.d))*cast(avg(t1.a) AS integer) from t1)+t1.c+t1.c+d-e | 11+t1.b-coalesce((select max(case when exists(select 1 from t1 where t1.a+11=t1.b) then c when t1.a not between t1.f and t1.d or 13<11 then -b else 19 end) from t1 where c>=t1.f),t1.f) FROM t1 WHERE exists(select 1 from t1 where (f=(d)*e))} +} {} +do_test randexpr-2.1319 { + db eval {SELECT -t1.a*(select (cast(avg((select count(*)-abs(count(*)*count(*)) from t1)) AS integer)++max(t1.d))*cast(avg(t1.a) AS integer) from t1)+t1.c+t1.c+d-e | 11+t1.b-coalesce((select max(case when exists(select 1 from t1 where t1.a+11=t1.b) then c when t1.a not between t1.f and t1.d or 13<11 then -b else 19 end) from t1 where c>=t1.f),t1.f) FROM t1 WHERE NOT (exists(select 1 from t1 where (f=(d)*e)))} +} {-257} +do_test randexpr-2.1320 { + db eval {SELECT -t1.a*(select (cast(avg((select count(*)-abs(count(*)*count(*)) from t1)) AS integer)++max(t1.d))*cast(avg(t1.a) AS integer) from t1)+t1.c+t1.c+d-e & 11+t1.b-coalesce((select max(case when exists(select 1 from t1 where t1.a+11=t1.b) then c when t1.a not between t1.f and t1.d or 13<11 then -b else 19 end) from t1 where c>=t1.f),t1.f) FROM t1 WHERE NOT (exists(select 1 from t1 where (f=(d)*e)))} +} {-3999632} +do_test randexpr-2.1321 { + db eval {SELECT -case when t1.a+ -c+coalesce((select d from t1 where (t1.d between a and case when t1.c*t1.b*a between a and 13 then t1.d else 11 end) or not t1.e in (select max(d) from t1 union select max(t1.e)-( -count(distinct t1.e)) from t1) or t1.c>=t1.b or a not in (c,t1.f,t1.d) and b<=t1.c),13)+d-(17) | t1.c*17<=t1.a then t1.c else d end FROM t1 WHERE case a when e then f else case when -t1.c not between e*f | b and t1.d then case (select max((b)) from t1)+coalesce((select max(case when 19 in (b,e,17) then 13 when 11 in (t1.a,t1.a,t1.a) then t1.e else 19 end) from t1 where a<=e and 13 between t1.e and -t1.f),13) when 17 then a else t1.d end when e in (select f from t1 union select -t1.d from t1) then t1.a else c end*d end-13*a not in (19,t1.d,19)} +} {-400} +do_test randexpr-2.1322 { + db eval {SELECT -case when t1.a+ -c+coalesce((select d from t1 where (t1.d between a and case when t1.c*t1.b*a between a and 13 then t1.d else 11 end) or not t1.e in (select max(d) from t1 union select max(t1.e)-( -count(distinct t1.e)) from t1) or t1.c>=t1.b or a not in (c,t1.f,t1.d) and b<=t1.c),13)+d-(17) | 
t1.c*17<=t1.a then t1.c else d end FROM t1 WHERE NOT (case a when e then f else case when -t1.c not between e*f | b and t1.d then case (select max((b)) from t1)+coalesce((select max(case when 19 in (b,e,17) then 13 when 11 in (t1.a,t1.a,t1.a) then t1.e else 19 end) from t1 where a<=e and 13 between t1.e and -t1.f),13) when 17 then a else t1.d end when e in (select f from t1 union select -t1.d from t1) then t1.a else c end*d end-13*a not in (19,t1.d,19))} +} {} +do_test randexpr-2.1323 { + db eval {SELECT -case when t1.a+ -c+coalesce((select d from t1 where (t1.d between a and case when t1.c*t1.b*a between a and 13 then t1.d else 11 end) or not t1.e in (select max(d) from t1 union select max(t1.e)-( -count(distinct t1.e)) from t1) or t1.c>=t1.b or a not in (c,t1.f,t1.d) and b<=t1.c),13)+d-(17) & t1.c*17<=t1.a then t1.c else d end FROM t1 WHERE case a when e then f else case when -t1.c not between e*f | b and t1.d then case (select max((b)) from t1)+coalesce((select max(case when 19 in (b,e,17) then 13 when 11 in (t1.a,t1.a,t1.a) then t1.e else 19 end) from t1 where a<=e and 13 between t1.e and -t1.f),13) when 17 then a else t1.d end when e in (select f from t1 union select -t1.d from t1) then t1.a else c end*d end-13*a not in (19,t1.d,19)} +} {-400} +do_test randexpr-2.1324 { + db eval {SELECT coalesce((select (abs(t1.a)/abs(13)) from t1 where a<>(select max(b)-abs(+min(f+13*17+~coalesce((select case when 11-t1.c<>19 then t1.c when (c) not between t1.e and d and 11 in (t1.f,13,((c))) then 19 else 11 end from t1 where ( -d)f),11)+c-t1.c)) | - - -+max(19) from t1)-f),b) FROM t1 WHERE t1.b-t1.a+case ((t1.a)) when t1.f then (abs(e*e-(e)*c)/abs(t1.f)) else (abs(t1.a)/abs(t1.b)) end between f and (select case cast(avg(13) AS integer) when (count(*)) then (~abs( -case count(*) when abs((count(*))) then count(distinct 13) else count(*) end)-(count(distinct 11)))-count(distinct 19)*((max(t1.c)))-cast(avg(11) AS integer) else (max(19)) end from t1)} +} {} +do_test randexpr-2.1325 { + db eval {SELECT coalesce((select (abs(t1.a)/abs(13)) from t1 where a<>(select max(b)-abs(+min(f+13*17+~coalesce((select case when 11-t1.c<>19 then t1.c when (c) not between t1.e and d and 11 in (t1.f,13,((c))) then 19 else 11 end from t1 where ( -d)f),11)+c-t1.c)) | - - -+max(19) from t1)-f),b) FROM t1 WHERE NOT (t1.b-t1.a+case ((t1.a)) when t1.f then (abs(e*e-(e)*c)/abs(t1.f)) else (abs(t1.a)/abs(t1.b)) end between f and (select case cast(avg(13) AS integer) when (count(*)) then (~abs( -case count(*) when abs((count(*))) then count(distinct 13) else count(*) end)-(count(distinct 11)))-count(distinct 19)*((max(t1.c)))-cast(avg(11) AS integer) else (max(19)) end from t1))} +} {7} +do_test randexpr-2.1326 { + db eval {SELECT coalesce((select (abs(t1.a)/abs(13)) from t1 where a<>(select max(b)-abs(+min(f+13*17+~coalesce((select case when 11-t1.c<>19 then t1.c when (c) not between t1.e and d and 11 in (t1.f,13,((c))) then 19 else 11 end from t1 where ( -d)f),11)+c-t1.c)) & - - -+max(19) from t1)-f),b) FROM t1 WHERE NOT (t1.b-t1.a+case ((t1.a)) when t1.f then (abs(e*e-(e)*c)/abs(t1.f)) else (abs(t1.a)/abs(t1.b)) end between f and (select case cast(avg(13) AS integer) when (count(*)) then (~abs( -case count(*) when abs((count(*))) then count(distinct 13) else count(*) end)-(count(distinct 11)))-count(distinct 19)*((max(t1.c)))-cast(avg(11) AS integer) else (max(19)) end from t1))} +} {7} +do_test randexpr-2.1327 { + db eval {SELECT t1.d*coalesce((select d from t1 where coalesce((select max(t1.c) from t1 where 
11<>13),e)<=(select ~min(13) from t1)*t1.a),c)-coalesce((select (t1.b+17) | (abs( -~17)/abs(t1.d)) from t1 where not exists(select 1 from t1 where case t1.d when case when t1.f not in (t1.e,e,17) then f when t1.c not between 11 and -t1.a then b else 17 end+13 then t1.f else t1.b end+13+t1.b between 17 and 13)),t1.b)+t1.a FROM t1 WHERE (not d in (t1.d,(select max(e) from t1),case d*case when not exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where t1.b<=11-d+(select -max(13-t1.f) from t1)*t1.a),19)>t1.c) and c not between d and 19) then 11+~ -t1.a when t1.e13),e)<=(select ~min(13) from t1)*t1.a),c)-coalesce((select (t1.b+17) | (abs( -~17)/abs(t1.d)) from t1 where not exists(select 1 from t1 where case t1.d when case when t1.f not in (t1.e,e,17) then f when t1.c not between 11 and -t1.a then b else 17 end+13 then t1.f else t1.b end+13+t1.b between 17 and 13)),t1.b)+t1.a FROM t1 WHERE NOT ((not d in (t1.d,(select max(e) from t1),case d*case when not exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where t1.b<=11-d+(select -max(13-t1.f) from t1)*t1.a),19)>t1.c) and c not between d and 19) then 11+~ -t1.a when t1.e13),e)<=(select ~min(13) from t1)*t1.a),c)-coalesce((select (t1.b+17) & (abs( -~17)/abs(t1.d)) from t1 where not exists(select 1 from t1 where case t1.d when case when t1.f not in (t1.e,e,17) then f when t1.c not between 11 and -t1.a then b else 17 end+13 then t1.f else t1.b end+13+t1.b between 17 and 13)),t1.b)+t1.a FROM t1 WHERE NOT ((not d in (t1.d,(select max(e) from t1),case d*case when not exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where t1.b<=11-d+(select -max(13-t1.f) from t1)*t1.a),19)>t1.c) and c not between d and 19) then 11+~ -t1.a when t1.e=f then t1.b else t1.a end*a+19-13 FROM t1 WHERE not exists(select 1 from t1 where exists(select 1 from t1 where (select min(case when 13 not in (19,e-t1.a*17,17- -case 19 when 17+17 then a else t1.d end+f-t1.a*11) then 17 else t1.b end*17+11-b) from t1) between 11 and f or t1.e<>t1.d)) or c not in (t1.c,17,d)} +} {} +do_test randexpr-2.1331 { + db eval {SELECT d+~e-case when exists(select 1 from t1 where -a in (select max( -(select -count(*)-min(e)++cast(avg(case when d in (select +max(c) from t1 union select ((count(distinct (e)))) from t1) then (c) when t1.c=f then t1.b else t1.a end*a+19-13 FROM t1 WHERE NOT (not exists(select 1 from t1 where exists(select 1 from t1 where (select min(case when 13 not in (19,e-t1.a*17,17- -case 19 when 17+17 then a else t1.d end+f-t1.a*11) then 17 else t1.b end*17+11-b) from t1) between 11 and f or t1.e<>t1.d)) or c not in (t1.c,17,d))} +} {-20095} +do_test randexpr-2.1332 { + db eval {SELECT case coalesce((select 11*19*t1.a+(abs(t1.e*(select count(distinct -d) from t1)*case t1.f when +c then d else c end+b)/abs(d)) from t1 where t1.b*13=(e)),13) when (t1.d)*b-19*(e)*e-19+(t1.d) then 13 else b end*a FROM t1 WHERE not d=19 and (abs(t1.c)/abs(17))-ab))) then case when 13 not between t1.e and t1.a then c when t1.c>t1.c then 13 else a end | 19 else (19) end*f*13) from t1 where 17=t1.d and (b<>11)),t1.b)+t1.a | d} +} {20000} +do_test randexpr-2.1333 { + db eval {SELECT case coalesce((select 11*19*t1.a+(abs(t1.e*(select count(distinct -d) from t1)*case t1.f when +c then d else c end+b)/abs(d)) from t1 where t1.b*13=(e)),13) when (t1.d)*b-19*(e)*e-19+(t1.d) then 13 else b end*a FROM t1 WHERE NOT (not d=19 and (abs(t1.c)/abs(17))-ab))) then case when 13 
not between t1.e and t1.a then c when t1.c>t1.c then 13 else a end | 19 else (19) end*f*13) from t1 where 17=t1.d and (b<>11)),t1.b)+t1.a | d)} +} {} +do_test randexpr-2.1334 { + db eval {SELECT (c*(abs(coalesce((select t1.b from t1 where (select ~~count(*)+ -count(distinct coalesce((select max(case coalesce((select max(a+(t1.e)) from t1 where -(a)>=t1.e or t1.e<17),c) when t1.d then t1.a else 17 end) from t1 where t1.b not in (e,(c),t1.a)),f)) from t1) between t1.a and b),case when exists(select 1 from t1 where t1.c>=t1.a) then 19 else t1.e end+t1.c))/abs(b))-11 | 19) FROM t1 WHERE not t1.e<=d or not exists(select 1 from t1 where exists(select 1 from t1 where ( -t1.c+d)>t1.a+case when f | -b not between 17 and e then -t1.d else (19) end) or 11 not between 17 and t1.f) or a in (select -cast(avg(13) AS integer)*count(distinct 17)-min(b)- -max(11) | (count(*)) from t1 union select ((((max(b))))) from t1) and t1.e in (13,(13),b)} +} {307} +do_test randexpr-2.1335 { + db eval {SELECT (c*(abs(coalesce((select t1.b from t1 where (select ~~count(*)+ -count(distinct coalesce((select max(case coalesce((select max(a+(t1.e)) from t1 where -(a)>=t1.e or t1.e<17),c) when t1.d then t1.a else 17 end) from t1 where t1.b not in (e,(c),t1.a)),f)) from t1) between t1.a and b),case when exists(select 1 from t1 where t1.c>=t1.a) then 19 else t1.e end+t1.c))/abs(b))-11 | 19) FROM t1 WHERE NOT (not t1.e<=d or not exists(select 1 from t1 where exists(select 1 from t1 where ( -t1.c+d)>t1.a+case when f | -b not between 17 and e then -t1.d else (19) end) or 11 not between 17 and t1.f) or a in (select -cast(avg(13) AS integer)*count(distinct 17)-min(b)- -max(11) | (count(*)) from t1 union select ((((max(b))))) from t1) and t1.e in (13,(13),b))} +} {} +do_test randexpr-2.1336 { + db eval {SELECT (c*(abs(coalesce((select t1.b from t1 where (select ~~count(*)+ -count(distinct coalesce((select max(case coalesce((select max(a+(t1.e)) from t1 where -(a)>=t1.e or t1.e<17),c) when t1.d then t1.a else 17 end) from t1 where t1.b not in (e,(c),t1.a)),f)) from t1) between t1.a and b),case when exists(select 1 from t1 where t1.c>=t1.a) then 19 else t1.e end+t1.c))/abs(b))-11 & 19) FROM t1 WHERE not t1.e<=d or not exists(select 1 from t1 where exists(select 1 from t1 where ( -t1.c+d)>t1.a+case when f | -b not between 17 and e then -t1.d else (19) end) or 11 not between 17 and t1.f) or a in (select -cast(avg(13) AS integer)*count(distinct 17)-min(b)- -max(11) | (count(*)) from t1 union select ((((max(b))))) from t1) and t1.e in (13,(13),b)} +} {1} +do_test randexpr-2.1337 { + db eval {SELECT (select cast(avg(13*13* -coalesce((select b from t1 where case +(abs(17)/abs(d))*e+c-case -case when not a in (d,13,t1.b) then t1.b when t1.f<=b then e else -f end-17 when t1.e then t1.d else a end when c then b else f end<>e or ee or et1.d) or (t1.f in (select 11 from t1 union select t1.c from t1) or c>t1.e and b not in (t1.b, -t1.e,t1.a)) or c<>b),(a))-19*t1.c-c*t1.d and exists(select 1 from t1 where not exists(select 1 from t1 where t1.a in (t1.b,e,19) or not exists(select 1 from t1 where t1.c=19)))} +} {} +do_test randexpr-2.1340 { + db eval {SELECT b | t1.d- -coalesce((select case coalesce((select max((select (abs(cast(avg(coalesce((select max(coalesce((select 17 from t1 where -11=t1.e),a)) from t1 where not exists(select 1 from t1 where 13 between t1.f and 13)),11)-c) AS integer)))+min( -b) from t1)+b) from t1 where t1.d not in (b*19,t1.a,a)),t1.e) when t1.e then t1.b else t1.d end from t1 where (e not in (b,t1.a,(a)))),13) FROM t1 WHERE 
NOT (t1.c=coalesce((select t1.d*(select count(*) from t1) from t1 where ((abs(e)/abs(case 13 when t1.d then t1.e else 11 end+13))>t1.d) or (t1.f in (select 11 from t1 union select t1.c from t1) or c>t1.e and b not in (t1.b, -t1.e,t1.a)) or c<>b),(a))-19*t1.c-c*t1.d and exists(select 1 from t1 where not exists(select 1 from t1 where t1.a in (t1.b,e,19) or not exists(select 1 from t1 where t1.c=19))))} +} {1000} +do_test randexpr-2.1341 { + db eval {SELECT b & t1.d- -coalesce((select case coalesce((select max((select (abs(cast(avg(coalesce((select max(coalesce((select 17 from t1 where -11=t1.e),a)) from t1 where not exists(select 1 from t1 where 13 between t1.f and 13)),11)-c) AS integer)))+min( -b) from t1)+b) from t1 where t1.d not in (b*19,t1.a,a)),t1.e) when t1.e then t1.b else t1.d end from t1 where (e not in (b,t1.a,(a)))),13) FROM t1 WHERE NOT (t1.c=coalesce((select t1.d*(select count(*) from t1) from t1 where ((abs(e)/abs(case 13 when t1.d then t1.e else 11 end+13))>t1.d) or (t1.f in (select 11 from t1 union select t1.c from t1) or c>t1.e and b not in (t1.b, -t1.e,t1.a)) or c<>b),(a))-19*t1.c-c*t1.d and exists(select 1 from t1 where not exists(select 1 from t1 where t1.a in (t1.b,e,19) or not exists(select 1 from t1 where t1.c=19))))} +} {0} +do_test randexpr-2.1342 { + db eval {SELECT coalesce((select 19 from t1 where not exists(select 1 from t1 where 11+d between b and 19-case when t1.e>e*f+case when 13 in (select count(*) from t1 union select ~min(a*t1.d)+count(*) from t1) then c when (select min(d) from t1) in (select cast(avg(17) AS integer) from t1 union select min(t1.a) from t1) and 11<>(f) then -f else 19 end | c then 13 else 19 end*b*t1.b)),a) FROM t1 WHERE (coalesce((select d from t1 where coalesce((select ~(coalesce((select max(t1.a) from t1 where 13+t1.c-c*coalesce((select t1.a from t1 where ( -t1.d not between a and d)),t1.e)+t1.e- - -11-t1.d*11<17),d)) from t1 where f between t1.a and t1.a),t1.c)<>e),t1.d)*(19)-t1.e- -17-a>=d or t1.c between f and a)} +} {19} +do_test randexpr-2.1343 { + db eval {SELECT coalesce((select 19 from t1 where not exists(select 1 from t1 where 11+d between b and 19-case when t1.e>e*f+case when 13 in (select count(*) from t1 union select ~min(a*t1.d)+count(*) from t1) then c when (select min(d) from t1) in (select cast(avg(17) AS integer) from t1 union select min(t1.a) from t1) and 11<>(f) then -f else 19 end | c then 13 else 19 end*b*t1.b)),a) FROM t1 WHERE NOT ((coalesce((select d from t1 where coalesce((select ~(coalesce((select max(t1.a) from t1 where 13+t1.c-c*coalesce((select t1.a from t1 where ( -t1.d not between a and d)),t1.e)+t1.e- - -11-t1.d*11<17),d)) from t1 where f between t1.a and t1.a),t1.c)<>e),t1.d)*(19)-t1.e- -17-a>=d or t1.c between f and a))} +} {} +do_test randexpr-2.1344 { + db eval {SELECT coalesce((select 19 from t1 where not exists(select 1 from t1 where 11+d between b and 19-case when t1.e>e*f+case when 13 in (select count(*) from t1 union select ~min(a*t1.d)+count(*) from t1) then c when (select min(d) from t1) in (select cast(avg(17) AS integer) from t1 union select min(t1.a) from t1) and 11<>(f) then -f else 19 end & c then 13 else 19 end*b*t1.b)),a) FROM t1 WHERE (coalesce((select d from t1 where coalesce((select ~(coalesce((select max(t1.a) from t1 where 13+t1.c-c*coalesce((select t1.a from t1 where ( -t1.d not between a and d)),t1.e)+t1.e- - -11-t1.d*11<17),d)) from t1 where f between t1.a and t1.a),t1.c)<>e),t1.d)*(19)-t1.e- -17-a>=d or t1.c between f and a)} +} {19} +do_test randexpr-2.1345 { + db eval {SELECT 
coalesce((select +case case when f>t1.d then a+t1.b-f when coalesce((select max(t1.b-case f when 13*t1.a then 11 else 11 end*c) from t1 where f in (select min(t1.d) from t1 union select +count(distinct f) from t1)),17) not in (e,b,t1.a) then f else t1.b end-e*t1.a when -11 then 19 else (t1.d) end from t1 where 17 not between -13 and t1.e),19) FROM t1 WHERE not t1.b*coalesce((select max(17 | coalesce((select coalesce((select +coalesce((select max(case when (b not in (a,t1.b, -t1.a)) and t1.f=t1.b then t1.c-t1.f when 11>d then 17 else 11 end+f) from t1 where t1.e<>b), -t1.e) from t1 where t1.f in (select min(b) from t1 union select max(19)+count(distinct e)+(count(distinct e)) | - -count(distinct 17)*count(*) from t1)),13) from t1 where t1.f>=t1.f),t1.f)) from t1 where t1.f>=b),c)t1.d then a+t1.b-f when coalesce((select max(t1.b-case f when 13*t1.a then 11 else 11 end*c) from t1 where f in (select min(t1.d) from t1 union select +count(distinct f) from t1)),17) not in (e,b,t1.a) then f else t1.b end-e*t1.a when -11 then 19 else (t1.d) end from t1 where 17 not between -13 and t1.e),19) FROM t1 WHERE NOT (not t1.b*coalesce((select max(17 | coalesce((select coalesce((select +coalesce((select max(case when (b not in (a,t1.b, -t1.a)) and t1.f=t1.b then t1.c-t1.f when 11>d then 17 else 11 end+f) from t1 where t1.e<>b), -t1.e) from t1 where t1.f in (select min(b) from t1 union select max(19)+count(distinct e)+(count(distinct e)) | - -count(distinct 17)*count(*) from t1)),13) from t1 where t1.f>=t1.f),t1.f)) from t1 where t1.f>=b),c)11 then t1.a when 17 not between d and 13 or t1.f in (b,b,t1.a) then t1.d else c end)) then -t1.e else b end*t1.a FROM t1 WHERE a in ((abs(t1.c*+(abs(17-(abs(19)/abs(b-coalesce((select max(t1.c) from t1 where c>t1.f),t1.e))))/abs(t1.e))+t1.b | t1.d*t1.a)/abs(coalesce((select case when t1.f in (case when -e<=t1.a then f else 17 end,f,b) then b else t1.c end from t1 where not a in (t1.d,t1.a,c)),t1.f))),b,t1.b) and e in (select f from t1 union select e from t1)} +} {} +do_test randexpr-2.1348 { + db eval {SELECT -11* -t1.b-t1.b+ -case when t1.e-(select max((select abs((cast(avg(t1.f) AS integer)+++ -cast(avg(a | a) AS integer))) from t1))+(( -max( -b))) from t1)<=(abs(11)/abs((select count(distinct t1.e) from t1)-b*case when t1.a<>11 then t1.a when 17 not between d and 13 or t1.f in (b,b,t1.a) then t1.d else c end)) then -t1.e else b end*t1.a FROM t1 WHERE NOT (a in ((abs(t1.c*+(abs(17-(abs(19)/abs(b-coalesce((select max(t1.c) from t1 where c>t1.f),t1.e))))/abs(t1.e))+t1.b | t1.d*t1.a)/abs(coalesce((select case when t1.f in (case when -e<=t1.a then f else 17 end,f,b) then b else t1.c end from t1 where not a in (t1.d,t1.a,c)),t1.f))),b,t1.b) and e in (select f from t1 union select e from t1))} +} {52000} +do_test randexpr-2.1349 { + db eval {SELECT -11* -t1.b-t1.b+ -case when t1.e-(select max((select abs((cast(avg(t1.f) AS integer)+++ -cast(avg(a & a) AS integer))) from t1))+(( -max( -b))) from t1)<=(abs(11)/abs((select count(distinct t1.e) from t1)-b*case when t1.a<>11 then t1.a when 17 not between d and 13 or t1.f in (b,b,t1.a) then t1.d else c end)) then -t1.e else b end*t1.a FROM t1 WHERE NOT (a in ((abs(t1.c*+(abs(17-(abs(19)/abs(b-coalesce((select max(t1.c) from t1 where c>t1.f),t1.e))))/abs(t1.e))+t1.b | t1.d*t1.a)/abs(coalesce((select case when t1.f in (case when -e<=t1.a then f else 17 end,f,b) then b else t1.c end from t1 where not a in (t1.d,t1.a,c)),t1.f))),b,t1.b) and e in (select f from t1 union select e from t1))} +} {52000} +do_test randexpr-2.1350 { + db 
eval {SELECT case when ((select count(*) | min(case when (a*e in (select -+ -min(a) | cast(avg(11) AS integer) from t1 union select count(distinct t1.e) from t1) and exists(select 1 from t1 where not 13 in (select count(distinct 11) from t1 union select (max(19)) from t1) or (t1.f) between t1.e and (t1.e)) or t1.a<=f) or t1.et1.c then t1.e else t1.b end FROM t1 WHERE (c not between +(select + -~~max(coalesce((select case when t1.b<=t1.d*a then t1.c else t1.e end*t1.e from t1 where t1.d>=t1.b and c=(f) or t1.e>t1.d and bt1.c then t1.e else t1.b end FROM t1 WHERE NOT ((c not between +(select + -~~max(coalesce((select case when t1.b<=t1.d*a then t1.c else t1.e end*t1.e from t1 where t1.d>=t1.b and c=(f) or t1.e>t1.d and bt1.c then t1.e else t1.b end FROM t1 WHERE NOT ((c not between +(select + -~~max(coalesce((select case when t1.b<=t1.d*a then t1.c else t1.e end*t1.e from t1 where t1.d>=t1.b and c=(f) or t1.e>t1.d and bt1.c),t1.d)) from t1 where b not in (t1.e, -13,f)),f) FROM t1 WHERE d*a | 19<> -t1.d} +} {-500} +do_test randexpr-2.1354 { + db eval {SELECT coalesce((select max(coalesce((select max((abs((11))/abs(13))-e) from t1 where 11 not in (c,f,(select count(distinct -17+t1.a) from t1)) or (d*coalesce((select b | coalesce((select max(17) from t1 where e in (d,t1.c,t1.c)),+b) from t1 where d in (d-13,t1.c,f)),13))<>t1.c),t1.d)) from t1 where b not in (t1.e, -13,f)),f) FROM t1 WHERE NOT (d*a | 19<> -t1.d)} +} {} +do_test randexpr-2.1355 { + db eval {SELECT coalesce((select max(coalesce((select max((abs((11))/abs(13))-e) from t1 where 11 not in (c,f,(select count(distinct -17+t1.a) from t1)) or (d*coalesce((select b & coalesce((select max(17) from t1 where e in (d,t1.c,t1.c)),+b) from t1 where d in (d-13,t1.c,f)),13))<>t1.c),t1.d)) from t1 where b not in (t1.e, -13,f)),f) FROM t1 WHERE d*a | 19<> -t1.d} +} {-500} +do_test randexpr-2.1356 { + db eval {SELECT coalesce((select t1.f from t1 where case when 19<>(c*case when 19 | -t1.e in (select ~+(max(c)) from t1 union select min(19) from t1) then -t1.f else 13 end-f) then ( -17) else -19 end-b in (t1.b,t1.f,b) or c in (select ~case max( -t1.d) | count(*)*cast(avg(b) AS integer) when count(distinct t1.f) then cast(avg(t1.f) AS integer) else cast(avg(f) AS integer) end | (cast(avg( -t1.e) AS integer)) from t1 union select max(c) from t1) and t1.f in (select max(17) from t1 union select max(11) from t1)),e) FROM t1 WHERE not 13 in (select (t1.b)*(coalesce((select (abs(t1.f)/abs( -t1.f-d))-coalesce((select 19 from t1 where not exists(select 1 from t1 where (select count(distinct t1.f+19-(abs(e)/abs(e))) from t1)- -e<>d)),t1.e)* -t1.a-t1.a-(t1.f)*t1.e*t1.d-t1.b from t1 where t1.e between t1.a and b),11)) from t1 union select d from t1)} +} {500} +do_test randexpr-2.1357 { + db eval {SELECT coalesce((select t1.f from t1 where case when 19<>(c*case when 19 | -t1.e in (select ~+(max(c)) from t1 union select min(19) from t1) then -t1.f else 13 end-f) then ( -17) else -19 end-b in (t1.b,t1.f,b) or c in (select ~case max( -t1.d) | count(*)*cast(avg(b) AS integer) when count(distinct t1.f) then cast(avg(t1.f) AS integer) else cast(avg(f) AS integer) end | (cast(avg( -t1.e) AS integer)) from t1 union select max(c) from t1) and t1.f in (select max(17) from t1 union select max(11) from t1)),e) FROM t1 WHERE NOT (not 13 in (select (t1.b)*(coalesce((select (abs(t1.f)/abs( -t1.f-d))-coalesce((select 19 from t1 where not exists(select 1 from t1 where (select count(distinct t1.f+19-(abs(e)/abs(e))) from t1)- -e<>d)),t1.e)* -t1.a-t1.a-(t1.f)*t1.e*t1.d-t1.b from 
t1 where t1.e between t1.a and b),11)) from t1 union select d from t1))} +} {} +do_test randexpr-2.1358 { + db eval {SELECT coalesce((select t1.f from t1 where case when 19<>(c*case when 19 & -t1.e in (select ~+(max(c)) from t1 union select min(19) from t1) then -t1.f else 13 end-f) then ( -17) else -19 end-b in (t1.b,t1.f,b) or c in (select ~case max( -t1.d) & count(*)*cast(avg(b) AS integer) when count(distinct t1.f) then cast(avg(t1.f) AS integer) else cast(avg(f) AS integer) end & (cast(avg( -t1.e) AS integer)) from t1 union select max(c) from t1) and t1.f in (select max(17) from t1 union select max(11) from t1)),e) FROM t1 WHERE not 13 in (select (t1.b)*(coalesce((select (abs(t1.f)/abs( -t1.f-d))-coalesce((select 19 from t1 where not exists(select 1 from t1 where (select count(distinct t1.f+19-(abs(e)/abs(e))) from t1)- -e<>d)),t1.e)* -t1.a-t1.a-(t1.f)*t1.e*t1.d-t1.b from t1 where t1.e between t1.a and b),11)) from t1 union select d from t1)} +} {500} +do_test randexpr-2.1359 { + db eval {SELECT ~ -case when t1.d in (17,t1.e,case t1.b*c when -c+f-11 | coalesce((select 11 from t1 where (select (min(13)-max(t1.c))+count(*) from t1)+13<=(( -(t1.f)))),b)+17 then t1.d else b end) then c when c between 13 and f and b>17 then f else t1.a end-t1.f FROM t1 WHERE t1.e>= -f} +} {-1} +do_test randexpr-2.1360 { + db eval {SELECT ~ -case when t1.d in (17,t1.e,case t1.b*c when -c+f-11 | coalesce((select 11 from t1 where (select (min(13)-max(t1.c))+count(*) from t1)+13<=(( -(t1.f)))),b)+17 then t1.d else b end) then c when c between 13 and f and b>17 then f else t1.a end-t1.f FROM t1 WHERE NOT (t1.e>= -f)} +} {} +do_test randexpr-2.1361 { + db eval {SELECT ~ -case when t1.d in (17,t1.e,case t1.b*c when -c+f-11 & coalesce((select 11 from t1 where (select (min(13)-max(t1.c))+count(*) from t1)+13<=(( -(t1.f)))),b)+17 then t1.d else b end) then c when c between 13 and f and b>17 then f else t1.a end-t1.f FROM t1 WHERE t1.e>= -f} +} {-1} +do_test randexpr-2.1362 { + db eval {SELECT ((abs(13)/abs( -b | coalesce((select 17 | t1.e from t1 where (d in (select coalesce((select 17 | (a) | c*case when case when t1.c in (select max(13) | cast(avg(t1.b) AS integer) from t1 union select count(*) from t1) then t1.c when 13>=17 then (11) else t1.e end>t1.d and t1.b>=t1.f then 13 when 11 not in (t1.d, -f,13) then f else 11 end from t1 where b<>11),d) from t1 union select t1.e from t1) or b not in (t1.e,b,e))),t1.e)))) FROM t1 WHERE 11<=coalesce((select max(t1.d) from t1 where coalesce((select ~d+t1.d-case when c not in (t1.b-f,19+~coalesce((select t1.a from t1 where t1.f<=b),case when not (e not between f and t1.e) then (t1.e)-d else b end), -t1.d) then 17 when t1.d not between 13 and t1.d then d else (c) end from t1 where c>t1.a),t1.b)=17 then (11) else t1.e end>t1.d and t1.b>=t1.f then 13 when 11 not in (t1.d, -f,13) then f else 11 end from t1 where b<>11),d) from t1 union select t1.e from t1) or b not in (t1.e,b,e))),t1.e)))) FROM t1 WHERE NOT (11<=coalesce((select max(t1.d) from t1 where coalesce((select ~d+t1.d-case when c not in (t1.b-f,19+~coalesce((select t1.a from t1 where t1.f<=b),case when not (e not between f and t1.e) then (t1.e)-d else b end), -t1.d) then 17 when t1.d not between 13 and t1.d then d else (c) end from t1 where c>t1.a),t1.b)=17 then (11) else t1.e end>t1.d and t1.b>=t1.f then 13 when 11 not in (t1.d, -f,13) then f else 11 end from t1 where b<>11),d) from t1 union select t1.e from t1) or b not in (t1.e,b,e))),t1.e)))) FROM t1 WHERE 11<=coalesce((select max(t1.d) from t1 where 
coalesce((select ~d+t1.d-case when c not in (t1.b-f,19+~coalesce((select t1.a from t1 where t1.f<=b),case when not (e not between f and t1.e) then (t1.e)-d else b end), -t1.d) then 17 when t1.d not between 13 and t1.d then d else (c) end from t1 where c>t1.a),t1.b)t1.d and a between d and d) then c else d end when exists(select 1 from t1 where (11)>19) or a in (t1.b,(t1.c),(a)) and - -11<>13 and t1.b not between -b and d then 19 else a end FROM t1 WHERE 19 | t1.d not between (select ~case case count(distinct t1.c) when (count(*)) then ~~~count(*) else -count(*)- -max(coalesce((select max(19) from t1 where (abs(t1.b-b)/abs(d))+11=t1.a),11))*max(t1.e) | count(*)-abs(max((c)))*max(t1.d) end when -count(*) then cast(avg(f) AS integer) else -(max(c)) end from t1) and t1.c} +} {400} +do_test randexpr-2.1366 { + db eval {SELECT case when (c between 11 and t1.e*t1.a | c) then case when (case when (b=(select max((t1.c))-min(t1.d) from t1)*b) then t1.c else 17 end-e>t1.d and a between d and d) then c else d end when exists(select 1 from t1 where (11)>19) or a in (t1.b,(t1.c),(a)) and - -11<>13 and t1.b not between -b and d then 19 else a end FROM t1 WHERE NOT (19 | t1.d not between (select ~case case count(distinct t1.c) when (count(*)) then ~~~count(*) else -count(*)- -max(coalesce((select max(19) from t1 where (abs(t1.b-b)/abs(d))+11=t1.a),11))*max(t1.e) | count(*)-abs(max((c)))*max(t1.d) end when -count(*) then cast(avg(f) AS integer) else -(max(c)) end from t1) and t1.c)} +} {} +do_test randexpr-2.1367 { + db eval {SELECT case when (c between 11 and t1.e*t1.a & c) then case when (case when (b=(select max((t1.c))-min(t1.d) from t1)*b) then t1.c else 17 end-e>t1.d and a between d and d) then c else d end when exists(select 1 from t1 where (11)>19) or a in (t1.b,(t1.c),(a)) and - -11<>13 and t1.b not between -b and d then 19 else a end FROM t1 WHERE 19 | t1.d not between (select ~case case count(distinct t1.c) when (count(*)) then ~~~count(*) else -count(*)- -max(coalesce((select max(19) from t1 where (abs(t1.b-b)/abs(d))+11=t1.a),11))*max(t1.e) | count(*)-abs(max((c)))*max(t1.d) end when -count(*) then cast(avg(f) AS integer) else -(max(c)) end from t1) and t1.c} +} {100} +do_test randexpr-2.1368 { + db eval {SELECT case when not exists(select 1 from t1 where t1.c not in (t1.d,17,t1.c | coalesce((select t1.f from t1 where t1.c*t1.c-t1.e not in (t1.e*(case when (f) not between c and +f then (a) else t1.e end)+17,13, -((t1.a))) and f not between f and t1.d), -t1.e))) then (abs(e)/abs(a)) when 17<17 or 13<>a then f else (f) end FROM t1 WHERE t1.f in (e,(19),case when not exists(select 1 from t1 where coalesce((select max( -(abs(case -t1.d when t1.d*13+coalesce((select a from t1 where d>19),t1.c) then f else b end)/abs(t1.b))) from t1 where f in (t1.a,t1.d,(c))),t1.d)+c+17a then f else (f) end FROM t1 WHERE NOT (t1.f in (e,(19),case when not exists(select 1 from t1 where coalesce((select max( -(abs(case -t1.d when t1.d*13+coalesce((select a from t1 where d>19),t1.c) then f else b end)/abs(t1.b))) from t1 where f in (t1.a,t1.d,(c))),t1.d)+c+17a then f else (f) end FROM t1 WHERE NOT (t1.f in (e,(19),case when not exists(select 1 from t1 where coalesce((select max( -(abs(case -t1.d when t1.d*13+coalesce((select a from t1 where d>19),t1.c) then f else b end)/abs(t1.b))) from t1 where f in (t1.a,t1.d,(c))),t1.d)+c+17coalesce((select max(coalesce((select max(case when case case when 19 in (select case min(11) when - -cast(avg(c) AS integer) then count(*) else count(*) end-cast(avg(19) AS integer) | 
cast(avg(17) AS integer) from t1 union select min(t1.d) from t1) then t1.b*t1.a else -t1.a end when t1.f then e else t1.d endd),t1.d)) from t1 where t1.c<>e and -17<=a),t1.d))} +} {413} +do_test randexpr-2.1372 { + db eval {SELECT case +b when coalesce((select max(b) from t1 where exists(select 1 from t1 where (((+~13) in (select case cast(avg(t1.e+(11)+t1.b*e) AS integer) when max(((t1.e))) then min(b)-case -cast(avg(17) AS integer) when min(e) then cast(avg(t1.c) AS integer) else (max(d)) end-count(distinct t1.f) else count(distinct d) end+count(*) from t1 union select max(d) from t1))))),(abs(~d)/abs(11))) then -e else - -13 end+d | t1.d FROM t1 WHERE NOT (((a)>coalesce((select max(coalesce((select max(case when case case when 19 in (select case min(11) when - -cast(avg(c) AS integer) then count(*) else count(*) end-cast(avg(19) AS integer) | cast(avg(17) AS integer) from t1 union select min(t1.d) from t1) then t1.b*t1.a else -t1.a end when t1.f then e else t1.d endd),t1.d)) from t1 where t1.c<>e and -17<=a),t1.d)))} +} {} +do_test randexpr-2.1373 { + db eval {SELECT case +b when coalesce((select max(b) from t1 where exists(select 1 from t1 where (((+~13) in (select case cast(avg(t1.e+(11)+t1.b*e) AS integer) when max(((t1.e))) then min(b)-case -cast(avg(17) AS integer) when min(e) then cast(avg(t1.c) AS integer) else (max(d)) end-count(distinct t1.f) else count(distinct d) end+count(*) from t1 union select max(d) from t1))))),(abs(~d)/abs(11))) then -e else - -13 end+d & t1.d FROM t1 WHERE ((a)>coalesce((select max(coalesce((select max(case when case case when 19 in (select case min(11) when - -cast(avg(c) AS integer) then count(*) else count(*) end-cast(avg(19) AS integer) | cast(avg(17) AS integer) from t1 union select min(t1.d) from t1) then t1.b*t1.a else -t1.a end when t1.f then e else t1.d endd),t1.d)) from t1 where t1.c<>e and -17<=a),t1.d))} +} {400} +do_test randexpr-2.1374 { + db eval {SELECT +c+case when (~t1.e in (+(select case cast(avg(17) AS integer)+min(19)+count(*) when -max(c) then count(distinct t1.c) else - - -(max(13)) end*count(distinct ( -17)) from t1)-case when d+case when f<>d or t1.d<=t1.e then t1.a else -11 end in (select cast(avg(t1.e) AS integer) from t1 union select cast(avg(b) AS integer) from t1) then 19 when ((t1.c) between e and a and -13 not in (d,f,e)) then t1.e else d end,a,t1.b)) then case when t1.b<19 then 13 else 19 end else t1.c end FROM t1 WHERE not exists(select 1 from t1 where 19>t1.f+coalesce((select 13 from t1 where t1.d in (coalesce((select max(e) from t1 where c=t1.c),t1.d) between t1.b and c))) then -f+(t1.b)-t1.e-t1.c+t1.c when c in (select -( -f) from t1 union select 13 from t1) then t1.e else t1.a end),t1.b,a)),11))} +} {600} +do_test randexpr-2.1375 { + db eval {SELECT +c+case when (~t1.e in (+(select case cast(avg(17) AS integer)+min(19)+count(*) when -max(c) then count(distinct t1.c) else - - -(max(13)) end*count(distinct ( -17)) from t1)-case when d+case when f<>d or t1.d<=t1.e then t1.a else -11 end in (select cast(avg(t1.e) AS integer) from t1 union select cast(avg(b) AS integer) from t1) then 19 when ((t1.c) between e and a and -13 not in (d,f,e)) then t1.e else d end,a,t1.b)) then case when t1.b<19 then 13 else 19 end else t1.c end FROM t1 WHERE NOT (not exists(select 1 from t1 where 19>t1.f+coalesce((select 13 from t1 where t1.d in (coalesce((select max(e) from t1 where c=t1.c),t1.d) between t1.b and c))) then -f+(t1.b)-t1.e-t1.c+t1.c when c in (select -( -f) from t1 union select 13 from t1) then t1.e else t1.a 
end),t1.b,a)),11)))} +} {} +do_test randexpr-2.1376 { + db eval {SELECT (17-t1.e-17*t1.d+coalesce((select -(abs(t1.a)/abs(t1.e))-t1.e*13*c*c-c from t1 where coalesce((select max(t1.a) from t1 where +case when t1.a not in (e,(t1.b),t1.f) then 11 when t1.b not between t1.c and t1.c then d else b end not in (13,t1.c,b)),f) between f and 11),d)-t1.d)-f+13 FROM t1 WHERE t1.f<>case when 11 in (coalesce((select e from t1 where not exists(select 1 from t1 where not case when t1.c between 11 and f then +t1.d else (((abs(coalesce((select -t1.c from t1 where (c in (b,b,t1.e))),13) | t1.e*t1.d)/abs(c))-t1.f)) end-t1.d=c)),t1.e),d,t1.e) or t1.bcase when 11 in (coalesce((select e from t1 where not exists(select 1 from t1 where not case when t1.c between 11 and f then +t1.d else (((abs(coalesce((select -t1.c from t1 where (c in (b,b,t1.e))),13) | t1.e*t1.d)/abs(c))-t1.f)) end-t1.d=c)),t1.e),d,t1.e) or t1.b=e or t1.a not in (t1.c,e,t1.f)),t1.b)+19<=t1.a) then ((b)) when d>=b or d not between 19 and t1.e then t1.c else t1.b end-t1.a*f) from t1 where 13 between t1.f and t1.b or e not in (t1.e,b,19)),t1.a) then d else t1.a end FROM t1 WHERE -b>=(select max(19) from t1)} +} {} +do_test randexpr-2.1379 { + db eval {SELECT case +c when 19*t1.b*coalesce((select t1.b-(19* -case when (coalesce((select t1.a from t1 where c>=e or t1.a not in (t1.c,e,t1.f)),t1.b)+19<=t1.a) then ((b)) when d>=b or d not between 19 and t1.e then t1.c else t1.b end-t1.a*f) from t1 where 13 between t1.f and t1.b or e not in (t1.e,b,19)),t1.a) then d else t1.a end FROM t1 WHERE NOT ( -b>=(select max(19) from t1))} +} {100} +do_test randexpr-2.1380 { + db eval {SELECT d+(abs(case t1.e when e*(abs(17)/abs(b)) then f else case when not not 13=c then coalesce((select t1.b from t1 where ((e-11 in (select 19 from t1 union select 17 from t1))) or f>=f),d) else case when c in (t1.a,t1.c,d) then t1.e when -11 in (t1.a,e,t1.d) then t1.a else 19 end end-t1.e end*19)/abs(t1.c))-t1.e+d*t1.f FROM t1 WHERE t1.d+t1.c*coalesce((select e from t1 where coalesce((select max(t1.f) from t1 where not f in (select abs(case min(case when a not between a and f then 17 when e>17 then t1.d else 13 end) when abs(case cast(avg(t1.a) AS integer) when -min(t1.b) then count(distinct 17) else (count(distinct t1.a)) end)-count(distinct -b) then count(distinct d) else max(t1.f) end)+count(*)*max(e) from t1 union select (cast(avg(t1.c) AS integer)) from t1)),b*case 19 when t1.a then 11 else e end)<=f),17)=c} +} {} +do_test randexpr-2.1381 { + db eval {SELECT d+(abs(case t1.e when e*(abs(17)/abs(b)) then f else case when not not 13=c then coalesce((select t1.b from t1 where ((e-11 in (select 19 from t1 union select 17 from t1))) or f>=f),d) else case when c in (t1.a,t1.c,d) then t1.e when -11 in (t1.a,e,t1.d) then t1.a else 19 end end-t1.e end*19)/abs(t1.c))-t1.e+d*t1.f FROM t1 WHERE NOT (t1.d+t1.c*coalesce((select e from t1 where coalesce((select max(t1.f) from t1 where not f in (select abs(case min(case when a not between a and f then 17 when e>17 then t1.d else 13 end) when abs(case cast(avg(t1.a) AS integer) when -min(t1.b) then count(distinct 17) else (count(distinct t1.a)) end)-count(distinct -b) then count(distinct d) else max(t1.f) end)+count(*)*max(e) from t1 union select (cast(avg(t1.c) AS integer)) from t1)),b*case 19 when t1.a then 11 else e end)<=f),17)=c)} +} {239900} +do_test randexpr-2.1382 { + db eval {SELECT case when ~t1.b=t1.b-coalesce((select 17 from t1 where t1.d-a>=(abs(case when c*11 between 19+(select cast(avg(13*b) AS integer) from t1) and case 
t1.e-t1.f when t1.a then 11 else a end then 19 else 17 end-c+a)/abs(t1.a))),e) then (f) when exists(select 1 from t1 where (11) in (c,t1.c,t1.b)) then f else 19 end*t1.f FROM t1 WHERE case 19 when t1.b then b*case t1.a when +t1.f then e else 17+t1.a end-11++(select ~~abs(~count(*))+count(distinct 13) from t1)*t1.b-t1.c- -a else 17 end>=t1.e} +} {} +do_test randexpr-2.1383 { + db eval {SELECT case when ~t1.b=t1.b-coalesce((select 17 from t1 where t1.d-a>=(abs(case when c*11 between 19+(select cast(avg(13*b) AS integer) from t1) and case t1.e-t1.f when t1.a then 11 else a end then 19 else 17 end-c+a)/abs(t1.a))),e) then (f) when exists(select 1 from t1 where (11) in (c,t1.c,t1.b)) then f else 19 end*t1.f FROM t1 WHERE NOT (case 19 when t1.b then b*case t1.a when +t1.f then e else 17+t1.a end-11++(select ~~abs(~count(*))+count(distinct 13) from t1)*t1.b-t1.c- -a else 17 end>=t1.e)} +} {11400} +do_test randexpr-2.1384 { + db eval {SELECT case when t1.a<=e and d*(t1.d)+t1.d++a-t1.f-a<=e or c not between b and d and 19 not between t1.e and 11 or -t1.b in (select b from t1 union select 17 from t1) and t1.d<>t1.e or t1.d in (select t1.c from t1 union select t1.d from t1) then t1.e else 17 end+f+t1.b FROM t1 WHERE t1.c*~case 19+coalesce((select 19 from t1 where e>11-d),t1.e) when coalesce((select max(11) from t1 where not t1.f in (select ~abs(cast(avg(coalesce((select max(b+f) from t1 where -f=(t1.b) and a between t1.c and t1.b),a)) AS integer))+ -count(distinct t1.b)- -max(f)*max((f)) from t1 union select min(t1.e) from t1) or a in (select +d*d from t1 union select t1.c from t1)),t1.e) then t1.d else t1.c end*t1.bt1.e or t1.d in (select t1.c from t1 union select t1.d from t1) then t1.e else 17 end+f+t1.b FROM t1 WHERE NOT (t1.c*~case 19+coalesce((select 19 from t1 where e>11-d),t1.e) when coalesce((select max(11) from t1 where not t1.f in (select ~abs(cast(avg(coalesce((select max(b+f) from t1 where -f=(t1.b) and a between t1.c and t1.b),a)) AS integer))+ -count(distinct t1.b)- -max(f)*max((f)) from t1 union select min(t1.e) from t1) or a in (select +d*d from t1 union select t1.c from t1)),t1.e) then t1.d else t1.c end*t1.bb)),e)-13)/abs(t1.e))+17)),t1.a)*t1.d+b FROM t1 WHERE not exists(select 1 from t1 where t1.b=case 19 when f then d+case when t1.c not between 13 and d | (select abs((min(17) | (min(t1.f)))) from t1) and coalesce((select max(11*19) from t1 where not exists(select 1 from t1 where (e>=t1.b))),11) not in (t1.c,19,t1.c) or t1.a in (select count(*) from t1 union select max(c)+cast(avg(11) AS integer) from t1) and (c) not between t1.a and t1.d and t1.b between 11 and a then t1.d else c end else (t1.f) end)} +} {40200} +do_test randexpr-2.1387 { + db eval {SELECT coalesce((select 17 from t1 where (~f=(abs(coalesce((select t1.d from t1 where case (select count(distinct c | 13)- -(cast(avg(11) AS integer)) from t1)+coalesce((select 19 from t1 where d in (select t1.c from t1 union select 17 from t1)),t1.f) when e then t1.c else t1.d end*d<=19 and not exists(select 1 from t1 where (e)<>b)),e)-13)/abs(t1.e))+17)),t1.a)*t1.d+b FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.b=case 19 when f then d+case when t1.c not between 13 and d | (select abs((min(17) | (min(t1.f)))) from t1) and coalesce((select max(11*19) from t1 where not exists(select 1 from t1 where (e>=t1.b))),11) not in (t1.c,19,t1.c) or t1.a in (select count(*) from t1 union select max(c)+cast(avg(11) AS integer) from t1) and (c) not between t1.a and t1.d and t1.b between 11 and a then t1.d else c end else (t1.f) end))} +} 
{} +do_test randexpr-2.1388 { + db eval {SELECT coalesce((select 17 from t1 where (~f=(abs(coalesce((select t1.d from t1 where case (select count(distinct c & 13)- -(cast(avg(11) AS integer)) from t1)+coalesce((select 19 from t1 where d in (select t1.c from t1 union select 17 from t1)),t1.f) when e then t1.c else t1.d end*d<=19 and not exists(select 1 from t1 where (e)<>b)),e)-13)/abs(t1.e))+17)),t1.a)*t1.d+b FROM t1 WHERE not exists(select 1 from t1 where t1.b=case 19 when f then d+case when t1.c not between 13 and d | (select abs((min(17) | (min(t1.f)))) from t1) and coalesce((select max(11*19) from t1 where not exists(select 1 from t1 where (e>=t1.b))),11) not in (t1.c,19,t1.c) or t1.a in (select count(*) from t1 union select max(c)+cast(avg(11) AS integer) from t1) and (c) not between t1.a and t1.d and t1.b between 11 and a then t1.d else c end else (t1.f) end)} +} {40200} +do_test randexpr-2.1389 { + db eval {SELECT coalesce((select max(t1.f+19+case -d when t1.f then t1.e else e end+case when +d in (select case when exists(select 1 from t1 where coalesce((select max( -17-d+t1.b-t1.d) from t1 where e not in (f,t1.e,11)),c) | t1.e>=c) then b else d end from t1 union select c from t1) then t1.b else d end-e*b+11) from t1 where f<>t1.b),d) FROM t1 WHERE (exists(select 1 from t1 where f> -t1.a*11 or e in (a-~ -t1.e,+a | e*case when coalesce((select t1.c from t1 where t1.f in (select (count(*)) from t1 union select ((count(*))) from t1)),a)-t1.e not in (e,t1.a,d) or 19 in (t1.b,t1.c,13) and c>19 then b*d else ( -t1.d) end-t1.a*t1.b*19,b)))} +} {-98470} +do_test randexpr-2.1390 { + db eval {SELECT coalesce((select max(t1.f+19+case -d when t1.f then t1.e else e end+case when +d in (select case when exists(select 1 from t1 where coalesce((select max( -17-d+t1.b-t1.d) from t1 where e not in (f,t1.e,11)),c) | t1.e>=c) then b else d end from t1 union select c from t1) then t1.b else d end-e*b+11) from t1 where f<>t1.b),d) FROM t1 WHERE NOT ((exists(select 1 from t1 where f> -t1.a*11 or e in (a-~ -t1.e,+a | e*case when coalesce((select t1.c from t1 where t1.f in (select (count(*)) from t1 union select ((count(*))) from t1)),a)-t1.e not in (e,t1.a,d) or 19 in (t1.b,t1.c,13) and c>19 then b*d else ( -t1.d) end-t1.a*t1.b*19,b))))} +} {} +do_test randexpr-2.1391 { + db eval {SELECT coalesce((select max(t1.f+19+case -d when t1.f then t1.e else e end+case when +d in (select case when exists(select 1 from t1 where coalesce((select max( -17-d+t1.b-t1.d) from t1 where e not in (f,t1.e,11)),c) & t1.e>=c) then b else d end from t1 union select c from t1) then t1.b else d end-e*b+11) from t1 where f<>t1.b),d) FROM t1 WHERE (exists(select 1 from t1 where f> -t1.a*11 or e in (a-~ -t1.e,+a | e*case when coalesce((select t1.c from t1 where t1.f in (select (count(*)) from t1 union select ((count(*))) from t1)),a)-t1.e not in (e,t1.a,d) or 19 in (t1.b,t1.c,13) and c>19 then b*d else ( -t1.d) end-t1.a*t1.b*19,b)))} +} {-98670} +do_test randexpr-2.1392 { + db eval {SELECT case t1.b* -(abs(e)/abs(d+b)) when ~a-11 then coalesce((select max(case when (~~19- -t1.d=f) then +(abs(t1.c)/abs(t1.e)) when (t1.d>e or t1.a>=t1.a) then 19 else t1.a end*b-(19)-a-t1.c) from t1 where t1.f>=11),13) else b end FROM t1 WHERE (not exists(select 1 from t1 where t1.e<=t1.f) or c- -e*17 in (t1.c+c, -b-17, -coalesce((select case when +coalesce((select max(coalesce((select t1.f from t1 where t1.d in (select max(t1.d)-count(*) from t1 union select max( -t1.a) from t1)),e)+11) from t1 where ( -19)>=t1.a),t1.e)-13 not between t1.a and d then 
t1.d else 19 end+t1.b from t1 where t1.d<>19),11)))} +} {} +do_test randexpr-2.1393 { + db eval {SELECT case t1.b* -(abs(e)/abs(d+b)) when ~a-11 then coalesce((select max(case when (~~19- -t1.d=f) then +(abs(t1.c)/abs(t1.e)) when (t1.d>e or t1.a>=t1.a) then 19 else t1.a end*b-(19)-a-t1.c) from t1 where t1.f>=11),13) else b end FROM t1 WHERE NOT ((not exists(select 1 from t1 where t1.e<=t1.f) or c- -e*17 in (t1.c+c, -b-17, -coalesce((select case when +coalesce((select max(coalesce((select t1.f from t1 where t1.d in (select max(t1.d)-count(*) from t1 union select max( -t1.a) from t1)),e)+11) from t1 where ( -19)>=t1.a),t1.e)-13 not between t1.a and d then t1.d else 19 end+t1.b from t1 where t1.d<>19),11))))} +} {200} +do_test randexpr-2.1394 { + db eval {SELECT (select case case min(d-t1.c)-max(t1.a) when ~count(distinct d+case -f*t1.d+11 when b then b else b end)-min(c) then case +max(t1.a)*~abs(count(*)) when -min(13) then max(17) else count(*) end else -max(e) end when max(t1.a) then max(d) else max(17) end-count(distinct t1.b) from t1)-(d) FROM t1 WHERE +19 between b*11 and (select max(d-13) | ~+~+~((count(*)))*cast(avg(b) AS integer) from t1)+b} +} {} +do_test randexpr-2.1395 { + db eval {SELECT (select case case min(d-t1.c)-max(t1.a) when ~count(distinct d+case -f*t1.d+11 when b then b else b end)-min(c) then case +max(t1.a)*~abs(count(*)) when -min(13) then max(17) else count(*) end else -max(e) end when max(t1.a) then max(d) else max(17) end-count(distinct t1.b) from t1)-(d) FROM t1 WHERE NOT (+19 between b*11 and (select max(d-13) | ~+~+~((count(*)))*cast(avg(b) AS integer) from t1)+b)} +} {-384} +do_test randexpr-2.1396 { + db eval {SELECT (abs(case when +a*t1.e+(abs(t1.e | 11+coalesce((select max(d) from t1 where (not not b19),17)) and t1.b>13 and d=t1.a and 13 in (d,t1.a,11)),t1.e)*d)/abs(11+t1.a-19))=c or b<>c then t1.e when b<>19 then 17 else b end)/abs(11)) FROM t1 WHERE (coalesce((select max(case when exists(select 1 from t1 where coalesce((select a from t1 where not 19-~(coalesce((select t1.d from t1 where d between e and 17),t1.a)-t1.a-17) in (t1.d,t1.e,19)),19)*11*d in (select d from t1 union select t1.b from t1)) then c when d between t1.a and -19 then t1.e else 19 end-d*c) from t1 where (t1.f>=t1.c)),11)) not in (17,17,a)} +} {45} +do_test randexpr-2.1397 { + db eval {SELECT (abs(case when +a*t1.e+(abs(t1.e | 11+coalesce((select max(d) from t1 where (not not b19),17)) and t1.b>13 and d=t1.a and 13 in (d,t1.a,11)),t1.e)*d)/abs(11+t1.a-19))=c or b<>c then t1.e when b<>19 then 17 else b end)/abs(11)) FROM t1 WHERE NOT ((coalesce((select max(case when exists(select 1 from t1 where coalesce((select a from t1 where not 19-~(coalesce((select t1.d from t1 where d between e and 17),t1.a)-t1.a-17) in (t1.d,t1.e,19)),19)*11*d in (select d from t1 union select t1.b from t1)) then c when d between t1.a and -19 then t1.e else 19 end-d*c) from t1 where (t1.f>=t1.c)),11)) not in (17,17,a))} +} {} +do_test randexpr-2.1398 { + db eval {SELECT (abs(case when +a*t1.e+(abs(t1.e & 11+coalesce((select max(d) from t1 where (not not b19),17)) and t1.b>13 and d=t1.a and 13 in (d,t1.a,11)),t1.e)*d)/abs(11+t1.a-19))=c or b<>c then t1.e when b<>19 then 17 else b end)/abs(11)) FROM t1 WHERE (coalesce((select max(case when exists(select 1 from t1 where coalesce((select a from t1 where not 19-~(coalesce((select t1.d from t1 where d between e and 17),t1.a)-t1.a-17) in (t1.d,t1.e,19)),19)*11*d in (select d from t1 union select t1.b from t1)) then c when d between t1.a and -19 then t1.e else 19 end-d*c) 
from t1 where (t1.f>=t1.c)),11)) not in (17,17,a)} +} {45} +do_test randexpr-2.1399 { + db eval {SELECT 19+(t1.f)*case coalesce((select max(b) from t1 where not f not between coalesce((select (select (count(distinct t1.c))+ -max( -(f)) from t1)+t1.c from t1 where t1.c<=d),17)-c-19 and 17 or f<>13),case when not exists(select 1 from t1 where ((a)) between b and t1.f) then c when not ( -f not in (t1.b,t1.a,t1.c)) then -t1.b else 19 end)-t1.d-t1.e when t1.e then 11 else 19 end FROM t1 WHERE not not exists(select 1 from t1 where not 11=c and a*e between coalesce((select max(t1.e) from t1 where f=t1.b*19+case when (e not in (t1.d,t1.d, -c) or t1.a13),case when not exists(select 1 from t1 where ((a)) between b and t1.f) then c when not ( -f not in (t1.b,t1.a,t1.c)) then -t1.b else 19 end)-t1.d-t1.e when t1.e then 11 else 19 end FROM t1 WHERE NOT (not not exists(select 1 from t1 where not 11=c and a*e between coalesce((select max(t1.e) from t1 where f=t1.b*19+case when (e not in (t1.d,t1.d, -c) or t1.a19) or t1.d=a)),19) FROM t1 WHERE t1.c not between f and d} +} {19} +do_test randexpr-2.1402 { + db eval {SELECT coalesce((select max(19) from t1 where e-coalesce((select 13 from t1 where exists(select 1 from t1 where ((+(case when d*t1.b not in (t1.d,t1.f,e) then 11 else t1.c end))+19*11+e<=11))),19+t1.d) between t1.c and -e and exists(select 1 from t1 where t1.d=c and exists(select 1 from t1 where exists(select 1 from t1 where t1.f=b and a in (19,t1.b,d) or t1.b=11) or t1.c>19) or t1.d=a)),19) FROM t1 WHERE NOT (t1.c not between f and d)} +} {} +do_test randexpr-2.1403 { + db eval {SELECT case e when (t1.e+case when t1.e not between t1.c and case when d+coalesce((select t1.d+13+case case a when t1.a then t1.b else t1.c end when 17 then t1.b else f end+19 from t1 where exists(select 1 from t1 where t1.f< -17)),11)>=d then 13 else 13 end*(t1.c) then -c when t1.e not between t1.f and t1.d then t1.a else ( - -t1.a) end-d) then t1.a else t1.d end FROM t1 WHERE t1.d between case when -(t1.e-c)+a between -+coalesce((select max(+t1.c) from t1 where (coalesce((select max(case when d>~t1.e then t1.c when t1.f in (select t1.d from t1 union select t1.e from t1) then (19) else t1.e end) from t1 where not exists(select 1 from t1 where t1.e< -(11))),t1.f) in (select min(t1.f) from t1 union select count(distinct 13) from t1))),t1.e) and t1.d then a when 13<=c then b else t1.c end-c and f} +} {400} +do_test randexpr-2.1404 { + db eval {SELECT case e when (t1.e+case when t1.e not between t1.c and case when d+coalesce((select t1.d+13+case case a when t1.a then t1.b else t1.c end when 17 then t1.b else f end+19 from t1 where exists(select 1 from t1 where t1.f< -17)),11)>=d then 13 else 13 end*(t1.c) then -c when t1.e not between t1.f and t1.d then t1.a else ( - -t1.a) end-d) then t1.a else t1.d end FROM t1 WHERE NOT (t1.d between case when -(t1.e-c)+a between -+coalesce((select max(+t1.c) from t1 where (coalesce((select max(case when d>~t1.e then t1.c when t1.f in (select t1.d from t1 union select t1.e from t1) then (19) else t1.e end) from t1 where not exists(select 1 from t1 where t1.e< -(11))),t1.f) in (select min(t1.f) from t1 union select count(distinct 13) from t1))),t1.e) and t1.d then a when 13<=c then b else t1.c end-c and f)} +} {} +do_test randexpr-2.1405 { + db eval {SELECT (select abs(min(case when d not between coalesce((select max(case case t1.e when e-t1.f-a then case when (not exists(select 1 from t1 where coalesce((select max(t1.f) from t1 where 19<>17 and e not between -13 and t1.a),t1.c)>=t1.e)) 
then (select abs(cast(avg(17) AS integer)) from t1) when c between e and 17 then (select (count(*)) from t1) else 11 end else 11 end when e then 11 else a end) from t1 where t1.b>t1.b),t1.e) and 13 then t1.d else 17 end))*min(b) | -count(distinct t1.f) from t1) FROM t1 WHERE (e) in (select +t1.f from t1 union select t1.f+coalesce((select t1.d+e from t1 where t1.f not in (t1.b,t1.f,e)),(19))*case when t1.a<=a and t1.e<=e then t1.d else t1.c end from t1) and (t1.c not between 11 and e) or d in (select count(*)-cast(avg(t1.a) AS integer) from t1 union select max(19) from t1) and not t1.a not in (11,t1.d,b) or 17>=t1.a} +} {} +do_test randexpr-2.1406 { + db eval {SELECT (select abs(min(case when d not between coalesce((select max(case case t1.e when e-t1.f-a then case when (not exists(select 1 from t1 where coalesce((select max(t1.f) from t1 where 19<>17 and e not between -13 and t1.a),t1.c)>=t1.e)) then (select abs(cast(avg(17) AS integer)) from t1) when c between e and 17 then (select (count(*)) from t1) else 11 end else 11 end when e then 11 else a end) from t1 where t1.b>t1.b),t1.e) and 13 then t1.d else 17 end))*min(b) | -count(distinct t1.f) from t1) FROM t1 WHERE NOT ((e) in (select +t1.f from t1 union select t1.f+coalesce((select t1.d+e from t1 where t1.f not in (t1.b,t1.f,e)),(19))*case when t1.a<=a and t1.e<=e then t1.d else t1.c end from t1) and (t1.c not between 11 and e) or d in (select count(*)-cast(avg(t1.a) AS integer) from t1 union select max(19) from t1) and not t1.a not in (11,t1.d,b) or 17>=t1.a)} +} {-1} +do_test randexpr-2.1407 { + db eval {SELECT (select abs(min(case when d not between coalesce((select max(case case t1.e when e-t1.f-a then case when (not exists(select 1 from t1 where coalesce((select max(t1.f) from t1 where 19<>17 and e not between -13 and t1.a),t1.c)>=t1.e)) then (select abs(cast(avg(17) AS integer)) from t1) when c between e and 17 then (select (count(*)) from t1) else 11 end else 11 end when e then 11 else a end) from t1 where t1.b>t1.b),t1.e) and 13 then t1.d else 17 end))*min(b) & -count(distinct t1.f) from t1) FROM t1 WHERE NOT ((e) in (select +t1.f from t1 union select t1.f+coalesce((select t1.d+e from t1 where t1.f not in (t1.b,t1.f,e)),(19))*case when t1.a<=a and t1.e<=e then t1.d else t1.c end from t1) and (t1.c not between 11 and e) or d in (select count(*)-cast(avg(t1.a) AS integer) from t1 union select max(19) from t1) and not t1.a not in (11,t1.d,b) or 17>=t1.a)} +} {80000} +do_test randexpr-2.1408 { + db eval {SELECT case when ++coalesce((select (abs(a)/abs((select max((abs(13*f)/abs(t1.d))-t1.d*t1.e) from t1)))+b-t1.c+(select +(cast(avg(c) AS integer))*count(*) from t1)-t1.d-b*t1.e+19 from t1 where t1.c not between 11 and t1.d),19)<=t1.d then t1.d when not exists(select 1 from t1 where -11=t1.a) then e else 11 end FROM t1 WHERE (coalesce((select max(coalesce((select a-19 | ~~t1.b from t1 where (abs(t1.f)/abs(b)) not between case when t1.d<=case when not exists(select 1 from t1 where 19<=t1.d) then case when t1.a<=c then f else t1.d end when d>=17 then t1.c else (b) end*t1.c then - -t1.c when c<>t1.a then t1.f else 11 end and e),t1.e)-(t1.a)) from t1 where e<=a),17)<=t1.e) and t1.d=(f)} +} {} +do_test randexpr-2.1409 { + db eval {SELECT case when ++coalesce((select (abs(a)/abs((select max((abs(13*f)/abs(t1.d))-t1.d*t1.e) from t1)))+b-t1.c+(select +(cast(avg(c) AS integer))*count(*) from t1)-t1.d-b*t1.e+19 from t1 where t1.c not between 11 and t1.d),19)<=t1.d then t1.d when not exists(select 1 from t1 where -11=t1.a) then e else 11 end FROM 
t1 WHERE NOT ((coalesce((select max(coalesce((select a-19 | ~~t1.b from t1 where (abs(t1.f)/abs(b)) not between case when t1.d<=case when not exists(select 1 from t1 where 19<=t1.d) then case when t1.a<=c then f else t1.d end when d>=17 then t1.c else (b) end*t1.c then - -t1.c when c<>t1.a then t1.f else 11 end and e),t1.e)-(t1.a)) from t1 where e<=a),17)<=t1.e) and t1.d=(f))} +} {400} +do_test randexpr-2.1410 { + db eval {SELECT c*t1.c*(abs(case 11 when case when 11*11 not between b and t1.a then t1.d else -coalesce((select t1.c from t1 where (not exists(select 1 from t1 where e*~13 not in (a*t1.c, -t1.a,case when 13=t1.c or 13 between 11 and a then - -a when e<=t1.e then 13 else 11 end)))),t1.b+t1.b) end then t1.c else ((e)) end)/abs(t1.a))+t1.a-e-13 FROM t1 WHERE t1.a*13+case when (select count(distinct b++(select case count(distinct 19*(select -+min((13*f))-count(distinct t1.c)-max(t1.e) | (count(distinct 11)) from t1)*(f)) when count(*) then min(a)+(cast(avg(t1.d) AS integer)) else -min(17) end from t1) | t1.c-f) from t1)>t1.b then 19 else c end+17t1.b then 19 else c end+17=t1.c and e between d and t1.f),case when (13)<>(t1.e) then d when t1.a<=t1.e then 13 else d end) not in (17,b,17) then t1.d else t1.a end+a),t1.d) FROM t1 WHERE case t1.f when 11 then t1.e | f*case when ((select max(17) from t1) | 11>17+~coalesce((select max(t1.b) from t1 where (coalesce((select max(t1.e) from t1 where t1.e in (select (cast(avg( -t1.a) AS integer)) from t1 union select count(*) from t1)),a)) in (t1.f,a,t1.e)),f)+17) then t1.a when 19 in (t1.c, -e,11) or not exists(select 1 from t1 where t1.e between d and a) then 13 else 19 end+t1.f else 11 end<17} +} {-401} +do_test randexpr-2.1413 { + db eval {SELECT ~coalesce((select b from t1 where b between e and case when t1.c+coalesce((select 11 from t1 where (a in (+d-11,t1.f-a,b)) and t1.c in (select t1.f from t1 union select c from t1) or b between 13 and t1.b and t1.e<(c) and a>=t1.c and e between d and t1.f),case when (13)<>(t1.e) then d when t1.a<=t1.e then 13 else d end) not in (17,b,17) then t1.d else t1.a end+a),t1.d) FROM t1 WHERE NOT (case t1.f when 11 then t1.e | f*case when ((select max(17) from t1) | 11>17+~coalesce((select max(t1.b) from t1 where (coalesce((select max(t1.e) from t1 where t1.e in (select (cast(avg( -t1.a) AS integer)) from t1 union select count(*) from t1)),a)) in (t1.f,a,t1.e)),f)+17) then t1.a when 19 in (t1.c, -e,11) or not exists(select 1 from t1 where t1.e between d and a) then 13 else 19 end+t1.f else 11 end<17)} +} {} +do_test randexpr-2.1414 { + db eval {SELECT ~case when (a<> -++ -11) then e+case when c*t1.ad then t1.a else b end else a end | case when not exists(select 1 from t1 where not exists(select 1 from t1 where 13 | d in (select a from t1 union select d from t1))) or t1.f not between c and 19 then +t1.a else c-a end-17*c FROM t1 WHERE (case when (case when b in (select 11*coalesce((select 11 from t1 where (d not between 13 and 13+17)),coalesce((select max(13) from t1 where t1.c in (select max(e) from t1 union select count(*) from t1)),t1.b)) from t1 union select -t1.e from t1) then 13 when t1.f in ( -f, -(f),c) then 19 else f end in (select t1.a from t1 union select (t1.c) from t1)) then (abs(19)/abs(t1.b)) when (e in (b,11,19)) and t1.a -++ -11) then e+case when c*t1.ad then t1.a else b end else a end | case when not exists(select 1 from t1 where not exists(select 1 from t1 where 13 | d in (select a from t1 union select d from t1))) or t1.f not between c and 19 then +t1.a else c-a end-17*c FROM t1 WHERE NOT 
((case when (case when b in (select 11*coalesce((select 11 from t1 where (d not between 13 and 13+17)),coalesce((select max(13) from t1 where t1.c in (select max(e) from t1 union select count(*) from t1)),t1.b)) from t1 union select -t1.e from t1) then 13 when t1.f in ( -f, -(f),c) then 19 else f end in (select t1.a from t1 union select (t1.c) from t1)) then (abs(19)/abs(t1.b)) when (e in (b,11,19)) and t1.a -++ -11) then e+case when c*t1.ad then t1.a else b end else a end & case when not exists(select 1 from t1 where not exists(select 1 from t1 where 13 & d in (select a from t1 union select d from t1))) or t1.f not between c and 19 then +t1.a else c-a end-17*c FROM t1 WHERE NOT ((case when (case when b in (select 11*coalesce((select 11 from t1 where (d not between 13 and 13+17)),coalesce((select max(13) from t1 where t1.c in (select max(e) from t1 union select count(*) from t1)),t1.b)) from t1 union select -t1.e from t1) then 13 when t1.f in ( -f, -(f),c) then 19 else f end in (select t1.a from t1 union select (t1.c) from t1)) then (abs(19)/abs(t1.b)) when (e in (b,11,19)) and t1.a19) then -d when ~c in (select min(e) from t1 union select cast(avg(case when c not between b and t1.a then b else 13-e+t1.e end) AS integer) from t1) then 13-coalesce((select max(coalesce((select max(19) from t1 where (17=a)),13)) from t1 where t1.a>=c),a) else t1.d end*(t1.c) from t1 union select (11) from t1) then a when b not in (e,t1.f,13) then t1.f else 13 end FROM t1 WHERE 13<>e or t1.a in (select f from t1 union select f from t1)} +} {600} +do_test randexpr-2.1418 { + db eval {SELECT case when t1.d in (select case when exists(select 1 from t1 where 13>19) then -d when ~c in (select min(e) from t1 union select cast(avg(case when c not between b and t1.a then b else 13-e+t1.e end) AS integer) from t1) then 13-coalesce((select max(coalesce((select max(19) from t1 where (17=a)),13)) from t1 where t1.a>=c),a) else t1.d end*(t1.c) from t1 union select (11) from t1) then a when b not in (e,t1.f,13) then t1.f else 13 end FROM t1 WHERE NOT (13<>e or t1.a in (select f from t1 union select f from t1))} +} {} +do_test randexpr-2.1419 { + db eval {SELECT 17-(select case max(t1.f*~case when 17>=b then t1.d else t1.a end) when (count(distinct t1.c)) then count(distinct t1.d+~13*19) else -min((select +case abs(count(*)) when abs(+count(*)) then ~case count(distinct a) when max(t1.c) then max(t1.c) else min(t1.f) end+max(17) else count(*) end from t1))- -cast(avg(f) AS integer)-min(a) end from t1) FROM t1 WHERE (select count(*) from t1) in (select abs(min(coalesce((select t1.d from t1 where coalesce((select max((t1.d)) from t1 where not b+t1.e*(select min(t1.a+t1.e) from t1)<=(abs(t1.d)/abs(t1.c))+t1.d),f*17+d+case when f between e and d and t1.c= -c then ~13 when b not between 11 and e then (11) else e end+t1.d)=13),t1.e))) from t1 union select (cast(avg(b) AS integer)) from t1)} +} {} +do_test randexpr-2.1420 { + db eval {SELECT 17-(select case max(t1.f*~case when 17>=b then t1.d else t1.a end) when (count(distinct t1.c)) then count(distinct t1.d+~13*19) else -min((select +case abs(count(*)) when abs(+count(*)) then ~case count(distinct a) when max(t1.c) then max(t1.c) else min(t1.f) end+max(17) else count(*) end from t1))- -cast(avg(f) AS integer)-min(a) end from t1) FROM t1 WHERE NOT ((select count(*) from t1) in (select abs(min(coalesce((select t1.d from t1 where coalesce((select max((t1.d)) from t1 where not b+t1.e*(select min(t1.a+t1.e) from t1)<=(abs(t1.d)/abs(t1.c))+t1.d),f*17+d+case when f between e and d and 
t1.c= -c then ~13 when b not between 11 and e then (11) else e end+t1.d)=13),t1.e))) from t1 union select (cast(avg(b) AS integer)) from t1))} +} {-1067} +do_test randexpr-2.1421 { + db eval {SELECT case when +coalesce((select 19 from t1 where c between f and 13),t1.f)<>t1.c*t1.d then coalesce((select max(d) from t1 where t1.d between t1.a and t1.a),t1.f-f)-(13 | case when 11 in (select abs(max(t1.b)*cast(avg(t1.c) AS integer))+count(*) from t1 union select count(distinct b) from t1) and t1.a not in (b,t1.a,e) or a not in (e,19,(t1.e)) then t1.b else 17+t1.a end*t1.a) when 13 in (select e from t1 union select c from t1) then a else e end FROM t1 WHERE (select -count(*) from t1)*case when not coalesce((select max(11) from t1 where t1.f<>a),t1.a)<>b then c else ~case when not 19+13-t1.d=t1.d or c>c or (t1.a=t1.d) or 13>=a then a when t1.c not between 13 and 17 then (abs(13+t1.d-t1.a)/abs(b)) else d end end+a | 11<=e} +} {-20013} +do_test randexpr-2.1422 { + db eval {SELECT case when +coalesce((select 19 from t1 where c between f and 13),t1.f)<>t1.c*t1.d then coalesce((select max(d) from t1 where t1.d between t1.a and t1.a),t1.f-f)-(13 | case when 11 in (select abs(max(t1.b)*cast(avg(t1.c) AS integer))+count(*) from t1 union select count(distinct b) from t1) and t1.a not in (b,t1.a,e) or a not in (e,19,(t1.e)) then t1.b else 17+t1.a end*t1.a) when 13 in (select e from t1 union select c from t1) then a else e end FROM t1 WHERE NOT ((select -count(*) from t1)*case when not coalesce((select max(11) from t1 where t1.f<>a),t1.a)<>b then c else ~case when not 19+13-t1.d=t1.d or c>c or (t1.a=t1.d) or 13>=a then a when t1.c not between 13 and 17 then (abs(13+t1.d-t1.a)/abs(b)) else d end end+a | 11<=e)} +} {} +do_test randexpr-2.1423 { + db eval {SELECT case when +coalesce((select 19 from t1 where c between f and 13),t1.f)<>t1.c*t1.d then coalesce((select max(d) from t1 where t1.d between t1.a and t1.a),t1.f-f)-(13 & case when 11 in (select abs(max(t1.b)*cast(avg(t1.c) AS integer))+count(*) from t1 union select count(distinct b) from t1) and t1.a not in (b,t1.a,e) or a not in (e,19,(t1.e)) then t1.b else 17+t1.a end*t1.a) when 13 in (select e from t1 union select c from t1) then a else e end FROM t1 WHERE (select -count(*) from t1)*case when not coalesce((select max(11) from t1 where t1.f<>a),t1.a)<>b then c else ~case when not 19+13-t1.d=t1.d or c>c or (t1.a=t1.d) or 13>=a then a when t1.c not between 13 and 17 then (abs(13+t1.d-t1.a)/abs(b)) else d end end+a | 11<=e} +} {0} +do_test randexpr-2.1424 { + db eval {SELECT case t1.d | t1.f+(e)+case when +t1.e-d*t1.a+17-b*(abs(t1.f)/abs( -a))-case when 19>=e then ~t1.a*case when t1.e>19 and t1.f<>d then t1.a else t1.d end | b*e else 17 end+t1.a=17 then t1.c else t1.b end when 17 then t1.b else t1.b end FROM t1 WHERE case when c<=13*t1.a then 11 else -case when t1.f*e>= -((19)) then case when a>=t1.c then t1.d*e else b end else coalesce((select t1.b from t1 where 11+d-17-t1.d-t1.b not in (case d-17 when f then t1.d else t1.b end,e,t1.c)),t1.c) end*19 end<>b} +} {200} +do_test randexpr-2.1425 { + db eval {SELECT case t1.d | t1.f+(e)+case when +t1.e-d*t1.a+17-b*(abs(t1.f)/abs( -a))-case when 19>=e then ~t1.a*case when t1.e>19 and t1.f<>d then t1.a else t1.d end | b*e else 17 end+t1.a=17 then t1.c else t1.b end when 17 then t1.b else t1.b end FROM t1 WHERE NOT (case when c<=13*t1.a then 11 else -case when t1.f*e>= -((19)) then case when a>=t1.c then t1.d*e else b end else coalesce((select t1.b from t1 where 11+d-17-t1.d-t1.b not in (case d-17 when f then 
t1.d else t1.b end,e,t1.c)),t1.c) end*19 end<>b)} +} {} +do_test randexpr-2.1426 { + db eval {SELECT case t1.d & t1.f+(e)+case when +t1.e-d*t1.a+17-b*(abs(t1.f)/abs( -a))-case when 19>=e then ~t1.a*case when t1.e>19 and t1.f<>d then t1.a else t1.d end & b*e else 17 end+t1.a=17 then t1.c else t1.b end when 17 then t1.b else t1.b end FROM t1 WHERE case when c<=13*t1.a then 11 else -case when t1.f*e>= -((19)) then case when a>=t1.c then t1.d*e else b end else coalesce((select t1.b from t1 where 11+d-17-t1.d-t1.b not in (case d-17 when f then t1.d else t1.b end,e,t1.c)),t1.c) end*19 end<>b} +} {200} +do_test randexpr-2.1427 { + db eval {SELECT (coalesce((select max(~t1.d*19*t1.f) from t1 where (abs(17)/abs(t1.f-case when not exists(select 1 from t1 where case when a in (f,t1.a,c) then c when t1.a not in (13,t1.c,a) then e else t1.a end in (select abs(abs(min(t1.e))) from t1 union select cast(avg(11) AS integer) from t1)) or c13 or t1.e in (e,t1.c,d)),t1.e) | 13)-d*e FROM t1 WHERE 11 in (select max(+(select max(d | b) from t1)*17-e) from t1 union select (cast(avg(+a-+((coalesce((select t1.e from t1 where exists(select 1 from t1 where (abs( -case when coalesce((select max(b-17) from t1 where not exists(select 1 from t1 where t1.f not between a and 11)),t1.c) in (select t1.d from t1 union select (17) from t1) then t1.c else ( - -t1.d) end)/abs(t1.f))+11>f)),b)))*19) AS integer))-count(distinct t1.f) from t1)} +} {} +do_test randexpr-2.1428 { + db eval {SELECT (coalesce((select max(~t1.d*19*t1.f) from t1 where (abs(17)/abs(t1.f-case when not exists(select 1 from t1 where case when a in (f,t1.a,c) then c when t1.a not in (13,t1.c,a) then e else t1.a end in (select abs(abs(min(t1.e))) from t1 union select cast(avg(11) AS integer) from t1)) or c13 or t1.e in (e,t1.c,d)),t1.e) | 13)-d*e FROM t1 WHERE NOT (11 in (select max(+(select max(d | b) from t1)*17-e) from t1 union select (cast(avg(+a-+((coalesce((select t1.e from t1 where exists(select 1 from t1 where (abs( -case when coalesce((select max(b-17) from t1 where not exists(select 1 from t1 where t1.f not between a and 11)),t1.c) in (select t1.d from t1 union select (17) from t1) then t1.c else ( - -t1.d) end)/abs(t1.f))+11>f)),b)))*19) AS integer))-count(distinct t1.f) from t1))} +} {-4771395} +do_test randexpr-2.1429 { + db eval {SELECT (coalesce((select max(~t1.d*19*t1.f) from t1 where (abs(17)/abs(t1.f-case when not exists(select 1 from t1 where case when a in (f,t1.a,c) then c when t1.a not in (13,t1.c,a) then e else t1.a end in (select abs(abs(min(t1.e))) from t1 union select cast(avg(11) AS integer) from t1)) or c13 or t1.e in (e,t1.c,d)),t1.e) & 13)-d*e FROM t1 WHERE NOT (11 in (select max(+(select max(d | b) from t1)*17-e) from t1 union select (cast(avg(+a-+((coalesce((select t1.e from t1 where exists(select 1 from t1 where (abs( -case when coalesce((select max(b-17) from t1 where not exists(select 1 from t1 where t1.f not between a and 11)),t1.c) in (select t1.d from t1 union select (17) from t1) then t1.c else ( - -t1.d) end)/abs(t1.f))+11>f)),b)))*19) AS integer))-count(distinct t1.f) from t1))} +} {-199992} +do_test randexpr-2.1430 { + db eval {SELECT coalesce((select t1.e from t1 where +~case when d-(abs(+t1.b)/abs( -19*t1.c-(select (abs( -cast(avg((abs(d)/abs(t1.d))) AS integer))) from t1)))+11<19 then t1.a-19-a-f when not (c in (case f when t1.a then 17 else 11 end-a,t1.d, -e)) then 17 else (t1.b) end(t1.b) then t1.d when (not exists(select 1 from t1 where t1.f<>t1.c)) then t1.b else 17 end-e)-19)} +} {500} +do_test randexpr-2.1431 { 
+ db eval {SELECT coalesce((select t1.e from t1 where +~case when d-(abs(+t1.b)/abs( -19*t1.c-(select (abs( -cast(avg((abs(d)/abs(t1.d))) AS integer))) from t1)))+11<19 then t1.a-19-a-f when not (c in (case f when t1.a then 17 else 11 end-a,t1.d, -e)) then 17 else (t1.b) end(t1.b) then t1.d when (not exists(select 1 from t1 where t1.f<>t1.c)) then t1.b else 17 end-e)-19))} +} {} +do_test randexpr-2.1432 { + db eval {SELECT case when (abs(17)/abs(t1.e))=case when (exists(select 1 from t1 where not exists(select 1 from t1 where t1.f<>19))) then coalesce((select 11 from t1 where not exists(select 1 from t1 where t1.e not in ( -t1.d,(b),t1.d))),t1.a)+t1.f*11 else t1.a end then b else e end)),17)) from t1) not in (13,e,17) then t1.e else 11 end FROM t1 WHERE not (t1.e<>coalesce((select (case t1.c when (coalesce((select 19 from t1 where (case when not exists(select 1 from t1 where e not in (c,e,t1.f)) then t1.f | t1.a else t1.a end in (d,19,t1.a)) or t1.f<=t1.c),+13))*t1.b-t1.d then -t1.c else b end) from t1 where 17=case when (exists(select 1 from t1 where not exists(select 1 from t1 where t1.f<>19))) then coalesce((select 11 from t1 where not exists(select 1 from t1 where t1.e not in ( -t1.d,(b),t1.d))),t1.a)+t1.f*11 else t1.a end then b else e end)),17)) from t1) not in (13,e,17) then t1.e else 11 end FROM t1 WHERE NOT (not (t1.e<>coalesce((select (case t1.c when (coalesce((select 19 from t1 where (case when not exists(select 1 from t1 where e not in (c,e,t1.f)) then t1.f | t1.a else t1.a end in (d,19,t1.a)) or t1.f<=t1.c),+13))*t1.b-t1.d then -t1.c else b end) from t1 where 17 -19 then e when a in (17,d,t1.f) then -t1.f else f end)+count(*)-max(t1.f) | count(distinct t1.a) | cast(avg(t1.c) AS integer)) | -max(a) from t1) end end FROM t1 WHERE a | t1.c in (select f from t1 union select case -t1.c when (abs(c | t1.c)/abs(17)) then (abs(t1.e)/abs(t1.c))+~19 else coalesce((select b from t1 where 11<=case d+coalesce((select max((select ( -(cast(avg(17) AS integer)))*max(t1.e)-max(t1.d)*cast(avg(t1.e) AS integer) from t1)) from t1 where (abs(case t1.f when 19 then b else t1.c end)/abs(t1.e))=b),d)*t1.f when -t1.b then t1.d else 19 end),a) end+d+e from t1)} +} {} +do_test randexpr-2.1435 { + db eval {SELECT (select max(t1.c) from t1) | case when d=(abs(t1.d)/abs(t1.b)) then 19 else case b when a then b+17+c-t1.d+d-a*t1.c else e+t1.f+ -(select abs(~+count(distinct case when b -19 then e when a in (17,d,t1.f) then -t1.f else f end)+count(*)-max(t1.f) | count(distinct t1.a) | cast(avg(t1.c) AS integer)) | -max(a) from t1) end end FROM t1 WHERE NOT (a | t1.c in (select f from t1 union select case -t1.c when (abs(c | t1.c)/abs(17)) then (abs(t1.e)/abs(t1.c))+~19 else coalesce((select b from t1 where 11<=case d+coalesce((select max((select ( -(cast(avg(17) AS integer)))*max(t1.e)-max(t1.d)*cast(avg(t1.e) AS integer) from t1)) from t1 where (abs(case t1.f when 19 then b else t1.c end)/abs(t1.e))=b),d)*t1.f when -t1.b then t1.d else 19 end),a) end+d+e from t1))} +} {1391} +do_test randexpr-2.1436 { + db eval {SELECT (select max(t1.c) from t1) & case when d=(abs(t1.d)/abs(t1.b)) then 19 else case b when a then b+17+c-t1.d+d-a*t1.c else e+t1.f+ -(select abs(~+count(distinct case when b -19 then e when a in (17,d,t1.f) then -t1.f else f end)+count(*)-max(t1.f) & count(distinct t1.a) & cast(avg(t1.c) AS integer)) & -max(a) from t1) end end FROM t1 WHERE NOT (a | t1.c in (select f from t1 union select case -t1.c when (abs(c | t1.c)/abs(17)) then (abs(t1.e)/abs(t1.c))+~19 else coalesce((select b from t1 where 
11<=case d+coalesce((select max((select ( -(cast(avg(17) AS integer)))*max(t1.e)-max(t1.d)*cast(avg(t1.e) AS integer) from t1)) from t1 where (abs(case t1.f when 19 then b else t1.c end)/abs(t1.e))=b),d)*t1.f when -t1.b then t1.d else 19 end),a) end+d+e from t1))} +} {12} +do_test randexpr-2.1437 { + db eval {SELECT case e*case when case coalesce((select max(13) from t1 where exists(select 1 from t1 where t1.a in (17,b,17))),t1.e)-t1.c when c then 19 else -t1.d end+17 in (t1.d,c,11) or b<= -( - -a) then ~(a) when f in (select case case (max(e)) when count(*) then max(17) else (max(17)) end+count(distinct e)- -count(distinct d) when -count(*) then -count(distinct d) else ((min(t1.f))) end from t1 union select min( -19) from t1) then c else (13) end when b then t1.b else f end FROM t1 WHERE t1.b17 or (t1.c in (19,c,a))) then 13 else c end-11+11*11) then t1.e when (11)<13 then - -t1.c else t1.d end FROM t1 WHERE ((t1.c+17 not between case b*(11-coalesce((select max(t1.a) from t1 where ~case when (not t1.d*17=f) then t1.c-13+c else t1.d end+t1.c>=(b)),b)*e) when t1.f then b else f end-f- -17 and d or t1.d not in (t1.c,11,b)))} +} {300} +do_test randexpr-2.1440 { + db eval {SELECT case when d in (c,t1.a*19,+++case when exists(select 1 from t1 where case 17 when ~t1.b then f-e*~b-17+ -t1.c else c end<>17 or (t1.c in (19,c,a))) then 13 else c end-11+11*11) then t1.e when (11)<13 then - -t1.c else t1.d end FROM t1 WHERE NOT (((t1.c+17 not between case b*(11-coalesce((select max(t1.a) from t1 where ~case when (not t1.d*17=f) then t1.c-13+c else t1.d end+t1.c>=(b)),b)*e) when t1.f then b else f end-f- -17 and d or t1.d not in (t1.c,11,b))))} +} {} +do_test randexpr-2.1441 { + db eval {SELECT t1.e*coalesce((select 17 from t1 where (19>(abs(case coalesce((select coalesce((select ~11 from t1 where b not between (13) and b),17) from t1 where 19 not between e+b+13 and -t1.d),t1.f) when d then t1.e else t1.d end-e | f)/abs((e)))) and (t1.b<=a and 11<> - -t1.e) and t1.dt1.a | t1.c} +} {} +do_test randexpr-2.1442 { + db eval {SELECT t1.e*coalesce((select 17 from t1 where (19>(abs(case coalesce((select coalesce((select ~11 from t1 where b not between (13) and b),17) from t1 where 19 not between e+b+13 and -t1.d),t1.f) when d then t1.e else t1.d end-e | f)/abs((e)))) and (t1.b<=a and 11<> - -t1.e) and t1.dt1.a | t1.c)} +} {150000} +do_test randexpr-2.1443 { + db eval {SELECT t1.e*coalesce((select 17 from t1 where (19>(abs(case coalesce((select coalesce((select ~11 from t1 where b not between (13) and b),17) from t1 where 19 not between e+b+13 and -t1.d),t1.f) when d then t1.e else t1.d end-e & f)/abs((e)))) and (t1.b<=a and 11<> - -t1.e) and t1.dt1.a | t1.c)} +} {150000} +do_test randexpr-2.1444 { + db eval {SELECT ~coalesce((select max(t1.a) from t1 where ((t1.c)<=case 11-~t1.b++(t1.a+ -c)*t1.d-b | d when ~e then 17 else (abs(f)/abs(t1.f-t1.c)) end-19 | b-~t1.e*a-t1.b | +c)),b)*a FROM t1 WHERE not exists(select 1 from t1 where t1.a=((abs(coalesce((select coalesce((select max(+d) from t1 where not exists(select 1 from t1 where -t1.d in (select -max(13)-(min((t1.a))) from t1 union select count(distinct f) from t1)) and 11 in (select f from t1 union select 17 from t1) and a<>13),case when e>(11) then -c else d end) from t1 where e<>11 and et1.e))))} +} {} +do_test randexpr-2.1445 { + db eval {SELECT ~coalesce((select max(t1.a) from t1 where ((t1.c)<=case 11-~t1.b++(t1.a+ -c)*t1.d-b | d when ~e then 17 else (abs(f)/abs(t1.f-t1.c)) end-19 | b-~t1.e*a-t1.b | +c)),b)*a FROM t1 WHERE NOT (not exists(select 1 from t1 
where t1.a=((abs(coalesce((select coalesce((select max(+d) from t1 where not exists(select 1 from t1 where -t1.d in (select -max(13)-(min((t1.a))) from t1 union select count(distinct f) from t1)) and 11 in (select f from t1 union select 17 from t1) and a<>13),case when e>(11) then -c else d end) from t1 where e<>11 and et1.e)))))} +} {-20100} +do_test randexpr-2.1446 { + db eval {SELECT ~coalesce((select max(t1.a) from t1 where ((t1.c)<=case 11-~t1.b++(t1.a+ -c)*t1.d-b & d when ~e then 17 else (abs(f)/abs(t1.f-t1.c)) end-19 & b-~t1.e*a-t1.b & +c)),b)*a FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.a=((abs(coalesce((select coalesce((select max(+d) from t1 where not exists(select 1 from t1 where -t1.d in (select -max(13)-(min((t1.a))) from t1 union select count(distinct f) from t1)) and 11 in (select f from t1 union select 17 from t1) and a<>13),case when e>(11) then -c else d end) from t1 where e<>11 and et1.e)))))} +} {-20100} +do_test randexpr-2.1447 { + db eval {SELECT (select count(distinct ~(((abs(b-a)/abs(case (select min((select case cast(avg(11+t1.e) AS integer) when case min( -~coalesce((select 13 from t1 where case t1.e when f then d else 17 end>=b),e)) when case max(t1.b) when (~ -count(*)+(( -max( -t1.a)))-(count(*))) then -count(distinct (t1.c)) else min( -t1.e) end then count(*) else cast(avg( -(e)) AS integer) end then cast(avg(13) AS integer) else max(17) end from t1)) from t1) when 13 then c else (d) end))))) from t1)*t1.b FROM t1 WHERE not t1.c>=t1.b} +} {} +do_test randexpr-2.1448 { + db eval {SELECT (select count(distinct ~(((abs(b-a)/abs(case (select min((select case cast(avg(11+t1.e) AS integer) when case min( -~coalesce((select 13 from t1 where case t1.e when f then d else 17 end>=b),e)) when case max(t1.b) when (~ -count(*)+(( -max( -t1.a)))-(count(*))) then -count(distinct (t1.c)) else min( -t1.e) end then count(*) else cast(avg( -(e)) AS integer) end then cast(avg(13) AS integer) else max(17) end from t1)) from t1) when 13 then c else (d) end))))) from t1)*t1.b FROM t1 WHERE NOT (not t1.c>=t1.b)} +} {200} +do_test randexpr-2.1449 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where t1.b in (select e from t1 union select t1.e from t1)),t1.a)) from t1 where case when t1.d=case when d not between t1.a and case when not t1.c in ((t1.e),a,t1.c) then 19 | t1.c when a not between 19 and b then (19) else 13 end then t1.d else b end-11 and not t1.f=t1.c or 19 not in (t1.d,c, -t1.f) and 17< -t1.d then e when t1.b not between 17 and t1.b then f else d end<11),t1.c)*f FROM t1 WHERE t1.b<=(coalesce((select c from t1 where e in (t1.a,t1.c,case when 17 not between 19 and (19)-t1.a-(abs(coalesce((select d from t1 where case (t1.b+e*b*t1.c) when 13 then t1.b else 19 end<>19),17))/abs(a))*t1.a then 17 when exists(select 1 from t1 where ( -e)=a) or 13>=d then 17 else t1.f end)),c))} +} {180000} +do_test randexpr-2.1450 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where t1.b in (select e from t1 union select t1.e from t1)),t1.a)) from t1 where case when t1.d=case when d not between t1.a and case when not t1.c in ((t1.e),a,t1.c) then 19 | t1.c when a not between 19 and b then (19) else 13 end then t1.d else b end-11 and not t1.f=t1.c or 19 not in (t1.d,c, -t1.f) and 17< -t1.d then e when t1.b not between 17 and t1.b then f else d end<11),t1.c)*f FROM t1 WHERE NOT (t1.b<=(coalesce((select c from t1 where e in (t1.a,t1.c,case when 17 not between 19 and (19)-t1.a-(abs(coalesce((select d from t1 where case (t1.b+e*b*t1.c) 
when 13 then t1.b else 19 end<>19),17))/abs(a))*t1.a then 17 when exists(select 1 from t1 where ( -e)=a) or 13>=d then 17 else t1.f end)),c)))} +} {} +do_test randexpr-2.1451 { + db eval {SELECT coalesce((select max(coalesce((select max(13) from t1 where t1.b in (select e from t1 union select t1.e from t1)),t1.a)) from t1 where case when t1.d=case when d not between t1.a and case when not t1.c in ((t1.e),a,t1.c) then 19 & t1.c when a not between 19 and b then (19) else 13 end then t1.d else b end-11 and not t1.f=t1.c or 19 not in (t1.d,c, -t1.f) and 17< -t1.d then e when t1.b not between 17 and t1.b then f else d end<11),t1.c)*f FROM t1 WHERE t1.b<=(coalesce((select c from t1 where e in (t1.a,t1.c,case when 17 not between 19 and (19)-t1.a-(abs(coalesce((select d from t1 where case (t1.b+e*b*t1.c) when 13 then t1.b else 19 end<>19),17))/abs(a))*t1.a then 17 when exists(select 1 from t1 where ( -e)=a) or 13>=d then 17 else t1.f end)),c))} +} {180000} +do_test randexpr-2.1452 { + db eval {SELECT (d)+(select -abs(+cast(avg((select max((coalesce((select d from t1 where case when 13>=e then t1.c else 19 end>=d),coalesce((select 19-(select count(distinct 19 | (abs(19)/abs(~13))) from t1) | t1.d from t1 where coalesce((select max(11) from t1 where t1.a in (select t1.d from t1 union select c from t1)),19)>t1.d),e)+t1.b))) from t1)) AS integer)+abs( -max(b))) from t1) FROM t1 WHERE c-t1.c not between t1.c and +case when +case -case when 17 not in (17,(e),t1.c) then a else 19 end-13 when t1.e then b else 13 end>19 or 17 in (select b from t1 union select t1.c from t1) then case c when t1.c then d else c end when not t1.f in (select -cast(avg(a) AS integer)*count(distinct (17))+min(f)* - - -cast(avg(b) AS integer) from t1 union select (count(*)) from t1) then d else a end} +} {-500} +do_test randexpr-2.1453 { + db eval {SELECT (d)+(select -abs(+cast(avg((select max((coalesce((select d from t1 where case when 13>=e then t1.c else 19 end>=d),coalesce((select 19-(select count(distinct 19 | (abs(19)/abs(~13))) from t1) | t1.d from t1 where coalesce((select max(11) from t1 where t1.a in (select t1.d from t1 union select c from t1)),19)>t1.d),e)+t1.b))) from t1)) AS integer)+abs( -max(b))) from t1) FROM t1 WHERE NOT (c-t1.c not between t1.c and +case when +case -case when 17 not in (17,(e),t1.c) then a else 19 end-13 when t1.e then b else 13 end>19 or 17 in (select b from t1 union select t1.c from t1) then case c when t1.c then d else c end when not t1.f in (select -cast(avg(a) AS integer)*count(distinct (17))+min(f)* - - -cast(avg(b) AS integer) from t1 union select (count(*)) from t1) then d else a end)} +} {} +do_test randexpr-2.1454 { + db eval {SELECT (d)+(select -abs(+cast(avg((select max((coalesce((select d from t1 where case when 13>=e then t1.c else 19 end>=d),coalesce((select 19-(select count(distinct 19 & (abs(19)/abs(~13))) from t1) & t1.d from t1 where coalesce((select max(11) from t1 where t1.a in (select t1.d from t1 union select c from t1)),19)>t1.d),e)+t1.b))) from t1)) AS integer)+abs( -max(b))) from t1) FROM t1 WHERE c-t1.c not between t1.c and +case when +case -case when 17 not in (17,(e),t1.c) then a else 19 end-13 when t1.e then b else 13 end>19 or 17 in (select b from t1 union select t1.c from t1) then case c when t1.c then d else c end when not t1.f in (select -cast(avg(a) AS integer)*count(distinct (17))+min(f)* - - -cast(avg(b) AS integer) from t1 union select (count(*)) from t1) then d else a end} +} {-500} +do_test randexpr-2.1455 { + db eval {SELECT case when 19 in (case when 
coalesce((select max(case when e*t1.b in (coalesce((select t1.a from t1 where (abs(f)/abs(coalesce((select f from t1 where t1.f in (a,b,t1.d)),13))) in (b,b,11)),c),t1.a,17) then b else d end) from t1 where f>=13 and a>=17 and not exists(select 1 from t1 where (b<>f))), -t1.c) between d and 11 then d else t1.f end,t1.c,11) then 13 else 17 end FROM t1 WHERE t1.d in (select (abs(t1.a)/abs(t1.f)) from t1 union select e from t1) and ((b*coalesce((select -11 from t1 where (coalesce((select max( -t1.e+ -coalesce((select d from t1 where t1.c<= - -t1.d),t1.c)) from t1 where b in (select -count(distinct d)+count(distinct d)- -((min(e))) from t1 union select count(*) from t1)),(t1.e))-19*d<>t1.e)),a)*t1.f)=b)} +} {} +do_test randexpr-2.1456 { + db eval {SELECT case when 19 in (case when coalesce((select max(case when e*t1.b in (coalesce((select t1.a from t1 where (abs(f)/abs(coalesce((select f from t1 where t1.f in (a,b,t1.d)),13))) in (b,b,11)),c),t1.a,17) then b else d end) from t1 where f>=13 and a>=17 and not exists(select 1 from t1 where (b<>f))), -t1.c) between d and 11 then d else t1.f end,t1.c,11) then 13 else 17 end FROM t1 WHERE NOT (t1.d in (select (abs(t1.a)/abs(t1.f)) from t1 union select e from t1) and ((b*coalesce((select -11 from t1 where (coalesce((select max( -t1.e+ -coalesce((select d from t1 where t1.c<= - -t1.d),t1.c)) from t1 where b in (select -count(distinct d)+count(distinct d)- -((min(e))) from t1 union select count(*) from t1)),(t1.e))-19*d<>t1.e)),a)*t1.f)=b))} +} {17} +do_test randexpr-2.1457 { + db eval {SELECT coalesce((select max((select count(*) from t1)) from t1 where coalesce((select max(t1.a-coalesce((select t1.d+case when exists(select 1 from t1 where exists(select 1 from t1 where t1.f between t1.a and f)) or d | b between 19 and 19 then f+17 else (t1.b) end from t1 where t1.e not between -t1.d and (19)),(17))) from t1 where not exists(select 1 from t1 where 19 between t1.b and 13 or t1.b>t1.c) or 19>=19),b) between 13 and f), -t1.a) FROM t1 WHERE exists(select 1 from t1 where +t1.f+t1.e<(abs(t1.b)/abs( -f*e))) or case t1.c when case when 19 not between 17 and case when t1.e<>e then t1.b when not exists(select 1 from t1 where coalesce((select max(b) from t1 where (et1.d or (t1.d<>t1.e)) then t1.c else 17 end then t1.c when e<>b then 17 else e end then t1.d else t1.e end between d and b} +} {} +do_test randexpr-2.1458 { + db eval {SELECT coalesce((select max((select count(*) from t1)) from t1 where coalesce((select max(t1.a-coalesce((select t1.d+case when exists(select 1 from t1 where exists(select 1 from t1 where t1.f between t1.a and f)) or d | b between 19 and 19 then f+17 else (t1.b) end from t1 where t1.e not between -t1.d and (19)),(17))) from t1 where not exists(select 1 from t1 where 19 between t1.b and 13 or t1.b>t1.c) or 19>=19),b) between 13 and f), -t1.a) FROM t1 WHERE NOT (exists(select 1 from t1 where +t1.f+t1.e<(abs(t1.b)/abs( -f*e))) or case t1.c when case when 19 not between 17 and case when t1.e<>e then t1.b when not exists(select 1 from t1 where coalesce((select max(b) from t1 where (et1.d or (t1.d<>t1.e)) then t1.c else 17 end then t1.c when e<>b then 17 else e end then t1.d else t1.e end between d and b)} +} {-100} +do_test randexpr-2.1459 { + db eval {SELECT coalesce((select max((select count(*) from t1)) from t1 where coalesce((select max(t1.a-coalesce((select t1.d+case when exists(select 1 from t1 where exists(select 1 from t1 where t1.f between t1.a and f)) or d & b between 19 and 19 then f+17 else (t1.b) end from t1 where t1.e not between 
-t1.d and (19)),(17))) from t1 where not exists(select 1 from t1 where 19 between t1.b and 13 or t1.b>t1.c) or 19>=19),b) between 13 and f), -t1.a) FROM t1 WHERE NOT (exists(select 1 from t1 where +t1.f+t1.e<(abs(t1.b)/abs( -f*e))) or case t1.c when case when 19 not between 17 and case when t1.e<>e then t1.b when not exists(select 1 from t1 where coalesce((select max(b) from t1 where (et1.d or (t1.d<>t1.e)) then t1.c else 17 end then t1.c when e<>b then 17 else e end then t1.d else t1.e end between d and b)} +} {-100} +do_test randexpr-2.1460 { + db eval {SELECT case when not case 11 when t1.e*coalesce((select max(t1.b) from t1 where (abs(t1.c)/abs(~11)) not between coalesce((select max((abs(a)/abs(c))) from t1 where t1.b>=t1.a or b= -e),t1.f) and e and c not in (t1.a,t1.f,t1.c) and not (a)17 then c else a end | t1.b,a) or not (t1.d<11)),d) from t1 union select t1.a from t1) and f in (select b from t1 union select b from t1))} +} {100} +do_test randexpr-2.1461 { + db eval {SELECT case when not case 11 when t1.e*coalesce((select max(t1.b) from t1 where (abs(t1.c)/abs(~11)) not between coalesce((select max((abs(a)/abs(c))) from t1 where t1.b>=t1.a or b= -e),t1.f) and e and c not in (t1.a,t1.f,t1.c) and not (a)17 then c else a end | t1.b,a) or not (t1.d<11)),d) from t1 union select t1.a from t1) and f in (select b from t1 union select b from t1)))} +} {} +do_test randexpr-2.1462 { + db eval {SELECT case when case when coalesce((select max(case when ((a<=case t1.a+t1.a when t1.d then -c else b end or t1.b<>e)) then 17 else 19*19*a end*13) from t1 where d<>t1.d),d)<=a or 11<=t1.e then t1.c when 17<11 then 17 else e end<=t1.a then t1.d when e= -e then t1.d else 19 end FROM t1 WHERE t1.e<=t1.a or (abs(+a+coalesce((select max(t1.b) from t1 where (select ~count(distinct 17) from t1)-b in (select t1.b+t1.b from t1 union select 19 from t1)), -11))/abs(case when not exists(select 1 from t1 where t1.d not between 17 and c- -c+e) or t1.e>t1.f then t1.d when t1.d not in (17,e,d) then a else 17 end)) in (c,d,11)} +} {} +do_test randexpr-2.1463 { + db eval {SELECT case when case when coalesce((select max(case when ((a<=case t1.a+t1.a when t1.d then -c else b end or t1.b<>e)) then 17 else 19*19*a end*13) from t1 where d<>t1.d),d)<=a or 11<=t1.e then t1.c when 17<11 then 17 else e end<=t1.a then t1.d when e= -e then t1.d else 19 end FROM t1 WHERE NOT (t1.e<=t1.a or (abs(+a+coalesce((select max(t1.b) from t1 where (select ~count(distinct 17) from t1)-b in (select t1.b+t1.b from t1 union select 19 from t1)), -11))/abs(case when not exists(select 1 from t1 where t1.d not between 17 and c- -c+e) or t1.e>t1.f then t1.d when t1.d not in (17,e,d) then a else 17 end)) in (c,d,11))} +} {19} +do_test randexpr-2.1464 { + db eval {SELECT ~case when t1.e<=t1.a then b else (11) end+(select -((cast(avg(coalesce((select a from t1 where (select cast(avg(t1.d) AS integer) | case case min((t1.e)) when cast(avg(t1.b) AS integer) then max(t1.c) else max(19) end*count(*) when -count(distinct e) then count(*) else max( - -17) end from t1) not between c and -t1.e-f+f),11)-11) AS integer)))*max(e) from t1)+e* - -a-t1.d*e FROM t1 WHERE coalesce((select max(case when 11 between d and d then e | b else case (select (case count(distinct coalesce((select -coalesce((select max(t1.f) from t1 where t1.b in (select 11 from t1 union select 19 from t1)),t1.e) from t1 where 13<>17),17)) when (max( -t1.e)) then max(17)+max(11) else ((count(*))) end)*(count(*)) from t1) when 11 then -13+19 else t1.e end end) from t1 where ((t1.a not between d and 
-t1.c))),t1.a)<11} +} {} +do_test randexpr-2.1465 { + db eval {SELECT ~case when t1.e<=t1.a then b else (11) end+(select -((cast(avg(coalesce((select a from t1 where (select cast(avg(t1.d) AS integer) | case case min((t1.e)) when cast(avg(t1.b) AS integer) then max(t1.c) else max(19) end*count(*) when -count(distinct e) then count(*) else max( - -17) end from t1) not between c and -t1.e-f+f),11)-11) AS integer)))*max(e) from t1)+e* - -a-t1.d*e FROM t1 WHERE NOT (coalesce((select max(case when 11 between d and d then e | b else case (select (case count(distinct coalesce((select -coalesce((select max(t1.f) from t1 where t1.b in (select 11 from t1 union select 19 from t1)),t1.e) from t1 where 13<>17),17)) when (max( -t1.e)) then max(17)+max(11) else ((count(*))) end)*(count(*)) from t1) when 11 then -13+19 else t1.e end end) from t1 where ((t1.a not between d and -t1.c))),t1.a)<11)} +} {-194512} +do_test randexpr-2.1466 { + db eval {SELECT ~case when t1.e<=t1.a then b else (11) end+(select -((cast(avg(coalesce((select a from t1 where (select cast(avg(t1.d) AS integer) & case case min((t1.e)) when cast(avg(t1.b) AS integer) then max(t1.c) else max(19) end*count(*) when -count(distinct e) then count(*) else max( - -17) end from t1) not between c and -t1.e-f+f),11)-11) AS integer)))*max(e) from t1)+e* - -a-t1.d*e FROM t1 WHERE NOT (coalesce((select max(case when 11 between d and d then e | b else case (select (case count(distinct coalesce((select -coalesce((select max(t1.f) from t1 where t1.b in (select 11 from t1 union select 19 from t1)),t1.e) from t1 where 13<>17),17)) when (max( -t1.e)) then max(17)+max(11) else ((count(*))) end)*(count(*)) from t1) when 11 then -13+19 else t1.e end end) from t1 where ((t1.a not between d and -t1.c))),t1.a)<11)} +} {-194512} +do_test randexpr-2.1467 { + db eval {SELECT t1.e*~13-t1.a-~case when (abs(t1.a)/abs(coalesce((select max(t1.a) from t1 where e not between f and e-t1.b+13),case when (a in (select ( -c) from t1 union select 11 from t1) or exists(select 1 from t1 where 11 between f and t1.f)) then t1.b-a else -t1.e end)+13*t1.f)) | 13 -19-a then (a)-coalesce((select max(case when a in (13,t1.b,11) then t1.b else (13) end) from t1 where not exists(select 1 from t1 where t1.b>=b)),t1.c) else -13 end)*(a)<17)} +} {} +do_test randexpr-2.1468 { + db eval {SELECT t1.e*~13-t1.a-~case when (abs(t1.a)/abs(coalesce((select max(t1.a) from t1 where e not between f and e-t1.b+13),case when (a in (select ( -c) from t1 union select 11 from t1) or exists(select 1 from t1 where 11 between f and t1.f)) then t1.b-a else -t1.e end)+13*t1.f)) | 13 -19-a then (a)-coalesce((select max(case when a in (13,t1.b,11) then t1.b else (13) end) from t1 where not exists(select 1 from t1 where t1.b>=b)),t1.c) else -13 end)*(a)<17))} +} {-6499} +do_test randexpr-2.1469 { + db eval {SELECT t1.e*~13-t1.a-~case when (abs(t1.a)/abs(coalesce((select max(t1.a) from t1 where e not between f and e-t1.b+13),case when (a in (select ( -c) from t1 union select 11 from t1) or exists(select 1 from t1 where 11 between f and t1.f)) then t1.b-a else -t1.e end)+13*t1.f)) & 13 -19-a then (a)-coalesce((select max(case when a in (13,t1.b,11) then t1.b else (13) end) from t1 where not exists(select 1 from t1 where t1.b>=b)),t1.c) else -13 end)*(a)<17))} +} {-6499} +do_test randexpr-2.1470 { + db eval {SELECT coalesce((select max(f*b | case when f in (t1.b,t1.b*(select count(*) from t1)+(select -count(*) from t1), -b) and +13<(abs( -~d-case when t1.a in (select -f from t1 union select t1.c from t1) then (t1.a) 
when t1.a=a),coalesce((select 11 from t1 where 11 not between 19 and t1.c),t1.a))-19 when t1.e then t1.f else e end)) from t1 union select -d from t1) then d when t1.f between 17 and d then t1.d else t1.d end and (f) or d<17} +} {} +do_test randexpr-2.1471 { + db eval {SELECT coalesce((select max(f*b | case when f in (t1.b,t1.b*(select count(*) from t1)+(select -count(*) from t1), -b) and +13<(abs( -~d-case when t1.a in (select -f from t1 union select t1.c from t1) then (t1.a) when t1.a=a),coalesce((select 11 from t1 where 11 not between 19 and t1.c),t1.a))-19 when t1.e then t1.f else e end)) from t1 union select -d from t1) then d when t1.f between 17 and d then t1.d else t1.d end and (f) or d<17)} +} {0} +do_test randexpr-2.1472 { + db eval {SELECT coalesce((select max(f*b & case when f in (t1.b,t1.b*(select count(*) from t1)+(select -count(*) from t1), -b) and +13<(abs( -~d-case when t1.a in (select -f from t1 union select t1.c from t1) then (t1.a) when t1.a=a),coalesce((select 11 from t1 where 11 not between 19 and t1.c),t1.a))-19 when t1.e then t1.f else e end)) from t1 union select -d from t1) then d when t1.f between 17 and d then t1.d else t1.d end and (f) or d<17)} +} {0} +do_test randexpr-2.1473 { + db eval {SELECT case when (t1.b+case when a in (select coalesce((select max(t1.a) from t1 where not exists(select 1 from t1 where 11=case e when (e) then t1.b else t1.c end or c in (t1.f,f,11))),t1.f) from t1 union select t1.d from t1) and t1.e<>e then t1.f when t1.a in (t1.d,a,13) then 17 else t1.d end not in (t1.f,t1.d,17)) or c<> -19 then b when fa-e then b else 19 end from t1 where 11<>t1.b),a) from t1 where not 19 in (select max(19)*count(distinct a) from t1 union select cast(avg(11) AS integer) from t1)),11) from t1 where t1.e<>a),17) then d else 19 end from t1 union select f from t1) and c in (select count(distinct c) from t1 union select -+max(d) | abs(count(*)) from t1)} +} {} +do_test randexpr-2.1474 { + db eval {SELECT case when (t1.b+case when a in (select coalesce((select max(t1.a) from t1 where not exists(select 1 from t1 where 11=case e when (e) then t1.b else t1.c end or c in (t1.f,f,11))),t1.f) from t1 union select t1.d from t1) and t1.e<>e then t1.f when t1.a in (t1.d,a,13) then 17 else t1.d end not in (t1.f,t1.d,17)) or c<> -19 then b when fa-e then b else 19 end from t1 where 11<>t1.b),a) from t1 where not 19 in (select max(19)*count(distinct a) from t1 union select cast(avg(11) AS integer) from t1)),11) from t1 where t1.e<>a),17) then d else 19 end from t1 union select f from t1) and c in (select count(distinct c) from t1 union select -+max(d) | abs(count(*)) from t1))} +} {200} +do_test randexpr-2.1475 { + db eval {SELECT 17+coalesce((select max(19) from t1 where -(select max(t1.d) from t1)>coalesce((select max((13)) from t1 where (coalesce((select 11 from t1 where -e-t1.d-b-(abs(t1.b)/abs(11))*t1.f*b-e not between (f) and t1.e or 11 in (t1.b,t1.e, -e)),t1.a) in (select t1.b from t1 union select 19 from t1)) and exists(select 1 from t1 where 13<19)),t1.e)),e) FROM t1 WHERE t1.a<>t1.d or not exists(select 1 from t1 where t1.a>=coalesce((select case t1.e*d when -case d*11 when t1.f then t1.e else t1.c end then 13*coalesce((select max(f*b*t1.e*t1.c) from t1 where t1.d in (select t1.c from t1 union select f from t1) or ((t1.ecoalesce((select max((13)) from t1 where (coalesce((select 11 from t1 where -e-t1.d-b-(abs(t1.b)/abs(11))*t1.f*b-e not between (f) and t1.e or 11 in (t1.b,t1.e, -e)),t1.a) in (select t1.b from t1 union select 19 from t1)) and exists(select 1 from 
t1 where 13<19)),t1.e)),e) FROM t1 WHERE NOT (t1.a<>t1.d or not exists(select 1 from t1 where t1.a>=coalesce((select case t1.e*d when -case d*11 when t1.f then t1.e else t1.c end then 13*coalesce((select max(f*b*t1.e*t1.c) from t1 where t1.d in (select t1.c from t1 union select f from t1) or ((t1.e=t1.a then f-coalesce((select -(select max((select min(a)-count(distinct c) from t1)) from t1) from t1 where a between f and d),c) else d end) and c or not exists(select 1 from t1 where 11<=13)),f)) then (e) else t1.b end FROM t1 WHERE not -c between -d+f | 17+t1.a and 11 or 11*e=(+case when c>=d then coalesce((select max(d-t1.f*f) from t1 where 11 in (select count(distinct t1.e) from t1 union select (~case count(*) when -max(t1.f) then count(distinct t1.d) else (cast(avg(t1.f) AS integer)) end) from t1)),t1.c) else d end+t1.b-t1.c+b)+t1.c- -(13)} +} {200} +do_test randexpr-2.1478 { + db eval {SELECT case +a when (coalesce((select d*f*t1.d from t1 where t1.f between (case when e* -13<~ - -t1.e then e when 13>=t1.a then f-coalesce((select -(select max((select min(a)-count(distinct c) from t1)) from t1) from t1 where a between f and d),c) else d end) and c or not exists(select 1 from t1 where 11<=13)),f)) then (e) else t1.b end FROM t1 WHERE NOT (not -c between -d+f | 17+t1.a and 11 or 11*e=(+case when c>=d then coalesce((select max(d-t1.f*f) from t1 where 11 in (select count(distinct t1.e) from t1 union select (~case count(*) when -max(t1.f) then count(distinct t1.d) else (cast(avg(t1.f) AS integer)) end) from t1)),t1.c) else d end+t1.b-t1.c+b)+t1.c- -(13))} +} {} +do_test randexpr-2.1479 { + db eval {SELECT case when case when not exists(select 1 from t1 where a between +d-case when t1.c+e not between case 11 when +t1.b+11 then 19 else -t1.f end+(11) and d and exists(select 1 from t1 where (a)>b) and not exists(select 1 from t1 where 11 in (a,11,t1.c)) then 19 else t1.a end and t1.a) then 17 when t1.d<>e then (t1.a) else e end-a<=e then c else 11 end FROM t1 WHERE coalesce((select a from t1 where not exists(select 1 from t1 where -t1.c not between b and t1.a)),case when t1.d<=19-13 then coalesce((select max((select count(*)*cast(avg(~t1.a-e*~17+t1.e) AS integer) from t1)) from t1 where (exists(select 1 from t1 where + - -e-11*a-t1.d in (11,(t1.b),c)))),19+e)* -b else c end)>=17} +} {300} +do_test randexpr-2.1480 { + db eval {SELECT case when case when not exists(select 1 from t1 where a between +d-case when t1.c+e not between case 11 when +t1.b+11 then 19 else -t1.f end+(11) and d and exists(select 1 from t1 where (a)>b) and not exists(select 1 from t1 where 11 in (a,11,t1.c)) then 19 else t1.a end and t1.a) then 17 when t1.d<>e then (t1.a) else e end-a<=e then c else 11 end FROM t1 WHERE NOT (coalesce((select a from t1 where not exists(select 1 from t1 where -t1.c not between b and t1.a)),case when t1.d<=19-13 then coalesce((select max((select count(*)*cast(avg(~t1.a-e*~17+t1.e) AS integer) from t1)) from t1 where (exists(select 1 from t1 where + - -e-11*a-t1.d in (11,(t1.b),c)))),19+e)* -b else c end)>=17)} +} {} +do_test randexpr-2.1481 { + db eval {SELECT coalesce((select t1.c-~t1.e*19++case t1.b when coalesce((select 17 from t1 where t1.f not between t1.c-+coalesce((select max(a+ -t1.c) from t1 where (select count(distinct 11) from t1) in (select -+case when a not between f and b or b>=t1.d then (t1.d) when a in (t1.e,b,13) then t1.d else t1.c end*a from t1 union select t1.f from t1)),f) and 17),a) | 17*c then 19 else d end+t1.c from t1 where e<>t1.b),19) FROM t1 WHERE not (t1.e not between 
~17 and +t1.e*(~b)*e)} +} {} +do_test randexpr-2.1482 { + db eval {SELECT coalesce((select t1.c-~t1.e*19++case t1.b when coalesce((select 17 from t1 where t1.f not between t1.c-+coalesce((select max(a+ -t1.c) from t1 where (select count(distinct 11) from t1) in (select -+case when a not between f and b or b>=t1.d then (t1.d) when a in (t1.e,b,13) then t1.d else t1.c end*a from t1 union select t1.f from t1)),f) and 17),a) | 17*c then 19 else d end+t1.c from t1 where e<>t1.b),19) FROM t1 WHERE NOT (not (t1.e not between ~17 and +t1.e*(~b)*e))} +} {10519} +do_test randexpr-2.1483 { + db eval {SELECT coalesce((select t1.c-~t1.e*19++case t1.b when coalesce((select 17 from t1 where t1.f not between t1.c-+coalesce((select max(a+ -t1.c) from t1 where (select count(distinct 11) from t1) in (select -+case when a not between f and b or b>=t1.d then (t1.d) when a in (t1.e,b,13) then t1.d else t1.c end*a from t1 union select t1.f from t1)),f) and 17),a) & 17*c then 19 else d end+t1.c from t1 where e<>t1.b),19) FROM t1 WHERE NOT (not (t1.e not between ~17 and +t1.e*(~b)*e))} +} {10519} +do_test randexpr-2.1484 { + db eval {SELECT ~case when b<=(t1.c | (abs(d)/abs(a*f)))+case when (13*(t1.b) not in (19,b,t1.d)) and exists(select 1 from t1 where 19 between t1.d and 11) then (select case count(distinct t1.f) when +( -count(distinct c) | cast(avg(t1.d) AS integer)) then cast(avg(t1.c) AS integer) else min(f) end from t1) else 13 end*t1.c or exists(select 1 from t1 where -f in (select e from t1 union select -t1.a from t1)) then t1.d else t1.c end FROM t1 WHERE exists(select 1 from t1 where t1.c+b-t1.d between 11 and 17)} +} {} +do_test randexpr-2.1485 { + db eval {SELECT ~case when b<=(t1.c | (abs(d)/abs(a*f)))+case when (13*(t1.b) not in (19,b,t1.d)) and exists(select 1 from t1 where 19 between t1.d and 11) then (select case count(distinct t1.f) when +( -count(distinct c) | cast(avg(t1.d) AS integer)) then cast(avg(t1.c) AS integer) else min(f) end from t1) else 13 end*t1.c or exists(select 1 from t1 where -f in (select e from t1 union select -t1.a from t1)) then t1.d else t1.c end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.c+b-t1.d between 11 and 17))} +} {-401} +do_test randexpr-2.1486 { + db eval {SELECT ~case when b<=(t1.c & (abs(d)/abs(a*f)))+case when (13*(t1.b) not in (19,b,t1.d)) and exists(select 1 from t1 where 19 between t1.d and 11) then (select case count(distinct t1.f) when +( -count(distinct c) & cast(avg(t1.d) AS integer)) then cast(avg(t1.c) AS integer) else min(f) end from t1) else 13 end*t1.c or exists(select 1 from t1 where -f in (select e from t1 union select -t1.a from t1)) then t1.d else t1.c end FROM t1 WHERE NOT (exists(select 1 from t1 where t1.c+b-t1.d between 11 and 17))} +} {-401} +do_test randexpr-2.1487 { + db eval {SELECT coalesce((select max(~coalesce((select 19+~coalesce((select (abs((19)*c+~t1.a-t1.a*19)/abs(case when t1.d<>t1.b then t1.f when 13<>19 then (11) else t1.c end)) from t1 where f= -t1.a or t1.d not between t1.d and -f),t1.f) from t1 where not exists(select 1 from t1 where -19 not between d and f)),d)*t1.a-f) from t1 where c not in (c,c,c)),t1.b) FROM t1 WHERE (coalesce((select max( -t1.d+19+11+coalesce((select a from t1 where 11 not between e and t1.d),t1.d)) from t1 where not ( -t1.f not between b and f) and t1.e in (b,11,f)),f)+t1.e<=19) or c>t1.d or e in (a,19,11) or t1.f>b or -t1.f<=13 and a between 11 and t1.a} +} {200} +do_test randexpr-2.1488 { + db eval {SELECT coalesce((select max(~coalesce((select 19+~coalesce((select 
(abs((19)*c+~t1.a-t1.a*19)/abs(case when t1.d<>t1.b then t1.f when 13<>19 then (11) else t1.c end)) from t1 where f= -t1.a or t1.d not between t1.d and -f),t1.f) from t1 where not exists(select 1 from t1 where -19 not between d and f)),d)*t1.a-f) from t1 where c not in (c,c,c)),t1.b) FROM t1 WHERE NOT ((coalesce((select max( -t1.d+19+11+coalesce((select a from t1 where 11 not between e and t1.d),t1.d)) from t1 where not ( -t1.f not between b and f) and t1.e in (b,11,f)),f)+t1.e<=19) or c>t1.d or e in (a,19,11) or t1.f>b or -t1.f<=13 and a between 11 and t1.a)} +} {} +do_test randexpr-2.1489 { + db eval {SELECT case t1.d when 17 then 11 | t1.a-17+13+t1.f*t1.e+t1.e-case when d | t1.e in (t1.b,t1.a,c) then -13 else (b*19) end-(select cast(avg(coalesce((select a from t1 where e in (t1.f,t1.b,f)), -e)) AS integer) from t1)*d-11-t1.e-t1.a else t1.f end FROM t1 WHERE d*13 in (select abs(min(d)) from t1 union select cast(avg(d) AS integer) from t1)} +} {} +do_test randexpr-2.1490 { + db eval {SELECT case t1.d when 17 then 11 | t1.a-17+13+t1.f*t1.e+t1.e-case when d | t1.e in (t1.b,t1.a,c) then -13 else (b*19) end-(select cast(avg(coalesce((select a from t1 where e in (t1.f,t1.b,f)), -e)) AS integer) from t1)*d-11-t1.e-t1.a else t1.f end FROM t1 WHERE NOT (d*13 in (select abs(min(d)) from t1 union select cast(avg(d) AS integer) from t1))} +} {600} +do_test randexpr-2.1491 { + db eval {SELECT case t1.d when 17 then 11 & t1.a-17+13+t1.f*t1.e+t1.e-case when d & t1.e in (t1.b,t1.a,c) then -13 else (b*19) end-(select cast(avg(coalesce((select a from t1 where e in (t1.f,t1.b,f)), -e)) AS integer) from t1)*d-11-t1.e-t1.a else t1.f end FROM t1 WHERE NOT (d*13 in (select abs(min(d)) from t1 union select cast(avg(d) AS integer) from t1))} +} {600} +do_test randexpr-2.1492 { + db eval {SELECT coalesce((select max(19-(abs(t1.c)/abs( -13))-(select count(*)*abs(max(t1.a-b)) from t1)-t1.d) from t1 where (case coalesce((select max(~(select ~count(*) from t1)) from t1 where b=(abs(17)/abs(d))),11*case when not t1.c>=e or 17=c then d else 17*b end*t1.d) when 13 then 11 else t1.c end) in (select c from t1 union select t1.c from t1)),13) FROM t1 WHERE case t1.a*(select -case count(distinct ~(case 13-+t1.d-f when d then c else (19) end)*19)* -max(19) | -count(*)+abs(min(19)) | cast(avg(d) AS integer) when min(a) then count(distinct 11) else count(*) end from t1)*(abs(t1.a)/abs(11))-f-t1.b+(t1.f) when 11 then 17 else 19 end<>d} +} {-504} +do_test randexpr-2.1493 { + db eval {SELECT coalesce((select max(19-(abs(t1.c)/abs( -13))-(select count(*)*abs(max(t1.a-b)) from t1)-t1.d) from t1 where (case coalesce((select max(~(select ~count(*) from t1)) from t1 where b=(abs(17)/abs(d))),11*case when not t1.c>=e or 17=c then d else 17*b end*t1.d) when 13 then 11 else t1.c end) in (select c from t1 union select t1.c from t1)),13) FROM t1 WHERE NOT (case t1.a*(select -case count(distinct ~(case 13-+t1.d-f when d then c else (19) end)*19)* -max(19) | -count(*)+abs(min(19)) | cast(avg(d) AS integer) when min(a) then count(distinct 11) else count(*) end from t1)*(abs(t1.a)/abs(11))-f-t1.b+(t1.f) when 11 then 17 else 19 end<>d)} +} {} +do_test randexpr-2.1494 { + db eval {SELECT +(select abs( -abs(count(distinct case when (abs(t1.f)/abs(t1.a-t1.d)) in (select count(distinct 19) from t1 union select +abs(+ -cast(avg(c) AS integer)) from t1) then -(f) else e end-17)) | case count(distinct b) when count(*) then cast(avg(13) AS integer) else ( -count(distinct d)) end-count(*)* - -cast(avg(a) AS integer)) from t1)+case when t1.f in (select 
d*b | t1.d from t1 union select d from t1) then t1.f when 19<>t1.e then t1.b else 11 end+11 FROM t1 WHERE ft1.e then t1.b else 11 end+11 FROM t1 WHERE NOT (ft1.e then t1.b else 11 end+11 FROM t1 WHERE f17 or 17 between 13 and a and exists(select 1 from t1 where c not between t1.f and t1.d) then coalesce((select max(d) from t1 where t1.e<>19),17) else a end*(( -f))+t1.d FROM t1 WHERE exists(select 1 from t1 where exists(select 1 from t1 where t1.c between case when (a13 or b>=e or 19 between t1.d and -t1.e then t1.f else f end and f) and c>((t1.c))) and not exists(select 1 from t1 where 1717 or 17 between 13 and a and exists(select 1 from t1 where c not between t1.f and t1.d) then coalesce((select max(d) from t1 where t1.e<>19),17) else a end*(( -f))+t1.d FROM t1 WHERE NOT (exists(select 1 from t1 where exists(select 1 from t1 where t1.c between case when (a13 or b>=e or 19 between t1.d and -t1.e then t1.f else f end and f) and c>((t1.c))) and not exists(select 1 from t1 where 17=13 then t1.e else t1.c end)/abs(19)) | (t1.d) FROM t1 WHERE d>=(case when ((t1.a)) between t1.f and a then case when 11 between 19-case when (~coalesce((select t1.e from t1 where d>t1.c and e<>t1.f),e))>=13 then t1.b when t1.d in (select (count(*)-min(19)) from t1 union select cast(avg(t1.e) AS integer) from t1) then 17 else (t1.d) end*t1.a and (13) then 11 when not exists(select 1 from t1 where not 19<>(t1.e) and t1.a between d and d and t1.c=a) then 17 else t1.a end-17 else e end)} +} {} +do_test randexpr-2.1500 { + db eval {SELECT 17+11+d+t1.d | t1.c+(abs(case when 11 in (select count(distinct 11*+t1.d) from t1 union select ~cast(avg(t1.c) AS integer)*case (count(distinct 19)) when (cast(avg(t1.c) AS integer) | min(t1.a-f)) then cast(avg(a) AS integer) else cast(avg(t1.b) AS integer) end+max( - -d) from t1) then t1.a-13 when (11)>=13 then t1.e else t1.c end)/abs(19)) | (t1.d) FROM t1 WHERE NOT (d>=(case when ((t1.a)) between t1.f and a then case when 11 between 19-case when (~coalesce((select t1.e from t1 where d>t1.c and e<>t1.f),e))>=13 then t1.b when t1.d in (select (count(*)-min(19)) from t1 union select cast(avg(t1.e) AS integer) from t1) then 17 else (t1.d) end*t1.a and (13) then 11 when not exists(select 1 from t1 where not 19<>(t1.e) and t1.a between d and d and t1.c=a) then 17 else t1.a end-17 else e end))} +} {959} +do_test randexpr-2.1501 { + db eval {SELECT 17+11+d+t1.d & t1.c+(abs(case when 11 in (select count(distinct 11*+t1.d) from t1 union select ~cast(avg(t1.c) AS integer)*case (count(distinct 19)) when (cast(avg(t1.c) AS integer) & min(t1.a-f)) then cast(avg(a) AS integer) else cast(avg(t1.b) AS integer) end+max( - -d) from t1) then t1.a-13 when (11)>=13 then t1.e else t1.c end)/abs(19)) & (t1.d) FROM t1 WHERE NOT (d>=(case when ((t1.a)) between t1.f and a then case when 11 between 19-case when (~coalesce((select t1.e from t1 where d>t1.c and e<>t1.f),e))>=13 then t1.b when t1.d in (select (count(*)-min(19)) from t1 union select cast(avg(t1.e) AS integer) from t1) then 17 else (t1.d) end*t1.a and (13) then 11 when not exists(select 1 from t1 where not 19<>(t1.e) and t1.a between d and d and t1.c=a) then 17 else t1.a end-17 else e end))} +} {272} +do_test randexpr-2.1502 { + db eval {SELECT coalesce((select 17 from t1 where ((abs(19)/abs(case when ((f+17<(d))) and (abs(t1.c)/abs((t1.b)))>11 then ~a+11 when (t1.f)>t1.d then 19 else c end))<>t1.b) or (b in (( -t1.d),b,e) and (t1.a)<>c) and (17) not in (t1.f,11,17) or (f) not between t1.c and -t1.c),t1.a) FROM t1 WHERE 17 between -t1.b+f-19 | 17 
and t1.e+t1.c} +} {} +do_test randexpr-2.1503 { + db eval {SELECT coalesce((select 17 from t1 where ((abs(19)/abs(case when ((f+17<(d))) and (abs(t1.c)/abs((t1.b)))>11 then ~a+11 when (t1.f)>t1.d then 19 else c end))<>t1.b) or (b in (( -t1.d),b,e) and (t1.a)<>c) and (17) not in (t1.f,11,17) or (f) not between t1.c and -t1.c),t1.a) FROM t1 WHERE NOT (17 between -t1.b+f-19 | 17 and t1.e+t1.c)} +} {17} +do_test randexpr-2.1504 { + db eval {SELECT +(11)-19-case when 19+13-19+f*19*c not in (coalesce((select max((select ~min(t1.b)+(count(distinct t1.d)) | min(13)*count(distinct t1.a)*max(a) from t1)) from t1 where not 19 in (select t1.d from t1 union select 19 from t1) or 19<=t1.c),t1.b | f) | e-f,d,d) then t1.a else d end FROM t1 WHERE t1.d in (select case when t1.e>(case coalesce((select (11)-t1.e from t1 where case t1.e when t1.c then (coalesce((select max(+t1.b) from t1 where (a in (case when 19 in (select cast(avg((t1.b)) AS integer) from t1 union select cast(avg( - -17) AS integer) from t1) then a when 17>=13 then c else t1.e end,11,11) or not 19 between t1.b and t1.a)),t1.e)) else d end<=t1.c),13) when e then a else d end) then a when 19 in (select (11) from t1 union select t1.e from t1) then (e) else t1.d end from t1 union select t1.d from t1)} +} {-108} +do_test randexpr-2.1505 { + db eval {SELECT +(11)-19-case when 19+13-19+f*19*c not in (coalesce((select max((select ~min(t1.b)+(count(distinct t1.d)) | min(13)*count(distinct t1.a)*max(a) from t1)) from t1 where not 19 in (select t1.d from t1 union select 19 from t1) or 19<=t1.c),t1.b | f) | e-f,d,d) then t1.a else d end FROM t1 WHERE NOT (t1.d in (select case when t1.e>(case coalesce((select (11)-t1.e from t1 where case t1.e when t1.c then (coalesce((select max(+t1.b) from t1 where (a in (case when 19 in (select cast(avg((t1.b)) AS integer) from t1 union select cast(avg( - -17) AS integer) from t1) then a when 17>=13 then c else t1.e end,11,11) or not 19 between t1.b and t1.a)),t1.e)) else d end<=t1.c),13) when e then a else d end) then a when 19 in (select (11) from t1 union select t1.e from t1) then (e) else t1.d end from t1 union select t1.d from t1))} +} {} +do_test randexpr-2.1506 { + db eval {SELECT +(11)-19-case when 19+13-19+f*19*c not in (coalesce((select max((select ~min(t1.b)+(count(distinct t1.d)) & min(13)*count(distinct t1.a)*max(a) from t1)) from t1 where not 19 in (select t1.d from t1 union select 19 from t1) or 19<=t1.c),t1.b & f) & e-f,d,d) then t1.a else d end FROM t1 WHERE t1.d in (select case when t1.e>(case coalesce((select (11)-t1.e from t1 where case t1.e when t1.c then (coalesce((select max(+t1.b) from t1 where (a in (case when 19 in (select cast(avg((t1.b)) AS integer) from t1 union select cast(avg( - -17) AS integer) from t1) then a when 17>=13 then c else t1.e end,11,11) or not 19 between t1.b and t1.a)),t1.e)) else d end<=t1.c),13) when e then a else d end) then a when 19 in (select (11) from t1 union select t1.e from t1) then (e) else t1.d end from t1 union select t1.d from t1)} +} {-108} +do_test randexpr-2.1507 { + db eval {SELECT (abs(case t1.f when (abs((abs( -t1.b+t1.f | case when t1.e-b*19<=17 then 13 when t1.d<>c-coalesce((select max(coalesce((select t1.d from t1 where (not exists(select 1 from t1 where (19)<=a) or 11 between t1.e and f)),d | t1.f)) from t1 where (t1.a between 19 and t1.a)),13) then 19 else f end)/abs(t1.f))-t1.a)/abs(e))+t1.e then t1.b else b end)/abs(19))+t1.a FROM t1 WHERE 17 in (select f*f from t1 union select a+d from t1)} +} {} +do_test randexpr-2.1508 { + db eval {SELECT 
(abs(case t1.f when (abs((abs( -t1.b+t1.f | case when t1.e-b*19<=17 then 13 when t1.d<>c-coalesce((select max(coalesce((select t1.d from t1 where (not exists(select 1 from t1 where (19)<=a) or 11 between t1.e and f)),d | t1.f)) from t1 where (t1.a between 19 and t1.a)),13) then 19 else f end)/abs(t1.f))-t1.a)/abs(e))+t1.e then t1.b else b end)/abs(19))+t1.a FROM t1 WHERE NOT (17 in (select f*f from t1 union select a+d from t1))} +} {110} +do_test randexpr-2.1509 { + db eval {SELECT (abs(case t1.f when (abs((abs( -t1.b+t1.f & case when t1.e-b*19<=17 then 13 when t1.d<>c-coalesce((select max(coalesce((select t1.d from t1 where (not exists(select 1 from t1 where (19)<=a) or 11 between t1.e and f)),d & t1.f)) from t1 where (t1.a between 19 and t1.a)),13) then 19 else f end)/abs(t1.f))-t1.a)/abs(e))+t1.e then t1.b else b end)/abs(19))+t1.a FROM t1 WHERE NOT (17 in (select f*f from t1 union select a+d from t1))} +} {110} +do_test randexpr-2.1510 { + db eval {SELECT (coalesce((select t1.e from t1 where +case when t1.c+f<>d-17 then t1.a when not (abs(t1.c)/abs(case case -f when f then a else t1.b end-t1.e-t1.f when t1.b then -e else t1.c end))>e or t1.b=a or exists(select 1 from t1 where 19>=t1.b) and t1.c< -e then t1.e else 13 end<=t1.b),b)) | d FROM t1 WHERE case 13 when 13 then 17-f else a end=e and (exists(select 1 from t1 where b in (select t1.f from t1 union select coalesce((select case when (abs(t1.c)/abs(17)) in (a,f*c,11) then c when ( -t1.d) in (select ~count(distinct t1.b) from t1 union select min((t1.c)) from t1) then t1.a else t1.e end from t1 where exists(select 1 from t1 where (t1.f) in (t1.d,17, -11))),b)-d*11*c+a from t1)))} +} {} +do_test randexpr-2.1511 { + db eval {SELECT (coalesce((select t1.e from t1 where +case when t1.c+f<>d-17 then t1.a when not (abs(t1.c)/abs(case case -f when f then a else t1.b end-t1.e-t1.f when t1.b then -e else t1.c end))>e or t1.b=a or exists(select 1 from t1 where 19>=t1.b) and t1.c< -e then t1.e else 13 end<=t1.b),b)) | d FROM t1 WHERE NOT (case 13 when 13 then 17-f else a end=e and (exists(select 1 from t1 where b in (select t1.f from t1 union select coalesce((select case when (abs(t1.c)/abs(17)) in (a,f*c,11) then c when ( -t1.d) in (select ~count(distinct t1.b) from t1 union select min((t1.c)) from t1) then t1.a else t1.e end from t1 where exists(select 1 from t1 where (t1.f) in (t1.d,17, -11))),b)-d*11*c+a from t1))))} +} {500} +do_test randexpr-2.1512 { + db eval {SELECT (coalesce((select t1.e from t1 where +case when t1.c+f<>d-17 then t1.a when not (abs(t1.c)/abs(case case -f when f then a else t1.b end-t1.e-t1.f when t1.b then -e else t1.c end))>e or t1.b=a or exists(select 1 from t1 where 19>=t1.b) and t1.c< -e then t1.e else 13 end<=t1.b),b)) & d FROM t1 WHERE NOT (case 13 when 13 then 17-f else a end=e and (exists(select 1 from t1 where b in (select t1.f from t1 union select coalesce((select case when (abs(t1.c)/abs(17)) in (a,f*c,11) then c when ( -t1.d) in (select ~count(distinct t1.b) from t1 union select min((t1.c)) from t1) then t1.a else t1.e end from t1 where exists(select 1 from t1 where (t1.f) in (t1.d,17, -11))),b)-d*11*c+a from t1))))} +} {400} +do_test randexpr-2.1513 { + db eval {SELECT (abs( -case when exists(select 1 from t1 where (abs(e++11-(abs(c)/abs(t1.f)))/abs(19+t1.b))+t1.b-c=(b)) or d in (~d, -f,c) then (select min(13) from t1)+11+~t1.a*t1.a else d end-t1.d)/abs((select (max(d))+count(*)-min(t1.c) from t1))) FROM t1 WHERE (select +min(c+d) | (max(13)+min(case when t1.e not in (t1.d,t1.d,19*t1.d) then t1.a when 17=d 
then t1.b else a end)+ - -max(t1.a)*count(distinct f) | count(*)+max(t1.d) | count(distinct d))-count(distinct 17)-count(*) from t1) not in (e,d+11,(t1.a+t1.f))} +} {7} +do_test randexpr-2.1514 { + db eval {SELECT (abs( -case when exists(select 1 from t1 where (abs(e++11-(abs(c)/abs(t1.f)))/abs(19+t1.b))+t1.b-c=(b)) or d in (~d, -f,c) then (select min(13) from t1)+11+~t1.a*t1.a else d end-t1.d)/abs((select (max(d))+count(*)-min(t1.c) from t1))) FROM t1 WHERE NOT ((select +min(c+d) | (max(13)+min(case when t1.e not in (t1.d,t1.d,19*t1.d) then t1.a when 17=d then t1.b else a end)+ - -max(t1.a)*count(distinct f) | count(*)+max(t1.d) | count(distinct d))-count(distinct 17)-count(*) from t1) not in (e,d+11,(t1.a+t1.f)))} +} {} +do_test randexpr-2.1515 { + db eval {SELECT case when not exists(select 1 from t1 where (not case t1.b when d then c else coalesce((select max(b+case c+c when t1.c then coalesce((select ((e)) from t1 where c in (e,t1.f,t1.d)),t1.a) else t1.f end+t1.e) from t1 where not t1.e not between t1.f and t1.a),19) end+f<>b)) then e when f in (19,t1.d, -c) then 19 else case when e>=t1.f then 19 else 19 end end FROM t1 WHERE 19 between e+case when d>(select ~max(t1.d)+max(t1.d*t1.c)-~count(distinct c)-count(*)-count(distinct t1.b)* -(count(*)) from t1) then ~a when not exists(select 1 from t1 where t1.c>=13) or (t1.f=t1.a or c in ((f), -c,t1.e)) and b=b and e not between t1.b and -11 or t1.c in (t1.c,17,t1.c) and t1.f=t1.d then t1.a*t1.d*t1.c else d end and e} +} {} +do_test randexpr-2.1516 { + db eval {SELECT case when not exists(select 1 from t1 where (not case t1.b when d then c else coalesce((select max(b+case c+c when t1.c then coalesce((select ((e)) from t1 where c in (e,t1.f,t1.d)),t1.a) else t1.f end+t1.e) from t1 where not t1.e not between t1.f and t1.a),19) end+f<>b)) then e when f in (19,t1.d, -c) then 19 else case when e>=t1.f then 19 else 19 end end FROM t1 WHERE NOT (19 between e+case when d>(select ~max(t1.d)+max(t1.d*t1.c)-~count(distinct c)-count(*)-count(distinct t1.b)* -(count(*)) from t1) then ~a when not exists(select 1 from t1 where t1.c>=13) or (t1.f=t1.a or c in ((f), -c,t1.e)) and b=b and e not between t1.b and -11 or t1.c in (t1.c,17,t1.c) and t1.f=t1.d then t1.a*t1.d*t1.c else d end and e)} +} {500} +do_test randexpr-2.1517 { + db eval {SELECT (select -+case -(case -max(d) when ~+ -+(count(*)) then abs(max(case 17+t1.a-11-t1.c when 19 then f else e end*((t1.c))+b)) else (min(d)*count(distinct t1.e)) end)++abs(count(*))- -cast(avg(c) AS integer)+max(d) when count(distinct c) then min(f) else (count(distinct a)) end from t1) FROM t1 WHERE ((case -a when case when f between 11 and t1.d then d else c+17 end then b else a end+11-c between -a and t1.a) or b>t1.f and t1.at1.f and t1.a=f) or f in (t1.a,(t1.a),a)) then -~c-d else t1.e end)/abs(13))) FROM t1 WHERE (select (+~max(case when t1.e>=coalesce((select max(t1.d) from t1 where +f not in (coalesce((select 11 from t1 where 13 | c in (select c from t1 union select b from t1)),f),t1.c, -a)),13) then e else 19 end)-min(t1.a)) from t1)+((coalesce((select t1.e from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where t1.c=t1.a))),a)))+13 in (select d from t1 union select t1.a from t1) and e>=b} +} {} +do_test randexpr-2.1520 { + db eval {SELECT -coalesce((select max((select case +count(*)+(case min((abs(d)/abs(b))) when +count(*) | (min(b)) then -min((t1.a)) else max( -t1.a) end*max( -t1.e))+max(f) when (count(*)) then count(distinct 17) else cast(avg((t1.c)) AS integer) end from t1)) from 
t1 where t1.d in (select 13 from t1 union select t1.c from t1)),(abs(case when ((c>=f) or f in (t1.a,(t1.a),a)) then -~c-d else t1.e end)/abs(13))) FROM t1 WHERE NOT ((select (+~max(case when t1.e>=coalesce((select max(t1.d) from t1 where +f not in (coalesce((select 11 from t1 where 13 | c in (select c from t1 union select b from t1)),f),t1.c, -a)),13) then e else 19 end)-min(t1.a)) from t1)+((coalesce((select t1.e from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where t1.c=t1.a))),a)))+13 in (select d from t1 union select t1.a from t1) and e>=b)} +} {-38} +do_test randexpr-2.1521 { + db eval {SELECT -coalesce((select max((select case +count(*)+(case min((abs(d)/abs(b))) when +count(*) & (min(b)) then -min((t1.a)) else max( -t1.a) end*max( -t1.e))+max(f) when (count(*)) then count(distinct 17) else cast(avg((t1.c)) AS integer) end from t1)) from t1 where t1.d in (select 13 from t1 union select t1.c from t1)),(abs(case when ((c>=f) or f in (t1.a,(t1.a),a)) then -~c-d else t1.e end)/abs(13))) FROM t1 WHERE NOT ((select (+~max(case when t1.e>=coalesce((select max(t1.d) from t1 where +f not in (coalesce((select 11 from t1 where 13 | c in (select c from t1 union select b from t1)),f),t1.c, -a)),13) then e else 19 end)-min(t1.a)) from t1)+((coalesce((select t1.e from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where t1.c=t1.a))),a)))+13 in (select d from t1 union select t1.a from t1) and e>=b)} +} {-38} +do_test randexpr-2.1522 { + db eval {SELECT +(select +abs(+cast(avg(t1.b) AS integer)*abs(case -max( -(t1.a*t1.f)+t1.e* -t1.f*13 | 17 | f+t1.c) when abs(cast(avg(d) AS integer))-(++~min(t1.f) | count(*)-cast(avg(f) AS integer)) then count(*) else cast(avg(13) AS integer) end+ -min(b)))+min(a) from t1) FROM t1 WHERE case t1.a-t1.e*+t1.f-(select abs(count(*)) from t1)+coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where ~t1.b in (select +count(*) | count(distinct case b when t1.a then f else case when (t1.d-t1.b)<11 then t1.d else f end end) from t1 union select +cast(avg(( -d)) AS integer) from t1))),t1.d*t1.a) | e when t1.f then (t1.d) else t1.f end not in (d,f,e)} +} {} +do_test randexpr-2.1523 { + db eval {SELECT +(select +abs(+cast(avg(t1.b) AS integer)*abs(case -max( -(t1.a*t1.f)+t1.e* -t1.f*13 | 17 | f+t1.c) when abs(cast(avg(d) AS integer))-(++~min(t1.f) | count(*)-cast(avg(f) AS integer)) then count(*) else cast(avg(13) AS integer) end+ -min(b)))+min(a) from t1) FROM t1 WHERE NOT (case t1.a-t1.e*+t1.f-(select abs(count(*)) from t1)+coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where ~t1.b in (select +count(*) | count(distinct case b when t1.a then f else case when (t1.d-t1.b)<11 then t1.d else f end end) from t1 union select +cast(avg(( -d)) AS integer) from t1))),t1.d*t1.a) | e when t1.f then (t1.d) else t1.f end not in (d,f,e))} +} {37500} +do_test randexpr-2.1524 { + db eval {SELECT +(select +abs(+cast(avg(t1.b) AS integer)*abs(case -max( -(t1.a*t1.f)+t1.e* -t1.f*13 & 17 & f+t1.c) when abs(cast(avg(d) AS integer))-(++~min(t1.f) & count(*)-cast(avg(f) AS integer)) then count(*) else cast(avg(13) AS integer) end+ -min(b)))+min(a) from t1) FROM t1 WHERE NOT (case t1.a-t1.e*+t1.f-(select abs(count(*)) from t1)+coalesce((select max(t1.c) from t1 where not exists(select 1 from t1 where ~t1.b in (select +count(*) | count(distinct case b when t1.a then f else case when (t1.d-t1.b)<11 then t1.d else f end end) from t1 union select +cast(avg(( -d)) AS integer) from t1))),t1.d*t1.a) | e when t1.f 
then (t1.d) else t1.f end not in (d,f,e))} +} {37500} +do_test randexpr-2.1525 { + db eval {SELECT t1.d+case when coalesce((select b from t1 where case when (select max(t1.a*(select case count(distinct c) when max(17+c) then - -max(d)-count(*) else cast(avg((e)) AS integer) end from t1)) from t1) not between t1.d and t1.d then t1.d when t1.c not between c and t1.f then ( -t1.d) else t1.d end not between e and t1.e),d)<>17 then (t1.f) when 11=t1.c then 11 else t1.f end*c*t1.a-c FROM t1 WHERE (case when coalesce((select ~coalesce((select max(a) from t1 where 11 in (select ~case -count(*) when cast(avg(d) AS integer) then -max(t1.f) else (count(distinct 11)) end+cast(avg( - -13) AS integer) | count(*)-min(f) from t1 union select count(*) from t1)),t1.b*t1.f*17) from t1 where not exists(select 1 from t1 where t1.a not between d and 11)),b)-17*t1.d+t1.a in ( -t1.c,t1.e,11) then b else t1.d end in (select 17 from t1 union select 13 from t1)) and -t1.a in (select (d) from t1 union select t1.b from t1)} +} {} +do_test randexpr-2.1526 { + db eval {SELECT t1.d+case when coalesce((select b from t1 where case when (select max(t1.a*(select case count(distinct c) when max(17+c) then - -max(d)-count(*) else cast(avg((e)) AS integer) end from t1)) from t1) not between t1.d and t1.d then t1.d when t1.c not between c and t1.f then ( -t1.d) else t1.d end not between e and t1.e),d)<>17 then (t1.f) when 11=t1.c then 11 else t1.f end*c*t1.a-c FROM t1 WHERE NOT ((case when coalesce((select ~coalesce((select max(a) from t1 where 11 in (select ~case -count(*) when cast(avg(d) AS integer) then -max(t1.f) else (count(distinct 11)) end+cast(avg( - -13) AS integer) | count(*)-min(f) from t1 union select count(*) from t1)),t1.b*t1.f*17) from t1 where not exists(select 1 from t1 where t1.a not between d and 11)),b)-17*t1.d+t1.a in ( -t1.c,t1.e,11) then b else t1.d end in (select 17 from t1 union select 13 from t1)) and -t1.a in (select (d) from t1 union select t1.b from t1))} +} {18000100} +do_test randexpr-2.1527 { + db eval {SELECT coalesce((select (abs(b+t1.c)/abs(coalesce((select max( -~t1.d) from t1 where coalesce((select max(11) from t1 where not t1.d>=(select count(*) from t1)),case b-coalesce((select t1.e-f from t1 where coalesce((select max(case a when t1.e then t1.f else 11 end) from t1 where t1.b>=t1.f),t1.d) not in (f,e,13)),t1.f) when -11 then t1.c else 19 end)-d not in (f,f,b)),f))) from t1 where d<=t1.d),f) FROM t1 WHERE c+case when coalesce((select d from t1 where coalesce((select max(17) from t1 where c>17-11),17)=(select count(*) from t1)),case b-coalesce((select t1.e-f from t1 where coalesce((select max(case a when t1.e then t1.f else 11 end) from t1 where t1.b>=t1.f),t1.d) not in (f,e,13)),t1.f) when -11 then t1.c else 19 end)-d not in (f,f,b)),f))) from t1 where d<=t1.d),f) FROM t1 WHERE NOT (c+case when coalesce((select d from t1 where coalesce((select max(17) from t1 where c>17-11),17)f and (exists(select 1 from t1 where (17 in (e, -t1.f,c)) and t1.f<=19))),d))) FROM t1 WHERE f*t1.c<>t1.b} +} {0} +do_test randexpr-2.1530 { + db eval {SELECT (abs(19)/abs(coalesce((select max((abs((abs(t1.a)/abs(e)))/abs(b))) from t1 where not t1.f<13 and 17-(abs(b-13*f*(abs(+17-f+t1.a)/abs(t1.d)))/abs(t1.e))+e-17>f and (exists(select 1 from t1 where (17 in (e, -t1.f,c)) and t1.f<=19))),d))) FROM t1 WHERE NOT (f*t1.c<>t1.b)} +} {} +do_test randexpr-2.1531 { + db eval {SELECT case when t1.d in (select count(distinct 11) from t1 union select case abs(++cast(avg(t1.b) AS integer)+count(*) | -(count(*))) when count(*) 
then count(distinct t1.b) else count(distinct -t1.a) end | max(t1.c) from t1) and t1.f<=case when exists(select 1 from t1 where t1.f+t1.f<>17 and t1.b>d) then 19 when t1.e=a then coalesce((select b from t1 where 13 not in (t1.b,t1.a,t1.a)),t1.c) else t1.d end and t1.d>(t1.b) and c in (e,t1.d,t1.b) then b | c when f=t1.e then e else 19 end FROM t1 WHERE t1.f between (c-t1.d+t1.f*t1.f) and 11+t1.e*coalesce((select max(coalesce((select max(t1.e) from t1 where exists(select 1 from t1 where (c-13 between t1.e*a and 11 or (t1.f>f) or t1.c>=t1.f))),t1.b-b-11)) from t1 where (f>t1.f)),11)-t1.b*e} +} {} +do_test randexpr-2.1532 { + db eval {SELECT case when t1.d in (select count(distinct 11) from t1 union select case abs(++cast(avg(t1.b) AS integer)+count(*) | -(count(*))) when count(*) then count(distinct t1.b) else count(distinct -t1.a) end | max(t1.c) from t1) and t1.f<=case when exists(select 1 from t1 where t1.f+t1.f<>17 and t1.b>d) then 19 when t1.e=a then coalesce((select b from t1 where 13 not in (t1.b,t1.a,t1.a)),t1.c) else t1.d end and t1.d>(t1.b) and c in (e,t1.d,t1.b) then b | c when f=t1.e then e else 19 end FROM t1 WHERE NOT (t1.f between (c-t1.d+t1.f*t1.f) and 11+t1.e*coalesce((select max(coalesce((select max(t1.e) from t1 where exists(select 1 from t1 where (c-13 between t1.e*a and 11 or (t1.f>f) or t1.c>=t1.f))),t1.b-b-11)) from t1 where (f>t1.f)),11)-t1.b*e)} +} {19} +do_test randexpr-2.1533 { + db eval {SELECT case when t1.d in (select count(distinct 11) from t1 union select case abs(++cast(avg(t1.b) AS integer)+count(*) & -(count(*))) when count(*) then count(distinct t1.b) else count(distinct -t1.a) end & max(t1.c) from t1) and t1.f<=case when exists(select 1 from t1 where t1.f+t1.f<>17 and t1.b>d) then 19 when t1.e=a then coalesce((select b from t1 where 13 not in (t1.b,t1.a,t1.a)),t1.c) else t1.d end and t1.d>(t1.b) and c in (e,t1.d,t1.b) then b & c when f=t1.e then e else 19 end FROM t1 WHERE NOT (t1.f between (c-t1.d+t1.f*t1.f) and 11+t1.e*coalesce((select max(coalesce((select max(t1.e) from t1 where exists(select 1 from t1 where (c-13 between t1.e*a and 11 or (t1.f>f) or t1.c>=t1.f))),t1.b-b-11)) from t1 where (f>t1.f)),11)-t1.b*e)} +} {19} +do_test randexpr-2.1534 { + db eval {SELECT coalesce((select t1.e from t1 where (case when 19>=t1.c then t1.d else coalesce((select max(11*t1.b) from t1 where case ~t1.c-(select max(11) from t1)*~t1.b+f*~coalesce((select max(d) from t1 where d>=b),d)-t1.a*t1.c*t1.d when -t1.c then 13 else f end in (select b from t1 union select 13 from t1)),t1.f) end | b<>a)),t1.e) FROM t1 WHERE not exists(select 1 from t1 where not (t1.et1.b+case (select count(distinct e)*cast(avg(case c when e then case a when f then b else d end-a-13 else 13 end*t1.a+ -t1.d) AS integer) from t1)+c | (t1.f)*f when -c then d else 19 end) and -17 in (select min(t1.b) from t1 union select count(distinct f) from t1))} +} {500} +do_test randexpr-2.1535 { + db eval {SELECT coalesce((select t1.e from t1 where (case when 19>=t1.c then t1.d else coalesce((select max(11*t1.b) from t1 where case ~t1.c-(select max(11) from t1)*~t1.b+f*~coalesce((select max(d) from t1 where d>=b),d)-t1.a*t1.c*t1.d when -t1.c then 13 else f end in (select b from t1 union select 13 from t1)),t1.f) end | b<>a)),t1.e) FROM t1 WHERE NOT (not exists(select 1 from t1 where not (t1.et1.b+case (select count(distinct e)*cast(avg(case c when e then case a when f then b else d end-a-13 else 13 end*t1.a+ -t1.d) AS integer) from t1)+c | (t1.f)*f when -c then d else 19 end) and -17 in (select min(t1.b) from t1 
union select count(distinct f) from t1)))} +} {} +do_test randexpr-2.1536 { + db eval {SELECT coalesce((select t1.e from t1 where (case when 19>=t1.c then t1.d else coalesce((select max(11*t1.b) from t1 where case ~t1.c-(select max(11) from t1)*~t1.b+f*~coalesce((select max(d) from t1 where d>=b),d)-t1.a*t1.c*t1.d when -t1.c then 13 else f end in (select b from t1 union select 13 from t1)),t1.f) end & b<>a)),t1.e) FROM t1 WHERE not exists(select 1 from t1 where not (t1.et1.b+case (select count(distinct e)*cast(avg(case c when e then case a when f then b else d end-a-13 else 13 end*t1.a+ -t1.d) AS integer) from t1)+c | (t1.f)*f when -c then d else 19 end) and -17 in (select min(t1.b) from t1 union select count(distinct f) from t1))} +} {500} +do_test randexpr-2.1537 { + db eval {SELECT coalesce((select t1.c from t1 where f between case when (abs(coalesce((select 11* -~coalesce((select a*case coalesce((select max(a) from t1 where exists(select 1 from t1 where e in (17,t1.b,13))), -t1.e) when -a then d else c end from t1 where t1.a>=t1.e),17)+t1.a-t1.d from t1 where exists(select 1 from t1 where -b>t1.b)),e))/abs(d))<>17 then t1.d when 11< -f then d else d end*d and 17),b)-f-t1.d FROM t1 WHERE case when e=(abs(t1.b)/abs((abs(t1.f)/abs( -case b when 19 then ((abs((case when (+case when f between e and c then t1.f when t1.b>19 then t1.e else t1.f end+11+t1.a in (select t1.c from t1 union select f from t1)) then e-(13) when not exists(select 1 from t1 where not exists(select 1 from t1 where (13)>t1.f)) then t1.c else t1.f end))/abs(t1.d))) else c end)))) then b else (t1.d) end not between t1.a and t1.a} +} {-800} +do_test randexpr-2.1538 { + db eval {SELECT coalesce((select t1.c from t1 where f between case when (abs(coalesce((select 11* -~coalesce((select a*case coalesce((select max(a) from t1 where exists(select 1 from t1 where e in (17,t1.b,13))), -t1.e) when -a then d else c end from t1 where t1.a>=t1.e),17)+t1.a-t1.d from t1 where exists(select 1 from t1 where -b>t1.b)),e))/abs(d))<>17 then t1.d when 11< -f then d else d end*d and 17),b)-f-t1.d FROM t1 WHERE NOT (case when e=(abs(t1.b)/abs((abs(t1.f)/abs( -case b when 19 then ((abs((case when (+case when f between e and c then t1.f when t1.b>19 then t1.e else t1.f end+11+t1.a in (select t1.c from t1 union select f from t1)) then e-(13) when not exists(select 1 from t1 where not exists(select 1 from t1 where (13)>t1.f)) then t1.c else t1.f end))/abs(t1.d))) else c end)))) then b else (t1.d) end not between t1.a and t1.a)} +} {} +do_test randexpr-2.1539 { + db eval {SELECT case when not exists(select 1 from t1 where (coalesce((select max(coalesce((select 11 from t1 where t1.b*t1.d<=+case t1.c when (select max(case when not exists(select 1 from t1 where e=t1.b) then t1.a else f end) from t1)*t1.b | t1.a then 17 else 13 end-f | t1.e),13)) from t1 where a in (t1.b,t1.b,(t1.e))),11)*11 not in (d,t1.a,17))) then 13 when c in (select e from t1 union select t1.d from t1) then 19 else 13 end FROM t1 WHERE not exists(select 1 from t1 where case when -b*13>+d then t1.e else coalesce((select case when not exists(select 1 from t1 where 11 in (c,+case when (case when e between t1.b and f then t1.a when t1.c not in (a,(c),t1.a) then -t1.b else t1.a end<>d or f>e) then t1.e else t1.c end,t1.b)) then 19 else -17 end*t1.d from t1 where (17 in (19,17,t1.f))),(t1.e)) end between 11 and t1.b and t1.a<>b)} +} {13} +do_test randexpr-2.1540 { + db eval {SELECT case when not exists(select 1 from t1 where (coalesce((select max(coalesce((select 11 from t1 where 
t1.b*t1.d<=+case t1.c when (select max(case when not exists(select 1 from t1 where e=t1.b) then t1.a else f end) from t1)*t1.b | t1.a then 17 else 13 end-f | t1.e),13)) from t1 where a in (t1.b,t1.b,(t1.e))),11)*11 not in (d,t1.a,17))) then 13 when c in (select e from t1 union select t1.d from t1) then 19 else 13 end FROM t1 WHERE NOT (not exists(select 1 from t1 where case when -b*13>+d then t1.e else coalesce((select case when not exists(select 1 from t1 where 11 in (c,+case when (case when e between t1.b and f then t1.a when t1.c not in (a,(c),t1.a) then -t1.b else t1.a end<>d or f>e) then t1.e else t1.c end,t1.b)) then 19 else -17 end*t1.d from t1 where (17 in (19,17,t1.f))),(t1.e)) end between 11 and t1.b and t1.a<>b))} +} {} +do_test randexpr-2.1541 { + db eval {SELECT case when not exists(select 1 from t1 where (coalesce((select max(coalesce((select 11 from t1 where t1.b*t1.d<=+case t1.c when (select max(case when not exists(select 1 from t1 where e=t1.b) then t1.a else f end) from t1)*t1.b & t1.a then 17 else 13 end-f & t1.e),13)) from t1 where a in (t1.b,t1.b,(t1.e))),11)*11 not in (d,t1.a,17))) then 13 when c in (select e from t1 union select t1.d from t1) then 19 else 13 end FROM t1 WHERE not exists(select 1 from t1 where case when -b*13>+d then t1.e else coalesce((select case when not exists(select 1 from t1 where 11 in (c,+case when (case when e between t1.b and f then t1.a when t1.c not in (a,(c),t1.a) then -t1.b else t1.a end<>d or f>e) then t1.e else t1.c end,t1.b)) then 19 else -17 end*t1.d from t1 where (17 in (19,17,t1.f))),(t1.e)) end between 11 and t1.b and t1.a<>b)} +} {13} +do_test randexpr-2.1542 { + db eval {SELECT a-coalesce((select max(++19) from t1 where (select count(distinct coalesce((select max(t1.c) from t1 where not (t1.c+t1.c)-case when (not coalesce((select max(t1.a-t1.a) from t1 where t1.d>t1.b),e) in (t1.d,19,(t1.e))) then b when exists(select 1 from t1 where t1.e> -13) then c else e end<>c),17)) from t1) not between 19 and t1.f),t1.b) | 19-t1.d*f+b FROM t1 WHERE t1.f | coalesce((select case when c>=t1.f then t1.e+t1.e when -19 | (a)+t1.f in (b,coalesce((select t1.c | (f+e)-e*17 from t1 where -c not in (t1.d,c,(13))),19),t1.e) then t1.c else 17 end+b from t1 where t1.a not in (17,d,c)),17) in (select f from t1 union select f from t1)} +} {} +do_test randexpr-2.1543 { + db eval {SELECT a-coalesce((select max(++19) from t1 where (select count(distinct coalesce((select max(t1.c) from t1 where not (t1.c+t1.c)-case when (not coalesce((select max(t1.a-t1.a) from t1 where t1.d>t1.b),e) in (t1.d,19,(t1.e))) then b when exists(select 1 from t1 where t1.e> -13) then c else e end<>c),17)) from t1) not between 19 and t1.f),t1.b) | 19-t1.d*f+b FROM t1 WHERE NOT (t1.f | coalesce((select case when c>=t1.f then t1.e+t1.e when -19 | (a)+t1.f in (b,coalesce((select t1.c | (f+e)-e*17 from t1 where -c not in (t1.d,c,(13))),19),t1.e) then t1.c else 17 end+b from t1 where t1.a not in (17,d,c)),17) in (select f from t1 union select f from t1))} +} {-239781} +do_test randexpr-2.1544 { + db eval {SELECT a-coalesce((select max(++19) from t1 where (select count(distinct coalesce((select max(t1.c) from t1 where not (t1.c+t1.c)-case when (not coalesce((select max(t1.a-t1.a) from t1 where t1.d>t1.b),e) in (t1.d,19,(t1.e))) then b when exists(select 1 from t1 where t1.e> -13) then c else e end<>c),17)) from t1) not between 19 and t1.f),t1.b) & 19-t1.d*f+b FROM t1 WHERE NOT (t1.f | coalesce((select case when c>=t1.f then t1.e+t1.e when -19 | (a)+t1.f in (b,coalesce((select t1.c | 
(f+e)-e*17 from t1 where -c not in (t1.d,c,(13))),19),t1.e) then t1.c else 17 end+b from t1 where t1.a not in (17,d,c)),17) in (select f from t1 union select f from t1))} +} {81} +do_test randexpr-2.1545 { + db eval {SELECT coalesce((select (t1.d) from t1 where not not case when c=case when f<>( -t1.c) then c when 19<>t1.d then t1.f else d end) then e-f when c in (select d from t1 union select f from t1) then a else t1.b end | t1.a-f from t1 union select -19 from t1)),19) then 19 when 11 between f and t1.f then t1.c else 19 end=a),t1.f) FROM t1 WHERE coalesce((select t1.d from t1 where (case when exists(select 1 from t1 where not - -19 in (17,f,t1.b)) and 19>c or t1.c in (select t1.e from t1 union select 17 from t1) then a when exists(select 1 from t1 where (t1.c<>d)) and -t1.c not between b and e then 11 else (select cast(avg((abs(b)/abs(d))) AS integer) from t1) end+t1.d not in (t1.f,11,b) and f not between 17 and t1.d)),t1.e)=t1.a} +} {} +do_test randexpr-2.1546 { + db eval {SELECT coalesce((select (t1.d) from t1 where not not case when c=case when f<>( -t1.c) then c when 19<>t1.d then t1.f else d end) then e-f when c in (select d from t1 union select f from t1) then a else t1.b end | t1.a-f from t1 union select -19 from t1)),19) then 19 when 11 between f and t1.f then t1.c else 19 end=a),t1.f) FROM t1 WHERE NOT (coalesce((select t1.d from t1 where (case when exists(select 1 from t1 where not - -19 in (17,f,t1.b)) and 19>c or t1.c in (select t1.e from t1 union select 17 from t1) then a when exists(select 1 from t1 where (t1.c<>d)) and -t1.c not between b and e then 11 else (select cast(avg((abs(b)/abs(d))) AS integer) from t1) end+t1.d not in (t1.f,11,b) and f not between 17 and t1.d)),t1.e)=t1.a)} +} {600} +do_test randexpr-2.1547 { + db eval {SELECT coalesce((select (t1.d) from t1 where not not case when c=case when f<>( -t1.c) then c when 19<>t1.d then t1.f else d end) then e-f when c in (select d from t1 union select f from t1) then a else t1.b end & t1.a-f from t1 union select -19 from t1)),19) then 19 when 11 between f and t1.f then t1.c else 19 end=a),t1.f) FROM t1 WHERE NOT (coalesce((select t1.d from t1 where (case when exists(select 1 from t1 where not - -19 in (17,f,t1.b)) and 19>c or t1.c in (select t1.e from t1 union select 17 from t1) then a when exists(select 1 from t1 where (t1.c<>d)) and -t1.c not between b and e then 11 else (select cast(avg((abs(b)/abs(d))) AS integer) from t1) end+t1.d not in (t1.f,11,b) and f not between 17 and t1.d)),t1.e)=t1.a)} +} {600} +do_test randexpr-2.1548 { + db eval {SELECT coalesce((select max(case when -++t1.c++(abs(case when t1.bb then 11 else b end) from t1 where ((t1.b)) between e and e),t1.e) FROM t1 WHERE not ~t1.c-(coalesce((select max(~case t1.e | b*11+d+19 when t1.a then t1.b else t1.d end) from t1 where (13 not in (t1.a,t1.a,13))),e)) not between t1.a and 11 or exists(select 1 from t1 where (t1.d) not in (11,13,t1.a) and (c<>b or -13<>t1.b and c in (t1.f, -c,t1.d)))} +} {500} +do_test randexpr-2.1549 { + db eval {SELECT coalesce((select max(case when -++t1.c++(abs(case when t1.bb then 11 else b end) from t1 where ((t1.b)) between e and e),t1.e) FROM t1 WHERE NOT (not ~t1.c-(coalesce((select max(~case t1.e | b*11+d+19 when t1.a then t1.b else t1.d end) from t1 where (13 not in (t1.a,t1.a,13))),e)) not between t1.a and 11 or exists(select 1 from t1 where (t1.d) not in (11,13,t1.a) and (c<>b or -13<>t1.b and c in (t1.f, -c,t1.d))))} +} {} +do_test randexpr-2.1550 { + db eval {SELECT case 19*d-+t1.a | coalesce((select f from t1 
where +b between coalesce((select +t1.f from t1 where not exists(select 1 from t1 where case 17 when b then f else f end>=~13)),t1.f) and t1.d),c+11)- -t1.e | 17+case t1.b when t1.a then 17 else t1.f end*t1.c+a when f then -f else t1.f end | t1.b FROM t1 WHERE t1.f=f and not exists(select 1 from t1 where case when t1.b>e then (abs(13)/abs(t1.f | 17))+(t1.f) else ~t1.a-(~e) end in (select count(distinct coalesce((select max(coalesce((select 11 from t1 where t1.a=~13)),t1.f) and t1.d),c+11)- -t1.e | 17+case t1.b when t1.a then 17 else t1.f end*t1.c+a when f then -f else t1.f end | t1.b FROM t1 WHERE NOT (t1.f=f and not exists(select 1 from t1 where case when t1.b>e then (abs(13)/abs(t1.f | 17))+(t1.f) else ~t1.a-(~e) end in (select count(distinct coalesce((select max(coalesce((select 11 from t1 where t1.a=~13)),t1.f) and t1.d),c+11)- -t1.e & 17+case t1.b when t1.a then 17 else t1.f end*t1.c+a when f then -f else t1.f end & t1.b FROM t1 WHERE t1.f=f and not exists(select 1 from t1 where case when t1.b>e then (abs(13)/abs(t1.f | 17))+(t1.f) else ~t1.a-(~e) end in (select count(distinct coalesce((select max(coalesce((select 11 from t1 where t1.a=t1.e and e>f then 11 | 19 else t1.c end | b in (select a from t1 union select 13 from t1)),b)+t1.b | (d)*11)/abs(e)) from t1 where a=19),t1.e)*13) from t1 where 13>f),t1.f)) FROM t1 WHERE coalesce((select max(~t1.a*t1.f*t1.b+f+a) from t1 where case when d in (select -+count(distinct (t1.c))*max(t1.e+b)-(( -count(distinct 17))) from t1 union select -count(distinct t1.d) from t1) then t1.d else (abs(case when t1.e in (select t1.c from t1 union select c from t1) then c else d end)/abs(f)) end in (select max( -c) from t1 union select abs(count(distinct 19)) from t1)), -a) not in (d,t1.f,13)} +} {600} +do_test randexpr-2.1554 { + db eval {SELECT (coalesce((select max(coalesce((select (abs(t1.e* -b*coalesce((select max( -t1.e*t1.e+t1.f) from t1 where -case when b>=t1.e and e>f then 11 | 19 else t1.c end | b in (select a from t1 union select 13 from t1)),b)+t1.b | (d)*11)/abs(e)) from t1 where a=19),t1.e)*13) from t1 where 13>f),t1.f)) FROM t1 WHERE NOT (coalesce((select max(~t1.a*t1.f*t1.b+f+a) from t1 where case when d in (select -+count(distinct (t1.c))*max(t1.e+b)-(( -count(distinct 17))) from t1 union select -count(distinct t1.d) from t1) then t1.d else (abs(case when t1.e in (select t1.c from t1 union select c from t1) then c else d end)/abs(f)) end in (select max( -c) from t1 union select abs(count(distinct 19)) from t1)), -a) not in (d,t1.f,13))} +} {} +do_test randexpr-2.1555 { + db eval {SELECT (coalesce((select max(coalesce((select (abs(t1.e* -b*coalesce((select max( -t1.e*t1.e+t1.f) from t1 where -case when b>=t1.e and e>f then 11 & 19 else t1.c end & b in (select a from t1 union select 13 from t1)),b)+t1.b & (d)*11)/abs(e)) from t1 where a=19),t1.e)*13) from t1 where 13>f),t1.f)) FROM t1 WHERE coalesce((select max(~t1.a*t1.f*t1.b+f+a) from t1 where case when d in (select -+count(distinct (t1.c))*max(t1.e+b)-(( -count(distinct 17))) from t1 union select -count(distinct t1.d) from t1) then t1.d else (abs(case when t1.e in (select t1.c from t1 union select c from t1) then c else d end)/abs(f)) end in (select max( -c) from t1 union select abs(count(distinct 19)) from t1)), -a) not in (d,t1.f,13)} +} {600} +do_test randexpr-2.1556 { + db eval {SELECT +(abs(17*(abs(a | t1.a)/abs(11)) | f)/abs(coalesce((select +t1.b+~a from t1 where case when a=t1.b then c when t1.d=t1.e then t1.b else 17 end<=t1.f and (b<>13) or t1.a<>a or t1.d>d and 11 between 17 and 
t1.a and t1.b<> -e and -13<(17)),a)+ -t1.e))*(t1.c) FROM t1 WHERE not a not between t1.a and 11+ -coalesce((select -c*coalesce((select max(~f*a+11-t1.a | -a+t1.a*t1.b*f) from t1 where 11=11),t1.a)+d from t1 where 13>=e),d) | t1.b or t1.a not between -b and f or 17<>13} +} {300} +do_test randexpr-2.1557 { + db eval {SELECT +(abs(17*(abs(a | t1.a)/abs(11)) | f)/abs(coalesce((select +t1.b+~a from t1 where case when a=t1.b then c when t1.d=t1.e then t1.b else 17 end<=t1.f and (b<>13) or t1.a<>a or t1.d>d and 11 between 17 and t1.a and t1.b<> -e and -13<(17)),a)+ -t1.e))*(t1.c) FROM t1 WHERE NOT (not a not between t1.a and 11+ -coalesce((select -c*coalesce((select max(~f*a+11-t1.a | -a+t1.a*t1.b*f) from t1 where 11=11),t1.a)+d from t1 where 13>=e),d) | t1.b or t1.a not between -b and f or 17<>13)} +} {} +do_test randexpr-2.1558 { + db eval {SELECT +(abs(17*(abs(a & t1.a)/abs(11)) & f)/abs(coalesce((select +t1.b+~a from t1 where case when a=t1.b then c when t1.d=t1.e then t1.b else 17 end<=t1.f and (b<>13) or t1.a<>a or t1.d>d and 11 between 17 and t1.a and t1.b<> -e and -13<(17)),a)+ -t1.e))*(t1.c) FROM t1 WHERE not a not between t1.a and 11+ -coalesce((select -c*coalesce((select max(~f*a+11-t1.a | -a+t1.a*t1.b*f) from t1 where 11=11),t1.a)+d from t1 where 13>=e),d) | t1.b or t1.a not between -b and f or 17<>13} +} {0} +do_test randexpr-2.1559 { + db eval {SELECT b-case e*13+case when not not t1.d in (select t1.b from t1 union select f from t1) then t1.c when coalesce((select max(coalesce((select (select ~min(coalesce((select max(a*f) from t1 where t1.f<=t1.b or a<=f),t1.a)) | -(count(distinct 19)) from t1) from t1 where f between (t1.f) and 11),a)) from t1 where 11 not in (f, -11,19)),e)>=d then t1.f else t1.c end when -a then 11 else t1.e end FROM t1 WHERE coalesce((select case case when case when (coalesce((select max(t1.e) from t1 where c>c and -19>= -a),d) not in (11,t1.b,t1.a)) then d else a end between 11 and t1.c then b else a end+13-t1.a when t1.d then (b) else t1.d end from t1 where b between e and t1.f),11) in (select (+abs(count(*)+case -count(distinct t1.e) when -(max(19)) then (count(distinct 19)) else -count(distinct e) end-max(t1.b)) | count(distinct t1.e) | (cast(avg(11) AS integer))) from t1 union select cast(avg(c) AS integer) from t1)} +} {} +do_test randexpr-2.1560 { + db eval {SELECT b-case e*13+case when not not t1.d in (select t1.b from t1 union select f from t1) then t1.c when coalesce((select max(coalesce((select (select ~min(coalesce((select max(a*f) from t1 where t1.f<=t1.b or a<=f),t1.a)) | -(count(distinct 19)) from t1) from t1 where f between (t1.f) and 11),a)) from t1 where 11 not in (f, -11,19)),e)>=d then t1.f else t1.c end when -a then 11 else t1.e end FROM t1 WHERE NOT (coalesce((select case case when case when (coalesce((select max(t1.e) from t1 where c>c and -19>= -a),d) not in (11,t1.b,t1.a)) then d else a end between 11 and t1.c then b else a end+13-t1.a when t1.d then (b) else t1.d end from t1 where b between e and t1.f),11) in (select (+abs(count(*)+case -count(distinct t1.e) when -(max(19)) then (count(distinct 19)) else -count(distinct e) end-max(t1.b)) | count(distinct t1.e) | (cast(avg(11) AS integer))) from t1 union select cast(avg(c) AS integer) from t1))} +} {-300} +do_test randexpr-2.1561 { + db eval {SELECT b-case e*13+case when not not t1.d in (select t1.b from t1 union select f from t1) then t1.c when coalesce((select max(coalesce((select (select ~min(coalesce((select max(a*f) from t1 where t1.f<=t1.b or a<=f),t1.a)) & -(count(distinct 19)) 
from t1) from t1 where f between (t1.f) and 11),a)) from t1 where 11 not in (f, -11,19)),e)>=d then t1.f else t1.c end when -a then 11 else t1.e end FROM t1 WHERE NOT (coalesce((select case case when case when (coalesce((select max(t1.e) from t1 where c>c and -19>= -a),d) not in (11,t1.b,t1.a)) then d else a end between 11 and t1.c then b else a end+13-t1.a when t1.d then (b) else t1.d end from t1 where b between e and t1.f),11) in (select (+abs(count(*)+case -count(distinct t1.e) when -(max(19)) then (count(distinct 19)) else -count(distinct e) end-max(t1.b)) | count(distinct t1.e) | (cast(avg(11) AS integer))) from t1 union select cast(avg(c) AS integer) from t1))} +} {-300} +do_test randexpr-2.1562 { + db eval {SELECT coalesce((select case when a not in (~17,a,c) then b when t1.c<>t1.f-13 then +f else 11 end+t1.e*t1.a+case when ((a-c)>case when (exists(select 1 from t1 where (c) between d and -t1.b)) then a+t1.f else f end) or e between t1.c and 13 then -t1.c else t1.e end-t1.d from t1 where ( -17=a)),e) FROM t1 WHERE +case when not exists(select 1 from t1 where f+(select ~ -min(b) | abs(+max(+c)) from t1)+b not between 13-a*17 and t1.e-t1.b) then t1.a when b not between ((11)) and 11 then 13 else 17 end+ - -t1.f in (f,t1.d,f) or t1.b>t1.a and not exists(select 1 from t1 where t1.d in (t1.c,d,t1.c))} +} {} +do_test randexpr-2.1563 { + db eval {SELECT coalesce((select case when a not in (~17,a,c) then b when t1.c<>t1.f-13 then +f else 11 end+t1.e*t1.a+case when ((a-c)>case when (exists(select 1 from t1 where (c) between d and -t1.b)) then a+t1.f else f end) or e between t1.c and 13 then -t1.c else t1.e end-t1.d from t1 where ( -17=a)),e) FROM t1 WHERE NOT (+case when not exists(select 1 from t1 where f+(select ~ -min(b) | abs(+max(+c)) from t1)+b not between 13-a*17 and t1.e-t1.b) then t1.a when b not between ((11)) and 11 then 13 else 17 end+ - -t1.f in (f,t1.d,f) or t1.b>t1.a and not exists(select 1 from t1 where t1.d in (t1.c,d,t1.c)))} +} {500} +do_test randexpr-2.1564 { + db eval {SELECT case coalesce((select 19-case when case when f in (select t1.c from t1 union select (select count(*) from t1) from t1) then a+e else 19 end*19-19-13 not in (11,19,a) then f when (13<>(t1.c) or d>e and b=b) then t1.a else 13 end from t1 where (not exists(select 1 from t1 where not exists(select 1 from t1 where t1.a=17)))),a) when t1.d then 19 else e end FROM t1 WHERE 13-coalesce((select (abs(19)/abs(11)) from t1 where case when a in (select d from t1 union select case when -11-11<>d or t1.e between (t1.d) and b then t1.f else f end from t1) then t1.e else -a end not between 13 and 19 or exists(select 1 from t1 where f< -( -t1.d)) and t1.a not between -e and t1.e or t1.f(t1.f)),t1.d)=11} +} {} +do_test randexpr-2.1565 { + db eval {SELECT case coalesce((select 19-case when case when f in (select t1.c from t1 union select (select count(*) from t1) from t1) then a+e else 19 end*19-19-13 not in (11,19,a) then f when (13<>(t1.c) or d>e and b=b) then t1.a else 13 end from t1 where (not exists(select 1 from t1 where not exists(select 1 from t1 where t1.a=17)))),a) when t1.d then 19 else e end FROM t1 WHERE NOT (13-coalesce((select (abs(19)/abs(11)) from t1 where case when a in (select d from t1 union select case when -11-11<>d or t1.e between (t1.d) and b then t1.f else f end from t1) then t1.e else -a end not between 13 and 19 or exists(select 1 from t1 where f< -( -t1.d)) and t1.a not between -e and t1.e or t1.f(t1.f)),t1.d)=11)} +} {500} +do_test randexpr-2.1566 { + db eval {SELECT (select -count(distinct 
-case (select (max( -c-t1.b+~f)*count(distinct 11) | +cast(avg(11) AS integer)-min(t1.f)+ -(cast(avg(( -11)) AS integer))-count(distinct t1.d)*(max(13))*(max(d))-(count(*))-min(e)) from t1) when + -c then t1.d else t1.b*19 end-a | t1.d+t1.d) from t1) FROM t1 WHERE not 11-t1.a=a+11} +} {-1} +do_test randexpr-2.1567 { + db eval {SELECT (select -count(distinct -case (select (max( -c-t1.b+~f)*count(distinct 11) | +cast(avg(11) AS integer)-min(t1.f)+ -(cast(avg(( -11)) AS integer))-count(distinct t1.d)*(max(13))*(max(d))-(count(*))-min(e)) from t1) when + -c then t1.d else t1.b*19 end-a | t1.d+t1.d) from t1) FROM t1 WHERE NOT (not 11-t1.a=a+11)} +} {} +do_test randexpr-2.1568 { + db eval {SELECT (select -count(distinct -case (select (max( -c-t1.b+~f)*count(distinct 11) & +cast(avg(11) AS integer)-min(t1.f)+ -(cast(avg(( -11)) AS integer))-count(distinct t1.d)*(max(13))*(max(d))-(count(*))-min(e)) from t1) when + -c then t1.d else t1.b*19 end-a & t1.d+t1.d) from t1) FROM t1 WHERE not 11-t1.a=a+11} +} {-1} +do_test randexpr-2.1569 { + db eval {SELECT (select ~count(distinct t1.d*e+19)+abs(+cast(avg((select (count(*)) from t1)) AS integer)-cast(avg(t1.c) AS integer) | min(case when d | f* -19 in (select min(c) from t1 union select ~count(*)-max(17) from t1) then t1.e when (t1.b between 17 and (b)) then t1.d else t1.d end)*cast(avg(e) AS integer))-count(distinct t1.f) | cast(avg( -d) AS integer)*cast(avg(f) AS integer) from t1)*t1.e FROM t1 WHERE coalesce((select 17 from t1 where coalesce((select max(case when 13-b+coalesce((select case when (f>a) then t1.f when t1.c<=19 then a else a end from t1 where c in (select (e) from t1 union select 13 from t1)),13)<13 then 13 else t1.b end) from t1 where (13)<>13),17) in (select +min(b) from t1 union select ~max(t1.d)*max( -t1.b)*min(t1.e) from t1) and e>c),f) between ((t1.f)) and b and t1.a=19} +} {} +do_test randexpr-2.1570 { + db eval {SELECT (select ~count(distinct t1.d*e+19)+abs(+cast(avg((select (count(*)) from t1)) AS integer)-cast(avg(t1.c) AS integer) | min(case when d | f* -19 in (select min(c) from t1 union select ~count(*)-max(17) from t1) then t1.e when (t1.b between 17 and (b)) then t1.d else t1.d end)*cast(avg(e) AS integer))-count(distinct t1.f) | cast(avg( -d) AS integer)*cast(avg(f) AS integer) from t1)*t1.e FROM t1 WHERE NOT (coalesce((select 17 from t1 where coalesce((select max(case when 13-b+coalesce((select case when (f>a) then t1.f when t1.c<=19 then a else a end from t1 where c in (select (e) from t1 union select 13 from t1)),13)<13 then 13 else t1.b end) from t1 where (13)<>13),17) in (select +min(b) from t1 union select ~max(t1.d)*max( -t1.b)*min(t1.e) from t1) and e>c),f) between ((t1.f)) and b and t1.a=19)} +} {-119980000} +do_test randexpr-2.1571 { + db eval {SELECT (select ~count(distinct t1.d*e+19)+abs(+cast(avg((select (count(*)) from t1)) AS integer)-cast(avg(t1.c) AS integer) & min(case when d & f* -19 in (select min(c) from t1 union select ~count(*)-max(17) from t1) then t1.e when (t1.b between 17 and (b)) then t1.d else t1.d end)*cast(avg(e) AS integer))-count(distinct t1.f) & cast(avg( -d) AS integer)*cast(avg(f) AS integer) from t1)*t1.e FROM t1 WHERE NOT (coalesce((select 17 from t1 where coalesce((select max(case when 13-b+coalesce((select case when (f>a) then t1.f when t1.c<=19 then a else a end from t1 where c in (select (e) from t1 union select 13 from t1)),13)<13 then 13 else t1.b end) from t1 where (13)<>13),17) in (select +min(b) from t1 union select ~max(t1.d)*max( -t1.b)*min(t1.e) from t1) and e>c),f) between 
((t1.f)) and b and t1.a=19)} +} {512000} +do_test randexpr-2.1572 { + db eval {SELECT coalesce((select d from t1 where case when (17 between c+t1.d-t1.a and b and t1.b in (13,t1.d,19)) then case when t1.c<(a) then t1.c when t1.e not in (a,11,(t1.d)) then t1.d else d end else t1.a end in (select abs(min(a) | ~~count(distinct t1.e)+min(b)-count(*)) from t1 union select count(*) from t1) and not exists(select 1 from t1 where (t1.a) between t1.a and t1.d)),t1.f)-t1.a FROM t1 WHERE case when t1.e not in (coalesce((select max(coalesce((select max(b+t1.c-19*(e)-~11) from t1 where t1.a in (select ~cast(avg(~t1.a) AS integer) from t1 union select abs(~cast(avg(f) AS integer)) from t1)),e)* -c-c-b-t1.d-f) from t1 where t1.f>=t1.c),((t1.d)))- - -19,17,t1.b) then b else t1.f end=t1.c),((t1.d)))- - -19,17,t1.b) then b else t1.f end=t1.c),((t1.d)))- - -19,17,t1.b) then b else t1.f end=(t1.f)+a then t1.d else 19 end and t1.c or exists(select 1 from t1 where (b in (13,13,b)) and (t1.f<>t1.e)) or (13)<>17 and e<=f and e<= -c) and t1.f>f then t1.e*19-t1.e-b else 17 end from t1)} +} {} +do_test randexpr-2.1576 { + db eval {SELECT (abs((select min(t1.a)*~+ -count(*)-max(t1.f) from t1))/abs((abs(13*~t1.c- -e-( -19) | case 17*~13 when t1.c+case when not 19 between 13 and a then t1.c when -11 not between e and 17 then 19 else a end then 19 else 17 end*19)/abs(t1.d)))) FROM t1 WHERE NOT (t1.e in (select d from t1 union select case when not exists(select 1 from t1 where c+a*17 not between case when t1.f>=(t1.f)+a then t1.d else 19 end and t1.c or exists(select 1 from t1 where (b in (13,13,b)) and (t1.f<>t1.e)) or (13)<>17 and e<=f and e<= -c) and t1.f>f then t1.e*19-t1.e-b else 17 end from t1))} +} {85} +do_test randexpr-2.1577 { + db eval {SELECT b-t1.b-coalesce((select (abs(case t1.c+coalesce((select +17*case when b in (select abs(max(19) | count(distinct t1.e)) | cast(avg(t1.d) AS integer) from t1 union select (count(*)) from t1) then +11+t1.a when 11e)),d) FROM t1 WHERE t1.a<> -a} +} {0} +do_test randexpr-2.1578 { + db eval {SELECT b-t1.b-coalesce((select (abs(case t1.c+coalesce((select +17*case when b in (select abs(max(19) | count(distinct t1.e)) | cast(avg(t1.d) AS integer) from t1 union select (count(*)) from t1) then +11+t1.a when 11e)),d) FROM t1 WHERE NOT (t1.a<> -a)} +} {} +do_test randexpr-2.1579 { + db eval {SELECT b-t1.b-coalesce((select (abs(case t1.c+coalesce((select +17*case when b in (select abs(max(19) & count(distinct t1.e)) & cast(avg(t1.d) AS integer) from t1 union select (count(*)) from t1) then +11+t1.a when 11e)),d) FROM t1 WHERE t1.a<> -a} +} {0} +do_test randexpr-2.1580 { + db eval {SELECT ~(abs(b*t1.b)/abs(case when coalesce((select max((abs(a)/abs(case t1.e when t1.e then c*(t1.d) else t1.a end))) from t1 where b in (select min((t1.a)) from t1 union select + -max(b) from t1)),19)+a not between t1.e and -19 and 13<=a then (select case cast(avg((a)) AS integer) when min(e)*max(f) then -(count(distinct 13)) else cast(avg(13) AS integer) end-count(*) | count(distinct d) | -count(*) from t1) else b end)) FROM t1 WHERE f not between t1.d and b} +} {-40001} +do_test randexpr-2.1581 { + db eval {SELECT ~(abs(b*t1.b)/abs(case when coalesce((select max((abs(a)/abs(case t1.e when t1.e then c*(t1.d) else t1.a end))) from t1 where b in (select min((t1.a)) from t1 union select + -max(b) from t1)),19)+a not between t1.e and -19 and 13<=a then (select case cast(avg((a)) AS integer) when min(e)*max(f) then -(count(distinct 13)) else cast(avg(13) AS integer) end-count(*) | count(distinct d) | -count(*) 
from t1) else b end)) FROM t1 WHERE NOT (f not between t1.d and b)} +} {} +do_test randexpr-2.1582 { + db eval {SELECT +(abs(19)/abs(d | 11*t1.f))*t1.e-coalesce((select t1.b from t1 where not ((f*case when a>=17 then t1.c else (a) end-b not in (t1.b,t1.f,t1.f) or not exists(select 1 from t1 where t1.c>t1.a) and c not in (t1.d,t1.b,c)))),e) | coalesce((select ( - -f) from t1 where 19 not between 17 and 17),(a))-17*d*c FROM t1 WHERE t1.b>=e} +} {} +do_test randexpr-2.1583 { + db eval {SELECT +(abs(19)/abs(d | 11*t1.f))*t1.e-coalesce((select t1.b from t1 where not ((f*case when a>=17 then t1.c else (a) end-b not in (t1.b,t1.f,t1.f) or not exists(select 1 from t1 where t1.c>t1.a) and c not in (t1.d,t1.b,c)))),e) | coalesce((select ( - -f) from t1 where 19 not between 17 and 17),(a))-17*d*c FROM t1 WHERE NOT (t1.b>=e)} +} {-100} +do_test randexpr-2.1584 { + db eval {SELECT +(abs(19)/abs(d & 11*t1.f))*t1.e-coalesce((select t1.b from t1 where not ((f*case when a>=17 then t1.c else (a) end-b not in (t1.b,t1.f,t1.f) or not exists(select 1 from t1 where t1.c>t1.a) and c not in (t1.d,t1.b,c)))),e) & coalesce((select ( - -f) from t1 where 19 not between 17 and 17),(a))-17*d*c FROM t1 WHERE NOT (t1.b>=e)} +} {-2039800} +do_test randexpr-2.1585 { + db eval {SELECT (abs((abs(a)/abs(~17+t1.b-e+(abs(a)/abs(coalesce((select max(13) from t1 where c not in (c-(abs(+coalesce((select max(t1.b-f) from t1 where (abs(13+case ~t1.f when t1.c then t1.b else b end-e)/abs(c))-t1.e>t1.c),t1.c))/abs(11))+t1.b,t1.a,11)),t1.a)))))+19)/abs( - -t1.c)) FROM t1 WHERE ~t1.e+t1.a+(abs(case when d+e+17*~t1.a-t1.a-a*~(abs(~f)/abs( -13))*+(t1.c | a)*t1.b=e then t1.d when ( -e) not between c and -d then -(t1.f) else t1.b end)/abs(19))>t1.f and 11<(11)} +} {} +do_test randexpr-2.1586 { + db eval {SELECT (abs((abs(a)/abs(~17+t1.b-e+(abs(a)/abs(coalesce((select max(13) from t1 where c not in (c-(abs(+coalesce((select max(t1.b-f) from t1 where (abs(13+case ~t1.f when t1.c then t1.b else b end-e)/abs(c))-t1.e>t1.c),t1.c))/abs(11))+t1.b,t1.a,11)),t1.a)))))+19)/abs( - -t1.c)) FROM t1 WHERE NOT (~t1.e+t1.a+(abs(case when d+e+17*~t1.a-t1.a-a*~(abs(~f)/abs( -13))*+(t1.c | a)*t1.b=e then t1.d when ( -e) not between c and -d then -(t1.f) else t1.b end)/abs(19))>t1.f and 11<(11))} +} {0} +do_test randexpr-2.1587 { + db eval {SELECT (abs((abs( -t1.e)/abs(~17+b)))/abs((case 19 when case when t1.b in (select ~cast(avg(19) AS integer) from t1 union select max(t1.b) from t1) then case when case when exists(select 1 from t1 where t1.c not between d and e) then (11) when t1.d<11 then ( -t1.f) else c end in (select count(distinct 17) from t1 union select ~min(a) | max( -t1.a)- -min(b) from t1) then d else 17 end when 13> -t1.b then f else d end then (t1.b) else c end))) FROM t1 WHERE d in (b,+(abs(t1.b | t1.e)/abs(coalesce((select max(t1.b) from t1 where t1.f=(e*+t1.b-t1.d+b-t1.f-t1.a-case case a*t1.b when coalesce((select max(f) from t1 where c<11),(a)) then -t1.b else t1.f end when e then d else 19 end*t1.e+19)*d),t1.b))),t1.b)} +} {} +do_test randexpr-2.1588 { + db eval {SELECT (abs((abs( -t1.e)/abs(~17+b)))/abs((case 19 when case when t1.b in (select ~cast(avg(19) AS integer) from t1 union select max(t1.b) from t1) then case when case when exists(select 1 from t1 where t1.c not between d and e) then (11) when t1.d<11 then ( -t1.f) else c end in (select count(distinct 17) from t1 union select ~min(a) | max( -t1.a)- -min(b) from t1) then d else 17 end when 13> -t1.b then f else d end then (t1.b) else c end))) FROM t1 WHERE NOT (d in (b,+(abs(t1.b | 
t1.e)/abs(coalesce((select max(t1.b) from t1 where t1.f=(e*+t1.b-t1.d+b-t1.f-t1.a-case case a*t1.b when coalesce((select max(f) from t1 where c<11),(a)) then -t1.b else t1.f end when e then d else 19 end*t1.e+19)*d),t1.b))),t1.b))} +} {0} +do_test randexpr-2.1589 { + db eval {SELECT (abs((abs( -t1.e)/abs(~17+b)))/abs((case 19 when case when t1.b in (select ~cast(avg(19) AS integer) from t1 union select max(t1.b) from t1) then case when case when exists(select 1 from t1 where t1.c not between d and e) then (11) when t1.d<11 then ( -t1.f) else c end in (select count(distinct 17) from t1 union select ~min(a) & max( -t1.a)- -min(b) from t1) then d else 17 end when 13> -t1.b then f else d end then (t1.b) else c end))) FROM t1 WHERE NOT (d in (b,+(abs(t1.b | t1.e)/abs(coalesce((select max(t1.b) from t1 where t1.f=(e*+t1.b-t1.d+b-t1.f-t1.a-case case a*t1.b when coalesce((select max(f) from t1 where c<11),(a)) then -t1.b else t1.f end when e then d else 19 end*t1.e+19)*d),t1.b))),t1.b))} +} {0} +do_test randexpr-2.1590 { + db eval {SELECT c | case when e<=(abs(case when case when not exists(select 1 from t1 where case when t1.f>case when 19>=17 or f in (19,c,t1.d) then t1.b else b end then t1.e else -t1.b end*t1.d in (select t1.e from t1 union select c from t1)) then e else f end<>t1.d then -a when (11 in (select b from t1 union select a from t1)) then 19 else t1.a end-(c))/abs(c))+t1.d | t1.a then -19 else 17 end+t1.d-(t1.a) FROM t1 WHERE coalesce((select 13 from t1 where not exists(select 1 from t1 where t1.e*t1.d*t1.f>=(11) and d in (13,coalesce((select t1.f+t1.b from t1 where coalesce((select max(t1.a) from t1 where not (t1.d)<=t1.c),c) not in (t1.e,t1.d,b)),b)+f,d)) or (t1.f>f) or 11<=t1.d and 17<>13),a)<(t1.a)} +} {317} +do_test randexpr-2.1591 { + db eval {SELECT c | case when e<=(abs(case when case when not exists(select 1 from t1 where case when t1.f>case when 19>=17 or f in (19,c,t1.d) then t1.b else b end then t1.e else -t1.b end*t1.d in (select t1.e from t1 union select c from t1)) then e else f end<>t1.d then -a when (11 in (select b from t1 union select a from t1)) then 19 else t1.a end-(c))/abs(c))+t1.d | t1.a then -19 else 17 end+t1.d-(t1.a) FROM t1 WHERE NOT (coalesce((select 13 from t1 where not exists(select 1 from t1 where t1.e*t1.d*t1.f>=(11) and d in (13,coalesce((select t1.f+t1.b from t1 where coalesce((select max(t1.a) from t1 where not (t1.d)<=t1.c),c) not in (t1.e,t1.d,b)),b)+f,d)) or (t1.f>f) or 11<=t1.d and 17<>13),a)<(t1.a))} +} {} +do_test randexpr-2.1592 { + db eval {SELECT c & case when e<=(abs(case when case when not exists(select 1 from t1 where case when t1.f>case when 19>=17 or f in (19,c,t1.d) then t1.b else b end then t1.e else -t1.b end*t1.d in (select t1.e from t1 union select c from t1)) then e else f end<>t1.d then -a when (11 in (select b from t1 union select a from t1)) then 19 else t1.a end-(c))/abs(c))+t1.d & t1.a then -19 else 17 end+t1.d-(t1.a) FROM t1 WHERE coalesce((select 13 from t1 where not exists(select 1 from t1 where t1.e*t1.d*t1.f>=(11) and d in (13,coalesce((select t1.f+t1.b from t1 where coalesce((select max(t1.a) from t1 where not (t1.d)<=t1.c),c) not in (t1.e,t1.d,b)),b)+f,d)) or (t1.f>f) or 11<=t1.d and 17<>13),a)<(t1.a)} +} {300} +do_test randexpr-2.1593 { + db eval {SELECT -+t1.f*coalesce((select 17 from t1 where b in (coalesce((select f- -11 from t1 where not exists(select 1 from t1 where t1.d*a<=(case when -17=d-coalesce((select a from t1 where coalesce((select max(19) from t1 where case when 13 between coalesce((select max(b) from 
t1 where t1.e in (c,t1.a,17)),(a)) and ((17)) then t1.b when f=t1.b then t1.f else 17 end>t1.b),e)+b*a>=t1.a),c))} +} {} +do_test randexpr-2.1594 { + db eval {SELECT -+t1.f*coalesce((select 17 from t1 where b in (coalesce((select f- -11 from t1 where not exists(select 1 from t1 where t1.d*a<=(case when -17=d-coalesce((select a from t1 where coalesce((select max(19) from t1 where case when 13 between coalesce((select max(b) from t1 where t1.e in (c,t1.a,17)),(a)) and ((17)) then t1.b when f=t1.b then t1.f else 17 end>t1.b),e)+b*a>=t1.a),c)))} +} {-360489} +do_test randexpr-2.1595 { + db eval {SELECT -+t1.f*coalesce((select 17 from t1 where b in (coalesce((select f- -11 from t1 where not exists(select 1 from t1 where t1.d*a<=(case when -17=d-coalesce((select a from t1 where coalesce((select max(19) from t1 where case when 13 between coalesce((select max(b) from t1 where t1.e in (c,t1.a,17)),(a)) and ((17)) then t1.b when f=t1.b then t1.f else 17 end>t1.b),e)+b*a>=t1.a),c)))} +} {-360489} +do_test randexpr-2.1596 { + db eval {SELECT ~f+coalesce((select max(17+~a+t1.b) from t1 where 17>t1.c),c+case when t1.f*d not between t1.f-13 and ~t1.b then 19 when t1.a between case coalesce((select 17 from t1 where (e in (f,e,t1.b))),13) | t1.a* - -c*t1.f-a+a when e then t1.a else 13 end and b then b else e end)-11 FROM t1 WHERE 17=19*~d-case when (exists(select 1 from t1 where 17-e not between t1.b and f) or -f not in ( -13,b,c) and t1.f not between t1.f and 13) then 13 else coalesce((select t1.f from t1 where 11 not in (a,(e),t1.f)),(t1.c)) end+t1.c or 17 in (select +(count(*)) from t1 union select ~+case -min(t1.b) when case case max(19) when cast(avg((b)) AS integer) then (count(distinct t1.b)) else -cast(avg(t1.b) AS integer) end when ( -max(t1.e)) then max(17) else -count(distinct t1.f) end then cast(avg(19) AS integer) else count(*) end-count(*) | -cast(avg(11) AS integer) from t1)} +} {} +do_test randexpr-2.1597 { + db eval {SELECT ~f+coalesce((select max(17+~a+t1.b) from t1 where 17>t1.c),c+case when t1.f*d not between t1.f-13 and ~t1.b then 19 when t1.a between case coalesce((select 17 from t1 where (e in (f,e,t1.b))),13) | t1.a* - -c*t1.f-a+a when e then t1.a else 13 end and b then b else e end)-11 FROM t1 WHERE NOT (17=19*~d-case when (exists(select 1 from t1 where 17-e not between t1.b and f) or -f not in ( -13,b,c) and t1.f not between t1.f and 13) then 13 else coalesce((select t1.f from t1 where 11 not in (a,(e),t1.f)),(t1.c)) end+t1.c or 17 in (select +(count(*)) from t1 union select ~+case -min(t1.b) when case case max(19) when cast(avg((b)) AS integer) then (count(distinct t1.b)) else -cast(avg(t1.b) AS integer) end when ( -max(t1.e)) then max(17) else -count(distinct t1.f) end then cast(avg(19) AS integer) else count(*) end-count(*) | -cast(avg(11) AS integer) from t1))} +} {-293} +do_test randexpr-2.1598 { + db eval {SELECT ~f+coalesce((select max(17+~a+t1.b) from t1 where 17>t1.c),c+case when t1.f*d not between t1.f-13 and ~t1.b then 19 when t1.a between case coalesce((select 17 from t1 where (e in (f,e,t1.b))),13) & t1.a* - -c*t1.f-a+a when e then t1.a else 13 end and b then b else e end)-11 FROM t1 WHERE NOT (17=19*~d-case when (exists(select 1 from t1 where 17-e not between t1.b and f) or -f not in ( -13,b,c) and t1.f not between t1.f and 13) then 13 else coalesce((select t1.f from t1 where 11 not in (a,(e),t1.f)),(t1.c)) end+t1.c or 17 in (select +(count(*)) from t1 union select ~+case -min(t1.b) when case case max(19) when cast(avg((b)) AS integer) then (count(distinct t1.b)) 
else -cast(avg(t1.b) AS integer) end when ( -max(t1.e)) then max(17) else -count(distinct t1.f) end then cast(avg(19) AS integer) else count(*) end-count(*) | -cast(avg(11) AS integer) from t1))} +} {-293} +do_test randexpr-2.1599 { + db eval {SELECT (case when (select count(*) from t1)++t1.d+b*t1.e | t1.a+coalesce((select max(t1.d) from t1 where d-19<>c),t1.a)+t1.f in (select coalesce((select max(b) from t1 where t1.c-case when t1.c in (select c from t1 union select t1.f from t1) then f when t1.c not in ( -e,t1.e,11) then (t1.e) else 13 end in (f,t1.e,t1.f) or t1.a not between 19 and 17),t1.c) from t1 union select e from t1) then -t1.e else t1.a end) FROM t1 WHERE 11*b+~ -coalesce((select t1.e from t1 where not exists(select 1 from t1 where 19=t1.f)),t1.d+(abs( -a)/abs(t1.d)))-t1.d-19<=11} +} {} +do_test randexpr-2.1600 { + db eval {SELECT (case when (select count(*) from t1)++t1.d+b*t1.e | t1.a+coalesce((select max(t1.d) from t1 where d-19<>c),t1.a)+t1.f in (select coalesce((select max(b) from t1 where t1.c-case when t1.c in (select c from t1 union select t1.f from t1) then f when t1.c not in ( -e,t1.e,11) then (t1.e) else 13 end in (f,t1.e,t1.f) or t1.a not between 19 and 17),t1.c) from t1 union select e from t1) then -t1.e else t1.a end) FROM t1 WHERE NOT (11*b+~ -coalesce((select t1.e from t1 where not exists(select 1 from t1 where 19=t1.f)),t1.d+(abs( -a)/abs(t1.d)))-t1.d-19<=11)} +} {100} +do_test randexpr-2.1601 { + db eval {SELECT (case when (select count(*) from t1)++t1.d+b*t1.e & t1.a+coalesce((select max(t1.d) from t1 where d-19<>c),t1.a)+t1.f in (select coalesce((select max(b) from t1 where t1.c-case when t1.c in (select c from t1 union select t1.f from t1) then f when t1.c not in ( -e,t1.e,11) then (t1.e) else 13 end in (f,t1.e,t1.f) or t1.a not between 19 and 17),t1.c) from t1 union select e from t1) then -t1.e else t1.a end) FROM t1 WHERE NOT (11*b+~ -coalesce((select t1.e from t1 where not exists(select 1 from t1 where 19=t1.f)),t1.d+(abs( -a)/abs(t1.d)))-t1.d-19<=11)} +} {100} +do_test randexpr-2.1602 { + db eval {SELECT 19+~case when not not (select + - -min(f | f)-count(*)+count(*)-min(b)+count(distinct a) from t1)<=13-case when +13 between t1.a and a then 11 else 11 end and (11 not in (17, -t1.d,(f))) then t1.b | a-t1.f when t1.d<>17 then e else 17 end FROM t1 WHERE +t1.e-c<>case when c in (select d from t1 union select f from t1) then case (case when 13 in (select +c*a*13-11 from t1 union select b from t1) then t1.a when t1.f in ( -t1.f,11,a) or t1.a<=f or (exists(select 1 from t1 where 13>=d)) then t1.c else b end) when t1.d then 11 else e end when t1.b in (19,t1.d,(19)) then c else a end} +} {-482} +do_test randexpr-2.1603 { + db eval {SELECT 19+~case when not not (select + - -min(f | f)-count(*)+count(*)-min(b)+count(distinct a) from t1)<=13-case when +13 between t1.a and a then 11 else 11 end and (11 not in (17, -t1.d,(f))) then t1.b | a-t1.f when t1.d<>17 then e else 17 end FROM t1 WHERE NOT (+t1.e-c<>case when c in (select d from t1 union select f from t1) then case (case when 13 in (select +c*a*13-11 from t1 union select b from t1) then t1.a when t1.f in ( -t1.f,11,a) or t1.a<=f or (exists(select 1 from t1 where 13>=d)) then t1.c else b end) when t1.d then 11 else e end when t1.b in (19,t1.d,(19)) then c else a end)} +} {} +do_test randexpr-2.1604 { + db eval {SELECT 19+~case when not not (select + - -min(f & f)-count(*)+count(*)-min(b)+count(distinct a) from t1)<=13-case when +13 between t1.a and a then 11 else 11 end and (11 not in (17, -t1.d,(f))) then t1.b 
& a-t1.f when t1.d<>17 then e else 17 end FROM t1 WHERE +t1.e-c<>case when c in (select d from t1 union select f from t1) then case (case when 13 in (select +c*a*13-11 from t1 union select b from t1) then t1.a when t1.f in ( -t1.f,11,a) or t1.a<=f or (exists(select 1 from t1 where 13>=d)) then t1.c else b end) when t1.d then 11 else e end when t1.b in (19,t1.d,(19)) then c else a end} +} {-482} +do_test randexpr-2.1605 { + db eval {SELECT (case when e not between 19 and t1.a then case coalesce((select t1.e from t1 where (exists(select 1 from t1 where c in (select case count(distinct t1.b) when max(b) then min((t1.d)) else count(distinct 11) end+ -count(*) | - -count(distinct t1.e) from t1 union select min(f) from t1))) or (select min(17) from t1) not between 13 and b and - -c=17 and 17 between t1.e and 13 or 13>d or 11>= -t1.d then a else 13 end)+t1.f FROM t1 WHERE b<=13} +} {} +do_test randexpr-2.1606 { + db eval {SELECT (case when e not between 19 and t1.a then case coalesce((select t1.e from t1 where (exists(select 1 from t1 where c in (select case count(distinct t1.b) when max(b) then min((t1.d)) else count(distinct 11) end+ -count(*) | - -count(distinct t1.e) from t1 union select min(f) from t1))) or (select min(17) from t1) not between 13 and b and - -c=17 and 17 between t1.e and 13 or 13>d or 11>= -t1.d then a else 13 end)+t1.f FROM t1 WHERE NOT (b<=13)} +} {619} +do_test randexpr-2.1607 { + db eval {SELECT (case when e not between 19 and t1.a then case coalesce((select t1.e from t1 where (exists(select 1 from t1 where c in (select case count(distinct t1.b) when max(b) then min((t1.d)) else count(distinct 11) end+ -count(*) & - -count(distinct t1.e) from t1 union select min(f) from t1))) or (select min(17) from t1) not between 13 and b and - -c=17 and 17 between t1.e and 13 or 13>d or 11>= -t1.d then a else 13 end)+t1.f FROM t1 WHERE NOT (b<=13)} +} {619} +do_test randexpr-2.1608 { + db eval {SELECT case c when d then +e else 17-e+case when exists(select 1 from t1 where not -f<19) then (a) when t1.a between case when t1.d- -~t1.a+(select case min(t1.a) when count(distinct t1.e) then max(f) else max(t1.f) end-min(t1.b) from t1)*(select max(17) from t1)+e*13-f in (t1.d,f,d) then f else t1.b end and b then t1.d else d end end FROM t1 WHERE b<>c*f} +} {-83} +do_test randexpr-2.1609 { + db eval {SELECT case c when d then +e else 17-e+case when exists(select 1 from t1 where not -f<19) then (a) when t1.a between case when t1.d- -~t1.a+(select case min(t1.a) when count(distinct t1.e) then max(f) else max(t1.f) end-min(t1.b) from t1)*(select max(17) from t1)+e*13-f in (t1.d,f,d) then f else t1.b end and b then t1.d else d end end FROM t1 WHERE NOT (b<>c*f)} +} {} +do_test randexpr-2.1610 { + db eval {SELECT -(select min(coalesce((select max(case when d>=t1.c then f-t1.c else t1.b end) from t1 where case when (17)-13<=(abs(a-e)/abs(19-t1.a)) then (select -max(c++t1.d | +case when 19 in (select b from t1 union select t1.c from t1) then t1.d else f end) from t1) else 11-19 end not in (e,t1.e,17)),c)+t1.b) from t1) FROM t1 WHERE -case when case when e=case when not 17>+t1.d and b*b in (e, -d,13) then -b*c else t1.e end then t1.f when e>=a then c else 19 end+t1.e<> -t1.d then 11 when not 11<>13 then 13 else e end*17*t1.d+t1.f in (select e from t1 union select t1.b from t1)} +} {} +do_test randexpr-2.1611 { + db eval {SELECT -(select min(coalesce((select max(case when d>=t1.c then f-t1.c else t1.b end) from t1 where case when (17)-13<=(abs(a-e)/abs(19-t1.a)) then (select -max(c++t1.d | +case when 
19 in (select b from t1 union select t1.c from t1) then t1.d else f end) from t1) else 11-19 end not in (e,t1.e,17)),c)+t1.b) from t1) FROM t1 WHERE NOT ( -case when case when e=case when not 17>+t1.d and b*b in (e, -d,13) then -b*c else t1.e end then t1.f when e>=a then c else 19 end+t1.e<> -t1.d then 11 when not 11<>13 then 13 else e end*17*t1.d+t1.f in (select e from t1 union select t1.b from t1))} +} {-500} +do_test randexpr-2.1612 { + db eval {SELECT -(select min(coalesce((select max(case when d>=t1.c then f-t1.c else t1.b end) from t1 where case when (17)-13<=(abs(a-e)/abs(19-t1.a)) then (select -max(c++t1.d & +case when 19 in (select b from t1 union select t1.c from t1) then t1.d else f end) from t1) else 11-19 end not in (e,t1.e,17)),c)+t1.b) from t1) FROM t1 WHERE NOT ( -case when case when e=case when not 17>+t1.d and b*b in (e, -d,13) then -b*c else t1.e end then t1.f when e>=a then c else 19 end+t1.e<> -t1.d then 11 when not 11<>13 then 13 else e end*17*t1.d+t1.f in (select e from t1 union select t1.b from t1))} +} {-500} +do_test randexpr-2.1613 { + db eval {SELECT case when b<=(abs(19)/abs(t1.a)) | 13-b then +f | ~d else ~case when case case when t1.c in (select 13+19 from t1 union select t1.a from t1) then -c when 13 between t1.a and b then 17 else c end when t1.c then e else 11 end between t1.e and 11 or f in (select 17 from t1 union select (a) from t1) then t1.f-d when t1.b in (select t1.f from t1 union select t1.f from t1) then t1.c else t1.b end end FROM t1 WHERE (t1.e in (select +17 from t1 union select 11 from t1))} +} {} +do_test randexpr-2.1614 { + db eval {SELECT case when b<=(abs(19)/abs(t1.a)) | 13-b then +f | ~d else ~case when case case when t1.c in (select 13+19 from t1 union select t1.a from t1) then -c when 13 between t1.a and b then 17 else c end when t1.c then e else 11 end between t1.e and 11 or f in (select 17 from t1 union select (a) from t1) then t1.f-d when t1.b in (select t1.f from t1 union select t1.f from t1) then t1.c else t1.b end end FROM t1 WHERE NOT ((t1.e in (select +17 from t1 union select 11 from t1)))} +} {-201} +do_test randexpr-2.1615 { + db eval {SELECT case when b<=(abs(19)/abs(t1.a)) & 13-b then +f & ~d else ~case when case case when t1.c in (select 13+19 from t1 union select t1.a from t1) then -c when 13 between t1.a and b then 17 else c end when t1.c then e else 11 end between t1.e and 11 or f in (select 17 from t1 union select (a) from t1) then t1.f-d when t1.b in (select t1.f from t1 union select t1.f from t1) then t1.c else t1.b end end FROM t1 WHERE NOT ((t1.e in (select +17 from t1 union select 11 from t1)))} +} {-201} +do_test randexpr-2.1616 { + db eval {SELECT coalesce((select coalesce((select coalesce((select ~e+coalesce((select -e-+17 from t1 where (select cast(avg(11) AS integer) from t1) between case when +13+13 between 19 and t1.c then t1.d else 13 end and 13),t1.c)* -(19)+17-t1.a+e from t1 where t1.b>=t1.f),t1.b) | t1.f from t1 where f in (select d from t1 union select 11 from t1)),t1.b) from t1 where t1.b=13),t1.d) FROM t1 WHERE t1.f<>(abs(t1.c)/abs(t1.a))} +} {400} +do_test randexpr-2.1617 { + db eval {SELECT coalesce((select coalesce((select coalesce((select ~e+coalesce((select -e-+17 from t1 where (select cast(avg(11) AS integer) from t1) between case when +13+13 between 19 and t1.c then t1.d else 13 end and 13),t1.c)* -(19)+17-t1.a+e from t1 where t1.b>=t1.f),t1.b) | t1.f from t1 where f in (select d from t1 union select 11 from t1)),t1.b) from t1 where t1.b=13),t1.d) FROM t1 WHERE NOT 
(t1.f<>(abs(t1.c)/abs(t1.a)))} +} {} +do_test randexpr-2.1618 { + db eval {SELECT coalesce((select coalesce((select coalesce((select ~e+coalesce((select -e-+17 from t1 where (select cast(avg(11) AS integer) from t1) between case when +13+13 between 19 and t1.c then t1.d else 13 end and 13),t1.c)* -(19)+17-t1.a+e from t1 where t1.b>=t1.f),t1.b) & t1.f from t1 where f in (select d from t1 union select 11 from t1)),t1.b) from t1 where t1.b=13),t1.d) FROM t1 WHERE t1.f<>(abs(t1.c)/abs(t1.a))} +} {400} +do_test randexpr-2.1619 { + db eval {SELECT (abs(a)/abs(coalesce((select case when coalesce((select d from t1 where d+case (coalesce((select max(e) from t1 where 17+f*13-(case 19 when t1.f then 17 else t1.e end)<>b),17)) when t1.e then t1.e else t1.a end- -13+ -t1.f between b and f),11)+t1.d between 13 and t1.d then b else (19) end-t1.c from t1 where c in (select t1.e from t1 union select f from t1)),b))) FROM t1 WHERE d>t1.a} +} {0} +do_test randexpr-2.1620 { + db eval {SELECT (abs(a)/abs(coalesce((select case when coalesce((select d from t1 where d+case (coalesce((select max(e) from t1 where 17+f*13-(case 19 when t1.f then 17 else t1.e end)<>b),17)) when t1.e then t1.e else t1.a end- -13+ -t1.f between b and f),11)+t1.d between 13 and t1.d then b else (19) end-t1.c from t1 where c in (select t1.e from t1 union select f from t1)),b))) FROM t1 WHERE NOT (d>t1.a)} +} {} +do_test randexpr-2.1621 { + db eval {SELECT coalesce((select case when coalesce((select max(coalesce((select max(e-c-coalesce((select max(c) from t1 where 19 between 13 and 13),(abs(case when c<=17 or f in (t1.a, -17,t1.f) then b else t1.c end)/abs(e))+t1.b)) from t1 where not exists(select 1 from t1 where t1.e>=b)),e)) from t1 where not exists(select 1 from t1 where 13<=t1.b or t1.f in (select d from t1 union select t1.d from t1))),17) in (select 11 from t1 union select t1.d from t1) then c else t1.e end from t1 where t1.c in (select b from t1 union select b from t1)),f) | t1.c FROM t1 WHERE c<=e} +} {892} +do_test randexpr-2.1622 { + db eval {SELECT coalesce((select case when coalesce((select max(coalesce((select max(e-c-coalesce((select max(c) from t1 where 19 between 13 and 13),(abs(case when c<=17 or f in (t1.a, -17,t1.f) then b else t1.c end)/abs(e))+t1.b)) from t1 where not exists(select 1 from t1 where t1.e>=b)),e)) from t1 where not exists(select 1 from t1 where 13<=t1.b or t1.f in (select d from t1 union select t1.d from t1))),17) in (select 11 from t1 union select t1.d from t1) then c else t1.e end from t1 where t1.c in (select b from t1 union select b from t1)),f) | t1.c FROM t1 WHERE NOT (c<=e)} +} {} +do_test randexpr-2.1623 { + db eval {SELECT coalesce((select case when coalesce((select max(coalesce((select max(e-c-coalesce((select max(c) from t1 where 19 between 13 and 13),(abs(case when c<=17 or f in (t1.a, -17,t1.f) then b else t1.c end)/abs(e))+t1.b)) from t1 where not exists(select 1 from t1 where t1.e>=b)),e)) from t1 where not exists(select 1 from t1 where 13<=t1.b or t1.f in (select d from t1 union select t1.d from t1))),17) in (select 11 from t1 union select t1.d from t1) then c else t1.e end from t1 where t1.c in (select b from t1 union select b from t1)),f) & t1.c FROM t1 WHERE c<=e} +} {8} +do_test randexpr-2.1624 { + db eval {SELECT (select +max(case when t1.a in (f,case when f in (t1.b,+ -(select count(*)-min(17) from t1)+19,11) then -19 else 19 end*(abs(b)/abs(a)),a) then t1.b when (case a when 17 then c | t1.d else t1.c end+11)-13 in (b,11,f) then b else 17 end*17) from t1) FROM t1 WHERE c between 
(abs(case when (abs(d+19)/abs(19))-c*t1.d+t1.e+case when -(abs(case when exists(select 1 from t1 where exists(select 1 from t1 where 13 in (select cast(avg(t1.b) AS integer) from t1 union select max(d) from t1))) then ~c when t1.d not between 13 and d then -t1.b else -f end)/abs(t1.b))+d in (t1.e,(13),e) then e when (b)<>d then t1.b else 19 end in (select 17 from t1 union select e from t1) then t1.e else t1.e end)/abs(t1.f)) and t1.e} +} {3400} +do_test randexpr-2.1625 { + db eval {SELECT (select +max(case when t1.a in (f,case when f in (t1.b,+ -(select count(*)-min(17) from t1)+19,11) then -19 else 19 end*(abs(b)/abs(a)),a) then t1.b when (case a when 17 then c | t1.d else t1.c end+11)-13 in (b,11,f) then b else 17 end*17) from t1) FROM t1 WHERE NOT (c between (abs(case when (abs(d+19)/abs(19))-c*t1.d+t1.e+case when -(abs(case when exists(select 1 from t1 where exists(select 1 from t1 where 13 in (select cast(avg(t1.b) AS integer) from t1 union select max(d) from t1))) then ~c when t1.d not between 13 and d then -t1.b else -f end)/abs(t1.b))+d in (t1.e,(13),e) then e when (b)<>d then t1.b else 19 end in (select 17 from t1 union select e from t1) then t1.e else t1.e end)/abs(t1.f)) and t1.e)} +} {} +do_test randexpr-2.1626 { + db eval {SELECT (select +max(case when t1.a in (f,case when f in (t1.b,+ -(select count(*)-min(17) from t1)+19,11) then -19 else 19 end*(abs(b)/abs(a)),a) then t1.b when (case a when 17 then c & t1.d else t1.c end+11)-13 in (b,11,f) then b else 17 end*17) from t1) FROM t1 WHERE c between (abs(case when (abs(d+19)/abs(19))-c*t1.d+t1.e+case when -(abs(case when exists(select 1 from t1 where exists(select 1 from t1 where 13 in (select cast(avg(t1.b) AS integer) from t1 union select max(d) from t1))) then ~c when t1.d not between 13 and d then -t1.b else -f end)/abs(t1.b))+d in (t1.e,(13),e) then e when (b)<>d then t1.b else 19 end in (select 17 from t1 union select e from t1) then t1.e else t1.e end)/abs(t1.f)) and t1.e} +} {3400} +do_test randexpr-2.1627 { + db eval {SELECT case when c between f and t1.f*t1.f then (coalesce((select max(d | coalesce((select 19+t1.c from t1 where t1.a between d and (b)),(t1.f))*t1.f*t1.b) from t1 where 11>13 or 19 not between 13 and 11 or t1.a>=b or b>t1.e or 11=a and t1.b<=d),f)) when a in (select min(t1.b)-abs(count(distinct 11)) from t1 union select cast(avg(t1.d) AS integer) from t1) then f else c end-t1.e FROM t1 WHERE (t1.f+a)*t1.b+(abs(b)/abs(b)) in (t1.e,case when 11-t1.a+e+case 19 when +11+ -t1.b then case ~t1.f*(f)-b when d then a else ( -c) end*(c) else t1.a end+t1.a not in (17,11,t1.e) then t1.b else (d) end*19,t1.a)} +} {} +do_test randexpr-2.1628 { + db eval {SELECT case when c between f and t1.f*t1.f then (coalesce((select max(d | coalesce((select 19+t1.c from t1 where t1.a between d and (b)),(t1.f))*t1.f*t1.b) from t1 where 11>13 or 19 not between 13 and 11 or t1.a>=b or b>t1.e or 11=a and t1.b<=d),f)) when a in (select min(t1.b)-abs(count(distinct 11)) from t1 union select cast(avg(t1.d) AS integer) from t1) then f else c end-t1.e FROM t1 WHERE NOT ((t1.f+a)*t1.b+(abs(b)/abs(b)) in (t1.e,case when 11-t1.a+e+case 19 when +11+ -t1.b then case ~t1.f*(f)-b when d then a else ( -c) end*(c) else t1.a end+t1.a not in (17,11,t1.e) then t1.b else (d) end*19,t1.a))} +} {-200} +do_test randexpr-2.1629 { + db eval {SELECT case when c between f and t1.f*t1.f then (coalesce((select max(d & coalesce((select 19+t1.c from t1 where t1.a between d and (b)),(t1.f))*t1.f*t1.b) from t1 where 11>13 or 19 not between 13 and 11 or t1.a>=b or 
b>t1.e or 11=a and t1.b<=d),f)) when a in (select min(t1.b)-abs(count(distinct 11)) from t1 union select cast(avg(t1.d) AS integer) from t1) then f else c end-t1.e FROM t1 WHERE NOT ((t1.f+a)*t1.b+(abs(b)/abs(b)) in (t1.e,case when 11-t1.a+e+case 19 when +11+ -t1.b then case ~t1.f*(f)-b when d then a else ( -c) end*(c) else t1.a end+t1.a not in (17,11,t1.e) then t1.b else (d) end*19,t1.a))} +} {-200} +do_test randexpr-2.1630 { + db eval {SELECT coalesce((select max(f) from t1 where (a+13>19*t1.e+a | a | 17*e or ~c not between 17 and 19*(select +(~count(distinct e)*(cast(avg(t1.b) AS integer))) | count(distinct t1.c) from t1)-f and t1.a*t1.f+c not between t1.f and f)),t1.c*(t1.e)*e) FROM t1 WHERE case case t1.f when ~17 then 11 else t1.d end when a then t1.a else e end>d*11} +} {} +do_test randexpr-2.1631 { + db eval {SELECT coalesce((select max(f) from t1 where (a+13>19*t1.e+a | a | 17*e or ~c not between 17 and 19*(select +(~count(distinct e)*(cast(avg(t1.b) AS integer))) | count(distinct t1.c) from t1)-f and t1.a*t1.f+c not between t1.f and f)),t1.c*(t1.e)*e) FROM t1 WHERE NOT (case case t1.f when ~17 then 11 else t1.d end when a then t1.a else e end>d*11)} +} {600} +do_test randexpr-2.1632 { + db eval {SELECT coalesce((select max(f) from t1 where (a+13>19*t1.e+a & a & 17*e or ~c not between 17 and 19*(select +(~count(distinct e)*(cast(avg(t1.b) AS integer))) & count(distinct t1.c) from t1)-f and t1.a*t1.f+c not between t1.f and f)),t1.c*(t1.e)*e) FROM t1 WHERE NOT (case case t1.f when ~17 then 11 else t1.d end when a then t1.a else e end>d*11)} +} {600} +do_test randexpr-2.1633 { + db eval {SELECT case when e<11 then (case case t1.a when coalesce((select max(11) from t1 where t1.c<=case when exists(select 1 from t1 where 13=e) then case when c not in (t1.f,t1.e,c) then c else ~(c)+d*f end when a>b then 11 else 11 end),(t1.c)) | 11 then b else t1.a end when 11 then t1.f else t1.d end) when db then 11 else 11 end),(t1.c)) | 11 then b else t1.a end when 11 then t1.f else t1.d end) when db then 11 else 11 end),(t1.c)) & 11 then b else t1.a end when 11 then t1.f else t1.d end) when df)),17) when coalesce((select max((b)) from t1 where not case (abs(t1.b)/abs(t1.d+t1.b)) when t1.d then f-coalesce((select max(a) from t1 where case t1.e when c then c else 11 end not between b and -t1.b),t1.e) else t1.a end-17 between t1.e and f and (t1.e17), -e)*19 then t1.b else t1.c end FROM t1 WHERE not -coalesce((select d from t1 where exists(select 1 from t1 where coalesce((select max(case when 11 not in (case when d*11<>17-(t1.e) then 13 else c end,t1.d,t1.c) then e else t1.c end) from t1 where t1.d<= - -d),11) in (select + -case (+ -cast(avg(19) AS integer)+ - -(count(*))* -count(distinct ((t1.e)))*max(d)) when count(*) then max(b) else count(*) end from t1 union select count(*) from t1))),t1.d)+13>a} +} {300} +do_test randexpr-2.1637 { + db eval {SELECT case coalesce((select a from t1 where exists(select 1 from t1 where t1.e>f)),17) when coalesce((select max((b)) from t1 where not case (abs(t1.b)/abs(t1.d+t1.b)) when t1.d then f-coalesce((select max(a) from t1 where case t1.e when c then c else 11 end not between b and -t1.b),t1.e) else t1.a end-17 between t1.e and f and (t1.e17), -e)*19 then t1.b else t1.c end FROM t1 WHERE NOT (not -coalesce((select d from t1 where exists(select 1 from t1 where coalesce((select max(case when 11 not in (case when d*11<>17-(t1.e) then 13 else c end,t1.d,t1.c) then e else t1.c end) from t1 where t1.d<= - -d),11) in (select + -case (+ -cast(avg(19) AS integer)+ - 
-(count(*))* -count(distinct ((t1.e)))*max(d)) when count(*) then max(b) else count(*) end from t1 union select count(*) from t1))),t1.d)+13>a)} +} {} +do_test randexpr-2.1638 { + db eval {SELECT ~coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.e-t1.e in (select t1.c+case d when case when not case when t1.f between ( -t1.a) and (b) or 11<>19 then (abs(t1.e)/abs(t1.f)) else e end between t1.b and e then t1.d | t1.e when 19>b then 13 else t1.a end*e*13 | 11+t1.f then t1.b else 17 end from t1 union select e from t1))),t1.a-t1.b)*t1.d*e FROM t1 WHERE d in (select 11 from t1 union select +11 from t1) or coalesce((select max(coalesce((select b-e*case when (c) | e | b in (select 19*t1.d from t1 union select t1.e from t1) then t1.d else e end from t1 where t1.a<11),d)) from t1 where (not ((17 not in ( -t1.a,e,d) and 13<11) and f>=11))),t1.b)<11} +} {} +do_test randexpr-2.1639 { + db eval {SELECT ~coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.e-t1.e in (select t1.c+case d when case when not case when t1.f between ( -t1.a) and (b) or 11<>19 then (abs(t1.e)/abs(t1.f)) else e end between t1.b and e then t1.d | t1.e when 19>b then 13 else t1.a end*e*13 | 11+t1.f then t1.b else 17 end from t1 union select e from t1))),t1.a-t1.b)*t1.d*e FROM t1 WHERE NOT (d in (select 11 from t1 union select +11 from t1) or coalesce((select max(coalesce((select b-e*case when (c) | e | b in (select 19*t1.d from t1 union select t1.e from t1) then t1.d else e end from t1 where t1.a<11),d)) from t1 where (not ((17 not in ( -t1.a,e,d) and 13<11) and f>=11))),t1.b)<11)} +} {-3600000} +do_test randexpr-2.1640 { + db eval {SELECT ~coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.e-t1.e in (select t1.c+case d when case when not case when t1.f between ( -t1.a) and (b) or 11<>19 then (abs(t1.e)/abs(t1.f)) else e end between t1.b and e then t1.d & t1.e when 19>b then 13 else t1.a end*e*13 & 11+t1.f then t1.b else 17 end from t1 union select e from t1))),t1.a-t1.b)*t1.d*e FROM t1 WHERE NOT (d in (select 11 from t1 union select +11 from t1) or coalesce((select max(coalesce((select b-e*case when (c) | e | b in (select 19*t1.d from t1 union select t1.e from t1) then t1.d else e end from t1 where t1.a<11),d)) from t1 where (not ((17 not in ( -t1.a,e,d) and 13<11) and f>=11))),t1.b)<11)} +} {-3600000} +do_test randexpr-2.1641 { + db eval {SELECT case b-f when t1.a then -case when not (case t1.a when (select case cast(avg(c) AS integer) when count(distinct t1.f) then +max(t1.e) | -((( -(count(*))))) else cast(avg(f) AS integer) end from t1)-b then a else coalesce((select t1.a from t1 where (e=a and e>c) or c not between a and 19 then 11 when t1.b not between e and t1.a then 17 else 11 end) from t1)) AS integer) from t1 union select cast(avg((abs(b)/abs(t1.f))) AS integer) from t1) then 17 else t1.e end>=a} +} {} +do_test randexpr-2.1642 { + db eval {SELECT case b-f when t1.a then -case when not (case t1.a when (select case cast(avg(c) AS integer) when count(distinct t1.f) then +max(t1.e) | -((( -(count(*))))) else cast(avg(f) AS integer) end from t1)-b then a else coalesce((select t1.a from t1 where (e=a and e>c) or c not between a and 19 then 11 when t1.b not between e and t1.a then 17 else 11 end) from t1)) AS integer) from t1 union select cast(avg((abs(b)/abs(t1.f))) AS integer) from t1) then 17 else t1.e end>=a)} +} {17} +do_test randexpr-2.1643 { + db eval {SELECT case b-f when t1.a then -case when not (case t1.a when (select case cast(avg(c) AS integer) when 
count(distinct t1.f) then +max(t1.e) & -((( -(count(*))))) else cast(avg(f) AS integer) end from t1)-b then a else coalesce((select t1.a from t1 where (e=a and e>c) or c not between a and 19 then 11 when t1.b not between e and t1.a then 17 else 11 end) from t1)) AS integer) from t1 union select cast(avg((abs(b)/abs(t1.f))) AS integer) from t1) then 17 else t1.e end>=a)} +} {17} +do_test randexpr-2.1644 { + db eval {SELECT coalesce((select max((abs(t1.b*t1.d*case when f*t1.db),t1.d*19)*e+c+t1.d then t1.c when exists(select 1 from t1 where 13>=e) then t1.e else f end-f*e)/abs(13))) from t1 where (a)<=t1.d),13) | e FROM t1 WHERE +t1.c>t1.d} +} {} +do_test randexpr-2.1645 { + db eval {SELECT coalesce((select max((abs(t1.b*t1.d*case when f*t1.db),t1.d*19)*e+c+t1.d then t1.c when exists(select 1 from t1 where 13>=e) then t1.e else f end-f*e)/abs(13))) from t1 where (a)<=t1.d),13) | e FROM t1 WHERE NOT (+t1.c>t1.d)} +} {1823220} +do_test randexpr-2.1646 { + db eval {SELECT coalesce((select max((abs(t1.b*t1.d*case when f*t1.db),t1.d*19)*e+c+t1.d then t1.c when exists(select 1 from t1 where 13>=e) then t1.e else f end-f*e)/abs(13))) from t1 where (a)<=t1.d),13) & e FROM t1 WHERE NOT (+t1.c>t1.d)} +} {356} +do_test randexpr-2.1647 { + db eval {SELECT case when not exists(select 1 from t1 where 17>=19) or t1.c in (select min(19)-(cast(avg(f) AS integer)) from t1 union select cast(avg(19) AS integer) from t1) and e in (select ~+count(distinct 19)*max(13) from t1 union select min(17+d) from t1) or t1.f in (select count(*) from t1 union select (max(a)) from t1) then coalesce((select -d- -t1.d from t1 where b in (select cast(avg(c) AS integer) from t1 union select (count(*)) from t1) or b<=11),e) else t1.e end FROM t1 WHERE a< -13} +} {} +do_test randexpr-2.1648 { + db eval {SELECT case when not exists(select 1 from t1 where 17>=19) or t1.c in (select min(19)-(cast(avg(f) AS integer)) from t1 union select cast(avg(19) AS integer) from t1) and e in (select ~+count(distinct 19)*max(13) from t1 union select min(17+d) from t1) or t1.f in (select count(*) from t1 union select (max(a)) from t1) then coalesce((select -d- -t1.d from t1 where b in (select cast(avg(c) AS integer) from t1 union select (count(*)) from t1) or b<=11),e) else t1.e end FROM t1 WHERE NOT (a< -13)} +} {500} +do_test randexpr-2.1649 { + db eval {SELECT f+case when 17-t1.f in (select ~count(distinct (abs((abs(c)/abs(c)))/abs((abs(11)/abs(+case -t1.a when t1.d then -13 else 17 end))))-t1.c) from t1 union select ~case cast(avg(t1.c) AS integer) when -~ -count(*)*max((t1.f))+count(*) then (min(d)) else max(a) end-count(distinct t1.a)+ -count(*)-count(*) from t1) then d when b not between t1.f and t1.e then a else 11 end+t1.a FROM t1 WHERE (exists(select 1 from t1 where case when exists(select 1 from t1 where case when (t1.e+case (t1.b) when f then case t1.c when 11 | -13-coalesce((select max(d) from t1 where c not in ( -t1.e,t1.c,17) and t1.c<17),e)*17+t1.a then t1.b else 11 end else d end)+t1.f not in (d,13,d) then f else t1.d end not between (c) and f) then t1.e else f end-t1.c in (t1.b,t1.a,f)))} +} {} +do_test randexpr-2.1650 { + db eval {SELECT f+case when 17-t1.f in (select ~count(distinct (abs((abs(c)/abs(c)))/abs((abs(11)/abs(+case -t1.a when t1.d then -13 else 17 end))))-t1.c) from t1 union select ~case cast(avg(t1.c) AS integer) when -~ -count(*)*max((t1.f))+count(*) then (min(d)) else max(a) end-count(distinct t1.a)+ -count(*)-count(*) from t1) then d when b not between t1.f and t1.e then a else 11 end+t1.a FROM t1 WHERE NOT 
((exists(select 1 from t1 where case when exists(select 1 from t1 where case when (t1.e+case (t1.b) when f then case t1.c when 11 | -13-coalesce((select max(d) from t1 where c not in ( -t1.e,t1.c,17) and t1.c<17),e)*17+t1.a then t1.b else 11 end else d end)+t1.f not in (d,13,d) then f else t1.d end not between (c) and f) then t1.e else f end-t1.c in (t1.b,t1.a,f))))} +} {800} +do_test randexpr-2.1651 { + db eval {SELECT case ~t1.d when case when c in (select count(*) from t1 union select +(( -min(17)-~cast(avg(t1.c) AS integer)))* -min(t1.f)-max(t1.d)*count(*) | cast(avg(17) AS integer) from t1) then 17*e*t1.f | t1.e else 19-coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where t1.c>=e)),t1.f) end | 13+13 then 17 else t1.f end FROM t1 WHERE (not exists(select 1 from t1 where +c-19 in (select (~((min(b)))-+count(*) | cast(avg(case when 11 in (select (abs((t1.b))/abs(e)) from t1 union select (17) from t1) then t1.c when -a<=b then t1.a else t1.e end) AS integer)++case max(t1.b) when cast(avg(c) AS integer) then count(*) else cast(avg(f) AS integer) end | -count(*)-(count(*))*max(t1.e)*count(*)*cast(avg(a) AS integer))+min(11) from t1 union select max((d)) from t1)))} +} {600} +do_test randexpr-2.1652 { + db eval {SELECT case ~t1.d when case when c in (select count(*) from t1 union select +(( -min(17)-~cast(avg(t1.c) AS integer)))* -min(t1.f)-max(t1.d)*count(*) | cast(avg(17) AS integer) from t1) then 17*e*t1.f | t1.e else 19-coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where t1.c>=e)),t1.f) end | 13+13 then 17 else t1.f end FROM t1 WHERE NOT ((not exists(select 1 from t1 where +c-19 in (select (~((min(b)))-+count(*) | cast(avg(case when 11 in (select (abs((t1.b))/abs(e)) from t1 union select (17) from t1) then t1.c when -a<=b then t1.a else t1.e end) AS integer)++case max(t1.b) when cast(avg(c) AS integer) then count(*) else cast(avg(f) AS integer) end | -count(*)-(count(*))*max(t1.e)*count(*)*cast(avg(a) AS integer))+min(11) from t1 union select max((d)) from t1))))} +} {} +do_test randexpr-2.1653 { + db eval {SELECT case ~t1.d when case when c in (select count(*) from t1 union select +(( -min(17)-~cast(avg(t1.c) AS integer)))* -min(t1.f)-max(t1.d)*count(*) & cast(avg(17) AS integer) from t1) then 17*e*t1.f & t1.e else 19-coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where t1.c>=e)),t1.f) end & 13+13 then 17 else t1.f end FROM t1 WHERE (not exists(select 1 from t1 where +c-19 in (select (~((min(b)))-+count(*) | cast(avg(case when 11 in (select (abs((t1.b))/abs(e)) from t1 union select (17) from t1) then t1.c when -a<=b then t1.a else t1.e end) AS integer)++case max(t1.b) when cast(avg(c) AS integer) then count(*) else cast(avg(f) AS integer) end | -count(*)-(count(*))*max(t1.e)*count(*)*cast(avg(a) AS integer))+min(11) from t1 union select max((d)) from t1)))} +} {600} +do_test randexpr-2.1654 { + db eval {SELECT 19 | case when not (select min(17) from t1)>t1.e then coalesce((select max(coalesce((select b from t1 where (t1.a not in (a | t1.c,t1.d,19) or not exists(select 1 from t1 where t1.b in (select t1.d from t1 union select a from t1)) and (a in (select count(*) from t1 union select max(t1.b) from t1) and t1.c in (t1.b,f,t1.d)))),t1.c)) from t1 where f=d and -b>c),t1.f)-t1.c when c in ((a),11,t1.c) then 13 else 13 end-c FROM t1 WHERE (t1.e)*case when +a*t1.f*t1.e>=+d*a+b-t1.f*~13 | t1.d*11-f-~f+17-coalesce((select max( -f) from t1 where bt1.e then coalesce((select max(coalesce((select b from t1 where (t1.a not in (a | 
t1.c,t1.d,19) or not exists(select 1 from t1 where t1.b in (select t1.d from t1 union select a from t1)) and (a in (select count(*) from t1 union select max(t1.b) from t1) and t1.c in (t1.b,f,t1.d)))),t1.c)) from t1 where f=d and -b>c),t1.f)-t1.c when c in ((a),11,t1.c) then 13 else 13 end-c FROM t1 WHERE NOT ((t1.e)*case when +a*t1.f*t1.e>=+d*a+b-t1.f*~13 | t1.d*11-f-~f+17-coalesce((select max( -f) from t1 where bt1.e then coalesce((select max(coalesce((select b from t1 where (t1.a not in (a & t1.c,t1.d,19) or not exists(select 1 from t1 where t1.b in (select t1.d from t1 union select a from t1)) and (a in (select count(*) from t1 union select max(t1.b) from t1) and t1.c in (t1.b,f,t1.d)))),t1.c)) from t1 where f=d and -b>c),t1.f)-t1.c when c in ((a),11,t1.c) then 13 else 13 end-c FROM t1 WHERE NOT ((t1.e)*case when +a*t1.f*t1.e>=+d*a+b-t1.f*~13 | t1.d*11-f-~f+17-coalesce((select max( -f) from t1 where bt1.c),11)*e*t1.e from t1 where e<=t1.d),t1.a)*f=11 then e when t1.a in (select e from t1 union select b from t1) then t1.b-t1.a else e end from t1 where a=t1.a), -17) else e end FROM t1 WHERE not exists(select 1 from t1 where t1.e in (select max((abs(~+t1.a-t1.c*a)/abs(17))) from t1 union select min(11*t1.c+t1.f) from t1))} +} {500} +do_test randexpr-2.1658 { + db eval {SELECT case when not exists(select 1 from t1 where not not not d=17) then t1.b when (not coalesce((select coalesce((select max(d) from t1 where c+c>t1.c),11)*e*t1.e from t1 where e<=t1.d),t1.a)*f=11 then e when t1.a in (select e from t1 union select b from t1) then t1.b-t1.a else e end from t1 where a=t1.a), -17) else e end FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.e in (select max((abs(~+t1.a-t1.c*a)/abs(17))) from t1 union select min(11*t1.c+t1.f) from t1)))} +} {} +do_test randexpr-2.1659 { + db eval {SELECT coalesce((select coalesce((select max(case when not a not in ((abs(11)/abs(coalesce((select t1.d from t1 where 17 in (select count(*) from t1 union select count(distinct t1.a) from t1)),b)*t1.e*c)),t1.a,t1.b) or t1.c<=t1.f or (t1.d>= -a) then (select cast(avg(~t1.f-t1.f) AS integer) from t1) else 17 end) from t1 where t1.b<=c and t1.b<=a),11) from t1 where exists(select 1 from t1 where e=(select min(coalesce((select max(19) from t1 where not exists(select 1 from t1 where (select count(distinct +case when coalesce((select max(case when et1.d),t1.d) in (select abs(cast(avg(t1.d) AS integer)-min(t1.c)) from t1 union select -cast(avg(19) AS integer) from t1) then b when t1.e< -t1.c then t1.b else t1.c end-19*e) from t1)<=a)),17))+ -(max(a))- -cast(avg(11) AS integer)+ -count(distinct c) from t1)-11} +} {100} +do_test randexpr-2.1660 { + db eval {SELECT coalesce((select coalesce((select max(case when not a not in ((abs(11)/abs(coalesce((select t1.d from t1 where 17 in (select count(*) from t1 union select count(distinct t1.a) from t1)),b)*t1.e*c)),t1.a,t1.b) or t1.c<=t1.f or (t1.d>= -a) then (select cast(avg(~t1.f-t1.f) AS integer) from t1) else 17 end) from t1 where t1.b<=c and t1.b<=a),11) from t1 where exists(select 1 from t1 where e=(select min(coalesce((select max(19) from t1 where not exists(select 1 from t1 where (select count(distinct +case when coalesce((select max(case when et1.d),t1.d) in (select abs(cast(avg(t1.d) AS integer)-min(t1.c)) from t1 union select -cast(avg(19) AS integer) from t1) then b when t1.e< -t1.c then t1.b else t1.c end-19*e) from t1)<=a)),17))+ -(max(a))- -cast(avg(11) AS integer)+ -count(distinct c) from t1)-11)} +} {} +do_test randexpr-2.1661 { + db eval {SELECT e*(case when 
+t1.c+t1.e+f<=13 then (select case cast(avg(coalesce((select (t1.f*a)*11 from t1 where c<>t1.a),b)) AS integer)+cast(avg(b) AS integer)+max( -b)-min(e) when max(11) then cast(avg(t1.e) AS integer) else cast(avg(t1.b) AS integer) end from t1) when ~11*t1.d<=t1.b or (t1.e=t1.a) then t1.b else -19 end)+e FROM t1 WHERE a>13*d+t1.e+c} +} {} +do_test randexpr-2.1662 { + db eval {SELECT e*(case when +t1.c+t1.e+f<=13 then (select case cast(avg(coalesce((select (t1.f*a)*11 from t1 where c<>t1.a),b)) AS integer)+cast(avg(b) AS integer)+max( -b)-min(e) when max(11) then cast(avg(t1.e) AS integer) else cast(avg(t1.b) AS integer) end from t1) when ~11*t1.d<=t1.b or (t1.e=t1.a) then t1.b else -19 end)+e FROM t1 WHERE NOT (a>13*d+t1.e+c)} +} {100500} +do_test randexpr-2.1663 { + db eval {SELECT (abs(t1.d-t1.b)/abs(t1.c))+t1.a*coalesce((select max(coalesce((select max(~11+11) from t1 where e-a>=case when 11*t1.d+13+(select cast(avg(t1.d+f) AS integer) from t1)-d+t1.d-b not between a and a then e else coalesce((select e from t1 where d>b and b between 11 and t1.d),t1.b) end), -t1.b)) from t1 where t1.e<=c),17)+ -t1.d FROM t1 WHERE case when (19>b) then case (abs(t1.d+case when not exists(select 1 from t1 where f in (19,d,11) and f not between t1.e and a) then (19) when a<=t1.d then 17 else t1.c end* -c*13)/abs(d))*f when 19 then t1.c else - - -t1.d end when c in (select max(t1.e) from t1 union select ( - -abs(cast(avg(t1.f) AS integer))-case cast(avg(a) AS integer) when (min(t1.f)) then max(13) else max(11) end | -((min(11)))+max((a))) from t1) then c else -b end*t1.d>d} +} {} +do_test randexpr-2.1664 { + db eval {SELECT (abs(t1.d-t1.b)/abs(t1.c))+t1.a*coalesce((select max(coalesce((select max(~11+11) from t1 where e-a>=case when 11*t1.d+13+(select cast(avg(t1.d+f) AS integer) from t1)-d+t1.d-b not between a and a then e else coalesce((select e from t1 where d>b and b between 11 and t1.d),t1.b) end), -t1.b)) from t1 where t1.e<=c),17)+ -t1.d FROM t1 WHERE NOT (case when (19>b) then case (abs(t1.d+case when not exists(select 1 from t1 where f in (19,d,11) and f not between t1.e and a) then (19) when a<=t1.d then 17 else t1.c end* -c*13)/abs(d))*f when 19 then t1.c else - - -t1.d end when c in (select max(t1.e) from t1 union select ( - -abs(cast(avg(t1.f) AS integer))-case cast(avg(a) AS integer) when (min(t1.f)) then max(13) else max(11) end | -((min(11)))+max((a))) from t1) then c else -b end*t1.d>d)} +} {1300} +do_test randexpr-2.1665 { + db eval {SELECT ~t1.a*(case when not not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where (select count(distinct b | case 13 | case when -11<>a then t1.a else b end when t1.e then t1.b else d end*t1.f) from t1)<>17 or t1.a not between c and t1.a),11) in (t1.e,c,c)) and -11>f then t1.d else f end-17)+ -(t1.b) FROM t1 WHERE (13-coalesce((select -case when 19+c=a then f else 11 end*(e) from t1 where (t1.b in (select t1.a from t1 union select 11 from t1))),(t1.f))*e+f*(t1.f)=a and not 17 in (select max(f) from t1 union select max(19) from t1) or 19<>t1.f) and 13 between f and f and d<>b} +} {} +do_test randexpr-2.1666 { + db eval {SELECT ~t1.a*(case when not not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where (select count(distinct b | case 13 | case when -11<>a then t1.a else b end when t1.e then t1.b else d end*t1.f) from t1)<>17 or t1.a not between c and t1.a),11) in (t1.e,c,c)) and -11>f then t1.d else f end-17)+ -(t1.b) FROM t1 WHERE NOT ((13-coalesce((select -case when 19+c=a then f else 11 end*(e) from t1 where (t1.b in 
(select t1.a from t1 union select 11 from t1))),(t1.f))*e+f*(t1.f)=a and not 17 in (select max(f) from t1 union select max(19) from t1) or 19<>t1.f) and 13 between f and f and d<>b)} +} {-59083} +do_test randexpr-2.1667 { + db eval {SELECT ~t1.a*(case when not not exists(select 1 from t1 where coalesce((select max(t1.c) from t1 where (select count(distinct b & case 13 & case when -11<>a then t1.a else b end when t1.e then t1.b else d end*t1.f) from t1)<>17 or t1.a not between c and t1.a),11) in (t1.e,c,c)) and -11>f then t1.d else f end-17)+ -(t1.b) FROM t1 WHERE NOT ((13-coalesce((select -case when 19+c=a then f else 11 end*(e) from t1 where (t1.b in (select t1.a from t1 union select 11 from t1))),(t1.f))*e+f*(t1.f)=a and not 17 in (select max(f) from t1 union select max(19) from t1) or 19<>t1.f) and 13 between f and f and d<>b)} +} {-59083} +do_test randexpr-2.1668 { + db eval {SELECT ~13+case when (t1.e-~b+13*case when b>(select max(case when (19*f in (select ~count(distinct a)+count(distinct (e)) from t1 union select (cast(avg(t1.c) AS integer)) from t1)) then coalesce((select -19 from t1 where t1.f<=c or 19 not in (t1.c, -b,19)),t1.e) when t1.e>c then e else 13 end | f) from t1) then 13 else d end in (select t1.f from t1 union select c from t1)) then t1.f when not exists(select 1 from t1 where a between f and f) then (d) else t1.c end FROM t1 WHERE exists(select 1 from t1 where case when 19 in (19,+(abs(case t1.f when e then case (select +cast(avg(a) AS integer)*min(e) from t1) when case when 19 not between -t1.c and d then 11 else t1.f end then f else t1.c end else e end+17+b)/abs(t1.d))*13,t1.f) then a when t1.d not in (f,d, -c) then t1.c else t1.b end not between d and c and t1.b not between t1.b and f)} +} {} +do_test randexpr-2.1669 { + db eval {SELECT ~13+case when (t1.e-~b+13*case when b>(select max(case when (19*f in (select ~count(distinct a)+count(distinct (e)) from t1 union select (cast(avg(t1.c) AS integer)) from t1)) then coalesce((select -19 from t1 where t1.f<=c or 19 not in (t1.c, -b,19)),t1.e) when t1.e>c then e else 13 end | f) from t1) then 13 else d end in (select t1.f from t1 union select c from t1)) then t1.f when not exists(select 1 from t1 where a between f and f) then (d) else t1.c end FROM t1 WHERE NOT (exists(select 1 from t1 where case when 19 in (19,+(abs(case t1.f when e then case (select +cast(avg(a) AS integer)*min(e) from t1) when case when 19 not between -t1.c and d then 11 else t1.f end then f else t1.c end else e end+17+b)/abs(t1.d))*13,t1.f) then a when t1.d not in (f,d, -c) then t1.c else t1.b end not between d and c and t1.b not between t1.b and f))} +} {386} +do_test randexpr-2.1670 { + db eval {SELECT ~13+case when (t1.e-~b+13*case when b>(select max(case when (19*f in (select ~count(distinct a)+count(distinct (e)) from t1 union select (cast(avg(t1.c) AS integer)) from t1)) then coalesce((select -19 from t1 where t1.f<=c or 19 not in (t1.c, -b,19)),t1.e) when t1.e>c then e else 13 end & f) from t1) then 13 else d end in (select t1.f from t1 union select c from t1)) then t1.f when not exists(select 1 from t1 where a between f and f) then (d) else t1.c end FROM t1 WHERE NOT (exists(select 1 from t1 where case when 19 in (19,+(abs(case t1.f when e then case (select +cast(avg(a) AS integer)*min(e) from t1) when case when 19 not between -t1.c and d then 11 else t1.f end then f else t1.c end else e end+17+b)/abs(t1.d))*13,t1.f) then a when t1.d not in (f,d, -c) then t1.c else t1.b end not between d and c and t1.b not between t1.b and f))} +} {386} 
+do_test randexpr-2.1671 { + db eval {SELECT -case coalesce((select max((case when a>t1.c | t1.a then (select abs(count(distinct a)) from t1) when (e not in (coalesce((select c from t1 where exists(select 1 from t1 where c<=11) and t1.a>e),e),d,a)) or t1.d<>e then t1.c else c end)- -11*b) from t1 where not exists(select 1 from t1 where not (c in ((t1.e),d,19)))),(t1.c))*b when 17 then t1.b else 19 end FROM t1 WHERE e<=coalesce((select t1.b from t1 where coalesce((select c*19 | (a) from t1 where not exists(select 1 from t1 where 19>=13 or case when coalesce((select max(19) from t1 where t1.e=t1.d or t1.f<>e),t1.f)>19 and t1.f=t1.d then 11+ -t1.d when -t1.f not between f and f then t1.a else d end not in ((t1.a),13,t1.a) and c in (select (13) from t1 union select f from t1) and 17<(f)) and t1.a<>t1.e),t1.d)>=t1.c),t1.f)} +} {} +do_test randexpr-2.1672 { + db eval {SELECT -case coalesce((select max((case when a>t1.c | t1.a then (select abs(count(distinct a)) from t1) when (e not in (coalesce((select c from t1 where exists(select 1 from t1 where c<=11) and t1.a>e),e),d,a)) or t1.d<>e then t1.c else c end)- -11*b) from t1 where not exists(select 1 from t1 where not (c in ((t1.e),d,19)))),(t1.c))*b when 17 then t1.b else 19 end FROM t1 WHERE NOT (e<=coalesce((select t1.b from t1 where coalesce((select c*19 | (a) from t1 where not exists(select 1 from t1 where 19>=13 or case when coalesce((select max(19) from t1 where t1.e=t1.d or t1.f<>e),t1.f)>19 and t1.f=t1.d then 11+ -t1.d when -t1.f not between f and f then t1.a else d end not in ((t1.a),13,t1.a) and c in (select (13) from t1 union select f from t1) and 17<(f)) and t1.a<>t1.e),t1.d)>=t1.c),t1.f))} +} {-19} +do_test randexpr-2.1673 { + db eval {SELECT -case coalesce((select max((case when a>t1.c & t1.a then (select abs(count(distinct a)) from t1) when (e not in (coalesce((select c from t1 where exists(select 1 from t1 where c<=11) and t1.a>e),e),d,a)) or t1.d<>e then t1.c else c end)- -11*b) from t1 where not exists(select 1 from t1 where not (c in ((t1.e),d,19)))),(t1.c))*b when 17 then t1.b else 19 end FROM t1 WHERE NOT (e<=coalesce((select t1.b from t1 where coalesce((select c*19 | (a) from t1 where not exists(select 1 from t1 where 19>=13 or case when coalesce((select max(19) from t1 where t1.e=t1.d or t1.f<>e),t1.f)>19 and t1.f=t1.d then 11+ -t1.d when -t1.f not between f and f then t1.a else d end not in ((t1.a),13,t1.a) and c in (select (13) from t1 union select f from t1) and 17<(f)) and t1.a<>t1.e),t1.d)>=t1.c),t1.f))} +} {-19} +do_test randexpr-2.1674 { + db eval {SELECT coalesce((select (11) from t1 where not e in (select -min(d*13) from t1 union select count(distinct f) from t1)),t1.f*t1.d | t1.f) FROM t1 WHERE -t1.c*d>case d when ~coalesce((select max(e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not e=c))),+coalesce((select +t1.e from t1 where not exists(select 1 from t1 where c>=t1.b)),(abs( -t1.c+case t1.c when ~~t1.c+a-t1.c then coalesce((select max(case when t1.b<=c then 19 else f end) from t1 where 13case d when ~coalesce((select max(e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not e=c))),+coalesce((select +t1.e from t1 where not exists(select 1 from t1 where c>=t1.b)),(abs( -t1.c+case t1.c when ~~t1.c+a-t1.c then coalesce((select max(case when t1.b<=c then 19 else f end) from t1 where 13case d when ~coalesce((select max(e) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where not e=c))),+coalesce((select +t1.e from t1 where not 
exists(select 1 from t1 where c>=t1.b)),(abs( -t1.c+case t1.c when ~~t1.c+a-t1.c then coalesce((select max(case when t1.b<=c then 19 else f end) from t1 where 13=(select max(17) from t1) then case 11 when e then coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.d in (select t1.a from t1 union select e from t1) and e=case when 11<19 and t1.e<19 then t1.e when t1.c in (t1.b,t1.e,t1.d) then 13 else 13 end and e<=d or b between 19 and t1.a or t1.a<=(13))),t1.a) else t1.e-t1.d end*t1.a when -11 not in (17,t1.d,a) then t1.c else b end*19 FROM t1 WHERE not t1.b>t1.e or t1.d not in (~t1.f,d+case when (b+(abs(19*~t1.c | t1.b*d*(b)-t1.e-t1.e+a)/abs(t1.c)) not in (17,b,t1.e)) then 13+f else b end-t1.f,t1.e) or t1.e not in (f,a,t1.d)} +} {190000} +do_test randexpr-2.1678 { + db eval {SELECT +case when t1.e>=(select max(17) from t1) then case 11 when e then coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.d in (select t1.a from t1 union select e from t1) and e=case when 11<19 and t1.e<19 then t1.e when t1.c in (t1.b,t1.e,t1.d) then 13 else 13 end and e<=d or b between 19 and t1.a or t1.a<=(13))),t1.a) else t1.e-t1.d end*t1.a when -11 not in (17,t1.d,a) then t1.c else b end*19 FROM t1 WHERE NOT (not t1.b>t1.e or t1.d not in (~t1.f,d+case when (b+(abs(19*~t1.c | t1.b*d*(b)-t1.e-t1.e+a)/abs(t1.c)) not in (17,b,t1.e)) then 13+f else b end-t1.f,t1.e) or t1.e not in (f,a,t1.d))} +} {} +do_test randexpr-2.1679 { + db eval {SELECT t1.e | t1.b+a*coalesce((select t1.a from t1 where ~(d)-f<>e-c or 17+f in (select -(~case max(t1.c) when abs(count(*)) then abs( - -cast(avg(b) AS integer)*count(*)) else cast(avg(t1.c) AS integer) end) from t1 union select count(distinct (11)) from t1) and exists(select 1 from t1 where not 13>t1.e) or t1.ce-c or 17+f in (select -(~case max(t1.c) when abs(count(*)) then abs( - -cast(avg(b) AS integer)*count(*)) else cast(avg(t1.c) AS integer) end) from t1 union select count(distinct (11)) from t1) and exists(select 1 from t1 where not 13>t1.e) or t1.ce-c or 17+f in (select -(~case max(t1.c) when abs(count(*)) then abs( - -cast(avg(b) AS integer)*count(*)) else cast(avg(t1.c) AS integer) end) from t1 union select count(distinct (11)) from t1) and exists(select 1 from t1 where not 13>t1.e) or t1.ct1.b then 19 when case when case (select ( -max(c)) from t1) when 11 then d else e end*t1.d<>d then a else 11 end=t1.b or 17=d then b else t1.e end+11+t1.a*a-e FROM t1 WHERE coalesce((select coalesce((select max(c) from t1 where -+ -e-t1.d*case when coalesce((select d from t1 where (c=a)),f) between t1.b and t1.b or (13)<>c and e in (t1.e,e,(t1.a)) then b*t1.c else b end+t1.c in (select t1.f from t1 union select t1.b from t1)),19) from t1 where t1.f in (select count(*) from t1 union select cast(avg(t1.e) AS integer)*min((a))*+(max(t1.b))+max(17) from t1)),t1.d)< -e} +} {} +do_test randexpr-2.1683 { + db eval {SELECT ~(select +~count(distinct 17)*count(distinct t1.e) from t1)-t1.e+a*11-case when 13<>t1.b then 19 when case when case (select ( -max(c)) from t1) when 11 then d else e end*t1.d<>d then a else 11 end=t1.b or 17=d then b else t1.e end+11+t1.a*a-e FROM t1 WHERE NOT (coalesce((select coalesce((select max(c) from t1 where -+ -e-t1.d*case when coalesce((select d from t1 where (c=a)),f) between t1.b and t1.b or (13)<>c and e in (t1.e,e,(t1.a)) then b*t1.c else b end+t1.c in (select t1.f from t1 union select t1.b from t1)),19) from t1 where t1.f in (select count(*) from t1 union select cast(avg(t1.e) AS integer)*min((a))*+(max(t1.b))+max(17) from 
t1)),t1.d)< -e)} +} {10093} +do_test randexpr-2.1684 { + db eval {SELECT coalesce((select max(case when f<=(17-coalesce((select ~t1.e+11 from t1 where t1.f in (select c from t1 union select b from t1)),f)) then -case when e>case when 17+ -t1.b in (select cast(avg(f) AS integer) from t1 union select min(c-17+a) from t1) then -17 else t1.d end then a when 19<11 then t1.a else t1.b end | t1.f else a end) from t1 where e between a and d),19) FROM t1 WHERE case 19 when ((select max(f) from t1)) then 13 else t1.d end-t1.c>a-b-t1.d} +} {19} +do_test randexpr-2.1685 { + db eval {SELECT coalesce((select max(case when f<=(17-coalesce((select ~t1.e+11 from t1 where t1.f in (select c from t1 union select b from t1)),f)) then -case when e>case when 17+ -t1.b in (select cast(avg(f) AS integer) from t1 union select min(c-17+a) from t1) then -17 else t1.d end then a when 19<11 then t1.a else t1.b end | t1.f else a end) from t1 where e between a and d),19) FROM t1 WHERE NOT (case 19 when ((select max(f) from t1)) then 13 else t1.d end-t1.c>a-b-t1.d)} +} {} +do_test randexpr-2.1686 { + db eval {SELECT coalesce((select max(case when f<=(17-coalesce((select ~t1.e+11 from t1 where t1.f in (select c from t1 union select b from t1)),f)) then -case when e>case when 17+ -t1.b in (select cast(avg(f) AS integer) from t1 union select min(c-17+a) from t1) then -17 else t1.d end then a when 19<11 then t1.a else t1.b end & t1.f else a end) from t1 where e between a and d),19) FROM t1 WHERE case 19 when ((select max(f) from t1)) then 13 else t1.d end-t1.c>a-b-t1.d} +} {19} +do_test randexpr-2.1687 { + db eval {SELECT case when (select case min(t1.b) when max(13) | +max(t1.e)*max(f) then abs( -+count(*)) else max(t1.f) end from t1)*t1.a+(abs(case 19 when 19 then d else ((13)) end)/abs(13))+(19) between f and 13 or (not not 17~17),t1.e+(select count(*) from t1) | 11-coalesce((select 13 from t1 where 11 not in ( -t1.f,t1.e,t1.f)),t1.b))+ -(t1.c)) from t1) from t1 where t1.a=(13)), -a)*e FROM t1 WHERE not exists(select 1 from t1 where (select count(distinct t1.e) from t1)<=coalesce((select case when not exists(select 1 from t1 where e=t1.b*coalesce((select max(+b) from t1 where exists(select 1 from t1 where not (not not exists(select 1 from t1 where t1.c in (select 11 from t1 union select 19+13 from t1)))) and not t1.b~17),t1.e+(select count(*) from t1) | 11-coalesce((select 13 from t1 where 11 not in ( -t1.f,t1.e,t1.f)),t1.b))+ -(t1.c)) from t1) from t1 where t1.a=(13)), -a)*e FROM t1 WHERE NOT (not exists(select 1 from t1 where (select count(distinct t1.e) from t1)<=coalesce((select case when not exists(select 1 from t1 where e=t1.b*coalesce((select max(+b) from t1 where exists(select 1 from t1 where not (not not exists(select 1 from t1 where t1.c in (select 11 from t1 union select 19+13 from t1)))) and not t1.b~17),t1.e+(select count(*) from t1) & 11-coalesce((select 13 from t1 where 11 not in ( -t1.f,t1.e,t1.f)),t1.b))+ -(t1.c)) from t1) from t1 where t1.a=(13)), -a)*e FROM t1 WHERE not exists(select 1 from t1 where (select count(distinct t1.e) from t1)<=coalesce((select case when not exists(select 1 from t1 where e=t1.b*coalesce((select max(+b) from t1 where exists(select 1 from t1 where not (not not exists(select 1 from t1 where t1.c in (select 11 from t1 union select 19+13 from t1)))) and not t1.be) then t1.e*c else c end-13+t1.a),f)+t1.d- -t1.a)*t1.c<>f) and t1.b not between (e) and t1.c),f)+11*t1.f FROM t1 WHERE (17 in (select 13 from t1 union select case coalesce((select max( -case when not b in ((select -+case 
max((b)) when cast(avg(13) AS integer) then count(*) else max(a) end+min(b)-max(d) from t1),coalesce((select t1.d from t1 where case when -13 in ( -t1.e,f,t1.e) then t1.c else e end | 11<=(t1.b)),t1.c)-11,t1.e) then t1.f else 13 end) from t1 where b>=t1.e),f) when e then t1.c else c end from t1))} +} {} +do_test randexpr-2.1694 { + db eval {SELECT ~~t1.b | 11+d*coalesce((select max(t1.f-a) from t1 where not exists(select 1 from t1 where (a-coalesce((select b from t1 where t1.fe) then t1.e*c else c end-13+t1.a),f)+t1.d- -t1.a)*t1.c<>f) and t1.b not between (e) and t1.c),f)+11*t1.f FROM t1 WHERE NOT ((17 in (select 13 from t1 union select case coalesce((select max( -case when not b in ((select -+case max((b)) when cast(avg(13) AS integer) then count(*) else max(a) end+min(b)-max(d) from t1),coalesce((select t1.d from t1 where case when -13 in ( -t1.e,f,t1.e) then t1.c else e end | 11<=(t1.b)),t1.c)-11,t1.e) then t1.f else 13 end) from t1 where b>=t1.e),f) when e then t1.c else c end from t1)))} +} {246747} +do_test randexpr-2.1695 { + db eval {SELECT ~~t1.b & 11+d*coalesce((select max(t1.f-a) from t1 where not exists(select 1 from t1 where (a-coalesce((select b from t1 where t1.fe) then t1.e*c else c end-13+t1.a),f)+t1.d- -t1.a)*t1.c<>f) and t1.b not between (e) and t1.c),f)+11*t1.f FROM t1 WHERE NOT ((17 in (select 13 from t1 union select case coalesce((select max( -case when not b in ((select -+case max((b)) when cast(avg(13) AS integer) then count(*) else max(a) end+min(b)-max(d) from t1),coalesce((select t1.d from t1 where case when -13 in ( -t1.e,f,t1.e) then t1.c else e end | 11<=(t1.b)),t1.c)-11,t1.e) then t1.f else 13 end) from t1 where b>=t1.e),f) when e then t1.c else c end from t1)))} +} {64} +do_test randexpr-2.1696 { + db eval {SELECT case case when not ++b+~e-11-d-f*e*case when (t1.f) not between t1.c and t1.e or (13>e) then (abs(17)/abs(d)) when 11 not between 17 and (t1.b) then c else 13 end-(f)-f between t1.c and t1.f then t1.c when a between t1.d and 13 then t1.e else t1.a end when a then f else 13 end FROM t1 WHERE t1.a in (select case when (+e*t1.f not in ( -(select (case max(+t1.e-case when not exists(select 1 from t1 where 19 in (select 13 from t1 union select ((t1.d)) from t1)) then 13-t1.b else 19 end) when count(*)-(cast(avg(t1.b) AS integer)-cast(avg(b) AS integer)) then count(distinct t1.b) else count(distinct t1.c) end | cast(avg((19)) AS integer)) from t1),~17-f,c)) then (select count(*) from t1) when t1.a>t1.a then b else t1.d end+(f) from t1 union select b from t1)} +} {} +do_test randexpr-2.1697 { + db eval {SELECT case case when not ++b+~e-11-d-f*e*case when (t1.f) not between t1.c and t1.e or (13>e) then (abs(17)/abs(d)) when 11 not between 17 and (t1.b) then c else 13 end-(f)-f between t1.c and t1.f then t1.c when a between t1.d and 13 then t1.e else t1.a end when a then f else 13 end FROM t1 WHERE NOT (t1.a in (select case when (+e*t1.f not in ( -(select (case max(+t1.e-case when not exists(select 1 from t1 where 19 in (select 13 from t1 union select ((t1.d)) from t1)) then 13-t1.b else 19 end) when count(*)-(cast(avg(t1.b) AS integer)-cast(avg(b) AS integer)) then count(distinct t1.b) else count(distinct t1.c) end | cast(avg((19)) AS integer)) from t1),~17-f,c)) then (select count(*) from t1) when t1.a>t1.a then b else t1.d end+(f) from t1 union select b from t1))} +} {13} +do_test randexpr-2.1698 { + db eval {SELECT 13-case when a<=case when e not between t1.b*(abs(t1.c)/abs(e | 19-(select -~count(distinct t1.b)+min(t1.a-t1.c-f+t1.d+t1.d+t1.d) from 
t1)))-t1.e and coalesce((select 11 from t1 where ct1.c)} +} {596} +do_test randexpr-2.1699 { + db eval {SELECT 13-case when a<=case when e not between t1.b*(abs(t1.c)/abs(e | 19-(select -~count(distinct t1.b)+min(t1.a-t1.c-f+t1.d+t1.d+t1.d) from t1)))-t1.e and coalesce((select 11 from t1 where ct1.c))} +} {} +do_test randexpr-2.1700 { + db eval {SELECT 13-case when a<=case when e not between t1.b*(abs(t1.c)/abs(e & 19-(select -~count(distinct t1.b)+min(t1.a-t1.c-f+t1.d+t1.d+t1.d) from t1)))-t1.e and coalesce((select 11 from t1 where ct1.c)} +} {596} +do_test randexpr-2.1701 { + db eval {SELECT (case when coalesce((select max(~case coalesce((select max(b) from t1 where case (select count(distinct 17) from t1)-e when b then d else t1.b end<13),f) when c then (( -c)) else 13 end) from t1 where t1.e between a and b),t1.b) in (select -min(t1.f) from t1 union select case cast(avg(t1.d) AS integer) when count(distinct b) then case +~ -cast(avg(b) AS integer) when max(t1.f) then count(*) else (max(e)) end else count(*) end from t1) then c when 13<=t1.f then ( -17) else t1.b end) FROM t1 WHERE (select min(19*coalesce((select max(t1.e) from t1 where ~coalesce((select t1.a-coalesce((select max((abs(a)/abs(d))) from t1 where -13 not in ((d)*t1.a+t1.e,13,t1.c) or 17 not in (a,b,t1.d)), -t1.a) from t1 where c<>t1.e),a) | 11+17 between b and 17),c))*~ -max(e) from t1)<>11} +} {-17} +do_test randexpr-2.1702 { + db eval {SELECT (case when coalesce((select max(~case coalesce((select max(b) from t1 where case (select count(distinct 17) from t1)-e when b then d else t1.b end<13),f) when c then (( -c)) else 13 end) from t1 where t1.e between a and b),t1.b) in (select -min(t1.f) from t1 union select case cast(avg(t1.d) AS integer) when count(distinct b) then case +~ -cast(avg(b) AS integer) when max(t1.f) then count(*) else (max(e)) end else count(*) end from t1) then c when 13<=t1.f then ( -17) else t1.b end) FROM t1 WHERE NOT ((select min(19*coalesce((select max(t1.e) from t1 where ~coalesce((select t1.a-coalesce((select max((abs(a)/abs(d))) from t1 where -13 not in ((d)*t1.a+t1.e,13,t1.c) or 17 not in (a,b,t1.d)), -t1.a) from t1 where c<>t1.e),a) | 11+17 between b and 17),c))*~ -max(e) from t1)<>11)} +} {} +do_test randexpr-2.1703 { + db eval {SELECT t1.f* -t1.e-c | b++19*(select (case max(t1.c) when +abs(count(distinct case when e=13 then coalesce((select max(t1.c) from t1 where not -c+b-t1.f<>d),a) else b end FROM t1 WHERE d in (select 11 from t1 union select -d*~t1.b from t1)} +} {} +do_test randexpr-2.1704 { + db eval {SELECT t1.f* -t1.e-c | b++19*(select (case max(t1.c) when +abs(count(distinct case when e=13 then coalesce((select max(t1.c) from t1 where not -c+b-t1.f<>d),a) else b end FROM t1 WHERE NOT (d in (select 11 from t1 union select -d*~t1.b from t1))} +} {-295178} +do_test randexpr-2.1705 { + db eval {SELECT t1.f* -t1.e-c & b++19*(select (case max(t1.c) when +abs(count(distinct case when e=13 then coalesce((select max(t1.c) from t1 where not -c+b-t1.f<>d),a) else b end FROM t1 WHERE NOT (d in (select 11 from t1 union select -d*~t1.b from t1))} +} {52} +do_test randexpr-2.1706 { + db eval {SELECT 11-t1.e+d+case case when (b between -+case when f not between case coalesce((select t1.a from t1 where e not between (t1.d) and t1.a or 17<11),t1.d) when 17 then t1.f else t1.e end and d then 17 when e>=t1.f then t1.b else t1.b end and t1.e) then case f when t1.d then t1.c else -e end when (not exists(select 1 from t1 where -(11)>a) or t1.ct1.e} +} {} +do_test randexpr-2.1707 { + db eval {SELECT 
11-t1.e+d+case case when (b between -+case when f not between case coalesce((select t1.a from t1 where e not between (t1.d) and t1.a or 17<11),t1.d) when 17 then t1.f else t1.e end and d then 17 when e>=t1.f then t1.b else t1.b end and t1.e) then case f when t1.d then t1.c else -e end when (not exists(select 1 from t1 where -(11)>a) or t1.ct1.e)} +} {411} +do_test randexpr-2.1708 { + db eval {SELECT coalesce((select max(case when f>11 then 13 when 19*e+(select -count(distinct t1.c)-max(c) | count(distinct 13) from t1) between coalesce((select max(case when t1.ae),17) FROM t1 WHERE case case 13+coalesce((select max( -t1.b*t1.c) from t1 where t1.c*c<>(select -(count(distinct e)) from t1)-t1.c | t1.d*t1.e+11*t1.a+11-t1.a*b),d)+t1.d+e when t1.f then e else -11 end-d when t1.e then 11 else f end+f=19} +} {} +do_test randexpr-2.1709 { + db eval {SELECT coalesce((select max(case when f>11 then 13 when 19*e+(select -count(distinct t1.c)-max(c) | count(distinct 13) from t1) between coalesce((select max(case when t1.ae),17) FROM t1 WHERE NOT (case case 13+coalesce((select max( -t1.b*t1.c) from t1 where t1.c*c<>(select -(count(distinct e)) from t1)-t1.c | t1.d*t1.e+11*t1.a+11-t1.a*b),d)+t1.d+e when t1.f then e else -11 end-d when t1.e then 11 else f end+f=19)} +} {17} +do_test randexpr-2.1710 { + db eval {SELECT coalesce((select max(case when f>11 then 13 when 19*e+(select -count(distinct t1.c)-max(c) & count(distinct 13) from t1) between coalesce((select max(case when t1.ae),17) FROM t1 WHERE NOT (case case 13+coalesce((select max( -t1.b*t1.c) from t1 where t1.c*c<>(select -(count(distinct e)) from t1)-t1.c | t1.d*t1.e+11*t1.a+11-t1.a*b),d)+t1.d+e when t1.f then e else -11 end-d when t1.e then 11 else f end+f=19)} +} {17} +do_test randexpr-2.1711 { + db eval {SELECT 13+coalesce((select -(abs(b)/abs(coalesce((select (abs(e+t1.a)/abs(13)) from t1 where a17 and t1.d<>t1.f),17)+a- -t1.d then t1.d else -t1.b end,b)} +} {413} +do_test randexpr-2.1712 { + db eval {SELECT 13+coalesce((select -(abs(b)/abs(coalesce((select (abs(e+t1.a)/abs(13)) from t1 where a17 and t1.d<>t1.f),17)+a- -t1.d then t1.d else -t1.b end,b))} +} {} +do_test randexpr-2.1713 { + db eval {SELECT (~(abs(case when t1.f*17 not in (coalesce((select 13 from t1 where d in (select count(distinct t1.e- -t1.e+t1.e*11) from t1 union select (abs(~~(cast(avg(d) AS integer))* -(min(13))+max(d) | min(13))) from t1)),d),a,t1.e) then c else t1.c end-c)/abs((t1.d)))*t1.c)*t1.d+t1.b-b FROM t1 WHERE (abs(case when (t1.e+case when t1.f+t1.d*t1.c<=t1.f or t1.c>=a and a not between 19 and t1.e then e else b end-t1.c) | t1.b<>19 then t1.f when 19<=t1.d and not -t1.c>=11 and t1.a between t1.f and (19) then b else t1.c end)/abs(b))+11>=t1.f and t1.f not between f and t1.a} +} {} +do_test randexpr-2.1714 { + db eval {SELECT (~(abs(case when t1.f*17 not in (coalesce((select 13 from t1 where d in (select count(distinct t1.e- -t1.e+t1.e*11) from t1 union select (abs(~~(cast(avg(d) AS integer))* -(min(13))+max(d) | min(13))) from t1)),d),a,t1.e) then c else t1.c end-c)/abs((t1.d)))*t1.c)*t1.d+t1.b-b FROM t1 WHERE NOT ((abs(case when (t1.e+case when t1.f+t1.d*t1.c<=t1.f or t1.c>=a and a not between 19 and t1.e then e else b end-t1.c) | t1.b<>19 then t1.f when 19<=t1.d and not -t1.c>=11 and t1.a between t1.f and (19) then b else t1.c end)/abs(b))+11>=t1.f and t1.f not between f and t1.a)} +} {-120000} +do_test randexpr-2.1715 { + db eval {SELECT (~(abs(case when t1.f*17 not in (coalesce((select 13 from t1 where d in (select count(distinct t1.e- -t1.e+t1.e*11) from 
t1 union select (abs(~~(cast(avg(d) AS integer))* -(min(13))+max(d) & min(13))) from t1)),d),a,t1.e) then c else t1.c end-c)/abs((t1.d)))*t1.c)*t1.d+t1.b-b FROM t1 WHERE NOT ((abs(case when (t1.e+case when t1.f+t1.d*t1.c<=t1.f or t1.c>=a and a not between 19 and t1.e then e else b end-t1.c) | t1.b<>19 then t1.f when 19<=t1.d and not -t1.c>=11 and t1.a between t1.f and (19) then b else t1.c end)/abs(b))+11>=t1.f and t1.f not between f and t1.a)} +} {-120000} +do_test randexpr-2.1716 { + db eval {SELECT (abs(a)/abs(case when (not t1.f in (select count(*) from t1 union select - -min(c) | min(13*t1.d) from t1)) and ( -t1.a*t1.d)>=d or (t1.a in (a,11,f)) or (c>=t1.a) then 17+case when not exists(select 1 from t1 where e=d or (t1.a in (a,11,f)) or (c>=t1.a) then 17+case when not exists(select 1 from t1 where e=d or (t1.a in (a,11,f)) or (c>=t1.a) then 17+case when not exists(select 1 from t1 where ec)) and 13 between t1.c and 17 or a=13),coalesce((select a from t1 where (t1.d) in (c,11,t1.c)),t1.d)) | c | coalesce((select max(d) from t1 where t1.b>b),t1.a)=19 then e else 13 end between t1.a and t1.f and 17>=c then a when d in (e,13,c) or t1.b<> -19 then (17) else 17 end FROM t1 WHERE -b in (t1.b,t1.f,case when a in (select case +(min(t1.c)*+~cast(avg(+t1.f) AS integer)*cast(avg(d) AS integer) | abs(count(*))+count(*)*count(distinct 17)) when count(distinct e) then -cast(avg(t1.a) AS integer) else min(11) end from t1 union select ((min(13))) from t1) then (abs(t1.d)/abs(case when -19c)) and 13 between t1.c and 17 or a=13),coalesce((select a from t1 where (t1.d) in (c,11,t1.c)),t1.d)) | c | coalesce((select max(d) from t1 where t1.b>b),t1.a)=19 then e else 13 end between t1.a and t1.f and 17>=c then a when d in (e,13,c) or t1.b<> -19 then (17) else 17 end FROM t1 WHERE NOT ( -b in (t1.b,t1.f,case when a in (select case +(min(t1.c)*+~cast(avg(+t1.f) AS integer)*cast(avg(d) AS integer) | abs(count(*))+count(*)*count(distinct 17)) when count(distinct e) then -cast(avg(t1.a) AS integer) else min(11) end from t1 union select ((min(13))) from t1) then (abs(t1.d)/abs(case when -19c)) and 13 between t1.c and 17 or a=13),coalesce((select a from t1 where (t1.d) in (c,11,t1.c)),t1.d)) & c & coalesce((select max(d) from t1 where t1.b>b),t1.a)=19 then e else 13 end between t1.a and t1.f and 17>=c then a when d in (e,13,c) or t1.b<> -19 then (17) else 17 end FROM t1 WHERE NOT ( -b in (t1.b,t1.f,case when a in (select case +(min(t1.c)*+~cast(avg(+t1.f) AS integer)*cast(avg(d) AS integer) | abs(count(*))+count(*)*count(distinct 17)) when count(distinct e) then -cast(avg(t1.a) AS integer) else min(11) end from t1 union select ((min(13))) from t1) then (abs(t1.d)/abs(case when -19e)} +} {} +do_test randexpr-2.1723 { + db eval {SELECT coalesce((select t1.b from t1 where not exists(select 1 from t1 where 13 in (select count(*) | ~min((select max((abs(d)/abs(t1.e+t1.e))) from t1)* -case when f*case e*~a when case e when (d) then 19 else (b) end then d else 17 end*t1.b between 11 and 13 then e when d in (a,(19),e) then c else 13 end-e*17) from t1 union select (count(*)) from t1))),f) FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.a<>e))} +} {200} +do_test randexpr-2.1724 { + db eval {SELECT coalesce((select t1.b from t1 where not exists(select 1 from t1 where 13 in (select count(*) & ~min((select max((abs(d)/abs(t1.e+t1.e))) from t1)* -case when f*case e*~a when case e when (d) then 19 else (b) end then d else 17 end*t1.b between 11 and 13 then e when d in (a,(19),e) then c else 13 end-e*17) from t1 union 
select (count(*)) from t1))),f) FROM t1 WHERE NOT (not exists(select 1 from t1 where t1.a<>e))} +} {200} +do_test randexpr-2.1725 { + db eval {SELECT - -case b when +~coalesce((select max(coalesce((select c from t1 where coalesce((select max(t1.a+f) from t1 where (t1.a not between t1.a and t1.e)),t1.a)-b not between e and -t1.d),d)+19) from t1 where (f in (select ( -count(*) | count(*)+cast(avg(11) AS integer)) from t1 union select count(distinct 13) from t1) or -b<>19 or 17>=t1.a)),t1.e)-c-t1.d then t1.a else 19 end- - -11 FROM t1 WHERE d+a*~coalesce((select max((select count(distinct t1.e)-max(t1.d) from t1)) from t1 where not exists(select 1 from t1 where t1.e in (11-f*19,a,c-b)) and 11=t1.b),t1.a)=(t1.b) and -t1.b19 or 17>=t1.a)),t1.e)-c-t1.d then t1.a else 19 end- - -11 FROM t1 WHERE NOT (d+a*~coalesce((select max((select count(distinct t1.e)-max(t1.d) from t1)) from t1 where not exists(select 1 from t1 where t1.e in (11-f*19,a,c-b)) and 11=t1.b),t1.a)=(t1.b) and -t1.b19 or 17>=t1.a)),t1.e)-c-t1.d then t1.a else 19 end- - -11 FROM t1 WHERE NOT (d+a*~coalesce((select max((select count(distinct t1.e)-max(t1.d) from t1)) from t1 where not exists(select 1 from t1 where t1.e in (11-f*19,a,c-b)) and 11=t1.b),t1.a)=(t1.b) and -t1.bcoalesce((select max(coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where e | t1.f-19 between t1.e-a and b and e>=t1.d)),+d)) from t1 where (exists(select 1 from t1 where t1.b in (select 19 from t1 union select 17 from t1) or 11 between t1.b and d))),c)),t1.e)*t1.a<=c then b else t1.d end)) FROM t1 WHERE (select abs( -abs(abs(case count(*) when ~ -min(f)+case - -abs(+cast(avg(c*17) AS integer)) when count(*) then max(t1.a) else max(11) end+(cast(avg(e) AS integer)) then min(17) else count(*) end-cast(avg(a) AS integer)-count(*))))+ -cast(avg(t1.a) AS integer) | ((count(*))) from t1)+f in (select max((abs(+11*t1.a)/abs(t1.f))) from t1 union select max(d)-(( -count(distinct 11))) from t1)} +} {} +do_test randexpr-2.1729 { + db eval {SELECT -(abs(t1.a)/abs(case when coalesce((select max( -c) from t1 where t1.a*e>coalesce((select max(coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where e | t1.f-19 between t1.e-a and b and e>=t1.d)),+d)) from t1 where (exists(select 1 from t1 where t1.b in (select 19 from t1 union select 17 from t1) or 11 between t1.b and d))),c)),t1.e)*t1.a<=c then b else t1.d end)) FROM t1 WHERE NOT ((select abs( -abs(abs(case count(*) when ~ -min(f)+case - -abs(+cast(avg(c*17) AS integer)) when count(*) then max(t1.a) else max(11) end+(cast(avg(e) AS integer)) then min(17) else count(*) end-cast(avg(a) AS integer)-count(*))))+ -cast(avg(t1.a) AS integer) | ((count(*))) from t1)+f in (select max((abs(+11*t1.a)/abs(t1.f))) from t1 union select max(d)-(( -count(distinct 11))) from t1))} +} {0} +do_test randexpr-2.1730 { + db eval {SELECT -(abs(t1.a)/abs(case when coalesce((select max( -c) from t1 where t1.a*e>coalesce((select max(coalesce((select max(t1.d) from t1 where not exists(select 1 from t1 where e & t1.f-19 between t1.e-a and b and e>=t1.d)),+d)) from t1 where (exists(select 1 from t1 where t1.b in (select 19 from t1 union select 17 from t1) or 11 between t1.b and d))),c)),t1.e)*t1.a<=c then b else t1.d end)) FROM t1 WHERE NOT ((select abs( -abs(abs(case count(*) when ~ -min(f)+case - -abs(+cast(avg(c*17) AS integer)) when count(*) then max(t1.a) else max(11) end+(cast(avg(e) AS integer)) then min(17) else count(*) end-cast(avg(a) AS integer)-count(*))))+ -cast(avg(t1.a) AS integer) | ((count(*))) from 
t1)+f in (select max((abs(+11*t1.a)/abs(t1.f))) from t1 union select max(d)-(( -count(distinct 11))) from t1))} +} {0} +do_test randexpr-2.1731 { + db eval {SELECT case when ((select (+~~max(13)*count(distinct t1.c)) from t1)<>(select (+ -count(*)) from t1)) then +(17) else 19 end+~(abs(13)/abs(d))-t1.d FROM t1 WHERE coalesce((select (select -+ -count(distinct a)*count(*)+max(t1.b) from t1) from t1 where exists(select 1 from t1 where t1.b=c)),t1.c)+f<>t1.f} +} {-384} +do_test randexpr-2.1732 { + db eval {SELECT case when ((select (+~~max(13)*count(distinct t1.c)) from t1)<>(select (+ -count(*)) from t1)) then +(17) else 19 end+~(abs(13)/abs(d))-t1.d FROM t1 WHERE NOT (coalesce((select (select -+ -count(distinct a)*count(*)+max(t1.b) from t1) from t1 where exists(select 1 from t1 where t1.b=c)),t1.c)+f<>t1.f)} +} {} +do_test randexpr-2.1733 { + db eval {SELECT case t1.f when case when 17<=c and coalesce((select case when e in (case when (t1.a=c) then b else -13 end,t1.d,t1.a) then 13 else t1.b end from t1 where (b)=13),t1.d) in ((t1.e),b,t1.b) and d not between 19 and 11 then +(select ~ -~max(e)+count(*)+cast(avg((t1.c)) AS integer) from t1) when t1.f<>e and t1.ee and t1.et1.e then t1.d else (d) end in (select count(*) from t1 union select -abs(~abs(count(distinct d)) | min(t1.e))*count(*) from t1)),t1.f)) from t1 where a>13),d) not between t1.d and c)} +} {60000} +do_test randexpr-2.1738 { + db eval {SELECT +case + -(+t1.a)-~13+t1.b*t1.b+ -c+(select abs(case ~max(t1.c)++min( -13)+ -count(*)+(count(*)) | ((cast(avg(t1.e) AS integer))) when min(t1.c) then min(t1.d) else min(b) end)+min(13) | count(distinct f) | min(t1.a) from t1) when t1.e then 11-17 else t1.b*c end FROM t1 WHERE NOT ((~coalesce((select max(coalesce((select max(19) from t1 where case when case f-(coalesce((select ~11 from t1 where -19= -t1.f or b=t1.b),13)) when 19 then t1.d else t1.b end<=17 then 17 when t1.b>t1.e then t1.d else (d) end in (select count(*) from t1 union select -abs(~abs(count(distinct d)) | min(t1.e))*count(*) from t1)),t1.f)) from t1 where a>13),d) not between t1.d and c))} +} {} +do_test randexpr-2.1739 { + db eval {SELECT +case + -(+t1.a)-~13+t1.b*t1.b+ -c+(select abs(case ~max(t1.c)++min( -13)+ -count(*)+(count(*)) & ((cast(avg(t1.e) AS integer))) when min(t1.c) then min(t1.d) else min(b) end)+min(13) & count(distinct f) & min(t1.a) from t1) when t1.e then 11-17 else t1.b*c end FROM t1 WHERE (~coalesce((select max(coalesce((select max(19) from t1 where case when case f-(coalesce((select ~11 from t1 where -19= -t1.f or b=t1.b),13)) when 19 then t1.d else t1.b end<=17 then 17 when t1.b>t1.e then t1.d else (d) end in (select count(*) from t1 union select -abs(~abs(count(distinct d)) | min(t1.e))*count(*) from t1)),t1.f)) from t1 where a>13),d) not between t1.d and c)} +} {60000} +do_test randexpr-2.1740 { + db eval {SELECT (abs(e)/abs((+b)*coalesce((select c+c+t1.a+t1.e+t1.a+t1.a | 17+e*(t1.c) from t1 where a in (select count(distinct t1.c)-+count(*) from t1 union select - -( -max(t1.f))-count(distinct t1.b)+min(t1.b)*count(*)+count(distinct a) from t1)),t1.a)+e))*19 FROM t1 WHERE coalesce((select 11 from t1 where case t1.e-(t1.a) when (f) then ((19)) else t1.f end+ -t1.b+a+coalesce((select coalesce((select d from t1 where exists(select 1 from t1 where 13+(t1.a)<>19 or 17 not in (c,e,t1.c) or f not in (t1.e,t1.a, -f))),~ -13+e) from t1 where b between (11) and t1.f),t1.a)<=t1.d), -c)<=11} +} {0} +do_test randexpr-2.1741 { + db eval {SELECT (abs(e)/abs((+b)*coalesce((select c+c+t1.a+t1.e+t1.a+t1.a | 
17+e*(t1.c) from t1 where a in (select count(distinct t1.c)-+count(*) from t1 union select - -( -max(t1.f))-count(distinct t1.b)+min(t1.b)*count(*)+count(distinct a) from t1)),t1.a)+e))*19 FROM t1 WHERE NOT (coalesce((select 11 from t1 where case t1.e-(t1.a) when (f) then ((19)) else t1.f end+ -t1.b+a+coalesce((select coalesce((select d from t1 where exists(select 1 from t1 where 13+(t1.a)<>19 or 17 not in (c,e,t1.c) or f not in (t1.e,t1.a, -f))),~ -13+e) from t1 where b between (11) and t1.f),t1.a)<=t1.d), -c)<=11)} +} {} +do_test randexpr-2.1742 { + db eval {SELECT (abs(e)/abs((+b)*coalesce((select c+c+t1.a+t1.e+t1.a+t1.a & 17+e*(t1.c) from t1 where a in (select count(distinct t1.c)-+count(*) from t1 union select - -( -max(t1.f))-count(distinct t1.b)+min(t1.b)*count(*)+count(distinct a) from t1)),t1.a)+e))*19 FROM t1 WHERE coalesce((select 11 from t1 where case t1.e-(t1.a) when (f) then ((19)) else t1.f end+ -t1.b+a+coalesce((select coalesce((select d from t1 where exists(select 1 from t1 where 13+(t1.a)<>19 or 17 not in (c,e,t1.c) or f not in (t1.e,t1.a, -f))),~ -13+e) from t1 where b between (11) and t1.f),t1.a)<=t1.d), -c)<=11} +} {0} +do_test randexpr-2.1743 { + db eval {SELECT case coalesce((select t1.f from t1 where not exists(select 1 from t1 where coalesce((select max(d) from t1 where 13 not in (11,coalesce((select max((+f)) from t1 where not d>=f or e in (select 11-t1.b from t1 union select t1.d from t1) or not exists(select 1 from t1 where -b in (select t1.d from t1 union select b from t1) or t1.a<>b or t1.d>e) or t1.a not between (t1.f) and t1.e),+f),t1.e)),t1.f)*t1.b not in (13,t1.b,t1.f))),t1.a)*b when c then b else t1.f end FROM t1 WHERE case when c*coalesce((select max(19+t1.e*~case case t1.e when t1.f then -t1.a else 11 end when 11 then t1.a else (select cast(avg(a) AS integer) from t1) end- -t1.b*coalesce((select 19 from t1 where t1.f in ((a),t1.d,t1.c)),a)) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where t1.a not in (t1.b,t1.a,17)))),13)>=a then b when t1.c>t1.a then d else 17 end not in (a,17,t1.b)} +} {600} +do_test randexpr-2.1744 { + db eval {SELECT case coalesce((select t1.f from t1 where not exists(select 1 from t1 where coalesce((select max(d) from t1 where 13 not in (11,coalesce((select max((+f)) from t1 where not d>=f or e in (select 11-t1.b from t1 union select t1.d from t1) or not exists(select 1 from t1 where -b in (select t1.d from t1 union select b from t1) or t1.a<>b or t1.d>e) or t1.a not between (t1.f) and t1.e),+f),t1.e)),t1.f)*t1.b not in (13,t1.b,t1.f))),t1.a)*b when c then b else t1.f end FROM t1 WHERE NOT (case when c*coalesce((select max(19+t1.e*~case case t1.e when t1.f then -t1.a else 11 end when 11 then t1.a else (select cast(avg(a) AS integer) from t1) end- -t1.b*coalesce((select 19 from t1 where t1.f in ((a),t1.d,t1.c)),a)) from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where t1.a not in (t1.b,t1.a,17)))),13)>=a then b when t1.c>t1.a then d else 17 end not in (a,17,t1.b))} +} {} +do_test randexpr-2.1745 { + db eval {SELECT (abs(case when (c*+~t1.d++t1.b+a-case when (select +case count(*)+min(d) when -count(distinct 17) then max(t1.b) else -cast(avg(d) AS integer) end from t1) in (select cast(avg(c) AS integer) from t1 union select ((count(*))) from t1) then t1.a* -f else f end*c*19-(f)-t1.c-t1.f+t1.d>=f) then (t1.d) else d end)/abs(t1.b)) FROM t1 WHERE coalesce((select a from t1 where t1.d not in ( -13*d,19,t1.a)),(abs(coalesce((select case when c>=t1.e then b else case ~t1.d+a-+t1.b+17 
when case when t1.e+t1.e not between e and (c) then t1.d when d>a then t1.d else t1.c end | t1.a-(d) then t1.f else c end end from t1 where d>=t1.c),13))/abs(t1.c)))=t1.e} +} {} +do_test randexpr-2.1746 { + db eval {SELECT (abs(case when (c*+~t1.d++t1.b+a-case when (select +case count(*)+min(d) when -count(distinct 17) then max(t1.b) else -cast(avg(d) AS integer) end from t1) in (select cast(avg(c) AS integer) from t1 union select ((count(*))) from t1) then t1.a* -f else f end*c*19-(f)-t1.c-t1.f+t1.d>=f) then (t1.d) else d end)/abs(t1.b)) FROM t1 WHERE NOT (coalesce((select a from t1 where t1.d not in ( -13*d,19,t1.a)),(abs(coalesce((select case when c>=t1.e then b else case ~t1.d+a-+t1.b+17 when case when t1.e+t1.e not between e and (c) then t1.d when d>a then t1.d else t1.c end | t1.a-(d) then t1.f else c end end from t1 where d>=t1.c),13))/abs(t1.c)))=t1.e)} +} {2} +do_test randexpr-2.1747 { + db eval {SELECT d+coalesce((select max(coalesce((select f from t1 where coalesce((select 17 | t1.b from t1 where 11 not in ((t1.e),+a-f,t1.c | b+17*t1.e-c)), -c) in (select -count(distinct e)-+max(t1.b) from t1 union select count(distinct c)+cast(avg(t1.b) AS integer) from t1)),17)) from t1 where not t1.c(select ~~+(min(11-11)) | ~ -(count(*)) from t1)*(case when t1.d=19 then a when not t1.a in (11,t1.e,c) or t1.a not between t1.f and e then t1.a else t1.e end+19) then 13 when 19 between -17 and t1.a then t1.a else 19 end end>=t1.f and t1.e>t1.a} +} {} +do_test randexpr-2.1748 { + db eval {SELECT d+coalesce((select max(coalesce((select f from t1 where coalesce((select 17 | t1.b from t1 where 11 not in ((t1.e),+a-f,t1.c | b+17*t1.e-c)), -c) in (select -count(distinct e)-+max(t1.b) from t1 union select count(distinct c)+cast(avg(t1.b) AS integer) from t1)),17)) from t1 where not t1.c(select ~~+(min(11-11)) | ~ -(count(*)) from t1)*(case when t1.d=19 then a when not t1.a in (11,t1.e,c) or t1.a not between t1.f and e then t1.a else t1.e end+19) then 13 when 19 between -17 and t1.a then t1.a else 19 end end>=t1.f and t1.e>t1.a)} +} {1117} +do_test randexpr-2.1749 { + db eval {SELECT d+coalesce((select max(coalesce((select f from t1 where coalesce((select 17 & t1.b from t1 where 11 not in ((t1.e),+a-f,t1.c & b+17*t1.e-c)), -c) in (select -count(distinct e)-+max(t1.b) from t1 union select count(distinct c)+cast(avg(t1.b) AS integer) from t1)),17)) from t1 where not t1.c(select ~~+(min(11-11)) | ~ -(count(*)) from t1)*(case when t1.d=19 then a when not t1.a in (11,t1.e,c) or t1.a not between t1.f and e then t1.a else t1.e end+19) then 13 when 19 between -17 and t1.a then t1.a else 19 end end>=t1.f and t1.e>t1.a)} +} {1117} +do_test randexpr-2.1750 { + db eval {SELECT (select case ~ -(~case case cast(avg(t1.e) AS integer) when min(case when t1.c between coalesce((select t1.b from t1 where d in (select f from t1 union select t1.e from t1)),(e)) and t1.d and (19=(11)) then 17 else b end) | max((13))-min(t1.c) | -count(distinct t1.d)*max(11) then count(*) else max(f) end*count(*)-(( -count(distinct f))) when min(t1.c) then (count(distinct (13))) else max((b)) end)+max(t1.a) when (max(t1.c)) then -cast(avg(a) AS integer) else cast(avg(t1.c) AS integer) end from t1) FROM t1 WHERE (f) not between c and 17 | t1.b | coalesce((select t1.d from t1 where t1.d*~case when a<=f or (case t1.f when d+17+17 then e else +case when (11<>17) then 11 when -t1.b not in (e,19,(f)) then t1.c else a end+13 end>= -t1.f) then ~ -t1.c else c end*t1.f*a*17<=t1.f),17)} +} {300} +do_test randexpr-2.1751 { + db eval {SELECT (select case 
~ -(~case case cast(avg(t1.e) AS integer) when min(case when t1.c between coalesce((select t1.b from t1 where d in (select f from t1 union select t1.e from t1)),(e)) and t1.d and (19=(11)) then 17 else b end) | max((13))-min(t1.c) | -count(distinct t1.d)*max(11) then count(*) else max(f) end*count(*)-(( -count(distinct f))) when min(t1.c) then (count(distinct (13))) else max((b)) end)+max(t1.a) when (max(t1.c)) then -cast(avg(a) AS integer) else cast(avg(t1.c) AS integer) end from t1) FROM t1 WHERE NOT ((f) not between c and 17 | t1.b | coalesce((select t1.d from t1 where t1.d*~case when a<=f or (case t1.f when d+17+17 then e else +case when (11<>17) then 11 when -t1.b not in (e,19,(f)) then t1.c else a end+13 end>= -t1.f) then ~ -t1.c else c end*t1.f*a*17<=t1.f),17))} +} {} +do_test randexpr-2.1752 { + db eval {SELECT (select case ~ -(~case case cast(avg(t1.e) AS integer) when min(case when t1.c between coalesce((select t1.b from t1 where d in (select f from t1 union select t1.e from t1)),(e)) and t1.d and (19=(11)) then 17 else b end) & max((13))-min(t1.c) & -count(distinct t1.d)*max(11) then count(*) else max(f) end*count(*)-(( -count(distinct f))) when min(t1.c) then (count(distinct (13))) else max((b)) end)+max(t1.a) when (max(t1.c)) then -cast(avg(a) AS integer) else cast(avg(t1.c) AS integer) end from t1) FROM t1 WHERE (f) not between c and 17 | t1.b | coalesce((select t1.d from t1 where t1.d*~case when a<=f or (case t1.f when d+17+17 then e else +case when (11<>17) then 11 when -t1.b not in (e,19,(f)) then t1.c else a end+13 end>= -t1.f) then ~ -t1.c else c end*t1.f*a*17<=t1.f),17)} +} {300} +do_test randexpr-2.1753 { + db eval {SELECT (select abs(min(t1.b)) from t1)-f | case when +t1.d*d not in (t1.f,t1.a,case when ~t1.f-17*c<>t1.a then 11 when 19 in (select (+count(distinct b)) | cast(avg(19) AS integer)*count(*) from t1 union select count(distinct t1.d) from t1) then d else d end+19+b) then (a) else 17 end*13+d FROM t1 WHERE t1.f<>(abs(t1.a)/abs(coalesce((select max(case when coalesce((select max((select count(*) from t1)) from t1 where f<>t1.a),coalesce((select max(11) from t1 where 17+coalesce((select t1.a from t1 where a in (select +~cast(avg(d+11) AS integer) from t1 union select count(distinct coalesce((select max((13)) from t1 where not (t1.f)<>19),f)*d) from t1)),19)+t1.e between 13 and (t1.b)),e))>=e then d else t1.f end) from t1 where ((not f>= -t1.b))),t1.c)))} +} {-268} +do_test randexpr-2.1754 { + db eval {SELECT (select abs(min(t1.b)) from t1)-f | case when +t1.d*d not in (t1.f,t1.a,case when ~t1.f-17*c<>t1.a then 11 when 19 in (select (+count(distinct b)) | cast(avg(19) AS integer)*count(*) from t1 union select count(distinct t1.d) from t1) then d else d end+19+b) then (a) else 17 end*13+d FROM t1 WHERE NOT (t1.f<>(abs(t1.a)/abs(coalesce((select max(case when coalesce((select max((select count(*) from t1)) from t1 where f<>t1.a),coalesce((select max(11) from t1 where 17+coalesce((select t1.a from t1 where a in (select +~cast(avg(d+11) AS integer) from t1 union select count(distinct coalesce((select max((13)) from t1 where not (t1.f)<>19),f)*d) from t1)),19)+t1.e between 13 and (t1.b)),e))>=e then d else t1.f end) from t1 where ((not f>= -t1.b))),t1.c))))} +} {} +do_test randexpr-2.1755 { + db eval {SELECT (select abs(min(t1.b)) from t1)-f & case when +t1.d*d not in (t1.f,t1.a,case when ~t1.f-17*c<>t1.a then 11 when 19 in (select (+count(distinct b)) & cast(avg(19) AS integer)*count(*) from t1 union select count(distinct t1.d) from t1) then d else d end+19+b) then 
(a) else 17 end*13+d FROM t1 WHERE t1.f<>(abs(t1.a)/abs(coalesce((select max(case when coalesce((select max((select count(*) from t1)) from t1 where f<>t1.a),coalesce((select max(11) from t1 where 17+coalesce((select t1.a from t1 where a in (select +~cast(avg(d+11) AS integer) from t1 union select count(distinct coalesce((select max((13)) from t1 where not (t1.f)<>19),f)*d) from t1)),19)+t1.e between 13 and (t1.b)),e))>=e then d else t1.f end) from t1 where ((not f>= -t1.b))),t1.c)))} +} {1568} +do_test randexpr-2.1756 { + db eval {SELECT coalesce((select max(a) from t1 where a>a*(((t1.d)) | a)-+case t1.a-t1.d-19+coalesce((select max(t1.c) from t1 where ~(select -cast(avg(a+d) AS integer) from t1) not in (~((17)),t1.a,(t1.e))),b)+a*(c) when t1.f then a else t1.f end-17+c),13)*t1.a FROM t1 WHERE t1.b*t1.d*11*case 13 | t1.a when (case when not exists(select 1 from t1 where t1.a not between coalesce((select f-a from t1 where a not in (11,d,(a)) or e<=d and 11=t1.e),d)+e and t1.c) and (t1.a>=t1.f or (t1.a)>=t1.a) then (c)+t1.d else a end)* -t1.a then e else t1.f end | 13>=b} +} {1300} +do_test randexpr-2.1757 { + db eval {SELECT coalesce((select max(a) from t1 where a>a*(((t1.d)) | a)-+case t1.a-t1.d-19+coalesce((select max(t1.c) from t1 where ~(select -cast(avg(a+d) AS integer) from t1) not in (~((17)),t1.a,(t1.e))),b)+a*(c) when t1.f then a else t1.f end-17+c),13)*t1.a FROM t1 WHERE NOT (t1.b*t1.d*11*case 13 | t1.a when (case when not exists(select 1 from t1 where t1.a not between coalesce((select f-a from t1 where a not in (11,d,(a)) or e<=d and 11=t1.e),d)+e and t1.c) and (t1.a>=t1.f or (t1.a)>=t1.a) then (c)+t1.d else a end)* -t1.a then e else t1.f end | 13>=b)} +} {} +do_test randexpr-2.1758 { + db eval {SELECT coalesce((select max(a) from t1 where a>a*(((t1.d)) & a)-+case t1.a-t1.d-19+coalesce((select max(t1.c) from t1 where ~(select -cast(avg(a+d) AS integer) from t1) not in (~((17)),t1.a,(t1.e))),b)+a*(c) when t1.f then a else t1.f end-17+c),13)*t1.a FROM t1 WHERE t1.b*t1.d*11*case 13 | t1.a when (case when not exists(select 1 from t1 where t1.a not between coalesce((select f-a from t1 where a not in (11,d,(a)) or e<=d and 11=t1.e),d)+e and t1.c) and (t1.a>=t1.f or (t1.a)>=t1.a) then (c)+t1.d else a end)* -t1.a then e else t1.f end | 13>=b} +} {10000} +do_test randexpr-2.1759 { + db eval {SELECT (abs(t1.f)/abs(11*19 | case when (e+t1.b*t1.d+(abs(case -13+t1.d*t1.a-+coalesce((select t1.d from t1 where t1.b not between b and d and d in (select c from t1 union select d from t1)),(19)) when d then 13 else d end*t1.e)/abs(d))+f not between e and t1.a) then ( -f) else d end*a)) FROM t1 WHERE a in (select abs(max(b))-count(distinct case when d not between coalesce((select max(13) from t1 where a+a<+(19)), -f*case (select -cast(avg(t1.c) AS integer)*cast(avg(c) AS integer)-cast(avg(13) AS integer) from t1)*coalesce((select d from t1 where (e=(f))),t1.d) when t1.a then 17 else 19 end) and d then t1.f when (19 between t1.e and -a) then t1.a else (t1.c) end) from t1 union select max(t1.e)+~min(t1.c) from t1)} +} {} +do_test randexpr-2.1760 { + db eval {SELECT (abs(t1.f)/abs(11*19 | case when (e+t1.b*t1.d+(abs(case -13+t1.d*t1.a-+coalesce((select t1.d from t1 where t1.b not between b and d and d in (select c from t1 union select d from t1)),(19)) when d then 13 else d end*t1.e)/abs(d))+f not between e and t1.a) then ( -f) else d end*a)) FROM t1 WHERE NOT (a in (select abs(max(b))-count(distinct case when d not between coalesce((select max(13) from t1 where a+a<+(19)), -f*case (select 
-cast(avg(t1.c) AS integer)*cast(avg(c) AS integer)-cast(avg(13) AS integer) from t1)*coalesce((select d from t1 where (e=(f))),t1.d) when t1.a then 17 else 19 end) and d then t1.f when (19 between t1.e and -a) then t1.a else (t1.c) end) from t1 union select max(t1.e)+~min(t1.c) from t1))} +} {0} +do_test randexpr-2.1761 { + db eval {SELECT (abs(t1.f)/abs(11*19 & case when (e+t1.b*t1.d+(abs(case -13+t1.d*t1.a-+coalesce((select t1.d from t1 where t1.b not between b and d and d in (select c from t1 union select d from t1)),(19)) when d then 13 else d end*t1.e)/abs(d))+f not between e and t1.a) then ( -f) else d end*a)) FROM t1 WHERE NOT (a in (select abs(max(b))-count(distinct case when d not between coalesce((select max(13) from t1 where a+a<+(19)), -f*case (select -cast(avg(t1.c) AS integer)*cast(avg(c) AS integer)-cast(avg(13) AS integer) from t1)*coalesce((select d from t1 where (e=(f))),t1.d) when t1.a then 17 else 19 end) and d then t1.f when (19 between t1.e and -a) then t1.a else (t1.c) end) from t1 union select max(t1.e)+~min(t1.c) from t1))} +} {4} +do_test randexpr-2.1762 { + db eval {SELECT +t1.d | (d)*t1.e | (select case abs((max(t1.e)-abs(+case min(a) | (~(min(~t1.c)))*(abs(abs(count(*))+count(distinct b)*count(distinct 17)))*max(c) when max(t1.c) then max(t1.d) else cast(avg(d) AS integer) end | max(t1.a))))*cast(avg(11) AS integer) when (cast(avg( -a) AS integer)) then min(19) else -count(distinct 13) end from t1)+t1.b FROM t1 WHERE not exists(select 1 from t1 where e13) or t1.a*t1.f- -a>=t1.d or d in (select -case cast(avg(b) AS integer) when max(19) then count(distinct -t1.f) else count(distinct f) end* -max(t1.d) from t1 union select -cast(avg(13) AS integer) from t1) and 11 between t1.f and d then case when t1.f not between b and 13 then f else t1.e end-t1.c when t1.d in (t1.e,b,d) then t1.f else -t1.d end-t1.d)*d*f>=a} +} {} +do_test randexpr-2.1766 { + db eval {SELECT coalesce((select max(e) from t1 where -(abs(~(t1.f+b))/abs(t1.f)) not in (~case 11+c*case when -t1.d not between coalesce((select max(d) from t1 where f- -t1.f+t1.f-13-e13) or t1.a*t1.f- -a>=t1.d or d in (select -case cast(avg(b) AS integer) when max(19) then count(distinct -t1.f) else count(distinct f) end* -max(t1.d) from t1 union select -cast(avg(13) AS integer) from t1) and 11 between t1.f and d then case when t1.f not between b and 13 then f else t1.e end-t1.c when t1.d in (t1.e,b,d) then t1.f else -t1.d end-t1.d)*d*f>=a)} +} {500} +do_test randexpr-2.1767 { + db eval {SELECT case when ~case when t1.a | case when exists(select 1 from t1 where 19 in (select (count(*))-count(*) from t1 union select -min(b) from t1) or t1.f not in (17,11,t1.c) and t1.d in (a,19,t1.a)) then ~c | 11 else 17 end* -19 in (select count(*) from t1 union select abs( - -case ~cast(avg(t1.b) AS integer) when min(b) then max(a) else (max( -d)) end | count(*)) from t1) then t1.a else t1.c end | b in (select f from t1 union select 13 from t1) then t1.b when t1.a<>(17) then c else t1.a end FROM t1 WHERE exists(select 1 from t1 where d<>13)} +} {300} +do_test randexpr-2.1768 { + db eval {SELECT case when ~case when t1.a | case when exists(select 1 from t1 where 19 in (select (count(*))-count(*) from t1 union select -min(b) from t1) or t1.f not in (17,11,t1.c) and t1.d in (a,19,t1.a)) then ~c | 11 else 17 end* -19 in (select count(*) from t1 union select abs( - -case ~cast(avg(t1.b) AS integer) when min(b) then max(a) else (max( -d)) end | count(*)) from t1) then t1.a else t1.c end | b in (select f from t1 union select 13 from t1) then 
t1.b when t1.a<>(17) then c else t1.a end FROM t1 WHERE NOT (exists(select 1 from t1 where d<>13))} +} {} +do_test randexpr-2.1769 { + db eval {SELECT case when ~case when t1.a & case when exists(select 1 from t1 where 19 in (select (count(*))-count(*) from t1 union select -min(b) from t1) or t1.f not in (17,11,t1.c) and t1.d in (a,19,t1.a)) then ~c & 11 else 17 end* -19 in (select count(*) from t1 union select abs( - -case ~cast(avg(t1.b) AS integer) when min(b) then max(a) else (max( -d)) end & count(*)) from t1) then t1.a else t1.c end & b in (select f from t1 union select 13 from t1) then t1.b when t1.a<>(17) then c else t1.a end FROM t1 WHERE exists(select 1 from t1 where d<>13)} +} {300} +do_test randexpr-2.1770 { + db eval {SELECT coalesce((select a+t1.d from t1 where t1.d+coalesce((select max(case when e not between 11 and case when (abs(f)/abs(t1.f+19*t1.b))>e then ~d when not not 17 in (13,17,t1.a) then f else t1.f end then 11 when b<17 then t1.b else f end) from t1 where exists(select 1 from t1 where (c<>c))), -f)+t1.c in (select (t1.e) from t1 union select c from t1)),c) FROM t1 WHERE not (t1.a)<=case a when t1.f then +coalesce((select max(c) from t1 where (abs(d-11)/abs(t1.a))+t1.f<=d*d*t1.a+coalesce((select 19 from t1 where (select cast(avg(f) AS integer) from t1)*t1.f+19+ -13 in ((t1.d),c,t1.a)),t1.c) or ((not exists(select 1 from t1 where 19 in (select e from t1 union select -11 from t1))))),c) else e end} +} {} +do_test randexpr-2.1771 { + db eval {SELECT coalesce((select a+t1.d from t1 where t1.d+coalesce((select max(case when e not between 11 and case when (abs(f)/abs(t1.f+19*t1.b))>e then ~d when not not 17 in (13,17,t1.a) then f else t1.f end then 11 when b<17 then t1.b else f end) from t1 where exists(select 1 from t1 where (c<>c))), -f)+t1.c in (select (t1.e) from t1 union select c from t1)),c) FROM t1 WHERE NOT (not (t1.a)<=case a when t1.f then +coalesce((select max(c) from t1 where (abs(d-11)/abs(t1.a))+t1.f<=d*d*t1.a+coalesce((select 19 from t1 where (select cast(avg(f) AS integer) from t1)*t1.f+19+ -13 in ((t1.d),c,t1.a)),t1.c) or ((not exists(select 1 from t1 where 19 in (select e from t1 union select -11 from t1))))),c) else e end)} +} {300} +do_test randexpr-2.1772 { + db eval {SELECT (abs( -c+b)/abs(case when ~11 in (select t1.c from t1 union select coalesce((select t1.f from t1 where coalesce((select max(17) from t1 where coalesce((select case -t1.d when t1.e then a else b end-(t1.d) from t1 where t1.a not between -a and t1.b or t1.f=t1.b) and (f)>= -13),t1.b) from t1) then t1.c when t1.b<=t1.f then 13 else t1.e end)) FROM t1 WHERE (d-coalesce((select max(case 11 when +13-17 | 13 | coalesce((select t1.b from t1 where ((abs(t1.b)/abs((abs(b)/abs(+(19)))*t1.c)) | a in ( -t1.c,t1.a,t1.b))),d)* -d then 13 else 11 end) from t1 where e not in (d,11,11) or t1.e>t1.e),17)+t1.a>b)} +} {7} +do_test randexpr-2.1773 { + db eval {SELECT (abs( -c+b)/abs(case when ~11 in (select t1.c from t1 union select coalesce((select t1.f from t1 where coalesce((select max(17) from t1 where coalesce((select case -t1.d when t1.e then a else b end-(t1.d) from t1 where t1.a not between -a and t1.b or t1.f=t1.b) and (f)>= -13),t1.b) from t1) then t1.c when t1.b<=t1.f then 13 else t1.e end)) FROM t1 WHERE NOT ((d-coalesce((select max(case 11 when +13-17 | 13 | coalesce((select t1.b from t1 where ((abs(t1.b)/abs((abs(b)/abs(+(19)))*t1.c)) | a in ( -t1.c,t1.a,t1.b))),d)* -d then 13 else 11 end) from t1 where e not in (d,11,11) or t1.e>t1.e),17)+t1.a>b))} +} {} +do_test randexpr-2.1774 { + db 
eval {SELECT coalesce((select max(17) from t1 where (e* -t1.f*e*c-coalesce((select max(t1.f) from t1 where t1.c between case when e+~ -t1.e+11 in (select c from t1 union select t1.a from t1) and t1.d>=c then 19 else ( - -(t1.a)) end*c+f and t1.b),13) not in (t1.b,e,t1.c) and (11) not in (b,( -t1.b),13))),19) FROM t1 WHERE d>e+f} +} {} +do_test randexpr-2.1775 { + db eval {SELECT coalesce((select max(17) from t1 where (e* -t1.f*e*c-coalesce((select max(t1.f) from t1 where t1.c between case when e+~ -t1.e+11 in (select c from t1 union select t1.a from t1) and t1.d>=c then 19 else ( - -(t1.a)) end*c+f and t1.b),13) not in (t1.b,e,t1.c) and (11) not in (b,( -t1.b),13))),19) FROM t1 WHERE NOT (d>e+f)} +} {17} +do_test randexpr-2.1776 { + db eval {SELECT case when t1.b in (select case 13-(select case count(*) when count(distinct (abs(19)/abs(t1.d)))*~count(distinct 13)*(max((abs(t1.e*d*f)/abs(17)))) then -case - -count(distinct e) when min(19) then (cast(avg(17) AS integer)) else count(distinct t1.a) end*max(t1.b)-cast(avg(a) AS integer) else count(distinct t1.d) end from t1)+t1.b*d when t1.c+t1.a then f else t1.f end from t1 union select t1.c from t1) then t1.a when a in (select 11 from t1 union select a from t1) then a else e end FROM t1 WHERE (case when case 11+case f when e then d else (abs(e)/abs(19))*a end when 19 then b else d end= -19 then 13 else t1.e end between 17 and t1.a or b>=17 and ((( -t1.f))>=t1.c and 17 between t1.c and 19 and t1.c between t1.d and a or t1.d=17) or c in (b,13,17))} +} {} +do_test randexpr-2.1777 { + db eval {SELECT case when t1.b in (select case 13-(select case count(*) when count(distinct (abs(19)/abs(t1.d)))*~count(distinct 13)*(max((abs(t1.e*d*f)/abs(17)))) then -case - -count(distinct e) when min(19) then (cast(avg(17) AS integer)) else count(distinct t1.a) end*max(t1.b)-cast(avg(a) AS integer) else count(distinct t1.d) end from t1)+t1.b*d when t1.c+t1.a then f else t1.f end from t1 union select t1.c from t1) then t1.a when a in (select 11 from t1 union select a from t1) then a else e end FROM t1 WHERE NOT ((case when case 11+case f when e then d else (abs(e)/abs(19))*a end when 19 then b else d end= -19 then 13 else t1.e end between 17 and t1.a or b>=17 and ((( -t1.f))>=t1.c and 17 between t1.c and 19 and t1.c between t1.d and a or t1.d=17) or c in (b,13,17)))} +} {100} +do_test randexpr-2.1778 { + db eval {SELECT case when coalesce((select case when (not exists(select 1 from t1 where t1.f<>t1.f)) then case when d-t1.e*t1.d=((select (count(distinct t1.a)) from t1)) then t1.a when 13 not in ( -b,13,19) then t1.b else e end when t1.e=13 then e else 17 end-17*b from t1 where (t1.d in (d,t1.e,t1.f))),( -f)) not in (d,t1.e,c) then t1.d else -t1.f end* -t1.b FROM t1 WHERE not exists(select 1 from t1 where 11 in (select max(coalesce((select max(case when e+coalesce((select max(d) from t1 where not at1.f)) then case when d-t1.e*t1.d=((select (count(distinct t1.a)) from t1)) then t1.a when 13 not in ( -b,13,19) then t1.b else e end when t1.e=13 then e else 17 end-17*b from t1 where (t1.d in (d,t1.e,t1.f))),( -f)) not in (d,t1.e,c) then t1.d else -t1.f end* -t1.b FROM t1 WHERE NOT (not exists(select 1 from t1 where 11 in (select max(coalesce((select max(case when e+coalesce((select max(d) from t1 where not ab or not t1.f in (11,t1.a,t1.e)),t1.a) | e then c else e end from t1 where -t1.a<11),13))/abs(t1.b))-t1.b*17 from t1 where c=t1.c),17) FROM t1 WHERE t1.e+e>=coalesce((select max(t1.d) from t1 where +(t1.f) not between c and f),case when exists(select 1 from t1 
where t1.e+t1.c<>(abs(e*(select -count(*) from t1))/abs(t1.e*coalesce((select max((select cast(avg(13) AS integer) from t1)) from t1 where not (a) not between -c and -17), -d)+ -a+t1.f))) then case when e between 17 and d then a else 19 end when not c between e and 11 then 13 else t1.a end)} +} {-3051} +do_test randexpr-2.1781 { + db eval {SELECT coalesce((select (abs(~c*t1.a+t1.b*coalesce((select case c when b+coalesce((select t1.b-case e when 19 then b else b end*11 from t1 where t1.b<>b or not t1.f in (11,t1.a,t1.e)),t1.a) | e then c else e end from t1 where -t1.a<11),13))/abs(t1.b))-t1.b*17 from t1 where c=t1.c),17) FROM t1 WHERE NOT (t1.e+e>=coalesce((select max(t1.d) from t1 where +(t1.f) not between c and f),case when exists(select 1 from t1 where t1.e+t1.c<>(abs(e*(select -count(*) from t1))/abs(t1.e*coalesce((select max((select cast(avg(13) AS integer) from t1)) from t1 where not (a) not between -c and -17), -d)+ -a+t1.f))) then case when e between 17 and d then a else 19 end when not c between e and 11 then 13 else t1.a end))} +} {} +do_test randexpr-2.1782 { + db eval {SELECT coalesce((select (abs(~c*t1.a+t1.b*coalesce((select case c when b+coalesce((select t1.b-case e when 19 then b else b end*11 from t1 where t1.b<>b or not t1.f in (11,t1.a,t1.e)),t1.a) & e then c else e end from t1 where -t1.a<11),13))/abs(t1.b))-t1.b*17 from t1 where c=t1.c),17) FROM t1 WHERE t1.e+e>=coalesce((select max(t1.d) from t1 where +(t1.f) not between c and f),case when exists(select 1 from t1 where t1.e+t1.c<>(abs(e*(select -count(*) from t1))/abs(t1.e*coalesce((select max((select cast(avg(13) AS integer) from t1)) from t1 where not (a) not between -c and -17), -d)+ -a+t1.f))) then case when e between 17 and d then a else 19 end when not c between e and 11 then 13 else t1.a end)} +} {-3051} +do_test randexpr-2.1783 { + db eval {SELECT t1.a-~17-t1.c+t1.c | (c) | (13)*coalesce((select max((case when 11 in (case when (19 in (select abs(max(c)) from t1 union select ( -cast(avg(19) AS integer)) from t1)) then 13 when t1.a= -t1.a then t1.b else c end,a,13) then t1.e when t1.b in (t1.a,13, -19) then d else t1.a end)) from t1 where t1.d between t1.d and t1.b and t1.d<=17),t1.d)*11 | t1.e FROM t1 WHERE e in (select 11 from t1 union select a from t1)} +} {} +do_test randexpr-2.1784 { + db eval {SELECT t1.a-~17-t1.c+t1.c | (c) | (13)*coalesce((select max((case when 11 in (case when (19 in (select abs(max(c)) from t1 union select ( -cast(avg(19) AS integer)) from t1)) then 13 when t1.a= -t1.a then t1.b else c end,a,13) then t1.e when t1.b in (t1.a,13, -19) then d else t1.a end)) from t1 where t1.d between t1.d and t1.b and t1.d<=17),t1.d)*11 | t1.e FROM t1 WHERE NOT (e in (select 11 from t1 union select a from t1))} +} {57342} +do_test randexpr-2.1785 { + db eval {SELECT t1.a-~17-t1.c+t1.c & (c) & (13)*coalesce((select max((case when 11 in (case when (19 in (select abs(max(c)) from t1 union select ( -cast(avg(19) AS integer)) from t1)) then 13 when t1.a= -t1.a then t1.b else c end,a,13) then t1.e when t1.b in (t1.a,13, -19) then d else t1.a end)) from t1 where t1.d between t1.d and t1.b and t1.d<=17),t1.d)*11 & t1.e FROM t1 WHERE NOT (e in (select 11 from t1 union select a from t1))} +} {32} +do_test randexpr-2.1786 { + db eval {SELECT (abs(case when 11 not between t1.d and case coalesce((select case c+case when b in (case when a<=t1.b then (t1.a) when f not in (d,t1.f, -(a)) then (t1.a) else c end,t1.a,b) then 11 when (t1.c not between 17 and -d) then t1.d else e end+e*t1.a when 19 then b else t1.e end from 
t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where -(19)=d))),t1.d) when c then a else -17 end or a>=19 then 17 else b end+ -13)/abs(b)) FROM t1 WHERE not exists(select 1 from t1 where not e+case 13 when 11 then +t1.d else t1.f+11 end*a-19+t1.c+a in (~e | coalesce((select ~a*+(abs( -case case b*e when f then e else -13 end when t1.d then t1.b else t1.b end)/abs(17))+t1.b from t1 where t1.a=e),d),e,e))} +} {} +do_test randexpr-2.1787 { + db eval {SELECT (abs(case when 11 not between t1.d and case coalesce((select case c+case when b in (case when a<=t1.b then (t1.a) when f not in (d,t1.f, -(a)) then (t1.a) else c end,t1.a,b) then 11 when (t1.c not between 17 and -d) then t1.d else e end+e*t1.a when 19 then b else t1.e end from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where -(19)=d))),t1.d) when c then a else -17 end or a>=19 then 17 else b end+ -13)/abs(b)) FROM t1 WHERE NOT (not exists(select 1 from t1 where not e+case 13 when 11 then +t1.d else t1.f+11 end*a-19+t1.c+a in (~e | coalesce((select ~a*+(abs( -case case b*e when f then e else -13 end when t1.d then t1.b else t1.b end)/abs(17))+t1.b from t1 where t1.a=e),d),e,e)))} +} {0} +do_test randexpr-2.1788 { + db eval {SELECT t1.c-coalesce((select t1.e from t1 where not 17 | (select abs(min((t1.c) | +case 19 when d then f else t1.c end*(13)+a)- -count(*)) from t1)>t1.c and (exists(select 1 from t1 where c in (select f from t1 union select a from t1))) or t1.e>t1.c),coalesce((select e*(e) from t1 where not exists(select 1 from t1 where b not in (f,a,19))),c)) FROM t1 WHERE t1.f*t1.e*b+coalesce((select case when 17 not in (case e++t1.d+19*11 when case when 11 in (select -min(t1.a)*min(t1.e)+cast(avg(t1.a) AS integer) from t1 union select -count(*) from t1) then 13 else 11 end*17 then a else t1.a end,(t1.b),t1.d) then t1.f else -b end from t1 where t1.b not between 17 and (a)),d)-t1.d-c*bt1.c and (exists(select 1 from t1 where c in (select f from t1 union select a from t1))) or t1.e>t1.c),coalesce((select e*(e) from t1 where not exists(select 1 from t1 where b not in (f,a,19))),c)) FROM t1 WHERE NOT (t1.f*t1.e*b+coalesce((select case when 17 not in (case e++t1.d+19*11 when case when 11 in (select -min(t1.a)*min(t1.e)+cast(avg(t1.a) AS integer) from t1 union select -count(*) from t1) then 13 else 11 end*17 then a else t1.a end,(t1.b),t1.d) then t1.f else -b end from t1 where t1.b not between 17 and (a)),d)-t1.d-c*bt1.c and (exists(select 1 from t1 where c in (select f from t1 union select a from t1))) or t1.e>t1.c),coalesce((select e*(e) from t1 where not exists(select 1 from t1 where b not in (f,a,19))),c)) FROM t1 WHERE NOT (t1.f*t1.e*b+coalesce((select case when 17 not in (case e++t1.d+19*11 when case when 11 in (select -min(t1.a)*min(t1.e)+cast(avg(t1.a) AS integer) from t1 union select -count(*) from t1) then 13 else 11 end*17 then a else t1.a end,(t1.b),t1.d) then t1.f else -b end from t1 where t1.b not between 17 and (a)),d)-t1.d-c*b= -b+t1.f | 19 then t1.d else 11 end else t1.a end-t1.e else d end not between f and t1.f) then c else e end) from t1 where f>=t1.b and t1.c between t1.f and t1.a),e)*d*b FROM t1 WHERE t1.b in (select 11-case when (t1.a+19<=case when c>=(19) then 11 else t1.e+t1.a*c end*e and t1.f in (select case abs(+abs( -count(*) | count(*)*count(distinct c)+max(13))) when min(d) then min(b) else min(t1.b) end | cast(avg(t1.e) AS integer) from t1 union select count(distinct t1.e) from t1)) then t1.a else t1.f end*17 from t1 union select t1.a from t1)} +} {} +do_test 
randexpr-2.1792 { + db eval {SELECT c-coalesce((select max(case when (case t1.a when a then case f when e*e then ~c | case when +c>= -b+t1.f | 19 then t1.d else 11 end else t1.a end-t1.e else d end not between f and t1.f) then c else e end) from t1 where f>=t1.b and t1.c between t1.f and t1.a),e)*d*b FROM t1 WHERE NOT (t1.b in (select 11-case when (t1.a+19<=case when c>=(19) then 11 else t1.e+t1.a*c end*e and t1.f in (select case abs(+abs( -count(*) | count(*)*count(distinct c)+max(13))) when min(d) then min(b) else min(t1.b) end | cast(avg(t1.e) AS integer) from t1 union select count(distinct t1.e) from t1)) then t1.a else t1.f end*17 from t1 union select t1.a from t1))} +} {-39999700} +do_test randexpr-2.1793 { + db eval {SELECT c-coalesce((select max(case when (case t1.a when a then case f when e*e then ~c & case when +c>= -b+t1.f & 19 then t1.d else 11 end else t1.a end-t1.e else d end not between f and t1.f) then c else e end) from t1 where f>=t1.b and t1.c between t1.f and t1.a),e)*d*b FROM t1 WHERE NOT (t1.b in (select 11-case when (t1.a+19<=case when c>=(19) then 11 else t1.e+t1.a*c end*e and t1.f in (select case abs(+abs( -count(*) | count(*)*count(distinct c)+max(13))) when min(d) then min(b) else min(t1.b) end | cast(avg(t1.e) AS integer) from t1 union select count(distinct t1.e) from t1)) then t1.a else t1.f end*17 from t1 union select t1.a from t1))} +} {-39999700} +do_test randexpr-2.1794 { + db eval {SELECT coalesce((select f from t1 where ~coalesce((select max(b-e) from t1 where 13 in (select d from t1 union select d from t1)),(abs(t1.f)/abs(t1.f)))-t1.d+case a when t1.b then 13 else case when 19>case a when t1.a then 13 else -d end or e<>c then coalesce((select max(a) from t1 where t1.e not between (13) and t1.d),11) when b in (select t1.a from t1 union select t1.f from t1) then 19 else a end end-b>11),17) FROM t1 WHERE b in (select e-17 from t1 union select d from t1)} +} {} +do_test randexpr-2.1795 { + db eval {SELECT coalesce((select f from t1 where ~coalesce((select max(b-e) from t1 where 13 in (select d from t1 union select d from t1)),(abs(t1.f)/abs(t1.f)))-t1.d+case a when t1.b then 13 else case when 19>case a when t1.a then 13 else -d end or e<>c then coalesce((select max(a) from t1 where t1.e not between (13) and t1.d),11) when b in (select t1.a from t1 union select t1.f from t1) then 19 else a end end-b>11),17) FROM t1 WHERE NOT (b in (select e-17 from t1 union select d from t1))} +} {17} +do_test randexpr-2.1796 { + db eval {SELECT coalesce((select case case when 13<>((abs(t1.b)/abs(f+t1.c))) then 11 else coalesce((select max(b) from t1 where (b=(abs((select min((select case -min(11) when count(distinct d) then count(*) else min(t1.c) end from t1)) from t1))/abs(t1.f))-c)), -19)*e end*d* -e+t1.d when 13 then c else c end+b from t1 where d>13 or (b between t1.e and 19)),a) FROM t1 WHERE not exists(select 1 from t1 where ((not not exists(select 1 from t1 where ((case when coalesce((select max(19) from t1 where case when b>=e and f between t1.d and t1.c or ( -t1.b)=t1.f then b+a else 11 end in (d,t1.d,(17))),t1.d)>t1.b then (t1.f) else d end*e*t1.e<=t1.b)) or 13<>13)) or 17((abs(t1.b)/abs(f+t1.c))) then 11 else coalesce((select max(b) from t1 where (b=(abs((select min((select case -min(11) when count(distinct d) then count(*) else min(t1.c) end from t1)) from t1))/abs(t1.f))-c)), -19)*e end*d* -e+t1.d when 13 then c else c end+b from t1 where d>13 or (b between t1.e and 19)),a) FROM t1 WHERE NOT (not exists(select 1 from t1 where ((not not exists(select 1 from t1 
where ((case when coalesce((select max(19) from t1 where case when b>=e and f between t1.d and t1.c or ( -t1.b)=t1.f then b+a else 11 end in (d,t1.d,(17))),t1.d)>t1.b then (t1.f) else d end*e*t1.e<=t1.b)) or 13<>13)) or 17t1.d then e+t1.c when 19 between t1.f and t1.f then 11 else e end between t1.e and 17) and c=t1.f),t1.c) not between t1.a and (19) then c else t1.b end*a-19 and b then b when a not in (a,t1.f,11) then a else a end FROM t1 WHERE (select abs((+min(t1.c)*min(t1.d)*count(*)))*count(*)++count(distinct ~13*11+t1.f) | case abs(+count(*)) when min(t1.e) then count(*) else max(13) end from t1)*t1.b between d and a++19 and t1.c in (select ~((abs(17)/abs(c)))-d from t1 union select t1.f from t1)} +} {} +do_test randexpr-2.1799 { + db eval {SELECT ~case when 19 not between ~case when coalesce((select max(t1.c*(select cast(avg(13) AS integer) from t1)) from t1 where exists(select 1 from t1 where case when (t1.b in (d,b,t1.c)) and 13>t1.d then e+t1.c when 19 between t1.f and t1.f then 11 else e end between t1.e and 17) and c=t1.f),t1.c) not between t1.a and (19) then c else t1.b end*a-19 and b then b when a not in (a,t1.f,11) then a else a end FROM t1 WHERE NOT ((select abs((+min(t1.c)*min(t1.d)*count(*)))*count(*)++count(distinct ~13*11+t1.f) | case abs(+count(*)) when min(t1.e) then count(*) else max(13) end from t1)*t1.b between d and a++19 and t1.c in (select ~((abs(17)/abs(c)))-d from t1 union select t1.f from t1))} +} {-101} +do_test randexpr-2.1800 { + db eval {SELECT (t1.d)+(abs(coalesce((select ((abs(case t1.e when a+(abs(11)/abs(b)) then t1.f else case coalesce((select max(d+c) from t1 where +19+11t1.c),d))/abs(t1.d))-11 | (17) | b FROM t1 WHERE (select ~min(b)+count(distinct 17) from t1)>(select max((abs((13))/abs(coalesce((select case when b*13 in (case when e=t1.b then c when 11<>e then f else t1.b end,t1.f,t1.a) then d when not exists(select 1 from t1 where t1.f<>e) then t1.a else c end from t1 where t1.e=13),t1.f) | b)))+(~~cast(avg(f) AS integer)-min((13))) from t1)} +} {} +do_test randexpr-2.1801 { + db eval {SELECT (t1.d)+(abs(coalesce((select ((abs(case t1.e when a+(abs(11)/abs(b)) then t1.f else case coalesce((select max(d+c) from t1 where +19+11t1.c),d))/abs(t1.d))-11 | (17) | b FROM t1 WHERE NOT ((select ~min(b)+count(distinct 17) from t1)>(select max((abs((13))/abs(coalesce((select case when b*13 in (case when e=t1.b then c when 11<>e then f else t1.b end,t1.f,t1.a) then d when not exists(select 1 from t1 where t1.f<>e) then t1.a else c end from t1 where t1.e=13),t1.f) | b)))+(~~cast(avg(f) AS integer)-min((13))) from t1))} +} {477} +do_test randexpr-2.1802 { + db eval {SELECT (t1.d)+(abs(coalesce((select ((abs(case t1.e when a+(abs(11)/abs(b)) then t1.f else case coalesce((select max(d+c) from t1 where +19+11t1.c),d))/abs(t1.d))-11 & (17) & b FROM t1 WHERE NOT ((select ~min(b)+count(distinct 17) from t1)>(select max((abs((13))/abs(coalesce((select case when b*13 in (case when e=t1.b then c when 11<>e then f else t1.b end,t1.f,t1.a) then d when not exists(select 1 from t1 where t1.f<>e) then t1.a else c end from t1 where t1.e=13),t1.f) | b)))+(~~cast(avg(f) AS integer)-min((13))) from t1))} +} {0} +do_test randexpr-2.1803 { + db eval {SELECT d+case coalesce((select t1.f from t1 where coalesce((select max((11)) from t1 where (coalesce((select d from t1 where ((case b when ( -a) then 19 else e end)) not in ( -d,t1.c,13)),b))>t1.b and ((c in (select ~~cast(avg(t1.b) AS integer)+ -(max(t1.e)) from t1 union select max(c) from t1) and (e) in (select -count(distinct 17) 
from t1 union select count(*) from t1) or 11<>t1.c))),t1.b) not between (f) and 11),t1.d) when (19) then e else 13 end-t1.f FROM t1 WHERE exists(select 1 from t1 where (t1.a between case when e | b*c-19 | at1.b and ((c in (select ~~cast(avg(t1.b) AS integer)+ -(max(t1.e)) from t1 union select max(c) from t1) and (e) in (select -count(distinct 17) from t1 union select count(*) from t1) or 11<>t1.c))),t1.b) not between (f) and 11),t1.d) when (19) then e else 13 end-t1.f FROM t1 WHERE NOT (exists(select 1 from t1 where (t1.a between case when e | b*c-19 | a=f or (select -count(*) from t1) in (select t1.c from t1 union select 17 from t1)),b)*e not between case coalesce((select max(~case when 17>t1.e then d else 13 end) from t1 where not t1.c<=17),t1.c) when 13 then c else t1.e end+t1.c-t1.e and t1.e),t1.e) FROM t1 WHERE case ~case when not exists(select 1 from t1 where f<=~t1.d) then (abs(coalesce((select ~19 from t1 where d+t1.f=b),d))/abs(e)) when d in (select +min(11) from t1 union select ( -abs(count(distinct e)))+count(distinct t1.a) from t1) then e else t1.d end when -d then t1.e else t1.b end=t1.f or t1.d not in (t1.c,t1.e,t1.e) and t1.c not in (17,t1.a,e)} +} {300} +do_test randexpr-2.1806 { + db eval {SELECT coalesce((select c from t1 where - -19-coalesce((select t1.e from t1 where not f<=b-17 or not -13>=f or (select -count(*) from t1) in (select t1.c from t1 union select 17 from t1)),b)*e not between case coalesce((select max(~case when 17>t1.e then d else 13 end) from t1 where not t1.c<=17),t1.c) when 13 then c else t1.e end+t1.c-t1.e and t1.e),t1.e) FROM t1 WHERE NOT (case ~case when not exists(select 1 from t1 where f<=~t1.d) then (abs(coalesce((select ~19 from t1 where d+t1.f=b),d))/abs(e)) when d in (select +min(11) from t1 union select ( -abs(count(distinct e)))+count(distinct t1.a) from t1) then e else t1.d end when -d then t1.e else t1.b end=t1.f or t1.d not in (t1.c,t1.e,t1.e) and t1.c not in (17,t1.a,e))} +} {} +do_test randexpr-2.1807 { + db eval {SELECT case when t1.e<=~d then case when t1.d-e in (~t1.c-case e*case when t1.e not in (t1.d,19,f) then e else b end*e when (19) then t1.a else -e end,19,17) then t1.f when t1.d not in (t1.b,b,17) then t1.b else 19 end when 13= -t1.f and not (19=t1.d) then t1.f else (d) end FROM t1 WHERE 19<>d} +} {400} +do_test randexpr-2.1808 { + db eval {SELECT case when t1.e<=~d then case when t1.d-e in (~t1.c-case e*case when t1.e not in (t1.d,19,f) then e else b end*e when (19) then t1.a else -e end,19,17) then t1.f when t1.d not in (t1.b,b,17) then t1.b else 19 end when 13= -t1.f and not (19=t1.d) then t1.f else (d) end FROM t1 WHERE NOT (19<>d)} +} {} +do_test randexpr-2.1809 { + db eval {SELECT (select max(case when (t1.d<=t1.a*t1.b) then t1.e else case when t1.a> -t1.c*t1.f then b else b*(a) end end)*min(+ -19-(select min(a) from t1)-+17*case when not exists(select 1 from t1 where (select abs(cast(avg(a) AS integer)) from t1)=t1.a) then e when 19>=a then 13 else t1.a end*19-e) from t1) FROM t1 WHERE t1.e in (select max(t1.e)-cast(avg(e) AS integer) from t1 union select (~+count(*) | count(*) | cast(avg((abs(coalesce((select max(case f+c-17 when t1.b then t1.e else t1.d end) from t1 where 17 in (select a from t1 union select t1.b from t1)),a))/abs( -t1.e))) AS integer)+~case count(*) when cast(avg(t1.d) AS integer) then -cast(avg(t1.e) AS integer) else count(distinct d) end-max(13)+ -min(t1.a) | min((17)) | count(distinct d))-min(e) from t1)} +} {} +do_test randexpr-2.1810 { + db eval {SELECT (select max(case when (t1.d<=t1.a*t1.b) then 
t1.e else case when t1.a> -t1.c*t1.f then b else b*(a) end end)*min(+ -19-(select min(a) from t1)-+17*case when not exists(select 1 from t1 where (select abs(cast(avg(a) AS integer)) from t1)=t1.a) then e when 19>=a then 13 else t1.a end*19-e) from t1) FROM t1 WHERE NOT (t1.e in (select max(t1.e)-cast(avg(e) AS integer) from t1 union select (~+count(*) | count(*) | cast(avg((abs(coalesce((select max(case f+c-17 when t1.b then t1.e else t1.d end) from t1 where 17 in (select a from t1 union select t1.b from t1)),a))/abs( -t1.e))) AS integer)+~case count(*) when cast(avg(t1.d) AS integer) then -cast(avg(t1.e) AS integer) else count(distinct d) end-max(13)+ -min(t1.a) | min((17)) | count(distinct d))-min(e) from t1))} +} {-16459500} +do_test randexpr-2.1811 { + db eval {SELECT case when case (select max(t1.b)*min(case when (abs(t1.e)/abs(d)) in (select 19 from t1 union select +t1.d from t1) then t1.d when t1.d=d or 11 in (t1.c,b,t1.b) and 13 in (select max(b)-count(distinct b) from t1 union select count(distinct -t1.c) from t1) then f else 17 end) from t1) when t1.a then 11 else t1.d end+t1.a>c or t1.c between 13 and a then f when (t1.b)>=t1.a then d else b end FROM t1 WHERE (not exists(select 1 from t1 where coalesce((select max(e) from t1 where -b+a-(abs(c)/abs(b))-( -t1.a)-t1.c>=d),d) in (select count(distinct a) from t1 union select cast(avg(t1.b) AS integer) from t1) and exists(select 1 from t1 where not t1.d<>f) and t1.a in (select min(t1.a)+max(a)*max(19)*count(distinct c) from t1 union select ((count(distinct 11))) from t1) or t1.c>(13) and a between (t1.f) and 13))} +} {600} +do_test randexpr-2.1812 { + db eval {SELECT case when case (select max(t1.b)*min(case when (abs(t1.e)/abs(d)) in (select 19 from t1 union select +t1.d from t1) then t1.d when t1.d=d or 11 in (t1.c,b,t1.b) and 13 in (select max(b)-count(distinct b) from t1 union select count(distinct -t1.c) from t1) then f else 17 end) from t1) when t1.a then 11 else t1.d end+t1.a>c or t1.c between 13 and a then f when (t1.b)>=t1.a then d else b end FROM t1 WHERE NOT ((not exists(select 1 from t1 where coalesce((select max(e) from t1 where -b+a-(abs(c)/abs(b))-( -t1.a)-t1.c>=d),d) in (select count(distinct a) from t1 union select cast(avg(t1.b) AS integer) from t1) and exists(select 1 from t1 where not t1.d<>f) and t1.a in (select min(t1.a)+max(a)*max(19)*count(distinct c) from t1 union select ((count(distinct 11))) from t1) or t1.c>(13) and a between (t1.f) and 13)))} +} {} +do_test randexpr-2.1813 { + db eval {SELECT f*~13*(abs(13*d- -(abs(case when ~coalesce((select max(c) from t1 where b>case when not exists(select 1 from t1 where f not in ( - -(t1.b),t1.b,t1.f)) then e when t1.c<>f then c else 13 end),17) | -19 not in (f,b,f) then -b when not 13 in (select max((19)) from t1 union select count(*) | count(*) from t1) and 11 not between t1.b and e or 13 in (t1.a, -e,(t1.c)) then a else 11 end)/abs(t1.c))*b | t1.b)/abs(f))*f*f FROM t1 WHERE ~case f when coalesce((select max(17) from t1 where b+t1.f*case 11 when coalesce((select max(t1.f) from t1 where 17>t1.f),19)-t1.b-t1.b | d then b else c end+t1.d*11-t1.c<=case when b>=a then 19 else f end | t1.a),13) then f else t1.e end*t1.e*17-d in (select 13 from t1 union select t1.e from t1)} +} {} +do_test randexpr-2.1814 { + db eval {SELECT f*~13*(abs(13*d- -(abs(case when ~coalesce((select max(c) from t1 where b>case when not exists(select 1 from t1 where f not in ( - -(t1.b),t1.b,t1.f)) then e when t1.c<>f then c else 13 end),17) | -19 not in (f,b,f) then -b when not 13 in (select 
max((19)) from t1 union select count(*) | count(*) from t1) and 11 not between t1.b and e or 13 in (t1.a, -e,(t1.c)) then a else 11 end)/abs(t1.c))*b | t1.b)/abs(f))*f*f FROM t1 WHERE NOT (~case f when coalesce((select max(17) from t1 where b+t1.f*case 11 when coalesce((select max(t1.f) from t1 where 17>t1.f),19)-t1.b-t1.b | d then b else c end+t1.d*11-t1.c<=case when b>=a then 19 else f end | t1.a),13) then f else t1.e end*t1.e*17-d in (select 13 from t1 union select t1.e from t1))} +} {-24192000000} +do_test randexpr-2.1815 { + db eval {SELECT f*~13*(abs(13*d- -(abs(case when ~coalesce((select max(c) from t1 where b>case when not exists(select 1 from t1 where f not in ( - -(t1.b),t1.b,t1.f)) then e when t1.c<>f then c else 13 end),17) & -19 not in (f,b,f) then -b when not 13 in (select max((19)) from t1 union select count(*) & count(*) from t1) and 11 not between t1.b and e or 13 in (t1.a, -e,(t1.c)) then a else 11 end)/abs(t1.c))*b & t1.b)/abs(f))*f*f FROM t1 WHERE NOT (~case f when coalesce((select max(17) from t1 where b+t1.f*case 11 when coalesce((select max(t1.f) from t1 where 17>t1.f),19)-t1.b-t1.b | d then b else c end+t1.d*11-t1.c<=case when b>=a then 19 else f end | t1.a),13) then f else t1.e end*t1.e*17-d in (select 13 from t1 union select t1.e from t1))} +} {0} +do_test randexpr-2.1816 { + db eval {SELECT t1.b | (select case max( -17)-min(17) when cast(avg(a) AS integer)*(+cast(avg(f) AS integer)-cast(avg(case when not exists(select 1 from t1 where not case when t1.c<>t1.b or -d not between b and a then 11 when (11)=11 then b else t1.b end in (t1.d,t1.c,13)) then 13 when t1.e not between 17 and t1.b then t1.c else t1.d end*t1.b) AS integer))-cast(avg(19) AS integer)*~max(t1.c)-((max(13)))*(count(*))+(max(11)) then count(distinct 13) else cast(avg(17) AS integer) end from t1) FROM t1 WHERE (abs(t1.c)/abs(case t1.d when t1.f then 13 else t1.f+13 end)) not between c and 19} +} {217} +do_test randexpr-2.1817 { + db eval {SELECT t1.b | (select case max( -17)-min(17) when cast(avg(a) AS integer)*(+cast(avg(f) AS integer)-cast(avg(case when not exists(select 1 from t1 where not case when t1.c<>t1.b or -d not between b and a then 11 when (11)=11 then b else t1.b end in (t1.d,t1.c,13)) then 13 when t1.e not between 17 and t1.b then t1.c else t1.d end*t1.b) AS integer))-cast(avg(19) AS integer)*~max(t1.c)-((max(13)))*(count(*))+(max(11)) then count(distinct 13) else cast(avg(17) AS integer) end from t1) FROM t1 WHERE NOT ((abs(t1.c)/abs(case t1.d when t1.f then 13 else t1.f+13 end)) not between c and 19)} +} {} +do_test randexpr-2.1818 { + db eval {SELECT t1.b & (select case max( -17)-min(17) when cast(avg(a) AS integer)*(+cast(avg(f) AS integer)-cast(avg(case when not exists(select 1 from t1 where not case when t1.c<>t1.b or -d not between b and a then 11 when (11)=11 then b else t1.b end in (t1.d,t1.c,13)) then 13 when t1.e not between 17 and t1.b then t1.c else t1.d end*t1.b) AS integer))-cast(avg(19) AS integer)*~max(t1.c)-((max(13)))*(count(*))+(max(11)) then count(distinct 13) else cast(avg(17) AS integer) end from t1) FROM t1 WHERE (abs(t1.c)/abs(case t1.d when t1.f then 13 else t1.f+13 end)) not between c and 19} +} {0} +do_test randexpr-2.1819 { + db eval {SELECT case +c-case (select case count(distinct 13) when min(19*e) then ~count(*) else ~min(case when not +13*(case when e*c>=e then e else f end)-a+c not in (b,t1.b,t1.e) then 19 else 17 end*a) end from t1) when 11 then 11 else c end+t1.c when t1.b then d else 19 end FROM t1 WHERE t1.a<>~coalesce((select max(e) from t1 where 
13-17*13<>19*17-f or coalesce((select max(~case +t1.d-f*e | c+(17)+a | c*((b)) when a then t1.a else d end-d) from t1 where t1.c>= -t1.a),t1.b) | 19>t1.a),c)} +} {19} +do_test randexpr-2.1820 { + db eval {SELECT case +c-case (select case count(distinct 13) when min(19*e) then ~count(*) else ~min(case when not +13*(case when e*c>=e then e else f end)-a+c not in (b,t1.b,t1.e) then 19 else 17 end*a) end from t1) when 11 then 11 else c end+t1.c when t1.b then d else 19 end FROM t1 WHERE NOT (t1.a<>~coalesce((select max(e) from t1 where 13-17*13<>19*17-f or coalesce((select max(~case +t1.d-f*e | c+(17)+a | c*((b)) when a then t1.a else d end-d) from t1 where t1.c>= -t1.a),t1.b) | 19>t1.a),c))} +} {} +do_test randexpr-2.1821 { + db eval {SELECT coalesce((select max(t1.d) from t1 where f between case when (19 not in (t1.b,(a),t1.a)) or t1.a=t1.b | -(select +cast(avg(17-case when -c<=17 then a when -11 not in (11,19,a) then e else 11 end-t1.a*d) AS integer) from t1) then t1.b when (11)*f in (13,t1.a,a) or t1.a=e),f)*b-f and t1.f),t1.b) else d end+19 when t1.a then 11 else e end-t1.a)} +} {} +do_test randexpr-2.1822 { + db eval {SELECT coalesce((select max(t1.d) from t1 where f between case when (19 not in (t1.b,(a),t1.a)) or t1.a=t1.b | -(select +cast(avg(17-case when -c<=17 then a when -11 not in (11,19,a) then e else 11 end-t1.a*d) AS integer) from t1) then t1.b when (11)*f in (13,t1.a,a) or t1.a=e),f)*b-f and t1.f),t1.b) else d end+19 when t1.a then 11 else e end-t1.a))} +} {-200} +do_test randexpr-2.1823 { + db eval {SELECT coalesce((select max(t1.d) from t1 where f between case when (19 not in (t1.b,(a),t1.a)) or t1.a=t1.b & -(select +cast(avg(17-case when -c<=17 then a when -11 not in (11,19,a) then e else 11 end-t1.a*d) AS integer) from t1) then t1.b when (11)*f in (13,t1.a,a) or t1.a=e),f)*b-f and t1.f),t1.b) else d end+19 when t1.a then 11 else e end-t1.a))} +} {-200} +do_test randexpr-2.1824 { + db eval {SELECT case when not -t1.c in (coalesce((select d from t1 where t1.a not in (t1.c+f, -f,19) and t1.b in (select max(t1.d)+min(19)*min(t1.e) from t1 union select count(distinct d) from t1) and (t1.d) in (select -count(*) from t1 union select -count(*) from t1)),d* -t1.e),b,f) or -c not in (a, -t1.a,t1.e) then case when b between 17 and t1.b and a not in (t1.a,e,t1.e) then t1.d else 19 end*t1.b else t1.d end+t1.d FROM t1 WHERE (abs((17))/abs(11 | e*e+d++t1.d)) in (19+t1.f,e,(abs(13)/abs( -t1.c)))} +} {4200} +do_test randexpr-2.1825 { + db eval {SELECT case when not -t1.c in (coalesce((select d from t1 where t1.a not in (t1.c+f, -f,19) and t1.b in (select max(t1.d)+min(19)*min(t1.e) from t1 union select count(distinct d) from t1) and (t1.d) in (select -count(*) from t1 union select -count(*) from t1)),d* -t1.e),b,f) or -c not in (a, -t1.a,t1.e) then case when b between 17 and t1.b and a not in (t1.a,e,t1.e) then t1.d else 19 end*t1.b else t1.d end+t1.d FROM t1 WHERE NOT ((abs((17))/abs(11 | e*e+d++t1.d)) in (19+t1.f,e,(abs(13)/abs( -t1.c))))} +} {} +do_test randexpr-2.1826 { + db eval {SELECT case when ( -(select ~ -~ -(max(c))-min(t1.e) | count(distinct (11)) | min(t1.a) from t1)-case when case d when a then case when t1.e=t1.a then t1.c else c end else c end<=t1.f then t1.b when t1.c not in (a,19,c) then b else 17 end>=19) or d in (19,f, -11) then 19 else (select case min(t1.a)-(cast(avg(t1.f) AS integer))-(min(11)) when count(*) then count(*) else count(distinct a) end from t1) end FROM t1 WHERE not exists(select 1 from t1 where (abs(11-13)/abs(f))>19*~19+t1.d-t1.f+coalesce((select 
max(f) from t1 where t1.a-t1.e+case when case when a not between t1.c and 19 and -t1.e>t1.c then case when t1.f in (t1.d,e,t1.f) then d when c not in (11,t1.c,d) then t1.a else a end when t1.b=11 then t1.c else t1.e end>e then e else e end-11t1.f} +} {1} +do_test randexpr-2.1827 { + db eval {SELECT case when ( -(select ~ -~ -(max(c))-min(t1.e) | count(distinct (11)) | min(t1.a) from t1)-case when case d when a then case when t1.e=t1.a then t1.c else c end else c end<=t1.f then t1.b when t1.c not in (a,19,c) then b else 17 end>=19) or d in (19,f, -11) then 19 else (select case min(t1.a)-(cast(avg(t1.f) AS integer))-(min(11)) when count(*) then count(*) else count(distinct a) end from t1) end FROM t1 WHERE NOT (not exists(select 1 from t1 where (abs(11-13)/abs(f))>19*~19+t1.d-t1.f+coalesce((select max(f) from t1 where t1.a-t1.e+case when case when a not between t1.c and 19 and -t1.e>t1.c then case when t1.f in (t1.d,e,t1.f) then d when c not in (11,t1.c,d) then t1.a else a end when t1.b=11 then t1.c else t1.e end>e then e else e end-11t1.f)} +} {} +do_test randexpr-2.1828 { + db eval {SELECT case when ( -(select ~ -~ -(max(c))-min(t1.e) & count(distinct (11)) & min(t1.a) from t1)-case when case d when a then case when t1.e=t1.a then t1.c else c end else c end<=t1.f then t1.b when t1.c not in (a,19,c) then b else 17 end>=19) or d in (19,f, -11) then 19 else (select case min(t1.a)-(cast(avg(t1.f) AS integer))-(min(11)) when count(*) then count(*) else count(distinct a) end from t1) end FROM t1 WHERE not exists(select 1 from t1 where (abs(11-13)/abs(f))>19*~19+t1.d-t1.f+coalesce((select max(f) from t1 where t1.a-t1.e+case when case when a not between t1.c and 19 and -t1.e>t1.c then case when t1.f in (t1.d,e,t1.f) then d when c not in (11,t1.c,d) then t1.a else a end when t1.b=11 then t1.c else t1.e end>e then e else e end-11t1.f} +} {1} +do_test randexpr-2.1829 { + db eval {SELECT t1.e | t1.a*~(17)-t1.c*f*f-coalesce((select max( -t1.a+coalesce((select t1.a from t1 where 19 not between 17 and f),(t1.b))*c+11) from t1 where 19 in (select -cast(avg(f) AS integer)-cast(avg(a) AS integer)*count(distinct d) from t1 union select count(*) from t1) and t1.d in (17,t1.d, -t1.c) or 19<>19),a)*11 FROM t1 WHERE not +19+coalesce((select -19 from t1 where t1.c | e in (select ~ -abs(case ~abs(+(count(distinct 17) | case ~(max(t1.a))+(+~ -min(17)) when abs(min(t1.c)) then count(distinct 17) else -cast(avg(t1.e) AS integer) end)*cast(avg(c) AS integer)*cast(avg(t1.b) AS integer)) when count(distinct t1.d) then min(b) else -cast(avg(d) AS integer) end) from t1 union select count(*) from t1)),13)>13} +} {} +do_test randexpr-2.1830 { + db eval {SELECT t1.e | t1.a*~(17)-t1.c*f*f-coalesce((select max( -t1.a+coalesce((select t1.a from t1 where 19 not between 17 and f),(t1.b))*c+11) from t1 where 19 in (select -cast(avg(f) AS integer)-cast(avg(a) AS integer)*count(distinct d) from t1 union select count(*) from t1) and t1.d in (17,t1.d, -t1.c) or 19<>19),a)*11 FROM t1 WHERE NOT (not +19+coalesce((select -19 from t1 where t1.c | e in (select ~ -abs(case ~abs(+(count(distinct 17) | case ~(max(t1.a))+(+~ -min(17)) when abs(min(t1.c)) then count(distinct 17) else -cast(avg(t1.e) AS integer) end)*cast(avg(c) AS integer)*cast(avg(t1.b) AS integer)) when count(distinct t1.d) then min(b) else -cast(avg(d) AS integer) end) from t1 union select count(*) from t1)),13)>13)} +} {-108002820} +do_test randexpr-2.1831 { + db eval {SELECT t1.e & t1.a*~(17)-t1.c*f*f-coalesce((select max( -t1.a+coalesce((select t1.a from t1 where 19 not 
between 17 and f),(t1.b))*c+11) from t1 where 19 in (select -cast(avg(f) AS integer)-cast(avg(a) AS integer)*count(distinct d) from t1 union select count(*) from t1) and t1.d in (17,t1.d, -t1.c) or 19<>19),a)*11 FROM t1 WHERE NOT (not +19+coalesce((select -19 from t1 where t1.c | e in (select ~ -abs(case ~abs(+(count(distinct 17) | case ~(max(t1.a))+(+~ -min(17)) when abs(min(t1.c)) then count(distinct 17) else -cast(avg(t1.e) AS integer) end)*cast(avg(c) AS integer)*cast(avg(t1.b) AS integer)) when count(distinct t1.d) then min(b) else -cast(avg(d) AS integer) end) from t1 union select count(*) from t1)),13)>13)} +} {420} +do_test randexpr-2.1832 { + db eval {SELECT case when t1.a<(abs((abs(13)/abs(case t1.e+11*17*case +case case when ~19>=t1.c then f when f not in (a,e,d) then 19 else b end when 19 then 19 else b end | -c when c then t1.c else c end*d-t1.e when e then 13 else c end)))/abs(t1.b)) then 17 when t1.b not in (t1.a,13,e) then e else -t1.c end FROM t1 WHERE 19>=11} +} {500} +do_test randexpr-2.1833 { + db eval {SELECT case when t1.a<(abs((abs(13)/abs(case t1.e+11*17*case +case case when ~19>=t1.c then f when f not in (a,e,d) then 19 else b end when 19 then 19 else b end | -c when c then t1.c else c end*d-t1.e when e then 13 else c end)))/abs(t1.b)) then 17 when t1.b not in (t1.a,13,e) then e else -t1.c end FROM t1 WHERE NOT (19>=11)} +} {} +do_test randexpr-2.1834 { + db eval {SELECT case when t1.a<(abs((abs(13)/abs(case t1.e+11*17*case +case case when ~19>=t1.c then f when f not in (a,e,d) then 19 else b end when 19 then 19 else b end & -c when c then t1.c else c end*d-t1.e when e then 13 else c end)))/abs(t1.b)) then 17 when t1.b not in (t1.a,13,e) then e else -t1.c end FROM t1 WHERE 19>=11} +} {500} +do_test randexpr-2.1835 { + db eval {SELECT coalesce((select (select count(*)-+count(*) from t1) from t1 where +t1.f+t1.f<~case when exists(select 1 from t1 where coalesce((select t1.a from t1 where b-19(11) then d else (c) end+e+t1.d),c) FROM t1 WHERE case when t1.f+(select min(t1.c+(abs(13+(abs(t1.e+d-11-e+d+c)/abs(t1.e))-c*t1.f)/abs(t1.b))) from t1)+f=d or t1.b not between t1.e and t1.a then 11-a when (13 in (13,19,19)) then t1.d else t1.b end(11) then d else (c) end+e+t1.d),c) FROM t1 WHERE NOT (case when t1.f+(select min(t1.c+(abs(13+(abs(t1.e+d-11-e+d+c)/abs(t1.e))-c*t1.f)/abs(t1.b))) from t1)+f=d or t1.b not between t1.e and t1.a then 11-a when (13 in (13,19,19)) then t1.d else t1.b end=t1.b)),((d))) and t1.c then 19 else 13 end FROM t1 WHERE d*a in (case when 19>t1.e then c | (select +~count(distinct t1.b)-++abs(min(11)+ -~min(t1.b)+ -(max(t1.d))-min(t1.b)-count(distinct t1.f)) | max(f) from t1) else a+t1.c end,t1.a+case 19 when (11*a)*t1.c then d else a end*t1.a-d,f)} +} {} +do_test randexpr-2.1838 { + db eval {SELECT case when not exists(select 1 from t1 where not exists(select 1 from t1 where (case when (abs(a)/abs(t1.f))=t1.b)),((d))) and t1.c then 19 else 13 end FROM t1 WHERE NOT (d*a in (case when 19>t1.e then c | (select +~count(distinct t1.b)-++abs(min(11)+ -~min(t1.b)+ -(max(t1.d))-min(t1.b)-count(distinct t1.f)) | max(f) from t1) else a+t1.c end,t1.a+case 19 when (11*a)*t1.c then d else a end*t1.a-d,f))} +} {-14} +do_test randexpr-2.1839 { + db eval {SELECT case when b<= -coalesce((select max(t1.b+t1.c+a*a-t1.c-e) from t1 where not 11>e or c<=17 and b<=b), -t1.d)+13 and 17 not between a and c and not exists(select 1 from t1 where f not between t1.d and t1.e and t1.d between 13 and -t1.a) then t1.b when e not in (a,t1.f, -t1.d) then t1.c else e end FROM t1 WHERE 
not coalesce((select max((c)) from t1 where d* -coalesce((select max(coalesce((select t1.d from t1 where (d<=d)),d)) from t1 where (abs(case 11 when (select -+count(distinct 13)*min(c) | count(*) from t1) then 11 else case when (19 between (19) and 17 or t1.c in (c,19,d)) then c when -t1.e>=c then c else c end end*11)/abs((13))) not in (17, -t1.e, -t1.e)),t1.e)=17),t1.c) between 13 and t1.f} +} {} +do_test randexpr-2.1840 { + db eval {SELECT case when b<= -coalesce((select max(t1.b+t1.c+a*a-t1.c-e) from t1 where not 11>e or c<=17 and b<=b), -t1.d)+13 and 17 not between a and c and not exists(select 1 from t1 where f not between t1.d and t1.e and t1.d between 13 and -t1.a) then t1.b when e not in (a,t1.f, -t1.d) then t1.c else e end FROM t1 WHERE NOT (not coalesce((select max((c)) from t1 where d* -coalesce((select max(coalesce((select t1.d from t1 where (d<=d)),d)) from t1 where (abs(case 11 when (select -+count(distinct 13)*min(c) | count(*) from t1) then 11 else case when (19 between (19) and 17 or t1.c in (c,19,d)) then c when -t1.e>=c then c else c end end*11)/abs((13))) not in (17, -t1.e, -t1.e)),t1.e)=17),t1.c) between 13 and t1.f)} +} {300} +do_test randexpr-2.1841 { + db eval {SELECT 17 | -(case when (a) not between 11 and case (abs(~(abs(13-case (abs(17)/abs(19+e | t1.e)) when (t1.d) then -(t1.c) else t1.f end+t1.f*t1.c)/abs(a)))/abs(17)) when 19 then d else t1.d end then 17 when not exists(select 1 from t1 where t1.e not in (t1.b,t1.b,b) or a between a and 17) then 11 else b end) | t1.e FROM t1 WHERE 17>=e} +} {} +do_test randexpr-2.1842 { + db eval {SELECT 17 | -(case when (a) not between 11 and case (abs(~(abs(13-case (abs(17)/abs(19+e | t1.e)) when (t1.d) then -(t1.c) else t1.f end+t1.f*t1.c)/abs(a)))/abs(17)) when 19 then d else t1.d end then 17 when not exists(select 1 from t1 where t1.e not in (t1.b,t1.b,b) or a between a and 17) then 11 else b end) | t1.e FROM t1 WHERE NOT (17>=e)} +} {-3} +do_test randexpr-2.1843 { + db eval {SELECT 17 & -(case when (a) not between 11 and case (abs(~(abs(13-case (abs(17)/abs(19+e & t1.e)) when (t1.d) then -(t1.c) else t1.f end+t1.f*t1.c)/abs(a)))/abs(17)) when 19 then d else t1.d end then 17 when not exists(select 1 from t1 where t1.e not in (t1.b,t1.b,b) or a between a and 17) then 11 else b end) & t1.e FROM t1 WHERE NOT (17>=e)} +} {16} +do_test randexpr-2.1844 { + db eval {SELECT (abs(11 | t1.e)/abs(case 13 when 17*+t1.a+d*+d then ~ -17*+ - -case f when b then a-b else t1.d end-f+t1.f+17 else t1.a end)) FROM t1 WHERE t1.f<>17} +} {5} +do_test randexpr-2.1845 { + db eval {SELECT (abs(11 | t1.e)/abs(case 13 when 17*+t1.a+d*+d then ~ -17*+ - -case f when b then a-b else t1.d end-f+t1.f+17 else t1.a end)) FROM t1 WHERE NOT (t1.f<>17)} +} {} +do_test randexpr-2.1846 { + db eval {SELECT (abs(11 & t1.e)/abs(case 13 when 17*+t1.a+d*+d then ~ -17*+ - -case f when b then a-b else t1.d end-f+t1.f+17 else t1.a end)) FROM t1 WHERE t1.f<>17} +} {0} +do_test randexpr-2.1847 { + db eval {SELECT case when not coalesce((select max(case 11 when case d when coalesce((select f from t1 where case when (t1.d) | case 11 when t1.d then 11 else (t1.a) end | t1.b in (e,c,(11)) then 17 when c between e and f then t1.e else t1.a end>t1.e),t1.d) then 13 else c end then c else c end) from t1 where t1.e not between (a) and (t1.d)),c) not in ((11),((19)),a) then (t1.c) else (c) end FROM t1 WHERE t1.d-t1.c>=13} +} {300} +do_test randexpr-2.1848 { + db eval {SELECT case when not coalesce((select max(case 11 when case d when coalesce((select f from t1 where case when 
(t1.d) | case 11 when t1.d then 11 else (t1.a) end | t1.b in (e,c,(11)) then 17 when c between e and f then t1.e else t1.a end>t1.e),t1.d) then 13 else c end then c else c end) from t1 where t1.e not between (a) and (t1.d)),c) not in ((11),((19)),a) then (t1.c) else (c) end FROM t1 WHERE NOT (t1.d-t1.c>=13)} +} {} +do_test randexpr-2.1849 { + db eval {SELECT case when not coalesce((select max(case 11 when case d when coalesce((select f from t1 where case when (t1.d) & case 11 when t1.d then 11 else (t1.a) end & t1.b in (e,c,(11)) then 17 when c between e and f then t1.e else t1.a end>t1.e),t1.d) then 13 else c end then c else c end) from t1 where t1.e not between (a) and (t1.d)),c) not in ((11),((19)),a) then (t1.c) else (c) end FROM t1 WHERE t1.d-t1.c>=13} +} {300} +do_test randexpr-2.1850 { + db eval {SELECT (select case count(distinct ~case when (abs(coalesce((select t1.e from t1 where (not exists(select 1 from t1 where a | e between c and coalesce((select (t1.d) from t1 where (t1.a=t1.a)), -(c))))),b+19))/abs(d))>11 then b else t1.d end) when count(distinct 17) then ~min((t1.d))-case -(case -cast(avg(t1.b) AS integer) when -max(13) then (min(t1.d)) else count(distinct t1.b) end+(min(t1.e)))*(cast(avg( -17) AS integer)) when -count(distinct 19) then count(*) else max(t1.c) end else count(distinct f) end from t1) FROM t1 WHERE t1.e=t1.d} +} {} +do_test randexpr-2.1851 { + db eval {SELECT (select case count(distinct ~case when (abs(coalesce((select t1.e from t1 where (not exists(select 1 from t1 where a | e between c and coalesce((select (t1.d) from t1 where (t1.a=t1.a)), -(c))))),b+19))/abs(d))>11 then b else t1.d end) when count(distinct 17) then ~min((t1.d))-case -(case -cast(avg(t1.b) AS integer) when -max(13) then (min(t1.d)) else count(distinct t1.b) end+(min(t1.e)))*(cast(avg( -17) AS integer)) when -count(distinct 19) then count(*) else max(t1.c) end else count(distinct f) end from t1) FROM t1 WHERE NOT (t1.e=t1.d)} +} {-701} +do_test randexpr-2.1852 { + db eval {SELECT (select case count(distinct ~case when (abs(coalesce((select t1.e from t1 where (not exists(select 1 from t1 where a & e between c and coalesce((select (t1.d) from t1 where (t1.a=t1.a)), -(c))))),b+19))/abs(d))>11 then b else t1.d end) when count(distinct 17) then ~min((t1.d))-case -(case -cast(avg(t1.b) AS integer) when -max(13) then (min(t1.d)) else count(distinct t1.b) end+(min(t1.e)))*(cast(avg( -17) AS integer)) when -count(distinct 19) then count(*) else max(t1.c) end else count(distinct f) end from t1) FROM t1 WHERE NOT (t1.e=t1.d)} +} {-701} +do_test randexpr-2.1853 { + db eval {SELECT case when t1.d<>t1.a and a-t1.b-c in (13,t1.e-(abs(d)/abs(coalesce((select max(e) from t1 where 19 not between -b and 17),b))),t1.b) or (not exists(select 1 from t1 where t1.f not between -17 and a)) then case when e>t1.e then t1.d when t1.cf-11 and not not bt1.a and a-t1.b-c in (13,t1.e-(abs(d)/abs(coalesce((select max(e) from t1 where 19 not between -b and 17),b))),t1.b) or (not exists(select 1 from t1 where t1.f not between -17 and a)) then case when e>t1.e then t1.d when t1.cf-11 and not not bcoalesce((select d from t1 where not exists(select 1 from t1 where c*b in (t1.b*t1.d,t1.c,b) or c between d and a)),19)-t1.c*e) then coalesce((select f from t1 where 11 in (select c from t1 union select b from t1)),(c)) when 17<>t1.b then d else 19 end)) from t1 where f in (a,t1.f,t1.d)),(19)) else t1.e end FROM t1 WHERE not not exists(select 1 from t1 where (select abs(min(19-19))*count(*) | min(13)*min((a))+cast(avg(t1.e) AS 
integer)+(cast(avg(19) AS integer)) from t1) in (17,+(select min(b) from t1),(select cast(avg(f) AS integer) from t1)) and 11 in (select ( -(min(c))) from t1 union select (min(13)) from t1)) or t1.d between c and -(f) or not t1.c in (select min(e) from t1 union select -(max(e)) from t1) and t1.e in (t1.f,a,11) and f not between a and 17 and 19<=17 and 13 not in (b,t1.b,13)} +} {} +do_test randexpr-2.1856 { + db eval {SELECT ~case 17 when t1.c then coalesce((select max((case when not exists(select 1 from t1 where 11<>coalesce((select d from t1 where not exists(select 1 from t1 where c*b in (t1.b*t1.d,t1.c,b) or c between d and a)),19)-t1.c*e) then coalesce((select f from t1 where 11 in (select c from t1 union select b from t1)),(c)) when 17<>t1.b then d else 19 end)) from t1 where f in (a,t1.f,t1.d)),(19)) else t1.e end FROM t1 WHERE NOT (not not exists(select 1 from t1 where (select abs(min(19-19))*count(*) | min(13)*min((a))+cast(avg(t1.e) AS integer)+(cast(avg(19) AS integer)) from t1) in (17,+(select min(b) from t1),(select cast(avg(f) AS integer) from t1)) and 11 in (select ( -(min(c))) from t1 union select (min(13)) from t1)) or t1.d between c and -(f) or not t1.c in (select min(e) from t1 union select -(max(e)) from t1) and t1.e in (t1.f,a,11) and f not between a and 17 and 19<=17 and 13 not in (b,t1.b,13))} +} {-501} +do_test randexpr-2.1857 { + db eval {SELECT coalesce((select +f+coalesce((select case when b- -f*t1.d-t1.a>d and t1.e in (select b from t1 union select t1.d from t1) then 11 else -a end from t1 where (not (t1.f) not between 13 and t1.f)),11) from t1 where t1.c<=c and (d) between 11 and t1.b or b in (select d from t1 union select t1.e from t1) and 19<>t1.e or 19 not between a and a),19) FROM t1 WHERE t1.e<>(select cast(avg((b)*(select abs( -max(t1.f)) from t1)) AS integer) from t1)} +} {500} +do_test randexpr-2.1858 { + db eval {SELECT coalesce((select +f+coalesce((select case when b- -f*t1.d-t1.a>d and t1.e in (select b from t1 union select t1.d from t1) then 11 else -a end from t1 where (not (t1.f) not between 13 and t1.f)),11) from t1 where t1.c<=c and (d) between 11 and t1.b or b in (select d from t1 union select t1.e from t1) and 19<>t1.e or 19 not between a and a),19) FROM t1 WHERE NOT (t1.e<>(select cast(avg((b)*(select abs( -max(t1.f)) from t1)) AS integer) from t1))} +} {} +do_test randexpr-2.1859 { + db eval {SELECT coalesce((select max(coalesce((select (t1.f) from t1 where f in (select t1.f+(select case count(distinct 19) when count(*) then max(t1.d) else -count(*) end from t1) from t1 union select 13 from t1)),c)) from t1 where not exists(select 1 from t1 where coalesce((select (select +count(*) | ~(~ -count(distinct -13)) from t1) from t1 where case when d*~f-13<>((d)) then 19 else t1.a end not between d and t1.e),t1.f) | t1.d in (select t1.d from t1 union select t1.d from t1))),13) FROM t1 WHERE b= -~case f-t1.a | ~case coalesce((select max(t1.b) from t1 where b>t1.e or t1.d=17-(abs(coalesce((select 17 from t1 where 17 between c and c),a)-c+a)/abs(t1.f))-c and d not in (17,19,c)),c) when 11 then e else t1.d end | t1.c-d when t1.c then c else e end | a} +} {} +do_test randexpr-2.1860 { + db eval {SELECT coalesce((select max(coalesce((select (t1.f) from t1 where f in (select t1.f+(select case count(distinct 19) when count(*) then max(t1.d) else -count(*) end from t1) from t1 union select 13 from t1)),c)) from t1 where not exists(select 1 from t1 where coalesce((select (select +count(*) | ~(~ -count(distinct -13)) from t1) from t1 where case when 
d*~f-13<>((d)) then 19 else t1.a end not between d and t1.e),t1.f) | t1.d in (select t1.d from t1 union select t1.d from t1))),13) FROM t1 WHERE NOT (b= -~case f-t1.a | ~case coalesce((select max(t1.b) from t1 where b>t1.e or t1.d=17-(abs(coalesce((select 17 from t1 where 17 between c and c),a)-c+a)/abs(t1.f))-c and d not in (17,19,c)),c) when 11 then e else t1.d end | t1.c-d when t1.c then c else e end | a)} +} {300} +do_test randexpr-2.1861 { + db eval {SELECT coalesce((select max(coalesce((select (t1.f) from t1 where f in (select t1.f+(select case count(distinct 19) when count(*) then max(t1.d) else -count(*) end from t1) from t1 union select 13 from t1)),c)) from t1 where not exists(select 1 from t1 where coalesce((select (select +count(*) & ~(~ -count(distinct -13)) from t1) from t1 where case when d*~f-13<>((d)) then 19 else t1.a end not between d and t1.e),t1.f) & t1.d in (select t1.d from t1 union select t1.d from t1))),13) FROM t1 WHERE NOT (b= -~case f-t1.a | ~case coalesce((select max(t1.b) from t1 where b>t1.e or t1.d=17-(abs(coalesce((select 17 from t1 where 17 between c and c),a)-c+a)/abs(t1.f))-c and d not in (17,19,c)),c) when 11 then e else t1.d end | t1.c-d when t1.c then c else e end | a)} +} {300} +do_test randexpr-2.1862 { + db eval {SELECT coalesce((select max(( -17-case when t1.c-13<+11 | (t1.a)-e then 13*t1.a when coalesce((select 11 from t1 where t1.a>t1.c),17)+f+t1.a not between 13 and e then t1.f else c end)) from t1 where exists(select 1 from t1 where (19>=t1.f)) and f in (select max(13) from t1 union select count(*) from t1) and (19)>=t1.f),d) FROM t1 WHERE t1.f in (select 19 from t1 union select -17*11+t1.d from t1)} +} {} +do_test randexpr-2.1863 { + db eval {SELECT coalesce((select max(( -17-case when t1.c-13<+11 | (t1.a)-e then 13*t1.a when coalesce((select 11 from t1 where t1.a>t1.c),17)+f+t1.a not between 13 and e then t1.f else c end)) from t1 where exists(select 1 from t1 where (19>=t1.f)) and f in (select max(13) from t1 union select count(*) from t1) and (19)>=t1.f),d) FROM t1 WHERE NOT (t1.f in (select 19 from t1 union select -17*11+t1.d from t1))} +} {400} +do_test randexpr-2.1864 { + db eval {SELECT coalesce((select max(( -17-case when t1.c-13<+11 & (t1.a)-e then 13*t1.a when coalesce((select 11 from t1 where t1.a>t1.c),17)+f+t1.a not between 13 and e then t1.f else c end)) from t1 where exists(select 1 from t1 where (19>=t1.f)) and f in (select max(13) from t1 union select count(*) from t1) and (19)>=t1.f),d) FROM t1 WHERE NOT (t1.f in (select 19 from t1 union select -17*11+t1.d from t1))} +} {400} +do_test randexpr-2.1865 { + db eval {SELECT (t1.a)+c*case when not b in (select 11 from t1 union select 13 from t1) and a=case when not exists(select 1 from t1 where exists(select 1 from t1 where ~+( -(17)) | t1.d>=b)) then case t1.d when c then 19 else t1.c end*a else d end-a then e when b not between f and d and t1.b>=11 or not (t1.e>=b) and 11<=17 then 13 else t1.d end FROM t1 WHERE b in (e | 19,t1.c,case 17 when 19 then +(case when 19> -coalesce((select max(a) from t1 where t1.a not in (case e when t1.d then (c)-c else 11 end | (11)-c*t1.f,e,t1.f)),13) then t1.e else t1.e end)*e else 13 end-19+ - -t1.d* -b) or -t1.e>b} +} {} +do_test randexpr-2.1866 { + db eval {SELECT (t1.a)+c*case when not b in (select 11 from t1 union select 13 from t1) and a=case when not exists(select 1 from t1 where exists(select 1 from t1 where ~+( -(17)) | t1.d>=b)) then case t1.d when c then 19 else t1.c end*a else d end-a then e when b not between f and d and t1.b>=11 or 
not (t1.e>=b) and 11<=17 then 13 else t1.d end FROM t1 WHERE NOT (b in (e | 19,t1.c,case 17 when 19 then +(case when 19> -coalesce((select max(a) from t1 where t1.a not in (case e when t1.d then (c)-c else 11 end | (11)-c*t1.f,e,t1.f)),13) then t1.e else t1.e end)*e else 13 end-19+ - -t1.d* -b) or -t1.e>b)} +} {4000} +do_test randexpr-2.1867 { + db eval {SELECT (t1.a)+c*case when not b in (select 11 from t1 union select 13 from t1) and a=case when not exists(select 1 from t1 where exists(select 1 from t1 where ~+( -(17)) & t1.d>=b)) then case t1.d when c then 19 else t1.c end*a else d end-a then e when b not between f and d and t1.b>=11 or not (t1.e>=b) and 11<=17 then 13 else t1.d end FROM t1 WHERE NOT (b in (e | 19,t1.c,case 17 when 19 then +(case when 19> -coalesce((select max(a) from t1 where t1.a not in (case e when t1.d then (c)-c else 11 end | (11)-c*t1.f,e,t1.f)),13) then t1.e else t1.e end)*e else 13 end-19+ - -t1.d* -b) or -t1.e>b)} +} {4000} +do_test randexpr-2.1868 { + db eval {SELECT case when b | case when 13 between ~coalesce((select 19 from t1 where b in (select max(a*17) from t1 union select cast(avg( -19) AS integer)+abs(cast(avg(a) AS integer)) from t1)),17) and 11 then a when not exists(select 1 from t1 where not exists(select 1 from t1 where 19<=e)) and d in (select 17 from t1 union select 17 from t1) then -b else a end-a in (select max(t1.c) from t1 union select min(e) | count(*)+count(*) from t1) then t1.b when not (not 19<=t1.b) then 13 else a end-b FROM t1 WHERE +~+13>=t1.c} +} {} +do_test randexpr-2.1869 { + db eval {SELECT case when b | case when 13 between ~coalesce((select 19 from t1 where b in (select max(a*17) from t1 union select cast(avg( -19) AS integer)+abs(cast(avg(a) AS integer)) from t1)),17) and 11 then a when not exists(select 1 from t1 where not exists(select 1 from t1 where 19<=e)) and d in (select 17 from t1 union select 17 from t1) then -b else a end-a in (select max(t1.c) from t1 union select min(e) | count(*)+count(*) from t1) then t1.b when not (not 19<=t1.b) then 13 else a end-b FROM t1 WHERE NOT (+~+13>=t1.c)} +} {-187} +do_test randexpr-2.1870 { + db eval {SELECT case when b & case when 13 between ~coalesce((select 19 from t1 where b in (select max(a*17) from t1 union select cast(avg( -19) AS integer)+abs(cast(avg(a) AS integer)) from t1)),17) and 11 then a when not exists(select 1 from t1 where not exists(select 1 from t1 where 19<=e)) and d in (select 17 from t1 union select 17 from t1) then -b else a end-a in (select max(t1.c) from t1 union select min(e) & count(*)+count(*) from t1) then t1.b when not (not 19<=t1.b) then 13 else a end-b FROM t1 WHERE NOT (+~+13>=t1.c)} +} {0} +do_test randexpr-2.1871 { + db eval {SELECT coalesce((select case when t1.d in (select min(f) from t1 union select ~ -count(*) from t1) then d else 13 end from t1 where 19<=case when 13>=+17-c+(select count(*)-min(11) from t1)+t1.f*t1.e*c*t1.d then c else t1.b end),t1.c | +t1.e) FROM t1 WHERE a between (abs(coalesce((select max(case when (t1.d+a*coalesce((select d-13 from t1 where a<=19),t1.f))>= -c then c else 13 end) from t1 where f>=d or t1.a>t1.d),t1.a))/abs(t1.f)) and 19 and 13=t1.e and not exists(select 1 from t1 where 13 in (select -( -count(distinct (a))) | -cast(avg(t1.c) AS integer) from t1 union select (min(t1.c)) from t1) or t1.f not between t1.c and t1.c)} +} {} +do_test randexpr-2.1872 { + db eval {SELECT coalesce((select case when t1.d in (select min(f) from t1 union select ~ -count(*) from t1) then d else 13 end from t1 where 19<=case when 
13>=+17-c+(select count(*)-min(11) from t1)+t1.f*t1.e*c*t1.d then c else t1.b end),t1.c | +t1.e) FROM t1 WHERE NOT (a between (abs(coalesce((select max(case when (t1.d+a*coalesce((select d-13 from t1 where a<=19),t1.f))>= -c then c else 13 end) from t1 where f>=d or t1.a>t1.d),t1.a))/abs(t1.f)) and 19 and 13=t1.e and not exists(select 1 from t1 where 13 in (select -( -count(distinct (a))) | -cast(avg(t1.c) AS integer) from t1 union select (min(t1.c)) from t1) or t1.f not between t1.c and t1.c))} +} {13} +do_test randexpr-2.1873 { + db eval {SELECT coalesce((select case when t1.d in (select min(f) from t1 union select ~ -count(*) from t1) then d else 13 end from t1 where 19<=case when 13>=+17-c+(select count(*)-min(11) from t1)+t1.f*t1.e*c*t1.d then c else t1.b end),t1.c & +t1.e) FROM t1 WHERE NOT (a between (abs(coalesce((select max(case when (t1.d+a*coalesce((select d-13 from t1 where a<=19),t1.f))>= -c then c else 13 end) from t1 where f>=d or t1.a>t1.d),t1.a))/abs(t1.f)) and 19 and 13=t1.e and not exists(select 1 from t1 where 13 in (select -( -count(distinct (a))) | -cast(avg(t1.c) AS integer) from t1 union select (min(t1.c)) from t1) or t1.f not between t1.c and t1.c))} +} {13} +do_test randexpr-2.1874 { + db eval {SELECT coalesce((select max(f) from t1 where coalesce((select max(+(select cast(avg(t1.d) AS integer) from t1)*19) from t1 where (abs(t1.d*t1.c)/abs(f))+t1.e> -t1.d*t1.e-a),13-case +coalesce((select max(t1.c) from t1 where d<=d and - -t1.a<=(t1.f) or f<>19 and 17<19),(t1.c)) | c*t1.f*b when t1.a then 19 else (b) end)>=a),t1.f) FROM t1 WHERE (~c-13*case when case when a+19-c in (select case count(distinct c)-case cast(avg(t1.a) AS integer) when max( -b) then cast(avg(t1.f) AS integer) else cast(avg((t1.d)) AS integer) end when min(b) then cast(avg(b) AS integer) else count(*) end from t1 union select max(t1.f) from t1) and 17 between (t1.b) and c and 19>t1.d or 19 in (t1.f,t1.a,(b)) then a else t1.e end in (13,t1.a,b) then 17 else t1.e end*t1.d+a+t1.b+t1.f) -t1.d*t1.e-a),13-case +coalesce((select max(t1.c) from t1 where d<=d and - -t1.a<=(t1.f) or f<>19 and 17<19),(t1.c)) | c*t1.f*b when t1.a then 19 else (b) end)>=a),t1.f) FROM t1 WHERE NOT ((~c-13*case when case when a+19-c in (select case count(distinct c)-case cast(avg(t1.a) AS integer) when max( -b) then cast(avg(t1.f) AS integer) else cast(avg((t1.d)) AS integer) end when min(b) then cast(avg(b) AS integer) else count(*) end from t1 union select max(t1.f) from t1) and 17 between (t1.b) and c and 19>t1.d or 19 in (t1.f,t1.a,(b)) then a else t1.e end in (13,t1.a,b) then 17 else t1.e end*t1.d+a+t1.b+t1.f) -t1.d*t1.e-a),13-case +coalesce((select max(t1.c) from t1 where d<=d and - -t1.a<=(t1.f) or f<>19 and 17<19),(t1.c)) & c*t1.f*b when t1.a then 19 else (b) end)>=a),t1.f) FROM t1 WHERE (~c-13*case when case when a+19-c in (select case count(distinct c)-case cast(avg(t1.a) AS integer) when max( -b) then cast(avg(t1.f) AS integer) else cast(avg((t1.d)) AS integer) end when min(b) then cast(avg(b) AS integer) else count(*) end from t1 union select max(t1.f) from t1) and 17 between (t1.b) and c and 19>t1.d or 19 in (t1.f,t1.a,(b)) then a else t1.e end in (13,t1.a,b) then 17 else t1.e end*t1.d+a+t1.b+t1.f) -b then a else f end then t1.e else f end FROM t1 WHERE t1.f-~coalesce((select 17 from t1 where (e+ -case when (d -b then a else f end then t1.e else f end FROM t1 WHERE NOT (t1.f-~coalesce((select 17 from t1 where (e+ -case when (d -b then a else f end then t1.e else f end FROM t1 WHERE t1.f-~coalesce((select 17 from t1 
where (e+ -case when (d19) then case when f in (11,11,d) then t1.e when t1.a<=19 then a else e end else -t1.e end from t1) then e when t1.c=t1.b then 19 else e end when f then 19 else c end) | d-c and a} +} {} +do_test randexpr-2.1881 { + db eval {SELECT -(17-case (select max(a)+min( -(abs(t1.c-t1.a | t1.e- -t1.b+19*t1.c)/abs(c)))+ -case min(a) when count(distinct -a) then count(distinct 19) else count(*) end-count(distinct t1.e)*(max(t1.a))-(count(distinct 19)) from t1) when (select count(*) from t1) then b else t1.d end*11)-11* -c FROM t1 WHERE NOT (f between (case case when +(f) in (select t1.f from t1 union select case when t1.b in ((abs(19)/abs(11)),f,c) and (d) in (select -count(distinct b)*count(*) from t1 union select count(distinct -13) from t1) or (t1.e>19) then case when f in (11,11,d) then t1.e when t1.a<=19 then a else e end else -t1.e end from t1) then e when t1.c=t1.b then 19 else e end when f then 19 else c end) | d-c and a)} +} {7683} +do_test randexpr-2.1882 { + db eval {SELECT -(17-case (select max(a)+min( -(abs(t1.c-t1.a & t1.e- -t1.b+19*t1.c)/abs(c)))+ -case min(a) when count(distinct -a) then count(distinct 19) else count(*) end-count(distinct t1.e)*(max(t1.a))-(count(distinct 19)) from t1) when (select count(*) from t1) then b else t1.d end*11)-11* -c FROM t1 WHERE NOT (f between (case case when +(f) in (select t1.f from t1 union select case when t1.b in ((abs(19)/abs(11)),f,c) and (d) in (select -count(distinct b)*count(*) from t1 union select count(distinct -13) from t1) or (t1.e>19) then case when f in (11,11,d) then t1.e when t1.a<=19 then a else e end else -t1.e end from t1) then e when t1.c=t1.b then 19 else e end when f then 19 else c end) | d-c and a)} +} {7683} +do_test randexpr-2.1883 { + db eval {SELECT (case coalesce((select +a from t1 where exists(select 1 from t1 where t1.e not between t1.f-b and (select count(*) from t1)-t1.f | e)),t1.d+case when t1.b not between t1.d and 17+case when a in (t1.d,a,(abs(~case when (t1.f)<=t1.c then -17 else t1.f end)/abs(19))*d*t1.d) then (b) else a end then 11 else f end+(t1.a)) when f then 17 else t1.e end) FROM t1 WHERE a-e*case t1.d+case c when (abs(~t1.d*(select min(case when not exists(select 1 from t1 where ((d)+(19) between 11 and 19)) then 11+13 when e not in (t1.f,17,e) and t1.c<>a then t1.e else 19 end*t1.a-f) from t1))/abs(b)) then 13 else 17 end when t1.f then (f) else t1.c enda then t1.e else 19 end*t1.a-f) from t1))/abs(b)) then 13 else 17 end when t1.f then (f) else t1.c enda then t1.e else 19 end*t1.a-f) from t1))/abs(b)) then 13 else 17 end when t1.f then (f) else t1.c end(13) | -11 and c>f),19)+t1.c) AS integer)-(~case cast(avg(t1.f) AS integer)*abs(max(b)+(count(*))+count(distinct b)) when min(c) then cast(avg(t1.b) AS integer) else count(*) end) from t1) in (select t1.e from t1 union select t1.c from t1) then f when d=(c)),b)) when max(d) then ( -max(c))+((min(11)))+max(t1.c) else max(t1.a) end from t1)-e+t1.b-(b)-11-t1.d*t1.c<>t1.c)} +} {} +do_test randexpr-2.1890 { + db eval {SELECT (abs(b)/abs((select max(c) from t1)))+case when (select +cast(avg(17 | ~coalesce((select e from t1 where t1.b>(13) | -11 and c>f),19)+t1.c) AS integer)-(~case cast(avg(t1.f) AS integer)*abs(max(b)+(count(*))+count(distinct b)) when min(c) then cast(avg(t1.b) AS integer) else count(*) end) from t1) in (select t1.e from t1 union select t1.c from t1) then f when d=(c)),b)) when max(d) then ( -max(c))+((min(11)))+max(t1.c) else max(t1.a) end from t1)-e+t1.b-(b)-11-t1.d*t1.c<>t1.c))} +} {500} +do_test randexpr-2.1891 { + db 
eval {SELECT (abs(b)/abs((select max(c) from t1)))+case when (select +cast(avg(17 & ~coalesce((select e from t1 where t1.b>(13) & -11 and c>f),19)+t1.c) AS integer)-(~case cast(avg(t1.f) AS integer)*abs(max(b)+(count(*))+count(distinct b)) when min(c) then cast(avg(t1.b) AS integer) else count(*) end) from t1) in (select t1.e from t1 union select t1.c from t1) then f when d=(c)),b)) when max(d) then ( -max(c))+((min(11)))+max(t1.c) else max(t1.a) end from t1)-e+t1.b-(b)-11-t1.d*t1.c<>t1.c))} +} {500} +do_test randexpr-2.1892 { + db eval {SELECT coalesce((select max(case 13 when +coalesce((select max(c) from t1 where t1.a+f in (t1.b,t1.b,(abs(19)/abs(+17+~(abs(t1.a+f)/abs(~t1.c))+case when not exists(select 1 from t1 where coalesce((select max(e) from t1 where (b)> -t1.a),c)=t1.b) then 19 else b end))-f)),t1.a)+a then f else 17 end*t1.e) from t1 where 13<>t1.f),t1.f) FROM t1 WHERE not exists(select 1 from t1 where coalesce((select max(case when 13 not in (t1.b,b,t1.c) then t1.c | t1.a else 13-19+a+t1.e-t1.e end+t1.a*17+( -d)-17 | t1.c-t1.a | 19+b) from t1 where not a=e),11) in (select 17 from t1 union select c from t1) or t1.c>=t1.e)} +} {8500} +do_test randexpr-2.1893 { + db eval {SELECT coalesce((select max(case 13 when +coalesce((select max(c) from t1 where t1.a+f in (t1.b,t1.b,(abs(19)/abs(+17+~(abs(t1.a+f)/abs(~t1.c))+case when not exists(select 1 from t1 where coalesce((select max(e) from t1 where (b)> -t1.a),c)=t1.b) then 19 else b end))-f)),t1.a)+a then f else 17 end*t1.e) from t1 where 13<>t1.f),t1.f) FROM t1 WHERE NOT (not exists(select 1 from t1 where coalesce((select max(case when 13 not in (t1.b,b,t1.c) then t1.c | t1.a else 13-19+a+t1.e-t1.e end+t1.a*17+( -d)-17 | t1.c-t1.a | 19+b) from t1 where not a=e),11) in (select 17 from t1 union select c from t1) or t1.c>=t1.e))} +} {} +do_test randexpr-2.1894 { + db eval {SELECT t1.f++c*coalesce((select +t1.f from t1 where a<~13),case when exists(select 1 from t1 where not exists(select 1 from t1 where 17 in (select 11 from t1 union select (17) from t1))) then b else + -19 | coalesce((select case when (select cast(avg(a) AS integer) from t1)>e then case when (11=(e) and 17=t1.a then -c else 19 end-f) from t1 where not exists(select 1 from t1 where d in (select -~count(distinct 11)+max(t1.a) from t1 union select count(distinct e)* -count(distinct t1.b) from t1)) or (ce then case when (11=(e) and 17=t1.a then -c else 19 end-f) from t1 where not exists(select 1 from t1 where d in (select -~count(distinct 11)+max(t1.a) from t1 union select count(distinct e)* -count(distinct t1.b) from t1)) or (ce then case when (11=(e) and 17=t1.a then -c else 19 end-f) from t1 where not exists(select 1 from t1 where d in (select -~count(distinct 11)+max(t1.a) from t1 union select count(distinct e)* -count(distinct t1.b) from t1)) or (c=t1.c then f else t1.f end-f) from t1) FROM t1 WHERE (~ -t1.c in (select d from t1 union select t1.c*case when (17 in (+case c when t1.f then t1.c else c+e+t1.c*e+coalesce((select max((c)) from t1 where (t1.e)>a),19)-t1.e end | e-t1.b,t1.d,c)) then d when t1.e in (select (max( -e)-~~count(*)) from t1 union select count(*) from t1) then d else t1.f end from t1))} +} {} +do_test randexpr-2.1898 { + db eval {SELECT (select + -min(case when case when exists(select 1 from t1 where a<=c or coalesce((select max(11) from t1 where d=17),19)*11<=f and t1.e in (select t1.c from t1 union select a from t1)) then (f)-d when a=t1.c then f else t1.f end-f) from t1) FROM t1 WHERE NOT ((~ -t1.c in (select d from t1 union select t1.c*case when 
(17 in (+case c when t1.f then t1.c else c+e+t1.c*e+coalesce((select max((c)) from t1 where (t1.e)>a),19)-t1.e end | e-t1.b,t1.d,c)) then d when t1.e in (select (max( -e)-~~count(*)) from t1 union select count(*) from t1) then d else t1.f end from t1)))} +} {0} +do_test randexpr-2.1899 { + db eval {SELECT ~+t1.f-coalesce((select t1.f from t1 where case when t1.d>=t1.f then b else case c when 11 then t1.f*case when (select (count(*)) from t1)<>a+t1.b | +11-coalesce((select max(t1.f-a) from t1 where (f<=t1.c and t1.d=c)),d)-11 then b when (t1.c)<=t1.b then 19 else 11 end else c end end*19-13< - -b),e) FROM t1 WHERE (exists(select 1 from t1 where -t1.f not between ~17 and t1.a+(b+d+13)))} +} {-1101} +do_test randexpr-2.1900 { + db eval {SELECT ~+t1.f-coalesce((select t1.f from t1 where case when t1.d>=t1.f then b else case c when 11 then t1.f*case when (select (count(*)) from t1)<>a+t1.b | +11-coalesce((select max(t1.f-a) from t1 where (f<=t1.c and t1.d=c)),d)-11 then b when (t1.c)<=t1.b then 19 else 11 end else c end end*19-13< - -b),e) FROM t1 WHERE NOT ((exists(select 1 from t1 where -t1.f not between ~17 and t1.a+(b+d+13))))} +} {} +do_test randexpr-2.1901 { + db eval {SELECT ~+t1.f-coalesce((select t1.f from t1 where case when t1.d>=t1.f then b else case c when 11 then t1.f*case when (select (count(*)) from t1)<>a+t1.b & +11-coalesce((select max(t1.f-a) from t1 where (f<=t1.c and t1.d=c)),d)-11 then b when (t1.c)<=t1.b then 19 else 11 end else c end end*19-13< - -b),e) FROM t1 WHERE (exists(select 1 from t1 where -t1.f not between ~17 and t1.a+(b+d+13)))} +} {-1101} +do_test randexpr-2.1902 { + db eval {SELECT -t1.e+coalesce((select (d) from t1 where t1.f=t1.f*(select +count(distinct case when (coalesce((select max(case when a in (select b+17 from t1 union select -17 from t1) then 19 when 17a),e)<>f or d not between (e) and b) then 11 else (abs(13)/abs( -(d))) end*t1.c) from t1)),e-f) FROM t1 WHERE ((case when t1.e+d*~coalesce((select coalesce((select max(b) from t1 where ~case when t1.f not between t1.c and coalesce((select t1.b from t1 where -t1.e in (select (( - -min(19))) from t1 union select count(distinct t1.d) from t1)),t1.d) then c when t1.f=e then t1.f else b end=13),13) from t1 where t1.e not between 13 and 11 or not exists(select 1 from t1 where t1.a not between (11) and d and 11>=a)),t1.f) in (select count(*) from t1 union select ~max( -13) from t1) then -13 else 17 end) | d in (a,d,13))} +} {} +do_test randexpr-2.1903 { + db eval {SELECT -t1.e+coalesce((select (d) from t1 where t1.f=t1.f*(select +count(distinct case when (coalesce((select max(case when a in (select b+17 from t1 union select -17 from t1) then 19 when 17a),e)<>f or d not between (e) and b) then 11 else (abs(13)/abs( -(d))) end*t1.c) from t1)),e-f) FROM t1 WHERE NOT (((case when t1.e+d*~coalesce((select coalesce((select max(b) from t1 where ~case when t1.f not between t1.c and coalesce((select t1.b from t1 where -t1.e in (select (( - -min(19))) from t1 union select count(distinct t1.d) from t1)),t1.d) then c when t1.f=e then t1.f else b end=13),13) from t1 where t1.e not between 13 and 11 or not exists(select 1 from t1 where t1.a not between (11) and d and 11>=a)),t1.f) in (select count(*) from t1 union select ~max( -13) from t1) then -13 else 17 end) | d in (a,d,13)))} +} {-100} +do_test randexpr-2.1904 { + db eval {SELECT -t1.e+coalesce((select (d) from t1 where t1.f=t1.f*(select +count(distinct case when (coalesce((select max(case when a in (select b+17 from t1 union select -17 from t1) then 19 when 
17a),e)<>f or d not between (e) and b) then 11 else (abs(13)/abs( -(d))) end*t1.c) from t1)),e-f) FROM t1 WHERE NOT (((case when t1.e+d*~coalesce((select coalesce((select max(b) from t1 where ~case when t1.f not between t1.c and coalesce((select t1.b from t1 where -t1.e in (select (( - -min(19))) from t1 union select count(distinct t1.d) from t1)),t1.d) then c when t1.f=e then t1.f else b end=13),13) from t1 where t1.e not between 13 and 11 or not exists(select 1 from t1 where t1.a not between (11) and d and 11>=a)),t1.f) in (select count(*) from t1 union select ~max( -13) from t1) then -13 else 17 end) | d in (a,d,13)))} +} {-100} +do_test randexpr-2.1905 { + db eval {SELECT coalesce((select max((19)) from t1 where d* -a<=(select count(*) from t1)+19*19*a-e-d or b in (t1.c+ -t1.b,t1.a,b+t1.b)),d) FROM t1 WHERE exists(select 1 from t1 where exists(select 1 from t1 where not t1.e>e))} +} {19} +do_test randexpr-2.1906 { + db eval {SELECT coalesce((select max((19)) from t1 where d* -a<=(select count(*) from t1)+19*19*a-e-d or b in (t1.c+ -t1.b,t1.a,b+t1.b)),d) FROM t1 WHERE NOT (exists(select 1 from t1 where exists(select 1 from t1 where not t1.e>e)))} +} {} +do_test randexpr-2.1907 { + db eval {SELECT (select -~count(distinct (abs(11)/abs(f-17-+(select cast(avg(+(select count(*) from t1)) AS integer) from t1)))) from t1) FROM t1 WHERE (abs((a))/abs(t1.b))>(select count(distinct case case when 17 in (select -b-case when 17 in (select +(( - -max(t1.b))) from t1 union select max((t1.b)) from t1) then f when t1.e=(t1.d) then 17 else e end | e-t1.d | t1.c* -13*d*t1.b from t1 union select 11 from t1) then c when c>=a then 13 else 13 end+t1.b when -f then 19 else 13 end)*min(t1.f) from t1)-19-f} +} {2} +do_test randexpr-2.1908 { + db eval {SELECT (select -~count(distinct (abs(11)/abs(f-17-+(select cast(avg(+(select count(*) from t1)) AS integer) from t1)))) from t1) FROM t1 WHERE NOT ((abs((a))/abs(t1.b))>(select count(distinct case case when 17 in (select -b-case when 17 in (select +(( - -max(t1.b))) from t1 union select max((t1.b)) from t1) then f when t1.e=(t1.d) then 17 else e end | e-t1.d | t1.c* -13*d*t1.b from t1 union select 11 from t1) then c when c>=a then 13 else 13 end+t1.b when -f then 19 else 13 end)*min(t1.f) from t1)-19-f)} +} {} +do_test randexpr-2.1909 { + db eval {SELECT -(select cast(avg(coalesce((select max(f) from t1 where -case coalesce((select max(~c-case when exists(select 1 from t1 where -b in (~e,t1.e,t1.e)) then t1.e when (17 in (select ++ -(count(distinct t1.a)) from t1 union select (max( -f)) from t1)) then b else t1.f end) from t1 where (13 in (19,b,11))),t1.e) when c then (b) else 11 end-a*b*13=t1.e),13)) AS integer) from t1) FROM t1 WHERE exists(select 1 from t1 where case when not (case when ~17*d>=b then a when c*(abs((abs(f)/abs(t1.c)))/abs(t1.b))+d*t1.b<19 and not case 13 when a then 11 else -t1.e end<>f then 11 else c end+t1.d) not in (t1.c,t1.f,d) then (abs(c)/abs(t1.f)) else t1.a end*t1.f not between t1.d and c)} +} {-13} +do_test randexpr-2.1910 { + db eval {SELECT -(select cast(avg(coalesce((select max(f) from t1 where -case coalesce((select max(~c-case when exists(select 1 from t1 where -b in (~e,t1.e,t1.e)) then t1.e when (17 in (select ++ -(count(distinct t1.a)) from t1 union select (max( -f)) from t1)) then b else t1.f end) from t1 where (13 in (19,b,11))),t1.e) when c then (b) else 11 end-a*b*13=t1.e),13)) AS integer) from t1) FROM t1 WHERE NOT (exists(select 1 from t1 where case when not (case when ~17*d>=b then a when 
c*(abs((abs(f)/abs(t1.c)))/abs(t1.b))+d*t1.b<19 and not case 13 when a then 11 else -t1.e end<>f then 11 else c end+t1.d) not in (t1.c,t1.f,d) then (abs(c)/abs(t1.f)) else t1.a end*t1.f not between t1.d and c))} +} {} +do_test randexpr-2.1911 { + db eval {SELECT case t1.c when (select abs( -max(t1.a*t1.f*t1.a*e)) from t1)*case when not exists(select 1 from t1 where (select cast(avg(c) AS integer) from t1) in (select case case when ( -case t1.f when 17 then t1.c else e end=17) then 19 else e end when e then c else -a end*t1.d from t1 union select d from t1)) then e*t1.a else b end+e*t1.d+t1.b then t1.d else c end+t1.e FROM t1 WHERE 13 in (select +abs(case count(distinct t1.e+(abs((abs(coalesce((select t1.b from t1 where coalesce((select max(case when (19<=t1.d) then (13) when a>=13 then 13 else t1.f end) from t1 where a in (t1.c,19,t1.b)),19)>13),d))/abs(f))+t1.a*t1.b)/abs(e))) when +cast(avg(c) AS integer) then ~~+max(f) | cast(avg(t1.c) AS integer) else count(distinct 13)+min(t1.b) | count(*) end)*max(t1.d)-count(*) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.1912 { + db eval {SELECT case t1.c when (select abs( -max(t1.a*t1.f*t1.a*e)) from t1)*case when not exists(select 1 from t1 where (select cast(avg(c) AS integer) from t1) in (select case case when ( -case t1.f when 17 then t1.c else e end=17) then 19 else e end when e then c else -a end*t1.d from t1 union select d from t1)) then e*t1.a else b end+e*t1.d+t1.b then t1.d else c end+t1.e FROM t1 WHERE NOT (13 in (select +abs(case count(distinct t1.e+(abs((abs(coalesce((select t1.b from t1 where coalesce((select max(case when (19<=t1.d) then (13) when a>=13 then 13 else t1.f end) from t1 where a in (t1.c,19,t1.b)),19)>13),d))/abs(f))+t1.a*t1.b)/abs(e))) when +cast(avg(c) AS integer) then ~~+max(f) | cast(avg(t1.c) AS integer) else count(distinct 13)+min(t1.b) | count(*) end)*max(t1.d)-count(*) from t1 union select count(*) from t1))} +} {800} +do_test randexpr-2.1913 { + db eval {SELECT case when f in (d, -case when (case 11 when (select max(t1.d) from t1) then (abs(a)/abs(+t1.b)) else ~b end<19) then (abs( -(abs(17)/abs(t1.f)))/abs((select count(*) from t1))) when (f) not between coalesce((select 13 from t1 where t1.a<=case t1.a when 13 then -t1.c else 13 end and 13 not between t1.f and 11),11) and 19 then e else b end,t1.d) then b else t1.b end FROM t1 WHERE t1.f between f and a-c-t1.d-t1.d+t1.d*case when exists(select 1 from t1 where d between (select ~cast(avg(17) AS integer) from t1) and a) then case when case b when f then 13 else b end19),t1.b)) or ((t1.a>19)) or e<>19 then -t1.e else -13 end-b+t1.d else a end then 19 when d between b and t1.c then t1.e else e end)/abs(t1.a)) FROM t1 WHERE b | t1.b>b-case when a-f in (select 17 from t1 union select coalesce((select coalesce((select t1.e+t1.a+d+d from t1 where 13 not between t1.e+19 and 13),(select count(distinct t1.e) from t1)*11) from t1 where b not in (b,(t1.f),t1.a)),13) from t1) then b else c end+a-13-13*(13) | 11} +} {0} +do_test randexpr-2.1916 { + db eval {SELECT (abs(case when 11<=case 17 when t1.d then t1.b+~19-~case when not t1.b not in (t1.a,b,coalesce((select a+(a) from t1 where t1.b>19),t1.b)) or ((t1.a>19)) or e<>19 then -t1.e else -13 end-b+t1.d else a end then 19 when d between b and t1.c then t1.e else e end)/abs(t1.a)) FROM t1 WHERE NOT (b | t1.b>b-case when a-f in (select 17 from t1 union select coalesce((select coalesce((select t1.e+t1.a+d+d from t1 where 13 not between t1.e+19 and 13),(select count(distinct t1.e) from t1)*11) from t1 where 
b not in (b,(t1.f),t1.a)),13) from t1) then b else c end+a-13-13*(13) | 11)} +} {} +do_test randexpr-2.1917 { + db eval {SELECT -t1.a | coalesce((select t1.f*(select min(f)+min(( -(select -case (min(13)) when -abs(min(t1.b)) then count(distinct a) else count(*) end from t1)*(select - -count(*) from t1)))* -count(*) from t1) from t1 where t1.d in (select case case when t1.a<(f) then c else 19 end when f then b else -c end*a*t1.c from t1 union select t1.b from t1)),b) | a FROM t1 WHERE +19*case when exists(select 1 from t1 where e<+13 | f or t1.d=19) then t1.a when case when exists(select 1 from t1 where t1.c in (select 11 from t1 union select c from t1)) then 13*t1.e else 19 end<=c and (t1.e between (b) and 13) or 11 not between t1.f and 13 or 17 between b and 13 then t1.e+a else 17 end not between b and d or t1.e not between 13 and 17} +} {-4} +do_test randexpr-2.1918 { + db eval {SELECT -t1.a | coalesce((select t1.f*(select min(f)+min(( -(select -case (min(13)) when -abs(min(t1.b)) then count(distinct a) else count(*) end from t1)*(select - -count(*) from t1)))* -count(*) from t1) from t1 where t1.d in (select case case when t1.a<(f) then c else 19 end when f then b else -c end*a*t1.c from t1 union select t1.b from t1)),b) | a FROM t1 WHERE NOT (+19*case when exists(select 1 from t1 where e<+13 | f or t1.d=19) then t1.a when case when exists(select 1 from t1 where t1.c in (select 11 from t1 union select c from t1)) then 13*t1.e else 19 end<=c and (t1.e between (b) and 13) or 11 not between t1.f and 13 or 17 between b and 13 then t1.e+a else 17 end not between b and d or t1.e not between 13 and 17)} +} {} +do_test randexpr-2.1919 { + db eval {SELECT -t1.a & coalesce((select t1.f*(select min(f)+min(( -(select -case (min(13)) when -abs(min(t1.b)) then count(distinct a) else count(*) end from t1)*(select - -count(*) from t1)))* -count(*) from t1) from t1 where t1.d in (select case case when t1.a<(f) then c else 19 end when f then b else -c end*a*t1.c from t1 union select t1.b from t1)),b) & a FROM t1 WHERE +19*case when exists(select 1 from t1 where e<+13 | f or t1.d=19) then t1.a when case when exists(select 1 from t1 where t1.c in (select 11 from t1 union select c from t1)) then 13*t1.e else 19 end<=c and (t1.e between (b) and 13) or 11 not between t1.f and 13 or 17 between b and 13 then t1.e+a else 17 end not between b and d or t1.e not between 13 and 17} +} {0} +do_test randexpr-2.1920 { + db eval {SELECT (select -case +count(distinct case when -+ -~case when ((t1.e)) not between e and f then e when t1.b between -11 and t1.b then d else t1.a end*19 in (select t1.f from t1 union select t1.d from t1) then t1.b when a in (select -19 from t1 union select c from t1) then a else f end) when +~ -(count(*)) then -( -count(distinct t1.e))-count(distinct c) else max(t1.c) end-count(distinct t1.c)-count(distinct 19)-(max(11)) from t1) FROM t1 WHERE -~coalesce((select max(19*t1.b) from t1 where t1.d in (select max( -coalesce((select max(t1.c) from t1 where case 13 when f+ -13+t1.b-case 13 when t1.a | (abs(t1.a)/abs(t1.f)) then 11 else f end then f else 17 end in (b,t1.d,(e))),13)) from t1 union select ~~min(b)*abs(cast(avg(b) AS integer)) from t1)),e)*((t1.d))<>t1.d} +} {-313} +do_test randexpr-2.1921 { + db eval {SELECT (select -case +count(distinct case when -+ -~case when ((t1.e)) not between e and f then e when t1.b between -11 and t1.b then d else t1.a end*19 in (select t1.f from t1 union select t1.d from t1) then t1.b when a in (select -19 from t1 union select c from t1) then a else f end) when 
+~ -(count(*)) then -( -count(distinct t1.e))-count(distinct c) else max(t1.c) end-count(distinct t1.c)-count(distinct 19)-(max(11)) from t1) FROM t1 WHERE NOT ( -~coalesce((select max(19*t1.b) from t1 where t1.d in (select max( -coalesce((select max(t1.c) from t1 where case 13 when f+ -13+t1.b-case 13 when t1.a | (abs(t1.a)/abs(t1.f)) then 11 else f end then f else 17 end in (b,t1.d,(e))),13)) from t1 union select ~~min(b)*abs(cast(avg(b) AS integer)) from t1)),e)*((t1.d))<>t1.d)} +} {} +do_test randexpr-2.1922 { + db eval {SELECT case a when c then (abs((select abs(~(+~~case max(t1.d) when +count(distinct ~d | (select case max(17) when count(*) then count(distinct (t1.c)) else count(distinct 11) end from t1)) then (+cast(avg((17)) AS integer))+count(*) else -(( - -(cast(avg(c) AS integer)))) end)+count(distinct t1.e)+min(13)) from t1)*+11-((abs(t1.a)/abs(coalesce((select c from t1 where ((t1.f<>d))),(t1.c))))) | t1.d)/abs( -(d))) else a end FROM t1 WHERE coalesce((select max(case t1.a when +t1.e then case when t1.c+(select (max(t1.d))-max(13)+cast(avg(c) AS integer)+min((e)) | max(a) from t1)<= -t1.a | case when not exists(select 1 from t1 where -t1.d<=t1.a) then t1.f when 11=c then a else 13 end | t1.c else t1.a end) from t1 where 19<=e and t1.e between t1.d and 11),13) not in (e,a,c)} +} {100} +do_test randexpr-2.1923 { + db eval {SELECT case a when c then (abs((select abs(~(+~~case max(t1.d) when +count(distinct ~d | (select case max(17) when count(*) then count(distinct (t1.c)) else count(distinct 11) end from t1)) then (+cast(avg((17)) AS integer))+count(*) else -(( - -(cast(avg(c) AS integer)))) end)+count(distinct t1.e)+min(13)) from t1)*+11-((abs(t1.a)/abs(coalesce((select c from t1 where ((t1.f<>d))),(t1.c))))) | t1.d)/abs( -(d))) else a end FROM t1 WHERE NOT (coalesce((select max(case t1.a when +t1.e then case when t1.c+(select (max(t1.d))-max(13)+cast(avg(c) AS integer)+min((e)) | max(a) from t1)<= -t1.a | case when not exists(select 1 from t1 where -t1.d<=t1.a) then t1.f when 11=c then a else 13 end | t1.c else t1.a end) from t1 where 19<=e and t1.e between t1.d and 11),13) not in (e,a,c))} +} {} +do_test randexpr-2.1924 { + db eval {SELECT case a when c then (abs((select abs(~(+~~case max(t1.d) when +count(distinct ~d & (select case max(17) when count(*) then count(distinct (t1.c)) else count(distinct 11) end from t1)) then (+cast(avg((17)) AS integer))+count(*) else -(( - -(cast(avg(c) AS integer)))) end)+count(distinct t1.e)+min(13)) from t1)*+11-((abs(t1.a)/abs(coalesce((select c from t1 where ((t1.f<>d))),(t1.c))))) & t1.d)/abs( -(d))) else a end FROM t1 WHERE coalesce((select max(case t1.a when +t1.e then case when t1.c+(select (max(t1.d))-max(13)+cast(avg(c) AS integer)+min((e)) | max(a) from t1)<= -t1.a | case when not exists(select 1 from t1 where -t1.d<=t1.a) then t1.f when 11=c then a else 13 end | t1.c else t1.a end) from t1 where 19<=e and t1.e between t1.d and 11),13) not in (e,a,c)} +} {100} +do_test randexpr-2.1925 { + db eval {SELECT t1.d-+case ~(select ~+~count(distinct case when case when case when 11 not between t1.c and t1.a-e then t1.d else e end*d-(d) between -13 and e then a else c end in (select b from t1 union select 17 from t1) then a else 13 end)*cast(avg(d) AS integer) from t1) when t1.e then (abs(11)/abs(coalesce((select max(19) from t1 where (a)<>t1.e),19))) else t1.c end+t1.c FROM t1 WHERE 19 not in (t1.a*+case when -c+~t1.a-t1.c in (t1.a,b*case t1.a | coalesce((select max(coalesce((select 17 from t1 where t1.e>t1.a),t1.d)) from t1 where not 
11=19),17) when 11 then t1.d else (f) end*b-t1.a+c, -b) then d when a<>f then a else b end-c,d,t1.b)} +} {400} +do_test randexpr-2.1926 { + db eval {SELECT t1.d-+case ~(select ~+~count(distinct case when case when case when 11 not between t1.c and t1.a-e then t1.d else e end*d-(d) between -13 and e then a else c end in (select b from t1 union select 17 from t1) then a else 13 end)*cast(avg(d) AS integer) from t1) when t1.e then (abs(11)/abs(coalesce((select max(19) from t1 where (a)<>t1.e),19))) else t1.c end+t1.c FROM t1 WHERE NOT (19 not in (t1.a*+case when -c+~t1.a-t1.c in (t1.a,b*case t1.a | coalesce((select max(coalesce((select 17 from t1 where t1.e>t1.a),t1.d)) from t1 where not 11=19),17) when 11 then t1.d else (f) end*b-t1.a+c, -b) then d when a<>f then a else b end-c,d,t1.b))} +} {} +do_test randexpr-2.1927 { + db eval {SELECT case when case (select +min(~ -11+f*~b) | case abs(cast(avg(t1.f) AS integer))*~min(c) when count(*)+max( -11) then abs(count(*)) else max(13) end from t1) when coalesce((select max(coalesce((select max(19) from t1 where (17>=17)),t1.f)) from t1 where ct1.a then -d else -t1.b end FROM t1 WHERE t1.a+b-t1.c*17>=c} +} {} +do_test randexpr-2.1928 { + db eval {SELECT case when case (select +min(~ -11+f*~b) | case abs(cast(avg(t1.f) AS integer))*~min(c) when count(*)+max( -11) then abs(count(*)) else max(13) end from t1) when coalesce((select max(coalesce((select max(19) from t1 where (17>=17)),t1.f)) from t1 where ct1.a then -d else -t1.b end FROM t1 WHERE NOT (t1.a+b-t1.c*17>=c)} +} {-400} +do_test randexpr-2.1929 { + db eval {SELECT case when case (select +min(~ -11+f*~b) & case abs(cast(avg(t1.f) AS integer))*~min(c) when count(*)+max( -11) then abs(count(*)) else max(13) end from t1) when coalesce((select max(coalesce((select max(19) from t1 where (17>=17)),t1.f)) from t1 where ct1.a then -d else -t1.b end FROM t1 WHERE NOT (t1.a+b-t1.c*17>=c)} +} {-400} +do_test randexpr-2.1930 { + db eval {SELECT case when ~case when t1.f>11*t1.a then f when case when (t1.c*19<(t1.f)) then (e) when at1.e and a in (d,t1.c,13) then t1.b when -17 not between f and d then f else t1.c end FROM t1 WHERE c in (select + -min(case (select count(*) from t1) when t1.a then t1.c else coalesce((select max(b) from t1 where d in (select -abs(cast(avg(t1.b) AS integer)) | (min(t1.f)) | count(distinct a)+abs(cast(avg(b) AS integer))*( -min(b)) | cast(avg(t1.c) AS integer)*((min(t1.e))) from t1 union select cast(avg(f) AS integer) from t1)),t1.e) | +case b when -t1.c then (select (count(*)) from t1) else b end | 13 end) from t1 union select min(t1.d) from t1)} +} {} +do_test randexpr-2.1931 { + db eval {SELECT case when ~case when t1.f>11*t1.a then f when case when (t1.c*19<(t1.f)) then (e) when at1.e and a in (d,t1.c,13) then t1.b when -17 not between f and d then f else t1.c end FROM t1 WHERE NOT (c in (select + -min(case (select count(*) from t1) when t1.a then t1.c else coalesce((select max(b) from t1 where d in (select -abs(cast(avg(t1.b) AS integer)) | (min(t1.f)) | count(distinct a)+abs(cast(avg(b) AS integer))*( -min(b)) | cast(avg(t1.c) AS integer)*((min(t1.e))) from t1 union select cast(avg(f) AS integer) from t1)),t1.e) | +case b when -t1.c then (select (count(*)) from t1) else b end | 13 end) from t1 union select min(t1.d) from t1))} +} {600} +do_test randexpr-2.1932 { + db eval {SELECT case when (b) between 19 and case when t1.a* -t1.d+t1.a-t1.d-b-a<=d then t1.c when not exists(select 1 from t1 where (13>t1.e) and ((a>a))) and -(13)>t1.b then t1.d else 11 end or d=e then 13+t1.f 
when ( -a) not between t1.e and t1.b then (c) else t1.e end+t1.a+17 FROM t1 WHERE (case when a<=f+t1.a | f then t1.c*a-coalesce((select max( -e) from t1 where t1.a=t1.d),b)*d- -f else t1.b end) in (select min(t1.f)+~max(b) | count(distinct t1.d)*+ - -case (count(*)) when +count(distinct e) then count(*) else -cast(avg(d) AS integer) end from t1 union select count(*) from t1) and d between 11 and t1.a} +} {} +do_test randexpr-2.1933 { + db eval {SELECT case when (b) between 19 and case when t1.a* -t1.d+t1.a-t1.d-b-a<=d then t1.c when not exists(select 1 from t1 where (13>t1.e) and ((a>a))) and -(13)>t1.b then t1.d else 11 end or d=e then 13+t1.f when ( -a) not between t1.e and t1.b then (c) else t1.e end+t1.a+17 FROM t1 WHERE NOT ((case when a<=f+t1.a | f then t1.c*a-coalesce((select max( -e) from t1 where t1.a=t1.d),b)*d- -f else t1.b end) in (select min(t1.f)+~max(b) | count(distinct t1.d)*+ - -case (count(*)) when +count(distinct e) then count(*) else -cast(avg(d) AS integer) end from t1 union select count(*) from t1) and d between 11 and t1.a)} +} {730} +do_test randexpr-2.1934 { + db eval {SELECT +f*19*~~ -t1.c-13-case when t1.b in (select +case when not exists(select 1 from t1 where (((abs((abs((select count(*)*(count(distinct t1.d)) from t1))/abs(b)))/abs(e))*(e))>=d)) then f when t1.a=f then b else t1.e end+19 | (17) from t1 union select t1.b from t1) or -ee and t1.e<=t1.b} +} {} +do_test randexpr-2.1935 { + db eval {SELECT +f*19*~~ -t1.c-13-case when t1.b in (select +case when not exists(select 1 from t1 where (((abs((abs((select count(*)*(count(distinct t1.d)) from t1))/abs(b)))/abs(e))*(e))>=d)) then f when t1.a=f then b else t1.e end+19 | (17) from t1 union select t1.b from t1) or -ee and t1.e<=t1.b)} +} {-3420224} +do_test randexpr-2.1936 { + db eval {SELECT +f*19*~~ -t1.c-13-case when t1.b in (select +case when not exists(select 1 from t1 where (((abs((abs((select count(*)*(count(distinct t1.d)) from t1))/abs(b)))/abs(e))*(e))>=d)) then f when t1.a=f then b else t1.e end+19 & (17) from t1 union select t1.b from t1) or -ee and t1.e<=t1.b)} +} {-3420224} +do_test randexpr-2.1937 { + db eval {SELECT f-coalesce((select max(d+~+ - -(a)*(select min(t1.d) from t1)*t1.f+case when ( -13)*t1.e<>11 | -t1.e or c=11 then (t1.e) when -t1.d<=t1.b and (t1.f<>t1.c) then a else -t1.f end) from t1 where 11 not between t1.e and b),t1.f)-13 FROM t1 WHERE -a not in (17+~coalesce((select t1.a from t1 where (exists(select 1 from t1 where 11>c))),11)+17-11,11,t1.c) or f in (select case (min((t1.a))) when max(b) then min(13) else cast(avg(c) AS integer) end*max(d)*(case cast(avg(19) AS integer) when ( -min(19)) then count(*) else count(*) end) | -cast(avg(19) AS integer) from t1 union select cast(avg(b) AS integer) from t1) or t1.f>= -t1.d} +} {24239687} +do_test randexpr-2.1938 { + db eval {SELECT f-coalesce((select max(d+~+ - -(a)*(select min(t1.d) from t1)*t1.f+case when ( -13)*t1.e<>11 | -t1.e or c=11 then (t1.e) when -t1.d<=t1.b and (t1.f<>t1.c) then a else -t1.f end) from t1 where 11 not between t1.e and b),t1.f)-13 FROM t1 WHERE NOT ( -a not in (17+~coalesce((select t1.a from t1 where (exists(select 1 from t1 where 11>c))),11)+17-11,11,t1.c) or f in (select case (min((t1.a))) when max(b) then min(13) else cast(avg(c) AS integer) end*max(d)*(case cast(avg(19) AS integer) when ( -min(19)) then count(*) else count(*) end) | -cast(avg(19) AS integer) from t1 union select cast(avg(b) AS integer) from t1) or t1.f>= -t1.d)} +} {} +do_test randexpr-2.1939 { + db eval {SELECT f-coalesce((select max(d+~+ 
- -(a)*(select min(t1.d) from t1)*t1.f+case when ( -13)*t1.e<>11 & -t1.e or c=11 then (t1.e) when -t1.d<=t1.b and (t1.f<>t1.c) then a else -t1.f end) from t1 where 11 not between t1.e and b),t1.f)-13 FROM t1 WHERE -a not in (17+~coalesce((select t1.a from t1 where (exists(select 1 from t1 where 11>c))),11)+17-11,11,t1.c) or f in (select case (min((t1.a))) when max(b) then min(13) else cast(avg(c) AS integer) end*max(d)*(case cast(avg(19) AS integer) when ( -min(19)) then count(*) else count(*) end) | -cast(avg(19) AS integer) from t1 union select cast(avg(b) AS integer) from t1) or t1.f>= -t1.d} +} {24239687} +do_test randexpr-2.1940 { + db eval {SELECT ~case when ~coalesce((select max(f) from t1 where b | (select +max(13) from t1)+t1.b<~13*t1.d*coalesce((select -t1.b from t1 where coalesce((select t1.e+a from t1 where 11=17),a) between 19 and e),17)*d),t1.b) in (select cast(avg(t1.d) AS integer) from t1 union select ~count(distinct 11) from t1) then 11 when t1.d>=t1.b then 13 else f end-t1.e FROM t1 WHERE not exists(select 1 from t1 where coalesce((select case t1.b when -t1.c then c-17 else case f when (abs(case when ((13*case -e when case when b>c and 11>11 then -a when -(t1.c)>t1.e then d else t1.d end then t1.d else t1.e end<=11)) then ~t1.b+f when (d)<=c then t1.a else (t1.a) end+b)/abs(b)) then -b else 11 end end-19 from t1 where t1.e<=t1.b),a)>t1.a)} +} {-514} +do_test randexpr-2.1941 { + db eval {SELECT ~case when ~coalesce((select max(f) from t1 where b | (select +max(13) from t1)+t1.b<~13*t1.d*coalesce((select -t1.b from t1 where coalesce((select t1.e+a from t1 where 11=17),a) between 19 and e),17)*d),t1.b) in (select cast(avg(t1.d) AS integer) from t1 union select ~count(distinct 11) from t1) then 11 when t1.d>=t1.b then 13 else f end-t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where coalesce((select case t1.b when -t1.c then c-17 else case f when (abs(case when ((13*case -e when case when b>c and 11>11 then -a when -(t1.c)>t1.e then d else t1.d end then t1.d else t1.e end<=11)) then ~t1.b+f when (d)<=c then t1.a else (t1.a) end+b)/abs(b)) then -b else 11 end end-19 from t1 where t1.e<=t1.b),a)>t1.a))} +} {} +do_test randexpr-2.1942 { + db eval {SELECT ~case when ~coalesce((select max(f) from t1 where b & (select +max(13) from t1)+t1.b<~13*t1.d*coalesce((select -t1.b from t1 where coalesce((select t1.e+a from t1 where 11=17),a) between 19 and e),17)*d),t1.b) in (select cast(avg(t1.d) AS integer) from t1 union select ~count(distinct 11) from t1) then 11 when t1.d>=t1.b then 13 else f end-t1.e FROM t1 WHERE not exists(select 1 from t1 where coalesce((select case t1.b when -t1.c then c-17 else case f when (abs(case when ((13*case -e when case when b>c and 11>11 then -a when -(t1.c)>t1.e then d else t1.d end then t1.d else t1.e end<=11)) then ~t1.b+f when (d)<=c then t1.a else (t1.a) end+b)/abs(b)) then -b else 11 end end-19 from t1 where t1.e<=t1.b),a)>t1.a)} +} {-514} +do_test randexpr-2.1943 { + db eval {SELECT +(case when (not exists(select 1 from t1 where t1.c in (select +(abs(a)/abs( -coalesce((select max(17) from t1 where (not (c>=t1.d and t1.c>=f) and t1.e>e)),19-17))) from t1 union select case when (b)>d then d when c not in (f,t1.b,t1.d) then t1.f else t1.d end from t1) or a not in (t1.d,a, -b))) and exists(select 1 from t1 where b<11) then c when t1.c not between e and -19 then t1.b else ~c end) FROM t1 WHERE t1.e*f-a- -13-b= -d} +} {} +do_test randexpr-2.1944 { + db eval {SELECT +(case when (not exists(select 1 from t1 where t1.c in (select +(abs(a)/abs( 
-coalesce((select max(17) from t1 where (not (c>=t1.d and t1.c>=f) and t1.e>e)),19-17))) from t1 union select case when (b)>d then d when c not in (f,t1.b,t1.d) then t1.f else t1.d end from t1) or a not in (t1.d,a, -b))) and exists(select 1 from t1 where b<11) then c when t1.c not between e and -19 then t1.b else ~c end) FROM t1 WHERE NOT (t1.e*f-a- -13-b= -d)} +} {200} +do_test randexpr-2.1945 { + db eval {SELECT b-case when a not between t1.f and case when t1.d in (select count(*) from t1 union select cast(avg(coalesce((select +11*t1.e from t1 where e+bb and t1.d between c and b then t1.d else t1.d endb and t1.d between c and b then t1.d else t1.d endb and t1.d between c and b then t1.d else t1.d end=t1.d and 17<=13 or f between -t1.c and 11 then case d when t1.c then 13 else t1.e end when (11)>b then a else c end in (t1.e,a,b)} +} {} +do_test randexpr-2.1949 { + db eval {SELECT (select (min(coalesce((select max((case when t1.f=t1.d and 17<=13 or f between -t1.c and 11 then case d when t1.c then 13 else t1.e end when (11)>b then a else c end in (t1.e,a,b))} +} {582} +do_test randexpr-2.1950 { + db eval {SELECT coalesce((select max((abs(e)/abs(t1.a))) from t1 where (select (count(distinct ~ - -11- -t1.a-a*t1.d | 11-19+t1.f+17*c)-cast(avg(17) AS integer)+(cast(avg((19)) AS integer))) from t1)>a),17 | 11+17)+e-(c)-d-b FROM t1 WHERE (d>t1.d-case when case +(abs(t1.d | t1.f)/abs(t1.b)) when +b then coalesce((select max(t1.d | t1.c+(11+t1.d-t1.c)) from t1 where ((t1.e=17))),17) else t1.c end>=b then a else t1.f end) and (not (e<=c)) and not (t1.c<>b)} +} {} +do_test randexpr-2.1951 { + db eval {SELECT coalesce((select max((abs(e)/abs(t1.a))) from t1 where (select (count(distinct ~ - -11- -t1.a-a*t1.d | 11-19+t1.f+17*c)-cast(avg(17) AS integer)+(cast(avg((19)) AS integer))) from t1)>a),17 | 11+17)+e-(c)-d-b FROM t1 WHERE NOT ((d>t1.d-case when case +(abs(t1.d | t1.f)/abs(t1.b)) when +b then coalesce((select max(t1.d | t1.c+(11+t1.d-t1.c)) from t1 where ((t1.e=17))),17) else t1.c end>=b then a else t1.f end) and (not (e<=c)) and not (t1.c<>b))} +} {-371} +do_test randexpr-2.1952 { + db eval {SELECT coalesce((select max((abs(e)/abs(t1.a))) from t1 where (select (count(distinct ~ - -11- -t1.a-a*t1.d & 11-19+t1.f+17*c)-cast(avg(17) AS integer)+(cast(avg((19)) AS integer))) from t1)>a),17 & 11+17)+e-(c)-d-b FROM t1 WHERE NOT ((d>t1.d-case when case +(abs(t1.d | t1.f)/abs(t1.b)) when +b then coalesce((select max(t1.d | t1.c+(11+t1.d-t1.c)) from t1 where ((t1.e=17))),17) else t1.c end>=b then a else t1.f end) and (not (e<=c)) and not (t1.c<>b))} +} {-384} +do_test randexpr-2.1953 { + db eval {SELECT coalesce((select c from t1 where case b when t1.a*(17) | case b++b when e then d else t1.b+f end+a+t1.d then d else t1.b | +e end<+~(select (count(*)) from t1)),17) FROM t1 WHERE (b+t1.c*c in (11, -t1.f,t1.b))} +} {} +do_test randexpr-2.1954 { + db eval {SELECT coalesce((select c from t1 where case b when t1.a*(17) | case b++b when e then d else t1.b+f end+a+t1.d then d else t1.b | +e end<+~(select (count(*)) from t1)),17) FROM t1 WHERE NOT ((b+t1.c*c in (11, -t1.f,t1.b)))} +} {17} +do_test randexpr-2.1955 { + db eval {SELECT coalesce((select c from t1 where case b when t1.a*(17) & case b++b when e then d else t1.b+f end+a+t1.d then d else t1.b & +e end<+~(select (count(*)) from t1)),17) FROM t1 WHERE NOT ((b+t1.c*c in (11, -t1.f,t1.b)))} +} {17} +do_test randexpr-2.1956 { + db eval {SELECT case when coalesce((select max((abs(c)/abs(t1.d))) from t1 where t1.f*case when t1.a in (select 
(abs(17)/abs(c))*(t1.a) from t1 union select t1.e from t1) then t1.b else b end<19),e)+a-d in (select case max(t1.b)-count(distinct t1.c) when +case abs(cast(avg(t1.f) AS integer)) when +cast(avg(f) AS integer)-min(f) then count(distinct c) else cast(avg(d) AS integer) end then count(distinct t1.a) else cast(avg(b) AS integer) end from t1 union select count(distinct t1.e) from t1) then t1.c else f end FROM t1 WHERE ((19 in (t1.b,case t1.f when 11 then 11 else -+t1.b | 19 end+coalesce((select 11 from t1 where c<=t1.c* -e),f),t1.f)) or (b between t1.b and 17) or not exists(select 1 from t1 where -17 between -f and t1.e or (not b<>11) and 19 in (select 11 from t1 union select t1.b from t1))) or 13>(t1.e)} +} {} +do_test randexpr-2.1957 { + db eval {SELECT case when coalesce((select max((abs(c)/abs(t1.d))) from t1 where t1.f*case when t1.a in (select (abs(17)/abs(c))*(t1.a) from t1 union select t1.e from t1) then t1.b else b end<19),e)+a-d in (select case max(t1.b)-count(distinct t1.c) when +case abs(cast(avg(t1.f) AS integer)) when +cast(avg(f) AS integer)-min(f) then count(distinct c) else cast(avg(d) AS integer) end then count(distinct t1.a) else cast(avg(b) AS integer) end from t1 union select count(distinct t1.e) from t1) then t1.c else f end FROM t1 WHERE NOT (((19 in (t1.b,case t1.f when 11 then 11 else -+t1.b | 19 end+coalesce((select 11 from t1 where c<=t1.c* -e),f),t1.f)) or (b between t1.b and 17) or not exists(select 1 from t1 where -17 between -f and t1.e or (not b<>11) and 19 in (select 11 from t1 union select t1.b from t1))) or 13>(t1.e))} +} {300} +do_test randexpr-2.1958 { + db eval {SELECT f+case when e>+11+t1.e or ~t1.a=f+t1.e+t1.c then t1.a+case when b*coalesce((select max(a) from t1 where not exists(select 1 from t1 where (t1.f=case when exists(select 1 from t1 where t1.ff),11))<(e) then t1.e else 19 end+(c) else t1.a end*c-t1.c FROM t1 WHERE not ~coalesce((select max(t1.f*coalesce((select max(f) from t1 where case 17 when e*19 then case t1.a when 17 then f else t1.c+t1.d end | (e) else ~a end>=e or not t1.b in (select a from t1 union select d from t1)),19)) from t1 where e>coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.c=t1.a)),case t1.b when t1.b then a else 19 end)),13)*t1.b not between t1.e and t1.e} +} {} +do_test randexpr-2.1959 { + db eval {SELECT f+case when e>+11+t1.e or ~t1.a=f+t1.e+t1.c then t1.a+case when b*coalesce((select max(a) from t1 where not exists(select 1 from t1 where (t1.f=case when exists(select 1 from t1 where t1.ff),11))<(e) then t1.e else 19 end+(c) else t1.a end*c-t1.c FROM t1 WHERE NOT (not ~coalesce((select max(t1.f*coalesce((select max(f) from t1 where case 17 when e*19 then case t1.a when 17 then f else t1.c+t1.d end | (e) else ~a end>=e or not t1.b in (select a from t1 union select d from t1)),19)) from t1 where e>coalesce((select 17 from t1 where not exists(select 1 from t1 where t1.c=t1.a)),case t1.b when t1.b then a else 19 end)),13)*t1.b not between t1.e and t1.e)} +} {30300} +do_test randexpr-2.1960 { + db eval {SELECT e*(select -max((case when -case when (+(abs(f)/abs(17))*13)> -a then 17 else f end | -t1.f-e*11<>11 then c when exists(select 1 from t1 where not exists(select 1 from t1 where t1.b not in (t1.d,19,d) and t1.d<>11)) or 13<= -t1.f then 11 else d end-t1.a)*e) from t1) FROM t1 WHERE exists(select 1 from t1 where c<>a)} +} {-50000000} +do_test randexpr-2.1961 { + db eval {SELECT e*(select -max((case when -case when (+(abs(f)/abs(17))*13)> -a then 17 else f end | -t1.f-e*11<>11 then c when exists(select 
1 from t1 where not exists(select 1 from t1 where t1.b not in (t1.d,19,d) and t1.d<>11)) or 13<= -t1.f then 11 else d end-t1.a)*e) from t1) FROM t1 WHERE NOT (exists(select 1 from t1 where c<>a))} +} {} +do_test randexpr-2.1962 { + db eval {SELECT e*(select -max((case when -case when (+(abs(f)/abs(17))*13)> -a then 17 else f end & -t1.f-e*11<>11 then c when exists(select 1 from t1 where not exists(select 1 from t1 where t1.b not in (t1.d,19,d) and t1.d<>11)) or 13<= -t1.f then 11 else d end-t1.a)*e) from t1) FROM t1 WHERE exists(select 1 from t1 where c<>a)} +} {-50000000} +do_test randexpr-2.1963 { + db eval {SELECT a-t1.b | (case case when 13 in (t1.c,(t1.c),t1.e) then e+b when b in (select e from t1 union select d from t1) then (select count(distinct (abs(case when t1.e not between c and c then f else case t1.a when t1.c then e+t1.d else c end end)/abs(b))) from t1) else t1.b- -t1.d end when t1.c then 11 else e end- -a)+d-t1.b FROM t1 WHERE (case when f between f+(abs(coalesce((select t1.c*t1.c from t1 where f between 19 and (select count(distinct coalesce((select max(case case when t1.b<>t1.c then t1.d when 19t1.c then t1.d when 19t1.c then t1.d when 19 -case t1.f when +b-case when (17 in (select count(distinct b)+count(distinct t1.a) from t1 union select count(distinct 13) from t1)) then d else 13 end+t1.d | d* -11+t1.d*t1.f | a-17 then 13 else c end*t1.e),d) then t1.b else d end)/abs(t1.a))*19 FROM t1 WHERE t1.f in (select count(*) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.1967 { + db eval {SELECT (abs(case t1.f | d when coalesce((select max(~b) from t1 where +t1.a<> -case t1.f when +b-case when (17 in (select count(distinct b)+count(distinct t1.a) from t1 union select count(distinct 13) from t1)) then d else 13 end+t1.d | d* -11+t1.d*t1.f | a-17 then 13 else c end*t1.e),d) then t1.b else d end)/abs(t1.a))*19 FROM t1 WHERE NOT (t1.f in (select count(*) from t1 union select count(*) from t1))} +} {76} +do_test randexpr-2.1968 { + db eval {SELECT (abs(case t1.f & d when coalesce((select max(~b) from t1 where +t1.a<> -case t1.f when +b-case when (17 in (select count(distinct b)+count(distinct t1.a) from t1 union select count(distinct 13) from t1)) then d else 13 end+t1.d & d* -11+t1.d*t1.f & a-17 then 13 else c end*t1.e),d) then t1.b else d end)/abs(t1.a))*19 FROM t1 WHERE NOT (t1.f in (select count(*) from t1 union select count(*) from t1))} +} {76} +do_test randexpr-2.1969 { + db eval {SELECT t1.d+coalesce((select max(t1.b*t1.b | ~case a | c | t1.d when t1.c then t1.a else t1.e end | e-13*b) from t1 where -t1.a in (select e from t1 union select t1.b from t1) and not exists(select 1 from t1 where (a) in (select min(13)* -cast(avg(t1.e) AS integer)*~case count(*) when count(*) then ((cast(avg(e) AS integer))) else count(*) end | (min(t1.e)) | -count(*) from t1 union select max(t1.a) from t1))), -t1.b)+a FROM t1 WHERE not exists(select 1 from t1 where 11*d+a>=(t1.a-e+17*(select +cast(avg((select cast(avg((abs(case when (abs(t1.a)/abs(coalesce((select max(c+t1.e) from t1 where not exists(select 1 from t1 where 13 in (select -t1.c from t1 union select t1.f from t1))),17) | e*e)) not between e and t1.f then 19 else t1.b end)/abs(19))) AS integer)* -count(*) from t1)) AS integer)*count(distinct 11)-abs(cast(avg(b) AS integer)) from t1)))} +} {} +do_test randexpr-2.1970 { + db eval {SELECT t1.d+coalesce((select max(t1.b*t1.b | ~case a | c | t1.d when t1.c then t1.a else t1.e end | e-13*b) from t1 where -t1.a in (select e from t1 union select t1.b from t1) and not 
exists(select 1 from t1 where (a) in (select min(13)* -cast(avg(t1.e) AS integer)*~case count(*) when count(*) then ((cast(avg(e) AS integer))) else count(*) end | (min(t1.e)) | -count(*) from t1 union select max(t1.a) from t1))), -t1.b)+a FROM t1 WHERE NOT (not exists(select 1 from t1 where 11*d+a>=(t1.a-e+17*(select +cast(avg((select cast(avg((abs(case when (abs(t1.a)/abs(coalesce((select max(c+t1.e) from t1 where not exists(select 1 from t1 where 13 in (select -t1.c from t1 union select t1.f from t1))),17) | e*e)) not between e and t1.f then 19 else t1.b end)/abs(19))) AS integer)* -count(*) from t1)) AS integer)*count(distinct 11)-abs(cast(avg(b) AS integer)) from t1))))} +} {300} +do_test randexpr-2.1971 { + db eval {SELECT t1.d+coalesce((select max(t1.b*t1.b & ~case a & c & t1.d when t1.c then t1.a else t1.e end & e-13*b) from t1 where -t1.a in (select e from t1 union select t1.b from t1) and not exists(select 1 from t1 where (a) in (select min(13)* -cast(avg(t1.e) AS integer)*~case count(*) when count(*) then ((cast(avg(e) AS integer))) else count(*) end & (min(t1.e)) & -count(*) from t1 union select max(t1.a) from t1))), -t1.b)+a FROM t1 WHERE NOT (not exists(select 1 from t1 where 11*d+a>=(t1.a-e+17*(select +cast(avg((select cast(avg((abs(case when (abs(t1.a)/abs(coalesce((select max(c+t1.e) from t1 where not exists(select 1 from t1 where 13 in (select -t1.c from t1 union select t1.f from t1))),17) | e*e)) not between e and t1.f then 19 else t1.b end)/abs(19))) AS integer)* -count(*) from t1)) AS integer)*count(distinct 11)-abs(cast(avg(b) AS integer)) from t1))))} +} {300} +do_test randexpr-2.1972 { + db eval {SELECT ~case when case when +d+case b when 11 then coalesce((select max(b) from t1 where (((abs(f)/abs((abs(11)/abs(e))+t1.b))>t1.f))),(select max(t1.e) from t1)) else t1.f end*t1.f*17>=(t1.e) or 17>t1.f then t1.a else t1.a end in (c,13,c) then t1.f when t1.a not in (t1.d, -t1.b,a) then -t1.b else 11 end FROM t1 WHERE t1.a in (+e,17,11*13) and b not between t1.f*(abs(t1.c-((abs(t1.a | coalesce((select max(case when -f-t1.c<=t1.f then 19 when (t1.a)<>t1.f and 13 between a and t1.e then -11 else 17 end) from t1 where c>19),f)-t1.e)/abs(e))))/abs(b))-t1.e | (19) and -t1.a and t1.f in (select 13 from t1 union select a from t1)} +} {} +do_test randexpr-2.1973 { + db eval {SELECT ~case when case when +d+case b when 11 then coalesce((select max(b) from t1 where (((abs(f)/abs((abs(11)/abs(e))+t1.b))>t1.f))),(select max(t1.e) from t1)) else t1.f end*t1.f*17>=(t1.e) or 17>t1.f then t1.a else t1.a end in (c,13,c) then t1.f when t1.a not in (t1.d, -t1.b,a) then -t1.b else 11 end FROM t1 WHERE NOT (t1.a in (+e,17,11*13) and b not between t1.f*(abs(t1.c-((abs(t1.a | coalesce((select max(case when -f-t1.c<=t1.f then 19 when (t1.a)<>t1.f and 13 between a and t1.e then -11 else 17 end) from t1 where c>19),f)-t1.e)/abs(e))))/abs(b))-t1.e | (19) and -t1.a and t1.f in (select 13 from t1 union select a from t1))} +} {-12} +do_test randexpr-2.1974 { + db eval {SELECT case when case 17 when case when (abs(t1.c)/abs(t1.c | coalesce((select max(t1.d) from t1 where not t1.e<17 or 11 not in (t1.a,t1.c,a) or c=d and (t1.b)=t1.e and 17 in ( -t1.c,a,d) and t1.f not in (19,a,b)),11) | (select -max(+t1.b) from t1)+19-19 | f))<>t1.a or e>17 then f else a end then 11 else f end=t1.a then f else -t1.f end FROM t1 WHERE -t1.b+a>= -t1.a} +} {-600} +do_test randexpr-2.1975 { + db eval {SELECT case when case 17 when case when (abs(t1.c)/abs(t1.c | coalesce((select max(t1.d) from t1 where not t1.e<17 or 11 not in 
(t1.a,t1.c,a) or c=d and (t1.b)=t1.e and 17 in ( -t1.c,a,d) and t1.f not in (19,a,b)),11) | (select -max(+t1.b) from t1)+19-19 | f))<>t1.a or e>17 then f else a end then 11 else f end=t1.a then f else -t1.f end FROM t1 WHERE NOT ( -t1.b+a>= -t1.a)} +} {} +do_test randexpr-2.1976 { + db eval {SELECT (abs(coalesce((select ((t1.c)) from t1 where not exists(select 1 from t1 where t1.e*17<13*coalesce((select max(a) from t1 where 19 in (select max(e*11) from t1 union select cast(avg(t1.b) AS integer) from t1)),a | a))),case when f-13t1.e then t1.b else f end))/abs(17)) FROM t1 WHERE ((d not between -a and b))} +} {17} +do_test randexpr-2.1977 { + db eval {SELECT (abs(coalesce((select ((t1.c)) from t1 where not exists(select 1 from t1 where t1.e*17<13*coalesce((select max(a) from t1 where 19 in (select max(e*11) from t1 union select cast(avg(t1.b) AS integer) from t1)),a | a))),case when f-13t1.e then t1.b else f end))/abs(17)) FROM t1 WHERE NOT (((d not between -a and b)))} +} {} +do_test randexpr-2.1978 { + db eval {SELECT (abs(coalesce((select ((t1.c)) from t1 where not exists(select 1 from t1 where t1.e*17<13*coalesce((select max(a) from t1 where 19 in (select max(e*11) from t1 union select cast(avg(t1.b) AS integer) from t1)),a & a))),case when f-13t1.e then t1.b else f end))/abs(17)) FROM t1 WHERE ((d not between -a and b))} +} {17} +do_test randexpr-2.1979 { + db eval {SELECT case f when (d)-case when coalesce((select max(coalesce((select 11 from t1 where case when (t1.f<=17 or exists(select 1 from t1 where 11 | d in (select (abs(max(t1.f))) from t1 union select ( -min(c)) from t1))) then a when t1.f not in (e,13,13) then t1.c else e end between 19 and c),t1.a)) from t1 where f>=t1.d),17) not between e and t1.c and 11<=17 then t1.e-19 when t1.a not in (c,t1.e,t1.c) then 11 else t1.b end then t1.e else t1.f end FROM t1 WHERE (not (case 17 when d then t1.c else c end=f | (select case count(*) when count(*)++max(11)+ -(abs(+cast(avg(c) AS integer)-(~count(*))+count(*)*(~case max( -c) when - -cast(avg(t1.f) AS integer) then (count(*)) else -max(t1.b) end*cast(avg(a) AS integer))-count(*) | ( -( -max(11)))) | ( -min(17))+count(*)) then (max(19)) else max(t1.c) end from t1)))} +} {600} +do_test randexpr-2.1980 { + db eval {SELECT case f when (d)-case when coalesce((select max(coalesce((select 11 from t1 where case when (t1.f<=17 or exists(select 1 from t1 where 11 | d in (select (abs(max(t1.f))) from t1 union select ( -min(c)) from t1))) then a when t1.f not in (e,13,13) then t1.c else e end between 19 and c),t1.a)) from t1 where f>=t1.d),17) not between e and t1.c and 11<=17 then t1.e-19 when t1.a not in (c,t1.e,t1.c) then 11 else t1.b end then t1.e else t1.f end FROM t1 WHERE NOT ((not (case 17 when d then t1.c else c end=f | (select case count(*) when count(*)++max(11)+ -(abs(+cast(avg(c) AS integer)-(~count(*))+count(*)*(~case max( -c) when - -cast(avg(t1.f) AS integer) then (count(*)) else -max(t1.b) end*cast(avg(a) AS integer))-count(*) | ( -( -max(11)))) | ( -min(17))+count(*)) then (max(19)) else max(t1.c) end from t1))))} +} {} +do_test randexpr-2.1981 { + db eval {SELECT case f when (d)-case when coalesce((select max(coalesce((select 11 from t1 where case when (t1.f<=17 or exists(select 1 from t1 where 11 & d in (select (abs(max(t1.f))) from t1 union select ( -min(c)) from t1))) then a when t1.f not in (e,13,13) then t1.c else e end between 19 and c),t1.a)) from t1 where f>=t1.d),17) not between e and t1.c and 11<=17 then t1.e-19 when t1.a not in (c,t1.e,t1.c) then 11 else t1.b end 
then t1.e else t1.f end FROM t1 WHERE (not (case 17 when d then t1.c else c end=f | (select case count(*) when count(*)++max(11)+ -(abs(+cast(avg(c) AS integer)-(~count(*))+count(*)*(~case max( -c) when - -cast(avg(t1.f) AS integer) then (count(*)) else -max(t1.b) end*cast(avg(a) AS integer))-count(*) | ( -( -max(11)))) | ( -min(17))+count(*)) then (max(19)) else max(t1.c) end from t1)))} +} {600} +do_test randexpr-2.1982 { + db eval {SELECT case when d+t1.d not between t1.f and + -t1.c then ~+a+(select +count(*) from t1) else (select +abs(count(*)) from t1)-t1.a end FROM t1 WHERE t1.b+f | d+a in (select +cast(avg(b) AS integer)++case min(coalesce((select t1.c from t1 where (e) in ((a)+17,t1.b,d)),t1.f)) when ~~cast(avg(17) AS integer)-max((t1.a)) then case min(13) when count(*) then count(distinct t1.a) else cast(avg(b) AS integer) end*max(d)+max(t1.b) else -count(*) end-count(*) from t1 union select cast(avg(t1.f) AS integer) from t1)} +} {} +do_test randexpr-2.1983 { + db eval {SELECT case when d+t1.d not between t1.f and + -t1.c then ~+a+(select +count(*) from t1) else (select +abs(count(*)) from t1)-t1.a end FROM t1 WHERE NOT (t1.b+f | d+a in (select +cast(avg(b) AS integer)++case min(coalesce((select t1.c from t1 where (e) in ((a)+17,t1.b,d)),t1.f)) when ~~cast(avg(17) AS integer)-max((t1.a)) then case min(13) when count(*) then count(distinct t1.a) else cast(avg(b) AS integer) end*max(d)+max(t1.b) else -count(*) end-count(*) from t1 union select cast(avg(t1.f) AS integer) from t1))} +} {-100} +do_test randexpr-2.1984 { + db eval {SELECT case when case when case case when (case when (( -ft1.c or exists(select 1 from t1 where not exists(select 1 from t1 where (a not between b and -t1.e)) and t1.f>=(t1.a)) and t1.f<(t1.b)) then 19 else e end) when a then t1.e else 11 end,(a),19)} +} {} +do_test randexpr-2.1985 { + db eval {SELECT case when case when case case when (case when (( -ft1.c or exists(select 1 from t1 where not exists(select 1 from t1 where (a not between b and -t1.e)) and t1.f>=(t1.a)) and t1.f<(t1.b)) then 19 else e end) when a then t1.e else 11 end,(a),19))} +} {-11300} +do_test randexpr-2.1986 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (select ((count(*)*min(11)*(abs(max(a)))-max(b)*+count(*))) from t1 union select case abs(abs(min(t1.b))*case count(distinct b) when count(distinct t1.a) then count(distinct t1.f-t1.f) else max(11) end) when cast(avg(e) AS integer) then count(*) else count(*) end from t1)),d)*11+t1.c+b | 13+19 FROM t1 WHERE t1.c in (select 19 from t1 union select ~t1.f-t1.f+17- -17 from t1)} +} {} +do_test randexpr-2.1987 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (select ((count(*)*min(11)*(abs(max(a)))-max(b)*+count(*))) from t1 union select case abs(abs(min(t1.b))*case count(distinct b) when count(distinct t1.a) then count(distinct t1.f-t1.f) else max(11) end) when cast(avg(e) AS integer) then count(*) else count(*) end from t1)),d)*11+t1.c+b | 13+19 FROM t1 WHERE NOT (t1.c in (select 19 from t1 union select ~t1.f-t1.f+17- -17 from t1))} +} {4900} +do_test randexpr-2.1988 { + db eval {SELECT coalesce((select 11 from t1 where 19 in (select ((count(*)*min(11)*(abs(max(a)))-max(b)*+count(*))) from t1 union select case abs(abs(min(t1.b))*case count(distinct b) when count(distinct t1.a) then count(distinct t1.f-t1.f) else max(11) end) when cast(avg(e) AS integer) then count(*) else count(*) end from t1)),d)*11+t1.c+b & 13+19 FROM t1 WHERE NOT (t1.c in (select 19 from t1 union select ~t1.f-t1.f+17- -17 from t1))} +} {32} 
+do_test randexpr-2.1989 { + db eval {SELECT case f when (select ++abs(max(d))+case case count(*) when min(t1.c) then (count(*)) else min(t1.e) end when -count(distinct t1.e) then count(distinct a) else min(t1.b) end from t1) then coalesce((select case when e in (select d from t1 union select f from t1) and t1.a not between b and 13 and d in (select 17 from t1 union select b from t1) then 17 when 11=f then 19 else d end from t1 where 11 not in (t1.c,t1.c,t1.d)),a) else e end-t1.e-t1.d-17*t1.f FROM t1 WHERE case when coalesce((select max(c) from t1 where 19 not between 11 and t1.e and t1.b in (select count(*) from t1 union select ~min(case c when (abs(case when d=( -t1.e) or t1.b<>t1.f then b else t1.b end)/abs(t1.c))+19 then t1.f else d end) from t1) and -b in (select min(17) from t1 union select min( -c) | count(distinct 17)- -(count(*))+max(t1.c) | -cast(avg(t1.c) AS integer) from t1)),t1.c)*t1.f not between -t1.e and t1.f then 11 else f end>f} +} {} +do_test randexpr-2.1990 { + db eval {SELECT case f when (select ++abs(max(d))+case case count(*) when min(t1.c) then (count(*)) else min(t1.e) end when -count(distinct t1.e) then count(distinct a) else min(t1.b) end from t1) then coalesce((select case when e in (select d from t1 union select f from t1) and t1.a not between b and 13 and d in (select 17 from t1 union select b from t1) then 17 when 11=f then 19 else d end from t1 where 11 not in (t1.c,t1.c,t1.d)),a) else e end-t1.e-t1.d-17*t1.f FROM t1 WHERE NOT (case when coalesce((select max(c) from t1 where 19 not between 11 and t1.e and t1.b in (select count(*) from t1 union select ~min(case c when (abs(case when d=( -t1.e) or t1.b<>t1.f then b else t1.b end)/abs(t1.c))+19 then t1.f else d end) from t1) and -b in (select min(17) from t1 union select min( -c) | count(distinct 17)- -(count(*))+max(t1.c) | -cast(avg(t1.c) AS integer) from t1)),t1.c)*t1.f not between -t1.e and t1.f then 11 else f end>f)} +} {-10700} +do_test randexpr-2.1991 { + db eval {SELECT case when ((coalesce((select t1.a from t1 where exists(select 1 from t1 where t1.a<=t1.b-e*coalesce((select max(a) from t1 where t1.e in (select min(13+13) from t1 union select -(count(distinct t1.f)-count(*)) from t1)),17)-f-t1.f) or t1.d in (select t1.a from t1 union select b from t1)),t1.d*t1.f)<>a)) then t1.d when not exists(select 1 from t1 where ((t1.f in (c,19,f)))) then t1.f else 17 end FROM t1 WHERE t1.c<=a} +} {} +do_test randexpr-2.1992 { + db eval {SELECT case when ((coalesce((select t1.a from t1 where exists(select 1 from t1 where t1.a<=t1.b-e*coalesce((select max(a) from t1 where t1.e in (select min(13+13) from t1 union select -(count(distinct t1.f)-count(*)) from t1)),17)-f-t1.f) or t1.d in (select t1.a from t1 union select b from t1)),t1.d*t1.f)<>a)) then t1.d when not exists(select 1 from t1 where ((t1.f in (c,19,f)))) then t1.f else 17 end FROM t1 WHERE NOT (t1.c<=a)} +} {400} +do_test randexpr-2.1993 { + db eval {SELECT case 17 when t1.f then 11 else (case when (c between case when ~t1.e+13-t1.c+case coalesce((select t1.c from t1 where t1.a not between a and 17),coalesce((select max(case when exists(select 1 from t1 where a>=t1.a) then ((t1.e)) when t1.e>=11 then 17 else t1.b end) from t1 where ( -11d),t1.c) | b- -17-(17)*c) from t1 where (19<13)),t1.c) not between a and t1.a} +} {200} +do_test randexpr-2.1994 { + db eval {SELECT case 17 when t1.f then 11 else (case when (c between case when ~t1.e+13-t1.c+case coalesce((select t1.c from t1 where t1.a not between a and 17),coalesce((select max(case when exists(select 1 
from t1 where a>=t1.a) then ((t1.e)) when t1.e>=11 then 17 else t1.b end) from t1 where ( -11d),t1.c) | b- -17-(17)*c) from t1 where (19<13)),t1.c) not between a and t1.a)} +} {} +do_test randexpr-2.1995 { + db eval {SELECT case 17 when t1.f then 11 else (case when (c between case when ~t1.e+13-t1.c+case coalesce((select t1.c from t1 where t1.a not between a and 17),coalesce((select max(case when exists(select 1 from t1 where a>=t1.a) then ((t1.e)) when t1.e>=11 then 17 else t1.b end) from t1 where ( -11d),t1.c) | b- -17-(17)*c) from t1 where (19<13)),t1.c) not between a and t1.a} +} {200} +do_test randexpr-2.1996 { + db eval {SELECT 17+(select case ~max((abs(t1.c+coalesce((select max(t1.f) from t1 where t1.f in (select 13-c-case when not a>=t1.c then f else t1.c end+t1.c from t1 union select 19 from t1) or exists(select 1 from t1 where 1317 | e*d-17*t1.e*case a when case when (not f>=19 and t1.f<=t1.b) then coalesce((select case 11 when -c then t1.f else (t1.f) end from t1 where 13 between t1.e and t1.f),(e)) when -t1.f not between b and d then t1.d else t1.a end then 11 else 19 end then d when ad)} +} {1017} +do_test randexpr-2.1997 { + db eval {SELECT 17+(select case ~max((abs(t1.c+coalesce((select max(t1.f) from t1 where t1.f in (select 13-c-case when not a>=t1.c then f else t1.c end+t1.c from t1 union select 19 from t1) or exists(select 1 from t1 where 1317 | e*d-17*t1.e*case a when case when (not f>=19 and t1.f<=t1.b) then coalesce((select case 11 when -c then t1.f else (t1.f) end from t1 where 13 between t1.e and t1.f),(e)) when -t1.f not between b and d then t1.d else t1.a end then 11 else 19 end then d when ad))} +} {} +do_test randexpr-2.1998 { + db eval {SELECT 17+(select case ~max((abs(t1.c+coalesce((select max(t1.f) from t1 where t1.f in (select 13-c-case when not a>=t1.c then f else t1.c end+t1.c from t1 union select 19 from t1) or exists(select 1 from t1 where 1317 | e*d-17*t1.e*case a when case when (not f>=19 and t1.f<=t1.b) then coalesce((select case 11 when -c then t1.f else (t1.f) end from t1 where 13 between t1.e and t1.f),(e)) when -t1.f not between b and d then t1.d else t1.a end then 11 else 19 end then d when ad)} +} {0} +do_test randexpr-2.1999 { + db eval {SELECT ~c-t1.a*case -case 19 when d then t1.f else t1.a end-d when 11 then coalesce((select max(case when (case when exists(select 1 from t1 where 11*11>13) then t1.c else 11 end)<=(select min(~d+case 19 when e then 13 else e end) from t1) then c when ((f))<=17 then d else t1.c end+t1.f) from t1 where (t1.b between -c and t1.e)),t1.e) else 17 end FROM t1 WHERE +(abs(d-(select cast(avg((select (abs(max(t1.d*+d*( -e*(13)) | 17)+ -min(t1.d)+~ - -max(17) | max(t1.a) | -count(distinct t1.a)-count(distinct 13)*(count(distinct t1.a)) | (count(distinct 17)))) from t1)) AS integer) from t1)-11)/abs(c+d-t1.e))>=t1.d} +} {} +do_test randexpr-2.2000 { + db eval {SELECT ~c-t1.a*case -case 19 when d then t1.f else t1.a end-d when 11 then coalesce((select max(case when (case when exists(select 1 from t1 where 11*11>13) then t1.c else 11 end)<=(select min(~d+case 19 when e then 13 else e end) from t1) then c when ((f))<=17 then d else t1.c end+t1.f) from t1 where (t1.b between -c and t1.e)),t1.e) else 17 end FROM t1 WHERE NOT (+(abs(d-(select cast(avg((select (abs(max(t1.d*+d*( -e*(13)) | 17)+ -min(t1.d)+~ - -max(17) | max(t1.a) | -count(distinct t1.a)-count(distinct 13)*(count(distinct t1.a)) | (count(distinct 17)))) from t1)) AS integer) from t1)-11)/abs(c+d-t1.e))>=t1.d)} +} {-2001} +do_test randexpr-2.2001 { + db eval 
{SELECT coalesce((select 11 from t1 where a not in (17,e,(select -abs(case ( -count(distinct t1.e)) when count(distinct 13) | max((abs(case 11 when -(abs(19)/abs(17 | t1.c*t1.e))*t1.c | t1.b then a else +d end)/abs(b*t1.a))) then max(c*13) else cast(avg(t1.f) AS integer) end)-max(11) from t1)*d)),t1.d) FROM t1 WHERE 19-c*coalesce((select max(case (select count(distinct t1.f) from t1) | t1.a when case a when (abs(case when not c+t1.d>=e then 17*t1.e+a when not exists(select 1 from t1 where 11>b) then c else a end)/abs(t1.e))+t1.f*13 then 11 else t1.f end then 19 else d end-b) from t1 where (d) not between f and e),t1.d)= -c} +} {} +do_test randexpr-2.2002 { + db eval {SELECT coalesce((select 11 from t1 where a not in (17,e,(select -abs(case ( -count(distinct t1.e)) when count(distinct 13) | max((abs(case 11 when -(abs(19)/abs(17 | t1.c*t1.e))*t1.c | t1.b then a else +d end)/abs(b*t1.a))) then max(c*13) else cast(avg(t1.f) AS integer) end)-max(11) from t1)*d)),t1.d) FROM t1 WHERE NOT (19-c*coalesce((select max(case (select count(distinct t1.f) from t1) | t1.a when case a when (abs(case when not c+t1.d>=e then 17*t1.e+a when not exists(select 1 from t1 where 11>b) then c else a end)/abs(t1.e))+t1.f*13 then 11 else t1.f end then 19 else d end-b) from t1 where (d) not between f and e),t1.d)= -c)} +} {11} +do_test randexpr-2.2003 { + db eval {SELECT coalesce((select 11 from t1 where a not in (17,e,(select -abs(case ( -count(distinct t1.e)) when count(distinct 13) & max((abs(case 11 when -(abs(19)/abs(17 & t1.c*t1.e))*t1.c & t1.b then a else +d end)/abs(b*t1.a))) then max(c*13) else cast(avg(t1.f) AS integer) end)-max(11) from t1)*d)),t1.d) FROM t1 WHERE NOT (19-c*coalesce((select max(case (select count(distinct t1.f) from t1) | t1.a when case a when (abs(case when not c+t1.d>=e then 17*t1.e+a when not exists(select 1 from t1 where 11>b) then c else a end)/abs(t1.e))+t1.f*13 then 11 else t1.f end then 19 else d end-b) from t1 where (d) not between f and e),t1.d)= -c)} +} {11} +do_test randexpr-2.2004 { + db eval {SELECT 11+t1.b*~c*17-13-a+t1.d | t1.f+t1.b*coalesce((select 17 from t1 where t1.e in (select (select cast(avg(case case e when (t1.d) then 17 else (abs((abs(case when d19 then 19 else a end)/abs(f)))/abs((t1.e))) end when b then (11) else f end+17) AS integer) from t1) from t1 union select t1.c from t1)),(17)) FROM t1 WHERE e=11} +} {} +do_test randexpr-2.2005 { + db eval {SELECT 11+t1.b*~c*17-13-a+t1.d | t1.f+t1.b*coalesce((select 17 from t1 where t1.e in (select (select cast(avg(case case e when (t1.d) then 17 else (abs((abs(case when d19 then 19 else a end)/abs(f)))/abs((t1.e))) end when b then (11) else f end+17) AS integer) from t1) from t1 union select t1.c from t1)),(17)) FROM t1 WHERE NOT (e=11)} +} {-1019998} +do_test randexpr-2.2006 { + db eval {SELECT 11+t1.b*~c*17-13-a+t1.d & t1.f+t1.b*coalesce((select 17 from t1 where t1.e in (select (select cast(avg(case case e when (t1.d) then 17 else (abs((abs(case when d19 then 19 else a end)/abs(f)))/abs((t1.e))) end when b then (11) else f end+17) AS integer) from t1) from t1 union select t1.c from t1)),(17)) FROM t1 WHERE NOT (e=11)} +} {896} +do_test randexpr-2.2007 { + db eval {SELECT e*~~(coalesce((select +case when not exists(select 1 from t1 where 13*coalesce((select max(coalesce((select max(11) from t1 where not exists(select 1 from t1 where (select -count(distinct 11) | min(11) from t1)>case b when b then c else d end)),13)) from t1 where -c<>d),13)<>t1.f and 13 between 13 and 19) then b-t1.e+e else (t1.f) end from t1 where 
((not not e>t1.c and c not between a and e))),t1.d)) FROM t1 WHERE exists(select 1 from t1 where case when case when e -b)} +} {200000} +do_test randexpr-2.2008 { + db eval {SELECT e*~~(coalesce((select +case when not exists(select 1 from t1 where 13*coalesce((select max(coalesce((select max(11) from t1 where not exists(select 1 from t1 where (select -count(distinct 11) | min(11) from t1)>case b when b then c else d end)),13)) from t1 where -c<>d),13)<>t1.f and 13 between 13 and 19) then b-t1.e+e else (t1.f) end from t1 where ((not not e>t1.c and c not between a and e))),t1.d)) FROM t1 WHERE NOT (exists(select 1 from t1 where case when case when e -b))} +} {} +do_test randexpr-2.2009 { + db eval {SELECT e*~~(coalesce((select +case when not exists(select 1 from t1 where 13*coalesce((select max(coalesce((select max(11) from t1 where not exists(select 1 from t1 where (select -count(distinct 11) & min(11) from t1)>case b when b then c else d end)),13)) from t1 where -c<>d),13)<>t1.f and 13 between 13 and 19) then b-t1.e+e else (t1.f) end from t1 where ((not not e>t1.c and c not between a and e))),t1.d)) FROM t1 WHERE exists(select 1 from t1 where case when case when e -b)} +} {200000} +do_test randexpr-2.2010 { + db eval {SELECT (case t1.b-coalesce((select max(t1.f) from t1 where +t1.c=e or coalesce((select max(t1.a) from t1 where exists(select 1 from t1 where ((13-(select +cast(avg( -t1.b) AS integer) from t1) in (case f when (19) then t1.f else t1.f end,11,t1.e))))),(t1.a)+t1.e) not in (t1.f,t1.a,b) and t1.e not in (11,t1.b,t1.b)),d) when 19 then t1.e else c end-t1.e*t1.f) FROM t1 WHERE t1.ecoalesce((select max(f) from t1 where 19>=b),19) then f else t1.e end-f)/abs(t1.a))-11 else t1.e end when exists(select 1 from t1 where not exists(select 1 from t1 where ((c)) not in (b,a,e))) then t1.c else ((11)) end FROM t1 WHERE (not 13 not in (17,a,e))} +} {} +do_test randexpr-2.2013 { + db eval {SELECT t1.a+e-case when 19<+17 then case when 19 not between t1.e and t1.e then (abs(case when e+17*(select count(distinct (abs(+17)/abs(t1.c))) from t1)<>coalesce((select max(f) from t1 where 19>=b),19) then f else t1.e end-f)/abs(t1.a))-11 else t1.e end when exists(select 1 from t1 where not exists(select 1 from t1 where ((c)) not in (b,a,e))) then t1.c else ((11)) end FROM t1 WHERE NOT ((not 13 not in (17,a,e)))} +} {589} +do_test randexpr-2.2014 { + db eval {SELECT case when not exists(select 1 from t1 where case when -~(19)=t1.c then t1.f*~case when t1.f<=(select max(t1.c) from t1) then e when (c<>13) then t1.a else (b) end when t1.b between 19 and 17 then 17 else e end-e+t1.a>e or -19 in (select max(13) from t1 union select min( -19) from t1)) then t1.b when -11>=a then ~b else 19 end FROM t1 WHERE (c not between coalesce((select max(c) from t1 where (not t1.c<>t1.a)), -(abs(case when t1.e<=t1.c then t1.d when exists(select 1 from t1 where 17 in (select case when coalesce((select c from t1 where 17-t1.a-coalesce((select 17 from t1 where t1.f in (t1.b,19,t1.c)),17) not between e and 13),t1.b) not in (e,13,t1.a) then t1.c else b end | f from t1 union select t1.f from t1)) then t1.f else 11 end)/abs(t1.a))) and t1.e)} +} {} +do_test randexpr-2.2015 { + db eval {SELECT case when not exists(select 1 from t1 where case when -~(19)=t1.c then t1.f*~case when t1.f<=(select max(t1.c) from t1) then e when (c<>13) then t1.a else (b) end when t1.b between 19 and 17 then 17 else e end-e+t1.a>e or -19 in (select max(13) from t1 union select min( -19) from t1)) then t1.b when -11>=a then ~b else 19 end FROM t1 WHERE 
NOT ((c not between coalesce((select max(c) from t1 where (not t1.c<>t1.a)), -(abs(case when t1.e<=t1.c then t1.d when exists(select 1 from t1 where 17 in (select case when coalesce((select c from t1 where 17-t1.a-coalesce((select 17 from t1 where t1.f in (t1.b,19,t1.c)),17) not between e and 13),t1.b) not in (e,13,t1.a) then t1.c else b end | f from t1 union select t1.f from t1)) then t1.f else 11 end)/abs(t1.a))) and t1.e))} +} {19} +do_test randexpr-2.2016 { + db eval {SELECT case t1.c | t1.b when t1.f-c then case when c=11) or t1.c<=t1.e) then + - -b+11 else 13 end from t1 union select (t1.c) from t1) then 17 else 11 end else t1.a end FROM t1 WHERE t1.f | 13+coalesce((select max(11) from t1 where e>=(select ~case cast(avg(19*f+(c)) AS integer) when (min(c))-min(t1.a) then cast(avg(13) AS integer) else count(distinct e) end from t1)),t1.f)>=coalesce((select c from t1 where (exists(select 1 from t1 where t1.f>=f and (t1.d)<>t1.a))),c) or not exists(select 1 from t1 where not 11>=t1.d) or (b between c and 11 and 11< -t1.b and t1.e>=t1.b) or t1.c>e} +} {100} +do_test randexpr-2.2017 { + db eval {SELECT case t1.c | t1.b when t1.f-c then case when c=11) or t1.c<=t1.e) then + - -b+11 else 13 end from t1 union select (t1.c) from t1) then 17 else 11 end else t1.a end FROM t1 WHERE NOT (t1.f | 13+coalesce((select max(11) from t1 where e>=(select ~case cast(avg(19*f+(c)) AS integer) when (min(c))-min(t1.a) then cast(avg(13) AS integer) else count(distinct e) end from t1)),t1.f)>=coalesce((select c from t1 where (exists(select 1 from t1 where t1.f>=f and (t1.d)<>t1.a))),c) or not exists(select 1 from t1 where not 11>=t1.d) or (b between c and 11 and 11< -t1.b and t1.e>=t1.b) or t1.c>e)} +} {} +do_test randexpr-2.2018 { + db eval {SELECT case t1.c & t1.b when t1.f-c then case when c=11) or t1.c<=t1.e) then + - -b+11 else 13 end from t1 union select (t1.c) from t1) then 17 else 11 end else t1.a end FROM t1 WHERE t1.f | 13+coalesce((select max(11) from t1 where e>=(select ~case cast(avg(19*f+(c)) AS integer) when (min(c))-min(t1.a) then cast(avg(13) AS integer) else count(distinct e) end from t1)),t1.f)>=coalesce((select c from t1 where (exists(select 1 from t1 where t1.f>=f and (t1.d)<>t1.a))),c) or not exists(select 1 from t1 where not 11>=t1.d) or (b between c and 11 and 11< -t1.b and t1.e>=t1.b) or t1.c>e} +} {100} +do_test randexpr-2.2019 { + db eval {SELECT coalesce((select 19 from t1 where a<=case (abs(e)/abs(case b+f when b then t1.e else e end)) when + -t1.b+coalesce((select max(t1.f) from t1 where (19<=+e) and (c*b)>= -t1.e and (c>=c)),11+(e)+a)-t1.b then 17 else 17 end-t1.e),a) FROM t1 WHERE +d*t1.f+c+t1.c*13<>t1.e} +} {100} +do_test randexpr-2.2020 { + db eval {SELECT coalesce((select 19 from t1 where a<=case (abs(e)/abs(case b+f when b then t1.e else e end)) when + -t1.b+coalesce((select max(t1.f) from t1 where (19<=+e) and (c*b)>= -t1.e and (c>=c)),11+(e)+a)-t1.b then 17 else 17 end-t1.e),a) FROM t1 WHERE NOT (+d*t1.f+c+t1.c*13<>t1.e)} +} {} +do_test randexpr-2.2021 { + db eval {SELECT (abs(t1.e)/abs(coalesce((select +t1.a*c from t1 where (abs(e)/abs(d)) in (select +~count(*)-min(t1.d) from t1 union select abs(abs(cast(avg(t1.b) AS integer))+cast(avg(b) AS integer)) from t1)),d))) FROM t1 WHERE not b not between (select max(13) from t1) and 13} +} {} +do_test randexpr-2.2022 { + db eval {SELECT (abs(t1.e)/abs(coalesce((select +t1.a*c from t1 where (abs(e)/abs(d)) in (select +~count(*)-min(t1.d) from t1 union select abs(abs(cast(avg(t1.b) AS integer))+cast(avg(b) AS integer)) from 
t1)),d))) FROM t1 WHERE NOT (not b not between (select max(13) from t1) and 13)} +} {1} +do_test randexpr-2.2023 { + db eval {SELECT +(select +~(count(distinct (abs((abs(coalesce((select max(t1.c) from t1 where -e in (select t1.e from t1 union select (e) from t1)),c)-11)/abs(19))*t1.c)/abs(19))))*count(distinct t1.a)*+count(distinct a)*count(distinct t1.d) | count(*)-max(11)+cast(avg(t1.b) AS integer)*min(t1.a) from t1)+19+t1.c*19+~coalesce((select e from t1 where 13 in (select count(distinct 17) from t1 union select count(distinct 11) from t1) or t1.f<>c),17) FROM t1 WHERE exists(select 1 from t1 where coalesce((select t1.f from t1 where t1.a in (case when exists(select 1 from t1 where b*t1.e in (select (t1.a) from t1 union select (case -13 when f then t1.b else t1.a end) from t1)) then t1.d else 17 end,d,11)),c) not in (17,19,t1.b)) or (b) in (select abs(~~abs(count(*)) | - -min(c)-min(11)+count(*) | max(a) | count(*)-count(distinct 11)) from t1 union select max(t1.f) from t1) or t1.b>=19} +} {5216} +do_test randexpr-2.2024 { + db eval {SELECT +(select +~(count(distinct (abs((abs(coalesce((select max(t1.c) from t1 where -e in (select t1.e from t1 union select (e) from t1)),c)-11)/abs(19))*t1.c)/abs(19))))*count(distinct t1.a)*+count(distinct a)*count(distinct t1.d) | count(*)-max(11)+cast(avg(t1.b) AS integer)*min(t1.a) from t1)+19+t1.c*19+~coalesce((select e from t1 where 13 in (select count(distinct 17) from t1 union select count(distinct 11) from t1) or t1.f<>c),17) FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select t1.f from t1 where t1.a in (case when exists(select 1 from t1 where b*t1.e in (select (t1.a) from t1 union select (case -13 when f then t1.b else t1.a end) from t1)) then t1.d else 17 end,d,11)),c) not in (17,19,t1.b)) or (b) in (select abs(~~abs(count(*)) | - -min(c)-min(11)+count(*) | max(a) | count(*)-count(distinct 11)) from t1 union select max(t1.f) from t1) or t1.b>=19)} +} {} +do_test randexpr-2.2025 { + db eval {SELECT +(select +~(count(distinct (abs((abs(coalesce((select max(t1.c) from t1 where -e in (select t1.e from t1 union select (e) from t1)),c)-11)/abs(19))*t1.c)/abs(19))))*count(distinct t1.a)*+count(distinct a)*count(distinct t1.d) & count(*)-max(11)+cast(avg(t1.b) AS integer)*min(t1.a) from t1)+19+t1.c*19+~coalesce((select e from t1 where 13 in (select count(distinct 17) from t1 union select count(distinct 11) from t1) or t1.f<>c),17) FROM t1 WHERE exists(select 1 from t1 where coalesce((select t1.f from t1 where t1.a in (case when exists(select 1 from t1 where b*t1.e in (select (t1.a) from t1 union select (case -13 when f then t1.b else t1.a end) from t1)) then t1.d else 17 end,d,11)),c) not in (17,19,t1.b)) or (b) in (select abs(~~abs(count(*)) | - -min(c)-min(11)+count(*) | max(a) | count(*)-count(distinct 11)) from t1 union select max(t1.f) from t1) or t1.b>=19} +} {25208} +do_test randexpr-2.2026 { + db eval {SELECT (abs(case t1.c when ~coalesce((select max(case coalesce((select max(b) from t1 where not aa),e) then 17 else a end)/abs(19)) FROM t1 WHERE d not in (d,d+case coalesce((select t1.e from t1 where (case when 17*case case f-+t1.c+coalesce((select t1.c from t1 where t1.df)),d) when t1.e then (t1.c) else -e end,t1.f)} +} {} +do_test randexpr-2.2027 { + db eval {SELECT (abs(case t1.c when ~coalesce((select max(case coalesce((select max(b) from t1 where not aa),e) then 17 else a end)/abs(19)) FROM t1 WHERE NOT (d not in (d,d+case coalesce((select t1.e from t1 where (case when 17*case case f-+t1.c+coalesce((select t1.c from t1 where 
t1.df)),d) when t1.e then (t1.c) else -e end,t1.f))} +} {5} +do_test randexpr-2.2028 { + db eval {SELECT case 13 when 13 then case when 11 between case case when not 17 not between f-t1.e and (abs(19)/abs(((t1.b))))*19 then t1.a when exists(select 1 from t1 where t1.e not between d and 13) then t1.c else -e end when t1.e then t1.c else e end and 17 then 19 when not exists(select 1 from t1 where 11=f) and exists(select 1 from t1 where d<>t1.a) or t1.f>=17 then a else t1.e end else 19 end*11 FROM t1 WHERE case 17 when t1.f then 11 else c end+d=t1.f} +} {} +do_test randexpr-2.2029 { + db eval {SELECT case 13 when 13 then case when 11 between case case when not 17 not between f-t1.e and (abs(19)/abs(((t1.b))))*19 then t1.a when exists(select 1 from t1 where t1.e not between d and 13) then t1.c else -e end when t1.e then t1.c else e end and 17 then 19 when not exists(select 1 from t1 where 11=f) and exists(select 1 from t1 where d<>t1.a) or t1.f>=17 then a else t1.e end else 19 end*11 FROM t1 WHERE NOT (case 17 when t1.f then 11 else c end+d=t1.f)} +} {1100} +do_test randexpr-2.2030 { + db eval {SELECT coalesce((select max(b-case when exists(select 1 from t1 where case when t1.c>t1.b then t1.c*19 else coalesce((select max((abs(t1.a)/abs(~(select +max(t1.c)*max(a)*cast(avg(t1.d) AS integer) from t1)+case when e=t1.c or t1.c between e and b then +a else 13 end))) from t1 where 17=b),t1.a) end not between f and t1.c) then c else e end+ - -d-17) from t1 where t1.e=(select cast(avg(d) AS integer) from t1)} +} {} +do_test randexpr-2.2031 { + db eval {SELECT coalesce((select max(b-case when exists(select 1 from t1 where case when t1.c>t1.b then t1.c*19 else coalesce((select max((abs(t1.a)/abs(~(select +max(t1.c)*max(a)*cast(avg(t1.d) AS integer) from t1)+case when e=t1.c or t1.c between e and b then +a else 13 end))) from t1 where 17=b),t1.a) end not between f and t1.c) then c else e end+ - -d-17) from t1 where t1.e=(select cast(avg(d) AS integer) from t1))} +} {17} +do_test randexpr-2.2032 { + db eval {SELECT case when -13 in (select +case when case when exists(select 1 from t1 where ~coalesce((select t1.f from t1 where exists(select 1 from t1 where t1.a not between t1.b and -t1.e)),t1.c)+c | b not between f and t1.b) then -11-19 else t1.d end in (select ~abs(max(19))+cast(avg(e) AS integer)-max(13)-count(*) from t1 union select min(f) from t1) then t1.f else b end+t1.e from t1 union select t1.f from t1) then b when e(select count(distinct d) from t1)) then t1.a when +(select cast(avg(e-19*~case 19 when t1.f then (t1.d) else t1.e end*t1.d*c) AS integer) from t1) in (e,t1.c,t1.e) then 13 else a end from t1 where d not between t1.b and f),a)) from t1 union select min((t1.b)) from t1) or (t1.e<=13)} +} {} +do_test randexpr-2.2033 { + db eval {SELECT case when -13 in (select +case when case when exists(select 1 from t1 where ~coalesce((select t1.f from t1 where exists(select 1 from t1 where t1.a not between t1.b and -t1.e)),t1.c)+c | b not between f and t1.b) then -11-19 else t1.d end in (select ~abs(max(19))+cast(avg(e) AS integer)-max(13)-count(*) from t1 union select min(f) from t1) then t1.f else b end+t1.e from t1 union select t1.f from t1) then b when e(select count(distinct d) from t1)) then t1.a when +(select cast(avg(e-19*~case 19 when t1.f then (t1.d) else t1.e end*t1.d*c) AS integer) from t1) in (e,t1.c,t1.e) then 13 else a end from t1 where d not between t1.b and f),a)) from t1 union select min((t1.b)) from t1) or (t1.e<=13))} +} {411} +do_test randexpr-2.2034 { + db eval {SELECT case when 
-13 in (select +case when case when exists(select 1 from t1 where ~coalesce((select t1.f from t1 where exists(select 1 from t1 where t1.a not between t1.b and -t1.e)),t1.c)+c & b not between f and t1.b) then -11-19 else t1.d end in (select ~abs(max(19))+cast(avg(e) AS integer)-max(13)-count(*) from t1 union select min(f) from t1) then t1.f else b end+t1.e from t1 union select t1.f from t1) then b when e(select count(distinct d) from t1)) then t1.a when +(select cast(avg(e-19*~case 19 when t1.f then (t1.d) else t1.e end*t1.d*c) AS integer) from t1) in (e,t1.c,t1.e) then 13 else a end from t1 where d not between t1.b and f),a)) from t1 union select min((t1.b)) from t1) or (t1.e<=13))} +} {411} +do_test randexpr-2.2035 { + db eval {SELECT (+coalesce((select max(c) from t1 where case when ( -t1.f between t1.a | -a*(abs(11 | e+case when not exists(select 1 from t1 where t1.b<>f or a=t1.a) then 17*(a) else 17 end*c)/abs(c))-d+f* -e-t1.a and b) then -11-a else t1.a end*19+b | t1.a<=t1.e),t1.b)) FROM t1 WHERE t1.e between -case when e not in (t1.e,case when d+t1.c-b-t1.b+case when -ef or a=t1.a) then 17*(a) else 17 end*c)/abs(c))-d+f* -e-t1.a and b) then -11-a else t1.a end*19+b | t1.a<=t1.e),t1.b)) FROM t1 WHERE NOT (t1.e between -case when e not in (t1.e,case when d+t1.c-b-t1.b+case when -ef or a=t1.a) then 17*(a) else 17 end*c)/abs(c))-d+f* -e-t1.a and b) then -11-a else t1.a end*19+b & t1.a<=t1.e),t1.b)) FROM t1 WHERE NOT (t1.e between -case when e not in (t1.e,case when d+t1.c-b-t1.b+case when -ee then t1.d else a end+f)-case (+min(t1.f)*min(e)+cast(avg(c) AS integer))*count(*) when - -((count(distinct f))) then max(a) else count(*) end | -cast(avg(11) AS integer)*count(*))-count(*)*( -max(f)) from t1)} +} {} +do_test randexpr-2.2039 { + db eval {SELECT (select +~~ -(count(*))+max((select ~count(*)+count(distinct d)*~cast(avg(19) AS integer)+case count(*)- -abs(abs(~abs(++abs(cast(avg(t1.c) AS integer)))+count(distinct e)+count(*)))+(min(19) | ( -cast(avg(b) AS integer) | - -(count(distinct t1.f)))) when -count(*) then max( -b) else max((t1.d)) end from t1)) from t1) FROM t1 WHERE NOT ((abs(c)/abs((select (abs(count(*))) from t1))) not between t1.b and (select abs(count(*)* -~min(~e+case when t1.a+(a)>e then t1.d else a end+f)-case (+min(t1.f)*min(e)+cast(avg(c) AS integer))*count(*) when - -((count(distinct f))) then max(a) else count(*) end | -cast(avg(11) AS integer)*count(*))-count(*)*( -max(f)) from t1))} +} {377} +do_test randexpr-2.2040 { + db eval {SELECT (select +~~ -(count(*))+max((select ~count(*)+count(distinct d)*~cast(avg(19) AS integer)+case count(*)- -abs(abs(~abs(++abs(cast(avg(t1.c) AS integer)))+count(distinct e)+count(*)))+(min(19) & ( -cast(avg(b) AS integer) & - -(count(distinct t1.f)))) when -count(*) then max( -b) else max((t1.d)) end from t1)) from t1) FROM t1 WHERE NOT ((abs(c)/abs((select (abs(count(*))) from t1))) not between t1.b and (select abs(count(*)* -~min(~e+case when t1.a+(a)>e then t1.d else a end+f)-case (+min(t1.f)*min(e)+cast(avg(c) AS integer))*count(*) when - -((count(distinct f))) then max(a) else count(*) end | -cast(avg(11) AS integer)*count(*))-count(*)*( -max(f)) from t1))} +} {377} +do_test randexpr-2.2041 { + db eval {SELECT case when ((select abs((count(*)))+min(case 17 when b then 13 | 11+c | f else 19 end) | +count(*)- - -max(t1.d) from t1) not between 19-case when t1.c not between ~e and (e) then t1.d else t1.c end and t1.e) then t1.d when t1.d in (select -~count(distinct t1.f)*(min(t1.b)) from t1 union select (cast(avg(17) AS integer)) 
from t1) then d else t1.b end FROM t1 WHERE not exists(select 1 from t1 where d in (coalesce((select max((select abs(~min(f))++~count(distinct t1.a)+cast(avg(t1.b) AS integer)*cast(avg(13) AS integer) from t1)) from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where (exists(select 1 from t1 where 11=13 then ~19*13 | t1.c+t1.e when c between t1.d and a then t1.b else t1.d end)-13,17,11))} +} {400} +do_test randexpr-2.2042 { + db eval {SELECT case when ((select abs((count(*)))+min(case 17 when b then 13 | 11+c | f else 19 end) | +count(*)- - -max(t1.d) from t1) not between 19-case when t1.c not between ~e and (e) then t1.d else t1.c end and t1.e) then t1.d when t1.d in (select -~count(distinct t1.f)*(min(t1.b)) from t1 union select (cast(avg(17) AS integer)) from t1) then d else t1.b end FROM t1 WHERE NOT (not exists(select 1 from t1 where d in (coalesce((select max((select abs(~min(f))++~count(distinct t1.a)+cast(avg(t1.b) AS integer)*cast(avg(13) AS integer) from t1)) from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where (exists(select 1 from t1 where 11=13 then ~19*13 | t1.c+t1.e when c between t1.d and a then t1.b else t1.d end)-13,17,11)))} +} {} +do_test randexpr-2.2043 { + db eval {SELECT case when ((select abs((count(*)))+min(case 17 when b then 13 & 11+c & f else 19 end) & +count(*)- - -max(t1.d) from t1) not between 19-case when t1.c not between ~e and (e) then t1.d else t1.c end and t1.e) then t1.d when t1.d in (select -~count(distinct t1.f)*(min(t1.b)) from t1 union select (cast(avg(17) AS integer)) from t1) then d else t1.b end FROM t1 WHERE not exists(select 1 from t1 where d in (coalesce((select max((select abs(~min(f))++~count(distinct t1.a)+cast(avg(t1.b) AS integer)*cast(avg(13) AS integer) from t1)) from t1 where exists(select 1 from t1 where not not exists(select 1 from t1 where (exists(select 1 from t1 where 11=13 then ~19*13 | t1.c+t1.e when c between t1.d and a then t1.b else t1.d end)-13,17,11))} +} {400} +do_test randexpr-2.2044 { + db eval {SELECT case when not exists(select 1 from t1 where e in (select +(count(distinct (abs(case when ((abs(coalesce((select coalesce((select case when c>f and a in (19, -e,t1.c) then +t1.a else t1.e end from t1 where ct1.f),(select case abs(count(distinct t1.e)-cast(avg(t1.d) AS integer)*cast(avg(t1.b) AS integer)) when min(19) then count(*) else min(c) end+count(distinct t1.c) from t1))+t1.b,t1.d, -d)} +} {} +do_test randexpr-2.2045 { + db eval {SELECT case when not exists(select 1 from t1 where e in (select +(count(distinct (abs(case when ((abs(coalesce((select coalesce((select case when c>f and a in (19, -e,t1.c) then +t1.a else t1.e end from t1 where ct1.f),(select case abs(count(distinct t1.e)-cast(avg(t1.d) AS integer)*cast(avg(t1.b) AS integer)) when min(19) then count(*) else min(c) end+count(distinct t1.c) from t1))+t1.b,t1.d, -d))} +} {120000} +do_test randexpr-2.2046 { + db eval {SELECT -case when -b=t1.c then t1.a when (select min(t1.b) from t1)<=d+coalesce((select max(11+a) from t1 where (not exists(select 1 from t1 where c | -((case case when (t1.f between a and t1.b) then b else c end-t1.c when 19 then t1.c else f end+t1.c)+t1.b | a)-t1.c-(c)<=19))),t1.c) then e else -t1.d end-(17) FROM t1 WHERE (t1.b | case t1.b when ((t1.e)) then (abs(19)/abs(t1.a))-coalesce((select max(t1.e*case b when a then 11 else t1.a end) from t1 where +~case when 13=17 and t1.e+11 in (select t1.f from t1 union select t1.d from t1) then t1.b when (b<17 and a>= -17) then f else -t1.e 
end+(17)<=13),d) else b end | b between t1.d and t1.f)} +} {} +do_test randexpr-2.2047 { + db eval {SELECT -case when -b=t1.c then t1.a when (select min(t1.b) from t1)<=d+coalesce((select max(11+a) from t1 where (not exists(select 1 from t1 where c | -((case case when (t1.f between a and t1.b) then b else c end-t1.c when 19 then t1.c else f end+t1.c)+t1.b | a)-t1.c-(c)<=19))),t1.c) then e else -t1.d end-(17) FROM t1 WHERE NOT ((t1.b | case t1.b when ((t1.e)) then (abs(19)/abs(t1.a))-coalesce((select max(t1.e*case b when a then 11 else t1.a end) from t1 where +~case when 13=17 and t1.e+11 in (select t1.f from t1 union select t1.d from t1) then t1.b when (b<17 and a>= -17) then f else -t1.e end+(17)<=13),d) else b end | b between t1.d and t1.f))} +} {-517} +do_test randexpr-2.2048 { + db eval {SELECT -case when -b=t1.c then t1.a when (select min(t1.b) from t1)<=d+coalesce((select max(11+a) from t1 where (not exists(select 1 from t1 where c & -((case case when (t1.f between a and t1.b) then b else c end-t1.c when 19 then t1.c else f end+t1.c)+t1.b & a)-t1.c-(c)<=19))),t1.c) then e else -t1.d end-(17) FROM t1 WHERE NOT ((t1.b | case t1.b when ((t1.e)) then (abs(19)/abs(t1.a))-coalesce((select max(t1.e*case b when a then 11 else t1.a end) from t1 where +~case when 13=17 and t1.e+11 in (select t1.f from t1 union select t1.d from t1) then t1.b when (b<17 and a>= -17) then f else -t1.e end+(17)<=13),d) else b end | b between t1.d and t1.f))} +} {-517} +do_test randexpr-2.2049 { + db eval {SELECT case when not c=t1.f | t1.d),19),t1.a) then (t1.d) when -a<=( -t1.a) then a else f end*a,13,19)) or e=17),a)*17 FROM t1 WHERE (select case min(+19) when abs((max(17)* -min(a*(abs(case e when t1.b then t1.f else -t1.c end*f)/abs(t1.d))*e)-count(*) | max(19))) then - -case count(distinct 11) when min(11) then max(19) else min(13) end-( -count(distinct 13)) else -(count(distinct b)) end*cast(avg(17) AS integer)-max(e) from t1) between t1.d and coalesce((select max(13) from t1 where not 17>a-f-e),13)} +} {} +do_test randexpr-2.2052 { + db eval {SELECT +11+~coalesce((select -19 from t1 where exists(select 1 from t1 where f*t1.e not in ( -f*~t1.a-f*case when t1.f in ((e),coalesce((select b from t1 where b>=t1.f | t1.d),19),t1.a) then (t1.d) when -a<=( -t1.a) then a else f end*a,13,19)) or e=17),a)*17 FROM t1 WHERE NOT ((select case min(+19) when abs((max(17)* -min(a*(abs(case e when t1.b then t1.f else -t1.c end*f)/abs(t1.d))*e)-count(*) | max(19))) then - -case count(distinct 11) when min(11) then max(19) else min(13) end-( -count(distinct 13)) else -(count(distinct b)) end*cast(avg(17) AS integer)-max(e) from t1) between t1.d and coalesce((select max(13) from t1 where not 17>a-f-e),13))} +} {317} +do_test randexpr-2.2053 { + db eval {SELECT +11+~coalesce((select -19 from t1 where exists(select 1 from t1 where f*t1.e not in ( -f*~t1.a-f*case when t1.f in ((e),coalesce((select b from t1 where b>=t1.f & t1.d),19),t1.a) then (t1.d) when -a<=( -t1.a) then a else f end*a,13,19)) or e=17),a)*17 FROM t1 WHERE NOT ((select case min(+19) when abs((max(17)* -min(a*(abs(case e when t1.b then t1.f else -t1.c end*f)/abs(t1.d))*e)-count(*) | max(19))) then - -case count(distinct 11) when min(11) then max(19) else min(13) end-( -count(distinct 13)) else -(count(distinct b)) end*cast(avg(17) AS integer)-max(e) from t1) between t1.d and coalesce((select max(13) from t1 where not 17>a-f-e),13))} +} {317} +do_test randexpr-2.2054 { + db eval {SELECT coalesce((select 11 from t1 where (b-case (select count(distinct t1.f*e) from t1) 
when t1.f | 11 | 11*coalesce((select 19 from t1 where (~coalesce((select f from t1 where (t1.c>17)),a)+t1.c not between f and t1.e or 19>=c)),(t1.d)) then a else t1.f end>=f and exists(select 1 from t1 where 13<>c) or - -t1.f17)),a)+t1.c not between f and t1.e or 19>=c)),(t1.d)) then a else t1.f end>=f and exists(select 1 from t1 where 13<>c) or - -t1.f17)),a)+t1.c not between f and t1.e or 19>=c)),(t1.d)) then a else t1.f end>=f and exists(select 1 from t1 where 13<>c) or - -t1.fcoalesce((select max(coalesce((select t1.e from t1 where not (abs(a+t1.e*c | case when t1.b in (select -max(t1.b)*cast(avg(19) AS integer) from t1 union select -max(13) from t1) then t1.f else 19 end-f+(19)+19)/abs(t1.e)) in (d,13,e) or t1.e<=t1.e),t1.b-17)) from t1 where -11<>17 or e in (select max(19) from t1 union select (cast(avg(f) AS integer)) from t1)),a)),17))/abs(t1.e)) FROM t1 WHERE t1.e<>13 | (t1.f)+c-((13))-~13 | (abs(e)/abs(coalesce((select max(t1.c) from t1 where (d)<=t1.d),case when (exists(select 1 from t1 where not exists(select 1 from t1 where (abs(17)/abs(t1.d))>coalesce((select max(a) from t1 where t1.c not between t1.f and t1.f and f not in (t1.f, -t1.c,13)),t1.c)))) then c else 19 end))) | t1.a*11* -e*t1.c or not exists(select 1 from t1 where f>c)} +} {0} +do_test randexpr-2.2058 { + db eval {SELECT (abs(coalesce((select max(t1.e) from t1 where c>coalesce((select max(coalesce((select t1.e from t1 where not (abs(a+t1.e*c | case when t1.b in (select -max(t1.b)*cast(avg(19) AS integer) from t1 union select -max(13) from t1) then t1.f else 19 end-f+(19)+19)/abs(t1.e)) in (d,13,e) or t1.e<=t1.e),t1.b-17)) from t1 where -11<>17 or e in (select max(19) from t1 union select (cast(avg(f) AS integer)) from t1)),a)),17))/abs(t1.e)) FROM t1 WHERE NOT (t1.e<>13 | (t1.f)+c-((13))-~13 | (abs(e)/abs(coalesce((select max(t1.c) from t1 where (d)<=t1.d),case when (exists(select 1 from t1 where not exists(select 1 from t1 where (abs(17)/abs(t1.d))>coalesce((select max(a) from t1 where t1.c not between t1.f and t1.f and f not in (t1.f, -t1.c,13)),t1.c)))) then c else 19 end))) | t1.a*11* -e*t1.c or not exists(select 1 from t1 where f>c))} +} {} +do_test randexpr-2.2059 { + db eval {SELECT (abs(coalesce((select max(t1.e) from t1 where c>coalesce((select max(coalesce((select t1.e from t1 where not (abs(a+t1.e*c & case when t1.b in (select -max(t1.b)*cast(avg(19) AS integer) from t1 union select -max(13) from t1) then t1.f else 19 end-f+(19)+19)/abs(t1.e)) in (d,13,e) or t1.e<=t1.e),t1.b-17)) from t1 where -11<>17 or e in (select max(19) from t1 union select (cast(avg(f) AS integer)) from t1)),a)),17))/abs(t1.e)) FROM t1 WHERE t1.e<>13 | (t1.f)+c-((13))-~13 | (abs(e)/abs(coalesce((select max(t1.c) from t1 where (d)<=t1.d),case when (exists(select 1 from t1 where not exists(select 1 from t1 where (abs(17)/abs(t1.d))>coalesce((select max(a) from t1 where t1.c not between t1.f and t1.f and f not in (t1.f, -t1.c,13)),t1.c)))) then c else 19 end))) | t1.a*11* -e*t1.c or not exists(select 1 from t1 where f>c)} +} {0} +do_test randexpr-2.2060 { + db eval {SELECT case t1.e when a then case when not exists(select 1 from t1 where (abs(~coalesce((select c from t1 where (case when not case when a between t1.b and t1.a then t1.c when -17 not in (11,t1.d,11) then a else t1.a end in (select count(distinct t1.a)+count(distinct t1.a) from t1 union select count(*) from t1) then t1.c when t1.b<(11) then f else a end>=11 and (t1.f)=11)),+11))/abs(e)) not between 17 and -t1.a) or c not between 19 and t1.b then t1.c when t1.e=11 and 
(t1.f)=11)),+11))/abs(e)) not between 17 and -t1.a) or c not between 19 and t1.b then t1.c when t1.e=11 and (t1.f)=11)),+11))/abs(e)) not between 17 and -t1.a) or c not between 19 and t1.b then t1.c when t1.eb),~case when b between 17 and e | coalesce((select max(a) from t1 where (f in (t1.b,f,a))),13) then f when -d<=d then t1.e else e end+13)+13 then d else d end+t1.f=d),b)+b FROM t1 WHERE b=b} +} {400} +do_test randexpr-2.2064 { + db eval {SELECT ~~coalesce((select t1.a from t1 where case f*+e when ~11-coalesce((select max(t1.f) from t1 where t1.f>b),~case when b between 17 and e | coalesce((select max(a) from t1 where (f in (t1.b,f,a))),13) then f when -d<=d then t1.e else e end+13)+13 then d else d end+t1.f=d),b)+b FROM t1 WHERE NOT (b=b)} +} {} +do_test randexpr-2.2065 { + db eval {SELECT ~~coalesce((select t1.a from t1 where case f*+e when ~11-coalesce((select max(t1.f) from t1 where t1.f>b),~case when b between 17 and e & coalesce((select max(a) from t1 where (f in (t1.b,f,a))),13) then f when -d<=d then t1.e else e end+13)+13 then d else d end+t1.f=d),b)+b FROM t1 WHERE b=b} +} {400} +do_test randexpr-2.2066 { + db eval {SELECT e | t1.e*case d when t1.e then a else case when t1.a<= -~+e then b else 17-t1.e end*c*f+case when case when +c in (select +min(11) from t1 union select cast(avg(f) AS integer) from t1) and b between c and t1.f then 11+t1.a when f>=c then d else t1.c end=c then d else t1.c end=c then d else t1.c endt1.e), -t1.d) FROM t1 WHERE t1.b between coalesce((select (coalesce((select max(case when case when not exists(select 1 from t1 where (17) not in (13,case when 19 not in (19,t1.f,t1.e) and b>=t1.a then e when b<=t1.c then 11 else b end,11)) then c | t1.a else 17 end | 19 in (c,t1.b,d) then t1.a else t1.a end | t1.c) from t1 where not exists(select 1 from t1 where t1.a<=t1.e)),t1.c)) from t1 where t1.d<=t1.a),17)*c-11 and d} +} {} +do_test randexpr-2.2070 { + db eval {SELECT coalesce((select -(select min(f)-+min(f)*~case +cast(avg(t1.e) AS integer) when max(13)-abs( -max(a)) then +count(distinct a) | abs(max( -c+13)) else (count(*)) end from t1) from t1 where t1.a<>t1.e), -t1.d) FROM t1 WHERE NOT (t1.b between coalesce((select (coalesce((select max(case when case when not exists(select 1 from t1 where (17) not in (13,case when 19 not in (19,t1.f,t1.e) and b>=t1.a then e when b<=t1.c then 11 else b end,11)) then c | t1.a else 17 end | 19 in (c,t1.b,d) then t1.a else t1.a end | t1.c) from t1 where not exists(select 1 from t1 where t1.a<=t1.e)),t1.c)) from t1 where t1.d<=t1.a),17)*c-11 and d)} +} {-1800} +do_test randexpr-2.2071 { + db eval {SELECT coalesce((select -(select min(f)-+min(f)*~case +cast(avg(t1.e) AS integer) when max(13)-abs( -max(a)) then +count(distinct a) & abs(max( -c+13)) else (count(*)) end from t1) from t1 where t1.a<>t1.e), -t1.d) FROM t1 WHERE NOT (t1.b between coalesce((select (coalesce((select max(case when case when not exists(select 1 from t1 where (17) not in (13,case when 19 not in (19,t1.f,t1.e) and b>=t1.a then e when b<=t1.c then 11 else b end,11)) then c | t1.a else 17 end | 19 in (c,t1.b,d) then t1.a else t1.a end | t1.c) from t1 where not exists(select 1 from t1 where t1.a<=t1.e)),t1.c)) from t1 where t1.d<=t1.a),17)*c-11 and d)} +} {-1800} +do_test randexpr-2.2072 { + db eval {SELECT coalesce((select c from t1 where case when +case when not 11 | c not in (11,case when 13> -d then e when a>t1.c then 17 else t1.e end,t1.b) then t1.d when c<>t1.a then t1.b else 19 end-b>=17 or 11=19 and b<=e),d) FROM t1 WHERE (+a*t1.a+(19-(abs(case -c 
when d then (abs(f)/abs(case t1.a when 19 then b else +~( -case t1.f*t1.a | 17 when -t1.a then d else (t1.e) end*d)+t1.a-a end)) else 13 end)/abs(11))*t1.c)+17-d not in (11,a,b))} +} {300} +do_test randexpr-2.2073 { + db eval {SELECT coalesce((select c from t1 where case when +case when not 11 | c not in (11,case when 13> -d then e when a>t1.c then 17 else t1.e end,t1.b) then t1.d when c<>t1.a then t1.b else 19 end-b>=17 or 11=19 and b<=e),d) FROM t1 WHERE NOT ((+a*t1.a+(19-(abs(case -c when d then (abs(f)/abs(case t1.a when 19 then b else +~( -case t1.f*t1.a | 17 when -t1.a then d else (t1.e) end*d)+t1.a-a end)) else 13 end)/abs(11))*t1.c)+17-d not in (11,a,b)))} +} {} +do_test randexpr-2.2074 { + db eval {SELECT coalesce((select c from t1 where case when +case when not 11 & c not in (11,case when 13> -d then e when a>t1.c then 17 else t1.e end,t1.b) then t1.d when c<>t1.a then t1.b else 19 end-b>=17 or 11=19 and b<=e),d) FROM t1 WHERE (+a*t1.a+(19-(abs(case -c when d then (abs(f)/abs(case t1.a when 19 then b else +~( -case t1.f*t1.a | 17 when -t1.a then d else (t1.e) end*d)+t1.a-a end)) else 13 end)/abs(11))*t1.c)+17-d not in (11,a,b))} +} {300} +do_test randexpr-2.2075 { + db eval {SELECT case 17 when t1.c then t1.c*f | t1.b+t1.d+b+c-t1.e+t1.c+t1.c-t1.c*t1.f else case when t1.f+case when (abs(t1.a-coalesce((select a from t1 where (t1.c) in (t1.c,d,19)), -t1.a))/abs(f)) between t1.c and a then -(t1.b) else b end not in (11, -t1.d,t1.d) then t1.d else d end end FROM t1 WHERE ~17 between (select -count(distinct b+b)++max(t1.b)*(~(max(t1.d)))-abs(cast(avg(17) AS integer)*count(*))-max(f) | count(distinct t1.a)-count(distinct 13)-cast(avg(c) AS integer) from t1) and c-case when f<>t1.e then (abs(~(a)+e+t1.a | t1.a)/abs(t1.c)) else 13 end- -t1.e} +} {400} +do_test randexpr-2.2076 { + db eval {SELECT case 17 when t1.c then t1.c*f | t1.b+t1.d+b+c-t1.e+t1.c+t1.c-t1.c*t1.f else case when t1.f+case when (abs(t1.a-coalesce((select a from t1 where (t1.c) in (t1.c,d,19)), -t1.a))/abs(f)) between t1.c and a then -(t1.b) else b end not in (11, -t1.d,t1.d) then t1.d else d end end FROM t1 WHERE NOT (~17 between (select -count(distinct b+b)++max(t1.b)*(~(max(t1.d)))-abs(cast(avg(17) AS integer)*count(*))-max(f) | count(distinct t1.a)-count(distinct 13)-cast(avg(c) AS integer) from t1) and c-case when f<>t1.e then (abs(~(a)+e+t1.a | t1.a)/abs(t1.c)) else 13 end- -t1.e)} +} {} +do_test randexpr-2.2077 { + db eval {SELECT case 17 when t1.c then t1.c*f & t1.b+t1.d+b+c-t1.e+t1.c+t1.c-t1.c*t1.f else case when t1.f+case when (abs(t1.a-coalesce((select a from t1 where (t1.c) in (t1.c,d,19)), -t1.a))/abs(f)) between t1.c and a then -(t1.b) else b end not in (11, -t1.d,t1.d) then t1.d else d end end FROM t1 WHERE ~17 between (select -count(distinct b+b)++max(t1.b)*(~(max(t1.d)))-abs(cast(avg(17) AS integer)*count(*))-max(f) | count(distinct t1.a)-count(distinct 13)-cast(avg(c) AS integer) from t1) and c-case when f<>t1.e then (abs(~(a)+e+t1.a | t1.a)/abs(t1.c)) else 13 end- -t1.e} +} {400} +do_test randexpr-2.2078 { + db eval {SELECT (select cast(avg(coalesce((select max(coalesce((select e from t1 where not ((coalesce((select b from t1 where coalesce((select max(11) from t1 where exists(select 1 from t1 where (a in (select count(*)+max(((t1.a))) from t1 union select cast(avg(a) AS integer) from t1)))),b)<=19), -b))*f=t1.e) and 17<>11 and e in (t1.f,f,17)) or a not between -a and (17)),+~b)-17*b) from t1 where t1.b>=b),t1.a)) AS integer) from t1) FROM t1 WHERE f in (select cast(avg(t1.e-f-t1.e) AS 
integer)+min(t1.d) from t1 union select max(case b when 11 then case case when f in (a,a,f) then t1.b when (t1.f)>d then -19 else t1.a end when 11 then a else t1.e end else (t1.b) end*a)*cast(avg(a) AS integer)+ -+cast(avg(t1.e) AS integer)*case cast(avg(t1.e) AS integer) when (cast(avg( -t1.a) AS integer)) then count(*) else count(*) end*min(t1.d)*min(e) from t1) and (a between d and t1.f)} +} {} +do_test randexpr-2.2079 { + db eval {SELECT (select cast(avg(coalesce((select max(coalesce((select e from t1 where not ((coalesce((select b from t1 where coalesce((select max(11) from t1 where exists(select 1 from t1 where (a in (select count(*)+max(((t1.a))) from t1 union select cast(avg(a) AS integer) from t1)))),b)<=19), -b))*f=t1.e) and 17<>11 and e in (t1.f,f,17)) or a not between -a and (17)),+~b)-17*b) from t1 where t1.b>=b),t1.a)) AS integer) from t1) FROM t1 WHERE NOT (f in (select cast(avg(t1.e-f-t1.e) AS integer)+min(t1.d) from t1 union select max(case b when 11 then case case when f in (a,a,f) then t1.b when (t1.f)>d then -19 else t1.a end when 11 then a else t1.e end else (t1.b) end*a)*cast(avg(a) AS integer)+ -+cast(avg(t1.e) AS integer)*case cast(avg(t1.e) AS integer) when (cast(avg( -t1.a) AS integer)) then count(*) else count(*) end*min(t1.d)*min(e) from t1) and (a between d and t1.f))} +} {-2900} +do_test randexpr-2.2080 { + db eval {SELECT case when coalesce((select max(coalesce((select max((select min(13) from t1)*d*case when not 19<>(t1.c)-d then 11 else 17 end) from t1 where 13=t1.e then t1.d when not exists(select 1 from t1 where t1.c in (select (count(*)) from t1 union select +count(*) from t1)) then t1.c else 19 end FROM t1 WHERE ((11>=11))} +} {300} +do_test randexpr-2.2081 { + db eval {SELECT case when coalesce((select max(coalesce((select max((select min(13) from t1)*d*case when not 19<>(t1.c)-d then 11 else 17 end) from t1 where 13=t1.e then t1.d when not exists(select 1 from t1 where t1.c in (select (count(*)) from t1 union select +count(*) from t1)) then t1.c else 19 end FROM t1 WHERE NOT (((11>=11)))} +} {} +do_test randexpr-2.2082 { + db eval {SELECT -case when 19 in (select abs((count(*))) from t1 union select abs(min(t1.f)*min(e*e)) from t1) and case when not not (17>a) or d>=c and (19)<=t1.b and - -t1.f<>t1.f then f when t1.b between d and t1.f then case when t1.e=e then coalesce((select max(d) from t1 where t1.b>=b),a) when t1.a not between c and e then e else 13 end else (17) end>t1.d then b else b end+t1.b-a FROM t1 WHERE exists(select 1 from t1 where +coalesce((select 19 from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where case when (select max(11-(select +count(distinct +case + -coalesce((select t1.d from t1 where exists(select 1 from t1 where (b)>t1.c)),t1.f)*11 when -a then 19 else 19 end) from t1)) from t1)<=t1.d-c then f when exists(select 1 from t1 where t1.f not between b and a) then -d else t1.d end not in (t1.b,t1.e, -t1.f)))),f) not in (f,e,13))} +} {} +do_test randexpr-2.2083 { + db eval {SELECT -case when 19 in (select abs((count(*))) from t1 union select abs(min(t1.f)*min(e*e)) from t1) and case when not not (17>a) or d>=c and (19)<=t1.b and - -t1.f<>t1.f then f when t1.b between d and t1.f then case when t1.e=e then coalesce((select max(d) from t1 where t1.b>=b),a) when t1.a not between c and e then e else 13 end else (17) end>t1.d then b else b end+t1.b-a FROM t1 WHERE NOT (exists(select 1 from t1 where +coalesce((select 19 from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where case when 
(select max(11-(select +count(distinct +case + -coalesce((select t1.d from t1 where exists(select 1 from t1 where (b)>t1.c)),t1.f)*11 when -a then 19 else 19 end) from t1)) from t1)<=t1.d-c then f when exists(select 1 from t1 where t1.f not between b and a) then -d else t1.d end not in (t1.b,t1.e, -t1.f)))),f) not in (f,e,13)))} +} {-100} +do_test randexpr-2.2084 { + db eval {SELECT coalesce((select max(b) from t1 where d not between a-t1.e and a),coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where t1.ee or 11 between b and 13} +} {200} +do_test randexpr-2.2085 { + db eval {SELECT coalesce((select max(b) from t1 where d not between a-t1.e and a),coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where t1.ee or 11 between b and 13)} +} {} +do_test randexpr-2.2086 { + db eval {SELECT (select case count(*) when ~(case case -abs(count(*) | (case case ~case -count(distinct case when t1.a< -t1.d then b when 19 in (t1.e, - -19,t1.b) then t1.c else t1.f end)-count(distinct (t1.f)) when count(*) then max(c) else count(distinct 17) end when count(*) then ((cast(avg(b) AS integer))) else -count(*) end when -count(distinct t1.f) then count(distinct t1.a) else count(distinct t1.e) end))+count(*) when cast(avg(19) AS integer) then -min( -t1.c) else count(*) end when (min(t1.e)) then cast(avg(c) AS integer) else min(t1.f) end) then min(e) else count(*) end | max(11) from t1)+19 FROM t1 WHERE case when (((d not between (+t1.a-coalesce((select max(b*t1.b* -t1.f) from t1 where a>a),13)) and a)) and c>=d or fe and t1.c< -11 and f<>b or t1.b<17),c) else t1.a end>=11} +} {30} +do_test randexpr-2.2087 { + db eval {SELECT (select case count(*) when ~(case case -abs(count(*) | (case case ~case -count(distinct case when t1.a< -t1.d then b when 19 in (t1.e, - -19,t1.b) then t1.c else t1.f end)-count(distinct (t1.f)) when count(*) then max(c) else count(distinct 17) end when count(*) then ((cast(avg(b) AS integer))) else -count(*) end when -count(distinct t1.f) then count(distinct t1.a) else count(distinct t1.e) end))+count(*) when cast(avg(19) AS integer) then -min( -t1.c) else count(*) end when (min(t1.e)) then cast(avg(c) AS integer) else min(t1.f) end) then min(e) else count(*) end | max(11) from t1)+19 FROM t1 WHERE NOT (case when (((d not between (+t1.a-coalesce((select max(b*t1.b* -t1.f) from t1 where a>a),13)) and a)) and c>=d or fe and t1.c< -11 and f<>b or t1.b<17),c) else t1.a end>=11)} +} {} +do_test randexpr-2.2088 { + db eval {SELECT (select case count(*) when ~(case case -abs(count(*) & (case case ~case -count(distinct case when t1.a< -t1.d then b when 19 in (t1.e, - -19,t1.b) then t1.c else t1.f end)-count(distinct (t1.f)) when count(*) then max(c) else count(distinct 17) end when count(*) then ((cast(avg(b) AS integer))) else -count(*) end when -count(distinct t1.f) then count(distinct t1.a) else count(distinct t1.e) end))+count(*) when cast(avg(19) AS integer) then -min( -t1.c) else count(*) end when (min(t1.e)) then cast(avg(c) AS integer) else min(t1.f) end) then min(e) else count(*) end & max(11) from t1)+19 FROM t1 WHERE case when (((d not between (+t1.a-coalesce((select max(b*t1.b* -t1.f) from t1 where a>a),13)) and a)) and c>=d or fe and t1.c< -11 and f<>b or t1.b<17),c) else t1.a end>=11} +} {20} +do_test randexpr-2.2089 { + db eval {SELECT case when case when t1.a*~t1.a-19-c | 17+t1.d*f*c=d then f else -13 end+t1.b+a in (t1.f,t1.d,t1.f) and (not c=11) and t1.e<>f then coalesce((select f from t1 where t1.d not in (t1.e,f,11)), -17) else f end-b FROM t1 WHERE 
not exists(select 1 from t1 where +d between d*13 and ((abs(13+e+e)/abs(+case when not t1.c in (~(select cast(avg(case e when b then 11 else 13 end) AS integer) from t1)*b*17,t1.e,t1.e) then 11* -c when t1.f in (select min(e)*count(distinct c) from t1 union select - - - -max(e) | cast(avg((d)) AS integer) from t1) then (c) else e end)))) or d<17} +} {400} +do_test randexpr-2.2090 { + db eval {SELECT case when case when t1.a*~t1.a-19-c | 17+t1.d*f*c=d then f else -13 end+t1.b+a in (t1.f,t1.d,t1.f) and (not c=11) and t1.e<>f then coalesce((select f from t1 where t1.d not in (t1.e,f,11)), -17) else f end-b FROM t1 WHERE NOT (not exists(select 1 from t1 where +d between d*13 and ((abs(13+e+e)/abs(+case when not t1.c in (~(select cast(avg(case e when b then 11 else 13 end) AS integer) from t1)*b*17,t1.e,t1.e) then 11* -c when t1.f in (select min(e)*count(distinct c) from t1 union select - - - -max(e) | cast(avg((d)) AS integer) from t1) then (c) else e end)))) or d<17)} +} {} +do_test randexpr-2.2091 { + db eval {SELECT case when case when t1.a*~t1.a-19-c & 17+t1.d*f*c=d then f else -13 end+t1.b+a in (t1.f,t1.d,t1.f) and (not c=11) and t1.e<>f then coalesce((select f from t1 where t1.d not in (t1.e,f,11)), -17) else f end-b FROM t1 WHERE not exists(select 1 from t1 where +d between d*13 and ((abs(13+e+e)/abs(+case when not t1.c in (~(select cast(avg(case e when b then 11 else 13 end) AS integer) from t1)*b*17,t1.e,t1.e) then 11* -c when t1.f in (select min(e)*count(distinct c) from t1 union select - - - -max(e) | cast(avg((d)) AS integer) from t1) then (c) else e end)))) or d<17} +} {400} +do_test randexpr-2.2092 { + db eval {SELECT coalesce((select c from t1 where t1.b between t1.b and -17),coalesce((select -t1.e from t1 where (select count(distinct coalesce((select case t1.b-t1.a*(b)*13 when 11 then (a) else 11 end from t1 where a not in ((t1.e),c,11) and e>b),13)) from t1) in (t1.d,b,13) and not t1.b<13 or 11<> -17 or (11)>17 or t1.f not between b and (t1.a)),(b))) FROM t1 WHERE ((+case +d when f then (d) else case t1.f-(b)-11 when d then -t1.c else a end end not in ( -17,(t1.d),13) and (11 not in ((c),t1.f,t1.d)) and (e in (select 13 from t1 union select t1.a from t1) and d in (select t1.e from t1 union select 19 from t1))) or exists(select 1 from t1 where -t1.a= -t1.d)) and exists(select 1 from t1 where t1.b not between f and e or 19>=t1.d)} +} {} +do_test randexpr-2.2093 { + db eval {SELECT coalesce((select c from t1 where t1.b between t1.b and -17),coalesce((select -t1.e from t1 where (select count(distinct coalesce((select case t1.b-t1.a*(b)*13 when 11 then (a) else 11 end from t1 where a not in ((t1.e),c,11) and e>b),13)) from t1) in (t1.d,b,13) and not t1.b<13 or 11<> -17 or (11)>17 or t1.f not between b and (t1.a)),(b))) FROM t1 WHERE NOT (((+case +d when f then (d) else case t1.f-(b)-11 when d then -t1.c else a end end not in ( -17,(t1.d),13) and (11 not in ((c),t1.f,t1.d)) and (e in (select 13 from t1 union select t1.a from t1) and d in (select t1.e from t1 union select 19 from t1))) or exists(select 1 from t1 where -t1.a= -t1.d)) and exists(select 1 from t1 where t1.b not between f and e or 19>=t1.d))} +} {-500} +do_test randexpr-2.2094 { + db eval {SELECT case d when f then case when (abs(coalesce((select ~f*case when t1.f<(select -count(distinct (select count(*)-min(13) from t1)) from t1) then d when t1.d between t1.b and f and not (t1.b between ( -t1.f) and f) then -d else b end-b | t1.f*17 from t1 where ((d>t1.d))),17))/abs(11))-13=t1.c then a else 11 end else b end FROM t1 
WHERE t1.b between coalesce((select case when coalesce((select t1.c from t1 where not ((t1.a>=11+17 and (13+(11)<>13))) and b<=c and c in (c, -c,(e)) and d not in (f,b,c) or t1.d>=13 and 13 between 19 and a),d) not between 13-case when t1.f<>t1.b then t1.f else t1.c end | d and d then b when c= -d),19) and c} +} {} +do_test randexpr-2.2095 { + db eval {SELECT case d when f then case when (abs(coalesce((select ~f*case when t1.f<(select -count(distinct (select count(*)-min(13) from t1)) from t1) then d when t1.d between t1.b and f and not (t1.b between ( -t1.f) and f) then -d else b end-b | t1.f*17 from t1 where ((d>t1.d))),17))/abs(11))-13=t1.c then a else 11 end else b end FROM t1 WHERE NOT (t1.b between coalesce((select case when coalesce((select t1.c from t1 where not ((t1.a>=11+17 and (13+(11)<>13))) and b<=c and c in (c, -c,(e)) and d not in (f,b,c) or t1.d>=13 and 13 between 19 and a),d) not between 13-case when t1.f<>t1.b then t1.f else t1.c end | d and d then b when c= -d),19) and c)} +} {200} +do_test randexpr-2.2096 { + db eval {SELECT case d when f then case when (abs(coalesce((select ~f*case when t1.f<(select -count(distinct (select count(*)-min(13) from t1)) from t1) then d when t1.d between t1.b and f and not (t1.b between ( -t1.f) and f) then -d else b end-b & t1.f*17 from t1 where ((d>t1.d))),17))/abs(11))-13=t1.c then a else 11 end else b end FROM t1 WHERE NOT (t1.b between coalesce((select case when coalesce((select t1.c from t1 where not ((t1.a>=11+17 and (13+(11)<>13))) and b<=c and c in (c, -c,(e)) and d not in (f,b,c) or t1.d>=13 and 13 between 19 and a),d) not between 13-case when t1.f<>t1.b then t1.f else t1.c end | d and d then b when c= -d),19) and c)} +} {200} +do_test randexpr-2.2097 { + db eval {SELECT (coalesce((select case when exists(select 1 from t1 where case when (t1.b)>=f+e*coalesce((select max( -+t1.c) from t1 where case a when e then e else 11 end in (select 17 from t1 union select d from t1) or e not between d and a),t1.f) then ( -t1.d) else e end in (11,c,b)) then 17 else t1.f end+t1.f from t1 where not d in ((b),(f),11) and t1.f=t1.b and 19<13 then 19 else 13 end=19) and b not in (c,t1.a,d) or b between e and -t1.f or t1.b between t1.e and c and t1.a<=t1.d} +} {} +do_test randexpr-2.2098 { + db eval {SELECT (coalesce((select case when exists(select 1 from t1 where case when (t1.b)>=f+e*coalesce((select max( -+t1.c) from t1 where case a when e then e else 11 end in (select 17 from t1 union select d from t1) or e not between d and a),t1.f) then ( -t1.d) else e end in (11,c,b)) then 17 else t1.f end+t1.f from t1 where not d in ((b),(f),11) and t1.f=t1.b and 19<13 then 19 else 13 end=19) and b not in (c,t1.a,d) or b between e and -t1.f or t1.b between t1.e and c and t1.a<=t1.d)} +} {19} +do_test randexpr-2.2099 { + db eval {SELECT 11+17+case when case 17 when coalesce((select max(d) from t1 where 13+(t1.c-e)+19+e-t1.e>=b and not 11 not in (19,t1.e,13) and (t1.f)>b),19)*19 then c else -d end not in (c,19,t1.c) then d when d<=t1.d then 13 else 11 end+ -a FROM t1 WHERE coalesce((select max(~t1.f) from t1 where (13*t1.e)<=case when 17 | a<=t1.e then coalesce((select max(19) from t1 where case when not not exists(select 1 from t1 where 19 in (t1.d+19,11,t1.f) or not exists(select 1 from t1 where a=t1.c then - -11 else t1.b end else f end-f<=b),t1.b) else t1.e end),f)-19>=13} +} {328} +do_test randexpr-2.2100 { + db eval {SELECT 11+17+case when case 17 when coalesce((select max(d) from t1 where 13+(t1.c-e)+19+e-t1.e>=b and not 11 not in (19,t1.e,13) and 
(t1.f)>b),19)*19 then c else -d end not in (c,19,t1.c) then d when d<=t1.d then 13 else 11 end+ -a FROM t1 WHERE NOT (coalesce((select max(~t1.f) from t1 where (13*t1.e)<=case when 17 | a<=t1.e then coalesce((select max(19) from t1 where case when not not exists(select 1 from t1 where 19 in (t1.d+19,11,t1.f) or not exists(select 1 from t1 where a=t1.c then - -11 else t1.b end else f end-f<=b),t1.b) else t1.e end),f)-19>=13)} +} {} +do_test randexpr-2.2101 { + db eval {SELECT coalesce((select max(t1.a+(select count(distinct 11-case when coalesce((select max(11) from t1 where t1.f<(t1.a)-t1.b),b) in (select ~abs(min(c-case when (case 13 when -coalesce((select (t1.e) from t1 where -a not in (f,t1.d,d)),(11)) then b else t1.e end not between b and -t1.e) then e else t1.a end)) | count(*) from t1 union select -min(17) from t1) then t1.a else t1.d end) from t1)-t1.b) from t1 where 1711) then e else a end-t1.c+(d)*(13) in (select ~count(distinct (t1.f))+ -~ -count(distinct t1.e)+(min(13)) from t1 union select cast(avg(11) AS integer) from t1) or t1.e<>t1.f)) then f when not a<19 then coalesce((select max(11) from t1 where (t1.d) between -d and -t1.e or t1.f not between d and a and -b=19),t1.b) else +t1.f end not between a and e} +} {-99} +do_test randexpr-2.2102 { + db eval {SELECT coalesce((select max(t1.a+(select count(distinct 11-case when coalesce((select max(11) from t1 where t1.f<(t1.a)-t1.b),b) in (select ~abs(min(c-case when (case 13 when -coalesce((select (t1.e) from t1 where -a not in (f,t1.d,d)),(11)) then b else t1.e end not between b and -t1.e) then e else t1.a end)) | count(*) from t1 union select -min(17) from t1) then t1.a else t1.d end) from t1)-t1.b) from t1 where 1711) then e else a end-t1.c+(d)*(13) in (select ~count(distinct (t1.f))+ -~ -count(distinct t1.e)+(min(13)) from t1 union select cast(avg(11) AS integer) from t1) or t1.e<>t1.f)) then f when not a<19 then coalesce((select max(11) from t1 where (t1.d) between -d and -t1.e or t1.f not between d and a and -b=19),t1.b) else +t1.f end not between a and e)} +} {} +do_test randexpr-2.2103 { + db eval {SELECT coalesce((select max(t1.a+(select count(distinct 11-case when coalesce((select max(11) from t1 where t1.f<(t1.a)-t1.b),b) in (select ~abs(min(c-case when (case 13 when -coalesce((select (t1.e) from t1 where -a not in (f,t1.d,d)),(11)) then b else t1.e end not between b and -t1.e) then e else t1.a end)) & count(*) from t1 union select -min(17) from t1) then t1.a else t1.d end) from t1)-t1.b) from t1 where 1711) then e else a end-t1.c+(d)*(13) in (select ~count(distinct (t1.f))+ -~ -count(distinct t1.e)+(min(13)) from t1 union select cast(avg(11) AS integer) from t1) or t1.e<>t1.f)) then f when not a<19 then coalesce((select max(11) from t1 where (t1.d) between -d and -t1.e or t1.f not between d and a and -b=19),t1.b) else +t1.f end not between a and e} +} {-99} +do_test randexpr-2.2104 { + db eval {SELECT t1.b-case when 13 not between 13 and (select +count(*) from t1) then t1.e when +11 in (coalesce((select t1.f from t1 where d-((f))*coalesce((select d from t1 where 17 between ~(select max(d) | count(*) | cast(avg(e) AS integer)+count(*) from t1) and case when c<13 or 17>13 then a else e end-t1.b),e)-t1.a<=(t1.e)),19),b,13) then e else b end-t1.b FROM t1 WHERE t1.b>=coalesce((select max(d-c) from t1 where exists(select 1 from t1 where (~a*(select -count(distinct 11-d*13+d+19) from t1)* -b in (f,t1.d-c,a-(abs(c)/abs(case when (abs(13- -(19))/abs(t1.f)) in (select c from t1 union select -a from t1) then t1.c when b<=c then 
t1.a else -13 end)))))),19)} +} {-500} +do_test randexpr-2.2105 { + db eval {SELECT t1.b-case when 13 not between 13 and (select +count(*) from t1) then t1.e when +11 in (coalesce((select t1.f from t1 where d-((f))*coalesce((select d from t1 where 17 between ~(select max(d) | count(*) | cast(avg(e) AS integer)+count(*) from t1) and case when c<13 or 17>13 then a else e end-t1.b),e)-t1.a<=(t1.e)),19),b,13) then e else b end-t1.b FROM t1 WHERE NOT (t1.b>=coalesce((select max(d-c) from t1 where exists(select 1 from t1 where (~a*(select -count(distinct 11-d*13+d+19) from t1)* -b in (f,t1.d-c,a-(abs(c)/abs(case when (abs(13- -(19))/abs(t1.f)) in (select c from t1 union select -a from t1) then t1.c when b<=c then t1.a else -13 end)))))),19))} +} {} +do_test randexpr-2.2106 { + db eval {SELECT t1.b-case when 13 not between 13 and (select +count(*) from t1) then t1.e when +11 in (coalesce((select t1.f from t1 where d-((f))*coalesce((select d from t1 where 17 between ~(select max(d) & count(*) & cast(avg(e) AS integer)+count(*) from t1) and case when c<13 or 17>13 then a else e end-t1.b),e)-t1.a<=(t1.e)),19),b,13) then e else b end-t1.b FROM t1 WHERE t1.b>=coalesce((select max(d-c) from t1 where exists(select 1 from t1 where (~a*(select -count(distinct 11-d*13+d+19) from t1)* -b in (f,t1.d-c,a-(abs(c)/abs(case when (abs(13- -(19))/abs(t1.f)) in (select c from t1 union select -a from t1) then t1.c when b<=c then t1.a else -13 end)))))),19)} +} {-500} +do_test randexpr-2.2107 { + db eval {SELECT t1.f*case when c*case when t1.d*t1.b-(~19) in (select (cast(avg(d-t1.d) AS integer)) from t1 union select min(11)+cast(avg(e) AS integer) | abs(count(distinct t1.a) | min(19)) from t1) then t1.d when not 11 not in (d,t1.e,13) then t1.a else 11 end in (select (t1.c) from t1 union select 11 from t1) or t1.e not in (f, -t1.f,t1.e) then c else d end FROM t1 WHERE case when t1.e=t1.c and (abs(case when (11* -17 not in (c,t1.f-t1.c-coalesce((select max(c) from t1 where c in (select min(19) from t1 union select ~count(*)*max(t1.f) from t1)),a),t1.a)) then coalesce((select max(17) from t1 where -b>=(t1.c)),19) else f end+19)/abs(t1.f))*t1.d between d and b then t1.b-t1.e else c end=19} +} {} +do_test randexpr-2.2108 { + db eval {SELECT t1.f*case when c*case when t1.d*t1.b-(~19) in (select (cast(avg(d-t1.d) AS integer)) from t1 union select min(11)+cast(avg(e) AS integer) | abs(count(distinct t1.a) | min(19)) from t1) then t1.d when not 11 not in (d,t1.e,13) then t1.a else 11 end in (select (t1.c) from t1 union select 11 from t1) or t1.e not in (f, -t1.f,t1.e) then c else d end FROM t1 WHERE NOT (case when t1.e=t1.c and (abs(case when (11* -17 not in (c,t1.f-t1.c-coalesce((select max(c) from t1 where c in (select min(19) from t1 union select ~count(*)*max(t1.f) from t1)),a),t1.a)) then coalesce((select max(17) from t1 where -b>=(t1.c)),19) else f end+19)/abs(t1.f))*t1.d between d and b then t1.b-t1.e else c end=19)} +} {240000} +do_test randexpr-2.2109 { + db eval {SELECT t1.f*case when c*case when t1.d*t1.b-(~19) in (select (cast(avg(d-t1.d) AS integer)) from t1 union select min(11)+cast(avg(e) AS integer) & abs(count(distinct t1.a) & min(19)) from t1) then t1.d when not 11 not in (d,t1.e,13) then t1.a else 11 end in (select (t1.c) from t1 union select 11 from t1) or t1.e not in (f, -t1.f,t1.e) then c else d end FROM t1 WHERE NOT (case when t1.e=t1.c and (abs(case when (11* -17 not in (c,t1.f-t1.c-coalesce((select max(c) from t1 where c in (select min(19) from t1 union select ~count(*)*max(t1.f) from t1)),a),t1.a)) 
then coalesce((select max(17) from t1 where -b>=(t1.c)),19) else f end+19)/abs(t1.f))*t1.d between d and b then t1.b-t1.e else c end=19)} +} {240000} +do_test randexpr-2.2110 { + db eval {SELECT coalesce((select max((abs(t1.d)/abs((coalesce((select c from t1 where (abs(+f)/abs(t1.b))-t1.a between 13 and case (13+c+((abs(coalesce((select 11 from t1 where 11+19a then (coalesce((select -e from t1 where case coalesce((select case when 19 in (select t1.e from t1 union select f from t1) then b when t1.c=11 then a else (13) end from t1 where t1.a in (select ( -cast(avg(13) AS integer)+max(t1.e)) from t1 union select cast(avg(19) AS integer) from t1)),19) when f then f else ( -t1.c) end+17<=e and a>=t1.b or t1.c>=b),t1.e)) else e end)>=e),b)+f FROM t1 WHERE exists(select 1 from t1 where 17 in (select -abs(case (min(t1.b)) when count(*)*max(a) then -abs((max(e))) | ~max( -coalesce((select max(coalesce((select e from t1 where f>=11 or exists(select 1 from t1 where 11 not in (11,t1.c,f))),d)) from t1 where 13=t1.d and 17<>f),t1.d))+count(distinct t1.e) else count(*)+~cast(avg(t1.f) AS integer) end | count(distinct t1.a))+count(distinct t1.a) from t1 union select count(*) from t1))} +} {} +do_test randexpr-2.2113 { + db eval {SELECT f+coalesce((select max(t1.f) from t1 where (case when 13<>a then (coalesce((select -e from t1 where case coalesce((select case when 19 in (select t1.e from t1 union select f from t1) then b when t1.c=11 then a else (13) end from t1 where t1.a in (select ( -cast(avg(13) AS integer)+max(t1.e)) from t1 union select cast(avg(19) AS integer) from t1)),19) when f then f else ( -t1.c) end+17<=e and a>=t1.b or t1.c>=b),t1.e)) else e end)>=e),b)+f FROM t1 WHERE NOT (exists(select 1 from t1 where 17 in (select -abs(case (min(t1.b)) when count(*)*max(a) then -abs((max(e))) | ~max( -coalesce((select max(coalesce((select e from t1 where f>=11 or exists(select 1 from t1 where 11 not in (11,t1.c,f))),d)) from t1 where 13=t1.d and 17<>f),t1.d))+count(distinct t1.e) else count(*)+~cast(avg(t1.f) AS integer) end | count(distinct t1.a))+count(distinct t1.a) from t1 union select count(*) from t1)))} +} {1400} +do_test randexpr-2.2114 { + db eval {SELECT case when case when (abs( -t1.b)/abs(case case case (select ~ -(+abs(case min(19) when (( -min(t1.e))) then count(distinct (t1.e)) else cast(avg(11) AS integer) end)) from t1) when t1.e then (select min(19) from t1) else 17 end when (13)-a*17 then t1.d else t1.a end when a then 19 else -( -f) end))>=d then t1.e else 13 end between f and 19 then f when t1.c<=t1.a or (t1.b>(11)) then 11 else t1.b end FROM t1 WHERE t1.c in (select case when 11<17 then case when (t1.b*coalesce((select (abs((coalesce((select a-19 from t1 where t1.c between e and f and d<>17 and t1.d not between 19 and 19),a)))/abs(t1.c))+t1.b-17 from t1 where c=a), -t1.d)+c>t1.f) then -13+(t1.e) when 11<>t1.c then 13 else t1.c end when 11=c then t1.c else (c) end from t1 union select t1.e from t1)} +} {} +do_test randexpr-2.2115 { + db eval {SELECT case when case when (abs( -t1.b)/abs(case case case (select ~ -(+abs(case min(19) when (( -min(t1.e))) then count(distinct (t1.e)) else cast(avg(11) AS integer) end)) from t1) when t1.e then (select min(19) from t1) else 17 end when (13)-a*17 then t1.d else t1.a end when a then 19 else -( -f) end))>=d then t1.e else 13 end between f and 19 then f when t1.c<=t1.a or (t1.b>(11)) then 11 else t1.b end FROM t1 WHERE NOT (t1.c in (select case when 11<17 then case when (t1.b*coalesce((select (abs((coalesce((select a-19 from t1 where t1.c 
between e and f and d<>17 and t1.d not between 19 and 19),a)))/abs(t1.c))+t1.b-17 from t1 where c=a), -t1.d)+c>t1.f) then -13+(t1.e) when 11<>t1.c then 13 else t1.c end when 11=c then t1.c else (c) end from t1 union select t1.e from t1))} +} {11} +do_test randexpr-2.2116 { + db eval {SELECT t1.e+coalesce((select case t1.a-case when t1.a in (case (abs(coalesce((select t1.e from t1 where t1.a>t1.b),~c))/abs(t1.b))* -(select count(distinct t1.c)-cast(avg(coalesce((select 19 from t1 where c not between (abs(b-11)/abs( -t1.e)) and (17)),17)) AS integer) from t1) when t1.e then f else 17 end,t1.c,t1.c) then d else t1.e end when d then 17 else 13 end from t1 where 11<11),t1.f) FROM t1 WHERE 11* -19 in (select case when ~coalesce((select t1.b from t1 where coalesce((select d from t1 where t1.a=17*~e+t1.d*case when 11< -~t1.b*t1.b*c+a then (c) else 19 end*a),e)>e),11)<=19 then t1.c when (not exists(select 1 from t1 where t1.d<17)) then b else c end from t1 union select t1.a from t1)} +} {} +do_test randexpr-2.2117 { + db eval {SELECT t1.e+coalesce((select case t1.a-case when t1.a in (case (abs(coalesce((select t1.e from t1 where t1.a>t1.b),~c))/abs(t1.b))* -(select count(distinct t1.c)-cast(avg(coalesce((select 19 from t1 where c not between (abs(b-11)/abs( -t1.e)) and (17)),17)) AS integer) from t1) when t1.e then f else 17 end,t1.c,t1.c) then d else t1.e end when d then 17 else 13 end from t1 where 11<11),t1.f) FROM t1 WHERE NOT (11* -19 in (select case when ~coalesce((select t1.b from t1 where coalesce((select d from t1 where t1.a=17*~e+t1.d*case when 11< -~t1.b*t1.b*c+a then (c) else 19 end*a),e)>e),11)<=19 then t1.c when (not exists(select 1 from t1 where t1.d<17)) then b else c end from t1 union select t1.a from t1))} +} {1100} +do_test randexpr-2.2118 { + db eval {SELECT 19* -coalesce((select coalesce((select t1.d*17 from t1 where t1.b<=t1.b+17), -(a | 17)) from t1 where t1.d in (select case abs(count(*))*count(distinct (17)) when max(11) then abs(+max(13)-case -cast(avg(t1.d) AS integer)- -(max(19))-count(distinct d) when min(t1.c) then cast(avg(c) AS integer) else count(distinct t1.f) end | max(c)) else max(t1.d) end from t1 union select count(*) from t1)),t1.f)-t1.e+c FROM t1 WHERE ~(abs(case 17 when d then coalesce((select t1.d from t1 where 13 in (select t1.d-f | case when (17 in (t1.a,19,t1.f)) then case when a*f in (select e from t1 union select t1.a from t1) or 11=t1.c or t1.a>=(19) and ( -b)13 then b when t1.e in (t1.c, -t1.b,a) then 19 else 19 end else a end from t1 union select t1.b from t1)),d) else 13 end)/abs(11))+19*t1.e in (e, -t1.e,a)} +} {} +do_test randexpr-2.2119 { + db eval {SELECT 19* -coalesce((select coalesce((select t1.d*17 from t1 where t1.b<=t1.b+17), -(a | 17)) from t1 where t1.d in (select case abs(count(*))*count(distinct (17)) when max(11) then abs(+max(13)-case -cast(avg(t1.d) AS integer)- -(max(19))-count(distinct d) when min(t1.c) then cast(avg(c) AS integer) else count(distinct t1.f) end | max(c)) else max(t1.d) end from t1 union select count(*) from t1)),t1.f)-t1.e+c FROM t1 WHERE NOT (~(abs(case 17 when d then coalesce((select t1.d from t1 where 13 in (select t1.d-f | case when (17 in (t1.a,19,t1.f)) then case when a*f in (select e from t1 union select t1.a from t1) or 11=t1.c or t1.a>=(19) and ( -b)13 then b when t1.e in (t1.c, -t1.b,a) then 19 else 19 end else a end from t1 union select t1.b from t1)),d) else 13 end)/abs(11))+19*t1.e in (e, -t1.e,a))} +} {-129400} +do_test randexpr-2.2120 { + db eval {SELECT 19* -coalesce((select coalesce((select 
t1.d*17 from t1 where t1.b<=t1.b+17), -(a & 17)) from t1 where t1.d in (select case abs(count(*))*count(distinct (17)) when max(11) then abs(+max(13)-case -cast(avg(t1.d) AS integer)- -(max(19))-count(distinct d) when min(t1.c) then cast(avg(c) AS integer) else count(distinct t1.f) end & max(c)) else max(t1.d) end from t1 union select count(*) from t1)),t1.f)-t1.e+c FROM t1 WHERE NOT (~(abs(case 17 when d then coalesce((select t1.d from t1 where 13 in (select t1.d-f | case when (17 in (t1.a,19,t1.f)) then case when a*f in (select e from t1 union select t1.a from t1) or 11=t1.c or t1.a>=(19) and ( -b)13 then b when t1.e in (t1.c, -t1.b,a) then 19 else 19 end else a end from t1 union select t1.b from t1)),d) else 13 end)/abs(11))+19*t1.e in (e, -t1.e,a))} +} {-129400} +do_test randexpr-2.2121 { + db eval {SELECT case t1.f when f then t1.a-case d+t1.d+coalesce((select (select cast(avg( -case when t1.d in (select -min(17)-count(*)+max(f) from t1 union select -count(distinct 13) from t1) then t1.e else a end*a) AS integer) from t1)-t1.d from t1 where -t1.b not in (11, -(t1.b),t1.d) and c in (select b from t1 union select 19 from t1) and t1.a in (select -e from t1 union select t1.e from t1)),a)*t1.f when 19 then t1.f else 11 end else c end FROM t1 WHERE t1.c>11} +} {89} +do_test randexpr-2.2122 { + db eval {SELECT case t1.f when f then t1.a-case d+t1.d+coalesce((select (select cast(avg( -case when t1.d in (select -min(17)-count(*)+max(f) from t1 union select -count(distinct 13) from t1) then t1.e else a end*a) AS integer) from t1)-t1.d from t1 where -t1.b not in (11, -(t1.b),t1.d) and c in (select b from t1 union select 19 from t1) and t1.a in (select -e from t1 union select t1.e from t1)),a)*t1.f when 19 then t1.f else 11 end else c end FROM t1 WHERE NOT (t1.c>11)} +} {} +do_test randexpr-2.2123 { + db eval {SELECT case when (case when (e not in (e | e,17,e)) then 13 else t1.b end>+t1.b*f-b+t1.d-e) or 19 between b and t1.a then 11 when exists(select 1 from t1 where (t1.d) | t1.d in (case when (e>=(11)) or (t1.a)>=t1.f then c-b when t1.c<>t1.e then t1.a else (17) end,c,13)) then 17 else t1.b end FROM t1 WHERE (select min(case when a not in (coalesce((select 19 from t1 where t1.f<((abs(e)/abs(t1.a+e)) | d)-e),e),t1.f,t1.a) then c else 13 end)-(count(distinct d)-abs(count(distinct t1.c)-cast(avg(d) AS integer) | min(b)+(cast(avg(19) AS integer) | case count(distinct t1.c) when cast(avg( -t1.b) AS integer) then cast(avg(t1.a) AS integer) else max(13) end))) from t1) not between b | a and 19} +} {200} +do_test randexpr-2.2124 { + db eval {SELECT case when (case when (e not in (e | e,17,e)) then 13 else t1.b end>+t1.b*f-b+t1.d-e) or 19 between b and t1.a then 11 when exists(select 1 from t1 where (t1.d) | t1.d in (case when (e>=(11)) or (t1.a)>=t1.f then c-b when t1.c<>t1.e then t1.a else (17) end,c,13)) then 17 else t1.b end FROM t1 WHERE NOT ((select min(case when a not in (coalesce((select 19 from t1 where t1.f<((abs(e)/abs(t1.a+e)) | d)-e),e),t1.f,t1.a) then c else 13 end)-(count(distinct d)-abs(count(distinct t1.c)-cast(avg(d) AS integer) | min(b)+(cast(avg(19) AS integer) | case count(distinct t1.c) when cast(avg( -t1.b) AS integer) then cast(avg(t1.a) AS integer) else max(13) end))) from t1) not between b | a and 19)} +} {} +do_test randexpr-2.2125 { + db eval {SELECT case when (case when (e not in (e & e,17,e)) then 13 else t1.b end>+t1.b*f-b+t1.d-e) or 19 between b and t1.a then 11 when exists(select 1 from t1 where (t1.d) & t1.d in (case when (e>=(11)) or (t1.a)>=t1.f then c-b when 
t1.c<>t1.e then t1.a else (17) end,c,13)) then 17 else t1.b end FROM t1 WHERE (select min(case when a not in (coalesce((select 19 from t1 where t1.f<((abs(e)/abs(t1.a+e)) | d)-e),e),t1.f,t1.a) then c else 13 end)-(count(distinct d)-abs(count(distinct t1.c)-cast(avg(d) AS integer) | min(b)+(cast(avg(19) AS integer) | case count(distinct t1.c) when cast(avg( -t1.b) AS integer) then cast(avg(t1.a) AS integer) else max(13) end))) from t1) not between b | a and 19} +} {200} +do_test randexpr-2.2126 { + db eval {SELECT case when case when t1.c+13<13 then 11 when (t1.b) not in (t1.b,e,t1.c) then t1.f else -t1.c end- -a | 13 in (select max(a) from t1 union select count(distinct 17)* -case count(*) when count(*) then -((count(distinct 17))) else max(t1.b) end | ( -count(*)) from t1) and e<=17 and t1.c<=d then coalesce((select max(coalesce((select max(e) from t1 where t1.f not in (e,17,t1.b) and d= -t1.b),t1.d)) from t1 where t1.d(f) then t1.e else 17 end else t1.c end-19} +} {200} +do_test randexpr-2.2127 { + db eval {SELECT case when case when t1.c+13<13 then 11 when (t1.b) not in (t1.b,e,t1.c) then t1.f else -t1.c end- -a | 13 in (select max(a) from t1 union select count(distinct 17)* -case count(*) when count(*) then -((count(distinct 17))) else max(t1.b) end | ( -count(*)) from t1) and e<=17 and t1.c<=d then coalesce((select max(coalesce((select max(e) from t1 where t1.f not in (e,17,t1.b) and d= -t1.b),t1.d)) from t1 where t1.d(f) then t1.e else 17 end else t1.c end-19)} +} {} +do_test randexpr-2.2128 { + db eval {SELECT case when case when t1.c+13<13 then 11 when (t1.b) not in (t1.b,e,t1.c) then t1.f else -t1.c end- -a & 13 in (select max(a) from t1 union select count(distinct 17)* -case count(*) when count(*) then -((count(distinct 17))) else max(t1.b) end & ( -count(*)) from t1) and e<=17 and t1.c<=d then coalesce((select max(coalesce((select max(e) from t1 where t1.f not in (e,17,t1.b) and d= -t1.b),t1.d)) from t1 where t1.d(f) then t1.e else 17 end else t1.c end-19} +} {200} +do_test randexpr-2.2129 { + db eval {SELECT coalesce((select case t1.e when coalesce((select max(e- -coalesce((select 17-d-e from t1 where ( -e*(d)>t1.c and 17=(t1.b) or not 11>(a) and b not between ((f)) and t1.d)),t1.c)-f-(f)*b+t1.a) from t1 where f<>13),t1.b) then t1.c else t1.b end from t1 where c in (select t1.a from t1 union select (t1.e) from t1)),e) FROM t1 WHERE (c in (select ~abs(cast(avg(19) AS integer))- -(case (++abs(max( -(select case case ( -abs(+cast(avg(b) AS integer) | count(distinct t1.b*17)*min( -11)*count(distinct t1.e))) when (max( -t1.b)) then count(distinct e) else max(11) end when -min(d) then count(*) else cast(avg(t1.a) AS integer) end from t1))*max(11))) when min(b) then max(19) else (count(*)) end)* -(max(t1.a))-count(*) from t1 union select count(distinct t1.a) from t1))} +} {} +do_test randexpr-2.2130 { + db eval {SELECT coalesce((select case t1.e when coalesce((select max(e- -coalesce((select 17-d-e from t1 where ( -e*(d)>t1.c and 17=(t1.b) or not 11>(a) and b not between ((f)) and t1.d)),t1.c)-f-(f)*b+t1.a) from t1 where f<>13),t1.b) then t1.c else t1.b end from t1 where c in (select t1.a from t1 union select (t1.e) from t1)),e) FROM t1 WHERE NOT ((c in (select ~abs(cast(avg(19) AS integer))- -(case (++abs(max( -(select case case ( -abs(+cast(avg(b) AS integer) | count(distinct t1.b*17)*min( -11)*count(distinct t1.e))) when (max( -t1.b)) then count(distinct e) else max(11) end when -min(d) then count(*) else cast(avg(t1.a) AS integer) end from t1))*max(11))) when min(b) then max(19) 
else (count(*)) end)* -(max(t1.a))-count(*) from t1 union select count(distinct t1.a) from t1)))} +} {500} +do_test randexpr-2.2131 { + db eval {SELECT t1.e+13-(abs(f)/abs(e | coalesce((select case when (f>=~t1.c*case t1.a when -d then (11) else 17 end*19) then case a when t1.e then b else e end else t1.c end from t1 where e in (select t1.a from t1 union select a from t1) or t1.d not in (f,f,17) and t1.b not between f and 17 and e=c),t1.c)*17+d+t1.d)) FROM t1 WHERE coalesce((select ~f+t1.c*t1.d | t1.d*t1.d-t1.b from t1 where t1.b | t1.a between case when (abs(coalesce((select f from t1 where t1.e between 11-17 and t1.c),e)-t1.d*b)/abs(((d))))-d between t1.b and d then a when -(f) between e and t1.e then t1.a else a end and t1.b),t1.a)-11=d} +} {} +do_test randexpr-2.2132 { + db eval {SELECT t1.e+13-(abs(f)/abs(e | coalesce((select case when (f>=~t1.c*case t1.a when -d then (11) else 17 end*19) then case a when t1.e then b else e end else t1.c end from t1 where e in (select t1.a from t1 union select a from t1) or t1.d not in (f,f,17) and t1.b not between f and 17 and e=c),t1.c)*17+d+t1.d)) FROM t1 WHERE NOT (coalesce((select ~f+t1.c*t1.d | t1.d*t1.d-t1.b from t1 where t1.b | t1.a between case when (abs(coalesce((select f from t1 where t1.e between 11-17 and t1.c),e)-t1.d*b)/abs(((d))))-d between t1.b and d then a when -(f) between e and t1.e then t1.a else a end and t1.b),t1.a)-11=d)} +} {513} +do_test randexpr-2.2133 { + db eval {SELECT t1.e+13-(abs(f)/abs(e & coalesce((select case when (f>=~t1.c*case t1.a when -d then (11) else 17 end*19) then case a when t1.e then b else e end else t1.c end from t1 where e in (select t1.a from t1 union select a from t1) or t1.d not in (f,f,17) and t1.b not between f and 17 and e=c),t1.c)*17+d+t1.d)) FROM t1 WHERE NOT (coalesce((select ~f+t1.c*t1.d | t1.d*t1.d-t1.b from t1 where t1.b | t1.a between case when (abs(coalesce((select f from t1 where t1.e between 11-17 and t1.c),e)-t1.d*b)/abs(((d))))-d between t1.b and d then a when -(f) between e and t1.e then t1.a else a end and t1.b),t1.a)-11=d)} +} {511} +do_test randexpr-2.2134 { + db eval {SELECT +(abs(~+13+(abs(t1.c)/abs(t1.c | coalesce((select t1.a from t1 where (select min(t1.c) from t1)<>(abs(t1.f | coalesce((select c from t1 where b=17),f)-b)/abs(coalesce((select max(d-t1.e) from t1 where not exists(select 1 from t1 where t1.e in (select f from t1 union select t1.d from t1))),19)*e+19)) or t1.a in (select 13 from t1 union select t1.d from t1) and (b)<= -t1.f),11))))/abs(e)) FROM t1 WHERE t1.e<>coalesce((select case 17 when f then coalesce((select max(t1.c) from t1 where coalesce((select coalesce((select ~e from t1 where not 17<=e | 17),11)+t1.d+t1.e from t1 where (e>=f or (not t1.e>=t1.a or e between f and t1.f))),t1.b) not in (13,17,c) or t1.a> -t1.d),19) else t1.c end from t1 where (not 17 not in (t1.b,11,b) and f>=t1.f) or 19<=t1.f),d)-11} +} {0} +do_test randexpr-2.2135 { + db eval {SELECT +(abs(~+13+(abs(t1.c)/abs(t1.c | coalesce((select t1.a from t1 where (select min(t1.c) from t1)<>(abs(t1.f | coalesce((select c from t1 where b=17),f)-b)/abs(coalesce((select max(d-t1.e) from t1 where not exists(select 1 from t1 where t1.e in (select f from t1 union select t1.d from t1))),19)*e+19)) or t1.a in (select 13 from t1 union select t1.d from t1) and (b)<= -t1.f),11))))/abs(e)) FROM t1 WHERE NOT (t1.e<>coalesce((select case 17 when f then coalesce((select max(t1.c) from t1 where coalesce((select coalesce((select ~e from t1 where not 17<=e | 17),11)+t1.d+t1.e from t1 where (e>=f or (not t1.e>=t1.a or 
e between f and t1.f))),t1.b) not in (13,17,c) or t1.a> -t1.d),19) else t1.c end from t1 where (not 17 not in (t1.b,11,b) and f>=t1.f) or 19<=t1.f),d)-11)} +} {} +do_test randexpr-2.2136 { + db eval {SELECT +(abs(~+13+(abs(t1.c)/abs(t1.c & coalesce((select t1.a from t1 where (select min(t1.c) from t1)<>(abs(t1.f & coalesce((select c from t1 where b=17),f)-b)/abs(coalesce((select max(d-t1.e) from t1 where not exists(select 1 from t1 where t1.e in (select f from t1 union select t1.d from t1))),19)*e+19)) or t1.a in (select 13 from t1 union select t1.d from t1) and (b)<= -t1.f),11))))/abs(e)) FROM t1 WHERE t1.e<>coalesce((select case 17 when f then coalesce((select max(t1.c) from t1 where coalesce((select coalesce((select ~e from t1 where not 17<=e | 17),11)+t1.d+t1.e from t1 where (e>=f or (not t1.e>=t1.a or e between f and t1.f))),t1.b) not in (13,17,c) or t1.a> -t1.d),19) else t1.c end from t1 where (not 17 not in (t1.b,11,b) and f>=t1.f) or 19<=t1.f),d)-11} +} {0} +do_test randexpr-2.2137 { + db eval {SELECT (case when not case when ((e not between d and case when not exists(select 1 from t1 where 11>=(a)*t1.d*11 or -(d) not in (a,11,t1.f)) then f else ~f end or not d<>t1.e or t1.c in (a,t1.d,t1.c))) then case when -b between 11 and b then (select count(distinct 17) from t1) when t1.e<>11 then f else d end else t1.d end<=a then (abs(t1.d)/abs(e)) when t1.d=t1.b and 17=t1.e)) then f else 19 end-t1.a*t1.a-e<=t1.b and 17 in (t1.d,13,t1.b) then c else t1.b end<>t1.b or not exists(select 1 from t1 where 17 between (d) and ( -(t1.d)))} +} {0} +do_test randexpr-2.2138 { + db eval {SELECT (case when not case when ((e not between d and case when not exists(select 1 from t1 where 11>=(a)*t1.d*11 or -(d) not in (a,11,t1.f)) then f else ~f end or not d<>t1.e or t1.c in (a,t1.d,t1.c))) then case when -b between 11 and b then (select count(distinct 17) from t1) when t1.e<>11 then f else d end else t1.d end<=a then (abs(t1.d)/abs(e)) when t1.d=t1.b and 17=t1.e)) then f else 19 end-t1.a*t1.a-e<=t1.b and 17 in (t1.d,13,t1.b) then c else t1.b end<>t1.b or not exists(select 1 from t1 where 17 between (d) and ( -(t1.d))))} +} {} +do_test randexpr-2.2139 { + db eval {SELECT t1.b+case when f+f not in (c+case when t1.f<>d then 11 when f between 11 and t1.d then 11 else 13 end,e,(t1.f)) and not exists(select 1 from t1 where (exists(select 1 from t1 where 19<11)) or 11 in ((t1.d),t1.c,b) and t1.b>13) and t1.b=t1.b then (abs(t1.d)/abs(13)) when a not between t1.c and 19 then t1.f else 19 end+13*17-t1.e FROM t1 WHERE not exists(select 1 from t1 where (exists(select 1 from t1 where t1.c>=19)))} +} {} +do_test randexpr-2.2140 { + db eval {SELECT t1.b+case when f+f not in (c+case when t1.f<>d then 11 when f between 11 and t1.d then 11 else 13 end,e,(t1.f)) and not exists(select 1 from t1 where (exists(select 1 from t1 where 19<11)) or 11 in ((t1.d),t1.c,b) and t1.b>13) and t1.b=t1.b then (abs(t1.d)/abs(13)) when a not between t1.c and 19 then t1.f else 19 end+13*17-t1.e FROM t1 WHERE NOT (not exists(select 1 from t1 where (exists(select 1 from t1 where t1.c>=19))))} +} {-49} +do_test randexpr-2.2141 { + db eval {SELECT (case when f>(select abs((((count(*)-+cast(avg(d) AS integer)))) | count(distinct a)) from t1) then case when (abs(coalesce((select max(t1.d) from t1 where coalesce((select max(case when t1.b between t1.f and a then -f when f<> -b then t1.e else -19 end) from t1 where not exists(select 1 from t1 where t1.c not in (t1.f,t1.c,t1.a))),t1.c)<=b), -t1.c))/abs(t1.a))11} +} {600} +do_test randexpr-2.2142 { + 
db eval {SELECT (case when f>(select abs((((count(*)-+cast(avg(d) AS integer)))) | count(distinct a)) from t1) then case when (abs(coalesce((select max(t1.d) from t1 where coalesce((select max(case when t1.b between t1.f and a then -f when f<> -b then t1.e else -19 end) from t1 where not exists(select 1 from t1 where t1.c not in (t1.f,t1.c,t1.a))),t1.c)<=b), -t1.c))/abs(t1.a))11)} +} {} +do_test randexpr-2.2143 { + db eval {SELECT (case when f>(select abs((((count(*)-+cast(avg(d) AS integer)))) & count(distinct a)) from t1) then case when (abs(coalesce((select max(t1.d) from t1 where coalesce((select max(case when t1.b between t1.f and a then -f when f<> -b then t1.e else -19 end) from t1 where not exists(select 1 from t1 where t1.c not in (t1.f,t1.c,t1.a))),t1.c)<=b), -t1.c))/abs(t1.a))11} +} {600} +do_test randexpr-2.2144 { + db eval {SELECT (abs(+coalesce((select t1.f from t1 where not exists(select 1 from t1 where (((c)<=t1.a)))),f+(select count(distinct t1.b) from t1)*t1.b+case when case (select count(*) from t1) when coalesce((select max(t1.f-e+13) from t1 where (t1.e>a and 19 not in (13,b,19))),t1.c)+e then t1.b else t1.b end in (select -c from t1 union select c from t1) then f when t1.a>t1.e then t1.c else d end+ -13))/abs((t1.f))) FROM t1 WHERE coalesce((select t1.f from t1 where a>=e),13) between -c and t1.a} +} {1} +do_test randexpr-2.2145 { + db eval {SELECT (abs(+coalesce((select t1.f from t1 where not exists(select 1 from t1 where (((c)<=t1.a)))),f+(select count(distinct t1.b) from t1)*t1.b+case when case (select count(*) from t1) when coalesce((select max(t1.f-e+13) from t1 where (t1.e>a and 19 not in (13,b,19))),t1.c)+e then t1.b else t1.b end in (select -c from t1 union select c from t1) then f when t1.a>t1.e then t1.c else d end+ -13))/abs((t1.f))) FROM t1 WHERE NOT (coalesce((select t1.f from t1 where a>=e),13) between -c and t1.a)} +} {} +do_test randexpr-2.2146 { + db eval {SELECT coalesce((select max(~19) from t1 where t1.a between case t1.c when t1.b then t1.e else +~f end and case when 19 not in (e+b,t1.e-d,(select max(e) from t1)) then t1.a else case 13 when case e when e then t1.f else f end-a+t1.f then 19 else t1.b end+11 end-t1.d-17 or t1.b<>t1.a),13)-t1.b | 11 FROM t1 WHERE (+(17)-(select cast(avg( -((17)*case t1.b when 13+f then t1.f else t1.b end)) AS integer) | case case cast(avg(e) AS integer) when abs((case (~min(t1.c)) when max(t1.e) then cast(avg(t1.b) AS integer) else (( -count(*))) end-max(b)))+(min(t1.c)) then -cast(avg(c) AS integer) else min(f) end when cast(avg(17) AS integer) then max(f) else (min(d)) end from t1)+e+c*19>=13)} +} {-209} +do_test randexpr-2.2147 { + db eval {SELECT coalesce((select max(~19) from t1 where t1.a between case t1.c when t1.b then t1.e else +~f end and case when 19 not in (e+b,t1.e-d,(select max(e) from t1)) then t1.a else case 13 when case e when e then t1.f else f end-a+t1.f then 19 else t1.b end+11 end-t1.d-17 or t1.b<>t1.a),13)-t1.b | 11 FROM t1 WHERE NOT ((+(17)-(select cast(avg( -((17)*case t1.b when 13+f then t1.f else t1.b end)) AS integer) | case case cast(avg(e) AS integer) when abs((case (~min(t1.c)) when max(t1.e) then cast(avg(t1.b) AS integer) else (( -count(*))) end-max(b)))+(min(t1.c)) then -cast(avg(c) AS integer) else min(f) end when cast(avg(17) AS integer) then max(f) else (min(d)) end from t1)+e+c*19>=13))} +} {} +do_test randexpr-2.2148 { + db eval {SELECT coalesce((select max(~19) from t1 where t1.a between case t1.c when t1.b then t1.e else +~f end and case when 19 not in (e+b,t1.e-d,(select 
max(e) from t1)) then t1.a else case 13 when case e when e then t1.f else f end-a+t1.f then 19 else t1.b end+11 end-t1.d-17 or t1.b<>t1.a),13)-t1.b & 11 FROM t1 WHERE (+(17)-(select cast(avg( -((17)*case t1.b when 13+f then t1.f else t1.b end)) AS integer) | case case cast(avg(e) AS integer) when abs((case (~min(t1.c)) when max(t1.e) then cast(avg(t1.b) AS integer) else (( -count(*))) end-max(b)))+(min(t1.c)) then -cast(avg(c) AS integer) else min(f) end when cast(avg(17) AS integer) then max(f) else (min(d)) end from t1)+e+c*19>=13)} +} {0} +do_test randexpr-2.2149 { + db eval {SELECT coalesce((select max(e) from t1 where 11>=coalesce((select max(b) from t1 where (abs(13)/abs(coalesce((select max(11) from t1 where f not between t1.a and t1.d),t1.d)))=case when 11 in (select (11 | d)-coalesce((select t1.e | coalesce((select t1.e from t1 where t1.f in (select 13 from t1 union select t1.c from t1) and d in (select t1.b from t1 union select 17 from t1)),(c))-17-19 from t1 where (t1.b)<=t1.b),(d)) from t1 union select t1.d from t1) then t1.a else (b) end),c)),19) FROM t1 WHERE ~t1.b not in (~++(select (case ~ -min( -11) when abs(count(*)*count(*)) then +~max(t1.a-11 | (19)) else case abs((cast(avg(f) AS integer)))-count(distinct t1.a) when (( -(max(a)))) then (min(t1.d)) else (min(a)) end end) from t1)+(abs(+t1.b)/abs(a))-t1.f+~t1.d+b+t1.c,13,e)} +} {19} +do_test randexpr-2.2150 { + db eval {SELECT coalesce((select max(e) from t1 where 11>=coalesce((select max(b) from t1 where (abs(13)/abs(coalesce((select max(11) from t1 where f not between t1.a and t1.d),t1.d)))=case when 11 in (select (11 | d)-coalesce((select t1.e | coalesce((select t1.e from t1 where t1.f in (select 13 from t1 union select t1.c from t1) and d in (select t1.b from t1 union select 17 from t1)),(c))-17-19 from t1 where (t1.b)<=t1.b),(d)) from t1 union select t1.d from t1) then t1.a else (b) end),c)),19) FROM t1 WHERE NOT (~t1.b not in (~++(select (case ~ -min( -11) when abs(count(*)*count(*)) then +~max(t1.a-11 | (19)) else case abs((cast(avg(f) AS integer)))-count(distinct t1.a) when (( -(max(a)))) then (min(t1.d)) else (min(a)) end end) from t1)+(abs(+t1.b)/abs(a))-t1.f+~t1.d+b+t1.c,13,e))} +} {} +do_test randexpr-2.2151 { + db eval {SELECT coalesce((select max(e) from t1 where 11>=coalesce((select max(b) from t1 where (abs(13)/abs(coalesce((select max(11) from t1 where f not between t1.a and t1.d),t1.d)))=case when 11 in (select (11 & d)-coalesce((select t1.e & coalesce((select t1.e from t1 where t1.f in (select 13 from t1 union select t1.c from t1) and d in (select t1.b from t1 union select 17 from t1)),(c))-17-19 from t1 where (t1.b)<=t1.b),(d)) from t1 union select t1.d from t1) then t1.a else (b) end),c)),19) FROM t1 WHERE ~t1.b not in (~++(select (case ~ -min( -11) when abs(count(*)*count(*)) then +~max(t1.a-11 | (19)) else case abs((cast(avg(f) AS integer)))-count(distinct t1.a) when (( -(max(a)))) then (min(t1.d)) else (min(a)) end end) from t1)+(abs(+t1.b)/abs(a))-t1.f+~t1.d+b+t1.c,13,e)} +} {19} +do_test randexpr-2.2152 { + db eval {SELECT (abs(+b)/abs(coalesce((select max(t1.b) from t1 where (not t1.a not between t1.c-(abs(b)/abs(d)) and c)),19)+19*t1.e | t1.e))-t1.f*coalesce((select case d when e*t1.d*f then 17 else t1.c+t1.c end from t1 where t1.b>=(select count(*) from t1)),+f+t1.b-t1.b+19- -b) FROM t1 WHERE not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where not (b>=(abs(case when case (select count(*) from t1) when t1.a then a+case t1.f when a+ -t1.a*t1.d 
then 19 else e end+t1.a+f else -d end>=e then f when t1.b>=t1.d or not dt1.b then t1.e else d end)/abs(f)) or a>e))))} +} {-360000} +do_test randexpr-2.2153 { + db eval {SELECT (abs(+b)/abs(coalesce((select max(t1.b) from t1 where (not t1.a not between t1.c-(abs(b)/abs(d)) and c)),19)+19*t1.e | t1.e))-t1.f*coalesce((select case d when e*t1.d*f then 17 else t1.c+t1.c end from t1 where t1.b>=(select count(*) from t1)),+f+t1.b-t1.b+19- -b) FROM t1 WHERE NOT (not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where not (b>=(abs(case when case (select count(*) from t1) when t1.a then a+case t1.f when a+ -t1.a*t1.d then 19 else e end+t1.a+f else -d end>=e then f when t1.b>=t1.d or not dt1.b then t1.e else d end)/abs(f)) or a>e)))))} +} {} +do_test randexpr-2.2154 { + db eval {SELECT (abs(+b)/abs(coalesce((select max(t1.b) from t1 where (not t1.a not between t1.c-(abs(b)/abs(d)) and c)),19)+19*t1.e & t1.e))-t1.f*coalesce((select case d when e*t1.d*f then 17 else t1.c+t1.c end from t1 where t1.b>=(select count(*) from t1)),+f+t1.b-t1.b+19- -b) FROM t1 WHERE not exists(select 1 from t1 where not exists(select 1 from t1 where not exists(select 1 from t1 where not (b>=(abs(case when case (select count(*) from t1) when t1.a then a+case t1.f when a+ -t1.a*t1.d then 19 else e end+t1.a+f else -d end>=e then f when t1.b>=t1.d or not dt1.b then t1.e else d end)/abs(f)) or a>e))))} +} {-360000} +do_test randexpr-2.2155 { + db eval {SELECT (abs((select +abs(case count(*) when +max(19) then count(distinct b)*count(distinct (13+(select max(t1.d) from t1)* -a+t1.e*t1.b*a))+cast(avg(13) AS integer) | min(13)*cast(avg(c) AS integer)-count(distinct t1.e)-((min(f))) | cast(avg(17) AS integer)-( -cast(avg(( -t1.d)) AS integer))- -cast(avg(t1.d) AS integer) | count(*) else max(t1.f) end) from t1))/abs(t1.a)) FROM t1 WHERE b<=e or exists(select 1 from t1 where 17 in (select ~e+coalesce((select d from t1 where 11* -~+case when not (b) not between t1.f and b then -~t1.c-11*+19+((b)) when f not between f and 11 then b else a end+11-(b)-t1.f<>17),d) from t1 union select t1.a from t1))} +} {6} +do_test randexpr-2.2156 { + db eval {SELECT (abs((select +abs(case count(*) when +max(19) then count(distinct b)*count(distinct (13+(select max(t1.d) from t1)* -a+t1.e*t1.b*a))+cast(avg(13) AS integer) | min(13)*cast(avg(c) AS integer)-count(distinct t1.e)-((min(f))) | cast(avg(17) AS integer)-( -cast(avg(( -t1.d)) AS integer))- -cast(avg(t1.d) AS integer) | count(*) else max(t1.f) end) from t1))/abs(t1.a)) FROM t1 WHERE NOT (b<=e or exists(select 1 from t1 where 17 in (select ~e+coalesce((select d from t1 where 11* -~+case when not (b) not between t1.f and b then -~t1.c-11*+19+((b)) when f not between f and 11 then b else a end+11-(b)-t1.f<>17),d) from t1 union select t1.a from t1)))} +} {} +do_test randexpr-2.2157 { + db eval {SELECT (abs((select +abs(case count(*) when +max(19) then count(distinct b)*count(distinct (13+(select max(t1.d) from t1)* -a+t1.e*t1.b*a))+cast(avg(13) AS integer) & min(13)*cast(avg(c) AS integer)-count(distinct t1.e)-((min(f))) & cast(avg(17) AS integer)-( -cast(avg(( -t1.d)) AS integer))- -cast(avg(t1.d) AS integer) & count(*) else max(t1.f) end) from t1))/abs(t1.a)) FROM t1 WHERE b<=e or exists(select 1 from t1 where 17 in (select ~e+coalesce((select d from t1 where 11* -~+case when not (b) not between t1.f and b then -~t1.c-11*+19+((b)) when f not between f and 11 then b else a end+11-(b)-t1.f<>17),d) from t1 union select t1.a from t1))} +} {6} +do_test 
randexpr-2.2158 { + db eval {SELECT coalesce((select max(11) from t1 where c=(abs(coalesce((select case when t1.c-t1.f+coalesce((select max(b) from t1 where case when (11<>17) then t1.c else c end in (select b from t1 union select t1.b from t1) and 13=11),f)+t1.b>=t1.f then 11 when c>t1.f then 19 else 19 end from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (e>19)))),t1.d))/abs(b)) and -t1.d between t1.b and 13 or 17<>t1.e), -((11))) FROM t1 WHERE case when case when (not exists(select 1 from t1 where 19 not between case when coalesce((select b from t1 where (abs(t1.c)/abs(17))+t1.e between case when not exists(select 1 from t1 where not a<=t1.e) then t1.e when d17) then t1.c else c end in (select b from t1 union select t1.b from t1) and 13=11),f)+t1.b>=t1.f then 11 when c>t1.f then 19 else 19 end from t1 where exists(select 1 from t1 where not exists(select 1 from t1 where (e>19)))),t1.d))/abs(b)) and -t1.d between t1.b and 13 or 17<>t1.e), -((11))) FROM t1 WHERE NOT (case when case when (not exists(select 1 from t1 where 19 not between case when coalesce((select b from t1 where (abs(t1.c)/abs(17))+t1.e between case when not exists(select 1 from t1 where not a<=t1.e) then t1.e when d(t1.c)} +} {-2600247} +do_test randexpr-2.2161 { + db eval {SELECT coalesce((select ( -t1.e*(abs((case when not 13 not in (case coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where d between -b and f)),11) when 11 then 19 else a end,t1.a,t1.a) then f-t1.f when t1.c in (select -abs(count(*)+(max((t1.a)))) from t1 union select -max((19)) from t1) then d else 13 end-19))/abs(t1.b))-t1.d*e-19) from t1 where t1.e not between 13 and c),19)*13 FROM t1 WHERE NOT (19<>(t1.c))} +} {} +do_test randexpr-2.2162 { + db eval {SELECT coalesce((select max(13) from t1 where f= -case when exists(select 1 from t1 where not t1.b>=e and b in (select b from t1 union select (+(t1.f-d)) from t1) or 19 in (select min(e) from t1 union select -~+++ -abs(min(17*t1.d)) from t1) or not t1.a>a) then 13 else t1.c end+a | ~11),19) FROM t1 WHERE (exists(select 1 from t1 where t1.c>19)) and a>d-a} +} {} +do_test randexpr-2.2163 { + db eval {SELECT coalesce((select max(13) from t1 where f= -case when exists(select 1 from t1 where not t1.b>=e and b in (select b from t1 union select (+(t1.f-d)) from t1) or 19 in (select min(e) from t1 union select -~+++ -abs(min(17*t1.d)) from t1) or not t1.a>a) then 13 else t1.c end+a | ~11),19) FROM t1 WHERE NOT ((exists(select 1 from t1 where t1.c>19)) and a>d-a)} +} {19} +do_test randexpr-2.2164 { + db eval {SELECT coalesce((select max(13) from t1 where f= -case when exists(select 1 from t1 where not t1.b>=e and b in (select b from t1 union select (+(t1.f-d)) from t1) or 19 in (select min(e) from t1 union select -~+++ -abs(min(17*t1.d)) from t1) or not t1.a>a) then 13 else t1.c end+a & ~11),19) FROM t1 WHERE NOT ((exists(select 1 from t1 where t1.c>19)) and a>d-a)} +} {19} +do_test randexpr-2.2165 { + db eval {SELECT coalesce((select case when a between (select count(distinct 19) from t1) and case when de then b*t1.e else a end) from t1 where t1.e=t1.e),t1.f) from t1 where (d not between 13 and (f))),17)-d) from t1 where (11<>19)),t1.e)+t1.c)/abs(t1.e))*11*c-t1.d*t1.d+a end FROM t1 WHERE 13-e*t1.e=19),(abs(e)/abs((abs( -d+e)/abs(a))))+t1.a) from t1 where t1.b in (t1.e,(t1.e),t1.f)),f))=t1.e)) or b<=19),t1.b)<=13) then 13 else t1.f end-f} +} {-159900} +do_test randexpr-2.2168 { + db eval {SELECT case 17 when -b then e*c else (abs(coalesce((select 
max(coalesce((select coalesce((select max(case when t1.d<=19 and t1.d>e then b*t1.e else a end) from t1 where t1.e=t1.e),t1.f) from t1 where (d not between 13 and (f))),17)-d) from t1 where (11<>19)),t1.e)+t1.c)/abs(t1.e))*11*c-t1.d*t1.d+a end FROM t1 WHERE NOT (13-e*t1.e=19),(abs(e)/abs((abs( -d+e)/abs(a))))+t1.a) from t1 where t1.b in (t1.e,(t1.e),t1.f)),f))=t1.e)) or b<=19),t1.b)<=13) then 13 else t1.f end-f)} +} {} +do_test randexpr-2.2169 { + db eval {SELECT case when case when e*(d-case a when 13 then (19) else d end) between 13 and f then f when exists(select 1 from t1 where 19<>t1.a) then e else b end in (select -max(e)+min(11)-case ~~cast(avg(t1.c) AS integer) when max(19) then max(f) else -cast(avg(f) AS integer) end from t1 union select -max(13) from t1) and f>d and (t1.f=c) then t1.b else t1.c+ -t1.f end FROM t1 WHERE t1.e+11>=~t1.a-11} +} {-300} +do_test randexpr-2.2170 { + db eval {SELECT case when case when e*(d-case a when 13 then (19) else d end) between 13 and f then f when exists(select 1 from t1 where 19<>t1.a) then e else b end in (select -max(e)+min(11)-case ~~cast(avg(t1.c) AS integer) when max(19) then max(f) else -cast(avg(f) AS integer) end from t1 union select -max(13) from t1) and f>d and (t1.f=c) then t1.b else t1.c+ -t1.f end FROM t1 WHERE NOT (t1.e+11>=~t1.a-11)} +} {} +do_test randexpr-2.2171 { + db eval {SELECT coalesce((select t1.f from t1 where not (abs(coalesce((select 17*13+(select cast(avg(t1.a) AS integer)-(count(distinct t1.c)*+cast(avg((select count(*) from t1)) AS integer))+count(*) from t1) from t1 where (select count(distinct t1.d*(((t1.d)))+f) from t1) in ( -b*t1.e-11,t1.d,d) and exists(select 1 from t1 where t1.d>c)),t1.e))/abs(b)) not in (t1.f,11,d)),t1.d) FROM t1 WHERE exists(select 1 from t1 where +t1.c-case when exists(select 1 from t1 where (select +case count(distinct case when 19*d in (select (cast(avg(11) AS integer)*cast(avg( -13) AS integer)) from t1 union select count(*) from t1) then t1.c else 13 end) when count(*) then count(*) else (min(t1.b)) end from t1)<>case when (not t1.ft1.d then case when f>=17 then t1.a else t1.e end else b end) then 11+t1.f when d<>17 then t1.e else a end- -13<>e) and t1.a<=17} +} {} +do_test randexpr-2.2172 { + db eval {SELECT coalesce((select t1.f from t1 where not (abs(coalesce((select 17*13+(select cast(avg(t1.a) AS integer)-(count(distinct t1.c)*+cast(avg((select count(*) from t1)) AS integer))+count(*) from t1) from t1 where (select count(distinct t1.d*(((t1.d)))+f) from t1) in ( -b*t1.e-11,t1.d,d) and exists(select 1 from t1 where t1.d>c)),t1.e))/abs(b)) not in (t1.f,11,d)),t1.d) FROM t1 WHERE NOT (exists(select 1 from t1 where +t1.c-case when exists(select 1 from t1 where (select +case count(distinct case when 19*d in (select (cast(avg(11) AS integer)*cast(avg( -13) AS integer)) from t1 union select count(*) from t1) then t1.c else 13 end) when count(*) then count(*) else (min(t1.b)) end from t1)<>case when (not t1.ft1.d then case when f>=17 then t1.a else t1.e end else b end) then 11+t1.f when d<>17 then t1.e else a end- -13<>e) and t1.a<=17)} +} {400} +do_test randexpr-2.2173 { + db eval {SELECT case when not (select - -count(distinct t1.f)-(max(case when e not between f and t1.b+(13)+ -d-t1.a then t1.d else 11 end))+ -max(t1.b)*cast(avg(13) AS integer)-count(*) | (max(t1.e)) from t1)<>~t1.d then b when -coalesce((select (abs(t1.e)/abs(t1.b)) from t1 where a<>b), - -17)<=t1.a then -f else 11 end FROM t1 WHERE t1.a<>(select max(19) from t1)} +} {-600} +do_test randexpr-2.2174 { + db eval {SELECT 
case when not (select - -count(distinct t1.f)-(max(case when e not between f and t1.b+(13)+ -d-t1.a then t1.d else 11 end))+ -max(t1.b)*cast(avg(13) AS integer)-count(*) | (max(t1.e)) from t1)<>~t1.d then b when -coalesce((select (abs(t1.e)/abs(t1.b)) from t1 where a<>b), - -17)<=t1.a then -f else 11 end FROM t1 WHERE NOT (t1.a<>(select max(19) from t1))} +} {} +do_test randexpr-2.2175 { + db eval {SELECT case when not (select - -count(distinct t1.f)-(max(case when e not between f and t1.b+(13)+ -d-t1.a then t1.d else 11 end))+ -max(t1.b)*cast(avg(13) AS integer)-count(*) & (max(t1.e)) from t1)<>~t1.d then b when -coalesce((select (abs(t1.e)/abs(t1.b)) from t1 where a<>b), - -17)<=t1.a then -f else 11 end FROM t1 WHERE t1.a<>(select max(19) from t1)} +} {-600} +do_test randexpr-2.2176 { + db eval {SELECT case when case coalesce((select t1.d+a*11 from t1 where e<=t1.d),19) when 19 then -t1.d-t1.a else t1.e end not between 17 and d then t1.f when not (t1.c<=e and ((13<>e or t1.c not between t1.c and 13)) or t1.c=17 and t1.a not in (t1.e, -t1.d,t1.b) and b=b then t1.d else 13 end FROM t1 WHERE (case t1.f when t1.b then case when case when not exists(select 1 from t1 where (t1.d+t1.a in (c,coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where (t1.a>=c))),+b), -t1.b))) then (select abs( -case ((abs( - - -count(*)+count(*)))) when min(d) then -(cast(avg(t1.d) AS integer)) else cast(avg(e) AS integer) end | ((max((t1.f)))))+(min(t1.c)) from t1) else 11 end>f then 19 else t1.b end else b end)<=13 or (t1.f>b)} +} {600} +do_test randexpr-2.2177 { + db eval {SELECT case when case coalesce((select t1.d+a*11 from t1 where e<=t1.d),19) when 19 then -t1.d-t1.a else t1.e end not between 17 and d then t1.f when not (t1.c<=e and ((13<>e or t1.c not between t1.c and 13)) or t1.c=17 and t1.a not in (t1.e, -t1.d,t1.b) and b=b then t1.d else 13 end FROM t1 WHERE NOT ((case t1.f when t1.b then case when case when not exists(select 1 from t1 where (t1.d+t1.a in (c,coalesce((select max(t1.c) from t1 where exists(select 1 from t1 where (t1.a>=c))),+b), -t1.b))) then (select abs( -case ((abs( - - -count(*)+count(*)))) when min(d) then -(cast(avg(t1.d) AS integer)) else cast(avg(e) AS integer) end | ((max((t1.f)))))+(min(t1.c)) from t1) else 11 end>f then 19 else t1.b end else b end)<=13 or (t1.f>b))} +} {} +do_test randexpr-2.2178 { + db eval {SELECT (((d)+t1.f))-case +t1.d when coalesce((select max(13) from t1 where (select -max(17*case when d>=coalesce((select 19 from t1 where a<>t1.e),t1.b) then e when -c<>d then e else t1.a end*t1.f)+ -case count(*) when (+cast(avg(c) AS integer)) then count(*) else -count(distinct t1.f) end+count(*) from t1)<=(abs(t1.c)/abs(t1.f))),17) then 17 else t1.d end-t1.b FROM t1 WHERE ~d-c in (select cast(avg(19) AS integer) from t1 union select - -abs((abs((cast(avg(f) AS integer)) | abs(count(distinct 19))))) from t1)} +} {} +do_test randexpr-2.2179 { + db eval {SELECT (((d)+t1.f))-case +t1.d when coalesce((select max(13) from t1 where (select -max(17*case when d>=coalesce((select 19 from t1 where a<>t1.e),t1.b) then e when -c<>d then e else t1.a end*t1.f)+ -case count(*) when (+cast(avg(c) AS integer)) then count(*) else -count(distinct t1.f) end+count(*) from t1)<=(abs(t1.c)/abs(t1.f))),17) then 17 else t1.d end-t1.b FROM t1 WHERE NOT (~d-c in (select cast(avg(19) AS integer) from t1 union select - -abs((abs((cast(avg(f) AS integer)) | abs(count(distinct 19))))) from t1))} +} {400} +do_test randexpr-2.2180 { + db eval {SELECT coalesce((select max(f+t1.c*b) from t1 
where (select abs(cast(avg(f) AS integer)) from t1)-c*~ -b+ -coalesce((select max(t1.a) from t1 where (select cast(avg((c) | ~t1.d) AS integer) from t1) not between t1.d-+(abs(t1.d)/abs(t1.c))+d and 11 and 17>=t1.e),t1.e)-b in (t1.c,d,e)),a)+c FROM t1 WHERE 19*e not in (a,t1.d,coalesce((select coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where not exists(select 1 from t1 where e*case when ~c | f+11* -a in ( -t1.c,17,(t1.b)) then (f) when e=e or t1.d=f and (e)>(e) then (11) else a end+a*c in (select d from t1 union select -11 from t1))))),t1.f) from t1 where t1.d<=d),13))} +} {400} +do_test randexpr-2.2181 { + db eval {SELECT coalesce((select max(f+t1.c*b) from t1 where (select abs(cast(avg(f) AS integer)) from t1)-c*~ -b+ -coalesce((select max(t1.a) from t1 where (select cast(avg((c) | ~t1.d) AS integer) from t1) not between t1.d-+(abs(t1.d)/abs(t1.c))+d and 11 and 17>=t1.e),t1.e)-b in (t1.c,d,e)),a)+c FROM t1 WHERE NOT (19*e not in (a,t1.d,coalesce((select coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where not exists(select 1 from t1 where e*case when ~c | f+11* -a in ( -t1.c,17,(t1.b)) then (f) when e=e or t1.d=f and (e)>(e) then (11) else a end+a*c in (select d from t1 union select -11 from t1))))),t1.f) from t1 where t1.d<=d),13)))} +} {} +do_test randexpr-2.2182 { + db eval {SELECT coalesce((select max(f+t1.c*b) from t1 where (select abs(cast(avg(f) AS integer)) from t1)-c*~ -b+ -coalesce((select max(t1.a) from t1 where (select cast(avg((c) & ~t1.d) AS integer) from t1) not between t1.d-+(abs(t1.d)/abs(t1.c))+d and 11 and 17>=t1.e),t1.e)-b in (t1.c,d,e)),a)+c FROM t1 WHERE 19*e not in (a,t1.d,coalesce((select coalesce((select max(t1.d) from t1 where (exists(select 1 from t1 where not exists(select 1 from t1 where e*case when ~c | f+11* -a in ( -t1.c,17,(t1.b)) then (f) when e=e or t1.d=f and (e)>(e) then (11) else a end+a*c in (select d from t1 union select -11 from t1))))),t1.f) from t1 where t1.d<=d),13))} +} {400} +do_test randexpr-2.2183 { + db eval {SELECT c*~case ((abs(t1.b)/abs(t1.b))) when c then e else t1.c end-(case when (c)>=case coalesce((select max((abs(d)/abs(c))) from t1 where 13 not in (a-t1.e-(select count(*) | min(e)+min(c) from t1)*19*t1.b,11,a)),e) when e then b else t1.d end then d when 17<11 then c else t1.d end) | t1.f FROM t1 WHERE case when not (abs(t1.d | (abs( - -coalesce((select max(coalesce((select c+e from t1 where not t1.b in ((c),b,(f))),t1.b)) from t1 where 11=b),t1.f)+13*t1.f*e)/abs(t1.e)))/abs(t1.b))>=c then coalesce((select (t1.c) from t1 where t1.e in (11,e,t1.a)),a) when 13<=t1.b then t1.f else t1.d end in (select e from t1 union select (t1.d) from t1)} +} {} +do_test randexpr-2.2184 { + db eval {SELECT c*~case ((abs(t1.b)/abs(t1.b))) when c then e else t1.c end-(case when (c)>=case coalesce((select max((abs(d)/abs(c))) from t1 where 13 not in (a-t1.e-(select count(*) | min(e)+min(c) from t1)*19*t1.b,11,a)),e) when e then b else t1.d end then d when 17<11 then c else t1.d end) | t1.f FROM t1 WHERE NOT (case when not (abs(t1.d | (abs( - -coalesce((select max(coalesce((select c+e from t1 where not t1.b in ((c),b,(f))),t1.b)) from t1 where 11=b),t1.f)+13*t1.f*e)/abs(t1.e)))/abs(t1.b))>=c then coalesce((select (t1.c) from t1 where t1.e in (11,e,t1.a)),a) when 13<=t1.b then t1.f else t1.d end in (select e from t1 union select (t1.d) from t1))} +} {-90116} +do_test randexpr-2.2185 { + db eval {SELECT c*~case ((abs(t1.b)/abs(t1.b))) when c then e else t1.c end-(case when (c)>=case coalesce((select 
max((abs(d)/abs(c))) from t1 where 13 not in (a-t1.e-(select count(*) & min(e)+min(c) from t1)*19*t1.b,11,a)),e) when e then b else t1.d end then d when 17<11 then c else t1.d end) & t1.f FROM t1 WHERE NOT (case when not (abs(t1.d | (abs( - -coalesce((select max(coalesce((select c+e from t1 where not t1.b in ((c),b,(f))),t1.b)) from t1 where 11=b),t1.f)+13*t1.f*e)/abs(t1.e)))/abs(t1.b))>=c then coalesce((select (t1.c) from t1 where t1.e in (11,e,t1.a)),a) when 13<=t1.b then t1.f else t1.d end in (select e from t1 union select (t1.d) from t1))} +} {16} +do_test randexpr-2.2186 { + db eval {SELECT case when a+13*+ -~case when 17=t1.c*17 then 19 | (t1.b) when f+t1.e+(d)+b+c not between (d) and e or (a between t1.b and t1.a) then 17 else a end-17-(e)<=17 then b when c in (select 17 from t1 union select t1.a from t1) then c else t1.e end FROM t1 WHERE e-coalesce((select f-case 17+t1.a*(abs(t1.e)/abs((abs(coalesce((select max(+ -case when t1.a>=d then c else t1.e end-d) from t1 where (not exists(select 1 from t1 where (t1.d in ( -f,d,t1.b))) or -t1.f not between -b and t1.b)),11))/abs(13)))) when 13 then d else t1.f end from t1 where t1.e in ((19),t1.e,17) or e>=t1.d),e)<>17 or 19<=19} +} {200} +do_test randexpr-2.2187 { + db eval {SELECT case when a+13*+ -~case when 17=t1.c*17 then 19 | (t1.b) when f+t1.e+(d)+b+c not between (d) and e or (a between t1.b and t1.a) then 17 else a end-17-(e)<=17 then b when c in (select 17 from t1 union select t1.a from t1) then c else t1.e end FROM t1 WHERE NOT (e-coalesce((select f-case 17+t1.a*(abs(t1.e)/abs((abs(coalesce((select max(+ -case when t1.a>=d then c else t1.e end-d) from t1 where (not exists(select 1 from t1 where (t1.d in ( -f,d,t1.b))) or -t1.f not between -b and t1.b)),11))/abs(13)))) when 13 then d else t1.f end from t1 where t1.e in ((19),t1.e,17) or e>=t1.d),e)<>17 or 19<=19)} +} {} +do_test randexpr-2.2188 { + db eval {SELECT case when a+13*+ -~case when 17=t1.c*17 then 19 & (t1.b) when f+t1.e+(d)+b+c not between (d) and e or (a between t1.b and t1.a) then 17 else a end-17-(e)<=17 then b when c in (select 17 from t1 union select t1.a from t1) then c else t1.e end FROM t1 WHERE e-coalesce((select f-case 17+t1.a*(abs(t1.e)/abs((abs(coalesce((select max(+ -case when t1.a>=d then c else t1.e end-d) from t1 where (not exists(select 1 from t1 where (t1.d in ( -f,d,t1.b))) or -t1.f not between -b and t1.b)),11))/abs(13)))) when 13 then d else t1.f end from t1 where t1.e in ((19),t1.e,17) or e>=t1.d),e)<>17 or 19<=19} +} {200} +do_test randexpr-2.2189 { + db eval {SELECT 17-t1.a*(select abs(~count(*)*abs(count(distinct a)) | (~count(distinct case (a* -coalesce((select a from t1 where (13 in (select max((abs(t1.e)/abs(case when not exists(select 1 from t1 where (b<>c)) then (11)-t1.f-e else e end))) from t1 union select (count(distinct 11)) from t1))),t1.b)+11) when (17) then e else f end))) from t1) FROM t1 WHERE 13>case (e) when t1.a then t1.a else 11 end-t1.b} +} {-183} +do_test randexpr-2.2190 { + db eval {SELECT 17-t1.a*(select abs(~count(*)*abs(count(distinct a)) | (~count(distinct case (a* -coalesce((select a from t1 where (13 in (select max((abs(t1.e)/abs(case when not exists(select 1 from t1 where (b<>c)) then (11)-t1.f-e else e end))) from t1 union select (count(distinct 11)) from t1))),t1.b)+11) when (17) then e else f end))) from t1) FROM t1 WHERE NOT (13>case (e) when t1.a then t1.a else 11 end-t1.b)} +} {} +do_test randexpr-2.2191 { + db eval {SELECT 17-t1.a*(select abs(~count(*)*abs(count(distinct a)) & (~count(distinct case (a* 
-coalesce((select a from t1 where (13 in (select max((abs(t1.e)/abs(case when not exists(select 1 from t1 where (b<>c)) then (11)-t1.f-e else e end))) from t1 union select (count(distinct 11)) from t1))),t1.b)+11) when (17) then e else f end))) from t1) FROM t1 WHERE 13>case (e) when t1.a then t1.a else 11 end-t1.b} +} {-183} +do_test randexpr-2.2192 { + db eval {SELECT case when t1.b= -b then e else (select -(count(distinct t1.a))*max(c)-count(*)+count(distinct t1.b+d)*min(t1.f-case when 19 in (select t1.d from t1 union select e from t1) then 17 when f<=t1.c then t1.b else e end-(d)*c)-count(distinct t1.d)*case +max(f) when max(17) | max(d) | -(cast(avg(d) AS integer)) then -cast(avg(11) AS integer) else -count(distinct 11) end*min(t1.d) from t1) end FROM t1 WHERE t1.f>=f} +} {-119801} +do_test randexpr-2.2193 { + db eval {SELECT case when t1.b= -b then e else (select -(count(distinct t1.a))*max(c)-count(*)+count(distinct t1.b+d)*min(t1.f-case when 19 in (select t1.d from t1 union select e from t1) then 17 when f<=t1.c then t1.b else e end-(d)*c)-count(distinct t1.d)*case +max(f) when max(17) | max(d) | -(cast(avg(d) AS integer)) then -cast(avg(11) AS integer) else -count(distinct 11) end*min(t1.d) from t1) end FROM t1 WHERE NOT (t1.f>=f)} +} {} +do_test randexpr-2.2194 { + db eval {SELECT case when t1.b= -b then e else (select -(count(distinct t1.a))*max(c)-count(*)+count(distinct t1.b+d)*min(t1.f-case when 19 in (select t1.d from t1 union select e from t1) then 17 when f<=t1.c then t1.b else e end-(d)*c)-count(distinct t1.d)*case +max(f) when max(17) & max(d) & -(cast(avg(d) AS integer)) then -cast(avg(11) AS integer) else -count(distinct 11) end*min(t1.d) from t1) end FROM t1 WHERE t1.f>=f} +} {-119801} +do_test randexpr-2.2195 { + db eval {SELECT -~f | (abs(13+t1.b)/abs((select max(t1.b | t1.f+ -17-11) | +case min(e) when count(*) | case (~ -min(c)*cast(avg(19) AS integer)) when -count(*) then min(a) else min(13) end then -((max(t1.c))) else max(e) end-(count(distinct -t1.a)) from t1)))+~(abs((abs(case when a<>19 then t1.a when f>17 then a else b end)/abs(t1.b)))/abs(t1.f)) FROM t1 WHERE (17 between t1.f*(abs(b)/abs((abs((select max(17) from t1) | 17)/abs(13))+c-coalesce((select max(d) from t1 where d between f-a-11-d and a+t1.a+d-t1.b+17+13),case when a not between a and t1.b then t1.d when t1.d not in ((e),a,d) then c else b end*d))) and b and t1.e> -17)} +} {} +do_test randexpr-2.2196 { + db eval {SELECT -~f | (abs(13+t1.b)/abs((select max(t1.b | t1.f+ -17-11) | +case min(e) when count(*) | case (~ -min(c)*cast(avg(19) AS integer)) when -count(*) then min(a) else min(13) end then -((max(t1.c))) else max(e) end-(count(distinct -t1.a)) from t1)))+~(abs((abs(case when a<>19 then t1.a when f>17 then a else b end)/abs(t1.b)))/abs(t1.f)) FROM t1 WHERE NOT ((17 between t1.f*(abs(b)/abs((abs((select max(17) from t1) | 17)/abs(13))+c-coalesce((select max(d) from t1 where d between f-a-11-d and a+t1.a+d-t1.b+17+13),case when a not between a and t1.b then t1.d when t1.d not in ((e),a,d) then c else b end*d))) and b and t1.e> -17))} +} {-1} +do_test randexpr-2.2197 { + db eval {SELECT (select abs(abs( -case case ~count(distinct t1.f+(t1.f)+coalesce((select (abs(t1.e+(b)+a)/abs(11))+b from t1 where exists(select 1 from t1 where t1.f not in (t1.b,(b),e) and not (t1.b)>c)),f)) when ~count(*)-min(t1.f) then +(count(*))*( -count(*)-max(t1.a)) else min(11) end when count(distinct (f)) then -count(distinct a) else count(*) end | count(distinct 13))) from t1) FROM t1 WHERE exists(select 1 from t1 
where case when t1.f>(abs(b)/abs(a)) then ~case when t1.e+13-b<=(coalesce((select max(11) from t1 where (select abs(min(11)) from t1) in (select case d when f then 17 else t1.f end from t1 union select t1.f from t1)),b)*13)-19 then 11 when not 19 between t1.c and t1.b then a else e end-t1.a when (t1.a) not between b and t1.f then t1.a else c end=t1.e)} +} {} +do_test randexpr-2.2198 { + db eval {SELECT (select abs(abs( -case case ~count(distinct t1.f+(t1.f)+coalesce((select (abs(t1.e+(b)+a)/abs(11))+b from t1 where exists(select 1 from t1 where t1.f not in (t1.b,(b),e) and not (t1.b)>c)),f)) when ~count(*)-min(t1.f) then +(count(*))*( -count(*)-max(t1.a)) else min(11) end when count(distinct (f)) then -count(distinct a) else count(*) end | count(distinct 13))) from t1) FROM t1 WHERE NOT (exists(select 1 from t1 where case when t1.f>(abs(b)/abs(a)) then ~case when t1.e+13-b<=(coalesce((select max(11) from t1 where (select abs(min(11)) from t1) in (select case d when f then 17 else t1.f end from t1 union select t1.f from t1)),b)*13)-19 then 11 when not 19 between t1.c and t1.b then a else e end-t1.a when (t1.a) not between b and t1.f then t1.a else c end=t1.e))} +} {1} +do_test randexpr-2.2199 { + db eval {SELECT (select abs(abs( -case case ~count(distinct t1.f+(t1.f)+coalesce((select (abs(t1.e+(b)+a)/abs(11))+b from t1 where exists(select 1 from t1 where t1.f not in (t1.b,(b),e) and not (t1.b)>c)),f)) when ~count(*)-min(t1.f) then +(count(*))*( -count(*)-max(t1.a)) else min(11) end when count(distinct (f)) then -count(distinct a) else count(*) end & count(distinct 13))) from t1) FROM t1 WHERE NOT (exists(select 1 from t1 where case when t1.f>(abs(b)/abs(a)) then ~case when t1.e+13-b<=(coalesce((select max(11) from t1 where (select abs(min(11)) from t1) in (select case d when f then 17 else t1.f end from t1 union select t1.f from t1)),b)*13)-19 then 11 when not 19 between t1.c and t1.b then a else e end-t1.a when (t1.a) not between b and t1.f then t1.a else c end=t1.e))} +} {1} +do_test randexpr-2.2200 { + db eval {SELECT 19 | t1.a*(select case max(case when t1.b*17>=t1.b then coalesce((select max(e) from t1 where e in (select count(*) from t1 union select count(distinct t1.f) from t1)),d) when t1.a in (select min(11) from t1 union select (( -( -max(t1.f)))) from t1) and 17 in (select count(distinct 17) from t1 union select -count(distinct d) from t1) and (d)<>t1.c and t1.c not between b and b then a else 11 end)+cast(avg(t1.a) AS integer) when ~count(distinct t1.e) then count(distinct d) else count(distinct e) end from t1) | e*t1.b-t1.d FROM t1 WHERE (((abs(19)/abs(case ~c*d+(abs(e)/abs((coalesce((select case when (f in (select count(*) from t1 union select -min(d) from t1)) then case when -t1.f between t1.c and t1.f then b when 11<>a then t1.a else t1.c end when 13=13 then 13 else t1.a end from t1 where 19 not between 11 and t1.b or t1.b not in ((e),(b),c)),a)- -((t1.c))-c)))+t1.b+17 when 13 then ((13)) else t1.f end*t1.b)) in ( -17,t1.a,11)))} +} {} +do_test randexpr-2.2201 { + db eval {SELECT 19 | t1.a*(select case max(case when t1.b*17>=t1.b then coalesce((select max(e) from t1 where e in (select count(*) from t1 union select count(distinct t1.f) from t1)),d) when t1.a in (select min(11) from t1 union select (( -( -max(t1.f)))) from t1) and 17 in (select count(distinct 17) from t1 union select -count(distinct d) from t1) and (d)<>t1.c and t1.c not between b and b then a else 11 end)+cast(avg(t1.a) AS integer) when ~count(distinct t1.e) then count(distinct d) else count(distinct e) end 
from t1) | e*t1.b-t1.d FROM t1 WHERE NOT ((((abs(19)/abs(case ~c*d+(abs(e)/abs((coalesce((select case when (f in (select count(*) from t1 union select -min(d) from t1)) then case when -t1.f between t1.c and t1.f then b when 11<>a then t1.a else t1.c end when 13=13 then 13 else t1.a end from t1 where 19 not between 11 and t1.b or t1.b not in ((e),(b),c)),a)- -((t1.c))-c)))+t1.b+17 when 13 then ((13)) else t1.f end*t1.b)) in ( -17,t1.a,11))))} +} {99703} +do_test randexpr-2.2202 { + db eval {SELECT 19 & t1.a*(select case max(case when t1.b*17>=t1.b then coalesce((select max(e) from t1 where e in (select count(*) from t1 union select count(distinct t1.f) from t1)),d) when t1.a in (select min(11) from t1 union select (( -( -max(t1.f)))) from t1) and 17 in (select count(distinct 17) from t1 union select -count(distinct d) from t1) and (d)<>t1.c and t1.c not between b and b then a else 11 end)+cast(avg(t1.a) AS integer) when ~count(distinct t1.e) then count(distinct d) else count(distinct e) end from t1) & e*t1.b-t1.d FROM t1 WHERE NOT ((((abs(19)/abs(case ~c*d+(abs(e)/abs((coalesce((select case when (f in (select count(*) from t1 union select -min(d) from t1)) then case when -t1.f between t1.c and t1.f then b when 11<>a then t1.a else t1.c end when 13=13 then 13 else t1.a end from t1 where 19 not between 11 and t1.b or t1.b not in ((e),(b),c)),a)- -((t1.c))-c)))+t1.b+17 when 13 then ((13)) else t1.f end*t1.b)) in ( -17,t1.a,11))))} +} {0} +do_test randexpr-2.2203 { + db eval {SELECT c-coalesce((select -coalesce((select max(case when (abs(~~case when case when t1.e<=e and t1.f>t1.d then b when t1.a<>t1.a then 17 else 17 end+t1.c>=t1.a then (c) when c not in (f,e,t1.c) then t1.d else 13 end)/abs(t1.a))c),t1.a)*19 FROM t1 WHERE (17) | case when 19 not in ((case when (e*19=f) then case t1.f when a then 13 else t1.d end when bt1.b) then f else a end in (select c from t1 union select -13 from t1) or 17 in (select 19 from t1 union select d from t1) and t1.b in (c,b,19) and t1.b>=( -b)} +} {} +do_test randexpr-2.2204 { + db eval {SELECT c-coalesce((select -coalesce((select max(case when (abs(~~case when case when t1.e<=e and t1.f>t1.d then b when t1.a<>t1.a then 17 else 17 end+t1.c>=t1.a then (c) when c not in (f,e,t1.c) then t1.d else 13 end)/abs(t1.a))c),t1.a)*19 FROM t1 WHERE NOT ((17) | case when 19 not in ((case when (e*19=f) then case t1.f when a then 13 else t1.d end when bt1.b) then f else a end in (select c from t1 union select -13 from t1) or 17 in (select 19 from t1 union select d from t1) and t1.b in (c,b,19) and t1.b>=( -b))} +} {-1600} +do_test randexpr-2.2205 { + db eval {SELECT 13+case when not (t1.d in (11,f,t1.a)) then coalesce((select +t1.e from t1 where case t1.e when t1.b then 19+t1.a+~e+a*t1.a else 11 end not between 19 and 19),(abs(t1.f-case when (19)=17 then t1.c else t1.f end)/abs(19)) from t1 union select c from t1)), -d)) from t1))/abs(t1.d))) from t1)*(c) | 11+t1.c*t1.b) from t1 union select cast(avg(t1.c) AS integer) from t1)} +} {} +do_test randexpr-2.2206 { + db eval {SELECT 13+case when not (t1.d in (11,f,t1.a)) then coalesce((select +t1.e from t1 where case t1.e when t1.b then 19+t1.a+~e+a*t1.a else 11 end not between 19 and 19),(abs(t1.f-case when (19)=17 then t1.c else t1.f end)/abs(19)) from t1 union select c from t1)), -d)) from t1))/abs(t1.d))) from t1)*(c) | 11+t1.c*t1.b) from t1 union select cast(avg(t1.c) AS integer) from t1))} +} {400013} +do_test randexpr-2.2207 { + db eval {SELECT case when -11-t1.b-f-t1.b>~c then t1.e else case when t1.d<>b*~17+t1.d*(select 
max(+(select count(distinct t1.a)+ -cast(avg(d) AS integer)*max(13) from t1)) from t1) or (abs(c)/abs(c)) not in (11,d,t1.c) then 13 when d in (select f from t1 union select b from t1) then t1.f else -t1.a end+ -f end FROM t1 WHERE ~t1.b*19-d not between ~b and t1.f} +} {-587} +do_test randexpr-2.2208 { + db eval {SELECT case when -11-t1.b-f-t1.b>~c then t1.e else case when t1.d<>b*~17+t1.d*(select max(+(select count(distinct t1.a)+ -cast(avg(d) AS integer)*max(13) from t1)) from t1) or (abs(c)/abs(c)) not in (11,d,t1.c) then 13 when d in (select f from t1 union select b from t1) then t1.f else -t1.a end+ -f end FROM t1 WHERE NOT (~t1.b*19-d not between ~b and t1.f)} +} {} +do_test randexpr-2.2209 { + db eval {SELECT coalesce((select t1.b from t1 where coalesce((select max(coalesce((select max(t1.a) from t1 where t1.c=t1.e),case b-f when f then e else 13 end-t1.d)) from t1 where not exists(select 1 from t1 where -t1.f>=17)),a)*(19) in (select max(d) from t1 union select case -count(*) when +count(distinct (t1.f)) then min(t1.d) else count(*) end from t1) and a<=f and d not between b and 19 and not exists(select 1 from t1 where b between t1.c and e or c<>t1.f)),17) FROM t1 WHERE t1.a-19<>c*d} +} {17} +do_test randexpr-2.2210 { + db eval {SELECT coalesce((select t1.b from t1 where coalesce((select max(coalesce((select max(t1.a) from t1 where t1.c=t1.e),case b-f when f then e else 13 end-t1.d)) from t1 where not exists(select 1 from t1 where -t1.f>=17)),a)*(19) in (select max(d) from t1 union select case -count(*) when +count(distinct (t1.f)) then min(t1.d) else count(*) end from t1) and a<=f and d not between b and 19 and not exists(select 1 from t1 where b between t1.c and e or c<>t1.f)),17) FROM t1 WHERE NOT (t1.a-19<>c*d)} +} {} +do_test randexpr-2.2211 { + db eval {SELECT (coalesce((select case (select ~count(*) | max(b+t1.b)+case max((abs(+coalesce((select max(f) from t1 where not (17=e)),t1.f))/abs(b))*a) when -~~ -(cast(avg(13) AS integer)) | count(*) then min(13) else count(*) end-count(distinct d) from t1) when 17+t1.d then t1.a else 13 end from t1 where e not between 13 and 11),11)+t1.e+b) FROM t1 WHERE not f in (select t1.e from t1 union select t1.f from t1) or 13 not in (t1.b | t1.e,t1.f,d) or (abs(f)/abs(+(abs(f+(select max(case when ((13<>t1.c)) then 13 else t1.c end) | ~min(t1.a)+cast(avg(a) AS integer) from t1)*(d)-b)/abs(d))*e))-t1.b not in (t1.c,f,t1.d) and t1.b<>t1.f} +} {713} +do_test randexpr-2.2212 { + db eval {SELECT (coalesce((select case (select ~count(*) | max(b+t1.b)+case max((abs(+coalesce((select max(f) from t1 where not (17=e)),t1.f))/abs(b))*a) when -~~ -(cast(avg(13) AS integer)) | count(*) then min(13) else count(*) end-count(distinct d) from t1) when 17+t1.d then t1.a else 13 end from t1 where e not between 13 and 11),11)+t1.e+b) FROM t1 WHERE NOT (not f in (select t1.e from t1 union select t1.f from t1) or 13 not in (t1.b | t1.e,t1.f,d) or (abs(f)/abs(+(abs(f+(select max(case when ((13<>t1.c)) then 13 else t1.c end) | ~min(t1.a)+cast(avg(a) AS integer) from t1)*(d)-b)/abs(d))*e))-t1.b not in (t1.c,f,t1.d) and t1.b<>t1.f)} +} {} +do_test randexpr-2.2213 { + db eval {SELECT (coalesce((select case (select ~count(*) & max(b+t1.b)+case max((abs(+coalesce((select max(f) from t1 where not (17=e)),t1.f))/abs(b))*a) when -~~ -(cast(avg(13) AS integer)) & count(*) then min(13) else count(*) end-count(distinct d) from t1) when 17+t1.d then t1.a else 13 end from t1 where e not between 13 and 11),11)+t1.e+b) FROM t1 WHERE not f in (select t1.e from t1 union select t1.f 
from t1) or 13 not in (t1.b | t1.e,t1.f,d) or (abs(f)/abs(+(abs(f+(select max(case when ((13<>t1.c)) then 13 else t1.c end) | ~min(t1.a)+cast(avg(a) AS integer) from t1)*(d)-b)/abs(d))*e))-t1.b not in (t1.c,f,t1.d) and t1.b<>t1.f} +} {713} +do_test randexpr-2.2214 { + db eval {SELECT t1.b-c*a*t1.b+ -f | (case when 1317)} +} {-6000392} +do_test randexpr-2.2215 { + db eval {SELECT t1.b-c*a*t1.b+ -f | (case when 1317))} +} {} +do_test randexpr-2.2216 { + db eval {SELECT t1.b-c*a*t1.b+ -f & (case when 1317)} +} {192} +do_test randexpr-2.2217 { + db eval {SELECT ~(select (abs(cast(avg(((b)-d*t1.a-c+17+f+e+ -17)) AS integer))) from t1)*t1.c++ -(11-t1.b*~f-e*t1.b)-coalesce((select max(case when not c=13 then 17 else b end) from t1 where (t1.a>17)),c) | -f-f FROM t1 WHERE a+coalesce((select max(coalesce((select max(f) from t1 where ((not b in (select t1.b from t1 union select (select -cast(avg(t1.b) AS integer) from t1) from t1)) or not t1.d= -t1.c and 19<=t1.c)),t1.f)-coalesce((select c from t1 where d<=t1.a), -e)) from t1 where d in (select t1.d from t1 union select t1.d from t1)),t1.d)-a | t1.b>(f) or t1.d between 19 and t1.c and exists(select 1 from t1 where b=13) and not 13 not in (t1.d,(c),d) and e not between 17 and 13} +} {-1040} +do_test randexpr-2.2218 { + db eval {SELECT ~(select (abs(cast(avg(((b)-d*t1.a-c+17+f+e+ -17)) AS integer))) from t1)*t1.c++ -(11-t1.b*~f-e*t1.b)-coalesce((select max(case when not c=13 then 17 else b end) from t1 where (t1.a>17)),c) | -f-f FROM t1 WHERE NOT (a+coalesce((select max(coalesce((select max(f) from t1 where ((not b in (select t1.b from t1 union select (select -cast(avg(t1.b) AS integer) from t1) from t1)) or not t1.d= -t1.c and 19<=t1.c)),t1.f)-coalesce((select c from t1 where d<=t1.a), -e)) from t1 where d in (select t1.d from t1 union select t1.d from t1)),t1.d)-a | t1.b>(f) or t1.d between 19 and t1.c and exists(select 1 from t1 where b=13) and not 13 not in (t1.d,(c),d) and e not between 17 and 13)} +} {} +do_test randexpr-2.2219 { + db eval {SELECT ~(select (abs(cast(avg(((b)-d*t1.a-c+17+f+e+ -17)) AS integer))) from t1)*t1.c++ -(11-t1.b*~f-e*t1.b)-coalesce((select max(case when not c=13 then 17 else b end) from t1 where (t1.a>17)),c) & -f-f FROM t1 WHERE a+coalesce((select max(coalesce((select max(f) from t1 where ((not b in (select t1.b from t1 union select (select -cast(avg(t1.b) AS integer) from t1) from t1)) or not t1.d= -t1.c and 19<=t1.c)),t1.f)-coalesce((select c from t1 where d<=t1.a), -e)) from t1 where d in (select t1.d from t1 union select t1.d from t1)),t1.d)-a | t1.b>(f) or t1.d between 19 and t1.c and exists(select 1 from t1 where b=13) and not 13 not in (t1.d,(c),d) and e not between 17 and 13} +} {-11720688} +do_test randexpr-2.2220 { + db eval {SELECT case when t1.a in (select count(*) from t1 union select cast(avg(t1.a) AS integer)*count(distinct 17) | -count(*) from t1) then ~coalesce((select max(19+t1.a) from t1 where +d=t1.a),case t1.c-(select count(*) from t1) when coalesce((select (abs((select (cast(avg(13) AS integer)) from t1))/abs(13)) from t1 where f<~13-(t1.a)*19),d) then a else 19 end) | b when t1.d>a then f else b end FROM t1 WHERE 19<=case when (t1.a*11*19-e<>(select abs(min(e))+case -abs(abs( - -( -(( -max(c)))))-(min(c))) when min(e) then -(min(f)) else - -min(t1.a) end from t1)*t1.d) then e+f+coalesce((select max(e-a) from t1 where (t1.b) between 11 and -t1.c),c) when t1.c<=17 then 17 else t1.b end and 19<>t1.f} +} {600} +do_test randexpr-2.2221 { + db eval {SELECT case when t1.a in (select count(*) from t1 
union select cast(avg(t1.a) AS integer)*count(distinct 17) | -count(*) from t1) then ~coalesce((select max(19+t1.a) from t1 where +d=t1.a),case t1.c-(select count(*) from t1) when coalesce((select (abs((select (cast(avg(13) AS integer)) from t1))/abs(13)) from t1 where f<~13-(t1.a)*19),d) then a else 19 end) | b when t1.d>a then f else b end FROM t1 WHERE NOT (19<=case when (t1.a*11*19-e<>(select abs(min(e))+case -abs(abs( - -( -(( -max(c)))))-(min(c))) when min(e) then -(min(f)) else - -min(t1.a) end from t1)*t1.d) then e+f+coalesce((select max(e-a) from t1 where (t1.b) between 11 and -t1.c),c) when t1.c<=17 then 17 else t1.b end and 19<>t1.f)} +} {} +do_test randexpr-2.2222 { + db eval {SELECT case when t1.a in (select count(*) from t1 union select cast(avg(t1.a) AS integer)*count(distinct 17) & -count(*) from t1) then ~coalesce((select max(19+t1.a) from t1 where +d=t1.a),case t1.c-(select count(*) from t1) when coalesce((select (abs((select (cast(avg(13) AS integer)) from t1))/abs(13)) from t1 where f<~13-(t1.a)*19),d) then a else 19 end) & b when t1.d>a then f else b end FROM t1 WHERE 19<=case when (t1.a*11*19-e<>(select abs(min(e))+case -abs(abs( - -( -(( -max(c)))))-(min(c))) when min(e) then -(min(f)) else - -min(t1.a) end from t1)*t1.d) then e+f+coalesce((select max(e-a) from t1 where (t1.b) between 11 and -t1.c),c) when t1.c<=17 then 17 else t1.b end and 19<>t1.f} +} {200} +do_test randexpr-2.2223 { + db eval {SELECT c-case when t1.e>coalesce((select case -t1.d-case when d in (select -t1.f from t1 union select t1.c+t1.c from t1) and (t1.a<=t1.a) then 11*(17) when d between 11 and e or -c<>t1.f then 19 else 19 end+11 when 19 then b else t1.b end-t1.a from t1 where not e<=f),( -t1.f)) then b when ((dcoalesce((select case -t1.d-case when d in (select -t1.f from t1 union select t1.c+t1.c from t1) and (t1.a<=t1.a) then 11*(17) when d between 11 and e or -c<>t1.f then 19 else 19 end+11 when 19 then b else t1.b end-t1.a from t1 where not e<=f),( -t1.f)) then b when ((dt1.d),d)>=13 then coalesce((select -17+e from t1 where t1.a<=19),b) else (19) end FROM t1 WHERE (abs( -t1.f)/abs(t1.a))<=t1.d} +} {200} +do_test randexpr-2.2229 { + db eval {SELECT case when not (select -case (( -(count(*)) | cast(avg(d) AS integer))) | min((e)) when cast(avg(d) AS integer) then count(*) else count(distinct t1.e) end+max((17)) | max(t1.a)+ -count(*) from t1)-e in (t1.a,11-t1.d,t1.b) and coalesce((select max(t1.c) from t1 where case t1.d when t1.f then d else b end<>t1.d),d)>=13 then coalesce((select -17+e from t1 where t1.a<=19),b) else (19) end FROM t1 WHERE NOT ((abs( -t1.f)/abs(t1.a))<=t1.d)} +} {} +do_test randexpr-2.2230 { + db eval {SELECT case when not (select -case (( -(count(*)) & cast(avg(d) AS integer))) & min((e)) when cast(avg(d) AS integer) then count(*) else count(distinct t1.e) end+max((17)) & max(t1.a)+ -count(*) from t1)-e in (t1.a,11-t1.d,t1.b) and coalesce((select max(t1.c) from t1 where case t1.d when t1.f then d else b end<>t1.d),d)>=13 then coalesce((select -17+e from t1 where t1.a<=19),b) else (19) end FROM t1 WHERE (abs( -t1.f)/abs(t1.a))<=t1.d} +} {200} +do_test randexpr-2.2231 { + db eval {SELECT case when 13 in (select cast(avg(11) AS integer) from t1 union select count(*) from t1) then t1.f-case c-(abs(d+~+t1.d-(abs(13)/abs(t1.e-t1.b-t1.c))*e)/abs(17*19+t1.e)) when 19 then 17 else a end when (t1.a in (select count(distinct 13) from t1 union select max(t1.f) from t1)) and c between ((c)) and (t1.a) then c else 19 end+t1.e FROM t1 WHERE 19<>t1.c} +} {519} +do_test randexpr-2.2232 
{ + db eval {SELECT case when 13 in (select cast(avg(11) AS integer) from t1 union select count(*) from t1) then t1.f-case c-(abs(d+~+t1.d-(abs(13)/abs(t1.e-t1.b-t1.c))*e)/abs(17*19+t1.e)) when 19 then 17 else a end when (t1.a in (select count(distinct 13) from t1 union select max(t1.f) from t1)) and c between ((c)) and (t1.a) then c else 19 end+t1.e FROM t1 WHERE NOT (19<>t1.c)} +} {} +do_test randexpr-2.2233 { + db eval {SELECT coalesce((select max(case when not exists(select 1 from t1 where coalesce((select a from t1 where (t1.c+t1.d<=t1.b and exists(select 1 from t1 where t1.f not in (~case when t1.d>d then t1.a else a end,( -e),t1.b))) and (c<>17)),e) in (select e from t1 union select d from t1)) then case when b not in (a,17,e) or f=t1.e then t1.d else d end else a end*19) from t1 where ((t1.c not between e and t1.e))),13) FROM t1 WHERE c not in (~t1.e,t1.e,a)} +} {1900} +do_test randexpr-2.2234 { + db eval {SELECT coalesce((select max(case when not exists(select 1 from t1 where coalesce((select a from t1 where (t1.c+t1.d<=t1.b and exists(select 1 from t1 where t1.f not in (~case when t1.d>d then t1.a else a end,( -e),t1.b))) and (c<>17)),e) in (select e from t1 union select d from t1)) then case when b not in (a,17,e) or f=t1.e then t1.d else d end else a end*19) from t1 where ((t1.c not between e and t1.e))),13) FROM t1 WHERE NOT (c not in (~t1.e,t1.e,a))} +} {} +do_test randexpr-2.2235 { + db eval {SELECT coalesce((select t1.f from t1 where d+(select count(distinct e) from t1)+~+ -~e-a+t1.a-b- -a*coalesce((select c-t1.d*case when ( -11<=e or b>= - -c and t1.c19),t1.f) FROM t1 WHERE t1.f<>c} +} {600} +do_test randexpr-2.2236 { + db eval {SELECT coalesce((select t1.f from t1 where d+(select count(distinct e) from t1)+~+ -~e-a+t1.a-b- -a*coalesce((select c-t1.d*case when ( -11<=e or b>= - -c and t1.c19),t1.f) FROM t1 WHERE NOT (t1.f<>c)} +} {} +do_test randexpr-2.2237 { + db eval {SELECT 19+case when d between t1.b and 19+t1.d*(select ~min(t1.e | a) from t1)-case when exists(select 1 from t1 where 19<>t1.a and 19 between case +19 when 19 then t1.e else e end and t1.a) then a when t1.a between 13 and f or (t1.f<19) and d>=t1.c and f not between a and t1.b then a else t1.d end then b else t1.a end+ -d FROM t1 WHERE f>=t1.d+(t1.a)} +} {-281} +do_test randexpr-2.2238 { + db eval {SELECT 19+case when d between t1.b and 19+t1.d*(select ~min(t1.e | a) from t1)-case when exists(select 1 from t1 where 19<>t1.a and 19 between case +19 when 19 then t1.e else e end and t1.a) then a when t1.a between 13 and f or (t1.f<19) and d>=t1.c and f not between a and t1.b then a else t1.d end then b else t1.a end+ -d FROM t1 WHERE NOT (f>=t1.d+(t1.a))} +} {} +do_test randexpr-2.2239 { + db eval {SELECT 19+case when d between t1.b and 19+t1.d*(select ~min(t1.e & a) from t1)-case when exists(select 1 from t1 where 19<>t1.a and 19 between case +19 when 19 then t1.e else e end and t1.a) then a when t1.a between 13 and f or (t1.f<19) and d>=t1.c and f not between a and t1.b then a else t1.d end then b else t1.a end+ -d FROM t1 WHERE f>=t1.d+(t1.a)} +} {-281} +do_test randexpr-2.2240 { + db eval {SELECT t1.d+(select case max(t1.f) when count(distinct ~coalesce((select case when 17 in (select t1.b from t1 union select t1.e from t1) then c when (d<= -(abs(t1.a)/abs(t1.d))) then t1.b+ -case when t1.bt1.a or t1.d>=t1.f),t1.a)-e)<=t1.e} +} {902} +do_test randexpr-2.2241 { + db eval {SELECT t1.d+(select case max(t1.f) when count(distinct ~coalesce((select case when 17 in (select t1.b from t1 union select t1.e from t1) 
then c when (d<= -(abs(t1.a)/abs(t1.d))) then t1.b+ -case when t1.bt1.a or t1.d>=t1.f),t1.a)-e)<=t1.e)} +} {} +do_test randexpr-2.2242 { + db eval {SELECT t1.d+(select case max(t1.f) when count(distinct ~coalesce((select case when 17 in (select t1.b from t1 union select t1.e from t1) then c when (d<= -(abs(t1.a)/abs(t1.d))) then t1.b+ -case when t1.bt1.a or t1.d>=t1.f),t1.a)-e)<=t1.e} +} {902} +do_test randexpr-2.2243 { + db eval {SELECT (select count(distinct (select case ~abs(~+abs(case (max(17)) when max(coalesce((select ~b from t1 where (not exists(select 1 from t1 where -17<>f or t1.d>=b and 19 between 19 and t1.b))),(abs(case when 19<>a then t1.c else t1.b end)/abs(19)))) then ~+cast(avg(19) AS integer) else min(t1.a) end) | case min(13) when max(11) then count(distinct -d) else count(distinct t1.d) end)*max(t1.d) when count(*) then -min(e) else (count(*)) end+count(*) from t1)) from t1) FROM t1 WHERE (d in (select abs(count(*) | case max((case when b<=13 then t1.b*e when case when t1.c between t1.d and a then t1.a when (f< -19) then t1.c else d end between t1.d and b then t1.e else t1.b end))-max((t1.c)) when max(t1.f) then -abs(+cast(avg(19) AS integer)) else abs(max(t1.c)) end++max(17))- -cast(avg(f) AS integer) from t1 union select min(11) from t1))} +} {} +do_test randexpr-2.2244 { + db eval {SELECT (select count(distinct (select case ~abs(~+abs(case (max(17)) when max(coalesce((select ~b from t1 where (not exists(select 1 from t1 where -17<>f or t1.d>=b and 19 between 19 and t1.b))),(abs(case when 19<>a then t1.c else t1.b end)/abs(19)))) then ~+cast(avg(19) AS integer) else min(t1.a) end) | case min(13) when max(11) then count(distinct -d) else count(distinct t1.d) end)*max(t1.d) when count(*) then -min(e) else (count(*)) end+count(*) from t1)) from t1) FROM t1 WHERE NOT ((d in (select abs(count(*) | case max((case when b<=13 then t1.b*e when case when t1.c between t1.d and a then t1.a when (f< -19) then t1.c else d end between t1.d and b then t1.e else t1.b end))-max((t1.c)) when max(t1.f) then -abs(+cast(avg(19) AS integer)) else abs(max(t1.c)) end++max(17))- -cast(avg(f) AS integer) from t1 union select min(11) from t1)))} +} {1} +do_test randexpr-2.2245 { + db eval {SELECT (select count(distinct (select case ~abs(~+abs(case (max(17)) when max(coalesce((select ~b from t1 where (not exists(select 1 from t1 where -17<>f or t1.d>=b and 19 between 19 and t1.b))),(abs(case when 19<>a then t1.c else t1.b end)/abs(19)))) then ~+cast(avg(19) AS integer) else min(t1.a) end) & case min(13) when max(11) then count(distinct -d) else count(distinct t1.d) end)*max(t1.d) when count(*) then -min(e) else (count(*)) end+count(*) from t1)) from t1) FROM t1 WHERE NOT ((d in (select abs(count(*) | case max((case when b<=13 then t1.b*e when case when t1.c between t1.d and a then t1.a when (f< -19) then t1.c else d end between t1.d and b then t1.e else t1.b end))-max((t1.c)) when max(t1.f) then -abs(+cast(avg(19) AS integer)) else abs(max(t1.c)) end++max(17))- -cast(avg(f) AS integer) from t1 union select min(11) from t1)))} +} {1} +do_test randexpr-2.2246 { + db eval {SELECT case d*coalesce((select e from t1 where t1.f in (case when not exists(select 1 from t1 where 19>=(+f-13)) then case when not not exists(select 1 from t1 where 17>=t1.d-13) then f else d end else t1.c end,c,+(abs(e)/abs(19*d | case 19 when b then 17 else b end-b))) or e between 13 and a),19) when b then c else t1.b end FROM t1 WHERE t1.b<=coalesce((select coalesce((select t1.a from t1 where not t1.b*(select max(t1.d) from 
t1)=(+f-13)) then case when not not exists(select 1 from t1 where 17>=t1.d-13) then f else d end else t1.c end,c,+(abs(e)/abs(19*d | case 19 when b then 17 else b end-b))) or e between 13 and a),19) when b then c else t1.b end FROM t1 WHERE NOT (t1.b<=coalesce((select coalesce((select t1.a from t1 where not t1.b*(select max(t1.d) from t1) -c then (b)* -19 else t1.d end<=11 or 17 not between t1.f and 13 or a not in (t1.c,t1.b,11) then c else c end<>t1.a and (f) in (select cast(avg(11) AS integer) from t1 union select case max(b) when (min(t1.c)) then count(distinct f) else max(19) end from t1) then 13 else d end | f-t1.d<=t1.f then f else 19 end FROM t1 WHERE ((f not between t1.f and d))} +} {600} +do_test randexpr-2.2251 { + db eval {SELECT case when case when case when case when t1.b<> -c then (b)* -19 else t1.d end<=11 or 17 not between t1.f and 13 or a not in (t1.c,t1.b,11) then c else c end<>t1.a and (f) in (select cast(avg(11) AS integer) from t1 union select case max(b) when (min(t1.c)) then count(distinct f) else max(19) end from t1) then 13 else d end | f-t1.d<=t1.f then f else 19 end FROM t1 WHERE NOT (((f not between t1.f and d)))} +} {} +do_test randexpr-2.2252 { + db eval {SELECT case when case when case when case when t1.b<> -c then (b)* -19 else t1.d end<=11 or 17 not between t1.f and 13 or a not in (t1.c,t1.b,11) then c else c end<>t1.a and (f) in (select cast(avg(11) AS integer) from t1 union select case max(b) when (min(t1.c)) then count(distinct f) else max(19) end from t1) then 13 else d end & f-t1.d<=t1.f then f else 19 end FROM t1 WHERE ((f not between t1.f and d))} +} {600} +do_test randexpr-2.2253 { + db eval {SELECT d-b-(abs(a)/abs(coalesce((select b from t1 where coalesce((select t1.f from t1 where 13>=f-b),t1.c)*case when coalesce((select case (d*t1.e+b) when t1.a-13 then - -t1.d else t1.a end+13 from t1 where e>=t1.f),11) | b=case when coalesce((select max(case when f<>~b | coalesce((select max(11*c) from t1 where 17=t1.f),t1.d)*d then b when a in (c,(t1.d),(f)) then a else 19 end-t1.f) from t1 where not 13 in (select cast(avg(t1.b) AS integer)-cast(avg( -t1.a) AS integer) from t1 union select count(*) from t1)),17) not in ((t1.f),c, -19) and e between 11 and (19) then 19 else t1.d end} +} {199} +do_test randexpr-2.2254 { + db eval {SELECT d-b-(abs(a)/abs(coalesce((select b from t1 where coalesce((select t1.f from t1 where 13>=f-b),t1.c)*case when coalesce((select case (d*t1.e+b) when t1.a-13 then - -t1.d else t1.a end+13 from t1 where e>=t1.f),11) | b=case when coalesce((select max(case when f<>~b | coalesce((select max(11*c) from t1 where 17=t1.f),t1.d)*d then b when a in (c,(t1.d),(f)) then a else 19 end-t1.f) from t1 where not 13 in (select cast(avg(t1.b) AS integer)-cast(avg( -t1.a) AS integer) from t1 union select count(*) from t1)),17) not in ((t1.f),c, -19) and e between 11 and (19) then 19 else t1.d end)} +} {} +do_test randexpr-2.2255 { + db eval {SELECT d-b-(abs(a)/abs(coalesce((select b from t1 where coalesce((select t1.f from t1 where 13>=f-b),t1.c)*case when coalesce((select case (d*t1.e+b) when t1.a-13 then - -t1.d else t1.a end+13 from t1 where e>=t1.f),11) & b=case when coalesce((select max(case when f<>~b | coalesce((select max(11*c) from t1 where 17=t1.f),t1.d)*d then b when a in (c,(t1.d),(f)) then a else 19 end-t1.f) from t1 where not 13 in (select cast(avg(t1.b) AS integer)-cast(avg( -t1.a) AS integer) from t1 union select count(*) from t1)),17) not in ((t1.f),c, -19) and e between 11 and (19) then 19 else t1.d end} +} {199} +do_test 
randexpr-2.2256 { + db eval {SELECT coalesce((select max(coalesce((select 13 from t1 where not t1.b<>e*b),t1.c*case when d>=case case 13 when coalesce((select ~~t1.f*(abs(t1.f)/abs(t1.e))-e | -t1.b from t1 where a in (11,13,t1.e)),d)*t1.e then -e else t1.b end when t1.e then 11 else t1.e end then -f else c end) | t1.a) from t1 where e>t1.d), -(t1.f)) FROM t1 WHERE not -13 in (select (+(case case min(a+t1.a) when abs(count(*)-count(distinct t1.d-+(f)-case 19 when b then t1.b else a end)) then case count(*)*max(c) when ~ -count(*)*cast(avg(t1.e) AS integer)*count(distinct -a) then count(*) else cast(avg(19) AS integer) end else cast(avg(b) AS integer) end when min(f) then max( -a) else (count(*)) end)*count(distinct t1.f)-count(*)) from t1 union select count(distinct f) from t1)} +} {90100} +do_test randexpr-2.2257 { + db eval {SELECT coalesce((select max(coalesce((select 13 from t1 where not t1.b<>e*b),t1.c*case when d>=case case 13 when coalesce((select ~~t1.f*(abs(t1.f)/abs(t1.e))-e | -t1.b from t1 where a in (11,13,t1.e)),d)*t1.e then -e else t1.b end when t1.e then 11 else t1.e end then -f else c end) | t1.a) from t1 where e>t1.d), -(t1.f)) FROM t1 WHERE NOT (not -13 in (select (+(case case min(a+t1.a) when abs(count(*)-count(distinct t1.d-+(f)-case 19 when b then t1.b else a end)) then case count(*)*max(c) when ~ -count(*)*cast(avg(t1.e) AS integer)*count(distinct -a) then count(*) else cast(avg(19) AS integer) end else cast(avg(b) AS integer) end when min(f) then max( -a) else (count(*)) end)*count(distinct t1.f)-count(*)) from t1 union select count(distinct f) from t1))} +} {} +do_test randexpr-2.2258 { + db eval {SELECT coalesce((select max(coalesce((select 13 from t1 where not t1.b<>e*b),t1.c*case when d>=case case 13 when coalesce((select ~~t1.f*(abs(t1.f)/abs(t1.e))-e & -t1.b from t1 where a in (11,13,t1.e)),d)*t1.e then -e else t1.b end when t1.e then 11 else t1.e end then -f else c end) & t1.a) from t1 where e>t1.d), -(t1.f)) FROM t1 WHERE not -13 in (select (+(case case min(a+t1.a) when abs(count(*)-count(distinct t1.d-+(f)-case 19 when b then t1.b else a end)) then case count(*)*max(c) when ~ -count(*)*cast(avg(t1.e) AS integer)*count(distinct -a) then count(*) else cast(avg(19) AS integer) end else cast(avg(b) AS integer) end when min(f) then max( -a) else (count(*)) end)*count(distinct t1.f)-count(*)) from t1 union select count(distinct f) from t1)} +} {0} +do_test randexpr-2.2259 { + db eval {SELECT coalesce((select max(t1.e*~t1.b- -t1.b-t1.b-coalesce((select t1.a from t1 where exists(select 1 from t1 where ~d+case case when a between b and f then 13 else -17 end-13 when -19 then t1.f else d end+t1.b<>e or not t1.e between 19 and t1.a or 11 in (19,c, -19))),a)*e) from t1 where t1.f<19),t1.e)-13 FROM t1 WHERE t1.b not between e and 13} +} {487} +do_test randexpr-2.2260 { + db eval {SELECT coalesce((select max(t1.e*~t1.b- -t1.b-t1.b-coalesce((select t1.a from t1 where exists(select 1 from t1 where ~d+case case when a between b and f then 13 else -17 end-13 when -19 then t1.f else d end+t1.b<>e or not t1.e between 19 and t1.a or 11 in (19,c, -19))),a)*e) from t1 where t1.f<19),t1.e)-13 FROM t1 WHERE NOT (t1.b not between e and 13)} +} {} +do_test randexpr-2.2261 { + db eval {SELECT (select (count(distinct case t1.d+case when (coalesce((select max((select + -cast(avg(b) AS integer)-min(19) | (cast(avg(19) AS integer)) from t1)) from t1 where ~c+t1.ct1.a and f between t1.e and d and f<=19 then t1.f else t1.b end-c*a when t1.f then e else t1.b end)) from t1) FROM t1 WHERE not 
b*c+d<>b} +} {} +do_test randexpr-2.2262 { + db eval {SELECT (select (count(distinct case t1.d+case when (coalesce((select max((select + -cast(avg(b) AS integer)-min(19) | (cast(avg(19) AS integer)) from t1)) from t1 where ~c+t1.ct1.a and f between t1.e and d and f<=19 then t1.f else t1.b end-c*a when t1.f then e else t1.b end)) from t1) FROM t1 WHERE NOT (not b*c+d<>b)} +} {1} +do_test randexpr-2.2263 { + db eval {SELECT (select (count(distinct case t1.d+case when (coalesce((select max((select + -cast(avg(b) AS integer)-min(19) & (cast(avg(19) AS integer)) from t1)) from t1 where ~c+t1.ct1.a and f between t1.e and d and f<=19 then t1.f else t1.b end-c*a when t1.f then e else t1.b end)) from t1) FROM t1 WHERE NOT (not b*c+d<>b)} +} {1} +do_test randexpr-2.2264 { + db eval {SELECT t1.b+case case c when f-((abs(t1.f)/abs(coalesce((select max(f+t1.d | t1.c*e+f+~case when c not between b and t1.a or 11<19 then c-d else t1.f end+e-t1.b*11+t1.e) from t1 where 17 not between e and 19),19)))) then d else a end when (t1.c) then t1.f else f end FROM t1 WHERE (b)-t1.c*19-t1.b>17*b-13} +} {} +do_test randexpr-2.2265 { + db eval {SELECT t1.b+case case c when f-((abs(t1.f)/abs(coalesce((select max(f+t1.d | t1.c*e+f+~case when c not between b and t1.a or 11<19 then c-d else t1.f end+e-t1.b*11+t1.e) from t1 where 17 not between e and 19),19)))) then d else a end when (t1.c) then t1.f else f end FROM t1 WHERE NOT ((b)-t1.c*19-t1.b>17*b-13)} +} {800} +do_test randexpr-2.2266 { + db eval {SELECT t1.b+case case c when f-((abs(t1.f)/abs(coalesce((select max(f+t1.d & t1.c*e+f+~case when c not between b and t1.a or 11<19 then c-d else t1.f end+e-t1.b*11+t1.e) from t1 where 17 not between e and 19),19)))) then d else a end when (t1.c) then t1.f else f end FROM t1 WHERE NOT ((b)-t1.c*19-t1.b>17*b-13)} +} {800} +do_test randexpr-2.2267 { + db eval {SELECT coalesce((select coalesce((select ((abs(19)/abs(17))) from t1 where +13 | d>d),13-(select abs(abs(~count(*)))+~count(*) from t1)+(t1.c*t1.e*(abs(19+(13))/abs(a)))-coalesce((select 17 from t1 where c<>e and b not in ((d),t1.b,b)),t1.c)+t1.d)-13 from t1 where t1.f19 then 17 else t1.c end)-count(*))) then max(11) else +max(b) end) from t1))} +} {} +do_test randexpr-2.2268 { + db eval {SELECT coalesce((select coalesce((select ((abs(19)/abs(17))) from t1 where +13 | d>d),13-(select abs(abs(~count(*)))+~count(*) from t1)+(t1.c*t1.e*(abs(19+(13))/abs(a)))-coalesce((select 17 from t1 where c<>e and b not in ((d),t1.b,b)),t1.c)+t1.d)-13 from t1 where t1.f19 then 17 else t1.c end)-count(*))) then max(11) else +max(b) end) from t1)))} +} {-289} +do_test randexpr-2.2269 { + db eval {SELECT coalesce((select coalesce((select ((abs(19)/abs(17))) from t1 where +13 & d>d),13-(select abs(abs(~count(*)))+~count(*) from t1)+(t1.c*t1.e*(abs(19+(13))/abs(a)))-coalesce((select 17 from t1 where c<>e and b not in ((d),t1.b,b)),t1.c)+t1.d)-13 from t1 where t1.f19 then 17 else t1.c end)-count(*))) then max(11) else +max(b) end) from t1)))} +} {-289} +do_test randexpr-2.2270 { + db eval {SELECT coalesce((select case d when e then t1.f else d++(abs((select count(*) from t1)-case when t1.f*case when t1.b-c in (select -cast(avg(e) AS integer) from t1 union select max(t1.a) from t1) or b between t1.a and -17 and -t1.f not between 17 and a then t1.e when 17<>11 then (abs(19)/abs(f)) else 17 end-17 not in (13,11,c) then t1.f else t1.c end)/abs(c))*t1.e end from t1 where f between t1.c and 17),t1.f) FROM t1 WHERE t1.b<>19} +} {600} +do_test randexpr-2.2271 { + db eval {SELECT coalesce((select case 
d when e then t1.f else d++(abs((select count(*) from t1)-case when t1.f*case when t1.b-c in (select -cast(avg(e) AS integer) from t1 union select max(t1.a) from t1) or b between t1.a and -17 and -t1.f not between 17 and a then t1.e when 17<>11 then (abs(19)/abs(f)) else 17 end-17 not in (13,11,c) then t1.f else t1.c end)/abs(c))*t1.e end from t1 where f between t1.c and 17),t1.f) FROM t1 WHERE NOT (t1.b<>19)} +} {} +do_test randexpr-2.2272 { + db eval {SELECT t1.f-case when ((t1.a in (select case case +min(case when coalesce((select max(a) from t1 where t1.e not between f and t1.b),f)<>(t1.d) then - -t1.f else 19 end)-case count(*)+(count(distinct t1.d)) when -min(e) then max(t1.f) else min(19) end when cast(avg(13) AS integer) then cast(avg( -t1.a) AS integer) else (min(t1.f)) end when (count(distinct t1.e)) then -cast(avg(a) AS integer) else cast(avg(19) AS integer) end | count(distinct 13)+ -count(*) from t1 union select -max(t1.b) from t1))) then a when d between a and 11 or t1.e<>c then +t1.a-(t1.e) else t1.e end FROM t1 WHERE t1.e between d | t1.a*13 and 11+13*17+b*c++13-~b} +} {} +do_test randexpr-2.2273 { + db eval {SELECT t1.f-case when ((t1.a in (select case case +min(case when coalesce((select max(a) from t1 where t1.e not between f and t1.b),f)<>(t1.d) then - -t1.f else 19 end)-case count(*)+(count(distinct t1.d)) when -min(e) then max(t1.f) else min(19) end when cast(avg(13) AS integer) then cast(avg( -t1.a) AS integer) else (min(t1.f)) end when (count(distinct t1.e)) then -cast(avg(a) AS integer) else cast(avg(19) AS integer) end | count(distinct 13)+ -count(*) from t1 union select -max(t1.b) from t1))) then a when d between a and 11 or t1.e<>c then +t1.a-(t1.e) else t1.e end FROM t1 WHERE NOT (t1.e between d | t1.a*13 and 11+13*17+b*c++13-~b)} +} {1000} +do_test randexpr-2.2274 { + db eval {SELECT t1.f-case when ((t1.a in (select case case +min(case when coalesce((select max(a) from t1 where t1.e not between f and t1.b),f)<>(t1.d) then - -t1.f else 19 end)-case count(*)+(count(distinct t1.d)) when -min(e) then max(t1.f) else min(19) end when cast(avg(13) AS integer) then cast(avg( -t1.a) AS integer) else (min(t1.f)) end when (count(distinct t1.e)) then -cast(avg(a) AS integer) else cast(avg(19) AS integer) end & count(distinct 13)+ -count(*) from t1 union select -max(t1.b) from t1))) then a when d between a and 11 or t1.e<>c then +t1.a-(t1.e) else t1.e end FROM t1 WHERE NOT (t1.e between d | t1.a*13 and 11+13*17+b*c++13-~b)} +} {1000} +do_test randexpr-2.2275 { + db eval {SELECT coalesce((select ~(abs(e)/abs(case when exists(select 1 from t1 where 19=+c or exists(select 1 from t1 where not exists(select 1 from t1 where 11>=d or b not between b and 19)) or t1.a not in (t1.a,(11),13)) then e-13+a-c else a end))*t1.f-(19) | 19 from t1 where t1.d in (select -(cast(avg(19) AS integer)) from t1 union select count(distinct (t1.e)) from t1) and (f in (a,c,(t1.a))) and a between 11 and 11 and t1.d>=t1.f and 11 not in ((t1.f),e,(f)) and 13 not in (t1.c,t1.d,t1.c)),t1.d) FROM t1 WHERE not exists(select 1 from t1 where not coalesce((select max(b) from t1 where coalesce((select t1.f from t1 where t1.f not between t1.e and coalesce((select 13*+ -t1.a | e+case when t1.a not in (case t1.f when d then 19 else 11 end,f*e+19,t1.e+(d)) then 13 else t1.d end+t1.c-17 from t1 where t1.c>=t1.d),t1.c)),t1.d)>t1.c),11) not between b and t1.f)} +} {} +do_test randexpr-2.2276 { + db eval {SELECT coalesce((select ~(abs(e)/abs(case when exists(select 1 from t1 where 19=+c or exists(select 1 from t1 
where not exists(select 1 from t1 where 11>=d or b not between b and 19)) or t1.a not in (t1.a,(11),13)) then e-13+a-c else a end))*t1.f-(19) | 19 from t1 where t1.d in (select -(cast(avg(19) AS integer)) from t1 union select count(distinct (t1.e)) from t1) and (f in (a,c,(t1.a))) and a between 11 and 11 and t1.d>=t1.f and 11 not in ((t1.f),e,(f)) and 13 not in (t1.c,t1.d,t1.c)),t1.d) FROM t1 WHERE NOT (not exists(select 1 from t1 where not coalesce((select max(b) from t1 where coalesce((select t1.f from t1 where t1.f not between t1.e and coalesce((select 13*+ -t1.a | e+case when t1.a not in (case t1.f when d then 19 else 11 end,f*e+19,t1.e+(d)) then 13 else t1.d end+t1.c-17 from t1 where t1.c>=t1.d),t1.c)),t1.d)>t1.c),11) not between b and t1.f))} +} {400} +do_test randexpr-2.2277 { + db eval {SELECT coalesce((select ~(abs(e)/abs(case when exists(select 1 from t1 where 19=+c or exists(select 1 from t1 where not exists(select 1 from t1 where 11>=d or b not between b and 19)) or t1.a not in (t1.a,(11),13)) then e-13+a-c else a end))*t1.f-(19) & 19 from t1 where t1.d in (select -(cast(avg(19) AS integer)) from t1 union select count(distinct (t1.e)) from t1) and (f in (a,c,(t1.a))) and a between 11 and 11 and t1.d>=t1.f and 11 not in ((t1.f),e,(f)) and 13 not in (t1.c,t1.d,t1.c)),t1.d) FROM t1 WHERE NOT (not exists(select 1 from t1 where not coalesce((select max(b) from t1 where coalesce((select t1.f from t1 where t1.f not between t1.e and coalesce((select 13*+ -t1.a | e+case when t1.a not in (case t1.f when d then 19 else 11 end,f*e+19,t1.e+(d)) then 13 else t1.d end+t1.c-17 from t1 where t1.c>=t1.d),t1.c)),t1.d)>t1.c),11) not between b and t1.f))} +} {400} +do_test randexpr-2.2278 { + db eval {SELECT case when (select +case -+ -case min(t1.f) when cast(avg(t1.a) AS integer) then abs(~case (min(t1.a)) when (count(*)) then (min(e)) else count(*) end) else ( -cast(avg(b) AS integer)) end*count(*) | count(distinct 11) when -cast(avg(13) AS integer) then -min(13) else max(b) end+max(a) from t1) in ((abs(coalesce((select max(11*a) from t1 where t1.b>11-(select -cast(avg(t1.d) AS integer) from t1)),case when exists(select 1 from t1 where t1.f<13) then a else e end))/abs(c)) | t1.f,f,f) then t1.e else a end FROM t1 WHERE (select case min(case when not 17<11 then (abs(+(d)*t1.c+e-f)/abs(13)) when b between a and 11 then e else b end) when abs(min(t1.b) | (+count(*))) | ~case ( -count(*))-cast(avg(a) AS integer) when count(distinct t1.a) then -count(distinct f) else count(distinct 11) end then max( -t1.d) else -max(f) end from t1)-~c*t1.a not in (t1.e,t1.c,t1.a)} +} {100} +do_test randexpr-2.2279 { + db eval {SELECT case when (select +case -+ -case min(t1.f) when cast(avg(t1.a) AS integer) then abs(~case (min(t1.a)) when (count(*)) then (min(e)) else count(*) end) else ( -cast(avg(b) AS integer)) end*count(*) | count(distinct 11) when -cast(avg(13) AS integer) then -min(13) else max(b) end+max(a) from t1) in ((abs(coalesce((select max(11*a) from t1 where t1.b>11-(select -cast(avg(t1.d) AS integer) from t1)),case when exists(select 1 from t1 where t1.f<13) then a else e end))/abs(c)) | t1.f,f,f) then t1.e else a end FROM t1 WHERE NOT ((select case min(case when not 17<11 then (abs(+(d)*t1.c+e-f)/abs(13)) when b between a and 11 then e else b end) when abs(min(t1.b) | (+count(*))) | ~case ( -count(*))-cast(avg(a) AS integer) when count(distinct t1.a) then -count(distinct f) else count(distinct 11) end then max( -t1.d) else -max(f) end from t1)-~c*t1.a not in (t1.e,t1.c,t1.a))} +} {} +do_test 
randexpr-2.2280 { + db eval {SELECT case when (select +case -+ -case min(t1.f) when cast(avg(t1.a) AS integer) then abs(~case (min(t1.a)) when (count(*)) then (min(e)) else count(*) end) else ( -cast(avg(b) AS integer)) end*count(*) & count(distinct 11) when -cast(avg(13) AS integer) then -min(13) else max(b) end+max(a) from t1) in ((abs(coalesce((select max(11*a) from t1 where t1.b>11-(select -cast(avg(t1.d) AS integer) from t1)),case when exists(select 1 from t1 where t1.f<13) then a else e end))/abs(c)) & t1.f,f,f) then t1.e else a end FROM t1 WHERE (select case min(case when not 17<11 then (abs(+(d)*t1.c+e-f)/abs(13)) when b between a and 11 then e else b end) when abs(min(t1.b) | (+count(*))) | ~case ( -count(*))-cast(avg(a) AS integer) when count(distinct t1.a) then -count(distinct f) else count(distinct 11) end then max( -t1.d) else -max(f) end from t1)-~c*t1.a not in (t1.e,t1.c,t1.a)} +} {100} +do_test randexpr-2.2281 { + db eval {SELECT case when ~t1.b | 13>=~coalesce((select t1.e from t1 where 11 in (select -case +max(case when (t1.f) in (t1.b,t1.a,t1.f) then f else e end) when max(f)*cast(avg(t1.d) AS integer) then cast(avg(c) AS integer) else -( - -max(t1.f)) end from t1 union select count(*) from t1)),b | ( -t1.e))*t1.c*t1.a*t1.d-f then a when 19 not between -t1.e and t1.e or f not between -a and 19 then d else 11 end FROM t1 WHERE not (d in (select t1.c from t1 union select 11 from t1))} +} {400} +do_test randexpr-2.2282 { + db eval {SELECT case when ~t1.b | 13>=~coalesce((select t1.e from t1 where 11 in (select -case +max(case when (t1.f) in (t1.b,t1.a,t1.f) then f else e end) when max(f)*cast(avg(t1.d) AS integer) then cast(avg(c) AS integer) else -( - -max(t1.f)) end from t1 union select count(*) from t1)),b | ( -t1.e))*t1.c*t1.a*t1.d-f then a when 19 not between -t1.e and t1.e or f not between -a and 19 then d else 11 end FROM t1 WHERE NOT (not (d in (select t1.c from t1 union select 11 from t1)))} +} {} +do_test randexpr-2.2283 { + db eval {SELECT case when ~t1.b & 13>=~coalesce((select t1.e from t1 where 11 in (select -case +max(case when (t1.f) in (t1.b,t1.a,t1.f) then f else e end) when max(f)*cast(avg(t1.d) AS integer) then cast(avg(c) AS integer) else -( - -max(t1.f)) end from t1 union select count(*) from t1)),b & ( -t1.e))*t1.c*t1.a*t1.d-f then a when 19 not between -t1.e and t1.e or f not between -a and 19 then d else 11 end FROM t1 WHERE not (d in (select t1.c from t1 union select 11 from t1))} +} {100} +do_test randexpr-2.2284 { + db eval {SELECT coalesce((select c from t1 where 19 in (select case count(*) when count(distinct c)*+cast(avg(13) AS integer) then max(t1.d) else max(case case when exists(select 1 from t1 where 13 not in (d,e,(19)) and 13 between t1.d and t1.f) then t1.a when b in (t1.a,a,17) then case when t1.a not between 13 and c then (t1.b) else t1.b end else 19 end when 13 then a else 19 end) end from t1 union select (abs(abs(~cast(avg(t1.d) AS integer))) | cast(avg(t1.a) AS integer)) from t1)),t1.e-11)*(t1.a) FROM t1 WHERE not exists(select 1 from t1 where t1.e in (coalesce((select max(13) from t1 where 19 in (select f from t1 union select a from t1) or (not exists(select 1 from t1 where t1.a<=(19)*c*c | 13))),+e-(f)*case when t1.c not in (coalesce((select max(t1.a) from t1 where not c<(19)),19),e,(19)) then d else f end)+a,19,17)) or (d13))),t1.a)-f-a+t1.f) | case (cast(avg(t1.c) AS integer)) when cast(avg(t1.a) AS integer)-(count(*))+max(19) then - -max(19) else cast(avg(e) AS integer) end from t1)+(select cast(avg(b) AS integer) from t1) 
then t1.a else e-t1.d end-17+11 FROM t1 WHERE +c-(c)*d in (coalesce((select max( -t1.d-d-13) from t1 where (t1.c) not between (abs(13)/abs(t1.c)) and t1.b),d),11,c)} +} {} +do_test randexpr-2.2288 { + db eval {SELECT -~a+d-case d when -(select max(d)*~min(coalesce((select max(t1.e) from t1 where (exists(select 1 from t1 where 17<>13))),t1.a)-f-a+t1.f) | case (cast(avg(t1.c) AS integer)) when cast(avg(t1.a) AS integer)-(count(*))+max(19) then - -max(19) else cast(avg(e) AS integer) end from t1)+(select cast(avg(b) AS integer) from t1) then t1.a else e-t1.d end-17+11 FROM t1 WHERE NOT (+c-(c)*d in (coalesce((select max( -t1.d-d-13) from t1 where (t1.c) not between (abs(13)/abs(t1.c)) and t1.b),d),11,c))} +} {395} +do_test randexpr-2.2289 { + db eval {SELECT -~a+d-case d when -(select max(d)*~min(coalesce((select max(t1.e) from t1 where (exists(select 1 from t1 where 17<>13))),t1.a)-f-a+t1.f) & case (cast(avg(t1.c) AS integer)) when cast(avg(t1.a) AS integer)-(count(*))+max(19) then - -max(19) else cast(avg(e) AS integer) end from t1)+(select cast(avg(b) AS integer) from t1) then t1.a else e-t1.d end-17+11 FROM t1 WHERE NOT (+c-(c)*d in (coalesce((select max( -t1.d-d-13) from t1 where (t1.c) not between (abs(13)/abs(t1.c)) and t1.b),d),11,c))} +} {395} +do_test randexpr-2.2290 { + db eval {SELECT case when 17 in (case (abs(coalesce((select max((select cast(avg(case when 17-(t1.b)+t1.a | b+b in (select b from t1 union select 17 from t1) then c when t1.f in (select t1.a from t1 union select 19 from t1) then 11 else t1.c end) AS integer) from t1)) from t1 where t1.e>=t1.e or (b)=b and c not in (f,t1.a,t1.a)),d))/abs(t1.b)) when 17 then t1.b else 13 end-a,13,17) then t1.a else -d end FROM t1 WHERE t1.e=t1.e or (b)=b and c not in (f,t1.a,t1.a)),d))/abs(t1.b)) when 17 then t1.b else 13 end-a,13,17) then t1.a else -d end FROM t1 WHERE NOT (t1.e=t1.e or (b)=b and c not in (f,t1.a,t1.a)),d))/abs(t1.b)) when 17 then t1.b else 13 end-a,13,17) then t1.a else -d end FROM t1 WHERE NOT (t1.e19),b-case when +13>case when (coalesce((select b from t1 where coalesce((select max(t1.f) from t1 where 17<=case c when t1.e+f then d else case when 11<17 and t1.d<=t1.e then 13 when t1.d<>f then (t1.f) else c end end*t1.c),a) in (select 17 from t1 union select b from t1)),t1.c)>= -11) then (a)*t1.a else 19 end then 13 else 17 end*19) then d else (t1.a) end FROM t1 WHERE t1.d+t1.b not in (b,f,coalesce((select max(t1.b) from t1 where exists(select 1 from t1 where c-t1.b not between case when coalesce((select max(~t1.e) from t1 where f not in (t1.b,17,t1.a)),t1.e) not in (e,13,(t1.d)) then 11 when t1.d>=17 then t1.c else 11 end and (11) and (exists(select 1 from t1 where t1.b<>11))) or 13 in (select max(t1.d) from t1 union select max(b) from t1)),t1.b+19+a))} +} {} +do_test randexpr-2.2294 { + db eval {SELECT case t1.a when coalesce((select a from t1 where t1.b>19),b-case when +13>case when (coalesce((select b from t1 where coalesce((select max(t1.f) from t1 where 17<=case c when t1.e+f then d else case when 11<17 and t1.d<=t1.e then 13 when t1.d<>f then (t1.f) else c end end*t1.c),a) in (select 17 from t1 union select b from t1)),t1.c)>= -11) then (a)*t1.a else 19 end then 13 else 17 end*19) then d else (t1.a) end FROM t1 WHERE NOT (t1.d+t1.b not in (b,f,coalesce((select max(t1.b) from t1 where exists(select 1 from t1 where c-t1.b not between case when coalesce((select max(~t1.e) from t1 where f not in (t1.b,17,t1.a)),t1.e) not in (e,13,(t1.d)) then 11 when t1.d>=17 then t1.c else 11 end and (11) and (exists(select 1 
from t1 where t1.b<>11))) or 13 in (select max(t1.d) from t1 union select max(b) from t1)),t1.b+19+a)))} +} {400} +do_test randexpr-2.2295 { + db eval {SELECT coalesce((select max((coalesce((select coalesce((select max(case when case when et1.f then t1.b else c end19),t1.f) from t1 where t1.e<>t1.d),t1.b))) from t1 where 11=t1.c), -c) FROM t1 WHERE not exists(select 1 from t1 where -~e not in (c++case when 17+t1.c=d))),f-13) then a else (t1.a) end+f*19-a,b,f)) and t1.a=b} +} {} +do_test randexpr-2.2296 { + db eval {SELECT coalesce((select max((coalesce((select coalesce((select max(case when case when et1.f then t1.b else c end19),t1.f) from t1 where t1.e<>t1.d),t1.b))) from t1 where 11=t1.c), -c) FROM t1 WHERE NOT (not exists(select 1 from t1 where -~e not in (c++case when 17+t1.c=d))),f-13) then a else (t1.a) end+f*19-a,b,f)) and t1.a=b)} +} {-300} +do_test randexpr-2.2297 { + db eval {SELECT coalesce((select max((coalesce((select coalesce((select max(case when case when et1.f then t1.b else c end19),t1.f) from t1 where t1.e<>t1.d),t1.b))) from t1 where 11=t1.c), -c) FROM t1 WHERE NOT (not exists(select 1 from t1 where -~e not in (c++case when 17+t1.c=d))),f-13) then a else (t1.a) end+f*19-a,b,f)) and t1.a=b)} +} {-300} +do_test randexpr-2.2298 { + db eval {SELECT t1.b-11-case when t1.d+13+t1.c | 17+case when (+a*t1.b*t1.c not in (e,t1.f,t1.e)) then coalesce((select max(e) from t1 where t1.f>=f), -d) when f>b and t1.et1.c then 17 else -13 end not in (f,t1.f,a) and exists(select 1 from t1 where a<=11) then t1.e else c end FROM t1 WHERE case f+~d*case when t1.f<=t1.b*t1.a then e*t1.e else f end*a+f when ~case when 11 not between +(abs(b)/abs(case b+d when t1.a then f else t1.e end*13))+a and (b) then t1.e when t1.e<>11 then t1.d else 17 end+11 then t1.d else t1.b end<=13} +} {} +do_test randexpr-2.2299 { + db eval {SELECT t1.b-11-case when t1.d+13+t1.c | 17+case when (+a*t1.b*t1.c not in (e,t1.f,t1.e)) then coalesce((select max(e) from t1 where t1.f>=f), -d) when f>b and t1.et1.c then 17 else -13 end not in (f,t1.f,a) and exists(select 1 from t1 where a<=11) then t1.e else c end FROM t1 WHERE NOT (case f+~d*case when t1.f<=t1.b*t1.a then e*t1.e else f end*a+f when ~case when 11 not between +(abs(b)/abs(case b+d when t1.a then f else t1.e end*13))+a and (b) then t1.e when t1.e<>11 then t1.d else 17 end+11 then t1.d else t1.b end<=13)} +} {-111} +do_test randexpr-2.2300 { + db eval {SELECT t1.b-11-case when t1.d+13+t1.c & 17+case when (+a*t1.b*t1.c not in (e,t1.f,t1.e)) then coalesce((select max(e) from t1 where t1.f>=f), -d) when f>b and t1.et1.c then 17 else -13 end not in (f,t1.f,a) and exists(select 1 from t1 where a<=11) then t1.e else c end FROM t1 WHERE NOT (case f+~d*case when t1.f<=t1.b*t1.a then e*t1.e else f end*a+f when ~case when 11 not between +(abs(b)/abs(case b+d when t1.a then f else t1.e end*13))+a and (b) then t1.e when t1.e<>11 then t1.d else 17 end+11 then t1.d else t1.b end<=13)} +} {-111} +do_test randexpr-2.2301 { + db eval {SELECT coalesce((select t1.a from t1 where d not between 11 and 13 or case 13*17 when -c then case when (coalesce((select 17 from t1 where (~case when t1.e=d then 19 when 11 in (t1.d,b,17) then t1.c else c end-11 in (select max(e) from t1 union select (case cast(avg((t1.b)) AS integer) when max(11) then min(17) else ( -count(*)) end) from t1))),(19))>d) then t1.d else 11 end else t1.c end<=a),c) FROM t1 WHERE 19 | (abs(t1.d)/abs(11))=b} +} {} +do_test randexpr-2.2302 { + db eval {SELECT coalesce((select t1.a from t1 where d not between 11 and 13 or 
case 13*17 when -c then case when (coalesce((select 17 from t1 where (~case when t1.e=d then 19 when 11 in (t1.d,b,17) then t1.c else c end-11 in (select max(e) from t1 union select (case cast(avg((t1.b)) AS integer) when max(11) then min(17) else ( -count(*)) end) from t1))),(19))>d) then t1.d else 11 end else t1.c end<=a),c) FROM t1 WHERE NOT (19 | (abs(t1.d)/abs(11))=b)} +} {100} +do_test randexpr-2.2303 { + db eval {SELECT coalesce((select max(17-t1.e-17) from t1 where (t1.c>d)),case when coalesce((select +f+~coalesce((select coalesce((select case when (select cast(avg(17) AS integer) | cast(avg(t1.e) AS integer) from t1)<>coalesce((select t1.e from t1 where 19 in (select count(*) from t1 union select -( -count(*)) from t1)), -d) then b when exists(select 1 from t1 where t1.b between (t1.b) and 13) then a else -b end from t1 where 19=e),t1.c) from t1 where t1.c<=b),t1.d) from t1 where 17 between t1.a and b),t1.e) between c and t1.e then -t1.d else 13 end) FROM t1 WHERE exists(select 1 from t1 where t1.e-13-17-f*f*t1.d*coalesce((select 19-case when not not t1.d>e then case when t1.f=t1.c then 19 else f end else -t1.b end when e in (f,t1.e,17) then t1.a else t1.b end*t1.d from t1 where ((f)) in (select (~case count(distinct 11) when count(distinct t1.d) | count(distinct t1.b)-max( -f) then max(e) else (count(distinct (((a))))) end) from t1 union select max(e) from t1)),f)+c>=t1.f)} +} {} +do_test randexpr-2.2304 { + db eval {SELECT coalesce((select max(17-t1.e-17) from t1 where (t1.c>d)),case when coalesce((select +f+~coalesce((select coalesce((select case when (select cast(avg(17) AS integer) | cast(avg(t1.e) AS integer) from t1)<>coalesce((select t1.e from t1 where 19 in (select count(*) from t1 union select -( -count(*)) from t1)), -d) then b when exists(select 1 from t1 where t1.b between (t1.b) and 13) then a else -b end from t1 where 19=e),t1.c) from t1 where t1.c<=b),t1.d) from t1 where 17 between t1.a and b),t1.e) between c and t1.e then -t1.d else 13 end) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.e-13-17-f*f*t1.d*coalesce((select 19-case when not not t1.d>e then case when t1.f=t1.c then 19 else f end else -t1.b end when e in (f,t1.e,17) then t1.a else t1.b end*t1.d from t1 where ((f)) in (select (~case count(distinct 11) when count(distinct t1.d) | count(distinct t1.b)-max( -f) then max(e) else (count(distinct (((a))))) end) from t1 union select max(e) from t1)),f)+c>=t1.f))} +} {-400} +do_test randexpr-2.2305 { + db eval {SELECT coalesce((select max(17-t1.e-17) from t1 where (t1.c>d)),case when coalesce((select +f+~coalesce((select coalesce((select case when (select cast(avg(17) AS integer) & cast(avg(t1.e) AS integer) from t1)<>coalesce((select t1.e from t1 where 19 in (select count(*) from t1 union select -( -count(*)) from t1)), -d) then b when exists(select 1 from t1 where t1.b between (t1.b) and 13) then a else -b end from t1 where 19=e),t1.c) from t1 where t1.c<=b),t1.d) from t1 where 17 between t1.a and b),t1.e) between c and t1.e then -t1.d else 13 end) FROM t1 WHERE NOT (exists(select 1 from t1 where t1.e-13-17-f*f*t1.d*coalesce((select 19-case when not not t1.d>e then case when t1.f=t1.c then 19 else f end else -t1.b end when e in (f,t1.e,17) then t1.a else t1.b end*t1.d from t1 where ((f)) in (select (~case count(distinct 11) when count(distinct t1.d) | count(distinct t1.b)-max( -f) then max(e) else (count(distinct (((a))))) end) from t1 union select max(e) from t1)),f)+c>=t1.f))} +} {-400} +do_test randexpr-2.2306 { + db eval {SELECT coalesce((select t1.b 
from t1 where exists(select 1 from t1 where (abs(~case (~13*case when c in (select (max(case when b<>11 then d when 13 not in (19,c,e) then t1.f else 19 end*t1.d)) from t1 union select abs(count(*)) | min(19) from t1) then t1.a else t1.c end-a-13-f*e*17) when f then t1.a else 13 end*t1.c)/abs(a))>t1.b)),13) FROM t1 WHERE ++case when a<=t1.a then 17 else 13 end>+f+(coalesce((select max(19) from t1 where 11 not in (case when t1.e<(abs(11)/abs(19*d*t1.c)) and (c in (select t1.c from t1 union select f from t1) or a>a) then t1.a when 11 not between t1.c and t1.a or t1.e not in ( -t1.a,17,f) then t1.d else e end+t1.f,19,(17))),17))* -11} +} {} +do_test randexpr-2.2307 { + db eval {SELECT coalesce((select t1.b from t1 where exists(select 1 from t1 where (abs(~case (~13*case when c in (select (max(case when b<>11 then d when 13 not in (19,c,e) then t1.f else 19 end*t1.d)) from t1 union select abs(count(*)) | min(19) from t1) then t1.a else t1.c end-a-13-f*e*17) when f then t1.a else 13 end*t1.c)/abs(a))>t1.b)),13) FROM t1 WHERE NOT (++case when a<=t1.a then 17 else 13 end>+f+(coalesce((select max(19) from t1 where 11 not in (case when t1.e<(abs(11)/abs(19*d*t1.c)) and (c in (select t1.c from t1 union select f from t1) or a>a) then t1.a when 11 not between t1.c and t1.a or t1.e not in ( -t1.a,17,f) then t1.d else e end+t1.f,19,(17))),17))* -11)} +} {13} +do_test randexpr-2.2308 { + db eval {SELECT coalesce((select t1.b from t1 where exists(select 1 from t1 where (abs(~case (~13*case when c in (select (max(case when b<>11 then d when 13 not in (19,c,e) then t1.f else 19 end*t1.d)) from t1 union select abs(count(*)) & min(19) from t1) then t1.a else t1.c end-a-13-f*e*17) when f then t1.a else 13 end*t1.c)/abs(a))>t1.b)),13) FROM t1 WHERE NOT (++case when a<=t1.a then 17 else 13 end>+f+(coalesce((select max(19) from t1 where 11 not in (case when t1.e<(abs(11)/abs(19*d*t1.c)) and (c in (select t1.c from t1 union select f from t1) or a>a) then t1.a when 11 not between t1.c and t1.a or t1.e not in ( -t1.a,17,f) then t1.d else e end+t1.f,19,(17))),17))* -11)} +} {13} +do_test randexpr-2.2309 { + db eval {SELECT case when c*(abs(t1.f)/abs(c))*b-11-t1.at1.d then -d else t1.f end*e) from t1 where (b)<>t1.a or 11 between ( -a) and a),t1.b) then t1.d when e=t1.e then t1.c else t1.e end FROM t1 WHERE (coalesce((select (t1.b)+f*case case +coalesce((select 13 from t1 where t1.e<=d),f)-a when d then t1.b else t1.c end+t1.e when a then 17 else t1.e end from t1 where not t1.d in (select b from t1 union select 19 from t1) or not (17t1.d then -d else t1.f end*e) from t1 where (b)<>t1.a or 11 between ( -a) and a),t1.b) then t1.d when e=t1.e then t1.c else t1.e end FROM t1 WHERE NOT ((coalesce((select (t1.b)+f*case case +coalesce((select 13 from t1 where t1.e<=d),f)-a when d then t1.b else t1.c end+t1.e when a then 17 else t1.e end from t1 where not t1.d in (select b from t1 union select 19 from t1) or not (17a) or ( -d) between c and c or (19) between t1.f and t1.b) or (b) not in (t1.d,t1.c,e)) then t1.b*t1.b when a>19 then t1.d else 11 end>=11 then t1.e else -e end FROM t1 WHERE not exists(select 1 from t1 where ((11<=+11)))} +} {} +do_test randexpr-2.2314 { + db eval {SELECT case when case when -f-case when t1.a between 13 and (e) then t1.d | a else 13 enda) or ( -d) between c and c or (19) between t1.f and t1.b) or (b) not in (t1.d,t1.c,e)) then t1.b*t1.b when a>19 then t1.d else 11 end>=11 then t1.e else -e end FROM t1 WHERE NOT (not exists(select 1 from t1 where ((11<=+11))))} +} {500} +do_test randexpr-2.2315 { + 
db eval {SELECT case when case when -f-case when t1.a between 13 and (e) then t1.d & a else 13 enda) or ( -d) between c and c or (19) between t1.f and t1.b) or (b) not in (t1.d,t1.c,e)) then t1.b*t1.b when a>19 then t1.d else 11 end>=11 then t1.e else -e end FROM t1 WHERE NOT (not exists(select 1 from t1 where ((11<=+11))))} +} {500} +do_test randexpr-2.2316 { + db eval {SELECT (abs(+case when 11*t1.d not between 13 and t1.d then coalesce((select 17 from t1 where case ~e when (abs(+(b+(select ~ -cast(avg(t1.c) AS integer) from t1)))/abs(case 17 when 17 then c else a end)) then t1.d else t1.d end<>t1.a),f) when not exists(select 1 from t1 where (19<=(f))) and 17=t1.b or 19 not between a and a then t1.d else d end)/abs(b)) FROM t1 WHERE t1.d-13 in (t1.a,d,(t1.b))} +} {} +do_test randexpr-2.2317 { + db eval {SELECT (abs(+case when 11*t1.d not between 13 and t1.d then coalesce((select 17 from t1 where case ~e when (abs(+(b+(select ~ -cast(avg(t1.c) AS integer) from t1)))/abs(case 17 when 17 then c else a end)) then t1.d else t1.d end<>t1.a),f) when not exists(select 1 from t1 where (19<=(f))) and 17=t1.b or 19 not between a and a then t1.d else d end)/abs(b)) FROM t1 WHERE NOT (t1.d-13 in (t1.a,d,(t1.b)))} +} {0} +do_test randexpr-2.2318 { + db eval {SELECT b+(select case ~min(t1.a+13) when (cast(avg(t1.a) AS integer)) then max(d) else max(case when case t1.b when t1.e then t1.c else t1.f end+19<>+(select count(*) from t1) then case when b in (t1.e,t1.d,t1.e) and t1.c not between t1.f and 13 then (t1.c) when 17>t1.b then 19 else t1.b end-t1.d when d in (e,f,t1.f) and (a)+t1.e then e+case when (exists(select 1 from t1 where d<17) and d<(abs(t1.e)/abs(+ - -t1.f+d))) then coalesce((select max((t1.f)) from t1 where not 19=t1.e),t1.e)+t1.e else t1.d end when not exists(select 1 from t1 where t1.d not in (19,11,t1.c)) then (t1.f) else d end} +} {1800} +do_test randexpr-2.2319 { + db eval {SELECT b+(select case ~min(t1.a+13) when (cast(avg(t1.a) AS integer)) then max(d) else max(case when case t1.b when t1.e then t1.c else t1.f end+19<>+(select count(*) from t1) then case when b in (t1.e,t1.d,t1.e) and t1.c not between t1.f and 13 then (t1.c) when 17>t1.b then 19 else t1.b end-t1.d when d in (e,f,t1.f) and (a)+t1.e then e+case when (exists(select 1 from t1 where d<17) and d<(abs(t1.e)/abs(+ - -t1.f+d))) then coalesce((select max((t1.f)) from t1 where not 19=t1.e),t1.e)+t1.e else t1.d end when not exists(select 1 from t1 where t1.d not in (19,11,t1.c)) then (t1.f) else d end)} +} {} +do_test randexpr-2.2320 { + db eval {SELECT coalesce((select max(t1.d) from t1 where t1.b+b in (select (t1.c)*11*e+f+ -c-t1.b*coalesce((select max(a) from t1 where t1.a*17 between 19 and (((abs(f)/abs(t1.c+d))))),t1.c-(abs(t1.c)/abs(19)) | case when t1.d= -t1.f then b when (t1.c) not in (e, -11,19) then 19 else t1.a end-c)*t1.a-c from t1 union select t1.d from t1)),a) FROM t1 WHERE (select abs(+min(b)) from t1)<=d+t1.a} +} {400} +do_test randexpr-2.2321 { + db eval {SELECT coalesce((select max(t1.d) from t1 where t1.b+b in (select (t1.c)*11*e+f+ -c-t1.b*coalesce((select max(a) from t1 where t1.a*17 between 19 and (((abs(f)/abs(t1.c+d))))),t1.c-(abs(t1.c)/abs(19)) | case when t1.d= -t1.f then b when (t1.c) not in (e, -11,19) then 19 else t1.a end-c)*t1.a-c from t1 union select t1.d from t1)),a) FROM t1 WHERE NOT ((select abs(+min(b)) from t1)<=d+t1.a)} +} {} +do_test randexpr-2.2322 { + db eval {SELECT coalesce((select max(t1.d) from t1 where t1.b+b in (select (t1.c)*11*e+f+ -c-t1.b*coalesce((select max(a) from t1 where 
t1.a*17 between 19 and (((abs(f)/abs(t1.c+d))))),t1.c-(abs(t1.c)/abs(19)) & case when t1.d= -t1.f then b when (t1.c) not in (e, -11,19) then 19 else t1.a end-c)*t1.a-c from t1 union select t1.d from t1)),a) FROM t1 WHERE (select abs(+min(b)) from t1)<=d+t1.a} +} {400} +do_test randexpr-2.2323 { + db eval {SELECT case when coalesce((select c from t1 where (~13<>d)),t1.d-+d+13*coalesce((select max(17) from t1 where -fd),e))/abs(13))*f not in (t1.e,t1.c,e) then 19 when 17 not between t1.f and 17 then t1.f else t1.e end),f)+t1.c) | d<>11 then a else 19 end FROM t1 WHERE 13=b} +} {} +do_test randexpr-2.2324 { + db eval {SELECT case when coalesce((select c from t1 where (~13<>d)),t1.d-+d+13*coalesce((select max(17) from t1 where -fd),e))/abs(13))*f not in (t1.e,t1.c,e) then 19 when 17 not between t1.f and 17 then t1.f else t1.e end),f)+t1.c) | d<>11 then a else 19 end FROM t1 WHERE NOT (13=b)} +} {100} +do_test randexpr-2.2325 { + db eval {SELECT case when coalesce((select c from t1 where (~13<>d)),t1.d-+d+13*coalesce((select max(17) from t1 where -fd),e))/abs(13))*f not in (t1.e,t1.c,e) then 19 when 17 not between t1.f and 17 then t1.f else t1.e end),f)+t1.c) & d<>11 then a else 19 end FROM t1 WHERE NOT (13=b)} +} {100} +do_test randexpr-2.2326 { + db eval {SELECT case case t1.e+13 when case when not -f>coalesce((select max(t1.b) from t1 where (select min(a) from t1)t1.d and t1.c<19 and d between b and ( -c)) then (select case ~max( -( - -t1.f)) when min(e) then -count(*) else count(*) end-cast(avg(b) AS integer) from t1) when 17<>11 then f else case -17 when t1.d then b else -t1.f end end+t1.e when t1.d>=11 then 17 else 11 end then t1.d else t1.d end when t1.c then t1.a else 11 end FROM t1 WHERE (coalesce((select coalesce((select max(coalesce((select max( -(select + -case ++case (count(*)) when cast(avg(11) AS integer) then count(*) else -min(13) end when max(19) then -cast(avg(11) AS integer) else count(distinct (b)) end | max(t1.a)+max(b) from t1)) from t1 where 11 in (select b from t1 union select 13 from t1)),case (select count(*) from t1) when 19 then case when d not between -d and b then t1.f else t1.b end else t1.c end)+d-t1.e) from t1 where t1.f not between t1.c and 19),t1.b) from t1 where t1.c between d and -t1.b),t1.b))>=c} +} {} +do_test randexpr-2.2327 { + db eval {SELECT case case t1.e+13 when case when not -f>coalesce((select max(t1.b) from t1 where (select min(a) from t1)t1.d and t1.c<19 and d between b and ( -c)) then (select case ~max( -( - -t1.f)) when min(e) then -count(*) else count(*) end-cast(avg(b) AS integer) from t1) when 17<>11 then f else case -17 when t1.d then b else -t1.f end end+t1.e when t1.d>=11 then 17 else 11 end then t1.d else t1.d end when t1.c then t1.a else 11 end FROM t1 WHERE NOT ((coalesce((select coalesce((select max(coalesce((select max( -(select + -case ++case (count(*)) when cast(avg(11) AS integer) then count(*) else -min(13) end when max(19) then -cast(avg(11) AS integer) else count(distinct (b)) end | max(t1.a)+max(b) from t1)) from t1 where 11 in (select b from t1 union select 13 from t1)),case (select count(*) from t1) when 19 then case when d not between -d and b then t1.f else t1.b end else t1.c end)+d-t1.e) from t1 where t1.f not between t1.c and 19),t1.b) from t1 where t1.c between d and -t1.b),t1.b))>=c)} +} {11} +do_test randexpr-2.2328 { + db eval {SELECT case when +case when coalesce((select t1.b from t1 where not exists(select 1 from t1 where f<=coalesce((select c from t1 where t1.a in (select ~f from t1 union select coalesce((select 
max(case -(13) when 13 then t1.f else a end | 13) from t1 where c<= -c or b not between f and t1.d and t1.b not in (e,t1.a,17)),a)+d from t1)),13) or t1.a>c)),11*19) between e and 17 then c else -(c) end>=t1.c then t1.d else c end FROM t1 WHERE t1.d<17-t1.a | a-+11} +} {} +do_test randexpr-2.2329 { + db eval {SELECT case when +case when coalesce((select t1.b from t1 where not exists(select 1 from t1 where f<=coalesce((select c from t1 where t1.a in (select ~f from t1 union select coalesce((select max(case -(13) when 13 then t1.f else a end | 13) from t1 where c<= -c or b not between f and t1.d and t1.b not in (e,t1.a,17)),a)+d from t1)),13) or t1.a>c)),11*19) between e and 17 then c else -(c) end>=t1.c then t1.d else c end FROM t1 WHERE NOT (t1.d<17-t1.a | a-+11)} +} {300} +do_test randexpr-2.2330 { + db eval {SELECT case when +case when coalesce((select t1.b from t1 where not exists(select 1 from t1 where f<=coalesce((select c from t1 where t1.a in (select ~f from t1 union select coalesce((select max(case -(13) when 13 then t1.f else a end & 13) from t1 where c<= -c or b not between f and t1.d and t1.b not in (e,t1.a,17)),a)+d from t1)),13) or t1.a>c)),11*19) between e and 17 then c else -(c) end>=t1.c then t1.d else c end FROM t1 WHERE NOT (t1.d<17-t1.a | a-+11)} +} {300} +do_test randexpr-2.2331 { + db eval {SELECT t1.c+c- -d*t1.d+t1.e | case when t1.e<=e and (a*t1.c) in (select ((abs(f)/abs(( -+(select abs(~(min( -c))-max( - -b)-count(*)) from t1))))) from t1 union select b from t1) then +a*t1.c-t1.d*t1.c else e end-t1.d+t1.f FROM t1 WHERE (exists(select 1 from t1 where coalesce((select max(++11+coalesce((select max(17) from t1 where case case when exists(select 1 from t1 where c not between t1.e and 17) then 19 else f end when -t1.f then t1.f else t1.a end<=t1.e or 11 in (b,a,e) and e between 11 and 11), -(a)-19)*t1.f) from t1 where t1.b between d and 17 and 17=e),19) not in (a,b,b))) and (e>=d) or b not in (c,13,19)} +} {161788} +do_test randexpr-2.2332 { + db eval {SELECT t1.c+c- -d*t1.d+t1.e | case when t1.e<=e and (a*t1.c) in (select ((abs(f)/abs(( -+(select abs(~(min( -c))-max( - -b)-count(*)) from t1))))) from t1 union select b from t1) then +a*t1.c-t1.d*t1.c else e end-t1.d+t1.f FROM t1 WHERE NOT ((exists(select 1 from t1 where coalesce((select max(++11+coalesce((select max(17) from t1 where case case when exists(select 1 from t1 where c not between t1.e and 17) then 19 else f end when -t1.f then t1.f else t1.a end<=t1.e or 11 in (b,a,e) and e between 11 and 11), -(a)-19)*t1.f) from t1 where t1.b between d and 17 and 17=e),19) not in (a,b,b))) and (e>=d) or b not in (c,13,19))} +} {} +do_test randexpr-2.2333 { + db eval {SELECT t1.c+c- -d*t1.d+t1.e & case when t1.e<=e and (a*t1.c) in (select ((abs(f)/abs(( -+(select abs(~(min( -c))-max( - -b)-count(*)) from t1))))) from t1 union select b from t1) then +a*t1.c-t1.d*t1.c else e end-t1.d+t1.f FROM t1 WHERE (exists(select 1 from t1 where coalesce((select max(++11+coalesce((select max(17) from t1 where case case when exists(select 1 from t1 where c not between t1.e and 17) then 19 else f end when -t1.f then t1.f else t1.a end<=t1.e or 11 in (b,a,e) and e between 11 and 11), -(a)-19)*t1.f) from t1 where t1.b between d and 17 and 17=e),19) not in (a,b,b))) and (e>=d) or b not in (c,13,19)} +} {12} +do_test randexpr-2.2334 { + db eval {SELECT b-case when exists(select 1 from t1 where (exists(select 1 from t1 where t1.a not between 17 and a+t1.f) or not exists(select 1 from t1 where not exists(select 1 from t1 where t1.b+f+a in (select 
max(19) | -max(t1.e)-cast(avg(t1.c) AS integer)- -count(distinct t1.b) from t1 union select max(t1.b) from t1)) or not exists(select 1 from t1 where not exists(select 1 from t1 where 11>c or (13) in ( -11,c,19)) or 11>f))) and t1.c not in ( -(d),t1.a,t1.a) and 17 in (t1.b,13,19)) then coalesce((select max(+19) from t1 where 17e)),b) from t1))) or ((t1.dc or (13) in ( -11,c,19)) or 11>f))) and t1.c not in ( -(d),t1.a,t1.a) and 17 in (t1.b,13,19)) then coalesce((select max(+19) from t1 where 17e)),b) from t1))) or ((t1.dc or (13) in ( -11,c,19)) or 11>f))) and t1.c not in ( -(d),t1.a,t1.a) and 17 in (t1.b,13,19)) then coalesce((select max(+19) from t1 where 17e)),b) from t1))) or ((t1.d= -t1.f),t1.c))),b) end from t1 where not a=11),t1.c) FROM t1 WHERE not 13<>11+17-case when 17 between (abs(case f when 19 | t1.a then +(select cast(avg(case when not exists(select 1 from t1 where ~ -13*(select -cast(avg(t1.b*d) AS integer) from t1)+17<>(t1.b)) then ~t1.c when b>t1.d then c else (f) end) AS integer) from t1) else a end)/abs(19)) and t1.f then t1.b when a<>t1.c then t1.e else b end} +} {} +do_test randexpr-2.2338 { + db eval {SELECT coalesce((select t1.f*t1.b+case t1.a when 17 then -t1.c else coalesce((select b from t1 where 19 in (f,d+b,coalesce((select t1.e-b from t1 where t1.b*d+case t1.c when case coalesce((select t1.b from t1 where c not in (b,a,13)),(13)) when t1.b then 19 else 19 end then t1.d else 19 end+ -b>= -t1.f),t1.c))),b) end from t1 where not a=11),t1.c) FROM t1 WHERE NOT (not 13<>11+17-case when 17 between (abs(case f when 19 | t1.a then +(select cast(avg(case when not exists(select 1 from t1 where ~ -13*(select -cast(avg(t1.b*d) AS integer) from t1)+17<>(t1.b)) then ~t1.c when b>t1.d then c else (f) end) AS integer) from t1) else a end)/abs(19)) and t1.f then t1.b when a<>t1.c then t1.e else b end)} +} {120200} +do_test randexpr-2.2339 { + db eval {SELECT coalesce((select max(19) from t1 where t1.f*17+case t1.f when 11 then coalesce((select max(t1.a) from t1 where 13<>t1.d),(abs(13+b-case when not c+(abs(case when t1.c not between a and t1.f then -(e) else t1.d end)/abs( -17))=13} +} {119} +do_test randexpr-2.2340 { + db eval {SELECT coalesce((select max(19) from t1 where t1.f*17+case t1.f when 11 then coalesce((select max(t1.a) from t1 where 13<>t1.d),(abs(13+b-case when not c+(abs(case when t1.c not between a and t1.f then -(e) else t1.d end)/abs( -17))=13)} +} {} +do_test randexpr-2.2341 { + db eval {SELECT +coalesce((select max(e) from t1 where -(case 13*(t1.b* -case when e<=19 then b when t1.e=c then (b) else f end*t1.e) when e then 13 else t1.f end)=t1.b and t1.a in (select count(*) | case case min(b)*(count(*)) when abs(cast(avg(13) AS integer))-max(t1.d) then min(19) else -cast(avg(b) AS integer) end when count(*) then (max(c)) else count(distinct 19) end+cast(avg((b)) AS integer) from t1 union select count(*) from t1)),t1.a) FROM t1 WHERE (select (min(13-(abs(c)/abs(case when coalesce((select case when coalesce((select -a from t1 where case a*f when t1.a then -c else (f) end between t1.a and t1.c),b) not between t1.e and b then 19 when 17 not in (19,d,11) then b else a end from t1 where b in (select t1.e from t1 union select t1.b from t1)),t1.a) not in (t1.e,t1.a,t1.f) then (d) else 19 end)))) from t1) in (select +13 from t1 union select 17 from t1)} +} {100} +do_test randexpr-2.2342 { + db eval {SELECT +coalesce((select max(e) from t1 where -(case 13*(t1.b* -case when e<=19 then b when t1.e=c then (b) else f end*t1.e) when e then 13 else t1.f end)=t1.b and t1.a in 
(select count(*) | case case min(b)*(count(*)) when abs(cast(avg(13) AS integer))-max(t1.d) then min(19) else -cast(avg(b) AS integer) end when count(*) then (max(c)) else count(distinct 19) end+cast(avg((b)) AS integer) from t1 union select count(*) from t1)),t1.a) FROM t1 WHERE NOT ((select (min(13-(abs(c)/abs(case when coalesce((select case when coalesce((select -a from t1 where case a*f when t1.a then -c else (f) end between t1.a and t1.c),b) not between t1.e and b then 19 when 17 not in (19,d,11) then b else a end from t1 where b in (select t1.e from t1 union select t1.b from t1)),t1.a) not in (t1.e,t1.a,t1.f) then (d) else 19 end)))) from t1) in (select +13 from t1 union select 17 from t1))} +} {} +do_test randexpr-2.2343 { + db eval {SELECT +coalesce((select max(e) from t1 where -(case 13*(t1.b* -case when e<=19 then b when t1.e=c then (b) else f end*t1.e) when e then 13 else t1.f end)=t1.b and t1.a in (select count(*) & case case min(b)*(count(*)) when abs(cast(avg(13) AS integer))-max(t1.d) then min(19) else -cast(avg(b) AS integer) end when count(*) then (max(c)) else count(distinct 19) end+cast(avg((b)) AS integer) from t1 union select count(*) from t1)),t1.a) FROM t1 WHERE (select (min(13-(abs(c)/abs(case when coalesce((select case when coalesce((select -a from t1 where case a*f when t1.a then -c else (f) end between t1.a and t1.c),b) not between t1.e and b then 19 when 17 not in (19,d,11) then b else a end from t1 where b in (select t1.e from t1 union select t1.b from t1)),t1.a) not in (t1.e,t1.a,t1.f) then (d) else 19 end)))) from t1) in (select +13 from t1 union select 17 from t1)} +} {100} +do_test randexpr-2.2344 { + db eval {SELECT case when b in (select 19 from t1 union select c from t1) then -d else 19-13+coalesce((select t1.c from t1 where t1.d>=case when e in (select f*t1.b from t1 union select t1.d from t1) then t1.c when not exists(select 1 from t1 where t1.f not in (17,t1.c,11) and b between b and 17 or c in (t1.b,17,a) or t1.d>t1.d and t1.c<>11 and 13=e and t1.f not in ( -t1.c,19,f)) then coalesce((select max(case when t1.c in (t1.e,d,b) then t1.f else t1.a end) from t1 where e<=t1.f),11) else t1.b end*11),t1.e) end FROM t1 WHERE 13 not between +19 and a} +} {506} +do_test randexpr-2.2345 { + db eval {SELECT case when b in (select 19 from t1 union select c from t1) then -d else 19-13+coalesce((select t1.c from t1 where t1.d>=case when e in (select f*t1.b from t1 union select t1.d from t1) then t1.c when not exists(select 1 from t1 where t1.f not in (17,t1.c,11) and b between b and 17 or c in (t1.b,17,a) or t1.d>t1.d and t1.c<>11 and 13=e and t1.f not in ( -t1.c,19,f)) then coalesce((select max(case when t1.c in (t1.e,d,b) then t1.f else t1.a end) from t1 where e<=t1.f),11) else t1.b end*11),t1.e) end FROM t1 WHERE NOT (13 not between +19 and a)} +} {} +do_test randexpr-2.2346 { + db eval {SELECT (abs(13)/abs(case when t1.f not between a and f then 19-t1.d-t1.d*d | c+(select case count(*) | (~ -count(*)) when cast(avg( -19) AS integer) then min(d) else (cast(avg(e) AS integer)) end from t1) | t1.f-t1.b when f<>case when f<>b and 19 in (d,( -t1.a),t1.c) then 11 when e not between b and (t1.d) then t1.e else t1.d end-t1.e or t1.b not in (f,c,(a)) then -d else t1.f end-t1.f)) FROM t1 WHERE (select -count(*) from t1)=d+11} +} {} +do_test randexpr-2.2347 { + db eval {SELECT (abs(13)/abs(case when t1.f not between a and f then 19-t1.d-t1.d*d | c+(select case count(*) | (~ -count(*)) when cast(avg( -19) AS integer) then min(d) else (cast(avg(e) AS integer)) end from t1) | 
t1.f-t1.b when f<>case when f<>b and 19 in (d,( -t1.a),t1.c) then 11 when e not between b and (t1.d) then t1.e else t1.d end-t1.e or t1.b not in (f,c,(a)) then -d else t1.f end-t1.f)) FROM t1 WHERE NOT ((select -count(*) from t1)=d+11)} +} {0} +do_test randexpr-2.2348 { + db eval {SELECT (abs(13)/abs(case when t1.f not between a and f then 19-t1.d-t1.d*d & c+(select case count(*) & (~ -count(*)) when cast(avg( -19) AS integer) then min(d) else (cast(avg(e) AS integer)) end from t1) & t1.f-t1.b when f<>case when f<>b and 19 in (d,( -t1.a),t1.c) then 11 when e not between b and (t1.d) then t1.e else t1.d end-t1.e or t1.b not in (f,c,(a)) then -d else t1.f end-t1.f)) FROM t1 WHERE NOT ((select -count(*) from t1)=d+11)} +} {0} +do_test randexpr-2.2349 { + db eval {SELECT ((select count(distinct 11+19 | case (d) when t1.b then coalesce((select case coalesce((select max(+c*t1.f) from t1 where (t1.e=f and 13<=t1.d)), -17)+11-t1.a when (t1.c) then 19 else 13 end from t1 where t1.c in (select ~+ -count(distinct t1.f) from t1 union select count(*)+count(distinct b)*min(11) from t1)),e) else 19 end+d) | count(*) from t1)) FROM t1 WHERE case b when 19 then ~b-~e-d+d+t1.c else a end in (select max(t1.b)-~count(*)+abs(abs((case min((abs(~t1.c*t1.f)/abs(t1.e))) when case max(b) when abs(count(distinct t1.b)) then +cast(avg((13)) AS integer) else +case min(b) when count(*) then count(*) else count(distinct t1.f) end end then max(19) else ( -count(*)) end)))*count(distinct f)*cast(avg(t1.c) AS integer) from t1 union select count(distinct a) from t1)} +} {} +do_test randexpr-2.2350 { + db eval {SELECT ((select count(distinct 11+19 | case (d) when t1.b then coalesce((select case coalesce((select max(+c*t1.f) from t1 where (t1.e=f and 13<=t1.d)), -17)+11-t1.a when (t1.c) then 19 else 13 end from t1 where t1.c in (select ~+ -count(distinct t1.f) from t1 union select count(*)+count(distinct b)*min(11) from t1)),e) else 19 end+d) | count(*) from t1)) FROM t1 WHERE NOT (case b when 19 then ~b-~e-d+d+t1.c else a end in (select max(t1.b)-~count(*)+abs(abs((case min((abs(~t1.c*t1.f)/abs(t1.e))) when case max(b) when abs(count(distinct t1.b)) then +cast(avg((13)) AS integer) else +case min(b) when count(*) then count(*) else count(distinct t1.f) end end then max(19) else ( -count(*)) end)))*count(distinct f)*cast(avg(t1.c) AS integer) from t1 union select count(distinct a) from t1))} +} {1} +do_test randexpr-2.2351 { + db eval {SELECT ((select count(distinct 11+19 & case (d) when t1.b then coalesce((select case coalesce((select max(+c*t1.f) from t1 where (t1.e=f and 13<=t1.d)), -17)+11-t1.a when (t1.c) then 19 else 13 end from t1 where t1.c in (select ~+ -count(distinct t1.f) from t1 union select count(*)+count(distinct b)*min(11) from t1)),e) else 19 end+d) & count(*) from t1)) FROM t1 WHERE NOT (case b when 19 then ~b-~e-d+d+t1.c else a end in (select max(t1.b)-~count(*)+abs(abs((case min((abs(~t1.c*t1.f)/abs(t1.e))) when case max(b) when abs(count(distinct t1.b)) then +cast(avg((13)) AS integer) else +case min(b) when count(*) then count(*) else count(distinct t1.f) end end then max(19) else ( -count(*)) end)))*count(distinct f)*cast(avg(t1.c) AS integer) from t1 union select count(distinct a) from t1))} +} {1} +do_test randexpr-2.2352 { + db eval {SELECT 17+ -(abs(case case when a>t1.c then t1.e else a end*11-case when not exists(select 1 from t1 where coalesce((select -t1.e-11+coalesce((select t1.e from t1 where 13=t1.e),d) from t1 where t1.e between t1.d and a),13)-t1.e in (select t1.d from t1 union select d 
from t1)) then t1.b else t1.d end-t1.e-13+b when 19 then t1.b else c end)/abs(t1.d)) FROM t1 WHERE coalesce((select max(t1.d) from t1 where t1.a+t1.d<19),t1.a) in (select t1.b from t1 union select c from t1) or t1.c in (select -abs(max(case when -f<>t1.b and b<>11 then t1.f when t1.a=e then t1.d else -(f) end) | case cast(avg(f) AS integer) when count(distinct t1.f) then min(e) else -count(*) end+cast(avg((t1.f)) AS integer)) from t1 union select max(17) from t1) or t1.c between ((c)) and -t1.b and -c not between d and 11 or b>=19} +} {17} +do_test randexpr-2.2353 { + db eval {SELECT 17+ -(abs(case case when a>t1.c then t1.e else a end*11-case when not exists(select 1 from t1 where coalesce((select -t1.e-11+coalesce((select t1.e from t1 where 13=t1.e),d) from t1 where t1.e between t1.d and a),13)-t1.e in (select t1.d from t1 union select d from t1)) then t1.b else t1.d end-t1.e-13+b when 19 then t1.b else c end)/abs(t1.d)) FROM t1 WHERE NOT (coalesce((select max(t1.d) from t1 where t1.a+t1.d<19),t1.a) in (select t1.b from t1 union select c from t1) or t1.c in (select -abs(max(case when -f<>t1.b and b<>11 then t1.f when t1.a=e then t1.d else -(f) end) | case cast(avg(f) AS integer) when count(distinct t1.f) then min(e) else -count(*) end+cast(avg((t1.f)) AS integer)) from t1 union select max(17) from t1) or t1.c between ((c)) and -t1.b and -c not between d and 11 or b>=19)} +} {} +do_test randexpr-2.2354 { + db eval {SELECT e*~+(e)-(abs(~case when (t1.f)>13+~t1.d then case when d=t1.b+13 then 17 when 11 in (t1.d,e,e) then ~19*t1.a else coalesce((select t1.c from t1 where 19*(abs(t1.e)/abs(b)) between t1.c and t1.c),t1.d) end when a not between 13 and -17 then 19 else b end)/abs(t1.b)) FROM t1 WHERE (not d>case t1.b+t1.f when +(~(select (case abs(cast(avg(13) AS integer)) when cast(avg(+a) AS integer) then abs(abs(count(distinct case when t1.f*e<=11 then 13 when exists(select 1 from t1 where t1.e<17 or t1.a>=t1.a) then (11) else f end))) else min(t1.b)-min(11) end) from t1))+17*t1.b*f*17 then e else 11 end)} +} {} +do_test randexpr-2.2355 { + db eval {SELECT e*~+(e)-(abs(~case when (t1.f)>13+~t1.d then case when d=t1.b+13 then 17 when 11 in (t1.d,e,e) then ~19*t1.a else coalesce((select t1.c from t1 where 19*(abs(t1.e)/abs(b)) between t1.c and t1.c),t1.d) end when a not between 13 and -17 then 19 else b end)/abs(t1.b)) FROM t1 WHERE NOT ((not d>case t1.b+t1.f when +(~(select (case abs(cast(avg(13) AS integer)) when cast(avg(+a) AS integer) then abs(abs(count(distinct case when t1.f*e<=11 then 13 when exists(select 1 from t1 where t1.e<17 or t1.a>=t1.a) then (11) else f end))) else min(t1.b)-min(11) end) from t1))+17*t1.b*f*17 then e else 11 end))} +} {-250502} +do_test randexpr-2.2356 { + db eval {SELECT -case when t1.c<= -case coalesce((select max((coalesce((select case 11 when t1.b then t1.f else 11 end-d from t1 where c in (select count(*) | ~count(distinct f) from t1 union select count(*) from t1)),a)*t1.a)-t1.f) from t1 where not exists(select 1 from t1 where ((t1.f not in ( -c,t1.d,b)) or t1.a not in (b,11,t1.c)) or t1.d not in (17,a,t1.a))),19)-t1.b when d then t1.e else t1.e end-19 | e then b else -13 end*e-c FROM t1 WHERE b<>coalesce((select max((abs((abs(11)/abs(13)))/abs(case when t1.e between 11 and 19 then t1.d when 11>=b-19 then (select count(distinct ~a | 19*f+case when f not in (t1.e,t1.f, -t1.f) then 17 when c not in ( -t1.a,t1.c,b) then f else 11 end) from t1) else t1.e end))-t1.e*a) from t1 where e in (t1.c,17,17)),t1.a) or a=t1.e or d>t1.b} +} {6200} +do_test 
randexpr-2.2357 { + db eval {SELECT -case when t1.c<= -case coalesce((select max((coalesce((select case 11 when t1.b then t1.f else 11 end-d from t1 where c in (select count(*) | ~count(distinct f) from t1 union select count(*) from t1)),a)*t1.a)-t1.f) from t1 where not exists(select 1 from t1 where ((t1.f not in ( -c,t1.d,b)) or t1.a not in (b,11,t1.c)) or t1.d not in (17,a,t1.a))),19)-t1.b when d then t1.e else t1.e end-19 | e then b else -13 end*e-c FROM t1 WHERE NOT (b<>coalesce((select max((abs((abs(11)/abs(13)))/abs(case when t1.e between 11 and 19 then t1.d when 11>=b-19 then (select count(distinct ~a | 19*f+case when f not in (t1.e,t1.f, -t1.f) then 17 when c not in ( -t1.a,t1.c,b) then f else 11 end) from t1) else t1.e end))-t1.e*a) from t1 where e in (t1.c,17,17)),t1.a) or a=t1.e or d>t1.b)} +} {} +do_test randexpr-2.2358 { + db eval {SELECT -case when t1.c<= -case coalesce((select max((coalesce((select case 11 when t1.b then t1.f else 11 end-d from t1 where c in (select count(*) & ~count(distinct f) from t1 union select count(*) from t1)),a)*t1.a)-t1.f) from t1 where not exists(select 1 from t1 where ((t1.f not in ( -c,t1.d,b)) or t1.a not in (b,11,t1.c)) or t1.d not in (17,a,t1.a))),19)-t1.b when d then t1.e else t1.e end-19 & e then b else -13 end*e-c FROM t1 WHERE b<>coalesce((select max((abs((abs(11)/abs(13)))/abs(case when t1.e between 11 and 19 then t1.d when 11>=b-19 then (select count(distinct ~a | 19*f+case when f not in (t1.e,t1.f, -t1.f) then 17 when c not in ( -t1.a,t1.c,b) then f else 11 end) from t1) else t1.e end))-t1.e*a) from t1 where e in (t1.c,17,17)),t1.a) or a=t1.e or d>t1.b} +} {-100300} +do_test randexpr-2.2359 { + db eval {SELECT (abs(case when t1.d not in (t1.f,t1.d,(select abs(min(((select +count(*)*case max(t1.e-coalesce((select max(case t1.b when 13 then a else e end) from t1 where t1.b>b),19)+d)- -+count(*) when +cast(avg(d) AS integer) then max(c) else (cast(avg(c) AS integer)) end | count(*) from t1)+t1.f)))-max(t1.c) from t1)) then t1.b*t1.f else f end)/abs(b+19)) FROM t1 WHERE a not in ((11),(t1.d),a) or case when f(abs(d)/abs(t1.d)))) then a when f not between t1.f | t1.b and t1.d then a else 13 end from t1 where 19<=c),f) then t1.a else f end between 13 and 11} +} {} +do_test randexpr-2.2360 { + db eval {SELECT (abs(case when t1.d not in (t1.f,t1.d,(select abs(min(((select +count(*)*case max(t1.e-coalesce((select max(case t1.b when 13 then a else e end) from t1 where t1.b>b),19)+d)- -+count(*) when +cast(avg(d) AS integer) then max(c) else (cast(avg(c) AS integer)) end | count(*) from t1)+t1.f)))-max(t1.c) from t1)) then t1.b*t1.f else f end)/abs(b+19)) FROM t1 WHERE NOT (a not in ((11),(t1.d),a) or case when f(abs(d)/abs(t1.d)))) then a when f not between t1.f | t1.b and t1.d then a else 13 end from t1 where 19<=c),f) then t1.a else f end between 13 and 11)} +} {2} +do_test randexpr-2.2361 { + db eval {SELECT (abs(case when t1.d not in (t1.f,t1.d,(select abs(min(((select +count(*)*case max(t1.e-coalesce((select max(case t1.b when 13 then a else e end) from t1 where t1.b>b),19)+d)- -+count(*) when +cast(avg(d) AS integer) then max(c) else (cast(avg(c) AS integer)) end & count(*) from t1)+t1.f)))-max(t1.c) from t1)) then t1.b*t1.f else f end)/abs(b+19)) FROM t1 WHERE NOT (a not in ((11),(t1.d),a) or case when f(abs(d)/abs(t1.d)))) then a when f not between t1.f | t1.b and t1.d then a else 13 end from t1 where 19<=c),f) then t1.a else f end between 13 and 11)} +} {2} +do_test randexpr-2.2362 { + db eval {SELECT t1.a+d | +case when 
+t1.a-t1.f++t1.c+(select case ~count(*) when (+cast(avg(f*t1.f) AS integer) | min(t1.b)) then -(cast(avg(t1.f) AS integer)) else min(e) end from t1)-b* - -13-f+c in (select b from t1 union select t1.d from t1) or et1.a),t1.f) between t1.a and 19 then ( -b) else t1.a end) from t1 where t1.c in (a,(f),e)),t1.c))/abs(a)))) from t1)) from t1 where case when exists(select 1 from t1 where e<11) and 17 in (select 11 from t1 union select a from t1) then d else (d) end in (select cast(avg(f) AS integer) from t1 union select count(*) from t1)),t1.d) FROM t1 WHERE exists(select 1 from t1 where b not between case 11 when d then (select + -(abs(abs(case -min(d)+ - -count(*)-max(c) when count(*) then max(t1.b) else count(*) end))) from t1)+t1.f-f*t1.b*((select min(a) from t1))-b+f else t1.e end | t1.c+13 and 19) and bt1.e)} +} {400} +do_test randexpr-2.2366 { + db eval {SELECT coalesce((select max((select (max((abs(coalesce((select max(+case when ~t1.d=t1.a then a when coalesce((select case when t1.c in (select 19 from t1 union select (d) from t1) then e else f end from t1 where t1.a<>t1.a),t1.f) between t1.a and 19 then ( -b) else t1.a end) from t1 where t1.c in (a,(f),e)),t1.c))/abs(a)))) from t1)) from t1 where case when exists(select 1 from t1 where e<11) and 17 in (select 11 from t1 union select a from t1) then d else (d) end in (select cast(avg(f) AS integer) from t1 union select count(*) from t1)),t1.d) FROM t1 WHERE NOT (exists(select 1 from t1 where b not between case 11 when d then (select + -(abs(abs(case -min(d)+ - -count(*)-max(c) when count(*) then max(t1.b) else count(*) end))) from t1)+t1.f-f*t1.b*((select min(a) from t1))-b+f else t1.e end | t1.c+13 and 19) and bt1.e))} +} {} +do_test randexpr-2.2367 { + db eval {SELECT case when t1.d+b+c | case +a-t1.d-f*(select cast(avg(17) AS integer)+count(distinct e)-count(distinct t1.a) | - -cast(avg(t1.b) AS integer) from t1)+~t1.e-a when d then t1.f else d end+t1.e*b<>11 then f when t1.a=b or t1.b>(t1.d) then 19 else t1.f end+19 FROM t1 WHERE coalesce((select case t1.e-case coalesce((select (select min(t1.d) from t1) from t1 where coalesce((select max(coalesce((select max(a) from t1 where t1.d in (select cast(avg(t1.b) AS integer) from t1 union select count(distinct a) from t1)), -19)*t1.e) from t1 where d>11),t1.f)>17),t1.f) when (17) then e else d end when b then (19) else 19 end from t1 where not t1.f in (select ( -max(13)) from t1 union select max(c) from t1)),11)-d not in ( -19,13,d)} +} {619} +do_test randexpr-2.2368 { + db eval {SELECT case when t1.d+b+c | case +a-t1.d-f*(select cast(avg(17) AS integer)+count(distinct e)-count(distinct t1.a) | - -cast(avg(t1.b) AS integer) from t1)+~t1.e-a when d then t1.f else d end+t1.e*b<>11 then f when t1.a=b or t1.b>(t1.d) then 19 else t1.f end+19 FROM t1 WHERE NOT (coalesce((select case t1.e-case coalesce((select (select min(t1.d) from t1) from t1 where coalesce((select max(coalesce((select max(a) from t1 where t1.d in (select cast(avg(t1.b) AS integer) from t1 union select count(distinct a) from t1)), -19)*t1.e) from t1 where d>11),t1.f)>17),t1.f) when (17) then e else d end when b then (19) else 19 end from t1 where not t1.f in (select ( -max(13)) from t1 union select max(c) from t1)),11)-d not in ( -19,13,d))} +} {} +do_test randexpr-2.2369 { + db eval {SELECT case when t1.d+b+c & case +a-t1.d-f*(select cast(avg(17) AS integer)+count(distinct e)-count(distinct t1.a) & - -cast(avg(t1.b) AS integer) from t1)+~t1.e-a when d then t1.f else d end+t1.e*b<>11 then f when t1.a=b or t1.b>(t1.d) then 19 
else t1.f end+19 FROM t1 WHERE coalesce((select case t1.e-case coalesce((select (select min(t1.d) from t1) from t1 where coalesce((select max(coalesce((select max(a) from t1 where t1.d in (select cast(avg(t1.b) AS integer) from t1 union select count(distinct a) from t1)), -19)*t1.e) from t1 where d>11),t1.f)>17),t1.f) when (17) then e else d end when b then (19) else 19 end from t1 where not t1.f in (select ( -max(13)) from t1 union select max(c) from t1)),11)-d not in ( -19,13,d)} +} {619} +do_test randexpr-2.2370 { + db eval {SELECT case when (t1.a not in (coalesce((select 17 from t1 where case when t1.b*(abs( -t1.b*f*coalesce((select 17 from t1 where not t1.dt1.d then 13 when c>b then c else 11 end when e then a else t1.e end>c) or 19 in (select ++min(f) from t1 union select -count(distinct t1.a) from t1) then (case when t1.f in (select t1.f from t1 union select -t1.a from t1) then c when b not between t1.a and b then -t1.c else t1.a end) else t1.c end not in (f,a,17) then 11 else 11 end FROM t1 WHERE not c not between (select abs(max(~(19*e-t1.a*c*t1.d))*~abs((abs(min(a)-min(c) | cast(avg(19) AS integer))) | count(distinct 19)) | (cast(avg(d) AS integer))) from t1) and (abs(case coalesce((select max(19) from t1 where b not between a and f or 19 in (t1.c,t1.c,b)),t1.e) when c then f else 11 end)/abs(c))} +} {} +do_test randexpr-2.2373 { + db eval {SELECT case when case when (case (t1.a)*case when ct1.d then 13 when c>b then c else 11 end when e then a else t1.e end>c) or 19 in (select ++min(f) from t1 union select -count(distinct t1.a) from t1) then (case when t1.f in (select t1.f from t1 union select -t1.a from t1) then c when b not between t1.a and b then -t1.c else t1.a end) else t1.c end not in (f,a,17) then 11 else 11 end FROM t1 WHERE NOT (not c not between (select abs(max(~(19*e-t1.a*c*t1.d))*~abs((abs(min(a)-min(c) | cast(avg(19) AS integer))) | count(distinct 19)) | (cast(avg(d) AS integer))) from t1) and (abs(case coalesce((select max(19) from t1 where b not between a and f or 19 in (t1.c,t1.c,b)),t1.e) when c then f else 11 end)/abs(c)))} +} {11} +do_test randexpr-2.2374 { + db eval {SELECT (select count(*)-+ -(max(~coalesce((select ~b from t1 where not t1.c | 19 not between c+t1.c and d),a)*(abs(t1.a)/abs(d*(case t1.a*case when 17>e then 11 else 13 end when 19 then e else t1.d end)))+17))-cast(avg(c) AS integer) | count(distinct f) from t1)+(select abs(min(11)) from t1) FROM t1 WHERE coalesce((select max(11) from t1 where case when b<=13-~case when ~~(select count(*)+count(*)* -count(*) from t1)>17*17 then t1.f+case when not case when 19 in (t1.e,a,19) then 17 when f<=19 then 17 else -d end=19 then f when a>=19 then d else f end else t1.c end | f then 19 else b end*t1.d between e and b),f)e then 11 else 13 end when 19 then e else t1.d end)))+17))-cast(avg(c) AS integer) | count(distinct f) from t1)+(select abs(min(11)) from t1) FROM t1 WHERE NOT (coalesce((select max(11) from t1 where case when b<=13-~case when ~~(select count(*)+count(*)* -count(*) from t1)>17*17 then t1.f+case when not case when 19 in (t1.e,a,19) then 17 when f<=19 then 17 else -d end=19 then f when a>=19 then d else f end else t1.c end | f then 19 else b end*t1.d between e and b),f)e then 11 else 13 end when 19 then e else t1.d end)))+17))-cast(avg(c) AS integer) & count(distinct f) from t1)+(select abs(min(11)) from t1) FROM t1 WHERE NOT (coalesce((select max(11) from t1 where case when b<=13-~case when ~~(select count(*)+count(*)* -count(*) from t1)>17*17 then t1.f+case when not case when 19 in 
(t1.e,a,19) then 17 when f<=19 then 17 else -d end=19 then f when a>=19 then d else f end else t1.c end | f then 19 else b end*t1.d between e and b),f)19 then 11 else 17 end-t1.d FROM t1 WHERE (select min(a) from t1)19 then 11 else 17 end-t1.d FROM t1 WHERE NOT ((select min(a) from t1)19 then 11 else 17 end-t1.d FROM t1 WHERE (select min(a) from t1)c then t1.a else (d) end),d)<= -t1.c) then t1.e*t1.c else 13 end end FROM t1 WHERE exists(select 1 from t1 where ((b not between f and t1.a)))} +} {15004100} +do_test randexpr-2.2381 { + db eval {SELECT t1.d*11-case 11 when a then 11 else - -t1.b-(t1.b)+c+t1.a* -case when not exists(select 1 from t1 where coalesce((select t1.c from t1 where t1.c=case when 13 in (coalesce((select max(coalesce((select max(t1.c) from t1 where c between t1.d and t1.f and t1.a<=b),e)) from t1 where (11 not in (b,19,c))),b),e, -b) then 17 when t1.a in (b,t1.d,b) and t1.f>c then t1.a else (d) end),d)<= -t1.c) then t1.e*t1.c else 13 end end FROM t1 WHERE NOT (exists(select 1 from t1 where ((b not between f and t1.a))))} +} {} +do_test randexpr-2.2382 { + db eval {SELECT t1.c*case when not b>(+coalesce((select t1.e from t1 where t1.b>(abs(case when t1.d<=c+ -17*t1.f*19*17 then 11+a-13-t1.d else case (select abs(count(distinct 19))+ -count(distinct 17) from t1) when 17*t1.d then a else t1.a end end)/abs(17))),(11)))+17 then (t1.c)*f else 17 end FROM t1 WHERE (t1.b)-b<=t1.a*13} +} {54000000} +do_test randexpr-2.2383 { + db eval {SELECT t1.c*case when not b>(+coalesce((select t1.e from t1 where t1.b>(abs(case when t1.d<=c+ -17*t1.f*19*17 then 11+a-13-t1.d else case (select abs(count(distinct 19))+ -count(distinct 17) from t1) when 17*t1.d then a else t1.a end end)/abs(17))),(11)))+17 then (t1.c)*f else 17 end FROM t1 WHERE NOT ((t1.b)-b<=t1.a*13)} +} {} +do_test randexpr-2.2384 { + db eval {SELECT t1.b+coalesce((select max(~e- -t1.d+case +b+f when a-case when t1.a-d+t1.c between t1.e and a*e then (abs(e)/abs(t1.a)) else f | -f*t1.b-t1.c end*t1.e+f then 19 else t1.b end*19) from t1 where 19 in (select e from t1 union select 17 from t1)),19) FROM t1 WHERE d in (11,t1.c+c*t1.d+case when t1.c-case when ~(19+13)*c=11 and d<>17 or t1.e>=c then case t1.a when c then f else 11 end+t1.c when t1.f in (select max(13) from t1 union select cast(avg(c) AS integer) from t1) or t1.ct1.b then t1.e else 19 end,a)} +} {} +do_test randexpr-2.2385 { + db eval {SELECT t1.b+coalesce((select max(~e- -t1.d+case +b+f when a-case when t1.a-d+t1.c between t1.e and a*e then (abs(e)/abs(t1.a)) else f | -f*t1.b-t1.c end*t1.e+f then 19 else t1.b end*19) from t1 where 19 in (select e from t1 union select 17 from t1)),19) FROM t1 WHERE NOT (d in (11,t1.c+c*t1.d+case when t1.c-case when ~(19+13)*c=11 and d<>17 or t1.e>=c then case t1.a when c then f else 11 end+t1.c when t1.f in (select max(13) from t1 union select cast(avg(c) AS integer) from t1) or t1.ct1.b then t1.e else 19 end,a))} +} {219} +do_test randexpr-2.2386 { + db eval {SELECT t1.b+coalesce((select max(~e- -t1.d+case +b+f when a-case when t1.a-d+t1.c between t1.e and a*e then (abs(e)/abs(t1.a)) else f & -f*t1.b-t1.c end*t1.e+f then 19 else t1.b end*19) from t1 where 19 in (select e from t1 union select 17 from t1)),19) FROM t1 WHERE NOT (d in (11,t1.c+c*t1.d+case when t1.c-case when ~(19+13)*c=11 and d<>17 or t1.e>=c then case t1.a when c then f else 11 end+t1.c when t1.f in (select max(13) from t1 union select cast(avg(c) AS integer) from t1) or t1.ct1.b then t1.e else 19 end,a))} +} {219} +do_test randexpr-2.2387 { + db eval {SELECT 
(abs((select (case (abs(max( -t1.a))* -case count(*) when case min(c) when max(d) then count(*) else max(17) end-cast(avg(17) AS integer) then max(11) else cast(avg(t1.d) AS integer) end | max(13)+max(f)) when cast(avg(f) AS integer) then cast(avg(19) AS integer) else (max(11)) end) from t1) | coalesce((select (select max( -t1.d) from t1) from t1 where t1.b-~e in (select d from t1 union select e from t1) and t1.e=t1.d), -t1.e)-17)/abs(t1.e)) | -t1.e FROM t1 WHERE exists(select 1 from t1 where coalesce((select +19 from t1 where (coalesce((select max(e) from t1 where ((coalesce((select max(19) from t1 where coalesce((select t1.d*19+(t1.c) from t1 where e in (select -11 from t1 union select t1.f from t1) and (t1.d) in (select ~max(t1.e) from t1 union select (( -count(*))) from t1) or 19>=(a)),f) in ((e),e,t1.a)),t1.a)<=t1.c))),t1.f+t1.c)>13)),11) in (11,17,t1.d) and e>=t1.b)} +} {} +do_test randexpr-2.2388 { + db eval {SELECT (abs((select (case (abs(max( -t1.a))* -case count(*) when case min(c) when max(d) then count(*) else max(17) end-cast(avg(17) AS integer) then max(11) else cast(avg(t1.d) AS integer) end | max(13)+max(f)) when cast(avg(f) AS integer) then cast(avg(19) AS integer) else (max(11)) end) from t1) | coalesce((select (select max( -t1.d) from t1) from t1 where t1.b-~e in (select d from t1 union select e from t1) and t1.e=t1.d), -t1.e)-17)/abs(t1.e)) | -t1.e FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select +19 from t1 where (coalesce((select max(e) from t1 where ((coalesce((select max(19) from t1 where coalesce((select t1.d*19+(t1.c) from t1 where e in (select -11 from t1 union select t1.f from t1) and (t1.d) in (select ~max(t1.e) from t1 union select (( -count(*))) from t1) or 19>=(a)),f) in ((e),e,t1.a)),t1.a)<=t1.c))),t1.f+t1.c)>13)),11) in (11,17,t1.d) and e>=t1.b))} +} {-499} +do_test randexpr-2.2389 { + db eval {SELECT (abs((select (case (abs(max( -t1.a))* -case count(*) when case min(c) when max(d) then count(*) else max(17) end-cast(avg(17) AS integer) then max(11) else cast(avg(t1.d) AS integer) end & max(13)+max(f)) when cast(avg(f) AS integer) then cast(avg(19) AS integer) else (max(11)) end) from t1) & coalesce((select (select max( -t1.d) from t1) from t1 where t1.b-~e in (select d from t1 union select e from t1) and t1.e=t1.d), -t1.e)-17)/abs(t1.e)) & -t1.e FROM t1 WHERE NOT (exists(select 1 from t1 where coalesce((select +19 from t1 where (coalesce((select max(e) from t1 where ((coalesce((select max(19) from t1 where coalesce((select t1.d*19+(t1.c) from t1 where e in (select -11 from t1 union select t1.f from t1) and (t1.d) in (select ~max(t1.e) from t1 union select (( -count(*))) from t1) or 19>=(a)),f) in ((e),e,t1.a)),t1.a)<=t1.c))),t1.f+t1.c)>13)),11) in (11,17,t1.d) and e>=t1.b))} +} {0} +do_test randexpr-2.2390 { + db eval {SELECT f-coalesce((select 19+t1.e+t1.f from t1 where +11>coalesce((select +t1.f from t1 where t1.a between ~ -(select case cast(avg((abs(t1.d)/abs(t1.b))) AS integer) when -~max(19) then - -case count(distinct c) when count(*) then min((a)) else -min(t1.d) end else max( -t1.b) end from t1) and (select count(distinct f) from t1)),(coalesce((select max(t1.e) from t1 where not 11>=d),t1.c)))),19)-t1.f+19+f*t1.d FROM t1 WHERE f in (select (min((abs(d-t1.c+coalesce((select t1.f from t1 where fcoalesce((select +t1.f from t1 where t1.a between ~ -(select case cast(avg((abs(t1.d)/abs(t1.b))) AS integer) when -~max(19) then - -case count(distinct c) when count(*) then min((a)) else -min(t1.d) end else max( -t1.b) end from t1) and 
(select count(distinct f) from t1)),(coalesce((select max(t1.e) from t1 where not 11>=d),t1.c)))),19)-t1.f+19+f*t1.d FROM t1 WHERE NOT (f in (select (min((abs(d-t1.c+coalesce((select t1.f from t1 where f(c)), -a) from t1 where (e)<=t1.a),d) then d when e=a then t1.f else f end FROM t1 WHERE f in (13*13-(abs(d)/abs(e)),19,case t1.b+coalesce((select max((abs((abs(17)/abs(a)))/abs(11+t1.c))) from t1 where -t1.c>=t1.f and e in (select count(*) | min(t1.c) from t1 union select cast(avg(e) AS integer) from t1) or 11 not in (e,b,a) or t1.c< -a and t1.e not in (t1.e,b,t1.d)),t1.b) when f then 19 else b end-11) or d>t1.b} +} {-400} +do_test randexpr-2.2393 { + db eval {SELECT t1.b-case when b not between 17 and t1.d*11*f*t1.a or t1.c+case when -t1.e between t1.d and coalesce((select coalesce((select max((select ~count(*) from t1)) from t1 where -17*13-19-((d))<>(c)), -a) from t1 where (e)<=t1.a),d) then d when e=a then t1.f else f end FROM t1 WHERE NOT (f in (13*13-(abs(d)/abs(e)),19,case t1.b+coalesce((select max((abs((abs(17)/abs(a)))/abs(11+t1.c))) from t1 where -t1.c>=t1.f and e in (select count(*) | min(t1.c) from t1 union select cast(avg(e) AS integer) from t1) or 11 not in (e,b,a) or t1.c< -a and t1.e not in (t1.e,b,t1.d)),t1.b) when f then 19 else b end-11) or d>t1.b)} +} {} +do_test randexpr-2.2394 { + db eval {SELECT case t1.d when (abs(17)/abs(coalesce((select max(e) from t1 where a not in (t1.e,case when 11+t1.d not between t1.e and -t1.c | t1.a*(abs(b)/abs(c)) then -d when a in (select +11 from t1 union select c from t1) then -t1.c else 13 end | t1.c,t1.b)),d)-f)) | t1.c then case when 13t1.d then 13 else f end else b end FROM t1 WHERE 17>d*~t1.e+case when not t1.f<>(select count(distinct 19)+min(e) from t1) then case when (13-a<= -t1.b) and t1.f<=t1.f then 19*d else f end-t1.f*d+t1.d when b not in (t1.b,19,19) or t1.d not between t1.b and a then t1.f else b end} +} {200} +do_test randexpr-2.2395 { + db eval {SELECT case t1.d when (abs(17)/abs(coalesce((select max(e) from t1 where a not in (t1.e,case when 11+t1.d not between t1.e and -t1.c | t1.a*(abs(b)/abs(c)) then -d when a in (select +11 from t1 union select c from t1) then -t1.c else 13 end | t1.c,t1.b)),d)-f)) | t1.c then case when 13t1.d then 13 else f end else b end FROM t1 WHERE NOT (17>d*~t1.e+case when not t1.f<>(select count(distinct 19)+min(e) from t1) then case when (13-a<= -t1.b) and t1.f<=t1.f then 19*d else f end-t1.f*d+t1.d when b not in (t1.b,19,19) or t1.d not between t1.b and a then t1.f else b end)} +} {} +do_test randexpr-2.2396 { + db eval {SELECT case t1.d when (abs(17)/abs(coalesce((select max(e) from t1 where a not in (t1.e,case when 11+t1.d not between t1.e and -t1.c & t1.a*(abs(b)/abs(c)) then -d when a in (select +11 from t1 union select c from t1) then -t1.c else 13 end & t1.c,t1.b)),d)-f)) & t1.c then case when 13t1.d then 13 else f end else b end FROM t1 WHERE 17>d*~t1.e+case when not t1.f<>(select count(distinct 19)+min(e) from t1) then case when (13-a<= -t1.b) and t1.f<=t1.f then 19*d else f end-t1.f*d+t1.d when b not in (t1.b,19,19) or t1.d not between t1.b and a then t1.f else b end} +} {200} +do_test randexpr-2.2397 { + db eval {SELECT coalesce((select max(d) from t1 where coalesce((select t1.c from t1 where 17>b),d*coalesce((select max(11*19) from t1 where coalesce((select c from t1 where 19<(abs((case a when b then ~case t1.a+(select min(t1.f)+min(b) from t1) when (d) then e else t1.b end else t1.c end+f+t1.f))/abs(t1.b))),c)<=19),13))*d | eb),d*coalesce((select max(11*19) from t1 where 
coalesce((select c from t1 where 19<(abs((case a when b then ~case t1.a+(select min(t1.f)+min(b) from t1) when (d) then e else t1.b end else t1.c end+f+t1.f))/abs(t1.b))),c)<=19),13))*d | eb),d*coalesce((select max(11*19) from t1 where coalesce((select c from t1 where 19<(abs((case a when b then ~case t1.a+(select min(t1.f)+min(b) from t1) when (d) then e else t1.b end else t1.c end+f+t1.f))/abs(t1.b))),c)<=19),13))*d & et1.e and 11 in (t1.e,t1.e,d)),f)+13 then t1.b else c end))- -f)) from t1)) AS integer))-count(distinct 11+11+13)+count(distinct t1.a) from t1) FROM t1 WHERE t1.d=b then t1.c when -f*e<=t1.b then e else 17 end not between 17 and t1.f then t1.c else t1.b end-19+f+19*(t1.c)} +} {-558600} +do_test randexpr-2.2401 { + db eval {SELECT t1.b+ -t1.d*(select max(t1.a) | count(*) | abs(cast(avg(f+a-(select ~(min((abs(a)/abs(13+case when f=coalesce((select t1.b from t1 where d<>t1.e and 11 in (t1.e,t1.e,d)),f)+13 then t1.b else c end))- -f)) from t1)) AS integer))-count(distinct 11+11+13)+count(distinct t1.a) from t1) FROM t1 WHERE NOT (t1.d=b then t1.c when -f*e<=t1.b then e else 17 end not between 17 and t1.f then t1.c else t1.b end-19+f+19*(t1.c))} +} {} +do_test randexpr-2.2402 { + db eval {SELECT t1.b+ -t1.d*(select max(t1.a) & count(*) & abs(cast(avg(f+a-(select ~(min((abs(a)/abs(13+case when f=coalesce((select t1.b from t1 where d<>t1.e and 11 in (t1.e,t1.e,d)),f)+13 then t1.b else c end))- -f)) from t1)) AS integer))-count(distinct 11+11+13)+count(distinct t1.a) from t1) FROM t1 WHERE t1.d=b then t1.c when -f*e<=t1.b then e else 17 end not between 17 and t1.f then t1.c else t1.b end-19+f+19*(t1.c)} +} {200} +do_test randexpr-2.2403 { + db eval {SELECT case when case when t1.e<~17 or (case case case t1.d*17*13 when t1.a then e+c else c end when d*f+coalesce((select 17 from t1 where 13 not between 13 and -13),(17)) then t1.b else b end when t1.a then a else t1.c end*d<>d) then ~t1.c else a end<>t1.f then -t1.d else t1.a end+11-t1.c-e FROM t1 WHERE (case when (~f- -e in (17,case when d in (select t1.f from t1 union select a+(abs(coalesce((select (t1.c) from t1 where a in (select min(13) | (max(t1.e)) from t1 union select count(distinct t1.d) from t1)),b)+13)/abs(e))*19*f from t1) then b when not a between 13 and 13 then t1.f else 17 end,t1.c)) then (select max(t1.e)-count(*) | count(*) from t1) else c end)>(t1.e)} +} {} +do_test randexpr-2.2404 { + db eval {SELECT case when case when t1.e<~17 or (case case case t1.d*17*13 when t1.a then e+c else c end when d*f+coalesce((select 17 from t1 where 13 not between 13 and -13),(17)) then t1.b else b end when t1.a then a else t1.c end*d<>d) then ~t1.c else a end<>t1.f then -t1.d else t1.a end+11-t1.c-e FROM t1 WHERE NOT ((case when (~f- -e in (17,case when d in (select t1.f from t1 union select a+(abs(coalesce((select (t1.c) from t1 where a in (select min(13) | (max(t1.e)) from t1 union select count(distinct t1.d) from t1)),b)+13)/abs(e))*19*f from t1) then b when not a between 13 and 13 then t1.f else 17 end,t1.c)) then (select max(t1.e)-count(*) | count(*) from t1) else c end)>(t1.e))} +} {-1189} +do_test randexpr-2.2405 { + db eval {SELECT coalesce((select max(t1.a-(abs(e*coalesce((select max(d) from t1 where (case when case e when case when (t1.e)=d then t1.a+11 else 17 end then (c) else b end<=a then b when 19 in (t1.b,b,t1.e) then f else -19 end+17 between 19 and c or b in (select t1.c from t1 union select t1.a from t1))),t1.a))/abs( -t1.d))) from t1 where 13=17),t1.b)+(t1.b) FROM t1 WHERE coalesce((select -case when (d)>=(select 
~case +(+~~cast(avg(a) AS integer)*abs(( -count(*))) | count(distinct 11)) when (count(*)) then count(distinct 19) else min(t1.a) end+min(t1.a) from t1)*13-t1.f then 13 else (e) | t1.c*(select (min(17)) from t1) end*case when (c) in (select 11 from t1 union select t1.f from t1) then 17 else 19 end from t1 where t1.cc} +} {400} +do_test randexpr-2.2406 { + db eval {SELECT coalesce((select max(t1.a-(abs(e*coalesce((select max(d) from t1 where (case when case e when case when (t1.e)=d then t1.a+11 else 17 end then (c) else b end<=a then b when 19 in (t1.b,b,t1.e) then f else -19 end+17 between 19 and c or b in (select t1.c from t1 union select t1.a from t1))),t1.a))/abs( -t1.d))) from t1 where 13=17),t1.b)+(t1.b) FROM t1 WHERE NOT (coalesce((select -case when (d)>=(select ~case +(+~~cast(avg(a) AS integer)*abs(( -count(*))) | count(distinct 11)) when (count(*)) then count(distinct 19) else min(t1.a) end+min(t1.a) from t1)*13-t1.f then 13 else (e) | t1.c*(select (min(17)) from t1) end*case when (c) in (select 11 from t1 union select t1.f from t1) then 17 else 19 end from t1 where t1.cc)} +} {} +do_test randexpr-2.2407 { + db eval {SELECT t1.b-t1.d-case when exists(select 1 from t1 where -coalesce((select t1.a from t1 where a in (c,coalesce((select max((abs(t1.d)/abs(e))) from t1 where case t1.b when t1.e then t1.d else t1.d end-17 between b and t1.f),17)-t1.f-t1.e,t1.a) or t1.b in (t1.a,19,t1.f)),19)>=d) then 19-b when t1.c between (f) and t1.f then t1.d else t1.f end-11 FROM t1 WHERE coalesce((select 13 from t1 where ( -t1.a13 or a in (select cast(avg(d) AS integer) | count(*)* -max(t1.f) from t1 union select count(*) from t1) then 17 when not exists(select 1 from t1 where b>=c) then e else 17 end | -t1.a))*b)*t1.d>=b} +} {-811} +do_test randexpr-2.2408 { + db eval {SELECT t1.b-t1.d-case when exists(select 1 from t1 where -coalesce((select t1.a from t1 where a in (c,coalesce((select max((abs(t1.d)/abs(e))) from t1 where case t1.b when t1.e then t1.d else t1.d end-17 between b and t1.f),17)-t1.f-t1.e,t1.a) or t1.b in (t1.a,19,t1.f)),19)>=d) then 19-b when t1.c between (f) and t1.f then t1.d else t1.f end-11 FROM t1 WHERE NOT (coalesce((select 13 from t1 where ( -t1.a13 or a in (select cast(avg(d) AS integer) | count(*)* -max(t1.f) from t1 union select count(*) from t1) then 17 when not exists(select 1 from t1 where b>=c) then e else 17 end | -t1.a))*b)*t1.d>=b)} +} {} +do_test randexpr-2.2409 { + db eval {SELECT 17 | t1.c*t1.a | case when ~ -~(abs(t1.b)/abs(coalesce((select max(coalesce((select max(+t1.d) from t1 where (t1.b not between 17 and t1.b) and d not between c and 19),d)*(t1.d)-d) from t1 where t1.a in (t1.c,(t1.c),t1.f)),19)))-b+19+ -17>=t1.a then 19 when 19 in (select t1.f from t1 union select t1.b from t1) then t1.c else f end-19*t1.c FROM t1 WHERE (17-t1.f in (d | b-17,t1.f,coalesce((select e+(abs(11)/abs(case t1.c when case (abs(a)/abs(11)) when e then t1.c else 17+c end then b else 11+t1.e end)) from t1 where (13 not between + -b and 13)),t1.c)) and 17 between t1.d | t1.f and t1.e)} +} {} +do_test randexpr-2.2410 { + db eval {SELECT 17 | t1.c*t1.a | case when ~ -~(abs(t1.b)/abs(coalesce((select max(coalesce((select max(+t1.d) from t1 where (t1.b not between 17 and t1.b) and d not between c and 19),d)*(t1.d)-d) from t1 where t1.a in (t1.c,(t1.c),t1.f)),19)))-b+19+ -17>=t1.a then 19 when 19 in (select t1.f from t1 union select t1.b from t1) then t1.c else f end-19*t1.c FROM t1 WHERE NOT ((17-t1.f in (d | b-17,t1.f,coalesce((select e+(abs(11)/abs(case t1.c when case 
(abs(a)/abs(11)) when e then t1.c else 17+c end then b else 11+t1.e end)) from t1 where (13 not between + -b and 13)),t1.c)) and 17 between t1.d | t1.f and t1.e))} +} {-715} +do_test randexpr-2.2411 { + db eval {SELECT 17 & t1.c*t1.a & case when ~ -~(abs(t1.b)/abs(coalesce((select max(coalesce((select max(+t1.d) from t1 where (t1.b not between 17 and t1.b) and d not between c and 19),d)*(t1.d)-d) from t1 where t1.a in (t1.c,(t1.c),t1.f)),19)))-b+19+ -17>=t1.a then 19 when 19 in (select t1.f from t1 union select t1.b from t1) then t1.c else f end-19*t1.c FROM t1 WHERE NOT ((17-t1.f in (d | b-17,t1.f,coalesce((select e+(abs(11)/abs(case t1.c when case (abs(a)/abs(11)) when e then t1.c else 17+c end then b else 11+t1.e end)) from t1 where (13 not between + -b and 13)),t1.c)) and 17 between t1.d | t1.f and t1.e))} +} {16} +do_test randexpr-2.2412 { + db eval {SELECT coalesce((select t1.a*a+t1.f | t1.d from t1 where t1.d<+t1.c*d-c*19+t1.e-(abs( -t1.e)/abs(19))), -t1.c+coalesce((select t1.b from t1 where not not (select max(t1.c) from t1)-17+e in (e,t1.c,b) and not (d in (select ( -((max(b)))) from t1 union select min(d) from t1))),t1.b) | t1.a) FROM t1 WHERE not t1.f in (select ((select abs(count(*)) from t1)-11) from t1 union select t1.a-17 from t1) or not exists(select 1 from t1 where case (abs(e)/abs(+c)) when coalesce((select max((select +(count(distinct t1.a)) from t1)) from t1 where not (e<=c*t1.a)),~(case when t1.c<=17 then e when t1.c<=t1.c then e else 17 end)) then 13 else d end*b-a= -d)} +} {10744} +do_test randexpr-2.2413 { + db eval {SELECT coalesce((select t1.a*a+t1.f | t1.d from t1 where t1.d<+t1.c*d-c*19+t1.e-(abs( -t1.e)/abs(19))), -t1.c+coalesce((select t1.b from t1 where not not (select max(t1.c) from t1)-17+e in (e,t1.c,b) and not (d in (select ( -((max(b)))) from t1 union select min(d) from t1))),t1.b) | t1.a) FROM t1 WHERE NOT (not t1.f in (select ((select abs(count(*)) from t1)-11) from t1 union select t1.a-17 from t1) or not exists(select 1 from t1 where case (abs(e)/abs(+c)) when coalesce((select max((select +(count(distinct t1.a)) from t1)) from t1 where not (e<=c*t1.a)),~(case when t1.c<=17 then e when t1.c<=t1.c then e else 17 end)) then 13 else d end*b-a= -d))} +} {} +do_test randexpr-2.2414 { + db eval {SELECT coalesce((select t1.a*a+t1.f & t1.d from t1 where t1.d<+t1.c*d-c*19+t1.e-(abs( -t1.e)/abs(19))), -t1.c+coalesce((select t1.b from t1 where not not (select max(t1.c) from t1)-17+e in (e,t1.c,b) and not (d in (select ( -((max(b)))) from t1 union select min(d) from t1))),t1.b) & t1.a) FROM t1 WHERE not t1.f in (select ((select abs(count(*)) from t1)-11) from t1 union select t1.a-17 from t1) or not exists(select 1 from t1 where case (abs(e)/abs(+c)) when coalesce((select max((select +(count(distinct t1.a)) from t1)) from t1 where not (e<=c*t1.a)),~(case when t1.c<=17 then e when t1.c<=t1.c then e else 17 end)) then 13 else d end*b-a= -d)} +} {256} +do_test randexpr-2.2415 { + db eval {SELECT case a+(abs(+13)/abs(a)) when ~(e)*(abs(b* -t1.d*19-t1.c+a-~t1.b*b-coalesce((select max(case when not exists(select 1 from t1 where d in (f,t1.c,f) or t1.c not in (13,f,t1.a)) then case when t1.a>= -t1.e then (( - -b)) else t1.b end else c end+e) from t1 where (a>11)),13)*17)/abs(e)) then 11 else c end FROM t1 WHERE 17 between a and t1.a} +} {} +do_test randexpr-2.2416 { + db eval {SELECT case a+(abs(+13)/abs(a)) when ~(e)*(abs(b* -t1.d*19-t1.c+a-~t1.b*b-coalesce((select max(case when not exists(select 1 from t1 where d in (f,t1.c,f) or t1.c not in (13,f,t1.a)) then case when 
t1.a>= -t1.e then (( - -b)) else t1.b end else c end+e) from t1 where (a>11)),13)*17)/abs(e)) then 11 else c end FROM t1 WHERE NOT (17 between a and t1.a)} +} {300} +do_test randexpr-2.2417 { + db eval {SELECT +~t1.f-c*case when t1.f between coalesce((select max(a) from t1 where (t1.c-17*t1.b | c=e)),t1.b) and (abs(coalesce((select max(19) from t1 where t1.b not between c+(select min(t1.d) from t1)-t1.d+d+t1.d+t1.c*t1.e and 11),t1.a))/abs(a))-c*b then 17 else 13 end FROM t1 WHERE t1.c not between (abs(+case when not -19 in (case when t1.b+~d+11+f>e then (select -cast(avg(coalesce((select 17 from t1 where (t1.b>=f)),19)) AS integer)*min(d)*(min(e))* -min(b) from t1) when 17+b not between 17 and t1.a then 11 else a end,19,(t1.e)) then f else 19 end*t1.a)/abs(b)) and 19} +} {-4501} +do_test randexpr-2.2418 { + db eval {SELECT +~t1.f-c*case when t1.f between coalesce((select max(a) from t1 where (t1.c-17*t1.b | c=e)),t1.b) and (abs(coalesce((select max(19) from t1 where t1.b not between c+(select min(t1.d) from t1)-t1.d+d+t1.d+t1.c*t1.e and 11),t1.a))/abs(a))-c*b then 17 else 13 end FROM t1 WHERE NOT (t1.c not between (abs(+case when not -19 in (case when t1.b+~d+11+f>e then (select -cast(avg(coalesce((select 17 from t1 where (t1.b>=f)),19)) AS integer)*min(d)*(min(e))* -min(b) from t1) when 17+b not between 17 and t1.a then 11 else a end,19,(t1.e)) then f else 19 end*t1.a)/abs(b)) and 19)} +} {} +do_test randexpr-2.2419 { + db eval {SELECT +~t1.f-c*case when t1.f between coalesce((select max(a) from t1 where (t1.c-17*t1.b & c=e)),t1.b) and (abs(coalesce((select max(19) from t1 where t1.b not between c+(select min(t1.d) from t1)-t1.d+d+t1.d+t1.c*t1.e and 11),t1.a))/abs(a))-c*b then 17 else 13 end FROM t1 WHERE t1.c not between (abs(+case when not -19 in (case when t1.b+~d+11+f>e then (select -cast(avg(coalesce((select 17 from t1 where (t1.b>=f)),19)) AS integer)*min(d)*(min(e))* -min(b) from t1) when 17+b not between 17 and t1.a then 11 else a end,19,(t1.e)) then f else 19 end*t1.a)/abs(b)) and 19} +} {-4501} +do_test randexpr-2.2420 { + db eval {SELECT +(abs(case t1.a when coalesce((select max(13) from t1 where d<=case when t1.b=(select abs(count(distinct t1.e)) from t1) and (select -count(distinct e) from t1) not in ( -11,e,13) then 13 else t1.e end and (not exists(select 1 from t1 where t1.d in (select count(*) from t1 union select max(f) from t1))) and 19=13 or -e=17),coalesce((select (abs(17)/abs(b))-t1.b from t1 where (19)<=t1.f),t1.d))*t1.b then 17 else 13 end)/abs(19)) FROM t1 WHERE a in (select case max(13)-~cast(avg(case when t1.d>13 or exists(select 1 from t1 where not (( -c<=e)) and (b)*t1.a in (select count(*) from t1 union select cast(avg(t1.c) AS integer)+max(f) from t1)) then t1.a else t1.c end) AS integer) when abs(count(distinct t1.d)) then count(distinct (~t1.a)) else count(distinct c)-cast(avg(e) AS integer) end from t1 union select abs(count(distinct a)*max(t1.d)) from t1)} +} {} +do_test randexpr-2.2421 { + db eval {SELECT +(abs(case t1.a when coalesce((select max(13) from t1 where d<=case when t1.b=(select abs(count(distinct t1.e)) from t1) and (select -count(distinct e) from t1) not in ( -11,e,13) then 13 else t1.e end and (not exists(select 1 from t1 where t1.d in (select count(*) from t1 union select max(f) from t1))) and 19=13 or -e=17),coalesce((select (abs(17)/abs(b))-t1.b from t1 where (19)<=t1.f),t1.d))*t1.b then 17 else 13 end)/abs(19)) FROM t1 WHERE NOT (a in (select case max(13)-~cast(avg(case when t1.d>13 or exists(select 1 from t1 where not (( -c<=e)) and 
(b)*t1.a in (select count(*) from t1 union select cast(avg(t1.c) AS integer)+max(f) from t1)) then t1.a else t1.c end) AS integer) when abs(count(distinct t1.d)) then count(distinct (~t1.a)) else count(distinct c)-cast(avg(e) AS integer) end from t1 union select abs(count(distinct a)*max(t1.d)) from t1))} +} {0} +do_test randexpr-2.2422 { + db eval {SELECT +coalesce((select max(f+c) from t1 where t1.b in (select +max(b+t1.b) from t1 union select cast(avg(d-b | f-11-t1.b | coalesce((select ~t1.a from t1 where (t1.a=case when not exists(select 1 from t1 where 13 not in (t1.b,t1.e,t1.f)) then (abs(t1.d)/abs(c)) when c<>17 then c else 19 end)),t1.c)) AS integer) from t1) and t1.e not in (b,d,(13))),t1.b) FROM t1 WHERE (select cast(avg(~t1.f) AS integer) from t1) not in ( -c,17,11*c)} +} {200} +do_test randexpr-2.2423 { + db eval {SELECT +coalesce((select max(f+c) from t1 where t1.b in (select +max(b+t1.b) from t1 union select cast(avg(d-b | f-11-t1.b | coalesce((select ~t1.a from t1 where (t1.a=case when not exists(select 1 from t1 where 13 not in (t1.b,t1.e,t1.f)) then (abs(t1.d)/abs(c)) when c<>17 then c else 19 end)),t1.c)) AS integer) from t1) and t1.e not in (b,d,(13))),t1.b) FROM t1 WHERE NOT ((select cast(avg(~t1.f) AS integer) from t1) not in ( -c,17,11*c))} +} {} +do_test randexpr-2.2424 { + db eval {SELECT +coalesce((select max(f+c) from t1 where t1.b in (select +max(b+t1.b) from t1 union select cast(avg(d-b & f-11-t1.b & coalesce((select ~t1.a from t1 where (t1.a=case when not exists(select 1 from t1 where 13 not in (t1.b,t1.e,t1.f)) then (abs(t1.d)/abs(c)) when c<>17 then c else 19 end)),t1.c)) AS integer) from t1) and t1.e not in (b,d,(13))),t1.b) FROM t1 WHERE (select cast(avg(~t1.f) AS integer) from t1) not in ( -c,17,11*c)} +} {200} +do_test randexpr-2.2425 { + db eval {SELECT t1.e*case when ((13)-13-13-t1.d+t1.c*b>11) then b when case when (t1.a*b not in (t1.a,t1.b,t1.f)) then 11+e else t1.f end<= -f or f in (t1.a,11,13) or exists(select 1 from t1 where not exists(select 1 from t1 where f in (f,d,t1.e))) then t1.d else b end FROM t1 WHERE coalesce((select 19 from t1 where 13-t1.b*c-t1.a+case when case when coalesce((select +t1.b from t1 where a<>b),13) in ((b),t1.f,b) and not t1.b in (select -cast(avg(13) AS integer) from t1 union select cast(avg(11) AS integer) from t1) or a>=13 then (abs(t1.b+a)/abs(11)) else t1.e end-17+t1.d<>b then (19) else f end=e),13)>=t1.c} +} {} +do_test randexpr-2.2426 { + db eval {SELECT t1.e*case when ((13)-13-13-t1.d+t1.c*b>11) then b when case when (t1.a*b not in (t1.a,t1.b,t1.f)) then 11+e else t1.f end<= -f or f in (t1.a,11,13) or exists(select 1 from t1 where not exists(select 1 from t1 where f in (f,d,t1.e))) then t1.d else b end FROM t1 WHERE NOT (coalesce((select 19 from t1 where 13-t1.b*c-t1.a+case when case when coalesce((select +t1.b from t1 where a<>b),13) in ((b),t1.f,b) and not t1.b in (select -cast(avg(13) AS integer) from t1 union select cast(avg(11) AS integer) from t1) or a>=13 then (abs(t1.b+a)/abs(11)) else t1.e end-17+t1.d<>b then (19) else f end=e),13)>=t1.c)} +} {100000} +do_test randexpr-2.2427 { + db eval {SELECT coalesce((select max(d) from t1 where 19+case t1.b when t1.a-a then (abs(case when (e>=case t1.a when coalesce((select max(t1.d) from t1 where (select case -max(c)+cast(avg(t1.a) AS integer) when cast(avg(t1.a) AS integer) then cast(avg(19) AS integer) else cast(avg(t1.b) AS integer) end from t1)>t1.c),coalesce((select max(d) from t1 where 19<>17 and d<=b), -(t1.c)))+e then c else t1.d end+b) then b when 19 between 
c and 17 then 11 else e end)/abs( -t1.b)) else t1.a end+13>=d),d) FROM t1 WHERE a=case when t1.f in (select (max(f)-++(min(t1.a))-max(19)-count(*)) from t1 union select +max(t1.b) from t1) then t1.b*19*19 when t1.f>d then 17-d else t1.f end and exists(select 1 from t1 where not exists(select 1 from t1 where d in (select count(distinct 11)+max(a) from t1 union select cast(avg(13) AS integer) from t1) or 19 not in (t1.b,c,13)))} +} {} +do_test randexpr-2.2428 { + db eval {SELECT coalesce((select max(d) from t1 where 19+case t1.b when t1.a-a then (abs(case when (e>=case t1.a when coalesce((select max(t1.d) from t1 where (select case -max(c)+cast(avg(t1.a) AS integer) when cast(avg(t1.a) AS integer) then cast(avg(19) AS integer) else cast(avg(t1.b) AS integer) end from t1)>t1.c),coalesce((select max(d) from t1 where 19<>17 and d<=b), -(t1.c)))+e then c else t1.d end+b) then b when 19 between c and 17 then 11 else e end)/abs( -t1.b)) else t1.a end+13>=d),d) FROM t1 WHERE NOT (a=case when t1.f in (select (max(f)-++(min(t1.a))-max(19)-count(*)) from t1 union select +max(t1.b) from t1) then t1.b*19*19 when t1.f>d then 17-d else t1.f end and exists(select 1 from t1 where not exists(select 1 from t1 where d in (select count(distinct 11)+max(a) from t1 union select cast(avg(13) AS integer) from t1) or 19 not in (t1.b,c,13))))} +} {400} +do_test randexpr-2.2429 { + db eval {SELECT coalesce((select max(case t1.c | b-t1.d+13 when 17 then c else 13 end) from t1 where case when exists(select 1 from t1 where b<>t1.c) then t1.d when a not between (abs(t1.d)/abs((t1.b))) and (t1.c) or (a in (select ~max(((17))) from t1 union select case -(count(distinct -t1.c)) when count(distinct t1.c) then (count(*)) else max(f) end from t1)) and t1.f>19 or a<>11 then t1.b else t1.b end>(11)),13)+b*(b) FROM t1 WHERE case when not exists(select 1 from t1 where (19<>11+t1.b*c+d)) and 13<>b or t1.d between t1.c and b and f between t1.b and -a or 19 between 19 and 17 or t1.e not between e and t1.d then ~13 else coalesce((select max(t1.b) from t1 where f=a),(a)) end in (select -( -+count(distinct 11)-count(*))*count(distinct ( -t1.c))+(count(distinct t1.c))+max(17)-count(*) from t1 union select count(*) from t1)} +} {} +do_test randexpr-2.2430 { + db eval {SELECT coalesce((select max(case t1.c | b-t1.d+13 when 17 then c else 13 end) from t1 where case when exists(select 1 from t1 where b<>t1.c) then t1.d when a not between (abs(t1.d)/abs((t1.b))) and (t1.c) or (a in (select ~max(((17))) from t1 union select case -(count(distinct -t1.c)) when count(distinct t1.c) then (count(*)) else max(f) end from t1)) and t1.f>19 or a<>11 then t1.b else t1.b end>(11)),13)+b*(b) FROM t1 WHERE NOT (case when not exists(select 1 from t1 where (19<>11+t1.b*c+d)) and 13<>b or t1.d between t1.c and b and f between t1.b and -a or 19 between 19 and 17 or t1.e not between e and t1.d then ~13 else coalesce((select max(t1.b) from t1 where f=a),(a)) end in (select -( -+count(distinct 11)-count(*))*count(distinct ( -t1.c))+(count(distinct t1.c))+max(17)-count(*) from t1 union select count(*) from t1))} +} {40013} +do_test randexpr-2.2431 { + db eval {SELECT coalesce((select max(case t1.c & b-t1.d+13 when 17 then c else 13 end) from t1 where case when exists(select 1 from t1 where b<>t1.c) then t1.d when a not between (abs(t1.d)/abs((t1.b))) and (t1.c) or (a in (select ~max(((17))) from t1 union select case -(count(distinct -t1.c)) when count(distinct t1.c) then (count(*)) else max(f) end from t1)) and t1.f>19 or a<>11 then t1.b else t1.b 
end>(11)),13)+b*(b) FROM t1 WHERE NOT (case when not exists(select 1 from t1 where (19<>11+t1.b*c+d)) and 13<>b or t1.d between t1.c and b and f between t1.b and -a or 19 between 19 and 17 or t1.e not between e and t1.d then ~13 else coalesce((select max(t1.b) from t1 where f=a),(a)) end in (select -( -+count(distinct 11)-count(*))*count(distinct ( -t1.c))+(count(distinct t1.c))+max(17)-count(*) from t1 union select count(*) from t1))} +} {40013} +do_test randexpr-2.2432 { + db eval {SELECT coalesce((select max(coalesce((select max(11) from t1 where t1.b< -t1.b),e)+coalesce((select t1.b from t1 where not case when ~case when -17+t1.d> - -t1.d then t1.b when t1.b between 13 and c or t1.a>= -f then t1.b else t1.e end+t1.c<= -c then 19 else t1.f end not in (c,d,e) or t1.f not between t1.b and 19 and 13>= -t1.f or 13 in (e,(t1.b),11)),t1.b)) from t1 where -(f) not in (f,(t1.e),b)),t1.f) FROM t1 WHERE f in (select f from t1 union select t1.f*t1.b from t1)} +} {700} +do_test randexpr-2.2433 { + db eval {SELECT coalesce((select max(coalesce((select max(11) from t1 where t1.b< -t1.b),e)+coalesce((select t1.b from t1 where not case when ~case when -17+t1.d> - -t1.d then t1.b when t1.b between 13 and c or t1.a>= -f then t1.b else t1.e end+t1.c<= -c then 19 else t1.f end not in (c,d,e) or t1.f not between t1.b and 19 and 13>= -t1.f or 13 in (e,(t1.b),11)),t1.b)) from t1 where -(f) not in (f,(t1.e),b)),t1.f) FROM t1 WHERE NOT (f in (select f from t1 union select t1.f*t1.b from t1))} +} {} +do_test randexpr-2.2434 { + db eval {SELECT ( -+(a)*(abs(case coalesce((select max(d) from t1 where t1.d<>c*(select count(distinct e*coalesce((select 13-17 from t1 where not exists(select 1 from t1 where t1.a in (select f from t1 union select (a) from t1))),t1.f)+(t1.c)) from t1) | t1.e*19 and not t1.a<13),t1.b)+t1.b when t1.d then (t1.f) else t1.c end)/abs(t1.f))-13*t1.f) FROM t1 WHERE f>=t1.a+ -(select (count(*))-(~case cast(avg(t1.b-13-e) AS integer) when count(*) then (max(t1.f*case 13 when 11 then 13 else c end))- -cast(avg(t1.f) AS integer)*count(*)-min(19) else count(distinct d) end)+min(t1.d) | -max(t1.b) from t1) | t1.b*17+t1.d+t1.b-d} +} {} +do_test randexpr-2.2435 { + db eval {SELECT ( -+(a)*(abs(case coalesce((select max(d) from t1 where t1.d<>c*(select count(distinct e*coalesce((select 13-17 from t1 where not exists(select 1 from t1 where t1.a in (select f from t1 union select (a) from t1))),t1.f)+(t1.c)) from t1) | t1.e*19 and not t1.a<13),t1.b)+t1.b when t1.d then (t1.f) else t1.c end)/abs(t1.f))-13*t1.f) FROM t1 WHERE NOT (f>=t1.a+ -(select (count(*))-(~case cast(avg(t1.b-13-e) AS integer) when count(*) then (max(t1.f*case 13 when 11 then 13 else c end))- -cast(avg(t1.f) AS integer)*count(*)-min(19) else count(distinct d) end)+min(t1.d) | -max(t1.b) from t1) | t1.b*17+t1.d+t1.b-d)} +} {-7800} +do_test randexpr-2.2436 { + db eval {SELECT ( -+(a)*(abs(case coalesce((select max(d) from t1 where t1.d<>c*(select count(distinct e*coalesce((select 13-17 from t1 where not exists(select 1 from t1 where t1.a in (select f from t1 union select (a) from t1))),t1.f)+(t1.c)) from t1) & t1.e*19 and not t1.a<13),t1.b)+t1.b when t1.d then (t1.f) else t1.c end)/abs(t1.f))-13*t1.f) FROM t1 WHERE NOT (f>=t1.a+ -(select (count(*))-(~case cast(avg(t1.b-13-e) AS integer) when count(*) then (max(t1.f*case 13 when 11 then 13 else c end))- -cast(avg(t1.f) AS integer)*count(*)-min(19) else count(distinct d) end)+min(t1.d) | -max(t1.b) from t1) | t1.b*17+t1.d+t1.b-d)} +} {-7800} +do_test randexpr-2.2437 { + db eval {SELECT 
coalesce((select t1.a from t1 where c in (select a from t1 union select t1.a*f from t1) and t1.b in (select (select count(*) from t1)*11 from t1 union select -13 from t1)),13) FROM t1 WHERE (d=17 then d when f<=t1.c then t1.a else d end,t1.e,e) or not 19>c then t1.c-19 else 13 end<>t1.c} +} {-180500} +do_test randexpr-2.2440 { + db eval {SELECT case when (not exists(select 1 from t1 where a<=case a | e-case t1.f when c then e else 19 end | t1.b when t1.c then t1.e else 19 end+d)) or f between a and b and (t1.f) not in (d,t1.e,11) then case 17+e | t1.c when 19 then 13 else -(19) end | 11 else a end-t1.f-f*t1.c FROM t1 WHERE NOT (case when t1.b in ( - -case when case when t1.f-case when 19<=c then a else 19 end between 13 and t1.d and not a not in ((f),d,f) then t1.c-a when t1.d not in ( -b,c,11) then 13 else f end>=17 then d when f<=t1.c then t1.a else d end,t1.e,e) or not 19>c then t1.c-19 else 13 end<>t1.c)} +} {} +do_test randexpr-2.2441 { + db eval {SELECT case when (not exists(select 1 from t1 where a<=case a & e-case t1.f when c then e else 19 end & t1.b when t1.c then t1.e else 19 end+d)) or f between a and b and (t1.f) not in (d,t1.e,11) then case 17+e & t1.c when 19 then 13 else -(19) end & 11 else a end-t1.f-f*t1.c FROM t1 WHERE case when t1.b in ( - -case when case when t1.f-case when 19<=c then a else 19 end between 13 and t1.d and not a not in ((f),d,f) then t1.c-a when t1.d not in ( -b,c,11) then 13 else f end>=17 then d when f<=t1.c then t1.a else d end,t1.e,e) or not 19>c then t1.c-19 else 13 end<>t1.c} +} {-180500} +do_test randexpr-2.2442 { + db eval {SELECT coalesce((select d-(abs(case when +coalesce((select 19 from t1 where not exists(select 1 from t1 where d | f in (select ~case count(distinct f) when ( -(cast(avg((( -19))) AS integer))) then (count(*)) else count(distinct 17) end*(max(t1.d)) from t1 union select min(b) from t1))),13) | t1.b+t1.e in (13,(t1.b),t1.a) then t1.a when a in (select t1.f from t1 union select t1.b from t1) then t1.e else a end)/abs(f))-t1.e from t1 where not exists(select 1 from t1 where not 19 in (select t1.b from t1 union select t1.f from t1) or 13 in (select e from t1 union select e from t1))),f) FROM t1 WHERE not exists(select 1 from t1 where case when not exists(select 1 from t1 where a<+case c when (abs(t1.b)/abs(e))+t1.b-b-t1.b then (t1.d)*t1.c else t1.d end) then b else 19 end in (select -count(distinct c)*(max(f)) | ~+case min(t1.a) when cast(avg(c) AS integer) then cast(avg(t1.a) AS integer) | count(distinct t1.d)* -count(distinct t1.e) else ( -count(*)) end | -cast(avg(t1.d) AS integer)+min(a)*count(distinct b) from t1 union select max(13) from t1))} +} {600} +do_test randexpr-2.2443 { + db eval {SELECT coalesce((select d-(abs(case when +coalesce((select 19 from t1 where not exists(select 1 from t1 where d | f in (select ~case count(distinct f) when ( -(cast(avg((( -19))) AS integer))) then (count(*)) else count(distinct 17) end*(max(t1.d)) from t1 union select min(b) from t1))),13) | t1.b+t1.e in (13,(t1.b),t1.a) then t1.a when a in (select t1.f from t1 union select t1.b from t1) then t1.e else a end)/abs(f))-t1.e from t1 where not exists(select 1 from t1 where not 19 in (select t1.b from t1 union select t1.f from t1) or 13 in (select e from t1 union select e from t1))),f) FROM t1 WHERE NOT (not exists(select 1 from t1 where case when not exists(select 1 from t1 where a<+case c when (abs(t1.b)/abs(e))+t1.b-b-t1.b then (t1.d)*t1.c else t1.d end) then b else 19 end in (select -count(distinct c)*(max(f)) | ~+case min(t1.a) when 
cast(avg(c) AS integer) then cast(avg(t1.a) AS integer) | count(distinct t1.d)* -count(distinct t1.e) else ( -count(*)) end | -cast(avg(t1.d) AS integer)+min(a)*count(distinct b) from t1 union select max(13) from t1)))} +} {} +do_test randexpr-2.2444 { + db eval {SELECT coalesce((select d-(abs(case when +coalesce((select 19 from t1 where not exists(select 1 from t1 where d & f in (select ~case count(distinct f) when ( -(cast(avg((( -19))) AS integer))) then (count(*)) else count(distinct 17) end*(max(t1.d)) from t1 union select min(b) from t1))),13) & t1.b+t1.e in (13,(t1.b),t1.a) then t1.a when a in (select t1.f from t1 union select t1.b from t1) then t1.e else a end)/abs(f))-t1.e from t1 where not exists(select 1 from t1 where not 19 in (select t1.b from t1 union select t1.f from t1) or 13 in (select e from t1 union select e from t1))),f) FROM t1 WHERE not exists(select 1 from t1 where case when not exists(select 1 from t1 where a<+case c when (abs(t1.b)/abs(e))+t1.b-b-t1.b then (t1.d)*t1.c else t1.d end) then b else 19 end in (select -count(distinct c)*(max(f)) | ~+case min(t1.a) when cast(avg(c) AS integer) then cast(avg(t1.a) AS integer) | count(distinct t1.d)* -count(distinct t1.e) else ( -count(*)) end | -cast(avg(t1.d) AS integer)+min(a)*count(distinct b) from t1 union select max(13) from t1))} +} {600} +do_test randexpr-2.2445 { + db eval {SELECT (t1.e)+t1.c-coalesce((select max((abs(coalesce((select case when f>=(abs(coalesce((select max(a) from t1 where 11*(t1.f)*a*t1.d=11),t1.b))/abs(t1.d))+t1.b then c when not exists(select 1 from t1 where t1.c<>b) then 13 else 11 end from t1 where (13 not in (f,t1.c,13))),b)+t1.d)/abs(t1.f))) from t1 where b<>t1.a),t1.a) | t1.d FROM t1 WHERE ~a<>a or d<>t1.e} +} {927} +do_test randexpr-2.2446 { + db eval {SELECT (t1.e)+t1.c-coalesce((select max((abs(coalesce((select case when f>=(abs(coalesce((select max(a) from t1 where 11*(t1.f)*a*t1.d=11),t1.b))/abs(t1.d))+t1.b then c when not exists(select 1 from t1 where t1.c<>b) then 13 else 11 end from t1 where (13 not in (f,t1.c,13))),b)+t1.d)/abs(t1.f))) from t1 where b<>t1.a),t1.a) | t1.d FROM t1 WHERE NOT (~a<>a or d<>t1.e)} +} {} +do_test randexpr-2.2447 { + db eval {SELECT (t1.e)+t1.c-coalesce((select max((abs(coalesce((select case when f>=(abs(coalesce((select max(a) from t1 where 11*(t1.f)*a*t1.d=11),t1.b))/abs(t1.d))+t1.b then c when not exists(select 1 from t1 where t1.c<>b) then 13 else 11 end from t1 where (13 not in (f,t1.c,13))),b)+t1.d)/abs(t1.f))) from t1 where b<>t1.a),t1.a) & t1.d FROM t1 WHERE ~a<>a or d<>t1.e} +} {272} +do_test randexpr-2.2448 { + db eval {SELECT case when coalesce((select 11 from t1 where exists(select 1 from t1 where t1.a<= -19)),t1.c-t1.e*e-19-t1.d-t1.e-~a*coalesce((select max(b) from t1 where b>(abs(+c+case c when 11 then t1.f else 11 end)/abs(b))),t1.e)-f+t1.c+19+e-t1.e)<=t1.f then e else c end FROM t1 WHERE (abs(t1.d)/abs(~b)) in (select count(distinct case when t1.c+t1.fe and ((d) between t1.a and d) then t1.b else 13 end else 11 end) from t1 union select count(*) from t1)} +} {500} +do_test randexpr-2.2449 { + db eval {SELECT case when coalesce((select 11 from t1 where exists(select 1 from t1 where t1.a<= -19)),t1.c-t1.e*e-19-t1.d-t1.e-~a*coalesce((select max(b) from t1 where b>(abs(+c+case c when 11 then t1.f else 11 end)/abs(b))),t1.e)-f+t1.c+19+e-t1.e)<=t1.f then e else c end FROM t1 WHERE NOT ((abs(t1.d)/abs(~b)) in (select count(distinct case when t1.c+t1.fe and ((d) between t1.a and d) then t1.b else 13 end else 11 end) from t1 union select count(*) 
from t1))} +} {} +do_test randexpr-2.2450 { + db eval {SELECT case when 19*case when case when t1.e>((t1.d)) and 11>t1.d then -17 else e end in (select count(distinct a) from t1 union select min(d) from t1) then (d) else e end | t1.c+a in (select count(*) from t1 union select case +(max(19))+count(distinct -t1.d) when min(19) then cast(avg(e) AS integer) else cast(avg(t1.a) AS integer) end from t1) and 17 between 17 and t1.b then -(t1.e) when b between t1.c and 13 then b else 11 end+t1.f*(t1.d)+t1.c FROM t1 WHERE ~+e+11>b} +} {} +do_test randexpr-2.2451 { + db eval {SELECT case when 19*case when case when t1.e>((t1.d)) and 11>t1.d then -17 else e end in (select count(distinct a) from t1 union select min(d) from t1) then (d) else e end | t1.c+a in (select count(*) from t1 union select case +(max(19))+count(distinct -t1.d) when min(19) then cast(avg(e) AS integer) else cast(avg(t1.a) AS integer) end from t1) and 17 between 17 and t1.b then -(t1.e) when b between t1.c and 13 then b else 11 end+t1.f*(t1.d)+t1.c FROM t1 WHERE NOT (~+e+11>b)} +} {240311} +do_test randexpr-2.2452 { + db eval {SELECT case when 19*case when case when t1.e>((t1.d)) and 11>t1.d then -17 else e end in (select count(distinct a) from t1 union select min(d) from t1) then (d) else e end & t1.c+a in (select count(*) from t1 union select case +(max(19))+count(distinct -t1.d) when min(19) then cast(avg(e) AS integer) else cast(avg(t1.a) AS integer) end from t1) and 17 between 17 and t1.b then -(t1.e) when b between t1.c and 13 then b else 11 end+t1.f*(t1.d)+t1.c FROM t1 WHERE NOT (~+e+11>b)} +} {240311} +do_test randexpr-2.2453 { + db eval {SELECT coalesce((select max((e)) from t1 where not (((abs(c)/abs((abs(t1.e)/abs(t1.d)))) between d and a+t1.f+19-a or (not exists(select 1 from t1 where f*11 in (19,t1.d,case when (11 in (19,17,19) and t1.a>t1.d) then t1.e-19 else t1.e end+f))) and a<11) and 19<>a)),c+d) FROM t1 WHERE 17+case b*f when b then a+e else d end*t1.b<=case when not f=11 then c when t1.ed and exists(select 1 from t1 where 19 in (t1.e,t1.a,(e)))),a)*c from t1 union select f from t1)),t1.a)+f+17 then t1.c else b end} +} {} +do_test randexpr-2.2454 { + db eval {SELECT coalesce((select max((e)) from t1 where not (((abs(c)/abs((abs(t1.e)/abs(t1.d)))) between d and a+t1.f+19-a or (not exists(select 1 from t1 where f*11 in (19,t1.d,case when (11 in (19,17,19) and t1.a>t1.d) then t1.e-19 else t1.e end+f))) and a<11) and 19<>a)),c+d) FROM t1 WHERE NOT (17+case b*f when b then a+e else d end*t1.b<=case when not f=11 then c when t1.ed and exists(select 1 from t1 where 19 in (t1.e,t1.a,(e)))),a)*c from t1 union select f from t1)),t1.a)+f+17 then t1.c else b end)} +} {500} +do_test randexpr-2.2455 { + db eval {SELECT ((select cast(avg(case a when t1.d then (abs(coalesce((select coalesce((select coalesce((select e-a from t1 where not exists(select 1 from t1 where t1.f>11 or t1.a= -f)), -t1.a) from t1 where 11<= -t1.f),t1.f)-13 from t1 where not exists(select 1 from t1 where t1.e in (f,t1.b,d))),t1.d))/abs(11)) else (17) end) AS integer)+abs( -count(distinct e)+count(*)) | -count(*)*+min(e)*count(distinct b)*cast(avg(11) AS integer) from t1)) FROM t1 WHERE d= -19} +} {} +do_test randexpr-2.2456 { + db eval {SELECT ((select cast(avg(case a when t1.d then (abs(coalesce((select coalesce((select coalesce((select e-a from t1 where not exists(select 1 from t1 where t1.f>11 or t1.a= -f)), -t1.a) from t1 where 11<= -t1.f),t1.f)-13 from t1 where not exists(select 1 from t1 where t1.e in (f,t1.b,d))),t1.d))/abs(11)) else (17) end) AS 
integer)+abs( -count(distinct e)+count(*)) | -count(*)*+min(e)*count(distinct b)*cast(avg(11) AS integer) from t1)) FROM t1 WHERE NOT (d= -19)} +} {-5483} +do_test randexpr-2.2457 { + db eval {SELECT ((select cast(avg(case a when t1.d then (abs(coalesce((select coalesce((select coalesce((select e-a from t1 where not exists(select 1 from t1 where t1.f>11 or t1.a= -f)), -t1.a) from t1 where 11<= -t1.f),t1.f)-13 from t1 where not exists(select 1 from t1 where t1.e in (f,t1.b,d))),t1.d))/abs(11)) else (17) end) AS integer)+abs( -count(distinct e)+count(*)) & -count(*)*+min(e)*count(distinct b)*cast(avg(11) AS integer) from t1)) FROM t1 WHERE NOT (d= -19)} +} {0} +do_test randexpr-2.2458 { + db eval {SELECT 11+t1.e*c | t1.a+t1.a*coalesce((select max(b*t1.d*case when c in (select t1.b from t1 union select case when (t1.b>=d) then 19 else case when ((((t1.a<>t1.b)))) then b when f<=11 then case when e not in (19,t1.d,a) then coalesce((select 11 from t1 where e between a and e),e) when 11<=e then t1.a else 13 end else t1.c end end from t1) then 19 else (t1.d) end-a) from t1 where (t1.d)<= -e),t1.f) FROM t1 WHERE ((f not between t1.a and (abs(19)/abs(17 | (a))) and ((exists(select 1 from t1 where coalesce((select case e*(17) when b-(abs((abs(11)/abs(t1.c+e+t1.c)))/abs(17-((t1.f-t1.e | 11)))) then e else a end from t1 where t1.c not in (19,t1.b,t1.d)),c)=b)))))} +} {} +do_test randexpr-2.2459 { + db eval {SELECT 11+t1.e*c | t1.a+t1.a*coalesce((select max(b*t1.d*case when c in (select t1.b from t1 union select case when (t1.b>=d) then 19 else case when ((((t1.a<>t1.b)))) then b when f<=11 then case when e not in (19,t1.d,a) then coalesce((select 11 from t1 where e between a and e),e) when 11<=e then t1.a else 13 end else t1.c end end from t1) then 19 else (t1.d) end-a) from t1 where (t1.d)<= -e),t1.f) FROM t1 WHERE NOT (((f not between t1.a and (abs(19)/abs(17 | (a))) and ((exists(select 1 from t1 where coalesce((select case e*(17) when b-(abs((abs(11)/abs(t1.c+e+t1.c)))/abs(17-((t1.f-t1.e | 11)))) then e else a end from t1 where t1.c not in (19,t1.b,t1.d)),c)=b))))))} +} {191487} +do_test randexpr-2.2460 { + db eval {SELECT 11+t1.e*c & t1.a+t1.a*coalesce((select max(b*t1.d*case when c in (select t1.b from t1 union select case when (t1.b>=d) then 19 else case when ((((t1.a<>t1.b)))) then b when f<=11 then case when e not in (19,t1.d,a) then coalesce((select 11 from t1 where e between a and e),e) when 11<=e then t1.a else 13 end else t1.c end end from t1) then 19 else (t1.d) end-a) from t1 where (t1.d)<= -e),t1.f) FROM t1 WHERE NOT (((f not between t1.a and (abs(19)/abs(17 | (a))) and ((exists(select 1 from t1 where coalesce((select case e*(17) when b-(abs((abs(11)/abs(t1.c+e+t1.c)))/abs(17-((t1.f-t1.e | 11)))) then e else a end from t1 where t1.c not in (19,t1.b,t1.d)),c)=b))))))} +} {18624} +do_test randexpr-2.2461 { + db eval {SELECT (select (case +~ -abs(count(distinct + -(select (case ~((min(t1.b)+max(a))+count(*))*cast(avg(e) AS integer) when count(distinct (17)) then count(distinct t1.d) else max(a) end) from t1)*(17)-t1.f+case (abs( -t1.e)/abs(17)) when t1.f then b else 11 end)) | max(c)-case max(f) when (max(t1.f)) then min(c) else cast(avg((t1.d)) AS integer) end when count(*) then min(t1.a) else cast(avg(t1.c) AS integer) end) from t1) FROM t1 WHERE 13 in ((select ~ -abs((abs( -max(c)+count(distinct c)-~case +max(f) when min(13) then count(distinct t1.e) else count(*) end-((cast(avg(t1.c) AS integer)))))) from t1),~t1.c-t1.e,case when exists(select 1 from t1 where not exists(select 1 from 
t1 where t1.a in (select case when t1.ca then b else (c) end+t1.d from t1 union select -t1.e from t1)) or 11 not between b and d or e>e) then t1.f else t1.f end*t1.b- -t1.b)} +} {} +do_test randexpr-2.2462 { + db eval {SELECT (select (case +~ -abs(count(distinct + -(select (case ~((min(t1.b)+max(a))+count(*))*cast(avg(e) AS integer) when count(distinct (17)) then count(distinct t1.d) else max(a) end) from t1)*(17)-t1.f+case (abs( -t1.e)/abs(17)) when t1.f then b else 11 end)) | max(c)-case max(f) when (max(t1.f)) then min(c) else cast(avg((t1.d)) AS integer) end when count(*) then min(t1.a) else cast(avg(t1.c) AS integer) end) from t1) FROM t1 WHERE NOT (13 in ((select ~ -abs((abs( -max(c)+count(distinct c)-~case +max(f) when min(13) then count(distinct t1.e) else count(*) end-((cast(avg(t1.c) AS integer)))))) from t1),~t1.c-t1.e,case when exists(select 1 from t1 where not exists(select 1 from t1 where t1.a in (select case when t1.ca then b else (c) end+t1.d from t1 union select -t1.e from t1)) or 11 not between b and d or e>e) then t1.f else t1.f end*t1.b- -t1.b))} +} {300} +do_test randexpr-2.2463 { + db eval {SELECT (select (case +~ -abs(count(distinct + -(select (case ~((min(t1.b)+max(a))+count(*))*cast(avg(e) AS integer) when count(distinct (17)) then count(distinct t1.d) else max(a) end) from t1)*(17)-t1.f+case (abs( -t1.e)/abs(17)) when t1.f then b else 11 end)) & max(c)-case max(f) when (max(t1.f)) then min(c) else cast(avg((t1.d)) AS integer) end when count(*) then min(t1.a) else cast(avg(t1.c) AS integer) end) from t1) FROM t1 WHERE NOT (13 in ((select ~ -abs((abs( -max(c)+count(distinct c)-~case +max(f) when min(13) then count(distinct t1.e) else count(*) end-((cast(avg(t1.c) AS integer)))))) from t1),~t1.c-t1.e,case when exists(select 1 from t1 where not exists(select 1 from t1 where t1.a in (select case when t1.ca then b else (c) end+t1.d from t1 union select -t1.e from t1)) or 11 not between b and d or e>e) then t1.f else t1.f end*t1.b- -t1.b))} +} {300} +do_test randexpr-2.2464 { + db eval {SELECT case when (abs(~(t1.e))/abs(case when (( -13c)),f)+t1.b when 13 then t1.c else t1.b end<>a) or t1.a=a),e+t1.e))/abs((f))) when a then e else f end) from t1 where t1.a>t1.d),19)) from t1) FROM t1 WHERE 19*~case when (case when t1.f<>t1.a*17*(abs(t1.e)/abs(a))*a*(select case +min(11)*cast(avg(b) AS integer)*cast(avg(19) AS integer) when count(distinct t1.f) then max( -t1.e) else cast(avg( -f) AS integer) end from t1)*(select min(t1.b) from t1)*t1.d then coalesce((select max(11) from t1 where t1.a between t1.d and 17 and 19 not between 17 and a),17) else 17 end in (select t1.f from t1 union select 17 from t1)) then ~t1.b else t1.d end<=b} +} {} +do_test randexpr-2.2471 { + db eval {SELECT (select min(coalesce((select max(t1.c-case t1.e | (abs(coalesce((select max(t1.b) from t1 where 19c)),f)+t1.b when 13 then t1.c else t1.b end<>a) or t1.a=a),e+t1.e))/abs((f))) when a then e else f end) from t1 where t1.a>t1.d),19)) from t1) FROM t1 WHERE NOT (19*~case when (case when t1.f<>t1.a*17*(abs(t1.e)/abs(a))*a*(select case +min(11)*cast(avg(b) AS integer)*cast(avg(19) AS integer) when count(distinct t1.f) then max( -t1.e) else cast(avg( -f) AS integer) end from t1)*(select min(t1.b) from t1)*t1.d then coalesce((select max(11) from t1 where t1.a between t1.d and 17 and 19 not between 17 and a),17) else 17 end in (select t1.f from t1 union select 17 from t1)) then ~t1.b else t1.d end<=b)} +} {19} +do_test randexpr-2.2472 { + db eval {SELECT (select min(coalesce((select max(t1.c-case t1.e & 
(abs(coalesce((select max(t1.b) from t1 where 19c)),f)+t1.b when 13 then t1.c else t1.b end<>a) or t1.a=a),e+t1.e))/abs((f))) when a then e else f end) from t1 where t1.a>t1.d),19)) from t1) FROM t1 WHERE NOT (19*~case when (case when t1.f<>t1.a*17*(abs(t1.e)/abs(a))*a*(select case +min(11)*cast(avg(b) AS integer)*cast(avg(19) AS integer) when count(distinct t1.f) then max( -t1.e) else cast(avg( -f) AS integer) end from t1)*(select min(t1.b) from t1)*t1.d then coalesce((select max(11) from t1 where t1.a between t1.d and 17 and 19 not between 17 and a),17) else 17 end in (select t1.f from t1 union select 17 from t1)) then ~t1.b else t1.d end<=b)} +} {19} +do_test randexpr-2.2473 { + db eval {SELECT -case (select ~count(*)-~abs(~ -(count(*)+count(*)))*count(*) from t1)*13++coalesce((select max( -+17+case when f not between e and t1.c and t1.f between t1.e and t1.a then e when (b)>19 then f else t1.b end) from t1 where 17 not in (a,t1.f,17) or (d>=t1.e) or a<=17),d) when t1.c then 19 else t1.f end FROM t1 WHERE (19<+case when not 17>e then +t1.f when e | 13 in (select ~(abs(a)/abs(t1.b)) from t1 union select t1.f from t1) then case when f between case case b*f+19-case t1.c when f then 13 else c end when t1.d then b else b end*11 when 19 then 13 else t1.c end and t1.f then c when t1.b<=c then t1.d else 13 end else t1.d end)} +} {-600} +do_test randexpr-2.2474 { + db eval {SELECT -case (select ~count(*)-~abs(~ -(count(*)+count(*)))*count(*) from t1)*13++coalesce((select max( -+17+case when f not between e and t1.c and t1.f between t1.e and t1.a then e when (b)>19 then f else t1.b end) from t1 where 17 not in (a,t1.f,17) or (d>=t1.e) or a<=17),d) when t1.c then 19 else t1.f end FROM t1 WHERE NOT ((19<+case when not 17>e then +t1.f when e | 13 in (select ~(abs(a)/abs(t1.b)) from t1 union select t1.f from t1) then case when f between case case b*f+19-case t1.c when f then 13 else c end when t1.d then b else b end*11 when 19 then 13 else t1.c end and t1.f then c when t1.b<=c then t1.d else 13 end else t1.d end))} +} {} +do_test randexpr-2.2475 { + db eval {SELECT case when e not in (t1.a,t1.b,t1.e-t1.a) then f-coalesce((select max(~19-t1.b-f) from t1 where case when (select count(*)*count(*) from t1)-coalesce((select max(13) from t1 where (abs(13)/abs(19-f)) in (select count(distinct f) from t1 union select ~ - -count(distinct -f) from t1)),t1.b) between t1.f and t1.d then d else f end between t1.f and a),t1.f)+ -t1.f else t1.e end FROM t1 WHERE d not in (c,t1.d, -coalesce((select max( -coalesce((select f from t1 where t1.b>t1.c* - -t1.f),case when (t1.e-t1.e | 19*t1.c+e<19 and 19<=e or not exists(select 1 from t1 where t1.a<=t1.d or e in (a,t1.e,b))) then -e when f in ((t1.a),d,11) then 13 else d end)) from t1 where c between 19 and f),b))} +} {} +do_test randexpr-2.2476 { + db eval {SELECT case when e not in (t1.a,t1.b,t1.e-t1.a) then f-coalesce((select max(~19-t1.b-f) from t1 where case when (select count(*)*count(*) from t1)-coalesce((select max(13) from t1 where (abs(13)/abs(19-f)) in (select count(distinct f) from t1 union select ~ - -count(distinct -f) from t1)),t1.b) between t1.f and t1.d then d else f end between t1.f and a),t1.f)+ -t1.f else t1.e end FROM t1 WHERE NOT (d not in (c,t1.d, -coalesce((select max( -coalesce((select f from t1 where t1.b>t1.c* - -t1.f),case when (t1.e-t1.e | 19*t1.c+e<19 and 19<=e or not exists(select 1 from t1 where t1.a<=t1.d or e in (a,t1.e,b))) then -e when f in ((t1.a),d,11) then 13 else d end)) from t1 where c between 19 and f),b)))} +} {-600} +do_test 
randexpr-2.2477 { + db eval {SELECT -case when (17<>t1.b-d+t1.a | t1.f) then ~c when ((19)-coalesce((select case t1.d-t1.f when (abs(((select abs(cast(avg(13) AS integer) | cast(avg( -(t1.b)) AS integer)) from t1)))/abs(case d when t1.b then -11 else 19 end))*17 then t1.e else t1.a end from t1 where (19 not in (f,t1.d,t1.e))),t1.d)<>a) then t1.d else t1.c end FROM t1 WHERE a in (~b-t1.f*c,13 | coalesce((select max(t1.a) from t1 where -t1.a-~case when b in (case when t1.e in (select 11 from t1 union select d from t1) and 13>19 then 13*c when f not in (t1.d,c,13) then t1.a else a end,t1.f,d) then a else 11 end+b in (select +(min(11)) from t1 union select max(11)+cast(avg(19) AS integer) from t1)),13),t1.f)} +} {} +do_test randexpr-2.2478 { + db eval {SELECT -case when (17<>t1.b-d+t1.a | t1.f) then ~c when ((19)-coalesce((select case t1.d-t1.f when (abs(((select abs(cast(avg(13) AS integer) | cast(avg( -(t1.b)) AS integer)) from t1)))/abs(case d when t1.b then -11 else 19 end))*17 then t1.e else t1.a end from t1 where (19 not in (f,t1.d,t1.e))),t1.d)<>a) then t1.d else t1.c end FROM t1 WHERE NOT (a in (~b-t1.f*c,13 | coalesce((select max(t1.a) from t1 where -t1.a-~case when b in (case when t1.e in (select 11 from t1 union select d from t1) and 13>19 then 13*c when f not in (t1.d,c,13) then t1.a else a end,t1.f,d) then a else 11 end+b in (select +(min(11)) from t1 union select max(11)+cast(avg(19) AS integer) from t1)),13),t1.f))} +} {301} +do_test randexpr-2.2479 { + db eval {SELECT -case when (17<>t1.b-d+t1.a & t1.f) then ~c when ((19)-coalesce((select case t1.d-t1.f when (abs(((select abs(cast(avg(13) AS integer) & cast(avg( -(t1.b)) AS integer)) from t1)))/abs(case d when t1.b then -11 else 19 end))*17 then t1.e else t1.a end from t1 where (19 not in (f,t1.d,t1.e))),t1.d)<>a) then t1.d else t1.c end FROM t1 WHERE NOT (a in (~b-t1.f*c,13 | coalesce((select max(t1.a) from t1 where -t1.a-~case when b in (case when t1.e in (select 11 from t1 union select d from t1) and 13>19 then 13*c when f not in (t1.d,c,13) then t1.a else a end,t1.f,d) then a else 11 end+b in (select +(min(11)) from t1 union select max(11)+cast(avg(19) AS integer) from t1)),13),t1.f))} +} {301} +do_test randexpr-2.2480 { + db eval {SELECT case f when case when +t1.a between a and case e when 17 then t1.e else f-t1.e+a-((select ++count(distinct d) | cast(avg(t1.e) AS integer)+count(distinct e) | -count(*) from t1)) end-t1.b*f and t1.b | t1.b-b<=c then -t1.b+d when 11 in (select t1.e from t1 union select 11 from t1) then 13 else a end then b else c end FROM t1 WHERE case when ++c-13*d>t1.a and f=t1.d-t1.b then (select count(distinct 11)*(+cast(avg(t1.b) AS integer)+case cast(avg(e) AS integer) when max(d) | min(11) then min(f) else ( -min(t1.b)) end*(count(*)))* -min(19) from t1) else t1.e end*11 in (select a from t1 union select t1.e from t1) and e=17 or 11 not in ( -t1.d,f,t1.f)} +} {300} +do_test randexpr-2.2481 { + db eval {SELECT case f when case when +t1.a between a and case e when 17 then t1.e else f-t1.e+a-((select ++count(distinct d) | cast(avg(t1.e) AS integer)+count(distinct e) | -count(*) from t1)) end-t1.b*f and t1.b | t1.b-b<=c then -t1.b+d when 11 in (select t1.e from t1 union select 11 from t1) then 13 else a end then b else c end FROM t1 WHERE NOT (case when ++c-13*d>t1.a and f=t1.d-t1.b then (select count(distinct 11)*(+cast(avg(t1.b) AS integer)+case cast(avg(e) AS integer) when max(d) | min(11) then min(f) else ( -min(t1.b)) end*(count(*)))* -min(19) from t1) else t1.e end*11 in (select a from t1 union 
select t1.e from t1) and e=17 or 11 not in ( -t1.d,f,t1.f))} +} {} +do_test randexpr-2.2482 { + db eval {SELECT case f when case when +t1.a between a and case e when 17 then t1.e else f-t1.e+a-((select ++count(distinct d) & cast(avg(t1.e) AS integer)+count(distinct e) & -count(*) from t1)) end-t1.b*f and t1.b & t1.b-b<=c then -t1.b+d when 11 in (select t1.e from t1 union select 11 from t1) then 13 else a end then b else c end FROM t1 WHERE case when ++c-13*d>t1.a and f=t1.d-t1.b then (select count(distinct 11)*(+cast(avg(t1.b) AS integer)+case cast(avg(e) AS integer) when max(d) | min(11) then min(f) else ( -min(t1.b)) end*(count(*)))* -min(19) from t1) else t1.e end*11 in (select a from t1 union select t1.e from t1) and e=17 or 11 not in ( -t1.d,f,t1.f)} +} {300} +do_test randexpr-2.2483 { + db eval {SELECT -(19)-coalesce((select max(t1.e) from t1 where c not in (t1.b,17,t1.c)),case coalesce((select b+f+coalesce((select t1.a from t1 where t1.e>=t1.e+t1.a+t1.c or t1.f between c and t1.f),t1.e) from t1 where 11<>13 or t1.a=b),t1.c)*f+a when a then 17 else 17 end+t1.e)+a FROM t1 WHERE e | a not between -case when not c between case when exists(select 1 from t1 where (f not in (coalesce((select max(t1.d) from t1 where t1.e=t1.d),t1.e)+a,19,t1.c)) or not exists(select 1 from t1 where a in (19,a,e) and 11>=t1.f)) or 11<17 then t1.e+19*t1.f-b when t1.c in (e,t1.b,e) then f else t1.f end and t1.b or (11>=(t1.a)) then e when t1.a=t1.a then (select count(*) from t1) else e end and t1.b} +} {-436} +do_test randexpr-2.2484 { + db eval {SELECT -(19)-coalesce((select max(t1.e) from t1 where c not in (t1.b,17,t1.c)),case coalesce((select b+f+coalesce((select t1.a from t1 where t1.e>=t1.e+t1.a+t1.c or t1.f between c and t1.f),t1.e) from t1 where 11<>13 or t1.a=b),t1.c)*f+a when a then 17 else 17 end+t1.e)+a FROM t1 WHERE NOT (e | a not between -case when not c between case when exists(select 1 from t1 where (f not in (coalesce((select max(t1.d) from t1 where t1.e=t1.d),t1.e)+a,19,t1.c)) or not exists(select 1 from t1 where a in (19,a,e) and 11>=t1.f)) or 11<17 then t1.e+19*t1.f-b when t1.c in (e,t1.b,e) then f else t1.f end and t1.b or (11>=(t1.a)) then e when t1.a=t1.a then (select count(*) from t1) else e end and t1.b)} +} {} +do_test randexpr-2.2485 { + db eval {SELECT case (abs(case when 13 not in (t1.f,a,t1.b) then d*(abs((coalesce((select coalesce((select coalesce((select max(t1.d-t1.a) from t1 where (t1.c between 19 and (c) and c=17)),t1.f) from t1 where t1.e between e and 17),17))-b)/abs(d)) | b+t1.b when e between t1.f and d then 19 else 17 end*f)/abs(a)) when t1.c then 11 else t1.d end FROM t1 WHERE d>=e} +} {} +do_test randexpr-2.2486 { + db eval {SELECT case (abs(case when 13 not in (t1.f,a,t1.b) then d*(abs((coalesce((select coalesce((select coalesce((select max(t1.d-t1.a) from t1 where (t1.c between 19 and (c) and c=17)),t1.f) from t1 where t1.e between e and 17),17))-b)/abs(d)) | b+t1.b when e between t1.f and d then 19 else 17 end*f)/abs(a)) when t1.c then 11 else t1.d end FROM t1 WHERE NOT (d>=e)} +} {400} +do_test randexpr-2.2487 { + db eval {SELECT case (abs(case when 13 not in (t1.f,a,t1.b) then d*(abs((coalesce((select coalesce((select coalesce((select max(t1.d-t1.a) from t1 where (t1.c between 19 and (c) and c=17)),t1.f) from t1 where t1.e between e and 17),17))-b)/abs(d)) & b+t1.b when e between t1.f and d then 19 else 17 end*f)/abs(a)) when t1.c then 11 else t1.d end FROM t1 WHERE NOT (d>=e)} +} {400} +do_test randexpr-2.2488 { + db eval {SELECT case ((abs(case when case -(select 
count(distinct case when t1.d in (select t1.b from t1 union select t1.b from t1) then b when f not in (t1.a,b,a) then c else 17 end) | cast(avg(d) AS integer) | min(b) from t1) | +b-e when t1.e then f else a end not between 11 and 13 then f when 17>e and 17=t1.f) or d in ( -t1.c,f,t1.a) then -a else t1.f end)/abs(t1.b))) when 17 then t1.f else t1.f end FROM t1 WHERE t1.d=t1.b+case c-11+coalesce((select case when +(select +(case max(t1.e) when count(*) then (count(*)) else cast(avg(d) AS integer) end) from t1)-d between b and t1.d then 11 else f end from t1 where t1.f<=t1.c and exists(select 1 from t1 where exists(select 1 from t1 where e between t1.e and t1.e))),t1.b)-19 when t1.d then t1.f else (t1.f) end | e-c or t1.f not in (b, -f, -f)} +} {600} +do_test randexpr-2.2489 { + db eval {SELECT case ((abs(case when case -(select count(distinct case when t1.d in (select t1.b from t1 union select t1.b from t1) then b when f not in (t1.a,b,a) then c else 17 end) | cast(avg(d) AS integer) | min(b) from t1) | +b-e when t1.e then f else a end not between 11 and 13 then f when 17>e and 17=t1.f) or d in ( -t1.c,f,t1.a) then -a else t1.f end)/abs(t1.b))) when 17 then t1.f else t1.f end FROM t1 WHERE NOT (t1.d=t1.b+case c-11+coalesce((select case when +(select +(case max(t1.e) when count(*) then (count(*)) else cast(avg(d) AS integer) end) from t1)-d between b and t1.d then 11 else f end from t1 where t1.f<=t1.c and exists(select 1 from t1 where exists(select 1 from t1 where e between t1.e and t1.e))),t1.b)-19 when t1.d then t1.f else (t1.f) end | e-c or t1.f not in (b, -f, -f))} +} {} +do_test randexpr-2.2490 { + db eval {SELECT case ((abs(case when case -(select count(distinct case when t1.d in (select t1.b from t1 union select t1.b from t1) then b when f not in (t1.a,b,a) then c else 17 end) & cast(avg(d) AS integer) & min(b) from t1) & +b-e when t1.e then f else a end not between 11 and 13 then f when 17>e and 17=t1.f) or d in ( -t1.c,f,t1.a) then -a else t1.f end)/abs(t1.b))) when 17 then t1.f else t1.f end FROM t1 WHERE t1.d=t1.b+case c-11+coalesce((select case when +(select +(case max(t1.e) when count(*) then (count(*)) else cast(avg(d) AS integer) end) from t1)-d between b and t1.d then 11 else f end from t1 where t1.f<=t1.c and exists(select 1 from t1 where exists(select 1 from t1 where e between t1.e and t1.e))),t1.b)-19 when t1.d then t1.f else (t1.f) end | e-c or t1.f not in (b, -f, -f)} +} {600} +do_test randexpr-2.2491 { + db eval {SELECT coalesce((select max(11) from t1 where 17 not in (11,case when (case when t1.f>b then t1.d*t1.d else (abs((abs(19)/abs(coalesce((select 17*t1.e-t1.c from t1 where 17<13 and (t1.a in (select (min(17)) from t1 union select max(c) from t1) and -t1.d not in ((t1.b),d,a))), -17))))/abs(c))*13 end)<>(t1.f) then a when a=t1.a then 17 else -19 end-d,a)),e) FROM t1 WHERE 19<=case when not exists(select 1 from t1 where case when case when case when ((t1.a in (select +min(t1.c) from t1 union select max(t1.a) from t1))) or 19 between t1.d and b then +c | 11*f else d endb then c else t1.d end in (17,( -b),t1.c)) then t1.e when t1.f in (t1.a,13,d) then 17 else t1.f end*11*t1.d and a<=t1.d} +} {11} +do_test randexpr-2.2492 { + db eval {SELECT coalesce((select max(11) from t1 where 17 not in (11,case when (case when t1.f>b then t1.d*t1.d else (abs((abs(19)/abs(coalesce((select 17*t1.e-t1.c from t1 where 17<13 and (t1.a in (select (min(17)) from t1 union select max(c) from t1) and -t1.d not in ((t1.b),d,a))), -17))))/abs(c))*13 end)<>(t1.f) then a when a=t1.a then 17 
else -19 end-d,a)),e) FROM t1 WHERE NOT (19<=case when not exists(select 1 from t1 where case when case when case when ((t1.a in (select +min(t1.c) from t1 union select max(t1.a) from t1))) or 19 between t1.d and b then +c | 11*f else d endb then c else t1.d end in (17,( -b),t1.c)) then t1.e when t1.f in (t1.a,13,d) then 17 else t1.f end*11*t1.d and a<=t1.d)} +} {} +do_test randexpr-2.2493 { + db eval {SELECT b*case when t1.f not in (a,f,t1.e-e) and t1.b in (select coalesce((select max(t1.e) from t1 where coalesce((select max(~d) from t1 where case when t1.b not in (t1.c,17, -c) then t1.f when t1.c>17 then 17 else e end>t1.f and t1.f not in (b,f,d)),17)>=t1.c or 19<=t1.e),t1.a) from t1 union select (e) from t1) then d when t1.c17 then 17 else e end>t1.f and t1.f not in (b,f,d)),17)>=t1.c or 19<=t1.e),t1.a) from t1 union select (e) from t1) then d when t1.cc and e in (select +max(17) from t1 union select count(*) from t1) then (abs( -c)/abs(a)) when b>=t1.f then t1.b else 11 end when t1.f then 17 else t1.d end*(c)+17 when e then c else t1.f end else -c end-t1.c-c-19+c+a not in (t1.a,13,e)} +} {-90801} +do_test randexpr-2.2496 { + db eval {SELECT +(select ~(~cast(avg(t1.c) AS integer)* -cast(avg(c) AS integer))-case min(t1.e) when case count(*)*~(count(*)-+count(*)*cast(avg(case t1.d when 19 then d else (t1.a) end*e) AS integer) | cast(avg(13) AS integer)-cast(avg( -11) AS integer)+max(b)+cast(avg(19) AS integer) | (max(t1.e))) | min(f) when -max(19) then (count(distinct -t1.a)) else (count(distinct 17)) end then max(t1.d) else cast(avg(t1.e) AS integer) end from t1) FROM t1 WHERE NOT (t1.c-case f when 17 | b then case +case f | case when +t1.a>c and e in (select +max(17) from t1 union select count(*) from t1) then (abs( -c)/abs(a)) when b>=t1.f then t1.b else 11 end when t1.f then 17 else t1.d end*(c)+17 when e then c else t1.f end else -c end-t1.c-c-19+c+a not in (t1.a,13,e))} +} {} +do_test randexpr-2.2497 { + db eval {SELECT +(select ~(~cast(avg(t1.c) AS integer)* -cast(avg(c) AS integer))-case min(t1.e) when case count(*)*~(count(*)-+count(*)*cast(avg(case t1.d when 19 then d else (t1.a) end*e) AS integer) & cast(avg(13) AS integer)-cast(avg( -11) AS integer)+max(b)+cast(avg(19) AS integer) & (max(t1.e))) & min(f) when -max(19) then (count(distinct -t1.a)) else (count(distinct 17)) end then max(t1.d) else cast(avg(t1.e) AS integer) end from t1) FROM t1 WHERE t1.c-case f when 17 | b then case +case f | case when +t1.a>c and e in (select +max(17) from t1 union select count(*) from t1) then (abs( -c)/abs(a)) when b>=t1.f then t1.b else 11 end when t1.f then 17 else t1.d end*(c)+17 when e then c else t1.f end else -c end-t1.c-c-19+c+a not in (t1.a,13,e)} +} {-90801} +do_test randexpr-2.2498 { + db eval {SELECT case t1.d when e then t1.d else (abs(e-b)/abs(t1.c+a*a | 17+19*coalesce((select 17 from t1 where 11 between +(select abs(+cast(avg(13) AS integer)) from t1)*19 and t1.e),t1.e-11-t1.e+( -t1.e*13)*t1.e | e*t1.b)*f)) end FROM t1 WHERE (abs(t1.d)/abs(t1.c)) not between +13 and t1.c} +} {0} +do_test randexpr-2.2499 { + db eval {SELECT case t1.d when e then t1.d else (abs(e-b)/abs(t1.c+a*a | 17+19*coalesce((select 17 from t1 where 11 between +(select abs(+cast(avg(13) AS integer)) from t1)*19 and t1.e),t1.e-11-t1.e+( -t1.e*13)*t1.e | e*t1.b)*f)) end FROM t1 WHERE NOT ((abs(t1.d)/abs(t1.c)) not between +13 and t1.c)} +} {} +do_test randexpr-2.2500 { + db eval {SELECT case t1.d when e then t1.d else (abs(e-b)/abs(t1.c+a*a & 17+19*coalesce((select 17 from t1 where 11 between +(select 
abs(+cast(avg(13) AS integer)) from t1)*19 and t1.e),t1.e-11-t1.e+( -t1.e*13)*t1.e & e*t1.b)*f)) end FROM t1 WHERE (abs(t1.d)/abs(t1.c)) not between +13 and t1.c} +} {18} +do_test randexpr-2.2501 { + db eval {SELECT coalesce((select coalesce((select t1.e from t1 where case when c+a in (select t1.f from t1 union select 13 from t1) then c when not not exists(select 1 from t1 where d>= -11) then t1.d else -t1.f end between b and 11 or d not between e and 11), -f)+e+ -11*17 from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where t1.e in (select ~min(e) from t1 union select case case count(distinct f) | min(19)+min(a) when count(*) then max( -t1.c) else max(17) end when (max(t1.c)) then min(11) else min(t1.c) end from t1)) or d=a)),t1.e) FROM t1 WHERE +t1.e<~case when t1.e in (select (abs(c)/abs(~t1.d)) from t1 union select a from t1) then coalesce((select e from t1 where t1.d+ -coalesce((select max(11-t1.e*b) from t1 where 19 in (11,t1.b,e)),11)*t1.e+19 in (select t1.d from t1 union select -t1.d from t1)),17) | t1.e+e when t1.f in (17,f,f) then t1.b else 17 end*13} +} {} +do_test randexpr-2.2502 { + db eval {SELECT coalesce((select coalesce((select t1.e from t1 where case when c+a in (select t1.f from t1 union select 13 from t1) then c when not not exists(select 1 from t1 where d>= -11) then t1.d else -t1.f end between b and 11 or d not between e and 11), -f)+e+ -11*17 from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where t1.e in (select ~min(e) from t1 union select case case count(distinct f) | min(19)+min(a) when count(*) then max( -t1.c) else max(17) end when (max(t1.c)) then min(11) else min(t1.c) end from t1)) or d=a)),t1.e) FROM t1 WHERE NOT (+t1.e<~case when t1.e in (select (abs(c)/abs(~t1.d)) from t1 union select a from t1) then coalesce((select e from t1 where t1.d+ -coalesce((select max(11-t1.e*b) from t1 where 19 in (11,t1.b,e)),11)*t1.e+19 in (select t1.d from t1 union select -t1.d from t1)),17) | t1.e+e when t1.f in (17,f,f) then t1.b else 17 end*13)} +} {813} +do_test randexpr-2.2503 { + db eval {SELECT coalesce((select coalesce((select t1.e from t1 where case when c+a in (select t1.f from t1 union select 13 from t1) then c when not not exists(select 1 from t1 where d>= -11) then t1.d else -t1.f end between b and 11 or d not between e and 11), -f)+e+ -11*17 from t1 where not exists(select 1 from t1 where exists(select 1 from t1 where t1.e in (select ~min(e) from t1 union select case case count(distinct f) & min(19)+min(a) when count(*) then max( -t1.c) else max(17) end when (max(t1.c)) then min(11) else min(t1.c) end from t1)) or d=a)),t1.e) FROM t1 WHERE NOT (+t1.e<~case when t1.e in (select (abs(c)/abs(~t1.d)) from t1 union select a from t1) then coalesce((select e from t1 where t1.d+ -coalesce((select max(11-t1.e*b) from t1 where 19 in (11,t1.b,e)),11)*t1.e+19 in (select t1.d from t1 union select -t1.d from t1)),17) | t1.e+e when t1.f in (17,f,f) then t1.b else 17 end*13)} +} {813} +do_test randexpr-2.2504 { + db eval {SELECT case when ((t1.f)+coalesce((select coalesce((select ++d from t1 where not t1.d not between e and e),19) from t1 where t1.d<>19),a)-c*a not between c and t1.a and not exists(select 1 from t1 where 17<=a) and exists(select 1 from t1 where not e in (select t1.b from t1 union select t1.e from t1)) and t1.f between 11 and t1.d) or e between c and 13 then t1.a+e when t1.a not between a and d then 17 else t1.a end FROM t1 WHERE (a)+t1.f>t1.e+a} +} {100} +do_test randexpr-2.2505 { + db eval {SELECT case when 
((t1.f)+coalesce((select coalesce((select ++d from t1 where not t1.d not between e and e),19) from t1 where t1.d<>19),a)-c*a not between c and t1.a and not exists(select 1 from t1 where 17<=a) and exists(select 1 from t1 where not e in (select t1.b from t1 union select t1.e from t1)) and t1.f between 11 and t1.d) or e between c and 13 then t1.a+e when t1.a not between a and d then 17 else t1.a end FROM t1 WHERE NOT ((a)+t1.f>t1.e+a)} +} {} +do_test randexpr-2.2506 { + db eval {SELECT b*b+case d*d when t1.f then c else coalesce((select coalesce((select max(f) from t1 where (d=t1.b or case when e=11 then t1.a else f end<>t1.d and exists(select 1 from t1 where f not between t1.c and a))),(select count(distinct f) from t1)+case when t1.b between a and (19) and t1.e=a then 13 else 17 end) from t1 where d>=11),f)*t1.e end-11-a- -(11) FROM t1 WHERE (c<19)} +} {} +do_test randexpr-2.2507 { + db eval {SELECT b*b+case d*d when t1.f then c else coalesce((select coalesce((select max(f) from t1 where (d=t1.b or case when e=11 then t1.a else f end<>t1.d and exists(select 1 from t1 where f not between t1.c and a))),(select count(distinct f) from t1)+case when t1.b between a and (19) and t1.e=a then 13 else 17 end) from t1 where d>=11),f)*t1.e end-11-a- -(11) FROM t1 WHERE NOT ((c<19))} +} {339900} +do_test randexpr-2.2508 { + db eval {SELECT coalesce((select max(c) from t1 where ((d between t1.c and c)) and b not between t1.a and a or f+c in (d | +f*d*coalesce((select case when not -t1.c=a)),e)*f,b,11) or -19>=t1.c and t1.a in (b,t1.f,c)),13) FROM t1 WHERE t1.c in (select count(distinct t1.a)*max(d) | +case (~ -~+count(distinct 13)*min(19) | (max(e))+ -(count(*))) when count(distinct t1.a) then max(d) else count(*) end+min(t1.c) from t1 union select min(e) from t1) and 17=t1.c-t1.d or t1.c-d not in ( -c,(abs(case a when t1.a then 13 else e end+f)/abs(e)),t1.a)} +} {13} +do_test randexpr-2.2509 { + db eval {SELECT coalesce((select max(c) from t1 where ((d between t1.c and c)) and b not between t1.a and a or f+c in (d | +f*d*coalesce((select case when not -t1.c=a)),e)*f,b,11) or -19>=t1.c and t1.a in (b,t1.f,c)),13) FROM t1 WHERE NOT (t1.c in (select count(distinct t1.a)*max(d) | +case (~ -~+count(distinct 13)*min(19) | (max(e))+ -(count(*))) when count(distinct t1.a) then max(d) else count(*) end+min(t1.c) from t1 union select min(e) from t1) and 17=t1.c-t1.d or t1.c-d not in ( -c,(abs(case a when t1.a then 13 else e end+f)/abs(e)),t1.a))} +} {} +do_test randexpr-2.2510 { + db eval {SELECT coalesce((select max(c) from t1 where ((d between t1.c and c)) and b not between t1.a and a or f+c in (d & +f*d*coalesce((select case when not -t1.c=a)),e)*f,b,11) or -19>=t1.c and t1.a in (b,t1.f,c)),13) FROM t1 WHERE t1.c in (select count(distinct t1.a)*max(d) | +case (~ -~+count(distinct 13)*min(19) | (max(e))+ -(count(*))) when count(distinct t1.a) then max(d) else count(*) end+min(t1.c) from t1 union select min(e) from t1) and 17=t1.c-t1.d or t1.c-d not in ( -c,(abs(case a when t1.a then 13 else e end+f)/abs(e)),t1.a)} +} {13} +do_test randexpr-2.2511 { + db eval {SELECT case case when not (abs(t1.d)/abs(17))>=t1.a+((19))+ -e-b or 11 between t1.a and 11 then 13 when t1.f in (select case count(distinct t1.d) when abs( -+(cast(avg(t1.c) AS integer)))*count(distinct (t1.e)) then max(19) else case count(distinct t1.a) when cast(avg(19) AS integer) then count(*) else cast(avg((t1.a)) AS integer) end end from t1 union select max(13) from t1) then -t1.b else t1.f end when c then -c else b end FROM t1 WHERE not 
exists(select 1 from t1 where not exists(select 1 from t1 where 13>=f))} +} {} +do_test randexpr-2.2512 { + db eval {SELECT case case when not (abs(t1.d)/abs(17))>=t1.a+((19))+ -e-b or 11 between t1.a and 11 then 13 when t1.f in (select case count(distinct t1.d) when abs( -+(cast(avg(t1.c) AS integer)))*count(distinct (t1.e)) then max(19) else case count(distinct t1.a) when cast(avg(19) AS integer) then count(*) else cast(avg((t1.a)) AS integer) end end from t1 union select max(13) from t1) then -t1.b else t1.f end when c then -c else b end FROM t1 WHERE NOT (not exists(select 1 from t1 where not exists(select 1 from t1 where 13>=f)))} +} {200} +do_test randexpr-2.2513 { + db eval {SELECT 11-(abs( -(select ((count(*)-(abs( -min(~19)))+case min(case when not exists(select 1 from t1 where (exists(select 1 from t1 where 17>f) or t1.c>=a)) then d else 11 end) | abs(cast(avg(19) AS integer)) when (count(distinct t1.f*19))-+ -( -count(*)) then ((cast(avg(t1.d) AS integer))) else cast(avg(t1.a) AS integer) end*count(distinct t1.d))+cast(avg(c) AS integer))-max(17) from t1)-17)/abs(t1.f)) FROM t1 WHERE - - -f+d in (select 19 from t1 union select t1.b from t1) or ((abs(a)/abs( -11+case when t1.e>=( -f) | d then ~t1.a when coalesce((select t1.d from t1 where e not in (e-(d)+a,e,t1.b)),a)<>t1.b then a else -t1.c end)))<=t1.e and t1.e in (13,t1.a,t1.f)} +} {} +do_test randexpr-2.2514 { + db eval {SELECT 11-(abs( -(select ((count(*)-(abs( -min(~19)))+case min(case when not exists(select 1 from t1 where (exists(select 1 from t1 where 17>f) or t1.c>=a)) then d else 11 end) | abs(cast(avg(19) AS integer)) when (count(distinct t1.f*19))-+ -( -count(*)) then ((cast(avg(t1.d) AS integer))) else cast(avg(t1.a) AS integer) end*count(distinct t1.d))+cast(avg(c) AS integer))-max(17) from t1)-17)/abs(t1.f)) FROM t1 WHERE NOT ( - - -f+d in (select 19 from t1 union select t1.b from t1) or ((abs(a)/abs( -11+case when t1.e>=( -f) | d then ~t1.a when coalesce((select t1.d from t1 where e not in (e-(d)+a,e,t1.b)),a)<>t1.b then a else -t1.c end)))<=t1.e and t1.e in (13,t1.a,t1.f))} +} {11} +do_test randexpr-2.2515 { + db eval {SELECT 11-(abs( -(select ((count(*)-(abs( -min(~19)))+case min(case when not exists(select 1 from t1 where (exists(select 1 from t1 where 17>f) or t1.c>=a)) then d else 11 end) & abs(cast(avg(19) AS integer)) when (count(distinct t1.f*19))-+ -( -count(*)) then ((cast(avg(t1.d) AS integer))) else cast(avg(t1.a) AS integer) end*count(distinct t1.d))+cast(avg(c) AS integer))-max(17) from t1)-17)/abs(t1.f)) FROM t1 WHERE NOT ( - - -f+d in (select 19 from t1 union select t1.b from t1) or ((abs(a)/abs( -11+case when t1.e>=( -f) | d then ~t1.a when coalesce((select t1.d from t1 where e not in (e-(d)+a,e,t1.b)),a)<>t1.b then a else -t1.c end)))<=t1.e and t1.e in (13,t1.a,t1.f))} +} {11} +do_test randexpr-2.2516 { + db eval {SELECT coalesce((select ~11*b from t1 where (not exists(select 1 from t1 where case d when t1.b then t1.b else d end not between b and t1.b and f in (select ~count(*) from t1 union select count(distinct coalesce((select max( -c+(abs(coalesce((select max(t1.a) from t1 where not exists(select 1 from t1 where d>a)),t1.b)-17)/abs((b)))*(a)) from t1 where t1.f>b),f)) from t1) or t1.e not in ((d),c,c)))),c)*17+t1.c FROM t1 WHERE coalesce((select 17*case when exists(select 1 from t1 where coalesce((select t1.e from t1 where f<>e and e not between c and 11),d) in (select 11 from t1 union select c from t1)) then t1.b when t1.a between t1.f and 19 then t1.d else t1.c end*f | 19 from t1 where a 
between t1.c and b or t1.e in ((b),b,t1.f)),17) in (select ( -~case case (((count(*)))) when max(e) then max(e) else cast(avg(c) AS integer) end when min(17) then cast(avg(13) AS integer) else cast(avg(t1.b) AS integer) end+ -count(*)* -count(*))+min(d) from t1 union select count(distinct t1.e) from t1)} +} {} +do_test randexpr-2.2517 { + db eval {SELECT coalesce((select ~11*b from t1 where (not exists(select 1 from t1 where case d when t1.b then t1.b else d end not between b and t1.b and f in (select ~count(*) from t1 union select count(distinct coalesce((select max( -c+(abs(coalesce((select max(t1.a) from t1 where not exists(select 1 from t1 where d>a)),t1.b)-17)/abs((b)))*(a)) from t1 where t1.f>b),f)) from t1) or t1.e not in ((d),c,c)))),c)*17+t1.c FROM t1 WHERE NOT (coalesce((select 17*case when exists(select 1 from t1 where coalesce((select t1.e from t1 where f<>e and e not between c and 11),d) in (select 11 from t1 union select c from t1)) then t1.b when t1.a between t1.f and 19 then t1.d else t1.c end*f | 19 from t1 where a between t1.c and b or t1.e in ((b),b,t1.f)),17) in (select ( -~case case (((count(*)))) when max(e) then max(e) else cast(avg(c) AS integer) end when min(17) then cast(avg(13) AS integer) else cast(avg(t1.b) AS integer) end+ -count(*)* -count(*))+min(d) from t1 union select count(distinct t1.e) from t1))} +} {5400} +do_test randexpr-2.2518 { + db eval {SELECT (abs(case when not +c<>coalesce((select max(19) from t1 where not (abs(c)/abs(a))*a-t1.a>=case when (exists(select 1 from t1 where d=11)) then t1.a*~b else case when b in (select (f) from t1 union select 17 from t1) or not 17 not between f and -b then t1.a when fcoalesce((select max(19) from t1 where not (abs(c)/abs(a))*a-t1.a>=case when (exists(select 1 from t1 where d=11)) then t1.a*~b else case when b in (select (f) from t1 union select 17 from t1) or not 17 not between f and -b then t1.a when f=t1.b and (b not in (c,t1.e,t1.a))} +} {} +do_test randexpr-2.2521 { + db eval {SELECT ~case when case 11*17 when f then t1.a else t1.e end in ( -coalesce((select max(case coalesce((select t1.f from t1 where exists(select 1 from t1 where t1.c in (case t1.e when -d then c else (d) end,t1.e,t1.a))), -t1.c) when 19 then 11 else t1.e end) from t1 where t1.e in (19, -t1.b,a)),17)+f-11,17,19) then a when ((d=t1.b and (b not in (c,t1.e,t1.a)))} +} {499} +do_test randexpr-2.2522 { + db eval {SELECT (abs(case f when case when d+f between e and t1.f then f when coalesce((select max(t1.f) from t1 where c in (select t1.a from t1 union select case when a not between t1.c and 13 then a else t1.c end from t1) or f*f not in (case when (11<=d) then case when 13= -d),b)<=t1.b then t1.e else 13 end then f else t1.b end)/abs(f)) FROM t1 WHERE c between c and coalesce((select max(f) from t1 where b in (select count(*) from t1 union select max(13) from t1)),19)} +} {} +do_test randexpr-2.2523 { + db eval {SELECT (abs(case f when case when d+f between e and t1.f then f when coalesce((select max(t1.f) from t1 where c in (select t1.a from t1 union select case when a not between t1.c and 13 then a else t1.c end from t1) or f*f not in (case when (11<=d) then case when 13= -d),b)<=t1.b then t1.e else 13 end then f else t1.b end)/abs(f)) FROM t1 WHERE NOT (c between c and coalesce((select max(f) from t1 where b in (select count(*) from t1 union select max(13) from t1)),19))} +} {0} +do_test randexpr-2.2524 { + db eval {SELECT d++11*a+coalesce((select max(t1.a) from t1 where 17+case when (19>=a) then t1.b when f>=f then -(select 
cast(avg(t1.e-t1.f*t1.a) AS integer) from t1)-f else t1.f end*a+f*t1.a not in (t1.e,e,13)),17)- -t1.b-a-c*19 FROM t1 WHERE f*t1.a*t1.b>17 and case when 13 not in (19, -(select count(*) from t1),coalesce((select max(coalesce((select max(t1.a) from t1 where -coalesce((select max(case when not t1.f<=(t1.d) then t1.a else (a) end) from t1 where t1.f>c or 13<(t1.a)),17)>=t1.a or not exists(select 1 from t1 where - -c not in (a,t1.b,t1.e))),t1.e)) from t1 where (f not in (b,f,f))),c)) then (t1.e) when t1.a<>e then e else b end- -t1.f not in (a, -c,(c))} +} {-4000} +do_test randexpr-2.2525 { + db eval {SELECT d++11*a+coalesce((select max(t1.a) from t1 where 17+case when (19>=a) then t1.b when f>=f then -(select cast(avg(t1.e-t1.f*t1.a) AS integer) from t1)-f else t1.f end*a+f*t1.a not in (t1.e,e,13)),17)- -t1.b-a-c*19 FROM t1 WHERE NOT (f*t1.a*t1.b>17 and case when 13 not in (19, -(select count(*) from t1),coalesce((select max(coalesce((select max(t1.a) from t1 where -coalesce((select max(case when not t1.f<=(t1.d) then t1.a else (a) end) from t1 where t1.f>c or 13<(t1.a)),17)>=t1.a or not exists(select 1 from t1 where - -c not in (a,t1.b,t1.e))),t1.e)) from t1 where (f not in (b,f,f))),c)) then (t1.e) when t1.a<>e then e else b end- -t1.f not in (a, -c,(c)))} +} {} +do_test randexpr-2.2526 { + db eval {SELECT coalesce((select (a) from t1 where 11-11+t1.b<=17-b),case when case when not t1.e*f>= -t1.f and a in (11,t1.c,t1.b) or 19>e then (select case min((f)-f) when cast(avg(t1.a) AS integer) then (cast(avg(17) AS integer)) else ((cast(avg((13)) AS integer))) end from t1) when 13 not between f and t1.c then (abs(e)/abs(17)) else t1.e end | -17<=f then b when f<19 or t1.b<=t1.d then f else f end) FROM t1 WHERE -f | t1.f*c-t1.a | t1.c+11*case ~17 when -coalesce((select max(t1.e) from t1 where 13*(17)>=11),t1.a) then case case when (t1.b>= -t1.b) then t1.f when t1.fa and t1.e in (e,(t1.d),19) then t1.f else t1.c end when a then t1.a else t1.e end else a end*t1.a-13= -t1.f and a in (11,t1.c,t1.b) or 19>e then (select case min((f)-f) when cast(avg(t1.a) AS integer) then (cast(avg(17) AS integer)) else ((cast(avg((13)) AS integer))) end from t1) when 13 not between f and t1.c then (abs(e)/abs(17)) else t1.e end | -17<=f then b when f<19 or t1.b<=t1.d then f else f end) FROM t1 WHERE NOT ( -f | t1.f*c-t1.a | t1.c+11*case ~17 when -coalesce((select max(t1.e) from t1 where 13*(17)>=11),t1.a) then case case when (t1.b>= -t1.b) then t1.f when t1.fa and t1.e in (e,(t1.d),19) then t1.f else t1.c end when a then t1.a else t1.e end else a end*t1.a-13= -t1.f and a in (11,t1.c,t1.b) or 19>e then (select case min((f)-f) when cast(avg(t1.a) AS integer) then (cast(avg(17) AS integer)) else ((cast(avg((13)) AS integer))) end from t1) when 13 not between f and t1.c then (abs(e)/abs(17)) else t1.e end & -17<=f then b when f<19 or t1.b<=t1.d then f else f end) FROM t1 WHERE -f | t1.f*c-t1.a | t1.c+11*case ~17 when -coalesce((select max(t1.e) from t1 where 13*(17)>=11),t1.a) then case case when (t1.b>= -t1.b) then t1.f when t1.fa and t1.e in (e,(t1.d),19) then t1.f else t1.c end when a then t1.a else t1.e end else a end*t1.a-13a then 11 else e end*11 and t1.e) then t1.e when -17<11 then 17 else t1.a end)/abs(a)) FROM t1 WHERE c<=19} +} {} +do_test randexpr-2.2530 { + db eval {SELECT (abs(case when exists(select 1 from t1 where b not between ~case when (t1.e-(c-t1.c) | t1.a) in (select abs(~((min(t1.f) | cast(avg(c) AS integer))*(min(b))-count(distinct 19)*min(e)) | -((count(*))))-cast(avg(d) AS integer) from t1 union 
select -count(distinct t1.a) from t1) or f>a then 11 else e end*11 and t1.e) then t1.e when -17<11 then 17 else t1.a end)/abs(a)) FROM t1 WHERE NOT (c<=19)} +} {0} +do_test randexpr-2.2531 { + db eval {SELECT (abs(case when exists(select 1 from t1 where b not between ~case when (t1.e-(c-t1.c) & t1.a) in (select abs(~((min(t1.f) & cast(avg(c) AS integer))*(min(b))-count(distinct 19)*min(e)) & -((count(*))))-cast(avg(d) AS integer) from t1 union select -count(distinct t1.a) from t1) or f>a then 11 else e end*11 and t1.e) then t1.e when -17<11 then 17 else t1.a end)/abs(a)) FROM t1 WHERE NOT (c<=19)} +} {0} +do_test randexpr-2.2532 { + db eval {SELECT case when t1.e-19 in (case (abs(t1.a*+~c*coalesce((select c from t1 where ~ -(select abs(case cast(avg(13) AS integer) when - -cast(avg(f) AS integer) then count(distinct t1.d) else (count(*)) end+count(*)) from t1) not between (abs(a | t1.a)/abs(t1.c)) and 19),t1.b)-c*17*(t1.f))/abs(d)) when 13 then 19 else t1.b end,(b),17) then 19 when (11)>=t1.b then b else b end*11 FROM t1 WHERE (case 11 when case t1.e when coalesce((select a from t1 where t1.a=t1.a),t1.f),d,13) then c when 11 between t1.b and 17 then d else 19 end) then f else 19 end then t1.d else c end< -c) and a not in (11,c,11) or 11<>e or exists(select 1 from t1 where b<17)} +} {2200} +do_test randexpr-2.2533 { + db eval {SELECT case when t1.e-19 in (case (abs(t1.a*+~c*coalesce((select c from t1 where ~ -(select abs(case cast(avg(13) AS integer) when - -cast(avg(f) AS integer) then count(distinct t1.d) else (count(*)) end+count(*)) from t1) not between (abs(a | t1.a)/abs(t1.c)) and 19),t1.b)-c*17*(t1.f))/abs(d)) when 13 then 19 else t1.b end,(b),17) then 19 when (11)>=t1.b then b else b end*11 FROM t1 WHERE NOT ((case 11 when case t1.e when coalesce((select a from t1 where t1.a=t1.a),t1.f),d,13) then c when 11 between t1.b and 17 then d else 19 end) then f else 19 end then t1.d else c end< -c) and a not in (11,c,11) or 11<>e or exists(select 1 from t1 where b<17))} +} {} +do_test randexpr-2.2534 { + db eval {SELECT case when t1.e-19 in (case (abs(t1.a*+~c*coalesce((select c from t1 where ~ -(select abs(case cast(avg(13) AS integer) when - -cast(avg(f) AS integer) then count(distinct t1.d) else (count(*)) end+count(*)) from t1) not between (abs(a & t1.a)/abs(t1.c)) and 19),t1.b)-c*17*(t1.f))/abs(d)) when 13 then 19 else t1.b end,(b),17) then 19 when (11)>=t1.b then b else b end*11 FROM t1 WHERE (case 11 when case t1.e when coalesce((select a from t1 where t1.a=t1.a),t1.f),d,13) then c when 11 between t1.b and 17 then d else 19 end) then f else 19 end then t1.d else c end< -c) and a not in (11,c,11) or 11<>e or exists(select 1 from t1 where b<17)} +} {2200} +do_test randexpr-2.2535 { + db eval {SELECT t1.b*case when (abs(t1.c+case when d not in (a,case when b=(t1.a) then (e) when b not between f and t1.c then t1.b else t1.e end, -11) then t1.d when not exists(select 1 from t1 where 19 not between (a) and a or t1.c>d) then d else b end+t1.a+13-19+d)/abs(t1.d))> -t1.f then t1.a when t1.d in (select d from t1 union select c from t1) or 13 not between d and t1.f then t1.b else e end+ - -a FROM t1 WHERE t1.f<=11} +} {} +do_test randexpr-2.2536 { + db eval {SELECT t1.b*case when (abs(t1.c+case when d not in (a,case when b=(t1.a) then (e) when b not between f and t1.c then t1.b else t1.e end, -11) then t1.d when not exists(select 1 from t1 where 19 not between (a) and a or t1.c>d) then d else b end+t1.a+13-19+d)/abs(t1.d))> -t1.f then t1.a when t1.d in (select d from t1 union select c from t1) 
or 13 not between d and t1.f then t1.b else e end+ - -a FROM t1 WHERE NOT (t1.f<=11)} +} {20100} +do_test randexpr-2.2537 { + db eval {SELECT coalesce((select max(coalesce((select e+t1.c from t1 where f*11+coalesce((select max(17) from t1 where not ~case a when t1.f+13*e-(11)*t1.f then d else a end>d and (a not between t1.c and t1.c)),t1.d)- -f-b+11=b),19)-t1.f) from t1 where exists(select 1 from t1 where 19<>11)),c) FROM t1 WHERE case when 11 in (select -abs(abs( - -(cast(avg(+11-c) AS integer)+cast(avg(~a) AS integer))*max(11))+cast(avg(d) AS integer)-cast(avg(e) AS integer)-((max(t1.e))))+count(*) from t1 union select ( -count(*)) from t1) then t1.a when e>d then c- - -t1.a else 19 end in (select count(distinct -f) from t1 union select count(distinct d) from t1) and not e<17} +} {} +do_test randexpr-2.2538 { + db eval {SELECT coalesce((select max(coalesce((select e+t1.c from t1 where f*11+coalesce((select max(17) from t1 where not ~case a when t1.f+13*e-(11)*t1.f then d else a end>d and (a not between t1.c and t1.c)),t1.d)- -f-b+11=b),19)-t1.f) from t1 where exists(select 1 from t1 where 19<>11)),c) FROM t1 WHERE NOT (case when 11 in (select -abs(abs( - -(cast(avg(+11-c) AS integer)+cast(avg(~a) AS integer))*max(11))+cast(avg(d) AS integer)-cast(avg(e) AS integer)-((max(t1.e))))+count(*) from t1 union select ( -count(*)) from t1) then t1.a when e>d then c- - -t1.a else 19 end in (select count(distinct -f) from t1 union select count(distinct d) from t1) and not e<17)} +} {-581} +do_test randexpr-2.2539 { + db eval {SELECT case when (coalesce((select max(a*case when 17+f>f-t1.d*t1.f then t1.d when t1.c not between t1.e and d then 19 else (c) end) from t1 where not t1.d not between - -17 and 19),t1.a) in (select min((13)) from t1 union select ~(abs(count(distinct t1.a))*((( -max( -t1.f)))+max( -t1.c) | count(*)))-cast(avg(11) AS integer) from t1)) then c- - -d when t1.d>=d then d else 13 end FROM t1 WHERE ((abs(t1.a-t1.a*t1.d*coalesce((select coalesce((select max(11) from t1 where exists(select 1 from t1 where 11+t1.d | -t1.c-case when b+d in (select count(*) from t1 union select max(17)+ -count(distinct t1.d) from t1) then t1.c else d end-a | 13<>c)),t1.c) from t1 where 13 not in (d,t1.a,f) and 19=t1.f),(13))+(t1.d))/abs(t1.c)) between t1.f and t1.d)} +} {} +do_test randexpr-2.2540 { + db eval {SELECT case when (coalesce((select max(a*case when 17+f>f-t1.d*t1.f then t1.d when t1.c not between t1.e and d then 19 else (c) end) from t1 where not t1.d not between - -17 and 19),t1.a) in (select min((13)) from t1 union select ~(abs(count(distinct t1.a))*((( -max( -t1.f)))+max( -t1.c) | count(*)))-cast(avg(11) AS integer) from t1)) then c- - -d when t1.d>=d then d else 13 end FROM t1 WHERE NOT (((abs(t1.a-t1.a*t1.d*coalesce((select coalesce((select max(11) from t1 where exists(select 1 from t1 where 11+t1.d | -t1.c-case when b+d in (select count(*) from t1 union select max(17)+ -count(distinct t1.d) from t1) then t1.c else d end-a | 13<>c)),t1.c) from t1 where 13 not in (d,t1.a,f) and 19=t1.f),(13))+(t1.d))/abs(t1.c)) between t1.f and t1.d))} +} {400} +do_test randexpr-2.2541 { + db eval {SELECT case when (coalesce((select max(a*case when 17+f>f-t1.d*t1.f then t1.d when t1.c not between t1.e and d then 19 else (c) end) from t1 where not t1.d not between - -17 and 19),t1.a) in (select min((13)) from t1 union select ~(abs(count(distinct t1.a))*((( -max( -t1.f)))+max( -t1.c) & count(*)))-cast(avg(11) AS integer) from t1)) then c- - -d when t1.d>=d then d else 13 end FROM t1 WHERE NOT 
(((abs(t1.a-t1.a*t1.d*coalesce((select coalesce((select max(11) from t1 where exists(select 1 from t1 where 11+t1.d | -t1.c-case when b+d in (select count(*) from t1 union select max(17)+ -count(distinct t1.d) from t1) then t1.c else d end-a | 13<>c)),t1.c) from t1 where 13 not in (d,t1.a,f) and 19=t1.f),(13))+(t1.d))/abs(t1.c)) between t1.f and t1.d))} +} {400} +do_test randexpr-2.2542 { + db eval {SELECT case when f | c>t1.d-case when (abs(t1.b-t1.d*f)/abs(c))=t1.e then 17 when not exists(select 1 from t1 where t1.b=(11) or c not in (13,17,b)) or aa then t1.c when not 11<(f) then case when t1.c= -b then c else 11 end else e end | f>=t1.b and b not between -(t1.e) and f) then 17 when f in (select -count(distinct t1.e) from t1 union select case min(f) when case cast(avg( -t1.b) AS integer) when cast(avg(e) AS integer) then -max(e) else min(t1.b) end then count(*) else (cast(avg(t1.e) AS integer)) end from t1) then case c when e then t1.f else 13 end else t1.d end} +} {} +do_test randexpr-2.2543 { + db eval {SELECT case when f | c>t1.d-case when (abs(t1.b-t1.d*f)/abs(c))=t1.e then 17 when not exists(select 1 from t1 where t1.b=(11) or c not in (13,17,b)) or aa then t1.c when not 11<(f) then case when t1.c= -b then c else 11 end else e end | f>=t1.b and b not between -(t1.e) and f) then 17 when f in (select -count(distinct t1.e) from t1 union select case min(f) when case cast(avg( -t1.b) AS integer) when cast(avg(e) AS integer) then -max(e) else min(t1.b) end then count(*) else (cast(avg(t1.e) AS integer)) end from t1) then case c when e then t1.f else 13 end else t1.d end)} +} {11} +do_test randexpr-2.2544 { + db eval {SELECT case when f & c>t1.d-case when (abs(t1.b-t1.d*f)/abs(c))=t1.e then 17 when not exists(select 1 from t1 where t1.b=(11) or c not in (13,17,b)) or aa then t1.c when not 11<(f) then case when t1.c= -b then c else 11 end else e end | f>=t1.b and b not between -(t1.e) and f) then 17 when f in (select -count(distinct t1.e) from t1 union select case min(f) when case cast(avg( -t1.b) AS integer) when cast(avg(e) AS integer) then -max(e) else min(t1.b) end then count(*) else (cast(avg(t1.e) AS integer)) end from t1) then case c when e then t1.f else 13 end else t1.d end)} +} {11} +do_test randexpr-2.2545 { + db eval {SELECT 17+coalesce((select max(19) from t1 where 11 in (select (abs((abs(~(case +b+17 when t1.e*t1.b then 17 else case when t1.b+17+d*a in (select (~count(distinct a)) from t1 union select case cast(avg(t1.e) AS integer) when count(*) then count(distinct t1.d) else count(distinct b) end from t1) then t1.f when (t1.e in ( -t1.a,t1.e,d)) then 17 else t1.e end | e end)- -b)/abs(t1.f)))/abs(13)) from t1 union select t1.d from t1)),17) FROM t1 WHERE e+t1.b not in (d,a,t1.c) and case case e when (case when +case 13 when t1.a then 19 else 19 end+d in (select count(*)-min(b) from t1 union select - -min(t1.d) from t1) or f in (select ( -min(t1.e)) from t1 union select cast(avg( -19) AS integer) from t1) or (t1.f in (select -max( -t1.c) from t1 union select (min(t1.c)) from t1)) then b else +t1.b end)-t1.b then b else 19 end when a then b else e end between ( -(t1.c)) and a} +} {} +do_test randexpr-2.2546 { + db eval {SELECT 17+coalesce((select max(19) from t1 where 11 in (select (abs((abs(~(case +b+17 when t1.e*t1.b then 17 else case when t1.b+17+d*a in (select (~count(distinct a)) from t1 union select case cast(avg(t1.e) AS integer) when count(*) then count(distinct t1.d) else count(distinct b) end from t1) then t1.f when (t1.e in ( -t1.a,t1.e,d)) then 17 else t1.e end | e 
end)- -b)/abs(t1.f)))/abs(13)) from t1 union select t1.d from t1)),17) FROM t1 WHERE NOT (e+t1.b not in (d,a,t1.c) and case case e when (case when +case 13 when t1.a then 19 else 19 end+d in (select count(*)-min(b) from t1 union select - -min(t1.d) from t1) or f in (select ( -min(t1.e)) from t1 union select cast(avg( -19) AS integer) from t1) or (t1.f in (select -max( -t1.c) from t1 union select (min(t1.c)) from t1)) then b else +t1.b end)-t1.b then b else 19 end when a then b else e end between ( -(t1.c)) and a)} +} {34} +do_test randexpr-2.2547 { + db eval {SELECT 17+coalesce((select max(19) from t1 where 11 in (select (abs((abs(~(case +b+17 when t1.e*t1.b then 17 else case when t1.b+17+d*a in (select (~count(distinct a)) from t1 union select case cast(avg(t1.e) AS integer) when count(*) then count(distinct t1.d) else count(distinct b) end from t1) then t1.f when (t1.e in ( -t1.a,t1.e,d)) then 17 else t1.e end & e end)- -b)/abs(t1.f)))/abs(13)) from t1 union select t1.d from t1)),17) FROM t1 WHERE NOT (e+t1.b not in (d,a,t1.c) and case case e when (case when +case 13 when t1.a then 19 else 19 end+d in (select count(*)-min(b) from t1 union select - -min(t1.d) from t1) or f in (select ( -min(t1.e)) from t1 union select cast(avg( -19) AS integer) from t1) or (t1.f in (select -max( -t1.c) from t1 union select (min(t1.c)) from t1)) then b else +t1.b end)-t1.b then b else 19 end when a then b else e end between ( -(t1.c)) and a)} +} {34} +do_test randexpr-2.2548 { + db eval {SELECT (abs(~(abs((select ~count(*) | +(max( -coalesce((select max(13) from t1 where coalesce((select max(t1.d) from t1 where 11<+coalesce((select max((case when 19=t1.a then (d) else 19 end)) from t1 where (11) in (17,c,b)),c)*t1.e),t1.c)<13),11)*b | t1.c+17)) from t1))/abs(~b-f))-t1.d*a)/abs(e)) FROM t1 WHERE c not in (coalesce((select t1.a from t1 where -(t1.a+17*~case when coalesce((select max(t1.c) from t1 where (coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where a<=t1.a)),11) in (select (count(distinct t1.d)+count(distinct c)) from t1 union select count(distinct e) from t1))),13)*f=d then d when t1.d in (select t1.d from t1 union select t1.f from t1) then d else t1.f end+c*c | f)+t1.c*t1.a in (select 17 from t1 union select 13 from t1)),t1.f),17,t1.f)} +} {80} +do_test randexpr-2.2549 { + db eval {SELECT (abs(~(abs((select ~count(*) | +(max( -coalesce((select max(13) from t1 where coalesce((select max(t1.d) from t1 where 11<+coalesce((select max((case when 19=t1.a then (d) else 19 end)) from t1 where (11) in (17,c,b)),c)*t1.e),t1.c)<13),11)*b | t1.c+17)) from t1))/abs(~b-f))-t1.d*a)/abs(e)) FROM t1 WHERE NOT (c not in (coalesce((select t1.a from t1 where -(t1.a+17*~case when coalesce((select max(t1.c) from t1 where (coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where a<=t1.a)),11) in (select (count(distinct t1.d)+count(distinct c)) from t1 union select count(distinct e) from t1))),13)*f=d then d when t1.d in (select t1.d from t1 union select t1.f from t1) then d else t1.f end+c*c | f)+t1.c*t1.a in (select 17 from t1 union select 13 from t1)),t1.f),17,t1.f))} +} {} +do_test randexpr-2.2550 { + db eval {SELECT (abs(~(abs((select ~count(*) & +(max( -coalesce((select max(13) from t1 where coalesce((select max(t1.d) from t1 where 11<+coalesce((select max((case when 19=t1.a then (d) else 19 end)) from t1 where (11) in (17,c,b)),c)*t1.e),t1.c)<13),11)*b & t1.c+17)) from t1))/abs(~b-f))-t1.d*a)/abs(e)) FROM t1 WHERE c not in (coalesce((select t1.a from t1 where -(t1.a+17*~case 
when coalesce((select max(t1.c) from t1 where (coalesce((select max(t1.f) from t1 where not exists(select 1 from t1 where a<=t1.a)),11) in (select (count(distinct t1.d)+count(distinct c)) from t1 union select count(distinct e) from t1))),13)*f=d then d when t1.d in (select t1.d from t1 union select t1.f from t1) then d else t1.f end+c*c | f)+t1.c*t1.a in (select 17 from t1 union select 13 from t1)),t1.f),17,t1.f)} +} {80} +do_test randexpr-2.2551 { + db eval {SELECT case (19) when t1.a then t1.f+coalesce((select max(t1.f) from t1 where case when t1.c=t1.a or (t1.d | case when t1.f=t1.d then a else coalesce((select max(case when 11>=a then t1.b else d end+t1.a) from t1 where -t1.a not between t1.e and 19),11) end<>t1.d) then t1.e else c end-t1.f*b+f=19),t1.b) | (t1.a)+e else a end FROM t1 WHERE (case e when d then +(select (min(t1.a)) from t1) else case when a in ((select min(case -t1.c when d then d else f end+ -e)-+cast(avg(19) AS integer)+max(d) from t1)*t1.d | -t1.c,b,e) then t1.d when t1.b in (select +min(e) | min(t1.b) from t1 union select -count(distinct e) from t1) then d else 11 end*13-(e) end not in (t1.c,c,17))} +} {100} +do_test randexpr-2.2552 { + db eval {SELECT case (19) when t1.a then t1.f+coalesce((select max(t1.f) from t1 where case when t1.c=t1.a or (t1.d | case when t1.f=t1.d then a else coalesce((select max(case when 11>=a then t1.b else d end+t1.a) from t1 where -t1.a not between t1.e and 19),11) end<>t1.d) then t1.e else c end-t1.f*b+f=19),t1.b) | (t1.a)+e else a end FROM t1 WHERE NOT ((case e when d then +(select (min(t1.a)) from t1) else case when a in ((select min(case -t1.c when d then d else f end+ -e)-+cast(avg(19) AS integer)+max(d) from t1)*t1.d | -t1.c,b,e) then t1.d when t1.b in (select +min(e) | min(t1.b) from t1 union select -count(distinct e) from t1) then d else 11 end*13-(e) end not in (t1.c,c,17)))} +} {} +do_test randexpr-2.2553 { + db eval {SELECT case (19) when t1.a then t1.f+coalesce((select max(t1.f) from t1 where case when t1.c=t1.a or (t1.d & case when t1.f=t1.d then a else coalesce((select max(case when 11>=a then t1.b else d end+t1.a) from t1 where -t1.a not between t1.e and 19),11) end<>t1.d) then t1.e else c end-t1.f*b+f=19),t1.b) & (t1.a)+e else a end FROM t1 WHERE (case e when d then +(select (min(t1.a)) from t1) else case when a in ((select min(case -t1.c when d then d else f end+ -e)-+cast(avg(19) AS integer)+max(d) from t1)*t1.d | -t1.c,b,e) then t1.d when t1.b in (select +min(e) | min(t1.b) from t1 union select -count(distinct e) from t1) then d else 11 end*13-(e) end not in (t1.c,c,17))} +} {100} +do_test randexpr-2.2554 { + db eval {SELECT (abs(( -case when 11*coalesce((select coalesce((select d from t1 where ((+t1.f*(select max(t1.f)*min(t1.a) from t1)*d*t1.f between 11 and 13))), -t1.c) from t1 where ((e)>=t1.b) or not t1.a in (select f from t1 union select c from t1) and 11<=a or c between (t1.c) and t1.d),t1.f)t1.f} +} {} +do_test randexpr-2.2555 { + db eval {SELECT (abs(( -case when 11*coalesce((select coalesce((select d from t1 where ((+t1.f*(select max(t1.f)*min(t1.a) from t1)*d*t1.f between 11 and 13))), -t1.c) from t1 where ((e)>=t1.b) or not t1.a in (select f from t1 union select c from t1) and 11<=a or c between (t1.c) and t1.d),t1.f)t1.f)} +} {53} +do_test randexpr-2.2556 { + db eval {SELECT case (t1.b) | t1.d-+t1.e*(select (~count(*) | min(c)-case max(case when e | b in (select t1.b from t1 union select t1.b from t1) then d when 17 between 13 and t1.d then d else t1.c end) when abs(count(distinct (t1.e)))- -min(t1.e) 
then cast(avg(17) AS integer) else count(distinct c) end) from t1) | b-a-+f-t1.f when +a then t1.d else d end FROM t1 WHERE (17>t1.d) and coalesce((select max((t1.a)+coalesce((select max(17*b) from t1 where t1.d-coalesce((select t1.e from t1 where t1.b<>13 or 19=t1.a),t1.d)*f<=(19) or (t1.c<11 and not not dt1.d or d=11} +} {} +do_test randexpr-2.2557 { + db eval {SELECT case (t1.b) | t1.d-+t1.e*(select (~count(*) | min(c)-case max(case when e | b in (select t1.b from t1 union select t1.b from t1) then d when 17 between 13 and t1.d then d else t1.c end) when abs(count(distinct (t1.e)))- -min(t1.e) then cast(avg(17) AS integer) else count(distinct c) end) from t1) | b-a-+f-t1.f when +a then t1.d else d end FROM t1 WHERE NOT ((17>t1.d) and coalesce((select max((t1.a)+coalesce((select max(17*b) from t1 where t1.d-coalesce((select t1.e from t1 where t1.b<>13 or 19=t1.a),t1.d)*f<=(19) or (t1.c<11 and not not dt1.d or d=11)} +} {400} +do_test randexpr-2.2558 { + db eval {SELECT case (t1.b) & t1.d-+t1.e*(select (~count(*) & min(c)-case max(case when e & b in (select t1.b from t1 union select t1.b from t1) then d when 17 between 13 and t1.d then d else t1.c end) when abs(count(distinct (t1.e)))- -min(t1.e) then cast(avg(17) AS integer) else count(distinct c) end) from t1) & b-a-+f-t1.f when +a then t1.d else d end FROM t1 WHERE NOT ((17>t1.d) and coalesce((select max((t1.a)+coalesce((select max(17*b) from t1 where t1.d-coalesce((select t1.e from t1 where t1.b<>13 or 19=t1.a),t1.d)*f<=(19) or (t1.c<11 and not not dt1.d or d=11)} +} {400} +do_test randexpr-2.2559 { + db eval {SELECT case when case when coalesce((select max(c*case (select min(case when f in (select t1.e from t1 union select f+case when d<=d then t1.f when b<=11 then t1.c else t1.b end*11 from t1) then f else c end) from t1) when f then 11 else b end) from t1 where d in (t1.b,t1.a,c)),d) between (d) and 19 then -a else a end in ( -c,t1.d,t1.d) then c when e<> -d then a else t1.c end FROM t1 WHERE a=c or case when e=(e)),t1.a) then c | t1.f when exists(select 1 from t1 where a not between 19 and e) then t1.a else 17 end>t1.d} +} {} +do_test randexpr-2.2560 { + db eval {SELECT case when case when coalesce((select max(c*case (select min(case when f in (select t1.e from t1 union select f+case when d<=d then t1.f when b<=11 then t1.c else t1.b end*11 from t1) then f else c end) from t1) when f then 11 else b end) from t1 where d in (t1.b,t1.a,c)),d) between (d) and 19 then -a else a end in ( -c,t1.d,t1.d) then c when e<> -d then a else t1.c end FROM t1 WHERE NOT (a=c or case when e=(e)),t1.a) then c | t1.f when exists(select 1 from t1 where a not between 19 and e) then t1.a else 17 end>t1.d)} +} {100} +do_test randexpr-2.2561 { + db eval {SELECT 13+(select -abs(+cast(avg((select cast(avg(t1.e*(t1.c)- -(abs(b+e)/abs(f))) AS integer) from t1)) AS integer)-~ -+abs(abs(count(*))) | min(t1.e | t1.e-t1.b)*max(e)-max(t1.f) | min(c)*min(t1.b)+min(t1.e)-count(distinct 11) | min(b)*( - -count(distinct 19)))-count(*) from t1) FROM t1 WHERE d in (select count(*) from t1 union select case count(*) when max(17) | -max(coalesce((select case t1.d*t1.e when t1.a then 11 else 13 end*f- -f from t1 where b in (select case ~abs(max(13)) when case ((max(c))) when cast(avg(t1.c) AS integer) then max(13) else count(distinct t1.d) end then cast(avg(t1.a) AS integer) else cast(avg(11) AS integer) end from t1 union select -max(13) from t1)),13))*cast(avg(f) AS integer) then count(*) else count(distinct e) end from t1) and exists(select 1 from t1 where case d when 
-(abs(17)/abs(t1.f)) then t1.a else t1.d end=19} +} {17017} +do_test randexpr-2.2565 { + db eval {SELECT c+c*(abs(t1.c+coalesce((select case when 17 not in (t1.f,t1.e,t1.d) or (17=19)} +} {} +do_test randexpr-2.2566 { + db eval {SELECT +case when not c in (select count(distinct t1.b) from t1 union select -+(cast(avg(b*t1.b) AS integer))*max(case 17 when b then 13 else t1.a end)-count(distinct a) | cast(avg(t1.f) AS integer)+max(t1.d) from t1) then t1.e when a<=case t1.a when (e) then t1.d else d end then d else t1.b end*11-a | 17 | t1.a-f FROM t1 WHERE (c not in (t1.b,(select case case ++(min(19))*(count(distinct t1.a))+count(distinct e) | cast(avg(t1.e) AS integer) when min( -t1.d) then cast(avg(13) AS integer) else min(17) end*cast(avg(a) AS integer) when count(distinct -b) then cast(avg(11) AS integer) else - -(cast(avg(c) AS integer)) end from t1)-11+t1.e,t1.a) or d*case d when a then t1.c else 17 end*19 not between b and t1.c or -17=b or f in (select a from t1 union select -b from t1))} +} {-227} +do_test randexpr-2.2567 { + db eval {SELECT +case when not c in (select count(distinct t1.b) from t1 union select -+(cast(avg(b*t1.b) AS integer))*max(case 17 when b then 13 else t1.a end)-count(distinct a) | cast(avg(t1.f) AS integer)+max(t1.d) from t1) then t1.e when a<=case t1.a when (e) then t1.d else d end then d else t1.b end*11-a | 17 | t1.a-f FROM t1 WHERE NOT ((c not in (t1.b,(select case case ++(min(19))*(count(distinct t1.a))+count(distinct e) | cast(avg(t1.e) AS integer) when min( -t1.d) then cast(avg(13) AS integer) else min(17) end*cast(avg(a) AS integer) when count(distinct -b) then cast(avg(11) AS integer) else - -(cast(avg(c) AS integer)) end from t1)-11+t1.e,t1.a) or d*case d when a then t1.c else 17 end*19 not between b and t1.c or -17=b or f in (select a from t1 union select -b from t1)))} +} {} +do_test randexpr-2.2568 { + db eval {SELECT +case when not c in (select count(distinct t1.b) from t1 union select -+(cast(avg(b*t1.b) AS integer))*max(case 17 when b then 13 else t1.a end)-count(distinct a) & cast(avg(t1.f) AS integer)+max(t1.d) from t1) then t1.e when a<=case t1.a when (e) then t1.d else d end then d else t1.b end*11-a & 17 & t1.a-f FROM t1 WHERE (c not in (t1.b,(select case case ++(min(19))*(count(distinct t1.a))+count(distinct e) | cast(avg(t1.e) AS integer) when min( -t1.d) then cast(avg(13) AS integer) else min(17) end*cast(avg(a) AS integer) when count(distinct -b) then cast(avg(11) AS integer) else - -(cast(avg(c) AS integer)) end from t1)-11+t1.e,t1.a) or d*case d when a then t1.c else 17 end*19 not between b and t1.c or -17=b or f in (select a from t1 union select -b from t1))} +} {0} +do_test randexpr-2.2569 { + db eval {SELECT coalesce((select (abs(f)/abs(coalesce((select max(19*b+d*coalesce((select max(t1.c) from t1 where case when d>=(abs((11))/abs(t1.a*13+(13)-d*e+11)) then t1.c when t1.c>=a then 19 else -17 end*t1.e> -e),t1.f)) from t1 where 13<=t1.e),t1.d))) from t1 where t1.d in (select d from t1 union select a from t1)),t1.e) FROM t1 WHERE t1.c between (11) and coalesce((select max(b*case t1.c when t1.f then t1.f else 11 end) from t1 where +(select count(distinct case when f not between -13 and 19 then t1.f++(f) | t1.e*(+13)* -(t1.c)-t1.b when 13=t1.d then 19 else t1.b end*t1.a) from t1) in (t1.e,11, -19)),d)+f} +} {0} +do_test randexpr-2.2570 { + db eval {SELECT coalesce((select (abs(f)/abs(coalesce((select max(19*b+d*coalesce((select max(t1.c) from t1 where case when d>=(abs((11))/abs(t1.a*13+(13)-d*e+11)) then t1.c when t1.c>=a then 19 else 
-17 end*t1.e> -e),t1.f)) from t1 where 13<=t1.e),t1.d))) from t1 where t1.d in (select d from t1 union select a from t1)),t1.e) FROM t1 WHERE NOT (t1.c between (11) and coalesce((select max(b*case t1.c when t1.f then t1.f else 11 end) from t1 where +(select count(distinct case when f not between -13 and 19 then t1.f++(f) | t1.e*(+13)* -(t1.c)-t1.b when 13=t1.d then 19 else t1.b end*t1.a) from t1) in (t1.e,11, -19)),d)+f)} +} {} +do_test randexpr-2.2571 { + db eval {SELECT case when (not 19>=(abs(t1.b)/abs(+f))) then case 13 when case t1.c when t1.b then f else 13 end then t1.e else t1.a end+coalesce((select max(c) from t1 where a not between t1.d*17*coalesce((select max(e*b) from t1 where t1.a=11),t1.c) and t1.d),t1.d) when (case when t1.d not in (e,t1.a,t1.b) then 19 else t1.f end between t1.c and 19) then t1.e else 11 end FROM t1 WHERE t1.b*+c>=17} +} {11} +do_test randexpr-2.2572 { + db eval {SELECT case when (not 19>=(abs(t1.b)/abs(+f))) then case 13 when case t1.c when t1.b then f else 13 end then t1.e else t1.a end+coalesce((select max(c) from t1 where a not between t1.d*17*coalesce((select max(e*b) from t1 where t1.a=11),t1.c) and t1.d),t1.d) when (case when t1.d not in (e,t1.a,t1.b) then 19 else t1.f end between t1.c and 19) then t1.e else 11 end FROM t1 WHERE NOT (t1.b*+c>=17)} +} {} +do_test randexpr-2.2573 { + db eval {SELECT case when d-t1.c<>e then c when (11 not between 13 and 13) then 13 else -case when c*t1.d>=b*b then -t1.f else b-t1.c | d*(case t1.d when -case t1.b when t1.b then t1.b else 19 end*19*(t1.d) then 13 else 19 end) end+19*11 end+b FROM t1 WHERE ~~case when t1.d+(abs(t1.b+coalesce((select coalesce((select max(b-a-a) from t1 where 17 not in (e,case when (t1.d)<=t1.f or -a not between c and t1.b then c when -19 in ( -d,(t1.a),19) then b else f end,t1.e)), -13) from t1 where not b in (t1.b, -t1.b,c)),e))/abs(11)) between 13 and d then (t1.b) when t1.e<13 then 19 else a end-(19)-d*t1.d not in (d,t1.c,13)} +} {500} +do_test randexpr-2.2574 { + db eval {SELECT case when d-t1.c<>e then c when (11 not between 13 and 13) then 13 else -case when c*t1.d>=b*b then -t1.f else b-t1.c | d*(case t1.d when -case t1.b when t1.b then t1.b else 19 end*19*(t1.d) then 13 else 19 end) end+19*11 end+b FROM t1 WHERE NOT (~~case when t1.d+(abs(t1.b+coalesce((select coalesce((select max(b-a-a) from t1 where 17 not in (e,case when (t1.d)<=t1.f or -a not between c and t1.b then c when -19 in ( -d,(t1.a),19) then b else f end,t1.e)), -13) from t1 where not b in (t1.b, -t1.b,c)),e))/abs(11)) between 13 and d then (t1.b) when t1.e<13 then 19 else a end-(19)-d*t1.d not in (d,t1.c,13))} +} {} +do_test randexpr-2.2575 { + db eval {SELECT case when d-t1.c<>e then c when (11 not between 13 and 13) then 13 else -case when c*t1.d>=b*b then -t1.f else b-t1.c & d*(case t1.d when -case t1.b when t1.b then t1.b else 19 end*19*(t1.d) then 13 else 19 end) end+19*11 end+b FROM t1 WHERE ~~case when t1.d+(abs(t1.b+coalesce((select coalesce((select max(b-a-a) from t1 where 17 not in (e,case when (t1.d)<=t1.f or -a not between c and t1.b then c when -19 in ( -d,(t1.a),19) then b else f end,t1.e)), -13) from t1 where not b in (t1.b, -t1.b,c)),e))/abs(11)) between 13 and d then (t1.b) when t1.e<13 then 19 else a end-(19)-d*t1.d not in (d,t1.c,13)} +} {500} +do_test randexpr-2.2576 { + db eval {SELECT 19+t1.d+d-t1.c*case when 11 not between e and ~c+(select min((abs(t1.b*t1.b)/abs(+t1.c-e))) from t1) then 17 when case when 17*13 not in (t1.b,(abs(~c)/abs( -17-e)),(t1.e)) then 17 else (t1.c) end+c+t1.f not in 
(t1.f,19,t1.f) then -t1.e else t1.f end FROM t1 WHERE (+13) between +b and t1.e+11} +} {} +do_test randexpr-2.2577 { + db eval {SELECT 19+t1.d+d-t1.c*case when 11 not between e and ~c+(select min((abs(t1.b*t1.b)/abs(+t1.c-e))) from t1) then 17 when case when 17*13 not in (t1.b,(abs(~c)/abs( -17-e)),(t1.e)) then 17 else (t1.c) end+c+t1.f not in (t1.f,19,t1.f) then -t1.e else t1.f end FROM t1 WHERE NOT ((+13) between +b and t1.e+11)} +} {-4281} +do_test randexpr-2.2578 { + db eval {SELECT 17+d*~(abs(coalesce((select 19 | f | e*~+((abs(e)/abs(+19))-t1.c-case 17 when t1.f then f else 19 end-17) from t1 where (e in (select ~(11) from t1 union select case when (f)>a then b when e between -e and t1.b then 17 else t1.f end*t1.a from t1))),t1.b)*e)/abs(17))-t1.a-19 FROM t1 WHERE t1.e not in (d,e,case 19 when - -~case when exists(select 1 from t1 where 17 not in (t1.d,t1.a,coalesce((select max(11-a) from t1 where coalesce((select +c from t1 where exists(select 1 from t1 where case when t1.f not in (19+(f),d,t1.c) then t1.c when (exists(select 1 from t1 where t1.c>19)) then 19 else f end not between a and t1.f)),t1.c)<=b),17))) then t1.d-11 else t1.b end then (d) else t1.f end-(c))} +} {} +do_test randexpr-2.2579 { + db eval {SELECT 17+d*~(abs(coalesce((select 19 | f | e*~+((abs(e)/abs(+19))-t1.c-case 17 when t1.f then f else 19 end-17) from t1 where (e in (select ~(11) from t1 union select case when (f)>a then b when e between -e and t1.b then 17 else t1.f end*t1.a from t1))),t1.b)*e)/abs(17))-t1.a-19 FROM t1 WHERE NOT (t1.e not in (d,e,case 19 when - -~case when exists(select 1 from t1 where 17 not in (t1.d,t1.a,coalesce((select max(11-a) from t1 where coalesce((select +c from t1 where exists(select 1 from t1 where case when t1.f not in (19+(f),d,t1.c) then t1.c when (exists(select 1 from t1 where t1.c>19)) then 19 else f end not between a and t1.f)),t1.c)<=b),17))) then t1.d-11 else t1.b end then (d) else t1.f end-(c)))} +} {-2353302} +do_test randexpr-2.2580 { + db eval {SELECT 17+d*~(abs(coalesce((select 19 & f & e*~+((abs(e)/abs(+19))-t1.c-case 17 when t1.f then f else 19 end-17) from t1 where (e in (select ~(11) from t1 union select case when (f)>a then b when e between -e and t1.b then 17 else t1.f end*t1.a from t1))),t1.b)*e)/abs(17))-t1.a-19 FROM t1 WHERE NOT (t1.e not in (d,e,case 19 when - -~case when exists(select 1 from t1 where 17 not in (t1.d,t1.a,coalesce((select max(11-a) from t1 where coalesce((select +c from t1 where exists(select 1 from t1 where case when t1.f not in (19+(f),d,t1.c) then t1.c when (exists(select 1 from t1 where t1.c>19)) then 19 else f end not between a and t1.f)),t1.c)<=b),17))) then t1.d-11 else t1.b end then (d) else t1.f end-(c)))} +} {-2353302} +do_test randexpr-2.2581 { + db eval {SELECT case a when b-coalesce((select coalesce((select max(c-(abs((19))/abs(coalesce((select t1.c+d from t1 where 17 not in (d,t1.c, -t1.e)),t1.b)-(e)))) from t1 where t1.d in (t1.e,e,t1.f)),(e)) from t1 where t1.b=t1.e or t1.b not between b and e or t1.f not between t1.b and e or c not between -b and a),d) then 13 else 17 end FROM t1 WHERE a+17+(17) in ( -t1.a,13,coalesce((select max(e) from t1 where b+a-13<=b),e)) or e in (select abs( -+~abs(+(min(c))))+count(distinct b) from t1 union select abs(+((max(t1.c)) | min(t1.b))) from t1) and 13 not between f and 19 or not e not in ( -17,13,t1.a)} +} {} +do_test randexpr-2.2582 { + db eval {SELECT case a when b-coalesce((select coalesce((select max(c-(abs((19))/abs(coalesce((select t1.c+d from t1 where 17 not in (d,t1.c, 
-t1.e)),t1.b)-(e)))) from t1 where t1.d in (t1.e,e,t1.f)),(e)) from t1 where t1.b=t1.e or t1.b not between b and e or t1.f not between t1.b and e or c not between -b and a),d) then 13 else 17 end FROM t1 WHERE NOT (a+17+(17) in ( -t1.a,13,coalesce((select max(e) from t1 where b+a-13<=b),e)) or e in (select abs( -+~abs(+(min(c))))+count(distinct b) from t1 union select abs(+((max(t1.c)) | min(t1.b))) from t1) and 13 not between f and 19 or not e not in ( -17,13,t1.a))} +} {17} +do_test randexpr-2.2583 { + db eval {SELECT case when not t1.e between 17 and a then 11+ -(select ~case count(distinct +13+t1.c | c*13)- -+ -cast(avg(c) AS integer)-min(19)+case -max(c) when -cast(avg(b) AS integer) then max(11) else max(13) end+count(distinct t1.f) when max(( -d)) then cast(avg(t1.c) AS integer) else (min((t1.b))) end | cast(avg(t1.f) AS integer)*count(distinct 17) from t1)+b else +t1.e | -t1.a-a end FROM t1 WHERE (t1.f<=~c+f)} +} {} +do_test randexpr-2.2584 { + db eval {SELECT case when not t1.e between 17 and a then 11+ -(select ~case count(distinct +13+t1.c | c*13)- -+ -cast(avg(c) AS integer)-min(19)+case -max(c) when -cast(avg(b) AS integer) then max(11) else max(13) end+count(distinct t1.f) when max(( -d)) then cast(avg(t1.c) AS integer) else (min((t1.b))) end | cast(avg(t1.f) AS integer)*count(distinct 17) from t1)+b else +t1.e | -t1.a-a end FROM t1 WHERE NOT ((t1.f<=~c+f))} +} {340} +do_test randexpr-2.2585 { + db eval {SELECT case when not t1.e between 17 and a then 11+ -(select ~case count(distinct +13+t1.c & c*13)- -+ -cast(avg(c) AS integer)-min(19)+case -max(c) when -cast(avg(b) AS integer) then max(11) else max(13) end+count(distinct t1.f) when max(( -d)) then cast(avg(t1.c) AS integer) else (min((t1.b))) end & cast(avg(t1.f) AS integer)*count(distinct 17) from t1)+b else +t1.e & -t1.a-a end FROM t1 WHERE NOT ((t1.f<=~c+f))} +} {-317} +do_test randexpr-2.2586 { + db eval {SELECT t1.f-coalesce((select max(case when b<>~t1.f then b+f else coalesce((select t1.c from t1 where case a when 19*t1.e+a then 19 else 13 end between 13 and a),t1.d)+b end) from t1 where (t1.c) in (select -~min(c) | max(t1.d) from t1 union select -cast(avg(t1.a) AS integer) from t1) and t1.c in (t1.a,d,11) or 1113 and not exists(select 1 from t1 where (((t1.d<=t1.b))))),e)*b*t1.f>t1.a or not exists(select 1 from t1 where (13) not in (11,t1.b,t1.c)) then c+d else t1.e end>=t1.f) or (not exists(select 1 from t1 where -11 in (c,t1.d, -a)) or a not between - - -(b) and b) or 13 between t1.b and f} +} {-200} +do_test randexpr-2.2587 { + db eval {SELECT t1.f-coalesce((select max(case when b<>~t1.f then b+f else coalesce((select t1.c from t1 where case a when 19*t1.e+a then 19 else 13 end between 13 and a),t1.d)+b end) from t1 where (t1.c) in (select -~min(c) | max(t1.d) from t1 union select -cast(avg(t1.a) AS integer) from t1) and t1.c in (t1.a,d,11) or 1113 and not exists(select 1 from t1 where (((t1.d<=t1.b))))),e)*b*t1.f>t1.a or not exists(select 1 from t1 where (13) not in (11,t1.b,t1.c)) then c+d else t1.e end>=t1.f) or (not exists(select 1 from t1 where -11 in (c,t1.d, -a)) or a not between - - -(b) and b) or 13 between t1.b and f)} +} {} +do_test randexpr-2.2588 { + db eval {SELECT t1.f-coalesce((select max(case when b<>~t1.f then b+f else coalesce((select t1.c from t1 where case a when 19*t1.e+a then 19 else 13 end between 13 and a),t1.d)+b end) from t1 where (t1.c) in (select -~min(c) & max(t1.d) from t1 union select -cast(avg(t1.a) AS integer) from t1) and t1.c in (t1.a,d,11) or 1113 and not exists(select 1 from 
t1 where (((t1.d<=t1.b))))),e)*b*t1.f>t1.a or not exists(select 1 from t1 where (13) not in (11,t1.b,t1.c)) then c+d else t1.e end>=t1.f) or (not exists(select 1 from t1 where -11 in (c,t1.d, -a)) or a not between - - -(b) and b) or 13 between t1.b and f} +} {-200} +do_test randexpr-2.2589 { + db eval {SELECT case when ( -c=~t1.b and coalesce((select max(coalesce((select b from t1 where t1.f between case when t1.e between t1.f and 19 then (t1.b) else t1.a end and t1.f),a)) from t1 where 11=t1.f),t1.f)+t1.c=d) and a>t1.c and 19> -19 and exists(select 1 from t1 where (b=17)) and 17 between 19 and 13 then 19*case (d) when c then b else a end when t1.d not in ( -t1.a,f,13) then -t1.f else (d) end FROM t1 WHERE 11*case when d | (abs(+t1.e-t1.e*b+(select case abs(count(*)) when count(*) then count(*) else cast(avg(t1.c) AS integer) end from t1)*17+(d)-13-11)/abs(c))-f- -t1.c-d | t1.a not between 19 and f then t1.a when t1.d in (select 11 from t1 union select d from t1) then e else e end-t1.c between 19 and c} +} {} +do_test randexpr-2.2590 { + db eval {SELECT case when ( -c=~t1.b and coalesce((select max(coalesce((select b from t1 where t1.f between case when t1.e between t1.f and 19 then (t1.b) else t1.a end and t1.f),a)) from t1 where 11=t1.f),t1.f)+t1.c=d) and a>t1.c and 19> -19 and exists(select 1 from t1 where (b=17)) and 17 between 19 and 13 then 19*case (d) when c then b else a end when t1.d not in ( -t1.a,f,13) then -t1.f else (d) end FROM t1 WHERE NOT (11*case when d | (abs(+t1.e-t1.e*b+(select case abs(count(*)) when count(*) then count(*) else cast(avg(t1.c) AS integer) end from t1)*17+(d)-13-11)/abs(c))-f- -t1.c-d | t1.a not between 19 and f then t1.a when t1.d in (select 11 from t1 union select d from t1) then e else e end-t1.c between 19 and c)} +} {-600} +do_test randexpr-2.2591 { + db eval {SELECT +case when t1.a-17 in (select cast(avg(t1.a*t1.d+(abs( -t1.e)/abs(case when t1.e>t1.a or a>t1.d then t1.f when 13<11 then t1.b else c end))*t1.d) AS integer) from t1 union select count(distinct t1.a) from t1) then t1.c when d in (select +count(distinct b)-case case - -cast(avg(d) AS integer) when case max(11) when min(17) then count(*) else (cast(avg(a) AS integer)) end then -count(*) else -cast(avg(17) AS integer) end when max(19) then count(distinct t1.f) else count(distinct -t1.d) end | min(e) from t1 union select count(*) from t1) then t1.d else t1.a end FROM t1 WHERE -+b>=19} +} {} +do_test randexpr-2.2592 { + db eval {SELECT +case when t1.a-17 in (select cast(avg(t1.a*t1.d+(abs( -t1.e)/abs(case when t1.e>t1.a or a>t1.d then t1.f when 13<11 then t1.b else c end))*t1.d) AS integer) from t1 union select count(distinct t1.a) from t1) then t1.c when d in (select +count(distinct b)-case case - -cast(avg(d) AS integer) when case max(11) when min(17) then count(*) else (cast(avg(a) AS integer)) end then -count(*) else -cast(avg(17) AS integer) end when max(19) then count(distinct t1.f) else count(distinct -t1.d) end | min(e) from t1 union select count(*) from t1) then t1.d else t1.a end FROM t1 WHERE NOT ( -+b>=19)} +} {100} +do_test randexpr-2.2593 { + db eval {SELECT +case when t1.a-17 in (select cast(avg(t1.a*t1.d+(abs( -t1.e)/abs(case when t1.e>t1.a or a>t1.d then t1.f when 13<11 then t1.b else c end))*t1.d) AS integer) from t1 union select count(distinct t1.a) from t1) then t1.c when d in (select +count(distinct b)-case case - -cast(avg(d) AS integer) when case max(11) when min(17) then count(*) else (cast(avg(a) AS integer)) end then -count(*) else -cast(avg(17) AS integer) end when 
max(19) then count(distinct t1.f) else count(distinct -t1.d) end & min(e) from t1 union select count(*) from t1) then t1.d else t1.a end FROM t1 WHERE NOT ( -+b>=19)} +} {100} +do_test randexpr-2.2594 { + db eval {SELECT case ~(t1.f)-t1.b-t1.e*c when coalesce((select case when d*13 | d=case when 19 between b+e+a | t1.f and a+11+t1.b*(t1.f) then d else b end then t1.c when e not between 13 and 13 then 19 else a end from t1 where 13=t1.b),c) then t1.c else b end FROM t1 WHERE not t1.d in (select cast(avg(t1.e) AS integer)-~~min(coalesce((select (abs(11)/abs(11+(select count(*)+min(17)+ -cast(avg(t1.d) AS integer) from t1)*13+coalesce((select max(b) from t1 where (t1.c)<>t1.e or e<(a)),13)-d*t1.c))*t1.b from t1 where 19<=e),c))++abs(count(distinct t1.b))+~max(11)*max(11) from t1 union select +max(11) from t1)} +} {200} +do_test randexpr-2.2595 { + db eval {SELECT case ~(t1.f)-t1.b-t1.e*c when coalesce((select case when d*13 | d=case when 19 between b+e+a | t1.f and a+11+t1.b*(t1.f) then d else b end then t1.c when e not between 13 and 13 then 19 else a end from t1 where 13=t1.b),c) then t1.c else b end FROM t1 WHERE NOT (not t1.d in (select cast(avg(t1.e) AS integer)-~~min(coalesce((select (abs(11)/abs(11+(select count(*)+min(17)+ -cast(avg(t1.d) AS integer) from t1)*13+coalesce((select max(b) from t1 where (t1.c)<>t1.e or e<(a)),13)-d*t1.c))*t1.b from t1 where 19<=e),c))++abs(count(distinct t1.b))+~max(11)*max(11) from t1 union select +max(11) from t1))} +} {} +do_test randexpr-2.2596 { + db eval {SELECT case ~(t1.f)-t1.b-t1.e*c when coalesce((select case when d*13 & d=case when 19 between b+e+a & t1.f and a+11+t1.b*(t1.f) then d else b end then t1.c when e not between 13 and 13 then 19 else a end from t1 where 13=t1.b),c) then t1.c else b end FROM t1 WHERE not t1.d in (select cast(avg(t1.e) AS integer)-~~min(coalesce((select (abs(11)/abs(11+(select count(*)+min(17)+ -cast(avg(t1.d) AS integer) from t1)*13+coalesce((select max(b) from t1 where (t1.c)<>t1.e or e<(a)),13)-d*t1.c))*t1.b from t1 where 19<=e),c))++abs(count(distinct t1.b))+~max(11)*max(11) from t1 union select +max(11) from t1)} +} {200} +do_test randexpr-2.2597 { + db eval {SELECT t1.d+(abs(11)/abs(e*~(select -+ -case min(t1.e) when count(distinct case when (exists(select 1 from t1 where (t1.d not in (19+11,t1.e,t1.c)))) then c*b-t1.a else -c end) then count(*)+case (cast(avg(a) AS integer))-count(distinct f) when -(count(*)) then count(distinct e) else max(t1.b) end- -count(distinct 17)* -min(t1.c) else count(distinct -19) end-count(distinct b) | cast(avg(19) AS integer) from t1)))-~t1.c FROM t1 WHERE c*coalesce((select e from t1 where case when exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select +b | 13 from t1 where ((17<(select (+~min(coalesce((select t1.c from t1 where case e when 13 then a else 11 end not in ( -b,t1.b,e)),(a)))) from t1)+d-19))),17) not between t1.b and t1.a)) then 11*d else t1.a end in (select c from t1 union select t1.e from t1)),t1.f) not between ( -d) and 13} +} {701} +do_test randexpr-2.2598 { + db eval {SELECT t1.d+(abs(11)/abs(e*~(select -+ -case min(t1.e) when count(distinct case when (exists(select 1 from t1 where (t1.d not in (19+11,t1.e,t1.c)))) then c*b-t1.a else -c end) then count(*)+case (cast(avg(a) AS integer))-count(distinct f) when -(count(*)) then count(distinct e) else max(t1.b) end- -count(distinct 17)* -min(t1.c) else count(distinct -19) end-count(distinct b) | cast(avg(19) AS integer) from t1)))-~t1.c FROM t1 WHERE NOT (c*coalesce((select e from t1 where 
case when exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select +b | 13 from t1 where ((17<(select (+~min(coalesce((select t1.c from t1 where case e when 13 then a else 11 end not in ( -b,t1.b,e)),(a)))) from t1)+d-19))),17) not between t1.b and t1.a)) then 11*d else t1.a end in (select c from t1 union select t1.e from t1)),t1.f) not between ( -d) and 13)} +} {} +do_test randexpr-2.2599 { + db eval {SELECT t1.d+(abs(11)/abs(e*~(select -+ -case min(t1.e) when count(distinct case when (exists(select 1 from t1 where (t1.d not in (19+11,t1.e,t1.c)))) then c*b-t1.a else -c end) then count(*)+case (cast(avg(a) AS integer))-count(distinct f) when -(count(*)) then count(distinct e) else max(t1.b) end- -count(distinct 17)* -min(t1.c) else count(distinct -19) end-count(distinct b) & cast(avg(19) AS integer) from t1)))-~t1.c FROM t1 WHERE c*coalesce((select e from t1 where case when exists(select 1 from t1 where not exists(select 1 from t1 where coalesce((select +b | 13 from t1 where ((17<(select (+~min(coalesce((select t1.c from t1 where case e when 13 then a else 11 end not in ( -b,t1.b,e)),(a)))) from t1)+d-19))),17) not between t1.b and t1.a)) then 11*d else t1.a end in (select c from t1 union select t1.e from t1)),t1.f) not between ( -d) and 13} +} {701} +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/rdonly.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/rdonly.test --- sqlite3-3.4.2/test/rdonly.test 2007-04-24 18:27:52.000000000 +0100 +++ sqlite3-3.6.16/test/rdonly.test 2009-06-05 18:03:40.000000000 +0100 @@ -13,7 +13,7 @@ # This file implements tests to make sure SQLite treats a database # as readonly if its write version is set to high. # -# $Id: rdonly.test,v 1.1 2007/04/24 17:27:52 drh Exp $ +# $Id: rdonly.test,v 1.2 2008/07/08 10:19:58 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -62,4 +62,17 @@ } } {0 {1 2}} +# Now, after connection [db] has loaded the database schema, modify the +# write-version of the file (and the change-counter, so that the +# write-version is reloaded). This way, SQLite does not discover that +# the database is read-only until after it is locked. +# +do_test rdonly-1.6 { + hexio_write test.db 18 02 ; # write-version + hexio_write test.db 24 11223344 ; # change-counter + catchsql { + INSERT INTO t1 VALUES(2); + } +} {1 {attempt to write a readonly database}} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/reindex.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/reindex.test --- sqlite3-3.4.2/test/reindex.test 2007-03-27 15:43:06.000000000 +0100 +++ sqlite3-3.6.16/test/reindex.test 2009-06-05 18:03:40.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. # This file implements tests for the REINDEX command. # -# $Id: reindex.test,v 1.3 2005/01/27 00:22:04 danielk1977 Exp $ +# $Id: reindex.test,v 1.4 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -169,4 +169,3 @@ } {} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/rollback.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/rollback.test --- sqlite3-3.4.2/test/rollback.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/rollback.test 2009-06-26 15:19:23.000000000 +0100 @@ -13,7 +13,7 @@ # caused by an ON CONFLICT ROLLBACK clause aborts any other pending # statements. 
# -# $Id: rollback.test,v 1.4 2006/01/17 09:35:02 danielk1977 Exp $ +# $Id: rollback.test,v 1.11 2009/06/26 07:12:07 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -60,14 +60,15 @@ # do_test rollback-1.5 { sqlite3_step $STMT - } {SQLITE_ABORT} + } {SQLITE_ERROR} + + # Restart the SELECT statement + # + do_test rollback-1.6 { sqlite3_reset $STMT } {SQLITE_ABORT} +} else { + do_test rollback-1.6 { sqlite3_reset $STMT } {SQLITE_OK} } -# Restart the SELECT statement -# -do_test rollback-1.6 { - sqlite3_reset $STMT -} {SQLITE_OK} do_test rollback-1.7 { sqlite3_step $STMT } {SQLITE_ROW} @@ -78,4 +79,74 @@ sqlite3_finalize $STMT } {SQLITE_OK} +set permutation "" +catch {set permutation $::permutations_test_prefix} +if {$tcl_platform(platform) == "unix" + && $permutation ne "onefile" + && $permutation ne "inmemory_journal" +} { + do_test rollback-2.1 { + execsql { + BEGIN; + INSERT INTO t3 VALUES('hello world'); + } + file copy -force test.db testA.db + file copy -force test.db-journal testA.db-journal + execsql { + COMMIT; + } + } {} + + # At this point files testA.db and testA.db-journal are present in the + # file system. This block adds a master-journal file pointer to the + # end of testA.db-journal. The master-journal file does not exist. + # + set mj [file normalize testA.db-mj-123] + binary scan $mj c* a + set cksum 0 + foreach i $a { incr cksum $i } + set mj_pgno [expr $sqlite_pending_byte / 1024] + set zAppend [binary format Ia*IIa8 $mj_pgno $mj [string length $mj] $cksum \ + "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" + ] + set iOffset [expr (([file size testA.db-journal] + 511)/512)*512] + set fd [open testA.db-journal a+] + fconfigure $fd -encoding binary -translation binary + seek $fd $iOffset + puts -nonewline $fd $zAppend + + # Also, fix the first journal-header in the journal-file. Because the + # journal file has not yet been synced, the 8-byte magic string at the + # start of the first journal-header has not been written by SQLite. + # So write it now. + seek $fd 0 + puts -nonewline $fd "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" + close $fd + + # Open a handle on testA.db and use it to query the database. At one + # point the first query would attempt a hot rollback, attempt to open + # the master-journal file and return SQLITE_CANTOPEN when it could not + # be opened. This is incorrect, it should simply delete the journal + # file and proceed with the query. + # + do_test rollback-2.2 { + sqlite3 db2 testA.db + execsql { + SELECT distinct tbl_name FROM sqlite_master; + } db2 + } {t1 t3} + if {[lsearch {exclusive persistent_journal no_journal} $permutation]<0} { + do_test rollback-2.3 { + file exists testA.db-journal + } 0 + } + do_test rollback-2.4 { + execsql { + SELECT distinct tbl_name FROM sqlite_master; + } db2 + } {t1 t3} + + db2 close +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/rowhash.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/rowhash.test --- sqlite3-3.4.2/test/rowhash.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/rowhash.test 2009-05-02 13:02:02.000000000 +0100 @@ -0,0 +1,56 @@ +# 2009 April 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. 
The +# focus of this file is the code in rowhash.c. +# +# $Id: rowhash.test,v 1.5 2009/05/02 12:02:02 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test rowhash-1.1 { + execsql { + CREATE TABLE t1(id INTEGER PRIMARY KEY, a, b, c); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + CREATE INDEX i3 ON t1(c); + } +} {} + +proc do_keyset_test {name lKey} { + db transaction { + execsql { DELETE FROM t1 } + foreach key $lKey { + execsql { INSERT OR IGNORE INTO t1 VALUES($key, 'a', 'b', 'c') } + } + } + do_test $name { + lsort -integer [execsql { + SELECT id FROM t1 WHERE a = 'a' OR b = 'b' OR c = 'c'; + }] + } [lsort -integer -unique $lKey] +} + +do_keyset_test rowhash-2.1 {1 2 3} +do_keyset_test rowhash-2.2 {0 1 2 3} +do_keyset_test rowhash-2.3 {62 125 188} +if {[working_64bit_int]} { + expr srand(1) + for {set i 4} {$i < 10} {incr i} { + for {set j 0} {$j < 5000} {incr j} { + lappend L [expr int(rand()*1000000000)] + } + do_keyset_test rowhash-2.$i $L + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/rowid.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/rowid.test --- sqlite3-3.4.2/test/rowid.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/rowid.test 2009-06-26 16:14:55.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the magic ROWID column that is # found on all tables. # -# $Id: rowid.test,v 1.19 2007/04/25 11:32:30 drh Exp $ +# $Id: rowid.test,v 1.21 2009/06/26 15:14:55 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -657,18 +657,47 @@ # Test the automatic generation of rowids when the table already contains # a rowid with the maximum value. # +# Once the the maximum rowid is taken, rowids are normally chosen at +# random. By by reseting the random number generator, we can cause +# the rowid guessing loop to collide with prior rowids, and test the +# loop out to its limit of 100 iterations. After 100 collisions, the +# rowid guesser gives up and reports SQLITE_FULL. +# do_test rowid-12.1 { execsql { CREATE TABLE t7(x INTEGER PRIMARY KEY, y); + CREATE TABLE t7temp(a INTEGER PRIMARY KEY); INSERT INTO t7 VALUES(9223372036854775807,'a'); SELECT y FROM t7; } } {a} do_test rowid-12.2 { + db close + sqlite3 db test.db + save_prng_state execsql { INSERT INTO t7 VALUES(NULL,'b'); - SELECT y FROM t7; + SELECT x, y FROM t7; + } +} {1 b 9223372036854775807 a} +execsql {INSERT INTO t7 VALUES(2,'y');} +for {set i 1} {$i<100} {incr i} { + do_test rowid-12.3.$i { + db eval {DELETE FROM t7temp; INSERT INTO t7temp VALUES(1);} + restore_prng_state + execsql { + INSERT INTO t7 VALUES(NULL,'x'); + SELECT count(*) FROM t7 WHERE y=='x'; + } + } $i +} +do_test rowid-12.4 { + db eval {DELETE FROM t7temp; INSERT INTO t7temp VALUES(1);} + restore_prng_state + catchsql { + INSERT INTO t7 VALUES(NULL,'x'); } -} {b a} +} {1 {database or disk is full}} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/rtree.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/rtree.test --- sqlite3-3.4.2/test/rtree.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/rtree.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,40 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all rtree related tests. 
+# +# $Id: rtree.test,v 1.3 2009/05/25 14:17:35 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +rename finish_test rtree_finish_test +proc finish_test {} {} + +set RTREE_EXCLUDE { } +if {[info exists ISQUICK] && $ISQUICK} { + set RTREE_EXCLUDE rtree3.test +} + +set rtreedir [file join $testdir .. ext rtree] + +foreach testfile [lsort -dictionary [glob -nocomplain $rtreedir/*.test]] { + set tail [file tail $testfile] + if {[lsearch -exact $RTREE_EXCLUDE $tail]>=0} continue + source $testfile + catch {db close} + if {$sqlite_open_file_count>0} { + puts "$tail did not close all files: $sqlite_open_file_count" + incr nErr + lappend ::failList $tail + set sqlite_open_file_count 0 + } +} + +set sqlite_open_file_count 0 +rtree_finish_test +rename finish_test {} +rename rtree_finish_test finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/safety.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/safety.test --- sqlite3-3.4.2/test/safety.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/safety.test 2009-06-05 18:03:40.000000000 +0100 @@ -13,11 +13,31 @@ # functions. Those routines are not strictly necessary - they are # designed to detect misuse of the library. # -# $Id: safety.test,v 1.2 2006/01/03 00:33:50 drh Exp $ +# $Id: safety.test,v 1.4 2008/03/18 13:46:53 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !debug { + puts "Skipping safety tests since SQLITE_DEBUG is off" + finish_test + return +} + +# Return the UTF-8 representation of the supplied UTF-16 string $str. +proc utf8 {str} { + # If $str ends in two 0x00 0x00 bytes, knock these off before + # converting to UTF-8 using TCL. + binary scan $str \c* vals + if {[lindex $vals end]==0 && [lindex $vals end-1]==0} { + set str [binary format \c* [lrange $vals 0 end-2]] + } + + set r [encoding convertfrom unicode $str] + return $r +} + + do_test safety-1.1 { set DB [sqlite3_connection_pointer db] db eval {CREATE TABLE t1(a)} @@ -40,6 +60,11 @@ SELECT safety_on(), name FROM sqlite_master } } {1 {library routine called out of sequence}} +ifcapable {utf16} { + do_test safety-2.1.1 { + utf8 [sqlite3_errmsg16 db] + } {library routine called out of sequence} +} do_test safety-2.2 { catchsql { SELECT 'hello' diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint2.test --- sqlite3-3.4.2/test/savepoint2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint2.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,148 @@ +# 2008 December 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: savepoint2.test,v 1.5 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Tests in this file are quite similar to those run by trans.test and +# avtrans.test. 
+# + +proc signature {} { + return [db eval {SELECT count(*), md5sum(x) FROM t3}] +} + +do_test savepoint2-1 { + execsql { + PRAGMA cache_size=10; + BEGIN; + CREATE TABLE t3(x TEXT); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 VALUES(randstr(10,400)); + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + INSERT INTO t3 SELECT randstr(10,400) FROM t3; + COMMIT; + SELECT count(*) FROM t3; + } +} {1024} + +unset -nocomplain ::sig +unset -nocomplain SQL + +set iterations 20 + +set SQL(1) { + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; +} +set SQL(2) { + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; + DELETE FROM t3 WHERE random()%10!=0; + INSERT INTO t3 SELECT randstr(10,10)||x FROM t3; +} +set SQL(3) { + UPDATE t3 SET x = randstr(10, 400) WHERE random()%10; + INSERT INTO t3 SELECT x FROM t3 WHERE random()%10; + DELETE FROM t3 WHERE random()%10; +} +set SQL(4) { + INSERT INTO t3 SELECT randstr(10,400) FROM t3 WHERE (random()%10 == 0); +} + + + +for {set ii 2} {$ii < ($iterations+2)} {incr ii} { + + # Record the database signature. Optionally (every second run) open a + # transaction. In all cases open savepoint "one", which may or may + # not be a transaction savepoint, depending on whether or not a real + # transaction has been opened. + # + do_test savepoint2-$ii.1 { + if {$ii % 2} { execsql BEGIN } + set ::sig(one) [signature] + execsql "SAVEPOINT one" + } {} + + # Execute some SQL on the database. Then rollback to savepoint "one". + # Check that the database signature is as it was when "one" was opened. + # + do_test savepoint2-$ii.2 { + execsql $SQL(1) + execsql "ROLLBACK to one" + signature + } $::sig(one) + integrity_check savepoint2-$ii.2.1 + + # Execute some SQL. Then open savepoint "two". Savepoint "two" is therefore + # nested in savepoint "one". + # + do_test savepoint2-$ii.3 { + execsql $SQL(1) + set ::sig(two) [signature] + execsql "SAVEPOINT two" + } {} + + # More SQL changes. The rollback to savepoint "two". Check that the + # signature is as it was when savepoint "two" was opened. + # + do_test savepoint2-$ii.4 { + execsql $SQL(2) + execsql "ROLLBACK to two" + signature + } $::sig(two) + integrity_check savepoint2-$ii.4.1 + + # More SQL changes. The rollback to savepoint "two". Check that the + # signature is as it was when savepoint "two" was opened. + # + do_test savepoint2-$ii.5 { + execsql $SQL(2) + execsql "SAVEPOINT three" + execsql $SQL(3) + execsql "RELEASE three" + execsql "ROLLBACK to one" + signature + } $::sig(one) + + # By this point the database is in the same state as it was at the + # top of the for{} loop (everything having been rolled back by the + # "ROLLBACK TO one" command above). So make a few changes to the + # database and COMMIT the open transaction, so that the next iteration + # of the for{} loop works on a different dataset. + # + # The transaction being committed here may have been opened normally using + # "BEGIN", or may have been opened using a transaction savepoint created + # by the "SAVEPOINT one" statement. 
+ # + do_test savepoint2-$ii.6 { + execsql $SQL(4) + execsql COMMIT + sqlite3_get_autocommit db + } {1} + integrity_check savepoint2-$ii.6.1 +} + +unset -nocomplain ::sig +unset -nocomplain SQL + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint3.test --- sqlite3-3.4.2/test/savepoint3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint3.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,136 @@ +# 2008 December 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: savepoint3.test,v 1.5 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +source $testdir/malloc_common.tcl + +do_malloc_test savepoint3-1 -sqlprep { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); +} -sqlbody { + SAVEPOINT one; + INSERT INTO t1 VALUES(4, 5, 6); + SAVEPOINT two; + DELETE FROM t1; + ROLLBACK TO two; + RELEASE one; +} + +do_malloc_test savepoint3-2 -sqlprep { + PRAGMA cache_size = 10; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(randstr(400,400), randstr(400,400), randstr(400,400)); + INSERT INTO t1 SELECT + randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; + INSERT INTO t1 + SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1; +} -sqlbody { + PRAGMA cache_size = 10; + SAVEPOINT one; + DELETE FROM t1 WHERE rowid < 5; + SAVEPOINT two; + DELETE FROM t1 WHERE rowid > 10; + ROLLBACK TO two; + ROLLBACK TO one; + RELEASE one; +} + +do_ioerr_test savepoint3.3 -sqlprep { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, randstr(1000,1000), randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000), randstr(1000,1000)); +} -sqlbody { + BEGIN; + UPDATE t1 SET a = 3 WHERE a = 1; + SAVEPOINT one; + UPDATE t1 SET a = 4 WHERE a = 2; + COMMIT; +} -cleanup { + db eval { + SAVEPOINT one; + RELEASE one; + } +} + +# The following test does a really big savepoint rollback. One involving +# more than 4000 pages. The idea is to get a specific sqlite3BitvecSet() +# operation in pagerPlaybackSavepoint() to fail. 
+#do_malloc_test savepoint3-4 -sqlprep { +# BEGIN; +# CREATE TABLE t1(a, b); +# CREATE INDEX i1 ON t1(a); +# CREATE INDEX i2 ON t1(b); +# INSERT INTO t1 VALUES(randstr(500,500), randstr(500,500)); -- 1 +# INSERT INTO t1 VALUES(randstr(500,500), randstr(500,500)); -- 2 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 4 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 8 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 16 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 32 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 64 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 128 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 256 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 512 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 1024 +# INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1; -- 2048 +# COMMIT; +# BEGIN; +# SAVEPOINT abc; +# UPDATE t1 SET a = randstr(500,500); +#} -sqlbody { +# ROLLBACK TO abc; +#} + + +# Cause a specific malloc in savepoint rollback code to fail. +# +do_malloc_test savepoint3-4 -start 7 -sqlprep { + PRAGMA auto_vacuum = incremental; + PRAGMA cache_size = 1000; + + CREATE TABLE t1(a, b); + CREATE TABLE t2(a, b); + CREATE TABLE t3(a, b); + INSERT INTO t1 VALUES(1, randstr(500,500)); + INSERT INTO t1 VALUES(2, randstr(500,500)); + INSERT INTO t1 VALUES(3, randstr(500,500)); + DELETE FROM t1; + + BEGIN; + INSERT INTO t1 VALUES(1, randstr(500,500)); + INSERT INTO t1 VALUES(2, randstr(500,500)); + INSERT INTO t1 VALUES(3, randstr(500,500)); + DROP TABLE t3; -- Page 5 of the database file is now free. + DROP TABLE t2; -- Page 4 of the database file is now free. + + SAVEPOINT abc; + PRAGMA incremental_vacuum; +} -sqlbody { + ROLLBACK TO abc; +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint4.test --- sqlite3-3.4.2/test/savepoint4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint4.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,169 @@ +# 2008 December 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: savepoint4.test,v 1.7 2009/06/09 15:25:33 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !crashtest { + finish_test + return +} + +proc signature {} { + return [db eval {SELECT count(*), md5sum(x) FROM t1}] +} + +set ITERATIONS 25 ;# Number of iterations for savepoint4-1 +set ITERATIONS2 13 ;# Number of iterations for savepoint4-2 +expr srand(0) + +do_test savepoint4-1 { + execsql { + PRAGMA cache_size=10; + BEGIN; + CREATE TABLE t1(x TEXT); + INSERT INTO t1 VALUES(randstr(10,400)); + INSERT INTO t1 VALUES(randstr(10,400)); + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + COMMIT; + SELECT count(*) FROM t1; + } +} {1024} + + +unset -nocomplain ::sig + +for {set ii 1} {$ii<=$ITERATIONS} {incr ii} { + set ::sig [signature] + + for {set iDelay 1 ; set crashed 1} {$crashed} {incr iDelay} { + + do_test savepoint4-1.$ii.1.$iDelay { + set ret [crashsql -delay $iDelay -file test.db-journal { + PRAGMA cache_size = 20; + SAVEPOINT one; + DELETE FROM t1 WHERE random()%2==0; + SAVEPOINT two; + INSERT INTO t1 SELECT randstr(10,10)||x FROM t1; + ROLLBACK TO two; + UPDATE t1 SET x = randstr(10, 400) WHERE random()%10; + RELEASE two; + ROLLBACK TO one; + RELEASE one; + }] + signature + } $::sig + + set crashed [lindex $ret 0] + integrity_check savepoint4-1.$ii.1.$iDelay.integrity + } + + do_test savepoint4-1.$ii.2 { + execsql { + DELETE FROM t1 WHERE random()%10==0; + INSERT INTO t1 SELECT randstr(10,10)||x FROM t1 WHERE random()%9==0; + } + } {} +} + +do_test savepoint4-2 { + execsql { + PRAGMA cache_size=10; + DROP TABLE IF EXISTS t1; + BEGIN; + CREATE TABLE t1(x TEXT); + CREATE INDEX i1 ON t1(x); + INSERT INTO t1 VALUES(randstr(10,400)); + INSERT INTO t1 VALUES(randstr(10,400)); + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + INSERT INTO t1 SELECT randstr(10,400) FROM t1; + COMMIT; + SELECT count(*) FROM t1; + } +} {256} + +for {set ii 1} {$ii<=$ITERATIONS2} {incr ii} { + set ::sig [signature] + set file test.db-journal + + for {set iDelay 1 ; set crashed 1} {$crashed} {incr iDelay} { + + do_test savepoint4-2.$ii.1.$iDelay { + + set ret [crashsql -delay $iDelay -file $file { + SAVEPOINT one; + INSERT INTO t1 SELECT * FROM t1 WHERE rowid<50; + ROLLBACK TO one; + INSERT INTO t1 SELECT * FROM t1 WHERE rowid<50; + SAVEPOINT two; + DELETE FROM t1 WHERE (random()%10)==0; + SAVEPOINT three; + DELETE FROM t1 WHERE (random()%10)==0; + SAVEPOINT four; + DELETE FROM t1 WHERE (random()%10)==0; + RELEASE two; + + SAVEPOINT three; + UPDATE t1 SET x = substr(x||x, 12, 100000) WHERE (rowid%12)==0; + SAVEPOINT four; + UPDATE t1 SET x = substr(x||x, 14, 100000) WHERE (rowid%14)==0; + ROLLBACK TO three; + UPDATE t1 SET x = substr(x||x, 13, 100000) WHERE (rowid%13)==0; + RELEASE three; + + DELETE FROM t1 WHERE rowid > ( + SELECT rowid FROM t1 ORDER BY rowid 
ASC LIMIT 1 OFFSET 256 + ); + RELEASE one; + }] + + set crashed [lindex $ret 0] + if {$crashed} { + signature + } else { + set ::sig + } + } $::sig + + integrity_check savepoint4-2.$ii.1.$iDelay.integrity + + if {$crashed == 0 && $file == "test.db-journal"} { + set crashed 1 + set iDelay 0 + set file test.db + set ::sig [signature] + } + } + + do_test savepoint4-2.$ii.2 { + execsql { + DELETE FROM t1 WHERE random()%10==0; + INSERT INTO t1 SELECT randstr(10,10)||x FROM t1 WHERE random()%9==0; + } + } {} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint5.test --- sqlite3-3.4.2/test/savepoint5.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint5.test 2009-01-02 21:08:09.000000000 +0000 @@ -0,0 +1,46 @@ +# 2009 January 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Verify that a SAVEPOINT on a new, empty database followed by a +# ROLLBACK TO that savepoint starts over again with another new +# empty database. +# +# $Id: savepoint5.test,v 1.1 2009/01/02 21:08:09 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test savepoint5-1.1 { + db eval { + SAVEPOINT sp1; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT count(*) FROM sqlite_master; + SELECT * FROM t1; + } +} {1 1} +do_test savepoint5-1.2 { + db eval { + ROLLBACK TO sp1; + SELECT count(*) FROM sqlite_master; + } +} {0} +do_test savepoint5-1.3 { + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT count(*) FROM sqlite_master; + SELECT * FROM t1; + } +} {1 1} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint6.test --- sqlite3-3.4.2/test/savepoint6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint6.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,272 @@ +# 2009 January 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: savepoint6.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc sql {zSql} { + uplevel db eval [list $zSql] + #puts stderr "$zSql ;" +} + +set DATABASE_SCHEMA { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(x, y); + CREATE UNIQUE INDEX i1 ON t1(x); + CREATE INDEX i2 ON t1(y); +} + +#-------------------------------------------------------------------------- +# In memory database state. +# +# ::lSavepoint is a list containing one entry for each active savepoint. The +# first entry in the list corresponds to the most recently opened savepoint. +# Each entry consists of two elements: +# +# 1. The savepoint name. +# +# 2. A serialized Tcl array representing the contents of table t1 at the +# start of the savepoint. The keys of the array are the x values. The +# values are the y values. +# +# Array ::aEntry contains the contents of database table t1. Array keys are +# x values, the array data values are y values. 
+# +set lSavepoint [list] +array set aEntry [list] + +proc x_to_y {x} { + set nChar [expr int(rand()*250) + 250] + set str " $nChar [string repeat $x. $nChar]" + string range $str 1 $nChar +} +#-------------------------------------------------------------------------- + +#------------------------------------------------------------------------- +# Procs to operate on database: +# +# savepoint NAME +# rollback NAME +# release NAME +# +# insert_rows XVALUES +# delete_rows XVALUES +# +proc savepoint {zName} { + catch { sql "SAVEPOINT $zName" } + lappend ::lSavepoint [list $zName [array get ::aEntry]] +} + +proc rollback {zName} { + catch { sql "ROLLBACK TO $zName" } + for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { + set zSavepoint [lindex $::lSavepoint $i 0] + if {$zSavepoint eq $zName} { + unset -nocomplain ::aEntry + array set ::aEntry [lindex $::lSavepoint $i 1] + + + if {$i+1 < [llength $::lSavepoint]} { + set ::lSavepoint [lreplace $::lSavepoint [expr $i+1] end] + } + break + } + } +} + +proc release {zName} { + catch { sql "RELEASE $zName" } + for {set i [expr {[llength $::lSavepoint]-1}]} {$i>=0} {incr i -1} { + set zSavepoint [lindex $::lSavepoint $i 0] + if {$zSavepoint eq $zName} { + set ::lSavepoint [lreplace $::lSavepoint $i end] + break + } + } + + if {[llength $::lSavepoint] == 0} { + #puts stderr "-- End of transaction!!!!!!!!!!!!!" + } +} + +proc insert_rows {lX} { + foreach x $lX { + set y [x_to_y $x] + + # Update database [db] + sql "INSERT OR REPLACE INTO t1 VALUES($x, '$y')" + + # Update the Tcl database. + set ::aEntry($x) $y + } +} + +proc delete_rows {lX} { + foreach x $lX { + # Update database [db] + sql "DELETE FROM t1 WHERE x = $x" + + # Update the Tcl database. + unset -nocomplain ::aEntry($x) + } +} +#------------------------------------------------------------------------- + +#------------------------------------------------------------------------- +# Proc to compare database content with the in-memory representation. +# +# checkdb +# +proc checkdb {} { + set nEntry [db one {SELECT count(*) FROM t1}] + set nEntry2 [array size ::aEntry] + if {$nEntry != $nEntry2} { + error "$nEntry entries in database, $nEntry2 entries in array" + } + db eval {SELECT x, y FROM t1} { + if {![info exists ::aEntry($x)]} { + error "Entry $x exists in database, but not in array" + } + if {$::aEntry($x) ne $y} { + error "Entry $x is set to {$y} in database, {$::aEntry($x)} in array" + } + } + + db eval { PRAGMA integrity_check } +} +#------------------------------------------------------------------------- + +#------------------------------------------------------------------------- +# Proc to return random set of x values. 
+# +# random_integers +# +proc random_integers {nRes nRange} { + set ret [list] + for {set i 0} {$i<$nRes} {incr i} { + lappend ret [expr int(rand()*$nRange)] + } + return $ret +} +#------------------------------------------------------------------------- + +proc database_op {} { + set i [expr int(rand()*2)] + if {$i==0} { + insert_rows [random_integers 100 1000] + } + if {$i==1} { + delete_rows [random_integers 100 1000] + set i [expr int(rand()*3)] + if {$i==0} { + sql {PRAGMA incremental_vacuum} + } + } +} + +proc savepoint_op {} { + set names {one two three four five} + set cmds {savepoint savepoint savepoint savepoint release rollback} + + set C [lindex $cmds [expr int(rand()*6)]] + set N [lindex $names [expr int(rand()*5)]] + + #puts stderr " $C $N ; " + #flush stderr + + $C $N + return ok +} + +expr srand(0) + +############################################################################ +############################################################################ +# Start of test cases. + +do_test savepoint6-1.1 { + sql $DATABASE_SCHEMA +} {} +do_test savepoint6-1.2 { + insert_rows { + 497 166 230 355 779 588 394 317 290 475 362 193 805 851 564 + 763 44 930 389 819 765 760 966 280 538 414 500 18 25 287 320 + 30 382 751 87 283 981 429 630 974 421 270 810 405 + } + + savepoint one + insert_rows 858 + delete_rows 930 + savepoint two + execsql {PRAGMA incremental_vacuum} + savepoint three + insert_rows 144 + rollback three + rollback two + release one + + execsql {SELECT count(*) FROM t1} +} {44} + +foreach zSetup [list { + set testname normal + sqlite3 db test.db +} { + set testname tempdb + sqlite3 db "" +} { + if {[catch {set ::permutations_test_prefix} z] == 0 && $z eq "journaltest"} { + continue + } + set testname nosync + sqlite3 db test.db + sql { PRAGMA synchronous = off } +} { + set testname smallcache + sqlite3 db test.db + sql { PRAGMA cache_size = 10 } +}] { + + unset -nocomplain ::lSavepoint + unset -nocomplain ::aEntry + + catch { db close } + file delete -force test.db + eval $zSetup + sql $DATABASE_SCHEMA + + do_test savepoint6-$testname.setup { + savepoint one + insert_rows [random_integers 100 1000] + release one + checkdb + } {ok} + + for {set i 0} {$i < 1000} {incr i} { + do_test savepoint6-$testname.$i.1 { + savepoint_op + checkdb + } {ok} + + do_test savepoint6-$testname.$i.2 { + database_op + database_op + checkdb + } {ok} + } +} + +unset -nocomplain ::lSavepoint +unset -nocomplain ::aEntry + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/savepoint.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/savepoint.test --- sqlite3-3.4.2/test/savepoint.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/savepoint.test 2009-02-04 10:09:04.000000000 +0000 @@ -0,0 +1,862 @@ +# 2008 December 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: savepoint.test,v 1.12 2009/02/04 10:09:04 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +#---------------------------------------------------------------------- +# The following tests - savepoint-1.* - test that the SAVEPOINT, RELEASE +# and ROLLBACK TO comands are correctly parsed, and that the auto-commit +# flag is correctly set and unset as a result. 
+# +do_test savepoint-1.1 { + execsql { + SAVEPOINT sp1; + RELEASE sp1; + } +} {} +do_test savepoint-1.2 { + execsql { + SAVEPOINT sp1; + ROLLBACK TO sp1; + } +} {} +do_test savepoint-1.3 { + execsql { SAVEPOINT sp1 } + db close +} {} +sqlite3 db test.db +do_test savepoint-1.4.1 { + execsql { + SAVEPOINT sp1; + SAVEPOINT sp2; + RELEASE sp1; + } + sqlite3_get_autocommit db +} {1} +do_test savepoint-1.4.2 { + execsql { + SAVEPOINT sp1; + SAVEPOINT sp2; + RELEASE sp2; + } + sqlite3_get_autocommit db +} {0} +do_test savepoint-1.4.3 { + execsql { RELEASE sp1 } + sqlite3_get_autocommit db +} {1} +do_test savepoint-1.4.4 { + execsql { + SAVEPOINT sp1; + SAVEPOINT sp2; + ROLLBACK TO sp1; + } + sqlite3_get_autocommit db +} {0} +do_test savepoint-1.4.5 { + execsql { RELEASE SAVEPOINT sp1 } + sqlite3_get_autocommit db +} {1} +do_test savepoint-1.4.6 { + execsql { + SAVEPOINT sp1; + SAVEPOINT sp2; + SAVEPOINT sp3; + ROLLBACK TO SAVEPOINT sp3; + ROLLBACK TRANSACTION TO sp2; + ROLLBACK TRANSACTION TO SAVEPOINT sp1; + } + sqlite3_get_autocommit db +} {0} +do_test savepoint-1.4.7 { + execsql { RELEASE SAVEPOINT SP1 } + sqlite3_get_autocommit db +} {1} +do_test savepoint-1.5 { + execsql { + SAVEPOINT sp1; + ROLLBACK TO sp1; + } +} {} +do_test savepoint-1.6 { + execsql COMMIT +} {} + +#------------------------------------------------------------------------ +# These tests - savepoint-2.* - test rollbacks and releases of savepoints +# with a very simple data set. +# + +do_test savepoint-2.1 { + execsql { + CREATE TABLE t1(a, b, c); + BEGIN; + INSERT INTO t1 VALUES(1, 2, 3); + SAVEPOINT one; + UPDATE t1 SET a = 2, b = 3, c = 4; + } + execsql { SELECT * FROM t1 } +} {2 3 4} +do_test savepoint-2.2 { + execsql { + ROLLBACK TO one; + } + execsql { SELECT * FROM t1 } +} {1 2 3} +do_test savepoint-2.3 { + execsql { + INSERT INTO t1 VALUES(4, 5, 6); + } + execsql { SELECT * FROM t1 } +} {1 2 3 4 5 6} +do_test savepoint-2.4 { + execsql { + ROLLBACK TO one; + } + execsql { SELECT * FROM t1 } +} {1 2 3} + + +do_test savepoint-2.5 { + execsql { + INSERT INTO t1 VALUES(7, 8, 9); + SAVEPOINT two; + INSERT INTO t1 VALUES(10, 11, 12); + } + execsql { SELECT * FROM t1 } +} {1 2 3 7 8 9 10 11 12} +do_test savepoint-2.6 { + execsql { + ROLLBACK TO two; + } + execsql { SELECT * FROM t1 } +} {1 2 3 7 8 9} +do_test savepoint-2.7 { + execsql { + INSERT INTO t1 VALUES(10, 11, 12); + } + execsql { SELECT * FROM t1 } +} {1 2 3 7 8 9 10 11 12} +do_test savepoint-2.8 { + execsql { + ROLLBACK TO one; + } + execsql { SELECT * FROM t1 } +} {1 2 3} +do_test savepoint-2.9 { + execsql { + INSERT INTO t1 VALUES('a', 'b', 'c'); + SAVEPOINT two; + INSERT INTO t1 VALUES('d', 'e', 'f'); + } + execsql { SELECT * FROM t1 } +} {1 2 3 a b c d e f} +do_test savepoint-2.10 { + execsql { + RELEASE two; + } + execsql { SELECT * FROM t1 } +} {1 2 3 a b c d e f} +do_test savepoint-2.11 { + execsql { + ROLLBACK; + } + execsql { SELECT * FROM t1 } +} {} + +#------------------------------------------------------------------------ +# This block of tests - savepoint-3.* - test that when a transaction +# savepoint is rolled back, locks are not released from database files. +# And that when a transaction savepoint is released, they are released. 
+# +do_test savepoint-3.1 { + execsql { SAVEPOINT "transaction" } + execsql { PRAGMA lock_status } +} {main unlocked temp closed} + +do_test savepoint-3.2 { + execsql { INSERT INTO t1 VALUES(1, 2, 3) } + execsql { PRAGMA lock_status } +} {main reserved temp closed} + +do_test savepoint-3.3 { + execsql { ROLLBACK TO "transaction" } + execsql { PRAGMA lock_status } +} {main reserved temp closed} + +do_test savepoint-3.4 { + execsql { INSERT INTO t1 VALUES(1, 2, 3) } + execsql { PRAGMA lock_status } +} {main reserved temp closed} + +do_test savepoint-3.5 { + execsql { RELEASE "transaction" } + execsql { PRAGMA lock_status } +} {main unlocked temp closed} + +#------------------------------------------------------------------------ +# Test that savepoints that include schema modifications are handled +# correctly. Test cases savepoint-4.*. +# +do_test savepoint-4.1 { + execsql { + CREATE TABLE t2(d, e, f); + SELECT sql FROM sqlite_master; + } +} {{CREATE TABLE t1(a, b, c)} {CREATE TABLE t2(d, e, f)}} +do_test savepoint-4.2 { + execsql { + BEGIN; + CREATE TABLE t3(g,h); + INSERT INTO t3 VALUES('I', 'II'); + SAVEPOINT one; + DROP TABLE t3; + } +} {} +do_test savepoint-4.3 { + execsql { + CREATE TABLE t3(g, h, i); + INSERT INTO t3 VALUES('III', 'IV', 'V'); + } + execsql {SELECT * FROM t3} +} {III IV V} +do_test savepoint-4.4 { + execsql { ROLLBACK TO one; } + execsql {SELECT * FROM t3} +} {I II} +do_test savepoint-4.5 { + execsql { + ROLLBACK; + SELECT sql FROM sqlite_master; + } +} {{CREATE TABLE t1(a, b, c)} {CREATE TABLE t2(d, e, f)}} + +do_test savepoint-4.6 { + execsql { + BEGIN; + INSERT INTO t1 VALUES('o', 't', 't'); + SAVEPOINT sp1; + CREATE TABLE t3(a, b, c); + INSERT INTO t3 VALUES('z', 'y', 'x'); + } + execsql {SELECT * FROM t3} +} {z y x} +do_test savepoint-4.7 { + execsql { + ROLLBACK TO sp1; + CREATE TABLE t3(a); + INSERT INTO t3 VALUES('value'); + } + execsql {SELECT * FROM t3} +} {value} +do_test savepoint-4.8 { + execsql COMMIT +} {} + +#------------------------------------------------------------------------ +# Test some logic errors to do with the savepoint feature. 
+# + +do_test savepoint-5.1.1 { + execsql { + CREATE TABLE blobs(x); + INSERT INTO blobs VALUES('a twentyeight character blob'); + } + set fd [db incrblob blobs x 1] + puts -nonewline $fd "hello" + catchsql {SAVEPOINT abc} +} {1 {cannot open savepoint - SQL statements in progress}} +do_test savepoint-5.1.2 { + close $fd + catchsql {SAVEPOINT abc} +} {0 {}} + +do_test savepoint-5.2 { + execsql {RELEASE abc} + catchsql {RELEASE abc} +} {1 {no such savepoint: abc}} + +do_test savepoint-5.3.1 { + execsql {SAVEPOINT abc} + catchsql {ROLLBACK TO def} +} {1 {no such savepoint: def}} +do_test savepoint-5.3.2 { + execsql {SAVEPOINT def} + set fd [db incrblob -readonly blobs x 1] + catchsql {ROLLBACK TO def} +} {1 {cannot rollback savepoint - SQL statements in progress}} +do_test savepoint-5.3.3 { + catchsql {RELEASE def} +} {0 {}} +do_test savepoint-5.3.4 { + close $fd + execsql {savepoint def} + set fd [db incrblob blobs x 1] + catchsql {release def} +} {1 {cannot release savepoint - SQL statements in progress}} +do_test savepoint-5.3.5 { + close $fd + execsql {release abc} +} {} + +do_test savepoint-5.4.1 { + execsql { + SAVEPOINT main; + INSERT INTO blobs VALUES('another blob'); + } +} {} +do_test savepoint-5.4.2 { + sqlite3 db2 test.db + execsql { BEGIN ; SELECT * FROM blobs } db2 + catchsql { RELEASE main } +} {1 {database is locked}} +do_test savepoint-5.4.3 { + db2 close + catchsql { RELEASE main } +} {0 {}} +do_test savepoint-5.4.4 { + execsql { SELECT x FROM blobs WHERE rowid = 2 } +} {{another blob}} + +#------------------------------------------------------------------------- +# The following tests, savepoint-6.*, test an incr-vacuum inside of a +# couple of nested savepoints. +# +ifcapable {autovacuum && pragma} { + db close + file delete -force test.db + sqlite3 db test.db + + do_test savepoint-6.1 { + execsql { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(a, b); + BEGIN; + INSERT INTO t1 VALUES(randstr(10,400),randstr(10,400),randstr(10,400)); + } + set r "randstr(10,400)" + for {set ii 0} {$ii < 10} {incr ii} { + execsql "INSERT INTO t1 SELECT $r, $r, $r FROM t1" + } + execsql { COMMIT } + } {} + + integrity_check savepoint-6.2 + + do_test savepoint-6.3 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET a = randstr(10,10) WHERE (rowid%4)==0; + SAVEPOINT one; + DELETE FROM t1 WHERE rowid%2; + PRAGMA incr_vacuum; + SAVEPOINT two; + INSERT INTO t1 SELECT randstr(10,400), randstr(10,400), c FROM t1; + DELETE FROM t1 WHERE rowid%2; + PRAGMA incr_vacuum; + ROLLBACK TO one; + COMMIT; + } + } {} + + integrity_check savepoint-6.4 +} + +#------------------------------------------------------------------------- +# The following tests, savepoint-7.*, attempt to break the logic +# surrounding savepoints by growing and shrinking the database file. 
+# +db close +file delete -force test.db +sqlite3 db test.db + +do_test savepoint-7.1 { + execsql { + PRAGMA auto_vacuum = incremental; + PRAGMA cache_size = 10; + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1(a) VALUES('alligator'); + INSERT INTO t1(a) VALUES('angelfish'); + INSERT INTO t1(a) VALUES('ant'); + INSERT INTO t1(a) VALUES('antelope'); + INSERT INTO t1(a) VALUES('ape'); + INSERT INTO t1(a) VALUES('baboon'); + INSERT INTO t1(a) VALUES('badger'); + INSERT INTO t1(a) VALUES('bear'); + INSERT INTO t1(a) VALUES('beetle'); + INSERT INTO t1(a) VALUES('bird'); + INSERT INTO t1(a) VALUES('bison'); + UPDATE t1 SET b = randstr(1000,1000); + UPDATE t1 SET b = b||randstr(1000,1000); + UPDATE t1 SET b = b||randstr(1000,1000); + UPDATE t1 SET b = b||randstr(10,1000); + COMMIT; + } + expr ([execsql { PRAGMA page_count }] > 20) +} {1} +do_test savepoint-7.2.1 { + execsql { + BEGIN; + SAVEPOINT one; + CREATE TABLE t2(a, b); + INSERT INTO t2 SELECT a, b FROM t1; + ROLLBACK TO one; + } + execsql { + PRAGMA integrity_check; + } +} {ok} +do_test savepoint-7.2.2 { + execsql { + COMMIT; + PRAGMA integrity_check; + } +} {ok} + +do_test savepoint-7.3.1 { + execsql { + CREATE TABLE t2(a, b); + INSERT INTO t2 SELECT a, b FROM t1; + } +} {} +do_test savepoint-7.3.2 { + execsql { + BEGIN; + SAVEPOINT one; + DELETE FROM t2; + PRAGMA incremental_vacuum; + SAVEPOINT two; + INSERT INTO t2 SELECT a, b FROM t1; + ROLLBACK TO two; + COMMIT; + } + execsql { PRAGMA integrity_check } +} {ok} + +do_test savepoint-7.4.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(a, b, PRIMARY KEY(a, b)); + INSERT INTO t1 VALUES(randstr(1000,1000), randstr(1000,1000)); + BEGIN; + DELETE FROM t1; + SAVEPOINT one; + PRAGMA incremental_vacuum; + ROLLBACK TO one; + COMMIT; + } + + execsql { PRAGMA integrity_check } +} {ok} + +do_test savepoint-7.5.1 { + execsql { + PRAGMA incremental_vacuum; + CREATE TABLE t5(x, y); + INSERT INTO t5 VALUES(1, randstr(1000,1000)); + INSERT INTO t5 VALUES(2, randstr(1000,1000)); + INSERT INTO t5 VALUES(3, randstr(1000,1000)); + + BEGIN; + INSERT INTO t5 VALUES(4, randstr(1000,1000)); + INSERT INTO t5 VALUES(5, randstr(1000,1000)); + DELETE FROM t5 WHERE x=1 OR x=2; + SAVEPOINT one; + PRAGMA incremental_vacuum; + SAVEPOINT two; + INSERT INTO t5 VALUES(1, randstr(1000,1000)); + INSERT INTO t5 VALUES(2, randstr(1000,1000)); + ROLLBACK TO two; + ROLLBACK TO one; + COMMIT; + PRAGMA integrity_check; + } +} {ok} +do_test savepoint-7.5.2 { + execsql { + DROP TABLE t5; + } +} {} + +# Test oddly named and quoted savepoints. +# +do_test savepoint-8-1 { + execsql { SAVEPOINT "save1" } + execsql { RELEASE save1 } +} {} +do_test savepoint-8-2 { + execsql { SAVEPOINT "Including whitespace " } + execsql { RELEASE "including Whitespace " } +} {} + +# Test that the authorization callback works. 
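+# The authorizer is invoked once for each SAVEPOINT, RELEASE and ROLLBACK
+# TO statement with the operation code SQLITE_SAVEPOINT; the second
+# argument is one of BEGIN, RELEASE or ROLLBACK and the third is the
+# savepoint name.  A rough sketch of a logging authorizer on a throwaway
+# connection (proc and handle names are illustrative only):
+ifcapable auth {
+  proc demo_auth {op arg1 arg2 dbname trigger} {
+    if {$op eq "SQLITE_SAVEPOINT"} { puts "auth: $arg1 $arg2" }
+    return SQLITE_OK
+  }
+  sqlite3 authdemo :memory:
+  authdemo authorizer ::demo_auth
+  authdemo eval { SAVEPOINT sp; RELEASE sp }  ;# prints "auth: BEGIN sp" then "auth: RELEASE sp"
+  authdemo close
+}
+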
+# +ifcapable auth { + proc auth {args} { + eval lappend ::authdata $args + return SQLITE_OK + } + db auth auth + + do_test savepoint-9.1 { + set ::authdata [list] + execsql { SAVEPOINT sp1 } + set ::authdata + } {SQLITE_SAVEPOINT BEGIN sp1 {} {}} + do_test savepoint-9.2 { + set ::authdata [list] + execsql { ROLLBACK TO sp1 } + set ::authdata + } {SQLITE_SAVEPOINT ROLLBACK sp1 {} {}} + do_test savepoint-9.3 { + set ::authdata [list] + execsql { RELEASE sp1 } + set ::authdata + } {SQLITE_SAVEPOINT RELEASE sp1 {} {}} + + proc auth {args} { + eval lappend ::authdata $args + return SQLITE_DENY + } + db auth auth + + do_test savepoint-9.4 { + set ::authdata [list] + set res [catchsql { SAVEPOINT sp1 }] + concat $::authdata $res + } {SQLITE_SAVEPOINT BEGIN sp1 {} {} 1 {not authorized}} + do_test savepoint-9.5 { + set ::authdata [list] + set res [catchsql { ROLLBACK TO sp1 }] + concat $::authdata $res + } {SQLITE_SAVEPOINT ROLLBACK sp1 {} {} 1 {not authorized}} + do_test savepoint-9.6 { + set ::authdata [list] + set res [catchsql { RELEASE sp1 }] + concat $::authdata $res + } {SQLITE_SAVEPOINT RELEASE sp1 {} {} 1 {not authorized}} + + catch { db eval ROLLBACK } + db auth "" +} + +#------------------------------------------------------------------------- +# The following tests - savepoint-10.* - test the interaction of +# savepoints and ATTACH statements. +# + +# First make sure it is not possible to attach or detach a database while +# a savepoint is open (it is not possible if any transaction is open). +# +do_test savepoint-10.1.1 { + catchsql { + SAVEPOINT one; + ATTACH 'test2.db' AS aux; + } +} {1 {cannot ATTACH database within transaction}} +do_test savepoint-10.1.2 { + execsql { + RELEASE one; + ATTACH 'test2.db' AS aux; + } + catchsql { + SAVEPOINT one; + DETACH aux; + } +} {1 {cannot DETACH database within transaction}} +do_test savepoint-10.1.3 { + execsql { + RELEASE one; + DETACH aux; + } +} {} + +# The lock state of the TEMP database can vary if SQLITE_TEMP_STORE=3 +# And the following set of tests is only really interested in the status +# of the aux1 and aux2 locks. So record the current lock status of +# TEMP for use in the answers. 
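+# PRAGMA lock_status reports one (name, state) pair for every attached
+# database, e.g. {main unlocked temp closed}, so element 3 of the
+# flattened list extracted below is the state of the TEMP database.
+# Rough illustration (lock_status is a debugging pragma and simply
+# returns no rows on builds that omit it):
+foreach {dbname lockstate} [db eval {PRAGMA lock_status}] {
+  puts "lock_status: $dbname=$lockstate"
+}
+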
+set templockstate [lindex [db eval {PRAGMA lock_status}] 3] + + +do_test savepoint-10.2.1 { + file delete -force test3.db + file delete -force test2.db + execsql { + ATTACH 'test2.db' AS aux1; + ATTACH 'test3.db' AS aux2; + DROP TABLE t1; + CREATE TABLE main.t1(x, y); + CREATE TABLE aux1.t2(x, y); + CREATE TABLE aux2.t3(x, y); + SELECT name FROM sqlite_master + UNION ALL + SELECT name FROM aux1.sqlite_master + UNION ALL + SELECT name FROM aux2.sqlite_master; + } +} {t1 t2 t3} +do_test savepoint-10.2.2 { + execsql { PRAGMA lock_status } +} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + +do_test savepoint-10.2.3 { + execsql { + SAVEPOINT one; + INSERT INTO t1 VALUES(1, 2); + PRAGMA lock_status; + } +} [list main reserved temp $templockstate aux1 unlocked aux2 unlocked] +do_test savepoint-10.2.4 { + execsql { + INSERT INTO t3 VALUES(3, 4); + PRAGMA lock_status; + } +} [list main reserved temp $templockstate aux1 unlocked aux2 reserved] +do_test savepoint-10.2.5 { + execsql { + SAVEPOINT two; + INSERT INTO t2 VALUES(5, 6); + PRAGMA lock_status; + } +} [list main reserved temp $templockstate aux1 reserved aux2 reserved] +do_test savepoint-10.2.6 { + execsql { SELECT * FROM t2 } +} {5 6} +do_test savepoint-10.2.7 { + execsql { ROLLBACK TO two } + execsql { SELECT * FROM t2 } +} {} +do_test savepoint-10.2.8 { + execsql { PRAGMA lock_status } +} [list main reserved temp $templockstate aux1 reserved aux2 reserved] +do_test savepoint-10.2.9 { + execsql { SELECT 'a', * FROM t1 UNION ALL SELECT 'b', * FROM t3 } +} {a 1 2 b 3 4} +do_test savepoint-10.2.9 { + execsql { + INSERT INTO t2 VALUES(5, 6); + RELEASE one; + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } +} {1 2 5 6 3 4} +do_test savepoint-10.2.9 { + execsql { PRAGMA lock_status } +} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + +do_test savepoint-10.2.10 { + execsql { + SAVEPOINT one; + INSERT INTO t1 VALUES('a', 'b'); + SAVEPOINT two; + INSERT INTO t2 VALUES('c', 'd'); + SAVEPOINT three; + INSERT INTO t3 VALUES('e', 'f'); + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } +} {1 2 a b 5 6 c d 3 4 e f} +do_test savepoint-10.2.11 { + execsql { ROLLBACK TO two } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } +} {1 2 a b 5 6 3 4} +do_test savepoint-10.2.12 { + execsql { + INSERT INTO t3 VALUES('g', 'h'); + ROLLBACK TO two; + } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } +} {1 2 a b 5 6 3 4} +do_test savepoint-10.2.13 { + execsql { ROLLBACK } + execsql { + SELECT * FROM t1; + SELECT * FROM t2; + SELECT * FROM t3; + } +} {1 2 5 6 3 4} +do_test savepoint-10.2.14 { + execsql { PRAGMA lock_status } +} [list main unlocked temp $templockstate aux1 unlocked aux2 unlocked] + +#------------------------------------------------------------------------- +# The following tests - savepoint-11.* - test the interaction of +# savepoints and creating or dropping tables and indexes in +# auto-vacuum mode. 
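+# In full auto-vacuum mode freed pages are removed from the file as soon
+# as each transaction commits, so rolling back a CREATE TABLE also has to
+# undo the related pointer-map bookkeeping.  A rough sketch of the mode on
+# its own, outside any savepoint (handle and file name are illustrative
+# only, and an auto-vacuum capable build is assumed):
+file delete -force avdemo.db
+sqlite3 avdemo avdemo.db
+avdemo eval {
+  PRAGMA auto_vacuum = full;
+  CREATE TABLE big(x);
+  INSERT INTO big VALUES(zeroblob(20000));
+}
+set sz1 [file size avdemo.db]
+avdemo eval { DROP TABLE big }
+puts "auto_vacuum=full: $sz1 bytes -> [file size avdemo.db] bytes"
+avdemo close
+file delete -force avdemo.db
+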
+# +do_test savepoint-11.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + PRAGMA auto_vacuum = full; + CREATE TABLE t1(a, b, UNIQUE(a, b)); + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, randstr(1000,1000)); + } +} {} +do_test savepoint-11.2 { + execsql { + SAVEPOINT one; + CREATE TABLE t2(a, b, UNIQUE(a, b)); + SAVEPOINT two; + CREATE TABLE t3(a, b, UNIQUE(a, b)); + } +} {} +integrity_check savepoint-11.3 +do_test savepoint-11.4 { + execsql { ROLLBACK TO two } +} {} +integrity_check savepoint-11.5 +do_test savepoint-11.6 { + execsql { + CREATE TABLE t3(a, b, UNIQUE(a, b)); + ROLLBACK TO one; + } +} {} +integrity_check savepoint-11.7 +do_test savepoint-11.8 { + execsql { ROLLBACK } + file size test.db +} {8192} + + +do_test savepoint-11.9 { + execsql { + DROP TABLE IF EXISTS t1; + DROP TABLE IF EXISTS t2; + DROP TABLE IF EXISTS t3; + } +} {} +do_test savepoint-11.10 { + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE TABLE t2(x, y); + INSERT INTO t2 VALUES(1, 2); + SAVEPOINT one; + INSERT INTO t2 VALUES(3, 4); + SAVEPOINT two; + DROP TABLE t1; + ROLLBACK TO two; + } + execsql {SELECT * FROM t2} +} {1 2 3 4} +do_test savepoint-11.11 { + execsql COMMIT +} {} +do_test savepoint-11.12 { + execsql {SELECT * FROM t2} +} {1 2 3 4} + +#------------------------------------------------------------------------- +# The following tests - savepoint-12.* - test the interaction of +# savepoints and "ON CONFLICT ROLLBACK" clauses. +# +do_test savepoint-12.1 { + execsql { + CREATE TABLE t4(a PRIMARY KEY, b); + INSERT INTO t4 VALUES(1, 'one'); + } +} {} +do_test savepoint-12.2 { + # The final statement of the following SQL hits a constraint when the + # conflict handling mode is "OR ROLLBACK" and there are a couple of + # open savepoints. At one point this would fail to clear the internal + # record of the open savepoints, resulting in an assert() failure + # later on. + # + catchsql { + BEGIN; + INSERT INTO t4 VALUES(2, 'two'); + SAVEPOINT sp1; + INSERT INTO t4 VALUES(3, 'three'); + SAVEPOINT sp2; + INSERT OR ROLLBACK INTO t4 VALUES(1, 'one'); + } +} {1 {column a is not unique}} +do_test savepoint-12.3 { + sqlite3_get_autocommit db +} {1} +do_test savepoint-12.4 { + execsql { SAVEPOINT one } +} {} + +#------------------------------------------------------------------------- +# The following tests - savepoint-13.* - test the interaction of +# savepoints and "journal_mode = off". +# +do_test savepoint-13.1 { + db close + catch {file delete -force test.db} + sqlite3 db test.db + execsql { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 2); + COMMIT; + PRAGMA journal_mode = off; + } +} {off} +do_test savepoint-13.2 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(3, 4); + INSERT INTO t1 SELECT a+4,b+4 FROM t1; + COMMIT; + } +} {} +do_test savepoint-13.3 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(9, 10); + SAVEPOINT s1; + INSERT INTO t1 VALUES(11, 12); + COMMIT; + } +} {} +do_test savepoint-13.4 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(13, 14); + SAVEPOINT s1; + INSERT INTO t1 VALUES(15, 16); + ROLLBACK TO s1; + ROLLBACK; + SELECT * FROM t1; + } +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/schema2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/schema2.test --- sqlite3-3.4.2/test/schema2.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/schema2.test 2009-06-12 03:37:58.000000000 +0100 @@ -14,7 +14,7 @@ # error should be returned. 
This is a copy of schema.test that # has been altered to use sqlite3_prepare_v2 instead of sqlite3_prepare # -# $Id: schema2.test,v 1.2 2007/05/02 17:54:56 drh Exp $ +# $Id: schema2.test,v 1.4 2009/02/04 17:40:58 drh Exp $ #--------------------------------------------------------------------- # When any of the following types of SQL statements or actions are @@ -142,26 +142,28 @@ # Tests 5.1 to 5.4 check that prepared statements are invalidated when # a database is DETACHed (but not when one is ATTACHed). # -do_test schema2-5.1 { - set sql {SELECT * FROM abc;} - set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] - execsql { - ATTACH 'test2.db' AS aux; - } - sqlite3_step $::STMT -} {SQLITE_DONE} -do_test schema2-5.2 { - sqlite3_reset $::STMT -} {SQLITE_OK} -do_test schema2-5.3 { - execsql { - DETACH aux; - } - sqlite3_step $::STMT -} {SQLITE_DONE} -do_test schema2-5.4 { - sqlite3_finalize $::STMT -} {SQLITE_OK} +ifcapable attach { + do_test schema2-5.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 TAIL] + execsql { + ATTACH 'test2.db' AS aux; + } + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema2-5.2 { + sqlite3_reset $::STMT + } {SQLITE_OK} + do_test schema2-5.3 { + execsql { + DETACH aux; + } + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema2-5.4 { + sqlite3_finalize $::STMT + } {SQLITE_OK} +} #--------------------------------------------------------------------- # Tests 6.* check that prepared statements are invalidated when @@ -312,7 +314,7 @@ db function tstfunc {} } msg] list $rc $msg -} {1 {Unable to delete/modify user-function due to active statements}} +} {1 {unable to delete/modify user-function due to active statements}} do_test schema2-11.4 { sqlite3_finalize $::STMT } {SQLITE_OK} @@ -330,7 +332,7 @@ db collate tstcollate {} } msg] list $rc $msg -} {1 {Unable to delete/modify collation sequence due to active statements}} +} {1 {unable to delete/modify collation sequence due to active statements}} do_test schema2-11.8 { sqlite3_finalize $::STMT } {SQLITE_OK} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/schema.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/schema.test --- sqlite3-3.4.2/test/schema.test 2007-08-13 18:30:11.000000000 +0100 +++ sqlite3-3.6.16/test/schema.test 2009-06-12 03:37:58.000000000 +0100 @@ -13,7 +13,7 @@ # This file tests the various conditions under which an SQLITE_SCHEMA # error should be returned. # -# $Id: schema.test,v 1.7 2007/08/13 15:28:35 danielk1977 Exp $ +# $Id: schema.test,v 1.9 2009/02/04 17:40:58 drh Exp $ #--------------------------------------------------------------------- # When any of the following types of SQL statements or actions are @@ -141,26 +141,28 @@ # Tests 5.1 to 5.4 check that prepared statements are invalidated when # a database is DETACHed (but not when one is ATTACHed). 
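# The practical difference from schema2.test is the use of
# sqlite3_prepare() here instead of sqlite3_prepare_v2(): a _v2 statement
# re-prepares itself transparently after a schema change, while a legacy
# statement reports the problem to the caller.  A rough sketch of the
# contrast, using the test fixture's C-API wrappers (the statement text
# and the table created are illustrative only):
#
#   set s1 [sqlite3_prepare_v2 $::DB {SELECT * FROM abc} -1 TAIL]
#   set s2 [sqlite3_prepare    $::DB {SELECT * FROM abc} -1 TAIL]
#   execsql { CREATE TABLE schema_bump(x) }  ;# invalidates both statements
#   sqlite3_step $s1                         ;# steps normally (re-prepared)
#   sqlite3_step $s2                         ;# SQLITE_ERROR
#   sqlite3_finalize $s1                     ;# SQLITE_OK
#   sqlite3_finalize $s2                     ;# SQLITE_SCHEMA
#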
# -do_test schema-5.1 { - set sql {SELECT * FROM abc;} - set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] - execsql { - ATTACH 'test2.db' AS aux; - } - sqlite3_step $::STMT -} {SQLITE_DONE} -do_test schema-5.2 { - sqlite3_reset $::STMT -} {SQLITE_OK} -do_test schema-5.3 { - execsql { - DETACH aux; - } - sqlite3_step $::STMT -} {SQLITE_ERROR} -do_test schema-5.4 { - sqlite3_finalize $::STMT -} {SQLITE_SCHEMA} +ifcapable attach { + do_test schema-5.1 { + set sql {SELECT * FROM abc;} + set ::STMT [sqlite3_prepare $::DB $sql -1 TAIL] + execsql { + ATTACH 'test2.db' AS aux; + } + sqlite3_step $::STMT + } {SQLITE_DONE} + do_test schema-5.2 { + sqlite3_reset $::STMT + } {SQLITE_OK} + do_test schema-5.3 { + execsql { + DETACH aux; + } + sqlite3_step $::STMT + } {SQLITE_ERROR} + do_test schema-5.4 { + sqlite3_finalize $::STMT + } {SQLITE_SCHEMA} +} #--------------------------------------------------------------------- # Tests 6.* check that prepared statements are invalidated when @@ -311,7 +313,7 @@ db function tstfunc {} } msg] list $rc $msg -} {1 {Unable to delete/modify user-function due to active statements}} +} {1 {unable to delete/modify user-function due to active statements}} do_test schema-11.4 { sqlite3_finalize $::STMT } {SQLITE_OK} @@ -329,7 +331,7 @@ db collate tstcollate {} } msg] list $rc $msg -} {1 {Unable to delete/modify collation sequence due to active statements}} +} {1 {unable to delete/modify collation sequence due to active statements}} do_test schema-11.8 { sqlite3_finalize $::STMT } {SQLITE_OK} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select1.test --- sqlite3-3.4.2/test/select1.test 2007-07-23 23:51:15.000000000 +0100 +++ sqlite3-3.6.16/test/select1.test 2009-06-25 12:35:52.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the SELECT statement. # -# $Id: select1.test,v 1.54 2007/07/23 22:51:15 drh Exp $ +# $Id: select1.test,v 1.70 2009/05/28 01:00:56 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -306,7 +306,7 @@ do_test select1-4.4 { set v [catch {execsql {SELECT f1 FROM test1 ORDER BY min(f1)}} msg] lappend v $msg -} {1 {misuse of aggregate function min()}} +} {1 {misuse of aggregate: min()}} # The restriction not allowing constants in the ORDER BY clause # has been removed. See ticket #1768 @@ -374,12 +374,12 @@ catchsql { SELECT * FROM t5 ORDER BY 3; } -} {1 {ORDER BY column number 3 out of range - should be between 1 and 2}} +} {1 {1st ORDER BY term out of range - should be between 1 and 2}} do_test select1-4.10.2 { catchsql { SELECT * FROM t5 ORDER BY -1; } -} {1 {ORDER BY column number -1 out of range - should be between 1 and 2}} +} {1 {1st ORDER BY term out of range - should be between 1 and 2}} do_test select1-4.11 { execsql { INSERT INTO t5 VALUES(3,10); @@ -405,7 +405,7 @@ lappend v $msg } {0 33} -execsql {CREATE TABLE test2(t1 test, t2 text)} +execsql {CREATE TABLE test2(t1 text, t2 text)} execsql {INSERT INTO test2 VALUES('abc','xyz')} # Check for column naming @@ -506,6 +506,111 @@ lappend v $msg } {0 {f1 11 f1 11 f1 33 f1 33 f1 11 f1 11 f1 33 f1 33}} +do_test select1-6.9.3 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=OFF; + } + execsql2 { + SELECT test1 . f1, test1 . f2 FROM test1 LIMIT 1 + } +} {{test1 . f1} 11 {test1 . f2} 22} +do_test select1-6.9.4 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=ON; + } + execsql2 { + SELECT test1 . f1, test1 . 
f2 FROM test1 LIMIT 1 + } +} {test1.f1 11 test1.f2 22} +do_test select1-6.9.5 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=ON; + } + execsql2 { + SELECT 123.45; + } +} {123.45 123.45} +do_test select1-6.9.6 { + execsql2 { + SELECT * FROM test1 a, test1 b LIMIT 1 + } +} {a.f1 11 a.f2 22 b.f1 11 b.f2 22} +do_test select1-6.9.7 { + set x [execsql2 { + SELECT * FROM test1 a, (select 5, 6) LIMIT 1 + }] + regsub -all {subquery_[0-9a-fA-F]+_} $x {subquery} x + set x +} {a.f1 11 a.f2 22 sqlite_subquery.5 5 sqlite_subquery.6 6} +do_test select1-6.9.8 { + set x [execsql2 { + SELECT * FROM test1 a, (select 5 AS x, 6 AS y) AS b LIMIT 1 + }] + regsub -all {subquery_[0-9a-fA-F]+_} $x {subquery} x + set x +} {a.f1 11 a.f2 22 b.x 5 b.y 6} +do_test select1-6.9.9 { + execsql2 { + SELECT a.f1, b.f2 FROM test1 a, test1 b LIMIT 1 + } +} {test1.f1 11 test1.f2 22} +do_test select1-6.9.10 { + execsql2 { + SELECT f1, t1 FROM test1, test2 LIMIT 1 + } +} {test1.f1 11 test2.t1 abc} +do_test select1-6.9.11 { + db eval { + PRAGMA short_column_names=ON; + PRAGMA full_column_names=ON; + } + execsql2 { + SELECT a.f1, b.f2 FROM test1 a, test1 b LIMIT 1 + } +} {test1.f1 11 test1.f2 22} +do_test select1-6.9.12 { + execsql2 { + SELECT f1, t1 FROM test1, test2 LIMIT 1 + } +} {test1.f1 11 test2.t1 abc} +do_test select1-6.9.13 { + db eval { + PRAGMA short_column_names=ON; + PRAGMA full_column_names=OFF; + } + execsql2 { + SELECT a.f1, b.f1 FROM test1 a, test1 b LIMIT 1 + } +} {f1 11 f1 11} +do_test select1-6.9.14 { + execsql2 { + SELECT f1, t1 FROM test1, test2 LIMIT 1 + } +} {f1 11 t1 abc} +do_test select1-6.9.15 { + db eval { + PRAGMA short_column_names=OFF; + PRAGMA full_column_names=ON; + } + execsql2 { + SELECT a.f1, b.f1 FROM test1 a, test1 b LIMIT 1 + } +} {test1.f1 11 test1.f1 11} +do_test select1-6.9.16 { + execsql2 { + SELECT f1, t1 FROM test1, test2 LIMIT 1 + } +} {test1.f1 11 test2.t1 abc} + + +db eval { + PRAGMA short_column_names=ON; + PRAGMA full_column_names=OFF; +} + ifcapable compound { do_test select1-6.10 { set v [catch {execsql2 { @@ -517,12 +622,13 @@ do_test select1-6.11 { set v [catch {execsql2 { SELECT f1 FROM test1 UNION SELECT f2+100 FROM test1 - ORDER BY f2+100; + ORDER BY f2+101; }} msg] lappend v $msg -} {1 {ORDER BY term number 1 does not match any result column}} +} {1 {1st ORDER BY term does not match any column in the result set}} # Ticket #2296 +ifcapable subquery&&compound { do_test select1-6.20 { execsql { CREATE TABLE t6(a TEXT, b TEXT); @@ -558,6 +664,7 @@ ORDER BY a; } } {b d} +} } ;#ifcapable compound @@ -720,6 +827,11 @@ SELECT f1-22 AS x, f2-22 as y FROM test1 WHERE x>0 AND y<50 } } {11 22} +do_test select1-10.7 { + execsql { + SELECT f1 COLLATE nocase AS x FROM test1 ORDER BY x + } +} {11 33} # Check the ability to specify "TABLE.*" in the result set of a SELECT # @@ -910,4 +1022,49 @@ } {0} } +foreach tab [db eval {SELECT name FROM sqlite_master WHERE type = 'table'}] { + db eval "DROP TABLE $tab" +} +db close +sqlite3 db test.db + +do_test select1-14.1 { + execsql { + SELECT * FROM sqlite_master WHERE rowid>10; + SELECT * FROM sqlite_master WHERE rowid=10; + SELECT * FROM sqlite_master WHERE rowid<10; + SELECT * FROM sqlite_master WHERE rowid<=10; + SELECT * FROM sqlite_master WHERE rowid>=10; + SELECT * FROM sqlite_master; + } +} {} +do_test select1-14.2 { + execsql { + SELECT 10 IN (SELECT rowid FROM sqlite_master); + } +} {0} + +if {[db one {PRAGMA locking_mode}]=="normal"} { + # Check that ticket #3771 has been fixed. 
This test does not + # work with locking_mode=EXCLUSIVE so disable in that case. + # + do_test select1-15.1 { + execsql { + CREATE TABLE t1(a); + CREATE INDEX i1 ON t1(a); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + } + } {} + do_test select1-15.2 { + sqlite3 db2 test.db + execsql { DROP INDEX i1 } db2 + db2 close + } {} + do_test select1-15.3 { + execsql { SELECT 2 IN (SELECT a FROM t1) } + } {1} +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select2.test --- sqlite3-3.4.2/test/select2.test 2007-03-27 15:43:06.000000000 +0100 +++ sqlite3-3.6.16/test/select2.test 2009-06-12 03:37:58.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the SELECT statement. # -# $Id: select2.test,v 1.25 2005/07/21 03:15:01 drh Exp $ +# $Id: select2.test,v 1.28 2009/01/15 15:23:59 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -55,6 +55,7 @@ } set r } {4: 2 3 4} +unset data # Create a largish table. Do this twice, once using the TCL cache and once # without. Compare the performance to make sure things go faster with the @@ -89,11 +90,11 @@ list } {} puts "time without cache: $t2" -ifcapable tclvar { - do_test select2-2.0.3 { - expr {[lindex $t1 0]<[lindex $t2 0]} - } 1 -} +#ifcapable tclvar { +# do_test select2-2.0.3 { +# expr {[lindex $t1 0]<[lindex $t2 0]} +# } 1 +#} do_test select2-2.1 { execsql {SELECT count(*) FROM tbl2} @@ -117,7 +118,6 @@ } {500} do_test select2-3.2d { set sqlite_search_count 0 -btree_breakpoint execsql {SELECT * FROM tbl2 WHERE 1000=f2} set sqlite_search_count } {3} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select3.test --- sqlite3-3.4.2/test/select3.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/select3.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing aggregate functions and the # GROUP BY and HAVING clauses of SELECT statements. # -# $Id: select3.test,v 1.21 2007/06/20 12:18:31 drh Exp $ +# $Id: select3.test,v 1.23 2008/01/16 18:20:42 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -25,7 +25,7 @@ BEGIN; } for {set i 1} {$i<32} {incr i} { - for {set j 0} {pow(2,$j)<$i} {incr j} {} + for {set j 0} {(1<<$j)<$i} {incr j} {} execsql "INSERT INTO t1 VALUES($i,$j)" } execsql { @@ -93,12 +93,12 @@ catchsql { SELECT log, count(*) FROM t1 GROUP BY 0 ORDER BY log; } -} {1 {GROUP BY column number 0 out of range - should be between 1 and 2}} +} {1 {1st GROUP BY term out of range - should be between 1 and 2}} do_test select3-2.11 { catchsql { SELECT log, count(*) FROM t1 GROUP BY 3 ORDER BY log; } -} {1 {GROUP BY column number 3 out of range - should be between 1 and 2}} +} {1 {1st GROUP BY term out of range - should be between 1 and 2}} do_test select3-2.12 { catchsql { SELECT log, count(*) FROM t1 GROUP BY 1 ORDER BY log; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select4.test --- sqlite3-3.4.2/test/select4.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/select4.test 2009-06-25 12:23:19.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing UNION, INTERSECT and EXCEPT operators # in SELECT statements. 
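# The replacement of {pow(2,$j)<$i} with {(1<<$j)<$i} in the setup loops
# (in select3.test above, and again in select4.test and select5.test
# below) keeps the same result: $j ends up as the number of doublings
# needed to reach $i (0 for i=1, 1 for i=2, 2 for i=3..4, 3 for i=5..8,
# and so on), but uses integer shifts instead of the floating-point pow()
# function.  Quick illustration:
for {set i 1} {$i<=8} {incr i} {
  for {set j 0} {(1<<$j)<$i} {incr j} {}
  puts "i=$i -> j=$j"
}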
# -# $Id: select4.test,v 1.20 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: select4.test,v 1.30 2009/04/16 00:24:24 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -29,7 +29,7 @@ BEGIN; } for {set i 1} {$i<32} {incr i} { - for {set j 0} {pow(2,$j)<$i} {incr j} {} + for {set j 0} {(1<<$j)<$i} {incr j} {} execsql "INSERT INTO t1 VALUES($i,$j)" } execsql { @@ -210,12 +210,15 @@ do_test select4-4.1.2 { execsql { - SELECT DISTINCT log FROM t1 UNION ALL SELECT 6 + SELECT DISTINCT log FROM t1 + UNION ALL + SELECT 6 INTERSECT SELECT n FROM t1 WHERE log=3 - ORDER BY log; + ORDER BY t1.log; } } {5 6} + do_test select4-4.1.3 { execsql { CREATE TABLE t2 AS @@ -283,7 +286,7 @@ SELECT DISTINCT log AS xyzzy FROM t1 UNION ALL SELECT n FROM t1 WHERE log=3 - ORDER BY 'xyzzy'; + ORDER BY "xyzzy"; }} msg] lappend v $msg } {0 {0 1 2 3 4 5 5 6 7 8}} @@ -292,19 +295,19 @@ SELECT DISTINCT log FROM t1 UNION ALL SELECT n FROM t1 WHERE log=3 - ORDER BY 'xyzzy'; + ORDER BY "xyzzy"; }} msg] lappend v $msg -} {1 {ORDER BY term number 1 does not match any result column}} +} {1 {1st ORDER BY term does not match any column in the result set}} do_test select4-5.2d { set v [catch {execsql { SELECT DISTINCT log FROM t1 INTERSECT SELECT n FROM t1 WHERE log=3 - ORDER BY 'xyzzy'; + ORDER BY "xyzzy"; }} msg] lappend v $msg -} {1 {ORDER BY term number 1 does not match any result column}} +} {1 {1st ORDER BY term does not match any column in the result set}} do_test select4-5.2e { set v [catch {execsql { SELECT DISTINCT log FROM t1 @@ -337,7 +340,7 @@ SELECT n FROM t1 WHERE log=3 ORDER BY 2; } -} {1 {ORDER BY position 2 should be between 1 and 1}} +} {1 {1st ORDER BY term out of range - should be between 1 and 1}} do_test select4-5.2i { catchsql { SELECT DISTINCT 1, log FROM t1 @@ -371,6 +374,11 @@ }} msg] lappend v $msg } {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test select4-5.3-3807-1 { + catchsql { + SELECT 1 UNION SELECT 2, 3 UNION SELECT 4, 5 ORDER BY 1; + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} do_test select4-5.4 { set v [catch {execsql { SELECT log FROM t1 WHERE n=2 @@ -510,7 +518,7 @@ } } {1.1 1.10 1.2 1.3} -# Make sure the names of columns are takenf rom the right-most subquery +# Make sure the names of columns are taken from the right-most subquery # right in a compound query. Ticket #1721 # ifcapable compound { @@ -572,13 +580,14 @@ } ;# ifcapable subquery do_test select4-9.8 { - execsql2 { + execsql { SELECT 0 AS x, 1 AS y UNION SELECT 2 AS y, -3 AS x ORDER BY x LIMIT 1; } -} {x 0 y 1} +} {0 1} + do_test select4-9.9.1 { execsql2 { SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS b, 4 AS a @@ -612,6 +621,188 @@ } {a 1 b 2 a 3 b 4} } ;# ifcapable subquery +# Try combining DISTINCT, LIMIT, and OFFSET. Make sure they all work +# together. 
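+# A negative LIMIT means "no limit", and OFFSET is applied after DISTINCT
+# has removed duplicates and after any ORDER BY, so "LIMIT -1 OFFSET 2"
+# returns every distinct row except the first two.  Rough standalone
+# sketch (throwaway handle and data, illustrative only):
+sqlite3 limdemo :memory:
+limdemo eval {
+  CREATE TABLE nums(n);
+  INSERT INTO nums VALUES(1); INSERT INTO nums VALUES(1);
+  INSERT INTO nums VALUES(2); INSERT INTO nums VALUES(3);
+  INSERT INTO nums VALUES(3); INSERT INTO nums VALUES(4);
+}
+puts [limdemo eval {SELECT DISTINCT n FROM nums ORDER BY n LIMIT -1 OFFSET 2}]  ;# 3 4
+limdemo close
+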
+# +do_test select4-10.1 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log + } +} {0 1 2 3 4 5} +do_test select4-10.2 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT 4 + } +} {0 1 2 3} +do_test select4-10.3 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT 0 + } +} {} +do_test select4-10.4 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT -1 + } +} {0 1 2 3 4 5} +do_test select4-10.5 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT -1 OFFSET 2 + } +} {2 3 4 5} +do_test select4-10.6 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT 3 OFFSET 2 + } +} {2 3 4} +do_test select4-10.7 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY +log LIMIT 3 OFFSET 20 + } +} {} +do_test select4-10.8 { + execsql { + SELECT DISTINCT log FROM t1 ORDER BY log LIMIT 0 OFFSET 3 + } +} {} +do_test select4-10.9 { + execsql { + SELECT DISTINCT max(n), log FROM t1 ORDER BY +log; -- LIMIT 2 OFFSET 1 + } +} {31 5} + +# Make sure compound SELECTs with wildly different numbers of columns +# do not cause assertion faults due to register allocation issues. +# +do_test select4-11.1 { + catchsql { + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + UNION + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} +do_test select4-11.2 { + catchsql { + SELECT x FROM t2 + UNION + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} +do_test select4-11.3 { + catchsql { + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + UNION ALL + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test select4-11.4 { + catchsql { + SELECT x FROM t2 + UNION ALL + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + } +} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test select4-11.5 { + catchsql { + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + EXCEPT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of EXCEPT do not have the same number of result columns}} +do_test select4-11.6 { + catchsql { + SELECT x FROM t2 + EXCEPT + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + } +} {1 {SELECTs to the left and right of EXCEPT do not have the same number of result columns}} +do_test select4-11.7 { + catchsql { + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + INTERSECT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of INTERSECT do not have the same number of result columns}} +do_test select4-11.8 { + catchsql { + SELECT x FROM t2 + INTERSECT + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + } +} {1 {SELECTs to the left and right of INTERSECT do not have the same number of result columns}} + +do_test select4-11.11 { + catchsql { + SELECT x FROM t2 + UNION + SELECT x FROM t2 + UNION ALL + SELECT x FROM t2 + EXCEPT + SELECT x FROM t2 + INTERSECT + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + } +} {1 {SELECTs to the left and right of INTERSECT do not have the same number of result columns}} +do_test select4-11.12 { + catchsql { + SELECT x FROM t2 + UNION + SELECT x FROM t2 + UNION ALL + SELECT x FROM t2 + EXCEPT + SELECT 
x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + EXCEPT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of EXCEPT do not have the same number of result columns}} +do_test select4-11.13 { + catchsql { + SELECT x FROM t2 + UNION + SELECT x FROM t2 + UNION ALL + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + UNION ALL + SELECT x FROM t2 + EXCEPT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}} +do_test select4-11.14 { + catchsql { + SELECT x FROM t2 + UNION + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + UNION + SELECT x FROM t2 + UNION ALL + SELECT x FROM t2 + EXCEPT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} +do_test select4-11.15 { + catchsql { + SELECT x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x,x FROM t2 + UNION + SELECT x FROM t2 + INTERSECT + SELECT x FROM t2 + UNION ALL + SELECT x FROM t2 + EXCEPT + SELECT x FROM t2 + } +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} + +do_test select4-12.1 { + sqlite3 db2 :memory: + catchsql { + SELECT 1 UNION SELECT 2,3 UNION SELECT 4,5 ORDER BY 1; + } db2 +} {1 {SELECTs to the left and right of UNION do not have the same number of result columns}} + } ;# ifcapable compound finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select5.test --- sqlite3-3.4.2/test/select5.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/select5.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing aggregate functions and the # GROUP BY and HAVING clauses of SELECT statements. # -# $Id: select5.test,v 1.16 2006/01/21 12:08:55 danielk1977 Exp $ +# $Id: select5.test,v 1.20 2008/08/21 14:15:59 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -24,7 +24,7 @@ BEGIN; } for {set i 1} {$i<32} {incr i} { - for {set j 0} {pow(2,$j)<$i} {incr j} {} + for {set j 0} {(1<<$j)<$i} {incr j} {} execsql "INSERT INTO t1 VALUES([expr {32-$i}],[expr {10-$j}])" } execsql { @@ -122,7 +122,7 @@ # Some tests for queries with a GROUP BY clause but no aggregate functions. # -# Note: The query in test case 5-5.5 are not legal SQL. So if the +# Note: The query in test cases 5.1 through 5.5 are not legal SQL. So if the # implementation changes in the future and it returns different results, # this is not such a big deal. # @@ -156,6 +156,14 @@ } } {1 4 6 4} +# Test rendering of columns for the GROUP BY clause. +# +do_test select5-5.11 { + execsql { + SELECT max(c), b*a, b, a FROM t2 GROUP BY b*a, b, a + } +} {3 2 2 1 5 4 4 1 7 24 4 6} + # NULL compare equal to each other for the purposes of processing # the GROUP BY clause. # @@ -183,10 +191,67 @@ } } {1 1 2 {} 2 1 3 {} 3 1 {} 5 4 2 {} 6 5 2 {} {} 6 1 7 8} -do_test select5.7.2 { +do_test select5-7.2 { execsql { SELECT count(*), count(x) as cnt FROM t4 GROUP BY y ORDER BY cnt; } } {1 1 1 1 1 1 5 5} + +# See ticket #3324. 
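+# count(b) counts only the rows in each group where b is not NULL, while
+# count(*) counts every row; the ticket #3324 cases below verify that this
+# still holds whether the join constraint uses the rowid index or (with
+# the unary "+") bypasses it.  Rough sketch of the aggregate semantics on
+# their own (throwaway handle and data, illustrative only):
+sqlite3 aggdemo :memory:
+aggdemo eval {
+  CREATE TABLE g(grp, val);
+  INSERT INTO g VALUES('one', 1);
+  INSERT INTO g VALUES('one', NULL);
+  INSERT INTO g VALUES('two', 2);
+}
+puts [aggdemo eval {
+  SELECT grp, count(val), count(*) FROM g GROUP BY grp ORDER BY grp
+}]  ;# one 1 2 two 1 1
+aggdemo close
+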
+# +do_test select5-8.1 { + execsql { + CREATE TABLE t8a(a,b); + CREATE TABLE t8b(x); + INSERT INTO t8a VALUES('one', 1); + INSERT INTO t8a VALUES('one', 2); + INSERT INTO t8a VALUES('two', 3); + INSERT INTO t8a VALUES('one', NULL); + INSERT INTO t8b(rowid,x) VALUES(1,111); + INSERT INTO t8b(rowid,x) VALUES(2,222); + INSERT INTO t8b(rowid,x) VALUES(3,333); + SELECT a, count(b) FROM t8a, t8b WHERE b=t8b.rowid GROUP BY a ORDER BY a; + } +} {one 2 two 1} +do_test select5-8.2 { + execsql { + SELECT a, count(b) FROM t8a, t8b WHERE b=+t8b.rowid GROUP BY a ORDER BY a; + } +} {one 2 two 1} +do_test select5-8.3 { + execsql { + SELECT t8a.a, count(t8a.b) FROM t8a, t8b WHERE t8a.b=t8b.rowid + GROUP BY 1 ORDER BY 1; + } +} {one 2 two 1} +do_test select5-8.4 { + execsql { + SELECT a, count(*) FROM t8a, t8b WHERE b=+t8b.rowid GROUP BY a ORDER BY a; + } +} {one 2 two 1} +do_test select5-8.5 { + execsql { + SELECT a, count(b) FROM t8a, t8b WHERE b4; SELECT * FROM t1 @@ -501,6 +500,16 @@ SELECT x FROM (SELECT x FROM t1 LIMIT -1 OFFSET 1); } } {2 3 4} +do_test select6-9.10 { + execsql { + SELECT x, y FROM (SELECT x, (SELECT 10+x) y FROM t1 LIMIT -1 OFFSET 1); + } +} {2 12 3 13 4 14} +do_test select6-9.11 { + execsql { + SELECT x, y FROM (SELECT x, (SELECT 10)+x y FROM t1 LIMIT -1 OFFSET 1); + } +} {2 12 3 13 4 14} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select7.test --- sqlite3-3.4.2/test/select7.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/select7.test 2009-06-05 18:03:40.000000000 +0100 @@ -10,7 +10,7 @@ # focus of this file is testing compute SELECT statements and nested # views. # -# $Id: select7.test,v 1.10 2007/06/07 10:55:36 drh Exp $ +# $Id: select7.test,v 1.11 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] @@ -138,20 +138,22 @@ # Verify that an error occurs if you have too many terms on a # compound select statement. # -if {$SQLITE_MAX_COMPOUND_SELECT>0} { - set sql {SELECT 0} - set result 0 - for {set i 1} {$i<$SQLITE_MAX_COMPOUND_SELECT} {incr i} { - append sql " UNION ALL SELECT $i" - lappend result $i +ifcapable compound { + if {$SQLITE_MAX_COMPOUND_SELECT>0} { + set sql {SELECT 0} + set result 0 + for {set i 1} {$i<$SQLITE_MAX_COMPOUND_SELECT} {incr i} { + append sql " UNION ALL SELECT $i" + lappend result $i + } + do_test select7-6.1 { + catchsql $sql + } [list 0 $result] + append sql { UNION ALL SELECT 99999999} + do_test select7-6.2 { + catchsql $sql + } {1 {too many terms in compound SELECT}} } - do_test select7-6.1 { - catchsql $sql - } [list 0 $result] - append sql { UNION ALL SELECT 99999999} - do_test select7-6.2 { - catchsql $sql - } {1 {too many terms in compound SELECT}} } finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select8.test --- sqlite3-3.4.2/test/select8.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/select8.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,62 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing that LIMIT and OFFSET work for +# unusual combinations SELECT statements. 
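+# LIMIT and OFFSET are applied last, after grouping, duplicate removal and
+# ordering, which is what the cases below rely on when they compare a
+# limited query against an [lrange] of the full result.  Rough standalone
+# sketch (throwaway handle and data, illustrative only):
+sqlite3 grpdemo :memory:
+grpdemo eval {
+  CREATE TABLE plays(artist, n);
+  INSERT INTO plays VALUES('a', 1); INSERT INTO plays VALUES('a', 2);
+  INSERT INTO plays VALUES('b', 5); INSERT INTO plays VALUES('c', 7);
+}
+puts [grpdemo eval {
+  SELECT artist, sum(n) FROM plays GROUP BY artist ORDER BY artist LIMIT 1 OFFSET 1
+}]  ;# b 5
+grpdemo close
+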
+# +# $Id: select8.test,v 1.1 2008/01/12 12:48:09 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +execsql { + CREATE TABLE songs(songid, artist, timesplayed); + INSERT INTO songs VALUES(1,'one',1); + INSERT INTO songs VALUES(2,'one',2); + INSERT INTO songs VALUES(3,'two',3); + INSERT INTO songs VALUES(4,'three',5); + INSERT INTO songs VALUES(5,'one',7); + INSERT INTO songs VALUES(6,'two',11); +} +set result [execsql { + SELECT DISTINCT artist,sum(timesplayed) AS total + FROM songs + GROUP BY LOWER(artist) +}] +puts result=$result +do_test select8-1.1 { + execsql { + SELECT DISTINCT artist,sum(timesplayed) AS total + FROM songs + GROUP BY LOWER(artist) + LIMIT 1 OFFSET 1 + } +} [lrange $result 2 3] +do_test select8-1.2 { + execsql { + SELECT DISTINCT artist,sum(timesplayed) AS total + FROM songs + GROUP BY LOWER(artist) + LIMIT 2 OFFSET 1 + } +} [lrange $result 2 5] +do_test select8-1.3 { + execsql { + SELECT DISTINCT artist,sum(timesplayed) AS total + FROM songs + GROUP BY LOWER(artist) + LIMIT -1 OFFSET 2 + } +} [lrange $result 4 end] + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/select9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/select9.test --- sqlite3-3.4.2/test/select9.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/select9.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,421 @@ +# 2008 June 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: select9.test,v 1.4 2008/07/01 14:39:35 danielk1977 Exp $ + +# The tests in this file are focused on test compound SELECT statements +# that have any or all of an ORDER BY, LIMIT or OFFSET clauses. As of +# version 3.6.0, SQLite contains code to use SQL indexes where possible +# to optimize such statements. +# + +# TODO Points: +# +# * Are there any "column affinity" issues to consider? + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +#set ISQUICK 1 + +#------------------------------------------------------------------------- +# test_compound_select TESTNAME SELECT RESULT +# +# This command is used to run multiple LIMIT/OFFSET test cases based on +# the single SELECT statement passed as the second argument. The SELECT +# statement may not contain a LIMIT or OFFSET clause. This proc tests +# many statements of the form: +# +# "$SELECT limit $X offset $Y" +# +# for various values of $X and $Y. +# +# The third argument, $RESULT, should contain the expected result of +# the command [execsql $SELECT]. +# +# The first argument, $TESTNAME, is used as the base test case name to +# pass to [do_test] for each individual LIMIT OFFSET test case. 
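+# For example, a call such as
+#
+#   test_compound_select select9-0.1 {
+#     SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1
+#   } {...expected full result...}
+#
+# (the name select9-0.1 is illustrative) first runs the statement as-is
+# under that name, then re-runs it as "... LIMIT $iLimit OFFSET $iOffset"
+# for a range of values, naming each case
+# select9-0.1.limit=$iLimit.offset=$iOffset and checking the output
+# against the corresponding [lrange] of the full expected list.
+#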
+# +proc test_compound_select {testname sql result} { + + set nCol 1 + db eval $sql A { + set nCol [llength $A(*)] + break + } + set nRow [expr {[llength $result] / $nCol}] + + set ::compound_sql $sql + do_test $testname { + execsql $::compound_sql + } $result +#return + + set iLimitIncr 1 + set iOffsetIncr 1 + if {[info exists ::ISQUICK] && $::ISQUICK && $nRow>=5} { + set iOffsetIncr [expr $nRow / 5] + set iLimitIncr [expr $nRow / 5] + } + + set iLimitEnd [expr $nRow+$iLimitIncr] + set iOffsetEnd [expr $nRow+$iOffsetIncr] + + for {set iOffset 0} {$iOffset < $iOffsetEnd} {incr iOffset $iOffsetIncr} { + for {set iLimit 0} {$iLimit < $iLimitEnd} {incr iLimit} { + + set ::compound_sql "$sql LIMIT $iLimit" + if {$iOffset != 0} { + append ::compound_sql " OFFSET $iOffset" + } + + set iStart [expr {$iOffset*$nCol}] + set iEnd [expr {($iOffset*$nCol) + ($iLimit*$nCol) -1}] + + do_test $testname.limit=$iLimit.offset=$iOffset { + execsql $::compound_sql + } [lrange $result $iStart $iEnd] + } + } +} + +#------------------------------------------------------------------------- +# test_compound_select_flippable TESTNAME SELECT RESULT +# +# This command is for testing statements of the form: +# +# ORDER BY +# +# where each is a simple (non-compound) select statement +# and is one of "INTERSECT", "UNION ALL" or "UNION". +# +# This proc calls [test_compound_select] twice, once with the select +# statement as it is passed to this command, and once with the positions +# of exchanged. +# +proc test_compound_select_flippable {testname sql result} { + test_compound_select $testname $sql $result + + set select [string trim $sql] + set RE {(.*)(UNION ALL|INTERSECT|UNION)(.*)(ORDER BY.*)} + set rc [regexp $RE $select -> s1 op s2 order_by] + if {!$rc} {error "Statement is unflippable: $select"} + + set flipsql "$s2 $op $s1 $order_by" + test_compound_select $testname.flipped $flipsql $result +} + +############################################################################# +# Begin tests. +# + +# Create and populate a sample database. +# +do_test select9-1.0 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(d, e, f); + BEGIN; + INSERT INTO t1 VALUES(1, 'one', 'I'); + INSERT INTO t1 VALUES(3, NULL, NULL); + INSERT INTO t1 VALUES(5, 'five', 'V'); + INSERT INTO t1 VALUES(7, 'seven', 'VII'); + INSERT INTO t1 VALUES(9, NULL, NULL); + INSERT INTO t1 VALUES(2, 'two', 'II'); + INSERT INTO t1 VALUES(4, 'four', 'IV'); + INSERT INTO t1 VALUES(6, NULL, NULL); + INSERT INTO t1 VALUES(8, 'eight', 'VIII'); + INSERT INTO t1 VALUES(10, 'ten', 'X'); + + INSERT INTO t2 VALUES(1, 'two', 'IV'); + INSERT INTO t2 VALUES(2, 'four', 'VIII'); + INSERT INTO t2 VALUES(3, NULL, NULL); + INSERT INTO t2 VALUES(4, 'eight', 'XVI'); + INSERT INTO t2 VALUES(5, 'ten', 'XX'); + INSERT INTO t2 VALUES(6, NULL, NULL); + INSERT INTO t2 VALUES(7, 'fourteen', 'XXVIII'); + INSERT INTO t2 VALUES(8, 'sixteen', 'XXXII'); + INSERT INTO t2 VALUES(9, NULL, NULL); + INSERT INTO t2 VALUES(10, 'twenty', 'XL'); + + COMMIT; + } +} {} + +# Each iteration of this loop runs the same tests with a different set +# of indexes present within the database schema. The data returned by +# the compound SELECT statements in the test cases should be the same +# in each case. +# +set iOuterLoop 1 +foreach indexes [list { + /* Do not create any indexes. 
*/ +} { + CREATE INDEX i1 ON t1(a) +} { + CREATE INDEX i2 ON t1(b) +} { + CREATE INDEX i3 ON t2(d) +} { + CREATE INDEX i4 ON t2(e) +}] { + + do_test select9-1.$iOuterLoop.1 { + execsql $indexes + } {} + + # Test some 2-way UNION ALL queries. No WHERE clauses. + # + test_compound_select select9-1.$iOuterLoop.2 { + SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 + } {1 one 3 {} 5 five 7 seven 9 {} 2 two 4 four 6 {} 8 eight 10 ten 1 two 2 four 3 {} 4 eight 5 ten 6 {} 7 fourteen 8 sixteen 9 {} 10 twenty} + test_compound_select select9-1.$iOuterLoop.3 { + SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY 1 + } {1 one 1 two 2 two 2 four 3 {} 3 {} 4 four 4 eight 5 five 5 ten 6 {} 6 {} 7 seven 7 fourteen 8 eight 8 sixteen 9 {} 9 {} 10 ten 10 twenty} + test_compound_select select9-1.$iOuterLoop.4 { + SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY 2 + } {3 {} 9 {} 6 {} 3 {} 6 {} 9 {} 8 eight 4 eight 5 five 4 four 2 four 7 fourteen 1 one 7 seven 8 sixteen 10 ten 5 ten 10 twenty 2 two 1 two} + test_compound_select_flippable select9-1.$iOuterLoop.5 { + SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY 1, 2 + } {1 one 1 two 2 four 2 two 3 {} 3 {} 4 eight 4 four 5 five 5 ten 6 {} 6 {} 7 fourteen 7 seven 8 eight 8 sixteen 9 {} 9 {} 10 ten 10 twenty} + test_compound_select_flippable select9-1.$iOuterLoop.6 { + SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY 2, 1 + } {3 {} 3 {} 6 {} 6 {} 9 {} 9 {} 4 eight 8 eight 5 five 2 four 4 four 7 fourteen 1 one 7 seven 8 sixteen 5 ten 10 ten 10 twenty 1 two 2 two} + + # Test some 2-way UNION queries. + # + test_compound_select select9-1.$iOuterLoop.7 { + SELECT a, b FROM t1 UNION SELECT d, e FROM t2 + } {1 one 1 two 2 four 2 two 3 {} 4 eight 4 four 5 five 5 ten 6 {} 7 fourteen 7 seven 8 eight 8 sixteen 9 {} 10 ten 10 twenty} + + test_compound_select select9-1.$iOuterLoop.8 { + SELECT a, b FROM t1 UNION SELECT d, e FROM t2 ORDER BY 1 + } {1 one 1 two 2 four 2 two 3 {} 4 eight 4 four 5 five 5 ten 6 {} 7 fourteen 7 seven 8 eight 8 sixteen 9 {} 10 ten 10 twenty} + + test_compound_select select9-1.$iOuterLoop.9 { + SELECT a, b FROM t1 UNION SELECT d, e FROM t2 ORDER BY 2 + } {3 {} 6 {} 9 {} 4 eight 8 eight 5 five 2 four 4 four 7 fourteen 1 one 7 seven 8 sixteen 5 ten 10 ten 10 twenty 1 two 2 two} + + test_compound_select_flippable select9-1.$iOuterLoop.10 { + SELECT a, b FROM t1 UNION SELECT d, e FROM t2 ORDER BY 1, 2 + } {1 one 1 two 2 four 2 two 3 {} 4 eight 4 four 5 five 5 ten 6 {} 7 fourteen 7 seven 8 eight 8 sixteen 9 {} 10 ten 10 twenty} + + test_compound_select_flippable select9-1.$iOuterLoop.11 { + SELECT a, b FROM t1 UNION SELECT d, e FROM t2 ORDER BY 2, 1 + } {3 {} 6 {} 9 {} 4 eight 8 eight 5 five 2 four 4 four 7 fourteen 1 one 7 seven 8 sixteen 5 ten 10 ten 10 twenty 1 two 2 two} + + # Test some 2-way INTERSECT queries. 
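+  # In the expected results above, rows whose second column is NULL sort
+  # first: NULL compares less than any other value in an ascending ORDER
+  # BY.  Rough standalone sketch (throwaway handle and data, illustrative
+  # only):
+  sqlite3 nulldemo :memory:
+  nulldemo eval {
+    CREATE TABLE t(v);
+    INSERT INTO t VALUES(2); INSERT INTO t VALUES(NULL); INSERT INTO t VALUES(1);
+  }
+  puts [nulldemo eval {SELECT v FROM t ORDER BY v}]  ;# {} 1 2
+  nulldemo close
+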
+ # + test_compound_select select9-1.$iOuterLoop.11 { + SELECT a, b FROM t1 INTERSECT SELECT d, e FROM t2 + } {3 {} 6 {} 9 {}} + test_compound_select_flippable select9-1.$iOuterLoop.12 { + SELECT a, b FROM t1 INTERSECT SELECT d, e FROM t2 ORDER BY 1 + } {3 {} 6 {} 9 {}} + test_compound_select select9-1.$iOuterLoop.13 { + SELECT a, b FROM t1 INTERSECT SELECT d, e FROM t2 ORDER BY 2 + } {3 {} 6 {} 9 {}} + test_compound_select_flippable select9-1.$iOuterLoop.14 { + SELECT a, b FROM t1 INTERSECT SELECT d, e FROM t2 ORDER BY 2, 1 + } {3 {} 6 {} 9 {}} + test_compound_select_flippable select9-1.$iOuterLoop.15 { + SELECT a, b FROM t1 INTERSECT SELECT d, e FROM t2 ORDER BY 1, 2 + } {3 {} 6 {} 9 {}} + + # Test some 2-way EXCEPT queries. + # + test_compound_select select9-1.$iOuterLoop.16 { + SELECT a, b FROM t1 EXCEPT SELECT d, e FROM t2 + } {1 one 2 two 4 four 5 five 7 seven 8 eight 10 ten} + + test_compound_select select9-1.$iOuterLoop.17 { + SELECT a, b FROM t1 EXCEPT SELECT d, e FROM t2 ORDER BY 1 + } {1 one 2 two 4 four 5 five 7 seven 8 eight 10 ten} + + test_compound_select select9-1.$iOuterLoop.18 { + SELECT a, b FROM t1 EXCEPT SELECT d, e FROM t2 ORDER BY 2 + } {8 eight 5 five 4 four 1 one 7 seven 10 ten 2 two} + + test_compound_select select9-1.$iOuterLoop.19 { + SELECT a, b FROM t1 EXCEPT SELECT d, e FROM t2 ORDER BY 1, 2 + } {1 one 2 two 4 four 5 five 7 seven 8 eight 10 ten} + + test_compound_select select9-1.$iOuterLoop.20 { + SELECT a, b FROM t1 EXCEPT SELECT d, e FROM t2 ORDER BY 2, 1 + } {8 eight 5 five 4 four 1 one 7 seven 10 ten 2 two} + + incr iOuterLoop +} + +do_test select9-2.0 { + execsql { + DROP INDEX i1; + DROP INDEX i2; + DROP INDEX i3; + DROP INDEX i4; + } +} {} + +proc reverse {lhs rhs} { + return [string compare $rhs $lhs] +} +db collate reverse reverse + +# This loop is similar to the previous one (test cases select9-1.*) +# except that the simple select statements have WHERE clauses attached +# to them. Sometimes the WHERE clause may be satisfied using the same +# index used for ORDER BY, sometimes not. +# +set iOuterLoop 1 +foreach indexes [list { + /* Do not create any indexes. 
*/ +} { + CREATE INDEX i1 ON t1(a) +} { + DROP INDEX i1; + CREATE INDEX i1 ON t1(b, a) +} { + CREATE INDEX i2 ON t2(d DESC, e COLLATE REVERSE ASC); +} { + CREATE INDEX i3 ON t1(a DESC); +}] { + do_test select9-2.$iOuterLoop.1 { + execsql $indexes + } {} + + test_compound_select_flippable select9-2.$iOuterLoop.2 { + SELECT * FROM t1 WHERE a<5 UNION SELECT * FROM t2 WHERE d>=5 ORDER BY 1 + } {1 one I 2 two II 3 {} {} 4 four IV 5 ten XX 6 {} {} 7 fourteen XXVIII 8 sixteen XXXII 9 {} {} 10 twenty XL} + + test_compound_select_flippable select9-2.$iOuterLoop.2 { + SELECT * FROM t1 WHERE a<5 UNION SELECT * FROM t2 WHERE d>=5 ORDER BY 2, 1 + } {3 {} {} 6 {} {} 9 {} {} 4 four IV 7 fourteen XXVIII 1 one I 8 sixteen XXXII 5 ten XX 10 twenty XL 2 two II} + + test_compound_select_flippable select9-2.$iOuterLoop.3 { + SELECT * FROM t1 WHERE a<5 UNION SELECT * FROM t2 WHERE d>=5 + ORDER BY 2 COLLATE reverse, 1 + } {3 {} {} 6 {} {} 9 {} {} 2 two II 10 twenty XL 5 ten XX 8 sixteen XXXII 1 one I 7 fourteen XXVIII 4 four IV} + + test_compound_select_flippable select9-2.$iOuterLoop.4 { + SELECT * FROM t1 WHERE a<5 UNION ALL SELECT * FROM t2 WHERE d>=5 ORDER BY 1 + } {1 one I 2 two II 3 {} {} 4 four IV 5 ten XX 6 {} {} 7 fourteen XXVIII 8 sixteen XXXII 9 {} {} 10 twenty XL} + + test_compound_select_flippable select9-2.$iOuterLoop.5 { + SELECT * FROM t1 WHERE a<5 UNION ALL SELECT * FROM t2 WHERE d>=5 ORDER BY 2, 1 + } {3 {} {} 6 {} {} 9 {} {} 4 four IV 7 fourteen XXVIII 1 one I 8 sixteen XXXII 5 ten XX 10 twenty XL 2 two II} + + test_compound_select_flippable select9-2.$iOuterLoop.6 { + SELECT * FROM t1 WHERE a<5 UNION ALL SELECT * FROM t2 WHERE d>=5 + ORDER BY 2 COLLATE reverse, 1 + } {3 {} {} 6 {} {} 9 {} {} 2 two II 10 twenty XL 5 ten XX 8 sixteen XXXII 1 one I 7 fourteen XXVIII 4 four IV} + + test_compound_select select9-2.$iOuterLoop.4 { + SELECT a FROM t1 WHERE a<8 EXCEPT SELECT d FROM t2 WHERE d<=3 ORDER BY 1 + } {4 5 6 7} + + test_compound_select select9-2.$iOuterLoop.4 { + SELECT a FROM t1 WHERE a<8 INTERSECT SELECT d FROM t2 WHERE d<=3 ORDER BY 1 + } {1 2 3} + +} + +do_test select9-2.X { + execsql { + DROP INDEX i1; + DROP INDEX i2; + DROP INDEX i3; + } +} {} + +# This procedure executes the SQL. Then it checks the generated program +# for the SQL and appends a "nosort" to the result if the program contains the +# SortCallback opcode. If the program does not contain the SortCallback +# opcode it appends "sort" +# +proc cksort {sql} { + set ::sqlite_sort_count 0 + set data [execsql $sql] + if {$::sqlite_sort_count} {set x sort} {set x nosort} + lappend data $x + return $data +} + +# If the right indexes exist, the following query: +# +# SELECT t1.a FROM t1 UNION ALL SELECT t2.d FROM t2 ORDER BY 1 +# +# can use indexes to run without doing a in-memory sort operation. +# This block of tests (select9-3.*) is used to check if the same +# is possible with: +# +# CREATE VIEW v1 AS SELECT a FROM t1 UNION ALL SELECT d FROM t2 +# SELECT a FROM v1 ORDER BY 1 +# +# It turns out that it is. 
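+# The "nosort" tag appended by cksort above comes from ::sqlite_sort_count,
+# a counter exported by the test build that is bumped each time a statement
+# performs a sort; "nosort" therefore means the ORDER BY was satisfied by
+# merging rows that the indexes already deliver in order.  A rough way to
+# observe the same thing outside the harness is to look at the query plan
+# (handle name illustrative; EXPLAIN QUERY PLAN output varies by version):
+#
+#   sqlite3 plandemo :memory:
+#   plandemo eval { CREATE TABLE t(a); CREATE INDEX ta ON t(a) }
+#   plandemo eval { EXPLAIN QUERY PLAN SELECT a FROM t ORDER BY a } row {
+#     puts $row(detail)
+#   }
+#   plandemo close
+#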
+# +do_test select9-3.1 { + cksort { SELECT a FROM t1 ORDER BY 1 } +} {1 2 3 4 5 6 7 8 9 10 sort} +do_test select9-3.2 { + execsql { CREATE INDEX i1 ON t1(a) } + cksort { SELECT a FROM t1 ORDER BY 1 } +} {1 2 3 4 5 6 7 8 9 10 nosort} +do_test select9-3.3 { + cksort { SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 LIMIT 5 } +} {1 1 2 2 3 sort} +do_test select9-3.4 { + execsql { CREATE INDEX i2 ON t2(d) } + cksort { SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 LIMIT 5 } +} {1 1 2 2 3 nosort} +do_test select9-3.5 { + execsql { CREATE VIEW v1 AS SELECT a FROM t1 UNION ALL SELECT d FROM t2 } + cksort { SELECT a FROM v1 ORDER BY 1 LIMIT 5 } +} {1 1 2 2 3 nosort} +do_test select9-3.X { + execsql { + DROP INDEX i1; + DROP INDEX i2; + DROP VIEW v1; + } +} {} + +# This block of tests is the same as the preceding one, except that +# "UNION" is tested instead of "UNION ALL". +# +do_test select9-4.1 { + cksort { SELECT a FROM t1 ORDER BY 1 } +} {1 2 3 4 5 6 7 8 9 10 sort} +do_test select9-4.2 { + execsql { CREATE INDEX i1 ON t1(a) } + cksort { SELECT a FROM t1 ORDER BY 1 } +} {1 2 3 4 5 6 7 8 9 10 nosort} +do_test select9-4.3 { + cksort { SELECT a FROM t1 UNION SELECT d FROM t2 ORDER BY 1 LIMIT 5 } +} {1 2 3 4 5 sort} +do_test select9-4.4 { + execsql { CREATE INDEX i2 ON t2(d) } + cksort { SELECT a FROM t1 UNION SELECT d FROM t2 ORDER BY 1 LIMIT 5 } +} {1 2 3 4 5 nosort} +do_test select9-4.5 { + execsql { CREATE VIEW v1 AS SELECT a FROM t1 UNION SELECT d FROM t2 } + cksort { SELECT a FROM v1 ORDER BY 1 LIMIT 5 } +} {1 2 3 4 5 sort} +do_test select9-4.X { + execsql { + DROP INDEX i1; + DROP INDEX i2; + DROP VIEW v1; + } +} {} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/selectA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/selectA.test --- sqlite3-3.4.2/test/selectA.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/selectA.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,1296 @@ +# 2008 June 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The focus of this file is testing the compound-SELECT merge +# optimization. Or, in other words, making sure that all +# possible combinations of UNION, UNION ALL, EXCEPT, and +# INTERSECT work together with an ORDER BY clause (with or w/o +# explicit sort order and explicit collating secquites) and +# with and without optional LIMIT and OFFSET clauses. 
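+# "Compound-SELECT merge" means that when every arm of a UNION, UNION ALL,
+# INTERSECT or EXCEPT can already deliver its rows in ORDER BY order (for
+# example via an index), the arms are merged directly instead of being
+# collected and sorted afterwards; the cases below check that the merged
+# output still honours explicit ASC/DESC and COLLATE terms.  Tiny
+# standalone example of the kind of statement involved (throwaway handle
+# and data, illustrative only):
+sqlite3 mrgdemo :memory:
+mrgdemo eval {
+  CREATE TABLE p(v); INSERT INTO p VALUES('b'); INSERT INTO p VALUES('D');
+  CREATE TABLE q(v); INSERT INTO q VALUES('a'); INSERT INTO q VALUES('C');
+}
+puts [mrgdemo eval {
+  SELECT v FROM p UNION ALL SELECT v FROM q ORDER BY v COLLATE NOCASE
+}]  ;# a b C D
+mrgdemo close
+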
+# +# $Id: selectA.test,v 1.6 2008/08/21 14:24:29 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +do_test selectA-1.0 { + execsql { + CREATE TABLE t1(a,b,c COLLATE NOCASE); + INSERT INTO t1 VALUES(1,'a','a'); + INSERT INTO t1 VALUES(9.9, 'b', 'B'); + INSERT INTO t1 VALUES(NULL, 'C', 'c'); + INSERT INTO t1 VALUES('hello', 'd', 'D'); + INSERT INTO t1 VALUES(x'616263', 'e', 'e'); + SELECT * FROM t1; + } +} {1 a a 9.9 b B {} C c hello d D abc e e} +do_test selectA-1.1 { + execsql { + CREATE TABLE t2(x,y,z COLLATE NOCASE); + INSERT INTO t2 VALUES(NULL,'U','u'); + INSERT INTO t2 VALUES('mad', 'Z', 'z'); + INSERT INTO t2 VALUES(x'68617265', 'm', 'M'); + INSERT INTO t2 VALUES(5.2e6, 'X', 'x'); + INSERT INTO t2 VALUES(-23, 'Y', 'y'); + SELECT * FROM t2; + } +} {{} U u mad Z z hare m M 5200000.0 X x -23 Y y} +do_test selectA-1.2 { + execsql { + CREATE TABLE t3(a,b,c COLLATE NOCASE); + INSERT INTO t3 SELECT * FROM t1; + INSERT INTO t3 SELECT * FROM t2; + INSERT INTO t3 SELECT * FROM t1; + INSERT INTO t3 SELECT * FROM t2; + INSERT INTO t3 SELECT * FROM t1; + INSERT INTO t3 SELECT * FROM t2; + SELECT count(*) FROM t3; + } +} {30} + +do_test selectA-2.1 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.1.1 { # Ticket #3314 + execsql { + SELECT t1.a, t1.b, t1.c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.1.2 { # Ticket #3314 + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY t1.a, t1.b, t1.c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.2 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.3 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.4 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.5 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.6 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.7 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.8 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.9 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.10 { + execsql { 
+ SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.11 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.12 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.13 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.14 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.15 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.16 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.17 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.18 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.19 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.20 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.21 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.22 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.23 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.24 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.25 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.26 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.27 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT 
x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.28 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.29 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.30 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.31 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.32 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.33 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.34 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.35 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.36 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.37 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.38 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.39 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.40 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.41 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-2.42 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-2.43 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-2.44 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-2.45 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-2.46 { + execsql { + SELECT 
a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-2.47 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-2.48 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-2.49 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-2.50 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-2.51 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-2.52 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-2.53 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY b, a DESC + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-2.54 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY b + } +} {hello d D abc e e} +do_test selectA-2.55 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY b DESC, c + } +} {abc e e hello d D} +do_test selectA-2.56 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY b, c DESC, a + } +} {hello d D abc e e} +do_test selectA-2.57 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY b COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.58 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY b + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-2.59 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY c, a DESC + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.60 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY c + } +} {hello d D abc e e} +do_test selectA-2.61 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY, b DESC, c, a, b, c, a, b, c + } +} {hello d D abc e e} +do_test selectA-2.62 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c DESC, a + } +} {abc e e hello d D} +do_test selectA-2.63 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.64 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.65 { + execsql { + SELECT a,b,c FROM t3 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.66 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.67 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t3 WHERE b<'d' + ORDER BY c DESC, a + } +} {abc e e hello d D} +do_test selectA-2.68 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c DESC, a + } +} {abc e e hello d D} +do_test selectA-2.69 { + execsql { + SELECT a,b,c FROM t1 
INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.70 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.71 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t1 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + INTERSECT SELECT a,b,c FROM t1 + EXCEPT SELECT x,y,z FROM t2 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT y,x,z FROM t2 + INTERSECT SELECT a,b,c FROM t1 + EXCEPT SELECT c,b,a FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-2.72 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.73 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.74 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.75 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.76 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.77 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.78 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.79 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.80 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.81 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.82 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.83 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-2.84 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-2.85 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z 
z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-2.86 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.87 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY y COLLATE NOCASE DESC,x,z + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.88 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.89 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-2.90 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.91 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-2.92 { + execsql { + SELECT x,y,z FROM t2 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + ORDER BY y COLLATE NOCASE DESC,x,z + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-2.93 { + execsql { + SELECT upper((SELECT c FROM t1 UNION SELECT z FROM t2 ORDER BY 1)); + } +} {A} +do_test selectA-2.94 { + execsql { + SELECT lower((SELECT c FROM t1 UNION ALL SELECT z FROM t2 ORDER BY 1)); + } +} {a} +do_test selectA-2.95 { + execsql { + SELECT lower((SELECT c FROM t1 INTERSECT SELECT z FROM t2 ORDER BY 1)); + } +} {{}} +do_test selectA-2.96 { + execsql { + SELECT lower((SELECT z FROM t2 EXCEPT SELECT c FROM t1 ORDER BY 1)); + } +} {m} + + +do_test selectA-3.0 { + execsql { + CREATE UNIQUE INDEX t1a ON t1(a); + CREATE UNIQUE INDEX t1b ON t1(b); + CREATE UNIQUE INDEX t1c ON t1(c); + CREATE UNIQUE INDEX t2x ON t2(x); + CREATE UNIQUE INDEX t2y ON t2(y); + CREATE UNIQUE INDEX t2z ON t2(z); + SELECT name FROM sqlite_master WHERE type='index' + } +} {t1a t1b t1c t2x t2y t2z} +do_test selectA-3.1 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.1.1 { # Ticket #3314 + execsql { + SELECT t1.a,b,t1.c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,t1.b,t1.c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.2 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.3 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.4 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.5 { + execsql { + 
SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.6 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.7 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.8 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.9 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.10 { + execsql { + SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.11 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.12 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.13 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.14 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.15 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.16 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.17 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.18 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.19 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.20 { + execsql { + SELECT x,y,z FROM t2 UNION ALL SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.21 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.22 { + execsql { + SELECT a,b,c FROM t1 
UNION SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.23 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.24 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.25 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.26 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.27 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.28 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.29 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.30 { + execsql { + SELECT a,b,c FROM t1 UNION SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.31 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.32 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.33 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.34 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.35 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.36 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.37 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.38 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.39 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m 
M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.40 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.41 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-3.42 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-3.43 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-3.44 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a,b,c + } +} {hello d D abc e e} +do_test selectA-3.45 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-3.46 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a,b,c + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-3.47 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-3.48 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-3.49 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-3.50 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a DESC + } +} {abc e e hello d D} +do_test selectA-3.51 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-3.52 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY a DESC + } +} {9.9 b B 1 a a {} C c} +do_test selectA-3.53 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY b, a DESC + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-3.54 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY b + } +} {hello d D abc e e} +do_test selectA-3.55 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY b DESC, c + } +} {abc e e hello d D} +do_test selectA-3.56 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY b, c DESC, a + } +} {hello d D abc e e} +do_test selectA-3.57 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY b COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.58 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY b + } +} {{} C c 1 a a 9.9 b B} +do_test selectA-3.59 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY c, a DESC + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.60 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b>='d' + ORDER BY c + } +} {hello d D abc e e} +do_test selectA-3.61 { + execsql { + SELECT a,b,c FROM t1 WHERE b>='d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY c COLLATE BINARY, b DESC, c, a, b, c, a, b, c + } +} {hello d D abc e e} +do_test selectA-3.62 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c DESC, a + } +} {abc e e hello d 
D} +do_test selectA-3.63 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.64 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.65 { + execsql { + SELECT a,b,c FROM t3 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.66 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.67 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t3 WHERE b<'d' + ORDER BY c DESC, a + } +} {abc e e hello d D} +do_test selectA-3.68 { + execsql { + SELECT a,b,c FROM t1 EXCEPT SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c DESC, a + } +} {abc e e hello d D} +do_test selectA-3.69 { + execsql { + SELECT a,b,c FROM t1 INTERSECT SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c COLLATE NOCASE + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.70 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' INTERSECT SELECT a,b,c FROM t1 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.71 { + execsql { + SELECT a,b,c FROM t1 WHERE b<'d' + INTERSECT SELECT a,b,c FROM t1 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT b,c,a FROM t3 + INTERSECT SELECT a,b,c FROM t1 + EXCEPT SELECT x,y,z FROM t2 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT y,x,z FROM t2 + INTERSECT SELECT a,b,c FROM t1 + EXCEPT SELECT c,b,a FROM t3 + ORDER BY c + } +} {1 a a 9.9 b B {} C c} +do_test selectA-3.72 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.73 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.74 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.75 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.76 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.77 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY b COLLATE NOCASE DESC,a,c + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.78 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.79 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.80 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 
5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.81 { + execsql { + SELECT a,b,c FROM t3 UNION SELECT x,y,z FROM t2 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.82 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a,b,c + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.83 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a DESC,b,c + } +} {hare m M abc e e mad Z z hello d D 5200000.0 X x 9.9 b B 1 a a -23 Y y {} C c {} U u} +do_test selectA-3.84 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY a,c,b + } +} {{} C c {} U u -23 Y y 1 a a 9.9 b B 5200000.0 X x hello d D mad Z z abc e e hare m M} +do_test selectA-3.85 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY b,a,c + } +} {{} C c {} U u 5200000.0 X x -23 Y y mad Z z 1 a a 9.9 b B hello d D abc e e hare m M} +do_test selectA-3.86 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY b COLLATE NOCASE,a,c + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.87 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY y COLLATE NOCASE DESC,x,z + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.88 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c,b,a + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.89 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c,a,b + } +} {1 a a 9.9 b B {} C c hello d D abc e e hare m M {} U u 5200000.0 X x -23 Y y mad Z z} +do_test selectA-3.90 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.91 { + execsql { + SELECT x,y,z FROM t2 UNION SELECT a,b,c FROM t3 + ORDER BY c COLLATE BINARY DESC,a,b + } +} {mad Z z -23 Y y 5200000.0 X x {} U u abc e e {} C c 1 a a hare m M hello d D 9.9 b B} +do_test selectA-3.92 { + execsql { + SELECT x,y,z FROM t2 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + ORDER BY y COLLATE NOCASE DESC,x,z + } +} {mad Z z -23 Y y 5200000.0 X x {} U u hare m M abc e e hello d D {} C c 9.9 b B 1 a a} +do_test selectA-3.93 { + execsql { + SELECT upper((SELECT c FROM t1 UNION SELECT z FROM t2 ORDER BY 1)); + } +} {A} +do_test selectA-3.94 { + execsql { + SELECT lower((SELECT c FROM t1 UNION ALL SELECT z FROM t2 ORDER BY 1)); + } +} {a} +do_test selectA-3.95 { + execsql { + SELECT lower((SELECT c FROM t1 INTERSECT SELECT z FROM t2 ORDER BY 1)); + } +} {{}} +do_test selectA-3.96 { + execsql { + SELECT lower((SELECT z FROM t2 EXCEPT SELECT c FROM t1 ORDER BY 1)); + } +} {m} +do_test selectA-3.97 { + execsql { + SELECT upper((SELECT x FROM ( + SELECT x,y,z FROM t2 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + INTERSECT SELECT a,b,c FROM t3 + EXCEPT SELECT c,b,a FROM t1 + UNION SELECT a,b,c FROM t3 + ORDER BY y COLLATE NOCASE DESC,x,z))) + } +} {MAD} + +finish_test diff -Nru 
/tmp/wcSK23PnjP/sqlite3-3.4.2/test/selectB.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/selectB.test --- sqlite3-3.4.2/test/selectB.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/selectB.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,381 @@ +# 2008 June 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: selectB.test,v 1.10 2009/04/02 16:59:47 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +proc test_transform {testname sql1 sql2 results} { + set ::vdbe1 [list] + set ::vdbe2 [list] + db eval "explain $sql1" { lappend ::vdbe1 $opcode } + db eval "explain $sql2" { lappend ::vdbe2 $opcode } + + do_test $testname.transform { + set ::vdbe1 + } $::vdbe2 + + set ::sql1 $sql1 + do_test $testname.sql1 { + execsql $::sql1 + } $results + + set ::sql2 $sql2 + do_test $testname.sql2 { + execsql $::sql2 + } $results +} + +do_test selectB-1.1 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(d, e, f); + + INSERT INTO t1 VALUES( 2, 4, 6); + INSERT INTO t1 VALUES( 8, 10, 12); + INSERT INTO t1 VALUES(14, 16, 18); + + INSERT INTO t2 VALUES(3, 6, 9); + INSERT INTO t2 VALUES(12, 15, 18); + INSERT INTO t2 VALUES(21, 24, 27); + } +} {} + +for {set ii 1} {$ii <= 2} {incr ii} { + + if {$ii == 2} { + do_test selectB-2.1 { + execsql { + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t2(d); + } + } {} + } + + test_transform selectB-$ii.2 { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 + } {2 8 14 3 12 21} + + test_transform selectB-$ii.3 { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) ORDER BY 1 + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 + } {2 3 8 12 14 21} + + test_transform selectB-$ii.4 { + SELECT * FROM + (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + WHERE a>10 ORDER BY 1 + } { + SELECT a FROM t1 WHERE a>10 UNION ALL SELECT d FROM t2 WHERE d>10 ORDER BY 1 + } {12 14 21} + + test_transform selectB-$ii.5 { + SELECT * FROM + (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + WHERE a>10 ORDER BY a + } { + SELECT a FROM t1 WHERE a>10 + UNION ALL + SELECT d FROM t2 WHERE d>10 + ORDER BY a + } {12 14 21} + + test_transform selectB-$ii.6 { + SELECT * FROM + (SELECT a FROM t1 UNION ALL SELECT d FROM t2 WHERE d > 12) + WHERE a>10 ORDER BY a + } { + SELECT a FROM t1 WHERE a>10 + UNION ALL + SELECT d FROM t2 WHERE d>12 AND d>10 + ORDER BY a + } {14 21} + + test_transform selectB-$ii.7 { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) ORDER BY 1 + LIMIT 2 + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 LIMIT 2 + } {2 3} + + test_transform selectB-$ii.8 { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) ORDER BY 1 + LIMIT 2 OFFSET 3 + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 LIMIT 2 OFFSET 3 + } {12 14} + + test_transform selectB-$ii.9 { + SELECT * FROM ( + SELECT a FROM t1 UNION ALL SELECT d FROM t2 UNION ALL SELECT c FROM t1 + ) + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 UNION ALL SELECT c FROM t1 + } {2 8 14 3 12 21 6 12 18} + + test_transform selectB-$ii.10 { + SELECT * FROM ( + SELECT 
a FROM t1 UNION ALL SELECT d FROM t2 UNION ALL SELECT c FROM t1 + ) ORDER BY 1 + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 UNION ALL SELECT c FROM t1 + ORDER BY 1 + } {2 3 6 8 12 12 14 18 21} + + test_transform selectB-$ii.11 { + SELECT * FROM ( + SELECT a FROM t1 UNION ALL SELECT d FROM t2 UNION ALL SELECT c FROM t1 + ) WHERE a>=10 ORDER BY 1 LIMIT 3 + } { + SELECT a FROM t1 WHERE a>=10 UNION ALL SELECT d FROM t2 WHERE d>=10 + UNION ALL SELECT c FROM t1 WHERE c>=10 + ORDER BY 1 LIMIT 3 + } {12 12 14} + + test_transform selectB-$ii.12 { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2 LIMIT 2) + } { + SELECT a FROM t1 UNION ALL SELECT d FROM t2 LIMIT 2 + } {2 8} + + # An ORDER BY in a compound subqueries defeats flattening. Ticket #3773 + # test_transform selectB-$ii.13 { + # SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY a ASC) + # } { + # SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 ASC + # } {2 3 8 12 14 21} + # + # test_transform selectB-$ii.14 { + # SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY a DESC) + # } { + # SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 DESC + # } {21 14 12 8 3 2} + # + # test_transform selectB-$ii.14 { + # SELECT * FROM ( + # SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY a DESC + # ) LIMIT 2 OFFSET 2 + # } { + # SELECT a FROM t1 UNION ALL SELECT d FROM t2 ORDER BY 1 DESC + # LIMIT 2 OFFSET 2 + # } {12 8} + # + # test_transform selectB-$ii.15 { + # SELECT * FROM ( + # SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY a ASC, e DESC + # ) + # } { + # SELECT a, b FROM t1 UNION ALL SELECT d, e FROM t2 ORDER BY a ASC, e DESC + # } {2 4 3 6 8 10 12 15 14 16 21 24} +} + +do_test selectB-3.0 { + execsql { + DROP INDEX i1; + DROP INDEX i2; + } +} {} + +for {set ii 3} {$ii <= 4} {incr ii} { + + if {$ii == 4} { + do_test selectB-4.0 { + execsql { + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + CREATE INDEX i3 ON t1(c); + CREATE INDEX i4 ON t2(d); + CREATE INDEX i5 ON t2(e); + CREATE INDEX i6 ON t2(f); + } + } {} + } + + do_test selectB-$ii.1 { + execsql { + SELECT DISTINCT * FROM + (SELECT c FROM t1 UNION ALL SELECT e FROM t2) + ORDER BY 1; + } + } {6 12 15 18 24} + + do_test selectB-$ii.2 { + execsql { + SELECT c, count(*) FROM + (SELECT c FROM t1 UNION ALL SELECT e FROM t2) + GROUP BY c ORDER BY 1; + } + } {6 2 12 1 15 1 18 1 24 1} + do_test selectB-$ii.3 { + execsql { + SELECT c, count(*) FROM + (SELECT c FROM t1 UNION ALL SELECT e FROM t2) + GROUP BY c HAVING count(*)>1; + } + } {6 2} + do_test selectB-$ii.4 { + execsql { + SELECT t4.c, t3.a FROM + (SELECT c FROM t1 UNION ALL SELECT e FROM t2) AS t4, t1 AS t3 + WHERE t3.a=14 + ORDER BY 1 + } + } {6 14 6 14 12 14 15 14 18 14 24 14} + + do_test selectB-$ii.5 { + execsql { + SELECT d FROM t2 + EXCEPT + SELECT a FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + } + } {} + do_test selectB-$ii.6 { + execsql { + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + EXCEPT + SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT d FROM t2) + } + } {} + do_test selectB-$ii.7 { + execsql { + SELECT c FROM t1 + EXCEPT + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + } + } {12} + do_test selectB-$ii.8 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + EXCEPT + SELECT c FROM t1 + } + } {9 15 24 27} + do_test selectB-$ii.9 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + EXCEPT + SELECT c FROM t1 + ORDER BY c DESC + } + } {27 24 15 9} + + do_test 
selectB-$ii.10 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + UNION + SELECT c FROM t1 + ORDER BY c DESC + } + } {27 24 18 15 12 9 6} + do_test selectB-$ii.11 { + execsql { + SELECT c FROM t1 + UNION + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + ORDER BY c + } + } {6 9 12 15 18 24 27} + do_test selectB-$ii.12 { + execsql { + SELECT c FROM t1 UNION SELECT e FROM t2 UNION ALL SELECT f FROM t2 + ORDER BY c + } + } {6 9 12 15 18 18 24 27} + do_test selectB-$ii.13 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + UNION + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + ORDER BY 1 + } + } {6 9 15 18 24 27} + + do_test selectB-$ii.14 { + execsql { + SELECT c FROM t1 + INTERSECT + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + ORDER BY 1 + } + } {6 18} + do_test selectB-$ii.15 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + INTERSECT + SELECT c FROM t1 + ORDER BY 1 + } + } {6 18} + do_test selectB-$ii.16 { + execsql { + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + INTERSECT + SELECT * FROM (SELECT e FROM t2 UNION ALL SELECT f FROM t2) + ORDER BY 1 + } + } {6 9 15 18 24 27} + + do_test selectB-$ii.17 { + execsql { + SELECT * FROM ( + SELECT a FROM t1 UNION ALL SELECT d FROM t2 LIMIT 4 + ) LIMIT 2 + } + } {2 8} + + do_test selectB-$ii.18 { + execsql { + SELECT * FROM ( + SELECT a FROM t1 UNION ALL SELECT d FROM t2 LIMIT 4 OFFSET 2 + ) LIMIT 2 + } + } {14 3} + + do_test selectB-$ii.19 { + execsql { + SELECT * FROM ( + SELECT DISTINCT (a/10) FROM t1 UNION ALL SELECT DISTINCT(d%2) FROM t2 + ) + } + } {0 1 0 1} + + do_test selectB-$ii.20 { + execsql { + SELECT DISTINCT * FROM ( + SELECT DISTINCT (a/10) FROM t1 UNION ALL SELECT DISTINCT(d%2) FROM t2 + ) + } + } {0 1} + + do_test selectB-$ii.21 { + execsql { + SELECT * FROM (SELECT * FROM t1 UNION ALL SELECT * FROM t2) ORDER BY a+b + } + } {2 4 6 3 6 9 8 10 12 12 15 18 14 16 18 21 24 27} + + do_test selectB-$ii.21 { + execsql { + SELECT * FROM (SELECT 345 UNION ALL SELECT d FROM t2) ORDER BY 1; + } + } {3 12 21 345} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/selectC.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/selectC.test --- sqlite3-3.4.2/test/selectC.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/selectC.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,167 @@ +# 2008 September 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# $Id: selectC.test,v 1.5 2009/05/17 15:26:21 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Ticket # +do_test selectC-1.1 { + execsql { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1,'aaa','bbb'); + INSERT INTO t1 SELECT * FROM t1; + INSERT INTO t1 VALUES(2,'ccc','ddd'); + + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE y IN ('aaabbb','xxx'); + } +} {1 aaabbb} +do_test selectC-1.2 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE b||c IN ('aaabbb','xxx'); + } +} {1 aaabbb} +do_test selectC-1.3 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE y='aaabbb' + } +} {1 aaabbb} +do_test selectC-1.4 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE b||c='aaabbb' + } +} {1 aaabbb} +do_test selectC-1.5 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE x=2 + } +} {2 cccddd} +do_test selectC-1.6 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE a=2 + } +} {2 cccddd} +do_test selectC-1.7 { + execsql { + SELECT DISTINCT a AS x, b||c AS y + FROM t1 + WHERE +y='aaabbb' + } +} {1 aaabbb} +do_test selectC-1.8 { + execsql { + SELECT a AS x, b||c AS y + FROM t1 + GROUP BY x, y + HAVING y='aaabbb' + } +} {1 aaabbb} +do_test selectC-1.9 { + execsql { + SELECT a AS x, b||c AS y + FROM t1 + GROUP BY x, y + HAVING b||c='aaabbb' + } +} {1 aaabbb} +do_test selectC-1.10 { + execsql { + SELECT a AS x, b||c AS y + FROM t1 + WHERE y='aaabbb' + GROUP BY x, y + } +} {1 aaabbb} +do_test selectC-1.11 { + execsql { + SELECT a AS x, b||c AS y + FROM t1 + WHERE b||c='aaabbb' + GROUP BY x, y + } +} {1 aaabbb} +proc longname_toupper x {return [string toupper $x]} +db function uppercaseconversionfunctionwithaverylongname longname_toupper +do_test selectC-1.12.1 { + execsql { + SELECT DISTINCT upper(b) AS x + FROM t1 + ORDER BY x + } +} {AAA CCC} +do_test selectC-1.12.2 { + execsql { + SELECT DISTINCT uppercaseconversionfunctionwithaverylongname(b) AS x + FROM t1 + ORDER BY x + } +} {AAA CCC} +do_test selectC-1.13.1 { + execsql { + SELECT upper(b) AS x + FROM t1 + GROUP BY x + ORDER BY x + } +} {AAA CCC} +do_test selectC-1.13.2 { + execsql { + SELECT uppercaseconversionfunctionwithaverylongname(b) AS x + FROM t1 + GROUP BY x + ORDER BY x + } +} {AAA CCC} +do_test selectC-1.14.1 { + execsql { + SELECT upper(b) AS x + FROM t1 + ORDER BY x DESC + } +} {CCC AAA AAA} +do_test selectC-1.14.2 { + execsql { + SELECT uppercaseconversionfunctionwithaverylongname(b) AS x + FROM t1 + ORDER BY x DESC + } +} {CCC AAA AAA} + +# The following query used to leak memory. Verify that has been fixed. +# +do_test selectC-2.1 { + catchsql { + CREATE TABLE t21a(a,b); + INSERT INTO t21a VALUES(1,2); + CREATE TABLE t21b(n); + CREATE TRIGGER r21 AFTER INSERT ON t21b BEGIN + SELECT a FROM t21a WHERE a>new.x UNION ALL + SELECT b FROM t21a WHERE b>new.x ORDER BY 1 LIMIT 2; + END; + INSERT INTO t21b VALUES(6); + } +} {1 {no such column: new.x}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/server1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/server1.test --- sqlite3-3.4.2/test/server1.test 2006-01-15 00:13:16.000000000 +0000 +++ sqlite3-3.6.16/test/server1.test 2009-06-05 18:03:40.000000000 +0100 @@ -13,7 +13,7 @@ # # This file is derived from thread1.test # -# $Id: server1.test,v 1.4 2006/01/15 00:13:16 drh Exp $ +# $Id: server1.test,v 1.5 2007/08/29 18:20:17 drh Exp $ set testdir [file dirname $argv0] @@ -125,7 +125,7 @@ } SQLITE_ROW # Write to a different table from another thread. 
This is allowed -# becaus in server mode with a shared cache we have table-level locking. +# because in server mode with a shared cache we have table-level locking. # do_test server1-2.3 { client_create C test.db @@ -167,4 +167,5 @@ } SQLITE_OK client_halt * +sqlite3_enable_shared_cache 0 finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared2.test --- sqlite3-3.4.2/test/shared2.test 2006-01-26 13:11:37.000000000 +0000 +++ sqlite3-3.6.16/test/shared2.test 2009-06-25 12:35:52.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: shared2.test,v 1.4 2006/01/26 13:11:37 danielk1977 Exp $ +# $Id: shared2.test,v 1.8 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -124,9 +124,47 @@ db2 close do_test shared2-3.2 { - sqlite3_thread_cleanup sqlite3_enable_shared_cache 1 -} {0} +} {1} + +file delete -force test.db + +sqlite3 db test.db +do_test shared2-4.1 { + execsql { + CREATE TABLE t0(a, b); + CREATE TABLE t1(a, b DEFAULT 'hello world'); + } +} {} +db close + +sqlite3 db test.db +sqlite3 db2 test.db + +do_test shared2-4.2 { + execsql { SELECT a, b FROM t0 } db + execsql { INSERT INTO t1(a) VALUES(1) } db2 +} {} + +do_test shared2-4.3 { + db2 close + db close +} {} + +# At one point, this was causing a crash. +# +do_test shared2-5.1 { + sqlite3 db test.db + sqlite3 db2 test.db + execsql { CREATE TABLE t2(a, b, c) } + + # The following statement would crash when attempting to sqlite3_free() + # a pointer allocated from a lookaside buffer. + execsql { CREATE INDEX i1 ON t2(a) } db2 +} {} + +db close +db2 close sqlite3_enable_shared_cache $::enable_shared_cache finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared3.test --- sqlite3-3.4.2/test/shared3.test 2006-05-24 13:43:28.000000000 +0100 +++ sqlite3-3.6.16/test/shared3.test 2009-06-05 18:03:40.000000000 +0100 @@ -9,7 +9,7 @@ # #*********************************************************************** # -# $Id: shared3.test,v 1.1 2006/05/24 12:43:28 drh Exp $ +# $Id: shared3.test,v 1.4 2008/08/20 14:49:25 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -43,5 +43,65 @@ db1 close db2 close +do_test shared3-2.1 { + sqlite3 db1 test.db + execsql { + PRAGMA main.cache_size = 10; + } db1 +} {} +do_test shared3-2.2 { + execsql { PRAGMA main.cache_size } db1 +} {10} +do_test shared3-2.3 { + sqlite3 db2 test.db + execsql { PRAGMA main.cache_size } db1 +} {10} +do_test shared3-2.4 { + execsql { PRAGMA main.cache_size } db2 +} {10} +do_test shared3-2.5 { + execsql { PRAGMA main.cache_size } db1 +} {10} + +# The cache-size should now be 10 pages. However at one point there was +# a bug that caused the cache size to return to the default value when +# a second connection was opened on the shared-cache (as happened in +# test case shared3-2.3 above). The goal of the following tests is to +# ensure that the cache-size really is 10 pages. 
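(Illustrative aside, not part of the upstream patch: the retained-cache-size behaviour described in the comment above can be checked in isolation with a sketch in the same Tcl test style. The connection names dbA and dbB and the test name shared3-sketch are invented for this example, and shared-cache mode is assumed to be enabled already.)
  sqlite3 dbA test.db
  execsql { PRAGMA main.cache_size = 10 } dbA
  sqlite3 dbB test.db                      ;# open a second shared-cache connection
  do_test shared3-sketch {
    # the limit set through dbA should survive dbB being opened
    execsql { PRAGMA main.cache_size } dbA
  } {10}
  dbA close
  dbB close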
+# +if {$::tcl_platform(platform)=="unix"} { + set alternative_name ./test.db +} else { + set alternative_name TEST.DB +} +do_test shared3-2.6 { + sqlite3 db3 $alternative_name + catchsql {select count(*) from sqlite_master} db3 +} {0 1} +do_test shared3-2.7 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(10, randomblob(5000)) + } db1 + catchsql {select count(*) from sqlite_master} db3 +} {0 1} +do_test shared3-2.8 { + db3 close + execsql { + INSERT INTO t1 VALUES(10, randomblob(10000)) + } db1 + sqlite3 db3 $alternative_name + + # If the pager-cache is really still limited to 10 pages, then the INSERT + # statement above should have caused the pager to grab an exclusive lock + # on the database file so that the cache could be spilled. + # + catchsql {select count(*) from sqlite_master} db3 +} {1 {database is locked}} + +db1 close +db2 close +db3 close + sqlite3_enable_shared_cache $::enable_shared_cache finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared4.test --- sqlite3-3.4.2/test/shared4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/shared4.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,237 @@ +# 2008 July 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test the btree mutex protocol for shared cache mode. +# +# $Id: shared4.test,v 1.2 2008/08/04 03:51:24 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close +puts hello + +# This script is only valid if we are running shared-cache mode in a +# threadsafe-capable database engine. +# +ifcapable !shared_cache||!compound { + finish_test + return +} +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +# Prepare multiple databases in shared cache mode. 
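(Illustrative aside, not part of the upstream patch: the multi-database setup that shared4.test builds below reduces to the following two-connection sketch. The file names sketch1.db and sketch2.db, the connection names dbX and dbY, and the table names are invented; shared-cache mode is assumed to be on, so both connections share the same pair of btrees and the cross-database query must hold both btree mutexes.)
  sqlite3 dbX sketch1.db
  sqlite3 dbY sketch2.db
  dbX eval { CREATE TABLE tx(v); INSERT INTO tx VALUES(1); }
  dbY eval { CREATE TABLE ty(v); INSERT INTO ty VALUES(2); }
  dbX eval { ATTACH DATABASE 'sketch2.db' AS two }
  dbY eval { ATTACH DATABASE 'sketch1.db' AS one }
  dbX eval { SELECT v FROM tx UNION ALL SELECT v FROM ty }   ;# expect: 1 2
  dbX close
  dbY close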
+# +do_test shared4-1.1 { + file delete -force test1.db test1.db-journal + file delete -force test2.db test2.db-journal + file delete -force test3.db test3.db-journal + file delete -force test4.db test4.db-journal + sqlite3 db1 test1.db + sqlite3 db2 test2.db + sqlite3 db3 test3.db + sqlite3 db4 test4.db + db1 eval { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(111); + } + db2 eval { + CREATE TABLE t2(b); + INSERT INTO t2 VALUES(222); + } + db3 eval { + CREATE TABLE t3(c); + INSERT INTO t3 VALUES(333); + } + db4 eval { + CREATE TABLE t4(d); + INSERT INTO t4 VALUES(444); + } + db1 eval { + ATTACH DATABASE 'test2.db' AS two; + ATTACH DATABASE 'test3.db' AS three; + ATTACH DATABASE 'test4.db' AS four; + } + db2 eval { + ATTACH DATABASE 'test4.db' AS four; + ATTACH DATABASE 'test3.db' AS three; + ATTACH DATABASE 'test1.db' AS one; + } + db3 eval { + ATTACH DATABASE 'test1.db' AS one; + ATTACH DATABASE 'test2.db' AS two; + ATTACH DATABASE 'test4.db' AS four; + } + db4 eval { + ATTACH DATABASE 'test3.db' AS three; + ATTACH DATABASE 'test2.db' AS two; + ATTACH DATABASE 'test1.db' AS one; + } + db1 eval { + SELECT a FROM t1 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4; + } +} {111 222 333 444} +do_test shared4-1.2 { + db2 eval { + SELECT a FROM t1 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT c FROM t3; + } +} {111 222 444 333} +do_test shared4-1.3 { + db3 eval { + SELECT a FROM t1 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT d FROM t4; + } +} {111 333 222 444} +do_test shared4-1.4 { + db4 eval { + SELECT a FROM t1 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT b FROM t2; + } +} {111 333 444 222} +do_test shared4-1.5 { + db3 eval { + SELECT a FROM t1 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT c FROM t3; + } +} {111 444 222 333} +do_test shared4-1.6 { + db4 eval { + SELECT a FROM t1 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT b FROM t2; + } +} {111 444 333 222} +do_test shared4-1.7 { + db1 eval { + SELECT b FROM t2 UNION ALL + SELECT a FROM t1 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4; + } +} {222 111 333 444} +do_test shared4-1.8 { + db2 eval { + SELECT b FROM t2 UNION ALL + SELECT a FROM t1 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT c FROM t3; + } +} {222 111 444 333} +do_test shared4-1.9 { + db3 eval { + SELECT b FROM t2 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT a FROM t1 UNION ALL + SELECT d FROM t4; + } +} {222 333 111 444} +do_test shared4-1.10 { + db4 eval { + SELECT b FROM t2 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT a FROM t1; + } +} {222 333 444 111} +do_test shared4-1.11 { + db1 eval { + SELECT c FROM t3 UNION ALL + SELECT a FROM t1 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT d FROM t4; + } +} {333 111 222 444} +do_test shared4-1.12 { + db2 eval { + SELECT c FROM t3 UNION ALL + SELECT a FROM t1 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT b FROM t2; + } +} {333 111 444 222} + +do_test shared4-2.1 { + db1 eval { + UPDATE t1 SET a=a+1000; + UPDATE t2 SET b=b+2000; + UPDATE t3 SET c=c+3000; + UPDATE t4 SET d=d+4000; + } + db2 eval { + UPDATE t1 SET a=a+10000; + UPDATE t2 SET b=b+20000; + UPDATE t3 SET c=c+30000; + UPDATE t4 SET d=d+40000; + } + db3 eval { + UPDATE t1 SET a=a+100000; + UPDATE t2 SET b=b+200000; + UPDATE t3 SET c=c+300000; + UPDATE t4 SET d=d+400000; + } + db4 eval { + UPDATE t1 SET a=a+1000000; + UPDATE t2 
SET b=b+2000000; + UPDATE t3 SET c=c+3000000; + UPDATE t4 SET d=d+4000000; + } + db1 eval { + SELECT a FROM t1 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4; + } +} {1111111 2222222 3333333 4444444} +do_test shared4-2.2 { + db2 eval { + SELECT a FROM t1 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT c FROM t3; + } +} {1111111 2222222 4444444 3333333} +do_test shared4-2.3 { + db3 eval { + SELECT a FROM t1 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT b FROM t2 UNION ALL + SELECT d FROM t4; + } +} {1111111 3333333 2222222 4444444} +do_test shared4-2.4 { + db4 eval { + SELECT a FROM t1 UNION ALL + SELECT c FROM t3 UNION ALL + SELECT d FROM t4 UNION ALL + SELECT b FROM t2; + } +} {1111111 3333333 4444444 2222222} + + +db1 close +db2 close +db3 close +db4 close + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared6.test --- sqlite3-3.4.2/test/shared6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/shared6.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,255 @@ +# 2009 April 01 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: shared6.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable !shared_cache { finish_test ; return } + +do_test shared6-1.1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TABLE t2(c, d); + CREATE TABLE t3(e, f); + } + db close +} {} +do_test shared6-1.1.2 { + set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + sqlite3_enable_shared_cache +} {1} + +do_test shared6-1.1.3 { + sqlite3 db1 test.db + sqlite3 db2 test.db +} {} + +# Exclusive shared-cache locks. Test the following: +# +# 1.2.1: If [db1] has an exclusive lock, [db2] cannot read. +# 1.2.2: If [db1] has an exclusive lock, [db1] can read. +# 1.2.3: If [db1] has a non-exclusive write-lock, [db2] can read. +# +do_test shared6-1.2.1 { + execsql { SELECT * FROM t1 } db2 ;# Cache a compiled statement + execsql { BEGIN EXCLUSIVE } db1 + catchsql { SELECT * FROM t1 } db2 ;# Execute the cached compiled statement +} {1 {database table is locked}} +do_test shared6-1.2.2 { + execsql { SELECT * FROM t1 } db1 +} {} +do_test shared6-1.2.3 { + execsql { + COMMIT; + BEGIN; + INSERT INTO t2 VALUES(3, 4); + } db1 + execsql { SELECT * FROM t1 } db2 +} {} +do_test shared6-1.2.X { + execsql { COMMIT } db1 +} {} + +# Regular shared-cache locks. Verify the following: +# +# 1.3.1: If [db1] has a write-lock on t1, [db1] can read from t1. +# 1.3.2: If [db1] has a write-lock on t1, [db2] can read from t2. +# 1.3.3: If [db1] has a write-lock on t1, [db2] cannot read from t1. +# 1.3.4: If [db1] has a write-lock on t1, [db2] cannot write to t1. +# 1.3.5: If [db1] has a read-lock on t1, [db2] can read from t1. +# 1.3.6: If [db1] has a read-lock on t1, [db2] cannot write to t1. 
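
Read together, rules 1.3.1 through 1.3.6 above say that a table-level write-lock is private to the writing connection: the writer still sees its own uncommitted changes, other handles on the same shared cache can keep using other tables, but any access to the locked table from another handle fails with "database table is locked". A compressed sketch of that behaviour, with hypothetical handles w and r and the t1/t2 tables used in this file (shared-cache mode assumed already enabled):

    sqlite3 w test.db
    sqlite3 r test.db
    w eval { BEGIN; INSERT INTO t1 VALUES(1, 2); }     ;# w now holds a write-lock on t1
    puts [catch { r eval { SELECT * FROM t1 } } msg]   ;# 1  ("database table is locked")
    puts [catch { r eval { SELECT * FROM t2 } } msg]   ;# 0  (other tables are unaffected)
    w eval { COMMIT }
    w close
    r close
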
+# +do_test shared6-1.3.1 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 2); + } db1 + execsql { SELECT * FROM t1 } db1 +} {1 2} +do_test shared6-1.3.2 { + execsql { SELECT * FROM t2 } db2 +} {3 4} +do_test shared6-1.3.3 { + catchsql { SELECT * FROM t1 } db2 +} {1 {database table is locked: t1}} +do_test shared6-1.3.4 { + catchsql { INSERT INTO t2 VALUES(1, 2) } db2 +} {1 {database table is locked}} +do_test shared6-1.3.5 { + execsql { + COMMIT; + BEGIN; + SELECT * FROM t1; + } db1 + execsql { SELECT * FROM t1 } db2 +} {1 2} +do_test shared6-1.3.5 { + catchsql { INSERT INTO t1 VALUES(5, 6) } db2 +} {1 {database table is locked: t1}} +do_test shared6-1.3.X { + execsql { COMMIT } db1 +} {} + +# Read-uncommitted mode. +# +# For these tests, connection [db2] is in read-uncommitted mode. +# +# 1.4.1: If [db1] has a write-lock on t1, [db2] can still read from t1. +# 1.4.2: If [db1] has a write-lock on the db schema (sqlite_master table), +# [db2] cannot read from the schema. +# 1.4.3: If [db1] has a read-lock on t1, [db2] cannot write to t1. +# +do_test shared6-1.4.1 { + execsql { PRAGMA read_uncommitted = 1 } db2 + execsql { + BEGIN; + INSERT INTO t1 VALUES(5, 6); + } db1 + execsql { SELECT * FROM t1 } db2 +} {1 2 5 6} +do_test shared6-1.4.2 { + execsql { CREATE TABLE t4(a, b) } db1 + catchsql { SELECT * FROM t1 } db2 +} {1 {database table is locked}} +do_test shared6-1.4.3 { + execsql { + COMMIT; + BEGIN; + SELECT * FROM t1; + } db1 + catchsql { INSERT INTO t1 VALUES(7, 8) } db2 +} {1 {database table is locked: t1}} + +do_test shared6-1.X { + db1 close + db2 close +} {} + +#------------------------------------------------------------------------- +# The following tests - shared6-2.* - test that two database connections +# that connect to the same file using different VFS implementations do +# not share a cache. +# +if {$::tcl_platform(platform) eq "unix"} { + do_test shared6-2.1 { + sqlite3 db1 test.db -vfs unix + sqlite3 db2 test.db -vfs unix + sqlite3 db3 test.db -vfs unix-none + sqlite3 db4 test.db -vfs unix-none + } {} + + do_test shared6-2.2 { + execsql { BEGIN; INSERT INTO t1 VALUES(9, 10); } db1 + catchsql { SELECT * FROM t1 } db2 + } {1 {database table is locked: t1}} + do_test shared6-2.3 { + execsql { SELECT * FROM t1 } db3 + } {1 2 5 6} + + do_test shared6-2.3 { + execsql { COMMIT } db1 + execsql { BEGIN; INSERT INTO t1 VALUES(11, 12); } db3 + catchsql { SELECT * FROM t1 } db4 + } {1 {database table is locked: t1}} + + do_test shared6-2.4 { + execsql { SELECT * FROM t1 } db1 + } {1 2 5 6 9 10} + + do_test shared6-2.5 { + execsql { COMMIT } db3 + } {} + + do_test shared6-2.X { + db1 close + db2 close + db3 close + db4 close + } {} +} + +#------------------------------------------------------------------------- +# Test that it is possible to open an exclusive transaction while +# already holding a read-lock on the database file. And that it is +# not possible if some other connection holds such a lock. +# +do_test shared6-3.1 { + sqlite3 db1 test.db + sqlite3 db2 test.db + sqlite3 db3 test.db +} {} +db1 eval {SELECT * FROM t1} { + # Within this block [db1] is holding a read-lock on t1. Test that + # this means t1 cannot be written by [db2]. 
+ # + do_test shared6-3.2 { + catchsql { INSERT INTO t1 VALUES(1, 2) } db2 + } {1 {database table is locked: t1}} + + do_test shared6-3.3 { + execsql { BEGIN EXCLUSIVE } db1 + } {} + break +} +do_test shared6-3.4 { + catchsql { SELECT * FROM t1 } db2 +} {1 {database schema is locked: main}} +do_test shared6-3.5 { + execsql COMMIT db1 +} {} +db2 eval {SELECT * FROM t1} { + do_test shared6-3.6 { + catchsql { BEGIN EXCLUSIVE } db1 + } {1 {database table is locked}} + break +} +do_test shared6-3.7 { + execsql { BEGIN } db1 + execsql { BEGIN } db2 +} {} +db2 eval {SELECT * FROM t1} { + do_test shared6-3.8 { + catchsql { INSERT INTO t1 VALUES(1, 2) } db1 + } {1 {database table is locked: t1}} + break +} +do_test shared6-3.9 { + execsql { BEGIN ; ROLLBACK } db3 +} {} +do_test shared6-3.10 { + catchsql { SELECT * FROM t1 } db3 +} {1 {database table is locked}} +do_test shared6-3.X { + db1 close + db2 close + db3 close +} {} + +do_test shared6-4.1 { + #file delete -force test.db test.db-journal + sqlite3 db1 test.db + sqlite3 db2 test.db + + set ::STMT [sqlite3_prepare_v2 db1 "SELECT * FROM t1" -1 DUMMY] + execsql { CREATE TABLE t5(a, b) } db2 +} {} +do_test shared6-4.2 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test shared6-4.X { + + db1 close + db2 close +} {} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared7.test --- sqlite3-3.4.2/test/shared7.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/shared7.test 2009-04-30 14:30:33.000000000 +0100 @@ -0,0 +1,55 @@ +# 2009 April 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Make sure that attaching the same database multiple times in +# shared cache mode fails. +# +# $Id: shared7.test,v 1.1 2009/04/30 13:30:33 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable !shared_cache { finish_test ; return } + +do_test shared7-1.1 { + set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + sqlite3_enable_shared_cache +} {1} + +do_test shared7-1.2 { + db close + sqlite3 db test.db + db eval { + CREATE TABLE t1(x); + } + catchsql { + ATTACH 'test.db' AS err1; + } +} {1 {database is already attached}} + +do_test shared7-1.3 { + file delete -force test2.db test2.db-journal + db eval { + ATTACH 'test2.db' AS test2; + CREATE TABLE test2.t2(y); + } + catchsql { + ATTACH 'test2.db' AS err2; + } +} {1 {database is already attached}} +do_test shared7-1.4 { + catchsql { + ATTACH 'test.db' AS err1; + } +} {1 {database is already attached}} + + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared_err.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared_err.test --- sqlite3-3.4.2/test/shared_err.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/shared_err.test 2009-06-12 03:37:59.000000000 +0100 @@ -13,114 +13,22 @@ # cache context. What happens to connection B if one connection A encounters # an IO-error whilst reading or writing the file-system? 
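
Because a simulated I/O error can strike at an arbitrary point inside the statement, the shared_ioerr tests below cannot pin down a single expected answer; instead they accept any member of a list of plausible outcomes. The idiom, reduced to its core (assumes tester.tcl's catchsql and a second handle db2, as in the tests that follow):

    set res [catchsql { SELECT * FROM t1 } db2]
    set ok [expr {[lsearch -exact {
      {1 {disk I/O error}}
      {0 {1 2 3}}
      {1 {database disk image is malformed}}
    } $res] >= 0}]
    # ok is 1 when the outcome is one of the accepted results, 0 otherwise.
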
# -# $Id: shared_err.test,v 1.11 2007/04/06 01:04:40 drh Exp $ +# $Id: shared_err.test,v 1.24 2008/10/12 00:27:54 shane Exp $ proc skip {args} {} set testdir [file dirname $argv0] source $testdir/tester.tcl +source $testdir/malloc_common.tcl db close ifcapable !shared_cache||!subquery { finish_test return } -set ::enable_shared_cache [sqlite3_enable_shared_cache 1] - -# Todo: This is a copy of the [do_malloc_test] proc in malloc.test -# It would be better if these were consolidated. - -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. -# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test shared_malloc-$tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. 
- # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr {$msg=="" || $msg=="out of memory"}] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - sqlite_malloc_fail 0 - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] do_ioerr_test shared_ioerr-1 -tclprep { sqlite3 db2 test.db @@ -149,12 +57,13 @@ set res [catchsql { SELECT * FROM t1; } db2] - set possible_results [list \ - "1 {disk I/O error}" \ - "0 {1 2 3}" \ - "0 {1 2 3 1 2 3 4 5 6}" \ - "0 {1 2 3 1 2 3 4 5 6 1 2 3 4 5 6}" \ - "0 {}" \ + set possible_results [list \ + "1 {disk I/O error}" \ + "0 {1 2 3}" \ + "0 {1 2 3 1 2 3 4 5 6}" \ + "0 {1 2 3 1 2 3 4 5 6 1 2 3 4 5 6}" \ + "0 {}" \ + "1 {database disk image is malformed}" \ ] set rc [expr [lsearch -exact $possible_results $res] >= 0] if {$rc != 1} { @@ -163,7 +72,15 @@ } set rc } {1} + + # The "database disk image is malformed" is a special case that can + # occur if an IO error occurs during a rollback in the {SELECT * FROM t1} + # statement above. This test is to make sure there is no real database + # corruption. db2 close + do_test shared_ioerr-1.$n.cleanup.2 { + execsql {pragma integrity_check} db + } {ok} } do_ioerr_test shared_ioerr-2 -tclprep { @@ -204,6 +121,7 @@ incr ::residx } -cleanup { + catchsql ROLLBACK do_test shared_ioerr-2.$n.cleanup.1 { set res [catchsql { SELECT max(a), min(a), count(*) FROM (SELECT a FROM t1 order by a); @@ -294,7 +212,7 @@ expr { ($::steprc eq "SQLITE_ROW" && $::finalrc eq "SQLITE_OK") || ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_IOERR") || - ($::steprc eq "SQLITE_ABORT" && $::finalrc eq "SQLITE_OK") + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_ABORT") } } {1} @@ -374,7 +292,7 @@ expr { ($::steprc eq "SQLITE_ROW" && $::finalrc eq "SQLITE_OK") || ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_IOERR") || - ($::steprc eq "SQLITE_ABORT" && $::finalrc eq "SQLITE_OK") + ($::steprc eq "SQLITE_ERROR" && $::finalrc eq "SQLITE_ABORT") } } {1} @@ -382,16 +300,6 @@ db2 close } -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." - db close - sqlite3_enable_shared_cache $::enable_shared_cache - finish_test - return -} - # Provoke a malloc() failure when a cursor position is being saved. This # only happens with index cursors (because they malloc() space to save the # current key value). It does not happen with tables, because an integer @@ -401,7 +309,7 @@ # owns the cursor (the one for which the position is not saved) should # continue unaffected. 
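
The do_malloc_test helper whose private copy is deleted above now comes from malloc_common.tcl, sourced near the top of this file. Its interface is the one documented in the removed comment block: it repeatedly runs the supplied scripts with the N-th memory allocation forced to fail, for N = 1, 2, 3, ... until everything succeeds. A minimal invocation, using only the options documented there (the test name and SQL are illustrative):

    do_malloc_test shared_err-demo -sqlprep {
      CREATE TABLE t1(a);
      INSERT INTO t1 VALUES(1);
    } -sqlbody {
      INSERT INTO t1 SELECT a+1 FROM t1;
    } -cleanup {
      catch { db eval { SELECT * FROM t1 } }
    }
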
# -do_malloc_test 4 -tclprep { +do_malloc_test shared_err-4 -tclprep { sqlite3 db2 test.db execsql { PRAGMA read_uncommitted = 1; @@ -425,7 +333,7 @@ } -cleanup { do_test shared_malloc-4.$::n.cleanup.1 { set ::rc [sqlite3_step $::STMT] - expr {$::rc=="SQLITE_ROW" || $::rc=="SQLITE_ABORT"} + expr {$::rc=="SQLITE_ROW" || $::rc=="SQLITE_ERROR"} } {1} if {$::rc=="SQLITE_ROW"} { do_test shared_malloc-4.$::n.cleanup.2 { @@ -433,13 +341,16 @@ } {2222222222} } do_test shared_malloc-4.$::n.cleanup.3 { - sqlite3_finalize $::STMT - } {SQLITE_OK} + set rc [sqlite3_finalize $::STMT] + expr {$rc=="SQLITE_OK" || $rc=="SQLITE_ABORT" || + $rc=="SQLITE_NOMEM" || $rc=="SQLITE_IOERR"} + } {1} # db2 eval {select * from sqlite_master} db2 close } -do_malloc_test 5 -tclbody { +do_malloc_test shared_err-5 -tclbody { + db close sqlite3 dbX test.db sqlite3 dbY test.db dbX close @@ -449,21 +360,25 @@ catch {dbY close} } -do_malloc_test 6 -tclbody { +do_malloc_test shared_err-6 -tclbody { catch {db close} - sqlite3_thread_cleanup + ifcapable deprecated { + sqlite3_thread_cleanup + } sqlite3_enable_shared_cache 0 } -cleanup { sqlite3_enable_shared_cache 1 } -do_test shared_misuse-7.1 { - sqlite3 db test.db - catch { - sqlite3_enable_shared_cache 0 - } msg - set msg -} {library routine called out of sequence} +# As of 3.5.0, sqlite3_enable_shared_cache can be called at +# any time and from any thread +#do_test shared_err-misuse-7.1 { +# sqlite3 db test.db +# catch { +# sqlite3_enable_shared_cache 0 +# } msg +# set msg +#} {library routine called out of sequence} # Again provoke a malloc() failure when a cursor position is being saved, # this time during a ROLLBACK operation by some other handle. @@ -473,7 +388,7 @@ # be aborted. # set ::aborted 0 -do_malloc_test 8 -tclprep { +do_malloc_test shared_err-8 -tclprep { sqlite3 db2 test.db execsql { PRAGMA read_uncommitted = 1; @@ -497,20 +412,35 @@ ROLLBACK; } } -cleanup { + # UPDATE: As of [5668], if the rollback fails SQLITE_CORRUPT is returned. + # So these tests have been updated to expect SQLITE_CORRUPT and its + # associated English language error message. + # do_test shared_malloc-8.$::n.cleanup.1 { - lrange [execsql { - SELECT a FROM t1; - } db2] 0 1 - } {0000000000 1111111111} + set res [catchsql {SELECT a FROM t1} db2] + set ans [lindex $res 1] + if {[lindex $res 0]} { + set r [expr { + $ans=="disk I/O error" || + $ans=="out of memory" || + $ans=="database disk image is malformed" + }] + } else { + set r [expr {[lrange $ans 0 1]=="0000000000 1111111111"}] + } + } {1} do_test shared_malloc-8.$::n.cleanup.2 { set rc1 [sqlite3_step $::STMT] set rc2 [sqlite3_finalize $::STMT] - if {$rc1=="SQLITE_ABORT"} { + if {$rc2=="SQLITE_ABORT"} { incr ::aborted } expr { ($rc1=="SQLITE_DONE" && $rc2=="SQLITE_OK") || - ($rc1=="SQLITE_ABORT" && $rc2=="SQLITE_OK") + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_ABORT") || + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_NOMEM") || + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_IOERR") || + ($rc1=="SQLITE_ERROR" && $rc2=="SQLITE_CORRUPT") } } {1} db2 close @@ -520,6 +450,72 @@ expr $::aborted>=1 } {1} +# This test is designed to catch a specific bug that was present during +# development of 3.5.0. If a malloc() failed while setting the page-size, +# a buffer (Pager.pTmpSpace) was being freed. This could cause a seg-fault +# later if another connection tried to use the pager. +# +# This test will crash 3.4.2. 
+# +do_malloc_test shared_err-9 -tclprep { + sqlite3 db2 test.db +} -sqlbody { + PRAGMA page_size = 4096; + PRAGMA page_size = 1024; +} -cleanup { + db2 eval { + CREATE TABLE abc(a, b, c); + BEGIN; + INSERT INTO abc VALUES(1, 2, 3); + ROLLBACK; + } + db2 close +} + +catch {db close} +catch {db2 close} +do_malloc_test shared_err-10 -tclprep { + sqlite3 db test.db + sqlite3 db2 test.db + + db eval { SELECT * FROM sqlite_master } + db2 eval { + BEGIN; + CREATE TABLE abc(a, b, c); + } +} -tclbody { + catch {db eval {SELECT * FROM sqlite_master}} + error 1 +} -cleanup { + execsql { SELECT * FROM sqlite_master } +} + +do_malloc_test shared_err-11 -tclprep { + sqlite3 db test.db + sqlite3 db2 test.db + + db eval { SELECT * FROM sqlite_master } + db2 eval { + BEGIN; + CREATE TABLE abc(a, b, c); + } +} -tclbody { + catch {db eval {SELECT * FROM sqlite_master}} + catch {sqlite3_errmsg16 db} + error 1 +} -cleanup { + execsql { SELECT * FROM sqlite_master } +} + +catch {db close} +catch {db2 close} + +do_malloc_test shared_err-12 -sqlbody { + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); +} + catch {db close} +catch {db2 close} sqlite3_enable_shared_cache $::enable_shared_cache finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shared.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shared.test --- sqlite3-3.4.2/test/shared.test 2007-08-05 22:15:13.000000000 +0100 +++ sqlite3-3.6.16/test/shared.test 2009-06-12 03:37:58.000000000 +0100 @@ -9,13 +9,15 @@ # #*********************************************************************** # -# $Id: shared.test,v 1.25 2007/08/03 07:33:10 danielk1977 Exp $ +# $Id: shared.test,v 1.36 2009/03/16 13:19:36 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl db close -ifcapable !shared_cache { +# These tests cannot be run without the ATTACH command. +# +ifcapable !shared_cache||!attach { finish_test return } @@ -40,6 +42,20 @@ } } +# if we're using proxy locks, we use 2 filedescriptors for a db +# that is open but NOT yet locked, after a lock is taken we'll have 3, +# normally sqlite uses 1 (proxy locking adds the conch and the local lock) +set using_proxy 0 +foreach {name value} [array get env SQLITE_FORCE_PROXY_LOCKING] { + set using_proxy $value +} +set extrafds_prelock 0 +set extrafds_postlock 0 +if {$using_proxy>0} { + set extrafds_prelock 1 + set extrafds_postlock 2 +} + # $av is currently 0 if this loop iteration is to test with auto-vacuum turned # off, and 1 if it is turned on. Increment it so that (1 -> no auto-vacuum) # and (2 -> auto-vacuum). The sole reason for this is so that it looks nicer @@ -72,6 +88,7 @@ # opened. sqlite3 db2 test.db set ::sqlite_open_file_count + expr $sqlite_open_file_count-$extrafds_postlock } {1} do_test shared-$av.1.2 { # Add a table and a single row of data via the first connection. @@ -152,6 +169,7 @@ sqlite3 db3 TEST.DB } set ::sqlite_open_file_count + expr $sqlite_open_file_count-($extrafds_prelock+$extrafds_postlock) } {2} do_test shared-$av.2.2 { # Start read transactions on db and db2 (the shared pager cache). 
Ensure @@ -282,14 +300,17 @@ sqlite3 db2 test2.db do_test shared-$av.4.1.1 { set sqlite_open_file_count + expr $sqlite_open_file_count-($extrafds_prelock*2) } {2} do_test shared-$av.4.1.2 { execsql {ATTACH 'test2.db' AS test2} set sqlite_open_file_count + expr $sqlite_open_file_count-($extrafds_postlock*2) } {2} do_test shared-$av.4.1.3 { execsql {ATTACH 'test.db' AS test} db2 set sqlite_open_file_count + expr $sqlite_open_file_count-($extrafds_postlock*2) } {2} # Sanity check: Create a table in ./test.db via handle db, and test that handle @@ -572,7 +593,7 @@ } {} do_test shared-$av.7.2 { # This test case deletes the contents of table t1 (the one at the start of - # the file) while many cursors are open on table t2 and it's index. All of + # the file) while many cursors are open on table t2 and its index. All of # the non-root pages will be moved from the end to the start of the file # when the DELETE is committed - this test verifies that moving the pages # does not disturb the open cursors. @@ -634,6 +655,7 @@ do_test shared-$av.8.1.2 { string range [execsql {PRAGMA encoding;}] 0 end-2 } {UTF-16} + do_test shared-$av.8.1.3 { sqlite3 db2 test.db execsql { @@ -652,6 +674,7 @@ PRAGMA encoding; } } {UTF-8} + file delete -force test2.db test2.db-journal do_test shared-$av.8.2.1 { execsql { @@ -668,6 +691,35 @@ string range [execsql {PRAGMA encoding;} db2] 0 end-2 } {UTF-16} + catch {db close} + catch {db2 close} + file delete -force test.db test2.db + + do_test shared-$av.8.3.2 { + sqlite3 db test.db + execsql { CREATE TABLE def(d, e, f) } + execsql { PRAGMA encoding } + } {UTF-8} + do_test shared-$av.8.3.3 { + set zDb16 "[encoding convertto unicode test.db]\x00\x00" + set db16 [sqlite3_open16 $zDb16 {}] + + set stmt [sqlite3_prepare $db16 "SELECT sql FROM sqlite_master" -1 DUMMY] + sqlite3_step $stmt + set sql [sqlite3_column_text $stmt 0] + sqlite3_finalize $stmt + set sql + } {CREATE TABLE def(d, e, f)} + do_test shared-$av.8.3.4 { + set stmt [sqlite3_prepare $db16 "PRAGMA encoding" -1 DUMMY] + sqlite3_step $stmt + set enc [sqlite3_column_text $stmt 0] + sqlite3_finalize $stmt + set enc + } {UTF-8} + + sqlite3_close $db16 + # Bug #2547 is causing this to fail. if 0 { do_test shared-$av.8.2.3 { @@ -823,7 +875,7 @@ } {0 {}} do_test shared-$av.11.5 { catchsql {INSERT INTO abc2 VALUES(1, 2, 3);} db2 -} {1 {database is locked}} +} {1 {database table is locked}} do_test shared-$av.11.6 { catchsql {SELECT * FROM abc2} } {0 {}} @@ -841,7 +893,6 @@ } {} do_test shared-$av.11.8 { set res [list] - breakpoint db2 eval { SELECT abc.a as I, abc2.a as II FROM abc, abc2; } { @@ -854,8 +905,8 @@ } {1 4 {} 7} if {[llength [info command sqlite3_shared_cache_report]]==1} { do_test shared-$av.11.9 { - sqlite3_shared_cache_report - } [list [file normalize test.db] 2] + string tolower [sqlite3_shared_cache_report] + } [string tolower [list [file nativename [file normalize test.db]] 2]] } do_test shared-$av.11.11 { @@ -863,6 +914,110 @@ db2 close } {} +# This tests that if it is impossible to free any pages, SQLite will +# exceed the limit set by PRAGMA cache_size. 
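
The shared-$av.12.* block that follows demonstrates the point made in the comment above by opening fourteen handles on one shared cache and nesting their SELECT loops, so every handle keeps an active cursor and therefore keeps pages pinned. Pinned pages cannot be recycled, so the cache is allowed to grow past PRAGMA cache_size = 10 rather than fail. Two levels of the nesting idiom, for illustration only (handle names are hypothetical; the db1/db2 tables are the ones the test creates):

    sqlite3 h1 test.db
    sqlite3 h2 test.db
    h1 eval { SELECT * FROM db1 } {
      # h1 now has an active cursor, so its pages stay pinned for the whole loop...
      h2 eval { SELECT * FROM db2 } {
        # ...and so do h2's.  Neither page set can be evicted, so the shared
        # cache quietly exceeds the 10-page limit instead of erroring out.
        lappend ::res $a $b $c
      }
    }
    h1 close
    h2 close
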
+file delete -force test.db test.db-journal +sqlite3 db test.db +ifcapable pager_pragmas { + do_test shared-$av.12.1 { + execsql { + PRAGMA cache_size = 10; + PRAGMA cache_size; + } + } {10} +} +do_test shared-$av.12.2 { + set ::db_handles [list] + for {set i 1} {$i < 15} {incr i} { + lappend ::db_handles db$i + sqlite3 db$i test.db + execsql "CREATE TABLE db${i}(a, b, c)" db$i + execsql "INSERT INTO db${i} VALUES(1, 2, 3)" + } +} {} +proc nested_select {handles} { + [lindex $handles 0] eval "SELECT * FROM [lindex $handles 0]" { + lappend ::res $a $b $c + if {[llength $handles]>1} { + nested_select [lrange $handles 1 end] + } + } +} +do_test shared-$av.12.3 { + set ::res [list] + nested_select $::db_handles + set ::res +} [string range [string repeat "1 2 3 " [llength $::db_handles]] 0 end-1] + +do_test shared-$av.12.X { + db close + foreach h $::db_handles { + $h close + } +} {} + +# Internally, locks are acquired on shared B-Tree structures in the order +# that the structures appear in the virtual memory address space. This +# test case attempts to cause the order of the structures in memory +# to be different from the order in which they are attached to a given +# database handle. This covers an extra line or two. +# +do_test shared-$av.13.1 { + file delete -force test2.db test3.db test4.db test5.db + sqlite3 db :memory: + execsql { + ATTACH 'test2.db' AS aux2; + ATTACH 'test3.db' AS aux3; + ATTACH 'test4.db' AS aux4; + ATTACH 'test5.db' AS aux5; + DETACH aux2; + DETACH aux3; + DETACH aux4; + ATTACH 'test2.db' AS aux2; + ATTACH 'test3.db' AS aux3; + ATTACH 'test4.db' AS aux4; + } +} {} +do_test shared-$av.13.2 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE TABLE aux2.t2(a, b, c); + CREATE TABLE aux3.t3(a, b, c); + CREATE TABLE aux4.t4(a, b, c); + CREATE TABLE aux5.t5(a, b, c); + SELECT count(*) FROM + aux2.sqlite_master, + aux3.sqlite_master, + aux4.sqlite_master, + aux5.sqlite_master + } +} {1} +do_test shared-$av.13.3 { + db close +} {} + +# Test that nothing horrible happens if a connection to a shared B-Tree +# structure is closed while some other connection has an open cursor. +# +do_test shared-$av.14.1 { + sqlite3 db test.db + sqlite3 db2 test.db + execsql {SELECT name FROM sqlite_master} +} {db1 db2 db3 db4 db5 db6 db7 db8 db9 db10 db11 db12 db13 db14} +do_test shared-$av.14.2 { + set res [list] + db eval {SELECT name FROM sqlite_master} { + if {$name eq "db7"} { + db2 close + } + lappend res $name + } + set res +} {db1 db2 db3 db4 db5 db6 db7 db8 db9 db10 db11 db12 db13 db14} +do_test shared-$av.14.3 { + db close +} {} + } sqlite3_enable_shared_cache $::enable_shared_cache diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/shortread1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/shortread1.test --- sqlite3-3.4.2/test/shortread1.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/shortread1.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,52 @@ +# 2007 Sep 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file attempts to duplicate an error scenario seen on a +# customer system using version 3.2.2. The problem appears to +# have been fixed (perhaps by accident) with check-in [3503]. +# These tests will prevent an accidental recurrance. 
+# +# $Id: shortread1.test,v 1.1 2007/09/14 01:48:12 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test shortread1-1.1 { + execsql { + CREATE TABLE t1(a TEXT); + BEGIN; + INSERT INTO t1 VALUES(hex(randomblob(5000))); + INSERT INTO t1 VALUES(hex(randomblob(100))); + PRAGMA freelist_count; + } +} {0} +do_test shortread1-1.2 { + execsql { + DELETE FROM t1 WHERE rowid=1; + PRAGMA freelist_count; + } +} {11} +do_test shortread1-1.3 { + sqlite3_release_memory [expr {1024*9}] + execsql { + INSERT INTO t1 VALUES(hex(randomblob(5000))); + PRAGMA freelist_count; + } +} {0} +do_test shortread1-1.4 { + execsql { + COMMIT; + SELECT count(*) FROM t1; + } +} {2} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/sidedelete.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/sidedelete.test --- sqlite3-3.4.2/test/sidedelete.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/sidedelete.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,92 @@ +# 2007 Dec 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains test cases for stressing database +# changes that involve side effects that delete rows from +# the table being changed. Ticket #2832 shows that in +# older versions of SQLite that behavior was implemented +# incorrectly and resulted in corrupt database files. +# +# $Id: sidedelete.test,v 1.2 2008/08/04 03:51:24 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# The sequence table is created to store a sequence of integers +# starting with 1. This is used to reinitialize other tables +# as part of other tests. +# +do_test sidedelete-1.1 { + execsql { + CREATE TABLE sequence(a INTEGER PRIMARY KEY); + INSERT INTO sequence VALUES(1); + INSERT INTO sequence VALUES(2); + } + for {set i 0} {$i<8} {incr i} { + execsql { + INSERT INTO sequence SELECT a+(SELECT max(a) FROM sequence) FROM sequence; + } + } + execsql {SELECT count(*) FROM sequence} +} {512} + +# Make a series of changes using an UPDATE OR REPLACE and a +# correlated subquery. This would cause database corruption +# prior to the fix for ticket #2832. +# +do_test sidedelete-2.0 { + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + CREATE TABLE chng(a PRIMARY KEY, b); + SELECT count(*) FROM t1; + SELECT count(*) FROM chng; + } +} {0 0} +for {set i 2} {$i<=100} {incr i} { + set n [expr {($i+2)/2}] + do_test sidedelete-2.$i.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1 SELECT a, a FROM sequence WHERE a<=$i; + DELETE FROM chng; + INSERT INTO chng SELECT a*2, a*2+1 FROM sequence WHERE a<=$i/2; + UPDATE OR REPLACE t1 SET a=(SELECT b FROM chng WHERE a=t1.a); + SELECT count(*), sum(a) FROM t1; + } + } [list $n [expr {$n*$n-1}]] + integrity_check sidedelete-2.$i.2 +} + +# This will cause stacks leaks but not database corruption prior +# to the #2832 fix. 
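
The corruption mechanism behind ticket #2832 is an UPDATE whose OR REPLACE conflict resolution deletes other rows of the very table the UPDATE is still scanning. A minimal stand-alone illustration follows (hypothetical table x; the single-row outcome relies on the ascending scan order that the sidedelete tests themselves depend on):

    db eval {
      CREATE TABLE x(a PRIMARY KEY);
      INSERT INTO x VALUES(1);
      INSERT INTO x VALUES(2);
      -- Shifting every key up by one makes the first updated row collide with
      -- the second, so OR REPLACE deletes a row from the table mid-scan: the
      -- "side delete" this file stresses for database corruption.
      UPDATE OR REPLACE x SET a=a+1;
    }
    puts [db eval { SELECT count(*) FROM x }]   ;# 1
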
+# +do_test sidedelete-3.0 { + execsql { + DROP TABLE t1; + CREATE TABLE t1(a PRIMARY KEY); + SELECT * FROM t1; + } +} {} +for {set i 1} {$i<=100} {incr i} { + set n [expr {($i+1)/2}] + do_test sidedelete-3.$i.1 { + execsql { + DELETE FROM t1; + INSERT INTO t1 SELECT a FROM sequence WHERE a<=$i; + UPDATE OR REPLACE t1 SET a=a+1; + SELECT count(*), sum(a) FROM t1; + } + } [list $n [expr {$n*($n+1)}]] + integrity_check sidedelete-3.$i.2 +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/soak.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/soak.test --- sqlite3-3.4.2/test/soak.test 2007-05-30 11:36:47.000000000 +0100 +++ sqlite3-3.6.16/test/soak.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file is the driver for the "soak" tests. It is a peer of the # quick.test and all.test scripts. # -# $Id: soak.test,v 1.2 2007/05/30 10:36:47 danielk1977 Exp $ +# $Id: soak.test,v 1.4 2008/11/13 18:29:51 shane Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -47,10 +47,10 @@ # global variable $TIMEOUT - tests are run for at least $TIMEOUT # seconds. # -# fuzz.test (pseudo-random SQL statements) -# trans.test (pseudo-random changes to a database followed by rollbacks) -# -# fuzzy malloc? +# fuzz.test (pseudo-random SQL statements) +# trans.test (pseudo-random changes to a database followed by rollbacks) +# fuzz_malloc.test +# corruptC.test (pseudo-random corruption to a database) # # Many database changes maintaining some kind of invariant. # Storing checksums etc. @@ -62,6 +62,7 @@ fuzz.test fuzz_malloc.test trans.test + corruptC.test } set ISQUICK 1 @@ -87,4 +88,3 @@ } really_finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/softheap1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/softheap1.test --- sqlite3-3.4.2/test/softheap1.test 2007-08-12 21:07:59.000000000 +0100 +++ sqlite3-3.6.16/test/softheap1.test 2009-06-05 18:03:40.000000000 +0100 @@ -13,12 +13,19 @@ # A database corruption bug that occurs in auto_vacuum mode when # the soft_heap_limit is set low enough to be triggered. # -# $Id: softheap1.test,v 1.2 2007/08/12 20:07:59 drh Exp $ +# $Id: softheap1.test,v 1.5 2008/07/08 17:13:59 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !integrityck { + finish_test + return +} + +sqlite3_soft_heap_limit -1 +sqlite3_soft_heap_limit 0 sqlite3_soft_heap_limit 5000 do_test softheap1-1.1 { execsql { @@ -37,6 +44,7 @@ PRAGMA integrity_check; } } {ok} + sqlite3_soft_heap_limit $soft_limit finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed1p.explain /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed1p.explain --- sqlite3-3.4.2/test/speed1p.explain 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/speed1p.explain 2008-04-16 13:57:48.000000000 +0100 @@ -0,0 +1,366 @@ +# 2008 March 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. +# +# This is a copy of speed1.test modified to user prepared statements. 
+# +# $Id: speed1p.explain,v 1.1 2008/04/16 12:57:48 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed1.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. +# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Create a database schema. +# +do_test speed1p-1.0 { + execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + } + execsql { + SELECT name FROM sqlite_master ORDER BY 1; + } +} {i2a i2b t1 t2} + + +# 50000 INSERTs on an unindexed table +# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + set x [number_name $r] + lappend list $i $r $x +} +set script { + foreach {i r x} $::list { + db eval {INSERT INTO t1 VALUES($i,$r,$x)} + } +} +explain {INSERT INTO t1 VALUES($i,$r,$x)} +db eval BEGIN +speed_trial_tcl speed1p-insert1 50000 row $script +db eval COMMIT + +# 50000 INSERTs on an indexed table +# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + set x [number_name $r] + lappend list $i $r $x +} +set script { + foreach {i r x} $::list { + db eval {INSERT INTO t2 VALUES($i,$r,$x)} + } +} +explain {INSERT INTO t2 VALUES($i,$r,$x)} +db eval BEGIN +speed_trial_tcl speed1p-insert2 50000 row $script +db eval COMMIT + + + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +set list {} +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} + } +} +explain {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} +db eval BEGIN +speed_trial_tcl speed1p-select1 [expr {50*50000}] row $script +db eval COMMIT + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. 
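
The "full table scan is required" remarks in this script can be checked directly with EXPLAIN QUERY PLAN. A sketch of that check is below; the exact plan wording differs between SQLite versions, so the comments show typical output only, and the index name is a throwaway to avoid clashing with the i1b the script creates later.

    db eval { EXPLAIN QUERY PLAN
              SELECT count(*), avg(b) FROM t1 WHERE b>=500 AND b<1500 } row {
      puts $row(detail)   ;# e.g. "SCAN TABLE t1", i.e. a full scan
    }
    db eval { CREATE INDEX i1b_demo ON t1(b) }
    db eval { EXPLAIN QUERY PLAN
              SELECT count(*), avg(b) FROM t1 WHERE b>=500 AND b<1500 } row {
      puts $row(detail)   ;# e.g. "SEARCH TABLE t1 USING INDEX i1b_demo (b>? AND b<?)"
    }
    db eval { DROP INDEX i1b_demo }
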
+# +set list {} +for {set i 0} {$i<50} {incr i} { + lappend list "%[number_name $i]%" +} +set script { + foreach pattern $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE c LIKE $pattern} + } +} +explain {SELECT count(*), avg(b) FROM t1 WHERE c LIKE $pattern} +db eval BEGIN +speed_trial_tcl speed1p-select2 [expr {50*50000}] row $script +db eval COMMIT + +# Create indices +# +explain {CREATE INDEX i1a ON t1(a)} +explain {CREATE INDEX i1b ON t1(b)} +db eval BEGIN +speed_trial speed1p-createidx 150000 row { + CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); +} +db eval COMMIT + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set list {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} + } +} +explain {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} +db eval BEGIN +speed_trial_tcl speed1p-select3 5000 stmt $script +db eval COMMIT + +# 100000 random SELECTs against rowid. +# +set list {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + lappend list $id +} +set script { + foreach id $::list { + db eval {SELECT c FROM t1 WHERE rowid=$id} + } +} +explain {SELECT c FROM t1 WHERE rowid=$id} +db eval BEGIN +speed_trial_tcl speed1p-select4 100000 row $script +db eval COMMIT + +# 100000 random SELECTs against a unique indexed column. +# +set list {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + lappend list $id +} +set script { + foreach id $::list { + db eval {SELECT c FROM t1 WHERE a=$id} + } +} +explain {SELECT c FROM t1 WHERE a=$id} +db eval BEGIN +speed_trial_tcl speed1p-select5 100000 row $script +db eval COMMIT + +# 50000 random SELECTs against an indexed column text column +# +set list [db eval {SELECT c FROM t1 ORDER BY random() LIMIT 50000}] +set script { + foreach c $::list { + db eval {SELECT c FROM t1 WHERE c=$c} + } +} +explain {SELECT c FROM t1 WHERE c=$c} +db eval BEGIN +speed_trial_tcl speed1p-select6 50000 row $script +db eval COMMIT + + +# Vacuum +speed_trial speed1p-vacuum 100000 row VACUUM + +# 5000 updates of ranges where the field being compared is indexed. +# +set list {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*2}] + set upr [expr {($i+1)*2}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr} + } +} +explain {UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr} +db eval BEGIN +speed_trial_tcl speed1p-update1 5000 stmt $script +db eval COMMIT + +# 50000 single-row updates. An index is used to find the row quickly. +# +set list {} +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*500000)}] + lappend list $i $r +} +set script { + foreach {i r} $::list { + db eval {UPDATE t1 SET b=$r WHERE a=$i} + } +} +explain {UPDATE t1 SET b=$r WHERE a=$i} +db eval BEGIN +speed_trial_tcl speed1p-update2 50000 row $script +db eval COMMIT + +# 1 big text update that touches every row in the table. +# +explain {UPDATE t1 SET c=a} +speed_trial speed1p-update3 50000 row { + UPDATE t1 SET c=a; +} + +# Many individual text updates. Each row in the table is +# touched through an index. 
+# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + lappend list $i [number_name $r] +} +set script { + foreach {i x} $::list { + db eval {UPDATE t1 SET c=$x WHERE a=$i} + } +} +explain {UPDATE t1 SET c=$x WHERE a=$i} +db eval BEGIN +speed_trial_tcl speed1p-update4 50000 row $script +db eval COMMIT + +# Delete all content in a table. +# +explain {DELETE FROM t1} +speed_trial speed1p-delete1 50000 row {DELETE FROM t1} + +# Copy one table into another +# +explain {INSERT INTO t1 SELECT * FROM t2} +speed_trial speed1p-copy1 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Delete all content in a table, one row at a time. +# +explain {DELETE FROM t1 WHERE 1} +speed_trial speed1p-delete2 50000 row {DELETE FROM t1 WHERE 1} + +# Refill the table yet again +# +speed_trial speed1p-copy2 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Drop the table and recreate it without its indices. +# +explain {DROP TABLE t1} +explain {CREATE TABLE tX(a INTEGER, b INTEGER, c TEXT)} +db eval BEGIN +speed_trial speed1p-drop1 50000 row { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); +} +db eval COMMIT + +# Refill the table yet again. This copy should be faster because +# there are no indices to deal with. +# +speed_trial speed1p-copy3 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Select 20000 rows from the table at random. +# +explain {SELECT rowid FROM t1 ORDER BY random() LIMIT 20000} +speed_trial speed1p-random1 50000 row { + SELECT rowid FROM t1 ORDER BY random() LIMIT 20000 +} + +# Delete 20000 random rows from the table. +# +explain {DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000)} +speed_trial speed1p-random-del1 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1p-1.1 { + db one {SELECT count(*) FROM t1} +} 30000 + + +# Delete 20000 more rows at random from the table. +# +speed_trial speed1p-random-del2 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1p-1.2 { + db one {SELECT count(*) FROM t1} +} 10000 +speed_trial_summary speed1 + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed1p.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed1p.test --- sqlite3-3.4.2/test/speed1p.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/speed1p.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,346 @@ +# 2008 March 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. +# +# This is a copy of speed1.test modified to user prepared statements. +# +# $Id: speed1p.test,v 1.7 2009/04/09 01:23:49 drh Exp $ +# + +sqlite3_shutdown +#sqlite3_config_scratch 29000 1 +set old_lookaside [sqlite3_config_lookaside 2048 300] +#sqlite3_config_pagecache 1024 11000 +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. 
+# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Create a database schema. +# +do_test speed1p-1.0 { + execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=500; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + } + execsql { + SELECT name FROM sqlite_master ORDER BY 1; + } +} {i2a i2b t1 t2} + + +# 50000 INSERTs on an unindexed table +# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + set x [number_name $r] + lappend list $i $r $x +} +set script { + foreach {i r x} $::list { + db eval {INSERT INTO t1 VALUES($i,$r,$x)} + } +} +db eval BEGIN +speed_trial_tcl speed1p-insert1 50000 row $script +db eval COMMIT + +# 50000 INSERTs on an indexed table +# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + set x [number_name $r] + lappend list $i $r $x +} +set script { + foreach {i r x} $::list { + db eval {INSERT INTO t2 VALUES($i,$r,$x)} + } +} +db eval BEGIN +speed_trial_tcl speed1p-insert2 50000 row $script +db eval COMMIT + + + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +set list {} +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select1 [expr {50*50000}] row $script +db eval COMMIT + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. +# +set list {} +for {set i 0} {$i<50} {incr i} { + lappend list "%[number_name $i]%" +} +set script { + foreach pattern $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE c LIKE $pattern} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select2 [expr {50*50000}] row $script +db eval COMMIT + +# Create indices +# +db eval BEGIN +speed_trial speed1p-createidx 150000 row { + CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); +} +db eval COMMIT + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set list {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select3 5000 stmt $script +db eval COMMIT + +# 100000 random SELECTs against rowid. 
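
The number_name procedure defined near the top of this script generates all of the text payloads used below. A quick sanity check of its output, runnable in plain tclsh once the proc is defined (the expected values follow directly from the proc itself):

    puts [number_name 0]       ;# zero
    puts [number_name 42]      ;# forty two
    puts [number_name 123]     ;# one hundred twenty three
    puts [number_name 50001]   ;# fifty thousand one
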
+# +set list {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + lappend list $id +} +set script { + foreach id $::list { + db eval {SELECT c FROM t1 WHERE rowid=$id} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select4 100000 row $script +db eval COMMIT + +# 100000 random SELECTs against a unique indexed column. +# +set list {} +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + lappend list $id +} +set script { + foreach id $::list { + db eval {SELECT c FROM t1 WHERE a=$id} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select5 100000 row $script +db eval COMMIT + +# 50000 random SELECTs against an indexed column text column +# +set list [db eval {SELECT c FROM t1 ORDER BY random() LIMIT 50000}] +set script { + foreach c $::list { + db eval {SELECT c FROM t1 WHERE c=$c} + } +} +db eval BEGIN +speed_trial_tcl speed1p-select6 50000 row $script +db eval COMMIT + + +# Vacuum +speed_trial speed1p-vacuum 100000 row VACUUM + +# 5000 updates of ranges where the field being compared is indexed. +# +set list {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*2}] + set upr [expr {($i+1)*2}] + lappend list $lwr $upr +} +set script { + foreach {lwr upr} $::list { + db eval {UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr} + } +} +db eval BEGIN +speed_trial_tcl speed1p-update1 5000 stmt $script +db eval COMMIT + +# 50000 single-row updates. An index is used to find the row quickly. +# +set list {} +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*500000)}] + lappend list $i $r +} +set script { + foreach {i r} $::list { + db eval {UPDATE t1 SET b=$r WHERE a=$i} + } +} +db eval BEGIN +speed_trial_tcl speed1p-update2 50000 row $script +db eval COMMIT + +# 1 big text update that touches every row in the table. +# +speed_trial speed1p-update3 50000 row { + UPDATE t1 SET c=a; +} + +# Many individual text updates. Each row in the table is +# touched through an index. +# +set list {} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + lappend list $i [number_name $r] +} +set script { + foreach {i x} $::list { + db eval {UPDATE t1 SET c=$x WHERE a=$i} + } +} +db eval BEGIN +speed_trial_tcl speed1p-update4 50000 row $script +db eval COMMIT + +# Delete all content in a table. +# +speed_trial speed1p-delete1 50000 row {DELETE FROM t1} + +# Copy one table into another +# +speed_trial speed1p-copy1 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Delete all content in a table, one row at a time. +# +speed_trial speed1p-delete2 50000 row {DELETE FROM t1 WHERE 1} + +# Refill the table yet again +# +speed_trial speed1p-copy2 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Drop the table and recreate it without its indices. +# +db eval BEGIN +speed_trial speed1p-drop1 50000 row { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); +} +db eval COMMIT + +# Refill the table yet again. This copy should be faster because +# there are no indices to deal with. +# +speed_trial speed1p-copy3 50000 row {INSERT INTO t1 SELECT * FROM t2} + +# Select 20000 rows from the table at random. +# +speed_trial speed1p-random1 50000 row { + SELECT rowid FROM t1 ORDER BY random() LIMIT 20000 +} + +# Delete 20000 random rows from the table. +# +speed_trial speed1p-random-del1 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1p-1.1 { + db one {SELECT count(*) FROM t1} +} 30000 + +# Delete 20000 more rows at random from the table. 
+# +speed_trial speed1p-random-del2 20000 row { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000) +} +do_test speed1p-1.2 { + db one {SELECT count(*) FROM t1} +} 10000 +speed_trial_summary speed1 + +db close +sqlite3_shutdown +eval sqlite3_config_lookaside $old_lookaside +sqlite3_initialize +autoinstall_test_functions +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed1.test --- sqlite3-3.4.2/test/speed1.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/speed1.test 2009-06-25 12:22:34.000000000 +0100 @@ -11,9 +11,13 @@ # This file implements regression tests for SQLite library. The # focus of this script is measuring executing speed. # -# $Id: speed1.test,v 1.5 2007/03/31 22:34:16 drh Exp $ +# $Id: speed1.test,v 1.11 2009/04/09 01:23:49 drh Exp $ # +sqlite3_shutdown +#sqlite3_config_scratch 29000 1 +set old_lookaside [sqlite3_config_lookaside 1000 300] +#sqlite3_config_pagecache 1024 10000 set testdir [file dirname $argv0] source $testdir/tester.tcl speed_trial_init speed1 @@ -286,4 +290,9 @@ } 10000 speed_trial_summary speed1 +db close +sqlite3_shutdown +eval sqlite3_config_lookaside $old_lookaside +sqlite3_initialize +autoinstall_test_functions finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed3.test --- sqlite3-3.4.2/test/speed3.test 2007-05-17 19:28:11.000000000 +0100 +++ sqlite3-3.6.16/test/speed3.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this script is testing that the overflow-page related # enhancements added after version 3.3.17 speed things up. # -# $Id: speed3.test,v 1.2 2007/05/17 18:28:11 danielk1977 Exp $ +# $Id: speed3.test,v 1.5 2007/10/09 08:29:33 danielk1977 Exp $ # #--------------------------------------------------------------------- @@ -34,6 +34,12 @@ set testdir [file dirname $argv0] source $testdir/tester.tcl + +ifcapable !tclvar||!attach { + finish_test + return +} + speed_trial_init speed1 # Set a uniform random seed @@ -89,8 +95,10 @@ proc io_log {db} { + db_enter db array set stats1 [btree_pager_stats [btree_from_db db]] array set stats2 [btree_pager_stats [btree_from_db db 2]] + db_leave db # puts "1: [array get stats1]" # puts "2: [array get stats2]" puts "Incrvacuum: Read $stats1(read), wrote $stats1(write)" @@ -176,4 +184,3 @@ io_log db finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed4p.explain /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed4p.explain --- sqlite3-3.4.2/test/speed4p.explain 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/speed4p.explain 2008-04-16 13:57:48.000000000 +0100 @@ -0,0 +1,283 @@ +# 2007 October 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. 
More specifically, +# the focus is on the speed of: +# +# * joins +# * views +# * sub-selects +# * triggers +# +# $Id: speed4p.explain,v 1.1 2008/04/16 12:57:48 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed1.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. +# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Summary of tests: +# +# speed4p-join1: Join three tables using IPK index. +# speed4p-join2: Join three tables using an index. +# speed4p-join3: Join two tables without an index. +# +# speed4p-view1: Querying a view. +# speed4p-table1: Same queries as in speed4p-view1, but run directly against +# the tables for comparison purposes. +# +# speed4p-subselect1: A SELECT statement that uses many sub-queries.. +# +# speed4p-trigger1: An INSERT statement that fires a trigger. +# speed4p-trigger2: An UPDATE statement that fires a trigger. +# speed4p-trigger3: A DELETE statement that fires a trigger. +# speed4p-notrigger1: Same operation as trigger1, but without the trigger. +# speed4p-notrigger2: " trigger2 " +# speed4p-notrigger3: " trigger3 " +# + +# Set up the schema. Each of the tables t1, t2 and t3 contain 50,000 rows. +# This creates a database of around 16MB. +execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + BEGIN; + CREATE TABLE t1(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t2(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t3(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + + CREATE VIEW v1 AS SELECT rowid, i, t FROM t1; + CREATE VIEW v2 AS SELECT rowid, i, t FROM t2; + CREATE VIEW v3 AS SELECT rowid, i, t FROM t3; +} +for {set jj 1} {$jj <= 3} {incr jj} { + set stmt [string map "%T% t$jj" {INSERT INTO %T% VALUES(NULL, $i, $t)}] + for {set ii 0} {$ii < 50000} {incr ii} { + set i [expr {int(rand()*50000)}] + set t [number_name $i] + execsql $stmt + } +} +execsql { + CREATE INDEX i1 ON t1(t); + CREATE INDEX i2 ON t2(t); + CREATE INDEX i3 ON t3(t); + COMMIT; +} + +# Before running these tests, disable the compiled statement cache built into +# the Tcl interface. This is because we want to test the speed of SQL +# compilation as well as execution. +# +db cache size 0 + +# Join t1, t2, t3 on IPK. +set sql "SELECT * FROM t1, t2, t3 WHERE t1.oid = t2.oid AND t2.oid = t3.oid" +explain $sql +speed_trial speed4p-join1 50000 row $sql + +# Join t1, t2, t3 on the non-IPK index. 
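
The "db cache size 0" call above controls the prepared-statement cache built into the Tcl interface; turning it off forces every [db eval] to re-prepare its SQL, which is exactly what a compilation-speed benchmark wants. The related subcommands, for reference (the sizes shown are illustrative):

    sqlite3 db test.db
    db cache size 0     ;# disable caching: each [db eval] compiles its SQL afresh
    db cache size 10    ;# re-enable a small statement cache
    db cache flush      ;# discard whatever statements are currently cached
    db close
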
+set sql "SELECT * FROM t1, t2, t3 WHERE t1.t = t2.t AND t2.t = t3.t" +explain $sql +speed_trial speed4p-join2 50000 row $sql + +# Run 10000 simple queries against the views. +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + set t [expr {$ii%3+1}] + db eval "SELECT * FROM v$t WHERE rowid = \$v" + } +} +explain {SELECT * FROm v1 WHERE rowid=$v} +speed_trial_tcl speed4p-view1 10000 stmt $script + +# Run the same 10000 simple queries as in the previous test case against +# the underlying tables. The compiled vdbe programs should be identical, so +# the only difference in running time is the extra time taken to compile +# the view definitions. +# +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + set t [expr {$ii%3+1}] + db eval "SELECT t FROM t$t WHERE rowid = \$v" + } +} +explain {SELECT * FROM t1 WHERE rowid=$v} +speed_trial_tcl speed4p-table1 10000 stmt $script + +# Run a SELECT that uses sub-queries 10000 times. A total of 30000 sub-selects. +# +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + db eval { + SELECT (SELECT t FROM t1 WHERE rowid = $v), + (SELECT t FROM t2 WHERE rowid = $v), + (SELECT t FROM t3 WHERE rowid = $v) + } + } +} +explain { + SELECT (SELECT t FROM t1 WHERE rowid = $v), + (SELECT t FROM t2 WHERE rowid = $v), + (SELECT t FROM t3 WHERE rowid = $v) +} +speed_trial_tcl speed4p-subselect1 10000 stmt $script + +# The following block tests the speed of some DML statements that cause +# triggers to fire. +# +execsql { + CREATE TABLE log(op TEXT, r INTEGER, i INTEGER, t TEXT); + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TRIGGER t4_trigger1 AFTER INSERT ON t4 BEGIN + INSERT INTO log VALUES('INSERT INTO t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger2 AFTER UPDATE ON t4 BEGIN + INSERT INTO log VALUES('UPDATE OF t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger3 AFTER DELETE ON t4 BEGIN + INSERT INTO log VALUES('DELETE OF t4', old.rowid, old.i, old.t); + END; + BEGIN; +} +set list {} +for {set ii 1} {$ii < 10000} {incr ii} { + lappend list $ii [number_name $ii] +} +set script { + foreach {ii name} $::list { + db eval {INSERT INTO t4 VALUES(NULL, $ii, $name)} + } +} +explain {INSERT INTO t4 VALUES(NULL, $ii, $name)} +speed_trial_tcl speed4p-trigger1 10000 stmt $script + +set list {} +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + lappend list $ii $ii2 [number_name $ii2] +} +set script { + foreach {ii ii2 name} $::list { + db eval { + UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii; + } + } +} +explain {UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii} +speed_trial_tcl speed4p-trigger2 10000 stmt $script + +set script { + for {set ii 1} {$ii < 20000} {incr ii 2} { + db eval {DELETE FROM t4 WHERE rowid = $ii} + } +} +explain {DELETE FROM t4 WHERE rowid = $ii} +speed_trial_tcl speed4p-trigger3 10000 stmt $script +execsql {COMMIT} + +# The following block contains the same tests as the above block that +# tests triggers, with one crucial difference: no triggers are defined. +# So the difference in speed between these tests and the preceding ones +# is the amount of time taken to compile and execute the trigger programs. 
+# +execsql { + DROP TABLE t4; + DROP TABLE log; + VACUUM; + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + BEGIN; +} +set list {} +for {set ii 1} {$ii < 10000} {incr ii} { + lappend list $ii [number_name $ii] +} +set script { + foreach {ii name} $::list { + db eval {INSERT INTO t4 VALUES(NULL, $ii, $name);} + } +} +explain {INSERT INTO t4 VALUES(NULL, $ii, $name)} +speed_trial_tcl speed4p-notrigger1 10000 stmt $script + +set list {} +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + lappend list $ii $ii2 [number_name $ii2] +} +set script { + foreach {ii ii2 name} $::list { + db eval { + UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii; + } + } +} +explain {UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii} +speed_trial_tcl speed4p-notrigger2 10000 stmt $script + +set script { + for {set ii 1} {$ii < 20000} {incr ii 2} { + db eval {DELETE FROM t4 WHERE rowid = $ii} + } +} +explain {DELETE FROM t4 WHERE rowid = $ii} +speed_trial_tcl speed4p-notrigger3 10000 stmt $script +execsql {COMMIT} + +speed_trial_summary speed4 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed4p.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed4p.test --- sqlite3-3.4.2/test/speed4p.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/speed4p.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,292 @@ +# 2007 October 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. More specifically, +# the focus is on the speed of: +# +# * joins +# * views +# * sub-selects +# * triggers +# +# $Id: speed4p.test,v 1.4 2008/04/10 13:32:37 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed1.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. +# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Summary of tests: +# +# speed4p-join1: Join three tables using IPK index. +# speed4p-join2: Join three tables using an index. +# speed4p-join3: Join two tables without an index. +# +# speed4p-view1: Querying a view. +# speed4p-table1: Same queries as in speed4p-view1, but run directly against +# the tables for comparison purposes. 
+# +# speed4p-subselect1: A SELECT statement that uses many sub-queries.. +# +# speed4p-trigger1: An INSERT statement that fires a trigger. +# speed4p-trigger2: An UPDATE statement that fires a trigger. +# speed4p-trigger3: A DELETE statement that fires a trigger. +# speed4p-notrigger1: Same operation as trigger1, but without the trigger. +# speed4p-notrigger2: " trigger2 " +# speed4p-notrigger3: " trigger3 " +# + +# Set up the schema. Each of the tables t1, t2 and t3 contain 50,000 rows. +# This creates a database of around 16MB. +execsql { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + BEGIN; + CREATE TABLE t1(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t2(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t3(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + + CREATE VIEW v1 AS SELECT rowid, i, t FROM t1; + CREATE VIEW v2 AS SELECT rowid, i, t FROM t2; + CREATE VIEW v3 AS SELECT rowid, i, t FROM t3; +} +for {set jj 1} {$jj <= 3} {incr jj} { + set stmt [string map "%T% t$jj" {INSERT INTO %T% VALUES(NULL, $i, $t)}] + for {set ii 0} {$ii < 50000} {incr ii} { + set i [expr {int(rand()*50000)}] + set t [number_name $i] + execsql $stmt + } +} +execsql { + CREATE INDEX i1 ON t1(t); + CREATE INDEX i2 ON t2(t); + CREATE INDEX i3 ON t3(t); + COMMIT; +} + +# Join t1, t2, t3 on IPK. +set sql "SELECT * FROM t1, t2, t3 WHERE t1.oid = t2.oid AND t2.oid = t3.oid" +speed_trial speed4p-join1 50000 row $sql + +# Join t1, t2, t3 on the non-IPK index. +set sql "SELECT * FROM t1, t2, t3 WHERE t1.t = t2.t AND t2.t = t3.t" +speed_trial speed4p-join2 50000 row $sql + +# Run 10000 simple queries against the views. +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + set t [expr {$ii%3+1}] + db eval "SELECT * FROM v$t WHERE rowid = \$v" + } +} +speed_trial_tcl speed4p-view1 10000 stmt $script + +# Run the same 10000 simple queries as in the previous test case against +# the underlying tables. The compiled vdbe programs should be identical, so +# the only difference in running time is the extra time taken to compile +# the view definitions. +# +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + set t [expr {$ii%3+1}] + db eval "SELECT t FROM t$t WHERE rowid = \$v" + } +} +speed_trial_tcl speed4p-table1 10000 stmt $script + +# Run a SELECT that uses sub-queries 10000 times. A total of 30000 sub-selects. +# +set script { + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + db eval { + SELECT (SELECT t FROM t1 WHERE rowid = $v), + (SELECT t FROM t2 WHERE rowid = $v), + (SELECT t FROM t3 WHERE rowid = $v) + } + } +} +speed_trial_tcl speed4p-subselect1 10000 stmt $script + +# Single-row updates performance. +# +set script { + db eval BEGIN + for {set ii 1} {$ii < 10000} {incr ii} { + set v [expr {$ii*3}] + db eval {UPDATE t1 SET i=i+1 WHERE rowid=$ii} + } + db eval COMMIT +} +speed_trial_tcl speed4p-rowid-update 10000 stmt $script + + +db eval { + CREATE TABLE t5(t TEXT PRIMARY KEY, i INTEGER); +} +speed_trial speed4p-insert-ignore 50000 row { + INSERT OR IGNORE INTO t5 SELECT t, i FROM t1; +} + +set list [db eval {SELECT t FROM t5}] +set script { + db eval BEGIN + foreach t $::list { + db eval {UPDATE t5 SET i=i+1 WHERE t=$t} + } + db eval COMMIT +} +speed_trial_tcl speed4p-unique-update [llength $list] stmt $script + +# The following block tests the speed of some DML statements that cause +# triggers to fire. 
+# +execsql { + CREATE TABLE log(op TEXT, r INTEGER, i INTEGER, t TEXT); + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TRIGGER t4_trigger1 AFTER INSERT ON t4 BEGIN + INSERT INTO log VALUES('INSERT INTO t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger2 AFTER UPDATE ON t4 BEGIN + INSERT INTO log VALUES('UPDATE OF t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger3 AFTER DELETE ON t4 BEGIN + INSERT INTO log VALUES('DELETE OF t4', old.rowid, old.i, old.t); + END; + BEGIN; +} +set list {} +for {set ii 1} {$ii < 10000} {incr ii} { + lappend list $ii [number_name $ii] +} +set script { + foreach {ii name} $::list { + db eval {INSERT INTO t4 VALUES(NULL, $ii, $name)} + } +} +speed_trial_tcl speed4p-trigger1 10000 stmt $script + +set list {} +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + lappend list $ii $ii2 [number_name $ii2] +} +set script { + foreach {ii ii2 name} $::list { + db eval { + UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii; + } + } +} +speed_trial_tcl speed4p-trigger2 10000 stmt $script + +set script { + for {set ii 1} {$ii < 20000} {incr ii 2} { + db eval {DELETE FROM t4 WHERE rowid = $ii} + } +} +speed_trial_tcl speed4p-trigger3 10000 stmt $script +execsql {COMMIT} + +# The following block contains the same tests as the above block that +# tests triggers, with one crucial difference: no triggers are defined. +# So the difference in speed between these tests and the preceding ones +# is the amount of time taken to compile and execute the trigger programs. +# +execsql { + DROP TABLE t4; + DROP TABLE log; + VACUUM; + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + BEGIN; +} +set list {} +for {set ii 1} {$ii < 10000} {incr ii} { + lappend list $ii [number_name $ii] +} +set script { + foreach {ii name} $::list { + db eval {INSERT INTO t4 VALUES(NULL, $ii, $name);} + } +} +speed_trial_tcl speed4p-notrigger1 10000 stmt $script + +set list {} +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + lappend list $ii $ii2 [number_name $ii2] +} +set script { + foreach {ii ii2 name} $::list { + db eval { + UPDATE t4 SET i = $ii2, t = $name WHERE rowid = $ii; + } + } +} +speed_trial_tcl speed4p-notrigger2 10000 stmt $script + +set script { + for {set ii 1} {$ii < 20000} {incr ii 2} { + db eval {DELETE FROM t4 WHERE rowid = $ii} + } +} +speed_trial_tcl speed4p-notrigger3 10000 stmt $script +execsql {COMMIT} + +speed_trial_summary speed4 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/speed4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/speed4.test --- sqlite3-3.4.2/test/speed4.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/speed4.test 2009-06-05 18:03:40.000000000 +0100 @@ -0,0 +1,231 @@ +# 2007 October 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is measuring executing speed. 
More specifically, +# the focus is on the speed of: +# +# * joins +# * views +# * sub-selects +# * triggers +# +# $Id: speed4.test,v 1.2 2008/07/12 14:52:20 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +speed_trial_init speed1 + +# Set a uniform random seed +expr srand(0) + +set sqlout [open speed1.txt w] +proc tracesql {sql} { + puts $::sqlout $sql\; +} +#db trace tracesql + +# The number_name procedure below converts its argment (an integer) +# into a string which is the English-language name for that number. +# +# Example: +# +# puts [number_name 123] -> "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Summary of tests: +# +# speed4-join1: Join three tables using IPK index. +# speed4-join2: Join three tables using an index. +# speed4-join3: Join two tables without an index. +# +# speed4-view1: Querying a view. +# speed4-table1: Same queries as in speed4-view1, but run directly against +# the tables for comparison purposes. +# +# speed4-subselect1: A SELECT statement that uses many sub-queries.. +# +# speed4-trigger1: An INSERT statement that fires a trigger. +# speed4-trigger2: An UPDATE statement that fires a trigger. +# speed4-trigger3: A DELETE statement that fires a trigger. +# speed4-notrigger1: Same operation as trigger1, but without the trigger. +# speed4-notrigger2: " trigger2 " +# speed4-notrigger3: " trigger3 " +# + +# Set up the schema. Each of the tables t1, t2 and t3 contain 50,000 rows. +# This creates a database of around 16MB. +execsql { + BEGIN; + CREATE TABLE t1(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t2(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TABLE t3(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + + CREATE VIEW v1 AS SELECT rowid, i, t FROM t1; + CREATE VIEW v2 AS SELECT rowid, i, t FROM t2; + CREATE VIEW v3 AS SELECT rowid, i, t FROM t3; +} +for {set jj 1} {$jj <= 3} {incr jj} { + set stmt [string map "%T% t$jj" {INSERT INTO %T% VALUES(NULL, $i, $t)}] + for {set ii 0} {$ii < 50000} {incr ii} { + set i [expr {int(rand()*50000)}] + set t [number_name $i] + execsql $stmt + } +} +execsql { + CREATE INDEX i1 ON t1(t); + CREATE INDEX i2 ON t2(t); + CREATE INDEX i3 ON t3(t); + COMMIT; +} + +# Before running these tests, disable the compiled statement cache built into +# the Tcl interface. This is because we want to test the speed of SQL +# compilation as well as execution. +# +db cache size 0 + +# Join t1, t2, t3 on IPK. +set sql "SELECT * FROM t1, t2, t3 WHERE t1.oid = t2.oid AND t2.oid = t3.oid" +speed_trial speed4-join1 50000 row $sql + +# Join t1, t2, t3 on the non-IPK index. +set sql "SELECT * FROM t1, t2, t3 WHERE t1.t = t2.t AND t2.t = t3.t" +speed_trial speed4-join2 50000 row $sql + +# Run 10000 simple queries against the views. 
+set sql "" +for {set ii 1} {$ii < 10000} {incr ii} { + append sql "SELECT * FROM v[expr {($ii%3)+1}] WHERE rowid = [expr {$ii*3}];" +} +speed_trial speed4-view1 10000 stmt $sql + +# Run the same 10000 simple queries as in the previous test case against +# the underlying tables. The compiled vdbe programs should be identical, so +# the only difference in running time is the extra time taken to compile +# the view definitions. +# +set sql "" +for {set ii 1} {$ii < 10000} {incr ii} { + append sql "SELECT t FROM t[expr {($ii%3)+1}] WHERE rowid = [expr {$ii*3}];" +} +speed_trial speed4-table1 10000 stmt $sql + +# Run a SELECT that uses sub-queries 10000 times. A total of 30000 sub-selects. +# +set sql "" +for {set ii 1} {$ii < 10000} {incr ii} { + append sql " + SELECT (SELECT t FROM t1 WHERE rowid = [expr {$ii*3}]), + (SELECT t FROM t2 WHERE rowid = [expr {$ii*3}]), + (SELECT t FROM t3 WHERE rowid = [expr {$ii*3}]) + ;" +} +speed_trial speed4-subselect1 10000 stmt $sql + +# The following block tests the speed of some DML statements that cause +# triggers to fire. +# +execsql { + CREATE TABLE log(op TEXT, r INTEGER, i INTEGER, t TEXT); + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + CREATE TRIGGER t4_trigger1 AFTER INSERT ON t4 BEGIN + INSERT INTO log VALUES('INSERT INTO t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger2 AFTER UPDATE ON t4 BEGIN + INSERT INTO log VALUES('UPDATE OF t4', new.rowid, new.i, new.t); + END; + CREATE TRIGGER t4_trigger3 AFTER DELETE ON t4 BEGIN + INSERT INTO log VALUES('DELETE OF t4', old.rowid, old.i, old.t); + END; + BEGIN; +} +set sql "" +for {set ii 1} {$ii < 10000} {incr ii} { + append sql "INSERT INTO t4 VALUES(NULL, $ii, '[number_name $ii]');" +} +speed_trial speed4-trigger1 10000 stmt $sql +set sql "" +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + append sql " + UPDATE t4 SET i = $ii2, t = '[number_name $ii2]' WHERE rowid = $ii; + " +} +speed_trial speed4-trigger2 10000 stmt $sql +set sql "" +for {set ii 1} {$ii < 20000} {incr ii 2} { + append sql "DELETE FROM t4 WHERE rowid = $ii;" +} +speed_trial speed4-trigger3 10000 stmt $sql +execsql {COMMIT} + +# The following block contains the same tests as the above block that +# tests triggers, with one crucial difference: no triggers are defined. +# So the difference in speed between these tests and the preceding ones +# is the amount of time taken to compile and execute the trigger programs. 
+# +execsql { + DROP TABLE t4; + DROP TABLE log; + VACUUM; + CREATE TABLE t4(rowid INTEGER PRIMARY KEY, i INTEGER, t TEXT); + BEGIN; +} +set sql "" +for {set ii 1} {$ii < 10000} {incr ii} { + append sql "INSERT INTO t4 VALUES(NULL, $ii, '[number_name $ii]');" +} +speed_trial speed4-notrigger1 10000 stmt $sql +set sql "" +for {set ii 1} {$ii < 20000} {incr ii 2} { + set ii2 [expr {$ii*2}] + append sql " + UPDATE t4 SET i = $ii2, t = '[number_name $ii2]' WHERE rowid = $ii; + " +} +speed_trial speed4-notrigger2 10000 stmt $sql +set sql "" +for {set ii 1} {$ii < 20000} {incr ii 2} { + append sql "DELETE FROM t4 WHERE rowid = $ii;" +} +speed_trial speed4-notrigger3 10000 stmt $sql +execsql {COMMIT} + +speed_trial_summary speed4 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/sqllimits1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/sqllimits1.test --- sqlite3-3.4.2/test/sqllimits1.test 2007-06-07 20:08:34.000000000 +0100 +++ sqlite3-3.6.16/test/sqllimits1.test 2009-06-25 12:45:59.000000000 +0100 @@ -12,93 +12,426 @@ # This file contains tests to verify that the limits defined in # sqlite source file limits.h are enforced. # -# $Id: sqllimits1.test,v 1.10 2007/06/07 19:08:34 drh Exp $ +# $Id: sqllimits1.test,v 1.33 2009/06/25 01:47:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -# Test organization: +# Verify that the default per-connection limits are the same as +# the compile-time hard limits. # -# sqllimits-1.*: SQLITE_MAX_LENGTH -# sqllimits-2.*: SQLITE_MAX_SQL_LENGTH -# sqllimits-3.*: SQLITE_MAX_PAGE_COUNT -# sqllimits-4.*: SQLITE_MAX_COLUMN -# -# -# sqllimits-7.*: SQLITE_MAX_FUNCTION_ARG -# sqllimits-8.*: SQLITE_MAX_ATTACHED -# sqllimits-9.*: SQLITE_MAX_VARIABLE_NUMBER -# sqllimits-10.*: SQLITE_MAX_PAGE_SIZE -# sqllimits-11.*: SQLITE_MAX_LIKE_PATTERN_LENGTH -# -# Todo: -# -# sqllimits-5.*: SQLITE_MAX_EXPR_DEPTH (sqlite todo) -# sqllimits-6.*: SQLITE_MAX_VDBE_OP (sqlite todo) +sqlite3 db2 :memory: +do_test sqllimits1-1.1 { + sqlite3_limit db SQLITE_LIMIT_LENGTH -1 +} $SQLITE_MAX_LENGTH +do_test sqllimits1-1.2 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH -1 +} $SQLITE_MAX_SQL_LENGTH +do_test sqllimits1-1.3 { + sqlite3_limit db SQLITE_LIMIT_COLUMN -1 +} $SQLITE_MAX_COLUMN +do_test sqllimits1-1.4 { + sqlite3_limit db SQLITE_LIMIT_EXPR_DEPTH -1 +} $SQLITE_MAX_EXPR_DEPTH +do_test sqllimits1-1.5 { + sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT -1 +} $SQLITE_MAX_COMPOUND_SELECT +do_test sqllimits1-1.6 { + sqlite3_limit db SQLITE_LIMIT_VDBE_OP -1 +} $SQLITE_MAX_VDBE_OP +do_test sqllimits1-1.7 { + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG -1 +} $SQLITE_MAX_FUNCTION_ARG +do_test sqllimits1-1.8 { + sqlite3_limit db SQLITE_LIMIT_ATTACHED -1 +} $SQLITE_MAX_ATTACHED +do_test sqllimits1-1.9 { + sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH -1 +} $SQLITE_MAX_LIKE_PATTERN_LENGTH +do_test sqllimits1-1.10 { + sqlite3_limit db SQLITE_LIMIT_VARIABLE_NUMBER -1 +} $SQLITE_MAX_VARIABLE_NUMBER + +# Limit parameters out of range. +# +do_test sqllimits1-1.20 { + sqlite3_limit db SQLITE_LIMIT_TOOSMALL 123 +} {-1} +do_test sqllimits1-1.21 { + sqlite3_limit db SQLITE_LIMIT_TOOSMALL 123 +} {-1} +do_test sqllimits1-1.22 { + sqlite3_limit db SQLITE_LIMIT_TOOBIG 123 +} {-1} +do_test sqllimits1-1.23 { + sqlite3_limit db SQLITE_LIMIT_TOOBIG 123 +} {-1} + + +# Decrease all limits by half. Verify that the new limits take. 
+# +if {$SQLITE_MAX_LENGTH>=2} { + do_test sqllimits1-2.1.1 { + sqlite3_limit db SQLITE_LIMIT_LENGTH \ + [expr {$::SQLITE_MAX_LENGTH/2}] + } $SQLITE_MAX_LENGTH + do_test sqllimits1-2.1.2 { + sqlite3_limit db SQLITE_LIMIT_LENGTH -1 + } [expr {$SQLITE_MAX_LENGTH/2}] +} +if {$SQLITE_MAX_SQL_LENGTH>=2} { + do_test sqllimits1-2.2.1 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH \ + [expr {$::SQLITE_MAX_SQL_LENGTH/2}] + } $SQLITE_MAX_SQL_LENGTH + do_test sqllimits1-2.2.2 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH -1 + } [expr {$SQLITE_MAX_SQL_LENGTH/2}] +} +if {$SQLITE_MAX_COLUMN>=2} { + do_test sqllimits1-2.3.1 { + sqlite3_limit db SQLITE_LIMIT_COLUMN \ + [expr {$::SQLITE_MAX_COLUMN/2}] + } $SQLITE_MAX_COLUMN + do_test sqllimits1-2.3.2 { + sqlite3_limit db SQLITE_LIMIT_COLUMN -1 + } [expr {$SQLITE_MAX_COLUMN/2}] +} +if {$SQLITE_MAX_EXPR_DEPTH>=2} { + do_test sqllimits1-2.4.1 { + sqlite3_limit db SQLITE_LIMIT_EXPR_DEPTH \ + [expr {$::SQLITE_MAX_EXPR_DEPTH/2}] + } $SQLITE_MAX_EXPR_DEPTH + do_test sqllimits1-2.4.2 { + sqlite3_limit db SQLITE_LIMIT_EXPR_DEPTH -1 + } [expr {$SQLITE_MAX_EXPR_DEPTH/2}] +} +if {$SQLITE_MAX_COMPOUND_SELECT>=2} { + do_test sqllimits1-2.5.1 { + sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT \ + [expr {$::SQLITE_MAX_COMPOUND_SELECT/2}] + } $SQLITE_MAX_COMPOUND_SELECT + do_test sqllimits1-2.5.2 { + sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT -1 + } [expr {$SQLITE_MAX_COMPOUND_SELECT/2}] +} +if {$SQLITE_MAX_VDBE_OP>=2} { + do_test sqllimits1-2.6.1 { + sqlite3_limit db SQLITE_LIMIT_VDBE_OP \ + [expr {$::SQLITE_MAX_VDBE_OP/2}] + } $SQLITE_MAX_VDBE_OP + do_test sqllimits1-2.6.2 { + sqlite3_limit db SQLITE_LIMIT_VDBE_OP -1 + } [expr {$SQLITE_MAX_VDBE_OP/2}] +} +if {$SQLITE_MAX_FUNCTION_ARG>=2} { + do_test sqllimits1-2.7.1 { + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG \ + [expr {$::SQLITE_MAX_FUNCTION_ARG/2}] + } $SQLITE_MAX_FUNCTION_ARG + do_test sqllimits1-2.7.2 { + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG -1 + } [expr {$SQLITE_MAX_FUNCTION_ARG/2}] +} +if {$SQLITE_MAX_ATTACHED>=2} { + do_test sqllimits1-2.8.1 { + sqlite3_limit db SQLITE_LIMIT_ATTACHED \ + [expr {$::SQLITE_MAX_ATTACHED/2}] + } $SQLITE_MAX_ATTACHED + do_test sqllimits1-2.8.2 { + sqlite3_limit db SQLITE_LIMIT_ATTACHED -1 + } [expr {$SQLITE_MAX_ATTACHED/2}] +} +if {$SQLITE_MAX_LIKE_PATTERN_LENGTH>=2} { + do_test sqllimits1-2.9.1 { + sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH \ + [expr {$::SQLITE_MAX_LIKE_PATTERN_LENGTH/2}] + } $SQLITE_MAX_LIKE_PATTERN_LENGTH + do_test sqllimits1-2.9.2 { + sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH -1 + } [expr {$SQLITE_MAX_LIKE_PATTERN_LENGTH/2}] +} +if {$SQLITE_MAX_VARIABLE_NUMBER>=2} { + do_test sqllimits1-2.10.1 { + sqlite3_limit db SQLITE_LIMIT_VARIABLE_NUMBER \ + [expr {$::SQLITE_MAX_VARIABLE_NUMBER/2}] + } $SQLITE_MAX_VARIABLE_NUMBER + do_test sqllimits1-2.10.2 { + sqlite3_limit db SQLITE_LIMIT_VARIABLE_NUMBER -1 + } [expr {$SQLITE_MAX_VARIABLE_NUMBER/2}] +} + +# In a separate database connection, verify that the limits are unchanged. 
# +do_test sqllimits1-3.1 { + sqlite3_limit db2 SQLITE_LIMIT_LENGTH -1 +} $SQLITE_MAX_LENGTH +do_test sqllimits1-3.2 { + sqlite3_limit db2 SQLITE_LIMIT_SQL_LENGTH -1 +} $SQLITE_MAX_SQL_LENGTH +do_test sqllimits1-3.3 { + sqlite3_limit db2 SQLITE_LIMIT_COLUMN -1 +} $SQLITE_MAX_COLUMN +do_test sqllimits1-3.4 { + sqlite3_limit db2 SQLITE_LIMIT_EXPR_DEPTH -1 +} $SQLITE_MAX_EXPR_DEPTH +do_test sqllimits1-3.5 { + sqlite3_limit db2 SQLITE_LIMIT_COMPOUND_SELECT -1 +} $SQLITE_MAX_COMPOUND_SELECT +do_test sqllimits1-3.6 { + sqlite3_limit db2 SQLITE_LIMIT_VDBE_OP -1 +} $SQLITE_MAX_VDBE_OP +do_test sqllimits1-3.7 { + sqlite3_limit db2 SQLITE_LIMIT_FUNCTION_ARG -1 +} $SQLITE_MAX_FUNCTION_ARG +do_test sqllimits1-3.8 { + sqlite3_limit db2 SQLITE_LIMIT_ATTACHED -1 +} $SQLITE_MAX_ATTACHED +do_test sqllimits1-3.9 { + sqlite3_limit db2 SQLITE_LIMIT_LIKE_PATTERN_LENGTH -1 +} $SQLITE_MAX_LIKE_PATTERN_LENGTH +do_test sqllimits1-3.10 { + sqlite3_limit db2 SQLITE_LIMIT_VARIABLE_NUMBER -1 +} $SQLITE_MAX_VARIABLE_NUMBER +db2 close + +# Attempt to set all limits to the maximum 32-bit integer. Verify +# that the limit does not exceed the compile-time upper bound. +# +do_test sqllimits1-4.1.1 { + sqlite3_limit db SQLITE_LIMIT_LENGTH 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_LENGTH -1 +} $SQLITE_MAX_LENGTH +do_test sqllimits1-4.2.1 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH -1 +} $SQLITE_MAX_SQL_LENGTH +do_test sqllimits1-4.3.1 { + sqlite3_limit db SQLITE_LIMIT_COLUMN 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_COLUMN -1 +} $SQLITE_MAX_COLUMN +do_test sqllimits1-4.4.1 { + sqlite3_limit db SQLITE_LIMIT_EXPR_DEPTH 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_EXPR_DEPTH -1 +} $SQLITE_MAX_EXPR_DEPTH +do_test sqllimits1-4.5.1 { + sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_COMPOUND_SELECT -1 +} $SQLITE_MAX_COMPOUND_SELECT +do_test sqllimits1-4.6.1 { + sqlite3_limit db SQLITE_LIMIT_VDBE_OP 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_VDBE_OP -1 +} $SQLITE_MAX_VDBE_OP +do_test sqllimits1-4.7.1 { + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG -1 +} $SQLITE_MAX_FUNCTION_ARG +do_test sqllimits1-4.8.1 { + sqlite3_limit db SQLITE_LIMIT_ATTACHED 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_ATTACHED -1 +} $SQLITE_MAX_ATTACHED +do_test sqllimits1-4.9.1 { + sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH -1 +} $SQLITE_MAX_LIKE_PATTERN_LENGTH +do_test sqllimits1-4.10.1 { + sqlite3_limit db SQLITE_LIMIT_VARIABLE_NUMBER 0x7fffffff + sqlite3_limit db SQLITE_LIMIT_VARIABLE_NUMBER -1 +} $SQLITE_MAX_VARIABLE_NUMBER #-------------------------------------------------------------------- -# Test cases sqllimits-1.* test that the SQLITE_MAX_LENGTH limit +# Test cases sqllimits1-5.* test that the SQLITE_MAX_LENGTH limit # is enforced. # -do_test sqllimits-1.1.1 { +db close +sqlite3 db test.db +set LARGESIZE 99999 +set SQLITE_LIMIT_LENGTH 100000 +sqlite3_limit db SQLITE_LIMIT_LENGTH $SQLITE_LIMIT_LENGTH + +do_test sqllimits1-5.1.1 { catchsql { SELECT randomblob(2147483647) } } {1 {string or blob too big}} -do_test sqllimits-1.1.2 { +do_test sqllimits1-5.1.2 { catchsql { SELECT zeroblob(2147483647) } } {1 {string or blob too big}} -# Large, but allowable, blob-size. 
-# -set ::LARGESIZE [expr $SQLITE_MAX_LENGTH - 1] - -do_test sqllimits-1.2 { +do_test sqllimits1-5.2 { catchsql { SELECT LENGTH(randomblob($::LARGESIZE)) } -} "0 $::LARGESIZE" +} [list 0 $LARGESIZE] -do_test sqllimits-1.3 { +do_test sqllimits1-5.3 { catchsql { SELECT quote(randomblob($::LARGESIZE)) } } {1 {string or blob too big}} -do_test sqllimits-1.4 { +do_test sqllimits1-5.4 { catchsql { SELECT LENGTH(zeroblob($::LARGESIZE)) } -} "0 $::LARGESIZE" +} [list 0 $LARGESIZE] -do_test sqllimits-1.5 { +do_test sqllimits1-5.5 { catchsql { SELECT quote(zeroblob($::LARGESIZE)) } } {1 {string or blob too big}} -do_test sqllimits-1.6 { +do_test sqllimits1-5.6 { catchsql { SELECT zeroblob(-1) } -} {0 {}} +} {0 {{}}} -do_test sqllimits-1.9 { +do_test sqllimits1-5.9 { set ::str [string repeat A 65537] set ::rep [string repeat B 65537] catchsql { SELECT replace($::str, 'A', $::rep) } } {1 {string or blob too big}} +do_test sqllimits1-5.10 { + set ::str [string repeat %J 2100] + catchsql { SELECT strftime($::str, '2003-10-31') } +} {1 {string or blob too big}} + +do_test sqllimits1-5.11 { + set ::str1 [string repeat A [expr {$SQLITE_LIMIT_LENGTH - 10}]] + set ::str2 [string repeat B [expr {$SQLITE_LIMIT_LENGTH - 10}]] + catchsql { SELECT $::str1 || $::str2 } +} {1 {string or blob too big}} + +do_test sqllimits1-5.12 { + set ::str1 [string repeat ' [expr {$SQLITE_LIMIT_LENGTH - 10}]] + catchsql { SELECT quote($::str1) } +} {1 {string or blob too big}} + +do_test sqllimits1-5.13 { + set ::str1 [string repeat ' [expr {$SQLITE_LIMIT_LENGTH - 10}]] + catchsql { SELECT hex($::str1) } +} {1 {string or blob too big}} + +do_test sqllimits1-5.14.1 { + set ::STMT [sqlite3_prepare db "SELECT ?" -1 TAIL] + sqlite3_bind_zeroblob $::STMT 1 [expr {$SQLITE_LIMIT_LENGTH + 1}] +} {} +do_test sqllimits1-5.14.2 { + sqlite3_step $::STMT +} {SQLITE_ERROR} +do_test sqllimits1-5.14.3 { + sqlite3_reset $::STMT +} {SQLITE_TOOBIG} +do_test sqllimits1-5.14.4 { + set np1 [expr {$SQLITE_LIMIT_LENGTH + 1}] + set ::str1 [string repeat A $np1] + catch {sqlite3_bind_text $::STMT 1 $::str1 -1} res + set res +} {SQLITE_TOOBIG} +do_test sqllimits1-5.14.5 { + catch {sqlite3_bind_text16 $::STMT 1 $::str1 -1} res + set res +} {SQLITE_TOOBIG} +do_test sqllimits1-5.14.6 { + catch {sqlite3_bind_text $::STMT 1 $::str1 $np1} res + set res +} {SQLITE_TOOBIG} +do_test sqllimits1-5.14.7 { + catch {sqlite3_bind_text16 $::STMT 1 $::str1 $np1} res + set res +} {SQLITE_TOOBIG} +do_test sqllimits1-5.14.8 { + set n [expr {$np1-1}] + catch {sqlite3_bind_text $::STMT 1 $::str1 $n} res + set res +} {} +do_test sqllimits1-5.14.9 { + catch {sqlite3_bind_text16 $::STMT 1 $::str1 $n} res + set res +} {} +sqlite3_finalize $::STMT + +do_test sqllimits1-5.15 { + execsql { + CREATE TABLE t4(x); + INSERT INTO t4 VALUES(1); + INSERT INTO t4 VALUES(2); + INSERT INTO t4 SELECT 2+x FROM t4; + } + catchsql { + SELECT group_concat(hex(randomblob(20000))) FROM t4; + } +} {1 {string or blob too big}} +db eval {DROP TABLE t4} + +sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH 0x7fffffff +set strvalue [string repeat A $::SQLITE_LIMIT_LENGTH] +do_test sqllimits1-5.16 { + catchsql "SELECT '$strvalue'" +} [list 0 $strvalue] +do_test sqllimits1-5.17.1 { + catchsql "SELECT 'A$strvalue'" +} [list 1 {string or blob too big}] +do_test sqllimits1-5.17.2 { + sqlite3_limit db SQLITE_LIMIT_LENGTH 0x7fffffff + catchsql {SELECT 'A' || $::strvalue} +} [list 0 A$strvalue] +do_test sqllimits1-5.17.3 { + sqlite3_limit db SQLITE_LIMIT_LENGTH $SQLITE_LIMIT_LENGTH + catchsql {SELECT 'A' || $::strvalue} +} 
[list 1 {string or blob too big}] +set blobvalue [string repeat 41 $::SQLITE_LIMIT_LENGTH] +do_test sqllimits1-5.18 { + catchsql "SELECT x'$blobvalue'" +} [list 0 $strvalue] +do_test sqllimits1-5.19 { + catchsql "SELECT '41$blobvalue'" +} [list 1 {string or blob too big}] +unset blobvalue + +ifcapable datetime { + set strvalue [string repeat D [expr {$SQLITE_LIMIT_LENGTH-12}]] + do_test sqllimits1-5.20 { + catchsql {SELECT strftime('%Y ' || $::strvalue, '2008-01-02')} + } [list 0 [list "2008 $strvalue"]] + do_test sqllimits1-5.21 { + catchsql {SELECT strftime('%Y-%m-%d ' || $::strvalue, '2008-01-02')} + } {1 {string or blob too big}} +} +unset strvalue + #-------------------------------------------------------------------- -# Test cases sqllimits-2.* test that the SQLITE_MAX_SQL_LENGTH limit +# Test cases sqllimits1-6.* test that the SQLITE_MAX_SQL_LENGTH limit # is enforced. # -do_test sqllimits-2.1 { - set sql "SELECT 1 WHERE 1==1" - set N [expr {$::SQLITE_MAX_SQL_LENGTH / [string length " AND 1==1"]}] - append sql [string repeat " AND 1==1" $N] +do_test sqllimits1-6.1 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH 50000 + set sql "SELECT 1 WHERE 1==1" + set tail " /* A comment to take up space in order to make the string\ + longer without increasing the expression depth */\ + AND 1 == 1" + set N [expr {(50000 / [string length $tail])+1}] + append sql [string repeat $tail $N] catchsql $sql -} {1 {String or BLOB exceeded size limit}} +} {1 {string or blob too big}} +do_test sqllimits1-6.3 { + sqlite3_limit db SQLITE_LIMIT_SQL_LENGTH 50000 + set sql "SELECT 1 WHERE 1==1" + set tail " /* A comment to take up space in order to make the string\ + longer without increasing the expression depth */\ + AND 1 == 1" + set N [expr {(50000 / [string length $tail])+1}] + append sql [string repeat $tail $N] + set nbytes [string length $sql] + append sql { AND 0} + set rc [catch {sqlite3_prepare db $sql $nbytes TAIL} STMT] + lappend rc $STMT +} {1 {(18) statement too long}} +do_test sqllimits1-6.4 { + sqlite3_errmsg db +} {statement too long} #-------------------------------------------------------------------- -# Test cases sqllimits-3.* test that the limit set using the +# Test cases sqllimits1-7.* test that the limit set using the # max_page_count pragma. # -do_test sqllimits-3.1 { +do_test sqllimits1-7.1 { execsql { PRAGMA max_page_count = 1000; } } {1000} -do_test sqllimits-3.2 { +do_test sqllimits1-7.2 { execsql { CREATE TABLE trig (a INTEGER, b INTEGER); } # Set up a tree of triggers to fire when a row is inserted @@ -145,13 +478,13 @@ } } {} -do_test sqllimits1-3.3 { +do_test sqllimits1-7.3 { execsql { INSERT INTO trig VALUES (1,1); } } {} -do_test sqllimits1-3.4 { +do_test sqllimits1-7.4 { execsql { SELECT COUNT(*) FROM trig; } @@ -160,52 +493,102 @@ # This tries to insert so many rows it fills up the database (limited # to 1MB, so not that noteworthy an achievement). # -do_test sqllimits1-3.5 { +do_test sqllimits1-7.5 { catchsql { INSERT INTO trig VALUES (1,10); } } {1 {database or disk is full}} -do_test sqllimits1-3.6 { +do_test sqllimits1-7.6 { catchsql { SELECT COUNT(*) FROM trig; } } {0 7} +# Now check the response of the library to opening a file larger than +# the current max_page_count value. The response is to change the +# internal max_page_count value to match the actual size of the file. 
+if {[db eval {PRAGMA auto_vacuum}]} { + set fsize 1700 +} else { + set fsize 1691 +} +do_test sqllimits1-7.7.1 { + execsql { + PRAGMA max_page_count = 1000000; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a||b||c, b||c||a, c||a||b FROM abc; + INSERT INTO abc SELECT a, b, c FROM abc; + INSERT INTO abc SELECT b, a, c FROM abc; + INSERT INTO abc SELECT c, b, a FROM abc; + } + expr [file size test.db] / 1024 +} $fsize +do_test sqllimits1-7.7.2 { + db close + sqlite3 db test.db + execsql { + PRAGMA max_page_count = 1000; + } + execsql { + SELECT count(*) FROM sqlite_master; + } +} {6} +do_test sqllimits1-7.7.3 { + execsql { + PRAGMA max_page_count; + } +} $fsize +do_test sqllimits1-7.7.4 { + execsql { + DROP TABLE abc; + } +} {} + #-------------------------------------------------------------------- -# Test cases sqllimits1-4.* test the SQLITE_MAX_COLUMN limit. +# Test cases sqllimits1-8.* test the SQLITE_MAX_COLUMN limit. # -do_test sqllimits-1.4.1 { +set SQLITE_LIMIT_COLUMN 200 +sqlite3_limit db SQLITE_LIMIT_COLUMN $SQLITE_LIMIT_COLUMN +do_test sqllimits1-8.1 { # Columns in a table. set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "c$i" } catchsql "CREATE TABLE t([join $cols ,])" } {1 {too many columns on t}} -do_test sqllimits-1.4.2 { +do_test sqllimits1-8.2 { # Columns in the result-set of a SELECT. set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "sql AS sql$i" } catchsql "SELECT [join $cols ,] FROM sqlite_master" } {1 {too many columns in result set}} -do_test sqllimits-1.4.3 { +do_test sqllimits1-8.3 { # Columns in the result-set of a sub-SELECT. set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "sql AS sql$i" } catchsql "SELECT sql4 FROM (SELECT [join $cols ,] FROM sqlite_master)" } {1 {too many columns in result set}} -do_test sqllimits-1.4.4 { +do_test sqllimits1-8.4 { # Columns in an index. set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols c } set sql1 "CREATE TABLE t1(c);" @@ -213,54 +596,74 @@ catchsql "$sql1 ; $sql2" } {1 {too many columns in index}} -do_test sqllimits-1.4.5 { +do_test sqllimits1-8.5 { # Columns in a GROUP BY clause. catchsql "SELECT * FROM t1 GROUP BY [join $cols ,]" } {1 {too many terms in GROUP BY clause}} -do_test sqllimits-1.4.6 { +do_test sqllimits1-8.6 { # Columns in an ORDER BY clause. catchsql "SELECT * FROM t1 ORDER BY [join $cols ,]" } {1 {too many terms in ORDER BY clause}} -do_test sqllimits-1.4.7 { +do_test sqllimits1-8.7 { # Assignments in an UPDATE statement. 
set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "c = 1" } catchsql "UPDATE t1 SET [join $cols ,];" } {1 {too many columns in set list}} -do_test sqllimits-1.4.8 { +do_test sqllimits1-8.8 { # Columns in a view definition: set cols [list] - for {set i 0} {$i <= $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "c$i" } catchsql "CREATE VIEW v1 AS SELECT [join $cols ,] FROM t1;" } {1 {too many columns in result set}} -do_test sqllimits-1.4.9 { +do_test sqllimits1-8.9 { # Columns in a view definition (testing * expansion): set cols [list] - for {set i 0} {$i < $SQLITE_MAX_COLUMN} {incr i} { + for {set i 0} {$i < $SQLITE_LIMIT_COLUMN} {incr i} { lappend cols "c$i" } catchsql "CREATE TABLE t2([join $cols ,])" catchsql "CREATE VIEW v1 AS SELECT *, c1 AS o FROM t2;" } {1 {too many columns in result set}} +do_test sqllimits1-8.10 { + # ORDER BY columns + set cols [list] + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { + lappend cols c + } + set sql "SELECT c FROM t1 ORDER BY [join $cols ,]" + catchsql $sql +} {1 {too many terms in ORDER BY clause}} +do_test sqllimits1-8.11 { + # ORDER BY columns + set cols [list] + for {set i 0} {$i <= $SQLITE_LIMIT_COLUMN} {incr i} { + lappend cols [expr {$i%3 + 1}] + } + set sql "SELECT c, c+1, c+2 FROM t1 UNION SELECT c-1, c-2, c-3 FROM t1" + append sql " ORDER BY [join $cols ,]" + catchsql $sql +} {1 {too many terms in ORDER BY clause}} + #-------------------------------------------------------------------- -# These tests - sqllimits-5.* - test that the SQLITE_MAX_EXPR_DEPTH +# These tests - sqllimits1-9.* - test that the SQLITE_LIMIT_EXPR_DEPTH # limit is enforced. The limit refers to the number of terms in # the expression. # -if {$::SQLITE_MAX_EXPR_DEPTH != 1000} { +if {$SQLITE_MAX_EXPR_DEPTH==0} { puts -nonewline stderr "WARNING: Compile with -DSQLITE_MAX_EXPR_DEPTH to run " - puts stderr "tests sqllimits-1.5.X" + puts stderr "tests sqllimits1-9.X" } else { - do_test sqllimits-1.5.1 { + do_test sqllimits1-9.1 { set max $::SQLITE_MAX_EXPR_DEPTH set expr "(1 [string repeat {AND 1 } $max])" catchsql [subst { @@ -270,7 +673,7 @@ # Attempting to beat the expression depth limit using nested SELECT # queries causes a parser stack overflow. - do_test sqllimits-1.5.2 { + do_test sqllimits1-9.2 { set max $::SQLITE_MAX_EXPR_DEPTH set expr "SELECT 1" for {set i 0} {$i <= $max} {incr i} { @@ -279,8 +682,8 @@ catchsql [subst { $expr }] } "1 {parser stack overflow}" - - do_test sqllimits-1.5.3 { +if 0 { + do_test sqllimits1-9.3 { execsql { PRAGMA max_page_count = 1000000; -- 1 GB CREATE TABLE v0(a); @@ -296,81 +699,86 @@ } } {} - do_test sqllimits-1.5.4 { + do_test sqllimits1-9.4 { catchsql { SELECT a FROM v199 } } "1 {Expression tree is too large (maximum depth $::SQLITE_MAX_EXPR_DEPTH)}" } +} #-------------------------------------------------------------------- -# Test cases sqllimits-6.* test that the SQLITE_MAX_VDBE_OP +# Test cases sqllimits1-10.* test that the SQLITE_MAX_VDBE_OP # limit works as expected. The limit refers to the number of opcodes # in a single VDBE program. # # TODO #-------------------------------------------------------------------- -# Test the SQLITE_MAX_FUNCTION_ARG limit works. Test case names -# match the pattern "sqllimits-7.*". +# Test the SQLITE_LIMIT_FUNCTION_ARG limit works. Test case names +# match the pattern "sqllimits1-11.*". 
# -do_test sqllimits-1.7.1 { - set max $::SQLITE_MAX_FUNCTION_ARG - set vals [list] - for {set i 0} {$i < $SQLITE_MAX_FUNCTION_ARG} {incr i} { - lappend vals $i - } - catchsql "SELECT max([join $vals ,])" -} "0 [expr {$::SQLITE_MAX_FUNCTION_ARG - 1}]" -do_test sqllimits-1.7.2 { - set max $::SQLITE_MAX_FUNCTION_ARG - set vals [list] - for {set i 0} {$i <= $SQLITE_MAX_FUNCTION_ARG} {incr i} { - lappend vals $i - } - catchsql "SELECT max([join $vals ,])" -} {1 {too many arguments on function max}} +for {set max 5} {$max<=$SQLITE_MAX_FUNCTION_ARG} {incr max} { + do_test sqllimits1-11.$max.1 { + set vals [list] + sqlite3_limit db SQLITE_LIMIT_FUNCTION_ARG $::max + for {set i 0} {$i < $::max} {incr i} { + lappend vals $i + } + catchsql "SELECT max([join $vals ,])" + } "0 [expr {$::max - 1}]" + do_test sqllimits1-11.$max.2 { + set vals [list] + for {set i 0} {$i <= $::max} {incr i} { + lappend vals $i + } + catchsql "SELECT max([join $vals ,])" + } {1 {too many arguments on function max}} -# Test that it is SQLite, and not the implementation of the -# user function that is throwing the error. -proc myfunc {args} {error "I don't like to be called!"} -do_test sqllimits-1.7.2 { - db function myfunc myfunc - set max $::SQLITE_MAX_FUNCTION_ARG - set vals [list] - for {set i 0} {$i <= $SQLITE_MAX_FUNCTION_ARG} {incr i} { - lappend vals $i - } - catchsql "SELECT myfunc([join $vals ,])" -} {1 {too many arguments on function myfunc}} + # Test that it is SQLite, and not the implementation of the + # user function that is throwing the error. + proc myfunc {args} {error "I don't like to be called!"} + do_test sqllimits1-11.$max.2 { + db function myfunc myfunc + set vals [list] + for {set i 0} {$i <= $::max} {incr i} { + lappend vals $i + } + catchsql "SELECT myfunc([join $vals ,])" + } {1 {too many arguments on function myfunc}} +} #-------------------------------------------------------------------- -# Test cases sqllimits-8.*: Test the SQLITE_MAX_ATTACHED limit. +# Test cases sqllimits1-12.*: Test the SQLITE_MAX_ATTACHED limit. # -# TODO -do_test sqllimits-1.8.1 { - set max $::SQLITE_MAX_ATTACHED - for {set i 0} {$i < ($max)} {incr i} { - execsql "ATTACH 'test${i}.db' AS aux${i}" - } - catchsql "ATTACH 'test${i}.db' AS aux${i}" -} "1 {too many attached databases - max $::SQLITE_MAX_ATTACHED}" -do_test sqllimits-1.8.2 { - set max $::SQLITE_MAX_ATTACHED - for {set i 0} {$i < ($max)} {incr i} { - execsql "DETACH aux${i}" - } -} {} +ifcapable attach { + do_test sqllimits1-12.1 { + set max $::SQLITE_MAX_ATTACHED + for {set i 0} {$i < ($max)} {incr i} { + file delete -force test${i}.db test${i}.db-journal + } + for {set i 0} {$i < ($max)} {incr i} { + execsql "ATTACH 'test${i}.db' AS aux${i}" + } + catchsql "ATTACH 'test${i}.db' AS aux${i}" + } "1 {too many attached databases - max $::SQLITE_MAX_ATTACHED}" + do_test sqllimits1-12.2 { + set max $::SQLITE_MAX_ATTACHED + for {set i 0} {$i < ($max)} {incr i} { + execsql "DETACH aux${i}" + } + } {} +} #-------------------------------------------------------------------- -# Test cases sqllimits-9.*: Check that the SQLITE_MAX_VARIABLE_NUMBER +# Test cases sqllimits1-13.*: Check that the SQLITE_MAX_VARIABLE_NUMBER # limit works. 
# -do_test sqllimits-1.9.1 { +do_test sqllimits1-13.1 { set max $::SQLITE_MAX_VARIABLE_NUMBER catchsql "SELECT ?[expr {$max+1}] FROM t1" } "1 {variable number must be between ?1 and ?$::SQLITE_MAX_VARIABLE_NUMBER}" -do_test sqllimits-1.9.2 { +do_test sqllimits1-13.2 { set max $::SQLITE_MAX_VARIABLE_NUMBER set vals [list] for {set i 0} {$i < ($max+3)} {incr i} { @@ -381,35 +789,7 @@ #-------------------------------------------------------------------- -# sqllimits-10.*: Test the SQLITE_MAX_PAGE_SIZE define is enforced. -# This is probably tested elsewhere too (pagerX.test). Attempts -# to raise the page size above this limit are silently ignored. -# -do_test sqllimits-1.10.1 { - db close - file delete -force test.db test.db-journal - sqlite3 db test.db - set max $::SQLITE_MAX_PAGE_SIZE - catchsql "PRAGMA page_size = [expr {$max*2}]" -} {0 {}} -do_test sqllimits-1.10.2 { - catchsql "PRAGMA page_size" -} {0 1024} -do_test sqllimits-1.10.3 { - set max $::SQLITE_MAX_PAGE_SIZE - catchsql "PRAGMA page_size = $max" -} {0 {}} -do_test sqllimits-1.10.4 { - execsql "pragma page_size" -} $::SQLITE_MAX_PAGE_SIZE -do_test sqllimits-1.10.5 { - set max $::SQLITE_MAX_PAGE_SIZE - execsql "pragma page_size = [expr {$max - 5}]" - execsql "pragma page_size" -} $::SQLITE_MAX_PAGE_SIZE - -#-------------------------------------------------------------------- -# Test cases sqllimits-11.* verify that the +# Test cases sqllimits1-15.* verify that the # SQLITE_MAX_LIKE_PATTERN_LENGTH limit is enforced. This limit only # applies to the built-in LIKE operator, supplying an external # implementation by overriding the like() scalar function bypasses @@ -419,16 +799,18 @@ # the left-hand-side of the LIKE operator (the string being tested # against the pattern). # -do_test sqllimits-1.11.1 { - set max $::SQLITE_MAX_LIKE_PATTERN_LENGTH +set SQLITE_LIMIT_LIKE_PATTERN 1000 +sqlite3_limit db SQLITE_LIMIT_LIKE_PATTERN_LENGTH $SQLITE_LIMIT_LIKE_PATTERN +do_test sqllimits1-15.1 { + set max $::SQLITE_LIMIT_LIKE_PATTERN set ::pattern [string repeat "A%" [expr $max/2]] set ::string [string repeat "A" [expr {$max*2}]] execsql { SELECT $::string LIKE $::pattern; } } {1} -do_test sqllimits-1.11.2 { - set max $::SQLITE_MAX_LIKE_PATTERN_LENGTH +do_test sqllimits1-15.2 { + set max $::SQLITE_LIMIT_LIKE_PATTERN set ::pattern [string repeat "A%" [expr {($max/2) + 1}]] set ::string [string repeat "A" [expr {$max*2}]] catchsql { @@ -440,15 +822,19 @@ # This test case doesn't really belong with the other limits tests. # It is in this file because it is taxing to run, like the limits tests. # -do_test sqllimits-1.12.1 { +do_test sqllimits1-16.1 { set ::N [expr int(([expr pow(2,32)]/50) + 1)] expr (($::N*50) & 0xffffffff)<55 } {1} -do_test sqllimits-1.12.2 { +do_test sqllimits1-16.2 { set ::format "[string repeat A 60][string repeat "%J" $::N]" catchsql { SELECT strftime($::format, 1); } } {1 {string or blob too big}} + +foreach {key value} [array get saved] { + catch {set $key $value} +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/subquery.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/subquery.test --- sqlite3-3.4.2/test/subquery.test 2007-03-29 19:39:36.000000000 +0100 +++ sqlite3-3.6.16/test/subquery.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. 
The # focus of this script is testing correlated subqueries # -# $Id: subquery.test,v 1.14 2006/01/17 09:35:02 danielk1977 Exp $ +# $Id: subquery.test,v 1.17 2009/01/09 01:12:28 drh Exp $ # set testdir [file dirname $argv0] @@ -127,7 +127,7 @@ INSERT INTO t5 VALUES(25, '2003-3'); INSERT INTO t5 VALUES(5, '2003-4'); - SELECT "a.period", vsum + SELECT period, vsum FROM (SELECT a.period, (select sum(val) from t5 where period between a.period and '2002-4') vsum @@ -137,7 +137,7 @@ } {2002-2 30 2002-3 25 2002-4 15} do_test subquery-1.10.5 { execsql { - SELECT "a.period", vsum from + SELECT period, vsum from (select a.period, (select sum(val) from t5 where period between a.period and '2002-4') vsum FROM t5 a where a.period between '2002-1' and '2002-4') @@ -269,6 +269,11 @@ SELECT * FROM v1 WHERE EXISTS(SELECT * FROM t2 WHERE p=v1.b); } } {2} + do_test subquery-3.1.1 { + execsql { + SELECT * FROM v1 WHERE EXISTS(SELECT 1); + } + } {2} } else { catchsql { DROP TABLE t1; } catchsql { DROP TABLE t2; } @@ -417,8 +422,78 @@ set callcnt } {1} +if 0 { ############# disable until we get #2652 fixed +# Ticket #2652. Allow aggregate functions of outer queries inside +# a non-aggregate subquery. +# +do_test subquery-7.1 { + execsql { + CREATE TABLE t7(c7); + INSERT INTO t7 VALUES(1); + INSERT INTO t7 VALUES(2); + INSERT INTO t7 VALUES(3); + CREATE TABLE t8(c8); + INSERT INTO t8 VALUES(100); + INSERT INTO t8 VALUES(200); + INSERT INTO t8 VALUES(300); + CREATE TABLE t9(c9); + INSERT INTO t9 VALUES(10000); + INSERT INTO t9 VALUES(20000); + INSERT INTO t9 VALUES(30000); - - + SELECT (SELECT c7+c8 FROM t7) FROM t8; + } +} {101 201 301} +do_test subquery-7.2 { + execsql { + SELECT (SELECT max(c7)+c8 FROM t7) FROM t8; + } +} {103 203 303} +do_test subquery-7.3 { + execsql { + SELECT (SELECT c7+max(c8) FROM t8) FROM t7 + } +} {301} +do_test subquery-7.4 { + execsql { + SELECT (SELECT max(c7)+max(c8) FROM t8) FROM t7 + } +} {303} +do_test subquery-7.5 { + execsql { + SELECT (SELECT c8 FROM t8 WHERE rowid=max(c7)) FROM t7 + } +} {300} +do_test subquery-7.6 { + execsql { + SELECT (SELECT (SELECT max(c7+c8+c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.7 { + execsql { + SELECT (SELECT (SELECT c7+max(c8+c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.8 { + execsql { + SELECT (SELECT (SELECT max(c7)+c8+c9 FROM t9) FROM t8) FROM t7 + } +} {10103} +do_test subquery-7.9 { + execsql { + SELECT (SELECT (SELECT c7+max(c8)+c9 FROM t9) FROM t8) FROM t7 + } +} {10301 10302 10303} +do_test subquery-7.10 { + execsql { + SELECT (SELECT (SELECT c7+c8+max(c9) FROM t9) FROM t8) FROM t7 + } +} {30101 30102 30103} +do_test subquery-7.11 { + execsql { + SELECT (SELECT (SELECT max(c7)+max(c8)+max(c9) FROM t9) FROM t8) FROM t7 + } +} {30303} +} ;############# Disabled finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/subselect.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/subselect.test --- sqlite3-3.4.2/test/subselect.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/subselect.test 2009-06-05 18:03:40.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing SELECT statements that are part of # expressions. 
# -# $Id: subselect.test,v 1.14 2007/04/12 03:54:39 drh Exp $ +# $Id: subselect.test,v 1.16 2008/08/04 03:51:24 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -53,9 +53,17 @@ do_test subselect-1.3c { execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=6)} } {6} -do_test subselect-1.3c { +do_test subselect-1.3d { execsql {SELECT b from t1 where a = (SELECT a FROM t1 WHERE b=8)} } {} +ifcapable compound { + do_test subselect-1.3e { + execsql { + SELECT b FROM t1 + WHERE a = (SELECT a FROM t1 UNION SELECT b FROM t1 ORDER BY 1); + } + } {2} +} # What if the subselect doesn't return any value. We should get # NULL as the result. Check it out. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/substr.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/substr.test --- sqlite3-3.4.2/test/substr.test 2007-05-15 02:13:47.000000000 +0100 +++ sqlite3-3.6.16/test/substr.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,11 +11,16 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the built-in SUBSTR() functions. # -# $Id: substr.test,v 1.1 2007/05/15 01:13:47 drh Exp $ +# $Id: substr.test,v 1.7 2009/02/03 13:10:54 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !tclvar { + finish_test + return +} + # Create a table to work with. # execsql { @@ -61,12 +66,38 @@ substr-test 1.2 abcdefg 2 1 b substr-test 1.3 abcdefg 1 2 ab substr-test 1.4 abcdefg 1 100 abcdefg -substr-test 1.5 abcdefg 0 1 a +substr-test 1.5 abcdefg 0 2 a substr-test 1.6 abcdefg -1 1 g substr-test 1.7 abcdefg -1 10 g substr-test 1.8 abcdefg -5 3 cde substr-test 1.9 abcdefg -7 3 abc substr-test 1.10 abcdefg -100 98 abcde +substr-test 1.11 abcdefg 5 -1 d +substr-test 1.12 abcdefg 5 -4 abcd +substr-test 1.13 abcdefg 5 -5 abcd +substr-test 1.14 abcdefg -5 -1 b +substr-test 1.15 abcdefg -5 -2 ab +substr-test 1.16 abcdefg -5 -3 ab +substr-test 1.17 abcdefg 100 200 {} +substr-test 1.18 abcdefg 200 100 {} + +# Make sure NULL is returned if any parameter is NULL +# +do_test substr-1.90 { + db eval {SELECT ifnull(substr(NULL,1,1),'nil')} +} nil +do_test substr-1.91 { + db eval {SELECT ifnull(substr(NULL,1),'nil')} +} nil +do_test substr-1.92 { + db eval {SELECT ifnull(substr('abcdefg',NULL,1),'nil')} +} nil +do_test substr-1.93 { + db eval {SELECT ifnull(substr('abcdefg',NULL),'nil')} +} nil +do_test substr-1.94 { + db eval {SELECT ifnull(substr('abcdefg',1,NULL),'nil')} +} nil # Make sure everything works with long unicode characters # @@ -75,6 +106,7 @@ substr-test 2.3 \u1234\u2345\u3456 1 2 \u1234\u2345 substr-test 2.4 \u1234\u2345\u3456 -1 1 \u3456 substr-test 2.5 a\u1234b\u2345c\u3456c -5 3 b\u2345c +substr-test 2.6 a\u1234b\u2345c\u3456c -2 -3 b\u2345c # Basic functionality for BLOBs # @@ -82,12 +114,14 @@ subblob-test 3.2 61626364656667 2 1 62 subblob-test 3.3 61626364656667 1 2 6162 subblob-test 3.4 61626364656667 1 100 61626364656667 -subblob-test 3.5 61626364656667 0 1 61 +subblob-test 3.5 61626364656667 0 2 61 subblob-test 3.6 61626364656667 -1 1 67 subblob-test 3.7 61626364656667 -1 10 67 subblob-test 3.8 61626364656667 -5 3 636465 subblob-test 3.9 61626364656667 -7 3 616263 subblob-test 3.10 61626364656667 -100 98 6162636465 +subblob-test 3.11 61626364656667 100 200 {} +subblob-test 3.12 61626364656667 200 100 {} # If these blobs were strings, then they would contain multi-byte # characters. 
But since they are blobs, the substr indices refer @@ -100,4 +134,26 @@ subblob-test 4.5 61E188B462E28D8563E3919663 -5 4 63E39196 subblob-test 4.6 61E188B462E28D8563E3919663 -100 98 61E188B462E28D8563E391 +# Two-argument SUBSTR +# +proc substr-2-test {id string idx result} { + db eval { + DELETE FROM t1; + INSERT INTO t1(t) VALUES($string) + } + do_test substr-$id.1 [subst { + execsql { + SELECT substr(t, $idx) FROM t1 + } + }] [list $result] + set qstr '[string map {' ''} $string]' + do_test substr-$id.2 [subst { + execsql { + SELECT substr($qstr, $idx) + } + }] [list $result] +} +substr-2-test 5.1 abcdefghijklmnop 5 efghijklmnop +substr-2-test 5.2 abcdef -5 bcdef + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/sync.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/sync.test --- sqlite3-3.4.2/test/sync.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/sync.test 2009-06-05 18:03:40.000000000 +0100 @@ -13,20 +13,21 @@ # This file implements tests to verify that fsync is disabled when # pragma synchronous=off even for multi-database commits. # -# $Id: sync.test,v 1.5 2006/02/11 01:25:51 drh Exp $ +# $Id: sync.test,v 1.6 2007/10/09 08:29:33 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # # These tests are only applicable on unix when pager pragma are -# enabled. +# enabled. Also, since every test uses an ATTACHed database, they +# are only run when ATTACH is enabled. # if {$::tcl_platform(platform)!="unix"} { finish_test return } -ifcapable !pager_pragmas { +ifcapable !pager_pragmas||!attach { finish_test return } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tableapi.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tableapi.test --- sqlite3-3.4.2/test/tableapi.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/tableapi.test 2009-06-05 18:03:43.000000000 +0100 @@ -12,11 +12,15 @@ # focus of this file is testing the sqlite_exec_printf() and # sqlite_get_table_printf() APIs. 
# -# $Id: tableapi.test,v 1.12 2007/01/05 00:14:28 drh Exp $ +# $Id: tableapi.test,v 1.20 2008/07/31 02:05:05 shane Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable memdebug { + source $testdir/malloc_common.tcl +} + do_test tableapi-1.0 { set ::dbx [sqlite3_open test.db] catch {sqlite_exec_printf $::dbx {DROP TABLE xyz} {}} @@ -31,6 +35,8 @@ sqlite3_exec_printf $::dbx {SELECT * FROM xyz} {} } {0 {a b 1 {Hi Y'all}}} +ifcapable gettable { + do_test tableapi-2.1 { sqlite3_get_table_printf $::dbx { BEGIN TRANSACTION; @@ -61,6 +67,16 @@ SELECT * FROM xyz WHERE a>47 ORDER BY a } {} } {0 3 2 a b 48 (48) 49 (49) 50 (50)} +do_test tableapi-2.3.3 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>47 ORDER BY a; invalid + } {} +} {1 {near "invalid": syntax error}} +do_test tableapi-2.3.4 { + sqlite3_get_table_printf $::dbx { + SELECT * FROM xyz WHERE a>47 ORDER BY a + } {} 8 +} {0 a b 48 (48) 49 (49) 50 (50)} do_test tableapi-2.4 { set manyquote '''''''' append manyquote $manyquote @@ -99,6 +115,9 @@ } {} } {0 0 0} +}; # end ifcapable gettable + + # Repeat all tests with the empty_result_callbacks pragma turned on # do_test tableapi-3.1 { @@ -206,9 +225,25 @@ sqlite3_get_table_printf $::dbx {SELECT * FROM t2} {} } {0 2 100 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 x32 x33 x34 x35 x36 x37 x38 x39 x40 x41 x42 x43 x44 x45 x46 x47 x48 x49 x50 x51 x52 x53 x54 x55 x56 x57 x58 x59 x60 x61 x62 x63 x64 x65 x66 x67 x68 x69 x70 x71 x72 x73 x74 x75 x76 x77 x78 x79 x80 x81 x82 x83 x84 x85 x86 x87 x88 x89 x90 x91 x92 x93 x94 x95 x96 x97 x98 x99 x100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100} -do_test tableapi-6.1 { - sqlite3_get_table_printf $::dbx {PRAGMA user_version} {} -} {0 1 1 user_version 0} +ifcapable schema_pragmas { + do_test tableapi-6.1 { + sqlite3_get_table_printf $::dbx {PRAGMA user_version} {} + } {0 1 1 user_version 0} +} + +ifcapable memdebug { + do_malloc_test tableapi-7 -sqlprep { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(3,4); + INSERT INTO t1 SELECT a+4, b+4 FROM t1; + INSERT INTO t1 SELECT a+8, b+8 FROM t1; + } -tclbody { + set r [sqlite3_get_table_printf db {SELECT rowid, a, b FROM t1} {}] + if {[llength $r]<26} {error "out of memory"} + } +} do_test tableapi-99.0 { sqlite3_close $::dbx diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/table.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/table.test --- sqlite3-3.4.2/test/table.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/table.test 2009-06-25 12:35:52.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the CREATE TABLE statement. 
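The new expected results further down in this table.test diff (the table-8.* cases and the revised expectation for table t8) follow SQLite's column-affinity rules: a declared type containing INT gives INTEGER affinity; otherwise CHAR, CLOB or TEXT gives TEXT; BLOB or no type at all gives no affinity; REAL, FLOA or DOUB gives REAL; anything else gives NUMERIC. A table built with CREATE TABLE ... AS SELECT records only the affinity, which is why VARCHAR(10) comes back as TEXT and BIG INTEGER as INT. A rough SQL sketch (table names are illustrative, not from the patch):

  CREATE TABLE src(v VARCHAR(10), n BIG INTEGER, b BLOB);
  CREATE TABLE cpy AS SELECT * FROM src;
  -- sqlite_master is expected to show cpy with columns approximately: v TEXT, n INT, b
  SELECT sql FROM sqlite_master WHERE name = 'cpy';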
# -# $Id: table.test,v 1.47 2007/05/02 17:54:56 drh Exp $ +# $Id: table.test,v 1.53 2009/06/05 17:09:12 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -118,9 +118,9 @@ # Verify that we cannot make a table with the same name as an index # do_test table-2.2a { - execsql {CREATE TABLE test2(one text); CREATE INDEX test3 ON test2(one)} - set v [catch {execsql {CREATE TABLE test3(two text)}} msg] - lappend v $msg + execsql {CREATE TABLE test2(one text)} + execsql {CREATE INDEX test3 ON test2(one)} + catchsql {CREATE TABLE test3(two text)} } {1 {there is already an index named test3}} do_test table-2.2b { db close @@ -314,6 +314,14 @@ SELECT * FROM weird; } } {desc a asc b key 9 14_vac 0 fuzzy_dog_12 xyz begin hi end y'all} +do_test table-7.3 { + execsql { + CREATE TABLE savepoint(release); + INSERT INTO savepoint(release) VALUES(10); + UPDATE savepoint SET release = 5; + SELECT release FROM savepoint; + } +} {5} # Try out the CREATE TABLE AS syntax # @@ -328,13 +336,13 @@ SELECT sql FROM sqlite_master WHERE name='t2'; } } {{CREATE TABLE t2( - "desc" text, - "asc" text, - "key" int, - "14_vac" boolean, - fuzzy_dog_12 varchar(10), - "begin" blob, - "end" clob + "desc" TEXT, + "asc" TEXT, + "key" INT, + "14_vac" NUM, + fuzzy_dog_12 TEXT, + "begin", + "end" TEXT )}} do_test table-8.2 { execsql { @@ -391,6 +399,37 @@ } } {1 {no such table: no_such_table}} +do_test table-8.9 { + execsql { + CREATE TABLE t10("col.1" [char.3]); + CREATE TABLE t11 AS SELECT * FROM t10; + SELECT sql FROM sqlite_master WHERE name = 't11'; + } +} {{CREATE TABLE t11("col.1" TEXT)}} +do_test table-8.10 { + execsql { + CREATE TABLE t12( + a INTEGER, + b VARCHAR(10), + c VARCHAR(1,10), + d VARCHAR(+1,-10), + e VARCHAR (+1,-10), + f "VARCHAR (+1,-10, 5)", + g BIG INTEGER + ); + CREATE TABLE t13 AS SELECT * FROM t12; + SELECT sql FROM sqlite_master WHERE name = 't13'; + } +} {{CREATE TABLE t13( + a INT, + b TEXT, + c TEXT, + d TEXT, + e TEXT, + f TEXT, + g INT +)}} + # Make sure we cannot have duplicate column names within a table. # do_test table-9.1 { @@ -545,7 +584,7 @@ execsql { SELECT sql FROM sqlite_master WHERE tbl_name = 't8' } -} {{CREATE TABLE t8(b number(5,10),h,i integer,j BLOB)}} +} {{CREATE TABLE t8(b NUM,h,i INT,j)}} #-------------------------------------------------------------------- # Test cases table-13.* @@ -612,7 +651,7 @@ } {0 {}} # Try to drop a table from within a callback: -do_test table-14.3 { +do_test table-14.2 { set rc [ catch { db eval {SELECT * FROM tablet8 LIMIT 1} {} { @@ -623,33 +662,35 @@ set result [list $rc $msg] } {1 {database table is locked}} -# Now attach a database and ensure that a table can be created in the -# attached database whilst in a callback from a query on the main database. -do_test table-14.4 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - attach 'test2.db' as aux; - } - db eval {SELECT * FROM tablet8 LIMIT 1} {} { - db eval {CREATE TABLE aux.t1(a, b, c)} - } -} {} - -# On the other hand, it should be impossible to drop a table when any VMs -# are active. This is because VerifyCookie instructions may have already -# been executed, and btree root-pages may not move after this (which a -# delete table might do). 
-do_test table-14.4 { - set rc [ - catch { - db eval {SELECT * FROM tablet8 LIMIT 1} {} { - db eval {DROP TABLE aux.t1;} - } - } msg - ] - set result [list $rc $msg] -} {1 {database table is locked}} +ifcapable attach { + # Now attach a database and ensure that a table can be created in the + # attached database whilst in a callback from a query on the main database. + do_test table-14.3 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' as aux; + } + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {CREATE TABLE aux.t1(a, b, c)} + } + } {} + + # On the other hand, it should be impossible to drop a table when any VMs + # are active. This is because VerifyCookie instructions may have already + # been executed, and btree root-pages may not move after this (which a + # delete table might do). + do_test table-14.4 { + set rc [ + catch { + db eval {SELECT * FROM tablet8 LIMIT 1} {} { + db eval {DROP TABLE aux.t1;} + } + } msg + ] + set result [list $rc $msg] + } {1 {database table is locked}} +} # Create and drop 2000 tables. This is to check that the balance_shallow() # routine works correctly on the sqlite_master table. At one point it diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tclsqlite.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tclsqlite.test --- sqlite3-3.4.2/test/tclsqlite.test 2007-06-28 13:46:19.000000000 +0100 +++ sqlite3-3.6.16/test/tclsqlite.test 2009-06-12 03:37:59.000000000 +0100 @@ -15,7 +15,7 @@ # interface is pretty well tested. This file contains some addition # tests for fringe issues that the main test suite does not cover. # -# $Id: tclsqlite.test,v 1.59 2007/06/19 23:01:42 drh Exp $ +# $Id: tclsqlite.test,v 1.73 2009/03/16 13:19:36 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -25,16 +25,25 @@ if {[sqlite3 -has-codec]} { set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?" } else { - set r "sqlite3 HANDLE FILENAME ?MODE?" + set r "sqlite3 HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN?" 
} do_test tcl-1.1 { set v [catch {sqlite3 bogus} msg] + regsub {really_sqlite3} $msg {sqlite3} msg lappend v $msg } [list 1 "wrong # args: should be \"$r\""] do_test tcl-1.2 { set v [catch {db bogus} msg] lappend v $msg -} {1 {bad option "bogus": must be authorizer, busy, cache, changes, close, collate, collation_needed, commit_hook, complete, copy, enable_load_extension, errorcode, eval, exists, function, incrblob, interrupt, last_insert_rowid, nullvalue, onecolumn, profile, progress, rekey, rollback_hook, timeout, total_changes, trace, transaction, update_hook, or version}} +} {1 {bad option "bogus": must be authorizer, backup, busy, cache, changes, close, collate, collation_needed, commit_hook, complete, copy, enable_load_extension, errorcode, eval, exists, function, incrblob, interrupt, last_insert_rowid, nullvalue, onecolumn, profile, progress, rekey, restore, rollback_hook, status, timeout, total_changes, trace, transaction, unlock_notify, update_hook, or version}} +do_test tcl-1.2.1 { + set v [catch {db cache bogus} msg] + lappend v $msg +} {1 {bad option "bogus": must be flush or size}} +do_test tcl-1.2.2 { + set v [catch {db cache} msg] + lappend v $msg +} {1 {wrong # args: should be "db cache option ?arg?"}} do_test tcl-1.3 { execsql {CREATE TABLE t1(a int, b int)} execsql {INSERT INTO t1 VALUES(10,20)} @@ -61,15 +70,15 @@ } msg] lappend v $msg } {0 {}} +catch {expr x*} msg do_test tcl-1.6 { set v [catch { db eval {SELECT * FROM t1} data { expr x* } } msg] - regsub {:.*$} $msg {} msg lappend v $msg -} {1 {syntax error in expression "x*"}} +} [list 1 $msg] do_test tcl-1.7 { set v [catch {db} msg] lappend v $msg @@ -109,7 +118,7 @@ do_test tcl-1.15 { set v [catch {db function} msg] lappend v $msg -} {1 {wrong # args: should be "db function NAME SCRIPT"}} +} {1 {wrong # args: should be "db function NAME [-argcount N] SCRIPT"}} do_test tcl-1.16 { set v [catch {db last_insert_rowid xyz} msg] lappend v $msg @@ -138,24 +147,25 @@ set v [catch {db copy} msg] lappend v $msg } {1 {wrong # args: should be "db copy CONFLICT-ALGORITHM TABLE FILENAME ?SEPARATOR? 
?NULLINDICATOR?"}} +do_test tcl-1.21 { + set v [catch {sqlite3 db2 test.db -vfs nosuchvfs} msg] + lappend v $msg +} {1 {no such vfs: nosuchvfs}} - -if {[sqlite3 -tcl-uses-utf]} { - catch {unset ::result} - do_test tcl-2.1 { - execsql "CREATE TABLE t\u0123x(a int, b\u1235 float)" - } {} - ifcapable schema_pragmas { - do_test tcl-2.2 { - execsql "PRAGMA table_info(t\u0123x)" - } "0 a int 0 {} 0 1 b\u1235 float 0 {} 0" - } - do_test tcl-2.3 { - execsql "INSERT INTO t\u0123x VALUES(1,2.3)" - db eval "SELECT * FROM t\u0123x" result break - set result(*) - } "a b\u1235" +catch {unset ::result} +do_test tcl-2.1 { + execsql "CREATE TABLE t\u0123x(a int, b\u1235 float)" +} {} +ifcapable schema_pragmas { + do_test tcl-2.2 { + execsql "PRAGMA table_info(t\u0123x)" + } "0 a int 0 {} 0 1 b\u1235 float 0 {} 0" } +do_test tcl-2.3 { + execsql "INSERT INTO t\u0123x VALUES(1,2.3)" + db eval "SELECT * FROM t\u0123x" result break + set result(*) +} "a b\u1235" # Test the onecolumn method @@ -224,13 +234,13 @@ do_test tcl-5.1 { execsql {CREATE TABLE t3(a,b,c)} catch {unset x} - set x(1) 5 - set x(2) 7 + set x(1) A + set x(2) B execsql { INSERT INTO t3 VALUES($::x(1),$::x(2),$::x(3)); SELECT * FROM t3 } - } {5 7 {}} + } {A B {}} do_test tcl-5.2 { execsql { SELECT typeof(a), typeof(b), typeof(c) FROM t3 @@ -404,16 +414,17 @@ } } db eval {SELECT * FROM t4} -} {1 2 3 4} +} {1 2} do_test tcl-10.10 { for {set i 0} {$i<1} {incr i} { db transaction { db eval {INSERT INTO t4 VALUES(5)} continue } + error "This line should not be run" } db eval {SELECT * FROM t4} -} {1 2 3 4 5} +} {1 2 5} do_test tcl-10.11 { for {set i 0} {$i<10} {incr i} { db transaction { @@ -422,7 +433,7 @@ } } db eval {SELECT * FROM t4} -} {1 2 3 4 5 6} +} {1 2 5 6} do_test tcl-10.12 { set rc [catch { for {set i 0} {$i<10} {incr i} { @@ -435,13 +446,125 @@ } {2} do_test tcl-10.13 { db eval {SELECT * FROM t4} -} {1 2 3 4 5 6 7} +} {1 2 5 6 7} + +# Now test that [db transaction] commands may be nested with +# the expected results. +# +do_test tcl-10.14 { + db transaction { + db eval { + DELETE FROM t4; + INSERT INTO t4 VALUES('one'); + } + + catch { + db transaction { + db eval { INSERT INTO t4 VALUES('two') } + db transaction { + db eval { INSERT INTO t4 VALUES('three') } + error "throw an error!" + } + } + } + } + + db eval {SELECT * FROM t4} +} {one} +do_test tcl-10.15 { + # Make sure a transaction has not been left open. + db eval {BEGIN ; COMMIT} +} {} +do_test tcl-10.16 { + db transaction { + db eval { INSERT INTO t4 VALUES('two'); } + db transaction { + db eval { INSERT INTO t4 VALUES('three') } + db transaction { + db eval { INSERT INTO t4 VALUES('four') } + } + } + } + db eval {SELECT * FROM t4} +} {one two three four} +do_test tcl-10.17 { + catch { + db transaction { + db eval { INSERT INTO t4 VALUES('A'); } + db transaction { + db eval { INSERT INTO t4 VALUES('B') } + db transaction { + db eval { INSERT INTO t4 VALUES('C') } + error "throw an error!" + } + } + } + } + db eval {SELECT * FROM t4} +} {one two three four} +do_test tcl-10.18 { + # Make sure a transaction has not been left open. + db eval {BEGIN ; COMMIT} +} {} + +# Mess up a [db transaction] command by locking the database using a +# second connection when it tries to commit. Make sure the transaction +# is not still open after the "database is locked" exception is thrown. 
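The nested [db transaction] cases above (tcl-10.14 through tcl-10.17) show that when an error escaping an inner block is caught while still inside the outer block, only the inner block's work is undone and the outer block still commits; if the error propagates out of the outermost block, everything is rolled back. In SQL terms the tcl-10.14 outcome behaves roughly like the following sketch, assuming the savepoint-based nesting used by the Tcl interface in this release:

  BEGIN;                            -- outermost [db transaction]
  INSERT INTO t4 VALUES('one');
  SAVEPOINT nested;                 -- inner [db transaction] blocks
  INSERT INTO t4 VALUES('two');
  INSERT INTO t4 VALUES('three');
  ROLLBACK TO nested;               -- the error thrown inside the inner blocks
  COMMIT;                           -- only 'one' survives, matching the {one} result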
+# +do_test tcl-10.18 { + sqlite3 db2 test.db + db2 eval { + BEGIN; + SELECT * FROM sqlite_master; + } + + set rc [catch { + db transaction { + db eval {INSERT INTO t4 VALUES('five')} + } + } msg] + list $rc $msg +} {1 {database is locked}} +do_test tcl-10.19 { + db eval {BEGIN ; COMMIT} +} {} + +# Thwart a [db transaction] command by locking the database using a +# second connection with "BEGIN EXCLUSIVE". Make sure no transaction is +# open after the "database is locked" exception is thrown. +# +do_test tcl-10.20 { + db2 eval { + COMMIT; + BEGIN EXCLUSIVE; + } + set rc [catch { + db transaction { + db eval {INSERT INTO t4 VALUES('five')} + } + } msg] + list $rc $msg +} {1 {database is locked}} +do_test tcl-10.21 { + db2 close + db eval {BEGIN ; COMMIT} +} {} +do_test tcl-10.22 { + sqlite3 db2 test.db + db transaction exclusive { + catch { db2 eval {SELECT * FROM sqlite_master} } msg + set msg "db2: $msg" + } + set msg +} {db2: database is locked} +db2 close do_test tcl-11.1 { - db exists {SELECT x,x*2,x+x FROM t4 WHERE x==4} + db eval {INSERT INTO t4 VALUES(6)} + db exists {SELECT x,x*2,x+x FROM t4 WHERE x==6} } {1} do_test tcl-11.2 { - db exists {SELECT 0 FROM t4 WHERE x==4} + db exists {SELECT 0 FROM t4 WHERE x==6} } {1} do_test tcl-11.3 { db exists {SELECT 1 FROM t4 WHERE x==8} @@ -459,35 +582,37 @@ # of $aaa, that objects are treated as bytearray and are inserted # as BLOBs. # -do_test tcl-13.1 { - db eval {CREATE TABLE t5(x BLOB)} - set x abc123 - db eval {INSERT INTO t5 VALUES($x)} - db eval {SELECT typeof(x) FROM t5} -} {text} -do_test tcl-13.2 { - binary scan $x H notUsed - db eval { - DELETE FROM t5; - INSERT INTO t5 VALUES($x); - SELECT typeof(x) FROM t5; - } -} {text} -do_test tcl-13.3 { - db eval { - DELETE FROM t5; - INSERT INTO t5 VALUES(@x); - SELECT typeof(x) FROM t5; - } -} {blob} -do_test tcl-13.4 { - set y 1234 - db eval { - DELETE FROM t5; - INSERT INTO t5 VALUES(@y); - SELECT hex(x), typeof(x) FROM t5 - } -} {31323334 blob} +ifcapable tclvar { + do_test tcl-13.1 { + db eval {CREATE TABLE t5(x BLOB)} + set x abc123 + db eval {INSERT INTO t5 VALUES($x)} + db eval {SELECT typeof(x) FROM t5} + } {text} + do_test tcl-13.2 { + binary scan $x H notUsed + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES($x); + SELECT typeof(x) FROM t5; + } + } {text} + do_test tcl-13.3 { + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES(@x); + SELECT typeof(x) FROM t5; + } + } {blob} + do_test tcl-13.4 { + set y 1234 + db eval { + DELETE FROM t5; + INSERT INTO t5 VALUES(@y); + SELECT hex(x), typeof(x) FROM t5 + } + } {31323334 blob} +} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tempdb.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tempdb.test --- sqlite3-3.4.2/test/tempdb.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tempdb.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,95 @@ +# 2008 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# The focus of this file is in making sure that rolling back +# a statement journal works correctly. +# +# $Id: tempdb.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Use a temporary database. 
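An empty filename, as passed to [sqlite3 db {}] just below, is expected to open a private temporary database: it exists only for the lifetime of that connection, is not shared with other handles, and, unlike a ':memory:' database, may be backed by a disk file once it grows. A small sketch, illustrative only:

  sqlite3 scratch {}
  scratch eval { CREATE TABLE t(x); INSERT INTO t VALUES(1); SELECT x FROM t }
  scratch close      ;# the temporary database is discarded here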
+# +db close +sqlite3 db {} + +# Force a statement journal rollback on a database file that +# has never been opened. +# +do_test tempdb-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(x UNIQUE); + CREATE TABLE t2(y); + INSERT INTO t2 VALUES('hello'); + INSERT INTO t2 VALUES(NULL); + } + # Because of the transaction, the temporary database file + # has not even been opened yet. The following statement + # will cause a statement journal rollback on this non-existant + # file. + catchsql { + INSERT INTO t1 + SELECT CASE WHEN y IS NULL THEN test_error('oops', 11) ELSE y END + FROM t2; + } +} {1 oops} + +# Verify that no writes occurred in t1. +# +do_test tempdb-1.2 { + execsql { + SELECT * FROM t1 + } +} {} + +do_test tempdb-2.1 { + # Set $::jrnl_in_memory if the journal file is expected to be in-memory. + # Similarly, set $::subj_in_memory if the sub-journal file is expected + # to be in memory. These variables are used to calculate the expected + # number of open files in the test cases below. + # + set jrnl_in_memory [expr { + [info exists ::permutations_test_prefix] && + $::permutations_test_prefix eq "inmemory_journal" + }] + set subj_in_memory [expr {$jrnl_in_memory || $TEMP_STORE == 3}] + + db close + sqlite3 db test.db +} {} +do_test tempdb-2.2 { + execsql { + CREATE TABLE t1 (a PRIMARY KEY, b, c); + CREATE TABLE t2 (a, b, c); + BEGIN; + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t2 SELECT * FROM t1; + } + catchsql { INSERT INTO t1 SELECT * FROM t2 } + set sqlite_open_file_count +} [expr 1 + (0==$jrnl_in_memory) + (0==$subj_in_memory)] +do_test tempdb-2.3 { + execsql { + PRAGMA temp_store = 'memory'; + ROLLBACK; + BEGIN; + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t2 SELECT * FROM t1; + } + catchsql { INSERT INTO t1 SELECT * FROM t2 } + set sqlite_open_file_count +} [expr 1 + (0==$jrnl_in_memory)] + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/temptable.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/temptable.test --- sqlite3-3.4.2/test/temptable.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/temptable.test 2009-06-25 12:44:47.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests for temporary tables and indices. # -# $Id: temptable.test,v 1.17 2006/01/24 00:15:16 drh Exp $ +# $Id: temptable.test,v 1.21 2009/06/16 17:49:36 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -180,7 +180,7 @@ catchsql { SELECT * FROM main.t2; } db2 -} {1 {no such table: main.t2}} +} {0 {9 8 7}} #do_test temptable-4.4.3 { # catchsql { # SELECT name FROM main.sqlite_master WHERE type='table'; @@ -404,11 +404,37 @@ } {1 {no such table: t9}} file delete -force test2.db test2.db-journal -do_test temptable-7.1 { - catchsql { - ATTACH 'test2.db' AS two; - CREATE TEMP TABLE two.abc(x,y); +ifcapable attach { + do_test temptable-7.1 { + catchsql { + ATTACH 'test2.db' AS two; + CREATE TEMP TABLE two.abc(x,y); + } + } {1 {temporary table name must be unqualified}} +} + +# Need to do the following for tcl 8.5 on mac. On that configuration, the +# -readonly flag is taken so seriously that a subsequent [file delete -force] +# (required before the next test file can be executed) will fail. 
+# +catch {file attributes test.db -readonly 0} + +do_test temptable-8.0 { + db close + catch {file delete -force test.db} + sqlite3 db test.db +} {} +do_test temptable-8.1 { + execsql { CREATE TEMP TABLE tbl2(a, b); } + execsql { + CREATE TABLE tbl(a, b); + INSERT INTO tbl VALUES(1, 2); } -} {1 {temporary table name must be unqualified}} + execsql {SELECT * FROM tbl} +} {1 2} +do_test temptable-8.2 { + execsql { CREATE TEMP TABLE tbl(a, b); } + execsql {SELECT * FROM tbl} +} {} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/temptrigger.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/temptrigger.test --- sqlite3-3.4.2/test/temptrigger.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/temptrigger.test 2009-06-25 12:23:19.000000000 +0100 @@ -0,0 +1,204 @@ +# 2009 February 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: temptrigger.test,v 1.3 2009/04/15 13:07:19 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable {!trigger || !shared_cache} { finish_test ; return } + +# Test cases: +# +# temptrigger-1.*: Shared cache problem. +# temptrigger-2.*: A similar shared cache problem. +# temptrigger-3.*: Attached database problem. +# + +#------------------------------------------------------------------------- +# Test case temptrigger-1.* demonstrates a problem with temp triggers +# in shared-cache mode. If process 1 connections to a shared-cache and +# creates a temp trigger, the temp trigger is linked into the shared-cache +# schema. If process 2 reloads the shared-cache schema from disk, then +# it does not recreate the temp trigger belonging to process 1. From the +# point of view of process 1, the temp trigger just disappeared. +# +# temptrigger-1.1: In shared cache mode, create a table in the main +# database and add a temp trigger to it. +# +# temptrigger-1.2: Check that the temp trigger is correctly fired. Check +# that the temp trigger is not fired by statements +# executed by a second connection connected to the +# same shared cache. +# +# temptrigger-1.3: Using the second connection to the shared-cache, cause +# the shared-cache schema to be reloaded. +# +# temptrigger-1.4: Check that the temp trigger is still fired correctly. +# +# temptrigger-1.5: Check that the temp trigger can be dropped without error. +# +db close +set ::enable_shared_cache [sqlite3_enable_shared_cache] +sqlite3_enable_shared_cache 1 + +sqlite3 db test.db +sqlite3 db2 test.db + +do_test temptrigger-1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE TEMP TABLE tt1(a, b); + CREATE TEMP TRIGGER tr1 AFTER INSERT ON t1 BEGIN + INSERT INTO tt1 VALUES(new.a, new.b); + END; + } +} {} + +do_test temptrigger-1.2.1 { + execsql { INSERT INTO t1 VALUES(1, 2) } + execsql { SELECT * FROM t1 } +} {1 2} +do_test temptrigger-1.2.2 { + execsql { SELECT * FROM tt1 } +} {1 2} +do_test temptrigger-1.2.3 { + execsql { INSERT INTO t1 VALUES(3, 4) } db2 + execsql { SELECT * FROM t1 } +} {1 2 3 4} +do_test temptrigger-1.2.4 { + execsql { SELECT * FROM tt1 } +} {1 2} + +# Cause the shared-cache schema to be reloaded. 
+# +do_test temptrigger-1.3 { + execsql { BEGIN; CREATE TABLE t3(a, b); ROLLBACK; } db2 +} {} + +do_test temptrigger-1.4 { + execsql { INSERT INTO t1 VALUES(5, 6) } + execsql { SELECT * FROM tt1 } +} {1 2 5 6} + +do_test temptrigger-1.5 { + # Before the bug was fixed, the following 'DROP TRIGGER' hit an + # assert if executed. + #execsql { DROP TRIGGER tr1 } +} {} + +catch {db close} +catch {db2 close} + +#------------------------------------------------------------------------- +# Tests temptrigger-2.* are similar to temptrigger-1.*, except that +# temptrigger-2.3 simply opens and closes a connection to the shared-cache. +# It does not do anything special to cause the schema to be reloaded. +# +do_test temptrigger-2.1 { + sqlite3 db test.db + execsql { + DELETE FROM t1; + CREATE TEMP TABLE tt1(a, b); + CREATE TEMP TRIGGER tr1 AFTER INSERT ON t1 BEGIN + INSERT INTO tt1 VALUES(new.a, new.b); + END; + } +} {} +do_test temptrigger-2.2 { + execsql { + INSERT INTO t1 VALUES(10, 20); + SELECT * FROM tt1; + } +} {10 20} +do_test temptrigger-2.3 { + sqlite3 db2 test.db + db2 close +} {} +do_test temptrigger-2.4 { + execsql { + INSERT INTO t1 VALUES(30, 40); + SELECT * FROM tt1; + } +} {10 20 30 40} +do_test temptrigger-2.5 { + #execsql { DROP TRIGGER tr1 } +} {} + +catch {db close} +catch {db2 close} +sqlite3_enable_shared_cache $::enable_shared_cache + +#------------------------------------------------------------------------- +# Test case temptrigger-3.* demonstrates a problem with temp triggers +# on tables located in attached databases. At one point when SQLite reloaded +# the schema of an attached database (because some other connection had +# changed the schema cookie) it was not re-creating temp triggers attached +# to tables located within the attached database. +# +# temptrigger-3.1: Attach database 'test2.db' to connection [db]. Add a +# temp trigger to a table in 'test2.db'. +# +# temptrigger-3.2: Check that the temp trigger is correctly fired. +# +# temptrigger-3.3: Update the schema of 'test2.db' using an external +# connection. This forces [db] to reload the 'test2.db' +# schema. Check that the temp trigger is still fired +# correctly. +# +# temptrigger-3.4: Check that the temp trigger can be dropped without error. +# +do_test temptrigger-3.1 { + catch { file delete -force test2.db test2.db-journal } + catch { file delete -force test.db test.db-journal } + sqlite3 db test.db + sqlite3 db2 test2.db + execsql { CREATE TABLE t2(a, b) } db2 + execsql { + ATTACH 'test2.db' AS aux; + CREATE TEMP TABLE tt2(a, b); + CREATE TEMP TRIGGER tr2 AFTER INSERT ON aux.t2 BEGIN + INSERT INTO tt2 VALUES(new.a, new.b); + END; + } +} {} + +do_test temptrigger-3.2.1 { + execsql { + INSERT INTO aux.t2 VALUES(1, 2); + SELECT * FROM aux.t2; + } +} {1 2} +do_test temptrigger-3.2.2 { + execsql { SELECT * FROM tt2 } +} {1 2} + +do_test temptrigger-3.3.1 { + execsql { CREATE TABLE t3(a, b) } db2 + execsql { + INSERT INTO aux.t2 VALUES(3, 4); + SELECT * FROM aux.t2; + } +} {1 2 3 4} +do_test temptrigger-3.3.2 { + execsql { SELECT * FROM tt2 } +} {1 2 3 4} + +do_test temptrigger-3.4 { + # Before the bug was fixed, the following 'DROP TRIGGER' hit an + # assert if executed. 
+ #execsql { DROP TRIGGER tr2 } +} {} + +catch { db close } +catch { db2 close } + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tester.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tester.tcl --- sqlite3-3.4.2/test/tester.tcl 2007-08-13 13:30:35.000000000 +0100 +++ sqlite3-3.6.16/test/tester.tcl 2009-06-25 12:22:34.000000000 +0100 @@ -11,37 +11,23 @@ # This file implements some common TCL routines used for regression # testing the SQLite library # -# $Id: tester.tcl,v 1.82 2007/08/10 16:41:09 drh Exp $ +# $Id: tester.tcl,v 1.143 2009/04/09 01:23:49 drh Exp $ -# Make sure tclsqlite3 was compiled correctly. Abort now with an -# error message if not. # -if {[sqlite3 -tcl-uses-utf]} { - if {"\u1234"=="u1234"} { - puts stderr "***** BUILD PROBLEM *****" - puts stderr "$argv0 was linked against an older version" - puts stderr "of TCL that does not support Unicode, but uses a header" - puts stderr "file (\"tcl.h\") from a new TCL version that does support" - puts stderr "Unicode. This combination causes internal errors." - puts stderr "Recompile using a TCL library and header file that match" - puts stderr "and try again.\n**************************" - exit 1 - } -} else { - if {"\u1234"!="u1234"} { - puts stderr "***** BUILD PROBLEM *****" - puts stderr "$argv0 was linked against an newer version" - puts stderr "of TCL that supports Unicode, but uses a header file" - puts stderr "(\"tcl.h\") from a old TCL version that does not support" - puts stderr "Unicode. This combination causes internal errors." - puts stderr "Recompile using a TCL library and header file that match" - puts stderr "and try again.\n**************************" - exit 1 +# What for user input before continuing. This gives an opportunity +# to connect profiling tools to the process. +# +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[regexp {^-+pause$} [lindex $argv $i] all value]} { + puts -nonewline "Press RETURN to begin..." + flush stdout + gets stdin + set argv [lreplace $argv $i $i] } } set tcl_precision 15 -set sqlite_pending_byte 0x0010000 +sqlite3_test_control_pending_byte 0x0010000 # # Check the command-line arguments for a default soft-heap-limit. @@ -63,6 +49,70 @@ } sqlite3_soft_heap_limit $soft_limit +# +# Check the command-line arguments to set the memory debugger +# backtrace depth. +# +# See the sqlite3_memdebug_backtrace() function in mem2.c or +# test_malloc.c for additional information. 
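The switches handled below are passed on the testfixture command line along with a test script; a typical invocation might look like this (illustrative only, the binary name depends on the build):

  ./testfixture test/select1.test --backtrace=10 --maxerror=1 --malloctrace

Here --malloctrace enables the allocation log that is written to mallocs.sql when the run finishes, --backtrace=N sets the memory-debugger backtrace depth recorded for each allocation, and --maxerror=N stops the run after N test failures.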
+# +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[lindex $argv $i] eq "--malloctrace"} { + set argv [lreplace $argv $i $i] + sqlite3_memdebug_backtrace 10 + sqlite3_memdebug_log start + set tester_do_malloctrace 1 + } +} +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[regexp {^--backtrace=(\d+)$} [lindex $argv $i] all value]} { + sqlite3_memdebug_backtrace $value + set argv [lreplace $argv $i $i] + } +} + + +proc ostrace_call {zCall nClick zFile i32 i64} { + set s "INSERT INTO ostrace VALUES('$zCall', $nClick, '$zFile', $i32, $i64);" + puts $::ostrace_fd $s +} + +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[lindex $argv $i] eq "--ossummary" || [lindex $argv $i] eq "--ostrace"} { + sqlite3_instvfs create -default ostrace + set tester_do_ostrace 1 + set ostrace_fd [open ostrace.sql w] + puts $ostrace_fd "BEGIN;" + if {[lindex $argv $i] eq "--ostrace"} { + set s "CREATE TABLE ostrace" + append s "(method TEXT, clicks INT, file TEXT, i32 INT, i64 INT);" + puts $ostrace_fd $s + sqlite3_instvfs configure ostrace ostrace_call + sqlite3_instvfs configure ostrace ostrace_call + } + set argv [lreplace $argv $i $i] + } + if {[lindex $argv $i] eq "--binarylog"} { + set tester_do_binarylog 1 + set argv [lreplace $argv $i $i] + } +} + +# +# Check the command-line arguments to set the maximum number of +# errors tolerated before halting. +# +if {![info exists maxErr]} { + set maxErr 1000 +} +for {set i 0} {$i<[llength $argv]} {incr i} { + if {[regexp {^--maxerror=(\d+)$} [lindex $argv $i] all maxErr]} { + set argv [lreplace $argv $i $i] + } +} +#puts "Max error = $maxErr" + + # Use the pager codec if it is available # if {[sqlite3 -has-codec] && [info command sqlite_orig]==""} { @@ -78,14 +128,28 @@ # Create a test database # -catch {db close} -file delete -force test.db -file delete -force test.db-journal -sqlite3 db ./test.db -set ::DB [sqlite3_connection_pointer db] -if {[info exists ::SETUP_SQL]} { - db eval $::SETUP_SQL +if {![info exists nTest]} { + sqlite3_shutdown + install_malloc_faultsim 1 + sqlite3_initialize + autoinstall_test_functions + if {[info exists tester_do_binarylog]} { + sqlite3_instvfs binarylog -default binarylog ostrace.bin + sqlite3_instvfs marker binarylog "$argv0 $argv" + } +} + +proc reset_db {} { + catch {db close} + file delete -force test.db + file delete -force test.db-journal + sqlite3 db ./test.db + set ::DB [sqlite3_connection_pointer db] + if {[info exists ::SETUP_SQL]} { + db eval $::SETUP_SQL + } } +reset_db # Abort early if this script has been run before. # @@ -97,16 +161,26 @@ set nTest 0 set skip_test 0 set failList {} -set maxErr 1000 +set omitList {} if {![info exists speedTest]} { set speedTest 0 } +# Record the fact that a sequence of tests were omitted. +# +proc omit_test {name reason} { + global omitList + lappend omitList [list $name $reason] +} + # Invoke the do_test procedure to run a single test # proc do_test {name cmd expected} { global argv nErr nTest skip_test maxErr - set ::sqlite_malloc_id $name + sqlite3_memdebug_settitle $name + if {[info exists ::tester_do_binarylog]} { + sqlite3_instvfs marker binarylog "Start of $name" + } if {$skip_test} { set skip_test 0 return @@ -140,6 +214,9 @@ puts " Ok" } flush stdout + if {[info exists ::tester_do_binarylog]} { + sqlite3_instvfs marker binarylog "End of $name" + } } # Run an SQL script. 
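Nearly every case in this patch is driven by the do_test proc above: do_test NAME SCRIPT EXPECTED evaluates SCRIPT and compares its result, as a string, against EXPECTED, printing Ok on a match and recording a failure otherwise. A self-contained sketch of the pattern (the test name is made up):

  do_test example-1.0 {
    execsql { SELECT 2+2, 'ok' }
  } {4 ok}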
@@ -150,9 +227,28 @@ flush stdout set speed [time {sqlite3_exec_nr db $sql}] set tm [lindex $speed 0] - set rate [expr {1000000.0*$numstmt/$tm}] + if {$tm == 0} { + set rate [format %20s "many"] + } else { + set rate [format %20.5f [expr {1000000.0*$numstmt/$tm}]] + } set u2 $units/s - puts [format {%12d uS %20.5f %s} $tm $rate $u2] + puts [format {%12d uS %s %s} $tm $rate $u2] + global total_time + set total_time [expr {$total_time+$tm}] +} +proc speed_trial_tcl {name numstmt units script} { + puts -nonewline [format {%-21.21s } $name...] + flush stdout + set speed [time {eval $script}] + set tm [lindex $speed 0] + if {$tm == 0} { + set rate [format %20s "many"] + } else { + set rate [format %20.5f [expr {1000000.0*$numstmt/$tm}]] + } + set u2 $units/s + puts [format {%12d uS %s %s} $tm $rate $u2] global total_time set total_time [expr {$total_time+$tm}] } @@ -165,39 +261,23 @@ puts [format {%-21.21s %12d uS TOTAL} $name $total_time] } -# The procedure uses the special "sqlite_malloc_stat" command -# (which is only available if SQLite is compiled with -DSQLITE_DEBUG=1) -# to see how many malloc()s have not been free()ed. The number -# of surplus malloc()s is stored in the global variable $::Leak. -# If the value in $::Leak grows, it may mean there is a memory leak -# in the library. -# -proc memleak_check {} { - if {[info command sqlite_malloc_stat]!=""} { - set r [sqlite_malloc_stat] - set ::Leak [expr {[lindex $r 0]-[lindex $r 1]}] - } -} - # Run this routine last # proc finish_test {} { finalize_testing } proc finalize_testing {} { - global nTest nErr sqlite_open_file_count - if {$nErr==0} memleak_check + global nTest nErr sqlite_open_file_count omitList catch {db close} catch {db2 close} catch {db3 close} - catch { - pp_check_for_leaks - } + vfs_unlink_test sqlite3 db {} # sqlite3_clear_tsd_memdebug db close + sqlite3_reset_auto_extension set heaplimit [sqlite3_soft_heap_limit] if {$heaplimit!=$::soft_limit} { puts "soft-heap-limit changed by this script\ @@ -206,15 +286,21 @@ puts "soft-heap-limit set to $heaplimit" } sqlite3_soft_heap_limit 0 - if {$::sqlite3_tsd_count} { - puts "Thread-specific data leak: $::sqlite3_tsd_count instances" - incr nErr - } else { - puts "Thread-specific data deallocated properly" - } incr nTest puts "$nErr errors out of $nTest tests" - puts "Failures on these tests: $::failList" + if {$nErr>0} { + puts "Failures on these tests: $::failList" + } + run_thread_tests 1 + if {[llength $omitList]>0} { + puts "Omitted test cases:" + set prec {} + foreach {rec} [lsort $omitList] { + if {$rec==$prec} continue + set prec $rec + puts [format { %-12s %s} [lindex $rec 0] [lindex $rec 1]] + } + } if {$nErr>0 && ![working_64bit_int]} { puts "******************************************************************" puts "N.B.: The version of TCL that you used to build this test harness" @@ -223,10 +309,58 @@ puts "in your TCL build." puts "******************************************************************" } + if {[info exists ::tester_do_binarylog]} { + sqlite3_instvfs destroy binarylog + } if {$sqlite_open_file_count} { puts "$sqlite_open_file_count files were left open" incr nErr } + if {[info exists ::tester_do_ostrace]} { + puts "Writing ostrace.sql..." 
+ set fd $::ostrace_fd + + puts -nonewline $fd "CREATE TABLE ossummary" + puts $fd "(method TEXT, clicks INTEGER, count INTEGER);" + foreach row [sqlite3_instvfs report ostrace] { + foreach {method count clicks} $row break + puts $fd "INSERT INTO ossummary VALUES('$method', $clicks, $count);" + } + puts $fd "COMMIT;" + close $fd + sqlite3_instvfs destroy ostrace + } + if {[sqlite3_memory_used]>0} { + puts "Unfreed memory: [sqlite3_memory_used] bytes" + incr nErr + ifcapable memdebug||mem5||(mem3&&debug) { + puts "Writing unfreed memory log to \"./memleak.txt\"" + sqlite3_memdebug_dump ./memleak.txt + } + } else { + puts "All memory allocations freed - no leaks" + ifcapable memdebug||mem5 { + sqlite3_memdebug_dump ./memusage.txt + } + } + show_memstats + puts "Maximum memory usage: [sqlite3_memory_highwater 1] bytes" + puts "Current memory usage: [sqlite3_memory_highwater] bytes" + if {[info commands sqlite3_memdebug_malloc_count] ne ""} { + puts "Number of malloc() : [sqlite3_memdebug_malloc_count] calls" + } + if {[info exists ::tester_do_malloctrace]} { + puts "Writing mallocs.sql..." + memdebug_log_sql + sqlite3_memdebug_log stop + sqlite3_memdebug_log clear + + if {[sqlite3_memory_used]>0} { + puts "Writing leaks.sql..." + sqlite3_memdebug_log sync + memdebug_log_sql leaks.sql + } + } foreach f [glob -nocomplain test.db-*-journal] { file delete -force $f } @@ -236,6 +370,37 @@ exit [expr {$nErr>0}] } +# Display memory statistics for analysis and debugging purposes. +# +proc show_memstats {} { + set x [sqlite3_status SQLITE_STATUS_MEMORY_USED 0] + set y [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] + set val [format {now %10d max %10d max-size %10d} \ + [lindex $x 1] [lindex $x 2] [lindex $y 2]] + puts "Memory used: $val" + set x [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] + set y [sqlite3_status SQLITE_STATUS_PAGECACHE_SIZE 0] + set val [format {now %10d max %10d max-size %10d} \ + [lindex $x 1] [lindex $x 2] [lindex $y 2]] + puts "Page-cache used: $val" + set x [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] + set val [format {now %10d max %10d} [lindex $x 1] [lindex $x 2]] + puts "Page-cache overflow: $val" + set x [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] + set val [format {now %10d max %10d} [lindex $x 1] [lindex $x 2]] + puts "Scratch memory used: $val" + set x [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] + set y [sqlite3_status SQLITE_STATUS_SCRATCH_SIZE 0] + set val [format {now %10d max %10d max-size %10d} \ + [lindex $x 1] [lindex $x 2] [lindex $y 2]] + puts "Scratch overflow: $val" + ifcapable yytrackmaxstackdepth { + set x [sqlite3_status SQLITE_STATUS_PARSER_STACK 0] + set val [format { max %10d} [lindex $x 2]] + puts "Parser stack depth: $val" + } +} + # A procedure to execute SQL # proc execsql {sql {db db}} { @@ -256,13 +421,24 @@ # proc explain {sql {db db}} { puts "" - puts "addr opcode p1 p2 p3 " - puts "---- ------------ ------ ------ ---------------" + puts "addr opcode p1 p2 p3 p4 p5 #" + puts "---- ------------ ------ ------ ------ --------------- -- -" $db eval "explain $sql" {} { - puts [format {%-4d %-12.12s %-6d %-6d %s} $addr $opcode $p1 $p2 $p3] + puts [format {%-4d %-12.12s %-6d %-6d %-6d % -17s %s %s} \ + $addr $opcode $p1 $p2 $p3 $p4 $p5 $comment + ] } } +# Show the VDBE program for an SQL statement but omit the Trace +# opcode at the beginning. This procedure can be used to prove +# that different SQL statements generate exactly the same VDBE code. 
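A hypothetical use of the proc defined next: compare two spellings of a query to see whether they compile to the same VDBE program (the table and column names are illustrative):

  set a [explain_no_trace {SELECT b FROM t1 WHERE a=1}]
  set b [explain_no_trace {SELECT b FROM t1 WHERE 1=a}]
  expr {$a eq $b}    ;# 1 only if both statements generate identical code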
+# +proc explain_no_trace {sql} { + set tr [db eval "EXPLAIN $sql"] + return [lrange $tr 7 end] +} + # Another procedure to execute SQL. This one includes the field # names in the returned list. # @@ -311,19 +487,37 @@ # Do an integrity check of the entire database # -proc integrity_check {name} { +proc integrity_check {name {db db}} { ifcapable integrityck { - do_test $name { - execsql {PRAGMA integrity_check} - } {ok} + do_test $name [list execsql {PRAGMA integrity_check} $db] {ok} } } +proc fix_ifcapable_expr {expr} { + set ret "" + set state 0 + for {set i 0} {$i < [string length $expr]} {incr i} { + set char [string range $expr $i $i] + set newstate [expr {[string is alnum $char] || $char eq "_"}] + if {$newstate && !$state} { + append ret {$::sqlite_options(} + } + if {!$newstate && $state} { + append ret ) + } + append ret $char + set state $newstate + } + if {$state} {append ret )} + return $ret +} + # Evaluate a boolean expression of capabilities. If true, execute the # code. Omit the code if false. # proc ifcapable {expr code {else ""} {elsecode ""}} { - regsub -all {[a-z_0-9]+} $expr {$::sqlite_options(&)} e2 + #regsub -all {[a-z_0-9]+} $expr {$::sqlite_options(&)} e2 + set e2 [fix_ifcapable_expr $expr] if ($e2) { set c [catch {uplevel 1 $code} r] } else { @@ -346,7 +540,7 @@ # error message. This is "child process exited abnormally" if the crash # occured. # -# crashsql -delay CRASHDELAY -file CRASHFILE ?-blocksize BLOCKSIZE $sql +# crashsql -delay CRASHDELAY -file CRASHFILE ?-blocksize BLOCKSIZE? $sql # proc crashsql {args} { if {$::tcl_platform(platform)!="unix"} { @@ -355,7 +549,10 @@ set blocksize "" set crashdelay 1 + set prngseed 0 + set tclbody {} set crashfile "" + set dc "" set sql [lindex $args end] for {set ii 0} {$ii < [llength $args]-1} {incr ii 2} { @@ -364,8 +561,11 @@ set z2 [lindex $args [expr $ii+1]] if {$n>1 && [string first $z -delay]==0} {set crashdelay $z2} \ + elseif {$n>1 && [string first $z -seed]==0} {set prngseed $z2} \ elseif {$n>1 && [string first $z -file]==0} {set crashfile $z2} \ - elseif {$n>1 && [string first $z -blocksize]==0} {set blocksize $z2} \ + elseif {$n>1 && [string first $z -tclbody]==0} {set tclbody $z2} \ + elseif {$n>1 && [string first $z -blocksize]==0} {set blocksize "-s $z2" } \ + elseif {$n>1 && [string first $z -characteristics]==0} {set dc "-c {$z2}" } \ else { error "Unrecognized option: $z" } } @@ -376,9 +576,10 @@ set cfile [file join [pwd] $crashfile] set f [open crash.tcl w] - puts $f "sqlite3_crashparams $crashdelay $cfile $blocksize" - puts $f "set sqlite_pending_byte $::sqlite_pending_byte" - puts $f "sqlite3 db test.db" + puts $f "sqlite3_crash_enable 1" + puts $f "sqlite3_crashparams $blocksize $dc $crashdelay $cfile" + puts $f "sqlite3_test_control_pending_byte $::sqlite_pending_byte" + puts $f "sqlite3 db test.db -vfs crash" # This block sets the cache size of the main database to 10 # pages. 
This is done in case the build is configured to omit @@ -386,10 +587,20 @@ puts $f {db eval {SELECT * FROM sqlite_master;}} puts $f {set bt [btree_from_db db]} puts $f {btree_set_cache_size $bt 10} + if {$prngseed} { + set seed [expr {$prngseed%10007+1}] + # puts seed=$seed + puts $f "db eval {SELECT randomblob($seed)}" + } - puts $f "db eval {" - puts $f "$sql" - puts $f "}" + if {[string length $tclbody]>0} { + puts $f $tclbody + } + if {[string length $sql]>0} { + puts $f "db eval {" + puts $f "$sql" + puts $f "}" + } close $f set r [catch { @@ -423,9 +634,17 @@ set ::ioerropts(-erc) 0 set ::ioerropts(-count) 100000000 set ::ioerropts(-persist) 1 + set ::ioerropts(-ckrefcount) 0 + set ::ioerropts(-restoreprng) 1 array set ::ioerropts $args + # TEMPORARY: For 3.5.9, disable testing of extended result codes. There are + # a couple of obscure IO errors that do not return them. + set ::ioerropts(-erc) 0 + set ::go 1 + #reset_prng_state + save_prng_state for {set n $::ioerropts(-start)} {$::go} {incr n} { set ::TN $n incr ::ioerropts(-count) -1 @@ -435,12 +654,16 @@ if {[info exists ::ioerropts(-exclude)]} { if {[lsearch $::ioerropts(-exclude) $n]!=-1} continue } + if {$::ioerropts(-restoreprng)} { + restore_prng_state + } # Delete the files test.db and test2.db, then execute the TCL and # SQL (in that order) to prepare for the test case. do_test $testname.$n.1 { set ::sqlite_io_error_pending 0 catch {db close} + catch {db2 close} catch {file delete -force test.db} catch {file delete -force test.db-journal} catch {file delete -force test2.db} @@ -460,7 +683,7 @@ if {$::ioerropts(-cksum)} { set checksum [cksum] } - + # Set the Nth IO error to fail. do_test $testname.$n.2 [subst { set ::sqlite_io_error_persist $::ioerropts(-persist) @@ -481,7 +704,10 @@ # there are at least N IO operations performed by SQLite as # a result of the script, the Nth will fail. do_test $testname.$n.3 { + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_hardhit 0 set r [catch $::ioerrorbody msg] + set ::errseen $r set rc [sqlite3_errcode $::DB] if {$::ioerropts(-erc)} { # If we are in extended result code mode, make sure all of the @@ -499,28 +725,91 @@ return $rc } } - # The test repeats as long as $::go is true. - set ::go [expr {$::sqlite_io_error_pending<=0}] + # The test repeats as long as $::go is non-zero. $::go starts out + # as 1. When a test runs to completion without hitting an I/O + # error, that means there is no point in continuing with this test + # case so set $::go to zero. + # + if {$::sqlite_io_error_pending>0} { + set ::go 0 + set q 0 + set ::sqlite_io_error_pending 0 + } else { + set q 1 + } + set s [expr $::sqlite_io_error_hit==0] + if {$::sqlite_io_error_hit>$::sqlite_io_error_hardhit && $r==0} { + set r 1 + } set ::sqlite_io_error_hit 0 # One of two things must have happened. either # 1. We never hit the IO error and the SQL returned OK # 2. An IO error was hit and the SQL failed # - expr { ($s && !$r && !$::go) || (!$s && $r && $::go) } + expr { ($s && !$r && !$q) || (!$s && $r && $q) } } {1} + set ::sqlite_io_error_hit 0 + set ::sqlite_io_error_pending 0 + + # Check that no page references were leaked. There should be + # a single reference if there is still an active transaction, + # or zero otherwise. + # + # UPDATE: If the IO error occurs after a 'BEGIN' but before any + # locks are established on database files (i.e. 
if the error + # occurs while attempting to detect a hot-journal file), then + # there may 0 page references and an active transaction according + # to [sqlite3_get_autocommit]. + # + if {$::go && $::sqlite_io_error_hardhit && $::ioerropts(-ckrefcount)} { + do_test $testname.$n.4 { + set bt [btree_from_db db] + db_enter db + array set stats [btree_pager_stats $bt] + db_leave db + set nRef $stats(ref) + expr {$nRef == 0 || ([sqlite3_get_autocommit db]==0 && $nRef == 1)} + } {1} + } + + # If there is an open database handle and no open transaction, + # and the pager is not running in exclusive-locking mode, + # check that the pager is in "unlocked" state. Theoretically, + # if a call to xUnlock() failed due to an IO error the underlying + # file may still be locked. + # + ifcapable pragma { + if { [info commands db] ne "" + && $::ioerropts(-ckrefcount) + && [db one {pragma locking_mode}] eq "normal" + && [sqlite3_get_autocommit db] + } { + do_test $testname.$n.5 { + set bt [btree_from_db db] + db_enter db + array set stats [btree_pager_stats $bt] + db_leave db + set stats(state) + } 0 + } + } + # If an IO error occured, then the checksum of the database should # be the same as before the script that caused the IO error was run. - if {$::go && $::ioerropts(-cksum)} { - do_test $testname.$n.4 { + # + if {$::go && $::sqlite_io_error_hardhit && $::ioerropts(-cksum)} { + do_test $testname.$n.6 { catch {db close} + catch {db2 close} set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] cksum } $checksum } + set ::sqlite_io_error_hardhit 0 set ::sqlite_io_error_pending 0 if {[info exists ::ioerropts(-cleanup)]} { catch $::ioerropts(-cleanup) @@ -531,7 +820,8 @@ unset ::ioerropts } -# Return a checksum based on the contents of database 'db'. +# Return a checksum based on the contents of the main database associated +# with connection $db # proc cksum {{db db}} { set txt [$db eval { @@ -550,6 +840,106 @@ return $cksum } +# Generate a checksum based on the contents of the main and temp tables +# database $db. If the checksum of two databases is the same, and the +# integrity-check passes for both, the two databases are identical. +# +proc allcksum {{db db}} { + set ret [list] + ifcapable tempdb { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT name FROM sqlite_temp_master WHERE type = 'table' UNION + SELECT 'sqlite_master' UNION + SELECT 'sqlite_temp_master' ORDER BY 1 + } + } else { + set sql { + SELECT name FROM sqlite_master WHERE type = 'table' UNION + SELECT 'sqlite_master' ORDER BY 1 + } + } + set tbllist [$db eval $sql] + set txt {} + foreach tbl $tbllist { + append txt [$db eval "SELECT * FROM $tbl"] + } + foreach prag {default_cache_size} { + append txt $prag-[$db eval "PRAGMA $prag"]\n + } + # puts txt=$txt + return [md5 $txt] +} + +# Generate a checksum based on the contents of a single database with +# a database connection. The name of the database is $dbname. +# Examples of $dbname are "temp" or "main". 
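A hypothetical use of the dbcksum proc defined next: checksum the main database before and after an operation that should leave its contents untouched, then compare (the table and column names are illustrative):

  set before [dbcksum db main]
  db eval { UPDATE t1 SET b=b }    ;# rewrites rows without changing any values
  set after [dbcksum db main]
  expr {$before eq $after}         ;# expected to be 1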
+# +proc dbcksum {db dbname} { + if {$dbname=="temp"} { + set master sqlite_temp_master + } else { + set master $dbname.sqlite_master + } + set alltab [$db eval "SELECT name FROM $master WHERE type='table'"] + set txt [$db eval "SELECT * FROM $master"]\n + foreach tab $alltab { + append txt [$db eval "SELECT * FROM $dbname.$tab"]\n + } + return [md5 $txt] +} + +proc memdebug_log_sql {{filename mallocs.sql}} { + + set data [sqlite3_memdebug_log dump] + set nFrame [expr [llength [lindex $data 0]]-2] + if {$nFrame < 0} { return "" } + + set database temp + + set tbl "CREATE TABLE ${database}.malloc(zTest, nCall, nByte, lStack);" + + set sql "" + foreach e $data { + set nCall [lindex $e 0] + set nByte [lindex $e 1] + set lStack [lrange $e 2 end] + append sql "INSERT INTO ${database}.malloc VALUES" + append sql "('test', $nCall, $nByte, '$lStack');\n" + foreach f $lStack { + set frames($f) 1 + } + } + + set tbl2 "CREATE TABLE ${database}.frame(frame INTEGER PRIMARY KEY, line);\n" + set tbl3 "CREATE TABLE ${database}.file(name PRIMARY KEY, content);\n" + + foreach f [array names frames] { + set addr [format %x $f] + set cmd "addr2line -e [info nameofexec] $addr" + set line [eval exec $cmd] + append sql "INSERT INTO ${database}.frame VALUES($f, '$line');\n" + + set file [lindex [split $line :] 0] + set files($file) 1 + } + + foreach f [array names files] { + set contents "" + catch { + set fd [open $f] + set contents [read $fd] + close $fd + } + set contents [string map {' ''} $contents] + append sql "INSERT INTO ${database}.file VALUES('$f', '$contents');\n" + } + + set fd [open $filename w] + puts $fd "BEGIN; ${tbl}${tbl2}${tbl3}${sql} ; COMMIT;" + close $fd +} + # Copy file $from into $to. This is used because some versions of # TCL for windows (notably the 8.4.1 binary package shipped with the # current mingw release) have a broken "file copy" command. @@ -568,63 +958,8 @@ } } -# This command checks for outstanding calls to sqliteMalloc() from within -# the current thread. A list is returned with one entry for each outstanding -# malloc. Each list entry is itself a list of 5 items, as follows: -# -# { } -# -proc check_for_leaks {} { - set ret [list] - set cnt 0 - foreach alloc [sqlite_malloc_outstanding] { - foreach {nBytes file iLine userstring backtrace} $alloc {} - set stack [list] - set skip 0 - - # The first command in this block will probably fail on windows. This - # means there will be no stack dump available. - if {$cnt < 25 && $backtrace!=""} { - catch { - set stuff [eval "exec addr2line -e ./testfixture -f $backtrace"] - foreach {func line} $stuff { - if {$func != "??" || $line != "??:0"} { - regexp {.*/(.*)} $line dummy line - lappend stack "${func}() $line" - } else { - if {[lindex $stack end] != "..."} { - lappend stack "..." - } - } - } - } - incr cnt - } - - if {!$skip} { - lappend ret [list $nBytes $file $iLine $userstring $stack] - } - } - return $ret -} - -# Pretty print a report based on the return value of [check_for_leaks] to -# stdout. -proc pp_check_for_leaks {} { - set l [check_for_leaks] - set n 0 - foreach leak $l { - foreach {nBytes file iLine userstring stack} $leak {} - puts "$nBytes bytes leaked at $file:$iLine ($userstring)" - foreach frame $stack { - puts " $frame" - } - incr n $nBytes - } - puts "Memory leaked: $n bytes in [llength $l] allocations" - puts "" -} - # If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set # to non-zero, then set the global variable $AUTOVACUUM to 1. 
set AUTOVACUUM $sqlite_options(default_autovacuum) + +source $testdir/thread_common.tcl diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread001.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread001.test --- sqlite3-3.4.2/test/thread001.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread001.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,145 @@ +# 2007 September 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: thread001.test,v 1.10 2009/03/26 14:48:07 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } + +set ::enable_shared_cache [sqlite3_enable_shared_cache] + +set ::NTHREAD 10 + +# Run this test three times: +# +# 1) All threads use the same database handle. +# 2) All threads use their own database handles. +# 3) All threads use their own database handles, shared-cache is enabled. +# +# +# +foreach {tn same_db shared_cache} [list \ + 1 1 0 \ + 2 0 0 \ + 3 0 1 \ +] { + # Empty the database. + # + catchsql { DROP TABLE ab; } + + do_test thread001.$tn.0 { + db close + sqlite3_enable_shared_cache $shared_cache + sqlite3_enable_shared_cache $shared_cache + } $shared_cache + sqlite3 db test.db -fullmutex 1 + + set dbconfig "" + if {$same_db} { + set dbconfig [list set ::DB [sqlite3_connection_pointer db]] + } + + # Set up a database and a schema. The database contains a single + # table with two columns. The first column ("a") is an INTEGER PRIMARY + # KEY. The second contains the md5sum of all rows in the table with + # a smaller value stored in column "a". + # + do_test thread001.$tn.1 { + execsql { + CREATE TABLE ab(a INTEGER PRIMARY KEY, b); + CREATE INDEX ab_i ON ab(b); + INSERT INTO ab SELECT NULL, md5sum(a, b) FROM ab; + SELECT count(*) FROM ab; + } + } {1} + do_test thread001.$tn.2 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + do_test thread001.$tn.3 { + execsql { PRAGMA integrity_check } + } {ok} + + set thread_program { + #sqlthread parent {puts STARTING..} + set needToClose 0 + if {![info exists ::DB]} { + set ::DB [sqlthread open test.db] + #sqlthread parent "puts \"OPEN $::DB\"" + set needToClose 1 + } + + for {set i 0} {$i < 100} {incr i} { + # Test that the invariant is true. + do_test t1 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + + # Add another row to the database. + execsql { INSERT INTO ab SELECT NULL, md5sum(a, b) FROM ab } + } + + if {$needToClose} { + #sqlthread parent "puts \"CLOSE $::DB\"" + sqlite3_close $::DB + } + #sqlthread parent "puts \"DONE\"" + + list OK + } + + # Kick off $::NTHREAD threads: + # + array unset finished + for {set i 0} {$i < $::NTHREAD} {incr i} { + thread_spawn finished($i) $dbconfig $thread_procs $thread_program + } + + # Wait for all threads to finish, then check they all returned "OK". + # + for {set i 0} {$i < $::NTHREAD} {incr i} { + if {![info exists finished($i)]} { + vwait finished($i) + } + do_test thread001.$tn.4.$i { + set ::finished($i) + } OK + } + + # Check the database still looks Ok. 
+ # + do_test thread001.$tn.5 { + execsql { SELECT count(*) FROM ab; } + } [expr {1 + $::NTHREAD*100}] + do_test thread001.$tn.6 { + execsql { + SELECT + (SELECT md5sum(a, b) FROM ab WHERE a < (SELECT max(a) FROM ab)) == + (SELECT b FROM ab WHERE a = (SELECT max(a) FROM ab)) + } + } {1} + do_test thread001.$tn.7 { + execsql { PRAGMA integrity_check } + } {ok} +} + +sqlite3_enable_shared_cache $::enable_shared_cache +set sqlite_open_file_count 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread002.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread002.test --- sqlite3-3.4.2/test/thread002.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread002.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,103 @@ +# 2007 September 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test attempts to deadlock SQLite in shared-cache mode. +# +# +# $Id: thread002.test,v 1.9 2009/03/26 14:48:07 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } + +db close +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +set ::NTHREAD 10 + +do_test thread002.1 { + # Create 3 databases with identical schemas: + for {set ii 0} {$ii < 3} {incr ii} { + file delete -force test${ii}.db + sqlite3 db test${ii}.db + execsql { + CREATE TABLE t1(k, v); + CREATE INDEX t1_i ON t1(v); + INSERT INTO t1(v) VALUES(1.0); + } + db close + } +} {} + +set thread_program { + set ::DB [sqlite3_open test.db] + for {set ii 1} {$ii <= 3} {incr ii} { + set T [lindex $order [expr $ii-1]] + execsql "ATTACH 'test${T}.db' AS aux${ii}" + } + + for {set ii 0} {$ii < 100} {incr ii} { + execsql { SELECT * FROM aux1.t1 } + execsql { INSERT INTO aux1.t1(v) SELECT sum(v) FROM aux2.t1 } + + execsql { SELECT * FROM aux2.t1 } + execsql { INSERT INTO aux2.t1(v) SELECT sum(v) FROM aux3.t1 } + + execsql { SELECT * FROM aux3.t1 } + execsql { INSERT INTO aux3.t1(v) SELECT sum(v) FROM aux1.t1 } + + execsql { CREATE TABLE IF NOT EXISTS aux1.t2(a,b) } + execsql { DROP TABLE IF EXISTS aux1.t2 } + + # if {($ii%10)==0} {puts -nonewline . ; flush stdout} + puts -nonewline . ; flush stdout + } + + sqlite3_close $::DB + list OK +} + +set order_list [list {0 1 2} {0 2 1} {1 0 2} {1 2 0} {2 0 1} {2 1 0}] + +array unset finished +for {set ii 0} {$ii < $::NTHREAD} {incr ii} { + set order [lindex $order_list [expr $ii%6]] + thread_spawn finished($ii) $thread_procs "set order {$order}" $thread_program +} + +# Wait for all threads to finish, then check they all returned "OK". +# +for {set i 0} {$i < $::NTHREAD} {incr i} { + if {![info exists finished($i)]} { + vwait finished($i) + } + do_test thread002.2.$i { + set ::finished($i) + } OK +} + +# Check all three databases are Ok. 
+for {set ii 0} {$ii < 3} {incr ii} { + do_test thread002.3.$ii { + sqlite3 db test${ii}.db + set res [list \ + [execsql {SELECT count(*) FROM t1}] \ + [execsql {PRAGMA integrity_check}] \ + ] + db close + set res + } [list [expr 1 + $::NTHREAD*100] ok] +} + +sqlite3_enable_shared_cache $::enable_shared_cache +set sqlite_open_file_count 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread003.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread003.test --- sqlite3-3.4.2/test/thread003.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread003.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,189 @@ +# 2007 September 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests that attempt to break the pcache module +# by bombarding it with simultaneous requests from multiple threads. +# +# $Id: thread003.test,v 1.8 2009/03/26 14:48:07 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } + +# Set up a couple of different databases full of pseudo-randomly +# generated data. +# +do_test thread003.1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a, b, c); + } + for {set ii 0} {$ii < 5000} {incr ii} { + execsql {INSERT INTO t1 VALUES($ii, randomblob(200), randomblob(200))} + } + execsql { + CREATE INDEX i1 ON t1(a, b); + COMMIT; + } +} {} +do_test thread003.1.2 { + expr {([file size test.db] / 1024) > 2000} +} {1} +do_test thread003.1.3 { + db close + file delete -force test2.db + sqlite3 db test2.db +} {} +do_test thread003.1.4 { + execsql { + BEGIN; + CREATE TABLE t1(a, b, c); + } + for {set ii 0} {$ii < 5000} {incr ii} { + execsql {INSERT INTO t1 VALUES($ii, randomblob(200), randomblob(200))} + } + execsql { + CREATE INDEX i1 ON t1(a, b); + COMMIT; + } +} {} +do_test thread003.1.5 { + expr {([file size test.db] / 1024) > 2000} +} {1} +do_test thread003.1.6 { + db close +} {} + + +# This test opens a connection on each of the large (>2MB) database files +# created by the previous block. The connections do not share a cache. +# Both "cache_size" parameters are set to 15, so there is a maximum of +# 30 pages available globally. +# +# Then, in separate threads, the databases are randomly queried over and +# over again. This will force the connections to recycle clean pages from +# each other. If there is a thread-safety problem, a segfault or assertion +# failure may eventually occur. +# +set nSecond 30 +puts "Starting thread003.2 (should run for ~$nSecond seconds)" +do_test thread003.2 { + foreach zFile {test.db test2.db} { + set SCRIPT [format { + set iEnd [expr {[clock_seconds] + %d}] + set ::DB [sqlthread open %s] + + # Set the cache size to 15 pages per cache. 30 available globally. 
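The per-thread script that follows amounts to "shrink the cache, then hammer the table with random point queries until time runs out". A single-threaded approximation with the standard Tcl bindings, assuming test.db already contains the t1 table populated above (sketch only):

    package require sqlite3

    sqlite3 db test.db
    db eval { PRAGMA cache_size = 15 }          ;# keep the page cache tiny

    set iEnd [expr {[clock seconds] + 5}]
    while {[clock seconds] < $iEnd} {
      set iQuery [expr {int(rand()*5000)}]
      db eval { SELECT * FROM t1 WHERE a = $iQuery }
    }
    db close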
+ execsql { PRAGMA cache_size = 15 } + + while {[clock_seconds] < $iEnd} { + set iQuery [expr {int(rand()*5000)}] + execsql " SELECT * FROM t1 WHERE a = $iQuery " + } + + sqlite3_close $::DB + expr 1 + } $nSecond $zFile] + + unset -nocomplain finished($zFile) + thread_spawn finished($zFile) $thread_procs $SCRIPT + } + foreach zFile {test.db test2.db} { + if {![info exists finished($zFile)]} { + vwait finished($zFile) + } + } + expr 0 +} {0} + +# This test is the same as the test above, except that each thread also +# writes to the database. This causes pages to be moved back and forth +# between the caches internal dirty and clean lists, which is another +# opportunity for a thread-related bug to present itself. +# +set nSecond 30 +puts "Starting thread003.3 (should run for ~$nSecond seconds)" +do_test thread003.3 { + foreach zFile {test.db test2.db} { + set SCRIPT [format { + set iStart [clock_seconds] + set iEnd [expr {[clock_seconds] + %d}] + set ::DB [sqlthread open %s] + + # Set the cache size to 15 pages per cache. 30 available globally. + execsql { PRAGMA cache_size = 15 } + + while {[clock_seconds] < $iEnd} { + set iQuery [expr {int(rand()*5000)}] + execsql "SELECT * FROM t1 WHERE a = $iQuery" + execsql "UPDATE t1 SET b = randomblob(200) + WHERE a < $iQuery AND a > $iQuery + 20 + " + } + + sqlite3_close $::DB + expr 1 + } $nSecond $zFile] + + unset -nocomplain finished($zFile) + thread_spawn finished($zFile) $thread_procs $SCRIPT + } + foreach zFile {test.db test2.db} { + if {![info exists finished($zFile)]} { + vwait finished($zFile) + } + } + expr 0 +} {0} + +# In this test case, one thread is continually querying the database. +# The other thread does not have a database connection, but calls +# sqlite3_release_memory() over and over again. +# +set nSecond 30 +puts "Starting thread003.4 (should run for ~$nSecond seconds)" +unset -nocomplain finished(1) +unset -nocomplain finished(2) +do_test thread003.4 { + thread_spawn finished(1) $thread_procs [format { + set iEnd [expr {[clock_seconds] + %d}] + set ::DB [sqlthread open test.db] + + # Set the cache size to 15 pages per cache. 30 available globally. + execsql { PRAGMA cache_size = 15 } + + while {[clock_seconds] < $iEnd} { + set iQuery [expr {int(rand()*5000)}] + execsql "SELECT * FROM t1 WHERE a = $iQuery" + } + + sqlite3_close $::DB + expr 1 + } $nSecond] + thread_spawn finished(2) [format { + set iEnd [expr {[clock_seconds] + %d}] + + while {[clock_seconds] < $iEnd} { + sqlite3_release_memory 1000 + } + } $nSecond] + + foreach ii {1 2} { + if {![info exists finished($ii)]} { + vwait finished($ii) + } + } + expr 0 +} {0} + +set sqlite_open_file_count 0 +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread004.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread004.test --- sqlite3-3.4.2/test/thread004.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread004.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,79 @@ +# 2009 February 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: thread004.test,v 1.3 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } +ifcapable !shared_cache { + finish_test + return +} +if { [info commands sqlite3_table_column_metadata] eq "" } { + finish_test + return +} + +# Use shared-cache mode for this test. +# +db close +set ::enable_shared_cache [sqlite3_enable_shared_cache] +sqlite3_enable_shared_cache 1 + +# Create a table in database test.db +# +sqlite3 db test.db +do_test thread004-1.1 { + execsql { CREATE TABLE t1(a, b, c) } +} {} + +do_test thread004-1.2 { + + set ThreadOne { + set iStart [clock_seconds] + while {[clock_seconds]<$iStart+20} { + set ::DB [sqlite3_open test.db] + sqlite3_close $::DB + } + } + set ThreadTwo { + set ::DB [sqlite3_open test.db] + set iStart [clock_seconds] + set nErr 0 + while {[clock_seconds] <$iStart+20} { + incr nErr [catch {sqlite3_table_column_metadata $::DB main t1 a}] + } + sqlite3_close $::DB + set nErr + } + + # Run two threads. The first thread opens and closes database test.db + # repeatedly. Each time this happens, the in-memory schema used by + # all connections to test.db is discarded. + # + # The second thread calls sqlite3_table_column_metadata() over and + # over again. Each time it is called, the database schema is loaded + # if it is not already in memory. At one point this was crashing. + # + unset -nocomplain finished + thread_spawn finished(1) $thread_procs $ThreadOne + thread_spawn finished(2) $thread_procs $ThreadTwo + + foreach t {1 2} { + if {![info exists finished($t)]} { vwait finished($t) } + } + + set finished(2) +} {0} +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread005.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread005.test --- sqlite3-3.4.2/test/thread005.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread005.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,238 @@ +# 2009 March 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Test a race-condition that shows up in shared-cache mode. +# +# $Id: thread005.test,v 1.5 2009/03/26 14:48:07 danielk1977 Exp $ + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl +if {[run_thread_tests]==0} { finish_test ; return } +ifcapable !shared_cache { + finish_test + return +} + +db close + +# Use shared-cache mode for these tests. +# +set ::enable_shared_cache [sqlite3_enable_shared_cache] +sqlite3_enable_shared_cache 1 + +#------------------------------------------------------------------------- +# This test attempts to hit the race condition fixed by commit [6363]. 
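A side note on thread004: sqlite3_table_column_metadata() is a C-level interface that is not exposed by the standard Tcl bindings, so a plain-Tcl approximation of the polling thread has to settle for an ordinary schema query such as PRAGMA table_info, which likewise forces the schema to be loaded if it is not already in memory (illustrative sketch only):

    package require sqlite3

    sqlite3 db test.db
    set iStart [clock seconds]
    set nErr 0
    while {[clock seconds] < $iStart + 5} {
      # Any schema query will do; count the failures the way the test does.
      incr nErr [catch { db eval {PRAGMA table_info(t1)} }]
    }
    db close
    puts "errors: $nErr"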
+# +proc runsql {zSql {db {}}} { + set rc SQLITE_OK + while {$rc=="SQLITE_OK" && $zSql ne ""} { + set STMT [sqlite3_prepare_v2 $db $zSql -1 zSql] + while {[set rc [sqlite3_step $STMT]] eq "SQLITE_ROW"} { } + set rc [sqlite3_finalize $STMT] + } + return $rc +} +do_test thread005-1.1 { + sqlite3 db test.db + db eval { CREATE TABLE t1(a, b) } + db close +} {} +for {set ii 2} {$ii < 500} {incr ii} { + unset -nocomplain finished + thread_spawn finished(0) {sqlite3_open test.db} + thread_spawn finished(1) {sqlite3_open test.db} + if {![info exists finished(0)]} { vwait finished(0) } + if {![info exists finished(1)]} { vwait finished(1) } + + do_test thread005-1.$ii { + runsql { BEGIN } $finished(0) + runsql { INSERT INTO t1 VALUES(1, 2) } $finished(0) + + # If the race-condition was hit, then $finished(0 and $finished(1) + # will not use the same pager cache. In this case the next statement + # can be executed succesfully. However, if the race-condition is not + # hit, then $finished(1) will be blocked by the write-lock held by + # $finished(0) on the shared-cache table t1 and the statement will + # return SQLITE_LOCKED. + # + runsql { SELECT * FROM t1 } $finished(1) + } {SQLITE_LOCKED} + + sqlite3_close $finished(0) + sqlite3_close $finished(1) +} + + +#------------------------------------------------------------------------- +# This test tries to exercise a race-condition that existed in shared-cache +# mode at one point. The test uses two threads; each has a database connection +# open on the same shared cache. The schema of the database is: +# +# CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE); +# +# One thread is a reader and the other thread a reader and a writer. The +# writer thread repeats the following transaction as fast as possible: +# +# BEGIN; +# DELETE FROM t1 WHERE a = (SELECT max(a) FROM t1); +# INSERT INTO t1 VALUES(NULL, NULL); +# UPDATE t1 SET b = a WHERE a = (SELECT max(a) FROM t1); +# SELECT count(*) FROM t1 WHERE b IS NULL; +# COMMIT; +# +# The reader thread does the following over and over as fast as possible: +# +# BEGIN; +# SELECT count(*) FROM t1 WHERE b IS NULL; +# COMMIT; +# +# The test runs for 20 seconds or until one of the "SELECT count(*)" +# statements returns a non-zero value. If an SQLITE_LOCKED error occurs, +# the connection issues a ROLLBACK immediately to abandon the current +# transaction. +# +# If everything is working correctly, the "SELECT count(*)" statements +# should never return a value other than 0. The "INSERT" statement +# executed by the writer adds a row with "b IS NULL" to the table, but +# the subsequent UPDATE statement sets its "b" value to an integer +# immediately afterwards. +# +# However, before the race-condition was fixed, if the reader's SELECT +# statement hit an error (say an SQLITE_LOCKED) at the same time as the +# writer was executing the UPDATE statement, then it could incorrectly +# rollback the statement-transaction belonging to the UPDATE statement. +# The UPDATE statement would still be reported as successful to the user, +# but it would have no effect on the database contents. +# +# Note that it has so far only proved possible to hit this race-condition +# when using an ATTACHed database. There doesn't seem to be any reason +# for this, other than that operating on an ATTACHed database means there +# are a few more mutex grabs and releases during the window of time open +# for the race-condition. Maybe this encourages the scheduler to context +# switch or something... 
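Stripped to its essentials, the writer described above is "attempt the transaction; on a lock error roll back and let the caller retry; anything else is a real failure". With the standard Tcl bindings (rather than the harness's direct C-API calls) one iteration might look like the following sketch, which assumes the aux.t1 table created in thread005-2.1 below:

    package require sqlite3

    sqlite3 db test.db
    db eval { ATTACH 'test2.db' AS aux }

    proc writer_once {} {
      if {[catch {
        db eval {
          BEGIN;
          DELETE FROM t1 WHERE a = (SELECT max(a) FROM t1);
          INSERT INTO t1 VALUES(NULL, NULL);
          UPDATE t1 SET b = a WHERE a = (SELECT max(a) FROM t1);
          COMMIT;
        }
      } msg]} {
        # A lock error abandons the transaction; the caller may retry.
        catch { db eval ROLLBACK }
        if {![string match "*locked*" $msg] && ![string match "*busy*" $msg]} {
          error $msg
        }
      }
      # The invariant checked by both threads: no visible row has a NULL b.
      return [db onecolumn {SELECT count(*) FROM t1 WHERE b IS NULL}]
    }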
+# + +file delete -force test.db test2.db +unset -nocomplain finished + +do_test thread005-2.1 { + sqlite3 db test.db + execsql { ATTACH 'test2.db' AS aux } + execsql { + CREATE TABLE aux.t1(a INTEGER PRIMARY KEY, b UNIQUE); + INSERT INTO t1 VALUES(1, 1); + INSERT INTO t1 VALUES(2, 2); + } + db close +} {} + + +set ThreadProgram { + proc execsql {zSql {db {}}} { + if {$db eq ""} {set db $::DB} + + set lRes [list] + set rc SQLITE_OK + + while {$rc=="SQLITE_OK" && $zSql ne ""} { + set STMT [sqlite3_prepare_v2 $db $zSql -1 zSql] + while {[set rc [sqlite3_step $STMT]] eq "SQLITE_ROW"} { + for {set i 0} {$i < [sqlite3_column_count $STMT]} {incr i} { + lappend lRes [sqlite3_column_text $STMT 0] + } + } + set rc [sqlite3_finalize $STMT] + } + + if {$rc != "SQLITE_OK"} { error "$rc [sqlite3_errmsg $db]" } + return $lRes + } + + if {$isWriter} { + set Sql { + BEGIN; + DELETE FROM t1 WHERE a = (SELECT max(a) FROM t1); + INSERT INTO t1 VALUES(NULL, NULL); + UPDATE t1 SET b = a WHERE a = (SELECT max(a) FROM t1); + SELECT count(*) FROM t1 WHERE b IS NULL; + COMMIT; + } + } else { + set Sql { + BEGIN; + SELECT count(*) FROM t1 WHERE b IS NULL; + COMMIT; + } + } + + set ::DB [sqlite3_open test.db] + + execsql { ATTACH 'test2.db' AS aux } + + set result "ok" + set finish [expr [clock_seconds]+5] + while {$result eq "ok" && [clock_seconds] < $finish} { + set rc [catch {execsql $Sql} msg] + if {$rc} { + if {[string match "SQLITE_LOCKED*" $msg]} { + catch { execsql ROLLBACK } + } else { + sqlite3_close $::DB + error $msg + } + } elseif {$msg ne "0"} { + set result "failed" + } + } + + sqlite3_close $::DB + set result +} + +# There is a race-condition in btree.c that means that if two threads +# attempt to open the same database at roughly the same time, and there +# does not already exist a shared-cache corresponding to that database, +# then two shared-caches can be created instead of one. Things still more +# or less work, but the two database connections do not use the same +# shared-cache. +# +# If the threads run by this test hit this race-condition, the tests +# fail (because SQLITE_BUSY may be unexpectedly returned instead of +# SQLITE_LOCKED). To prevent this from happening, open a couple of +# connections to test.db and test2.db now to make sure that there are +# already shared-caches in memory for all databases opened by the +# test threads. +# +sqlite3 db test.db +sqlite3 db test2.db + +puts "Running thread-tests for ~20 seconds" +thread_spawn finished(0) {set isWriter 0} $ThreadProgram +thread_spawn finished(1) {set isWriter 1} $ThreadProgram +if {![info exists finished(0)]} { vwait finished(0) } +if {![info exists finished(1)]} { vwait finished(1) } + +catch { db close } +catch { db2 close } + +do_test thread005-2.2 { + list $finished(0) $finished(1) +} {ok ok} + +do_test thread005-2.3 { + sqlite3 db test.db + execsql { ATTACH 'test2.db' AS aux } + execsql { SELECT count(*) FROM t1 WHERE b IS NULL } +} {0} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread1.test --- sqlite3-3.4.2/test/thread1.test 2007-03-27 15:43:06.000000000 +0100 +++ sqlite3-3.6.16/test/thread1.test 2009-06-12 03:28:39.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. 
The # focus of this script is multithreading behavior # -# $Id: thread1.test,v 1.7 2004/06/19 00:16:31 drh Exp $ +# $Id: thread1.test,v 1.8 2008/10/07 15:25:49 drh Exp $ set testdir [file dirname $argv0] @@ -19,6 +19,10 @@ # Skip this whole file if the thread testing code is not enabled # +ifcapable !mutex { + finish_test + return +} if {[llength [info command thread_step]]==0 || [sqlite3 -has-codec]} { finish_test return diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread2.test --- sqlite3-3.4.2/test/thread2.test 2006-01-18 18:33:42.000000000 +0000 +++ sqlite3-3.6.16/test/thread2.test 2009-06-12 03:28:39.000000000 +0100 @@ -11,16 +11,13 @@ # This file implements regression tests for SQLite library. The # focus of this script is multithreading behavior # -# $Id: thread2.test,v 1.2 2006/01/18 18:33:42 danielk1977 Exp $ +# $Id: thread2.test,v 1.3 2008/10/07 15:25:49 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -# This file swaps database connections between threads. This -# is illegal if memory-management is enabled, so skip this file -# in that case. -ifcapable memorymanage { +ifcapable !mutex { finish_test return } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/thread_common.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/test/thread_common.tcl --- sqlite3-3.4.2/test/thread_common.tcl 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/thread_common.tcl 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,113 @@ +# 2007 September 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: thread_common.tcl,v 1.5 2009/03/26 14:48:07 danielk1977 Exp $ + +if {[info exists ::thread_procs]} { + return 0 +} + +# The following script is sourced by every thread spawned using +# [sqlthread spawn]: +set thread_procs { + + # Execute the supplied SQL using database handle $::DB. + # + proc execsql {sql} { + + set rc SQLITE_LOCKED + while {$rc eq "SQLITE_LOCKED" + || $rc eq "SQLITE_BUSY" + || $rc eq "SQLITE_SCHEMA"} { + set res [list] + + enter_db_mutex $::DB + set err [catch { + set ::STMT [sqlite3_prepare_v2 $::DB $sql -1 dummy_tail] + } msg] + + if {$err == 0} { + while {[set rc [sqlite3_step $::STMT]] eq "SQLITE_ROW"} { + for {set i 0} {$i < [sqlite3_column_count $::STMT]} {incr i} { + lappend res [sqlite3_column_text $::STMT 0] + } + } + set rc [sqlite3_finalize $::STMT] + } else { + if {[lindex $msg 0]=="(6)"} { + set rc SQLITE_LOCKED + } else { + set rc SQLITE_ERROR + } + } + + if {[string first locked [sqlite3_errmsg $::DB]]>=0} { + set rc SQLITE_LOCKED + } + if {$rc ne "SQLITE_OK"} { + set errtxt "$rc - [sqlite3_errmsg $::DB] (debug1)" + } + leave_db_mutex $::DB + + if {$rc eq "SQLITE_LOCKED" || $rc eq "SQLITE_BUSY"} { + #sqlthread parent "puts \"thread [sqlthread id] is busy. rc=$rc\"" + after 200 + } else { + #sqlthread parent "puts \"thread [sqlthread id] ran $sql\"" + } + } + + if {$rc ne "SQLITE_OK"} { + error $errtxt + } + set res + } + + proc do_test {name script result} { + set res [eval $script] + if {$res ne $result} { + error "$name failed: expected \"$result\" got \"$res\"" + } + } +} + +proc thread_spawn {varname args} { + sqlthread spawn $varname [join $args ;] +} + +# Return true if this build can run the multi-threaded tests. 
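The execsql wrapper in thread_common.tcl retries on SQLITE_BUSY, SQLITE_LOCKED and SQLITE_SCHEMA by sleeping 200ms between attempts. Outside the harness, the standard Tcl bindings give you two simpler tools for the BUSY half of that problem: a busy timeout, and the numeric result of "db errorcode" for an explicit retry loop. A rough sketch (error codes 5 and 6 are SQLITE_BUSY and SQLITE_LOCKED):

    package require sqlite3

    sqlite3 db test.db
    db timeout 2000        ;# let SQLite wait up to 2 seconds on SQLITE_BUSY

    # Explicit retry for the cases a busy-handler does not cover,
    # e.g. SQLITE_LOCKED between connections sharing a cache.
    proc eval_retry {sql {nTry 10}} {
      for {set i 0} {$i < $nTry} {incr i} {
        if {![catch {db eval $sql} res]} { return $res }
        if {[db errorcode] != 5 && [db errorcode] != 6} { error $res }
        after 200
      }
      error "still locked after $nTry attempts"
    }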
+# +proc run_thread_tests {{print_warning 0}} { + ifcapable !mutex { + set zProblem "SQLite build is not threadsafe" + } + if {[info commands sqlthread] eq ""} { + set zProblem "SQLite build is not threadsafe" + } + if {![info exists ::tcl_platform(threaded)]} { + set zProblem "Linked against a non-threadsafe Tcl build" + } + if {[info exists zProblem]} { + if {$print_warning} { + if {[info exists ::run_thread_tests_failed]} { + puts "WARNING: Multi-threaded tests skipped: $zProblem" + } + } else { + puts "Skipping thread tests: $zProblem" + set ::run_thread_tests_failed 1 + } + return 0 + } + return 1; +} + +return 0 + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt1514.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt1514.test --- sqlite3-3.4.2/test/tkt1514.test 2007-03-27 15:43:07.000000000 +0100 +++ sqlite3-3.6.16/test/tkt1514.test 2009-06-25 12:35:52.000000000 +0100 @@ -22,6 +22,6 @@ CREATE TABLE t1(a,b); SELECT a FROM t1 WHERE max(b)<10 GROUP BY a; } -} {1 {misuse of aggregate: max(b)}} +} {1 {misuse of aggregate: max()}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt1667.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt1667.test --- sqlite3-3.4.2/test/tkt1667.test 2006-06-20 12:01:09.000000000 +0100 +++ sqlite3-3.6.16/test/tkt1667.test 2009-06-12 03:37:59.000000000 +0100 @@ -14,7 +14,7 @@ # fixed. # # -# $Id: tkt1667.test,v 1.2 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: tkt1667.test,v 1.4 2009/02/05 17:00:54 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -32,7 +32,7 @@ # size of 1024). set first_ptrmap_page [expr 1024/5 + 3] -set sqlite_pending_byte [expr 1024 * ($first_ptrmap_page-1)] +sqlite3_test_control_pending_byte [expr 1024 * ($first_ptrmap_page-1)] sqlite db test.db @@ -81,5 +81,3 @@ integrity_check tkt1667-4.2 finish_test - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt1873.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt1873.test --- sqlite3-3.4.2/test/tkt1873.test 2006-06-27 17:34:58.000000000 +0100 +++ sqlite3-3.6.16/test/tkt1873.test 2009-06-05 18:03:43.000000000 +0100 @@ -14,11 +14,16 @@ # fixed. # # -# $Id: tkt1873.test,v 1.1 2006/06/27 16:34:58 danielk1977 Exp $ +# $Id: tkt1873.test,v 1.2 2007/10/09 08:29:33 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !attach { + finish_test + return +} + file delete -force test2.db test2.db-journal do_test tkt1873-1.1 { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2141.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2141.test --- sqlite3-3.4.2/test/tkt2141.test 2007-01-04 01:20:29.000000000 +0000 +++ sqlite3-3.6.16/test/tkt2141.test 2009-06-05 18:03:43.000000000 +0100 @@ -14,11 +14,15 @@ # fixed. # # -# $Id: tkt2141.test,v 1.1 2007/01/04 01:20:29 drh Exp $ +# $Id: tkt2141.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !subquery { + finish_test + return +} do_test tkt2141-1.1 { execsql { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2192.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2192.test --- sqlite3-3.4.2/test/tkt2192.test 2007-01-26 19:04:00.000000000 +0000 +++ sqlite3-3.6.16/test/tkt2192.test 2009-06-05 18:03:43.000000000 +0100 @@ -14,13 +14,17 @@ # fixed. 
# # -# $Id: tkt2192.test,v 1.1 2007/01/26 19:04:00 drh Exp $ +# $Id: tkt2192.test,v 1.3 2008/08/04 03:51:24 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !datetime||!compound { + finish_test + return +} -do_test tkt2191-1.1 { +do_test tkt2192-1.1 { execsql { -- Raw data (RBS) -------- diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2213.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2213.test --- sqlite3-3.4.2/test/tkt2213.test 2007-02-05 14:21:48.000000000 +0000 +++ sqlite3-3.6.16/test/tkt2213.test 2009-06-05 18:03:43.000000000 +0100 @@ -14,7 +14,7 @@ # fixed. # # -# $Id: tkt2213.test,v 1.1 2007/02/05 14:21:48 danielk1977 Exp $ +# $Id: tkt2213.test,v 1.2 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -27,4 +27,3 @@ } {0 abcd} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2251.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2251.test --- sqlite3-3.4.2/test/tkt2251.test 2007-02-24 15:18:51.000000000 +0000 +++ sqlite3-3.6.16/test/tkt2251.test 2009-06-05 18:03:43.000000000 +0100 @@ -28,11 +28,16 @@ # raw OP_Column in all places where a table column is extracted from # the database. # -# $Id: tkt2251.test,v 1.1 2007/02/24 15:18:51 drh Exp $ +# $Id: tkt2251.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !altertable { + finish_test + return +} + # Create sample data. Verify that the default value and type of an added # column is correct for aggregates. do_test tkt2251-1.1 { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2285.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2285.test --- sqlite3-3.4.2/test/tkt2285.test 2007-04-05 06:46:14.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2285.test 2009-06-05 18:03:43.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. Specifically. # it contains tests to verify that ticket #2285 has been fixed. 
# -# $Id: tkt2285.test,v 1.1 2007/04/05 05:46:14 danielk1977 Exp $ +# $Id: tkt2285.test,v 1.2 2008/07/12 14:52:20 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -54,4 +54,3 @@ } finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2332.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2332.test --- sqlite3-3.4.2/test/tkt2332.test 2007-06-15 13:58:23.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2332.test 2009-06-05 18:03:43.000000000 +0100 @@ -9,13 +9,13 @@ # #*********************************************************************** # -# $Id: tkt2332.test,v 1.3 2007/05/04 19:03:03 danielk1977 Exp $ +# $Id: tkt2332.test,v 1.4 2007/09/12 17:01:45 danielk1977 Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl -ifcapable {!incrblob} { +ifcapable !incrblob||!tclvar { finish_test return } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2339.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2339.test --- sqlite3-3.4.2/test/tkt2339.test 2007-06-15 16:30:16.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2339.test 2009-06-05 18:03:43.000000000 +0100 @@ -9,12 +9,17 @@ # #*********************************************************************** # -# $Id: tkt2339.test,v 1.1 2007/05/06 20:04:25 drh Exp $ +# $Id: tkt2339.test,v 1.2 2007/09/12 17:01:45 danielk1977 Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !subquery||!compound { + finish_test + return +} + do_test tkt2339.1 { execsql { create table t1(num int); diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2409.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2409.test --- sqlite3-3.4.2/test/tkt2409.test 2007-06-15 13:41:02.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2409.test 2009-06-05 18:03:43.000000000 +0100 @@ -16,7 +16,7 @@ # any statement other than a COMMIT, an I/O error is returned instead # of SQLITE_BUSY. # -# $Id: tkt2409.test,v 1.2 2007/06/15 12:41:02 drh Exp $ +# $Id: tkt2409.test,v 1.6 2008/08/28 17:46:19 drh Exp $ # Test Outline: # @@ -25,6 +25,11 @@ # Verify that the transaction is automatically rolled back # and SQLITE_IOERR_BLOCKED is returned # +# UPDATE: As of the pcache modifications, failing to upgrade to +# an exclusive lock when attempting a cache-spill is no longer an +# error. The pcache module allocates more space and keeps working +# in memory if this occurs. +# # tkt-2409-2.*: Cause a cache-spill while updating the change-counter # during a database COMMIT. Verify that the transaction is not # rolled back and SQLITE_BUSY is returned. @@ -32,14 +37,24 @@ # tkt-2409-3.*: Similar to 2409-1.*, but using many INSERT statements # within a transaction instead of just one. # +# UPDATE: Again, pcache now just keeps working in main memory. +# # tkt-2409-4.*: Similar to 2409-1.*, but rig it so that the # INSERT statement starts a statement transaction. Verify that -# SQLOTE_BUSY is returned and the transaction is not rolled back. +# SQLITE_BUSY is returned and the transaction is not rolled back. +# +# UPDATE: This time, SQLITE_BUSY is not returned. pcache just uses +# more malloc()'d memory. # set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !pager_pragmas { + finish_test + return +} + sqlite3_extended_result_codes $::DB 1 # Aquire a read-lock on the database using handle [db2]. @@ -81,11 +96,11 @@ BEGIN; INSERT INTO t1 VALUES($::zShort, $::zLong); } -} {1 {disk I/O error}} +} {0 {}} do_test tkt2409-1.2 { sqlite3_errcode $::DB -} {SQLITE_IOERR+11} +} {SQLITE_OK} # Check the integrity of the cache. 
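The updated expectations in tkt2409 reflect the pcache change described in its header comment: when a cache spill cannot obtain the lock it needs, the pager now keeps the extra dirty pages in heap memory instead of failing the statement. The pressure itself is easy to reproduce from Tcl by writing far more than the configured cache can hold inside one transaction (sketch; the file name and row counts are arbitrary):

    package require sqlite3

    sqlite3 db spill-demo.db
    db eval {
      PRAGMA page_size  = 1024;
      PRAGMA cache_size = 20;
      CREATE TABLE IF NOT EXISTS t1(a, b);
    }

    # Well over 20 pages of dirty data in a single transaction.  If the
    # pager cannot spill (for example because another connection holds a
    # read lock), recent versions simply keep the pages in memory.
    db transaction {
      for {set i 0} {$i < 500} {incr i} {
        db eval { INSERT INTO t1 VALUES($i, randomblob(1000)) }
      }
    }
    db close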
# @@ -98,8 +113,7 @@ do_test tkt2409-1.4 { unread_lock_db catchsql { ROLLBACK } -} {1 {cannot rollback - no transaction is active}} - +} {0 {}} set ::zShort [string repeat 0123456789 1] set ::zLong [string repeat 0123456789 1500] @@ -134,6 +148,7 @@ } } {0 {}} + do_test tkt2409-3.1 { db close set ::DB [sqlite3 db test.db; sqlite3_connection_pointer db] @@ -149,11 +164,11 @@ BEGIN; INSERT INTO t1 SELECT $::zShort, $::zLong; } -} {1 {database is locked}} +} {0 {}} do_test tkt2409-3.2 { sqlite3_errcode $::DB -} {SQLITE_BUSY} +} {SQLITE_OK} # Check the integrity of the cache. # @@ -167,8 +182,9 @@ unread_lock_db catchsql { ROLLBACK } } {0 {}} +integrity_check tkt2409-3.5 - +expr {srand(1)} do_test tkt2409-4.1 { execsql { PRAGMA cache_size=20; @@ -192,11 +208,11 @@ read_lock_db execsql BEGIN catchsql $sql -} {1 {disk I/O error}} +} {0 {}} do_test tkt2409-4.2 { sqlite3_errcode $::DB -} {SQLITE_IOERR+11} +} {SQLITE_OK} # Check the integrity of the cache. # @@ -204,8 +220,8 @@ do_test tkt2409-4.4 { catchsql { ROLLBACK } -} {1 {cannot rollback - no transaction is active}} - +} {0 {}} +integrity_check tkt2409-4.5 unread_lock_db db2 close diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2565.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2565.test --- sqlite3-3.4.2/test/tkt2565.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2565.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,92 @@ +# 2009 January 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This script attempts to reproduce the circumstances of ticket #2565. +# +# More specifically, this script attempts to generate rollback journals +# that contain headers with nRec==0 that are followed by additional +# valid headers. +# +# $Id: tkt2565.test,v 1.2 2009/04/09 01:23:49 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Use the alternative pcache and rig it to call pagerStress() +# very frequently. +# +db close +sqlite3_shutdown +sqlite3_config_alt_pcache 1 100 0 1 + +# Open two database connections to database "test.db". +# +proc reopen_database {} { + catch {db close} + sqlite3 db test.db + db cache size 0 + execsql { + pragma page_size=512; + pragma auto_vacuum=2; + pragma cache_size=16; + } +} + +# Open two database connections and create a single table in the db. +# +do_test tkt2565-1.0 { + reopen_database + execsql { CREATE TABLE A(Id INTEGER, Name TEXT) } +} {} + +for {set iFail 1} {$iFail<200} {incr iFail} { + reopen_database + execsql { pragma locking_mode=exclusive } + set nRow [db one {SELECT count(*) FROM a}] + + # Dirty (at least) one of the pages in the cache. + do_test tkt2565-1.$iFail.1 { + execsql { + BEGIN EXCLUSIVE; + INSERT INTO a VALUES(1, 'ABCDEFGHIJKLMNOP'); + } + } {} + + # Now try to commit the transaction. Cause an IO error to occur + # within this operation, which moves the pager into the error state. + # + set ::sqlite_io_error_persist 1 + set ::sqlite_io_error_pending $iFail + do_test tkt2565-1.$iFail.2 { + set rc [catchsql {COMMIT}] + list + } {} + set ::sqlite_io_error_persist 0 + set ::sqlite_io_error_pending 0 + if {!$::sqlite_io_error_hit} break + set ::sqlite_io_error_hit 0 +} + +# Make sure this test script doesn't leave any files open. 
+# +do_test tkt2565-1.X { + catch { db close } + set sqlite_open_file_count +} 0 + +# Restore the pcache configuration for subsequent tests. +# +sqlite3_shutdown +sqlite3_config_alt_pcache 0 +sqlite3_initialize +autoinstall_test_functions + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2640.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2640.test --- sqlite3-3.4.2/test/tkt2640.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2640.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,124 @@ +# 2007 Sep 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2640 has been fixed. +# +# $Id: tkt2640.test,v 1.3 2008/08/04 03:51:24 danielk1977 Exp $ +# + +# The problem in ticket #2640 was that the query optimizer was +# not recognizing all uses of tables within subqueries in the +# WHERE clause. If the subquery contained a compound SELECT, +# then tables that were used by terms of the compound other than +# the last term would not be recognized as dependencies. +# So if one of the SELECT statements within a compound made +# use of a table that occurs later in a join, the query +# optimizer would not recognize this and would try to evaluate +# the subquery too early, before that tables value had been +# established. + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery||!compound { + finish_test + return +} + +do_test tkt2640-1.1 { + execsql { + CREATE TABLE persons(person_id, name); + INSERT INTO persons VALUES(1,'fred'); + INSERT INTO persons VALUES(2,'barney'); + INSERT INTO persons VALUES(3,'wilma'); + INSERT INTO persons VALUES(4,'pebbles'); + INSERT INTO persons VALUES(5,'bambam'); + CREATE TABLE directors(person_id); + INSERT INTO directors VALUES(5); + INSERT INTO directors VALUES(3); + CREATE TABLE writers(person_id); + INSERT INTO writers VALUES(2); + INSERT INTO writers VALUES(3); + INSERT INTO writers VALUES(4); + SELECT DISTINCT p.name + FROM persons p, directors d + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=p.person_id + EXCEPT + SELECT person_id FROM writers w + ); + } +} {wilma} +do_test tkt2640-1.2 { + execsql { + SELECT DISTINCT p.name + FROM persons p CROSS JOIN directors d + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=p.person_id + EXCEPT + SELECT person_id FROM writers w + ); + } +} {wilma} +do_test tkt2640-1.3 { + execsql { + SELECT DISTINCT p.name + FROM directors d CROSS JOIN persons p + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=p.person_id + EXCEPT + SELECT person_id FROM writers w + ); + } +} {wilma} +do_test tkt2640-1.4 { + execsql { + SELECT DISTINCT p.name + FROM persons p, directors d + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=d.person_id + EXCEPT + SELECT person_id FROM writers w + ); + } +} {wilma} +do_test tkt2640-1.5 { + execsql { + SELECT DISTINCT p.name + FROM persons p CROSS JOIN directors d + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=d.person_id + EXCEPT + SELECT 
person_id FROM writers w + ); + } +} {wilma} +do_test tkt2640-1.6 { + execsql { + SELECT DISTINCT p.name + FROM directors d CROSS JOIN persons p + WHERE d.person_id=p.person_id + AND NOT EXISTS ( + SELECT person_id FROM directors d1 WHERE d1.person_id=d.person_id + EXCEPT + SELECT person_id FROM writers w + ); + } +} {wilma} + + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2643.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2643.test --- sqlite3-3.4.2/test/tkt2643.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2643.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,39 @@ +# 2007 Sep 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2643 has been fixed. +# +# $Id: tkt2643.test,v 1.1 2007/09/13 17:54:41 drh Exp $ +# + +# The problem in ticket #2643 has to do with the query optimizer +# making bad assumptions about index cost when data from ANALYZE +# is available. + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt2643-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE, c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 VALUES(2,3,4); + ANALYZE; + } + db close + sqlite3 db test.db + execsql { + CREATE INDEX i1 ON t1(c); + SELECT count(*) FROM t1 WHERE c IS NOT NULL + } +} {2} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2686.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2686.test --- sqlite3-3.4.2/test/tkt2686.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2686.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,81 @@ +# 2007 Oct 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2686 has been fixed. 
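The CROSS JOIN variants in tkt2640 work because CROSS JOIN tells SQLite not to reorder the join, which is what makes the "subquery evaluated too early" bug observable. The chosen join order (and, in the spirit of tkt2643, the effect of ANALYZE on index selection) can be inspected from the Tcl shell with EXPLAIN QUERY PLAN, which is ordinary SQL; a sketch, noting that the output columns vary between SQLite versions but include a detail column:

    package require sqlite3

    sqlite3 db test.db
    # Assumes the persons/directors/writers tables created in tkt2640-1.1.
    db eval {
      EXPLAIN QUERY PLAN
      SELECT DISTINCT p.name
        FROM directors d CROSS JOIN persons p
       WHERE d.person_id = p.person_id
    } row {
      puts $row(detail)
    }
    db close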
+# +# $Id: tkt2686.test,v 1.3 2008/02/02 02:48:52 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +db eval { + PRAGMA page_size=1024; + PRAGMA max_page_count=50; + PRAGMA auto_vacuum=0; + CREATE TABLE filler (fill); +} +for {set i 1} {$i<2000} {incr i} { + do_test tkt2686-$i.1 { + db eval BEGIN + set rc [catch { + while 1 { + db eval {INSERT INTO filler (fill) VALUES (randstr(1000, 10000)) } + } + } msg] + lappend rc $msg + } {1 {database or disk is full}} + do_test tkt2686-$i.2 { + execsql { + DELETE FROM filler + WHERE rowid <= (SELECT MAX(rowid) FROM filler LIMIT 20) + } + } {} + integrity_check tkt2686-$i.3 + catch {db eval COMMIT} +} + +db close +file delete -force test.db test.db-journal +sqlite3 db test.db + +db eval { + PRAGMA page_size=1024; + PRAGMA max_page_count=50; + PRAGMA auto_vacuum=1; + CREATE TABLE filler (fill); +} +for {set i 10000} {$i<12000} {incr i} { + do_test tkt2686-$i.1 { + db eval BEGIN + set rc [catch { + while 1 { + db eval {INSERT INTO filler (fill) VALUES (randstr(1000, 10000)) } + } + } msg] + lappend rc $msg + } {1 {database or disk is full}} + do_test tkt2686-$i.2 { + execsql { + DELETE FROM filler + WHERE rowid <= (SELECT MAX(rowid) FROM filler LIMIT 20) + } + } {} + integrity_check tkt2686-$i.3 + catch {db eval COMMIT} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2767.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2767.test --- sqlite3-3.4.2/test/tkt2767.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2767.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,90 @@ +# 2007 Oct 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2767 has been fixed. +# Ticket #2767 is for a VDBE stack overflow on BEFORE +# triggers that run RAISE(IGNORE). +# +# $Id: tkt2767.test,v 1.3 2009/04/07 14:14:23 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !trigger { finish_test ; return } + +do_test tkt2767-1.1 { + execsql { + -- Construct a table with many rows of data + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 SELECT x+2 FROM t1; + INSERT INTO t1 SELECT x+4 FROM t1; + INSERT INTO t1 SELECT x+8 FROM t1; + INSERT INTO t1 SELECT x+16 FROM t1; + + -- BEFORE triggers that invoke raise(ignore). The effect of + -- these triggers should be to make INSERTs, UPDATEs, and DELETEs + -- into no-ops. + CREATE TRIGGER r1 BEFORE UPDATE ON t1 BEGIN + SELECT raise(ignore); + END; + CREATE TRIGGER r2 BEFORE DELETE ON t1 BEGIN + SELECT raise(ignore); + END; + CREATE TRIGGER r3 BEFORE INSERT ON t1 BEGIN + SELECT raise(ignore); + END; + + -- Verify the table content + SELECT count(*), sum(x) FROM t1; + } +} {32 528} + +# Try to delete all elements of the table. This will invoke the +# DELETE trigger 32 times, which should overflow the VDBE stack if +# the problem of #2767 is not fixed. If the problem is fixed, all +# the deletes should be no-ops so the table should remain unchanged. +# +do_test tkt2767-1.2 { + execsql { + DELETE FROM t1 WHERE x>0; + SELECT count(*), sum(x) FROM t1; + } +} {32 528} + +# Try to update all elements of the table. 
This will invoke the +# UPDATE trigger 32 times, which should overflow the VDBE stack if +# the problem of #2767 is not fixed. If the problem is fixed, all +# the updates should be no-ops so the table should remain unchanged. +# +do_test tkt2767-1.3 { + execsql { + UPDATE t1 SET x=x+1; + SELECT count(*), sum(x) FROM t1; + } +} {32 528} + +# Invoke the insert trigger. The insert trigger was working +# even prior to the fix of #2767. But it seems good to go ahead +# and verify that it works. +# +do_test tkt2767-1.4 { + execsql { + INSERT INTO t1 SELECT x+32 FROM t1; + SELECT count(*), sum(x) FROM t1; + } +} {32 528} + + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2817.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2817.test --- sqlite3-3.4.2/test/tkt2817.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2817.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,72 @@ +# 2007 December 02 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# Specifically, it tests that bug 2817 is fixed. +# +# $Id: tkt2817.test,v 1.2 2008/07/12 14:52:21 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt2817-1.0 { + execsql { + CREATE TEMP TABLE tbl(a, b, c); + -- INSERT INTO tbl VALUES(1, 'abc', 'def'); + -- INSERT INTO tbl VALUES(2, 'ghi', 'jkl'); + } +} {} +do_test tkt2817-1.1 { + execsql { + CREATE TABLE main.tbl(a, b, c); + CREATE INDEX main.tbli ON tbl(a, b, c); + INSERT INTO main.tbl SELECT a, b, c FROM temp.tbl; + } +} {} + +# When bug #2817 existed, this test was failing. +# +integrity_check tkt2817-1.2 + +# So was this one. +# +db close +sqlite3 db test.db +integrity_check tkt2817-1.3 + + +# These tests - tkt2817-2.* - are the same as the previous block, except +# for the fact that the temp-table and the main table do not share the +# same name. #2817 did not cause a problem with these tests. +# +db close +file delete -force test.db +sqlite3 db test.db +do_test tkt2817-2.0 { + execsql { + CREATE TEMP TABLE tmp(a, b, c); + INSERT INTO tmp VALUES(1, 'abc', 'def'); + INSERT INTO tmp VALUES(2, 'ghi', 'jkl'); + } +} {} +do_test tkt2817-2.1 { + execsql { + CREATE TABLE main.tbl(a, b, c); + CREATE INDEX main.tbli ON tbl(a, b, c); + INSERT INTO main.tbl SELECT a, b, c FROM temp.tmp; + } +} {} +integrity_check tkt2817-2.2 +db close +sqlite3 db test.db +integrity_check tkt2817-2.3 + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2820.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2820.test --- sqlite3-3.4.2/test/tkt2820.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2820.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,94 @@ +# 2007 Dec 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2820 has been fixed. 
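The raise(ignore) triggers in tkt2767 are unconditional, turning every INSERT, UPDATE and DELETE into a no-op. The same mechanism is more often used with a WHEN clause to silently skip only the rows that match some rule; a hedged sketch (the table and column names are invented for the example):

    package require sqlite3

    sqlite3 db demo.db
    db eval {
      CREATE TABLE IF NOT EXISTS docs(id INTEGER PRIMARY KEY, body, protected);
      CREATE TRIGGER IF NOT EXISTS docs_no_delete
      BEFORE DELETE ON docs WHEN old.protected = 1
      BEGIN
        SELECT raise(ignore);     -- quietly skip the delete for this row
      END;
      INSERT OR IGNORE INTO docs VALUES(1, 'keep me', 1);
      INSERT OR IGNORE INTO docs VALUES(2, 'fair game', 0);
      DELETE FROM docs;           -- row 1 survives, row 2 is removed
    }
    puts [db eval {SELECT id FROM docs}]   ;# prints: 1
    db close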
+# Ticket #2820 observes that a DROP TABLE statement that +# occurs while a query is in process will fail with a +# "database is locked" error, but the entry in the sqlite_master +# table will still be removed. This is incorrect. The +# entry in the sqlite_master table should persist when +# the DROP fails due to an error. +# +# $Id: tkt2820.test,v 1.1 2007/12/04 16:54:53 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc test_schema_change {testid init ddl res} { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + execsql $init + do_test tkt2820-$testid.1 { + set STMT [sqlite3_prepare db {SELECT * FROM sqlite_master} -1 DUMMY] + sqlite3_step $STMT + } {SQLITE_ROW} +#if {$testid==3} {execsql {PRAGMA vdbe_trace=ON}} + do_test tkt2820-$testid.2 "catchsql [list $ddl]" \ + {1 {database table is locked}} + do_test tkt2820-$testid.3 { + sqlite3_finalize $STMT + execsql {SELECT name FROM sqlite_master ORDER BY 1} + } $res + integrity_check tkt2820-$testid.4 + db close + sqlite3 db test.db + integrity_check tkt2820-$testid.5 +} + +test_schema_change 1 { + CREATE TABLE t1(a); +} { + DROP TABLE t1 +} {t1} +test_schema_change 2 { + CREATE TABLE t1(a); + CREATE TABLE t2(b); +} { + DROP TABLE t2 +} {t1 t2} +test_schema_change 3 { + CREATE TABLE t1(a); + CREATE INDEX i1 ON t1(a); +} { + DROP INDEX i1 +} {i1 t1} + +# We further observe that prior to the fix associated with ticket #2820, +# no statement journal would be created on an SQL statement that was run +# while a second statement was active, as long as we are in autocommit +# mode. This is incorrect. +# +do_test tkt2820-4.1 { + db close + file delete -force test.db test.db-journal + sqlite3 db test.db + db eval { + CREATE TABLE t1(a INTEGER PRIMARY KEY); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + } + + # The INSERT statement within the loop should fail on a + # constraint violation on the second inserted row. This + # should cause the entire INSERT to rollback using a statement + # journal. + # + db eval {SELECT name FROM sqlite_master} { + catch {db eval { + INSERT INTO t1 SELECT a+1 FROM t1 ORDER BY a DESC + }} + } + db eval {SELECT a FROM t1 ORDER BY a} +} {1 2} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2822.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2822.test --- sqlite3-3.4.2/test/tkt2822.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2822.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,336 @@ +# 2007 Dec 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that the issues surrounding expressions in +# ORDER BY clauses on compound SELECT statements raised by ticket +# #2822 have been dealt with. +# +# $Id: tkt2822.test,v 1.6 2008/08/20 16:35:10 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +# The ORDER BY matching algorithm is three steps: +# +# (1) If the ORDER BY term is an integer constant i, then +# sort by the i-th column of the result set. +# +# (2) If the ORDER BY term is an identifier (not x.y or x.y.z +# but simply x) then look for a column alias with the same +# name. If found, then sort by that column. 
+# +# (3) Evaluate the term as an expression and sort by the +# value of the expression. +# +# For a compound SELECT the rules are modified slightly. +# In the third rule, the expression must exactly match one +# of the result columns. The sequences of three rules is +# attempted first on the left-most SELECT. If that doesn't +# work, we move to the right, one by one. +# +# Rule (3) is not in standard SQL - it is an SQLite extension, +# though one copied from PostgreSQL. The rule for compound +# queries where a search is made of SELECTs to the right +# if the left-most SELECT does not match is not a part of +# standard SQL either. This extension is unique to SQLite +# as far as we know. +# +# Rule (2) was added by the changes ticket #2822. Prior to +# that changes, SQLite did not support rule (2), making it +# technically in violation of standard SQL semantics. +# No body noticed because rule (3) has the same effect as +# rule (2) except in some obscure cases. +# + + +# Test plan: +# +# tkt2822-1.* - Simple identifier as ORDER BY expression. +# tkt2822-2.* - More complex ORDER BY expressions. + +do_test tkt2822-0.1 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE TABLE t2(a, b, c); + + INSERT INTO t1 VALUES(1, 3, 9); + INSERT INTO t1 VALUES(3, 9, 27); + INSERT INTO t1 VALUES(5, 15, 45); + + INSERT INTO t2 VALUES(2, 6, 18); + INSERT INTO t2 VALUES(4, 12, 36); + INSERT INTO t2 VALUES(6, 18, 54); + } +} {} + +# Test the "ORDER BY " syntax. +# +do_test tkt2822-1.1 { + execsql { + SELECT a, b, c FROM t1 UNION ALL SELECT a, b, c FROM t2 ORDER BY 1; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} +do_test tkt2822-1.2 { + execsql { + SELECT a, CAST (b AS TEXT), c FROM t1 + UNION ALL + SELECT a, b, c FROM t2 + ORDER BY 2; + } +} {2 6 18 4 12 36 6 18 54 5 15 45 1 3 9 3 9 27} + +# Test the "ORDER BY " syntax. +# +do_test tkt2822-2.1 { + execsql { + SELECT a, b, c FROM t1 UNION ALL SELECT a, b, c FROM t2 ORDER BY a; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} + +do_test tkt2822-2.2 { + execsql { + SELECT a, CAST (b AS TEXT) AS x, c FROM t1 + UNION ALL + SELECT a, b, c FROM t2 + ORDER BY x; + } +} {2 6 18 4 12 36 6 18 54 5 15 45 1 3 9 3 9 27} +do_test tkt2822-2.3 { + execsql { + SELECT t1.a, b, c FROM t1 UNION ALL SELECT t2.a, b, c FROM t2 ORDER BY a; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} + +# Test the "ORDER BY " syntax. +# +do_test tkt2822-3.1 { + execsql { + SELECT a, CAST (b AS TEXT) AS x, c FROM t1 + UNION ALL + SELECT a, b, c FROM t2 + ORDER BY CAST (b AS TEXT); + } +} {2 6 18 4 12 36 6 18 54 5 15 45 1 3 9 3 9 27} +do_test tkt2822-3.2 { + execsql { + SELECT t1.a, b, c FROM t1 UNION ALL SELECT t2.a, b, c FROM t2 ORDER BY t1.a; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} + +# Test that if a match cannot be found in the leftmost SELECT, an +# attempt is made to find a match in subsequent SELECT statements. +# +do_test tkt2822-3.3 { + execsql { + SELECT a, b, c FROM t1 UNION ALL SELECT a AS x, b, c FROM t2 ORDER BY x; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} +do_test tkt2822-3.4 { + # But the leftmost SELECT takes precedence. + execsql { + SELECT a AS b, CAST (b AS TEXT) AS a, c FROM t1 + UNION ALL + SELECT a, b, c FROM t2 + ORDER BY a; + } +} {2 6 18 4 12 36 6 18 54 5 15 45 1 3 9 3 9 27} +do_test tkt2822-3.5 { + execsql { + SELECT a, b, c FROM t2 + UNION ALL + SELECT a AS b, CAST (b AS TEXT) AS a, c FROM t1 + ORDER BY a; + } +} {1 3 9 2 6 18 3 9 27 4 12 36 5 15 45 6 18 54} + +# Test some error conditions (ORDER BY clauses that match no column). 
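Collapsed into a standalone script, the first three groups of tkt2822 tests look like this with the standard Tcl bindings (scratch in-memory database; nothing from the harness is required):

    package require sqlite3

    sqlite3 db :memory:
    db eval {
      CREATE TABLE t1(a, b);
      INSERT INTO t1 VALUES(1, 3);
      INSERT INTO t1 VALUES(5, 15);
      CREATE TABLE t2(a, b);
      INSERT INTO t2 VALUES(2, 6);
      INSERT INTO t2 VALUES(4, 12);
    }

    # Rule (1): sort by the first result column, by ordinal.
    puts [db eval {
      SELECT a, b FROM t1 UNION ALL SELECT a, b FROM t2 ORDER BY 1
    }]
    # Rule (2): sort by a column alias declared in the left-most SELECT.
    puts [db eval {
      SELECT a AS x, b FROM t1 UNION ALL SELECT a, b FROM t2 ORDER BY x
    }]
    # Rule (3): an expression that exactly matches a result column.
    puts [db eval {
      SELECT a, CAST(b AS TEXT) FROM t1 UNION ALL SELECT a, b FROM t2
      ORDER BY CAST(b AS TEXT)
    }]
    db close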
+# +do_test tkt2822-4.1 { + catchsql { + SELECT a, b, c FROM t1 UNION ALL SELECT a, b, c FROM t2 ORDER BY x + } +} {1 {1st ORDER BY term does not match any column in the result set}} +do_test tkt2822-4.2 { + catchsql { + SELECT a, CAST (b AS TEXT) AS x, c FROM t1 + UNION ALL + SELECT a, b, c FROM t2 + ORDER BY CAST (b AS INTEGER); + } +} {1 {1st ORDER BY term does not match any column in the result set}} + +# Tests for rule (2). +# +# The "ORDER BY b" should match the column alias (rule 2), not the +# the t3.b value (rule 3). +# +do_test tkt2822-5.1 { + execsql { + CREATE TABLE t3(a,b); + INSERT INTO t3 VALUES(1,8); + INSERT INTO t3 VALUES(9,2); + + SELECT a AS b FROM t3 ORDER BY b; + } +} {1 9} +do_test tkt2822-5.2 { + # Case does not matter. b should match B + execsql { + SELECT a AS b FROM t3 ORDER BY B; + } +} {1 9} +do_test tkt2822-5.3 { + # Quoting should not matter + execsql { + SELECT a AS 'b' FROM t3 ORDER BY "B"; + } +} {1 9} +do_test tkt2822-5.4 { + # Quoting should not matter + execsql { + SELECT a AS "b" FROM t3 ORDER BY [B]; + } +} {1 9} + +# In "ORDER BY +b" the term is now an expression rather than +# a label. It therefore matches by rule (3) instead of rule (2). +# +do_test tkt2822-5.5 { + execsql { + SELECT a AS b FROM t3 ORDER BY +b; + } +} {9 1} + +# Tests for rule 2 in compound queries +# +do_test tkt2822-6.1 { + execsql { + CREATE TABLE t6a(p,q); + INSERT INTO t6a VALUES(1,8); + INSERT INTO t6a VALUES(9,2); + CREATE TABLE t6b(x,y); + INSERT INTO t6b VALUES(1,7); + INSERT INTO t6b VALUES(7,2); + + SELECT p, q FROM t6a UNION ALL SELECT x, y FROM t6b ORDER BY 1, 2 + } +} {1 7 1 8 7 2 9 2} +do_test tkt2822-6.2 { + execsql { + SELECT p PX, q QX FROM t6a UNION ALL SELECT x XX, y YX FROM t6b + ORDER BY PX, YX + } +} {1 7 1 8 7 2 9 2} +do_test tkt2822-6.3 { + execsql { + SELECT p PX, q QX FROM t6a UNION ALL SELECT x XX, y YX FROM t6b + ORDER BY XX, QX + } +} {1 7 1 8 7 2 9 2} +do_test tkt2822-6.4 { + execsql { + SELECT p PX, q QX FROM t6a UNION ALL SELECT x XX, y YX FROM t6b + ORDER BY QX, XX + } +} {7 2 9 2 1 7 1 8} +do_test tkt2822-6.5 { + execsql { + SELECT p PX, q QX FROM t6a UNION ALL SELECT x XX, y YX FROM t6b + ORDER BY t6b.x, QX + } +} {1 7 1 8 7 2 9 2} +do_test tkt2822-6.6 { + execsql { + SELECT p PX, q QX FROM t6a UNION ALL SELECT x XX, y YX FROM t6b + ORDER BY t6a.q, XX + } +} {7 2 9 2 1 7 1 8} + +# More error message tests. This is really more of a test of the +# %r ordinal value formatting capablity added to sqlite3_snprintf() +# by ticket #2822. 
+# +do_test tkt2822-7.1 { + execsql { + CREATE TABLE t7(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14, + a15,a16,a17,a18,a19,a20,a21,a22,a23,a24,a25); + } + catchsql { + SELECT * FROM t7 ORDER BY 0; + } +} {1 {1st ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.2 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 0; + } +} {1 {2nd ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.3 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 0; + } +} {1 {3rd ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.4 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 0; + } +} {1 {4th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.9 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 0; + } +} {1 {9th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.10 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 0; + } +} {1 {10th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.11 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0; + } +} {1 {11th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.12 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 0; + } +} {1 {12th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.13 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 0; + } +} {1 {13th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.20 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11,12,13,14,15,16,17,18,19, 0 + } +} {1 {20th ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.21 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11,12,13,14,15,16,17,18,19, 20, 0 + } +} {1 {21st ORDER BY term out of range - should be between 1 and 25}} +do_test tkt2822-7.22 { + catchsql { + SELECT * FROM t7 ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11,12,13,14,15,16,17,18,19, 20, 21, 0 + } +} {1 {22nd ORDER BY term out of range - should be between 1 and 25}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2832.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2832.test --- sqlite3-3.4.2/test/tkt2832.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2832.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,67 @@ +# 2007 Dec 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2832 has been fixed. 
+# +# $Id: tkt2832.test,v 1.5 2009/04/07 14:14:23 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !trigger { finish_test ; return } + +do_test tkt2832-1.1 { + execsql { + CREATE TABLE t1(a PRIMARY KEY); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(3); + } +} {} +do_test tkt2832-1.2 { + execsql { + UPDATE OR REPLACE t1 SET a = 1; + SELECT * FROM t1; + } +} {1} + +do_test tkt2832-2.1 { + execsql { + CREATE TABLE t2(a, b); + CREATE TRIGGER t2_t AFTER UPDATE ON t2 BEGIN + DELETE FROM t2 WHERE a = new.a + 1; + END; + INSERT INTO t2 VALUES(1, 2); + INSERT INTO t2 VALUES(2, 3); + } +} {} +do_test tkt2832-2.2 { + execsql { + UPDATE t2 SET b = 5 + } +} {} + +do_test tkt2832-3.1 { + execsql { + CREATE TABLE t3(a, b); + CREATE TRIGGER t3_t AFTER DELETE ON t3 BEGIN + DELETE FROM t3 WHERE a = old.a + 1; + END; + INSERT INTO t3 VALUES(1, 2); + INSERT INTO t3 VALUES(2, 3); + } +} {} +do_test tkt2832-3.2 { + execsql { DELETE FROM t3 WHERE 1 } +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2854.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2854.test --- sqlite3-3.4.2/test/tkt2854.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2854.test 2009-06-12 03:37:59.000000000 +0100 @@ -0,0 +1,149 @@ +# 2007 December 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: tkt2854.test,v 1.4 2009/03/16 13:19:36 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +db close + +ifcapable !shared_cache { + finish_test + return +} + +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +# Open 3 database connections. Connection "db" and "db2" share a cache. +# Connection "db3" has its own cache. +# +do_test tkt2854-1.1 { + sqlite3 db test.db + sqlite3 db2 test.db + + # This is taken from shared.test. The Windows VFS expands + # ./test.db (and test.db) to be the same thing so the path + # matches and they share a cache. By changing the case + # for Windows platform, we get around this and get a separate + # connection. + if {$::tcl_platform(platform)=="unix"} { + sqlite3 db3 ./test.db + } else { + sqlite3 db3 TEST.DB + } + + db eval { + CREATE TABLE abc(a, b, c); + } +} {} + +# Check that an exclusive lock cannot be obtained if some other +# shared-cache connection has a read-lock on a table. +# +do_test tkt2854-1.2 { + execsql { + BEGIN; + SELECT * FROM abc; + } db2 +} {} +do_test tkt2854-1.3 { + catchsql { BEGIN EXCLUSIVE } db +} {1 {database table is locked}} +do_test tkt2854-1.4 { + execsql { SELECT * FROM abc } db3 +} {} +do_test tkt2854-1.5 { + catchsql { INSERT INTO abc VALUES(1, 2, 3) } db3 +} {1 {database is locked}} +do_test tkt2854-1.6 { + execsql { COMMIT } db2 +} {} + +# Check that an exclusive lock prevents other shared-cache users from +# starting a transaction. 
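+#
+# Note that the statements below are compiled with sqlite3_prepare()
+# rather than sqlite3_prepare_v2().  With the legacy interface,
+# sqlite3_step() reports only a generic SQLITE_ERROR when it hits the
+# lock, and the specific SQLITE_LOCKED code only becomes visible from
+# sqlite3_finalize() - hence the {SQLITE_ERROR SQLITE_LOCKED} pairs
+# expected further down.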
+# +do_test tkt2854-1.7 { + set ::DB2 [sqlite3_connection_pointer db2] + set ::STMT1 [sqlite3_prepare $DB2 "SELECT * FROM abc" -1 TAIL] + set ::STMT2 [sqlite3_prepare $DB2 "BEGIN EXCLUSIVE" -1 TAIL] + set ::STMT3 [sqlite3_prepare $DB2 "BEGIN IMMEDIATE" -1 TAIL] + set ::STMT4 [sqlite3_prepare $DB2 "BEGIN" -1 TAIL] + set ::STMT5 [sqlite3_prepare $DB2 "COMMIT" -1 TAIL] + execsql { BEGIN EXCLUSIVE } db +} {} +do_test tkt2854-1.8 { + catchsql { BEGIN EXCLUSIVE } db2 +} {1 {database schema is locked: main}} +do_test tkt2854-1.9 { + catchsql { BEGIN IMMEDIATE } db2 +} {1 {database schema is locked: main}} +do_test tkt2854-1.10 { + # This fails because the schema of main cannot be verified. + catchsql { BEGIN } db2 +} {1 {database schema is locked: main}} + +# Check that an exclusive lock prevents other shared-cache users from +# reading the database. Use stored statements so that the error occurs +# at the b-tree level, not the schema level. +# +do_test tkt2854-1.11 { + list [sqlite3_step $::STMT1] [sqlite3_finalize $::STMT1] +} {SQLITE_ERROR SQLITE_LOCKED} +do_test tkt2854-1.12 { + list [sqlite3_step $::STMT2] [sqlite3_finalize $::STMT2] +} {SQLITE_ERROR SQLITE_LOCKED} +do_test tkt2854-1.13 { + list [sqlite3_step $::STMT3] [sqlite3_finalize $::STMT3] +} {SQLITE_ERROR SQLITE_LOCKED} +do_test tkt2854-1.14 { + # A regular "BEGIN" doesn't touch any databases. So it succeeds. + list [sqlite3_step $::STMT4] [sqlite3_finalize $::STMT4] +} {SQLITE_DONE SQLITE_OK} +do_test tkt2854-1.15 { + # As does a COMMIT. + list [sqlite3_step $::STMT5] [sqlite3_finalize $::STMT5] +} {SQLITE_DONE SQLITE_OK} + +# Try to read the database using connection "db3" (which does not share +# a cache with "db"). The database should be locked. +do_test tkt2854-1.16 { + catchsql { SELECT * FROM abc } db3 +} {1 {database is locked}} +do_test tkt2854-1.17 { + execsql { COMMIT } db +} {} +do_test tkt2854-1.18 { + execsql { SELECT * FROM abc } db2 +} {} + +# Check that if an attempt to obtain an exclusive lock fails because an +# attached db cannot be locked, the internal exclusive flag used by +# shared-cache users is correctly cleared. +do_test tkt2854-1.19 { + file delete -force test2.db test2.db-journal + sqlite3 db4 test2.db + execsql { CREATE TABLE def(d, e, f) } db4 + execsql { ATTACH 'test2.db' AS aux } db +} {} +do_test tkt2854-1.20 { + execsql {BEGIN IMMEDIATE} db4 + catchsql {BEGIN EXCLUSIVE} db +} {1 {database table is locked}} +do_test tkt2854-1.21 { + execsql {SELECT * FROM abc} db2 +} {} + +db close +db2 close +db3 close +db4 close +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2920.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2920.test --- sqlite3-3.4.2/test/tkt2920.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2920.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,78 @@ +# 2008 Feb 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2920 is fixed. +# +# $Id: tkt2920.test,v 1.1 2008/02/02 02:48:52 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Create a database file that is full. 
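+#
+# With PRAGMA page_size=1024 and PRAGMA max_page_count=40 the file can
+# never grow beyond 40*1024 = 40960 bytes.  The 34 rows of roughly one
+# kilobyte each inserted below consume essentially all of that
+# allowance, so the next statement that needs a fresh page should fail
+# with "database or disk is full".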
+# +do_test tkt2920-1.1 { + db eval { + PRAGMA page_size=1024; + PRAGMA max_page_count=40; + PRAGMA auto_vacuum=0; + CREATE TABLE filler (fill); + } + file size test.db +} {2048} +do_test tkt2920-1.2 { + db eval BEGIN + for {set i 0} {$i<34} {incr i} { + db eval {INSERT INTO filler VALUES(randomblob(1024))} + } + db eval COMMIT +} {} + +# Try to add a single new page to the full database. We get +# a disk full error. But this does not corrupt the database. +# +do_test tkt2920-1.3 { + db eval BEGIN + catchsql { + INSERT INTO filler VALUES(randomblob(1024)) + } +} {1 {database or disk is full}} +integrity_check tkt2920-1.4 + +# Increase the maximum size of the database file by 1 page, +# but then try to add a two-page record. This also fails. +# +do_test tkt2920-1.5 { + db eval {PRAGMA max_page_count=41} + catchsql { + INSERT INTO filler VALUES(randomblob(2048)) + } +} {1 {database or disk is full}} +integrity_check tkt2920-1.6 + +# Increase the maximum size of the database by one more page. +# This time the insert works. +# +do_test tkt2920-1.7 { + db eval {PRAGMA max_page_count=42} + catchsql { + INSERT INTO filler VALUES(randomblob(2048)) + } +} {0 {}} +integrity_check tkt2920-1.8 + +# The previous errors cancelled the transaction. +# +do_test tkt2920-1.9 { + catchsql {COMMIT} +} {1 {cannot commit - no transaction is active}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2927.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2927.test --- sqlite3-3.4.2/test/tkt2927.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2927.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,674 @@ +# 2008 Feb 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file is to test that ticket #2927 is fixed. +# +# $Id: tkt2927.test,v 1.4 2008/08/04 03:51:24 danielk1977 Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !compound { + finish_test + return +} + +# Create a database. 
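+#
+# The tkt2927-2 through tkt2927-6 tests below combine the arms of a
+# compound SELECT where the arms differ only in whether a and/or b is
+# wrapped in abs().  Since every value inserted into t1 is positive,
+# abs() is a no-op here, so each UNION ALL, UNION, EXCEPT and INTERSECT
+# variant should return exactly the same rows as its plain "a, b" form.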
+# +do_test tkt2927-1.1 { + db eval { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1,11); + INSERT INTO t1 VALUES(2,22); + INSERT INTO t1 VALUES(3,33); + INSERT INTO t1 VALUES(4,44); + INSERT INTO t1 VALUES(5,55); + SELECT * FROM t1; + } +} {1 11 2 22 3 33 4 44 5 55} + + +do_test tkt2927-2.1 { + db eval { + SELECT a, b FROM t1 + UNION ALL + SELECT a, b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.2 { +#set sqlite_addop_trace 1 + db eval { + SELECT a, b FROM t1 + UNION ALL + SELECT a, abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.3 { + db eval { + SELECT a, b FROM t1 + UNION ALL + SELECT abs(a), b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.4 { + db eval { + SELECT a, b FROM t1 + UNION ALL + SELECT abs(a), abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.5 { + db eval { + SELECT a, abs(b) FROM t1 + UNION ALL + SELECT a, b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.6 { + db eval { + SELECT a, abs(b) FROM t1 + UNION ALL + SELECT a, abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.7 { + db eval { + SELECT a, abs(b) FROM t1 + UNION ALL + SELECT abs(a), b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.8 { + db eval { + SELECT a, abs(b) FROM t1 + UNION ALL + SELECT abs(a), abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.9 { + db eval { + SELECT abs(a), b FROM t1 + UNION ALL + SELECT a, b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.10 { + db eval { + SELECT abs(a), b FROM t1 + UNION ALL + SELECT a, abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.11 { + db eval { + SELECT abs(a), b FROM t1 + UNION ALL + SELECT abs(a), b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.12 { + db eval { + SELECT abs(a), b FROM t1 + UNION ALL + SELECT abs(a), abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.13 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION ALL + SELECT a, b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.14 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION ALL + SELECT a, abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.15 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION ALL + SELECT abs(a), b FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-2.16 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION ALL + SELECT abs(a), abs(b) FROM t1 + } +} {1 11 2 22 3 33 4 44 5 55 1 11 2 22 3 33 4 44 5 55} + + +do_test tkt2927-3.1 { + db eval { + SELECT a, b FROM t1 + UNION + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.2 { + db eval { + SELECT a, b FROM t1 + UNION + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.3 { + db eval { + SELECT a, b FROM t1 + UNION + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.4 { + db eval { + SELECT a, b FROM t1 + UNION + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.5 { + db eval { + SELECT a, abs(b) FROM t1 + UNION + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 
33 4 44 5 55} +do_test tkt2927-3.6 { + db eval { + SELECT a, abs(b) FROM t1 + UNION + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.7 { + db eval { + SELECT a, abs(b) FROM t1 + UNION + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.8 { + db eval { + SELECT a, abs(b) FROM t1 + UNION + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.9 { + db eval { + SELECT abs(a), b FROM t1 + UNION + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.10 { + db eval { + SELECT abs(a), b FROM t1 + UNION + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.11 { + db eval { + SELECT abs(a), b FROM t1 + UNION + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.12 { + db eval { + SELECT abs(a), b FROM t1 + UNION + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.13 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.14 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.15 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-3.16 { + db eval { + SELECT abs(a), abs(b) FROM t1 + UNION + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} + + +do_test tkt2927-4.1 { + db eval { + SELECT a+b, a-b, a, b FROM t1 + UNION ALL + SELECT a+b, a-b, a, b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.2 { + db eval { + SELECT a+b, a-b, a, b FROM t1 + UNION ALL + SELECT a+b, a-b, a, abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.3 { + db eval { + SELECT a+b, a-b, a, b FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.4 { + db eval { + SELECT a+b, a-b, a, b FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.5 { + db eval { + SELECT a+b, a-b, a, abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, a, b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.6 { + db eval { + SELECT a+b, a-b, a, abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, a, abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.7 { + db eval { + SELECT a+b, a-b, a, abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.8 { + db eval { + SELECT a+b, a-b, a, abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 
12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.9 { + db eval { + SELECT a+b, a-b, abs(a), b FROM t1 + UNION ALL + SELECT a+b, a-b, a, b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.10 { + db eval { + SELECT a+b, a-b, abs(a), b FROM t1 + UNION ALL + SELECT a+b, a-b, a, abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.11 { + db eval { + SELECT a+b, a-b, abs(a), b FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.12 { + db eval { + SELECT a+b, a-b, abs(a), b FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.13 { + db eval { + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, a, b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.14 { + db eval { + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, a, abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.15 { + db eval { + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), b FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} +do_test tkt2927-4.16 { + db eval { + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + UNION ALL + SELECT a+b, a-b, abs(a), abs(b) FROM t1 + } +} {12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55 12 -10 1 11 24 -20 2 22 36 -30 3 33 48 -40 4 44 60 -50 5 55} + + +do_test tkt2927-5.1 { + db eval { + SELECT a, b FROM t1 + EXCEPT + SELECT a, b FROM t1 + } +} {} +do_test tkt2927-5.2 { + db eval { + SELECT a, b FROM t1 + EXCEPT + SELECT a, abs(b) FROM t1 + } +} {} +do_test tkt2927-5.3 { + db eval { + SELECT a, b FROM t1 + EXCEPT + SELECT abs(a), b FROM t1 + } +} {} +do_test tkt2927-5.4 { + db eval { + SELECT a, b FROM t1 + EXCEPT + SELECT abs(a), abs(b) FROM t1 + } +} {} +do_test tkt2927-5.5 { + db eval { + SELECT a, abs(b) FROM t1 + EXCEPT + SELECT a, b FROM t1 + } +} {} +do_test tkt2927-5.6 { + db eval { + SELECT a, abs(b) FROM t1 + EXCEPT + SELECT a, abs(b) FROM t1 + } +} {} +do_test tkt2927-5.7 { + db eval { + SELECT a, abs(b) FROM t1 + EXCEPT + SELECT abs(a), b FROM t1 + } +} {} +do_test tkt2927-5.8 { + db eval { + SELECT a, abs(b) FROM t1 + EXCEPT + SELECT abs(a), abs(b) FROM t1 + } +} {} +do_test tkt2927-5.9 { + db eval { + SELECT abs(a), b FROM t1 + EXCEPT + SELECT a, b FROM t1 + } +} {} +do_test tkt2927-5.10 { + db eval { + SELECT abs(a), b FROM t1 + EXCEPT + SELECT a, abs(b) FROM t1 + } +} {} +do_test tkt2927-5.11 { + db eval { + SELECT abs(a), b FROM t1 + EXCEPT + SELECT abs(a), b FROM t1 + } +} {} +do_test tkt2927-5.12 { + db eval { + SELECT abs(a), b FROM t1 + EXCEPT + SELECT abs(a), abs(b) FROM t1 + } +} {} +do_test tkt2927-5.13 { + db eval { + SELECT abs(a), abs(b) FROM t1 + EXCEPT + SELECT a, b FROM t1 + } +} {} +do_test tkt2927-5.14 { + db eval { + SELECT abs(a), abs(b) FROM t1 + 
EXCEPT + SELECT a, abs(b) FROM t1 + } +} {} +do_test tkt2927-5.15 { + db eval { + SELECT abs(a), abs(b) FROM t1 + EXCEPT + SELECT abs(a), b FROM t1 + } +} {} +do_test tkt2927-5.16 { + db eval { + SELECT abs(a), abs(b) FROM t1 + EXCEPT + SELECT abs(a), abs(b) FROM t1 + } +} {} + + +do_test tkt2927-6.1 { + db eval { + SELECT a, b FROM t1 + INTERSECT + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.2 { + db eval { + SELECT a, b FROM t1 + INTERSECT + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.3 { + db eval { + SELECT a, b FROM t1 + INTERSECT + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.4 { + db eval { + SELECT a, b FROM t1 + INTERSECT + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.5 { + db eval { + SELECT a, abs(b) FROM t1 + INTERSECT + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.6 { + db eval { + SELECT a, abs(b) FROM t1 + INTERSECT + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.7 { + db eval { + SELECT a, abs(b) FROM t1 + INTERSECT + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.8 { + db eval { + SELECT a, abs(b) FROM t1 + INTERSECT + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.9 { + db eval { + SELECT abs(a), b FROM t1 + INTERSECT + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.10 { + db eval { + SELECT abs(a), b FROM t1 + INTERSECT + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.11 { + db eval { + SELECT abs(a), b FROM t1 + INTERSECT + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.12 { + db eval { + SELECT abs(a), b FROM t1 + INTERSECT + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.13 { + db eval { + SELECT abs(a), abs(b) FROM t1 + INTERSECT + SELECT a, b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.14 { + db eval { + SELECT abs(a), abs(b) FROM t1 + INTERSECT + SELECT a, abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.15 { + db eval { + SELECT abs(a), abs(b) FROM t1 + INTERSECT + SELECT abs(a), b FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} +do_test tkt2927-6.16 { + db eval { + SELECT abs(a), abs(b) FROM t1 + INTERSECT + SELECT abs(a), abs(b) FROM t1 + ORDER BY 1 + } +} {1 11 2 22 3 33 4 44 5 55} + +# Ticket #3092 is the same bug. But another test case never hurts. +# +do_test tkt2927-7.1 { + db eval { + CREATE TABLE host ( + hostname text not null primary key, + consoleHost text, + consolePort text + ); + INSERT INTO "host" VALUES('aald04','aalp03','4'); + INSERT INTO "host" VALUES('aald17','aalp01','1'); + CREATE VIEW consolemap1a as + select hostname, consolehost, '/dev/cuaD0.' 
|| (consoleport-1) consoleport + from host where consolehost='aalp01'; + CREATE VIEW consolemap1b as + select hostname hostname, consolehost consolehost, '/dev/cuaD' || + substr('01',1+((consoleport-1)/16),1) || + substr('0123456789abcdef',1+((consoleport-1)%16),1) consoleport + from host where consolehost='aalp03'; + CREATE VIEW consolemap1 as + select * from consolemap1a + union + select * from consolemap1b; + SELECT * from consolemap1b; + } +} {aald04 aalp03 /dev/cuaD03} +do_test tkt2927-7.2 { + db eval { + SELECT * FROM consolemap1 + } +} {aald04 aalp03 /dev/cuaD03 aald17 aalp01 /dev/cuaD0.0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt2942.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt2942.test --- sqlite3-3.4.2/test/tkt2942.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt2942.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,62 @@ +# 2008 February 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #2942. +# +# Queries of the form: +# +# SELECT group_concat(x) FROM (SELECT * FROM table ORDER BY 1); +# +# The ORDER BY would be dropped by the query flattener. This used +# to not matter because aggregate functions sum(), min(), max(), avg(), +# and so forth give the same result regardless of the order of inputs. +# But with the addition of the group_concat() function, suddenly the +# order does matter. +# +# $Id: tkt2942.test,v 1.1 2008/02/15 14:33:04 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +do_test tkt2942.1 { + execsql { + create table t1(num int); + insert into t1 values (2); + insert into t1 values (1); + insert into t1 values (3); + insert into t1 values (4); + SELECT group_concat(num) FROM (SELECT num FROM t1 ORDER BY num DESC); + } +} {4,3,2,1} +do_test tkt2942.2 { + execsql { + SELECT group_concat(num) FROM (SELECT num FROM t1 ORDER BY num); + } +} {1,2,3,4} +do_test tkt2942.3 { + execsql { + SELECT group_concat(num) FROM (SELECT num FROM t1); + } +} {2,1,3,4} +do_test tkt2942.4 { + execsql { + SELECT group_concat(num) FROM (SELECT num FROM t1 ORDER BY rowid DESC); + } +} {4,3,1,2} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3080.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3080.test --- sqlite3-3.4.2/test/tkt3080.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3080.test 2009-06-12 03:37:59.000000000 +0100 @@ -0,0 +1,77 @@ +# 2008 April 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3080 +# +# Make sure that application-defined functions are able to recursively +# invoke SQL statements that create and drop virtual tables. 
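+#
+# The trick used below is to register the Tcl proc [execsql] as an SQL
+# function of the same name ("db function execsql execsql"), so that a
+# query like
+#
+#     SELECT execsql('CREATE TABLE t1(x)');
+#
+# runs a second, nested SQL statement while the outer SELECT is still
+# executing.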
+# +# $Id: tkt3080.test,v 1.2 2008/11/05 16:37:35 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3080.1 { + db function execsql execsql + db eval { + SELECT execsql('CREATE TABLE t1(x)'); + } + execsql {SELECT name FROM sqlite_master} +} {t1} +do_test tkt3080.2 { + db eval { + INSERT INTO t1 VALUES('CREATE TABLE t2(y);'); + SELECT execsql(x) FROM t1; + } + db eval { + SELECT name FROM sqlite_master; + } +} {t1 t2} +do_test tkt3080.3 { + execsql { + INSERT INTO t1 VALUES('CREATE TABLE t3(z); DROP TABLE t3;'); + } + catchsql { + SELECT execsql(x) FROM t1 WHERE rowid=2; + } +} {1 {database table is locked}} +do_test tkt3080.4 { + db eval { + SELECT name FROM sqlite_master; + } +} {t1 t2 t3} + +ifcapable vtab { + register_echo_module [sqlite3_connection_pointer db] + do_test tkt3080.10 { + set sql { + CREATE VIRTUAL TABLE t4 USING echo(t2); + INSERT INTO t4 VALUES(123); + DROP TABLE t4; + } + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES($sql); + } + db eval { + SELECT execsql(x) FROM t1 + } + execsql {SELECT name FROM sqlite_master} + } {t1 t2 t3} + do_test tkt3080.11 { + execsql {SELECT * FROM t2} + } {123} +} + + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3093.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3093.test --- sqlite3-3.4.2/test/tkt3093.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3093.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,86 @@ +# 2008 May 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3093 +# +# Verify that a busy callback waiting on a reserved lock resolves +# once the lock clears. +# +# $Id: tkt3093.test,v 1.2 2008/05/02 14:23:55 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Set up a test database +# +do_test tkt3093.1 { + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + SELECT * FROM t1 + } +} {1} + +# Establish a separate, independent connection to that database. +# +do_test tkt3093.2 { + catch {sqlite3_enable_shared_cache 0} + sqlite3 db2 test.db + db2 eval { + SELECT * FROM t1 + } +} {1} + +# Make sure that clearing a lock allows a pending request for +# a reserved lock to continue. +# +do_test tkt3093.3 { + # This will be the busy callback for connection db2. On the first + # busy callback, commit the transaction in db. This should clear + # the lock so that there should not be a second callback. If the + # busy handler is called a second time, then fail so that we get + # timeout. + proc busy_callback {cnt} { + if {$cnt==0} { + db eval COMMIT + return 0 + } else { + return 1 + } + } + db2 busy ::busy_callback + + # Start a write transaction on db. + db eval { + BEGIN; + INSERT INTO t1 VALUES(2); + } + + # Attempt to modify the database on db2 + catchsql { + UPDATE t1 SET x=x+1; + } db2 +} {0 {}} + +# Verify that everything worked as expected. The db transaction should +# have gone first and added entry 2. Then the db2 transaction would have +# run and added one to each entry. 
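+#
+# In other words: t1 starts out as {1}; the transaction on db appends
+# the value 2, giving {1 2}; then db2's "UPDATE t1 SET x=x+1" turns
+# that into {2 3}.  Both connections should therefore now see the rows
+# 2 and 3.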
+# +do_test tkt3093.4 { + db eval {SELECT * FROM t1} +} {2 3} +do_test tkt3093.5 { + db2 eval {SELECT * FROM t1} +} {2 3} +db2 close + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3121.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3121.test --- sqlite3-3.4.2/test/tkt3121.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3121.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,50 @@ +# 2008 May 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: tkt3121.test,v 1.2 2008/07/12 14:52:21 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +# Register the module +register_echo_module [sqlite3_connection_pointer db] + +do_test vtabD-1.1 { + execsql { + PRAGMA encoding = 'utf16'; + + CREATE TABLE r1(field); + CREATE TABLE r2(col PRIMARY KEY, descr); + + INSERT INTO r1 VALUES('abcd'); + INSERT INTO r2 VALUES('abcd', 'A nice description'); + INSERT INTO r2 VALUES('efgh', 'Another description'); + + CREATE VIRTUAL TABLE t1 USING echo(r1); + CREATE VIRTUAL TABLE t2 USING echo(r2); + } +} {} + +do_test vtabD-1.2 { + execsql { + select + t1.field as Field, + t2.descr as Descr + from t1 inner join t2 on t1.field = t2.col order by t1.field + } +} {abcd {A nice description}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3201.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3201.test --- sqlite3-3.4.2/test/tkt3201.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3201.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,74 @@ +# 2008 July 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# Specifically, it tests that bug #3201 has been fixed. 
+# +# $Id: tkt3201.test,v 1.3 2008/07/12 14:52:21 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3201-1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + } +} {} + +do_test tkt3201-2 { + execsql { + SELECT l.a, r.a FROM t1 AS l, t1 AS r WHERE l.a < r.a; + } +} {1 2} + +do_test tkt3201-3 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b TEXT); + INSERT INTO t2 VALUES(2, 'two'); + } + execsql { + SELECT l.a, r.a FROM t1 AS l, t2 AS r WHERE l.a < r.a; + } +} {1 2} + +do_test tkt3201-4 { + execsql { + DELETE FROM t1 WHERE a = 2; + } + execsql { + SELECT l.a, r.a FROM t1 AS l, t2 AS r WHERE l.a < r.a; + } +} {1 2} + +do_test tkt3201-5 { + execsql { + DELETE FROM t1 WHERE a = 2; + } + execsql { + SELECT t1.a, t1.b, t2.a, t2.b FROM t1, t2; + } +} {1 one 2 two} + +do_test tkt3201-6 { + execsql { + CREATE TABLE t3(c INTEGER PRIMARY KEY, d TEXT); + INSERT INTO t3 VALUES(2, 'two'); + } + execsql { SELECT a, b, c, d FROM t1, t3 } +} {1 one 2 two} + +do_test tkt3201-7 { + execsql { SELECT a, b, c, d FROM t1, t3 WHERE a < c } +} {1 one 2 two} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3292.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3292.test --- sqlite3-3.4.2/test/tkt3292.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3292.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,61 @@ +# 2008 August 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# Specifically, it tests the behavior of the sqlite3VdbeRecordCompare() +# routine in cases where the rowid is 0 or 1 in file format 4 +# (meaning that the rowid has type code 8 or 9 with zero bytes of +# data). Ticket #3292. 
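+#
+# In the newer record format (selected below with PRAGMA
+# legacy_file_format=OFF), serial type 8 encodes the integer constant 0
+# and serial type 9 encodes the constant 1, each using zero bytes of
+# payload.  The rows with rowid 0 and 1 therefore produce index entries
+# whose rowid field occupies no space at all, which is the corner case
+# that sqlite3VdbeRecordCompare() has to handle correctly.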
+# +# $Id: tkt3292.test,v 1.1 2008/08/13 14:07:41 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3292-1.1 { + execsql { + PRAGMA legacy_file_format=OFF; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b INT); + INSERT INTO t1 VALUES(0, 1); + INSERT INTO t1 VALUES(1, 1); + INSERT INTO t1 VALUES(2, 1); + CREATE INDEX i1 ON t1(b); + SELECT * FROM t1 WHERE b>=1; + } +} {0 1 1 1 2 1} +do_test tkt3292-1.2 { + execsql { + INSERT INTO t1 VALUES(3, 0); + INSERT INTO t1 VALUES(4, 2); + SELECT * FROM t1 WHERE b>=1; + } +} {0 1 1 1 2 1 4 2} + + +do_test tkt3292-2.1 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b, c, d); + INSERT INTO t2 VALUES(0, 1, 'hello', x'012345'); + INSERT INTO t2 VALUES(1, 1, 'hello', x'012345'); + INSERT INTO t2 VALUES(2, 1, 'hello', x'012345'); + CREATE INDEX i2 ON t2(b,c,d); + SELECT a FROM t2 WHERE b=1 AND c='hello' AND d>=x'012345'; + } +} {0 1 2} +do_test tkt3292-2.2 { + execsql { + INSERT INTO t2 VALUES(3, 1, 'hello', x'012344'); + INSERT INTO t2 VALUES(4, 1, 'hello', x'012346'); + SELECT a FROM t2 WHERE b=1 AND c='hello' AND d>=x'012345'; + } +} {0 1 2 4} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3298.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3298.test --- sqlite3-3.4.2/test/tkt3298.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3298.test 2009-06-25 12:22:34.000000000 +0100 @@ -0,0 +1,80 @@ +# 2008 August 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file tests changes to the name resolution logic that occurred +# in august of 2008 and where associated with tickets #3298 and #3301 +# +# $Id: tkt3298.test,v 1.3 2009/04/07 14:14:23 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !trigger { finish_test ; return } + +do_test tkt3298-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b INT); + INSERT INTO t1 VALUES(0, 1); + INSERT INTO t1 VALUES(1, 1); + INSERT INTO t1 VALUES(2, 1); + CREATE VIEW v1 AS SELECT a AS x, b+1 AS y FROM t1; + CREATE TRIGGER r1 INSTEAD OF UPDATE ON v1 + BEGIN + UPDATE t1 SET b=new.y-1 WHERE a=new.x; + END; + CREATE TRIGGER r2 INSTEAD OF DELETE ON v1 + BEGIN + DELETE FROM t1 WHERE a=old.x; + END; + SELECT * FROM v1 ORDER BY x; + } +} {0 2 1 2 2 2} +do_test tkt3298-1.2 { + execsql { + UPDATE v1 SET y=3 WHERE x=0; + SELECT * FROM v1 ORDER by x; + } +} {0 3 1 2 2 2} +do_test tkt3298-1.3 { + execsql { + UPDATE v1 SET y=4 WHERE v1.x=2; + SELECT * FROM v1 ORDER by x; + } +} {0 3 1 2 2 4} +do_test tkt3298-1.4 { + execsql { + DELETE FROM v1 WHERE x=1; + SELECT * FROM v1 ORDER BY x; + } +} {0 3 2 4} +do_test tkt3298-1.5 { + execsql { + DELETE FROM v1 WHERE v1.x=2; + SELECT * FROM v1 ORDER BY x; + } +} {0 3} + +# Ticket #3301 +# +do_test tkt3298-2.1 { + execsql { + CREATE TABLE t2(p,q); + INSERT INTO t2 VALUES(1,11); + INSERT INTO t2 VALUES(2,22); + CREATE TABLE t3(x,y); + INSERT INTO t3 VALUES(1,'one'); + + SELECT *, (SELECT z FROM (SELECT y AS z FROM t3 WHERE x=t1.a+1) ) FROM t1; + } +} {0 2 one} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3334.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3334.test --- sqlite3-3.4.2/test/tkt3334.test 1970-01-01 01:00:00.000000000 +0100 +++ 
sqlite3-3.6.16/test/tkt3334.test 2009-06-05 18:03:43.000000000 +0100 @@ -0,0 +1,84 @@ +# 2008 August 26 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. +# Specifically, it tests that bug #3334 has been fixed by the +# addition of restriction (19) to the subquery flattener optimization. +# +# $Id: tkt3334.test,v 1.1 2008/08/26 12:56:14 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3334-1.0 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,934); + INSERT INTO t1 VALUES(2,221); + INSERT INTO t1 VALUES(1,372); + INSERT INTO t1 VALUES(3,552); + INSERT INTO t1 VALUES(1,719); + INSERT INTO t1 VALUES(4,102); + SELECT * FROM t1 ORDER BY b; + } +} {4 102 2 221 1 372 3 552 1 719 1 934} + +do_test tkt3334-1.1 { + execsql { + SELECT a FROM (SELECT a FROM t1 ORDER BY b LIMIT 2) WHERE a=1; + } +} {} +do_test tkt3334-1.2 { + execsql { + SELECT count(*) FROM (SELECT a FROM t1 ORDER BY b LIMIT 2) WHERE a=1; + } +} {0} +do_test tkt3334-1.3 { + execsql { + SELECT a FROM (SELECT a FROM t1 ORDER BY b LIMIT 3) WHERE a=1; + } +} {1} +do_test tkt3334-1.4 { + execsql { + SELECT count(*) FROM (SELECT a FROM t1 ORDER BY b LIMIT 3) WHERE a=1; + } +} {1} +do_test tkt3334-1.5 { + execsql { + SELECT a FROM (SELECT a FROM t1 ORDER BY b LIMIT 99) WHERE a=1; + } +} {1 1 1} +do_test tkt3334-1.6 { + execsql { + SELECT count(*) FROM (SELECT a FROM t1 ORDER BY b LIMIT 99) WHERE a=1; + } +} {3} +do_test tkt3334-1.7 { + execsql { + SELECT a FROM (SELECT a FROM t1 ORDER BY b) WHERE a=1; + } +} {1 1 1} +do_test tkt3334-1.8 { + execsql { + SELECT count(*) FROM (SELECT a FROM t1 ORDER BY b) WHERE a=1; + } +} {3} +do_test tkt3334-1.9 { + execsql { + SELECT a FROM (SELECT a FROM t1) WHERE a=1; + } +} {1 1 1} +do_test tkt3334-1.10 { + execsql { + SELECT count(*) FROM (SELECT a FROM t1) WHERE a=1; + } +} {3} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3346.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3346.test --- sqlite3-3.4.2/test/tkt3346.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3346.test 2009-06-12 03:37:59.000000000 +0100 @@ -0,0 +1,67 @@ +# 2008 September 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. 
The +# focus of this file is testing the fix for ticket #3346 +# +# $Id: tkt3346.test,v 1.3 2008/12/09 13:12:57 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3346-1.1 { + db eval { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(2,'bob'); + INSERT INTO t1 VALUES(1,'alice'); + INSERT INTO t1 VALUES(3,'claire'); + SELECT *, ( SELECT y FROM (SELECT x.b='alice' AS y) ) + FROM ( SELECT * FROM t1 ) AS x; + } +} {2 bob 0 1 alice 1 3 claire 0} +do_test tkt3346-1.2 { + db eval { + SELECT b FROM (SELECT * FROM t1) AS x + WHERE (SELECT y FROM (SELECT x.b='alice' AS y))=0 + } +} {bob claire} +do_test tkt3346-1.3 { + db eval { + SELECT b FROM (SELECT * FROM t1 ORDER BY a) AS x + WHERE (SELECT y FROM (SELECT a||b y FROM t1 WHERE t1.b=x.b))=(x.a||x.b) + } +} {alice bob claire} +do_test tkt3346-1.4 { + db eval { + SELECT b FROM (SELECT * FROM t1 ORDER BY a) AS x + WHERE (SELECT y FROM (SELECT a||b y FROM t1 WHERE t1.b=x.b))=('2'||x.b) + } +} {bob} + +# Ticket #3530 +# +# As shown by ticket #3346 above (see also ticket #3298) it is important +# that a subquery in the result-set be able to look up through multiple +# FROM levels in order to view tables in the FROM clause at the top level. +# +# But ticket #3530 shows us that a subquery in the FROM clause should not +# be able to look up to higher levels: +# +do_test tkt3346-2.1 { + catchsql { + CREATE TABLE t2(a); + INSERT INTO t2 VALUES(1); + + SELECT * FROM (SELECT * FROM t1 WHERE 1=x.a) AS x; + } +} {1 {no such column: x.a}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3357.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3357.test --- sqlite3-3.4.2/test/tkt3357.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3357.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,64 @@ +# 2008 September 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. The +# focus of this file is testing the fix for ticket #3357. 
+# +# $Id: tkt3357.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3357-1.1 { + execsql { + create table a(id integer primary key, b_id integer, myvalue varchar); + create table b(id integer primary key, bvalue varchar); + insert into a(b_id, myvalue) values(1,'Test'); + insert into a(b_id, myvalue) values(1,'Test2'); + insert into a(b_id, myvalue) values(1,'Test3'); + insert into b(bvalue) values('btest'); + } +} {} + +do_test tkt3357-1.2 { + execsql { + SELECT cc.id, cc.b_id, cc.myvalue, dd.bvalue + FROM ( + SELECT DISTINCT a.id, a.b_id, a.myvalue FROM a + INNER JOIN b ON a.b_id = b.id WHERE b.bvalue = 'btest' + ) cc + LEFT OUTER JOIN b dd ON cc.b_id = dd.id + } +} {1 1 Test btest 2 1 Test2 btest 3 1 Test3 btest} + +do_test tkt3357-1.3 { + execsql { + SELECT cc.id, cc.b_id, cc.myvalue + FROM ( + SELECT a.id, a.b_id, a.myvalue + FROM a, b WHERE a.b_id = b.id + ) cc + LEFT OUTER JOIN b dd ON cc.b_id = dd.id + } +} {1 1 Test 2 1 Test2 3 1 Test3} + +do_test tkt3357-1.4 { + execsql { + SELECT cc.id, cc.b_id, cc.myvalue + FROM ( + SELECT DISTINCT a.id, a.b_id, a.myvalue + FROM a, b WHERE a.b_id = b.id + ) cc + LEFT OUTER JOIN b dd ON cc.b_id = dd.id + } +} {1 1 Test 2 1 Test2 3 1 Test3} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3419.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3419.test --- sqlite3-3.4.2/test/tkt3419.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3419.test 2008-10-06 16:31:13.000000000 +0100 @@ -0,0 +1,73 @@ +# 2008 October 06 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. The +# focus of this file is testing the fix for ticket #3419. +# Ticket #3419 is really a duplication of #3408 and had already +# been fixed by the time it was reported. But it never hurts to +# add new test cases. 
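+#
+# The tests below run the same three-way join once for each of the six
+# possible orderings of a, b and c in the FROM clause.  Only the column
+# order of the output should change; the single matching row
+# (a.id=2, b.id=9, c.id=8) must come out the same every time.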
+# +# $Id: tkt3419.test,v 1.1 2008/10/06 15:31:13 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3419-1.1 { + execsql { + create table a(id integer primary key); + create table b(id integer primary key, a_id integer); + create table c(id integer primary key, b_id integer); + + insert into a values (1); + insert into a values (2); + + insert into b values (3, 1); + insert into b values (4, 1); + insert into b values (5, 1); + insert into b values (6, 1); + insert into b values (9, 2); + + insert into c values (4, 3); + insert into c values (5, 5); + insert into c values (6, 4); + insert into c values (7, 6); + insert into c values (8, 9); + + select * FROM a, b, c WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {2 9 2 8 9} +do_test tkt3419-1.2 { + execsql { + select * FROM a, c, b WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {2 8 9 9 2} +do_test tkt3419-1.3 { + execsql { + select * FROM b, a, c WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {9 2 2 8 9} +do_test tkt3419-1.4 { + execsql { + select * FROM b, c, a WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {9 2 8 9 2} +do_test tkt3419-1.5 { + execsql { + select * FROM c, a, b WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {8 9 2 9 2} +do_test tkt3419-1.6 { + execsql { + select * FROM c, b, a WHERE a.id=2 AND b.a_id = a.id AND b.id=c.b_id; + } +} {8 9 9 2 2} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3424.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3424.test --- sqlite3-3.4.2/test/tkt3424.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3424.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,53 @@ +# 2008 October 06 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# +# $Id: tkt3424.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3424-1.1 { + execsql { + CREATE TABLE names(id INTEGER, data TEXT, code TEXT); + INSERT INTO names VALUES(1,'E1','AAA'); + INSERT INTO names VALUES(2,NULL,'BBB'); + + CREATE TABLE orig(code TEXT, data TEXT); + INSERT INTO orig VALUES('AAA','E1'); + INSERT INTO orig VALUES('AAA','E2'); + INSERT INTO orig VALUES('AAA','E3'); + INSERT INTO orig VALUES('AAA','E4'); + INSERT INTO orig VALUES('AAA','E5'); + } +} {} + +do_test tkt3424-1.2 { + execsql { + SELECT * FROM + names LEFT OUTER JOIN orig + ON names.data = orig.data AND names.code = orig.code; + } +} {1 E1 AAA AAA E1 2 {} BBB {} {}} + +do_test tkt3424-1.3 { + execsql { CREATE INDEX udx_orig_code_data ON orig(code, data) } +} {} + +do_test tkt3424-1.4 { + execsql { + SELECT * FROM + names LEFT OUTER JOIN orig + ON names.data = orig.data AND names.code = orig.code; + } +} {1 E1 AAA AAA E1 2 {} BBB {} {}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3442.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3442.test --- sqlite3-3.4.2/test/tkt3442.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3442.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,72 @@ +# 2008 October 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #3442 has been +# fixed. +# +# +# $Id: tkt3442.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +# Create a schema with some indexes. +# +do_test tkt3442-1.1 { + execsql { + CREATE TABLE listhash( + key INTEGER PRIMARY KEY, + id TEXT, + node INTEGER + ); + CREATE UNIQUE INDEX ididx ON listhash(id); + } +} {} + + +# Explain Query Plan +# +proc EQP {sql} { + uplevel "execsql {EXPLAIN QUERY PLAN $sql}" +} + + +# These tests perform an EXPLAIN QUERY PLAN on both versions of the +# SELECT referenced in ticket #3442 (both '5000' and "5000") +# and verify that the query plan is the same. +# +ifcapable explain { + do_test tkt3442-1.2 { + EQP { SELECT node FROM listhash WHERE id='5000' LIMIT 1; } + } {0 0 {TABLE listhash WITH INDEX ididx}} + do_test tkt3442-1.3 { + EQP { SELECT node FROM listhash WHERE id="5000" LIMIT 1; } + } {0 0 {TABLE listhash WITH INDEX ididx}} +} + + +# Some extra tests testing other permutations of 5000. +# +ifcapable explain { + do_test tkt3442-1.4 { + EQP { SELECT node FROM listhash WHERE id=5000 LIMIT 1; } + } {0 0 {TABLE listhash WITH INDEX ididx}} +} +do_test tkt3442-1.5 { + catchsql { + SELECT node FROM listhash WHERE id=[5000] LIMIT 1; + } +} {1 {no such column: 5000}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3457.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3457.test --- sqlite3-3.4.2/test/tkt3457.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3457.test 2009-06-26 15:19:23.000000000 +0100 @@ -0,0 +1,87 @@ +# 2008 October 29 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: tkt3457.test,v 1.3 2009/06/26 07:12:07 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {$tcl_platform(platform) != "unix"} { + finish_test + return +} + +#----------------------------------------------------------------------- +# To roll back a hot-journal file, the application needs read and write +# permission on the journal file in question. The following tests test +# the outcome of trying to rollback a hot-journal file when this is not +# the case. +# +# tkt3457-1.2: Application has neither read, nor write permission on +# the hot-journal file. Result: SQLITE_CANTOPEN. +# +# tkt3457-1.3: Application has write but not read permission on +# the hot-journal file. Result: SQLITE_CANTOPEN. +# +# tkt3457-1.4: Application has read but not write permission on +# the hot-journal file. Result: SQLITE_CANTOPEN. +# +# tkt3457-1.5: Application has read/write permission on the hot-journal +# file. Result: SQLITE_OK. +# +do_test tkt3457-1.1 { + execsql { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + BEGIN; + INSERT INTO t1 VALUES(4, 5, 6); + } + + file copy -force test.db bak.db + file copy -force test.db-journal bak.db-journal + + # Fix the first journal-header in the journal-file. 
Because the + # journal file has not yet been synced, the 8-byte magic string at the + # start of the first journal-header has not been written by SQLite. + # So write it now. + set fd [open bak.db-journal a+] + fconfigure $fd -encoding binary -translation binary + seek $fd 0 + puts -nonewline $fd "\xd9\xd5\x05\xf9\x20\xa1\x63\xd7" + close $fd + + execsql COMMIT +} {} + +do_test tkt3457-1.2 { + file copy -force bak.db-journal test.db-journal + file attributes test.db-journal -permissions --------- + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} +do_test tkt3457-1.3 { + file copy -force bak.db-journal test.db-journal + file attributes test.db-journal -permissions -w--w--w- + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} +do_test tkt3457-1.4 { + file copy -force bak.db-journal test.db-journal + file attributes test.db-journal -permissions r--r--r-- + catchsql { SELECT * FROM t1 } +} {1 {unable to open database file}} + +do_test tkt3457-1.5 { + file copy -force bak.db-journal test.db-journal + file attributes test.db-journal -permissions rw-rw-rw- + catchsql { SELECT * FROM t1 } +} {0 {1 2 3 4 5 6}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3461.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3461.test --- sqlite3-3.4.2/test/tkt3461.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3461.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,67 @@ +# 2008 October 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #3461 has been +# fixed. +# +# $Id: tkt3461.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +#################################### +#################################### +# REMOVE THESE TWO LINES: +#################################### +#################################### +#finish_test +#return + +do_test tkt3461-1.1 { + execsql { + CREATE TABLE t1(a, b); + INSERT INTO t1 VALUES(1, 2); + } +} {} + +do_test tkt3461-1.2 { + execsql { SELECT a, b+1 AS b_plus_one FROM t1 WHERE a=1 } +} {1 3} + +do_test tkt3461-1.3 { + # explain { SELECT a, b+1 AS b_plus_one FROM t1 WHERE a=1 OR b_plus_one } + # execsql { PRAGMA vdbe_trace = 1; PRAGMA vdbe_listing=1 } + execsql { SELECT a, b+1 AS b_plus_one FROM t1 WHERE a=1 OR b_plus_one } +} {1 3} + +do_test tkt3461-2.1 { + execsql { + SELECT a, b+1 AS b_plus_one + FROM t1 + WHERE CASE WHEN a=1 THEN 1 ELSE b_plus_one END + } +} {1 3} + +do_test tkt3461-3.1 { + execsql { + CREATE TABLE t2(c, d); + INSERT INTO t2 VALUES(3, 4); + } + # execsql { PRAGMA vdbe_trace = 1; PRAGMA vdbe_listing=1 } + execsql { + SELECT a, b+1 AS b_plus_one, c, d + FROM t1 LEFT JOIN t2 + ON (a=c AND d=b_plus_one) + } +} {1 3 {} {}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3472.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3472.test --- sqlite3-3.4.2/test/tkt3472.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3472.test 2008-12-03 22:32:45.000000000 +0000 @@ -0,0 +1,39 @@ +# 2008 November 11 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: tkt3472.test,v 1.4 2008/12/03 22:32:45 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {![info exists threadsOverrideEachOthersLocks]} { + finish_test + return +} + +set ::correctvalue $threadsOverrideEachOthersLocks +puts "threadsOverrideEachOthersLocks = $::correctvalue" + +do_test tkt3472-1.1 { + db close + set threadsOverrideEachOthersLocks -1 + sqlite3 db test.db + set threadsOverrideEachOthersLocks +} $::correctvalue + +do_test tkt3472-1.2 { + db close + set threadsOverrideEachOthersLocks -1 + sqlite3 db test.db -readonly 1 + set threadsOverrideEachOthersLocks +} $::correctvalue + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3493.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3493.test --- sqlite3-3.4.2/test/tkt3493.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3493.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,153 @@ +# 2008 October 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests that affinities and collation sequences are correctly applied +# in aggregate queries. +# +# $Id: tkt3493.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3493-1.1 { + execsql { + BEGIN; + CREATE TABLE A (id INTEGER PRIMARY KEY AUTOINCREMENT, val TEXT); + INSERT INTO A VALUES(1,'123'); + INSERT INTO A VALUES(2,'456'); + CREATE TABLE B (id INTEGER PRIMARY KEY AUTOINCREMENT, val TEXT); + INSERT INTO B VALUES(1,1); + INSERT INTO B VALUES(2,2); + CREATE TABLE A_B (B_id INTEGER NOT NULL, A_id INTEGER); + INSERT INTO A_B VALUES(1,1); + INSERT INTO A_B VALUES(2,2); + COMMIT; + } +} {} +do_test tkt3493-1.2 { + execsql { + SELECT + CASE + WHEN B.val = 1 THEN 'XYZ' + ELSE A.val + END AS Col1 + FROM B + LEFT OUTER JOIN A_B ON B.id = A_B.B_id + LEFT OUTER JOIN A ON A.id = A_B.A_id + ORDER BY Col1 ASC; + } +} {456 XYZ} +do_test tkt3493-1.3 { + execsql { + SELECT DISTINCT + CASE + WHEN B.val = 1 THEN 'XYZ' + ELSE A.val + END AS Col1 + FROM B + LEFT OUTER JOIN A_B ON B.id = A_B.B_id + LEFT OUTER JOIN A ON A.id = A_B.A_id + ORDER BY Col1 ASC; + } +} {456 XYZ} +do_test tkt3493-1.4 { + execsql { + SELECT b.val, CASE WHEN b.val = 1 THEN 'xyz' ELSE b.val END AS col1 FROM b; + } +} {1 xyz 2 2} +do_test tkt3493-1.5 { + execsql { + SELECT DISTINCT + b.val, + CASE WHEN b.val = 1 THEN 'xyz' ELSE b.val END AS col1 + FROM b; + } +} {1 xyz 2 2} +do_test tkt3493-1.6 { + execsql { + SELECT DISTINCT + b.val, + CASE WHEN b.val = '1' THEN 'xyz' ELSE b.val END AS col1 + FROM b; + } +} {1 xyz 2 2} + + +do_test tkt3493-2.1 { + execsql { + CREATE TABLE t1(a TEXT, b INT); + INSERT INTO t1 VALUES(123, 456); + } +} {} +do_test tkt3493-2.2.1 { + execsql { SELECT a=123 FROM t1 GROUP BY a } +} {1} +do_test tkt3493-2.2.2 { + execsql { SELECT a=123 FROM t1 } +} {1} +do_test tkt3493-2.2.3 { + execsql { SELECT a='123' FROM t1 } +} {1} +do_test 
tkt3493-2.2.4 { + execsql { SELECT count(*), a=123 FROM t1 } +} {1 1} +do_test tkt3493-2.2.5 { + execsql { SELECT count(*), +a=123 FROM t1 } +} {1 0} +do_test tkt3493-2.3.3 { + execsql { SELECT b='456' FROM t1 GROUP BY a } +} {1} +do_test tkt3493-2.3.1 { + execsql { SELECT b='456' FROM t1 GROUP BY b } +} {1} +do_test tkt3493-2.3.2 { + execsql { SELECT b='456' FROM t1 } +} {1} +do_test tkt3493-2.4.1 { + execsql { SELECT typeof(a), a FROM t1 GROUP BY a HAVING a=123 } +} {text 123} +do_test tkt3493-2.4.2 { + execsql { SELECT typeof(a), a FROM t1 GROUP BY b HAVING a=123 } +} {text 123} +do_test tkt3493-2.5.1 { + execsql { SELECT typeof(b), b FROM t1 GROUP BY a HAVING b='456' } +} {integer 456} +do_test tkt3493-2.5.2 { + execsql { SELECT typeof(b), b FROM t1 GROUP BY b HAVING b='456' } +} {integer 456} + +do_test tkt3493-3.1 { + execsql { + CREATE TABLE t2(a COLLATE NOCASE, b COLLATE BINARY); + INSERT INTO t2 VALUES('aBc', 'DeF'); + } +} {} +do_test tkt3493-3.2.1 { + execsql { SELECT a='abc' FROM t2 GROUP BY a } +} {1} +do_test tkt3493-3.2.2 { + execsql { SELECT a='abc' FROM t2 } +} {1} + +do_test tkt3493-3.3.1 { + execsql { SELECT a>b FROM t2 GROUP BY a, b} +} {0} +do_test tkt3493-3.3.2 { + execsql { SELECT a>b COLLATE BINARY FROM t2 GROUP BY a, b} +} {1} +do_test tkt3493-3.3.3 { + execsql { SELECT b>a FROM t2 GROUP BY a, b} +} {0} +do_test tkt3493-3.3.4 { + execsql { SELECT b>a COLLATE NOCASE FROM t2 GROUP BY a, b} +} {1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3508.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3508.test --- sqlite3-3.4.2/test/tkt3508.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3508.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,38 @@ +# 2008 November 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# $Id: tkt3508.test,v 1.5 2009/05/28 01:00:56 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3508-1.1 { + catchsql { + CREATE TABLE modificationsTmp ( + SUBSTRATE_HPRD_ID VARCHAR(80), + SUBSTRATE_GENE_SYMBOL VARCHAR(80), + SUBSTRATE_ISOFORM_ID VARCHAR(80), + SUBSTRATE_REFSEQ_ID VARCHAR(80), + SITE INTEGER, + RESIDUE VARCHAR(80), + ENZYME_NAME VARCHAR(80), + ENZYME_HPRD_ID VARCHAR(80), + MODIFICATION_TYPE VARCHAR(80), + EXPERIMENT_TYPE VARCHAR(80), + REFERENCE_ID VARCHAR(80) + ); + select SUBSTRATE_HPRD_ID, count(substrate_refseq_id) as c + from modificationsTmp where c > 1 group by SUBSTRATE_HPRD_ID; + } +} {1 {misuse of aggregate: count()}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3522.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3522.test --- sqlite3-3.4.2/test/tkt3522.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3522.test 2008-12-05 00:00:07.000000000 +0000 @@ -0,0 +1,43 @@ +# 2008 December 4 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
+# +# This file is a verification that the bugs identified in ticket +# #3522 have been fixed. +# +# $Id: tkt3522.test,v 1.1 2008/12/05 00:00:07 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3522-1.1 { + db eval { + CREATE TABLE tab4( + col0 INTEGER, + col1 INTEGER, + col2 INTEGER, + col3 INTEGER, + col4 INTEGER + ); + SELECT cor1.* + FROM tab4 AS cor0 + JOIN tab4 AS cor1 USING ( col4, col3, col2, col1, col0 ); + } +} {} +do_test tkt3522-1.2 { + db eval { + CREATE TABLE tab1(col0 INTEGER); + CREATE TABLE tab2(col0 INTEGER); + SELECT cor0.* FROM tab1 NATURAL JOIN tab2 AS cor0; + } +} {} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3527.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3527.test --- sqlite3-3.4.2/test/tkt3527.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3527.test 2008-12-08 13:42:36.000000000 +0000 @@ -0,0 +1,118 @@ +# 2008 December 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is a verification that the bugs identified in ticket +# #3527 have been fixed. +# +# $Id: tkt3527.test,v 1.1 2008/12/08 13:42:36 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3527-1.1 { + db eval { + CREATE TABLE Element ( + Code INTEGER PRIMARY KEY, + Name VARCHAR(60) + ); + + CREATE TABLE ElemOr ( + CodeOr INTEGER NOT NULL, + Code INTEGER NOT NULL, + PRIMARY KEY(CodeOr,Code) + ); + + CREATE TABLE ElemAnd ( + CodeAnd INTEGER, + Code INTEGER, + Attr1 INTEGER, + Attr2 INTEGER, + Attr3 INTEGER, + PRIMARY KEY(CodeAnd,Code) + ); + + INSERT INTO Element VALUES(1,'Elem1'); + INSERT INTO Element VALUES(2,'Elem2'); + INSERT INTO Element VALUES(3,'Elem3'); + INSERT INTO Element VALUES(4,'Elem4'); + INSERT INTO Element VALUES(5,'Elem5'); + INSERT INTO ElemOr Values(3,4); + INSERT INTO ElemOr Values(3,5); + INSERT INTO ElemAnd VALUES(1,3,1,1,1); + INSERT INTO ElemAnd VALUES(1,2,1,1,1); + + CREATE VIEW ElemView1 AS + SELECT + CAST(Element.Code AS VARCHAR(50)) AS ElemId, + Element.Code AS ElemCode, + Element.Name AS ElemName, + ElemAnd.Code AS InnerCode, + ElemAnd.Attr1 AS Attr1, + ElemAnd.Attr2 AS Attr2, + ElemAnd.Attr3 AS Attr3, + 0 AS Level, + 0 AS IsOrElem + FROM Element JOIN ElemAnd ON ElemAnd.CodeAnd=Element.Code + WHERE ElemAnd.CodeAnd NOT IN (SELECT CodeOr FROM ElemOr) + UNION ALL + SELECT + CAST(ElemOr.CodeOr AS VARCHAR(50)) AS ElemId, + Element.Code AS ElemCode, + Element.Name AS ElemName, + ElemOr.Code AS InnerCode, + NULL AS Attr1, + NULL AS Attr2, + NULL AS Attr3, + 0 AS Level, + 1 AS IsOrElem + FROM ElemOr JOIN Element ON Element.Code=ElemOr.CodeOr + ORDER BY ElemId, InnerCode; + + CREATE VIEW ElemView2 AS + SELECT + ElemId, + ElemCode, + ElemName, + InnerCode, + Attr1, + Attr2, + Attr3, + Level, + IsOrElem + FROM ElemView1 + UNION ALL + SELECT + Element.ElemId || '.' 
|| InnerElem.ElemId AS ElemId, + InnerElem.ElemCode, + InnerElem.ElemName, + InnerElem.InnerCode, + InnerElem.Attr1, + InnerElem.Attr2, + InnerElem.Attr3, + InnerElem.Level+1, + InnerElem.IsOrElem + FROM ElemView1 AS Element + JOIN ElemView1 AS InnerElem + ON Element.Level=0 AND Element.InnerCode=InnerElem.ElemCode + ORDER BY ElemId, InnerCode; + + SELECT * FROM ElemView1; + } +} {1 1 Elem1 2 1 1 1 0 0 1 1 Elem1 3 1 1 1 0 0 3 3 Elem3 4 {} {} {} 0 1 3 3 Elem3 5 {} {} {} 0 1} + +do_test tkt3527-1.2 { + db eval { + SELECT * FROM ElemView2; + } +} {1 1 Elem1 2 1 1 1 0 0 1 1 Elem1 3 1 1 1 0 0 1.3 3 Elem3 4 {} {} {} 1 1 1.3 3 Elem3 5 {} {} {} 1 1 3 3 Elem3 4 {} {} {} 0 1 3 3 Elem3 5 {} {} {} 0 1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3541.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3541.test --- sqlite3-3.4.2/test/tkt3541.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3541.test 2008-12-15 15:27:52.000000000 +0000 @@ -0,0 +1,35 @@ +# 2008 December 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file is a verification that the bugs identified in ticket +# #3541 have been fixed. +# +# $Id: tkt3541.test,v 1.1 2008/12/15 15:27:52 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3541-1.1 { + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(123); + SELECT CASE ~max(x) WHEN min(x) THEN 1 ELSE max(x) END FROM t1; + } +} {123} +do_test tkt3541-1.2 { + db eval { + SELECT CASE NOT max(x) WHEN min(x) THEN 1 ELSE max(x) END FROM t1; + } +} {123} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3554.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3554.test --- sqlite3-3.4.2/test/tkt3554.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3554.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,67 @@ +# 2008 December 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #3554 has been +# fixed. 
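As background for the trigger fixture that follows: RAISE(IGNORE) inside a BEFORE trigger silently abandons the row change that fired the trigger instead of raising an error. A minimal illustrative sketch of that behaviour, with hypothetical table and trigger names not taken from the patch:

    CREATE TABLE q(x);
    CREATE TRIGGER q_ins BEFORE INSERT ON q WHEN new.x < 0 BEGIN
      SELECT RAISE(IGNORE);
    END;
    INSERT INTO q VALUES(1);
    INSERT INTO q VALUES(-1);   -- skipped silently, no error is reported
    SELECT * FROM q;            -- returns only the row 1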
+# +# $Id: tkt3554.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !trigger { + finish_test + return +} + +do_test tkt3544-1.1 { + execsql { + CREATE TABLE test ( obj, t1, t2, PRIMARY KEY(obj, t1, t2) ); + + CREATE TRIGGER test_insert BEFORE INSERT ON test BEGIN + UPDATE test SET t1 = new.t1 + WHERE obj = new.obj AND new.t1 < t1 AND new.t2 >= t1; + + UPDATE test SET t2 = new.t2 + WHERE obj = new.obj AND new.t2 > t2 AND new.t1 <= t2; + + SELECT RAISE(IGNORE) WHERE EXISTS ( + SELECT obj FROM test + WHERE obj = new.obj AND new.t1 >= t1 AND new.t2 <= t2 + ); + END; + } +} {} + +do_test tkt3544-1.2 { + execsql { + INSERT INTO test VALUES('a', 10000, 11000); + SELECT * FROM test; + } +} {a 10000 11000} + + +do_test tkt3544-1.3 { + execsql { + INSERT INTO test VALUES('a', 9000, 10500); + } + execsql { SELECT * FROM test } +} {a 9000 11000} + +do_test tkt3544-1.4 { + execsql { + INSERT INTO test VALUES('a', 10000, 12000); + } + execsql { SELECT * FROM test } +} {a 9000 12000} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3581.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3581.test --- sqlite3-3.4.2/test/tkt3581.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3581.test 2009-01-14 01:10:40.000000000 +0000 @@ -0,0 +1,54 @@ +# 2008 January 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #3581 has been +# fixed. +# +# $Id: tkt3581.test,v 1.1 2009/01/14 01:10:40 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3581-1.1 { + db eval { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t1 VALUES(0,544,846); + INSERT INTO t1 VALUES(1,345,51); + CREATE TABLE t2(a INTEGER PRIMARY KEY, b, c); + INSERT INTO t2 SELECT * FROM t1; + CREATE INDEX i2 on t2(c); + } +} {} + +do_test tkt3581-1.2 { + db eval { + SELECT a FROM t1 + WHERE (b > 45 AND c < 356) + OR b <= 733 + OR b >= 557 + OR (b >= 614 AND c < 251) + ORDER BY b; + } +} {1 0} + +do_test tkt3581-1.3 { + db eval { + SELECT a FROM t2 + WHERE (b > 45 AND c < 356) + OR b <= 733 + OR b >= 557 + OR (b >= 614 AND c < 251) + ORDER BY b; + } +} {1 0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt35xx.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt35xx.test --- sqlite3-3.4.2/test/tkt35xx.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt35xx.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,102 @@ +# 2008 November 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# When a transaction rolls back, make sure that dirty pages in the +# page cache which are not in the rollback journal are reinitialized +# in the btree layer. 
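As background for the tests that follow: they exercise both an explicit ROLLBACK and the implicit statement-level rollback performed after a constraint failure, in which only the failing statement's changes are undone while the enclosing transaction stays open. A minimal illustrative sketch, with a hypothetical table name not taken from the patch:

    BEGIN;
    CREATE TABLE demo(x PRIMARY KEY);
    INSERT INTO demo VALUES(1);
    -- The next statement fails with "PRIMARY KEY must be unique"; the statement
    -- journal removes any row it had already inserted, but the transaction
    -- itself remains open.
    INSERT INTO demo SELECT 2 UNION ALL SELECT 1;
    SELECT * FROM demo;   -- still returns just the row 1
    COMMIT;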
+# +# $Id: tkt35xx.test,v 1.4 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt35xx-1.1 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + } +} {} + +# Trigger the problem using explicit rollback. +# +do_test tkt35xx-1.1 { + execsql { + PRAGMA auto_vacuum = 0; + CREATE TABLE t1(a,b,c); + CREATE INDEX i1 ON t1(c); + INSERT INTO t1 VALUES(0, 0, zeroblob(676)); + INSERT INTO t1 VALUES(1, 1, zeroblob(676)); + DELETE FROM t1; + BEGIN; + INSERT INTO t1 VALUES(0, 0, zeroblob(676)); + INSERT INTO t1 VALUES(1, 1, zeroblob(676)); + ROLLBACK; + INSERT INTO t1 VALUES(0, 0, zeroblob(676)); + } + execsql { + INSERT INTO t1 VALUES(1, 1, zeroblob(676)); + } +} {} + +# Trigger the problem using statement rollback. +# +db close +file delete test.db +sqlite3 db test.db +set big [string repeat abcdefghij 22] ;# 220 byte string +do_test tkt35xx-1.2.1 { + execsql { + PRAGMA auto_vacuum = 0; + PRAGMA page_size = 1024; + CREATE TABLE t3(a INTEGER PRIMARY KEY, b); + INSERT INTO t3 VALUES(1, $big); + INSERT INTO t3 VALUES(2, $big); + INSERT INTO t3 VALUES(3, $big); + INSERT INTO t3 VALUES(4, $big); + CREATE TABLE t4(c, d); + INSERT INTO t4 VALUES(5, $big); + INSERT INTO t4 VALUES(1, $big); + } +} {} +do_test tkt35xx-1.2.2 { + catchsql { + BEGIN; + CREATE TABLE t5(e PRIMARY KEY, f); + DROP TABLE t5; + INSERT INTO t3(a, b) SELECT c, d FROM t4; + } +} {1 {PRIMARY KEY must be unique}} +do_test tkt35xx-1.2.3 { + # Show that the transaction has not been rolled back. + catchsql BEGIN +} {1 {cannot start a transaction within a transaction}} +do_test tkt35xx-1.2.4 { + execsql { SELECT count(*) FROM t3 } +} {4} +do_test tkt35xx-1.2.5 { + # Before the bug was fixed, if SQLITE_DEBUG was defined an assert() + # would fail during the following INSERT statement. If SQLITE_DEBUG + # was not defined, then the statement would pass and the transaction + # would be committed. But, the "SELECT count(*)" in tkt35xx-1.2.6 would + # return 1, not 5. Data magically disappeared! + # + execsql { + INSERT INTO t3 VALUES(5, $big); + COMMIT; + } +} {} +do_test tkt35xx-1.2.6 { + execsql { SELECT count(*) FROM t3 } +} {5} +integrity_check tkt35xx-1.2.7 + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3630.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3630.test --- sqlite3-3.4.2/test/tkt3630.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3630.test 2009-02-02 18:03:22.000000000 +0000 @@ -0,0 +1,47 @@ +# 2009 February 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# +# This file checks to make sure the "TEMP" or "TEMPORARY" keyword is +# omitted from the schema of a table that is created using that +# keyword. Ticket #3630. 
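In practice, "omitted from the schema" means that the stored CREATE statement for a TEMP table does not repeat the TEMP keyword, which is what the GLOB checks below verify. A quick way to inspect the stored text, using a hypothetical connection and table name not taken from the patch:

    sqlite3 dbt :memory:
    dbt eval { CREATE TEMP TABLE tt(a) }
    puts [dbt eval { SELECT sql FROM sqlite_temp_master WHERE name='tt' }]
    # expected to print: {CREATE TABLE tt(a)}  (no TEMP keyword stored)
    dbt close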
+# +# $Id: tkt3630.test,v 1.1 2009/02/02 18:03:22 drh Exp $ +# + +set testdir [file dirname $argv0] + +source $testdir/tester.tcl + +do_test tkt3630-1 { + db eval { + CREATE TEMP TABLE temp1(a,b,c); + SELECT * FROM sqlite_temp_master WHERE sql GLOB '*TEMP*'; + } +} {} +do_test tkt3630-2 { + db eval { + CREATE TABLE main1(a,b,c); + CREATE TEMP TABLE temp2 AS SELECT * FROM main1; + SELECT * FROM sqlite_temp_master WHERE sql GLOB '*TEMP*'; + } +} {} + +ifcapable altertable { + do_test tkt3630-3 { + db eval { + ALTER TABLE temp2 ADD COLUMN d; + ALTER TABLE temp2 RENAME TO temp2rn; + SELECT name FROM sqlite_temp_master WHERE name LIKE 'temp2%'; + } + } {temp2rn} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3718.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3718.test --- sqlite3-3.4.2/test/tkt3718.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3718.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,230 @@ +# 2001 September 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the execution of SQL statements from +# within callbacks generated by VMs that themselves open statement +# transactions. +# +# $Id: tkt3718.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3718-1.1 { + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 'one'); + INSERT INTO t1 VALUES(2, 'two'); + INSERT INTO t1 VALUES(3, 'three'); + INSERT INTO t1 VALUES(4, 'four'); + INSERT INTO t1 VALUES(5, 'five'); + CREATE TABLE t2(a PRIMARY KEY, b); + } +} {} + +# SQL scalar function: +# +# f1() +# +# Uses database handle [db] to execute "SELECT f2()". Returns either +# the results or error message from the "SELECT f2()" query to the +# caller. +# +proc f1 {args} { + set a [lindex $args 0] + catch { db eval {SELECT f2($a)} } msg + set msg +} + +# SQL scalar function: +# +# f2() +# +# Return the value of . Unless is "three", in which case throw +# an exception. +# +proc f2 {args} { + set a [lindex $args 0] + if {$a == "three"} { error "Three!!" } + return $a +} + +db func f1 f1 +db func f2 f2 + +# The second INSERT statement below uses the f1 user function such that +# half-way through the INSERT operation f1() will run an SQL statement +# that throws an exception. At one point, before #3718 was fixed, this +# caused the statement transaction belonging to the INSERT statement to +# be rolled back. The result was that some (but not all) of the rows that +# should have been inserted went missing. +# +do_test tkt3718-1.2 { + execsql { + BEGIN; + INSERT INTO t2 SELECT a, b FROM t1; + INSERT INTO t2 SELECT a+5, f1(b) FROM t1; + COMMIT; + } + execsql { + SELECT a FROM t2; + } +} {1 2 3 4 5 6 7 8 9 10} + +# This test turns on the count_changes pragma (causing DML statements to +# return SQLITE_ROW once, with a single integer result value reporting the +# number of rows affected by the statement). It then executes an INSERT +# statement that requires a statement journal. After stepping the statement +# once, so that it returns SQLITE_ROW, a second SQL statement that throws an +# exception is run. 
At one point, before #3718 was fixed, this caused the +# statement transaction belonging to the INSERT statement to be rolled back. +# The result was that none of the rows were actually inserted. +# +# +do_test tkt3718-1.3 { + execsql { + DELETE FROM t2 WHERE a > 5; + PRAGMA count_changes = 1; + BEGIN; + } + db eval {INSERT INTO t2 SELECT a+5, b||'+5' FROM t1} { + catch { db eval {SELECT f2('three')} } msg + } + execsql { + COMMIT; + SELECT a FROM t2; + } +} {1 2 3 4 5 6 7 8 9 10} + +do_test tkt3718-1.4 { + execsql {pragma count_changes=0} +} {} + +# This SQL function executes the SQL specified as an argument against +# database [db]. +# +proc sql {doit zSql} { + if {$doit} { catchsql $zSql } +} +db func sql [list sql] + +# The following tests, tkt3718-2.*, test that a nested statement +# transaction can be successfully committed or reverted without +# affecting the parent statement transaction. +# +do_test tkt3718-2.1 { + execsql { SELECT sql(1, 'DELETE FROM t2 WHERE a = '||a ) FROM t2 WHERE a>5 } + execsql { SELECT a from t2 } +} {1 2 3 4 5} +do_test tkt3718-2.2 { + execsql { + DELETE FROM t2 WHERE a > 5; + BEGIN; + INSERT INTO t2 SELECT a+5, sql(a==3, + 'INSERT INTO t2 SELECT a+10, f2(b) FROM t1' + ) FROM t1; + } + execsql { + COMMIT; + SELECT a FROM t2; + } +} {1 2 3 4 5 6 7 8 9 10} +do_test tkt3718-2.3 { + execsql { + DELETE FROM t2 WHERE a > 5; + BEGIN; + INSERT INTO t2 SELECT a+5, sql(a==3, + 'INSERT INTO t2 SELECT a+10, b FROM t1' + ) FROM t1; + COMMIT; + } + execsql { SELECT a FROM t2 ORDER BY a+0} +} {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15} +integrity_check tkt3718.2-4 + +# The next set of tests, tkt3718-3.*, test that a statement transaction +# that has a committed statement transaction nested inside of it can +# be committed or reverted. +# +foreach {tn io ii results} { + 1 0 10 {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} + 2 1 10 {6 7 8 9 10 16 17 18 19 20} + 3 0 11 {1 2 3 4 5 6 7 8 9 10 16 17 18 19 20} + 4 1 11 {6 7 8 9 10 16 17 18 19 20} +} { + do_test tkt3718-3.$tn { + execsql { + DELETE FROM t2; + INSERT INTO t2 SELECT a+5, b FROM t1; + INSERT INTO t2 SELECT a+15, b FROM t1; + } + + catchsql " + BEGIN; + INSERT INTO t2 SELECT a+$io, sql(a==3, + 'INSERT INTO t2 SELECT a+$ii, b FROM t1' + ) FROM t1; + " + + execsql { COMMIT } + + execsql { SELECT a FROM t2 ORDER BY a+0} + } $results + + integrity_check tkt3718-3.$tn.integrity +} + +# This is the same test as tkt3718-3.*, but with 3 levels of nesting. 
+# +foreach {tn i1 i2 i3 results} { + 1 0 10 20 {5 10 15 20 25 30} + 2 0 10 21 {5 10 15 20 30} + 3 0 11 20 {5 10 20 30} + 4 0 11 21 {5 10 20 30} + 5 1 10 20 {10 20 30} + 6 1 10 21 {10 20 30} + 7 1 11 20 {10 20 30} + 8 1 11 21 {10 20 30} +} { + do_test tkt3718-4.$tn { + execsql { + DELETE FROM t2; + INSERT INTO t2 SELECT a+5, b FROM t1; + INSERT INTO t2 SELECT a+15, b FROM t1; + INSERT INTO t2 SELECT a+25, b FROM t1; + } + + catchsql " + BEGIN; + INSERT INTO t2 SELECT a+$i1, sql(a==3, + 'INSERT INTO t2 SELECT a+$i2, sql(a==3, + ''INSERT INTO t2 SELECT a+$i3, b FROM t1'' + ) FROM t1' + ) FROM t1; + " + + execsql { COMMIT } + + execsql { SELECT a FROM t2 WHERE (a%5)==0 ORDER BY a+0} + } $results + + do_test tkt3718-4.$tn.extra { + execsql { + SELECT + (SELECT sum(a) FROM t2)==(SELECT sum(a*5-10) FROM t2 WHERE (a%5)==0) + } + } {1} + + integrity_check tkt3718-4.$tn.integrity +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3731.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3731.test --- sqlite3-3.4.2/test/tkt3731.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3731.test 2009-03-17 22:33:01.000000000 +0000 @@ -0,0 +1,51 @@ +# 2009 March 18 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: tkt3731.test,v 1.1 2009/03/17 22:33:01 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +do_test tkt3731-1.1 { + execsql { + CREATE TABLE t1(a PRIMARY KEY, b); + CREATE TRIGGER tr1 AFTER INSERT ON t1 BEGIN + INSERT INTO t1 VALUES(new.a || '+', new.b || '+'); + END; + } +} {} + +do_test tkt3731-1.2 { + execsql { + INSERT INTO t1 VALUES('a', 'b'); + INSERT INTO t1 VALUES('c', 'd'); + SELECT * FROM t1; + } +} {a b a+ b+ c d c+ d+} + +do_test tkt3731-1.3 { + execsql { + DELETE FROM t1; + CREATE TABLE t2(a, b); + INSERT INTO t2 VALUES('e', 'f'); + INSERT INTO t2 VALUES('g', 'h'); + INSERT INTO t1 SELECT * FROM t2; + SELECT * FROM t1; + } +} {e f e+ f+ g h g+ h+} + +integrity_check tkt3731-1.4 + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3757.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3757.test --- sqlite3-3.4.2/test/tkt3757.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3757.test 2009-03-29 00:13:04.000000000 +0000 @@ -0,0 +1,60 @@ +# 2009 March 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3757: The cost functions on the query optimizer for the +# IN operator can be improved. +# +# $Id: tkt3757.test,v 1.1 2009/03/29 00:13:04 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Evaluate SQL. Return the result set followed by the +# and the number of full-scan steps. 
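For readers unfamiliar with the harness: [db status step] and [db status sort] report, for the most recently evaluated statement, the number of full-scan steps and the number of sort operations, so an expected result ending in "scan 0 sort 0" indicates the query was answered from the index with no full table scan and no separate sort pass. A hypothetical call, not taken from the patch, would look like:

    count_steps { SELECT * FROM t1 WHERE y = 2 }
    # returns the result rows followed by "scan N sort M"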
+# +proc count_steps {sql} { + set r [db eval $sql] + lappend r scan [db status step] sort [db status sort] +} + +# Construct tables +# +do_test tkt3757-1.1 { + db eval { + CREATE TABLE t1(x INTEGER, y INTEGER, z TEXT); + CREATE INDEX t1i1 ON t1(y,z); + INSERT INTO t1 VALUES(1,2,'three'); + CREATE TABLE t2(a INTEGER, b TEXT); + INSERT INTO t2 VALUES(2, 'two'); + ANALYZE; + SELECT * FROM sqlite_stat1; + } +} {t1 t1i1 {1 1 1}} + +# Modify statistics in order to make the optimizer think that: +# +# (1) Table T1 has about 250K entries +# (2) There are only about 5 distinct values of T1.y +# +# Then run a query with "t1.y IN (SELECT ..)" in the WHERE clause. +# Make sure the index is used. +# +do_test tkt3757-1.2 { + db eval { + DELETE FROM sqlite_stat1; + INSERT INTO sqlite_stat1 VALUES('t1','t1i1','250000 50000 30'); + } + count_steps { + SELECT * FROM t1 WHERE y IN (SELECT a FROM t2) + } +} {1 2 three scan 0 sort 0} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3761.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3761.test --- sqlite3-3.4.2/test/tkt3761.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3761.test 2009-03-31 03:54:40.000000000 +0100 @@ -0,0 +1,40 @@ +# 2009 March 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3761: Make sure that an incremental vacuum on an in-memory +# database can be rolled back. +# +# $Id: tkt3761.test,v 1.1 2009/03/31 02:54:40 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3761-1.1 { + db close + sqlite3 db :memory: + db eval { + PRAGMA auto_vacuum=INCREMENTAL; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + BEGIN; + DELETE FROM t1 WHERE rowid%2; + PRAGMA incremental_vacuum(4); + ROLLBACK; + } + db eval {PRAGMA integrity_check} +} {ok} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3762.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3762.test --- sqlite3-3.4.2/test/tkt3762.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3762.test 2009-03-31 01:50:36.000000000 +0100 @@ -0,0 +1,54 @@ +# 2009 March 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3762: Make sure that an incremental vacuum that reduces the +# size of the database file such that a pointer-map page is eliminated +# can be correctly rolled back. +# +# The fix for ticket #3762 has already been verified by the +# savepoint6.test test script. But this script is simpler and a +# redundant test never hurts.
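Both tickets above are about rolling an incremental vacuum back; as background, the forward operation itself can be observed by comparing the page count before and after PRAGMA incremental_vacuum. An illustrative sketch with hypothetical file and connection names, not taken from the patch:

    file delete -force vacdemo.db
    sqlite3 dbv vacdemo.db
    dbv eval {
      PRAGMA auto_vacuum = INCREMENTAL;  -- must be set before the first table is created
      CREATE TABLE t(x);
      INSERT INTO t VALUES(zeroblob(50000));
      DELETE FROM t;                     -- the overflow pages move to the free list
    }
    set before [dbv eval {PRAGMA page_count}]
    dbv eval {PRAGMA incremental_vacuum}   ;# no argument: release every free page
    set after [dbv eval {PRAGMA page_count}]
    puts "page_count: $before -> $after"   ;# the count should drop back down
    dbv close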
+# +# $Id: tkt3762.test,v 1.1 2009/03/31 00:50:36 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3762-1.1 { + db eval { + PRAGMA auto_vacuum=INCREMENTAL; + PRAGMA page_size=1024; + PRAGMA cache_size=10; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 VALUES(zeroblob(900)); + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + INSERT INTO t1 SELECT x FROM t1; + DELETE FROM t1 WHERE rowid>202; + VACUUM; + + BEGIN; + DELETE FROM t1 WHERE rowid IN (10,11,12) ; + PRAGMA incremental_vacuum(10); + UPDATE t1 SET x=zeroblob(900) WHERE rowid BETWEEN 100 AND 110; + INSERT INTO t1 VALUES(zeroblob(39000)); + SELECT count(*) FROM t1; + ROLLBACK; + } + db eval {PRAGMA integrity_check} +} {ok} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3773.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3773.test --- sqlite3-3.4.2/test/tkt3773.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3773.test 2009-04-02 17:59:47.000000000 +0100 @@ -0,0 +1,38 @@ +# 2009 April 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3773: Be careful not to over-optimize when a compound +# subquery contains an ORDER BY clause. +# +# +# $Id: tkt3773.test,v 1.1 2009/04/02 16:59:47 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3773-1.1 { + db eval { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(2,1); + INSERT INTO t1 VALUES(33,3); + CREATE TABLE t2(x,y); + INSERT INTO t2 VALUES(123,2); + INSERT INTO t2 VALUES(4,4); + SELECT a FROM ( + SELECT a, b FROM t1 + UNION ALL + SELECT x, y FROM t2 + ORDER BY 2 + ); + } +} {2 123 33 4} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3791.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3791.test --- sqlite3-3.4.2/test/tkt3791.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3791.test 2009-04-08 13:21:31.000000000 +0100 @@ -0,0 +1,28 @@ +# 2009 April 2 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3791: A segfault when inserting into a table that contains +# an arbitrary expression as its default value. +# +# $Id: tkt3791.test,v 1.1 2009/04/08 12:21:31 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3791-1.1 { + db eval { + CREATE TABLE t1(x, y DEFAULT(datetime('now'))); + INSERT INTO t1(x) VALUES(1); + SELECT x, length(y) FROM t1; + } +} {1 19} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3793.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3793.test --- sqlite3-3.4.2/test/tkt3793.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3793.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,119 @@ +# 2009 April 10 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests to verify that ticket #3793 has been +# fixed. +# +# $Id: tkt3793.test,v 1.2 2009/06/01 16:42:18 shane Exp $ + + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !shared_cache||!attach { + finish_test + return +} +set ::enable_shared_cache [sqlite3_enable_shared_cache 1] + +do_test tkt3793-1.1 { + # This is taken from shared.test. The Windows VFS expands + # ./test.db (and test.db) to be the same thing so the path + # matches and they share a cache. By changing the case + # for Windows platform, we get around this and get a separate + # connection. + if {$::tcl_platform(platform)=="unix"} { + sqlite3 db1 test.db + sqlite3 db2 test.db + } else { + sqlite3 db1 TEST.DB + sqlite3 db2 TEST.DB + } + execsql { + BEGIN; + CREATE TABLE t1(a, b); + CREATE TABLE t2(a PRIMARY KEY, b); + INSERT INTO t1 VALUES(randstr(50,50), randstr(50,50)); + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t1 SELECT randstr(50,50), randstr(50,50) FROM t1; + INSERT INTO t2 SELECT * FROM t1; + COMMIT; + } +} {} + +proc busyhandler {db args} { set ::busyconnection $db ; return 1 } +db2 busy {busyhandler db2} +db1 busy {busyhandler db1} + +# Establish a read-lock on the database file using connection [db]. +# +do_test tkt3793-1.2 { + execsql { + BEGIN; + SELECT count(*) FROM t1; + } +} {1024} + +# Set the size of the cache shared by [db1] and [db2] to 10. Then update +# more than 10 pages of table t1. At this point the shared-cache will +# hold a RESERVED lock on the database file. Even though there are now +# more than 10 dirty pages in memory, it cannot upgrade to an EXCLUSIVE +# lock because of the read-lock held by [db]. +# +do_test tkt3793-1.3 { + execsql { + PRAGMA cache_size = 10; + BEGIN; + UPDATE t1 SET b = randstr(50,50); + } db1 +} {} + +set x 0 + +# Run one SELECT query on the shared-cache using [db1], then from within +# the callback run another via [db2]. Because of the large number of dirty +# pages within the cache, each time a new page is read from the database +# SQLite will attempt to upgrade to an EXCLUSIVE lock, and hence invoke +# the busy-handler. The tests here verify that the correct busy-handler +# function is invoked (the busy-handler associated with the database +# connection that called sqlite3_step()). When bug #3793 existed, sometimes +# the [db2] busy-handler was invoked from within the call to sqlite3_step() +# associated with [db1]. +# +# Note: Before the bug was fixed, if [db2] was opened with the "-fullmutex 1" +# option, then this test case would cause an assert() to fail. 
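A note on the mechanism used below, for readers unfamiliar with the Tcl interface: [db eval] accepts a trailing script that is executed once per result row, so a second connection can run its own statements from inside the first connection's row loop. Schematically, with the connection and table names as in the test and an illustrative body only:

    db1 eval {SELECT * FROM t2 ORDER BY a LIMIT 20} {
      # runs once per row returned by db1; column values appear as Tcl variables
      db2 eval { SELECT count(*) FROM t2 }
    }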
+# +set ::busyconnection db1 +db1 eval {SELECT * FROM t2 ORDER BY a LIMIT 20} { + do_test tkt3793-2.[incr x] { set ::busyconnection } db1 + set ::busyconnection db2 + + db2 eval { SELECT count(*) FROM t2 } + do_test tkt3793-2.[incr x] { set ::busyconnection } db2 + set ::busyconnection db1 +} + +do_test tkt3793-3 { + db1 close + db2 close +} {} + +sqlite3_enable_shared_cache $::enable_shared_cache +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3824.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3824.test --- sqlite3-3.4.2/test/tkt3824.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3824.test 2009-04-24 21:32:31.000000000 +0100 @@ -0,0 +1,99 @@ +# 2009 April 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3824 +# +# When you use an "IS NULL" constraint on a UNIQUE index, the result +# is not necessarily UNIQUE. Make sure the optimizer does not assume +# uniqueness. +# +# $Id: tkt3824.test,v 1.2 2009/04/24 20:32:31 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc execsql_status {sql {db db}} { + set result [uplevel $db eval [list $sql]] + if {[db status sort]} { + concat $result sort + } else { + concat $result nosort + } +} + +do_test tkt3824-1.1 { + db eval { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,NULL); + INSERT INTO t1 VALUES(9,NULL); + INSERT INTO t1 VALUES(5,NULL); + INSERT INTO t1 VALUES(123,NULL); + INSERT INTO t1 VALUES(-10,NULL); + CREATE UNIQUE INDEX t1b ON t1(b); + } + execsql_status { + SELECT a FROM t1 WHERE b IS NULL ORDER BY a; + } +} {-10 1 5 9 123 sort} +do_test tkt3824-1.2 { + execsql_status { + SELECT a FROM t1 WHERE b IS NULL ORDER BY b, a; + } +} {-10 1 5 9 123 sort} + +do_test tkt3824-2.1 { + db eval { + CREATE TABLE t2(a,b,c); + INSERT INTO t2 VALUES(1,1,NULL); + INSERT INTO t2 VALUES(9,2,NULL); + INSERT INTO t2 VALUES(5,2,NULL); + INSERT INTO t2 VALUES(123,3,NULL); + INSERT INTO t2 VALUES(-10,3,NULL); + CREATE UNIQUE INDEX t2bc ON t2(b,c); + } + execsql_status { + SELECT a FROM t2 WHERE b=2 AND c IS NULL ORDER BY a; + } +} {5 9 sort} +do_test tkt3824-2.2 { + execsql_status { + SELECT a FROM t2 WHERE b=2 AND c IS NULL ORDER BY b, a; + } +} {5 9 sort} +do_test tkt3824-2.3 { + lsort [execsql_status { + SELECT a FROM t2 WHERE b=2 AND c IS NULL ORDER BY b; + }] +} {5 9 sort} + +do_test tkt3824-3.1 { + db eval { + CREATE TABLE t3(x,y); + INSERT INTO t3 SELECT a, b FROM t1; + INSERT INTO t3 VALUES(234,567); + CREATE UNIQUE INDEX t3y ON t3(y); + DELETE FROM t3 WHERE y IS NULL; + SELECT * FROM t3; + } +} {234 567} + +do_test tkt3824-4.1 { + db eval { + CREATE TABLE t4(x,y); + INSERT INTO t4 SELECT a, b FROM t1; + INSERT INTO t4 VALUES(234,567); + CREATE UNIQUE INDEX t4y ON t4(y); + UPDATE t4 SET rowid=rowid+100 WHERE y IS NULL; + SELECT rowid, x FROM t4 ORDER BY rowid; + } +} {6 234 101 1 102 9 103 5 104 123 105 -10} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3832.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3832.test --- sqlite3-3.4.2/test/tkt3832.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3832.test 2009-05-01 03:08:04.000000000 +0100 @@ -0,0 +1,37 @@ +# 2009 April 30 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3832 +# +# A segfault when using a BEFORE trigger on an INSERT and inserting +# a NULL into the INTEGER PRIMARY KEY. +# +# $Id: tkt3832.test,v 1.1 2009/05/01 02:08:04 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +do_test tkt3832-1.1 { + db eval { + CREATE TABLE t1(a INT, b INTEGER PRIMARY KEY); + CREATE TABLE log(x); + CREATE TRIGGER t1r1 BEFORE INSERT ON t1 BEGIN + INSERT INTO log VALUES(new.b); + END; + INSERT INTO t1 VALUES(NULL,5); + INSERT INTO t1 SELECT b, a FROM t1 ORDER BY b; + SELECT rowid, * FROM t1; + SELECT rowid, * FROM log; + } +} {5 {} 5 6 5 6 1 5 2 -1} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3838.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3838.test --- sqlite3-3.4.2/test/tkt3838.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3838.test 2009-05-05 13:54:50.000000000 +0100 @@ -0,0 +1,37 @@ +# 2009 May 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3838 +# +# The ticket reports that the encoding is UTF8 on the DEFAULT VALUE of +# a column added using ALTER TABLE even when the database is UTF16. +# Verify that this has been fixed. +# +# $Id: tkt3838.test,v 1.1 2009/05/05 12:54:50 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + + +do_test tkt3838-1.1 { + db eval { + PRAGMA encoding=UTF16; + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + ALTER TABLE t1 ADD COLUMN b INTEGER DEFAULT '999'; + ALTER TABLE t1 ADD COLUMN c REAL DEFAULT '9e99'; + ALTER TABLE t1 ADD COLUMN d TEXT DEFAULT 'xyzzy'; + UPDATE t1 SET x=x+1; + SELECT * FROM t1; + } +} {2 999 9e+99 xyzzy} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3841.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3841.test --- sqlite3-3.4.2/test/tkt3841.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3841.test 2009-06-25 12:45:59.000000000 +0100 @@ -0,0 +1,48 @@ +# 2009 May 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Ticket #3841 +# +# The sqlite3_aggregate_count() is not being reset when an aggregate +# functio is used in a correlated subquery. 
+# +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !subquery { + finish_test + return +} + +do_test tkt3841.1 { + execsql { + CREATE TABLE table2 (key TEXT, x TEXT); + CREATE TABLE list (key TEXT, value TEXT); + + INSERT INTO table2 VALUES ("a", "alist"); + INSERT INTO table2 VALUES ("b", "blist"); + INSERT INTO list VALUES ("a", 1); + INSERT INTO list VALUES ("a", 2); + INSERT INTO list VALUES ("a", 3); + INSERT INTO list VALUES ("b", 4); + INSERT INTO list VALUES ("b", 5); + INSERT INTO list VALUES ("b", 6); + + SELECT + table2.x, + (SELECT group_concat(list.value) + FROM list + WHERE list.key = table2.key) + FROM table2; + } +} {alist 1,2,3 blist 4,5,6} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3871.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3871.test --- sqlite3-3.4.2/test/tkt3871.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3871.test 2009-06-05 18:09:12.000000000 +0100 @@ -0,0 +1,53 @@ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab { + finish_test + return +} + +register_echo_module [sqlite3_connection_pointer db] + +do_test tkt3871-1.1 { + execsql { + BEGIN; + CREATE TABLE t1(a PRIMARY KEY, b UNIQUE); + } + for {set i 0} {$i < 500} {incr i} { + execsql { INSERT INTO t1 VALUES($i, $i*$i) } + } + execsql COMMIT + execsql { + CREATE VIRTUAL TABLE e USING echo(t1); + SELECT count(*) FROM e; + } +} {500} + +do_test tkt3871-1.2 { + execsql { SELECT * FROM e WHERE a = 1 OR a = 2 } +} {1 1 2 4} +do_test tkt3871-1.3 { + set echo_module "" + execsql { SELECT * FROM e WHERE a = 1 OR a = 2 } + set echo_module +} [list \ + xFilter {SELECT rowid, * FROM 't1' WHERE a = ?} 1 \ + xFilter {SELECT rowid, * FROM 't1' WHERE a = ?} 2 \ +] + +do_test tkt3871-1.4 { + execsql { SELECT * FROM e WHERE a = 1 OR a = 2 OR b = 9 } +} {1 1 2 4 3 9} +do_test tkt3871-1.5 { + set echo_module "" + execsql { SELECT * FROM e WHERE a = 1 OR a = 2 OR b = 9 } + set echo_module +} [list \ + xFilter {SELECT rowid, * FROM 't1' WHERE a = ?} 1 \ + xFilter {SELECT rowid, * FROM 't1' WHERE a = ?} 2 \ + xFilter {SELECT rowid, * FROM 't1' WHERE b = ?} 9 +] + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3879.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3879.test --- sqlite3-3.4.2/test/tkt3879.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3879.test 2009-06-05 18:09:12.000000000 +0100 @@ -0,0 +1,52 @@ +# 2009 May 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests to verify ticket #3879 is fixed. 
+# +# $Id: tkt3879.test,v 1.2 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3879.1.1 { + execsql { + CREATE TABLE t1 (a PRIMARY KEY, b); + INSERT INTO t1 VALUES ('w', 1); + INSERT INTO t1 VALUES ('z', -1); + + CREATE TABLE t2 (m INTEGER PRIMARY KEY, n, a, p); + INSERT INTO t2 VALUES (25, 13, 'w', 1); + INSERT INTO t2 VALUES (26, 25, 'z', 153); + INSERT INTO t2 VALUES (27, 25, 'z', 68); + + CREATE TABLE t3 (m); + INSERT INTO t3 VALUES (25); + } +} {} + +do_test tkt3879.1.2 { + execsql { + SELECT 111, t1.b*123 + FROM t3, t2 AS j0, t2 AS j1, t1 + WHERE j0.m=t3.m AND t1.a=j0.a AND j1.n=j0.m; + } +} {111 123 111 123} + +do_test tkt3879.1.3 { + execsql { + SELECT 222, t1.b*123 + FROM t3, t2 AS j0, t2 AS j1, t1 + WHERE j0.m=t3.m AND t1.a=j0.a AND j1.n=j0.m + ORDER BY t1.b; + } +} {222 123 222 123} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3911.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3911.test --- sqlite3-3.4.2/test/tkt3911.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3911.test 2009-06-12 04:27:28.000000000 +0100 @@ -0,0 +1,58 @@ +# 2009 June 11 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests to verify ticket #3911 is fixed. +# +# $Id: tkt3911.test,v 1.1 2009/06/12 03:27:28 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3911.1 { + execsql { + CREATE TABLE t1(a,b); + INSERT INTO t1 VALUES(1,2); + INSERT INTO t1 VALUES(11,12); + + CREATE TABLE t2(b,c); + INSERT INTO t2 VALUES(2,3); + INSERT INTO t2 VALUES(22,23); + + SELECT * FROM t1 JOIN t2 USING(b); + } +} {1 2 3} +do_test tkt3911.2 { + db eval { + SELECT * FROM t1 JOIN (t2) AS x USING (b); + } +} {1 2 3} +do_test tkt3911.3 { + db eval { + SELECT * FROM t1 JOIN (SELECT * FROM t2) AS x USING (b); + } +} {1 2 3} + +do_test tkt3911.4 { + db eval { + CREATE TABLE t3(m,a); + INSERT INTO t3 VALUES('one',1); + INSERT INTO t3 VALUES('two',2); + + SELECT * FROM t3 JOIN (SELECT * FROM t1 NATURAL JOIN t2) AS x USING(a); + } +} {one 1 2 3} +do_test tkt3911.5 { + db eval { + SELECT * FROM t3 JOIN (SELECT * FROM t1 JOIN t2 USING (b)) AS x USING(a); + } +} {one 1 2 3} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3918.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3918.test --- sqlite3-3.4.2/test/tkt3918.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3918.test 2009-06-17 12:13:28.000000000 +0100 @@ -0,0 +1,60 @@ +# 2009 June 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# $Id: tkt3918.test,v 1.1 2009/06/17 11:13:28 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3918.1 { + execsql { + PRAGMA page_size = 1024; + PRAGMA auto_vacuum = incremental; + CREATE TABLE t1(i, x); + } +} {} +do_test tkt3918.2 { + execsql { + INSERT INTO t1 VALUES(1, randstr(1000,1000)); + INSERT INTO t1 VALUES(2, zeroblob(248*1020 + 100)); + INSERT INTO t1 VALUES(3, zeroblob(2*1020 + 100)); + } +} {} + +# This set of statements sets up the free list so that the +# first free-list trunk page contains only a single leaf. +# The leaf page is also the last page in the database. The +# second free-list trunk page contains, amongst other things, +# page number 4. +do_test tkt3918.3 { + execsql { + DELETE FROM t1 WHERE i = 2; + DELETE FROM t1 WHERE i = 1; + DELETE FROM t1 WHERE i = 3; + } +} {} + +# Incrementally vacuum the database to reduce its size by a single +# page. This will remove the single leaf from the first page in +# the linked list of free-list trunk pages. +do_test tkt3918.4 { + execsql { PRAGMA incremental_vacuum = 1 } +} {} + +# Create another table. This operation will attempt to extract +# page 4 from the database free-list. Bug 3918 caused sqlite to +# incorrectly report corruption here. +do_test tkt3918.5 { + execsql { CREATE TABLE t2(a, b) } +} {} + +finish_test + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3922.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3922.test --- sqlite3-3.4.2/test/tkt3922.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3922.test 2009-06-26 15:19:55.000000000 +0100 @@ -0,0 +1,89 @@ +# 2009 June 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# $Id: tkt3922.test,v 1.2 2009/06/26 14:17:47 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +if {[working_64bit_int]} { + do_test tkt3922.1 { + execsql { + CREATE TABLE t1(a NUMBER); + INSERT INTO t1 VALUES('-9223372036854775808'); + SELECT a, typeof(a) FROM t1; + } + } {-9223372036854775808 integer} +} else { + # this alternate version of tkt3922.1 doesn't + # really test the same thing as the original, + # but is needed to create the table and + # provided simply as a place holder for + # platforms without working 64bit support. 
+ do_test tkt3922.1 { + execsql { + CREATE TABLE t1(a NUMBER); + INSERT INTO t1 VALUES('-1'); + SELECT a, typeof(a) FROM t1; + } + } {-1 integer} +} +do_test tkt3922.2 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('-9223372036854775809'); + SELECT a, typeof(a) FROM t1; + } +} {-9.22337203685478e+18 real} +do_test tkt3922.3 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('-9223372036854776832'); + SELECT a, typeof(a) FROM t1; + } +} {-9.22337203685478e+18 real} +do_test tkt3922.4 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('-9223372036854776833'); + SELECT a, typeof(a) FROM t1; + } +} {-9.22337203685478e+18 real} +if {[working_64bit_int]} { + do_test tkt3922.5 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('9223372036854775807'); + SELECT a, typeof(a) FROM t1; + } + } {9223372036854775807 integer} +} else { + # this alternate version of tkt3922.5 doesn't + # really test the same thing as the original, + # but provided simply as a place holder for + # platforms without working 64bit support. + do_test tkt3922.5 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('1'); + SELECT a, typeof(a) FROM t1; + } + } {1 integer} +} +do_test tkt3922.6 { + execsql { + DELETE FROM t1; + INSERT INTO t1 VALUES('9223372036854775808'); + SELECT a, typeof(a) FROM t1; + } +} {9.22337203685478e+18 real} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tkt3929.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tkt3929.test --- sqlite3-3.4.2/test/tkt3929.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tkt3929.test 2009-06-23 12:53:09.000000000 +0100 @@ -0,0 +1,49 @@ +# 2009 June 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests to verify ticket #3929 is fixed. +# +# $Id: tkt3929.test,v 1.1 2009/06/23 11:53:09 danielk1977 Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tkt3929-1.0 { + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a, b); + CREATE TRIGGER t1_t1 AFTER INSERT ON t1 BEGIN + UPDATE t1 SET b = 'value: ' || a WHERE t1.rowid = new.rowid; + END; + } +} {} + +do_test tkt3929-1.1 { + execsql { + INSERT INTO t1(a) VALUES(1); + INSERT INTO t1(a) VALUES(2); + SELECT * FROM t1; + } +} {1 {value: 1} 2 {value: 2}} + +# Before it was fixed, the following provoked the bug, causing either an +# assertion failure or a "database is malformed" error. +# +do_test tkt3930-1.2 { + for {set i 3} {$i < 100} {incr i} { + execsql { INSERT INTO t1(a) VALUES($i) } + } +} {} + +integrity_check tkt3930-1.3 +finish_test + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/tokenize.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/tokenize.test --- sqlite3-3.4.2/test/tokenize.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/tokenize.test 2009-06-05 18:03:47.000000000 +0100 @@ -0,0 +1,65 @@ +# 2008 July 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this script testing the tokenizer +# +# $Id: tokenize.test,v 1.1 2008/07/08 00:06:51 drh Exp $ +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test tokenize-1.1 { + catchsql {SELECT 1.0e+} +} {1 {unrecognized token: "1.0e"}} +do_test tokenize-1.2 { + catchsql {SELECT 1.0E+} +} {1 {unrecognized token: "1.0E"}} +do_test tokenize-1.3 { + catchsql {SELECT 1.0e-} +} {1 {unrecognized token: "1.0e"}} +do_test tokenize-1.4 { + catchsql {SELECT 1.0E-} +} {1 {unrecognized token: "1.0E"}} +do_test tokenize-1.5 { + catchsql {SELECT 1.0e+/} +} {1 {unrecognized token: "1.0e"}} +do_test tokenize-1.6 { + catchsql {SELECT 1.0E+:} +} {1 {unrecognized token: "1.0E"}} +do_test tokenize-1.7 { + catchsql {SELECT 1.0e-:} +} {1 {unrecognized token: "1.0e"}} +do_test tokenize-1.8 { + catchsql {SELECT 1.0E-/} +} {1 {unrecognized token: "1.0E"}} +do_test tokenize-1.9 { + catchsql {SELECT 1.0F+5} +} {1 {unrecognized token: "1.0F"}} +do_test tokenize-1.10 { + catchsql {SELECT 1.0d-10} +} {1 {unrecognized token: "1.0d"}} +do_test tokenize-1.11 { + catchsql {SELECT 1.0e,5} +} {1 {unrecognized token: "1.0e"}} +do_test tokenize-1.12 { + catchsql {SELECT 1.0E.10} +} {1 {unrecognized token: "1.0E"}} + +do_test tokenize-2.1 { + catchsql {SELECT 1, 2 /*} +} {1 {near "*": syntax error}} +do_test tokenize-2.2 { + catchsql {SELECT 1, 2 /* } +} {0 {1 2}} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trace.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trace.test --- sqlite3-3.4.2/test/trace.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/trace.test 2009-06-25 12:22:34.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests for the "sqlite3_trace()" API. # -# $Id: trace.test,v 1.6 2006/01/03 00:33:50 drh Exp $ +# $Id: trace.test,v 1.8 2009/04/07 14:14:23 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -145,4 +145,27 @@ } {SELECT * FROM t1} catch {sqlite3_finalize $STMT} +# Trigger tracing. +# +ifcapable trigger { + do_test trace-5.1 { + db eval { + CREATE TRIGGER r1t1 AFTER UPDATE ON t1 BEGIN + UPDATE t2 SET a=new.a WHERE rowid=new.rowid; + END; + CREATE TRIGGER r1t2 AFTER UPDATE ON t2 BEGIN + SELECT 'hello'; + END; + } + set TRACE_OUT {} + proc trace_proc cmd { + lappend ::TRACE_OUT [string trim $cmd] + } + db eval { + UPDATE t1 SET a=a+1; + } + set TRACE_OUT + } {{UPDATE t1 SET a=a+1;} {-- TRIGGER r1t1} {-- TRIGGER r1t2} {-- TRIGGER r1t1} {-- TRIGGER r1t2} {-- TRIGGER r1t1} {-- TRIGGER r1t2}} +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trans2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trans2.test --- sqlite3-3.4.2/test/trans2.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/trans2.test 2009-06-05 18:03:47.000000000 +0100 @@ -0,0 +1,232 @@ +# 2008 August 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. The +# focus of this script is transactions +# +# $Id: trans2.test,v 1.1 2008/08/27 18:56:36 drh Exp $ +# +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# A procedure to scramble the elements of list $inlist into a random order. 
+# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# Generate a UUID using randomness. +# +expr srand(1) +proc random_uuid {} { + set u {} + for {set i 0} {$i<5} {incr i} { + append u [format %06x [expr {int(rand()*16777216)}]] + } + return $u +} + +# Compute hashes on the u1 and u2 fields of the sample data. +# +proc hash1 {} { + global data + set x "" + foreach rec [lsort -integer -index 0 $data] { + append x [lindex $rec 1] + } + return [md5 $x] +} +proc hash2 {} { + global data + set x "" + foreach rec [lsort -integer -index 0 $data] { + append x [lindex $rec 3] + } + return [md5 $x] +} + +# Create the initial data set +# +unset -nocomplain data i max_rowid todel n rec max1 id origres newres +unset -nocomplain inssql modsql s j z +set data {} +for {set i 0} {$i<400} {incr i} { + set rec [list $i [random_uuid] [expr {int(rand()*5000)+1000}] [random_uuid]] + lappend data $rec +} +set max_rowid [expr {$i-1}] + +# Create the T1 table used to hold test data. Populate that table with +# the initial data set and check hashes to make sure everything is correct. +# +do_test trans2-1.1 { + execsql { + PRAGMA cache_size=100; + CREATE TABLE t1( + id INTEGER PRIMARY KEY, + u1 TEXT UNIQUE, + z BLOB NOT NULL, + u2 TEXT UNIQUE + ); + } + foreach rec [scramble $data] { + foreach {id u1 z u2} $rec break + db eval {INSERT INTO t1 VALUES($id,$u1,zeroblob($z),$u2)} + } + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} +} [list [hash1] [hash2]] + +# Repeat the main test loop multiple times. +# +for {set i 2} {$i<=30} {incr i} { + # Delete one row out of every 10 in the database. This will add + # many pages to the freelist. + # + set todel {} + set n [expr {[llength $data]/10}] + set data [scramble $data] + foreach rec [lrange $data 0 $n] { + lappend todel [lindex $rec 0] + } + set data [lrange $data [expr {$n+1}] end] + set max1 [lindex [lindex $data 0] 0] + foreach rec $data { + set id [lindex $rec 0] + if {$id>$max1} {set max1 $id} + } + set origres [list [hash1] [hash2]] + do_test trans2-$i.1 { + db eval "DELETE FROM t1 WHERE id IN ([join $todel ,])" + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $origres + integrity_check trans2-$i.2 + + # Begin a transaction and insert many new records. + # + set newdata {} + foreach id $todel { + set rec [list $id [random_uuid] \ + [expr {int(rand()*5000)+1000}] [random_uuid]] + lappend newdata $rec + lappend data $rec + } + for {set j 1} {$j<50} {incr j} { + set id [expr {$max_rowid+$j}] + lappend todel $id + set rec [list $id [random_uuid] \ + [expr {int(rand()*5000)+1000}] [random_uuid]] + lappend newdata $rec + lappend data $rec + } + set max_rowid [expr {$max_rowid+$j-1}] + set modsql {} + set inssql {} + set newres [list [hash1] [hash2]] + do_test trans2-$i.3 { + db eval BEGIN + foreach rec [scramble $newdata] { + foreach {id u1 z u2} $rec break + set s "INSERT INTO t1 VALUES($id,'$u1',zeroblob($z),'$u2');" + append modsql $s\n + append inssql $s\n + db eval $s + } + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $newres + integrity_check trans2-$i.4 + + # Do a large update that aborts do to a constraint failure near + # the end. This stresses the statement journal mechanism. 
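+  # (Aside - a loose sketch of the mechanism being stressed, using only the
+  # standard Tcl interface: inside an open transaction, a statement that
+  # fails part-way through is undone on its own, while earlier statements
+  # in the same transaction survive.
+  #
+  #   db eval {CREATE TABLE d(x NOT NULL); INSERT INTO d VALUES(1); BEGIN}
+  #   db eval {INSERT INTO d VALUES(2)}
+  #   catch {db eval {UPDATE d SET x = CASE WHEN x<2 THEN x ELSE NULL END}}
+  #   db eval {SELECT x FROM d}    ;# -> 1 2 : the failed UPDATE is rolled
+  #   db eval {COMMIT}             ;#   back, the INSERT is not
+  #
+  # The statement journal is what makes that partial undo possible.)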
+ # + do_test trans2-$i.10 { + catchsql { + UPDATE t1 SET u1=u1||'x', + z = CASE WHEN id<$max_rowid + THEN zeroblob((random()&65535)%5000 + 1000) END; + } + } {1 {t1.z may not be NULL}} + do_test trans2-$i.11 { + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $newres + + # Delete all of the newly inserted records. Verify that the database + # is back to its original state. + # + do_test trans2-$i.20 { + set s "DELETE FROM t1 WHERE id IN ([join $todel ,]);" + append modsql $s\n + db eval $s + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $origres + + # Do another large update that aborts do to a constraint failure near + # the end. This stresses the statement journal mechanism. + # + do_test trans2-$i.30 { + catchsql { + UPDATE t1 SET u1=u1||'x', + z = CASE WHEN id<$max1 + THEN zeroblob((random()&65535)%5000 + 1000) END; + } + } {1 {t1.z may not be NULL}} + do_test trans2-$i.31 { + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $origres + + # Redo the inserts + # + do_test trans2-$i.40 { + db eval $inssql + append modsql $inssql + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $newres + + # Rollback the transaction. Verify that the content is restored. + # + do_test trans2-$i.90 { + db eval ROLLBACK + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $origres + integrity_check trans2-$i.91 + + # Repeat all the changes, but this time commit. + # + do_test trans2-$i.92 { + db eval BEGIN + catchsql { + UPDATE t1 SET u1=u1||'x', + z = CASE WHEN id<$max1 + THEN zeroblob((random()&65535)%5000 + 1000) END; + } + db eval $modsql + catchsql { + UPDATE t1 SET u1=u1||'x', + z = CASE WHEN id<$max1 + THEN zeroblob((random()&65535)%5000 + 1000) END; + } + db eval COMMIT + db eval {SELECT md5sum(u1), md5sum(u2) FROM t1 ORDER BY id} + } $newres + integrity_check trans2-$i.93 +} + +unset -nocomplain data i max_rowid todel n rec max1 id origres newres +unset -nocomplain inssql modsql s j z +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trans3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trans3.test --- sqlite3-3.4.2/test/trans3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/trans3.test 2008-11-05 16:37:35.000000000 +0000 @@ -0,0 +1,77 @@ +# 2008 November 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file implements regression tests for SQLite library. The +# focus of this script is the response of COMMIT and ROLLBACK when +# statements are still pending. 
+# +# $Id: trans3.test,v 1.2 2008/11/05 16:37:35 drh Exp $ +# +set testdir [file dirname $argv0] +source $testdir/tester.tcl +unset -nocomplain ecode + +do_test trans3-1.1 { + db eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(3); + SELECT * FROM t1; + } +} {1 2 3} +do_test trans3-1.2 { + db eval BEGIN + db eval {INSERT INTO t1 VALUES(4);} + set ::ecode {} + set x [catch { + db eval {SELECT * FROM t1 LIMIT 1} { + if {[catch {db eval COMMIT} errmsg]} { + set ::ecode [sqlite3_extended_errcode db] + error $errmsg + } + } + } errmsg] + lappend x $errmsg +} {0 {}} +do_test trans3-1.3 { + set ::ecode +} {} +do_test trans3-1.3.1 { + sqlite3_get_autocommit db +} 1 +do_test trans3-1.4 { + db eval {SELECT * FROM t1} +} {1 2 3 4} +do_test trans3-1.5 { + db eval BEGIN + db eval {INSERT INTO t1 VALUES(5);} + set ::ecode {} + set x [catch { + db eval {SELECT * FROM t1} { + if {[catch {db eval ROLLBACK} errmsg]} { + set ::ecode [sqlite3_extended_errcode db] + error $errmsg + } + } + } errmsg] + lappend x $errmsg +} {1 {cannot rollback transaction - SQL statements in progress}} +do_test trans3-1.6 { + set ::ecode +} {SQLITE_BUSY} +do_test trans3-1.7 { + db eval COMMIT + db eval {SELECT * FROM t1} +} {1 2 3 4 5} +unset -nocomplain ecode + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trans.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trans.test --- sqlite3-3.4.2/test/trans.test 2007-07-13 11:36:48.000000000 +0100 +++ sqlite3-3.6.16/test/trans.test 2009-06-25 12:24:40.000000000 +0100 @@ -11,13 +11,12 @@ # This file implements regression tests for SQLite library. The # focus of this script is database locks. # -# $Id: trans.test,v 1.36 2007/07/13 10:36:48 drh Exp $ +# $Id: trans.test,v 1.41 2009/04/28 16:37:59 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl - # Create several tables to work with. # do_test trans-1.0 { @@ -784,6 +783,7 @@ # set fd [open test.tcl w] puts $fd { + sqlite3_test_control_pending_byte 0x0010000 sqlite3 db test.db db eval { PRAGMA default_cache_size=20; @@ -794,17 +794,37 @@ sqlite_abort } close $fd -file copy -force test.db test.db-bu1 do_test trans-8.1 { catch {exec [info nameofexec] test.tcl} - file copy -force test.db test.db-bu2 - file copy -force test.db-journal test.db-bu2-journal execsql {SELECT md5sum(x,y,z) FROM t2} } $checksum do_test trans-8.2 { execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} } $checksum2 integrity_check trans-8.3 +set fd [open test.tcl w] +puts $fd { + sqlite3_test_control_pending_byte 0x0010000 + sqlite3 db test.db + db eval { + PRAGMA journal_mode=persist; + PRAGMA default_cache_size=20; + BEGIN; + CREATE TABLE t3 AS SELECT * FROM t2; + DELETE FROM t2; + } + sqlite_abort +} +close $fd +do_test trans-8.4 { + catch {exec [info nameofexec] test.tcl} + execsql {SELECT md5sum(x,y,z) FROM t2} +} $checksum +do_test trans-8.5 { + execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master} +} $checksum2 +integrity_check trans-8.6 + # In the following sequence of tests, compute the MD5 sum of the content # of a table, make lots of modifications to that table, then do a rollback. @@ -851,7 +871,14 @@ # t3 a little larger, and thus takes a little longer, so doing 40 tests # is more than 2.0 times slower than doing 20 tests. Considerably more. # -if {[info exists ISQUICK]} { +# Also, if temporary tables are stored in memory and the test pcache +# is in use, only 20 iterations. 
Otherwise the test pcache runs out +# of page slots and SQLite reports "out of memory". +# +if {[info exists ISQUICK] || ( + $TEMP_STORE==3 && [catch {set ::permutations_test_prefix} val]==0 && + [regexp {^pcache[[:digit:]]*$} $val] +) } { set limit 20 } elseif {[info exists SOAKTEST]} { set limit 100 @@ -908,7 +935,7 @@ } [expr {$i%2==0}] } else { do_test trans-9.$i.5-$cnt { - expr {$sqlite_fullsync_count>0} + expr {$sqlite_fullsync_count==0} } {1} } } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger1.test --- sqlite3-3.4.2/test/trigger1.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/trigger1.test 2009-06-12 03:28:39.000000000 +0100 @@ -532,7 +532,7 @@ # Also verify that references within trigger programs are resolved at # statement compile time, not trigger installation time. This means, for # example, that you can drop and re-create tables referenced by triggers. -ifcapable tempdb { +ifcapable tempdb&&attach { do_test trigger1-10.0 { file delete -force test2.db file delete -force test2.db-journal @@ -627,5 +627,16 @@ catchsql {SELECT raise(abort,'message');} } {1 {RAISE() may only be used within a trigger-program}} +do_test trigger1-15.1 { + execsql { + CREATE TABLE tA(a INTEGER PRIMARY KEY, b, c); + CREATE TRIGGER tA_trigger BEFORE UPDATE ON "tA" BEGIN SELECT 1; END; + INSERT INTO tA VALUES(1, 2, 3); + } + catchsql { UPDATE tA SET a = 'abc' } +} {1 {datatype mismatch}} +do_test trigger1-15.2 { + catchsql { INSERT INTO tA VALUES('abc', 2, 3) } +} {1 {datatype mismatch}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger2.test --- sqlite3-3.4.2/test/trigger2.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/trigger2.test 2009-06-12 03:37:59.000000000 +0100 @@ -735,6 +735,18 @@ } } {3 103 5 205 4 304 9 109 11 211 10 310} +# At one point the following was causing a segfault. +do_test trigger2-9.1 { + execsql { + CREATE TABLE t3(a TEXT, b TEXT); + CREATE VIEW v3 AS SELECT t3.a FROM t3; + CREATE TRIGGER trig1 INSTEAD OF DELETE ON v3 BEGIN + SELECT 1; + END; + DELETE FROM v3 WHERE a = 1; + } +} {} + } ;# ifcapable view integrity_check trigger2-9.9 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger3.test --- sqlite3-3.4.2/test/trigger3.test 2007-03-29 18:41:40.000000000 +0100 +++ sqlite3-3.6.16/test/trigger3.test 2009-06-05 18:03:47.000000000 +0100 @@ -78,6 +78,20 @@ SELECT * FROM tbl; } } {} + +# Verify that a ROLLBACK trigger works like a FAIL trigger if +# we are not within a transaction. Ticket #3035. +# +do_test trigger3-3.3 { + catchsql {COMMIT} + catchsql { + INSERT INTO tbl VALUES (3, 9, 10); + } +} {1 {Trigger rollback}} +do_test trigger3-3.4 { + execsql {SELECT * FROM tbl} +} {} + # IGNORE do_test trigger3-4.1 { catchsql { diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger7.test --- sqlite3-3.4.2/test/trigger7.test 2007-03-27 15:43:07.000000000 +0100 +++ sqlite3-3.6.16/test/trigger7.test 2009-06-05 18:03:47.000000000 +0100 @@ -12,7 +12,7 @@ # # This file implements tests to increase coverage of trigger.c. 
# -# $Id: trigger7.test,v 1.1 2005/08/19 02:26:27 drh Exp $ +# $Id: trigger7.test,v 1.3 2008/08/11 18:44:58 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -116,6 +116,6 @@ catchsql { DROP TRIGGER t2r5 } -} {1 {malformed database schema - near "nonsense": syntax error}} +} {1 {malformed database schema (t2r12) - near "nonsense": syntax error}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger8.test --- sqlite3-3.4.2/test/trigger8.test 2006-02-27 22:22:29.000000000 +0000 +++ sqlite3-3.6.16/test/trigger8.test 2009-06-05 18:03:47.000000000 +0100 @@ -13,6 +13,7 @@ # This file implements tests to make sure abusively large triggers # (triggers with 100s or 1000s of statements) work. # +# $Id: trigger8.test,v 1.2 2008/09/17 16:14:10 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -21,14 +22,23 @@ return } +# Set variable $nStatement to the number of statements to include in the +# body of the trigger. On a workstation with virtually unlimited memory, +# use 10000. But on symbian, which allows each application at most a 32MB +# heap, use 1000. +# +set nStatement 10000 +if {$tcl_platform(platform) == "symbian"} { + set nStatement 1000 +} do_test trigger8-1.1 { execsql { CREATE TABLE t1(x); CREATE TABLE t2(y); } - set sql "CREATE TRIGGER r10000 AFTER INSERT ON t1 BEGIN\n" - for {set i 0} {$i<10000} {incr i} { + set sql "CREATE TRIGGER r${nStatement} AFTER INSERT ON t1 BEGIN\n" + for {set i 0} {$i<$nStatement} {incr i} { append sql " INSERT INTO t2 VALUES($i);\n" } append sql "END;" @@ -37,6 +47,6 @@ INSERT INTO t1 VALUES(5); SELECT count(*) FROM t2; } -} {10000} +} $nStatement finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/trigger9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/trigger9.test --- sqlite3-3.4.2/test/trigger9.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/trigger9.test 2009-06-05 18:03:47.000000000 +0100 @@ -0,0 +1,223 @@ +# 2008 January 1 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests some compiler optimizations for SQL statements featuring +# triggers: +# +# +# + +# trigger9-1.* - Test that if there are no references to OLD.* cols, or a +# reference to only OLD.rowid, the data is not loaded. +# +# trigger9-2.* - Test that for NEW.* records populated by UPDATE +# statements, unused fields are populated with NULL values. +# +# trigger9-3.* - Test that the temporary tables used for OLD.* references +# in "INSTEAD OF" triggers have NULL values in unused +# fields. 
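+#
+# (A note on technique, stated as a rough sketch: whether the OLD.* row is
+# actually loaded can be observed in the compiled VDBE program. The
+# has_rowdata helper defined below simply looks for a RowData opcode in the
+# output of EXPLAIN, e.g.
+#
+#     expr {[lsearch [execsql {EXPLAIN DELETE FROM t1}] RowData] >= 0}
+#
+# evaluates to 1 only when the statement reads the complete old row.)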
+# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +proc has_rowdata {sql} { + expr {[lsearch [execsql "explain $sql"] RowData]>=0} +} + +do_test trigger9-1.1 { + execsql { + PRAGMA page_size = 1024; + CREATE TABLE t1(x, y, z); + INSERT INTO t1 VALUES('1', randstr(10000,10000), '2'); + INSERT INTO t1 VALUES('2', randstr(10000,10000), '4'); + INSERT INTO t1 VALUES('3', randstr(10000,10000), '6'); + CREATE TABLE t2(x); + } +} {} + +do_test trigger9-1.2.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE DELETE ON t1 BEGIN + INSERT INTO t2 VALUES(old.rowid); + END; + DELETE FROM t1; + SELECT * FROM t2; + } +} {1 2 3} +do_test trigger9-1.2.3 { + has_rowdata {DELETE FROM t1} +} 0 +do_test trigger9-1.2.4 { execsql { ROLLBACK } } {} + +do_test trigger9-1.3.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE DELETE ON t1 BEGIN + INSERT INTO t2 VALUES(old.x); + END; + DELETE FROM t1; + SELECT * FROM t2; + } +} {1 2 3} +do_test trigger9-1.3.2 { + has_rowdata {DELETE FROM t1} +} 1 +do_test trigger9-1.3.3 { execsql { ROLLBACK } } {} + +do_test trigger9-1.4.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE DELETE ON t1 WHEN old.x='1' BEGIN + INSERT INTO t2 VALUES(old.rowid); + END; + DELETE FROM t1; + SELECT * FROM t2; + } +} {1} +do_test trigger9-1.4.2 { + has_rowdata {DELETE FROM t1} +} 1 +do_test trigger9-1.4.3 { execsql { ROLLBACK } } {} + +do_test trigger9-1.5.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE UPDATE ON t1 BEGIN + INSERT INTO t2 VALUES(old.rowid); + END; + UPDATE t1 SET y = ''; + SELECT * FROM t2; + } +} {1 2 3} +do_test trigger9-1.5.2 { + has_rowdata {UPDATE t1 SET y = ''} +} 0 +do_test trigger9-1.5.3 { execsql { ROLLBACK } } {} + +do_test trigger9-1.6.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE UPDATE ON t1 BEGIN + INSERT INTO t2 VALUES(old.x); + END; + UPDATE t1 SET y = ''; + SELECT * FROM t2; + } +} {1 2 3} +do_test trigger9-1.6.2 { + has_rowdata {UPDATE t1 SET y = ''} +} 1 +do_test trigger9-1.6.3 { execsql { ROLLBACK } } {} + +do_test trigger9-1.7.1 { + execsql { + BEGIN; + CREATE TRIGGER trig1 BEFORE UPDATE ON t1 WHEN old.x>='2' BEGIN + INSERT INTO t2 VALUES(old.x); + END; + UPDATE t1 SET y = ''; + SELECT * FROM t2; + } +} {2 3} +do_test trigger9-1.7.2 { + has_rowdata {UPDATE t1 SET y = ''} +} 1 +do_test trigger9-1.7.3 { execsql { ROLLBACK } } {} + +do_test trigger9-3.1 { + execsql { + CREATE TABLE t3(a, b); + INSERT INTO t3 VALUES(1, 'one'); + INSERT INTO t3 VALUES(2, 'two'); + INSERT INTO t3 VALUES(3, 'three'); + } +} {} +do_test trigger9-3.2 { + execsql { + BEGIN; + CREATE VIEW v1 AS SELECT * FROM t3; + CREATE TRIGGER trig1 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO t2 VALUES(old.a); + END; + UPDATE v1 SET b = 'hello'; + SELECT * FROM t2; + ROLLBACK; + } +} {1 2 3} +do_test trigger9-3.3 { + # In this test the 'c' column of the view is not required by + # the INSTEAD OF trigger, but the expression is reused internally as + # part of the view's WHERE clause. Check that this does not cause + # a problem. 
+ # + execsql { + BEGIN; + CREATE VIEW v1 AS SELECT a, b AS c FROM t3 WHERE c > 'one'; + CREATE TRIGGER trig1 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO t2 VALUES(old.a); + END; + UPDATE v1 SET c = 'hello'; + SELECT * FROM t2; + ROLLBACK; + } +} {2 3} +do_test trigger9-3.4 { + execsql { + BEGIN; + INSERT INTO t3 VALUES(3, 'three'); + INSERT INTO t3 VALUES(3, 'four'); + CREATE VIEW v1 AS SELECT DISTINCT a, b FROM t3; + CREATE TRIGGER trig1 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO t2 VALUES(old.a); + END; + UPDATE v1 SET b = 'hello'; + SELECT * FROM t2; + ROLLBACK; + } +} {1 2 3 3} + +ifcapable compound { + do_test trigger9-3.5 { + execsql { + BEGIN; + INSERT INTO t3 VALUES(1, 'uno'); + CREATE VIEW v1 AS SELECT a, b FROM t3 EXCEPT SELECT 1, 'one'; + CREATE TRIGGER trig1 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO t2 VALUES(old.a); + END; + UPDATE v1 SET b = 'hello'; + SELECT * FROM t2; + ROLLBACK; + } + } {1 2 3} + do_test trigger9-3.6 { + execsql { + BEGIN; + INSERT INTO t3 VALUES(1, 'zero'); + CREATE VIEW v1 AS + SELECT sum(a) AS a, max(b) AS b FROM t3 GROUP BY t3.a HAVING b>'two'; + CREATE TRIGGER trig1 INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO t2 VALUES(old.a); + END; + UPDATE v1 SET b = 'hello'; + SELECT * FROM t2; + ROLLBACK; + } + } {2} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/triggerA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/triggerA.test --- sqlite3-3.4.2/test/triggerA.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/triggerA.test 2009-06-05 18:03:47.000000000 +0100 @@ -0,0 +1,230 @@ +# 2008 February 12 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests issues relating to firing an INSTEAD OF trigger on a VIEW +# when one tries to UPDATE or DELETE from the view. Does the WHERE +# clause of the UPDATE or DELETE statement get passed down correctly +# into the query that manifests the view? +# +# Ticket #2938 +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable !trigger||!compound { + finish_test + return +} + +# Create two table containing some sample data +# +do_test triggerA-1.1 { + db eval { + CREATE TABLE t1(x INTEGER PRIMARY KEY, y TEXT UNIQUE); + CREATE TABLE t2(a INTEGER PRIMARY KEY, b INTEGER UNIQUE, c TEXT); + } + set i 1 + foreach word {one two three four five six seven eight nine ten} { + set j [expr {$i*100 + [string length $word]}] + db eval { + INSERT INTO t1 VALUES($i,$word); + INSERT INTO t2 VALUES(20-$i,$j,$word); + } + incr i + } + db eval { + SELECT count(*) FROM t1 UNION ALL SELECT count(*) FROM t2; + } +} {10 10} + +# Create views of various forms against one or both of the two tables. 
+# +do_test triggerA-1.2 { + db eval { + CREATE VIEW v1 AS SELECT y, x FROM t1; + SELECT * FROM v1 ORDER BY 1; + } +} {eight 8 five 5 four 4 nine 9 one 1 seven 7 six 6 ten 10 three 3 two 2} +do_test triggerA-1.3 { + db eval { + CREATE VIEW v2 AS SELECT x, y FROM t1 WHERE y GLOB '*e*'; + SELECT * FROM v2 ORDER BY 1; + } +} {1 one 3 three 5 five 7 seven 8 eight 9 nine 10 ten} +do_test triggerA-1.4 { + db eval { + CREATE VIEW v3 AS + SELECT CAST(x AS TEXT) AS c1 FROM t1 UNION SELECT y FROM t1; + SELECT * FROM v3 ORDER BY c1; + } +} {1 10 2 3 4 5 6 7 8 9 eight five four nine one seven six ten three two} +do_test triggerA-1.5 { + db eval { + CREATE VIEW v4 AS + SELECT CAST(x AS TEXT) AS c1 FROM t1 + UNION SELECT y FROM t1 WHERE x BETWEEN 3 and 5; + SELECT * FROM v4 ORDER BY 1; + } +} {1 10 2 3 4 5 6 7 8 9 five four three} +do_test triggerA-1.6 { + db eval { + CREATE VIEW v5 AS SELECT x, b FROM t1, t2 WHERE y=c; + SELECT * FROM v5; + } +} {1 103 2 203 3 305 4 404 5 504 6 603 7 705 8 805 9 904 10 1003} + +# Create INSTEAD OF triggers on the views. Run UPDATE and DELETE statements +# using those triggers. Verify correct operation. +# +do_test triggerA-2.1 { + db eval { + CREATE TABLE result2(a,b); + CREATE TRIGGER r1d INSTEAD OF DELETE ON v1 BEGIN + INSERT INTO result2(a,b) VALUES(old.y, old.x); + END; + DELETE FROM v1 WHERE x=5; + SELECT * FROM result2; + } +} {five 5} +do_test triggerA-2.2 { + db eval { + CREATE TABLE result4(a,b,c,d); + CREATE TRIGGER r1u INSTEAD OF UPDATE ON v1 BEGIN + INSERT INTO result4(a,b,c,d) VALUES(old.y, old.x, new.y, new.x); + END; + UPDATE v1 SET y=y||'-extra' WHERE x BETWEEN 3 AND 5; + SELECT * FROM result4 ORDER BY a; + } +} {five 5 five-extra 5 four 4 four-extra 4 three 3 three-extra 3} + + +do_test triggerA-2.3 { + db eval { + DELETE FROM result2; + CREATE TRIGGER r2d INSTEAD OF DELETE ON v2 BEGIN + INSERT INTO result2(a,b) VALUES(old.y, old.x); + END; + DELETE FROM v2 WHERE x=5; + SELECT * FROM result2; + } +} {five 5} +do_test triggerA-2.4 { + db eval { + DELETE FROM result4; + CREATE TRIGGER r2u INSTEAD OF UPDATE ON v2 BEGIN + INSERT INTO result4(a,b,c,d) VALUES(old.y, old.x, new.y, new.x); + END; + UPDATE v2 SET y=y||'-extra' WHERE x BETWEEN 3 AND 5; + SELECT * FROM result4 ORDER BY a; + } +} {five 5 five-extra 5 three 3 three-extra 3} + + +do_test triggerA-2.5 { + db eval { + CREATE TABLE result1(a); + CREATE TRIGGER r3d INSTEAD OF DELETE ON v3 BEGIN + INSERT INTO result1(a) VALUES(old.c1); + END; + DELETE FROM v3 WHERE c1 BETWEEN '8' AND 'eight'; + SELECT * FROM result1 ORDER BY a; + } +} {8 9 eight} +do_test triggerA-2.6 { + db eval { + DELETE FROM result2; + CREATE TRIGGER r3u INSTEAD OF UPDATE ON v3 BEGIN + INSERT INTO result2(a,b) VALUES(old.c1, new.c1); + END; + UPDATE v3 SET c1 = c1 || '-extra' WHERE c1 BETWEEN '8' and 'eight'; + SELECT * FROM result2 ORDER BY a; + } +} {8 8-extra 9 9-extra eight eight-extra} + + +do_test triggerA-2.7 { + db eval { + DELETE FROM result1; + CREATE TRIGGER r4d INSTEAD OF DELETE ON v4 BEGIN + INSERT INTO result1(a) VALUES(old.c1); + END; + DELETE FROM v4 WHERE c1 BETWEEN '8' AND 'eight'; + SELECT * FROM result1 ORDER BY a; + } +} {8 9} +do_test triggerA-2.8 { + db eval { + DELETE FROM result2; + CREATE TRIGGER r4u INSTEAD OF UPDATE ON v4 BEGIN + INSERT INTO result2(a,b) VALUES(old.c1, new.c1); + END; + UPDATE v4 SET c1 = c1 || '-extra' WHERE c1 BETWEEN '8' and 'eight'; + SELECT * FROM result2 ORDER BY a; + } +} {8 8-extra 9 9-extra} + + +do_test triggerA-2.9 { + db eval { + DELETE FROM result2; + CREATE TRIGGER r5d 
INSTEAD OF DELETE ON v5 BEGIN + INSERT INTO result2(a,b) VALUES(old.x, old.b); + END; + DELETE FROM v5 WHERE x=5; + SELECT * FROM result2; + } +} {5 504} +do_test triggerA-2.10 { + db eval { + DELETE FROM result4; + CREATE TRIGGER r5u INSTEAD OF UPDATE ON v5 BEGIN + INSERT INTO result4(a,b,c,d) VALUES(old.x, old.b, new.x, new.b); + END; + UPDATE v5 SET b = b+9900000 WHERE x BETWEEN 3 AND 5; + SELECT * FROM result4 ORDER BY a; + } +} {3 305 3 9900305 4 404 4 9900404 5 504 5 9900504} + +# Only run the reamining tests if memory debugging is turned on. +# +ifcapable !memdebug { + puts "Skipping triggerA malloc tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} +source $testdir/malloc_common.tcl + +# Save a copy of the current database configuration. +# +db close +file delete -force test.db-triggerA +file copy test.db test.db-triggerA +sqlite3 db test.db + +# Run malloc tests on the INSTEAD OF trigger firing. +# +do_malloc_test triggerA-3 -tclprep { + db close + file delete -force test.db test.db-journal + file copy -force test.db-triggerA test.db + sqlite3 db test.db + sqlite3_extended_result_codes db 1 + db eval {SELECT * FROM v5; -- warm up the cache} +} -sqlbody { + DELETE FROM v5 WHERE x=5; + UPDATE v5 SET b=b+9900000 WHERE x BETWEEN 3 AND 5; +} + +# Clean up the saved database copy. +# +file delete -force test.db-triggerA + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/triggerB.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/triggerB.test --- sqlite3-3.4.2/test/triggerB.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/triggerB.test 2009-06-05 18:03:47.000000000 +0100 @@ -0,0 +1,153 @@ +# 2008 April 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice', here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. Specifically, +# it tests updating tables with constraints within a trigger. Ticket #3055. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +ifcapable {!trigger} { + finish_test + return +} + +# Create test tables with constraints. +# +do_test triggerB-1.1 { + execsql { + CREATE TABLE x(x INTEGER PRIMARY KEY, y INT NOT NULL); + INSERT INTO x(y) VALUES(1); + INSERT INTO x(y) VALUES(1); + CREATE TEMP VIEW vx AS SELECT x, y, 0 AS yy FROM x; + CREATE TEMP TRIGGER tx INSTEAD OF UPDATE OF y ON vx + BEGIN + UPDATE x SET y = new.y WHERE x = new.x; + END; + SELECT * FROM vx; + } +} {1 1 0 2 1 0} +do_test triggerB-1.2 { + execsql { + UPDATE vx SET y = yy; + SELECT * FROM vx; + } +} {1 0 0 2 0 0} + +# Added 2008-08-22: +# +# Name resolution within triggers. 
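+#
+# (As the trigger1.test comment above puts it, references within trigger
+# programs are resolved at statement compile time, not trigger installation
+# time. That is why the bogus "wen.x" and "dlo.x" names below are accepted
+# by CREATE TRIGGER and only raise "no such column" once an INSERT or
+# UPDATE actually fires the trigger.)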
+# +do_test triggerB-2.1 { + catchsql { + CREATE TRIGGER ty AFTER INSERT ON x BEGIN + SELECT wen.x; -- Unrecognized name + END; + INSERT INTO x VALUES(1,2); + } +} {1 {no such column: wen.x}} +do_test triggerB-2.2 { + catchsql { + CREATE TRIGGER tz AFTER UPDATE ON x BEGIN + SELECT dlo.x; -- Unrecognized name + END; + UPDATE x SET y=y+1; + } +} {1 {no such column: dlo.x}} + +do_test triggerB-2.3 { + execsql { + CREATE TABLE t2(a INTEGER PRIMARY KEY, b); + INSERT INTO t2 VALUES(1,2); + CREATE TABLE changes(x,y); + CREATE TRIGGER r1t2 AFTER UPDATE ON t2 BEGIN + INSERT INTO changes VALUES(new.a, new.b); + END; + } + execsql { + UPDATE t2 SET a=a+10; + SELECT * FROM changes; + } +} {11 2} +do_test triggerB-2.4 { + execsql { + CREATE TRIGGER r2t2 AFTER DELETE ON t2 BEGIN + INSERT INTO changes VALUES(old.a, old.c); + END; + } + catchsql { + DELETE FROM t2; + } +} {1 {no such column: old.c}} + +# Triggers maintain a mask of columns from the invoking table that are +# used in the trigger body as NEW.column or OLD.column. That mask is then +# used to reduce the amount of information that needs to be loaded into +# the NEW and OLD pseudo-tables at run-time. +# +# These tests cases check the logic for when there are many columns - more +# than will fit in a bitmask. +# +do_test triggerB-3.1 { + execsql { + CREATE TABLE t3( + c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, + c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, + c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, + c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, + c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, + c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, + c60, c61, c62, c63, c64, c65 + ); + CREATE TABLE t3_changes(colnum, oldval, newval); + INSERT INTO t3 VALUES( + 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', + 'a10','a11','a12','a13','a14','a15','a16','a17','a18','a19', + 'a20','a21','a22','a23','a24','a25','a26','a27','a28','a29', + 'a30','a31','a32','a33','a34','a35','a36','a37','a38','a39', + 'a40','a41','a42','a43','a44','a45','a46','a47','a48','a49', + 'a50','a51','a52','a53','a54','a55','a56','a57','a58','a59', + 'a60','a61','a62','a63','a64','a65' + ); + } + for {set i 0} {$i<=65} {incr i} { + set sql [subst { + CREATE TRIGGER t3c$i AFTER UPDATE ON t3 + WHEN old.c$i!=new.c$i BEGIN + INSERT INTO t3_changes VALUES($i, old.c$i, new.c$i); + END + }] + db eval $sql + } + execsql { + SELECT * FROM t3_changes + } +} {} +for {set i 0} {$i<=64} {incr i} { + do_test triggerB-3.2.$i.1 [subst { + execsql { + UPDATE t3 SET c$i='b$i'; + SELECT * FROM t3_changes ORDER BY rowid DESC LIMIT 1; + } + }] [subst {$i a$i b$i}] + do_test triggerB-3.2.$i.2 [subst { + execsql { + SELECT count(*) FROM t3_changes + } + }] [expr {$i+1}] + do_test triggerB-3.2.$i.2 [subst { + execsql { + SELECT * FROM t3_changes WHERE colnum=$i + } + }] [subst {$i a$i b$i}] +} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/types3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/types3.test --- sqlite3-3.4.2/test/types3.test 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/test/types3.test 2009-06-05 18:03:47.000000000 +0100 @@ -12,7 +12,7 @@ # of this file is testing the interaction of SQLite manifest types # with Tcl dual-representations. 
# -# $Id: types3.test,v 1.7 2007/06/26 22:42:56 drh Exp $ +# $Id: types3.test,v 1.8 2008/04/28 13:02:58 drh Exp $ # set testdir [file dirname $argv0] @@ -74,10 +74,11 @@ set V [db one {SELECT 123}] tcl_variable_type V } int +set Vx [expr {1+wide(123456789123456)}] do_test types3-2.3 { set V [db one {SELECT 1234567890123456}] tcl_variable_type V -} wideInt +} [tcl_variable_type Vx] do_test types3-2.4.1 { set V [db one {SELECT 1234567890123456.1}] tcl_variable_type V diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/unique.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/unique.test --- sqlite3-3.4.2/test/unique.test 2007-03-27 15:43:07.000000000 +0100 +++ sqlite3-3.6.16/test/unique.test 2009-06-25 12:24:40.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the CREATE UNIQUE INDEX statement, # and primary keys, and the UNIQUE constraint on table columns # -# $Id: unique.test,v 1.8 2005/06/24 03:53:06 drh Exp $ +# $Id: unique.test,v 1.9 2009/05/02 15:46:47 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -248,6 +248,6 @@ catchsql { INSERT INTO t5 VALUES(1,2,3,4,5,6); } -} {1 {columns first_column_with_long_name, second_column_with_long_name, third_column_with_long_name, fourth_column_with_long_name, fifth_column_with_long_name, ... are not unique}} +} {1 {columns first_column_with_long_name, second_column_with_long_name, third_column_with_long_name, fourth_column_with_long_name, fifth_column_with_long_name, sixth_column_with_long_name are not unique}} finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/update.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/update.test --- sqlite3-3.4.2/test/update.test 2007-03-27 15:43:07.000000000 +0100 +++ sqlite3-3.6.16/test/update.test 2009-06-05 18:03:48.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the UPDATE statement. # -# $Id: update.test,v 1.17 2005/01/21 03:12:16 danielk1977 Exp $ +# $Id: update.test,v 1.19 2008/04/10 18:44:36 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -36,7 +36,7 @@ do_test update-3.1 { execsql {CREATE TABLE test1(f1 int,f2 int)} for {set i 1} {$i<=10} {incr i} { - set sql "INSERT INTO test1 VALUES($i,[expr {int(pow(2,$i))}])" + set sql "INSERT INTO test1 VALUES($i,[expr {1<<$i}])" execsql $sql } execsql {SELECT * FROM test1 ORDER BY f1} @@ -62,6 +62,18 @@ do_test update-3.5 { execsql {UPDATE test1 SET f2=f2*3} } {} +do_test update-3.5.1 { + db changes +} {10} + +# verify that SELECT does not reset the change counter +do_test update-3.5.2 { + db eval {SELECT count(*) FROM test1} +} {10} +do_test update-3.5.3 { + db changes +} {10} + do_test update-3.6 { execsql {SELECT * FROM test1 ORDER BY f1} } {1 6 2 12 3 24 4 48 5 96 6 192 7 384 8 768 9 1536 10 3072} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/utf16align.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/utf16align.test --- sqlite3-3.4.2/test/utf16align.test 2006-02-16 18:16:38.000000000 +0000 +++ sqlite3-3.6.16/test/utf16align.test 2009-06-12 03:37:59.000000000 +0100 @@ -14,7 +14,7 @@ # that all strings passed to that function are aligned on an even # byte boundary. 
# -# $Id: utf16align.test,v 1.1 2006/02/16 18:16:38 drh Exp $ +# $Id: utf16align.test,v 1.2 2008/11/07 03:29:34 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -81,4 +81,15 @@ } 0 integrity_check utf16align-1.4 +# ticket #3482 +# +db close +sqlite3 db :memory: +do_test utf16align-2.1 { + db eval { + PRAGMA encoding=UTF16be; + SELECT hex(ltrim(x'6efcda')); + } +} {6EFC} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/utf16.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/utf16.test --- sqlite3-3.4.2/test/utf16.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/utf16.test 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -# 2001 September 15 -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -# -#*********************************************************************** -# This file runs all tests. -# -# $Id: utf16.test,v 1.6 2007/01/04 16:37:04 drh Exp $ - -set testdir [file dirname $argv0] -source $testdir/tester.tcl -rename finish_test really_finish_test2 -proc finish_test {} {} -set ISQUICK 1 - -if { [llength $argv]>0 } { - set FILES $argv - set argv [list] -} else { - set F { - alter.test alter3.test - auth.test bind.test blob.test capi2.test capi3.test collate1.test - collate2.test collate3.test collate4.test collate5.test collate6.test - conflict.test date.test delete.test expr.test fkey1.test func.test - hook.test index.test insert2.test insert.test interrupt.test in.test - intpkey.test ioerr.test join2.test join.test lastinsert.test - laststmtchanges.test limit.test lock2.test lock.test main.test - memdb.test minmax.test misc1.test misc2.test misc3.test notnull.test - null.test progress.test quote.test rowid.test select1.test select2.test - select3.test select4.test select5.test select6.test sort.test - subselect.test tableapi.test table.test temptable.test - trace.test trigger1.test trigger2.test trigger3.test - trigger4.test types2.test types.test unique.test update.test - vacuum.test view.test where.test - } - foreach f $F {lappend FILES $testdir/$f} -} - -rename sqlite3 real_sqlite3 -proc sqlite3 {args} { - set r [eval "real_sqlite3 $args"] - if { [llength $args] == 2 } { - [lindex $args 0] eval {pragma encoding = 'UTF-16'} - } - set r -} - -rename do_test really_do_test -proc do_test {args} { - set sc [concat really_do_test "utf16-[lindex $args 0]" [lrange $args 1 end]] - eval $sc -} - -foreach f $FILES { - source $f - catch {db close} - if {$sqlite_open_file_count>0} { - puts "$tail did not close all files: $sqlite_open_file_count" - incr nErr - lappend ::failList $tail - } -} - -rename sqlite3 "" -rename real_sqlite3 sqlite3 -rename finish_test "" -rename really_finish_test2 finish_test -rename do_test "" -rename really_do_test do_test -finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vacuum2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vacuum2.test --- sqlite3-3.4.2/test/vacuum2.test 2007-07-19 17:35:17.000000000 +0100 +++ sqlite3-3.6.16/test/vacuum2.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the VACUUM statement. 
# -# $Id: vacuum2.test,v 1.3 2007/07/19 16:35:17 drh Exp $ +# $Id: vacuum2.test,v 1.10 2009/02/18 20:31:18 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -57,4 +57,129 @@ hexio_get_int [hexio_read test.db 24 4] } [expr {[hexio_get_int [hexio_read test.db 24 4]]+1}] +############################################################################ +# Verify that we can use the auto_vacuum pragma to request a new +# autovacuum setting, do a VACUUM, and the new setting takes effect. +# Make sure this happens correctly even if there are multiple open +# connections to the same database file. +# +sqlite3 db2 test.db +set pageSize [db eval {pragma page_size}] + +# We are currently not autovacuuming so the database should be 3 pages +# in size. 1 page for each of sqlite_master, t1, and t2. +# +do_test vacuum2-3.1 { + execsql { + INSERT INTO t1 VALUES('hello'); + INSERT INTO t2 VALUES('out there'); + } + expr {[file size test.db]/$pageSize} +} {3} +set cksum [cksum] +do_test vacuum2-3.2 { + cksum db2 +} $cksum + +# Convert the database to an autovacuumed database. +ifcapable autovacuum { + do_test vacuum2-3.3 { + execsql { + PRAGMA auto_vacuum=FULL; + VACUUM; + } + expr {[file size test.db]/$pageSize} + } {4} +} +do_test vacuum2-3.4 { + cksum db2 +} $cksum +do_test vacuum2-3.5 { + cksum +} $cksum +do_test vacuum2-3.6 { + execsql {PRAGMA integrity_check} db2 +} {ok} +do_test vacuum2-3.7 { + execsql {PRAGMA integrity_check} db +} {ok} + +# Convert the database back to a non-autovacuumed database. +do_test vacuum2-3.13 { + execsql { + PRAGMA auto_vacuum=NONE; + VACUUM; + } + expr {[file size test.db]/$pageSize} +} {3} +do_test vacuum2-3.14 { + cksum db2 +} $cksum +do_test vacuum2-3.15 { + cksum +} $cksum +do_test vacuum2-3.16 { + execsql {PRAGMA integrity_check} db2 +} {ok} +do_test vacuum2-3.17 { + execsql {PRAGMA integrity_check} db +} {ok} + +db2 close + +ifcapable autovacuum { + do_test vacuum2-4.1 { + db close + file delete -force test.db + sqlite3 db test.db + execsql { + pragma auto_vacuum=1; + create table t(a, b); + insert into t values(1, 2); + insert into t values(1, 2); + pragma auto_vacuum=0; + vacuum; + pragma auto_vacuum; + } + } {0} + do_test vacuum2-4.2 { + execsql { + pragma auto_vacuum=1; + vacuum; + pragma auto_vacuum; + } + } {1} + do_test vacuum2-4.3 { + execsql { + pragma integrity_check + } + } {ok} + do_test vacuum2-4.4 { + db close + sqlite3 db test.db + execsql { + pragma auto_vacuum; + } + } {1} + do_test vacuum2-4.5 { # Ticket #3663 + execsql { + pragma auto_vacuum=2; + vacuum; + pragma auto_vacuum; + } + } {2} + do_test vacuum2-4.6 { + execsql { + pragma integrity_check + } + } {ok} + do_test vacuum2-4.7 { + db close + sqlite3 db test.db + execsql { + pragma auto_vacuum; + } + } {2} +} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vacuum3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vacuum3.test --- sqlite3-3.4.2/test/vacuum3.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/vacuum3.test 2009-06-05 18:03:48.000000000 +0100 @@ -0,0 +1,340 @@ +# 2007 March 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. 
The +# focus of this file is changing the database page size using a +# VACUUM statement. +# +# $Id: vacuum3.test,v 1.9 2008/08/26 21:07:27 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# If the VACUUM statement is disabled in the current build, skip all +# the tests in this file. +# +ifcapable !vacuum { + finish_test + return +} + + +#------------------------------------------------------------------- +# Test cases vacuum3-1.* convert a simple 2-page database between a +# few different page sizes. +# +do_test vacuum3-1.1 { + execsql { + PRAGMA auto_vacuum=OFF; + PRAGMA page_size = 1024; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + } +} {} +do_test vacuum3-1.2 { + execsql { PRAGMA page_size } +} {1024} +do_test vacuum3-1.3 { + file size test.db +} {2048} + +set I 4 +foreach {request actual database} [list \ + 2048 2048 4096 \ + 1024 1024 2048 \ + 1170 1024 2048 \ + 256 1024 2048 \ + 512 512 1024 \ + 4096 4096 8192 \ + 1024 1024 2048 \ +] { + do_test vacuum3-1.$I.1 { + execsql " + PRAGMA page_size = $request; + VACUUM; + " + execsql { PRAGMA page_size } + } $actual + do_test vacuum3-1.$I.2 { + file size test.db + } $database + do_test vacuum3-1.$I.3 { + execsql { SELECT * FROM t1 } + } {1 2 3} + integrity_check vacuum3-1.$I.4 + + incr I +} + +#------------------------------------------------------------------- +# Test cases vacuum3-2.* convert a simple 3-page database between a +# few different page sizes. +# +do_test vacuum3-2.1 { + execsql { + PRAGMA page_size = 1024; + VACUUM; + ALTER TABLE t1 ADD COLUMN d; + UPDATE t1 SET d = randomblob(1000); + } + file size test.db +} {3072} +do_test vacuum3-2.2 { + execsql { PRAGMA page_size } +} {1024} +do_test vacuum3-2.3 { + set blob [db one {select d from t1}] + string length $blob +} {1000} + +set I 4 +foreach {request actual database} [list \ + 2048 2048 4096 \ + 1024 1024 3072 \ + 1170 1024 3072 \ + 256 1024 3072 \ + 512 512 2048 \ + 4096 4096 8192 \ + 1024 1024 3072 \ +] { + do_test vacuum3-2.$I.1 { + execsql " + PRAGMA page_size = $request; + VACUUM; + " + execsql { PRAGMA page_size } + } $actual + do_test vacuum3-2.$I.2 { + file size test.db + } $database + do_test vacuum3-2.$I.3 { + execsql { SELECT * FROM t1 } + } [list 1 2 3 $blob] + integrity_check vacuum3-1.$I.4 + + incr I +} + +#------------------------------------------------------------------- +# Test cases vacuum3-3.* converts a database large enough to include +# the locking page (in a test environment) between few different +# page sizes. 
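+#
+# (Background, stated loosely: SQLite leaves the "locking page" - the page
+# that overlaps the pending-byte lock offset - empty of content. The default
+# offset is 1GB, so ordinarily only very large databases contain such a
+# page; the test harness lowers the offset, as trans.test above does
+# explicitly with
+#
+#     sqlite3_test_control_pending_byte 0x0010000
+#
+# so that even this small database spans a locking page while it is
+# VACUUMed between page sizes.)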
+# +proc signature {} { + return [db eval {SELECT count(*), md5sum(a), md5sum(b), md5sum(c) FROM abc}] +} +do_test vacuum3-3.1 { + execsql " + PRAGMA page_size = 1024; + BEGIN; + CREATE TABLE abc(a PRIMARY KEY, b, c); + INSERT INTO abc VALUES(randomblob(100), randomblob(200), randomblob(1000)); + INSERT INTO abc + SELECT randomblob(1000), randomblob(200), randomblob(100) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(25), randomblob(45), randomblob(9456) + FROM abc; + INSERT INTO abc + SELECT randomblob(100), randomblob(200), randomblob(1000) + FROM abc; + INSERT INTO abc + SELECT randomblob(25), randomblob(45), randomblob(9456) + FROM abc; + COMMIT; + " +} {} +do_test vacuum3-3.2 { + execsql { PRAGMA page_size } +} {1024} + +set ::sig [signature] + +set I 3 +foreach {request actual} [list \ + 2048 2048 \ + 1024 1024 \ + 1170 1024 \ + 256 1024 \ + 512 512 \ + 4096 4096 \ + 1024 1024 \ +] { + do_test vacuum3-3.$I.1 { + execsql " + PRAGMA page_size = $request; + VACUUM; + " + execsql { PRAGMA page_size } + } $actual + do_test vacuum3-3.$I.2 { + signature + } $::sig + integrity_check vacuum3-3.$I.3 + + incr I +} + +do_test vacuum3-4.1 { + db close + file delete test.db + sqlite3 db test.db + execsql { + PRAGMA page_size=1024; + CREATE TABLE abc(a, b, c); + INSERT INTO abc VALUES(1, 2, 3); + INSERT INTO abc VALUES(4, 5, 6); + } + execsql { SELECT * FROM abc } +} {1 2 3 4 5 6} +do_test vacuum3-4.2 { + sqlite3 db2 test.db + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} +do_test vacuum3-4.3 { + execsql { + PRAGMA page_size = 2048; + VACUUM; + } + execsql { SELECT * FROM abc } +} {1 2 3 4 5 6} +do_test vacuum3-4.4 { + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} +do_test vacuum3-4.5 { + execsql { + PRAGMA page_size=16384; + VACUUM; + } db2 + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} +do_test vacuum3-4.6 { + execsql { + PRAGMA page_size=1024; + VACUUM; + } + execsql { SELECT * FROM abc } db2 +} {1 2 3 4 5 6} + +# Unable to change the page-size of an in-memory using vacuum. 
+db2 close +sqlite3 db2 :memory: +do_test vacuum3-5.1 { + db2 eval { + CREATE TABLE t1(x); + INSERT INTO t1 VALUES(1234); + PRAGMA page_size=4096; + VACUUM; + SELECT * FROM t1; + } +} {1234} +do_test vacuum3-5.2 { + db2 eval { + PRAGMA page_size + } +} {1024} + +set create_database_sql { + BEGIN; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, randstr(50,50), randstr(50,50)); + INSERT INTO t1 SELECT a+2, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+4, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+8, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+16, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+32, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+64, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+128, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 VALUES(1, randstr(600,600), randstr(600,600)); + CREATE TABLE t2 AS SELECT * FROM t1; + CREATE TABLE t3 AS SELECT * FROM t1; + COMMIT; + DROP TABLE t2; +} + +do_ioerr_test vacuum3-ioerr-1 -cksum true -sqlprep " + PRAGMA page_size = 1024; + $create_database_sql +" -sqlbody { + PRAGMA page_size = 4096; + VACUUM; +} +do_ioerr_test vacuum3-ioerr-2 -cksum true -sqlprep " + PRAGMA page_size = 2048; + $create_database_sql +" -sqlbody { + PRAGMA page_size = 512; + VACUUM; +} + +ifcapable autovacuum { + do_ioerr_test vacuum3-ioerr-3 -cksum true -sqlprep " + PRAGMA auto_vacuum = 0; + $create_database_sql + " -sqlbody { + PRAGMA auto_vacuum = 1; + VACUUM; + } + do_ioerr_test vacuum3-ioerr-4 -cksum true -sqlprep " + PRAGMA auto_vacuum = 1; + $create_database_sql + " -sqlbody { + PRAGMA auto_vacuum = 0; + VACUUM; + } +} + +source $testdir/malloc_common.tcl +if {$MEMDEBUG} { + do_malloc_test vacuum3-malloc-1 -sqlprep { + PRAGMA page_size = 2048; + BEGIN; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, randstr(50,50), randstr(50,50)); + INSERT INTO t1 SELECT a+2, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+4, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+8, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+16, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+32, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+64, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 SELECT a+128, b||'-'||rowid, c||'-'||rowid FROM t1; + INSERT INTO t1 VALUES(1, randstr(600,600), randstr(600,600)); + CREATE TABLE t2 AS SELECT * FROM t1; + CREATE TABLE t3 AS SELECT * FROM t1; + COMMIT; + DROP TABLE t2; + } -sqlbody { + PRAGMA page_size = 512; + VACUUM; + } + do_malloc_test vacuum3-malloc-2 -sqlprep { + PRAGMA encoding=UTF16; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + CREATE TABLE t2(x,y,z); + INSERT INTO t2 SELECT * FROM t1; + } -sqlbody { + VACUUM; + } +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vacuum.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vacuum.test --- sqlite3-3.4.2/test/vacuum.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/vacuum.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the VACUUM statement. # -# $Id: vacuum.test,v 1.38 2006/10/04 11:55:50 drh Exp $ +# $Id: vacuum.test,v 1.43 2009/01/31 14:54:07 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -20,36 +20,17 @@ # the tests in this file. 
# ifcapable {!vacuum} { + omit_test vacuum.test {Compiled with SQLITE_OMIT_VACUUM} finish_test return } if $AUTOVACUUM { + omit_test vacuum.test {Auto-vacuum is enabled} finish_test return } set fcnt 1 -proc cksum {{db db}} { - set sql "SELECT name, type, sql FROM sqlite_master ORDER BY name, type" - set txt [$db eval $sql]\n - set sql "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name" - foreach tbl [$db eval $sql] { - append txt [$db eval "SELECT * FROM $tbl"]\n - } - foreach prag {default_cache_size} { - append txt $prag-[$db eval "PRAGMA $prag"]\n - } - if 0 { - global fcnt - set fd [open dump$fcnt.txt w] - puts -nonewline $fd $txt - close $fd - incr fcnt - } - set cksum [string length $txt]-[md5 $txt] - # puts $cksum-[file size test.db] - return $cksum -} do_test vacuum-1.1 { execsql { BEGIN; @@ -121,14 +102,18 @@ } {1} } ifcapable vacuum { - do_test vacuum-2.1 { + do_test vacuum-2.1.1 { catchsql { BEGIN; VACUUM; - COMMIT; } } {1 {cannot VACUUM from within a transaction}} - catch {db eval COMMIT} + do_test vacuum-2.1.2 { + sqlite3_get_autocommit db + } {0} + do_test vacuum-2.1.3 { + db eval {COMMIT} + } {} } do_test vacuum-2.2 { sqlite3 db2 test.db @@ -302,7 +287,14 @@ CREATE TABLE t1(t); VACUUM; } db2 -} {} + execsql { + CREATE TABLE t2(t); + CREATE TABLE t3(t); + DROP TABLE t2; + VACUUM; + pragma integrity_check; + } db2 +} {ok} db2 close # Ticket #873. VACUUM a database that has ' in its name. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/veryquick.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/veryquick.test --- sqlite3-3.4.2/test/veryquick.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/veryquick.test 2009-06-05 18:03:48.000000000 +0100 @@ -0,0 +1,15 @@ +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file runs all the tests run by quick.test except for those related +# to malloc or IO error simulation. With these tests omitted, the overall +# run time is reduced by about 75%. +# +# $Id: veryquick.test,v 1.9 2008/07/12 14:52:21 drh Exp $ + +set testdir [file dirname $argv0] +set ISVERYQUICK 1 +source $testdir/quick.test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/view.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/view.test --- sqlite3-3.4.2/test/view.test 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/test/view.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing VIEW statements. 
# -# $Id: view.test,v 1.33 2006/09/11 23:45:50 drh Exp $ +# $Id: view.test,v 1.39 2008/12/14 14:45:21 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -139,13 +139,19 @@ SELECT * FROM v2 LIMIT 1 } } {x 7 a 8 b 9 c 10} -do_test view-3.3 { +do_test view-3.3.1 { execsql2 { DROP VIEW v1; CREATE VIEW v1 AS SELECT a AS 'xyz', b+c AS 'pqr', c-b FROM t1; SELECT * FROM v1 LIMIT 1 } } {xyz 2 pqr 7 c-b 1} +do_test view-3.3.2 { + execsql2 { + CREATE VIEW v1b AS SELECT t1.a, b+c, t1.c FROM t1; + SELECT * FROM v1b LIMIT 1 + } +} {a 2 b+c 7 c 4} ifcapable compound { do_test view-3.4 { @@ -450,14 +456,16 @@ } } {1 {parameters are not allowed in views}} -do_test view-13.1 { - file delete -force test2.db - catchsql { - ATTACH 'test2.db' AS two; - CREATE TABLE two.t2(x,y); - CREATE VIEW v13 AS SELECT y FROM two.t2; - } -} {1 {view v13 cannot reference objects in database two}} +ifcapable attach { + do_test view-13.1 { + file delete -force test2.db + catchsql { + ATTACH 'test2.db' AS two; + CREATE TABLE two.t2(x,y); + CREATE VIEW v13 AS SELECT y FROM two.t2; + } + } {1 {view v13 cannot reference objects in database two}} +} # Ticket #1658 # @@ -498,4 +506,74 @@ } } {0 {}} +# correct error message when attempting to drop a view that does not +# exist. +# +do_test view-17.1 { + catchsql { + DROP VIEW nosuchview + } +} {1 {no such view: nosuchview}} +do_test view-17.2 { + catchsql { + DROP VIEW main.nosuchview + } +} {1 {no such view: main.nosuchview}} + +do_test view-18.1 { + execsql { + DROP VIEW t1; + DROP TABLE t1; + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + + CREATE VIEW vv1 AS SELECT * FROM t1; + CREATE VIEW vv2 AS SELECT * FROM vv1; + CREATE VIEW vv3 AS SELECT * FROM vv2; + CREATE VIEW vv4 AS SELECT * FROM vv3; + CREATE VIEW vv5 AS SELECT * FROM vv4; + + SELECT * FROM vv5; + } +} {1 2 3 4 5 6} + +# Ticket #3308 +# Make sure "rowid" columns in a view are named correctly. +# +do_test view-19.1 { + execsql { + CREATE VIEW v3308a AS SELECT rowid, * FROM t1; + } + execsql2 { + SELECT * FROM v3308a + } +} {rowid 1 a 1 b 2 c 3 rowid 2 a 4 b 5 c 6} +do_test view-19.2 { + execsql { + CREATE VIEW v3308b AS SELECT t1.rowid, t1.a, t1.b+t1.c FROM t1; + } + execsql2 { + SELECT * FROM v3308b + } +} {rowid 1 a 1 t1.b+t1.c 5 rowid 2 a 4 t1.b+t1.c 11} +do_test view-19.3 { + execsql { + CREATE VIEW v3308c AS SELECT t1.oid, A, t1.b+t1.c AS x FROM t1; + } + execsql2 { + SELECT * FROM v3308c + } +} {rowid 1 a 1 x 5 rowid 2 a 4 x 11} + +# Ticket #3539 had this crashing (see commit [5940]). +do_test view-20.1 { + execsql { + DROP TABLE IF EXISTS t1; + DROP VIEW IF EXISTS v1; + CREATE TABLE t1(c1); + CREATE VIEW v1 AS SELECT c1 FROM (SELECT t1.c1 FROM t1); + } +} {} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab1.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab1.test --- sqlite3-3.4.2/test/vtab1.test 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/test/vtab1.test 2009-06-05 18:03:48.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is creating and dropping virtual tables. # -# $Id: vtab1.test,v 1.43 2007/06/27 15:53:35 danielk1977 Exp $ +# $Id: vtab1.test,v 1.57 2008/08/01 17:51:47 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -41,6 +41,8 @@ # * How to test the sqlite3_index_constraint_usage.omit field? # * vtab1-5.* # +# vtab1-14.*: Test 'IN' constraints - i.e. 
"SELECT * FROM t1 WHERE id IN(...)" +# #---------------------------------------------------------------------- @@ -50,6 +52,9 @@ # We cannot create a virtual table if the module has not been registered. # do_test vtab1-1.1 { + explain { + CREATE VIRTUAL TABLE t1 USING echo; + } catchsql { CREATE VIRTUAL TABLE t1 USING echo; } @@ -179,10 +184,24 @@ } {1 {no such module: echo}} register_echo_module [sqlite3_connection_pointer db] -do_test vtab1-1.X { +register_echo_module [sqlite3_connection_pointer db] + +# Test an error message returned from a v-table constructor. +# +do_test vtab1-1.16 { execsql { DROP TABLE techo; + CREATE TABLE logmsg(log); + } + catchsql { + CREATE VIRTUAL TABLE techo USING echo(treal, logmsg); + } +} {1 {table 'logmsg' already exists}} + +do_test vtab1-1.17 { + execsql { DROP TABLE treal; + DROP TABLE logmsg; SELECT sql FROM sqlite_master; } } {} @@ -416,6 +435,8 @@ set echo_module } [list xBestIndex {SELECT rowid, * FROM 'treal'} \ xFilter {SELECT rowid, * FROM 'treal'}] +ifcapable subquery { +# The echo module uses a subquery internally to implement the MATCH operator. do_test vtab1-3.14 { set echo_module "" execsql { @@ -429,6 +450,7 @@ xFilter \ {SELECT rowid, * FROM 'treal' WHERE b LIKE (SELECT '%'||?||'%')} \ string ] +}; #ifcapable subquery #---------------------------------------------------------------------- # Test case vtab1-3 test table scans and the echo module's @@ -591,40 +613,116 @@ SELECT name FROM sqlite_master WHERE type = 'table'; } } {treal techo} -do_test vtab1-6-3 { +do_test vtab1-6-3.1.1 { execsql { + PRAGMA count_changes=ON; INSERT INTO techo VALUES(1, 2, 3); + } +} {1} +do_test vtab1-6-3.1.2 { + db changes +} {1} +do_test vtab1-6-3.2 { + execsql { SELECT * FROM techo; } } {1 2 3} -do_test vtab1-6-4 { +do_test vtab1-6-4.1 { execsql { UPDATE techo SET a = 5; + } + db changes +} {1} +do_test vtab1-6-4.2 { + execsql { + SELECT * FROM techo; + } +} {5 2 3} +do_test vtab1-6-4.3 { + execsql { + UPDATE techo SET a=6 WHERE a<0; + } + db changes +} {0} +do_test vtab1-6-4.4 { + execsql { SELECT * FROM techo; } } {5 2 3} -do_test vtab1-6-5 { +do_test vtab1-6-5.1 { execsql { UPDATE techo set a = a||b||c; + } + db changes +} {1} +do_test vtab1-6-5.2 { + execsql { SELECT * FROM techo; } } {523 2 3} -do_test vtab1-6-6 { +do_test vtab1-6-6.1 { execsql { UPDATE techo set rowid = 10; + } + db changes +} {1} +do_test vtab1-6-6.2 { + execsql { SELECT rowid FROM techo; } } {10} -do_test vtab1-6-7 { +do_test vtab1-6-7.1.1 { + execsql { + INSERT INTO techo VALUES(11,12,13); + } +} {1} +do_test vtab1-6-7.1.2 { + db changes +} {1} +do_test vtab1-6-7.2 { + execsql { + SELECT * FROM techo ORDER BY a; + } +} {11 12 13 523 2 3} +do_test vtab1-6-7.3 { + execsql { + UPDATE techo SET b=b+1000 + } + db changes +} {2} +do_test vtab1-6-7.4 { + execsql { + SELECT * FROM techo ORDER BY a; + } +} {11 1012 13 523 1002 3} + + +do_test vtab1-6-8.1 { + execsql { + DELETE FROM techo WHERE a=5; + } + db changes +} {0} +do_test vtab1-6-8.2 { + execsql { + SELECT * FROM techo ORDER BY a; + } +} {11 1012 13 523 1002 3} +do_test vtab1-6-8.3 { execsql { DELETE FROM techo; - SELECT * FROM techo; + } + db changes +} {2} +do_test vtab1-6-8.4 { + execsql { + SELECT * FROM techo ORDER BY a; } } {} - +execsql {PRAGMA count_changes=OFF} file delete -force test2.db file delete -force test2.db-journal @@ -748,20 +846,24 @@ } } {} -do_test vtab1.8-1 { - set echo_module "" - execsql { - ATTACH 'test2.db' AS aux; - CREATE VIRTUAL TABLE aux.e2 USING echo(real_abc); - } - set echo_module -} [list xCreate 
echo aux e2 real_abc \
- xSync echo(real_abc) \
- xCommit echo(real_abc) \
-]
+ifcapable attach {
+ do_test vtab1.8-1 {
+ set echo_module ""
+ execsql {
+ ATTACH 'test2.db' AS aux;
+ CREATE VIRTUAL TABLE aux.e2 USING echo(real_abc);
+ }
+ set echo_module
+ } [list xCreate echo aux e2 real_abc \
+ xSync echo(real_abc) \
+ xCommit echo(real_abc) \
+ ]
+}
do_test vtab1.8-2 {
- execsql {
+ catchsql {
DROP TABLE aux.e2;
+ }
+ execsql {
DROP TABLE treal;
DROP TABLE techo;
DROP TABLE echo_abc;
@@ -908,18 +1010,19 @@
# First test outside of a transaction.
do_test vtab1.12-2 {
catchsql { INSERT INTO echo_c SELECT * FROM b; }
-} {1 {constraint failed}}
+} {1 {echo-vtab-error: column a is not unique}}
+do_test vtab1.12-2.1 {
+ sqlite3_errmsg db
+} {echo-vtab-error: column a is not unique}
do_test vtab1.12-3 {
execsql { SELECT * FROM c }
} {3 G H}
-breakpoint
-
# Now the real test - wrapped in a transaction.
do_test vtab1.12-4 {
execsql {BEGIN}
catchsql { INSERT INTO echo_c SELECT * FROM b; }
-} {1 {constraint failed}}
+} {1 {echo-vtab-error: column a is not unique}}
do_test vtab1.12-5 {
execsql { SELECT * FROM c }
} {3 G H}
@@ -928,5 +1031,135 @@
execsql { SELECT * FROM c }
} {3 G H}
+# At one point (ticket #2759), a WHERE clause of the form "<column> IS NULL"
+# on a virtual table was causing an assert() to fail in the compiler.
+#
+# "IS NULL" clauses should not be passed through to the virtual table
+# implementation. They are handled by SQLite after the vtab returns its
+# data.
+#
+do_test vtab1.13-1 {
+ execsql {
+ SELECT * FROM echo_c WHERE a IS NULL
+ }
+} {}
+do_test vtab1.13-2 {
+ execsql {
+ INSERT INTO c VALUES(NULL, 15, 16);
+ SELECT * FROM echo_c WHERE a IS NULL
+ }
+} {{} 15 16}
+do_test vtab1.13-3 {
+ execsql {
+ INSERT INTO c VALUES(15, NULL, 16);
+ SELECT * FROM echo_c WHERE b IS NULL
+ }
+} {15 {} 16}
+do_test vtab1.13-3 {
+ execsql {
+ SELECT * FROM echo_c WHERE b IS NULL AND a = 15;
+ }
+} {15 {} 16}
+
+
+do_test vtab1-14.1 {
+ execsql { DELETE FROM c }
+ set echo_module ""
+ execsql { SELECT * FROM echo_c WHERE rowid IN (1, 2, 3) }
+ set echo_module
+} [list xBestIndex {SELECT rowid, * FROM 'c'} xFilter {SELECT rowid, * FROM 'c'}]
+
+do_test vtab1-14.2 {
+ set echo_module ""
+ execsql { SELECT * FROM echo_c WHERE rowid = 1 }
+ set echo_module
+} [list xBestIndex {SELECT rowid, * FROM 'c' WHERE rowid = ?} xFilter {SELECT rowid, * FROM 'c' WHERE rowid = ?} 1]
+
+do_test vtab1-14.3 {
+ set echo_module ""
+ execsql { SELECT * FROM echo_c WHERE a = 1 }
+ set echo_module
+} [list xBestIndex {SELECT rowid, * FROM 'c' WHERE a = ?} xFilter {SELECT rowid, * FROM 'c' WHERE a = ?} 1]
+
+do_test vtab1-14.4 {
+ set echo_module ""
+ execsql { SELECT * FROM echo_c WHERE a IN (1, 2) }
+ set echo_module
+} [list xBestIndex {SELECT rowid, * FROM 'c'} xFilter {SELECT rowid, * FROM 'c'}]
+
+do_test vtab1-15.1 {
+ execsql {
+ CREATE TABLE t1(a, b, c);
+ CREATE VIRTUAL TABLE echo_t1 USING echo(t1);
+ }
+} {}
+do_test vtab1-15.2 {
+ execsql {
+ INSERT INTO echo_t1(rowid) VALUES(45);
+ SELECT rowid, * FROM echo_t1;
+ }
+} {45 {} {} {}}
+do_test vtab1-15.3 {
+ execsql {
+ INSERT INTO echo_t1(rowid) VALUES(NULL);
+ SELECT rowid, * FROM echo_t1;
+ }
+} {45 {} {} {} 46 {} {} {}}
+do_test vtab1-15.4 {
+ catchsql {
+ INSERT INTO echo_t1(rowid) VALUES('new rowid');
+ }
+} {1 {datatype mismatch}}
+
+# The following tests - vtab1-16.* - are designed to test that setting
+# sqlite3_vtab.zErrMsg variable can be used by the vtab interface to
+# return an error message to the user.
+# +do_test vtab1-16.1 { + execsql { + CREATE TABLE t2(a PRIMARY KEY, b, c); + INSERT INTO t2 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(4, 5, 6); + CREATE VIRTUAL TABLE echo_t2 USING echo(t2); + } +} {} + +set tn 2 +foreach method [list \ + xBestIndex \ + xOpen \ + xFilter \ + xNext \ + xColumn \ + xRowid \ +] { + do_test vtab1-16.$tn { + set echo_module_fail($method,t2) "the $method method has failed" + catchsql { SELECT rowid, * FROM echo_t2 WHERE a >= 1 } + } "1 {echo-vtab-error: the $method method has failed}" + unset echo_module_fail($method,t2) + incr tn +} + +foreach method [list \ + xUpdate \ + xBegin \ + xSync \ +] { + do_test vtab1-16.$tn { + set echo_module_fail($method,t2) "the $method method has failed" + catchsql { INSERT INTO echo_t2 VALUES(7, 8, 9) } + } "1 {echo-vtab-error: the $method method has failed}" + unset echo_module_fail($method,t2) + incr tn +} + +do_test vtab1-16.$tn { + set echo_module_fail(xRename,t2) "the xRename method has failed" + catchsql { ALTER TABLE echo_t2 RENAME TO another_name } +} "1 {echo-vtab-error: the xRename method has failed}" +unset echo_module_fail(xRename,t2) +incr tn + unset -nocomplain echo_module_begin_fail finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab2.test --- sqlite3-3.4.2/test/vtab2.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/vtab2.test 2009-06-12 03:37:59.000000000 +0100 @@ -10,7 +10,7 @@ #*********************************************************************** # This file implements regression tests for SQLite library. # -# $Id: vtab2.test,v 1.7 2007/02/14 09:19:37 danielk1977 Exp $ +# $Id: vtab2.test,v 1.9 2008/10/13 10:37:50 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -87,4 +87,50 @@ unset result unset var +# Ticket #2894. +# +# Make sure we do call Column(), and Rowid() methods of +# a virtual table when that table is in a LEFT JOIN. +# +do_test vtab2-3.1 { + execsql { + SELECT * FROM schema WHERE dflt_value IS NULL LIMIT 1 + } +} {main schema 0 database {} 0 {} 0} +do_test vtab2-3.2 { + execsql { + SELECT *, b.rowid + FROM schema a LEFT JOIN schema b ON a.dflt_value=b.dflt_value + WHERE a.rowid=1 + } +} {main schema 0 database {} 0 {} 0 {} {} {} {} {} {} {} {} {}} + +do_test vtab2-4.1 { + execsql { + BEGIN TRANSACTION; + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(b, c)); + CREATE TABLE fkey( + to_tbl, + to_col + ); + INSERT INTO "fkey" VALUES('t1',NULL); + COMMIT; + } +} {} +do_test vtab2-4.2 { + execsql { CREATE VIRTUAL TABLE v_col USING schema } +} {} +do_test vtab2-4.3 { + execsql { SELECT name FROM v_col WHERE tablename = 't1' AND pk } +} {a} +do_test vtab2-4.4 { + execsql { + UPDATE fkey + SET to_col = (SELECT name FROM v_col WHERE tablename = 't1' AND pk); + } +} {} +do_test vtab2-4.5 { + execsql { SELECT * FROM fkey } +} {t1 a} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab3.test --- sqlite3-3.4.2/test/vtab3.test 2006-06-20 12:01:09.000000000 +0100 +++ sqlite3-3.6.16/test/vtab3.test 2009-06-05 18:03:48.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is the authorisation callback and virtual tables. 
# -# $Id: vtab3.test,v 1.2 2006/06/20 11:01:09 danielk1977 Exp $ +# $Id: vtab3.test,v 1.3 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -138,5 +138,3 @@ } {elephant} finish_test - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab4.test --- sqlite3-3.4.2/test/vtab4.test 2006-09-02 23:14:59.000000000 +0100 +++ sqlite3-3.6.16/test/vtab4.test 2009-06-05 18:03:48.000000000 +0100 @@ -16,7 +16,7 @@ # xCommit # xRollback # -# $Id: vtab4.test,v 1.2 2006/09/02 22:14:59 drh Exp $ +# $Id: vtab4.test,v 1.3 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -168,7 +168,6 @@ set echo_module } {xBegin echo(treal) xSync echo(treal) xRollback echo(treal)} -breakpoint do_test vtab4-3.3 { set echo_module [list] set echo_module_sync_fail sreal diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab5.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab5.test --- sqlite3-3.4.2/test/vtab5.test 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/test/vtab5.test 2009-06-05 18:03:48.000000000 +0100 @@ -10,7 +10,7 @@ #*********************************************************************** # This file implements regression tests for SQLite library. # -# $Id: vtab5.test,v 1.7 2007/06/27 15:53:35 danielk1977 Exp $ +# $Id: vtab5.test,v 1.8 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -150,4 +150,3 @@ # } {1 {virtual tables may not be altered}} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab6.test --- sqlite3-3.4.2/test/vtab6.test 2006-06-28 19:18:10.000000000 +0100 +++ sqlite3-3.6.16/test/vtab6.test 2009-06-05 18:03:48.000000000 +0100 @@ -14,7 +14,7 @@ # virtual tables. The test cases in this file are copied from the file # join.test, and some of the comments still reflect that. 
# -# $Id: vtab6.test,v 1.2 2006/06/28 18:18:10 drh Exp $ +# $Id: vtab6.test,v 1.4 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -157,7 +157,6 @@ } {1 2 3 4 2 3 4 5} ifcapable subquery { -breakpoint do_test vtab6-1.13 { execsql2 { SELECT * FROM t1 NATURAL JOIN @@ -454,4 +453,122 @@ } {} } ;# ifcapable subquery +do_test vtab6-11.1.0 { + execsql { + CREATE TABLE ab_r(a, b); + CREATE TABLE bc_r(b, c); + + CREATE VIRTUAL TABLE ab USING echo(ab_r); + CREATE VIRTUAL TABLE bc USING echo(bc_r); + + INSERT INTO ab VALUES(1, 2); + INSERT INTO bc VALUES(2, 3); + } +} {} + +do_test vtab6-11.1.1 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} +do_test vtab6-11.1.2 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + +set ::echo_module_cost 1.0 + +do_test vtab6-11.1.3 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} +do_test vtab6-11.1.4 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + + +do_test vtab6-11.2.0 { + execsql { + CREATE INDEX ab_i ON ab_r(b); + } +} {} + +unset ::echo_module_cost + +do_test vtab6-11.2.1 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} +do_test vtab6-11.2.2 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + +set ::echo_module_cost 1.0 + +do_test vtab6-11.2.3 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} +do_test vtab6-11.2.4 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + +unset ::echo_module_cost +db close +sqlite3 db test.db +register_echo_module [sqlite3_connection_pointer db] + +do_test vtab6-11.3.1 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} + +do_test vtab6-11.3.2 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + +set ::echo_module_cost 1.0 + +do_test vtab6-11.3.3 { + execsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 2 3} +do_test vtab6-11.3.4 { + execsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 2 3} + +unset ::echo_module_cost + +set ::echo_module_ignore_usable 1 +db cache flush + +do_test vtab6-11.4.1 { + catchsql { + SELECT a, b, c FROM ab NATURAL JOIN bc; + } +} {1 {table ab: xBestIndex returned an invalid plan}} +do_test vtab6-11.4.2 { + catchsql { + SELECT a, b, c FROM bc NATURAL JOIN ab; + } +} {1 {table ab: xBestIndex returned an invalid plan}} + +unset ::echo_module_ignore_usable + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab7.test --- sqlite3-3.4.2/test/vtab7.test 2006-07-26 17:22:16.000000000 +0100 +++ sqlite3-3.6.16/test/vtab7.test 2009-06-05 18:03:48.000000000 +0100 @@ -12,7 +12,7 @@ # of this test is reading and writing to the database from within a # virtual table xSync() callback. # -# $Id: vtab7.test,v 1.2 2006/07/26 16:22:16 danielk1977 Exp $ +# $Id: vtab7.test,v 1.4 2007/12/04 16:54:53 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -114,32 +114,41 @@ } {abc abc2 log newtab} # Drop a database table from within xSync callback. +# This is not allowed. Tables cannot be dropped while +# any other statement is active. 
+# do_test vtab7-2.6 { set ::callbacks(xSync,abc) { - execsql { DROP TABLE newtab } + set ::rc [catchsql { DROP TABLE newtab }] } execsql { INSERT INTO abc2 VALUES(1, 2, 3); SELECT name FROM sqlite_master ORDER BY name; } -} {abc abc2 log} +} {abc abc2 log newtab} +do_test vtab7-2.6.1 { + set ::rc +} {1 {database table is locked}} +execsql {DROP TABLE newtab} # Write to an attached database from xSync(). -do_test vtab7-3.1 { - file delete -force test2.db - file delete -force test2.db-journal - execsql { - ATTACH 'test2.db' AS db2; - CREATE TABLE db2.stuff(description, shape, color); - } - set ::callbacks(xSync,abc) { - execsql { INSERT INTO db2.stuff VALUES('abc', 'square', 'green'); } - } - execsql { - INSERT INTO abc2 VALUES(1, 2, 3); - SELECT * from stuff; - } -} {abc square green} +ifcapable attach { + do_test vtab7-3.1 { + file delete -force test2.db + file delete -force test2.db-journal + execsql { + ATTACH 'test2.db' AS db2; + CREATE TABLE db2.stuff(description, shape, color); + } + set ::callbacks(xSync,abc) { + execsql { INSERT INTO db2.stuff VALUES('abc', 'square', 'green'); } + } + execsql { + INSERT INTO abc2 VALUES(1, 2, 3); + SELECT * from stuff; + } + } {abc square green} +} # UPDATE: The next test passes, but leaks memory. So leave it out. # @@ -196,4 +205,3 @@ unset -nocomplain ::callbacks finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab_alter.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab_alter.test --- sqlite3-3.4.2/test/vtab_alter.test 2007-06-27 16:53:35.000000000 +0100 +++ sqlite3-3.6.16/test/vtab_alter.test 2009-06-05 18:03:48.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the ALTER TABLE ... RENAME TO # command on virtual tables. # -# $Id: vtab_alter.test,v 1.1 2007/06/27 15:53:35 danielk1977 Exp $ +# $Id: vtab_alter.test,v 1.3 2007/12/13 21:54:11 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -62,7 +62,7 @@ catchsql { SELECT * FROM new } } {0 {}} -# Try to rename an echo table that renames it's base table. Make +# Try to rename an echo table that renames its base table. Make # sure nothing terrible happens. # do_test vtab_alter-2.1 { @@ -89,7 +89,7 @@ execsql { SELECT * FROM x_base; } } {1 2 3} -# Cause an error to occur when the echo module renames it's +# Cause an error to occur when the echo module renames its # backing store table. # do_test vtab_alter-3.1 { @@ -101,4 +101,3 @@ } {1 2 3} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtabA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtabA.test --- sqlite3-3.4.2/test/vtabA.test 2007-06-26 11:38:54.000000000 +0100 +++ sqlite3-3.6.16/test/vtabA.test 2009-06-05 18:03:48.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is 'hidden' virtual table columns. # -# $Id: vtabA.test,v 1.1 2007/06/26 10:38:54 danielk1977 Exp $ +# $Id: vtabA.test,v 1.2 2008/07/12 14:52:21 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -132,4 +132,3 @@ } {{} {whatelse can i test} hidden} finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtabB.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtabB.test --- sqlite3-3.4.2/test/vtabB.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/vtabB.test 2009-06-05 18:03:48.000000000 +0100 @@ -0,0 +1,79 @@ +# 2008 April 10 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is verifying that a virtual table in the
+# TEMP database that is created and dropped within a transaction
+# is handled correctly. Ticket #2994.
+#
+# Also make sure a virtual table on the right-hand side of an IN operator
+# is materialized rather than being used directly. Ticket #3082.
+#
+
+#
+# $Id: vtabB.test,v 1.2 2008/04/25 12:10:15 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable !vtab {
+ finish_test
+ return
+}
+
+do_test vtabB-1.1 {
+ register_echo_module [sqlite3_connection_pointer db]
+ execsql {
+ CREATE TABLE t1(x);
+ BEGIN;
+ CREATE VIRTUAL TABLE temp.echo_test1 USING echo(t1);
+ DROP TABLE echo_test1;
+ ROLLBACK;
+ }
+} {}
+
+do_test vtabB-2.1 {
+ execsql {
+ INSERT INTO t1 VALUES(2);
+ INSERT INTO t1 VALUES(3);
+ CREATE TABLE t2(y);
+ INSERT INTO t2 VALUES(1);
+ INSERT INTO t2 VALUES(2);
+ CREATE VIRTUAL TABLE echo_t2 USING echo(t2);
+ SELECT * FROM t1 WHERE x IN (SELECT rowid FROM t2);
+ }
+} {2}
+do_test vtabB-2.2 {
+ execsql {
+ SELECT rowid FROM echo_t2
+ }
+} {1 2}
+do_test vtabB-2.3 {
+ execsql {
+ SELECT * FROM t1 WHERE x IN (SELECT rowid FROM t2);
+ }
+} {2}
+do_test vtabB-2.4 {
+ execsql {
+ SELECT * FROM t1 WHERE x IN (SELECT rowid FROM echo_t2);
+ }
+} {2}
+do_test vtabB-2.5 {
+ execsql {
+ SELECT * FROM t1 WHERE x IN (SELECT y FROM t2);
+ }
+} {2}
+do_test vtabB-2.6 {
+ execsql {
+ SELECT * FROM t1 WHERE x IN (SELECT y FROM echo_t2);
+ }
+} {2}
+
+finish_test
diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtabC.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtabC.test
--- sqlite3-3.4.2/test/vtabC.test 1970-01-01 01:00:00.000000000 +0100
+++ sqlite3-3.6.16/test/vtabC.test 2009-06-25 12:22:34.000000000 +0100
@@ -0,0 +1,116 @@
+# 2008 April 10
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is verifying that the xUpdate, xSync, xCommit
+# and xRollback methods are only invoked after an xBegin or xCreate.
+# Ticket #3083.
+#
+# $Id: vtabC.test,v 1.2 2009/04/07 14:14:23 danielk1977 Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable !vtab {
+ finish_test
+ return
+}
+
+ifcapable !trigger { finish_test ; return }
+
+
+# N will be the number of virtual tables we have defined.
+#
+unset -nocomplain N
+for {set N 1} {$N<=20} {incr N} {
+ db close
+ file delete -force test.db test.db-journal
+ sqlite3 db test.db
+ register_echo_module [sqlite3_connection_pointer db]
+
+ # Create $N tables and $N virtual tables to echo them.
+ #
+ unset -nocomplain tablist
+ set tablist {}
+ do_test vtabC-1.$N.1 {
+ for {set i 1} {$i<=$::N} {incr i} {
+ execsql "CREATE TABLE t${i}(x)"
+ execsql "CREATE VIRTUAL TABLE vt$i USING echo(t$i)"
+ lappend ::tablist t$i vt$i
+ }
+ execsql {SELECT count(*) FROM sqlite_master}
+ } [expr {$N*2}]
+ do_test vtabC-1.$N.2 {
+ execsql {SELECT name FROM sqlite_master}
+ } $tablist
+
+ # Create a table m and add triggers to make changes on all
+ # of the virtual tables when m is changed.
+ # + do_test vtabC-1.$N.3 { + execsql {CREATE TABLE m(a)} + set sql "CREATE TRIGGER rins AFTER INSERT ON m BEGIN\n" + for {set i 1} {$i<=$::N} {incr i} { + append sql " INSERT INTO vt$i VALUES(NEW.a+$i);\n" + } + append sql "END;" + execsql $sql + execsql {SELECT count(*) FROM sqlite_master} + } [expr {$N*2+2}] + do_test vtabC-1.$N.4 { + execsql { + INSERT INTO m VALUES(1000); + SELECT * FROM m; + } + } {1000} + for {set j 1} {$j<=$::N} {incr j} { + do_test vtabC-1.$N.5.$j { + execsql "SELECT * FROM t$::j" + } [expr {$j+1000}] + do_test vtabC-1.$N.6.$j { + execsql "SELECT * FROM vt$::j" + } [expr {$j+1000}] + } + do_test vtabC-1.$N.7 { + set sql "CREATE TRIGGER rins2 BEFORE INSERT ON m BEGIN\n" + for {set i 1} {$i<=$::N} {incr i} { + append sql " INSERT INTO vt$i VALUES(NEW.a+$i*100);\n" + } + for {set i 1} {$i<=$::N} {incr i} { + append sql " INSERT INTO vt$i VALUES(NEW.a+$i*10000);\n" + } + append sql "END;" + execsql $sql + execsql {SELECT count(*) FROM sqlite_master} + } [expr {$N*2+3}] + do_test vtabC-1.$N.8 { + execsql { + INSERT INTO m VALUES(9000000); + SELECT * FROM m; + } + } {1000 9000000} + unset -nocomplain res + for {set j 1} {$j<=$::N} {incr j} { + set res [expr {$j+1000}] + lappend res [expr {$j*100+9000000}] + lappend res [expr {$j*10000+9000000}] + lappend res [expr {$j+9000000}] + do_test vtabC-1.$N.9.$j { + execsql "SELECT * FROM t$::j" + } $res + do_test vtabC-1.$N.10.$j { + execsql "SELECT * FROM vt$::j" + } $res + } +} +unset -nocomplain res N i j + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtabD.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtabD.test --- sqlite3-3.4.2/test/vtabD.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/vtabD.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,71 @@ +# 2009 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is creating and dropping virtual tables. 
+# +# $Id: vtabD.test,v 1.3 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !vtab||!schema_pragmas { finish_test ; return } + +# Register the echo module +register_echo_module [sqlite3_connection_pointer db] + +do_test vtabD-1.1 { + execsql { + CREATE TABLE t1(a, b); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + CREATE VIRTUAL TABLE tv1 USING echo(t1); + } +} {} +do_test vtabD-1.2 { + execsql BEGIN + for {set i 0} {$i < 100000} {incr i} { + execsql { INSERT INTO t1 VALUES($i, $i*$i) } + } + execsql COMMIT +} {} +do_test vtabD-1.3 { + execsql { SELECT * FROM tv1 WHERE a = 1 OR b = 4 } +} {1 1 2 4} +do_test vtabD-1.4 { + execsql { SELECT * FROM tv1 WHERE a = 1 OR b = 1 } +} {1 1} +do_test vtabD-1.5 { + execsql { SELECT * FROM tv1 WHERE (a > 0 AND a < 5) OR (b > 15 AND b < 65) } +} {1 1 2 4 3 9 4 16 5 25 6 36 7 49 8 64} + +do_test vtabD-1.6 { + execsql { SELECT * FROM tv1 WHERE a < 500 OR b = 810000 } +} [execsql { + SELECT * FROM t1 WHERE a < 500 + UNION ALL + SELECT * FROM t1 WHERE b = 810000 AND NOT (a < 500) +}] + +do_test vtabD-1.7 { + execsql { SELECT * FROM tv1 WHERE a < 90000 OR b = 8100000000 } +} [execsql { + SELECT * FROM t1 WHERE a < 90000 + UNION ALL + SELECT * FROM t1 WHERE b = 8100000000 AND NOT (a < 90000) +}] + +if {[working_64bit_int]} { +do_test vtabD-1.8 { + execsql { SELECT * FROM tv1 WHERE a = 90001 OR b = 810000 } +} {90001 8100180001 900 810000} +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab_err.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab_err.test --- sqlite3-3.4.2/test/vtab_err.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/vtab_err.test 2009-06-05 18:03:48.000000000 +0100 @@ -9,123 +9,17 @@ # #*********************************************************************** # -# $Id: vtab_err.test,v 1.5 2007/04/19 11:09:02 danielk1977 Exp $ +# $Id: vtab_err.test,v 1.8 2007/09/03 16:12:10 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl -# Only run these tests if memory debugging is turned on. -# -if {[info command sqlite_malloc_stat]==""} { - puts "Skipping vtab_err tests: not compiled with -DSQLITE_MEMDEBUG=1" - finish_test - return -} - ifcapable !vtab { finish_test return } -# Usage: do_malloc_test -# -# The first argument, , is an integer used to name the -# tests executed by this proc. Options are as follows: -# -# -tclprep TCL script to run to prepare test. -# -sqlprep SQL script to run to prepare test. -# -tclbody TCL script to run with malloc failure simulation. -# -sqlbody TCL script to run with malloc failure simulation. -# -cleanup TCL script to run after the test. -# -# This command runs a series of tests to verify SQLite's ability -# to handle an out-of-memory condition gracefully. It is assumed -# that if this condition occurs a malloc() call will return a -# NULL pointer. Linux, for example, doesn't do that by default. See -# the "BUGS" section of malloc(3). -# -# Each iteration of a loop, the TCL commands in any argument passed -# to the -tclbody switch, followed by the SQL commands in any argument -# passed to the -sqlbody switch are executed. Each iteration the -# Nth call to sqliteMalloc() is made to fail, where N is increased -# each time the loop runs starting from 1. When all commands execute -# successfully, the loop ends. 
-# -proc do_malloc_test {tn args} { - array unset ::mallocopts - array set ::mallocopts $args - - set ::go 1 - for {set ::n 1} {$::go && $::n < 50000} {incr ::n} { - do_test $tn.$::n { - - # Remove all traces of database files test.db and test2.db from the files - # system. Then open (empty database) "test.db" with the handle [db]. - # - sqlite_malloc_fail 0 - catch {db close} - catch {file delete -force test.db} - catch {file delete -force test.db-journal} - catch {file delete -force test2.db} - catch {file delete -force test2.db-journal} - catch {sqlite3 db test.db} - set ::DB [sqlite3_connection_pointer db] - - # Execute any -tclprep and -sqlprep scripts. - # - if {[info exists ::mallocopts(-tclprep)]} { - eval $::mallocopts(-tclprep) - } - if {[info exists ::mallocopts(-sqlprep)]} { - execsql $::mallocopts(-sqlprep) - } - - # Now set the ${::n}th malloc() to fail and execute the -tclbody and - # -sqlbody scripts. - # - sqlite_malloc_fail $::n - set ::mallocbody {} - if {[info exists ::mallocopts(-tclbody)]} { - append ::mallocbody "$::mallocopts(-tclbody)\n" - } - if {[info exists ::mallocopts(-sqlbody)]} { - append ::mallocbody "db eval {$::mallocopts(-sqlbody)}" - } - set v [catch $::mallocbody msg] - - # If the test fails (if $v!=0) and the database connection actually - # exists, make sure the failure code is SQLITE_NOMEM. - if {$v&&[info command db]=="db"&&[info exists ::mallocopts(-sqlbody)]} { - if {[db errorcode]!=7 && $msg!="vtable constructor failed: e"} { - set v 999 - } - } - - set leftover [lindex [sqlite_malloc_stat] 2] - if {$leftover>0} { - if {$leftover>1} {puts "\nLeftover: $leftover\nReturn=$v Message=$msg"} - set ::go 0 - if {$v} { - puts "\nError message returned: $msg" - } else { - set v {1 1} - } - } else { - set v2 [expr { - $msg == "" || $msg == "out of memory" || - $msg == "vtable constructor failed: e" - }] - if {!$v2} {puts "\nError message returned: $msg"} - lappend v $v2 - } - } {1 1} - - if {[info exists ::mallocopts(-cleanup)]} { - catch [list uplevel #0 $::mallocopts(-cleanup)] msg - } - } - unset ::mallocopts -} + unset -nocomplain echo_module_begin_fail do_ioerr_test vtab_err-1 -tclprep { @@ -146,7 +40,15 @@ COMMIT; } -do_malloc_test vtab_err-2 -tclprep { +ifcapable !memdebug { + puts "Skipping vtab_err-2 tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} +source $testdir/malloc_common.tcl + + +do_malloc_test vtab_err-2 -tclprep { register_echo_module [sqlite3_connection_pointer db] } -sqlbody { BEGIN; @@ -164,5 +66,6 @@ COMMIT; } -sqlite_malloc_fail 0 +sqlite3_memdebug_fail -1 + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/vtab_shared.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/vtab_shared.test --- sqlite3-3.4.2/test/vtab_shared.test 2007-04-16 16:49:42.000000000 +0100 +++ sqlite3-3.6.16/test/vtab_shared.test 2009-06-05 18:03:48.000000000 +0100 @@ -11,7 +11,7 @@ # This file tests interactions between the virtual table and # shared-schema functionality. 
# -# $Id: vtab_shared.test,v 1.1 2007/04/16 15:49:42 danielk1977 Exp $ +# $Id: vtab_shared.test,v 1.2 2008/03/19 13:03:34 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -54,9 +54,8 @@ SELECT * FROM t1; } } [list 1 \ - {malformed database schema - Cannot use virtual tables in shared-cache mode}] + {malformed database schema (t1) - Cannot use virtual tables in shared-cache mode}] db close sqlite3_enable_shared_cache 0 finish_test - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where2.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where2.test --- sqlite3-3.4.2/test/where2.test 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/test/where2.test 2009-06-12 03:37:59.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the use of indices in WHERE clauses # based on recent changes to the optimizer. # -# $Id: where2.test,v 1.11 2007/02/23 23:13:34 drh Exp $ +# $Id: where2.test,v 1.15 2009/02/02 01:50:40 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -58,9 +58,8 @@ # occurring as expected. # proc cksort {sql} { - set ::sqlite_sort_count 0 set data [execsql $sql] - if {$::sqlite_sort_count} {set x sort} {set x nosort} + if {[db status sort]} {set x sort} {set x nosort} lappend data $x return $data } @@ -98,17 +97,17 @@ # do_test where2-2.1 { queryplan { - SELECT * FROM t1 WHERE w=85 ORDER BY random(5); + SELECT * FROM t1 WHERE w=85 ORDER BY random(); } } {85 6 7396 7402 nosort t1 i1w} do_test where2-2.2 { queryplan { - SELECT * FROM t1 WHERE x=6 AND y=7396 ORDER BY random(5); + SELECT * FROM t1 WHERE x=6 AND y=7396 ORDER BY random(); } } {85 6 7396 7402 sort t1 i1xy} do_test where2-2.3 { queryplan { - SELECT * FROM t1 WHERE rowid=85 AND x=6 AND y=7396 ORDER BY random(5); + SELECT * FROM t1 WHERE rowid=85 AND x=6 AND y=7396 ORDER BY random(); } } {85 6 7396 7402 nosort t1 *} @@ -328,37 +327,44 @@ SELECT * FROM t2249b CROSS JOIN t2249a WHERE a='hello' OR b=a; } } {123 0123 nosort t2249b {} t2249a {}} -do_test where2-6.12 { - # In this case, the +b disables the affinity conflict and allows - # the OR optimization to be used again. The result is now an empty - # set, the same as in where2-6.9. - queryplan { - SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR a='hello'; - } -} {nosort t2249b {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.12.2 { - # In this case, the +b disables the affinity conflict and allows - # the OR optimization to be used again. The result is now an empty - # set, the same as in where2-6.9. - queryplan { - SELECT * FROM t2249b CROSS JOIN t2249a WHERE a='hello' OR +b=a; - } -} {nosort t2249b {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.12.3 { - # In this case, the +b disables the affinity conflict and allows - # the OR optimization to be used again. The result is now an empty - # set, the same as in where2-6.9. - queryplan { - SELECT * FROM t2249b CROSS JOIN t2249a WHERE +b=a OR a='hello'; - } -} {nosort t2249b {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.13 { - # The addition of +a on the second term disabled the OR optimization. - # But we should still get the same empty-set result as in where2-6.9. - queryplan { - SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR +a='hello'; - } -} {nosort t2249b {} t2249a {}} +ifcapable explain&&subquery { + # These tests are not run if subquery support is not included in the + # build. 
This is because these tests test the "a = 1 OR a = 2" to + # "a IN (1, 2)" optimisation transformation, which is not enabled if + # subqueries and the IN operator is not available. + # + do_test where2-6.12 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR a='hello'; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.12.2 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a='hello' OR +b=a; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.12.3 { + # In this case, the +b disables the affinity conflict and allows + # the OR optimization to be used again. The result is now an empty + # set, the same as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE +b=a OR a='hello'; + } + } {nosort t2249b {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.13 { + # The addition of +a on the second term disabled the OR optimization. + # But we should still get the same empty-set result as in where2-6.9. + queryplan { + SELECT * FROM t2249b CROSS JOIN t2249a WHERE a=+b OR +a='hello'; + } + } {nosort t2249b {} t2249a {}} +} # Variations on the order of terms in a WHERE clause in order # to make sure the OR optimizer can recognize them all. @@ -367,21 +373,28 @@ SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE x.a=y.a } } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.21 { - queryplan { - SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE x.a=y.a OR y.a='hello' - } -} {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.22 { - queryplan { - SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a=x.a OR y.a='hello' - } -} {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} -do_test where2-6.23 { - queryplan { - SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a='hello' OR x.a=y.a - } -} {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} +ifcapable explain&&subquery { + # These tests are not run if subquery support is not included in the + # build. This is because these tests test the "a = 1 OR a = 2" to + # "a IN (1, 2)" optimisation transformation, which is not enabled if + # subqueries and the IN operator is not available. + # + do_test where2-6.21 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE x.a=y.a OR y.a='hello' + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.22 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a=x.a OR y.a='hello' + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} + do_test where2-6.23 { + queryplan { + SELECT * FROM t2249a x CROSS JOIN t2249a y WHERE y.a='hello' OR x.a=y.a + } + } {0123 0123 nosort x {} {} sqlite_autoindex_t2249a_1} +} # Unique queries (queries that are guaranteed to return only a single # row of result) do not call the sorter. But all tables must give @@ -580,7 +593,7 @@ # Make sure WHERE clauses of the form A=1 AND (B=2 OR B=3) are optimized # when we have an index on A and B. 
# -ifcapable or_opt { +ifcapable or_opt&&tclvar { do_test where2-9.1 { execsql { BEGIN; @@ -598,11 +611,13 @@ SELECT count(*) FROM t10; } } 1000 - do_test where2-9.2 { - count { - SELECT * FROM t10 WHERE a=1 AND (b=2 OR b=3) - } - } {1 2 2 1 3 3 7} + ifcapable subquery { + do_test where2-9.2 { + count { + SELECT * FROM t10 WHERE a=1 AND (b=2 OR b=3) + } + } {1 2 2 1 3 3 7} + } } finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where3.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where3.test --- sqlite3-3.4.2/test/where3.test 2007-06-12 12:39:39.000000000 +0100 +++ sqlite3-3.6.16/test/where3.test 2009-06-05 18:03:48.000000000 +0100 @@ -12,7 +12,7 @@ # focus of this file is testing the join reordering optimization # in cases that include a LEFT JOIN. # -# $Id: where3.test,v 1.3 2006/12/16 16:25:17 drh Exp $ +# $Id: where3.test,v 1.4 2008/04/17 19:14:02 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -45,6 +45,14 @@ } } {222 two 2 222 {} {}} +ifcapable explain { + do_test where3-1.1.1 { + explain_no_trace {SELECT * FROM t1, t2 LEFT JOIN t3 ON q=x + WHERE p=2 AND a=q} + } [explain_no_trace {SELECT * FROM t1, t2 LEFT JOIN t3 ON x=q + WHERE p=2 AND a=q}] +} + # Ticket #1830 # # This is similar to the above but with the LEFT JOIN on the @@ -78,6 +86,22 @@ } } {1 {Value for C1.1} {Value for C2.1} 2 {} {Value for C2.2} 3 {Value for C1.3} {Value for C2.3}} +ifcapable explain { + do_test where3-1.2.1 { + explain_no_trace { + SELECT parent1.parent1key, child1.value, child2.value + FROM parent1 + LEFT OUTER JOIN child1 ON child1.child1key = parent1.child1key + INNER JOIN child2 ON child2.child2key = parent1.child2key; + } + } [explain_no_trace { + SELECT parent1.parent1key, child1.value, child2.value + FROM parent1 + LEFT OUTER JOIN child1 ON parent1.child1key = child1.child1key + INNER JOIN child2 ON child2.child2key = parent1.child2key; + }] +} + # This procedure executes the SQL. Then it appends # the ::sqlite_query_plan variable. # @@ -121,6 +145,36 @@ WHERE cpk=bx AND bpk=ax } } {tA {} tB * tC * tD *} +do_test where3-2.1.1 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON cx=dpk + WHERE cpk=bx AND bpk=ax + } +} {tA {} tB * tC * tD *} +do_test where3-2.1.2 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON cx=dpk + WHERE bx=cpk AND bpk=ax + } +} {tA {} tB * tC * tD *} +do_test where3-2.1.3 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON cx=dpk + WHERE bx=cpk AND ax=bpk + } +} {tA {} tB * tC * tD *} +do_test where3-2.1.4 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE bx=cpk AND ax=bpk + } +} {tA {} tB * tC * tD *} +do_test where3-2.1.5 { + queryplan { + SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx + WHERE cpk=bx AND ax=bpk + } +} {tA {} tB * tC * tD *} do_test where3-2.2 { queryplan { SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where4.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where4.test --- sqlite3-3.4.2/test/where4.test 2007-06-02 08:54:38.000000000 +0100 +++ sqlite3-3.6.16/test/where4.test 2009-06-05 18:03:48.000000000 +0100 @@ -15,11 +15,16 @@ # that IS NULL phrases are correctly optimized. But you can never # have too many tests, so some other tests are thrown in as well. 
# -# $Id: where4.test,v 1.4 2007/06/02 07:54:38 danielk1977 Exp $ +# $Id: where4.test,v 1.6 2007/12/10 05:03:48 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl +ifcapable !tclvar||!bloblit { + finish_test + return +} + # Build some test data # do_test where4-1.0 { @@ -176,6 +181,7 @@ # Ticket #2273. Problems with IN operators and NULLs. # +ifcapable subquery { do_test where4-5.1 { execsql { CREATE TABLE t4(x,y,z,PRIMARY KEY(x,y)); @@ -262,4 +268,6 @@ } } {{}} +}; #ifcapable subquery + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where6.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where6.test --- sqlite3-3.4.2/test/where6.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/where6.test 2009-06-05 18:03:48.000000000 +0100 @@ -0,0 +1,130 @@ +# 2007 June 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing that terms in the ON clause of +# a LEFT OUTER JOIN are not used with indices. See ticket #3015. +# +# $Id: where6.test,v 1.2 2008/04/17 19:14:02 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Build some test data +# +do_test where6-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c); + INSERT INTO t1 VALUES(1,3,1); + INSERT INTO t1 VALUES(2,4,2); + CREATE TABLE t2(x INTEGER PRIMARY KEY); + INSERT INTO t2 VALUES(3); + + SELECT * FROM t1 LEFT JOIN t2 ON b=x AND c=1; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-1.2 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b AND c=1; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-1.3 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b AND 1=c; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-1.4 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x AND 1=c; + } +} {1 3 1 3 2 4 2 {}} + +ifcapable explain { + do_test where6-1.5 { + explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON x=b AND 1=c} + } [explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON b=x AND c=1}] + do_test where6-1.6 { + explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON x=b WHERE 1=c} + } [explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON b=x WHERE c=1}] +} + +do_test where6-1.11 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x WHERE c=1; + } +} {1 3 1 3} +do_test where6-1.12 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b WHERE c=1; + } +} {1 3 1 3} +do_test where6-1.13 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x WHERE 1=c; + } +} {1 3 1 3} + + + +do_test where6-2.1 { + execsql { + CREATE INDEX i1 ON t1(c); + + SELECT * FROM t1 LEFT JOIN t2 ON b=x AND c=1; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-2.2 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b AND c=1; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-2.3 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b AND 1=c; + } +} {1 3 1 3 2 4 2 {}} +do_test where6-2.4 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x AND 1=c; + } +} {1 3 1 3 2 4 2 {}} + +ifcapable explain { + do_test where6-2.5 { + explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON x=b AND 1=c} + } [explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON b=x AND c=1}] + do_test where6-2.6 { + explain_no_trace {SELECT * FROM t1 LEFT JOIN t2 ON x=b WHERE 1=c} + } [explain_no_trace {SELECT * FROM t1 
LEFT JOIN t2 ON b=x WHERE c=1}] +} + + +do_test where6-2.11 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x WHERE c=1; + } +} {1 3 1 3} +do_test where6-2.12 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b WHERE c=1; + } +} {1 3 1 3} +do_test where6-2.13 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON x=b WHERE 1=c; + } +} {1 3 1 3} +do_test where6-2.14 { + execsql { + SELECT * FROM t1 LEFT JOIN t2 ON b=x WHERE 1=c; + } +} {1 3 1 3} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where7.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where7.test --- sqlite3-3.4.2/test/where7.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/where7.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,23310 @@ +# 2008 December 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the multi-index OR clause optimizer. +# +# $Id: where7.test,v 1.9 2009/06/07 23:45:11 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !or_opt { + finish_test + return +} + +# Evaluate SQL. Return the result set followed by the +# and the number of full-scan steps. +# +proc count_steps {sql} { + set r [db eval $sql] + lappend r scan [db status step] sort [db status sort] +} + +proc count_steps_sort {sql} { + set r [lsort -integer [db eval $sql]] + return "$r scan [db status step] sort [db status sort]" +} + +# Build some test data +# +do_test where7-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c,d); + INSERT INTO t1 VALUES(1,2,3,4); + INSERT INTO t1 VALUES(2,3,4,5); + INSERT INTO t1 VALUES(3,4,6,8); + INSERT INTO t1 VALUES(4,5,10,15); + INSERT INTO t1 VALUES(5,10,100,1000); + CREATE INDEX t1b ON t1(b); + CREATE INDEX t1c ON t1(c); + SELECT * FROM t1; + } +} {1 2 3 4 2 3 4 5 3 4 6 8 4 5 10 15 5 10 100 1000} +do_test where7-1.2 { + count_steps { + SELECT a FROM t1 WHERE b=3 OR c=6 ORDER BY a + } +} {2 3 scan 0 sort 1} +do_test where7-1.3 { + count_steps { + SELECT a FROM t1 WHERE b=3 OR +c=6 ORDER BY a + } +} {2 3 scan 4 sort 0} +do_test where7-1.4 { + count_steps { + SELECT a FROM t1 WHERE +b=3 OR c=6 ORDER BY 1 + } +} {2 3 scan 4 sort 0} +do_test where7-1.5 { + count_steps { + SELECT a FROM t1 WHERE 3=b OR c=6 ORDER BY rowid + } +} {2 3 scan 0 sort 1} +do_test where7-1.6 { + count_steps { + SELECT a FROM t1 WHERE (3=b OR c=6) AND +a>0 ORDER BY a + } +} {2 3 scan 0 sort 1} +do_test where7-1.7 { + count_steps { + SELECT a FROM t1 WHERE (b=3 OR c>10) + } +} {2 5 scan 0 sort 0} +do_test where7-1.8 { + count_steps { + SELECT a FROM t1 WHERE (b=3 OR c>=10) + } +} {2 4 5 scan 0 sort 0} +do_test where7-1.9 { + count_steps { + SELECT a FROM t1 WHERE (b=3 OR c>=10 OR c=4) + } +} {2 4 5 scan 0 sort 0} +do_test where7-1.10 { + count_steps { + SELECT a FROM t1 WHERE (b=3 OR c>=10 OR c=4 OR b>10) + } +} {2 4 5 scan 0 sort 0} +do_test where7-1.11 { + count_steps { + SELECT a FROM t1 WHERE (d=5 AND b=3) OR c==100 ORDER BY a; + } +} {2 5 scan 0 sort 1} +do_test where7-1.12 { + count_steps { + SELECT a FROM t1 WHERE (b BETWEEN 2 AND 4) OR c=100 ORDER BY a + } +} {1 2 3 5 scan 0 sort 1} +do_test where7-1.13.1 { + count_steps { + SELECT a FROM t1 WHERE (b BETWEEN 0 AND 2) OR (c BETWEEN 9 
AND 999) + ORDER BY a DESC + } +} {5 4 1 scan 4 sort 0} +do_test where7-1.13.2 { + count_steps { + SELECT a FROM t1 WHERE (b BETWEEN 0 AND 2) OR (c BETWEEN 9 AND 999) + ORDER BY +a DESC + } +} {5 4 1 scan 0 sort 1} + +do_test where7-1.14 { + count_steps { + SELECT a FROM t1 WHERE (d=8 OR c=6 OR b=4) AND +a>0 + } +} {3 scan 4 sort 0} +do_test where7-1.15 { + count_steps { + SELECT a FROM t1 WHERE +a>=0 AND (d=8 OR c=6 OR b=4) + } +} {3 scan 4 sort 0} + +do_test where7-1.20 { + set sql "SELECT a FROM t1 WHERE a=11 OR b=11" + for {set i 12} {$i<400} {incr i} { + append sql " OR a=$i OR b=$i" + } + append sql " ORDER BY a" + count_steps $sql +} {scan 0 sort 1} +do_test where7-1.21 { + set sql "SELECT a FROM t1 WHERE b=11 OR c=11" + for {set i 12} {$i<400} {incr i} { + append sql " OR b=$i OR c=$i" + } + append sql " ORDER BY a" + count_steps $sql +} {5 scan 0 sort 1} +do_test where7-1.22 { + set sql "SELECT a FROM t1 WHERE (b=11 OR c=11" + for {set i 12} {$i<400} {incr i} { + append sql " OR b=$i OR c=$i" + } + append sql ") AND d>=0 AND d<9999 ORDER BY a" + count_steps $sql +} {5 scan 0 sort 1} +do_test where7-1.23 { + set sql "SELECT a FROM t1 WHERE (b=11 OR c=11" + for {set i 12} {$i<400} {incr i} { + append sql " OR (b=$i AND d!=0) OR (c=$i AND d IS NOT NULL)" + } + append sql ") AND d>=0 AND d<9999 ORDER BY a" + count_steps $sql +} {5 scan 0 sort 1} + +do_test where7-1.31 { + set sql "SELECT a FROM t1 WHERE (a=11 AND b=11)" + for {set i 12} {$i<400} {incr i} { + append sql " OR (a=$i AND b=$i)" + } + append sql " ORDER BY a" + count_steps $sql +} {scan 0 sort 1} +do_test where7-1.32 { + set sql "SELECT a FROM t1 WHERE (b=11 AND c=11)" + for {set i 12} {$i<400} {incr i} { + append sql " OR (b=$i AND c=$i)" + } + append sql " ORDER BY a" + count_steps $sql +} {scan 0 sort 1} + + +do_test where7-2.1 { + db eval { + CREATE TABLE t2(a INTEGER PRIMARY KEY,b,c,d,e,f TEXT,g); + INSERT INTO t2 VALUES(1,11,1001,1.001,100.1,'bcdefghij','yxwvuts'); + INSERT INTO t2 VALUES(2,22,1001,2.002,100.1,'cdefghijk','yxwvuts'); + INSERT INTO t2 VALUES(3,33,1001,3.0029999999999997,100.1,'defghijkl','xwvutsr'); + INSERT INTO t2 VALUES(4,44,2002,4.004,200.2,'efghijklm','xwvutsr'); + INSERT INTO t2 VALUES(5,55,2002,5.004999999999999,200.2,'fghijklmn','xwvutsr'); + INSERT INTO t2 VALUES(6,66,2002,6.005999999999999,200.2,'ghijklmno','xwvutsr'); + INSERT INTO t2 VALUES(7,77,3003,7.007,300.29999999999995,'hijklmnop','xwvutsr'); + INSERT INTO t2 VALUES(8,88,3003,8.008,300.29999999999995,'ijklmnopq','wvutsrq'); + INSERT INTO t2 VALUES(9,99,3003,9.008999999999999,300.29999999999995,'jklmnopqr','wvutsrq'); + INSERT INTO t2 VALUES(10,110,4004,10.009999999999998,400.4,'klmnopqrs','wvutsrq'); + INSERT INTO t2 VALUES(11,121,4004,11.011,400.4,'lmnopqrst','wvutsrq'); + INSERT INTO t2 VALUES(12,132,4004,12.011999999999999,400.4,'mnopqrstu','wvutsrq'); + INSERT INTO t2 VALUES(13,143,5005,13.012999999999998,500.5,'nopqrstuv','vutsrqp'); + INSERT INTO t2 VALUES(14,154,5005,14.014,500.5,'opqrstuvw','vutsrqp'); + INSERT INTO t2 VALUES(15,165,5005,15.014999999999999,500.5,'pqrstuvwx','vutsrqp'); + INSERT INTO t2 VALUES(16,176,6006,16.016,600.5999999999999,'qrstuvwxy','vutsrqp'); + INSERT INTO t2 VALUES(17,187,6006,17.017,600.5999999999999,'rstuvwxyz','vutsrqp'); + INSERT INTO t2 VALUES(18,198,6006,18.017999999999997,600.5999999999999,'stuvwxyza','utsrqpo'); + INSERT INTO t2 VALUES(19,209,7007,19.019,700.6999999999999,'tuvwxyzab','utsrqpo'); + INSERT INTO t2 VALUES(20,220,7007,20.019999999999996,700.6999999999999,'uvwxyzabc','utsrqpo'); 
+ INSERT INTO t2 VALUES(21,231,7007,21.020999999999997,700.6999999999999,'vwxyzabcd','utsrqpo'); + INSERT INTO t2 VALUES(22,242,8008,22.022,800.8,'wxyzabcde','utsrqpo'); + INSERT INTO t2 VALUES(23,253,8008,23.022999999999996,800.8,'xyzabcdef','tsrqpon'); + INSERT INTO t2 VALUES(24,264,8008,24.023999999999997,800.8,'yzabcdefg','tsrqpon'); + INSERT INTO t2 VALUES(25,275,9009,25.025,900.9,'zabcdefgh','tsrqpon'); + INSERT INTO t2 VALUES(26,286,9009,26.025999999999996,900.9,'abcdefghi','tsrqpon'); + INSERT INTO t2 VALUES(27,297,9009,27.026999999999997,900.9,'bcdefghij','tsrqpon'); + INSERT INTO t2 VALUES(28,308,10010,28.028,1001.0,'cdefghijk','srqponm'); + INSERT INTO t2 VALUES(29,319,10010,29.028999999999996,1001.0,'defghijkl','srqponm'); + INSERT INTO t2 VALUES(30,330,10010,30.029999999999998,1001.0,'efghijklm','srqponm'); + INSERT INTO t2 VALUES(31,341,11011,31.030999999999995,1101.1,'fghijklmn','srqponm'); + INSERT INTO t2 VALUES(32,352,11011,32.032,1101.1,'ghijklmno','srqponm'); + INSERT INTO t2 VALUES(33,363,11011,33.032999999999994,1101.1,'hijklmnop','rqponml'); + INSERT INTO t2 VALUES(34,374,12012,34.034,1201.1999999999998,'ijklmnopq','rqponml'); + INSERT INTO t2 VALUES(35,385,12012,35.035,1201.1999999999998,'jklmnopqr','rqponml'); + INSERT INTO t2 VALUES(36,396,12012,36.035999999999994,1201.1999999999998,'klmnopqrs','rqponml'); + INSERT INTO t2 VALUES(37,407,13013,37.037,1301.3,'lmnopqrst','rqponml'); + INSERT INTO t2 VALUES(38,418,13013,38.038,1301.3,'mnopqrstu','qponmlk'); + INSERT INTO t2 VALUES(39,429,13013,39.038999999999994,1301.3,'nopqrstuv','qponmlk'); + INSERT INTO t2 VALUES(40,440,14014,40.03999999999999,1401.3999999999999,'opqrstuvw','qponmlk'); + INSERT INTO t2 VALUES(41,451,14014,41.041,1401.3999999999999,'pqrstuvwx','qponmlk'); + INSERT INTO t2 VALUES(42,462,14014,42.041999999999994,1401.3999999999999,'qrstuvwxy','qponmlk'); + INSERT INTO t2 VALUES(43,473,15015,43.04299999999999,1501.5,'rstuvwxyz','ponmlkj'); + INSERT INTO t2 VALUES(44,484,15015,44.044,1501.5,'stuvwxyza','ponmlkj'); + INSERT INTO t2 VALUES(45,495,15015,45.044999999999995,1501.5,'tuvwxyzab','ponmlkj'); + INSERT INTO t2 VALUES(46,506,16016,46.04599999999999,1601.6,'uvwxyzabc','ponmlkj'); + INSERT INTO t2 VALUES(47,517,16016,47.047,1601.6,'vwxyzabcd','ponmlkj'); + INSERT INTO t2 VALUES(48,528,16016,48.047999999999995,1601.6,'wxyzabcde','onmlkji'); + INSERT INTO t2 VALUES(49,539,17017,49.04899999999999,1701.6999999999998,'xyzabcdef','onmlkji'); + INSERT INTO t2 VALUES(50,550,17017,50.05,1701.6999999999998,'yzabcdefg','onmlkji'); + INSERT INTO t2 VALUES(51,561,17017,51.050999999999995,1701.6999999999998,'zabcdefgh','onmlkji'); + INSERT INTO t2 VALUES(52,572,18018,52.05199999999999,1801.8,'abcdefghi','onmlkji'); + INSERT INTO t2 VALUES(53,583,18018,53.053,1801.8,'bcdefghij','nmlkjih'); + INSERT INTO t2 VALUES(54,594,18018,54.053999999999995,1801.8,'cdefghijk','nmlkjih'); + INSERT INTO t2 VALUES(55,605,19019,55.05499999999999,1901.8999999999999,'defghijkl','nmlkjih'); + INSERT INTO t2 VALUES(56,616,19019,56.056,1901.8999999999999,'efghijklm','nmlkjih'); + INSERT INTO t2 VALUES(57,627,19019,57.056999999999995,1901.8999999999999,'fghijklmn','nmlkjih'); + INSERT INTO t2 VALUES(58,638,20020,58.05799999999999,2002.0,'ghijklmno','mlkjihg'); + INSERT INTO t2 VALUES(59,649,20020,59.05899999999999,2002.0,'hijklmnop','mlkjihg'); + INSERT INTO t2 VALUES(60,660,20020,60.059999999999995,2002.0,'ijklmnopq','mlkjihg'); + INSERT INTO t2 VALUES(61,671,21021,61.06099999999999,2102.1,'jklmnopqr','mlkjihg'); + INSERT INTO t2 
VALUES(62,682,21021,62.06199999999999,2102.1,'klmnopqrs','mlkjihg'); + INSERT INTO t2 VALUES(63,693,21021,63.062999999999995,2102.1,'lmnopqrst','lkjihgf'); + INSERT INTO t2 VALUES(64,704,22022,64.064,2202.2,'mnopqrstu','lkjihgf'); + INSERT INTO t2 VALUES(65,715,22022,65.065,2202.2,'nopqrstuv','lkjihgf'); + INSERT INTO t2 VALUES(66,726,22022,66.06599999999999,2202.2,'opqrstuvw','lkjihgf'); + INSERT INTO t2 VALUES(67,737,23023,67.067,2302.2999999999997,'pqrstuvwx','lkjihgf'); + INSERT INTO t2 VALUES(68,748,23023,68.068,2302.2999999999997,'qrstuvwxy','kjihgfe'); + INSERT INTO t2 VALUES(69,759,23023,69.06899999999999,2302.2999999999997,'rstuvwxyz','kjihgfe'); + INSERT INTO t2 VALUES(70,770,24024,70.07,2402.3999999999996,'stuvwxyza','kjihgfe'); + INSERT INTO t2 VALUES(71,781,24024,71.071,2402.3999999999996,'tuvwxyzab','kjihgfe'); + INSERT INTO t2 VALUES(72,792,24024,72.07199999999999,2402.3999999999996,'uvwxyzabc','kjihgfe'); + INSERT INTO t2 VALUES(73,803,25025,73.073,2502.5,'vwxyzabcd','jihgfed'); + INSERT INTO t2 VALUES(74,814,25025,74.074,2502.5,'wxyzabcde','jihgfed'); + INSERT INTO t2 VALUES(75,825,25025,75.07499999999999,2502.5,'xyzabcdef','jihgfed'); + INSERT INTO t2 VALUES(76,836,26026,76.076,2602.6,'yzabcdefg','jihgfed'); + INSERT INTO t2 VALUES(77,847,26026,77.077,2602.6,'zabcdefgh','jihgfed'); + INSERT INTO t2 VALUES(78,858,26026,78.07799999999999,2602.6,'abcdefghi','ihgfedc'); + INSERT INTO t2 VALUES(79,869,27027,79.079,2702.7,'bcdefghij','ihgfedc'); + INSERT INTO t2 VALUES(80,880,27027,80.07999999999998,2702.7,'cdefghijk','ihgfedc'); + INSERT INTO t2 VALUES(81,891,27027,81.08099999999999,2702.7,'defghijkl','ihgfedc'); + INSERT INTO t2 VALUES(82,902,28028,82.082,2802.7999999999997,'efghijklm','ihgfedc'); + INSERT INTO t2 VALUES(83,913,28028,83.08299999999998,2802.7999999999997,'fghijklmn','hgfedcb'); + INSERT INTO t2 VALUES(84,924,28028,84.08399999999999,2802.7999999999997,'ghijklmno','hgfedcb'); + INSERT INTO t2 VALUES(85,935,29029,85.085,2902.8999999999996,'hijklmnop','hgfedcb'); + INSERT INTO t2 VALUES(86,946,29029,86.08599999999998,2902.8999999999996,'ijklmnopq','hgfedcb'); + INSERT INTO t2 VALUES(87,957,29029,87.08699999999999,2902.8999999999996,'jklmnopqr','hgfedcb'); + INSERT INTO t2 VALUES(88,968,30030,88.088,3003.0,'klmnopqrs','gfedcba'); + INSERT INTO t2 VALUES(89,979,30030,89.08899999999998,3003.0,'lmnopqrst','gfedcba'); + INSERT INTO t2 VALUES(90,990,30030,90.08999999999999,3003.0,'mnopqrstu','gfedcba'); + INSERT INTO t2 VALUES(91,1001,31031,91.091,3103.1,'nopqrstuv','gfedcba'); + INSERT INTO t2 VALUES(92,1012,31031,92.09199999999998,3103.1,'opqrstuvw','gfedcba'); + INSERT INTO t2 VALUES(93,1023,31031,93.09299999999999,3103.1,'pqrstuvwx','fedcbaz'); + INSERT INTO t2 VALUES(94,1034,32032,94.094,3203.2,'qrstuvwxy','fedcbaz'); + INSERT INTO t2 VALUES(95,1045,32032,95.09499999999998,3203.2,'rstuvwxyz','fedcbaz'); + INSERT INTO t2 VALUES(96,1056,32032,96.09599999999999,3203.2,'stuvwxyza','fedcbaz'); + INSERT INTO t2 VALUES(97,1067,33033,97.097,3303.2999999999997,'tuvwxyzab','fedcbaz'); + INSERT INTO t2 VALUES(98,1078,33033,98.09799999999998,3303.2999999999997,'uvwxyzabc','edcbazy'); + INSERT INTO t2 VALUES(99,1089,33033,99.09899999999999,3303.2999999999997,'vwxyzabcd','edcbazy'); + INSERT INTO t2 VALUES(100,1100,34034,100.1,3403.3999999999996,'wxyzabcde','edcbazy'); + CREATE INDEX t2b ON t2(b); + CREATE INDEX t2c ON t2(c); + CREATE INDEX t2d ON t2(d); + CREATE INDEX t2e ON t2(e); + CREATE INDEX t2f ON t2(f); + CREATE INDEX t2g ON t2(g); + CREATE TABLE t3(a INTEGER PRIMARY 
KEY,b,c,d,e,f TEXT,g); + INSERT INTO t3 SELECT * FROM t2; + CREATE INDEX t3b ON t3(b,c); + CREATE INDEX t3c ON t3(c,e); + CREATE INDEX t3d ON t3(d,g); + CREATE INDEX t3e ON t3(e,f,g); + CREATE INDEX t3f ON t3(f,b,d,c); + CREATE INDEX t3g ON t3(g,f); + } +} {} + +do_test where7-2.2.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1070 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + } +} {6 18 20 32 39 58 84 89 96 100 scan 0 sort 0} +do_test where7-2.2.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1070 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + } +} {6 18 20 32 39 58 84 89 96 100 scan 0 sort 0} +do_test where7-2.3.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR b=220 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='qponmlk' AND f GLOB 'pqrst*') + } +} {20 33 35 41 47 67 69 70 98 scan 0 sort 0} +do_test where7-2.3.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR b=220 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='qponmlk' AND f GLOB 'pqrst*') + } +} {20 33 35 41 47 67 69 70 98 scan 0 sort 0} +do_test where7-2.4.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=190 + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR (g='rqponml' AND f GLOB 'hijkl*') + OR b=407 + } +} {33 37 49 51 scan 0 sort 0} +do_test where7-2.4.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=190 + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR (g='rqponml' AND f GLOB 'hijkl*') + OR b=407 + } +} {33 37 49 51 scan 0 sort 0} +do_test where7-2.5.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=795 + OR b=1103 + OR b=583 + } +} {13 39 53 65 91 scan 0 sort 0} +do_test where7-2.5.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=795 + OR b=1103 + OR b=583 + } +} {13 39 53 65 91 scan 0 sort 0} +do_test where7-2.6.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=74 + OR a=50 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR c=21021 + OR ((a BETWEEN 82 AND 84) AND a!=83) + } +} {16 18 50 61 62 63 74 82 84 85 scan 0 sort 0} +do_test where7-2.6.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=74 + OR a=50 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR c=21021 + OR ((a BETWEEN 82 AND 84) AND a!=83) + } +} {16 18 50 61 62 63 74 82 84 85 scan 0 sort 0} +do_test where7-2.7.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 8 AND 10) AND a!=9) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR c=14014 + OR b=828 + } +} {8 10 34 36 40 41 42 94 scan 0 sort 0} +do_test where7-2.7.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 8 AND 10) AND a!=9) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR c=14014 + OR b=828 + } +} {8 10 34 36 40 41 42 94 scan 0 sort 0} 
+do_test where7-2.11.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR c=11011 + OR c=20020 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + } +} {18 31 32 33 58 59 60 72 74 scan 0 sort 0} +do_test where7-2.11.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR c=11011 + OR c=20020 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + } +} {18 31 32 33 58 59 60 72 74 scan 0 sort 0} +do_test where7-2.12.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=50.0 AND d<51.0 AND d NOT NULL) + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR b=792 + OR a=97 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR b=916 + OR a=69 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR c=6006 + } +} {16 17 18 31 50 69 72 81 83 87 97 scan 0 sort 0} +do_test where7-2.12.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=50.0 AND d<51.0 AND d NOT NULL) + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR b=792 + OR a=97 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR b=916 + OR a=69 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR c=6006 + } +} {16 17 18 31 50 69 72 81 83 87 97 scan 0 sort 0} +do_test where7-2.13.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 50 AND 52) AND a!=51) + OR c=9009 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=539 + OR b=297 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=957 + OR f='xyzabcdef' + OR b=619 + } +} {10 15 21 23 25 26 27 49 50 52 75 87 scan 0 sort 0} +do_test where7-2.13.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 50 AND 52) AND a!=51) + OR c=9009 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=539 + OR b=297 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=957 + OR f='xyzabcdef' + OR b=619 + } +} {10 15 21 23 25 26 27 49 50 52 75 87 scan 0 sort 0} +do_test where7-2.14.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + } +} {47 48 49 scan 0 sort 0} +do_test where7-2.14.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + } +} {47 48 49 scan 0 sort 0} +do_test where7-2.15.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=20 + OR a=67 + OR b=58 + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {19 20 21 67 scan 0 sort 0} +do_test where7-2.15.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=20 + OR a=67 + OR b=58 + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {19 20 21 67 scan 0 sort 0} +do_test where7-2.16.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=938 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {17 67 scan 0 sort 0} +do_test where7-2.16.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=938 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {17 67 scan 0 sort 0} +do_test where7-2.17.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=68.0 AND d<69.0 AND d NOT NULL) + OR f='zabcdefgh' + OR b=308 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR b=443 + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR f='uvwxyzabc' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {12 
14 15 16 17 20 24 25 28 29 46 50 51 68 72 76 77 98 scan 0 sort 0} +do_test where7-2.17.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=68.0 AND d<69.0 AND d NOT NULL) + OR f='zabcdefgh' + OR b=308 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR b=443 + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR f='uvwxyzabc' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {12 14 15 16 17 20 24 25 28 29 46 50 51 68 72 76 77 98 scan 0 sort 0} +do_test where7-2.18.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=762 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=19 + } +} {19 46 56 scan 0 sort 0} +do_test where7-2.18.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=762 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=19 + } +} {19 46 56 scan 0 sort 0} +do_test where7-2.19.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=46 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR a=73 + OR c=20020 + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR b=267 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + } +} {2 4 20 46 58 59 60 63 68 70 73 scan 0 sort 0} +do_test where7-2.19.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=46 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR a=73 + OR c=20020 + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR b=267 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + } +} {2 4 20 46 58 59 60 63 68 70 73 scan 0 sort 0} +do_test where7-2.20.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 27 AND 29) AND a!=28) + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {27 29 91 scan 0 sort 0} +do_test where7-2.20.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 27 AND 29) AND a!=28) + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {27 29 91 scan 0 sort 0} +do_test where7-2.21.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=13013 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR f='bcdefghij' + OR b=586 + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR a=9 + } +} {1 6 9 27 37 38 39 53 55 58 59 61 75 79 87 89 98 scan 0 sort 0} +do_test where7-2.21.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=13013 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR f='bcdefghij' + OR b=586 + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR a=9 + } +} {1 6 9 27 37 38 39 53 55 58 59 61 75 79 87 89 98 scan 0 sort 0} +do_test where7-2.22.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=399 + OR c=28028 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + } +} {42 82 83 84 98 scan 0 sort 0} +do_test where7-2.22.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=399 + OR c=28028 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + } +} {42 82 83 84 98 scan 0 sort 0} +do_test where7-2.23.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR ((a 
BETWEEN 96 AND 98) AND a!=97) + OR c=14014 + OR c=33033 + OR a=89 + OR b=770 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR a=35 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR b=253 + OR c=14014 + } +} {4 19 23 30 35 40 41 42 56 70 82 89 95 96 97 98 99 scan 0 sort 0} +do_test where7-2.23.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR c=14014 + OR c=33033 + OR a=89 + OR b=770 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR a=35 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR b=253 + OR c=14014 + } +} {4 19 23 30 35 40 41 42 56 70 82 89 95 96 97 98 99 scan 0 sort 0} +do_test where7-2.24.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=330 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR a=16 + } +} {6 16 21 30 32 34 scan 0 sort 0} +do_test where7-2.24.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=330 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR a=16 + } +} {6 16 21 30 32 34 scan 0 sort 0} +do_test where7-2.25.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=5005 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {2 13 14 15 36 38 47 scan 0 sort 0} +do_test where7-2.25.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=5005 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {2 13 14 15 36 38 47 scan 0 sort 0} +do_test where7-2.26.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=33 + } +} {30 33 58 64 66 68 scan 0 sort 0} +do_test where7-2.26.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=33 + } +} {30 33 58 64 66 68 scan 0 sort 0} +do_test where7-2.27.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1026 + OR b=410 + } +} { scan 0 sort 0} +do_test where7-2.27.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1026 + OR b=410 + } +} { scan 0 sort 0} +do_test where7-2.28.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=18018 + OR a=94 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=1012 + OR a=3 + OR d>1e10 + OR b=905 + OR b=1089 + } +} {3 15 26 41 52 53 54 67 92 93 94 99 scan 0 sort 0} +do_test where7-2.28.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=18018 + OR a=94 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=1012 + OR a=3 + OR d>1e10 + OR b=905 + OR b=1089 + } +} {3 15 26 41 52 53 54 67 92 93 94 99 scan 0 sort 0} +do_test where7-2.29.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=100 + OR c=11011 + OR b=297 + OR a=63 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR a=76 + OR b=1026 + OR a=26 + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR c=30030 + } +} {24 26 27 31 32 33 50 63 76 84 88 89 90 100 scan 0 sort 0} +do_test where7-2.29.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=100 + OR c=11011 + OR b=297 + OR a=63 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR a=76 + OR b=1026 + OR a=26 + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR c=30030 + } +} {24 26 27 31 32 33 50 63 76 
84 88 89 90 100 scan 0 sort 0} +do_test where7-2.30.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=561 + OR b=1070 + OR a=59 + OR b=715 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + } +} {23 49 51 59 65 75 scan 0 sort 0} +do_test where7-2.30.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=561 + OR b=1070 + OR a=59 + OR b=715 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + } +} {23 49 51 59 65 75 scan 0 sort 0} +do_test where7-2.31.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=1056 + OR b=1012 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {19 26 52 57 59 67 69 78 92 95 96 scan 0 sort 0} +do_test where7-2.31.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=1056 + OR b=1012 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {19 26 52 57 59 67 69 78 92 95 96 scan 0 sort 0} +do_test where7-2.32.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='rstuvwxyz' + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + } +} {17 43 69 74 90 92 95 98 scan 0 sort 0} +do_test where7-2.32.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='rstuvwxyz' + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + } +} {17 43 69 74 90 92 95 98 scan 0 sort 0} +do_test where7-2.33.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR c=12012 + OR a=18 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + } +} {9 15 17 18 26 34 35 36 41 43 52 61 67 69 76 78 87 93 95 scan 0 sort 0} +do_test where7-2.33.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR c=12012 + OR a=18 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + } +} {9 15 17 18 26 34 35 36 41 43 52 61 67 69 76 78 87 93 95 scan 0 sort 0} +do_test where7-2.34.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=77 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + } +} {58 77 scan 0 sort 0} +do_test where7-2.34.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=77 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + } +} {58 77 scan 0 sort 0} +do_test where7-2.35.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=498 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR c=33033 + OR b=11 + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {1 7 9 11 27 67 69 88 97 98 99 scan 0 sort 0} +do_test where7-2.35.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=498 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR c=33033 + OR b=11 + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {1 7 9 11 27 67 69 88 97 98 99 scan 0 sort 0} +do_test where7-2.36.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=6.0 AND d<7.0 AND d NOT 
NULL) + OR ((a BETWEEN 58 AND 60) AND a!=59) + } +} {6 58 60 scan 0 sort 0} +do_test where7-2.36.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=6.0 AND d<7.0 AND d NOT NULL) + OR ((a BETWEEN 58 AND 60) AND a!=59) + } +} {6 58 60 scan 0 sort 0} +do_test where7-2.37.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1059 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR c=4004 + OR b=806 + } +} {10 11 12 43 scan 0 sort 0} +do_test where7-2.37.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1059 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR c=4004 + OR b=806 + } +} {10 11 12 43 scan 0 sort 0} +do_test where7-2.38.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=165 + OR b=201 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR a=32 + } +} {15 32 99 scan 0 sort 0} +do_test where7-2.38.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=165 + OR b=201 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR a=32 + } +} {15 32 99 scan 0 sort 0} +do_test where7-2.39.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + } +} {22 48 69 74 100 scan 0 sort 0} +do_test where7-2.39.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + } +} {22 48 69 74 100 scan 0 sort 0} +do_test where7-2.40.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=352 + OR b=278 + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=660 + OR a=18 + OR a=34 + OR b=132 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR c=18018 + } +} {2 12 18 28 30 32 34 52 53 54 60 80 89 90 92 scan 0 sort 0} +do_test where7-2.40.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=352 + OR b=278 + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=660 + OR a=18 + OR a=34 + OR b=132 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR c=18018 + } +} {2 12 18 28 30 32 34 52 53 54 60 80 89 90 92 scan 0 sort 0} +do_test where7-2.41.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 73 scan 0 sort 0} +do_test where7-2.41.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 73 scan 0 sort 0} +do_test where7-2.42.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR b=297 + OR b=113 + OR b=176 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=67 + OR c=26026 + } +} {3 14 16 21 27 29 55 67 75 76 77 78 81 83 scan 0 sort 0} +do_test where7-2.42.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR b=297 + OR b=113 + OR b=176 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=67 + OR c=26026 + } +} {3 14 16 21 27 29 55 67 75 76 77 78 81 83 scan 0 sort 0} +do_test where7-2.43.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=78.0 AND d<79.0 AND d NOT NULL) + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=83 + OR b=44 + OR b=1023 + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR b=1023 + OR f='ijklmnopq' + } +} {4 6 8 11 13 34 60 78 83 86 93 scan 0 sort 0} +do_test where7-2.43.2 { + count_steps_sort { + 
SELECT a FROM t3 + WHERE (d>=78.0 AND d<79.0 AND d NOT NULL) + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=83 + OR b=44 + OR b=1023 + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR b=1023 + OR f='ijklmnopq' + } +} {4 6 8 11 13 34 60 78 83 86 93 scan 0 sort 0} +do_test where7-2.44.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=935 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=487 + OR b=619 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {17 32 34 39 42 85 scan 0 sort 0} +do_test where7-2.44.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=935 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=487 + OR b=619 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {17 32 34 39 42 85 scan 0 sort 0} +do_test where7-2.45.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=68.0 AND d<69.0 AND d NOT NULL) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=938 + OR b=641 + OR c=17017 + OR a=82 + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + } +} {37 39 49 50 51 56 58 65 68 82 94 scan 0 sort 0} +do_test where7-2.45.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=68.0 AND d<69.0 AND d NOT NULL) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=938 + OR b=641 + OR c=17017 + OR a=82 + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + } +} {37 39 49 50 51 56 58 65 68 82 94 scan 0 sort 0} +do_test where7-2.46.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'bcdef*') + OR c=22022 + } +} {64 65 66 79 scan 0 sort 0} +do_test where7-2.46.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'bcdef*') + OR c=22022 + } +} {64 65 66 79 scan 0 sort 0} +do_test where7-2.47.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=7007 + OR b=91 + OR b=212 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR c=28028 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + } +} {19 20 21 65 82 83 84 scan 0 sort 0} +do_test where7-2.47.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=7007 + OR b=91 + OR b=212 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR c=28028 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + } +} {19 20 21 65 82 83 84 scan 0 sort 0} +do_test where7-2.48.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR a=51 + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {12 28 30 51 scan 0 sort 0} +do_test where7-2.48.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR a=51 + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {12 28 30 51 scan 0 sort 0} +do_test where7-2.49.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR c=4004 + OR b=322 + OR c=13013 + OR a=6 + } +} {2 6 9 10 11 12 23 37 38 39 scan 0 sort 0} +do_test where7-2.49.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR c=4004 + OR b=322 + OR c=13013 + OR a=6 + } +} {2 6 9 10 11 12 23 37 38 39 scan 0 sort 0} +do_test where7-2.50.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE 
b=297 + OR b=143 + OR a=46 + OR b=660 + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR b=355 + OR a=93 + OR b=297 + } +} {13 17 23 27 41 46 49 60 75 93 scan 0 sort 0} +do_test where7-2.50.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=297 + OR b=143 + OR a=46 + OR b=660 + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR b=355 + OR a=93 + OR b=297 + } +} {13 17 23 27 41 46 49 60 75 93 scan 0 sort 0} +do_test where7-2.51.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=190 + OR a=62 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + } +} {62 99 scan 0 sort 0} +do_test where7-2.51.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=190 + OR a=62 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + } +} {62 99 scan 0 sort 0} +do_test where7-2.52.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1089 + OR b=102 + OR a=6 + OR b=608 + } +} {6 99 scan 0 sort 0} +do_test where7-2.52.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1089 + OR b=102 + OR a=6 + OR b=608 + } +} {6 99 scan 0 sort 0} +do_test where7-2.53.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=473 + OR b=1100 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=20 + OR b=1089 + OR b=330 + OR b=124 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {15 20 30 43 53 56 58 99 100 scan 0 sort 0} +do_test where7-2.53.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=473 + OR b=1100 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=20 + OR b=1089 + OR b=330 + OR b=124 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {15 20 30 43 53 56 58 99 100 scan 0 sort 0} +do_test where7-2.54.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 68 AND 70) AND a!=69) + OR b=223 + OR a=12 + OR b=1048 + OR b=256 + OR a=72 + OR c>=34035 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=674 + OR a=22 + } +} {12 22 35 68 70 72 scan 0 sort 0} +do_test where7-2.54.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 68 AND 70) AND a!=69) + OR b=223 + OR a=12 + OR b=1048 + OR b=256 + OR a=72 + OR c>=34035 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=674 + OR a=22 + } +} {12 22 35 68 70 72 scan 0 sort 0} +do_test where7-2.55.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='yzabcdefg' + OR c=14014 + OR a=1 + OR a=9 + OR b=960 + } +} {1 9 24 40 41 42 50 76 78 scan 0 sort 0} +do_test where7-2.55.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='yzabcdefg' + OR c=14014 + OR a=1 + OR a=9 + OR b=960 + } +} {1 9 24 40 41 42 50 76 78 scan 0 sort 0} +do_test where7-2.56.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (g='onmlkji' AND f GLOB 'xyzab*') + } +} {19 49 96 98 scan 0 sort 0} +do_test where7-2.56.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (g='onmlkji' AND f GLOB 'xyzab*') + } +} {19 49 96 98 scan 0 sort 0} +do_test where7-2.57.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=748 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=630 + } +} {9 20 67 68 scan 0 sort 0} +do_test where7-2.57.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE b=748 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=630 + } +} {9 20 67 68 scan 0 sort 0} +do_test where7-2.58.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=223 + OR b=267 + OR a=40 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR c<=10 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR b=528 + } +} {40 48 55 57 69 71 scan 0 sort 0} +do_test where7-2.58.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=223 + OR b=267 + OR a=40 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR c<=10 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR b=528 + } +} {40 48 55 57 69 71 scan 0 sort 0} +do_test where7-2.59.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='rstuvwxyz' + OR a=41 + OR b=462 + OR a=68 + OR a=84 + OR a=69 + } +} {17 41 42 43 68 69 84 95 scan 0 sort 0} +do_test where7-2.59.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='rstuvwxyz' + OR a=41 + OR b=462 + OR a=68 + OR a=84 + OR a=69 + } +} {17 41 42 43 68 69 84 95 scan 0 sort 0} +do_test where7-2.60.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=979 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (g='vutsrqp' AND f GLOB 'nopqr*') + } +} {3 5 13 89 scan 0 sort 0} +do_test where7-2.60.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=979 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (g='vutsrqp' AND f GLOB 'nopqr*') + } +} {3 5 13 89 scan 0 sort 0} +do_test where7-2.61.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=9.0 AND d<10.0 AND d NOT NULL) + OR a=8 + OR a=62 + OR b=726 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {8 9 10 14 50 52 59 61 62 66 scan 0 sort 0} +do_test where7-2.61.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=9.0 AND d<10.0 AND d NOT NULL) + OR a=8 + OR a=62 + OR b=726 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {8 9 10 14 50 52 59 61 62 66 scan 0 sort 0} +do_test where7-2.62.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=495 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR b=924 + OR c=11011 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=231 + OR b=872 + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {18 20 21 31 32 33 45 47 73 76 84 99 scan 0 sort 0} +do_test where7-2.62.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=495 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR b=924 + OR c=11011 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=231 + OR b=872 + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {18 20 21 31 32 33 45 47 73 76 84 99 scan 0 sort 0} +do_test where7-2.63.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=24 + OR b=473 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR b=509 + OR b=924 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {21 24 43 84 86 96 scan 0 sort 0} +do_test where7-2.63.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=24 + OR b=473 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR b=509 + OR b=924 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {21 24 43 84 86 96 scan 0 sort 0} +do_test where7-2.64.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR b=363 + OR (g='xwvutsr' AND f 
GLOB 'fghij*') + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {2 5 8 23 25 28 33 34 54 56 58 60 80 86 93 100 scan 0 sort 0} +do_test where7-2.64.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR b=363 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {2 5 8 23 25 28 33 34 54 56 58 60 80 86 93 100 scan 0 sort 0} +do_test where7-2.65.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=42 + OR e IS NULL + OR b=495 + OR 1000000=65.0 AND d<66.0 AND d NOT NULL) + } +} {20 42 45 46 65 69 72 85 98 scan 0 sort 0} +do_test where7-2.65.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=42 + OR e IS NULL + OR b=495 + OR 1000000=65.0 AND d<66.0 AND d NOT NULL) + } +} {20 42 45 46 65 69 72 85 98 scan 0 sort 0} +do_test where7-2.66.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=17017 + OR f='ijklmnopq' + OR a=39 + } +} {8 34 39 49 50 51 60 86 scan 0 sort 0} +do_test where7-2.66.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=17017 + OR f='ijklmnopq' + OR a=39 + } +} {8 34 39 49 50 51 60 86 scan 0 sort 0} +do_test where7-2.67.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c>=34035 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=91 + } +} {11 19 27 37 63 89 91 96 98 100 scan 0 sort 0} +do_test where7-2.67.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c>=34035 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=91 + } +} {11 19 27 37 63 89 91 96 98 100 scan 0 sort 0} +do_test where7-2.68.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=649 + OR b=231 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=58 + } +} {9 21 28 29 35 48 59 61 87 91 scan 0 sort 0} +do_test where7-2.68.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=649 + OR b=231 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=58 + } +} {9 21 28 29 35 48 59 61 87 91 scan 0 sort 0} +do_test where7-2.69.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=979 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + } +} {78 89 scan 0 sort 0} +do_test where7-2.69.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=979 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + } +} {78 89 scan 0 sort 0} +do_test where7-2.70.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=825 + OR b=1004 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {56 58 60 62 75 scan 0 sort 0} +do_test where7-2.70.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=825 + OR b=1004 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {56 58 60 62 75 scan 0 sort 0} +do_test where7-2.71.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=65 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR c=22022 + OR (f 
GLOB '?ghij*' AND f GLOB 'fghi*') + OR b=671 + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR a=91 + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR b=1004 + OR b=960 + } +} {5 31 47 49 51 57 61 64 65 66 83 91 98 scan 0 sort 0} +do_test where7-2.71.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=65 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR c=22022 + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR b=671 + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR a=91 + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR b=1004 + OR b=960 + } +} {5 31 47 49 51 57 61 64 65 66 83 91 98 scan 0 sort 0} +do_test where7-2.72.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=762 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {56 58 93 scan 0 sort 0} +do_test where7-2.72.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=762 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {56 58 93 scan 0 sort 0} +do_test where7-2.73.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=11.0 AND d<12.0 AND d NOT NULL) + OR a=14 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR b=212 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + } +} {11 14 23 54 78 85 scan 0 sort 0} +do_test where7-2.73.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=11.0 AND d<12.0 AND d NOT NULL) + OR a=14 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR b=212 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + } +} {11 14 23 54 78 85 scan 0 sort 0} +do_test where7-2.74.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'bcdef*') + OR b=168 + OR b=25 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {66 79 89 scan 0 sort 0} +do_test where7-2.74.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'bcdef*') + OR b=168 + OR b=25 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {66 79 89 scan 0 sort 0} +do_test where7-2.75.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=28028 + OR f='jklmnopqr' + OR b=1015 + } +} {9 35 61 82 83 84 87 scan 0 sort 0} +do_test where7-2.75.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=28028 + OR f='jklmnopqr' + OR b=1015 + } +} {9 35 61 82 83 84 87 scan 0 sort 0} +do_test where7-2.76.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=31031 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=49 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'klmno*') + } +} {15 41 49 56 62 67 87 89 91 92 93 100 scan 0 sort 0} +do_test where7-2.76.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=31031 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=49 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'klmno*') + } +} {15 41 49 56 62 67 87 89 91 92 93 100 scan 0 sort 0} +do_test where7-2.77.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=80 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=971 + OR a=60 + } +} {4 6 25 29 60 80 scan 0 sort 0} +do_test where7-2.77.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=80 + OR ((a BETWEEN 4 
AND 6) AND a!=5) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=971 + OR a=60 + } +} {4 6 25 29 60 80 scan 0 sort 0} +do_test where7-2.78.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR b=1089 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {30 32 43 85 89 99 scan 0 sort 0} +do_test where7-2.78.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR b=1089 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {30 32 43 85 89 99 scan 0 sort 0} +do_test where7-2.79.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=399 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR a=10 + OR b=1026 + } +} {9 10 11 57 90 scan 0 sort 0} +do_test where7-2.79.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=399 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR a=10 + OR b=1026 + } +} {9 10 11 57 90 scan 0 sort 0} +do_test where7-2.80.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'yzabc*') + OR b=465 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 43 65 76 scan 0 sort 0} +do_test where7-2.80.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'yzabc*') + OR b=465 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 43 65 76 scan 0 sort 0} +do_test where7-2.81.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=25 + OR b=792 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + } +} {19 25 45 71 72 97 scan 0 sort 0} +do_test where7-2.81.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=25 + OR b=792 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + } +} {19 25 45 71 72 97 scan 0 sort 0} +do_test where7-2.82.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=979 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=13 + OR a=15 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR a=27 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR a=32 + OR a=39 + } +} {6 8 13 15 21 27 32 39 67 89 98 100 scan 0 sort 0} +do_test where7-2.82.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=979 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=13 + OR a=15 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR a=27 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR a=32 + OR a=39 + } +} {6 8 13 15 21 27 32 39 67 89 98 100 scan 0 sort 0} +do_test where7-2.83.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='hijklmnop' + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR b=528 + OR c=30030 + OR (g='qponmlk' AND f GLOB 'qrstu*') + } +} {1 7 21 31 33 42 48 58 59 77 79 85 88 89 90 scan 0 sort 0} +do_test where7-2.83.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='hijklmnop' + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) 
AND a!=78) + OR b=528 + OR c=30030 + OR (g='qponmlk' AND f GLOB 'qrstu*') + } +} {1 7 21 31 33 42 48 58 59 77 79 85 88 89 90 scan 0 sort 0} +do_test where7-2.87.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 14 AND 16) AND a!=15) + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR c=1001 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR c=33033 + } +} {1 2 3 8 9 14 16 78 85 86 97 98 99 scan 0 sort 0} +do_test where7-2.87.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 14 AND 16) AND a!=15) + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR c=1001 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR c=33033 + } +} {1 2 3 8 9 14 16 78 85 86 97 98 99 scan 0 sort 0} +do_test where7-2.88.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=311 + OR b=1103 + OR b=88 + } +} {8 scan 0 sort 0} +do_test where7-2.88.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=311 + OR b=1103 + OR b=88 + } +} {8 scan 0 sort 0} +do_test where7-2.89.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 65 AND 67) AND a!=66) + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR c=5005 + OR b=1045 + OR c=8008 + OR f='bcdefghij' + } +} {1 13 14 15 22 23 24 26 27 28 53 65 67 79 95 scan 0 sort 0} +do_test where7-2.89.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 65 AND 67) AND a!=66) + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR c=5005 + OR b=1045 + OR c=8008 + OR f='bcdefghij' + } +} {1 13 14 15 22 23 24 26 27 28 53 65 67 79 95 scan 0 sort 0} +do_test where7-2.90.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=66 + OR b=553 + OR a=64 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR a=62 + OR b=1081 + OR b=770 + OR b=762 + OR b=803 + OR (g='srqponm' AND f GLOB 'efghi*') + } +} {6 17 30 62 64 70 73 93 scan 0 sort 0} +do_test where7-2.90.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=66 + OR b=553 + OR a=64 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR a=62 + OR b=1081 + OR b=770 + OR b=762 + OR b=803 + OR (g='srqponm' AND f GLOB 'efghi*') + } +} {6 17 30 62 64 70 73 93 scan 0 sort 0} +do_test where7-2.91.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'klmno*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR c=17017 + OR b=168 + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {10 19 45 49 50 51 71 77 79 97 scan 0 sort 0} +do_test where7-2.91.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'klmno*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR c=17017 + OR b=168 + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {10 19 45 49 50 51 71 77 79 97 scan 0 sort 0} +do_test where7-2.92.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=34034 + OR (d>=68.0 AND d<69.0 AND d NOT NULL) + OR a=44 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR c=31031 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR b=619 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + OR ((a BETWEEN 29 AND 31) AND 
a!=30) + } +} {3 12 23 29 31 44 55 68 78 81 91 92 93 100 scan 0 sort 0} +do_test where7-2.93.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=48 + OR c=15015 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=110 + OR f='klmnopqrs' + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (g='onmlkji' AND f GLOB 'abcde*') + OR b=674 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + } +} {10 36 43 44 45 48 52 62 65 67 88 94 96 97 99 scan 0 sort 0} +do_test where7-2.93.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=48 + OR c=15015 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=110 + OR f='klmnopqrs' + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (g='onmlkji' AND f GLOB 'abcde*') + OR b=674 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + } +} {10 36 43 44 45 48 52 62 65 67 88 94 96 97 99 scan 0 sort 0} +do_test where7-2.94.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=72 + OR b=913 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=121 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + } +} {2 11 28 72 83 scan 0 sort 0} +do_test where7-2.94.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=72 + OR b=913 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=121 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + } +} {2 11 28 72 83 scan 0 sort 0} +do_test where7-2.95.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=18 + OR b=286 + OR b=1015 + OR a=49 + OR b=264 + } +} {18 24 26 49 scan 0 sort 0} +do_test where7-2.95.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=18 + OR b=286 + OR b=1015 + OR a=49 + OR b=264 + } +} {18 24 26 49 scan 0 sort 0} +do_test where7-2.96.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=69 + OR a=11 + OR c=1001 + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR a=57 + OR ((a BETWEEN 48 AND 50) AND a!=49) + } +} {1 2 3 11 48 50 54 56 57 scan 0 sort 0} +do_test where7-2.96.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=69 + OR a=11 + OR c=1001 + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR a=57 + OR ((a BETWEEN 48 AND 50) AND a!=49) + } +} {1 2 3 11 48 50 54 56 57 scan 0 sort 0} +do_test where7-2.97.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=231 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {21 84 scan 0 sort 0} +do_test where7-2.97.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=231 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {21 84 scan 0 sort 0} +do_test where7-2.98.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=25 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR b=289 + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {3 5 17 23 81 83 85 87 scan 0 sort 0} +do_test where7-2.98.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=25 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR b=289 + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {3 5 17 23 81 83 85 87 scan 0 sort 0} +do_test where7-2.99.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='defghijkl' + OR b=465 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR c=9009 + OR b=990 + OR b=132 + OR a=35 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR a=81 + OR ((a BETWEEN 71 AND 73) AND a!=72) + } +} {3 12 25 26 27 29 35 46 55 71 73 78 81 90 scan 0 sort 0} +do_test where7-2.99.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='defghijkl' + OR b=465 + 
OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR c=9009 + OR b=990 + OR b=132 + OR a=35 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR a=81 + OR ((a BETWEEN 71 AND 73) AND a!=72) + } +} {3 12 25 26 27 29 35 46 55 71 73 78 81 90 scan 0 sort 0} +do_test where7-2.100.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=26026 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR f='lmnopqrst' + OR a=6 + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {6 9 11 37 59 61 63 76 77 78 89 scan 0 sort 0} +do_test where7-2.100.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=26026 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR f='lmnopqrst' + OR a=6 + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {6 9 11 37 59 61 63 76 77 78 89 scan 0 sort 0} +do_test where7-2.101.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 98 AND 100) AND a!=99) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + } +} {7 98 100 scan 0 sort 0} +do_test where7-2.101.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 98 AND 100) AND a!=99) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + } +} {7 98 100 scan 0 sort 0} +do_test where7-2.102.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=11011 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=630 + OR c=19019 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR a=24 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {24 31 32 33 51 53 55 56 57 89 95 scan 0 sort 0} +do_test where7-2.102.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=11011 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=630 + OR c=19019 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR a=24 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {24 31 32 33 51 53 55 56 57 89 95 scan 0 sort 0} +do_test where7-2.103.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 91 AND 93) AND a!=92) + OR b=993 + OR a=81 + OR b=366 + OR b=69 + } +} {81 91 93 scan 0 sort 0} +do_test where7-2.103.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 91 AND 93) AND a!=92) + OR b=993 + OR a=81 + OR b=366 + OR b=69 + } +} {81 91 93 scan 0 sort 0} +do_test where7-2.104.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='stuvwxyza' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR b=1037 + OR f='zabcdefgh' + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {1 3 18 24 25 44 50 51 70 76 77 90 96 scan 0 sort 0} +do_test where7-2.104.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='stuvwxyza' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR b=1037 + OR f='zabcdefgh' + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {1 3 18 24 25 44 50 51 70 76 77 90 96 scan 0 sort 0} +do_test where7-2.105.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR ((a BETWEEN 30 AND 32) AND a!=31) + } +} {4 6 30 32 scan 0 sort 0} +do_test where7-2.105.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR ((a BETWEEN 30 AND 32) AND a!=31) + } +} {4 6 30 32 scan 0 sort 0} +do_test where7-2.106.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=847 + OR b=190 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=704 + } +} {9 23 35 38 40 61 64 70 72 77 87 scan 0 sort 0} +do_test where7-2.106.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=847 + OR b=190 + OR ((a 
BETWEEN 38 AND 40) AND a!=39) + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=704 + } +} {9 23 35 38 40 61 64 70 72 77 87 scan 0 sort 0} +do_test where7-2.107.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=88 + OR f='vwxyzabcd' + OR f='fghijklmn' + OR (g='gfedcba' AND f GLOB 'lmnop*') + } +} {5 8 21 31 47 57 73 83 89 99 scan 0 sort 0} +do_test where7-2.107.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=88 + OR f='vwxyzabcd' + OR f='fghijklmn' + OR (g='gfedcba' AND f GLOB 'lmnop*') + } +} {5 8 21 31 47 57 73 83 89 99 scan 0 sort 0} +do_test where7-2.108.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=498 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR a=1 + } +} {1 69 scan 0 sort 0} +do_test where7-2.108.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=498 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR a=1 + } +} {1 69 scan 0 sort 0} +do_test where7-2.109.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR a=5 + OR b=179 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR a=69 + } +} {5 17 43 47 49 69 95 scan 0 sort 0} +do_test where7-2.109.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR a=5 + OR b=179 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR a=69 + } +} {5 17 43 47 49 69 95 scan 0 sort 0} +do_test where7-2.110.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=971 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=828 + OR a=81 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=627 + OR b=355 + OR b=377 + OR a=44 + } +} {1 7 23 25 44 57 81 scan 0 sort 0} +do_test where7-2.110.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=971 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=828 + OR a=81 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=627 + OR b=355 + OR b=377 + OR a=44 + } +} {1 7 23 25 44 57 81 scan 0 sort 0} +do_test where7-2.111.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=850 + OR ((a BETWEEN 6 AND 8) AND a!=7) + } +} {6 8 scan 0 sort 0} +do_test where7-2.111.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=850 + OR ((a BETWEEN 6 AND 8) AND a!=7) + } +} {6 8 scan 0 sort 0} +do_test where7-2.112.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'opqrs*') + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {17 43 66 69 95 scan 0 sort 0} +do_test where7-2.112.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'opqrs*') + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {17 43 66 69 95 scan 0 sort 0} +do_test where7-2.113.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=726 + OR b=740 + OR a=33 + OR c=8008 + OR f='rstuvwxyz' + OR b=168 + } +} {17 22 23 24 33 43 66 69 95 scan 0 sort 0} +do_test where7-2.113.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=726 + OR b=740 + OR a=33 + OR c=8008 + OR f='rstuvwxyz' + OR b=168 + } +} {17 22 23 24 33 43 66 69 95 scan 0 sort 0} +do_test where7-2.114.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=396 + } +} {17 19 36 scan 0 sort 0} +do_test where7-2.114.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=396 + } +} {17 19 36 scan 0 sort 0} +do_test where7-2.115.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=77 + OR ((a BETWEEN 48 AND 50) AND a!=49) + OR c<=10 
+ OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 48 50 77 scan 0 sort 0} +do_test where7-2.115.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=77 + OR ((a BETWEEN 48 AND 50) AND a!=49) + OR c<=10 + OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 48 50 77 scan 0 sort 0} +do_test where7-2.116.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=253 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=396 + OR b=630 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR c=3003 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {1 7 8 9 12 13 14 15 20 22 23 27 36 49 53 79 scan 0 sort 0} +do_test where7-2.116.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=253 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=396 + OR b=630 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR c=3003 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {1 7 8 9 12 13 14 15 20 22 23 27 36 49 53 79 scan 0 sort 0} +do_test where7-2.117.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=957 + OR b=242 + OR b=113 + OR b=957 + OR b=311 + OR b=143 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + } +} {9 10 13 22 35 48 61 87 scan 0 sort 0} +do_test where7-2.117.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=957 + OR b=242 + OR b=113 + OR b=957 + OR b=311 + OR b=143 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + } +} {9 10 13 22 35 48 61 87 scan 0 sort 0} +do_test where7-2.118.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 94 AND 96) AND a!=95) + OR b=451 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {41 66 74 76 94 96 scan 0 sort 0} +do_test where7-2.118.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 94 AND 96) AND a!=95) + OR b=451 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {41 66 74 76 94 96 scan 0 sort 0} +do_test where7-2.119.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR b=451 + OR b=363 + OR b=330 + OR (g='srqponm' AND f GLOB 'efghi*') + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR (g='gfedcba' AND f GLOB 'lmnop*') + } +} {3 30 33 41 52 54 81 83 89 scan 0 sort 0} +do_test where7-2.119.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR b=451 + OR b=363 + OR b=330 + OR (g='srqponm' AND f GLOB 'efghi*') + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR (g='gfedcba' AND f GLOB 'lmnop*') + } +} {3 30 33 41 52 54 81 83 89 scan 0 sort 0} +do_test where7-2.120.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR (d>=68.0 AND d<69.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR e IS NULL + OR b=759 + } +} {15 68 69 95 scan 0 sort 0} +do_test where7-2.120.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR (d>=68.0 AND d<69.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR e IS NULL + OR b=759 + } +} {15 68 69 95 scan 0 sort 0} +do_test where7-2.121.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (g='jihgfed' 
AND f GLOB 'wxyza*') + } +} {19 21 45 71 74 97 scan 0 sort 0} +do_test where7-2.121.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (g='jihgfed' AND f GLOB 'wxyza*') + } +} {19 21 45 71 74 97 scan 0 sort 0} +do_test where7-2.122.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1037 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR ((a BETWEEN 75 AND 77) AND a!=76) + } +} {27 43 45 47 75 77 82 scan 0 sort 0} +do_test where7-2.122.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1037 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR ((a BETWEEN 75 AND 77) AND a!=76) + } +} {27 43 45 47 75 77 82 scan 0 sort 0} +do_test where7-2.123.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1045 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR c=12012 + } +} {34 35 36 37 38 39 95 scan 0 sort 0} +do_test where7-2.123.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1045 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR c=12012 + } +} {34 35 36 37 38 39 95 scan 0 sort 0} +do_test where7-2.124.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'tuvwx*') + OR b=421 + OR b=429 + OR b=498 + OR b=33 + OR b=198 + OR c=14014 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + } +} {3 18 23 39 40 41 42 49 75 97 scan 0 sort 0} +do_test where7-2.124.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'tuvwx*') + OR b=421 + OR b=429 + OR b=498 + OR b=33 + OR b=198 + OR c=14014 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + } +} {3 18 23 39 40 41 42 49 75 97 scan 0 sort 0} +do_test where7-2.125.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=47 + OR c=31031 + OR a=38 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (g='srqponm' AND f GLOB 'fghij*') + OR b=242 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR b=352 + OR a=49 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {8 22 31 32 34 38 49 57 60 70 86 91 92 93 scan 0 sort 0} +do_test where7-2.125.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=47 + OR c=31031 + OR a=38 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (g='srqponm' AND f GLOB 'fghij*') + OR b=242 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR b=352 + OR a=49 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {8 22 31 32 34 38 49 57 60 70 86 91 92 93 scan 0 sort 0} +do_test where7-2.126.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=704 + OR a=7 + OR a=8 + OR a=46 + OR b=740 + OR b=993 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {7 8 38 46 64 87 scan 0 sort 0} +do_test where7-2.126.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=704 + OR a=7 + OR a=8 + OR a=46 + OR b=740 + OR b=993 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {7 8 38 46 64 87 scan 0 sort 0} +do_test where7-2.127.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 62 AND 64) AND a!=63) + OR c=32032 + OR a=76 + } +} {62 64 76 94 95 96 scan 0 sort 0} +do_test where7-2.127.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 62 AND 64) AND a!=63) + OR c=32032 + OR a=76 + } +} {62 64 76 94 95 96 scan 0 sort 0} +do_test where7-2.128.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE 
(g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=528 + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {19 48 91 scan 0 sort 0} +do_test where7-2.128.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=528 + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {19 48 91 scan 0 sort 0} +do_test where7-2.129.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=65 + } +} {26 65 97 scan 0 sort 0} +do_test where7-2.129.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=65 + } +} {26 65 97 scan 0 sort 0} +do_test where7-2.130.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=71.0 AND d<72.0 AND d NOT NULL) + OR 1000000=50.0 AND d<51.0 AND d NOT NULL) + OR a=24 + } +} {2 24 50 71 scan 0 sort 0} +do_test where7-2.130.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=71.0 AND d<72.0 AND d NOT NULL) + OR 1000000=50.0 AND d<51.0 AND d NOT NULL) + OR a=24 + } +} {2 24 50 71 scan 0 sort 0} +do_test where7-2.131.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=60 + OR a=39 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=36 + OR b=814 + OR a=14 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=440 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + } +} {5 14 25 39 40 51 60 61 74 77 93 95 scan 0 sort 0} +do_test where7-2.131.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=60 + OR a=39 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=36 + OR b=814 + OR a=14 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=440 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + } +} {5 14 25 39 40 51 60 61 74 77 93 95 scan 0 sort 0} +do_test where7-2.132.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f IS NULL + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {39 41 scan 0 sort 0} +do_test where7-2.132.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f IS NULL + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {39 41 scan 0 sort 0} +do_test where7-2.133.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=44 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {4 17 19 scan 0 sort 0} +do_test where7-2.133.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=44 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {4 17 19 scan 0 sort 0} +do_test where7-2.134.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=80.0 AND d<81.0 AND d NOT NULL) + OR a=82 + } +} {80 82 scan 0 sort 0} +do_test where7-2.134.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=80.0 AND d<81.0 AND d NOT NULL) + OR a=82 + } +} {80 82 scan 0 sort 0} +do_test where7-2.135.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR c=24024 + OR b=946 + OR a=19 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + } +} {19 47 70 71 72 84 86 scan 0 sort 0} +do_test where7-2.135.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR c=24024 + OR b=946 + OR a=19 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + } +} {19 47 70 71 72 84 86 scan 0 sort 0} +do_test where7-2.136.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=27 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=1045 + OR a=84 + OR f='qrstuvwxy' + } +} {16 19 27 42 45 68 71 82 84 89 91 94 95 97 scan 0 sort 0} +do_test where7-2.136.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=27 + OR (f GLOB '?uvwx*' AND 
f GLOB 'tuvw*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=1045 + OR a=84 + OR f='qrstuvwxy' + } +} {16 19 27 42 45 68 71 82 84 89 91 94 95 97 scan 0 sort 0} +do_test where7-2.137.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=704 + OR b=949 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR c=24024 + OR b=553 + OR a=18 + OR a=92 + } +} {18 22 64 70 71 72 92 scan 0 sort 0} +do_test where7-2.137.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=704 + OR b=949 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR c=24024 + OR b=553 + OR a=18 + OR a=92 + } +} {18 22 64 70 71 72 92 scan 0 sort 0} +do_test where7-2.138.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=902 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=25 + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR f='zabcdefgh' + OR b=385 + } +} {1 16 18 25 27 35 51 53 61 77 79 82 scan 0 sort 0} +do_test where7-2.138.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=902 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=25 + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR f='zabcdefgh' + OR b=385 + } +} {1 16 18 25 27 35 51 53 61 77 79 82 scan 0 sort 0} +do_test where7-2.139.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=22 + OR b=36 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR ((a BETWEEN 81 AND 83) AND a!=82) + } +} {22 31 57 59 81 83 scan 0 sort 0} +do_test where7-2.139.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=22 + OR b=36 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR ((a BETWEEN 81 AND 83) AND a!=82) + } +} {22 31 57 59 81 83 scan 0 sort 0} +do_test where7-2.140.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=253 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {23 60 scan 0 sort 0} +do_test where7-2.140.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=253 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {23 60 scan 0 sort 0} +do_test where7-2.141.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=641 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {1 15 27 36 38 41 53 67 79 93 scan 0 sort 0} +do_test where7-2.141.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=641 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {1 15 27 36 38 41 53 67 79 93 scan 0 sort 0} +do_test where7-2.142.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=30030 + OR a=18 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=11 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR a=52 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=13 + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + } +} {1 13 18 22 40 44 46 52 65 88 89 90 scan 0 sort 0} +do_test where7-2.142.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=30030 + OR a=18 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=11 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR a=52 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=13 + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + } +} {1 13 18 22 40 44 46 52 65 88 89 90 scan 0 sort 0} +do_test where7-2.143.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=23023 + OR 
f='efghijklm' + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=1045 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {4 20 24 30 39 41 46 50 56 67 68 69 72 76 82 95 98 scan 0 sort 0} +do_test where7-2.143.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=23023 + OR f='efghijklm' + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=1045 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {4 20 24 30 39 41 46 50 56 67 68 69 72 76 82 95 98 scan 0 sort 0} +do_test where7-2.144.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=476 + OR a=11 + OR a=52 + OR b=858 + OR b=264 + OR f='wxyzabcde' + OR c=18018 + OR b=597 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {11 22 24 48 52 53 54 69 74 78 100 scan 0 sort 0} +do_test where7-2.144.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=476 + OR a=11 + OR a=52 + OR b=858 + OR b=264 + OR f='wxyzabcde' + OR c=18018 + OR b=597 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {11 22 24 48 52 53 54 69 74 78 100 scan 0 sort 0} +do_test where7-2.145.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=91 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR b=102 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR b=784 + } +} {12 21 22 36 59 61 85 89 91 scan 0 sort 0} +do_test where7-2.145.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=91 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR b=102 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR b=784 + } +} {12 21 22 36 59 61 85 89 91 scan 0 sort 0} +do_test where7-2.146.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='vutsrqp' AND f GLOB 'opqrs*') + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR b=990 + OR a=52 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {14 38 52 90 91 scan 0 sort 0} +do_test where7-2.146.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='vutsrqp' AND f GLOB 'opqrs*') + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR b=990 + OR a=52 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {14 38 52 90 91 scan 0 sort 0} +do_test where7-2.147.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=22022 + OR b=960 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=48 + OR b=729 + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR a=44 + OR b=773 + } +} {41 43 44 45 48 64 65 66 scan 0 sort 0} +do_test where7-2.147.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=22022 + OR b=960 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=48 + OR b=729 + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR a=44 + OR b=773 + } +} {41 43 44 45 48 64 65 66 scan 0 sort 0} +do_test where7-2.148.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 68 AND 70) AND a!=69) + OR b=421 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR c=22022 + OR b=825 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + } +} {2 6 17 19 22 24 29 32 58 64 65 66 68 70 75 84 89 scan 0 sort 0} +do_test where7-2.148.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 68 AND 
70) AND a!=69) + OR b=421 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR c=22022 + OR b=825 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + } +} {2 6 17 19 22 24 29 32 58 64 65 66 68 70 75 84 89 scan 0 sort 0} +do_test where7-2.149.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=484 + OR b=1026 + OR a=90 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=608 + OR a=32 + } +} {32 44 74 90 scan 0 sort 0} +do_test where7-2.149.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=484 + OR b=1026 + OR a=90 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=608 + OR a=32 + } +} {32 44 74 90 scan 0 sort 0} +do_test where7-2.150.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c<=10 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR b=154 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=880 + OR a=55 + OR b=773 + OR b=319 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {14 29 55 76 77 80 83 scan 0 sort 0} +do_test where7-2.150.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c<=10 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR b=154 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=880 + OR a=55 + OR b=773 + OR b=319 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {14 29 55 76 77 80 83 scan 0 sort 0} +do_test where7-2.151.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'ijklm*') + OR f='mnopqrstu' + OR a=62 + } +} {8 12 38 62 64 90 scan 0 sort 0} +do_test where7-2.151.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'ijklm*') + OR f='mnopqrstu' + OR a=62 + } +} {8 12 38 62 64 90 scan 0 sort 0} +do_test where7-2.152.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=33 + OR b=1045 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR c=13013 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR b=124 + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {33 37 38 39 40 88 90 95 scan 0 sort 0} +do_test where7-2.152.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=33 + OR b=1045 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR c=13013 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR b=124 + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {33 37 38 39 40 88 90 95 scan 0 sort 0} +do_test where7-2.153.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=883 + OR c=32032 + OR f='fghijklmn' + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=421 + OR b=803 + OR c=4004 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + } +} {2 5 10 11 12 28 31 49 51 54 57 73 80 83 94 95 96 scan 0 sort 0} +do_test where7-2.153.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=883 + OR c=32032 + OR f='fghijklmn' + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=421 + OR b=803 + OR c=4004 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + } +} {2 5 10 11 12 28 31 49 51 54 57 73 80 83 94 95 96 scan 0 sort 0} +do_test where7-2.154.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=99 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {9 16 42 68 72 94 scan 0 sort 0} +do_test where7-2.154.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=99 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {9 16 42 68 72 94 scan 0 sort 0} +do_test where7-2.155.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='defghijkl' + OR b=308 + } +} {3 28 29 55 81 scan 0 sort 0} +do_test where7-2.155.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE f='defghijkl' + OR b=308 + } +} {3 28 29 55 81 scan 0 sort 0} +do_test where7-2.156.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=795 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR f='jklmnopqr' + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=1056 + } +} {2 9 28 35 51 54 61 80 87 96 scan 0 sort 0} +do_test where7-2.156.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=795 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR f='jklmnopqr' + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=1056 + } +} {2 9 28 35 51 54 61 80 87 96 scan 0 sort 0} +do_test where7-2.157.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=47 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=410 + OR b=682 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR f='hijklmnop' + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=168 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR a=32 + OR a=72 + } +} {7 32 33 40 47 51 59 62 72 85 94 98 100 scan 0 sort 0} +do_test where7-2.157.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=47 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=410 + OR b=682 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR f='hijklmnop' + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=168 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR a=32 + OR a=72 + } +} {7 32 33 40 47 51 59 62 72 85 94 98 100 scan 0 sort 0} +do_test where7-2.158.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=616 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=96 + } +} {25 27 38 56 96 scan 0 sort 0} +do_test where7-2.158.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=616 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=96 + } +} {25 27 38 56 96 scan 0 sort 0} +do_test where7-2.159.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=352 + } +} {32 66 scan 0 sort 0} +do_test where7-2.159.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=352 + } +} {32 66 scan 0 sort 0} +do_test where7-2.160.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=795 + OR c=13013 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=597 + } +} {28 37 38 39 scan 0 sort 0} +do_test where7-2.160.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=795 + OR c=13013 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=597 + } +} {28 37 38 39 scan 0 sort 0} +do_test where7-2.161.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=23 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=641 + OR b=352 + OR b=179 + OR b=806 + OR b=839 + OR b=33 + } +} {3 23 32 68 scan 0 sort 0} +do_test where7-2.161.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=23 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=641 + OR b=352 + OR b=179 + OR b=806 + OR b=839 + OR b=33 + } +} {3 23 32 68 scan 0 sort 0} +do_test where7-2.162.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1070 + OR b=1078 + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR c=12012 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR b=319 + OR c=5005 + OR 1000000=59.0 AND d<60.0 AND d NOT NULL) + } +} {2 17 28 43 54 59 69 80 81 95 scan 0 sort 0} +do_test where7-2.163.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='cdefghijk' + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + } +} {2 17 28 43 54 59 69 80 81 95 scan 0 sort 0} +do_test where7-2.164.1 { + count_steps_sort { + 
SELECT a FROM t2 + WHERE a=65 + OR c=14014 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=47 + OR b=220 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + } +} {20 37 40 41 42 47 65 88 scan 0 sort 0} +do_test where7-2.164.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=65 + OR c=14014 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=47 + OR b=220 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + } +} {20 37 40 41 42 47 65 88 scan 0 sort 0} +do_test where7-2.165.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=891 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR b=484 + OR a=62 + OR (g='ihgfedc' AND f GLOB 'defgh*') + } +} {35 44 57 62 81 86 scan 0 sort 0} +do_test where7-2.165.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=891 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR b=484 + OR a=62 + OR (g='ihgfedc' AND f GLOB 'defgh*') + } +} {35 44 57 62 81 86 scan 0 sort 0} +do_test where7-2.166.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=363 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR a=39 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {2 10 25 33 39 46 54 58 60 scan 0 sort 0} +do_test where7-2.166.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=363 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR a=39 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {2 10 25 33 39 46 54 58 60 scan 0 sort 0} +do_test where7-2.167.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=30030 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=850 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {20 46 52 72 88 89 90 98 scan 0 sort 0} +do_test where7-2.167.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=30030 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=850 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {20 46 52 72 88 89 90 98 scan 0 sort 0} +do_test where7-2.168.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=80 + } +} {23 91 scan 0 sort 0} +do_test where7-2.168.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=80 + } +} {23 91 scan 0 sort 0} +do_test where7-2.169.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 59 AND 61) AND a!=60) + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=462 + OR a=51 + OR b=344 + OR b=333 + OR ((a BETWEEN 61 AND 63) AND a!=62) + } +} {42 51 59 61 63 77 scan 0 sort 0} +do_test where7-2.169.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 59 AND 61) AND a!=60) + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=462 + OR a=51 + OR b=344 + OR b=333 + OR ((a BETWEEN 61 AND 63) AND a!=62) + } +} {42 51 59 61 63 77 scan 0 sort 0} +do_test where7-2.170.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=586 + OR a=21 + OR b=638 + } +} {21 58 scan 0 sort 0} +do_test where7-2.170.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=586 + OR a=21 + OR b=638 + } +} {21 58 scan 0 sort 0} +do_test where7-2.171.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=179 + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR ((a BETWEEN 40 AND 
42) AND a!=41) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR ((a BETWEEN 72 AND 74) AND a!=73) + } +} {2 4 13 40 42 72 74 scan 0 sort 0} +do_test where7-2.171.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=179 + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR ((a BETWEEN 72 AND 74) AND a!=73) + } +} {2 4 13 40 42 72 74 scan 0 sort 0} +do_test where7-2.172.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=333 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=407 + OR a=5 + OR b=817 + OR b=891 + } +} {5 37 53 62 81 scan 0 sort 0} +do_test where7-2.172.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=333 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=407 + OR a=5 + OR b=817 + OR b=891 + } +} {5 37 53 62 81 scan 0 sort 0} +do_test where7-2.173.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b<0 + OR b=352 + OR b=517 + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR b=1012 + OR ((a BETWEEN 11 AND 13) AND a!=12) + } +} {11 12 13 14 32 47 92 97 scan 0 sort 0} +do_test where7-2.173.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b<0 + OR b=352 + OR b=517 + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR b=1012 + OR ((a BETWEEN 11 AND 13) AND a!=12) + } +} {11 12 13 14 32 47 92 97 scan 0 sort 0} +do_test where7-2.174.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR c<=10 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR a=32 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR d<0.0 + } +} {12 14 32 41 scan 0 sort 0} +do_test where7-2.174.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR c<=10 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR a=32 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR d<0.0 + } +} {12 14 32 41 scan 0 sort 0} +do_test where7-2.175.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 20 AND 22) AND a!=21) + OR b=1045 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=26 + OR (g='gfedcba' AND f GLOB 'opqrs*') + } +} {20 22 26 78 92 95 scan 0 sort 0} +do_test where7-2.175.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 20 AND 22) AND a!=21) + OR b=1045 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=26 + OR (g='gfedcba' AND f GLOB 'opqrs*') + } +} {20 22 26 78 92 95 scan 0 sort 0} +do_test where7-2.176.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=49 + OR b=58 + } +} {49 scan 0 sort 0} +do_test where7-2.176.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=49 + OR b=58 + } +} {49 scan 0 sort 0} +do_test where7-2.177.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR c=32032 + OR b=289 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {3 14 16 17 19 75 94 95 96 scan 0 sort 0} +do_test where7-2.177.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR c=32032 + OR b=289 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {3 14 16 17 19 75 94 95 96 scan 0 sort 0} +do_test where7-2.178.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 15 AND 17) AND a!=16) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR b=33 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {3 15 17 43 57 59 69 95 scan 0 sort 0} +do_test where7-2.178.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 15 AND 17) AND a!=16) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR b=33 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {3 15 17 43 57 59 69 95 scan 0 sort 0} +do_test where7-2.179.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=828 + OR b=341 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=902 + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=242 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'qrstu*') + } +} {1 2 16 22 31 42 64 66 68 82 91 94 95 scan 0 sort 0} +do_test where7-2.179.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=828 + OR b=341 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=902 + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=242 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'qrstu*') + } +} {1 2 16 22 31 42 64 66 68 82 91 94 95 scan 0 sort 0} +do_test where7-2.180.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'efghi*') + OR b=982 + OR b=781 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR d>1e10 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + } +} {56 66 68 71 scan 0 sort 0} +do_test where7-2.180.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'efghi*') + OR b=982 + OR b=781 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR d>1e10 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + } +} {56 66 68 71 scan 0 sort 0} +do_test where7-2.181.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR a=31 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR a=76 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=176 + } +} {8 16 23 31 34 57 59 60 69 74 76 86 scan 0 sort 0} +do_test where7-2.181.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR a=31 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR a=76 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=176 + } +} {8 16 23 31 34 57 59 60 69 74 76 86 scan 0 sort 0} +do_test where7-2.182.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ponmlkj' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=14 + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR f='zabcdefgh' + } +} {12 25 47 51 55 59 60 61 77 88 90 scan 0 sort 0} +do_test where7-2.182.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=14 + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR f='zabcdefgh' + } +} {12 25 47 51 55 59 60 61 77 88 90 scan 0 sort 0} +do_test where7-2.183.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR b=286 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=91 + OR ((a BETWEEN 43 AND 45) AND a!=44) + } +} {25 26 31 43 45 scan 0 sort 0} +do_test where7-2.183.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR b=286 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=91 + OR ((a 
BETWEEN 43 AND 45) AND a!=44) + } +} {25 26 31 43 45 scan 0 sort 0} +do_test where7-2.184.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'nopqr*') + OR c=19019 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR b=374 + } +} {22 34 48 55 56 57 65 74 100 scan 0 sort 0} +do_test where7-2.184.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'nopqr*') + OR c=19019 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR b=374 + } +} {22 34 48 55 56 57 65 74 100 scan 0 sort 0} +do_test where7-2.185.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE g IS NULL + OR (g='onmlkji' AND f GLOB 'wxyza*') + } +} {48 scan 0 sort 0} +do_test where7-2.185.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE g IS NULL + OR (g='onmlkji' AND f GLOB 'wxyza*') + } +} {48 scan 0 sort 0} +do_test where7-2.186.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=4 + OR b=407 + } +} {4 37 scan 0 sort 0} +do_test where7-2.186.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=4 + OR b=407 + } +} {4 37 scan 0 sort 0} +do_test where7-2.187.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 66 AND 68) AND a!=67) + OR b=564 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=234 + OR b=641 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR a=98 + } +} {1 5 12 13 27 39 53 65 66 68 79 91 98 scan 0 sort 0} +do_test where7-2.187.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 66 AND 68) AND a!=67) + OR b=564 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=234 + OR b=641 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR a=98 + } +} {1 5 12 13 27 39 53 65 66 68 79 91 98 scan 0 sort 0} +do_test where7-2.188.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=73 + OR b=44 + OR b=539 + OR c=11011 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=69 + OR b=1001 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + } +} {4 23 26 31 32 33 34 49 73 81 91 95 scan 0 sort 0} +do_test where7-2.188.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=73 + OR b=44 + OR b=539 + OR c=11011 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=69 + OR b=1001 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + } +} {4 23 26 31 32 33 34 49 73 81 91 95 scan 0 sort 0} +do_test where7-2.189.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=264 + OR b=143 + OR a=48 + } +} {13 24 48 scan 0 sort 0} +do_test where7-2.189.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=264 + OR b=143 + OR a=48 + } +} {13 24 48 scan 0 sort 0} +do_test where7-2.190.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1001 + OR b=1070 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR b=14 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR a=66 + } +} {18 56 58 66 72 74 91 scan 0 sort 0} +do_test where7-2.190.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1001 + OR b=1070 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR b=14 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR a=66 + } +} {18 56 58 66 72 74 91 scan 0 sort 0} +do_test where7-2.191.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=23023 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR a=66 + OR (g='onmlkji' 
AND f GLOB 'zabcd*') + OR a=51 + OR a=23 + OR c=4004 + } +} {10 11 12 23 51 66 67 68 69 83 scan 0 sort 0} +do_test where7-2.191.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=23023 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR a=66 + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR a=51 + OR a=23 + OR c=4004 + } +} {10 11 12 23 51 66 67 68 69 83 scan 0 sort 0} +do_test where7-2.192.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=36 + OR (g='rqponml' AND f GLOB 'lmnop*') + OR a=80 + } +} {37 80 scan 0 sort 0} +do_test where7-2.192.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=36 + OR (g='rqponml' AND f GLOB 'lmnop*') + OR a=80 + } +} {37 80 scan 0 sort 0} +do_test where7-2.193.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=55 + OR f='efghijklm' + OR a=8 + OR a=80 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=256 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {4 8 30 34 37 39 55 56 60 72 80 82 86 scan 0 sort 0} +do_test where7-2.193.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=55 + OR f='efghijklm' + OR a=8 + OR a=80 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=256 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {4 8 30 34 37 39 55 56 60 72 80 82 86 scan 0 sort 0} +do_test where7-2.194.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=836 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=91 + OR b=594 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {2 8 28 47 54 76 80 87 91 scan 0 sort 0} +do_test where7-2.194.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=836 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=91 + OR b=594 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {2 8 28 47 54 76 80 87 91 scan 0 sort 0} +do_test where7-2.195.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'yzabc*') + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR c=6006 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {12 16 17 18 24 43 50 52 62 64 88 90 scan 0 sort 0} +do_test where7-2.195.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'yzabc*') + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR c=6006 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {12 16 17 18 24 43 50 52 62 64 88 90 scan 0 sort 0} +do_test where7-2.196.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 83 AND 85) AND a!=84) + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR a=13 + OR b=121 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR b=660 + OR b=792 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + } +} {6 11 12 13 14 16 18 44 60 72 83 85 scan 0 sort 0} +do_test where7-2.196.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 83 AND 85) AND a!=84) + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR a=13 + OR b=121 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR b=660 + OR b=792 + OR 
(g='xwvutsr' AND f GLOB 'ghijk*') + } +} {6 11 12 13 14 16 18 44 60 72 83 85 scan 0 sort 0} +do_test where7-2.197.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1089 + OR b=495 + OR b=157 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR f='wxyzabcde' + } +} {1 7 20 22 45 46 48 59 72 74 98 99 100 scan 0 sort 0} +do_test where7-2.197.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1089 + OR b=495 + OR b=157 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR f='wxyzabcde' + } +} {1 7 20 22 45 46 48 59 72 74 98 99 100 scan 0 sort 0} +do_test where7-2.198.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='bcdefghij' + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (g='srqponm' AND f GLOB 'ghijk*') + OR b=157 + OR b=267 + OR c=34034 + } +} {1 27 32 40 42 53 79 100 scan 0 sort 0} +do_test where7-2.198.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='bcdefghij' + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (g='srqponm' AND f GLOB 'ghijk*') + OR b=157 + OR b=267 + OR c=34034 + } +} {1 27 32 40 42 53 79 100 scan 0 sort 0} +do_test where7-2.199.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=19 + OR a=23 + OR c<=10 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {19 23 66 scan 0 sort 0} +do_test where7-2.199.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=19 + OR a=23 + OR c<=10 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {19 23 66 scan 0 sort 0} +do_test where7-2.200.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 1 AND 3) AND a!=2) + OR b=792 + OR b=803 + OR b=36 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 3 27 53 72 73 79 scan 0 sort 0} +do_test where7-2.200.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 1 AND 3) AND a!=2) + OR b=792 + OR b=803 + OR b=36 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 3 27 53 72 73 79 scan 0 sort 0} +do_test where7-2.201.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR f='jklmnopqr' + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR b=891 + OR a=40 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + } +} {9 20 35 40 50 61 67 71 73 76 78 81 87 scan 0 sort 0} +do_test where7-2.201.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR f='jklmnopqr' + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR b=891 + OR a=40 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + } +} {9 20 35 40 50 61 67 71 73 76 78 81 87 scan 0 sort 0} +do_test where7-2.202.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=32 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=95 + OR d>1e10 + OR b=429 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR c=10010 + OR ((a BETWEEN 83 AND 85) AND a!=84) + } +} {15 28 29 30 32 39 54 76 83 85 88 95 scan 0 sort 0} +do_test where7-2.202.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=32 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=95 + OR d>1e10 + OR b=429 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR c=10010 + OR ((a 
BETWEEN 83 AND 85) AND a!=84) + } +} {15 28 29 30 32 39 54 76 83 85 88 95 scan 0 sort 0} +do_test where7-2.203.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'defgh*') + OR a=22 + OR a=26 + OR a=81 + OR a=53 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR c=30030 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=82 + OR b=594 + } +} {3 8 22 26 53 54 81 82 88 89 90 92 94 scan 0 sort 0} +do_test where7-2.203.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'defgh*') + OR a=22 + OR a=26 + OR a=81 + OR a=53 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR c=30030 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=82 + OR b=594 + } +} {3 8 22 26 53 54 81 82 88 89 90 92 94 scan 0 sort 0} +do_test where7-2.204.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 34 AND 36) AND a!=35) + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR a=83 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=1092 + OR (g='srqponm' AND f GLOB 'efghi*') + OR b=25 + } +} {12 30 34 36 57 68 83 86 99 scan 0 sort 0} +do_test where7-2.204.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 34 AND 36) AND a!=35) + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR a=83 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=1092 + OR (g='srqponm' AND f GLOB 'efghi*') + OR b=25 + } +} {12 30 34 36 57 68 83 86 99 scan 0 sort 0} +do_test where7-2.205.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=20 + OR b=421 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR a=50 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {5 20 40 50 53 scan 0 sort 0} +do_test where7-2.205.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=20 + OR b=421 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR a=50 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {5 20 40 50 53 scan 0 sort 0} +do_test where7-2.206.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=960 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {13 39 65 91 scan 0 sort 0} +do_test where7-2.206.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=960 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {13 39 65 91 scan 0 sort 0} +do_test where7-2.207.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=891 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR f='nopqrstuv' + } +} {13 31 39 65 81 91 scan 0 sort 0} +do_test where7-2.207.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=891 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR f='nopqrstuv' + } +} {13 31 39 65 81 91 scan 0 sort 0} +do_test where7-2.208.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=157 + OR b=289 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=1001 + OR b=707 + } +} {32 34 91 scan 0 sort 0} +do_test where7-2.208.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=157 + OR b=289 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=1001 + OR b=707 + } +} {32 34 91 scan 0 sort 0} +do_test where7-2.209.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR b=957 + OR ((a BETWEEN 48 AND 50) AND a!=49) + } +} {48 50 87 100 scan 0 sort 0} +do_test where7-2.209.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR b=957 + OR ((a BETWEEN 
48 AND 50) AND a!=49) + } +} {48 50 87 100 scan 0 sort 0} +do_test where7-2.210.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=40.0 AND d<41.0 AND d NOT NULL) + OR a=77 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {40 77 85 scan 0 sort 0} +do_test where7-2.210.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=40.0 AND d<41.0 AND d NOT NULL) + OR a=77 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {40 77 85 scan 0 sort 0} +do_test where7-2.211.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=11 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=99 + } +} {1 14 16 38 66 96 99 scan 0 sort 0} +do_test where7-2.211.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=11 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=99 + } +} {1 14 16 38 66 96 99 scan 0 sort 0} +do_test where7-2.212.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='fghijklmn' + OR a=16 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=80 + } +} {3 5 9 11 16 31 52 57 60 62 71 83 90 92 scan 0 sort 0} +do_test where7-2.212.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='fghijklmn' + OR a=16 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=80 + } +} {3 5 9 11 16 31 52 57 60 62 71 83 90 92 scan 0 sort 0} +do_test where7-2.213.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR a=44 + OR a=43 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + OR b=25 + } +} {12 43 44 66 scan 0 sort 0} +do_test where7-2.213.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR a=44 + OR a=43 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + OR b=25 + } +} {12 43 44 66 scan 0 sort 0} +do_test where7-2.214.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='vwxyzabcd' + OR a=73 + OR b=597 + } +} {21 47 73 99 scan 0 sort 0} +do_test where7-2.214.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='vwxyzabcd' + OR a=73 + OR b=597 + } +} {21 47 73 99 scan 0 sort 0} +do_test where7-2.215.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=22 + OR ((a BETWEEN 61 AND 63) AND a!=62) + OR e IS NULL + OR a=1 + } +} {1 2 61 63 scan 0 sort 0} +do_test where7-2.215.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=22 + OR ((a BETWEEN 61 AND 63) AND a!=62) + OR e IS NULL + OR a=1 + } +} {1 2 61 63 scan 0 sort 0} +do_test where7-2.216.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'opqrs*') + OR b=1015 + OR c=16016 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR f='abcdefghi' + OR b=605 + OR a=63 + } +} {3 19 26 45 46 47 48 52 55 63 71 78 92 97 scan 0 sort 0} +do_test where7-2.216.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'opqrs*') + OR b=1015 + OR c=16016 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR f='abcdefghi' + OR b=605 + OR a=63 + } +} {3 19 26 45 46 47 48 52 55 63 71 78 92 97 scan 0 sort 0} +do_test 
where7-2.217.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='yxwvuts' AND f GLOB 'bcdef*') + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=641 + OR b=795 + } +} {1 44 scan 0 sort 0} +do_test where7-2.217.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='yxwvuts' AND f GLOB 'bcdef*') + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=641 + OR b=795 + } +} {1 44 scan 0 sort 0} +do_test where7-2.218.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='fghijklmn' + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + } +} {5 15 31 44 57 83 scan 0 sort 0} +do_test where7-2.218.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='fghijklmn' + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + } +} {5 15 31 44 57 83 scan 0 sort 0} +do_test where7-2.219.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 99 AND 101) AND a!=100) + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR a=92 + OR b=1100 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR ((a BETWEEN 30 AND 32) AND a!=31) + } +} {30 32 72 74 85 87 92 98 99 100 scan 0 sort 0} +do_test where7-2.219.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 99 AND 101) AND a!=100) + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR a=92 + OR b=1100 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR ((a BETWEEN 30 AND 32) AND a!=31) + } +} {30 32 72 74 85 87 92 98 99 100 scan 0 sort 0} +do_test where7-2.220.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=880 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=1089 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR f IS NULL + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 12 16 31 57 69 71 80 83 86 97 99 scan 0 sort 0} +do_test where7-2.220.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=880 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=1089 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR f IS NULL + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 12 16 31 57 69 71 80 83 86 97 99 scan 0 sort 0} +do_test where7-2.221.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1026 + OR b=407 + OR (g='srqponm' AND f GLOB 'fghij*') + OR b=564 + OR c=23023 + OR b=891 + OR c=22022 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='rqponml' AND f GLOB 'ijklm*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + } +} {9 11 21 22 24 31 34 37 64 65 66 67 68 69 81 scan 0 sort 0} +do_test where7-2.221.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1026 + OR b=407 + OR (g='srqponm' AND f GLOB 'fghij*') + OR b=564 + OR c=23023 + OR b=891 + OR c=22022 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='rqponml' AND f GLOB 'ijklm*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + } +} {9 11 21 22 24 31 34 37 64 65 66 67 68 69 81 scan 0 sort 0} +do_test where7-2.222.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR a=72 + OR a=43 + } +} {43 71 72 73 scan 0 sort 0} +do_test where7-2.222.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR a=72 + OR a=43 + } +} {43 71 72 73 scan 0 sort 0} +do_test where7-2.223.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + 
OR ((a BETWEEN 79 AND 81) AND a!=80) + OR c=18018 + OR b=792 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + } +} {8 20 46 52 53 54 61 67 69 72 74 77 79 81 91 98 scan 0 sort 0} +do_test where7-2.223.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR c=18018 + OR b=792 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + } +} {8 20 46 52 53 54 61 67 69 72 74 77 79 81 91 98 scan 0 sort 0} +do_test where7-2.224.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=429 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR b=1070 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {4 17 30 33 39 40 56 82 scan 0 sort 0} +do_test where7-2.224.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=429 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR b=1070 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {4 17 30 33 39 40 56 82 scan 0 sort 0} +do_test where7-2.225.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=572 + } +} {52 61 scan 0 sort 0} +do_test where7-2.225.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=572 + } +} {52 61 scan 0 sort 0} +do_test where7-2.226.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 62 AND 64) AND a!=63) + OR f='abcdefghi' + OR (g='wvutsrq' AND f GLOB 'ijklm*') + } +} {8 26 52 62 64 78 scan 0 sort 0} +do_test where7-2.226.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 62 AND 64) AND a!=63) + OR f='abcdefghi' + OR (g='wvutsrq' AND f GLOB 'ijklm*') + } +} {8 26 52 62 64 78 scan 0 sort 0} +do_test where7-2.227.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=40 + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {40 85 87 scan 0 sort 0} +do_test where7-2.227.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=40 + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {40 85 87 scan 0 sort 0} +do_test where7-2.228.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=43 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR a=1 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR a=75 + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {1 43 44 46 59 61 75 77 83 scan 0 sort 0} +do_test where7-2.228.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=43 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR a=1 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR a=75 + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {1 43 44 46 59 61 75 77 83 scan 0 sort 0} +do_test where7-2.229.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=231 + OR a=87 + } +} {8 21 34 60 86 87 91 scan 0 sort 0} +do_test where7-2.229.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=231 + OR a=87 + } +} {8 21 34 60 86 87 91 scan 0 sort 0} +do_test where7-2.230.1 
{ + count_steps_sort { + SELECT a FROM t2 + WHERE a=77 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='rqponml' AND f GLOB 'hijkl*') + OR c=24024 + OR c=5005 + } +} {13 14 15 33 65 70 71 72 77 scan 0 sort 0} +do_test where7-2.230.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=77 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='rqponml' AND f GLOB 'hijkl*') + OR c=24024 + OR c=5005 + } +} {13 14 15 33 65 70 71 72 77 scan 0 sort 0} +do_test where7-2.231.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'ijklm*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='srqponm' AND f GLOB 'defgh*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=682 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + } +} {22 29 34 60 62 64 65 66 89 91 scan 0 sort 0} +do_test where7-2.231.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'ijklm*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='srqponm' AND f GLOB 'defgh*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=682 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + } +} {22 29 34 60 62 64 65 66 89 91 scan 0 sort 0} +do_test where7-2.232.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=212 + OR b=121 + OR c=2002 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='jihgfed' AND f GLOB 'xyzab*') + } +} {4 5 6 11 75 84 86 scan 0 sort 0} +do_test where7-2.232.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=212 + OR b=121 + OR c=2002 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='jihgfed' AND f GLOB 'xyzab*') + } +} {4 5 6 11 75 84 86 scan 0 sort 0} +do_test where7-2.233.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=55.0 AND d<56.0 AND d NOT NULL) + OR f='abcdefghi' + OR b=267 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR a=82 + OR a=54 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=1078 + } +} {16 20 26 52 54 55 78 82 98 scan 0 sort 0} +do_test where7-2.233.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=55.0 AND d<56.0 AND d NOT NULL) + OR f='abcdefghi' + OR b=267 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR a=82 + OR a=54 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=1078 + } +} {16 20 26 52 54 55 78 82 98 scan 0 sort 0} +do_test where7-2.234.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=93.0 AND d<94.0 AND d NOT NULL) + OR f='hijklmnop' + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + } +} {7 33 34 59 85 93 scan 0 sort 0} +do_test where7-2.234.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=93.0 AND d<94.0 AND d NOT NULL) + OR f='hijklmnop' + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + } +} {7 33 34 59 85 93 scan 0 sort 0} +do_test where7-2.235.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 98 AND 100) AND a!=99) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR a=18 + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR 1000000=94.0 AND d<95.0 AND d NOT NULL) + OR 1000000=89.0 AND d<90.0 AND d NOT NULL) + } +} {7 33 59 85 89 91 scan 0 sort 0} +do_test where7-2.236.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1001 + OR b=168 + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + } +} {7 33 59 85 89 91 scan 0 sort 0} +do_test where7-2.237.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=51 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR 
b=330 + } +} {30 51 96 98 scan 0 sort 0} +do_test where7-2.237.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=51 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR b=330 + } +} {30 51 96 98 scan 0 sort 0} +do_test where7-2.238.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=704 + OR a=62 + OR f='pqrstuvwx' + OR b=495 + OR c=26026 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b<0 + OR b=597 + } +} {15 41 45 62 64 67 68 71 76 77 78 93 scan 0 sort 0} +do_test where7-2.238.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=704 + OR a=62 + OR f='pqrstuvwx' + OR b=495 + OR c=26026 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b<0 + OR b=597 + } +} {15 41 45 62 64 67 68 71 76 77 78 93 scan 0 sort 0} +do_test where7-2.239.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR b=520 + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR f IS NULL + } +} {2 47 49 87 89 scan 0 sort 0} +do_test where7-2.239.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR b=520 + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR f IS NULL + } +} {2 47 49 87 89 scan 0 sort 0} +do_test where7-2.240.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=14014 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=572 + OR c=15015 + } +} {40 41 42 43 44 45 52 95 scan 0 sort 0} +do_test where7-2.240.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=14014 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=572 + OR c=15015 + } +} {40 41 42 43 44 45 52 95 scan 0 sort 0} +do_test where7-2.241.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=850 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR b=88 + OR f='hijklmnop' + OR b=806 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=88 + } +} {3 7 8 15 17 29 33 46 55 59 65 81 85 scan 0 sort 0} +do_test where7-2.241.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=850 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR b=88 + OR f='hijklmnop' + OR b=806 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=88 + } +} {3 7 8 15 17 29 33 46 55 59 65 81 85 scan 0 sort 0} +do_test where7-2.242.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=817 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR a=36 + OR b=960 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR b=374 + OR b=938 + OR b=773 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + } +} {34 36 55 58 63 77 scan 0 sort 0} +do_test where7-2.242.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=817 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR a=36 + OR b=960 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR b=374 + OR b=938 + OR b=773 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + } +} {34 36 55 58 63 77 scan 0 sort 0} +do_test where7-2.243.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=146 + } +} {69 scan 0 sort 0} +do_test where7-2.243.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=146 + } +} {69 scan 0 sort 0} +do_test where7-2.244.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='pqrstuvwx' + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR b=704 + OR a=18 + } 
+} {6 8 15 18 41 64 67 76 78 93 scan 0 sort 0} +do_test where7-2.244.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='pqrstuvwx' + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR b=704 + OR a=18 + } +} {6 8 15 18 41 64 67 76 78 93 scan 0 sort 0} +do_test where7-2.245.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=399 + OR b=1004 + OR c=16016 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=671 + OR a=25 + OR a=30 + OR a=8 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + } +} {5 8 19 25 30 31 45 46 47 48 61 71 97 scan 0 sort 0} +do_test where7-2.245.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=399 + OR b=1004 + OR c=16016 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=671 + OR a=25 + OR a=30 + OR a=8 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + } +} {5 8 19 25 30 31 45 46 47 48 61 71 97 scan 0 sort 0} +do_test where7-2.246.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=561 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=594 + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=861 + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR b=949 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + } +} {18 28 30 39 41 51 54 90 scan 0 sort 0} +do_test where7-2.246.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=561 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=594 + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=861 + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR b=949 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + } +} {18 28 30 39 41 51 54 90 scan 0 sort 0} +do_test where7-2.247.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=83 + OR c=26026 + OR a=49 + OR a=57 + OR c=23023 + OR f='uvwxyzabc' + } +} {7 20 46 49 57 67 68 69 72 76 77 78 83 98 scan 0 sort 0} +do_test where7-2.247.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=83 + OR c=26026 + OR a=49 + OR a=57 + OR c=23023 + OR f='uvwxyzabc' + } +} {7 20 46 49 57 67 68 69 72 76 77 78 83 98 scan 0 sort 0} +do_test where7-2.248.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE d>1e10 + OR b=355 + OR f='stuvwxyza' + OR b=22 + } +} {2 18 44 70 96 scan 0 sort 0} +do_test where7-2.248.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE d>1e10 + OR b=355 + OR f='stuvwxyza' + OR b=22 + } +} {2 18 44 70 96 scan 0 sort 0} +do_test where7-2.249.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=451 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + } +} {8 34 41 60 86 scan 0 sort 0} +do_test where7-2.249.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=451 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + } +} {8 34 41 60 86 scan 0 sort 0} +do_test where7-2.250.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=47 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 27 53 79 scan 0 sort 0} +do_test where7-2.250.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=47 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 27 53 79 scan 0 sort 0} +do_test where7-2.251.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1037 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=344 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {12 66 68 86 scan 0 sort 0} +do_test where7-2.251.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1037 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=344 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} 
{12 66 68 86 scan 0 sort 0} +do_test where7-2.252.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=506 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=429 + OR b=275 + } +} {20 22 25 39 46 86 scan 0 sort 0} +do_test where7-2.252.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=506 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=429 + OR b=275 + } +} {20 22 25 39 46 86 scan 0 sort 0} +do_test where7-2.253.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR a=28 + OR b=443 + OR b=363 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR a=60 + OR b=80 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=616 + } +} {28 33 47 56 60 62 scan 0 sort 0} +do_test where7-2.253.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR a=28 + OR b=443 + OR b=363 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR a=60 + OR b=80 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=616 + } +} {28 33 47 56 60 62 scan 0 sort 0} +do_test where7-2.254.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=660 + } +} {33 60 scan 0 sort 0} +do_test where7-2.254.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=660 + } +} {33 60 scan 0 sort 0} +do_test where7-2.255.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=43 + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=586 + OR c=17017 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR a=87 + OR b=968 + } +} {7 21 43 47 49 50 51 64 66 73 87 88 99 scan 0 sort 0} +do_test where7-2.255.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=43 + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=586 + OR c=17017 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR a=87 + OR b=968 + } +} {7 21 43 47 49 50 51 64 66 73 87 88 99 scan 0 sort 0} +do_test where7-2.256.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='klmnopqrs' + OR b=982 + OR b=575 + OR b=110 + OR b=99 + } +} {9 10 36 62 88 scan 0 sort 0} +do_test where7-2.256.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='klmnopqrs' + OR b=982 + OR b=575 + OR b=110 + OR b=99 + } +} {9 10 36 62 88 scan 0 sort 0} +do_test where7-2.257.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='rqponml' AND f GLOB 'jklmn*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR c>=34035 + OR b=850 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=924 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=355 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {4 32 34 35 37 56 78 84 86 scan 0 sort 0} +do_test where7-2.257.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='rqponml' AND f GLOB 'jklmn*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR c>=34035 + OR b=850 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=924 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=355 + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {4 32 34 35 37 56 78 84 86 scan 0 sort 0} +do_test where7-2.258.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=982 + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR b=374 + } +} {34 46 81 83 scan 0 sort 0} +do_test where7-2.258.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE 
(d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=982 + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR b=374 + } +} {34 46 81 83 scan 0 sort 0} +do_test where7-2.259.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 87 AND 89) AND a!=88) + OR b=814 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + } +} {19 74 87 89 scan 0 sort 0} +do_test where7-2.259.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 87 AND 89) AND a!=88) + OR b=814 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + } +} {19 74 87 89 scan 0 sort 0} +do_test where7-2.260.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'nopqr*') + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=993 + } +} {12 39 scan 0 sort 0} +do_test where7-2.260.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'nopqr*') + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=993 + } +} {12 39 scan 0 sort 0} +do_test where7-2.261.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=234 + OR a=22 + OR b=289 + OR b=795 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR b=242 + OR a=59 + OR b=1045 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + } +} {8 22 59 91 95 scan 0 sort 0} +do_test where7-2.261.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=234 + OR a=22 + OR b=289 + OR b=795 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR b=242 + OR a=59 + OR b=1045 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + } +} {8 22 59 91 95 scan 0 sort 0} +do_test where7-2.262.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=245 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR c=3003 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR ((a BETWEEN 71 AND 73) AND a!=72) + } +} {1 7 8 9 10 26 33 52 68 70 71 73 78 scan 0 sort 0} +do_test where7-2.262.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=245 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR c=3003 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR ((a BETWEEN 71 AND 73) AND a!=72) + } +} {1 7 8 9 10 26 33 52 68 70 71 73 78 scan 0 sort 0} +do_test where7-2.263.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=220 + OR b=443 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR a=62 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR b=1023 + OR a=100 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {18 20 44 55 62 70 87 93 96 97 100 scan 0 sort 0} +do_test where7-2.263.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=220 + OR b=443 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR a=62 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR b=1023 + OR a=100 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {18 20 44 55 62 70 87 93 96 97 100 scan 0 sort 0} +do_test where7-2.264.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=11011 + OR f='tuvwxyzab' + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {19 31 32 33 45 47 71 84 97 scan 0 sort 0} +do_test where7-2.264.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=11011 + OR f='tuvwxyzab' + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {19 31 32 33 45 47 71 84 97 scan 0 sort 0} +do_test where7-2.265.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 93 AND 95) AND a!=94) + OR a=79 + OR 
(d>=39.0 AND d<40.0 AND d NOT NULL) + OR b=462 + } +} {39 42 79 93 95 scan 0 sort 0} +do_test where7-2.265.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 93 AND 95) AND a!=94) + OR a=79 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR b=462 + } +} {39 42 79 93 95 scan 0 sort 0} +do_test where7-2.266.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=146 + OR 1000000<b + } +} {33 scan 0 sort 0} +do_test where7-2.266.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=146 + OR 1000000<b + } +} {33 scan 0 sort 0} +do_test where7-2.267.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=935 + OR b=473 + OR a=28 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR a=62 + OR b=619 + OR a=82 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR c=14014 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {20 28 40 41 42 43 62 64 67 82 85 scan 0 sort 0} +do_test where7-2.267.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=935 + OR b=473 + OR a=28 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR a=62 + OR b=619 + OR a=82 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR c=14014 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {20 28 40 41 42 43 62 64 67 82 85 scan 0 sort 0} +do_test where7-2.268.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=443 + OR b=33 + OR b=762 + OR b=575 + OR c=16016 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR b=1092 + } +} {3 40 41 43 46 47 48 72 scan 0 sort 0} +do_test where7-2.268.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=443 + OR b=33 + OR b=762 + OR b=575 + OR c=16016 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR b=1092 + } +} {3 40 41 43 46 47 48 72 scan 0 sort 0} +do_test where7-2.269.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=806 + OR b=872 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR f='uvwxyzabc' + OR b=748 + OR b=586 + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR (g='gfedcba' AND f GLOB 'klmno*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=891 + } +} {15 17 20 32 34 46 68 72 80 81 88 98 scan 0 sort 0} +do_test where7-2.269.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=806 + OR b=872 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR f='uvwxyzabc' + OR b=748 + OR b=586 + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR (g='gfedcba' AND f GLOB 'klmno*') + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=891 + } +} {15 17 20 32 34 46 68 72 80 81 88 98 scan 0 sort 0} +do_test where7-2.270.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=693 + OR f='fghijklmn' + OR (g='rqponml' AND f GLOB 'hijkl*') + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR a=96 + } +} {5 31 33 39 57 63 71 73 83 96 scan 0 sort 0} +do_test where7-2.270.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=693 + OR f='fghijklmn' + OR (g='rqponml' AND f GLOB 'hijkl*') + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR a=96 + } +} {5 31 33 39 57 63 71 73 83 96 scan 0 sort 0} +do_test where7-2.271.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=451 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR a=84 + } +} {41 84 86 96 97 98 99 scan 0 sort 0} +do_test where7-2.271.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=451 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR ((a BETWEEN
97 AND 99) AND a!=98) + OR a=84 + } +} {41 84 86 96 97 98 99 scan 0 sort 0} +do_test where7-2.272.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=75 + OR b=960 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + OR b=616 + OR b=330 + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR a=26 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {16 18 24 26 30 53 56 63 72 75 scan 0 sort 0} +do_test where7-2.272.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=75 + OR b=960 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + OR b=616 + OR b=330 + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR a=26 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {16 18 24 26 30 53 56 63 72 75 scan 0 sort 0} +do_test where7-2.273.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=762 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {53 scan 0 sort 0} +do_test where7-2.273.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=762 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {53 scan 0 sort 0} +do_test where7-2.274.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=14 + OR a=23 + OR b=748 + OR b=407 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=979 + OR ((a BETWEEN 15 AND 17) AND a!=16) + } +} {4 15 17 23 37 68 87 89 scan 0 sort 0} +do_test where7-2.274.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=14 + OR a=23 + OR b=748 + OR b=407 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=979 + OR ((a BETWEEN 15 AND 17) AND a!=16) + } +} {4 15 17 23 37 68 87 89 scan 0 sort 0} +do_test where7-2.275.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 36 AND 38) AND a!=37) + OR a=92 + } +} {36 38 92 scan 0 sort 0} +do_test where7-2.275.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 36 AND 38) AND a!=37) + OR a=92 + } +} {36 38 92 scan 0 sort 0} +do_test where7-2.276.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=14014 + OR b=927 + OR b=176 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=220 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + OR a=4 + } +} {4 16 20 24 34 36 40 41 42 scan 0 sort 0} +do_test where7-2.276.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=14014 + OR b=927 + OR b=176 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=220 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + OR a=4 + } +} {4 16 20 24 34 36 40 41 42 scan 0 sort 0} +do_test where7-2.277.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=29 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=979 + OR b=275 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + OR b=539 + OR a=87 + } +} {19 25 29 41 49 56 58 87 89 scan 0 sort 0} +do_test where7-2.277.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=29 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=979 + OR b=275 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + OR b=539 + OR a=87 + } +} {19 25 29 41 49 56 58 87 89 scan 0 sort 0} +do_test where7-2.278.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 43 AND 45) AND a!=44) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR f='fghijklmn' + OR (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=74 + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {4 5 6 7 9 31 36 43 45 57 59 69 74 83 scan 0 sort 0} +do_test where7-2.278.2 { + count_steps_sort { + SELECT a 
FROM t3 + WHERE ((a BETWEEN 43 AND 45) AND a!=44) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR f='fghijklmn' + OR (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=74 + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {4 5 6 7 9 31 36 43 45 57 59 69 74 83 scan 0 sort 0} +do_test where7-2.279.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 80 AND 82) AND a!=81) + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + } +} {8 34 42 49 51 60 79 80 82 86 scan 0 sort 0} +do_test where7-2.279.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 80 AND 82) AND a!=81) + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + } +} {8 34 42 49 51 60 79 80 82 86 scan 0 sort 0} +do_test where7-2.280.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 58 AND 60) AND a!=59) + OR b=696 + OR f='tuvwxyzab' + OR b=374 + OR b=110 + OR a=90 + } +} {10 19 34 45 58 60 71 90 97 scan 0 sort 0} +do_test where7-2.280.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 58 AND 60) AND a!=59) + OR b=696 + OR f='tuvwxyzab' + OR b=374 + OR b=110 + OR a=90 + } +} {10 19 34 45 58 60 71 90 97 scan 0 sort 0} +do_test where7-2.281.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR c=23023 + OR b=377 + OR b=858 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {6 57 67 68 69 78 scan 0 sort 0} +do_test where7-2.281.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR c=23023 + OR b=377 + OR b=858 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {6 57 67 68 69 78 scan 0 sort 0} +do_test where7-2.282.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=322 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'pqrst*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR b=432 + OR b=55 + OR a=53 + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=25 + } +} {5 7 19 33 38 48 53 59 74 85 93 scan 0 sort 0} +do_test where7-2.282.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=322 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'pqrst*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR b=432 + OR b=55 + OR a=53 + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=25 + } +} {5 7 19 33 38 48 53 59 74 85 93 scan 0 sort 0} +do_test where7-2.283.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=484 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=616 + OR c=5005 + OR ((a BETWEEN 27 AND 29) AND a!=28) + } +} {13 14 15 27 29 44 56 74 scan 0 sort 0} +do_test where7-2.283.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=484 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=616 + OR c=5005 + OR ((a BETWEEN 27 AND 29) AND a!=28) + } +} {13 14 15 27 29 44 56 74 scan 0 sort 0} +do_test where7-2.284.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=916 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=1048 + OR c=6006 + OR b=762 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR b=751 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + } +} {13 14 16 17 18 39 40 59 61 65 66 73 91 92 scan 0 sort 0} +do_test 
where7-2.284.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=916 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=1048 + OR c=6006 + OR b=762 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR b=751 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + } +} {13 14 16 17 18 39 40 59 61 65 66 73 91 92 scan 0 sort 0} +do_test where7-2.285.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=32.0 AND d<33.0 AND d NOT NULL) + OR b=927 + OR b=275 + OR b=396 + OR c=4004 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR b=319 + OR ((a BETWEEN 83 AND 85) AND a!=84) + OR a=3 + OR ((a BETWEEN 73 AND 75) AND a!=74) + } +} {3 10 11 12 14 25 29 32 36 73 75 83 85 scan 0 sort 0} +do_test where7-2.285.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=32.0 AND d<33.0 AND d NOT NULL) + OR b=927 + OR b=275 + OR b=396 + OR c=4004 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR b=319 + OR ((a BETWEEN 83 AND 85) AND a!=84) + OR a=3 + OR ((a BETWEEN 73 AND 75) AND a!=74) + } +} {3 10 11 12 14 25 29 32 36 73 75 83 85 scan 0 sort 0} +do_test where7-2.286.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'lmnop*') + OR b=718 + OR f='vwxyzabcd' + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } +} {9 11 19 21 22 35 45 47 61 66 68 71 73 87 97 98 99 scan 0 sort 0} +do_test where7-2.286.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'lmnop*') + OR b=718 + OR f='vwxyzabcd' + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } +} {9 11 19 21 22 35 45 47 61 66 68 71 73 87 97 98 99 scan 0 sort 0} +do_test where7-2.287.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=187 + OR b=1056 + OR b=861 + OR b=1081 + OR b=572 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=11 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR a=89 + OR b=421 + } +} {4 11 17 52 89 96 99 scan 0 sort 0} +do_test where7-2.287.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=187 + OR b=1056 + OR b=861 + OR b=1081 + OR b=572 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=11 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR a=89 + OR b=421 + } +} {4 11 17 52 89 96 99 scan 0 sort 0} +do_test where7-2.288.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=231 + OR b=388 + OR d<0.0 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR b=1045 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + } +} {12 21 39 95 scan 0 sort 0} +do_test where7-2.288.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=231 + OR b=388 + OR d<0.0 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR b=1045 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + } +} {12 21 39 95 scan 0 sort 0} +do_test where7-2.289.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=528 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=762 + } +} {48 53 scan 0 sort 0} +do_test where7-2.289.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=528 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=762 + } +} {48 53 scan 0 sort 0} +do_test where7-2.290.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='stuvwxyza' 
+ OR ((a BETWEEN 90 AND 92) AND a!=91) + OR b=916 + } +} {18 44 70 90 92 96 scan 0 sort 0} +do_test where7-2.290.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='stuvwxyza' + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR b=916 + } +} {18 44 70 90 92 96 scan 0 sort 0} +do_test where7-2.291.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + } +} {4 19 52 76 96 98 scan 0 sort 0} +do_test where7-2.291.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + } +} {4 19 52 76 96 98 scan 0 sort 0} +do_test where7-2.292.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=102 + OR c=6006 + OR b=231 + OR b=212 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'klmno*') + OR c=30030 + OR (g='onmlkji' AND f GLOB 'abcde*') + } +} {16 17 18 21 36 52 88 89 90 scan 0 sort 0} +do_test where7-2.292.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=102 + OR c=6006 + OR b=231 + OR b=212 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'klmno*') + OR c=30030 + OR (g='onmlkji' AND f GLOB 'abcde*') + } +} {16 17 18 21 36 52 88 89 90 scan 0 sort 0} +do_test where7-2.293.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=47 + OR a=82 + OR c=25025 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR f='qrstuvwxy' + OR a=5 + } +} {5 16 40 42 47 68 73 74 75 82 94 scan 0 sort 0} +do_test where7-2.293.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=47 + OR a=82 + OR c=25025 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR f='qrstuvwxy' + OR a=5 + } +} {5 16 40 42 47 68 73 74 75 82 94 scan 0 sort 0} +do_test where7-2.294.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=429 + OR a=30 + OR f='vwxyzabcd' + OR b=762 + OR a=60 + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + } +} {4 21 30 39 47 60 73 99 scan 0 sort 0} +do_test where7-2.294.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=429 + OR a=30 + OR f='vwxyzabcd' + OR b=762 + OR a=60 + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + } +} {4 21 30 39 47 60 73 99 scan 0 sort 0} +do_test where7-2.295.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'ghijk*') + OR a=3 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=498 + OR a=100 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR a=69 + } +} {3 13 31 39 58 63 65 69 91 100 scan 0 sort 0} +do_test where7-2.295.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'ghijk*') + OR a=3 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=498 + OR a=100 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR a=69 + } +} {3 13 31 39 58 63 65 69 91 100 scan 0 sort 0} +do_test where7-2.296.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR b=300 + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR b=58 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR b=286 + OR b=234 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR f='ghijklmno' + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + } +} {6 7 26 32 
43 45 55 57 58 82 84 scan 0 sort 0} +do_test where7-2.296.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR b=300 + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR b=58 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR b=286 + OR b=234 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR f='ghijklmno' + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + } +} {6 7 26 32 43 45 55 57 58 82 84 scan 0 sort 0} +do_test where7-2.297.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=95 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=594 + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {5 7 18 20 23 25 31 33 37 39 45 53 54 55 56 57 58 59 72 74 83 85 95 scan 99 sort 0} +do_test where7-2.297.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=95 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=594 + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {5 7 18 20 23 25 31 33 37 39 45 53 54 55 56 57 58 59 72 74 83 85 95 scan 0 sort 0} +do_test where7-2.298.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=949 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR (g='vutsrqp' AND f GLOB 'opqrs*') + } +} {5 14 scan 0 sort 0} +do_test where7-2.298.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=949 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR (g='vutsrqp' AND f GLOB 'opqrs*') + } +} {5 14 scan 0 sort 0} +do_test where7-2.299.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=960 + OR a=44 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR a=39 + OR b=828 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR d<0.0 + OR b=770 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR b=594 + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {3 5 6 18 39 44 54 70 89 91 96 scan 0 sort 0} +do_test where7-2.299.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=960 + OR a=44 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR a=39 + OR b=828 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR d<0.0 + OR b=770 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR b=594 + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {3 5 6 18 39 44 54 70 89 91 96 scan 0 sort 0} +do_test where7-2.300.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR b=198 + OR a=51 + OR b=1056 + OR b=748 + OR ((a BETWEEN 9 AND 11) AND a!=10) + } +} {9 11 18 40 42 51 68 96 scan 0 sort 0} +do_test where7-2.300.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR b=198 + OR a=51 + OR b=1056 + OR b=748 + OR ((a BETWEEN 9 AND 11) AND a!=10) + } +} {9 11 18 40 42 51 68 96 scan 0 sort 0} +do_test where7-2.301.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1081 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=1004 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR ((a BETWEEN 29 AND 31) AND a!=30) + OR b=660 + OR b=957 + OR b=869 + } +} {29 31 60 66 68 79 87 91 scan 0 sort 0} +do_test where7-2.301.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1081 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=1004 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR 
((a BETWEEN 29 AND 31) AND a!=30) + OR b=660 + OR b=957 + OR b=869 + } +} {29 31 60 66 68 79 87 91 scan 0 sort 0} +do_test where7-2.302.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=927 + OR c=12012 + OR f='yzabcdefg' + OR b=880 + OR a=63 + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (g='hgfedcb' AND f GLOB 'ijklm*') + } +} {24 34 35 36 44 50 58 63 76 80 86 scan 0 sort 0} +do_test where7-2.302.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=927 + OR c=12012 + OR f='yzabcdefg' + OR b=880 + OR a=63 + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR (g='hgfedcb' AND f GLOB 'ijklm*') + } +} {24 34 35 36 44 50 58 63 76 80 86 scan 0 sort 0} +do_test where7-2.303.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=69 + OR b=1103 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='wxyzabcde' + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR (g='gfedcba' AND f GLOB 'klmno*') + OR f='pqrstuvwx' + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR a=59 + OR b=946 + } +} {15 18 22 26 41 44 48 52 59 67 69 70 73 74 78 86 88 93 96 100 scan 0 sort 0} +do_test where7-2.303.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=69 + OR b=1103 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='wxyzabcde' + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR (g='gfedcba' AND f GLOB 'klmno*') + OR f='pqrstuvwx' + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR a=59 + OR b=946 + } +} {15 18 22 26 41 44 48 52 59 67 69 70 73 74 78 86 88 93 96 100 scan 0 sort 0} +do_test where7-2.304.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR a=68 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 47 68 75 scan 0 sort 0} +do_test where7-2.304.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR a=68 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 47 68 75 scan 0 sort 0} +do_test where7-2.305.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'lmnop*') + } +} {10 63 scan 0 sort 0} +do_test where7-2.305.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'lmnop*') + } +} {10 63 scan 0 sort 0} +do_test where7-2.306.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=32 + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR c=7007 + OR b=968 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + } +} {15 17 18 19 20 21 32 86 88 92 94 scan 0 sort 0} +do_test where7-2.306.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=32 + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR c=7007 + OR b=968 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + } +} {15 17 18 19 20 21 32 86 88 92 94 scan 0 sort 0} +do_test where7-2.307.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='cdefghijk' + OR b=1103 + } +} {2 28 54 80 scan 0 sort 0} +do_test where7-2.307.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='cdefghijk' + OR b=1103 + } +} {2 28 54 80 scan 0 sort 0} +do_test where7-2.308.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR c=14014 + OR b=990 + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR c=14014 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=740 + OR c=3003 + } +} {7 8 9 13 14 
21 23 40 41 42 56 90 scan 0 sort 0} +do_test where7-2.308.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR c=14014 + OR b=990 + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR c=14014 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=740 + OR c=3003 + } +} {7 8 9 13 14 21 23 40 41 42 56 90 scan 0 sort 0} +do_test where7-2.309.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR a=67 + OR b=135 + OR f='bcdefghij' + OR b=924 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + } +} {1 22 27 53 60 67 79 84 scan 0 sort 0} +do_test where7-2.309.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR a=67 + OR b=135 + OR f='bcdefghij' + OR b=924 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + } +} {1 22 27 53 60 67 79 84 scan 0 sort 0} +do_test where7-2.310.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=96 + OR a=13 + } +} {13 96 scan 0 sort 0} +do_test where7-2.310.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=96 + OR a=13 + } +} {13 96 scan 0 sort 0} +do_test where7-2.311.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 34 AND 36) AND a!=35) + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 35 AND 37) AND a!=36) + OR a=49 + OR a=38 + OR b=157 + OR a=4 + OR b=311 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=396 + } +} {4 27 34 35 36 37 38 49 50 97 99 scan 0 sort 0} +do_test where7-2.311.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 34 AND 36) AND a!=35) + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 35 AND 37) AND a!=36) + OR a=49 + OR a=38 + OR b=157 + OR a=4 + OR b=311 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=396 + } +} {4 27 34 35 36 37 38 49 50 97 99 scan 0 sort 0} +do_test where7-2.312.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=82 + OR b=333 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR b=99 + OR a=63 + OR a=35 + OR b=176 + } +} {9 16 22 35 48 63 74 82 100 scan 0 sort 0} +do_test where7-2.312.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=82 + OR b=333 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR b=99 + OR a=63 + OR a=35 + OR b=176 + } +} {9 16 22 35 48 63 74 82 100 scan 0 sort 0} +do_test where7-2.313.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=90 + OR a=81 + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR f='mnopqrstu' + OR b=927 + OR b=311 + OR a=34 + OR b=715 + OR f='rstuvwxyz' + } +} {12 17 34 38 43 51 53 64 65 69 81 90 95 scan 0 sort 0} +do_test where7-2.313.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=90 + OR a=81 + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR f='mnopqrstu' + OR b=927 + OR b=311 + OR a=34 + OR b=715 + OR f='rstuvwxyz' + } +} {12 17 34 38 43 51 53 64 65 69 81 90 95 scan 0 sort 0} +do_test where7-2.314.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=484 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='lmnopqrst' + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR b<0 + OR b=231 + OR a=14 + } +} {7 10 11 12 14 21 37 39 44 63 64 89 scan 0 sort 0} +do_test where7-2.314.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=484 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='lmnopqrst' + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR b<0 + OR b=231 + OR a=14 + } +} {7 10 11 12 14 21 37 39 44 63 64 89 scan 0 sort 0} 
+do_test where7-2.315.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=905 + OR f='hijklmnop' + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR b=817 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + } +} {7 11 20 22 26 33 37 45 59 63 80 85 89 scan 0 sort 0} +do_test where7-2.315.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=905 + OR f='hijklmnop' + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR b=817 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + } +} {7 11 20 22 26 33 37 45 59 63 80 85 89 scan 0 sort 0} +do_test where7-2.316.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=311 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=48 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR c=32032 + OR f='opqrstuvw' + OR b=300 + OR b=1001 + OR ((a BETWEEN 94 AND 96) AND a!=95) + } +} {14 40 43 47 48 61 66 85 91 92 94 95 96 scan 0 sort 0} +do_test where7-2.316.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=311 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=48 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR c=32032 + OR f='opqrstuvw' + OR b=300 + OR b=1001 + OR ((a BETWEEN 94 AND 96) AND a!=95) + } +} {14 40 43 47 48 61 66 85 91 92 94 95 96 scan 0 sort 0} +do_test where7-2.317.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=95.0 AND d<96.0 AND d NOT NULL) + OR b=1070 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR a=22 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR f='tuvwxyzab' + OR a=72 + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {11 19 22 45 53 55 61 71 72 95 97 99 scan 0 sort 0} +do_test where7-2.317.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=95.0 AND d<96.0 AND d NOT NULL) + OR b=1070 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR a=22 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR f='tuvwxyzab' + OR a=72 + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {11 19 22 45 53 55 61 71 72 95 97 99 scan 0 sort 0} +do_test where7-2.318.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR a=21 + OR b=1026 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=473 + } +} {8 16 21 34 36 43 scan 0 sort 0} +do_test where7-2.318.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR a=21 + OR b=1026 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=473 + } +} {8 16 21 34 36 43 scan 0 sort 0} +do_test where7-2.319.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=100 + OR a=29 + OR c=15015 + OR a=87 + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {29 43 44 45 71 73 87 88 100 scan 0 sort 0} +do_test where7-2.319.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=100 + OR a=29 + OR c=15015 + OR a=87 + OR 
(g='gfedcba' AND f GLOB 'klmno*') + } +} {29 43 44 45 71 73 87 88 100 scan 0 sort 0} +do_test where7-2.320.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=1.0 AND d<2.0 AND d NOT NULL) + OR b=542 + OR b=638 + } +} {1 58 scan 0 sort 0} +do_test where7-2.320.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=1.0 AND d<2.0 AND d NOT NULL) + OR b=542 + OR b=638 + } +} {1 58 scan 0 sort 0} +do_test where7-2.321.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 5 AND 7) AND a!=6) + OR b=1070 + OR a=91 + OR b=1015 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR ((a BETWEEN 91 AND 93) AND a!=92) + } +} {5 7 12 80 91 93 scan 0 sort 0} +do_test where7-2.321.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 5 AND 7) AND a!=6) + OR b=1070 + OR a=91 + OR b=1015 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR ((a BETWEEN 91 AND 93) AND a!=92) + } +} {5 7 12 80 91 93 scan 0 sort 0} +do_test where7-2.322.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=7 + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=1015 + OR b=839 + OR (g='rqponml' AND f GLOB 'klmno*') + OR b=410 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR a=71 + } +} {1 2 7 28 36 54 71 80 scan 0 sort 0} +do_test where7-2.322.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=7 + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=1015 + OR b=839 + OR (g='rqponml' AND f GLOB 'klmno*') + OR b=410 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR a=71 + } +} {1 2 7 28 36 54 71 80 scan 0 sort 0} +do_test where7-2.323.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=880 + OR b=982 + OR a=52 + OR (g='onmlkji' AND f GLOB 'abcde*') + OR a=24 + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {24 47 49 50 52 60 76 80 scan 0 sort 0} +do_test where7-2.323.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=880 + OR b=982 + OR a=52 + OR (g='onmlkji' AND f GLOB 'abcde*') + OR a=24 + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + } +} {24 47 49 50 52 60 76 80 scan 0 sort 0} +do_test where7-2.324.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 22 31 57 67 69 83 scan 0 sort 0} +do_test where7-2.324.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 22 31 57 67 69 83 scan 0 sort 0} +do_test where7-2.325.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='abcdefghi' + OR a=5 + OR b=124 + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=432 + OR 1000000<c + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=77 + OR b=605 + } +} {5 7 26 45 52 55 58 69 78 scan 0 sort 0} +do_test where7-2.325.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='abcdefghi' + OR a=5 + OR b=124 + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=432 + OR 1000000<c + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=77 + OR b=605 + } +} {5 7 26 45 52 55 58 69 78 scan 0 sort 0} +do_test where7-2.326.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=583 + OR a=62 + } +} {53 62 89 scan 0 sort 0} +do_test where7-2.326.2 {
+ count_steps_sort { + SELECT a FROM t3 + WHERE (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=583 + OR a=62 + } +} {53 62 89 scan 0 sort 0} +do_test where7-2.327.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR f='pqrstuvwx' + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR b=278 + OR a=10 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR f='uvwxyzabc' + } +} {5 10 15 20 28 41 46 54 63 65 67 68 72 84 86 93 98 scan 0 sort 0} +do_test where7-2.327.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR f='pqrstuvwx' + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR b=278 + OR a=10 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR f='uvwxyzabc' + } +} {5 10 15 20 28 41 46 54 63 65 67 68 72 84 86 93 98 scan 0 sort 0} +do_test where7-2.328.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 57 AND 59) AND a!=58) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=564 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR b=77 + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR b=968 + OR b=847 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {7 14 40 56 57 58 59 66 77 85 88 90 92 scan 0 sort 0} +do_test where7-2.328.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 57 AND 59) AND a!=58) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=564 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR b=77 + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR b=968 + OR b=847 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (g='lkjihgf' AND f GLOB 'opqrs*') + } +} {7 14 40 56 57 58 59 66 77 85 88 90 92 scan 0 sort 0} +do_test where7-2.329.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=539 + OR b=594 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR f='abcdefghi' + OR a=6 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=762 + } +} {6 17 26 49 52 54 63 65 78 scan 0 sort 0} +do_test where7-2.329.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=539 + OR b=594 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR f='abcdefghi' + OR a=6 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=762 + } +} {6 17 26 49 52 54 63 65 78 scan 0 sort 0} +do_test where7-2.330.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=869 + OR b=630 + } +} {79 scan 0 sort 0} +do_test where7-2.330.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=869 + OR b=630 + } +} {79 scan 0 sort 0} +do_test where7-2.331.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR b=693 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=968 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR b=132 + OR f='nopqrstuv' + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {12 13 28 30 39 63 65 72 86 88 91 scan 0 sort 0} +do_test where7-2.331.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR b=693 + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR b=968 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR b=132 + OR f='nopqrstuv' + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {12 13 28 30 39 63 65 72 86 88 91 scan 0 sort 0} +do_test where7-2.332.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=32032 + OR b=814 + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR b=814 + OR a=78 + OR a=37 + } +} {37 74 78 90 94 95 96 scan 0 sort 0} +do_test 
where7-2.332.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=32032 + OR b=814 + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR b=814 + OR a=78 + OR a=37 + } +} {37 74 78 90 94 95 96 scan 0 sort 0} +do_test where7-2.333.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=190 + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=924 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=759 + OR (g='yxwvuts' AND f GLOB 'bcdef*') + } +} {1 40 59 69 84 scan 0 sort 0} +do_test where7-2.333.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=190 + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=924 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=759 + OR (g='yxwvuts' AND f GLOB 'bcdef*') + } +} {1 40 59 69 84 scan 0 sort 0} +do_test where7-2.334.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=97 + OR b=201 + OR b=597 + OR a=6 + OR f='cdefghijk' + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=300 + OR b=693 + OR b=333 + OR b=740 + } +} {2 6 28 54 63 74 76 80 97 scan 0 sort 0} +do_test where7-2.334.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=97 + OR b=201 + OR b=597 + OR a=6 + OR f='cdefghijk' + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=300 + OR b=693 + OR b=333 + OR b=740 + } +} {2 6 28 54 63 74 76 80 97 scan 0 sort 0} +do_test where7-2.335.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=26026 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR c=17017 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {2 4 32 43 49 50 51 60 72 74 76 77 78 scan 0 sort 0} +do_test where7-2.335.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=26026 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR c=17017 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + } +} {2 4 32 43 49 50 51 60 72 74 76 77 78 scan 0 sort 0} +do_test where7-2.336.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=638 + OR b=495 + OR a=44 + OR b=374 + OR a=22 + OR c=12012 + } +} {13 15 22 34 35 36 44 45 58 70 scan 0 sort 0} +do_test where7-2.336.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=638 + OR b=495 + OR a=44 + OR b=374 + OR a=22 + OR c=12012 + } +} {13 15 22 34 35 36 44 45 58 70 scan 0 sort 0} +do_test where7-2.337.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=8008 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR b=300 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR a=41 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=135 + OR b=605 + } +} {1 2 22 23 24 39 41 49 55 100 scan 0 sort 0} +do_test where7-2.337.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=8008 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR b=300 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR a=41 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=135 + OR b=605 + } +} {1 2 22 23 24 39 41 49 55 100 scan 0 sort 0} +do_test where7-2.338.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (g='srqponm' AND 
f GLOB 'efghi*') + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=762 + OR b=484 + OR b=190 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=1023 + } +} {4 17 30 41 43 44 56 61 69 74 82 93 95 97 scan 0 sort 0} +do_test where7-2.338.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=762 + OR b=484 + OR b=190 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=1023 + } +} {4 17 30 41 43 44 56 61 69 74 82 93 95 97 scan 0 sort 0} +do_test where7-2.339.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR a=34 + OR f='rstuvwxyz' + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=729 + } +} {10 17 34 43 69 82 95 scan 0 sort 0} +do_test where7-2.339.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR a=34 + OR f='rstuvwxyz' + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=729 + } +} {10 17 34 43 69 82 95 scan 0 sort 0} +do_test where7-2.340.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=1004 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR g IS NULL + } +} {37 41 scan 0 sort 0} +do_test where7-2.340.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=1004 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR g IS NULL + } +} {37 41 scan 0 sort 0} +do_test where7-2.341.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=73 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR a=9 + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR a=44 + OR a=23 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {1 9 23 36 37 38 44 51 53 55 63 73 78 scan 0 sort 0} +do_test where7-2.341.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=73 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR a=9 + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR a=44 + OR a=23 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {1 9 23 36 37 38 44 51 53 55 63 73 78 scan 0 sort 0} +do_test where7-2.342.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=487 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR a=11 + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=13 + OR a=15 + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR a=36 + } +} {11 12 13 14 15 29 36 69 71 77 78 79 scan 0 sort 0} +do_test where7-2.342.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=487 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR a=11 + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=13 + OR a=15 + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR a=36 + } +} {11 12 13 14 15 29 36 69 71 77 78 79 scan 0 sort 0} +do_test where7-2.343.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=938 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=32.0 AND d<33.0 AND d NOT NULL) + OR b=245 + 
OR (d>=35.0 AND d<36.0 AND d NOT NULL) + } +} {32 35 54 57 59 scan 0 sort 0} +do_test where7-2.343.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=938 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=32.0 AND d<33.0 AND d NOT NULL) + OR b=245 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + } +} {32 35 54 57 59 scan 0 sort 0} +do_test where7-2.344.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1078 + OR c=19019 + OR a=38 + OR a=59 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR c=25025 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + } +} {30 32 38 51 55 56 57 59 73 74 75 76 79 95 97 98 scan 0 sort 0} +do_test where7-2.344.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1078 + OR c=19019 + OR a=38 + OR a=59 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR c=25025 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + } +} {30 32 38 51 55 56 57 59 73 74 75 76 79 95 97 98 scan 0 sort 0} +do_test where7-2.345.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='cdefghijk' + OR b=168 + OR b=561 + OR a=81 + OR a=87 + } +} {2 28 51 54 80 81 87 scan 0 sort 0} +do_test where7-2.345.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='cdefghijk' + OR b=168 + OR b=561 + OR a=81 + OR a=87 + } +} {2 28 51 54 80 81 87 scan 0 sort 0} +do_test where7-2.346.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'klmno*') + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='rqponml' AND f GLOB 'hijkl*') + OR a=48 + OR b=113 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=880 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {9 11 20 22 33 48 53 73 80 85 87 88 scan 0 sort 0} +do_test where7-2.346.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'klmno*') + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR (g='rqponml' AND f GLOB 'hijkl*') + OR a=48 + OR b=113 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=880 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {9 11 20 22 33 48 53 73 80 85 87 88 scan 0 sort 0} +do_test where7-2.347.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=517 + OR b=187 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR b=1092 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {6 17 47 84 86 scan 0 sort 0} +do_test where7-2.347.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=517 + OR b=187 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR b=1092 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + } +} {6 17 47 84 86 scan 0 sort 0} +do_test where7-2.348.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=982 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=234 + OR c=15015 + OR a=47 + OR f='qrstuvwxy' + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR b=814 + OR b=440 + OR b=454 + } +} {16 40 42 43 44 45 47 65 68 74 94 scan 0 sort 0} +do_test where7-2.348.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=982 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=234 + OR c=15015 + OR a=47 + OR f='qrstuvwxy' + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR b=814 + OR b=440 + OR b=454 + } +} {16 40 42 43 44 45 47 65 68 74 94 scan 0 sort 0} +do_test where7-2.349.1 { + count_steps_sort { + 
SELECT a FROM t2 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=7007 + OR b=429 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=231 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR c=22022 + OR f='bcdefghij' + } +} {1 19 20 21 25 26 27 39 47 53 64 65 66 79 scan 0 sort 0} +do_test where7-2.349.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=7007 + OR b=429 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=231 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR c=22022 + OR f='bcdefghij' + } +} {1 19 20 21 25 26 27 39 47 53 64 65 66 79 scan 0 sort 0} +do_test where7-2.350.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=17017 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR b=784 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR f='zabcdefgh' + } +} {16 18 22 24 25 49 50 51 54 56 62 77 88 90 scan 0 sort 0} +do_test where7-2.350.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=17017 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR b=784 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR f='zabcdefgh' + } +} {16 18 22 24 25 49 50 51 54 56 62 77 88 90 scan 0 sort 0} +do_test where7-2.351.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=344 + OR b=275 + OR c<=10 + } +} {25 scan 0 sort 0} +do_test where7-2.351.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=344 + OR b=275 + OR c<=10 + } +} {25 scan 0 sort 0} +do_test where7-2.352.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 44 AND 46) AND a!=45) + OR a=76 + OR b=154 + OR a=30 + OR c=3003 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR b=564 + OR b=55 + OR a=38 + } +} {5 7 8 9 14 23 30 38 44 46 49 75 76 88 scan 0 sort 0} +do_test where7-2.352.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 44 AND 46) AND a!=45) + OR a=76 + OR b=154 + OR a=30 + OR c=3003 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR b=564 + OR b=55 + OR a=38 + } +} {5 7 8 9 14 23 30 38 44 46 49 75 76 88 scan 0 sort 0} +do_test where7-2.353.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=52 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {52 54 66 68 scan 0 sort 0} +do_test where7-2.353.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=52 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {52 54 66 68 scan 0 sort 0} +do_test where7-2.354.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=792 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + } +} {9 72 scan 0 sort 0} +do_test where7-2.354.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=792 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + } +} {9 72 scan 0 sort 0} +do_test where7-2.355.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR c=21021 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR f='zabcdefgh' + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=781 + OR a=64 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } +} {1 11 25 51 61 62 63 64 65 71 73 77 scan 0 sort 0} +do_test where7-2.355.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR c=21021 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR f='zabcdefgh' + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR b=781 + OR a=64 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } 
+} {1 11 25 51 61 62 63 64 65 71 73 77 scan 0 sort 0} +do_test where7-2.356.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'pqrst*') + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR a=34 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR (g='srqponm' AND f GLOB 'defgh*') + OR b=319 + OR b=330 + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {28 29 30 34 36 67 90 scan 0 sort 0} +do_test where7-2.356.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'pqrst*') + OR (d>=90.0 AND d<91.0 AND d NOT NULL) + OR a=34 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR (g='srqponm' AND f GLOB 'defgh*') + OR b=319 + OR b=330 + OR ((a BETWEEN 28 AND 30) AND a!=29) + } +} {28 29 30 34 36 67 90 scan 0 sort 0} +do_test where7-2.357.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=45 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + } +} {15 41 45 67 81 93 scan 0 sort 0} +do_test where7-2.357.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=45 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + } +} {15 41 45 67 81 93 scan 0 sort 0} +do_test where7-2.358.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=53.0 AND d<54.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=165 + OR b=836 + } +} {15 53 54 76 scan 0 sort 0} +do_test where7-2.358.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=53.0 AND d<54.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=165 + OR b=836 + } +} {15 53 54 76 scan 0 sort 0} +do_test where7-2.359.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1034 + OR f='vwxyzabcd' + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + } +} {21 47 57 59 73 91 94 99 scan 0 sort 0} +do_test where7-2.359.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1034 + OR f='vwxyzabcd' + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + } +} {21 47 57 59 73 91 94 99 scan 0 sort 0} +do_test where7-2.360.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=440 + OR a=19 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=22022 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR a=92 + OR b=1026 + OR b=608 + } +} {19 40 47 64 65 66 92 scan 0 sort 0} +do_test where7-2.360.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=440 + OR a=19 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=22022 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR a=92 + OR b=1026 + OR b=608 + } +} {19 40 47 64 65 66 92 scan 0 sort 0} +do_test where7-2.361.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=37 + OR b=88 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR c=23023 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=56 + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR f='ijklmnopq' + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {8 13 15 16 22 34 37 42 56 60 67 68 69 85 86 87 94 scan 0 sort 0} +do_test where7-2.361.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=37 + OR b=88 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR c=23023 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=56 + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR f='ijklmnopq' + OR ((a BETWEEN 85 AND 87) AND a!=86) + } +} {8 13 15 16 22 34 37 42 56 60 67 68 69 85 86 87 94 scan 0 sort 0} +do_test where7-2.362.1 { + count_steps_sort { + SELECT a 
FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR a=74 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 42 AND 44) AND a!=43) + } +} {20 22 24 42 44 74 97 scan 0 sort 0} +do_test where7-2.362.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR a=74 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 42 AND 44) AND a!=43) + } +} {20 22 24 42 44 74 97 scan 0 sort 0} +do_test where7-2.363.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='uvwxyzabc' + OR b=869 + OR ((a BETWEEN 49 AND 51) AND a!=50) + } +} {20 46 49 51 72 79 98 scan 0 sort 0} +do_test where7-2.363.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='uvwxyzabc' + OR b=869 + OR ((a BETWEEN 49 AND 51) AND a!=50) + } +} {20 46 49 51 72 79 98 scan 0 sort 0} +do_test where7-2.364.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=682 + OR b=583 + OR b=685 + OR b=817 + OR ((a BETWEEN 34 AND 36) AND a!=35) + } +} {34 36 53 62 scan 0 sort 0} +do_test where7-2.364.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=682 + OR b=583 + OR b=685 + OR b=817 + OR ((a BETWEEN 34 AND 36) AND a!=35) + } +} {34 36 53 62 scan 0 sort 0} +do_test where7-2.365.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=583 + OR a=39 + OR b=627 + OR ((a BETWEEN 72 AND 74) AND a!=73) + } +} {39 53 57 72 74 scan 0 sort 0} +do_test where7-2.365.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=583 + OR a=39 + OR b=627 + OR ((a BETWEEN 72 AND 74) AND a!=73) + } +} {39 53 57 72 74 scan 0 sort 0} +do_test where7-2.366.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR b=212 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=20 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=627 + } +} {2 4 20 24 26 53 57 68 73 scan 0 sort 0} +do_test where7-2.366.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR b=212 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=20 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=627 + } +} {2 4 20 24 26 53 57 68 73 scan 0 sort 0} +do_test where7-2.367.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=157 + OR b=1026 + } +} {8 34 60 77 86 scan 0 sort 0} +do_test where7-2.367.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=157 + OR b=1026 + } +} {8 34 60 77 86 scan 0 sort 0} +do_test where7-2.368.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=553 + OR a=16 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR f='zabcdefgh' + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 11 16 25 31 33 51 67 77 80 82 scan 0 sort 0} +do_test where7-2.368.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=553 + OR a=16 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR f='zabcdefgh' + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 11 16 25 31 33 51 67 77 80 82 scan 0 sort 0} +do_test where7-2.369.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=858 + OR c=9009 + OR b=792 + OR b=88 + OR b=154 + } +} 
{8 14 25 26 27 72 78 scan 0 sort 0} +do_test where7-2.369.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=858 + OR c=9009 + OR b=792 + OR b=88 + OR b=154 + } +} {8 14 25 26 27 72 78 scan 0 sort 0} +do_test where7-2.370.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f IS NULL + OR a=37 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR b=168 + OR b=22 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=506 + } +} {2 21 37 46 48 55 57 scan 0 sort 0} +do_test where7-2.370.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f IS NULL + OR a=37 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR b=168 + OR b=22 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=506 + } +} {2 21 37 46 48 55 57 scan 0 sort 0} +do_test where7-2.371.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=29 + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=209 + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR b=146 + } +} {19 25 26 28 29 42 45 51 69 71 77 97 scan 0 sort 0} +do_test where7-2.371.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=29 + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=209 + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR b=146 + } +} {19 25 26 28 29 42 45 51 69 71 77 97 scan 0 sort 0} +do_test where7-2.372.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=63 + OR a=69 + OR b=333 + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR b=135 + OR b=25 + OR b=1037 + OR b=682 + OR c=27027 + OR a=46 + } +} {6 46 62 63 69 79 80 81 scan 0 sort 0} +do_test where7-2.372.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=63 + OR a=69 + OR b=333 + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR b=135 + OR b=25 + OR b=1037 + OR b=682 + OR c=27027 + OR a=46 + } +} {6 46 62 63 69 79 80 81 scan 0 sort 0} +do_test where7-2.373.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=113 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {40 42 52 53 55 59 61 100 scan 0 sort 0} +do_test where7-2.373.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=113 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {40 42 52 53 55 59 61 100 scan 0 sort 0} +do_test where7-2.374.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1026 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + } +} {48 scan 0 sort 0} +do_test where7-2.374.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1026 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + } +} {48 scan 0 sort 0} +do_test where7-2.375.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='rqponml' AND f GLOB 'ijklm*') + OR a=99 + OR a=100 + OR b=429 + OR b=682 + OR b=495 + OR f='efghijklm' + OR a=10 + OR f='mnopqrstu' + OR b=946 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + } +} {4 10 12 30 34 38 39 45 56 62 64 82 86 90 95 99 100 scan 0 sort 0} +do_test where7-2.375.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='rqponml' AND f GLOB 'ijklm*') + OR a=99 + OR a=100 + OR b=429 + OR b=682 + OR b=495 + OR f='efghijklm' + OR a=10 + OR f='mnopqrstu' + OR b=946 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + } +} {4 
10 12 30 34 38 39 45 56 62 64 82 86 90 95 99 100 scan 0 sort 0} +do_test where7-2.376.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=11.0 AND d<12.0 AND d NOT NULL) + OR c=23023 + OR b=462 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {11 17 19 42 67 68 69 scan 0 sort 0} +do_test where7-2.376.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=11.0 AND d<12.0 AND d NOT NULL) + OR c=23023 + OR b=462 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {11 17 19 42 67 68 69 scan 0 sort 0} +do_test where7-2.377.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=539 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR c=6006 + OR a=18 + OR c=24024 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR c=19019 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR ((a BETWEEN 44 AND 46) AND a!=45) + } +} {9 11 16 17 18 38 43 44 46 49 55 56 57 70 71 72 87 scan 0 sort 0} +do_test where7-2.377.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=539 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR c=6006 + OR a=18 + OR c=24024 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR c=19019 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR ((a BETWEEN 44 AND 46) AND a!=45) + } +} {9 11 16 17 18 38 43 44 46 49 55 56 57 70 71 72 87 scan 0 sort 0} +do_test where7-2.378.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=20 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=121 + OR a=10 + OR b=792 + } +} {10 11 15 20 72 94 scan 0 sort 0} +do_test where7-2.378.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR a=20 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=121 + OR a=10 + OR b=792 + } +} {10 11 15 20 72 94 scan 0 sort 0} +do_test where7-2.379.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=99 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {9 14 40 66 85 87 92 scan 0 sort 0} +do_test where7-2.379.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=99 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {9 14 40 66 85 87 92 scan 0 sort 0} +do_test where7-2.380.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR b=715 + OR ((a BETWEEN 23 AND 25) AND a!=24) + } +} {6 23 25 32 58 65 79 81 84 scan 0 sort 0} +do_test where7-2.380.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR b=715 + OR ((a BETWEEN 23 AND 25) AND a!=24) + } +} {6 23 25 32 58 65 79 81 84 scan 0 sort 0} +do_test where7-2.381.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR a=46 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + } +} {28 46 97 scan 0 sort 0} +do_test where7-2.381.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR a=46 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + } +} {28 46 97 scan 0 sort 0} +do_test where7-2.382.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'defgh*') + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR b=1056 + OR b=146 + } +} {18 81 96 97 99 scan 0 sort 0} +do_test where7-2.382.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f 
GLOB 'defgh*') + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR b=1056 + OR b=146 + } +} {18 81 96 97 99 scan 0 sort 0} +do_test where7-2.383.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=15 + OR b=388 + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR a=36 + OR b=737 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR a=75 + } +} {15 21 23 36 67 75 82 84 89 scan 0 sort 0} +do_test where7-2.383.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=15 + OR b=388 + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR a=36 + OR b=737 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR a=75 + } +} {15 21 23 36 67 75 82 84 89 scan 0 sort 0} +do_test where7-2.384.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=9009 + OR a=34 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=715 + OR b=619 + OR ((a BETWEEN 98 AND 100) AND a!=99) + } +} {16 25 26 27 34 65 95 98 100 scan 0 sort 0} +do_test where7-2.384.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=9009 + OR a=34 + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=715 + OR b=619 + OR ((a BETWEEN 98 AND 100) AND a!=99) + } +} {16 25 26 27 34 65 95 98 100 scan 0 sort 0} +do_test where7-2.385.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=61.0 AND d<62.0 AND d NOT NULL) + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=242 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=300 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {1 11 13 21 22 24 26 27 32 34 39 41 53 61 74 76 79 93 95 scan 0 sort 0} +do_test where7-2.385.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=61.0 AND d<62.0 AND d NOT NULL) + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=242 + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=300 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {1 11 13 21 22 24 26 27 32 34 39 41 53 61 74 76 79 93 95 scan 0 sort 0} +do_test where7-2.386.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=85 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=212 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=36 + OR b=231 + OR b=1048 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR c=19019 + } +} {21 25 27 33 43 55 56 57 69 71 85 92 scan 0 sort 0} +do_test where7-2.386.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=85 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=212 + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=36 + OR b=231 + OR b=1048 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR c=19019 + } +} {21 25 27 33 43 55 56 57 69 71 85 92 scan 0 sort 0} +do_test where7-2.387.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR b=1059 + OR b=630 + } +} {8 28 30 scan 0 sort 0} +do_test where7-2.387.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR b=1059 + OR b=630 + } +} {8 28 30 scan 0 sort 0} 
+do_test where7-2.388.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='ghijklmno' + OR f='nopqrstuv' + OR b=297 + } +} {6 13 27 32 39 58 65 84 91 scan 0 sort 0} +do_test where7-2.388.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='ghijklmno' + OR f='nopqrstuv' + OR b=297 + } +} {6 13 27 32 39 58 65 84 91 scan 0 sort 0} +do_test where7-2.389.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1001 + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR a=58 + OR b=333 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=572 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + } +} {7 15 33 43 49 50 52 58 59 68 70 85 87 89 91 scan 0 sort 0} +do_test where7-2.389.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1001 + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR a=58 + OR b=333 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=572 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + } +} {7 15 33 43 49 50 52 58 59 68 70 85 87 89 91 scan 0 sort 0} +do_test where7-2.390.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1034 + OR f='lmnopqrst' + OR (g='qponmlk' AND f GLOB 'mnopq*') + } +} {11 37 38 63 89 94 scan 0 sort 0} +do_test where7-2.390.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1034 + OR f='lmnopqrst' + OR (g='qponmlk' AND f GLOB 'mnopq*') + } +} {11 37 38 63 89 94 scan 0 sort 0} +do_test where7-2.391.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=15015 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=58 + OR b=674 + OR b=979 + } +} {43 44 45 59 87 89 scan 0 sort 0} +do_test where7-2.391.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=15015 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=58 + OR b=674 + OR b=979 + } +} {43 44 45 59 87 89 scan 0 sort 0} +do_test where7-2.392.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR b=660 + OR b=341 + } +} {31 60 62 scan 0 sort 0} +do_test where7-2.392.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR b=660 + OR b=341 + } +} {31 60 62 scan 0 sort 0} +do_test where7-2.393.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=528 + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR b=630 + OR a=19 + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR f='wxyzabcde' + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=377 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR a=77 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + } +} {8 19 22 43 44 48 64 74 77 100 scan 0 sort 0} +do_test where7-2.393.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=528 + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR b=630 + OR a=19 + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR f='wxyzabcde' + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=377 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR a=77 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + } +} {8 19 22 43 44 48 64 74 77 100 scan 0 sort 0} +do_test where7-2.394.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=506 + OR a=70 + } +} {46 70 scan 0 sort 0} +do_test where7-2.394.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=506 + OR a=70 + } +} {46 70 scan 0 sort 0} +do_test where7-2.395.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=64 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + 
OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'cdefg*') + OR c=14014 + OR b=586 + OR c=27027 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + } +} {26 28 40 41 42 52 57 64 74 78 79 80 81 86 scan 0 sort 0} +do_test where7-2.395.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=64 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'cdefg*') + OR c=14014 + OR b=586 + OR c=27027 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + } +} {26 28 40 41 42 52 57 64 74 78 79 80 81 86 scan 0 sort 0} +do_test where7-2.396.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=46 + OR b=297 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=275 + OR b=91 + OR b=1015 + OR c=12012 + OR a=23 + OR b=278 + } +} {23 25 27 34 35 36 46 57 59 75 scan 0 sort 0} +do_test where7-2.396.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=46 + OR b=297 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=275 + OR b=91 + OR b=1015 + OR c=12012 + OR a=23 + OR b=278 + } +} {23 25 27 34 35 36 46 57 59 75 scan 0 sort 0} +do_test where7-2.397.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR a=23 + OR b=737 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + } +} {18 20 23 25 27 61 67 68 69 70 71 98 scan 0 sort 0} +do_test where7-2.397.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR a=23 + OR b=737 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + } +} {18 20 23 25 27 61 67 68 69 70 71 98 scan 0 sort 0} +do_test where7-2.398.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=814 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR b=377 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + } +} {71 74 79 scan 0 sort 0} +do_test where7-2.398.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=814 + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR b=377 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + } +} {71 74 79 scan 0 sort 0} +do_test where7-2.399.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=18 + OR b=1059 + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=795 + } +} {9 18 25 46 51 53 77 scan 0 sort 0} +do_test where7-2.399.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=18 + OR b=1059 + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=795 + } +} {9 18 25 46 51 53 77 scan 0 sort 0} +do_test where7-2.400.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR a=93 + OR a=11 + OR f='nopqrstuv' + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR a=17 + OR b=366 + } +} {11 13 17 22 24 27 37 39 63 65 89 91 
93 scan 0 sort 0} +do_test where7-2.400.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR a=93 + OR a=11 + OR f='nopqrstuv' + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR a=17 + OR b=366 + } +} {11 13 17 22 24 27 37 39 63 65 89 91 93 scan 0 sort 0} +do_test where7-2.401.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=685 + OR a=33 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=715 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=6 + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {6 16 33 37 39 40 41 42 59 61 65 80 82 93 95 scan 0 sort 0} +do_test where7-2.401.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=685 + OR a=33 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=715 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=6 + OR ((a BETWEEN 59 AND 61) AND a!=60) + } +} {6 16 33 37 39 40 41 42 59 61 65 80 82 93 95 scan 0 sort 0} +do_test where7-2.402.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=89 + OR b=1037 + OR (g='mlkjihg' AND f GLOB 'ijklm*') + } +} {60 89 scan 0 sort 0} +do_test where7-2.402.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=89 + OR b=1037 + OR (g='mlkjihg' AND f GLOB 'ijklm*') + } +} {60 89 scan 0 sort 0} +do_test where7-2.403.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=663 + OR b=531 + OR b=146 + OR b=102 + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR a=26 + } +} {26 28 44 46 87 89 97 scan 0 sort 0} +do_test where7-2.403.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=663 + OR b=531 + OR b=146 + OR b=102 + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR a=26 + } +} {26 28 44 46 87 89 97 scan 0 sort 0} +do_test where7-2.404.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'stuvw*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=726 + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR c=2002 + OR c=15015 + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=201 + } +} {4 5 6 12 35 43 44 45 64 66 70 73 75 scan 0 sort 0} +do_test where7-2.404.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'stuvw*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=726 + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR c=2002 + OR c=15015 + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=201 + } +} {4 5 6 12 35 43 44 45 64 66 70 73 75 scan 0 sort 0} +do_test where7-2.405.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=924 + OR f='lmnopqrst' + OR b=1048 + } +} {11 37 63 72 84 89 scan 0 sort 0} +do_test where7-2.405.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=924 + OR f='lmnopqrst' + OR b=1048 + } +} {11 37 63 72 84 89 scan 0 sort 0} +do_test where7-2.406.1 { + count_steps_sort { + SELECT a FROM t2 
+ WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=198 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=286 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {12 14 18 20 22 26 58 63 65 67 scan 0 sort 0} +do_test where7-2.406.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=198 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR ((a BETWEEN 12 AND 14) AND a!=13) + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=286 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {12 14 18 20 22 26 58 63 65 67 scan 0 sort 0} +do_test where7-2.407.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=242 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR f='bcdefghij' + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=38 + OR b=187 + } +} {1 17 19 22 27 38 53 57 59 79 88 99 scan 0 sort 0} +do_test where7-2.407.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=242 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR f='bcdefghij' + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=38 + OR b=187 + } +} {1 17 19 22 27 38 53 57 59 79 88 99 scan 0 sort 0} +do_test where7-2.408.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=630 + OR a=55 + OR c=26026 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {10 23 55 68 76 77 78 scan 0 sort 0} +do_test where7-2.408.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=630 + OR a=55 + OR c=26026 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {10 23 55 68 76 77 78 scan 0 sort 0} +do_test where7-2.409.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='uvwxyzabc' + OR f='xyzabcdef' + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=69 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + } +} {8 20 23 31 34 46 49 51 53 60 70 72 75 79 86 98 scan 0 sort 0} +do_test where7-2.409.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='uvwxyzabc' + OR f='xyzabcdef' + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=69 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + } +} {8 20 23 31 34 46 49 51 53 60 70 72 75 79 86 98 scan 0 sort 0} +do_test where7-2.410.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1026 + OR b=454 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR b=179 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='qrstuvwxy' + } +} {16 26 42 52 68 78 92 94 scan 0 sort 0} +do_test where7-2.410.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1026 + OR b=454 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR b=179 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='qrstuvwxy' + } +} {16 26 42 52 68 78 92 94 scan 0 sort 0} +do_test where7-2.411.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 6 AND 8) AND a!=7) + OR b=619 + OR a=20 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=946 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=64 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR b=1001 + OR 
b=858 + } +} {6 8 13 17 19 20 61 64 78 86 91 scan 0 sort 0} +do_test where7-2.411.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 6 AND 8) AND a!=7) + OR b=619 + OR a=20 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=946 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=64 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR b=1001 + OR b=858 + } +} {6 8 13 17 19 20 61 64 78 86 91 scan 0 sort 0} +do_test where7-2.412.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=902 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=86 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {27 82 86 97 scan 0 sort 0} +do_test where7-2.412.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=902 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=86 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {27 82 86 97 scan 0 sort 0} +do_test where7-2.413.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=32 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR c=32032 + } +} {4 32 38 56 94 95 96 scan 0 sort 0} +do_test where7-2.413.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=32 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR c=32032 + } +} {4 32 38 56 94 95 96 scan 0 sort 0} +do_test where7-2.414.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=168 + OR c=2002 + OR b=77 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR f='qrstuvwxy' + } +} {4 5 6 7 16 27 42 68 94 scan 0 sort 0} +do_test where7-2.414.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=168 + OR c=2002 + OR b=77 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR f='qrstuvwxy' + } +} {4 5 6 7 16 27 42 68 94 scan 0 sort 0} +do_test where7-2.415.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='abcdefghi' + OR b=506 + } +} {26 46 52 78 scan 0 sort 0} +do_test where7-2.415.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='abcdefghi' + OR b=506 + } +} {26 46 52 78 scan 0 sort 0} +do_test where7-2.416.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=264 + OR c=34034 + OR a=96 + } +} {24 96 100 scan 0 sort 0} +do_test where7-2.416.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=264 + OR c=34034 + OR a=96 + } +} {24 96 100 scan 0 sort 0} +do_test where7-2.417.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=32.0 AND d<33.0 AND d NOT NULL) + OR a=27 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + } +} {19 27 32 55 57 scan 0 sort 0} +do_test where7-2.417.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=32.0 AND d<33.0 AND d NOT NULL) + OR a=27 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + } +} {19 27 32 55 57 scan 0 sort 0} +do_test where7-2.418.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR b=77 + } +} {7 74 scan 0 sort 0} +do_test where7-2.418.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR b=77 + } +} {7 74 scan 0 sort 0} +do_test where7-2.419.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=27027 + OR f='vwxyzabcd' + OR b=1048 + OR a=96 + OR a=99 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR b=561 + OR b=352 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR a=95 + } +} {18 21 32 37 47 51 56 58 73 79 80 81 95 96 99 scan 0 sort 0} +do_test where7-2.419.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=27027 + OR f='vwxyzabcd' + OR b=1048 + OR a=96 + OR a=99 + OR ((a BETWEEN 56 
AND 58) AND a!=57) + OR b=561 + OR b=352 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR a=95 + } +} {18 21 32 37 47 51 56 58 73 79 80 81 95 96 99 scan 0 sort 0} +do_test where7-2.420.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=275 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='ghijklmno' + OR b=619 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=476 + OR a=83 + OR ((a BETWEEN 47 AND 49) AND a!=48) + } +} {6 10 12 25 32 47 49 58 83 84 91 93 99 scan 0 sort 0} +do_test where7-2.420.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=275 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='ghijklmno' + OR b=619 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=476 + OR a=83 + OR ((a BETWEEN 47 AND 49) AND a!=48) + } +} {6 10 12 25 32 47 49 58 83 84 91 93 99 scan 0 sort 0} +do_test where7-2.421.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=542 + OR a=17 + OR f='jklmnopqr' + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR a=23 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {5 7 9 17 23 25 35 39 61 87 scan 0 sort 0} +do_test where7-2.421.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=542 + OR a=17 + OR f='jklmnopqr' + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + OR a=23 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {5 7 9 17 23 25 35 39 61 87 scan 0 sort 0} +do_test where7-2.422.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR b=363 + OR b=454 + } +} {33 74 scan 0 sort 0} +do_test where7-2.422.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR b=363 + OR b=454 + } +} {33 74 scan 0 sort 0} +do_test where7-2.423.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1059 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=47 + OR b=660 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR a=84 + } +} {34 35 36 60 76 84 scan 0 sort 0} +do_test where7-2.423.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1059 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=47 + OR b=660 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR a=84 + } +} {34 35 36 60 76 84 scan 0 sort 0} +do_test where7-2.424.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='ghijklmno' + OR b=1012 + } +} {6 32 58 84 92 scan 0 sort 0} +do_test where7-2.424.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='ghijklmno' + OR b=1012 + } +} {6 32 58 84 92 scan 0 sort 0} +do_test where7-2.425.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=597 + OR f='lmnopqrst' + OR a=24 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR b=1023 + OR a=53 + OR a=78 + OR f='efghijklm' + OR (g='rqponml' AND f GLOB 'lmnop*') + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {4 11 24 30 31 33 37 53 56 63 78 82 85 89 93 96 scan 0 sort 0} +do_test where7-2.425.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=597 + OR f='lmnopqrst' + OR a=24 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR b=1023 + OR a=53 + OR a=78 + OR f='efghijklm' + OR (g='rqponml' AND f GLOB 'lmnop*') + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {4 11 24 30 31 33 37 53 56 63 78 82 85 89 93 96 scan 0 sort 0} +do_test where7-2.426.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=198 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=388 + } +} {18 94 scan 0 sort 0} +do_test 
where7-2.426.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=198 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=388 + } +} {18 94 scan 0 sort 0} +do_test where7-2.427.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='tuvwxyzab' + OR b=388 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR b=957 + OR b=663 + OR b=847 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + } +} {19 45 71 73 77 84 86 87 96 97 scan 0 sort 0} +do_test where7-2.427.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='tuvwxyzab' + OR b=388 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR b=957 + OR b=663 + OR b=847 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + } +} {19 45 71 73 77 84 86 87 96 97 scan 0 sort 0} +do_test where7-2.428.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=81.0 AND d<82.0 AND d NOT NULL) + OR a=56 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {56 81 84 scan 0 sort 0} +do_test where7-2.428.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=81.0 AND d<82.0 AND d NOT NULL) + OR a=56 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + } +} {56 81 84 scan 0 sort 0} +do_test where7-2.429.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c>=34035 + OR b=168 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 27 53 79 89 scan 0 sort 0} +do_test where7-2.429.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c>=34035 + OR b=168 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + } +} {1 27 53 79 89 scan 0 sort 0} +do_test where7-2.430.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 79 AND 81) AND a!=80) + OR b=564 + OR c=6006 + OR b=979 + } +} {16 17 18 79 81 89 scan 0 sort 0} +do_test where7-2.430.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 79 AND 81) AND a!=80) + OR b=564 + OR c=6006 + OR b=979 + } +} {16 17 18 79 81 89 scan 0 sort 0} +do_test where7-2.431.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR f='rstuvwxyz' + OR (g='qponmlk' AND f GLOB 'nopqr*') + } +} {17 29 39 40 43 69 95 scan 0 sort 0} +do_test where7-2.431.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR f='rstuvwxyz' + OR (g='qponmlk' AND f GLOB 'nopqr*') + } +} {17 29 39 40 43 69 95 scan 0 sort 0} +do_test where7-2.432.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=58 + OR b=484 + OR (d>=68.0 AND d<69.0 AND d NOT NULL) + OR b=671 + OR a=69 + } +} {44 61 68 69 scan 0 sort 0} +do_test where7-2.432.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=58 + OR b=484 + OR (d>=68.0 AND d<69.0 AND d NOT NULL) + OR b=671 + OR a=69 + } +} {44 61 68 69 scan 0 sort 0} +do_test where7-2.433.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='mnopqrstu' + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=861 + OR b=77 + OR f='qrstuvwxy' + } +} {7 12 16 38 42 64 68 73 90 94 scan 0 sort 0} +do_test where7-2.433.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='mnopqrstu' + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=861 + OR b=77 + OR f='qrstuvwxy' + } +} {7 12 16 38 42 64 68 73 90 94 scan 0 sort 0} +do_test where7-2.434.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=113 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=113 + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR c=6006 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR b=946 + OR a=86 + } +} {4 14 16 17 
18 51 62 64 86 scan 0 sort 0} +do_test where7-2.434.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=113 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=113 + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + OR c=6006 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR b=946 + OR a=86 + } +} {4 14 16 17 18 51 62 64 86 scan 0 sort 0} +do_test where7-2.435.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR c=22022 + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=25025 + } +} {8 10 64 65 66 73 74 75 79 81 85 scan 0 sort 0} +do_test where7-2.435.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR c=22022 + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=25025 + } +} {8 10 64 65 66 73 74 75 79 81 85 scan 0 sort 0} +do_test where7-2.436.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=47 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR a=92 + OR b=795 + OR b=25 + OR c=7007 + OR a=93 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + } +} {14 18 19 20 21 40 44 46 66 74 76 92 93 95 scan 0 sort 0} +do_test where7-2.436.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=47 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR a=92 + OR b=795 + OR b=25 + OR c=7007 + OR a=93 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + } +} {14 18 19 20 21 40 44 46 66 74 76 92 93 95 scan 0 sort 0} +do_test where7-2.437.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR a=13 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR c=29029 + OR b=311 + OR b=366 + OR a=94 + OR a=72 + } +} {6 13 66 72 85 86 87 94 scan 0 sort 0} +do_test where7-2.437.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR a=13 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR c=29029 + OR b=311 + OR b=366 + OR a=94 + OR a=72 + } +} {6 13 66 72 85 86 87 94 scan 0 sort 0} +do_test where7-2.438.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=26026 + OR a=96 + OR a=22 + OR b=341 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=872 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 25 AND 27) AND a!=26) + } +} {2 22 25 27 31 76 77 78 96 scan 0 sort 0} +do_test where7-2.438.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=26026 + OR a=96 + OR a=22 + OR b=341 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=872 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR ((a BETWEEN 25 AND 27) AND a!=26) + } +} {2 22 25 27 31 76 77 78 96 scan 0 sort 0} +do_test where7-2.439.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=41 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR b=913 + } +} {6 23 36 41 51 63 65 82 83 scan 0 sort 0} +do_test where7-2.439.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=82.0 AND d<83.0 AND d NOT NULL) + 
OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=41 + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR b=913 + } +} {6 23 36 41 51 63 65 82 83 scan 0 sort 0} +do_test where7-2.440.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 42 AND 44) AND a!=43) + OR a=90 + } +} {42 44 90 scan 0 sort 0} +do_test where7-2.440.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 42 AND 44) AND a!=43) + OR a=90 + } +} {42 44 90 scan 0 sort 0} +do_test where7-2.441.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR b=484 + } +} {21 44 scan 0 sort 0} +do_test where7-2.441.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR b=484 + } +} {21 44 scan 0 sort 0} +do_test where7-2.442.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=377 + OR b=363 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR b=737 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR b=506 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR a=16 + } +} {16 22 25 33 46 55 57 67 100 scan 0 sort 0} +do_test where7-2.442.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=377 + OR b=363 + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR b=737 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR b=506 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR a=16 + } +} {16 22 25 33 46 55 57 67 100 scan 0 sort 0} +do_test where7-2.443.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR b=102 + OR b=212 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=487 + OR (g='ihgfedc' AND f GLOB 'efghi*') + } +} {37 77 82 scan 0 sort 0} +do_test where7-2.443.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR b=102 + OR b=212 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=487 + OR (g='ihgfedc' AND f GLOB 'efghi*') + } +} {37 77 82 scan 0 sort 0} +do_test where7-2.444.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=154 + OR a=51 + OR b=520 + } +} {14 51 scan 0 sort 0} +do_test where7-2.444.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=154 + OR a=51 + OR b=520 + } +} {14 51 scan 0 sort 0} +do_test where7-2.445.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=872 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=957 + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR a=67 + OR a=72 + } +} {21 42 47 58 60 67 72 73 87 99 scan 0 sort 0} +do_test where7-2.445.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=872 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=957 + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR a=67 + OR a=72 + } +} {21 42 47 58 60 67 72 73 87 99 scan 0 sort 0} +do_test where7-2.446.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=66 + OR b=102 + OR b=396 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR b=759 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR f='ghijklmno' + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {6 7 9 14 32 36 58 69 84 90 92 97 100 scan 0 sort 0} +do_test where7-2.446.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=66 + OR b=102 + OR b=396 + OR (g='vutsrqp' AND f 
GLOB 'opqrs*') + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR b=759 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR f='ghijklmno' + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + } +} {6 7 9 14 32 36 58 69 84 90 92 97 100 scan 0 sort 0} +do_test where7-2.447.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 69 AND 71) AND a!=70) + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR a=72 + OR b=1100 + OR b=102 + OR b=135 + } +} {24 48 50 69 71 72 76 100 scan 0 sort 0} +do_test where7-2.447.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 69 AND 71) AND a!=70) + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR a=72 + OR b=1100 + OR b=102 + OR b=135 + } +} {24 48 50 69 71 72 76 100 scan 0 sort 0} +do_test where7-2.448.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=99 + OR a=76 + } +} {9 76 scan 0 sort 0} +do_test where7-2.448.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=99 + OR a=76 + } +} {9 76 scan 0 sort 0} +do_test where7-2.449.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=891 + OR b=806 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=861 + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + } +} {19 34 81 82 84 85 87 scan 0 sort 0} +do_test where7-2.449.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=891 + OR b=806 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=861 + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + } +} {19 34 81 82 84 85 87 scan 0 sort 0} +do_test where7-2.450.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1034 + OR b=91 + } +} {94 scan 0 sort 0} +do_test where7-2.450.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1034 + OR b=91 + } +} {94 scan 0 sort 0} +do_test where7-2.451.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=47 + OR a=91 + OR d>1e10 + OR (g='srqponm' AND f GLOB 'cdefg*') + } +} {28 91 scan 0 sort 0} +do_test where7-2.451.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=47 + OR a=91 + OR d>1e10 + OR (g='srqponm' AND f GLOB 'cdefg*') + } +} {28 91 scan 0 sort 0} +do_test where7-2.452.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1023 + OR f='zabcdefgh' + OR b=451 + OR b=443 + OR c>=34035 + OR b=58 + } +} {25 41 51 77 93 scan 0 sort 0} +do_test where7-2.452.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1023 + OR f='zabcdefgh' + OR b=451 + OR b=443 + OR c>=34035 + OR b=58 + } +} {25 41 51 77 93 scan 0 sort 0} +do_test where7-2.453.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=971 + OR b=36 + OR a=11 + OR f='hijklmnop' + } +} {7 11 33 59 85 scan 0 sort 0} +do_test where7-2.453.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=971 + OR b=36 + OR a=11 + OR f='hijklmnop' + } +} {7 11 33 59 85 scan 0 sort 0} +do_test where7-2.454.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=619 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR c=11011 + OR b=550 + OR b=1059 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR b=737 + } +} {3 18 29 31 32 33 50 55 67 78 81 84 91 92 93 scan 0 sort 0} +do_test where7-2.454.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=619 + OR ((a BETWEEN 91 AND 93) AND 
a!=92) + OR c=11011 + OR b=550 + OR b=1059 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR b=737 + } +} {3 18 29 31 32 33 50 55 67 78 81 84 91 92 93 scan 0 sort 0} +do_test where7-2.455.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='edcbazy' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR a=78 + OR a=27 + OR b=792 + OR b=946 + OR c=22022 + OR a=23 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=388 + } +} {13 23 27 39 59 61 64 65 66 72 78 80 86 91 99 scan 0 sort 0} +do_test where7-2.455.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='edcbazy' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR a=78 + OR a=27 + OR b=792 + OR b=946 + OR c=22022 + OR a=23 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=388 + } +} {13 23 27 39 59 61 64 65 66 72 78 80 86 91 99 scan 0 sort 0} +do_test where7-2.456.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=32032 + OR f IS NULL + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR b=825 + } +} {4 37 39 74 75 94 95 96 scan 0 sort 0} +do_test where7-2.456.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=32032 + OR f IS NULL + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR b=825 + } +} {4 37 39 74 75 94 95 96 scan 0 sort 0} +do_test where7-2.457.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=84.0 AND d<85.0 AND d NOT NULL) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=1078 + OR b=198 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR b=55 + OR b=517 + OR b=740 + } +} {5 7 18 21 47 54 67 73 84 98 99 scan 0 sort 0} +do_test where7-2.457.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=84.0 AND d<85.0 AND d NOT NULL) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=1078 + OR b=198 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR b=55 + OR b=517 + OR b=740 + } +} {5 7 18 21 47 54 67 73 84 98 99 scan 0 sort 0} +do_test where7-2.458.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'ijklm*') + OR c=25025 + OR b=550 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {8 22 50 53 73 74 75 scan 0 sort 0} +do_test where7-2.458.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'ijklm*') + OR c=25025 + OR b=550 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + } +} {8 22 50 53 73 74 75 scan 0 sort 0} +do_test where7-2.459.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=432 + OR f='opqrstuvw' + OR (g='kjihgfe' AND f GLOB 'qrstu*') + } +} {14 40 66 68 92 scan 0 sort 0} +do_test where7-2.459.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=432 + OR f='opqrstuvw' + OR (g='kjihgfe' AND f GLOB 'qrstu*') + } +} {14 40 66 68 92 scan 0 sort 0} +do_test where7-2.460.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 14 AND 16) AND a!=15) + OR b=847 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR b=583 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=938 + } +} {11 14 16 26 37 
40 42 53 63 65 75 77 89 scan 0 sort 0} +do_test where7-2.460.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 14 AND 16) AND a!=15) + OR b=847 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR b=583 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR b=938 + } +} {11 14 16 26 37 40 42 53 63 65 75 77 89 scan 0 sort 0} +do_test where7-2.461.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=671 + OR a=56 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR b=157 + OR a=83 + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR c=21021 + OR b=319 + OR b=187 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR b=839 + } +} {17 29 49 56 61 62 63 65 67 73 75 83 scan 0 sort 0} +do_test where7-2.461.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=671 + OR a=56 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR b=157 + OR a=83 + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR c=21021 + OR b=319 + OR b=187 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR b=839 + } +} {17 29 49 56 61 62 63 65 67 73 75 83 scan 0 sort 0} +do_test where7-2.462.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=586 + OR d<0.0 + OR c=9009 + } +} {25 26 27 72 scan 0 sort 0} +do_test where7-2.462.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=586 + OR d<0.0 + OR c=9009 + } +} {25 26 27 72 scan 0 sort 0} +do_test where7-2.463.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=82 + OR a=34 + OR f='jklmnopqr' + OR a=82 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=454 + OR b=355 + OR c=21021 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=30 + } +} {9 16 30 34 35 61 62 63 65 82 87 scan 0 sort 0} +do_test where7-2.463.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=82 + OR a=34 + OR f='jklmnopqr' + OR a=82 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=454 + OR b=355 + OR c=21021 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=30 + } +} {9 16 30 34 35 61 62 63 65 82 87 scan 0 sort 0} +do_test where7-2.464.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 44 AND 46) AND a!=45) + OR a=53 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=594 + OR b=80 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR d>1e10 + } +} {18 20 23 44 46 49 53 54 scan 0 sort 0} +do_test where7-2.464.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 44 AND 46) AND a!=45) + OR a=53 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=594 + OR b=80 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR d>1e10 + } +} {18 20 23 44 46 49 53 54 scan 0 sort 0} +do_test where7-2.465.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='opqrstuvw' + OR a=7 + } +} {7 14 40 66 92 scan 0 sort 0} +do_test where7-2.465.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='opqrstuvw' + OR a=7 + } +} {7 14 40 66 92 scan 0 sort 0} +do_test where7-2.466.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=627 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=90 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + } +} {33 43 45 57 75 77 90 scan 0 sort 0} +do_test where7-2.466.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=627 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR a=90 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + } +} {33 43 45 57 75 77 90 scan 0 sort 0} 
+do_test where7-2.467.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=59 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR f='wxyzabcde' + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR a=70 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {5 9 14 16 22 23 25 48 51 59 69 70 71 74 77 100 scan 0 sort 0} +do_test where7-2.467.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=59 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR f='wxyzabcde' + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR a=70 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {5 9 14 16 22 23 25 48 51 59 69 70 71 74 77 100 scan 0 sort 0} +do_test where7-2.468.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=69 + OR (g='ihgfedc' AND f GLOB 'defgh*') + } +} {69 81 scan 0 sort 0} +do_test where7-2.468.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=69 + OR (g='ihgfedc' AND f GLOB 'defgh*') + } +} {69 81 scan 0 sort 0} +do_test where7-2.469.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=41 + OR a=43 + OR a=92 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR (g='mlkjihg' AND f GLOB 'klmno*') + } +} {41 43 62 92 95 scan 0 sort 0} +do_test where7-2.469.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=41 + OR a=43 + OR a=92 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR (g='mlkjihg' AND f GLOB 'klmno*') + } +} {41 43 62 92 95 scan 0 sort 0} +do_test where7-2.470.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=300 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=935 + OR b=190 + } +} {52 85 scan 0 sort 0} +do_test where7-2.470.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=300 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR b=935 + OR b=190 + } +} {52 85 scan 0 sort 0} +do_test where7-2.471.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='fghijklmn' + OR f='fghijklmn' + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR b=465 + OR b=586 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=88 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR b=726 + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {4 5 8 20 30 31 32 51 53 57 66 83 scan 0 sort 0} +do_test where7-2.471.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='fghijklmn' + OR f='fghijklmn' + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR b=465 + OR b=586 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=88 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR b=726 + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {4 5 8 20 30 31 32 51 53 57 66 83 scan 0 sort 0} +do_test where7-2.472.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=34.0 AND d<35.0 AND d NOT NULL) + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR b=814 + OR a=20 + OR 1000000=34.0 AND d<35.0 AND d NOT NULL) + OR (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR b=814 + OR a=20 + OR 1000000=65.0 AND d<66.0 AND d NOT NULL) + OR c<=10 + OR a=92 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR b=1026 + } +} {1 2 3 25 44 53 55 65 72 92 scan 0 sort 0} +do_test where7-2.473.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR c=1001 + OR b=484 + OR (d>=65.0 AND d<66.0 AND d NOT 
NULL) + OR c<=10 + OR a=92 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR b=1026 + } +} {1 2 3 25 44 53 55 65 72 92 scan 0 sort 0} +do_test where7-2.474.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=54 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR b=993 + OR c=22022 + OR a=68 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR a=62 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=1015 + } +} {3 29 54 55 62 64 65 66 68 81 99 scan 0 sort 0} +do_test where7-2.474.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=54 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR b=993 + OR c=22022 + OR a=68 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR a=62 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + OR b=1015 + } +} {3 29 54 55 62 64 65 66 68 81 99 scan 0 sort 0} +do_test where7-2.475.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=319 + OR a=50 + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR a=96 + } +} {10 29 50 55 92 96 scan 0 sort 0} +do_test where7-2.475.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=319 + OR a=50 + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR a=96 + } +} {10 29 50 55 92 96 scan 0 sort 0} +do_test where7-2.476.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=971 + OR c=18018 + OR b=564 + OR b=583 + OR b=80 + } +} {52 53 54 scan 0 sort 0} +do_test where7-2.476.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=971 + OR c=18018 + OR b=564 + OR b=583 + OR b=80 + } +} {52 53 54 scan 0 sort 0} +do_test where7-2.477.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=35.0 AND d<36.0 AND d NOT NULL) + OR b=1026 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 35 scan 0 sort 0} +do_test where7-2.477.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=35.0 AND d<36.0 AND d NOT NULL) + OR b=1026 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 35 scan 0 sort 0} +do_test where7-2.478.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=407 + OR b=454 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=627 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {9 13 34 35 37 39 46 57 61 65 87 91 scan 0 sort 0} +do_test where7-2.478.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=407 + OR b=454 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=627 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {9 13 34 35 37 39 46 57 61 65 87 91 scan 0 sort 0} +do_test where7-2.479.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=34034 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR a=67 + } +} {6 18 20 24 26 32 58 67 79 84 100 scan 0 sort 0} +do_test where7-2.479.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=34034 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR a=67 + } +} {6 18 20 24 26 32 58 67 79 84 100 scan 0 sort 0} +do_test where7-2.480.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=97 + OR b=575 + OR (d>=81.0 AND 
d<82.0 AND d NOT NULL) + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + } +} {1 2 4 16 42 68 81 94 97 scan 0 sort 0} +do_test where7-2.480.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=97 + OR b=575 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + } +} {1 2 4 16 42 68 81 94 97 scan 0 sort 0} +do_test where7-2.481.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=561 + OR b=773 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=201 + OR a=99 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR b=946 + OR b=993 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + } +} {19 23 36 38 46 51 86 94 99 scan 0 sort 0} +do_test where7-2.481.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=561 + OR b=773 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=201 + OR a=99 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR b=946 + OR b=993 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + } +} {19 23 36 38 46 51 86 94 99 scan 0 sort 0} +do_test where7-2.482.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=806 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR b=916 + OR b<0 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=154 + OR c=10010 + OR b=451 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + } +} {14 24 26 28 29 30 41 62 72 scan 0 sort 0} +do_test where7-2.482.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=806 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR b=916 + OR b<0 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=154 + OR c=10010 + OR b=451 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + } +} {14 24 26 28 29 30 41 62 72 scan 0 sort 0} +do_test where7-2.483.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=836 + OR d>1e10 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR f='pqrstuvwx' + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR f='abcdefghi' + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR a=33 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR b=476 + } +} {3 5 15 19 20 21 26 33 41 52 57 67 76 78 88 90 93 scan 0 sort 0} +do_test where7-2.483.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=836 + OR d>1e10 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR f='pqrstuvwx' + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR f='abcdefghi' + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR a=33 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR b=476 + } +} {3 5 15 19 20 21 26 33 41 52 57 67 76 78 88 90 93 scan 0 sort 0} +do_test where7-2.484.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=48 + OR a=92 + OR a=1 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR b=905 + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {1 4 7 28 30 37 48 51 53 56 82 92 scan 0 sort 0} +do_test where7-2.484.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=48 + OR a=92 + OR a=1 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR b=905 + OR ((a BETWEEN 51 AND 53) AND a!=52) + } +} {1 4 7 28 30 37 48 51 53 56 82 92 scan 0 sort 0} +do_test where7-2.485.1 { + count_steps_sort { + SELECT 
a FROM t2 + WHERE (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=212 + OR a=42 + OR a=92 + } +} {4 17 42 92 scan 0 sort 0} +do_test where7-2.485.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=212 + OR a=42 + OR a=92 + } +} {4 17 42 92 scan 0 sort 0} +do_test where7-2.486.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=740 + OR b=564 + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR a=11 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR b=322 + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR b=902 + OR c>=34035 + } +} {6 11 22 44 46 51 82 scan 0 sort 0} +do_test where7-2.486.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=740 + OR b=564 + OR (g='onmlkji' AND f GLOB 'zabcd*') + OR a=11 + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR b=322 + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR b=902 + OR c>=34035 + } +} {6 11 22 44 46 51 82 scan 0 sort 0} +do_test where7-2.487.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 52 AND 54) AND a!=53) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=27 + OR a=48 + OR b=927 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR f='abcdefghi' + OR b=91 + OR b=55 + } +} {5 8 26 27 48 52 54 56 58 78 89 91 96 scan 0 sort 0} +do_test where7-2.487.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 52 AND 54) AND a!=53) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=27 + OR a=48 + OR b=927 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='fedcbaz' AND f GLOB 'stuvw*') + OR f='abcdefghi' + OR b=91 + OR b=55 + } +} {5 8 26 27 48 52 54 56 58 78 89 91 96 scan 0 sort 0} +do_test where7-2.488.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='srqponm' AND f GLOB 'efghi*') + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR a=20 + OR b=11 + } +} {1 20 30 88 90 scan 0 sort 0} +do_test where7-2.488.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='srqponm' AND f GLOB 'efghi*') + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR a=20 + OR b=11 + } +} {1 20 30 88 90 scan 0 sort 0} +do_test where7-2.489.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=55 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR a=50 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + } +} {5 13 27 50 51 52 64 73 scan 0 sort 0} +do_test where7-2.489.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=55 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR a=50 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + } +} {5 13 27 50 51 52 64 73 scan 0 sort 0} +do_test where7-2.490.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='rqponml' AND f GLOB 'ijklm*') + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + } +} {22 34 48 74 100 scan 0 sort 0} +do_test where7-2.490.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='rqponml' AND f GLOB 'ijklm*') + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + } +} {22 34 48 74 100 scan 0 sort 0} +do_test where7-2.491.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=704 + OR b=924 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR b=113 + } +} {64 84 90 scan 0 sort 0} 
+do_test where7-2.491.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=704 + OR b=924 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR b=113 + } +} {64 84 90 scan 0 sort 0} +do_test where7-2.492.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 20 AND 22) AND a!=21) + OR b=289 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 20 22 scan 0 sort 0} +do_test where7-2.492.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 20 AND 22) AND a!=21) + OR b=289 + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 20 22 scan 0 sort 0} +do_test where7-2.493.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=935 + OR b=1001 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=31 + OR a=56 + } +} {31 56 78 80 85 91 scan 0 sort 0} +do_test where7-2.493.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=935 + OR b=1001 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=31 + OR a=56 + } +} {31 56 78 80 85 91 scan 0 sort 0} +do_test where7-2.494.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=726 + OR f='abcdefghi' + OR b=179 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=539 + OR b=66 + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {6 9 19 26 35 49 52 60 61 66 78 86 87 88 scan 0 sort 0} +do_test where7-2.494.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=726 + OR f='abcdefghi' + OR b=179 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=539 + OR b=66 + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {6 9 19 26 35 49 52 60 61 66 78 86 87 88 scan 0 sort 0} +do_test where7-2.495.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=179 + OR b=685 + } +} { scan 0 sort 0} +do_test where7-2.495.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=179 + OR b=685 + } +} { scan 0 sort 0} +do_test where7-2.496.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=201 + OR b=682 + OR b=443 + OR b=836 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR b=110 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + } +} {2 10 11 13 28 39 51 53 54 62 65 76 80 91 scan 0 sort 0} +do_test where7-2.496.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=201 + OR b=682 + OR b=443 + OR b=836 + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR ((a BETWEEN 51 AND 53) AND a!=52) + OR b=110 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + } +} {2 10 11 13 28 39 51 53 54 62 65 76 80 91 scan 0 sort 0} +do_test where7-2.497.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=462 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=22 + OR b=594 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + } +} {4 6 18 22 24 42 44 50 54 57 61 70 74 76 96 scan 0 sort 0} +do_test where7-2.497.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=462 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR a=22 + OR b=594 + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + } +} {4 6 18 22 24 42 44 50 54 57 61 70 74 76 96 scan 0 sort 0} +do_test where7-2.498.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR 
(g='wvutsrq' AND f GLOB 'klmno*') + OR f='vwxyzabcd' + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR a=37 + OR a=50 + } +} {1 10 13 21 22 37 47 50 73 99 scan 0 sort 0} +do_test where7-2.498.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR f='vwxyzabcd' + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR a=37 + OR a=50 + } +} {1 10 13 21 22 37 47 50 73 99 scan 0 sort 0} +do_test where7-2.499.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 83 AND 85) AND a!=84) + OR b=784 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR b=825 + OR a=80 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=531 + OR a=100 + } +} {20 23 46 72 75 80 83 85 97 98 100 scan 0 sort 0} +do_test where7-2.499.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 83 AND 85) AND a!=84) + OR b=784 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR b=825 + OR a=80 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=531 + OR a=100 + } +} {20 23 46 72 75 80 83 85 97 98 100 scan 0 sort 0} +do_test where7-2.500.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=220 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {19 20 53 scan 0 sort 0} +do_test where7-2.500.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'tuvwx*') + OR b=220 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {19 20 53 scan 0 sort 0} +do_test where7-2.501.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=92 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=990 + } +} {9 90 92 scan 0 sort 0} +do_test where7-2.501.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=92 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=990 + } +} {9 90 92 scan 0 sort 0} +do_test where7-2.502.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 77 AND 79) AND a!=78) + OR b=894 + OR c=28028 + OR b=905 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR b=1037 + } +} {26 45 52 70 77 78 79 82 83 84 scan 0 sort 0} +do_test where7-2.502.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 77 AND 79) AND a!=78) + OR b=894 + OR c=28028 + OR b=905 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR b=1037 + } +} {26 45 52 70 77 78 79 82 83 84 scan 0 sort 0} +do_test where7-2.503.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR b=773 + OR f='defghijkl' + } +} {3 29 55 72 81 scan 0 sort 0} +do_test where7-2.503.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=72.0 AND d<73.0 AND d NOT NULL) + OR b=773 + OR f='defghijkl' + } +} {3 29 55 72 81 scan 0 sort 0} +do_test where7-2.504.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=861 + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {12 37 scan 0 sort 0} +do_test where7-2.504.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=861 + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {12 37 scan 0 sort 0} +do_test where7-2.505.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=704 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=25 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR b=487 + OR (g='hgfedcb' AND f GLOB 'fghij*') + 
OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + } +} {10 23 51 64 77 79 83 84 89 scan 0 sort 0} +do_test where7-2.505.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=704 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=25 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR b=487 + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + } +} {10 23 51 64 77 79 83 84 89 scan 0 sort 0} +do_test where7-2.506.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=19 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=674 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=355 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR c=28028 + OR b=649 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='srqponm' AND f GLOB 'fghij*') + } +} {17 19 31 41 49 59 60 72 74 82 83 84 scan 0 sort 0} +do_test where7-2.506.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=19 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=674 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=355 + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR c=28028 + OR b=649 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='srqponm' AND f GLOB 'fghij*') + } +} {17 19 31 41 49 59 60 72 74 82 83 84 scan 0 sort 0} +do_test where7-2.507.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 76 AND 78) AND a!=77) + OR a=1 + OR a=22 + OR b=836 + OR c=24024 + } +} {1 22 70 71 72 76 78 scan 0 sort 0} +do_test where7-2.507.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 76 AND 78) AND a!=77) + OR a=1 + OR a=22 + OR b=836 + OR c=24024 + } +} {1 22 70 71 72 76 78 scan 0 sort 0} +do_test where7-2.508.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=135 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {20 39 41 scan 0 sort 0} +do_test where7-2.508.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=135 + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {20 39 41 scan 0 sort 0} +do_test where7-2.509.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {9 35 61 86 87 scan 0 sort 0} +do_test where7-2.509.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'ijklm*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {9 35 61 86 87 scan 0 sort 0} +do_test where7-2.510.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'wxyza*') + OR f='ghijklmno' + } +} {6 32 58 74 84 scan 0 sort 0} +do_test where7-2.510.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'wxyza*') + OR f='ghijklmno' + } +} {6 32 58 74 84 scan 0 sort 0} +do_test where7-2.511.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=825 + OR b=902 + OR a=40 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR a=30 + OR a=10 + OR a=73 + } +} {10 28 30 40 73 75 82 scan 0 sort 0} +do_test where7-2.511.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=825 + OR b=902 + OR a=40 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR a=30 + OR a=10 + OR a=73 + } +} {10 28 30 40 73 75 82 scan 0 sort 0} +do_test where7-2.512.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR a=5 + OR b=432 + OR b=979 + OR b=762 + OR b=352 + OR ((a 
BETWEEN 36 AND 38) AND a!=37) + OR c=27027 + OR c=20020 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {5 22 23 24 32 36 38 58 59 60 79 80 81 89 scan 0 sort 0} +do_test where7-2.512.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR a=5 + OR b=432 + OR b=979 + OR b=762 + OR b=352 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR c=27027 + OR c=20020 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {5 22 23 24 32 36 38 58 59 60 79 80 81 89 scan 0 sort 0} +do_test where7-2.513.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR b=99 + OR a=54 + } +} {5 7 9 10 36 54 62 88 scan 0 sort 0} +do_test where7-2.513.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR b=99 + OR a=54 + } +} {5 7 9 10 36 54 62 88 scan 0 sort 0} +do_test where7-2.514.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=300 + OR (g='mlkjihg' AND f GLOB 'klmno*') + OR b=319 + OR f='fghijklmn' + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 42 AND 44) AND a!=43) + } +} {5 29 31 42 44 57 62 73 83 scan 0 sort 0} +do_test where7-2.514.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=300 + OR (g='mlkjihg' AND f GLOB 'klmno*') + OR b=319 + OR f='fghijklmn' + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 42 AND 44) AND a!=43) + } +} {5 29 31 42 44 57 62 73 83 scan 0 sort 0} +do_test where7-2.515.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=671 + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=1004 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=748 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {5 7 18 20 23 27 61 68 82 86 88 scan 0 sort 0} +do_test where7-2.515.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=671 + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=1004 + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=748 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {5 7 18 20 23 27 61 68 82 86 88 scan 0 sort 0} +do_test where7-2.516.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=47 + OR b=784 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR a=16 + OR a=25 + OR b=572 + } +} {16 21 23 25 47 52 scan 0 sort 0} +do_test where7-2.516.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=47 + OR b=784 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR a=16 + OR a=25 + OR b=572 + } +} {16 21 23 25 47 52 scan 0 sort 0} +do_test where7-2.517.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR b=110 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR c=26026 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR b=850 + OR a=6 + } +} {6 10 67 69 74 76 77 78 91 scan 0 sort 0} +do_test where7-2.517.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR b=110 + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR c=26026 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR b=850 + OR a=6 + } +} {6 10 67 69 74 76 77 78 91 scan 0 sort 0} +do_test where7-2.518.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR (g='mlkjihg' AND f GLOB 
'klmno*') + OR b=135 + OR a=28 + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR b=737 + } +} {1 3 19 28 62 67 74 76 scan 0 sort 0} +do_test where7-2.518.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 74 AND 76) AND a!=75) + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR (g='mlkjihg' AND f GLOB 'klmno*') + OR b=135 + OR a=28 + OR ((a BETWEEN 1 AND 3) AND a!=2) + OR b=737 + } +} {1 3 19 28 62 67 74 76 scan 0 sort 0} +do_test where7-2.519.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=242 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {20 22 scan 0 sort 0} +do_test where7-2.519.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=242 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {20 22 scan 0 sort 0} +do_test where7-2.520.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=528 + OR a=41 + OR f='cdefghijk' + OR a=98 + OR b=759 + OR a=43 + OR b=286 + OR f='hijklmnop' + } +} {2 7 26 28 33 41 43 48 54 59 69 80 85 98 scan 0 sort 0} +do_test where7-2.520.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=528 + OR a=41 + OR f='cdefghijk' + OR a=98 + OR b=759 + OR a=43 + OR b=286 + OR f='hijklmnop' + } +} {2 7 26 28 33 41 43 48 54 59 69 80 85 98 scan 0 sort 0} +do_test where7-2.521.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR a=52 + } +} {6 15 52 61 scan 0 sort 0} +do_test where7-2.521.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR a=52 + } +} {6 15 52 61 scan 0 sort 0} +do_test where7-2.522.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'abcde*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR a=86 + OR c=33033 + OR c=2002 + OR a=92 + } +} {2 4 5 6 78 86 92 97 98 99 scan 0 sort 0} +do_test where7-2.522.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'abcde*') + OR ((a BETWEEN 2 AND 4) AND a!=3) + OR a=86 + OR c=33033 + OR c=2002 + OR a=92 + } +} {2 4 5 6 78 86 92 97 98 99 scan 0 sort 0} +do_test where7-2.523.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR b=517 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='srqponm' AND f GLOB 'fghij*') + OR f='defghijkl' + OR b=707 + OR c>=34035 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR a=80 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {3 23 29 31 33 35 47 49 55 63 67 69 80 81 scan 0 sort 0} +do_test where7-2.523.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 47 AND 49) AND a!=48) + OR b=517 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR (g='srqponm' AND f GLOB 'fghij*') + OR f='defghijkl' + OR b=707 + OR c>=34035 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR a=80 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {3 23 29 31 33 35 47 49 55 63 67 69 80 81 scan 0 sort 0} +do_test where7-2.524.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=209 + OR b=399 + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + } +} {19 96 97 scan 0 sort 0} +do_test where7-2.524.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=209 + OR b=399 + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + } +} {19 96 97 scan 0 
sort 0} +do_test where7-2.525.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 18 AND 20) AND a!=19) + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=597 + OR a=95 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=432 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + } +} {18 20 24 38 50 55 76 92 95 scan 0 sort 0} +do_test where7-2.525.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 18 AND 20) AND a!=19) + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=597 + OR a=95 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=432 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + } +} {18 20 24 38 50 55 76 92 95 scan 0 sort 0} +do_test where7-2.526.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=88.0 AND d<89.0 AND d NOT NULL) + OR b=157 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=3 + OR b=663 + OR a=2 + OR c=21021 + OR b=330 + OR b=231 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + } +} {2 3 21 27 30 61 62 63 78 80 88 scan 0 sort 0} +do_test where7-2.526.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=88.0 AND d<89.0 AND d NOT NULL) + OR b=157 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=3 + OR b=663 + OR a=2 + OR c=21021 + OR b=330 + OR b=231 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + } +} {2 3 21 27 30 61 62 63 78 80 88 scan 0 sort 0} +do_test where7-2.527.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'fghij*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR f IS NULL + } +} {64 66 83 scan 0 sort 0} +do_test where7-2.527.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'fghij*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR f IS NULL + } +} {64 66 83 scan 0 sort 0} +do_test where7-2.528.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 99 AND 101) AND a!=100) + OR (g='fedcbaz' AND f GLOB 'pqrst*') + OR 1000000=54.0 AND d<55.0 AND d NOT NULL) + OR b=814 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=619 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {46 53 54 55 58 74 80 scan 0 sort 0} +do_test where7-2.530.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'defgh*') + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=784 + OR b=583 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=814 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=619 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {46 53 54 55 58 74 80 scan 0 sort 0} +do_test where7-2.531.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=86 + OR b=484 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR b=418 + OR b=509 + OR a=42 + OR b=825 + OR a=91 + OR b=1023 + OR b=814 + OR ((a BETWEEN 99 AND 101) AND a!=100) + } +} {38 42 44 74 75 79 86 91 93 99 scan 0 sort 0} +do_test where7-2.531.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=86 + OR b=484 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR b=418 + OR b=509 + OR a=42 + OR b=825 + OR a=91 + OR b=1023 + OR b=814 + OR ((a BETWEEN 99 AND 101) AND a!=100) + } +} {38 42 44 74 75 79 86 91 93 99 scan 0 sort 0} +do_test where7-2.532.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=231 + OR a=81 + OR a=72 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR b=396 + } +} {21 24 26 36 72 81 86 scan 0 sort 0} +do_test where7-2.532.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=231 + OR a=81 + OR a=72 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR b=396 + } +} {21 24 26 36 72 81 86 scan 0 sort 0} +do_test where7-2.533.1 { + count_steps_sort { + SELECT a 
FROM t2 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR a=63 + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR a=71 + OR b=22 + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR a=53 + } +} {2 21 53 59 61 63 70 71 72 74 76 78 scan 0 sort 0} +do_test where7-2.533.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=74.0 AND d<75.0 AND d NOT NULL) + OR a=63 + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR a=71 + OR b=22 + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR a=53 + } +} {2 21 53 59 61 63 70 71 72 74 76 78 scan 0 sort 0} +do_test where7-2.534.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=861 + OR b=649 + OR b=146 + OR f='abcdefghi' + } +} {26 52 59 78 scan 0 sort 0} +do_test where7-2.534.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=861 + OR b=649 + OR b=146 + OR f='abcdefghi' + } +} {26 52 59 78 scan 0 sort 0} +do_test where7-2.535.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR c=5005 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR a=93 + OR c=24024 + OR b=619 + OR b=234 + OR b=55 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + } +} {5 9 13 14 15 21 35 47 50 52 56 61 70 71 72 73 87 93 99 scan 0 sort 0} +do_test where7-2.535.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR c=5005 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR a=93 + OR c=24024 + OR b=619 + OR b=234 + OR b=55 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + } +} {5 9 13 14 15 21 35 47 50 52 56 61 70 71 72 73 87 93 99 scan 0 sort 0} +do_test where7-2.536.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=355 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=806 + OR b=462 + OR b=531 + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR f='mnopqrstu' + } +} {12 38 42 49 63 64 69 90 scan 0 sort 0} +do_test where7-2.536.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=355 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=806 + OR b=462 + OR b=531 + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR f='mnopqrstu' + } +} {12 38 42 49 63 64 69 90 scan 0 sort 0} +do_test where7-2.537.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR f='pqrstuvwx' + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR b=495 + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR a=75 + } +} {15 41 45 56 60 62 67 70 75 93 scan 0 sort 0} +do_test where7-2.537.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR f='pqrstuvwx' + OR (g='nmlkjih' AND f GLOB 'efghi*') + OR b=495 + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR a=75 + } +} {15 41 45 56 60 62 67 70 75 93 scan 0 sort 0} +do_test where7-2.538.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=748 + OR b=913 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR a=22 + } +} {4 5 21 22 68 83 scan 0 sort 0} +do_test where7-2.538.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=748 + OR b=913 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR a=22 + } +} {4 5 21 22 68 83 scan 0 sort 0} +do_test where7-2.539.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=19 + OR b=902 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR b=168 + OR 
(g='lkjihgf' AND f GLOB 'pqrst*') + OR a=50 + OR f='uvwxyzabc' + OR b=836 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR a=50 + } +} {19 20 46 50 63 65 67 72 76 77 79 82 98 scan 0 sort 0} +do_test where7-2.539.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=19 + OR b=902 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR b=168 + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR a=50 + OR f='uvwxyzabc' + OR b=836 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR a=50 + } +} {19 20 46 50 63 65 67 72 76 77 79 82 98 scan 0 sort 0} +do_test where7-2.540.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=12012 + OR b=993 + OR b=839 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR a=87 + } +} {30 32 34 35 36 87 scan 0 sort 0} +do_test where7-2.540.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=12012 + OR b=993 + OR b=839 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR a=87 + } +} {30 32 34 35 36 87 scan 0 sort 0} +do_test where7-2.541.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=814 + OR c=30030 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR a=16 + OR b=1048 + OR b=113 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR b=729 + OR a=54 + } +} {3 16 34 40 54 61 74 88 89 90 scan 0 sort 0} +do_test where7-2.541.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=814 + OR c=30030 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR a=16 + OR b=1048 + OR b=113 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR b=729 + OR a=54 + } +} {3 16 34 40 54 61 74 88 89 90 scan 0 sort 0} +do_test where7-2.542.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=399 + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=814 + OR c=22022 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR a=1 + OR b=311 + OR b=121 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=198 + } +} {1 6 8 11 18 32 37 58 63 64 65 66 71 74 84 89 scan 0 sort 0} +do_test where7-2.542.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=399 + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=814 + OR c=22022 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR a=1 + OR b=311 + OR b=121 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=198 + } +} {1 6 8 11 18 32 37 58 63 64 65 66 71 74 84 89 scan 0 sort 0} +do_test where7-2.543.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=146 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR a=57 + } +} {52 57 scan 0 sort 0} +do_test where7-2.543.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=146 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR a=57 + } +} {52 57 scan 0 sort 0} +do_test where7-2.544.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR f='fghijklmn' + OR a=70 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + } +} {4 5 31 57 70 83 100 scan 0 sort 0} +do_test where7-2.544.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR f='fghijklmn' + OR a=70 + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + } +} {4 5 31 57 70 83 100 scan 0 sort 0} +do_test where7-2.545.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=42 + OR b=333 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR b=1089 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR a=22 + OR b=594 + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 12 15 22 31 35 42 54 57 83 99 scan 0 sort 0} +do_test where7-2.545.2 { + count_steps_sort { + SELECT a FROM t3 + 
WHERE a=42 + OR b=333 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR b=1089 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR a=22 + OR b=594 + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + } +} {5 12 15 22 31 35 42 54 57 83 99 scan 0 sort 0} +do_test where7-2.546.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=113 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='mnopqrstu' + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=902 + } +} {3 5 12 16 17 25 26 38 52 64 67 69 78 82 90 scan 0 sort 0} +do_test where7-2.546.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=113 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR f='mnopqrstu' + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=902 + } +} {3 5 12 16 17 25 26 38 52 64 67 69 78 82 90 scan 0 sort 0} +do_test where7-2.547.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='onmlkji' AND f GLOB 'zabcd*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=13 + } +} {13 15 41 51 67 93 scan 0 sort 0} +do_test where7-2.547.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='onmlkji' AND f GLOB 'zabcd*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR a=13 + } +} {13 15 41 51 67 93 scan 0 sort 0} +do_test where7-2.548.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR b=410 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=418 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + } +} {38 65 82 88 100 scan 0 sort 0} +do_test where7-2.548.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='edcbazy' AND f GLOB 'wxyza*') + OR b=410 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=418 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + } +} {38 65 82 88 100 scan 0 sort 0} +do_test where7-2.549.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=5 + OR a=95 + OR a=56 + OR a=46 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + } +} {5 10 41 46 56 61 95 100 scan 0 sort 0} +do_test where7-2.549.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=5 + OR a=95 + OR a=56 + OR a=46 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + } +} {5 10 41 46 56 61 95 100 scan 0 sort 0} +do_test where7-2.550.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR a=13 + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=9 + OR a=27 + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=484 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=594 + } +} {9 13 27 37 44 54 75 87 88 90 scan 0 sort 0} +do_test where7-2.550.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=87.0 AND d<88.0 AND d NOT NULL) + OR a=13 + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=9 + OR a=27 + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=484 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR b=594 + } +} {9 13 27 37 44 54 75 87 88 90 scan 0 sort 0} 
+do_test where7-2.551.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=539 + OR b=418 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=759 + } +} {15 38 49 69 scan 0 sort 0} +do_test where7-2.551.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=539 + OR b=418 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR b=759 + } +} {15 38 49 69 scan 0 sort 0} +do_test where7-2.552.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1001 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR c=34034 + OR a=84 + } +} {8 54 84 91 100 scan 0 sort 0} +do_test where7-2.552.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1001 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR c=34034 + OR a=84 + } +} {8 54 84 91 100 scan 0 sort 0} +do_test where7-2.553.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=795 + OR b=671 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR b=322 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR c=34034 + OR b=410 + } +} {15 38 41 60 61 63 67 71 73 93 100 scan 0 sort 0} +do_test where7-2.553.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=795 + OR b=671 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR b=322 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR c=34034 + OR b=410 + } +} {15 38 41 60 61 63 67 71 73 93 100 scan 0 sort 0} +do_test where7-2.554.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=13013 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=47 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=828 + } +} {37 38 39 42 61 69 79 94 scan 0 sort 0} +do_test where7-2.554.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=13013 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=47 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=828 + } +} {37 38 39 42 61 69 79 94 scan 0 sort 0} +do_test where7-2.555.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=451 + OR b=836 + OR (g='onmlkji' AND f GLOB 'wxyza*') + } +} {41 48 76 scan 0 sort 0} +do_test where7-2.555.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=451 + OR b=836 + OR (g='onmlkji' AND f GLOB 'wxyza*') + } +} {41 48 76 scan 0 sort 0} +do_test where7-2.556.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=575 + OR b=748 + OR b=520 + OR b=154 + OR a=70 + OR f='efghijklm' + } +} {4 14 30 56 68 70 82 scan 0 sort 0} +do_test where7-2.556.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=575 + OR b=748 + OR b=520 + OR b=154 + OR a=70 + OR f='efghijklm' + } +} {4 14 30 56 68 70 82 scan 0 sort 0} +do_test where7-2.557.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='tuvwxyzab' + OR (g='nmlkjih' AND f GLOB 'efghi*') + } +} {19 45 56 71 97 scan 0 sort 0} +do_test where7-2.557.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='tuvwxyzab' + OR (g='nmlkjih' AND f GLOB 'efghi*') + } +} {19 45 56 71 97 scan 0 sort 0} +do_test where7-2.558.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=806 + OR a=47 + OR d<0.0 + OR b=982 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR ((a BETWEEN 97 AND 99) AND 
a!=98) + OR e IS NULL + OR c=32032 + OR b=795 + } +} {2 12 47 87 94 95 96 97 99 scan 0 sort 0} +do_test where7-2.558.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=806 + OR a=47 + OR d<0.0 + OR b=982 + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR e IS NULL + OR c=32032 + OR b=795 + } +} {2 12 47 87 94 95 96 97 99 scan 0 sort 0} +do_test where7-2.559.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=62 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='edcbazy' AND f GLOB 'vwxyz*') + } +} {23 49 62 75 89 91 99 scan 0 sort 0} +do_test where7-2.559.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=62 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR (g='edcbazy' AND f GLOB 'vwxyz*') + } +} {23 49 62 75 89 91 99 scan 0 sort 0} +do_test where7-2.560.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=1056 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=729 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=220 + OR b=498 + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {9 20 57 73 96 98 scan 0 sort 0} +do_test where7-2.560.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=1056 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=729 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR b=220 + OR b=498 + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {9 20 57 73 96 98 scan 0 sort 0} +do_test where7-2.561.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=44 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='wvutsrq' AND f GLOB 'klmno*') + } +} {4 10 38 scan 0 sort 0} +do_test where7-2.561.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=44 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='wvutsrq' AND f GLOB 'klmno*') + } +} {4 10 38 scan 0 sort 0} +do_test where7-2.562.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=883 + OR b=311 + OR b=880 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR a=88 + OR b=154 + OR a=94 + OR a=37 + OR c=31031 + } +} {14 37 41 57 59 80 88 91 92 93 94 scan 0 sort 0} +do_test where7-2.562.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=883 + OR b=311 + OR b=880 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR a=88 + OR b=154 + OR a=94 + OR a=37 + OR c=31031 + } +} {14 37 41 57 59 80 88 91 92 93 94 scan 0 sort 0} +do_test where7-2.563.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='onmlkji' AND f GLOB 'xyzab*') + OR a=10 + OR b=190 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR b=385 + OR a=82 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR c=22022 + } +} {8 10 35 49 55 63 64 65 66 67 69 82 90 scan 0 sort 0} +do_test where7-2.563.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='onmlkji' AND f GLOB 'xyzab*') + OR a=10 + OR b=190 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR b=385 + OR a=82 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR c=22022 + } +} {8 10 35 49 55 63 64 65 66 67 69 82 90 scan 0 sort 0} +do_test where7-2.564.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1070 + OR a=33 + OR b=363 + OR a=47 + } +} {33 47 scan 0 sort 0} +do_test where7-2.564.2 { + count_steps_sort { + 
SELECT a FROM t3 + WHERE b=1070 + OR a=33 + OR b=363 + OR a=47 + } +} {33 47 scan 0 sort 0} +do_test where7-2.565.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=1001 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=49 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR c=33033 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR g IS NULL + OR b=220 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + } +} {1 2 3 18 20 33 35 49 60 62 63 65 70 81 97 98 99 scan 0 sort 0} +do_test where7-2.565.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=1001 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=49 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR c=33033 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR g IS NULL + OR b=220 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + } +} {1 2 3 18 20 33 35 49 60 62 63 65 70 81 97 98 99 scan 0 sort 0} +do_test where7-2.566.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=212 + OR b=418 + OR ((a BETWEEN 31 AND 33) AND a!=32) + } +} {31 33 38 71 scan 0 sort 0} +do_test where7-2.566.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=212 + OR b=418 + OR ((a BETWEEN 31 AND 33) AND a!=32) + } +} {31 33 38 71 scan 0 sort 0} +do_test where7-2.567.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=344 + OR f='nopqrstuv' + OR b=704 + OR a=84 + } +} {13 39 64 65 84 91 scan 0 sort 0} +do_test where7-2.567.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=344 + OR f='nopqrstuv' + OR b=704 + OR a=84 + } +} {13 39 64 65 84 91 scan 0 sort 0} +do_test where7-2.568.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + } +} {5 30 32 scan 0 sort 0} +do_test where7-2.568.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + } +} {5 30 32 scan 0 sort 0} +do_test where7-2.569.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'jklmn*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {26 52 78 87 scan 0 sort 0} +do_test where7-2.569.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'jklmn*') + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {26 52 78 87 scan 0 sort 0} +do_test where7-2.570.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 0 AND 2) AND a!=1) + OR b=1100 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR b=421 + OR b=465 + OR b=894 + OR c=13013 + OR b=47 + OR b=674 + OR ((a BETWEEN 0 AND 2) AND a!=1) + } +} {2 37 38 39 70 100 scan 0 sort 0} +do_test where7-2.570.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 0 AND 2) AND a!=1) + OR b=1100 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR b=421 + OR b=465 + OR b=894 + OR c=13013 + OR b=47 + OR b=674 + OR ((a BETWEEN 0 AND 2) AND a!=1) + } +} {2 37 38 39 70 100 scan 0 sort 0} +do_test where7-2.571.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=18018 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR b=410 + OR b=858 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {49 52 53 54 78 scan 0 sort 0} +do_test where7-2.571.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=18018 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR b=410 + OR b=858 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {49 52 53 54 78 scan 0 sort 0} +do_test where7-2.572.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE 
(g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=781 + } +} {47 71 scan 0 sort 0} +do_test where7-2.572.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=781 + } +} {47 71 scan 0 sort 0} +do_test where7-2.573.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1070 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR a=54 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR a=9 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + } +} {7 9 25 33 47 50 52 54 59 63 85 scan 0 sort 0} +do_test where7-2.573.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1070 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR a=54 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR a=9 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + } +} {7 9 25 33 47 50 52 54 59 63 85 scan 0 sort 0} +do_test where7-2.574.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=55 + OR a=62 + OR a=63 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (g='rqponml' AND f GLOB 'ijklm*') + OR ((a BETWEEN 99 AND 101) AND a!=100) + } +} {34 50 55 62 63 99 scan 0 sort 0} +do_test where7-2.574.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=55 + OR a=62 + OR a=63 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (g='rqponml' AND f GLOB 'ijklm*') + OR ((a BETWEEN 99 AND 101) AND a!=100) + } +} {34 50 55 62 63 99 scan 0 sort 0} +do_test where7-2.575.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=421 + OR b=146 + OR b=22 + OR f='efghijklm' + } +} {2 4 30 56 82 scan 0 sort 0} +do_test where7-2.575.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=421 + OR b=146 + OR b=22 + OR f='efghijklm' + } +} {2 4 30 56 82 scan 0 sort 0} +do_test where7-2.576.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=553 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=583 + OR a=56 + } +} {21 23 48 53 56 59 61 scan 0 sort 0} +do_test where7-2.576.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=553 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=583 + OR a=56 + } +} {21 23 48 53 56 59 61 scan 0 sort 0} +do_test where7-2.577.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=83 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR a=1 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=245 + } +} {1 17 19 29 49 51 77 83 scan 0 sort 0} +do_test where7-2.577.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=83 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR a=1 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=245 + } +} {1 17 19 29 49 51 77 83 scan 0 sort 0} +do_test where7-2.578.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=3003 + OR b=619 + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {7 8 9 19 21 scan 0 sort 0} +do_test where7-2.578.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=3003 + OR b=619 + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {7 8 9 19 21 scan 0 sort 0} +do_test where7-2.579.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=924 + OR a=92 + OR a=63 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + } +} {60 63 84 92 97 scan 0 sort 0} +do_test where7-2.579.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE b=924 + OR a=92 + OR a=63 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + } +} {60 63 84 92 97 scan 0 sort 0} +do_test where7-2.580.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=440 + OR f='vwxyzabcd' + OR b=190 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=88 + OR b=58 + } +} {8 11 21 37 40 42 47 63 73 89 99 scan 0 sort 0} +do_test where7-2.580.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=440 + OR f='vwxyzabcd' + OR b=190 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=88 + OR b=58 + } +} {8 11 21 37 40 42 47 63 73 89 99 scan 0 sort 0} +do_test where7-2.581.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=495 + OR c=24024 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=1001 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR d>1e10 + OR b=531 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR b=1089 + } +} {25 45 49 70 71 72 82 91 99 scan 0 sort 0} +do_test where7-2.581.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=495 + OR c=24024 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=1001 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR d>1e10 + OR b=531 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR b=1089 + } +} {25 45 49 70 71 72 82 91 99 scan 0 sort 0} +do_test where7-2.582.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + } +} {41 48 scan 0 sort 0} +do_test where7-2.582.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=41.0 AND d<42.0 AND d NOT NULL) + } +} {41 48 scan 0 sort 0} +do_test where7-2.583.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR d>1e10 + OR b=22 + OR c=5005 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {1 2 13 14 15 22 24 52 71 73 80 82 91 93 scan 0 sort 0} +do_test where7-2.583.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR d>1e10 + OR b=22 + OR c=5005 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {1 2 13 14 15 22 24 52 71 73 80 82 91 93 scan 0 sort 0} +do_test where7-2.584.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 9 AND 11) AND a!=10) + OR b=1078 + OR b=806 + OR b=605 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + } +} {9 11 15 23 25 41 55 67 76 93 98 scan 0 sort 0} +do_test where7-2.584.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 9 AND 11) AND a!=10) + OR b=1078 + OR b=806 + OR b=605 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + } +} {9 11 15 23 25 41 55 67 76 93 98 scan 0 sort 0} +do_test where7-2.585.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=572 + OR c=10010 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=29 + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + } +} {7 28 29 30 33 52 59 68 84 85 86 scan 0 sort 0} +do_test where7-2.585.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=572 + OR c=10010 
+ OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=29 + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + } +} {7 28 29 30 33 52 59 68 84 85 86 scan 0 sort 0} +do_test where7-2.586.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 94 AND 96) AND a!=95) + OR b=858 + OR b=806 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + } +} {63 77 78 94 96 scan 0 sort 0} +do_test where7-2.586.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 94 AND 96) AND a!=95) + OR b=858 + OR b=806 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + } +} {63 77 78 94 96 scan 0 sort 0} +do_test where7-2.587.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='vwxyzabcd' + OR a=72 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=935 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR c=13013 + } +} {21 36 37 38 39 40 47 72 73 85 99 scan 0 sort 0} +do_test where7-2.587.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='vwxyzabcd' + OR a=72 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=935 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR c=13013 + } +} {21 36 37 38 39 40 47 72 73 85 99 scan 0 sort 0} +do_test where7-2.588.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=5005 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR b=143 + OR a=68 + OR a=77 + OR b=80 + } +} {13 14 15 43 44 68 77 88 scan 0 sort 0} +do_test where7-2.588.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=5005 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR b=143 + OR a=68 + OR a=77 + OR b=80 + } +} {13 14 15 43 44 68 77 88 scan 0 sort 0} +do_test where7-2.589.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=5.0 AND d<6.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=99 + OR ((a BETWEEN 12 AND 14) AND a!=13) + } +} {5 6 12 14 68 72 76 78 99 scan 0 sort 0} +do_test where7-2.589.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=5.0 AND d<6.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'ghijk*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=99 + OR ((a BETWEEN 12 AND 14) AND a!=13) + } +} {5 6 12 14 68 72 76 78 99 scan 0 sort 0} +do_test where7-2.590.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'opqrs*') + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR b=971 + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 13 40 88 90 scan 0 sort 0} +do_test where7-2.590.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'opqrs*') + OR ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR b=971 + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {5 13 40 88 90 scan 0 sort 0} +do_test where7-2.591.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR b=806 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=1015 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {10 13 36 39 43 62 65 68 70 88 91 scan 0 sort 0} +do_test where7-2.591.2 { + count_steps_sort { + SELECT a FROM t3 
+ WHERE (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR b=806 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR b=1015 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + } +} {10 13 36 39 43 62 65 68 70 88 91 scan 0 sort 0} +do_test where7-2.592.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='nopqrstuv' + OR b=993 + OR a=76 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=20020 + OR a=26 + OR b=1048 + OR b=561 + OR (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR a=56 + } +} {13 26 36 39 51 55 56 57 58 59 60 65 76 79 91 scan 0 sort 0} +do_test where7-2.592.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='nopqrstuv' + OR b=993 + OR a=76 + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR c=20020 + OR a=26 + OR b=1048 + OR b=561 + OR (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 55 AND 57) AND a!=56) + OR a=56 + } +} {13 26 36 39 51 55 56 57 58 59 60 65 76 79 91 scan 0 sort 0} +do_test where7-2.593.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=781 + OR b=671 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=113 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=385 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {15 16 35 39 41 60 61 71 83 scan 0 sort 0} +do_test where7-2.593.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=781 + OR b=671 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR b=113 + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=385 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {15 16 35 39 41 60 61 71 83 scan 0 sort 0} +do_test where7-2.594.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=410 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=674 + OR b=825 + OR b=704 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=76 + OR c=32032 + OR ((a BETWEEN 43 AND 45) AND a!=44) + } +} {9 43 45 58 60 61 64 75 76 85 87 94 95 96 scan 0 sort 0} +do_test where7-2.594.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=410 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=674 + OR b=825 + OR b=704 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=76 + OR c=32032 + OR ((a BETWEEN 43 AND 45) AND a!=44) + } +} {9 43 45 58 60 61 64 75 76 85 87 94 95 96 scan 0 sort 0} +do_test where7-2.595.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=869 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + } +} {43 79 scan 0 sort 0} +do_test where7-2.595.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=869 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + } +} {43 79 scan 0 sort 0} +do_test where7-2.596.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=869 + OR a=34 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + } +} {34 79 87 scan 0 sort 0} +do_test where7-2.596.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=869 + OR a=34 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + } +} {34 79 87 scan 0 sort 0} +do_test where7-2.597.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='rqponml' AND f GLOB 'hijkl*') + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=8 + OR a=72 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {8 33 44 72 90 95 97 scan 0 sort 0} +do_test where7-2.597.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='rqponml' AND f GLOB 'hijkl*') + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=8 + OR a=72 + OR ((a 
BETWEEN 95 AND 97) AND a!=96) + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {8 33 44 72 90 95 97 scan 0 sort 0} +do_test where7-2.598.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=20 + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=341 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=814 + OR b=1026 + OR a=14 + OR a=13 + OR b=1037 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {13 14 20 26 31 56 58 74 76 scan 0 sort 0} +do_test where7-2.598.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=20 + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=341 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=814 + OR b=1026 + OR a=14 + OR a=13 + OR b=1037 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {13 14 20 26 31 56 58 74 76 scan 0 sort 0} +do_test where7-2.599.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=443 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=839 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR c=7007 + } +} {10 13 19 20 21 49 51 scan 0 sort 0} +do_test where7-2.599.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=443 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=839 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR c=7007 + } +} {10 13 19 20 21 49 51 scan 0 sort 0} +do_test where7-2.600.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR a=21 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + OR f='zabcdefgh' + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=506 + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=88 + OR b=190 + } +} {3 8 9 14 16 21 25 42 46 51 68 77 94 97 scan 0 sort 0} +do_test where7-2.600.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR a=21 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + OR f='zabcdefgh' + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=506 + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=88 + OR b=190 + } +} {3 8 9 14 16 21 25 42 46 51 68 77 94 97 scan 0 sort 0} +do_test where7-2.601.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=41.0 AND d<42.0 AND d NOT NULL) + OR f='bcdefghij' + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=762 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {1 27 30 41 53 54 61 63 68 70 76 79 scan 0 sort 0} +do_test where7-2.601.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=41.0 AND d<42.0 AND d NOT NULL) + OR f='bcdefghij' + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=762 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {1 27 30 41 53 54 61 63 68 70 76 79 scan 0 sort 0} +do_test where7-2.602.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=85.0 AND d<86.0 AND d NOT NULL) + OR f='qrstuvwxy' + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + } +} {16 42 56 68 85 94 scan 0 sort 0} +do_test where7-2.602.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=85.0 AND d<86.0 AND d NOT NULL) + OR f='qrstuvwxy' + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + } +} {16 42 56 68 85 94 scan 0 sort 0} +do_test where7-2.603.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (d>=57.0 
AND d<58.0 AND d NOT NULL) + OR a=21 + OR b<0 + OR f='bcdefghij' + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {1 14 16 21 27 53 57 79 89 scan 0 sort 0} +do_test where7-2.603.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=89.0 AND d<90.0 AND d NOT NULL) + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR a=21 + OR b<0 + OR f='bcdefghij' + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {1 14 16 21 27 53 57 79 89 scan 0 sort 0} +do_test where7-2.604.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=1067 + OR b=231 + OR b=113 + OR b=22 + OR a=55 + OR b=663 + } +} {2 21 40 55 83 97 scan 0 sort 0} +do_test where7-2.604.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'fghij*') + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=1067 + OR b=231 + OR b=113 + OR b=22 + OR a=55 + OR b=663 + } +} {2 21 40 55 83 97 scan 0 sort 0} +do_test where7-2.605.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=1 + OR b=454 + OR b=396 + OR b=1059 + OR a=69 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=440 + OR b=825 + } +} {1 21 36 40 47 69 73 75 99 scan 0 sort 0} +do_test where7-2.605.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=1 + OR b=454 + OR b=396 + OR b=1059 + OR a=69 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR b=440 + OR b=825 + } +} {1 21 36 40 47 69 73 75 99 scan 0 sort 0} +do_test where7-2.606.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR b=308 + OR c<=10 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR f='ghijklmno' + OR b=289 + OR a=5 + OR b=267 + OR b=949 + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {5 6 7 9 26 28 32 58 84 scan 0 sort 0} +do_test where7-2.606.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR b=308 + OR c<=10 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + OR f='ghijklmno' + OR b=289 + OR a=5 + OR b=267 + OR b=949 + OR ((a BETWEEN 7 AND 9) AND a!=8) + } +} {5 6 7 9 26 28 32 58 84 scan 0 sort 0} +do_test where7-2.607.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 95 AND 97) AND a!=96) + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=993 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=663 + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=869 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=121 + } +} {11 17 24 43 45 50 76 79 81 95 97 scan 0 sort 0} +do_test where7-2.607.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 95 AND 97) AND a!=96) + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR b=993 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR b=663 + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=869 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=121 + } +} {11 17 24 43 45 50 76 79 81 95 97 scan 0 sort 0} +do_test where7-2.608.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=770 + } +} {4 27 70 87 scan 0 sort 0} +do_test where7-2.608.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=770 + } +} {4 27 70 87 scan 0 sort 0} +do_test where7-2.609.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 80 AND 82) AND a!=81) + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR 
(g='kjihgfe' AND f GLOB 'tuvwx*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {19 45 57 71 80 82 90 97 scan 0 sort 0} +do_test where7-2.609.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 80 AND 82) AND a!=81) + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {19 45 57 71 80 82 90 97 scan 0 sort 0} +do_test where7-2.610.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=22 + OR c=31031 + OR b=894 + OR a=31 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR a=94 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=21 + OR b=1001 + } +} {2 21 31 84 86 91 92 93 94 95 scan 0 sort 0} +do_test where7-2.610.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=22 + OR c=31031 + OR b=894 + OR a=31 + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR a=94 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=21 + OR b=1001 + } +} {2 21 31 84 86 91 92 93 94 95 scan 0 sort 0} +do_test where7-2.611.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='onmlkji' AND f GLOB 'zabcd*') + OR b=1092 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR a=77 + OR a=63 + OR b=762 + OR b=894 + OR b=685 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + } +} {13 46 51 63 77 80 scan 0 sort 0} +do_test where7-2.611.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='onmlkji' AND f GLOB 'zabcd*') + OR b=1092 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR a=77 + OR a=63 + OR b=762 + OR b=894 + OR b=685 + OR (g='vutsrqp' AND f GLOB 'nopqr*') + } +} {13 46 51 63 77 80 scan 0 sort 0} +do_test where7-2.612.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'klmno*') + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=231 + } +} {10 21 93 95 scan 0 sort 0} +do_test where7-2.612.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'klmno*') + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=231 + } +} {10 21 93 95 scan 0 sort 0} +do_test where7-2.613.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=828 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + } +} {8 10 26 52 78 scan 0 sort 0} +do_test where7-2.613.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=828 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + } +} {8 10 26 52 78 scan 0 sort 0} +do_test where7-2.614.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=520 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR a=21 + } +} {4 6 13 21 31 33 39 47 50 65 91 100 scan 0 sort 0} +do_test where7-2.614.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=520 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR a=21 + } +} {4 6 13 21 31 33 39 47 50 65 91 100 scan 0 sort 0} +do_test where7-2.615.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=553 + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR b=1034 + OR b=418 + OR a=57 
+ OR f='mnopqrstu' + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + } +} {12 38 57 63 64 90 94 99 scan 0 sort 0} +do_test where7-2.615.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=553 + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR b=1034 + OR b=418 + OR a=57 + OR f='mnopqrstu' + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + } +} {12 38 57 63 64 90 94 99 scan 0 sort 0} +do_test where7-2.616.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=43 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=418 + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=594 + OR a=21 + OR a=78 + OR a=91 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {21 38 43 47 54 70 78 80 91 scan 0 sort 0} +do_test where7-2.616.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=43 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR b=418 + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=594 + OR a=21 + OR a=78 + OR a=91 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {21 38 43 47 54 70 78 80 91 scan 0 sort 0} +do_test where7-2.617.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=671 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + } +} {48 61 95 97 scan 0 sort 0} +do_test where7-2.617.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=671 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + } +} {48 61 95 97 scan 0 sort 0} +do_test where7-2.618.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=2.0 AND d<3.0 AND d NOT NULL) + OR b=726 + OR b=663 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR a=25 + OR f='qrstuvwxy' + } +} {2 13 16 25 42 66 68 94 scan 0 sort 0} +do_test where7-2.618.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=2.0 AND d<3.0 AND d NOT NULL) + OR b=726 + OR b=663 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR a=25 + OR f='qrstuvwxy' + } +} {2 13 16 25 42 66 68 94 scan 0 sort 0} +do_test where7-2.619.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=806 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR b=275 + OR ((a BETWEEN 80 AND 82) AND a!=81) + } +} {10 12 25 50 80 82 scan 0 sort 0} +do_test where7-2.619.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=806 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR b=275 + OR ((a BETWEEN 80 AND 82) AND a!=81) + } +} {10 12 25 50 80 82 scan 0 sort 0} +do_test where7-2.620.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=24024 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=429 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR b=110 + OR a=39 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + } +} {2 10 23 39 70 71 72 scan 0 sort 0} +do_test where7-2.620.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=24024 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=429 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR b=110 + OR a=39 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + } +} {2 10 23 39 70 71 72 scan 0 sort 0} +do_test where7-2.621.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=66 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR b=198 + OR b=682 + OR c=23023 + } +} {18 62 66 67 68 69 70 scan 0 sort 0} +do_test where7-2.621.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=66 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR b=198 + OR b=682 + OR c=23023 + } +} {18 62 66 67 68 69 70 scan 0 sort 0} +do_test where7-2.622.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=806 + OR b=253 + OR a=36 + } 
+} {23 36 scan 0 sort 0} +do_test where7-2.622.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=806 + OR b=253 + OR a=36 + } +} {23 36 scan 0 sort 0} +do_test where7-2.623.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=509 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=718 + OR a=4 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + } +} {4 13 16 22 24 56 58 69 scan 0 sort 0} +do_test where7-2.623.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=509 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (g='vutsrqp' AND f GLOB 'nopqr*') + OR b=718 + OR a=4 + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + } +} {4 13 16 22 24 56 58 69 scan 0 sort 0} +do_test where7-2.624.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR b=1026 + OR a=93 + OR c=18018 + } +} {52 53 54 77 93 scan 0 sort 0} +do_test where7-2.624.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR b=1026 + OR a=93 + OR c=18018 + } +} {52 53 54 77 93 scan 0 sort 0} +do_test where7-2.625.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=388 + OR a=44 + } +} {44 scan 0 sort 0} +do_test where7-2.625.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=388 + OR a=44 + } +} {44 scan 0 sort 0} +do_test where7-2.626.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=9009 + OR b=542 + OR f='cdefghijk' + OR b=319 + OR b=616 + } +} {2 25 26 27 28 29 54 56 80 scan 0 sort 0} +do_test where7-2.626.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=9009 + OR b=542 + OR f='cdefghijk' + OR b=319 + OR b=616 + } +} {2 25 26 27 28 29 54 56 80 scan 0 sort 0} +do_test where7-2.627.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=990 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=531 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR f='qrstuvwxy' + } +} {6 16 32 41 42 43 57 58 67 68 84 86 90 94 97 scan 0 sort 0} +do_test where7-2.627.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=990 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=531 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR f='qrstuvwxy' + } +} {6 16 32 41 42 43 57 58 67 68 84 86 90 94 97 scan 0 sort 0} +do_test where7-2.628.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=60 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR b=627 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=883 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR f='yzabcdefg' + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + } +} {24 38 50 57 59 60 73 76 78 93 99 scan 0 sort 0} +do_test where7-2.628.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=60 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR b=627 + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=883 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR f='yzabcdefg' + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + } +} {24 38 50 57 59 60 73 76 78 93 99 scan 0 sort 0} +do_test where7-2.629.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE 
b=696 + OR b=938 + OR a=18 + OR b=957 + OR c=18018 + OR c=3003 + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {7 8 9 18 33 35 52 53 54 87 scan 0 sort 0} +do_test where7-2.629.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=696 + OR b=938 + OR a=18 + OR b=957 + OR c=18018 + OR c=3003 + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {7 8 9 18 33 35 52 53 54 87 scan 0 sort 0} +do_test where7-2.630.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=29029 + OR a=73 + } +} {73 85 86 87 scan 0 sort 0} +do_test where7-2.630.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=29029 + OR a=73 + } +} {73 85 86 87 scan 0 sort 0} +do_test where7-2.631.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=28 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=69 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=781 + OR a=64 + OR b=91 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR a=16 + OR b=278 + OR a=26 + } +} {16 26 27 28 64 71 82 85 87 scan 0 sort 0} +do_test where7-2.631.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=28 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=69 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=781 + OR a=64 + OR b=91 + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR a=16 + OR b=278 + OR a=26 + } +} {16 26 27 28 64 71 82 85 87 scan 0 sort 0} +do_test where7-2.632.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=70 + OR c=3003 + } +} {7 8 9 70 scan 0 sort 0} +do_test where7-2.632.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=70 + OR c=3003 + } +} {7 8 9 70 scan 0 sort 0} +do_test where7-2.633.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=31031 + OR a=76 + OR b=1023 + OR b=33 + } +} {3 76 91 92 93 scan 0 sort 0} +do_test where7-2.633.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=31031 + OR a=76 + OR b=1023 + OR b=33 + } +} {3 76 91 92 93 scan 0 sort 0} +do_test where7-2.634.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=1001 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {62 86 91 scan 0 sort 0} +do_test where7-2.634.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=1001 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {62 86 91 scan 0 sort 0} +do_test where7-2.635.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='yzabcdefg' + OR ((a BETWEEN 48 AND 50) AND a!=49) + OR a=100 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR a=62 + OR a=67 + OR b=605 + OR c=23023 + OR a=26 + OR b=982 + OR ((a BETWEEN 3 AND 5) AND a!=4) + } +} {3 5 24 26 34 48 50 55 62 67 68 69 76 100 scan 0 sort 0} +do_test where7-2.635.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='yzabcdefg' + OR ((a BETWEEN 48 AND 50) AND a!=49) + OR a=100 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR a=62 + OR a=67 + OR b=605 + OR c=23023 + OR a=26 + OR b=982 + OR ((a BETWEEN 3 AND 5) AND a!=4) + } +} {3 5 24 26 34 48 50 55 62 67 68 69 76 100 scan 0 sort 0} +do_test where7-2.636.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=220 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR f IS NULL + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=784 + } +} {20 24 25 26 27 scan 0 sort 0} +do_test where7-2.636.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=220 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR f IS NULL + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR b=784 + } +} {20 24 25 26 27 scan 0 sort 0} +do_test where7-2.637.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR b=751 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR a=67 + OR 
b=102 + } +} {10 17 43 67 69 88 95 scan 0 sort 0} +do_test where7-2.637.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR b=751 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR a=67 + OR b=102 + } +} {10 17 43 67 69 88 95 scan 0 sort 0} +do_test where7-2.638.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=69.0 AND d<70.0 AND d NOT NULL) + OR b=256 + OR c=7007 + OR c=26026 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR a=66 + } +} {19 20 21 66 69 74 76 77 78 80 82 scan 0 sort 0} +do_test where7-2.638.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=69.0 AND d<70.0 AND d NOT NULL) + OR b=256 + OR c=7007 + OR c=26026 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR a=66 + } +} {19 20 21 66 69 74 76 77 78 80 82 scan 0 sort 0} +do_test where7-2.639.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=2002 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=33 + OR b=817 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {3 4 5 6 8 10 21 34 41 43 45 60 81 86 scan 0 sort 0} +do_test where7-2.639.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=2002 + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=33 + OR b=817 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {3 4 5 6 8 10 21 34 41 43 45 60 81 86 scan 0 sort 0} +do_test where7-2.640.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='srqponm' AND f GLOB 'cdefg*') + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR a=80 + OR a=53 + OR a=62 + OR a=49 + OR a=53 + OR a=56 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + } +} {28 49 53 56 62 80 81 83 scan 0 sort 0} +do_test where7-2.640.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='srqponm' AND f GLOB 'cdefg*') + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR a=80 + OR a=53 + OR a=62 + OR a=49 + OR a=53 + OR a=56 + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + } +} {28 49 53 56 62 80 81 83 scan 0 sort 0} +do_test where7-2.641.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 7 AND 9) AND a!=8) + OR b=652 + OR a=72 + OR b=209 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR a=38 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR d>1e10 + } +} {7 9 19 23 38 66 68 72 scan 0 sort 0} +do_test where7-2.641.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 7 AND 9) AND a!=8) + OR b=652 + OR a=72 + OR b=209 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR a=38 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR d>1e10 + } +} {7 9 19 23 38 66 68 72 scan 0 sort 0} +do_test where7-2.642.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=949 + OR e IS NULL + } +} { scan 0 sort 0} +do_test where7-2.642.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=949 + OR e IS NULL + } +} { scan 0 sort 0} +do_test where7-2.643.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=179 + OR (g='srqponm' AND f GLOB 'defgh*') + OR b=509 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR f='bcdefghij' + } +} {1 26 27 29 49 53 58 60 79 scan 0 sort 0} +do_test where7-2.643.2 { + count_steps_sort { + SELECT a 
FROM t3 + WHERE b=179 + OR (g='srqponm' AND f GLOB 'defgh*') + OR b=509 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR f='bcdefghij' + } +} {1 26 27 29 49 53 58 60 79 scan 0 sort 0} +do_test where7-2.644.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=23 + OR a=43 + OR c=19019 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=18018 + } +} {23 43 47 52 53 54 55 56 57 scan 0 sort 0} +do_test where7-2.644.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=23 + OR a=43 + OR c=19019 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR c=18018 + } +} {23 43 47 52 53 54 55 56 57 scan 0 sort 0} +do_test where7-2.645.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=36 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=231 + } +} {21 22 36 scan 0 sort 0} +do_test where7-2.645.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=36 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=231 + } +} {21 22 36 scan 0 sort 0} +do_test where7-2.646.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=21 + OR b=355 + OR a=97 + } +} {21 97 scan 0 sort 0} +do_test where7-2.646.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=21 + OR b=355 + OR a=97 + } +} {21 97 scan 0 sort 0} +do_test where7-2.647.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=421 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=704 + OR a=90 + OR a=78 + OR 1000000<b + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {28 42 53 55 64 78 80 81 90 scan 0 sort 0} +do_test where7-2.647.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=421 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=704 + OR a=90 + OR a=78 + OR 1000000<b + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + } +} {28 42 53 55 64 78 80 81 90 scan 0 sort 0} +do_test where7-2.648.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'pqrst*') + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {93 95 scan 0 sort 0} +do_test where7-2.648.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'pqrst*') + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {93 95 scan 0 sort 0} +do_test where7-2.649.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE d<0.0 + OR a=78 + OR b=539 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR e IS NULL + OR a=48 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {8 10 25 27 48 49 57 78 scan 0 sort 0} +do_test where7-2.649.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE d<0.0 + OR a=78 + OR b=539 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR e IS NULL + OR a=48 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {8 10 25 27 48 49 57 78 scan 0 sort 0} +do_test where7-2.650.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 94 AND 96) AND a!=95) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=22 + } +} {2 78 94 96 scan 0 sort 0} +do_test where7-2.650.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 94 AND 96) AND a!=95) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=22 + } +} {2 78 94 96 scan 0 sort 0} +do_test where7-2.651.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=275 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR f='ijklmnopq'
+ } +} {8 25 34 37 53 57 59 60 86 92 scan 0 sort 0} +do_test where7-2.651.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=275 + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR f='ijklmnopq' + } +} {8 25 34 37 53 57 59 60 86 92 scan 0 sort 0} +do_test where7-2.652.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=256 + OR c=13013 + OR b=44 + OR f='jklmnopqr' + OR b=883 + } +} {4 9 35 37 38 39 61 87 scan 0 sort 0} +do_test where7-2.652.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=256 + OR c=13013 + OR b=44 + OR f='jklmnopqr' + OR b=883 + } +} {4 9 35 37 38 39 61 87 scan 0 sort 0} +do_test where7-2.653.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='zabcdefgh' + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR a=54 + OR b=770 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR a=81 + OR b=190 + OR a=2 + } +} {2 3 25 51 54 70 77 81 96 98 scan 0 sort 0} +do_test where7-2.653.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='zabcdefgh' + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR a=54 + OR b=770 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR a=81 + OR b=190 + OR a=2 + } +} {2 3 25 51 54 70 77 81 96 98 scan 0 sort 0} +do_test where7-2.654.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR c=12012 + OR a=16 + OR a=15 + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR a=69 + OR b=748 + OR a=61 + OR b=473 + OR ((a BETWEEN 69 AND 71) AND a!=70) + } +} {12 15 16 34 35 36 43 61 68 69 70 71 72 scan 0 sort 0} +do_test where7-2.654.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR c=12012 + OR a=16 + OR a=15 + OR ((a BETWEEN 70 AND 72) AND a!=71) + OR a=69 + OR b=748 + OR a=61 + OR b=473 + OR ((a BETWEEN 69 AND 71) AND a!=70) + } +} {12 15 16 34 35 36 43 61 68 69 70 71 72 scan 0 sort 0} +do_test where7-2.655.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=223 + OR a=14 + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR b=539 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=21 + } +} {14 21 33 35 41 48 49 61 74 76 scan 0 sort 0} +do_test where7-2.655.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=223 + OR a=14 + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR b=539 + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR a=21 + } +} {14 21 33 35 41 48 49 61 74 76 scan 0 sort 0} +do_test where7-2.656.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=99 + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=73 + OR a=56 + OR b=253 + OR b=880 + } +} {5 23 31 56 57 73 80 83 99 scan 0 sort 0} +do_test where7-2.656.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=99 + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=73 + OR a=56 + OR b=253 + OR b=880 + } +} {5 23 31 56 57 73 80 83 99 scan 0 sort 0} +do_test where7-2.657.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=927 + OR b=300 + OR b=223 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=154 + OR b=759 + } +} {9 14 69 95 scan 0 sort 0} +do_test where7-2.657.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=927 + OR b=300 + OR b=223 + OR (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=154 + OR b=759 + } +} {9 14 69 95 scan 0 sort 0} +do_test where7-2.658.1 { + 
count_steps_sort { + SELECT a FROM t2 + WHERE b=242 + OR b=905 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR a=24 + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR b=1100 + OR b=850 + OR ((a BETWEEN 55 AND 57) AND a!=56) + } +} {22 24 55 57 66 67 69 86 96 100 scan 0 sort 0} +do_test where7-2.658.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=242 + OR b=905 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'ijklm*') + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR a=24 + OR ((a BETWEEN 67 AND 69) AND a!=68) + OR b=1100 + OR b=850 + OR ((a BETWEEN 55 AND 57) AND a!=56) + } +} {22 24 55 57 66 67 69 86 96 100 scan 0 sort 0} +do_test where7-2.659.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=190 + OR a=72 + OR b=377 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR b=476 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + } +} {2 26 52 72 78 93 scan 0 sort 0} +do_test where7-2.659.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=190 + OR a=72 + OR b=377 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR b=476 + OR (g='yxwvuts' AND f GLOB 'cdefg*') + } +} {2 26 52 72 78 93 scan 0 sort 0} +do_test where7-2.660.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=245 + OR b=638 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR f='opqrstuvw' + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=817 + OR a=85 + OR (g='lkjihgf' AND f GLOB 'mnopq*') + } +} {14 40 58 62 64 66 67 85 86 92 scan 0 sort 0} +do_test where7-2.660.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=245 + OR b=638 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR f='opqrstuvw' + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR b=817 + OR a=85 + OR (g='lkjihgf' AND f GLOB 'mnopq*') + } +} {14 40 58 62 64 66 67 85 86 92 scan 0 sort 0} +do_test where7-2.661.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR b=968 + } +} {22 24 88 scan 0 sort 0} +do_test where7-2.661.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR b=968 + } +} {22 24 88 scan 0 sort 0} +do_test where7-2.662.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=22 + OR b=993 + OR f='tuvwxyzab' + } +} {2 19 45 60 62 71 74 76 97 scan 0 sort 0} +do_test where7-2.662.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 60 AND 62) AND a!=61) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=22 + OR b=993 + OR f='tuvwxyzab' + } +} {2 19 45 60 62 71 74 76 97 scan 0 sort 0} +do_test where7-2.663.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 55 AND 57) AND a!=56) + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR c<=10 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=553 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR b=1045 + } +} {55 57 72 73 75 77 85 95 scan 0 sort 0} +do_test where7-2.663.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 55 AND 57) AND a!=56) + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR c<=10 + OR ((a BETWEEN 75 AND 77) AND a!=76) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=553 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR b=1045 + } +} {55 57 72 73 75 77 85 95 scan 0 sort 0} +do_test where7-2.664.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=440 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a 
BETWEEN 44 AND 46) AND a!=45) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=89 + OR c=18018 + OR b=154 + OR b=506 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR a=78 + OR b=751 + } +} {1 3 5 14 27 31 40 44 46 52 53 54 57 78 79 83 89 scan 0 sort 0} +do_test where7-2.664.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=440 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a BETWEEN 44 AND 46) AND a!=45) + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=89 + OR c=18018 + OR b=154 + OR b=506 + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR a=78 + OR b=751 + } +} {1 3 5 14 27 31 40 44 46 52 53 54 57 78 79 83 89 scan 0 sort 0} +do_test where7-2.665.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=407 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR b=209 + OR b=814 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR a=44 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=1092 + } +} {10 19 36 37 38 44 65 74 99 scan 0 sort 0} +do_test where7-2.665.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=407 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR b=209 + OR b=814 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR a=44 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=1092 + } +} {10 19 36 37 38 44 65 74 99 scan 0 sort 0} +do_test where7-2.666.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 24 AND 26) AND a!=25) + OR b=1103 + OR b=190 + OR b=737 + OR a=97 + OR b=451 + OR b=583 + OR a=63 + OR c=8008 + OR ((a BETWEEN 45 AND 47) AND a!=46) + } +} {22 23 24 26 41 45 47 53 63 67 97 scan 0 sort 0} +do_test where7-2.666.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 24 AND 26) AND a!=25) + OR b=1103 + OR b=190 + OR b=737 + OR a=97 + OR b=451 + OR b=583 + OR a=63 + OR c=8008 + OR ((a BETWEEN 45 AND 47) AND a!=46) + } +} {22 23 24 26 41 45 47 53 63 67 97 scan 0 sort 0} +do_test where7-2.667.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=12 + OR b=935 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=1070 + OR a=24 + OR a=95 + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR a=40 + OR b=935 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {12 24 27 29 40 53 85 87 95 scan 0 sort 0} +do_test where7-2.667.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=12 + OR b=935 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=1070 + OR a=24 + OR a=95 + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR a=40 + OR b=935 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {12 24 27 29 40 53 85 87 95 scan 0 sort 0} +do_test where7-2.668.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=858 + OR a=82 + OR b=209 + OR b=374 + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR c=22022 + } +} {19 34 40 64 65 66 76 78 82 scan 0 sort 0} +do_test where7-2.668.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=858 + OR a=82 + OR b=209 + OR b=374 + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR c=22022 + } +} {19 34 40 64 65 66 76 78 82 scan 0 sort 0} +do_test where7-2.669.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=27 + OR (g='rqponml' AND f GLOB 'lmnop*') + OR b=121 + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=67 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR c=1001 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {1 2 3 7 8 9 11 19 21 27 30 32 37 50 52 67 scan 0 sort 0} +do_test where7-2.669.2 { + count_steps_sort { + SELECT 
a FROM t3 + WHERE a=27 + OR (g='rqponml' AND f GLOB 'lmnop*') + OR b=121 + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=67 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR c=1001 + OR ((a BETWEEN 50 AND 52) AND a!=51) + OR ((a BETWEEN 19 AND 21) AND a!=20) + } +} {1 2 3 7 8 9 11 19 21 27 30 32 37 50 52 67 scan 0 sort 0} +do_test where7-2.670.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=99 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + } +} {1 9 46 57 98 scan 0 sort 0} +do_test where7-2.670.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=99 + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=98.0 AND d<99.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + } +} {1 9 46 57 98 scan 0 sort 0} +do_test where7-2.671.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=3 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=355 + OR b=814 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR a=81 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=542 + OR b=795 + } +} {3 42 62 74 79 81 scan 0 sort 0} +do_test where7-2.671.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=3 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR b=355 + OR b=814 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR a=81 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=542 + OR b=795 + } +} {3 42 62 74 79 81 scan 0 sort 0} +do_test where7-2.672.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR b=363 + OR (g='srqponm' AND f GLOB 'fghij*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=619 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR a=73 + } +} {1 14 31 33 56 64 66 73 scan 0 sort 0} +do_test where7-2.672.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR b=363 + OR (g='srqponm' AND f GLOB 'fghij*') + OR ((a BETWEEN 64 AND 66) AND a!=65) + OR b=619 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + OR a=73 + } +} {1 14 31 33 56 64 66 73 scan 0 sort 0} +do_test where7-2.673.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=935 + OR a=42 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR b=330 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {9 30 35 42 55 61 85 87 scan 0 sort 0} +do_test where7-2.673.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=935 + OR a=42 + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR b=330 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {9 30 35 42 55 61 85 87 scan 0 sort 0} +do_test where7-2.674.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=79 + OR b=201 + OR b=99 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR a=64 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR a=89 + } +} {9 16 19 21 42 64 68 79 89 94 scan 0 sort 0} +do_test where7-2.674.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=79 + OR b=201 + OR b=99 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR a=64 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR a=89 + } +} {9 16 19 21 42 64 68 79 89 94 scan 0 sort 0} +do_test where7-2.675.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=784 + OR a=85 + OR b=663 + OR c=17017 + OR b=561 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=495 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR b=352 + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {32 39 41 45 49 50 51 65 68 85 scan 0 sort 0} +do_test 
where7-2.675.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=784 + OR a=85 + OR b=663 + OR c=17017 + OR b=561 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR b=495 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR b=352 + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {32 39 41 45 49 50 51 65 68 85 scan 0 sort 0} +do_test where7-2.676.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR f='klmnopqrs' + OR f='lmnopqrst' + } +} {10 11 19 36 37 62 63 88 89 100 scan 0 sort 0} +do_test where7-2.676.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR f='klmnopqrs' + OR f='lmnopqrst' + } +} {10 11 19 36 37 62 63 88 89 100 scan 0 sort 0} +do_test where7-2.677.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=46 + OR a=44 + } +} {44 46 90 92 scan 0 sort 0} +do_test where7-2.677.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=46 + OR a=44 + } +} {44 46 90 92 scan 0 sort 0} +do_test where7-2.678.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=36 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR b=682 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR b=91 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR c=12012 + OR b=267 + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {18 20 34 35 36 39 43 53 55 62 76 95 97 scan 0 sort 0} +do_test where7-2.678.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=36 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR b=682 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR b=91 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR c=12012 + OR b=267 + OR (g='jihgfed' AND f GLOB 'yzabc*') + } +} {18 20 34 35 36 39 43 53 55 62 76 95 97 scan 0 sort 0} +do_test where7-2.679.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=594 + OR f='hijklmnop' + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=707 + OR b=363 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=157 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + } +} {7 12 24 33 54 58 59 65 67 85 scan 0 sort 0} +do_test where7-2.679.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=594 + OR f='hijklmnop' + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=707 + OR b=363 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=157 + OR (g='tsrqpon' AND f GLOB 'yzabc*') + } +} {7 12 24 33 54 58 59 65 67 85 scan 0 sort 0} +do_test where7-2.680.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=2 + OR a=84 + OR b=399 + OR b=828 + OR a=21 + OR b=748 + OR c=13013 + OR a=57 + OR f='mnopqrstu' + } +} {2 12 21 27 37 38 39 57 64 68 84 90 scan 0 sort 0} +do_test where7-2.680.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=2 + OR a=84 + OR b=399 + OR b=828 + OR a=21 + OR b=748 + OR c=13013 + OR a=57 + OR f='mnopqrstu' + } +} {2 12 21 27 37 38 39 57 64 68 84 90 scan 0 sort 0} +do_test where7-2.681.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'defgh*') + OR b=674 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR c=3003 + OR a=19 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {7 8 9 19 20 22 38 40 46 55 scan 0 sort 0} +do_test where7-2.681.2 { + count_steps_sort { + SELECT 
a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'defgh*') + OR b=674 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR c=3003 + OR a=19 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {7 8 9 19 20 22 38 40 46 55 scan 0 sort 0} +do_test where7-2.682.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=652 + OR a=83 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=102 + OR b=300 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {49 83 97 scan 0 sort 0} +do_test where7-2.682.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=652 + OR a=83 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=102 + OR b=300 + OR (d>=49.0 AND d<50.0 AND d NOT NULL) + } +} {49 83 97 scan 0 sort 0} +do_test where7-2.683.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 53 AND 55) AND a!=54) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR c=4004 + OR a=95 + OR b=707 + OR f='vwxyzabcd' + OR b=286 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=693 + OR ((a BETWEEN 6 AND 8) AND a!=7) + } +} {6 8 10 11 12 21 26 43 45 47 53 55 63 73 95 99 scan 0 sort 0} +do_test where7-2.683.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 53 AND 55) AND a!=54) + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR c=4004 + OR a=95 + OR b=707 + OR f='vwxyzabcd' + OR b=286 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=693 + OR ((a BETWEEN 6 AND 8) AND a!=7) + } +} {6 8 10 11 12 21 26 43 45 47 53 55 63 73 95 99 scan 0 sort 0} +do_test where7-2.684.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=209 + OR b=198 + OR a=52 + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR d<0.0 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=168 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + } +} {7 18 19 20 24 33 35 42 46 52 59 64 72 85 98 scan 0 sort 0} +do_test where7-2.684.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=209 + OR b=198 + OR a=52 + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR d<0.0 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=168 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='uvwxyzabc' + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + } +} {7 18 19 20 24 33 35 42 46 52 59 64 72 85 98 scan 0 sort 0} +do_test where7-2.685.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 4 AND 6) AND a!=5) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR f='rstuvwxyz' + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=14 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + } +} {4 6 8 12 14 17 21 26 43 47 69 73 84 89 91 95 99 scan 0 sort 0} +do_test where7-2.685.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 4 AND 6) AND a!=5) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR f='rstuvwxyz' + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR a=14 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + } +} {4 6 8 12 14 17 21 26 43 47 69 73 84 89 91 95 99 scan 0 sort 0} +do_test where7-2.686.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 13 AND 15) AND a!=14) + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR f='mnopqrstu' + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR a=38 + OR c=26026 + } +} {2 12 13 15 33 35 38 64 76 77 78 90 93 95 97 scan 0 sort 0} +do_test where7-2.686.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 13 AND 15) AND a!=14) + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR f='mnopqrstu' + OR (g='fedcbaz' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR a=38 + OR c=26026 + } +} {2 12 13 15 33 35 38 64 76 77 78 90 93 95 97 scan 0 sort 0} +do_test where7-2.687.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ponmlkj' AND f GLOB 'stuvw*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR a=7 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='srqponm' AND f GLOB 'ghijk*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {7 32 33 35 39 44 71 73 scan 0 sort 0} +do_test where7-2.687.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'stuvw*') + OR ((a BETWEEN 71 AND 73) AND a!=72) + OR a=7 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='srqponm' AND f GLOB 'ghijk*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {7 32 33 35 39 44 71 73 scan 0 sort 0} +do_test where7-2.688.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=6006 + OR b=938 + OR b=484 + OR b=652 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR f='opqrstuvw' + } +} {14 15 16 17 18 40 41 44 58 66 67 92 93 scan 0 sort 0} +do_test where7-2.688.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=6006 + OR b=938 + OR b=484 + OR b=652 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR f='opqrstuvw' + } +} {14 15 16 17 18 40 41 44 58 66 67 92 93 scan 0 sort 0} +do_test where7-2.689.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=27027 + OR b=968 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=487 + OR b=924 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR c=14014 + OR b=1001 + } +} {40 41 42 51 70 79 80 81 84 88 91 scan 0 sort 0} +do_test where7-2.689.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=27027 + OR b=968 + OR (d>=51.0 AND d<52.0 AND d NOT NULL) + OR b=487 + OR b=924 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + OR c=14014 + OR b=1001 + } +} {40 41 42 51 70 79 80 81 84 88 91 scan 0 sort 0} +do_test where7-2.690.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=25 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=443 + OR b=564 + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=531 + OR b=1081 + OR a=96 + } +} {10 19 25 43 45 69 71 90 96 97 scan 0 sort 0} +do_test where7-2.690.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=25 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=443 + OR b=564 + OR (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=531 + OR b=1081 + OR a=96 + } +} {10 19 25 43 45 69 71 90 96 97 scan 0 sort 0} +do_test where7-2.691.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=36 + OR (g='srqponm' AND f GLOB 'defgh*') + } +} {29 scan 0 sort 0} +do_test where7-2.691.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=36 + OR (g='srqponm' AND f GLOB 'defgh*') + } +} {29 scan 0 sort 0} +do_test where7-2.692.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'stuvw*') + OR b=531 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + } +} {3 70 93 95 scan 0 sort 0} +do_test where7-2.692.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'stuvw*') + OR b=531 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (d>=3.0 
AND d<4.0 AND d NOT NULL) + } +} {3 70 93 95 scan 0 sort 0} +do_test where7-2.693.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=256 + OR b=1034 + } +} {94 scan 0 sort 0} +do_test where7-2.693.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=256 + OR b=1034 + } +} {94 scan 0 sort 0} +do_test where7-2.694.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR b=784 + OR b=718 + OR a=18 + OR a=3 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR c=28028 + } +} {3 18 19 21 24 26 47 58 60 73 82 83 84 99 scan 0 sort 0} +do_test where7-2.694.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR (d>=83.0 AND d<84.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR b=784 + OR b=718 + OR a=18 + OR a=3 + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR c=28028 + } +} {3 18 19 21 24 26 47 58 60 73 82 83 84 99 scan 0 sort 0} +do_test where7-2.695.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=473 + OR b=649 + OR ((a BETWEEN 46 AND 48) AND a!=47) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=1100 + OR b=1012 + OR a=72 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR b=176 + OR b=355 + } +} {16 18 43 46 48 59 72 91 92 100 scan 0 sort 0} +do_test where7-2.695.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=473 + OR b=649 + OR ((a BETWEEN 46 AND 48) AND a!=47) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR b=1100 + OR b=1012 + OR a=72 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR b=176 + OR b=355 + } +} {16 18 43 46 48 59 72 91 92 100 scan 0 sort 0} +do_test where7-2.696.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR f='cdefghijk' + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + } +} {2 15 19 28 29 30 32 54 80 91 scan 0 sort 0} +do_test where7-2.696.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR f='cdefghijk' + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + } +} {2 15 19 28 29 30 32 54 80 91 scan 0 sort 0} +do_test where7-2.697.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=883 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=938 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR f='defghijkl' + OR c=2002 + OR b=990 + } +} {3 4 5 6 17 19 22 29 55 60 81 90 scan 0 sort 0} +do_test where7-2.697.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=883 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=938 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR f='defghijkl' + OR c=2002 + OR b=990 + } +} {3 4 5 6 17 19 22 29 55 60 81 90 scan 0 sort 0} +do_test where7-2.698.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 7 AND 9) AND a!=8) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR b=902 + OR b=25 + } +} {7 9 76 82 scan 0 sort 0} +do_test where7-2.698.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 7 AND 9) AND a!=8) + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR b=902 + OR b=25 + } +} {7 
9 76 82 scan 0 sort 0} +do_test where7-2.699.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'abcde*') + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=1092 + OR b=495 + } +} {26 45 55 68 70 74 76 scan 0 sort 0} +do_test where7-2.699.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'abcde*') + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR ((a BETWEEN 74 AND 76) AND a!=75) + OR b=1092 + OR b=495 + } +} {26 45 55 68 70 74 76 scan 0 sort 0} +do_test where7-2.700.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=46 + OR a=74 + } +} {46 74 90 92 scan 0 sort 0} +do_test where7-2.700.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=46 + OR a=74 + } +} {46 74 90 92 scan 0 sort 0} +do_test where7-2.701.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=18 + OR b=66 + OR b=498 + OR b=143 + OR b=1034 + OR b=289 + OR b=319 + } +} {6 13 18 29 94 scan 0 sort 0} +do_test where7-2.701.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=18 + OR b=66 + OR b=498 + OR b=143 + OR b=1034 + OR b=289 + OR b=319 + } +} {6 13 18 29 94 scan 0 sort 0} +do_test where7-2.702.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR f='lmnopqrst' + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR b=872 + OR a=44 + OR ((a BETWEEN 38 AND 40) AND a!=39) + } +} {11 13 25 28 30 37 38 40 44 45 51 54 63 77 79 89 scan 0 sort 0} +do_test where7-2.702.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?abcd*' AND f GLOB 'zabc*') + OR (g='srqponm' AND f GLOB 'efghi*') + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR f='lmnopqrst' + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR b=872 + OR a=44 + OR ((a BETWEEN 38 AND 40) AND a!=39) + } +} {11 13 25 28 30 37 38 40 44 45 51 54 63 77 79 89 scan 0 sort 0} +do_test where7-2.703.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR a=20 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR b=1004 + OR b=77 + OR b=927 + OR a=99 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {7 17 20 46 66 71 72 73 75 98 99 scan 0 sort 0} +do_test where7-2.703.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR a=20 + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR b=1004 + OR b=77 + OR b=927 + OR a=99 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {7 17 20 46 66 71 72 73 75 98 99 scan 0 sort 0} +do_test where7-2.704.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=76.0 AND d<77.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=11 + OR ((a BETWEEN 21 AND 23) AND a!=22) + } +} {1 21 23 45 76 scan 0 sort 0} +do_test where7-2.704.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=76.0 AND d<77.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=11 + OR ((a BETWEEN 21 AND 23) AND a!=22) + } +} {1 21 23 45 76 scan 0 sort 0} +do_test where7-2.705.1 { + 
count_steps_sort { + SELECT a FROM t2 + WHERE b=572 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {52 57 scan 0 sort 0} +do_test where7-2.705.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=572 + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {52 57 scan 0 sort 0} +do_test where7-2.706.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR f='lmnopqrst' + OR (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR a=23 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {10 11 23 36 37 44 54 56 62 63 69 81 88 89 scan 0 sort 0} +do_test where7-2.706.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 54 AND 56) AND a!=55) + OR f='lmnopqrst' + OR (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR a=23 + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {10 11 23 36 37 44 54 56 62 63 69 81 88 89 scan 0 sort 0} +do_test where7-2.707.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=836 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=605 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR b=759 + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR a=40 + OR f='ghijklmno' + OR (g='hgfedcb' AND f GLOB 'hijkl*') + } +} {6 24 32 38 40 46 50 55 58 69 70 76 84 85 89 91 scan 0 sort 0} +do_test where7-2.707.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=836 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=605 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR b=759 + OR (f GLOB '?zabc*' AND f GLOB 'yzab*') + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR a=40 + OR f='ghijklmno' + OR (g='hgfedcb' AND f GLOB 'hijkl*') + } +} {6 24 32 38 40 46 50 55 58 69 70 76 84 85 89 91 scan 0 sort 0} +do_test where7-2.708.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + } +} {42 51 scan 0 sort 0} +do_test where7-2.708.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + } +} {42 51 scan 0 sort 0} +do_test where7-2.709.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=27027 + OR b=872 + OR a=56 + } +} {56 79 80 81 scan 0 sort 0} +do_test where7-2.709.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=27027 + OR b=872 + OR a=56 + } +} {56 79 80 81 scan 0 sort 0} +do_test where7-2.710.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=685 + OR b=256 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=44 + OR a=63 + OR a=15 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {15 22 24 44 63 78 80 scan 0 sort 0} +do_test where7-2.710.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=685 + OR b=256 + OR ((a BETWEEN 78 AND 80) AND a!=79) + OR a=44 + OR a=63 + OR a=15 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {15 22 24 44 63 78 80 scan 0 sort 0} +do_test where7-2.711.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'efghi*') + OR a=34 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=67 + OR a=28 + } +} {6 8 28 34 56 67 75 scan 0 sort 0} +do_test where7-2.711.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'efghi*') + OR a=34 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR a=67 + OR a=28 + } +} {6 8 28 34 56 67 75 scan 0 sort 0} +do_test where7-2.712.1 { + 
count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR a=52 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='ghijklmno' + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=319 + OR a=34 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR f='hijklmnop' + } +} {6 7 12 18 24 29 32 33 34 41 52 58 59 68 70 84 85 scan 0 sort 0} +do_test where7-2.712.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR a=52 + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR f='ghijklmno' + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=319 + OR a=34 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR f='hijklmnop' + } +} {6 7 12 18 24 29 32 33 34 41 52 58 59 68 70 84 85 scan 0 sort 0} +do_test where7-2.713.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR a=47 + } +} {41 47 69 71 scan 0 sort 0} +do_test where7-2.713.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'pqrst*') + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR a=47 + } +} {41 47 69 71 scan 0 sort 0} +do_test where7-2.714.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 33 AND 35) AND a!=34) + OR c=7007 + } +} {19 20 21 33 35 scan 0 sort 0} +do_test where7-2.714.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 33 AND 35) AND a!=34) + OR c=7007 + } +} {19 20 21 33 35 scan 0 sort 0} +do_test where7-2.715.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=531 + OR a=12 + OR b=583 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=61 + OR b=187 + } +} {12 17 53 61 93 95 scan 0 sort 0} +do_test where7-2.715.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=531 + OR a=12 + OR b=583 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR a=61 + OR b=187 + } +} {12 17 53 61 93 95 scan 0 sort 0} +do_test where7-2.716.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=31031 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=256 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=715 + OR b=212 + OR b=99 + OR c=29029 + } +} {9 12 38 45 65 66 68 77 79 85 86 87 91 92 93 scan 0 sort 0} +do_test where7-2.716.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=31031 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=256 + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=715 + OR b=212 + OR b=99 + OR c=29029 + } +} {9 12 38 45 65 66 68 77 79 85 86 87 91 92 93 scan 0 sort 0} +do_test where7-2.717.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR b=33 + OR a=62 + OR b=916 + OR b=1012 + OR a=2 + OR a=51 + OR b=286 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=80 + } +} {2 3 26 40 42 51 62 92 96 scan 0 sort 0} +do_test where7-2.717.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR b=33 + OR a=62 + OR b=916 + OR b=1012 + OR a=2 + OR a=51 + OR b=286 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=80 + } +} {2 3 26 40 42 51 62 92 96 scan 0 sort 0} +do_test where7-2.718.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=847 + OR f='efghijklm' + OR (d>=6.0 AND d<7.0 AND d NOT 
NULL) + } +} {4 6 30 56 77 82 scan 0 sort 0} +do_test where7-2.718.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=847 + OR f='efghijklm' + OR (d>=6.0 AND d<7.0 AND d NOT NULL) + } +} {4 6 30 56 77 82 scan 0 sort 0} +do_test where7-2.719.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + } +} {25 62 64 scan 0 sort 0} +do_test where7-2.719.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR ((a BETWEEN 62 AND 64) AND a!=63) + } +} {25 62 64 scan 0 sort 0} +do_test where7-2.720.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 43 AND 45) AND a!=44) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR a=43 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR b=729 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + } +} {14 31 33 43 45 53 scan 0 sort 0} +do_test where7-2.720.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 43 AND 45) AND a!=44) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR a=43 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR b=729 + OR (g='vutsrqp' AND f GLOB 'opqrs*') + } +} {14 31 33 43 45 53 scan 0 sort 0} +do_test where7-2.721.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='efghijklm' + OR a=70 + OR b=278 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR c=8008 + OR f='opqrstuvw' + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {4 5 14 22 23 24 25 30 33 35 40 56 66 70 82 92 scan 0 sort 0} +do_test where7-2.721.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='efghijklm' + OR a=70 + OR b=278 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR c=8008 + OR f='opqrstuvw' + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (g='xwvutsr' AND f GLOB 'fghij*') + } +} {4 5 14 22 23 24 25 30 33 35 40 56 66 70 82 92 scan 0 sort 0} +do_test where7-2.722.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR c<=10 + OR (g='srqponm' AND f GLOB 'fghij*') + OR a=35 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=1089 + OR a=73 + OR b=737 + OR c=18018 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {6 23 31 32 35 40 42 52 53 54 58 62 67 73 84 99 scan 0 sort 0} +do_test where7-2.722.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR c<=10 + OR (g='srqponm' AND f GLOB 'fghij*') + OR a=35 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=1089 + OR a=73 + OR b=737 + OR c=18018 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + } +} {6 23 31 32 35 40 42 52 53 54 58 62 67 73 84 99 scan 0 sort 0} +do_test where7-2.723.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 0 AND 2) AND a!=1) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=762 + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR a=80 + } +} {2 39 41 79 80 95 scan 0 sort 0} +do_test where7-2.723.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 0 AND 2) AND a!=1) + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=762 + OR ((a BETWEEN 39 AND 41) AND a!=40) + OR a=80 + } +} {2 39 41 79 80 95 scan 0 sort 0} +do_test where7-2.724.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR b=737 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=979 + OR a=36 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR a=55 + OR 
(g='fedcbaz' AND f GLOB 'rstuv*') + } +} {20 28 30 36 46 50 55 67 72 80 82 89 95 98 scan 0 sort 0} +do_test where7-2.724.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR b=737 + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=979 + OR a=36 + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + OR a=55 + OR (g='fedcbaz' AND f GLOB 'rstuv*') + } +} {20 28 30 36 46 50 55 67 72 80 82 89 95 98 scan 0 sort 0} +do_test where7-2.725.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=75 + OR a=61 + OR (g='onmlkji' AND f GLOB 'abcde*') + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {52 61 75 91 scan 0 sort 0} +do_test where7-2.725.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=75 + OR a=61 + OR (g='onmlkji' AND f GLOB 'abcde*') + OR (g='gfedcba' AND f GLOB 'nopqr*') + } +} {52 61 75 91 scan 0 sort 0} +do_test where7-2.726.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1004 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR a=56 + } +} {7 56 61 scan 0 sort 0} +do_test where7-2.726.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1004 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR a=56 + } +} {7 56 61 scan 0 sort 0} +do_test where7-2.727.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=93 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR a=83 + OR b=828 + OR b=454 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=924 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + OR a=50 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {38 50 58 66 83 84 89 91 93 scan 0 sort 0} +do_test where7-2.727.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=93 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR a=83 + OR b=828 + OR b=454 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=924 + OR (g='lkjihgf' AND f GLOB 'opqrs*') + OR a=50 + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + } +} {38 50 58 66 83 84 89 91 93 scan 0 sort 0} +do_test where7-2.728.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='stuvwxyza' + OR a=44 + OR c=2002 + } +} {4 5 6 18 44 70 96 scan 0 sort 0} +do_test where7-2.728.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='stuvwxyza' + OR a=44 + OR c=2002 + } +} {4 5 6 18 44 70 96 scan 0 sort 0} +do_test where7-2.729.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=55 + OR a=65 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {14 40 55 65 66 92 scan 0 sort 0} +do_test where7-2.729.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=55 + OR a=65 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {14 40 55 65 66 92 scan 0 sort 0} +do_test where7-2.730.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 72 AND 74) AND a!=73) + OR b=605 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR f='ijklmnopq' + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR c=9009 + OR b=374 + } +} {8 12 13 25 26 27 34 43 55 60 72 74 86 88 scan 0 sort 0} +do_test where7-2.730.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 72 AND 74) AND a!=73) + OR b=605 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR f='ijklmnopq' + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR c=9009 + OR b=374 + } +} {8 12 13 25 26 27 34 43 55 60 72 74 86 88 scan 0 sort 0} +do_test where7-2.731.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=476 + OR (g='wvutsrq' AND 
f GLOB 'ijklm*') + OR b=982 + OR a=43 + OR b=355 + } +} {8 43 scan 0 sort 0} +do_test where7-2.731.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=476 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR b=982 + OR a=43 + OR b=355 + } +} {8 43 scan 0 sort 0} +do_test where7-2.732.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=85 + OR b=718 + OR (g='fedcbaz' AND f GLOB 'pqrst*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {1 25 27 53 79 85 93 scan 0 sort 0} +do_test where7-2.732.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=85 + OR b=718 + OR (g='fedcbaz' AND f GLOB 'pqrst*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {1 25 27 53 79 85 93 scan 0 sort 0} +do_test where7-2.733.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {73 96 98 scan 0 sort 0} +do_test where7-2.733.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=73.0 AND d<74.0 AND d NOT NULL) + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {73 96 98 scan 0 sort 0} +do_test where7-2.734.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=176 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR b=619 + OR b=597 + OR b=198 + OR a=27 + OR b=91 + OR a=77 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {16 18 25 27 29 77 80 scan 0 sort 0} +do_test where7-2.734.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=176 + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR b=619 + OR b=597 + OR b=198 + OR a=27 + OR b=91 + OR a=77 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + } +} {16 18 25 27 29 77 80 scan 0 sort 0} +do_test where7-2.735.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=41 + OR b=528 + OR c=3003 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=22 + } +} {2 7 8 9 20 22 41 48 scan 0 sort 0} +do_test where7-2.735.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=41 + OR b=528 + OR c=3003 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR b=22 + } +} {2 7 8 9 20 22 41 48 scan 0 sort 0} +do_test where7-2.736.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=465 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=37 + OR b=1056 + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=1023 + } +} {4 16 29 37 42 63 65 68 93 94 96 scan 0 sort 0} +do_test where7-2.736.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR b=465 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=37 + OR b=1056 + OR (g='srqponm' AND f GLOB 'defgh*') + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=1023 + } +} {4 16 29 37 42 63 65 68 93 94 96 scan 0 sort 0} +do_test where7-2.737.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=76 + OR a=8 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=495 + OR b=663 + OR a=98 + OR b=748 + } +} {8 27 45 68 76 98 scan 0 sort 0} +do_test where7-2.737.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=76 + OR a=8 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR b=495 + OR b=663 + OR a=98 + OR b=748 + } +} {8 27 45 68 76 98 scan 0 sort 0} +do_test where7-2.738.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1081 + OR b=542 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=828 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR a=18 + } +} {18 47 61 64 67 scan 0 sort 0} +do_test where7-2.738.2 { + 
count_steps_sort { + SELECT a FROM t3 + WHERE b=1081 + OR b=542 + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=828 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR a=18 + } +} {18 47 61 64 67 scan 0 sort 0} +do_test where7-2.739.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='abcdefghi' + OR a=14 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR c=27027 + OR a=47 + } +} {13 14 26 47 52 78 79 80 81 scan 0 sort 0} +do_test where7-2.739.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='abcdefghi' + OR a=14 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR c=27027 + OR a=47 + } +} {13 14 26 47 52 78 79 80 81 scan 0 sort 0} +do_test where7-2.740.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=31031 + OR b=737 + OR a=37 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR a=91 + OR b=77 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + } +} {7 37 65 67 91 92 93 94 98 100 scan 0 sort 0} +do_test where7-2.740.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=31031 + OR b=737 + OR a=37 + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR a=91 + OR b=77 + OR (d>=94.0 AND d<95.0 AND d NOT NULL) + } +} {7 37 65 67 91 92 93 94 98 100 scan 0 sort 0} +do_test where7-2.741.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=17 + OR b=484 + OR c=3003 + OR b=121 + OR a=53 + } +} {7 8 9 11 17 44 53 scan 0 sort 0} +do_test where7-2.741.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=17 + OR b=484 + OR c=3003 + OR b=121 + OR a=53 + } +} {7 8 9 11 17 44 53 scan 0 sort 0} +do_test where7-2.742.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=880 + OR b=696 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR b=308 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {5 28 65 80 96 98 scan 0 sort 0} +do_test where7-2.742.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=880 + OR b=696 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR b=308 + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR ((a BETWEEN 96 AND 98) AND a!=97) + } +} {5 28 65 80 96 98 scan 0 sort 0} +do_test where7-2.743.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR a=24 + OR f IS NULL + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR c=12012 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + } +} {24 25 34 35 36 57 77 88 scan 0 sort 0} +do_test where7-2.743.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR a=24 + OR f IS NULL + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR c=12012 + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + } +} {24 25 34 35 36 57 77 88 scan 0 sort 0} +do_test where7-2.744.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=94 + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=792 + OR a=77 + OR a=26 + OR b=641 + OR a=38 + } +} {26 38 72 74 77 85 94 scan 0 sort 0} +do_test where7-2.744.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=94 + OR (d>=74.0 AND d<75.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=792 + OR a=77 + OR a=26 + OR b=641 + OR a=38 + } +} {26 38 72 74 77 85 94 scan 0 sort 0} +do_test where7-2.745.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 51 AND 53) AND a!=52) + OR (d>=30.0 AND d<31.0 AND d NOT NULL) + OR b=14 + OR ((a BETWEEN 89 AND 
91) AND a!=90) + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=121 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {11 15 17 27 30 51 53 63 86 89 91 scan 0 sort 0} +do_test where7-2.745.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 51 AND 53) AND a!=52) + OR (d>=30.0 AND d<31.0 AND d NOT NULL) + OR b=14 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR ((a BETWEEN 15 AND 17) AND a!=16) + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=121 + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + } +} {11 15 17 27 30 51 53 63 86 89 91 scan 0 sort 0} +do_test where7-2.746.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=517 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR f='opqrstuvw' + } +} {14 40 47 66 69 71 92 scan 0 sort 0} +do_test where7-2.746.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=517 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR f='opqrstuvw' + } +} {14 40 47 66 69 71 92 scan 0 sort 0} +do_test where7-2.747.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=84.0 AND d<85.0 AND d NOT NULL) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=267 + OR c=19019 + OR a=42 + OR b=938 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {1 9 17 21 22 24 32 34 35 42 43 55 56 57 61 69 84 87 95 scan 0 sort 0} +do_test where7-2.747.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=84.0 AND d<85.0 AND d NOT NULL) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=267 + OR c=19019 + OR a=42 + OR b=938 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + } +} {1 9 17 21 22 24 32 34 35 42 43 55 56 57 61 69 84 87 95 scan 0 sort 0} +do_test where7-2.748.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=179 + OR a=50 + OR (g='srqponm' AND f GLOB 'defgh*') + } +} {29 50 scan 0 sort 0} +do_test where7-2.748.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=179 + OR a=50 + OR (g='srqponm' AND f GLOB 'defgh*') + } +} {29 50 scan 0 sort 0} +do_test where7-2.749.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='vutsrqp' AND f GLOB 'rstuv*') + OR f='xyzabcdef' + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=575 + OR b=385 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=46 + OR b=220 + OR a=63 + } +} {17 18 20 23 35 46 49 51 63 65 75 scan 0 sort 0} +do_test where7-2.749.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='vutsrqp' AND f GLOB 'rstuv*') + OR f='xyzabcdef' + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=575 + OR b=385 + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=46 + OR b=220 + OR a=63 + } +} {17 18 20 23 35 46 49 51 63 65 75 scan 0 sort 0} +do_test where7-2.750.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1056 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=1078 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR c=31031 + OR b=869 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR b=245 + OR a=92 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=880 + } +} {66 77 79 80 91 92 93 96 98 scan 0 sort 0} +do_test where7-2.750.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1056 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=1078 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR c=31031 + 
OR b=869 + OR (g='jihgfed' AND f GLOB 'zabcd*') + OR b=245 + OR a=92 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=880 + } +} {66 77 79 80 91 92 93 96 98 scan 0 sort 0} +do_test where7-2.751.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1078 + OR c=28028 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR c=9009 + OR a=17 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + } +} {16 17 19 25 26 27 38 39 40 42 61 68 82 83 84 94 98 scan 0 sort 0} +do_test where7-2.751.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1078 + OR c=28028 + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR (g='mlkjihg' AND f GLOB 'jklmn*') + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR c=9009 + OR a=17 + OR (d>=39.0 AND d<40.0 AND d NOT NULL) + } +} {16 17 19 25 26 27 38 39 40 42 61 68 82 83 84 94 98 scan 0 sort 0} +do_test where7-2.752.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=762 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR f='tuvwxyzab' + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR b=1034 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + } +} {14 19 31 33 44 45 57 58 71 94 97 scan 0 sort 0} +do_test where7-2.752.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=762 + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR f='tuvwxyzab' + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR b=1034 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + } +} {14 19 31 33 44 45 57 58 71 94 97 scan 0 sort 0} +do_test where7-2.753.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=47 + OR b=187 + OR a=56 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {17 30 32 56 68 70 89 91 scan 0 sort 0} +do_test where7-2.753.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=47 + OR b=187 + OR a=56 + OR ((a BETWEEN 30 AND 32) AND a!=31) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {17 30 32 56 68 70 89 91 scan 0 sort 0} +do_test where7-2.754.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=11011 + OR a=14 + OR c=16016 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR f='jklmnopqr' + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=916 + } +} {9 14 21 25 30 31 32 33 35 46 47 48 61 87 96 scan 0 sort 0} +do_test where7-2.754.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=11011 + OR a=14 + OR c=16016 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'efghi*') + OR f='jklmnopqr' + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=916 + } +} {9 14 21 25 30 31 32 33 35 46 47 48 61 87 96 scan 0 sort 0} +do_test where7-2.755.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=949 + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (g='vutsrqp' AND f GLOB 'rstuv*') + OR c<=10 + OR a=14 + OR b=608 + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=121 + OR b=333 + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {11 14 17 28 66 93 95 98 scan 0 sort 0} +do_test where7-2.755.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=949 + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (g='vutsrqp' AND f GLOB 
'rstuv*') + OR c<=10 + OR a=14 + OR b=608 + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=121 + OR b=333 + OR ((a BETWEEN 93 AND 95) AND a!=94) + } +} {11 14 17 28 66 93 95 98 scan 0 sort 0} +do_test where7-2.756.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=355 + OR b=627 + OR b=1001 + OR b=1026 + OR ((a BETWEEN 58 AND 60) AND a!=59) + } +} {57 58 60 69 91 scan 0 sort 0} +do_test where7-2.756.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'rstuv*') + OR b=355 + OR b=627 + OR b=1001 + OR b=1026 + OR ((a BETWEEN 58 AND 60) AND a!=59) + } +} {57 58 60 69 91 scan 0 sort 0} +do_test where7-2.757.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + } +} {4 79 scan 0 sort 0} +do_test where7-2.757.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'efghi*') + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + } +} {4 79 scan 0 sort 0} +do_test where7-2.758.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=685 + OR a=14 + OR b=990 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR f='efghijklm' + OR c=1001 + OR b=784 + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {1 2 3 4 14 26 30 32 56 69 82 90 scan 0 sort 0} +do_test where7-2.758.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=685 + OR a=14 + OR b=990 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR f='efghijklm' + OR c=1001 + OR b=784 + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + } +} {1 2 3 4 14 26 30 32 56 69 82 90 scan 0 sort 0} +do_test where7-2.759.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=54 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR c=26026 + OR ((a BETWEEN 97 AND 99) AND a!=98) + } +} {39 54 76 77 78 97 99 scan 0 sort 0} +do_test where7-2.759.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=54 + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR c=26026 + OR ((a BETWEEN 97 AND 99) AND a!=98) + } +} {39 54 76 77 78 97 99 scan 0 sort 0} +do_test where7-2.760.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'ghijk*') + OR c=24024 + OR a=98 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR a=5 + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='rqponml' AND f GLOB 'klmno*') + OR f='pqrstuvwx' + OR f='bcdefghij' + OR b=1001 + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {1 5 15 21 27 31 33 36 41 53 67 70 71 72 77 79 84 91 93 98 scan 0 sort 0} +do_test where7-2.760.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'ghijk*') + OR c=24024 + OR a=98 + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + OR a=5 + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (g='rqponml' AND f GLOB 'klmno*') + OR f='pqrstuvwx' + OR f='bcdefghij' + OR b=1001 + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {1 5 15 21 27 31 33 36 41 53 67 70 71 72 77 79 84 91 93 98 scan 0 sort 0} +do_test where7-2.761.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=781 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR f='lmnopqrst' + OR a=39 + OR a=100 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {1 11 14 37 39 40 54 56 58 63 66 71 89 92 99 100 scan 0 sort 0} +do_test where7-2.761.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=781 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR (d>=56.0 AND d<57.0 AND d NOT 
NULL) + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR f='lmnopqrst' + OR a=39 + OR a=100 + OR ((a BETWEEN 56 AND 58) AND a!=57) + } +} {1 11 14 37 39 40 54 56 58 63 66 71 89 92 99 100 scan 0 sort 0} +do_test where7-2.762.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=4004 + OR b=718 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR a=50 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR b=363 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR b=1023 + } +} {10 11 12 33 34 40 50 93 scan 0 sort 0} +do_test where7-2.762.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=4004 + OR b=718 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR a=50 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR b=363 + OR (g='rqponml' AND f GLOB 'ijklm*') + OR b=1023 + } +} {10 11 12 33 34 40 50 93 scan 0 sort 0} +do_test where7-2.763.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1081 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR b=473 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR b=586 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {20 26 43 45 46 55 72 98 scan 0 sort 0} +do_test where7-2.763.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1081 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR b=473 + OR ((a BETWEEN 43 AND 45) AND a!=44) + OR b=586 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR (f GLOB '?vwxy*' AND f GLOB 'uvwx*') + } +} {20 26 43 45 46 55 72 98 scan 0 sort 0} +do_test where7-2.764.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + } +} {7 13 33 58 59 85 scan 0 sort 0} +do_test where7-2.764.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + } +} {7 13 33 58 59 85 scan 0 sort 0} +do_test where7-2.765.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR a=47 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + OR f='lmnopqrst' + } +} {11 37 47 63 68 76 78 84 85 89 scan 0 sort 0} +do_test where7-2.765.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR ((a BETWEEN 76 AND 78) AND a!=77) + OR a=47 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (g='lkjihgf' AND f GLOB 'lmnop*') + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + OR f='lmnopqrst' + } +} {11 37 47 63 68 76 78 84 85 89 scan 0 sort 0} +do_test where7-2.766.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c>=34035 + OR a=29 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR f='abcdefghi' + OR b=993 + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR ((a BETWEEN 45 AND 47) AND a!=46) + } +} {19 21 26 29 45 47 52 54 73 78 99 scan 0 sort 0} +do_test where7-2.766.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c>=34035 + OR a=29 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (f GLOB '?wxyz*' AND f GLOB 'vwxy*') + OR f='abcdefghi' + OR b=993 + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR ((a BETWEEN 45 AND 47) AND a!=46) + } +} {19 21 26 29 45 47 52 54 73 78 99 scan 0 sort 0} +do_test where7-2.767.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=696 + OR b=154 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR a=22 + OR 
(d>=47.0 AND d<48.0 AND d NOT NULL) + OR a=52 + OR a=21 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + } +} {14 16 21 22 24 47 52 63 70 scan 0 sort 0} +do_test where7-2.767.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=696 + OR b=154 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR a=22 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR a=52 + OR a=21 + OR (d>=70.0 AND d<71.0 AND d NOT NULL) + } +} {14 16 21 22 24 47 52 63 70 scan 0 sort 0} +do_test where7-2.768.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=693 + OR b=201 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR b=520 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=407 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR c>=34035 + OR b=135 + } +} {23 25 36 37 38 63 scan 0 sort 0} +do_test where7-2.768.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=693 + OR b=201 + OR ((a BETWEEN 36 AND 38) AND a!=37) + OR b=520 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR b=407 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR c>=34035 + OR b=135 + } +} {23 25 36 37 38 63 scan 0 sort 0} +do_test where7-2.769.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=707 + OR b=14 + OR b=1089 + OR b=352 + } +} {32 43 99 scan 0 sort 0} +do_test where7-2.769.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=43.0 AND d<44.0 AND d NOT NULL) + OR b=707 + OR b=14 + OR b=1089 + OR b=352 + } +} {32 43 99 scan 0 sort 0} +do_test where7-2.770.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=278 + OR b=278 + OR b=825 + OR f='rstuvwxyz' + OR b=938 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 43 AND 45) AND a!=44) + } +} {17 19 43 45 69 75 95 scan 0 sort 0} +do_test where7-2.770.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=278 + OR b=278 + OR b=825 + OR f='rstuvwxyz' + OR b=938 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR ((a BETWEEN 43 AND 45) AND a!=44) + } +} {17 19 43 45 69 75 95 scan 0 sort 0} +do_test where7-2.771.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 32 AND 34) AND a!=33) + OR b=1045 + OR c=27027 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + } +} {11 32 34 37 63 79 80 81 89 95 scan 0 sort 0} +do_test where7-2.771.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 32 AND 34) AND a!=33) + OR b=1045 + OR c=27027 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + } +} {11 32 34 37 63 79 80 81 89 95 scan 0 sort 0} +do_test where7-2.772.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=87 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=487 + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {47 87 90 scan 0 sort 0} +do_test where7-2.772.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=87 + OR (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=487 + OR (g='gfedcba' AND f GLOB 'mnopq*') + } +} {47 87 90 scan 0 sort 0} +do_test where7-2.773.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR b=69 + OR b=608 + OR b=814 + OR a=67 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=1059 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR a=18 + OR b=407 + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {10 12 18 30 32 37 58 61 67 74 scan 0 sort 0} +do_test where7-2.773.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR b=69 + OR b=608 + OR b=814 + OR a=67 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR b=1059 + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR a=18 + OR b=407 + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {10 12 18 30 32 37 58 61 67 
74 scan 0 sort 0} +do_test where7-2.774.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=223 + OR b=80 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR ((a BETWEEN 74 AND 76) AND a!=75) + } +} {74 76 97 99 scan 0 sort 0} +do_test where7-2.774.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=223 + OR b=80 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR ((a BETWEEN 74 AND 76) AND a!=75) + } +} {74 76 97 99 scan 0 sort 0} +do_test where7-2.775.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=220 + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=363 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {10 12 20 33 52 54 55 66 scan 0 sort 0} +do_test where7-2.775.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=220 + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=363 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'defgh*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {10 12 20 33 52 54 55 66 scan 0 sort 0} +do_test where7-2.776.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=498 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR b=880 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR b=828 + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR b=113 + } +} {5 15 60 62 80 scan 0 sort 0} +do_test where7-2.776.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=498 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR b=880 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR b=828 + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR b=113 + } +} {5 15 60 62 80 scan 0 sort 0} +do_test where7-2.777.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1059 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=960 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=894 + OR c=2002 + } +} {4 5 6 12 16 20 42 68 94 scan 0 sort 0} +do_test where7-2.777.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1059 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=960 + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR b=894 + OR c=2002 + } +} {4 5 6 12 16 20 42 68 94 scan 0 sort 0} +do_test where7-2.778.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=14 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + } +} {85 scan 0 sort 0} +do_test where7-2.778.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=14 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + } +} {85 scan 0 sort 0} +do_test where7-2.779.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=806 + OR (g='rqponml' AND f GLOB 'hijkl*') + OR b=795 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR c=23023 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + } +} {21 23 33 67 68 69 86 88 99 scan 0 sort 0} +do_test where7-2.779.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=806 + OR (g='rqponml' AND f GLOB 'hijkl*') + OR b=795 + OR ((a BETWEEN 99 AND 101) AND a!=100) + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR c=23023 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + } +} {21 23 33 67 68 69 86 88 99 scan 0 sort 0} +do_test where7-2.780.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=726 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR f='abcdefghi' + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR b=869 + } +} {8 10 15 26 41 52 66 67 78 79 92 93 scan 0 sort 0} +do_test where7-2.780.2 { + count_steps_sort { + SELECT a FROM t3 
+ WHERE b=726 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR f='abcdefghi' + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR b=869 + } +} {8 10 15 26 41 52 66 67 78 79 92 93 scan 0 sort 0} +do_test where7-2.781.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=59 + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR b=1081 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + } +} {5 7 59 96 scan 0 sort 0} +do_test where7-2.781.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=59 + OR ((a BETWEEN 5 AND 7) AND a!=6) + OR b=1081 + OR (g='fedcbaz' AND f GLOB 'stuvw*') + } +} {5 7 59 96 scan 0 sort 0} +do_test where7-2.782.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'nopqr*') + OR b=1037 + OR b=132 + OR c=1001 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=32 + } +} {1 2 3 12 18 20 32 39 58 68 91 scan 0 sort 0} +do_test where7-2.782.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'nopqr*') + OR b=1037 + OR b=132 + OR c=1001 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=32 + } +} {1 2 3 12 18 20 32 39 58 68 91 scan 0 sort 0} +do_test where7-2.783.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=24 + OR b=927 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR a=7 + OR b=462 + OR b=608 + OR b=781 + OR b=253 + OR c=25025 + OR b=132 + } +} {7 12 23 24 42 52 71 73 74 75 scan 0 sort 0} +do_test where7-2.783.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=24 + OR b=927 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR a=7 + OR b=462 + OR b=608 + OR b=781 + OR b=253 + OR c=25025 + OR b=132 + } +} {7 12 23 24 42 52 71 73 74 75 scan 0 sort 0} +do_test where7-2.784.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=1001 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR a=83 + } +} {23 25 61 83 91 scan 0 sort 0} +do_test where7-2.784.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=1001 + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR a=83 + } +} {23 25 61 83 91 scan 0 sort 0} +do_test where7-2.785.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=36 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR ((a BETWEEN 46 AND 48) AND a!=47) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR ((a BETWEEN 26 AND 28) AND a!=27) + } +} {3 26 28 29 31 33 46 48 55 60 73 77 80 81 82 91 scan 0 sort 0} +do_test where7-2.785.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=36 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR ((a BETWEEN 46 AND 48) AND a!=47) + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR (d>=91.0 AND d<92.0 AND d NOT NULL) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR ((a BETWEEN 26 AND 28) AND a!=27) + } +} {3 26 28 29 31 33 46 48 55 60 73 77 80 81 82 91 scan 0 sort 0} +do_test where7-2.786.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=69 + OR (f 
GLOB '?defg*' AND f GLOB 'cdef*') + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=98 + OR b=300 + OR a=41 + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR a=33 + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {1 2 10 12 28 33 37 39 41 54 58 60 69 80 98 scan 0 sort 0} +do_test where7-2.786.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=69 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=98 + OR b=300 + OR a=41 + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR a=33 + OR ((a BETWEEN 10 AND 12) AND a!=11) + } +} {1 2 10 12 28 33 37 39 41 54 58 60 69 80 98 scan 0 sort 0} +do_test where7-2.787.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR ((a BETWEEN 94 AND 96) AND a!=95) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR b=619 + OR c=6006 + OR b=91 + OR b=297 + OR b=165 + } +} {1 15 16 17 18 22 24 27 53 68 70 71 79 90 94 96 scan 0 sort 0} +do_test where7-2.787.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 68 AND 70) AND a!=69) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR ((a BETWEEN 94 AND 96) AND a!=95) + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR b=619 + OR c=6006 + OR b=91 + OR b=297 + OR b=165 + } +} {1 15 16 17 18 22 24 27 53 68 70 71 79 90 94 96 scan 0 sort 0} +do_test where7-2.788.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 9 AND 11) AND a!=10) + OR a=55 + OR (g='jihgfed' AND f GLOB 'xyzab*') + } +} {9 11 55 75 scan 0 sort 0} +do_test where7-2.788.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 9 AND 11) AND a!=10) + OR a=55 + OR (g='jihgfed' AND f GLOB 'xyzab*') + } +} {9 11 55 75 scan 0 sort 0} +do_test where7-2.789.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=737 + OR b=201 + OR a=7 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR b=957 + } +} {2 7 26 67 84 86 87 scan 0 sort 0} +do_test where7-2.789.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=737 + OR b=201 + OR a=7 + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (g='yxwvuts' AND f GLOB 'cdefg*') + OR b=957 + } +} {2 7 26 67 84 86 87 scan 0 sort 0} +do_test where7-2.790.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=74 + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR a=89 + } +} {18 44 67 70 74 79 89 90 92 95 96 97 scan 0 sort 0} +do_test where7-2.790.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 90 AND 92) AND a!=91) + OR a=74 + OR (g='lkjihgf' AND f GLOB 'pqrst*') + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR (f GLOB '?tuvw*' AND f GLOB 'stuv*') + OR a=89 + } +} {18 44 67 70 74 79 89 90 92 95 96 97 scan 0 sort 0} +do_test where7-2.791.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=179 + OR b=1081 + OR b=377 + OR b=495 + OR b=564 + OR b=289 + OR (g='qponmlk' AND f GLOB 'nopqr*') + } +} {39 45 scan 0 sort 0} +do_test where7-2.791.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=179 + OR b=1081 + OR b=377 + 
OR b=495 + OR b=564 + OR b=289 + OR (g='qponmlk' AND f GLOB 'nopqr*') + } +} {39 45 scan 0 sort 0} +do_test where7-2.792.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=69 + OR a=12 + OR b=718 + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {12 20 22 69 scan 0 sort 0} +do_test where7-2.792.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=69 + OR a=12 + OR b=718 + OR ((a BETWEEN 20 AND 22) AND a!=21) + } +} {12 20 22 69 scan 0 sort 0} +do_test where7-2.793.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR f='klmnopqrs' + OR b=674 + OR a=96 + OR a=99 + OR b=608 + OR b=707 + OR f='cdefghijk' + OR a=91 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {2 10 23 25 28 36 54 62 80 88 91 96 99 scan 0 sort 0} +do_test where7-2.793.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'zabcd*') + OR f='klmnopqrs' + OR b=674 + OR a=96 + OR a=99 + OR b=608 + OR b=707 + OR f='cdefghijk' + OR a=91 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {2 10 23 25 28 36 54 62 80 88 91 96 99 scan 0 sort 0} +do_test where7-2.794.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=564 + OR b=784 + OR b=418 + OR b=275 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR a=58 + OR c=11011 + OR b=660 + } +} {9 25 31 32 33 35 38 58 60 61 87 88 scan 0 sort 0} +do_test where7-2.794.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=564 + OR b=784 + OR b=418 + OR b=275 + OR (g='gfedcba' AND f GLOB 'klmno*') + OR a=58 + OR c=11011 + OR b=660 + } +} {9 25 31 32 33 35 38 58 60 61 87 88 scan 0 sort 0} +do_test where7-2.795.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=509 + OR b=1004 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR f='pqrstuvwx' + } +} {15 25 28 30 41 57 59 67 93 scan 0 sort 0} +do_test where7-2.795.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=25.0 AND d<26.0 AND d NOT NULL) + OR b=509 + OR b=1004 + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR ((a BETWEEN 57 AND 59) AND a!=58) + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR f='pqrstuvwx' + } +} {15 25 28 30 41 57 59 67 93 scan 0 sort 0} +do_test where7-2.796.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=440 + OR ((a BETWEEN 52 AND 54) AND a!=53) + } +} {40 52 54 scan 0 sort 0} +do_test where7-2.796.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=440 + OR ((a BETWEEN 52 AND 54) AND a!=53) + } +} {40 52 54 scan 0 sort 0} +do_test where7-2.797.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=95.0 AND d<96.0 AND d NOT NULL) + OR f='abcdefghi' + } +} {26 52 78 95 scan 0 sort 0} +do_test where7-2.797.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=95.0 AND d<96.0 AND d NOT NULL) + OR f='abcdefghi' + } +} {26 52 78 95 scan 0 sort 0} +do_test where7-2.798.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=19 + OR a=29 + OR b=476 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=91 + } +} {19 29 41 scan 0 sort 0} +do_test where7-2.798.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=19 + OR a=29 + OR b=476 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=91 + } +} {19 29 41 scan 0 sort 0} +do_test where7-2.799.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='lmnopqrst' + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=47 + OR a=71 + } +} {8 11 37 47 63 71 89 scan 0 sort 0} +do_test 
where7-2.799.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='lmnopqrst' + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR a=47 + OR a=71 + } +} {8 11 37 47 63 71 89 scan 0 sort 0} +do_test where7-2.800.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=531 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=44 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=707 + OR b=322 + } +} {4 12 32 34 49 84 95 97 scan 0 sort 0} +do_test where7-2.800.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=531 + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=44 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR b=707 + OR b=322 + } +} {4 12 32 34 49 84 95 97 scan 0 sort 0} +do_test where7-2.801.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR f='jklmnopqr' + } +} {3 9 29 35 55 61 81 82 87 89 scan 0 sort 0} +do_test where7-2.801.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?efgh*' AND f GLOB 'defg*') + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR f='jklmnopqr' + } +} {3 9 29 35 55 61 81 82 87 89 scan 0 sort 0} +do_test where7-2.802.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=946 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=47 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR b=80 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {8 23 42 47 60 62 78 86 93 scan 0 sort 0} +do_test where7-2.802.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=946 + OR (g='ihgfedc' AND f GLOB 'abcde*') + OR a=47 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR b=80 + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + } +} {8 23 42 47 60 62 78 86 93 scan 0 sort 0} +do_test where7-2.803.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=48 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR b=1015 + OR a=57 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=165 + } +} {4 9 15 35 47 48 49 50 55 57 61 87 98 100 scan 0 sort 0} +do_test where7-2.803.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=48 + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR b=1015 + OR a=57 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR ((a BETWEEN 47 AND 49) AND a!=48) + OR ((a BETWEEN 98 AND 100) AND a!=99) + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR (d>=4.0 AND d<5.0 AND d NOT NULL) + OR b=165 + } +} {4 9 15 35 47 48 49 50 55 57 61 87 98 100 scan 0 sort 0} +do_test where7-2.804.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 97 AND 99) AND a!=98) + OR a=73 + OR b=1048 + OR c>=34035 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR a=72 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=638 + } +} {58 72 73 80 91 93 97 99 scan 0 sort 0} +do_test where7-2.804.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 97 AND 99) AND a!=98) + OR a=73 + OR b=1048 + OR c>=34035 + OR (g='ihgfedc' AND f GLOB 'cdefg*') + OR a=72 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=638 + } +} 
{58 72 73 80 91 93 97 99 scan 0 sort 0} +do_test where7-2.805.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR a=39 + OR b=165 + } +} {15 28 30 39 scan 0 sort 0} +do_test where7-2.805.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 28 AND 30) AND a!=29) + OR a=39 + OR b=165 + } +} {15 28 30 39 scan 0 sort 0} +do_test where7-2.806.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=50 + OR ((a BETWEEN 61 AND 63) AND a!=62) + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR a=32 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR a=14 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=946 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR b=124 + } +} {14 17 32 43 45 50 53 55 61 63 69 86 93 95 97 scan 0 sort 0} +do_test where7-2.806.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=50 + OR ((a BETWEEN 61 AND 63) AND a!=62) + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR a=32 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR a=14 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR b=946 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR b=124 + } +} {14 17 32 43 45 50 53 55 61 63 69 86 93 95 97 scan 0 sort 0} +do_test where7-2.807.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {52 66 88 90 scan 0 sort 0} +do_test where7-2.807.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 88 AND 90) AND a!=89) + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {52 66 88 90 scan 0 sort 0} +do_test where7-2.808.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=6 + OR f='tuvwxyzab' + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=286 + OR b=781 + } +} {6 19 26 45 59 71 97 scan 0 sort 0} +do_test where7-2.808.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=6 + OR f='tuvwxyzab' + OR (g='mlkjihg' AND f GLOB 'hijkl*') + OR b=286 + OR b=781 + } +} {6 19 26 45 59 71 97 scan 0 sort 0} +do_test where7-2.809.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR f='vwxyzabcd' + OR b=275 + } +} {9 11 21 25 35 37 43 47 61 63 73 77 79 81 87 89 99 100 scan 0 sort 0} +do_test where7-2.809.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR f='vwxyzabcd' + OR b=275 + } +} {9 11 21 25 35 37 43 47 61 63 73 77 79 81 87 89 99 100 scan 0 sort 0} +do_test where7-2.810.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'nopqr*') + OR a=59 + } +} {4 30 59 64 89 91 scan 0 sort 0} +do_test where7-2.810.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR (g='xwvutsr' AND f GLOB 'efghi*') + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (g='gfedcba' 
AND f GLOB 'nopqr*') + OR a=59 + } +} {4 30 59 64 89 91 scan 0 sort 0} +do_test where7-2.811.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR b=663 + OR f='ghijklmno' + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR f='ghijklmno' + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR b=1081 + } +} {6 8 10 14 16 19 22 32 48 58 64 71 74 84 99 100 scan 0 sort 0} +do_test where7-2.811.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR b=663 + OR f='ghijklmno' + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR f='ghijklmno' + OR (d>=64.0 AND d<65.0 AND d NOT NULL) + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + OR b=1081 + } +} {6 8 10 14 16 19 22 32 48 58 64 71 74 84 99 100 scan 0 sort 0} +do_test where7-2.812.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR a=96 + OR b=355 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR b=597 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR b=168 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + } +} {10 15 30 32 36 62 81 88 92 94 96 scan 0 sort 0} +do_test where7-2.812.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 30 AND 32) AND a!=31) + OR a=96 + OR b=355 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR b=597 + OR ((a BETWEEN 92 AND 94) AND a!=93) + OR (d>=88.0 AND d<89.0 AND d NOT NULL) + OR (f GLOB '?lmno*' AND f GLOB 'klmn*') + OR b=168 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + } +} {10 15 30 32 36 62 81 88 92 94 96 scan 0 sort 0} +do_test where7-2.813.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=957 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=40 + } +} {9 40 47 58 60 87 89 scan 0 sort 0} +do_test where7-2.813.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=47.0 AND d<48.0 AND d NOT NULL) + OR b=957 + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR a=40 + } +} {9 40 47 58 60 87 89 scan 0 sort 0} +do_test where7-2.814.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 12 AND 14) AND a!=13) + OR a=36 + OR a=75 + OR b=179 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR b=850 + OR a=62 + } +} {12 14 18 36 43 62 65 75 scan 0 sort 0} +do_test where7-2.814.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 12 AND 14) AND a!=13) + OR a=36 + OR a=75 + OR b=179 + OR (d>=43.0 AND d<44.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'stuvw*') + OR (d>=65.0 AND d<66.0 AND d NOT NULL) + OR b=850 + OR a=62 + } +} {12 14 18 36 43 62 65 75 scan 0 sort 0} +do_test where7-2.815.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR a=79 + OR a=66 + OR b=487 + OR a=1 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR c=5005 + OR a=47 + OR c=5005 + OR b=319 + OR b=1037 + } +} {1 13 14 15 21 23 29 47 54 66 79 scan 0 sort 0} +do_test where7-2.815.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR a=79 + OR a=66 + OR b=487 + OR a=1 + OR (d>=54.0 AND 
d<55.0 AND d NOT NULL) + OR c=5005 + OR a=47 + OR c=5005 + OR b=319 + OR b=1037 + } +} {1 13 14 15 21 23 29 47 54 66 79 scan 0 sort 0} +do_test where7-2.816.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=176 + OR b=297 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR f='ijklmnopq' + } +} {8 16 25 27 34 60 86 scan 0 sort 0} +do_test where7-2.816.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=176 + OR b=297 + OR (g='tsrqpon' AND f GLOB 'zabcd*') + OR f='ijklmnopq' + } +} {8 16 25 27 34 60 86 scan 0 sort 0} +do_test where7-2.817.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR b=319 + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR a=21 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {9 10 11 21 29 31 33 72 90 92 scan 0 sort 0} +do_test where7-2.817.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=10.0 AND d<11.0 AND d NOT NULL) + OR ((a BETWEEN 90 AND 92) AND a!=91) + OR b=319 + OR ((a BETWEEN 31 AND 33) AND a!=32) + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR a=21 + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {9 10 11 21 29 31 33 72 90 92 scan 0 sort 0} +do_test where7-2.818.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=396 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=1012 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=784 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=979 + OR c<=10 + OR b=913 + OR b=66 + } +} {6 9 22 35 36 60 61 72 83 87 89 92 scan 0 sort 0} +do_test where7-2.818.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=22.0 AND d<23.0 AND d NOT NULL) + OR b=396 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR b=1012 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR b=784 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + OR b=979 + OR c<=10 + OR b=913 + OR b=66 + } +} {6 9 22 35 36 60 61 72 83 87 89 92 scan 0 sort 0} +do_test where7-2.819.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=3 + OR b=803 + } +} {3 73 scan 0 sort 0} +do_test where7-2.819.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=3 + OR b=803 + } +} {3 73 scan 0 sort 0} +do_test where7-2.820.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 79 AND 81) AND a!=80) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + } +} {16 19 23 25 42 45 68 71 79 81 94 97 scan 0 sort 0} +do_test where7-2.820.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 79 AND 81) AND a!=80) + OR (f GLOB '?rstu*' AND f GLOB 'qrst*') + OR ((a BETWEEN 23 AND 25) AND a!=24) + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + } +} {16 19 23 25 42 45 68 71 79 81 94 97 scan 0 sort 0} +do_test where7-2.821.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=1001 + OR a=16 + OR b=132 + OR b=1012 + OR f='xyzabcdef' + OR b=682 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + } +} {1 2 3 12 16 23 49 52 62 75 92 scan 0 sort 0} +do_test where7-2.821.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=1001 + OR a=16 + OR b=132 + OR b=1012 + OR f='xyzabcdef' + OR b=682 + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + } +} {1 2 3 12 16 23 49 52 62 75 92 scan 0 sort 0} +do_test where7-2.822.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=96 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + } +} {19 96 scan 0 sort 0} +do_test where7-2.822.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=96 + OR (d>=19.0 AND d<20.0 AND d NOT NULL) + } +} {19 96 scan 0 sort 0} +do_test where7-2.823.1 { + 
count_steps_sort { + SELECT a FROM t2 + WHERE a=2 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR a=23 + OR b=1092 + OR c=19019 + OR b=245 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR b=572 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {2 11 12 22 23 24 38 52 55 56 57 64 68 70 90 97 99 scan 0 sort 0} +do_test where7-2.823.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=2 + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + OR a=23 + OR b=1092 + OR c=19019 + OR b=245 + OR ((a BETWEEN 97 AND 99) AND a!=98) + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR b=572 + OR ((a BETWEEN 22 AND 24) AND a!=23) + } +} {2 11 12 22 23 24 38 52 55 56 57 64 68 70 90 97 99 scan 0 sort 0} +do_test where7-2.824.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=993 + OR c=17017 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=333 + } +} {16 29 37 49 50 51 53 85 scan 0 sort 0} +do_test where7-2.824.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=16.0 AND d<17.0 AND d NOT NULL) + OR b=993 + OR c=17017 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=333 + } +} {16 29 37 49 50 51 53 85 scan 0 sort 0} +do_test where7-2.825.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=330 + OR a=73 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=828 + OR b=363 + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {30 33 37 40 61 73 scan 0 sort 0} +do_test where7-2.825.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=330 + OR a=73 + OR (d>=61.0 AND d<62.0 AND d NOT NULL) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=828 + OR b=363 + OR (g='rqponml' AND f GLOB 'lmnop*') + } +} {30 33 37 40 61 73 scan 0 sort 0} +do_test where7-2.826.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'lmnop*') + OR a=41 + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=825 + } +} {29 41 75 89 scan 0 sort 0} +do_test where7-2.826.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'lmnop*') + OR a=41 + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=825 + } +} {29 41 75 89 scan 0 sort 0} +do_test where7-2.827.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 46 AND 48) AND a!=47) + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=905 + OR b=176 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=561 + OR c=8008 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR b=935 + OR c=1001 + } +} {1 2 3 10 16 22 23 24 46 48 51 84 85 89 91 scan 0 sort 0} +do_test where7-2.827.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 46 AND 48) AND a!=47) + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + OR b=905 + OR b=176 + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR b=561 + OR c=8008 + OR (g='hgfedcb' AND f GLOB 'ghijk*') + OR b=935 + OR c=1001 + } +} {1 2 3 10 16 22 23 24 46 48 51 84 85 89 91 scan 0 sort 0} +do_test where7-2.828.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 75 AND 77) AND a!=76) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {72 75 77 scan 0 sort 0} +do_test where7-2.828.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 75 AND 77) AND a!=76) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {72 75 77 scan 0 sort 0} +do_test 
where7-2.829.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 5 AND 7) AND a!=6) + OR a=28 + } +} {5 7 28 scan 0 sort 0} +do_test where7-2.829.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 5 AND 7) AND a!=6) + OR a=28 + } +} {5 7 28 scan 0 sort 0} +do_test where7-2.830.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=781 + OR b=410 + } +} {71 scan 0 sort 0} +do_test where7-2.830.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=781 + OR b=410 + } +} {71 scan 0 sort 0} +do_test where7-2.831.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 18 AND 20) AND a!=19) + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR f='zabcdefgh' + OR b=861 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR a=28 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=311 + } +} {6 15 18 20 25 28 32 40 42 51 56 58 63 72 77 84 scan 0 sort 0} +do_test where7-2.831.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 18 AND 20) AND a!=19) + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR f='zabcdefgh' + OR b=861 + OR (g='vutsrqp' AND f GLOB 'pqrst*') + OR a=28 + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=311 + } +} {6 15 18 20 25 28 32 40 42 51 56 58 63 72 77 84 scan 0 sort 0} +do_test where7-2.832.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=575 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=418 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR b=792 + OR b=861 + OR b=220 + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {12 15 20 38 41 64 67 72 73 89 90 91 93 scan 0 sort 0} +do_test where7-2.832.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=575 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR b=418 + OR (f GLOB '?qrst*' AND f GLOB 'pqrs*') + OR b=792 + OR b=861 + OR b=220 + OR ((a BETWEEN 89 AND 91) AND a!=90) + } +} {12 15 20 38 41 64 67 72 73 89 90 91 93 scan 0 sort 0} +do_test where7-2.833.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=517 + OR b=913 + OR b=253 + OR b=198 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=17 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {17 18 23 27 47 83 85 scan 0 sort 0} +do_test where7-2.833.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=517 + OR b=913 + OR b=253 + OR b=198 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR a=17 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + } +} {17 18 23 27 47 83 85 scan 0 sort 0} +do_test where7-2.834.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'qrstu*') + OR b=693 + OR a=73 + OR b=627 + OR c=5005 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=267 + OR b=872 + OR a=27 + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {13 14 15 27 28 42 57 62 63 73 88 scan 0 sort 0} +do_test where7-2.834.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'qrstu*') + OR b=693 + OR a=73 + OR b=627 + OR c=5005 + OR (d>=62.0 AND d<63.0 AND d NOT NULL) + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR b=267 + OR b=872 + OR a=27 + OR (g='gfedcba' AND f GLOB 'klmno*') + } +} {13 14 15 27 28 42 57 62 63 73 88 scan 0 sort 0} +do_test where7-2.835.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=10 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=1059 + OR 
a=70 + OR a=93 + } +} {10 13 15 70 93 95 97 scan 0 sort 0} +do_test where7-2.835.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=10 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=1059 + OR a=70 + OR a=93 + } +} {10 13 15 70 93 95 97 scan 0 sort 0} +do_test where7-2.836.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=30 + OR a=32 + OR b=1037 + OR b=198 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR a=25 + } +} {13 18 25 30 32 scan 0 sort 0} +do_test where7-2.836.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=30 + OR a=32 + OR b=1037 + OR b=198 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR a=25 + } +} {13 18 25 30 32 scan 0 sort 0} +do_test where7-2.837.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=66 + OR b=322 + OR b=465 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (g='lkjihgf' AND f GLOB 'mnopq*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=454 + } +} {6 7 38 46 64 77 79 89 scan 0 sort 0} +do_test where7-2.837.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ponmlkj' AND f GLOB 'uvwxy*') + OR b=66 + OR b=322 + OR b=465 + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR (d>=7.0 AND d<8.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) AND a!=78) + OR (g='lkjihgf' AND f GLOB 'mnopq*') + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=454 + } +} {6 7 38 46 64 77 79 89 scan 0 sort 0} +do_test where7-2.838.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=924 + OR ((a BETWEEN 35 AND 37) AND a!=36) + OR c=15015 + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR b=803 + } +} {3 5 35 37 43 44 45 52 73 84 scan 0 sort 0} +do_test where7-2.838.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=924 + OR ((a BETWEEN 35 AND 37) AND a!=36) + OR c=15015 + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR b=803 + } +} {3 5 35 37 43 44 45 52 73 84 scan 0 sort 0} +do_test where7-2.839.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1100 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR a=75 + OR a=45 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR a=27 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=850 + OR ((a BETWEEN 55 AND 57) AND a!=56) + } +} {12 27 45 55 57 68 70 72 74 75 77 90 100 scan 0 sort 0} +do_test where7-2.839.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1100 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR ((a BETWEEN 72 AND 74) AND a!=73) + OR ((a BETWEEN 68 AND 70) AND a!=69) + OR a=75 + OR a=45 + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR a=27 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR b=850 + OR ((a BETWEEN 55 AND 57) AND a!=56) + } +} {12 27 45 55 57 68 70 72 74 75 77 90 100 scan 0 sort 0} +do_test where7-2.840.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=751 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=89 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {36 38 56 71 89 96 98 scan 0 sort 0} +do_test where7-2.840.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=751 + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR (d>=71.0 AND d<72.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 
'lmnop*') + OR (d>=56.0 AND d<57.0 AND d NOT NULL) + OR a=89 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {36 38 56 71 89 96 98 scan 0 sort 0} +do_test where7-2.841.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR a=1 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + } +} {1 9 19 scan 0 sort 0} +do_test where7-2.841.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='wvutsrq' AND f GLOB 'jklmn*') + OR (g='yxwvuts' AND f GLOB 'bcdef*') + OR a=1 + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + } +} {1 9 19 scan 0 sort 0} +do_test where7-2.842.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=927 + OR c=15015 + OR f='klmnopqrs' + OR c=8008 + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR b=960 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR b=443 + OR (g='rqponml' AND f GLOB 'ijklm*') + } +} {10 22 23 24 34 36 41 43 44 45 62 76 88 scan 0 sort 0} +do_test where7-2.842.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=927 + OR c=15015 + OR f='klmnopqrs' + OR c=8008 + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR b=960 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR b=443 + OR (g='rqponml' AND f GLOB 'ijklm*') + } +} {10 22 23 24 34 36 41 43 44 45 62 76 88 scan 0 sort 0} +do_test where7-2.843.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=212 + OR f='cdefghijk' + } +} {2 28 37 54 80 scan 0 sort 0} +do_test where7-2.843.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=37.0 AND d<38.0 AND d NOT NULL) + OR b=212 + OR f='cdefghijk' + } +} {2 28 37 54 80 scan 0 sort 0} +do_test where7-2.844.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=685 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR b=520 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR a=53 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=938 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR c=25025 + } +} {43 53 63 73 74 75 76 91 93 scan 0 sort 0} +do_test where7-2.844.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=685 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR b=520 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR a=53 + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=938 + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR c=25025 + } +} {43 53 63 73 74 75 76 91 93 scan 0 sort 0} +do_test where7-2.845.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=583 + OR b=894 + OR c=26026 + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + } +} {53 76 77 78 84 scan 0 sort 0} +do_test where7-2.845.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=583 + OR b=894 + OR c=26026 + OR (d>=84.0 AND d<85.0 AND d NOT NULL) + } +} {53 76 77 78 84 scan 0 sort 0} +do_test where7-2.846.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='abcdefghi' + OR (g='edcbazy' AND f GLOB 'wxyza*') + } +} {26 52 78 100 scan 0 sort 0} +do_test where7-2.846.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='abcdefghi' + OR (g='edcbazy' AND f GLOB 'wxyza*') + } +} {26 52 78 100 scan 0 sort 0} +do_test where7-2.847.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1103 + OR b=638 + OR b=792 + OR b=1034 + OR b=308 + OR f='nopqrstuv' + OR b=264 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR (d>=58.0 AND d<59.0 AND d NOT NULL) + } +} {13 24 28 33 35 39 58 65 72 91 94 scan 0 sort 0} +do_test where7-2.847.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1103 + OR b=638 + OR b=792 + OR b=1034 + OR b=308 + OR f='nopqrstuv' + OR b=264 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR (d>=58.0 AND d<59.0 AND 
d NOT NULL) + } +} {13 24 28 33 35 39 58 65 72 91 94 scan 0 sort 0} +do_test where7-2.848.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='defghijkl' + OR b=814 + OR f='yzabcdefg' + } +} {3 24 29 50 55 74 76 81 scan 0 sort 0} +do_test where7-2.848.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='defghijkl' + OR b=814 + OR f='yzabcdefg' + } +} {3 24 29 50 55 74 76 81 scan 0 sort 0} +do_test where7-2.849.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=209 + OR b=806 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {8 17 19 scan 0 sort 0} +do_test where7-2.849.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=209 + OR b=806 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'rstuv*') + } +} {8 17 19 scan 0 sort 0} +do_test where7-2.850.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='opqrstuvw' + OR b=69 + OR b=366 + } +} {14 40 66 92 scan 0 sort 0} +do_test where7-2.850.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='opqrstuvw' + OR b=69 + OR b=366 + } +} {14 40 66 92 scan 0 sort 0} +do_test where7-2.851.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=45 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR a=69 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {4 45 69 71 72 scan 0 sort 0} +do_test where7-2.851.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=45 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR a=69 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {4 45 69 71 72 scan 0 sort 0} +do_test where7-2.852.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=9009 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + } +} {9 10 25 26 27 67 85 scan 0 sort 0} +do_test where7-2.852.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=9009 + OR (d>=85.0 AND d<86.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR (d>=9.0 AND d<10.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'pqrst*') + } +} {9 10 25 26 27 67 85 scan 0 sort 0} +do_test where7-2.853.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=98 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=47 + OR c=24024 + OR a=27 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + } +} {20 27 45 47 63 70 71 72 98 100 scan 0 sort 0} +do_test where7-2.853.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=98 + OR (d>=100.0 AND d<101.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + OR a=47 + OR c=24024 + OR a=27 + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + } +} {20 27 45 47 63 70 71 72 98 100 scan 0 sort 0} +do_test where7-2.854.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=19 + } +} {19 22 44 scan 0 sort 0} +do_test where7-2.854.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=19 + } +} {19 22 44 scan 0 sort 0} +do_test where7-2.855.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=12012 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + } +} {16 18 34 35 36 
80 98 scan 0 sort 0} +do_test where7-2.855.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=12012 + OR (d>=80.0 AND d<81.0 AND d NOT NULL) + OR ((a BETWEEN 16 AND 18) AND a!=17) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + } +} {16 18 34 35 36 80 98 scan 0 sort 0} +do_test where7-2.856.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 38 AND 40) AND a!=39) + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR b=429 + OR f='jklmnopqr' + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {9 12 35 38 39 40 48 61 64 77 79 87 90 scan 0 sort 0} +do_test where7-2.856.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 38 AND 40) AND a!=39) + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR b=429 + OR f='jklmnopqr' + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR ((a BETWEEN 77 AND 79) AND a!=78) + } +} {9 12 35 38 39 40 48 61 64 77 79 87 90 scan 0 sort 0} +do_test where7-2.857.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=190 + } +} {64 scan 0 sort 0} +do_test where7-2.857.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=190 + } +} {64 scan 0 sort 0} +do_test where7-2.858.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'yzabc*') + OR b=674 + OR b=289 + } +} {76 scan 0 sort 0} +do_test where7-2.858.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'yzabc*') + OR b=674 + OR b=289 + } +} {76 scan 0 sort 0} +do_test where7-2.859.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=17 + OR b=539 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + } +} {17 21 47 49 scan 0 sort 0} +do_test where7-2.859.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=17 + OR b=539 + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (g='utsrqpo' AND f GLOB 'vwxyz*') + } +} {17 21 47 49 scan 0 sort 0} +do_test where7-2.860.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=924 + OR c=27027 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {65 67 79 80 81 84 scan 0 sort 0} +do_test where7-2.860.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=924 + OR c=27027 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {65 67 79 80 81 84 scan 0 sort 0} +do_test where7-2.861.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=564 + OR f='mnopqrstu' + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=1103 + } +} {12 28 30 38 64 90 scan 0 sort 0} +do_test where7-2.861.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=564 + OR f='mnopqrstu' + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=1103 + } +} {12 28 30 38 64 90 scan 0 sort 0} +do_test where7-2.862.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=231 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR a=38 + OR a=4 + OR b=784 + } +} {4 21 24 38 scan 0 sort 0} +do_test where7-2.862.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=231 + OR (d>=24.0 AND d<25.0 AND d NOT NULL) + OR a=38 + OR a=4 + OR b=784 + } +} {4 21 24 38 scan 0 sort 0} +do_test where7-2.863.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='ghijklmno' + OR a=26 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=81 + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=275 + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=311 + OR b=894 + OR b=872 + } +} {3 6 25 26 28 30 32 58 68 81 84 87 scan 0 sort 0} +do_test where7-2.863.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='ghijklmno' + OR a=26 + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR a=81 + OR (d>=3.0 AND d<4.0 AND d 
NOT NULL) + OR ((a BETWEEN 28 AND 30) AND a!=29) + OR b=275 + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR b=311 + OR b=894 + OR b=872 + } +} {3 6 25 26 28 30 32 58 68 81 84 87 scan 0 sort 0} +do_test where7-2.864.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=91 + OR b=619 + } +} {91 scan 0 sort 0} +do_test where7-2.864.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=91 + OR b=619 + } +} {91 scan 0 sort 0} +do_test where7-2.865.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=85 + OR f IS NULL + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=154 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {4 14 25 40 42 49 51 66 68 85 87 89 scan 0 sort 0} +do_test where7-2.865.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=4.0 AND d<5.0 AND d NOT NULL) + OR a=85 + OR f IS NULL + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=154 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + } +} {4 14 25 40 42 49 51 66 68 85 87 89 scan 0 sort 0} +do_test where7-2.866.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=44 + OR b=55 + OR a=30 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR 1000000<b + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {5 19 24 30 33 44 45 71 75 97 99 scan 0 sort 0} +do_test where7-2.866.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=44 + OR b=55 + OR a=30 + OR (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR 1000000<b + OR (d>=75.0 AND d<76.0 AND d NOT NULL) + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {5 19 24 30 33 44 45 71 75 97 99 scan 0 sort 0} +do_test where7-2.867.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=16.0 AND d<17.0 AND d NOT NULL) + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=36 + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {16 33 35 80 82 scan 0 sort 0} +do_test where7-2.867.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=16.0 AND d<17.0 AND d NOT NULL) + OR ((a BETWEEN 80 AND 82) AND a!=81) + OR b=36 + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {16 33 35 80 82 scan 0 sort 0} +do_test where7-2.868.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR c=26026 + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=56 + OR b=506 + OR b=781 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + } +} {28 35 37 39 46 56 71 76 77 78 97 scan 0 sort 0} +do_test where7-2.868.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR c=26026 + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=56 + OR b=506 + OR b=781 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + } +} {28 35 37 39 46 56 71 76 77 78 97 scan 0 sort 0} +do_test where7-2.869.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='edcbazy' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR f='xyzabcdef' + OR b=517 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (g='kjihgfe' AND f GLOB 'qrstu*') + } +} {23 25 27 39 47 49 68 75 76 89 91 98 scan 0 sort 0} +do_test where7-2.869.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='edcbazy' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 25 AND 27) AND a!=26) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR ((a BETWEEN 89 AND 91) AND a!=90) + OR f='xyzabcdef' + 
OR b=517 + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR (g='kjihgfe' AND f GLOB 'qrstu*') + } +} {23 25 27 39 47 49 68 75 76 89 91 98 scan 0 sort 0} +do_test where7-2.870.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=54 + OR a=59 + } +} {54 59 scan 0 sort 0} +do_test where7-2.870.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=54 + OR a=59 + } +} {54 59 scan 0 sort 0} +do_test where7-2.871.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='tsrqpon' AND f GLOB 'yzabc*') + OR b=762 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR a=25 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {24 25 48 65 67 scan 0 sort 0} +do_test where7-2.871.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='tsrqpon' AND f GLOB 'yzabc*') + OR b=762 + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR a=25 + OR ((a BETWEEN 65 AND 67) AND a!=66) + } +} {24 25 48 65 67 scan 0 sort 0} +do_test where7-2.872.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=59.0 AND d<60.0 AND d NOT NULL) + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=839 + OR f='defghijkl' + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=498 + } +} {3 14 16 29 52 55 59 60 81 85 95 scan 0 sort 0} +do_test where7-2.872.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=59.0 AND d<60.0 AND d NOT NULL) + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=839 + OR f='defghijkl' + OR (d>=95.0 AND d<96.0 AND d NOT NULL) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (d>=52.0 AND d<53.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR b=498 + } +} {3 14 16 29 52 55 59 60 81 85 95 scan 0 sort 0} +do_test where7-2.873.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=256 + OR c=19019 + OR a=54 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=498 + OR b=77 + } +} {7 46 54 55 56 57 scan 0 sort 0} +do_test where7-2.873.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=256 + OR c=19019 + OR a=54 + OR (d>=46.0 AND d<47.0 AND d NOT NULL) + OR b=498 + OR b=77 + } +} {7 46 54 55 56 57 scan 0 sort 0} +do_test where7-2.874.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=256 + OR b=586 + OR a=74 + OR b=113 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=495 + } +} {45 61 74 99 scan 0 sort 0} +do_test where7-2.874.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='mlkjihg' AND f GLOB 'jklmn*') + OR b=256 + OR b=586 + OR a=74 + OR b=113 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=495 + } +} {45 61 74 99 scan 0 sort 0} +do_test where7-2.875.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=12 + OR a=50 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR ((a BETWEEN 66 AND 68) AND a!=67) + } +} {12 33 50 66 68 scan 0 sort 0} +do_test where7-2.875.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=12 + OR a=50 + OR (d>=33.0 AND d<34.0 AND d NOT NULL) + OR ((a BETWEEN 66 AND 68) AND a!=67) + } +} {12 33 50 66 68 scan 0 sort 0} +do_test where7-2.876.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=308 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR a=83 + OR c=23023 + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR a=58 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (g='srqponm' AND f GLOB 'efghi*') + OR c=4004 + } +} {10 11 12 17 19 28 30 53 57 58 65 67 68 69 73 83 scan 0 sort 0} +do_test where7-2.876.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=308 + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + OR 
(g='nmlkjih' AND f GLOB 'bcdef*') + OR a=83 + OR c=23023 + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR (g='lkjihgf' AND f GLOB 'nopqr*') + OR a=58 + OR ((a BETWEEN 17 AND 19) AND a!=18) + OR (g='srqponm' AND f GLOB 'efghi*') + OR c=4004 + } +} {10 11 12 17 19 28 30 53 57 58 65 67 68 69 73 83 scan 0 sort 0} +do_test where7-2.877.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=476 + OR a=26 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=762 + OR b=157 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'cdefg*') + } +} {17 26 54 87 scan 0 sort 0} +do_test where7-2.877.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=476 + OR a=26 + OR (d>=87.0 AND d<88.0 AND d NOT NULL) + OR b=762 + OR b=157 + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (g='nmlkjih' AND f GLOB 'cdefg*') + } +} {17 26 54 87 scan 0 sort 0} +do_test where7-2.878.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=77.0 AND d<78.0 AND d NOT NULL) + OR a=1 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=278 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR f='qrstuvwxy' + OR (g='onmlkji' AND f GLOB 'abcde*') + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {1 3 16 42 52 68 72 74 77 82 84 93 94 95 98 scan 0 sort 0} +do_test where7-2.878.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=77.0 AND d<78.0 AND d NOT NULL) + OR a=1 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=278 + OR (g='xwvutsr' AND f GLOB 'defgh*') + OR f='qrstuvwxy' + OR (g='onmlkji' AND f GLOB 'abcde*') + OR ((a BETWEEN 82 AND 84) AND a!=83) + OR (g='edcbazy' AND f GLOB 'uvwxy*') + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + } +} {1 3 16 42 52 68 72 74 77 82 84 93 94 95 98 scan 0 sort 0} +do_test where7-2.879.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=124 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=759 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR ((a BETWEEN 45 AND 47) AND a!=46) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {12 38 41 43 45 47 64 69 72 90 92 96 scan 0 sort 0} +do_test where7-2.879.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=124 + OR (d>=92.0 AND d<93.0 AND d NOT NULL) + OR ((a BETWEEN 41 AND 43) AND a!=42) + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=759 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR ((a BETWEEN 45 AND 47) AND a!=46) + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + } +} {12 38 41 43 45 47 64 69 72 90 92 96 scan 0 sort 0} +do_test where7-2.880.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=872 + OR b=267 + OR b=814 + OR b=99 + OR c<=10 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR b=44 + OR f='zabcdefgh' + OR b=979 + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {4 8 9 10 25 33 51 74 77 89 scan 0 sort 0} +do_test where7-2.880.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=872 + OR b=267 + OR b=814 + OR b=99 + OR c<=10 + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR b=44 + OR f='zabcdefgh' + OR b=979 + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {4 8 9 10 25 33 51 74 77 89 scan 0 sort 0} +do_test where7-2.881.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR f='xyzabcdef' + } +} {23 26 49 75 scan 0 sort 0} +do_test where7-2.881.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=26.0 AND d<27.0 AND d NOT NULL) + OR f='xyzabcdef' + } +} {23 26 49 75 scan 0 sort 0} +do_test where7-2.882.1 { + 
count_steps_sort { + SELECT a FROM t2 + WHERE b=487 + OR b=355 + OR c=9009 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=113 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=90 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR f='nopqrstuv' + } +} {8 13 24 25 26 27 32 34 39 65 66 87 90 91 scan 0 sort 0} +do_test where7-2.882.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=487 + OR b=355 + OR c=9009 + OR (d>=8.0 AND d<9.0 AND d NOT NULL) + OR ((a BETWEEN 32 AND 34) AND a!=33) + OR b=113 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=90 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR (g='hgfedcb' AND f GLOB 'jklmn*') + OR f='nopqrstuv' + } +} {8 13 24 25 26 27 32 34 39 65 66 87 90 91 scan 0 sort 0} +do_test where7-2.883.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=275 + } +} {25 34 scan 0 sort 0} +do_test where7-2.883.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=34.0 AND d<35.0 AND d NOT NULL) + OR b=275 + } +} {25 34 scan 0 sort 0} +do_test where7-2.884.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=355 + OR a=44 + OR b=374 + OR c=25025 + OR b=198 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR d<0.0 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR c=9009 + } +} {4 6 18 25 26 27 34 41 44 69 71 73 74 75 scan 0 sort 0} +do_test where7-2.884.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=355 + OR a=44 + OR b=374 + OR c=25025 + OR b=198 + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR d<0.0 + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR c=9009 + } +} {4 6 18 25 26 27 34 41 44 69 71 73 74 75 scan 0 sort 0} +do_test where7-2.885.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=814 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {19 45 54 71 74 97 scan 0 sort 0} +do_test where7-2.885.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?uvwx*' AND f GLOB 'tuvw*') + OR b=814 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + } +} {19 45 54 71 74 97 scan 0 sort 0} +do_test where7-2.886.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=333 + OR b=275 + } +} {25 64 scan 0 sort 0} +do_test where7-2.886.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'mnopq*') + OR b=333 + OR b=275 + } +} {25 64 scan 0 sort 0} +do_test where7-2.887.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {33 35 82 scan 0 sort 0} +do_test where7-2.887.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='ihgfedc' AND f GLOB 'efghi*') + OR ((a BETWEEN 33 AND 35) AND a!=34) + } +} {33 35 82 scan 0 sort 0} +do_test where7-2.888.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 11 AND 13) AND a!=12) + OR b=253 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=286 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + } +} {10 11 13 14 23 26 40 66 92 scan 0 sort 0} +do_test where7-2.888.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 11 AND 13) AND a!=12) + OR b=253 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=286 + OR (d>=10.0 AND d<11.0 AND d NOT NULL) + } +} {10 11 13 14 23 26 40 66 92 scan 0 sort 0} +do_test where7-2.889.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR b=421 + OR 
(g='xwvutsr' AND f GLOB 'fghij*') + OR f='ijklmnopq' + OR b=891 + OR b=1056 + } +} {5 8 15 26 28 34 60 81 86 90 96 scan 0 sort 0} +do_test where7-2.889.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR ((a BETWEEN 26 AND 28) AND a!=27) + OR b=421 + OR (g='xwvutsr' AND f GLOB 'fghij*') + OR f='ijklmnopq' + OR b=891 + OR b=1056 + } +} {5 8 15 26 28 34 60 81 86 90 96 scan 0 sort 0} +do_test where7-2.890.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='fghijklmn' + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR b=671 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + } +} {5 7 31 39 57 61 83 99 scan 0 sort 0} +do_test where7-2.890.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='fghijklmn' + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR (g='edcbazy' AND f GLOB 'vwxyz*') + OR b=671 + OR (g='xwvutsr' AND f GLOB 'hijkl*') + } +} {5 7 31 39 57 61 83 99 scan 0 sort 0} +do_test where7-2.891.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='lkjihgf' AND f GLOB 'lmnop*') + OR (g='srqponm' AND f GLOB 'fghij*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } +} {4 6 11 31 63 68 scan 0 sort 0} +do_test where7-2.891.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='lkjihgf' AND f GLOB 'lmnop*') + OR (g='srqponm' AND f GLOB 'fghij*') + OR ((a BETWEEN 4 AND 6) AND a!=5) + OR (g='kjihgfe' AND f GLOB 'qrstu*') + OR (d>=11.0 AND d<12.0 AND d NOT NULL) + } +} {4 6 11 31 63 68 scan 0 sort 0} +do_test where7-2.892.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=11011 + OR a=20 + OR b=432 + OR b=410 + OR a=86 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=638 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR b=190 + } +} {20 31 32 33 58 60 86 89 scan 0 sort 0} +do_test where7-2.892.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=11011 + OR a=20 + OR b=432 + OR b=410 + OR a=86 + OR (d>=89.0 AND d<90.0 AND d NOT NULL) + OR b=638 + OR ((a BETWEEN 58 AND 60) AND a!=59) + OR b=190 + } +} {20 31 32 33 58 60 86 89 scan 0 sort 0} +do_test where7-2.893.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=88 + OR ((a BETWEEN 42 AND 44) AND a!=43) + OR a=76 + OR b=69 + OR b=847 + OR b=275 + } +} {8 25 42 44 76 77 scan 0 sort 0} +do_test where7-2.893.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=88 + OR ((a BETWEEN 42 AND 44) AND a!=43) + OR a=76 + OR b=69 + OR b=847 + OR b=275 + } +} {8 25 42 44 76 77 scan 0 sort 0} +do_test where7-2.894.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=781 + OR b=77 + OR b=58 + OR ((a BETWEEN 67 AND 69) AND a!=68) + } +} {7 67 69 71 scan 0 sort 0} +do_test where7-2.894.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=781 + OR b=77 + OR b=58 + OR ((a BETWEEN 67 AND 69) AND a!=68) + } +} {7 67 69 71 scan 0 sort 0} +do_test where7-2.895.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR a=46 + OR b=187 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='onmlkji' AND f GLOB 'yzabc*') + } +} {17 20 46 50 67 69 71 scan 0 sort 0} +do_test where7-2.895.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 67 AND 69) AND a!=68) + OR (d>=69.0 AND d<70.0 AND d NOT NULL) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR a=46 + OR b=187 + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR (g='onmlkji' AND f GLOB 'yzabc*') + } +} {17 20 46 50 67 69 71 scan 0 sort 0} +do_test 
where7-2.896.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=99 + OR c=3003 + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=300 + OR b=718 + OR c>=34035 + OR b=264 + } +} {7 8 9 24 57 97 99 scan 0 sort 0} +do_test where7-2.896.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=99 + OR c=3003 + OR (d>=57.0 AND d<58.0 AND d NOT NULL) + OR b=300 + OR b=718 + OR c>=34035 + OR b=264 + } +} {7 8 9 24 57 97 99 scan 0 sort 0} +do_test where7-2.897.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=872 + OR b=209 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR b=355 + OR b=729 + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR a=58 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=608 + } +} {14 19 40 58 65 66 67 81 83 92 scan 0 sort 0} +do_test where7-2.897.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=872 + OR b=209 + OR ((a BETWEEN 65 AND 67) AND a!=66) + OR b=355 + OR b=729 + OR ((a BETWEEN 81 AND 83) AND a!=82) + OR a=58 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR b=608 + } +} {14 19 40 58 65 66 67 81 83 92 scan 0 sort 0} +do_test where7-2.898.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=29029 + OR f='efghijklm' + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR a=26 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + } +} {3 4 26 29 30 48 55 56 81 82 85 86 87 scan 0 sort 0} +do_test where7-2.898.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=29029 + OR f='efghijklm' + OR (d>=48.0 AND d<49.0 AND d NOT NULL) + OR a=26 + OR (f GLOB '?efgh*' AND f GLOB 'defg*') + } +} {3 4 26 29 30 48 55 56 81 82 85 86 87 scan 0 sort 0} +do_test where7-2.899.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=59 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR a=7 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=762 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {7 12 14 26 40 59 66 92 scan 0 sort 0} +do_test where7-2.899.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=59 + OR (g='wvutsrq' AND f GLOB 'mnopq*') + OR a=7 + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=762 + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + } +} {7 12 14 26 40 59 66 92 scan 0 sort 0} +do_test where7-2.900.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR b=539 + OR b=399 + } +} {49 91 scan 0 sort 0} +do_test where7-2.900.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'nopqr*') + OR b=539 + OR b=399 + } +} {49 91 scan 0 sort 0} +do_test where7-2.901.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR a=92 + } +} {71 73 92 96 98 scan 0 sort 0} +do_test where7-2.901.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 71 AND 73) AND a!=72) + OR ((a BETWEEN 96 AND 98) AND a!=97) + OR a=92 + } +} {71 73 92 96 98 scan 0 sort 0} +do_test where7-2.902.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR f='lmnopqrst' + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {9 11 35 36 37 57 61 63 87 89 scan 0 sort 0} +do_test where7-2.902.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR (g='rqponml' AND f GLOB 'klmno*') + OR f='lmnopqrst' + OR (g='nmlkjih' AND f GLOB 'fghij*') + } +} {9 11 35 36 37 57 61 63 87 89 scan 0 sort 0} +do_test where7-2.903.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 69 AND 71) AND a!=70) + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=652 + } 
+} {69 71 91 93 scan 0 sort 0} +do_test where7-2.903.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 69 AND 71) AND a!=70) + OR ((a BETWEEN 91 AND 93) AND a!=92) + OR b=652 + } +} {69 71 91 93 scan 0 sort 0} +do_test where7-2.904.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1067 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=520 + OR b=399 + OR b=209 + OR a=68 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + } +} {18 19 53 54 55 68 73 94 97 scan 0 sort 0} +do_test where7-2.904.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1067 + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR (g='jihgfed' AND f GLOB 'vwxyz*') + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=520 + OR b=399 + OR b=209 + OR a=68 + OR (g='fedcbaz' AND f GLOB 'qrstu*') + } +} {18 19 53 54 55 68 73 94 97 scan 0 sort 0} +do_test where7-2.905.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR a=57 + OR b=55 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR ((a BETWEEN 21 AND 23) AND a!=22) + } +} {2 5 20 21 22 23 34 37 57 79 scan 0 sort 0} +do_test where7-2.905.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR a=57 + OR b=55 + OR (d>=34.0 AND d<35.0 AND d NOT NULL) + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR (g='rqponml' AND f GLOB 'lmnop*') + OR ((a BETWEEN 0 AND 2) AND a!=1) + OR ((a BETWEEN 21 AND 23) AND a!=22) + } +} {2 5 20 21 22 23 34 37 57 79 scan 0 sort 0} +do_test where7-2.906.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR a=2 + OR b=784 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=850 + } +} {2 21 23 81 scan 0 sort 0} +do_test where7-2.906.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 21 AND 23) AND a!=22) + OR a=2 + OR b=784 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='ihgfedc' AND f GLOB 'defgh*') + OR b=850 + } +} {2 21 23 81 scan 0 sort 0} +do_test where7-2.907.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=748 + OR b=209 + OR a=100 + } +} {19 45 51 68 100 scan 0 sort 0} +do_test where7-2.907.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=51.0 AND d<52.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=748 + OR b=209 + OR a=100 + } +} {19 45 51 68 100 scan 0 sort 0} +do_test where7-2.908.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR a=18 + OR a=30 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR b=792 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR c=26026 + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {8 9 10 11 18 19 21 30 33 37 63 72 76 77 78 84 86 89 scan 0 sort 0} +do_test where7-2.908.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'zabcd*') + OR a=18 + OR a=30 + OR ((a BETWEEN 9 AND 11) AND a!=10) + OR ((a BETWEEN 84 AND 86) AND a!=85) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR b=792 + OR (f GLOB '?mnop*' AND f GLOB 'lmno*') + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR c=26026 + OR (g='rqponml' AND f GLOB 'hijkl*') + } +} {8 9 10 11 18 19 21 30 33 37 63 
72 76 77 78 84 86 89 scan 0 sort 0} +do_test where7-2.909.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=968 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR a=78 + OR ((a BETWEEN 90 AND 92) AND a!=91) + } +} {16 22 48 63 65 72 74 78 88 90 92 100 scan 0 sort 0} +do_test where7-2.909.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=968 + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR (d>=72.0 AND d<73.0 AND d NOT NULL) + OR a=78 + OR ((a BETWEEN 90 AND 92) AND a!=91) + } +} {16 22 48 63 65 72 74 78 88 90 92 100 scan 0 sort 0} +do_test where7-2.910.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=10010 + OR f='pqrstuvwx' + } +} {15 28 29 30 41 67 93 scan 0 sort 0} +do_test where7-2.910.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=10010 + OR f='pqrstuvwx' + } +} {15 28 29 30 41 67 93 scan 0 sort 0} +do_test where7-2.911.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=86 + OR a=10 + OR b=528 + OR b=253 + OR a=80 + OR a=87 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + } +} {10 23 37 48 80 86 87 scan 0 sort 0} +do_test where7-2.911.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=86 + OR a=10 + OR b=528 + OR b=253 + OR a=80 + OR a=87 + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + } +} {10 23 37 48 80 86 87 scan 0 sort 0} +do_test where7-2.912.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=825 + OR a=100 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR a=60 + } +} {42 60 75 77 100 scan 0 sort 0} +do_test where7-2.912.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR b=825 + OR a=100 + OR (d>=77.0 AND d<78.0 AND d NOT NULL) + OR a=60 + } +} {42 60 75 77 100 scan 0 sort 0} +do_test where7-2.913.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=883 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR a=81 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR f='mnopqrstu' + } +} {3 4 12 30 35 38 45 56 64 78 81 82 90 94 scan 0 sort 0} +do_test where7-2.913.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=94.0 AND d<95.0 AND d NOT NULL) + OR b=883 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR (d>=3.0 AND d<4.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR a=81 + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR f='mnopqrstu' + } +} {3 4 12 30 35 38 45 56 64 78 81 82 90 94 scan 0 sort 0} +do_test where7-2.914.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=443 + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=663 + OR b=905 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=883 + OR c=22022 + OR b=638 + } +} {14 16 58 64 65 66 96 scan 0 sort 0} +do_test where7-2.914.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=443 + OR ((a BETWEEN 14 AND 16) AND a!=15) + OR b=663 + OR b=905 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR b=883 + OR c=22022 + OR b=638 + } +} {14 16 58 64 65 66 96 scan 0 sort 0} +do_test where7-2.915.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=234 + OR a=53 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR b=319 
+ OR (g='qponmlk' AND f GLOB 'opqrs*') + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 20 22 27 29 40 53 84 86 scan 0 sort 0} +do_test where7-2.915.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 84 AND 86) AND a!=85) + OR b=234 + OR a=53 + OR ((a BETWEEN 20 AND 22) AND a!=21) + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR b=319 + OR (g='qponmlk' AND f GLOB 'opqrs*') + OR ((a BETWEEN 14 AND 16) AND a!=15) + } +} {14 16 20 22 27 29 40 53 84 86 scan 0 sort 0} +do_test where7-2.916.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=179 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR a=46 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR a=25 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR f='opqrstuvw' + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=938 + } +} {5 13 14 25 40 46 53 55 66 72 92 95 97 scan 0 sort 0} +do_test where7-2.916.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=179 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR a=46 + OR (g='kjihgfe' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 53 AND 55) AND a!=54) + OR a=25 + OR (d>=5.0 AND d<6.0 AND d NOT NULL) + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR f='opqrstuvw' + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=938 + } +} {5 13 14 25 40 46 53 55 66 72 92 95 97 scan 0 sort 0} +do_test where7-2.917.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + } +} {15 57 scan 0 sort 0} +do_test where7-2.917.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + } +} {15 57 scan 0 sort 0} +do_test where7-2.918.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=748 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=32 + OR b=110 + OR b=297 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR f='ghijklmno' + OR b=473 + OR b=135 + } +} {6 10 13 22 27 32 43 58 60 62 68 84 scan 0 sort 0} +do_test where7-2.918.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=748 + OR (g='utsrqpo' AND f GLOB 'wxyza*') + OR a=32 + OR b=110 + OR b=297 + OR (d>=13.0 AND d<14.0 AND d NOT NULL) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR f='ghijklmno' + OR b=473 + OR b=135 + } +} {6 10 13 22 27 32 43 58 60 62 68 84 scan 0 sort 0} +do_test where7-2.919.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=905 + OR a=97 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR c=27027 + OR f='bcdefghij' + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + } +} {1 4 25 27 30 33 38 40 53 54 56 79 80 81 82 85 97 scan 0 sort 0} +do_test where7-2.919.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=33.0 AND d<34.0 AND d NOT NULL) + OR b=905 + OR a=97 + OR (g='hgfedcb' AND f GLOB 'hijkl*') + OR c=27027 + OR f='bcdefghij' + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR (d>=25.0 AND d<26.0 AND d NOT NULL) + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + } +} {1 4 25 27 30 33 38 40 53 54 56 79 80 81 82 85 97 scan 0 sort 0} +do_test where7-2.920.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 64 AND 66) AND a!=65) + OR ((a BETWEEN 90 AND 92) AND a!=91) + } +} {64 66 90 92 scan 0 sort 0} +do_test where7-2.920.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 64 AND 66) AND a!=65) + OR ((a BETWEEN 90 AND 92) AND 
a!=91) + } +} {64 66 90 92 scan 0 sort 0} +do_test where7-2.921.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=594 + OR b=80 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=421 + OR b=418 + OR b=828 + OR a=88 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {23 27 38 54 60 88 scan 0 sort 0} +do_test where7-2.921.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=594 + OR b=80 + OR (g='tsrqpon' AND f GLOB 'bcdef*') + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=421 + OR b=418 + OR b=828 + OR a=88 + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {23 27 38 54 60 88 scan 0 sort 0} +do_test where7-2.922.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR b=366 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR c=16016 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR c=9009 + } +} {17 25 26 27 28 46 47 48 75 100 scan 0 sort 0} +do_test where7-2.922.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'xyzab*') + OR b=366 + OR (d>=28.0 AND d<29.0 AND d NOT NULL) + OR c=16016 + OR (g='edcbazy' AND f GLOB 'wxyza*') + OR c=9009 + } +} {17 25 26 27 28 46 47 48 75 100 scan 0 sort 0} +do_test where7-2.923.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=33 + OR f='qrstuvwxy' + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR b=858 + } +} {3 16 20 42 68 78 94 scan 0 sort 0} +do_test where7-2.923.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=33 + OR f='qrstuvwxy' + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR b=858 + } +} {3 16 20 42 68 78 94 scan 0 sort 0} +do_test where7-2.924.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=861 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=682 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=286 + } +} {22 26 29 48 62 74 93 95 100 scan 0 sort 0} +do_test where7-2.924.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=861 + OR (f GLOB '?xyza*' AND f GLOB 'wxyz*') + OR (d>=29.0 AND d<30.0 AND d NOT NULL) + OR b=682 + OR ((a BETWEEN 93 AND 95) AND a!=94) + OR b=286 + } +} {22 26 29 48 62 74 93 95 100 scan 0 sort 0} +do_test where7-2.925.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=740 + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR a=88 + } +} {27 29 88 scan 0 sort 0} +do_test where7-2.925.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=740 + OR ((a BETWEEN 27 AND 29) AND a!=28) + OR a=88 + } +} {27 29 88 scan 0 sort 0} +do_test where7-2.926.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='abcdefghi' + OR c=9009 + OR b=663 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR b=91 + } +} {10 25 26 27 52 78 scan 0 sort 0} +do_test where7-2.926.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='abcdefghi' + OR c=9009 + OR b=663 + OR (g='wvutsrq' AND f GLOB 'klmno*') + OR b=91 + } +} {10 25 26 27 52 78 scan 0 sort 0} +do_test where7-2.927.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='kjihgfe' AND f GLOB 'qrstu*') + OR ((a BETWEEN 29 AND 31) AND a!=30) + OR (f GLOB '?opqr*' AND f GLOB 'nopq*') + OR b=1015 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=916 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=69 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {13 29 31 39 42 65 68 83 91 scan 0 sort 0} +do_test where7-2.927.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='kjihgfe' AND f GLOB 'qrstu*') + OR ((a BETWEEN 29 AND 31) AND a!=30) + OR (f GLOB 
'?opqr*' AND f GLOB 'nopq*') + OR b=1015 + OR (g='qponmlk' AND f GLOB 'qrstu*') + OR b=916 + OR (d>=31.0 AND d<32.0 AND d NOT NULL) + OR b=69 + OR (g='hgfedcb' AND f GLOB 'fghij*') + } +} {13 29 31 39 42 65 68 83 91 scan 0 sort 0} +do_test where7-2.928.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=88 + OR a=1 + OR f='uvwxyzabc' + OR b=498 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR a=63 + OR f='mnopqrstu' + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=495 + OR a=35 + OR a=22 + } +} {1 12 20 22 35 38 45 46 60 63 64 72 88 90 98 scan 0 sort 0} +do_test where7-2.928.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=88 + OR a=1 + OR f='uvwxyzabc' + OR b=498 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR a=63 + OR f='mnopqrstu' + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR b=495 + OR a=35 + OR a=22 + } +} {1 12 20 22 35 38 45 46 60 63 64 72 88 90 98 scan 0 sort 0} +do_test where7-2.929.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=869 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=289 + OR a=62 + OR ((a BETWEEN 9 AND 11) AND a!=10) + } +} {9 11 35 62 79 scan 0 sort 0} +do_test where7-2.929.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=869 + OR (g='rqponml' AND f GLOB 'jklmn*') + OR b=289 + OR a=62 + OR ((a BETWEEN 9 AND 11) AND a!=10) + } +} {9 11 35 62 79 scan 0 sort 0} +do_test where7-2.930.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR b=542 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR a=7 + OR f='klmnopqrs' + } +} {7 10 19 21 22 24 36 62 88 scan 0 sort 0} +do_test where7-2.930.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 22 AND 24) AND a!=23) + OR b=542 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR a=7 + OR f='klmnopqrs' + } +} {7 10 19 21 22 24 36 62 88 scan 0 sort 0} +do_test where7-2.931.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 57 AND 59) AND a!=58) + OR b=1078 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=429 + } +} {20 21 23 39 57 59 60 98 scan 0 sort 0} +do_test where7-2.931.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 57 AND 59) AND a!=58) + OR b=1078 + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (g='mlkjihg' AND f GLOB 'ijklm*') + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR b=429 + } +} {20 21 23 39 57 59 60 98 scan 0 sort 0} +do_test where7-2.932.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=264 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=1048 + OR a=15 + } +} {15 24 82 85 87 scan 0 sort 0} +do_test where7-2.932.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=82.0 AND d<83.0 AND d NOT NULL) + OR b=264 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR b=1048 + OR a=15 + } +} {15 24 82 85 87 scan 0 sort 0} +do_test where7-2.933.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=93 + OR f='ijklmnopq' + OR f='mnopqrstu' + OR ((a BETWEEN 67 AND 69) AND a!=68) + } +} {8 12 34 38 60 64 67 69 86 90 93 scan 0 sort 0} +do_test where7-2.933.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=93 + OR f='ijklmnopq' + OR f='mnopqrstu' + OR ((a BETWEEN 67 AND 69) AND a!=68) + } +} {8 12 34 38 60 64 67 69 86 90 93 scan 0 sort 0} +do_test where7-2.934.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR b=858 + OR a=58 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR c=21021 + OR ((a BETWEEN 45 AND 47) AND a!=46) + OR b=616 + OR b=784 + OR b=55 + } +} {5 45 47 49 56 58 61 62 63 78 scan 0 sort 0} +do_test 
where7-2.934.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR b=858 + OR a=58 + OR (g='onmlkji' AND f GLOB 'xyzab*') + OR c=21021 + OR ((a BETWEEN 45 AND 47) AND a!=46) + OR b=616 + OR b=784 + OR b=55 + } +} {5 45 47 49 56 58 61 62 63 78 scan 0 sort 0} +do_test where7-2.935.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=682 + OR b=99 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR b=531 + } +} {2 9 28 54 62 80 scan 0 sort 0} +do_test where7-2.935.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=682 + OR b=99 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR b=531 + } +} {2 9 28 54 62 80 scan 0 sort 0} +do_test where7-2.936.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 56 AND 58) AND a!=57) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR b=726 + OR a=79 + OR a=47 + OR b=212 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR c=8008 + } +} {8 22 23 24 26 34 47 52 56 58 60 66 70 78 79 86 scan 0 sort 0} +do_test where7-2.936.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 56 AND 58) AND a!=57) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (f GLOB '?jklm*' AND f GLOB 'ijkl*') + OR b=726 + OR a=79 + OR a=47 + OR b=212 + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + OR c=8008 + } +} {8 22 23 24 26 34 47 52 56 58 60 66 70 78 79 86 scan 0 sort 0} +do_test where7-2.937.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='uvwxyzabc' + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR a=5 + OR b=33 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR a=59 + OR b=44 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + } +} {3 4 5 14 20 23 46 49 59 60 62 72 75 98 scan 0 sort 0} +do_test where7-2.937.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='uvwxyzabc' + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR a=5 + OR b=33 + OR (f GLOB '?yzab*' AND f GLOB 'xyza*') + OR a=59 + OR b=44 + OR (d>=14.0 AND d<15.0 AND d NOT NULL) + OR (d>=59.0 AND d<60.0 AND d NOT NULL) + } +} {3 4 5 14 20 23 46 49 59 60 62 72 75 98 scan 0 sort 0} +do_test where7-2.938.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=564 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR b=451 + OR b=330 + OR a=47 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {17 19 30 41 47 93 scan 0 sort 0} +do_test where7-2.938.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=564 + OR (d>=93.0 AND d<94.0 AND d NOT NULL) + OR b=451 + OR b=330 + OR a=47 + OR ((a BETWEEN 17 AND 19) AND a!=18) + } +} {17 19 30 41 47 93 scan 0 sort 0} +do_test where7-2.939.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=40 + OR b=333 + } +} {40 scan 0 sort 0} +do_test where7-2.939.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=40 + OR b=333 + } +} {40 scan 0 sort 0} +do_test where7-2.940.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=924 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR a=100 + OR c=15015 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + } +} {2 6 8 36 40 43 44 45 82 84 100 scan 0 sort 0} +do_test where7-2.940.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=924 + OR ((a BETWEEN 6 AND 8) AND a!=7) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (d>=40.0 AND d<41.0 AND d NOT NULL) + OR a=100 + OR c=15015 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + OR (d>=2.0 AND d<3.0 AND d NOT NULL) + } +} {2 6 8 36 40 43 44 45 82 84 100 scan 0 sort 0} +do_test where7-2.941.1 { + count_steps_sort { + SELECT a FROM t2 + 
WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + } +} {36 44 63 scan 0 sort 0} +do_test where7-2.941.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=63.0 AND d<64.0 AND d NOT NULL) + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + } +} {36 44 63 scan 0 sort 0} +do_test where7-2.942.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=58 + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=31 + OR f='tuvwxyzab' + OR b=341 + OR b=47 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR a=49 + OR b=223 + OR f='qrstuvwxy' + } +} {7 9 16 19 31 42 45 49 63 65 68 71 94 95 97 scan 0 sort 0} +do_test where7-2.942.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=58 + OR ((a BETWEEN 7 AND 9) AND a!=8) + OR ((a BETWEEN 63 AND 65) AND a!=64) + OR a=31 + OR f='tuvwxyzab' + OR b=341 + OR b=47 + OR ((a BETWEEN 95 AND 97) AND a!=96) + OR a=49 + OR b=223 + OR f='qrstuvwxy' + } +} {7 9 16 19 31 42 45 49 63 65 68 71 94 95 97 scan 0 sort 0} +do_test where7-2.943.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=96 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=85 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR c=11011 + OR b=641 + OR f='vwxyzabcd' + OR b=286 + } +} {4 10 12 21 23 26 30 31 32 33 37 39 47 56 73 82 85 96 99 scan 0 sort 0} +do_test where7-2.943.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=96 + OR (d>=23.0 AND d<24.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'vwxyz*') + OR (f GLOB '?fghi*' AND f GLOB 'efgh*') + OR ((a BETWEEN 37 AND 39) AND a!=38) + OR a=85 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR c=11011 + OR b=641 + OR f='vwxyzabcd' + OR b=286 + } +} {4 10 12 21 23 26 30 31 32 33 37 39 47 56 73 82 85 96 99 scan 0 sort 0} +do_test where7-2.944.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR b=1012 + OR a=7 + OR b=773 + OR a=1 + OR b=726 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR b=110 + } +} {1 3 5 7 10 66 87 89 92 99 scan 0 sort 0} +do_test where7-2.944.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR b=1012 + OR a=7 + OR b=773 + OR a=1 + OR b=726 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR ((a BETWEEN 87 AND 89) AND a!=88) + OR b=110 + } +} {1 3 5 7 10 66 87 89 92 99 scan 0 sort 0} +do_test where7-2.945.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=60 + OR a=4 + OR b=520 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR a=44 + OR a=36 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=715 + OR (g='vutsrqp' AND f GLOB 'qrstu*') + } +} {4 7 16 36 44 60 65 76 79 scan 0 sort 0} +do_test where7-2.945.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'hijkl*') + OR a=60 + OR a=4 + OR b=520 + OR (g='ihgfedc' AND f GLOB 'bcdef*') + OR a=44 + OR a=36 + OR (d>=76.0 AND d<77.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=715 + OR (g='vutsrqp' AND f GLOB 'qrstu*') + } +} {4 7 16 36 44 60 65 76 79 scan 0 sort 0} +do_test where7-2.946.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR a=24 + OR 
(d>=99.0 AND d<100.0 AND d NOT NULL) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {3 5 15 24 26 52 55 56 58 76 78 99 scan 0 sort 0} +do_test where7-2.946.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a BETWEEN 56 AND 58) AND a!=57) + OR (d>=15.0 AND d<16.0 AND d NOT NULL) + OR (d>=55.0 AND d<56.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'yzabc*') + OR a=24 + OR (d>=99.0 AND d<100.0 AND d NOT NULL) + OR (f GLOB '?bcde*' AND f GLOB 'abcd*') + } +} {3 5 15 24 26 52 55 56 58 76 78 99 scan 0 sort 0} +do_test where7-2.947.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=132 + OR f='ghijklmno' + OR b=740 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=1059 + } +} {6 12 21 26 32 38 58 84 scan 0 sort 0} +do_test where7-2.947.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='utsrqpo' AND f GLOB 'vwxyz*') + OR b=132 + OR f='ghijklmno' + OR b=740 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=1059 + } +} {6 12 21 26 32 38 58 84 scan 0 sort 0} +do_test where7-2.948.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=28 + OR b=927 + OR b=520 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=638 + OR f='vwxyzabcd' + } +} {21 28 47 53 58 73 99 scan 0 sort 0} +do_test where7-2.948.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=28 + OR b=927 + OR b=520 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + OR b=638 + OR f='vwxyzabcd' + } +} {21 28 47 53 58 73 99 scan 0 sort 0} +do_test where7-2.949.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=1026 + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR b=355 + OR b=641 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {1 11 53 54 scan 0 sort 0} +do_test where7-2.949.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=1026 + OR (d>=1.0 AND d<2.0 AND d NOT NULL) + OR (g='wvutsrq' AND f GLOB 'lmnop*') + OR b=355 + OR b=641 + OR (d>=53.0 AND d<54.0 AND d NOT NULL) + } +} {1 11 53 54 scan 0 sort 0} +do_test where7-2.950.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 46 AND 48) AND a!=47) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR b=641 + OR a=3 + OR a=35 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR f='opqrstuvw' + OR a=41 + OR a=83 + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=751 + } +} {3 14 35 40 41 46 48 54 60 62 66 81 83 92 scan 0 sort 0} +do_test where7-2.950.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 46 AND 48) AND a!=47) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR b=641 + OR a=3 + OR a=35 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR f='opqrstuvw' + OR a=41 + OR a=83 + OR (g='nmlkjih' AND f GLOB 'cdefg*') + OR b=751 + } +} {3 14 35 40 41 46 48 54 60 62 66 81 83 92 scan 0 sort 0} +do_test where7-2.951.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR c=15015 + OR b=146 + OR b=1092 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {3 5 43 44 45 60 62 scan 0 sort 0} +do_test where7-2.951.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 3 AND 5) AND a!=4) + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR c=15015 + OR b=146 + OR b=1092 + OR (d>=60.0 AND d<61.0 AND d NOT NULL) + } +} {3 5 43 44 45 60 62 scan 0 sort 0} +do_test where7-2.952.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'qrstu*') + OR f='bcdefghij' + OR 
f='hijklmnop' + OR a=65 + OR f='ijklmnopq' + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR g IS NULL + OR a=26 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR a=9 + OR (d>=32.0 AND d<33.0 AND d NOT NULL) + } +} {1 7 8 9 26 27 32 33 34 38 40 42 53 59 60 65 79 85 86 scan 0 sort 0} +do_test where7-2.952.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'qrstu*') + OR f='bcdefghij' + OR f='hijklmnop' + OR a=65 + OR f='ijklmnopq' + OR (d>=79.0 AND d<80.0 AND d NOT NULL) + OR g IS NULL + OR a=26 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR a=9 + OR (d>=32.0 AND d<33.0 AND d NOT NULL) + } +} {1 7 8 9 26 27 32 33 34 38 40 42 53 59 60 65 79 85 86 scan 0 sort 0} +do_test where7-2.953.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='jihgfed' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=1100 + OR c=6006 + OR c=4004 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR c=33033 + } +} {10 11 12 16 17 18 24 26 41 70 73 79 81 97 98 99 100 scan 0 sort 0} +do_test where7-2.953.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='jihgfed' AND f GLOB 'vwxyz*') + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR ((a BETWEEN 79 AND 81) AND a!=80) + OR (g='kjihgfe' AND f GLOB 'stuvw*') + OR (g='qponmlk' AND f GLOB 'pqrst*') + OR b=1100 + OR c=6006 + OR c=4004 + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR c=33033 + } +} {10 11 12 16 17 18 24 26 41 70 73 79 81 97 98 99 100 scan 0 sort 0} +do_test where7-2.954.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=17 + OR ((a BETWEEN 95 AND 97) AND a!=96) + } +} {17 95 97 scan 0 sort 0} +do_test where7-2.954.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=17 + OR ((a BETWEEN 95 AND 97) AND a!=96) + } +} {17 95 97 scan 0 sort 0} +do_test where7-2.955.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=3003 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=93 + } +} {7 8 9 67 93 scan 0 sort 0} +do_test where7-2.955.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=3003 + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=93 + } +} {7 8 9 67 93 scan 0 sort 0} +do_test where7-2.956.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=737 + } +} {12 21 23 42 44 67 scan 0 sort 0} +do_test where7-2.956.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=42.0 AND d<43.0 AND d NOT NULL) + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR ((a BETWEEN 21 AND 23) AND a!=22) + OR (d>=12.0 AND d<13.0 AND d NOT NULL) + OR b=737 + } +} {12 21 23 42 44 67 scan 0 sort 0} +do_test where7-2.957.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 36 scan 0 sort 0} +do_test where7-2.957.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='rqponml' AND f GLOB 'klmno*') + OR ((a BETWEEN 5 AND 7) AND a!=6) + } +} {5 7 36 scan 0 sort 0} +do_test where7-2.958.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR c=32032 + OR f='opqrstuvw' + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR ((a BETWEEN 49 AND 51) AND a!=50) + OR b=993 + } +} {14 40 49 51 66 68 85 92 94 95 96 scan 0 sort 0} +do_test where7-2.958.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='hgfedcb' AND f GLOB 'hijkl*') + OR c=32032 + OR f='opqrstuvw' + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR ((a 
BETWEEN 49 AND 51) AND a!=50) + OR b=993 + } +} {14 40 49 51 66 68 85 92 94 95 96 scan 0 sort 0} +do_test where7-2.959.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=245 + OR b=528 + OR b=366 + OR a=73 + OR a=49 + OR b=421 + OR a=58 + } +} {12 38 48 49 58 59 61 73 86 88 scan 0 sort 0} +do_test where7-2.959.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=12.0 AND d<13.0 AND d NOT NULL) + OR ((a BETWEEN 59 AND 61) AND a!=60) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR b=245 + OR b=528 + OR b=366 + OR a=73 + OR a=49 + OR b=421 + OR a=58 + } +} {12 38 48 49 58 59 61 73 86 88 scan 0 sort 0} +do_test where7-2.960.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR b=146 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=704 + } +} {8 10 20 43 60 62 64 73 75 82 86 88 100 scan 0 sort 0} +do_test where7-2.960.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=100.0 AND d<101.0 AND d NOT NULL) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR ((a BETWEEN 86 AND 88) AND a!=87) + OR b=146 + OR (g='ponmlkj' AND f GLOB 'rstuv*') + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR ((a BETWEEN 60 AND 62) AND a!=61) + OR (g='ihgfedc' AND f GLOB 'efghi*') + OR b=704 + } +} {8 10 20 43 60 62 64 73 75 82 86 88 100 scan 0 sort 0} +do_test where7-2.961.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 65 AND 67) AND a!=66) + OR b=14 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR a=49 + OR b=333 + } +} {3 5 49 65 67 scan 0 sort 0} +do_test where7-2.961.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 65 AND 67) AND a!=66) + OR b=14 + OR ((a BETWEEN 3 AND 5) AND a!=4) + OR a=49 + OR b=333 + } +} {3 5 49 65 67 scan 0 sort 0} +do_test where7-2.962.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=17017 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=971 + OR a=37 + OR a=7 + OR b=641 + OR a=13 + OR b=597 + } +} {7 13 37 38 49 50 51 scan 0 sort 0} +do_test where7-2.962.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=17017 + OR (g='qponmlk' AND f GLOB 'mnopq*') + OR b=971 + OR a=37 + OR a=7 + OR b=641 + OR a=13 + OR b=597 + } +} {7 13 37 38 49 50 51 scan 0 sort 0} +do_test where7-2.963.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='tuvwxyzab' + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {17 19 43 45 69 71 95 97 scan 0 sort 0} +do_test where7-2.963.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='tuvwxyzab' + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + } +} {17 19 43 45 69 71 95 97 scan 0 sort 0} +do_test where7-2.964.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=638 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR b=165 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='stuvwxyza' + OR b=652 + OR b=66 + OR b=770 + OR b=91 + } +} {6 10 12 15 18 44 58 70 89 96 scan 0 sort 0} +do_test where7-2.964.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=638 + OR (d>=44.0 AND d<45.0 AND d NOT NULL) + OR (g='gfedcba' AND f GLOB 'lmnop*') + OR b=165 + OR ((a BETWEEN 10 AND 12) AND a!=11) + OR f='stuvwxyza' + OR b=652 + OR b=66 + OR b=770 
+ OR b=91 + } +} {6 10 12 15 18 44 58 70 89 96 scan 0 sort 0} +do_test where7-2.965.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=52.0 AND d<53.0 AND d NOT NULL) + OR f='opqrstuvw' + OR a=83 + OR a=93 + OR b=858 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'vwxyz*') + } +} {14 18 40 52 66 73 78 83 92 93 scan 0 sort 0} +do_test where7-2.965.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=52.0 AND d<53.0 AND d NOT NULL) + OR f='opqrstuvw' + OR a=83 + OR a=93 + OR b=858 + OR (d>=18.0 AND d<19.0 AND d NOT NULL) + OR (g='jihgfed' AND f GLOB 'vwxyz*') + } +} {14 18 40 52 66 73 78 83 92 93 scan 0 sort 0} +do_test where7-2.966.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE c=3003 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR a=38 + } +} {7 8 9 38 40 42 scan 0 sort 0} +do_test where7-2.966.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE c=3003 + OR ((a BETWEEN 40 AND 42) AND a!=41) + OR a=38 + } +} {7 8 9 38 40 42 scan 0 sort 0} +do_test where7-2.967.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR ((a BETWEEN 50 AND 52) AND a!=51) + } +} {50 52 60 scan 0 sort 0} +do_test where7-2.967.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=60.0 AND d<61.0 AND d NOT NULL) + OR ((a BETWEEN 50 AND 52) AND a!=51) + } +} {50 52 60 scan 0 sort 0} +do_test where7-2.968.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='qponmlk' AND f GLOB 'mnopq*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=5 + OR b=396 + OR a=13 + } +} {5 13 24 26 36 38 scan 0 sort 0} +do_test where7-2.968.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='qponmlk' AND f GLOB 'mnopq*') + OR ((a BETWEEN 24 AND 26) AND a!=25) + OR a=5 + OR b=396 + OR a=13 + } +} {5 13 24 26 36 38 scan 0 sort 0} +do_test where7-2.969.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=748 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR b=531 + OR b=1092 + OR b=418 + } +} {38 68 69 71 95 97 scan 0 sort 0} +do_test where7-2.969.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR b=748 + OR (d>=97.0 AND d<98.0 AND d NOT NULL) + OR ((a BETWEEN 69 AND 71) AND a!=70) + OR b=531 + OR b=1092 + OR b=418 + } +} {38 68 69 71 95 97 scan 0 sort 0} +do_test where7-2.970.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR a=50 + OR a=46 + OR ((a BETWEEN 38 AND 40) AND a!=39) + } +} {8 10 14 30 38 40 46 50 66 92 scan 0 sort 0} +do_test where7-2.970.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=30.0 AND d<31.0 AND d NOT NULL) + OR ((a BETWEEN 8 AND 10) AND a!=9) + OR (f GLOB '?pqrs*' AND f GLOB 'opqr*') + OR a=50 + OR a=46 + OR ((a BETWEEN 38 AND 40) AND a!=39) + } +} {8 10 14 30 38 40 46 50 66 92 scan 0 sort 0} +do_test where7-2.971.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=24 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR b=487 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=132 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=795 + OR b=737 + } +} {12 13 15 22 24 54 67 96 scan 0 sort 0} +do_test where7-2.971.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=24 + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR b=487 + OR (d>=96.0 AND d<97.0 AND d NOT NULL) + OR ((a BETWEEN 13 AND 15) AND a!=14) + OR b=132 + OR (d>=54.0 AND d<55.0 AND d NOT NULL) + OR b=795 + OR b=737 + } +} {12 13 
15 22 24 54 67 96 scan 0 sort 0} +do_test where7-2.972.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=34 + OR c=16016 + OR b=1078 + OR b=960 + OR (g='hgfedcb' AND f GLOB 'jklmn*') + } +} {34 46 47 48 87 88 98 scan 0 sort 0} +do_test where7-2.972.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=88.0 AND d<89.0 AND d NOT NULL) + OR a=34 + OR c=16016 + OR b=1078 + OR b=960 + OR (g='hgfedcb' AND f GLOB 'jklmn*') + } +} {34 46 47 48 87 88 98 scan 0 sort 0} +do_test where7-2.973.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1081 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=6 + } +} {6 19 21 38 45 73 75 scan 0 sort 0} +do_test where7-2.973.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1081 + OR ((a BETWEEN 19 AND 21) AND a!=20) + OR (g='ponmlkj' AND f GLOB 'tuvwx*') + OR ((a BETWEEN 73 AND 75) AND a!=74) + OR (d>=38.0 AND d<39.0 AND d NOT NULL) + OR a=6 + } +} {6 19 21 38 45 73 75 scan 0 sort 0} +do_test where7-2.974.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR (g='rqponml' AND f GLOB 'lmnop*') + OR a=92 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR f='fghijklmn' + OR a=100 + OR b=209 + OR c=9009 + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR a=73 + OR b=902 + } +} {5 9 19 25 26 27 31 35 37 52 54 57 61 73 82 83 87 92 95 100 scan 0 sort 0} +do_test where7-2.974.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='fedcbaz' AND f GLOB 'rstuv*') + OR (g='rqponml' AND f GLOB 'lmnop*') + OR a=92 + OR (f GLOB '?klmn*' AND f GLOB 'jklm*') + OR f='fghijklmn' + OR a=100 + OR b=209 + OR c=9009 + OR ((a BETWEEN 52 AND 54) AND a!=53) + OR a=73 + OR b=902 + } +} {5 9 19 25 26 27 31 35 37 52 54 57 61 73 82 83 87 92 95 100 scan 0 sort 0} +do_test where7-2.975.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=110 + OR f='ghijklmno' + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {6 10 21 32 40 58 84 scan 0 sort 0} +do_test where7-2.975.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=40.0 AND d<41.0 AND d NOT NULL) + OR b=110 + OR f='ghijklmno' + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + } +} {6 10 21 32 40 58 84 scan 0 sort 0} +do_test where7-2.976.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 51 AND 53) AND a!=52) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=91 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=77 + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (g='vutsrqp' AND f GLOB 'pqrst*') + } +} {1 7 15 20 27 45 46 51 53 79 scan 0 sort 0} +do_test where7-2.976.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 51 AND 53) AND a!=52) + OR (g='utsrqpo' AND f GLOB 'uvwxy*') + OR (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR b=91 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR b=77 + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (g='vutsrqp' AND f GLOB 'pqrst*') + } +} {1 7 15 20 27 45 46 51 53 79 scan 0 sort 0} +do_test where7-2.977.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=693 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR a=52 + OR b=377 + } +} {21 26 42 52 56 63 78 scan 0 sort 0} +do_test where7-2.977.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=56.0 AND d<57.0 AND d NOT NULL) + OR 
(d>=78.0 AND d<79.0 AND d NOT NULL) + OR b=693 + OR (d>=21.0 AND d<22.0 AND d NOT NULL) + OR (d>=26.0 AND d<27.0 AND d NOT NULL) + OR (d>=42.0 AND d<43.0 AND d NOT NULL) + OR a=52 + OR b=377 + } +} {21 26 42 52 56 63 78 scan 0 sort 0} +do_test where7-2.978.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=99 + OR a=36 + OR b=297 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=1004 + OR b=872 + OR a=95 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=176 + OR b=300 + } +} {16 27 36 66 68 95 99 scan 0 sort 0} +do_test where7-2.978.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=99 + OR a=36 + OR b=297 + OR ((a BETWEEN 66 AND 68) AND a!=67) + OR b=1004 + OR b=872 + OR a=95 + OR (d>=27.0 AND d<28.0 AND d NOT NULL) + OR b=176 + OR b=300 + } +} {16 27 36 66 68 95 99 scan 0 sort 0} +do_test where7-2.979.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=737 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=40 + OR f='uvwxyzabc' + OR b=311 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=927 + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + } +} {5 6 8 20 31 32 40 46 50 53 57 58 67 72 83 84 98 scan 0 sort 0} +do_test where7-2.979.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=737 + OR (g='wvutsrq' AND f GLOB 'ijklm*') + OR (f GLOB '?ghij*' AND f GLOB 'fghi*') + OR a=40 + OR f='uvwxyzabc' + OR b=311 + OR (g='nmlkjih' AND f GLOB 'bcdef*') + OR (f GLOB '?hijk*' AND f GLOB 'ghij*') + OR b=927 + OR (d>=50.0 AND d<51.0 AND d NOT NULL) + } +} {5 6 8 20 31 32 40 46 50 53 57 58 67 72 83 84 98 scan 0 sort 0} +do_test where7-2.980.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE f='fghijklmn' + OR b=1078 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR f='fghijklmn' + } +} {5 31 35 57 83 98 scan 0 sort 0} +do_test where7-2.980.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE f='fghijklmn' + OR b=1078 + OR (d>=35.0 AND d<36.0 AND d NOT NULL) + OR f='fghijklmn' + } +} {5 31 35 57 83 98 scan 0 sort 0} +do_test where7-2.981.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR b=487 + OR f='tuvwxyzab' + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR b=971 + OR c=19019 + OR a=39 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR b=550 + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=660 + } +} {6 12 19 38 39 45 48 50 55 56 57 60 64 71 90 97 scan 0 sort 0} +do_test where7-2.981.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='xwvutsr' AND f GLOB 'ghijk*') + OR b=487 + OR f='tuvwxyzab' + OR (g='onmlkji' AND f GLOB 'wxyza*') + OR b=971 + OR c=19019 + OR a=39 + OR (f GLOB '?nopq*' AND f GLOB 'mnop*') + OR b=550 + OR (g='kjihgfe' AND f GLOB 'tuvwx*') + OR b=660 + } +} {6 12 19 38 39 45 48 50 55 56 57 60 64 71 90 97 scan 0 sort 0} +do_test where7-2.982.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=80 + OR b=839 + } +} { scan 0 sort 0} +do_test where7-2.982.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=80 + OR b=839 + } +} { scan 0 sort 0} +do_test where7-2.983.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=363 + OR b=630 + OR b=935 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR f='yzabcdefg' + OR ((a BETWEEN 37 AND 39) AND a!=38) + } +} {20 24 29 33 37 39 50 76 85 scan 0 sort 0} +do_test where7-2.983.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=363 + OR b=630 + OR b=935 + OR (d>=20.0 AND d<21.0 AND d NOT NULL) + OR (g='srqponm' AND f GLOB 'defgh*') + OR f='yzabcdefg' + OR ((a BETWEEN 37 AND 39) AND a!=38) + } +} {20 24 29 33 37 39 
50 76 85 scan 0 sort 0} +do_test where7-2.984.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=40 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR f='abcdefghi' + OR b=696 + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=682 + OR a=32 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=671 + OR a=15 + } +} {15 16 26 32 34 36 40 52 61 62 78 86 97 scan 0 sort 0} +do_test where7-2.984.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=97.0 AND d<98.0 AND d NOT NULL) + OR a=40 + OR (d>=86.0 AND d<87.0 AND d NOT NULL) + OR f='abcdefghi' + OR b=696 + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=682 + OR a=32 + OR ((a BETWEEN 34 AND 36) AND a!=35) + OR b=671 + OR a=15 + } +} {15 16 26 32 34 36 40 52 61 62 78 86 97 scan 0 sort 0} +do_test where7-2.985.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (g='gfedcba' AND f GLOB 'lmnop*') + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=311 + } +} {7 33 59 85 89 scan 0 sort 0} +do_test where7-2.985.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (g='gfedcba' AND f GLOB 'lmnop*') + OR (f GLOB '?ijkl*' AND f GLOB 'hijk*') + OR b=311 + } +} {7 33 59 85 89 scan 0 sort 0} +do_test where7-2.986.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=96.0 AND d<97.0 AND d NOT NULL) + OR a=73 + OR b=729 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=32 + } +} {32 67 73 81 96 scan 0 sort 0} +do_test where7-2.986.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=96.0 AND d<97.0 AND d NOT NULL) + OR a=73 + OR b=729 + OR (d>=81.0 AND d<82.0 AND d NOT NULL) + OR (d>=67.0 AND d<68.0 AND d NOT NULL) + OR a=32 + } +} {32 67 73 81 96 scan 0 sort 0} +do_test where7-2.987.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 98 AND 100) AND a!=99) + OR b=110 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=484 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + } +} {10 23 38 40 44 82 98 100 scan 0 sort 0} +do_test where7-2.987.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 98 AND 100) AND a!=99) + OR b=110 + OR ((a BETWEEN 38 AND 40) AND a!=39) + OR (g='tsrqpon' AND f GLOB 'xyzab*') + OR b=484 + OR (d>=82.0 AND d<83.0 AND d NOT NULL) + } +} {10 23 38 40 44 82 98 100 scan 0 sort 0} +do_test where7-2.988.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=135 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=209 + OR b=363 + OR c=27027 + OR b=1026 + OR c=6006 + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + } +} {16 17 18 19 33 46 66 73 79 80 81 99 scan 0 sort 0} +do_test where7-2.988.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=99.0 AND d<100.0 AND d NOT NULL) + OR b=135 + OR (d>=66.0 AND d<67.0 AND d NOT NULL) + OR b=209 + OR b=363 + OR c=27027 + OR b=1026 + OR c=6006 + OR (g='ponmlkj' AND f GLOB 'uvwxy*') + OR (d>=73.0 AND d<74.0 AND d NOT NULL) + } +} {16 17 18 19 33 46 66 73 79 80 81 99 scan 0 sort 0} +do_test where7-2.989.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) AND a!=19) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR a=97 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=674 + OR c=14014 + OR b=69 + } +} {18 20 22 24 39 40 41 42 45 58 79 97 scan 0 sort 0} +do_test where7-2.989.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=79.0 AND d<80.0 AND d NOT NULL) + OR ((a BETWEEN 18 AND 20) 
AND a!=19) + OR (g='qponmlk' AND f GLOB 'nopqr*') + OR a=97 + OR (d>=45.0 AND d<46.0 AND d NOT NULL) + OR ((a BETWEEN 22 AND 24) AND a!=23) + OR (g='mlkjihg' AND f GLOB 'ghijk*') + OR b=674 + OR c=14014 + OR b=69 + } +} {18 20 22 24 39 40 41 42 45 58 79 97 scan 0 sort 0} +do_test where7-2.990.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=297 + OR a=83 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR ((a BETWEEN 16 AND 18) AND a!=17) + } +} {16 18 27 78 83 scan 0 sort 0} +do_test where7-2.990.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=297 + OR a=83 + OR (d>=78.0 AND d<79.0 AND d NOT NULL) + OR ((a BETWEEN 16 AND 18) AND a!=17) + } +} {16 18 27 78 83 scan 0 sort 0} +do_test where7-2.991.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=451 + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=539 + OR a=26 + OR (g='srqponm' AND f GLOB 'efghi*') + OR b=465 + OR (g='jihgfed' AND f GLOB 'wxyza*') + } +} {11 13 26 30 41 49 74 scan 0 sort 0} +do_test where7-2.991.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=451 + OR ((a BETWEEN 11 AND 13) AND a!=12) + OR (g='tsrqpon' AND f GLOB 'abcde*') + OR b=539 + OR a=26 + OR (g='srqponm' AND f GLOB 'efghi*') + OR b=465 + OR (g='jihgfed' AND f GLOB 'wxyza*') + } +} {11 13 26 30 41 49 74 scan 0 sort 0} +do_test where7-2.992.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + } +} {45 63 scan 0 sort 0} +do_test where7-2.992.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (d>=45.0 AND d<46.0 AND d NOT NULL) + OR (d>=63.0 AND d<64.0 AND d NOT NULL) + } +} {45 63 scan 0 sort 0} +do_test where7-2.993.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 16 AND 18) AND a!=17) + OR b=872 + OR c=31031 + } +} {16 18 91 92 93 scan 0 sort 0} +do_test where7-2.993.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 16 AND 18) AND a!=17) + OR b=872 + OR c=31031 + } +} {16 18 91 92 93 scan 0 sort 0} +do_test where7-2.994.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR a=13 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR b=322 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR b=377 + OR f='cdefghijk' + OR b=286 + OR ((a BETWEEN 61 AND 63) AND a!=62) + } +} {1 2 13 17 26 27 28 33 35 43 53 54 61 63 69 79 80 95 scan 0 sort 0} +do_test where7-2.994.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE (f GLOB '?cdef*' AND f GLOB 'bcde*') + OR a=13 + OR (f GLOB '?stuv*' AND f GLOB 'rstu*') + OR b=322 + OR ((a BETWEEN 33 AND 35) AND a!=34) + OR b=377 + OR f='cdefghijk' + OR b=286 + OR ((a BETWEEN 61 AND 63) AND a!=62) + } +} {1 2 13 17 26 27 28 33 35 43 53 54 61 63 69 79 80 95 scan 0 sort 0} +do_test where7-2.995.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=41 + OR b=990 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR b=605 + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=968 + OR a=66 + } +} {16 28 36 41 55 66 88 90 scan 0 sort 0} +do_test where7-2.995.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=41 + OR b=990 + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR b=605 + OR (g='srqponm' AND f GLOB 'cdefg*') + OR (d>=36.0 AND d<37.0 AND d NOT NULL) + OR (g='vutsrqp' AND f GLOB 'qrstu*') + OR b=968 + OR a=66 + } +} {16 28 36 41 55 66 88 90 scan 0 sort 0} +do_test where7-2.996.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1059 + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (g='utsrqpo' AND f GLOB 
'tuvwx*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {17 19 32 37 39 41 52 57 scan 0 sort 0} +do_test where7-2.996.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1059 + OR (g='srqponm' AND f GLOB 'ghijk*') + OR (g='utsrqpo' AND f GLOB 'tuvwx*') + OR (g='nmlkjih' AND f GLOB 'fghij*') + OR (d>=17.0 AND d<18.0 AND d NOT NULL) + OR (d>=37.0 AND d<38.0 AND d NOT NULL) + OR (g='onmlkji' AND f GLOB 'abcde*') + OR ((a BETWEEN 39 AND 41) AND a!=40) + } +} {17 19 32 37 39 41 52 57 scan 0 sort 0} +do_test where7-2.997.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE ((a BETWEEN 41 AND 43) AND a!=42) + OR f='nopqrstuv' + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=42 + OR b=729 + OR b=297 + OR a=77 + OR b=781 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {13 27 36 38 39 41 42 43 44 65 71 77 91 scan 0 sort 0} +do_test where7-2.997.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE ((a BETWEEN 41 AND 43) AND a!=42) + OR f='nopqrstuv' + OR (g='ponmlkj' AND f GLOB 'stuvw*') + OR a=42 + OR b=729 + OR b=297 + OR a=77 + OR b=781 + OR ((a BETWEEN 36 AND 38) AND a!=37) + } +} {13 27 36 38 39 41 42 43 44 65 71 77 91 scan 0 sort 0} +do_test where7-2.998.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE a=12 + OR f='qrstuvwxy' + OR a=47 + OR b=135 + OR a=25 + } +} {12 16 25 42 47 68 94 scan 0 sort 0} +do_test where7-2.998.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE a=12 + OR f='qrstuvwxy' + OR a=47 + OR b=135 + OR a=25 + } +} {12 16 25 42 47 68 94 scan 0 sort 0} +do_test where7-2.999.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=451 + OR b=660 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR b=781 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=198 + OR b=1023 + OR a=98 + OR d<0.0 + OR ((a BETWEEN 79 AND 81) AND a!=80) + } +} {18 41 50 60 71 74 79 81 93 98 scan 0 sort 0} +do_test where7-2.999.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=451 + OR b=660 + OR (g='onmlkji' AND f GLOB 'yzabc*') + OR b=781 + OR (g='jihgfed' AND f GLOB 'wxyza*') + OR b=198 + OR b=1023 + OR a=98 + OR d<0.0 + OR ((a BETWEEN 79 AND 81) AND a!=80) + } +} {18 41 50 60 71 74 79 81 93 98 scan 0 sort 0} +do_test where7-2.1000.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=685 + OR a=86 + OR c=17017 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR a=80 + OR b=773 + } +} {49 50 51 80 85 86 87 90 scan 0 sort 0} +do_test where7-2.1000.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=685 + OR a=86 + OR c=17017 + OR ((a BETWEEN 85 AND 87) AND a!=86) + OR (g='gfedcba' AND f GLOB 'mnopq*') + OR a=80 + OR b=773 + } +} {49 50 51 80 85 86 87 90 scan 0 sort 0} +do_test where7-2.1001.1 { + count_steps_sort { + SELECT a FROM t2 + WHERE b=1092 + OR a=23 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR d<0.0 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR a=91 + } +} {2 22 23 28 54 80 91 scan 0 sort 0} +do_test where7-2.1001.2 { + count_steps_sort { + SELECT a FROM t3 + WHERE b=1092 + OR a=23 + OR (f GLOB '?defg*' AND f GLOB 'cdef*') + OR d<0.0 + OR (d>=22.0 AND d<23.0 AND d NOT NULL) + OR a=91 + } +} {2 22 23 28 54 80 91 scan 0 sort 0} +finish_test + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where8m.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where8m.test --- sqlite3-3.4.2/test/where8m.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/where8m.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,57 @@ 
+# 2008 December 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# is testing of where.c. More specifically, the focus is the optimization +# of WHERE clauses that feature the OR operator. +# +# $Id: where8m.test,v 1.3 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +source $testdir/malloc_common.tcl + +do_malloc_test where8m-1 -sqlprep { + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); +} -sqlbody { + SELECT c FROM t1 + WHERE + a = 2 OR b = 'three' OR a = 4 OR b = 'five' OR a = 6 OR + b = 'seven' OR a = 8 OR b = 'nine' OR a = 10 + ORDER BY rowid; + + SELECT c FROM t1 WHERE + a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; + + SELECT c FROM t1 WHERE + a BETWEEN 1 AND 3 AND b < 5 AND b > 2 AND c = 4; +} + +do_malloc_test where8m-2 -tclprep { + db eval { + BEGIN; + CREATE TABLE t1(a, b, c); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + } + for {set i 0} {$i < 1000} {incr i} { + set ii [expr $i*$i] + set iii [expr $i*$i] + db eval { INSERT INTO t1 VALUES($i, $ii, $iii) } + } + db eval COMMIT +} -sqlbody { + SELECT count(*) FROM t1 WHERE a BETWEEN 5 AND 995 OR b BETWEEN 5 AND 900000; +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where8.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where8.test --- sqlite3-3.4.2/test/where8.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/where8.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,661 @@ +# 2008 December 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The focus +# is testing of where.c. More specifically, the focus is the optimization +# of WHERE clauses that feature the OR operator. +# +# $Id: where8.test,v 1.8 2009/06/07 23:45:11 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +# Test organization: +# +# where8-1.*: Tests to demonstrate simple cases work with a single table +# in the FROM clause. +# +# where8-2.*: Tests surrounding virtual tables and the OR optimization. +# +# where8-3.*: Tests with more than one table in the FROM clause. 
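+#
+# The execsql_status helper defined below returns the result set followed
+# by the values of "db status step" (full-scan steps) and "db status sort"
+# (sort operations).  execsql_status2 additionally appends the value of
+# ::sqlite_search_count, so the trailing numbers in the expected results
+# read as "steps sorts" or "steps sorts searches".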
+# + +proc execsql_status {sql {db db}} { + set result [uplevel $db eval [list $sql]] + concat $result [db status step] [db status sort] +} + +proc execsql_status2 {sql {db db}} { + set ::sqlite_search_count 0 + set result [uplevel [list execsql_status $sql $db]] + concat $result $::sqlite_search_count +} + +do_test where8-1.1 { + execsql { + CREATE TABLE t1(a, b TEXT, c); + CREATE INDEX i1 ON t1(a); + CREATE INDEX i2 ON t1(b); + + INSERT INTO t1 VALUES(1, 'one', 'I'); + INSERT INTO t1 VALUES(2, 'two', 'II'); + INSERT INTO t1 VALUES(3, 'three', 'III'); + INSERT INTO t1 VALUES(4, 'four', 'IV'); + INSERT INTO t1 VALUES(5, 'five', 'V'); + INSERT INTO t1 VALUES(6, 'six', 'VI'); + INSERT INTO t1 VALUES(7, 'seven', 'VII'); + INSERT INTO t1 VALUES(8, 'eight', 'VIII'); + INSERT INTO t1 VALUES(9, 'nine', 'IX'); + INSERT INTO t1 VALUES(10, 'ten', 'X'); + } +} {} + +do_test where8-1.2 { + execsql_status2 { SELECT c FROM t1 WHERE a = 1 OR b = 'nine' } +} {I IX 0 0 6} + +do_test where8-1.3 { + execsql_status2 { SELECT c FROM t1 WHERE a > 8 OR b = 'two' } +} {IX X II 0 0 6} + +do_test where8-1.4 { + execsql_status2 { SELECT c FROM t1 WHERE a > 8 OR b GLOB 't*' } +} {IX X III II 0 0 9} + +do_test where8-1.5 { + execsql_status2 { SELECT c FROM t1 WHERE a > 8 OR b GLOB 'f*' } +} {IX X V IV 0 0 9} + +do_test where8-1.6 { + execsql_status { SELECT c FROM t1 WHERE a = 1 OR b = 'three' ORDER BY rowid } +} {I III 0 1} + +do_test where8-1.7 { + execsql_status { SELECT c FROM t1 WHERE a = 1 OR b = 'three' ORDER BY a } +} {I III 0 1} + +do_test where8-1.8 { + # 18 searches. 9 on the index cursor and 9 on the table cursor. + execsql_status2 { SELECT c FROM t1 WHERE a > 1 AND c LIKE 'I%' } +} {II III IV IX 0 0 18} + +do_test where8-1.9 { + execsql_status2 { SELECT c FROM t1 WHERE a >= 9 OR b <= 'eight' } +} {IX X VIII 0 0 6} + +do_test where8-1.10 { + execsql_status2 { + SELECT c FROM t1 WHERE (a >= 9 AND c != 'X') OR b <= 'eight' + } +} {IX VIII 0 0 6} + +do_test where8-1.11 { + execsql_status2 { + SELECT c FROM t1 WHERE (a >= 4 AND a <= 6) OR b = 'nine' + } +} {IV V VI IX 0 0 10} + +do_test where8-1.12.1 { + execsql_status2 { + SELECT c FROM t1 WHERE a IN(1, 2, 3) OR a = 5 + } +} {I II III V 0 0 14} + +do_test where8-1.12.2 { + execsql_status2 { + SELECT c FROM t1 WHERE +a IN(1, 2, 3) OR +a = 5 + } +} {I II III V 9 0 9} + +do_test where8-1.13 { + execsql_status2 { + SELECT c FROM t1 + WHERE a = 2 OR b = 'three' OR a = 4 OR b = 'five' OR a = 6 + ORDER BY rowid + } +} {II III IV V VI 0 1 18} +do_test where8-1.14 { + execsql_status2 { + SELECT c FROM t1 + WHERE + a = 2 OR b = 'three' OR a = 4 OR b = 'five' OR a = 6 OR + b = 'seven' OR a = 8 OR b = 'nine' OR a = 10 + ORDER BY rowid + } +} {II III IV V VI VII VIII IX X 0 1 33} + +do_test where8-1.15 { + execsql_status2 { + SELECT c FROM t1 WHERE + a BETWEEN 2 AND 4 OR b = 'nine' + ORDER BY rowid + } +} {II III IV IX 0 1 12} + + +#-------------------------------------------------------------------------- +# Tests where8-2.*: Virtual tables +# + +if 0 { +ifcapable vtab { + # Register the 'echo' module used for testing virtual tables. 
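+  # (Note that this whole where8-2.* block is wrapped in "if 0 { ... }",
+  # so the virtual-table cases, including the unresolved TODO check on
+  # the echo_module state, are currently disabled.)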
+ # + register_echo_module [sqlite3_connection_pointer db] + + do_test where8-2.1 { + execsql { + CREATE VIRTUAL TABLE e1 USING echo(t1); + SELECT b FROM e1; + } + } {one two three four five six seven eight nine ten} + + do_test where8-2.2.1 { + set echo_module "" + execsql { + SELECT c FROM e1 WHERE a=1 OR b='three'; + } + } {I III} + do_test where8-2.2.2 { + set echo_module + } {TODO: What should this be?} +} +} + +#-------------------------------------------------------------------------- +# Tests where8-3.*: Cases with multiple tables in the FROM clause. +# +do_test where8-3.1 { + execsql { + CREATE TABLE t2(d, e, f); + CREATE INDEX i3 ON t2(d); + CREATE INDEX i4 ON t2(e); + + INSERT INTO t2 VALUES(1, NULL, 'I'); + INSERT INTO t2 VALUES(2, 'four', 'IV'); + INSERT INTO t2 VALUES(3, NULL, 'IX'); + INSERT INTO t2 VALUES(4, 'sixteen', 'XVI'); + INSERT INTO t2 VALUES(5, NULL, 'XXV'); + INSERT INTO t2 VALUES(6, 'thirtysix', 'XXXVI'); + INSERT INTO t2 VALUES(7, 'fortynine', 'XLIX'); + INSERT INTO t2 VALUES(8, 'sixtyeight', 'LXIV'); + INSERT INTO t2 VALUES(9, 'eightyone', 'LXXXIX'); + INSERT INTO t2 VALUES(10, NULL, 'C'); + } +} {} + +do_test where8-3.2 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE b=e + } +} {4 2 9 0} + +do_test where8-3.3 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a = 2 OR a = 3) AND d = 6 + } +} {2 6 3 6 0 0} + +do_test where8-3.4 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a = 2 OR a = 3) AND d = a + } +} {2 2 3 3 0 0} + +do_test where8-3.5 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a = 2 OR a = 3) AND (d = a OR e = 'sixteen') + } +} {2 2 2 4 3 3 3 4 0 0} + +do_test where8-3.6 { + # The first part of the WHERE clause in this query, (a=2 OR a=3) is + # transformed into "a IN (2, 3)". This is why the sort is required. + # + execsql_status { + SELECT a, d + FROM t1, t2 + WHERE (a = 2 OR a = 3) AND (d = a OR e = 'sixteen') + ORDER BY t1.rowid + } +} {2 2 2 4 3 3 3 4 0 1} +do_test where8-3.7 { + execsql_status { + SELECT a, d + FROM t1, t2 + WHERE a = 2 AND (d = a OR e = 'sixteen') + ORDER BY t1.rowid + } +} {2 2 2 4 0 0} +do_test where8-3.8 { + execsql_status { + SELECT a, d + FROM t1, t2 + WHERE (a = 2 OR b = 'three') AND (d = a OR e = 'sixteen') + ORDER BY t1.rowid + } +} {2 2 2 4 3 3 3 4 0 1} + +do_test where8-3.9 { + # The "OR c = 'IX'" term forces a linear scan. 
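+  # (t1 only has indexes on columns a and b, so the disjunct on the
+  # unindexed column c cannot be satisfied by an index lookup and the
+  # whole table must be scanned.)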
+ execsql_status { + SELECT a, d + FROM t1, t2 + WHERE (a = 2 OR b = 'three' OR c = 'IX') AND (d = a OR e = 'sixteen') + ORDER BY t1.rowid + } +} {2 2 2 4 3 3 3 4 9 9 9 4 9 0} + +do_test where8-3.10 { + execsql_status { + SELECT d FROM t2 WHERE e IS NULL OR e = 'four' + } +} {1 3 5 10 2 0 0} + +do_test where8-3.11 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a=d OR b=e) AND a<5 ORDER BY a + } +} {1 1 2 2 3 3 4 2 4 4 0 0} +do_test where8-3.12 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a=d OR b=e) AND +a<5 ORDER BY a + } +} {1 1 2 2 3 3 4 2 4 4 0 0} +do_test where8-3.13 { + execsql_status { + SELECT a, d FROM t1, t2 WHERE (a=d OR b=e) AND +a<5 + } +} {1 1 2 2 3 3 4 2 4 4 9 0} + +do_test where8-3.14 { + execsql_status { + SELECT c FROM t1 WHERE a > (SELECT d FROM t2 WHERE e = b) OR a = 5 + } +} {IV V 9 0} + +do_test where8-3.15 { + execsql_status { + SELECT c FROM t1, t2 WHERE a BETWEEN 1 AND 2 OR a = ( + SELECT sum(e IS NULL) FROM t2 AS inner WHERE t2.d>inner.d + ) + } +} {I I I I I I I I I I II II II II II II II II II II III III III III III 99 0} + +#----------------------------------------------------------------------- +# The following tests - where8-4.* - verify that adding or removing +# indexes does not change the results returned by various queries. +# +do_test where8-4.1 { + execsql { + BEGIN; + CREATE TABLE t3(a INTEGER, b REAL, c TEXT); + CREATE TABLE t4(f INTEGER, g REAL, h TEXT); + INSERT INTO t3 VALUES('hills', NULL, 1415926535); + INSERT INTO t3 VALUES('and', 'of', NULL); + INSERT INTO t3 VALUES('have', 'towering', 53594.08128); + INSERT INTO t3 VALUES(NULL, 45.64856692, 'Not'); + INSERT INTO t3 VALUES('same', 5028841971, NULL); + INSERT INTO t3 VALUES('onlookers', 'in', 8214808651); + INSERT INTO t3 VALUES(346.0348610, 2643383279, NULL); + INSERT INTO t3 VALUES(1415926535, 'of', 'are'); + INSERT INTO t3 VALUES(NULL, 0.4811174502, 'snapshots'); + INSERT INTO t3 VALUES('over', 'the', 8628034825); + INSERT INTO t3 VALUES(8628034825, 66.59334461, 2847564.823); + INSERT INTO t3 VALUES('onlookers', 'same', 'and'); + INSERT INTO t3 VALUES(NULL, 'light', 6939937510); + INSERT INTO t3 VALUES('from', 'their', 'viewed'); + INSERT INTO t3 VALUES('from', 'Alpine', 'snapshots'); + INSERT INTO t3 VALUES('from', 'sometimes', 'unalike'); + INSERT INTO t3 VALUES(1339.360726, 'light', 'have'); + INSERT INTO t3 VALUES(6939937510, 3282306647, 'other'); + INSERT INTO t3 VALUES('paintings', 8628034825, 'all'); + INSERT INTO t3 VALUES('paintings', NULL, 'same'); + INSERT INTO t3 VALUES('Alpine', 378678316.5, 'unalike'); + INSERT INTO t3 VALUES('Alpine', NULL, 'same'); + INSERT INTO t3 VALUES(1339.360726, 2847564.823, 'over'); + INSERT INTO t3 VALUES('villages', 'their', 'have'); + INSERT INTO t3 VALUES('unalike', 'remarkably', 'in'); + INSERT INTO t3 VALUES('and', 8979323846, 'and'); + INSERT INTO t3 VALUES(NULL, 1415926535, 'an'); + INSERT INTO t3 VALUES(271.2019091, 8628034825, 0.4811174502); + INSERT INTO t3 VALUES('all', 3421170679, 'the'); + INSERT INTO t3 VALUES('Not', 'and', 1415926535); + INSERT INTO t3 VALUES('of', 'other', 'light'); + INSERT INTO t3 VALUES(NULL, 'towering', 'Not'); + INSERT INTO t3 VALUES(346.0348610, NULL, 'other'); + INSERT INTO t3 VALUES('Not', 378678316.5, NULL); + INSERT INTO t3 VALUES('snapshots', 8628034825, 'of'); + INSERT INTO t3 VALUES(3282306647, 271.2019091, 'and'); + INSERT INTO t3 VALUES(50.58223172, 378678316.5, 5028841971); + INSERT INTO t3 VALUES(50.58223172, 2643383279, 'snapshots'); + INSERT INTO t3 VALUES('writings', 8979323846, 
8979323846); + INSERT INTO t3 VALUES('onlookers', 'his', 'in'); + INSERT INTO t3 VALUES('unalike', 8628034825, 1339.360726); + INSERT INTO t3 VALUES('of', 'Alpine', 'and'); + INSERT INTO t3 VALUES('onlookers', NULL, 'from'); + INSERT INTO t3 VALUES('writings', 'it', 1339.360726); + INSERT INTO t3 VALUES('it', 'and', 'villages'); + INSERT INTO t3 VALUES('an', 'the', 'villages'); + INSERT INTO t3 VALUES(8214808651, 8214808651, 'same'); + INSERT INTO t3 VALUES(346.0348610, 'light', 1415926535); + INSERT INTO t3 VALUES(NULL, 8979323846, 'and'); + INSERT INTO t3 VALUES(NULL, 'same', 1339.360726); + INSERT INTO t4 VALUES('his', 'from', 'an'); + INSERT INTO t4 VALUES('snapshots', 'or', NULL); + INSERT INTO t4 VALUES('Alpine', 'have', 'it'); + INSERT INTO t4 VALUES('have', 'peak', 'remarkably'); + INSERT INTO t4 VALUES('hills', NULL, 'Not'); + INSERT INTO t4 VALUES('same', 'from', 2643383279); + INSERT INTO t4 VALUES('have', 'angle', 8628034825); + INSERT INTO t4 VALUES('sometimes', 'it', 2847564.823); + INSERT INTO t4 VALUES(0938446095, 'peak', 'of'); + INSERT INTO t4 VALUES(8628034825, 'and', 'same'); + INSERT INTO t4 VALUES('and', 271.2019091, 'their'); + INSERT INTO t4 VALUES('the', 'of', 'remarkably'); + INSERT INTO t4 VALUES('and', 3421170679, 1415926535); + INSERT INTO t4 VALUES('and', 'in', 'all'); + INSERT INTO t4 VALUES(378678316.5, 0.4811174502, 'snapshots'); + INSERT INTO t4 VALUES('it', 'are', 'have'); + INSERT INTO t4 VALUES('angle', 'snapshots', 378678316.5); + INSERT INTO t4 VALUES('from', 1415926535, 8628034825); + INSERT INTO t4 VALUES('snapshots', 'angle', 'have'); + INSERT INTO t4 VALUES(3421170679, 0938446095, 'Not'); + INSERT INTO t4 VALUES('peak', NULL, 0.4811174502); + INSERT INTO t4 VALUES('same', 'have', 'Alpine'); + INSERT INTO t4 VALUES(271.2019091, 66.59334461, 0938446095); + INSERT INTO t4 VALUES(8979323846, 'his', 'an'); + INSERT INTO t4 VALUES(NULL, 'and', 3282306647); + INSERT INTO t4 VALUES('remarkably', NULL, 'Not'); + INSERT INTO t4 VALUES('villages', 4543.266482, 'his'); + INSERT INTO t4 VALUES(2643383279, 'paintings', 'onlookers'); + INSERT INTO t4 VALUES(1339.360726, 'of', 'the'); + INSERT INTO t4 VALUES('peak', 'other', 'peak'); + INSERT INTO t4 VALUES('it', 'or', 8979323846); + INSERT INTO t4 VALUES('onlookers', 'Not', 'towering'); + INSERT INTO t4 VALUES(NULL, 'peak', 'Not'); + INSERT INTO t4 VALUES('of', 'have', 6939937510); + INSERT INTO t4 VALUES('light', 'hills', 0.4811174502); + INSERT INTO t4 VALUES(5028841971, 'Not', 'it'); + INSERT INTO t4 VALUES('and', 'Not', NULL); + INSERT INTO t4 VALUES(346.0348610, 'villages', NULL); + INSERT INTO t4 VALUES(8979323846, NULL, 6939937510); + INSERT INTO t4 VALUES('an', 'light', 'peak'); + INSERT INTO t4 VALUES(5028841971, 6939937510, 'light'); + INSERT INTO t4 VALUES('sometimes', 'peak', 'peak'); + INSERT INTO t4 VALUES(378678316.5, 5028841971, 'an'); + INSERT INTO t4 VALUES(378678316.5, 'his', 'Alpine'); + INSERT INTO t4 VALUES('from', 'of', 'all'); + INSERT INTO t4 VALUES(0938446095, 'same', NULL); + INSERT INTO t4 VALUES(0938446095, 'Alpine', NULL); + INSERT INTO t4 VALUES('his', 'of', 378678316.5); + INSERT INTO t4 VALUES(271.2019091, 'viewed', 3282306647); + INSERT INTO t4 VALUES('hills', 'all', 'peak'); + COMMIT; + } +} {} + +catch {unset results} +catch {unset A} +catch {unset B} + +set A 2 +foreach idxsql { + { + /* No indexes */ + } { + CREATE INDEX i5 ON t3(a); + } { + CREATE INDEX i5 ON t3(a, b); + CREATE INDEX i6 ON t4(f); + } { + CREATE UNIQUE INDEX i5 ON t3(a, b); + CREATE INDEX i7 ON t3(c); + 
CREATE INDEX i6 ON t4(f); + CREATE INDEX i8 ON t4(h); + } { + CREATE INDEX i5 ON t3(a, b, c); + CREATE INDEX i6 ON t4(f, g, h); + CREATE INDEX i7 ON t3(c, b, a); + CREATE INDEX i8 ON t4(h, g, f); + } +} { + + execsql { + DROP INDEX IF EXISTS i5; + DROP INDEX IF EXISTS i6; + DROP INDEX IF EXISTS i7; + DROP INDEX IF EXISTS i8; + } + execsql $idxsql + + foreach {B sql} { + 1 { SELECT * FROM t3 WHERE c LIKE b } + 2 { SELECT * FROM t3 WHERE c||'' LIKE 'the%' } + 3 { SELECT * FROM t3 WHERE rowid LIKE '12%' } + 4 { SELECT * FROM t3 WHERE +c LIKE 'the%' } + 5 { SELECT * FROM t3 WHERE c LIKE 'the%' } + 6 { SELECT * FROM t3 WHERE c GLOB '*llo' } + + 7 { SELECT * FROM t3 WHERE a = 'angle' } + 8 { SELECT * FROM t3 WHERE a = 'it' OR b = 6939937510 } + 9 { SELECT * FROM t3, t4 WHERE a = 'painting' OR a = 'are' OR a = f } +10 { SELECT * FROM t3, t4 WHERE a = 'all' OR a = 'and' OR a = h } +11 { SELECT * FROM t3, t4 WHERE a < 'of' OR b > 346 AND c IS NULL } +12 { SELECT * FROM t3, t4 WHERE 'the' > a OR b > 'have' AND c = 1415926535 } + +13 { SELECT * FROM t3 WHERE a BETWEEN 'one' AND 'two' OR a = 3421170679 } +14 { SELECT * FROM t3 WHERE a BETWEEN 'one' AND 'two' OR a IS NULL } +15 { SELECT * FROM t3 WHERE c > 'one' OR c >= 'one' OR c LIKE 'one%' } +16 { SELECT * FROM t3 WHERE c > 'one' OR c = c OR c = a } +17 { SELECT * FROM t3 WHERE c IS NULL OR a >= 'peak' } +18 { SELECT * FROM t3 WHERE c IN ('other', 'all', 'snapshots') OR a>1 } +19 { SELECT * FROM t3 WHERE c IN ('other', 'all', 'snapshots') AND a>1 } +20 { SELECT * FROM t3 WHERE c IS NULL AND a>'one' } +21 { SELECT * FROM t3 WHERE c IS NULL OR a>'one' } +22 { SELECT * FROM t3 WHERE b = b AND a > 'are' } +23 { SELECT * FROM t3 WHERE c <= b OR b < 'snapshots' } +24 { SELECT * FROM t3 WHERE 'onlookers' >= c AND a <= b OR b = 'angle' } +25 { SELECT * FROM t3 WHERE b = 'from' } +26 { SELECT * FROM t3 WHERE b = 4543.266482 } +27 { SELECT * FROM t3 WHERE c < 3282306647 } +28 { SELECT * FROM t3 WHERE c IS NULL AND b >= c } +29 { SELECT * FROM t3 WHERE b > 0.4811174502 AND c = 'other' AND 'viewed' > a } +30 { SELECT * FROM t3 WHERE c = 'peak' } +31 { SELECT * FROM t3 WHERE c < 53594.08128 OR c <= b } +32 { SELECT * FROM t3 WHERE 'writings' <= b } +33 { SELECT * FROM t3 WHERE 2643383279 = b OR c < b AND b <= 3282306647 } +34 { SELECT * FROM t3 WHERE a IS NULL } +35 { SELECT * FROM t3 WHERE 'writings' = a OR b = 378678316.5 } +36 { SELECT * FROM t3 WHERE 'and' >= c } +37 { SELECT * FROM t3 WHERE c < 'from' } +38 { SELECT * FROM t3 WHERE 'his' < c OR b < b } +39 { SELECT * FROM t3 WHERE 53594.08128 = b AND c >= b } +40 { SELECT * FROM t3 WHERE 'unalike' < c AND 'are' >= c AND a <= b } +41 { SELECT * FROM t3 WHERE b >= 4543.266482 OR 'Alpine' > a OR 271.2019091 <= a } +42 { SELECT * FROM t3 WHERE b = c } +43 { SELECT * FROM t3 WHERE c > a AND b < 'all' } +44 { SELECT * FROM t3 WHERE c BETWEEN 'hills' AND 'snapshots' AND c <= 'the' OR c = a } +45 { SELECT * FROM t3 WHERE b > c AND c >= 'hills' } +46 { SELECT * FROM t3 WHERE b > 'or' OR a <= 'hills' OR c IS NULL } +47 { SELECT * FROM t3 WHERE c > b OR b BETWEEN 1339.360726 AND 'onlookers' OR 1415926535 >= b } +48 { SELECT * FROM t3 WHERE a IS NULL } +49 { SELECT * FROM t3 WHERE a > 'other' } +50 { SELECT * FROM t3 WHERE 'the' <= c AND a <= c } +51 { SELECT * FROM t3 WHERE 346.0348610 = a AND c = b } +52 { SELECT * FROM t3 WHERE c BETWEEN 50.58223172 AND 'same' AND a < b } +53 { SELECT * FROM t3 WHERE 'Alpine' <= b AND c >= 'angle' OR b <= 271.2019091 } +54 { SELECT * FROM t3 WHERE a < a AND 1415926535 > b } +55 { 
SELECT * FROM t3 WHERE c > a AND 'have' >= c } +56 { SELECT * FROM t3 WHERE b <= b AND c > b } +57 { SELECT * FROM t3 WHERE a IS NULL AND c <= c } +58 { SELECT * FROM t3 WHERE b < c OR b = c } +59 { SELECT * FROM t3 WHERE c < b AND b >= 'it' } +60 { SELECT * FROM t3 WHERE a = b AND a <= b OR b >= a } +61 { SELECT * FROM t3 WHERE b = c } +62 { SELECT * FROM t3 WHERE c BETWEEN 'the' AND 271.2019091 OR c <= 3282306647 AND c >= b } +63 { SELECT * FROM t3 WHERE c >= c AND c < 'writings' } +64 { SELECT * FROM t3 WHERE c <= 3282306647 AND b > a OR 'unalike' <= a } +65 { SELECT * FROM t3 WHERE a > c } +66 { SELECT * FROM t3 WHERE c = 'it' OR b >= b } +67 { SELECT * FROM t3 WHERE c = a OR b < c } +68 { SELECT * FROM t3 WHERE b > a } +69 { SELECT * FROM t3 WHERE a < b OR a > 4543.266482 OR 'same' = b } +70 { SELECT * FROM t3 WHERE c < c OR b <= c OR a <= b } +71 { SELECT * FROM t3 WHERE c > a } +72 { SELECT * FROM t3 WHERE c > b } +73 { SELECT * FROM t3 WHERE b <= a } +74 { SELECT * FROM t3 WHERE 3282306647 < b AND a >= 'or' OR a >= 378678316.5 } +75 { SELECT * FROM t3 WHERE 50.58223172 <= c OR c = c AND b < b } +76 { SELECT * FROM t3 WHERE 'and' < b OR b < c OR c > 1339.360726 } +77 { SELECT * FROM t3 WHERE b <= c } +78 { SELECT * FROM t3 WHERE 'in' <= c } +79 { SELECT * FROM t3 WHERE c <= b AND a > a AND c < b } +80 { SELECT * FROM t3 WHERE 'over' < b } +81 { SELECT * FROM t3 WHERE b >= b OR b < c OR a < b } +82 { SELECT * FROM t3 WHERE 'towering' <= b OR 'towering' = a AND c > b } +83 { SELECT * FROM t3 WHERE 'peak' = a OR b BETWEEN 2643383279 AND 'the' } +84 { SELECT * FROM t3 WHERE 'an' < c AND c > 'the' AND c IS NULL } +85 { SELECT * FROM t3 WHERE a <= 'sometimes' AND a BETWEEN 'unalike' AND 1339.360726 } +86 { SELECT * FROM t3 WHERE 1339.360726 < c AND c IS NULL } +87 { SELECT * FROM t3 WHERE b > 'the' } +88 { SELECT * FROM t3 WHERE 'and' = a } +89 { SELECT * FROM t3 WHERE b >= b } +90 { SELECT * FROM t3 WHERE b >= 8979323846 } +91 { SELECT * FROM t3 WHERE c <= a } +92 { SELECT * FROM t3 WHERE a BETWEEN 'have' AND 'light' OR a > b OR a >= 378678316.5 } +93 { SELECT * FROM t3 WHERE c > 3282306647 } +94 { SELECT * FROM t3 WHERE b > c } +95 { SELECT * FROM t3 WHERE b >= a AND 'villages' > a AND b >= c } +96 { SELECT * FROM t3 WHERE 'angle' > a } +97 { SELECT * FROM t3 WHERE 'paintings' >= a } +98 { SELECT * FROM t3 WHERE 'or' >= c } +99 { SELECT * FROM t3 WHERE c < b } + + +101 { SELECT * FROM t3, t4 WHERE f < 'sometimes' OR 'over' <= g AND h < 1415926535 } +102 { SELECT * FROM t3, t4 WHERE h >= 'from' AND h < 6939937510 OR g > h } +103 { SELECT * FROM t3, t4 WHERE c <= h AND g = h AND c >= 'all' } +104 { SELECT * FROM t3, t4 WHERE c = a } +105 { SELECT * FROM t3, t4 WHERE 'of' >= h } +106 { SELECT * FROM t3, t4 WHERE f >= b AND a < g AND h < 'and' } +107 { SELECT * FROM t3, t4 WHERE f <= 8628034825 AND 0938446095 >= b } +108 { SELECT * FROM t3, t4 WHERE a < 'the' } +109 { SELECT * FROM t3, t4 WHERE f = 'sometimes' OR b < 'of' } +110 { SELECT * FROM t3, t4 WHERE c IS NULL } +111 { SELECT * FROM t3, t4 WHERE 'have' = b OR g <= 346.0348610 } +112 { SELECT * FROM t3, t4 WHERE f > b AND b <= h } +113 { SELECT * FROM t3, t4 WHERE f > c OR 'the' = a OR 50.58223172 = a } +114 { SELECT * FROM t3, t4 WHERE 2643383279 <= a AND c = a } +115 { SELECT * FROM t3, t4 WHERE h >= b AND 'it' <= b } +116 { SELECT * FROM t3, t4 WHERE g BETWEEN 'from' AND 'peak' } +117 { SELECT * FROM t3, t4 WHERE 'their' > a AND g > b AND f <= c } +118 { SELECT * FROM t3, t4 WHERE h = 5028841971 AND 'unalike' <= f } +119 { SELECT * 
FROM t3, t4 WHERE c IS NULL AND a = 3282306647 OR a <= 'Alpine' } +120 { SELECT * FROM t3, t4 WHERE 'sometimes' <= f OR 8214808651 >= a AND b <= 53594.08128 } +121 { SELECT * FROM t3, t4 WHERE 6939937510 <= f OR c < f OR 'sometimes' = c } +122 { SELECT * FROM t3, t4 WHERE b < 'onlookers' AND 'paintings' = g AND c <= h } +123 { SELECT * FROM t3, t4 WHERE a BETWEEN 'all' AND 'from' OR c > 346.0348610 } +124 { SELECT * FROM t3, t4 WHERE 'from' <= b OR a BETWEEN 53594.08128 AND 'their' AND c > a } +125 { SELECT * FROM t3, t4 WHERE h = 2643383279 } +126 { SELECT * FROM t3, t4 WHERE a <= 'the' } +127 { SELECT * FROM t3, t4 WHERE h <= c } +128 { SELECT * FROM t3, t4 WHERE g <= 346.0348610 AND 66.59334461 >= f AND f <= f } +129 { SELECT * FROM t3, t4 WHERE g >= c OR 'in' < b OR b > g } +130 { SELECT * FROM t3, t4 WHERE 'over' > g AND b BETWEEN 'unalike' AND 'remarkably' } +131 { SELECT * FROM t3, t4 WHERE h <= 2847564.823 } +132 { SELECT * FROM t3, t4 WHERE h <= 'remarkably' AND 4543.266482 > h } +133 { SELECT * FROM t3, t4 WHERE a >= c AND 'it' > g AND c < c } +134 { SELECT * FROM t3, t4 WHERE h <= 66.59334461 AND b > 3421170679 } +135 { SELECT * FROM t3, t4 WHERE h < 'are' OR f BETWEEN 0938446095 AND 'are' OR b = b } +136 { SELECT * FROM t3, t4 WHERE h = a OR 66.59334461 <= f } +137 { SELECT * FROM t3, t4 WHERE f > 'of' OR h <= h OR a = f } +138 { SELECT * FROM t3, t4 WHERE 'other' >= g } +139 { SELECT * FROM t3, t4 WHERE b <= 3421170679 } +140 { SELECT * FROM t3, t4 WHERE 'all' = f AND 4543.266482 = b OR f BETWEEN 'and' AND 'angle' } +141 { SELECT * FROM t3, t4 WHERE 'light' = f OR h BETWEEN 'remarkably' AND 1415926535 } +142 { SELECT * FROM t3, t4 WHERE 'hills' = f OR 'the' >= f } +143 { SELECT * FROM t3, t4 WHERE a > 346.0348610 } +144 { SELECT * FROM t3, t4 WHERE 5028841971 = h } +145 { SELECT * FROM t3, t4 WHERE b >= c AND 'the' >= g OR 45.64856692 <= g } +146 { SELECT * FROM t3, t4 WHERE c < 5028841971 } +147 { SELECT * FROM t3, t4 WHERE a > a } +148 { SELECT * FROM t3, t4 WHERE c = 'snapshots' } +149 { SELECT * FROM t3, t4 WHERE h > 1339.360726 AND 'and' > c } +150 { SELECT * FROM t3, t4 WHERE 'and' > g OR 'sometimes' = c } +151 { SELECT * FROM t3, t4 WHERE g >= 'the' AND b >= 'onlookers' } +152 { SELECT * FROM t3, t4 WHERE h BETWEEN 'other' AND 2643383279 } +153 { SELECT * FROM t3, t4 WHERE 'it' = b } +154 { SELECT * FROM t3, t4 WHERE f = c OR c BETWEEN 'and' AND 0.4811174502 } +155 { SELECT * FROM t3, t4 WHERE b <= 'sometimes' OR c <= 0938446095 } +156 { SELECT * FROM t3, t4 WHERE 'and' <= b } +157 { SELECT * FROM t3, t4 WHERE g > a AND f = 'the' AND b < a } +158 { SELECT * FROM t3, t4 WHERE a < 'an' } +159 { SELECT * FROM t3, t4 WHERE a BETWEEN 'his' AND 'same' OR 8628034825 > f } +160 { SELECT * FROM t3, t4 WHERE b = 'peak' } +161 { SELECT * FROM t3, t4 WHERE f IS NULL AND a >= h } +162 { SELECT * FROM t3, t4 WHERE a IS NULL OR 2643383279 = c } +163 { SELECT * FROM t3, t4 WHERE b >= 5028841971 AND f < c AND a IS NULL } +164 { SELECT * FROM t3, t4 WHERE a >= g } +165 { SELECT * FROM t3, t4 WHERE c IS NULL } +166 { SELECT * FROM t3, t4 WHERE h >= h } +167 { SELECT * FROM t3, t4 WHERE 'over' <= h } +168 { SELECT * FROM t3, t4 WHERE b < 4543.266482 OR b = 2643383279 OR 8628034825 < b } +169 { SELECT * FROM t3, t4 WHERE g >= 6939937510 } +170 { SELECT * FROM t3, t4 WHERE 'or' < a OR b < g } +171 { SELECT * FROM t3, t4 WHERE h < 'hills' OR 'and' > g } +172 { SELECT * FROM t3, t4 WHERE 'from' > f OR f <= f } +173 { SELECT * FROM t3, t4 WHERE 'viewed' > b AND f < c } +174 { SELECT * FROM t3, 
t4 WHERE 'of' <= a } +175 { SELECT * FROM t3, t4 WHERE f > 0938446095 } +176 { SELECT * FROM t3, t4 WHERE a = g } +177 { SELECT * FROM t3, t4 WHERE g >= b AND f BETWEEN 'peak' AND 'and' } +178 { SELECT * FROM t3, t4 WHERE g = a AND 'it' > f } +179 { SELECT * FROM t3, t4 WHERE a <= b OR 'from' > f } +180 { SELECT * FROM t3, t4 WHERE f < 'and' } +181 { SELECT * FROM t3, t4 WHERE 6939937510 < b OR 'sometimes' < h } +182 { SELECT * FROM t3, t4 WHERE f > g AND f < 'peak' } +183 { SELECT * FROM t3, t4 WHERE a <= 53594.08128 AND c <= f AND f >= c } +184 { SELECT * FROM t3, t4 WHERE f = c OR 'it' > b OR g BETWEEN 'the' AND 'all' } +185 { SELECT * FROM t3, t4 WHERE c <= g OR a = h } +186 { SELECT * FROM t3, t4 WHERE 'same' = b OR c >= 2643383279 } +187 { SELECT * FROM t3, t4 WHERE h <= g OR c > 66.59334461 OR a <= f } +188 { SELECT * FROM t3, t4 WHERE b < c AND f = 'writings' } +189 { SELECT * FROM t3, t4 WHERE b < a } +190 { SELECT * FROM t3, t4 WHERE c >= f OR c = 'and' } +191 { SELECT * FROM t3, t4 WHERE f >= 'peak' AND g > f AND h > g } +192 { SELECT * FROM t3, t4 WHERE a >= 8979323846 AND 'same' > b OR c = 'and' } +193 { SELECT * FROM t3, t4 WHERE c >= g OR 'writings' >= c AND b = 'all' } +194 { SELECT * FROM t3, t4 WHERE 'remarkably' < g } +195 { SELECT * FROM t3, t4 WHERE a BETWEEN 'or' AND 'paintings' AND g <= f } +196 { SELECT * FROM t3, t4 WHERE 0938446095 > b OR g <= a OR h > b } +197 { SELECT * FROM t3, t4 WHERE g = 2643383279 AND f = g } +198 { SELECT * FROM t3, t4 WHERE g < 8979323846 } +199 { SELECT * FROM t3, t4 WHERE 'are' <= b } + + } { + do_test where8-4.$A.$B.1 { + set R [execsql $sql] + if {![info exists results($B)]} { + set results($B) $R + } + list + } {} + + do_test where8-4.$A.$B.2 { lsort $R } [lsort $results($B)] + } + incr A +} + +catch {unset results} +catch {unset A} +catch {unset B} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where9.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where9.test --- sqlite3-3.4.2/test/where9.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/where9.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,810 @@ +# 2008 December 30 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the multi-index OR clause optimizer. +# +# $Id: where9.test,v 1.9 2009/06/05 17:09:12 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +ifcapable !or_opt { + finish_test + return +} + +# Evaluate SQL. Return the result set followed by the +# and the number of full-scan steps. +# +proc count_steps {sql} { + set r [db eval $sql] + lappend r scan [db status step] sort [db status sort] +} + + +# Construct test data. 
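+# (count_steps above appends "scan <full-scan steps> sort <sort count>"
+# to each result set.  Below, t1 gets a single-column index on each of
+# columns b..g, t2 holds the same 99 rows but with composite indexes,
+# t3 is a two-row helper table, and t4 repeats the data with indexes on
+# b and c only.)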
+# +do_test where9-1.1 { + db eval { + CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c,d,e,f,g); + INSERT INTO t1 VALUES(1,11,1001,1.001,100.1,'bcdefghij','yxwvuts'); + INSERT INTO t1 VALUES(2,22,1001,2.002,100.1,'cdefghijk','yxwvuts'); + INSERT INTO t1 VALUES(3,33,1001,3.003,100.1,'defghijkl','xwvutsr'); + INSERT INTO t1 VALUES(4,44,2002,4.004,200.2,'efghijklm','xwvutsr'); + INSERT INTO t1 VALUES(5,55,2002,5.005,200.2,'fghijklmn','xwvutsr'); + INSERT INTO t1 VALUES(6,66,2002,6.006,200.2,'ghijklmno','xwvutsr'); + INSERT INTO t1 VALUES(7,77,3003,7.007,300.3,'hijklmnop','xwvutsr'); + INSERT INTO t1 VALUES(8,88,3003,8.008,300.3,'ijklmnopq','wvutsrq'); + INSERT INTO t1 VALUES(9,99,3003,9.009,300.3,'jklmnopqr','wvutsrq'); + INSERT INTO t1 VALUES(10,110,4004,10.01,400.4,'klmnopqrs','wvutsrq'); + INSERT INTO t1 VALUES(11,121,4004,11.011,400.4,'lmnopqrst','wvutsrq'); + INSERT INTO t1 VALUES(12,132,4004,12.012,400.4,'mnopqrstu','wvutsrq'); + INSERT INTO t1 VALUES(13,143,5005,13.013,500.5,'nopqrstuv','vutsrqp'); + INSERT INTO t1 VALUES(14,154,5005,14.014,500.5,'opqrstuvw','vutsrqp'); + INSERT INTO t1 VALUES(15,165,5005,15.015,500.5,'pqrstuvwx','vutsrqp'); + INSERT INTO t1 VALUES(16,176,6006,16.016,600.6,'qrstuvwxy','vutsrqp'); + INSERT INTO t1 VALUES(17,187,6006,17.017,600.6,'rstuvwxyz','vutsrqp'); + INSERT INTO t1 VALUES(18,198,6006,18.018,600.6,'stuvwxyza','utsrqpo'); + INSERT INTO t1 VALUES(19,209,7007,19.019,700.7,'tuvwxyzab','utsrqpo'); + INSERT INTO t1 VALUES(20,220,7007,20.02,700.7,'uvwxyzabc','utsrqpo'); + INSERT INTO t1 VALUES(21,231,7007,21.021,700.7,'vwxyzabcd','utsrqpo'); + INSERT INTO t1 VALUES(22,242,8008,22.022,800.8,'wxyzabcde','utsrqpo'); + INSERT INTO t1 VALUES(23,253,8008,23.023,800.8,'xyzabcdef','tsrqpon'); + INSERT INTO t1 VALUES(24,264,8008,24.024,800.8,'yzabcdefg','tsrqpon'); + INSERT INTO t1 VALUES(25,275,9009,25.025,900.9,'zabcdefgh','tsrqpon'); + INSERT INTO t1 VALUES(26,286,9009,26.026,900.9,'abcdefghi','tsrqpon'); + INSERT INTO t1 VALUES(27,297,9009,27.027,900.9,'bcdefghij','tsrqpon'); + INSERT INTO t1 VALUES(28,308,10010,28.028,1001.0,'cdefghijk','srqponm'); + INSERT INTO t1 VALUES(29,319,10010,29.029,1001.0,'defghijkl','srqponm'); + INSERT INTO t1 VALUES(30,330,10010,30.03,1001.0,'efghijklm','srqponm'); + INSERT INTO t1 VALUES(31,341,11011,31.031,1101.1,'fghijklmn','srqponm'); + INSERT INTO t1 VALUES(32,352,11011,32.032,1101.1,'ghijklmno','srqponm'); + INSERT INTO t1 VALUES(33,363,11011,33.033,1101.1,'hijklmnop','rqponml'); + INSERT INTO t1 VALUES(34,374,12012,34.034,1201.2,'ijklmnopq','rqponml'); + INSERT INTO t1 VALUES(35,385,12012,35.035,1201.2,'jklmnopqr','rqponml'); + INSERT INTO t1 VALUES(36,396,12012,36.036,1201.2,'klmnopqrs','rqponml'); + INSERT INTO t1 VALUES(37,407,13013,37.037,1301.3,'lmnopqrst','rqponml'); + INSERT INTO t1 VALUES(38,418,13013,38.038,1301.3,'mnopqrstu','qponmlk'); + INSERT INTO t1 VALUES(39,429,13013,39.039,1301.3,'nopqrstuv','qponmlk'); + INSERT INTO t1 VALUES(40,440,14014,40.04,1401.4,'opqrstuvw','qponmlk'); + INSERT INTO t1 VALUES(41,451,14014,41.041,1401.4,'pqrstuvwx','qponmlk'); + INSERT INTO t1 VALUES(42,462,14014,42.042,1401.4,'qrstuvwxy','qponmlk'); + INSERT INTO t1 VALUES(43,473,15015,43.043,1501.5,'rstuvwxyz','ponmlkj'); + INSERT INTO t1 VALUES(44,484,15015,44.044,1501.5,'stuvwxyza','ponmlkj'); + INSERT INTO t1 VALUES(45,495,15015,45.045,1501.5,'tuvwxyzab','ponmlkj'); + INSERT INTO t1 VALUES(46,506,16016,46.046,1601.6,'uvwxyzabc','ponmlkj'); + INSERT INTO t1 VALUES(47,517,16016,47.047,1601.6,'vwxyzabcd','ponmlkj'); + INSERT INTO t1 
VALUES(48,528,16016,48.048,1601.6,'wxyzabcde','onmlkji'); + INSERT INTO t1 VALUES(49,539,17017,49.049,1701.7,'xyzabcdef','onmlkji'); + INSERT INTO t1 VALUES(50,550,17017,50.05,1701.7,'yzabcdefg','onmlkji'); + INSERT INTO t1 VALUES(51,561,17017,51.051,1701.7,'zabcdefgh','onmlkji'); + INSERT INTO t1 VALUES(52,572,18018,52.052,1801.8,'abcdefghi','onmlkji'); + INSERT INTO t1 VALUES(53,583,18018,53.053,1801.8,'bcdefghij','nmlkjih'); + INSERT INTO t1 VALUES(54,594,18018,54.054,1801.8,'cdefghijk','nmlkjih'); + INSERT INTO t1 VALUES(55,605,19019,55.055,1901.9,'defghijkl','nmlkjih'); + INSERT INTO t1 VALUES(56,616,19019,56.056,1901.9,'efghijklm','nmlkjih'); + INSERT INTO t1 VALUES(57,627,19019,57.057,1901.9,'fghijklmn','nmlkjih'); + INSERT INTO t1 VALUES(58,638,20020,58.058,2002.0,'ghijklmno','mlkjihg'); + INSERT INTO t1 VALUES(59,649,20020,59.059,2002.0,'hijklmnop','mlkjihg'); + INSERT INTO t1 VALUES(60,660,20020,60.06,2002.0,'ijklmnopq','mlkjihg'); + INSERT INTO t1 VALUES(61,671,21021,61.061,2102.1,'jklmnopqr','mlkjihg'); + INSERT INTO t1 VALUES(62,682,21021,62.062,2102.1,'klmnopqrs','mlkjihg'); + INSERT INTO t1 VALUES(63,693,21021,63.063,2102.1,'lmnopqrst','lkjihgf'); + INSERT INTO t1 VALUES(64,704,22022,64.064,2202.2,'mnopqrstu','lkjihgf'); + INSERT INTO t1 VALUES(65,715,22022,65.065,2202.2,'nopqrstuv','lkjihgf'); + INSERT INTO t1 VALUES(66,726,22022,66.066,2202.2,'opqrstuvw','lkjihgf'); + INSERT INTO t1 VALUES(67,737,23023,67.067,2302.3,'pqrstuvwx','lkjihgf'); + INSERT INTO t1 VALUES(68,748,23023,68.068,2302.3,'qrstuvwxy','kjihgfe'); + INSERT INTO t1 VALUES(69,759,23023,69.069,2302.3,'rstuvwxyz','kjihgfe'); + INSERT INTO t1 VALUES(70,770,24024,70.07,2402.4,'stuvwxyza','kjihgfe'); + INSERT INTO t1 VALUES(71,781,24024,71.071,2402.4,'tuvwxyzab','kjihgfe'); + INSERT INTO t1 VALUES(72,792,24024,72.072,2402.4,'uvwxyzabc','kjihgfe'); + INSERT INTO t1 VALUES(73,803,25025,73.073,2502.5,'vwxyzabcd','jihgfed'); + INSERT INTO t1 VALUES(74,814,25025,74.074,2502.5,'wxyzabcde','jihgfed'); + INSERT INTO t1 VALUES(75,825,25025,75.075,2502.5,'xyzabcdef','jihgfed'); + INSERT INTO t1 VALUES(76,836,26026,76.076,2602.6,'yzabcdefg','jihgfed'); + INSERT INTO t1 VALUES(77,847,26026,77.077,2602.6,'zabcdefgh','jihgfed'); + INSERT INTO t1 VALUES(78,858,26026,78.078,2602.6,'abcdefghi','ihgfedc'); + INSERT INTO t1 VALUES(79,869,27027,79.079,2702.7,'bcdefghij','ihgfedc'); + INSERT INTO t1 VALUES(80,880,27027,80.08,2702.7,'cdefghijk','ihgfedc'); + INSERT INTO t1 VALUES(81,891,27027,81.081,2702.7,'defghijkl','ihgfedc'); + INSERT INTO t1 VALUES(82,902,28028,82.082,2802.8,'efghijklm','ihgfedc'); + INSERT INTO t1 VALUES(83,913,28028,83.083,2802.8,'fghijklmn','hgfedcb'); + INSERT INTO t1 VALUES(84,924,28028,84.084,2802.8,'ghijklmno','hgfedcb'); + INSERT INTO t1 VALUES(85,935,29029,85.085,2902.9,'hijklmnop','hgfedcb'); + INSERT INTO t1 VALUES(86,946,29029,86.086,2902.9,'ijklmnopq','hgfedcb'); + INSERT INTO t1 VALUES(87,957,29029,87.087,2902.9,'jklmnopqr','hgfedcb'); + INSERT INTO t1 VALUES(88,968,30030,88.088,3003.0,'klmnopqrs','gfedcba'); + INSERT INTO t1 VALUES(89,979,30030,89.089,3003.0,'lmnopqrst','gfedcba'); + INSERT INTO t1 VALUES(90,NULL,30030,90.09,3003.0,'mnopqrstu','gfedcba'); + INSERT INTO t1 VALUES(91,1001,NULL,91.091,3103.1,'nopqrstuv','gfedcba'); + INSERT INTO t1 VALUES(92,1012,31031,NULL,3103.1,'opqrstuvw','gfedcba'); + INSERT INTO t1 VALUES(93,1023,31031,93.093,NULL,'pqrstuvwx','fedcbaz'); + INSERT INTO t1 VALUES(94,1034,32032,94.094,3203.2,NULL,'fedcbaz'); + INSERT INTO t1 
VALUES(95,1045,32032,95.095,3203.2,'rstuvwxyz',NULL); + INSERT INTO t1 VALUES(96,NULL,NULL,96.096,3203.2,'stuvwxyza','fedcbaz'); + INSERT INTO t1 VALUES(97,1067,33033,NULL,NULL,'tuvwxyzab','fedcbaz'); + INSERT INTO t1 VALUES(98,1078,33033,98.098,3303.3,NULL,NULL); + INSERT INTO t1 VALUES(99,NULL,NULL,NULL,NULL,NULL,NULL); + CREATE INDEX t1b ON t1(b); + CREATE INDEX t1c ON t1(c); + CREATE INDEX t1d ON t1(d); + CREATE INDEX t1e ON t1(e); + CREATE INDEX t1f ON t1(f); + CREATE INDEX t1g ON t1(g); + CREATE TABLE t2(a INTEGER PRIMARY KEY,b,c,d,e,f,g); + INSERT INTO t2 SELECT * FROM t1; + CREATE INDEX t2b ON t2(b,c); + CREATE INDEX t2c ON t2(c,e); + CREATE INDEX t2d ON t2(d,g); + CREATE INDEX t2e ON t2(e,f,g); + CREATE INDEX t2f ON t2(f,b,d,c); + CREATE INDEX t2g ON t2(g,f); + CREATE TABLE t3(x,y); + INSERT INTO t3 VALUES(1,80); + INSERT INTO t3 VALUES(2,80); + CREATE TABLE t4(a INTEGER PRIMARY KEY,b,c,d,e,f,g); + INSERT INTO t4 SELECT * FROM t1; + CREATE INDEX t4b ON t4(b); + CREATE INDEX t4c ON t4(c); + } +} {} + +do_test where9-1.2.1 { + count_steps { + SELECT a FROM t1 + WHERE b IS NULL + OR c IS NULL + OR d IS NULL + ORDER BY a + } +} {90 91 92 96 97 99 scan 0 sort 1} +do_test where9-1.2.2 { + count_steps { + SELECT a FROM t1 + WHERE +b IS NULL + OR c IS NULL + OR d IS NULL + ORDER BY a + } +} {90 91 92 96 97 99 scan 98 sort 0} +do_test where9-1.2.3 { + count_steps { + SELECT a FROM t1 + WHERE b IS NULL + OR +c IS NULL + OR d IS NULL + ORDER BY a + } +} {90 91 92 96 97 99 scan 98 sort 0} +do_test where9-1.2.4 { + count_steps { + SELECT a FROM t1 + WHERE b IS NULL + OR c IS NULL + OR +d IS NULL + ORDER BY a + } +} {90 91 92 96 97 99 scan 98 sort 0} +do_test where9-1.2.5 { + count_steps { + SELECT a FROM t4 + WHERE b IS NULL + OR c IS NULL + OR d IS NULL + ORDER BY a + } +} {90 91 92 96 97 99 scan 98 sort 0} + +do_test where9-1.3.1 { + count_steps { + SELECT a FROM t1 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + ORDER BY a + } +} {90 91 92 97 scan 0 sort 1} +do_test where9-1.3.2 { + count_steps { + SELECT a FROM t4 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + ORDER BY a + } +} {90 91 92 97 scan 98 sort 0} +do_test where9-1.3.3 { + count_steps { + SELECT a FROM t4 + WHERE (b NOT NULL AND c NOT NULL AND d IS NULL) + OR (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + ORDER BY a + } +} {90 91 92 97 scan 98 sort 0} +do_test where9-1.3.4 { + count_steps { + SELECT a FROM t4 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + ORDER BY a + } +} {90 91 92 97 scan 98 sort 0} + +do_test where9-1.4 { + count_steps { + SELECT a FROM t1 + WHERE (b>=950 AND b<=1010) OR (b IS NULL AND c NOT NULL) + ORDER BY a + } +} {87 88 89 90 91 scan 0 sort 1} +do_test where9-1.5 { + # When this test was originally written, SQLite used a rowset object + # to optimize the "ORDER BY a" clause. Now that it is using a rowhash, + # this is not possible. So we have to comment out one term of the OR + # expression in order to prevent SQLite from deeming a full-table + # scan to be a better strategy than using multiple indexes, which would + # defeat the point of the test. 
+ count_steps { + SELECT a FROM t1 + WHERE a=83 + OR b=913 + OR c=28028 + OR (d>=82 AND d<83) +/* OR (e>2802 AND e<2803) */ + OR f='fghijklmn' + OR g='hgfedcb' + ORDER BY a + } +} {5 31 57 82 83 84 85 86 87 scan 0 sort 1} +do_test where9-1.6 { + count_steps { + SELECT a FROM t1 + WHERE b=1012 + OR (d IS NULL AND e IS NOT NULL) + } +} {92 scan 0 sort 0} +do_test where9-1.7 { + count_steps { + SELECT a FROM t1 + WHERE (b=1012 OR (d IS NULL AND e IS NOT NULL)) + AND f!=g + } +} {92 scan 0 sort 0} +do_test where9-1.8 { + count_steps { + SELECT a FROM t1 + WHERE (b=1012 OR (d IS NULL AND e IS NOT NULL)) + AND f==g + } +} {scan 0 sort 0} + +do_test where9-2.1 { + count_steps { + SELECT t2.a FROM t1, t2 + WHERE t1.a=80 + AND (t1.c=t2.c OR t1.d=t2.d) + ORDER BY 1 + } +} {79 80 81 scan 0 sort 1} +do_test where9-2.2 { + count_steps { + SELECT t2.a FROM t1, t2 + WHERE t1.a=80 + AND ((t1.c=t2.c AND t1.d=t2.d) OR t1.f=t2.f) + ORDER BY 1 + } +} {2 28 54 80 scan 0 sort 1} +do_test where9-2.3 { + count_steps { + SELECT coalesce(t2.a,9999) + FROM t1 LEFT JOIN t2 ON (t1.c=t2.c AND t1.d=t2.d) OR t1.f=t2.f + WHERE t1.a=80 + ORDER BY 1 + } +} {2 28 54 80 scan 0 sort 1} +do_test where9-2.4 { + count_steps { + SELECT coalesce(t2.a,9999) + FROM t1 LEFT JOIN t2 ON (t1.c+1=t2.c AND t1.d=t2.d) OR (t1.f||'x')=t2.f + WHERE t1.a=80 + ORDER BY 1 + } +} {9999 scan 0 sort 1} +do_test where9-2.5 { + count_steps { + SELECT t1.a, coalesce(t2.a,9999) + FROM t1 LEFT JOIN t2 ON (t1.c=t2.c AND t1.d=t2.d) OR (t1.f)=t2.f + WHERE t1.a=80 OR t1.b=880 OR (t1.c=27027 AND round(t1.d)==80) + ORDER BY 1 + } +} {80 80 80 2 80 28 80 54 scan 0 sort 1} +do_test where9-2.6 { + count_steps { + SELECT t1.a, coalesce(t2.a,9999) + FROM t1 LEFT JOIN t2 ON (t1.c+1=t2.c AND t1.d=t2.d) OR (t1.f||'x')=t2.f + WHERE t1.a=80 OR t1.b=880 OR (t1.c=27027 AND round(t1.d)==80) + ORDER BY 1 + } +} {80 9999 scan 0 sort 1} +do_test where9-2.7 { + count_steps { + SELECT t3.x, t1.a, coalesce(t2.a,9999) + FROM t3 JOIN + t1 LEFT JOIN t2 ON (t1.c+1=t2.c AND t1.d=t2.d) OR (t1.f||'x')=t2.f + WHERE t1.a=t3.y OR t1.b=t3.y*11 OR (t1.c=27027 AND round(t1.d)==80) + ORDER BY 1, 2 + } +} {1 80 9999 2 80 9999 scan 1 sort 1} +do_test where9-2.8 { + count_steps { + SELECT t3.x, t1.a, coalesce(t2.a,9999) + FROM t3 JOIN + t1 LEFT JOIN t2 ON (t1.c=t2.c AND t1.d=t2.d) OR (t1.f)=t2.f + WHERE t1.a=t3.y OR t1.b=t3.y*11 OR (t1.c=27027 AND round(t1.d)==80) + ORDER BY 1, 2, 3 + } +} {1 80 2 1 80 28 1 80 54 1 80 80 2 80 2 2 80 28 2 80 54 2 80 80 scan 1 sort 1} + + +ifcapable explain { + do_test where9-3.1 { + set r [db eval { + EXPLAIN QUERY PLAN + SELECT t2.a FROM t1, t2 + WHERE t1.a=80 + AND ((t1.c=t2.c AND t1.d=t2.d) OR t1.f=t2.f) + }] + set a [expr {[lsearch $r {TABLE t2 VIA MULTI-INDEX UNION}]>=0}] + set b [expr {[lsearch $r {TABLE t2 WITH INDEX t2f}]>=0}] + set c [expr {([lsearch $r {TABLE t2 WITH INDEX t2c}]>=0)+ + [lsearch $r {TABLE t2 WITH INDEX t2d}]>=0}] + concat $a $b $c + } {1 1 1} + do_test where9-3.2 { + set r [db eval { + EXPLAIN QUERY PLAN + SELECT coalesce(t2.a,9999) + FROM t1 LEFT JOIN t2 ON (t1.c+1=t2.c AND t1.d=t2.d) OR (t1.f||'x')=t2.f + WHERE t1.a=80 + }] + set a [expr {[lsearch $r {TABLE t2 VIA MULTI-INDEX UNION}]>=0}] + set b [expr {[lsearch $r {TABLE t2 WITH INDEX t2f}]>=0}] + set c [expr {([lsearch $r {TABLE t2 WITH INDEX t2c}]>=0)+ + [lsearch $r {TABLE t2 WITH INDEX t2d}]>=0}] + concat $a $b $c + } {1 1 1} +} + +# Make sure that INDEXED BY and multi-index OR clauses play well with +# one another. 
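+# (The catchsql cases below confirm the failure mode: if the planner
+# cannot honour the INDEXED BY index for the given WHERE clause, the
+# statement fails with a "cannot use index" error rather than silently
+# falling back to a different plan.)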
+# +do_test where9-4.1 { + count_steps { + SELECT a FROM t1 + WHERE b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {92 93 97 scan 0 sort 1} +do_test where9-4.2 { + count_steps { + SELECT a FROM t1 + WHERE b>1000 + AND (c=31031 OR +d IS NULL) + ORDER BY +a + } +} {92 93 97 scan 0 sort 1} +do_test where9-4.3 { + count_steps { + SELECT a FROM t1 + WHERE +b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {92 93 97 scan 0 sort 1} +do_test where9-4.4 { + count_steps { + SELECT a FROM t1 INDEXED BY t1b + WHERE b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {92 93 97 scan 0 sort 1} +do_test where9-4.5 { + catchsql { + SELECT a FROM t1 INDEXED BY t1b + WHERE +b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {1 {cannot use index: t1b}} +do_test where9-4.6 { + count_steps { + SELECT a FROM t1 NOT INDEXED + WHERE b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {92 93 97 scan 98 sort 1} +do_test where9-4.7 { + catchsql { + SELECT a FROM t1 INDEXED BY t1c + WHERE b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {1 {cannot use index: t1c}} +do_test where9-4.8 { + catchsql { + SELECT a FROM t1 INDEXED BY t1d + WHERE b>1000 + AND (c=31031 OR d IS NULL) + ORDER BY +a + } +} {1 {cannot use index: t1d}} + +ifcapable explain { + # The (c=31031 OR d IS NULL) clause is preferred over b>1000 because + # the former is an equality test which is expected to return fewer rows. + # + do_test where9-5.1 { + set r [db eval { + EXPLAIN QUERY PLAN + SELECT a FROM t1 + WHERE b>1000 + AND (c=31031 OR d IS NULL) + }] + set a [expr {[lsearch $r {TABLE t1 VIA MULTI-INDEX UNION}]>=0}] + set b [expr {[lsearch $r {TABLE t1 WITH INDEX t1b}]>=0}] + concat $a $b + } {1 0} + + # In contrast, b=1000 is preferred over any OR-clause. + # + do_test where9-5.2 { + set r [db eval { + EXPLAIN QUERY PLAN + SELECT a FROM t1 + WHERE b=1000 + AND (c=31031 OR d IS NULL) + }] + set a [expr {[lsearch $r {TABLE t1 VIA MULTI-INDEX UNION}]>=0}] + set b [expr {[lsearch $r {TABLE t1 WITH INDEX t1b}]>=0}] + concat $a $b + } {0 1} + + # Likewise, inequalities in an AND are preferred over inequalities in + # an OR. + # + do_test where9-5.3 { + set r [db eval { + EXPLAIN QUERY PLAN + SELECT a FROM t1 + WHERE b>1000 + AND (c>=31031 OR d IS NULL) + }] + set a [expr {[lsearch $r {TABLE t1 VIA MULTI-INDEX UNION}]>=0}] + set b [expr {[lsearch $r {TABLE t1 WITH INDEX t1b}]>=0}] + concat $a $b + } {0 1} +} + +############################################################################ +# Make sure OR-clauses work correctly on UPDATE and DELETE statements. 
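+#
+# Each case below runs the DELETE or UPDATE inside a transaction, checks
+# the row count and the surviving rowids, and then ROLLBACKs so that the
+# next case starts from the same 99-row table.  Variants that prefix a
+# column with unary "+" (e.g. "+b IS NULL") keep that term from using its
+# index, which is why they report "scan 98" rather than "scan 0".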
+ +do_test where9-6.2.1 { + db eval {SELECT count(*) FROM t1 UNION ALL SELECT a FROM t1 WHERE a>=85} +} {99 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99} + +do_test where9-6.2.2 { ;# Deletes entries 90 91 92 96 97 99 + count_steps { + BEGIN; + DELETE FROM t1 + WHERE b IS NULL + OR c IS NULL + OR d IS NULL + } +} {scan 0 sort 0} + +do_test where9-6.2.3 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a>=85; + ROLLBACK; + } +} {93 85 86 87 88 89 93 94 95 98} + +do_test where9-6.2.4 { ;# Deletes entries 90 91 92 96 97 99 + count_steps { + BEGIN; + DELETE FROM t1 + WHERE +b IS NULL + OR c IS NULL + OR d IS NULL + } +} {scan 98 sort 0} + +do_test where9-6.2.5 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a>=85; + ROLLBACK; + } +} {93 85 86 87 88 89 93 94 95 98} + +do_test where9-6.2.6 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE (b IS NULL + OR c IS NULL + OR d IS NULL) + AND a!=92 + AND a!=97 + } +} {scan 0 sort 0} ;# Add 100 to entries 90 91 96 99 + +do_test where9-6.2.7 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a>=85; + ROLLBACK + } +} {99 85 86 87 88 89 92 93 94 95 97 98 190 191 196 199} + +do_test where9-6.2.8 { ;# Deletes entries 90 91 92 97 99 + count_steps { + BEGIN; + DELETE FROM t1 + WHERE (b IS NULL + OR c IS NULL + OR d IS NULL) + AND a!=96 + } +} {scan 0 sort 0} + +do_test where9-6.2.9 { + db eval { + SELECT count(*) FROM t1 UNION ALL SELECT a FROM t1 WHERE a>=85; + ROLLBACK; + } +} {94 85 86 87 88 89 93 94 95 96 98} + +do_test where9-6.3.1 { + count_steps { + BEGIN; + DELETE FROM t1 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 0 sort 0} ;# DELETEs rows 90 91 92 97 +do_test where9-6.3.2 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {95 85 86 87 88 89 93 94 95 96 98 99} + +do_test where9-6.3.3 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 0 sort 0} ;# Add 100 to rowids 90 91 92 97 +do_test where9-6.3.4 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 200; + ROLLBACK; + } +} {99 85 86 87 88 89 93 94 95 96 98 99 190 191 192 197} + +do_test where9-6.3.5 { + count_steps { + BEGIN; + DELETE FROM t1 + WHERE (+b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# DELETEs rows 90 91 92 97 +do_test where9-6.3.6 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {95 85 86 87 88 89 93 94 95 96 98 99} + +do_test where9-6.3.7 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND +c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# Add 100 to rowids 90 91 92 97 +do_test where9-6.3.8 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {99 85 86 87 88 89 93 94 95 96 98 99} + + +do_test where9-6.4.1 { + count_steps { + BEGIN; + DELETE FROM t1 + WHERE (b>=950 AND b<=1010) OR (b IS NULL AND c NOT NULL) + } +} {scan 0 sort 0} ;# DELETE rows 87 88 89 90 91 +do_test where9-6.4.2 
{ + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {94 85 86 92 93 94 95 96 97 98 99} +do_test where9-6.4.3 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE (b>=950 AND b<=1010) OR (b IS NULL AND c NOT NULL) + } +} {scan 0 sort 0} ;# Add 100 to rowids 87 88 89 90 91 +do_test where9-6.4.4 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {99 85 86 92 93 94 95 96 97 98 99} + + +do_test where9-6.5.1 { + count_steps { + BEGIN; + DELETE FROM t1 + WHERE a=83 + OR b=913 + OR c=28028 + OR (d>=82 AND d<83) + OR (e>2802 AND e<2803) + OR f='fghijklmn' + OR g='hgfedcb' + } +} {scan 0 sort 0} ;# DELETE rows 5 31 57 82 83 84 85 86 87 +do_test where9-6.5.2 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a IN (5,31,57,82,83,84,85,86,87); + ROLLBACK; + } +} {90} + +do_test where9-6.5.3 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE a=83 + OR b=913 + OR c=28028 + OR (d>=82 AND d<83) + OR (e>2802 AND e<2803) + OR f='fghijklmn' + OR g='hgfedcb' + } +} {scan 0 sort 0} ;# Add 100 to rowids 5 31 57 82 83 84 85 86 87 +do_test where9-6.5.4 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a%100 IN (5,31,57,82,83,84,85,86,87); + ROLLBACK; + } +} {99 105 131 157 182 183 184 185 186 187} + +do_test where9-6.6.1 { + count_steps { + BEGIN; + DELETE FROM t1 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND +c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# DELETEs rows 90 91 92 97 +do_test where9-6.6.2 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {95 85 86 87 88 89 93 94 95 96 98 99} + +do_test where9-6.6.3 { + count_steps { + BEGIN; + UPDATE t1 SET a=a+100 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND +c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# Add 100 to rowids 90 91 92 97 +do_test where9-6.6.4 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 200; + ROLLBACK; + } +} {99 85 86 87 88 89 93 94 95 96 98 99 190 191 192 197} + +do_test where9-6.7.1 { + count_steps { + BEGIN; + DELETE FROM t1 NOT INDEXED + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# DELETEs rows 90 91 92 97 +do_test where9-6.7.2 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 100; + ROLLBACK; + } +} {95 85 86 87 88 89 93 94 95 96 98 99} + +do_test where9-6.7.3 { + count_steps { + BEGIN; + UPDATE t1 NOT INDEXED SET a=a+100 + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {scan 98 sort 0} ;# Add 100 to rowids 90 91 92 97 +do_test where9-6.7.4 { + db eval { + SELECT count(*) FROM t1 UNION ALL + SELECT a FROM t1 WHERE a BETWEEN 85 AND 200; + ROLLBACK; + } +} {99 85 86 87 88 89 93 94 95 96 98 99 190 191 192 197} + +do_test where9-6.8.1 { + catchsql { + DELETE FROM t1 INDEXED BY t1b + WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {1 {cannot use index: t1b}} +do_test where9-6.8.2 { + catchsql { + UPDATE t1 INDEXED BY t1b SET a=a+100 + 
WHERE (b IS NULL AND c NOT NULL AND d NOT NULL) + OR (b NOT NULL AND c IS NULL AND d NOT NULL) + OR (b NOT NULL AND c NOT NULL AND d IS NULL) + } +} {1 {cannot use index: t1b}} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/whereA.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/whereA.test --- sqlite3-3.4.2/test/whereA.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/whereA.test 2009-06-25 12:35:52.000000000 +0100 @@ -0,0 +1,126 @@ +# 2009 February 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the reverse_select_order pragma. +# +# $Id: whereA.test,v 1.3 2009/06/10 19:33:29 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test whereA-1.1 { + db eval { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b UNIQUE, c); + INSERT INTO t1 VALUES(1,2,3); + INSERT INTO t1 values(2,'hello','world'); + INSERT INTO t1 VALUES(3,4.53,NULL); + SELECT * FROM t1 + } +} {1 2 3 2 hello world 3 4.53 {}} +do_test whereA-1.2 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1; + } +} {3 4.53 {} 2 hello world 1 2 3} + +do_test whereA-1.3 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1 ORDER BY rowid; + } +} {1 2 3 2 hello world 3 4.53 {}} + +do_test whereA-2.1 { + db eval { + PRAGMA reverse_unordered_selects=0; + SELECT * FROM t1 WHERE a>0; + } +} {1 2 3 2 hello world 3 4.53 {}} +do_test whereA-2.2 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1 WHERE a>0; + } +} {3 4.53 {} 2 hello world 1 2 3} + +do_test whereA-2.3 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1 WHERE a>0 ORDER BY rowid; + } +} {1 2 3 2 hello world 3 4.53 {}} + +do_test whereA-3.1 { + db eval { + PRAGMA reverse_unordered_selects=0; + SELECT * FROM t1 WHERE b>0; + } +} {1 2 3 3 4.53 {} 2 hello world} +do_test whereA-3.2 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1 WHERE b>0; + } +} {2 hello world 3 4.53 {} 1 2 3} +do_test whereA-3.3 { + db eval { + PRAGMA reverse_unordered_selects=1; + SELECT * FROM t1 WHERE b>0 ORDER BY b; + } +} {1 2 3 3 4.53 {} 2 hello world} + +do_test whereA-4.1 { + db eval { + CREATE TABLE t2(x); + INSERT INTO t2 VALUES(1); + INSERT INTO t2 VALUES(2); + SELECT x FROM t2; + } +} {2 1} +# Do an SQL statement. Append the search count to the end of the result. 
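+# (More precisely, the "count" proc defined next appends the value of
+# ::sqlite_sort_count, so the trailing number in the whereA-4.x results
+# is the number of sort operations performed, not a search count.)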
+# +proc count sql { + set ::sqlite_sort_count 0 + return [concat [execsql $sql] $::sqlite_sort_count] +} +do_test whereA-4.2 { ;# Ticket #3904 + db eval { + CREATE INDEX t2x ON t2(x); + } + count { + SELECT x FROM t2; + } +} {2 1 0} +do_test whereA-4.3 { + count { + SELECT x FROM t2 ORDER BY x; + } +} {1 2 0} +do_test whereA-4.4 { + count { + SELECT x FROM t2 ORDER BY x DESC; + } +} {2 1 0} +do_test whereA-4.5 { + db eval {DROP INDEX t2x;} + count { + SELECT x FROM t2 ORDER BY x; + } +} {1 2 1} +do_test whereA-4.6 { + count { + SELECT x FROM t2 ORDER BY x DESC; + } +} {2 1 1} + + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/wherelimit.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/wherelimit.test --- sqlite3-3.4.2/test/wherelimit.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/test/wherelimit.test 2008-10-10 19:25:46.000000000 +0100 @@ -0,0 +1,284 @@ +# 2008 October 6 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. The +# focus of this file is testing the LIMIT ... OFFSET ... clause +# of UPDATE and DELETE statements. +# +# $Id: wherelimit.test,v 1.2 2008/10/10 18:25:46 shane Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +proc create_test_data {size} { + # Build some test data + # + execsql { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(x int, y int); + BEGIN; + } + for {set i 1} {$i<=$size} {incr i} { + for {set j 1} {$j<=$size} {incr j} { + execsql "INSERT INTO t1 VALUES([expr {$i}],[expr {$j}])" + } + } + execsql { + COMMIT; + } + return {} +} + +ifcapable {update_delete_limit} { + + # check syntax error support + do_test wherelimit-0.1 { + catchsql {DELETE FROM t1 ORDER BY x} + } {1 {ORDER BY without LIMIT on DELETE}} + do_test wherelimit-0.2 { + catchsql {DELETE FROM t1 WHERE x=1 ORDER BY x} + } {1 {ORDER BY without LIMIT on DELETE}} + do_test wherelimit-0.3 { + catchsql {UPDATE t1 SET y=1 WHERE x=1 ORDER BY x} + } {1 {ORDER BY without LIMIT on UPDATE}} + + # no AS on table sources + do_test wherelimit-0.4 { + catchsql {DELETE FROM t1 AS a WHERE x=1} + } {1 {near "AS": syntax error}} + do_test wherelimit-0.5 { + catchsql {UPDATE t1 AS a SET y=1 WHERE x=1} + } {1 {near "AS": syntax error}} + + # OFFSET w/o LIMIT + do_test wherelimit-0.6 { + catchsql {DELETE FROM t1 WHERE x=1 OFFSET 2} + } {1 {near "OFFSET": syntax error}} + do_test wherelimit-0.7 { + catchsql {UPDATE t1 SET y=1 WHERE x=1 OFFSET 2} + } {1 {near "OFFSET": syntax error}} + + + # check deletes w/o where clauses but with limit/offsets + create_test_data 5 + do_test wherelimit-1.0 { + execsql {SELECT count(*) FROM t1} + } {25} + do_test wherelimit-1.1 { + execsql {DELETE FROM t1} + execsql {SELECT count(*) FROM t1} + } {0} + create_test_data 5 + do_test wherelimit-1.2 { + execsql {DELETE FROM t1 LIMIT 5} + execsql {SELECT count(*) FROM t1} + } {20} + do_test wherelimit-1.3 { + # limit 5 + execsql {DELETE FROM t1 ORDER BY x LIMIT 5} + execsql {SELECT count(*) FROM t1} + } {15} + do_test wherelimit-1.4 { + # limit 5, offset 2 + execsql {DELETE FROM t1 ORDER BY x LIMIT 5 OFFSET 2} + execsql {SELECT count(*) FROM t1} + } {10} + do_test wherelimit-1.5 { + # limit 5, offset -2 + execsql {DELETE FROM t1 ORDER BY x LIMIT 5 OFFSET -2} + 
execsql {SELECT count(*) FROM t1} + } {5} + do_test wherelimit-1.6 { + # limit -5 (no limit), offset 2 + execsql {DELETE FROM t1 ORDER BY x LIMIT 2, -5} + execsql {SELECT count(*) FROM t1} + } {2} + do_test wherelimit-1.7 { + # limit 5, offset -2 (no offset) + execsql {DELETE FROM t1 ORDER BY x LIMIT -2, 5} + execsql {SELECT count(*) FROM t1} + } {0} + create_test_data 5 + do_test wherelimit-1.8 { + # limit -5 (no limit), offset -2 (no offset) + execsql {DELETE FROM t1 ORDER BY x LIMIT -2, -5} + execsql {SELECT count(*) FROM t1} + } {0} + create_test_data 3 + do_test wherelimit-1.9 { + # limit 5, offset 2 + execsql {DELETE FROM t1 ORDER BY x LIMIT 2, 5} + execsql {SELECT count(*) FROM t1} + } {4} + do_test wherelimit-1.10 { + # limit 5, offset 5 + execsql {DELETE FROM t1 ORDER BY x LIMIT 5 OFFSET 5} + execsql {SELECT count(*) FROM t1} + } {4} + do_test wherelimit-1.11 { + # limit 50, offset 30 + execsql {DELETE FROM t1 ORDER BY x LIMIT 50 OFFSET 30} + execsql {SELECT count(*) FROM t1} + } {4} + do_test wherelimit-1.12 { + # limit 50, offset 30 + execsql {DELETE FROM t1 ORDER BY x LIMIT 30, 50} + execsql {SELECT count(*) FROM t1} + } {4} + do_test wherelimit-1.13 { + execsql {DELETE FROM t1 ORDER BY x LIMIT 50 OFFSET 50} + execsql {SELECT count(*) FROM t1} + } {4} + + + create_test_data 6 + do_test wherelimit-2.0 { + execsql {SELECT count(*) FROM t1} + } {36} + do_test wherelimit-2.1 { + execsql {DELETE FROM t1 WHERE x=1} + execsql {SELECT count(*) FROM t1} + } {30} + create_test_data 6 + do_test wherelimit-2.2 { + execsql {DELETE FROM t1 WHERE x=1 LIMIT 5} + execsql {SELECT count(*) FROM t1} + } {31} + do_test wherelimit-2.3 { + # limit 5 + execsql {DELETE FROM t1 WHERE x=1 ORDER BY x LIMIT 5} + execsql {SELECT count(*) FROM t1} + } {30} + do_test wherelimit-2.4 { + # limit 5, offset 2 + execsql {DELETE FROM t1 WHERE x=2 ORDER BY x LIMIT 5 OFFSET 2} + execsql {SELECT count(*) FROM t1} + } {26} + do_test wherelimit-2.5 { + # limit 5, offset -2 + execsql {DELETE FROM t1 WHERE x=2 ORDER BY x LIMIT 5 OFFSET -2} + execsql {SELECT count(*) FROM t1} + } {24} + do_test wherelimit-2.6 { + # limit -5 (no limit), offset 2 + execsql {DELETE FROM t1 WHERE x=3 ORDER BY x LIMIT 2, -5} + execsql {SELECT count(*) FROM t1} + } {20} + do_test wherelimit-2.7 { + # limit 5, offset -2 (no offset) + execsql {DELETE FROM t1 WHERE x=3 ORDER BY x LIMIT -2, 5} + execsql {SELECT count(*) FROM t1} + } {18} + do_test wherelimit-2.8 { + # limit -5 (no limit), offset -2 (no offset) + execsql {DELETE FROM t1 WHERE x=4 ORDER BY x LIMIT -2, -5} + execsql {SELECT count(*) FROM t1} + } {12} + create_test_data 6 + do_test wherelimit-2.9 { + # limit 5, offset 2 + execsql {DELETE FROM t1 WHERE x=5 ORDER BY x LIMIT 2, 5} + execsql {SELECT count(*) FROM t1} + } {32} + do_test wherelimit-2.10 { + # limit 5, offset 5 + execsql {DELETE FROM t1 WHERE x=6 ORDER BY x LIMIT 5 OFFSET 5} + execsql {SELECT count(*) FROM t1} + } {31} + do_test wherelimit-2.11 { + # limit 50, offset 30 + execsql {DELETE FROM t1 WHERE x=1 ORDER BY x LIMIT 50 OFFSET 30} + execsql {SELECT count(*) FROM t1} + } {31} + do_test wherelimit-2.12 { + # limit 50, offset 30 + execsql {DELETE FROM t1 WHERE x=2 ORDER BY x LIMIT 30, 50} + execsql {SELECT count(*) FROM t1} + } {31} + do_test wherelimit-2.13 { + execsql {DELETE FROM t1 WHERE x=3 ORDER BY x LIMIT 50 OFFSET 50} + execsql {SELECT count(*) FROM t1} + } {31} + + + create_test_data 6 + do_test wherelimit-3.0 { + execsql {SELECT count(*) FROM t1} + } {36} + do_test wherelimit-3.1 { + execsql {UPDATE t1 SET y=1 
WHERE x=1} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {11} + create_test_data 6 + do_test wherelimit-3.2 { + execsql {UPDATE t1 SET y=1 WHERE x=1 LIMIT 5} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {10} + do_test wherelimit-3.3 { + # limit 5 + execsql {UPDATE t1 SET y=2 WHERE x=2 ORDER BY x LIMIT 5} + execsql {SELECT count(*) FROM t1 WHERE y=2} + } {9} + create_test_data 6 + do_test wherelimit-3.4 { + # limit 5, offset 2 + execsql {UPDATE t1 SET y=2 WHERE x=2 ORDER BY x LIMIT 5 OFFSET 2} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {6} + do_test wherelimit-3.5 { + # limit 5, offset -2 + execsql {UPDATE t1 SET y=2 WHERE x=2 ORDER BY x LIMIT 5 OFFSET -2} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {5} + do_test wherelimit-3.6 { + # limit -5 (no limit), offset 2 + execsql {UPDATE t1 SET y=3 WHERE x=3 ORDER BY x LIMIT 2, -5} + execsql {SELECT count(*) FROM t1 WHERE y=3} + } {8} + do_test wherelimit-3.7 { + # limit 5, offset -2 (no offset) + execsql {UPDATE t1 SET y=3 WHERE x=3 ORDER BY x LIMIT -2, 5} + execsql {SELECT count(*) FROM t1 WHERE y=3} + } {10} + + do_test wherelimit-3.8 { + # limit -5 (no limit), offset -2 (no offset) + execsql {UPDATE t1 SET y=4 WHERE x=4 ORDER BY x LIMIT -2, -5} + execsql {SELECT count(*) FROM t1 WHERE y=4} + } {9} + create_test_data 6 + do_test wherelimit-3.9 { + # limit 5, offset 2 + execsql {UPDATE t1 SET y=4 WHERE x=5 ORDER BY x LIMIT 2, 5} + execsql {SELECT count(*) FROM t1 WHERE y=4} + } {9} + do_test wherelimit-3.10 { + # limit 5, offset 5 + execsql {UPDATE t1 SET y=4 WHERE x=6 ORDER BY x LIMIT 5 OFFSET 5} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {6} + do_test wherelimit-3.11 { + # limit 50, offset 30 + execsql {UPDATE t1 SET y=1 WHERE x=1 ORDER BY x LIMIT 50 OFFSET 30} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {6} + do_test wherelimit-3.12 { + # limit 50, offset 30 + execsql {UPDATE t1 SET y=1 WHERE x=2 ORDER BY x LIMIT 30, 50} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {6} + do_test wherelimit-3.13 { + execsql {UPDATE t1 SET y=1 WHERE x=3 ORDER BY x LIMIT 50 OFFSET 50} + execsql {SELECT count(*) FROM t1 WHERE y=1} + } {6} + +} + +finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/where.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/where.test --- sqlite3-3.4.2/test/where.test 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/test/where.test 2009-06-12 03:37:59.000000000 +0100 @@ -11,7 +11,7 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing the use of indices in WHERE clases. 
# -# $Id: where.test,v 1.43 2007/06/25 16:29:34 danielk1977 Exp $ +# $Id: where.test,v 1.50 2008/11/03 09:06:06 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -69,11 +69,26 @@ set sqlite_query_plan } {t1 i1w} do_test where-1.1.3 { + db status step +} {0} +do_test where-1.1.4 { + db eval {SELECT x, y, w FROM t1 WHERE +w=10} +} {3 121 10} +do_test where-1.1.5 { + db status step +} {99} +do_test where-1.1.6 { + set sqlite_query_plan +} {t1 {}} +do_test where-1.1.7 { count {SELECT x, y, w AS abc FROM t1 WHERE abc=10} } {3 121 10 3} -do_test where-1.1.4 { +do_test where-1.1.8 { set sqlite_query_plan } {t1 i1w} +do_test where-1.1.9 { + db status step +} {0} do_test where-1.2.1 { count {SELECT x, y, w FROM t1 WHERE w=11} } {3 144 11 3} @@ -184,7 +199,7 @@ do_test where-1.27 { count {SELECT w FROM t1 WHERE x=3 AND y+1==122} -} {10 17} +} {10 10} do_test where-1.28 { count {SELECT w FROM t1 WHERE x+1=4 AND y+1==122} @@ -344,6 +359,11 @@ SELECT 99 WHERE 0.0 } } {} +do_test where-4.7 { + execsql { + SELECT count(*) FROM t1 WHERE t1.w + } +} {100} # Verify that IN operators in a WHERE clause are handled correctly. # Omit these tests if the build is not capable of sub-queries. @@ -358,7 +378,7 @@ count { SELECT * FROM t1 WHERE rowid+0 IN (1,2,3,1234) order by 1; } - } {1 0 4 2 1 9 3 1 16 199} + } {1 0 4 2 1 9 3 1 16 102} do_test where-5.3 { count { SELECT * FROM t1 WHERE w IN (-1,1,2,3) order by 1; @@ -368,7 +388,7 @@ count { SELECT * FROM t1 WHERE w+0 IN (-1,1,2,3) order by 1; } - } {1 0 4 2 1 9 3 1 16 199} + } {1 0 4 2 1 9 3 1 16 102} do_test where-5.5 { count { SELECT * FROM t1 WHERE rowid IN @@ -382,7 +402,7 @@ (select rowid from t1 where rowid IN (-1,2,4)) ORDER BY 1; } - } {2 1 9 4 2 25 201} + } {2 1 9 4 2 25 103} do_test where-5.7 { count { SELECT * FROM t1 WHERE w IN @@ -396,7 +416,7 @@ (select rowid from t1 where rowid IN (-1,2,4)) ORDER BY 1; } - } {2 1 9 4 2 25 201} + } {2 1 9 4 2 25 103} do_test where-5.9 { count { SELECT * FROM t1 WHERE x IN (1,7) ORDER BY 1; @@ -442,9 +462,8 @@ # occurring as expected. # proc cksort {sql} { - set ::sqlite_sort_count 0 set data [execsql $sql] - if {$::sqlite_sort_count} {set x sort} {set x nosort} + if {[db status sort]} {set x sort} {set x nosort} lappend data $x return $data } @@ -1085,7 +1104,6 @@ SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.a, x.b DESC } } {1/1 1/4 4/1 4/4 nosort} -btree_breakpoint do_test where-14.5 { cksort { SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||x.b @@ -1151,6 +1169,92 @@ } } {} +# Ticket #3408. +# +# The branch of code in where.c that generated rowid lookups was +# incorrectly deallocating a constant register, meaning that if the +# vdbe code ran more than once, the second time around the constant +# value may have been clobbered by some other value. +# +do_test where-16.1 { + execsql { + CREATE TABLE a1(id INTEGER PRIMARY KEY, v); + CREATE TABLE a2(id INTEGER PRIMARY KEY, v); + INSERT INTO a1 VALUES(1, 'one'); + INSERT INTO a1 VALUES(2, 'two'); + INSERT INTO a2 VALUES(1, 'one'); + INSERT INTO a2 VALUES(2, 'two'); + } +} {} +do_test where-16.2 { + execsql { + SELECT * FROM a2 CROSS JOIN a1 WHERE a1.id=1 AND a1.v='one'; + } +} {1 one 1 one 2 two 1 one} + +# The actual problem reported in #3408. 
+do_test where-16.3 { + execsql { + CREATE TEMP TABLE foo(idx INTEGER); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(1); + INSERT INTO foo VALUES(2); + INSERT INTO foo VALUES(2); + CREATE TEMP TABLE bar(stuff INTEGER); + INSERT INTO bar VALUES(100); + INSERT INTO bar VALUES(200); + INSERT INTO bar VALUES(300); + } +} {} +do_test where-16.4 { + execsql { + SELECT bar.RowID id FROM foo, bar WHERE foo.idx = bar.RowID AND id = 2; + } +} {2 2} + integrity_check {where-99.0} +#--------------------------------------------------------------------- +# These tests test that a bug surrounding the use of ForceInt has been +# fixed in where.c. +# +do_test where-17.1 { + execsql { + CREATE TABLE tbooking ( + id INTEGER PRIMARY KEY, + eventtype INTEGER NOT NULL + ); + INSERT INTO tbooking VALUES(42, 3); + INSERT INTO tbooking VALUES(43, 4); + } +} {} +do_test where-17.2 { + execsql { + SELECT a.id + FROM tbooking AS a + WHERE a.eventtype=3; + } +} {42} +do_test where-17.3 { + execsql { + SELECT a.id, (SELECT b.id FROM tbooking AS b WHERE b.id>a.id) + FROM tbooking AS a + WHERE a.eventtype=3; + } +} {42 43} +do_test where-17.4 { + execsql { + SELECT a.id, (SELECT b.id FROM tbooking AS b WHERE b.id>a.id) + FROM (SELECT 1.5 AS id) AS a + } +} {1.5 42} +do_test where-17.5 { + execsql { + CREATE TABLE tother(a, b); + INSERT INTO tother VALUES(1, 3.7); + SELECT id, a FROM tbooking, tother WHERE id>a; + } +} {42 1 43 1} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/test/zeroblob.test /tmp/3ARg2Grji7/sqlite3-3.6.16/test/zeroblob.test --- sqlite3-3.4.2/test/zeroblob.test 2007-06-07 20:08:34.000000000 +0100 +++ sqlite3-3.6.16/test/zeroblob.test 2009-05-05 04:40:17.000000000 +0100 @@ -13,7 +13,7 @@ # including the sqlite3_bind_zeroblob(), sqlite3_result_zeroblob(), # and the built-in zeroblob() SQL function. # -# $Id: zeroblob.test,v 1.6 2007/06/07 19:08:34 drh Exp $ +# $Id: zeroblob.test,v 1.13 2008/06/13 18:24:28 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -27,21 +27,28 @@ # content of the zeroblob is never instantiated on the VDBE stack. # But it does get inserted into the database correctly. # +db eval {PRAGMA cache_size=10} +sqlite3_memory_highwater 1 +unset -nocomplain memused +set memused [sqlite3_memory_used] do_test zeroblob-1.1 { execsql { CREATE TABLE t1(a,b,c,d); } set ::sqlite3_max_blobsize 0 execsql { - INSERT INTO t1 VALUES(2,3,4,zeroblob(10000)); + INSERT INTO t1 VALUES(2,3,4,zeroblob(1000000)); } set ::sqlite3_max_blobsize } {10} +do_test zeroblob-1.1.1 { + expr {[sqlite3_memory_highwater]<$::memused+25000} +} {1} do_test zeroblob-1.2 { execsql { SELECT length(d) FROM t1 } -} {10000} +} {1000000} # If a non-NULL column follows the zeroblob, then the content of # the zeroblob must be instantiated. @@ -57,7 +64,7 @@ execsql { SELECT length(c), length(d) FROM t1 } -} {1 10000 10000 1} +} {1 1000000 10000 1} # Multiple zeroblobs can appear at the end of record. No instantiation # of the blob content occurs on the stack. @@ -73,7 +80,7 @@ execsql { SELECT length(c), length(d) FROM t1 } -} {1 10000 10000 1 10000 10000} +} {1 1000000 10000 1 10000 10000} # NULLs can follow the zeroblob() or be intermixed with zeroblobs and # no instantiation of the zeroblobs occurs on the stack. 
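The zeroblob tests above depend on zeroblob(N) being recorded as a length rather than as N allocated bytes, so the memory high-water mark stays small even for multi-megabyte blobs. The following minimal C sketch is illustrative only and is not part of this patch; it mirrors, through the C API, the same check the zeroblob-1.1.1 and zeroblob-7.* tests perform from Tcl.

    /* Minimal sketch (not part of the patch): bind a large zeroblob and
    ** observe that the memory high-water mark stays far below its size.
    ** Error checking is omitted for brevity. */
    #include <stdio.h>
    #include "sqlite3.h"

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;

      sqlite3_open(":memory:", &db);
      sqlite3_memory_highwater(1);              /* reset the high-water mark */

      sqlite3_prepare_v2(db, "SELECT length(?)", -1, &stmt, 0);
      sqlite3_bind_zeroblob(stmt, 1, 450000);   /* described, not allocated */
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        printf("length     = %d\n", sqlite3_column_int(stmt, 0));
      }
      printf("high-water = %lld bytes\n",
             (long long)sqlite3_memory_highwater(0));

      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }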
@@ -110,23 +117,27 @@ # DISTINCT works for zeroblobs # -do_test zeroblob-3.1 { - execsql { - SELECT count(DISTINCT a) FROM ( - SELECT x'00000000000000000000' AS a - UNION ALL - SELECT zeroblob(10) AS a - ) - } -} {1} +ifcapable bloblit&&subquery&&compound { + do_test zeroblob-3.1 { + execsql { + SELECT count(DISTINCT a) FROM ( + SELECT x'00000000000000000000' AS a + UNION ALL + SELECT zeroblob(10) AS a + ) + } + } {1} +} # Concatentation works with zeroblob # -do_test zeroblob-4.1 { - execsql { - SELECT hex(zeroblob(2) || x'61') - } -} {000061} +ifcapable bloblit { + do_test zeroblob-4.1 { + execsql { + SELECT hex(zeroblob(2) || x'61') + } + } {000061} +} # Check various CAST(...) operations on zeroblob. # @@ -174,6 +185,46 @@ } {1 {string or blob too big}} do_test zeroblob-6.5 { catchsql {select zeroblob(2147483648)} -} {1 {string or blob too big}} +} {1 {string or blob too big}} +do_test zeroblob-6.6 { + execsql {select hex(zeroblob(-1))} +} {{}} +do_test zeroblob-6.7 { + execsql {select typeof(zeroblob(-1))} +} {blob} + +# Test bind_zeroblob() +# +sqlite3_memory_highwater 1 +unset -nocomplain memused +set memused [sqlite3_memory_used] +do_test zeroblob-7.1 { + set ::STMT [sqlite3_prepare $::DB "SELECT length(?)" -1 DUMMY] + set ::sqlite3_max_blobsize 0 + sqlite3_bind_zeroblob $::STMT 1 450000 + sqlite3_step $::STMT +} {SQLITE_ROW} +do_test zeroblob-7.2 { + sqlite3_column_int $::STMT 0 +} {450000} +do_test zeroblob-7.3 { + sqlite3_finalize $::STMT +} {SQLITE_OK} +do_test zeroblob-7.4 { + set ::sqlite3_max_blobsize +} {0} +do_test zeroblob-7.5 { + expr {[sqlite3_memory_highwater]<$::memused+10000} +} {1} + +# Test that MakeRecord can handle a value with some real content +# and a zero-blob tail. +# +do_test zeroblob-8.1 { + llength [execsql { + SELECT 'hello' AS a, zeroblob(10) as b from t1 ORDER BY a, b; + }] +} {8} + finish_test diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/genfkey.README /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/genfkey.README --- sqlite3-3.4.2/tool/genfkey.README 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/tool/genfkey.README 2009-06-12 03:38:01.000000000 +0100 @@ -0,0 +1,137 @@ + +OVERVIEW + + The SQLite library is capable of parsing SQL foreign key constraints + supplied as part of CREATE TABLE statements, but it does not actually + implement them. However, most of the features of foreign keys may be + implemented using SQL triggers, which SQLite does support. This text + file describes a feature of the SQLite shell tool (sqlite3) that + extracts foreign key definitions from an existing SQLite database and + creates the set of CREATE TRIGGER statements required to implement + the foreign key constraints. + +CAPABILITIES + + An SQL foreign key is a constraint that requires that each row in + the "child" table corresponds to a row in the "parent" table. For + example, the following schema: + + CREATE TABLE parent(a, b, c, PRIMARY KEY(a, b)); + CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent(a, b)); + + implies that for each row in table "child", there must be a row in + "parent" for which the expression (child.d==parent.a AND child.e==parent.b) + is true. The columns in the parent table are required to be either the + primary key columns or subject to a UNIQUE constraint. There is no such + requirement for the columns of the child table. + + At this time, all foreign keys are implemented as if they were + "MATCH NONE", even if the declaration specified "MATCH PARTIAL" or + "MATCH FULL". 
"MATCH NONE" means that if any of the key columns in + the child table are NULL, then there is no requirement for a corresponding + row in the parent table. So, taking this into account, the expression that + must be true for every row of the child table in the above example is + actually: + + (child.d IS NULL) OR + (child.e IS NULL) OR + (child.d==parent.a AND child.e==parent.b) + + Attempting to insert or update a row in the child table so that the + affected row violates this constraint results in an exception being + thrown. + + The effect of attempting to delete or update a row in the parent table + so that the constraint becomes untrue for one or more rows in the child + table depends on the "ON DELETE" or "ON UPDATE" actions specified as + part of the foreign key definition, respectively. Three different actions + are supported: "RESTRICT" (the default), "CASCADE" and "SET NULL". SQLite + will also parse the "SET DEFAULT" action, but this is not implemented + and "RESTRICT" is used instead. + + RESTRICT: Attempting to update or delete a row in the parent table so + that the constraint becomes untrue for one or more rows in + the child table is not allowed. An exception is thrown. + + CASCADE: Instead of throwing an exception, all corresponding child table + rows are either deleted (if the parent row is being deleted) + or updated to match the new parent key values (if the parent + row is being updated). + + SET NULL: Instead of throwing an exception, the foreign key fields of + all corresponding child table rows are set to NULL. + +LIMITATIONS + + Apart from those limitiations described above: + + * Implicit mapping to composite primary keys is not supported. If + a parent table has a composite primary key, then any child table + that refers to it must explicitly map each column. For example, given + the following definition of table "parent": + + CREATE TABLE parent(a, b, c, PRIMARY KEY(a, b)); + + only the first of the following two definitions of table "child" + is supported: + + CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent(a, b)); + CREATE TABLE child(d, e, f, FOREIGN KEY(d, e) REFERENCES parent); + + An implicit reference to a composite primary key is detected as an + error when the program is run (see below). + + * SQLite does not support recursive triggers, and therefore this program + does not support recursive CASCADE or SET NULL foreign key + relationships. If the parent and the child tables of a CASCADE or + SET NULL foreign key are the same table, the generated triggers will + malfunction. This is also true if the recursive foreign key constraint + is indirect (for example if table A references table B which references + table A with a CASCADE or SET NULL foreign key constraint). + + Recursive CASCADE or SET NULL foreign key relationships are *not* + detected as errors when the program is run. Buyer beware. + +USAGE + + The functionality is accessed through an sqlite3 shell tool "dot-command": + + .genfkey ?--no-drop? ?--ignore-errors? ?--exec? + + When this command is run, it first checks the schema of the open SQLite + database for foreign key related errors or inconsistencies. For example, + a foreign key that refers to a parent table that does not exist, or + a foreign key that refers to columns in a parent table that are not + guaranteed to be unique. If such errors are found and the --ignore-errors + option was not present, a message for each one is printed to stderr and + no further processing takes place. 
+ + If errors are found and the --ignore-errors option is passed, then + no error messages are printed. No "CREATE TRIGGER" statements are generated + for foreign-key definitions that contained errors; they are silently + ignored by subsequent processing. + + All triggers generated by this command have names that match the pattern + "genfkey*". Unless the --no-drop option is specified, the program + also generates a "DROP TRIGGER" statement for each trigger that exists + in the database with a name that matches this pattern. This allows the + program to be used to upgrade a database schema for which foreign key + triggers have already been installed (i.e. after new tables are created + or existing tables dropped). + + Finally, a series of SQL trigger definitions (CREATE TRIGGER statements) + that implement the foreign key constraints found in the database schema are + generated. + + If the --exec option was passed, then all generated SQL is immediately + executed on the database. Otherwise, the generated SQL strings are output + in the same way as the results of SELECT queries are. Normally, this means + they will be printed to stdout, but this can be configured using other + dot-commands (i.e. ".output"). + + The simplest way to activate the foreign key definitions in a database + is to open it using the shell tool and enter the command + ".genfkey --exec": + + sqlite> .genfkey --exec + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/genfkey.test /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/genfkey.test --- sqlite3-3.4.2/tool/genfkey.test 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/tool/genfkey.test 2009-06-25 12:24:47.000000000 +0100 @@ -0,0 +1,294 @@ + +package require sqlite3 + +proc do_test {name cmd expected} { + puts -nonewline "$name ..." + set res [uplevel $cmd] + if {$res eq $expected} { + puts Ok + } else { + puts Error + puts " Got: $res" + puts " Expected: $expected" + exit + } +} + +proc execsql {sql} { + uplevel [list db eval $sql] +} + +proc catchsql {sql} { + set rc [catch {uplevel [list db eval $sql]} msg] + list $rc $msg +} + +file delete -force test.db test.db.journal +sqlite3 db test.db + +# The following tests - genfkey-1.* - test RESTRICT foreign keys.
+# +do_test genfkey-1.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(b, c)); + CREATE TABLE t2(e REFERENCES t1, f); + CREATE TABLE t3(g, h, i, FOREIGN KEY (h, i) REFERENCES t1(b, c)); + } +} {} +do_test genfkey-1.2 { + execsql [exec ./sqlite3 test.db .genfkey] +} {} +do_test genfkey-1.3 { + catchsql { INSERT INTO t2 VALUES(1, 2) } +} {1 {constraint failed}} +do_test genfkey-1.4 { + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(1, 2); + } +} {} +do_test genfkey-1.5 { + execsql { INSERT INTO t2 VALUES(NULL, 3) } +} {} +do_test genfkey-1.6 { + catchsql { UPDATE t2 SET e = 5 WHERE e IS NULL } +} {1 {constraint failed}} +do_test genfkey-1.7 { + execsql { UPDATE t2 SET e = 1 WHERE e IS NULL } +} {} +do_test genfkey-1.8 { + execsql { UPDATE t2 SET e = NULL WHERE f = 3 } +} {} +do_test genfkey-1.9 { + catchsql { UPDATE t1 SET a = 10 } +} {1 {constraint failed}} +do_test genfkey-1.9a { + catchsql { UPDATE t1 SET a = NULL } +} {1 {datatype mismatch}} +do_test genfkey-1.10 { + catchsql { DELETE FROM t1 } +} {1 {constraint failed}} +do_test genfkey-1.11 { + execsql { UPDATE t2 SET e = NULL } +} {} +do_test genfkey-1.12 { + execsql { + UPDATE t1 SET a = 10 ; + DELETE FROM t1; + DELETE FROM t2; + } +} {} + +do_test genfkey-1.13 { + execsql { + INSERT INTO t3 VALUES(1, NULL, NULL); + INSERT INTO t3 VALUES(1, 2, NULL); + INSERT INTO t3 VALUES(1, NULL, 3); + } +} {} +do_test genfkey-1.14 { + catchsql { INSERT INTO t3 VALUES(3, 1, 4) } +} {1 {constraint failed}} +do_test genfkey-1.15 { + execsql { + INSERT INTO t1 VALUES(1, 1, 4); + INSERT INTO t3 VALUES(3, 1, 4); + } +} {} +do_test genfkey-1.16 { + catchsql { DELETE FROM t1 } +} {1 {constraint failed}} +do_test genfkey-1.17 { + catchsql { UPDATE t1 SET b = 10} +} {1 {constraint failed}} +do_test genfkey-1.18 { + execsql { UPDATE t1 SET a = 10} +} {} +do_test genfkey-1.19 { + catchsql { UPDATE t3 SET h = 'hello' WHERE i = 3} +} {1 {constraint failed}} + +do_test genfkey-1.X { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + } +} {} + +# The following tests - genfkey-2.* - test CASCADE foreign keys. +# +do_test genfkey-2.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(b, c)); + CREATE TABLE t2(e REFERENCES t1 ON UPDATE CASCADE ON DELETE CASCADE, f); + CREATE TABLE t3(g, h, i, + FOREIGN KEY (h, i) + REFERENCES t1(b, c) ON UPDATE CASCADE ON DELETE CASCADE + ); + } +} {} +do_test genfkey-2.2 { + execsql [exec ./sqlite3 test.db .genfkey] +} {} +do_test genfkey-2.3 { + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t2 VALUES(1, 'one'); + INSERT INTO t2 VALUES(4, 'four'); + } +} {} +do_test genfkey-2.4 { + execsql { + UPDATE t1 SET a = 2 WHERE a = 1; + SELECT * FROM t2; + } +} {2 one 4 four} +do_test genfkey-2.5 { + execsql { + DELETE FROM t1 WHERE a = 4; + SELECT * FROM t2; + } +} {2 one} +do_test genfkey-2.6 { + execsql { + INSERT INTO t3 VALUES('hello', 2, 3); + UPDATE t1 SET c = 2; + SELECT * FROM t3; + } +} {hello 2 2} +do_test genfkey-2.7 { + execsql { + DELETE FROM t1; + SELECT * FROM t3; + } +} {} +do_test genfkey-2.X { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + } +} {} + + +# The following tests - genfkey-3.* - test SET NULL foreign keys. 
+# +do_test genfkey-3.1 { + execsql { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, UNIQUE(c, b)); + CREATE TABLE t2(e REFERENCES t1 ON UPDATE SET NULL ON DELETE SET NULL, f); + CREATE TABLE t3(g, h, i, + FOREIGN KEY (h, i) + REFERENCES t1(b, c) ON UPDATE SET NULL ON DELETE SET NULL + ); + } +} {} +do_test genfkey-3.2 { + execsql [exec ./sqlite3 test.db .genfkey] +} {} +do_test genfkey-3.3 { + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(4, 5, 6); + INSERT INTO t2 VALUES(1, 'one'); + INSERT INTO t2 VALUES(4, 'four'); + } +} {} +do_test genfkey-3.4 { + execsql { + UPDATE t1 SET a = 2 WHERE a = 1; + SELECT * FROM t2; + } +} {{} one 4 four} +do_test genfkey-3.5 { + execsql { + DELETE FROM t1 WHERE a = 4; + SELECT * FROM t2; + } +} {{} one {} four} +do_test genfkey-3.6 { + execsql { + INSERT INTO t3 VALUES('hello', 2, 3); + UPDATE t1 SET c = 2; + SELECT * FROM t3; + } +} {hello {} {}} +do_test genfkey-2.7 { + execsql { + UPDATE t3 SET h = 2, i = 2; + DELETE FROM t1; + SELECT * FROM t3; + } +} {hello {} {}} +do_test genfkey-3.X { + execsql { + DROP TABLE t1; + DROP TABLE t2; + DROP TABLE t3; + } +} {} + +# The following tests - genfkey-4.* - test that errors in the schema +# are detected correctly. +# +do_test genfkey-4.1 { + execsql { + CREATE TABLE t1(a REFERENCES nosuchtable, b); + CREATE TABLE t2(a REFERENCES t1, b); + + CREATE TABLE t3(a, b, c, PRIMARY KEY(a, b)); + CREATE TABLE t4(a, b, c, FOREIGN KEY(c, b) REFERENCES t3); + + CREATE TABLE t5(a REFERENCES t4(d), b, c); + CREATE TABLE t6(a REFERENCES t4(a), b, c); + CREATE TABLE t7(a REFERENCES t3(a), b, c); + CREATE TABLE t8(a REFERENCES nosuchtable(a), b, c); + } +} {} + +do_test genfkey-4.X { + set rc [catch {exec ./sqlite3 test.db .genfkey} msg] + list $rc $msg +} "1 {[string trim { +Error in table t5: foreign key columns do not exist +Error in table t8: foreign key columns do not exist +Error in table t4: implicit mapping to composite primary key +Error in table t1: implicit mapping to non-existant primary key +Error in table t2: implicit mapping to non-existant primary key +Error in table t6: foreign key is not unique +Error in table t7: foreign key is not unique +}]}" + +# Test that ticket #3800 has been resolved. +# +do_test genfkey-5.1 { + execsql { + DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; + DROP TABLE t4; DROP TABLE t5; DROP TABLE t6; + DROP TABLE t7; DROP TABLE t8; + } +} {} +do_test genfkey-5.2 { + execsql { + CREATE TABLE "t.3" (c1 PRIMARY KEY); + CREATE TABLE t13 (c1, foreign key(c1) references "t.3"(c1)); + } +} {} +do_test genfkey-5.3 { + set rc [catch {exec ./sqlite3 test.db .genfkey} msg] +} {0} +do_test genfkey-5.4 { + db eval $msg +} {} +do_test genfkey-5.5 { + catchsql { INSERT INTO t13 VALUES(1) } +} {1 {constraint failed}} +do_test genfkey-5.5 { + catchsql { + INSERT INTO "t.3" VALUES(1); + INSERT INTO t13 VALUES(1); + } +} {0 {}} + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/lemon.c /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/lemon.c --- sqlite3-3.4.2/tool/lemon.c 2007-07-30 21:36:33.000000000 +0100 +++ sqlite3-3.6.16/tool/lemon.c 2009-06-25 12:35:52.000000000 +0100 @@ -36,6 +36,13 @@ static char *msort(char*,char**,int(*)(const char*,const char*)); +/* +** Compilers are getting increasingly pedantic about type conversions +** as C evolves ever closer to Ada.... To work around the latest problems +** we have to define the following variant of strlen(). 
+*/ +#define lemonStrlen(X) ((int)strlen(X)) + static struct action *Action_new(void); static struct action *Action_sort(struct action *); @@ -108,7 +115,7 @@ ** Principal data structures for the LEMON parser generator. */ -typedef enum {B_FALSE=0, B_TRUE} Boolean; +typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean; /* Symbols (terminals and nonterminals) of the grammar are stored ** in the following: */ @@ -128,12 +135,13 @@ RIGHT, NONE, UNK - } assoc; /* Associativity if predecence is defined */ + } assoc; /* Associativity if precedence is defined */ char *firstset; /* First-set for all rules of this symbol */ Boolean lambda; /* True if NT and can generate an empty string */ + int useCnt; /* Number of times used */ char *destructor; /* Code which executes whenever this symbol is ** popped from the stack during error processing */ - int destructorln; /* Line number of destructor code */ + int destLineno; /* Line number for start of destructor */ char *datatype; /* The data type of information held by this ** object. Only used if type==NONTERMINAL */ int dtnum; /* The data type number. In the parser, the value @@ -149,6 +157,7 @@ struct rule { struct symbol *lhs; /* Left-hand side of the rule */ char *lhsalias; /* Alias for the LHS (NULL if none) */ + int lhsStart; /* True if left-hand side is the start symbol */ int ruleline; /* Line number for the rule */ int nrhs; /* Number of RHS symbols */ struct symbol **rhs; /* The RHS symbols */ @@ -190,7 +199,9 @@ ACCEPT, REDUCE, ERROR, - CONFLICT, /* Was a reduce, but part of a conflict */ + SSCONFLICT, /* A shift/shift conflict */ + SRCONFLICT, /* Was a reduce, but part of a conflict */ + RRCONFLICT, /* Was a reduce, but part of a conflict */ SH_RESOLVED, /* Was a shift. Precedence resolved conflict */ RD_RESOLVED, /* Was reduce. 
Precedence resolved conflict */ NOT_USED /* Deleted by compression */ @@ -208,7 +219,7 @@ struct state { struct config *bp; /* The basis configurations for this state */ struct config *cfp; /* All configurations in this set */ - int statenum; /* Sequencial number for this state */ + int statenum; /* Sequential number for this state */ struct action *ap; /* Array of actions for this state */ int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */ int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */ @@ -246,28 +257,21 @@ char *start; /* Name of the start symbol for the grammar */ char *stacksize; /* Size of the parser stack */ char *include; /* Code to put at the start of the C file */ - int includeln; /* Line number for start of include code */ char *error; /* Code to execute when an error is seen */ - int errorln; /* Line number for start of error code */ char *overflow; /* Code to execute on a stack overflow */ - int overflowln; /* Line number for start of overflow code */ char *failure; /* Code to execute on parser failure */ - int failureln; /* Line number for start of failure code */ char *accept; /* Code to execute when the parser excepts */ - int acceptln; /* Line number for the start of accept code */ char *extracode; /* Code appended to the generated file */ - int extracodeln; /* Line number for the start of the extra code */ char *tokendest; /* Code to execute to destroy token data */ - int tokendestln; /* Line number for token destroyer code */ char *vardest; /* Code for the default non-terminal destructor */ - int vardestln; /* Line number for default non-term destructor code*/ char *filename; /* Name of the input file */ char *outname; /* Name of the current output file */ char *tokenprefix; /* A prefix added to token names in the .h file */ int nconflict; /* Number of parsing conflicts */ int tablesize; /* Size of the parse tables */ int basisflag; /* Print only basis configurations */ - int has_fallback; /* True if any %fallback is seen in the grammer */ + int has_fallback; /* True if any %fallback is seen in the grammar */ + int nolinenosflag; /* True if #line statements should not be printed */ char *argv0; /* Name of the program */ }; @@ -336,7 +340,7 @@ if( freelist==0 ){ int i; int amt = 100; - freelist = (struct action *)malloc( sizeof(struct action)*amt ); + freelist = (struct action *)calloc(amt, sizeof(struct action)); if( freelist==0 ){ fprintf(stderr,"Unable to allocate memory for a new parser action."); exit(1); @@ -359,8 +363,10 @@ ){ int rc; rc = ap1->sp->index - ap2->sp->index; - if( rc==0 ) rc = (int)ap1->type - (int)ap2->type; if( rc==0 ){ + rc = (int)ap1->type - (int)ap2->type; + } + if( rc==0 && ap1->type==REDUCE ){ rc = ap1->x.rp->index - ap2->x.rp->index; } return rc; @@ -436,7 +442,7 @@ /* Allocate a new acttab structure */ acttab *acttab_alloc(void){ - acttab *p = malloc( sizeof(*p) ); + acttab *p = calloc( 1, sizeof(*p) ); if( p==0 ){ fprintf(stderr,"Unable to allocate memory for a new acttab."); exit(1); @@ -610,7 +616,7 @@ int progress; for(i=0; insymbol; i++){ - lemp->symbols[i]->lambda = B_FALSE; + lemp->symbols[i]->lambda = LEMON_FALSE; } for(i=lemp->nterminal; insymbol; i++){ lemp->symbols[i]->firstset = SetNew(); @@ -623,10 +629,10 @@ if( rp->lhs->lambda ) continue; for(i=0; inrhs; i++){ struct symbol *sp = rp->rhs[i]; - if( sp->type!=TERMINAL || sp->lambda==B_FALSE ) break; + if( sp->type!=TERMINAL || sp->lambda==LEMON_FALSE ) break; } if( i==rp->nrhs ){ - rp->lhs->lambda = B_TRUE; + rp->lhs->lambda = 
LEMON_TRUE; progress = 1; } } @@ -649,10 +655,10 @@ } break; }else if( s1==s2 ){ - if( s1->lambda==B_FALSE ) break; + if( s1->lambda==LEMON_FALSE ) break; }else{ progress += SetUnion(s1->firstset,s2->firstset); - if( s2->lambda==B_FALSE ) break; + if( s2->lambda==LEMON_FALSE ) break; } } } @@ -709,6 +715,7 @@ ** left-hand side */ for(rp=sp->rule; rp; rp=rp->nextlhs){ struct config *newcfp; + rp->lhsStart = 1; newcfp = Configlist_addbasis(rp,0); SetAdd(newcfp->fws,0); } @@ -972,11 +979,11 @@ } /* Report an error for each rule that can never be reduced. */ - for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = B_FALSE; + for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE; for(i=0; instate; i++){ struct action *ap; for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){ - if( ap->type==REDUCE ) ap->x.rp->canReduce = B_TRUE; + if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE; } } for(rp=lemp->rule; rp; rp=rp->next){ @@ -987,7 +994,7 @@ } /* Resolve a conflict between the two given actions. If the -** conflict can't be resolve, return non-zero. +** conflict can't be resolved, return non-zero. ** ** NO LONGER TRUE: ** To resolve a conflict, first look to see if either action @@ -1008,7 +1015,7 @@ int errcnt = 0; assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */ if( apx->type==SHIFT && apy->type==SHIFT ){ - apy->type = CONFLICT; + apy->type = SSCONFLICT; errcnt++; } if( apx->type==SHIFT && apy->type==REDUCE ){ @@ -1016,7 +1023,7 @@ spy = apy->x.rp->precsym; if( spy==0 || spx->prec<0 || spy->prec<0 ){ /* Not enough precedence information. */ - apy->type = CONFLICT; + apy->type = SRCONFLICT; errcnt++; }else if( spx->prec>spy->prec ){ /* Lower precedence wins */ apy->type = RD_RESOLVED; @@ -1028,7 +1035,7 @@ apx->type = SH_RESOLVED; }else{ assert( spx->prec==spy->prec && spx->assoc==NONE ); - apy->type = CONFLICT; + apy->type = SRCONFLICT; errcnt++; } }else if( apx->type==REDUCE && apy->type==REDUCE ){ @@ -1036,7 +1043,7 @@ spy = apy->x.rp->precsym; if( spx==0 || spy==0 || spx->prec<0 || spy->prec<0 || spx->prec==spy->prec ){ - apy->type = CONFLICT; + apy->type = RRCONFLICT; errcnt++; }else if( spx->prec>spy->prec ){ apy->type = RD_RESOLVED; @@ -1047,10 +1054,14 @@ assert( apx->type==SH_RESOLVED || apx->type==RD_RESOLVED || - apx->type==CONFLICT || + apx->type==SSCONFLICT || + apx->type==SRCONFLICT || + apx->type==RRCONFLICT || apy->type==SH_RESOLVED || apy->type==RD_RESOLVED || - apy->type==CONFLICT + apy->type==SSCONFLICT || + apy->type==SRCONFLICT || + apy->type==RRCONFLICT ); /* The REDUCE/SHIFT case cannot happen because SHIFTs come before ** REDUCEs on the list. If we reach this point it must be because @@ -1076,7 +1087,7 @@ if( freelist==0 ){ int i; int amt = 3; - freelist = (struct config *)malloc( sizeof(struct config)*amt ); + freelist = (struct config *)calloc( amt, sizeof(struct config) ); if( freelist==0 ){ fprintf(stderr,"Unable to allocate memory for a new configuration."); exit(1); @@ -1210,7 +1221,7 @@ break; }else{ SetUnion(newcfp->fws,xsp->firstset); - if( xsp->lambda==B_FALSE ) break; + if( xsp->lambda==LEMON_FALSE ) break; } } if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp); @@ -1318,13 +1329,13 @@ }else{ sprintf(prefix,"%.*s: ",PREFIXLIMIT-10,filename); } - prefixsize = strlen(prefix); + prefixsize = lemonStrlen(prefix); availablewidth = LINEWIDTH - prefixsize; /* Generate the error message */ vsprintf(errmsg,format,ap); va_end(ap); - errmsgsize = strlen(errmsg); + errmsgsize = lemonStrlen(errmsg); /* Remove trailing '\n's from the error message. 
*/ while( errmsgsize>0 && errmsg[errmsgsize-1]=='\n' ){ errmsg[--errmsgsize] = 0; @@ -1368,7 +1379,7 @@ exit(1); } paz = &azDefine[nDefine-1]; - *paz = malloc( strlen(z)+1 ); + *paz = malloc( lemonStrlen(z)+1 ); if( *paz==0 ){ fprintf(stderr,"out of memory\n"); exit(1); @@ -1391,12 +1402,14 @@ static int quiet = 0; static int statistics = 0; static int mhflag = 0; + static int nolinenosflag = 0; static struct s_options options[] = { {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."}, {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."}, {OPT_FSTR, "D", (char*)handle_D_option, "Define an %ifdef macro."}, {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."}, - {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file"}, + {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file."}, + {OPT_FLAG, "l", (char*)&nolinenosflag, "Do not print #line statements."}, {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."}, {OPT_FLAG, "s", (char*)&statistics, "Print parser stats to standard output."}, @@ -1425,8 +1438,10 @@ lem.argv0 = argv[0]; lem.filename = OptArg(0); lem.basisflag = basisflag; + lem.nolinenosflag = nolinenosflag; Symbol_new("$"); lem.errsym = Symbol_new("error"); + lem.errsym->useCnt = 0; /* Parse the input file */ Parse(&lem); @@ -1452,7 +1467,7 @@ Reprint(&lem); }else{ /* Initialize the size for all follow and first sets */ - SetSize(lem.nterminal); + SetSize(lem.nterminal+1); /* Find the precedence for every production rule (that has one) */ FindRulePrecedences(&lem); @@ -1645,10 +1660,10 @@ { int spcnt, i; if( argv[0] ) fprintf(err,"%s",argv[0]); - spcnt = strlen(argv[0]) + 1; + spcnt = lemonStrlen(argv[0]) + 1; for(i=1; i%*s %s\n",op[i].label, - (int)(max-strlen(op[i].label)-9),"",op[i].message); + (int)(max-lemonStrlen(op[i].label)-9),"",op[i].message); break; case OPT_DBL: case OPT_FDBL: fprintf(errstream," %s=%*s %s\n",op[i].label, - (int)(max-strlen(op[i].label)-6),"",op[i].message); + (int)(max-lemonStrlen(op[i].label)-6),"",op[i].message); break; case OPT_STR: case OPT_FSTR: fprintf(errstream," %s=%*s %s\n",op[i].label, - (int)(max-strlen(op[i].label)-8),"",op[i].message); + (int)(max-lemonStrlen(op[i].label)-8),"",op[i].message); break; } } @@ -1957,7 +1972,8 @@ struct rule *prevrule; /* Previous rule parsed */ char *declkeyword; /* Keyword of a declaration */ char **declargslot; /* Where the declaration argument should be put */ - int *decllnslot; /* Where the declaration linenumber is put */ + int insertLineMacro; /* Add #line before declaration insert */ + int *decllinenoslot; /* Where to write declaration line number */ enum e_assoc declassoc; /* Assign this association to decl arguments */ int preccounter; /* Assign this precedence to decl arguments */ struct rule *firstrule; /* Pointer to first rule in the grammar */ @@ -1992,7 +2008,7 @@ }else if( x[0]=='{' ){ if( psp->prevrule==0 ){ ErrorMsg(psp->filename,psp->tokenlineno, -"There is not prior rule opon which to attach the code \ +"There is no prior rule opon which to attach the code \ fragment which begins on this line."); psp->errorcnt++; }else if( psp->prevrule->code!=0 ){ @@ -2089,8 +2105,8 @@ case IN_RHS: if( x[0]=='.' 
){ struct rule *rp; - rp = (struct rule *)malloc( sizeof(struct rule) + - sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs ); + rp = (struct rule *)calloc( sizeof(struct rule) + + sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1); if( rp==0 ){ ErrorMsg(psp->filename,psp->tokenlineno, "Can't allocate enough memory for this rule."); @@ -2126,7 +2142,7 @@ }else if( isalpha(x[0]) ){ if( psp->nrhs>=MAXRHS ){ ErrorMsg(psp->filename,psp->tokenlineno, - "Too many symbols on RHS or rule beginning at \"%s\".", + "Too many symbols on RHS of rule beginning at \"%s\".", x); psp->errorcnt++; psp->state = RESYNC_AFTER_RULE_ERROR; @@ -2139,11 +2155,11 @@ struct symbol *msp = psp->rhs[psp->nrhs-1]; if( msp->type!=MULTITERMINAL ){ struct symbol *origsp = msp; - msp = malloc(sizeof(*msp)); + msp = calloc(1,sizeof(*msp)); memset(msp, 0, sizeof(*msp)); msp->type = MULTITERMINAL; msp->nsubsym = 1; - msp->subsym = malloc(sizeof(struct symbol*)); + msp->subsym = calloc(1,sizeof(struct symbol*)); msp->subsym[0] = origsp; msp->name = origsp->name; psp->rhs[psp->nrhs-1] = msp; @@ -2191,46 +2207,46 @@ if( isalpha(x[0]) ){ psp->declkeyword = x; psp->declargslot = 0; - psp->decllnslot = 0; + psp->decllinenoslot = 0; + psp->insertLineMacro = 1; psp->state = WAITING_FOR_DECL_ARG; if( strcmp(x,"name")==0 ){ psp->declargslot = &(psp->gp->name); + psp->insertLineMacro = 0; }else if( strcmp(x,"include")==0 ){ psp->declargslot = &(psp->gp->include); - psp->decllnslot = &psp->gp->includeln; }else if( strcmp(x,"code")==0 ){ psp->declargslot = &(psp->gp->extracode); - psp->decllnslot = &psp->gp->extracodeln; }else if( strcmp(x,"token_destructor")==0 ){ psp->declargslot = &psp->gp->tokendest; - psp->decllnslot = &psp->gp->tokendestln; }else if( strcmp(x,"default_destructor")==0 ){ psp->declargslot = &psp->gp->vardest; - psp->decllnslot = &psp->gp->vardestln; }else if( strcmp(x,"token_prefix")==0 ){ psp->declargslot = &psp->gp->tokenprefix; + psp->insertLineMacro = 0; }else if( strcmp(x,"syntax_error")==0 ){ psp->declargslot = &(psp->gp->error); - psp->decllnslot = &psp->gp->errorln; }else if( strcmp(x,"parse_accept")==0 ){ psp->declargslot = &(psp->gp->accept); - psp->decllnslot = &psp->gp->acceptln; }else if( strcmp(x,"parse_failure")==0 ){ psp->declargslot = &(psp->gp->failure); - psp->decllnslot = &psp->gp->failureln; }else if( strcmp(x,"stack_overflow")==0 ){ psp->declargslot = &(psp->gp->overflow); - psp->decllnslot = &psp->gp->overflowln; }else if( strcmp(x,"extra_argument")==0 ){ psp->declargslot = &(psp->gp->arg); + psp->insertLineMacro = 0; }else if( strcmp(x,"token_type")==0 ){ psp->declargslot = &(psp->gp->tokentype); + psp->insertLineMacro = 0; }else if( strcmp(x,"default_type")==0 ){ psp->declargslot = &(psp->gp->vartype); + psp->insertLineMacro = 0; }else if( strcmp(x,"stack_size")==0 ){ psp->declargslot = &(psp->gp->stacksize); + psp->insertLineMacro = 0; }else if( strcmp(x,"start_symbol")==0 ){ psp->declargslot = &(psp->gp->start); + psp->insertLineMacro = 0; }else if( strcmp(x,"left")==0 ){ psp->preccounter++; psp->declassoc = LEFT; @@ -2274,7 +2290,8 @@ }else{ struct symbol *sp = Symbol_new(x); psp->declargslot = &sp->destructor; - psp->decllnslot = &sp->destructorln; + psp->decllinenoslot = &sp->destLineno; + psp->insertLineMacro = 1; psp->state = WAITING_FOR_DECL_ARG; } break; @@ -2287,7 +2304,7 @@ }else{ struct symbol *sp = Symbol_new(x); psp->declargslot = &sp->datatype; - psp->decllnslot = 0; + psp->insertLineMacro = 0; psp->state = WAITING_FOR_DECL_ARG; } break; @@ -2312,18 +2329,56 
@@ } break; case WAITING_FOR_DECL_ARG: - if( (x[0]=='{' || x[0]=='\"' || isalnum(x[0])) ){ - if( *(psp->declargslot)!=0 ){ - ErrorMsg(psp->filename,psp->tokenlineno, - "The argument \"%s\" to declaration \"%%%s\" is not the first.", - x[0]=='\"' ? &x[1] : x,psp->declkeyword); - psp->errorcnt++; - psp->state = RESYNC_AFTER_DECL_ERROR; - }else{ - *(psp->declargslot) = (x[0]=='\"' || x[0]=='{') ? &x[1] : x; - if( psp->decllnslot ) *psp->decllnslot = psp->tokenlineno; - psp->state = WAITING_FOR_DECL_OR_RULE; - } + if( x[0]=='{' || x[0]=='\"' || isalnum(x[0]) ){ + char *zOld, *zNew, *zBuf, *z; + int nOld, n, nLine, nNew, nBack; + int addLineMacro; + char zLine[50]; + zNew = x; + if( zNew[0]=='"' || zNew[0]=='{' ) zNew++; + nNew = lemonStrlen(zNew); + if( *psp->declargslot ){ + zOld = *psp->declargslot; + }else{ + zOld = ""; + } + nOld = lemonStrlen(zOld); + n = nOld + nNew + 20; + addLineMacro = !psp->gp->nolinenosflag && psp->insertLineMacro && + (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0); + if( addLineMacro ){ + for(z=psp->filename, nBack=0; *z; z++){ + if( *z=='\\' ) nBack++; + } + sprintf(zLine, "#line %d ", psp->tokenlineno); + nLine = lemonStrlen(zLine); + n += nLine + lemonStrlen(psp->filename) + nBack; + } + *psp->declargslot = zBuf = realloc(*psp->declargslot, n); + zBuf += nOld; + if( addLineMacro ){ + if( nOld && zBuf[-1]!='\n' ){ + *(zBuf++) = '\n'; + } + memcpy(zBuf, zLine, nLine); + zBuf += nLine; + *(zBuf++) = '"'; + for(z=psp->filename; *z; z++){ + if( *z=='\\' ){ + *(zBuf++) = '\\'; + } + *(zBuf++) = *z; + } + *(zBuf++) = '"'; + *(zBuf++) = '\n'; + } + if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){ + psp->decllinenoslot[0] = psp->tokenlineno; + } + memcpy(zBuf, zNew, nNew); + zBuf += nNew; + *zBuf = 0; + psp->state = WAITING_FOR_DECL_OR_RULE; }else{ ErrorMsg(psp->filename,psp->tokenlineno, "Illegal argument to %%%s: %s",psp->declkeyword,x); @@ -2380,7 +2435,7 @@ } } -/* Run the proprocessor over the input file text. The global variables +/* Run the preprocessor over the input file text. The global variables ** azDefine[0] through azDefine[nDefine-1] contains the names of all defined ** macros. This routine looks for "%ifdef" and "%ifndef" and "%endif" and ** comments them out. Text in between is also commented out as appropriate. 
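The WAITING_FOR_DECL_ARG code above concatenates user code into the grammar structure and, unless the new -l option was given, prefixes it with a "#line" directive so that compiler diagnostics point back at the grammar file, doubling any backslashes in the file name. The stand-alone sketch below is a simplification, not the patch's code: it skips the buffer reallocation, and the file name and line number are placeholders.

    /* Simplified sketch of the #line emission performed above; the real
    ** code appends into a realloc'd buffer.  Placeholder inputs only. */
    #include <stdio.h>

    static void emit_line_directive(FILE *out, int lineno, const char *file){
      const char *z;
      fprintf(out, "#line %d \"", lineno);
      for(z=file; *z; z++){
        if( *z=='\\' ) putc('\\', out);  /* double backslashes in the path */
        putc(*z, out);
      }
      fprintf(out, "\"\n");
    }

    int main(void){
      emit_line_directive(stdout, 123, "parse.y");
      return 0;
    }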
@@ -2388,9 +2443,9 @@ static void preprocess_input(char *z){ int i, j, k, n; int exclude = 0; - int start; + int start = 0; int lineno = 1; - int start_lineno; + int start_lineno = 1; for(i=0; z[i]; i++){ if( z[i]=='\n' ) lineno++; if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue; @@ -2411,7 +2466,7 @@ for(n=0; z[j+n] && !isspace(z[j+n]); n++){} exclude = 1; for(k=0; kfilename; ps.errorcnt = 0; @@ -2595,7 +2651,7 @@ if( plink_freelist==0 ){ int i; int amt = 100; - plink_freelist = (struct plink *)malloc( sizeof(struct plink)*amt ); + plink_freelist = (struct plink *)calloc( amt, sizeof(struct plink) ); if( plink_freelist==0 ){ fprintf(stderr, "Unable to allocate memory for a new follow-set propagation link.\n"); @@ -2664,7 +2720,7 @@ char *name; char *cp; - name = malloc( strlen(lemp->filename) + strlen(suffix) + 5 ); + name = malloc( lemonStrlen(lemp->filename) + lemonStrlen(suffix) + 5 ); if( name==0 ){ fprintf(stderr,"Can't allocate space for a filename.\n"); exit(1); @@ -2709,7 +2765,7 @@ maxlen = 10; for(i=0; insymbol; i++){ sp = lemp->symbols[i]; - len = strlen(sp->name); + len = lemonStrlen(sp->name); if( len>maxlen ) maxlen = len; } ncolumns = 76/(maxlen+5); @@ -2821,10 +2877,15 @@ case ERROR: fprintf(fp,"%*s error",indent,ap->sp->name); break; - case CONFLICT: + case SRCONFLICT: + case RRCONFLICT: fprintf(fp,"%*s reduce %-3d ** Parsing conflict **", indent,ap->sp->name,ap->x.rp->index); break; + case SSCONFLICT: + fprintf(fp,"%*s shift %d ** Parsing conflict **", + indent,ap->sp->name,ap->x.stp->statenum); + break; case SH_RESOLVED: case RD_RESOLVED: case NOT_USED: @@ -2919,18 +2980,18 @@ if( cp ){ c = *cp; *cp = 0; - path = (char *)malloc( strlen(argv0) + strlen(name) + 2 ); + path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 ); if( path ) sprintf(path,"%s/%s",argv0,name); *cp = c; }else{ extern char *getenv(); pathlist = getenv("PATH"); if( pathlist==0 ) pathlist = ".:/bin:/usr/bin"; - path = (char *)malloc( strlen(pathlist)+strlen(name)+2 ); + path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 ); if( path!=0 ){ while( *pathlist ){ cp = strchr(pathlist,':'); - if( cp==0 ) cp = &pathlist[strlen(pathlist)]; + if( cp==0 ) cp = &pathlist[lemonStrlen(pathlist)]; c = *cp; *cp = 0; sprintf(path,"%s/%s",pathlist,name); @@ -3055,27 +3116,25 @@ } /* Print a string to the file and keep the linenumber up to date */ -PRIVATE void tplt_print(out,lemp,str,strln,lineno) +PRIVATE void tplt_print(out,lemp,str,lineno) FILE *out; struct lemon *lemp; char *str; -int strln; int *lineno; { if( str==0 ) return; - tplt_linedir(out,strln,lemp->filename); - (*lineno)++; while( *str ){ - if( *str=='\n' ) (*lineno)++; putc(*str,out); + if( *str=='\n' ) (*lineno)++; str++; } if( str[-1]!='\n' ){ putc('\n',out); (*lineno)++; } - tplt_linedir(out,*lineno+2,lemp->outname); - (*lineno)+=2; + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } return; } @@ -3091,21 +3150,18 @@ { char *cp = 0; - int linecnt = 0; if( sp->type==TERMINAL ){ cp = lemp->tokendest; if( cp==0 ) return; - tplt_linedir(out,lemp->tokendestln,lemp->filename); - fprintf(out,"{"); + fprintf(out,"{\n"); (*lineno)++; }else if( sp->destructor ){ cp = sp->destructor; - tplt_linedir(out,sp->destructorln,lemp->filename); - fprintf(out,"{"); + fprintf(out,"{\n"); (*lineno)++; + if (!lemp->nolinenosflag) { (*lineno)++; tplt_linedir(out,sp->destLineno,lemp->filename); } }else if( lemp->vardest ){ cp = lemp->vardest; if( cp==0 ) return; - tplt_linedir(out,lemp->vardestln,lemp->filename); 
- fprintf(out,"{"); + fprintf(out,"{\n"); (*lineno)++; }else{ assert( 0 ); /* Cannot happen */ } @@ -3115,12 +3171,14 @@ cp++; continue; } - if( *cp=='\n' ) linecnt++; + if( *cp=='\n' ) (*lineno)++; fputc(*cp,out); } - (*lineno) += 3 + linecnt; - fprintf(out,"}\n"); - tplt_linedir(out,*lineno,lemp->outname); + fprintf(out,"\n"); (*lineno)++; + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } + fprintf(out,"}\n"); (*lineno)++; return; } @@ -3168,7 +3226,7 @@ used += n; assert( used>=0 ); } - n = strlen(zText); + n = lemonStrlen(zText); } if( n+sizeof(zInt)*2+used >= alloced ){ alloced = n + sizeof(zInt)*2 + used + 200; @@ -3181,7 +3239,7 @@ sprintf(zInt, "%d", p1); p1 = p2; strcpy(&z[used], zInt); - used += strlen(&z[used]); + used += lemonStrlen(&z[used]); zText++; n--; }else{ @@ -3268,7 +3326,7 @@ lemp->errorcnt++; }else if( rp->rhsalias[i]==0 ){ if( has_destructor(rp->rhs[i],lemp) ){ - append_str(" yy_destructor(%d,&yymsp[%d].minor);\n", 0, + append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0, rp->rhs[i]->index,i-rp->nrhs+1); }else{ /* No destructor defined for this term */ @@ -3292,18 +3350,16 @@ int *lineno; { char *cp; - int linecnt = 0; /* Generate code to do the reduce action */ if( rp->code ){ - tplt_linedir(out,rp->line,lemp->filename); + if (!lemp->nolinenosflag) { (*lineno)++; tplt_linedir(out,rp->line,lemp->filename); } fprintf(out,"{%s",rp->code); for(cp=rp->code; *cp; cp++){ - if( *cp=='\n' ) linecnt++; + if( *cp=='\n' ) (*lineno)++; } /* End loop */ - (*lineno) += 3 + linecnt; - fprintf(out,"}\n"); - tplt_linedir(out,*lineno,lemp->outname); + fprintf(out,"}\n"); (*lineno)++; + if (!lemp->nolinenosflag) { (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); } } /* End if( rp->code ) */ return; @@ -3333,17 +3389,17 @@ /* Allocate and initialize types[] and allocate stddt[] */ arraysize = lemp->nsymbol * 2; - types = (char**)malloc( arraysize * sizeof(char*) ); + types = (char**)calloc( arraysize, sizeof(char*) ); for(i=0; ivartype ){ - maxdtlength = strlen(lemp->vartype); + maxdtlength = lemonStrlen(lemp->vartype); } for(i=0; insymbol; i++){ int len; struct symbol *sp = lemp->symbols[i]; if( sp->datatype==0 ) continue; - len = strlen(sp->datatype); + len = lemonStrlen(sp->datatype); if( len>maxdtlength ) maxdtlength = len; } stddt = (char*)malloc( maxdtlength*2 + 1 ); @@ -3376,6 +3432,10 @@ while( *cp ) stddt[j++] = *cp++; while( j>0 && isspace(stddt[j-1]) ) j--; stddt[j] = 0; + if( lemp->tokentype && strcmp(stddt, lemp->tokentype)==0 ){ + sp->dtnum = 0; + continue; + } hash = 0; for(j=0; stddt[j]; j++){ hash = hash*53 + stddt[j]; @@ -3391,7 +3451,7 @@ } if( types[hash]==0 ){ sp->dtnum = hash + 1; - types[hash] = (char*)malloc( strlen(stddt)+1 ); + types[hash] = (char*)malloc( lemonStrlen(stddt)+1 ); if( types[hash]==0 ){ fprintf(stderr,"Out of memory.\n"); exit(1); @@ -3408,13 +3468,16 @@ lemp->tokentype?lemp->tokentype:"void*"); lineno++; if( mhflag ){ fprintf(out,"#endif\n"); lineno++; } fprintf(out,"typedef union {\n"); lineno++; + fprintf(out," int yyinit;\n"); lineno++; fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++; for(i=0; ierrsym->dtnum); lineno++; + if( lemp->errsym->useCnt ){ + fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++; + } free(stddt); free(types); fprintf(out,"} YYMINORTYPE;\n"); lineno++; @@ -3464,6 +3527,25 @@ return p2->nAction - p1->nAction; } +/* +** Write text on "out" that describes the rule "rp". 
+*/ +static void writeRuleText(FILE *out, struct rule *rp){ + int j; + fprintf(out,"%s ::=", rp->lhs->name); + for(j=0; jnrhs; j++){ + struct symbol *sp = rp->rhs[j]; + fprintf(out," %s", sp->name); + if( sp->type==MULTITERMINAL ){ + int k; + for(k=1; knsubsym; k++){ + fprintf(out,"|%s",sp->subsym[k]->name); + } + } + } +} + + /* Generate C source code for the parser */ void ReportTable(lemp, mhflag) struct lemon *lemp; @@ -3493,7 +3575,7 @@ tplt_xfer(lemp->name,in,out,&lineno); /* Generate the include code, if any */ - tplt_print(out,lemp,lemp->include,lemp->includeln,&lineno); + tplt_print(out,lemp,lemp->include,&lineno); if( mhflag ){ char *name = file_makename(lemp, ".h"); fprintf(out,"#include \"%s\"\n", name); lineno++; @@ -3517,7 +3599,7 @@ /* Generate the defines */ fprintf(out,"#define YYCODETYPE %s\n", - minimum_size_type(0, lemp->nsymbol+5)); lineno++; + minimum_size_type(0, lemp->nsymbol+1)); lineno++; fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol+1); lineno++; fprintf(out,"#define YYACTIONTYPE %s\n", minimum_size_type(0, lemp->nstate+lemp->nrule+5)); lineno++; @@ -3539,7 +3621,7 @@ name = lemp->name ? lemp->name : "Parse"; if( lemp->arg && lemp->arg[0] ){ int i; - i = strlen(lemp->arg); + i = lemonStrlen(lemp->arg); while( i>=1 && isspace(lemp->arg[i-1]) ) i--; while( i>=1 && (isalnum(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--; fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++; @@ -3559,8 +3641,10 @@ } fprintf(out,"#define YYNSTATE %d\n",lemp->nstate); lineno++; fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++; - fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++; - fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++; + if( lemp->errsym->useCnt ){ + fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++; + fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++; + } if( lemp->has_fallback ){ fprintf(out,"#define YYFALLBACK 1\n"); lineno++; } @@ -3579,7 +3663,7 @@ */ /* Compute the actions on all states and count them up */ - ax = malloc( sizeof(ax[0])*lemp->nstate*2 ); + ax = calloc(lemp->nstate*2, sizeof(ax[0])); if( ax==0 ){ fprintf(stderr,"malloc failed\n"); exit(1); @@ -3730,7 +3814,9 @@ /* Generate the table of fallback tokens. */ if( lemp->has_fallback ){ - for(i=0; interminal; i++){ + int mx = lemp->nterminal - 1; + while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } + for(i=0; i<=mx; i++){ struct symbol *p = lemp->symbols[i]; if( p->fallback==0 ){ fprintf(out, " 0, /* %10s => nothing */\n", p->name); @@ -3754,22 +3840,13 @@ tplt_xfer(lemp->name,in,out,&lineno); /* Generate a table containing a text string that describes every - ** rule in the rule set of the grammer. This information is used + ** rule in the rule set of the grammar. This information is used ** when tracing REDUCE actions. 
*/ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ assert( rp->index==i ); - fprintf(out," /* %3d */ \"%s ::=", i, rp->lhs->name); - for(j=0; jnrhs; j++){ - struct symbol *sp = rp->rhs[j]; - fprintf(out," %s", sp->name); - if( sp->type==MULTITERMINAL ){ - int k; - for(k=1; knsubsym; k++){ - fprintf(out,"|%s",sp->subsym[k]->name); - } - } - } + fprintf(out," /* %3d */ \"", i); + writeRuleText(out, rp); fprintf(out,"\",\n"); lineno++; } tplt_xfer(lemp->name,in,out,&lineno); @@ -3779,10 +3856,15 @@ ** (In other words, generate the %destructor actions) */ if( lemp->tokendest ){ + int once = 1; for(i=0; insymbol; i++){ struct symbol *sp = lemp->symbols[i]; if( sp==0 || sp->type!=TERMINAL ) continue; - fprintf(out," case %d:\n",sp->index); lineno++; + if( once ){ + fprintf(out, " /* TERMINAL Destructor */\n"); lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; } for(i=0; insymbol && lemp->symbols[i]->type!=TERMINAL; i++); if( insymbol ){ @@ -3792,22 +3874,27 @@ } if( lemp->vardest ){ struct symbol *dflt_sp = 0; + int once = 1; for(i=0; insymbol; i++){ struct symbol *sp = lemp->symbols[i]; if( sp==0 || sp->type==TERMINAL || sp->index<=0 || sp->destructor!=0 ) continue; - fprintf(out," case %d:\n",sp->index); lineno++; + if( once ){ + fprintf(out, " /* Default NON-TERMINAL Destructor */\n"); lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; dflt_sp = sp; } if( dflt_sp!=0 ){ emit_destructor_code(out,dflt_sp,lemp,&lineno); - fprintf(out," break;\n"); lineno++; } + fprintf(out," break;\n"); lineno++; } for(i=0; insymbol; i++){ struct symbol *sp = lemp->symbols[i]; if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue; - fprintf(out," case %d:\n",sp->index); lineno++; + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; /* Combine duplicate destructors into a single case */ for(j=i+1; jnsymbol; j++){ @@ -3815,7 +3902,8 @@ if( sp2 && sp2->type!=TERMINAL && sp2->destructor && sp2->dtnum==sp->dtnum && strcmp(sp->destructor,sp2->destructor)==0 ){ - fprintf(out," case %d:\n",sp2->index); lineno++; + fprintf(out," case %d: /* %s */\n", + sp2->index, sp2->name); lineno++; sp2->destructor = 0; } } @@ -3826,7 +3914,7 @@ tplt_xfer(lemp->name,in,out,&lineno); /* Generate code which executes whenever the parser stack overflows */ - tplt_print(out,lemp,lemp->overflow,lemp->overflowln,&lineno); + tplt_print(out,lemp,lemp->overflow,&lineno); tplt_xfer(lemp->name,in,out,&lineno); /* Generate the table of rule information @@ -3843,35 +3931,53 @@ for(rp=lemp->rule; rp; rp=rp->next){ translate_code(lemp, rp); } + /* First output rules other than the default: rule */ for(rp=lemp->rule; rp; rp=rp->next){ - struct rule *rp2; + struct rule *rp2; /* Other rules with the same action */ if( rp->code==0 ) continue; - fprintf(out," case %d:\n",rp->index); lineno++; + if( rp->code[0]=='\n' && rp->code[1]==0 ) continue; /* Will be default: */ + fprintf(out," case %d: /* ", rp->index); + writeRuleText(out, rp); + fprintf(out, " */\n"); lineno++; for(rp2=rp->next; rp2; rp2=rp2->next){ if( rp2->code==rp->code ){ - fprintf(out," case %d:\n",rp2->index); lineno++; + fprintf(out," case %d: /* ", rp2->index); + writeRuleText(out, rp2); + fprintf(out," */ yytestcase(yyruleno==%d);\n", rp2->index); lineno++; rp2->code = 0; } } emit_code(out,rp,lemp,&lineno); fprintf(out," break;\n"); lineno++; + rp->code = 0; + } + /* Finally, output the default: rule. We choose as the default: all + ** empty actions. 
*/ + fprintf(out," default:\n"); lineno++; + for(rp=lemp->rule; rp; rp=rp->next){ + if( rp->code==0 ) continue; + assert( rp->code[0]=='\n' && rp->code[1]==0 ); + fprintf(out," /* (%d) ", rp->index); + writeRuleText(out, rp); + fprintf(out, " */ yytestcase(yyruleno==%d);\n", rp->index); lineno++; } + fprintf(out," break;\n"); lineno++; tplt_xfer(lemp->name,in,out,&lineno); /* Generate code which executes if a parse fails */ - tplt_print(out,lemp,lemp->failure,lemp->failureln,&lineno); + tplt_print(out,lemp,lemp->failure,&lineno); tplt_xfer(lemp->name,in,out,&lineno); /* Generate code which executes when a syntax error occurs */ - tplt_print(out,lemp,lemp->error,lemp->errorln,&lineno); + tplt_print(out,lemp,lemp->error,&lineno); tplt_xfer(lemp->name,in,out,&lineno); /* Generate code which executes when the parser accepts its input */ - tplt_print(out,lemp,lemp->accept,lemp->acceptln,&lineno); + tplt_print(out,lemp,lemp->accept,&lineno); tplt_xfer(lemp->name,in,out,&lineno); /* Append any addition code the user desires */ - tplt_print(out,lemp,lemp->extracode,lemp->extracodeln,&lineno); + tplt_print(out,lemp,lemp->extracode,&lineno); fclose(in); fclose(out); @@ -3941,6 +4047,7 @@ } if( ap->type!=REDUCE ) continue; rp = ap->x.rp; + if( rp->lhsStart ) continue; if( rp==rbest ) continue; n = 1; for(ap2=ap->next; ap2; ap2=ap2->next){ @@ -4049,13 +4156,11 @@ /* Allocate a new set */ char *SetNew(){ char *s; - int i; - s = (char*)malloc( size ); + s = (char*)calloc( size, 1); if( s==0 ){ extern void memory_error(); memory_error(); } - for(i=0; i=0 && ename = Strsafe(x); sp->type = isupper(*x) ? TERMINAL : NONTERMINAL; @@ -4271,11 +4377,14 @@ sp->prec = -1; sp->assoc = UNK; sp->firstset = 0; - sp->lambda = B_FALSE; + sp->lambda = LEMON_FALSE; sp->destructor = 0; + sp->destLineno = 0; sp->datatype = 0; + sp->useCnt = 0; Symbol_insert(sp,sp->name); } + sp->useCnt++; return sp; } @@ -4445,7 +4554,7 @@ int i,size; if( x2a==0 ) return 0; size = x2a->count; - array = (struct symbol **)malloc( sizeof(struct symbol *)*size ); + array = (struct symbol **)calloc(size, sizeof(struct symbol *)); if( array ){ for(i=0; itbl[i].data; } @@ -4496,7 +4605,7 @@ struct state *State_new() { struct state *new; - new = (struct state *)malloc( sizeof(struct state) ); + new = (struct state *)calloc(1, sizeof(struct state) ); MemoryCheck(new); return new; } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/lempar.c /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/lempar.c --- sqlite3-3.4.2/tool/lempar.c 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/tool/lempar.c 2009-06-25 12:35:52.000000000 +0100 @@ -1,8 +1,8 @@ /* Driver template for the LEMON parser generator. ** The author disclaims copyright to this source code. */ -/* First off, code is include which follows the "include" declaration -** in the input file. */ +/* First off, code is included that follows the "include" declaration +** in the input grammar file. */ #include %% /* Next is all token values, in a form suitable for use by makeheaders. @@ -60,7 +60,24 @@ #define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1) #define YY_ERROR_ACTION (YYNSTATE+YYNRULE) -/* Next are that tables used to determine what action to take based on the +/* The yyzerominor constant is used to initialize instances of +** YYMINORTYPE objects to zero. */ +static const YYMINORTYPE yyzerominor = { 0 }; + +/* Define the yytestcase() macro to be a no-op if is not already defined +** otherwise. 
+** +** Applications can choose to define yytestcase() in the %include section +** to a macro that can assist in verifying code coverage. For production +** code the yytestcase() macro should be turned off. But it is useful +** for testing. +*/ +#ifndef yytestcase +# define yytestcase(X) +#endif + + +/* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement ** functions that take a state number and lookahead value and return an ** action integer. @@ -115,7 +132,7 @@ ** ** %fallback ID X Y Z. ** -** appears in the grammer, then ID becomes a fallback token for X, Y, +** appears in the grammar, then ID becomes a fallback token for X, Y, ** and Z. Whenever one of the tokens X, Y, or Z is input to the parser ** but it does not parse, the type of the token is changed to ID and ** the parse is retried before an error is thrown. @@ -139,11 +156,11 @@ ** It is sometimes called the "minor" token. */ struct yyStackEntry { - int stateno; /* The state-number */ - int major; /* The major token value. This is the code - ** number for the token at this stack level */ - YYMINORTYPE minor; /* The user-supplied minor token value. This - ** is the value of the token */ + YYACTIONTYPE stateno; /* The state-number */ + YYCODETYPE major; /* The major token value. This is the code + ** number for the token at this stack level */ + YYMINORTYPE minor; /* The user-supplied minor token value. This + ** is the value of the token */ }; typedef struct yyStackEntry yyStackEntry; @@ -151,6 +168,9 @@ ** the following structure */ struct yyParser { int yyidx; /* Index of top element in stack */ +#ifdef YYTRACKMAXSTACKDEPTH + int yyidxMax; /* Maximum value of yyidx */ +#endif int yyerrcnt; /* Shifts left before out of the error */ ParseARG_SDECL /* A place to hold %extra_argument */ #if YYSTACKDEPTH<=0 @@ -251,7 +271,12 @@ pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) ); if( pParser ){ pParser->yyidx = -1; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyidxMax = 0; +#endif #if YYSTACKDEPTH<=0 + pParser->yystack = NULL; + pParser->yystksz = 0; yyGrowStack(pParser); #endif } @@ -263,7 +288,12 @@ ** "yymajor" is the symbol code, and "yypminor" is a pointer to ** the value. */ -static void yy_destructor(YYCODETYPE yymajor, YYMINORTYPE *yypminor){ +static void yy_destructor( + yyParser *yypParser, /* The parser */ + YYCODETYPE yymajor, /* Type code for object to destroy */ + YYMINORTYPE *yypminor /* The object to be destroyed */ +){ + ParseARG_FETCH; switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen @@ -301,7 +331,7 @@ } #endif yymajor = yytos->major; - yy_destructor( yymajor, &yytos->minor); + yy_destructor(pParser, yymajor, &yytos->minor); pParser->yyidx--; return yymajor; } @@ -332,6 +362,16 @@ } /* +** Return the peak depth of the stack for a parser. +*/ +#ifdef YYTRACKMAXSTACKDEPTH +int ParseStackPeak(void *p){ + yyParser *pParser = (yyParser*)p; + return pParser->yyidxMax; +} +#endif + +/* ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. 
** @@ -349,14 +389,12 @@ if( stateno>YY_SHIFT_MAX || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){ return yy_default[stateno]; } - if( iLookAhead==YYNOCODE ){ - return YY_NO_ACTION; - } + assert( iLookAhead!=YYNOCODE ); i += iLookAhead; if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){ if( iLookAhead>0 ){ #ifdef YYFALLBACK - int iFallback; /* Fallback token */ + YYCODETYPE iFallback; /* Fallback token */ if( iLookAheadyystack[pParser->yyidx].stateno; */ - - if( stateno>YY_REDUCE_MAX || - (i = yy_reduce_ofst[stateno])==YY_REDUCE_USE_DFLT ){ +#ifdef YYERRORSYMBOL + if( stateno>YY_REDUCE_MAX ){ return yy_default[stateno]; } - if( iLookAhead==YYNOCODE ){ - return YY_NO_ACTION; - } +#else + assert( stateno<=YY_REDUCE_MAX ); +#endif + i = yy_reduce_ofst[stateno]; + assert( i!=YY_REDUCE_USE_DFLT ); + assert( iLookAhead!=YYNOCODE ); i += iLookAhead; +#ifdef YYERRORSYMBOL if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){ return yy_default[stateno]; - }else{ - return yy_action[i]; } +#else + assert( i>=0 && iyyidx++; +#ifdef YYTRACKMAXSTACKDEPTH + if( yypParser->yyidx>yypParser->yyidxMax ){ + yypParser->yyidxMax = yypParser->yyidx; + } +#endif #if YYSTACKDEPTH>0 if( yypParser->yyidx>=YYSTACKDEPTH ){ yyStackOverflow(yypParser, yypMinor); @@ -463,8 +511,8 @@ } #endif yytos = &yypParser->yystack[yypParser->yyidx]; - yytos->stateno = yyNewState; - yytos->major = yyMajor; + yytos->stateno = (YYACTIONTYPE)yyNewState; + yytos->major = (YYCODETYPE)yyMajor; yytos->minor = *yypMinor; #ifndef NDEBUG if( yyTraceFILE && yypParser->yyidx>0 ){ @@ -527,7 +575,8 @@ ** from wireshark this week. Clearly they are stressing Lemon in ways ** that it has not been previously stressed... (SQLite ticket #2172) */ - memset(&yygotominor, 0, sizeof(yygotominor)); + /*memset(&yygotominor, 0, sizeof(yygotominor));*/ + yygotominor = yyzerominor; switch( yyruleno ){ @@ -544,7 +593,7 @@ yygoto = yyRuleInfo[yyruleno].lhs; yysize = yyRuleInfo[yyruleno].nrhs; yypParser->yyidx -= yysize; - yyact = yy_find_reduce_action(yymsp[-yysize].stateno,yygoto); + yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); if( yyact < YYNSTATE ){ #ifdef NDEBUG /* If we are not debugging and the reduce action popped at least @@ -554,15 +603,16 @@ if( yysize ){ yypParser->yyidx++; yymsp -= yysize-1; - yymsp->stateno = yyact; - yymsp->major = yygoto; + yymsp->stateno = (YYACTIONTYPE)yyact; + yymsp->major = (YYCODETYPE)yygoto; yymsp->minor = yygotominor; }else #endif { yy_shift(yypParser,yyact,yygoto,&yygotominor); } - }else if( yyact == YYNSTATE + YYNRULE + 1 ){ + }else{ + assert( yyact == YYNSTATE + YYNRULE + 1 ); yy_accept(yypParser); } } @@ -570,6 +620,7 @@ /* ** The following code executes when the parse fails */ +#ifndef YYNOERRORRECOVERY static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ @@ -585,6 +636,7 @@ %% ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ } +#endif /* YYNOERRORRECOVERY */ /* ** The following code executes when a syntax error first occurs. @@ -647,7 +699,9 @@ YYMINORTYPE yyminorunion; int yyact; /* The parser action. 
*/ int yyendofinput; /* True if we are at the end of input */ +#ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ +#endif yyParser *yypParser; /* The parser */ /* (re)initialize the parser, if necessary */ @@ -655,7 +709,8 @@ if( yypParser->yyidx<0 ){ #if YYSTACKDEPTH<=0 if( yypParser->yystksz <=0 ){ - memset(&yyminorunion, 0, sizeof(yyminorunion)); + /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ + yyminorunion = yyzerominor; yyStackOverflow(yypParser, &yyminorunion); return; } @@ -676,19 +731,19 @@ #endif do{ - yyact = yy_find_shift_action(yypParser,yymajor); + yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); if( yyactyyerrcnt--; - if( yyendofinput && yypParser->yyidx>=0 ){ - yymajor = 0; - }else{ - yymajor = YYNOCODE; - } + yymajor = YYNOCODE; }else if( yyact < YYNSTATE + YYNRULE ){ yy_reduce(yypParser,yyact-YYNSTATE); - }else if( yyact == YY_ERROR_ACTION ){ + }else{ + assert( yyact == YY_ERROR_ACTION ); +#ifdef YYERRORSYMBOL int yymx; +#endif #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); @@ -725,7 +780,7 @@ yyTracePrompt,yyTokenName[yymajor]); } #endif - yy_destructor(yymajor,&yyminorunion); + yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); yymajor = YYNOCODE; }else{ while( @@ -738,7 +793,7 @@ yy_pop_parser_stack(yypParser); } if( yypParser->yyidx < 0 || yymajor==0 ){ - yy_destructor(yymajor,&yyminorunion); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); yymajor = YYNOCODE; }else if( yymx!=YYERRORSYMBOL ){ @@ -749,6 +804,18 @@ } yypParser->yyerrcnt = 3; yyerrorhit = 1; +#elif defined(YYNOERRORRECOVERY) + /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to + ** do any kind of error recovery. Instead, simply invoke the syntax + ** error routine and continue going as if nothing had happened. + ** + ** Applications can set this macro (for example inside %include) if + ** they intend to abandon the parse upon the first syntax error seen. + */ + yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yymajor = YYNOCODE; + #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** @@ -763,15 +830,12 @@ yy_syntax_error(yypParser,yymajor,yyminorunion); } yypParser->yyerrcnt = 3; - yy_destructor(yymajor,&yyminorunion); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); if( yyendofinput ){ yy_parse_failed(yypParser); } yymajor = YYNOCODE; #endif - }else{ - yy_accept(yypParser); - yymajor = YYNOCODE; } }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); return; diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/memleak2.awk /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/memleak2.awk --- sqlite3-3.4.2/tool/memleak2.awk 2005-04-23 23:45:24.000000000 +0100 +++ sqlite3-3.6.16/tool/memleak2.awk 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -# This AWK script reads the output of testfixture when compiled for memory -# debugging. It generates SQL commands that can be fed into an sqlite -# instance to determine what memory is never freed. A typical usage would -# be as follows: -# -# make -f memleak.mk fulltest 2>mem.out -# awk -f ../sqlite/tool/memleak2.awk mem.out | ./sqlite :memory: -# -# The job performed by this script is the same as that done by memleak.awk. -# The difference is that this script uses much less memory when the size -# of the mem.out file is huge. 
-# -BEGIN { - print "CREATE TABLE mem(loc INTEGER PRIMARY KEY, src);" -} -/[0-9]+ malloc / { - print "INSERT INTO mem VALUES(" strtonum($6) ",'" $0 "');" -} -/[0-9]+ realloc / { - print "INSERT INTO mem VALUES(" strtonum($10) \ - ",(SELECT src FROM mem WHERE loc=" strtonum($8) "));" - print "DELETE FROM mem WHERE loc=" strtonum($8) ";" -} -/[0-9]+ free / { - print "DELETE FROM mem WHERE loc=" strtonum($6) ";" -} -END { - print "SELECT src FROM mem;" -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/memleak3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/memleak3.tcl --- sqlite3-3.4.2/tool/memleak3.tcl 2006-12-15 21:21:28.000000000 +0000 +++ sqlite3-3.6.16/tool/memleak3.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,233 +0,0 @@ -#/bin/sh -# \ -exec `which tclsh` $0 "$@" -# -# The author disclaims copyright to this source code. In place of -# a legal notice, here is a blessing: -# -# May you do good and not evil. -# May you find forgiveness for yourself and forgive others. -# May you share freely, never taking more than you give. -###################################################################### - -set doco " -This script is a tool to help track down memory leaks in the sqlite -library. The library must be compiled with the preprocessor symbol -SQLITE_MEMDEBUG set to at least 2. It must be set to 3 to enable stack -traces. - -To use, run the leaky application and save the standard error output. -Then, execute this program with the first argument the name of the -application binary (or interpreter) and the second argument the name of the -text file that contains the collected stderr output. - -If all goes well a summary of unfreed allocations is printed out. If the -GNU C library is in use and SQLITE_DEBUG is 3 or greater a stack trace is -printed out for each unmatched allocation. - -If the \"-r \" option is passed, then the program stops and prints out -the state of the heap immediately after the th call to malloc() or -realloc(). - -Example: - -$ ./testfixture ../sqlite/test/select1.test 2> memtrace.out -$ tclsh $argv0 ?-r ? ./testfixture memtrace.out -" - - -proc usage {} { - set prg [file tail $::argv0] - puts "Usage: $prg ?-r ? " - puts "" - puts [string trim $::doco] - exit -1 -} - -proc shift {listvar} { - upvar $listvar l - set ret [lindex $l 0] - set l [lrange $l 1 end] - return $ret -} - -# Argument handling. The following vars are set: -# -# $exe - the name of the executable (i.e. "testfixture" or "./sqlite3") -# $memfile - the name of the file containing the trace output. -# $report_at - The malloc number to stop and report at. Or -1 to read -# all of $memfile. -# -set report_at -1 -while {[llength $argv]>2} { - set arg [shift argv] - switch -- $arg { - "-r" { - set report_at [shift argv] - } - default { - usage - } - } -} -if {[llength $argv]!=2} usage -set exe [lindex $argv 0] -set memfile [lindex $argv 1] - -# If stack traces are enabled, the 'addr2line' program is called to -# translate a binary stack address into a human-readable form. -set addr2line addr2line - -# When the SQLITE_MEMDEBUG is set as described above, SQLite prints -# out a line for each malloc(), realloc() or free() call that the -# library makes. If SQLITE_MEMDEBUG is 3, then a stack trace is printed -# out before each malloc() and realloc() line. -# -# This program parses each line the SQLite library outputs and updates -# the following global Tcl variables to reflect the "current" state of -# the heap used by SQLite. -# -set nBytes 0 ;# Total number of bytes currently allocated. 
-set nMalloc 0 ;# Total number of malloc()/realloc() calls. -set nPeak 0 ;# Peak of nBytes. -set iPeak 0 ;# nMalloc when nPeak was set. -# -# More detailed state information is stored in the $memmap array. -# Each key in the memmap array is the address of a chunk of memory -# currently allocated from the heap. The value is a list of the -# following form -# -# { } -# -array unset memmap - -proc process_input {input_file array_name} { - upvar $array_name mem - set input [open $input_file] - - set MALLOC {([[:digit:]]+) malloc ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)} - # set STACK {^[[:digit:]]+: STACK: (.*)$} - set STACK {^STACK: (.*)$} - set FREE {[[:digit:]]+ free ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)} - set REALLOC {([[:digit:]]+) realloc ([[:digit:]]+) to ([[:digit:]]+)} - append REALLOC { bytes at 0x([[:xdigit:]]+) to 0x([[:xdigit:]]+)} - - set stack "" - while { ![eof $input] } { - set line [gets $input] - if {[regexp $STACK $line dummy stack]} { - # Do nothing. The variable $stack now stores the hexadecimal stack dump - # for the next malloc() or realloc(). - - } elseif { [regexp $MALLOC $line dummy mallocid bytes addr] } { - # If this is a 'malloc' line, set an entry in the mem array. Each entry - # is a list of length three, the number of bytes allocated , the malloc - # number and the stack dump when it was allocated. - set mem($addr) [list $bytes "malloc $mallocid" $stack] - set stack "" - - # Increase the current heap usage - incr ::nBytes $bytes - - # Increase the number of malloc() calls - incr ::nMalloc - - if {$::nBytes > $::nPeak} { - set ::nPeak $::nBytes - set ::iPeak $::nMalloc - } - - } elseif { [regexp $FREE $line dummy bytes addr] } { - # If this is a 'free' line, remove the entry from the mem array. If the - # entry does not exist, or is the wrong number of bytes, announce a - # problem. This is more likely a bug in the regular expressions for - # this script than an SQLite defect. 
- if { [lindex $mem($addr) 0] != $bytes } { - error "byte count mismatch" - } - unset mem($addr) - - # Decrease the current heap usage - incr ::nBytes [expr -1 * $bytes] - - } elseif { [regexp $REALLOC $line dummy mallocid ob b oa a] } { - # "free" the old allocation in the internal model: - incr ::nBytes [expr -1 * $ob] - unset mem($oa); - - # "malloc" the new allocation - set mem($a) [list $b "realloc $mallocid" $stack] - incr ::nBytes $b - set stack "" - - # Increase the number of malloc() calls - incr ::nMalloc - - if {$::nBytes > $::nPeak} { - set ::nPeak $::nBytes - set ::iPeak $::nMalloc - } - - } else { - # puts "REJECT: $line" - } - - if {$::nMalloc==$::report_at} report - } - - close $input -} - -proc printstack {stack} { - set fcount 10 - if {[llength $stack]<10} { - set fcount [llength $stack] - } - foreach frame [lrange $stack 1 $fcount] { - foreach {f l} [split [exec $::addr2line -f --exe=$::exe $frame] \n] {} - puts [format "%-30s %s" $f $l] - } - if {[llength $stack]>0 } {puts ""} -} - -proc report {} { - - foreach key [array names ::memmap] { - set stack [lindex $::memmap($key) 2] - set bytes [lindex $::memmap($key) 0] - lappend summarymap($stack) $bytes - } - - set sorted [list] - foreach stack [array names summarymap] { - set allocs $summarymap($stack) - set sum 0 - foreach a $allocs { - incr sum $a - } - lappend sorted [list $sum $stack] - } - - set sorted [lsort -integer -index 0 $sorted] - foreach s $sorted { - set sum [lindex $s 0] - set stack [lindex $s 1] - set allocs $summarymap($stack) - puts "$sum bytes in [llength $allocs] chunks ($allocs)" - printstack $stack - } - - # Print out summary statistics - puts "Total allocations : $::nMalloc" - puts "Total outstanding allocations: [array size ::memmap]" - puts "Current heap usage : $::nBytes bytes" - puts "Peak heap usage : $::nPeak bytes (malloc #$::iPeak)" - - exit -} - -process_input $memfile memmap -report - - - diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/memleak.awk /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/memleak.awk --- sqlite3-3.4.2/tool/memleak.awk 2006-08-07 12:31:12.000000000 +0100 +++ sqlite3-3.6.16/tool/memleak.awk 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -# -# This script looks for memory leaks by analyzing the output of "sqlite" -# when compiled with the SQLITE_DEBUG=2 option. -# -/[0-9]+ malloc / { - mem[$6] = $0 -} -/[0-9]+ realloc / { - mem[$8] = ""; - mem[$10] = $0 -} -/[0-9]+ free / { - if (mem[$6]=="") { - print "*** free without a malloc at",$6 - } - mem[$6] = ""; - str[$6] = "" -} -/^string at / { - addr = $4 - sub("string at " addr " is ","") - str[addr] = $0 -} -END { - for(addr in mem){ - if( mem[addr]=="" ) continue - print mem[addr], str[addr] - } -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/mkkeywordhash.c /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/mkkeywordhash.c --- sqlite3-3.4.2/tool/mkkeywordhash.c 2007-07-30 21:36:33.000000000 +0100 +++ sqlite3-3.6.16/tool/mkkeywordhash.c 2009-06-25 12:35:52.000000000 +0100 @@ -6,6 +6,7 @@ #include #include #include +#include /* ** A header comment placed at the beginning of generated code. @@ -15,7 +16,7 @@ "**\n" "** The code in this file has been automatically generated by\n" "**\n" - "** $Header: /sqlite/sqlite/tool/mkkeywordhash.c,v 1.31 2007/07/30 18:26:20 rse Exp $\n" + "** $Header: /sqlite/sqlite/tool/mkkeywordhash.c,v 1.38 2009/06/09 14:27:41 drh Exp $\n" "**\n" "** The code in this file implements a function that determines whether\n" "** or not a given identifier is really an SQL keyword. 
The same thing\n" @@ -27,7 +28,7 @@ ; /* -** All the keywords of the SQL language are stored as in a hash +** All the keywords of the SQL language are stored in a hash ** table composed of instances of the following structure. */ typedef struct Keyword Keyword; @@ -44,6 +45,7 @@ int iNext; /* Index in aKeywordTable[] of next with same hash */ int substrId; /* Id to another keyword this keyword is embedded in */ int substrOffset; /* Offset into substrId for start of this keyword */ + char zOrigName[20]; /* Original keyword name before processing */ }; /* @@ -200,6 +202,7 @@ { "IMMEDIATE", "TK_IMMEDIATE", ALWAYS }, { "IN", "TK_IN", ALWAYS }, { "INDEX", "TK_INDEX", ALWAYS }, + { "INDEXED", "TK_INDEXED", ALWAYS }, { "INITIALLY", "TK_INITIALLY", FKEY }, { "INNER", "TK_JOIN_KW", ALWAYS }, { "INSERT", "TK_INSERT", ALWAYS }, @@ -232,19 +235,21 @@ { "REFERENCES", "TK_REFERENCES", FKEY }, { "REGEXP", "TK_LIKE_KW", ALWAYS }, { "REINDEX", "TK_REINDEX", REINDEX }, + { "RELEASE", "TK_RELEASE", ALWAYS }, { "RENAME", "TK_RENAME", ALTER }, { "REPLACE", "TK_REPLACE", CONFLICT }, { "RESTRICT", "TK_RESTRICT", FKEY }, { "RIGHT", "TK_JOIN_KW", ALWAYS }, { "ROLLBACK", "TK_ROLLBACK", ALWAYS }, { "ROW", "TK_ROW", TRIGGER }, + { "SAVEPOINT", "TK_SAVEPOINT", ALWAYS }, { "SELECT", "TK_SELECT", ALWAYS }, { "SET", "TK_SET", ALWAYS }, { "TABLE", "TK_TABLE", ALWAYS }, { "TEMP", "TK_TEMP", ALWAYS }, { "TEMPORARY", "TK_TEMP", ALWAYS }, { "THEN", "TK_THEN", ALWAYS }, - { "TO", "TK_TO", ALTER }, + { "TO", "TK_TO", ALWAYS }, { "TRANSACTION", "TK_TRANSACTION", ALWAYS }, { "TRIGGER", "TK_TRIGGER", TRIGGER }, { "UNION", "TK_UNION", COMPOUND }, @@ -334,6 +339,7 @@ int nChar; int totalLen = 0; int aHash[1000]; /* 1000 is much bigger than nKeyword */ + char zText[2000]; /* Remove entries from the list of keywords that have mask==0 */ for(i=j=0; ilen = strlen(p->zName); + assert( p->lenzOrigName) ); + strcpy(p->zOrigName, p->zName); totalLen += p->len; p->hash = (UpperToLower[(int)p->zName[0]]*4) ^ (UpperToLower[(int)p->zName[p->len-1]]*3) ^ p->len; @@ -462,20 +470,44 @@ printf("static int keywordCode(const char *z, int n){\n"); printf(" /* zText[] encodes %d bytes of keywords in %d bytes */\n", totalLen + nKeyword, nChar+1 ); - - printf(" static const char zText[%d] =\n", nChar+1); - for(i=j=0; isubstrId ) continue; - if( j==0 ) printf(" \""); + memcpy(&zText[k], p->zName, p->len); + k += p->len; + if( j+p->len>70 ){ + printf("%*s */\n", 74-j, ""); + j = 0; + } + if( j==0 ){ + printf(" /* "); + j = 8; + } printf("%s", p->zName); j += p->len; - if( j>60 ){ - printf("\"\n"); + } + if( j>0 ){ + printf("%*s */\n", 74-j, ""); + } + printf(" static const char zText[%d] = {\n", nChar); + zText[nChar] = 0; + for(i=j=0; i68 ){ + printf("\n"); j = 0; } } - printf("%s;\n", j>0 ? 
"\"" : " "); + if( j>0 ) printf("\n"); + printf(" };\n"); printf(" static const unsigned char aHash[%d] = {\n", bestSize); for(i=j=0; i=0; i=((int)aNext[i])-1){\n"); printf(" if( aLen[i]==n &&" " sqlite3StrNICmp(&zText[aOffset[i]],z,n)==0 ){\n"); + for(i=0; i "one hundred twenty three" +# +set ones {zero one two three four five six seven eight nine + ten eleven twelve thirteen fourteen fifteen sixteen seventeen + eighteen nineteen} +set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety} +proc number_name {n} { + if {$n>=1000} { + set txt "[number_name [expr {$n/1000}]] thousand" + set n [expr {$n%1000}] + } else { + set txt {} + } + if {$n>=100} { + append txt " [lindex $::ones [expr {$n/100}]] hundred" + set n [expr {$n%100}] + } + if {$n>=20} { + append txt " [lindex $::tens [expr {$n/10}]]" + set n [expr {$n%10}] + } + if {$n>0} { + append txt " [lindex $::ones $n]" + } + set txt [string trim $txt] + if {$txt==""} {set txt zero} + return $txt +} + +# Create a database schema. +# +puts { + PRAGMA page_size=1024; + PRAGMA cache_size=8192; + PRAGMA locking_mode=EXCLUSIVE; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); + CREATE TABLE t2(a INTEGER, b INTEGER, c TEXT); + CREATE INDEX i2a ON t2(a); + CREATE INDEX i2b ON t2(b); + SELECT name FROM sqlite_master ORDER BY 1; +} + + +# 50000 INSERTs on an unindexed table +# +set t1c_list {} +puts {BEGIN;} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + set x [number_name $r] + lappend t1c_list $x + puts "INSERT INTO t1 VALUES($i,$r,'$x');" +} +puts {COMMIT;} + +# 50000 INSERTs on an indexed table +# +puts {BEGIN;} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + puts "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');" +} +puts {COMMIT;} + + +# 50 SELECTs on an integer comparison. There is no index so +# a full table scan is required. +# +for {set i 0} {$i<50} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + puts "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} + +# 50 SELECTs on an LIKE comparison. There is no index so a full +# table scan is required. +# +for {set i 0} {$i<50} {incr i} { + puts "SELECT count(*), avg(b) FROM t1 WHERE c LIKE '%[number_name $i]%';" +} + +# Create indices +# +puts {BEGIN;} +puts { + CREATE INDEX i1a ON t1(a); + CREATE INDEX i1b ON t1(b); + CREATE INDEX i1c ON t1(c); +} +puts {COMMIT;} + +# 5000 SELECTs on an integer comparison where the integer is +# indexed. +# +set sql {} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*100}] + set upr [expr {($i+10)*100}] + puts "SELECT count(*), avg(b) FROM t1 WHERE b>=$lwr AND b<$upr;" +} + +# 100000 random SELECTs against rowid. +# +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + puts "SELECT c FROM t1 WHERE rowid=$id;" +} + +# 100000 random SELECTs against a unique indexed column. +# +for {set i 1} {$i<=100000} {incr i} { + set id [expr {int(rand()*50000)+1}] + puts "SELECT c FROM t1 WHERE a=$id;" +} + +# 50000 random SELECTs against an indexed column text column +# +set nt1c [llength $t1c_list] +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*$nt1c)}] + set c [lindex $t1c_list $i] + puts "SELECT c FROM t1 WHERE c='$c';" +} + + +# Vacuum +puts {VACUUM;} + +# 5000 updates of ranges where the field being compared is indexed. 
+# +puts {BEGIN;} +for {set i 0} {$i<5000} {incr i} { + set lwr [expr {$i*2}] + set upr [expr {($i+1)*2}] + puts "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;" +} +puts {COMMIT;} + +# 50000 single-row updates. An index is used to find the row quickly. +# +puts {BEGIN;} +for {set i 0} {$i<50000} {incr i} { + set r [expr {int(rand()*500000)}] + puts "UPDATE t1 SET b=$r WHERE a=$i;" +} +puts {COMMIT;} + +# 1 big text update that touches every row in the table. +# +puts { + UPDATE t1 SET c=a; +} + +# Many individual text updates. Each row in the table is +# touched through an index. +# +puts {BEGIN;} +for {set i 1} {$i<=50000} {incr i} { + set r [expr {int(rand()*500000)}] + puts "UPDATE t1 SET c='[number_name $r]' WHERE a=$i;" +} +puts {COMMIT;} + +# Delete all content in a table. +# +puts {DELETE FROM t1;} + +# Copy one table into another +# +puts {INSERT INTO t1 SELECT * FROM t2;} + +# Delete all content in a table, one row at a time. +# +puts {DELETE FROM t1 WHERE 1;} + +# Refill the table yet again +# +puts {INSERT INTO t1 SELECT * FROM t2;} + +# Drop the table and recreate it without its indices. +# +puts {BEGIN;} +puts { + DROP TABLE t1; + CREATE TABLE t1(a INTEGER, b INTEGER, c TEXT); +} +puts {COMMIT;} + +# Refill the table yet again. This copy should be faster because +# there are no indices to deal with. +# +puts {INSERT INTO t1 SELECT * FROM t2;} + +# Select 20000 rows from the table at random. +# +puts { + SELECT rowid FROM t1 ORDER BY random() LIMIT 20000; +} + +# Delete 20000 random rows from the table. +# +puts { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000); +} +puts {SELECT count(*) FROM t1;} + +# Delete 20000 more rows at random from the table. +# +puts { + DELETE FROM t1 WHERE rowid IN + (SELECT rowid FROM t1 ORDER BY random() LIMIT 20000); +} +puts {SELECT count(*) FROM t1;} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/mksqlite3c.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/mksqlite3c.tcl --- sqlite3-3.4.2/tool/mksqlite3c.tcl 2007-08-08 12:48:58.000000000 +0100 +++ sqlite3-3.6.16/tool/mksqlite3c.tcl 2009-06-25 12:24:47.000000000 +0100 @@ -69,6 +69,7 @@ ** ** This amalgamation was generated on $today. */ +#define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1}] if {$addstatic} { puts $out \ @@ -87,16 +88,25 @@ foreach hdr { btree.h btreeInt.h + fts3.h + fts3_expr.h + fts3_hash.h + fts3_tokenizer.h hash.h + hwtime.h keywordhash.h + mutex.h opcodes.h os_common.h os.h os_os2.h pager.h parse.h + pcache.h + rtree.h sqlite3ext.h sqlite3.h + sqliteicu.h sqliteInt.h sqliteLimit.h vdbe.h @@ -104,7 +114,7 @@ } { set available_hdr($hdr) 1 } -set available_hdr(sqlite3.h) 0 +set available_hdr(sqliteInt.h) 0 # 78 stars used for comment formatting. 
set s78 \ @@ -130,17 +140,17 @@ section_comment "Begin file $tail" set in [open $filename r] set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)} - set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \*?(sqlite3[_a-zA-Z0-9]+)\(} + set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \**(sqlite3[_a-zA-Z0-9]+)\(} if {[file extension $filename]==".h"} { set declpattern " *$declpattern" } set declpattern ^$declpattern while {![eof $in]} { set line [gets $in] - if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { + if {[regexp {^\s*#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { if {[info exists available_hdr($hdr)]} { if {$available_hdr($hdr)} { - if {$hdr!="os_common.h"} { + if {$hdr!="os_common.h" && $hdr!="hwtime.h"} { set available_hdr($hdr) 0 } section_comment "Include $hdr in the middle of $tail" @@ -170,13 +180,18 @@ if {![regexp {^sqlite3_} $varname]} { regsub {^extern } $line {} line puts $out "SQLITE_PRIVATE $line" - } elseif {![regexp {^SQLITE_EXTERN} $line]} { - puts $out "SQLITE_API $line" } else { - puts $out $line + if {[regexp {const char sqlite3_version\[\];} $line]} { + set line {const char sqlite3_version[] = SQLITE_VERSION;} + } + regsub {^SQLITE_EXTERN } $line {} line + puts $out "SQLITE_API $line" } - } elseif {[regexp {^void \(\*sqlite3_io_trace\)} $line]} { - puts $out "SQLITE_API $line" + } elseif {[regexp {^(SQLITE_EXTERN )?void \(\*sqlite3IoTrace\)} $line]} { + regsub {^SQLITE_EXTERN } $line {} line + puts $out "SQLITE_PRIVATE $line" + } elseif {[regexp {^void \(\*sqlite3Os} $line]} { + puts $out "SQLITE_PRIVATE $line" } else { puts $out $line } @@ -194,11 +209,24 @@ # inlining opportunities. # foreach file { - sqlite3.h + sqliteInt.h + global.c + status.c date.c os.c + fault.c + mem0.c + mem1.c + mem2.c + mem3.c + mem5.c + mutex.c + mutex_noop.c + mutex_os2.c + mutex_unix.c + mutex_w32.c malloc.c printf.c random.c @@ -211,17 +239,26 @@ os_unix.c os_win.c + bitvec.c + pcache.c + pcache1.c + rowset.c pager.c - + + btmutex.c btree.c + backup.c - vdbefifo.c vdbemem.c vdbeaux.c vdbeapi.c vdbe.c vdbeblob.c + journal.c + memjournal.c + walker.c + resolve.c expr.c alter.c analyze.c @@ -229,7 +266,6 @@ auth.c build.c callback.c - complete.c delete.c func.c insert.c @@ -248,8 +284,21 @@ parse.c tokenize.c + complete.c main.c + notify.c + + fts3.c + fts3_expr.c + fts3_hash.c + fts3_porter.c + fts3_tokenizer.c + fts3_tokenizer1.c + + rtree.c + icu.c + fts3_icu.c } { copy_file tsrc/$file } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/mksqlite3internalh.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/mksqlite3internalh.tcl --- sqlite3-3.4.2/tool/mksqlite3internalh.tcl 2007-06-28 13:45:22.000000000 +0100 +++ sqlite3-3.6.16/tool/mksqlite3internalh.tcl 2009-05-05 04:40:17.000000000 +0100 @@ -56,6 +56,7 @@ btree.h btreeInt.h hash.h + hwtime.h keywordhash.h opcodes.h os_common.h diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/omittest.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/omittest.tcl --- sqlite3-3.4.2/tool/omittest.tcl 2006-06-20 12:01:09.000000000 +0100 +++ sqlite3-3.6.16/tool/omittest.tcl 2009-06-12 03:38:01.000000000 +0100 @@ -1,5 +1,5 @@ -set rcsid {$Id: omittest.tcl,v 1.2 2006/06/20 11:01:09 danielk1977 Exp $} +set rcsid {$Id: omittest.tcl,v 1.8 2008/10/13 15:35:09 drh Exp $} # Documentation for this script. This may be output to stderr # if the script is invoked incorrectly. @@ -47,7 +47,14 @@ # proc run_quick_test {dir omit_symbol_list} { # Compile the value of the OPTS Makefile variable. 
- set opts "-DSQLITE_MEMDEBUG=2 -DSQLITE_DEBUG -DOS_UNIX" + set opts "-DSQLITE_MEMDEBUG -DSQLITE_DEBUG -DSQLITE_NO_SYNC" + if {$::tcl_platform(platform)=="windows"} { + append opts " -DSQLITE_OS_WIN=1" + } elseif {$::tcl_platform(platform)=="os2"} { + append opts " -DSQLITE_OS_OS2=1" + } else { + append opts " -DSQLITE_OS_UNIX=1" + } foreach sym $omit_symbol_list { append opts " -D${sym}=1" } @@ -57,6 +64,10 @@ file mkdir $dir puts -nonewline "Building $dir..." flush stdout +catch { + file copy -force ./config.h $dir + file copy -force ./libtool $dir +} set rc [catch { exec make -C $dir -f $::MAKEFILE testfixture OPTS=$opts >& $dir/build.log }] @@ -70,8 +81,12 @@ # Create an empty file "$dir/sqlite3". This is to trick the makefile out # of trying to build the sqlite shell. The sqlite shell won't build # with some of the OMIT options (i.e OMIT_COMPLETE). - if {![file exists $dir/sqlite3]} { - set wr [open $dir/sqlite3 w] + set sqlite3_dummy $dir/sqlite3 + if {$::tcl_platform(platform)=="windows" || $::tcl_platform(platform)=="os2"} { + append sqlite3_dummy ".exe" + } + if {![file exists $sqlite3_dummy]} { + set wr [open $sqlite3_dummy w] puts $wr "dummy" close $wr } @@ -96,7 +111,11 @@ # option. # proc process_options {argv} { - set ::MAKEFILE ../Makefile.linux-gcc ;# Default value + if {$::tcl_platform(platform)=="windows" || $::tcl_platform(platform)=="os2"} { + set ::MAKEFILE ../Makefile ;# Default value + } else { + set ::MAKEFILE ../Makefile.linux-gcc ;# Default value + } for {set i 0} {$i < [llength $argv]} {incr i} { switch -- [lindex $argv $i] { -makefile { @@ -119,33 +138,55 @@ proc main {argv} { # List of SQLITE_OMIT_XXX symbols supported by SQLite. set ::SYMBOLS [list \ - SQLITE_OMIT_VIEW \ - SQLITE_OMIT_VIRTUALTABLE \ SQLITE_OMIT_ALTERTABLE \ + SQLITE_OMIT_ANALYZE \ + SQLITE_OMIT_ATTACH \ + SQLITE_OMIT_AUTHORIZATION \ + SQLITE_OMIT_AUTOINCREMENT \ + SQLITE_OMIT_AUTOINIT \ + SQLITE_OMIT_AUTOVACUUM \ + SQLITE_OMIT_BETWEEN_OPTIMIZATION \ + SQLITE_OMIT_BLOB_LITERAL \ + SQLITE_OMIT_BUILTIN_TEST \ + SQLITE_OMIT_CAST \ + SQLITE_OMIT_CHECK \ + SQLITE_OMIT_COMPLETE \ + SQLITE_OMIT_COMPOUND_SELECT \ + SQLITE_OMIT_CONFLICT_CLAUSE \ + SQLITE_OMIT_DATETIME_FUNCS \ + SQLITE_OMIT_DECLTYPE \ + off_SQLITE_OMIT_DISKIO \ SQLITE_OMIT_EXPLAIN \ + SQLITE_OMIT_FLAG_PRAGMAS \ SQLITE_OMIT_FLOATING_POINT \ SQLITE_OMIT_FOREIGN_KEY \ + SQLITE_OMIT_GET_TABLE \ + SQLITE_OMIT_GLOBALRECOVER \ + SQLITE_OMIT_INCRBLOB \ SQLITE_OMIT_INTEGRITY_CHECK \ + SQLITE_OMIT_LIKE_OPTIMIZATION \ + SQLITE_OMIT_LOAD_EXTENSION \ + SQLITE_OMIT_LOCALTIME \ SQLITE_OMIT_MEMORYDB \ + SQLITE_OMIT_OR_OPTIMIZATION \ SQLITE_OMIT_PAGER_PRAGMAS \ SQLITE_OMIT_PRAGMA \ SQLITE_OMIT_PROGRESS_CALLBACK \ + SQLITE_OMIT_QUICKBALANCE \ SQLITE_OMIT_REINDEX \ SQLITE_OMIT_SCHEMA_PRAGMAS \ SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS \ - SQLITE_OMIT_DATETIME_FUNCS \ + SQLITE_OMIT_SHARED_CACHE \ SQLITE_OMIT_SUBQUERY \ SQLITE_OMIT_TCL_VARIABLE \ + SQLITE_OMIT_TEMPDB \ + SQLITE_OMIT_TRACE \ SQLITE_OMIT_TRIGGER \ SQLITE_OMIT_UTF16 \ SQLITE_OMIT_VACUUM \ - SQLITE_OMIT_COMPLETE \ - SQLITE_OMIT_AUTOVACUUM \ - SQLITE_OMIT_AUTHORIZATION \ - SQLITE_OMIT_AUTOINCREMENT \ - SQLITE_OMIT_BLOB_LITERAL \ - SQLITE_OMIT_COMPOUND_SELECT \ - SQLITE_OMIT_CONFLICT_CLAUSE \ + SQLITE_OMIT_VIEW \ + SQLITE_OMIT_VIRTUALTABLE \ + SQLITE_OMIT_XFER_OPT \ ] # Process any command line options. @@ -162,7 +203,7 @@ } } run_quick_test test_OMIT_EVERYTHING $allsyms - + # Now try one quick.test with each of the OMIT symbols defined. 
Included # are the OMIT_FLOATING_POINT and OMIT_PRAGMA symbols, even though we # know they will fail. It's good to be reminded of this from time to time. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/report1.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/report1.txt --- sqlite3-3.4.2/tool/report1.txt 2005-04-23 23:45:24.000000000 +0100 +++ sqlite3-3.6.16/tool/report1.txt 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -The SQL database used for ACD contains 113 tables and indices implemented -in GDBM. The following are statistics on the sizes of keys and data -within these tables and indices. - -Entries: 962080 -Size: 45573853 -Avg Size: 48 -Key Size: 11045299 -Avg Key Size: 12 -Max Key Size: 99 - - - Size of key Cummulative - and data Instances Percentage ------------- ---------- ----------- - 0..8 266 0% - 9..12 5485 0% - 13..16 73633 8% - 17..24 180918 27% - 25..32 209823 48% - 33..40 148995 64% - 41..48 76304 72% - 49..56 14346 73% - 57..64 15725 75% - 65..80 44916 80% - 81..96 127815 93% - 97..112 34769 96% - 113..128 13314 98% - 129..144 8098 99% - 145..160 3355 99% - 161..176 1159 99% - 177..192 629 99% - 193..208 221 99% - 209..224 210 99% - 225..240 129 99% - 241..256 57 99% - 257..288 496 99% - 289..320 60 99% - 321..352 37 99% - 353..384 46 99% - 385..416 22 99% - 417..448 24 99% - 449..480 26 99% - 481..512 27 99% - 513..1024 471 99% - 1025..2048 389 99% - 2049..4096 182 99% - 4097..8192 74 99% - 8193..16384 34 99% -16385..32768 17 99% -32769..65536 5 99% -65537..131073 3 100% - - -This information is gathered to help design the new built-in -backend for sqlite 2.0. Note in particular that 99% of all -database entries have a combined key and data size of less than -144 bytes. So if a leaf node in the new database is able to -store 144 bytes of combined key and data, only 1% of the leaves -will require overflow pages. Furthermore, note that no key -is larger than 99 bytes, so if the key will never be on an -overflow page. - -The average combined size of key+data is 48. Add in 16 bytes of -overhead for a total of 64. That means that a 1K page will -store (on average) about 16 entries. diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/showdb.c /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/showdb.c --- sqlite3-3.4.2/tool/showdb.c 2006-12-15 21:21:28.000000000 +0000 +++ sqlite3-3.6.16/tool/showdb.c 2009-06-25 12:35:52.000000000 +0100 @@ -25,7 +25,7 @@ int i, j; aData = malloc(pagesize); if( aData==0 ) out_of_memory(); - lseek(db, (iPg-1)*pagesize, SEEK_SET); + lseek(db, (iPg-1)*(long long int)pagesize, SEEK_SET); read(db, aData, pagesize); fprintf(stdout, "Page %d:\n", iPg); for(i=0; i +#include +#include +#include +#include +#include "sqlite3.h" + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +#include "hwtime.h" + +/* +** Convert a zero-terminated ASCII string into a zero-terminated +** UTF-16le string. Memory to hold the returned string comes +** from malloc() and should be freed by the caller. +*/ +static void *asciiToUtf16le(const char *z){ + int n = strlen(z); + char *z16; + int i, j; + + z16 = malloc( n*2 + 2 ); + for(i=j=0; i<=n; i++){ + z16[j++] = z[i]; + z16[j++] = 0; + } + return (void*)z16; +} + +/* +** Timers +*/ +static sqlite_uint64 prepTime = 0; +static sqlite_uint64 runTime = 0; +static sqlite_uint64 finalizeTime = 0; + +/* +** Prepare and run a single statement of SQL. 
+*/ +static void prepareAndRun(sqlite3 *db, const char *zSql){ + void *utf16; + sqlite3_stmt *pStmt; + const void *stmtTail; + sqlite_uint64 iStart, iElapse; + int rc; + + printf("****************************************************************\n"); + printf("SQL statement: [%s]\n", zSql); + utf16 = asciiToUtf16le(zSql); + iStart = sqlite3Hwtime(); + rc = sqlite3_prepare16_v2(db, utf16, -1, &pStmt, &stmtTail); + iElapse = sqlite3Hwtime() - iStart; + prepTime += iElapse; + printf("sqlite3_prepare16_v2() returns %d in %llu cycles\n", rc, iElapse); + if( rc==SQLITE_OK ){ + int nRow = 0; + iStart = sqlite3Hwtime(); + while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; } + iElapse = sqlite3Hwtime() - iStart; + runTime += iElapse; + printf("sqlite3_step() returns %d after %d rows in %llu cycles\n", + rc, nRow, iElapse); + iStart = sqlite3Hwtime(); + rc = sqlite3_finalize(pStmt); + iElapse = sqlite3Hwtime() - iStart; + finalizeTime += iElapse; + printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse); + } + free(utf16); +} + +int main(int argc, char **argv){ + void *utf16; + sqlite3 *db; + int rc; + int nSql; + char *zSql; + int i, j; + FILE *in; + sqlite_uint64 iStart, iElapse; + sqlite_uint64 iSetup = 0; + int nStmt = 0; + int nByte = 0; + + if( argc!=3 ){ + fprintf(stderr, "Usage: %s FILENAME SQL-SCRIPT\n" + "Runs SQL-SCRIPT as UTF16 against a UTF16 database\n", + argv[0]); + exit(1); + } + in = fopen(argv[2], "r"); + fseek(in, 0L, SEEK_END); + nSql = ftell(in); + zSql = malloc( nSql+1 ); + fseek(in, 0L, SEEK_SET); + nSql = fread(zSql, 1, nSql, in); + zSql[nSql] = 0; + + printf("SQLite version: %d\n", sqlite3_libversion_number()); + unlink(argv[1]); + utf16 = asciiToUtf16le(argv[1]); + iStart = sqlite3Hwtime(); + rc = sqlite3_open16(utf16, &db); + iElapse = sqlite3Hwtime() - iStart; + iSetup = iElapse; + printf("sqlite3_open16() returns %d in %llu cycles\n", rc, iElapse); + free(utf16); + for(i=j=0; j +#include +#include +#include +#include + +#if defined(_MSC_VER) +#include +#else +#include +#include +#include +#endif + +#include "sqlite3.h" + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +#include "hwtime.h" + +/* +** Timers +*/ +static sqlite_uint64 prepTime = 0; +static sqlite_uint64 runTime = 0; +static sqlite_uint64 finalizeTime = 0; + +/* +** Prepare and run a single statement of SQL. 
+*/ +static void prepareAndRun(sqlite3 *db, const char *zSql, int bQuiet){ + sqlite3_stmt *pStmt; + const char *stmtTail; + sqlite_uint64 iStart, iElapse; + int rc; + + if (!bQuiet){ + printf("***************************************************************\n"); + } + if (!bQuiet) printf("SQL statement: [%s]\n", zSql); + iStart = sqlite3Hwtime(); + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail); + iElapse = sqlite3Hwtime() - iStart; + prepTime += iElapse; + if (!bQuiet){ + printf("sqlite3_prepare_v2() returns %d in %llu cycles\n", rc, iElapse); + } + if( rc==SQLITE_OK ){ + int nRow = 0; + iStart = sqlite3Hwtime(); + while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; } + iElapse = sqlite3Hwtime() - iStart; + runTime += iElapse; + if (!bQuiet){ + printf("sqlite3_step() returns %d after %d rows in %llu cycles\n", + rc, nRow, iElapse); + } + iStart = sqlite3Hwtime(); + rc = sqlite3_finalize(pStmt); + iElapse = sqlite3Hwtime() - iStart; + finalizeTime += iElapse; + if (!bQuiet){ + printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse); + } + } +} + +int main(int argc, char **argv){ + sqlite3 *db; + int rc; + int nSql; + char *zSql; + int i, j; + FILE *in; + sqlite_uint64 iStart, iElapse; + sqlite_uint64 iSetup = 0; + int nStmt = 0; + int nByte = 0; + const char *zArgv0 = argv[0]; + int bQuiet = 0; +#if !defined(_MSC_VER) + struct tms tmsStart, tmsEnd; + clock_t clkStart, clkEnd; +#endif + +#ifdef HAVE_OSINST + extern sqlite3_vfs *sqlite3_instvfs_binarylog(char *, char *, char *); + extern void sqlite3_instvfs_destroy(sqlite3_vfs *); + sqlite3_vfs *pVfs = 0; +#endif + + while (argc>3) + { +#ifdef HAVE_OSINST + if( argc>4 && (strcmp(argv[1], "-log")==0) ){ + pVfs = sqlite3_instvfs_binarylog("oslog", 0, argv[2]); + sqlite3_vfs_register(pVfs, 1); + argv += 2; + argc -= 2; + continue; + } +#endif + + /* + ** Increasing the priority slightly above normal can help with + ** repeatability of testing. Note that with Cygwin, -5 equates + ** to "High", +5 equates to "Low", and anything in between + ** equates to "Normal". + */ + if( argc>4 && (strcmp(argv[1], "-priority")==0) ){ +#if defined(_MSC_VER) + int new_priority = atoi(argv[2]); + if(!SetPriorityClass(GetCurrentProcess(), + (new_priority<=-5) ? HIGH_PRIORITY_CLASS : + (new_priority<=0) ? ABOVE_NORMAL_PRIORITY_CLASS : + (new_priority==0) ? NORMAL_PRIORITY_CLASS : + (new_priority<5) ? 
BELOW_NORMAL_PRIORITY_CLASS : + IDLE_PRIORITY_CLASS)){ + printf ("error setting priority\n"); + exit(2); + } +#else + struct sched_param myParam; + sched_getparam(0, &myParam); + printf ("Current process priority is %d.\n", (int)myParam.sched_priority); + myParam.sched_priority = atoi(argv[2]); + printf ("Setting process priority to %d.\n", (int)myParam.sched_priority); + if (sched_setparam (0, &myParam) != 0){ + printf ("error setting priority\n"); + exit(2); + } +#endif + argv += 2; + argc -= 2; + continue; + } + + if( argc>3 && strcmp(argv[1], "-quiet")==0 ){ + bQuiet = -1; + argv++; + argc--; + continue; + } + + break; + } + + if( argc!=3 ){ + fprintf(stderr, "Usage: %s [options] FILENAME SQL-SCRIPT\n" + "Runs SQL-SCRIPT against a UTF8 database\n" + "\toptions:\n" +#ifdef HAVE_OSINST + "\t-log \n" +#endif + "\t-priority : set priority of task\n" + "\t-quiet : only display summary results\n", + zArgv0); + exit(1); + } + + in = fopen(argv[2], "r"); + fseek(in, 0L, SEEK_END); + nSql = ftell(in); + zSql = malloc( nSql+1 ); + fseek(in, 0L, SEEK_SET); + nSql = fread(zSql, 1, nSql, in); + zSql[nSql] = 0; + + printf("SQLite version: %d\n", sqlite3_libversion_number()); + unlink(argv[1]); +#if !defined(_MSC_VER) + clkStart = times(&tmsStart); +#endif + iStart = sqlite3Hwtime(); + rc = sqlite3_open(argv[1], &db); + iElapse = sqlite3Hwtime() - iStart; + iSetup = iElapse; + if (!bQuiet) printf("sqlite3_open() returns %d in %llu cycles\n", rc, iElapse); + for(i=j=0; j=6 && memcmp(&zSql[i], ".crash",6)==0 ) exit(1); + nStmt++; + nByte += n; + prepareAndRun(db, &zSql[i], bQuiet); + } + zSql[j] = ';'; + i = j+1; + } + } + } + iStart = sqlite3Hwtime(); + sqlite3_close(db); + iElapse = sqlite3Hwtime() - iStart; +#if !defined(_MSC_VER) + clkEnd = times(&tmsEnd); +#endif + iSetup += iElapse; + if (!bQuiet) printf("sqlite3_close() returns in %llu cycles\n", iElapse); + + printf("\n"); + printf("Statements run: %15d stmts\n", nStmt); + printf("Bytes of SQL text: %15d bytes\n", nByte); + printf("Total prepare time: %15llu cycles\n", prepTime); + printf("Total run time: %15llu cycles\n", runTime); + printf("Total finalize time: %15llu cycles\n", finalizeTime); + printf("Open/Close time: %15llu cycles\n", iSetup); + printf("Total time: %15llu cycles\n", + prepTime + runTime + finalizeTime + iSetup); + +#if !defined(_MSC_VER) + printf("\n"); + printf("Total user CPU time: %15.3g secs\n", (tmsEnd.tms_utime - tmsStart.tms_utime)/(double)CLOCKS_PER_SEC ); + printf("Total system CPU time: %15.3g secs\n", (tmsEnd.tms_stime - tmsStart.tms_stime)/(double)CLOCKS_PER_SEC ); + printf("Total real time: %15.3g secs\n", (clkEnd -clkStart)/(double)CLOCKS_PER_SEC ); +#endif + +#ifdef HAVE_OSINST + if( pVfs ){ + sqlite3_instvfs_destroy(pVfs); + printf("vfs log written to %s\n", argv[0]); + } +#endif + + return 0; +} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/tool/speedtest8inst1.c /tmp/3ARg2Grji7/sqlite3-3.6.16/tool/speedtest8inst1.c --- sqlite3-3.4.2/tool/speedtest8inst1.c 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/tool/speedtest8inst1.c 2008-06-11 12:00:32.000000000 +0100 @@ -0,0 +1,216 @@ +/* +** Performance test for SQLite. +** +** This program reads ASCII text from a file named on the command-line +** and submits that text to SQLite for evaluation. A new database +** is created at the beginning of the program. All statements are +** timed using the high-resolution timer built into Intel-class processors. +** +** To compile this program, first compile the SQLite library separately +** will full optimizations. 
For example: +** +** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c +** +** Then link against this program. But to do optimize this program +** because that defeats the hi-res timer. +** +** gcc speedtest8.c sqlite3.o -ldl -I../src +** +** Then run this program with a single argument which is the name of +** a file containing SQL script that you want to test: +** +** ./a.out test.db test.sql +*/ +#include +#include +#include +#include +#include +#include +#include "sqlite3.h" + +#include "test_osinst.c" + +/* +** Prepare and run a single statement of SQL. +*/ +static void prepareAndRun(sqlite3_vfs *pInstVfs, sqlite3 *db, const char *zSql){ + sqlite3_stmt *pStmt; + const char *stmtTail; + int rc; + char zMessage[1024]; + zMessage[1023] = '\0'; + + sqlite3_uint64 iTime; + + sqlite3_snprintf(1023, zMessage, "sqlite3_prepare_v2: %s", zSql); + sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage); + + iTime = sqlite3Hwtime(); + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail); + iTime = sqlite3Hwtime() - iTime; + sqlite3_instvfs_binarylog_call(pInstVfs,BINARYLOG_PREPARE_V2,iTime,rc,zSql); + + if( rc==SQLITE_OK ){ + int nRow = 0; + + sqlite3_snprintf(1023, zMessage, "sqlite3_step loop: %s", zSql); + sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage); + iTime = sqlite3Hwtime(); + while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; } + iTime = sqlite3Hwtime() - iTime; + sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_STEP, iTime, rc, zSql); + + sqlite3_snprintf(1023, zMessage, "sqlite3_finalize: %s", zSql); + sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage); + iTime = sqlite3Hwtime(); + rc = sqlite3_finalize(pStmt); + iTime = sqlite3Hwtime() - iTime; + sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_FINALIZE, iTime, rc, zSql); + } +} + +static int stringcompare(const char *zLeft, const char *zRight){ + int ii; + for(ii=0; zLeft[ii] && zRight[ii]; ii++){ + if( zLeft[ii]!=zRight[ii] ) return 0; + } + return( zLeft[ii]==zRight[ii] ); +} + +static char *readScriptFile(const char *zFile, int *pnScript){ + sqlite3_vfs *pVfs = sqlite3_vfs_find(0); + sqlite3_file *p; + int rc; + sqlite3_int64 nByte; + char *zData = 0; + int flags = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB; + + p = (sqlite3_file *)malloc(pVfs->szOsFile); + rc = pVfs->xOpen(pVfs, zFile, p, flags, &flags); + if( rc!=SQLITE_OK ){ + goto error_out; + } + + rc = p->pMethods->xFileSize(p, &nByte); + if( rc!=SQLITE_OK ){ + goto close_out; + } + + zData = (char *)malloc(nByte+1); + rc = p->pMethods->xRead(p, zData, nByte, 0); + if( rc!=SQLITE_OK ){ + goto close_out; + } + zData[nByte] = '\0'; + + p->pMethods->xClose(p); + free(p); + *pnScript = nByte; + return zData; + +close_out: + p->pMethods->xClose(p); + +error_out: + free(p); + free(zData); + return 0; +} + +int main(int argc, char **argv){ + + const char zUsageMsg[] = + "Usage: %s options...\n" + " where available options are:\n" + "\n" + " -db DATABASE-FILE (database file to operate on)\n" + " -script SCRIPT-FILE (script file to read sql from)\n" + " -log LOG-FILE (log file to create)\n" + " -logdata (log all data to log file)\n" + "\n" + " Options -db, -script and -log are compulsory\n" + "\n" + ; + + const char *zDb = 0; + const char *zScript = 0; + const char *zLog = 0; + int logdata = 0; + + int ii; + int i, j; + int rc; + + sqlite3_vfs *pInstVfs; /* Instrumentation VFS */ + + char *zSql = 0; + int nSql; + + sqlite3 *db; + + for(ii=1; iivdbe.c +# +# Modifications made: +# +# All modifications are within the sqlite3VdbeExec() function. 
The +# modifications seek to reduce the amount of stack space allocated by +# this routine by moving local variable declarations out of individual +# opcode implementations and into a single large union. The union contains +# a separate structure for each opcode and that structure contains the +# local variables used by that opcode. In this way, the total amount +# of stack space required by sqlite3VdbeExec() is reduced from the +# sum of all local variables to the maximum of the local variable space +# required for any single opcode. +# +# In order to be recognized by this script, local variables must appear +# on the first line after the open curly-brace that begins a new opcode +# implementation. Local variables must not have initializers, though they +# may be commented. +# +# The union definition is inserted in place of a special marker comment +# in the preamble to the sqlite3VdbeExec() implementation. +# +############################################################################# +# +set beforeUnion {} ;# C code before union +set unionDef {} ;# C code of the union +set afterUnion {} ;# C code after the union +set sCtr 0 ;# Context counter + +# Read program text up to the spot where the union should be +# inserted. +# +while {![eof stdin]} { + set line [gets stdin] + if {[regexp {INSERT STACK UNION HERE} $line]} break + append beforeUnion $line\n +} + +# Process the remaining text. Build up the union definition as we go. +# +set vlist {} +set seenDecl 0 +set namechars {abcdefghijklmnopqrstuvwxyz} +set nnc [string length $namechars] +while {![eof stdin]} { + set line [gets stdin] + if {[regexp "^case (OP_\\w+): \173" $line all operator]} { + append afterUnion $line\n + set vlist {} + while {![eof stdin]} { + set line [gets stdin] + if {[regexp {^ +(const )?\w+ \**(\w+)(\[.*\])?;} $line \ + all constKeyword vname notused1]} { + if {!$seenDecl} { + set sname {} + append sname [string index $namechars [expr {$sCtr/$nnc}]] + append sname [string index $namechars [expr {$sCtr%$nnc}]] + incr sCtr + append unionDef " struct ${operator}_stack_vars \173\n" + append afterUnion \ + "#if 0 /* local variables moved into u.$sname */\n" + set seenDecl 1 + } + append unionDef " $line\n" + append afterUnion $line\n + lappend vlist $vname + } else { + break + } + } + if {$seenDecl} { + append unionDef " \175 $sname;\n" + append afterUnion "#endif /* local variables moved into u.$sname */\n" + } + set seenDecl 0 + } + if {[regexp "^\175" $line]} { + append afterUnion $line\n + set vlist {} + } elseif {[llength $vlist]>0} { + append line " " + foreach v $vlist { + regsub -all "(\[^a-zA-Z0-9>.\])${v}(\\W)" $line "\\1u.$sname.$v\\2" line + } + append afterUnion [string trimright $line]\n + } elseif {$line=="" && [eof stdin]} { + # no-op + } else { + append afterUnion $line\n + } +} + +# Output the resulting text. +# +puts -nonewline $beforeUnion +puts " /********************************************************************" +puts " ** Automatically generated code" +puts " **" +puts " ** The following union is automatically generated by the" +puts " ** vdbe-compress.tcl script. The purpose of this union is to" +puts " ** reduce the amount of stack space required by this function." +puts " ** See comments in the vdbe-compress.tcl script for details." 
+puts " */" +puts " union vdbeExecUnion \173" +puts -nonewline $unionDef +puts " \175 u;" +puts " /* End automatically generated code" +puts " ********************************************************************/" +puts -nonewline $afterUnion diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/VERSION /tmp/3ARg2Grji7/sqlite3-3.6.16/VERSION --- sqlite3-3.4.2/VERSION 2007-08-13 17:02:14.000000000 +0100 +++ sqlite3-3.6.16/VERSION 2009-06-25 12:45:57.000000000 +0100 @@ -1 +1 @@ -3.4.2 +3.6.16 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/34to35.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/34to35.html --- sqlite3-3.4.2/www/34to35.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/34to35.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,901 @@ + + +SQLite Changes From Version 3.4.2 To 3.5.0 + + + + + +

    Moving From SQLite 3.4.2 to 3.5.0

    + SQLite version 3.5.0 introduces a new OS interface layer that + is incompatible with all prior versions of SQLite. In addition, + a few existing interfaces have been generalized to work across all + database connections within a process rather than just all + connections within a thread. The purpose of this article + is to describe the changes to 3.5.0 in detail so that users + of prior versions of SQLite can judge what, if any, effort will + be required to upgrade to newer versions. +

    +

    1.0 Overview Of Changes

+ A quick enumeration of the changes in SQLite version 3.5.0 is provided here. Subsequent sections will describe these changes in more detail.

    +

    +

+ 1. The OS interface layer has been completely reworked:
+    a. The undocumented sqlite3_os_switch() interface has been removed.
+    b. The SQLITE_ENABLE_REDEF_IO compile-time flag no longer functions. I/O procedures are now always redefinable.
+    c. Three new objects are defined for specifying I/O procedures: sqlite3_vfs, sqlite3_file, and sqlite3_io_methods.
+    d. Three new interfaces are used to create alternative OS interfaces: sqlite3_vfs_register(), sqlite3_vfs_unregister(), and sqlite3_vfs_find().
+    e. A new interface has been added to provide additional control over the creation of new database connections: sqlite3_open_v2(). The legacy interfaces of sqlite3_open() and sqlite3_open16() continue to be fully supported.
+ 2. The optional shared cache and memory management features that were introduced in version 3.3.0 can now be used across multiple threads within the same process. Formerly, these extensions only applied to database connections operating within a single thread.
+    a. The sqlite3_enable_shared_cache() interface now applies to all threads within a process, not just the one thread in which it was run.
+    b. The sqlite3_soft_heap_limit() interface now applies to all threads within a process, not just the one thread in which it was run.
+    c. The sqlite3_release_memory() interface will now attempt to reduce memory usage across all database connections in all threads, not just connections in the thread where the interface is called.
+    d. The sqlite3_thread_cleanup() interface has become a no-op.
+ 3. Restrictions on the use of the same database connection by multiple threads have been dropped. It is now safe for multiple threads to use the same database connection at the same time.
+ 4. There is now a compile-time option that allows an application to define alternative malloc()/free() implementations without having to modify any core SQLite code.
+ 5. There is now a compile-time option that allows an application to define alternative mutex implementations without having to modify any core SQLite code.

    +

    + Of these changes, only 1a and 2a through 2c are incompatibilities + in any formal sense. + But users who have previously made custom modifications to the + SQLite source (for example to add a custom OS layer for embedded + hardware) might find that these changes have a larger impact. + On the other hand, an important goal of these changes is to make + it much easier to customize SQLite for use on different operating + systems. +

    +

    2.0 The OS Interface Layer

+ If your system defines a custom OS interface for SQLite or if you were using the undocumented sqlite3_os_switch() interface, then you will need to make modifications in order to upgrade to SQLite version 3.5.0. This may seem painful at first glance. But as you look more closely, you will probably discover that the new SQLite interface makes your changes smaller and easier to understand and manage. It is likely that your changes will now also work seamlessly with the SQLite amalgamation. You will no longer need to make any changes to the core SQLite source code. All of your changes can be made from application code, and you can link against a standard, unmodified version of the SQLite amalgamation. Furthermore, the OS interface layer, which was formerly undocumented, is now an officially supported interface for SQLite. So you have some assurance that this will be a one-time change and that your new backend will continue to work in future versions of SQLite.

    +

    2.1 The Virtual File System Object

+ The new OS interface for SQLite is built around an object named sqlite3_vfs. The "vfs" stands for "Virtual File System". The sqlite3_vfs object is basically a structure containing pointers to functions that implement the primitive disk I/O operations that SQLite needs to perform in order to read and write databases. In this article, we will often refer to an sqlite3_vfs object as a "VFS".

    +

    + SQLite is able to use multiple VFSes at the same time. Each + individual database connection is associated with just one VFS. + But if you have multiple database connections, each connection + can be associated with a different VFS. +

    +

    + There is always a default VFS. + The legacy interfaces sqlite3_open() and sqlite3_open16() always + use the default VFS. + The new interface for creating database connections, + sqlite3_open_v2(), allows you to specify which VFS you want to + use by name. +
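+ As a brief illustration, the call below is a minimal sketch of opening a connection through an explicitly named VFS; the filename and the use of the standard "unix" VFS name are only examples, and passing a NULL VFS name selects the default instead:
+
+#include <sqlite3.h>
+
+/* Open test.db through an explicitly named VFS.  Passing 0 for the
+** final argument would select the default VFS instead. */
+int open_with_vfs(sqlite3 **ppDb){
+  return sqlite3_open_v2("test.db", ppDb,
+                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
+                         "unix");   /* zName of the desired VFS */
+}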

    +

    2.1.1 Registering New VFS Objects

    + Standard builds of SQLite for Unix or Windows come with a single + VFS named "unix" or "win32", as appropriate. This one VFS is also + the default. So if you are using the legacy open functions, everything + will continue to operate as it has before. The change is that an application + now has the flexibility of adding new VFS modules to implement a + customized OS layer. The sqlite3_vfs_register() API can be used + to tell SQLite about one or more application-defined VFS modules: +

    +
    +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
    +

+ Applications can call sqlite3_vfs_register() at any time, though of course a VFS needs to be registered before it can be used. The first argument is a pointer to a customized VFS object that the application has prepared. The second argument is true to make the new VFS the default VFS so that it will be used by the legacy sqlite3_open() and sqlite3_open16() APIs. If the new VFS is not the default, then you will probably have to use the new sqlite3_open_v2() API to use it. Note, however, that if a new VFS is the only VFS known to SQLite (if SQLite was compiled without its usual default VFS or if the pre-compiled default VFS was removed using sqlite3_vfs_unregister()) then the new VFS automatically becomes the default VFS regardless of the makeDflt argument to sqlite3_vfs_register().

    +

+ Standard builds include the default "unix" or "win32" VFSes. But if you use the -DOS_OTHER=1 compile-time option, then SQLite is built without a default VFS. In that case, the application must register at least one VFS prior to calling sqlite3_open(). This is the approach that embedded applications should use. Rather than modifying the SQLite source to insert an alternative OS layer as was done in prior releases of SQLite, instead compile an unmodified SQLite source file (preferably the amalgamation) with the -DOS_OTHER=1 option, then invoke sqlite3_vfs_register() to define the interface to the underlying filesystem prior to creating any database connections.
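+ A minimal sketch of that start-up sequence follows; the my_embedded_vfs object and the init function name are hypothetical, and the VFS is assumed to have been filled in elsewhere by the application:
+
+#include <sqlite3.h>
+
+extern sqlite3_vfs my_embedded_vfs;   /* hypothetical, populated elsewhere */
+
+/* Call once during application start-up, before any database
+** connections are created. */
+int init_os_layer(void){
+  /* makeDflt=1 so that plain sqlite3_open() will use my_embedded_vfs. */
+  return sqlite3_vfs_register(&my_embedded_vfs, 1);
+}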

    +

    2.1.2 Additional Control Over VFS Objects

    + The sqlite3_vfs_unregister() API is used to remove an existing + VFS from the system. +

    +
    +int sqlite3_vfs_unregister(sqlite3_vfs*);
    +

    + The sqlite3_vfs_find() API is used to locate a particular VFS + by name. Its prototype is as follows: +

    +
    +sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
    +

    + The argument is the symbolic name for the desired VFS. If the + argument is a NULL pointer, then the default VFS is returned. + The function returns a pointer to the sqlite3_vfs object that + implements the VFS. Or it returns a NULL pointer if no object + could be found that matched the search criteria. +

    +

    2.1.3 Modifications Of Existing VFSes

+ Once a VFS has been registered, it should never be modified. If a change in behavior is required, a new VFS should be registered. The application could, perhaps, use sqlite3_vfs_find() to locate the old VFS, make a copy of the old VFS into a new sqlite3_vfs object, make the desired modifications to the new VFS, unregister the old VFS, then register the new VFS in its place. Existing database connections would continue to use the old VFS even after it is unregistered, but new database connections would use the new VFS.
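+ The copy-and-modify sequence described above might look roughly like this; the base VFS name, the new name, and which methods get overridden are all assumptions for illustration:
+
+#include <sqlite3.h>
+
+static sqlite3_vfs modified;                /* our private, modified copy */
+
+int register_modified_vfs(void){
+  sqlite3_vfs *pBase = sqlite3_vfs_find("unix");   /* locate the old VFS */
+  if( pBase==0 ) return SQLITE_ERROR;
+  modified = *pBase;                        /* copy it into a new object */
+  modified.zName = "modified-unix";         /* hypothetical new name */
+  /* ... override selected methods of "modified" here ... */
+  return sqlite3_vfs_register(&modified, 0);  /* register alongside the old */
+}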

    +

    2.1.4 The VFS Object

    + A VFS object is an instance of the following structure: +

    +
    +typedef struct sqlite3_vfs sqlite3_vfs;
    +struct sqlite3_vfs {
    +  int iVersion;            /* Structure version number */
    +  int szOsFile;            /* Size of subclassed sqlite3_file */
    +  int mxPathname;          /* Maximum file pathname length */
    +  sqlite3_vfs *pNext;      /* Next registered VFS */
    +  const char *zName;       /* Name of this virtual file system */
    +  void *pAppData;          /* Pointer to application-specific data */
    +  int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
    +               int flags, int *pOutFlags);
    +  int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
    +  int (*xAccess)(sqlite3_vfs*, const char *zName, int flags);
    +  int (*xGetTempName)(sqlite3_vfs*, char *zOut);
    +  int (*xFullPathname)(sqlite3_vfs*, const char *zName, char *zOut);
    +  void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
    +  void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
    +  void *(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol);
    +  void (*xDlClose)(sqlite3_vfs*, void*);
    +  int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
    +  int (*xSleep)(sqlite3_vfs*, int microseconds);
    +  int (*xCurrentTime)(sqlite3_vfs*, double*);
+  /* New fields may be appended in future versions.  The iVersion
    +  ** value will increment whenever this happens. */
    +};
    +

    + To create a new VFS, an application fills in an instance of this + structure with appropriate values and then calls sqlite3_vfs_register(). +

    +

    + The iVersion field of sqlite3_vfs should be 1 for SQLite version 3.5.0. + This number may increase in future versions of SQLite if we have to + modify the VFS object in some way. We hope that this never happens, + but the provision is made in case it does. +

    +

    + The szOsFile field is the size in bytes of the structure that defines + an open file: the sqlite3_file object. This object will be described + more fully below. The point here is that each VFS implementation can + define its own sqlite3_file object containing whatever information + the VFS implementation needs to store about an open file. SQLite needs + to know how big this object is, however, in order to preallocate enough + space to hold it. +

    +

    + The mxPathname field is the maximum length of a file pathname that + this VFS can use. SQLite sometimes has to preallocate buffers of + this size, so it should be as small as reasonably possible. Some + filesystems permit huge pathnames, but in practice pathnames rarely + extend beyond 100 bytes or so. You do not have to put the longest + pathname that the underlying filesystem can handle here. You only + have to put the longest pathname that you want SQLite to be able to + handle. A few hundred is a good value in most cases. +

    +

    + The pNext field is used internally by SQLite. Specifically, SQLite + uses this field to form a linked list of registered VFSes. +

    +

+ The zName field is the symbolic name of the VFS. This is the name that sqlite3_vfs_find() compares against when it is looking for a VFS.

    +

+ The pAppData pointer is unused by the SQLite core. The pointer is available to store auxiliary information that a VFS implementation might want to carry around.

    +

+ The remaining fields of the sqlite3_vfs object all store pointers to functions that implement primitive operations. We call these "methods". The first method, xOpen, is used to open files on the underlying storage media. The result is an sqlite3_file object. There are additional methods, defined by the sqlite3_file object itself, that are used to read, write, and close the file. The additional methods are detailed below. The filename is in UTF-8. SQLite will guarantee that the zFilename string passed to xOpen() is a full pathname as generated by xFullPathname() and that the string will be valid and unchanged until xClose() is called. So the sqlite3_file can store a pointer to the filename if it needs to remember the filename for some reason. The flags argument to xOpen() is a copy of the flags argument to sqlite3_open_v2(). If sqlite3_open() or sqlite3_open16() is used, then flags is SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE. If xOpen() opens a file read-only then it sets *pOutFlags to include SQLITE_OPEN_READONLY. Other bits in *pOutFlags may be set. SQLite also adds one of several object-type flags to the xOpen() call, depending on the kind of object being opened.

+ The file I/O implementation can use the object-type flags to change the way it deals with files. For example, an application that does not care about crash recovery or rollback might make the open of a journal file a no-op. Writes to this journal are also a no-op. Any attempt to read the journal returns SQLITE_IOERR. Or the implementation might recognize that a database file will be doing page-aligned sector reads and writes in a random order and set up its I/O subsystem accordingly. SQLite might also add the SQLITE_OPEN_DELETEONCLOSE or SQLITE_OPEN_EXCLUSIVE flags to the xOpen call. The SQLITE_OPEN_DELETEONCLOSE flag means the file should be deleted when it is closed. This will always be set for TEMP databases and journals and for subjournals. The SQLITE_OPEN_EXCLUSIVE flag means the file should be opened for exclusive access. This flag is set for all files except for the main database file. The sqlite3_file structure passed as the third argument to xOpen is allocated by the caller. xOpen just fills it in. The caller allocates a minimum of szOsFile bytes for the sqlite3_file structure.

    +

+ The difference between an SQLITE_OPEN_TEMP_DB database and an SQLITE_OPEN_TRANSIENT_DB database is this: SQLITE_OPEN_TEMP_DB is used for explicitly declared and named TEMP tables (using the CREATE TEMP TABLE syntax) or for named tables in a temporary database that is created by opening a database with a filename that is an empty string. An SQLITE_OPEN_TRANSIENT_DB holds a database table that SQLite creates automatically in order to evaluate a subquery or ORDER BY or GROUP BY clause. Both TEMP_DB and TRANSIENT_DB databases are private and are deleted automatically. TEMP_DB databases last for the duration of the database connection. TRANSIENT_DB databases last only for the duration of a single SQL statement.

    +

+ The xDelete method is used to delete a file. The name of the file is given in the second parameter. The filename will be in UTF-8. The VFS must convert the filename into whatever character representation the underlying operating system expects. If the syncDir parameter is true, then the xDelete method should not return until the change to the directory contents for the directory containing the deleted file has been synced to disk in order to ensure that the file does not "reappear" if a power failure occurs soon after.

    +

    + The xAccess method is used to check for access permissions on a file. + The filename will be UTF-8 encoded. The flags argument will be + SQLITE_ACCESS_EXISTS to check for the existence of the file, + SQLITE_ACCESS_READWRITE to check to see if the file is both readable + and writable, or SQLITE_ACCESS_READ to check to see if the file is + at least readable. The "file" named by the second parameter might + be a directory or folder name. +

    +

    + The xGetTempName method computes the name of a temporary file that + SQLite can use. The name should be written into the buffer given + by the second parameter. SQLite will size that buffer to hold + at least mxPathname bytes. The generated filename should be in UTF-8. + To avoid security problems, the generated temporary filename should + contain enough randomness to prevent an attacker from guessing the + temporary filename in advance. +

    +

    + The xFullPathname method is used to convert a relative pathname + into a full pathname. The resulting full pathname is written into + the buffer provided by the third parameter. SQLite will size the + output buffer to at least mxPathname bytes. Both the input and + output names should be in UTF-8. +

    +

    + The xDlOpen, xDlError, xDlSym, and xDlClose methods are all used for + accessing shared libraries at run-time. These methods may be omitted + (and their pointers set to zero) if the library is compiled with + SQLITE_OMIT_LOAD_EXTENSION or if the sqlite3_enable_load_extension() + interface is never used to enable dynamic extension loading. The + xDlOpen method opens a shared library or DLL and returns a pointer to + a handle. NULL is returned if the open fails. If the open fails, + the xDlError method can be used to obtain a text error message. + The message is written into the zErrMsg buffer of the third parameter + which is at least nByte bytes in length. The xDlSym returns a pointer + to a symbol in the shared library. The name of the symbol is given + by the second parameter. UTF-8 encoding is assumed. If the symbol + is not found a NULL pointer is returned. The xDlClose routine closes + the shared library. +

    +

+ The xRandomness method is used exactly once to initialize the pseudo-random number generator (PRNG) inside of SQLite. Only the xRandomness method on the default VFS is used. The xRandomness methods on other VFSes are never accessed by SQLite. The xRandomness routine requests that nByte bytes of randomness be written into zOut. The routine returns the actual number of bytes of randomness obtained. The quality of the randomness so obtained will determine the quality of the randomness generated by built-in SQLite functions such as random() and randomblob(). SQLite also uses its PRNG to generate temporary file names. On some platforms (ex: Windows) SQLite assumes that temporary file names are unique without actually testing for collisions, so it is important to have good-quality randomness even if the random() and randomblob() functions are never used.

    +

    + The xSleep method is used to suspend the calling thread for at + least the number of microseconds given. This method is used to + implement the sqlite3_sleep() and sqlite3_busy_timeout() APIs. + In the case of sqlite3_sleep() the xSleep method of the default + VFS is always used. If the underlying system does not have a + microsecond resolution sleep capability, then the sleep time should + be rounded up. xSleep returns this rounded-up value. +

    +

+ The xCurrentTime method finds the current time and date and writes the result as a double-precision floating point value into the pointer provided by the second parameter. The time and date are in coordinated universal time (UTC) and are expressed as a fractional Julian day number.

    +

    2.1.5 The Open File Object

    + The result of opening a file is an instance of an sqlite3_file object. + The sqlite3_file object is an abstract base class defined as follows: +

    +
    +typedef struct sqlite3_file sqlite3_file;
    +struct sqlite3_file {
    +  const struct sqlite3_io_methods *pMethods;
    +};
    +

    + Each VFS implementation will subclass the sqlite3_file by adding + additional fields at the end to hold whatever information the VFS + needs to know about an open file. It does not matter what information + is stored as long as the total size of the structure does not exceed + the szOsFile value recorded in the sqlite3_vfs object. +
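+ A typical subclass, sketched here with hypothetical names and assuming a POSIX file descriptor as the underlying handle, simply embeds sqlite3_file as its first member:
+
+typedef struct DemoFile DemoFile;
+struct DemoFile {
+  sqlite3_file base;     /* base class; pMethods lives here; must be first */
+  int fd;                /* underlying POSIX file descriptor */
+  /* ... any other per-file state this VFS needs ... */
+};
+
+ The corresponding sqlite3_vfs would then set szOsFile to sizeof(DemoFile).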

    +

    + The sqlite3_io_methods object is a structure that contains pointers + to methods for reading, writing, and otherwise dealing with files. + This object is defined as follows: +

    +
    +typedef struct sqlite3_io_methods sqlite3_io_methods;
    +struct sqlite3_io_methods {
    +  int iVersion;
    +  int (*xClose)(sqlite3_file*);
    +  int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
    +  int (*xSync)(sqlite3_file*, int flags);
    +  int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
    +  int (*xLock)(sqlite3_file*, int);
    +  int (*xUnlock)(sqlite3_file*, int);
    +  int (*xCheckReservedLock)(sqlite3_file*);
    +  int (*xFileControl)(sqlite3_file*, int op, void *pArg);
    +  int (*xSectorSize)(sqlite3_file*);
    +  int (*xDeviceCharacteristics)(sqlite3_file*);
    +  /* Additional methods may be added in future releases */
    +};
    +

    + The iVersion field of sqlite3_io_methods is provided as insurance + against future enhancements. The iVersion value should always be + 1 for SQLite version 3.5. +

    +

    + The xClose method closes the file. The space for the sqlite3_file + structure is deallocated by the caller. But if the sqlite3_file + contains pointers to other allocated memory or resources, those + allocations should be released by the xClose method. +

    +

+ The xRead method reads iAmt bytes from the file beginning at byte offset iOfst. The data read is stored in the buffer pointed to by the second parameter. xRead returns SQLITE_OK on success, SQLITE_IOERR_SHORT_READ if it was not able to read the full number of bytes because it reached end-of-file, or SQLITE_IOERR_READ for any other error.
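+ A minimal xRead sketch, reusing the hypothetical DemoFile subclass from above and POSIX pread(), could look like this (zero-filling the unread tail of the buffer on a short read is a defensive choice here, not something the text above requires):
+
+#include <unistd.h>
+#include <string.h>
+#include <sqlite3.h>
+
+static int demoRead(sqlite3_file *pFile, void *zBuf, int iAmt,
+                    sqlite3_int64 iOfst){
+  DemoFile *p = (DemoFile*)pFile;
+  ssize_t got = pread(p->fd, zBuf, (size_t)iAmt, (off_t)iOfst);
+  if( got==iAmt ) return SQLITE_OK;
+  if( got<0 ) return SQLITE_IOERR_READ;
+  /* Short read: end-of-file reached before iAmt bytes were available. */
+  memset((char*)zBuf + (size_t)got, 0, (size_t)(iAmt - got));
+  return SQLITE_IOERR_SHORT_READ;
+}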

    +

+ The xWrite method writes iAmt bytes of data from the second parameter into the file beginning at an offset of iOfst bytes. If the size of the file is less than iOfst bytes prior to the write, then xWrite should ensure that the file is extended with zeros up to iOfst bytes prior to beginning its write. xWrite continues to extend the file as necessary so that the size of the file is at least iAmt+iOfst bytes at the conclusion of the xWrite call. The xWrite method returns SQLITE_OK on success. If the write cannot complete because the underlying storage medium is full, then SQLITE_FULL is returned. SQLITE_IOERR_WRITE should be returned for any other error.

    +

    + The xTruncate method truncates a file to be nByte bytes in length. + If the file is already nByte bytes or less in length then this + method is a no-op. The xTruncate method returns SQLITE_OK on + success and SQLITE_IOERR_TRUNCATE if anything goes wrong. +

    +

+ The xSync method is used to force previously written data out of operating system cache and into non-volatile memory. The second parameter is usually SQLITE_SYNC_NORMAL. If the second parameter is SQLITE_SYNC_FULL then the xSync method should make sure that data has also been flushed through the disk controller's cache. The SQLITE_SYNC_FULL parameter is the equivalent of the F_FULLFSYNC fcntl() on Mac OS X. The xSync method returns SQLITE_OK on success and SQLITE_IOERR_FSYNC if anything goes wrong.

    +

    + The xFileSize() method determines the current size of the file + in bytes and writes that value into *pSize. It returns SQLITE_OK + on success and SQLITE_IOERR_FSTAT if something goes wrong. +

    +

+ The xLock and xUnlock methods are used to set and clear file locks. SQLite supports five levels of file locks, in order: SQLITE_LOCK_NONE, SQLITE_LOCK_SHARED, SQLITE_LOCK_RESERVED, SQLITE_LOCK_PENDING, and SQLITE_LOCK_EXCLUSIVE.

+ The underlying implementation can support some subset of these locking levels as long as it meets the other requirements of this paragraph. The locking level is specified as the second argument to both xLock and xUnlock. The xLock method increases the locking level to the specified locking level or higher. The xUnlock method decreases the locking level to no lower than the level specified. SQLITE_LOCK_NONE means that the file is unlocked. SQLITE_LOCK_SHARED gives permission to read the file. Multiple database connections can hold SQLITE_LOCK_SHARED at the same time. SQLITE_LOCK_RESERVED is like SQLITE_LOCK_SHARED in that it is permission to read the file, but only a single connection can hold a reserved lock at any point in time. SQLITE_LOCK_PENDING is also permission to read the file. Other connections can continue to read the file as well, but no other connection is allowed to escalate a lock from none to shared. SQLITE_LOCK_EXCLUSIVE is permission to write on the file. Only a single connection can hold an exclusive lock and no other connection can hold any lock (other than "none") while one connection is holding an exclusive lock. The xLock method returns SQLITE_OK on success, SQLITE_BUSY if it is unable to obtain the lock, or SQLITE_IOERR_RDLOCK if something else goes wrong. The xUnlock method returns SQLITE_OK on success and SQLITE_IOERR_UNLOCK for problems.

    +

    + The xCheckReservedLock method checks to see if another connection or + another process is currently holding a reserved, pending, or exclusive + lock on the file. It returns true or false. +

    +

    + The xFileControl() method is a generic interface that allows custom + VFS implementations to directly control an open file using the + (new and experimental) + sqlite3_file_control() interface. The second "op" argument + is an integer opcode. The third + argument is a generic pointer which is intended to be a pointer + to a structure that may contain arguments or space in which to + write return values. Potential uses for xFileControl() might be + functions to enable blocking locks with timeouts, to change the + locking strategy (for example to use dot-file locks), to inquire + about the status of a lock, or to break stale locks. The SQLite + core reserves opcodes less than 100 for its own use. + A list of opcodes less than 100 is available. + Applications that define a custom xFileControl method should use opcodes + greater than 100 to avoid conflicts. +

    +

+ The xSectorSize method returns the "sector size" of the underlying non-volatile media. A "sector" is defined as the smallest unit of storage that can be written without disturbing adjacent storage. On a disk drive the "sector size" has until recently been 512 bytes, though there is a push to increase this value to 4KiB. SQLite needs to know the sector size so that it can write a full sector at a time, and thus avoid corrupting adjacent storage space if a power loss occurs in the middle of a write.

    +

+ The xDeviceCharacteristics method returns an integer bit vector that defines any special properties that the underlying storage medium might have that SQLite can use to increase performance. The allowed return value is the bit-wise OR of the SQLITE_IOCAP_* values described below.

    + The SQLITE_IOCAP_ATOMIC bit means that all writes to this device are + atomic in the sense that either the entire write occurs or none of it + occurs. The other + SQLITE_IOCAP_ATOMICnnn values indicate that + writes of aligned blocks of the indicated size are atomic. + SQLITE_IOCAP_SAFE_APPEND means that when extending a file with new + data, the new data is written first and then the file size is updated. + So if a power failure occurs, there is no chance that the file might have + been extended with randomness. The SQLITE_IOCAP_SEQUENTIAL bit means + that all writes occur in the order that they are issued and are not + reordered by the underlying file system. +

    +

    2.1.6 Checklist For Constructing A New VFS

    + The preceding paragraphs contain a lot of information. + To ease the task of constructing + a new VFS for SQLite we offer the following implementation checklist: +

    +

    +

      +
    1. Define an appropriate subclass of the sqlite3_file object. +
    2. Implement the methods required by the sqlite3_io_methods object. +
    3. Create a static and + constant sqlite3_io_methods object containing pointers + to the methods from the previous step. +
    4. Implement the xOpen method that opens a file and populates an + sqlite3_file object, including setting pMethods to + point to the sqlite3_io_methods object from the previous step. +
    5. Implement the other methods required by sqlite3_vfs. +
    6. Define a static (but not constant) sqlite3_vfs structure that + contains pointers to the xOpen method and the other methods and + which contains the appropriate values for iVersion, szOsFile, + mxPathname, zName, and pAppData. +
    7. Implement a procedure that calls sqlite3_vfs_register() and + passes it a pointer to the sqlite3_vfs structure from the previous + step. This procedure is probably the only exported symbol in the + source file that implements your VFS. +
    +

    +

    + Within your application, call the procedure implemented in the last + step above as part of your initialization process before any + database connections are opened. +
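+ The skeleton below sketches how the checklist items fit together; every name is hypothetical, the method implementations are assumed to exist elsewhere, the DemoFile subclass is the one sketched earlier, and most of the sqlite3_vfs method slots are elided:
+
+#include <sqlite3.h>
+
+/* Method implementations (demoClose, demoRead, ..., demoOpen) are
+** assumed to be defined elsewhere in the application. */
+extern const sqlite3_io_methods demo_io_methods;   /* step 3 */
+extern int demoOpen(sqlite3_vfs*, const char*, sqlite3_file*, int, int*);
+
+static sqlite3_vfs demo_vfs = {          /* step 6 */
+  1,                  /* iVersion: 1 for SQLite version 3.5.0 */
+  sizeof(DemoFile),   /* szOsFile: size of the sqlite3_file subclass */
+  512,                /* mxPathname: a few hundred is usually enough */
+  0,                  /* pNext: maintained by SQLite */
+  "demo",             /* zName: the name used by sqlite3_vfs_find() */
+  0,                  /* pAppData */
+  demoOpen,           /* step 4: xOpen sets pMethods = &demo_io_methods */
+  /* xDelete, xAccess, xGetTempName, xFullPathname, xDlOpen, xDlError,
+  ** xDlSym, xDlClose, xRandomness, xSleep and xCurrentTime follow here. */
+};
+
+int register_demo_vfs(void){             /* step 7 */
+  return sqlite3_vfs_register(&demo_vfs, 1);   /* 1: make it the default */
+}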

    +

    3.0 The Memory Allocation Subsystem

    + Beginning with version 3.5, SQLite obtains all of the heap memory it + needs using the routines sqlite3_malloc(), sqlite3_free(), and + sqlite3_realloc(). These routines have existed in prior versions + of SQLite, but SQLite has previously bypassed these routines and used + its own memory allocator. This all changes in version 3.5.0. +

    +

+ The SQLite source tree actually contains multiple versions of the memory allocator. The default high-speed version found in the "mem1.c" source file is used for most builds. But if the SQLITE_MEMDEBUG flag is enabled, a separate memory allocator in the "mem2.c" source file is used instead. The mem2.c allocator implements lots of hooks to do error checking and to simulate memory allocation failures for testing purposes. Both of these allocators use the malloc()/free() implementation in the standard C library.

    +

+ Applications are not required to use either of these standard memory allocators. If SQLite is compiled with SQLITE_OMIT_MEMORY_ALLOCATION then no implementation for the sqlite3_malloc(), sqlite3_realloc(), and sqlite3_free() functions is provided. Instead, the application that links against SQLite must provide its own implementation of these functions. The application-provided memory allocator is not required to use the malloc()/free() implementation in the standard C library. An embedded application might, for example, provide an alternative memory allocator that draws from a fixed memory pool set aside for the exclusive use of SQLite.

    +

+ Applications that implement their own memory allocator must provide implementations of the usual three allocation functions sqlite3_malloc(), sqlite3_realloc(), and sqlite3_free(). And they must also implement a fourth function:

    +
    +int sqlite3_memory_alarm(
    +  void(*xCallback)(void *pArg, sqlite3_int64 used, int N),
    +  void *pArg,
    +  sqlite3_int64 iThreshold
    +);
    +

+ The sqlite3_memory_alarm routine is used to register a callback on memory allocation events. This routine registers or clears a callback that fires when the amount of memory allocated exceeds iThreshold. Only a single callback can be registered at a time. Each call to sqlite3_memory_alarm() overwrites the previous callback. The callback is disabled by setting xCallback to a NULL pointer.

    +

    + The parameters to the callback are the pArg value, the + amount of memory currently in use, and the size of the + allocation that provoked the callback. The callback will + presumably invoke sqlite3_free() to free up memory space. + The callback may invoke sqlite3_malloc() or sqlite3_realloc() + but if it does, no additional callbacks will be invoked by + the recursive calls. +
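+ The fragment below is a shape-only sketch of such a callback (as the next paragraph explains, ordinary applications should leave this interface to the sqlite3_soft_heap_limit() machinery); the threshold and the function names are arbitrary:
+
+#include <sqlite3.h>
+
+/* Fires when total allocation would exceed the registered threshold.
+** "used" is the memory currently in use; "N" is the size of the
+** allocation that provoked the callback. */
+static void lowMemAlarm(void *pArg, sqlite3_int64 used, int N){
+  (void)pArg; (void)used;
+  sqlite3_release_memory(N);    /* try to free at least N bytes */
+}
+
+int install_alarm(void){
+  return sqlite3_memory_alarm(lowMemAlarm, 0, 8*1024*1024);  /* 8 MB, arbitrary */
+}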

    +

    + The sqlite3_soft_heap_limit() interface works by registering + a memory alarm at the soft heap limit and invoking + sqlite3_release_memory() in the alarm callback. Application + programs should not attempt to use the sqlite3_memory_alarm() + interface because doing so will interfere with the + sqlite3_soft_heap_limit() module. This interface is exposed + only so that applications can provide their own + alternative implementation when the SQLite core is + compiled with SQLITE_OMIT_MEMORY_ALLOCATION. +

    +

    + The built-in memory allocators in SQLite also provide the following + additional interfaces: +

    +
    +sqlite3_int64 sqlite3_memory_used(void);
    +sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
    +

+ These interfaces can be used by an application to monitor how much memory SQLite is using. The sqlite3_memory_used() routine returns the number of bytes of memory currently in use and the sqlite3_memory_highwater() routine returns the maximum instantaneous memory usage. Neither routine includes the overhead associated with the memory allocator. These routines are provided for use by the application. SQLite never invokes them itself. So if the application is providing its own memory allocation subsystem, it can omit these interfaces if desired.
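+ A small monitoring helper, sketched under the assumption that one of the built-in allocators is in use, might simply print both figures:
+
+#include <stdio.h>
+#include <sqlite3.h>
+
+void report_sqlite_memory(void){
+  printf("in use: %lld bytes, high-water: %lld bytes\n",
+         (long long)sqlite3_memory_used(),
+         (long long)sqlite3_memory_highwater(0));   /* 0: do not reset */
+}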

    +

    4.0 The Mutex Subsystem

    + SQLite has always been threadsafe in the sense that it is safe to + use different SQLite database connections in different threads at the + same time. The constraint was that the same database connection + could not be used in two separate threads at once. SQLite version 3.5.0 + relaxes this constraint. +

    +

+ In order to allow multiple threads to use the same database connection at the same time, SQLite must make extensive use of mutexes. And for this reason a new mutex subsystem has been added. The mutex subsystem has the following interface:

    +
    +sqlite3_mutex *sqlite3_mutex_alloc(int);
    +void sqlite3_mutex_free(sqlite3_mutex*);
    +void sqlite3_mutex_enter(sqlite3_mutex*);
    +int sqlite3_mutex_try(sqlite3_mutex*);
    +void sqlite3_mutex_leave(sqlite3_mutex*);
    +

+ Though these routines exist for the use of the SQLite core, application code is free to use these routines as well, if desired. A mutex is an sqlite3_mutex object. The sqlite3_mutex_alloc() routine allocates a new mutex object and returns a pointer to it. The argument to sqlite3_mutex_alloc() should be SQLITE_MUTEX_FAST or SQLITE_MUTEX_RECURSIVE for non-recursive and recursive mutexes, respectively. If the underlying system does not provide non-recursive mutexes, then a recursive mutex can be substituted. The argument to sqlite3_mutex_alloc() can also be a constant designating one of several static mutexes.

    + These static mutexes are reserved for use internally by SQLite + and should not be used by the application. The static mutexes + are all non-recursive. +

    +

    + The sqlite3_mutex_free() routine should be used to deallocate + a non-static mutex. If a static mutex is passed to this routine + then the behavior is undefined. +

    +

+ The sqlite3_mutex_enter() routine attempts to enter the mutex and blocks if another thread is already there. sqlite3_mutex_try() attempts to enter and returns SQLITE_OK on success or SQLITE_BUSY if another thread is already there. sqlite3_mutex_leave() exits a mutex. The mutex is held until the number of exits matches the number of entrances. If sqlite3_mutex_leave() is called on a mutex that the thread is not currently holding, then the behavior is undefined. If any routine is called for a deallocated mutex, then the behavior is undefined.
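+ As a sketch of how an application might borrow this interface for its own locking (all names here are hypothetical):
+
+#include <sqlite3.h>
+
+static sqlite3_mutex *gLock;     /* allocated once at start-up */
+
+int init_lock(void){
+  gLock = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+  return gLock ? SQLITE_OK : SQLITE_NOMEM;
+}
+
+void with_lock(void (*xWork)(void)){
+  sqlite3_mutex_enter(gLock);    /* blocks until the mutex is available */
+  xWork();
+  sqlite3_mutex_leave(gLock);
+}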

    +

    + The SQLite source code provides multiple implementations of these + APIs, suitable for varying environments. If SQLite is compiled with + the SQLITE_THREADSAFE=0 flag then a no-op mutex implementation that + is fast but does no real mutual exclusion is provided. That + implementation is suitable for use in single-threaded applications + or applications that only use SQLite in a single thread. Other + real mutex implementations are provided based on the underlying + operating system. +

    +

    + Embedded applications may wish to provide their own mutex implementation. + If SQLite is compiled with the -DSQLITE_MUTEX_APPDEF=1 compile-time flag + then the SQLite core provides no mutex subsystem and a mutex subsystem + that matches the interface described above must be provided by the + application that links against SQLite. +

    +

    5.0 Other Interface Changes

+ Version 3.5.0 of SQLite changes the behavior of a few APIs in ways that are technically incompatible. However, these APIs are seldom used and even when they are used it is difficult to imagine a scenario where the change might break something. The changes actually make these interfaces much more useful and powerful.

    +

    + Prior to version 3.5.0, the sqlite3_enable_shared_cache() API + would enable and disable the shared cache feature for all connections + within a single thread - the same thread from which the + sqlite3_enable_shared_cache() routine was called. Database connections + that used the shared cache were restricted to running in the same + thread in which they were opened. Beginning with version 3.5.0, + the sqlite3_enable_shared_cache() applies to all database connections + in all threads within the process. Now database connections running + in separate threads can share a cache. And database connections that + use shared cache can migrate from one thread to another. +

    +

    + Prior to version 3.5.0 the sqlite3_soft_heap_limit() set an upper + bound on heap memory usage for all database connections within a + single thread. Each thread could have its own heap limit. Beginning + in version 3.5.0, there is a single heap limit for the entire process. + This seems more restrictive (one limit as opposed to many) but in + practice it is what most users want. +

    +

    + Prior to version 3.5.0 the sqlite3_release_memory() function would + try to reclaim memory from all database connections in the same thread + as the sqlite3_release_memory() call. Beginning with version 3.5.0, + the sqlite3_release_memory() function will attempt to reclaim memory + from all database connections in all threads. +

    +

    6.0 Summary

    + The transition from SQLite version 3.4.2 to 3.5.0 is a major change. + Every source code file in the SQLite core had to be modified, some + extensively. And the change introduced some minor incompatibilities + in the C interface. But we feel that the benefits of the transition + from 3.4.2 to 3.5.0 far outweigh the pain of porting. The new + VFS layer is now well-defined and stable and should simplify future + customizations. The VFS layer, and the separable memory allocator + and mutex subsystems allow a standard SQLite source code amalgamation + to be used in an embedded project without change, greatly simplifying + configuration management. And the resulting system is much more + tolerant of highly threaded designs. +

    + +
    +This page last modified 2008/11/12 14:10:29 UTC + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/35to36.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/35to36.html --- sqlite3-3.4.2/www/35to36.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/35to36.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,310 @@ + + +SQLite Changes From Version 3.5.9 To 3.6.0 + + + + + +

    Moving From SQLite 3.5.9 to 3.6.0

    + SQLite version 3.6.0 contains many changes. As is the custom with + the SQLite project, most changes are fully backwards compatible. + However, a few of the changes in version 3.6.0 are incompatible and + might require modifications to application code and/or makefiles. + This document is a briefing on the changes in SQLite 3.6.0 + with special attention to the incompatible changes. +

    +
+ Key Points:
+ • The database file format is unchanged.
+ • All incompatibilities are on obscure interfaces and hence should have zero impact on most applications.
    +
    +

    1.0 Incompatible Changes

    + Incompatible changes are covered first since they are the most + important to maintainers and programmers. +

    +

    1.1 Overview Of Incompatible Changes

    +

+ 1. Changes to the sqlite3_vfs object (the revised declarations are sketched just after this list):
+    a. The signature of the xAccess method has been modified to return an error code and to store its output into an integer pointed to by a parameter, rather than returning the output directly. This change allows the xAccess() method to report failures. In association with this signature change, a new extended error code SQLITE_IOERR_ACCESS has been added.
+    b. The xGetTempname method has been removed from sqlite3_vfs. In its place, the xOpen method is enhanced to open a temporary file of its own invention when the filename parameter is NULL.
+    c. Added the xGetLastError() method to sqlite3_vfs for returning filesystem-specific error messages and error codes back to SQLite.
+ 2. The signature of the xCheckReservedLock method on sqlite3_io_methods has been modified so that it returns an error code and stores its boolean result into an integer pointed to by a parameter. In association with this change, a new extended error code SQLITE_IOERR_CHECKRESERVEDLOCK has been added.
+ 3. When SQLite is ported to new operating systems (operating systems other than Unix, Windows, and OS/2, for which ports are provided together with the core), two new functions, sqlite3_os_init() and sqlite3_os_end(), must be provided as part of the port.
+ 4. The way in which the IN and NOT IN operators handle NULL values in their right-hand expressions has been brought into compliance with the SQL standard and with other SQL database engines.
+ 5. The column names for the result sets of SELECT statements have been tweaked in some cases to work more like other SQL database engines.
+ 6. Changes to compile-time options:
+    a. The SQLITE_MUTEX_APPDEF compile-time parameter is no longer recognized. As a replacement, alternative mutex implementations may be created at runtime using sqlite3_config() with the SQLITE_CONFIG_MUTEX operator and the sqlite3_mutex_methods object.
+    b. Compile-time options OS_UNIX, OS_WIN, OS_OS2, OS_OTHER, and TEMP_STORE have been renamed to include an "SQLITE_" prefix in order to help avoid namespace collisions with application software. The new names of these options are respectively: SQLITE_OS_UNIX, SQLITE_OS_WIN, SQLITE_OS_OS2, SQLITE_OS_OTHER, and SQLITE_TEMP_STORE.
    +
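+ For reference, the revised method declarations described in items 1 and 2 above look roughly like this in the 3.6.x headers (sketched from the author's reading of those headers rather than quoted from this document):
+
+/* In sqlite3_vfs: xAccess now returns an error code and writes its
+** answer through pResOut; xGetLastError is new. */
+int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
+int (*xGetLastError)(sqlite3_vfs*, int nByte, char *zOut);
+
+/* In sqlite3_io_methods: xCheckReservedLock likewise returns an error
+** code and writes its boolean result through pResOut. */
+int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);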

    +

    1.2 Changes To The VFS Layer

    + SQLite version 3.5.0 introduced a new OS interface layer that + provided an abstraction of the underlying operating system. + This was an important innovation and has proven to be helpful + in porting and maintaining SQLite. + However, the developers have discovered some minor flaws in the + original "virtual file system" design introduced in version 3.5.0 + and so SQLite 3.6.0 includes some small incompatible changes + to address these flaws. +

    +
+ Key Point: The incompatible changes in the SQLite operating-system interface for version 3.6.0 only affect the rare applications that make use of the virtual file system interface, that supply an application-defined mutex implementation, or that make use of other obscure compile-time options. The changes introduced by SQLite version 3.6.0 will have zero impact on the vast majority of SQLite applications that use the built-in interfaces to Unix, Windows, and OS/2 and that use the standard build configuration.
    +

    1.3 Changes In The Way The IN Operator Handles NULLs

    + All versions of SQLite up to and including version 3.5.9 have mishandled + NULL values on the right-hand side of IN and NOT IN operators. + Specifically, SQLite has previously ignored NULLs on the right-hand side + of IN and NOT IN. +

    +

    + Suppose we have a table X1 defined as follows: +

    +
    +  CREATE TABLE x1(x INTEGER);
    +  INSERT INTO x1 VALUES(1);
    +  INSERT INTO x1 VALUES(2);
    +  INSERT INTO x1 VALUES(NULL);
    +

    + Given the definition of X1 above, the following expressions have + historically evaluated to FALSE in SQLite, though the correct + answer is actually NULL: +

    +
    +  3 IN (1,2,NULL)
    +  3 IN (SELECT * FROM x1)
    +

    + Similarly, the following expressions have historically evaluated to + TRUE when in fact NULL is also the correct answer here: +

    +
    +  3 NOT IN (1,2,NULL)
    +  3 NOT IN (SELECT * FROM x1)
    +

    + The historical behavior of SQLite is incorrect according to the SQL:1999 + standard and it is inconsistent with the behavior of MySQL and + PostgreSQL. Version 3.6.0 changes the behavior of the IN and + NOT IN operators to conform to the standard and to give the same + results as other SQL database engines. +
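+ The change is visible from the C API: under 3.6.0 the first expression above now yields SQL NULL, which sqlite3_column_type() reports as SQLITE_NULL. A minimal check, with a hypothetical helper name, could be:
+
+#include <sqlite3.h>
+
+/* Returns 1 if "3 IN (1,2,NULL)" evaluates to NULL, 0 otherwise. */
+int in_with_null_is_null(sqlite3 *db){
+  sqlite3_stmt *pStmt = 0;
+  int isNull = 0;
+  if( sqlite3_prepare_v2(db, "SELECT 3 IN (1,2,NULL)", -1, &pStmt, 0)==SQLITE_OK
+   && sqlite3_step(pStmt)==SQLITE_ROW ){
+    isNull = sqlite3_column_type(pStmt, 0)==SQLITE_NULL;
+  }
+  sqlite3_finalize(pStmt);
+  return isNull;
+}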

    +
+ Key Point: The change to the way NULL values are handled by the IN and NOT IN operators is technically a bug fix, not a design change. However, maintainers should check to ensure that applications do not depend on the older, buggy behavior prior to upgrading to version 3.6.0.
    +

    1.4 Changes To Column Naming Rules

    + The column names reported by join subqueries have been modified slightly + in order to work more like other database engines. Consider the following + query: +

    +
    +  CREATE TABLE t1(a);
    +  CREATE TABLE t2(x);
    +  SELECT * FROM (SELECT t1.a FROM t1 JOIN t2 ORDER BY t2.x LIMIT 1) ORDER BY 1;
    +

    + In version 3.5.9 the query above would return a single column named "t1.a". + In version 3.6.0 the column name is just "a". +

    +

+ SQLite has never made any promises about the names of columns in the result set of a SELECT statement unless the column contains an AS clause. So this change to column names is technically not an incompatibility. SQLite is merely changing from one undefined behavior to another. Nevertheless, many applications depend on the unspecified column naming behavior of SQLite and so this change is discussed under the incompatible changes subheading.

    +

    1.5 Changes To Compile-Time Options

    + Compile-time options to SQLite are controlled by C-preprocessor + macros. SQLite version 3.6.0 changes the names of some of these + macros so that all C-preprocessor macros that are specific to + SQLite begin with the "SQLITE_" prefix. This is done to reduce the + risk of name collisions with other software modules. +

    +
+ Key Point: Changes to compile-time options have the potential to affect makefiles in projects that do customized builds of SQLite. These changes should have zero impact on application code and on most projects, which use a standard, default build of SQLite.
    +

    2.0 Fully Backwards-Compatible Enhancements

    + In addition to the incompatible changes listed above, SQLite + version 3.6.0 adds the following backwards compatible changes and + enhancements: +

    +

    +

+ 1. The new sqlite3_config() interface allows an application to customize the behavior of SQLite at run-time (a brief sketch appears after this list). Customizations possible using sqlite3_config() include the following:
+    a. Specify an alternative mutex implementation using the SQLITE_CONFIG_MUTEX verb with the sqlite3_mutex_methods object.
+    b. Specify an alternative malloc implementation using the SQLITE_CONFIG_MALLOC verb with the sqlite3_mem_methods object.
+    c. Partially or fully disable the use of mutexes using SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_MULTITHREAD and SQLITE_CONFIG_SERIALIZED.
+ 2. A new flag, SQLITE_OPEN_NOMUTEX, is made available to the sqlite3_open_v2() interface.
+ 3. The new sqlite3_status() interface allows an application to query the performance status of SQLite at runtime.
+ 4. The sqlite3_memory_used() and sqlite3_memory_highwater() interfaces are deprecated. The equivalent functionality is now available through sqlite3_status().
+ 5. The sqlite3_initialize() interface can be called to explicitly initialize the SQLite subsystem. The sqlite3_initialize() interface is called automatically when invoking certain interfaces so the use of sqlite3_initialize() is not required, but it is recommended.
+ 6. The sqlite3_shutdown() interface causes SQLite to release any system resources (memory allocations, mutexes, open file handles) that might have been allocated by sqlite3_initialize().
+ 7. The sqlite3_next_stmt() interface allows an application to discover all prepared statements associated with a database connection.
+ 8. Added the page_count PRAGMA for returning the size of the underlying database file in pages.
+ 9. Added a new R*Tree index extension.
    +
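+ As the brief sketch promised above, run-time configuration has to happen before the library is initialized; the choice of SQLITE_CONFIG_SERIALIZED here is only an example:
+
+#include <sqlite3.h>
+
+int configure_sqlite(void){
+  int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);   /* item 1c above */
+  if( rc!=SQLITE_OK ) return rc;
+  return sqlite3_initialize();                         /* item 5 above */
+}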

    + +
    +This page last modified 2008/11/01 13:26:49 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/about.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/about.html --- sqlite3-3.4.2/www/about.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/about.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,177 @@ + + +About SQLite + + + + + +

    About SQLite


SQLite is an in-process library that implements a self-contained, serverless, zero-configuration, transactional SQL database engine. The code for SQLite is in the public domain and is thus free for use for any purpose, commercial or private. SQLite is currently found in more applications than we can count, including several high-profile projects.

    + +

SQLite is an embedded SQL database engine. Unlike most other SQL databases, SQLite does not have a separate server process. SQLite reads and writes directly to ordinary disk files. A complete SQL database with multiple tables, indices, triggers, and views is contained in a single disk file. The database file format is cross-platform - you can freely copy a database between 32-bit and 64-bit systems or between big-endian and little-endian architectures. These features make SQLite a popular choice as an Application File Format. Think of SQLite not as a replacement for Oracle but as a replacement for fopen().

    + +

    SQLite is a compact library. +With all features enabled, the library size can be less than 300KiB, +depending on compiler optimization settings. (Some compiler optimizations +such as aggressive function inlining and loop unrolling can cause the +object code to be much larger.) If optional features are omitted, the +size of the SQLite library can be reduced below 180KiB. SQLite can also +be made to run in minimal stack space (16KiB) and +very little heap (100KiB), making SQLite a popular database engine +choice on memory constrained gadgets such as cellphones, PDAs, and MP3 players. +There is a tradeoff between memory usage and speed. +SQLite generally runs faster the more memory +you give it. Nevertheless, performance is usually quite good even +in low-memory environments.

    + +

    SQLite is +very carefully tested prior to every +release and has a reputation for being very reliable. +Most of the SQLite source code is devoted purely to testing and +verification. An automated test suite runs millions of +tests involving hundreds of millions of individual SQL statements and achieves +over 99% statement coverage. SQLite responds gracefully to memory +allocation failures and disk I/O errors. Transactions are +ACID +even if interrupted by system crashes or power failures. +All of this is verified by +the automated tests using special test harnesses which simulate +system failures. +Of course, even with all this testing, there are still bugs. +But unlike some similar projects (especially commercial competitors) +SQLite is open and honest about all bugs and provides +bugs lists +including lists of + +critical bugs and +minute-by-minute +chronologies of bug reports and code changes.

    + +

    The SQLite code base is supported by an +international team of developers who work on +SQLite full-time. +The developers continue to expand the capabilities of SQLite +and enhance its reliability and performance while maintaining +backwards compatibility with the +published interface spec, +SQL syntax, and database file format. +The source code is absolutely free to anybody who wants it, +but +professional support services are also available.

    + +

    We the developers hope that you find SQLite useful and we +charge you to use it well: to make good and beautiful products that +are fast, reliable, and simple to use. Seek forgiveness for yourself +as you forgive others. And just as you have received SQLite for free, +so also freely give, paying the debt forward.

    +
    +This page last modified 2009/03/28 12:45:19 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/amalgamation.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/amalgamation.html --- sqlite3-3.4.2/www/amalgamation.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/amalgamation.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,125 @@ + + +The SQLite Amalgamation + + + + + +

    The SQLite Amalgamation

    + +

    The core SQLite library consists of about 88 files of C code +(as of Version 3.6.11) in the core with 12 additional files +in the FTS3 and RTREE extensions. +Most of these are "source" files in the sense that they are stored +in the configuration management system and are maintained directly. +But 6 of the core C files are generated automatically during the +compilation process. Of the 88 code files, 67 are C code and +21 are C header files.

    + +

    The standard makefiles for SQLite have a target for building +an object we call the "amalgamation". +The amalgamation is a single C code file, named "sqlite3.c", +that contains all C code +for the core SQLite library and the FTS3 and RTREE extensions. +This file contains about 104K lines of code +(62K if you omit blank lines and comments) and is over 3.5 megabytes +in size.

    + +

    The amalgamation contains everything you need to integrate SQLite +into a larger project. Just copy the amalgamation into your source +directory and compile it along with the other C code files in your project. +You may also want to make use of the "sqlite3.h" header file that +defines the programming API for SQLite. +The sqlite3.h header file is available separately. +The sqlite3.h file is also contained within the amalgamation, in +the first couple of thousand lines. So if you have a copy of +sqlite3.c but cannot seem to locate sqlite3.h, you can always +regenerate the sqlite3.h by copying and pasting from the amalgamation.

    + +

In addition to making SQLite easier to incorporate into other projects, the amalgamation also makes it run faster. Many compilers are able to do additional optimizations on code when it is contained within a single translation unit, as it is in the amalgamation. We have measured performance improvements of between 5 and 10% when we use the amalgamation to compile SQLite rather than individual source files. The downside of this is that the additional optimizations often take the form of function inlining, which tends to make the size of the resulting binary image larger.

    + +

    The amalgamation and +the sqlite3.h header file are available on +the download page as a file +named sqlite-amalgamation-X_X_X.zip +where the X's are replaced by the appropriate version number.

    +
    +This page last modified 2009/02/16 17:10:08 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch2b.fig /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch2b.fig --- sqlite3-3.4.2/www/arch2b.fig 2004-07-22 17:12:19.000000000 +0100 +++ sqlite3-3.6.16/www/arch2b.fig 1970-01-01 01:00:00.000000000 +0100 @@ -1,125 +0,0 @@ -#FIG 3.2 -Landscape -Center -Inches -Letter -100.00 -Single --2 -1200 2 -0 32 #000000 -0 33 #868686 -0 34 #dfefd7 -0 35 #d7efef -0 36 #efdbef -0 37 #efdbd7 -0 38 #e7efcf -0 39 #9e9e9e -6 3225 3900 4650 6000 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 4350 3900 4650 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 5100 3900 5475 -4 1 0 50 0 2 12 0.0000 4 135 1050 3900 5775 OS Interface\001 -4 1 0 50 0 2 12 0.0000 4 135 615 3900 4200 B-Tree\001 -4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 --6 -6 5175 4275 7200 6150 -6 5400 4519 6825 5090 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 4519 6750 4519 6750 5009 5400 5009 5400 4519 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 4601 6825 4601 6825 5090 5475 5090 5475 4601 -4 1 0 50 0 2 12 0.0000 4 135 630 6000 4845 Utilities\001 --6 -6 5400 5416 6825 5987 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 5416 6750 5416 6750 5906 5400 5906 5400 5416 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 5498 6825 5498 6825 5987 5475 5987 5475 5498 -4 1 0 50 0 2 12 0.0000 4 135 855 6000 5742 Test Code\001 --6 -2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 - 5175 4275 7200 4275 7200 6150 5175 6150 5175 4275 -4 1 0 50 0 1 12 1.5708 4 135 885 7125 5253 Accessories\001 --6 -6 5400 2700 6825 3675 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2775 6825 2775 6825 3675 5475 3675 5475 2775 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 2700 6750 2700 6750 3600 5400 3600 5400 2700 -4 1 0 50 0 2 12 0.0000 4 135 420 6075 3075 Code\001 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 3300 Generator\001 --6 -6 5400 1875 6825 2400 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1875 6750 1875 6750 2325 5400 2325 5400 1875 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1950 6825 1950 6825 2400 5475 2400 5475 1950 -4 1 0 50 0 2 12 0.0000 4 135 570 6075 2175 Parser\001 --6 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 1500 3900 1800 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 
1 1 1.00 60.00 120.00 - 3900 2250 3900 2550 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 3000 3900 3900 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 4575 1950 5400 1350 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 5400 2925 4650 2175 -2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 - 2850 750 4875 750 4875 3375 2850 3375 2850 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 1500 6075 1800 -2 3 0 1 0 35 55 0 20 0.000 0 0 -1 0 0 5 - 2850 3675 4875 3675 4875 6150 2850 6150 2850 3675 -2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 - 5175 750 7200 750 7200 3975 5175 3975 5175 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 2400 6075 2700 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 -4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 -4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 -4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 -4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 -4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 -4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 -4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch2.fig /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch2.fig --- sqlite3-3.4.2/www/arch2.fig 2005-12-19 17:31:02.000000000 +0000 +++ sqlite3-3.6.16/www/arch2.fig 1970-01-01 01:00:00.000000000 +0100 @@ -1,123 +0,0 @@ -#FIG 3.2 -Landscape -Center -Inches -Letter -100.00 -Single --2 -1200 2 -0 32 #000000 -0 33 #868686 -0 34 #dfefd7 -0 35 #d7efef -0 36 #efdbef -0 37 #efdbd7 -0 38 #e7efcf -0 39 #9e9e9e -6 3225 3900 4650 6000 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 5475 4575 5475 4575 5925 3225 5925 3225 5475 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 5550 4650 5550 4650 6000 3300 6000 3300 5550 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 4650 4575 4650 4575 5100 3225 5100 3225 4650 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 4725 4650 4725 4650 5175 3300 5175 3300 4725 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 3900 4575 3900 4575 4350 3225 4350 3225 3900 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 3975 4650 3975 4650 4425 3300 4425 3300 3975 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 4350 3900 4650 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 5100 3900 5475 -4 1 0 50 0 2 12 0.0000 4 135 1050 3900 5775 OS Interface\001 -4 1 0 50 0 2 12 0.0000 4 135 615 3900 4200 B-Tree\001 -4 1 0 50 0 2 12 0.0000 4 180 495 3900 4950 Pager\001 --6 -6 5400 4725 6825 5250 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 4725 6750 4725 6750 5175 5400 5175 5400 4725 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 4800 6825 4800 6825 5250 5475 5250 5475 4800 -4 1 0 50 0 2 12 0.0000 4 135 630 6000 5025 Utilities\001 --6 -6 5400 5550 6825 6075 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 5550 6750 5550 6750 6000 5400 6000 5400 5550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 5625 6825 5625 6825 6075 5475 6075 5475 5625 -4 1 0 50 0 2 12 0.0000 4 135 855 6000 5850 Test Code\001 --6 -6 5400 2775 6825 3750 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2850 6825 2850 6825 3750 5475 3750 5475 2850 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 2775 6750 2775 6750 3675 5400 3675 5400 2775 -4 1 0 50 0 2 12 0.0000 4 135 420 6075 3150 Code\001 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 3375 Generator\001 --6 -6 5400 1950 6825 2475 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1950 6750 1950 6750 2400 5400 
2400 5400 1950 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 2025 6825 2025 6825 2475 5475 2475 5475 2025 -4 1 0 50 0 2 12 0.0000 4 135 570 6075 2250 Parser\001 --6 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 5400 1050 6750 1050 6750 1500 5400 1500 5400 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 5475 1125 6825 1125 6825 1575 5475 1575 5475 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1050 4575 1050 4575 1500 3225 1500 3225 1050 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1125 4650 1125 4650 1575 3300 1575 3300 1125 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 1800 4575 1800 4575 2250 3225 2250 3225 1800 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 1875 4650 1875 4650 2325 3300 2325 3300 1875 -2 2 0 1 0 7 51 0 20 0.000 0 0 -1 0 0 5 - 3225 2550 4575 2550 4575 3000 3225 3000 3225 2550 -2 2 0 0 0 33 52 0 20 0.000 0 0 -1 0 0 5 - 3300 2625 4650 2625 4650 3075 3300 3075 3300 2625 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 1500 3900 1800 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 2250 3900 2550 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 3900 3000 3900 3900 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 4575 1950 5400 1350 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 5400 2925 4650 2325 -2 2 0 1 0 34 55 0 20 0.000 0 0 -1 0 0 5 - 2850 750 4875 750 4875 3375 2850 3375 2850 750 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 1500 6075 1950 -2 3 0 1 0 35 55 0 20 0.000 0 0 -1 0 0 5 - 2850 3675 4875 3675 4875 6225 2850 6225 2850 3675 -2 2 0 1 0 37 55 0 20 0.000 0 0 -1 0 0 5 - 5175 750 7200 750 7200 4050 5175 4050 5175 750 -2 2 0 1 0 38 55 0 20 0.000 0 0 -1 0 0 5 - 5175 4425 7200 4425 7200 6225 5175 6225 5175 4425 -2 1 0 1 0 38 50 0 -1 0.000 0 0 -1 1 0 2 - 1 1 1.00 60.00 120.00 - 6075 2475 6075 2775 -4 1 0 50 0 2 12 0.0000 4 135 855 6075 1350 Tokenizer\001 -4 1 0 50 0 1 12 1.5708 4 180 1020 7125 2250 SQL Compiler\001 -4 1 0 50 0 1 12 1.5708 4 135 345 3075 2025 Core\001 -4 1 0 50 0 2 12 0.0000 4 135 1290 3900 2850 Virtual Machine\001 -4 1 0 50 0 2 12 0.0000 4 165 1185 3900 1995 SQL Command\001 -4 1 0 50 0 2 12 0.0000 4 135 855 3900 2183 Processor\001 -4 1 0 50 0 2 14 0.0000 4 150 870 3900 1350 Interface\001 -4 1 0 50 0 1 12 1.5708 4 135 885 7125 5400 Accessories\001 -4 1 0 50 0 1 12 1.5708 4 135 645 3075 4875 Backend\001 Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch2.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch.fig /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch.fig --- sqlite3-3.4.2/www/arch.fig 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/www/arch.fig 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -#FIG 3.2 -Portrait -Center -Inches -Letter -100.00 -Single --2 -1200 2 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 8550 3675 9075 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 7200 3675 7725 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 5775 3675 6300 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 3975 3675 4500 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 2625 3675 3150 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 1275 3675 1800 -2 1 0 3 0 7 100 0 -1 0.000 0 0 -1 1 0 2 - 1 1 3.00 75.00 135.00 - 3675 9900 3675 10425 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 10425 4875 10425 4875 11250 2550 11250 2550 10425 
-2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 9075 4875 9075 4875 9900 2550 9900 2550 9075 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 7725 4875 7725 4875 8550 2550 8550 2550 7725 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 6300 4875 6300 4875 7200 2550 7200 2550 6300 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 4500 4875 4500 4875 5775 2550 5775 2550 4500 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 3150 4875 3150 4875 3975 2550 3975 2550 3150 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 1800 4875 1800 4875 2625 2550 2625 2550 1800 -2 2 0 1 0 11 100 0 20 0.000 0 0 7 0 0 5 - 2550 450 4875 450 4875 1275 2550 1275 2550 450 -4 1 0 100 0 0 20 0.0000 4 195 1020 3675 750 Interface\001 -4 1 0 100 0 0 14 0.0000 4 195 2040 3675 1125 main.c table.c tclsqlite.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1920 3675 6675 Virtual Machine\001 -4 1 0 100 0 0 14 0.0000 4 150 570 3675 7050 vdbe.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1830 3675 4875 Code Generator\001 -4 1 0 100 0 0 14 0.0000 4 195 1860 3675 5175 build.c delete.c expr.c\001 -4 1 0 100 0 0 14 0.0000 4 195 2115 3675 5400 insert.c select.c update.c\001 -4 1 0 100 0 0 14 0.0000 4 150 705 3675 5625 where.c\001 -4 1 0 100 0 0 20 0.0000 4 195 735 3675 3450 Parser\001 -4 1 0 100 0 0 20 0.0000 4 195 1140 3675 2100 Tokenizer\001 -4 1 0 100 0 0 14 0.0000 4 150 870 3675 2475 tokenize.c\001 -4 1 0 100 0 0 20 0.0000 4 255 1350 3675 9375 Page Cache\001 -4 1 0 100 0 0 14 0.0000 4 150 630 3675 3825 parse.y\001 -4 1 0 100 0 0 14 0.0000 4 150 600 3675 8400 btree.c\001 -4 1 0 100 0 0 14 0.0000 4 150 645 3675 9750 pager.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1620 3675 8025 B-tree Driver\001 -4 1 0 100 0 0 14 0.0000 4 105 345 3675 11100 os.c\001 -4 1 0 100 0 0 20 0.0000 4 195 1470 3675 10725 OS Interface\001 Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch.html --- sqlite3-3.4.2/www/arch.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/arch.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,287 @@ + + +Architecture of SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    The Architecture Of SQLite

    + +

    Introduction

    + + + + +
    Block Diagram Of SQLite
    +

    This document describes the architecture of the SQLite library. +The information here is useful to those who want to understand or +modify the inner workings of SQLite. +

    + +

    +A block diagram showing the main components of SQLite +and how they interrelate is shown at the right. The text that +follows will provide a quick overview of each of these components. +

    + + +

    +This document describes SQLite version 3.0. Version 2.8 and +earlier are similar but the details differ. +

    + +

    Interface

    + +

    Much of the public interface to the SQLite library is implemented by +functions found in the main.c, legacy.c, and +vdbeapi.c source files +though some routines are +scattered about in other files where they can have access to data +structures with file scope. The +sqlite3_get_table() routine is implemented in table.c. +sqlite3_mprintf() is found in printf.c. +sqlite3_complete() is in tokenize.c. +The Tcl interface is implemented by tclsqlite.c. More +information on the C interface to SQLite is +available separately.

    + +

    To avoid name collisions with other software, all external +symbols in the SQLite library begin with the prefix sqlite3. +Those symbols that are intended for external use (in other words, +those symbols which form the API for SQLite) begin +with sqlite3_.
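As a hedged illustration of the naming convention (not part of the original page), the sketch below uses only sqlite3_-prefixed routines from the public API; the table name and SQL statements are invented for the example.

#include <stdio.h>
#include "sqlite3.h"

/* Print each row returned by sqlite3_exec().  Every routine called in
** this file is part of the public API and so begins with "sqlite3_". */
static int printRow(void *notUsed, int nCol, char **azVal, char **azCol){
  int i;
  (void)notUsed;
  for(i=0; i<nCol; i++){
    printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErrMsg = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1); SELECT x FROM t;",
               printRow, 0, &zErrMsg);
  if( zErrMsg ){ fprintf(stderr, "SQL error: %s\n", zErrMsg); sqlite3_free(zErrMsg); }
  sqlite3_close(db);
  return 0;
}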

    + +

    Tokenizer

    + +

    When a string containing SQL statements is to be executed, the +interface passes that string to the tokenizer. The job of the tokenizer +is to break the original string up into tokens and pass those tokens +one by one to the parser. The tokenizer is hand-coded in C in +the file tokenize.c. + +

    Note that in this design, the tokenizer calls the parser. People +who are familiar with YACC and BISON may be used to doing things the +other way around -- having the parser call the tokenizer. The author +of SQLite +has done it both ways and finds things generally work out nicer for +the tokenizer to call the parser. YACC has it backwards.

    + +

    Parser

    + +

    The parser is the piece that assigns meaning to tokens based on +their context. The parser for SQLite is generated using the +Lemon LALR(1) parser +generator. Lemon does the same job as YACC/BISON, but it uses +a different input syntax which is less error-prone. +Lemon also generates a parser which is reentrant and thread-safe. +And lemon defines the concept of a non-terminal destructor so +that it does not leak memory when syntax errors are encountered. +The source file that drives Lemon is found in parse.y.

    + +

    Because +lemon is a program not normally found on development machines, the +complete source code to lemon (just one C file) is included in the +SQLite distribution in the "tool" subdirectory. Documentation on +lemon is found in the "doc" subdirectory of the distribution. +

    + +

    Code Generator

    + +

After the parser assembles tokens into complete SQL statements, +it calls the code generator to produce virtual machine code that +will do the work that the SQL statements request. There are many +files in the code generator: +attach.c, +auth.c, +build.c, +delete.c, +expr.c, +insert.c, +pragma.c, +select.c, +trigger.c, +update.c, +vacuum.c +and where.c. +These files are where most of the serious magic happens. +expr.c handles code generation for expressions. +where.c handles code generation for WHERE clauses on +SELECT, UPDATE and DELETE statements. The files attach.c, +delete.c, insert.c, select.c, trigger.c, +update.c, and vacuum.c handle the code generation +for SQL statements with the same names. (Each of these files calls routines +in expr.c and where.c as necessary.) All other +SQL statements are coded out of build.c. +The auth.c file implements the functionality of +sqlite3_set_authorizer().
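The sketch below is an illustrative, hedged example of the application-side counterpart to auth.c: it registers an authorizer callback with sqlite3_set_authorizer(). The policy shown (deny DROP TABLE, allow everything else) is an assumption made only for the example.

#include "sqlite3.h"

/* Authorizer callback: invoked while SQL is being compiled by the code
** generator.  Returning SQLITE_DENY causes the statement to fail. */
static int denyDropTable(void *pArg, int code, const char *z1, const char *z2,
                         const char *zDb, const char *zTrigger){
  (void)pArg; (void)z1; (void)z2; (void)zDb; (void)zTrigger;
  return code==SQLITE_DROP_TABLE ? SQLITE_DENY : SQLITE_OK;
}

int installAuthorizer(sqlite3 *db){
  return sqlite3_set_authorizer(db, denyDropTable, 0);
}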

    + +

    Virtual Machine

    + +

    The program generated by the code generator is executed by +the virtual machine. Additional information about the virtual +machine is available separately. +To summarize, the virtual machine implements an abstract computing +engine specifically designed to manipulate database files. The +machine has a stack which is used for intermediate storage. +Each instruction contains an opcode and +up to three additional operands.

    + +

The virtual machine itself is entirely contained in a single +source file vdbe.c. The virtual machine also has +its own header files: vdbe.h, which defines an interface +between the virtual machine and the rest of the SQLite library, and +vdbeInt.h, which defines structures private to the virtual machine. +The vdbeaux.c file contains utilities used by the virtual +machine and interface modules used by the rest of the library to +construct VM programs. The vdbeapi.c file contains external +interfaces to the virtual machine such as the +sqlite3_bind_... family of functions. Individual values +(strings, integers, floating point numbers, and BLOBs) are stored +in an internal object named "Mem" which is implemented by +vdbemem.c.
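For readers who want to see the virtual machine from the application side, here is a hedged sketch using the sqlite3_prepare_v2(), sqlite3_bind_...(), and sqlite3_step() interfaces; the table and column names are assumptions made only for the example.

#include "sqlite3.h"

/* Compile an SQL statement into a VM program, bind one value, then run
** the program by stepping the virtual machine to completion. */
int insertValue(sqlite3 *db, int iVal){
  sqlite3_stmt *pStmt;          /* The prepared statement (VM program) */
  int rc = sqlite3_prepare_v2(db, "INSERT INTO t(x) VALUES(?1)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int(pStmt, 1, iVal);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* An INSERT produces no rows; a SELECT would deliver its results
    ** here through the sqlite3_column_... interfaces. */
  }
  return sqlite3_finalize(pStmt);
}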

    + +

    +SQLite implements SQL functions using callbacks to C-language routines. +Even the built-in SQL functions are implemented this way. Most of +the built-in SQL functions (ex: coalesce(), count(), +substr(), and so forth) can be found in func.c. +Date and time conversion functions are found in date.c. +
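Application-defined SQL functions are registered through the same callback mechanism used by the built-ins in func.c. The hedged sketch below registers a hypothetical half(X) function; the function name and behavior are assumptions for illustration.

#include "sqlite3.h"

/* Implementation of half(X): returns X divided by two. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

int registerHalf(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 halfFunc, 0, 0);
}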

    + +

    B-Tree

    + +

    An SQLite database is maintained on disk using a B-tree implementation +found in the btree.c source file. A separate B-tree is used for +each table and index in the database. All B-trees are stored in the +same disk file. Details of the file format are recorded in a large +comment at the beginning of btree.c.

    + +

    The interface to the B-tree subsystem is defined by the header file +btree.h. +

    + +

    Page Cache

    + +

    The B-tree module requests information from the disk in fixed-size +chunks. The default chunk size is 1024 bytes but can vary between 512 +and 65536 bytes. +The page cache is responsible for reading, writing, and +caching these chunks. +The page cache also provides the rollback and atomic commit abstraction +and takes care of locking of the database file. The +B-tree driver requests particular pages from the page cache and notifies +the page cache when it wants to modify pages or commit or rollback +changes. The page cache handles all the messy details of making sure +the requests are handled quickly, safely, and efficiently.
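The chunk size described above is the database page size. As a hedged aside (not part of the original text), an application can request a different page size with the page_size pragma before the database is populated; the 4096-byte value below is just an example.

#include "sqlite3.h"

/* Request 4096-byte pages.  This only takes effect if issued before
** any content is written to the database file. */
int setPageSize(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA page_size=4096;", 0, 0, 0);
}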

    + +

    The code to implement the page cache is contained in the single C +source file pager.c. The interface to the page cache subsystem +is defined by the header file pager.h. +

    + +

    OS Interface

    + +

+In order to provide portability between POSIX and Win32 operating systems, +SQLite uses an abstraction layer to interface with the operating system. +The interface to the OS abstraction layer is defined in +os.h. Each supported operating system has its own implementation: +os_unix.c for Unix, os_win.c for Windows, and so forth. +Each of these operating-system-specific implementations typically has its own +header file: os_unix.h, os_win.h, etc. +

    + +

    Utilities

    + +

    +Memory allocation and caseless string comparison routines are located +in util.c. +Symbol tables used by the parser are maintained by hash tables found +in hash.c. The utf.c source file contains Unicode +conversion subroutines. +SQLite has its own private implementation of printf() (with +some extensions) in printf.c and its own random number generator +in random.c. +

    + +

    Test Code

    + +

+If you count regression test scripts, +more than half the total code base of SQLite is devoted to testing. +There are many assert() statements in the main code files. +In addition, the source files test1.c through test5.c +together with md5.c implement extensions used for testing +purposes only. The os_test.c backend interface is used to +simulate power failures to verify the crash-recovery mechanism in +the pager. +

    +
    +This page last modified 2009/03/19 00:04:36 UTC +
    Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch.png and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch.png differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/arch.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/arch.tcl --- sqlite3-3.4.2/www/arch.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/arch.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,221 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: arch.tcl,v 1.16 2004/10/10 17:24:54 drh Exp $} -source common.tcl -header {Architecture of SQLite} -puts { -

    The Architecture Of SQLite

    - -

    Introduction

    - - - - -
    Block Diagram Of SQLite
    -

    This document describes the architecture of the SQLite library. -The information here is useful to those who want to understand or -modify the inner workings of SQLite. -

    - -

    -A block diagram showing the main components of SQLite -and how they interrelate is shown at the right. The text that -follows will provide a quick overview of each of these components. -

    - - -

    -This document describes SQLite version 3.0. Version 2.8 and -earlier are similar but the details differ. -

    - -

    Interface

    - -

    Much of the public interface to the SQLite library is implemented by -functions found in the main.c, legacy.c, and -vdbeapi.c source files -though some routines are -scattered about in other files where they can have access to data -structures with file scope. The -sqlite3_get_table() routine is implemented in table.c. -sqlite3_mprintf() is found in printf.c. -sqlite3_complete() is in tokenize.c. -The Tcl interface is implemented by tclsqlite.c. More -information on the C interface to SQLite is -available separately.

    - -

    To avoid name collisions with other software, all external -symbols in the SQLite library begin with the prefix sqlite3. -Those symbols that are intended for external use (in other words, -those symbols which form the API for SQLite) begin -with sqlite3_.

    - -

    Tokenizer

    - -

    When a string containing SQL statements is to be executed, the -interface passes that string to the tokenizer. The job of the tokenizer -is to break the original string up into tokens and pass those tokens -one by one to the parser. The tokenizer is hand-coded in C in -the file tokenize.c. - -

    Note that in this design, the tokenizer calls the parser. People -who are familiar with YACC and BISON may be used to doing things the -other way around -- having the parser call the tokenizer. The author -of SQLite -has done it both ways and finds things generally work out nicer for -the tokenizer to call the parser. YACC has it backwards.

    - -

    Parser

    - -

    The parser is the piece that assigns meaning to tokens based on -their context. The parser for SQLite is generated using the -Lemon LALR(1) parser -generator. Lemon does the same job as YACC/BISON, but it uses -a different input syntax which is less error-prone. -Lemon also generates a parser which is reentrant and thread-safe. -And lemon defines the concept of a non-terminal destructor so -that it does not leak memory when syntax errors are encountered. -The source file that drives Lemon is found in parse.y.

    - -

    Because -lemon is a program not normally found on development machines, the -complete source code to lemon (just one C file) is included in the -SQLite distribution in the "tool" subdirectory. Documentation on -lemon is found in the "doc" subdirectory of the distribution. -

    - -

    Code Generator

    - -

    After the parser assembles tokens into complete SQL statements, -it calls the code generator to produce virtual machine code that -will do the work that the SQL statements request. There are many -files in the code generator: -attach.c, -auth.c, -build.c, -delete.c, -expr.c, -insert.c, -pragma.c, -select.c, -trigger.c, -update.c, -vacuum.c -and where.c. -In these files is where most of the serious magic happens. -expr.c handles code generation for expressions. -where.c handles code generation for WHERE clauses on -SELECT, UPDATE and DELETE statements. The files attach.c, -delete.c, insert.c, select.c, trigger.c -update.c, and vacuum.c handle the code generation -for SQL statements with the same names. (Each of these files calls routines -in expr.c and where.c as necessary.) All other -SQL statements are coded out of build.c. -The auth.c file implements the functionality of -sqlite3_set_authorizer().

    - -

    Virtual Machine

    - -

    The program generated by the code generator is executed by -the virtual machine. Additional information about the virtual -machine is available separately. -To summarize, the virtual machine implements an abstract computing -engine specifically designed to manipulate database files. The -machine has a stack which is used for intermediate storage. -Each instruction contains an opcode and -up to three additional operands.

    - -

    The virtual machine itself is entirely contained in a single -source file vdbe.c. The virtual machine also has -its own header files: vdbe.h that defines an interface -between the virtual machine and the rest of the SQLite library and -vdbeInt.h which defines structure private the virtual machine. -The vdbeaux.c file contains utilities used by the virtual -machine and interface modules used by the rest of the library to -construct VM programs. The vdbeapi.c file contains external -interfaces to the virtual machine such as the -sqlite3_bind_... family of functions. Individual values -(strings, integer, floating point numbers, and BLOBs) are stored -in an internal object named "Mem" which is implemented by -vdbemem.c.

    - -

    -SQLite implements SQL functions using callbacks to C-language routines. -Even the built-in SQL functions are implemented this way. Most of -the built-in SQL functions (ex: coalesce(), count(), -substr(), and so forth) can be found in func.c. -Date and time conversion functions are found in date.c. -

    - -

    B-Tree

    - -

    An SQLite database is maintained on disk using a B-tree implementation -found in the btree.c source file. A separate B-tree is used for -each table and index in the database. All B-trees are stored in the -same disk file. Details of the file format are recorded in a large -comment at the beginning of btree.c.

    - -

    The interface to the B-tree subsystem is defined by the header file -btree.h. -

    - -

    Page Cache

    - -

    The B-tree module requests information from the disk in fixed-size -chunks. The default chunk size is 1024 bytes but can vary between 512 -and 65536 bytes. -The page cache is responsible for reading, writing, and -caching these chunks. -The page cache also provides the rollback and atomic commit abstraction -and takes care of locking of the database file. The -B-tree driver requests particular pages from the page cache and notifies -the page cache when it wants to modify pages or commit or rollback -changes and the page cache handles all the messy details of making sure -the requests are handled quickly, safely, and efficiently.

    - -

    The code to implement the page cache is contained in the single C -source file pager.c. The interface to the page cache subsystem -is defined by the header file pager.h. -

    - -

    OS Interface

    - -

    -In order to provide portability between POSIX and Win32 operating systems, -SQLite uses an abstraction layer to interface with the operating system. -The interface to the OS abstraction layer is defined in -os.h. Each supported operating system has its own implementation: -os_unix.c for Unix, os_win.c for windows, and so forth. -Each of these operating-specific implements typically has its own -header file: os_unix.h, os_win.h, etc. -

    - -

    Utilities

    - -

    -Memory allocation and caseless string comparison routines are located -in util.c. -Symbol tables used by the parser are maintained by hash tables found -in hash.c. The utf.c source file contains Unicode -conversion subroutines. -SQLite has its own private implementation of printf() (with -some extensions) in printf.c and its own random number generator -in random.c. -

    - -

    Test Code

    - -

    -If you count regression test scripts, -more than half the total code base of SQLite is devoted to testing. -There are many assert() statements in the main code files. -In additional, the source files test1.c through test5.c -together with md5.c implement extensions used for testing -purposes only. The os_test.c backend interface is used to -simulate power failures to verify the crash-recovery mechanism in -the pager. -

    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/asyncvfs.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/asyncvfs.html --- sqlite3-3.4.2/www/asyncvfs.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/asyncvfs.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,243 @@ + + +An Asynchronous I/O Module For SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    An Asynchronous I/O Module For SQLite

    + +

    Normally, when SQLite writes to a database file, it waits until the write +operation is finished before returning control to the calling application. +Since writing to the file-system is usually very slow compared with CPU +bound operations, this can be a performance bottleneck. The asynchronous I/O +backend is an extension that causes SQLite to perform all write requests +using a separate thread running in the background. Although this does not +reduce the overall system resources (CPU, disk bandwidth etc.), it does +allow SQLite to return control to the caller quickly even when writing to +the database. + +

    1.0 FUNCTIONALITY

    + +

    With asynchronous I/O, write requests are handled by a separate thread +running in the background. This means that the thread that initiates +a database write does not have to wait for (sometimes slow) disk I/O +to occur. The write seems to happen very quickly, though in reality +it is happening at its usual slow pace in the background. + +

    Asynchronous I/O appears to give better responsiveness, but at a price. +You lose the Durable property. With the default I/O backend of SQLite, +once a write completes, you know that the information you wrote is +safely on disk. With the asynchronous I/O, this is not the case. If +your program crashes or if a power loss occurs after the database +write but before the asynchronous write thread has completed, then the +database change might never make it to disk and the next user of the +database might not see your change. + +

You lose Durability with asynchronous I/O, but you still retain the +other parts of ACID: Atomic, Consistent, and Isolated. Many +applications get along fine without Durability. + +

    1.1 How it Works

    + +

    Asynchronous I/O works by creating an SQLite VFS object +and registering it with sqlite3_vfs_register(). +When files opened via +this VFS are written to (using the vfs xWrite() method), the data is not +written directly to disk, but is placed in the "write-queue" to be +handled by the background thread. + +

    When files opened with the asynchronous VFS are read from +(using the vfs xRead() method), the data is read from the file on +disk and the write-queue, so that from the point of view of +the vfs reader the xWrite() appears to have already completed. + +

    The asynchronous I/O VFS is registered (and unregistered) by calls to the +API functions sqlite3async_initialize() and sqlite3async_shutdown(). +See section "Compilation and Usage" below for details. + +

    1.2 Limitations

    + +

    In order to gain experience with the main ideas surrounding asynchronous +IO, this implementation is deliberately kept simple. Additional +capabilities may be added in the future. + +

For example, as currently implemented, if writes are happening in a +steady stream that exceeds the I/O capability of the background writer +thread, the queue of pending write operations will grow without bound. +If this goes on for long enough, the host system could run out of memory. +A more sophisticated module could keep track of the quantity of +pending writes and stop accepting new write requests when the queue of +pending writes grows too large. + +

    1.3 Locking and Concurrency

    + +

    Multiple connections from within a single process that use this +implementation of asynchronous IO may access a single database +file concurrently. From the point of view of the user, if all +connections are from within a single process, there is no difference +between the concurrency offered by "normal" SQLite and SQLite +using the asynchronous backend. + +

    If file-locking is enabled (it is enabled by default), then connections +from multiple processes may also read and write the database file. +However concurrency is reduced as follows: + +

      +
    • When a connection using asynchronous IO begins a database + transaction, the database is locked immediately. However the + lock is not released until after all relevant operations + in the write-queue have been flushed to disk. This means + (for example) that the database may remain locked for some + time after a "COMMIT" or "ROLLBACK" is issued. + +

• If an application using asynchronous IO executes transactions + in quick succession, other database users may be effectively + locked out of the database. This is because when a BEGIN + is executed, a database lock is established immediately. But + when the corresponding COMMIT or ROLLBACK occurs, the lock + is not released until the relevant part of the write-queue + has been flushed through. As a result, if a COMMIT is followed + by a BEGIN before the write-queue is flushed through, the database + is never unlocked, preventing other processes from accessing + the database. +

    + +

File-locking may be disabled at runtime using the sqlite3async_control() +API (see below). This may improve performance when using an NFS or other +network file-system, as the synchronous round-trips to the server +required to establish file locks are avoided. However, if multiple +connections attempt to access the same database file when file-locking +is disabled, application crashes and database corruption are a likely +outcome. + +

    2.0 COMPILATION AND USAGE

    + +

+The asynchronous IO extension consists of a single file of C code +(sqlite3async.c) and a header file (sqlite3async.h), located in the + +ext/async/ subfolder of the SQLite source tree, that defines the +C API used by applications to activate and control the module's +functionality. + +

    +To use the asynchronous IO extension, compile sqlite3async.c as +part of the application that uses SQLite. Then use the APIs defined +in sqlite3async.h to initialize and configure the module. + +

    +The asynchronous IO VFS API is described in detail in comments in +sqlite3async.h. Using the API usually consists of the following steps: + +

      +
    1. Register the asynchronous IO VFS with SQLite by calling the + sqlite3async_initialize() function. + +

    2. Create a background thread to perform write operations and call + sqlite3async_run(). + +

    3. Use the normal SQLite API to read and write to databases via + the asynchronous IO VFS. +

    + +

    Refer to comments in the + +sqlite3async.h header file for details. + + +
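A hedged sketch of those steps on a pthreads platform follows. The exact argument lists of sqlite3async_initialize() and sqlite3async_run() should be taken from sqlite3async.h; the parameters shown here (inherit the default VFS, make the asynchronous VFS the default) are assumptions for illustration only.

#include <pthread.h>
#include "sqlite3.h"
#include "sqlite3async.h"

/* Background thread: process the write-queue until shutdown. */
static void *asyncWriterThread(void *pArg){
  (void)pArg;
  sqlite3async_run();
  return 0;
}

int startAsyncIo(void){
  pthread_t tid;
  /* Register the asynchronous VFS (argument list assumed; see header). */
  int rc = sqlite3async_initialize(0, 1);
  if( rc!=SQLITE_OK ) return rc;
  /* Create the background writer thread required by step 2 above. */
  return pthread_create(&tid, 0, asyncWriterThread, 0);
}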

    3.0 PORTING

    + +

    Currently the asynchronous IO extension is compatible with win32 systems +and systems that support the pthreads interface, including Mac OSX, Linux, +and other varieties of Unix. + +

To port the asynchronous IO extension to another platform, the user must +implement mutex and condition variable primitives for the new platform. +Currently there is no externally available interface to allow this, but +modifying the code within sqlite3async.c to include the new platform's +concurrency primitives is relatively easy. Search within sqlite3async.c +for the comment string "PORTING FUNCTIONS" for details. Then implement +new versions of each of the following: + +

    +static void async_mutex_enter(int eMutex);
    +static void async_mutex_leave(int eMutex);
    +static void async_cond_wait(int eCond, int eMutex);
    +static void async_cond_signal(int eCond);
    +static void async_sched_yield(void);
    +
    + +

    The functionality required of each of the above functions is described +in comments in sqlite3async.c. +
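As a hedged illustration only, the following is one way the five porting functions might look on a platform that supports pthreads. The number of mutexes and condition variables and the index scheme are assumptions; the authoritative requirements are the comments in sqlite3async.c itself.

#include <pthread.h>
#include <sched.h>

/* Assumed pool sizes; the real counts are dictated by sqlite3async.c. */
static pthread_mutex_t aMutex[3] = { PTHREAD_MUTEX_INITIALIZER,
                                     PTHREAD_MUTEX_INITIALIZER,
                                     PTHREAD_MUTEX_INITIALIZER };
static pthread_cond_t aCond[3]   = { PTHREAD_COND_INITIALIZER,
                                     PTHREAD_COND_INITIALIZER,
                                     PTHREAD_COND_INITIALIZER };

static void async_mutex_enter(int eMutex){ pthread_mutex_lock(&aMutex[eMutex]); }
static void async_mutex_leave(int eMutex){ pthread_mutex_unlock(&aMutex[eMutex]); }
static void async_cond_wait(int eCond, int eMutex){
  pthread_cond_wait(&aCond[eCond], &aMutex[eMutex]);
}
static void async_cond_signal(int eCond){ pthread_cond_signal(&aCond[eCond]); }
static void async_sched_yield(void){ sched_yield(); }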


    +This page last modified 2009/05/06 15:16:49 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/atomiccommit.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/atomiccommit.html --- sqlite3-3.4.2/www/atomiccommit.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/atomiccommit.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,1364 @@ + + +Atomic Commit In SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +Atomic Commit In SQLite +

    + +

    1.0 Introduction

    + +

    An important feature of transactional databases like SQLite +is "atomic commit". +Atomic commit means that either all database changes within a single +transaction occur or none of them occur. With atomic commit, it +is as if many different writes to different sections of the database +file occur instantaneously and simultaneously. +Real hardware serializes writes to mass storage, and writing +a single sector takes a finite amount of time. +So it is impossible to truly write many different sectors of a +database file simultaneously and/or instantaneously. +But the atomic commit logic within +SQLite makes it appear as if the changes for a transaction +are all written instantaneously and simultaneously.

    + +

    SQLite has the important property that transactions appear +to be atomic even if the transaction is interrupted by an +operating system crash or power failure.

    + +

    This article describes the techniques used by SQLite to create the +illusion of atomic commit.

    + + +

    2.0 Hardware Assumptions

    + +

    Throughout this article, we will call the mass storage device "disk" +even though the mass storage device might really be flash memory.

    + +

    We assume that disk is written in chunks which we call a "sector". +It is not possible to modify any part of the disk smaller than a sector. +To change a part of the disk smaller than a sector, you have to read in +the full sector that contains the part you want to change, make the +change, then write back out the complete sector.

    + +

    On a traditional spinning disk, a sector is the minimum unit of transfer +in both directions, both reading and writing. On flash memory, however, +the minimum size of a read is typically much smaller than a minimum write. +SQLite is only concerned with the minimum write amount and so for the +purposes of this article, when we say "sector" we mean the minimum amount +of data that can be written to mass storage in a single go.

    + +

+ Prior to SQLite version 3.3.14, a sector size of 512 bytes was + assumed in all cases. There was a compile-time option to change + this but the code had never been tested with a larger value. The + 512 byte sector assumption seemed reasonable since until very recently + all disk drives used a 512 byte sector internally. However, there + has recently been a push to increase the sector size of disks to + 4096 bytes. Also the sector size + for flash memory is usually larger than 512 bytes. For these reasons, + versions of SQLite beginning with 3.3.14 have a method in the OS + interface layer that interrogates the underlying filesystem to find + the true sector size. As currently implemented (version 3.5.0) this + method still returns a hard-coded value of 512 bytes, since there + is no standard way of discovering the true sector size on either + Unix or Windows. But the method is available for embedded device + manufacturers to tweak according to their own needs. And we have + left open the possibility of filling in a more meaningful implementation + on Unix and Windows in the future.

    + +

    SQLite does not assume that a sector write is atomic. +However, it does assume that a sector write is linear. By "linear" +we mean that SQLite assumes that when writing a sector, the hardware begins +at one end of the data and writes byte by byte until it gets to +the other end. The write might go from beginning to end or from +end to beginning. If a power failure occurs in the middle of a +sector write it might be that part of the sector was modified +and another part was left unchanged. The key assumption by SQLite +is that if any part of the sector gets changed, then either the +first or the last bytes will be changed. So the hardware will +never start writing a sector in the middle and work towards the +ends. We do not know if this assumption is always true but it +seems reasonable.

    + +

    The previous paragraph states that SQLite does not assume that +sector writes are atomic. This is true by default. But as of +SQLite version 3.5.0, there is a new interface called the +Virtual File System (VFS) interface. The VFS is the only means +by which SQLite communicates to the underlying filesystem. The +code comes with default VFS implementations for Unix and Windows +and there is a mechanism for creating new custom VFS implementations +at runtime. In this new VFS interface there is a method called +xDeviceCharacteristics. This method interrogates the underlying +filesystem to discover various properties and behaviors that the +filesystem may or may not exhibit. The xDeviceCharacteristics +method might indicate that sector writes are atomic, and if it does +so indicate, SQLite will try to take advantage of that fact. But +the default xDeviceCharacteristics method for both Unix and Windows +does not indicate atomic sector writes and so these optimizations +are normally omitted.

    + +

    SQLite assumes that the operating system will buffer writes and +that a write request will return before data has actually been stored +in the mass storage device. +SQLite further assumes that write operations will be reordered by +the operating system. +For this reason, SQLite does a "flush" or "fsync" operation at key +points. SQLite assumes that the flush or fsync will not return until +all pending write operations for the file that is being flushed have +completed. We are told that the flush and fsync primitives +are broken on some versions of Windows and Linux. This is unfortunate. +It opens SQLite up to the possibility of database corruption following +a power loss in the middle of a commit. However, there is nothing +that SQLite can do to test for or remedy the situation. SQLite +assumes that the operating system that it is running on works as +advertised. If that is not quite the case, well then hopefully you +will not lose power too often.

    + +

SQLite assumes that when a file grows in length the new +file space originally contains garbage and then later is filled in +with the data actually written. In other words, SQLite assumes that +the file size is updated before the file content. This is a +pessimistic assumption and SQLite has to do some extra work to make +sure that it does not cause database corruption if power is lost +between the time when the file size is increased and when the +new content is written. The xDeviceCharacteristics method of +the VFS might indicate that the filesystem will always write the +data before updating the file size. (This is the +SQLITE_IOCAP_SAFE_APPEND property for those readers who are looking +at the code.) When the xDeviceCharacteristics method indicates +that file content is written before the file size is increased, +SQLite can forego some of its pedantic database protection steps +and thereby decrease the amount of disk I/O needed to perform a +commit. The current implementation, however, makes no such assumptions +for the default VFSes for Windows and Unix.
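For readers looking at the code, a custom VFS reports these capabilities through the xDeviceCharacteristics method of its sqlite3_io_methods object. The fragment below is a hedged sketch for a hypothetical device that offers atomic 512-byte writes and safe appends; it does not describe the default Unix or Windows VFSes, which advertise neither.

#include "sqlite3.h"

/* xDeviceCharacteristics implementation for a hypothetical device with
** atomic 512-byte sector writes and the safe-append property. */
static int myDeviceCharacteristics(sqlite3_file *pFile){
  (void)pFile;
  return SQLITE_IOCAP_ATOMIC512 | SQLITE_IOCAP_SAFE_APPEND;
}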

    + +

SQLite assumes that a file deletion is atomic from the +point of view of a user process. By this we mean that if SQLite +requests that a file be deleted and the power is lost during the +delete operation, once power is restored either the file will +exist completely with all of its original content unaltered, or +else the file will not be seen in the filesystem at all. If +after power is restored the file is only partially deleted, +if some of its data has been altered or erased, +or the file has been truncated but not completely removed, then +database corruption will likely result.

    + +

    SQLite assumes that the detection and/or correction of +bit errors caused by cosmic rays, thermal noise, quantum +fluctuations, device driver bugs, or other mechanisms, is the +responsibility of the underlying hardware and operating system. +SQLite does not add any redundancy to the database file for +the purpose of detecting corruption or I/O errors. +SQLite assumes that the data it reads is exactly the same data +that it previously wrote.

    + + +

    3.0 Single File Commit

    + +

    We begin with an overview of the steps SQLite takes in order to +perform an atomic commit of a transaction against a single database +file. The details of file formats used to guard against damage from +power failures and techniques for performing an atomic commit across +multiple databases are discussed in later sections.

    + +

    3.1 Initial State

    + + + +

The state of the computer when a database connection is +first opened is shown conceptually by the diagram at the +right. +The area of the diagram on the extreme right (labeled "Disk") represents +information stored on the mass storage device. Each rectangle is +a sector. The blue color represents that the sectors contain +original data. +The middle area is the operating system's disk cache. At the +onset of our example, the cache is cold and this is represented +by leaving the rectangles of the disk cache empty. +The left area of the diagram shows the content of memory for +the process that is using SQLite. The database connection has +just been opened and no information has been read yet, so the +user space is empty. +

    +
    + +

    3.2 Acquiring A Read Lock

    + + + +

    Before SQLite can write to a database, it must first read +the database to see what is there already. Even if it is just +appending new data, SQLite still has to read in the database +schema from the sqlite_master table so that it can know +how to parse the INSERT statements and discover where in the +database file the new information should be stored.

    + +

The first step toward reading from the database file +is obtaining a shared lock on the database file. A "shared" +lock allows two or more database connections to read from the +database file at the same time. But a shared lock prevents +another database connection from writing to the database file +while we are reading it. This is necessary because if another +database connection were writing to the database file at the +same time we are reading from the database file, we might read +some data before the change and other data after the change. +This would make it appear as if the change made by the other +process is not atomic.

    + +

    Notice that the shared lock is on the operating system +disk cache, not on the disk itself. File locks +really are just flags within the operating system kernel, +usually. (The details depend on the specific OS layer +interface.) Hence, the lock will instantly vanish if the +operating system crashes or if there is a power loss. It +is usually also the case that the lock will vanish if the +process that created the lock exits.

    + +
    + + +

    3.3 Reading Information Out Of The Database

    + + + +

    After the shared lock is acquired, we can begin reading +information from the database file. In this scenario, we +are assuming a cold cache, so information must first be +read from mass storage into the operating system cache then +transferred from operating system cache into user space. +On subsequent reads, some or all of the information might +already be found in the operating system cache and so only +the transfer to user space would be required.

    + +

    Usually only a subset of the pages in the database file +are read. In this example we are showing three +pages out of eight being read. In a typical application, a +database will have thousands of pages and a query will normally +only touch a small percentage of those pages.

    + +
    + +

    3.4 Obtaining A Reserved Lock

    + + + +

Before making changes to the database, SQLite first +obtains a "reserved" lock on the database file. A reserved +lock is similar to a shared lock in that both a reserved lock +and shared lock allow other processes to read from the database +file. A single reserved lock can coexist with multiple shared +locks from other processes. However, there can only be a +single reserved lock on the database file. Hence only a +single process can be attempting to write to the database +at one time.

    + +

The idea behind a reserved lock is that it signals that +a process intends to modify the database file in the near +future but has not yet started to make the modifications. +And because the modifications have not yet started, other +processes can continue to read from the database. However, +no other process should also begin trying to write to the +database.

    + +
    + +

    3.5 Creating A Rollback Journal File

    + + +

    Prior to making any changes to the database file, SQLite first +creates a separate rollback journal file and writes into the +rollback journal the original +content of the database pages that are to be altered. +The idea behind the rollback journal is that it contains +all information needed to restore the database back to +its original state.

    + +

    The rollback journal contains a small header (shown in green +in the diagram) that records the original size of the database +file. So if a change causes the database file to grow, we +will still know the original size of the database. The page +number is stored together with each database page that is +written into the rollback journal.

    + +

+ When a new file is created, most desktop operating systems + (Windows, Linux, Mac OS X) will not actually write anything to + disk. The new file is created in the operating system's disk + cache only. The file is not created on mass storage until sometime + later, when the operating system has a spare moment. This creates + the impression to users that I/O is happening much faster than + is possible when doing real disk I/O. We illustrate this idea in + the diagram to the right by showing that the new rollback journal + appears in the operating system disk cache only and not on the + disk itself.

    + +
    + +

    3.6 Changing Database Pages In User Space

    + + +

    After the original page content has been saved in the rollback +journal, the pages can be modified in user memory. Each database +connection has its own private copy of user space, so the changes +that are made in user space are only visible to the database connection +that is making the changes. Other database connections still see +the information in operating system disk cache buffers which have +not yet been changed. And so even though one process is busy +modifying the database, other processes can continue to read their +own copies of the original database content.

    + +
    + +

    3.7 Flushing The Rollback Journal File To Mass Storage

    + + +

The next step is to flush the content of the rollback journal +file to nonvolatile storage. +As we will see later, +this is a critical step in ensuring that the database can survive +an unexpected power loss. +This step also takes a lot of time, since writing to nonvolatile +storage is normally a slow operation.

    + +

    This step is usually more complicated than simply flushing +the rollback journal to the disk. On most platforms two separate +flush (or fsync()) operations are required. The first flush writes +out the base rollback journal content. Then the header of the +rollback journal is modified to show the number of pages in the +rollback journal. Then the header is flushed to disk. The details +on why we do this header modification and extra flush are provided +in a later section of this paper.

    + +
    + +

    3.8 Obtaining An Exclusive Lock

    + + +

    Prior to making changes to the database file itself, we must +obtain an exclusive lock on the database file. Obtaining an +exclusive lock is really a two-step process. First SQLite obtains +a "pending" lock. Then it escalates the pending lock to an +exclusive lock.

    + +

    A pending lock allows other processes that already have a +shared lock to continue reading the database file. But it +prevents new shared locks from being established. The idea +behind a pending lock is to prevent writer starvation caused +by a large pool of readers. There might be dozens, even hundreds, +of other processes trying to read the database file. Each process +acquires a shared lock before it starts reading, reads what it +needs, then releases the shared lock. If, however, there are +many different processes all reading from the same database, it +might happen that a new process always acquires its shared lock before +the previous process releases its shared lock. And so there is +never an instant when there are no shared locks on the database +file and hence there is never an opportunity for the writer to +seize the exclusive lock. A pending lock is designed to prevent +that cycle by allowing existing shared locks to proceed but +blocking new shared locks from being established. Eventually +all shared locks will clear and the pending lock will then be +able to escalate into an exclusive lock.

    + +
    + +

    3.9 Writing Changes To The Database File

    + + +

Once an exclusive lock is held, we know that no other +processes are reading from the database file and it is +safe to write changes into the database file. Usually +those changes only go as far as the operating system's disk +cache and do not make it all the way to mass storage.

    + +
    + +

    3.10 Flushing Changes To Mass Storage

    + + +

Another flush must occur to make sure that all the +database changes are written into nonvolatile storage. +This is a critical step to ensure that the database will +survive a power loss without damage. However, because +of the inherent slowness of writing to disk or flash memory, +this step together with the rollback journal file flush in section +3.7 above takes up most of the time required to complete a +transaction commit in SQLite.

    + +
    + +

    3.11 Deleting The Rollback Journal

    + + +

After the database changes are all safely on the mass +storage device, the rollback journal file is deleted. +This is the instant when the transaction commits. +If a power failure or system crash occurs prior to this +point, then recovery processes to be described later make +it appear as if no changes were ever made to the database +file. If a power failure or system crash occurs after +the rollback journal is deleted, then it appears as if +all changes have been written to disk. Thus, SQLite gives +the appearance of having made no changes to the database +file or having made the complete set of changes to the +database file depending on whether or not the rollback +journal file exists.

    + +

    Deleting a file is not really an atomic operation, but +it appears to be from the point of view of a user process. +A process is always able to ask the operating system "does +this file exist?" and the process will get back a yes or no +answer. After a power failure that occurs during a +transaction commit, SQLite will ask the operating system +whether or not the rollback journal file exists. If the +answer is "yes" then the transaction is incomplete and is +rolled back. If the answer is "no" then it means the transaction +did commit.

    + +

    The existence of a transaction depends on whether or +not the rollback journal file exists and the deletion +of a file appears to be an atomic operation from the point of +view of a user-space process. Therefore, +a transaction appears to be an atomic operation.

    + +

    The act of deleting a file is expensive on many systems. +As an optimization, SQLite can be configured to truncate +the journal file to zero bytes in length +or overwrite the journal file header with zeros. In either +case, the resulting journal file is no longer capable of rolling +back and so the transaction still commits. Truncating a file +to zero length, like deleting a file, is assumed to be an atomic +operation from the point of view of a user process. Overwriting +the header of the journal with zeros is not atomic, but if any +part of the header is malformed the journal will not roll back. +Hence, one can say that the commit occurs as soon as the header +is sufficiently changed to make it invalid. Typically this happens +as soon as the first byte of the header is zeroed.

    + +
    + +

    3.12 Releasing The Lock

    + + +

    The last step in the commit process is to release the +exclusive lock so that other processes can once again +start accessing the database file.

    + +

    In the diagram at the right, we show that the information +that was held in user space is cleared when the lock is released. +This used to be literally true for older versions of SQLite. But +more recent versions of SQLite keep the user space information +in memory in case it might be needed again at the start of the +next transaction. It is cheaper to reuse information that is +already in local memory than to transfer the information back +from the operating system disk cache or to read it off of the +disk drive again. Prior to reusing the information in user space, +we must first reacquire the shared lock and then we have to check +to make sure that no other process modified the database file while +we were not holding a lock. There is a counter in the first page +of the database that is incremented every time the database file +is modified. We can find out if another process has modified the +database by checking that counter. If the database was modified, +then the user space cache must be cleared and reread. But it is +commonly the case that no changes have been made and the user +space cache can be reused for a significant performance savings.

    + +
    +

    4.0 Rollback

    + +

    An atomic commit is supposed to happen instantaneously. But the processing +described above clearly takes a finite amount of time. +Suppose the power to the computer were cut +part way through the commit operation described above. In order +to maintain the illusion that the changes were instantaneous, we +have to "rollback" any partial changes and restore the database to +the state it was in prior to the beginning of the transaction.

    + +

    4.1 When Something Goes Wrong...

    + + +

    Suppose the power loss occurred during step 3.10 above, +while the database changes were being written to disk. +After power is restored, the situation might be something +like what is shown to the right. We were trying to change +three pages of the database file but only one page was +successfully written. Another page was partially written +and a third page was not written at all.

    + +

    The rollback journal is complete and intact on disk when +the power is restored. This is a key point. The reason for +the flush operation in step 3.7 is to make absolutely sure that +all of the rollback journal is safely on nonvolatile storage +prior to making any changes to the database file itself.

    + +
    + +

    4.2 Hot Rollback Journals

    + + +

    The first time that any SQLite process attempts to access +the database file, it obtains a shared lock as described in +section 3.2 above. But then it notices that there is a +rollback journal file present. SQLite then checks to see if +the rollback journal is a "hot journal". A hot journal is +a rollback journal that needs to be played back in order to +restore the database to a sane state. A hot journal only +exists when an earlier process was in the middle of committing +a transaction when it crashed or lost power.

    + +

    A rollback journal is a "hot" journal if all of the following +are true:

    + +
      +
    • The rollback journal exists. +
    • The rollback journal is not an empty file. +
    • There is no reserved lock on the main database file. +
    • The header of the rollback journal is well-formed and in particular + has not been zeroed out. +
    • The rollback journal does not +contain the name of a master journal file (see +section 5.5 below) or if it does +contain the name of a master journal, then that master journal +file exists. +
    + +

    The presence of a hot journal is our indication +that a previous process was trying to commit a transaction but +it aborted for some reason prior to the completion of the +commit. A hot journal means that +the database file is in an inconsistent state and needs to +be repaired (by rollback) prior to being used.

    + +
    +

    4.3 Obtaining An Exclusive Lock On The Database

    + + +

    The first step toward dealing with a hot journal is to +obtain an exclusive lock on the database file. This prevents two +or more processes from trying to rollback the same hot journal +at the same time.

    + +
    + +

    4.4 Rolling Back Incomplete Changes

    + + +

    Once a process obtains an exclusive lock, it is permitted +to write to the database file. It then proceeds to read the +original content of pages out of the rollback journal and write +that content back to where it came from in the database file. +Recall that the header of the rollback journal records the original +size of the database file prior to the start of the aborted +transaction. SQLite uses this information to truncate the +database file back to its original size in cases where the +incomplete transaction caused the database to grow. At the +end of this step, the database should be the same size and +contain the same information as it did before the start of +the aborted transaction.

    + +
    +

    4.5 Deleting The Hot Journal

    + + +

    After all information in the rollback journal has been +played back into the database file (and flushed to disk in case +we encounter yet another power failure), the hot rollback journal +can be deleted.

    + +

    As in section 3.11, the journal +file might be truncated to zero length or its header might +be overwritten with zeros as an optimization on systems where +deleting a file is expensive. Either way, the journal is no +longer hot after this step.

    + +
    +

    4.6 Continue As If The Uncompleted Writes Had Never Happened

    + + +

    The final recovery step is to reduce the exclusive lock back +to a shared lock. Once this happens, the database is back in the +state that it would have been if the aborted transaction had never +started. Since all of this recovery activity happens completely +automatically and transparently, it appears to the program using +SQLite as if the aborted transaction had never begun.

    + +
    +

    5.0 Multi-file Commit

    + +

    SQLite allows a single +database connection to talk to +two or more database files simultaneously through the use of +the ATTACH DATABASE command. +When multiple database files are modified within a single +transaction, all files are updated atomically. +In other words, either all of the database files are updated or +else none of them are. +Achieving an atomic commit across multiple database files is +more complex than doing so for a single file. This section +describes how SQLite works that bit of magic.
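
    As a concrete illustration (a hedged sketch; the table and file names
    are hypothetical), the following SQL touches both the main database and
    one ATTACHed database within a single transaction, and SQLite commits
    the changes to both files atomically:

        ATTACH DATABASE 'accounts.db' AS aux;
        BEGIN;
        UPDATE main.ledger SET balance = balance - 100 WHERE id = 1;
        UPDATE aux.ledger  SET balance = balance + 100 WHERE id = 7;
        COMMIT;  -- either both database files are updated or neither is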

    + +

    5.1 Separate Rollback Journals For Each Database

    + + +

    When multiple database files are involved in a transaction, +each database has its own rollback journal and each database +is locked separately. The diagram at the right shows a scenario +where three different database files have been modified within +one transaction. The situation at this step is analogous to +the single-file transaction scenario at +step 3.6. Each database file has +a reserved lock. For each database, the original content of pages +that are being changed has been written into the rollback journal +for that database, but the content of the journals has not yet +been flushed to disk. No changes have been made to the database +file itself yet, though presumably there are changes being held +in user memory.

    + +

    For brevity, the diagrams in this section are simplified from +those that came before. Blue color still signifies original content +and pink still signifies new content. But the individual pages +in the rollback journal and the database file are not shown and +we are not making the distinction between information in the +operating system cache and information that is on disk. All of +these factors still apply in a multi-file commit scenario. They +just take up a lot of space in the diagrams and they do not add +any new information, so they are omitted here.

    + +
    +

    5.2 The Master Journal File

    + + +

    The next step in a multi-file commit is the creation of a +"master journal" file. The name of the master journal file is +the same name as the original database filename (the database +that was opened using the +sqlite3_open() interface, +not one of the ATTACHed auxiliary +databases) with the text "-mjHHHHHHHH" appended where +HHHHHHHH is a random 32-bit hexadecimal number. The +random HHHHHHHH suffix changes for every new master journal.

    + +

    (Nota bene: The formula for computing the master journal filename +given in the previous paragraph corresponds to the implementation as +of SQLite version 3.5.0. But this formula is not part of the SQLite +specification and is subject to change in future releases.)

    + +

    Unlike the rollback journals, the master journal does not contain +any original database page content. Instead, the master journal contains +the full pathnames for rollback journals for every database that is +participating in the transaction.

    + +

    After the master journal is constructed, its content is flushed +to disk before any further actions are taken. On Unix, the directory +that contains the master journal is also synced in order to make sure +the master journal file will appear in the directory following a +power failure.

    + +
    +

    5.3 Updating Rollback Journal Headers

    + + +

    The next step is to record the full pathname of the master journal file +in the header of every rollback journal. Space to hold the master +journal filename was reserved at the beginning of each rollback journal +as the rollback journals were created.

    + +

    The content of each rollback journal is flushed to disk both before +and after the master journal filename is written into the rollback +journal header. It is important to do both of these flushes. Fortunately, +the second flush is usually inexpensive since typically only a single +page of the journal file (the first page) has changed.

    + +

    This step is analogous to +step 3.7 in the single-file commit +scenario described above.

    + +
    +

    5.4 Updating The Database Files

    + + +

    Once all rollback journal files have been flushed to disk, it +is safe to begin updating database files. We have to obtain an +exclusive lock on all database files before writing the changes. +After all the changes are written, it is important to flush the +changes to disk so that they will be preserved in the event of +a power failure or operating system crash.

    + +

    This step corresponds to steps +3.8, +3.9, and +3.10 in the single-file commit +scenario described previously.

    + + +
    + +

    5.5 Delete The Master Journal File

    + + +

    The next step is to delete the master journal file. +This is the point where the multi-file transaction commits. +This step corresponds to +step 3.11 in the single-file +commit scenario where the rollback journal is deleted.

    + +

    If a power failure or operating system crash occurs at this +point, the transaction will not roll back when the system reboots +even though there are rollback journals present. The +difference is the master journal pathname in the header of the +rollback journal. Upon restart, SQLite only considers a journal +to be hot and will only play back the journal if there is no +master journal filename in the header (which is the case for +a single-file commit) or if the master journal file still +exists on disk.

    + +
    +

    5.6 Clean Up The Rollback Journals

    + + +

    The final step in a multi-file commit is to delete the +individual rollback journals and drop the exclusive locks on +the database files so that other processes can see the changes. +This corresponds to +step 3.12 in the single-file +commit sequence.

    + +

    The transaction has already committed at this point so timing +is not critical in the deletion of the rollback journals. +The current implementation deletes a single rollback journal +then unlocks the corresponding database file before proceeding +to the next rollback journal. But in the future we might change +this so that all rollback journals are deleted before any database +files are unlocked. As long as the rollback journal is deleted before +its corresponding database file is unlocked it does not matter in what +order the rollback journals are deleted or the database files are +unlocked.

    + +

    6.0 Additional Details Of The Commit Process

    + +

    Section 3.0 above provides an overview of +how atomic commit works in SQLite. But it glosses over a number of +important details. The following subsections will attempt to fill +in the gaps.

    + +

    6.1 Always Journal Complete Sectors

    + +

    When the original content of a database page is written into +the rollback journal (as shown in section 3.5), +SQLite always writes a complete sector's worth of data, even if the +page size of the database is smaller than the sector size. +Historically, the sector size in SQLite has been hard coded to 512 +bytes and since the minimum page size is also 512 bytes, this has never +been an issue. But beginning with SQLite version 3.3.14, it is possible +for SQLite to use mass storage devices with a sector size larger than 512 +bytes. So, beginning with version 3.3.14, whenever any page within a +sector is written into the journal file, all pages in that same sector +are stored with it.

    + +

    It is important to store all pages of a sector in the rollback +journal in order to prevent database corruption following a power +loss while writing the sector. Suppose that pages 1, 2, 3, and 4 are +all stored in sector 1 and that page 2 is modified. In order to write +the changes to page 2, the underlying hardware must also rewrite the +content of pages 1, 3, and 4 since the hardware must write the complete +sector. If this write operation is interrupted by a power outage, +one or more of the pages 1, 3, or 4 might be left with incorrect data. +Hence, to avoid lasting corruption to the database, the original content +of all of those pages must be contained in the rollback journal.

    + +

    6.2 Dealing With Garbage Written Into Journal Files

    + +

    When data is appended to the end of the rollback journal, +SQLite normally makes the pessimistic assumption that the file +is first extended with invalid "garbage" data and that afterwards +the correct data replaces the garbage. In other words, SQLite assumes +that the file size is increased first and then afterwards the content +is written into the file. If a power failure occurs after the file +size has been increased but before the file content has been written, +the rollback journal can be left containing garbage data. If after +power is restored, another SQLite process sees the rollback journal +containing the garbage data and tries to roll it back into the original +database file, it might copy some of the garbage into the database file +and thus corrupt the database file.

    + +

    SQLite uses two defenses against this problem. In the first place, +SQLite records the number of pages in the rollback journal in the header +of the rollback journal. This number is initially zero. So during an +attempt to rollback an incomplete (and possibly corrupt) rollback +journal, the process doing the rollback will see that the journal +contains zero pages and will thus make no changes to the database. Prior +to a commit, the rollback journal is flushed to disk to ensure that +all content has been synced to disk and there is no "garbage" left +in the file, and only then is the page count in the header changed from +zero to the true number of pages in the rollback journal. The rollback journal +header is always kept in a separate sector from any page data so that +it can be overwritten and flushed without risking damage to a data +page if a power outage occurs. Notice that the rollback journal +is flushed to disk twice: once to write the page content and a second +time to write the page count in the header.

    + +

    The previous paragraph describes what happens when the +synchronous pragma setting is "full".

    + +
    +PRAGMA synchronous=FULL; +
    + +

    The default synchronous setting is full so the above is what usually +happens. However, if the synchronous setting is lowered to "normal", +SQLite only flushes the rollback journal once, after the page count has +been written. +This carries a risk of corruption because it might happen that the +modified (non-zero) page count reaches the disk surface before all +of the data does. The data will have been written first, but SQLite +assumes that the underlying filesystem can reorder write requests and +that the page count can be burned into oxide first even though its +write request occurred last. So as a second line of defense, SQLite +also uses a 32-bit checksum on every page of data in the rollback +journal. This checksum is evaluated for each page +while rolling back a journal as described in +section 4.4. If an incorrect checksum +is seen, the rollback is abandoned. Note that the checksum does +not guarantee that the page data is correct since there is a small +but finite probability that the checksum might be right even if the data is +corrupt. But the checksum does at least make such an error unlikely. +
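
    For reference, the setting discussed above can be lowered or restored
    from SQL; this is only an illustrative sketch, and FULL remains the
    default:

        PRAGMA synchronous=NORMAL;  -- journal flushed once per commit; relies on the checksums described below
        PRAGMA synchronous=FULL;    -- restore the safer default behavior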

    + +

    Note that the checksums in the rollback journal are not necessary +if the synchronous setting is FULL. We only depend on the checksums +when synchronous is lowered to NORMAL. Nevertheless, the checksums +never hurt and so they are included in the rollback journal regardless +of the synchronous setting.

    + +

    6.3 Cache Spill Prior To Commit

    + +

    The commit process shown in section 3.0 +assumes that all database changes fit in memory until it is time to +commit. This is the common case. But sometimes a larger change will +overflow the user-space cache prior to transaction commit. In those +cases, the cache must spill to the database before the transaction +is complete.

    + +

    At the beginning of a cache spill, the status of the database +connection is as shown in step 3.6. +Original page content has been saved in the rollback journal and +modifications of the pages exist in user memory. To spill the cache, +SQLite executes steps 3.7 through +3.9. In other words, the rollback journal +is flushed to disk, an exclusive lock is acquired, and changes are +written into the database. But the remaining steps are deferred +until the transaction really commits. A new journal header is +appended to the end of the rollback journal (in its own sector) +and the exclusive database lock is retained, but otherwise processing +returns to step 3.6. When the transaction +commits, or if another cache spill occurs, steps +3.7 and 3.9 are +repeated. (Step 3.8 is omitted on second +and subsequent passes since an exclusive database lock is already held +due to the first pass.)

    + +

    A cache spill causes the lock on the database file to +escalate from reserved to exclusive. This reduces concurrency. +A cache spill also causes extra disk flush or fsync operations to +occur and these operations are slow, hence a cache spill can +seriously reduce performance. +For these reasons a cache spill is avoided whenever possible.
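
    One way an application can make a cache spill less likely is to enlarge
    the page cache with the cache_size pragma. A hedged sketch; the value
    shown is arbitrary and the right size depends on the workload:

        PRAGMA cache_size=10000;  -- allow up to 10000 pages in the user-space cache before spilling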

    + +

    7.0 Optimizations

    + +

    Profiling indicates that for most systems and in most circumstances +SQLite spends most of its time doing disk I/O. It follows then that +anything we can do to reduce the amount of disk I/O will likely have a +large positive impact on the performance of SQLite. This section +describes some of the techniques used by SQLite to try to reduce the +amount of disk I/O to a minimum while still preserving atomic commit.

    + +

    7.1 Cache Retained Between Transactions

    + +

    Step 3.12 of the commit process shows +that once the shared lock has been released, all user-space cache +images of database content must be discarded. This is done because +without a shared lock, other processes are free to modify the database +file content and so any user-space image of that content might become +obsolete. Consequently, each new transaction would begin by rereading +data which had previously been read. This is not as bad as it sounds +at first since the data being read is still likely in the operating +system's file cache. So the "read" is really just a copy of data +from kernel space into user space. But even so, it still takes time.

    + +

    Beginning with SQLite version 3.3.14 a mechanism has been added +to try to reduce the needless rereading of data. In newer versions +of SQLite, the data in the user-space pager cache is retained when +the lock on the database file is released. Later, after the +shared lock is acquired at the beginning of the next transaction, +SQLite checks to see if any other process has modified the database +file. If the database has been changed in any way since the lock +was last released, the user-space cache is erased at that point. +But commonly the database file is unchanged and the user-space cache +can be retained, and some unnecessary read operations can be avoided.

    + +

    In order to determine whether or not the database file has changed, +SQLite uses a counter in the database header (in bytes 24 through 27) +which is incremented during every change operation. SQLite saves a copy +of this counter prior to releasing its database lock. Then after +acquiring the next database lock it compares the saved counter value +against the current counter value and erases the cache if the values +are different, or reuses the cache if they are the same.

    + + +

    7.2 Exclusive Access Mode

    + +

    SQLite version 3.3.14 adds the concept of "Exclusive Access Mode". +In exclusive access mode, SQLite retains the exclusive +database lock at the conclusion of each transaction. This prevents +other processes from accessing the database, but in many deployments +only a single process is using a database so this is not a +serious problem. The advantage of exclusive access mode is that +disk I/O can be reduced in three ways:

    + +
      +
    1. It is not necessary to increment the change counter in the +database header for transactions after the first transaction. This +will often save a write of page one to both the rollback +journal and the main database file.

    2. + +
    3. No other processes can change the database so there is never +a need to check the change counter and clear the user-space cache +at the beginning of a transaction.

    4. + +
    5. Each transaction can be committed by overwriting the rollback +journal header with zeros rather than deleting the journal file. +This avoids having to modify the directory entry for the journal file +and it avoids having to deallocate disk sectors associated with the +journal. Furthermore, the next transaction will overwrite existing +journal file content rather than append new content and on most systems +overwriting is much faster than appending.

    6. +
    + +

    The third optimization, zeroing the journal file header rather than +deleting the rollback journal file, +does not depend on holding an exclusive lock at all times. +This optimization can be set independently of exclusive lock mode +using the journal_mode +pragma as described in section 7.6 below.
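
    Exclusive access mode itself is selected with the locking_mode pragma,
    while the journal-zeroing optimization is controlled separately by the
    journal_mode pragma of section 7.6. A minimal sketch of enabling both:

        PRAGMA locking_mode=EXCLUSIVE;  -- retain the database lock between transactions
        PRAGMA journal_mode=PERSIST;    -- commit by zeroing the journal header instead of deleting it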

    + +

    7.3 Do Not Journal Freelist Pages

    + +

    When information is deleted from an SQLite database, the pages used +to hold the deleted information are added to a "freelist". Subsequent +inserts will draw pages off of this freelist rather than expanding the +database file.
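
    The size of the freelist can be observed from SQL with the read-only
    freelist_count pragma; a small sketch, where the table name and the
    DELETE condition are hypothetical:

        DELETE FROM log WHERE ts < '2009-01-01';  -- freed pages are added to the freelist
        PRAGMA freelist_count;                    -- number of unused pages currently in the database file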

    + +

    Some freelist pages contain critical data; specifically the locations +of other freelist pages. But most freelist pages contain nothing useful. +These latter freelist pages are called "leaf" pages. We are free to +modify the content of a leaf freelist page in the database without +changing the meaning of the database in any way.

    + +

    Because the content of leaf freelist pages is unimportant, SQLite +avoids storing leaf freelist page content in the rollback journal +in step 3.5 of the commit process. +If a leaf freelist page is changed and that change does not get rolled back +during a transaction recovery, the database is not harmed by the omission. +Similarly, the content of a new freelist page is never written back +into the database at step 3.9 nor +read from the database at step 3.3. +These optimizations can greatly reduce the amount of I/O that occurs +when making changes to a database file that contains free space.

    + +

    7.4 Single Page Updates And Atomic Sector Writes

    + +

    Beginning in SQLite version 3.5.0, the new Virtual File System (VFS) +interface contains a method named xDeviceCharacteristics which reports +on special properties that the underlying mass storage device +might have. Among the special properties that +xDeviceCharacteristics might report is the ability to do an +atomic sector write.

    + +

    Recall that by default SQLite assumes that sector writes are +linear but not atomic. A linear write starts at one end of the +sector and changes information byte by byte until it gets to the +other end of the sector. If a power loss occurs in the middle of +a linear write then part of the sector might be modified while the +other end is unchanged. In an atomic sector write, either the entire +sector is overwritten or else nothing in the sector is changed.

    + +

    We believe that most modern disk drives implement atomic sector +writes. When power is lost, the drive uses energy stored in capacitors +and/or the angular momentum of the disk platter to provide power to +complete any operation in progress. Nevertheless, there are so many +layers in between the write system call and the on-board disk drive +electronics that we take the safe approach in both Unix and w32 VFS +implementations and assume that sector writes are not atomic. On the +other hand, device +manufacturers with more control over their filesystems might want +to consider enabling the atomic write property of xDeviceCharacteristics +if their hardware really does do atomic writes.

    + +

    When sector writes are atomic and the page size of a database is +the same as a sector size, and when there is a database change that +only touches a single database page, then SQLite skips the whole +journaling and syncing process and simply writes the modified page +directly into the database file. The change counter in the first +page of the database file is modified separately since no harm is +done if power is lost before the change counter can be updated.
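
    The database page size can be matched to the device sector size with the
    page_size pragma. A sketch assuming a 4096-byte sector; the pragma only
    takes effect on a new database or on a subsequent VACUUM, and the
    optimization above still requires the VFS to report atomic-write
    capability:

        PRAGMA page_size=4096;  -- set before any content is written, or rebuild with VACUUM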

    + +

    7.5 Filesystems With Safe Append Semantics

    + +

    Another optimization introduced in SQLite version 3.5.0 makes +use of "safe append" behavior of the underlying disk. +Recall that SQLite assumes that when data is appended to a file +(specifically to the rollback journal) that the size of the file +is increased first and that the content is written second. So +if power is lost after the file size is increased but before the +content is written, the file is left containing invalid "garbage" +data. The xDeviceCharacteristics method of the VFS might, however, +indicate that the filesystem implements "safe append" semantics. +This means that the content is written before the file size is +increased so that it is impossible for garbage to be introduced +into the rollback journal by a power loss or system crash.

    + +

    When safe append semantics are indicated for a filesystem, +SQLite always stores the special value of -1 for the page count +in the header of the rollback journal. The -1 page count value +tells any process attempting to rollback the journal that the +number of pages in the journal should be computed from the journal +size. This -1 value is never changed. So when a commit +occurs, we save a single flush operation and a sector write of +the first page of the journal file. Furthermore, when a cache +spill occurs we no longer need to append a new journal header +to the end of the journal; we can simply continue appending +new pages to the end of the existing journal.

    + + +

    7.6 Persistent Rollback Journals

    + +

    Deleting a file is an expensive operation on many systems. +So as an optimization, SQLite can be configured to avoid the +delete operation of section 3.11. +Instead of deleting the journal file in order to commit a transaction, +the file is either truncated to zero bytes in length or its +header is overwritten with zeros. Truncating the file to zero +length saves having to make modifications to the directory containing +the file since the file is not removed from the directory. +Overwriting the header has the additional savings of not having +to update the length of the file (in the "inode" on many systems) +and not having to deal with newly freed disk sectors. Furthermore, +at the next transaction the journal will be created by overwriting +existing content rather than appending new content onto the end +of a file, and overwriting is often much faster than appending.

    + +

    SQLite can be configured to commit transactions by overwriting +the journal header with zeros instead of deleting the journal file +by setting the "PERSIST" journaling mode using the +journal_mode PRAGMA. +For example:

    + +
    +PRAGMA journal_mode=PERSIST;
    +
    + +

    The use of persistent journal mode provides a noticeable performance +improvement on many systems. Of course, the drawback is that the +journal files remain on the disk, using disk space and cluttering +directories, long after the transaction commits. The only safe way +to delete a persistent journal file is to commit a transaction +with journaling mode set to DELETE:

    + +
    +PRAGMA journal_mode=DELETE;
    +BEGIN EXCLUSIVE;
    +COMMIT;
    +
    + +

    Beware of deleting persistent journal files by any other means +since the journal file might be hot, in which case deleting it will +corrupt the corresponding database file.

    + +

    Beginning in SQLite version 3.6.4, the TRUNCATE journal mode is +also supported:

    + +
    +PRAGMA journal_mode=TRUNCATE;
    +
    + +

    In truncate journal mode, the transaction is committed by truncating +the journal file to zero length rather than deleting the journal file +(as in DELETE mode) or by zeroing the header (as in PERSIST mode). +TRUNCATE mode shares the advantage of PERSIST mode that the directory +that contains the journal file and database does not need to be updated. +Hence truncating a file is often faster than deleting it. TRUNCATE has +the additional advantage that it is not followed by a +system call (ex: fsync()) to synchronize the change to disk. It might +be safer if it did. +But on many modern filesystems, a truncate is an atomic and +synchronous operation and so we think that TRUNCATE will usually be safe +in the face of power failures. If you are uncertain about whether or +not TRUNCATE will be synchronous and atomic on your filesystem and it is +important to you that your database survive a power loss or operating +system crash that occurs during the truncation operation, then you might +consider using a different journaling mode.

    + +

    On embedded systems with synchronous filesystems, TRUNCATE results +in slower behavior than PERSIST. The commit operation is the same speed. +But subsequent transactions are slower following a TRUNCATE because it is +faster to overwrite existing content than to append to the end of a file. +New journal file entries will always be appended following a TRUNCATE but +will usually overwrite existing content following a PERSIST.

    + +

    8.0 Testing Atomic Commit Behavior

    + +

    The developers of SQLite are confident that it is robust +in the face of power failures and system crashes because the +automatic test procedures do extensive checks on +the ability of SQLite to recover from simulated power loss. +We call these the "crash tests".

    + +

    Crash tests in SQLite use a modified VFS that can simulate +the kinds of filesystem damage that occur during a power +loss or operating system crash. The crash-test VFS can simulate +incomplete sector writes, pages filled with garbage data because +a write has not completed, and out of order writes, all occurring +at varying points during a test scenario. Crash tests execute +transactions over and over, varying the time at which a simulated +power loss occurs and the properties of the damage inflicted. +Each test then reopens the database after the simulated crash and +verifies that the transaction either occurred completely +or not at all and that the database is in a completely +consistent state.

    + +

    The crash tests in SQLite have discovered a number of very +subtle bugs (now fixed) in the recovery mechanism. Some of +these bugs were very obscure and unlikely to have been found +using only code inspection and analysis techniques. From this +experience, the developers of SQLite feel confident that any other +database system that does not use a similar crash test system +likely contains undetected bugs that will lead to database +corruption following a system crash or power failure.

    + +

    9.0 Things That Can Go Wrong

    + +

    The atomic commit mechanism in SQLite has proven to be robust, +but it can be circumvented by a sufficiently creative +adversary or a sufficiently broken operating system implementation. +This section describes a few of the ways in which an SQLite database +might be corrupted by a power failure or system crash.

    + +

    9.1 Broken Locking Implementations

    + +

    SQLite uses filesystem locks to make sure that only one +process and database connection is trying to modify the database +at a time. The filesystem locking mechanism is implemented +in the VFS layer and is different for every operating system. +SQLite depends on this implementation being correct. If something +goes wrong and two or more processes are able to write the same +database file at the same time, severe damage can result.

    + +

    We have received reports of implementations of both +Windows network filesystems and NFS in which locking was +subtly broken. We cannot verify these reports, but as +locking is difficult to get right on a network filesystem +we have no reason to doubt them. You are advised to +avoid using SQLite on a network filesystem in the first place, +since performance will be slow. But if you must use a +network filesystem to store SQLite database files, consider +using a secondary locking mechanism to prevent simultaneous +writes to the same database even if the native filesystem +locking mechanism malfunctions.

    + +

    The version of SQLite that comes preinstalled on Apple +Mac OS X computers has been +extended to use alternative locking strategies that work on +all network filesystems that Apple supports. These extensions +used by Apple work great as long as all processes are accessing +the database file in the same way. Unfortunately, the locking +mechanisms do not exclude one another, so if one process is +accessing a file using (for example) AFP locking and another +process (perhaps on a different machine) is using dot-file locks, +the two processes might collide because AFP locks do not exclude +dot-file locks or vice versa.

    + +

    9.2 Incomplete Disk Flushes

    + +

    SQLite uses the fsync() system call on Unix and the FlushFileBuffers() +system call on w32 in order to sync the file system buffers onto disk +oxide as shown in step 3.7 and +step 3.10. Unfortunately, we have received +reports that neither of these interfaces works as advertised on many +systems. We hear that FlushFileBuffers() can be completely disabled +using registry settings on some Windows versions. Some historical +versions of Linux contain versions of fsync() which are no-ops on +some filesystems, we are told. Even on systems where +FlushFileBuffers() and fsync() are said to be working, often +the IDE disk controller lies and says that data has reached oxide +while it is still held only in the volatile controller cache.

    + +

    On the Mac, you can set this pragma:

    + +
    +PRAGMA fullfsync=ON; +
    + +

    Setting fullfsync on a Mac will guarantee that data really does +get pushed out to the disk platter on a flush. But the implementation +of fullfsync involves resetting the disk controller. And so not only +is it profoundly slow, it also slows down other unrelated disk I/O. +So its use is not recommended.

    + +

    9.3 Partial File Deletions

    + +

    SQLite assumes that file deletion is an atomic operation from the +point of view of a user process. If power fails in the middle of +a file deletion, then after power is restored SQLite expects to see +either the entire file with all of its original data intact, or it +expects not to find the file at all. Transactions may not be atomic +on systems that do not work this way.

    + +

    9.4 Garbage Written Into Files

    + +

    SQLite database files are ordinary disk files that can be +opened and written by ordinary user processes. A rogue process +can open an SQLite database and fill it with corrupt data. +Corrupt data might also be introduced into an SQLite database +by bugs in the operating system or disk controller; especially +bugs triggered by a power failure. There is nothing SQLite can +do to defend against these kinds of problems.

    + +

    9.5 Deleting Or Renaming A Hot Journal

    + +

    If a crash or power loss does occur and a hot journal is left on +the disk, it is essential that the original database file and the hot +journal remain on disk with their original names until the database +file is opened by another SQLite process and rolled back. +During recovery at step 4.2 SQLite locates +the hot journal by looking for a file in the same directory as the +database being opened and whose name is derived from the name of the +file being opened. If either the original database file or the +hot journal has been moved or renamed, then the hot journal will +not be seen and the database will not be rolled back.

    + +

    We suspect that a common failure mode for SQLite recovery happens +like this: A power failure occurs. After power is restored, a well-meaning +user or system administrator begins looking around on the disk for +damage. They see their database file named "important.data". This file +is perhaps familiar to them. But after the crash, there is also a +hot journal named "important.data-journal". The user then deletes +the hot journal, thinking that they are helping to clean up the system. +We know of no way to prevent this other than user education.

    + +

    If there are multiple (hard or symbolic) links to a database file, +the journal will be created using the name of the link through which +the file was opened. If a crash occurs and the database is opened again +using a different link, the hot journal will not be located and no +rollback will occur.

    + +

    Sometimes a power failure will cause a filesystem to be corrupted +such that recently changed filenames are forgotten and the file is +moved into a "/lost+found" directory. When that happens, the hot +journal will not be found and recovery will not occur. +SQLite tries to prevent this +by opening and syncing the directory containing the rollback journal +at the same time it syncs the journal file itself. However, the +movement of files into /lost+found can be caused by unrelated processes +creating unrelated files in the same directory as the main database file. +And since this is outside the control of SQLite, there is nothing +that SQLite can do to prevent it. If you are running on a system that +is vulnerable to this kind of filesystem namespace corruption (most +modern journalling filesystems are immune, we believe) then you might +want to consider putting each SQLite database file in its own private +subdirectory.

    + +

    10.0 Future Directions And Conclusion

    + +

    Every now and then someone discovers a new failure mode for +the atomic commit mechanism in SQLite and the developers have to +put in a patch. This is happening less and less and the +failure modes are becoming more and more obscure. But it would +still be foolish to suppose that the atomic commit logic of +SQLite is entirely bug-free. The developers are committed to fixing +these bugs as quickly as they might be found.

    + +

    +The developers are also on the lookout for new ways to +optimize the commit mechanism. The current VFS implementations +for Unix (Linux and Mac OS X) and Windows make pessimistic assumptions about +the behavior of those systems. After consultation with experts +on how these systems work, we might be able to relax some of the +assumptions on these systems and allow them to run faster. In +particular, we suspect that most modern filesystems exhibit the +safe append property and that many of them might support atomic +sector writes. But until this is known for certain, SQLite will +take the conservative approach and assume the worst.

    +
    +This page last modified 2009/03/19 00:04:36 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/audit.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/audit.html --- sqlite3-3.4.2/www/audit.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/audit.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,265 @@ + + +SQLite Security Audit Procedure + + + + + +

    +A security audit for SQLite consists of two components. First, there is +a check for common errors that often lead to security problems. Second, +an attempt is made to construct a proof that SQLite has certain desirable +security properties. +

    + +

    Part I: Things to check

    + +

    +Scan all source code and check for the following common errors: +

    + +
      +
    1. +Verify that the destination buffer is large enough to hold its result +in every call to the following routines: +

        +
      • strcpy()
      • +
      • strncpy()
      • +
      • strcat()
      • +
      • memcpy()
      • +
      • memset()
      • +
      • memmove()
      • +
      • bcopy()
      • +
      • sprintf()
      • +
      • scanf()
      • +
      +

    2. +
    3. +Verify that pointers returned by subroutines are not NULL before using +the pointers. In particular, make sure the return values for the following +routines are checked before they are used: +

        +
      • malloc()
      • +
      • realloc()
      • +
      • sqliteMalloc()
      • +
      • sqliteRealloc()
      • +
      • sqliteStrDup()
      • +
      • sqliteStrNDup()
      • +
      • sqliteExpr()
      • +
      • sqliteExprFunction()
      • +
      • sqliteExprListAppend()
      • +
      • sqliteResultSetOfSelect()
      • +
      • sqliteIdListAppend()
      • +
      • sqliteSrcListAppend()
      • +
      • sqliteSelectNew()
      • +
      • sqliteTableNameToTable()
      • +
      • sqliteTableTokenToSrcList()
      • +
      • sqliteWhereBegin()
      • +
      • sqliteFindTable()
      • +
      • sqliteFindIndex()
      • +
      • sqliteTableNameFromToken()
      • +
      • sqliteGetVdbe()
      • +
      • sqlite_mprintf()
      • +
      • sqliteExprDup()
      • +
      • sqliteExprListDup()
      • +
      • sqliteSrcListDup()
      • +
      • sqliteIdListDup()
      • +
      • sqliteSelectDup()
      • +
      • sqliteFindFunction()
      • +
      • sqliteTriggerSelectStep()
      • +
      • sqliteTriggerInsertStep()
      • +
      • sqliteTriggerUpdateStep()
      • +
      • sqliteTriggerDeleteStep()
      • +
      +

    4. +
    5. +On all functions and procedures, verify that pointer parameters are not NULL +before dereferencing those parameters. +

    6. +
    7. +Check to make sure that temporary files are opened safely: that the process +will not overwrite an existing file when opening the temp file and that +another process is unable to substitute a file for the temp file being +opened. +

    8. +
    + + + +

    Part II: Things to prove

    + +

    +Prove that SQLite exhibits the characteristics outlined below: +

    + +
      +
    1. +The following are preconditions:

      +

        +
      • Z is an arbitrary-length NUL-terminated string.
      • +
      • An existing SQLite database has been opened. The return value + from the call to sqlite_open() is stored in the variable + db.
      • +
      • The database contains at least one table of the form: +
        +CREATE TABLE t1(a CLOB);
        +
      • +
    • There are no user-defined functions other than the standard + built-in functions.
      • +

      +

      The following statement of C code is executed:

      +
      +sqlite_exec_printf(
      +   db,
      +   "INSERT INTO t1(a) VALUES('%q');", 
      +   0, 0, 0, Z
      +);
      +
      +

      Prove the following are true for all possible values of string Z:

      +
        +
      1. +The call to sqlite_exec_printf() will +return in a length of time that is a polynomial in strlen(Z). +It might return an error code but it will not crash. +

      2. +
      3. +At most one new row will be inserted into table t1. +

      4. +
      5. +No preexisting rows of t1 will be deleted or modified. +

      6. +
      7. +No tables other than t1 will be altered in any way. +

      8. +
    9. +No preexisting files on the host computer's filesystem, other than +the database file itself, will be deleted or modified. +

      10. +
      11. +For some constants K1 and K2, +if at least K1*strlen(Z) + K2 bytes of contiguous memory are +available to malloc(), then the call to sqlite_exec_printf() +will not return SQLITE_NOMEM. +

      12. +
      +

    2. + + +
    3. +The following are preconditions: +

        +
      • Z is an arbitrary-length NUL-terminated string.
      • +
      • An existing SQLite database has been opened. The return value + from the call to sqlite_open() is stored in the variable + db.
      • +
      • There exists a callback function cb() that appends all + information passed in through its parameters into a single + data buffer called Y.
      • +
    • There are no user-defined functions other than the standard + built-in functions.
      • +

      +

      The following statement of C code is executed:

      +
      +sqlite_exec(db, Z, cb, 0, 0);
      +
      +

      Prove the following are true for all possible values of string Z:

      +
        +
      1. +The call to sqlite_exec() will +return in a length of time which is a polynomial in strlen(Z). +It might return an error code but it will not crash. +

      2. +
    3. +After sqlite_exec() returns, the buffer Y will not contain +any content from any preexisting file on the host computer's file system, +except for the database file. +

      4. +
      5. +After the call to sqlite_exec() returns, the database file will +still be well-formed. It might not contain the same data, but it will +still be a properly constructed SQLite database file. +

      6. +
    7. +No preexisting files on the host computer's filesystem, other than +the database file itself, will be deleted or modified. +

      8. +
      9. +For some constants K1 and K2, +if at least K1*strlen(Z) + K2 bytes of contiguous memory are +available to malloc(), then the call to sqlite_exec() +will not return SQLITE_NOMEM. +

      10. +
      +

    4. + +
    +
    +This page last modified 2007/11/12 14:15:19 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/audit.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/audit.tcl --- sqlite3-3.4.2/www/audit.tcl 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/www/audit.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,214 +0,0 @@ -# -# Run this Tcl script to generate the audit.html file. -# -set rcsid {$Id: audit.tcl,v 1.1 2002/07/13 16:52:35 drh Exp $} - -puts { - - SQLite Security Audit Procedure - - -

    -SQLite Security Audit Procedure -

    } -puts "

    -(This page was last modified on [lrange $rcsid 3 4] UTC) -

    " - -puts { -

    -A security audit for SQLite consists of two components. First, there is -a check for common errors that often lead to security problems. Second, -an attempt is made to construct a proof that SQLite has certain desirable -security properties. -

    - -

    Part I: Things to check

    - -

    -Scan all source code and check for the following common errors: -

    - -
      -
    1. -Verify that the destination buffer is large enough to hold its result -in every call to the following routines: -

        -
      • strcpy()
      • -
      • strncpy()
      • -
      • strcat()
      • -
      • memcpy()
      • -
      • memset()
      • -
      • memmove()
      • -
      • bcopy()
      • -
      • sprintf()
      • -
      • scanf()
      • -
      -

    2. -
    3. -Verify that pointers returned by subroutines are not NULL before using -the pointers. In particular, make sure the return values for the following -routines are checked before they are used: -

        -
      • malloc()
      • -
      • realloc()
      • -
      • sqliteMalloc()
      • -
      • sqliteRealloc()
      • -
      • sqliteStrDup()
      • -
      • sqliteStrNDup()
      • -
      • sqliteExpr()
      • -
      • sqliteExprFunction()
      • -
      • sqliteExprListAppend()
      • -
      • sqliteResultSetOfSelect()
      • -
      • sqliteIdListAppend()
      • -
      • sqliteSrcListAppend()
      • -
      • sqliteSelectNew()
      • -
      • sqliteTableNameToTable()
      • -
      • sqliteTableTokenToSrcList()
      • -
      • sqliteWhereBegin()
      • -
      • sqliteFindTable()
      • -
      • sqliteFindIndex()
      • -
      • sqliteTableNameFromToken()
      • -
      • sqliteGetVdbe()
      • -
      • sqlite_mprintf()
      • -
      • sqliteExprDup()
      • -
      • sqliteExprListDup()
      • -
      • sqliteSrcListDup()
      • -
      • sqliteIdListDup()
      • -
      • sqliteSelectDup()
      • -
      • sqliteFindFunction()
      • -
      • sqliteTriggerSelectStep()
      • -
      • sqliteTriggerInsertStep()
      • -
      • sqliteTriggerUpdateStep()
      • -
      • sqliteTriggerDeleteStep()
      • -
      -

    4. -
    5. -On all functions and procedures, verify that pointer parameters are not NULL -before dereferencing those parameters. -

    6. -
    7. -Check to make sure that temporary files are opened safely: that the process -will not overwrite an existing file when opening the temp file and that -another process is unable to substitute a file for the temp file being -opened. -

    8. -
    - - - -

    Part II: Things to prove

    - -

    -Prove that SQLite exhibits the characteristics outlined below: -

    - -
      -
    1. -The following are preconditions:

      -

        -
      • Z is an arbitrary-length NUL-terminated string.
      • -
      • An existing SQLite database has been opened. The return value - from the call to sqlite_open() is stored in the variable - db.
      • -
      • The database contains at least one table of the form: -
        -CREATE TABLE t1(a CLOB);
        -
      • -
      • There are no user-defined functions other than the standard - build-in functions.
      • -

      -

      The following statement of C code is executed:

      -
      -sqlite_exec_printf(
      -   db,
      -   "INSERT INTO t1(a) VALUES('%q');", 
      -   0, 0, 0, Z
      -);
      -
      -

      Prove the following are true for all possible values of string Z:

      -
        -
      1. -The call to sqlite_exec_printf() will -return in a length of time that is a polynomial in strlen(Z). -It might return an error code but it will not crash. -

      2. -
      3. -At most one new row will be inserted into table t1. -

      4. -
      5. -No preexisting rows of t1 will be deleted or modified. -

      6. -
      7. -No tables other than t1 will be altered in any way. -

      8. -
      9. -No preexisting files on the host computers filesystem, other than -the database file itself, will be deleted or modified. -

      10. -
      11. -For some constants K1 and K2, -if at least K1*strlen(Z) + K2 bytes of contiguous memory are -available to malloc(), then the call to sqlite_exec_printf() -will not return SQLITE_NOMEM. -

      12. -
      -

    2. - - -
    3. -The following are preconditions: -

        -
      • Z is an arbitrary-length NUL-terminated string.
      • -
      • An existing SQLite database has been opened. The return value - from the call to sqlite_open() is stored in the variable - db.
      • -
      • There exists a callback function cb() that appends all - information passed in through its parameters into a single - data buffer called Y.
      • -
      • There are no user-defined functions other than the standard - build-in functions.
      • -

      -

      The following statement of C code is executed:

      -
      -sqlite_exec(db, Z, cb, 0, 0);
      -
      -

      Prove the following are true for all possible values of string Z:

      -
        -
      1. -The call to sqlite_exec() will -return in a length of time which is a polynomial in strlen(Z). -It might return an error code but it will not crash. -

      2. -
      3. -After sqlite_exec() returns, the buffer Y will not contain -any content from any preexisting file on the host computers file system, -except for the database file. -

      4. -
      5. -After the call to sqlite_exec() returns, the database file will -still be well-formed. It might not contain the same data, but it will -still be a properly constructed SQLite database file. -

      6. -
      7. -No preexisting files on the host computers filesystem, other than -the database file itself, will be deleted or modified. -

      8. -
      9. -For some constants K1 and K2, -if at least K1*strlen(Z) + K2 bytes of contiguous memory are -available to malloc(), then the call to sqlite_exec() -will not return SQLITE_NOMEM. -

      10. -
      -

    4. - -
    -} -puts { -


    -

    -Back to the SQLite Home Page -

    - -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/autoinc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/autoinc.html --- sqlite3-3.4.2/www/autoinc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/autoinc.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,177 @@ + + +SQLite Autoincrement + + + + + +

    SQLite Autoincrement

    + + +

    +In SQLite, every row of every table has a 64-bit signed integer ROWID. +The ROWID for each row is unique among all rows in the same table. +

    + +

    +You can access the ROWID of an SQLite table using one of the special column +names ROWID, _ROWID_, or OID. +If, however, you declare an ordinary table column to use one of those special +names, then the use of that name will refer to the declared column, not +to the internal ROWID. +

    + +

    +If a table contains a column of type INTEGER PRIMARY KEY, then that +column becomes an alias for the ROWID. You can then access the ROWID +using any of four different names, the original three names described above +or the name given to the INTEGER PRIMARY KEY column. All these names are +aliases for one another and work equally well in any context. +
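
    A small sketch of the aliasing, using a hypothetical table; all four
    names in the SELECT below return the same value for each row:

        CREATE TABLE t(x INTEGER PRIMARY KEY, y TEXT);
        INSERT INTO t(y) VALUES('abc');
        SELECT rowid, _rowid_, oid, x FROM t;  -- four names, one underlying ROWID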

    + +

    +When a new row is inserted into an SQLite table, the ROWID can either +be specified as part of the INSERT statement or it can be assigned +automatically by the database engine. To specify a ROWID manually, +just include it in the list of values to be inserted. For example: +

    + +
    +CREATE TABLE test1(a INT, b TEXT);
    +INSERT INTO test1(rowid, a, b) VALUES(123, 5, 'hello');
    +
    + +

    +If no ROWID is specified on the insert, an appropriate ROWID is created +automatically. The usual algorithm is to give the newly created row +a ROWID that is one larger than the largest ROWID in the table prior +to the insert. If the table is initially empty, then a ROWID of 1 is +used. If the largest ROWID is equal to the largest possible integer +(9223372036854775807) then the database +engine starts picking candidate ROWIDs at random until it finds one +that is not previously used. +
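+ The following is an illustrative C fragment (not part of the original page;
+ the function name is hypothetical and error handling is omitted) showing the
+ automatic case: the row is inserted without a ROWID and the value chosen by
+ the database engine is then read back with sqlite3_last_insert_rowid().
+
+#include <stdio.h>
+#include <sqlite3.h>
+
+/* Insert a row into the test1 table created above without specifying a
+** ROWID, then report the ROWID that SQLite assigned automatically. */
+static void insertAndReport(sqlite3 *db){
+  sqlite3_exec(db, "INSERT INTO test1(a, b) VALUES(6, 'world');", 0, 0, 0);
+  printf("assigned ROWID = %lld\n", (long long)sqlite3_last_insert_rowid(db));
+}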

    + +

    +The normal ROWID selection algorithm described above +will generate monotonically increasing +unique ROWIDs as long as you never use the maximum ROWID value and you never +delete the entry in the table with the largest ROWID. +If you ever delete rows or if you ever create a row with the maximum possible +ROWID, then ROWIDs from previously deleted rows might be reused when creating +new rows and newly created ROWIDs might not be in strictly ascending order. +

    + + +

    The AUTOINCREMENT Keyword

    + +

    +If a column has the type INTEGER PRIMARY KEY AUTOINCREMENT then a slightly +different ROWID selection algorithm is used. +The ROWID chosen for the new row is at least one larger than the largest ROWID +that has ever before existed in that same table. If the table has never +before contained any data, then a ROWID of 1 is used. If the table +has previously held a row with the largest possible ROWID, then new INSERTs +are not allowed and any attempt to insert a new row will fail with an +SQLITE_FULL error. +

    + +

    +SQLite keeps track of the largest ROWID that a table has ever held using +the special SQLITE_SEQUENCE table. The SQLITE_SEQUENCE table is created +and initialized automatically whenever a normal table that contains an +AUTOINCREMENT column is created. The content of the SQLITE_SEQUENCE table +can be modified using ordinary UPDATE, INSERT, and DELETE statements. +But making modifications to this table will likely perturb the AUTOINCREMENT +key generation algorithm. Make sure you know what you are doing before +you undertake such changes. +
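+ As an illustration (a hypothetical fragment, not part of the original page;
+ headers and error checking are omitted as in the sketch above, and the table
+ name t1 is arbitrary), the per-table counter can be inspected with an
+ ordinary query against SQLITE_SEQUENCE:
+
+/* Create a table that uses AUTOINCREMENT, insert one row, then read the
+** largest-ROWID-ever-used value that SQLite records in sqlite_sequence. */
+static void showSequence(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  sqlite3_exec(db, "CREATE TABLE t1(id INTEGER PRIMARY KEY AUTOINCREMENT, x);", 0, 0, 0);
+  sqlite3_exec(db, "INSERT INTO t1(x) VALUES('first');", 0, 0, 0);
+  sqlite3_prepare_v2(db, "SELECT seq FROM sqlite_sequence WHERE name='t1';", -1, &pStmt, 0);
+  if( sqlite3_step(pStmt)==SQLITE_ROW ){
+    printf("largest ROWID ever held by t1: %d\n", sqlite3_column_int(pStmt, 0));
+  }
+  sqlite3_finalize(pStmt);
+}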

    + +

    +The behavior implemented by the AUTOINCREMENT keyword is subtly different +from the default behavior. With AUTOINCREMENT, rows with automatically +selected ROWIDs are guaranteed to have ROWIDs that have never been used +before by the same table in the same database. And the automatically generated +ROWIDs are guaranteed to be monotonically increasing. These are important +properties in certain applications. But if your application does not +need these properties, you should probably stay with the default behavior +since the use of AUTOINCREMENT requires additional work to be done +as each row is inserted and thus causes INSERTs to run a little slower. +

    + +

    Note that "monotonically increasing" does not imply that the ROWID always +increases by exactly one. One is the usual increment. However, if an +insert fails due to (for example) a uniqueness constraint, the ROWID of +the failed insertion attempt might not be reused on subsequent inserts, +resulting in gaps in the ROWID sequence. AUTOINCREMENT guarantees that +automatically chosen ROWIDs will be increasing but not that they will be +sequential.

    +
    +This page last modified 2008/12/15 21:18:01 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/autoinc.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/autoinc.tcl --- sqlite3-3.4.2/www/autoinc.tcl 2004-11-21 01:02:01.000000000 +0000 +++ sqlite3-3.6.16/www/autoinc.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -# -# Run this Tcl script to generate the autoinc.html file. -# -set rcsid {$Id: } -source common.tcl - -if {[llength $argv]>0} { - set outputdir [lindex $argv 0] -} else { - set outputdir "" -} - -header {SQLite Autoincrement} -puts { -

    SQLite Autoincrement

    - -

    -In SQLite, every row of every table has an integer ROWID. -The ROWID for each row is unique among all rows in the same table. -In SQLite version 2.8 the ROWID is a 32-bit signed integer. -Version 3.0 of SQLite expanded the ROWID to be a 64-bit signed integer. -

    - -

    -You can access the ROWID of an SQLite table using one of the special column -names ROWID, _ROWID_, or OID. -Except if you declare an ordinary table column to use one of those special -names, then the use of that name will refer to the declared column, not -to the internal ROWID. -

    - -

    -If a table contains a column of type INTEGER PRIMARY KEY, then that -column becomes an alias for the ROWID. You can then access the ROWID -using any of four different names, the original three names described above -or the name given to the INTEGER PRIMARY KEY column. All these names are -aliases for one another and work equally well in any context. -

    - -

    -When a new row is inserted into an SQLite table, the ROWID can either -be specified as part of the INSERT statement or it can be assigned -automatically by the database engine. To specify a ROWID manually, -just include it in the list of values to be inserted. For example: -

    - -
    -CREATE TABLE test1(a INT, b TEXT);
    -INSERT INTO test1(rowid, a, b) VALUES(123, 5, 'hello');
    -
    - -

    -If no ROWID is specified on the insert, an appropriate ROWID is created -automatically. The usual algorithm is to give the newly created row -a ROWID that is one larger than the largest ROWID in the table prior -to the insert. If the table is initially empty, then a ROWID of 1 is -used. If the largest ROWID is equal to the largest possible integer -(9223372036854775807 in SQLite version 3.0 and later) then the database -engine starts picking candidate ROWIDs at random until it finds one -that is not previously used. -

    - -

    -The normal ROWID selection algorithm described above -will generate monotonically increasing -unique ROWIDs as long as you never use the maximum ROWID value and you never -delete the entry in the table with the largest ROWID. -If you ever delete rows or if you ever create a row with the maximum possible -ROWID, then ROWIDs from previously deleted rows might be reused when creating -new rows and newly created ROWIDs might not be in strictly ascending order. -

    - - -

    The AUTOINCREMENT Keyword

    - -

    -If a column has the type INTEGER PRIMARY KEY AUTOINCREMENT then a slightly -different ROWID selection algorithm is used. -The ROWID chosen for the new row is one larger than the largest ROWID -that has ever before existed in that same table. If the table has never -before contained any data, then a ROWID of 1 is used. If the table -has previously held a row with the largest possible ROWID, then new INSERTs -are not allowed and any attempt to insert a new row will fail with an -SQLITE_FULL error. -

    - -

    -SQLite keeps track of the largest ROWID that a table has ever held using -the special SQLITE_SEQUENCE table. The SQLITE_SEQUENCE table is created -and initialized automatically whenever a normal table that contains an -AUTOINCREMENT column is created. The content of the SQLITE_SEQUENCE table -can be modified using ordinary UPDATE, INSERT, and DELETE statements. -But making modifications to this table will likely perturb the AUTOINCREMENT -key generation algorithm. Make sure you know what you are doing before -you undertake such changes. -

    - -

    -The behavior implemented by the AUTOINCREMENT keyword is subtly different -from the default behavior. With AUTOINCREMENT, rows with automatically -selected ROWIDs are guaranteed to have ROWIDs that have never been used -before by the same table in the same database. And the automatically generated -ROWIDs are guaranteed to be monotonically increasing. These are important -properties in certain applications. But if your application does not -need these properties, you should probably stay with the default behavior -since the use of AUTOINCREMENT requires additional work to be done -as each row is inserted and thus causes INSERTs to run a little slower. -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/backup.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/backup.html --- sqlite3-3.4.2/www/backup.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/backup.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,430 @@ + + +SQLite Backup API + + + + + +

    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + +

    Using the SQLite Online Backup API

    + +

    + Historically, backups (copies) of SQLite databases have been created + using the following method: + +

      +
    1. Establish a shared lock on the database file using the SQLite API (e.g. + the shell tool). +
    2. Copy the database file using an external tool (for example the unix 'cp' + utility or the DOS 'copy' command). +
    3. Relinquish the shared lock on the database file obtained in step 1. +
    + +

    + This procedure works well in many scenarios and is usually very + fast. However, this technique has the following shortcomings: + +

      +
    • Any database clients wishing to write to the database file while a + backup is being created must wait until the shared lock is + relinquished. + +
    • It cannot be used to copy data to or from in-memory databases. + +
    • If a power failure or operating system failure occurs while copying + the database file, the backup database may be corrupted following + system recovery. +
    + +

    + The Online Backup API was created to + address these concerns. The online backup API allows the contents of + one database to be copied into another database, overwriting the + original contents of the target database. The copy operation may be + done incrementally, in which case the source database does not need + to be locked for the duration of the copy, only for the brief periods + of time when it is actually being read from. This allows other database + users to continue uninterrupted while a backup of an online database is + made. + +

    + The online backup API is documented here. + The remainder of this page contains two C language examples illustrating + common uses of the API and discussions thereof. Reading these examples + is no substitute for reading the API documentation! + +

    Example 1: Loading and Saving In-Memory Databases

    + +
    +/*
    +** This function is used to load the contents of a database file on disk 
    +** into the "main" database of open database connection pInMemory, or
    +** to save the current contents of the database opened by pInMemory into
    +** a database file on disk. pInMemory is probably an in-memory database, 
    +** but this function will also work fine if it is not.
    +**
    +** Parameter zFilename points to a nul-terminated string containing the
    +** name of the database file on disk to load from or save to. If parameter
    +** isSave is non-zero, then the contents of the file zFilename are 
    +** overwritten with the contents of the database opened by pInMemory. If
    +** parameter isSave is zero, then the contents of the database opened by
    +** pInMemory are replaced by data loaded from the file zFilename.
    +**
    +** If the operation is successful, SQLITE_OK is returned. Otherwise, if
    +** an error occurs, an SQLite error code is returned.
    +*/
    +int loadOrSaveDb(sqlite3 *pInMemory, const char *zFilename, int isSave){
    +  int rc;                   /* Function return code */
    +  sqlite3 *pFile;           /* Database connection opened on zFilename */
    +  sqlite3_backup *pBackup;  /* Backup object used to copy data */
    +  sqlite3 *pTo;             /* Database to copy to (pFile or pInMemory) */
    +  sqlite3 *pFrom;           /* Database to copy from (pFile or pInMemory) */
    +
    +  /* Open the database file identified by zFilename. Exit early if this fails
    +  ** for any reason. */
    +  rc = sqlite3_open(zFilename, &pFile);
    +  if( rc==SQLITE_OK ){
    +
    +    /* If this is a 'load' operation (isSave==0), then data is copied
    +    ** from the database file just opened to database pInMemory. 
    +    ** Otherwise, if this is a 'save' operation (isSave==1), then data
    +    ** is copied from pInMemory to pFile.  Set the variables pFrom and
    +    ** pTo accordingly. */
    +    pFrom = (isSave ? pInMemory : pFile);
    +    pTo   = (isSave ? pFile     : pInMemory);
    +
    +    /* Set up the backup procedure to copy from the "main" database of 
+    ** connection pFrom to the main database of connection pTo.
+    ** If something goes wrong, pBackup will be set to NULL and an error
+    ** code and message left in connection pTo.
+    **
+    ** If the backup object is successfully created, call backup_step()
+    ** to copy data from pFrom to pTo. Then call backup_finish()
+    ** to release resources associated with the pBackup object.  If an
+    ** error occurred, then an error code and message will be left in
+    ** connection pTo. If no error occurred, then the error code belonging
    +    ** to pTo is set to SQLITE_OK.
    +    */
    +    pBackup = sqlite3_backup_init(pTo, "main", pFrom, "main");
    +    if( pBackup ){
    +      (void)sqlite3_backup_step(pBackup, -1);
    +      (void)sqlite3_backup_finish(pBackup);
    +    }
    +    rc = sqlite3_errcode(pTo);
    +  }
    +
    +  /* Close the database connection opened on database file zFilename
    +  ** and return the result of this function. */
    +  (void)sqlite3_close(pFile);
    +  return rc;
    +}
    +
    + + +

    + The C function above demonstrates one of the simplest + and most common uses of the backup API: loading and saving the contents + of an in-memory database to a file on disk. The backup API is used as + follows in this example: + +

      +
    1. Function sqlite3_backup_init() is called to create an sqlite3_backup + object to copy data between the two databases (either from a file and + into the in-memory database, or vice-versa). +
    2. Function sqlite3_backup_step() is called with a parameter of + -1 to copy the entire source database to the destination. +
    3. Function sqlite3_backup_finish() is called to clean up resources + allocated by sqlite3_backup_init(). +
    + +

    Error handling + +

    + If an error occurs in any of the three main backup API routines + then the error code and message are attached to + the destination database connection. + Additionally, if + sqlite3_backup_step() encounters an error, then the error code is returned + by both the sqlite3_backup_step() call itself, and by the subsequent call + to sqlite3_backup_finish(). So a call to sqlite3_backup_finish() + does not overwrite an error code stored in the destination + database connection by sqlite3_backup_step(). This feature + is used in the example code to reduce the amount of error handling required. + The return values of the sqlite3_backup_step() and sqlite3_backup_finish() + calls are ignored and the error code indicating the success or failure of + the copy operation is collected from the destination database connection + afterward. +
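+ As an illustration only (a hypothetical variant of the example, not part of
+ the original page; the rcStep and rcFinish names are arbitrary), the same
+ copy could be driven with explicit return-code checks. sqlite3_backup_step()
+ returns SQLITE_DONE once the copy is complete, and sqlite3_backup_finish()
+ returns SQLITE_OK only if no error occurred:
+
+    /* Explicit-check variant (sketch only). */
+    int rcStep = sqlite3_backup_step(pBackup, -1);   /* SQLITE_DONE on success */
+    int rcFinish = sqlite3_backup_finish(pBackup);   /* SQLITE_OK, or the step() error */
+    if( rcStep!=SQLITE_DONE || rcFinish!=SQLITE_OK ){
+      /* The copy failed; rcFinish (or sqlite3_errcode() on the destination
+      ** connection) identifies the error. */
+    }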

    Possible Enhancements + +

    + The implementation of this function could be enhanced in at least two ways: + +

      +
    1. Failing to obtain the lock on database file zFilename (an SQLITE_BUSY + error) could be handled, and +
    2. Cases where the page-sizes of database pInMemory and zFilename are + different could be handled better. +
    + +

    + Since database zFilename is a file on disk, it may be accessed + externally by another process. This means that when the call to + sqlite3_backup_step() attempts to read data from or write data to it, it may + fail to obtain the required file lock. If this happens, this implementation + will fail, returning SQLITE_BUSY immediately. The solution would be to + register a busy-handler callback or + timeout with database connection pFile + using sqlite3_busy_handler() or sqlite3_busy_timeout() + as soon as it is opened. If it fails to obtain a required lock immediately, + sqlite3_backup_step() uses any registered busy-handler callback or timeout + in the same way as sqlite3_step() or sqlite3_exec() does. +
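+ A minimal sketch of that enhancement (illustrative only, with an arbitrary
+ 2000 ms timeout) would add a single call immediately after the sqlite3_open()
+ call in loadOrSaveDb():
+
+    /* Hypothetical enhancement: wait up to two seconds for file locks so
+    ** that sqlite3_backup_step() retries instead of returning SQLITE_BUSY. */
+    sqlite3_busy_timeout(pFile, 2000);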

    + Usually, it does not matter if the page-sizes of the source database and the + destination database are different before the contents of the destination + are overwritten. The page-size of the destination database is simply changed + as part of the backup operation. The exception is if the destination database + happens to be an in-memory database. In this case, if the page sizes + are not the same at the start of the backup operation, then the operation + fails with an SQLITE_READONLY error. Unfortunately, this could occur when + loading a database image from a file into an in-memory database using + function loadOrSaveDb(). + +

    + However, if in-memory database pInMemory has just been opened (and is + therefore completely empty) before being passed to function loadOrSaveDb(), + then it is still possible to change its page size using an SQLite "PRAGMA + page_size" command. Function loadOrSaveDb() could detect this case, and + attempt to set the page-size of the in-memory database to the page-size + of database zFilename before invoking the online backup API functions. + +
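+ A sketch of that second enhancement follows (illustrative only; it assumes it
+ runs inside loadOrSaveDb() before sqlite3_backup_init() while pInMemory is
+ still empty, and the zPgSz buffer name is hypothetical):
+
+    /* Copy the page size of the disk database to the still-empty in-memory
+    ** database so that the subsequent backup does not fail because of
+    ** mismatched page sizes. */
+    sqlite3_stmt *pStmt = 0;
+    if( sqlite3_prepare_v2(pFile, "PRAGMA page_size;", -1, &pStmt, 0)==SQLITE_OK
+     && sqlite3_step(pStmt)==SQLITE_ROW
+    ){
+      char zPgSz[64];
+      sqlite3_snprintf(sizeof(zPgSz), zPgSz,
+          "PRAGMA page_size = %d;", sqlite3_column_int(pStmt, 0));
+      sqlite3_exec(pInMemory, zPgSz, 0, 0, 0);
+    }
+    sqlite3_finalize(pStmt);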

    Example 2: Online Backup of a Running Database

    + +
    +/*
    +** Perform an online backup of database pDb to the database file named
    +** by zFilename. This function copies 5 database pages from pDb to
    +** zFilename, then unlocks pDb and sleeps for 250 ms, then repeats the
    +** process until the entire database is backed up.
    +** 
    +** The third argument passed to this function must be a pointer to a progress
    +** function. After each set of 5 pages is backed up, the progress function
    +** is invoked with two integer parameters: the number of pages left to
    +** copy, and the total number of pages in the source file. This information
    +** may be used, for example, to update a GUI progress bar.
    +**
    +** While this function is running, another thread may use the database pDb, or
    +** another process may access the underlying database file via a separate 
    +** connection.
    +**
    +** If the backup process is successfully completed, SQLITE_OK is returned.
    +** Otherwise, if an error occurs, an SQLite error code is returned.
    +*/
    +int backupDb(
    +  sqlite3 *pDb,               /* Database to back up */
    +  const char *zFilename,      /* Name of file to back up to */
    +  void(*xProgress)(int, int)  /* Progress function to invoke */     
    +){
    +  int rc;                     /* Function return code */
    +  sqlite3 *pFile;             /* Database connection opened on zFilename */
    +  sqlite3_backup *pBackup;    /* Backup handle used to copy data */
    +
    +  /* Open the database file identified by zFilename. */
    +  rc = sqlite3_open(zFilename, &pFile);
    +  if( rc==SQLITE_OK ){
    +
    +    /* Open the sqlite3_backup object used to accomplish the transfer */
    +    pBackup = sqlite3_backup_init(pFile, "main", pDb, "main");
    +    if( pBackup ){
    +
    +      /* Each iteration of this loop copies 5 database pages from database
    +      ** pDb to the backup database. If the return value of backup_step()
    +      ** indicates that there are still further pages to copy, sleep for
    +      ** 250 ms before repeating. */
    +      do {
    +        rc = sqlite3_backup_step(pBackup, 5);
    +        xProgress(
    +            sqlite3_backup_remaining(pBackup),
    +            sqlite3_backup_pagecount(pBackup)
    +        );
    +        if( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
    +          sqlite3_sleep(250);
    +        }
    +      } while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
    +
    +      /* Release resources allocated by backup_init(). */
    +      (void)sqlite3_backup_finish(pBackup);
    +    }
    +    rc = sqlite3_errcode(pFile);
    +  }
    +  
    +  /* Close the database connection opened on database file zFilename
    +  ** and return the result of this function. */
    +  (void)sqlite3_close(pFile);
    +  return rc;
    +}
    +
    + + +

    + The function presented in the previous example copies the entire source + database in one call to sqlite3_backup_step(). This requires holding a + read-lock on the source database file for the duration of the operation, + preventing any other database user from writing to the database. It also + holds the mutex associated with database pInMemory throughout the copy, + preventing any other thread from using it. The C function in this section, + designed to be called by a background thread or process for creating a + backup of an online database, avoids these problems using the following + approach: + +

      +
    1. Function sqlite3_backup_init() is called to create an sqlite3_backup + object to copy data from database pDb to the backup database file + identified by zFilename. +
    2. Function sqlite3_backup_step() is called with a parameter of 5 to + copy 5 pages of database pDb to the backup database (file zFilename). +
    3. If there are still more pages to copy from database pDb, then the + function sleeps for 250 milliseconds (using the sqlite3_sleep() + utility) and then returns to step 2. +
    4. Function sqlite3_backup_finish() is called to clean up resources + allocated by sqlite3_backup_init(). +
    + +

    File and Database Connection Locking + +

    + During the 250 ms sleep in step 3 above, no read-lock is held on the database + file and the mutex associated with pDb is not held. This allows other threads + to use database connection pDb and other connections to write to the + underlying database file. + +

    + If another thread writes to database connection pDb while this function is + sleeping, then the backup database (database connection pFile) is + automatically updated along with pDb. The backup process is continued after + the xSleep() call returns as if nothing had happened. If the database file + underlying connection pDb is written to by a different process or thread + using a different database connection while this function is sleeping, then + SQLite detects this within the next call made to sqlite3_backup_step() and + restarts the backup from the beginning. Either way, + the backup database is kept up to date during the backup process so that + when the operation is complete the backup database contains a consistent + snapshot of the original. However: + +

      +
    • Accounting for writes to the database by an external process or thread + using a different database connection is significantly more expensive + than accounting for writes made using pDb. +
    • If the database is being written using a database connection other + than pDb sufficiently often while the backupDb() function is + running, it may never finish. +
    + +

    backup_remaining() and backup_pagecount() + +

    + The backupDb() function uses the sqlite3_backup_remaining() and + sqlite3_backup_pagecount() functions to report its progress via the + user-supplied xProgress() callback. Function sqlite3_backup_remaining() + returns the number of pages left to copy and sqlite3_backup_pagecount() + returns the total number of pages in the source database (in this case + the database opened by pDb). So the percentage completion of the process + may be calculated as: + +

    + Completion = 100% * (pagecount() - remaining()) / pagecount() + +
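+ For example, a progress callback of the type expected by backupDb() might be
+ implemented as follows (an illustrative sketch; the reportProgress name and
+ the use of stderr are arbitrary):
+
+#include <stdio.h>
+
+/* Convert the page counts supplied by backupDb() into a percentage figure. */
+static void reportProgress(int nRemaining, int nPagecount){
+  int pct = nPagecount>0 ? (100*(nPagecount-nRemaining))/nPagecount : 0;
+  fprintf(stderr, "Backup %d%% complete.\n", pct);
+}
+
+ A backup might then be started with a call such as
+ backupDb(pDb, "backup.db", reportProgress).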

    + The sqlite3_backup_remaining() and sqlite3_backup_pagecount() APIs report + values stored by the previous call to sqlite3_backup_step(); they do not + actually inspect the source database file. This means that if the source + database is written to by another thread or process after the call to + sqlite3_backup_step() returns but before the values returned by + sqlite3_backup_remaining() and sqlite3_backup_pagecount() are used, the + values may be technically incorrect. This is not usually a problem. + +

    +
    +This page last modified 2009/02/17 18:09:33 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/btreemodule.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/btreemodule.html --- sqlite3-3.4.2/www/btreemodule.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/btreemodule.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,2025 @@ + + +SQLite B-Tree Module + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + + + + + +
    SQLite B-Tree Module
    +
    Table Of Contents
    + +
    + + +

    1 Document Overview

    + + +

    1.1 Scope and Purpose

    + + +

    + This document provides a description of the functionality of, and the public + interface offered by, the SQLite b-tree module. It also, to a certain extent, + describes the algorithms and implementation techniques used internally by + the module. This description is intended to serve two purposes: + +

      +
    • To make it easier to maintain, test and improve this critical + sub-system of the SQLite software library. + +

    • To facilitate development of compatible backend modules that can + be used with other SQLite sub-systems, either for experimental or + production purposes. +

    + +

    + It is important to note that, even given the second bullet point above, + the interfaces and sub-systems described in this document are not stable. + They may be changed in any way with each new SQLite release. Any + external software development that uses these interfaces must be prepared + to adapt to interface refactoring without notice. + +

    1.2 Document and Requirements Organization

    + + +

    + Change the following so that those requirements that describe the API + are "low-level" requirements. + + +
    Requirement ids Contents +
    H50**** Requirement statements specifying the functionality required of the B-Tree module. +
    H51**** Requirement statements specifying the API provided by the B-Tree module. +
    L****** Requirement statements specifying some details of the internal workings of the B-Tree module. +
    + +

    1.3 Glossary

    + + + +
    Balance-Siblings Algorithm + The balance-siblings algorithm is one of four algorithms that may be + used to redistribute data within a b-tree structure after an insert or + delete operation that causes a b-tree node to become overfull or underfull. + See section 4.2.5.4 for details. + +
    B-Tree Cursor + Define this. + +
    B-Tree Database Connection + A B-Tree database connection is a single client connection to an in-memory + page cache, through which a single temporary or persistent database may + be accessed. This term is used throughout this document to avoid confusing + such connections with SQL level SQLite client connections, which are + sometimes simply termed "database connections". + +
    Lazy-write cache + Define this. + +
    Overflow Cell + Define this. + +
    Page cache + Define this. + +
    Persistent database + Define this. + +
    Read-through cache + Define this. + +
    Shared-cache mode + Define this. + +
    SQLite Error Code + Define this. + +
    Temporary database + Define this. + +
    + + +

    2 Module Requirements

    + + +

    + The SQLite B-Tree module, the software module described by this document, + is designed to query and modify a database stored using the database image + format described in [1]. Database images may + exist only in volatile main-memory (in-memory databases), or may be stored + persistently within the file-system (also described in + [1]). Or, a database image may be stored primarily + in main-memory with the file-system used as secondary storage if the + database image grows too large. Database images stored only in main-memory, + and those stored primarily in main-memory with the file-system used only to + provide secondary storage space are known collectively as temporary + databases. Database images stored persistently in the file-system are termed + persistent databases. + +

    + This module implements an in-memory page cache to manage database image + content. The size of the pages managed by the cache is the same as the + page-size of the database image. When operating on a persistent database, + the cache operates as a read-through, lazy-write cache. When committing a + database transaction, the user explicitly directs the cache to flush all + dirty pages through to persistent storage. A single in-memory page cache + used to access the content of a persistent database may support multiple + logical client connections. Some brief explanation of what + this means. And maybe a pointer to the "Multi-User Database Requirements" + section. +

    + When operating on a temporary database, there may only be one client for + each page cache. Depending on the SQLite configuration, either the database + or journal file, or both, may be omitted from the system. + + +

    + + +

    Figure 1 - Role of Page Cache +

    + + +

    + Figure 1 depicts... + +

    + Roughly what is encapsulated by the module. + + +

    2.1 Functional Requirements

    + + +

    + This section contains requirements describing the functionality required + from the B-Tree module. + +

    + Figure out where integrity-check goes. + +

    2.1.1 Opening and Closing Connections

    + + +

    + The B-Tree module provides an interface to open new b-tree database connections. + +

    The B-Tree module shall provide an interface to open a connection +to either a named persistent database file, or an anonymous temporary +database. (C: * H51001)

    +

    When opening a persistent database, the B-Tree module shall allow the user +to specify that the connection be opened for read-only access.

    +

    When opening a persistent database, the B-Tree module shall allow the user +to specify that the connection only be opened if the specified file exists.

    +

    If SQLite is configured to run in shared-cache mode, and a connection is opened +to a persistent database file for which a page-cache already exists within +the current process's address space, then the connection opened shall be a +connection to the existing page-cache.

    +

    If a new B-Tree database connection is opened and requirement H50040 does not apply, +then a new page-cache shall be created within the process's address space. The +opened connection shall be a connection to the new page-cache.

    + +

    + The B-Tree module also provides an interface to close existing b-tree database + connections. + +

    The B-Tree module shall provide an interface to close a B-Tree database connection.

    +

    If a B-Tree database connection is closed and this causes the associated +page-cache to have zero connections to it, then the page-cache shall be closed +and all associated resources released.

    + +

    2.1.2 New Database Image Configuration

    + + +

    + The following requirements describe database configuration options that + are only applicable to new database images. For the purposes of the + following requirements, a "new database image" is defined as one that is + zero pages in size. + +

    The B-Tree module shall provide an interface to configure the page-size of a +new database image.

    +

    The B-Tree module shall provide an interface to configure whether or not a new +database image is auto-vacuum capable.

    + +

    2.1.3 Transaction and Savepoint Functions

    + + +

    + This needs a lot of work... + +

    + All read and write operations performed on a database image via the + B-Tree module interfaces occur within the context of a read or write + transaction. Something about the ACID nature of + transactions and how this applies to read and write transactions) + +

    The B-Tree module shall provide an interface to open (start) a read-only transaction.

    +

    The B-Tree module shall provide an interface to close (finish) a read-only transaction.

    + +

    + Read/write: + +

    The B-Tree module shall provide an interface to open a read/write transaction +or to upgrade from a read-only transaction to a read/write transaction.

    +

    The B-Tree module shall provide an interface to commit a read/write transaction.

    +

    The B-Tree module shall provide an interface to rollback a read/write transaction.

    + +

    + Multi-file transaction support. + +

    + Transaction state query: +

    The B-Tree module shall provide an interface to query a B-Tree database +connection to determine if there is an open transaction, and if so if the open +transaction is read-only or read/write.

    + +

    + Savepoints: + +

    + Define "savepoint transactions" and fix the following requirements. + +

    The B-Tree module shall provide an interface to open savepoint transactions.

    +

    The B-Tree module shall provide an interface to commit savepoint transactions.

    +

    The B-Tree module shall provide an interface to rollback savepoint transactions.

    + +

    2.1.4 Reading From the Database Image

    + + +

    + The B-Tree module allows the user to read a subset of the fields from the + database image header. Each such field is stored in the header as a 4-byte + unsigned big-endian integer. A complete description of each field and its + interpretation may be found in [1]. + +

    The B-Tree module shall provide an interface to read the value of any of the +4-byte unsigned big-endian integer fields beginning at byte offset 36 of the +database image. (C: H51015 H51016)

    + +

    + In other words, the database image header fields that may be read via + this module are: + +

      +
    • The number of free pages in the database image, +
    • The database image schema version (schema cookie). +
    • The database image schema layer file-format. +
    • The default page-cache size. +
    • The "auto-vacuum last root-page" field. +
    • The database image text-encoding field. +
    • The database image user-cookie value. +
    • The database image incremental-vacuum flag. +
    + +
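+ Each of the fields above is stored as a 4-byte unsigned big-endian integer.
+ Purely for illustration (this helper is not part of the module API and the
+ name is hypothetical), such a field can be decoded from the raw header bytes
+ as follows:
+
+/* Decode a 4-byte unsigned big-endian header field, given a pointer to
+** its first byte within the database image header. */
+static unsigned int readHeaderField(const unsigned char *p){
+  return ((unsigned int)p[0]<<24) | ((unsigned int)p[1]<<16)
+       | ((unsigned int)p[2]<<8)  |  (unsigned int)p[3];
+}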

    + With the exception of the database image header fields described above, + all data is read from the database image using B-Tree cursors. A B-Tree + cursor is a control structure for traversing the contents of a single + table or index b-tree structure within a database image. As well as + "forward" and "back" operations, a B-Tree cursor supports fast seeking to + a table entry identified by key value, or to the first or last entry in + the table. + +

    + When a B-Tree cursor is created, the specific table or index b-tree that + it is used to traverse is identified by the database image page number + of its root page. Since the root-page of the schema table is always page + 1, and the contents of the schema table include the root page numbers of all + other index and table b-tree structures in the database image, it is + possible for the application to determine the set of valid root-page + numbers by first traversing the schema table. +
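+ For illustration only (this sketch uses the public SQL-level API rather than
+ the interfaces required by this document, and the function name is
+ hypothetical), the same information is visible by querying the schema table
+ through an ordinary database connection:
+
+/* List the root page number of every table and index b-tree recorded in
+** the schema table of the "main" database of connection db. */
+static void listRootPages(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  sqlite3_prepare_v2(db, "SELECT name, rootpage FROM sqlite_master;", -1, &pStmt, 0);
+  while( sqlite3_step(pStmt)==SQLITE_ROW ){
+    printf("%s: root page %d\n",
+        (const char*)sqlite3_column_text(pStmt, 0), sqlite3_column_int(pStmt, 1));
+  }
+  sqlite3_finalize(pStmt);
+}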

    The B-Tree module shall provide an interface to open a B-Tree cursor on any table or +index b-tree within the database image, given its root page number.

    +

    The B-Tree module shall provide an interface to close a B-Tree cursor.

    +

    The B-Tree module shall provide an interface to move an open B-Tree cursor to +the entry associated with the largest key in the open b-tree structure.

    +

    The B-Tree module shall provide an interface to move an open B-Tree cursor to +the entry associated with the smallest key in the open b-tree structure.

    +

    The B-Tree module shall provide an interface to move an open B-Tree cursor that +currently points at a valid b-tree entry to the next entry in the b-tree +structure, sorted in order of key value, if any.

    +

    The B-Tree module shall provide an interface to move an open B-Tree cursor that +currently points at a valid b-tree entry to the previous entry in the b-tree +structure, sorted in order of key value, if any.

    +

    The B-Tree module shall provide an interface to retrieve the key value +associated with the b-tree structure entry that a B-Tree cursor is pointing to, +if any.

    +

    The B-Tree module shall provide an interface to retrieve the blob of data (the +database record) associated with the b-tree structure entry that a B-Tree +cursor open on a table b-tree is pointing to, if any.

    +

    The B-Tree module shall provide an interface to return the number of entries +currently stored in the b-tree structure that a B-Tree cursor is open on.

    + +

    + As well as traversing a b-tree structure using the operations enumerated + by the above requirements, it is also possible to use a cursor to search + a b-tree structure for a specified key value. If the key value can be + found, the cursor is left pointing at the entry with the specified key + value. Otherwise, the cursor is left pointing at either the entry with the + largest key that is smaller than the specified key, or to the entry with + the smallest key that is larger than the specified key. For table b-tree + structures, where the key values are 64-bit integers, the definition of + smaller, larger and equal to is straightforward. For index b-tree + structures, where the key values are database records, the manner in + which key values must be compared is more complicated. Refer to + [1] for a full explanation. + +

    + There is a specific section in [1] devoted to + record sort order in index b-tree structures. There needs to be some way to + point to it. Or, better, to the requirement or range of requirements. + +

    + Maybe a system that automatically links text like H30100 to the + corresponding requirement. Within a document if it can find it, or a + summary page (hlreq.html for example). + + +

    Given a key value, the B-Tree module shall provide an interface to move a +B-Tree cursor open on a b-tree structure to the B-Tree entry with the matching +key value, if such an entry exists.

    +

    If the interface required by H50119 is used to search for a key value that is +not present in the b-tree structure and the b-tree is not empty, the cursor shall +be moved to an existing entry that would be adjacent to a hypothetical +entry with the specified key value.

    +

    The interface required by H50119 shall provide an indication to the caller as +to whether the cursor is left pointing at an entry with a key value that is +smaller, larger or equal to the requested value, or if it is pointing to no +entry at all (because the b-tree structure is empty).

    + +

    + Does it depend on the structure of the tree whether the cursor is left + pointing to a smaller or larger entry after a failed search? Or is it + possible to determine which it will be based only on the set of keys + stored in the tree? + +

    + As well as the standard search operation described by the above + requirements, cursors open on index b-tree structures are required to + support several variants, as follows: + +

      +
    • Ignore rowid search mode. The final value in a database + record used as an index-btree key is always an integer "rowid" + field. A search in this mode proceeds as if each key in the b-tree + was missing this field. + +
    • Increment key mode. +
    • Prefix match mode. +
    • Prefix search mode. +
    + +

    + Finish the bullet points above and add HLR for each search mode. + +

    + More than one cursor can be open on a single b-tree structure at one time. + It is also possible for a write-cursor to modify the contents of a b-tree + structure while other cursors are open on it. The b-tree module does not + include any type of row-locking mechanism. It is possible for a write-cursor + to be used to delete an entry from a b-tree structure even if there are + one or more other cursors currently pointing to the entry being deleted. + +

    + Requirements to do with how the above is handled. Traceability to + sqlite3BtreeCursorHasMoved is required. +

    2.1.5 Writing to the Database Image

    + + +

    + The B-Tree module allows the user to write values to a subset of the + fields from the database image header. The set of writable fields is + the same as the set of fields enumerated in section + 2.1.4 that the B-Tree module is required to + provide read access to by requirement H50109. + +

    The B-Tree module shall provide an interface to write a value to any of the +4-byte unsigned big-endian integer fields beginning at byte offset 36 of the +database image.

    + +

    + The B-Tree module also supports operations to create new b-tree + structures within the database image. Existing b-tree structures may be + deleted from the database image entirely, or their entire contents may be + deleted, leaving an empty b-tree structure. + +

    The B-Tree module shall provide an interface to create a new index or table +b-tree structures within the database image. The interface shall automatically +assign a root-page to the new b-tree structure.

    +

    The B-Tree module shall provide an interface to remove an existing index or +table b-tree structure from the database image, given the root page number of +the b-tree to remove.

    +

    The B-Tree module shall provide an interface to remove all entries from (delete +the contents of) an index or table b-tree, given the root page number of the +b-tree to empty.

    + +

    + As one would expect, the B-Tree module also provides an interface to + insert and delete entries from b-tree structures. These operations are + performed using a B-Tree write cursor, a special type of B-Tree cursor + (see section 2.1.4). + +

    When opening a B-Tree cursor using the interface required by H50110, it shall +be possible to specify that the new cursor be a write cursor, or an ordinary +read-only cursor.

    +

    The B-Tree module shall provide an interface that allows the user to delete the +b-tree entry that a write cursor points to, if any. (C: L50013)

    +

    The B-Tree module shall provide an interface to insert new entries into a table +or index B-Tree, given a write cursor open on the table or index b-tree the new +entry is to be inserted into. (C: L50001 L50002 L50003 L50004 L50012)

    + +

    + Incremental vacuum step. + +

    2.1.6 Page-Cache Configuration Requirements

    + + +

    + A page-cache has a number of operational parameters that may be configured + at run-time via an open b-tree database connection. Note that even though the + interfaces provided by this module allow these parameters to be set via a + b-tree database connection, they are properties of the page-cache, not + the b-tree database connection. In situations where more than one b-tree + database connection is connected to a single page-cache, writes made via + one b-tree database connection may overwrite the values set by another. + The following table summarizes the available configuration parameters. + + +
    Parameter Description Requirements +
    Locking-mode + This! + H50138, H50139, H50140 +
    Journal-mode + This! + H50141, H50142, H50143, H50144, H50145, H50146 +
    Journal-file size limit + The journal-file size limit parameter may be set to any integer + value within the range of a 64-bit signed integer. Any negative + value is interpreted as "no limit". Otherwise, if the + journal-file size limit is set to zero or a positive number, it + represents an upper limit on the size of the journal file in + bytes. If the application executes a database write operation that + would normally cause the journal file to grow larger than this + configured limit, the operation fails and an error is returned + to the user. The default value of this parameter is -1 (no + limit). + H50147, H50148, H50149 +
    Database-file size limit + The database-image size limit parameter may be set to any integer + value greater than zero within the range of a 32-bit signed + integer. The configured value represents an upper limit on the size of + the database image in pages. If the application executes a + database write operation that would normally cause the database image to + grow larger than this configured limit, the operation fails and + an error is returned to the user. + H50150, H50151, H50152 +
    Cache size + The cache-size parameter may be set to any integer value. How it + affects operation depends on the specific P-Cache implementation used + by the page-cache. Refer to details for the + behaviour of the built-in default P-Cache. + H50153 +
    Safety level + The safety-level parameter may be set to "none", "normal" or "full". + Where will the effect of this defined/required? + H50154, H50155 +
    + +
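+ As a point of reference (illustrative only; this shows the SQL-level view
+ rather than the B-Tree module interfaces required below, and the values are
+ chosen arbitrarily), the parameters in the table above are commonly adjusted
+ through PRAGMA statements issued on an ordinary database connection:
+
+static void configurePageCache(sqlite3 *db){
+  sqlite3_exec(db, "PRAGMA locking_mode = exclusive;",     0, 0, 0);
+  sqlite3_exec(db, "PRAGMA journal_mode = truncate;",      0, 0, 0);
+  sqlite3_exec(db, "PRAGMA journal_size_limit = 1048576;", 0, 0, 0);
+  sqlite3_exec(db, "PRAGMA max_page_count = 1000000;",     0, 0, 0);
+  sqlite3_exec(db, "PRAGMA cache_size = 2000;",            0, 0, 0);
+  sqlite3_exec(db, "PRAGMA synchronous = FULL;",           0, 0, 0);
+}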

    The B-Tree module shall provide an interface allowing the application to set +the locking-mode of a page-cache to either "normal" or "exclusive", given an +open b-tree database connection to that page-cache.

    +

    If the locking-mode of a page-cache is set to "normal" when a read/write +or read-only transaction is ended, any locks held on the database file-system +representation by the page-cache shall be relinquished.

    +

    If the locking-mode of a page-cache is set to "exclusive" when a read/write +or read-only transaction is ended, any locks held on the database file-system +representation by the page-cache shall be retained.

    + +

    + And if a read/write transaction is downgraded to a read-only transaction? + This scenario should also be dealt with in section 2.1.3. + +

    The B-Tree module shall provide an interface allowing the application to set +the journal-mode of a page-cache to one of "off", "memory", "delete", +"persist", or "truncate", given an open b-tree database connection to that +page-cache.

    +

    If the journal-mode of a page-cache is set to "off" when a read/write +transaction is opened, then the transaction shall use no journal file.

    +

    If the journal-mode of a page-cache is set to "memory" when a read/write +transaction is opened, then instead of using the journal file located in the +file-system, journal-file data shall be stored in main-memory.

    +

    If the journal-mode of a page-cache is set to "delete" when a read/write +transaction is opened, then any journal file used by the transaction shall +be deleted at the conclusion of the transaction.

    +

    If the journal-mode of a page-cache is set to "truncate" when a read/write +transaction is opened, then any journal file used by the transaction shall +be truncated to zero bytes in size at the conclusion of the transaction.

    +

    If the journal-mode of a page-cache is set to "persist" when a read/write +transaction is opened, then any journal file used by the transaction shall +remain in the file-system at the conclusion of the transaction.

    + +

    + The difference in functionality provided by "off", "memory" and the 3 + modes that use a real journal file should also feature in + 2.1.3. + +

    The B-Tree module shall provide an interface to set the value of the +journal-file size limit configuration parameter of a page-cache, given +an open b-tree database connection to that page-cache.

    +

    The default value assigned to the journal-file size limit configuration of a +page-cache shall be -1.

    +

    If the journal-file size limit parameter is set to a non-negative value, and +the user executes a write operation that would otherwise require the journal +file to be extended to a size greater than the configured value in bytes, then +the operation shall fail and an error be returned to the user.

    + +

    The B-Tree module shall provide an interface to set the value of the +database-image size limit configuration parameter of a page-cache, given +an open b-tree database connection to that page-cache.

    +

    The default value assigned to the database-image size limit configuration of a +page-cache shall be the value of the compile time symbol SQLITE_MAX_PAGE_COUNT +(1073741823 by default).

    +

    If the database-image size limit parameter is set to a non-negative value, and +the user executes a write operation that would otherwise require the database +image to be extended to a size greater than the configured value in pages, then +the operation shall fail and an error be returned to the user.

    + +

    The B-Tree module shall provide an interface to set the value of the +cache-size configuration parameter of a page-cache, given an open b-tree +database connection to that page-cache.

    + +

    + See section 2.2.1 for a description of and requirements + specifying how the value of the cache-size parameter affects the + operation of a page-cache. Check this reference is + relevant after it is written. Refer to a specific requirement if possible + too. + +

    The B-Tree module shall provide an interface allowing the application to set +the safety-level of a page-cache to one of "off", "normal" or "full", +given an open b-tree database connection to that page-cache.

    +

    The default value assigned to the safety-level configuration parameter of a +page-cache shall be "full".

    + +

    + Description of what the safety-level actually does. Or pointer to where a + description and requirements can be found (transactions section?). + +

    + Interface to set the codec function (encryption). + +

    + The busy-handler. Where exactly does this come in? Transactions and + savepoints section? + +

    + The six page-cache operational parameters listed above may also be + queried. The following requirements specify the required query + interfaces. + +

    The B-Tree module shall provide an interface to query the current locking-mode +of a page-cache, given an open b-tree database connection to that page-cache.

    +

    The B-Tree module shall provide an interface to query the current journal-mode +of a page-cache, given an open b-tree database connection to that page-cache.

    +

    The B-Tree module shall provide an interface to query the current journal file +size-limit of a page-cache, given an open b-tree database connection to that +page-cache.

    +

    The B-Tree module shall provide an interface to query the current database file +size-limit of a page-cache, given an open b-tree database connection to that +page-cache.

    +

    The B-Tree module shall provide an interface to query the current cache-size +of a page-cache, given an open b-tree database connection to that page-cache.

    +

    The B-Tree module shall provide an interface to query the current safety-level +of a page-cache, given an open b-tree database connection to that page-cache.

    + +

    + It is also possible to interrogate a b-tree database handle to determine + if it was opened on a temporary or persistent database. A b-tree + database handle opened on a persistent database may be queried for the + name of (full-path to) either the database or journal file associated + with the open database. +

    The B-Tree module shall provide an interface to query an open b-tree database +handle to determine if the underlying database is a persistent database or a +temporary database.

    +

    The B-Tree module shall provide an interface allowing the application to query +a b-tree database connection open on a persistent database for the name of the +underlying database file within the file-system.

    +

    The B-Tree module shall provide an interface allowing the application to query +a b-tree database connection open on a persistent database for the name of the +underlying journal file within the file-system.

    + +

    2.1.7 Multi-User Database Requirements

    + +
      +
    • Lock on schema memory object. +
    • Locks on b-tree tables. +
    • "Unlock notify" feature. +
    • Mutexes/thread-safety features. +
    + +

    + The b-tree module preventing deadlock (by always grabbing mutexes in + order of BtShared pointer) should be required here. + +

    2.1.8 Backup/Vacuum API Requirements

    + +
      +
    • Callbacks for backup module. +
    • Page read/write APIs for backup module. +
    + +

    2.1.9 Integrity Check Requirements

    + +
      +
    • Callbacks for backup module. +
    • Page read/write APIs for backup module. +
    + +

    2.2 Other Requirements and Constraints

    + + +

    2.2.1 Caching and Memory Management Requirements

    + +
      +
    • Memory allocation related features (pcache, scratch memory, other...). +
    • Default pcache implementation (sqlite3_release_memory()). +
    • Schema memory object allocation (destructor registration). +
    + +

    2.2.2 Fault Tolerance Requirements

    + +
      +
    • Don't corrupt the database. Various modes and the expectations of them. +
    + +

    2.2.3 Well-Formedness Requirements

    + +
      +
    • Identify the subset of file-format well-formedness requirements that + this module is responsible for implementing. +
    • Define how the module should respond to corrupt database files: don't + crash, return SQLITE_CORRUPT as early as is practical. Should it also + put the b-tree into a permanent error state? +
    + + + + +

    3 Module API

    + + +

    + Description of the interface in btree.h. Also other interfaces accessed by + external modules. Including release_memory() and those pager interfaces that + are accessed directly by other modules. All of these requirements will be + descended/derived from requirements in the previous sections. Some of the + text could/should be pulled in from btree.h. + +

    + The name of sqlite3BtreeBeginStmt() should probably change to + sqlite3BtreeOpenSavepoint(). Matches the pager layer and is a more + accurate description of the function. + +

    + There are only a few places in which the pager object is used directly, + always to call some trivial get/set configuration function. These should + be replaced somehow with sqlite3BtreeXXX() APIs. Also, the current + approach is probably Ok, but worth looking it over for thread-safety + issues. + +

    + It would be easier to write up if the dependency between the B-Tree + layer and the sqlite3 structure did not exist. At present, it is used for: + +
    * The unlock-notify feature (arguments to sqlite3ConnectionBlocked() are database handles), +
    * Accessing the SQLITE_ReadUncommitted flag, +
    * Invoking the busy-handler callback, +
    * During sqlite3BtreeOpen(), to find the VFS to use, +
    * Accessing the SQLITE_SharedCache flag (for setting it), +
    * To check the same B-Tree is not attached more than once in shared-cache mode, +
    * To link the B-Tree into the pointer-order list of shared-cache b-trees used by the same handle (used for mutexes). +
    * To determine if an in-memory sub-journal should be used. +
    * To know how many savepoints are open in BtreeBeginTrans(). +
    * Many, many times to assert() that the db mutex is held when the b-tree layer is accessed.. + + +

    3.1 Opening and Closing Connections

    + + +

    + This section describes the API offered by the B-Tree module to other + SQLite sub-systems to open and close B-Tree database connections. + +

    typedef struct Btree Btree;
    + +

    + A B-Tree database connection is accessed by other SQLite sub-systems + using an opaque handle, modelled in C code using the type "Btree*". + +

    3.1.1 sqlite3BtreeOpen

    + + +
    int sqlite3BtreeOpen(
    +  const char *zFilename,   /* Name of database file to open */
    +  sqlite3 *db,             /* Associated database connection */
    +  Btree **ppBtree,         /* Return open Btree* here */
    +  int flags,               /* Flags */
    +  int vfsFlags             /* Flags passed through to VFS open */
    +);
    + +

    If successful, a call to the sqlite3BtreeOpen function shall return SQLITE_OK +and set the value of *ppBtree to contain a new B-Tree database connection +handle. (P: H50010)

    +

    If unsuccessful, a call to the sqlite3BtreeOpen function shall return an SQLite +error code other than SQLITE_OK indicating the reason for the failure. The +value of *ppBtree shall not be modified in this case.

    + +

    If the zFilename parameter to a call to sqlite3BtreeOpen is NULL or a pointer +to a buffer of which the first byte is a nul (0x00), then sqlite3BtreeOpen +shall attempt to open a connection to a temporary database.

    +

    If the zFilename parameter to a call to sqlite3BtreeOpen is a pointer to a +buffer containing a nul-terminated UTF-8 encoded string, sqlite3BtreeOpen shall +attempt to open a connection to a persistent database.

    + +

    The combination of the above two requirements implies that if the zFilename argument passed to sqlite3BtreeOpen is neither a NULL pointer nor a pointer to a nul-terminated string, the type and filename of the database that sqlite3BtreeOpen attempts to open a connection to are undefined.

    + Valid values for the flags argument to the sqlite3BtreeOpen + function consist of the bitwise OR of zero or more of the following + symbols. + +

    #define BTREE_OMIT_JOURNAL  1  /* Do not use journal.  No argument */
    +#define BTREE_NO_READLOCK   2  /* Omit readlocks on readonly files */
    + +

    If the BTREE_OMIT_JOURNAL bit is set in the flags parameter passed to a +successful call to sqlite3BtreeOpen to open a temporary database, then the +page-cache created as a result shall not open or use a journal file for any +purpose.

    + +

    + When opening a connection to a persistent database, the value of the + BTREE_OMIT_JOURNAL bit in the flags parameter is ignored by + sqlite3BtreeOpen. + +

    If the BTREE_NO_READLOCK bit is set in the flags parameter passed to a +successful call to sqlite3BtreeOpen to open a persistent database and a +new page-cache is created as a result of the call, then the new page-cache +shall only lock the database file-system representation when writing to +it.

    + +

    + When opening a connection to a temporary database, the value of the + BTREE_NO_READLOCK bit in the flags parameter is ignored, as temporary + databases are never locked for either reading or writing + (reference to some requirement for this statement.). + Whether or not a new page-cache is created when a connection to a + persistent database is opened is governed by requirements H50040 and + H50050. + +

    If the sqlite3BtreeOpen function is called to open a connection to a persistent +database, and the call causes a new page-cache to be created, when opening the +database file using the VFS interface xOpen method the 4th parameter passed to +xOpen (flags) shall be a copy of the vfsFlags value passed to sqlite3BtreeOpen.

    +

    If the sqlite3BtreeOpen function is called to open a connection to a temporary +database, if and when a temporary file is opened to use as secondary storage +using the VFS interface xOpen method the 4th parameter passed to xOpen (flags) +shall be a copy of the vfsFlags value passed to sqlite3BtreeOpen with the +SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_EXCLUSIVE and +SQLITE_OPEN_DELETEONCLOSE bits also set.

    + +

    + Requirements explaining how the db parameter to sqlite3BtreeOpen is used. Must be there for something. + +
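    As a rough usage illustration (not itself a requirement), the sketch below shows how a higher layer might open a read/write connection to a persistent database and later close it. The db handle and the specific SQLITE_OPEN_* values passed through as vfsFlags are assumptions supplied by the caller.

    Btree *pBt = 0;
    int rc = sqlite3BtreeOpen("test.db", db, &pBt, 0,
                              SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE);
    if( rc!=SQLITE_OK ){
      /* On failure *ppBtree (pBt here) is left unmodified. */
      return rc;
    }
    /* ... use pBt via the interfaces described below, then: */
    sqlite3BtreeClose(pBt);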

    3.1.2 sqlite3BtreeClose

    + + +
    int sqlite3BtreeClose(Btree*);
    + +

    A call to the sqlite3BtreeClose function with a valid b-tree database +connection handle passed as the only argument shall invalidate the handle, +close the b-tree database connection and release all associated resources.

    + +

    + If a call to sqlite3BtreeClose is made with a value that is not a valid + b-tree database connection handle passed as the only argument, the + results are undefined. + +

    If a call to sqlite3BtreeClose is made to close a b-tree database connection +while there exist open B-Tree cursors that were opened using the specified +b-tree database connection, they shall be closed automatically from within +sqlite3BtreeClose, just as if their handles were passed to +sqlite3BtreeCloseCursor.

    + +

    + See also requirement H50070. + + +

    3.2 Database Image Configuration

    + + +

    + This category doesn't work all that well. These APIs are used for other + things too (i.e. switching to incremental-vacuum mode). + +

    #define BTREE_AUTOVACUUM_NONE 0        /* Do not do auto-vacuum */
    +#define BTREE_AUTOVACUUM_FULL 1        /* Do full auto-vacuum */
    +#define BTREE_AUTOVACUUM_INCR 2        /* Incremental vacuum */
    +
    int sqlite3BtreeSetAutoVacuum(Btree *, int);
    +int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix);
    + +

    + Queries: + +

    int sqlite3BtreeGetPageSize(Btree*);
    +
    int sqlite3BtreeGetReserve(Btree*);
    +
    int sqlite3BtreeGetAutoVacuum(Btree *);
    + +
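    A brief illustrative sketch (not a requirement) of how these configuration interfaces might be called on a newly opened connection pBt; the meaning of the eFix parameter shown in the comment is an assumption, not something specified above.

    sqlite3BtreeSetPageSize(pBt, 4096, 0, 0);              /* 4KB pages, no reserved bytes; eFix=0 assumed to leave the setting changeable */
    sqlite3BtreeSetAutoVacuum(pBt, BTREE_AUTOVACUUM_FULL); /* enable full auto-vacuum */
    int pgsz = sqlite3BtreeGetPageSize(pBt);               /* query the value back */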

    3.3 Connection Configuration

    + + +
    int sqlite3BtreeSetCacheSize(Btree*,int);
    +int sqlite3BtreeSetSafetyLevel(Btree*,int,int);
    +int sqlite3BtreeMaxPageCount(Btree*,int);
    + +

    + And functions to query the current configuration: + +

    int sqlite3BtreeSyncDisabled(Btree*);
    + +

    3.4 Query Interfaces

    + + +
    const char *sqlite3BtreeGetFilename(Btree *);
    + +

    A call to the sqlite3BtreeGetFilename function with a valid B-Tree database +connection handle opened on a persistent database as the first argument shall +return a pointer to a buffer containing the full-path of the database file +formatted as a nul-terminated, UTF-8 string.

    +

    A call to the sqlite3BtreeGetFilename function with a valid B-Tree database +connection handle opened on a temporary database as the first argument shall +return a pointer to a buffer to a nul-terminated string zero bytes in length +(i.e. the first byte of the buffer shall be 0x00).

    + +
    const char *sqlite3BtreeGetJournalname(Btree *);
    + +

    A call to the sqlite3BtreeGetJournalname function with a valid B-Tree database +connection handle opened on a persistent database as the first argument shall +return a pointer to a buffer containing the full-path of the journal file +formatted as a nul-terminated, UTF-8 string.

    +

    A call to the sqlite3BtreeGetJournalname function with a valid B-Tree database +connection handle opened on a temporary database as the first argument shall +return a pointer to a buffer to a nul-terminated string zero bytes in length +(i.e. the first byte of the buffer shall be 0x00).

    + +

    + Requirement H51013 holds true even if the B-Tree database connection is + configured to use an in-memory journal file or no journal file at all + (ref requirements). In these cases the buffer returned + contains the full-path of the journal file that would be used if the + connection were configured to use a journal file. + +

    3.5 Mutex Functions

    + + +
    typedef struct BtreeMutexArray BtreeMutexArray;
    +struct BtreeMutexArray {
    +  int nMutex;
    +  Btree *aBtree[SQLITE_MAX_ATTACHED+1];
    +};
    + +
    void sqlite3BtreeEnter(Btree*);
    +void sqlite3BtreeEnterAll(sqlite3*);
    +void sqlite3BtreeLeave(Btree*);
    +void sqlite3BtreeEnterCursor(BtCursor*);
    +void sqlite3BtreeLeaveCursor(BtCursor*);
    +void sqlite3BtreeLeaveAll(sqlite3*);
    +void sqlite3BtreeMutexArrayEnter(BtreeMutexArray*);
    +void sqlite3BtreeMutexArrayLeave(BtreeMutexArray*);
    +void sqlite3BtreeMutexArrayInsert(BtreeMutexArray*, Btree*);
    + +
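    As a rough sketch of intended usage (this section does not yet contain requirements), a caller holding a Btree* is assumed to bracket direct b-tree access with the enter/leave calls:

    sqlite3BtreeEnter(pBt);     /* acquire the mutex protecting this b-tree connection */
    /* ... call sqlite3BtreeXXX() interfaces on pBt ... */
    sqlite3BtreeLeave(pBt);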

    3.6 Transaction and Savepoint API

    + + +
    int sqlite3BtreeBeginTrans(Btree*,int);
    +int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster);
    +int sqlite3BtreeCommitPhaseTwo(Btree*);
    +int sqlite3BtreeCommit(Btree*);
    +int sqlite3BtreeRollback(Btree*);
    + +
    int sqlite3BtreeBeginStmt(Btree*,int);
    +int sqlite3BtreeSavepoint(Btree *, int, int);
    + +
    int sqlite3BtreeIsInTrans(Btree*);
    +int sqlite3BtreeIsInReadTrans(Btree*);
    +int sqlite3BtreeIsInBackup(Btree*);
    + + +
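    A minimal sketch of a write transaction using the declarations above, assuming pBt is an open connection and that a non-zero second parameter to sqlite3BtreeBeginTrans requests a write (rather than read-only) transaction:

    int rc = sqlite3BtreeBeginTrans(pBt, 1);   /* 1: write transaction (assumed) */
    if( rc==SQLITE_OK ){
      /* ... modify the database image via the cursor APIs ... */
      rc = sqlite3BtreeCommit(pBt);
      if( rc!=SQLITE_OK ){
        sqlite3BtreeRollback(pBt);             /* abandon the changes on error */
      }
    }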

    3.7 Reading and Traversing the Database Image

    + + +

    + sqlite3BtreeMoveto is never called from outside of the b-tree layer. It + could/should be removed from the API. + +

    + The "bias" argument to sqlite3BtreeMovetoUnpacked is only ever true + when it is called from within sqlite3BtreeInsert. This argument could/should + also be removed from the API, if only to make it simpler to describe. + +

    typedef struct BtCursor BtCursor;
    + +
    int sqlite3BtreeCursor(
    +  Btree*,                              /* BTree containing table to open */
    +  int iTable,                          /* Index of root page */
    +  int wrFlag,                          /* 1 for writing.  0 for read-only */
    +  struct KeyInfo*,                     /* First argument to compare function */
    +  BtCursor *pCursor                    /* Space to write cursor structure */
    +);
    +int sqlite3BtreeCursorSize(void);
    +
    int sqlite3BtreeCloseCursor(BtCursor*);
    +void sqlite3BtreeClearCursor(BtCursor *);
    + +
    int sqlite3BtreeFirst(BtCursor*, int *pRes);
    +int sqlite3BtreeLast(BtCursor*, int *pRes);
    +int sqlite3BtreeNext(BtCursor*, int *pRes);
    +int sqlite3BtreePrevious(BtCursor*, int *pRes);
    +int sqlite3BtreeEof(BtCursor*);
    + +
    int sqlite3BtreeKeySize(BtCursor*, i64 *pSize);
    +int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*);
    +const void *sqlite3BtreeKeyFetch(BtCursor*, int *pAmt);
    +const void *sqlite3BtreeDataFetch(BtCursor*, int *pAmt);
    +int sqlite3BtreeDataSize(BtCursor*, u32 *pSize);
    +int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*);
    + +
    int sqlite3BtreeCount(BtCursor *, i64 *);
    + +
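    As a rough usage sketch of the cursor interfaces declared above, the code below opens a read-only cursor on a table b-tree rooted at page iRoot of connection pBt and steps through its entries in key order. The assumption that the caller simply allocates and zeroes sqlite3BtreeCursorSize() bytes for the cursor reflects how the interface appears intended to be used, not a stated requirement.

    BtCursor *pCur = (BtCursor *)sqlite3_malloc(sqlite3BtreeCursorSize());
    if( pCur==0 ) return SQLITE_NOMEM;
    memset(pCur, 0, sqlite3BtreeCursorSize());
    int rc = sqlite3BtreeCursor(pBt, iRoot, 0, 0, pCur);   /* wrFlag=0, no KeyInfo (table b-tree) */
    if( rc==SQLITE_OK ){
      int res;
      for(rc=sqlite3BtreeFirst(pCur, &res); rc==SQLITE_OK && res==0;
          rc=sqlite3BtreeNext(pCur, &res)){
        /* ... read the current entry via sqlite3BtreeKeySize()/sqlite3BtreeData() ... */
      }
      sqlite3BtreeCloseCursor(pCur);
    }
    sqlite3_free(pCur);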

    3.7.1 sqlite3BtreeMovetoUnpacked

    + + +

    The sqlite3BtreeMovetoUnpacked function is used to move a b-tree cursor to a specified entry within the b-tree structure the cursor is open on or, if no such entry exists, to a nearby entry as described by the requirements below.

    int sqlite3BtreeMovetoUnpacked(
    +  BtCursor*,
    +  UnpackedRecord *pUnKey,
    +  i64 intKey,
    +  int bias,
    +  int *pRes
    +);
    + +

    + The following requirements specify exactly how a b-tree cursor is to be moved + by a successful call to sqlite3BtreeMovetoUnpacked. + +

    If a call is made to sqlite3BtreeMovetoUnpacked specifying a key value for +which there exists an entry with a matching key value in the b-tree structure, +the b-tree cursor shall be moved to point to this entry. In this case *pRes +(the value of the "int" variable pointed to by the pointer passed as the +fifth parameter to sqlite3BtreeMovetoUnpacked) shall be set to 0 before +returning.

    +

    If a call is made to sqlite3BtreeMovetoUnpacked specifying a key value for +which there does not exist an entry with a matching key value in the b-tree +structure, the b-tree cursor shall be moved to point to an entry located +on the leaf page that would contain the requested entry, were it present.

    +

    If the condition specified in L50009 is met and the b-tree structure +contains one or more entries (is not empty), the b-tree cursor shall be left +pointing to an entry that would lie adjacent (immediately before or after in +order by key) to the requested entry on the leaf page, were it present.

    +

    If the condition specified in L50009 is met and the b-tree cursor is left pointing to an entry with a smaller key than that requested, or the cursor is left pointing to no entry at all because the b-tree structure is completely empty, *pRes (the value of the "int" variable pointed to by the pointer passed as the fifth parameter to sqlite3BtreeMovetoUnpacked) shall be set to -1. Otherwise, if the b-tree cursor is left pointing to an entry with a larger key than that requested, *pRes shall be set to 1.
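    For illustration only, the sketch below seeks a cursor opened on a table (intkey) b-tree to rowid 42 and interprets *pRes according to the requirements above. pCur is assumed to be an open cursor, and passing a NULL UnpackedRecord is assumed to select the integer-key form of the search.

    int res;
    int rc = sqlite3BtreeMovetoUnpacked(pCur, 0, 42, 0, &res);
    if( rc==SQLITE_OK ){
      if( res==0 ){
        /* cursor points at the entry with key 42 */
      }else if( res<0 ){
        /* cursor points at an entry with a smaller key (or the tree is empty) */
      }else{
        /* cursor points at an entry with a larger key */
      }
    }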

    + + +

    + Not clear how to deal with these. Define an external module to + encapsulate these and define sorting order etc.? That's tricky as things are + because the UnpackedRecord.flags field defines the "search mode" used + by sqlite3BtreeMovetoUnpacked. + +

    typedef struct KeyInfo KeyInfo;
    +struct KeyInfo {
    +  sqlite3 *db;        /* The database connection */
    +  u8 enc;             /* Text encoding - one of the TEXT_Utf* values */
    +  u16 nField;         /* Number of entries in aColl[] */
    +  u8 *aSortOrder;     /* If defined and aSortOrder[i] is true, sort DESC */
    +  CollSeq *aColl[1];  /* Collating sequence for each term of the key */
    +};
    +
    typedef struct UnpackedRecord UnpackedRecord;
    +struct UnpackedRecord {
    +  KeyInfo *pKeyInfo;  /* Collation and sort-order information */
    +  u16 nField;         /* Number of entries in apMem[] */
    +  u16 flags;          /* Boolean settings.  UNPACKED_... below */
    +  i64 rowid;          /* Used by UNPACKED_PREFIX_SEARCH */
    +  Mem *aMem;          /* Values */
    +};
    + +

    3.7.2 sqlite3BtreeGetMeta

    + + +

    + The sqlite3BtreeGetMeta interface may be used to retrieve the current + value of certain fields from the database image header. + +

    int sqlite3BtreeGetMeta(Btree*, int idx, u32 *pValue);
    + +

    If successful, a call to the sqlite3BtreeGetMeta function shall set the value of *pValue to the current value of the specified 32-bit unsigned integer field in the database image header and return SQLITE_OK. (P: H50109)

    + +
    #define BTREE_FREE_PAGE_COUNT     0
    +#define BTREE_SCHEMA_VERSION      1
    +#define BTREE_FILE_FORMAT         2
    +#define BTREE_DEFAULT_CACHE_SIZE  3
    +#define BTREE_LARGEST_ROOT_PAGE   4
    +#define BTREE_TEXT_ENCODING       5
    +#define BTREE_USER_VERSION        6
    +#define BTREE_INCR_VACUUM         7
    + +

    The database header field read from the database image by a call to +sqlite3BtreeGetMeta shall be the 32-bit unsigned integer header field stored at +byte offset (36 + 4 * idx) of the database header, where idx is the value of +the second parameter passed to sqlite3BtreeGetMeta. (P: H50109)
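    As an illustrative sketch (assuming an open connection pBt), reading the schema version means reading header field 1, i.e. the 32-bit value stored at byte offset 36 + 4*1 = 40 of the database header:

    u32 schemaVersion;
    int rc = sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, &schemaVersion);
    if( rc==SQLITE_OK ){
      /* schemaVersion now holds the schema cookie from the database header */
    }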

    + + + + +

    3.8 Modifying the Database Image

    + + +

    3.8.1 sqlite3BtreeCreateTable

    + +
    int sqlite3BtreeCreateTable(Btree*, int*, int flags);
    +
    #define BTREE_INTKEY     1    /* Table has only 64-bit signed integer keys */
    +#define BTREE_ZERODATA   2    /* Table has keys only - no data */
    +#define BTREE_LEAFDATA   4    /* Data stored in leaves only.  Implies INTKEY */
    + +

    3.8.2 sqlite3BtreeDropTable

    + +
    int sqlite3BtreeDropTable(Btree*, int, int*);
    + +

    3.8.3 sqlite3BtreeClearTable

    + +
    int sqlite3BtreeClearTable(Btree*, int, int*);
    + +

    3.8.4 sqlite3BtreeCursorHasMoved

    + +
    int sqlite3BtreeCursorHasMoved(BtCursor*, int*);
    + +

    3.8.5 sqlite3BtreePutData

    + +
    int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
    + +

    3.8.6 sqlite3BtreeUpdateMeta

    + +
    int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value);
    + +

    3.8.7 sqlite3BtreeDelete

    + + +
    int sqlite3BtreeDelete(BtCursor*);
    + +

    A successful call to the sqlite3BtreeDelete function made with a read/write +b-tree cursor passed as the first argument shall remove the entry pointed to by +the b-tree cursor from the b-tree structure. (P: H50127)

    + +

    + Effect of a delete operation on other cursors that are pointing to the + deleted b-tree entry. + +

    + Malloc and IO error handling. Same as for sqlite3BtreeInsert. + +

    3.8.8 sqlite3BtreeInsert

    + + +
    int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey,
    +                                  const void *pData, int nData,
    +                                  int nZero, int bias, int seekResult);
    + +

    A successful call to the sqlite3BtreeInsert function made with a read/write +b-tree cursor passed as the first argument shall insert a new entry into +the b-tree structure the b-tree cursor is open on. (P: H50128)

    + +

    + The requirement above implies that the results of passing anything else as + the first argument to sqlite3BtreeInsert, for example a read-only b-tree cursor, + are undefined. + +

    If a call to sqlite3BtreeInsert is made to insert an entry specifying a key +value for which there already exists a matching key within the b-tree +structure, the entry with the matching key shall be removed from the b-tree +structure before the new entry is inserted. (P: H50128)

    + +

    + In other words, the sqlite3BtreeInsert API could easily be renamed + sqlite3BtreeInsertOrReplace. We will probably need a module + requirement for the "replace" operation. + +

    If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is open on a table b-tree, then the value passed as the second parameter (pKey) shall be ignored. The value passed as the third parameter (nKey) shall be used as the integer key for the new entry. (P: H50128)

    +

    If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is +open on a table b-tree, then the database record associated with the new entry +shall consist of a copy of the first nData bytes of the buffer pointed to by pData +followed by nZero zero (0x00) bytes, where pData, nData and nZero are the +fourth, fifth and sixth parameters passed to sqlite3BtreeInsert, respectively. (P: H50128)

    +

    If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is +open on an index b-tree, then the values passed as the fourth, fifth and sixth +parameters shall be ignored. The key (a database record) used by the new entry +shall consist of the first nKey bytes of the buffer pointed to by pKey, where +pKey and nKey are the second and third parameters passed to sqlite3BtreeInsert, +respectively. (P: H50128)

    + +

    The following requirements describe the seventh and eighth parameters passed to the sqlite3BtreeInsert function. Both of these are used to provide extra information used by sqlite3BtreeInsert to optimize the insert operation. They may be safely ignored by alternative b-tree implementations.

    There should be some rationalization for these, eventually. Some traceability from somewhere to show how the b-tree module offering these slightly esoteric interfaces is helpful to SQLite overall.

    If the value passed as the seventh parameter to a call to sqlite3BtreeInsert +is non-zero, sqlite3BtreeInsert shall interpret this to mean that it is likely +(but not certain) that the key belonging to the new entry is larger than the +largest key currently stored in the b-tree structure, and optimize accordingly.

    +

    If the value passed as the eighth parameter to a call to sqlite3BtreeInsert is non-zero, then the B-Tree module shall interpret this to mean that the b-tree cursor has already been positioned by a successful call to sqlite3BtreeMovetoUnpacked specifying the same key value as is being inserted, and that sqlite3BtreeMovetoUnpacked has set the output value required by L50011 to this value.

    + +

    + If a non-zero value is passed as the eighth parameter to sqlite3BtreeInsert + and the b-tree cursor has not been positioned as assumed by L50006, the + results are undefined. + +
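    As an illustration of the parameter conventions above (not a requirement), the sketch below inserts an entry with rowid 7 into a table b-tree. pCur is assumed to be a read/write cursor and aRecord/nRecord an already-serialized database record built elsewhere.

    int rc = sqlite3BtreeInsert(pCur,
        0, 7,                  /* pKey ignored for table b-trees; nKey is the rowid */
        aRecord, nRecord,      /* record content */
        0,                     /* nZero: no trailing zero bytes */
        0, 0);                 /* no append bias; cursor not pre-positioned */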

    Malloc and IO error handling. Maybe these should be grouped together for a whole bunch of APIs, and hooked into the above via a definition of "successful call".

    3.8.9 sqlite3BtreeIncrVacuum

    + +
    int sqlite3BtreeIncrVacuum(Btree *);
    + +

    3.9 What do these do?

    + + +

    + The following is used only from within VdbeExec() to check whether or not + a cursor was opened on a table or index b-tree. Corruption tests can move into + the b-tree layer. + +

    int sqlite3BtreeFlags(BtCursor*);
    + +

    + Where do the following go? + +

    char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
    +struct Pager *sqlite3BtreePager(Btree*);
    +int sqlite3BtreeCopyFile(Btree *, Btree *);
    + +
    void *sqlite3BtreeSchema(Btree *, int, void(*)(void *));
    +int sqlite3BtreeSchemaLocked(Btree *);
    +int sqlite3BtreeLockTable(Btree *, int, u8);
    +void sqlite3BtreeTripAllCursors(Btree*, int);
    + +

    I know what the following do, but is this mechanism ever used? Or has it been superseded by other tricks in OP_NewRowid?

    void sqlite3BtreeSetCachedRowid(BtCursor*, sqlite3_int64);
    +sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor*);
    + +

    + Should move to btreeInt.h + +

    typedef struct BtShared BtShared;
    + +

    3.10 APIs not branded sqlite3BtreeXXX()

    + + +
      +
    • sqlite3PagerLockingMode +
    • sqlite3PagerJournalMode +
    • sqlite3PagerIsMemdb (vacuum and backup). +
    • sqlite3PagerJournalSizeLimit +
    • sqlite3PagerFile (used by sqlite3_file_control() and pragma lock_proxy_file). +
    • sqlite3PagerPagecount (pragma page_count and backup). +
    • Page APIs used by backup routines: +
        +
      • sqlite3PagerGet +
      • sqlite3PagerWrite +
      • sqlite3PagerGetData +
      • sqlite3PagerGetExtra +
      • sqlite3PagerUnref +
      • sqlite3PagerTruncateImage +
      • sqlite3PagerSync +
      • sqlite3PagerFile +
      • sqlite3PagerCommitPhaseOne, sqlite3PagerCommitPhaseTwo +
      • sqlite3PagerBackupPtr +
      +
    + + +

    4 Module Implementation

    + + +

    4.1 Database Image Traversal

    + + +

    4.2 Database Image Manipulation

    + + +

    + This section should describe exactly how bits and bytes are shifted + around when the database image is traversed and modified. i.e. how the b-tree + balancing works, deleting an internal cell from an index b-tree etc. + +

    4.2.1 Creating a B-Tree Structure

    + +

    4.2.2 Clearing a B-Tree Structure

    + +

    4.2.3 Deleting a B-Tree Structure

    + + +

    4.2.4 Inserting, Replacing and Deleting B-Tree Entries

    + + +

    + The following two sections describe the way entries are added and removed + from B-Tree structures within a database image. + +

    As one might expect, the algorithms described in the following sections involve adding and removing b-tree cells to and from b-tree node pages. The format of b-tree node pages is described in detail in [1]. This document does not describe the exact way in which content is manipulated within a page, as these details are not considered high-level enough to be documented outside of the SQLite source code itself. For the purposes of the descriptions in the following sections, a b-tree node page is considered to be a container for an ordered list of b-tree cells. Cells may be inserted into or removed from any position in the ordered list as required.

    A b-tree node page has a finite capacity. If one of the algorithms described here is required to insert a cell into a b-tree node page, and there is not enough free space within the page to accommodate the cell, it is still nominally inserted into the requested position within the node, but becomes an overflow cell. Overflow cells never remain so for very long. If an insert, replace or delete entry operation creates one or more overflow cells, the b-tree structure is rearranged so that all cells are stored within the body of a b-tree node page before the operation is considered complete. This process of rearranging the b-tree structure is termed b-tree balancing, and is described in section 4.2.5.

    4.2.4.1 B-Tree Insert/Replace Entry

    + + +

    + This section describes the way in which new entries may be inserted + into a b-tree structure, and how existing entries may be replaced. Both + of these operations are accessed using the sqlite3BtreeInsert API. + +

    + An insert/replace operation involves the following steps: + +

      +
    1. Based on the supplied key and value, and the type of b-tree being + inserted into, allocate and populate any required overflow pages. + Should reference file-format requirements that + provide the formula for doing this. + +
    2. Attempt to move the b-tree write cursor to an entry with a key + that matches the new key being inserted. If a matching entry is + found, then the operation is a replace. Otherwise, if the key is + not found, an insert. + +
        +
      1. Requirements L50008, L50009, L50010 and L50011 apply to the cursor + seek operation here. This ensures that if the search does not find + an exact match, the cursor is left pointing to the leaf page that + the new entry should be added into. + +
      2. As specified by L50006, the cursor may already be positioned. In + this case the seek operation is not required. +
      + +
    3. If a matching key was found in the b-tree, then it must be removed and + the new entry added in its place. + +
        +
      1. If there are one or more overflow pages associated with the entry + being replaced, they are moved to the free-list. +
      2. The cell corresponding to the entry being removed is removed from + the b-tree node page. +
      3. The new cell is inserted in the position previously occupied by the cell removed in the previous step. If the page is not a leaf page, then the first four bytes (the child-page pointer) of the old cell are copied to the first four bytes of the new cell. If the new cell is larger than the cell that it replaced, then it may become an overflow cell. +
      + +
    4. If no matching key was found in the b-tree, then the new cell is inserted + into the leaf page that the cursor was left pointing to by step 1. The + new cell may become an overflow cell. + +
    5. If the new cell is now an overflow cell, then the balancing algorithm + (see section 4.2.5) is run on the + overflowing b-tree node page. +
    + +

    4.2.4.2 B-Tree Delete Entry

    + + +

    + This section describes the way in which entries may be removed from + a b-tree structure, as required when the sqlite3BtreeDelete (section + 3.8.7) API is invoked. Removing an entry + from a b-tree table involves the following steps: + +

      +
    1. All overflow pages in the overflow page chain (if any) associated + with the entry must be moved to the database free-list. If the + database image is an autovacuum database, the pointer-map entries + that correspond to each overflow page in the chain must be updated. + +
    2. The b-tree cell corresponding to the entry must be removed from + the b-tree structure. +
    + +

    + Note about the optimization that makes it possible to move overflow pages + to the free-list without reading their contents (i.e. without loading them + into the cache). + +

    + If the b-tree entry being removed is located on a leaf page (as is always the + case with table b-tree structures), then deleting an entry from a b-tree + is quite simple. + + +

    + + +

    Figure 2 - Delete from an Internal Node +

    + + +

    4.2.5 B-Tree Balancing Algorithm

    + + +
      +
    • The balance deeper sub-algorithm is used when the root page of + a b-tree is overfull. It creates a new page and copies the + entire contents of the overfull root page to it. The root page + is then zeroed and the new page installed as its only child. + The balancing algorithm is then run on the new child page (in case + it is overfull). + +

    • The balance shallower sub-algorithm is used when the root page + of a b-tree has only a single child page. If possible, the data from + the child page is copied into the root-page and the child page discarded. + +

    • The balance quick sub-algorithm is used in a very specific, + but common scenario. It is used only for table b-trees, when a new entry that + has a key value greater than all existing keys in the b-tree is inserted and + causes the right-most leaf page of the b-tree structure to become overfull. + +

    • The balance siblings sub-algorithm is run when a b-tree page that + is not the root-page of its b-tree structure is either overfull or underfull. + + + + +

    + +

    4.2.5.1 Balance Deeper

    + +
      +
    1. Allocate a new page (the child-page). +
    2. Copy page data from root-page to child-page (including overflow cells). +
    3. Fix pointer map entries associated with new child-page content. +
    4. Zero the root-page. +
    5. Set the right-child pointer of the root-page to point to the new child-page. +
    6. Set the pointer map entry for the new child page. +
    7. Execute the balance procedure on the new child page. +
    + + +
    + + +

    Figure 3 - Example Balance Deeper Transform +

    + + +

    4.2.5.2 Balance Shallower

    + + +
      +
    1. Copy node data from child-page to root-page. +
    2. Fix pointer map entries associated with new root-page content. +
    3. Move child-page to database image free-list. +
    + + +
    + + +

    Figure 4 - Example Balance Shallower Transform +

    + + +

    4.2.5.3 Balance Quick

    + + +
      +
    1. Allocate a new page (the new sibling-page). + +
    2. Populate the new sibling page with the new b-tree entry. + +
    3. Add a new divider cell to the parent. The divider cell contains a + pointer to the page that is currently the right-child of the parent. + The key in the new divider cell is a copy of the largest key in the + page that is currently the right-child of the parent. + +
    4. Set the right-child of the parent page to point to the new sibling page. + +
    5. If the database is an auto-vacuum database, set the pointer map + entry associated with the new sibling page. If the cell on the new + sibling page contains a pointer to an overflow page, set the pointer map + entry associated with the overflow page. + +
    6. Execute the balance procedure on the parent page. +
    + + +
    + + +

    Figure 5 - Example Balance Quick Transform +

    + + +

    4.2.5.4 Balance Siblings

    + + +

    The balance-siblings algorithm shall redistribute the b-tree cells currently +stored on a overfull or underfull page and up to two sibling pages, adding +or removing siblings as required, such that no sibling page is overfull and +the minimum possible number of sibling pages is used to store the +redistributed b-tree cells.

    + +

    The following describes how balance() is to be implemented. This represents (I think) the lowest level of detail that should be in this document. One skilled in the art could use this description to reimplement SQLite's balance-siblings algorithm. We also need requirements at a higher level of detail in this section. Something to test!

    The balance-siblings algorithm, as implemented by SQLite, is described as a series of steps below. There are a few terms used below that need definitions/clarifications.

      +
    1. Determine the set of sibling pages to redistribute the cells of, using + the following rules: +
        +
      1. If the parent page has three or fewer child pages, then all child + pages are deemed to be sibling pages for the purposes of the balance-siblings + algorithm. +
      2. If the page being balanced is the left-most child of the parent + page, then the three left-most child pages are used as the siblings. +
      3. If the page being balanced is the right-most child of the parent + page, then the three right-most child pages are used as the siblings. +
      4. Otherwise, if none of the above three conditions are true, then the sibling pages are the page being balanced and the child pages immediately to the left and right of it. +
      + +
    2. Determine an ordered list of cells to redistribute. There are several + variations of this step, depending on the type of page being balanced. +
        +
      1. If the page being balanced is a leaf page of a table b-tree, + then the list of cells to redistribute is simply the concatenation + of the ordered lists of cells stored on each sibling page, in order + from left-most sibling to right-most. +
      2. If the page being balanced is a leaf page of an index b-tree, then + the list of cells to redistribute is comprised of the cells on each + of the sibling pages and the divider cells in the parent page that + contain the pointers to each sibling page except the right-most. The + list is arranged so that it contains: +
          +
        • The cells from the left-most sibling page, in order, followed by +
        • the divider cell from the parent page that contains the pointer + to the left-most sibling (if there is more than one sibling + page), followed by +
        • the divider cell that contains the pointer to the second left-most + sibling and the cells from the remaining sibling page (if there are three + sibling pages). +
        +
      3. If the page being balanced is an internal b-tree node, then the list of + cells to redistribute is determined as described in the previous case. + However, when balancing an internal node each cell is associated with + the page number of a child page of one of the sibling pages. The page + number associated with cells stored on a sibling page is the same as + the page number stored as the first four bytes of the cell. The page + number associated with a divider cell within the parent page is the page + number of the right-child page of the sibling page to which the divider + cell contains a pointer. +
      + +
    3. Determine the new cell distribution, using the following steps: +
        +
      1. Assign as many cells as will fit from the start of the ordered list of cells to the left-most sibling page. Then, if any cells remain, assign one to be a divider cell, and as many as will fit to the next sibling page. Repeat until all cells have been assigned a location. (Note: no divider cells are used when balancing table b-tree leaf pages.) +
      2. The previous step generates a distribution that is biased towards the + left-hand side. The right-most sibling may even be completely + empty (if the last cell in the ordered list was assigned to be a + divider cell). To rectify this, cells are moved out of the second + right-most sibling page and into the right-most, one at a time, until + there is at least one cell in the right-most sibling page and to move + another cell would mean that the right-most sibling page is more full + than the next to right-most sibling page. This is repeated for the next + right-most pair of sibling pages, shifting cells out of the third + right-most sibling page and into the second right-most, and so on. + note about divider cells +
      + +
    4. Determine the set of database pages to use as the new sibling pages. + +
        +
      1. If there were an equal or greater number of siblings identified + in step 1 than are required by the distribution calculated in step 3, + reuse as many as possible, starting with the left-most. If step 3 + calculated a distribution that requires more sibling pages than were + identified in step 1, allocate the required extra pages using the + Refer to ??? algorithm. + +
      2. Arrange the new sibling pages from left to right in ascending + page number order. The new sibling page with the smallest page number + becomes the left-most sibling page, and so forth. +
      + +
    5. Populate the new sibling pages. +
        +
      1. Populate each new sibling page with the required set of cells. If the + page being balanced is not a leaf page, then the child-page pointer + field of each cell is populated with the page-number associated with + the cell as part of step 2 above. + +
      2. If the page being balanced is not a leaf page, then the right-child + pointer stored in the page header of each new sibling page must also + be populated. For each new sibling page except the right-most, this + field is set to the page number associated with the cell that + immediately follows the cells stored on the page (the cell that was + assigned to be divider cell in step 3). For the right-most sibling page, + the right-child pointer is set to the value that was stored in the + right-child pointer of the right-most original sibling page identified + in step 1. +
      +
    6. Populate the parent page. +
        +
      1. If the page being balanced is (was) not a leaf page of a table + b-tree, the cells that contained pointers to the old sibling + pages are replaced by the cells designated as divider cells as part + of step 3. The right-child pointer field of the first divider cell + is overwritten with the page number of the first new sibling page, and + so on. + +
      2. If the page being balanced is (was) a leaf page of a table b-tree, the cells that contained pointers to the old sibling pages are replaced by a divider cell associated with all but the right-most sibling page. The child-page number stored in each divider cell is set to the page number of the associated sibling. The integer key value stored in each divider cell is a copy of the largest integer key value stored on the associated sibling page. +
      3. Before balancing, the parent page contained a pointer to the right-most + sibling page, either as part of a cell or as the right-child pointer + stored in the page header. Either way, this value must be overwritten + with the page number of the new right-most sibling page. + +
      + +
    7. Populate pointer map entries. +
        +
      1. For each sibling page that was not also an original sibling page, the + associated pointer-map entry must be updated. Similarly, the pointer-map + entry associated with each original sibling page that is no longer a + sibling page must be updated. +
      2. For each cell containing an overflow pointer that has been moved from one + page to another, the pointer-map entry associated with the overflow page + must be updated. +
      3. If the page being balanced is (was) not a leaf, then for each cell that + has moved from one page to another the pointer-map entry associated with + the cell's child page must be updated. +
      4. If the page being balanced is (was) not a leaf, then the pointer-map entry + associated with each sibling's right-child page may need to be updated. +
      +
    + +

    4.2.6 Page Allocation and Deallocation

    + + +

    + Amongst other things, this section needs to explain our old pals the + DontWrite() and DontRollback() optimizations. + +

    4.2.6.1 Moving an overflow-chain to the free-list

    + + +

    + Describe how this can sometimes be done without reading the content of + overflow pages. + +

    4.2.7 Incremental Vacuum Step

    + + + + + +

    4.3 Transactions and Savepoints

    + + +

    + Requirements surrounding how transactions are made atomic and isolated. + Also how savepoints are implemented. What happens to active cursors after + a rollback or savepoint-rollback. + +

    5 References

    + + + +
    [1] + SQLite Online Documentation,SQLite Database File Format, + http://www.sqlite.org/fileformat.html. + +
    [2] + SQLite Online Documentation,Application Defined Page Cache, + http://www.sqlite.org/c3ref/pcache_methods.html. + +
    [3] + SQLite Online Documentation,OS Interface Object, + http://www.sqlite.org/c3ref/vfs.html. + + +
    + + +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/aggregate_context.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/aggregate_context.html --- sqlite3-3.4.2/www/c3ref/aggregate_context.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/aggregate_context.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,102 @@ + + +Obtain Aggregate Function Context + + + + + +

    SQLite C Interface

    Obtain Aggregate Function Context

    void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
    +

    The implementations of aggregate SQL functions use this routine to allocate a structure for storing their state.

    + +

    The first time the sqlite3_aggregate_context() routine is called for a particular aggregate, SQLite allocates nBytes of memory, zeroes out that memory, and returns a pointer to it. On second and subsequent calls to sqlite3_aggregate_context() for the same aggregate function instance, the same buffer is returned. The implementation of the aggregate can use the returned buffer to accumulate data.

    + +

    SQLite automatically frees the allocated buffer when the aggregate +query concludes.

    + +

    The first parameter should be a copy of the +SQL function context that is the first parameter +to the callback routine that implements the aggregate function.

    + +

    This routine must be called from the same thread in which +the aggregate SQL function is running.
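    A minimal sketch of how an aggregate implementation typically uses this routine; the example is a simple integer SUM, and it is assumed that sumStep/sumFinal have been registered with sqlite3_create_function():

    typedef struct SumCtx SumCtx;
    struct SumCtx { sqlite3_int64 total; };

    static void sumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      SumCtx *p = (SumCtx *)sqlite3_aggregate_context(ctx, sizeof(*p));
      (void)argc;                                        /* single-argument aggregate assumed */
      if( p ) p->total += sqlite3_value_int64(argv[0]);  /* buffer is zeroed on the first call */
    }
    static void sumFinal(sqlite3_context *ctx){
      SumCtx *p = (SumCtx *)sqlite3_aggregate_context(ctx, sizeof(*p));
      sqlite3_result_int64(ctx, p ? p->total : 0);       /* buffer is freed by SQLite afterwards */
    }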

    + +

    Requirements: +H16211 H16213 H16215 H16217 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/aggregate_count.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/aggregate_count.html --- sqlite3-3.4.2/www/c3ref/aggregate_count.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/aggregate_count.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Deprecated Functions + + + + + +

    SQLite C Interface

    Deprecated Functions

    #ifndef SQLITE_OMIT_DEPRECATED
    +int sqlite3_aggregate_count(sqlite3_context*);
    +int sqlite3_expired(sqlite3_stmt*);
    +int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
    +int sqlite3_global_recover(void);
    +void sqlite3_thread_cleanup(void);
    +int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),void*,sqlite3_int64);
    +#endif
    +

    +These functions are deprecated. In order to maintain +backwards compatibility with older code, these functions continue +to be supported. However, new applications should avoid +the use of these functions. To help encourage people to avoid +using these functions, we are not going to tell you what they do. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/auto_extension.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/auto_extension.html --- sqlite3-3.4.2/www/c3ref/auto_extension.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/auto_extension.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,101 @@ + + +Automatically Load An Extensions + + + + + +

    SQLite C Interface

    Automatically Load An Extension

    int sqlite3_auto_extension(void (*xEntryPoint)(void));
    +

    +This API can be invoked at program startup in order to register +one or more statically linked extensions that will be available +to all new database connections.

    + +

    This routine stores a pointer to the extension in an array that is +obtained from sqlite3_malloc(). If you run a memory leak checker +on your program and it reports a leak because of this array, invoke +sqlite3_reset_auto_extension() prior to shutdown to free the memory.

    + +

    This function registers an extension entry point that is +automatically invoked whenever a new database connection +is opened using sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2().

    + +

    Duplicate extensions are detected so calling this routine +multiple times with the same extension is harmless.

    + +

    This routine stores a pointer to the extension in an array +that is obtained from sqlite3_malloc().

    + +

    Automatic extensions apply across all threads. +
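    A minimal sketch of registering a statically linked extension at program startup; myExtensionInit is a hypothetical entry point defined elsewhere in the application:

    #include "sqlite3.h"

    extern void myExtensionInit(void);     /* hypothetical extension entry point */

    int main(void){
      sqlite3_auto_extension(myExtensionInit);
      /* ... every connection opened from here on invokes myExtensionInit ... */
      return 0;
    }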

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/backup_finish.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/backup_finish.html --- sqlite3-3.4.2/www/c3ref/backup_finish.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/backup_finish.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,256 @@ + + +Online Backup API. + + + + + +

    SQLite C Interface

    Online Backup API.

    sqlite3_backup *sqlite3_backup_init(
    +  sqlite3 *pDest,                        /* Destination database handle */
    +  const char *zDestName,                 /* Destination database name */
    +  sqlite3 *pSource,                      /* Source database handle */
    +  const char *zSourceName                /* Source database name */
    +);
    +int sqlite3_backup_step(sqlite3_backup *p, int nPage);
    +int sqlite3_backup_finish(sqlite3_backup *p);
    +int sqlite3_backup_remaining(sqlite3_backup *p);
    +int sqlite3_backup_pagecount(sqlite3_backup *p);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This API is used to overwrite the contents of one database with that +of another. It is useful either for creating backups of databases or +for copying in-memory databases to or from persistent files.

    + +

    See Also: Using the SQLite Online Backup API

    + +

    Exclusive access is required to the destination database for the duration of the operation. However, the source database is only read-locked while it is actually being read; it is not locked continuously for the entire operation. Thus, the backup may be performed on a live database without preventing other users from writing to the database for an extended period of time.

    + +

    To perform a backup operation: +

      +
    1. sqlite3_backup_init() is called once to initialize the +backup, +
    2. sqlite3_backup_step() is called one or more times to transfer +the data between the two databases, and finally +
    3. sqlite3_backup_finish() is called to release all resources +associated with the backup operation. +
    +There should be exactly one call to sqlite3_backup_finish() for each +successful call to sqlite3_backup_init().
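    A minimal sketch of this three-step sequence, assuming open database handles pSrc (source) and pDest (destination):

    sqlite3_backup *pBackup = sqlite3_backup_init(pDest, "main", pSrc, "main");
    if( pBackup ){
      int rc;
      do{
        rc = sqlite3_backup_step(pBackup, 100);        /* copy up to 100 pages per step */
        if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
          sqlite3_sleep(100);                          /* back off briefly, then retry */
        }
      }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
      sqlite3_backup_finish(pBackup);
    }
    /* On failure the error code and message are available from pDest via
    ** sqlite3_errcode() and sqlite3_errmsg(). */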

    + +

    sqlite3_backup_init()

    + +

    The first two arguments passed to sqlite3_backup_init() are the database +handle associated with the destination database and the database name +used to attach the destination database to the handle. The database name +is "main" for the main database, "temp" for the temporary database, or +the name specified as part of the ATTACH statement if the destination is +an attached database. The third and fourth arguments passed to +sqlite3_backup_init() identify the database connection +and database name used +to access the source database. The values passed for the source and +destination database connection parameters must not be the same.

    + +

    If an error occurs within sqlite3_backup_init(), then NULL is returned +and an error code and error message written into the database connection +passed as the first argument. They may be retrieved using the +sqlite3_errcode(), sqlite3_errmsg(), and sqlite3_errmsg16() functions. +Otherwise, if successful, a pointer to an sqlite3_backup object is +returned. This pointer may be used with the sqlite3_backup_step() and +sqlite3_backup_finish() functions to perform the specified backup +operation.

    + +

    sqlite3_backup_step()

    + +

    Function sqlite3_backup_step() is used to copy up to nPage pages between the source and destination databases, where nPage is the value of the second parameter passed to sqlite3_backup_step(). If nPage is a negative value, all remaining source pages are copied. If the required pages are successfully copied, but there are still more pages to copy before the backup is complete, it returns SQLITE_OK. If no error occurred and there are no more pages to copy, then SQLITE_DONE is returned. If an error occurs, then an SQLite error code is returned. As well as SQLITE_OK and SQLITE_DONE, a call to sqlite3_backup_step() may return SQLITE_READONLY, SQLITE_NOMEM, SQLITE_BUSY, SQLITE_LOCKED, or an SQLITE_IOERR_XXX extended error code.

    + +

    As well as the case where the destination database file was opened for +read-only access, sqlite3_backup_step() may return SQLITE_READONLY if +the destination is an in-memory database with a different page size +from the source database.

    + +

    If sqlite3_backup_step() cannot obtain a required file-system lock, then the busy-handler function is invoked (if one is specified). If the busy-handler returns non-zero before the lock is available, then SQLITE_BUSY is returned to the caller. In this case the call to sqlite3_backup_step() can be retried later. If the source database connection is being used to write to the source database when sqlite3_backup_step() is called, then SQLITE_LOCKED is returned immediately. Again, in this case the call to sqlite3_backup_step() can be retried later on. If SQLITE_IOERR_XXX, SQLITE_NOMEM, or SQLITE_READONLY is returned, then there is no point in retrying the call to sqlite3_backup_step(). These errors are considered fatal. At this point the application must accept that the backup operation has failed and pass the backup operation handle to sqlite3_backup_finish() to release associated resources.

    + +

    Following the first call to sqlite3_backup_step(), an exclusive lock is obtained on the destination file. It is not released until either sqlite3_backup_finish() is called or the backup operation is complete and sqlite3_backup_step() returns SQLITE_DONE. Additionally, each time a call to sqlite3_backup_step() is made a shared lock is obtained on the source database file. This lock is released before the sqlite3_backup_step() call returns. Because the source database is not locked between calls to sqlite3_backup_step(), it may be modified mid-way through the backup procedure. If the source database is modified by an external process or via a database connection other than the one being used by the backup operation, then the backup will be transparently restarted by the next call to sqlite3_backup_step(). If the source database is modified using the same database connection as is used by the backup operation, then the backup database is transparently updated at the same time.

    + +

    sqlite3_backup_finish()

    + +

    Once sqlite3_backup_step() has returned SQLITE_DONE, or when the +application wishes to abandon the backup operation, the sqlite3_backup +object should be passed to sqlite3_backup_finish(). This releases all +resources associated with the backup operation. If sqlite3_backup_step() +has not yet returned SQLITE_DONE, then any active write-transaction on the +destination database is rolled back. The sqlite3_backup object is invalid +and may not be used following a call to sqlite3_backup_finish().

    + +

    The value returned by sqlite3_backup_finish() is SQLITE_OK if no error occurred, regardless of whether or not sqlite3_backup_step() was called a sufficient number of times to complete the backup operation. Or, if an out-of-memory condition or IO error occurred during a call to sqlite3_backup_step(), then SQLITE_NOMEM or an SQLITE_IOERR_XXX error code is returned. In this case the error code and an error message are written to the destination database connection.

    + +

    A return of SQLITE_BUSY or SQLITE_LOCKED from sqlite3_backup_step() is +not a permanent error and does not affect the return value of +sqlite3_backup_finish().

    + +

    sqlite3_backup_remaining(), sqlite3_backup_pagecount()

    + +

    Each call to sqlite3_backup_step() sets two values stored internally by an sqlite3_backup object: the number of pages still to be backed up, which may be queried by sqlite3_backup_remaining(), and the total number of pages in the source database file, which may be queried by sqlite3_backup_pagecount().

    + +

    The values returned by these functions are only updated by +sqlite3_backup_step(). If the source database is modified during a backup +operation, then the values are not updated to account for any extra +pages that need to be updated or the size of the source database file +changing.

    + +

    Concurrent Usage of Database Handles

    + +

    The source database connection may be used by the application for other +purposes while a backup operation is underway or being initialized. +If SQLite is compiled and configured to support threadsafe database +connections, then the source database connection may be used concurrently +from within other threads.

    + +

    However, the application must guarantee that the destination database connection handle is not passed to any other API (by any thread) after sqlite3_backup_init() is called and before the corresponding call to sqlite3_backup_finish(). Unfortunately SQLite does not currently check for this; if the application does use the destination database connection for some other purpose during a backup operation, things may appear to work correctly but in fact be subtly malfunctioning. Use of the destination database connection while a backup is in progress might also cause a mutex deadlock.

    + +

    Furthermore, if running in shared cache mode, the application must +guarantee that the shared cache used by the destination database +is not accessed while the backup is running. In practice this means +that the application must guarantee that the file-system file being +backed up to is not accessed by any connection within the process, +not just the specific connection that was passed to sqlite3_backup_init().

    + +

    The sqlite3_backup object itself is partially threadsafe. Multiple +threads may safely make multiple concurrent calls to sqlite3_backup_step(). +However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() +APIs are not strictly speaking threadsafe. If they are invoked at the +same time as another thread is invoking sqlite3_backup_step() it is +possible that they return invalid values. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/backup.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/backup.html --- sqlite3-3.4.2/www/c3ref/backup.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/backup.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Online Backup Object + + + + + +

    SQLite C Interface

    Online Backup Object

    typedef struct sqlite3_backup sqlite3_backup;
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_backup object records state information about an ongoing +online backup operation. The sqlite3_backup object is created by +a call to sqlite3_backup_init() and is destroyed by a call to +sqlite3_backup_finish().

    + +

    See Also: Using the SQLite Online Backup API +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/bind_blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/bind_blob.html --- sqlite3-3.4.2/www/c3ref/bind_blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/bind_blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,166 @@ + + +Binding Values To Prepared Statements + + + + + +

    SQLite C Interface

    Binding Values To Prepared Statements

    int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +int sqlite3_bind_double(sqlite3_stmt*, int, double);
    +int sqlite3_bind_int(sqlite3_stmt*, int, int);
    +int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
    +int sqlite3_bind_null(sqlite3_stmt*, int);
    +int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
    +int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
    +int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
    +int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
    +

    +In the SQL strings input to sqlite3_prepare_v2() and its variants, +literals may be replaced by a parameter in one of these forms:

    + +

      +
    • ? +
    • ?NNN +
    • :VVV +
    • @VVV +
    • $VVV +

    + +

    In the parameter forms shown above NNN is an integer literal, +and VVV is an alpha-numeric parameter name. The values of these +parameters (also called "host parameter names" or "SQL parameters") +can be set using the sqlite3_bind_*() routines defined here.

    + +

    The first argument to the sqlite3_bind_*() routines is always +a pointer to the sqlite3_stmt object returned from +sqlite3_prepare_v2() or its variants.

    + +

    The second argument is the index of the SQL parameter to be set. +The leftmost SQL parameter has an index of 1. When the same named +SQL parameter is used more than once, second and subsequent +occurrences have the same index as the first occurrence. +The index for named parameters can be looked up using the +sqlite3_bind_parameter_index() API if desired. The index +for "?NNN" parameters is the value of NNN. +The NNN value must be between 1 and the sqlite3_limit() +parameter SQLITE_LIMIT_VARIABLE_NUMBER (default value: 999).

    + +

    The third argument is the value to bind to the parameter.

    + +

    In those routines that have a fourth argument, its value is the +number of bytes in the parameter. To be clear: the value is the +number of bytes in the value, not the number of characters. +If the fourth parameter is negative, the length of the string is +the number of bytes up to the first zero terminator.

    + +

    The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and +sqlite3_bind_text16() is a destructor used to dispose of the BLOB or +string after SQLite has finished with it. If the fifth argument is +the special value SQLITE_STATIC, then SQLite assumes that the +information is in static, unmanaged space and does not need to be freed. +If the fifth argument has the value SQLITE_TRANSIENT, then +SQLite makes its own private copy of the data immediately, before +the sqlite3_bind_*() routine returns.

    + +

    The sqlite3_bind_zeroblob() routine binds a BLOB of length N that +is filled with zeroes. A zeroblob uses a fixed amount of memory +(just an integer to hold its size) while it is being processed. +Zeroblobs are intended to serve as placeholders for BLOBs whose +content is later written using +incremental BLOB I/O routines. +A negative value for the zeroblob results in a zero-length BLOB.

    + +

    The sqlite3_bind_*() routines must be called after +sqlite3_prepare_v2() (and its variants) or sqlite3_reset() and +before sqlite3_step(). +Bindings are not cleared by the sqlite3_reset() routine. +Unbound parameters are interpreted as NULL.
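To make the calling sequence concrete, here is a small sketch (not part of the original page; the table t1(a,b) is invented for the example) that prepares a statement, binds one text and one integer value, and executes it:

/* Illustrative only: assumes a table t1(a TEXT, b INTEGER) exists. */
sqlite3_stmt *pStmt = 0;
int rc = sqlite3_prepare_v2(db, "INSERT INTO t1(a,b) VALUES(?1,?2)", -1, &pStmt, 0);
if( rc==SQLITE_OK ){
  sqlite3_bind_text(pStmt, 1, "hello", -1, SQLITE_TRANSIENT);  /* SQLite copies the string */
  sqlite3_bind_int(pStmt, 2, 42);
  rc = sqlite3_step(pStmt);        /* SQLITE_DONE is expected for an INSERT */
}
sqlite3_finalize(pStmt);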

    + +

These routines return SQLITE_OK on success or an error code if +anything goes wrong. SQLITE_RANGE is returned if the parameter +index is out of range. SQLITE_NOMEM is returned if malloc() fails. +SQLITE_MISUSE might be returned if these routines are called on a +virtual machine that is in the wrong state or which has already been finalized. +Detection of misuse is unreliable. Applications should not depend +on SQLITE_MISUSE returns. SQLITE_MISUSE is intended to indicate +a logic error in the application. Future versions of SQLite might +panic rather than return SQLITE_MISUSE.

    + +

    See also: sqlite3_bind_parameter_count(), +sqlite3_bind_parameter_name(), and sqlite3_bind_parameter_index().

    + +

    Requirements: +H13506 H13509 H13512 H13515 H13518 H13521 H13524 H13527 +H13530 H13533 H13536 H13539 H13542 H13545 H13548 H13551

    + +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/bind_parameter_count.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/bind_parameter_count.html --- sqlite3-3.4.2/www/c3ref/bind_parameter_count.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/bind_parameter_count.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +Number Of SQL Parameters + + + + + +

    SQLite C Interface

    Number Of SQL Parameters

    int sqlite3_bind_parameter_count(sqlite3_stmt*);
    +

    +This routine can be used to find the number of SQL parameters +in a prepared statement. SQL parameters are tokens of the +form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as +placeholders for values that are bound +to the parameters at a later time.

    + +

This routine actually returns the index of the largest (rightmost) +parameter. For all forms except ?NNN, this will correspond to the +number of unique parameters. If parameters of the ?NNN form are used, +there may be gaps in the list.

    + +

    See also: sqlite3_bind(), +sqlite3_bind_parameter_name(), and +sqlite3_bind_parameter_index().

    + +

    Requirements: +H13601 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/bind_parameter_index.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/bind_parameter_index.html --- sqlite3-3.4.2/www/c3ref/bind_parameter_index.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/bind_parameter_index.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +Index Of A Parameter With A Given Name + + + + + +

    SQLite C Interface

    Index Of A Parameter With A Given Name

    int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
    +

    +Return the index of an SQL parameter given its name. The +index value returned is suitable for use as the second +parameter to sqlite3_bind(). A zero +is returned if no matching parameter is found. The parameter +name must be given in UTF-8 even if the original statement +was prepared from UTF-16 text using sqlite3_prepare16_v2().
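A brief sketch (not from the original page; the statement text and parameter name are invented) of looking a parameter up by name and binding through the returned index:

/* Illustrative only. */
sqlite3_stmt *pStmt = 0;
sqlite3_prepare_v2(db, "SELECT * FROM users WHERE id = :uid", -1, &pStmt, 0);
int idx = sqlite3_bind_parameter_index(pStmt, ":uid");   /* 0 if no such parameter */
if( idx>0 ) sqlite3_bind_int(pStmt, idx, 7);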

    + +

See also: sqlite3_bind(), +sqlite3_bind_parameter_count(), and +sqlite3_bind_parameter_name().

    + +

    Requirements: +H13641 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/bind_parameter_name.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/bind_parameter_name.html --- sqlite3-3.4.2/www/c3ref/bind_parameter_name.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/bind_parameter_name.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,104 @@ + + +Name Of A Host Parameter + + + + + +

    SQLite C Interface

    Name Of A Host Parameter

    const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
    +

    +This routine returns a pointer to the name of the n-th +SQL parameter in a prepared statement. +SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" +have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" +respectively. +In other words, the initial ":" or "$" or "@" or "?" +is included as part of the name. +Parameters of the form "?" without a following integer have no name +and are also referred to as "anonymous parameters".

    + +

    The first host parameter has an index of 1, not 0.

    + +

    If the value n is out of range or if the n-th parameter is +nameless, then NULL is returned. The returned string is +always in UTF-8 encoding even if the named parameter was +originally specified as UTF-16 in sqlite3_prepare16() or +sqlite3_prepare16_v2().

    + +

    See also: sqlite3_bind(), +sqlite3_bind_parameter_count(), and +sqlite3_bind_parameter_index().

    + +

    Requirements: +H13621 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob_bytes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob_bytes.html --- sqlite3-3.4.2/www/c3ref/blob_bytes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob_bytes.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Return The Size Of An Open BLOB + + + + + +

    SQLite C Interface

    Return The Size Of An Open BLOB

    int sqlite3_blob_bytes(sqlite3_blob *);
    +

+Returns the size in bytes of the BLOB accessible via the +successfully opened BLOB handle in its only argument. The +incremental blob I/O routines can only read or overwrite existing +blob content; they cannot change the size of a blob.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    Requirements: +H17843 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob_close.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob_close.html --- sqlite3-3.4.2/www/c3ref/blob_close.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob_close.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,101 @@ + + +Close A BLOB Handle + + + + + +

    SQLite C Interface

    Close A BLOB Handle

    int sqlite3_blob_close(sqlite3_blob *);
    +

    +Closes an open BLOB handle.

    + +

    Closing a BLOB shall cause the current transaction to commit +if there are no other BLOBs, no pending prepared statements, and the +database connection is in autocommit mode. +If any writes were made to the BLOB, they might be held in cache +until the close operation if they will fit.

    + +

    Closing the BLOB often forces the changes +out to disk and so if any I/O errors occur, they will likely occur +at the time when the BLOB is closed. Any errors that occur during +closing are reported as a non-zero return value.

    + +

    The BLOB is closed unconditionally. Even if this routine returns +an error code, the BLOB is still closed.

    + +

Calling this routine with a null pointer (as would be returned +by a failed call to sqlite3_blob_open()) is a harmless no-op.

    + +

    Requirements: +H17833 H17836 H17839 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob.html --- sqlite3-3.4.2/www/c3ref/blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +A Handle To An Open BLOB + + + + + +

    SQLite C Interface

    A Handle To An Open BLOB

    typedef struct sqlite3_blob sqlite3_blob;
    +

    +An instance of this object represents an open BLOB on which +incremental BLOB I/O can be performed. +Objects of this type are created by sqlite3_blob_open() +and destroyed by sqlite3_blob_close(). +The sqlite3_blob_read() and sqlite3_blob_write() interfaces +can be used to read or write small subsections of the BLOB. +The sqlite3_blob_bytes() interface returns the size of the BLOB in bytes. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob_open.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob_open.html --- sqlite3-3.4.2/www/c3ref/blob_open.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob_open.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,140 @@ + + +Open A BLOB For Incremental I/O + + + + + +

    SQLite C Interface

    Open A BLOB For Incremental I/O

    int sqlite3_blob_open(
    +  sqlite3*,
    +  const char *zDb,
    +  const char *zTable,
    +  const char *zColumn,
    +  sqlite3_int64 iRow,
    +  int flags,
    +  sqlite3_blob **ppBlob
    +);
    +

+This interface opens a handle to the BLOB located +in row iRow, column zColumn, table zTable in database zDb; +in other words, the same BLOB that would be selected by:

    + +

    +SELECT zColumn FROM zDb.zTable WHERE rowid = iRow;
    +

    + +

    If the flags parameter is non-zero, then the BLOB is opened for read +and write access. If it is zero, the BLOB is opened for read access.

    + +

    Note that the database name is not the filename that contains +the database but rather the symbolic name of the database that +is assigned when the database is connected using ATTACH. +For the main database file, the database name is "main". +For TEMP tables, the database name is "temp".

    + +

    On success, SQLITE_OK is returned and the new BLOB handle is written +to *ppBlob. Otherwise an error code is returned and *ppBlob is set +to be a null pointer. +This function sets the database connection error code and message +accessible via sqlite3_errcode() and sqlite3_errmsg() and related +functions. Note that the *ppBlob variable is always initialized in a +way that makes it safe to invoke sqlite3_blob_close() on *ppBlob +regardless of the success or failure of this routine.

    + +

If the row that a BLOB handle points to is modified by an +UPDATE, DELETE, or by ON CONFLICT side-effects +then the BLOB handle is marked as "expired". +This is true if any column of the row is changed, even a column +other than the one the BLOB handle is open on. +Calls to sqlite3_blob_read() and sqlite3_blob_write() for +an expired BLOB handle fail with a return code of SQLITE_ABORT. +Changes written into a BLOB prior to the BLOB expiring are not +rolled back by the expiration of the BLOB. Such changes will eventually +commit if the transaction continues to completion.

    + +

Use the sqlite3_blob_bytes() interface to determine the size of +the opened blob. The size of a blob may not be changed by this +interface. Use the UPDATE SQL command to change the size of a +blob.

    + +

    The sqlite3_bind_zeroblob() and sqlite3_result_zeroblob() interfaces +and the built-in zeroblob SQL function can be used, if desired, +to create an empty, zero-filled blob in which to read or write using +this interface.

    + +

    To avoid a resource leak, every open BLOB handle should eventually +be released by a call to sqlite3_blob_close().
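As a hedged sketch of the whole life cycle (not part of the original page; the table docs(body BLOB) and rowid 1 are invented for the example):

/* Illustrative only: read the first bytes of an existing BLOB. */
sqlite3_blob *pBlob = 0;
char buf[32];
int rc = sqlite3_blob_open(db, "main", "docs", "body", 1, 0, &pBlob);  /* 0 = read-only */
if( rc==SQLITE_OK && sqlite3_blob_bytes(pBlob)>=(int)sizeof(buf) ){
  rc = sqlite3_blob_read(pBlob, buf, sizeof(buf), 0);                  /* first 32 bytes */
}
sqlite3_blob_close(pBlob);   /* safe even if the open failed, per the text above */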

    + +

    Requirements: +H17813 H17814 H17816 H17819 H17821 H17824 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob_read.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob_read.html --- sqlite3-3.4.2/www/c3ref/blob_read.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob_read.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,105 @@ + + +Read Data From A BLOB Incrementally + + + + + +

    SQLite C Interface

    Read Data From A BLOB Incrementally

    int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
    +

    +This function is used to read data from an open BLOB handle into a +caller-supplied buffer. N bytes of data are copied into buffer Z +from the open BLOB, starting at offset iOffset.

    + +

    If offset iOffset is less than N bytes from the end of the BLOB, +SQLITE_ERROR is returned and no data is read. If N or iOffset is +less than zero, SQLITE_ERROR is returned and no data is read. +The size of the blob (and hence the maximum value of N+iOffset) +can be determined using the sqlite3_blob_bytes() interface.

    + +

    An attempt to read from an expired BLOB handle fails with an +error code of SQLITE_ABORT.

    + +

    On success, SQLITE_OK is returned. +Otherwise, an error code or an extended error code is returned.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    See also: sqlite3_blob_write().

    + +

    Requirements: +H17853 H17856 H17859 H17862 H17863 H17865 H17868 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/blob_write.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/blob_write.html --- sqlite3-3.4.2/www/c3ref/blob_write.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/blob_write.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,116 @@ + + +Write Data Into A BLOB Incrementally + + + + + +

    SQLite C Interface

    Write Data Into A BLOB Incrementally

    int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
    +

    +This function is used to write data into an open BLOB handle from a +caller-supplied buffer. N bytes of data are copied from the buffer Z +into the open BLOB, starting at offset iOffset.

    + +

    If the BLOB handle passed as the first argument was not opened for +writing (the flags parameter to sqlite3_blob_open() was zero), +this function returns SQLITE_READONLY.

    + +

    This function may only modify the contents of the BLOB; it is +not possible to increase the size of a BLOB using this API. +If offset iOffset is less than N bytes from the end of the BLOB, +SQLITE_ERROR is returned and no data is written. If N is +less than zero SQLITE_ERROR is returned and no data is written. +The size of the BLOB (and hence the maximum value of N+iOffset) +can be determined using the sqlite3_blob_bytes() interface.
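A short sketch (an assumption-laden example, not from the original page) of overwriting bytes inside an already-open, writable handle without changing its size:

/* Illustrative only: pBlob must have been opened with a non-zero flags value. */
static const char tag[4] = { 'D', 'O', 'N', 'E' };
int rc = SQLITE_ERROR;
if( sqlite3_blob_bytes(pBlob)>=4 ){
  rc = sqlite3_blob_write(pBlob, tag, 4, 0);   /* stays within the existing size */
}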

    + +

    An attempt to write to an expired BLOB handle fails with an +error code of SQLITE_ABORT. Writes to the BLOB that occurred +before the BLOB handle expired are not rolled back by the +expiration of the handle, though of course those changes might +have been overwritten by the statement that expired the BLOB handle +or by other independent statements.

    + +

    On success, SQLITE_OK is returned. +Otherwise, an error code or an extended error code is returned.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    See also: sqlite3_blob_read().

    + +

    Requirements: +H17873 H17874 H17875 H17876 H17877 H17879 H17882 H17885 +H17888 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/busy_handler.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/busy_handler.html --- sqlite3-3.4.2/www/c3ref/busy_handler.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/busy_handler.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,143 @@ + + +Register A Callback To Handle SQLITE_BUSY Errors + + + + + +

    SQLite C Interface

    Register A Callback To Handle SQLITE_BUSY Errors

    int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
    +

    +This routine sets a callback function that might be invoked whenever +an attempt is made to open a database table that another thread +or process has locked.

    + +

    If the busy callback is NULL, then SQLITE_BUSY or SQLITE_IOERR_BLOCKED +is returned immediately upon encountering the lock. If the busy callback +is not NULL, then the callback will be invoked with two arguments.

    + +

    The first argument to the handler is a copy of the void* pointer which +is the third argument to sqlite3_busy_handler(). The second argument to +the handler callback is the number of times that the busy handler has +been invoked for this locking event. If the +busy callback returns 0, then no additional attempts are made to +access the database and SQLITE_BUSY or SQLITE_IOERR_BLOCKED is returned. +If the callback returns non-zero, then another attempt +is made to open the database for reading and the cycle repeats.

    + +

    The presence of a busy handler does not guarantee that it will be invoked +when there is lock contention. If SQLite determines that invoking the busy +handler could result in a deadlock, it will go ahead and return SQLITE_BUSY +or SQLITE_IOERR_BLOCKED instead of invoking the busy handler. +Consider a scenario where one process is holding a read lock that +it is trying to promote to a reserved lock and +a second process is holding a reserved lock that it is trying +to promote to an exclusive lock. The first process cannot proceed +because it is blocked by the second and the second process cannot +proceed because it is blocked by the first. If both processes +invoke the busy handlers, neither will make any progress. Therefore, +SQLite returns SQLITE_BUSY for the first process, hoping that this +will induce the first process to release its read lock and allow +the second process to proceed.

    + +

    The default busy callback is NULL.

    + +

    The SQLITE_BUSY error is converted to SQLITE_IOERR_BLOCKED +when SQLite is in the middle of a large transaction where all the +changes will not fit into the in-memory cache. SQLite will +already hold a RESERVED lock on the database file, but it needs +to promote this lock to EXCLUSIVE so that it can spill cache +pages into the database file without harm to concurrent +readers. If it is unable to promote the lock, then the in-memory +cache will be left in an inconsistent state and so the error +code is promoted from the relatively benign SQLITE_BUSY to +the more severe SQLITE_IOERR_BLOCKED. This error code promotion +forces an automatic rollback of the changes. See the + +CorruptionFollowingBusyError wiki page for a discussion of why +this is important.

    + +

    There can only be a single busy handler defined for each +database connection. Setting a new busy handler clears any +previously set handler. Note that calling sqlite3_busy_timeout() +will also set or clear the busy handler.

    + +

    The busy callback should not take any actions which modify the +database connection that invoked the busy handler. Any such actions +result in undefined behavior.
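A minimal sketch of a busy handler (not from the original page; the 10-retry limit and the POSIX usleep() call are assumptions, use whatever delay primitive your platform provides):

#include <unistd.h>   /* usleep(), a POSIX assumption */
static int busyRetry(void *pArg, int nPrior){
  (void)pArg;
  if( nPrior>=10 ) return 0;   /* give up; SQLITE_BUSY is returned to the caller */
  usleep(50000);               /* back off ~50ms, then ask SQLite to retry */
  return 1;
}
/* Registered with: sqlite3_busy_handler(db, busyRetry, 0); */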

    + +

    Requirements: +H12311 H12312 H12314 H12316 H12318

    + +

    A busy handler must not close the database connection +or prepared statement that invoked the busy handler. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/busy_timeout.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/busy_timeout.html --- sqlite3-3.4.2/www/c3ref/busy_timeout.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/busy_timeout.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +Set A Busy Timeout + + + + + +

    SQLite C Interface

    Set A Busy Timeout

    int sqlite3_busy_timeout(sqlite3*, int ms);
    +

    +This routine sets a busy handler that sleeps +for a specified amount of time when a table is locked. The handler +will sleep multiple times until at least "ms" milliseconds of sleeping +have accumulated. After "ms" milliseconds of sleeping, +the handler returns 0 which causes sqlite3_step() to return +SQLITE_BUSY or SQLITE_IOERR_BLOCKED.

    + +

    Calling this routine with an argument less than or equal to zero +turns off all busy handlers.

    + +

There can only be a single busy handler for a particular +database connection at any given moment. If another busy handler +was defined (using sqlite3_busy_handler()) prior to calling +this routine, that other busy handler is cleared.

    + +

    Requirements: +H12341 H12343 H12344 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_abort.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_abort.html --- sqlite3-3.4.2/www/c3ref/c_abort.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_abort.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,116 @@ + + +Result Codes + + + + + +

    SQLite C Interface

    Result Codes

    #define SQLITE_OK           0   /* Successful result */
    +/* beginning-of-error-codes */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* Internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite3_interrupt()*/
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* NOT USED. Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* NOT USED. Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* Database is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* String or BLOB exceeds size limit */
    +#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_FORMAT      24   /* Auxiliary database format error */
    +#define SQLITE_RANGE       25   /* 2nd parameter to sqlite3_bind out of range */
    +#define SQLITE_NOTADB      26   /* File opened that is not a database file */
    +#define SQLITE_ROW         100  /* sqlite3_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite3_step() has finished executing */
    +/* end-of-error-codes */
    +

+Many SQLite functions return an integer result code from the set shown +here in order to indicate success or failure.

    + +

    New error codes may be added in future versions of SQLite.
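The usual checking idiom, as a hedged sketch (not from the original page; <stdio.h> is assumed for fprintf()):

char *zErr = 0;
int rc = sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x)", 0, 0, &zErr);
if( rc!=SQLITE_OK ){
  fprintf(stderr, "sqlite error %d: %s\n", rc, zErr ? zErr : sqlite3_errmsg(db));
  sqlite3_free(zErr);   /* error messages from sqlite3_exec() must be freed */
}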

    + +

    See also: extended result codes +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_access_exists.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_access_exists.html --- sqlite3-3.4.2/www/c3ref/c_access_exists.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_access_exists.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +Flags for the xAccess VFS method + + + + + +

    SQLite C Interface

    Flags for the xAccess VFS method

    #define SQLITE_ACCESS_EXISTS    0
    +#define SQLITE_ACCESS_READWRITE 1
    +#define SQLITE_ACCESS_READ      2
    +

    +These integer constants can be used as the third parameter to +the xAccess method of an sqlite3_vfs object. They determine +what kind of permissions the xAccess method is looking for. +With SQLITE_ACCESS_EXISTS, the xAccess method +simply checks whether the file exists. +With SQLITE_ACCESS_READWRITE, the xAccess method +checks whether the file is both readable and writable. +With SQLITE_ACCESS_READ, the xAccess method +checks whether the file is readable. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_alter_table.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_alter_table.html --- sqlite3-3.4.2/www/c3ref/c_alter_table.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_alter_table.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,131 @@ + + +Authorizer Action Codes + + + + + +

    SQLite C Interface

    Authorizer Action Codes

    /******************************************* 3rd ************ 4th ***********/
    +#define SQLITE_CREATE_INDEX          1   /* Index Name      Table Name      */
    +#define SQLITE_CREATE_TABLE          2   /* Table Name      NULL            */
    +#define SQLITE_CREATE_TEMP_INDEX     3   /* Index Name      Table Name      */
    +#define SQLITE_CREATE_TEMP_TABLE     4   /* Table Name      NULL            */
    +#define SQLITE_CREATE_TEMP_TRIGGER   5   /* Trigger Name    Table Name      */
    +#define SQLITE_CREATE_TEMP_VIEW      6   /* View Name       NULL            */
    +#define SQLITE_CREATE_TRIGGER        7   /* Trigger Name    Table Name      */
    +#define SQLITE_CREATE_VIEW           8   /* View Name       NULL            */
    +#define SQLITE_DELETE                9   /* Table Name      NULL            */
    +#define SQLITE_DROP_INDEX           10   /* Index Name      Table Name      */
    +#define SQLITE_DROP_TABLE           11   /* Table Name      NULL            */
    +#define SQLITE_DROP_TEMP_INDEX      12   /* Index Name      Table Name      */
    +#define SQLITE_DROP_TEMP_TABLE      13   /* Table Name      NULL            */
    +#define SQLITE_DROP_TEMP_TRIGGER    14   /* Trigger Name    Table Name      */
    +#define SQLITE_DROP_TEMP_VIEW       15   /* View Name       NULL            */
    +#define SQLITE_DROP_TRIGGER         16   /* Trigger Name    Table Name      */
    +#define SQLITE_DROP_VIEW            17   /* View Name       NULL            */
    +#define SQLITE_INSERT               18   /* Table Name      NULL            */
    +#define SQLITE_PRAGMA               19   /* Pragma Name     1st arg or NULL */
    +#define SQLITE_READ                 20   /* Table Name      Column Name     */
    +#define SQLITE_SELECT               21   /* NULL            NULL            */
    +#define SQLITE_TRANSACTION          22   /* Operation       NULL            */
    +#define SQLITE_UPDATE               23   /* Table Name      Column Name     */
    +#define SQLITE_ATTACH               24   /* Filename        NULL            */
    +#define SQLITE_DETACH               25   /* Database Name   NULL            */
    +#define SQLITE_ALTER_TABLE          26   /* Database Name   Table Name      */
    +#define SQLITE_REINDEX              27   /* Index Name      NULL            */
    +#define SQLITE_ANALYZE              28   /* Table Name      NULL            */
    +#define SQLITE_CREATE_VTABLE        29   /* Table Name      Module Name     */
    +#define SQLITE_DROP_VTABLE          30   /* Table Name      Module Name     */
    +#define SQLITE_FUNCTION             31   /* NULL            Function Name   */
    +#define SQLITE_SAVEPOINT            32   /* Operation       Savepoint Name  */
    +#define SQLITE_COPY                  0   /* No longer used */
    +

    +The sqlite3_set_authorizer() interface registers a callback function +that is invoked to authorize certain SQL statement actions. The +second parameter to the callback is an integer code that specifies +what action is being authorized. These are the integer action codes that +the authorizer callback may be passed.

    + +

    These action code values signify what kind of operation is to be +authorized. The 3rd and 4th parameters to the authorization +callback function will be parameters or NULL depending on which of these +codes is used as the second parameter. The 5th parameter to the +authorizer callback is the name of the database ("main", "temp", +etc.) if applicable. The 6th parameter to the authorizer callback +is the name of the inner-most trigger or view that is responsible for +the access attempt or NULL if this access attempt is directly from +top-level SQL code.
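A minimal sketch of an authorizer (not part of the original page; the table name "audit_log" is invented, and <string.h> is assumed for strcmp()):

static int xAuth(void *pArg, int code, const char *z3, const char *z4,
                 const char *zDb, const char *zTrigger){
  (void)pArg; (void)z4; (void)zDb; (void)zTrigger;
  if( code==SQLITE_DELETE && z3 && strcmp(z3, "audit_log")==0 ){
    return SQLITE_DENY;          /* abort any DELETE against audit_log */
  }
  return SQLITE_OK;
}
/* Registered with: sqlite3_set_authorizer(db, xAuth, 0); */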

    + +

    Requirements: +H12551 H12552 H12553 H12554 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_any.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_any.html --- sqlite3-3.4.2/www/c3ref/c_any.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_any.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +Text Encodings + + + + + +

    SQLite C Interface

    Text Encodings

    #define SQLITE_UTF8           1
    +#define SQLITE_UTF16LE        2
    +#define SQLITE_UTF16BE        3
    +#define SQLITE_UTF16          4    /* Use native byte order */
    +#define SQLITE_ANY            5    /* sqlite3_create_function only */
    +#define SQLITE_UTF16_ALIGNED  8    /* sqlite3_create_collation only */
    +

+These constants define integer codes that represent the various +text encodings supported by SQLite. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_blob.html --- sqlite3-3.4.2/www/c3ref/c_blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,105 @@ + + +Fundamental Datatypes + + + + + +

    SQLite C Interface

    Fundamental Datatypes

    #define SQLITE_INTEGER  1
    +#define SQLITE_FLOAT    2
    +#define SQLITE_BLOB     4
    +#define SQLITE_NULL     5
    +#ifdef SQLITE_TEXT
    +# undef SQLITE_TEXT
    +#else
    +# define SQLITE_TEXT     3
    +#endif
    +#define SQLITE3_TEXT     3
    +

    + Every value in SQLite has one of five fundamental datatypes:

    + +

      +
    • 64-bit signed integer +
    • 64-bit IEEE floating point number +
    • string +
    • BLOB +
    • NULL +

    + +

    These constants are codes for each of those types.

    + +

    Note that the SQLITE_TEXT constant was also used in SQLite version 2 +for a completely different meaning. Software that links against both +SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not +SQLITE_TEXT. +
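A hedged sketch of how these codes are typically consumed (the handle_*() calls are hypothetical application functions, not SQLite APIs):

/* After sqlite3_step() has returned SQLITE_ROW, inspect result column 0. */
switch( sqlite3_column_type(pStmt, 0) ){
  case SQLITE_INTEGER: handle_int(sqlite3_column_int64(pStmt, 0));   break;
  case SQLITE_FLOAT:   handle_real(sqlite3_column_double(pStmt, 0)); break;
  case SQLITE3_TEXT:   handle_text(sqlite3_column_text(pStmt, 0));   break;
  case SQLITE_BLOB:    handle_blob(sqlite3_column_blob(pStmt, 0),
                                   sqlite3_column_bytes(pStmt, 0));  break;
  case SQLITE_NULL:    handle_null();                                break;
}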

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_config_getmalloc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_config_getmalloc.html --- sqlite3-3.4.2/www/c3ref/c_config_getmalloc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_config_getmalloc.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,246 @@ + + +Configuration Options + + + + + +

    SQLite C Interface

    Configuration Options

    #define SQLITE_CONFIG_SINGLETHREAD  1  /* nil */
    +#define SQLITE_CONFIG_MULTITHREAD   2  /* nil */
    +#define SQLITE_CONFIG_SERIALIZED    3  /* nil */
    +#define SQLITE_CONFIG_MALLOC        4  /* sqlite3_mem_methods* */
    +#define SQLITE_CONFIG_GETMALLOC     5  /* sqlite3_mem_methods* */
    +#define SQLITE_CONFIG_SCRATCH       6  /* void*, int sz, int N */
    +#define SQLITE_CONFIG_PAGECACHE     7  /* void*, int sz, int N */
    +#define SQLITE_CONFIG_HEAP          8  /* void*, int nByte, int min */
    +#define SQLITE_CONFIG_MEMSTATUS     9  /* boolean */
    +#define SQLITE_CONFIG_MUTEX        10  /* sqlite3_mutex_methods* */
    +#define SQLITE_CONFIG_GETMUTEX     11  /* sqlite3_mutex_methods* */
    +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ 
    +#define SQLITE_CONFIG_LOOKASIDE    13  /* int int */
    +#define SQLITE_CONFIG_PCACHE       14  /* sqlite3_pcache_methods* */
    +#define SQLITE_CONFIG_GETPCACHE    15  /* sqlite3_pcache_methods* */
    +

    Important: This interface is experimental and is subject to change without notice.

    +These constants are the available integer configuration options that +can be passed as the first argument to the sqlite3_config() interface.

    + +

    New configuration options may be added in future releases of SQLite. +Existing configuration options might be discontinued. Applications +should check the return code from sqlite3_config() to make sure that +the call worked. The sqlite3_config() interface will return a +non-zero error code if a discontinued or unsupported configuration option +is invoked.

    + +

    +
    SQLITE_CONFIG_SINGLETHREAD
    +
    There are no arguments to this option. This option disables +all mutexing and puts SQLite into a mode where it can only be used +by a single thread.

    + +

    SQLITE_CONFIG_MULTITHREAD
    +
    There are no arguments to this option. This option disables +mutexing on database connection and prepared statement objects. +The application is responsible for serializing access to +database connections and prepared statements. But other mutexes +are enabled so that SQLite will be safe to use in a multi-threaded +environment as long as no two threads attempt to use the same +database connection at the same time. See the threading mode +documentation for additional information.

    + +

    SQLITE_CONFIG_SERIALIZED
    +
    There are no arguments to this option. This option enables +all mutexes including the recursive +mutexes on database connection and prepared statement objects. +In this mode (which is the default when SQLite is compiled with +SQLITE_THREADSAFE=1) the SQLite library will itself serialize access +to database connections and prepared statements so that the +application is free to use the same database connection or the +same prepared statement in different threads at the same time. +See the threading mode documentation for additional information.

    + +

    SQLITE_CONFIG_MALLOC
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mem_methods structure. The argument specifies +alternative low-level memory allocation routines to be used in place of +the memory allocation routines built into SQLite.

    + +

    SQLITE_CONFIG_GETMALLOC
    +
This option takes a single argument which is a pointer to an +instance of the sqlite3_mem_methods structure. The sqlite3_mem_methods +structure is filled with the currently defined memory allocation routines. +This option can be used to overload the default memory allocation +routines with a wrapper that simulates memory allocation failure or +tracks memory usage, for example.

    + +

    SQLITE_CONFIG_MEMSTATUS
    +
This option takes a single argument of type int, interpreted as a +boolean, which enables or disables the collection of memory allocation +statistics. When disabled, the following SQLite interfaces become +non-operational: + +

    + +

    SQLITE_CONFIG_SCRATCH
    +
This option specifies a static memory buffer that SQLite can use for +scratch memory. There are three arguments: A pointer to an 8-byte +aligned memory buffer from which the scratch allocations will be +drawn, the size of each scratch allocation (sz), +and the maximum number of scratch allocations (N). The sz +argument must be a multiple of 16. The sz parameter should be a few bytes +larger than the actual scratch space required due to internal overhead. +The first argument should point to an 8-byte aligned buffer +of at least sz*N bytes of memory. +SQLite will use no more than one scratch buffer at once per thread, so +N should be set to the expected maximum number of threads. The sz +parameter should be 6 times the size of the largest database page size. +Scratch buffers are used as part of the btree balance operation. If +the btree balancer needs additional memory beyond what is provided by +scratch buffers or if no scratch buffer space is specified, then SQLite +goes to sqlite3_malloc() to obtain the memory it needs.

    + +

    SQLITE_CONFIG_PAGECACHE
    +
This option specifies a static memory buffer that SQLite can use for +the database page cache with the default page cache implementation. +This configuration should not be used if an application-defined page +cache implementation is loaded using the SQLITE_CONFIG_PCACHE option. +There are three arguments to this option: A pointer to 8-byte aligned +memory, the size of each page buffer (sz), and the number of pages (N). +The sz argument should be the size of the largest database page +(a power of two between 512 and 32768) plus a little extra for each +page header. The page header size is 20 to 40 bytes depending on +the host architecture. It is harmless, apart from the wasted memory, +to make sz a little too large. The first +argument should point to an allocation of at least sz*N bytes of memory. +SQLite will use the memory provided by the first argument to satisfy its +memory needs for the first N pages that it adds to cache. If additional +page cache memory is needed beyond what is provided by this option, then +SQLite goes to sqlite3_malloc() for the additional storage space. +The implementation might use one or more of the N buffers to hold +memory accounting information. The pointer in the first argument must +be aligned to an 8-byte boundary or subsequent behavior of SQLite +will be undefined.

    + +

    SQLITE_CONFIG_HEAP
    +
This option specifies a static memory buffer that SQLite will use +for all of its dynamic memory allocation needs beyond those provided +for by SQLITE_CONFIG_SCRATCH and SQLITE_CONFIG_PAGECACHE. +There are three arguments: An 8-byte aligned pointer to the memory, +the number of bytes in the memory buffer, and the minimum allocation size. +If the first pointer (the memory pointer) is NULL, then SQLite reverts +to using its default memory allocator (the system malloc() implementation), +undoing any prior invocation of SQLITE_CONFIG_MALLOC. If the +memory pointer is not NULL and either SQLITE_ENABLE_MEMSYS3 or +SQLITE_ENABLE_MEMSYS5 are defined, then the alternative memory +allocator is engaged to handle all of SQLite's memory allocation needs. +The first pointer (the memory pointer) must be aligned to an 8-byte +boundary or subsequent behavior of SQLite will be undefined.

    + +

    SQLITE_CONFIG_MUTEX
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mutex_methods structure. The argument specifies +alternative low-level mutex routines to be used in place +the mutex routines built into SQLite.

    + +

    SQLITE_CONFIG_GETMUTEX
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mutex_methods structure. The +sqlite3_mutex_methods +structure is filled with the currently defined mutex routines. +This option can be used to overload the default mutex allocation +routines with a wrapper used to track mutex usage for performance +profiling or testing, for example.

    + +

    SQLITE_CONFIG_LOOKASIDE
    +
This option takes two arguments that determine the default +memory allocation lookaside optimization. The first argument is the +size of each lookaside buffer slot and the second is the number of +slots allocated to each database connection.

    + +

    SQLITE_CONFIG_PCACHE
    +
    This option takes a single argument which is a pointer to +an sqlite3_pcache_methods object. This object specifies the interface +to a custom page cache implementation. SQLite makes a copy of the +object and uses it for page cache memory allocations.

    + +

    SQLITE_CONFIG_GETPCACHE
    +
This option takes a single argument which is a pointer to an +sqlite3_pcache_methods object. SQLite copies the current +page cache implementation into that object.
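As a hedged illustration of how the options above are passed (the specific values are examples only, not recommendations), sqlite3_config() is a variadic call made before the library is used:

/* Illustrative only: must run before SQLite is initialized. */
int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);          /* no extra arguments    */
if( rc==SQLITE_OK ){
  rc = sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 256, 500);   /* slot size, slot count */
}
if( rc!=SQLITE_OK ){
  /* the option is unsupported or the library is already initialized */
}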

    + +

    +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_dbconfig_lookaside.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_dbconfig_lookaside.html --- sqlite3-3.4.2/www/c3ref/c_dbconfig_lookaside.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_dbconfig_lookaside.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,103 @@ + + +Configuration Options + + + + + +

    SQLite C Interface

    Configuration Options

    #define SQLITE_DBCONFIG_LOOKASIDE    1001  /* void* int int */
    +

    Important: This interface is experimental and is subject to change without notice.

    +These constants are the available integer configuration options that +can be passed as the second argument to the sqlite3_db_config() interface.

    + +

    New configuration options may be added in future releases of SQLite. +Existing configuration options might be discontinued. Applications +should check the return code from sqlite3_db_config() to make sure that +the call worked. The sqlite3_db_config() interface will return a +non-zero error code if a discontinued or unsupported configuration option +is invoked.

    + +

    +
    SQLITE_DBCONFIG_LOOKASIDE
    +
This option takes three additional arguments that determine the +lookaside memory allocator configuration for the database connection. +The first argument (the third parameter to sqlite3_db_config()) is a +pointer to an 8-byte aligned memory buffer to use for lookaside memory. +The first argument may be NULL in which case SQLite will allocate the +lookaside buffer itself using sqlite3_malloc(). The second argument is the +size of each lookaside buffer slot and the third argument is the number of +slots. The size of the buffer in the first argument must be greater than +or equal to the product of the second and third arguments.
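A one-line sketch of the call (the slot size of 1200 bytes and count of 64 are arbitrary example values; the NULL buffer asks SQLite to allocate the memory itself, as described above):

int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 1200, 64);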

    + +

    +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_dbstatus_lookaside_used.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_dbstatus_lookaside_used.html --- sqlite3-3.4.2/www/c3ref/c_dbstatus_lookaside_used.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_dbstatus_lookaside_used.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +Status Parameters for database connections + + + + + +

    SQLite C Interface

    Status Parameters for database connections

    #define SQLITE_DBSTATUS_LOOKASIDE_USED     0
    +

    Important: This interface is experimental and is subject to change without notice.

    +Status verbs for sqlite3_db_status().

    + +

    +
    SQLITE_DBSTATUS_LOOKASIDE_USED
    +
    This parameter returns the number of lookaside memory slots currently +checked out.
    +
    +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_deny.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_deny.html --- sqlite3-3.4.2/www/c3ref/c_deny.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_deny.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Authorizer Return Codes + + + + + +

    SQLite C Interface

    Authorizer Return Codes

    #define SQLITE_DENY   1   /* Abort the SQL statement with an error */
    +#define SQLITE_IGNORE 2   /* Don't allow access, but don't generate an error */
    +

    +The authorizer callback function must +return either SQLITE_OK or one of these two constants in order +to signal SQLite whether or not the action is permitted. See the +authorizer documentation for additional +information. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_fcntl_lockstate.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_fcntl_lockstate.html --- sqlite3-3.4.2/www/c3ref/c_fcntl_lockstate.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_fcntl_lockstate.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,94 @@ + + +Standard File Control Opcodes + + + + + +

    SQLite C Interface

    Standard File Control Opcodes

    #define SQLITE_FCNTL_LOCKSTATE        1
    +#define SQLITE_GET_LOCKPROXYFILE      2
    +#define SQLITE_SET_LOCKPROXYFILE      3
    +#define SQLITE_LAST_ERRNO             4
    +

    +These integer constants are opcodes for the xFileControl method +of the sqlite3_io_methods object and for the sqlite3_file_control() +interface.

    + +

    The SQLITE_FCNTL_LOCKSTATE opcode is used for debugging. This +opcode causes the xFileControl method to write the current state of +the lock (one of SQLITE_LOCK_NONE, SQLITE_LOCK_SHARED, +SQLITE_LOCK_RESERVED, SQLITE_LOCK_PENDING, or SQLITE_LOCK_EXCLUSIVE) +into an integer that the pArg argument points to. This capability +is used during testing and only needs to be supported when SQLITE_TEST +is defined. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/changes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/changes.html --- sqlite3-3.4.2/www/c3ref/changes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/changes.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,130 @@ + + +Count The Number Of Rows Modified + + + + + +

    SQLite C Interface

    Count The Number Of Rows Modified

    int sqlite3_changes(sqlite3*);
    +

    +This function returns the number of database rows that were changed +or inserted or deleted by the most recently completed SQL statement +on the database connection specified by the first parameter. +Only changes that are directly specified by the INSERT, UPDATE, +or DELETE statement are counted. Auxiliary changes caused by +triggers are not counted. Use the sqlite3_total_changes() function +to find the total number of changes including changes caused by triggers.

    + +

    Changes to a view that are simulated by an INSTEAD OF trigger +are not counted. Only real table changes are counted.

    + +

    A "row change" is a change to a single row of a single table +caused by an INSERT, DELETE, or UPDATE statement. Rows that +are changed as side effects of REPLACE constraint resolution, +rollback, ABORT processing, DROP TABLE, or by any other +mechanisms do not count as direct row changes.

    + +

    A "trigger context" is a scope of execution that begins and +ends with the script of a trigger. +Most SQL statements are +evaluated outside of any trigger. This is the "top level" +trigger context. If a trigger fires from the top level, a +new trigger context is entered for the duration of that one +trigger. Subtriggers create subcontexts for their duration.

    + +

    Calling sqlite3_exec() or sqlite3_step() recursively does +not create a new trigger context.

    + +

    This function returns the number of direct row changes in the +most recent INSERT, UPDATE, or DELETE statement within the same +trigger context.

    + +

    Thus, when called from the top level, this function returns the +number of changes in the most recent INSERT, UPDATE, or DELETE +that also occurred at the top level. Within the body of a trigger, +the sqlite3_changes() interface can be called to find the number of +changes in the most recently completed INSERT, UPDATE, or DELETE +statement within the body of the same trigger. +However, the number returned does not include changes +caused by subtriggers since those have their own context.

    + +

    See also the sqlite3_total_changes() interface and the +count_changes pragma.
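A small sketch of the common use (not from the original page; the table t1 and <stdio.h> are assumptions):

char *zErr = 0;
if( sqlite3_exec(db, "UPDATE t1 SET b = b + 1", 0, 0, &zErr)==SQLITE_OK ){
  printf("%d row(s) changed\n", sqlite3_changes(db));   /* direct changes only */
}
sqlite3_free(zErr);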

    + +

    Requirements: +H12241 H12243

    + +

    If a separate thread makes changes on the same database connection +while sqlite3_changes() is running then the value returned +is unpredictable and not meaningful. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_index_constraint_eq.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_index_constraint_eq.html --- sqlite3-3.4.2/www/c3ref/c_index_constraint_eq.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_index_constraint_eq.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,155 @@ + + +Virtual Table Indexing Information + + + + + +

    SQLite C Interface

    Virtual Table Indexing Information

    struct sqlite3_index_info {
    +  /* Inputs */
    +  int nConstraint;           /* Number of entries in aConstraint */
    +  struct sqlite3_index_constraint {
    +     int iColumn;              /* Column on left-hand side of constraint */
    +     unsigned char op;         /* Constraint operator */
    +     unsigned char usable;     /* True if this constraint is usable */
    +     int iTermOffset;          /* Used internally - xBestIndex should ignore */
    +  } *aConstraint;            /* Table of WHERE clause constraints */
    +  int nOrderBy;              /* Number of terms in the ORDER BY clause */
    +  struct sqlite3_index_orderby {
    +     int iColumn;              /* Column number */
    +     unsigned char desc;       /* True for DESC.  False for ASC. */
    +  } *aOrderBy;               /* The ORDER BY clause */
    +  /* Outputs */
    +  struct sqlite3_index_constraint_usage {
    +    int argvIndex;           /* if >0, constraint is part of argv to xFilter */
    +    unsigned char omit;      /* Do not code a test for this constraint */
    +  } *aConstraintUsage;
    +  int idxNum;                /* Number used to identify the index */
    +  char *idxStr;              /* String, possibly obtained from sqlite3_malloc */
    +  int needToFreeIdxStr;      /* Free idxStr using sqlite3_free() if true */
    +  int orderByConsumed;       /* True if output is already ordered */
    +  double estimatedCost;      /* Estimated cost of using this index */
    +};
    +#define SQLITE_INDEX_CONSTRAINT_EQ    2
    +#define SQLITE_INDEX_CONSTRAINT_GT    4
    +#define SQLITE_INDEX_CONSTRAINT_LE    8
    +#define SQLITE_INDEX_CONSTRAINT_LT    16
    +#define SQLITE_INDEX_CONSTRAINT_GE    32
    +#define SQLITE_INDEX_CONSTRAINT_MATCH 64
    +

    Important: This interface is experimental and is subject to change without notice.

+The sqlite3_index_info structure and its substructures are used to +pass information into and receive the reply from the xBestIndex +method of a virtual table module. The fields under Inputs are the +inputs to xBestIndex and are read-only. xBestIndex inserts its +results into the Outputs fields.

    + +

    The aConstraint[] array records WHERE clause constraints of the form:

    + +

    column OP expr

    + +

    where OP is =, <, <=, >, or >=. The particular operator is +stored in aConstraint[].op. The index of the column is stored in +aConstraint[].iColumn. aConstraint[].usable is TRUE if the +expr on the right-hand side can be evaluated (and thus the constraint +is usable) and false if it cannot.

    + +

    The optimizer automatically inverts terms of the form "expr OP column" +and makes other simplifications to the WHERE clause in an attempt to +get as many WHERE clause terms into the form shown above as possible. +The aConstraint[] array only reports WHERE clause terms in the correct +form that refer to the particular virtual table being queried.

    + +

    Information about the ORDER BY clause is stored in aOrderBy[]. +Each term of aOrderBy records a column of the ORDER BY clause.

    + +

    The xBestIndex method must fill aConstraintUsage[] with information +about what parameters to pass to xFilter. If argvIndex>0 then +the right-hand side of the corresponding aConstraint[] is evaluated +and becomes the argvIndex-th entry in argv. If aConstraintUsage[].omit +is true, then the constraint is assumed to be fully handled by the +virtual table and is not checked again by SQLite.

    + +

The idxNum and idxStr values are recorded and passed into the +xFilter method. +sqlite3_free() is used to free idxStr if and only if +needToFreeIdxStr is true.

    + +

    The orderByConsumed means that output from xFilter/xNext will occur in +the correct order to satisfy the ORDER BY clause so that no separate +sorting step is required.

    + +

    The estimatedCost value is an estimate of the cost of doing the +particular lookup. A full scan of a table with N entries should have +a cost of N. A binary search of a table of N entries should have a +cost of approximately log(N). +
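As an illustration only (not code from this distribution), a minimal xBestIndex for a hypothetical module might look for a usable equality constraint on column 0 and report it through aConstraintUsage[]; the function name and the meaning of the idxNum values are invented for this sketch.

#include <sqlite3.h>

/* Hypothetical sketch: prefer "column0 = expr" when it is usable. */
static int myVtabBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
  int i;
  (void)pVTab;
  pInfo->idxNum = 0;                 /* 0 = full scan (made-up convention) */
  pInfo->estimatedCost = 1000000.0;  /* assume roughly 1e6 rows for a full scan */
  for(i=0; i<pInfo->nConstraint; i++){
    if( pInfo->aConstraint[i].usable
     && pInfo->aConstraint[i].iColumn==0
     && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ ){
      pInfo->aConstraintUsage[i].argvIndex = 1;  /* RHS becomes argv[0] of xFilter */
      pInfo->aConstraintUsage[i].omit = 1;       /* vtab checks it; SQLite need not */
      pInfo->idxNum = 1;                         /* 1 = keyed lookup (made-up) */
      pInfo->estimatedCost = 20.0;               /* roughly log(N) */
      break;
    }
  }
  return SQLITE_OK;
}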

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_iocap_atomic.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_iocap_atomic.html --- sqlite3-3.4.2/www/c3ref/c_iocap_atomic.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_iocap_atomic.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,106 @@ + + +Device Characteristics + + + + + +

    SQLite C Interface

    Device Characteristics

    #define SQLITE_IOCAP_ATOMIC          0x00000001
    +#define SQLITE_IOCAP_ATOMIC512       0x00000002
    +#define SQLITE_IOCAP_ATOMIC1K        0x00000004
    +#define SQLITE_IOCAP_ATOMIC2K        0x00000008
    +#define SQLITE_IOCAP_ATOMIC4K        0x00000010
    +#define SQLITE_IOCAP_ATOMIC8K        0x00000020
    +#define SQLITE_IOCAP_ATOMIC16K       0x00000040
    +#define SQLITE_IOCAP_ATOMIC32K       0x00000080
    +#define SQLITE_IOCAP_ATOMIC64K       0x00000100
    +#define SQLITE_IOCAP_SAFE_APPEND     0x00000200
    +#define SQLITE_IOCAP_SEQUENTIAL      0x00000400
    +

The xDeviceCharacteristics method of the sqlite3_io_methods object returns an integer which is a vector of these bit values expressing the I/O characteristics of the mass storage device that holds the file to which the sqlite3_io_methods refers.

The SQLITE_IOCAP_ATOMIC property means that all writes of any size are atomic. The SQLITE_IOCAP_ATOMICnnn values mean that writes of blocks that are nnn bytes in size and are aligned to an address which is an integer multiple of nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means that when data is appended to a file, the data is appended first and the size of the file is extended afterwards, never the other way around. The SQLITE_IOCAP_SEQUENTIAL property means that information is written to disk in the same order as calls to xWrite().
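For example, a custom VFS for media with atomic 512-byte sectors and append-safe semantics could report those facts by OR-ing the relevant bits together. This is a sketch only; MyFile and myDeviceCharacteristics are invented names, not part of SQLite.

#include <sqlite3.h>

typedef struct MyFile MyFile;
struct MyFile {
  sqlite3_file base;      /* base class - must come first */
  /* ... implementation-specific fields ... */
};

static int myDeviceCharacteristics(sqlite3_file *pFile){
  (void)pFile;   /* the characteristics are the same for every file on this device */
  return SQLITE_IOCAP_ATOMIC512 | SQLITE_IOCAP_SAFE_APPEND | SQLITE_IOCAP_SEQUENTIAL;
}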

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_ioerr_access.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_ioerr_access.html --- sqlite3-3.4.2/www/c3ref/c_ioerr_access.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_ioerr_access.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,114 @@ + + +Extended Result Codes + + + + + +

    SQLite C Interface

    Extended Result Codes

    #define SQLITE_IOERR_READ              (SQLITE_IOERR | (1<<8))
    +#define SQLITE_IOERR_SHORT_READ        (SQLITE_IOERR | (2<<8))
    +#define SQLITE_IOERR_WRITE             (SQLITE_IOERR | (3<<8))
    +#define SQLITE_IOERR_FSYNC             (SQLITE_IOERR | (4<<8))
    +#define SQLITE_IOERR_DIR_FSYNC         (SQLITE_IOERR | (5<<8))
    +#define SQLITE_IOERR_TRUNCATE          (SQLITE_IOERR | (6<<8))
    +#define SQLITE_IOERR_FSTAT             (SQLITE_IOERR | (7<<8))
    +#define SQLITE_IOERR_UNLOCK            (SQLITE_IOERR | (8<<8))
    +#define SQLITE_IOERR_RDLOCK            (SQLITE_IOERR | (9<<8))
    +#define SQLITE_IOERR_DELETE            (SQLITE_IOERR | (10<<8))
    +#define SQLITE_IOERR_BLOCKED           (SQLITE_IOERR | (11<<8))
    +#define SQLITE_IOERR_NOMEM             (SQLITE_IOERR | (12<<8))
    +#define SQLITE_IOERR_ACCESS            (SQLITE_IOERR | (13<<8))
    +#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8))
    +#define SQLITE_IOERR_LOCK              (SQLITE_IOERR | (15<<8))
    +#define SQLITE_IOERR_CLOSE             (SQLITE_IOERR | (16<<8))
    +#define SQLITE_IOERR_DIR_CLOSE         (SQLITE_IOERR | (17<<8))
    +#define SQLITE_LOCKED_SHAREDCACHE      (SQLITE_LOCKED | (1<<8) )
    +

In its default configuration, SQLite API routines return one of 26 integer result codes. However, experience has shown that many of these result codes are too coarse-grained. They do not provide as much information about problems as programmers might like. In an effort to address this, newer versions of SQLite (version 3.3.8 and later) include support for additional result codes that provide more detailed information about errors. The extended result codes are enabled or disabled on a per database connection basis using the sqlite3_extended_result_codes() API.

Some of the available extended result codes are listed here. One may expect the number of extended result codes to expand over time. Software that uses extended result codes should expect to see new result codes in future releases of SQLite.

The SQLITE_OK result code will never be extended. It will always be exactly zero.
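A short usage sketch, assuming db is an already-open connection: enable extended codes, then distinguish a specific I/O failure from the generic SQLITE_IOERR. Masking with 0xff recovers the primary result code from an extended one.

#include <sqlite3.h>
#include <stdio.h>

static void report_exec_error(sqlite3 *db, const char *zSql){
  sqlite3_extended_result_codes(db, 1);          /* turn on extended codes */
  int rc = sqlite3_exec(db, zSql, 0, 0, 0);
  if( rc!=SQLITE_OK ){
    if( rc==SQLITE_IOERR_FSYNC ){
      fprintf(stderr, "fsync() failed\n");
    }else if( (rc & 0xff)==SQLITE_IOERR ){
      fprintf(stderr, "some other I/O error: %d\n", rc);
    }else{
      fprintf(stderr, "error %d: %s\n", rc, sqlite3_errmsg(db));
    }
  }
}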

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/clear_bindings.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/clear_bindings.html --- sqlite3-3.4.2/www/c3ref/clear_bindings.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/clear_bindings.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Reset All Bindings On A Prepared Statement + + + + + +

    SQLite C Interface

    Reset All Bindings On A Prepared Statement

    int sqlite3_clear_bindings(sqlite3_stmt*);
    +

Contrary to the intuition of many, sqlite3_reset() does not reset the bindings on a prepared statement. Use this routine to reset all host parameters to NULL.

Requirements: H13661
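A minimal sketch of re-using a prepared statement: sqlite3_reset() rewinds the statement, sqlite3_clear_bindings() sets every parameter back to NULL. The statement text here is assumed, not taken from this page.

#include <sqlite3.h>

/* Sketch: pStmt is assumed to be an already-prepared
** "INSERT INTO t(x) VALUES(?)" statement. */
static void insert_twice(sqlite3_stmt *pStmt){
  sqlite3_bind_int(pStmt, 1, 42);
  sqlite3_step(pStmt);

  sqlite3_reset(pStmt);            /* rewind; the binding of 42 would otherwise survive */
  sqlite3_clear_bindings(pStmt);   /* parameter 1 is NULL until bound again */

  sqlite3_bind_int(pStmt, 1, 43);
  sqlite3_step(pStmt);
}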

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_limit_attached.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_limit_attached.html --- sqlite3-3.4.2/www/c3ref/c_limit_attached.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_limit_attached.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,130 @@ + + +Run-Time Limit Categories + + + + + +

    SQLite C Interface

    Run-Time Limit Categories

    #define SQLITE_LIMIT_LENGTH                    0
    +#define SQLITE_LIMIT_SQL_LENGTH                1
    +#define SQLITE_LIMIT_COLUMN                    2
    +#define SQLITE_LIMIT_EXPR_DEPTH                3
    +#define SQLITE_LIMIT_COMPOUND_SELECT           4
    +#define SQLITE_LIMIT_VDBE_OP                   5
    +#define SQLITE_LIMIT_FUNCTION_ARG              6
    +#define SQLITE_LIMIT_ATTACHED                  7
    +#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH       8
    +#define SQLITE_LIMIT_VARIABLE_NUMBER           9
    +

These constants define various performance limits that can be lowered at run-time using sqlite3_limit(). A synopsis of the meaning of each limit follows, and a usage sketch appears after the list. Additional information is available at Limits in SQLite.

SQLITE_LIMIT_LENGTH
The maximum size of any string or BLOB or table row.

SQLITE_LIMIT_SQL_LENGTH
The maximum length of an SQL statement.

SQLITE_LIMIT_COLUMN
The maximum number of columns in a table definition or in the result set of a SELECT, or the maximum number of columns in an index or in an ORDER BY or GROUP BY clause.

SQLITE_LIMIT_EXPR_DEPTH
The maximum depth of the parse tree on any expression.

SQLITE_LIMIT_COMPOUND_SELECT
The maximum number of terms in a compound SELECT statement.

SQLITE_LIMIT_VDBE_OP
The maximum number of instructions in a virtual machine program used to implement an SQL statement.

SQLITE_LIMIT_FUNCTION_ARG
The maximum number of arguments on a function.

SQLITE_LIMIT_ATTACHED
The maximum number of attached databases.

SQLITE_LIMIT_LIKE_PATTERN_LENGTH
The maximum length of the pattern argument to the LIKE or GLOB operators.

SQLITE_LIMIT_VARIABLE_NUMBER
The maximum number of variables in an SQL statement that can be bound.
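As a usage sketch (db is assumed to be a valid open connection and the numeric values are arbitrary), an application that evaluates SQL from untrusted sources might lower several of these limits:

#include <sqlite3.h>

static void harden_connection(sqlite3 *db){
  sqlite3_limit(db, SQLITE_LIMIT_LENGTH,          1000000);  /* 1 MB strings/BLOBs */
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH,      100000);   /* 100 KB statements */
  sqlite3_limit(db, SQLITE_LIMIT_EXPR_DEPTH,      100);
  sqlite3_limit(db, SQLITE_LIMIT_COMPOUND_SELECT, 10);
  /* sqlite3_limit() returns the previous value, so passing a negative
  ** new value queries a limit without changing it. */
  (void)sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1);
}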

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_lock_exclusive.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_lock_exclusive.html --- sqlite3-3.4.2/www/c3ref/c_lock_exclusive.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_lock_exclusive.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +File Locking Levels + + + + + +

    SQLite C Interface

    File Locking Levels

    #define SQLITE_LOCK_NONE          0
    +#define SQLITE_LOCK_SHARED        1
    +#define SQLITE_LOCK_RESERVED      2
    +#define SQLITE_LOCK_PENDING       3
    +#define SQLITE_LOCK_EXCLUSIVE     4
    +

SQLite uses one of these integer values as the second argument to calls it makes to the xLock() and xUnlock() methods of an sqlite3_io_methods object.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/close.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/close.html --- sqlite3-3.4.2/www/c3ref/close.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/close.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,106 @@ + + +Closing A Database Connection + + + + + +

    SQLite C Interface

    Closing A Database Connection

    int sqlite3_close(sqlite3 *);
    +

This routine is the destructor for the sqlite3 object.

Applications should finalize all prepared statements and close all BLOB handles associated with the sqlite3 object prior to attempting to close the object. The sqlite3_next_stmt() interface can be used to locate all prepared statements associated with a database connection if desired. Typical code might look like this:

    +sqlite3_stmt *pStmt;
    +while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    +    sqlite3_finalize(pStmt);
    +}
    +

If sqlite3_close() is invoked while a transaction is open, the transaction is automatically rolled back.

The C parameter to sqlite3_close(C) must be either a NULL pointer or an sqlite3 object pointer obtained from sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(), and not previously closed.

Requirements: H12011 H12012 H12013 H12014 H12015 H12019
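Putting the pieces together, a hedged sketch (not prescriptive) of an orderly shutdown: finalize any outstanding statements, then check the return code of sqlite3_close().

#include <sqlite3.h>
#include <stdio.h>

static void shutdown_db(sqlite3 *db){
  sqlite3_stmt *pStmt;
  while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    sqlite3_finalize(pStmt);
  }
  if( sqlite3_close(db)!=SQLITE_OK ){
    /* A failure here typically means something (for example an open
    ** BLOB handle) is still outstanding on the connection. */
    fprintf(stderr, "close failed: %s\n", sqlite3_errmsg(db));
  }
}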

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_mutex_fast.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_mutex_fast.html --- sqlite3-3.4.2/www/c3ref/c_mutex_fast.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_mutex_fast.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,94 @@ + + +Mutex Types + + + + + +

    SQLite C Interface

    Mutex Types

    #define SQLITE_MUTEX_FAST             0
    +#define SQLITE_MUTEX_RECURSIVE        1
    +#define SQLITE_MUTEX_STATIC_MASTER    2
    +#define SQLITE_MUTEX_STATIC_MEM       3  /* sqlite3_malloc() */
    +#define SQLITE_MUTEX_STATIC_MEM2      4  /* NOT USED */
    +#define SQLITE_MUTEX_STATIC_OPEN      4  /* sqlite3BtreeOpen() */
    +#define SQLITE_MUTEX_STATIC_PRNG      5  /* sqlite3_random() */
    +#define SQLITE_MUTEX_STATIC_LRU       6  /* lru page list */
    +#define SQLITE_MUTEX_STATIC_LRU2      7  /* lru page list */
    +

The sqlite3_mutex_alloc() interface takes a single argument which is one of these integer constants.

The set of static mutexes may change from one SQLite release to the next. Applications that override the built-in mutex logic must be prepared to accommodate additional static mutexes.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/collation_needed.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/collation_needed.html --- sqlite3-3.4.2/www/c3ref/collation_needed.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/collation_needed.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,114 @@ + + +Collation Needed Callbacks + + + + + +

    SQLite C Interface

    Collation Needed Callbacks

    int sqlite3_collation_needed(
    +  sqlite3*, 
    +  void*, 
    +  void(*)(void*,sqlite3*,int eTextRep,const char*)
    +);
    +int sqlite3_collation_needed16(
    +  sqlite3*, 
    +  void*,
    +  void(*)(void*,sqlite3*,int eTextRep,const void*)
    +);
    +

To avoid having to register all collation sequences before a database can be used, a single callback function may be registered with the database connection to be called whenever an undefined collation sequence is required.

If the function is registered using the sqlite3_collation_needed() API, then it is passed the names of undefined collation sequences as strings encoded in UTF-8. If sqlite3_collation_needed16() is used, the names are passed as UTF-16 in machine native byte order. A call to either function replaces any existing callback.

When the callback is invoked, the first argument passed is a copy of the second argument to sqlite3_collation_needed() or sqlite3_collation_needed16(). The second argument is the database connection. The third argument is one of SQLITE_UTF8, SQLITE_UTF16BE, or SQLITE_UTF16LE, indicating the most desirable form of the collation sequence function required. The fourth parameter is the name of the required collation sequence.

The callback function should register the desired collation using sqlite3_create_collation(), sqlite3_create_collation16(), or sqlite3_create_collation_v2().

Requirements: H16702 H16704 H16706
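A hedged sketch of such a callback (all names here are invented): it checks which collation was asked for and registers a comparator on the supplied connection.

#include <sqlite3.h>
#include <string.h>

/* Illustrative comparator: byte-wise order, shorter string first.
** A real collation would implement whatever ordering its name promises. */
static int myCompare(void *pArg, int n1, const void *p1, int n2, const void *p2){
  int n = n1<n2 ? n1 : n2;
  int c = memcmp(p1, p2, n);
  (void)pArg;
  return c ? c : n1-n2;
}

/* Callback registered with sqlite3_collation_needed(). */
static void collationNeeded(void *pArg, sqlite3 *db, int eTextRep, const char *zName){
  (void)pArg; (void)eTextRep;
  if( strcmp(zName, "mycoll")==0 ){   /* "mycoll" is a made-up collation name */
    sqlite3_create_collation(db, "mycoll", SQLITE_UTF8, 0, myCompare);
  }
}

/* Registration (db assumed open):
**   sqlite3_collation_needed(db, 0, collationNeeded);
*/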

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/column_blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/column_blob.html --- sqlite3-3.4.2/www/c3ref/column_blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/column_blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,240 @@ + + +Result Values From A Query + + + + + +

    SQLite C Interface

    Result Values From A Query

    const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
    +int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
    +int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
    +double sqlite3_column_double(sqlite3_stmt*, int iCol);
    +int sqlite3_column_int(sqlite3_stmt*, int iCol);
    +sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
    +const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
    +const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
    +int sqlite3_column_type(sqlite3_stmt*, int iCol);
    +sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
    +

These routines form the "result set query" interface.

These routines return information about a single column of the current result row of a query. In every case the first argument is a pointer to the prepared statement that is being evaluated (the sqlite3_stmt* that was returned from sqlite3_prepare_v2() or one of its variants) and the second argument is the index of the column for which information should be returned. The leftmost column of the result set has the index 0.

If the SQL statement does not currently point to a valid row, or if the column index is out of range, the result is undefined. These routines may only be called when the most recent call to sqlite3_step() has returned SQLITE_ROW and neither sqlite3_reset() nor sqlite3_finalize() has been called subsequently. If any of these routines are called after sqlite3_reset() or sqlite3_finalize() or after sqlite3_step() has returned something other than SQLITE_ROW, the results are undefined. If sqlite3_step() or sqlite3_reset() or sqlite3_finalize() are called from a different thread while any of these routines are pending, then the results are undefined.

The sqlite3_column_type() routine returns the datatype code for the initial data type of the result column. The returned value is one of SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL. The value returned by sqlite3_column_type() is only meaningful if no type conversions have occurred as described below. After a type conversion, the value returned by sqlite3_column_type() is undefined. Future versions of SQLite may change the behavior of sqlite3_column_type() following a type conversion.

If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() routine returns the number of bytes in that BLOB or string. If the result is a UTF-16 string, then sqlite3_column_bytes() converts the string to UTF-8 and then returns the number of bytes. If the result is a numeric value then sqlite3_column_bytes() uses sqlite3_snprintf() to convert that value to a UTF-8 string and returns the number of bytes in that string. The value returned does not include the zero terminator at the end of the string. For clarity: the value returned is the number of bytes in the string, not the number of characters.

Strings returned by sqlite3_column_text() and sqlite3_column_text16(), even empty strings, are always zero-terminated. The return value from sqlite3_column_blob() for a zero-length BLOB is an arbitrary pointer, possibly even a NULL pointer.

The sqlite3_column_bytes16() routine is similar to sqlite3_column_bytes() but leaves the result in UTF-16 in native byte order instead of UTF-8. The zero terminator is not included in this count.

The object returned by sqlite3_column_value() is an unprotected sqlite3_value object. An unprotected sqlite3_value object may only be used with sqlite3_bind_value() and sqlite3_result_value(). If the unprotected sqlite3_value object returned by sqlite3_column_value() is used in any other way, including calls to routines like sqlite3_value_int(), sqlite3_value_text(), or sqlite3_value_bytes(), then the behavior is undefined.

These routines attempt to convert the value where appropriate. For example, if the internal representation is FLOAT and a text result is requested, sqlite3_snprintf() is used internally to perform the conversion automatically. The following table details the conversions that are applied:

Internal Type | Requested Type | Conversion
--------------|----------------|--------------------------------
NULL          | INTEGER        | Result is 0
NULL          | FLOAT          | Result is 0.0
NULL          | TEXT           | Result is NULL pointer
NULL          | BLOB           | Result is NULL pointer
INTEGER       | FLOAT          | Convert from integer to float
INTEGER       | TEXT           | ASCII rendering of the integer
INTEGER       | BLOB           | Same as INTEGER->TEXT
FLOAT         | INTEGER        | Convert from float to integer
FLOAT         | TEXT           | ASCII rendering of the float
FLOAT         | BLOB           | Same as FLOAT->TEXT
TEXT          | INTEGER        | Use atoi()
TEXT          | FLOAT          | Use atof()
TEXT          | BLOB           | No change
BLOB          | INTEGER        | Convert to TEXT then use atoi()
BLOB          | FLOAT          | Convert to TEXT then use atof()
BLOB          | TEXT           | Add a zero terminator if needed

The table above makes reference to the standard C library functions atoi() and atof(). SQLite does not really use these functions. It has its own equivalent internal routines. The atoi() and atof() names are used in the table for brevity and because they are familiar to most C programmers.

Note that when type conversions occur, pointers returned by prior calls to sqlite3_column_blob(), sqlite3_column_text(), and/or sqlite3_column_text16() may be invalidated. Type conversions and pointer invalidations might occur in the following cases:

• The initial content is a BLOB and sqlite3_column_text() or sqlite3_column_text16() is called. A zero-terminator might need to be added to the string.
• The initial content is UTF-8 text and sqlite3_column_bytes16() or sqlite3_column_text16() is called. The content must be converted to UTF-16.
• The initial content is UTF-16 text and sqlite3_column_bytes() or sqlite3_column_text() is called. The content must be converted to UTF-8.

Conversions between UTF-16be and UTF-16le are always done in place and do not invalidate a prior pointer, though of course the content of the buffer that the prior pointer points to will have been modified. Other kinds of conversion are done in place when it is possible, but sometimes they are not possible and in those cases prior pointers are invalidated.

The safest and easiest to remember policy is to invoke these routines in one of the following ways:

• sqlite3_column_text() followed by sqlite3_column_bytes()
• sqlite3_column_blob() followed by sqlite3_column_bytes()
• sqlite3_column_text16() followed by sqlite3_column_bytes16()

In other words, you should call sqlite3_column_text(), sqlite3_column_blob(), or sqlite3_column_text16() first to force the result into the desired format, then invoke sqlite3_column_bytes() or sqlite3_column_bytes16() to find the size of the result. Do not mix calls to sqlite3_column_text() or sqlite3_column_blob() with calls to sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() with calls to sqlite3_column_bytes().
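For instance, a sketch that follows the text-then-bytes pairing described above to print the first result column of an already-prepared statement (the statement itself is assumed, not taken from this page):

#include <sqlite3.h>
#include <stdio.h>

static void print_first_column(sqlite3_stmt *pStmt){
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    if( sqlite3_column_type(pStmt, 0)==SQLITE_NULL ){
      printf("NULL\n");
    }else{
      const unsigned char *zTxt = sqlite3_column_text(pStmt, 0);  /* force TEXT first */
      int nByte = sqlite3_column_bytes(pStmt, 0);                 /* then take the length */
      printf("%.*s\n", nByte, zTxt ? (const char*)zTxt : "");
    }
  }
  sqlite3_reset(pStmt);
}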

The pointers returned are valid until a type conversion occurs as described above, or until sqlite3_step() or sqlite3_reset() or sqlite3_finalize() is called. The memory space used to hold strings and BLOBs is freed automatically. Do not pass the pointers returned by sqlite3_column_blob(), sqlite3_column_text(), etc. into sqlite3_free().

If a memory allocation error occurs during the evaluation of any of these routines, a default value is returned. The default value is either the integer 0, the floating point number 0.0, or a NULL pointer. Subsequent calls to sqlite3_errcode() will return SQLITE_NOMEM.

Requirements: H13803 H13806 H13809 H13812 H13815 H13818 H13821 H13824 H13827 H13830

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/column_count.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/column_count.html --- sqlite3-3.4.2/www/c3ref/column_count.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/column_count.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Number Of Columns In A Result Set + + + + + +

    SQLite C Interface

    Number Of Columns In A Result Set

    int sqlite3_column_count(sqlite3_stmt *pStmt);
    +

Return the number of columns in the result set returned by the prepared statement. This routine returns 0 if pStmt is an SQL statement that does not return data (for example an UPDATE).

Requirements: H13711

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/column_database_name.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/column_database_name.html --- sqlite3-3.4.2/www/c3ref/column_database_name.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/column_database_name.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,125 @@ + + +Source Of Data In A Query Result + + + + + +

    SQLite C Interface

    Source Of Data In A Query Result

    const char *sqlite3_column_database_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
    +const char *sqlite3_column_table_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
    +const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
    +

These routines provide a means to determine what column of what table in which database a result of a SELECT statement comes from. The name of the database or table or column can be returned as either a UTF-8 or UTF-16 string. The _database_ routines return the database name, the _table_ routines return the table name, and the _origin_ routines return the column name. The returned string is valid until the prepared statement is destroyed using sqlite3_finalize() or until the same information is requested again in a different encoding.

The names returned are the original un-aliased names of the database, table, and column.

The first argument to the following calls is a prepared statement. These functions return information about the Nth column returned by the statement, where N is the second function argument.

If the Nth column returned by the statement is an expression or subquery and is not a column value, then all of these functions return NULL. These routines might also return NULL if a memory allocation error occurs. Otherwise, they return the name of the attached database, table, and column that the query result column was extracted from.

As with all other SQLite APIs, those postfixed with "16" return UTF-16 encoded strings and the other functions return UTF-8.

These APIs are only available if the library was compiled with the SQLITE_ENABLE_COLUMN_METADATA C-preprocessor symbol defined.

If two or more threads call one or more of these column metadata interfaces for the same prepared statement and result column at the same time, then the results are undefined.

Requirements: H13741 H13742 H13743 H13744 H13745 H13746 H13748

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/column_decltype.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/column_decltype.html --- sqlite3-3.4.2/www/c3ref/column_decltype.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/column_decltype.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,109 @@ + + +Declared Datatype Of A Query Result + + + + + +

    SQLite C Interface

    Declared Datatype Of A Query Result

    const char *sqlite3_column_decltype(sqlite3_stmt*,int);
    +const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
    +

The first parameter is a prepared statement. If this statement is a SELECT statement and the Nth column of the returned result set of that SELECT is a table column (not an expression or subquery) then the declared type of the table column is returned. If the Nth column of the result set is an expression or subquery, then a NULL pointer is returned. The returned string is always UTF-8 encoded.

For example, given the database schema:

CREATE TABLE t1(c1 VARIANT);

and the following statement to be compiled:

SELECT c1 + 1, c1 FROM t1;

this routine would return the string "VARIANT" for the second result column (i==1), and a NULL pointer for the first result column (i==0).

SQLite uses dynamic run-time typing. So just because a column is declared to contain a particular type does not mean that the data stored in that column is of the declared type. SQLite is strongly typed, but the typing is dynamic not static. Type is associated with individual values, not with the containers used to hold those values.

Requirements: H13761 H13762 H13763

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/column_name.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/column_name.html --- sqlite3-3.4.2/www/c3ref/column_name.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/column_name.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,104 @@ + + +Column Names In A Result Set + + + + + +

    SQLite C Interface

    Column Names In A Result Set

    const char *sqlite3_column_name(sqlite3_stmt*, int N);
    +const void *sqlite3_column_name16(sqlite3_stmt*, int N);
    +

These routines return the name assigned to a particular column in the result set of a SELECT statement. The sqlite3_column_name() interface returns a pointer to a zero-terminated UTF-8 string and sqlite3_column_name16() returns a pointer to a zero-terminated UTF-16 string. The first parameter is the prepared statement that implements the SELECT statement. The second parameter is the column number. The leftmost column is number 0.

The returned string pointer is valid until either the prepared statement is destroyed by sqlite3_finalize() or until the next call to sqlite3_column_name() or sqlite3_column_name16() on the same column.

If sqlite3_malloc() fails during the processing of either routine (for example during a conversion from UTF-8 to UTF-16) then a NULL pointer is returned.

The name of a result column is the value of the "AS" clause for that column, if there is an AS clause. If there is no AS clause then the name of the column is unspecified and may change from one release of SQLite to the next.

Requirements: H13721 H13723 H13724 H13725 H13726 H13727
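A small sketch combining this interface with sqlite3_column_count() to dump a result header for an already-prepared statement:

#include <sqlite3.h>
#include <stdio.h>

static void print_header(sqlite3_stmt *pStmt){
  int i, n = sqlite3_column_count(pStmt);
  for(i=0; i<n; i++){
    const char *zName = sqlite3_column_name(pStmt, i);   /* NULL only on malloc failure */
    printf("%s%s", zName ? zName : "(nomem)", i<n-1 ? ", " : "\n");
  }
}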

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/commit_hook.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/commit_hook.html --- sqlite3-3.4.2/www/c3ref/commit_hook.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/commit_hook.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,126 @@ + + +Commit And Rollback Notification Callbacks + + + + + +

    SQLite C Interface

    Commit And Rollback Notification Callbacks

    void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
    +void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
    +

The sqlite3_commit_hook() interface registers a callback function to be invoked whenever a transaction is committed. Any callback set by a previous call to sqlite3_commit_hook() for the same database connection is overridden. The sqlite3_rollback_hook() interface registers a callback function to be invoked whenever a transaction is rolled back. Any callback set by a previous call to sqlite3_rollback_hook() for the same database connection is overridden. The pArg argument is passed through to the callback. If the callback on a commit hook function returns non-zero, then the commit is converted into a rollback.

If another function was previously registered, its pArg value is returned. Otherwise NULL is returned.

The callback implementation must not do anything that will modify the database connection that invoked the callback. Any actions to modify the database connection must be deferred until after the completion of the sqlite3_step() call that triggered the commit or rollback hook in the first place. Note that calling sqlite3_prepare_v2() or sqlite3_step() counts as modifying the database connection for the purposes of this paragraph.

Registering a NULL function disables the callback.

When the commit hook callback routine returns zero, the COMMIT operation is allowed to continue normally. If the commit hook returns non-zero, then the COMMIT is converted into a ROLLBACK. The rollback hook is invoked on a rollback that results from a commit hook returning non-zero, just as it would be with any other rollback.

For the purposes of this API, a transaction is said to have been rolled back if an explicit "ROLLBACK" statement is executed, or an error or constraint causes an implicit rollback to occur. The rollback callback is not invoked if a transaction is automatically rolled back because the database connection is closed. The rollback callback is not invoked if a transaction is rolled back because a commit callback returned non-zero. (TODO: Check on this)

See also the sqlite3_update_hook() interface.

Requirements: H12951 H12952 H12953 H12954 H12955 H12961 H12962 H12963 H12964
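As an illustration (the struct and function names here are invented), a commit hook that vetoes commits while the application is in a read-only mode, paired with a rollback hook that simply logs:

#include <sqlite3.h>
#include <stdio.h>

struct AppState { int readOnlyMode; };   /* hypothetical application state */

static int onCommit(void *pArg){
  struct AppState *p = (struct AppState*)pArg;
  return p->readOnlyMode;   /* non-zero turns the COMMIT into a ROLLBACK */
}

static void onRollback(void *pArg){
  (void)pArg;
  fprintf(stderr, "transaction rolled back\n");
}

/* Registration (db assumed open, state assumed to outlive the hooks):
**   sqlite3_commit_hook(db, onCommit, &state);
**   sqlite3_rollback_hook(db, onRollback, &state);
*/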

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/complete.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/complete.html --- sqlite3-3.4.2/www/c3ref/complete.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/complete.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,112 @@ + + +Determine If An SQL Statement Is Complete + + + + + +

    SQLite C Interface

    Determine If An SQL Statement Is Complete

    int sqlite3_complete(const char *sql);
    +int sqlite3_complete16(const void *sql);
    +

These routines are useful during command-line input to determine if the currently entered text seems to form a complete SQL statement or if additional input is needed before sending the text into SQLite for parsing. These routines return 1 if the input string appears to be a complete SQL statement. A statement is judged to be complete if it ends with a semicolon token and is not a prefix of a well-formed CREATE TRIGGER statement. Semicolons that are embedded within string literals or quoted identifier names or comments are not independent tokens (they are part of the token in which they are embedded) and thus do not count as a statement terminator. Whitespace and comments that follow the final semicolon are ignored.

These routines return 0 if the statement is incomplete. If a memory allocation fails, then SQLITE_NOMEM is returned.

These routines do not parse the SQL statements and thus will not detect syntactically incorrect SQL.

If SQLite has not been initialized using sqlite3_initialize() prior to invoking sqlite3_complete16() then sqlite3_initialize() is invoked automatically by sqlite3_complete16(). If that initialization fails, then the return value from sqlite3_complete16() will be non-zero regardless of whether or not the input SQL is complete.

Requirements: H10511 H10512

The input to sqlite3_complete() must be a zero-terminated UTF-8 string.

The input to sqlite3_complete16() must be a zero-terminated UTF-16 string in native byte order.
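A sketch of the intended command-line use: keep appending input lines to a buffer until sqlite3_complete() reports a whole statement. Error handling and buffer growth are omitted for brevity; none of this is from the reference page itself.

#include <sqlite3.h>
#include <stdio.h>
#include <string.h>

static void read_statement(char *zBuf, size_t nBuf){
  char zLine[1024];
  zBuf[0] = 0;
  printf("sql> ");
  while( fgets(zLine, sizeof(zLine), stdin) ){
    strncat(zBuf, zLine, nBuf - strlen(zBuf) - 1);
    if( sqlite3_complete(zBuf) ) break;    /* ends with ";" outside any literal/comment */
    printf("...> ");                       /* continuation prompt */
  }
}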

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/config.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/config.html --- sqlite3-3.4.2/www/c3ref/config.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/config.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,108 @@ + + +Configuring The SQLite Library + + + + + +

    SQLite C Interface

    Configuring The SQLite Library

    int sqlite3_config(int, ...);
    +

    Important: This interface is experimental and is subject to change without notice.

The sqlite3_config() interface is used to make global configuration changes to SQLite in order to tune SQLite to the specific needs of the application. The default configuration is recommended for most applications and so this routine is usually not necessary. It is provided to support rare applications with unusual needs.

The sqlite3_config() interface is not threadsafe. The application must ensure that no other SQLite interfaces are invoked by other threads while sqlite3_config() is running. Furthermore, sqlite3_config() may only be invoked prior to library initialization using sqlite3_initialize() or after shutdown by sqlite3_shutdown(). Note, however, that sqlite3_config() can be called as part of the implementation of an application-defined sqlite3_os_init().

The first argument to sqlite3_config() is an integer configuration option that determines what property of SQLite is to be configured. Subsequent arguments vary depending on the configuration option in the first argument.

When a configuration option is set, sqlite3_config() returns SQLITE_OK. If the option is unknown or SQLite is unable to set the option then this routine returns a non-zero error code.

Requirements: H14103 H14106 H14120 H14123 H14126 H14129 H14132 H14135 H14138 H14141 H14144 H14147 H14150 H14153 H14156 H14159 H14162 H14165 H14168
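A minimal sketch of the calling pattern: configuration happens once, before the library is initialized. SQLITE_CONFIG_SERIALIZED is used here purely as an example option.

#include <sqlite3.h>

static int setup_sqlite(void){
  int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);  /* pick the serialized threading mode */
  if( rc!=SQLITE_OK ) return rc;     /* option unknown, or the library is already running */
  return sqlite3_initialize();
}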

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/constlist.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/constlist.html --- sqlite3-3.4.2/www/c3ref/constlist.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/constlist.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,82 @@ + + +List Of SQLite Constants + + + + + +

    SQLite C Interface

Constants:

Note: Constants marked with "(exp)" are experimental and constants marked with "(obs)" are deprecated.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/context_db_handle.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/context_db_handle.html --- sqlite3-3.4.2/www/c3ref/context_db_handle.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/context_db_handle.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,88 @@ + + +Database Connection For Functions + + + + + +

    SQLite C Interface

    Database Connection For Functions

    sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
    +

The sqlite3_context_db_handle() interface returns a copy of the pointer to the database connection (the 1st parameter) of the sqlite3_create_function() and sqlite3_create_function16() routines that originally registered the application defined function.

Requirements: H16253

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/context.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/context.html --- sqlite3-3.4.2/www/c3ref/context.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/context.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,88 @@ + + +SQL Function Context Object + + + + + +

    SQLite C Interface

    SQL Function Context Object

    typedef struct sqlite3_context sqlite3_context;
    +

The context in which an SQL function executes is stored in an sqlite3_context object. A pointer to an sqlite3_context object is always the first parameter to application-defined SQL functions. The application-defined SQL function implementation will pass this pointer through into calls to sqlite3_result(), sqlite3_aggregate_context(), sqlite3_user_data(), sqlite3_context_db_handle(), sqlite3_get_auxdata(), and/or sqlite3_set_auxdata().

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_open_create.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_open_create.html --- sqlite3-3.4.2/www/c3ref/c_open_create.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_open_create.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +Flags For File Open Operations + + + + + +

    SQLite C Interface

    Flags For File Open Operations

    #define SQLITE_OPEN_READONLY         0x00000001  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_READWRITE        0x00000002  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_CREATE           0x00000004  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_DELETEONCLOSE    0x00000008  /* VFS only */
    +#define SQLITE_OPEN_EXCLUSIVE        0x00000010  /* VFS only */
    +#define SQLITE_OPEN_MAIN_DB          0x00000100  /* VFS only */
    +#define SQLITE_OPEN_TEMP_DB          0x00000200  /* VFS only */
    +#define SQLITE_OPEN_TRANSIENT_DB     0x00000400  /* VFS only */
    +#define SQLITE_OPEN_MAIN_JOURNAL     0x00000800  /* VFS only */
    +#define SQLITE_OPEN_TEMP_JOURNAL     0x00001000  /* VFS only */
    +#define SQLITE_OPEN_SUBJOURNAL       0x00002000  /* VFS only */
    +#define SQLITE_OPEN_MASTER_JOURNAL   0x00004000  /* VFS only */
    +#define SQLITE_OPEN_NOMUTEX          0x00008000  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_FULLMUTEX        0x00010000  /* Ok for sqlite3_open_v2() */
    +

These bit values are intended for use in the 3rd parameter to the sqlite3_open_v2() interface and in the 4th parameter to the xOpen method of the sqlite3_vfs object.
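For instance, a sketch that opens (and if necessary creates) a database file named "app.db" with the full-mutex threading mode, using the default VFS; the file name and flag choice are illustrative only.

#include <sqlite3.h>

static sqlite3 *open_app_db(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open_v2("app.db", &db,
             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX,
             0 /* default VFS */);
  if( rc!=SQLITE_OK ){
    sqlite3_close(db);   /* a handle may be returned even when the open fails */
    return 0;
  }
  return db;
}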

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/create_collation.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/create_collation.html --- sqlite3-3.4.2/www/c3ref/create_collation.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/create_collation.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,147 @@ + + +Define New Collating Sequences + + + + + +

    SQLite C Interface

    Define New Collating Sequences

    int sqlite3_create_collation(
    +  sqlite3*, 
    +  const char *zName, 
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*)
    +);
    +int sqlite3_create_collation_v2(
    +  sqlite3*, 
    +  const char *zName, 
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*),
    +  void(*xDestroy)(void*)
    +);
    +int sqlite3_create_collation16(
    +  sqlite3*, 
    +  const void *zName,
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*)
    +);
    +

These functions are used to add new collation sequences to the database connection specified as the first argument.

The name of the new collation sequence is specified as a UTF-8 string for sqlite3_create_collation() and sqlite3_create_collation_v2() and a UTF-16 string for sqlite3_create_collation16(). In all cases the name is passed as the second function argument.

The third argument may be one of the constants SQLITE_UTF8, SQLITE_UTF16LE, or SQLITE_UTF16BE, indicating that the user-supplied routine expects to be passed pointers to strings encoded using UTF-8, UTF-16 little-endian, or UTF-16 big-endian, respectively. The third argument might also be SQLITE_UTF16 to indicate that the routine expects pointers to UTF-16 strings in the native byte order, or the argument can be SQLITE_UTF16_ALIGNED if the routine expects pointers to 16-bit word aligned strings of UTF-16 in the native byte order.

A pointer to the user supplied routine must be passed as the fifth argument. If it is NULL, this is the same as deleting the collation sequence (so that SQLite cannot call it any more). Each time the application supplied function is invoked, it is passed as its first parameter a copy of the void* passed as the fourth argument to sqlite3_create_collation() or sqlite3_create_collation16().

The remaining arguments to the application-supplied routine are two strings, each represented by a (length, data) pair and encoded in the encoding that was passed as the third argument when the collation sequence was registered. The application defined collation routine should return negative, zero or positive if the first string is less than, equal to, or greater than the second string, i.e. (STRING1 - STRING2).

sqlite3_create_collation_v2() works like sqlite3_create_collation() except that it takes an extra argument which is a destructor for the collation. The destructor is called when the collation is destroyed and is passed a copy of the fourth parameter void* pointer of the sqlite3_create_collation_v2(). Collations are destroyed when they are overridden by later calls to the collation creation functions or when the database connection is closed using sqlite3_close().

See also: sqlite3_collation_needed() and sqlite3_collation_needed16().

Requirements: H16603 H16604 H16606 H16609 H16612 H16615 H16618 H16621 H16624 H16627 H16630
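A hedged sketch of a registration: the collation name "reverse" is invented for this example, and the comparator simply inverts the default byte-wise ordering.

#include <sqlite3.h>
#include <string.h>

/* Comparator for a made-up collation named "reverse": the opposite of
** memcmp() ordering, with shorter strings sorting after longer prefixes. */
static int reverseCompare(void *pNotUsed, int n1, const void *p1,
                                          int n2, const void *p2){
  int n = n1<n2 ? n1 : n2;
  int c = memcmp(p1, p2, n);
  (void)pNotUsed;
  if( c==0 ) c = n1 - n2;
  return -c;
}

/* Registration (db assumed open):
**   sqlite3_create_collation(db, "reverse", SQLITE_UTF8, 0, reverseCompare);
*/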

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/create_function.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/create_function.html --- sqlite3-3.4.2/www/c3ref/create_function.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/create_function.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,176 @@ + + +Create Or Redefine SQL Functions + + + + + +

    SQLite C Interface

    Create Or Redefine SQL Functions

    int sqlite3_create_function(
    +  sqlite3 *db,
    +  const char *zFunctionName,
    +  int nArg,
    +  int eTextRep,
    +  void *pApp,
    +  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xFinal)(sqlite3_context*)
    +);
    +int sqlite3_create_function16(
    +  sqlite3 *db,
    +  const void *zFunctionName,
    +  int nArg,
    +  int eTextRep,
    +  void *pApp,
    +  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xFinal)(sqlite3_context*)
    +);
    +

These two functions (collectively known as "function creation routines") are used to add SQL functions or aggregates or to redefine the behavior of existing SQL functions or aggregates. The only difference between the two is that the second parameter, the name of the (scalar) function or aggregate, is encoded in UTF-8 for sqlite3_create_function() and UTF-16 for sqlite3_create_function16().

The first parameter is the database connection to which the SQL function is to be added. If a single program uses more than one database connection internally, then SQL functions must be added individually to each database connection.

The second parameter is the name of the SQL function to be created or redefined. The length of the name is limited to 255 bytes, exclusive of the zero-terminator. Note that the name length limit is in bytes, not characters. Any attempt to create a function with a longer name will result in SQLITE_ERROR being returned.

The third parameter (nArg) is the number of arguments that the SQL function or aggregate takes. If this parameter is -1, then the SQL function or aggregate may take any number of arguments between 0 and the limit set by sqlite3_limit(SQLITE_LIMIT_FUNCTION_ARG). If the third parameter is less than -1 or greater than 127 then the behavior is undefined.

The fourth parameter, eTextRep, specifies what text encoding this SQL function prefers for its parameters. Any SQL function implementation should be able to work with UTF-8, UTF-16le, or UTF-16be, but some implementations may be more efficient with one encoding than another. It is allowed to invoke sqlite3_create_function() or sqlite3_create_function16() multiple times with the same function but with different values of eTextRep. When multiple implementations of the same function are available, SQLite will pick the one that involves the least amount of data conversion. If there is only a single implementation which does not care what text encoding is used, then the fourth argument should be SQLITE_ANY.

The fifth parameter is an arbitrary pointer. The implementation of the function can gain access to this pointer using sqlite3_user_data().

The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are pointers to C-language functions that implement the SQL function or aggregate. A scalar SQL function requires an implementation of the xFunc callback only; NULL pointers should be passed as the xStep and xFinal parameters. An aggregate SQL function requires an implementation of xStep and xFinal and NULL should be passed for xFunc. To delete an existing SQL function or aggregate, pass NULL for all three function callbacks.

It is permitted to register multiple implementations of the same function with the same name but with either differing numbers of arguments or differing preferred text encodings. SQLite will use the implementation that most closely matches the way in which the SQL function is used. A function implementation with a non-negative nArg parameter is a better match than a function implementation with a negative nArg. A function where the preferred text encoding matches the database encoding is a better match than a function where the encoding is different. A function where the encoding difference is between UTF16le and UTF16be is a closer match than a function where the encoding difference is between UTF8 and UTF16.

Built-in functions may be overloaded by new application-defined functions. The first application-defined function with a given name overrides all built-in functions in the same database connection with the same name. Subsequent application-defined functions of the same name only override prior application-defined functions that are an exact match for the number of parameters and preferred encoding.

An application-defined function is permitted to call other SQLite interfaces. However, such calls must not close the database connection nor finalize or reset the prepared statement in which the function is running.

Requirements: H16103 H16106 H16109 H16112 H16118 H16121 H16127 H16130 H16133 H16136 H16139 H16142
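As a worked example (the SQL function name "half" is invented for this sketch), a scalar function that takes one argument and returns it divided by two:

#include <sqlite3.h>

/* Implementation of a hypothetical scalar SQL function half(X) = X/2.0. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;   /* an nArg of 1 below guarantees exactly one argument */
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);
  }else{
    sqlite3_result_double(ctx, sqlite3_value_double(argv[0]) / 2.0);
  }
}

/* Registration (db assumed open): scalar function, so xStep and xFinal are NULL.
**   sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0, halfFunc, 0, 0);
*/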

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/create_module.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/create_module.html --- sqlite3-3.4.2/www/c3ref/create_module.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/create_module.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +Register A Virtual Table Implementation + + + + + +

    SQLite C Interface

    Register A Virtual Table Implementation

    int sqlite3_create_module(
    +  sqlite3 *db,               /* SQLite connection to register module with */
    +  const char *zName,         /* Name of the module */
    +  const sqlite3_module *p,   /* Methods for the module */
    +  void *pClientData          /* Client data for xCreate/xConnect */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

This routine is used to register a new virtual table module name. Module names must be registered before creating a new virtual table using the module, or before using a preexisting virtual table for the module.

The module name is registered on the database connection specified by the first parameter. The name of the module is given by the second parameter. The third parameter is a pointer to the implementation of the virtual table module. The fourth parameter is an arbitrary client data pointer that is passed through into the xCreate and xConnect methods of the virtual table module when a new virtual table is being created or reinitialized.

This interface has exactly the same effect as calling sqlite3_create_module_v2() with a NULL client data destructor.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/create_module_v2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/create_module_v2.html --- sqlite3-3.4.2/www/c3ref/create_module_v2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/create_module_v2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +Register A Virtual Table Implementation + + + + + +

    SQLite C Interface

    Register A Virtual Table Implementation

    int sqlite3_create_module_v2(
    +  sqlite3 *db,               /* SQLite connection to register module with */
    +  const char *zName,         /* Name of the module */
    +  const sqlite3_module *p,   /* Methods for the module */
    +  void *pClientData,         /* Client data for xCreate/xConnect */
    +  void(*xDestroy)(void*)     /* Module destructor function */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

This routine is identical to the sqlite3_create_module() method, except that it has an extra parameter to specify a destructor function for the client data pointer. SQLite will invoke the destructor function (if it is not NULL) when SQLite no longer needs the pClientData pointer.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_static.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_static.html --- sqlite3-3.4.2/www/c3ref/c_static.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_static.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Constants Defining Special Destructor Behavior + + + + + +

    SQLite C Interface

    Constants Defining Special Destructor Behavior

    typedef void (*sqlite3_destructor_type)(void*);
    +#define SQLITE_STATIC      ((sqlite3_destructor_type)0)
    +#define SQLITE_TRANSIENT   ((sqlite3_destructor_type)-1)
    +

These are special values for the destructor that is passed in as the final argument to routines like sqlite3_result_blob(). If the destructor argument is SQLITE_STATIC, it means that the content pointer is constant and will never change. It does not need to be destroyed. The SQLITE_TRANSIENT value means that the content will likely change in the near future and that SQLite should make its own private copy of the content before returning.

The typedef is necessary to work around problems in certain C++ compilers. See ticket #2191.

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_status_malloc_size.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_status_malloc_size.html --- sqlite3-3.4.2/www/c3ref/c_status_malloc_size.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_status_malloc_size.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,160 @@ + + +Status Parameters + + + + + +

    SQLite C Interface

    Status Parameters

    #define SQLITE_STATUS_MEMORY_USED          0
    +#define SQLITE_STATUS_PAGECACHE_USED       1
    +#define SQLITE_STATUS_PAGECACHE_OVERFLOW   2
    +#define SQLITE_STATUS_SCRATCH_USED         3
    +#define SQLITE_STATUS_SCRATCH_OVERFLOW     4
    +#define SQLITE_STATUS_MALLOC_SIZE          5
    +#define SQLITE_STATUS_PARSER_STACK         6
    +#define SQLITE_STATUS_PAGECACHE_SIZE       7
    +#define SQLITE_STATUS_SCRATCH_SIZE         8
    +

    Important: This interface is experimental and is subject to change without notice.

    +These integer constants designate various run-time status parameters +that can be returned by sqlite3_status().

    + +

    +
    SQLITE_STATUS_MEMORY_USED
    +
    This parameter is the current amount of memory checked out +using sqlite3_malloc(), either directly or indirectly. The +figure includes calls made to sqlite3_malloc() by the application +and internal memory usage by the SQLite library. Scratch memory +controlled by SQLITE_CONFIG_SCRATCH and auxiliary page-cache +memory controlled by SQLITE_CONFIG_PAGECACHE is not included in +this parameter. The amount returned is the sum of the allocation +sizes as reported by the xSize method in sqlite3_mem_methods.

    + +

    SQLITE_STATUS_MALLOC_SIZE
    +
    This parameter records the largest memory allocation request +handed to sqlite3_malloc() or sqlite3_realloc() (or their +internal equivalents). Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_PAGECACHE_USED
    +
    This parameter returns the number of pages used out of the +pagecache memory allocator that was configured using +SQLITE_CONFIG_PAGECACHE. The +value returned is in pages, not in bytes.

    + +

    SQLITE_STATUS_PAGECACHE_OVERFLOW
    +
This parameter returns the number of bytes of page cache +allocation which could not be satisfied by the SQLITE_CONFIG_PAGECACHE +buffer and were forced to overflow to sqlite3_malloc(). The +returned value includes allocations that overflowed because they +were too large (they were larger than the "sz" parameter to +SQLITE_CONFIG_PAGECACHE) and allocations that overflowed because +no space was left in the page cache.

    + +

    SQLITE_STATUS_PAGECACHE_SIZE
    +
This parameter records the largest memory allocation request +handed to the pagecache memory allocator. Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_SCRATCH_USED
    +
This parameter returns the number of allocations used out of the +scratch memory allocator configured using +SQLITE_CONFIG_SCRATCH. The value returned is in allocations, not +in bytes. Since a single thread may only have one scratch allocation +outstanding at a time, this parameter also reports the number of threads +using scratch memory at the same time.

    + +

    SQLITE_STATUS_SCRATCH_OVERFLOW
    +
This parameter returns the number of bytes of scratch memory +allocation which could not be satisfied by the SQLITE_CONFIG_SCRATCH +buffer and were forced to overflow to sqlite3_malloc(). The values +returned include overflows because the requested allocation was too +large (that is, because the requested allocation was larger than the +"sz" parameter to SQLITE_CONFIG_SCRATCH) and because no scratch buffer +slots were available. +

    + +

    SQLITE_STATUS_SCRATCH_SIZE
    +
This parameter records the largest memory allocation request +handed to the scratch memory allocator. Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_PARSER_STACK
    +
    This parameter records the deepest parser stack. It is only +meaningful if SQLite is compiled with YYTRACKMAXSTACKDEPTH.
    +

    + +

    New status parameters may be added from time to time. +
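A small usage sketch (illustrative, not from the page itself) that reads one of these counters:

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  int cur = 0, hi = 0;
  sqlite3_initialize();
  /* resetFlag==0: report the high-water mark without resetting it. */
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("memory in use: %d bytes (high-water %d)\n", cur, hi);
  }
  sqlite3_shutdown();
  return 0;
}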

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_stmtstatus_fullscan_step.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_stmtstatus_fullscan_step.html --- sqlite3-3.4.2/www/c3ref/c_stmtstatus_fullscan_step.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_stmtstatus_fullscan_step.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,98 @@ + + +Status Parameters for prepared statements + + + + + +

    SQLite C Interface

    Status Parameters for prepared statements

    #define SQLITE_STMTSTATUS_FULLSCAN_STEP     1
    +#define SQLITE_STMTSTATUS_SORT              2
    +

    Important: This interface is experimental and is subject to change without notice.

    +These preprocessor macros define integer codes that name counter +values associated with the sqlite3_stmt_status() interface. +The meanings of the various counters are as follows:

    + +

    +
    SQLITE_STMTSTATUS_FULLSCAN_STEP
    +
    This is the number of times that SQLite has stepped forward in +a table as part of a full table scan. Large numbers for this counter +may indicate opportunities for performance improvement through +careful use of indices.

    + +

    SQLITE_STMTSTATUS_SORT
    +
This is the number of sort operations that have occurred. +A non-zero value in this counter may indicate an opportunity to +improve performance through careful use of indices.

    + +

    +
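Illustrative sketch (not from the original page) of reading the full-scan counter after a query has run:

#include <sqlite3.h>

/* The final argument 0 leaves the counter unreset. */
static int fullscan_steps(sqlite3_stmt *pStmt){
  return sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
}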

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_sync_dataonly.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_sync_dataonly.html --- sqlite3-3.4.2/www/c3ref/c_sync_dataonly.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_sync_dataonly.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Synchronization Type Flags + + + + + +

    SQLite C Interface

    Synchronization Type Flags

    #define SQLITE_SYNC_NORMAL        0x00002
    +#define SQLITE_SYNC_FULL          0x00003
    +#define SQLITE_SYNC_DATAONLY      0x00010
    +

    +When SQLite invokes the xSync() method of an +sqlite3_io_methods object it uses a combination of +these integer values as the second argument.

    + +

    When the SQLITE_SYNC_DATAONLY flag is used, it means that the +sync operation only needs to flush data to mass storage. Inode +information need not be flushed. If the lower four bits of the flag +equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. +If the lower four bits equal SQLITE_SYNC_FULL, that means +to use Mac OS X style fullsync instead of fsync(). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_testctrl_always.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_testctrl_always.html --- sqlite3-3.4.2/www/c3ref/c_testctrl_always.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_testctrl_always.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,95 @@ + + +Testing Interface Operation Codes + + + + + +

    SQLite C Interface

    Testing Interface Operation Codes

    #define SQLITE_TESTCTRL_PRNG_SAVE                5
    +#define SQLITE_TESTCTRL_PRNG_RESTORE             6
    +#define SQLITE_TESTCTRL_PRNG_RESET               7
    +#define SQLITE_TESTCTRL_BITVEC_TEST              8
    +#define SQLITE_TESTCTRL_FAULT_INSTALL            9
    +#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS     10
    +#define SQLITE_TESTCTRL_PENDING_BYTE            11
    +#define SQLITE_TESTCTRL_ASSERT                  12
    +#define SQLITE_TESTCTRL_ALWAYS                  13
    +

    +These constants are the valid operation code parameters used +as the first argument to sqlite3_test_control().

    + +

    These parameters and their meanings are subject to change +without notice. These values are for testing purposes only. +Applications should not use any of these parameters or the +sqlite3_test_control() interface. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/c_version.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/c_version.html --- sqlite3-3.4.2/www/c3ref/c_version.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/c_version.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,99 @@ + + +Compile-Time Library Version Numbers + + + + + +

    SQLite C Interface

    Compile-Time Library Version Numbers

    #define SQLITE_VERSION         "3.6.16"
    +#define SQLITE_VERSION_NUMBER  3006016
    +

    +The SQLITE_VERSION and SQLITE_VERSION_NUMBER #defines in +the sqlite3.h file specify the version of SQLite with which +that header file is associated.

    + +

The "version" of SQLite is a string of the form "X.Y.Z". +The phrase "alpha" or "beta" might be appended after the Z. +The X value is the major version number and is always 3 in SQLite3. +The X value only changes when backwards compatibility is +broken and we intend to never break backwards compatibility. +The Y value is the minor version number and only changes when +there are major feature enhancements that are forwards compatible +but not backwards compatible. +The Z value is the release number and is incremented with +each release but resets back to 0 whenever Y is incremented.
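A compile-time vs. run-time comparison sketch (illustrative only):

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  /* Header the program was compiled against vs. the library it is running with. */
  printf("compiled against SQLite %s (%d)\n", SQLITE_VERSION, SQLITE_VERSION_NUMBER);
  printf("running with     SQLite %s (%d)\n",
         sqlite3_libversion(), sqlite3_libversion_number());
  return 0;
}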

    + +

    See also: sqlite3_libversion() and sqlite3_libversion_number().

    + +

    Requirements: H10011 H10014 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/data_count.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/data_count.html --- sqlite3-3.4.2/www/c3ref/data_count.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/data_count.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,84 @@ + + +Number of columns in a result set + + + + + +

    SQLite C Interface

    Number of columns in a result set

    int sqlite3_data_count(sqlite3_stmt *pStmt);
    +

    +Returns the number of values in the current row of the result set.

    + +

    Requirements: +H13771 H13772 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/db_config.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/db_config.html --- sqlite3-3.4.2/www/c3ref/db_config.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/db_config.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +Configure database connections + + + + + +

    SQLite C Interface

    Configure database connections

    int sqlite3_db_config(sqlite3*, int op, ...);
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_db_config() interface is used to make configuration +changes to a database connection. The interface is similar to +sqlite3_config() except that the changes apply to a single +database connection (specified in the first argument). The +sqlite3_db_config() interface can only be used immediately after +the database connection is created using sqlite3_open(), +sqlite3_open16(), or sqlite3_open_v2().

    + +

    The second argument to sqlite3_db_config(D,V,...) is the +configuration verb - an integer code that indicates what +aspect of the database connection is being configured. +The only choice for this value is SQLITE_DBCONFIG_LOOKASIDE. +New verbs are likely to be added in future releases of SQLite. +Additional arguments depend on the verb.
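Illustrative fragment (not from the original page); it assumes db was just opened and that passing a NULL buffer pointer asks SQLite to allocate the lookaside pool itself:

#include <sqlite3.h>
#include <stddef.h>

/* Sketch: give this connection a private lookaside pool of 100 slots
** of 512 bytes each, configured before the connection is otherwise used. */
static int setup_lookaside(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 512, 100);
}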

    + +

    Requirements: +H14203 H14206 H14209 H14212 H14215 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/db_handle.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/db_handle.html --- sqlite3-3.4.2/www/c3ref/db_handle.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/db_handle.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +Find The Database Handle Of A Prepared Statement + + + + + +

    SQLite C Interface

    Find The Database Handle Of A Prepared Statement

    sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
    +

    +The sqlite3_db_handle interface returns the database connection handle +to which a prepared statement belongs. The database connection +returned by sqlite3_db_handle is the same database connection that was the first argument +to the sqlite3_prepare_v2() call (or its variants) that was used to +create the statement in the first place.
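Illustrative sketch (not from the original page) of recovering the connection from a statement when reporting an error; the helper name is invented:

#include <sqlite3.h>
#include <stdio.h>

static int step_and_report(sqlite3_stmt *pStmt){
  int rc = sqlite3_step(pStmt);
  if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
    sqlite3 *db = sqlite3_db_handle(pStmt);   /* the connection that prepared pStmt */
    fprintf(stderr, "step failed: %s\n", sqlite3_errmsg(db));
  }
  return rc;
}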

    + +

    Requirements: H13123 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/db_mutex.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/db_mutex.html --- sqlite3-3.4.2/www/c3ref/db_mutex.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/db_mutex.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +Retrieve the mutex for a database connection + + + + + +

    SQLite C Interface

    Retrieve the mutex for a database connection

    sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
    +

+This interface returns a pointer to the sqlite3_mutex object that +serializes access to the database connection given in the argument +when the threading mode is Serialized. +If the threading mode is Single-thread or Multi-thread then this +routine returns a NULL pointer. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/db_status.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/db_status.html --- sqlite3-3.4.2/www/c3ref/db_status.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/db_status.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +Database Connection Status + + + + + +

    SQLite C Interface

    Database Connection Status

    int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This interface is used to retrieve runtime status information +about a single database connection. The first argument is the +database connection object to be interrogated. The second argument +is the parameter to interrogate. Currently, the only allowed value +for the second parameter is SQLITE_DBSTATUS_LOOKASIDE_USED. +Additional options will likely appear in future releases of SQLite.

    + +

    The current value of the requested parameter is written into *pCur +and the highest instantaneous value is written into *pHiwtr. If +the resetFlg is true, then the highest instantaneous value is +reset back down to the current value.
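Illustrative sketch (not from the original page) of reading the one currently supported parameter:

#include <sqlite3.h>
#include <stdio.h>

/* Sketch: report how much of the connection's lookaside memory is in use. */
static void report_lookaside(sqlite3 *db){
  int cur = 0, hi = 0;
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("lookaside slots in use: %d (high-water %d)\n", cur, hi);
  }
}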

    + +

    See also: sqlite3_status() and sqlite3_stmt_status(). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/declare_vtab.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/declare_vtab.html --- sqlite3-3.4.2/www/c3ref/declare_vtab.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/declare_vtab.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,84 @@ + + +Declare The Schema Of A Virtual Table + + + + + +

    SQLite C Interface

    Declare The Schema Of A Virtual Table

    int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
    +

    Important: This interface is experimental and is subject to change without notice.

    +The xCreate and xConnect methods of a +virtual table module call this interface +to declare the format (the names and datatypes of the columns) of +the virtual tables they implement. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/enable_load_extension.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/enable_load_extension.html --- sqlite3-3.4.2/www/c3ref/enable_load_extension.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/enable_load_extension.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Enable Or Disable Extension Loading + + + + + +

    SQLite C Interface

    Enable Or Disable Extension Loading

    int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
    +

    +So as not to open security holes in older applications that are +unprepared to deal with extension loading, and as a means of disabling +extension loading while evaluating user-entered SQL, the following API +is provided to turn the sqlite3_load_extension() mechanism on and off.

    + +

    Extension loading is off by default. See ticket #1863.

    + +

    Call the sqlite3_enable_load_extension() routine with onoff==1 +to turn extension loading on and call it with onoff==0 to turn +it back off again.

    + +

    Extension loading is off by default. +
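Illustrative sketch (not from the original page); the helper name and the policy of re-disabling loading afterwards are assumptions:

#include <sqlite3.h>
#include <stdio.h>

static int load_trusted_extension(sqlite3 *db, const char *zFile){
  char *zErr = 0;
  int rc;
  sqlite3_enable_load_extension(db, 1);               /* onoff==1: allow loading */
  rc = sqlite3_load_extension(db, zFile, 0, &zErr);   /* 0 => default entry point */
  sqlite3_enable_load_extension(db, 0);               /* onoff==0: disable again */
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "load failed: %s\n", zErr ? zErr : "unknown error");
    sqlite3_free(zErr);
  }
  return rc;
}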

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/enable_shared_cache.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/enable_shared_cache.html --- sqlite3-3.4.2/www/c3ref/enable_shared_cache.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/enable_shared_cache.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,108 @@ + + +Enable Or Disable Shared Pager Cache + + + + + +

    SQLite C Interface

    Enable Or Disable Shared Pager Cache

    int sqlite3_enable_shared_cache(int);
    +

    +This routine enables or disables the sharing of the database cache +and schema data structures between connections +to the same database. Sharing is enabled if the argument is true +and disabled if the argument is false.

    + +

    Cache sharing is enabled and disabled for an entire process. +This is a change as of SQLite version 3.5.0. In prior versions of SQLite, +sharing was enabled or disabled for each thread separately.

    + +

The cache sharing mode set by this interface affects all subsequent +calls to sqlite3_open(), sqlite3_open_v2(), and sqlite3_open16(). +Existing database connections continue to use the sharing mode +that was in effect at the time they were opened.

    + +

    Virtual tables cannot be used with a shared cache. When shared +cache is enabled, the sqlite3_create_module() API used to register +virtual tables will always return an error.

    + +

    This routine returns SQLITE_OK if shared cache was enabled or disabled +successfully. An error code is returned otherwise.

    + +

    Shared cache is disabled by default. But this might change in +future releases of SQLite. Applications that care about shared +cache setting should set it explicitly.

    + +

    See Also: SQLite Shared-Cache Mode

    + +

    Requirements: H10331 H10336 H10337 H10339 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/errcode.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/errcode.html --- sqlite3-3.4.2/www/c3ref/errcode.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/errcode.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,115 @@ + + +Error Codes And Messages + + + + + +

    SQLite C Interface

    Error Codes And Messages

    int sqlite3_errcode(sqlite3 *db);
    +int sqlite3_extended_errcode(sqlite3 *db);
    +const char *sqlite3_errmsg(sqlite3*);
    +const void *sqlite3_errmsg16(sqlite3*);
    +

    +The sqlite3_errcode() interface returns the numeric result code or +extended result code for the most recent failed sqlite3_* API call +associated with a database connection. If a prior API call failed +but the most recent API call succeeded, the return value from +sqlite3_errcode() is undefined. The sqlite3_extended_errcode() +interface is the same except that it always returns the +extended result code even when extended result codes are +disabled.

    + +

    The sqlite3_errmsg() and sqlite3_errmsg16() return English-language +text that describes the error, as either UTF-8 or UTF-16 respectively. +Memory to hold the error message string is managed internally. +The application does not need to worry about freeing the result. +However, the error string might be overwritten or deallocated by +subsequent calls to other SQLite interface functions.

    + +

    When the serialized threading mode is in use, it might be the +case that a second error occurs on a separate thread in between +the time of the first error and the call to these interfaces. +When that happens, the second error will be reported since these +interfaces always report the most recent result. To avoid +this, each thread can obtain exclusive use of the database connection D +by invoking sqlite3_mutex_enter(sqlite3_db_mutex(D)) before beginning +to use D and invoking sqlite3_mutex_leave(sqlite3_db_mutex(D)) after +all calls to the interfaces listed here are completed.
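Illustrative sketch (not from the original page) of the mutex pattern described above:

#include <sqlite3.h>
#include <stdio.h>

/* Hold the connection mutex so a second thread cannot overwrite the
** error state between reading the code and reading the message. */
static void log_last_error(sqlite3 *db){
  sqlite3_mutex_enter(sqlite3_db_mutex(db));
  fprintf(stderr, "error %d: %s\n", sqlite3_extended_errcode(db), sqlite3_errmsg(db));
  sqlite3_mutex_leave(sqlite3_db_mutex(db));
}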

    + +

    If an interface fails with SQLITE_MISUSE, that means the interface +was invoked incorrectly by the application. In that case, the +error code and message may or may not be set.

    + +

    Requirements: +H12801 H12802 H12803 H12807 H12808 H12809 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/exec.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/exec.html --- sqlite3-3.4.2/www/c3ref/exec.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/exec.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,126 @@ + + +One-Step Query Execution Interface + + + + + +

    SQLite C Interface

    One-Step Query Execution Interface

    int sqlite3_exec(
    +  sqlite3*,                                  /* An open database */
    +  const char *sql,                           /* SQL to be evaluated */
    +  int (*callback)(void*,int,char**,char**),  /* Callback function */
    +  void *,                                    /* 1st argument to callback */
    +  char **errmsg                              /* Error msg written here */
    +);
    +

    +The sqlite3_exec() interface is a convenient way of running one or more +SQL statements without having to write a lot of C code. The UTF-8 encoded +SQL statements are passed in as the second parameter to sqlite3_exec(). +The statements are evaluated one by one until either an error or +an interrupt is encountered, or until they are all done. The 3rd parameter +is an optional callback that is invoked once for each row of any query +results produced by the SQL statements. The 5th parameter tells where +to write any error messages.

    + +

    The error message passed back through the 5th parameter is held +in memory obtained from sqlite3_malloc(). To avoid a memory leak, +the calling application should call sqlite3_free() on any error +message returned through the 5th parameter when it has finished using +the error message.

    + +

    If the SQL statement in the 2nd parameter is NULL or an empty string +or a string containing only whitespace and comments, then no SQL +statements are evaluated and the database is not changed.

    + +

    The sqlite3_exec() interface is implemented in terms of +sqlite3_prepare_v2(), sqlite3_step(), and sqlite3_finalize(). +The sqlite3_exec() routine does nothing to the database that cannot be done +by sqlite3_prepare_v2(), sqlite3_step(), and sqlite3_finalize().

    + +

The first parameter to sqlite3_exec() must be a valid and open +database connection.

    + +

    The database connection must not be closed while +sqlite3_exec() is running.

    + +

    The calling function should use sqlite3_free() to free +the memory that *errmsg is left pointing at once the error +message is no longer needed.

    + +

    The SQL statement text in the 2nd parameter to sqlite3_exec() +must remain unchanged while sqlite3_exec() is running.
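A minimal usage sketch (illustrative only; the query and helper names are invented) showing the callback and the error-message cleanup described above:

#include <sqlite3.h>
#include <stdio.h>

/* Invoked once per result row; azVal holds column text, azCol the column names. */
static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  int i;
  (void)pArg;
  for(i=0; i<nCol; i++){
    printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
  }
  printf("\n");
  return 0;                     /* non-zero would abort the query with SQLITE_ABORT */
}

static int list_schema(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db, "SELECT name FROM sqlite_master;", print_row, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "SQL error: %s\n", zErr ? zErr : "out of memory");
    sqlite3_free(zErr);         /* the message is obtained from sqlite3_malloc() */
  }
  return rc;
}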

    + +

    Requirements: +H12101 H12102 H12104 H12105 H12107 H12110 H12113 H12116 +H12119 H12122 H12125 H12131 H12134 H12137 H12138 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/experimental.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/experimental.html --- sqlite3-3.4.2/www/c3ref/experimental.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/experimental.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,117 @@ + + +Experimental Interfaces + + + + + +

    SQLite C Interface

    + +

    Experimental And Deprecated Interfaces

    + +

    SQLite interfaces can be subdivided into three categories:

    + +
1. Stable
2. Experimental
3. Deprecated
    + +

    Stable interfaces will be maintained indefinitely in a backwards +compatible way. An application that uses only stable interfaces +should always be able to relink against a newer version of SQLite +without any changes.

    + +

Experimental interfaces are subject to change. +Applications that use experimental interfaces +may need to be modified when upgrading to a newer SQLite release. +When new interfaces are added to SQLite, they generally begin +as experimental interfaces. After an interface has been in use for +a while and the developers are confident that the design of the interface +is sound and worthy of long-term support, the interface is marked +as stable.

    + +

Deprecated interfaces have been superseded by better methods of +accomplishing the same thing and should be avoided in new applications. +Deprecated interfaces continue to be supported for the sake of +backwards compatibility. At some point in the future, it is possible +that deprecated interfaces may be removed.

    + +

    Key points:

    + +
      +
• Experimental interfaces are subject to change and/or removal at any time.

• Deprecated interfaces should not be used in new code and might be removed in some future release.
    + +
    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/extended_result_codes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/extended_result_codes.html --- sqlite3-3.4.2/www/c3ref/extended_result_codes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/extended_result_codes.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Enable Or Disable Extended Result Codes + + + + + +

    SQLite C Interface

    Enable Or Disable Extended Result Codes

    int sqlite3_extended_result_codes(sqlite3*, int onoff);
    +

    +The sqlite3_extended_result_codes() routine enables or disables the +extended result codes feature of SQLite. The extended result +codes are disabled by default for historical compatibility considerations.

    + +

    Requirements: +H12201 H12202 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/file_control.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/file_control.html --- sqlite3-3.4.2/www/c3ref/file_control.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/file_control.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +Low-Level Control Of Database Files + + + + + +

    SQLite C Interface

    Low-Level Control Of Database Files

    int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
    +

    + The sqlite3_file_control() interface makes a direct call to the +xFileControl method for the sqlite3_io_methods object associated +with a particular database identified by the second argument. The +name of the database is the name assigned to the database by the +ATTACH SQL command that opened the +database. To control the main database file, use the name "main" +or a NULL pointer. The third and fourth parameters to this routine +are passed directly through to the second and third parameters of +the xFileControl method. The return value of the xFileControl +method becomes the return value of this routine.

    + +

    If the second parameter (zDbName) does not match the name of any +open database file, then SQLITE_ERROR is returned. This error +code is not remembered and will not be recalled by sqlite3_errcode() +or sqlite3_errmsg(). The underlying xFileControl method might +also return SQLITE_ERROR. There is no way to distinguish between +an incorrect zDbName and an SQLITE_ERROR return from the underlying +xFileControl method.

    + +

    See also: SQLITE_FCNTL_LOCKSTATE +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/file.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/file.html --- sqlite3-3.4.2/www/c3ref/file.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/file.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +OS Interface Open File Handle + + + + + +

    SQLite C Interface

    OS Interface Open File Handle

    typedef struct sqlite3_file sqlite3_file;
    +struct sqlite3_file {
    +  const struct sqlite3_io_methods *pMethods;  /* Methods for an open file */
    +};
    +

    +An sqlite3_file object represents an open file in the OS +interface layer. Individual OS interface implementations will +want to subclass this object by appending additional fields +for their own use. The pMethods entry is a pointer to an +sqlite3_io_methods object that defines methods for performing +I/O operations on the open file. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/finalize.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/finalize.html --- sqlite3-3.4.2/www/c3ref/finalize.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/finalize.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,95 @@ + + +Destroy A Prepared Statement Object + + + + + +

    SQLite C Interface

    Destroy A Prepared Statement Object

    int sqlite3_finalize(sqlite3_stmt *pStmt);
    +

    +The sqlite3_finalize() function is called to delete a prepared statement. +If the statement was executed successfully or not executed at all, then +SQLITE_OK is returned. If execution of the statement failed then an +error code or extended error code is returned.

    + +

    This routine can be called at any point during the execution of the +prepared statement. If the virtual machine has not +completed execution when this routine is called, that is like +encountering an error or an interrupt. +Incomplete updates may be rolled back and transactions canceled, +depending on the circumstances, and the +error code returned will be SQLITE_ABORT.

    + +

    Requirements: +H11302 H11304 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/free.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/free.html --- sqlite3-3.4.2/www/c3ref/free.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/free.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,159 @@ + + +Memory Allocation Subsystem + + + + + +

    SQLite C Interface

    Memory Allocation Subsystem

    void *sqlite3_malloc(int);
    +void *sqlite3_realloc(void*, int);
    +void sqlite3_free(void*);
    +

    +The SQLite core uses these three routines for all of its own +internal memory allocation needs. "Core" in the previous sentence +does not include operating-system specific VFS implementation. The +Windows VFS uses native malloc() and free() for some operations.

    + +

    The sqlite3_malloc() routine returns a pointer to a block +of memory at least N bytes in length, where N is the parameter. +If sqlite3_malloc() is unable to obtain sufficient free +memory, it returns a NULL pointer. If the parameter N to +sqlite3_malloc() is zero or negative then sqlite3_malloc() returns +a NULL pointer.

    + +

Calling sqlite3_free() with a pointer previously returned +by sqlite3_malloc() or sqlite3_realloc() releases that memory so +that it might be reused. The sqlite3_free() routine is +a no-op if it is called with a NULL pointer. Passing a NULL pointer +to sqlite3_free() is harmless. After being freed, memory +should neither be read nor written. Even reading previously freed +memory might result in a segmentation fault or other severe error. +Memory corruption, a segmentation fault, or other severe error +might result if sqlite3_free() is called with a non-NULL pointer that +was not obtained from sqlite3_malloc() or sqlite3_realloc().

    + +

    The sqlite3_realloc() interface attempts to resize a +prior memory allocation to be at least N bytes, where N is the +second parameter. The memory allocation to be resized is the first +parameter. If the first parameter to sqlite3_realloc() +is a NULL pointer then its behavior is identical to calling +sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc(). +If the second parameter to sqlite3_realloc() is zero or +negative then the behavior is exactly the same as calling +sqlite3_free(P) where P is the first parameter to sqlite3_realloc(). +sqlite3_realloc() returns a pointer to a memory allocation +of at least N bytes in size or NULL if sufficient memory is unavailable. +If M is the size of the prior allocation, then min(N,M) bytes +of the prior allocation are copied into the beginning of buffer returned +by sqlite3_realloc() and the prior allocation is freed. +If sqlite3_realloc() returns NULL, then the prior allocation +is not freed.

    + +

    The memory returned by sqlite3_malloc() and sqlite3_realloc() +is always aligned to at least an 8 byte boundary.
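Illustrative sketch (not from the original page) of the malloc/realloc/free contract described above:

#include <sqlite3.h>
#include <string.h>

static void allocator_demo(void){
  char *p = (char*)sqlite3_malloc(64);          /* at least 64 bytes, 8-byte aligned */
  char *q;
  if( p==0 ) return;                            /* allocation failed */
  memset(p, 0, 64);
  q = (char*)sqlite3_realloc(p, 256);           /* first 64 bytes carried over */
  if( q==0 ){
    sqlite3_free(p);                            /* realloc failed: p is still live */
  }else{
    sqlite3_free(q);                            /* p must no longer be used */
  }
}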

    + +

The default implementation of the memory allocation subsystem uses +the malloc(), realloc() and free() provided by the standard C library. + However, if SQLite is compiled with the +SQLITE_MEMORY_SIZE=NNN C preprocessor macro (where NNN +is an integer), then SQLite creates a static array of at least +NNN bytes in size and uses that array for all of its dynamic +memory allocation needs. Additional memory allocator options +may be added in future releases.

    + +

In SQLite version 3.5.0 and 3.5.1, it was possible to define +the SQLITE_OMIT_MEMORY_ALLOCATION option, which would cause the built-in +implementation of these routines to be omitted. That capability +is no longer provided. Only built-in memory allocators can be used.

    + +

    The Windows OS interface layer calls +the system malloc() and free() directly when converting +filenames between the UTF-8 encoding used by SQLite +and whatever filename encoding is used by the particular Windows +installation. Memory allocation errors are detected, but +they are reported back as SQLITE_CANTOPEN or +SQLITE_IOERR rather than SQLITE_NOMEM.

    + +

    Requirements: +H17303 H17304 H17305 H17306 H17310 H17312 H17315 H17318 +H17321 H17322 H17323

    + +

    The pointer arguments to sqlite3_free() and sqlite3_realloc() +must be either NULL or else pointers obtained from a prior +invocation of sqlite3_malloc() or sqlite3_realloc() that have +not yet been released.

    + +

    The application must not read or write any part of +a block of memory after it has been released using +sqlite3_free() or sqlite3_realloc(). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/free_table.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/free_table.html --- sqlite3-3.4.2/www/c3ref/free_table.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/free_table.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,155 @@ + + +Convenience Routines For Running Queries + + + + + +

    SQLite C Interface

    Convenience Routines For Running Queries

    int sqlite3_get_table(
    +  sqlite3 *db,          /* An open database */
    +  const char *zSql,     /* SQL to be evaluated */
    +  char ***pazResult,    /* Results of the query */
    +  int *pnRow,           /* Number of result rows written here */
    +  int *pnColumn,        /* Number of result columns written here */
    +  char **pzErrmsg       /* Error msg written here */
    +);
    +void sqlite3_free_table(char **result);
    +

+Definition: A result table is a memory data structure created by the +sqlite3_get_table() interface. A result table records the +complete query results from one or more queries.

    + +

    The table conceptually has a number of rows and columns. But +these numbers are not part of the result table itself. These +numbers are obtained separately. Let N be the number of rows +and M be the number of columns.

    + +

    A result table is an array of pointers to zero-terminated UTF-8 strings. +There are (N+1)*M elements in the array. The first M pointers point +to zero-terminated strings that contain the names of the columns. +The remaining entries all point to query results. NULL values result +in NULL pointers. All other values are in their UTF-8 zero-terminated +string representation as returned by sqlite3_column_text().

    + +

    A result table might consist of one or more memory allocations. +It is not safe to pass a result table directly to sqlite3_free(). +A result table should be deallocated using sqlite3_free_table().

    + +

    As an example of the result table format, suppose a query result +is as follows:

    + +

    +Name        | Age
    +-----------------------
    +Alice       | 43
    +Bob         | 28
    +Cindy       | 21
    +

    + +

There are two columns (M==2) and three rows (N==3). Thus the +result table has 8 entries. Suppose the result table is stored +in an array named azResult. Then azResult holds this content:

    + +

    +azResult[0] = "Name";
    +azResult[1] = "Age";
    +azResult[2] = "Alice";
    +azResult[3] = "43";
    +azResult[4] = "Bob";
    +azResult[5] = "28";
    +azResult[6] = "Cindy";
    +azResult[7] = "21";
    +

    + +

    The sqlite3_get_table() function evaluates one or more +semicolon-separated SQL statements in the zero-terminated UTF-8 +string of its 2nd parameter. It returns a result table to the +pointer given in its 3rd parameter.

    + +

    After the calling function has finished using the result, it should +pass the pointer to the result table to sqlite3_free_table() in order to +release the memory that was malloced. Because of the way the +sqlite3_malloc() happens within sqlite3_get_table(), the calling +function must not try to call sqlite3_free() directly. Only +sqlite3_free_table() is able to release the memory properly and safely.

    + +

    The sqlite3_get_table() interface is implemented as a wrapper around +sqlite3_exec(). The sqlite3_get_table() routine does not have access +to any internal data structures of SQLite. It uses only the public +interface defined here. As a consequence, errors that occur in the +wrapper layer outside of the internal sqlite3_exec() call are not +reflected in subsequent calls to sqlite3_errcode() or sqlite3_errmsg().
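Illustrative sketch (not from the original page); the helper name is invented:

#include <sqlite3.h>
#include <stdio.h>

/* Run a query and walk the (N+1)*M result array, row 0 being the column names. */
static int dump_query(sqlite3 *db, const char *zSql){
  char **azResult = 0;
  char *zErr = 0;
  int nRow = 0, nCol = 0, rc, i, j;
  rc = sqlite3_get_table(db, zSql, &azResult, &nRow, &nCol, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr ? zErr : "out of memory");
    sqlite3_free(zErr);
    return rc;
  }
  for(i=0; i<=nRow; i++){
    for(j=0; j<nCol; j++){
      const char *z = azResult[i*nCol + j];
      printf("%s%s", z ? z : "NULL", j==nCol-1 ? "\n" : " | ");
    }
  }
  sqlite3_free_table(azResult);     /* never plain sqlite3_free() on this pointer */
  return SQLITE_OK;
}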

    + +

    Requirements: +H12371 H12373 H12374 H12376 H12379 H12382 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/funclist.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/funclist.html --- sqlite3-3.4.2/www/c3ref/funclist.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/funclist.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,82 @@ + + +List Of SQLite Functions + + + + + +

    SQLite C Interface

    +

    Functions:

    +

    Note: Functions marked with "(exp)" +are experimental and functions marked with +(obs) are deprecated.

    +

    Other lists: +Constants and +Objects.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/get_autocommit.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/get_autocommit.html --- sqlite3-3.4.2/www/c3ref/get_autocommit.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/get_autocommit.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,98 @@ + + +Test For Auto-Commit Mode + + + + + +

    SQLite C Interface

    Test For Auto-Commit Mode

    int sqlite3_get_autocommit(sqlite3*);
    +

    +The sqlite3_get_autocommit() interface returns non-zero or +zero if the given database connection is or is not in autocommit mode, +respectively. Autocommit mode is on by default. +Autocommit mode is disabled by a BEGIN statement. +Autocommit mode is re-enabled by a COMMIT or ROLLBACK.

    + +

    If certain kinds of errors occur on a statement within a multi-statement +transaction (errors including SQLITE_FULL, SQLITE_IOERR, +SQLITE_NOMEM, SQLITE_BUSY, and SQLITE_INTERRUPT) then the +transaction might be rolled back automatically. The only way to +find out whether SQLite automatically rolled back the transaction after +an error is to use this function.

    + +

    If another thread changes the autocommit status of the database +connection while this routine is running, then the return value +is undefined.

    + +

    Requirements: H12931 H12932 H12933 H12934 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/get_auxdata.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/get_auxdata.html --- sqlite3-3.4.2/www/c3ref/get_auxdata.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/get_auxdata.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,122 @@ + + +Function Auxiliary Data + + + + + +

    SQLite C Interface

    Function Auxiliary Data

    void *sqlite3_get_auxdata(sqlite3_context*, int N);
    +void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
    +

    +The following two functions may be used by scalar SQL functions to +associate metadata with argument values. If the same value is passed to +multiple invocations of the same SQL function during query execution, under +some circumstances the associated metadata may be preserved. This may +be used, for example, to add a regular-expression matching scalar +function. The compiled version of the regular expression is stored as +metadata associated with the SQL value passed as the regular expression +pattern. The compiled regular expression can be reused on multiple +invocations of the same function so that the original pattern string +does not need to be recompiled on each invocation.

    + +

The sqlite3_get_auxdata() interface returns a pointer to the metadata +associated by the sqlite3_set_auxdata() function with the Nth argument +value to the application-defined function. If no metadata has ever +been set for the Nth argument of the function, or if the corresponding +function parameter has changed since the metadata was set, +then sqlite3_get_auxdata() returns a NULL pointer.

    + +

    The sqlite3_set_auxdata() interface saves the metadata +pointed to by its 3rd parameter as the metadata for the N-th +argument of the application-defined function. Subsequent +calls to sqlite3_get_auxdata() might return this data, if it has +not been destroyed. +If it is not NULL, SQLite will invoke the destructor +function given by the 4th parameter to sqlite3_set_auxdata() on +the metadata when the corresponding function parameter changes +or when the SQL statement completes, whichever comes first.

    + +

    SQLite is free to call the destructor and drop metadata on any +parameter of any function at any time. The only guarantee is that +the destructor will be called before the metadata is dropped.

    + +

    In practice, metadata is preserved between function calls for +expressions that are constant at compile time. This includes literal +values and SQL variables.

    + +

    These routines must be called from the same thread in which +the SQL function is running.
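Illustrative sketch (not from the original page); the countchars(set, text) function is hypothetical and exists only to show the caching pattern:

#include <sqlite3.h>
#include <string.h>

/* Counts characters of text that occur in set.  The 256-byte membership
** table derived from argument 0 is cached as auxdata so a constant "set"
** is only parsed once per statement, not once per row. */
static void countchars_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  unsigned char *tab;
  const unsigned char *z;
  int n = 0;
  (void)argc;
  tab = (unsigned char*)sqlite3_get_auxdata(ctx, 0);
  if( tab==0 ){
    tab = (unsigned char*)sqlite3_malloc(256);
    if( tab==0 ){ sqlite3_result_error_nomem(ctx); return; }
    memset(tab, 0, 256);
    for(z=sqlite3_value_text(argv[0]); z && *z; z++) tab[*z] = 1;
    for(z=sqlite3_value_text(argv[1]); z && *z; z++) n += tab[*z];
    sqlite3_result_int(ctx, n);
    /* Hand ownership to SQLite last; sqlite3_free is the destructor. */
    sqlite3_set_auxdata(ctx, 0, tab, sqlite3_free);
  }else{
    for(z=sqlite3_value_text(argv[1]); z && *z; z++) n += tab[*z];
    sqlite3_result_int(ctx, n);
  }
}
/* Register with: sqlite3_create_function(db, "countchars", 2, SQLITE_UTF8, 0,
**                                        countchars_func, 0, 0); */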

    + +

    Requirements: +H16272 H16274 H16276 H16277 H16278 H16279 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/initialize.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/initialize.html --- sqlite3-3.4.2/www/c3ref/initialize.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/initialize.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,144 @@ + + +Initialize The SQLite Library + + + + + +

    SQLite C Interface

    Initialize The SQLite Library

    int sqlite3_initialize(void);
    +int sqlite3_shutdown(void);
    +int sqlite3_os_init(void);
    +int sqlite3_os_end(void);
    +

    +The sqlite3_initialize() routine initializes the +SQLite library. The sqlite3_shutdown() routine +deallocates any resources that were allocated by sqlite3_initialize().

    + +

    A call to sqlite3_initialize() is an "effective" call if it is +the first time sqlite3_initialize() is invoked during the lifetime of +the process, or if it is the first time sqlite3_initialize() is invoked +following a call to sqlite3_shutdown(). Only an effective call +of sqlite3_initialize() does any initialization. All other calls +are harmless no-ops.

    + +

    A call to sqlite3_shutdown() is an "effective" call if it is the first +call to sqlite3_shutdown() since the last sqlite3_initialize(). Only +an effective call to sqlite3_shutdown() does any deinitialization. +All other calls to sqlite3_shutdown() are harmless no-ops.

    + +

    Among other things, sqlite3_initialize() shall invoke +sqlite3_os_init(). Similarly, sqlite3_shutdown() +shall invoke sqlite3_os_end().

    + +

    The sqlite3_initialize() routine returns SQLITE_OK on success. +If for some reason, sqlite3_initialize() is unable to initialize +the library (perhaps it is unable to allocate a needed resource such +as a mutex) it returns an error code other than SQLITE_OK.

    + +

The sqlite3_initialize() routine is called internally by many other +SQLite interfaces so that an application usually does not need to +invoke sqlite3_initialize() directly. For example, sqlite3_open() +calls sqlite3_initialize() so the SQLite library will be automatically +initialized when sqlite3_open() is called if it has not been initialized +already. However, if SQLite is compiled with the SQLITE_OMIT_AUTOINIT +compile-time option, then the automatic calls to sqlite3_initialize() +are omitted and the application must call sqlite3_initialize() directly +prior to using any other SQLite interface. For maximum portability, +it is recommended that applications always invoke sqlite3_initialize() +directly prior to using any other SQLite interface. Future releases +of SQLite may require this. In other words, the behavior exhibited +when SQLite is compiled with SQLITE_OMIT_AUTOINIT might become the +default behavior in some future release of SQLite.
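Illustrative sketch (not from the original page) of explicit initialization as recommended above:

#include <sqlite3.h>

int main(void){
  if( sqlite3_initialize()!=SQLITE_OK ) return 1;   /* explicit init, portable even with SQLITE_OMIT_AUTOINIT */
  /* ... sqlite3_open(), queries, sqlite3_close() ... */
  sqlite3_shutdown();                               /* release library resources */
  return 0;
}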

    + +

    The sqlite3_os_init() routine does operating-system specific +initialization of the SQLite library. The sqlite3_os_end() +routine undoes the effect of sqlite3_os_init(). Typical tasks +performed by these routines include allocation or deallocation +of static resources, initialization of global variables, +setting up a default sqlite3_vfs module, or setting up +a default configuration using sqlite3_config().

    + +

    The application should never invoke either sqlite3_os_init() +or sqlite3_os_end() directly. The application should only invoke +sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() +interface is called automatically by sqlite3_initialize() and +sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate +implementations for sqlite3_os_init() and sqlite3_os_end() +are built into SQLite when it is compiled for unix, windows, or os/2. +When built for other platforms (using the SQLITE_OS_OTHER=1 compile-time +option) the application must supply a suitable implementation for +sqlite3_os_init() and sqlite3_os_end(). An application-supplied +implementation of sqlite3_os_init() or sqlite3_os_end() +must return SQLITE_OK on success and some other error code upon +failure. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/int64.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/int64.html --- sqlite3-3.4.2/www/c3ref/int64.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/int64.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,99 @@ + + +64-Bit Integer Types + + + + + +

    SQLite C Interface

    64-Bit Integer Types

    #ifdef SQLITE_INT64_TYPE
    +  typedef SQLITE_INT64_TYPE sqlite_int64;
    +  typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
    +#elif defined(_MSC_VER) || defined(__BORLANDC__)
    +  typedef __int64 sqlite_int64;
    +  typedef unsigned __int64 sqlite_uint64;
    +#else
    +  typedef long long int sqlite_int64;
    +  typedef unsigned long long int sqlite_uint64;
    +#endif
    +typedef sqlite_int64 sqlite3_int64;
    +typedef sqlite_uint64 sqlite3_uint64;
    +

+Because there is no cross-platform way to specify 64-bit integer types, +SQLite includes typedefs for 64-bit signed and unsigned integers.

    + +

    The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. +The sqlite_int64 and sqlite_uint64 types are supported for backwards +compatibility only.

    + +

    Requirements: H10201 H10202 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/interrupt.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/interrupt.html --- sqlite3-3.4.2/www/c3ref/interrupt.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/interrupt.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,116 @@ + + +Interrupt A Long-Running Query + + + + + +

    SQLite C Interface

    Interrupt A Long-Running Query

    void sqlite3_interrupt(sqlite3*);
    +

    +This function causes any pending database operation to abort and +return at its earliest opportunity. This routine is typically +called in response to a user action such as pressing "Cancel" +or Ctrl-C where the user wants a long query operation to halt +immediately.

    + +

    It is safe to call this routine from a thread different from the +thread that is currently running the database operation. But it +is not safe to call this routine with a database connection that +is closed or might close before sqlite3_interrupt() returns.

    + +

    If an SQL operation is very nearly finished at the time when +sqlite3_interrupt() is called, then it might not have an opportunity +to be interrupted and might continue to completion.

    + +

    An SQL operation that is interrupted will return SQLITE_INTERRUPT. +If the interrupted SQL operation is an INSERT, UPDATE, or DELETE +that is inside an explicit transaction, then the entire transaction +will be rolled back automatically.

    + +

The sqlite3_interrupt(D) call is in effect until all currently running +SQL statements on database connection D complete. Any new SQL statements +that are started after the sqlite3_interrupt() call and before the +running statement count reaches zero are interrupted as if they had been +running prior to the sqlite3_interrupt() call. New SQL statements +that are started after the running statement count reaches zero are +not affected by the sqlite3_interrupt(). +A call to sqlite3_interrupt(D) that occurs when there are no running +SQL statements is a no-op and has no effect on SQL statements +that are started after the sqlite3_interrupt() call returns.

    + +

    Requirements: +H12271 H12272

    + +

    If the database connection closes while sqlite3_interrupt() +is running then bad things will likely happen. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/intro.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/intro.html --- sqlite3-3.4.2/www/c3ref/intro.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/intro.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,122 @@ + + +Introduction + + + + + +

These pages define the C-language interface to SQLite.

    + +

    This is not a tutorial. These +pages are designed to be precise, not easy to read. +For a tutorial introduction see +SQLite In 3 Minutes Or Less and/or +the Introduction To The SQLite C/C++ Interface. +

    + +

    This version of the C-language interface reference is +broken down into small pages for easy viewing. The +same content is also available as a +single large HTML file +for those who prefer that format.

    + +

    The content on these pages is extracted from comments +in the source code.

    + +

The interface is broken down into three categories:

    + +
      +
1. List Of Objects. All abstract objects and datatypes used by the SQLite library. There are a handful of objects, but only three which most users need to be aware of: a database connection object sqlite3, a prepared statement object sqlite3_stmt, and the 64-bit integer type sqlite3_int64.

2. List Of Constants. Numeric constants used by SQLite and represented by #defines in the sqlite3.h header file. These constants are things such as numeric return parameters from various interfaces (ex: SQLITE_OK) or flags passed into functions to control behavior (ex: SQLITE_OPEN_READONLY).

3. List Of Functions. Functions and/or methods operating on the objects and using and/or returning constants. There are many functions, but most applications only use a handful.
    + +
    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/io_methods.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/io_methods.html --- sqlite3-3.4.2/www/c3ref/io_methods.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/io_methods.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,179 @@ + + +OS Interface File Virtual Methods Object + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    OS Interface File Virtual Methods Object

    typedef struct sqlite3_io_methods sqlite3_io_methods;
    +struct sqlite3_io_methods {
    +  int iVersion;
    +  int (*xClose)(sqlite3_file*);
    +  int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
    +  int (*xSync)(sqlite3_file*, int flags);
    +  int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
    +  int (*xLock)(sqlite3_file*, int);
    +  int (*xUnlock)(sqlite3_file*, int);
    +  int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);
    +  int (*xFileControl)(sqlite3_file*, int op, void *pArg);
    +  int (*xSectorSize)(sqlite3_file*);
    +  int (*xDeviceCharacteristics)(sqlite3_file*);
    +  /* Additional methods may be added in future releases */
    +};
    +

    +Every file opened by the sqlite3_vfs xOpen method populates an +sqlite3_file object (or, more commonly, a subclass of the +sqlite3_file object) with a pointer to an instance of this object. +This object defines the methods used to perform various operations +against the open file represented by the sqlite3_file object.

    + +

    If the xOpen method sets the sqlite3_file.pMethods element +to a non-NULL pointer, then the sqlite3_io_methods.xClose method +may be invoked even if the xOpen reported that it failed. The +only way to prevent a call to xClose following a failed xOpen +is for the xOpen to set the sqlite3_file.pMethods element to NULL.

    + +

    The flags argument to xSync may be one of SQLITE_SYNC_NORMAL or +SQLITE_SYNC_FULL. The first choice is the normal fsync(). +The second choice is a Mac OS X style fullsync. The SQLITE_SYNC_DATAONLY +flag may be ORed in to indicate that only the data of the file +and not its inode needs to be synced.

    + +

The integer values to xLock() and xUnlock() are one of:

• SQLITE_LOCK_NONE +
• SQLITE_LOCK_SHARED +
• SQLITE_LOCK_RESERVED +
• SQLITE_LOCK_PENDING +
• SQLITE_LOCK_EXCLUSIVE +

    +xLock() increases the lock. xUnlock() decreases the lock. +The xCheckReservedLock() method checks whether any database connection, +either in this process or in some other process, is holding a RESERVED, +PENDING, or EXCLUSIVE lock on the file. It returns true +if such a lock exists and false otherwise.

    + +

    The xFileControl() method is a generic interface that allows custom +VFS implementations to directly control an open file using the +sqlite3_file_control() interface. The second "op" argument is an +integer opcode. The third argument is a generic pointer intended to +point to a structure that may contain arguments or space in which to +write return values. Potential uses for xFileControl() might be +functions to enable blocking locks with timeouts, to change the +locking strategy (for example to use dot-file locks), to inquire +about the status of a lock, or to break stale locks. The SQLite +core reserves all opcodes less than 100 for its own use. +A list of opcodes less than 100 is available. +Applications that define a custom xFileControl method should use opcodes +greater than 100 to avoid conflicts.

    + +

    The xSectorSize() method returns the sector size of the +device that underlies the file. The sector size is the +minimum write that can be performed without disturbing +other bytes in the file. The xDeviceCharacteristics() +method returns a bit vector describing behaviors of the +underlying device:

    + +

    + +

    The SQLITE_IOCAP_ATOMIC property means that all writes of +any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +mean that writes of blocks that are nnn bytes in size and +are aligned to an address which is an integer multiple of +nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +that when data is appended to a file, the data is appended +first then the size of the file is extended, never the other +way around. The SQLITE_IOCAP_SEQUENTIAL property means that +information is written to disk in the same order as calls +to xWrite().

    + +

    If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill +in the unread portions of the buffer with zeros. A VFS that +fails to zero-fill short reads might seem to work. However, +failure to zero-fill short reads will eventually lead to +database corruption. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/last_insert_rowid.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/last_insert_rowid.html --- sqlite3-3.4.2/www/c3ref/last_insert_rowid.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/last_insert_rowid.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,119 @@ + + +Last Insert Rowid + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Last Insert Rowid

    sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
    +

    +Each entry in an SQLite table has a unique 64-bit signed +integer key called the "rowid". The rowid is always available +as an undeclared column named ROWID, OID, or _ROWID_ as long as those +names are not also used by explicitly declared columns. If +the table has a column of type INTEGER PRIMARY KEY then that column +is another alias for the rowid.

    + +

    This routine returns the rowid of the most recent +successful INSERT into the database from the database connection +in the first argument. If no successful INSERTs +have ever occurred on that database connection, zero is returned.

    + +

    If an INSERT occurs within a trigger, then the rowid of the inserted +row is returned by this routine as long as the trigger is running. +But once the trigger terminates, the value returned by this routine +reverts to the last value inserted before the trigger fired.

    + +

    An INSERT that fails due to a constraint violation is not a +successful INSERT and does not change the value returned by this +routine. Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, +and INSERT OR ABORT make no changes to the return value of this +routine when their insertion fails. When INSERT OR REPLACE +encounters a constraint violation, it does not fail. The +INSERT continues to completion after deleting rows that caused +the constraint problem so INSERT OR REPLACE will always change +the return value of this interface.

    + +

    For the purposes of this routine, an INSERT is considered to +be successful even if it is subsequently rolled back.
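A short sketch, assuming <stdio.h> and <sqlite3.h> are included, db is an open connection, and a hypothetical table t1(a) already exists:

+char *zErrMsg = 0;
+if( sqlite3_exec(db, "INSERT INTO t1(a) VALUES(42)", 0, 0, &zErrMsg)==SQLITE_OK ){
+  sqlite3_int64 rowid = sqlite3_last_insert_rowid(db);
+  printf("new row has rowid %lld\n", (long long)rowid);
+}else{
+  fprintf(stderr, "insert failed: %s\n", zErrMsg);
+  sqlite3_free(zErrMsg);
+}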

    + +

    Requirements: +H12221 H12223

    + +

    If a separate thread performs a new INSERT on the same +database connection while the sqlite3_last_insert_rowid() +function is running and thus changes the last insert rowid, +then the value returned by sqlite3_last_insert_rowid() is +unpredictable and might not equal either the old or the new +last insert rowid. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/libversion.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/libversion.html --- sqlite3-3.4.2/www/c3ref/libversion.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/libversion.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,95 @@ + + +Run-Time Library Version Numbers + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Run-Time Library Version Numbers

    SQLITE_EXTERN const char sqlite3_version[];
    +const char *sqlite3_libversion(void);
    +int sqlite3_libversion_number(void);
    +

    +These features provide the same information as the SQLITE_VERSION +and SQLITE_VERSION_NUMBER #defines in the header, but are associated +with the library instead of the header file. Cautious programmers might +include a check in their application to verify that +sqlite3_libversion_number() always returns the value +SQLITE_VERSION_NUMBER.

    + +

    The sqlite3_libversion() function returns the same information as is +in the sqlite3_version[] string constant. The function is provided +for use in DLLs since DLL users usually do not have direct access to string +constants within the DLL.
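The cautious check mentioned above can be a pair of asserts (a sketch, assuming <assert.h>, <string.h>, and <sqlite3.h> are included):

+assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
+assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
+assert( strcmp(sqlite3_version, SQLITE_VERSION)==0 );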

    + +

    Requirements: H10021 H10022 H10023 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/limit.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/limit.html --- sqlite3-3.4.2/www/c3ref/limit.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/limit.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,113 @@ + + +Run-time Limits + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Run-time Limits

    int sqlite3_limit(sqlite3*, int id, int newVal);
    +

    +This interface allows the size of various constructs to be limited +on a connection by connection basis. The first parameter is the +database connection whose limit is to be set or queried. The +second parameter is one of the limit categories that define a +class of constructs to be size limited. The third parameter is the +new limit for that construct. The function returns the old limit.

    + +

    If the new limit is a negative number, the limit is unchanged. +For the limit category of SQLITE_LIMIT_XYZ there is a +hard upper bound +set by a compile-time C preprocessor macro named +SQLITE_MAX_XYZ. +(The "_LIMIT_" in the name is changed to "_MAX_".) +Attempts to increase a limit above its hard upper bound are +silently truncated to the hard upper limit.

    + +

    Run time limits are intended for use in applications that manage +both their own internal database and also databases that are controlled +by untrusted external sources. An example application might be a +web browser that has its own databases for storing history and +separate databases controlled by JavaScript applications downloaded +off the Internet. The internal databases can be given the +large, default limits. Databases managed by external sources can +be given much smaller limits designed to prevent a denial of service +attack. Developers might also want to use the sqlite3_set_authorizer() +interface to further control untrusted SQL. The size of the database +created by an untrusted script can be contained using the +max_page_count PRAGMA.
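A sketch of tightening a few limits on a connection intended for untrusted SQL (the particular values are arbitrary examples, not recommendations):

+/* sqlite3_limit() returns the previous value of the limit. */
+sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);   /* max length of SQL text  */
+sqlite3_limit(db, SQLITE_LIMIT_VDBE_OP,     25000);   /* max size of the program */
+
+/* Passing a negative value queries a limit without changing it. */
+int nAttach = sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1);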

    + +

    New run-time limit categories may be added in future releases.

    + +

    Requirements: +H12762 H12766 H12769 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/load_extension.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/load_extension.html --- sqlite3-3.4.2/www/c3ref/load_extension.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/load_extension.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,107 @@ + + +Load An Extension + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Load An Extension

    int sqlite3_load_extension(
    +  sqlite3 *db,          /* Load the extension into this database connection */
    +  const char *zFile,    /* Name of the shared library containing extension */
    +  const char *zProc,    /* Entry point.  Derived from zFile if 0 */
    +  char **pzErrMsg       /* Put error message here if not 0 */
    +);
    +

    +This interface loads an SQLite extension library from the named file.

    + +

    The sqlite3_load_extension() interface attempts to load an +SQLite extension library contained in the file zFile.

    + +

    The entry point is zProc.

    + +

    zProc may be 0, in which case the name of the entry point +defaults to "sqlite3_extension_init".

    + +

    The sqlite3_load_extension() interface shall return +SQLITE_OK on success and SQLITE_ERROR if something goes wrong.

    + +

    If an error occurs and pzErrMsg is not 0, then the +sqlite3_load_extension() interface shall attempt to +fill *pzErrMsg with error message text stored in memory +obtained from sqlite3_malloc(). The calling function +should free this memory by calling sqlite3_free().

    + +

    Extension loading must be enabled using +sqlite3_enable_load_extension() prior to calling this API, +otherwise an error will be returned. +
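A sketch of the full sequence, using a hypothetical extension file name "./myext.so" and the default entry point (zProc passed as 0):

+char *zErrMsg = 0;
+sqlite3_enable_load_extension(db, 1);          /* extension loading is off by default */
+if( sqlite3_load_extension(db, "./myext.so", 0, &zErrMsg)!=SQLITE_OK ){
+  fprintf(stderr, "cannot load extension: %s\n", zErrMsg ? zErrMsg : "unknown error");
+  sqlite3_free(zErrMsg);
+}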

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mem_methods.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mem_methods.html --- sqlite3-3.4.2/www/c3ref/mem_methods.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mem_methods.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,128 @@ + + +Memory Allocation Routines + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Memory Allocation Routines

    typedef struct sqlite3_mem_methods sqlite3_mem_methods;
    +struct sqlite3_mem_methods {
    +  void *(*xMalloc)(int);         /* Memory allocation function */
    +  void (*xFree)(void*);          /* Free a prior allocation */
    +  void *(*xRealloc)(void*,int);  /* Resize an allocation */
    +  int (*xSize)(void*);           /* Return the size of an allocation */
    +  int (*xRoundup)(int);          /* Round up request size to allocation size */
    +  int (*xInit)(void*);           /* Initialize the memory allocator */
    +  void (*xShutdown)(void*);      /* Deinitialize the memory allocator */
    +  void *pAppData;                /* Argument to xInit() and xShutdown() */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +An instance of this object defines the interface between SQLite +and low-level memory allocation routines.

    + +

    This object is used in only one place in the SQLite interface. +A pointer to an instance of this object is the argument to +sqlite3_config() when the configuration option is +SQLITE_CONFIG_MALLOC. By creating an instance of this object +and passing it to sqlite3_config() during configuration, an +application can specify an alternative memory allocation subsystem +for SQLite to use for all of its dynamic memory needs.

    + +

Note that SQLite comes with a built-in memory allocator that is +perfectly adequate for the overwhelming majority of applications +and that this object is only useful to a tiny minority of applications +with specialized memory allocation requirements. This object is +also used during testing of SQLite in order to specify an alternative +memory allocator that simulates out-of-memory conditions in +order to verify that SQLite recovers gracefully from such +conditions.

    + +

    The xMalloc, xFree, and xRealloc methods must work like the +malloc(), free(), and realloc() functions from the standard library.

    + +

    xSize should return the allocated size of a memory allocation +previously obtained from xMalloc or xRealloc. The allocated size +is always at least as big as the requested size but may be larger.

    + +

    The xRoundup method returns what would be the allocated size of +a memory allocation given a particular requested size. Most memory +allocators round up memory allocations at least to the next multiple +of 8. Some allocators round up to a larger multiple or to a power of 2.

    + +

The xInit method initializes the memory allocator. (For example, +it might allocate any required mutexes or initialize internal data +structures.) The xShutdown method is invoked (indirectly) by +sqlite3_shutdown() and should deallocate any resources acquired +by xInit. The pAppData pointer is used as the only parameter to +xInit and xShutdown. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/memory_highwater.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/memory_highwater.html --- sqlite3-3.4.2/www/c3ref/memory_highwater.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/memory_highwater.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +Memory Allocator Statistics + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Memory Allocator Statistics

    sqlite3_int64 sqlite3_memory_used(void);
    +sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
    +

    +SQLite provides these two interfaces for reporting on the status +of the sqlite3_malloc(), sqlite3_free(), and sqlite3_realloc() +routines, which form the built-in memory allocation subsystem.
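A sketch of reading both statistics (passing resetFlag as zero reports the high-water mark without resetting it):

+sqlite3_int64 nNow  = sqlite3_memory_used();
+sqlite3_int64 nPeak = sqlite3_memory_highwater(0);
+printf("currently allocated: %lld bytes, high-water mark: %lld bytes\n",
+       (long long)nNow, (long long)nPeak);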

    + +

    Requirements: +H17371 H17373 H17374 H17375 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/module.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/module.html --- sqlite3-3.4.2/www/c3ref/module.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/module.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,119 @@ + + +Virtual Table Object + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Virtual Table Object

    struct sqlite3_module {
    +  int iVersion;
    +  int (*xCreate)(sqlite3*, void *pAux,
    +               int argc, const char *const*argv,
    +               sqlite3_vtab **ppVTab, char**);
    +  int (*xConnect)(sqlite3*, void *pAux,
    +               int argc, const char *const*argv,
    +               sqlite3_vtab **ppVTab, char**);
    +  int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
    +  int (*xDisconnect)(sqlite3_vtab *pVTab);
    +  int (*xDestroy)(sqlite3_vtab *pVTab);
    +  int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
    +  int (*xClose)(sqlite3_vtab_cursor*);
    +  int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
    +                int argc, sqlite3_value **argv);
    +  int (*xNext)(sqlite3_vtab_cursor*);
    +  int (*xEof)(sqlite3_vtab_cursor*);
    +  int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
    +  int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
    +  int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
    +  int (*xBegin)(sqlite3_vtab *pVTab);
    +  int (*xSync)(sqlite3_vtab *pVTab);
    +  int (*xCommit)(sqlite3_vtab *pVTab);
    +  int (*xRollback)(sqlite3_vtab *pVTab);
    +  int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
    +                       void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
    +                       void **ppArg);
    +  int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

+This structure, sometimes called a "virtual table module", +defines the implementation of a virtual table. +This structure consists mostly of methods for the module.

    + +

    A virtual table module is created by filling in a persistent +instance of this structure and passing a pointer to that instance +to sqlite3_create_module() or sqlite3_create_module_v2(). +The registration remains valid until it is replaced by a different +module or until the database connection closes. The content +of this structure must not change while it is registered with +any database connection. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mprintf.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mprintf.html --- sqlite3-3.4.2/www/c3ref/mprintf.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mprintf.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,173 @@ + + +Formatted String Printing Functions + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Formatted String Printing Functions

    char *sqlite3_mprintf(const char*,...);
    +char *sqlite3_vmprintf(const char*, va_list);
    +char *sqlite3_snprintf(int,char*,const char*, ...);
    +

    +These routines are workalikes of the "printf()" family of functions +from the standard C library.

    + +

    The sqlite3_mprintf() and sqlite3_vmprintf() routines write their +results into memory obtained from sqlite3_malloc(). +The strings returned by these two routines should be +released by sqlite3_free(). Both routines return a +NULL pointer if sqlite3_malloc() is unable to allocate enough +memory to hold the resulting string.

    + +

The sqlite3_snprintf() routine is similar to "snprintf()" from +the standard C library. The result is written into the +buffer supplied as the second parameter whose size is given by +the first parameter. Note that the order of the +first two parameters is reversed from snprintf(). This is an +historical accident that cannot be fixed without breaking +backwards compatibility. Note also that sqlite3_snprintf() +returns a pointer to its buffer instead of the number of +characters actually written into the buffer. We admit that +the number of characters written would be a more useful return +value but we cannot change the implementation of sqlite3_snprintf() +now without breaking compatibility.

    + +

    As long as the buffer size is greater than zero, sqlite3_snprintf() +guarantees that the buffer is always zero-terminated. The first +parameter "n" is the total size of the buffer, including space for +the zero terminator. So the longest string that can be completely +written will be n-1 characters.
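A small sketch showing the reversed parameter order (size first, buffer second):

+char zBuf[32];
+sqlite3_snprintf(sizeof(zBuf), zBuf, "x = %d", 42);
+/* zBuf is zero-terminated even if the formatted text had to be truncated. */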

    + +

These routines all implement some additional formatting +options that are useful for constructing SQL statements. +All of the usual printf() formatting options apply. In addition, there +are "%q", "%Q", and "%z" options.

    + +

    The %q option works like %s in that it substitutes a null-terminated +string from the argument list. But %q also doubles every '\'' character. +%q is designed for use inside a string literal. By doubling each '\'' +character it escapes that character and allows it to be inserted into +the string.

    + +

    For example, assume the string variable zText contains text as follows:

    + +

    +char *zText = "It's a happy day!";
    +

    + +

    One can use this text in an SQL statement as follows:

    + +

    +char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES('%q')", zText);
    +sqlite3_exec(db, zSQL, 0, 0, 0);
    +sqlite3_free(zSQL);
    +

    + +

    Because the %q format string is used, the '\'' character in zText +is escaped and the SQL generated is as follows:

    + +

    +INSERT INTO table1 VALUES('It''s a happy day!')
    +

    + +

    This is correct. Had we used %s instead of %q, the generated SQL +would have looked like this:

    + +

    +INSERT INTO table1 VALUES('It's a happy day!');
    +

    + +

    This second example is an SQL syntax error. As a general rule you should +always use %q instead of %s when inserting text into a string literal.

    + +

    The %Q option works like %q except it also adds single quotes around +the outside of the total string. Additionally, if the parameter in the +argument list is a NULL pointer, %Q substitutes the text "NULL" (without +single quotes) in place of the %Q option. So, for example, one could say:

    + +

    +char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
    +sqlite3_exec(db, zSQL, 0, 0, 0);
    +sqlite3_free(zSQL);
    +

    + +

    The code above will render a correct SQL statement in the zSQL +variable even if the zText variable is a NULL pointer.

    + +

    The "%z" formatting option works exactly like "%s" with the +addition that after the string has been read and copied into +the result, sqlite3_free() is called on the input string.

    + +

    Requirements: +H17403 H17406 H17407 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mutex_alloc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mutex_alloc.html --- sqlite3-3.4.2/www/c3ref/mutex_alloc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mutex_alloc.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,192 @@ + + +Mutexes + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Mutexes

    sqlite3_mutex *sqlite3_mutex_alloc(int);
    +void sqlite3_mutex_free(sqlite3_mutex*);
    +void sqlite3_mutex_enter(sqlite3_mutex*);
    +int sqlite3_mutex_try(sqlite3_mutex*);
    +void sqlite3_mutex_leave(sqlite3_mutex*);
    +

    +The SQLite core uses these routines for thread +synchronization. Though they are intended for internal +use by SQLite, code that links against SQLite is +permitted to use any of these routines.

    + +

    The SQLite source code contains multiple implementations +of these mutex routines. An appropriate implementation +is selected automatically at compile-time. The following +implementations are available in the SQLite core:

    + +

      +
    • SQLITE_MUTEX_OS2 +
    • SQLITE_MUTEX_PTHREAD +
    • SQLITE_MUTEX_W32 +
    • SQLITE_MUTEX_NOOP +

    + +

    The SQLITE_MUTEX_NOOP implementation is a set of routines +that does no real locking and is appropriate for use in +a single-threaded application. The SQLITE_MUTEX_OS2, +SQLITE_MUTEX_PTHREAD, and SQLITE_MUTEX_W32 implementations +are appropriate for use on OS/2, Unix, and Windows.

    + +

    If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor +macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex +implementation is included with the library. In this case the +application must supply a custom mutex implementation using the +SQLITE_CONFIG_MUTEX option of the sqlite3_config() function +before calling sqlite3_initialize() or any other public sqlite3_ +function that calls sqlite3_initialize().

    + +

    The sqlite3_mutex_alloc() routine allocates a new +mutex and returns a pointer to it. If it returns NULL +that means that a mutex could not be allocated. SQLite +will unwind its stack and return an error. The argument +to sqlite3_mutex_alloc() is one of these integer constants:

    + +

      +
    • SQLITE_MUTEX_FAST +
    • SQLITE_MUTEX_RECURSIVE +
    • SQLITE_MUTEX_STATIC_MASTER +
    • SQLITE_MUTEX_STATIC_MEM +
    • SQLITE_MUTEX_STATIC_MEM2 +
    • SQLITE_MUTEX_STATIC_PRNG +
    • SQLITE_MUTEX_STATIC_LRU +
    • SQLITE_MUTEX_STATIC_LRU2 +

    + +

    The first two constants cause sqlite3_mutex_alloc() to create +a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +is used but not necessarily so when SQLITE_MUTEX_FAST is used. +The mutex implementation does not need to make a distinction +between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +not want to. But SQLite will only request a recursive mutex in +cases where it really needs one. If a faster non-recursive mutex +implementation is available on the host platform, the mutex subsystem +might return such a mutex in response to SQLITE_MUTEX_FAST.

    + +

The other allowed parameters to sqlite3_mutex_alloc() each return +a pointer to a static preexisting mutex. Six static mutexes are +used by the current version of SQLite. Future versions of SQLite +may add additional static mutexes. Static mutexes are for internal +use by SQLite only. Applications that use SQLite mutexes should +use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +SQLITE_MUTEX_RECURSIVE.

    + +

    Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +returns a different mutex on every call. But for the static +mutex types, the same mutex is returned on every call that has +the same type number.

    + +

    The sqlite3_mutex_free() routine deallocates a previously +allocated dynamic mutex. SQLite is careful to deallocate every +dynamic mutex that it allocates. The dynamic mutexes must not be in +use when they are deallocated. Attempting to deallocate a static +mutex results in undefined behavior. SQLite never deallocates +a static mutex.

    + +

The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +to enter a mutex. If another thread is already within the mutex, +sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +upon successful entry. Mutexes created using +SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. +In such cases, the mutex must be exited an equal number of times before another thread +can enter. If the same thread tries to enter any other +kind of mutex more than once, the behavior is undefined. +SQLite will never exhibit +such behavior in its own use of mutexes.

    + +

    Some systems (for example, Windows 95) do not support the operation +implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() +will always return SQLITE_BUSY. The SQLite core only ever uses +sqlite3_mutex_try() as an optimization so this is acceptable behavior.

    + +

    The sqlite3_mutex_leave() routine exits a mutex that was +previously entered by the same thread. The behavior +is undefined if the mutex is not currently entered by the +calling thread or is not currently allocated. SQLite will +never do either.

    + +

    If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or +sqlite3_mutex_leave() is a NULL pointer, then all three routines +behave as no-ops.

    + +

    See also: sqlite3_mutex_held() and sqlite3_mutex_notheld(). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mutex_held.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mutex_held.html --- sqlite3-3.4.2/www/c3ref/mutex_held.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mutex_held.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,106 @@ + + +Mutex Verification Routines + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Mutex Verification Routines

    int sqlite3_mutex_held(sqlite3_mutex*);
    +int sqlite3_mutex_notheld(sqlite3_mutex*);
    +

    +The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +are intended for use inside assert() statements. The SQLite core +never uses these routines except inside an assert() and applications +are advised to follow the lead of the core. The core only +provides implementations for these routines when it is compiled +with the SQLITE_DEBUG flag. External mutex implementations +are only required to provide these routines if SQLITE_DEBUG is +defined and if NDEBUG is not defined.

    + +

    These routines should return true if the mutex in their argument +is held or not held, respectively, by the calling thread.

    + +

The implementation is not required to provide versions of these +routines that actually work. If the implementation does not provide working +versions of these routines, it should at least provide stubs that always +return true so that one does not get spurious assertion failures.

    + +

If the argument to sqlite3_mutex_held() is a NULL pointer then +the routine should return 1. This seems counter-intuitive since +clearly the mutex cannot be held if it does not exist. But the +reason the mutex does not exist is because the build is not +using mutexes. And we do not want the assert() containing the +call to sqlite3_mutex_held() to fail, so a non-zero return is +the appropriate thing to do. The sqlite3_mutex_notheld() +interface should also return 1 when given a NULL pointer. +
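A sketch of the intended assert()-only usage (my_step is a hypothetical helper whose caller must already hold the mutex; <assert.h> is assumed to be included):

+static void my_step(sqlite3_mutex *pMutex){
+  assert( sqlite3_mutex_held(pMutex) );   /* debug-build check only */
+  /* ... work that relies on pMutex being held ... */
+}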

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mutex.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mutex.html --- sqlite3-3.4.2/www/c3ref/mutex.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mutex.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Mutex Handle + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Mutex Handle

    typedef struct sqlite3_mutex sqlite3_mutex;
    +

    +The mutex module within SQLite defines sqlite3_mutex to be an +abstract type for a mutex object. The SQLite core never looks +at the internal representation of an sqlite3_mutex. It only +deals with pointers to the sqlite3_mutex object.

    + +

    Mutexes are created using sqlite3_mutex_alloc(). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/mutex_methods.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/mutex_methods.html --- sqlite3-3.4.2/www/c3ref/mutex_methods.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/mutex_methods.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,137 @@ + + +Mutex Methods Object + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Mutex Methods Object

    typedef struct sqlite3_mutex_methods sqlite3_mutex_methods;
    +struct sqlite3_mutex_methods {
    +  int (*xMutexInit)(void);
    +  int (*xMutexEnd)(void);
    +  sqlite3_mutex *(*xMutexAlloc)(int);
    +  void (*xMutexFree)(sqlite3_mutex *);
    +  void (*xMutexEnter)(sqlite3_mutex *);
    +  int (*xMutexTry)(sqlite3_mutex *);
    +  void (*xMutexLeave)(sqlite3_mutex *);
    +  int (*xMutexHeld)(sqlite3_mutex *);
    +  int (*xMutexNotheld)(sqlite3_mutex *);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +An instance of this structure defines the low-level routines +used to allocate and use mutexes.

    + +

Usually, the default mutex implementations provided by SQLite are +sufficient; however, the user has the option of substituting a custom +implementation for specialized deployments or systems for which SQLite +does not provide a suitable implementation. In this case, the user +creates and populates an instance of this structure to pass +to sqlite3_config() along with the SQLITE_CONFIG_MUTEX option. +Additionally, an instance of this structure can be used as an +output variable when querying the system for the current mutex +implementation, using the SQLITE_CONFIG_GETMUTEX option.

    + +

    The xMutexInit method defined by this structure is invoked as +part of system initialization by the sqlite3_initialize() function. + The xMutexInit routine shall be called by SQLite once for each +effective call to sqlite3_initialize().

    + +

    The xMutexEnd method defined by this structure is invoked as +part of system shutdown by the sqlite3_shutdown() function. The +implementation of this method is expected to release all outstanding +resources obtained by the mutex methods implementation, especially +those obtained by the xMutexInit method. The xMutexEnd() +interface shall be invoked once for each call to sqlite3_shutdown().

    + +

    The remaining seven methods defined by this structure (xMutexAlloc, +xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and +xMutexNotheld) implement the following interfaces (respectively):

• sqlite3_mutex_alloc() +
• sqlite3_mutex_free() +
• sqlite3_mutex_enter() +
• sqlite3_mutex_try() +
• sqlite3_mutex_leave() +
• sqlite3_mutex_held() +
• sqlite3_mutex_notheld() +

    + +

    The only difference is that the public sqlite3_XXX functions enumerated +above silently ignore any invocations that pass a NULL pointer instead +of a valid mutex handle. The implementations of the methods defined +by this structure are not required to handle this case, the results +of passing a NULL pointer instead of a valid mutex handle are undefined +(i.e. it is acceptable to provide an implementation that segfaults if +it is passed a NULL pointer). +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/next_stmt.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/next_stmt.html --- sqlite3-3.4.2/www/c3ref/next_stmt.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/next_stmt.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +Find the next prepared statement + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Find the next prepared statement

    sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
    +

    +This interface returns a pointer to the next prepared statement after +pStmt associated with the database connection pDb. If pStmt is NULL +then this interface returns a pointer to the first prepared statement +associated with the database connection pDb. If no prepared statement +satisfies the conditions of this routine, it returns NULL.

    + +

    The database connection pointer D in a call to +sqlite3_next_stmt(D,S) must refer to an open database +connection and in particular must not be a NULL pointer.
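One common sketch is to finalize every outstanding prepared statement before closing a connection; because finalizing invalidates the statement pointer, each pass restarts from the beginning:

+sqlite3_stmt *pStmt;
+while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
+  sqlite3_finalize(pStmt);   /* the next sqlite3_next_stmt(db,0) call returns the new first statement */
+}
+sqlite3_close(db);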

    + +

    Requirements: H13143 H13146 H13149 H13152 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/objlist.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/objlist.html --- sqlite3-3.4.2/www/c3ref/objlist.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/objlist.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,82 @@ + + +List Of SQLite Objects + + + + + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/open.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/open.html --- sqlite3-3.4.2/www/c3ref/open.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/open.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,173 @@ + + +Opening A New Database Connection + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Opening A New Database Connection

    int sqlite3_open(
    +  const char *filename,   /* Database filename (UTF-8) */
    +  sqlite3 **ppDb          /* OUT: SQLite db handle */
    +);
    +int sqlite3_open16(
    +  const void *filename,   /* Database filename (UTF-16) */
    +  sqlite3 **ppDb          /* OUT: SQLite db handle */
    +);
    +int sqlite3_open_v2(
    +  const char *filename,   /* Database filename (UTF-8) */
    +  sqlite3 **ppDb,         /* OUT: SQLite db handle */
    +  int flags,              /* Flags */
    +  const char *zVfs        /* Name of VFS module to use */
    +);
    +

    +These routines open an SQLite database file whose name is given by the +filename argument. The filename argument is interpreted as UTF-8 for +sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte +order for sqlite3_open16(). A database connection handle is usually +returned in *ppDb, even if an error occurs. The only exception is that +if SQLite is unable to allocate memory to hold the sqlite3 object, +a NULL will be written into *ppDb instead of a pointer to the sqlite3 +object. If the database is opened (and/or created) successfully, then +SQLITE_OK is returned. Otherwise an error code is returned. The +sqlite3_errmsg() or sqlite3_errmsg16() routines can be used to obtain +an English language description of the error.

    + +

    The default encoding for the database will be UTF-8 if +sqlite3_open() or sqlite3_open_v2() is called and +UTF-16 in the native byte order if sqlite3_open16() is used.

    + +

    Whether or not an error occurs when it is opened, resources +associated with the database connection handle should be released by +passing it to sqlite3_close() when it is no longer required.

    + +

    The sqlite3_open_v2() interface works like sqlite3_open() +except that it accepts two additional parameters for additional control +over the new database connection. The flags parameter can take one of +the following three values, optionally combined with the +SQLITE_OPEN_NOMUTEX or SQLITE_OPEN_FULLMUTEX flags:

    + +

    +
    SQLITE_OPEN_READONLY
    +
    The database is opened in read-only mode. If the database does not +already exist, an error is returned.

    + +

    SQLITE_OPEN_READWRITE
    +
    The database is opened for reading and writing if possible, or reading +only if the file is write protected by the operating system. In either +case the database must already exist, otherwise an error is returned.

    + +

    SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE
    +
The database is opened for reading and writing, and is created if +it does not already exist. This is the behavior that is always used for +sqlite3_open() and sqlite3_open16().
    +

    + +

    If the 3rd parameter to sqlite3_open_v2() is not one of the +combinations shown above or one of the combinations shown above combined +with the SQLITE_OPEN_NOMUTEX or SQLITE_OPEN_FULLMUTEX flags, +then the behavior is undefined.

    + +

    If the SQLITE_OPEN_NOMUTEX flag is set, then the database connection +opens in the multi-thread threading mode as long as the single-thread +mode has not been set at compile-time or start-time. If the +SQLITE_OPEN_FULLMUTEX flag is set then the database connection opens +in the serialized threading mode unless single-thread was +previously selected at compile-time or start-time.

    + +

    If the filename is ":memory:", then a private, temporary in-memory database +is created for the connection. This in-memory database will vanish when +the database connection is closed. Future versions of SQLite might +make use of additional special filenames that begin with the ":" character. +It is recommended that when a database filename actually does begin with +a ":" character you should prefix the filename with a pathname such as +"./" to avoid ambiguity.

    + +

    If the filename is an empty string, then a private, temporary +on-disk database will be created. This private database will be +automatically deleted as soon as the database connection is closed.

    + +

    The fourth parameter to sqlite3_open_v2() is the name of the +sqlite3_vfs object that defines the operating system interface that +the new database connection should use. If the fourth parameter is +a NULL pointer then the default sqlite3_vfs object is used.

    + +

    Note to Windows users: The encoding used for the filename argument +of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever +codepage is currently defined. Filenames containing international +characters must be converted to UTF-8 prior to passing them into +sqlite3_open() or sqlite3_open_v2().
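A sketch of opening (and creating, if necessary) a database using the default VFS; the file name "app.db" is a hypothetical example:

+sqlite3 *db = 0;
+int rc = sqlite3_open_v2("app.db", &db,
+                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+if( rc!=SQLITE_OK ){
+  fprintf(stderr, "cannot open database: %s\n",
+          db ? sqlite3_errmsg(db) : "out of memory");
+}
+/* ... use the connection if rc==SQLITE_OK ... */
+if( db ) sqlite3_close(db);    /* release the handle whether or not the open succeeded */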

    + +

    Requirements: +H12701 H12702 H12703 H12704 H12706 H12707 H12709 H12711 +H12712 H12713 H12714 H12717 H12719 H12721 H12723 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/overload_function.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/overload_function.html --- sqlite3-3.4.2/www/c3ref/overload_function.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/overload_function.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Overload A Function For A Virtual Table + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Overload A Function For A Virtual Table

    int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +Virtual tables can provide alternative implementations of functions +using the xFindFunction method of the virtual table module. +But global versions of those functions +must exist in order to be overloaded.

    + +

    This API makes sure a global version of a function with a particular +name and number of parameters exists. If no such function exists +before this API is called, a new function is created. The implementation +of the new function always causes an exception to be thrown. So +the new function is not good for anything by itself. Its only +purpose is to be a placeholder function that can be overloaded +by a virtual table. +
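A one-line sketch: before registering a virtual table whose xFindFunction overloads a hypothetical two-argument function named match_score, make sure a global placeholder exists:

+sqlite3_overload_function(db, "match_score", 2);   /* creates a throwing stub if no such function exists */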

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/pcache.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/pcache.html --- sqlite3-3.4.2/www/c3ref/pcache.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/pcache.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +Custom Page Cache Object + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Custom Page Cache Object

    typedef struct sqlite3_pcache sqlite3_pcache;
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_pcache type is opaque. It is implemented by +the pluggable module. The SQLite core has no knowledge of +its size or internal structure and never deals with the +sqlite3_pcache object except by holding and passing pointers +to the object.

    + +

    See sqlite3_pcache_methods for additional information. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/pcache_methods.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/pcache_methods.html --- sqlite3-3.4.2/www/c3ref/pcache_methods.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/pcache_methods.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,202 @@ + + +Application Defined Page Cache. + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Application Defined Page Cache.

    typedef struct sqlite3_pcache_methods sqlite3_pcache_methods;
    +struct sqlite3_pcache_methods {
    +  void *pArg;
    +  int (*xInit)(void*);
    +  void (*xShutdown)(void*);
    +  sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable);
    +  void (*xCachesize)(sqlite3_pcache*, int nCachesize);
    +  int (*xPagecount)(sqlite3_pcache*);
    +  void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
    +  void (*xUnpin)(sqlite3_pcache*, void*, int discard);
    +  void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
    +  void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
    +  void (*xDestroy)(sqlite3_pcache*);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_config(SQLITE_CONFIG_PCACHE, ...) interface can +register an alternative page cache implementation by passing in an +instance of the sqlite3_pcache_methods structure. The majority of the +heap memory used by sqlite is used by the page cache to cache data read +from, or ready to be written to, the database file. By implementing a +custom page cache using this API, an application can control more +precisely the amount of memory consumed by sqlite, the way in which +said memory is allocated and released, and the policies used to +determine exactly which parts of a database file are cached and for +how long.

    + +

    The contents of the structure are copied to an internal buffer by sqlite +within the call to sqlite3_config.

    + +

    The xInit() method is called once for each call to sqlite3_initialize() +(usually only once during the lifetime of the process). It is passed +a copy of the sqlite3_pcache_methods.pArg value. It can be used to set +up global structures and mutexes required by the custom page cache +implementation. The xShutdown() method is called from within +sqlite3_shutdown(), if the application invokes this API. It can be used +to clean up any outstanding resources before process shutdown, if required.

    + +

    The xCreate() method is used to construct a new cache instance. The +first parameter, szPage, is the size in bytes of the pages that must +be allocated by the cache. szPage will not be a power of two. The +second argument, bPurgeable, is true if the cache being created will +be used to cache database pages read from a file stored on disk, or +false if it is used for an in-memory database. The cache implementation +does not have to do anything special based on the value of bPurgeable, +it is purely advisory.

    + +

    The xCachesize() method may be called at any time by SQLite to set the +suggested maximum cache-size (number of pages stored by) the cache +instance passed as the first argument. This is the value configured using +the SQLite "PRAGMA cache_size" command. As with the bPurgeable parameter, +the implementation is not required to do anything special with this +value, it is advisory only.

    + +

    The xPagecount() method should return the number of pages currently +stored in the cache supplied as an argument.

    + +

The xFetch() method is used to fetch a page and return a pointer to it. +A 'page', in this context, is a buffer of szPage bytes aligned at an +8-byte boundary. The page to be fetched is determined by the key. The +minimum key value is 1. After it has been retrieved using xFetch, the page +is considered to be pinned.

    + +

    If the requested page is already in the page cache, then a pointer to +the cached buffer should be returned with its contents intact. If the +page is not already in the cache, then the expected behaviour of the +cache is determined by the value of the createFlag parameter passed +to xFetch, according to the following table:

    + +

    +
createFlag: Expected Behaviour

0: NULL should be returned. No new cache entry is created.

1: If createFlag is set to 1, this indicates that SQLite is holding pinned pages that can be unpinned by writing their contents to the database file (a relatively expensive operation). In this situation the cache implementation has two choices: it can return NULL, in which case SQLite will attempt to unpin one or more pages before re-requesting the same page, or it can allocate a new page and return a pointer to it. If a new page is allocated, then the first sizeof(void*) bytes of it (at least) must be zeroed before it is returned.

2: If createFlag is set to 2, then SQLite is not holding any pinned pages associated with the specific cache passed as the first argument to xFetch() that can be unpinned. The cache implementation should attempt to allocate a new cache entry and return a pointer to it. Again, the first sizeof(void*) bytes of the page should be zeroed before it is returned. If the xFetch() method returns NULL when createFlag==2, SQLite assumes that a memory allocation failed and returns SQLITE_NOMEM to the user.

    + +

    xUnpin() is called by SQLite with a pointer to a currently pinned page +as its second argument. If the third parameter, discard, is non-zero, +then the page should be evicted from the cache. In this case SQLite +assumes that the next time the page is retrieved from the cache using +the xFetch() method, it will be zeroed. If the discard parameter is +zero, then the page is considered to be unpinned. The cache implementation +may choose to reclaim (free or recycle) unpinned pages at any time. +SQLite assumes that next time the page is retrieved from the cache +it will either be zeroed, or contain the same data that it did when it +was unpinned.

    + +

    The cache is not required to perform any reference counting. A single +call to xUnpin() unpins the page regardless of the number of prior calls +to xFetch().

    + +

    The xRekey() method is used to change the key value associated with the +page passed as the second argument from oldKey to newKey. If the cache +previously contains an entry associated with newKey, it should be +discarded. Any prior cache entry associated with newKey is guaranteed not +to be pinned.

    + +

    When SQLite calls the xTruncate() method, the cache must discard all +existing cache entries with page numbers (keys) greater than or equal +to the value of the iLimit parameter passed to xTruncate(). If any +of these pages are pinned, they are implicitly unpinned, meaning that +they can be safely discarded.

    + +

    The xDestroy() method is used to delete a cache allocated by xCreate(). +All resources associated with the specified cache should be freed. After +calling the xDestroy() method, SQLite considers the sqlite3_pcache* +handle invalid, and will not use it with any other sqlite3_pcache_methods +functions. +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/prepare.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/prepare.html --- sqlite3-3.4.2/www/c3ref/prepare.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/prepare.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,177 @@ + + +Compiling An SQL Statement + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite C Interface

    Compiling An SQL Statement

    int sqlite3_prepare(
    +  sqlite3 *db,            /* Database handle */
    +  const char *zSql,       /* SQL statement, UTF-8 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare_v2(
    +  sqlite3 *db,            /* Database handle */
    +  const char *zSql,       /* SQL statement, UTF-8 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare16(
    +  sqlite3 *db,            /* Database handle */
    +  const void *zSql,       /* SQL statement, UTF-16 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare16_v2(
    +  sqlite3 *db,            /* Database handle */
    +  const void *zSql,       /* SQL statement, UTF-16 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +

+Before an SQL query can be executed, it must first be compiled into a byte-code program using one of these routines.

    + +

    The first argument, "db", is a database connection obtained from a +prior successful call to sqlite3_open(), sqlite3_open_v2() or +sqlite3_open16(). The database connection must not have been closed.

    + +

    The second argument, "zSql", is the statement to be compiled, encoded +as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() +interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() +use UTF-16.

    + +

    If the nByte argument is less than zero, then zSql is read up to the +first zero terminator. If nByte is non-negative, then it is the maximum +number of bytes read from zSql. When nByte is non-negative, the +zSql string ends at either the first '\000' or '\u0000' character or +the nByte-th byte, whichever comes first. If the caller knows +that the supplied string is nul-terminated, then there is a small +performance advantage to be gained by passing an nByte parameter that +is equal to the number of bytes in the input string including +the nul-terminator bytes.

    + +

    If pzTail is not NULL then *pzTail is made to point to the first byte +past the end of the first SQL statement in zSql. These routines only +compile the first statement in zSql, so *pzTail is left pointing to +what remains uncompiled.

    + +

    *ppStmt is left pointing to a compiled prepared statement that can be +executed using sqlite3_step(). If there is an error, *ppStmt is set +to NULL. If the input text contains no SQL (if the input is an empty +string or a comment) then *ppStmt is set to NULL. +The calling procedure is responsible for deleting the compiled +SQL statement using sqlite3_finalize() after it has finished with it. +ppStmt may not be NULL.
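As an illustration only (not part of the interface specification), the following sketch prepares and runs each statement in a multi-statement UTF-8 string, using pzTail to locate the next statement. It assumes an already-open connection "db"; error handling is abbreviated.

/* Hypothetical helper, shown only to illustrate the calling pattern. */
#include <stdio.h>
#include "sqlite3.h"

static int run_all(sqlite3 *db, const char *zSql){
  while( zSql && zSql[0] ){
    sqlite3_stmt *pStmt = 0;
    const char *zTail = 0;
    int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
      return rc;
    }
    if( pStmt ){                          /* NULL for empty input or comments */
      while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* discard any result rows */ }
      sqlite3_finalize(pStmt);            /* caller must finalize */
    }
    zSql = zTail;                         /* continue with the uncompiled remainder */
  }
  return SQLITE_OK;
}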

    + +

    On success, SQLITE_OK is returned, otherwise an error code is returned.

    + +

The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are recommended for all new programs. The two older interfaces are retained for backwards compatibility, but their use is discouraged. In the "v2" interfaces, the prepared statement that is returned (the sqlite3_stmt object) contains a copy of the original SQL text. This causes the sqlite3_step() interface to behave differently in two ways:

    + +

1. If the database schema changes, instead of returning SQLITE_SCHEMA as it always used to do, sqlite3_step() will automatically recompile the SQL statement and try to run it again. If the schema has changed in a way that makes the statement no longer valid, sqlite3_step() will still return SQLITE_SCHEMA. But unlike the legacy behavior, SQLITE_SCHEMA is now a fatal error. Calling sqlite3_prepare_v2() again will not make the error go away. Note: use sqlite3_errmsg() to find the text of the parsing error that results in an SQLITE_SCHEMA return.

2. When an error occurs, sqlite3_step() will return one of the detailed error codes or extended error codes. The legacy behavior was that sqlite3_step() would only return a generic SQLITE_ERROR result code and you would have to make a second call to sqlite3_reset() in order to find the underlying cause of the problem. With the "v2" prepare interfaces, the underlying reason for the error is returned immediately.

    + +

    Requirements: +H13011 H13012 H13013 H13014 H13015 H13016 H13019 H13021

    + +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/profile.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/profile.html --- sqlite3-3.4.2/www/c3ref/profile.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/profile.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +Tracing And Profiling Functions + + + + + +

    SQLite C Interface

    Tracing And Profiling Functions

    void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
    +void *sqlite3_profile(sqlite3*,
    +   void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
    +

    Important: This interface is experimental and is subject to change without notice.

    +These routines register callback functions that can be used for +tracing and profiling the execution of SQL statements.

    + +

The callback function registered by sqlite3_trace() is invoked at various times when an SQL statement is being run by sqlite3_step(). The callback is passed a UTF-8 rendering of the SQL statement text as the statement first begins executing. Additional callbacks occur as each triggered subprogram is entered. The callbacks for triggers are passed a UTF-8 SQL comment that identifies the trigger.

    + +

The callback function registered by sqlite3_profile() is invoked as each SQL statement finishes. The profile callback is passed the original statement text and an estimate of the wall-clock time that the statement took to run.
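A minimal sketch of how these two registrations might look, assuming an open connection "db"; the callback names are hypothetical and, as noted above, the interfaces are experimental.

#include <stdio.h>
#include "sqlite3.h"

static void traceCb(void *pNotUsed, const char *zSql){
  (void)pNotUsed;
  fprintf(stderr, "TRACE: %s\n", zSql);          /* statement text as it starts */
}
static void profileCb(void *pNotUsed, const char *zSql, sqlite3_uint64 elapsed){
  (void)pNotUsed;
  /* elapsed is the wall-clock estimate reported by SQLite (nominally nanoseconds) */
  fprintf(stderr, "PROFILE: %s (%llu)\n", zSql, (unsigned long long)elapsed);
}

void install_hooks(sqlite3 *db){
  sqlite3_trace(db, traceCb, 0);
  sqlite3_profile(db, profileCb, 0);
}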

    + +

    Requirements: +H12281 H12282 H12283 H12284 H12285 H12287 H12288 H12289 +H12290 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/progress_handler.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/progress_handler.html --- sqlite3-3.4.2/www/c3ref/progress_handler.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/progress_handler.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,98 @@ + + +Query Progress Callbacks + + + + + +

    SQLite C Interface

    Query Progress Callbacks

    void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
    +

    +This routine configures a callback function - the +progress callback - that is invoked periodically during long +running calls to sqlite3_exec(), sqlite3_step() and +sqlite3_get_table(). An example use for this +interface is to keep a GUI updated during a large query.

    + +

    If the progress callback returns non-zero, the operation is +interrupted. This feature can be used to implement a +"Cancel" button on a GUI progress dialog box.
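For illustration, a sketch of a cancel button wired through the progress handler; the flag, the callback name, and the interval of 1000 virtual-machine opcodes are all arbitrary choices, not requirements of the interface.

#include "sqlite3.h"

static volatile int g_cancelRequested = 0;   /* set from the GUI when Cancel is pressed */

static int progressCb(void *pNotUsed){
  (void)pNotUsed;
  return g_cancelRequested;                  /* non-zero interrupts the running statement */
}

void install_progress(sqlite3 *db){
  sqlite3_progress_handler(db, 1000, progressCb, 0);
}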

    + +

The progress handler must not do anything that will modify the database connection that invoked the progress handler. Note that sqlite3_prepare_v2() and sqlite3_step() both count as modifying the database connection for the purposes of this paragraph.

    + +

    Requirements: +H12911 H12912 H12913 H12914 H12915 H12916 H12917 H12918

    + +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/randomness.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/randomness.html --- sqlite3-3.4.2/www/c3ref/randomness.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/randomness.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +Pseudo-Random Number Generator + + + + + +

    SQLite C Interface

    Pseudo-Random Number Generator

    void sqlite3_randomness(int N, void *P);
    +

+SQLite contains a high-quality pseudo-random number generator (PRNG) used to select random ROWIDs when inserting new records into a table that already uses the largest possible ROWID. The PRNG is also used for the built-in random() and randomblob() SQL functions. This interface allows applications to access the same PRNG for other purposes.

    + +

    A call to this routine stores N bytes of randomness into buffer P.
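For example, an application might fill a small buffer with key material (a hypothetical use, sketched here):

#include "sqlite3.h"

void make_key(unsigned char *aKey){
  sqlite3_randomness(16, aKey);   /* store 16 bytes of randomness into aKey */
}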

    + +

    The first time this routine is invoked (either internally or by +the application) the PRNG is seeded using randomness obtained +from the xRandomness method of the default sqlite3_vfs object. +On all subsequent invocations, the pseudo-randomness is generated +internally and without recourse to the sqlite3_vfs xRandomness +method.

    + +

    Requirements: +H17392 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/release_memory.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/release_memory.html --- sqlite3-3.4.2/www/c3ref/release_memory.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/release_memory.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,88 @@ + + +Attempt To Free Heap Memory + + + + + +

    SQLite C Interface

    Attempt To Free Heap Memory

    int sqlite3_release_memory(int);
    +

    +The sqlite3_release_memory() interface attempts to free N bytes +of heap memory by deallocating non-essential memory allocations +held by the database library. Memory used to cache database +pages to improve performance is an example of non-essential memory. +sqlite3_release_memory() returns the number of bytes actually freed, +which might be more or less than the amount requested.

    + +

    Requirements: H17341 H17342 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/reset_auto_extension.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/reset_auto_extension.html --- sqlite3-3.4.2/www/c3ref/reset_auto_extension.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/reset_auto_extension.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,88 @@ + + +Reset Automatic Extension Loading + + + + + +

    SQLite C Interface

    Reset Automatic Extension Loading

    void sqlite3_reset_auto_extension(void);
    +

    +This function disables all previously registered automatic +extensions. It undoes the effect of all prior +sqlite3_auto_extension() calls.

    + +

    This function disables automatic extensions in all threads. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/reset.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/reset.html --- sqlite3-3.4.2/www/c3ref/reset.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/reset.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +Reset A Prepared Statement Object + + + + + +

    SQLite C Interface

    Reset A Prepared Statement Object

    int sqlite3_reset(sqlite3_stmt *pStmt);
    +

    +The sqlite3_reset() function is called to reset a prepared statement +object back to its initial state, ready to be re-executed. +Any SQL statement variables that had values bound to them using +the sqlite3_bind_*() API retain their values. +Use sqlite3_clear_bindings() to reset the bindings.
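A sketch of the usual reuse pattern, assuming an open connection "db" and a hypothetical table t1(x); error handling is abbreviated.

#include "sqlite3.h"

int insert_values(sqlite3 *db, const int *aVal, int nVal){
  sqlite3_stmt *pStmt = 0;
  int i, rc;
  rc = sqlite3_prepare_v2(db, "INSERT INTO t1(x) VALUES(?)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  for(i=0; i<nVal; i++){
    sqlite3_bind_int(pStmt, 1, aVal[i]);
    sqlite3_step(pStmt);               /* SQLITE_DONE is expected here */
    rc = sqlite3_reset(pStmt);         /* back to the initial state, bindings kept */
    sqlite3_clear_bindings(pStmt);     /* optional: also forget the bound values */
    if( rc!=SQLITE_OK ) break;
  }
  sqlite3_finalize(pStmt);
  return rc;
}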

    + +

    The sqlite3_reset(S) interface resets the prepared statement S +back to the beginning of its program.

    + +

    If the most recent call to sqlite3_step(S) for the +prepared statement S returned SQLITE_ROW or SQLITE_DONE, +or if sqlite3_step(S) has never before been called on S, +then sqlite3_reset(S) returns SQLITE_OK.

    + +

    If the most recent call to sqlite3_step(S) for the +prepared statement S indicated an error, then +sqlite3_reset(S) returns an appropriate error code.

    + +

    The sqlite3_reset(S) interface does not change the values +of any bindings on the prepared statement S. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/result_blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/result_blob.html --- sqlite3-3.4.2/www/c3ref/result_blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/result_blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,200 @@ + + +Setting The Result Of An SQL Function + + + + + +

    SQLite C Interface

    Setting The Result Of An SQL Function

    void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
    +void sqlite3_result_double(sqlite3_context*, double);
    +void sqlite3_result_error(sqlite3_context*, const char*, int);
    +void sqlite3_result_error16(sqlite3_context*, const void*, int);
    +void sqlite3_result_error_toobig(sqlite3_context*);
    +void sqlite3_result_error_nomem(sqlite3_context*);
    +void sqlite3_result_error_code(sqlite3_context*, int);
    +void sqlite3_result_int(sqlite3_context*, int);
    +void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
    +void sqlite3_result_null(sqlite3_context*);
    +void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
    +void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
    +void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
    +void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
    +void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
    +void sqlite3_result_zeroblob(sqlite3_context*, int n);
    +

    +These routines are used by the xFunc or xFinal callbacks that +implement SQL functions and aggregates. See +sqlite3_create_function() and sqlite3_create_function16() +for additional information.

    + +

    These functions work very much like the parameter binding family of +functions used to bind values to host parameters in prepared statements. +Refer to the SQL parameter documentation for additional information.
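A sketch of a hypothetical scalar function "greeting(name)" showing a few of these interfaces together; the function name and behavior are invented for illustration.

#include "sqlite3.h"

static void greetingFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  char zBuf[128];
  const unsigned char *zName;
  if( argc!=1 || sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);                     /* NULL in, NULL out */
    return;
  }
  zName = sqlite3_value_text(argv[0]);
  if( zName==0 ){
    sqlite3_result_error_nomem(ctx);              /* conversion ran out of memory */
    return;
  }
  sqlite3_snprintf((int)sizeof(zBuf), zBuf, "hello, %s", zName);
  /* SQLITE_TRANSIENT: SQLite makes its own copy of zBuf before returning */
  sqlite3_result_text(ctx, zBuf, -1, SQLITE_TRANSIENT);
}

int register_greeting(sqlite3 *db){
  return sqlite3_create_function(db, "greeting", 1, SQLITE_UTF8, 0,
                                 greetingFunc, 0, 0);
}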

    + +

    The sqlite3_result_blob() interface sets the result from +an application-defined function to be the BLOB whose content is pointed +to by the second parameter and which is N bytes long where N is the +third parameter.

    + +

The sqlite3_result_zeroblob() interface sets the result of the application-defined function to be a BLOB containing all zero bytes and N bytes in size, where N is the value of the 2nd parameter.

    + +

    The sqlite3_result_double() interface sets the result from +an application-defined function to be a floating point value specified +by its 2nd argument.

    + +

    The sqlite3_result_error() and sqlite3_result_error16() functions +cause the implemented SQL function to throw an exception. +SQLite uses the string pointed to by the +2nd parameter of sqlite3_result_error() or sqlite3_result_error16() +as the text of an error message. SQLite interprets the error +message string from sqlite3_result_error() as UTF-8. SQLite +interprets the string from sqlite3_result_error16() as UTF-16 in native +byte order. If the third parameter to sqlite3_result_error() +or sqlite3_result_error16() is negative then SQLite takes as the error +message all text up through the first zero character. +If the third parameter to sqlite3_result_error() or +sqlite3_result_error16() is non-negative then SQLite takes that many +bytes (not characters) from the 2nd parameter as the error message. +The sqlite3_result_error() and sqlite3_result_error16() +routines make a private copy of the error message text before +they return. Hence, the calling function can deallocate or +modify the text after they return without harm. +The sqlite3_result_error_code() function changes the error code +returned by SQLite as a result of an error in a function. By default, +the error code is SQLITE_ERROR. A subsequent call to sqlite3_result_error() +or sqlite3_result_error16() resets the error code to SQLITE_ERROR.

    + +

The sqlite3_result_error_toobig() interface causes SQLite to throw an error indicating that a string or BLOB is too long to represent.

    + +

The sqlite3_result_error_nomem() interface causes SQLite to throw an error indicating that a memory allocation failed.

    + +

    The sqlite3_result_int() interface sets the return value +of the application-defined function to be the 32-bit signed integer +value given in the 2nd argument. +The sqlite3_result_int64() interface sets the return value +of the application-defined function to be the 64-bit signed integer +value given in the 2nd argument.

    + +

    The sqlite3_result_null() interface sets the return value +of the application-defined function to be NULL.

    + +

The sqlite3_result_text(), sqlite3_result_text16(), sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces set the return value of the application-defined function to be a text string which is represented as UTF-8, UTF-16 native byte order, UTF-16 little endian, or UTF-16 big endian, respectively. SQLite takes the text result from the application from the 2nd parameter of the sqlite3_result_text* interfaces. If the 3rd parameter to the sqlite3_result_text* interfaces is negative, then SQLite takes result text from the 2nd parameter through the first zero character. If the 3rd parameter to the sqlite3_result_text* interfaces is non-negative, then as many bytes (not characters) of the text pointed to by the 2nd parameter are taken as the application-defined function result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that function as the destructor on the text or BLOB result when it has finished using that result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite assumes that the text or BLOB result is in constant space and does not copy it or call a destructor when it has finished using that result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob is the special constant SQLITE_TRANSIENT then SQLite makes a copy of the result into space obtained from sqlite3_malloc() before it returns.

    + +

The sqlite3_result_value() interface sets the result of the application-defined function to be a copy of the unprotected sqlite3_value object specified by the 2nd parameter. The sqlite3_result_value() interface makes a copy of the sqlite3_value so that the sqlite3_value specified in the parameter may change or be deallocated after sqlite3_result_value() returns without harm. A protected sqlite3_value object may always be used where an unprotected sqlite3_value object is required, so either kind of sqlite3_value object can be used with this interface.

    + +

If these routines are called from a different thread than the one containing the application-defined function that received the sqlite3_context pointer, the results are undefined.

    + +

    Requirements: +H16403 H16406 H16409 H16412 H16415 H16418 H16421 H16424 +H16427 H16430 H16433 H16436 H16439 H16442 H16445 H16448 +H16451 H16454 H16457 H16460 H16463 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/set_authorizer.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/set_authorizer.html --- sqlite3-3.4.2/www/c3ref/set_authorizer.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/set_authorizer.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,164 @@ + + +Compile-Time Authorization Callbacks + + + + + +

    SQLite C Interface

    Compile-Time Authorization Callbacks

    int sqlite3_set_authorizer(
    +  sqlite3*,
    +  int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
    +  void *pUserData
    +);
    +

+This routine registers an authorizer callback with a particular database connection, supplied in the first argument. The authorizer callback is invoked as SQL statements are being compiled by sqlite3_prepare() or its variants sqlite3_prepare_v2(), sqlite3_prepare16() and sqlite3_prepare16_v2(). At various points during the compilation process, as logic is being created to perform various actions, the authorizer callback is invoked to see if those actions are allowed. The authorizer callback should return SQLITE_OK to allow the action, SQLITE_IGNORE to disallow the specific action but allow the SQL statement to continue to be compiled, or SQLITE_DENY to cause the entire SQL statement to be rejected with an error. If the authorizer callback returns any value other than SQLITE_IGNORE, SQLITE_OK, or SQLITE_DENY then the sqlite3_prepare_v2() or equivalent call that triggered the authorizer will fail with an error message.

    + +

    When the callback returns SQLITE_OK, that means the operation +requested is ok. When the callback returns SQLITE_DENY, the +sqlite3_prepare_v2() or equivalent call that triggered the +authorizer will fail with an error message explaining that +access is denied.

    + +

    The first parameter to the authorizer callback is a copy of the third +parameter to the sqlite3_set_authorizer() interface. The second parameter +to the callback is an integer action code that specifies +the particular action to be authorized. The third through sixth parameters +to the callback are zero-terminated strings that contain additional +details about the action to be authorized.

    + +

If the action code is SQLITE_READ and the callback returns SQLITE_IGNORE then the prepared statement is constructed to substitute a NULL value in place of the table column that would have been read if SQLITE_OK had been returned. The SQLITE_IGNORE return can be used to deny an untrusted user access to individual columns of a table. If the action code is SQLITE_DELETE and the callback returns SQLITE_IGNORE then the DELETE operation proceeds but the truncate optimization is disabled and all rows are deleted individually.

    + +

    An authorizer is used when preparing +SQL statements from an untrusted source, to ensure that the SQL statements +do not try to access data they are not allowed to see, or that they do not +try to execute malicious statements that damage the database. For +example, an application may allow a user to enter arbitrary +SQL queries for evaluation by a database. But the application does +not want the user to be able to make arbitrary changes to the +database. An authorizer could then be put in place while the +user-entered SQL is being prepared that +disallows everything except SELECT statements.
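As a sketch of that idea (the exact set of action codes an application allows is a policy decision, not something this interface mandates):

#include "sqlite3.h"

static int selectOnlyAuth(void *pNotUsed, int code, const char *z1,
                          const char *z2, const char *z3, const char *z4){
  (void)pNotUsed; (void)z1; (void)z2; (void)z3; (void)z4;
  /* Allow the compilation of SELECT statements and the column reads they
  ** perform; refuse everything else.  A real policy would likely need to
  ** allow a few additional read-only action codes as well. */
  if( code==SQLITE_SELECT || code==SQLITE_READ ) return SQLITE_OK;
  return SQLITE_DENY;
}

int harden_connection(sqlite3 *db){
  return sqlite3_set_authorizer(db, selectOnlyAuth, 0);
}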

    + +

    Applications that need to process SQL from untrusted sources +might also consider lowering resource limits using sqlite3_limit() +and limiting database size using the max_page_count PRAGMA +in addition to using an authorizer.

    + +

    Only a single authorizer can be in place on a database connection +at a time. Each call to sqlite3_set_authorizer overrides the +previous call. Disable the authorizer by installing a NULL callback. +The authorizer is disabled by default.

    + +

The authorizer callback must not do anything that will modify the database connection that invoked the authorizer callback. Note that sqlite3_prepare_v2() and sqlite3_step() both count as modifying the database connection for the purposes of this paragraph.

    + +

    When sqlite3_prepare_v2() is used to prepare a statement, the +statement might be reprepared during sqlite3_step() due to a +schema change. Hence, the application should ensure that the +correct authorizer callback remains in place during the sqlite3_step().

    + +

    Note that the authorizer callback is invoked only during +sqlite3_prepare() or its variants. Authorization is not +performed during statement evaluation in sqlite3_step(), unless +as stated in the previous paragraph, sqlite3_step() invokes +sqlite3_prepare_v2() to reprepare a statement after a schema change.

    + +

    Requirements: +H12501 H12502 H12503 H12504 H12505 H12506 H12507 H12510 +H12511 H12512 H12520 H12521 H12522 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/sleep.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/sleep.html --- sqlite3-3.4.2/www/c3ref/sleep.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/sleep.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +Suspend Execution For A Short Time + + + + + +

    SQLite C Interface

    Suspend Execution For A Short Time

    int sqlite3_sleep(int);
    +

+The sqlite3_sleep() function causes the current thread to suspend execution for at least the number of milliseconds specified in its parameter.

    + +

    If the operating system does not support sleep requests with +millisecond time resolution, then the time will be rounded up to +the nearest second. The number of milliseconds of sleep actually +requested from the operating system is returned.

    + +

    SQLite implements this interface by calling the xSleep() +method of the default sqlite3_vfs object.

    + +

    Requirements: H10533 H10536 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/soft_heap_limit.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/soft_heap_limit.html --- sqlite3-3.4.2/www/c3ref/soft_heap_limit.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/soft_heap_limit.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,109 @@ + + +Impose A Limit On Heap Size + + + + + +

    SQLite C Interface

    Impose A Limit On Heap Size

    void sqlite3_soft_heap_limit(int);
    +

    +The sqlite3_soft_heap_limit() interface places a "soft" limit +on the amount of heap memory that may be allocated by SQLite. +If an internal allocation is requested that would exceed the +soft heap limit, sqlite3_release_memory() is invoked one or +more times to free up some space before the allocation is performed.

    + +

    The limit is called "soft", because if sqlite3_release_memory() +cannot free sufficient memory to prevent the limit from being exceeded, +the memory is allocated anyway and the current operation proceeds.

    + +

    A negative or zero value for N means that there is no soft heap limit and +sqlite3_release_memory() will only be called when memory is exhausted. +The default value for the soft heap limit is zero.
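For example, an application that wants SQLite to try to stay under roughly 8 MiB of heap might call (a sketch; the figure is arbitrary):

#include "sqlite3.h"

void limit_heap(void){
  sqlite3_soft_heap_limit(8*1024*1024);   /* advisory limit in bytes; 0 disables */
}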

    + +

    SQLite makes a best effort to honor the soft heap limit. +But if the soft heap limit cannot be honored, execution will +continue without error or notification. This is why the limit is +called a "soft" limit. It is advisory only.

    + +

    Prior to SQLite version 3.5.0, this routine only constrained the memory +allocated by a single thread - the same thread in which this routine +runs. Beginning with SQLite version 3.5.0, the soft heap limit is +applied to all threads. The value specified for the soft heap limit +is an upper bound on the total memory allocation for all threads. In +version 3.5.0 there is no mechanism for limiting the heap usage for +individual threads.

    + +

    Requirements: +H16351 H16352 H16353 H16354 H16355 H16358 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/sql.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/sql.html --- sqlite3-3.4.2/www/c3ref/sql.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/sql.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +Retrieving Statement SQL + + + + + +

    SQLite C Interface

    Retrieving Statement SQL

    const char *sqlite3_sql(sqlite3_stmt *pStmt);
    +

    +This interface can be used to retrieve a saved copy of the original +SQL text used to create a prepared statement if that statement was +compiled using either sqlite3_prepare_v2() or sqlite3_prepare16_v2().

    + +

    Requirements: +H13101 H13102 H13103 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/sqlite3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/sqlite3.html --- sqlite3-3.4.2/www/c3ref/sqlite3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/sqlite3.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,88 @@ + + +Database Connection Handle + + + + + +

    SQLite C Interface

    Database Connection Handle

    typedef struct sqlite3 sqlite3;
    +

    +Each open SQLite database is represented by a pointer to an instance of +the opaque structure named "sqlite3". It is useful to think of an sqlite3 +pointer as an object. The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces are its constructors, and sqlite3_close() +is its destructor. There are many other interfaces (such as +sqlite3_prepare_v2(), sqlite3_create_function(), and +sqlite3_busy_timeout() to name but three) that are methods on an +sqlite3 object. +
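A sketch of that constructor/destructor view; the file name is hypothetical and error handling is abbreviated.

#include <stdio.h>
#include "sqlite3.h"

int open_and_close(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open("example.db", &db);     /* constructor */
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_close(db);       /* destructor: release the handle even if open failed */
  return rc;
}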

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/status.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/status.html --- sqlite3-3.4.2/www/c3ref/status.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/status.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,105 @@ + + +SQLite Runtime Status + + + + + +

    SQLite C Interface

    SQLite Runtime Status

    int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
    +

    Important: This interface is experimental and is subject to change without notice.

+This interface is used to retrieve runtime status information about the performance of SQLite, and optionally to reset various highwater marks. The first argument is an integer code for the specific parameter to measure. Recognized integer codes are of the form SQLITE_STATUS_.... The current value of the parameter is returned into *pCurrent. The highest recorded value is returned in *pHighwater. If the resetFlag is true, then the highest recorded value is reset after *pHighwater is written. Some parameters do not record the highest value. For those parameters nothing is written into *pHighwater and the resetFlag is ignored. Other parameters record only the highwater mark and not the current value. For these latter parameters nothing is written into *pCurrent.
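A sketch of one common use, reading the SQLITE_STATUS_MEMORY_USED parameter without resetting its highwater mark:

#include <stdio.h>
#include "sqlite3.h"

void report_memory(void){
  int nCur = 0, nHi = 0;
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &nCur, &nHi, 0)==SQLITE_OK ){
    printf("memory used: %d bytes now, %d bytes at peak\n", nCur, nHi);
  }
}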

    + +

    This routine returns SQLITE_OK on success and a non-zero +error code on failure.

    + +

This routine is threadsafe but is not atomic. This routine can be called while other threads are running the same or different SQLite interfaces. However, the values returned in *pCurrent and *pHighwater reflect the status of SQLite at different points in time and it is possible that another thread might change the parameter in between the times when *pCurrent and *pHighwater are written.

    + +

    See also: sqlite3_db_status() +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/step.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/step.html --- sqlite3-3.4.2/www/c3ref/step.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/step.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,144 @@ + + +Evaluate An SQL Statement + + + + + +

    SQLite C Interface

    Evaluate An SQL Statement

    int sqlite3_step(sqlite3_stmt*);
    +

    +After a prepared statement has been prepared using either +sqlite3_prepare_v2() or sqlite3_prepare16_v2() or one of the legacy +interfaces sqlite3_prepare() or sqlite3_prepare16(), this function +must be called one or more times to evaluate the statement.

    + +

    The details of the behavior of the sqlite3_step() interface depend +on whether the statement was prepared using the newer "v2" interface +sqlite3_prepare_v2() and sqlite3_prepare16_v2() or the older legacy +interface sqlite3_prepare() and sqlite3_prepare16(). The use of the +new "v2" interface is recommended for new applications but the legacy +interface will continue to be supported.

    + +

    In the legacy interface, the return value will be either SQLITE_BUSY, +SQLITE_DONE, SQLITE_ROW, SQLITE_ERROR, or SQLITE_MISUSE. +With the "v2" interface, any of the other result codes or +extended result codes might be returned as well.

    + +

SQLITE_BUSY means that the database engine was unable to acquire the database locks it needs to do its job. If the statement is a COMMIT or occurs outside of an explicit transaction, then you can retry the statement. If the statement is not a COMMIT and occurs within an explicit transaction then you should roll back the transaction before continuing.

    + +

    SQLITE_DONE means that the statement has finished executing +successfully. sqlite3_step() should not be called again on this virtual +machine without first calling sqlite3_reset() to reset the virtual +machine back to its initial state.

    + +

    If the SQL statement being executed returns any data, then SQLITE_ROW +is returned each time a new row of data is ready for processing by the +caller. The values may be accessed using the column access functions. +sqlite3_step() is called again to retrieve the next row of data.
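For illustration, the usual row loop over a "v2" prepared statement; the query and column are hypothetical and error handling is abbreviated.

#include <stdio.h>
#include "sqlite3.h"

int print_names(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT name FROM t1", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;   /* "v2" statements return specific codes */
}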

    + +

    SQLITE_ERROR means that a run-time error (such as a constraint +violation) has occurred. sqlite3_step() should not be called again on +the VM. More information may be found by calling sqlite3_errmsg(). +With the legacy interface, a more specific error code (for example, +SQLITE_INTERRUPT, SQLITE_SCHEMA, SQLITE_CORRUPT, and so forth) +can be obtained by calling sqlite3_reset() on the +prepared statement. In the "v2" interface, +the more specific error code is returned directly by sqlite3_step().

    + +

SQLITE_MISUSE means that this routine was called inappropriately. Perhaps it was called on a prepared statement that has already been finalized or on one that had previously returned SQLITE_ERROR or SQLITE_DONE. Or it could be the case that the same database connection is being used by two or more threads at the same moment in time.

    + +

    Goofy Interface Alert: In the legacy interface, the sqlite3_step() +API always returns a generic error code, SQLITE_ERROR, following any +error other than SQLITE_BUSY and SQLITE_MISUSE. You must call +sqlite3_reset() or sqlite3_finalize() in order to find one of the +specific error codes that better describes the error. +We admit that this is a goofy design. The problem has been fixed +with the "v2" interface. If you prepare all of your SQL statements +using either sqlite3_prepare_v2() or sqlite3_prepare16_v2() instead +of the legacy sqlite3_prepare() and sqlite3_prepare16() interfaces, +then the more specific error codes are returned directly +by sqlite3_step(). The use of the "v2" interface is recommended.

    + +

    Requirements: +H13202 H15304 H15306 H15308 H15310 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/stmt.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/stmt.html --- sqlite3-3.4.2/www/c3ref/stmt.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/stmt.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,99 @@ + + +SQL Statement Object + + + + + +

    SQLite C Interface

    SQL Statement Object

    typedef struct sqlite3_stmt sqlite3_stmt;
    +

    +An instance of this object represents a single SQL statement. +This object is variously known as a "prepared statement" or a +"compiled SQL statement" or simply as a "statement".

    + +

    The life of a statement object goes something like this:

    + +

      +
    1. Create the object using sqlite3_prepare_v2() or a related +function. +
    2. Bind values to host parameters using the sqlite3_bind_*() +interfaces. +
    3. Run the SQL by calling sqlite3_step() one or more times. +
    4. Reset the statement using sqlite3_reset() then go back +to step 2. Do this zero or more times. +
    5. Destroy the object using sqlite3_finalize(). +

    + +

    Refer to documentation on individual methods above for additional +information. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/stmt_status.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/stmt_status.html --- sqlite3-3.4.2/www/c3ref/stmt_status.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/stmt_status.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,99 @@ + + +Prepared Statement Status + + + + + +

    SQLite C Interface

    Prepared Statement Status

    int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +Each prepared statement maintains various +counters that measure the number +of times it has performed specific operations. These counters can +be used to monitor the performance characteristics of the prepared +statements. For example, if the number of table steps greatly exceeds +the number of table searches or result rows, that would tend to indicate +that the prepared statement is using a full table scan rather than +an index.

    + +

    This interface is used to retrieve and reset counter values from +a prepared statement. The first argument is the prepared statement +object to be interrogated. The second argument +is an integer code for a specific counter +to be interrogated. +The current value of the requested counter is returned. +If the resetFlg is true, then the counter is reset to zero after this +interface call returns.
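A sketch of the missing-index check described above, using the SQLITE_STMTSTATUS_FULLSCAN_STEP counter (assumed to be available in this release; the interface is experimental):

#include <stdio.h>
#include "sqlite3.h"

void check_for_full_scan(sqlite3_stmt *pStmt){
  int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
  if( nScan>0 ){
    printf("statement performed %d full-scan steps; an index may help\n", nScan);
  }
}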

    + +

    See also: sqlite3_status() and sqlite3_db_status(). +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/table_column_metadata.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/table_column_metadata.html --- sqlite3-3.4.2/www/c3ref/table_column_metadata.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/table_column_metadata.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,148 @@ + + +Extract Metadata About A Column Of A Table + + + + + +

    SQLite C Interface

    Extract Metadata About A Column Of A Table

    int sqlite3_table_column_metadata(
    +  sqlite3 *db,                /* Connection handle */
    +  const char *zDbName,        /* Database name or NULL */
    +  const char *zTableName,     /* Table name */
    +  const char *zColumnName,    /* Column name */
    +  char const **pzDataType,    /* OUTPUT: Declared data type */
    +  char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
    +  int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
    +  int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
    +  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
    +);
    +

    +This routine returns metadata about a specific column of a specific +database table accessible using the database connection handle +passed as the first function argument.

    + +

    The column is identified by the second, third and fourth parameters to +this function. The second parameter is either the name of the database +(i.e. "main", "temp" or an attached database) containing the specified +table or NULL. If it is NULL, then all attached databases are searched +for the table using the same algorithm used by the database engine to +resolve unqualified table references.

    + +

    The third and fourth parameters to this function are the table and column +name of the desired column, respectively. Neither of these parameters +may be NULL.

    + +

    Metadata is returned by writing to the memory locations passed as the 5th +and subsequent parameters to this function. Any of these arguments may be +NULL, in which case the corresponding element of metadata is omitted.

    + +

    + +
Parameter   Output Type   Description

5th         const char*   Data type
6th         const char*   Name of default collation sequence
7th         int           True if column has a NOT NULL constraint
8th         int           True if column is part of the PRIMARY KEY
9th         int           True if column is AUTOINCREMENT
    +

    + +

    The memory pointed to by the character pointers returned for the +declaration type and collation sequence is valid only until the next +call to any SQLite API function.

    + +

    If the specified table is actually a view, an error code is returned.

    + +

    If the specified column is "rowid", "oid" or "_rowid_" and an +INTEGER PRIMARY KEY column has been explicitly declared, then the output +parameters are set for the explicitly declared column. If there is no +explicitly declared INTEGER PRIMARY KEY column, then the output +parameters are set as follows:

    + +

    +data type: "INTEGER"
    +collation sequence: "BINARY"
    +not null: 0
    +primary key: 1
    +auto increment: 0
    +

    + +

    This function may load one or more schemas from database files. If an +error occurs during this process, or if the requested table or column +cannot be found, an error code is returned and an error message left +in the database connection (to be retrieved using sqlite3_errmsg()).

    + +

    This API is only available if the library was compiled with the +SQLITE_ENABLE_COLUMN_METADATA C-preprocessor symbol defined. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/temp_directory.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/temp_directory.html --- sqlite3-3.4.2/www/c3ref/temp_directory.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/temp_directory.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,104 @@ + + +Name Of The Folder Holding Temporary Files + + + + + +

    SQLite C Interface

    Name Of The Folder Holding Temporary Files

    SQLITE_EXTERN char *sqlite3_temp_directory;
    +

    +If this global variable is made to point to a string which is +the name of a folder (a.k.a. directory), then all temporary files +created by SQLite will be placed in that directory. If this variable +is a NULL pointer, then SQLite performs a search for an appropriate +temporary file directory.

    + +

    It is not safe to read or modify this variable in more than one +thread at a time. It is not safe to read or modify this variable +if a database connection is being used at the same time in a separate +thread. +It is intended that this variable be set once +as part of process initialization and before any SQLite interface +routines have been called and that this variable remain unchanged +thereafter.

    + +

    The temp_store_directory pragma may modify this variable and cause +it to point to memory obtained from sqlite3_malloc. Furthermore, +the temp_store_directory pragma always assumes that any string +that this variable points to is held in memory obtained from +sqlite3_malloc and the pragma may attempt to free that memory +using sqlite3_free. +Hence, if this variable is modified directly, either it should be +made NULL or made to point to memory obtained from sqlite3_malloc +or else the use of the temp_store_directory pragma should be avoided. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/test_control.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/test_control.html --- sqlite3-3.4.2/www/c3ref/test_control.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/test_control.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +Testing Interface + + + + + +

    SQLite C Interface

    Testing Interface

    int sqlite3_test_control(int op, ...);
    +

    +The sqlite3_test_control() interface is used to read out internal +state of SQLite and to inject faults into SQLite for testing +purposes. The first parameter is an operation code that determines +the number, meaning, and operation of all subsequent parameters.

    + +

    This interface is not for use by applications. It exists solely +for verifying the correct operation of the SQLite library. Depending +on how the SQLite library is compiled, this interface might not exist.

    + +

    The details of the operation codes, their meanings, the parameters +they take, and what they do are all subject to change without notice. +Unlike most of the SQLite API, this function is not guaranteed to +operate consistently from one release to the next. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/threadsafe.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/threadsafe.html --- sqlite3-3.4.2/www/c3ref/threadsafe.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/threadsafe.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,108 @@ + + +Test To See If The Library Is Threadsafe + + + + + +

    SQLite C Interface

    Test To See If The Library Is Threadsafe

    int sqlite3_threadsafe(void);
    +

+SQLite can be compiled with or without mutexes. When the SQLITE_THREADSAFE C preprocessor macro is 1 or 2, mutexes are enabled and SQLite is threadsafe. When the SQLITE_THREADSAFE macro is 0, the mutexes are omitted. Without the mutexes, it is not safe to use SQLite concurrently from more than one thread.

    + +

    Enabling mutexes incurs a measurable performance penalty. +So if speed is of utmost importance, it makes sense to disable +the mutexes. But for maximum safety, mutexes should be enabled. +The default behavior is for mutexes to be enabled.

    + +

    This interface can be used by a program to make sure that the +version of SQLite that it is linking against was compiled with +the desired setting of the SQLITE_THREADSAFE macro.
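For example, a multi-threaded program might refuse to start against a mutex-free build (a sketch):

#include <stdio.h>
#include <stdlib.h>
#include "sqlite3.h"

void require_threadsafe(void){
  if( sqlite3_threadsafe()==0 ){
    fprintf(stderr, "this SQLite build was compiled with SQLITE_THREADSAFE=0\n");
    exit(1);
  }
}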

    + +

    This interface only reports on the compile-time mutex setting +of the SQLITE_THREADSAFE flag. If SQLite is compiled with +SQLITE_THREADSAFE=1 then mutexes are enabled by default but +can be fully or partially disabled using a call to sqlite3_config() +with the verbs SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_MULTITHREAD, +or SQLITE_CONFIG_MUTEX. The return value of this function shows +only the default compile-time setting, not any run-time changes +to that setting.

    + +

    See the threading mode documentation for additional information.

    + +

    Requirements: H10101 H10102 +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/total_changes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/total_changes.html --- sqlite3-3.4.2/www/c3ref/total_changes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/total_changes.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,102 @@ + + +Total Number Of Rows Modified + + + + + +

    SQLite C Interface

    Total Number Of Rows Modified

    int sqlite3_total_changes(sqlite3*);
    +

    +This function returns the number of row changes caused by INSERT, +UPDATE or DELETE statements since the database connection was opened. +The count includes all changes from all +trigger contexts. However, +the count does not include changes used to implement REPLACE constraints, +do rollbacks or ABORT processing, or DROP TABLE processing. The +count does not include rows of views that fire an INSTEAD OF trigger, +though if the INSTEAD OF trigger makes changes of its own, those changes +are counted. +The changes are counted as soon as the statement that makes them is +completed (when the statement handle is passed to sqlite3_reset() or +sqlite3_finalize()).

    + +

    See also the sqlite3_changes() interface and the +count_changes pragma.

    + +

    Requirements: +H12261 H12263

    + +

    If a separate thread makes changes on the same database connection +while sqlite3_total_changes() is running then the value +returned is unpredictable and not meaningful. +

    See also lists of + Objects, + Constants, and + Functions.


    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/unlock_notify.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/unlock_notify.html --- sqlite3-3.4.2/www/c3ref/unlock_notify.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/unlock_notify.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,194 @@ + + +Unlock Notification + + + + + +

    SQLite C Interface

    Unlock Notification

    int sqlite3_unlock_notify(
    +  sqlite3 *pBlocked,                          /* Waiting connection */
    +  void (*xNotify)(void **apArg, int nArg),    /* Callback function to invoke */
    +  void *pNotifyArg                            /* Argument to pass to xNotify */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

    +When running in shared-cache mode, a database operation may fail with +an SQLITE_LOCKED error if the required locks on the shared-cache or +individual tables within the shared-cache cannot be obtained. See +SQLite Shared-Cache Mode for a description of shared-cache locking. +This API may be used to register a callback that SQLite will invoke +when the connection currently holding the required lock relinquishes it. +This API is only available if the library was compiled with the +SQLITE_ENABLE_UNLOCK_NOTIFY C-preprocessor symbol defined.

    + +

    See Also: Using the SQLite Unlock Notification Feature.

    + +

    Shared-cache locks are released when a database connection concludes +its current transaction, either by committing it or rolling it back.

    + +

When a connection (known as the blocked connection) fails to obtain a shared-cache lock and SQLITE_LOCKED is returned to the caller, the identity of the database connection (the blocking connection) that has locked the required resource is stored internally. After an application receives an SQLITE_LOCKED error, it may call the sqlite3_unlock_notify() method with the blocked connection handle as the first argument to register for a callback that will be invoked when the blocking connection's current transaction is concluded. The callback is invoked from within the sqlite3_step() or sqlite3_close() call that concludes the blocking connection's transaction.

    + +

    If sqlite3_unlock_notify() is called in a multi-threaded application, +there is a chance that the blocking connection will have already +concluded its transaction by the time sqlite3_unlock_notify() is invoked. +If this happens, then the specified callback is invoked immediately, +from within the call to sqlite3_unlock_notify().

    + +

    If the blocked connection is attempting to obtain a write-lock on a +shared-cache table, and more than one other connection currently holds +a read-lock on the same table, then SQLite arbitrarily selects one of +the other connections to use as the blocking connection.

    + +

There may be at most one unlock-notify callback registered by a blocked connection. If sqlite3_unlock_notify() is called when the blocked connection already has a registered unlock-notify callback, then the new callback replaces the old. If sqlite3_unlock_notify() is called with a NULL pointer as its second argument, then any existing unlock-notify callback is canceled. The blocked connection's unlock-notify callback may also be canceled by closing the blocked connection using sqlite3_close().

    + +

    The unlock-notify callback is not reentrant. If an application invokes +any sqlite3_xxx API functions from within an unlock-notify callback, a +crash or deadlock may be the result.

    + +

    Unless deadlock is detected (see below), sqlite3_unlock_notify() always +returns SQLITE_OK.

    + +

    Callback Invocation Details

    + +

    When an unlock-notify callback is registered, the application provides a +single void* pointer that is passed to the callback when it is invoked. +However, the signature of the callback function allows SQLite to pass +it an array of void* context pointers. The first argument passed to +an unlock-notify callback is a pointer to an array of void* pointers, +and the second is the number of entries in the array.
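A sketch of a blocking helper in the spirit of the separate "Using the SQLite Unlock Notification Feature" document, assuming POSIX threads and a library built with SQLITE_ENABLE_UNLOCK_NOTIFY; handling of the SQLITE_LOCKED (deadlock) return is left to the caller.

#include <pthread.h>
#include "sqlite3.h"

typedef struct UnlockWaiter {
  int fired;                 /* set to 1 when the callback has run */
  pthread_mutex_t mu;
  pthread_cond_t cond;
} UnlockWaiter;

static void unlock_notify_cb(void **apArg, int nArg){
  int i;
  for(i=0; i<nArg; i++){
    UnlockWaiter *p = (UnlockWaiter*)apArg[i];
    pthread_mutex_lock(&p->mu);
    p->fired = 1;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->mu);
  }
}

int wait_for_unlock_notify(sqlite3 *db){
  UnlockWaiter w;
  int rc;
  w.fired = 0;
  pthread_mutex_init(&w.mu, 0);
  pthread_cond_init(&w.cond, 0);
  rc = sqlite3_unlock_notify(db, unlock_notify_cb, &w);
  if( rc==SQLITE_OK ){
    pthread_mutex_lock(&w.mu);
    while( !w.fired ) pthread_cond_wait(&w.cond, &w.mu);
    pthread_mutex_unlock(&w.mu);
  }
  pthread_cond_destroy(&w.cond);
  pthread_mutex_destroy(&w.mu);
  return rc;                 /* SQLITE_LOCKED here means deadlock was detected */
}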

    + +

When a blocking connection's transaction is concluded, there may be more than one blocked connection that has registered for an unlock-notify callback. If two or more such blocked connections have specified the same callback function, then instead of invoking the callback function multiple times, it is invoked once with the set of void* context pointers specified by the blocked connections bundled together into an array. This gives the application an opportunity to prioritize any actions related to the set of unblocked database connections.

    + +

    Deadlock Detection

    + +

    Assuming that after registering for an unlock-notify callback a +database waits for the callback to be issued before taking any further +action (a reasonable assumption), then using this API may cause the +application to deadlock. For example, if connection X is waiting for +connection Y's transaction to be concluded, and similarly connection +Y is waiting on connection X's transaction, then neither connection +will proceed and the system may remain deadlocked indefinitely.

    + +

To avoid this scenario, sqlite3_unlock_notify() performs deadlock detection. If a given call to sqlite3_unlock_notify() would put the system in a deadlocked state, then SQLITE_LOCKED is returned and no unlock-notify callback is registered. The system is said to be in a deadlocked state if connection A has registered for an unlock-notify callback on the conclusion of connection B's transaction, and connection B has itself registered for an unlock-notify callback when connection A's transaction is concluded. Indirect deadlock is also detected, so the system is also considered to be deadlocked if connection B has registered for an unlock-notify callback on the conclusion of connection C's transaction, where connection C is waiting on connection A. Any number of levels of indirection are allowed.

    + +

    The "DROP TABLE" Exception

    + +

    When a call to sqlite3_step() returns SQLITE_LOCKED, it is almost +always appropriate to call sqlite3_unlock_notify(). There is however, +one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, +SQLite checks if there are any currently executing SELECT statements +that belong to the same connection. If there are, SQLITE_LOCKED is +returned. In this case there is no "blocking connection", so invoking +sqlite3_unlock_notify() results in the unlock-notify callback being +invoked immediately. If the application then re-attempts the "DROP TABLE" +or "DROP INDEX" query, an infinite loop might be the result.

    + +

    One way around this problem is to check the extended error code returned +by an sqlite3_step() call. If there is a blocking connection, then the +extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in +the special "DROP TABLE/INDEX" case, the extended error code is just +SQLITE_LOCKED. +
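    For illustration only, a sketch of this check wrapped around sqlite3_step() is shown
    below. Here wait_for_unlock_notify() is a hypothetical application helper that calls
    sqlite3_unlock_notify() and blocks until the registered callback fires; db and pStmt
    are assumed to be an open connection and a prepared statement.

   int rc;
   while( SQLITE_LOCKED==(rc = sqlite3_step(pStmt)) ){
     /* The special "DROP TABLE/INDEX" case: there is no blocking connection,
     ** so waiting on an unlock-notify callback would loop forever. */
     if( sqlite3_extended_errcode(db)!=SQLITE_LOCKED_SHAREDCACHE ) break;

     /* Hypothetical helper: registers the callback and blocks until it is
     ** invoked.  Assumed to return SQLITE_LOCKED if deadlock was detected. */
     rc = wait_for_unlock_notify(db);
     if( rc!=SQLITE_OK ) break;

     sqlite3_reset(pStmt);
   }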

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/update_hook.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/update_hook.html --- sqlite3-3.4.2/www/c3ref/update_hook.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/update_hook.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,127 @@ + + +Data Change Notification Callbacks + + + + + +

    SQLite C Interface

    Data Change Notification Callbacks

    void *sqlite3_update_hook(
    +  sqlite3*, 
    +  void(*)(void *,int ,char const *,char const *,sqlite3_int64),
    +  void*
    +);
    +

    +The sqlite3_update_hook() interface registers a callback function +with the database connection identified by the first argument +to be invoked whenever a row is updated, inserted or deleted. +Any callback set by a previous call to this function +for the same database connection is overridden.

    + +

    The second argument is a pointer to the function to invoke when a +row is updated, inserted or deleted. +The first argument to the callback is a copy of the third argument +to sqlite3_update_hook(). +The second callback argument is one of SQLITE_INSERT, SQLITE_DELETE, +or SQLITE_UPDATE, depending on the operation that caused the callback +to be invoked. +The third and fourth arguments to the callback contain pointers to the +database and table name containing the affected row. +The final callback parameter is the rowid of the row. +In the case of an update, this is the rowid after the update takes place.
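    For illustration, a sketch of registering such a callback follows. The callback name
    is hypothetical; db is assumed to be an open connection and <stdio.h> is assumed to be
    included. The callback only writes to a FILE*, so it does not modify the database
    connection that invoked it.

   /* Hypothetical callback: print a line for every change on this connection. */
   static void change_logger(void *pArg, int op, char const *zDb,
                             char const *zTbl, sqlite3_int64 rowid){
     const char *zOp = op==SQLITE_INSERT ? "INSERT" :
                       op==SQLITE_DELETE ? "DELETE" : "UPDATE";
     fprintf((FILE*)pArg, "%s on %s.%s rowid=%lld\n",
             zOp, zDb, zTbl, (long long)rowid);
   }

   sqlite3_update_hook(db, change_logger, (void*)stderr);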

    + +

    The update hook is not invoked when internal system tables are +modified (i.e. sqlite_master and sqlite_sequence).

    + +

    In the current implementation, the update hook +is not invoked when duplicate rows are deleted because of an +ON CONFLICT REPLACE clause. Nor is the update hook +invoked when rows are deleted using the truncate optimization. +The exceptions defined in this paragraph might change in a future +release of SQLite.

    + +

    The update hook implementation must not do anything that will modify +the database connection that invoked the update hook. Any actions +to modify the database connection must be deferred until after the +completion of the sqlite3_step() call that triggered the update hook. +Note that, for the meaning of "modify" in this paragraph, calls to +sqlite3_prepare_v2() and sqlite3_step() both count as modifying the +database connection.

    + +

    If another function was previously registered, its pArg value +is returned. Otherwise NULL is returned.

    + +

    See also the sqlite3_commit_hook() and sqlite3_rollback_hook() +interfaces.

    + +

    Requirements: +H12971 H12973 H12975 H12977 H12979 H12981 H12983 H12986 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/user_data.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/user_data.html --- sqlite3-3.4.2/www/c3ref/user_data.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/user_data.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +User Data For Functions + + + + + +

    SQLite C Interface

    User Data For Functions

    void *sqlite3_user_data(sqlite3_context*);
    +

    +The sqlite3_user_data() interface returns a copy of +the pointer that was the pUserData parameter (the 5th parameter) +of the sqlite3_create_function() +and sqlite3_create_function16() routines that originally +registered the application defined function.

    + +

    This routine must be called from the same thread in which +the application-defined function is running.
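    As a small sketch (the function and variable names are hypothetical), the user-data
    pointer can carry per-function state, such as the multiplier for a scale(X) SQL
    function; db is assumed to be an open connection.

   /* xFunc for a hypothetical scale(X) function: multiply X by the double
   ** whose address was passed as the 5th argument when registering. */
   static void scaleFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
     double *pFactor = (double*)sqlite3_user_data(ctx);
     sqlite3_result_double(ctx, sqlite3_value_double(argv[0]) * (*pFactor));
   }

   static double factor = 2.5;
   sqlite3_create_function(db, "scale", 1, SQLITE_UTF8, &factor,
                           scaleFunc, 0, 0);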

    + +

    Requirements: +H16243 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/value_blob.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/value_blob.html --- sqlite3-3.4.2/www/c3ref/value_blob.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/value_blob.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,136 @@ + + +Obtaining SQL Function Parameter Values + + + + + +

    SQLite C Interface

    Obtaining SQL Function Parameter Values

    const void *sqlite3_value_blob(sqlite3_value*);
    +int sqlite3_value_bytes(sqlite3_value*);
    +int sqlite3_value_bytes16(sqlite3_value*);
    +double sqlite3_value_double(sqlite3_value*);
    +int sqlite3_value_int(sqlite3_value*);
    +sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
    +const unsigned char *sqlite3_value_text(sqlite3_value*);
    +const void *sqlite3_value_text16(sqlite3_value*);
    +const void *sqlite3_value_text16le(sqlite3_value*);
    +const void *sqlite3_value_text16be(sqlite3_value*);
    +int sqlite3_value_type(sqlite3_value*);
    +int sqlite3_value_numeric_type(sqlite3_value*);
    +

    +The C-language implementation of SQL functions and aggregates uses +this set of interface routines to access the parameter values on +the function or aggregate.

    + +

    The xFunc (for scalar functions) or xStep (for aggregates) parameters +to sqlite3_create_function() and sqlite3_create_function16() +define callbacks that implement the SQL functions and aggregates. +The 4th parameter to these callbacks is an array of pointers to +protected sqlite3_value objects. There is one sqlite3_value object for +each parameter to the SQL function. These routines are used to +extract values from the sqlite3_value objects.

    + +

    These routines work only with protected sqlite3_value objects. +Any attempt to use these routines on an unprotected sqlite3_value +object results in undefined behavior.

    + +

    These routines work just like the corresponding column access functions +except that these routines take a single protected sqlite3_value object +pointer instead of a sqlite3_stmt* pointer and an integer column number.

    + +

    The sqlite3_value_text16() interface extracts a UTF-16 string +in the native byte-order of the host machine. The +sqlite3_value_text16be() and sqlite3_value_text16le() interfaces +extract UTF-16 strings as big-endian and little-endian respectively.

    + +

    The sqlite3_value_numeric_type() interface attempts to apply +numeric affinity to the value. This means that an attempt is +made to convert the value to an integer or floating point. If +such a conversion is possible without loss of information (in other +words, if the value is a string that looks like a number) +then the conversion is performed. Otherwise no conversion occurs. +The datatype after conversion is returned.
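    For example, a hypothetical scalar function implementation might dispatch on the type
    of its single argument like this (a sketch only; the function name is made up):

   static void echoFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
     switch( sqlite3_value_type(argv[0]) ){
       case SQLITE_INTEGER:
         sqlite3_result_int64(ctx, sqlite3_value_int64(argv[0]));
         break;
       case SQLITE_FLOAT:
         sqlite3_result_double(ctx, sqlite3_value_double(argv[0]));
         break;
       case SQLITE_NULL:
         sqlite3_result_null(ctx);
         break;
       default:
         /* TEXT or BLOB: return the value size in bytes instead */
         sqlite3_result_int(ctx, sqlite3_value_bytes(argv[0]));
         break;
     }
   }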

    + +

    Please pay particular attention to the fact that the pointer returned +from sqlite3_value_blob(), sqlite3_value_text(), or +sqlite3_value_text16() can be invalidated by a subsequent call to +sqlite3_value_bytes(), sqlite3_value_bytes16(), sqlite3_value_text(), +or sqlite3_value_text16().

    + +

    These routines must be called from the same thread as +the SQL function that supplied the sqlite3_value* parameters.

    + +

    Requirements: +H15103 H15106 H15109 H15112 H15115 H15118 H15121 H15124 +H15127 H15130 H15133 H15136 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/value.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/value.html --- sqlite3-3.4.2/www/c3ref/value.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/value.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,112 @@ + + +Dynamically Typed Value Object + + + + + +

    SQLite C Interface

    Dynamically Typed Value Object

    typedef struct Mem sqlite3_value;
    +

    +SQLite uses the sqlite3_value object to represent all values +that can be stored in a database table. SQLite uses dynamic typing +for the values it stores. Values stored in sqlite3_value objects +can be integers, floating point values, strings, BLOBs, or NULL.

    + +

    An sqlite3_value object may be either "protected" or "unprotected". +Some interfaces require a protected sqlite3_value. Other interfaces +will accept either a protected or an unprotected sqlite3_value. +Every interface that accepts sqlite3_value arguments specifies +whether or not it requires a protected sqlite3_value.

    + +

    The terms "protected" and "unprotected" refer to whether or not +a mutex is held. A internal mutex is held for a protected +sqlite3_value object but no mutex is held for an unprotected +sqlite3_value object. If SQLite is compiled to be single-threaded +(with SQLITE_THREADSAFE=0 and with sqlite3_threadsafe() returning 0) +or if SQLite is run in one of reduced mutex modes +SQLITE_CONFIG_SINGLETHREAD or SQLITE_CONFIG_MULTITHREAD +then there is no distinction between protected and unprotected +sqlite3_value objects and they can be used interchangeably. However, +for maximum code portability it is recommended that applications +still make the distinction between between protected and unprotected +sqlite3_value objects even when not strictly required.

    + +

    The sqlite3_value objects that are passed as parameters into the +implementation of application-defined SQL functions are protected. +The sqlite3_value object returned by +sqlite3_column_value() is unprotected. +Unprotected sqlite3_value objects may only be used with +sqlite3_result_value() and sqlite3_bind_value(). +The sqlite3_value_type() family of +interfaces require protected sqlite3_value objects. +
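    A short sketch of the permitted use of an unprotected value, copying a column of one
    prepared statement into a parameter of another (pSelect and pInsert are assumed
    prepared statements):

   /* The unprotected object returned by sqlite3_column_value() may only
   ** be handed to sqlite3_bind_value() or sqlite3_result_value(). */
   sqlite3_value *pVal = sqlite3_column_value(pSelect, 0);
   sqlite3_bind_value(pInsert, 1, pVal);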

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/vfs_find.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/vfs_find.html --- sqlite3-3.4.2/www/c3ref/vfs_find.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/vfs_find.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,110 @@ + + +Virtual File System Objects + + + + + +

    SQLite C Interface

    Virtual File System Objects

    sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
    +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
    +int sqlite3_vfs_unregister(sqlite3_vfs*);
    +

    +A virtual filesystem (VFS) is an sqlite3_vfs object +that SQLite uses to interact +with the underlying operating system. Most SQLite builds come with a +single default VFS that is appropriate for the host computer. +New VFSes can be registered and existing VFSes can be unregistered. +The following interfaces are provided.

    + +

    The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. +Names are case sensitive. +Names are zero-terminated UTF-8 strings. +If there is no match, a NULL pointer is returned. +If zVfsName is NULL then the default VFS is returned.

    + +

    New VFSes are registered with sqlite3_vfs_register(). +Each new VFS becomes the default VFS if the makeDflt flag is set. +The same VFS can be registered multiple times without injury. +To make an existing VFS into the default VFS, register it again +with the makeDflt flag set. If two different VFSes with the +same name are registered, the behavior is undefined. If a +VFS is registered with a name that is NULL or an empty string, +then the behavior is undefined.

    + +

    Unregister a VFS with the sqlite3_vfs_unregister() interface. +If the default VFS is unregistered, another VFS is chosen as +the default. The choice for the new VFS is arbitrary.
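    A brief sketch of these interfaces follows; the VFS name "unix" is a platform-specific
    assumption and may not exist in every build.

   sqlite3_vfs *pDefault = sqlite3_vfs_find(0);       /* current default VFS */
   sqlite3_vfs *pUnix    = sqlite3_vfs_find("unix");  /* NULL if not present */

   /* Re-registering an existing VFS with makeDflt set makes it the default. */
   if( pUnix ) sqlite3_vfs_register(pUnix, 1);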

    + +

    Requirements: +H11203 H11206 H11209 H11212 H11215 H11218 +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/vfs.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/vfs.html --- sqlite3-3.4.2/www/c3ref/vfs.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/vfs.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,228 @@ + + +OS Interface Object + + + + + +

    SQLite C Interface

    OS Interface Object

    typedef struct sqlite3_vfs sqlite3_vfs;
    +struct sqlite3_vfs {
    +  int iVersion;            /* Structure version number */
    +  int szOsFile;            /* Size of subclassed sqlite3_file */
    +  int mxPathname;          /* Maximum file pathname length */
    +  sqlite3_vfs *pNext;      /* Next registered VFS */
    +  const char *zName;       /* Name of this virtual file system */
    +  void *pAppData;          /* Pointer to application-specific data */
    +  int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
    +               int flags, int *pOutFlags);
    +  int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
    +  int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
    +  int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
    +  void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
    +  void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
    +  void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
    +  void (*xDlClose)(sqlite3_vfs*, void*);
    +  int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
    +  int (*xSleep)(sqlite3_vfs*, int microseconds);
    +  int (*xCurrentTime)(sqlite3_vfs*, double*);
    +  int (*xGetLastError)(sqlite3_vfs*, int, char *);
    +  /* New fields may be appended in future versions.  The iVersion
    +  ** value will increment whenever this happens. */
    +};
    +

    +An instance of the sqlite3_vfs object defines the interface between +the SQLite core and the underlying operating system. The "vfs" +in the name of the object stands for "virtual file system".

    + +

    The value of the iVersion field is initially 1 but may be larger in +future versions of SQLite. Additional fields may be appended to this +object when the iVersion value is increased. Note that the structure +of the sqlite3_vfs object changed in the transition from +SQLite version 3.5.9 to 3.6.0 and yet the iVersion field was not +modified.

    + +

    The szOsFile field is the size of the subclassed sqlite3_file +structure used by this VFS. mxPathname is the maximum length of +a pathname in this VFS.

    + +

    Registered sqlite3_vfs objects are kept on a linked list formed by +the pNext pointer. The sqlite3_vfs_register() +and sqlite3_vfs_unregister() interfaces manage this list +in a thread-safe way. The sqlite3_vfs_find() interface +searches the list. Neither the application code nor the VFS +implementation should use the pNext pointer.

    + +

    The pNext field is the only field in the sqlite3_vfs +structure that SQLite will ever modify. SQLite will only access +or modify this field while holding a particular static mutex. +The application should never modify anything within the sqlite3_vfs +object once the object has been registered.

    + +

    The zName field holds the name of the VFS module. The name must +be unique across all VFS modules.

    + +

    SQLite will guarantee that the zFilename parameter to xOpen +is either a NULL pointer or a string obtained +from xFullPathname(). SQLite further guarantees that +the string will be valid and unchanged until xClose() is +called. Because of the previous sentence, +the sqlite3_file can safely store a pointer to the +filename if it needs to remember the filename for some reason. +If the zFilename parameter to xOpen is a NULL pointer then xOpen +must invent its own temporary name for the file. Whenever the +zFilename parameter is NULL it will also be the case that the +flags parameter will include SQLITE_OPEN_DELETEONCLOSE.

    + +

    The flags argument to xOpen() includes all bits set in +the flags argument to sqlite3_open_v2(). Or if sqlite3_open() +or sqlite3_open16() is used, then flags includes at least +SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE. +If xOpen() opens a file read-only then it sets *pOutFlags to +include SQLITE_OPEN_READONLY. Other bits in *pOutFlags may be set.

    + +

    SQLite will also add one of the following flags to the xOpen() +call, depending on the object being opened:

    + +

    + +

    The file I/O implementation can use the object type flags to +change the way it deals with files. For example, an application +that does not care about crash recovery or rollback might make +the open of a journal file a no-op. Writes to this journal would +also be no-ops, and any attempt to read the journal would return +SQLITE_IOERR. Or the implementation might recognize that a database +file will be doing page-aligned sector reads and writes in a random +order and set up its I/O subsystem accordingly.

    + +

    SQLite might also add one of the following flags to the xOpen method: +SQLITE_OPEN_DELETEONCLOSE or SQLITE_OPEN_EXCLUSIVE.

    + +

    + +

    The SQLITE_OPEN_DELETEONCLOSE flag means the file should be +deleted when it is closed. The SQLITE_OPEN_DELETEONCLOSE flag +will be set for TEMP databases, journals, and subjournals.

    + +

    The SQLITE_OPEN_EXCLUSIVE flag is always used in conjunction +with the SQLITE_OPEN_CREATE flag, which are both directly +analogous to the O_EXCL and O_CREAT flags of the POSIX open() +API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with +SQLITE_OPEN_CREATE, is used to indicate that the file should always +be created, and that it is an error if it already exists. +It is not used to indicate the file should be opened +for exclusive access.

    + +

    At least szOsFile bytes of memory are allocated by SQLite +to hold the sqlite3_file structure passed as the third +argument to xOpen. The xOpen method does not have to +allocate the structure; it should just fill it in. Note that +the xOpen method must set the sqlite3_file.pMethods to either +a valid sqlite3_io_methods object or to NULL. xOpen must do +this even if the open fails. SQLite expects that the sqlite3_file.pMethods +element will be valid after xOpen returns regardless of the success +or failure of the xOpen call.
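    As an illustration of the pMethods rule, a hypothetical pass-through VFS (with the
    real VFS stashed in its pAppData field and an szOsFile at least as large as the real
    VFS's) might implement xOpen like this; this is a sketch, not a complete VFS.

   static int demoOpen(sqlite3_vfs *pVfs, const char *zName,
                       sqlite3_file *pFile, int flags, int *pOutFlags){
     sqlite3_vfs *pReal = (sqlite3_vfs*)pVfs->pAppData;   /* assumed layout */
     int rc = pReal->xOpen(pReal, zName, pFile, flags, pOutFlags);
     /* The underlying xOpen has already set pFile->pMethods to either a
     ** valid sqlite3_io_methods pointer or NULL, even on failure, so the
     ** requirement described above is preserved without further work. */
     return rc;
   }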

    + +

    The flags argument to xAccess() may be SQLITE_ACCESS_EXISTS +to test for the existence of a file, or SQLITE_ACCESS_READWRITE to +test whether a file is readable and writable, or SQLITE_ACCESS_READ +to test whether a file is at least readable. The file can be a +directory.

    + +

    SQLite will always allocate at least mxPathname+1 bytes for the +output buffer xFullPathname. The exact size of the output buffer +is also passed as a parameter to both methods. If the output buffer +is not large enough, SQLITE_CANTOPEN should be returned. Since this is +handled as a fatal error by SQLite, vfs implementations should endeavor +to prevent this by setting mxPathname to a sufficiently large value.

    + +

    The xRandomness(), xSleep(), and xCurrentTime() interfaces +are not strictly a part of the filesystem, but they are +included in the VFS structure for completeness. +The xRandomness() function attempts to return nBytes bytes +of good-quality randomness into zOut. The return value is +the actual number of bytes of randomness obtained. +The xSleep() method causes the calling thread to sleep for at +least the number of microseconds given. The xCurrentTime() +method returns a Julian Day Number for the current date and time.

    + +

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/vtab_cursor.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/vtab_cursor.html --- sqlite3-3.4.2/www/c3ref/vtab_cursor.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/vtab_cursor.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,95 @@ + + +Virtual Table Cursor Object + + + + + +

    SQLite C Interface

    Virtual Table Cursor Object

    struct sqlite3_vtab_cursor {
    +  sqlite3_vtab *pVtab;      /* Virtual table of this cursor */
    +  /* Virtual table implementations will typically add additional fields */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +Every virtual table module implementation uses a subclass of the +following structure to describe cursors that point into the +virtual table and are used +to loop through the virtual table. Cursors are created using the +xOpen method of the module and are destroyed +by the xClose method. Cursors are used +by the xFilter, xNext, xEof, xColumn, and xRowid methods +of the module. Each module implementation will define +the content of a cursor structure to suit its own needs.

    + +

    This superclass exists in order to define fields of the cursor that +are common to all implementations. +
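    For example, a hypothetical module that iterates over an in-memory array might declare
    its cursor subclass as:

   typedef struct demo_cursor demo_cursor;
   struct demo_cursor {
     sqlite3_vtab_cursor base;   /* Base class.  Must be first. */
     int iRow;                   /* Current position within the array */
   };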

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c3ref/vtab.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c3ref/vtab.html --- sqlite3-3.4.2/www/c3ref/vtab.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c3ref/vtab.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,98 @@ + + +Virtual Table Instance Object + + + + + +

    SQLite C Interface

    Virtual Table Instance Object

    struct sqlite3_vtab {
    +  const sqlite3_module *pModule;  /* The module for this virtual table */
    +  int nRef;                       /* Used internally */
    +  char *zErrMsg;                  /* Error message from sqlite3_mprintf() */
    +  /* Virtual table implementations will typically add additional fields */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +Every virtual table module implementation uses a subclass +of the following structure to describe a particular instance +of the virtual table. Each subclass will +be tailored to the specific needs of the module implementation. +The purpose of this superclass is to define certain fields that are +common to all module implementations.

    + +

    Virtual table methods can set an error message by assigning a +string obtained from sqlite3_mprintf() to zErrMsg. The method should +take care that any prior string is freed by a call to sqlite3_free() +prior to assigning a new string to zErrMsg. After the error message +is delivered up to the client application, the string will be automatically +freed by sqlite3_free() and the zErrMsg field will be zeroed. +
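    A sketch of the recommended pattern inside a virtual table method, where pVtab is the
    method's sqlite3_vtab pointer and iRow is a hypothetical local variable:

   /* Free any prior message before assigning a new one. */
   sqlite3_free(pVtab->zErrMsg);
   pVtab->zErrMsg = sqlite3_mprintf("no such row: %d", iRow);
   return SQLITE_ERROR;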

    See also lists of + Objects, + Constants, and + Functions.


    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/capi3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/capi3.html --- sqlite3-3.4.2/www/capi3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/capi3.html 2009-06-27 15:07:36.000000000 +0100 @@ -0,0 +1,588 @@ + + +C/C++ Interface For SQLite Version 3 + + + + + +

    +Note: +This document was written in 2004 as a guide to helping programmers +move from using SQLite version 2 to SQLite version 3. The information +in this document is still essentially correct, however there have been +many changes and enhancements over the years. We recommend that the +following documents be used instead: +

    +

    +
    + +

    C/C++ Interface For SQLite Version 3

    + +

    1.0 Overview

    + +

    +SQLite version 3.0 is a new version of SQLite, derived from +the SQLite 2.8.13 code base, but with an incompatible file format +and API. +SQLite version 3.0 was created to answer demand for the following features: +

    + +
      +
    • Support for UTF-16.
    • +
    • User-definable text collating sequences.
    • +
    • The ability to store BLOBs in indexed columns.
    • +
    + +

    +It was necessary to move to version 3.0 to implement these features because +each requires incompatible changes to the database file format. Other +incompatible changes, such as a cleanup of the API, were introduced at the +same time under the theory that it is best to get your incompatible changes +out of the way all at once. +

    + +

    +The API for version 3.0 is similar to the version 2.X API, +but with some important changes. Most noticeably, the "sqlite_" +prefix that occurs at the beginning of all API functions and data +structures is changed to "sqlite3_". +This avoids confusion between the two APIs and allows linking against both +SQLite 2.X and SQLite 3.0 at the same time. +

    + +

    +There is no agreement on what the C datatype for a UTF-16 +string should be. Therefore, SQLite uses a generic type of void* +to refer to UTF-16 strings. Client software can cast the void* +to whatever datatype is appropriate for their system. +

    + +

    2.0 C/C++ Interface

    + +

    +The API for SQLite 3.0 includes 83 separate functions in addition +to several data structures and #defines. (A complete +API reference is provided as a separate +document.) +Fortunately, the interface is not nearly as complex as its size implies. +Simple programs can still make do with only 3 functions: +sqlite3_open(), sqlite3_exec(), and sqlite3_close(). +More control over the execution of the database engine is provided +using sqlite3_prepare_v2() +to compile an SQLite statement into byte code and +sqlite3_step() to execute that bytecode. +A family of routines with names beginning with +sqlite3_column_ +is used to extract information about the result set of a query. +Many interface functions come in pairs, with both a UTF-8 and +UTF-16 version. And there is a collection of routines +used to implement user-defined SQL functions and user-defined +text collating sequences. +

    + + +

    2.1 Opening and closing a database

    + +
    +   typedef struct sqlite3 sqlite3;
    +   int sqlite3_open(const char*, sqlite3**);
    +   int sqlite3_open16(const void*, sqlite3**);
    +   int sqlite3_close(sqlite3*);
    +   const char *sqlite3_errmsg(sqlite3*);
    +   const void *sqlite3_errmsg16(sqlite3*);
    +   int sqlite3_errcode(sqlite3*);
    +
    + +

    +The sqlite3_open() routine returns an integer error code rather than +a pointer to the sqlite3 structure as the version 2 interface did. +The difference between sqlite3_open() +and sqlite3_open16() is that sqlite3_open16() takes UTF-16 (in host native +byte order) for the name of the database file. If a new database file +needs to be created, then sqlite3_open16() sets the internal text +representation to UTF-16 whereas sqlite3_open() sets the text +representation to UTF-8. +

    + +

    +The opening and/or creating of the database file is deferred until the +file is actually needed. This allows options and parameters, such +as the native text representation and default page size, to be +set using PRAGMA statements. +

    + +

    +The sqlite3_errcode() routine returns a result code for the most +recent major API call. sqlite3_errmsg() returns an English-language +text error message for the most recent error. The error message is +represented in UTF-8 and will be ephemeral - it could disappear on +the next call to any SQLite API function. sqlite3_errmsg16() works like +sqlite3_errmsg() except that it returns the error message represented +as UTF-16 in host native byte order. +
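    A minimal sketch combining these routines; the filename and SQL are placeholders and
    error handling is abbreviated.

   sqlite3 *db;
   int rc = sqlite3_open("example.db", &db);
   if( rc!=SQLITE_OK ){
     fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
     sqlite3_close(db);
     return;
   }
   rc = sqlite3_exec(db, "CREATE TABLE t1(a,b)", 0, 0, 0);
   if( rc!=SQLITE_OK ){
     fprintf(stderr, "exec failed: %s\n", sqlite3_errmsg(db));
   }
   sqlite3_close(db);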

    + +

    +The error codes for SQLite version 3 are unchanged from version 2. +They are as follows: +

    + +
    +#define SQLITE_OK           0   /* Successful result */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
    +#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    +
    + +

    2.2 Executing SQL statements

    + +
    +   typedef int (*sqlite_callback)(void*,int,char**, char**);
    +   int sqlite3_exec(sqlite3*, const char *sql, sqlite_callback, void*, char**);
    +
    + +

    +The sqlite3_exec() function works much as it did in SQLite version 2. +Zero or more SQL statements specified in the second parameter are compiled +and executed. Query results are returned to a callback routine. +

    + +

    +In SQLite version 3, the sqlite3_exec routine is just a wrapper around +calls to the prepared statement interface. +

    + +
    +   typedef struct sqlite3_stmt sqlite3_stmt;
    +   int sqlite3_prepare(sqlite3*, const char*, int, sqlite3_stmt**, const char**);
    +   int sqlite3_prepare16(sqlite3*, const void*, int, sqlite3_stmt**, const void**);
    +   int sqlite3_finalize(sqlite3_stmt*);
    +   int sqlite3_reset(sqlite3_stmt*);
    +
    + +

    +The sqlite3_prepare interface compiles a single SQL statement into byte code +for later execution. This interface is now the preferred way of accessing +the database. +

    + +

    +The SQL statement is a UTF-8 string for sqlite3_prepare(). +The sqlite3_prepare16() works the same way except +that it expects a UTF-16 string as SQL input. +Only the first SQL statement in the input string is compiled. +The fifth parameter is filled in with a pointer to the next (uncompiled) +SQLite statement in the input string, if any. +The sqlite3_finalize() routine deallocates a prepared SQL statement. +All prepared statements must be finalized before the database can be +closed. +The sqlite3_reset() routine resets a prepared SQL statement so that it +can be executed again. +

    + +

    +The SQL statement may contain tokens of the form "?" or "?nnn" or ":aaa" +where "nnn" is an integer and "aaa" is an identifier. +Such tokens represent unspecified literal values (or "wildcards") +to be filled in later by the +sqlite3_bind interface. +Each wildcard has an associated number which is its sequence in the +statement or the "nnn" in the case of a "?nnn" form. +It is allowed for the same wildcard +to occur more than once in the same SQL statement, in which case +all instances of that wildcard will be filled in with the same value. +Unbound wildcards have a value of NULL. +

    + +
    +   int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +   int sqlite3_bind_double(sqlite3_stmt*, int, double);
    +   int sqlite3_bind_int(sqlite3_stmt*, int, int);
    +   int sqlite3_bind_int64(sqlite3_stmt*, int, long long int);
    +   int sqlite3_bind_null(sqlite3_stmt*, int);
    +   int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
    +   int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +   int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
    +
    + +

    +There is an assortment of sqlite3_bind routines used to assign values +to wildcards in a prepared SQL statement. Unbound wildcards +are interpreted as NULLs. Bindings are not reset by sqlite3_reset(). +But wildcards can be rebound to new values after an sqlite3_reset(). +

    + +

    +After an SQL statement has been prepared (and optionally bound), it +is executed using: +

    + +
    +   int sqlite3_step(sqlite3_stmt*);
    +
    + +

    +The sqlite3_step() routine returns SQLITE_ROW if it is returning a single +row of the result set, or SQLITE_DONE if execution has completed, either +normally or due to an error. It might also return SQLITE_BUSY if it is +unable to open the database file. If the return value is SQLITE_ROW, then +the following routines can be used to extract information about that row +of the result set: +

    + +
    +   const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_count(sqlite3_stmt*);
    +   const char *sqlite3_column_decltype(sqlite3_stmt *, int iCol);
    +   const void *sqlite3_column_decltype16(sqlite3_stmt *, int iCol);
    +   double sqlite3_column_double(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_int(sqlite3_stmt*, int iCol);
    +   long long int sqlite3_column_int64(sqlite3_stmt*, int iCol);
    +   const char *sqlite3_column_name(sqlite3_stmt*, int iCol);
    +   const void *sqlite3_column_name16(sqlite3_stmt*, int iCol);
    +   const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
    +   const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
    +   int sqlite3_column_type(sqlite3_stmt*, int iCol);
    +
    + +

    +The sqlite3_column_count() +function returns the number of columns in +the result set. sqlite3_column_count() can be called at any time after +sqlite3_prepare_v2(). sqlite3_data_count() works similarly to +sqlite3_column_count() except that it only works following sqlite3_step(). +If the previous call to sqlite3_step() returned SQLITE_DONE or an error code, +then sqlite3_data_count() will return 0 whereas sqlite3_column_count() will +continue to return the number of columns in the result set. +

    + +

    Returned data is examined using the other +sqlite3_column_***() functions, +all of which take a column number as their second parameter. Columns are +zero-indexed from left to right. Note that this is different to parameters, +which are indexed starting at one. +
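    Putting these pieces together, a sketch of a typical query loop follows; the table and
    column names are hypothetical and db is assumed to be an open connection.

   sqlite3_stmt *pStmt;
   int rc = sqlite3_prepare(db, "SELECT name, age FROM people WHERE age>?",
                            -1, &pStmt, 0);
   if( rc==SQLITE_OK ){
     sqlite3_bind_int(pStmt, 1, 21);                 /* parameters: 1-indexed */
     while( sqlite3_step(pStmt)==SQLITE_ROW ){
       const char *zName = (const char*)sqlite3_column_text(pStmt, 0);
       int age = sqlite3_column_int(pStmt, 1);       /* columns: 0-indexed */
       printf("%s is %d\n", zName, age);
     }
     sqlite3_finalize(pStmt);
   }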

    + +

    +The sqlite3_column_type() function returns the +datatype for the value in the Nth column. The return value is one +of these: +

    + +
    +   #define SQLITE_INTEGER  1
    +   #define SQLITE_FLOAT    2
    +   #define SQLITE_TEXT     3
    +   #define SQLITE_BLOB     4
    +   #define SQLITE_NULL     5
    +
    + +

    +The sqlite3_column_decltype() routine returns text which is the +declared type of the column in the CREATE TABLE statement. For an +expression, the return type is an empty string. sqlite3_column_name() +returns the name of the Nth column. sqlite3_column_bytes() returns +the number of bytes in a column that has type BLOB or the number of bytes +in a TEXT string with UTF-8 encoding. sqlite3_column_bytes16() returns +the same value for BLOBs but for TEXT strings returns the number of bytes +in a UTF-16 encoding. +sqlite3_column_blob() returns BLOB data. +sqlite3_column_text() returns TEXT data as UTF-8. +sqlite3_column_text16() returns TEXT data as UTF-16. +sqlite3_column_int() returns INTEGER data in the host machine's native +integer format. +sqlite3_column_int64() returns 64-bit INTEGER data. +Finally, sqlite3_column_double() returns floating point data. +

    + +

    +It is not necessary to retrieve data in the format specified by +sqlite3_column_type(). If a different format is requested, the data +is converted automatically. +

    + +

    +Data format conversions can invalidate the pointer returned by +prior calls to sqlite3_column_blob(), sqlite3_column_text(), and/or +sqlite3_column_text16(). Pointers might be invalidated in the following +cases: +

    +
      +
    • +The initial content is a BLOB and sqlite3_column_text() +or sqlite3_column_text16() +is called. A zero-terminator might need to be added to the string. +

    • +
    • +The initial content is UTF-8 text and sqlite3_column_bytes16() or +sqlite3_column_text16() is called. The content must be converted to UTF-16. +

    • +
    • +The initial content is UTF-16 text and sqlite3_column_bytes() or +sqlite3_column_text() is called. The content must be converted to UTF-8. +

    • +
    +

    +Note that conversions between UTF-16be and UTF-16le +are always done in place and do +not invalidate a prior pointer, though of course the content of the buffer +that the prior pointer points to will have been modified. Other kinds +of conversion are done in place when it is possible, but sometimes it is +not possible and in those cases prior pointers are invalidated. +

    + +

    +The safest and easiest to remember policy is this: assume that any +result from +

      +
    • sqlite3_column_blob(),
    • +
    • sqlite3_column_text(), or
    • +
    • sqlite3_column_text16()
    • +
    +is invalidated by subsequent calls to +
      +
    • sqlite3_column_bytes(),
    • +
    • sqlite3_column_bytes16(),
    • +
    • sqlite3_column_text(), or
    • +
    • sqlite3_column_text16().
    • +
    +This means that you should always call sqlite3_column_bytes() or +sqlite3_column_bytes16() before calling sqlite3_column_blob(), +sqlite3_column_text(), or sqlite3_column_text16(). +

    + +

    2.3 User-defined functions

    + +

    +User defined functions can be created using the following routine: +

    + +
    +   typedef struct sqlite3_value sqlite3_value;
    +   int sqlite3_create_function(
    +     sqlite3 *,
    +     const char *zFunctionName,
    +     int nArg,
    +     int eTextRep,
    +     void*,
    +     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xFinal)(sqlite3_context*)
    +   );
    +   int sqlite3_create_function16(
    +     sqlite3*,
    +     const void *zFunctionName,
    +     int nArg,
    +     int eTextRep,
    +     void*,
    +     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +     void (*xFinal)(sqlite3_context*)
    +   );
    +   #define SQLITE_UTF8     1
    +   #define SQLITE_UTF16    2
    +   #define SQLITE_UTF16BE  3
    +   #define SQLITE_UTF16LE  4
    +   #define SQLITE_ANY      5
    +
    + +

    +The nArg parameter specifies the number of arguments to the function. +A value of 0 indicates that any number of arguments is allowed. The +eTextRep parameter specifies what representation text values are expected +to be in for arguments to this function. The value of this parameter should +be one of the constants defined above. SQLite version 3 allows multiple +implementations of the same function using different text representations. +The database engine chooses the function that minimizes the number +of text conversions required. +

    + +

    +Normal functions specify only xFunc and leave xStep and xFinal set to NULL. +Aggregate functions specify xStep and xFinal and leave xFunc set to NULL. +There is no separate sqlite3_create_aggregate() API. +
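    As a sketch of the aggregate case, a hypothetical sumsq(X) aggregate (sum of squares)
    could be implemented and registered like this; db is assumed to be an open connection,
    and the context and result routines used here are described later in this section.

   typedef struct SumSqCtx { double sum; } SumSqCtx;

   static void sumsqStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
     /* The aggregate context is allocated and zeroed on the first call. */
     SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
     if( p ){
       double x = sqlite3_value_double(argv[0]);
       p->sum += x*x;
     }
   }

   static void sumsqFinal(sqlite3_context *ctx){
     SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
     sqlite3_result_double(ctx, p ? p->sum : 0.0);
   }

   sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0,
                           0, sumsqStep, sumsqFinal);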

    + +

    +The function name is specified in UTF-8. A separate sqlite3_create_function16() +API works the same as sqlite3_create_function() +except that the function name is specified in UTF-16 host byte order. +

    + +

    +Notice that the parameters to functions are now pointers to sqlite3_value +structures instead of pointers to strings as in SQLite version 2.X. +The following routines are used to extract useful information from these +"values": +

    + +
    +   const void *sqlite3_value_blob(sqlite3_value*);
    +   int sqlite3_value_bytes(sqlite3_value*);
    +   int sqlite3_value_bytes16(sqlite3_value*);
    +   double sqlite3_value_double(sqlite3_value*);
    +   int sqlite3_value_int(sqlite3_value*);
    +   long long int sqlite3_value_int64(sqlite3_value*);
    +   const unsigned char *sqlite3_value_text(sqlite3_value*);
    +   const void *sqlite3_value_text16(sqlite3_value*);
    +   int sqlite3_value_type(sqlite3_value*);
    +
    + +

    +Function implementations use the following APIs to acquire context and +to report results: +

    + +
    +   void *sqlite3_aggregate_context(sqlite3_context*, int nbyte);
    +   void *sqlite3_user_data(sqlite3_context*);
    +   void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*));
    +   void sqlite3_result_double(sqlite3_context*, double);
    +   void sqlite3_result_error(sqlite3_context*, const char*, int);
    +   void sqlite3_result_error16(sqlite3_context*, const void*, int);
    +   void sqlite3_result_int(sqlite3_context*, int);
    +   void sqlite3_result_int64(sqlite3_context*, long long int);
    +   void sqlite3_result_null(sqlite3_context*);
    +   void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*));
    +   void sqlite3_result_text16(sqlite3_context*, const void*, int n, void(*)(void*));
    +   void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
    +   void *sqlite3_get_auxdata(sqlite3_context*, int);
    +   void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*));
    +
    + +

    2.4 User-defined collating sequences

    + +

    +The following routines are used to implement user-defined +collating sequences: +

    + +
    +   sqlite3_create_collation(sqlite3*, const char *zName, int eTextRep, void*,
    +      int(*xCompare)(void*,int,const void*,int,const void*));
    +   sqlite3_create_collation16(sqlite3*, const void *zName, int eTextRep, void*,
    +      int(*xCompare)(void*,int,const void*,int,const void*));
    +   sqlite3_collation_needed(sqlite3*, void*, 
    +      void(*)(void*,sqlite3*,int eTextRep,const char*));
    +   sqlite3_collation_needed16(sqlite3*, void*,
    +      void(*)(void*,sqlite3*,int eTextRep,const void*));
    +
    + +

    +The sqlite3_create_collation() function specifies a collating sequence name +and a comparison function to implement that collating sequence. The +comparison function is only used for comparing text values. The eTextRep +parameter is one of SQLITE_UTF8, SQLITE_UTF16LE, SQLITE_UTF16BE, or +SQLITE_ANY to specify which text representation the comparison function works +with. Separate comparison functions can exist for the same collating +sequence for each of the UTF-8, UTF-16LE and UTF-16BE text representations. +The sqlite3_create_collation16() works like sqlite3_create_collation() except +that the collation name is specified in UTF-16 host byte order instead of +in UTF-8. +

    + +

    +The sqlite3_collation_needed() routine registers a callback which the +database engine will invoke if it encounters an unknown collating sequence. +The callback can look up an appropriate comparison function and invoke +sqlite3_create_collation() as needed. The fourth parameter to the callback +is the name of the collating sequence in UTF-8. For sqlite3_collation_needed16() +the callback sends the collating sequence name in UTF-16 host byte order. +
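    For example, a sketch of registering a simple ASCII case-insensitive collation; the
    collation name is hypothetical, db is an assumed open connection, and <ctype.h> is
    assumed to be included.

   static int nocaseCmp(void *pNotUsed, int n1, const void *p1,
                        int n2, const void *p2){
     const unsigned char *z1 = (const unsigned char*)p1;
     const unsigned char *z2 = (const unsigned char*)p2;
     int i, n = n1<n2 ? n1 : n2;
     for(i=0; i<n; i++){
       int c = tolower(z1[i]) - tolower(z2[i]);
       if( c ) return c;
     }
     return n1 - n2;
   }

   sqlite3_create_collation(db, "mynocase", SQLITE_UTF8, 0, nocaseCmp);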

    +
    +This page last modified 2008/07/11 13:23:12 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/capi3ref.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/capi3ref.html --- sqlite3-3.4.2/www/capi3ref.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/capi3ref.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,5182 @@ + + +C/C++ Interface For SQLite Version 3 + + + + + +

    C/C++ Interface For SQLite Version 3

    + + + +

    This page defines the C-language interface to SQLite.

    + +

    This is not a tutorial. These +pages are designed to be precise, not easy to read. +For a tutorial introduction see +SQLite In 3 Minutes Or Less and/or +the Introduction To The SQLite C/C++ Interface. +

    + +

    This page contains all C-language interface information +in a single HTML file. The same information is also +available broken out into +lots of small pages +for easier viewing, if you prefer.

    + +

    This document is created by a script which scans comments +in the source code files.

    + +
    + + + +

    Experimental And Deprecated Interfaces

    + +

    SQLite interfaces can be subdivided into three categories:

    + +
      +
    1. Stable
    2. +
    3. Experimental
    4. +
    5. Deprecated
    6. +
    + +

    Stable interfaces will be maintained indefinitely in a backwards +compatible way. An application that uses only stable interfaces +should always be able to relink against a newer version of SQLite +without any changes.

    + +

    Experimental interfaces are subject to change. +Applications that use experimental interfaces +may need to be modified when upgrading to a newer SQLite release. +When new interfaces are added to SQLite, they generally begin +as experimental interfaces. After an interface has been in use for +a while and the developers are confident that the design of the interface +is sound and worthy of long-term support, the interface is marked +as stable.

    + +

    Deprecated interfaces have been superseded by better methods of +accomplishing the same thing and should be avoided in new applications. +Deprecated interfaces continue to be supported for the sake of +backwards compatibility. At some point in the future, it is possible +that deprecated interfaces may be removed.

    + +

    Key points:

    + +
      +
    • Experimental interfaces are subject to change and/or removal +at any time.
    • + +
    • Deprecated interfaces should not be used in new code and might +be removed in some future release.
    • +
    + +
    +

    Objects:

    +

    Note: Objects marked with "(exp)" +are experimental and objects marked with +"(obs)" are deprecated.

    +

    +

    Constants:

    +

    Note: Constants marked with "(exp)" +are experimental and constants marked with +"(obs)" are deprecated.

    +

    +

    Functions:

    +

    Note: Functions marked with "(exp)" +are experimental and functions marked with +"(obs)" are deprecated.

    +

    +

    Configuration Options

    #define SQLITE_DBCONFIG_LOOKASIDE    1001  /* void* int int */
    +

    Important: This interface is experimental and is subject to change without notice.

    +These constants are the available integer configuration options that +can be passed as the second argument to the sqlite3_db_config() interface.

    + +

    New configuration options may be added in future releases of SQLite. +Existing configuration options might be discontinued. Applications +should check the return code from sqlite3_db_config() to make sure that +the call worked. The sqlite3_db_config() interface will return a +non-zero error code if a discontinued or unsupported configuration option +is invoked.

    + +

    +
    SQLITE_DBCONFIG_LOOKASIDE
    +
    This option takes three additional arguments that determine the +lookaside memory allocator configuration for the database connection. +The first argument (the third parameter to sqlite3_db_config()) is a +pointer to an 8-byte aligned memory buffer to use for lookaside memory. +The first argument may be NULL in which case SQLite will allocate the +lookaside buffer itself using sqlite3_malloc(). The second argument is the +size of each lookaside buffer slot and the third argument is the number of +slots. The size of the buffer in the first argument must be greater than +or equal to the product of the second and third arguments.
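    For example, a sketch of asking SQLite to allocate 64 lookaside slots of 512 bytes
    each for the connection db (an assumed open handle):

   /* NULL buffer: SQLite allocates the lookaside memory itself. */
   int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 512, 64);
   if( rc!=SQLITE_OK ){
     /* The option was not recognized or could not be applied. */
   }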

    + +

    +


    +

    Status Parameters for database connections

    #define SQLITE_DBSTATUS_LOOKASIDE_USED     0
    +

    Important: This interface is experimental and is subject to change without notice.

    +Status verbs for sqlite3_db_status().

    + +

    +
    SQLITE_DBSTATUS_LOOKASIDE_USED
    +
    This parameter returns the number of lookaside memory slots currently +checked out.
    +
    +


    +

    Online Backup Object

    typedef struct sqlite3_backup sqlite3_backup;
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_backup object records state information about an ongoing +online backup operation. The sqlite3_backup object is created by +a call to sqlite3_backup_init() and is destroyed by a call to +sqlite3_backup_finish().

    + +

    See Also: Using the SQLite Online Backup API +
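    A sketch of copying an entire database using this object, where pDest and pSrc are
    assumed open connections:

   sqlite3_backup *pBackup = sqlite3_backup_init(pDest, "main", pSrc, "main");
   if( pBackup ){
     sqlite3_backup_step(pBackup, -1);   /* -1: copy every remaining page */
     sqlite3_backup_finish(pBackup);
   }
   int rc = sqlite3_errcode(pDest);      /* SQLITE_OK if the copy succeeded */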


    +

    SQL Function Context Object

    typedef struct sqlite3_context sqlite3_context;
    +

    +The context in which an SQL function executes is stored in an +sqlite3_context object. A pointer to an sqlite3_context object +is always the first parameter to application-defined SQL functions. +The application-defined SQL function implementation will pass this +pointer through into calls to sqlite3_result(), +sqlite3_aggregate_context(), sqlite3_user_data(), +sqlite3_context_db_handle(), sqlite3_get_auxdata(), +and/or sqlite3_set_auxdata(). +


    +

    OS Interface Open File Handle

    typedef struct sqlite3_file sqlite3_file;
    +struct sqlite3_file {
    +  const struct sqlite3_io_methods *pMethods;  /* Methods for an open file */
    +};
    +

    +An sqlite3_file object represents an open file in the OS +interface layer. Individual OS interface implementations will +want to subclass this object by appending additional fields +for their own use. The pMethods entry is a pointer to an +sqlite3_io_methods object that defines methods for performing +I/O operations on the open file. +


    +

    OS Interface File Virtual Methods Object

    typedef struct sqlite3_io_methods sqlite3_io_methods;
    +struct sqlite3_io_methods {
    +  int iVersion;
    +  int (*xClose)(sqlite3_file*);
    +  int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
    +  int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
    +  int (*xSync)(sqlite3_file*, int flags);
    +  int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
    +  int (*xLock)(sqlite3_file*, int);
    +  int (*xUnlock)(sqlite3_file*, int);
    +  int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);
    +  int (*xFileControl)(sqlite3_file*, int op, void *pArg);
    +  int (*xSectorSize)(sqlite3_file*);
    +  int (*xDeviceCharacteristics)(sqlite3_file*);
    +  /* Additional methods may be added in future releases */
    +};
    +

    +Every file opened by the sqlite3_vfs xOpen method populates an +sqlite3_file object (or, more commonly, a subclass of the +sqlite3_file object) with a pointer to an instance of this object. +This object defines the methods used to perform various operations +against the open file represented by the sqlite3_file object.

    + +

    If the xOpen method sets the sqlite3_file.pMethods element +to a non-NULL pointer, then the sqlite3_io_methods.xClose method +may be invoked even if the xOpen reported that it failed. The +only way to prevent a call to xClose following a failed xOpen +is for the xOpen to set the sqlite3_file.pMethods element to NULL.

    + +

    The flags argument to xSync may be one of SQLITE_SYNC_NORMAL or +SQLITE_SYNC_FULL. The first choice is the normal fsync(). +The second choice is a Mac OS X style fullsync. The SQLITE_SYNC_DATAONLY +flag may be ORed in to indicate that only the data of the file +and not its inode needs to be synced.

    + +

    The integer values to xLock() and xUnlock() are one of +SQLITE_LOCK_NONE, SQLITE_LOCK_SHARED, SQLITE_LOCK_RESERVED, +SQLITE_LOCK_PENDING, or SQLITE_LOCK_EXCLUSIVE. +

    +xLock() increases the lock. xUnlock() decreases the lock. +The xCheckReservedLock() method checks whether any database connection, +either in this process or in some other process, is holding a RESERVED, +PENDING, or EXCLUSIVE lock on the file. It returns true +if such a lock exists and false otherwise.

    + +

    The xFileControl() method is a generic interface that allows custom +VFS implementations to directly control an open file using the +sqlite3_file_control() interface. The second "op" argument is an +integer opcode. The third argument is a generic pointer intended to +point to a structure that may contain arguments or space in which to +write return values. Potential uses for xFileControl() might be +functions to enable blocking locks with timeouts, to change the +locking strategy (for example to use dot-file locks), to inquire +about the status of a lock, or to break stale locks. The SQLite +core reserves all opcodes less than 100 for its own use. +A list of opcodes less than 100 is available. +Applications that define a custom xFileControl method should use opcodes +greater than 100 to avoid conflicts.

    + +

    The xSectorSize() method returns the sector size of the +device that underlies the file. The sector size is the +minimum write that can be performed without disturbing +other bytes in the file. The xDeviceCharacteristics() +method returns a bit vector describing behaviors of the +underlying device:

    + +

    + +

    The SQLITE_IOCAP_ATOMIC property means that all writes of +any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +mean that writes of blocks that are nnn bytes in size and +are aligned to an address which is an integer multiple of +nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +that when data is appended to a file, the data is appended +first then the size of the file is extended, never the other +way around. The SQLITE_IOCAP_SEQUENTIAL property means that +information is written to disk in the same order as calls +to xWrite().

    + +

    If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill +in the unread portions of the buffer with zeros. A VFS that +fails to zero-fill short reads might seem to work. However, +failure to zero-fill short reads will eventually lead to +database corruption. +


    +

    Memory Allocation Routines

    typedef struct sqlite3_mem_methods sqlite3_mem_methods;
    +struct sqlite3_mem_methods {
    +  void *(*xMalloc)(int);         /* Memory allocation function */
    +  void (*xFree)(void*);          /* Free a prior allocation */
    +  void *(*xRealloc)(void*,int);  /* Resize an allocation */
    +  int (*xSize)(void*);           /* Return the size of an allocation */
    +  int (*xRoundup)(int);          /* Round up request size to allocation size */
    +  int (*xInit)(void*);           /* Initialize the memory allocator */
    +  void (*xShutdown)(void*);      /* Deinitialize the memory allocator */
    +  void *pAppData;                /* Argument to xInit() and xShutdown() */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +An instance of this object defines the interface between SQLite +and low-level memory allocation routines.

    + +

    This object is used in only one place in the SQLite interface. +A pointer to an instance of this object is the argument to +sqlite3_config() when the configuration option is +SQLITE_CONFIG_MALLOC. By creating an instance of this object +and passing it to sqlite3_config() during configuration, an +application can specify an alternative memory allocation subsystem +for SQLite to use for all of its dynamic memory needs.

    + +

    Note that SQLite comes with a built-in memory allocator that is +perfectly adequate for the overwhelming majority of applications +and that this object is only useful to a tiny minority of applications +with specialized memory allocation requirements. This object is +also used during testing of SQLite in order to specify an alternative +memory allocator that simulates out-of-memory conditions in +order to verify that SQLite recovers gracefully from such +conditions.

    + +

    The xMalloc, xFree, and xRealloc methods must work like the +malloc(), free(), and realloc() functions from the standard library.

    + +

    xSize should return the allocated size of a memory allocation +previously obtained from xMalloc or xRealloc. The allocated size +is always at least as big as the requested size but may be larger.

    + +

    The xRoundup method returns what would be the allocated size of +a memory allocation given a particular requested size. Most memory +allocators round up memory allocations at least to the next multiple +of 8. Some allocators round up to a larger multiple or to a power of 2.

    + +

The xInit method initializes the memory allocator. (For example, it might allocate any required mutexes or initialize internal data structures.) The xShutdown method is invoked (indirectly) by sqlite3_shutdown() and should deallocate any resources acquired by xInit. The pAppData pointer is used as the only parameter to both xInit and xShutdown.
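A minimal sketch of such an allocator, built on the standard malloc() and storing an 8-byte size header in front of each allocation so that xSize() can be answered (all names below are hypothetical):

+#include <sqlite3.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define HDR 8   /* size header kept in front of every allocation */
+
+static void *memMalloc(int n){
+  char *p = malloc(n + HDR);
+  if( p==0 ) return 0;
+  memcpy(p, &n, sizeof(int));          /* remember the requested size */
+  return p + HDR;
+}
+static void memFree(void *p){
+  if( p ) free((char*)p - HDR);
+}
+static int memSize(void *p){
+  int n = 0;
+  if( p ) memcpy(&n, (char*)p - HDR, sizeof(int));
+  return n;
+}
+static void *memRealloc(void *p, int n){
+  char *pNew;
+  if( p==0 ) return memMalloc(n);
+  pNew = realloc((char*)p - HDR, n + HDR);
+  if( pNew==0 ) return 0;
+  memcpy(pNew, &n, sizeof(int));
+  return pNew + HDR;
+}
+static int memRoundup(int n){ return (n+7)&~7; }   /* next multiple of 8 */
+static int memInit(void *pAppData){ (void)pAppData; return SQLITE_OK; }
+static void memShutdown(void *pAppData){ (void)pAppData; }
+
+static const sqlite3_mem_methods memMethods = {
+  memMalloc, memFree, memRealloc, memSize, memRoundup,
+  memInit, memShutdown,
+  0                       /* pAppData passed to xInit and xShutdown */
+};
+
+/* Registered before sqlite3_initialize() with:
+**   sqlite3_config(SQLITE_CONFIG_MALLOC, &memMethods);
+*/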


    +

    Mutex Handle

    typedef struct sqlite3_mutex sqlite3_mutex;
    +

    +The mutex module within SQLite defines sqlite3_mutex to be an +abstract type for a mutex object. The SQLite core never looks +at the internal representation of an sqlite3_mutex. It only +deals with pointers to the sqlite3_mutex object.

    + +

    Mutexes are created using sqlite3_mutex_alloc(). +


    +

    Mutex Methods Object

    typedef struct sqlite3_mutex_methods sqlite3_mutex_methods;
    +struct sqlite3_mutex_methods {
    +  int (*xMutexInit)(void);
    +  int (*xMutexEnd)(void);
    +  sqlite3_mutex *(*xMutexAlloc)(int);
    +  void (*xMutexFree)(sqlite3_mutex *);
    +  void (*xMutexEnter)(sqlite3_mutex *);
    +  int (*xMutexTry)(sqlite3_mutex *);
    +  void (*xMutexLeave)(sqlite3_mutex *);
    +  int (*xMutexHeld)(sqlite3_mutex *);
    +  int (*xMutexNotheld)(sqlite3_mutex *);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +An instance of this structure defines the low-level routines +used to allocate and use mutexes.

    + +

Usually, the default mutex implementations provided by SQLite are sufficient; however, the user has the option of substituting a custom implementation for specialized deployments or systems for which SQLite does not provide a suitable implementation. In this case, the user creates and populates an instance of this structure to pass to sqlite3_config() along with the SQLITE_CONFIG_MUTEX option. Additionally, an instance of this structure can be used as an output variable when querying the system for the current mutex implementation, using the SQLITE_CONFIG_GETMUTEX option.
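For example (a sketch only), the current implementation can be queried before the library is initialized; a replacement would be installed the same way with SQLITE_CONFIG_MUTEX:

+#include <sqlite3.h>
+
+/* Sketch: must run before sqlite3_initialize() or after sqlite3_shutdown(). */
+static int inspectMutexImpl(void){
+  sqlite3_mutex_methods cur;
+  int rc = sqlite3_config(SQLITE_CONFIG_GETMUTEX, &cur);
+  if( rc!=SQLITE_OK ) return rc;
+  /* cur.xMutexAlloc, cur.xMutexEnter, etc. could now be wrapped and the
+  ** wrapper registered with sqlite3_config(SQLITE_CONFIG_MUTEX, &wrapped),
+  ** where "wrapped" is a hypothetical sqlite3_mutex_methods object. */
+  return SQLITE_OK;
+}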

    + +

    The xMutexInit method defined by this structure is invoked as +part of system initialization by the sqlite3_initialize() function. + The xMutexInit routine shall be called by SQLite once for each +effective call to sqlite3_initialize().

    + +

    The xMutexEnd method defined by this structure is invoked as +part of system shutdown by the sqlite3_shutdown() function. The +implementation of this method is expected to release all outstanding +resources obtained by the mutex methods implementation, especially +those obtained by the xMutexInit method. The xMutexEnd() +interface shall be invoked once for each call to sqlite3_shutdown().

    + +

    The remaining seven methods defined by this structure (xMutexAlloc, +xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and +xMutexNotheld) implement the following interfaces (respectively):

    + +

    + +

The only difference is that the public sqlite3_XXX functions enumerated above silently ignore any invocations that pass a NULL pointer instead of a valid mutex handle. The implementations of the methods defined by this structure are not required to handle this case; the results of passing a NULL pointer instead of a valid mutex handle are undefined (i.e. it is acceptable to provide an implementation that segfaults if it is passed a NULL pointer).


    +

    Custom Page Cache Object

    typedef struct sqlite3_pcache sqlite3_pcache;
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_pcache type is opaque. It is implemented by +the pluggable module. The SQLite core has no knowledge of +its size or internal structure and never deals with the +sqlite3_pcache object except by holding and passing pointers +to the object.

    + +

    See sqlite3_pcache_methods for additional information. +


    +

    Application Defined Page Cache.

    typedef struct sqlite3_pcache_methods sqlite3_pcache_methods;
    +struct sqlite3_pcache_methods {
    +  void *pArg;
    +  int (*xInit)(void*);
    +  void (*xShutdown)(void*);
    +  sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable);
    +  void (*xCachesize)(sqlite3_pcache*, int nCachesize);
    +  int (*xPagecount)(sqlite3_pcache*);
    +  void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
    +  void (*xUnpin)(sqlite3_pcache*, void*, int discard);
    +  void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
    +  void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
    +  void (*xDestroy)(sqlite3_pcache*);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_config(SQLITE_CONFIG_PCACHE, ...) interface can +register an alternative page cache implementation by passing in an +instance of the sqlite3_pcache_methods structure. The majority of the +heap memory used by sqlite is used by the page cache to cache data read +from, or ready to be written to, the database file. By implementing a +custom page cache using this API, an application can control more +precisely the amount of memory consumed by sqlite, the way in which +said memory is allocated and released, and the policies used to +determine exactly which parts of a database file are cached and for +how long.
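The registration itself is a single sqlite3_config() call made before sqlite3_initialize(). The sketch below shows only the wiring; the myPcache* functions are hypothetical stubs that would implement the behaviour described in the rest of this section:

+#include <sqlite3.h>
+
+/* Hypothetical cache implementation, defined elsewhere. */
+extern int   myPcacheInit(void*);
+extern void  myPcacheShutdown(void*);
+extern sqlite3_pcache *myPcacheCreate(int szPage, int bPurgeable);
+extern void  myPcacheCachesize(sqlite3_pcache*, int nCachesize);
+extern int   myPcachePagecount(sqlite3_pcache*);
+extern void *myPcacheFetch(sqlite3_pcache*, unsigned key, int createFlag);
+extern void  myPcacheUnpin(sqlite3_pcache*, void*, int discard);
+extern void  myPcacheRekey(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
+extern void  myPcacheTruncate(sqlite3_pcache*, unsigned iLimit);
+extern void  myPcacheDestroy(sqlite3_pcache*);
+
+static const sqlite3_pcache_methods myPcache = {
+  0,                                     /* pArg passed to xInit/xShutdown */
+  myPcacheInit, myPcacheShutdown, myPcacheCreate, myPcacheCachesize,
+  myPcachePagecount, myPcacheFetch, myPcacheUnpin, myPcacheRekey,
+  myPcacheTruncate, myPcacheDestroy
+};
+
+/* Registered before sqlite3_initialize() with:
+**   sqlite3_config(SQLITE_CONFIG_PCACHE, &myPcache);
+*/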

    + +

    The contents of the structure are copied to an internal buffer by sqlite +within the call to sqlite3_config.

    + +

    The xInit() method is called once for each call to sqlite3_initialize() +(usually only once during the lifetime of the process). It is passed +a copy of the sqlite3_pcache_methods.pArg value. It can be used to set +up global structures and mutexes required by the custom page cache +implementation. The xShutdown() method is called from within +sqlite3_shutdown(), if the application invokes this API. It can be used +to clean up any outstanding resources before process shutdown, if required.

    + +

    The xCreate() method is used to construct a new cache instance. The +first parameter, szPage, is the size in bytes of the pages that must +be allocated by the cache. szPage will not be a power of two. The +second argument, bPurgeable, is true if the cache being created will +be used to cache database pages read from a file stored on disk, or +false if it is used for an in-memory database. The cache implementation +does not have to do anything special based on the value of bPurgeable, +it is purely advisory.

    + +

The xCachesize() method may be called at any time by SQLite to set the suggested maximum cache size (the number of pages stored) for the cache instance passed as the first argument. This is the value configured using the SQLite "PRAGMA cache_size" command. As with the bPurgeable parameter, the implementation is not required to do anything special with this value; it is advisory only.

    + +

    The xPagecount() method should return the number of pages currently +stored in the cache supplied as an argument.

    + +

The xFetch() method is used to fetch a page and return a pointer to it. A 'page', in this context, is a buffer of szPage bytes aligned at an 8-byte boundary. The page to be fetched is determined by the key. The minimum key value is 1. After it has been retrieved using xFetch, the page is considered to be pinned.

    + +

    If the requested page is already in the page cache, then a pointer to +the cached buffer should be returned with its contents intact. If the +page is not already in the cache, then the expected behaviour of the +cache is determined by the value of the createFlag parameter passed +to xFetch, according to the following table:

    + +

    +
createFlag   Expected Behaviour
----------   ------------------
0            NULL should be returned.  No new cache entry is created.

1            If createFlag is set to 1, this indicates that SQLite is holding
             pinned pages that can be unpinned by writing their contents to the
             database file (a relatively expensive operation).  In this situation
             the cache implementation has two choices: it can return NULL, in
             which case SQLite will attempt to unpin one or more pages before
             re-requesting the same page, or it can allocate a new page and
             return a pointer to it.  If a new page is allocated, then the first
             sizeof(void*) bytes of it (at least) must be zeroed before it is
             returned.

2            If createFlag is set to 2, then SQLite is not holding any pinned
             pages associated with the specific cache passed as the first
             argument to xFetch() that can be unpinned.  The cache implementation
             should attempt to allocate a new cache entry and return a pointer to
             it.  Again, the first sizeof(void*) bytes of the page should be
             zeroed before it is returned.  If the xFetch() method returns NULL
             when createFlag==2, SQLite assumes that a memory allocation failed
             and returns SQLITE_NOMEM to the user.

    + +

    xUnpin() is called by SQLite with a pointer to a currently pinned page +as its second argument. If the third parameter, discard, is non-zero, +then the page should be evicted from the cache. In this case SQLite +assumes that the next time the page is retrieved from the cache using +the xFetch() method, it will be zeroed. If the discard parameter is +zero, then the page is considered to be unpinned. The cache implementation +may choose to reclaim (free or recycle) unpinned pages at any time. +SQLite assumes that next time the page is retrieved from the cache +it will either be zeroed, or contain the same data that it did when it +was unpinned.

    + +

    The cache is not required to perform any reference counting. A single +call to xUnpin() unpins the page regardless of the number of prior calls +to xFetch().

    + +

The xRekey() method is used to change the key value associated with the page passed as the second argument from oldKey to newKey. If the cache previously contained an entry associated with newKey, it should be discarded. Any prior cache entry associated with newKey is guaranteed not to be pinned.

    + +

    When SQLite calls the xTruncate() method, the cache must discard all +existing cache entries with page numbers (keys) greater than or equal +to the value of the iLimit parameter passed to xTruncate(). If any +of these pages are pinned, they are implicitly unpinned, meaning that +they can be safely discarded.

    + +

    The xDestroy() method is used to delete a cache allocated by xCreate(). +All resources associated with the specified cache should be freed. After +calling the xDestroy() method, SQLite considers the sqlite3_pcache* +handle invalid, and will not use it with any other sqlite3_pcache_methods +functions. +


    +

    Name Of The Folder Holding Temporary Files

    SQLITE_EXTERN char *sqlite3_temp_directory;
    +

    +If this global variable is made to point to a string which is +the name of a folder (a.k.a. directory), then all temporary files +created by SQLite will be placed in that directory. If this variable +is a NULL pointer, then SQLite performs a search for an appropriate +temporary file directory.

    + +

    It is not safe to read or modify this variable in more than one +thread at a time. It is not safe to read or modify this variable +if a database connection is being used at the same time in a separate +thread. +It is intended that this variable be set once +as part of process initialization and before any SQLite interface +routines have been called and that this variable remain unchanged +thereafter.

    + +

    The temp_store_directory pragma may modify this variable and cause +it to point to memory obtained from sqlite3_malloc. Furthermore, +the temp_store_directory pragma always assumes that any string +that this variable points to is held in memory obtained from +sqlite3_malloc and the pragma may attempt to free that memory +using sqlite3_free. +Hence, if this variable is modified directly, either it should be +made NULL or made to point to memory obtained from sqlite3_malloc +or else the use of the temp_store_directory pragma should be avoided. +
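A sketch that respects those rules by pointing the variable at sqlite3_malloc()-obtained memory during start-up (the directory name is only an example):

+#include <sqlite3.h>
+
+/* Run once during process initialization, before any other SQLite calls. */
+static void setTempDir(void){
+  /* sqlite3_mprintf() returns memory obtained from sqlite3_malloc(), which
+  ** keeps the variable compatible with the temp_store_directory pragma. */
+  sqlite3_temp_directory = sqlite3_mprintf("%s", "/var/tmp/myapp");
+}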


    +

    OS Interface Object

    typedef struct sqlite3_vfs sqlite3_vfs;
    +struct sqlite3_vfs {
    +  int iVersion;            /* Structure version number */
    +  int szOsFile;            /* Size of subclassed sqlite3_file */
    +  int mxPathname;          /* Maximum file pathname length */
    +  sqlite3_vfs *pNext;      /* Next registered VFS */
    +  const char *zName;       /* Name of this virtual file system */
    +  void *pAppData;          /* Pointer to application-specific data */
    +  int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
    +               int flags, int *pOutFlags);
    +  int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
    +  int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
    +  int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
    +  void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
    +  void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
    +  void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
    +  void (*xDlClose)(sqlite3_vfs*, void*);
    +  int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
    +  int (*xSleep)(sqlite3_vfs*, int microseconds);
    +  int (*xCurrentTime)(sqlite3_vfs*, double*);
    +  int (*xGetLastError)(sqlite3_vfs*, int, char *);
+  /* New fields may be appended in future versions.  The iVersion
    +  ** value will increment whenever this happens. */
    +};
    +

    +An instance of the sqlite3_vfs object defines the interface between +the SQLite core and the underlying operating system. The "vfs" +in the name of the object stands for "virtual file system".

    + +

The value of the iVersion field is initially 1 but may be larger in future versions of SQLite. Additional fields may be appended to this object when the iVersion value is increased. Note that the structure of the sqlite3_vfs object changed in the transition between SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not modified.

    + +

    The szOsFile field is the size of the subclassed sqlite3_file +structure used by this VFS. mxPathname is the maximum length of +a pathname in this VFS.

    + +

    Registered sqlite3_vfs objects are kept on a linked list formed by +the pNext pointer. The sqlite3_vfs_register() +and sqlite3_vfs_unregister() interfaces manage this list +in a thread-safe way. The sqlite3_vfs_find() interface +searches the list. Neither the application code nor the VFS +implementation should use the pNext pointer.
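For illustration (myVfs is a hypothetical, fully populated sqlite3_vfs object), a typical lookup and registration looks like this:

+#include <sqlite3.h>
+
+extern sqlite3_vfs myVfs;    /* hypothetical VFS defined elsewhere */
+
+static int installVfs(void){
+  sqlite3_vfs *pDefault = sqlite3_vfs_find(0);   /* 0 selects the default VFS */
+  if( pDefault==0 ) return SQLITE_ERROR;
+  return sqlite3_vfs_register(&myVfs, 0);        /* 0: do not make it the default */
+}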

    + +

    The pNext field is the only field in the sqlite3_vfs +structure that SQLite will ever modify. SQLite will only access +or modify this field while holding a particular static mutex. +The application should never modify anything within the sqlite3_vfs +object once the object has been registered.

    + +

    The zName field holds the name of the VFS module. The name must +be unique across all VFS modules.

    + +

SQLite will guarantee that the zFilename parameter to xOpen is either a NULL pointer or a string obtained from xFullPathname(). SQLite further guarantees that the string will be valid and unchanged until xClose() is called. Because of the previous sentence, the sqlite3_file can safely store a pointer to the filename if it needs to remember the filename for some reason. If the zFilename parameter to xOpen is a NULL pointer then xOpen must invent its own temporary name for the file. Whenever the zFilename parameter is NULL it will also be the case that the flags parameter will include SQLITE_OPEN_DELETEONCLOSE.

    + +

    The flags argument to xOpen() includes all bits set in +the flags argument to sqlite3_open_v2(). Or if sqlite3_open() +or sqlite3_open16() is used, then flags includes at least +SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE. +If xOpen() opens a file read-only then it sets *pOutFlags to +include SQLITE_OPEN_READONLY. Other bits in *pOutFlags may be set.

    + +

    SQLite will also add one of the following flags to the xOpen() +call, depending on the object being opened:

    + +

    + +

    The file I/O implementation can use the object type flags to +change the way it deals with files. For example, an application +that does not care about crash recovery or rollback might make +the open of a journal file a no-op. Writes to this journal would +also be no-ops, and any attempt to read the journal would return +SQLITE_IOERR. Or the implementation might recognize that a database +file will be doing page-aligned sector reads and writes in a random +order and set up its I/O subsystem accordingly.

    + +

    SQLite might also add one of the following flags to the xOpen method:

    + +

    + +

    The SQLITE_OPEN_DELETEONCLOSE flag means the file should be +deleted when it is closed. The SQLITE_OPEN_DELETEONCLOSE +will be set for TEMP databases, journals and for subjournals.

    + +

The SQLITE_OPEN_EXCLUSIVE flag is always used in conjunction with the SQLITE_OPEN_CREATE flag, which are both directly analogous to the O_EXCL and O_CREAT flags of the POSIX open() API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with SQLITE_OPEN_CREATE, is used to indicate that the file should always be created, and that it is an error if it already exists. It is not used to indicate the file should be opened for exclusive access.

    + +

    At least szOsFile bytes of memory are allocated by SQLite +to hold the sqlite3_file structure passed as the third +argument to xOpen. The xOpen method does not have to +allocate the structure; it should just fill it in. Note that +the xOpen method must set the sqlite3_file.pMethods to either +a valid sqlite3_io_methods object or to NULL. xOpen must do +this even if the open fails. SQLite expects that the sqlite3_file.pMethods +element will be valid after xOpen returns regardless of the success +or failure of the xOpen call.

    + +

    The flags argument to xAccess() may be SQLITE_ACCESS_EXISTS +to test for the existence of a file, or SQLITE_ACCESS_READWRITE to +test whether a file is readable and writable, or SQLITE_ACCESS_READ +to test whether a file is at least readable. The file can be a +directory.

    + +

    SQLite will always allocate at least mxPathname+1 bytes for the +output buffer xFullPathname. The exact size of the output buffer +is also passed as a parameter to both methods. If the output buffer +is not large enough, SQLITE_CANTOPEN should be returned. Since this is +handled as a fatal error by SQLite, vfs implementations should endeavor +to prevent this by setting mxPathname to a sufficiently large value.

    + +

    The xRandomness(), xSleep(), and xCurrentTime() interfaces +are not strictly a part of the filesystem, but they are +included in the VFS structure for completeness. +The xRandomness() function attempts to return nBytes bytes +of good-quality randomness into zOut. The return value is +the actual number of bytes of randomness obtained. +The xSleep() method causes the calling thread to sleep for at +least the number of microseconds given. The xCurrentTime() +method returns a Julian Day Number for the current date and time.

    + +


    +

    Virtual Table Instance Object

    struct sqlite3_vtab {
    +  const sqlite3_module *pModule;  /* The module for this virtual table */
    +  int nRef;                       /* Used internally */
    +  char *zErrMsg;                  /* Error message from sqlite3_mprintf() */
    +  /* Virtual table implementations will typically add additional fields */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +Every virtual table module implementation uses a subclass +of the following structure to describe a particular instance +of the virtual table. Each subclass will +be tailored to the specific needs of the module implementation. +The purpose of this superclass is to define certain fields that are +common to all module implementations.

    + +

Virtual table methods can set an error message by assigning a string obtained from sqlite3_mprintf() to zErrMsg. The method should take care that any prior string is freed by a call to sqlite3_free() prior to assigning a new string to zErrMsg. After the error message is delivered up to the client application, the string will be automatically freed by sqlite3_free() and the zErrMsg field will be zeroed.


    +

    Obtain Aggregate Function Context

    void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
    +

+The implementations of aggregate SQL functions use this routine to allocate a structure for storing their state.

    + +

The first time the sqlite3_aggregate_context() routine is called for a particular aggregate, SQLite allocates nBytes of memory, zeroes out that memory, and returns a pointer to it. On second and subsequent calls to sqlite3_aggregate_context() for the same aggregate function instance, the same buffer is returned. The implementation of the aggregate can use the returned buffer to accumulate data.

    + +

    SQLite automatically frees the allocated buffer when the aggregate +query concludes.

    + +

    The first parameter should be a copy of the +SQL function context that is the first parameter +to the callback routine that implements the aggregate function.

    + +

    This routine must be called from the same thread in which +the aggregate SQL function is running.
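As a sketch, an aggregate that sums integer arguments might use sqlite3_aggregate_context() like this (the struct and function names are examples only):

+#include <sqlite3.h>
+
+typedef struct SumCtx { sqlite3_int64 total; } SumCtx;
+
+static void sumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+  /* The first call allocates a zeroed SumCtx; later calls return the same buffer. */
+  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(SumCtx));
+  (void)argc;
+  if( p==0 ) return;                     /* out of memory */
+  p->total += sqlite3_value_int64(argv[0]);
+}
+
+static void sumFinal(sqlite3_context *ctx){
+  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(SumCtx));
+  sqlite3_result_int64(ctx, p ? p->total : 0);
+}
+
+/* Registered with (db is an open connection; "mysum" is an example name):
+**   sqlite3_create_function(db, "mysum", 1, SQLITE_UTF8, 0, 0, sumStep, sumFinal);
+*/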

    + +

    Requirements: +H16211 H16213 H16215 H16217 +


    +

Automatically Load An Extension

    int sqlite3_auto_extension(void (*xEntryPoint)(void));
    +

    +This API can be invoked at program startup in order to register +one or more statically linked extensions that will be available +to all new database connections.

    + +

    This routine stores a pointer to the extension in an array that is +obtained from sqlite3_malloc(). If you run a memory leak checker +on your program and it reports a leak because of this array, invoke +sqlite3_reset_auto_extension() prior to shutdown to free the memory.

    + +

    This function registers an extension entry point that is +automatically invoked whenever a new database connection +is opened using sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2().

    + +

    Duplicate extensions are detected so calling this routine +multiple times with the same extension is harmless.
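A sketch of a typical registration, where myExtInit is a hypothetical statically linked entry point matching the prototype shown above:

+#include <sqlite3.h>
+
+extern void myExtInit(void);   /* hypothetical extension entry point */
+
+static int registerExtension(void){
+  /* After this call, every sqlite3_open*() invocation also runs myExtInit. */
+  return sqlite3_auto_extension(myExtInit);
+}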

    + +

    This routine stores a pointer to the extension in an array +that is obtained from sqlite3_malloc().

    + +

    Automatic extensions apply across all threads. +


    +

    Number Of SQL Parameters

    int sqlite3_bind_parameter_count(sqlite3_stmt*);
    +

    +This routine can be used to find the number of SQL parameters +in a prepared statement. SQL parameters are tokens of the +form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as +placeholders for values that are bound +to the parameters at a later time.

    + +

This routine actually returns the index of the largest (rightmost) parameter. For all forms except ?NNN, this will correspond to the number of unique parameters. If parameters of the ?NNN form are used, there may be gaps in the list.

    + +

    See also: sqlite3_bind(), +sqlite3_bind_parameter_name(), and +sqlite3_bind_parameter_index().

    + +

    Requirements: +H13601 +


    +

    Index Of A Parameter With A Given Name

    int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
    +

    +Return the index of an SQL parameter given its name. The +index value returned is suitable for use as the second +parameter to sqlite3_bind(). A zero +is returned if no matching parameter is found. The parameter +name must be given in UTF-8 even if the original statement +was prepared from UTF-16 text using sqlite3_prepare16_v2().
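For example (a sketch; the table and parameter names are invented), a named parameter is usually bound by first looking up its index:

+#include <sqlite3.h>
+
+static int bindByName(sqlite3 *db){
+  sqlite3_stmt *pStmt = 0;
+  int rc = sqlite3_prepare_v2(db, "SELECT * FROM t WHERE a = :val", -1, &pStmt, 0);
+  if( rc==SQLITE_OK ){
+    int i = sqlite3_bind_parameter_index(pStmt, ":val");   /* 0 if not found */
+    if( i>0 ) rc = sqlite3_bind_int(pStmt, i, 42);
+  }
+  sqlite3_finalize(pStmt);
+  return rc;
+}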

    + +

See also: sqlite3_bind(), sqlite3_bind_parameter_count(), and sqlite3_bind_parameter_name().

    + +

    Requirements: +H13641 +


    +

    Name Of A Host Parameter

    const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
    +

    +This routine returns a pointer to the name of the n-th +SQL parameter in a prepared statement. +SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" +have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" +respectively. +In other words, the initial ":" or "$" or "@" or "?" +is included as part of the name. +Parameters of the form "?" without a following integer have no name +and are also referred to as "anonymous parameters".

    + +

    The first host parameter has an index of 1, not 0.

    + +

    If the value n is out of range or if the n-th parameter is +nameless, then NULL is returned. The returned string is +always in UTF-8 encoding even if the named parameter was +originally specified as UTF-16 in sqlite3_prepare16() or +sqlite3_prepare16_v2().

    + +

    See also: sqlite3_bind(), +sqlite3_bind_parameter_count(), and +sqlite3_bind_parameter_index().

    + +

    Requirements: +H13621 +


    +

    Return The Size Of An Open BLOB

    int sqlite3_blob_bytes(sqlite3_blob *);
    +

+Returns the size in bytes of the BLOB accessible via the successfully opened BLOB handle in its only argument. The incremental blob I/O routines can only read or overwrite existing blob content; they cannot change the size of a blob.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    Requirements: +H17843 +


    +

    Close A BLOB Handle

    int sqlite3_blob_close(sqlite3_blob *);
    +

    +Closes an open BLOB handle.

    + +

    Closing a BLOB shall cause the current transaction to commit +if there are no other BLOBs, no pending prepared statements, and the +database connection is in autocommit mode. +If any writes were made to the BLOB, they might be held in cache +until the close operation if they will fit.

    + +

    Closing the BLOB often forces the changes +out to disk and so if any I/O errors occur, they will likely occur +at the time when the BLOB is closed. Any errors that occur during +closing are reported as a non-zero return value.

    + +

    The BLOB is closed unconditionally. Even if this routine returns +an error code, the BLOB is still closed.

    + +

Calling this routine with a null pointer (such as would be returned by a failed call to sqlite3_blob_open()) is a harmless no-op.

    + +

    Requirements: +H17833 H17836 H17839 +


    +

    Open A BLOB For Incremental I/O

    int sqlite3_blob_open(
    +  sqlite3*,
    +  const char *zDb,
    +  const char *zTable,
    +  const char *zColumn,
    +  sqlite3_int64 iRow,
    +  int flags,
    +  sqlite3_blob **ppBlob
    +);
    +

+This interface opens a handle to the BLOB located in row iRow, column zColumn, table zTable in database zDb; in other words, the same BLOB that would be selected by:

    + +

    +SELECT zColumn FROM zDb.zTable WHERE rowid = iRow;
    +

    + +

    If the flags parameter is non-zero, then the BLOB is opened for read +and write access. If it is zero, the BLOB is opened for read access.

    + +

    Note that the database name is not the filename that contains +the database but rather the symbolic name of the database that +is assigned when the database is connected using ATTACH. +For the main database file, the database name is "main". +For TEMP tables, the database name is "temp".

    + +

    On success, SQLITE_OK is returned and the new BLOB handle is written +to *ppBlob. Otherwise an error code is returned and *ppBlob is set +to be a null pointer. +This function sets the database connection error code and message +accessible via sqlite3_errcode() and sqlite3_errmsg() and related +functions. Note that the *ppBlob variable is always initialized in a +way that makes it safe to invoke sqlite3_blob_close() on *ppBlob +regardless of the success or failure of this routine.

    + +

If the row that a BLOB handle points to is modified by an UPDATE, DELETE, or by ON CONFLICT side-effects then the BLOB handle is marked as "expired". This is true if any column of the row is changed, even a column other than the one the BLOB handle is open on. Calls to sqlite3_blob_read() and sqlite3_blob_write() for an expired BLOB handle fail with a return code of SQLITE_ABORT. Changes written into a BLOB prior to the BLOB expiring are not rolled back by the expiration of the BLOB. Such changes will eventually commit if the transaction continues to completion.

    + +

Use the sqlite3_blob_bytes() interface to determine the size of the opened blob. The size of a blob may not be changed by this interface. Use the UPDATE SQL command to change the size of a blob.

    + +

    The sqlite3_bind_zeroblob() and sqlite3_result_zeroblob() interfaces +and the built-in zeroblob SQL function can be used, if desired, +to create an empty, zero-filled blob in which to read or write using +this interface.

    + +

    To avoid a resource leak, every open BLOB handle should eventually +be released by a call to sqlite3_blob_close().
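Putting the pieces together, a sketch that reads an entire BLOB into a malloc()'d buffer (the database, table, and column names are examples only):

+#include <sqlite3.h>
+#include <stdlib.h>
+
+static unsigned char *readWholeBlob(sqlite3 *db, sqlite3_int64 iRow, int *pnByte){
+  sqlite3_blob *pBlob = 0;
+  unsigned char *zBuf = 0;
+  int rc;
+  *pnByte = 0;
+  rc = sqlite3_blob_open(db, "main", "t", "content", iRow, 0, &pBlob);
+  if( rc==SQLITE_OK ){
+    int n = sqlite3_blob_bytes(pBlob);
+    zBuf = malloc(n>0 ? n : 1);
+    if( zBuf ) rc = sqlite3_blob_read(pBlob, zBuf, n, 0);
+    if( rc!=SQLITE_OK ){ free(zBuf); zBuf = 0; }
+    *pnByte = n;
+  }
+  sqlite3_blob_close(pBlob);   /* safe even if the open failed */
+  return zBuf;
+}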

    + +

    Requirements: +H17813 H17814 H17816 H17819 H17821 H17824 +


    +

    Read Data From A BLOB Incrementally

    int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
    +

    +This function is used to read data from an open BLOB handle into a +caller-supplied buffer. N bytes of data are copied into buffer Z +from the open BLOB, starting at offset iOffset.

    + +

    If offset iOffset is less than N bytes from the end of the BLOB, +SQLITE_ERROR is returned and no data is read. If N or iOffset is +less than zero, SQLITE_ERROR is returned and no data is read. +The size of the blob (and hence the maximum value of N+iOffset) +can be determined using the sqlite3_blob_bytes() interface.

    + +

    An attempt to read from an expired BLOB handle fails with an +error code of SQLITE_ABORT.

    + +

    On success, SQLITE_OK is returned. +Otherwise, an error code or an extended error code is returned.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    See also: sqlite3_blob_write().

    + +

    Requirements: +H17853 H17856 H17859 H17862 H17863 H17865 H17868 +


    +

    Write Data Into A BLOB Incrementally

    int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
    +

    +This function is used to write data into an open BLOB handle from a +caller-supplied buffer. N bytes of data are copied from the buffer Z +into the open BLOB, starting at offset iOffset.

    + +

    If the BLOB handle passed as the first argument was not opened for +writing (the flags parameter to sqlite3_blob_open() was zero), +this function returns SQLITE_READONLY.

    + +

    This function may only modify the contents of the BLOB; it is +not possible to increase the size of a BLOB using this API. +If offset iOffset is less than N bytes from the end of the BLOB, +SQLITE_ERROR is returned and no data is written. If N is +less than zero SQLITE_ERROR is returned and no data is written. +The size of the BLOB (and hence the maximum value of N+iOffset) +can be determined using the sqlite3_blob_bytes() interface.

    + +

    An attempt to write to an expired BLOB handle fails with an +error code of SQLITE_ABORT. Writes to the BLOB that occurred +before the BLOB handle expired are not rolled back by the +expiration of the handle, though of course those changes might +have been overwritten by the statement that expired the BLOB handle +or by other independent statements.

    + +

    On success, SQLITE_OK is returned. +Otherwise, an error code or an extended error code is returned.

    + +

    This routine only works on a BLOB handle which has been created +by a prior successful call to sqlite3_blob_open() and which has not +been closed by sqlite3_blob_close(). Passing any other pointer in +to this routine results in undefined and probably undesirable behavior.

    + +

    See also: sqlite3_blob_read().

    + +

    Requirements: +H17873 H17874 H17875 H17876 H17877 H17879 H17882 H17885 +H17888 +


    +

    Register A Callback To Handle SQLITE_BUSY Errors

    int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*);
    +

    +This routine sets a callback function that might be invoked whenever +an attempt is made to open a database table that another thread +or process has locked.

    + +

    If the busy callback is NULL, then SQLITE_BUSY or SQLITE_IOERR_BLOCKED +is returned immediately upon encountering the lock. If the busy callback +is not NULL, then the callback will be invoked with two arguments.

    + +

    The first argument to the handler is a copy of the void* pointer which +is the third argument to sqlite3_busy_handler(). The second argument to +the handler callback is the number of times that the busy handler has +been invoked for this locking event. If the +busy callback returns 0, then no additional attempts are made to +access the database and SQLITE_BUSY or SQLITE_IOERR_BLOCKED is returned. +If the callback returns non-zero, then another attempt +is made to open the database for reading and the cycle repeats.
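A sketch of a simple handler that follows those rules; the retry count and delay are arbitrary example values:

+#include <sqlite3.h>
+
+static int myBusyHandler(void *pArg, int nPrior){
+  (void)pArg;
+  if( nPrior>=10 ) return 0;   /* returning 0 lets SQLITE_BUSY propagate */
+  sqlite3_sleep(100);          /* wait 100 ms, then ask SQLite to retry */
+  return 1;
+}
+
+/* Installed with:  sqlite3_busy_handler(db, myBusyHandler, 0); */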

    + +

    The presence of a busy handler does not guarantee that it will be invoked +when there is lock contention. If SQLite determines that invoking the busy +handler could result in a deadlock, it will go ahead and return SQLITE_BUSY +or SQLITE_IOERR_BLOCKED instead of invoking the busy handler. +Consider a scenario where one process is holding a read lock that +it is trying to promote to a reserved lock and +a second process is holding a reserved lock that it is trying +to promote to an exclusive lock. The first process cannot proceed +because it is blocked by the second and the second process cannot +proceed because it is blocked by the first. If both processes +invoke the busy handlers, neither will make any progress. Therefore, +SQLite returns SQLITE_BUSY for the first process, hoping that this +will induce the first process to release its read lock and allow +the second process to proceed.

    + +

    The default busy callback is NULL.

    + +

    The SQLITE_BUSY error is converted to SQLITE_IOERR_BLOCKED +when SQLite is in the middle of a large transaction where all the +changes will not fit into the in-memory cache. SQLite will +already hold a RESERVED lock on the database file, but it needs +to promote this lock to EXCLUSIVE so that it can spill cache +pages into the database file without harm to concurrent +readers. If it is unable to promote the lock, then the in-memory +cache will be left in an inconsistent state and so the error +code is promoted from the relatively benign SQLITE_BUSY to +the more severe SQLITE_IOERR_BLOCKED. This error code promotion +forces an automatic rollback of the changes. See the + +CorruptionFollowingBusyError wiki page for a discussion of why +this is important.

    + +

    There can only be a single busy handler defined for each +database connection. Setting a new busy handler clears any +previously set handler. Note that calling sqlite3_busy_timeout() +will also set or clear the busy handler.

    + +

    The busy callback should not take any actions which modify the +database connection that invoked the busy handler. Any such actions +result in undefined behavior.

    + +

    Requirements: +H12311 H12312 H12314 H12316 H12318

    + +

    A busy handler must not close the database connection +or prepared statement that invoked the busy handler. +


    +

    Set A Busy Timeout

    int sqlite3_busy_timeout(sqlite3*, int ms);
    +

    +This routine sets a busy handler that sleeps +for a specified amount of time when a table is locked. The handler +will sleep multiple times until at least "ms" milliseconds of sleeping +have accumulated. After "ms" milliseconds of sleeping, +the handler returns 0 which causes sqlite3_step() to return +SQLITE_BUSY or SQLITE_IOERR_BLOCKED.

    + +

    Calling this routine with an argument less than or equal to zero +turns off all busy handlers.

    + +

There can only be a single busy handler for a particular database connection at any given moment. If another busy handler was defined (using sqlite3_busy_handler()) prior to calling this routine, that other busy handler is cleared.

    + +

    Requirements: +H12341 H12343 H12344 +


    +

    Count The Number Of Rows Modified

    int sqlite3_changes(sqlite3*);
    +

    +This function returns the number of database rows that were changed +or inserted or deleted by the most recently completed SQL statement +on the database connection specified by the first parameter. +Only changes that are directly specified by the INSERT, UPDATE, +or DELETE statement are counted. Auxiliary changes caused by +triggers are not counted. Use the sqlite3_total_changes() function +to find the total number of changes including changes caused by triggers.

    + +

    Changes to a view that are simulated by an INSTEAD OF trigger +are not counted. Only real table changes are counted.

    + +

    A "row change" is a change to a single row of a single table +caused by an INSERT, DELETE, or UPDATE statement. Rows that +are changed as side effects of REPLACE constraint resolution, +rollback, ABORT processing, DROP TABLE, or by any other +mechanisms do not count as direct row changes.

    + +

    A "trigger context" is a scope of execution that begins and +ends with the script of a trigger. +Most SQL statements are +evaluated outside of any trigger. This is the "top level" +trigger context. If a trigger fires from the top level, a +new trigger context is entered for the duration of that one +trigger. Subtriggers create subcontexts for their duration.

    + +

    Calling sqlite3_exec() or sqlite3_step() recursively does +not create a new trigger context.

    + +

    This function returns the number of direct row changes in the +most recent INSERT, UPDATE, or DELETE statement within the same +trigger context.

    + +

    Thus, when called from the top level, this function returns the +number of changes in the most recent INSERT, UPDATE, or DELETE +that also occurred at the top level. Within the body of a trigger, +the sqlite3_changes() interface can be called to find the number of +changes in the most recently completed INSERT, UPDATE, or DELETE +statement within the body of the same trigger. +However, the number returned does not include changes +caused by subtriggers since those have their own context.

    + +

    See also the sqlite3_total_changes() interface and the +count_changes pragma.

    + +

    Requirements: +H12241 H12243

    + +

    If a separate thread makes changes on the same database connection +while sqlite3_changes() is running then the value returned +is unpredictable and not meaningful. +


    +

    Reset All Bindings On A Prepared Statement

    int sqlite3_clear_bindings(sqlite3_stmt*);
    +

    +Contrary to the intuition of many, sqlite3_reset() does not reset +the bindings on a prepared statement. +Use this routine to reset all host parameters to NULL.
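A sketch of the usual pairing when a prepared statement is recycled for another run:

+#include <sqlite3.h>
+
+static void recycleStatement(sqlite3_stmt *pStmt){
+  sqlite3_reset(pStmt);            /* rewind the statement program */
+  sqlite3_clear_bindings(pStmt);   /* set every host parameter back to NULL */
+}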

    + +

    Requirements: +H13661 +


    +

    Closing A Database Connection

    int sqlite3_close(sqlite3 *);
    +

    +This routine is the destructor for the sqlite3 object.

    + +

    Applications should finalize all prepared statements +and close all BLOB handles associated with +the sqlite3 object prior to attempting to close the object. +The sqlite3_next_stmt() interface can be used to locate all +prepared statements associated with a database connection if desired. +Typical code might look like this:

    + +

    +sqlite3_stmt *pStmt;
    +while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
    +    sqlite3_finalize(pStmt);
    +}
    +

    + +

    If sqlite3_close() is invoked while a transaction is open, +the transaction is automatically rolled back.

    + +

    The C parameter to sqlite3_close(C) must be either a NULL +pointer or an sqlite3 object pointer obtained +from sqlite3_open(), sqlite3_open16(), or +sqlite3_open_v2(), and not previously closed.

    + +

    Requirements: +H12011 H12012 H12013 H12014 H12015 H12019 +


    +

    Number Of Columns In A Result Set

    int sqlite3_column_count(sqlite3_stmt *pStmt);
    +

    +Return the number of columns in the result set returned by the +prepared statement. This routine returns 0 if pStmt is an SQL +statement that does not return data (for example an UPDATE).

    + +

    Requirements: +H13711 +


    +

    Configuring The SQLite Library

    int sqlite3_config(int, ...);
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_config() interface is used to make global configuration +changes to SQLite in order to tune SQLite to the specific needs of +the application. The default configuration is recommended for most +applications and so this routine is usually not necessary. It is +provided to support rare applications with unusual needs.

    + +

The sqlite3_config() interface is not threadsafe. The application must ensure that no other SQLite interfaces are invoked by other threads while sqlite3_config() is running. Furthermore, sqlite3_config() may only be invoked prior to library initialization using sqlite3_initialize() or after shutdown by sqlite3_shutdown(). Note, however, that sqlite3_config() can be called as part of the implementation of an application-defined sqlite3_os_init().

    + +

    The first argument to sqlite3_config() is an integer +configuration option that determines +what property of SQLite is to be configured. Subsequent arguments +vary depending on the configuration option +in the first argument.

    + +

    When a configuration option is set, sqlite3_config() returns SQLITE_OK. +If the option is unknown or SQLite is unable to set the option +then this routine returns a non-zero error code.
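For example (a sketch), an application might select the multi-thread threading mode, which takes no additional arguments, before initializing the library:

+#include <sqlite3.h>
+
+static int configureLibrary(void){
+  int rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
+  if( rc!=SQLITE_OK ) return rc;
+  return sqlite3_initialize();
+}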

    + +

    Requirements: +H14103 H14106 H14120 H14123 H14126 H14129 H14132 H14135 +H14138 H14141 H14144 H14147 H14150 H14153 H14156 H14159 +H14162 H14165 H14168 +


    +

    Database Connection For Functions

    sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
    +

    +The sqlite3_context_db_handle() interface returns a copy of +the pointer to the database connection (the 1st parameter) +of the sqlite3_create_function() +and sqlite3_create_function16() routines that originally +registered the application defined function.

    + +

    Requirements: +H16253 +


    +

    Register A Virtual Table Implementation

    int sqlite3_create_module(
    +  sqlite3 *db,               /* SQLite connection to register module with */
    +  const char *zName,         /* Name of the module */
    +  const sqlite3_module *p,   /* Methods for the module */
    +  void *pClientData          /* Client data for xCreate/xConnect */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This routine is used to register a new virtual table module name. +Module names must be registered before +creating a new virtual table using the module, or before using a +preexisting virtual table for the module.

    + +

The module name is registered on the database connection specified by the first parameter. The name of the module is given by the second parameter. The third parameter is a pointer to the implementation of the virtual table module. The fourth parameter is an arbitrary client data pointer that is passed through into the xCreate and xConnect methods of the virtual table module when a new virtual table is being created or reinitialized.

    + +

    This interface has exactly the same effect as calling +sqlite3_create_module_v2() with a NULL client data destructor. +


    +

    Register A Virtual Table Implementation

    int sqlite3_create_module_v2(
    +  sqlite3 *db,               /* SQLite connection to register module with */
    +  const char *zName,         /* Name of the module */
    +  const sqlite3_module *p,   /* Methods for the module */
    +  void *pClientData,         /* Client data for xCreate/xConnect */
    +  void(*xDestroy)(void*)     /* Module destructor function */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This routine is identical to the sqlite3_create_module() method, +except that it has an extra parameter to specify +a destructor function for the client data pointer. SQLite will +invoke the destructor function (if it is not NULL) when SQLite +no longer needs the pClientData pointer. +


    +

    Number of columns in a result set

    int sqlite3_data_count(sqlite3_stmt *pStmt);
    +

    +Returns the number of values in the current row of the result set.

    + +

    Requirements: +H13771 H13772 +


    +

    Configure database connections

    int sqlite3_db_config(sqlite3*, int op, ...);
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_db_config() interface is used to make configuration +changes to a database connection. The interface is similar to +sqlite3_config() except that the changes apply to a single +database connection (specified in the first argument). The +sqlite3_db_config() interface can only be used immediately after +the database connection is created using sqlite3_open(), +sqlite3_open16(), or sqlite3_open_v2().

    + +

    The second argument to sqlite3_db_config(D,V,...) is the +configuration verb - an integer code that indicates what +aspect of the database connection is being configured. +The only choice for this value is SQLITE_DBCONFIG_LOOKASIDE. +New verbs are likely to be added in future releases of SQLite. +Additional arguments depend on the verb.

    + +

    Requirements: +H14203 H14206 H14209 H14212 H14215 +


    +

    Find The Database Handle Of A Prepared Statement

    sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
    +

    +The sqlite3_db_handle interface returns the database connection handle +to which a prepared statement belongs. The database connection +returned by sqlite3_db_handle is the same database connection that was the first argument +to the sqlite3_prepare_v2() call (or its variants) that was used to +create the statement in the first place.

    + +

    Requirements: H13123 +


    +

    Retrieve the mutex for a database connection

    sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
    +

+This interface returns a pointer to the sqlite3_mutex object that serializes access to the database connection given in the argument when the threading mode is Serialized. If the threading mode is Single-thread or Multi-thread then this routine returns a NULL pointer.


    +

    Database Connection Status

    int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This interface is used to retrieve runtime status information +about a single database connection. The first argument is the +database connection object to be interrogated. The second argument +is the parameter to interrogate. Currently, the only allowed value +for the second parameter is SQLITE_DBSTATUS_LOOKASIDE_USED. +Additional options will likely appear in future releases of SQLite.

    + +

    The current value of the requested parameter is written into *pCur +and the highest instantaneous value is written into *pHiwtr. If +the resetFlg is true, then the highest instantaneous value is +reset back down to the current value.

    + +

    See also: sqlite3_status() and sqlite3_stmt_status(). +


    +

    Declare The Schema Of A Virtual Table

    int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
    +

    Important: This interface is experimental and is subject to change without notice.

    +The xCreate and xConnect methods of a +virtual table module call this interface +to declare the format (the names and datatypes of the columns) of +the virtual tables they implement. +


    +

    Enable Or Disable Extension Loading

    int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
    +

    +So as not to open security holes in older applications that are +unprepared to deal with extension loading, and as a means of disabling +extension loading while evaluating user-entered SQL, the following API +is provided to turn the sqlite3_load_extension() mechanism on and off.

    + +

    Extension loading is off by default. See ticket #1863.

    + +

    Call the sqlite3_enable_load_extension() routine with onoff==1 +to turn extension loading on and call it with onoff==0 to turn +it back off again.

    + +

    Extension loading is off by default. +


    +

    One-Step Query Execution Interface

    int sqlite3_exec(
    +  sqlite3*,                                  /* An open database */
    +  const char *sql,                           /* SQL to be evaluated */
    +  int (*callback)(void*,int,char**,char**),  /* Callback function */
    +  void *,                                    /* 1st argument to callback */
    +  char **errmsg                              /* Error msg written here */
    +);
    +

    +The sqlite3_exec() interface is a convenient way of running one or more +SQL statements without having to write a lot of C code. The UTF-8 encoded +SQL statements are passed in as the second parameter to sqlite3_exec(). +The statements are evaluated one by one until either an error or +an interrupt is encountered, or until they are all done. The 3rd parameter +is an optional callback that is invoked once for each row of any query +results produced by the SQL statements. The 5th parameter tells where +to write any error messages.

    + +

    The error message passed back through the 5th parameter is held +in memory obtained from sqlite3_malloc(). To avoid a memory leak, +the calling application should call sqlite3_free() on any error +message returned through the 5th parameter when it has finished using +the error message.
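A sketch of typical usage, including the error-message cleanup described above (the SQL text and table name are examples):

+#include <sqlite3.h>
+#include <stdio.h>
+
+static int printRow(void *pArg, int nCol, char **azVal, char **azCol){
+  int i;
+  (void)pArg;
+  for(i=0; i<nCol; i++){
+    printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
+  }
+  return 0;                 /* returning non-zero would abort sqlite3_exec() */
+}
+
+static int runQuery(sqlite3 *db){
+  char *zErr = 0;
+  int rc = sqlite3_exec(db, "SELECT * FROM t", printRow, 0, &zErr);
+  if( rc!=SQLITE_OK ){
+    fprintf(stderr, "query failed: %s\n", zErr ? zErr : "unknown error");
+    sqlite3_free(zErr);     /* the message comes from sqlite3_malloc() */
+  }
+  return rc;
+}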

    + +

    If the SQL statement in the 2nd parameter is NULL or an empty string +or a string containing only whitespace and comments, then no SQL +statements are evaluated and the database is not changed.

    + +

    The sqlite3_exec() interface is implemented in terms of +sqlite3_prepare_v2(), sqlite3_step(), and sqlite3_finalize(). +The sqlite3_exec() routine does nothing to the database that cannot be done +by sqlite3_prepare_v2(), sqlite3_step(), and sqlite3_finalize().

    + +

The first parameter to sqlite3_exec() must be a valid and open database connection.

    + +

    The database connection must not be closed while +sqlite3_exec() is running.

    + +

    The calling function should use sqlite3_free() to free +the memory that *errmsg is left pointing at once the error +message is no longer needed.

    + +

    The SQL statement text in the 2nd parameter to sqlite3_exec() +must remain unchanged while sqlite3_exec() is running.

    + +

    Requirements: +H12101 H12102 H12104 H12105 H12107 H12110 H12113 H12116 +H12119 H12122 H12125 H12131 H12134 H12137 H12138 +


    +

    Enable Or Disable Extended Result Codes

    int sqlite3_extended_result_codes(sqlite3*, int onoff);
    +

    +The sqlite3_extended_result_codes() routine enables or disables the +extended result codes feature of SQLite. The extended result +codes are disabled by default for historical compatibility considerations.

    + +

    Requirements: +H12201 H12202 +


    +

    Low-Level Control Of Database Files

    int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
    +

    + The sqlite3_file_control() interface makes a direct call to the +xFileControl method for the sqlite3_io_methods object associated +with a particular database identified by the second argument. The +name of the database is the name assigned to the database by the +ATTACH SQL command that opened the +database. To control the main database file, use the name "main" +or a NULL pointer. The third and fourth parameters to this routine +are passed directly through to the second and third parameters of +the xFileControl method. The return value of the xFileControl +method becomes the return value of this routine.

    + +

    If the second parameter (zDbName) does not match the name of any +open database file, then SQLITE_ERROR is returned. This error +code is not remembered and will not be recalled by sqlite3_errcode() +or sqlite3_errmsg(). The underlying xFileControl method might +also return SQLITE_ERROR. There is no way to distinguish between +an incorrect zDbName and an SQLITE_ERROR return from the underlying +xFileControl method.

    + +

    See also: SQLITE_FCNTL_LOCKSTATE +


    +

    Destroy A Prepared Statement Object

    int sqlite3_finalize(sqlite3_stmt *pStmt);
    +

    +The sqlite3_finalize() function is called to delete a prepared statement. +If the statement was executed successfully or not executed at all, then +SQLITE_OK is returned. If execution of the statement failed then an +error code or extended error code is returned.

    + +

    This routine can be called at any point during the execution of the +prepared statement. If the virtual machine has not +completed execution when this routine is called, that is like +encountering an error or an interrupt. +Incomplete updates may be rolled back and transactions canceled, +depending on the circumstances, and the +error code returned will be SQLITE_ABORT.

    + +

    Requirements: +H11302 H11304 +


    +

    Interrupt A Long-Running Query

    void sqlite3_interrupt(sqlite3*);
    +

    +This function causes any pending database operation to abort and +return at its earliest opportunity. This routine is typically +called in response to a user action such as pressing "Cancel" +or Ctrl-C where the user wants a long query operation to halt +immediately.

    + +

    It is safe to call this routine from a thread different from the +thread that is currently running the database operation. But it +is not safe to call this routine with a database connection that +is closed or might close before sqlite3_interrupt() returns.

    + +

    If an SQL operation is very nearly finished at the time when +sqlite3_interrupt() is called, then it might not have an opportunity +to be interrupted and might continue to completion.

    + +

    An SQL operation that is interrupted will return SQLITE_INTERRUPT. +If the interrupted SQL operation is an INSERT, UPDATE, or DELETE +that is inside an explicit transaction, then the entire transaction +will be rolled back automatically.

    + +

The sqlite3_interrupt(D) call is in effect until all currently running SQL statements on database connection D complete. Any new SQL statements that are started after the sqlite3_interrupt() call and before the count of running statements reaches zero are interrupted as if they had been running prior to the sqlite3_interrupt() call. New SQL statements that are started after the running statement count reaches zero are not affected by sqlite3_interrupt(). A call to sqlite3_interrupt(D) that occurs when there are no running SQL statements is a no-op and has no effect on SQL statements that are started after the sqlite3_interrupt() call returns.

    + +

    Requirements: +H12271 H12272

    + +

    If the database connection closes while sqlite3_interrupt() +is running then bad things will likely happen. +


    +

    Last Insert Rowid

    sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
    +

    +Each entry in an SQLite table has a unique 64-bit signed +integer key called the "rowid". The rowid is always available +as an undeclared column named ROWID, OID, or _ROWID_ as long as those +names are not also used by explicitly declared columns. If +the table has a column of type INTEGER PRIMARY KEY then that column +is another alias for the rowid.

    + +

    This routine returns the rowid of the most recent +successful INSERT into the database from the database connection +in the first argument. If no successful INSERTs +have ever occurred on that database connection, zero is returned.
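    For example (a sketch assuming a connection db and a table t(x) that already exist):

      sqlite3_exec(db, "INSERT INTO t(x) VALUES(42)", 0, 0, 0);
      sqlite3_int64 rowid = sqlite3_last_insert_rowid(db);  /* rowid of that INSERT */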

    + +

    If an INSERT occurs within a trigger, then the rowid of the inserted +row is returned by this routine as long as the trigger is running. +But once the trigger terminates, the value returned by this routine +reverts to the last value inserted before the trigger fired.

    + +

    An INSERT that fails due to a constraint violation is not a +successful INSERT and does not change the value returned by this +routine. Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, +and INSERT OR ABORT make no changes to the return value of this +routine when their insertion fails. When INSERT OR REPLACE +encounters a constraint violation, it does not fail. The +INSERT continues to completion after deleting rows that caused +the constraint problem so INSERT OR REPLACE will always change +the return value of this interface.

    + +

    For the purposes of this routine, an INSERT is considered to +be successful even if it is subsequently rolled back.

    + +

    Requirements: +H12221 H12223

    + +

    If a separate thread performs a new INSERT on the same +database connection while the sqlite3_last_insert_rowid() +function is running and thus changes the last insert rowid, +then the value returned by sqlite3_last_insert_rowid() is +unpredictable and might not equal either the old or the new +last insert rowid. +


    +

    Run-time Limits

    int sqlite3_limit(sqlite3*, int id, int newVal);
    +

    +This interface allows the size of various constructs to be limited +on a connection by connection basis. The first parameter is the +database connection whose limit is to be set or queried. The +second parameter is one of the limit categories that define a +class of constructs to be size limited. The third parameter is the +new limit for that construct. The function returns the old limit.

    + +

    If the new limit is a negative number, the limit is unchanged. +For the limit category of SQLITE_LIMIT_XYZ there is a +hard upper bound +set by a compile-time C preprocessor macro named +SQLITE_MAX_XYZ. +(The "_LIMIT_" in the name is changed to "_MAX_".) +Attempts to increase a limit above its hard upper bound are +silently truncated to the hard upper limit.

    + +

    Run time limits are intended for use in applications that manage +both their own internal database and also databases that are controlled +by untrusted external sources. An example application might be a +web browser that has its own databases for storing history and +separate databases controlled by JavaScript applications downloaded +off the Internet. The internal databases can be given the +large, default limits. Databases managed by external sources can +be given much smaller limits designed to prevent a denial of service +attack. Developers might also want to use the sqlite3_set_authorizer() +interface to further control untrusted SQL. The size of the database +created by an untrusted script can be contained using the +max_page_count PRAGMA.
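    A sketch of how an application might tighten limits on a connection reserved for untrusted SQL (the specific values here are arbitrary examples, not recommendations from the original documentation):

      sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);    /* max string/BLOB size */
      sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10000);  /* max SQL text length */
      /* A negative newVal queries the current limit without changing it: */
      int curVars = sqlite3_limit(db, SQLITE_LIMIT_VARIABLE_NUMBER, -1);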

    + +

    New run-time limit categories may be added in future releases.

    + +

    Requirements: +H12762 H12766 H12769 +


    +

    Load An Extension

    int sqlite3_load_extension(
    +  sqlite3 *db,          /* Load the extension into this database connection */
    +  const char *zFile,    /* Name of the shared library containing extension */
    +  const char *zProc,    /* Entry point.  Derived from zFile if 0 */
    +  char **pzErrMsg       /* Put error message here if not 0 */
    +);
    +

    +This interface loads an SQLite extension library from the named file.

    + +

    The sqlite3_load_extension() interface attempts to load an +SQLite extension library contained in the file zFile.

    + +

    The entry point is zProc.

    + +

    zProc may be 0, in which case the name of the entry point +defaults to "sqlite3_extension_init".

    + +

    The sqlite3_load_extension() interface shall return +SQLITE_OK on success and SQLITE_ERROR if something goes wrong.

    + +

    If an error occurs and pzErrMsg is not 0, then the +sqlite3_load_extension() interface shall attempt to +fill *pzErrMsg with error message text stored in memory +obtained from sqlite3_malloc(). The calling function +should free this memory by calling sqlite3_free().

    + +

    Extension loading must be enabled using +sqlite3_enable_load_extension() prior to calling this API, +otherwise an error will be returned. +
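    A usage sketch (the file name "./myext.so" is a placeholder; db is assumed to be an open connection):

      char *zErr = 0;
      sqlite3_enable_load_extension(db, 1);          /* must be enabled first */
      int rc = sqlite3_load_extension(db, "./myext.so", 0, &zErr);
      if( rc!=SQLITE_OK ){
        /* zErr (if not NULL) holds message text obtained from sqlite3_malloc() */
        sqlite3_free(zErr);
      }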


    +

    Find the next prepared statement

    sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
    +

    +This interface returns a pointer to the next prepared statement after +pStmt associated with the database connection pDb. If pStmt is NULL +then this interface returns a pointer to the first prepared statement +associated with the database connection pDb. If no prepared statement +satisfies the conditions of this routine, it returns NULL.

    + +

    The database connection pointer D in a call to +sqlite3_next_stmt(D,S) must refer to an open database +connection and in particular must not be a NULL pointer.
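    For example, a sketch that counts the prepared statements still outstanding on a connection (useful before attempting to close it):

      int nOpen = 0;
      sqlite3_stmt *p;
      for(p = sqlite3_next_stmt(db, 0); p; p = sqlite3_next_stmt(db, p)){
        nOpen++;                /* p visits every prepared statement of db */
      }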

    + +

    Requirements: H13143 H13146 H13149 H13152 +


    +

    Overload A Function For A Virtual Table

    int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +Virtual tables can provide alternative implementations of functions +using the xFindFunction method of the virtual table module. +But global versions of those functions +must exist in order to be overloaded.

    + +

    This API makes sure a global version of a function with a particular +name and number of parameters exists. If no such function exists +before this API is called, a new function is created. The implementation +of the new function always causes an exception to be thrown. So +the new function is not good for anything by itself. Its only +purpose is to be a placeholder function that can be overloaded +by a virtual table. +


    +

    Query Progress Callbacks

    void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
    +

    +This routine configures a callback function - the +progress callback - that is invoked periodically during long +running calls to sqlite3_exec(), sqlite3_step() and +sqlite3_get_table(). An example use for this +interface is to keep a GUI updated during a large query.

    + +

    If the progress callback returns non-zero, the operation is +interrupted. This feature can be used to implement a +"Cancel" button on a GUI progress dialog box.
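    A sketch of such a cancel-button hookup; the flag variable and the invocation period (every 1000 virtual-machine opcodes here) are illustrative assumptions:

      static volatile int g_cancelRequested = 0;      /* set by the GUI thread */

      static int progress_cb(void *pNotUsed){
        (void)pNotUsed;
        return g_cancelRequested;   /* non-zero interrupts the running statement */
      }

      /* during setup: */
      sqlite3_progress_handler(db, 1000, progress_cb, 0);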

    + +

    The progress handler must not do anything that will modify +the database connection that invoked the progress handler. +Note that sqlite3_prepare_v2() and sqlite3_step() both modify their +database connections for the meaning of "modify" in this paragraph.

    + +

    Requirements: +H12911 H12912 H12913 H12914 H12915 H12916 H12917 H12918

    + +


    +

    Pseudo-Random Number Generator

    void sqlite3_randomness(int N, void *P);
    +

    +SQLite contains a high-quality pseudo-random number generator (PRNG) used to +select random ROWIDs when inserting new records into a table that +already uses the largest possible ROWID. The PRNG is also used for +the built-in random() and randomblob() SQL functions. This interface allows +applications to access the same PRNG for other purposes.

    + +

    A call to this routine stores N bytes of randomness into buffer P.
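    For example, a sketch that fills a small buffer with random bytes:

      unsigned char nonce[16];
      sqlite3_randomness(sizeof(nonce), nonce);   /* N bytes written into P */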

    + +

    The first time this routine is invoked (either internally or by +the application) the PRNG is seeded using randomness obtained +from the xRandomness method of the default sqlite3_vfs object. +On all subsequent invocations, the pseudo-randomness is generated +internally and without recourse to the sqlite3_vfs xRandomness +method.

    + +

    Requirements: +H17392 +


    +

    Attempt To Free Heap Memory

    int sqlite3_release_memory(int);
    +

    +The sqlite3_release_memory() interface attempts to free N bytes +of heap memory by deallocating non-essential memory allocations +held by the database library. Memory used to cache database +pages to improve performance is an example of non-essential memory. +sqlite3_release_memory() returns the number of bytes actually freed, +which might be more or less than the amount requested.
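    For example (a sketch; the byte count is arbitrary):

      int nFreed = sqlite3_release_memory(1024*1024);  /* try to free about 1 MB */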

    + +

    Requirements: H17341 H17342 +


    +

    Reset A Prepared Statement Object

    int sqlite3_reset(sqlite3_stmt *pStmt);
    +

    +The sqlite3_reset() function is called to reset a prepared statement +object back to its initial state, ready to be re-executed. +Any SQL statement variables that had values bound to them using +the sqlite3_bind_*() API retain their values. +Use sqlite3_clear_bindings() to reset the bindings.

    + +

    The sqlite3_reset(S) interface resets the prepared statement S +back to the beginning of its program.
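    A sketch of the common bind/step/reset loop, assuming a table t(x) exists on connection db:

      sqlite3_stmt *pStmt = 0;
      int i;
      sqlite3_prepare_v2(db, "INSERT INTO t(x) VALUES(?1)", -1, &pStmt, 0);
      for(i=0; i<10; i++){
        sqlite3_bind_int(pStmt, 1, i);   /* bindings persist across reset */
        sqlite3_step(pStmt);
        sqlite3_reset(pStmt);            /* ready to be run again */
      }
      sqlite3_finalize(pStmt);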

    + +

    If the most recent call to sqlite3_step(S) for the +prepared statement S returned SQLITE_ROW or SQLITE_DONE, +or if sqlite3_step(S) has never before been called on S, +then sqlite3_reset(S) returns SQLITE_OK.

    + +

    If the most recent call to sqlite3_step(S) for the +prepared statement S indicated an error, then +sqlite3_reset(S) returns an appropriate error code.

    + +

    The sqlite3_reset(S) interface does not change the values +of any bindings on the prepared statement S. +


    +

    Reset Automatic Extension Loading

    void sqlite3_reset_auto_extension(void);
    +

    +This function disables all previously registered automatic +extensions. It undoes the effect of all prior +sqlite3_auto_extension() calls.

    + +

    This function disables all previously registered +automatic extensions.

    + +

    This function disables automatic extensions in all threads. +


    +

    Compile-Time Authorization Callbacks

    int sqlite3_set_authorizer(
    +  sqlite3*,
    +  int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
    +  void *pUserData
    +);
    +

    +This routine registers an authorizer callback with a particular +database connection, supplied in the first argument. +The authorizer callback is invoked as SQL statements are being compiled +by sqlite3_prepare() or its variants sqlite3_prepare_v2(), +sqlite3_prepare16() and sqlite3_prepare16_v2(). At various +points during the compilation process, as logic is being created +to perform various actions, the authorizer callback is invoked to +see if those actions are allowed. The authorizer callback should +return SQLITE_OK to allow the action, SQLITE_IGNORE to disallow the +specific action but allow the SQL statement to continue to be +compiled, or SQLITE_DENY to cause the entire SQL statement to be +rejected with an error. If the authorizer callback returns +any value other than SQLITE_IGNORE, SQLITE_OK, or SQLITE_DENY +then the sqlite3_prepare_v2() or equivalent call that triggered +the authorizer will fail with an error message.

    + +

    When the callback returns SQLITE_OK, that means the operation +requested is ok. When the callback returns SQLITE_DENY, the +sqlite3_prepare_v2() or equivalent call that triggered the +authorizer will fail with an error message explaining that +access is denied.

    + +

    The first parameter to the authorizer callback is a copy of the third +parameter to the sqlite3_set_authorizer() interface. The second parameter +to the callback is an integer action code that specifies +the particular action to be authorized. The third through sixth parameters +to the callback are zero-terminated strings that contain additional +details about the action to be authorized.

    + +

    If the action code is SQLITE_READ +and the callback returns SQLITE_IGNORE then the +prepared statement is constructed to substitute +a NULL value in place of the table column that would have +been read if SQLITE_OK had been returned. The SQLITE_IGNORE +return can be used to deny an untrusted user access to individual +columns of a table. +If the action code is SQLITE_DELETE and the callback returns +SQLITE_IGNORE then the DELETE operation proceeds but the +truncate optimization is disabled and all rows are deleted individually.

    + +

    An authorizer is used when preparing +SQL statements from an untrusted source, to ensure that the SQL statements +do not try to access data they are not allowed to see, or that they do not +try to execute malicious statements that damage the database. For +example, an application may allow a user to enter arbitrary +SQL queries for evaluation by a database. But the application does +not want the user to be able to make arbitrary changes to the +database. An authorizer could then be put in place while the +user-entered SQL is being prepared that +disallows everything except SELECT statements.
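    A sketch of such an authorizer; the exact set of allowed action codes is an application policy choice and is not prescribed by this documentation:

      static int select_only_auth(void *pArg, int code, const char *z1,
                                  const char *z2, const char *zDb,
                                  const char *zTrigger){
        (void)pArg; (void)z1; (void)z2; (void)zDb; (void)zTrigger;
        switch( code ){
          case SQLITE_SELECT:
          case SQLITE_READ:
          case SQLITE_FUNCTION:
            return SQLITE_OK;       /* allow read-only actions */
          default:
            return SQLITE_DENY;     /* reject everything else */
        }
      }

      /* install while preparing the untrusted SQL, remove afterwards: */
      sqlite3_set_authorizer(db, select_only_auth, 0);
      /* ... sqlite3_prepare_v2() the untrusted text ... */
      sqlite3_set_authorizer(db, 0, 0);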

    + +

    Applications that need to process SQL from untrusted sources +might also consider lowering resource limits using sqlite3_limit() +and limiting database size using the max_page_count PRAGMA +in addition to using an authorizer.

    + +

    Only a single authorizer can be in place on a database connection +at a time. Each call to sqlite3_set_authorizer overrides the +previous call. Disable the authorizer by installing a NULL callback. +The authorizer is disabled by default.

    + +

    The authorizer callback must not do anything that will modify +the database connection that invoked the authorizer callback. +Note that sqlite3_prepare_v2() and sqlite3_step() both modify their +database connections for the meaning of "modify" in this paragraph.

    + +

    When sqlite3_prepare_v2() is used to prepare a statement, the +statement might be reprepared during sqlite3_step() due to a +schema change. Hence, the application should ensure that the +correct authorizer callback remains in place during the sqlite3_step().

    + +

    Note that the authorizer callback is invoked only during +sqlite3_prepare() or its variants. Authorization is not +performed during statement evaluation in sqlite3_step(), unless +as stated in the previous paragraph, sqlite3_step() invokes +sqlite3_prepare_v2() to reprepare a statement after a schema change.

    + +

    Requirements: +H12501 H12502 H12503 H12504 H12505 H12506 H12507 H12510 +H12511 H12512 H12520 H12521 H12522 +


    +

    Suspend Execution For A Short Time

    int sqlite3_sleep(int);
    +

    +The sqlite3_sleep() function causes the current thread to suspend execution +for at least a number of milliseconds specified in its parameter.

    + +

    If the operating system does not support sleep requests with +millisecond time resolution, then the time will be rounded up to +the nearest second. The number of milliseconds of sleep actually +requested from the operating system is returned.

    + +

    SQLite implements this interface by calling the xSleep() +method of the default sqlite3_vfs object.

    + +

    Requirements: H10533 H10536 +


    +

    Impose A Limit On Heap Size

    void sqlite3_soft_heap_limit(int);
    +

    +The sqlite3_soft_heap_limit() interface places a "soft" limit +on the amount of heap memory that may be allocated by SQLite. +If an internal allocation is requested that would exceed the +soft heap limit, sqlite3_release_memory() is invoked one or +more times to free up some space before the allocation is performed.

    + +

    The limit is called "soft", because if sqlite3_release_memory() +cannot free sufficient memory to prevent the limit from being exceeded, +the memory is allocated anyway and the current operation proceeds.

    + +

    A negative or zero value for N means that there is no soft heap limit and +sqlite3_release_memory() will only be called when memory is exhausted. +The default value for the soft heap limit is zero.
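    For example (a sketch; the 8 MB figure is arbitrary):

      sqlite3_soft_heap_limit(8*1024*1024);   /* advisory cap of about 8 MB */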

    + +

    SQLite makes a best effort to honor the soft heap limit. +But if the soft heap limit cannot be honored, execution will +continue without error or notification. This is why the limit is +called a "soft" limit. It is advisory only.

    + +

    Prior to SQLite version 3.5.0, this routine only constrained the memory +allocated by a single thread - the same thread in which this routine +runs. Beginning with SQLite version 3.5.0, the soft heap limit is +applied to all threads. The value specified for the soft heap limit +is an upper bound on the total memory allocation for all threads. In +version 3.5.0 there is no mechanism for limiting the heap usage for +individual threads.

    + +

    Requirements: +H16351 H16352 H16353 H16354 H16355 H16358 +


    +

    Retrieving Statement SQL

    const char *sqlite3_sql(sqlite3_stmt *pStmt);
    +

    +This interface can be used to retrieve a saved copy of the original +SQL text used to create a prepared statement if that statement was +compiled using either sqlite3_prepare_v2() or sqlite3_prepare16_v2().

    + +

    Requirements: +H13101 H13102 H13103 +


    +

    SQLite Runtime Status

    int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This interface is used to retrieve runtime status information +about the performance of SQLite, and optionally to reset various +highwater marks. The first argument is an integer code for +the specific parameter to measure. Recognized integer codes +are of the form SQLITE_STATUS_.... +The current value of the parameter is returned into *pCurrent. +The highest recorded value is returned in *pHighwater. If the +resetFlag is true, then the highest recorded value is reset after +*pHighwater is written. Some parameters do not record the highest +value. For those parameters +nothing is written into *pHighwater and the resetFlag is ignored. +Other parameters record only the highwater mark and not the current +value. For these latter parameters nothing is written into *pCurrent.

    + +

    This routine returns SQLITE_OK on success and a non-zero +error code on failure.

    + +

    This routine is threadsafe but is not atomic. This routine can be +called while other threads are running the same or different SQLite +interfaces. However the values returned in *pCurrent and +*pHighwater reflect the status of SQLite at different points in time +and it is possible that another thread might change the parameter +in between the times when *pCurrent and *pHighwater are written.
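    A usage sketch that samples the current heap usage without resetting the highwater mark:

      int nCur = 0, nHi = 0;
      sqlite3_status(SQLITE_STATUS_MEMORY_USED, &nCur, &nHi, 0 /* no reset */);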

    + +

    See also: sqlite3_db_status() +


    +

    Evaluate An SQL Statement

    int sqlite3_step(sqlite3_stmt*);
    +

    +After a prepared statement has been prepared using either +sqlite3_prepare_v2() or sqlite3_prepare16_v2() or one of the legacy +interfaces sqlite3_prepare() or sqlite3_prepare16(), this function +must be called one or more times to evaluate the statement.

    + +

    The details of the behavior of the sqlite3_step() interface depend +on whether the statement was prepared using the newer "v2" interface +sqlite3_prepare_v2() and sqlite3_prepare16_v2() or the older legacy +interface sqlite3_prepare() and sqlite3_prepare16(). The use of the +new "v2" interface is recommended for new applications but the legacy +interface will continue to be supported.

    + +

    In the legacy interface, the return value will be either SQLITE_BUSY, +SQLITE_DONE, SQLITE_ROW, SQLITE_ERROR, or SQLITE_MISUSE. +With the "v2" interface, any of the other result codes or +extended result codes might be returned as well.

    + +

    SQLITE_BUSY means that the database engine was unable to acquire the +database locks it needs to do its job. If the statement is a COMMIT +or occurs outside of an explicit transaction, then you can retry the +statement. If the statement is not a COMMIT and occurs within an +explicit transaction then you should roll back the transaction before +continuing.

    + +

    SQLITE_DONE means that the statement has finished executing +successfully. sqlite3_step() should not be called again on this virtual +machine without first calling sqlite3_reset() to reset the virtual +machine back to its initial state.

    + +

    If the SQL statement being executed returns any data, then SQLITE_ROW +is returned each time a new row of data is ready for processing by the +caller. The values may be accessed using the column access functions. +sqlite3_step() is called again to retrieve the next row of data.
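    For example, a sketch of the usual row loop (assuming pStmt was prepared with the "v2" interface and its first two result columns are an integer and text):

      int rc;
      while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
        int id = sqlite3_column_int(pStmt, 0);                      /* column 0 */
        const unsigned char *zName = sqlite3_column_text(pStmt, 1); /* column 1 */
        /* use id and zName before the next sqlite3_step() call */
      }
      /* rc is SQLITE_DONE on success, or an error code with the v2 interface */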

    + +

    SQLITE_ERROR means that a run-time error (such as a constraint +violation) has occurred. sqlite3_step() should not be called again on +the VM. More information may be found by calling sqlite3_errmsg(). +With the legacy interface, a more specific error code (for example, +SQLITE_INTERRUPT, SQLITE_SCHEMA, SQLITE_CORRUPT, and so forth) +can be obtained by calling sqlite3_reset() on the +prepared statement. In the "v2" interface, +the more specific error code is returned directly by sqlite3_step().

    + +

    SQLITE_MISUSE means that this routine was called inappropriately. +Perhaps it was called on a prepared statement that has +already been finalized or on one that had +previously returned SQLITE_ERROR or SQLITE_DONE. Or it could +be the case that the same database connection is being used by two or +more threads at the same moment in time.

    + +

    Goofy Interface Alert: In the legacy interface, the sqlite3_step() +API always returns a generic error code, SQLITE_ERROR, following any +error other than SQLITE_BUSY and SQLITE_MISUSE. You must call +sqlite3_reset() or sqlite3_finalize() in order to find one of the +specific error codes that better describes the error. +We admit that this is a goofy design. The problem has been fixed +with the "v2" interface. If you prepare all of your SQL statements +using either sqlite3_prepare_v2() or sqlite3_prepare16_v2() instead +of the legacy sqlite3_prepare() and sqlite3_prepare16() interfaces, +then the more specific error codes are returned directly +by sqlite3_step(). The use of the "v2" interface is recommended.

    + +

    Requirements: +H13202 H15304 H15306 H15308 H15310 +


    +

    Prepared Statement Status

    int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
    +

    Important: This interface is experimental and is subject to change without notice.

    +Each prepared statement maintains various +counters that measure the number +of times it has performed specific operations. These counters can +be used to monitor the performance characteristics of the prepared +statements. For example, if the number of table steps greatly exceeds +the number of table searches or result rows, that would tend to indicate +that the prepared statement is using a full table scan rather than +an index.

    + +

    This interface is used to retrieve and reset counter values from +a prepared statement. The first argument is the prepared statement +object to be interrogated. The second argument +is an integer code for a specific counter +to be interrogated. +The current value of the requested counter is returned. +If the resetFlg is true, then the counter is reset to zero after this +interface call returns.
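    For example, a sketch that checks whether a query performed full-table-scan steps or had to sort (the counter codes are SQLITE_STMTSTATUS_... constants):

      int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
      int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
      /* large nScan or nSort values suggest a missing index; pass 1 as the
      ** third argument to reset a counter after reading it */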

    + +

    See also: sqlite3_status() and sqlite3_db_status(). +


    +

    Extract Metadata About A Column Of A Table

    int sqlite3_table_column_metadata(
    +  sqlite3 *db,                /* Connection handle */
    +  const char *zDbName,        /* Database name or NULL */
    +  const char *zTableName,     /* Table name */
    +  const char *zColumnName,    /* Column name */
    +  char const **pzDataType,    /* OUTPUT: Declared data type */
    +  char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
    +  int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
    +  int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
    +  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
    +);
    +

    +This routine returns metadata about a specific column of a specific +database table accessible using the database connection handle +passed as the first function argument.

    + +

    The column is identified by the second, third and fourth parameters to +this function. The second parameter is either the name of the database +(i.e. "main", "temp" or an attached database) containing the specified +table or NULL. If it is NULL, then all attached databases are searched +for the table using the same algorithm used by the database engine to +resolve unqualified table references.

    + +

    The third and fourth parameters to this function are the table and column +name of the desired column, respectively. Neither of these parameters +may be NULL.

    + +

    Metadata is returned by writing to the memory locations passed as the 5th +and subsequent parameters to this function. Any of these arguments may be +NULL, in which case the corresponding element of metadata is omitted.

    + +

    + +
    Parameter   Output Type    Description
    5th         const char*    Data type
    6th         const char*    Name of default collation sequence
    7th         int            True if column has a NOT NULL constraint
    8th         int            True if column is part of the PRIMARY KEY
    9th         int            True if column is AUTOINCREMENT
    +

    + +

    The memory pointed to by the character pointers returned for the +declaration type and collation sequence is valid only until the next +call to any SQLite API function.

    + +

    If the specified table is actually a view, an error code is returned.

    + +

    If the specified column is "rowid", "oid" or "_rowid_" and an +INTEGER PRIMARY KEY column has been explicitly declared, then the output +parameters are set for the explicitly declared column. If there is no +explicitly declared INTEGER PRIMARY KEY column, then the output +parameters are set as follows:

    + +

    +data type: "INTEGER"
    +collation sequence: "BINARY"
    +not null: 0
    +primary key: 1
    +auto increment: 0
    +

    + +

    This function may load one or more schemas from database files. If an +error occurs during this process, or if the requested table or column +cannot be found, an error code is returned and an error message left +in the database connection (to be retrieved using sqlite3_errmsg()).
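    A usage sketch; the database, table, and column names ("main", "t", "x") are placeholders:

      const char *zType = 0, *zColl = 0;
      int notNull = 0, primaryKey = 0, autoinc = 0;
      int rc = sqlite3_table_column_metadata(db, "main", "t", "x",
                   &zType, &zColl, &notNull, &primaryKey, &autoinc);
      /* on SQLITE_OK, zType/zColl remain valid only until the next SQLite call */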

    + +

    This API is only available if the library was compiled with the +SQLITE_ENABLE_COLUMN_METADATA C-preprocessor symbol defined. +


    +

    Testing Interface

    int sqlite3_test_control(int op, ...);
    +

    +The sqlite3_test_control() interface is used to read out internal +state of SQLite and to inject faults into SQLite for testing +purposes. The first parameter is an operation code that determines +the number, meaning, and operation of all subsequent parameters.

    + +

    This interface is not for use by applications. It exists solely +for verifying the correct operation of the SQLite library. Depending +on how the SQLite library is compiled, this interface might not exist.

    + +

    The details of the operation codes, their meanings, the parameters +they take, and what they do are all subject to change without notice. +Unlike most of the SQLite API, this function is not guaranteed to +operate consistently from one release to the next. +


    +

    Test To See If The Library Is Threadsafe

    int sqlite3_threadsafe(void);
    +

    +SQLite can be compiled with or without mutexes. When +the SQLITE_THREADSAFE C preprocessor macro is 1 or 2, mutexes +are enabled and SQLite is threadsafe. When the +SQLITE_THREADSAFE macro is 0, +the mutexes are omitted. Without the mutexes, it is not safe +to use SQLite concurrently from more than one thread.

    + +

    Enabling mutexes incurs a measurable performance penalty. +So if speed is of utmost importance, it makes sense to disable +the mutexes. But for maximum safety, mutexes should be enabled. +The default behavior is for mutexes to be enabled.

    + +

    This interface can be used by a program to make sure that the +version of SQLite that it is linking against was compiled with +the desired setting of the SQLITE_THREADSAFE macro.
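    For example (a sketch):

      if( !sqlite3_threadsafe() ){
        /* built with SQLITE_THREADSAFE=0: confine all SQLite use to one thread */
      }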

    + +

    This interface only reports on the compile-time mutex setting +of the SQLITE_THREADSAFE flag. If SQLite is compiled with +SQLITE_THREADSAFE=1 then mutexes are enabled by default but +can be fully or partially disabled using a call to sqlite3_config() +with the verbs SQLITE_CONFIG_SINGLETHREAD, SQLITE_CONFIG_MULTITHREAD, +or SQLITE_CONFIG_MUTEX. The return value of this function shows +only the default compile-time setting, not any run-time changes +to that setting.

    + +

    See the threading mode documentation for additional information.

    + +

    Requirements: H10101 H10102 +


    +

    Total Number Of Rows Modified

    int sqlite3_total_changes(sqlite3*);
    +

    +This function returns the number of row changes caused by INSERT, +UPDATE or DELETE statements since the database connection was opened. +The count includes all changes from all +trigger contexts. However, +the count does not include changes used to implement REPLACE constraints, +do rollbacks or ABORT processing, or DROP TABLE processing. The +count does not include rows of views that fire an INSTEAD OF trigger, +though if the INSTEAD OF trigger makes changes of its own, those changes +are counted. +The changes are counted as soon as the statement that makes them is +completed (when the statement handle is passed to sqlite3_reset() or +sqlite3_finalize()).

    + +

    See also the sqlite3_changes() interface and the +count_changes pragma.

    + +

    Requirements: +H12261 H12263

    + +

    If a separate thread makes changes on the same database connection +while sqlite3_total_changes() is running then the value +returned is unpredictable and not meaningful. +


    +

    Unlock Notification

    int sqlite3_unlock_notify(
    +  sqlite3 *pBlocked,                          /* Waiting connection */
    +  void (*xNotify)(void **apArg, int nArg),    /* Callback function to invoke */
    +  void *pNotifyArg                            /* Argument to pass to xNotify */
    +);
    +

    Important: This interface is experimental and is subject to change without notice.

    +When running in shared-cache mode, a database operation may fail with +an SQLITE_LOCKED error if the required locks on the shared-cache or +individual tables within the shared-cache cannot be obtained. See +SQLite Shared-Cache Mode for a description of shared-cache locking. +This API may be used to register a callback that SQLite will invoke +when the connection currently holding the required lock relinquishes it. +This API is only available if the library was compiled with the +SQLITE_ENABLE_UNLOCK_NOTIFY C-preprocessor symbol defined.

    + +

    See Also: Using the SQLite Unlock Notification Feature.

    + +

    Shared-cache locks are released when a database connection concludes +its current transaction, either by committing it or rolling it back.

    + +

    When a connection (known as the blocked connection) fails to obtain a +shared-cache lock and SQLITE_LOCKED is returned to the caller, the +identity of the database connection (the blocking connection) that +has locked the required resource is stored internally. After an +application receives an SQLITE_LOCKED error, it may call the +sqlite3_unlock_notify() method with the blocked connection handle as +the first argument to register for a callback that will be invoked +when the blocking connection's current transaction is concluded. The +callback is invoked from within the sqlite3_step or sqlite3_close +call that concludes the blocking connection's transaction.

    + +

    If sqlite3_unlock_notify() is called in a multi-threaded application, +there is a chance that the blocking connection will have already +concluded its transaction by the time sqlite3_unlock_notify() is invoked. +If this happens, then the specified callback is invoked immediately, +from within the call to sqlite3_unlock_notify().

    + +

    If the blocked connection is attempting to obtain a write-lock on a +shared-cache table, and more than one other connection currently holds +a read-lock on the same table, then SQLite arbitrarily selects one of +the other connections to use as the blocking connection.

    + +

    There may be at most one unlock-notify callback registered by a +blocked connection. If sqlite3_unlock_notify() is called when the +blocked connection already has a registered unlock-notify callback, +then the new callback replaces the old. If sqlite3_unlock_notify() is +called with a NULL pointer as its second argument, then any existing +unlock-notify callback is cancelled. The blocked connection's +unlock-notify callback may also be canceled by closing the blocked +connection using sqlite3_close().

    + +

    The unlock-notify callback is not reentrant. If an application invokes +any sqlite3_xxx API functions from within an unlock-notify callback, a +crash or deadlock may be the result.

    + +

    Unless deadlock is detected (see below), sqlite3_unlock_notify() always +returns SQLITE_OK.

    + +

    Callback Invocation Details

    + +

    When an unlock-notify callback is registered, the application provides a +single void* pointer that is passed to the callback when it is invoked. +However, the signature of the callback function allows SQLite to pass +it an array of void* context pointers. The first argument passed to +an unlock-notify callback is a pointer to an array of void* pointers, +and the second is the number of entries in the array.
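    A sketch of such a callback; signal_waiter(), pBlocked, and pWaiter are hypothetical application-side names, not part of the SQLite interface:

      static void unlock_notify_cb(void **apArg, int nArg){
        int i;
        for(i=0; i<nArg; i++){
          signal_waiter(apArg[i]);   /* hypothetical: wake one blocked waiter */
        }
      }

      /* after an SQLITE_LOCKED error on connection pBlocked: */
      rc = sqlite3_unlock_notify(pBlocked, unlock_notify_cb, (void*)pWaiter);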

    + +

    When a blocking connection's transaction is concluded, there may be +more than one blocked connection that has registered for an unlock-notify +callback. If two or more such blocked connections have specified the +same callback function, then instead of invoking the callback function +multiple times, it is invoked once with the set of void* context pointers +specified by the blocked connections bundled together into an array. +This gives the application an opportunity to prioritize any actions +related to the set of unblocked database connections.

    + +

    Deadlock Detection

    + +

    Assuming that after registering for an unlock-notify callback a +database waits for the callback to be issued before taking any further +action (a reasonable assumption), then using this API may cause the +application to deadlock. For example, if connection X is waiting for +connection Y's transaction to be concluded, and similarly connection +Y is waiting on connection X's transaction, then neither connection +will proceed and the system may remain deadlocked indefinitely.

    + +

    To avoid this scenario, the sqlite3_unlock_notify() performs deadlock +detection. If a given call to sqlite3_unlock_notify() would put the +system in a deadlocked state, then SQLITE_LOCKED is returned and no +unlock-notify callback is registered. The system is said to be in +a deadlocked state if connection A has registered for an unlock-notify +callback on the conclusion of connection B's transaction, and connection +B has itself registered for an unlock-notify callback when connection +A's transaction is concluded. Indirect deadlock is also detected, so +the system is also considered to be deadlocked if connection B has +registered for an unlock-notify callback on the conclusion of connection +C's transaction, where connection C is waiting on connection A. Any +number of levels of indirection are allowed.

    + +

    The "DROP TABLE" Exception

    + +

    When a call to sqlite3_step() returns SQLITE_LOCKED, it is almost +always appropriate to call sqlite3_unlock_notify(). There is however, +one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, +SQLite checks if there are any currently executing SELECT statements +that belong to the same connection. If there are, SQLITE_LOCKED is +returned. In this case there is no "blocking connection", so invoking +sqlite3_unlock_notify() results in the unlock-notify callback being +invoked immediately. If the application then re-attempts the "DROP TABLE" +or "DROP INDEX" query, an infinite loop might be the result.

    + +

    One way around this problem is to check the extended error code returned +by an sqlite3_step() call. If there is a blocking connection, then the +extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in +the special "DROP TABLE/INDEX" case, the extended error code is just +SQLITE_LOCKED. +


    +

    Data Change Notification Callbacks

    void *sqlite3_update_hook(
    +  sqlite3*, 
    +  void(*)(void *,int ,char const *,char const *,sqlite3_int64),
    +  void*
    +);
    +

    +The sqlite3_update_hook() interface registers a callback function +with the database connection identified by the first argument +to be invoked whenever a row is updated, inserted or deleted. +Any callback set by a previous call to this function +for the same database connection is overridden.

    + +

    The second argument is a pointer to the function to invoke when a +row is updated, inserted or deleted. +The first argument to the callback is a copy of the third argument +to sqlite3_update_hook(). +The second callback argument is one of SQLITE_INSERT, SQLITE_DELETE, +or SQLITE_UPDATE, depending on the operation that caused the callback +to be invoked. +The third and fourth arguments to the callback contain pointers to the +database and table name containing the affected row. +The final callback parameter is the rowid of the row. +In the case of an update, this is the rowid after the update takes place.
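    A sketch of a matching callback and its registration (where and how the information is logged is left to the application):

      static void update_cb(void *pArg, int op, const char *zDb,
                            const char *zTbl, sqlite3_int64 rowid){
        const char *zOp = op==SQLITE_INSERT ? "INSERT" :
                          op==SQLITE_DELETE ? "DELETE" : "UPDATE";
        /* e.g. record zOp, zDb, zTbl, and rowid somewhere */
        (void)pArg; (void)zOp;
      }

      sqlite3_update_hook(db, update_cb, 0);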

    + +

    The update hook is not invoked when internal system tables are +modified (i.e. sqlite_master and sqlite_sequence).

    + +

    In the current implementation, the update hook +is not invoked when duplicate rows are deleted because of an +ON CONFLICT REPLACE clause. Nor is the update hook +invoked when rows are deleted using the truncate optimization. +The exceptions defined in this paragraph might change in a future +release of SQLite.

    + +

    The update hook implementation must not do anything that will modify +the database connection that invoked the update hook. Any actions +to modify the database connection must be deferred until after the +completion of the sqlite3_step() call that triggered the update hook. +Note that sqlite3_prepare_v2() and sqlite3_step() both modify their +database connections for the meaning of "modify" in this paragraph.

    + +

    If another function was previously registered, its pArg value +is returned. Otherwise NULL is returned.

    + +

    See also the sqlite3_commit_hook() and sqlite3_rollback_hook() +interfaces.

    + +

    Requirements: +H12971 H12973 H12975 H12977 H12979 H12981 H12983 H12986 +


    +

    User Data For Functions

    void *sqlite3_user_data(sqlite3_context*);
    +

    +The sqlite3_user_data() interface returns a copy of +the pointer that was the pUserData parameter (the 5th parameter) +of the sqlite3_create_function() +and sqlite3_create_function16() routines that originally +registered the application defined function.
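    A sketch of an application-defined function that retrieves its registration-time pointer; multiply_by() and the commented registration call are illustrative, not part of the original documentation:

      static void multiply_by(sqlite3_context *ctx, int argc, sqlite3_value **argv){
        int *pFactor = (int*)sqlite3_user_data(ctx);   /* the 5th create_function arg */
        (void)argc;
        sqlite3_result_int(ctx, sqlite3_value_int(argv[0]) * (*pFactor));
      }

      static int three = 3;
      /* sqlite3_create_function(db, "times3", 1, SQLITE_UTF8, &three,
      **                         multiply_by, 0, 0); */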

    + +

    This routine must be called from the same thread in which +the application-defined function is running.

    + +

    Requirements: +H16243 +


    +

    Result Codes

    #define SQLITE_OK           0   /* Successful result */
    +/* beginning-of-error-codes */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* Internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite3_interrupt()*/
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* NOT USED. Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* NOT USED. Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* Database is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* String or BLOB exceeds size limit */
    +#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_FORMAT      24   /* Auxiliary database format error */
    +#define SQLITE_RANGE       25   /* 2nd parameter to sqlite3_bind out of range */
    +#define SQLITE_NOTADB      26   /* File opened that is not a database file */
    +#define SQLITE_ROW         100  /* sqlite3_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite3_step() has finished executing */
    +/* end-of-error-codes */
    +

    +Many SQLite functions return an integer result code from the set shown +here in order to indicate success or failure.

    + +

    New error codes may be added in future versions of SQLite.

    + +

    See also: extended result codes +


    +

    Flags for the xAccess VFS method

    #define SQLITE_ACCESS_EXISTS    0
    +#define SQLITE_ACCESS_READWRITE 1
    +#define SQLITE_ACCESS_READ      2
    +

    +These integer constants can be used as the third parameter to +the xAccess method of an sqlite3_vfs object. They determine +what kind of permissions the xAccess method is looking for. +With SQLITE_ACCESS_EXISTS, the xAccess method +simply checks whether the file exists. +With SQLITE_ACCESS_READWRITE, the xAccess method +checks whether the file is both readable and writable. +With SQLITE_ACCESS_READ, the xAccess method +checks whether the file is readable. +


    +

    Authorizer Action Codes

    /******************************************* 3rd ************ 4th ***********/
    +#define SQLITE_CREATE_INDEX          1   /* Index Name      Table Name      */
    +#define SQLITE_CREATE_TABLE          2   /* Table Name      NULL            */
    +#define SQLITE_CREATE_TEMP_INDEX     3   /* Index Name      Table Name      */
    +#define SQLITE_CREATE_TEMP_TABLE     4   /* Table Name      NULL            */
    +#define SQLITE_CREATE_TEMP_TRIGGER   5   /* Trigger Name    Table Name      */
    +#define SQLITE_CREATE_TEMP_VIEW      6   /* View Name       NULL            */
    +#define SQLITE_CREATE_TRIGGER        7   /* Trigger Name    Table Name      */
    +#define SQLITE_CREATE_VIEW           8   /* View Name       NULL            */
    +#define SQLITE_DELETE                9   /* Table Name      NULL            */
    +#define SQLITE_DROP_INDEX           10   /* Index Name      Table Name      */
    +#define SQLITE_DROP_TABLE           11   /* Table Name      NULL            */
    +#define SQLITE_DROP_TEMP_INDEX      12   /* Index Name      Table Name      */
    +#define SQLITE_DROP_TEMP_TABLE      13   /* Table Name      NULL            */
    +#define SQLITE_DROP_TEMP_TRIGGER    14   /* Trigger Name    Table Name      */
    +#define SQLITE_DROP_TEMP_VIEW       15   /* View Name       NULL            */
    +#define SQLITE_DROP_TRIGGER         16   /* Trigger Name    Table Name      */
    +#define SQLITE_DROP_VIEW            17   /* View Name       NULL            */
    +#define SQLITE_INSERT               18   /* Table Name      NULL            */
    +#define SQLITE_PRAGMA               19   /* Pragma Name     1st arg or NULL */
    +#define SQLITE_READ                 20   /* Table Name      Column Name     */
    +#define SQLITE_SELECT               21   /* NULL            NULL            */
    +#define SQLITE_TRANSACTION          22   /* Operation       NULL            */
    +#define SQLITE_UPDATE               23   /* Table Name      Column Name     */
    +#define SQLITE_ATTACH               24   /* Filename        NULL            */
    +#define SQLITE_DETACH               25   /* Database Name   NULL            */
    +#define SQLITE_ALTER_TABLE          26   /* Database Name   Table Name      */
    +#define SQLITE_REINDEX              27   /* Index Name      NULL            */
    +#define SQLITE_ANALYZE              28   /* Table Name      NULL            */
    +#define SQLITE_CREATE_VTABLE        29   /* Table Name      Module Name     */
    +#define SQLITE_DROP_VTABLE          30   /* Table Name      Module Name     */
    +#define SQLITE_FUNCTION             31   /* NULL            Function Name   */
    +#define SQLITE_SAVEPOINT            32   /* Operation       Savepoint Name  */
    +#define SQLITE_COPY                  0   /* No longer used */
    +

    +The sqlite3_set_authorizer() interface registers a callback function +that is invoked to authorize certain SQL statement actions. The +second parameter to the callback is an integer code that specifies +what action is being authorized. These are the integer action codes that +the authorizer callback may be passed.

    + +

    These action code values signify what kind of operation is to be +authorized. The 3rd and 4th parameters to the authorization +callback function will be parameters or NULL depending on which of these +codes is used as the second parameter. The 5th parameter to the +authorizer callback is the name of the database ("main", "temp", +etc.) if applicable. The 6th parameter to the authorizer callback +is the name of the inner-most trigger or view that is responsible for +the access attempt or NULL if this access attempt is directly from +top-level SQL code.

    + +

    Requirements: +H12551 H12552 H12553 H12554 +


    +

    Text Encodings

    #define SQLITE_UTF8           1
    +#define SQLITE_UTF16LE        2
    +#define SQLITE_UTF16BE        3
    +#define SQLITE_UTF16          4    /* Use native byte order */
    +#define SQLITE_ANY            5    /* sqlite3_create_function only */
    +#define SQLITE_UTF16_ALIGNED  8    /* sqlite3_create_collation only */
    +

    +These constants define integer codes that represent the various +text encodings supported by SQLite. +


    +

    Fundamental Datatypes

    #define SQLITE_INTEGER  1
    +#define SQLITE_FLOAT    2
    +#define SQLITE_BLOB     4
    +#define SQLITE_NULL     5
    +#ifdef SQLITE_TEXT
    +# undef SQLITE_TEXT
    +#else
    +# define SQLITE_TEXT     3
    +#endif
    +#define SQLITE3_TEXT     3
    +

    + Every value in SQLite has one of five fundamental datatypes:

    + +

      +
    • 64-bit signed integer +
    • 64-bit IEEE floating point number +
    • string +
    • BLOB +
    • NULL +

    + +

    These constants are codes for each of those types.

    + +

    Note that the SQLITE_TEXT constant was also used in SQLite version 2 +for a completely different meaning. Software that links against both +SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not +SQLITE_TEXT. +


    +

    Configuration Options

    #define SQLITE_CONFIG_SINGLETHREAD  1  /* nil */
    +#define SQLITE_CONFIG_MULTITHREAD   2  /* nil */
    +#define SQLITE_CONFIG_SERIALIZED    3  /* nil */
    +#define SQLITE_CONFIG_MALLOC        4  /* sqlite3_mem_methods* */
    +#define SQLITE_CONFIG_GETMALLOC     5  /* sqlite3_mem_methods* */
    +#define SQLITE_CONFIG_SCRATCH       6  /* void*, int sz, int N */
    +#define SQLITE_CONFIG_PAGECACHE     7  /* void*, int sz, int N */
    +#define SQLITE_CONFIG_HEAP          8  /* void*, int nByte, int min */
    +#define SQLITE_CONFIG_MEMSTATUS     9  /* boolean */
    +#define SQLITE_CONFIG_MUTEX        10  /* sqlite3_mutex_methods* */
    +#define SQLITE_CONFIG_GETMUTEX     11  /* sqlite3_mutex_methods* */
    +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ 
    +#define SQLITE_CONFIG_LOOKASIDE    13  /* int int */
    +#define SQLITE_CONFIG_PCACHE       14  /* sqlite3_pcache_methods* */
    +#define SQLITE_CONFIG_GETPCACHE    15  /* sqlite3_pcache_methods* */
    +

    Important: This interface is experimental and is subject to change without notice.

    +These constants are the available integer configuration options that +can be passed as the first argument to the sqlite3_config() interface.

    + +

    New configuration options may be added in future releases of SQLite. +Existing configuration options might be discontinued. Applications +should check the return code from sqlite3_config() to make sure that +the call worked. The sqlite3_config() interface will return a +non-zero error code if a discontinued or unsupported configuration option +is invoked.
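    For example, a sketch of selecting a threading mode before the library is in use and checking the return code as recommended above:

      /* must be called before sqlite3_initialize() or any other use of SQLite */
      if( sqlite3_config(SQLITE_CONFIG_MULTITHREAD)!=SQLITE_OK ){
        /* option unavailable in this build; the default mode stays in effect */
      }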

    + +

    +
    SQLITE_CONFIG_SINGLETHREAD
    +
    There are no arguments to this option. This option disables +all mutexing and puts SQLite into a mode where it can only be used +by a single thread.

    + +

    SQLITE_CONFIG_MULTITHREAD
    +
    There are no arguments to this option. This option disables +mutexing on database connection and prepared statement objects. +The application is responsible for serializing access to +database connections and prepared statements. But other mutexes +are enabled so that SQLite will be safe to use in a multi-threaded +environment as long as no two threads attempt to use the same +database connection at the same time. See the threading mode +documentation for additional information.

    + +

    SQLITE_CONFIG_SERIALIZED
    +
    There are no arguments to this option. This option enables +all mutexes including the recursive +mutexes on database connection and prepared statement objects. +In this mode (which is the default when SQLite is compiled with +SQLITE_THREADSAFE=1) the SQLite library will itself serialize access +to database connections and prepared statements so that the +application is free to use the same database connection or the +same prepared statement in different threads at the same time. +See the threading mode documentation for additional information.

    + +

    SQLITE_CONFIG_MALLOC
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mem_methods structure. The argument specifies +alternative low-level memory allocation routines to be used in place of +the memory allocation routines built into SQLite.

    + +

    SQLITE_CONFIG_GETMALLOC
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mem_methods structure. The sqlite3_mem_methods +structure is filled with the currently defined memory allocation routines. +This option can be used to overload the default memory allocation +routines with a wrapper that simulates memory allocation failure or +tracks memory usage, for example.

    + +

    SQLITE_CONFIG_MEMSTATUS
    +
    This option takes a single argument of type int, interpreted as a +boolean, which enables or disables the collection of memory allocation +statistics. When disabled, the following SQLite interfaces become +non-operational: + +

    + +

    SQLITE_CONFIG_SCRATCH
    +
    This option specifies a static memory buffer that SQLite can use for +scratch memory. There are three arguments: A pointer to an 8-byte +aligned memory buffer from which the scratch allocations will be +drawn, the size of each scratch allocation (sz), +and the maximum number of scratch allocations (N). The sz +argument must be a multiple of 16. The sz parameter should be a few bytes +larger than the actual scratch space required due to internal overhead. +The first argument should point to an 8-byte aligned buffer +of at least sz*N bytes of memory. +SQLite will use no more than one scratch buffer at once per thread, so +N should be set to the expected maximum number of threads. The sz +parameter should be 6 times the largest database page size. +Scratch buffers are used as part of the btree balance operation. If +the btree balancer needs additional memory beyond what is provided by +scratch buffers or if no scratch buffer space is specified, then SQLite +goes to sqlite3_malloc() to obtain the memory it needs.

    + +

    SQLITE_CONFIG_PAGECACHE
    +
    This option specifies a static memory buffer that SQLite can use for +the database page cache with the default page cache implementation. +This configuration should not be used if an application-defined page +cache implementation is loaded using the SQLITE_CONFIG_PCACHE option. +There are three arguments to this option: A pointer to 8-byte aligned +memory, the size of each page buffer (sz), and the number of pages (N). +The sz argument should be the size of the largest database page +(a power of two between 512 and 32768) plus a little extra for each +page header. The page header size is 20 to 40 bytes depending on +the host architecture. It is harmless, apart from the wasted memory, +to make sz a little too large. The first +argument should point to an allocation of at least sz*N bytes of memory. +SQLite will use the memory provided by the first argument to satisfy its +memory needs for the first N pages that it adds to cache. If additional +page cache memory is needed beyond what is provided by this option, then +SQLite goes to sqlite3_malloc() for the additional storage space. +The implementation might use one or more of the N buffers to hold +memory accounting information. The pointer in the first argument must +be aligned to an 8-byte boundary or subsequent behavior of SQLite +will be undefined.

    + +

    SQLITE_CONFIG_HEAP
    +
    This option specifies a static memory buffer that SQLite will use +for all of its dynamic memory allocation needs beyond those provided +for by SQLITE_CONFIG_SCRATCH and SQLITE_CONFIG_PAGECACHE. +There are three arguments: An 8-byte aligned pointer to the memory, +the number of bytes in the memory buffer, and the minimum allocation size. +If the first pointer (the memory pointer) is NULL, then SQLite reverts +to using its default memory allocator (the system malloc() implementation), +undoing any prior invocation of SQLITE_CONFIG_MALLOC. If the +memory pointer is not NULL and either SQLITE_ENABLE_MEMSYS3 or +SQLITE_ENABLE_MEMSYS5 is defined, then the alternative memory +allocator is engaged to handle all of SQLite's memory allocation needs. +The first pointer (the memory pointer) must be aligned to an 8-byte +boundary or subsequent behavior of SQLite will be undefined.

    + +

    SQLITE_CONFIG_MUTEX
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mutex_methods structure. The argument specifies +alternative low-level mutex routines to be used in place of +the mutex routines built into SQLite.

    + +

    SQLITE_CONFIG_GETMUTEX
    +
    This option takes a single argument which is a pointer to an +instance of the sqlite3_mutex_methods structure. The +sqlite3_mutex_methods +structure is filled with the currently defined mutex routines. +This option can be used to overload the default mutex allocation +routines with a wrapper used to track mutex usage for performance +profiling or testing, for example.

    + +

    SQLITE_CONFIG_LOOKASIDE
    +
    This option takes two arguments that determine the default +memory allocation lookaside optimization. The first argument is the +size of each lookaside buffer slot and the second is the number of +slots allocated to each database connection.

    + +

    SQLITE_CONFIG_PCACHE
    +
    This option takes a single argument which is a pointer to +an sqlite3_pcache_methods object. This object specifies the interface +to a custom page cache implementation. SQLite makes a copy of the +object and uses it for page cache memory allocations.

    + +

    SQLITE_CONFIG_GETPCACHE
    +
    This option takes a single argument which is a pointer to an +sqlite3_pcache_methods object. SQLite copies the current +page cache implementation into that object.

    + +

    +


    +

    Authorizer Return Codes

    #define SQLITE_DENY   1   /* Abort the SQL statement with an error */
    +#define SQLITE_IGNORE 2   /* Don't allow access, but don't generate an error */
    +

    +The authorizer callback function must +return either SQLITE_OK or one of these two constants in order +to signal SQLite whether or not the action is permitted. See the +authorizer documentation for additional +information. +


    +

    Standard File Control Opcodes

    #define SQLITE_FCNTL_LOCKSTATE        1
    +#define SQLITE_GET_LOCKPROXYFILE      2
    +#define SQLITE_SET_LOCKPROXYFILE      3
    +#define SQLITE_LAST_ERRNO             4
    +

    +These integer constants are opcodes for the xFileControl method +of the sqlite3_io_methods object and for the sqlite3_file_control() +interface.

    + +

    The SQLITE_FCNTL_LOCKSTATE opcode is used for debugging. This +opcode causes the xFileControl method to write the current state of +the lock (one of SQLITE_LOCK_NONE, SQLITE_LOCK_SHARED, +SQLITE_LOCK_RESERVED, SQLITE_LOCK_PENDING, or SQLITE_LOCK_EXCLUSIVE) +into an integer that the pArg argument points to. This capability +is used during testing and only needs to be supported when SQLITE_TEST +is defined. +
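    A small sketch of querying the lock state through sqlite3_file_control(); it assumes a build whose VFS supports the opcode (for example a build with SQLITE_TEST defined), otherwise the call simply fails:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Ask the VFS for the current lock state of the "main" database file. */
    static void show_lock_state(sqlite3 *db){
      int lockState = -1;
      int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCKSTATE, &lockState);
      if( rc==SQLITE_OK ){
        printf("lock state = %d\n", lockState);  /* SQLITE_LOCK_NONE..SQLITE_LOCK_EXCLUSIVE */
      }
    }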


    +

    Virtual Table Indexing Information

    struct sqlite3_index_info {
    +  /* Inputs */
    +  int nConstraint;           /* Number of entries in aConstraint */
    +  struct sqlite3_index_constraint {
    +     int iColumn;              /* Column on left-hand side of constraint */
    +     unsigned char op;         /* Constraint operator */
    +     unsigned char usable;     /* True if this constraint is usable */
    +     int iTermOffset;          /* Used internally - xBestIndex should ignore */
    +  } *aConstraint;            /* Table of WHERE clause constraints */
    +  int nOrderBy;              /* Number of terms in the ORDER BY clause */
    +  struct sqlite3_index_orderby {
    +     int iColumn;              /* Column number */
    +     unsigned char desc;       /* True for DESC.  False for ASC. */
    +  } *aOrderBy;               /* The ORDER BY clause */
    +  /* Outputs */
    +  struct sqlite3_index_constraint_usage {
    +    int argvIndex;           /* if >0, constraint is part of argv to xFilter */
    +    unsigned char omit;      /* Do not code a test for this constraint */
    +  } *aConstraintUsage;
    +  int idxNum;                /* Number used to identify the index */
    +  char *idxStr;              /* String, possibly obtained from sqlite3_malloc */
    +  int needToFreeIdxStr;      /* Free idxStr using sqlite3_free() if true */
    +  int orderByConsumed;       /* True if output is already ordered */
    +  double estimatedCost;      /* Estimated cost of using this index */
    +};
    +#define SQLITE_INDEX_CONSTRAINT_EQ    2
    +#define SQLITE_INDEX_CONSTRAINT_GT    4
    +#define SQLITE_INDEX_CONSTRAINT_LE    8
    +#define SQLITE_INDEX_CONSTRAINT_LT    16
    +#define SQLITE_INDEX_CONSTRAINT_GE    32
    +#define SQLITE_INDEX_CONSTRAINT_MATCH 64
    +

    Important: This interface is experimental and is subject to change without notice.

    +The sqlite3_index_info structure and its substructures are used to +pass information into and receive the reply from the xBestIndex +method of a virtual table module. The fields under **Inputs** are the +inputs to xBestIndex and are read-only. xBestIndex inserts its +results into the **Outputs** fields.

    + +

    The aConstraint[] array records WHERE clause constraints of the form:

    + +

    column OP expr

    + +

    where OP is =, <, <=, >, or >=. The particular operator is +stored in aConstraint[].op. The index of the column is stored in +aConstraint[].iColumn. aConstraint[].usable is TRUE if the +expr on the right-hand side can be evaluated (and thus the constraint +is usable) and false if it cannot.

    + +

    The optimizer automatically inverts terms of the form "expr OP column" +and makes other simplifications to the WHERE clause in an attempt to +get as many WHERE clause terms into the form shown above as possible. +The aConstraint[] array only reports WHERE clause terms in the correct +form that refer to the particular virtual table being queried.

    + +

    Information about the ORDER BY clause is stored in aOrderBy[]. +Each term of aOrderBy records a column of the ORDER BY clause.

    + +

    The xBestIndex method must fill aConstraintUsage[] with information +about what parameters to pass to xFilter. If argvIndex>0 then +the right-hand side of the corresponding aConstraint[] is evaluated +and becomes the argvIndex-th entry in argv. If aConstraintUsage[].omit +is true, then the constraint is assumed to be fully handled by the +virtual table and is not checked again by SQLite.

    + +

    The idxNum and idxStr values are recorded and passed into the +xFilter method. +sqlite3_free() is used to free idxStr if and only if +needToFreeIdxStr is true.

    + +

    A true orderByConsumed value means that output from xFilter/xNext will occur in +the correct order to satisfy the ORDER BY clause so that no separate +sorting step is required.

    + +

    The estimatedCost value is an estimate of the cost of doing the +particular lookup. A full scan of a table with N entries should have +a cost of N. A binary search of a table of N entries should have a +cost of approximately log(N). +
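    A minimal xBestIndex sketch for a hypothetical virtual table, showing how the input constraints are inspected and the output fields filled in; the column numbers, idxNum values, and cost figures are illustrative assumptions, not anything prescribed by SQLite:

    #include <sqlite3.h>

    /* If there is a usable equality constraint on column 0, request its value as
    ** argv[0] of xFilter and report a cheap lookup; otherwise report a full scan. */
    static int xBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
      int i;
      for(i=0; i<pInfo->nConstraint; i++){
        const struct sqlite3_index_constraint *p = &pInfo->aConstraint[i];
        if( p->usable && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){
          pInfo->aConstraintUsage[i].argvIndex = 1;  /* becomes argv[0] in xFilter */
          pInfo->aConstraintUsage[i].omit = 1;       /* vtab enforces it; SQLite need not re-check */
          pInfo->idxNum = 1;                         /* tells xFilter which plan was chosen */
          pInfo->estimatedCost = 1.0;                /* roughly log(N) for a keyed lookup */
          return SQLITE_OK;
        }
      }
      pInfo->idxNum = 0;
      pInfo->estimatedCost = 1000000.0;              /* full-scan cost */
      return SQLITE_OK;
    }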


    +

    Device Characteristics

    #define SQLITE_IOCAP_ATOMIC          0x00000001
    +#define SQLITE_IOCAP_ATOMIC512       0x00000002
    +#define SQLITE_IOCAP_ATOMIC1K        0x00000004
    +#define SQLITE_IOCAP_ATOMIC2K        0x00000008
    +#define SQLITE_IOCAP_ATOMIC4K        0x00000010
    +#define SQLITE_IOCAP_ATOMIC8K        0x00000020
    +#define SQLITE_IOCAP_ATOMIC16K       0x00000040
    +#define SQLITE_IOCAP_ATOMIC32K       0x00000080
    +#define SQLITE_IOCAP_ATOMIC64K       0x00000100
    +#define SQLITE_IOCAP_SAFE_APPEND     0x00000200
    +#define SQLITE_IOCAP_SEQUENTIAL      0x00000400
    +

    +The xDeviceCharacteristics method of the sqlite3_io_methods +object returns an integer which is a vector of these +bit values expressing I/O characteristics of the mass storage +device that holds the file that the sqlite3_io_methods +refers to.

    + +

    The SQLITE_IOCAP_ATOMIC property means that all writes of +any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +mean that writes of blocks that are nnn bytes in size and +are aligned to an address which is an integer multiple of +nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +that when data is appended to a file, the data is appended +first then the size of the file is extended, never the other +way around. The SQLITE_IOCAP_SEQUENTIAL property means that +information is written to disk in the same order as calls +to xWrite(). +


    +

    Extended Result Codes

    #define SQLITE_IOERR_READ              (SQLITE_IOERR | (1<<8))
    +#define SQLITE_IOERR_SHORT_READ        (SQLITE_IOERR | (2<<8))
    +#define SQLITE_IOERR_WRITE             (SQLITE_IOERR | (3<<8))
    +#define SQLITE_IOERR_FSYNC             (SQLITE_IOERR | (4<<8))
    +#define SQLITE_IOERR_DIR_FSYNC         (SQLITE_IOERR | (5<<8))
    +#define SQLITE_IOERR_TRUNCATE          (SQLITE_IOERR | (6<<8))
    +#define SQLITE_IOERR_FSTAT             (SQLITE_IOERR | (7<<8))
    +#define SQLITE_IOERR_UNLOCK            (SQLITE_IOERR | (8<<8))
    +#define SQLITE_IOERR_RDLOCK            (SQLITE_IOERR | (9<<8))
    +#define SQLITE_IOERR_DELETE            (SQLITE_IOERR | (10<<8))
    +#define SQLITE_IOERR_BLOCKED           (SQLITE_IOERR | (11<<8))
    +#define SQLITE_IOERR_NOMEM             (SQLITE_IOERR | (12<<8))
    +#define SQLITE_IOERR_ACCESS            (SQLITE_IOERR | (13<<8))
    +#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8))
    +#define SQLITE_IOERR_LOCK              (SQLITE_IOERR | (15<<8))
    +#define SQLITE_IOERR_CLOSE             (SQLITE_IOERR | (16<<8))
    +#define SQLITE_IOERR_DIR_CLOSE         (SQLITE_IOERR | (17<<8))
    +#define SQLITE_LOCKED_SHAREDCACHE      (SQLITE_LOCKED | (1<<8) )
    +

    +In its default configuration, SQLite API routines return one of 26 integer +result codes. However, experience has shown that many of +these result codes are too coarse-grained. They do not provide as +much information about problems as programmers might like. In an effort to +address this, newer versions of SQLite (version 3.3.8 and later) include +support for additional result codes that provide more detailed information +about errors. The extended result codes are enabled or disabled +on a per database connection basis using the +sqlite3_extended_result_codes() API.

    + +

    Some of the available extended result codes are listed here. +One may expect the number of extended result codes to expand +over time. Software that uses extended result codes should expect +to see new result codes in future releases of SQLite.

    + +

    The SQLITE_OK result code will never be extended. It will always +be exactly zero. +
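    A short sketch of enabling extended result codes on one connection and reading the extended code after a failure; the SQL text and table name are placeholders:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Enable extended result codes, then report the extended code of the most
    ** recent failed call on this connection. */
    static void report_extended(sqlite3 *db){
      sqlite3_extended_result_codes(db, 1);   /* 1 = enable, 0 = disable */
      if( sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0)!=SQLITE_OK ){
        printf("extended error code: %d\n", sqlite3_extended_errcode(db));
      }
    }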


    +

    Run-Time Limit Categories

    #define SQLITE_LIMIT_LENGTH                    0
    +#define SQLITE_LIMIT_SQL_LENGTH                1
    +#define SQLITE_LIMIT_COLUMN                    2
    +#define SQLITE_LIMIT_EXPR_DEPTH                3
    +#define SQLITE_LIMIT_COMPOUND_SELECT           4
    +#define SQLITE_LIMIT_VDBE_OP                   5
    +#define SQLITE_LIMIT_FUNCTION_ARG              6
    +#define SQLITE_LIMIT_ATTACHED                  7
    +#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH       8
    +#define SQLITE_LIMIT_VARIABLE_NUMBER           9
    +

    +These constants define various performance limits +that can be lowered at run-time using sqlite3_limit(). +A synopsis of the meaning of each limit is shown below; a usage +sketch follows the list. +Additional information is available at Limits in SQLite.

    + +

    +
    SQLITE_LIMIT_LENGTH
    +
    The maximum size of any string or BLOB or table row.

    + +

    SQLITE_LIMIT_SQL_LENGTH
    +
    The maximum length of an SQL statement.

    + +

    SQLITE_LIMIT_COLUMN
    +
    The maximum number of columns in a table definition or in the +result set of a SELECT or the maximum number of columns in an index +or in an ORDER BY or GROUP BY clause.

    + +

    SQLITE_LIMIT_EXPR_DEPTH
    +
    The maximum depth of the parse tree on any expression.

    + +

    SQLITE_LIMIT_COMPOUND_SELECT
    +
    The maximum number of terms in a compound SELECT statement.

    + +

    SQLITE_LIMIT_VDBE_OP
    +
    The maximum number of instructions in a virtual machine program +used to implement an SQL statement.

    + +

    SQLITE_LIMIT_FUNCTION_ARG
    +
    The maximum number of arguments on a function.

    + +

    SQLITE_LIMIT_ATTACHED
    +
    The maximum number of attached databases.

    + +

    SQLITE_LIMIT_LIKE_PATTERN_LENGTH
    +
    The maximum length of the pattern argument to the LIKE or +GLOB operators.

    + +

    SQLITE_LIMIT_VARIABLE_NUMBER
    +
    The maximum number of variables in an SQL statement that can +be bound.
    +
    +
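    Given the categories above, lowering a limit on a single connection is a one-line call; sqlite3_limit() returns the previous value, and passing a negative new value reads a limit without changing it. The particular numbers below are only an example:

    #include <sqlite3.h>
    #include <stdio.h>

    static void restrict_connection(sqlite3 *db){
      int oldLen = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
      sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 2);
      printf("previous LENGTH limit was %d\n", oldLen);
      printf("current ATTACHED limit is %d\n",
             sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1));  /* -1 = query only */
    }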


    +

    File Locking Levels

    #define SQLITE_LOCK_NONE          0
    +#define SQLITE_LOCK_SHARED        1
    +#define SQLITE_LOCK_RESERVED      2
    +#define SQLITE_LOCK_PENDING       3
    +#define SQLITE_LOCK_EXCLUSIVE     4
    +

    +SQLite uses one of these integer values as the second +argument to calls it makes to the xLock() and xUnlock() methods +of an sqlite3_io_methods object. +


    +

    Mutex Types

    #define SQLITE_MUTEX_FAST             0
    +#define SQLITE_MUTEX_RECURSIVE        1
    +#define SQLITE_MUTEX_STATIC_MASTER    2
    +#define SQLITE_MUTEX_STATIC_MEM       3  /* sqlite3_malloc() */
    +#define SQLITE_MUTEX_STATIC_MEM2      4  /* NOT USED */
    +#define SQLITE_MUTEX_STATIC_OPEN      4  /* sqlite3BtreeOpen() */
    +#define SQLITE_MUTEX_STATIC_PRNG      5  /* sqlite3_random() */
    +#define SQLITE_MUTEX_STATIC_LRU       6  /* lru page list */
    +#define SQLITE_MUTEX_STATIC_LRU2      7  /* lru page list */
    +

    +The sqlite3_mutex_alloc() interface takes a single argument +which is one of these integer constants.

    + +

    The set of static mutexes may change from one SQLite release to the +next. Applications that override the built-in mutex logic must be +prepared to accommodate additional static mutexes. +


    +

    Flags For File Open Operations

    #define SQLITE_OPEN_READONLY         0x00000001  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_READWRITE        0x00000002  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_CREATE           0x00000004  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_DELETEONCLOSE    0x00000008  /* VFS only */
    +#define SQLITE_OPEN_EXCLUSIVE        0x00000010  /* VFS only */
    +#define SQLITE_OPEN_MAIN_DB          0x00000100  /* VFS only */
    +#define SQLITE_OPEN_TEMP_DB          0x00000200  /* VFS only */
    +#define SQLITE_OPEN_TRANSIENT_DB     0x00000400  /* VFS only */
    +#define SQLITE_OPEN_MAIN_JOURNAL     0x00000800  /* VFS only */
    +#define SQLITE_OPEN_TEMP_JOURNAL     0x00001000  /* VFS only */
    +#define SQLITE_OPEN_SUBJOURNAL       0x00002000  /* VFS only */
    +#define SQLITE_OPEN_MASTER_JOURNAL   0x00004000  /* VFS only */
    +#define SQLITE_OPEN_NOMUTEX          0x00008000  /* Ok for sqlite3_open_v2() */
    +#define SQLITE_OPEN_FULLMUTEX        0x00010000  /* Ok for sqlite3_open_v2() */
    +

    +These bit values are intended for use in the +3rd parameter to the sqlite3_open_v2() interface and +in the 4th parameter to the xOpen method of the +sqlite3_vfs object. +


    +

    Constants Defining Special Destructor Behavior

    typedef void (*sqlite3_destructor_type)(void*);
    +#define SQLITE_STATIC      ((sqlite3_destructor_type)0)
    +#define SQLITE_TRANSIENT   ((sqlite3_destructor_type)-1)
    +

    +These are special values for the destructor that is passed in as the +final argument to routines like sqlite3_result_blob(). If the destructor +argument is SQLITE_STATIC, it means that the content pointer is constant +and will never change. It does not need to be destroyed. The +SQLITE_TRANSIENT value means that the content will likely change in +the near future and that SQLite should make its own private copy of +the content before returning.

    + +

    The typedef is necessary to work around problems in certain +C++ compilers. See ticket #2191. +
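    A small sketch showing both destructor constants with sqlite3_bind_text(); the statement and its two parameters are hypothetical. A string literal lives for the whole program, so SQLITE_STATIC avoids a copy, while a stack buffer is about to disappear, so SQLITE_TRANSIENT asks SQLite to copy it first:

    #include <sqlite3.h>
    #include <string.h>

    static int bind_both(sqlite3_stmt *pStmt){
      char tmp[32];
      int rc = sqlite3_bind_text(pStmt, 1, "constant text", -1, SQLITE_STATIC);
      if( rc!=SQLITE_OK ) return rc;
      strcpy(tmp, "short-lived text");
      return sqlite3_bind_text(pStmt, 2, tmp, -1, SQLITE_TRANSIENT);
    }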


    +

    Status Parameters

    #define SQLITE_STATUS_MEMORY_USED          0
    +#define SQLITE_STATUS_PAGECACHE_USED       1
    +#define SQLITE_STATUS_PAGECACHE_OVERFLOW   2
    +#define SQLITE_STATUS_SCRATCH_USED         3
    +#define SQLITE_STATUS_SCRATCH_OVERFLOW     4
    +#define SQLITE_STATUS_MALLOC_SIZE          5
    +#define SQLITE_STATUS_PARSER_STACK         6
    +#define SQLITE_STATUS_PAGECACHE_SIZE       7
    +#define SQLITE_STATUS_SCRATCH_SIZE         8
    +

    Important: This interface is experimental and is subject to change without notice.

    +These integer constants designate various run-time status parameters +that can be returned by sqlite3_status().

    + +

    +
    SQLITE_STATUS_MEMORY_USED
    +
    This parameter is the current amount of memory checked out +using sqlite3_malloc(), either directly or indirectly. The +figure includes calls made to sqlite3_malloc() by the application +and internal memory usage by the SQLite library. Scratch memory +controlled by SQLITE_CONFIG_SCRATCH and auxiliary page-cache +memory controlled by SQLITE_CONFIG_PAGECACHE is not included in +this parameter. The amount returned is the sum of the allocation +sizes as reported by the xSize method in sqlite3_mem_methods.

    + +

    SQLITE_STATUS_MALLOC_SIZE
    +
    This parameter records the largest memory allocation request +handed to sqlite3_malloc() or sqlite3_realloc() (or their +internal equivalents). Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_PAGECACHE_USED
    +
    This parameter returns the number of pages used out of the +pagecache memory allocator that was configured using +SQLITE_CONFIG_PAGECACHE. The +value returned is in pages, not in bytes.

    + +

    SQLITE_STATUS_PAGECACHE_OVERFLOW
    +
    This parameter returns the number of bytes of page cache +allocation which could not be satisfied by the SQLITE_CONFIG_PAGECACHE +buffer and were forced to overflow to sqlite3_malloc(). The +returned value includes allocations that overflowed because they +were too large (they were larger than the "sz" parameter to +SQLITE_CONFIG_PAGECACHE) and allocations that overflowed because +no space was left in the page cache.

    + +

    SQLITE_STATUS_PAGECACHE_SIZE
    +
    This parameter records the largest memory allocation request +handed to the pagecache memory allocator. Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_SCRATCH_USED
    +
    This parameter returns the number of allocations used out of the +scratch memory allocator configured using +SQLITE_CONFIG_SCRATCH. The value returned is in allocations, not +in bytes. Since a single thread may only have one scratch allocation +outstanding at a time, this parameter also reports the number of threads +using scratch memory at the same time.

    + +

    SQLITE_STATUS_SCRATCH_OVERFLOW
    +
    This parameter returns the number of bytes of scratch memory +allocation which could not be satisfied by the SQLITE_CONFIG_SCRATCH +buffer and were forced to overflow to sqlite3_malloc(). The values +returned include overflows because the requested allocation was too +large (that is, because the requested allocation was larger than the +"sz" parameter to SQLITE_CONFIG_SCRATCH) and because no scratch buffer +slots were available. +

    + +

    SQLITE_STATUS_SCRATCH_SIZE
    +
    This parameter records the largest memory allocation request +handed to the scratch memory allocator. Only the value returned in the +*pHighwater parameter to sqlite3_status() is of interest. +The value written into the *pCurrent parameter is undefined.

    + +

    SQLITE_STATUS_PARSER_STACK
    +
    This parameter records the deepest parser stack. It is only +meaningful if SQLite is compiled with YYTRACKMAXSTACKDEPTH.
    +

    + +

    New status parameters may be added from time to time. +
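    Given the parameters above, a minimal query of overall memory usage looks like this; the final argument of 1 also resets the high-water mark:

    #include <sqlite3.h>
    #include <stdio.h>

    static void show_memory_status(void){
      int cur = 0, hi = 0;
      if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 1)==SQLITE_OK ){
        printf("memory used: %d bytes now, %d bytes at peak\n", cur, hi);
      }
    }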


    +

    Status Parameters for prepared statements

    #define SQLITE_STMTSTATUS_FULLSCAN_STEP     1
    +#define SQLITE_STMTSTATUS_SORT              2
    +

    Important: This interface is experimental and is subject to change without notice.

    +These preprocessor macros define integer codes that name counter +values associated with the sqlite3_stmt_status() interface. +The meanings of the various counters are as follows:

    + +

    +
    SQLITE_STMTSTATUS_FULLSCAN_STEP
    +
    This is the number of times that SQLite has stepped forward in +a table as part of a full table scan. Large numbers for this counter +may indicate opportunities for performance improvement through +careful use of indices.

    + +

    SQLITE_STMTSTATUS_SORT
    +
    This is the number of sort operations that have occurred. +A non-zero value in this counter may indicate an opportunity to +improve performance through careful use of indices.

    + +

    +
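    A short sketch of reading the two counters above after a statement has been run; passing 0 as the last argument leaves the counters unchanged:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Large values here usually suggest a missing index. */
    static void check_stmt_counters(sqlite3_stmt *pStmt){
      int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
      int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
      printf("full-scan steps: %d, sorts: %d\n", nScan, nSort);
    }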


    +

    Synchronization Type Flags

    #define SQLITE_SYNC_NORMAL        0x00002
    +#define SQLITE_SYNC_FULL          0x00003
    +#define SQLITE_SYNC_DATAONLY      0x00010
    +

    +When SQLite invokes the xSync() method of an +sqlite3_io_methods object it uses a combination of +these integer values as the second argument.

    + +

    When the SQLITE_SYNC_DATAONLY flag is used, it means that the +sync operation only needs to flush data to mass storage. Inode +information need not be flushed. If the lower four bits of the flag +equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. +If the lower four bits equal SQLITE_SYNC_FULL, that means +to use Mac OS X style fullsync instead of fsync(). +


    +

    Testing Interface Operation Codes

    #define SQLITE_TESTCTRL_PRNG_SAVE                5
    +#define SQLITE_TESTCTRL_PRNG_RESTORE             6
    +#define SQLITE_TESTCTRL_PRNG_RESET               7
    +#define SQLITE_TESTCTRL_BITVEC_TEST              8
    +#define SQLITE_TESTCTRL_FAULT_INSTALL            9
    +#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS     10
    +#define SQLITE_TESTCTRL_PENDING_BYTE            11
    +#define SQLITE_TESTCTRL_ASSERT                  12
    +#define SQLITE_TESTCTRL_ALWAYS                  13
    +

    +These constants are the valid operation code parameters used +as the first argument to sqlite3_test_control().

    + +

    These parameters and their meanings are subject to change +without notice. These values are for testing purposes only. +Applications should not use any of these parameters or the +sqlite3_test_control() interface. +


    +

    Compile-Time Library Version Numbers

    #define SQLITE_VERSION         "3.6.16"
    +#define SQLITE_VERSION_NUMBER  3006016
    +

    +The SQLITE_VERSION and SQLITE_VERSION_NUMBER #defines in +the sqlite3.h file specify the version of SQLite with which +that header file is associated.

    + +

    The "version" of SQLite is a string of the form "X.Y.Z". +The phrase "alpha" or "beta" might be appended after the Z. +The X value is major version number always 3 in SQLite3. +The X value only changes when backwards compatibility is +broken and we intend to never break backwards compatibility. +The Y value is the minor version number and only changes when +there are major feature enhancements that are forwards compatible +but not backwards compatible. +The Z value is the release number and is incremented with +each release but resets back to 0 whenever Y is incremented.

    + +

    See also: sqlite3_libversion() and sqlite3_libversion_number().
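    A small sketch comparing the header the program was compiled against with the library actually linked at run time:

    #include <sqlite3.h>
    #include <stdio.h>

    int check_sqlite_version(void){
      printf("compiled against %s (%d), running %s (%d)\n",
             SQLITE_VERSION, SQLITE_VERSION_NUMBER,
             sqlite3_libversion(), sqlite3_libversion_number());
      return sqlite3_libversion_number()==SQLITE_VERSION_NUMBER;
    }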

    + +

    Requirements: H10011 H10014 +


    +

    64-Bit Integer Types

    #ifdef SQLITE_INT64_TYPE
    +  typedef SQLITE_INT64_TYPE sqlite_int64;
    +  typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
    +#elif defined(_MSC_VER) || defined(__BORLANDC__)
    +  typedef __int64 sqlite_int64;
    +  typedef unsigned __int64 sqlite_uint64;
    +#else
    +  typedef long long int sqlite_int64;
    +  typedef unsigned long long int sqlite_uint64;
    +#endif
    +typedef sqlite_int64 sqlite3_int64;
    +typedef sqlite_uint64 sqlite3_uint64;
    +

    +Because there is no cross-platform way to specify 64-bit integer types, +SQLite includes typedefs for 64-bit signed and unsigned integers.

    + +

    The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. +The sqlite_int64 and sqlite_uint64 types are supported for backwards +compatibility only.

    + +

    Requirements: H10201 H10202 +


    +

    Virtual Table Object

    struct sqlite3_module {
    +  int iVersion;
    +  int (*xCreate)(sqlite3*, void *pAux,
    +               int argc, const char *const*argv,
    +               sqlite3_vtab **ppVTab, char**);
    +  int (*xConnect)(sqlite3*, void *pAux,
    +               int argc, const char *const*argv,
    +               sqlite3_vtab **ppVTab, char**);
    +  int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
    +  int (*xDisconnect)(sqlite3_vtab *pVTab);
    +  int (*xDestroy)(sqlite3_vtab *pVTab);
    +  int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
    +  int (*xClose)(sqlite3_vtab_cursor*);
    +  int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
    +                int argc, sqlite3_value **argv);
    +  int (*xNext)(sqlite3_vtab_cursor*);
    +  int (*xEof)(sqlite3_vtab_cursor*);
    +  int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
    +  int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
    +  int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
    +  int (*xBegin)(sqlite3_vtab *pVTab);
    +  int (*xSync)(sqlite3_vtab *pVTab);
    +  int (*xCommit)(sqlite3_vtab *pVTab);
    +  int (*xRollback)(sqlite3_vtab *pVTab);
    +  int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
    +                       void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
    +                       void **ppArg);
    +  int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +This structure, sometimes called a "virtual table module", +defines the implementation of a virtual table. +This structure consists mostly of methods for the module.

    + +

    A virtual table module is created by filling in a persistent +instance of this structure and passing a pointer to that instance +to sqlite3_create_module() or sqlite3_create_module_v2(). +The registration remains valid until it is replaced by a different +module or until the database connection closes. The content +of this structure must not change while it is registered with +any database connection. +


    +

    Virtual Table Cursor Object

    struct sqlite3_vtab_cursor {
    +  sqlite3_vtab *pVtab;      /* Virtual table of this cursor */
    +  /* Virtual table implementations will typically add additional fields */
    +};
    +

    Important: This interface is experimental and is subject to change without notice.

    +Every virtual table module implementation uses a subclass of this +structure to describe cursors that point into the +virtual table and are used +to loop through the virtual table. Cursors are created using the +xOpen method of the module and are destroyed +by the xClose method. Cursors are used +by the xFilter, xNext, xEof, xColumn, and xRowid methods +of the module. Each module implementation will define +the content of a cursor structure to suit its own needs.

    + +

    This superclass exists in order to define fields of the cursor that +are common to all implementations. +


    +

    A Handle To An Open BLOB

    typedef struct sqlite3_blob sqlite3_blob;
    +

    +An instance of this object represents an open BLOB on which +incremental BLOB I/O can be performed. +Objects of this type are created by sqlite3_blob_open() +and destroyed by sqlite3_blob_close(). +The sqlite3_blob_read() and sqlite3_blob_write() interfaces +can be used to read or write small subsections of the BLOB. +The sqlite3_blob_bytes() interface returns the size of the BLOB in bytes. +


    +

    Database Connection Handle

    typedef struct sqlite3 sqlite3;
    +

    +Each open SQLite database is represented by a pointer to an instance of +the opaque structure named "sqlite3". It is useful to think of an sqlite3 +pointer as an object. The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces are its constructors, and sqlite3_close() +is its destructor. There are many other interfaces (such as +sqlite3_prepare_v2(), sqlite3_create_function(), and +sqlite3_busy_timeout() to name but three) that are methods on an +sqlite3 object. +


    +

    SQL Statement Object

    typedef struct sqlite3_stmt sqlite3_stmt;
    +

    +An instance of this object represents a single SQL statement. +This object is variously known as a "prepared statement" or a +"compiled SQL statement" or simply as a "statement".

    + +

    The life of a statement object goes something like this:

    + +

      +
    1. Create the object using sqlite3_prepare_v2() or a related +function. +
    2. Bind values to host parameters using the sqlite3_bind_*() +interfaces. +
    3. Run the SQL by calling sqlite3_step() one or more times. +
    4. Reset the statement using sqlite3_reset() then go back +to step 2. Do this zero or more times. +
    5. Destroy the object using sqlite3_finalize(). +

    + +

    Refer to documentation on individual methods above for additional +information. +
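    A minimal sketch of that life cycle, assuming a hypothetical table t1(a,b): prepare once, bind, step until done, then finalize:

    #include <sqlite3.h>

    static int sum_b_for_a(sqlite3 *db, int a, double *pSum){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db, "SELECT b FROM t1 WHERE a=?1", -1, &pStmt, 0);
      if( rc!=SQLITE_OK ) return rc;
      sqlite3_bind_int(pStmt, 1, a);
      *pSum = 0.0;
      while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
        *pSum += sqlite3_column_double(pStmt, 0);
      }
      sqlite3_finalize(pStmt);              /* step 5; also frees the statement on error */
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }

    To reuse the statement for another value of a, call sqlite3_reset() and bind again instead of finalizing.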


    +

    Dynamically Typed Value Object

    typedef struct Mem sqlite3_value;
    +

    +SQLite uses the sqlite3_value object to represent all values +that can be stored in a database table. SQLite uses dynamic typing +for the values it stores. Values stored in sqlite3_value objects +can be integers, floating point values, strings, BLOBs, or NULL.

    + +

    An sqlite3_value object may be either "protected" or "unprotected". +Some interfaces require a protected sqlite3_value. Other interfaces +will accept either a protected or an unprotected sqlite3_value. +Every interface that accepts sqlite3_value arguments specifies +whether or not it requires a protected sqlite3_value.

    + +

    The terms "protected" and "unprotected" refer to whether or not +a mutex is held. A internal mutex is held for a protected +sqlite3_value object but no mutex is held for an unprotected +sqlite3_value object. If SQLite is compiled to be single-threaded +(with SQLITE_THREADSAFE=0 and with sqlite3_threadsafe() returning 0) +or if SQLite is run in one of reduced mutex modes +SQLITE_CONFIG_SINGLETHREAD or SQLITE_CONFIG_MULTITHREAD +then there is no distinction between protected and unprotected +sqlite3_value objects and they can be used interchangeably. However, +for maximum code portability it is recommended that applications +still make the distinction between between protected and unprotected +sqlite3_value objects even when not strictly required.

    + +

    The sqlite3_value objects that are passed as parameters into the +implementation of application-defined SQL functions are protected. +The sqlite3_value object returned by +sqlite3_column_value() is unprotected. +Unprotected sqlite3_value objects may only be used with +sqlite3_result_value() and sqlite3_bind_value(). +The sqlite3_value_type() family of +interfaces require protected sqlite3_value objects. +


    +

    Deprecated Functions

    #ifndef SQLITE_OMIT_DEPRECATED
    +int sqlite3_aggregate_count(sqlite3_context*);
    +int sqlite3_expired(sqlite3_stmt*);
    +int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
    +int sqlite3_global_recover(void);
    +void sqlite3_thread_cleanup(void);
    +int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),void*,sqlite3_int64);
    +#endif
    +

    +These functions are deprecated. In order to maintain +backwards compatibility with older code, these functions continue +to be supported. However, new applications should avoid +the use of these functions. To help encourage people to avoid +using these functions, we are not going to tell you what they do. +


    +

    Online Backup API.

    sqlite3_backup *sqlite3_backup_init(
    +  sqlite3 *pDest,                        /* Destination database handle */
    +  const char *zDestName,                 /* Destination database name */
    +  sqlite3 *pSource,                      /* Source database handle */
    +  const char *zSourceName                /* Source database name */
    +);
    +int sqlite3_backup_step(sqlite3_backup *p, int nPage);
    +int sqlite3_backup_finish(sqlite3_backup *p);
    +int sqlite3_backup_remaining(sqlite3_backup *p);
    +int sqlite3_backup_pagecount(sqlite3_backup *p);
    +

    Important: This interface is experimental and is subject to change without notice.

    +This API is used to overwrite the contents of one database with that +of another. It is useful either for creating backups of databases or +for copying in-memory databases to or from persistent files.

    + +

    See Also: Using the SQLite Online Backup API

    + +

    Exclusive access is required to the destination database for the +duration of the operation. However, the source database is only +read-locked while it is actually being read; it is not locked +continuously for the entire operation. Thus, the backup may be +performed on a live database without preventing other users from +writing to the database for an extended period of time.

    + +

    To perform a backup operation: +

      +
    1. sqlite3_backup_init() is called once to initialize the +backup, +
    2. sqlite3_backup_step() is called one or more times to transfer +the data between the two databases, and finally +
    3. sqlite3_backup_finish() is called to release all resources +associated with the backup operation. +
    +There should be exactly one call to sqlite3_backup_finish() for each +successful call to sqlite3_backup_init().
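    A compact sketch of those three steps, copying the "main" database of one open connection into the "main" database of another, 100 pages per step; a real application might report progress or sleep between busy retries:

    #include <sqlite3.h>

    static int backup_db(sqlite3 *pDest, sqlite3 *pSource){
      int rc;
      sqlite3_backup *pBackup = sqlite3_backup_init(pDest, "main", pSource, "main");
      if( pBackup==0 ) return sqlite3_errcode(pDest);
      do{
        rc = sqlite3_backup_step(pBackup, 100);
      }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
      (void)sqlite3_backup_finish(pBackup);     /* releases all backup resources */
      return sqlite3_errcode(pDest);
    }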

    + +

    sqlite3_backup_init()

    + +

    The first two arguments passed to sqlite3_backup_init() are the database +handle associated with the destination database and the database name +used to attach the destination database to the handle. The database name +is "main" for the main database, "temp" for the temporary database, or +the name specified as part of the ATTACH statement if the destination is +an attached database. The third and fourth arguments passed to +sqlite3_backup_init() identify the database connection +and database name used +to access the source database. The values passed for the source and +destination database connection parameters must not be the same.

    + +

    If an error occurs within sqlite3_backup_init(), then NULL is returned +and an error code and error message are written into the database connection +passed as the first argument. They may be retrieved using the +sqlite3_errcode(), sqlite3_errmsg(), and sqlite3_errmsg16() functions. +Otherwise, if successful, a pointer to an sqlite3_backup object is +returned. This pointer may be used with the sqlite3_backup_step() and +sqlite3_backup_finish() functions to perform the specified backup +operation.

    + +

    sqlite3_backup_step()

    + +

    Function sqlite3_backup_step() is used to copy up to nPage pages between +the source and destination databases, where nPage is the value of the +second parameter passed to sqlite3_backup_step(). If nPage is a negative +value, all remaining source pages are copied. If the required pages are +successfully copied, but there are still more pages to copy before the +backup is complete, it returns SQLITE_OK. If no error occurred and there +are no more pages to copy, then SQLITE_DONE is returned. If an error +occurs, then an SQLite error code is returned. As well as SQLITE_OK and +SQLITE_DONE, a call to sqlite3_backup_step() may return SQLITE_READONLY, +SQLITE_NOMEM, SQLITE_BUSY, SQLITE_LOCKED, or an +SQLITE_IOERR_XXX extended error code.

    + +

    As well as the case where the destination database file was opened for +read-only access, sqlite3_backup_step() may return SQLITE_READONLY if +the destination is an in-memory database with a different page size +from the source database.

    + +

    If sqlite3_backup_step() cannot obtain a required file-system lock, then +the busy-handler function +is invoked (if one is specified). If the +busy-handler returns non-zero before the lock is available, then +SQLITE_BUSY is returned to the caller. In this case the call to +sqlite3_backup_step() can be retried later. If the source +database connection +is being used to write to the source database when sqlite3_backup_step() +is called, then SQLITE_LOCKED is returned immediately. Again, in this +case the call to sqlite3_backup_step() can be retried later on. If +SQLITE_IOERR_XXX, SQLITE_NOMEM, or +SQLITE_READONLY is returned, then +there is no point in retrying the call to sqlite3_backup_step(). These +errors are considered fatal. At this point the application must accept +that the backup operation has failed and pass the backup operation handle +to sqlite3_backup_finish() to release associated resources.

    + +

    Following the first call to sqlite3_backup_step(), an exclusive lock is +obtained on the destination file. It is not released until either +sqlite3_backup_finish() is called or the backup operation is complete +and sqlite3_backup_step() returns SQLITE_DONE. Additionally, each time +a call to sqlite3_backup_step() is made a shared lock is obtained on +the source database file. This lock is released before the +sqlite3_backup_step() call returns. Because the source database is not +locked between calls to sqlite3_backup_step(), it may be modified mid-way +through the backup procedure. If the source database is modified by an +external process or via a database connection other than the one being +used by the backup operation, then the backup will be transparently +restarted by the next call to sqlite3_backup_step(). If the source +database is modified using the same database connection as is used +by the backup operation, then the backup database is transparently +updated at the same time.

    + +

    sqlite3_backup_finish()

    + +

    Once sqlite3_backup_step() has returned SQLITE_DONE, or when the +application wishes to abandon the backup operation, the sqlite3_backup +object should be passed to sqlite3_backup_finish(). This releases all +resources associated with the backup operation. If sqlite3_backup_step() +has not yet returned SQLITE_DONE, then any active write-transaction on the +destination database is rolled back. The sqlite3_backup object is invalid +and may not be used following a call to sqlite3_backup_finish().

    + +

    The value returned by sqlite3_backup_finish() is SQLITE_OK if no error +occurred, regardless of whether or not sqlite3_backup_step() was called +a sufficient number of times to complete the backup operation. Or, if +an out-of-memory condition or IO error occurred during a call to +sqlite3_backup_step() then SQLITE_NOMEM or an +SQLITE_IOERR_XXX error code +is returned. In this case the error code and an error message are +written to the destination database connection.

    + +

    A return of SQLITE_BUSY or SQLITE_LOCKED from sqlite3_backup_step() is +not a permanent error and does not affect the return value of +sqlite3_backup_finish().

    + +

    sqlite3_backup_remaining(), sqlite3_backup_pagecount()

    + +

    Each call to sqlite3_backup_step() sets two values stored internally +by an sqlite3_backup object: the number of pages still to be backed +up, which may be queried by sqlite3_backup_remaining(), and the total +number of pages in the source database file, which may be queried by +sqlite3_backup_pagecount().

    + +

    The values returned by these functions are only updated by +sqlite3_backup_step(). If the source database is modified during a backup +operation, then the values are not updated to account for any extra +pages that need to be updated or the size of the source database file +changing.

    + +

    Concurrent Usage of Database Handles

    + +

    The source database connection may be used by the application for other +purposes while a backup operation is underway or being initialized. +If SQLite is compiled and configured to support threadsafe database +connections, then the source database connection may be used concurrently +from within other threads.

    + +

    However, the application must guarantee that the destination database +connection handle is not passed to any other API (by any thread) after +sqlite3_backup_init() is called and before the corresponding call to +sqlite3_backup_finish(). Unfortunately SQLite does not currently check +for this; if the application does use the destination database connection +for some other purpose during a backup operation, things may appear to +work correctly but in fact be subtly malfunctioning. Use of the +destination database connection while a backup is in progress might +also cause a mutex deadlock.

    + +

    Furthermore, if running in shared cache mode, the application must +guarantee that the shared cache used by the destination database +is not accessed while the backup is running. In practice this means +that the application must guarantee that the file-system file being +backed up to is not accessed by any connection within the process, +not just the specific connection that was passed to sqlite3_backup_init().

    + +

    The sqlite3_backup object itself is partially threadsafe. Multiple +threads may safely make multiple concurrent calls to sqlite3_backup_step(). +However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() +APIs are not strictly speaking threadsafe. If they are invoked at the +same time as another thread is invoking sqlite3_backup_step() it is +possible that they return invalid values. +


    +

    Collation Needed Callbacks

    int sqlite3_collation_needed(
    +  sqlite3*, 
    +  void*, 
    +  void(*)(void*,sqlite3*,int eTextRep,const char*)
    +);
    +int sqlite3_collation_needed16(
    +  sqlite3*, 
    +  void*,
    +  void(*)(void*,sqlite3*,int eTextRep,const void*)
    +);
    +

    +To avoid having to register all collation sequences before a database +can be used, a single callback function may be registered with the +database connection to be called whenever an undefined collation +sequence is required.

    + +

    If the function is registered using the sqlite3_collation_needed() API, +then it is passed the names of undefined collation sequences as strings +encoded in UTF-8. If sqlite3_collation_needed16() is used, +the names are passed as UTF-16 in machine native byte order. +A call to either function replaces any existing callback.

    + +

    When the callback is invoked, the first argument passed is a copy +of the second argument to sqlite3_collation_needed() or +sqlite3_collation_needed16(). The second argument is the database +connection. The third argument is one of SQLITE_UTF8, SQLITE_UTF16BE, +or SQLITE_UTF16LE, indicating the most desirable form of the collation +sequence function required. The fourth parameter is the name of the +required collation sequence.

    + +

    The callback function should register the desired collation using +sqlite3_create_collation(), sqlite3_create_collation16(), or +sqlite3_create_collation_v2().
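    A minimal sketch of such a callback, assuming a hypothetical collation name "MYCOLL" and a comparison function my_cmp() supplied elsewhere by the application:

    #include <sqlite3.h>
    #include <string.h>

    extern int my_cmp(void*, int, const void*, int, const void*);  /* hypothetical */

    /* Invoked by SQLite the first time an unknown collation is needed. */
    static void collNeeded(void *pArg, sqlite3 *db, int eTextRep, const char *zName){
      if( strcmp(zName, "MYCOLL")==0 ){
        sqlite3_create_collation(db, "MYCOLL", SQLITE_UTF8, 0, my_cmp);
      }
    }

    /* Registration:  sqlite3_collation_needed(db, 0, collNeeded); */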

    + +

    Requirements: +H16702 H16704 H16706 +


    +

    Source Of Data In A Query Result

    const char *sqlite3_column_database_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
    +const char *sqlite3_column_table_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
    +const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
    +const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
    +

    +These routines provide a means to determine what column of what +table in which database a result of a SELECT statement comes from. +The name of the database or table or column can be returned as +either a UTF-8 or UTF-16 string. The _database_ routines return +the database name, the _table_ routines return the table name, and +the _origin_ routines return the column name. +The returned string is valid until the prepared statement is destroyed +using sqlite3_finalize() or until the same information is requested +again in a different encoding.

    + +

    The names returned are the original un-aliased names of the +database, table, and column.

    + +

    The first argument to the following calls is a prepared statement. +These functions return information about the Nth column returned by +the statement, where N is the second function argument.

    + +

    If the Nth column returned by the statement is an expression or +subquery and is not a column value, then all of these functions return +NULL. These routines might also return NULL if a memory allocation error +occurs. Otherwise, they return the name of the attached database, table, +and column that the query result column was extracted from.

    + +

    As with all other SQLite APIs, those postfixed with "16" return +UTF-16 encoded strings, the other functions return UTF-8.

    + +

    These APIs are only available if the library was compiled with the +SQLITE_ENABLE_COLUMN_METADATA C-preprocessor symbol defined.

    + +

    If two or more threads call one or more of these routines against the same +prepared statement and column at the same time then the results are +undefined.

    + +

    Requirements: +H13741 H13742 H13743 H13744 H13745 H13746 H13748

    + +

    If two or more threads call one or more +column metadata interfaces +for the same prepared statement and result column +at the same time then the results are undefined. +


    +

    Declared Datatype Of A Query Result

    const char *sqlite3_column_decltype(sqlite3_stmt*,int);
    +const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
    +

    +The first parameter is a prepared statement. +If this statement is a SELECT statement and the Nth column of the +returned result set of that SELECT is a table column (not an +expression or subquery) then the declared type of the table +column is returned. If the Nth column of the result set is an +expression or subquery, then a NULL pointer is returned. +The returned string is always UTF-8 encoded.

    + +

    For example, given the database schema:

    + +

    CREATE TABLE t1(c1 VARIANT);

    + +

    and the following statement to be compiled:

    + +

    SELECT c1 + 1, c1 FROM t1;

    + +

    this routine would return the string "VARIANT" for the second result +column (i==1), and a NULL pointer for the first result column (i==0).

    + +

    SQLite uses dynamic run-time typing. So just because a column +is declared to contain a particular type does not mean that the +data stored in that column is of the declared type. SQLite is +strongly typed, but the typing is dynamic, not static. Type +is associated with individual values, not with the containers +used to hold those values.

    + +

    Requirements: +H13761 H13762 H13763 +


    +

    Column Names In A Result Set

    const char *sqlite3_column_name(sqlite3_stmt*, int N);
    +const void *sqlite3_column_name16(sqlite3_stmt*, int N);
    +

    +These routines return the name assigned to a particular column +in the result set of a SELECT statement. The sqlite3_column_name() +interface returns a pointer to a zero-terminated UTF-8 string +and sqlite3_column_name16() returns a pointer to a zero-terminated +UTF-16 string. The first parameter is the prepared statement +that implements the SELECT statement. The second parameter is the +column number. The leftmost column is number 0.

    + +

    The returned string pointer is valid until either the prepared statement +is destroyed by sqlite3_finalize() or until the next call to +sqlite3_column_name() or sqlite3_column_name16() on the same column.

    + +

    If sqlite3_malloc() fails during the processing of either routine +(for example during a conversion from UTF-8 to UTF-16) then a +NULL pointer is returned.

    + +

    The name of a result column is the value of the "AS" clause for +that column, if there is an AS clause. If there is no AS clause +then the name of the column is unspecified and may change from +one release of SQLite to the next.

    + +

    Requirements: +H13721 H13723 H13724 H13725 H13726 H13727 +


    +

    Commit And Rollback Notification Callbacks

    void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
    +void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
    +

    +The sqlite3_commit_hook() interface registers a callback +function to be invoked whenever a transaction is committed. +Any callback set by a previous call to sqlite3_commit_hook() +for the same database connection is overridden. +The sqlite3_rollback_hook() interface registers a callback +function to be invoked whenever a transaction is rolled back. +Any callback set by a previous call to sqlite3_rollback_hook() +for the same database connection is overridden. +The pArg argument is passed through to the callback. +If the callback on a commit hook function returns non-zero, +then the commit is converted into a rollback.

    + +

    If another function was previously registered, its +pArg value is returned. Otherwise NULL is returned.

    + +

    The callback implementation must not do anything that will modify +the database connection that invoked the callback. Any actions +to modify the database connection must be deferred until after the +completion of the sqlite3_step() call that triggered the commit +or rollback hook in the first place. +Note that calling sqlite3_prepare_v2() or sqlite3_step() counts as +modifying the database connection for the purposes of this paragraph.

    + +

    Registering a NULL function disables the callback.

    + +

    When the commit hook callback routine returns zero, the COMMIT +operation is allowed to continue normally. If the commit hook +returns non-zero, then the COMMIT is converted into a ROLLBACK. +The rollback hook is invoked on a rollback that results from a commit +hook returning non-zero, just as it would be with any other rollback.

    + +

    For the purposes of this API, a transaction is said to have been +rolled back if an explicit "ROLLBACK" statement is executed, or +an error or constraint causes an implicit rollback to occur. +The rollback callback is not invoked if a transaction is +automatically rolled back because the database connection is closed. +The rollback callback is not invoked if a transaction is +rolled back because a commit callback returned non-zero. +(TODO: Check on this )

    + +

    See also the sqlite3_update_hook() interface.
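    A short sketch of both hooks; the "maintenance mode" flag that vetoes commits is a hypothetical application policy used only for illustration:

    #include <sqlite3.h>
    #include <stdio.h>

    static int inMaintenance = 0;      /* hypothetical application state */

    static int onCommit(void *pArg){
      return inMaintenance;            /* 0 = allow the COMMIT, non-zero = roll back */
    }

    static void onRollback(void *pArg){
      fprintf(stderr, "transaction rolled back\n");
    }

    /* Registration:
    **   sqlite3_commit_hook(db, onCommit, 0);
    **   sqlite3_rollback_hook(db, onRollback, 0);
    */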

    + +

    Requirements: +H12951 H12952 H12953 H12954 H12955 +H12961 H12962 H12963 H12964 +


    +

    Determine If An SQL Statement Is Complete

    int sqlite3_complete(const char *sql);
    +int sqlite3_complete16(const void *sql);
    +

    +These routines are useful during command-line input to determine if the +currently entered text seems to form a complete SQL statement or +if additional input is needed before sending the text into +SQLite for parsing. These routines return 1 if the input string +appears to be a complete SQL statement. A statement is judged to be +complete if it ends with a semicolon token and is not a prefix of a +well-formed CREATE TRIGGER statement. Semicolons that are embedded within +string literals or quoted identifier names or comments are not +independent tokens (they are part of the token in which they are +embedded) and thus do not count as a statement terminator. Whitespace +and comments that follow the final semicolon are ignored.

    + +

    These routines return 0 if the statement is incomplete. If a +memory allocation fails, then SQLITE_NOMEM is returned.

    + +

    These routines do not parse the SQL statements, and thus +will not detect syntactically incorrect SQL.

    + +

    If SQLite has not been initialized using sqlite3_initialize() prior +to invoking sqlite3_complete16() then sqlite3_initialize() is invoked +automatically by sqlite3_complete16(). If that initialization fails, +then the return value from sqlite3_complete16() will be non-zero +regardless of whether or not the input SQL is complete.

    + +

    Requirements: H10511 H10512

    + +

    The input to sqlite3_complete() must be a zero-terminated +UTF-8 string.

    + +

    The input to sqlite3_complete16() must be a zero-terminated +UTF-16 string in native byte order. +
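    A skeleton of the command-line use case described above: keep accumulating input lines until the text looks like a complete statement, then hand it to SQLite. Buffer sizes and the use of sqlite3_exec() are illustrative simplifications:

    #include <sqlite3.h>
    #include <stdio.h>
    #include <string.h>

    static void repl(sqlite3 *db){
      char sql[4096] = "";
      char line[256];
      while( fgets(line, sizeof(line), stdin) ){
        strncat(sql, line, sizeof(sql)-strlen(sql)-1);
        if( sqlite3_complete(sql) ){
          sqlite3_exec(db, sql, 0, 0, 0);   /* error handling omitted for brevity */
          sql[0] = 0;                       /* start accumulating the next statement */
        }
      }
    }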


    +

    Define New Collating Sequences

    int sqlite3_create_collation(
    +  sqlite3*, 
    +  const char *zName, 
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*)
    +);
    +int sqlite3_create_collation_v2(
    +  sqlite3*, 
    +  const char *zName, 
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*),
    +  void(*xDestroy)(void*)
    +);
    +int sqlite3_create_collation16(
    +  sqlite3*, 
    +  const void *zName,
    +  int eTextRep, 
    +  void*,
    +  int(*xCompare)(void*,int,const void*,int,const void*)
    +);
    +

    +These functions are used to add new collation sequences to the +database connection specified as the first argument.

    + +

    The name of the new collation sequence is specified as a UTF-8 string +for sqlite3_create_collation() and sqlite3_create_collation_v2() +and a UTF-16 string for sqlite3_create_collation16(). In all cases +the name is passed as the second function argument.

    + +

    The third argument may be one of the constants SQLITE_UTF8, +SQLITE_UTF16LE, or SQLITE_UTF16BE, indicating that the user-supplied +routine expects to be passed pointers to strings encoded using UTF-8, +UTF-16 little-endian, or UTF-16 big-endian, respectively. The +third argument might also be SQLITE_UTF16 to indicate that the routine +expects pointers to be UTF-16 strings in the native byte order, or the +argument can be SQLITE_UTF16_ALIGNED if the +routine expects pointers to 16-bit word aligned strings +of UTF-16 in the native byte order.

    + +

    A pointer to the user supplied routine must be passed as the fifth +argument. If it is NULL, this is the same as deleting the collation +sequence (so that SQLite cannot call it anymore). +Each time the application supplied function is invoked, it is passed +as its first parameter a copy of the void* passed as the fourth argument +to sqlite3_create_collation() or sqlite3_create_collation16().

    + +

    The remaining arguments to the application-supplied routine are two strings, +each represented by a (length, data) pair and encoded in the encoding +that was passed as the third argument when the collation sequence was +registered. The application defined collation routine should +return negative, zero or positive if the first string is less than, +equal to, or greater than the second string. i.e. (STRING1 - STRING2).

    + +

    The sqlite3_create_collation_v2() works like sqlite3_create_collation() +except that it takes an extra argument which is a destructor for +the collation. The destructor is called when the collation is +destroyed and is passed a copy of the fourth parameter void* pointer +of the sqlite3_create_collation_v2(). +Collations are destroyed when they are overridden by later calls to the +collation creation functions or when the database connection is closed +using sqlite3_close().

    + +

    See also: sqlite3_collation_needed() and sqlite3_collation_needed16().
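    A minimal sketch of registering a collation: a byte-wise, case-insensitive comparison for ASCII text that returns negative, zero, or positive as described above. The collation name "MYNOCASE" is an arbitrary example:

    #include <sqlite3.h>
    #include <ctype.h>

    static int nocaseCmp(void *pArg, int n1, const void *p1, int n2, const void *p2){
      const unsigned char *a = p1, *b = p2;
      int n = n1<n2 ? n1 : n2;
      int i;
      for(i=0; i<n; i++){
        int c = tolower(a[i]) - tolower(b[i]);
        if( c ) return c;
      }
      return n1 - n2;      /* shorter string sorts first when prefixes match */
    }

    /* Registration:  sqlite3_create_collation(db, "MYNOCASE", SQLITE_UTF8, 0, nocaseCmp); */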

    + +

    Requirements: +H16603 H16604 H16606 H16609 H16612 H16615 H16618 H16621 +H16624 H16627 H16630 +


    +

    Error Codes And Messages

    int sqlite3_errcode(sqlite3 *db);
    +int sqlite3_extended_errcode(sqlite3 *db);
    +const char *sqlite3_errmsg(sqlite3*);
    +const void *sqlite3_errmsg16(sqlite3*);
    +

    +The sqlite3_errcode() interface returns the numeric result code or +extended result code for the most recent failed sqlite3_* API call +associated with a database connection. If a prior API call failed +but the most recent API call succeeded, the return value from +sqlite3_errcode() is undefined. The sqlite3_extended_errcode() +interface is the same except that it always returns the +extended result code even when extended result codes are +disabled.

    + +

    The sqlite3_errmsg() and sqlite3_errmsg16() return English-language +text that describes the error, as either UTF-8 or UTF-16 respectively. +Memory to hold the error message string is managed internally. +The application does not need to worry about freeing the result. +However, the error string might be overwritten or deallocated by +subsequent calls to other SQLite interface functions.

    + +

    When the serialized threading mode is in use, it might be the +case that a second error occurs on a separate thread in between +the time of the first error and the call to these interfaces. +When that happens, the second error will be reported since these +interfaces always report the most recent result. To avoid +this, each thread can obtain exclusive use of the database connection D +by invoking sqlite3_mutex_enter(sqlite3_db_mutex(D)) before beginning +to use D and invoking sqlite3_mutex_leave(sqlite3_db_mutex(D)) after +all calls to the interfaces listed here are completed.

    + +

    If an interface fails with SQLITE_MISUSE, that means the interface +was invoked incorrectly by the application. In that case, the +error code and message may or may not be set.
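    A small error-reporting helper built from the interfaces above, suitable for calling immediately after a failed API call on the same connection:

    #include <sqlite3.h>
    #include <stdio.h>

    static void log_last_error(sqlite3 *db){
      fprintf(stderr, "error %d (extended %d): %s\n",
              sqlite3_errcode(db), sqlite3_extended_errcode(db), sqlite3_errmsg(db));
    }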

    + +

    Requirements: +H12801 H12802 H12803 H12807 H12808 H12809 +


    +

    Memory Allocation Subsystem

    void *sqlite3_malloc(int);
    +void *sqlite3_realloc(void*, int);
    +void sqlite3_free(void*);
    +

    +The SQLite core uses these three routines for all of its own +internal memory allocation needs. "Core" in the previous sentence +does not include the operating-system specific VFS implementation. The +Windows VFS uses native malloc() and free() for some operations.

    + +

    The sqlite3_malloc() routine returns a pointer to a block +of memory at least N bytes in length, where N is the parameter. +If sqlite3_malloc() is unable to obtain sufficient free +memory, it returns a NULL pointer. If the parameter N to +sqlite3_malloc() is zero or negative then sqlite3_malloc() returns +a NULL pointer.

    + +

    Calling sqlite3_free() with a pointer previously returned +by sqlite3_malloc() or sqlite3_realloc() releases that memory so +that it might be reused. The sqlite3_free() routine is +a no-op if it is called with a NULL pointer. Passing a NULL pointer +to sqlite3_free() is harmless. After being freed, memory +should neither be read nor written. Even reading previously freed +memory might result in a segmentation fault or other severe error. +Memory corruption, a segmentation fault, or other severe error +might result if sqlite3_free() is called with a non-NULL pointer that +was not obtained from sqlite3_malloc() or sqlite3_realloc().

    + +

    The sqlite3_realloc() interface attempts to resize a +prior memory allocation to be at least N bytes, where N is the +second parameter. The memory allocation to be resized is the first +parameter. If the first parameter to sqlite3_realloc() +is a NULL pointer then its behavior is identical to calling +sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc(). +If the second parameter to sqlite3_realloc() is zero or +negative then the behavior is exactly the same as calling +sqlite3_free(P) where P is the first parameter to sqlite3_realloc(). +sqlite3_realloc() returns a pointer to a memory allocation +of at least N bytes in size or NULL if sufficient memory is unavailable. +If M is the size of the prior allocation, then min(N,M) bytes +of the prior allocation are copied into the beginning of buffer returned +by sqlite3_realloc() and the prior allocation is freed. +If sqlite3_realloc() returns NULL, then the prior allocation +is not freed.
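A short sketch of these allocation rules (illustrative only):

#include <sqlite3.h>
#include <string.h>

static char *grow_copy(const char *zIn){
  int n = (int)strlen(zIn) + 1;
  char *p = sqlite3_malloc(n);           /* NULL on allocation failure */
  if( p==0 ) return 0;
  memcpy(p, zIn, n);

  char *p2 = sqlite3_realloc(p, 2*n);    /* at least 2*n bytes, contents kept */
  if( p2==0 ){
    sqlite3_free(p);                     /* realloc failure leaves p allocated */
    return 0;
  }
  return p2;                             /* caller releases with sqlite3_free() */
}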

    + +

    The memory returned by sqlite3_malloc() and sqlite3_realloc() +is always aligned to at least an 8 byte boundary.

    + +

The default implementation of the memory allocation subsystem uses the malloc(), realloc() and free() provided by the standard C library. However, if SQLite is compiled with the SQLITE_MEMORY_SIZE=NNN C preprocessor macro (where NNN is an integer), then SQLite creates a static array of at least NNN bytes in size and uses that array for all of its dynamic memory allocation needs. Additional memory allocator options may be added in future releases.

    + +

In SQLite version 3.5.0 and 3.5.1, it was possible to define SQLITE_OMIT_MEMORY_ALLOCATION, which would cause the built-in implementation of these routines to be omitted. That capability is no longer provided. Only built-in memory allocators can be used.

    + +

    The Windows OS interface layer calls +the system malloc() and free() directly when converting +filenames between the UTF-8 encoding used by SQLite +and whatever filename encoding is used by the particular Windows +installation. Memory allocation errors are detected, but +they are reported back as SQLITE_CANTOPEN or +SQLITE_IOERR rather than SQLITE_NOMEM.

    + +

    Requirements: +H17303 H17304 H17305 H17306 H17310 H17312 H17315 H17318 +H17321 H17322 H17323

    + +

    The pointer arguments to sqlite3_free() and sqlite3_realloc() +must be either NULL or else pointers obtained from a prior +invocation of sqlite3_malloc() or sqlite3_realloc() that have +not yet been released.

    + +

    The application must not read or write any part of +a block of memory after it has been released using +sqlite3_free() or sqlite3_realloc(). +


    +

    Convenience Routines For Running Queries

    int sqlite3_get_table(
    +  sqlite3 *db,          /* An open database */
    +  const char *zSql,     /* SQL to be evaluated */
    +  char ***pazResult,    /* Results of the query */
    +  int *pnRow,           /* Number of result rows written here */
    +  int *pnColumn,        /* Number of result columns written here */
    +  char **pzErrmsg       /* Error msg written here */
    +);
    +void sqlite3_free_table(char **result);
    +

Definition: A result table is a memory data structure created by the sqlite3_get_table() interface. A result table records the complete query results from one or more queries.

    + +

    The table conceptually has a number of rows and columns. But +these numbers are not part of the result table itself. These +numbers are obtained separately. Let N be the number of rows +and M be the number of columns.

    + +

    A result table is an array of pointers to zero-terminated UTF-8 strings. +There are (N+1)*M elements in the array. The first M pointers point +to zero-terminated strings that contain the names of the columns. +The remaining entries all point to query results. NULL values result +in NULL pointers. All other values are in their UTF-8 zero-terminated +string representation as returned by sqlite3_column_text().

    + +

    A result table might consist of one or more memory allocations. +It is not safe to pass a result table directly to sqlite3_free(). +A result table should be deallocated using sqlite3_free_table().

    + +

    As an example of the result table format, suppose a query result +is as follows:

    + +

    +Name        | Age
    +-----------------------
    +Alice       | 43
    +Bob         | 28
    +Cindy       | 21
    +

    + +

There are two columns (M==2) and three rows (N==3). Thus the result table has 8 entries. Suppose the result table is stored in an array named azResult. Then azResult holds this content:

    + +

    +azResult[0] = "Name";
    +azResult[1] = "Age";
    +azResult[2] = "Alice";
    +azResult[3] = "43";
    +azResult[4] = "Bob";
    +azResult[5] = "28";
    +azResult[6] = "Cindy";
    +azResult[7] = "21";
    +

    + +

    The sqlite3_get_table() function evaluates one or more +semicolon-separated SQL statements in the zero-terminated UTF-8 +string of its 2nd parameter. It returns a result table to the +pointer given in its 3rd parameter.

    + +

    After the calling function has finished using the result, it should +pass the pointer to the result table to sqlite3_free_table() in order to +release the memory that was malloced. Because of the way the +sqlite3_malloc() happens within sqlite3_get_table(), the calling +function must not try to call sqlite3_free() directly. Only +sqlite3_free_table() is able to release the memory properly and safely.
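An illustrative sketch of the complete calling pattern; the "people" table and its columns are assumed for the example only:

#include <sqlite3.h>
#include <stdio.h>

static int dump_people(sqlite3 *db){
  char **azResult = 0;
  char *zErr = 0;
  int nRow = 0, nCol = 0;
  int rc = sqlite3_get_table(db, "SELECT name, age FROM people",
                             &azResult, &nRow, &nCol, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "get_table failed: %s\n", zErr ? zErr : "?");
    sqlite3_free(zErr);
    return rc;
  }
  /* Row 0 of azResult holds the column names; data starts at index nCol. */
  for(int i=1; i<=nRow; i++){
    for(int j=0; j<nCol; j++){
      const char *z = azResult[i*nCol + j];
      printf("%s%s", z ? z : "NULL", j==nCol-1 ? "\n" : " | ");
    }
  }
  sqlite3_free_table(azResult);          /* never plain sqlite3_free() here */
  return SQLITE_OK;
}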

    + +

    The sqlite3_get_table() interface is implemented as a wrapper around +sqlite3_exec(). The sqlite3_get_table() routine does not have access +to any internal data structures of SQLite. It uses only the public +interface defined here. As a consequence, errors that occur in the +wrapper layer outside of the internal sqlite3_exec() call are not +reflected in subsequent calls to sqlite3_errcode() or sqlite3_errmsg().

    + +

    Requirements: +H12371 H12373 H12374 H12376 H12379 H12382 +


    +

    Function Auxiliary Data

    void *sqlite3_get_auxdata(sqlite3_context*, int N);
    +void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
    +

    +The following two functions may be used by scalar SQL functions to +associate metadata with argument values. If the same value is passed to +multiple invocations of the same SQL function during query execution, under +some circumstances the associated metadata may be preserved. This may +be used, for example, to add a regular-expression matching scalar +function. The compiled version of the regular expression is stored as +metadata associated with the SQL value passed as the regular expression +pattern. The compiled regular expression can be reused on multiple +invocations of the same function so that the original pattern string +does not need to be recompiled on each invocation.

    + +

The sqlite3_get_auxdata() interface returns a pointer to the metadata associated by the sqlite3_set_auxdata() function with the Nth argument value to the application-defined function. If no metadata has ever been set for the Nth argument of the function, or if the corresponding function parameter has changed since the metadata was set, then sqlite3_get_auxdata() returns a NULL pointer.

    + +

The sqlite3_set_auxdata() interface saves the metadata pointed to by its 3rd parameter as the metadata for the N-th argument of the application-defined function. Subsequent calls to sqlite3_get_auxdata() might return this data, if it has not been destroyed. If the 4th parameter to sqlite3_set_auxdata() is not NULL, SQLite will invoke that destructor function on the metadata when the corresponding function parameter changes or when the SQL statement completes, whichever comes first.
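The following sketch illustrates the caching pattern described above. It is not from the original documentation; a heap copy of the pattern string stands in for a real compiled regular expression, and the argument layout (pattern first, subject second) is assumed:

#include <sqlite3.h>
#include <string.h>

static void patternDestroy(void *p){ sqlite3_free(p); }

static void demoMatchFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;                                  /* assumed to be 2 */
  char *zPat = (char*)sqlite3_get_auxdata(ctx, 0);
  if( zPat==0 ){
    const unsigned char *z = sqlite3_value_text(argv[0]);
    if( z==0 ){ sqlite3_result_null(ctx); return; }
    zPat = sqlite3_mprintf("%s", z);           /* stand-in for a "compile" step */
    if( zPat==0 ){ sqlite3_result_error_nomem(ctx); return; }
    sqlite3_set_auxdata(ctx, 0, zPat, patternDestroy);
  }
  const unsigned char *zSubject = sqlite3_value_text(argv[1]);
  int match = zSubject && strstr((const char*)zSubject, zPat)!=0;
  sqlite3_result_int(ctx, match);
}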

    + +

    SQLite is free to call the destructor and drop metadata on any +parameter of any function at any time. The only guarantee is that +the destructor will be called before the metadata is dropped.

    + +

    In practice, metadata is preserved between function calls for +expressions that are constant at compile time. This includes literal +values and SQL variables.

    + +

    These routines must be called from the same thread in which +the SQL function is running.

    + +

    Requirements: +H16272 H16274 H16276 H16277 H16278 H16279 +


    +

    Initialize The SQLite Library

    int sqlite3_initialize(void);
    +int sqlite3_shutdown(void);
    +int sqlite3_os_init(void);
    +int sqlite3_os_end(void);
    +

    +The sqlite3_initialize() routine initializes the +SQLite library. The sqlite3_shutdown() routine +deallocates any resources that were allocated by sqlite3_initialize().

    + +

    A call to sqlite3_initialize() is an "effective" call if it is +the first time sqlite3_initialize() is invoked during the lifetime of +the process, or if it is the first time sqlite3_initialize() is invoked +following a call to sqlite3_shutdown(). Only an effective call +of sqlite3_initialize() does any initialization. All other calls +are harmless no-ops.

    + +

    A call to sqlite3_shutdown() is an "effective" call if it is the first +call to sqlite3_shutdown() since the last sqlite3_initialize(). Only +an effective call to sqlite3_shutdown() does any deinitialization. +All other calls to sqlite3_shutdown() are harmless no-ops.

    + +

    Among other things, sqlite3_initialize() shall invoke +sqlite3_os_init(). Similarly, sqlite3_shutdown() +shall invoke sqlite3_os_end().

    + +

    The sqlite3_initialize() routine returns SQLITE_OK on success. +If for some reason, sqlite3_initialize() is unable to initialize +the library (perhaps it is unable to allocate a needed resource such +as a mutex) it returns an error code other than SQLITE_OK.

    + +

The sqlite3_initialize() routine is called internally by many other SQLite interfaces so that an application usually does not need to invoke sqlite3_initialize() directly. For example, sqlite3_open() calls sqlite3_initialize() so the SQLite library will be automatically initialized when sqlite3_open() is called if it has not been initialized already. However, if SQLite is compiled with the SQLITE_OMIT_AUTOINIT compile-time option, then the automatic calls to sqlite3_initialize() are omitted and the application must call sqlite3_initialize() directly prior to using any other SQLite interface. For maximum portability, it is recommended that applications always invoke sqlite3_initialize() directly prior to using any other SQLite interface. Future releases of SQLite may require this. In other words, the behavior exhibited when SQLite is compiled with SQLITE_OMIT_AUTOINIT might become the default behavior in some future release of SQLite.
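A minimal sketch of the recommended explicit initialization:

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  if( sqlite3_initialize()!=SQLITE_OK ){
    fprintf(stderr, "could not initialize SQLite\n");
    return 1;
  }
  /* ... open connections and do work here ... */
  sqlite3_shutdown();
  return 0;
}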

    + +

    The sqlite3_os_init() routine does operating-system specific +initialization of the SQLite library. The sqlite3_os_end() +routine undoes the effect of sqlite3_os_init(). Typical tasks +performed by these routines include allocation or deallocation +of static resources, initialization of global variables, +setting up a default sqlite3_vfs module, or setting up +a default configuration using sqlite3_config().

    + +

    The application should never invoke either sqlite3_os_init() +or sqlite3_os_end() directly. The application should only invoke +sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() +interface is called automatically by sqlite3_initialize() and +sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate +implementations for sqlite3_os_init() and sqlite3_os_end() +are built into SQLite when it is compiled for unix, windows, or os/2. +When built for other platforms (using the SQLITE_OS_OTHER=1 compile-time +option) the application must supply a suitable implementation for +sqlite3_os_init() and sqlite3_os_end(). An application-supplied +implementation of sqlite3_os_init() or sqlite3_os_end() +must return SQLITE_OK on success and some other error code upon +failure. +


    +

    Run-Time Library Version Numbers

    SQLITE_EXTERN const char sqlite3_version[];
    +const char *sqlite3_libversion(void);
    +int sqlite3_libversion_number(void);
    +

    +These features provide the same information as the SQLITE_VERSION +and SQLITE_VERSION_NUMBER #defines in the header, but are associated +with the library instead of the header file. Cautious programmers might +include a check in their application to verify that +sqlite3_libversion_number() always returns the value +SQLITE_VERSION_NUMBER.

    + +

    The sqlite3_libversion() function returns the same information as is +in the sqlite3_version[] string constant. The function is provided +for use in DLLs since DLL users usually do not have direct access to string +constants within the DLL.
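A sketch of the consistency check suggested above (illustrative only):

#include <sqlite3.h>
#include <assert.h>
#include <stdio.h>

void check_sqlite_version(void){
  /* header and linked library should agree */
  assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
  printf("compiled against %s, running %s\n",
         SQLITE_VERSION, sqlite3_libversion());
}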

    + +

    Requirements: H10021 H10022 H10023 +


    +

    Memory Allocator Statistics

    sqlite3_int64 sqlite3_memory_used(void);
    +sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
    +

    +SQLite provides these two interfaces for reporting on the status +of the sqlite3_malloc(), sqlite3_free(), and sqlite3_realloc() +routines, which form the built-in memory allocation subsystem.
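An illustrative use of the two interfaces:

#include <sqlite3.h>
#include <stdio.h>

void report_memory(void){
  printf("in use: %lld bytes, high-water: %lld bytes\n",
         (long long)sqlite3_memory_used(),
         (long long)sqlite3_memory_highwater(0));  /* 0 = do not reset peak */
}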

    + +

    Requirements: +H17371 H17373 H17374 H17375 +


    +

    Formatted String Printing Functions

    char *sqlite3_mprintf(const char*,...);
    +char *sqlite3_vmprintf(const char*, va_list);
    +char *sqlite3_snprintf(int,char*,const char*, ...);
    +

    +These routines are workalikes of the "printf()" family of functions +from the standard C library.

    + +

    The sqlite3_mprintf() and sqlite3_vmprintf() routines write their +results into memory obtained from sqlite3_malloc(). +The strings returned by these two routines should be +released by sqlite3_free(). Both routines return a +NULL pointer if sqlite3_malloc() is unable to allocate enough +memory to hold the resulting string.

    + +

The sqlite3_snprintf() routine is similar to "snprintf()" from the standard C library. The result is written into the buffer supplied as the second parameter whose size is given by the first parameter. Note that the order of the first two parameters is reversed from snprintf(). This is an historical accident that cannot be fixed without breaking backwards compatibility. Note also that sqlite3_snprintf() returns a pointer to its buffer instead of the number of characters actually written into the buffer. We admit that the number of characters written would be a more useful return value but we cannot change the implementation of sqlite3_snprintf() now without breaking compatibility.

    + +

    As long as the buffer size is greater than zero, sqlite3_snprintf() +guarantees that the buffer is always zero-terminated. The first +parameter "n" is the total size of the buffer, including space for +the zero terminator. So the longest string that can be completely +written will be n-1 characters.
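A short sketch of the argument order and termination guarantee (illustrative only):

#include <sqlite3.h>

void format_id(char *zBuf, int nBuf, int id){
  /* Size first, buffer second; as long as nBuf > 0 the result is always
  ** zero-terminated and at most nBuf-1 characters are written. */
  sqlite3_snprintf(nBuf, zBuf, "row-%d", id);
}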

    + +

These routines all implement some additional formatting options that are useful for constructing SQL statements. All of the usual printf() formatting options apply. In addition, there are "%q", "%Q", and "%z" options.

    + +

    The %q option works like %s in that it substitutes a null-terminated +string from the argument list. But %q also doubles every '\'' character. +%q is designed for use inside a string literal. By doubling each '\'' +character it escapes that character and allows it to be inserted into +the string.

    + +

    For example, assume the string variable zText contains text as follows:

    + +

    +char *zText = "It's a happy day!";
    +

    + +

    One can use this text in an SQL statement as follows:

    + +

+char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
    +sqlite3_exec(db, zSQL, 0, 0, 0);
    +sqlite3_free(zSQL);
    +

    + +

    Because the %q format string is used, the '\'' character in zText +is escaped and the SQL generated is as follows:

    + +

    +INSERT INTO table1 VALUES('It''s a happy day!')
    +

    + +

    This is correct. Had we used %s instead of %q, the generated SQL +would have looked like this:

    + +

    +INSERT INTO table1 VALUES('It's a happy day!');
    +

    + +

    This second example is an SQL syntax error. As a general rule you should +always use %q instead of %s when inserting text into a string literal.

    + +

    The %Q option works like %q except it also adds single quotes around +the outside of the total string. Additionally, if the parameter in the +argument list is a NULL pointer, %Q substitutes the text "NULL" (without +single quotes) in place of the %Q option. So, for example, one could say:

    + +

+char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
    +sqlite3_exec(db, zSQL, 0, 0, 0);
    +sqlite3_free(zSQL);
    +

    + +

    The code above will render a correct SQL statement in the zSQL +variable even if the zText variable is a NULL pointer.

    + +

    The "%z" formatting option works exactly like "%s" with the +addition that after the string has been read and copied into +the result, sqlite3_free() is called on the input string.

    + +

    Requirements: +H17403 H17406 H17407 +


    +

    Mutexes

    sqlite3_mutex *sqlite3_mutex_alloc(int);
    +void sqlite3_mutex_free(sqlite3_mutex*);
    +void sqlite3_mutex_enter(sqlite3_mutex*);
    +int sqlite3_mutex_try(sqlite3_mutex*);
    +void sqlite3_mutex_leave(sqlite3_mutex*);
    +

    +The SQLite core uses these routines for thread +synchronization. Though they are intended for internal +use by SQLite, code that links against SQLite is +permitted to use any of these routines.

    + +

    The SQLite source code contains multiple implementations +of these mutex routines. An appropriate implementation +is selected automatically at compile-time. The following +implementations are available in the SQLite core:

    + +

      +
    • SQLITE_MUTEX_OS2 +
    • SQLITE_MUTEX_PTHREAD +
    • SQLITE_MUTEX_W32 +
    • SQLITE_MUTEX_NOOP +

    + +

    The SQLITE_MUTEX_NOOP implementation is a set of routines +that does no real locking and is appropriate for use in +a single-threaded application. The SQLITE_MUTEX_OS2, +SQLITE_MUTEX_PTHREAD, and SQLITE_MUTEX_W32 implementations +are appropriate for use on OS/2, Unix, and Windows.

    + +

    If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor +macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex +implementation is included with the library. In this case the +application must supply a custom mutex implementation using the +SQLITE_CONFIG_MUTEX option of the sqlite3_config() function +before calling sqlite3_initialize() or any other public sqlite3_ +function that calls sqlite3_initialize().

    + +

    The sqlite3_mutex_alloc() routine allocates a new +mutex and returns a pointer to it. If it returns NULL +that means that a mutex could not be allocated. SQLite +will unwind its stack and return an error. The argument +to sqlite3_mutex_alloc() is one of these integer constants:

    + +

      +
    • SQLITE_MUTEX_FAST +
    • SQLITE_MUTEX_RECURSIVE +
    • SQLITE_MUTEX_STATIC_MASTER +
    • SQLITE_MUTEX_STATIC_MEM +
    • SQLITE_MUTEX_STATIC_MEM2 +
    • SQLITE_MUTEX_STATIC_PRNG +
    • SQLITE_MUTEX_STATIC_LRU +
    • SQLITE_MUTEX_STATIC_LRU2 +

    + +

    The first two constants cause sqlite3_mutex_alloc() to create +a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +is used but not necessarily so when SQLITE_MUTEX_FAST is used. +The mutex implementation does not need to make a distinction +between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +not want to. But SQLite will only request a recursive mutex in +cases where it really needs one. If a faster non-recursive mutex +implementation is available on the host platform, the mutex subsystem +might return such a mutex in response to SQLITE_MUTEX_FAST.

    + +

    The other allowed parameters to sqlite3_mutex_alloc() each return +a pointer to a static preexisting mutex. Four static mutexes are +used by the current version of SQLite. Future versions of SQLite +may add additional static mutexes. Static mutexes are for internal +use by SQLite only. Applications that use SQLite mutexes should +use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +SQLITE_MUTEX_RECURSIVE.

    + +

    Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +returns a different mutex on every call. But for the static +mutex types, the same mutex is returned on every call that has +the same type number.

    + +

    The sqlite3_mutex_free() routine deallocates a previously +allocated dynamic mutex. SQLite is careful to deallocate every +dynamic mutex that it allocates. The dynamic mutexes must not be in +use when they are deallocated. Attempting to deallocate a static +mutex results in undefined behavior. SQLite never deallocates +a static mutex.

    + +

The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt to enter a mutex. If another thread is already within the mutex, sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. In such cases, the mutex must be exited an equal number of times before another thread can enter. If the same thread tries to enter any other kind of mutex more than once, the behavior is undefined. SQLite will never exhibit such behavior in its own use of mutexes.

    + +

    Some systems (for example, Windows 95) do not support the operation +implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() +will always return SQLITE_BUSY. The SQLite core only ever uses +sqlite3_mutex_try() as an optimization so this is acceptable behavior.

    + +

    The sqlite3_mutex_leave() routine exits a mutex that was +previously entered by the same thread. The behavior +is undefined if the mutex is not currently entered by the +calling thread or is not currently allocated. SQLite will +never do either.
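A minimal sketch of the dynamic-mutex lifecycle described above (illustrative only):

#include <sqlite3.h>

void mutex_demo(void){
  sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
  if( p==0 ) return;                     /* allocation failed */
  sqlite3_mutex_enter(p);                /* blocks until the mutex is free */
  /* ... protected work ... */
  sqlite3_mutex_leave(p);
  if( sqlite3_mutex_try(p)==SQLITE_OK ){ /* non-blocking attempt */
    sqlite3_mutex_leave(p);
  }
  sqlite3_mutex_free(p);                 /* must not be held at this point */
}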

    + +

    If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or +sqlite3_mutex_leave() is a NULL pointer, then all three routines +behave as no-ops.

    + +

    See also: sqlite3_mutex_held() and sqlite3_mutex_notheld(). +


    +

    Mutex Verification Routines

    int sqlite3_mutex_held(sqlite3_mutex*);
    +int sqlite3_mutex_notheld(sqlite3_mutex*);
    +

    +The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +are intended for use inside assert() statements. The SQLite core +never uses these routines except inside an assert() and applications +are advised to follow the lead of the core. The core only +provides implementations for these routines when it is compiled +with the SQLITE_DEBUG flag. External mutex implementations +are only required to provide these routines if SQLITE_DEBUG is +defined and if NDEBUG is not defined.
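A sketch of the intended usage, guarded so that the call is only compiled against a build that provides the routine (assumes SQLITE_DEBUG):

#include <sqlite3.h>
#include <assert.h>

static void requires_lock(sqlite3_mutex *p){
#ifdef SQLITE_DEBUG
  assert( sqlite3_mutex_held(p) );   /* caller must already hold p */
#endif
  (void)p;
  /* ... code that relies on the caller holding the mutex ... */
}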

    + +

    These routines should return true if the mutex in their argument +is held or not held, respectively, by the calling thread.

    + +

The implementation is not required to provide versions of these routines that actually work. If the implementation does not provide working versions of these routines, it should at least provide stubs that always return true so that one does not get spurious assertion failures.

    + +

If the argument to sqlite3_mutex_held() is a NULL pointer then the routine should return 1. This seems counter-intuitive since clearly the mutex cannot be held if it does not exist. But the reason the mutex does not exist is because the build is not using mutexes. And we do not want the assert() containing the call to sqlite3_mutex_held() to fail, so a non-zero return is the appropriate thing to do. The sqlite3_mutex_notheld() interface should also return 1 when given a NULL pointer.


    +

    Opening A New Database Connection

    int sqlite3_open(
    +  const char *filename,   /* Database filename (UTF-8) */
    +  sqlite3 **ppDb          /* OUT: SQLite db handle */
    +);
    +int sqlite3_open16(
    +  const void *filename,   /* Database filename (UTF-16) */
    +  sqlite3 **ppDb          /* OUT: SQLite db handle */
    +);
    +int sqlite3_open_v2(
    +  const char *filename,   /* Database filename (UTF-8) */
    +  sqlite3 **ppDb,         /* OUT: SQLite db handle */
    +  int flags,              /* Flags */
    +  const char *zVfs        /* Name of VFS module to use */
    +);
    +

    +These routines open an SQLite database file whose name is given by the +filename argument. The filename argument is interpreted as UTF-8 for +sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte +order for sqlite3_open16(). A database connection handle is usually +returned in *ppDb, even if an error occurs. The only exception is that +if SQLite is unable to allocate memory to hold the sqlite3 object, +a NULL will be written into *ppDb instead of a pointer to the sqlite3 +object. If the database is opened (and/or created) successfully, then +SQLITE_OK is returned. Otherwise an error code is returned. The +sqlite3_errmsg() or sqlite3_errmsg16() routines can be used to obtain +an English language description of the error.

    + +

    The default encoding for the database will be UTF-8 if +sqlite3_open() or sqlite3_open_v2() is called and +UTF-16 in the native byte order if sqlite3_open16() is used.

    + +

    Whether or not an error occurs when it is opened, resources +associated with the database connection handle should be released by +passing it to sqlite3_close() when it is no longer required.

    + +

    The sqlite3_open_v2() interface works like sqlite3_open() +except that it accepts two additional parameters for additional control +over the new database connection. The flags parameter can take one of +the following three values, optionally combined with the +SQLITE_OPEN_NOMUTEX or SQLITE_OPEN_FULLMUTEX flags:

    + +

    +
    SQLITE_OPEN_READONLY
    +
    The database is opened in read-only mode. If the database does not +already exist, an error is returned.

    + +

    SQLITE_OPEN_READWRITE
    +
    The database is opened for reading and writing if possible, or reading +only if the file is write protected by the operating system. In either +case the database must already exist, otherwise an error is returned.

    + +

    SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE
    +
The database is opened for reading and writing, and is created if it does not already exist. This is the behavior that is always used for sqlite3_open() and sqlite3_open16().
    +

    + +

    If the 3rd parameter to sqlite3_open_v2() is not one of the +combinations shown above or one of the combinations shown above combined +with the SQLITE_OPEN_NOMUTEX or SQLITE_OPEN_FULLMUTEX flags, +then the behavior is undefined.

    + +

    If the SQLITE_OPEN_NOMUTEX flag is set, then the database connection +opens in the multi-thread threading mode as long as the single-thread +mode has not been set at compile-time or start-time. If the +SQLITE_OPEN_FULLMUTEX flag is set then the database connection opens +in the serialized threading mode unless single-thread was +previously selected at compile-time or start-time.

    + +

    If the filename is ":memory:", then a private, temporary in-memory database +is created for the connection. This in-memory database will vanish when +the database connection is closed. Future versions of SQLite might +make use of additional special filenames that begin with the ":" character. +It is recommended that when a database filename actually does begin with +a ":" character you should prefix the filename with a pathname such as +"./" to avoid ambiguity.

    + +

    If the filename is an empty string, then a private, temporary +on-disk database will be created. This private database will be +automatically deleted as soon as the database connection is closed.

    + +

    The fourth parameter to sqlite3_open_v2() is the name of the +sqlite3_vfs object that defines the operating system interface that +the new database connection should use. If the fourth parameter is +a NULL pointer then the default sqlite3_vfs object is used.
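An illustrative sketch of an sqlite3_open_v2() call using the default VFS; error handling is minimal:

#include <sqlite3.h>
#include <stdio.h>

static sqlite3 *open_or_create(const char *zFilename){
  sqlite3 *db = 0;
  int rc = sqlite3_open_v2(zFilename, &db,
                           SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                           0 /* NULL => default sqlite3_vfs */);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n",
            db ? sqlite3_errmsg(db) : "out of memory");
    sqlite3_close(db);                   /* harmless no-op when db is NULL */
    return 0;
  }
  return db;
}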

    + +

    Note to Windows users: The encoding used for the filename argument +of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever +codepage is currently defined. Filenames containing international +characters must be converted to UTF-8 prior to passing them into +sqlite3_open() or sqlite3_open_v2().

    + +

    Requirements: +H12701 H12702 H12703 H12704 H12706 H12707 H12709 H12711 +H12712 H12713 H12714 H12717 H12719 H12721 H12723 +


    +

    Tracing And Profiling Functions

    void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*);
    +void *sqlite3_profile(sqlite3*,
    +   void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
    +

    Important: This interface is experimental and is subject to change without notice.

    +These routines register callback functions that can be used for +tracing and profiling the execution of SQL statements.

    + +

The callback function registered by sqlite3_trace() is invoked at various times when an SQL statement is being run by sqlite3_step(). The callback is passed a UTF-8 rendering of the SQL statement text as the statement first begins executing. Additional callbacks occur as each triggered subprogram is entered. The callbacks for triggers contain a UTF-8 SQL comment that identifies the trigger.

    + +

    The callback function registered by sqlite3_profile() is invoked +as each SQL statement finishes. The profile callback contains +the original statement text and an estimate of wall-clock time +of how long that statement took to run.
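A sketch showing how both callbacks might be registered; the logging behavior is invented for the example (and both interfaces are marked experimental in this version):

#include <sqlite3.h>
#include <stdio.h>

static void traceCb(void *pArg, const char *zSql){
  fprintf((FILE*)pArg, "TRACE: %s\n", zSql);
}

static void profileCb(void *pArg, const char *zSql, sqlite3_uint64 ns){
  fprintf((FILE*)pArg, "PROFILE: %llu ns for %s\n",
          (unsigned long long)ns, zSql);
}

static void enable_logging(sqlite3 *db){
  sqlite3_trace(db, traceCb, stderr);
  sqlite3_profile(db, profileCb, stderr);
}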

    + +

    Requirements: +H12281 H12282 H12283 H12284 H12285 H12287 H12288 H12289 +H12290 +


    +

    Setting The Result Of An SQL Function

    void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
    +void sqlite3_result_double(sqlite3_context*, double);
    +void sqlite3_result_error(sqlite3_context*, const char*, int);
    +void sqlite3_result_error16(sqlite3_context*, const void*, int);
    +void sqlite3_result_error_toobig(sqlite3_context*);
    +void sqlite3_result_error_nomem(sqlite3_context*);
    +void sqlite3_result_error_code(sqlite3_context*, int);
    +void sqlite3_result_int(sqlite3_context*, int);
    +void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
    +void sqlite3_result_null(sqlite3_context*);
    +void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
    +void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
    +void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
    +void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
    +void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
    +void sqlite3_result_zeroblob(sqlite3_context*, int n);
    +

    +These routines are used by the xFunc or xFinal callbacks that +implement SQL functions and aggregates. See +sqlite3_create_function() and sqlite3_create_function16() +for additional information.

    + +

    These functions work very much like the parameter binding family of +functions used to bind values to host parameters in prepared statements. +Refer to the SQL parameter documentation for additional information.

    + +

    The sqlite3_result_blob() interface sets the result from +an application-defined function to be the BLOB whose content is pointed +to by the second parameter and which is N bytes long where N is the +third parameter.

    + +

The sqlite3_result_zeroblob() interface sets the result of the application-defined function to be a BLOB containing all zero bytes and N bytes in size, where N is the value of the 2nd parameter.

    + +

    The sqlite3_result_double() interface sets the result from +an application-defined function to be a floating point value specified +by its 2nd argument.

    + +

    The sqlite3_result_error() and sqlite3_result_error16() functions +cause the implemented SQL function to throw an exception. +SQLite uses the string pointed to by the +2nd parameter of sqlite3_result_error() or sqlite3_result_error16() +as the text of an error message. SQLite interprets the error +message string from sqlite3_result_error() as UTF-8. SQLite +interprets the string from sqlite3_result_error16() as UTF-16 in native +byte order. If the third parameter to sqlite3_result_error() +or sqlite3_result_error16() is negative then SQLite takes as the error +message all text up through the first zero character. +If the third parameter to sqlite3_result_error() or +sqlite3_result_error16() is non-negative then SQLite takes that many +bytes (not characters) from the 2nd parameter as the error message. +The sqlite3_result_error() and sqlite3_result_error16() +routines make a private copy of the error message text before +they return. Hence, the calling function can deallocate or +modify the text after they return without harm. +The sqlite3_result_error_code() function changes the error code +returned by SQLite as a result of an error in a function. By default, +the error code is SQLITE_ERROR. A subsequent call to sqlite3_result_error() +or sqlite3_result_error16() resets the error code to SQLITE_ERROR.

    + +

The sqlite3_result_error_toobig() interface causes SQLite to throw an error indicating that a string or BLOB is too long to represent.

    + +

The sqlite3_result_error_nomem() interface causes SQLite to throw an error indicating that a memory allocation failed.

    + +

    The sqlite3_result_int() interface sets the return value +of the application-defined function to be the 32-bit signed integer +value given in the 2nd argument. +The sqlite3_result_int64() interface sets the return value +of the application-defined function to be the 64-bit signed integer +value given in the 2nd argument.

    + +

    The sqlite3_result_null() interface sets the return value +of the application-defined function to be NULL.

    + +

The sqlite3_result_text(), sqlite3_result_text16(), sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces set the return value of the application-defined function to be a text string which is represented as UTF-8, UTF-16 native byte order, UTF-16 little endian, or UTF-16 big endian, respectively. SQLite takes the text result from the application from the 2nd parameter of the sqlite3_result_text* interfaces. If the 3rd parameter to the sqlite3_result_text* interfaces is negative, then SQLite takes result text from the 2nd parameter through the first zero character. If the 3rd parameter to the sqlite3_result_text* interfaces is non-negative, then as many bytes (not characters) of the text pointed to by the 2nd parameter are taken as the application-defined function result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob() is a non-NULL pointer, then SQLite calls that function as the destructor on the text or BLOB result when it has finished using that result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob() is the special constant SQLITE_STATIC, then SQLite assumes that the text or BLOB result is in constant space and does not copy it or call a destructor when it has finished using that result. If the 4th parameter to the sqlite3_result_text* interfaces or sqlite3_result_blob() is the special constant SQLITE_TRANSIENT then SQLite makes a copy of the result into space obtained from sqlite3_malloc() before it returns.
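The following sketch (not from the original documentation) shows an xFunc body exercising several of these interfaces; the greeting behavior is invented for the example:

#include <sqlite3.h>

static void greetFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;                                    /* assumed to be 1 */
  const unsigned char *zName = sqlite3_value_text(argv[0]);
  if( zName==0 ){
    sqlite3_result_null(ctx);                    /* NULL in, NULL out */
    return;
  }
  char *z = sqlite3_mprintf("hello, %s", zName);
  if( z==0 ){
    sqlite3_result_error_nomem(ctx);             /* report allocation failure */
    return;
  }
  /* SQLITE_TRANSIENT: SQLite copies the text, so z may be freed at once. */
  sqlite3_result_text(ctx, z, -1, SQLITE_TRANSIENT);
  sqlite3_free(z);
}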

    + +

The sqlite3_result_value() interface sets the result of the application-defined function to be a copy of the unprotected sqlite3_value object specified by the 2nd parameter. The sqlite3_result_value() interface makes a copy of the sqlite3_value so that the sqlite3_value specified in the parameter may change or be deallocated after sqlite3_result_value() returns without harm. A protected sqlite3_value object may always be used where an unprotected sqlite3_value object is required, so either kind of sqlite3_value object can be used with this interface.

    + +

If these routines are called from within a different thread than the one containing the application-defined function that received the sqlite3_context pointer, the results are undefined.

    + +

    Requirements: +H16403 H16406 H16409 H16412 H16415 H16418 H16421 H16424 +H16427 H16430 H16433 H16436 H16439 H16442 H16445 H16448 +H16451 H16454 H16457 H16460 H16463 +


    +

    Obtaining SQL Function Parameter Values

    const void *sqlite3_value_blob(sqlite3_value*);
    +int sqlite3_value_bytes(sqlite3_value*);
    +int sqlite3_value_bytes16(sqlite3_value*);
    +double sqlite3_value_double(sqlite3_value*);
    +int sqlite3_value_int(sqlite3_value*);
    +sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
    +const unsigned char *sqlite3_value_text(sqlite3_value*);
    +const void *sqlite3_value_text16(sqlite3_value*);
    +const void *sqlite3_value_text16le(sqlite3_value*);
    +const void *sqlite3_value_text16be(sqlite3_value*);
    +int sqlite3_value_type(sqlite3_value*);
    +int sqlite3_value_numeric_type(sqlite3_value*);
    +

    +The C-language implementation of SQL functions and aggregates uses +this set of interface routines to access the parameter values on +the function or aggregate.

    + +

    The xFunc (for scalar functions) or xStep (for aggregates) parameters +to sqlite3_create_function() and sqlite3_create_function16() +define callbacks that implement the SQL functions and aggregates. +The 4th parameter to these callbacks is an array of pointers to +protected sqlite3_value objects. There is one sqlite3_value object for +each parameter to the SQL function. These routines are used to +extract values from the sqlite3_value objects.

    + +

    These routines work only with protected sqlite3_value objects. +Any attempt to use these routines on an unprotected sqlite3_value +object results in undefined behavior.

    + +

    These routines work just like the corresponding column access functions +except that these routines take a single protected sqlite3_value object +pointer instead of a sqlite3_stmt* pointer and an integer column number.

    + +

    The sqlite3_value_text16() interface extracts a UTF-16 string +in the native byte-order of the host machine. The +sqlite3_value_text16be() and sqlite3_value_text16le() interfaces +extract UTF-16 strings as big-endian and little-endian respectively.

    + +

    The sqlite3_value_numeric_type() interface attempts to apply +numeric affinity to the value. This means that an attempt is +made to convert the value to an integer or floating point. If +such a conversion is possible without loss of information (in other +words, if the value is a string that looks like a number) +then the conversion is performed. Otherwise no conversion occurs. +The datatype after conversion is returned.
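An illustrative xFunc body that reads its protected argument values; the function's behavior (summing integer arguments) is invented for the example:

#include <sqlite3.h>

static void addIntsFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  sqlite3_int64 sum = 0;
  for(int i=0; i<argc; i++){
    if( sqlite3_value_type(argv[i])==SQLITE_NULL ){
      sqlite3_result_null(ctx);          /* propagate NULL like most SQL operators */
      return;
    }
    sum += sqlite3_value_int64(argv[i]); /* applies the usual integer conversion */
  }
  sqlite3_result_int64(ctx, sum);
}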

    + +

    Please pay particular attention to the fact that the pointer returned +from sqlite3_value_blob(), sqlite3_value_text(), or +sqlite3_value_text16() can be invalidated by a subsequent call to +sqlite3_value_bytes(), sqlite3_value_bytes16(), sqlite3_value_text(), +or sqlite3_value_text16().

    + +

    These routines must be called from the same thread as +the SQL function that supplied the sqlite3_value* parameters.

    + +

    Requirements: +H15103 H15106 H15109 H15112 H15115 H15118 H15121 H15124 +H15127 H15130 H15133 H15136 +


    +

    Virtual File System Objects

    sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
    +int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
    +int sqlite3_vfs_unregister(sqlite3_vfs*);
    +

    +A virtual filesystem (VFS) is an sqlite3_vfs object +that SQLite uses to interact +with the underlying operating system. Most SQLite builds come with a +single default VFS that is appropriate for the host computer. +New VFSes can be registered and existing VFSes can be unregistered. +The following interfaces are provided.

    + +

    The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. +Names are case sensitive. +Names are zero-terminated UTF-8 strings. +If there is no match, a NULL pointer is returned. +If zVfsName is NULL then the default VFS is returned.

    + +

    New VFSes are registered with sqlite3_vfs_register(). +Each new VFS becomes the default VFS if the makeDflt flag is set. +The same VFS can be registered multiple times without injury. +To make an existing VFS into the default VFS, register it again +with the makeDflt flag set. If two different VFSes with the +same name are registered, the behavior is undefined. If a +VFS is registered with a name that is NULL or an empty string, +then the behavior is undefined.

    + +

    Unregister a VFS with the sqlite3_vfs_unregister() interface. +If the default VFS is unregistered, another VFS is chosen as +the default. The choice for the new VFS is arbitrary.
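A short illustration; the "unix-none" VFS name is only an example and may not exist in a given build:

#include <sqlite3.h>
#include <stdio.h>

void show_vfs(void){
  sqlite3_vfs *pDefault = sqlite3_vfs_find(0);        /* NULL => default VFS */
  if( pDefault ) printf("default VFS: %s\n", pDefault->zName);

  sqlite3_vfs *pOther = sqlite3_vfs_find("unix-none");
  printf("unix-none is %sregistered\n", pOther ? "" : "not ");
}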

    + +

    Requirements: +H11203 H11206 H11209 H11212 H11215 H11218 +


    +

    Binding Values To Prepared Statements

    int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    +int sqlite3_bind_double(sqlite3_stmt*, int, double);
    +int sqlite3_bind_int(sqlite3_stmt*, int, int);
    +int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
    +int sqlite3_bind_null(sqlite3_stmt*, int);
    +int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
    +int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
    +int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
    +int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
    +

    +In the SQL strings input to sqlite3_prepare_v2() and its variants, +literals may be replaced by a parameter in one of these forms:

    + +

      +
    • ? +
    • ?NNN +
    • :VVV +
    • @VVV +
    • $VVV +

    + +

    In the parameter forms shown above NNN is an integer literal, +and VVV is an alpha-numeric parameter name. The values of these +parameters (also called "host parameter names" or "SQL parameters") +can be set using the sqlite3_bind_*() routines defined here.

    + +

    The first argument to the sqlite3_bind_*() routines is always +a pointer to the sqlite3_stmt object returned from +sqlite3_prepare_v2() or its variants.

    + +

    The second argument is the index of the SQL parameter to be set. +The leftmost SQL parameter has an index of 1. When the same named +SQL parameter is used more than once, second and subsequent +occurrences have the same index as the first occurrence. +The index for named parameters can be looked up using the +sqlite3_bind_parameter_index() API if desired. The index +for "?NNN" parameters is the value of NNN. +The NNN value must be between 1 and the sqlite3_limit() +parameter SQLITE_LIMIT_VARIABLE_NUMBER (default value: 999).

    + +

    The third argument is the value to bind to the parameter.

    + +

    In those routines that have a fourth argument, its value is the +number of bytes in the parameter. To be clear: the value is the +number of bytes in the value, not the number of characters. +If the fourth parameter is negative, the length of the string is +the number of bytes up to the first zero terminator.

    + +

    The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and +sqlite3_bind_text16() is a destructor used to dispose of the BLOB or +string after SQLite has finished with it. If the fifth argument is +the special value SQLITE_STATIC, then SQLite assumes that the +information is in static, unmanaged space and does not need to be freed. +If the fifth argument has the value SQLITE_TRANSIENT, then +SQLite makes its own private copy of the data immediately, before +the sqlite3_bind_*() routine returns.

    + +

    The sqlite3_bind_zeroblob() routine binds a BLOB of length N that +is filled with zeroes. A zeroblob uses a fixed amount of memory +(just an integer to hold its size) while it is being processed. +Zeroblobs are intended to serve as placeholders for BLOBs whose +content is later written using +incremental BLOB I/O routines. +A negative value for the zeroblob results in a zero-length BLOB.

    + +

    The sqlite3_bind_*() routines must be called after +sqlite3_prepare_v2() (and its variants) or sqlite3_reset() and +before sqlite3_step(). +Bindings are not cleared by the sqlite3_reset() routine. +Unbound parameters are interpreted as NULL.
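An illustrative sketch of the binding sequence; the "people" table and the parameter names are assumed for the example:

#include <sqlite3.h>

static int insert_person(sqlite3 *db, const char *zName, int age){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
              "INSERT INTO people(name, age) VALUES(:name, :age)",
              -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;

  sqlite3_bind_text(pStmt, sqlite3_bind_parameter_index(pStmt, ":name"),
                    zName, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int(pStmt, sqlite3_bind_parameter_index(pStmt, ":age"), age);

  rc = sqlite3_step(pStmt);              /* SQLITE_DONE on success */
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}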

    + +

These routines return SQLITE_OK on success or an error code if anything goes wrong. SQLITE_RANGE is returned if the parameter index is out of range. SQLITE_NOMEM is returned if malloc() fails. SQLITE_MISUSE might be returned if these routines are called on a virtual machine that is in the wrong state or which has already been finalized. Detection of misuse is unreliable. Applications should not depend on SQLITE_MISUSE returns. SQLITE_MISUSE is intended to indicate a logic error in the application. Future versions of SQLite might panic rather than return SQLITE_MISUSE.

    + +

    See also: sqlite3_bind_parameter_count(), +sqlite3_bind_parameter_name(), and sqlite3_bind_parameter_index().

    + +

    Requirements: +H13506 H13509 H13512 H13515 H13518 H13521 H13524 H13527 +H13530 H13533 H13536 H13539 H13542 H13545 H13548 H13551

    + +


    +

    Compiling An SQL Statement

    int sqlite3_prepare(
    +  sqlite3 *db,            /* Database handle */
    +  const char *zSql,       /* SQL statement, UTF-8 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare_v2(
    +  sqlite3 *db,            /* Database handle */
    +  const char *zSql,       /* SQL statement, UTF-8 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare16(
    +  sqlite3 *db,            /* Database handle */
    +  const void *zSql,       /* SQL statement, UTF-16 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +int sqlite3_prepare16_v2(
    +  sqlite3 *db,            /* Database handle */
    +  const void *zSql,       /* SQL statement, UTF-16 encoded */
    +  int nByte,              /* Maximum length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
    +  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
    +);
    +

    +To execute an SQL query, it must first be compiled into a byte-code +program using one of these routines.

    + +

    The first argument, "db", is a database connection obtained from a +prior successful call to sqlite3_open(), sqlite3_open_v2() or +sqlite3_open16(). The database connection must not have been closed.

    + +

    The second argument, "zSql", is the statement to be compiled, encoded +as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() +interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() +use UTF-16.

    + +

    If the nByte argument is less than zero, then zSql is read up to the +first zero terminator. If nByte is non-negative, then it is the maximum +number of bytes read from zSql. When nByte is non-negative, the +zSql string ends at either the first '\000' or '\u0000' character or +the nByte-th byte, whichever comes first. If the caller knows +that the supplied string is nul-terminated, then there is a small +performance advantage to be gained by passing an nByte parameter that +is equal to the number of bytes in the input string including +the nul-terminator bytes.

    + +

    If pzTail is not NULL then *pzTail is made to point to the first byte +past the end of the first SQL statement in zSql. These routines only +compile the first statement in zSql, so *pzTail is left pointing to +what remains uncompiled.

    + +

    *ppStmt is left pointing to a compiled prepared statement that can be +executed using sqlite3_step(). If there is an error, *ppStmt is set +to NULL. If the input text contains no SQL (if the input is an empty +string or a comment) then *ppStmt is set to NULL. +The calling procedure is responsible for deleting the compiled +SQL statement using sqlite3_finalize() after it has finished with it. +ppStmt may not be NULL.

    + +

    On success, SQLITE_OK is returned, otherwise an error code is returned.
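The following sketch (not part of the original documentation) uses pzTail to compile and run each statement of a multi-statement script in turn:

#include <sqlite3.h>

static int run_script(sqlite3 *db, const char *zScript){
  const char *zSql = zScript;
  while( zSql[0] ){
    sqlite3_stmt *pStmt = 0;
    const char *zTail = 0;
    int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
    if( rc!=SQLITE_OK ) return rc;
    if( pStmt ){                         /* NULL for empty statements and comments */
      while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* ignore result rows */ }
      rc = sqlite3_finalize(pStmt);
      if( rc!=SQLITE_OK ) return rc;
    }
    zSql = zTail;                        /* continue after the compiled statement */
  }
  return SQLITE_OK;
}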

    + +

The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are recommended for all new programs. The two older interfaces are retained for backwards compatibility, but their use is discouraged. In the "v2" interfaces, the prepared statement that is returned (the sqlite3_stmt object) contains a copy of the original SQL text. This causes the sqlite3_step() interface to behave differently in two ways:

    + +

      +
1. If the database schema changes, instead of returning SQLITE_SCHEMA as it always used to do, sqlite3_step() will automatically recompile the SQL statement and try to run it again. If the schema has changed in a way that makes the statement no longer valid, sqlite3_step() will still return SQLITE_SCHEMA. But unlike the legacy behavior, SQLITE_SCHEMA is now a fatal error. Calling sqlite3_prepare_v2() again will not make the error go away. Note: use sqlite3_errmsg() to find the text of the parsing error that results in an SQLITE_SCHEMA return.

2. When an error occurs, sqlite3_step() will return one of the detailed error codes or extended error codes. The legacy behavior was that sqlite3_step() would only return a generic SQLITE_ERROR result code and you would have to make a second call to sqlite3_reset() in order to find the underlying cause of the problem. With the "v2" prepare interfaces, the underlying reason for the error is returned immediately.

    + +

    Requirements: +H13011 H13012 H13013 H13014 H13015 H13016 H13019 H13021

    + +


    +

    Create Or Redefine SQL Functions

    int sqlite3_create_function(
    +  sqlite3 *db,
    +  const char *zFunctionName,
    +  int nArg,
    +  int eTextRep,
    +  void *pApp,
    +  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xFinal)(sqlite3_context*)
    +);
    +int sqlite3_create_function16(
    +  sqlite3 *db,
    +  const void *zFunctionName,
    +  int nArg,
    +  int eTextRep,
    +  void *pApp,
    +  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    +  void (*xFinal)(sqlite3_context*)
    +);
    +

    +These two functions (collectively known as "function creation routines") +are used to add SQL functions or aggregates or to redefine the behavior +of existing SQL functions or aggregates. The only difference between the +two is that the second parameter, the name of the (scalar) function or +aggregate, is encoded in UTF-8 for sqlite3_create_function() and UTF-16 +for sqlite3_create_function16().

    + +

    The first parameter is the database connection to which the SQL +function is to be added. If a single program uses more than one database +connection internally, then SQL functions must be added individually to +each database connection.

    + +

    The second parameter is the name of the SQL function to be created or +redefined. The length of the name is limited to 255 bytes, exclusive of +the zero-terminator. Note that the name length limit is in bytes, not +characters. Any attempt to create a function with a longer name +will result in SQLITE_ERROR being returned.

    + +

    The third parameter (nArg) +is the number of arguments that the SQL function or +aggregate takes. If this parameter is -1, then the SQL function or +aggregate may take any number of arguments between 0 and the limit +set by sqlite3_limit(SQLITE_LIMIT_FUNCTION_ARG). If the third +parameter is less than -1 or greater than 127 then the behavior is +undefined.

    + +

The fourth parameter, eTextRep, specifies what text encoding this SQL function prefers for its parameters. Any SQL function implementation should be able to work with UTF-8, UTF-16le, or UTF-16be. But some implementations may be more efficient with one encoding than another. It is allowed to invoke sqlite3_create_function() or sqlite3_create_function16() multiple times with the same function but with different values of eTextRep. When multiple implementations of the same function are available, SQLite will pick the one that involves the least amount of data conversion. If there is only a single implementation which does not care what text encoding is used, then the fourth argument should be SQLITE_ANY.

    + +

    The fifth parameter is an arbitrary pointer. The implementation of the +function can gain access to this pointer using sqlite3_user_data().

    + +

The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are pointers to C-language functions that implement the SQL function or aggregate. A scalar SQL function requires an implementation of the xFunc callback only; NULL pointers should be passed as the xStep and xFinal parameters. An aggregate SQL function requires an implementation of xStep and xFinal and NULL should be passed for xFunc. To delete an existing SQL function or aggregate, pass NULL for all three function callbacks.

    + +

It is permitted to register multiple implementations of the same function with the same name but with either differing numbers of arguments or differing preferred text encodings. SQLite will use the implementation that most closely matches the way in which the SQL function is used. A function implementation with a non-negative nArg parameter is a better match than a function implementation with a negative nArg. A function where the preferred text encoding matches the database encoding is a better match than a function where the encoding is different. A function where the encoding difference is between UTF16le and UTF16be is a closer match than a function where the encoding difference is between UTF8 and UTF16.

    + +

    Built-in functions may be overloaded by new application-defined functions. +The first application-defined function with a given name overrides all +built-in functions in the same database connection with the same name. +Subsequent application-defined functions of the same name only override +prior application-defined functions that are an exact match for the +number of parameters and preferred encoding.

    + +

    An application-defined function is permitted to call other +SQLite interfaces. However, such calls must not +close the database connection nor finalize or reset the prepared +statement in which the function is running.
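An illustrative registration of a one-argument scalar function, and its later removal; the "half" function is invented for the example:

#include <sqlite3.h>

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

static int register_half(sqlite3 *db){
  /* scalar function: xFunc only, xStep and xFinal are NULL */
  return sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0,
                                 halfFunc, 0, 0);
}

static int unregister_half(sqlite3 *db){
  /* passing NULL for all three callbacks deletes the function */
  return sqlite3_create_function(db, "half", 1, SQLITE_ANY, 0, 0, 0, 0);
}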

    + +

    Requirements: +H16103 H16106 H16109 H16112 H16118 H16121 H16127 +H16130 H16133 H16136 H16139 H16142 +


    +

    Test For Auto-Commit Mode

    int sqlite3_get_autocommit(sqlite3*);
    +

    +The sqlite3_get_autocommit() interface returns non-zero or +zero if the given database connection is or is not in autocommit mode, +respectively. Autocommit mode is on by default. +Autocommit mode is disabled by a BEGIN statement. +Autocommit mode is re-enabled by a COMMIT or ROLLBACK.

    + +

    If certain kinds of errors occur on a statement within a multi-statement +transaction (errors including SQLITE_FULL, SQLITE_IOERR, +SQLITE_NOMEM, SQLITE_BUSY, and SQLITE_INTERRUPT) then the +transaction might be rolled back automatically. The only way to +find out whether SQLite automatically rolled back the transaction after +an error is to use this function.
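A short sketch of that check; the surrounding transaction logic is assumed:

#include <sqlite3.h>
#include <stdio.h>

static void check_rollback(sqlite3 *db, int rc){
  if( rc!=SQLITE_OK && sqlite3_get_autocommit(db) ){
    /* autocommit came back on: SQLite rolled the transaction back for us */
    fprintf(stderr, "transaction was rolled back automatically\n");
  }
}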

    + +

    If another thread changes the autocommit status of the database +connection while this routine is running, then the return value +is undefined.

    + +

    Requirements: H12931 H12932 H12933 H12934 +


    +

    Result Values From A Query

    const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
    +int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
    +int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
    +double sqlite3_column_double(sqlite3_stmt*, int iCol);
    +int sqlite3_column_int(sqlite3_stmt*, int iCol);
    +sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
    +const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
    +const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
    +int sqlite3_column_type(sqlite3_stmt*, int iCol);
    +sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
    +

    +These routines form the "result set query" interface.

    + +

    These routines return information about a single column of the current +result row of a query. In every case the first argument is a pointer +to the prepared statement that is being evaluated (the sqlite3_stmt* +that was returned from sqlite3_prepare_v2() or one of its variants) +and the second argument is the index of the column for which information +should be returned. The leftmost column of the result set has the index 0.

    + +

    If the SQL statement does not currently point to a valid row, or if the +column index is out of range, the result is undefined. +These routines may only be called when the most recent call to +sqlite3_step() has returned SQLITE_ROW and neither +sqlite3_reset() nor sqlite3_finalize() have been called subsequently. +If any of these routines are called after sqlite3_reset() or +sqlite3_finalize() or after sqlite3_step() has returned +something other than SQLITE_ROW, the results are undefined. +If sqlite3_step() or sqlite3_reset() or sqlite3_finalize() +are called from a different thread while any of these routines +are pending, then the results are undefined.

    + +

    The sqlite3_column_type() routine returns the +datatype code for the initial data type +of the result column. The returned value is one of SQLITE_INTEGER, +SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL. The value +returned by sqlite3_column_type() is only meaningful if no type +conversions have occurred as described below. After a type conversion, +the value returned by sqlite3_column_type() is undefined. Future +versions of SQLite may change the behavior of sqlite3_column_type() +following a type conversion.

    + +

    If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() +routine returns the number of bytes in that BLOB or string. +If the result is a UTF-16 string, then sqlite3_column_bytes() converts +the string to UTF-8 and then returns the number of bytes. +If the result is a numeric value then sqlite3_column_bytes() uses +sqlite3_snprintf() to convert that value to a UTF-8 string and returns +the number of bytes in that string. +The value returned does not include the zero terminator at the end +of the string. For clarity: the value returned is the number of +bytes in the string, not the number of characters.

    + +

    Strings returned by sqlite3_column_text() and sqlite3_column_text16(), +even empty strings, are always zero terminated. The return +value from sqlite3_column_blob() for a zero-length BLOB is an arbitrary +pointer, possibly even a NULL pointer.

    + +

    The sqlite3_column_bytes16() routine is similar to sqlite3_column_bytes() +but leaves the result in UTF-16 in native byte order instead of UTF-8. +The zero terminator is not included in this count.

    + +

    The object returned by sqlite3_column_value() is an +unprotected sqlite3_value object. An unprotected sqlite3_value object +may only be used with sqlite3_bind_value() and sqlite3_result_value(). +If the unprotected sqlite3_value object returned by +sqlite3_column_value() is used in any other way, including calls +to routines like sqlite3_value_int(), sqlite3_value_text(), +or sqlite3_value_bytes(), then the behavior is undefined.
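    As a sketch of the only supported uses (pSelect and pInsert are hypothetical prepared statements, with pSelect currently positioned on a row):

      /* An unprotected value may be handed to sqlite3_bind_value() ... */
      sqlite3_value *pVal = sqlite3_column_value(pSelect, 0);
      int rc = sqlite3_bind_value(pInsert, 1, pVal);
      /* ... but calling, say, sqlite3_value_int(pVal) here would be
      ** undefined behavior. */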

    + +

    These routines attempt to convert the value where appropriate. For +example, if the internal representation is FLOAT and a text result +is requested, sqlite3_snprintf() is used internally to perform the +conversion automatically. The following table details the conversions +that are applied:

    + +

    + +
    Internal Type   Requested Type   Conversion
    -------------   --------------   ----------------------------------
    NULL            INTEGER          Result is 0
    NULL            FLOAT            Result is 0.0
    NULL            TEXT             Result is NULL pointer
    NULL            BLOB             Result is NULL pointer
    INTEGER         FLOAT            Convert from integer to float
    INTEGER         TEXT             ASCII rendering of the integer
    INTEGER         BLOB             Same as INTEGER->TEXT
    FLOAT           INTEGER          Convert from float to integer
    FLOAT           TEXT             ASCII rendering of the float
    FLOAT           BLOB             Same as FLOAT->TEXT
    TEXT            INTEGER          Use atoi()
    TEXT            FLOAT            Use atof()
    TEXT            BLOB             No change
    BLOB            INTEGER          Convert to TEXT then use atoi()
    BLOB            FLOAT            Convert to TEXT then use atof()
    BLOB            TEXT             Add a zero terminator if needed
    +

    + +

    The table above makes reference to standard C library functions atoi() +and atof(). SQLite does not really use these functions. It has its +own equivalent internal routines. The atoi() and atof() names are +used in the table for brevity and because they are familiar to most +C programmers.

    + +

    Note that when type conversions occur, pointers returned by prior +calls to sqlite3_column_blob(), sqlite3_column_text(), and/or +sqlite3_column_text16() may be invalidated. +Type conversions and pointer invalidations might occur +in the following cases:

    + +

      +
    • The initial content is a BLOB and sqlite3_column_text() or sqlite3_column_text16() is called. A zero-terminator might need to be added to the string.
    • The initial content is UTF-8 text and sqlite3_column_bytes16() or sqlite3_column_text16() is called. The content must be converted to UTF-16.
    • The initial content is UTF-16 text and sqlite3_column_bytes() or sqlite3_column_text() is called. The content must be converted to UTF-8.

    + +

    Conversions between UTF-16be and UTF-16le are always done in place and do +not invalidate a prior pointer, though of course the content of the buffer +that the prior pointer points to will have been modified. Other kinds +of conversion are done in place when it is possible, but sometimes they +are not possible and in those cases prior pointers are invalidated.

    + +

    The safest and easiest to remember policy is to invoke these routines +in one of the following ways:

    + +

      +
    • sqlite3_column_text() followed by sqlite3_column_bytes()
    • sqlite3_column_blob() followed by sqlite3_column_bytes()
    • sqlite3_column_text16() followed by sqlite3_column_bytes16()

    + +

    In other words, you should call sqlite3_column_text(), +sqlite3_column_blob(), or sqlite3_column_text16() first to force the result +into the desired format, then invoke sqlite3_column_bytes() or +sqlite3_column_bytes16() to find the size of the result. Do not mix calls +to sqlite3_column_text() or sqlite3_column_blob() with calls to +sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() +with calls to sqlite3_column_bytes().
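    A minimal sketch of that pattern (pStmt is a hypothetical prepared SELECT whose first column is text and whose second is a BLOB):

      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        const unsigned char *zTxt = sqlite3_column_text(pStmt, 0);
        int nTxt = sqlite3_column_bytes(pStmt, 0);   /* UTF-8 bytes, no terminator */

        const void *pBlob = sqlite3_column_blob(pStmt, 1);
        int nBlob = sqlite3_column_bytes(pStmt, 1);  /* BLOB size in bytes */

        /* Use zTxt/nTxt and pBlob/nBlob before the next call to
        ** sqlite3_step(), sqlite3_reset(), or sqlite3_finalize(). */
      }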

    + +

    The pointers returned are valid until a type conversion occurs as described above, or until sqlite3_step() or sqlite3_reset() or sqlite3_finalize() is called. The memory space used to hold strings and BLOBs is freed automatically. Do not pass the pointers returned by sqlite3_column_blob(), sqlite3_column_text(), etc. into sqlite3_free().

    + +

    If a memory allocation error occurs during the evaluation of any +of these routines, a default value is returned. The default value +is either the integer 0, the floating point number 0.0, or a NULL +pointer. Subsequent calls to sqlite3_errcode() will return +SQLITE_NOMEM.

    + +

    Requirements: +H13803 H13806 H13809 H13812 H13815 H13818 H13821 H13824 +H13827 H13830 +


    +

    Enable Or Disable Shared Pager Cache

    int sqlite3_enable_shared_cache(int);
    +

    +This routine enables or disables the sharing of the database cache +and schema data structures between connections +to the same database. Sharing is enabled if the argument is true +and disabled if the argument is false.

    + +

    Cache sharing is enabled and disabled for an entire process. +This is a change as of SQLite version 3.5.0. In prior versions of SQLite, +sharing was enabled or disabled for each thread separately.

    + +

    The cache sharing mode set by this interface affects all subsequent calls to sqlite3_open(), sqlite3_open_v2(), and sqlite3_open16(). Existing database connections continue to use the sharing mode that was in effect at the time they were opened.
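    A minimal sketch of the required ordering (the filename test.db is hypothetical and error handling is omitted):

      sqlite3 *db1, *db2;
      sqlite3_enable_shared_cache(1);    /* must precede the opens below */
      sqlite3_open("test.db", &db1);
      sqlite3_open("test.db", &db2);     /* may share db1's cache and schema */
      /* ... use the connections ... */
      sqlite3_close(db2);
      sqlite3_close(db1);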

    + +

    Virtual tables cannot be used with a shared cache. When shared +cache is enabled, the sqlite3_create_module() API used to register +virtual tables will always return an error.

    + +

    This routine returns SQLITE_OK if shared cache was enabled or disabled +successfully. An error code is returned otherwise.

    + +

    Shared cache is disabled by default, but this might change in future releases of SQLite. Applications that care about the shared cache setting should set it explicitly.

    + +

    See Also: SQLite Shared-Cache Mode

    + +

    Requirements: H10331 H10336 H10337 H10339 +


    +
    +This page last modified 2009/02/18 18:03:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/capi3ref.tcl.old /tmp/3ARg2Grji7/sqlite3-3.6.16/www/capi3ref.tcl.old --- sqlite3-3.4.2/www/capi3ref.tcl.old 2007-06-13 18:42:51.000000000 +0100 +++ sqlite3-3.6.16/www/capi3ref.tcl.old 1970-01-01 01:00:00.000000000 +0100 @@ -1,1916 +0,0 @@ -set rcsid {$Id: capi3ref.tcl,v 1.60 2007/05/19 06:48:43 danielk1977 Exp $} -source common.tcl -header {C/C++ Interface For SQLite Version 3} -puts { -

    C/C++ Interface For SQLite Version 3

    -} - -proc api {name prototype desc {notused x}} { - global apilist specialname - if {$name==""} { - regsub -all {sqlite3_[a-z0-9_]+\(} $prototype \ - {[lappend name [string trimright & (]]} x1 - subst $x1 - } else { - lappend specialname $name - } - lappend apilist [list $name $prototype $desc] -} - -api {extended-result-codes} { -#define SQLITE_IOERR_READ -#define SQLITE_IOERR_SHORT_READ -#define SQLITE_IOERR_WRITE -#define SQLITE_IOERR_FSYNC -#define SQLITE_IOERR_DIR_FSYNC -#define SQLITE_IOERR_TRUNCATE -#define SQLITE_IOERR_FSTAT -#define SQLITE_IOERR_UNLOCK -#define SQLITE_IOERR_RDLOCK -#define SQLITE_IOERR_BLOCKED -... -} { -In its default configuration, SQLite API routines return one of 26 integer -result codes described at result-codes. However, experience has shown that -many of these result codes are too course-grained. They do not provide as -much information about problems as users might like. In an effort to -address this, newer versions of SQLite (version 3.3.8 and later) include -support for additional result codes that provide more detailed information -about errors. The extended result codes are enabled (or disabled) for -each database -connection using the sqlite3_extended_result_codes() API. - -Some of the available extended result codes are listed above. -We expect the number of extended result codes will be expand -over time. Software that uses extended result codes should expect -to see new result codes in future releases of SQLite. - -The symbolic name for an extended result code always contains a related -primary result code as a prefix. Primary result codes contain a single -"_" character. Extended result codes contain two or more "_" characters. -The numeric value of an extended result code can be converted to its -corresponding primary result code by masking off the lower 8 bytes. - -A complete list of available extended result codes and -details about the meaning of the various extended result codes can be -found by consulting the C code, especially the sqlite3.h header -file and its antecedent sqlite.h.in. 
Additional information -is also available at the SQLite wiki: -http://www.sqlite.org/cvstrac/wiki?p=ExtendedResultCodes -} - - -api {result-codes} { -#define SQLITE_OK 0 /* Successful result */ -#define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* An internal logic error in SQLite */ -#define SQLITE_PERM 3 /* Access permission denied */ -#define SQLITE_ABORT 4 /* Callback routine requested an abort */ -#define SQLITE_BUSY 5 /* The database file is locked */ -#define SQLITE_LOCKED 6 /* A table in the database is locked */ -#define SQLITE_NOMEM 7 /* A malloc() failed */ -#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ -#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite_interrupt() */ -#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ -#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ -#define SQLITE_NOTFOUND 12 /* (Internal Only) Table or record not found */ -#define SQLITE_FULL 13 /* Insertion failed because database is full */ -#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ -#define SQLITE_EMPTY 16 /* (Internal Only) Database table is empty */ -#define SQLITE_SCHEMA 17 /* The database schema changed */ -#define SQLITE_TOOBIG 18 /* Too much data for one row of a table */ -#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ -#define SQLITE_MISMATCH 20 /* Data type mismatch */ -#define SQLITE_MISUSE 21 /* Library used incorrectly */ -#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ -#define SQLITE_AUTH 23 /* Authorization denied */ -#define SQLITE_ROW 100 /* sqlite_step() has another row ready */ -#define SQLITE_DONE 101 /* sqlite_step() has finished executing */ -} { -Many SQLite functions return an integer result code from the set shown -above in order to indicates success or failure. - -The result codes above are the only ones returned by SQLite in its -default configuration. However, the sqlite3_extended_result_codes() -API can be used to set a database connectoin to return more detailed -result codes. See the documentation on sqlite3_extended_result_codes() -or extended-result-codes for additional information. -} - -api {} { - int sqlite3_extended_result_codes(sqlite3*, int onoff); -} { -This routine enables or disabled extended-result-codes feature. -By default, SQLite API routines return one of only 26 integer -result codes described at result-codes. When extended result codes -are enabled by this routine, the repetoire of result codes can be -much larger and can (hopefully) provide more detailed information -about the cause of an error. - -The second argument is a boolean value that turns extended result -codes on and off. Extended result codes are off by default for -backwards compatibility with older versions of SQLite. -} - -api {} { - const char *sqlite3_libversion(void); -} { - Return a pointer to a string which contains the version number of - the library. The same string is available in the global - variable named "sqlite3_version". This interface is provided since - windows is unable to access global variables in DLLs. -} - -api {} { - void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); -} { - Aggregate functions use this routine to allocate - a structure for storing their state. The first time this routine - is called for a particular aggregate, a new structure of size nBytes - is allocated, zeroed, and returned. 
On subsequent calls (for the - same aggregate instance) the same buffer is returned. The implementation - of the aggregate can use the returned buffer to accumulate data. - - The buffer is freed automatically by SQLite when the query that - invoked the aggregate function terminates. -} - -api {} { - int sqlite3_aggregate_count(sqlite3_context*); -} { - This function is deprecated. It continues to exist so as not to - break any legacy code that might happen to use it. But it should not - be used in any new code. - - In order to encourage people to not use this function, we are not going - to tell you what it does. -} - -api {} { - int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); - int sqlite3_bind_double(sqlite3_stmt*, int, double); - int sqlite3_bind_int(sqlite3_stmt*, int, int); - int sqlite3_bind_int64(sqlite3_stmt*, int, long long int); - int sqlite3_bind_null(sqlite3_stmt*, int); - int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); - int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int n, void(*)(void*) int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); - int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); -); - #define SQLITE_STATIC ((void(*)(void *))0) - #define SQLITE_TRANSIENT ((void(*)(void *))-1) -} { - In the SQL strings input to sqlite3_prepare_v2() and sqlite3_prepare16_v2(), - one or more literals can be replace by a parameter "?" or "?NNN" - or ":AAA" or "@AAA" or "\$VVV" where NNN is an integer literal, - AAA is an alphanumeric identifier and VVV is a variable name according - to the syntax rules of the TCL programming language. - The values of these parameters (also called "host parameter names") - can be set using the sqlite3_bind_*() routines. - - The first argument to the sqlite3_bind_*() routines always is a pointer - to the sqlite3_stmt structure returned from sqlite3_prepare_v2(). The second - argument is the index of the parameter to be set. The first parameter has - an index of 1. When the same named parameter is used more than once, second - and subsequent - occurrences have the same index as the first occurrence. The index for - named parameters can be looked up using the - sqlite3_bind_parameter_name() API if desired. The index for "?NNN" - parametes is the value of NNN. - The NNN value must be between 1 and the compile-time - parameter SQLITE_MAX_VARIABLE_NUMBER (default value: 999). - See limits.html for additional information. - - The third argument is the value to bind to the parameter. - - In those - routines that have a fourth argument, its value is the number of bytes - in the parameter. To be clear: the value is the number of bytes in the - string, not the number of characters. The number - of bytes does not include the zero-terminator at the end of strings. - If the fourth parameter is negative, the length of the string is - number of bytes up to the first zero terminator. - - The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and - sqlite3_bind_text16() is a destructor used to dispose of the BLOB or - text after SQLite has finished with it. If the fifth argument is the - special value SQLITE_STATIC, then the library assumes that the information - is in static, unmanaged space and does not need to be freed. If the - fifth argument has the value SQLITE_TRANSIENT, then SQLite makes its - own private copy of the data immediately, before the sqlite3_bind_*() - routine returns. 
- - The sqlite3_bind_zeroblob() routine binds a BLOB of length n that - is filled with zeros. A zeroblob uses a fixed amount of memory - (just an integer to hold it size) while it is being processed. - Zeroblobs are intended to serve as place-holders for BLOBs whose - content is later written using increment BLOB I/O routines. See - sqlite3_blob_open() and sqlite3_blob_write() for additional - information. - - The sqlite3_bind_*() routines must be called after - sqlite3_prepare_v2() or sqlite3_reset() and before sqlite3_step(). - Bindings are not cleared by the sqlite3_reset() routine. - Unbound parameters are interpreted as NULL. - - These routines return SQLITE_OK on success or an error code if - anything goes wrong. SQLITE_RANGE is returned if the parameter - index is out of range. SQLITE_NOMEM is returned if malloc fails. - SQLITE_MISUSE is returned if these routines are called on a virtual - machine that is the wrong state or which has already been finalized. -} - -api {} { - int sqlite3_bind_parameter_count(sqlite3_stmt*); -} { - Return the largest host parameter index in the precompiled statement given - as the argument. When the host parameters are of the forms like ":AAA" - or "?", then they are assigned sequential increasing numbers beginning - with one, so the value returned is the number of parameters. However - if the same host parameter name is used multiple times, each occurrance - is given the same number, so the value returned in that case is the number - of unique host parameter names. If host parameters of the form "?NNN" - are used (where NNN is an integer) then there might be gaps in the - numbering and the value returned by this interface is the index of the - host parameter with the largest index value. -} - -api {} { - const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int n); -} { - Return the name of the n-th parameter in the precompiled statement. - Parameters of the form ":AAA" or "@AAA" or "\$VVV" have a name which is the - string ":AAA" or "@AAA" or "\$VVV". - In other words, the initial ":" or "$" or "@" - is included as part of the name. - Parameters of the form "?" or "?NNN" have no name. - - The first bound parameter has an index of 1, not 0. - - If the value n is out of range or if the n-th parameter is nameless, - then NULL is returned. The returned string is always in the - UTF-8 encoding even if the named parameter was originally specified - as UTF-16 in sqlite3_prepare16_v2(). -} - -api {} { - int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); -} { - Return the index of the parameter with the given name. - The name must match exactly. - If there is no parameter with the given name, return 0. - The string zName is always in the UTF-8 encoding. -} - -api {} { - int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); -} { - This routine identifies a callback function that might be invoked - whenever an attempt is made to open a database table - that another thread or process has locked. - If the busy callback is NULL, then SQLITE_BUSY - (or sometimes SQLITE_IOERR_BLOCKED) - is returned immediately upon encountering the lock. - If the busy callback is not NULL, then the - callback will be invoked with two arguments. The - first argument to the handler is a copy of the void* pointer which - is the third argument to this routine. The second argument to - the handler is the number of times that the busy handler has - been invoked for this locking event. 
If the - busy callback returns 0, then no additional attempts are made to - access the database and SQLITE_BUSY or SQLITE_IOERR_BLOCKED is returned. - If the callback returns non-zero, then another attempt is made to open the - database for reading and the cycle repeats. - - The presence of a busy handler does not guarantee that - it will be invoked when there is lock contention. - If SQLite determines that invoking the busy handler could result in - a deadlock, it will return SQLITE_BUSY instead. - Consider a scenario where one process is holding a read lock that - it is trying to promote to a reserved lock and - a second process is holding a reserved lock that it is trying - to promote to an exclusive lock. The first process cannot proceed - because it is blocked by the second and the second process cannot - proceed because it is blocked by the first. If both processes - invoke the busy handlers, neither will make any progress. Therefore, - SQLite returns SQLITE_BUSY for the first process, hoping that this - will induce the first process to release its read lock and allow - the second process to proceed. - - The default busy callback is NULL. - - The SQLITE_BUSY error is converted to SQLITE_IOERR_BLOCKED when - SQLite is in the middle of a large transaction where all the - changes will not fit into the in-memory cache. SQLite will - already hold a RESERVED lock on the database file, but it needs - to promote this lock to EXCLUSIVE so that it can spill cache - pages into the database file without harm to concurrent - readers. If it is unable to promote the lock, then the in-memory - cache will be left in an inconsistent state and so the error - code is promoted from the relatively benign SQLITE_BUSY to - the more severe SQLITE_IOERR_BLOCKED. This error code promotion - forces an automatic rollback of the changes. See the - - CorruptionFollowingBusyError wiki page for a discussion of why - this is important. - - Sqlite is re-entrant, so the busy handler may start a new query. - (It is not clear why anyone would every want to do this, but it - is allowed, in theory.) But the busy handler may not close the - database. Closing the database from a busy handler will delete - data structures out from under the executing query and will - probably result in a coredump. - - There can only be a single busy handler defined for each database - connection. Setting a new busy handler clears any previous one. - Note that calling sqlite3_busy_timeout() will also set or clear - the busy handler. -} - -api {} { - int sqlite3_busy_timeout(sqlite3*, int ms); -} { - This routine sets a busy handler that sleeps for a while when a - table is locked. The handler will sleep multiple times until - at least "ms" milliseconds of sleeping have been done. After - "ms" milliseconds of sleeping, the handler returns 0 which - causes sqlite3_step() to return SQLITE_BUSY or SQLITE_IOERR_BLOCKED. - - Calling this routine with an argument less than or equal to zero - turns off all busy handlers. - - There can only be a single busy handler for a particular database - connection. If another busy handler was defined - (using sqlite3_busy_handler()) prior to calling - this routine, that other busy handler is cleared. -} - -api {} { - int sqlite3_changes(sqlite3*); -} { - This function returns the number of database rows that were changed - (or inserted or deleted) by the most recently completed - INSERT, UPDATE, or DELETE - statement. 
Only changes that are directly specified by the INSERT, - UPDATE, or DELETE statement are counted. Auxiliary changes caused by - triggers are not counted. Use the sqlite3_total_changes() function - to find the total number of changes including changes caused by triggers. - - Within the body of a trigger, the sqlite3_changes() function does work - to report the number of rows that were changed for the most recently - completed INSERT, UPDATE, or DELETE statement within the trigger body. - - SQLite implements the command "DELETE FROM table" without a WHERE clause - by dropping and recreating the table. (This is much faster than going - through and deleting individual elements from the table.) Because of - this optimization, the change count for "DELETE FROM table" will be - zero regardless of the number of elements that were originally in the - table. To get an accurate count of the number of rows deleted, use - "DELETE FROM table WHERE 1" instead. -} - -api {} { - int sqlite3_total_changes(sqlite3*); -} { - This function returns the total number of database rows that have - be modified, inserted, or deleted since the database connection was - created using sqlite3_open(). All changes are counted, including - changes by triggers and changes to TEMP and auxiliary databases. - Except, changes to the SQLITE_MASTER table (caused by statements - such as CREATE TABLE) are not counted. Nor are changes counted when - an entire table is deleted using DROP TABLE. - - See also the sqlite3_changes() API. - - SQLite implements the command "DELETE FROM table" without a WHERE clause - by dropping and recreating the table. (This is much faster than going - through and deleting individual elements form the table.) Because of - this optimization, the change count for "DELETE FROM table" will be - zero regardless of the number of elements that were originally in the - table. To get an accurate count of the number of rows deleted, use - "DELETE FROM table WHERE 1" instead. -} - -api {} { - int sqlite3_close(sqlite3*); -} { - Call this function with a pointer to a structure that was previously - returned from sqlite3_open() or sqlite3_open16() - and the corresponding database will by closed. - - SQLITE_OK is returned if the close is successful. If there are - prepared statements that have not been finalized, then SQLITE_BUSY - is returned. SQLITE_ERROR might be returned if the argument is not - a valid connection pointer returned by sqlite3_open() or if the connection - pointer has been closed previously. -} - -api {} { -const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); -int sqlite3_column_bytes(sqlite3_stmt*, int iCol); -int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); -double sqlite3_column_double(sqlite3_stmt*, int iCol); -int sqlite3_column_int(sqlite3_stmt*, int iCol); -long long int sqlite3_column_int64(sqlite3_stmt*, int iCol); -const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); -const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); -int sqlite3_column_type(sqlite3_stmt*, int iCol); -#define SQLITE_INTEGER 1 -#define SQLITE_FLOAT 2 -#define SQLITE_TEXT 3 -#define SQLITE_BLOB 4 -#define SQLITE_NULL 5 -} { - These routines return information about the information - in a single column of the current result row of a query. In every - case the first argument is a pointer to the SQL statement that is being - executed (the sqlite_stmt* that was returned from sqlite3_prepare_v2()) and - the second argument is the index of the column for which information - should be returned. 
iCol is zero-indexed. The left-most column has an - index of 0. - - If the SQL statement is not currently point to a valid row, or if the - the column index is out of range, the result is undefined. - - The sqlite3_column_type() routine returns the initial data type - of the result column. The returned value is one of SQLITE_INTEGER, - SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL. The value - returned by sqlite3_column_type() is only meaningful if no type - conversions have occurred as described below. After a type conversion, - the value returned by sqlite3_column_type() is undefined. Future - versions of SQLite may change the behavior of sqlite3_column_type() - following a type conversion. - - If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() - routine returns the number of bytes in that BLOB or string. - If the result is a UTF-16 string, then sqlite3_column_bytes() converts - the string to UTF-8 and then returns the number of bytes. - If the result is a numeric value then sqlite3_column_bytes() uses - sqlite3_snprintf() to convert that value to a UTF-8 string and returns - the number of bytes in that string. - The value returned does - not include the \\000 terminator at the end of the string. - - The sqlite3_column_bytes16() routine is similar to sqlite3_column_bytes() - but leaves the result in UTF-16 instead of UTF-8. - The \\u0000 terminator is not included in this count. - - These routines attempt to convert the value where appropriate. For - example, if the internal representation is FLOAT and a text result - is requested, sqlite3_snprintf() is used internally to do the conversion - automatically. The following table details the conversions that - are applied: - -
    - - - - - - - - - - - - - - - - - - -
    Internal TypeRequested TypeConversion
    NULL INTEGERResult is 0
    NULL FLOAT Result is 0.0
    NULL TEXT Result is NULL pointer
    NULL BLOB Result is NULL pointer
    INTEGER FLOAT Convert from integer to float
    INTEGER TEXT ASCII rendering of the integer
    INTEGER BLOB Same as for INTEGER->TEXT
    FLOAT INTEGERConvert from float to integer
    FLOAT TEXT ASCII rendering of the float
    FLOAT BLOB Same as FLOAT->TEXT
    TEXT INTEGERUse atoi()
    TEXT FLOAT Use atof()
    TEXT BLOB No change
    BLOB INTEGERConvert to TEXT then use atoi()
    BLOB FLOAT Convert to TEXT then use atof()
    BLOB TEXT Add a \\000 terminator if needed
    -
    - - Note that when type conversions occur, pointers returned by prior - calls to sqlite3_column_blob(), sqlite3_column_text(), and/or - sqlite3_column_text16() may be invalidated. - Type conversions and pointer invalidations might occur - in the following cases: - -
      -
    • - The initial content is a BLOB and sqlite3_column_text() - or sqlite3_column_text16() - is called. A zero-terminator might need to be added to the string. -

    • -
    • - The initial content is UTF-8 text and sqlite3_column_bytes16() or - sqlite3_column_text16() is called. The content must be converted to UTF-16. -

    • -
    • - The initial content is UTF-16 text and sqlite3_column_bytes() or - sqlite3_column_text() is called. The content must be converted to UTF-8. -

    • -
    - - Conversions between UTF-16be and UTF-16le - are always done in place and do - not invalidate a prior pointer, though of course the content of the buffer - that the prior pointer points to will have been modified. Other kinds - of conversion are done in place when it is possible, but sometime it is - not possible and in those cases prior pointers are invalidated. - - The safest and easiest to remember policy is to invoke these routines - in one of the following ways: - -
      -
    • sqlite3_column_text() followed by sqlite3_column_bytes()
    • -
    • sqlite3_column_blob() followed by sqlite3_column_bytes()
    • -
    • sqlite3_column_text16() followed by sqlite3_column_bytes16()
    • -
    - - In other words, you should call sqlite3_column_text(), sqlite3_column_blob(), - or sqlite3_column_text16() first to force the result into the desired - format, then invoke sqlite3_column_bytes() or sqlite3_column_bytes16() to - find the size of the result. Do not mix call to sqlite3_column_text() or - sqlite3_column_blob() with calls to sqlite3_column_bytes16(). And do not - mix calls to sqlite3_column_text16() with calls to sqlite3_column_bytes(). -} - -api {} { -int sqlite3_column_count(sqlite3_stmt *pStmt); -} { - Return the number of columns in the result set returned by the prepared - SQL statement. This routine returns 0 if pStmt is an SQL statement - that does not return data (for example an UPDATE). - - See also sqlite3_data_count(). -} - -api {} { -const char *sqlite3_column_decltype(sqlite3_stmt *, int i); -const void *sqlite3_column_decltype16(sqlite3_stmt*,int); -} { - The first argument is a prepared SQL statement. If this statement - is a SELECT statement, the Nth column of the returned result set - of the SELECT is a table column then the declared type of the table - column is returned. If the Nth column of the result set is not a table - column, then a NULL pointer is returned. The returned string is - UTF-8 encoded for sqlite3_column_decltype() and UTF-16 encoded - for sqlite3_column_decltype16(). For example, in the database schema: - -
    - CREATE TABLE t1(c1 INTEGER);
    - 
    - - And the following statement compiled: - -
    - SELECT c1 + 1, c1 FROM t1;
    - 
    - - Then this routine would return the string "INTEGER" for the second - result column (i==1), and a NULL pointer for the first result column - (i==0). - - If the following statements were compiled then this routine would - return "INTEGER" for the first (only) result column. - -
    - SELECT (SELECT c1) FROM t1;
    - SELECT (SELECT c1 FROM t1);
    - SELECT c1 FROM (SELECT c1 FROM t1);
    - SELECT * FROM (SELECT c1 FROM t1);
    - SELECT * FROM (SELECT * FROM t1);
    - 
    -} - -api {} { - int sqlite3_table_column_metadata( - sqlite3 *db, /* Connection handle */ - const char *zDbName, /* Database name or NULL */ - const char *zTableName, /* Table name */ - const char *zColumnName, /* Column name */ - char const **pzDataType, /* OUTPUT: Declared data type */ - char const **pzCollSeq, /* OUTPUT: Collation sequence name */ - int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ - int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if colums is auto-increment */ - ); -} { - This routine is used to obtain meta information about a specific column of a - specific database table accessible using the connection handle passed as the - first function argument. - - The column is identified by the second, third and fourth parameters to - this function. The second parameter is either the name of the database - (i.e. "main", "temp" or an attached database) containing the specified - table or NULL. If it is NULL, then all attached databases are searched - for the table using the same algorithm as the database engine uses to - resolve unqualified table references. - - The third and fourth parameters to this function are the table and column - name of the desired column, respectively. Neither of these parameters - may be NULL. - - Meta information is returned by writing to the memory locations passed as - the 5th and subsequent parameters to this function. Any of these - arguments may be NULL, in which case the corresponding element of meta - information is ommitted. - -
    - Parameter     Output Type      Description
    - -----------------------------------
    -   5th         const char*      Declared data type 
    -   6th         const char*      Name of the columns default collation sequence 
    -   7th         int              True if the column has a NOT NULL constraint
    -   8th         int              True if the column is part of the PRIMARY KEY
    -   9th         int              True if the column is AUTOINCREMENT
    -
    - - The memory pointed to by the character pointers returned for the - declaration type and collation sequence is valid only until the next - call to any sqlite API function. - - This function may load one or more schemas from database files. If an - error occurs during this process, or if the requested table or column - cannot be found, an SQLITE error code is returned and an error message - left in the database handle (to be retrieved using sqlite3_errmsg()). - Specifying an SQL view instead of a table as the third argument is also - considered an error. - - If the specified column is "rowid", "oid" or "_rowid_" and an - INTEGER PRIMARY KEY column has been explicitly declared, then the output - parameters are set for the explicitly declared column. If there is no - explicitly declared IPK column, then the data-type is "INTEGER", the - collation sequence "BINARY" and the primary-key flag is set. Both - the not-null and auto-increment flags are clear. - - This API is only available if the library was compiled with the - SQLITE_ENABLE_COLUMN_METADATA preprocessor symbol defined. -} - -api {} { -const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N); -const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N); -} { -If the Nth column returned by statement pStmt is a column reference, -these functions may be used to access the name of the database (either -"main", "temp" or the name of an attached database) that contains -the column. If the Nth column is not a column reference, NULL is -returned. - -See the description of function sqlite3_column_decltype() for a -description of exactly which expressions are considered column references. - -Function sqlite3_column_database_name() returns a pointer to a UTF-8 -encoded string. sqlite3_column_database_name16() returns a pointer -to a UTF-16 encoded string. -} - -api {} { -const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N); -const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N); -} { -If the Nth column returned by statement pStmt is a column reference, -these functions may be used to access the schema name of the referenced -column in the database schema. If the Nth column is not a column -reference, NULL is returned. - -See the description of function sqlite3_column_decltype() for a -description of exactly which expressions are considered column references. - -Function sqlite3_column_origin_name() returns a pointer to a UTF-8 -encoded string. sqlite3_column_origin_name16() returns a pointer -to a UTF-16 encoded string. -} - -api {} { -const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N); -const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N); -} { -If the Nth column returned by statement pStmt is a column reference, -these functions may be used to access the name of the table that -contains the column. If the Nth column is not a column reference, -NULL is returned. - -See the description of function sqlite3_column_decltype() for a -description of exactly which expressions are considered column references. - -Function sqlite3_column_table_name() returns a pointer to a UTF-8 -encoded string. sqlite3_column_table_name16() returns a pointer -to a UTF-16 encoded string. -} - -api {} { -const char *sqlite3_column_name(sqlite3_stmt*,int); -const void *sqlite3_column_name16(sqlite3_stmt*,int); -} { - The first argument is a prepared SQL statement. 
This function returns - the column heading for the Nth column of that statement, where N is the - second function argument. The string returned is UTF-8 for - sqlite3_column_name() and UTF-16 for sqlite3_column_name16(). -} - -api {} { -void *sqlite3_commit_hook(sqlite3*, int(*xCallback)(void*), void *pArg); -} { - Experimental - - Register a callback function to be invoked whenever a new transaction - is committed. The pArg argument is passed through to the callback. - callback. If the callback function returns non-zero, then the commit - is converted into a rollback. - - If another function was previously registered, its pArg value is returned. - Otherwise NULL is returned. - - Registering a NULL function disables the callback. Only a single commit - hook callback can be registered at a time. -} - -api {} { -int sqlite3_complete(const char *sql); -int sqlite3_complete16(const void *sql); -} { - These functions return true if the given input string comprises - one or more complete SQL statements. - The argument must be a nul-terminated UTF-8 string for sqlite3_complete() - and a nul-terminated UTF-16 string for sqlite3_complete16(). - - These routines do not check to see if the SQL statement is well-formed. - They only check to see that the statement is terminated by a semicolon - that is not part of a string literal and is not inside - the body of a trigger. -} {} - -api {} { -int sqlite3_create_collation( - sqlite3*, - const char *zName, - int pref16, - void*, - int(*xCompare)(void*,int,const void*,int,const void*) -); -int sqlite3_create_collation16( - sqlite3*, - const char *zName, - int pref16, - void*, - int(*xCompare)(void*,int,const void*,int,const void*) -); -#define SQLITE_UTF8 1 -#define SQLITE_UTF16BE 2 -#define SQLITE_UTF16LE 3 -#define SQLITE_UTF16 4 -} { - These two functions are used to add new collation sequences to the - sqlite3 handle specified as the first argument. - - The name of the new collation sequence is specified as a UTF-8 string - for sqlite3_create_collation() and a UTF-16 string for - sqlite3_create_collation16(). In both cases the name is passed as the - second function argument. - - The third argument must be one of the constants SQLITE_UTF8, - SQLITE_UTF16LE or SQLITE_UTF16BE, indicating that the user-supplied - routine expects to be passed pointers to strings encoded using UTF-8, - UTF-16 little-endian or UTF-16 big-endian respectively. The - SQLITE_UTF16 constant indicates that text strings are expected in - UTF-16 in the native byte order of the host machine. - - A pointer to the user supplied routine must be passed as the fifth - argument. If it is NULL, this is the same as deleting the collation - sequence (so that SQLite cannot call it anymore). Each time the user - supplied function is invoked, it is passed a copy of the void* passed as - the fourth argument to sqlite3_create_collation() or - sqlite3_create_collation16() as its first argument. - - The remaining arguments to the user-supplied routine are two strings, - each represented by a [length, data] pair and encoded in the encoding - that was passed as the third argument when the collation sequence was - registered. The user routine should return negative, zero or positive if - the first string is less than, equal to, or greater than the second - string. i.e. (STRING1 - STRING2). 
-} - -api {} { -int sqlite3_collation_needed( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const char*) -); -int sqlite3_collation_needed16( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const void*) -); -} { - To avoid having to register all collation sequences before a database - can be used, a single callback function may be registered with the - database handle to be called whenever an undefined collation sequence is - required. - - If the function is registered using the sqlite3_collation_needed() API, - then it is passed the names of undefined collation sequences as strings - encoded in UTF-8. If sqlite3_collation_needed16() is used, the names - are passed as UTF-16 in machine native byte order. A call to either - function replaces any existing callback. - - When the user-function is invoked, the first argument passed is a copy - of the second argument to sqlite3_collation_needed() or - sqlite3_collation_needed16(). The second argument is the database - handle. The third argument is one of SQLITE_UTF8, SQLITE_UTF16BE or - SQLITE_UTF16LE, indicating the most desirable form of the collation - sequence function required. The fourth argument is the name of the - required collation sequence. - - The collation sequence is returned to SQLite by a collation-needed - callback using the sqlite3_create_collation() or - sqlite3_create_collation16() APIs, described above. -} - -api {} { -int sqlite3_create_function( - sqlite3 *, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pUserData, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -int sqlite3_create_function16( - sqlite3*, - const void *zFunctionName, - int nArg, - int eTextRep, - void *pUserData, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -#define SQLITE_UTF8 1 -#define SQLITE_UTF16 2 -#define SQLITE_UTF16BE 3 -#define SQLITE_UTF16LE 4 -#define SQLITE_ANY 5 -} { - These two functions are used to add SQL functions or aggregates - implemented in C. The - only difference between these two routines is that the second argument, the - name of the (scalar) function or aggregate, is encoded in UTF-8 for - sqlite3_create_function() and UTF-16 for sqlite3_create_function16(). - The length of the name is limited to 255 bytes, exclusive of the - zero-terminator. Note that the name length limit is in bytes, not - characters. Any attempt to create a function with a longer name - will result in an SQLITE_ERROR error. - - The first argument is the database handle that the new function or - aggregate is to be added to. If a single program uses more than one - database handle internally, then user functions or aggregates must - be added individually to each database handle with which they will be - used. - - The third argument is the number of arguments that the function or - aggregate takes. If this argument is -1 then the function or - aggregate may take any number of arguments. The maximum number - of arguments to a new SQL function is 127. A number larger than - 127 for the third argument results in an SQLITE_ERROR error. - - The fourth argument, eTextRep, specifies what type of text arguments - this function prefers to receive. Any function should be able to work - work with UTF-8, UTF-16le, or UTF-16be. But some implementations may be - more efficient with one representation than another. 
Users are allowed - to specify separate implementations for the same function which are called - depending on the text representation of the arguments. The the implementation - which provides the best match is used. If there is only a single - implementation which does not care what text representation is used, - then the fourth argument should be SQLITE_ANY. - - The fifth argument is an arbitrary pointer. The function implementations - can gain access to this pointer using the sqlite_user_data() API. - - The sixth, seventh and eighth argumens, xFunc, xStep and xFinal, are - pointers to user implemented C functions that implement the user - function or aggregate. A scalar function requires an implementation of - the xFunc callback only, NULL pointers should be passed as the xStep - and xFinal arguments. An aggregate function requires an implementation - of xStep and xFinal, and NULL should be passed for xFunc. To delete an - existing user function or aggregate, pass NULL for all three function - callbacks. Specifying an inconstant set of callback values, such as an - xFunc and an xFinal, or an xStep but no xFinal, results in an SQLITE_ERROR - return. -} - -api {} { -int sqlite3_data_count(sqlite3_stmt *pStmt); -} { - Return the number of values in the current row of the result set. - - After a call to sqlite3_step() that returns SQLITE_ROW, this routine - will return the same value as the sqlite3_column_count() function. - After sqlite3_step() has returned an SQLITE_DONE, SQLITE_BUSY or - error code, or before sqlite3_step() has been called on a - prepared SQL statement, this routine returns zero. -} - -api {} { -int sqlite3_errcode(sqlite3 *db); -} { - Return the error code for the most recent failed sqlite3_* API call associated - with sqlite3 handle 'db'. If a prior API call failed but the most recent - API call succeeded, the return value from this routine is undefined. - - Calls to many sqlite3_* functions set the error code and string returned - by sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16() - (overwriting the previous values). Note that calls to sqlite3_errcode(), - sqlite3_errmsg() and sqlite3_errmsg16() themselves do not affect the - results of future invocations. Calls to API routines that do not return - an error code (examples: sqlite3_data_count() or sqlite3_mprintf()) do - not change the error code returned by this routine. - - Assuming no other intervening sqlite3_* API calls are made, the error - code returned by this function is associated with the same error as - the strings returned by sqlite3_errmsg() and sqlite3_errmsg16(). -} {} - -api {} { -const char *sqlite3_errmsg(sqlite3*); -const void *sqlite3_errmsg16(sqlite3*); -} { - Return a pointer to a UTF-8 encoded string (sqlite3_errmsg) - or a UTF-16 encoded string (sqlite3_errmsg16) describing in English the - error condition for the most recent sqlite3_* API call. The returned - string is always terminated by an 0x00 byte. - - The string "not an error" is returned when the most recent API call was - successful. -} - -api {} { -int sqlite3_exec( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be executed */ - sqlite_callback, /* Callback function */ - void *, /* 1st argument to callback function */ - char **errmsg /* Error msg written here */ -); -} { - A function to executes one or more statements of SQL. - - If one or more of the SQL statements are queries, then - the callback function specified by the 3rd argument is - invoked once for each row of the query result. 
This callback - should normally return 0. If the callback returns a non-zero - value then the query is aborted, all subsequent SQL statements - are skipped and the sqlite3_exec() function returns the SQLITE_ABORT. - - The 1st argument is an arbitrary pointer that is passed - to the callback function as its first argument. - - The 2nd argument to the callback function is the number of - columns in the query result. The 3rd argument to the callback - is an array of strings holding the values for each column. - The 4th argument to the callback is an array of strings holding - the names of each column. - - The callback function may be NULL, even for queries. A NULL - callback is not an error. It just means that no callback - will be invoked. - - If an error occurs while parsing or evaluating the SQL (but - not while executing the callback) then an appropriate error - message is written into memory obtained from malloc() and - *errmsg is made to point to that message. The calling function - is responsible for freeing the memory that holds the error - message. Use sqlite3_free() for this. If errmsg==NULL, - then no error message is ever written. - - The return value is is SQLITE_OK if there are no errors and - some other return code if there is an error. The particular - return value depends on the type of error. - - If the query could not be executed because a database file is - locked or busy, then this function returns SQLITE_BUSY. (This - behavior can be modified somewhat using the sqlite3_busy_handler() - and sqlite3_busy_timeout() functions.) -} {} - -api {} { -int sqlite3_finalize(sqlite3_stmt *pStmt); -} { - The sqlite3_finalize() function is called to delete a prepared - SQL statement obtained by a previous call to sqlite3_prepare(), - sqlite3_prepare_v2(), sqlite3_prepare16(), or sqlite3_prepare16_v2(). - If the statement was executed successfully, or - not executed at all, then SQLITE_OK is returned. If execution of the - statement failed then an error code is returned. - - After sqlite_finalize() has been called, the statement handle is - invalidated. Passing it to any other SQLite function may cause a - crash. - - All prepared statements must finalized before sqlite3_close() is - called or else the close will fail with a return code of SQLITE_BUSY. - - This routine can be called at any point during the execution of the - virtual machine. If the virtual machine has not completed execution - when this routine is called, that is like encountering an error or - an interrupt. (See sqlite3_interrupt().) Incomplete updates may be - rolled back and transactions canceled, depending on the circumstances, - and the result code returned will be SQLITE_ABORT. -} - -api {} { -void *sqlite3_malloc(int); -void *sqlite3_realloc(void*, int); -void sqlite3_free(void*); -} { - These routines provide access to the memory allocator used by SQLite. - Depending on how SQLite has been compiled and the OS-layer backend, - the memory allocator used by SQLite might be the standard system - malloc()/realloc()/free(), or it might be something different. With - certain compile-time flags, SQLite will add wrapper logic around the - memory allocator to add memory leak and buffer overrun detection. The - OS layer might substitute a completely different memory allocator. - Use these APIs to be sure you are always using the correct memory - allocator. 
- - The sqlite3_free() API, not the standard free() from the system library, - should always be used to free the memory buffer returned by - sqlite3_mprintf() or sqlite3_vmprintf() and to free the error message - string returned by sqlite3_exec(). Using free() instead of sqlite3_free() - might accidentally work on some systems and build configurations but - will fail on others. - - Compatibility Note: Prior to version 3.4.0, the sqlite3_free API - was prototyped to take a char* parameter rather than - void*. Like this: -
    -void sqlite3_free(char*);
    -
    - The change to using void* might cause warnings when - compiling older code against - newer libraries, but everything should still work correctly. -} - -api {} { -int sqlite3_get_table( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be executed */ - char ***resultp, /* Result written to a char *[] that this points to */ - int *nrow, /* Number of result rows written here */ - int *ncolumn, /* Number of result columns written here */ - char **errmsg /* Error msg written here */ -); -void sqlite3_free_table(char **result); -} { - This next routine is really just a wrapper around sqlite3_exec(). - Instead of invoking a user-supplied callback for each row of the - result, this routine remembers each row of the result in memory - obtained from malloc(), then returns all of the result after the - query has finished. - - As an example, suppose the query result where this table: - -
    -        Name        | Age
    -        -----------------------
    -        Alice       | 43
    -        Bob         | 28
    -        Cindy       | 21
    - 
    - - If the 3rd argument were &azResult then after the function returns - azResult will contain the following data: - -
    -        azResult[0] = "Name";
    -        azResult[1] = "Age";
    -        azResult[2] = "Alice";
    -        azResult[3] = "43";
    -        azResult[4] = "Bob";
    -        azResult[5] = "28";
    -        azResult[6] = "Cindy";
    -        azResult[7] = "21";
    - 
    - - Notice that there is an extra row of data containing the column - headers. But the *nrow return value is still 3. *ncolumn is - set to 2. In general, the number of values inserted into azResult - will be ((*nrow) + 1)*(*ncolumn). - - After the calling function has finished using the result, it should - pass the result data pointer to sqlite3_free_table() in order to - release the memory that was malloc-ed. Because of the way the - malloc() happens, the calling function must not try to call - malloc() directly. Only sqlite3_free_table() is able to release - the memory properly and safely. - - The return value of this routine is the same as from sqlite3_exec(). -} - -api {sqlite3_interrupt} { - void sqlite3_interrupt(sqlite3*); -} { - This function causes any pending database operation to abort and - return at its earliest opportunity. This routine is typically - called in response to a user action such as pressing "Cancel" - or Ctrl-C where the user wants a long query operation to halt - immediately. -} {} - -api {} { -long long int sqlite3_last_insert_rowid(sqlite3*); -} { - Each entry in an SQLite table has a unique integer key called the "rowid". - The rowid is always available as an undeclared column - named ROWID, OID, or _ROWID_. - If the table has a column of type INTEGER PRIMARY KEY then that column - is another an alias for the rowid. - - This routine - returns the rowid of the most recent INSERT into the database - from the database connection given in the first argument. If - no inserts have ever occurred on this database connection, zero - is returned. - - If an INSERT occurs within a trigger, then the rowid of the - inserted row is returned by this routine as long as the trigger - is running. But once the trigger terminates, the value returned - by this routine reverts to the last value inserted before the - trigger fired. -} {} - -api {} { -char *sqlite3_mprintf(const char*,...); -char *sqlite3_vmprintf(const char*, va_list); -} { - These routines are variants of the "sprintf()" from the - standard C library. The resulting string is written into memory - obtained from malloc() so that there is never a possibility of buffer - overflow. These routines also implement some additional formatting - options that are useful for constructing SQL statements. - - The strings returned by these routines should be freed by calling - sqlite3_free(). - - All of the usual printf formatting options apply. In addition, there - is a "%q" option. %q works like %s in that it substitutes a null-terminated - string from the argument list. But %q also doubles every '\\'' character. - %q is designed for use inside a string literal. By doubling each '\\'' - character it escapes that character and allows it to be inserted into - the string. - - For example, so some string variable contains text as follows: - -
    -  char *zText = "It's a happy day!";
    - 
    - - One can use this text in an SQL statement as follows: - -
-  sqlite3_exec_printf(db, "INSERT INTO table1 VALUES('%q')",
    -       callback1, 0, 0, zText);
    -  
    - - Because the %q format string is used, the '\\'' character in zText - is escaped and the SQL generated is as follows: - -
    -  INSERT INTO table1 VALUES('It''s a happy day!')
    - 
    - - This is correct. Had we used %s instead of %q, the generated SQL - would have looked like this: - -
    -  INSERT INTO table1 VALUES('It's a happy day!');
    -  
    - - This second example is an SQL syntax error. As a general rule you - should always use %q instead of %s when inserting text into a string - literal. -} {} - -api {} { -char *sqlite3_snprintf(int bufSize, char *buf, const char *zFormat, ...); -} { - This routine works like "sprintf()", writing a formatted string into - the buf[]. However, no more than bufSize characters will be written - into buf[]. This routine returns a pointer to buf[]. If bufSize is - greater than zero, then buf[] is guaranteed to be zero-terminated. - - This routine uses the same extended formatting options as - sqlite3_mprintf() and sqlite3_vmprintf(). - - Note these differences with the snprintf() function found in many - standard libraries: (1) sqlite3_snprintf() returns a pointer to the - buffer rather than the number of characters written. (It would, - arguably, be more useful to return the number of characters written, - but we discovered that after the interface had been published and - are unwilling to break backwards compatibility.) (2) The order - of the bufSize and buf parameter is reversed from snprintf(). - And (3) sqlite3_snprintf() always writes a zero-terminator if bufSize - is positive. - - Please do not use the return value of this routine. We may - decide to make the minor compatibility break and change this routine - to return the number of characters written rather than a pointer to - the buffer in a future minor version increment. -} - -api {} { -int sqlite3_open( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -int sqlite3_open16( - const void *filename, /* Database filename (UTF-16) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -} { - Open the sqlite database file "filename". The "filename" is UTF-8 - encoded for sqlite3_open() and UTF-16 encoded in the native byte order - for sqlite3_open16(). An sqlite3* handle is returned in *ppDb, even - if an error occurs. If the database is opened (or created) successfully, - then SQLITE_OK is returned. Otherwise an error code is returned. The - sqlite3_errmsg() or sqlite3_errmsg16() routines can be used to obtain - an English language description of the error. - - If the database file does not exist, then a new database will be created - as needed. - The encoding for the database will be UTF-8 if sqlite3_open() is called and - UTF-16 if sqlite3_open16 is used. - - Whether or not an error occurs when it is opened, resources associated - with the sqlite3* handle should be released by passing it to - sqlite3_close() when it is no longer required. - - The returned sqlite3* can only be used in the same thread in which it - was created. It is an error to call sqlite3_open() in one thread then - pass the resulting database handle off to another thread to use. This - restriction is due to goofy design decisions (bugs?) in the way some - threading implementations interact with file locks. - - Note to windows users: The encoding used for the filename argument - of sqlite3_open() must be UTF-8, not whatever codepage is currently - defined. Filenames containing international characters must be converted - to UTF-8 prior to passing them into sqlite3_open(). -} - -api {} { -int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nBytes, /* Length of zSql in bytes. 
*/ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); - -/* Legacy Interfaces */ -int sqlite3_prepare( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -int sqlite3_prepare16( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); -} { - To execute an SQL query, it must first be compiled into a byte-code - program using one of these routines. - - The first argument "db" is an SQLite database handle. The second - argument "zSql" is the statement to be compiled, encoded as either - UTF-8 or UTF-16. The sqlite3_prepare_v2() - interfaces uses UTF-8 and sqlite3_prepare16_v2() - use UTF-16. If the next argument, "nBytes", is less - than zero, then zSql is read up to the first nul terminator. If - "nBytes" is not less than zero, then it is the length of the string zSql - in bytes (not characters). - - *pzTail is made to point to the first byte past the end of the first - SQL statement in zSql. This routine only compiles the first statement - in zSql, so *pzTail is left pointing to what remains uncompiled. - - *ppStmt is left pointing to a compiled SQL statement that can be - executed using sqlite3_step(). Or if there is an error, *ppStmt may be - set to NULL. If the input text contained no SQL (if the input is and - empty string or a comment) then *ppStmt is set to NULL. The calling - procedure is responsible for deleting this compiled SQL statement - using sqlite3_finalize() after it has finished with it. - - On success, SQLITE_OK is returned. Otherwise an error code is returned. - - The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are - recommended for all new programs. The two older interfaces are retained - for backwards compatibility, but their use is discouraged. - In the "v2" interfaces, the prepared statement - that is returned (the sqlite3_stmt object) contains a copy of the original - SQL. This causes the sqlite3_step() interface to behave a differently in - two ways: - -
      -
    1. - If the database schema changes, instead of returning SQLITE_SCHEMA as it - always used to do, sqlite3_step() will automatically recompile the SQL - statement and try to run it again. If the schema has changed in a way - that makes the statement no longer valid, sqlite3_step() will still - return SQLITE_SCHEMA. But unlike the legacy behavior, SQLITE_SCHEMA is - now a fatal error. Calling sqlite3_prepare_v2() again will not make the - error go away. Note: use sqlite3_errmsg() to find the text of the parsing - error that results in an SQLITE_SCHEMA return. -
    2. - -
3. - When an error occurs, - sqlite3_step() will return one of the detailed result-codes - like SQLITE_IOERR or SQLITE_FULL or SQLITE_SCHEMA directly. The - legacy behavior was that sqlite3_step() would only return a generic - SQLITE_ERROR code and you would have to make a second call to - sqlite3_reset() in order to find the underlying cause of the problem. - With the "v2" prepare interfaces, the underlying reason for the error is - returned directly (see the sketch after this list). -
    4. -
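The following is a minimal sketch of the "v2" behavior described in the list above; the SQL text, the helper name run_query, and the error-handling policy are illustrative assumptions, not part of the SQLite documentation.

   #include <stdio.h>
   #include <sqlite3.h>

   /* Illustrative helper: with sqlite3_prepare_v2(), sqlite3_step() reports
   ** detailed result codes directly instead of a generic SQLITE_ERROR. */
   static int run_query(sqlite3 *db, const char *zSql){
     sqlite3_stmt *pStmt = 0;
     int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
     if( rc!=SQLITE_OK ){
       fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
       return rc;
     }
     while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
       /* ... process one row of the result here ... */
     }
     if( rc!=SQLITE_DONE ){
       /* rc may already be a specific code such as SQLITE_IOERR,
       ** SQLITE_FULL or SQLITE_SCHEMA. */
       fprintf(stderr, "step failed (%d): %s\n", rc, sqlite3_errmsg(db));
     }
     sqlite3_finalize(pStmt);
     return rc==SQLITE_DONE ? SQLITE_OK : rc;
   }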
    -} - -api {} { -void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); -} { - This routine configures a callback function - the progress callback - that - is invoked periodically during long running calls to sqlite3_exec(), - sqlite3_step() and sqlite3_get_table(). - An example use for this API is to keep - a GUI updated during a large query. - - The progress callback is invoked once for every N virtual machine opcodes, - where N is the second argument to this function. The progress callback - itself is identified by the third argument to this function. The fourth - argument to this function is a void pointer passed to the progress callback - function each time it is invoked. - - If a call to sqlite3_exec(), sqlite3_step() or sqlite3_get_table() results - in less than N opcodes being executed, then the progress callback is not - invoked. - - To remove the progress callback altogether, pass NULL as the third - argument to this function. - - If the progress callback returns a result other than 0, then the current - query is immediately terminated and any database changes rolled back. - The sqlite3_exec(), sqlite3_step(), or sqlite3_get_table() - call returns SQLITE_INTERRUPT. -} - -api {} { -int sqlite3_reset(sqlite3_stmt *pStmt); -} { - The sqlite3_reset() function is called to reset a prepared SQL - statement obtained by a previous call to - sqlite3_prepare_v2() or - sqlite3_prepare16_v2() back to it's initial state, ready to be re-executed. - Any SQL statement variables that had values bound to them using - the sqlite3_bind_*() API retain their values. -} - -api {} { -void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*)); -void sqlite3_result_double(sqlite3_context*, double); -void sqlite3_result_error(sqlite3_context*, const char*, int); -void sqlite3_result_error16(sqlite3_context*, const void*, int); -void sqlite3_result_int(sqlite3_context*, int); -void sqlite3_result_int64(sqlite3_context*, long long int); -void sqlite3_result_null(sqlite3_context*); -void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*)); -void sqlite3_result_text16(sqlite3_context*, const void*, int n, void(*)(void*)); -void sqlite3_result_text16be(sqlite3_context*, const void*, int n, void(*)(void*)); -void sqlite3_result_text16le(sqlite3_context*, const void*, int n, void(*)(void*)); -void sqlite3_result_value(sqlite3_context*, sqlite3_value*); -} { - User-defined functions invoke these routines in order to - set their return value. The sqlite3_result_value() routine is used - to return an exact copy of one of the arguments to the function. - - The operation of these routines is very similar to the operation of - sqlite3_bind_blob() and its cousins. Refer to the documentation there - for additional information. 
-} - -api {} { -int sqlite3_set_authorizer( - sqlite3*, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pUserData -); -#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ -#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ -#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ -#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ -#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ -#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ -#define SQLITE_DELETE 9 /* Table Name NULL */ -#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ -#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ -#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ -#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ -#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ -#define SQLITE_DROP_VIEW 17 /* View Name NULL */ -#define SQLITE_INSERT 18 /* Table Name NULL */ -#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ -#define SQLITE_READ 20 /* Table Name Column Name */ -#define SQLITE_SELECT 21 /* NULL NULL */ -#define SQLITE_TRANSACTION 22 /* NULL NULL */ -#define SQLITE_UPDATE 23 /* Table Name Column Name */ -#define SQLITE_ATTACH 24 /* Filename NULL */ -#define SQLITE_DETACH 25 /* Database Name NULL */ -#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ -#define SQLITE_REINDEX 27 /* Index Name NULL */ -#define SQLITE_ANALYZE 28 /* Table Name NULL */ -#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ -#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ -#define SQLITE_FUNCTION 31 /* Function Name NULL */ - -#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ -#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ -} { - This routine registers a callback with the SQLite library. The - callback is invoked by sqlite3_prepare_v2() to authorize various - operations against the database. The callback should - return SQLITE_OK if access is allowed, SQLITE_DENY if the entire - SQL statement should be aborted with an error and SQLITE_IGNORE - if the operation should be treated as a no-op. - - Each database connection have at most one authorizer registered - at a time one time. Each call - to sqlite3_set_authorizer() overrides the previous authorizer. - Setting the callback to NULL disables the authorizer. - - The second argument to the access authorization function will be one - of the defined constants shown. These values signify what kind of operation - is to be authorized. The 3rd and 4th arguments to the authorization - function will be arguments or NULL depending on which of the - codes is used as the second argument. For example, if the the - 2nd argument code is SQLITE_READ then the 3rd argument will be the name - of the table that is being read from and the 4th argument will be the - name of the column that is being read from. Or if the 2nd argument - is SQLITE_FUNCTION then the 3rd argument will be the name of the - function that is being invoked and the 4th argument will be NULL. - - The 5th argument is the name - of the database ("main", "temp", etc.) where applicable. 
The 6th argument - is the name of the inner-most trigger or view that is responsible for - the access attempt or NULL if this access attempt is directly from - input SQL code. - - The return value of the authorization callback function should be one of the - constants SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. A return of - SQLITE_OK means that the operation is permitted and that - sqlite3_prepare_v2() can proceed as normal. - A return of SQLITE_DENY means that the sqlite3_prepare_v2() - should fail with an error. A return of SQLITE_IGNORE causes the - sqlite3_prepare_v2() to continue as normal but the requested - operation is silently converted into a no-op. A return of SQLITE_IGNORE - in response to an SQLITE_READ or SQLITE_FUNCTION causes the column - being read or the function being invoked to return a NULL. - - The intent of this routine is to allow applications to safely execute - user-entered SQL. An appropriate callback can deny the user-entered - SQL access certain operations (ex: anything that changes the database) - or to deny access to certain tables or columns within the database. - - SQLite is not reentrant through the authorization callback function. - The authorization callback function should not attempt to invoke - any other SQLite APIs for the same database connection. If the - authorization callback function invokes some other SQLite API, an - SQLITE_MISUSE error or a segmentation fault may result. -} - -api {} { -int sqlite3_step(sqlite3_stmt*); -} { - After an SQL query has been prepared with a call to either - sqlite3_prepare_v2() or sqlite3_prepare16_v2() or to one of - the legacy interfaces sqlite3_prepare() or sqlite3_prepare16(), - then this function must be - called one or more times to execute the statement. - - The details of the behavior of this sqlite3_step() interface depend - on whether the statement was prepared using the newer "v2" interface - sqlite3_prepare_v2() and sqlite3_prepare16_v2() or the older legacy - interface sqlite3_prepare() and sqlite3_prepare16(). The use of the - new "v2" interface is recommended for new applications but the legacy - interface will continue to be supported. - - In the lagacy interface, the return value will be either SQLITE_BUSY, - SQLITE_DONE, SQLITE_ROW, SQLITE_ERROR, or SQLITE_MISUSE. With the "v2" - interface, any of the other SQLite result-codes might be returned as - well. - - SQLITE_BUSY means that the database engine attempted to open - a locked database and there is no busy callback registered. - Call sqlite3_step() again to retry the open. - - SQLITE_DONE means that the statement has finished executing - successfully. sqlite3_step() should not be called again on this virtual - machine without first calling sqlite3_reset() to reset the virtual - machine back to its initial state. - - If the SQL statement being executed returns any data, then - SQLITE_ROW is returned each time a new row of data is ready - for processing by the caller. The values may be accessed using - the sqlite3_column_int(), sqlite3_column_text(), and similar functions. - sqlite3_step() is called again to retrieve the next row of data. - - SQLITE_ERROR means that a run-time error (such as a constraint - violation) has occurred. sqlite3_step() should not be called again on - the VM. More information may be found by calling sqlite3_errmsg(). - A more specific error code (example: SQLITE_INTERRUPT, SQLITE_SCHEMA, - SQLITE_CORRUPT, and so forth) can be obtained by calling - sqlite3_reset() on the prepared statement. 
In the "v2" interface, - the more specific error code is returned directly by sqlite3_step(). - - SQLITE_MISUSE means that the this routine was called inappropriately. - Perhaps it was called on a virtual machine that had already been - finalized or on one that had previously returned SQLITE_ERROR or - SQLITE_DONE. Or it could be the case that a database connection - is being used by a different thread than the one it was created it. - - Goofy Interface Alert: - In the legacy interface, - the sqlite3_step() API always returns a generic error code, - SQLITE_ERROR, following any error other than SQLITE_BUSY and SQLITE_MISUSE. - You must call sqlite3_reset() (or sqlite3_finalize()) in order to find - one of the specific result-codes that better describes the error. - We admit that this is a goofy design. The problem has been fixed - with the "v2" interface. If you prepare all of your SQL statements - using either sqlite3_prepare_v2() or sqlite3_prepare16_v2() instead - of the legacy sqlite3_prepare() and sqlite3_prepare16(), then the - more specific result-codes are returned directly by sqlite3_step(). - The use of the "v2" interface is recommended. -} - -api {} { -void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -} { - Register a function that is called each time an SQL statement is evaluated. - The callback function is invoked on the first call to sqlite3_step() after - calls to sqlite3_prepare_v2() or sqlite3_reset(). - This function can be used (for example) to generate - a log file of all SQL executed against a database. This can be - useful when debugging an application that uses SQLite. -} - -api {} { -void *sqlite3_user_data(sqlite3_context*); -} { - The pUserData argument to the sqlite3_create_function() and - sqlite3_create_function16() routines used to register user functions - is available to the implementation of the function using this - call. -} - -api {} { -const void *sqlite3_value_blob(sqlite3_value*); -int sqlite3_value_bytes(sqlite3_value*); -int sqlite3_value_bytes16(sqlite3_value*); -double sqlite3_value_double(sqlite3_value*); -int sqlite3_value_int(sqlite3_value*); -long long int sqlite3_value_int64(sqlite3_value*); -const unsigned char *sqlite3_value_text(sqlite3_value*); -const void *sqlite3_value_text16(sqlite3_value*); -const void *sqlite3_value_text16be(sqlite3_value*); -const void *sqlite3_value_text16le(sqlite3_value*); -int sqlite3_value_type(sqlite3_value*); -} { - This group of routines returns information about arguments to - a user-defined function. Function implementations use these routines - to access their arguments. These routines are the same as the - sqlite3_column_... routines except that these routines take a single - sqlite3_value* pointer instead of an sqlite3_stmt* and an integer - column number. - - See the documentation under sqlite3_column_blob for additional - information. - - Please pay particular attention to the fact that the pointer that - is returned from sqlite3_value_blob(), sqlite3_value_text(), or - sqlite3_value_text16() can be invalidated by a subsequent call to - sqlite3_value_bytes(), sqlite3_value_bytes16(), sqlite_value_text(), - or sqlite3_value_text16(). -} - -api {} { - int sqlite3_sleep(int); -} { - Sleep for a little while. The second parameter is the number of - miliseconds to sleep for. - - If the operating system does not support sleep requests with - milisecond time resolution, then the time will be rounded up to - the nearest second. 
The number of miliseconds of sleep actually - requested from the operating system is returned. -} - -api {} { - int sqlite3_expired(sqlite3_stmt*); -} { - Return TRUE (non-zero) if the statement supplied as an argument needs - to be recompiled. A statement needs to be recompiled whenever the - execution environment changes in a way that would alter the program - that sqlite3_prepare() generates. For example, if new functions or - collating sequences are registered or if an authorizer function is - added or changed. -} - -api {} { - int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -} { - Move all bindings from the first prepared statement over to the second. - This routine is useful, for example, if the first prepared statement - fails with an SQLITE_SCHEMA error. The same SQL can be prepared into - the second prepared statement then all of the bindings transfered over - to the second statement before the first statement is finalized. -} - -api {} { - int sqlite3_global_recover(); -} { - This function used to be involved in recovering from out-of-memory - errors. But as of SQLite version 3.3.0, out-of-memory recovery is - automatic and this routine now does nothing. THe interface is retained - to avoid link errors with legacy code. -} - -api {} { - int sqlite3_get_autocommit(sqlite3*); -} { - Test to see whether or not the database connection is in autocommit - mode. Return TRUE if it is and FALSE if not. Autocommit mode is on - by default. Autocommit is disabled by a BEGIN statement and reenabled - by the next COMMIT or ROLLBACK. -} - -api {} { - int sqlite3_clear_bindings(sqlite3_stmt*); -} { - Set all the parameters in the compiled SQL statement back to NULL. -} - -api {} { - sqlite3 *sqlite3_db_handle(sqlite3_stmt*); -} { - Return the sqlite3* database handle to which the prepared statement given - in the argument belongs. This is the same database handle that was - the first argument to the sqlite3_prepare() that was used to create - the statement in the first place. -} - -api {} { - void *sqlite3_update_hook( - sqlite3*, - void(*)(void *,int ,char const *,char const *,sqlite_int64), - void* - ); -} { - Register a callback function with the database connection identified by the - first argument to be invoked whenever a row is updated, inserted or deleted. - Any callback set by a previous call to this function for the same - database connection is overridden. - - The second argument is a pointer to the function to invoke when a - row is updated, inserted or deleted. The first argument to the callback is - a copy of the third argument to sqlite3_update_hook. The second callback - argument is one of SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE, depending - on the operation that caused the callback to be invoked. The third and - fourth arguments to the callback contain pointers to the database and - table name containing the affected row. The final callback parameter is - the rowid of the row. In the case of an update, this is the rowid after - the update takes place. - - The update hook is not invoked when internal system tables are - modified (i.e. sqlite_master and sqlite_sequence). - - If another function was previously registered, its pArg value is returned. - Otherwise NULL is returned. - - See also: sqlite3_commit_hook(), sqlite3_rollback_hook() -} - -api {} { - void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); -} { - Register a callback to be invoked whenever a transaction is rolled - back. 
- - The new callback function overrides any existing rollback-hook - callback. If there was an existing callback, then it's pArg value - (the third argument to sqlite3_rollback_hook() when it was registered) - is returned. Otherwise, NULL is returned. - - For the purposes of this API, a transaction is said to have been - rolled back if an explicit "ROLLBACK" statement is executed, or - an error or constraint causes an implicit rollback to occur. The - callback is not invoked if a transaction is automatically rolled - back because the database connection is closed. -} - -api {} { - int sqlite3_enable_shared_cache(int); -} { - This routine enables or disables the sharing of the database cache - and schema data structures between connections to the same database. - Sharing is enabled if the argument is true and disabled if the argument - is false. - - Cache sharing is enabled and disabled on a thread-by-thread basis. - Each call to this routine enables or disables cache sharing only for - connections created in the same thread in which this routine is called. - There is no mechanism for sharing cache between database connections - running in different threads. - - Sharing must be disabled prior to shutting down a thread or else - the thread will leak memory. Call this routine with an argument of - 0 to turn off sharing. Or use the sqlite3_thread_cleanup() API. - - This routine must not be called when any database connections - are active in the current thread. Enabling or disabling shared - cache while there are active database connections will result - in memory corruption. - - When the shared cache is enabled, the - following routines must always be called from the same thread: - sqlite3_open(), sqlite3_prepare_v2(), sqlite3_step(), sqlite3_reset(), - sqlite3_finalize(), and sqlite3_close(). - This is due to the fact that the shared cache makes use of - thread-specific storage so that it will be available for sharing - with other connections. - - Virtual tables cannot be used with a shared cache. When shared - cache is enabled, the sqlite3_create_module() API used to register - virtual tables will always return an error. - - This routine returns SQLITE_OK if shared cache was - enabled or disabled successfully. An error code is returned - otherwise. - - Shared cache is disabled by default for backward compatibility. -} - -api {} { - void sqlite3_thread_cleanup(void); -} { - This routine makes sure that all thread local storage used by SQLite - in the current thread has been deallocated. A thread can call this - routine prior to terminating in order to make sure there are no memory - leaks. - - This routine is not strictly necessary. If cache sharing has been - disabled using sqlite3_enable_shared_cache() and if all database - connections have been closed and if SQLITE_ENABLE_MEMORY_MANAGMENT is - on and all memory has been freed, then the thread local storage will - already have been automatically deallocated. This routine is provided - as a convenience to the program who just wants to make sure that there - are no leaks. -} - -api {} { - int sqlite3_release_memory(int N); -} { - This routine attempts to free at least N bytes of memory from the caches - of database connecions that were created in the same thread from which this - routine is called. The value returned is the number of bytes actually - freed. - - This routine is only available if memory management has been enabled - by compiling with the SQLITE_ENABLE_MEMORY_MANAGMENT macro. 
-} - -api {} { - void sqlite3_soft_heap_limit(int N); -} { - This routine sets the soft heap limit for the current thread to N. - If the total heap usage by SQLite in the current thread exceeds N, - then sqlite3_release_memory() is called to try to reduce the memory usage - below the soft limit. - - Prior to shutting down a thread sqlite3_soft_heap_limit() must be set to - zero (the default) or else the thread will leak memory. Alternatively, use - the sqlite3_thread_cleanup() API. - - A negative or zero value for N means that there is no soft heap limit and - sqlite3_release_memory() will only be called when memory is exhaused. - The default value for the soft heap limit is zero. - - SQLite makes a best effort to honor the soft heap limit. But if it - is unable to reduce memory usage below the soft limit, execution will - continue without error or notification. This is why the limit is - called a "soft" limit. It is advisory only. - - This routine is only available if memory management has been enabled - by compiling with the SQLITE_ENABLE_MEMORY_MANAGMENT macro. -} - -api {} { - void sqlite3_thread_cleanup(void); -} { - This routine ensures that a thread that has used SQLite in the past - has released any thread-local storage it might have allocated. - When the rest of the API is used properly, the cleanup of - thread-local storage should be completely automatic. You should - never really need to invoke this API. But it is provided to you - as a precaution and as a potential work-around for future - thread-releated memory-leaks. -} - -set n 0 -set i 0 -foreach item $apilist { - set namelist [lindex $item 0] - foreach name $namelist { - set n_to_name($n) $name - set n_to_idx($n) $i - set name_to_idx($name) $i - incr n - } - incr i -} -set i 0 -foreach name [lsort [array names name_to_idx]] { - set sname($i) $name - incr i -} -#parray n_to_name -#parray n_to_idx -#parray name_to_idx -#parray sname -incr n -1 -puts "
    " -puts {} -set nrow [expr {($n+2)/3}] -set i 0 -for {set j 0} {$j<3} {incr j} { - if {$j>0} {puts {}} - puts {} -} -puts "
    } - set limit [expr {$i+$nrow}] - puts {
      } - while {$i<$limit && $i<$n} { - set name $sname($i) - if {[regexp {^sqlite} $name]} {set display $name} {set display $name} - puts "
    • $display
    • " - incr i - } - puts {
    " -puts "" -puts "
    " - -proc resolve_name {ignore_list name} { - global name_to_idx - if {![info exists name_to_idx($name)] || [lsearch $ignore_list $name]>=0} { - return $name - } else { - return "$name" - } -} - -foreach name [lsort [array names name_to_idx]] { - set i $name_to_idx($name) - if {[info exists done($i)]} continue - set done($i) 1 - foreach {namelist prototype desc} [lindex $apilist $i] break - foreach name $namelist { - puts "" - } - puts "


    " - puts "
    "
    -  regsub "^( *\n)+" $prototype {} p2
    -  regsub "(\n *)+\$" $p2 {} p3
    -  puts $p3
    -  puts "
    " - regsub -all {\[} $desc {\[} desc - regsub -all {sqlite3_[a-z0-9_]+} $desc "\[resolve_name $name &\]" d2 - foreach x $specialname { - regsub -all $x $d2 "\[resolve_name $name &\]" d2 - } - regsub -all "\n( *\n)+" [subst $d2] "

    \n\n

    " d3 - puts "

    $d3

    " -} - -puts "
    " -footer $rcsid -puts "
    " diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/capi3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/capi3.tcl --- sqlite3-3.4.2/www/capi3.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/capi3.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,516 +0,0 @@ -set rcsid {$Id: capi3.tcl,v 1.10 2007/04/27 17:16:22 drh Exp $} -source common.tcl -header {C/C++ Interface For SQLite Version 3} - -proc AddHyperlinks {txt} { - regsub -all {([^:alnum:>])(sqlite3_\w+)(\([^\)]*\))} $txt \ - {\1\2\3} t2 - puts $t2 -} - -AddHyperlinks { -

    C/C++ Interface For SQLite Version 3

    - -

    1.0 Overview

    - -

    -SQLite version 3.0 is a new version of SQLite, derived from -the SQLite 2.8.13 code base, but with an incompatible file format -and API. -SQLite version 3.0 was created to answer demand for the following features: -

    - -
      -
    • Support for UTF-16.
    • -
    • User-definable text collating sequences.
    • -
    • The ability to store BLOBs in indexed columns.
    • -
    - -

    -It was necessary to move to version 3.0 to implement these features because -each requires incompatible changes to the database file format. Other -incompatible changes, such as a cleanup of the API, were introduced at the -same time under the theory that it is best to get your incompatible changes -out of the way all at once. -

    - -

-The API for version 3.0 is similar to the version 2.X API, -but with some important changes. Most noticeably, the "sqlite_" -prefix that occurs at the beginning of all API functions and data -structures is changed to "sqlite3_". -This avoids confusion between the two APIs and allows linking against both -SQLite 2.X and SQLite 3.0 at the same time. -

    - -

    -There is no agreement on what the C datatype for a UTF-16 -string should be. Therefore, SQLite uses a generic type of void* -to refer to UTF-16 strings. Client software can cast the void* -to whatever datatype is appropriate for their system. -

    - -

    2.0 C/C++ Interface

    - -

    -The API for SQLite 3.0 includes 83 separate functions in addition -to several data structures and #defines. (A complete -API reference is provided as a separate document.) -Fortunately, the interface is not nearly as complex as its size implies. -Simple programs can still make do with only 3 functions: -sqlite3_open(), -sqlite3_exec(), and -sqlite3_close(). -More control over the execution of the database engine is provided -using -sqlite3_prepare() -to compile an SQLite statement into byte code and -sqlite3_step() -to execute that bytecode. -A family of routines with names beginning with -sqlite3_column_ -is used to extract information about the result set of a query. -Many interface functions come in pairs, with both a UTF-8 and -UTF-16 version. And there is a collection of routines -used to implement user-defined SQL functions and user-defined -text collating sequences. -

    - - -

    2.1 Opening and closing a database

    - -
    -   typedef struct sqlite3 sqlite3;
    -   int sqlite3_open(const char*, sqlite3**);
    -   int sqlite3_open16(const void*, sqlite3**);
    -   int sqlite3_close(sqlite3*);
    -   const char *sqlite3_errmsg(sqlite3*);
    -   const void *sqlite3_errmsg16(sqlite3*);
    -   int sqlite3_errcode(sqlite3*);
    -
    - -

    -The sqlite3_open() routine returns an integer error code rather than -a pointer to the sqlite3 structure as the version 2 interface did. -The difference between sqlite3_open() -and sqlite3_open16() is that sqlite3_open16() takes UTF-16 (in host native -byte order) for the name of the database file. If a new database file -needs to be created, then sqlite3_open16() sets the internal text -representation to UTF-16 whereas sqlite3_open() sets the text -representation to UTF-8. -

    - -

    -The opening and/or creating of the database file is deferred until the -file is actually needed. This allows options and parameters, such -as the native text representation and default page size, to be -set using PRAGMA statements. -

    - -

    -The sqlite3_errcode() routine returns a result code for the most -recent major API call. sqlite3_errmsg() returns an English-language -text error message for the most recent error. The error message is -represented in UTF-8 and will be ephemeral - it could disappear on -the next call to any SQLite API function. sqlite3_errmsg16() works like -sqlite3_errmsg() except that it returns the error message represented -as UTF-16 in host native byte order. -
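As a minimal sketch of the open/error-report/close pattern described above (the file name "example.db" is an illustrative assumption, not part of the original text):

   #include <stdio.h>
   #include <sqlite3.h>

   int main(void){
     sqlite3 *db = 0;
     int rc = sqlite3_open("example.db", &db);  /* file name is illustrative */
     if( rc!=SQLITE_OK ){
       /* A handle is returned even on failure, so the error can be reported. */
       fprintf(stderr, "cannot open database: %s\n", sqlite3_errmsg(db));
       sqlite3_close(db);
       return 1;
     }
     /* ... use the database here ... */
     sqlite3_close(db);
     return 0;
   }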

    - -

    -The error codes for SQLite version 3 are unchanged from version 2. -They are as follows: -

    - -
    -#define SQLITE_OK           0   /* Successful result */
    -#define SQLITE_ERROR        1   /* SQL error or missing database */
    -#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    -#define SQLITE_PERM         3   /* Access permission denied */
    -#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    -#define SQLITE_BUSY         5   /* The database file is locked */
    -#define SQLITE_LOCKED       6   /* A table in the database is locked */
    -#define SQLITE_NOMEM        7   /* A malloc() failed */
    -#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    -#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    -#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    -#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    -#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    -#define SQLITE_FULL        13   /* Insertion failed because database is full */
    -#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    -#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    -#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    -#define SQLITE_SCHEMA      17   /* The database schema changed */
    -#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
-#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    -#define SQLITE_MISMATCH    20   /* Data type mismatch */
    -#define SQLITE_MISUSE      21   /* Library used incorrectly */
    -#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    -#define SQLITE_AUTH        23   /* Authorization denied */
    -#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    -#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    -
    - -

    2.2 Executing SQL statements

    - -
    -   typedef int (*sqlite_callback)(void*,int,char**, char**);
    -   int sqlite3_exec(sqlite3*, const char *sql, sqlite_callback, void*, char**);
    -
    - -

    -The sqlite3_exec function works much as it did in SQLite version 2. -Zero or more SQL statements specified in the second parameter are compiled -and executed. Query results are returned to a callback routine. -See the API reference for additional -information. -

    - -

    -In SQLite version 3, the sqlite3_exec routine is just a wrapper around -calls to the prepared statement interface. -
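A possible callback for sqlite3_exec(), sketched here for illustration; the helper name print_row and the table named in the usage comment are assumptions made for this example.

   #include <stdio.h>
   #include <sqlite3.h>

   /* Invoked once per result row; argv[i] holds the text of column i. */
   static int print_row(void *pArg, int nCol, char **argv, char **azColName){
     int i;
     (void)pArg;
     for(i=0; i<nCol; i++){
       printf("%s = %s\n", azColName[i], argv[i] ? argv[i] : "NULL");
     }
     printf("\n");
     return 0;   /* a non-zero return would abort the sqlite3_exec() call */
   }

   /* Possible usage, assuming an open handle "db" and a table "people":
   **   char *zErr = 0;
   **   if( sqlite3_exec(db, "SELECT * FROM people", print_row, 0, &zErr)!=SQLITE_OK ){
   **     fprintf(stderr, "%s\n", zErr);
   **     sqlite3_free(zErr);
   **   }
   */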

    - -
    -   typedef struct sqlite3_stmt sqlite3_stmt;
    -   int sqlite3_prepare(sqlite3*, const char*, int, sqlite3_stmt**, const char**);
    -   int sqlite3_prepare16(sqlite3*, const void*, int, sqlite3_stmt**, const void**);
    -   int sqlite3_finalize(sqlite3_stmt*);
    -   int sqlite3_reset(sqlite3_stmt*);
    -
    - -

    -The sqlite3_prepare interface compiles a single SQL statement into byte code -for later execution. This interface is now the preferred way of accessing -the database. -

    - -

    -The SQL statement is a UTF-8 string for sqlite3_prepare(). -The sqlite3_prepare16() works the same way except -that it expects a UTF-16 string as SQL input. -Only the first SQL statement in the input string is compiled. -The fourth parameter is filled in with a pointer to the next (uncompiled) -SQLite statement in the input string, if any. -The sqlite3_finalize() routine deallocates a prepared SQL statement. -All prepared statements must be finalized before the database can be -closed. -The sqlite3_reset() routine resets a prepared SQL statement so that it -can be executed again. -
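A compact sketch of the prepare/step/finalize life cycle described above; the helper name and the query text are illustrative assumptions.

   #include <sqlite3.h>

   /* Count the rows in sqlite_master using a prepared statement. */
   static int count_schema_rows(sqlite3 *db, int *pnRow){
     sqlite3_stmt *pStmt = 0;
     int rc = sqlite3_prepare(db, "SELECT count(*) FROM sqlite_master", -1,
                              &pStmt, 0);
     if( rc!=SQLITE_OK ) return rc;
     if( sqlite3_step(pStmt)==SQLITE_ROW ){
       *pnRow = sqlite3_column_int(pStmt, 0);
     }
     return sqlite3_finalize(pStmt);  /* statements must be finalized before close */
   }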

    - -

-The SQL statement may contain tokens of the form "?" or "?nnn" or ":aaa" -where "nnn" is an integer and "aaa" is an identifier. -Such tokens represent unspecified literal values (or "wildcards") -to be filled in later by the -sqlite3_bind interface. -Each wildcard has an associated number which is its sequence in the -statement or the "nnn" in the case of a "?nnn" form. -It is allowed for the same wildcard -to occur more than once in the same SQL statement, in which case -all instances of that wildcard will be filled in with the same value. -Unbound wildcards have a value of NULL. -

    - -
    -   int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    -   int sqlite3_bind_double(sqlite3_stmt*, int, double);
    -   int sqlite3_bind_int(sqlite3_stmt*, int, int);
    -   int sqlite3_bind_int64(sqlite3_stmt*, int, long long int);
    -   int sqlite3_bind_null(sqlite3_stmt*, int);
    -   int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
    -   int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
    -   int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
    -
    - -

    -There is an assortment of sqlite3_bind routines used to assign values -to wildcards in a prepared SQL statement. Unbound wildcards -are interpreted as NULLs. Bindings are not reset by sqlite3_reset(). -But wildcards can be rebound to new values after an sqlite3_reset(). -
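A sketch of binding both a "?NNN" wildcard and a ":AAA" wildcard; the helper name, table and column names are illustrative assumptions.

   #include <sqlite3.h>

   static int insert_person(sqlite3 *db, int id, const char *zName){
     sqlite3_stmt *pStmt = 0;
     int rc = sqlite3_prepare(db,
         "INSERT INTO people VALUES(?1, :name)", -1, &pStmt, 0);
     if( rc!=SQLITE_OK ) return rc;
     sqlite3_bind_int(pStmt, 1, id);                     /* the "?1" wildcard    */
     sqlite3_bind_text(pStmt,
         sqlite3_bind_parameter_index(pStmt, ":name"),   /* the ":name" wildcard */
         zName, -1, SQLITE_TRANSIENT);
     rc = sqlite3_step(pStmt);
     sqlite3_finalize(pStmt);
     return rc==SQLITE_DONE ? SQLITE_OK : rc;
   }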

    - -

    -After an SQL statement has been prepared (and optionally bound), it -is executed using: -

    - -
    -   int sqlite3_step(sqlite3_stmt*);
    -
    - -

-The sqlite3_step() routine returns SQLITE_ROW if it is returning a single -row of the result set, or SQLITE_DONE if execution has completed, either -normally or due to an error. It might also return SQLITE_BUSY if it is -unable to open the database file. If the return value is SQLITE_ROW, then -the following routines can be used to extract information about that row -of the result set: -

    - -
    -   const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
    -   int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
    -   int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
    -   int sqlite3_column_count(sqlite3_stmt*);
    -   const char *sqlite3_column_decltype(sqlite3_stmt *, int iCol);
    -   const void *sqlite3_column_decltype16(sqlite3_stmt *, int iCol);
    -   double sqlite3_column_double(sqlite3_stmt*, int iCol);
    -   int sqlite3_column_int(sqlite3_stmt*, int iCol);
    -   long long int sqlite3_column_int64(sqlite3_stmt*, int iCol);
    -   const char *sqlite3_column_name(sqlite3_stmt*, int iCol);
    -   const void *sqlite3_column_name16(sqlite3_stmt*, int iCol);
    -   const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
    -   const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
    -   int sqlite3_column_type(sqlite3_stmt*, int iCol);
    -
    - -

    -The -sqlite3_column_count() -function returns the number of columns in -the results set. sqlite3_column_count() can be called at any time after -sqlite3_prepare(). -sqlite3_data_count() -works similarly to -sqlite3_column_count() except that it only works following sqlite3_step(). -If the previous call to sqlite3_step() returned SQLITE_DONE or an error code, -then sqlite3_data_count() will return 0 whereas sqlite3_column_count() will -continue to return the number of columns in the result set. -

    - -

    Returned data is examined using the other sqlite3_column_***() functions, -all of which take a column number as their second parameter. Columns are -zero-indexed from left to right. Note that this is different to parameters, -which are indexed starting at one. -
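For illustration, a loop that extracts typed column values from each result row; the helper name, query, table and column names are assumptions made for this sketch.

   #include <stdio.h>
   #include <sqlite3.h>

   static void list_people(sqlite3 *db){
     sqlite3_stmt *pStmt = 0;
     if( sqlite3_prepare(db, "SELECT id, name FROM people", -1, &pStmt, 0)!=SQLITE_OK ){
       return;
     }
     while( sqlite3_step(pStmt)==SQLITE_ROW ){
       int id = sqlite3_column_int(pStmt, 0);                       /* column 0 */
       const unsigned char *zName = sqlite3_column_text(pStmt, 1);  /* column 1 */
       printf("%d: %s\n", id, zName ? (const char*)zName : "NULL");
     }
     sqlite3_finalize(pStmt);
   }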

    - -

    -The sqlite3_column_type() function returns the -datatype for the value in the Nth column. The return value is one -of these: -

    - -
    -   #define SQLITE_INTEGER  1
    -   #define SQLITE_FLOAT    2
    -   #define SQLITE_TEXT     3
    -   #define SQLITE_BLOB     4
    -   #define SQLITE_NULL     5
    -
    - -

-The sqlite3_column_decltype() routine returns text which is the -declared type of the column in the CREATE TABLE statement. For an -expression, the return type is an empty string. sqlite3_column_name() -returns the name of the Nth column. sqlite3_column_bytes() returns -the number of bytes in a column that has type BLOB or the number of bytes -in a TEXT string with UTF-8 encoding. sqlite3_column_bytes16() returns -the same value for BLOBs but for TEXT strings returns the number of bytes -in a UTF-16 encoding. -sqlite3_column_blob() returns BLOB data. -sqlite3_column_text() returns TEXT data as UTF-8. -sqlite3_column_text16() returns TEXT data as UTF-16. -sqlite3_column_int() returns INTEGER data in the host machine's native -integer format. -sqlite3_column_int64() returns 64-bit INTEGER data. -Finally, sqlite3_column_double() returns floating point data. -

    - -

-It is not necessary to retrieve data in the format specified by -sqlite3_column_type(). If a different format is requested, the data -is converted automatically. -
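A small sketch of inspecting the native datatype before reading a value (requesting a different format would simply trigger the automatic conversion described above); the helper name print_value is an assumption.

   #include <stdio.h>
   #include <sqlite3.h>

   static void print_value(sqlite3_stmt *pStmt, int iCol){
     switch( sqlite3_column_type(pStmt, iCol) ){
       case SQLITE_INTEGER: printf("%d", sqlite3_column_int(pStmt, iCol));    break;
       case SQLITE_FLOAT:   printf("%g", sqlite3_column_double(pStmt, iCol)); break;
       case SQLITE_NULL:    printf("NULL");                                   break;
       default:             /* TEXT or BLOB rendered as text */
         printf("%s", (const char*)sqlite3_column_text(pStmt, iCol));         break;
     }
   }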

    - -

-Data format conversions can invalidate the pointer returned by -prior calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -sqlite3_column_text16(). Pointers might be invalidated in the following -cases: -

    -
      -
    • -The initial content is a BLOB and sqlite3_column_text() -or sqlite3_column_text16() -is called. A zero-terminator might need to be added to the string. -

    • -
    • -The initial content is UTF-8 text and sqlite3_column_bytes16() or -sqlite3_column_text16() is called. The content must be converted to UTF-16. -

    • -
    • -The initial content is UTF-16 text and sqlite3_column_bytes() or -sqlite3_column_text() is called. The content must be converted to UTF-8. -

    • -
    -

-Note that conversions between UTF-16be and UTF-16le -are always done in place and do -not invalidate a prior pointer, though of course the content of the buffer -that the prior pointer points to will have been modified. Other kinds -of conversion are done in place when it is possible, but sometimes it is -not possible and in those cases prior pointers are invalidated. -

    - -

    -The safest and easiest to remember policy is this: assume that any -result from -

      -
    • sqlite3_column_blob(),
    • -
    • sqlite3_column_text(), or
    • -
    • sqlite3_column_text16()
    • -
-is invalidated by subsequent calls to -
      -
    • sqlite3_column_bytes(),
    • -
    • sqlite3_column_bytes16(),
    • -
    • sqlite3_column_text(), or
    • -
    • sqlite3_column_text16().
    • -
    -This means that you should always call sqlite3_column_bytes() or -sqlite3_column_bytes16() before calling sqlite3_column_blob(), -sqlite3_column_text(), or sqlite3_column_text16(). -

    - -

    2.3 User-defined functions

    - -

-User-defined functions can be created using the following routine: -

    - -
    -   typedef struct sqlite3_value sqlite3_value;
    -   int sqlite3_create_function(
    -     sqlite3 *,
    -     const char *zFunctionName,
    -     int nArg,
    -     int eTextRep,
    -     void*,
    -     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    -     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    -     void (*xFinal)(sqlite3_context*)
    -   );
    -   int sqlite3_create_function16(
    -     sqlite3*,
    -     const void *zFunctionName,
    -     int nArg,
    -     int eTextRep,
    -     void*,
    -     void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
    -     void (*xStep)(sqlite3_context*,int,sqlite3_value**),
    -     void (*xFinal)(sqlite3_context*)
    -   );
    -   #define SQLITE_UTF8     1
    -   #define SQLITE_UTF16    2
    -   #define SQLITE_UTF16BE  3
    -   #define SQLITE_UTF16LE  4
    -   #define SQLITE_ANY      5
    -
    - -

-The nArg parameter specifies the number of arguments to the function. -A value of 0 indicates that any number of arguments is allowed. The -eTextRep parameter specifies what representation text values are expected -to be in for arguments to this function. The value of this parameter should -be one of the parameters defined above. SQLite version 3 allows multiple -implementations of the same function using different text representations. -The database engine chooses the function that minimizes the number -of text conversions required. -

    - -

    -Normal functions specify only xFunc and leave xStep and xFinal set to NULL. -Aggregate functions specify xStep and xFinal and leave xFunc set to NULL. -There is no separate sqlite3_create_aggregate() API. -
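As an illustration of a normal (non-aggregate) function, here is a sketch of a hypothetical scalar function half(X) that returns X/2. It uses the sqlite3_value_* and sqlite3_result_* routines listed later in this section; the function name and helper name are assumptions made for the example.

   #include <sqlite3.h>

   /* Hypothetical scalar function: half(X) returns X/2, or NULL for NULL input. */
   static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
     (void)argc;
     if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
       sqlite3_result_null(ctx);
     }else{
       sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
     }
   }

   /* Registration, assuming an open handle "db":
   **   sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);
   */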

    - -

-The function name is specified in UTF-8. A separate sqlite3_create_function16() -API works the same as sqlite3_create_function() -except that the function name is specified in UTF-16 host byte order. -

    - -

    -Notice that the parameters to functions are now pointers to sqlite3_value -structures instead of pointers to strings as in SQLite version 2.X. -The following routines are used to extract useful information from these -"values": -

    - -
    -   const void *sqlite3_value_blob(sqlite3_value*);
    -   int sqlite3_value_bytes(sqlite3_value*);
    -   int sqlite3_value_bytes16(sqlite3_value*);
    -   double sqlite3_value_double(sqlite3_value*);
    -   int sqlite3_value_int(sqlite3_value*);
    -   long long int sqlite3_value_int64(sqlite3_value*);
    -   const unsigned char *sqlite3_value_text(sqlite3_value*);
    -   const void *sqlite3_value_text16(sqlite3_value*);
    -   int sqlite3_value_type(sqlite3_value*);
    -
    - -

    -Function implementations use the following APIs to acquire context and -to report results: -

    - -
    -   void *sqlite3_aggregate_context(sqlite3_context*, int nbyte);
    -   void *sqlite3_user_data(sqlite3_context*);
    -   void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*));
    -   void sqlite3_result_double(sqlite3_context*, double);
    -   void sqlite3_result_error(sqlite3_context*, const char*, int);
    -   void sqlite3_result_error16(sqlite3_context*, const void*, int);
    -   void sqlite3_result_int(sqlite3_context*, int);
    -   void sqlite3_result_int64(sqlite3_context*, long long int);
    -   void sqlite3_result_null(sqlite3_context*);
    -   void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*));
    -   void sqlite3_result_text16(sqlite3_context*, const void*, int n, void(*)(void*));
    -   void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
    -   void *sqlite3_get_auxdata(sqlite3_context*, int);
    -   void sqlite3_set_auxdata(sqlite3_context*, int, void*, void (*)(void*));
    -
    - -

    2.4 User-defined collating sequences

    - -

    -The following routines are used to implement user-defined -collating sequences: -

    - -
    -   sqlite3_create_collation(sqlite3*, const char *zName, int eTextRep, void*,
    -      int(*xCompare)(void*,int,const void*,int,const void*));
    -   sqlite3_create_collation16(sqlite3*, const void *zName, int eTextRep, void*,
    -      int(*xCompare)(void*,int,const void*,int,const void*));
    -   sqlite3_collation_needed(sqlite3*, void*, 
    -      void(*)(void*,sqlite3*,int eTextRep,const char*));
    -   sqlite3_collation_needed16(sqlite3*, void*,
    -      void(*)(void*,sqlite3*,int eTextRep,const void*));
    -
    - -

    -The sqlite3_create_collation() function specifies a collating sequence name -and a comparison function to implement that collating sequence. The -comparison function is only used for comparing text values. The eTextRep -parameter is one of SQLITE_UTF8, SQLITE_UTF16LE, SQLITE_UTF16BE, or -SQLITE_ANY to specify which text representation the comparison function works -with. Separate comparison functions can exist for the same collating -sequence for each of the UTF-8, UTF-16LE and UTF-16BE text representations. -The sqlite3_create_collation16() works like sqlite3_create_collation() except -that the collation name is specified in UTF-16 host byte order instead of -in UTF-8. -
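A sketch of a simple case-insensitive comparison function for UTF-8 text; the comparison routine's name and the collation name used in the registration comment are assumptions for this example.

   #include <ctype.h>
   #include <sqlite3.h>

   /* Compare two UTF-8 strings ignoring ASCII case. */
   static int nocaseCompare(void *pArg, int n1, const void *p1,
                            int n2, const void *p2){
     const unsigned char *z1 = p1;
     const unsigned char *z2 = p2;
     int i, n = n1<n2 ? n1 : n2;
     (void)pArg;
     for(i=0; i<n; i++){
       int c = tolower(z1[i]) - tolower(z2[i]);
       if( c ) return c;
     }
     return n1 - n2;
   }

   /* Registration, assuming an open handle "db":
   **   sqlite3_create_collation(db, "nocase_demo", SQLITE_UTF8, 0, nocaseCompare);
   */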

    - -

-The sqlite3_collation_needed() routine registers a callback which the -database engine will invoke if it encounters an unknown collating sequence. -The callback can look up an appropriate comparison function and invoke -sqlite3_create_collation() as needed. The fourth parameter to the callback -is the name of the collating sequence in UTF-8. For sqlite3_collation_needed16() -the callback sends the collating sequence name in UTF-16 host byte order. -

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/changes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/changes.html --- sqlite3-3.4.2/www/changes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/changes.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,2053 @@ + + +SQLite changes + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

+This page provides a high-level summary of changes to SQLite. +For more detail, refer to the checkin logs generated by +CVS at + +http://www.sqlite.org/cvstrac/timeline. +

    + + +

    2009 June 27 (3.6.16)

      +
    • Fix a bug (ticket #3929) that occasionally causes INSERT or UPDATE + operations to fail on an indexed table that has a self-modifying trigger. +
    • Other minor bug fixes and performance optimizations. +

    +

    2009 June 15 (3.6.15)

      +
    • Refactor the internal representation of SQL expressions so that they + use less memory on embedded platforms. +
    • Reduce the amount of stack space used +
• Fix a 64-bit alignment bug on HP/UX and Sparc
    • The sqlite3_create_function() family of interfaces now return + SQLITE_MISUSE instead of SQLITE_ERROR when passed invalid + parameter combinations. +
    • When new tables are created using CREATE TABLE ... AS SELECT ... the + datatype of the columns is the simplified SQLite datatype (TEXT, INT, + REAL, NUMERIC, or BLOB) instead of a copy of the original datatype from + the source table. +
    • Resolve race conditions when checking for a hot rollback journal. +
    • The sqlite3_shutdown() interface frees all mutexes under windows. +
    • Enhanced robustness against corrupt database files +
    • Continuing improvements to the test suite and fixes to obscure + bugs and inconsistencies that the test suite improvements are + uncovering. + +

    +

    2009 May 25 (3.6.14.2)

      +
    • Fix a code generator bug introduced in version 3.6.14. This bug + can cause incorrect query results under obscure circumstances. + Ticket #3879. +

    +

    2009 May 19 (3.6.14.1)

    +

    2009 May 7 (3.6.14)

      +
    • Added the optional asynchronous VFS module.
    • +
    • Enhanced the query optimizer so that virtual tables are able to + make use of OR and IN operators in the WHERE clause.
    • +
    • Speed improvements in the btree and pager layers.
    • +
    • Added the SQLITE_HAVE_ISNAN compile-time option which will cause + the isnan() function from the standard math library to be used instead + of SQLite's own home-brew NaN checker.
    • +
    • Countless minor bug fixes, documentation improvements, new and + improved test cases, and code simplifications and cleanups.

      +

    +

    2009 April 13 (3.6.13)

      +
    • Fix a bug in version 3.6.12 that causes a segfault when running + a count(*) on the sqlite_master table of an empty database. Ticket #3774. +
• Fix a bug in version 3.6.12 that causes a segfault when + inserting into a table using a DEFAULT value where there is a + function as part of the DEFAULT value expression. Ticket #3791.
    • Fix data structure alignment issues on Sparc. Ticket #3777. +
    • Other minor bug fixes. +

    +

    2009 March 31 (3.6.12)

      +
    • Fixed a bug that caused database corruption when an incremental_vacuum is + rolled back in an in-memory database. Ticket #3761. +
    • Added the sqlite3_unlock_notify() interface. +
    • Added the reverse_unordered_selects pragma. +
    • The default page size on windows is automatically adjusted to match the + capabilities of the underlying filesystem. +
    • Add the new ".genfkey" command in the CLI for generating triggers to + implement foreign key constraints. +
    • Performance improvements for "count(*)" queries. +
    • Reduce the amount of heap memory used, especially by TRIGGERs. +
    • +

    +

    2009 Feb 18 (3.6.11)

    +

    2009 Jan 15 (3.6.10)

      +
    • Fix a cache coherency problem that could lead to database corruption. + Ticket #3584. +

    +

    2009 Jan 14 (3.6.9)

      +
    • Fix two bugs, which when combined might result in incorrect + query results. Both bugs were harmless by themselves; only when + they team up do they cause problems. Ticket #3581. +

    +

    2009 Jan 12 (3.6.8)

    +

    2008 Dec 16 (3.6.7)

      +
    • Reorganize the Unix interface in os_unix.c
    • +
    • Added support for "Proxy Locking" on MacOSX.
    • +
    • Changed the prototype of the sqlite3_auto_extension() interface in a + way that is backwards compatible but which might cause warnings in new + builds of applications that use that interface.
    • +
    • Changed the signature of the xDlSym method of the sqlite3_vfs object + in a way that is backwards compatible but which might cause + compiler warnings.
    • +
    • Added superfluous casts and variable initializations in order + to suppress nuisance compiler warnings.
    • +
    • Fixes for various minor bugs.
    • +

    +

    2008 Nov 26 (3.6.6.2)

      +
    • Fix a bug in the b-tree delete algorithm that seems like it might be + able to cause database corruption. The bug was first introduced in + version 3.6.6 by check-in [5899] on 2008-11-13.
    • +
    • Fix a memory leak that can occur following a disk I/O error.
    • +

    +

    2008 Nov 22 (3.6.6.1)

      +
    • Fix a bug in the page cache that can lead database corruption following + a rollback. This bug was first introduced in version 3.6.4.
    • +
    • Two other very minor bug fixes
    • +

    +

    2008 Nov 19 (3.6.6)

    +

    2008 Nov 12 (3.6.5)

    +

    2008 Oct 15 (3.6.4)

    +

    2008 Sep 22 (3.6.3)

      +
    • Fix for a bug in the SELECT DISTINCT logic that was introduced by the + prior version.
    • +
    • Other minor bug fixes
    • +

    +

    2008 Aug 30 (3.6.2)

      +
    • Split the pager subsystem into separate pager and pcache subsystems.
    • +
    • Factor out identifier resolution procedures into separate files.
    • +
    • Bug fixes
    • +

    +

    2008 Aug 6 (3.6.1)

    2008 July 16 (3.6.0 beta)

      +
    • Modifications to the virtual file system interface + to support a wider range of embedded systems. + See 35to36.html for additional information. + *** Potentially incompatible change ***
    • +
    • All C-preprocessor macros used to control compile-time options + now begin with the prefix "SQLITE_". This may require changes to + applications that compile SQLite using their own makefiles and with + custom compile-time options, hence we mark this as a + *** Potentially incompatible change ***
    • +
    • The SQLITE_MUTEX_APPDEF compile-time option is no longer supported. + Alternative mutex implementations can now be added at run-time using + the sqlite3_config() interface with the SQLITE_CONFIG_MUTEX verb. + *** Potentially incompatible change ***
    • +
    • The handling of IN and NOT IN operators that contain a NULL on their + right-hand side expression is brought into compliance with the SQL + standard and with other SQL database engines. This is a bug fix, + but as it has the potential to break legacy applications that depend + on the older buggy behavior, we mark that as a + *** Potentially incompatible change ***
    • +
    • The result column names generated for compound subqueries have been + simplified to show only the name of the column of the original table and + omit the table name. This makes SQLite operate more like other SQL + database engines.
    • +
    • Added the sqlite3_config() interface for doing run-time configuration + of the entire SQLite library (see the sketch after this list).
    • +
    • Added the sqlite3_status() interface used for querying run-time status + information about the overall SQLite library and its subsystems.
    • +
    • Added the sqlite3_initialize() and sqlite3_shutdown() interfaces.
    • +
    • The SQLITE_OPEN_NOMUTEX option was added to sqlite3_open_v2().
    • +
    • Added the PRAGMA page_count command.
    • +
    • Added the sqlite3_next_stmt() interface.
    • +
    • Added a new R*Tree virtual table
    • +
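    A minimal sketch of the new library-wide configuration and status interfaces listed above (sqlite3_config(), sqlite3_initialize(), sqlite3_status(), sqlite3_shutdown()). The threading mode chosen and the omitted error handling are illustrative choices only.

        #include <sqlite3.h>
        #include <stdio.h>

        int main(void){
          int cur = 0, hiwtr = 0;

          /* sqlite3_config() may only be called before the library starts up. */
          sqlite3_config(SQLITE_CONFIG_SERIALIZED);
          sqlite3_initialize();

          /* ... open connections and run queries here ... */

          sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hiwtr, 0);
          printf("heap in use: %d bytes (high-water mark %d)\n", cur, hiwtr);

          sqlite3_shutdown();   /* release all global resources */
          return 0;
        }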

    +

    2008 May 14 (3.5.9)

      +
    • Added experimental + support for the journal_mode PRAGMA and persistent journal (see the sketch after this list).
    • +
    • Journal mode PERSIST is the default behavior in + exclusive locking mode.
    • +
    • Fix a performance regression on LEFT JOIN (see ticket #3015) + that was mistakenly introduced in version 3.5.8.
    • +
    • Performance enhancement: Reengineer the internal routines used + to interpret and render variable-length integers.
    • +
    • Fix a buffer-overrun problem in sqlite3_mprintf() which occurs + when a string without a zero-terminator is passed to "%.*s".
    • +
    • Always convert IEEE floating point NaN values into NULL during + processing. (Ticket #3060)
    • +
    • Make sure that when a connection blocks on a RESERVED lock that + it is able to continue after the lock is released. (Ticket #3093)
    • +
    • The "configure" scripts should now automatically configure Unix + systems for large file support. Improved error messages for + when large files are encountered and large file support is disabled.
    • +
    • Avoid cache pages leaks following disk-full or I/O errors
    • +
    • And, many more minor bug fixes and performance enhancements....
    • +
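    A minimal sketch of selecting the persistent journal described above from C; the helper name is illustrative and error handling is omitted.

        #include <sqlite3.h>

        /* Ask for journal mode PERSIST on this connection. */
        int set_persistent_journal(sqlite3 *db){
          return sqlite3_exec(db, "PRAGMA journal_mode=PERSIST;", 0, 0, 0);
        }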

    +

    2008 Apr 16 (3.5.8)

      +
    • Expose SQLite's internal pseudo-random number generator (PRNG) + via the sqlite3_randomness() interface
    • +
    • New interface sqlite3_context_db_handle() that returns the + database connection handle that has invoked an application-defined + SQL function.
    • +
    • New interface sqlite3_limit() allows size and length limits to be + set on a per-connection basis and at run-time (see the sketch after this list).
    • +
    • Improved crash-robustness: write the database page size into the rollback + journal header.
    • +
    • Allow the VACUUM command to change the page size of a database file.
    • +
    • The xAccess() method of the VFS is allowed to return -1 to signal + a memory allocation error.
    • +
    • Performance improvement: The OP_IdxDelete opcode uses unpacked records, + obviating the need for one OP_MakeRecord opcode call for each index + record deleted.
    • +
    • Performance improvement: Constant subexpressions are factored out of + loops.
    • +
    • Performance improvement: Results of OP_Column are reused rather than + issuing multiple OP_Column opcodes.
    • +
    • Fix a bug in the RTRIM collating sequence.
    • +
    • Fix a bug in the SQLITE_SECURE_DELETE option that was causing + Firefox crashes. Make arrangements to always test SQLITE_SECURE_DELETE + prior to each release.
    • +
    • Other miscellaneous performance enhancements.
    • +
    • Other miscellaneous minor bug fixes.
    • +
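    A minimal sketch of the per-connection limits mentioned above; the limit categories are real, but the particular values are arbitrary examples.

        #include <sqlite3.h>

        void restrict_connection(sqlite3 *db){
          sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);      /* max string/BLOB size */
          sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);   /* max SQL text length  */
          sqlite3_limit(db, SQLITE_LIMIT_VARIABLE_NUMBER, 99);  /* max host parameters  */
        }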

    +

    2008 Mar 17 (3.5.7)

      +
    • Fix a bug (ticket #2927) in the register allocation for +compound selects - introduced by the new VM code in version 3.5.5.
    • +
    • ALTER TABLE uses double-quotes instead of single-quotes for quoting +filenames.
    • +
    • Use the WHERE clause to reduce the size of a materialized VIEW in +an UPDATE or DELETE statement. (Optimization)
    • +
    • Do not apply the flattening optimization if the outer query is an +aggregate and the inner query contains ORDER BY. (Ticket #2943)
    • +
    • Additional OS/2 updates
    • +
    • Added an experimental power-of-two, first-fit memory allocator.
    • +
    • Remove all instances of sprintf() from the code
    • +
    • Accept "Z" as the zulu timezone at the end of date strings
    • +
    • Fix a bug in the LIKE optimizer that occurs when the last character +before the first wildcard is an upper-case "Z"
    • +
    • Added the "bitvec" object for keeping track of which pages have +been journalled. Improves speed and reduces memory consumption, especially +for large database files.
    • +
    • Get the SQLITE_ENABLE_LOCKING_STYLE macro working again on Mac OS X.
    • +
    • Store the statement journal in the temporary file directory instead of +colocated with the database file.
    • +
    • Many improvements and cleanups to the configure script
    • +

    +

    2008 Feb 6 (3.5.6)

      +
    • Fix a bug (ticket #2913) +that prevented virtual tables from working in a LEFT JOIN. +The problem was introduced shortly before the 3.5.5 release.
    • +
    • Bring the OS/2 porting layer up-to-date.
    • +
    • Add the new sqlite3_result_error_code() API and use it in the +implementation of ATTACH so that proper error codes are returned +when an ATTACH fails.
    • +

    +

    2008 Jan 31 (3.5.5)

      +
    • Convert the underlying virtual machine to be a register-based machine +rather than a stack-based machine. The only user-visible change +is in the output of EXPLAIN.
    • +
    • Add the built-in RTRIM collating sequence.
    • +

    +

    2007 Dec 14 (3.5.4)

      +
    • Fix a critical bug in UPDATE or DELETE that occurs when an +OR REPLACE clause or a trigger causes rows in the same table to +be deleted as side effects. (See ticket #2832.) The most likely +result of this bug is a segmentation fault, though database +corruption is a possibility.
    • +
    • Bring the processing of ORDER BY into compliance with the +SQL standard for case where a result alias and a table column name +are in conflict. Correct behavior is to prefer the result alias. +Older versions of SQLite incorrectly picked the table column. +(See ticket #2822.)
    • +
    • The VACUUM command preserves +the setting of the +legacy_file_format pragma. +(Ticket #2804.)
    • +
    • Productize and officially support the group_concat() SQL function.
    • +
    • Better optimization of some IN operator expressions.
    • +
    • Add the ability to change the +auto_vacuum status of a +database by setting the auto_vacuum pragma and VACUUMing the database.
    • +
    • Prefix search in FTS3 is much more efficient.
    • +
    • Relax the SQL statement length restriction in the CLI so that +the ".dump" output of databases with very large BLOBs and strings can +be played back to recreate the database.
    • +
    • Other small bug fixes and optimizations.
    • +

    +

    2007 Nov 27 (3.5.3)

      +
    • Move website and documentation files out of the source tree into +a separate CM system. +
    • Fix a long-standing bug in INSERT INTO ... SELECT ... statements +where the SELECT is compound. +
    • Fix a long-standing bug in RAISE(IGNORE) as used in BEFORE triggers. +
    • Fixed the operator precedence for the ~ operator. +
    • On Win32, do not return an error when attempting to delete a file +that does not exist. +
    • Allow collating sequence names to be quoted. +
    • Modify the TCL interface to use sqlite3_prepare_v2(). +
    • Fix multiple bugs that can occur following a malloc() failure. +
    • sqlite3_step() returns SQLITE_MISUSE instead of crashing when +called with a NULL parameter. +
    • FTS3 now uses the SQLite memory allocator exclusively. The +FTS3 amalgamation can now be appended to the SQLite amalgamation to +generate a super-amalgamation containing both. +
    • The DISTINCT keyword now will sometimes use an INDEX if an +appropriate index is available and the optimizer thinks its use +might be advantageous. +

    +

    2007 Nov 05 (3.5.2)

      +
    • Dropped support for the SQLITE_OMIT_MEMORY_ALLOCATION compile-time +option. +
    • Always open files using FILE_FLAG_RANDOM_ACCESS under Windows. +
    • The 3rd parameter of the built-in SUBSTR() function is now optional. +
    • Bug fix: do not invoke the authorizer when reparsing the schema after +a schema change. +
    • Added the experimental malloc-free memory allocator in mem3.c. +
    • Virtual machine stores 64-bit integer and floating point constants +in binary instead of text for a performance boost. +
    • Fix a race condition in test_async.c. +
    • Added the ".timer" command to the CLI +

    +

    2007 Oct 04 (3.5.1)

      +
    • Nota Bene: We are not using terms "alpha" or "beta" on this + release because the code is stable and because if we use those terms, + nobody will upgrade. However, we still reserve the right to make + incompatible changes to the new VFS interface in future releases.
    • + +
    • Fix a bug in the handling of SQLITE_FULL errors that could lead + to database corruption. Ticket #2686. +
    • The test_async.c driver now does full file locking and works correctly + when used simultaneously by multiple processes on the same database. +
    • The CLI ignores whitespace (including comments) at the end of lines +
    • Make sure the query optimizer checks dependencies on all terms of + a compound SELECT statement. Ticket #2640. +
    • Add demonstration code showing how to build a VFS for a raw + mass storage without a filesystem. +
    • Added an output buffer size parameter to the xGetTempname() method + of the VFS layer. +
    • Sticky SQLITE_FULL or SQLITE_IOERR errors in the pager are reset + when a new transaction is started. +

    +

    2007 Sep 04 (3.5.0) alpha

      +
    • Redesign the OS interface layer. See + 34to35.html for details. + *** Potentially incompatible change *** +
    • The sqlite3_release_memory(), sqlite3_soft_heap_limit(), + and sqlite3_enable_shared_cache() interfaces now work across all + threads in the process, not just the single thread in which they + are invoked. + *** Potentially incompatible change *** +
    • Added the sqlite3_open_v2() interface. +
    • Reimplemented the memory allocation subsystem and made it + replaceable at compile-time. +
    • Created a new mutex subsystem and made it replaceable at + compile-time. +
    • The same database connection may now be used simultaneously by + separate threads. +

    +

    2007 August 13 (3.4.2)

      +
    • Fix a database corruption bug that might occur if a ROLLBACK command +is executed in auto-vacuum mode +and a very small sqlite3_soft_heap_limit is set. +Ticket #2565. + +
    • Add the ability to run a full regression test with a small +sqlite3_soft_heap_limit. + +
    • Fix other minor problems with using small soft heap limits. + +
    • Work-around for +GCC bug 32575. + +
    • Improved error detection of misused aggregate functions. + +
    • Improvements to the amalgamation generator script so that all symbols +are prefixed with either SQLITE_PRIVATE or SQLITE_API. +

    +

    2007 July 20 (3.4.1)

      +
    • Fix a bug in VACUUM that can lead to + + database corruption if two + processes are connected to the database at the same time and one + VACUUMs and then the other modifies the database.
    • +
    • The expression "+column" is now considered the same as "column" + when computing the collating sequence to use on the expression.
    • +
    • In the TCL language interface, + "@variable" instead of "$variable" always binds as a blob.
    • +
    • Added PRAGMA freelist_count + for determining the current size of the freelist.
    • +
    • The + PRAGMA auto_vacuum=incremental setting is now persistent.
    • +
    • Add FD_CLOEXEC to all open files under Unix.
    • +
    • Fix a bug in the + min()/max() optimization when applied to + descending indices.
    • +
    • Make sure the TCL language interface works correctly with 64-bit + integers on 64-bit machines.
    • +
    • Allow the value -9223372036854775808 as an integer literal in SQL + statements.
    • +
    • Add the capability of "hidden" columns in virtual tables.
    • +
    • Use the macro SQLITE_PRIVATE (defaulting to "static") on all + internal functions in the amalgamation.
    • +
    • Add pluggable tokenizers and ICU + tokenization support to FTS2
    • +
    • Other minor bug fixes and documentation enhancements
    • +

    +

    2007 June 18 (3.4.0)

      +
    • Fix a bug that can lead to database corruption if an SQLITE_BUSY error + occurs in the middle of an explicit transaction and that transaction + is later committed. Ticket #2409. + See the + + CorruptionFollowingBusyError wiki page for details. +
    • Fix a bug that can lead to database corruption if autovacuum mode is + on and a malloc() failure follows a CREATE TABLE or CREATE INDEX statement + which itself follows a cache overflow inside a transaction. See + ticket #2418. +
    • +
    • Added explicit upper bounds on the sizes and + quantities of things SQLite can process. This change might cause + compatibility problems for + applications that use SQLite in the extreme, which is why the current + release is 3.4.0 instead of 3.3.18.
    • +
    • Added support for Incremental BLOB I/O (see the sketch after this list).
    • +
    • Added the sqlite3_bind_zeroblob() API + and the zeroblob() SQL function.
    • +
    • Added support for + Incremental Vacuum.
    • +
    • Added the SQLITE_MIXED_ENDIAN_64BIT_FLOAT compile-time option to support + ARM7 processors with goofy endianness.
    • +
    • Removed all instances of sprintf() and strcpy() from the core library.
    • +
    • Added support for + International Components for Unicode (ICU) to the full-text search + extensions. +
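    A minimal sketch of the Incremental BLOB I/O and zeroblob() features listed above. The table and column names are examples only; space for the BLOB would normally be reserved first with zeroblob(N) or sqlite3_bind_zeroblob().

        #include <sqlite3.h>

        /* Overwrite the first three bytes of the BLOB stored in img.data
        ** (illustrative schema) at the given rowid. */
        int overwrite_blob_prefix(sqlite3 *db, sqlite3_int64 rowid){
          sqlite3_blob *pBlob = 0;
          int rc = sqlite3_blob_open(db, "main", "img", "data", rowid,
                                     1 /* open read-write */, &pBlob);
          if( rc==SQLITE_OK ){
            rc = sqlite3_blob_write(pBlob, "PNG", 3, 0);  /* 3 bytes at offset 0 */
            sqlite3_blob_close(pBlob);
          }
          return rc;
        }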

    +

      +
    • In the Windows OS driver, reacquire a SHARED lock if an attempt to + acquire an EXCLUSIVE lock fails. Ticket #2354
    • +
    • Fix the REPLACE() function so that it returns NULL if the second argument + is an empty string. Ticket #2324.
    • +
    • Document the hazards of type conversions in + sqlite3_column_blob() + and related APIs. Fix unnecessary type conversions. Ticket #2321.
    • +
    • Internationalization of the TRIM() function. Ticket #2323
    • +
    • Use memmove() instead of memcpy() when moving between memory regions + that might overlap. Ticket #2334
    • +
    • Fix an optimizer bug involving subqueries in a compound SELECT that has + both an ORDER BY and a LIMIT clause. Ticket #2339.
    • +
    • Make sure the sqlite3_snprintf() + interface does not zero-terminate the buffer if the buffer size is + less than 1. Ticket #2341
    • +
    • Fix the built-in printf logic so that it prints "NaN" not "Inf" for + floating-point NaNs. Ticket #2345
    • +
    • When converting BLOB to TEXT, use the text encoding of the main database. + Ticket #2349
    • +
    • Keep the full precision of integers (if possible) when casting to + NUMERIC. Ticket #2364
    • +
    • Fix a bug in the handling of UTF16 codepoint 0xE000
    • +
    • Consider explicit collate clauses when matching WHERE constraints + to indices in the query optimizer. Ticket #2391
    • +
    • Fix the query optimizer to correctly handle constant expressions in + the ON clause of a LEFT JOIN. Ticket #2403
    • +
    • Fix the query optimizer to handle rowid comparisons to NULL + correctly. Ticket #2404
    • +
    • Fix many potential segfaults that could be caused by malicious SQL + statements.
    • +

    +

    2007 April 25 (3.3.17)

      +
    • When the "write_version" value of the database header is larger than + what the library understands, make the database read-only instead of + unreadable.
    • +
    • Other minor bug fixes
    • +

    +

    2007 April 18 (3.3.16)

      +
    • Fix a bug that caused VACUUM to fail if NULLs appeared in a + UNIQUE column.
    • +
    • Reinstate performance improvements that were added in + Version 3.3.14 + but regressed in Version 3.3.15.
    • +
    • Fix problems with the handling of ORDER BY expressions on + compound SELECT statements in subqueries.
    • +
    • Fix a potential segfault when destroying locks on WinCE in + a multi-threaded environment.
    • +
    • Documentation updates.
    • +

    +

    2007 April 9 (3.3.15)

      +
    • Fix a bug introduced in 3.3.14 that caused a rollback of + CREATE TEMP TABLE to leave the database connection wedged.
    • +
    • Fix a bug that caused an extra NULL row to be returned when + a descending query was interrupted by a change to the database.
    • +
    • The FOR EACH STATEMENT clause on a trigger now causes a syntax + error. It used to be silently ignored.
    • +
    • Fix an obscure and relatively harmless problem that might have caused + a resource leak following an I/O error.
    • +
    • Many improvements to the test suite. Test coverage now exceeds 98%.
    • +

    +

    2007 April 2 (3.3.14)

      +
    • Fix a bug (ticket #2273) + that could cause a segfault when the IN operator + is used on one term of a two-column index and the right-hand side of + the IN operator contains a NULL.
    • +
    • Added a new OS interface method for determining the sector size + of underlying media: sqlite3OsSectorSize().
    • +
    • A new algorithm for statements of the form + INSERT INTO table1 SELECT * FROM table2 + is faster and reduces fragmentation. VACUUM uses statements of + this form and thus runs faster and defragments better.
    • +
    • Performance enhancements through reductions in disk I/O: +
        +
      • Do not read the last page of an overflow chain when + deleting the row - just add that page to the freelist.
      • +
      • Do not store pages being deleted in the + rollback journal.
      • +
      • Do not read in the (meaningless) content of + pages extracted from the freelist.
      • +
      • Do not flush the page cache (thus avoiding + a cache refill) unless another process changes the underlying + database file.
      • +
      • Truncate rather than delete the rollback journal when committing + a transaction in exclusive access mode, or when committing the TEMP + database.
      • +
    • +
    • Added support for exclusive access mode using + + "PRAGMA locking_mode=EXCLUSIVE" (see the sketch after this list)
    • +
    • Use heap space instead of stack space for large buffers in the + pager - useful on embedded platforms with stack-space + limitations.
    • +
    • Add a makefile target "sqlite3.c" that builds an amalgamation containing + the core SQLite library C code in a single file.
    • +
    • Get the library working correctly when compiled + with GCC option "-fstrict-aliasing".
    • +
    • Removed the vestigial SQLITE_PROTOCOL error.
    • +
    • Improvements to test coverage, other minor bugs fixed, + memory leaks plugged, + code refactored and/or re-commented in places for easier reading.
    • +
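    A minimal sketch of entering the exclusive access mode described above, under which the rollback journal is truncated rather than deleted at commit; the query used to trigger lock acquisition is an arbitrary example and error handling is omitted.

        #include <sqlite3.h>

        void use_exclusive_mode(sqlite3 *db){
          sqlite3_exec(db, "PRAGMA locking_mode=EXCLUSIVE;", 0, 0, 0);
          /* The exclusive lock is actually taken on the next read or write. */
          sqlite3_exec(db, "SELECT count(*) FROM sqlite_master;", 0, 0, 0);
        }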

    +

    2007 February 13 (3.3.13)

      +
    • Add a "fragmentation" measurement in the output of sqlite3_analyzer.
    • +
    • Add the COLLATE operator used to explicitly set the collating sequence +used by an expression. This feature is considered experimental pending +additional testing.
    • +
    • Allow up to 64 tables in a join - the old limit was 32.
    • +
    • Added two new experimental functions: +randomBlob() and +hex(). +Their intended use is to facilitate generating +UUIDs. +
    • +
    • Fix a problem where +PRAGMA count_changes was +causing incorrect results for updates on tables with triggers
    • +
    • Fix a bug in the ORDER BY clause optimizer for joins where the +left-most table in the join is constrained by a UNIQUE index.
    • +
    • Fixed a bug in the "copy" method of the TCL interface.
    • +
    • Bug fixes in fts1 and fts2 modules.
    • +

    +

    2007 January 27 (3.3.12)

      +
    • Fix another bug in the IS NULL optimization that was added in +version 3.3.9.
    • +
    • Fix an assertion fault that occurred on deeply nested views.
    • +
    • Limit the amount of output that +PRAGMA integrity_check +generates.
    • +
    • Minor syntactic changes to support a wider variety of compilers.
    • +

    +

    2007 January 22 (3.3.11)

      +
    • Fix another bug in the implementation of the new +sqlite3_prepare_v2() API. +We'll get it right eventually...
    • +
    • Fix a bug in the IS NULL optimization that was added in version 3.3.9 - +the bug was causing incorrect results on certain LEFT JOINs that included +in the WHERE clause an IS NULL constraint for the right table of the +LEFT JOIN.
    • +
    • Make AreFileApisANSI() a no-op macro in WinCE since WinCE does not +support this function.
    • +

    +

    2007 January 9 (3.3.10)

      +
    • Fix bugs in the implementation of the new +sqlite3_prepare_v2() API +that can lead to segfaults.
    • +
    • Fix 1-second round-off errors in the + +strftime() function
    • +
    • Enhance the Windows OS layer to provide detailed error codes
    • +
    • Work around a win2k problem so that SQLite can use single-character +database file names
    • +
    • The +user_version and +schema_version pragmas +correctly set their column names in the result set
    • +
    • Documentation updates
    • +

    +

    2007 January 4 (3.3.9)

      +
    • Fix bugs in pager.c that could lead to database corruption if two +processes both try to recover a hot journal at the same instant
    • +
    • Added the sqlite3_prepare_v2() +API (see the sketch after this list).
    • +
    • Fixed the ".dump" command in the command-line shell to show +indices, triggers and views again.
    • +
    • Change the table_info pragma so that it returns NULL for the default +value if there is no default value
    • +
    • Support for non-ASCII characters in win95 filenames
    • +
    • Query optimizer enhancements: +
        +
      • Optimizer does a better job of using indices to satisfy ORDER BY +clauses that sort on the integer primary key
      • +
      • Use an index to satisfy an IS NULL operator in the WHERE clause
      • +
      • Fix a bug that was causing the optimizer to miss an OR optimization +opportunity
      • +
      • The optimizer has more freedom to reorder tables in the FROM clause +even if there are LEFT joins.
      • +
      +
    • Extension loading support added to WinCE
    • +
    • Allow constraint names on the DEFAULT clause in a table definition
    • +
    • Added the ".bail" command to the command-line shell
    • +
    • Make CSV (comma-separated value) output from the command-line shell +more closely aligned to accepted practice
    • +
    • Experimental FTS2 module added
    • +
    • Use sqlite3_mprintf() instead of strdup() to avoid libc dependencies
    • +
    • VACUUM uses a temporary file in the official TEMP folder, not in the +same directory as the original database
    • +
    • The prefix on temporary filenames on Windows is changed from "sqlite" +to "etilqs".
    • +
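    A minimal sketch of the sqlite3_prepare_v2() interface added above; compared with sqlite3_prepare(), sqlite3_step() on a _v2 statement reports the real error code directly. The query is an arbitrary example and error handling is abbreviated.

        #include <sqlite3.h>
        #include <stdio.h>

        int print_user_version(sqlite3 *db){
          sqlite3_stmt *pStmt = 0;
          int rc = sqlite3_prepare_v2(db, "PRAGMA user_version;", -1, &pStmt, 0);
          if( rc==SQLITE_OK ){
            if( sqlite3_step(pStmt)==SQLITE_ROW ){
              printf("user_version = %d\n", sqlite3_column_int(pStmt, 0));
            }
            rc = sqlite3_finalize(pStmt);
          }
          return rc;
        }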

    +

    2006 October 9 (3.3.8)

      +
    • Support for full text search using the +FTS1 module +(beta)
    • +
    • Added OS-X locking patches (beta - disabled by default)
    • +
    • Introduce extended error codes and add error codes for various +kinds of I/O errors.
    • +
    • Added support for IF EXISTS on CREATE/DROP TRIGGER/VIEW
    • +
    • Fix the regression test suite so that it works with Tcl8.5
    • +
    • Enhance sqlite3_set_authorizer() to provide notification of calls to + SQL functions.
    • +
    • Added experimental API: sqlite3_auto_extension()
    • +
    • Various minor bug fixes
    • +

    +

    2006 August 12 (3.3.7)

    +

    2006 June 6 (3.3.6)

      +
    • Plays better with virus scanners on Windows
    • +
    • Faster :memory: databases
    • +
    • Fix an obscure segfault in UTF-8 to UTF-16 conversions
    • +
    • Added driver for OS/2
    • +
    • Correct column meta-information returned for aggregate queries
    • +
    • Enhanced output from EXPLAIN QUERY PLAN
    • +
    • LIMIT 0 now works on subqueries
    • +
    • Bug fixes and performance enhancements in the query optimizer
    • +
    • Correctly handle NULL filenames in ATTACH and DETACH
    • +
    • Improved syntax error messages in the parser
    • +
    • Fix type coercion rules for the IN operator
    • +

    +

    2006 April 5 (3.3.5)

      +
    • CHECK constraints use conflict resolution algorithms correctly.
    • +
    • The SUM() function throws an error on integer overflow.
    • +
    • Choose the column names in a compound query from the left-most SELECT + instead of the right-most.
    • +
    • The sqlite3_create_collation() function + honors the SQLITE_UTF16_ALIGNED flag.
    • +
    • SQLITE_SECURE_DELETE compile-time option causes deletes to overwrite + old data with zeros.
    • +
    • Detect integer overflow in abs().
    • +
    • The random() function provides 64 bits of randomness instead of + only 32 bits.
    • +
    • Parser detects and reports automaton stack overflow.
    • +
    • Change the round() function to return REAL instead of TEXT.
    • +
    • Allow WHERE clause terms on the left table of a LEFT OUTER JOIN to + contain aggregate subqueries.
    • +
    • Skip over leading spaces in text to numeric conversions.
    • +
    • Various minor bug and documentation typo fixes and + performance enhancements.
    • +

    +

    2006 February 11 (3.3.4)

      +
    • Fix a blunder in the Unix mutex implementation that can lead to +deadlock on multithreaded systems.
    • +
    • Fix an alignment problem on 64-bit machines
    • +
    • Added the fullfsync pragma.
    • +
    • Fix an optimizer bug that could have caused some unusual LEFT OUTER JOINs +to give incorrect results.
    • +
    • The SUM function detects integer overflow and converts to accumulating +an approximate result using floating point numbers
    • +
    • Host parameter names can begin with '@' for compatibility with SQL Server. +
    • +
    • Other miscellaneous bug fixes
    • +

    +

    2006 January 31 (3.3.3)

      +
    • Removed support for an ON CONFLICT clause on CREATE INDEX - it never +worked correctly so this should not present any backward compatibility +problems.
    • +
    • Authorizer callback now notified of ALTER TABLE ADD COLUMN commands
    • +
    • After any changes to the TEMP database schema, all prepared statements +are invalidated and must be recreated using a new call to +sqlite3_prepare()
    • +
    • Other minor bug fixes in preparation for the first stable release +of version 3.3
    • +

    2006 January 24 (3.3.2 beta)

      +
    • Bug fixes and speed improvements. Improved test coverage.
    • +
    • Changes to the OS-layer interface: mutexes must now be recursive.
    • +
    • Discontinue the use of thread-specific data for out-of-memory +exception handling
    • +

    2006 January 16 (3.3.1 alpha)

      +
    • Countless bug fixes
    • +
    • Speed improvements
    • +
    • Database connections can now be used by multiple threads, not just +the thread in which they were created.
    • +

    2006 January 10 (3.3.0 alpha)

      +
    • CHECK constraints
    • +
    • IF EXISTS and IF NOT EXISTS clauses on CREATE/DROP TABLE/INDEX.
    • +
    • DESC indices
    • +
    • More efficient encoding of boolean values resulting in smaller database +files
    • +
    • More aggressive SQLITE_OMIT_FLOATING_POINT
    • +
    • Separate INTEGER and REAL affinity
    • +
    • Added a virtual function layer for the OS interface
    • +
    • "exists" method added to the TCL interface
    • +
    • Improved response to out-of-memory errors
    • +
    • Database cache can be optionally shared between connections +in the same thread
    • +
    • Optional READ UNCOMMITTED isolation (instead of the default +isolation level of SERIALIZABLE) and table level locking when +database connections share a common cache.
    • +

    +

    2005 December 19 (3.2.8)

      +
    • Fix an obscure bug that can cause database corruption under the +following unusual circumstances: A large INSERT or UPDATE statement which +is part of an even larger transaction fails due to a uniqueness constraint +but the containing transaction commits.
    • +

    +

    2005 December 19 (2.8.17)

      +
    • Fix an obscure bug that can cause database corruption under the +following unusual circumstances: A large INSERT or UPDATE statement which +is part of an even larger transaction fails due to a uniqueness constraint +but the containing transaction commits.
    • +

    +

    2005 September 24 (3.2.7)

      +
    • GROUP BY now considers NULLs to be equal again, as it should +
    • +
    • Now compiles on Solaris and OpenBSD and other Unix variants +that lack the fdatasync() function
    • +
    • Now compiles on MSVC++6 again
    • +
    • Fix uninitialized variables causing malfunctions for various obscure +queries
    • +
    • Correctly compute a LEFT OUTER JOIN that is constrained on the +left table only
    • +

    +

    2005 September 17 (3.2.6)

      +
    • Fix a bug that can cause database corruption if a VACUUM (or + autovacuum) fails and is rolled back on a database that is + larger than 1GiB
    • +
    • LIKE optimization now works for columns with COLLATE NOCASE
    • +
    • ORDER BY and GROUP BY now use bounded memory
    • +
    • Added support for COUNT(DISTINCT expr)
    • +
    • Change the way SUM() handles NULL values in order to comply with + the SQL standard
    • +
    • Use fdatasync() instead of fsync() where possible in order to speed + up commits slightly
    • +
    • Use of the CROSS keyword in a join turns off the table reordering + optimization
    • +
    • Added the experimental and undocumented EXPLAIN QUERY PLAN capability
    • +
    • Use the unicode API in Windows
    • +

    +

    2005 August 27 (3.2.5)

      +
    • Fix a bug affecting DELETE and UPDATE statements that changed +more than 40960 rows.
    • +
    • Change the makefile so that it no longer requires GNUmake extensions
    • +
    • Fix the --enable-threadsafe option on the configure script
    • +
    • Fix a code generator bug that occurs when the left-hand side of an IN +operator is constant and the right-hand side is a SELECT statement
    • +
    • The PRAGMA synchronous=off statement now disables syncing of the +master journal file in addition to the normal rollback journals
    • +

    +

    2005 August 24 (3.2.4)

      +
    • Fix a bug introduced in the previous release +that can cause a segfault while generating code +for complex WHERE clauses.
    • +
    • Allow floating point literals to begin or end with a decimal point.
    • +

    +

    2005 August 21 (3.2.3)

      +
    • Added support for the CAST operator
    • +
    • Tcl interface allows BLOB values to be transferred to user-defined +functions
    • +
    • Added the "transaction" method to the Tcl interface
    • +
    • Allow the DEFAULT value of a column to call functions that have constant +operands
    • +
    • Added the ANALYZE command for gathering statistics on indices and +using those statistics when picking an index in the optimizer
    • +
    • Remove the limit (formerly 100) on the number of terms in the +WHERE clause
    • +
    • The right-hand side of the IN operator can now be a list of expressions +instead of just a list of constants
    • +
    • Rework the optimizer so that it is able to make better use of indices
    • +
    • The order of tables in a join is adjusted automatically to make +better use of indices
    • +
    • The IN operator is now a candidate for optimization even if the left-hand +side is not the left-most term of the index. Multiple IN operators can be +used with the same index.
    • +
    • WHERE clause expressions using BETWEEN and OR are now candidates +for optimization
    • +
    • Added the "case_sensitive_like" pragma and the SQLITE_CASE_SENSITIVE_LIKE +compile-time option to set its default value to "on".
    • +
    • Use indices to help with GLOB expressions and LIKE expressions too +when the case_sensitive_like pragma is enabled
    • +
    • Added support for grave-accent quoting for compatibility with MySQL
    • +
    • Improved test coverage
    • +
    • Dozens of minor bug fixes
    • +

    +

    2005 June 13 (3.2.2)

      +
    • Added the sqlite3_db_handle() API
    • +
    • Added the sqlite3_get_autocommit() API
    • +
    • Added a REGEXP operator to the parser. There is no function to back +up this operator in the standard build but users can add their own using +sqlite3_create_function()
    • +
    • Speed improvements and library footprint reductions.
    • +
    • Fix byte alignment problems on 64-bit architectures.
    • +
    • Many, many minor bug fixes and documentation updates.
    • +
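    A minimal sketch of backing the REGEXP operator mentioned above with a user-defined regexp() function via sqlite3_create_function(); the matcher here is a trivial substring test standing in for a real regular-expression engine.

        #include <sqlite3.h>
        #include <string.h>

        /* Called for "X REGEXP Y": argv[0] is the pattern, argv[1] the text. */
        static void regexp_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
          const char *pattern = (const char*)sqlite3_value_text(argv[0]);
          const char *text    = (const char*)sqlite3_value_text(argv[1]);
          (void)argc;  /* always 2 */
          if( pattern && text ){
            sqlite3_result_int(ctx, strstr(text, pattern)!=0);  /* placeholder match */
          }else{
            sqlite3_result_null(ctx);
          }
        }

        int register_regexp(sqlite3 *db){
          return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, 0,
                                         regexp_func, 0, 0);
        }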

    +

    2005 March 29 (3.2.1)

      +
    • Fix a memory allocation error in the new ADD COLUMN command.
    • +
    • Documentation updates
    • +

    +

    2005 March 21 (3.2.0)

      +
    • Added support for ALTER TABLE ADD COLUMN.
    • +
    • Added support for the "T" separator in ISO-8601 date/time strings.
    • +
    • Improved support for Cygwin.
    • +
    • Numerous bug fixes and documentation updates.
    • +
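    A minimal sketch exercising the ALTER TABLE ADD COLUMN support and the ISO-8601 "T" separator listed above; the schema is an example only and error handling is omitted.

        #include <sqlite3.h>

        void demo_add_column(sqlite3 *db){
          sqlite3_exec(db, "CREATE TABLE log(msg TEXT);", 0, 0, 0);
          sqlite3_exec(db, "ALTER TABLE log ADD COLUMN created TEXT;", 0, 0, 0);
          sqlite3_exec(db,
              "INSERT INTO log(msg, created) "
              "VALUES('hello', datetime('2005-03-21T12:00:00'));", 0, 0, 0);
        }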

    +

    2005 March 16 (3.1.6)

      +
    • Fix a bug that could cause database corruption when inserting + records into tables with around 125 columns.
    • +
    • sqlite3_step() is now much more likely to invoke the busy handler + and less likely to return SQLITE_BUSY.
    • +
    • Fix memory leaks that used to occur after a malloc() failure.
    • +

    +

    2005 March 11 (3.1.5)

      +
    • The ioctl on OS-X to control syncing to disk is F_FULLFSYNC, + not F_FULLSYNC. The previous release had it wrong.
    • +

    +

    2005 March 10 (3.1.4)

      +
    • Fix a bug in autovacuum that could cause database corruption if +a CREATE UNIQUE INDEX fails because of a constraint violation. +This problem only occurs if the new autovacuum feature introduced in +version 3.1 is turned on.
    • +
    • The F_FULLSYNC ioctl (currently only supported on OS-X) is disabled +if the synchronous pragma is set to something other than "full".
    • +
    • Add additional forward compatibility to the future version 3.2 database +file format.
    • +
    • Fix a bug in WHERE clauses of the form (rowid<'2')
    • +
    • New SQLITE_OMIT_... compile-time options added
    • +
    • Updates to the man page
    • +
    • Remove the use of strcasecmp() from the shell
    • +
    • Windows DLL exports symbols Tclsqlite_Init and Sqlite_Init
    • +

    +

    2005 February 19 (3.1.3)

      +
    • Fix a problem with VACUUM on databases from which tables containing +AUTOINCREMENT have been dropped.
    • +
    • Add forward compatibility to the future version 3.2 database file +format.
    • +
    • Documentation updates
    • +

    +

    2005 February 15 (3.1.2)

      +
    • Fix a bug that can lead to database corruption if there are two +open connections to the same database and one connection does a VACUUM +and the second makes some change to the database.
    • +
    • Allow "?" parameters in the LIMIT clause.
    • +
    • Fix VACUUM so that it works with AUTOINCREMENT.
    • +
    • Fix a race condition in AUTOVACUUM that can lead to corrupt databases
    • +
    • Add a numeric version number to the sqlite3.h include file.
    • +
    • Other minor bug fixes and performance enhancements.
    • +

    +

    2005 February 15 (2.8.16)

      +
    • Fix a bug that can lead to database corruption if there are two +open connections to the same database and one connection does a VACUUM +and the second makes some change to the database.
    • +
    • Correctly handle quoted names in CREATE INDEX statements.
    • +
    • Fix a naming conflict between sqlite.h and sqlite3.h.
    • +
    • Avoid excess heap usage when copying expressions.
    • +
    • Other minor bug fixes.
    • +

    2005 February 1 (3.1.1 BETA)

      +
    • Automatic caching of prepared statements in the TCL interface
    • +
    • ATTACH and DETACH as well as some other operations cause existing + prepared statements to expire.
    • +
    • Numerous minor bug fixes
    • +

    2005 January 21 (3.1.0 ALPHA)

      +
    • Autovacuum support added
    • +
    • CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP added
    • +
    • Support for the EXISTS clause added.
    • +
    • Support for correlated subqueries added.
    • +
    • Added the ESCAPE clause on the LIKE operator.
    • +
    • Support for ALTER TABLE ... RENAME TABLE ... added
    • +
    • AUTOINCREMENT keyword supported on INTEGER PRIMARY KEY
    • +
    • Many SQLITE_OMIT_ macros inserted to omit features at compile-time + and reduce the library footprint.
    • +
    • The REINDEX command was added.
    • +
    • The engine no longer consults the main table if it can get + all the information it needs from an index.
    • +
    • Many nuisance bugs fixed.
    • +

    +

    2004 October 11 (3.0.8)

      +
    • Add support for DEFERRED, IMMEDIATE, and EXCLUSIVE transactions.
    • +
    • Allow new user-defined functions to be created when there are +already one or more precompiled SQL statements.
    • +
    • Fix portability problems for Mingw/MSYS.
    • +
    • Fix a byte alignment problem on 64-bit Sparc machines.
    • +
    • Fix the ".import" command of the shell so that it ignores \r +characters at the end of lines.
    • +
    • The "csv" mode option in the shell puts strings inside double-quotes.
    • +
    • Fix typos in documentation.
    • +
    • Convert array constants in the code to have type "const".
    • +
    • Numerous code optimizations, especially optimizations designed to +make the code footprint smaller.
    • +

    +

    2004 September 18 (3.0.7)

      +
    • The BTree module allocates large buffers using malloc() instead of + off of the stack, in order to play better on machines with limited + stack space.
    • +
    • Fixed naming conflicts so that versions 2.8 and 3.0 can be + linked and used together in the same ANSI-C source file.
    • +
    • New interface: sqlite3_bind_parameter_index()
    • +
    • Add support for wildcard parameters of the form: "?nnn"
    • +
    • Fix problems found on 64-bit systems.
    • +
    • Removed encode.c file (containing unused routines) from the + version 3.0 source tree.
    • +
    • The sqlite3_trace() callbacks occur before each statement + is executed, not when the statement is compiled.
    • +
    • Makefile updates and miscellaneous bug fixes.
    • +
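    A minimal sketch of the "?nnn" wildcard parameters and the sqlite3_bind_parameter_index() interface listed above; the table and values are examples only.

        #include <sqlite3.h>

        int insert_pair(sqlite3 *db){
          sqlite3_stmt *pStmt = 0;
          int rc = sqlite3_prepare(db, "INSERT INTO t(a,b) VALUES(?1, ?2)", -1,
                                   &pStmt, 0);
          if( rc==SQLITE_OK ){
            int i = sqlite3_bind_parameter_index(pStmt, "?2");   /* returns 2 */
            sqlite3_bind_int(pStmt, 1, 42);
            sqlite3_bind_text(pStmt, i, "hello", -1, SQLITE_STATIC);
            sqlite3_step(pStmt);
            rc = sqlite3_finalize(pStmt);
          }
          return rc;
        }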

    2004 September 02 (3.0.6 beta)

      +
    • Better detection and handling of corrupt database files.
    • +
    • The sqlite3_step() interface returns SQLITE_BUSY if it is unable + to commit a change because of a lock
    • +
    • Combine the implementations of LIKE and GLOB into a single + pattern-matching subroutine.
    • +
    • Miscellaneous code size optimizations and bug fixes
    • +

    2004 August 29 (3.0.5 beta)

      +
    • Support for ":AAA" style bind parameter names.
    • +
    • Added the new sqlite3_bind_parameter_name() interface.
    • +
    • Support for TCL variable names embedded in SQL statements in the + TCL bindings.
    • +
    • The TCL bindings transfer data without necessarily doing a conversion + to a string.
    • +
    • The database for TEMP tables is not created until it is needed.
    • +
    • Add the ability to specify an alternative temporary file directory + using the "sqlite_temp_directory" global variable.
    • +
    • A compile-time option (SQLITE_BUSY_RESERVED_LOCK) causes the busy + handler to be called when there is contention for a RESERVED lock.
    • +
    • Various bug fixes and optimizations
    • +

    2004 August 8 (3.0.4 beta)

      +
    • CREATE TABLE and DROP TABLE now work correctly as prepared statements.
    • +
    • Fix a bug in VACUUM and UNIQUE indices.
    • +
    • Add the ".import" command to the command-line shell.
    • +
    • Fix a bug that could cause index corruption when an attempt to + delete rows of a table is blocked by a pending query.
    • +
    • Library size optimizations.
    • +
    • Other minor bug fixes.
    • +

    +

    2004 July 22 (2.8.15)

      +
    • This is a maintenance release only. Various minor bugs have been +fixed and some portability enhancements added.
    • +

    2004 July 22 (3.0.3 beta)

      +
    • The second beta release for SQLite 3.0.
    • +
    • Add support for "PRAGMA page_size" to adjust the page size of +the database.
    • +
    • Various bug fixes and documentation updates.
    • +

    2004 June 30 (3.0.2 beta)

      +
    • The first beta release for SQLite 3.0.
    • +

    2004 June 22 (3.0.1 alpha)

      +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Lots of bug fixes.
    • +

    2004 June 18 (3.0.0 alpha)

      +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Support for internationalization including UTF-8, UTF-16, and + user defined collating sequences.
    • +
    • New file format that is 25% to 35% smaller for typical use.
    • +
    • Improved concurrency.
    • +
    • Atomic commits for ATTACHed databases.
    • +
    • Remove cruft from the APIs.
    • +
    • BLOB support.
    • +
    • 64-bit rowids.
    • +
    • More information. +

    +

    2004 June 9 (2.8.14)

      +
    • Fix the min() and max() optimizer so that it works when the FROM + clause consists of a subquery.
    • +
    • Ignore extra whitespace at the end of "." commands in the shell.
    • +
    • Bundle sqlite_encode_binary() and sqlite_decode_binary() with the + library.
    • +
    • The TEMP_STORE and DEFAULT_TEMP_STORE pragmas now work.
    • +
    • Code changes to compile cleanly using OpenWatcom.
    • +
    • Fix VDBE stack overflow problems with INSTEAD OF triggers and + NULLs in IN operators.
    • +
    • Add the global variable sqlite_temp_directory which if set defines the + directory in which temporary files are stored.
    • +
    • sqlite_interrupt() plays well with VACUUM.
    • +
    • Other minor bug fixes.
    • +

    +

    2004 March 8 (2.8.13)

      +
    • Refactor parts of the code in order to make the code footprint + smaller. The code is now also a little bit faster.
    • +
    • sqlite_exec() is now implemented as a wrapper around sqlite_compile() + and sqlite_step().
    • +
    • The built-in min() and max() functions now honor the difference between + NUMERIC and TEXT datatypes. Formerly, min() and max() always assumed + their arguments were of type NUMERIC.
    • +
    • New HH:MM:SS modifier to the built-in date/time functions.
    • +
    • Experimental sqlite_last_statement_changes() API added. Fixed the + last_insert_rowid() function so that it works correctly with + triggers.
    • +
    • Add function prototypes for the database encryption API.
    • +
    • Fix several nuisance bugs.
    • +

    +

    2004 February 8 (2.8.12)

      +
    • Fix a bug that might corrupt the rollback journal if a power failure + or external program halt occurs in the middle of a COMMIT. The corrupt + journal can lead to database corruption when it is rolled back.
    • +
    • Reduce the size and increase the speed of various modules, especially + the virtual machine.
    • +
    • Allow "<expr> IN <table>" as a shorthand for + "<expr> IN (SELECT * FROM <table>".
    • +
    • Optimizations to the sqlite_mprintf() routine.
    • +
    • Make sure the MIN() and MAX() optimizations work within subqueries.
    • +

    +

    2004 January 14 (2.8.11)

      +
    • Fix a bug in how the IN operator handles NULLs in subqueries. The bug + was introduced by the previous release.
    • +

    +

    2004 January 13 (2.8.10)

      +
    • Fix a potential database corruption problem on Unix caused by the fact + that all POSIX advisory locks are cleared whenever you close() a file. + The workaround is to embargo all close() calls while locks are + outstanding.
    • +
    • Performance enhancements on some corner cases of COUNT(*).
    • +
    • Make sure the in-memory backend responds sanely if malloc() fails.
    • +
    • Allow sqlite_exec() to be called from within user-defined SQL + functions.
    • +
    • Improved accuracy of floating-point conversions using "long double".
    • +
    • Bug fixes in the experimental date/time functions.
    • +

    +

    2004 January 5 (2.8.9)

      +
    • Fix a 32-bit integer overflow problem that could result in corrupt + indices in a database if large negative numbers (less than -2147483648) + were inserted into an indexed numeric column.
    • +
    • Fix a locking problem on multi-threaded Linux implementations.
    • +
    • Always use "." instead of "," as the decimal point even if the locale + requests ",".
    • +
    • Added UTC to localtime conversions to the experimental date/time + functions.
    • +
    • Bug fixes to date/time functions.
    • +

    +

    2003 December 17 (2.8.8)

      +
    • Fix a critical bug introduced into 2.8.0 which could cause + database corruption.
    • +
    • Fix a problem with 3-way joins that do not use indices
    • +
    • The VACUUM command now works with the non-callback API
    • +
    • Improvements to the "PRAGMA integrity_check" command
    • +

    +

    2003 December 4 (2.8.7)

      +
    • Added experimental sqlite_bind() and sqlite_reset() APIs.
    • +
    • If the name of the database is an empty string, open a new database + in a temporary file that is automatically deleted when the database + is closed.
    • +
    • Performance enhancements in the lemon-generated parser
    • +
    • Experimental date/time functions revised.
    • +
    • Disallow temporary indices on permanent tables.
    • +
    • Documentation updates and typo fixes
    • +
    • Added experimental sqlite_progress_handler() callback API
    • +
    • Removed support for the Oracle8 outer join syntax.
    • +
    • Allow GLOB and LIKE operators to work as functions.
    • +
    • Other minor documentation and makefile changes and bug fixes.
    • +

    +

    2003 August 21 (2.8.6)

      +
    • Moved the CVS repository to www.sqlite.org
    • +
    • Update the NULL-handling documentation.
    • +
    • Experimental date/time functions added.
    • +
    • Bug fix: correctly evaluate a view of a view without segfaulting.
    • +
    • Bug fix: prevent database corruption if you dropped a + trigger that had the same name as a table.
    • +
    • Bug fix: allow a VACUUM (without segfaulting) on an empty + database after setting the EMPTY_RESULT_CALLBACKS pragma.
    • +
    • Bug fix: if an integer value will not fit in a 32-bit int, store it in + a double instead.
    • +
    • Bug fix: Make sure the journal file directory entry is committed to disk + before writing the database file.
    • +

    +

    2003 July 22 (2.8.5)

      +
    • Make LIMIT work on a compound SELECT statement.
    • +
    • LIMIT 0 now shows no rows. Use LIMIT -1 to see all rows.
    • +
    • Correctly handle comparisons between an INTEGER PRIMARY KEY and + a floating point number.
    • +
    • Fix several important bugs in the new ATTACH and DETACH commands.
    • +
    • Updated the NULL-handling document.
    • +
    • Allow NULL arguments in sqlite_compile() and sqlite_step().
    • +
    • Many minor bug fixes
    • +

    +

    2003 June 29 (2.8.4)

      +
    • Enhanced the "PRAGMA integrity_check" command to verify indices.
    • +
    • Added authorization hooks for the new ATTACH and DETACH commands.
    • +
    • Many documentation updates
    • +
    • Many minor bug fixes
    • +

    +

    2003 June 4 (2.8.3)

      +
    • Fix a problem that will corrupt the indices on a table if you + do an INSERT OR REPLACE or an UPDATE OR REPLACE on a table that + contains an INTEGER PRIMARY KEY plus one or more indices.
    • +
    • Fix a bug in Windows locking code so that locks work correctly + when simultaneously accessed by Win95 and WinNT systems.
    • +
    • Add the ability for INSERT and UPDATE statements to refer to the + "rowid" (or "_rowid_" or "oid") columns.
    • +
    • Other important bug fixes
    • +

    +

    2003 May 17 (2.8.2)

      +
    • Fix a problem that will corrupt the database file if you drop a + table from the main database that has a TEMP index.
    • +

    +

    2003 May 16 (2.8.1)

      +
    • Reactivated the VACUUM command that reclaims unused disk space in + a database file.
    • +
    • Added the ATTACH and DETACH commands to allow interacting with multiple + database files at the same time.
    • +
    • Added support for TEMP triggers and indices.
    • +
    • Added support for in-memory databases.
    • +
    • Removed the experimental sqlite_open_aux_file(). Its function is + subsumed in the new ATTACH command.
    • +
    • The precedence order for ON CONFLICT clauses was changed so that + ON CONFLICT clauses on BEGIN statements have a higher precedence than + ON CONFLICT clauses on constraints. +
    • Many, many bug fixes and compatibility enhancements.
    • +

    +

    2003 Feb 16 (2.8.0)

      +
    • Modified the journal file format to make it more resistant to corruption + that can occur after an OS crash or power failure.
    • +
    • Added a new C/C++ API that does not use callback for returning data.
    • +

    +

    2003 Jan 25 (2.7.6)

      +
    • Performance improvements. The library is now much faster.
    • +
    • Added the sqlite_set_authorizer() API. Formal documentation has + not been written - see the source code comments for instructions on + how to use this function.
    • +
    • Fix a bug in the GLOB operator that was preventing it from working + with upper-case letters.
    • +
    • Various minor bug fixes.
    • +

    +

    2002 Dec 27 (2.7.5)

      +
    • Fix an uninitialized variable in pager.c which could (with a probability + of about 1 in 4 billion) result in a corrupted database.
    • +

    +

    2002 Dec 17 (2.7.4)

      +
    • Database files can now grow to be up to 2^41 bytes. The old limit + was 2^31 bytes.
    • +
    • The optimizer will now scan tables in reverse order if doing so will + satisfy an ORDER BY ... DESC clause.
    • +
    • The full pathname of the database file is now remembered even if + a relative path is passed into sqlite_open(). This allows + the library to continue operating correctly after a chdir().
    • +
    • Speed improvements in the VDBE.
    • +
    • Lots of little bug fixes.
    • +

    +

    2002 Oct 30 (2.7.3)

      +
    • Various compiler compatibility fixes.
    • +
    • Fix a bug in the "expr IN ()" operator.
    • +
    • Accept column names in parentheses.
    • +
    • Fix a problem with string memory management in the VDBE
    • +
    • Fix a bug in the "table_info" pragma"
    • +
    • Export the sqlite_function_type() API function in the Windows DLL
    • +
    • Fix locking behavior under Windows
    • +
    • Fix a bug in LEFT OUTER JOIN
    • +

    +

    2002 Sep 25 (2.7.2)

      +
    • Prevent journal file overflows on huge transactions.
    • +
    • Fix a memory leak that occurred when sqlite_open() failed.
    • +
    • Honor the ORDER BY and LIMIT clause of a SELECT even if the + result set is used for an INSERT.
    • +
    • Do not put write locks on the file used to hold TEMP tables.
    • +
    • Added documentation on SELECT DISTINCT and on how SQLite handles NULLs.
    • +
    • Fix a problem that was causing poor performance when many thousands + of SQL statements were executed by a single sqlite_exec() call.
    • +

    +

    2002 Aug 31 (2.7.1)

      +
    • Fix a bug in the ORDER BY logic that was introduced in version 2.7.0
    • +
    • C-style comments are now accepted by the tokenizer.
    • +
    • INSERT runs a little faster when the source is a SELECT statement.
    • +

    +

    2002 Aug 25 (2.7.0)

      +
    • Make a distinction between numeric and text values when sorting. + Text values sort according to memcmp(). Numeric values sort in + numeric order.
    • +
    • Allow multiple simultaneous readers under Windows by simulating + the reader/writers locks that are missing from Win95/98/ME.
    • +
    • An error is now returned when trying to start a transaction if + another transaction is already active.
    • +

    +

    2002 Aug 12 (2.6.3)

      +
    • Add the ability to read both little-endian and big-endian databases. + So a database created under SunOS or Mac OS X can be read and written + under Linux or Windows and vice versa.
    • +
    • Convert to the new website: http://www.sqlite.org/
    • +
    • Allow transactions to span Linux Threads
    • +
    • Bug fix in the processing of the ORDER BY clause for GROUP BY queries
    • +

    +

    2002 Jly 30 (2.6.2)

      +
    • Text files read by the COPY command can now have line terminators + of LF, CRLF, or CR.
    • +
    • SQLITE_BUSY is handled correctly if encountered during database + initialization.
    • +
    • Fix to UPDATE triggers on TEMP tables.
    • +
    • Documentation updates.
    • +

    +

    2002 Jly 19 (2.6.1)

      +
    • Include a static string in the library that responds to the RCS + "ident" command and which contains the library version number.
    • +
    • Fix an assertion failure that occurred when deleting all rows of + a table with the "count_changes" pragma turned on.
    • +
    • Better error reporting when problems occur during the automatic + 2.5.6 to 2.6.0 database format upgrade.
    • +

    +

    2002 Jly 17 (2.6.0)

      +
    • Change the format of indices to correct a design flaw that originated + with version 2.1.0. *** This is an incompatible + file format change *** When version 2.6.0 or later of the + library attempts to open a database file created by version 2.5.6 or + earlier, it will automatically and irreversibly convert the file format. + Make backup copies of older database files before opening them with + version 2.6.0 of the library. +
    • +

    +

    2002 Jly 7 (2.5.6)

      +
    • Fix more problems with rollback. Enhance the test suite to exercise + the rollback logic extensively in order to prevent any future problems. +
    • +

    +

    2002 Jly 6 (2.5.5)

      +
    • Fix a bug which could cause database corruption during a rollback. This bug was introduced in version 2.4.0 by the freelist optimization of checkin [410].
    • +
    • Fix a bug in aggregate functions for VIEWs.
    • +
    • Other minor changes and enhancements.
    • +

    +

    2002 Jly 1 (2.5.4)

      +
    • Make the "AS" keyword optional again.
    • +
    • The datatype of columns now appear in the 4th argument to the + callback.
    • +
    • Added the sqlite_open_aux_file() API, though it is still + mostly undocumented and untested.
    • +
    • Added additional test cases and fixed a few bugs that those + test cases found.
    • +

    +

    2002 Jun 24 (2.5.3)

      +
    • Bug fix: Database corruption can occur due to the optimization + that was introduced in version 2.4.0 (check-in [410]). The problem + should now be fixed. The use of versions 2.4.0 through 2.5.2 is + not recommended.
    • +

    +

    2002 Jun 24 (2.5.2)

      +
    • Added the new SQLITE_TEMP_MASTER table which records the schema + for temporary tables in the same way that SQLITE_MASTER does for + persistent tables.
    • +
    • Added an optimization to UNION ALL
    • +
    • Fixed a bug in the processing of LEFT OUTER JOIN
    • +
    • The LIMIT clause now works on subselects
    • +
    • ORDER BY works on subselects
    • +
    • There is a new TypeOf() function used to determine if an expression + is numeric or text.
    • +
    • Autoincrement now works for INSERT from a SELECT.
    • +

    +

    2002 Jun 19 (2.5.1)

      +
    • The query optimizer now attempts to implement the ORDER BY clause using an index. Sorting is still used if no suitable index is available.
    • +

    +

    2002 Jun 17 (2.5.0)

      +
    • Added support for row triggers.
    • +
    • Added SQL-92 compliant handling of NULLs.
    • +
    • Add support for the full SQL-92 join syntax and LEFT OUTER JOINs.
    • +
    • Double-quoted strings interpreted as column names not text literals.
    • +
    • Parse (but do not implement) foreign keys.
    • +
    • Performance improvements in the parser, pager, and WHERE clause code + generator.
    • +
    • Make the LIMIT clause work on subqueries. (ORDER BY still does not + work, though.)
    • +
    • Added the "%Q" expansion to sqlite_*_printf().
    • +
    • Bug fixes too numerous to mention (see the change log).
    • +
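    The "%Q" conversion mentioned above quotes a string value, doubles any embedded quotes, and prints NULL for a NULL pointer. A minimal sketch, written against the current sqlite3_mprintf() spelling of the printf-family routine (the 2.x library used sqlite_mprintf()); the "authors" table is made up for illustration:

        #include <stdio.h>
        #include <sqlite3.h>

        int main(void){
          const char *zName = "O'Reilly";
          /* %Q adds the outer quotes, doubles embedded quotes, and prints
          ** NULL when the argument is a NULL pointer. */
          char *zSql = sqlite3_mprintf(
              "INSERT INTO authors(name) VALUES(%Q);", zName);
          printf("%s\n", zSql);  /* INSERT INTO authors(name) VALUES('O''Reilly'); */
          sqlite3_free(zSql);
          return 0;
        }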

    +

    2002 May 09 (2.4.12)

      +
    • Added logic to detect when the library API routines are called out + of sequence.
    • +

    +

    2002 May 08 (2.4.11)

      +
    • Bug fix: Column names in the result set were not being generated + correctly for some (rather complex) VIEWs. This could cause a + segfault under certain circumstances.
    • +

    +

    2002 May 02 (2.4.10)

      +
    • Bug fix: Generate correct column headers when a compound SELECT is used + as a subquery.
    • +
    • Added the sqlite_encode_binary() and sqlite_decode_binary() functions to + the source tree. But they are not yet linked into the library.
    • +
    • Documentation updates.
    • +
    • Export the sqlite_changes() function from Windows DLLs.
    • +
    • Bug fix: Do not attempt the subquery flattening optimization on queries + that lack a FROM clause. To do so causes a segfault.
    • +

    +

    2002 Apr 21 (2.4.9)

      +
    • Fix a bug that was causing the precompiled binary of SQLITE.EXE to + report "out of memory" under Windows 98.
    • +

    +

    2002 Apr 20 (2.4.8)

      +
    • Make sure VIEWs are created after their corresponding TABLEs in the + output of the .dump command in the shell.
    • +
    • Speed improvements: Do not do synchronous updates on TEMP tables.
    • +
    • Many improvements and enhancements to the shell.
    • +
    • Make the GLOB and LIKE operators functions that can be overridden + by a programmer. This allows, for example, the LIKE operator to + be changed to be case sensitive.
    • +

    +

    2002 Apr 06 (2.4.7)

      +
    • Add the ability to put TABLE.* in the column list of a + SELECT statement.
    • +
    • Permit SELECT statements without a FROM clause.
    • +
    • Added the last_insert_rowid() SQL function.
    • +
    • Do not count rows where the IGNORE conflict resolution occurs in + the row count.
    • +
    • Make sure function expressions in the VALUES clause of an INSERT are correct.
    • +
    • Added the sqlite_changes() API function to return the number of rows that changed in the most recent operation. (See the sketch after this list.)
    • +
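    A minimal sketch of the two interfaces added above, shown with their SQLite 3 spellings (sqlite3_changes() and sqlite3_last_insert_rowid()); the table name "t" is invented for illustration:

        #include <stdio.h>
        #include <sqlite3.h>

        int main(void){
          sqlite3 *db;
          if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
          sqlite3_exec(db, "CREATE TABLE t(x);", 0, 0, 0);
          sqlite3_exec(db, "INSERT INTO t VALUES(1);", 0, 0, 0);
          printf("last rowid:   %lld\n",
                 (long long)sqlite3_last_insert_rowid(db));
          sqlite3_exec(db, "INSERT INTO t VALUES(2);", 0, 0, 0);
          sqlite3_exec(db, "UPDATE t SET x = x+1;", 0, 0, 0);
          /* Two rows were touched by the UPDATE, so this prints 2. */
          printf("rows changed: %d\n", sqlite3_changes(db));
          sqlite3_close(db);
          return 0;
        }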

    +

    2002 Apr 02 (2.4.6)

      +
    • Bug fix: Correctly handle terms in the WHERE clause of a join that + do not contain a comparison operator.
    • +

    +

    2002 Apr 01 (2.4.5)

      +
    • Bug fix: Correctly handle functions that appear in the WHERE clause + of a join.
    • +
    • When the PRAGMA vdbe_trace=ON is set, correctly print the P3 operand + value when it is a pointer to a structure rather than a pointer to + a string.
    • +
    • When inserting an explicit NULL into an INTEGER PRIMARY KEY, convert + the NULL value into a unique key automatically.
    • +

    +

    2002 Mar 24 (2.4.4)

      +
    • Allow "VIEW" to be a column name
    • +
    • Added support for CASE expressions (patch from Dan Kennedy)
    • +
    • Added RPMS to the delivery (patches from Doug Henry)
    • +
    • Fix typos in the documentation
    • +
    • Cut over configuration management to a new CVS repository with + its own CVSTrac bug tracking system.
    • +

    +

    2002 Mar 22 (2.4.3)

      +
    • Fix a bug in SELECT that occurs when a compound SELECT is used as a + subquery in the FROM of a SELECT.
    • +
    • The sqlite_get_table() function now returns an error if you + give it two or more SELECTs that return different numbers of columns.
    • +

    +

    2002 Mar 14 (2.4.2)

      +
    • Bug fix: Fix an assertion failure that occurred when ROWID was a column + in a SELECT statement on a view.
    • +
    • Bug fix: Fix an uninitialized variable in the VDBE that could cause an assertion failure.
    • +
    • Make the os.h header file more robust in detecting when the compile is + for Windows and when it is for Unix.
    • +

    +

    2002 Mar 13 (2.4.1)

      +
    • Using an unnamed subquery in a FROM clause would cause a segfault.
    • +
    • The parser now insists on seeing a semicolon or the end of input before + executing a statement. This avoids an accidental disaster if the + WHERE keyword is misspelled in an UPDATE or DELETE statement.
    • +

    +

    2002 Mar 10 (2.4.0)

      +
    • Change the name of the sanity_check PRAGMA to integrity_check + and make it available in all compiles.
    • +
    • SELECT min() or max() of an indexed column with no WHERE or GROUP BY + clause is handled as a special case which avoids a complete table scan.
    • +
    • Automatically generated ROWIDs are now sequential.
    • +
    • Do not allow dot-commands of the command-line shell to occur in the + middle of a real SQL command.
    • +
    • Modifications to the "lemon" parser generator so that the parser tables + are 4 times smaller.
    • +
    • Added support for user-defined functions implemented in C. (See the sketch after this list.)
    • +
    • Added support for new functions: coalesce(), lower(), + upper(), and random() +
    • Added support for VIEWs.
    • +
    • Added the subquery flattening optimizer.
    • +
    • Modified the B-Tree and Pager modules so that disk pages that do not + contain real data (free pages) are not journaled and are not + written from memory back to the disk when they change. This does not + impact database integrity, since the + pages contain no real data, but it does make large INSERT operations + about 2.5 times faster and large DELETEs about 5 times faster.
    • +
    • Made the CACHE_SIZE pragma persistent
    • +
    • Added the SYNCHRONOUS pragma
    • +
    • Fixed a bug that was causing updates to fail inside of transactions when + the database contained a temporary table.
    • +
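    A sketch of registering a user-defined SQL function, shown with the SQLite 3 entry point sqlite3_create_function(); the 2.4.0-era sqlite_create_function() call took different arguments, and the half() function here is invented purely for illustration:

        #include <sqlite3.h>

        /* half(X) returns X/2.0. */
        static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
          (void)argc;
          sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
        }

        int register_half(sqlite3 *db){
          /* After this call, "SELECT half(x) FROM t;" works like any
          ** built-in function. */
          return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                         halfFunc, 0, 0);
        }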

    +

    2002 Feb 18 (2.3.3)

      +
    • Allow identifiers to be quoted in square brackets, for compatibility + with MS-Access.
    • +
    • Added support for sub-queries in the FROM clause of a SELECT.
    • +
    • More efficient implementation of sqliteFileExists() under Windows. + (by Joel Luscy)
    • +
    • The VALUES clause of an INSERT can now contain expressions, including + scalar SELECT clauses.
    • +
    • Added support for CREATE TABLE AS SELECT
    • +
    • Bug fix: Creating and dropping a table all within a single + transaction was not working.
    • +

    +

    2002 Feb 14 (2.3.2)

      +
    • Bug fix: There was an incorrect assert() in pager.c. The real code was + all correct (as far as is known) so everything should work OK if you + compile with -DNDEBUG=1. When asserts are not disabled, there + could be a fault.
    • +

    +

    2002 Feb 13 (2.3.1)

      +
    • Bug fix: An assertion was failing if "PRAGMA full_column_names=ON;" was + set and you did a query that used a rowid, like this: + "SELECT rowid, * FROM ...".
    • +

    +

    2002 Jan 30 (2.3.0)

      +
    • Fix a serious bug in the INSERT command which was causing data to go + into the wrong columns if the data source was a SELECT and the INSERT + clauses specified its columns in some order other than the default.
    • +
    • Added the ability to resolve constraint conflicts in ways other than an abort and rollback. See the documentation on the "ON CONFLICT" clause for details, and the sketch after this list.
    • +
    • Temporary files are now automatically deleted by the operating system + when closed. There are no more dangling temporary files on a program + crash. (If the OS crashes, fsck will delete the file after reboot + under Unix. I do not know what happens under Windows.)
    • +
    • NOT NULL constraints are honored.
    • +
    • The COPY command puts NULLs in columns whose data is '\N'.
    • +
    • In the COPY command, backslash can now be used to escape a newline.
    • +
    • Added the SANITY_CHECK pragma.
    • +
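    A sketch of the conflict-resolution syntax described above, using made-up table and column names; OR IGNORE on the INSERT and ON CONFLICT REPLACE on the column constraint are two of the available policies:

        #include <sqlite3.h>

        int demo_on_conflict(sqlite3 *db){
          return sqlite3_exec(db,
              "CREATE TABLE person(id INTEGER PRIMARY KEY, name TEXT UNIQUE);"
              /* Keep the existing row instead of aborting on a UNIQUE conflict: */
              "INSERT OR IGNORE INTO person(name) VALUES('alice');"
              "INSERT OR IGNORE INTO person(name) VALUES('alice');"
              /* Or attach the policy to the constraint itself: */
              "CREATE TABLE tag(label TEXT UNIQUE ON CONFLICT REPLACE);",
              0, 0, 0);
        }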

    +

    2002 Jan 28 (2.2.5)

      +
    • Important bug fix: the IN operator was not working if either the + left-hand or right-hand side was derived from an INTEGER PRIMARY KEY.
    • +
    • Do not escape the backslash '\' character in the output of the + sqlite command-line access program.
    • +

    +

    2002 Jan 22 (2.2.4)

      +
    • The label to the right of an AS in the column list of a SELECT can now + be used as part of an expression in the WHERE, ORDER BY, GROUP BY, and/or + HAVING clauses.
    • +
    • Fix a bug in the -separator command-line option to the sqlite + command.
    • +
    • Fix a problem with the sort order when comparing upper-case strings against + characters greater than 'Z' but less than 'a'.
    • +
    • Report an error if an ORDER BY or GROUP BY expression is constant.
    • +

    +

    2002 Jan 16 (2.2.3)

      +
    • Fix warning messages in VC++ 7.0. (Patches from nicolas352001)
    • +
    • Make the library thread-safe. (The code is there and appears to work + but has not been stressed.)
    • +
    • Added the new sqlite_last_insert_rowid() API function.
    • +

    +

    2002 Jan 13 (2.2.2)

      +
    • Bug fix: An assertion was failing when a temporary table with an index + had the same name as a permanent table created by a separate process.
    • +
    • Bug fix: Updates to tables containing an INTEGER PRIMARY KEY and an + index could fail.
    • +

    +

    2002 Jan 9 (2.2.1)

      +
    • Bug fix: An attempt to delete a single row of a table with a WHERE + clause of "ROWID=x" when no such rowid exists was causing an error.
    • +
    • Bug fix: Passing in a NULL as the 3rd parameter to sqlite_open() + would sometimes cause a coredump.
    • +
    • Bug fix: DROP TABLE followed by a CREATE TABLE with the same name all + within a single transaction was causing a coredump.
    • +
    • Makefile updates from A. Rottmann
    • +

    +

    2001 Dec 22 (2.2.0)

      +
    • Columns of type INTEGER PRIMARY KEY are actually used as the primary key in the underlying B-Tree representation of the table. (See the sketch after this list.)
    • +
    • Several obscure, unrelated bugs were found and fixed while implementing the integer primary key change of the previous bullet.
    • +
    • Added the ability to specify "*" as part of a larger column list in + the result section of a SELECT statement. For example: + "SELECT rowid, * FROM table1;".
    • +
    • Updates to comments and documentation.
    • +
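    A sketch of what the INTEGER PRIMARY KEY change means in practice: the declared column and the rowid name the same B-Tree key. The table name is invented for illustration:

        #include <sqlite3.h>

        int demo_integer_primary_key(sqlite3 *db){
          return sqlite3_exec(db,
              "CREATE TABLE note(id INTEGER PRIMARY KEY, body TEXT);"
              "INSERT INTO note(id, body) VALUES(42, 'hello');"
              /* "id" and "rowid" are the same value here, so both of
              ** these return the row whose key is 42: */
              "SELECT rowid, * FROM note;"
              "SELECT * FROM note WHERE id=42;",
              0, 0, 0);
        }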

    +

    2001 Dec 14 (2.1.7)

      +
    • Fix a bug in CREATE TEMPORARY TABLE which was causing the + table to be initially allocated in the main database file instead + of in the separate temporary file. This bug could cause the library + to suffer an assertion failure and it could cause "page leaks" in the + main database file. +
    • Fix a bug in the b-tree subsystem that could sometimes cause the first + row of a table to be repeated during a database scan.
    • +

    +

    2001 Dec 14 (2.1.6)

      +
    • Fix the locking mechanism yet again to prevent sqlite_exec() from returning SQLITE_PROTOCOL unnecessarily. This time the bug was a race condition in the locking code. This change affects both POSIX and Windows users.
    • +

    +

    2001 Dec 6 (2.1.5)

      +
    • Fix for another problem (unrelated to the one fixed in 2.1.4) that sometimes causes sqlite_exec() to return SQLITE_PROTOCOL unnecessarily. This time the bug was in the POSIX locking code and should not affect Windows users.
    • +

    +

    2001 Dec 4 (2.1.4)

      +
    • Sometimes sqlite_exec() would return SQLITE_PROTOCOL when it + should have returned SQLITE_BUSY.
    • +
    • The fix to the previous bug uncovered a deadlock which was also + fixed.
    • +
    • Add the ability to put a single .command in the second argument + of the sqlite shell
    • +
    • Updates to the FAQ
    • +

    +

    2001 Nov 23 (2.1.3)

      +
    • Fix the behavior of comparison operators + (ex: "<", "==", etc.) + so that they are consistent with the order of entries in an index.
    • +
    • Correct handling of integers in SQL expressions that are larger than + what can be represented by the machine integer.
    • +

    +

    2001 Nov 22 (2.1.2)

      +
    • Changes to support 64-bit architectures.
    • +
    • Fix a bug in the locking protocol.
    • +
    • Fix a bug that could (rarely) cause the database to become + unreadable after a DROP TABLE due to corruption to the SQLITE_MASTER + table.
    • +
    • Change the code so that version 2.1.1 databases that were rendered + unreadable by the above bug can be read by this version of + the library even though the SQLITE_MASTER table is (slightly) + corrupted.
    • +

    +

    2001 Nov 13 (2.1.1)

      +
    • Bug fix: Sometimes arbitrary strings were passed to the callback + function when the actual value of a column was NULL.
    • +

    +

    2001 Nov 12 (2.1.0)

      +
    • Change the format of data records so that records up to 16MB in size + can be stored.
    • +
    • Change the format of indices to allow for better query optimization.
    • +
    • Implement the "LIMIT ... OFFSET ..." clause on SELECT statements.
    • +
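    A sketch of LIMIT ... OFFSET ... paging through a hypothetical "log" table, using sqlite3_exec() from the SQLite 3 API:

        #include <sqlite3.h>

        static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
          (void)pArg; (void)nCol; (void)azVal; (void)azCol;
          return 0;   /* each row of the requested page lands here */
        }

        int fetch_second_page(sqlite3 *db){
          /* Rows 11..20 when ordered by id. */
          return sqlite3_exec(db,
              "SELECT id, msg FROM log ORDER BY id LIMIT 10 OFFSET 10;",
              print_row, 0, 0);
        }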

    +

    2001 Nov 3 (2.0.8)

      +
    • Made selected parameters in API functions const. This should + be fully backwards compatible.
    • +
    • Documentation updates
    • +
    • Simplify the design of the VDBE by restricting the number of sorters + and lists to 1. + In practice, no more than one sorter and one list was ever used anyhow. +
    • +

    +

    2001 Oct 21 (2.0.7)

      +
    • Any UTF-8 character or ISO8859 character can be used as part of + an identifier.
    • +
    • Patches from Christian Werner to improve ODBC compatibility and to + fix a bug in the round() function.
    • +
    • Plug some memory leaks that used to occur if malloc() failed. We have been and continue to be memory-leak free as long as malloc() works.
    • +
    • Changes to some test scripts so that they work on Windows in + addition to Unix.
    • +

    +

    2001 Oct 19 (2.0.6)

      +
    • Added the EMPTY_RESULT_CALLBACKS pragma
    • +
    • Support for UTF-8 and ISO8859 characters in column and table names.
    • +
    • Bug fix: Compute correct table names when the FULL_COLUMN_NAMES pragma is turned on.
    • +

    +

    2001 Oct 14 (2.0.5)

      +
    • Added the COUNT_CHANGES pragma.
    • +
    • Changes to the FULL_COLUMN_NAMES pragma to help out the ODBC driver.
    • +
    • Bug fix: "SELECT count(*)" was returning NULL for empty tables. + Now it returns 0.
    • +

    +

    2001 Oct 13 (2.0.4)

      +
    • Bug fix: an obscure and relatively harmless bug was causing one of + the tests to fail when gcc optimizations are turned on. This release + fixes the problem.
    • +

    +

    2001 Oct 13 (2.0.3)

      +
    • Bug fix: the sqlite_busy_timeout() function was delaying 1000 + times too long before failing.
    • +
    • Bug fix: an assertion was failing if the disk holding the database + file became full or stopped accepting writes for some other reason. + New tests were added to detect similar problems in the future.
    • +
    • Added new operators: & (bitwise-and) + | (bitwise-or), ~ (ones-complement), + << (shift left), >> (shift right).
    • +
    • Added new functions: round() and abs().
    • +

    +

    2001 Oct 9 (2.0.2)

      +
    • Fix two bugs in the locking protocol. (One was masking the other.)
    • +
    • Removed some unused "#include " that were causing problems + for VC++.
    • +
    • Fixed sqlite.h so that it is usable from C++
    • +
    • Added the FULL_COLUMN_NAMES pragma. When set to "ON", the names of + columns are reported back as TABLE.COLUMN instead of just COLUMN.
    • +
    • Added the TABLE_INFO() and INDEX_INFO() pragmas to help support the + ODBC interface.
    • +
    • Added support for TEMPORARY tables and indices.
    • +

    +

    2001 Oct 2 (2.0.1)

      +
    • Remove some C++ style comments from btree.c so that it will compile + using compilers other than gcc.
    • +
    • The ".dump" output from the shell does not work if there are embedded + newlines anywhere in the data. This is an old bug that was carried + forward from version 1.0. To fix it, the ".dump" output no longer + uses the COPY command. It instead generates INSERT statements.
    • +
    • Extend the expression syntax to support "expr NOT NULL" (with a + space between the "NOT" and the "NULL") in addition to "expr NOTNULL" + (with no space).
    • +

    +

    2001 Sep 28 (2.0.0)

      +
    • Automatically build binaries for Linux and Windows and put them on + the website.
    • +

    2001 Sep 28 (2.0-alpha-4)

      +
    • Incorporate makefile patches from A. Rottmann to use LIBTOOL
    • +

    2001 Sep 27 (2.0-alpha-3)

      +
    • SQLite now honors the UNIQUE keyword in CREATE UNIQUE INDEX. Primary + keys are required to be unique.
    • +
    • File format changed back to what it was for alpha-1
    • +
    • Fixes to the rollback and locking behavior
    • +

    2001 Sep 20 (2.0-alpha-2)

      +
    • Initial release of version 2.0. The idea of renaming the library + to "SQLus" was abandoned in favor of keeping the "SQLite" name and + bumping the major version number.
    • +
    • The pager and btree subsystems added back. They are now the only + available backend.
    • +
    • The Dbbe abstraction and the GDBM and memory drivers were removed.
    • +
    • Copyright on all code was disclaimed. The library is now in the + public domain.
    • +

    +

    2001 Jul 23 (1.0.32)

      +
    • Pager and btree subsystems removed. These will be used in a follow-on + SQL server library named "SQLus".
    • +
    • Add the ability to use quoted strings as table and column names in + expressions.
    • +

    +

    2001 Apr 14 (1.0.31)

      +
    • Pager subsystem added but not yet used.
    • +
    • More robust handling of out-of-memory errors.
    • +
    • New tests added to the test suite.
    • +

    +

    2001 Apr 6 (1.0.30)

      +
    • Remove the sqlite_encoding TCL variable that was introduced + in the previous version.
    • +
    • Add options -encoding and -tcl-uses-utf to the + sqlite TCL command.
    • +
    • Add tests to make sure that tclsqlite was compiled using Tcl header + files and libraries that match.
    • +

    +

    2001 Apr 5 (1.0.29)

      +
    • The library now assumes data is stored as UTF-8 if the --enable-utf8 + option is given to configure. The default behavior is to assume + iso8859-x, as it has always done. This only makes a difference for + LIKE and GLOB operators and the LENGTH and SUBSTR functions.
    • +
    • If the library is not configured for UTF-8 and the Tcl library + is one of the newer ones that uses UTF-8 internally, + then a conversion from UTF-8 to iso8859 and + back again is done inside the TCL interface.
    • +

    +

    2001 Apr 4 (1.0.28)

      +
    • Added limited support for transactions. At this point, transactions + will do table locking on the GDBM backend. There is no support (yet) + for rollback or atomic commit.
    • +
    • Added special column names ROWID, OID, and _ROWID_ that refer to the + unique random integer key associated with every row of every table.
    • +
    • Additional tests added to the regression suite to cover the new ROWID + feature and the TCL interface bugs mentioned below.
    • +
    • Changes to the "lemon" parser generator to help it work better when + compiled using MSVC.
    • +
    • Bug fixes in the TCL interface identified by Oleg Oleinick.
    • +

    +

    2001 Mar 20 (1.0.27)

      +
    • When doing DELETE and UPDATE, the library used to write the record + numbers of records to be deleted or updated into a temporary file. + This is changed so that the record numbers are held in memory.
    • +
    • The DELETE command without a WHERE clause just removes the database files from the disk, rather than going through and deleting record by record.
    • +

    +

    2001 Mar 20 (1.0.26)

      +
    • A serious bug fixed on Windows. Windows users should upgrade. + No impact to Unix.
    • +

    +

    2001 Mar 15 (1.0.25)

      +
    • Modify the test scripts to identify tests that depend on system + load and processor speed and + to warn the user that a failure of one of those (rare) tests does + not necessarily mean the library is malfunctioning. No changes to + code. +
    • +

    +

    2001 Mar 14 (1.0.24)

      +
    • Fix a bug which was causing the UPDATE command to fail on systems where "malloc(0)" returns NULL. The problem does not appear on Windows, Linux, or HPUX but does cause the library to fail on QNX.
    • +

    +

    2001 Feb 19 (1.0.23)

      +
    • An unrelated (and minor) bug fix from Mark Muranwski: the algorithm for figuring out where to put temporary files for a "memory:" database was not working quite right.
    • +

    +

    2001 Feb 19 (1.0.22)

      +
    • The previous fix was not quite right. This one seems to work better. +
    • +

    +

    2001 Feb 19 (1.0.21)

      +
    • The UPDATE statement was not working when the WHERE clause contained + some terms that could be satisfied using indices and other terms that + could not. Fixed.
    • +

    +

    2001 Feb 11 (1.0.20)

      +
    • Merge development changes into the main trunk. Future work toward + using a BTree file structure will use a separate CVS source tree. This + CVS tree will continue to support the GDBM version of SQLite only.
    • +

    +

    2001 Feb 6 (1.0.19)

      +
    • Fix a strange (but valid) C declaration that was causing problems + for QNX. No logical changes.
    • +

    +

    2001 Jan 4 (1.0.18)

      +
    • Print the offending SQL statement when an error occurs.
    • +
    • Do not require commas between constraints in CREATE TABLE statements.
    • +
    • Added the "-echo" option to the shell.
    • +
    • Changes to comments.
    • +

    +

    2000 Dec 10 (1.0.17)

      +
    • Rewrote sqlite_complete() to make it faster.
    • +
    • Minor tweaks to other code to make it run a little faster.
    • +
    • Added new tests for sqlite_complete() and for memory leaks.
    • +

    +

    2000 Dec 4 (1.0.16)

      +
    • Documentation updates. Mostly fixing of typos and spelling errors.
    • +

    +

    2000 Oct 23 (1.0.15)

      +
    • Documentation updates
    • +
    • Some sanity checking code was removed from the inner loop of vdbe.c + to help the library to run a little faster. The code is only + removed if you compile with -DNDEBUG.
    • +

    +

    2000 Oct 19 (1.0.14)

      +
    • Added a "memory:" backend driver that stores its database in an + in-memory hash table.
    • +

    +

    2000 Oct 18 (1.0.13)

      +
    • Break out the GDBM driver into a separate file in anticipation of adding new drivers.
    • +
    • Allow the name of a database to be prefixed by the driver type. + For now, the only driver type is "gdbm:".
    • +

    +

    2000 Oct 16 (1.0.12)

      +
    • Fixed an off-by-one error that was causing a coredump in + the '%q' format directive of the new + sqlite_..._printf() routines.
    • +
    • Added the sqlite_interrupt() interface.
    • +
    • In the shell, sqlite_interrupt() is invoked when the + user presses Control-C
    • +
    • Fixed some instances where sqlite_exec() was + returning the wrong error code.
    • +

    +

    2000 Oct 11 (1.0.10)

      +
    • Added notes on how to compile for Windows95/98.
    • +
    • Removed a few variables that were not being used. Etc.
    • +

    +

    2000 Oct 8 (1.0.9)

      +
    • Added the sqlite_..._printf() interface routines.
    • +
    • Modified the sqlite shell program to use the new interface + routines.
    • +
    • Modified the sqlite shell program to print the schema for + the built-in SQLITE_MASTER table, if explicitly requested.
    • +

    +

    2000 Sep 30 (1.0.8)

      +
    • Begin writing documentation on the TCL interface.
    • +

    2000 Sep 29 (Not Released)

      +
    • Added the sqlite_get_table() API. (See the sketch after this list.)
    • +
    • Updated the documentation to reflect the above change.
    • +
    • Modified the sqlite shell to make use of the new + sqlite_get_table() API in order to print a list of tables + in multiple columns, similar to the way "ls" prints filenames.
    • +
    • Modified the sqlite shell to print a semicolon at the + end of each CREATE statement in the output of the ".schema" command.
    • +
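    A sketch of the get_table interface added above, shown with its SQLite 3 spelling (sqlite3_get_table()); it returns the whole result set as an array of strings, which is how the shell builds its multi-column table listing:

        #include <stdio.h>
        #include <sqlite3.h>

        int list_tables(sqlite3 *db){
          char **azResult = 0;
          char *zErr = 0;
          int nRow = 0, nCol = 0, i, rc;
          rc = sqlite3_get_table(db,
              "SELECT name FROM sqlite_master WHERE type='table';",
              &azResult, &nRow, &nCol, &zErr);
          if( rc==SQLITE_OK ){
            for(i=1; i<=nRow; i++){        /* row 0 holds the column names */
              printf("%s\n", azResult[i*nCol]);
            }
          }
          sqlite3_free_table(azResult);
          sqlite3_free(zErr);
          return rc;
        }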

    2000 Sep 21 (Not Released)

      +
    • Change the tclsqlite "eval" method to return a list of results if + no callback script is specified.
    • +
    • Change tclsqlite.c to use the Tcl_Obj interface
    • +
    • Add tclsqlite.c to the libsqlite.a library
    • +

    2000 Sep 13 (Version 1.0.5)

      +
    • Changed the print format for floating point values from "%g" to "%.15g". +
    • +
    • Changed the comparison function so that numbers in exponential notation + (ex: 1.234e+05) sort in numerical order.
    • +

    2000 Aug 28 (Version 1.0.4)

      +
    • Added functions length() and substr().
    • +
    • Fix a bug in the sqlite shell program that was causing + a coredump when the output mode was "column" and the first row + of data contained a NULL.
    • +

    2000 Aug 22 (Version 1.0.3)

      +
    • In the sqlite shell, print the "Database opened READ ONLY" message + to stderr instead of stdout.
    • +
    • In the sqlite shell, now print the version number on initial startup.
    • +
    • Add the sqlite_version[] string constant to the library
    • +
    • Makefile updates
    • +
    • Bug fix: incorrect VDBE code was being generated for the following + circumstance: a query on an indexed table containing a WHERE clause with + an IN operator that had a subquery on its right-hand side.
    • +

    2000 Aug 18 (Version 1.0.1)

      +
    • Fix a bug in the configure script.
    • +
    • Minor revisions to the website.
    • +

    2000 Aug 17 (Version 1.0)

      +
    • Change the sqlite program so that it can read + databases for which it lacks write permission. (It used to + refuse all access if it could not write.)
    • +

    2000 Aug 9

      +
    • Treat carriage returns as white space.
    • +

    2000 Aug 8

      +
    • Added pattern matching to the ".table" command in the "sqlite" +command shell.
    • +

    2000 Aug 4

      +
    • Documentation updates
    • +
    • Added "busy" and "timeout" methods to the Tcl interface
    • +

    2000 Aug 3

      +
    • File format version number was being stored in sqlite_master.tcl + multiple times. This was harmless, but unnecessary. It is now fixed.
    • +

    2000 Aug 2

      +
    • The file format for indices was changed slightly in order to work + around an inefficiency that can sometimes come up with GDBM when + there are large indices having many entries with the same key. + ** Incompatible Change **
    • +

    2000 Aug 1

      +
    • The parser's stack was overflowing on a very long UPDATE statement. + This is now fixed.
    • +

    2000 July 31

      +
    • Finish the VDBE tutorial.
    • +
    • Added documentation on compiling to WindowsNT.
    • +
    • Fix a configuration program for WindowsNT.
    • +
    • Fix a configuration problem for HPUX.
    • +

    2000 July 29

      +
    • Better labels on column names of the result.
    • +

    2000 July 28

      +
    • Added the sqlite_busy_handler() and sqlite_busy_timeout() interfaces. (See the sketch after this list.)
    • +
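    A sketch of the busy-handling interfaces, shown with their SQLite 3 spellings (sqlite3_busy_timeout() and sqlite3_busy_handler()):

        #include <sqlite3.h>

        static int my_busy(void *pArg, int nTries){
          (void)pArg;
          return nTries < 5;   /* non-zero means "retry"; 0 gives up with SQLITE_BUSY */
        }

        void configure_busy(sqlite3 *db){
          sqlite3_busy_timeout(db, 2000);        /* retry for up to 2000 ms, or...  */
          sqlite3_busy_handler(db, my_busy, 0);  /* ...install a custom handler
                                                 ** (the two calls override each other) */
        }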

    2000 June 23

    2000 June 21

      +
    • Clean up comments and variable names. Changes to documentation. + No functional changes to the code.
    • +

    2000 June 19

      +
    • Column names in UPDATE statements were case sensitive. + This mistake has now been fixed.
    • +

    2000 June 16

      +
    • Added the concatenate string operator (||)
    • +

    2000 June 12

      +
    • Added the fcnt() function to the SQL interpreter. The fcnt() function + returns the number of database "Fetch" operations that have occurred. + This function is designed for use in test scripts to verify that + queries are efficient and appropriately optimized. Fcnt() has no other + useful purpose, as far as I know.
    • +
    • Added a bunch more tests that take advantage of the new fcnt() function. + The new tests did not uncover any new problems.
    • +

    2000 June 8

      +
    • Added lots of new test cases
    • +
    • Fix a few bugs discovered while adding test cases
    • +
    • Begin adding lots of new documentation
    • +

    2000 June 6

      +
    • Added compound select operators: UNION, UNION ALL, +INTERSECT, and EXCEPT
    • +
    • Added support for using (SELECT ...) within expressions
    • +
    • Added support for IN and BETWEEN operators
    • +
    • Added support for GROUP BY and HAVING
    • +
    • NULL values are now reported to the callback as a NULL pointer + rather than an empty string.
    • +

    2000 June 3

      +
    • Added support for default values on columns of a table.
    • +
    • Improved test coverage. Fixed a few obscure bugs found by the +improved tests.
    • +

    2000 June 2

      +
    • All database files to be modified by an UPDATE, INSERT or DELETE are +now locked before any changes are made to any files. +This makes it safe (I think) to access +the same database simultaneously from multiple processes.
    • +
    • The code appears stable so we are now calling it "beta".
    • +

    2000 June 1

      +
    • Better support for file locking so that two or more processes +(or threads) +can access the same database simultaneously. More work needed in +this area, though.
    • +

    2000 May 31

      +
    • Added support for aggregate functions (Ex: COUNT(*), MIN(...)) +to the SELECT statement.
    • +
    • Added support for SELECT DISTINCT ...
    • +

    2000 May 30

      +
    • Added the LIKE operator.
    • +
    • Added a GLOB operator: similar to LIKE +but it uses Unix shell globbing wildcards instead of the '%' +and '_' wildcards of SQL.
    • +
    • Added the COPY command patterned after +PostgreSQL so that SQLite +can now read the output of the pg_dump database dump utility +of PostgreSQL.
    • +
    • Added a VACUUM command that calls the gdbm_reorganize() function on the underlying database files.
    • +
    • And many, many bug fixes...
    • +

    2000 May 29

      +
    • Initial Public Release of Alpha code
    • +

    + +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/changes.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/changes.tcl --- sqlite3-3.4.2/www/changes.tcl 2007-08-13 17:04:28.000000000 +0100 +++ sqlite3-3.6.16/www/changes.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,1819 +0,0 @@ -# -# Run this script to generated a changes.html output file -# -source common.tcl -header {SQLite changes} -puts { -

    -This page provides a high-level summary of changes to SQLite. -For more detail, refer to the checkin logs generated by -CVS at - -http://www.sqlite.org/cvstrac/timeline. -

    - -
    -} - - -proc chng {date desc} { - if {[regexp {\(([0-9.]+)\)} $date all vers]} { - set label [string map {. _} $vers] - puts "" - } - puts "
    $date
    " - regsub -all {[Tt]icket #(\d+)} $desc \ - {
    \0} desc - puts "

      $desc

    " - puts "
    " -} - -chng {2007 August 13 (3.4.2)} { -
  • Fix a database corruption bug that might occur if a ROLLBACK command -is executed in auto-vacuum mode -and a very small -soft_heap_limit is set. -Ticket #2565. - -
  • Add the ability to run a full regression test with a small -soft_heap_limit. - -
  • Fix other minor problems with using small soft heap limits. - -
  • Work-around for -GCC bug 32575. - -
  • Improved error detection of misused aggregate functions. - -
  • Improvements to the amalgamation generator script so that all symbols -are prefixed with either SQLITE_PRIVATE or SQLITE_API. -} - -chng {2007 July 20 (3.4.1)} { -
  • Fix a bug in VACUUM that can lead to - - database corruption if two - processes are connected to the database at the same time and one - VACUUMs then the other then modifies the database.
  • -
  • The expression "+column" is now considered the same as "column" - when computing the collating sequence to use on the expression.
  • -
  • In the TCL language interface, - "@variable" instead of "$variable" always binds as a blob.
  • -
  • Added PRAGMA freelist_count - for determining the current size of the freelist.
  • -
  • The - PRAGMA auto_vacuum=incremental setting is now persistent.
  • -
  • Add FD_CLOEXEC to all open files under unix.
  • -
  • Fix a bug in the - min()/max() optimization when applied to - descending indices.
  • -
  • Make sure the TCL language interface works correctly with 64-bit - integers on 64-bit machines.
  • -
  • Allow the value -9223372036854775808 as an integer literal in SQL - statements.
  • -
  • Add the capability of "hidden" columns in virtual tables.
  • -
  • Use the macro SQLITE_PRIVATE (defaulting to "static") on all - internal functions in the amalgamation.
  • -
  • Add pluggable tokenizers and ICU - tokenization support to FTS2
  • -
  • Other minor bug fixes and documentation enhancements
  • -} - -chng {2007 June 18 (3.4.0)} { -
  • Fix a bug that can lead to database corruption if an SQLITE_BUSY error - occurs in the middle of an explicit transaction and that transaction - is later committed. - Ticket #2409. - See the - - CorruptionFollowingBusyError wiki page for details. -
  • Fix a bug that can lead to database corruption if autovacuum mode is - on and a malloc() failure follows a CREATE TABLE or CREATE INDEX statement - which itself follows a cache overflow inside a transaction. See - ticket #2418. -
  • -
  • Added explicit upper bounds on the sizes and - quantities of things SQLite can process. This change might cause - compatibility problems for - applications that use SQLite in the extreme, which is why the current - release is 3.4.0 instead of 3.3.18.
  • -
  • Added support for - Incremental BLOB I/O.
  • -
  • Added the zeroblob API - and the zeroblob() SQL function.
  • -
  • Added support for - Incremental Vacuum.
  • -
  • Added the SQLITE_MIXED_ENDIAN_64BIT_FLOAT compile-time option to support ARM7 processors with goofy endianness.
  • -
  • Removed all instances of sprintf() and strcpy() from the core library.
  • -
  • Added support for - International Components for Unicode (ICU) to the full-text search - extensions. -

    -

      -
    • In the windows OS driver, reacquire a SHARED lock if an attempt to - acquire an EXCLUSIVE lock fails. Ticket #2354
    • -
    • Fix the REPLACE() function so that it returns NULL if the second argument - is an empty string. Ticket #2324.
    • -
    • Document the hazards of type conversions in sqlite3_column_blob() and related APIs. Fix unnecessary type conversions. Ticket #2321.
    • -
    • Internationalization of the TRIM() function. Ticket #2323
    • -
    • Use memmove() instead of memcpy() when moving between memory regions - that might overlap. Ticket #2334
    • -
    • Fix an optimizer bug involving subqueries in a compound SELECT that has - both an ORDER BY and a LIMIT clause. Ticket #2339.
    • -
    • Make sure the sqlite3_snprintf() - interface does not zero-terminate the buffer if the buffer size is - less than 1. Ticket #2341
    • -
    • Fix the built-in printf logic so that it prints "NaN" not "Inf" for - floating-point NaNs. Ticket #2345
    • -
    • When converting BLOB to TEXT, use the text encoding of the main database. - Ticket #2349
    • -
    • Keep the full precision of integers (if possible) when casting to - NUMERIC. Ticket #2364
    • -
    • Fix a bug in the handling of UTF16 codepoint 0xE000
    • -
    • Consider explicit collate clauses when matching WHERE constraints - to indices in the query optimizer. Ticket #2391
    • -
    • Fix the query optimizer to correctly handle constant expressions in - the ON clause of a LEFT JOIN. Ticket #2403
    • -
    • Fix the query optimizer to handle rowid comparisons to NULL correctly. Ticket #2404
    • -
    • Fix many potential segfaults that could be caused by malicious SQL statements.
    • -} - -chng {2007 April 25 (3.3.17)} { -
    • When the "write_version" value of the database header is larger than - what the library understands, make the database read-only instead of - unreadable.
    • -
    • Other minor bug fixes
    • -} - -chng {2007 April 18 (3.3.16)} { -
    • Fix a bug that caused VACUUM to fail if NULLs appeared in a - UNIQUE column.
    • -
    • Reinstate performance improvements that were added in 3.3.14 - but regressed in 3.3.15.
    • -
    • Fix problems with the handling of ORDER BY expressions on - compound SELECT statements in subqueries.
    • -
    • Fix a potential segfault when destroying locks on WinCE in - a multi-threaded environment.
    • -
    • Documentation updates.
    • -} - -chng {2007 April 9 (3.3.15)} { -
    • Fix a bug introduced in 3.3.14 that caused a rollback of - CREATE TEMP TABLE to leave the database connection wedged.
    • -
    • Fix a bug that caused an extra NULL row to be returned when - a descending query was interrupted by a change to the database.
    • -
    • The FOR EACH STATEMENT clause on a trigger now causes a syntax - error. It used to be silently ignored.
    • -
    • Fix an obscure and relatively harmless problem that might have caused - a resource leak following an I/O error.
    • -
    • Many improvements to the test suite. Test coverage now exceeded 98%
    • -} - -chng {2007 April 2 (3.3.14)} { -
    • Fix a bug in 3.3.13 that could cause a segfault when the IN operator is used on one term of a two-column index and the right-hand side of the IN operator contains a NULL.
    • -
    • Added a new OS interface method for determining the sector size - of underlying media: sqlite3OsSectorSize().
    • -
    • A new algorithm for statements of the form - INSERT INTO table1 SELECT * FROM table2 - is faster and reduces fragmentation. VACUUM uses statements of - this form and thus runs faster and defragments better.
    • -
    • Performance enhancements through reductions in disk I/O: -
        -
      • Do not read the last page of an overflow chain when - deleting the row - just add that page to the freelist.
      • -
      • Do not store pages being deleted in the - rollback journal.
      • -
      • Do not read in the (meaningless) content of - pages extracted from the freelist.
      • -
      • Do not flush the page cache (and thus avoiding - a cache refill) unless another process changes the underlying - database file.
      • -
      • Truncate rather than delete the rollback journal when committing - a transaction in exclusive access mode, or when committing the TEMP - database.
      • -
    • -
    • Added support for exclusive access mode using - - "PRAGMA locking_mode=EXCLUSIVE"
    • -
    • Use heap space instead of stack space for large buffers in the - pager - useful on embedded platforms with stack-space - limitations.
    • -
    • Add a makefile target "sqlite3.c" that builds an amalgamation containing - the core SQLite library C code in a single file.
    • -
    • Get the library working correctly when compiled - with GCC option "-fstrict-aliasing".
    • -
    • Removed the vestigial SQLITE_PROTOCOL error.
    • -
    • Improvements to test coverage, other minor bugs fixed, - memory leaks plugged, - code refactored and/or recommented in places for easier reading.
    • -} - -chng {2007 February 13 (3.3.13)} { -
    • Add a "fragmentation" measurement in the output of sqlite3_analyzer.
    • -
    • Add the COLLATE operator used to explicitly set the collating sequence -used by an expression. This feature is considered experimental pending -additional testing.
    • -
    • Allow up to 64 tables in a join - the old limit was 32.
    • -
    • Added two new experimental functions: -randomBlob() and -hex(). -Their intended use is to facilitate generating -UUIDs. -
    • -
    • Fix a problem where -PRAGMA count_changes was -causing incorrect results for updates on tables with triggers
    • -
    • Fix a bug in the ORDER BY clause optimizer for joins where the -left-most table in the join is constrained by a UNIQUE index.
    • -
    • Fixed a bug in the "copy" method of the TCL interface.
    • -
    • Bug fixes in fts1 and fts2 modules.
    • -} - -chng {2007 January 27 (3.3.12)} { -
    • Fix another bug in the IS NULL optimization that was added in -version 3.3.9.
    • -
    • Fix an assertion fault that occurred on deeply nested views.
    • -
    • Limit the amount of output that -PRAGMA integrity_check -generates.
    • -
    • Minor syntactic changes to support a wider variety of compilers.
    • -} - -chng {2007 January 22 (3.3.11)} { -
    • Fix another bug in the implementation of the new -sqlite3_prepare_v2() API. -We'll get it right eventually...
    • -
    • Fix a bug in the IS NULL optimization that was added in version 3.3.9 - -the bug was causing incorrect results on certain LEFT JOINs that included -in the WHERE clause an IS NULL constraint for the right table of the -LEFT JOIN.
    • -
    • Make AreFileApisANSI() a no-op macro in winCE since winCE does not -support this function.
    • -} - -chng {2007 January 9 (3.3.10)} { -
    • Fix bugs in the implementation of the new -sqlite3_prepare_v2() API -that can lead to segfaults.
    • -
    • Fix 1-second round-off errors in the - -strftime() function
    • -
    • Enhance the windows OS layer to provide detailed error codes
    • -
    • Work around a win2k problem so that SQLite can use single-character -database file names
    • -
    • The -user_version and -schema_version pragmas -correctly set their column names in the result set
    • -
    • Documentation updates
    • -} - -chng {2007 January 4 (3.3.9)} { -
    • Fix bugs in pager.c that could lead to database corruption if two -processes both try to recover a hot journal at the same instant
    • -
    • Added the sqlite3_prepare_v2() -API.
    • -
    • Fixed the ".dump" command in the command-line shell to show -indices, triggers and views again.
    • -
    • Change the table_info pragma so that it returns NULL for the default -value if there is no default value
    • -
    • Support for non-ASCII characters in win95 filenames
    • -
    • Query optimizer enhancements: -
        -
      • Optimizer does a better job of using indices to satisfy ORDER BY -clauses that sort on the integer primary key
      • -
      • Use an index to satisfy an IS NULL operator in the WHERE clause
      • -
      • Fix a bug that was causing the optimizer to miss an OR optimization -opportunity
      • -
      • The optimizer has more freedom to reorder tables in the FROM clause even if there are LEFT joins.
      • -
      -
    • Extension loading supported added to winCE
    • -
    • Allow constraint names on the DEFAULT clause in a table definition
    • -
    • Added the ".bail" command to the command-line shell
    • -
    • Make CSV (comma-separated value) output from the command-line shell more closely aligned to accepted practice
    • -
    • Experimental FTS2 module added
    • -
    • Use sqlite3_mprintf() instead of strdup() to avoid libc dependencies
    • -
    • VACUUM uses a temporary file in the official TEMP folder, not in the -same directory as the original database
    • -
    • The prefix on temporary filenames on windows is changed from "sqlite" -to "etilqs".
    • -} - -chng {2006 October 9 (3.3.8)} { -
    • Support for full text search using the -FTS1 module -(beta)
    • -
    • Added OS-X locking patches (beta - disabled by default)
    • -
    • Introduce extended error codes and add error codes for various -kinds of I/O errors.
    • -
    • Added support for IF EXISTS on CREATE/DROP TRIGGER/VIEW
    • -
    • Fix the regression test suite so that it works with Tcl8.5
    • -
    • Enhance sqlite3_set_authorizer() to provide notification of calls to - SQL functions.
    • -
    • Added experimental API: sqlite3_auto_extension()
    • -
    • Various minor bug fixes
    • -} - -chng {2006 August 12 (3.3.7)} { -
    • Added support for -virtual tables -(beta)
    • -
    • Added support for - -dynamically loaded extensions (beta)
    • -
    • The -sqlite3_interrupt() -routine can be called for a different thread
    • -
    • Added the MATCH operator.
    • -
    • The default file format is now 1. -} - -chng {2006 June 6 (3.3.6)} { -
    • Plays better with virus scanners on windows
    • -
    • Faster :memory: databases
    • -
    • Fix an obscure segfault in UTF-8 to UTF-16 conversions
    • -
    • Added driver for OS/2
    • -
    • Correct column meta-information returned for aggregate queries
    • -
    • Enhanced output from EXPLAIN QUERY PLAN
    • -
    • LIMIT 0 now works on subqueries
    • -
    • Bug fixes and performance enhancements in the query optimizer
    • -
    • Correctly handle NULL filenames in ATTACH and DETACH
    • -
    • Improved syntax error messages in the parser
    • -
    • Fix type coercion rules for the IN operator
    • -} - -chng {2006 April 5 (3.3.5)} { -
    • CHECK constraints use conflict resolution algorithms correctly.
    • -
    • The SUM() function throws an error on integer overflow.
    • -
    • Choose the column names in a compound query from the left-most SELECT - instead of the right-most.
    • -
    • The sqlite3_create_collation() function - honors the SQLITE_UTF16_ALIGNED flag.
    • -
    • SQLITE_SECURE_DELETE compile-time option causes deletes to overwrite - old data with zeros.
    • -
    • Detect integer overflow in abs().
    • -
    • The random() function provides 64 bits of randomness instead of - only 32 bits.
    • -
    • Parser detects and reports automaton stack overflow.
    • -
    • Change the round() function to return REAL instead of TEXT.
    • -
    • Allow WHERE clause terms on the left table of a LEFT OUTER JOIN to - contain aggregate subqueries.
    • -
    • Skip over leading spaces in text to numeric conversions.
    • -
    • Various minor bug and documentation typo fixes and - performance enhancements.
    • -} - -chng {2006 February 11 (3.3.4)} { -
    • Fix a blunder in the Unix mutex implementation that can lead to -deadlock on multithreaded systems.
    • -
    • Fix an alignment problem on 64-bit machines
    • -
    • Added the fullfsync pragma.
    • -
    • Fix an optimizer bug that could have caused some unusual LEFT OUTER JOINs -to give incorrect results.
    • -
    • The SUM function detects integer overflow and converts to accumulating -an approximate result using floating point numbers
    • -
    • Host parameter names can begin with '@' for compatibility with SQL Server. -
    • -
    • Other miscellaneous bug fixes
    • -} - -chng {2006 January 31 (3.3.3)} { -
    • Removed support for an ON CONFLICT clause on CREATE INDEX - it never -worked correctly so this should not present any backward compatibility -problems.
    • -
    • Authorizer callback now notified of ALTER TABLE ADD COLUMN commands
    • -
    • After any changes to the TEMP database schema, all prepared statements -are invalidated and must be recreated using a new call to -sqlite3_prepare()
    • -
    • Other minor bug fixes in preparation for the first stable release -of version 3.3
    • -} - -chng {2006 January 24 (3.3.2 beta)} { -
    • Bug fixes and speed improvements. Improved test coverage.
    • -
    • Changes to the OS-layer interface: mutexes must now be recursive.
    • -
    • Discontinue the use of thread-specific data for out-of-memory -exception handling
    • -} - -chng {2006 January 16 (3.3.1 alpha)} { -
    • Countless bug fixes
    • -
    • Speed improvements
    • -
    • Database connections can now be used by multiple threads, not just -the thread in which they were created.
    • -} - -chng {2006 January 10 (3.3.0 alpha)} { -
    • CHECK constraints
    • -
    • IF EXISTS and IF NOT EXISTS clauses on CREATE/DROP TABLE/INDEX.
    • -
    • DESC indices
    • -
    • More efficient encoding of boolean values resulting in smaller database -files
    • -
    • More aggressive SQLITE_OMIT_FLOATING_POINT
    • -
    • Separate INTEGER and REAL affinity
    • -
    • Added a virtual function layer for the OS interface
    • -
    • "exists" method added to the TCL interface
    • -
    • Improved response to out-of-memory errors
    • -
    • Database cache can be optionally shared between connections -in the same thread
    • -
    • Optional READ UNCOMMITTED isolation (instead of the default -isolation level of SERIALIZABLE) and table level locking when -database connections share a common cache.
    • -} - -chng {2005 December 19 (3.2.8)} { -
    • Fix an obscure bug that can cause database corruption under the following unusual circumstances: A large INSERT or UPDATE statement which is part of an even larger transaction fails due to a uniqueness constraint but the containing transaction commits.
    • -} - -chng {2005 December 19 (2.8.17)} { -
    • Fix an obscure bug that can cause database corruption under the following unusual circumstances: A large INSERT or UPDATE statement which is part of an even larger transaction fails due to a uniqueness constraint but the containing transaction commits.
    • -} - -chng {2005 September 24 (3.2.7)} { -
    • GROUP BY now considers NULLs to be equal again, as it should -
    • -
    • Now compiles on Solaris and OpenBSD and other Unix variants -that lack the fdatasync() function
    • -
    • Now compiles on MSVC++6 again
    • -
    • Fix uninitialized variables causing malfunctions for various obscure -queries
    • -
    • Correctly compute a LEFT OUTER JOIN that is constrained on the left table only
    • -} - -chng {2005 September 17 (3.2.6)} { -
    • Fix a bug that can cause database corruption if a VACUUM (or - autovacuum) fails and is rolled back on a database that is - larger than 1GiB
    • -
    • LIKE optimization now works for columns with COLLATE NOCASE
    • -
    • ORDER BY and GROUP BY now use bounded memory
    • -
    • Added support for COUNT(DISTINCT expr)
    • -
    • Change the way SUM() handles NULL values in order to comply with - the SQL standard
    • -
    • Use fdatasync() instead of fsync() where possible in order to speed - up commits slightly
    • -
    • Use of the CROSS keyword in a join turns off the table reordering - optimization
    • -
    • Added the experimental and undocumented EXPLAIN QUERY PLAN capability
    • -
    • Use the unicode API in windows
    • -} - -chng {2005 August 27 (3.2.5)} { -
    • Fix a bug affecting DELETE and UPDATE statements that changed more than 40960 rows.
    • -
    • Change the makefile so that it no longer requires GNUmake extensions
    • -
    • Fix the --enable-threadsafe option on the configure script
    • -
    • Fix a code generator bug that occurs when the left-hand side of an IN -operator is constant and the right-hand side is a SELECT statement
    • -
    • The PRAGMA synchronous=off statement now disables syncing of the -master journal file in addition to the normal rollback journals
    • -} - -chng {2005 August 24 (3.2.4)} { -
    • Fix a bug introduced in the previous release -that can cause a segfault while generating code -for complex WHERE clauses.
    • -
    • Allow floating point literals to begin or end with a decimal point.
    • -} - -chng {2005 August 21 (3.2.3)} { -
    • Added support for the CAST operator
    • -
    • Tcl interface allows BLOB values to be transferred to user-defined -functions
    • -
    • Added the "transaction" method to the Tcl interface
    • -
    • Allow the DEFAULT value of a column to call functions that have constant -operands
    • -
    • Added the ANALYZE command for gathering statistics on indices and -using those statistics when picking an index in the optimizer
    • -
    • Remove the limit (formerly 100) on the number of terms in the -WHERE clause
    • -
    • The right-hand side of the IN operator can now be a list of expressions -instead of just a list of constants
    • -
    • Rework the optimizer so that it is able to make better use of indices
    • -
    • The order of tables in a join is adjusted automatically to make -better use of indices
    • -
    • The IN operator is now a candidate for optimization even if the left-hand -side is not the left-most term of the index. Multiple IN operators can be -used with the same index.
    • -
    • WHERE clause expressions using BETWEEN and OR are now candidates -for optimization
    • -
    • Added the "case_sensitive_like" pragma and the SQLITE_CASE_SENSITIVE_LIKE -compile-time option to set its default value to "on".
    • -
    • Use indices to help with GLOB expressions and LIKE expressions too -when the case_sensitive_like pragma is enabled
    • -
    • Added support for grave-accent quoting for compatibility with MySQL
    • -
    • Improved test coverage
    • -
    • Dozens of minor bug fixes
    • -} - -chng {2005 June 13 (3.2.2)} { -
    • Added the sqlite3_db_handle() API
    • -
    • Added the sqlite3_get_autocommit() API
    • -
    • Added a REGEXP operator to the parser. There is no function to back -up this operator in the standard build but users can add their own using -sqlite3_create_function()
    • -
    • Speed improvements and library footprint reductions.
    • -
    • Fix byte alignment problems on 64-bit architectures.
    • -
    • Many, many minor bug fixes and documentation updates.
    • -} - -chng {2005 March 29 (3.2.1)} { -
    • Fix a memory allocation error in the new ADD COLUMN command.
    • -
    • Documentation updates
    • -} - -chng {2005 March 21 (3.2.0)} { -
    • Added support for ALTER TABLE ADD COLUMN.
    • -
    • Added support for the "T" separator in ISO-8601 date/time strings.
    • -
    • Improved support for Cygwin.
    • -
    • Numerous bug fixes and documentation updates.
    • -} - -chng {2005 March 16 (3.1.6)} { -
    • Fix a bug that could cause database corruption when inserting a record into tables with around 125 columns.
    • -
    • sqlite3_step() is now much more likely to invoke the busy handler - and less likely to return SQLITE_BUSY.
    • -
    • Fix memory leaks that used to occur after a malloc() failure.
    • -} - -chng {2005 March 11 (3.1.5)} { -
    • The ioctl on OS-X to control syncing to disk is F_FULLFSYNC, - not F_FULLSYNC. The previous release had it wrong.
    • -} - -chng {2005 March 10 (3.1.4)} { -
    • Fix a bug in autovacuum that could cause database corruption if -a CREATE UNIQUE INDEX fails because of a constraint violation. -This problem only occurs if the new autovacuum feature introduced in -version 3.1 is turned on.
    • -
    • The F_FULLSYNC ioctl (currently only supported on OS-X) is disabled -if the synchronous pragma is set to something other than "full".
    • -
    • Add additional forward compatibility to the future version 3.2 database -file format.
    • -
    • Fix a bug in WHERE clauses of the form (rowid<'2')
    • -
    • New SQLITE_OMIT_... compile-time options added
    • -
    • Updates to the man page
    • -
    • Remove the use of strcasecmp() from the shell
    • -
    • Windows DLL exports symbols Tclsqlite_Init and Sqlite_Init
    • -} - -chng {2005 February 19 (3.1.3)} { -
    • Fix a problem with VACUUM on databases from which tables containing -AUTOINCREMENT have been dropped.
    • -
    • Add forward compatibility to the future version 3.2 database file -format.
    • -
    • Documentation updates
    • -} - -chng {2005 February 15 (3.1.2)} { -
    • Fix a bug that can lead to database corruption if there are two -open connections to the same database and one connection does a VACUUM -and the second makes some change to the database.
    • -
    • Allow "?" parameters in the LIMIT clause.
    • -
    • Fix VACUUM so that it works with AUTOINCREMENT.
    • -
    • Fix a race condition in AUTOVACUUM that can lead to corrupt databases
    • -
    • Add a numeric version number to the sqlite3.h include file.
    • -
    • Other minor bug fixes and performance enhancements.
    • -} - -chng {2005 February 15 (2.8.16)} { -
    • Fix a bug that can lead to database corruption if there are two -open connections to the same database and one connection does a VACUUM -and the second makes some change to the database.
    • -
    • Correctly handle quoted names in CREATE INDEX statements.
    • -
    • Fix a naming conflict between sqlite.h and sqlite3.h.
    • -
    • Avoid excess heap usage when copying expressions.
    • -
    • Other minor bug fixes.
    • -} - -chng {2005 February 1 (3.1.1 BETA)} { -
    • Automatic caching of prepared statements in the TCL interface
    • -
    • ATTACH and DETACH as well as some other operations cause existing - prepared statements to expire.
    • -
    • Numerous minor bug fixes
    • -} - -chng {2005 January 21 (3.1.0 ALPHA)} { -
    • Autovacuum support added
    • -
    • CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP added
    • -
    • Support for the EXISTS clause added.
    • -
    • Support for correlated subqueries added.
    • -
    • Added the ESCAPE clause on the LIKE operator.
    • -
    • Support for ALTER TABLE ... RENAME TABLE ... added
    • -
    • AUTOINCREMENT keyword supported on INTEGER PRIMARY KEY
    • -
    • Many SQLITE_OMIT_ macros inserted to omit features at compile-time and reduce the library footprint.
    • -
    • The REINDEX command was added.
    • -
    • The engine no longer consults the main table if it can get - all the information it needs from an index.
    • -
    • Many nuisance bugs fixed.
    • -} - -chng {2004 October 11 (3.0.8)} { -
    • Add support for DEFERRED, IMMEDIATE, and EXCLUSIVE transactions.
    • -
    • Allow new user-defined functions to be created when there are -already one or more precompiled SQL statements.
    • -
    • Fix portability problems for Mingw/MSYS.
    • -
    • Fix a byte alignment problem on 64-bit Sparc machines.
    • -
    • Fix the ".import" command of the shell so that it ignores \r -characters at the end of lines.
    • -
    • The "csv" mode option in the shell puts strings inside double-quotes.
    • -
    • Fix typos in documentation.
    • -
    • Convert array constants in the code to have type "const".
    • -
    • Numerous code optimizations, especially optimizations designed to make the code footprint smaller.
    • -} - -chng {2004 September 18 (3.0.7)} { -
    • The BTree module allocates large buffers using malloc() instead of - off of the stack, in order to play better on machines with limited - stack space.
    • -
    • Fixed naming conflicts so that versions 2.8 and 3.0 can be - linked and used together in the same ANSI-C source file.
    • -
    • New interface: sqlite3_bind_parameter_index()
    • -
    • Add support for wildcard parameters of the form: "?nnn"
    • -
    • Fix problems found on 64-bit systems.
    • -
    • Removed encode.c file (containing unused routines) from the - version 3.0 source tree.
    • -
    • The sqlite3_trace() callbacks occur before each statement - is executed, not when the statement is compiled.
    • -
    • Makefile updates and miscellaneous bug fixes.
    • -} - -chng {2004 September 02 (3.0.6 beta)} { -
    • Better detection and handling of corrupt database files.
    • -
    • The sqlite3_step() interface returns SQLITE_BUSY if it is unable - to commit a change because of a lock
    • -
    • Combine the implementations of LIKE and GLOB into a single - pattern-matching subroutine.
    • -
    • Miscellaneous code size optimizations and bug fixes
    • -} - -chng {2004 August 29 (3.0.5 beta)} { -
    • Support for ":AAA" style bind parameter names.
    • -
    • Added the new sqlite3_bind_parameter_name() interface.
    • -
    • Support for TCL variable names embedded in SQL statements in the - TCL bindings.
    • -
    • The TCL bindings transfer data without necessarily doing a conversion - to a string.
    • -
    • The database for TEMP tables is not created until it is needed.
    • -
    • Add the ability to specify an alternative temporary file directory - using the "sqlite_temp_directory" global variable.
    • -
    • A compile-time option (SQLITE_BUSY_RESERVED_LOCK) causes the busy - handler to be called when there is contention for a RESERVED lock.
    • -
    • Various bug fixes and optimizations
    • -} - -chng {2004 August 8 (3.0.4 beta)} { -
    • CREATE TABLE and DROP TABLE now work correctly as prepared statements.
    • -
    • Fix a bug in VACUUM and UNIQUE indices.
    • -
    • Add the ".import" command to the command-line shell.
    • -
    • Fix a bug that could cause index corruption when an attempt to - delete rows of a table is blocked by a pending query.
    • -
    • Library size optimizations.
    • -
    • Other minor bug fixes.
    • -} - -chng {2004 July 22 (2.8.15)} { -
    • This is a maintenance release only. Various minor bugs have been -fixed and some portability enhancements are added.
    • -} - -chng {2004 July 22 (3.0.3 beta)} { -
    • The second beta release for SQLite 3.0.
    • -
    • Add support for "PRAGMA page_size" to adjust the page size of -the database.
    • -
    • Various bug fixes and documentation updates.
    • -} - -chng {2004 June 30 (3.0.2 beta)} { -
    • The first beta release for SQLite 3.0.
    • -} - -chng {2004 June 22 (3.0.1 alpha)} { -
    • - *** Alpha Release - Research And Testing Use Only *** -
    • Lots of bug fixes.
    • -} - -chng {2004 June 18 (3.0.0 alpha)} { -
    • - *** Alpha Release - Research And Testing Use Only *** -
    • Support for internationalization including UTF-8, UTF-16, and - user defined collating sequences.
    • -
    • New file format that is 25% to 35% smaller for typical use.
    • -
    • Improved concurrency.
    • -
    • Atomic commits for ATTACHed databases.
    • -
    • Remove cruft from the APIs.
    • -
    • BLOB support.
    • -
    • 64-bit rowids.
    • -
    • More information. -} - -chng {2004 June 9 (2.8.14)} { -
    • Fix the min() and max() optimizer so that it works when the FROM - clause consists of a subquery.
    • -
    • Ignore extra whitespace at the end of "." commands in the shell.
    • -
    • Bundle sqlite_encode_binary() and sqlite_decode_binary() with the - library.
    • -
    • The TEMP_STORE and DEFAULT_TEMP_STORE pragmas now work.
    • -
    • Code changes to compile cleanly using OpenWatcom.
    • -
    • Fix VDBE stack overflow problems with INSTEAD OF triggers and - NULLs in IN operators.
    • -
    • Add the global variable sqlite_temp_directory which if set defines the - directory in which temporary files are stored.
    • -
    • sqlite_interrupt() plays well with VACUUM.
    • -
    • Other minor bug fixes.
    • -} - -chng {2004 March 8 (2.8.13)} { -
    • Refactor parts of the code in order to make the code footprint - smaller. The code is now also a little bit faster.
    • -
    • sqlite_exec() is now implemented as a wrapper around sqlite_compile() - and sqlite_step().
    • -
    • The built-in min() and max() functions now honor the difference between - NUMERIC and TEXT datatypes. Formerly, min() and max() always assumed - their arguments were of type NUMERIC.
    • -
    • New HH:MM:SS modifier to the built-in date/time functions.
    • -
    • Experimental sqlite_last_statement_changes() API added. Fixed the last_insert_rowid() function so that it works correctly with triggers.
    • -
    • Add function prototypes for the database encryption API.
    • -
    • Fix several nuisance bugs.
    • -} - -chng {2004 February 8 (2.8.12)} { -
    • Fix a bug that might corrupt the rollback journal if a power failure or external program halt occurs in the middle of a COMMIT. The corrupt journal can lead to database corruption when it is rolled back.
    • -
    • Reduce the size and increase the speed of various modules, especially - the virtual machine.
    • -
    • Allow "<expr> IN <table>" as a shorthand for - "<expr> IN (SELECT * FROM <table>".
    • -
    • Optimizations to the sqlite_mprintf() routine.
    • -
    • Make sure the MIN() and MAX() optimizations work within subqueries.
    • -} - -chng {2004 January 14 (2.8.11)} { -
    • Fix a bug in how the IN operator handles NULLs in subqueries. The bug - was introduced by the previous release.
    • -} - -chng {2004 January 13 (2.8.10)} { -
    • Fix a potential database corruption problem on Unix caused by the fact that all POSIX advisory locks are cleared whenever you close() a file. The workaround is to embargo all close() calls while locks are outstanding.
    • -
    • Performance enhancements on some corner cases of COUNT(*).
    • -
    • Make sure the in-memory backend responds sanely if malloc() fails.
    • -
    • Allow sqlite_exec() to be called from within user-defined SQL - functions.
    • -
    • Improved accuracy of floating-point conversions using "long double".
    • -
    • Bug fixes in the experimental date/time functions.
    • -} - -chng {2004 January 5 (2.8.9)} { -
    • Fix a 32-bit integer overflow problem that could result in corrupt indices in a database if large negative numbers (less than -2147483648) were inserted into an indexed numeric column.
    • -
    • Fix a locking problem on multi-threaded Linux implementations.
    • -
    • Always use "." instead of "," as the decimal point even if the locale - requests ",".
    • -
    • Added UTC to localtime conversions to the experimental date/time - functions.
    • -
    • Bug fixes to date/time functions.
    • -} - -chng {2003 December 17 (2.8.8)} { -
    • Fix a critical bug introduced into 2.8.0 which could cause - database corruption.
    • -
    • Fix a problem with 3-way joins that do not use indices
    • -
    • The VACUUM command now works with the non-callback API
    • -
    • Improvements to the "PRAGMA integrity_check" command
    • -} - -chng {2003 December 4 (2.8.7)} { -
    • Added experimental sqlite_bind() and sqlite_reset() APIs.
    • -
    • If the name of the database is an empty string, open a new database - in a temporary file that is automatically deleted when the database - is closed.
    • -
    • Performance enhancements in the lemon-generated parser
    • -
    • Experimental date/time functions revised.
    • -
    • Disallow temporary indices on permanent tables.
    • -
    • Documentation updates and typo fixes
    • -
    • Added experimental sqlite_progress_handler() callback API
    • -
    • Removed support for the Oracle8 outer join syntax.
    • -
    • Allow GLOB and LIKE operators to work as functions.
    • -
    • Other minor documentation and makefile changes and bug fixes.
    • -} - -chng {2003 August 21 (2.8.6)} { -
    • Moved the CVS repository to www.sqlite.org
    • -
    • Update the NULL-handling documentation.
    • -
    • Experimental date/time functions added.
    • -
    • Bug fix: correctly evaluate a view of a view without segfaulting.
    • -
    • Bug fix: prevent database corruption if you dropped a - trigger that had the same name as a table.
    • -
    • Bug fix: allow a VACUUM (without segfaulting) on an empty - database after setting the EMPTY_RESULT_CALLBACKS pragma.
    • -
    • Bug fix: if an integer value will not fit in a 32-bit int, store it in - a double instead.
    • -
    • Bug fix: Make sure the journal file directory entry is committed to disk - before writing the database file.
    • -} - -chng {2003 July 22 (2.8.5)} { -
    • Make LIMIT work on a compound SELECT statement.
    • -
    • LIMIT 0 now shows no rows. Use LIMIT -1 to see all rows.
    • -
    • Correctly handle comparisons between an INTEGER PRIMARY KEY and - a floating point number.
    • -
    • Fix several important bugs in the new ATTACH and DETACH commands.
    • -
    • Updated the NULL-handling document.
    • -
    • Allow NULL arguments in sqlite_compile() and sqlite_step().
    • -
    • Many minor bug fixes
    • -} - -chng {2003 June 29 (2.8.4)} { -
    • Enhanced the "PRAGMA integrity_check" command to verify indices.
    • -
    • Added authorization hooks for the new ATTACH and DETACH commands.
    • -
    • Many documentation updates
    • -
    • Many minor bug fixes
    • -} - -chng {2003 June 4 (2.8.3)} { -
    • Fix a problem that will corrupt the indices on a table if you - do an INSERT OR REPLACE or an UPDATE OR REPLACE on a table that - contains an INTEGER PRIMARY KEY plus one or more indices.
    • -
    • Fix a bug in windows locking code so that locks work correctly - when simultaneously accessed by Win95 and WinNT systems.
    • -
    • Add the ability for INSERT and UPDATE statements to refer to the - "rowid" (or "_rowid_" or "oid") columns.
    • -
    • Other important bug fixes
    • -} - -chng {2003 May 17 (2.8.2)} { -
    • Fix a problem that will corrupt the database file if you drop a - table from the main database that has a TEMP index.
    • -} - -chng {2003 May 16 (2.8.1)} { -
    • Reactivated the VACUUM command that reclaims unused disk space in - a database file.
    • -
    • Added the ATTACH and DETACH commands to allow interacting with multiple - database files at the same time.
    • -
    • Added support for TEMP triggers and indices.
    • -
    • Added support for in-memory databases.
    • -
    • Removed the experimental sqlite_open_aux_file(). Its function is - subsumed in the new ATTACH command.
    • -
    • The precedence order for ON CONFLICT clauses was changed so that - ON CONFLICT clauses on BEGIN statements have a higher precedence than - ON CONFLICT clauses on constraints. -
    • Many, many bug fixes and compatibility enhancements.
    • -} - -chng {2003 Feb 16 (2.8.0)} { -
    • Modified the journal file format to make it more resistant to corruption - that can occur after an OS crash or power failure.
    • -
    • Added a new C/C++ API that does not use callback for returning data.
    • -} - -chng {2003 Jan 25 (2.7.6)} { -
    • Performance improvements. The library is now much faster.
    • -
    • Added the sqlite_set_authorizer() API. Formal documentation has - not been written - see the source code comments for instructions on - how to use this function.
    • -
    • Fix a bug in the GLOB operator that was preventing it from working - with upper-case letters.
    • -
    • Various minor bug fixes.
    • -} - -chng {2002 Dec 27 (2.7.5)} { -
    • Fix an uninitialized variable in pager.c which could (with a probability - of about 1 in 4 billion) result in a corrupted database.
    • -} - -chng {2002 Dec 17 (2.7.4)} { -
    • Database files can now grow to be up to 2^41 bytes. The old limit - was 2^31 bytes.
    • -
    • The optimizer will now scan tables in reverse order if doing so will satisfy an ORDER BY ... DESC clause.
    • -
    • The full pathname of the database file is now remembered even if - a relative path is passed into sqlite_open(). This allows - the library to continue operating correctly after a chdir().
    • -
    • Speed improvements in the VDBE.
    • -
    • Lots of little bug fixes.
    • -} - -chng {2002 Oct 30 (2.7.3)} { -
    • Various compiler compatibility fixes.
    • -
    • Fix a bug in the "expr IN ()" operator.
    • -
    • Accept column names in parentheses.
    • -
    • Fix a problem with string memory management in the VDBE
    • -
    • Fix a bug in the "table_info" pragma"
    • -
    • Export the sqlite_function_type() API function in the Windows DLL
    • -
    • Fix locking behavior under windows
    • -
    • Fix a bug in LEFT OUTER JOIN
    • -} - -chng {2002 Sep 25 (2.7.2)} { -
    • Prevent journal file overflows on huge transactions.
    • -
    • Fix a memory leak that occurred when sqlite_open() failed.
    • -
    • Honor the ORDER BY and LIMIT clause of a SELECT even if the - result set is used for an INSERT.
    • -
    • Do not put write locks on the file used to hold TEMP tables.
    • -
    • Added documentation on SELECT DISTINCT and on how SQLite handles NULLs.
    • -
    • Fix a problem that was causing poor performance when many thousands - of SQL statements were executed by a single sqlite_exec() call.
    • -} - -chng {2002 Aug 31 (2.7.1)} { -
    • Fix a bug in the ORDER BY logic that was introduced in version 2.7.0
    • -
    • C-style comments are now accepted by the tokenizer.
    • -
    • INSERT runs a little faster when the source is a SELECT statement.
    • -} - -chng {2002 Aug 25 (2.7.0)} { -
    • Make a distinction between numeric and text values when sorting. - Text values sort according to memcmp(). Numeric values sort in - numeric order.
    • -
    • Allow multiple simultaneous readers under windows by simulating - the reader/writers locks that are missing from Win95/98/ME.
    • -
    • An error is now returned when trying to start a transaction if - another transaction is already active.
    • -} - -chng {2002 Aug 12 (2.6.3)} { -
    • Add the ability to read both little-endian and big-endian databases. So databases created under SunOS or MacOSX can be read and written under Linux or Windows and vice versa.
    • -
    • Convert to the new website: http://www.sqlite.org/
    • -
    • Allow transactions to span Linux Threads
    • -
    • Bug fix in the processing of the ORDER BY clause for GROUP BY queries
    • -} - -chng {2002 Jly 30 (2.6.2)} { -
    • Text files read by the COPY command can now have line terminators - of LF, CRLF, or CR.
    • -
    • SQLITE_BUSY is handled correctly if encountered during database - initialization.
    • -
    • Fix to UPDATE triggers on TEMP tables.
    • -
    • Documentation updates.
    • -} - -chng {2002 Jly 19 (2.6.1)} { -
    • Include a static string in the library that responds to the RCS - "ident" command and which contains the library version number.
    • -
    • Fix an assertion failure that occurred when deleting all rows of - a table with the "count_changes" pragma turned on.
    • -
    • Better error reporting when problems occur during the automatic - 2.5.6 to 2.6.0 database format upgrade.
    • -} - -chng {2002 Jly 17 (2.6.0)} { -
    • Change the format of indices to correct a design flaw that originated with version 2.1.0. *** This is an incompatible file format change *** When version 2.6.0 or later of the library attempts to open a database file created by version 2.5.6 or earlier, it will automatically and irreversibly convert the file format. Make backup copies of older database files before opening them with version 2.6.0 of the library.
    • -} - -chng {2002 Jly 7 (2.5.6)} { -
    • Fix more problems with rollback. Enhance the test suite to exercise - the rollback logic extensively in order to prevent any future problems. -
    • -} - -chng {2002 Jly 6 (2.5.5)} { -
    • Fix a bug which could cause database corruption during a rollback. This bug was introduced in version 2.4.0 by the freelist optimization of check-in [410].
    • -
    • Fix a bug in aggregate functions for VIEWs.
    • -
    • Other minor changes and enhancements.
    • -} - -chng {2002 Jly 1 (2.5.4)} { -
    • Make the "AS" keyword optional again.
    • -
    • The datatype of columns now appear in the 4th argument to the - callback.
    • -
    • Added the sqlite_open_aux_file() API, though it is still - mostly undocumented and untested.
    • -
    • Added additional test cases and fixed a few bugs that those - test cases found.
    • -} - -chng {2002 Jun 24 (2.5.3)} { -
    • Bug fix: Database corruption can occur due to the optimization - that was introduced in version 2.4.0 (check-in [410]). The problem - should now be fixed. The use of versions 2.4.0 through 2.5.2 is - not recommended.
    • -} - -chng {2002 Jun 24 (2.5.2)} { -
    • Added the new SQLITE_TEMP_MASTER table which records the schema - for temporary tables in the same way that SQLITE_MASTER does for - persistent tables.
    • -
    • Added an optimization to UNION ALL
    • -
    • Fixed a bug in the processing of LEFT OUTER JOIN
    • -
    • The LIMIT clause now works on subselects
    • -
    • ORDER BY works on subselects
    • -
    • There is a new TypeOf() function used to determine if an expression - is numeric or text.
    • -
    • Autoincrement now works for INSERT from a SELECT.
    • -} - -chng {2002 Jun 19 (2.5.1)} { -
    • The query optimizer now attempts to implement the ORDER BY clause using an index. Sorting is still used if no suitable index is available.
    • -} - -chng {2002 Jun 17 (2.5.0)} { -
    • Added support for row triggers.
    • -
    • Added SQL-92 compliant handling of NULLs.
    • -
    • Add support for the full SQL-92 join syntax and LEFT OUTER JOINs.
    • -
    • Double-quoted strings interpreted as column names not text literals.
    • -
    • Parse (but do not implement) foreign keys.
    • -
    • Performance improvements in the parser, pager, and WHERE clause code - generator.
    • -
    • Make the LIMIT clause work on subqueries. (ORDER BY still does not - work, though.)
    • -
    • Added the "%Q" expansion to sqlite_*_printf().
    • -
    • Bug fixes too numerous to mention (see the change log).
    • -} - -chng {2002 May 09 (2.4.12)} { -
    • Added logic to detect when the library API routines are called out - of sequence.
    • -} - -chng {2002 May 08 (2.4.11)} { -
    • Bug fix: Column names in the result set were not being generated - correctly for some (rather complex) VIEWs. This could cause a - segfault under certain circumstances.
    • -} - -chng {2002 May 02 (2.4.10)} { -
    • Bug fix: Generate correct column headers when a compound SELECT is used - as a subquery.
    • -
    • Added the sqlite_encode_binary() and sqlite_decode_binary() functions to - the source tree. But they are not yet linked into the library.
    • -
    • Documentation updates.
    • -
    • Export the sqlite_changes() function from windows DLLs.
    • -
    • Bug fix: Do not attempt the subquery flattening optimization on queries - that lack a FROM clause. To do so causes a segfault.
    • -} - -chng {2002 Apr 21 (2.4.9)} { -
    • Fix a bug that was causing the precompiled binary of SQLITE.EXE to - report "out of memory" under Windows 98.
    • -} - -chng {2002 Apr 20 (2.4.8)} { -
    • Make sure VIEWs are created after their corresponding TABLEs in the - output of the .dump command in the shell.
    • -
    • Speed improvements: Do not do synchronous updates on TEMP tables.
    • -
    • Many improvements and enhancements to the shell.
    • -
    • Make the GLOB and LIKE operators functions that can be overridden - by a programmer. This allows, for example, the LIKE operator to - be changed to be case sensitive.
    • -} - -chng {2002 Apr 06 (2.4.7)} { -
    • Add the ability to put TABLE.* in the column list of a - SELECT statement.
    • -
    • Permit SELECT statements without a FROM clause.
    • -
    • Added the last_insert_rowid() SQL function.
    • -
    • Do not count rows where the IGNORE conflict resolution occurs in - the row count.
    • -
    • Make sure function expressions in the VALUES clause of an INSERT are correct.
    • -
    • Added the sqlite_changes() API function to return the number of rows that changed in the most recent operation.
    • -} - -chng {2002 Apr 02 (2.4.6)} { -
    • Bug fix: Correctly handle terms in the WHERE clause of a join that - do not contain a comparison operator.
    • -} - -chng {2002 Apr 01 (2.4.5)} { -
    • Bug fix: Correctly handle functions that appear in the WHERE clause - of a join.
    • -
    • When the PRAGMA vdbe_trace=ON is set, correctly print the P3 operand - value when it is a pointer to a structure rather than a pointer to - a string.
    • -
    • When inserting an explicit NULL into an INTEGER PRIMARY KEY, convert - the NULL value into a unique key automatically.
    • -} - -chng {2002 Mar 24 (2.4.4)} { -
    • Allow "VIEW" to be a column name
    • -
    • Added support for CASE expressions (patch from Dan Kennedy)
    • -
    • Added RPMS to the delivery (patches from Doug Henry)
    • -
    • Fix typos in the documentation
    • -
    • Cut over configuration management to a new CVS repository with - its own CVSTrac bug tracking system.
    • -} - -chng {2002 Mar 22 (2.4.3)} { -
    • Fix a bug in SELECT that occurs when a compound SELECT is used as a - subquery in the FROM of a SELECT.
    • -
    • The sqlite_get_table() function now returns an error if you - give it two or more SELECTs that return different numbers of columns.
    • -} - -chng {2002 Mar 14 (2.4.2)} { -
    • Bug fix: Fix an assertion failure that occurred when ROWID was a column - in a SELECT statement on a view.
    • -
    • Bug fix: Fix an uninitialized variable in the VDBE that could cause an assertion failure.
    • -
    • Make the os.h header file more robust in detecting when the compile is - for windows and when it is for unix.
    • -} - -chng {2002 Mar 13 (2.4.1)} { -
    • Using an unnamed subquery in a FROM clause would cause a segfault.
    • -
    • The parser now insists on seeing a semicolon or the end of input before - executing a statement. This avoids an accidental disaster if the - WHERE keyword is misspelled in an UPDATE or DELETE statement.
    • -} - - -chng {2002 Mar 10 (2.4.0)} { -
    • Change the name of the sanity_check PRAGMA to integrity_check - and make it available in all compiles.
    • -
    • SELECT min() or max() of an indexed column with no WHERE or GROUP BY - clause is handled as a special case which avoids a complete table scan.
    • -
    • Automatically generated ROWIDs are now sequential.
    • -
    • Do not allow dot-commands of the command-line shell to occur in the - middle of a real SQL command.
    • -
    • Modifications to the "lemon" parser generator so that the parser tables - are 4 times smaller.
    • -
    • Added support for user-defined functions implemented in C.
    • -
    • Added support for new functions: coalesce(), lower(), - upper(), and random() -
    • Added support for VIEWs.
    • -
    • Added the subquery flattening optimizer.
    • -
    • Modified the B-Tree and Pager modules so that disk pages that do not - contain real data (free pages) are not journaled and are not - written from memory back to the disk when they change. This does not - impact database integrity, since the - pages contain no real data, but it does make large INSERT operations - about 2.5 times faster and large DELETEs about 5 times faster.
    • -
    • Made the CACHE_SIZE pragma persistent
    • -
    • Added the SYNCHRONOUS pragma
    • -
    • Fixed a bug that was causing updates to fail inside of transactions when - the database contained a temporary table.
    • -} - -chng {2002 Feb 18 (2.3.3)} { -
    • Allow identifiers to be quoted in square brackets, for compatibility - with MS-Access.
    • -
    • Added support for sub-queries in the FROM clause of a SELECT.
    • -
    • More efficient implementation of sqliteFileExists() under Windows. - (by Joel Luscy)
    • -
    • The VALUES clause of an INSERT can now contain expressions, including - scalar SELECT clauses.
    • -
    • Added support for CREATE TABLE AS SELECT
    • -
    • Bug fix: Creating and dropping a table all within a single - transaction was not working.
    • -} - -chng {2002 Feb 14 (2.3.2)} { -
    • Bug fix: There was an incorrect assert() in pager.c. The real code was - all correct (as far as is known) so everything should work OK if you - compile with -DNDEBUG=1. When asserts are not disabled, there - could be a fault.
    • -} - -chng {2002 Feb 13 (2.3.1)} { -
    • Bug fix: An assertion was failing if "PRAGMA full_column_names=ON;" was - set and you did a query that used a rowid, like this: - "SELECT rowid, * FROM ...".
    • -} - -chng {2002 Jan 30 (2.3.0)} { -
    • Fix a serious bug in the INSERT command which was causing data to go - into the wrong columns if the data source was a SELECT and the INSERT - clauses specified its columns in some order other than the default.
    • -
    • Added the ability to resolve constraint conflicts in ways other than an abort and rollback. See the documentation on the "ON CONFLICT" clause for details.
    • -
    • Temporary files are now automatically deleted by the operating system - when closed. There are no more dangling temporary files on a program - crash. (If the OS crashes, fsck will delete the file after reboot - under Unix. I do not know what happens under Windows.)
    • -
    • NOT NULL constraints are honored.
    • -
    • The COPY command puts NULLs in columns whose data is '\N'.
    • -
    • In the COPY command, backslash can now be used to escape a newline.
    • -
    • Added the SANITY_CHECK pragma.
    • -} - -chng {2002 Jan 28 (2.2.5)} { -
    • Important bug fix: the IN operator was not working if either the - left-hand or right-hand side was derived from an INTEGER PRIMARY KEY.
    • -
    • Do not escape the backslash '\' character in the output of the - sqlite command-line access program.
    • -} - -chng {2002 Jan 22 (2.2.4)} { -
    • The label to the right of an AS in the column list of a SELECT can now - be used as part of an expression in the WHERE, ORDER BY, GROUP BY, and/or - HAVING clauses.
    • -
    • Fix a bug in the -separator command-line option to the sqlite - command.
    • -
    • Fix a problem with the sort order when comparing upper-case strings against - characters greater than 'Z' but less than 'a'.
    • -
    • Report an error if an ORDER BY or GROUP BY expression is constant.
    • -} - -chng {2002 Jan 16 (2.2.3)} { -
    • Fix warning messages in VC++ 7.0. (Patches from nicolas352001)
    • -
    • Make the library thread-safe. (The code is there and appears to work - but has not been stressed.)
    • -
    • Added the new sqlite_last_insert_rowid() API function.
    • -} - -chng {2002 Jan 13 (2.2.2)} { -
    • Bug fix: An assertion was failing when a temporary table with an index - had the same name as a permanent table created by a separate process.
    • -
    • Bug fix: Updates to tables containing an INTEGER PRIMARY KEY and an - index could fail.
    • -} - -chng {2002 Jan 9 (2.2.1)} { -
    • Bug fix: An attempt to delete a single row of a table with a WHERE - clause of "ROWID=x" when no such rowid exists was causing an error.
    • -
    • Bug fix: Passing in a NULL as the 3rd parameter to sqlite_open() - would sometimes cause a coredump.
    • -
    • Bug fix: DROP TABLE followed by a CREATE TABLE with the same name all - within a single transaction was causing a coredump.
    • -
    • Makefile updates from A. Rottmann
    • -} - -chng {2001 Dec 22 (2.2.0)} { -
    • Columns of type INTEGER PRIMARY KEY are actually used as the primary - key in underlying B-Tree representation of the table.
    • -
    • Several obscure, unrelated bugs were found and fixed while implementing the integer primary key change of the previous bullet.
    • -
    • Added the ability to specify "*" as part of a larger column list in - the result section of a SELECT statement. For example: - "SELECT rowid, * FROM table1;".
    • -
    • Updates to comments and documentation.
    • -} - -chng {2001 Dec 14 (2.1.7)} { -
    • Fix a bug in CREATE TEMPORARY TABLE which was causing the - table to be initially allocated in the main database file instead - of in the separate temporary file. This bug could cause the library - to suffer an assertion failure and it could cause "page leaks" in the - main database file. -
    • Fix a bug in the b-tree subsystem that could sometimes cause the first - row of a table to be repeated during a database scan.
    • -} - -chng {2001 Dec 14 (2.1.6)} { -
    • Fix the locking mechanism yet again to prevent sqlite_exec() from returning SQLITE_PROTOCOL unnecessarily. This time the bug was a race condition in the locking code. This change affects both POSIX and Windows users.
    • -} - -chng {2001 Dec 6 (2.1.5)} { -
    • Fix for another problem (unrelated to the one fixed in 2.1.4) that sometimes causes sqlite_exec() to return SQLITE_PROTOCOL unnecessarily. This time the bug was in the POSIX locking code and should not affect Windows users.
    • -} - -chng {2001 Dec 4 (2.1.4)} { -
    • Sometimes sqlite_exec() would return SQLITE_PROTOCOL when it - should have returned SQLITE_BUSY.
    • -
    • The fix to the previous bug uncovered a deadlock which was also - fixed.
    • -
    • Add the ability to put a single .command in the second argument - of the sqlite shell
    • -
    • Updates to the FAQ
    • -} - -chng {2001 Nov 23 (2.1.3)} { -
    • Fix the behavior of comparison operators - (ex: "<", "==", etc.) - so that they are consistent with the order of entries in an index.
    • -
    • Correct handling of integers in SQL expressions that are larger than - what can be represented by the machine integer.
    • -} - -chng {2001 Nov 22 (2.1.2)} { -
    • Changes to support 64-bit architectures.
    • -
    • Fix a bug in the locking protocol.
    • -
    • Fix a bug that could (rarely) cause the database to become - unreadable after a DROP TABLE due to corruption to the SQLITE_MASTER - table.
    • -
    • Change the code so that version 2.1.1 databases that were rendered - unreadable by the above bug can be read by this version of - the library even though the SQLITE_MASTER table is (slightly) - corrupted.
    • -} - -chng {2001 Nov 13 (2.1.1)} { -
    • Bug fix: Sometimes arbitrary strings were passed to the callback - function when the actual value of a column was NULL.
    • -} - -chng {2001 Nov 12 (2.1.0)} { -
    • Change the format of data records so that records up to 16MB in size - can be stored.
    • -
    • Change the format of indices to allow for better query optimization.
    • -
    • Implement the "LIMIT ... OFFSET ..." clause on SELECT statements.
    • -} - -chng {2001 Nov 3 (2.0.8)} { -
    • Made selected parameters in API functions const. This should - be fully backwards compatible.
    • -
    • Documentation updates
    • -
    • Simplify the design of the VDBE by restricting the number of sorters - and lists to 1. - In practice, no more than one sorter and one list was ever used anyhow. -
    • -} - -chng {2001 Oct 21 (2.0.7)} { -
    • Any UTF-8 character or ISO8859 character can be used as part of - an identifier.
    • -
    • Patches from Christian Werner to improve ODBC compatibility and to - fix a bug in the round() function.
    • -
    • Plug some memory leaks that used to occur if malloc() failed. We have been and continue to be memory leak free as long as malloc() works.
    • -
    • Changes to some test scripts so that they work on Windows in - addition to Unix.
    • -} - -chng {2001 Oct 19 (2.0.6)} { -
    • Added the EMPTY_RESULT_CALLBACKS pragma
    • -
    • Support for UTF-8 and ISO8859 characters in column and table names.
    • -
    • Bug fix: Compute correct table names when the FULL_COLUMN_NAMES pragma is turned on.
    • -} - -chng {2001 Oct 14 (2.0.5)} { -
    • Added the COUNT_CHANGES pragma.
    • -
    • Changes to the FULL_COLUMN_NAMES pragma to help out the ODBC driver.
    • -
    • Bug fix: "SELECT count(*)" was returning NULL for empty tables. - Now it returns 0.
    • -} - -chng {2001 Oct 13 (2.0.4)} { -
    • Bug fix: an obscure and relatively harmless bug was causing one of - the tests to fail when gcc optimizations are turned on. This release - fixes the problem.
    • -} - -chng {2001 Oct 13 (2.0.3)} { -
    • Bug fix: the sqlite_busy_timeout() function was delaying 1000 - times too long before failing.
    • -
    • Bug fix: an assertion was failing if the disk holding the database - file became full or stopped accepting writes for some other reason. - New tests were added to detect similar problems in the future.
    • -
    • Added new operators: & (bitwise-and) - | (bitwise-or), ~ (ones-complement), - << (shift left), >> (shift right).
    • -
    • Added new functions: round() and abs().
    • -} - -chng {2001 Oct 9 (2.0.2)} { -
    • Fix two bugs in the locking protocol. (One was masking the other.)
    • -
    • Removed some unused "#include" directives that were causing problems for VC++.
    • -
    • Fixed sqlite.h so that it is usable from C++
    • -
    • Added the FULL_COLUMN_NAMES pragma. When set to "ON", the names of - columns are reported back as TABLE.COLUMN instead of just COLUMN.
    • -
    • Added the TABLE_INFO() and INDEX_INFO() pragmas to help support the - ODBC interface.
    • -
    • Added support for TEMPORARY tables and indices.
    • -} - -chng {2001 Oct 2 (2.0.1)} { -
    • Remove some C++ style comments from btree.c so that it will compile - using compilers other than gcc.
    • -
    • The ".dump" output from the shell does not work if there are embedded - newlines anywhere in the data. This is an old bug that was carried - forward from version 1.0. To fix it, the ".dump" output no longer - uses the COPY command. It instead generates INSERT statements.
    • -
    • Extend the expression syntax to support "expr NOT NULL" (with a - space between the "NOT" and the "NULL") in addition to "expr NOTNULL" - (with no space).
    • -} - -chng {2001 Sep 28 (2.0.0)} { -
    • Automatically build binaries for Linux and Windows and put them on - the website.
    • -} - -chng {2001 Sep 28 (2.0-alpha-4)} { -
    • Incorporate makefile patches from A. Rottmann to use LIBTOOL
    • -} - -chng {2001 Sep 27 (2.0-alpha-3)} { -
    • SQLite now honors the UNIQUE keyword in CREATE UNIQUE INDEX. Primary - keys are required to be unique.
    • -
    • File format changed back to what it was for alpha-1
    • -
    • Fixes to the rollback and locking behavior
    • -} - -chng {2001 Sep 20 (2.0-alpha-2)} { -
    • Initial release of version 2.0. The idea of renaming the library - to "SQLus" was abandoned in favor of keeping the "SQLite" name and - bumping the major version number.
    • -
    • The pager and btree subsystems added back. They are now the only - available backend.
    • -
    • The Dbbe abstraction and the GDBM and memory drivers were removed.
    • -
    • Copyright on all code was disclaimed. The library is now in the - public domain.
    • -} - -chng {2001 Jul 23 (1.0.32)} { -
    • Pager and btree subsystems removed. These will be used in a follow-on - SQL server library named "SQLus".
    • -
    • Add the ability to use quoted strings as table and column names in - expressions.
    • -} - -chng {2001 Apr 14 (1.0.31)} { -
    • Pager subsystem added but not yet used.
    • -
    • More robust handling of out-of-memory errors.
    • -
    • New tests added to the test suite.
    • -} - -chng {2001 Apr 6 (1.0.30)} { -
    • Remove the sqlite_encoding TCL variable that was introduced - in the previous version.
    • -
    • Add options -encoding and -tcl-uses-utf to the - sqlite TCL command.
    • -
    • Add tests to make sure that tclsqlite was compiled using Tcl header - files and libraries that match.
    • -} - -chng {2001 Apr 5 (1.0.29)} { -
    • The library now assumes data is stored as UTF-8 if the --enable-utf8 - option is given to configure. The default behavior is to assume - iso8859-x, as it has always done. This only makes a difference for - LIKE and GLOB operators and the LENGTH and SUBSTR functions.
    • -
    • If the library is not configured for UTF-8 and the Tcl library - is one of the newer ones that uses UTF-8 internally, - then a conversion from UTF-8 to iso8859 and - back again is done inside the TCL interface.
    • -} - -chng {2001 Apr 4 (1.0.28)} { -
    • Added limited support for transactions. At this point, transactions - will do table locking on the GDBM backend. There is no support (yet) - for rollback or atomic commit.
    • -
    • Added special column names ROWID, OID, and _ROWID_ that refer to the - unique random integer key associated with every row of every table.
    • -
    • Additional tests added to the regression suite to cover the new ROWID - feature and the TCL interface bugs mentioned below.
    • -
    • Changes to the "lemon" parser generator to help it work better when - compiled using MSVC.
    • -
    • Bug fixes in the TCL interface identified by Oleg Oleinick.
    • -} - -chng {2001 Mar 20 (1.0.27)} { -
    • When doing DELETE and UPDATE, the library used to write the record - numbers of records to be deleted or updated into a temporary file. - This is changed so that the record numbers are held in memory.
    • -
    • The DELETE command without a WHERE clause just removes the database files from the disk, rather than going through and deleting record by record.
    • -} - -chng {2001 Mar 20 (1.0.26)} { -
    • A serious bug fixed on Windows. Windows users should upgrade. - No impact to Unix.
    • -} - -chng {2001 Mar 15 (1.0.25)} { -
    • Modify the test scripts to identify tests that depend on system - load and processor speed and - to warn the user that a failure of one of those (rare) tests does - not necessarily mean the library is malfunctioning. No changes to - code. -
    • -} - -chng {2001 Mar 14 (1.0.24)} { -
    • Fix a bug which was causing the UPDATE command to fail on systems where "malloc(0)" returns NULL. The problem does not appear on Windows, Linux, or HPUX but does cause the library to fail on QNX.
    • -} - -chng {2001 Feb 19 (1.0.23)} { -
    • An unrelated (and minor) bug from Mark Muranwski fixed. The algorithm - for figuring out where to put temporary files for a "memory:" database - was not working quite right. -
    • -} - -chng {2001 Feb 19 (1.0.22)} { -
    • The previous fix was not quite right. This one seems to work better. -
    • -} - -chng {2001 Feb 19 (1.0.21)} { -
    • The UPDATE statement was not working when the WHERE clause contained - some terms that could be satisfied using indices and other terms that - could not. Fixed.
    • -} - -chng {2001 Feb 11 (1.0.20)} { -
    • Merge development changes into the main trunk. Future work toward - using a BTree file structure will use a separate CVS source tree. This - CVS tree will continue to support the GDBM version of SQLite only.
    • -} - -chng {2001 Feb 6 (1.0.19)} { -
    • Fix a strange (but valid) C declaration that was causing problems - for QNX. No logical changes.
    • -} - -chng {2001 Jan 4 (1.0.18)} { -
    • Print the offending SQL statement when an error occurs.
    • -
    • Do not require commas between constraints in CREATE TABLE statements.
    • -
    • Added the "-echo" option to the shell.
    • -
    • Changes to comments.
    • -} - -chng {2000 Dec 10 (1.0.17)} { -
    • Rewrote sqlite_complete() to make it faster.
    • -
    • Minor tweaks to other code to make it run a little faster.
    • -
    • Added new tests for sqlite_complete() and for memory leaks.
    • -} - -chng {2000 Dec 4 (1.0.16)} { -
    • Documentation updates. Mostly fixing of typos and spelling errors.
    • -} - -chng {2000 Oct 23 (1.0.15)} { -
    • Documentation updates
    • -
    • Some sanity checking code was removed from the inner loop of vdbe.c - to help the library to run a little faster. The code is only - removed if you compile with -DNDEBUG.
    • -} - -chng {2000 Oct 19 (1.0.14)} { -
    • Added a "memory:" backend driver that stores its database in an - in-memory hash table.
    • -} - -chng {2000 Oct 18 (1.0.13)} { -
    • Break out the GDBM driver into a separate file in anticipation of adding new drivers.
    • -
    • Allow the name of a database to be prefixed by the driver type. - For now, the only driver type is "gdbm:".
    • -} - -chng {2000 Oct 16 (1.0.12)} { -
    • Fixed an off-by-one error that was causing a coredump in - the '%q' format directive of the new - sqlite_..._printf() routines.
    • -
    • Added the sqlite_interrupt() interface.
    • -
    • In the shell, sqlite_interrupt() is invoked when the - user presses Control-C
    • -
    • Fixed some instances where sqlite_exec() was - returning the wrong error code.
    • -} - -chng {2000 Oct 11 (1.0.10)} { -
    • Added notes on how to compile for Windows95/98.
    • -
    • Removed a few variables that were not being used. Etc.
    • -} - -chng {2000 Oct 8 (1.0.9)} { -
    • Added the sqlite_..._printf() interface routines.
    • -
    • Modified the sqlite shell program to use the new interface - routines.
    • -
    • Modified the sqlite shell program to print the schema for - the built-in SQLITE_MASTER table, if explicitly requested.
    • -} - -chng {2000 Sep 30 (1.0.8)} { -
    • Begin writing documentation on the TCL interface.
    • -} - -chng {2000 Sep 29 (Not Released)} { -
    • Added the sqlite_get_table() API
    • -
    • Updated the documentation due to the above change.
    • -
    • Modified the sqlite shell to make use of the new - sqlite_get_table() API in order to print a list of tables - in multiple columns, similar to the way "ls" prints filenames.
    • -
    • Modified the sqlite shell to print a semicolon at the - end of each CREATE statement in the output of the ".schema" command.
    • -} - -chng {2000 Sep 21 (Not Released)} { -
    • Change the tclsqlite "eval" method to return a list of results if - no callback script is specified.
    • -
    • Change tclsqlite.c to use the Tcl_Obj interface
    • -
    • Add tclsqlite.c to the libsqlite.a library
    • -} - -chng {2000 Sep 13 (Version 1.0.5)} { -
    • Changed the print format for floating point values from "%g" to "%.15g". -
    • -
    • Changed the comparison function so that numbers in exponential notation - (ex: 1.234e+05) sort in numerical order.
    • -} - -chng {2000 Aug 28 (Version 1.0.4)} { -
    • Added functions length() and substr().
    • -
    • Fix a bug in the sqlite shell program that was causing - a coredump when the output mode was "column" and the first row - of data contained a NULL.
    • -} - -chng {2000 Aug 22 (Version 1.0.3)} { -
    • In the sqlite shell, print the "Database opened READ ONLY" message - to stderr instead of stdout.
    • -
    • In the sqlite shell, now print the version number on initial startup.
    • -
    • Add the sqlite_version[] string constant to the library
    • -
    • Makefile updates
    • -
    • Bug fix: incorrect VDBE code was being generated for the following - circumstance: a query on an indexed table containing a WHERE clause with - an IN operator that had a subquery on its right-hand side.
    • -} - -chng {2000 Aug 18 (Version 1.0.1)} { -
    • Fix a bug in the configure script.
    • -
    • Minor revisions to the website.
    • -} - -chng {2000 Aug 17 (Version 1.0)} { -
    • Change the sqlite program so that it can read - databases for which it lacks write permission. (It used to - refuse all access if it could not write.)
    • -} - -chng {2000 Aug 9} { -
    • Treat carriage returns as white space.
    • -} - -chng {2000 Aug 8} { -
    • Added pattern matching to the ".table" command in the "sqlite" -command shell.
    • -} - -chng {2000 Aug 4} { -
    • Documentation updates
    • -
    • Added "busy" and "timeout" methods to the Tcl interface
    • -} - -chng {2000 Aug 3} { -
    • File format version number was being stored in sqlite_master.tcl - multiple times. This was harmless, but unnecessary. It is now fixed.
    • -} - -chng {2000 Aug 2} { -
    • The file format for indices was changed slightly in order to work - around an inefficiency that can sometimes come up with GDBM when - there are large indices having many entries with the same key. - ** Incompatible Change **
    • -} - -chng {2000 Aug 1} { -
    • The parser's stack was overflowing on a very long UPDATE statement. - This is now fixed.
    • -} - -chng {2000 July 31} { -
    • Finish the VDBE tutorial.
    • -
    • Added documentation on compiling to WindowsNT.
    • -
    • Fix a configuration program for WindowsNT.
    • -
    • Fix a configuration problem for HPUX.
    • -} - -chng {2000 July 29} { -
    • Better labels on column names of the result.
    • -} - -chng {2000 July 28} { -
    • Added the sqlite_busy_handler() - and sqlite_busy_timeout() interface.
    • -} - -chng {2000 June 23} { -
    • Begin writing the VDBE tutorial.
    • -} - -chng {2000 June 21} { -
    • Clean up comments and variable names. Changes to documentation. - No functional changes to the code.
    • -} - -chng {2000 June 19} { -
    • Column names in UPDATE statements were case sensitive. - This mistake has now been fixed.
    • -} - -chng {2000 June 16} { -
    • Added the concatenate string operator (||)
    • -} - -chng {2000 June 12} { -
    • Added the fcnt() function to the SQL interpreter. The fcnt() function - returns the number of database "Fetch" operations that have occurred. - This function is designed for use in test scripts to verify that - queries are efficient and appropriately optimized. Fcnt() has no other - useful purpose, as far as I know.
    • -
    • Added a bunch more tests that take advantage of the new fcnt() function. - The new tests did not uncover any new problems.
    • -} - -chng {2000 June 8} { -
    • Added lots of new test cases
    • -
    • Fix a few bugs discovered while adding test cases
    • -
    • Begin adding lots of new documentation
    • -} - -chng {2000 June 6} { -
    • Added compound select operators: UNION, UNION ALL, -INTERSECT, and EXCEPT
    • -
    • Added support for using (SELECT ...) within expressions
    • -
    • Added support for IN and BETWEEN operators
    • -
    • Added support for GROUP BY and HAVING
    • -
    • NULL values are now reported to the callback as a NULL pointer - rather than an empty string.
    • -} - -chng {2000 June 3} { -
    • Added support for default values on columns of a table.
    • -
    • Improved test coverage. Fixed a few obscure bugs found by the -improved tests.
    • -} - -chng {2000 June 2} { -
    • All database files to be modified by an UPDATE, INSERT or DELETE are -now locked before any changes are made to any files. -This makes it safe (I think) to access -the same database simultaneously from multiple processes.
    • -
    • The code appears stable so we are now calling it "beta".
    • -} - -chng {2000 June 1} { -
    • Better support for file locking so that two or more processes -(or threads) -can access the same database simultaneously. More work needed in -this area, though.
    • -} - -chng {2000 May 31} { -
    • Added support for aggregate functions (Ex: COUNT(*), MIN(...)) -to the SELECT statement.
    • -
    • Added support for SELECT DISTINCT ...
    • -} - -chng {2000 May 30} { -
    • Added the LIKE operator.
    • -
    • Added a GLOB operator: similar to LIKE -but it uses Unix shell globbing wildcards instead of the '%' -and '_' wildcards of SQL.
    • -
    • Added the COPY command patterned after -PostgreSQL so that SQLite -can now read the output of the pg_dump database dump utility -of PostgreSQL.
    • -
    • Added a VACUUM command that calls the gdbm_reorganize() function on the underlying database files.
    • -
    • And many, many bug fixes...
    • -} - -chng {2000 May 29} { -
    • Initial Public Release of Alpha code
    • -} - -puts { -
  • -} -footer {$Id:} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c_interface.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c_interface.html --- sqlite3-3.4.2/www/c_interface.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/c_interface.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,1184 @@ + + +The C language interface to SQLite Version 2 + + + + + +

    The C language interface to SQLite Version 2

    + +

    The SQLite library is designed to be very easy to use from +a C or C++ program. This document gives an overview of the C/C++ +programming interface.

    + +

    1.0 The Core API

    + +

    The interface to the SQLite library consists of three core functions, +one opaque data structure, and some constants used as return values. +The core interface is as follows:

    + +
    +typedef struct sqlite sqlite;
    +#define SQLITE_OK           0   /* Successful result */
    +
    +sqlite *sqlite_open(const char *dbname, int mode, char **errmsg);
    +
    +void sqlite_close(sqlite *db);
    +
    +int sqlite_exec(
    +  sqlite *db,
    +  char *sql,
    +  int (*xCallback)(void*,int,char**,char**),
    +  void *pArg,
    +  char **errmsg
    +);
    +
    + +

    +The above is all you really need to know in order to use SQLite +in your C or C++ programs. There are other interface functions +available (and described below) but we will begin by describing +the core functions shown above. +

    + + +

    1.1 Opening a database

    + +

    Use the sqlite_open function to open an existing SQLite +database or to create a new SQLite database. The first argument +is the database name. The second argument is intended to signal +whether the database is going to be used for reading and writing +or just for reading. But in the current implementation, the +second argument to sqlite_open is ignored. +The third argument is a pointer to a string pointer. +If the third argument is not NULL and an error occurs +while trying to open the database, then an error message will be +written to memory obtained from malloc() and *errmsg will be made +to point to this error message. The calling function is responsible +for freeing the memory when it has finished with it.

    + +

    The name of an SQLite database is the name of a file that will +contain the database. If the file does not exist, SQLite attempts +to create and initialize it. If the file is read-only (due to +permission bits or because it is located on read-only media like +a CD-ROM) then SQLite opens the database for reading only. The +entire SQL database is stored in a single file on the disk. But +additional temporary files may be created during the execution of +an SQL command in order to store the database rollback journal or +temporary and intermediate results of a query.

    + +

    The return value of the sqlite_open function is a +pointer to an opaque sqlite structure. This pointer will +be the first argument to all subsequent SQLite function calls that +deal with the same database. NULL is returned if the open fails +for any reason.

    + + +

    1.2 Closing the database

    + +

    To close an SQLite database, call the sqlite_close +function passing it the sqlite structure pointer that was obtained +from a prior call to sqlite_open. +If a transaction is active when the database is closed, the transaction +is rolled back.
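    As a concrete illustration of the open/close sequence described above, here is a minimal sketch using only sqlite_open and sqlite_close; the filename and error handling are arbitrary placeholders.

      #include <stdio.h>
      #include <stdlib.h>
      #include "sqlite.h"

      int main(void){
        char *errmsg = 0;
        /* The second (mode) argument is currently ignored by sqlite_open. */
        sqlite *db = sqlite_open("example.db", 0, &errmsg);
        if( db==0 ){
          fprintf(stderr, "cannot open database: %s\n",
                  errmsg ? errmsg : "unknown error");
          free(errmsg);              /* the message was obtained from malloc() */
          return 1;
        }
        /* ... run queries here ... */
        sqlite_close(db);            /* any active transaction is rolled back */
        return 0;
      }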

    + + +

    1.3 Executing SQL statements

    + +

    The sqlite_exec function is used to process SQL statements +and queries. This function requires 5 parameters as follows:

    + +
      +
    1. A pointer to the sqlite structure obtained from a prior call to sqlite_open.

    2. A null-terminated string containing the text of one or more SQL statements and/or queries to be processed.

    3. A pointer to a callback function which is invoked once for each row in the result of a query. This argument may be NULL, in which case no callbacks will ever be invoked.

    4. A pointer that is forwarded to become the first argument to the callback function.

    5. A pointer to an error string. Error messages are written to space obtained from malloc() and the error string is made to point to the malloced space. The calling function is responsible for freeing this space when it has finished with it. This argument may be NULL, in which case error messages are not reported back to the calling function.
    + +

    +The callback function is used to receive the results of a query. A +prototype for the callback function is as follows:

    + +
    +int Callback(void *pArg, int argc, char **argv, char **columnNames){
    +  return 0;
    +}
    +
    + + +

The first argument to the callback is just a copy of the fourth argument +to sqlite_exec. This parameter can be used to pass arbitrary +information through to the callback function from client code. +The second argument is the number of columns in the query result. +The third argument is an array of pointers to strings where each string +is a single column of the result for that record. Note that the +callback function reports a NULL value in the database as a NULL pointer, +which is very different from an empty string. If the i-th result column +is an empty string, we will get:

    +
    +argv[i][0] == 0
    +
    +

But if the i-th result column is NULL we will get:

    +
    +argv[i] == 0
    +
    + +

The names of the columns are contained in the first argc +entries of the fourth argument. +If the SHOW_DATATYPES pragma +is on (it is off by default) then +the second argc entries in the 4th argument are the datatypes +for the corresponding columns. +

    + +

    If the +EMPTY_RESULT_CALLBACKS pragma is set to ON and the result of +a query is an empty set, then the callback is invoked once with the +third parameter (argv) set to 0. In other words +

    +argv == 0
    +
    +The second parameter (argc) +and the fourth parameter (columnNames) are still valid +and can be used to determine the number and names of the result +columns if there had been a result. +The default behavior is not to invoke the callback at all if the +result set is empty.

    + + +

    The callback function should normally return 0. If the callback +function returns non-zero, the query is immediately aborted and +sqlite_exec will return SQLITE_ABORT.
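The following sketch pulls the pieces of this section together. The table name "users" is purely illustrative; the callback prints each row, treating a NULL pointer in argv as an SQL NULL.

#include <stdio.h>
#include "sqlite.h"

/* Invoked once per result row; returning non-zero would abort the query
** and make sqlite_exec return SQLITE_ABORT. */
static int printRow(void *pArg, int argc, char **argv, char **columnNames){
  int i;
  for(i=0; i<argc; i++){
    printf("%s = %s\n", columnNames[i], argv[i] ? argv[i] : "NULL");
  }
  printf("\n");
  return 0;
}

static int dumpUsers(sqlite *db){
  char *zErrMsg = 0;
  int rc = sqlite_exec(db, "SELECT * FROM users;", printRow, 0, &zErrMsg);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "SQL error: %s\n", zErrMsg ? zErrMsg : "unknown");
    if( zErrMsg ) sqlite_freemem(zErrMsg);
  }
  return rc;
}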

    + +

    1.4 Error Codes

    + +

    +The sqlite_exec function normally returns SQLITE_OK. But +if something goes wrong it can return a different value to indicate +the type of error. Here is a complete list of the return codes: +

    + +
    +#define SQLITE_OK           0   /* Successful result */
    +#define SQLITE_ERROR        1   /* SQL error or missing database */
    +#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    +#define SQLITE_PERM         3   /* Access permission denied */
    +#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    +#define SQLITE_BUSY         5   /* The database file is locked */
    +#define SQLITE_LOCKED       6   /* A table in the database is locked */
    +#define SQLITE_NOMEM        7   /* A malloc() failed */
    +#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    +#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    +#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    +#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    +#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    +#define SQLITE_FULL        13   /* Insertion failed because database is full */
    +#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    +#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    +#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    +#define SQLITE_SCHEMA      17   /* The database schema changed */
    +#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
+#define SQLITE_CONSTRAINT  19   /* Abort due to constraint violation */
    +#define SQLITE_MISMATCH    20   /* Data type mismatch */
    +#define SQLITE_MISUSE      21   /* Library used incorrectly */
    +#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    +#define SQLITE_AUTH        23   /* Authorization denied */
    +#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    +#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    +
    + +

    +The meanings of these various return values are as follows: +

    + +
    +
    +
    SQLITE_OK
    +

    This value is returned if everything worked and there were no errors. +

    +
    SQLITE_INTERNAL
    +

    This value indicates that an internal consistency check within +the SQLite library failed. This can only happen if there is a bug in +the SQLite library. If you ever get an SQLITE_INTERNAL reply from +an sqlite_exec call, please report the problem on the SQLite +mailing list. +

    +
    SQLITE_ERROR
    +

This return value indicates that there was an error in the SQL +that was passed into sqlite_exec. +

    +
    SQLITE_PERM
    +

    This return value says that the access permissions on the database +file are such that the file cannot be opened. +

    +
    SQLITE_ABORT
    +

    This value is returned if the callback function returns non-zero. +

    +
    SQLITE_BUSY
    +

    This return code indicates that another program or thread has +the database locked. SQLite allows two or more threads to read the +database at the same time, but only one thread can have the database +open for writing at the same time. Locking in SQLite is on the +entire database.

    +

    +
    SQLITE_LOCKED
    +

    This return code is similar to SQLITE_BUSY in that it indicates +that the database is locked. But the source of the lock is a recursive +call to sqlite_exec. This return can only occur if you attempt +to invoke sqlite_exec from within a callback routine of a query +from a prior invocation of sqlite_exec. Recursive calls to +sqlite_exec are allowed as long as they do +not attempt to write the same table. +

    +
    SQLITE_NOMEM
    +

    This value is returned if a call to malloc fails. +

    +
    SQLITE_READONLY
    +

    This return code indicates that an attempt was made to write to +a database file that is opened for reading only. +

    +
    SQLITE_INTERRUPT
    +

    This value is returned if a call to sqlite_interrupt +interrupts a database operation in progress. +

    +
    SQLITE_IOERR
    +

    This value is returned if the operating system informs SQLite +that it is unable to perform some disk I/O operation. This could mean +that there is no more space left on the disk. +

    +
    SQLITE_CORRUPT
    +

This value is returned if SQLite detects that the database it is +working on has become corrupted. Corruption might occur due to a rogue +process writing to the database file or it might happen due to a +previously undetected logic error in SQLite. This value is also +returned if a disk I/O error occurs in such a way that SQLite is forced +to leave the database file in a corrupted state. The latter should only +happen due to a hardware or operating system malfunction. +

    +
    SQLITE_FULL
    +

    This value is returned if an insertion failed because there is +no space left on the disk, or the database is too big to hold any +more information. The latter case should only occur for databases +that are larger than 2GB in size. +

    +
    SQLITE_CANTOPEN
    +

    This value is returned if the database file could not be opened +for some reason. +

    +
    SQLITE_PROTOCOL
    +

    This value is returned if some other process is messing with +file locks and has violated the file locking protocol that SQLite uses +on its rollback journal files. +

    +
    SQLITE_SCHEMA
    +

When the database is first opened, SQLite reads the database schema +into memory and uses that schema to parse new SQL statements. If another +process changes the schema, the command currently being processed will +abort because the virtual machine code generated assumed the old +schema. This is the return code for such cases. Retrying the +command will usually clear the problem. +

    +
    SQLITE_TOOBIG
    +

    SQLite will not store more than about 1 megabyte of data in a single +row of a single table. If you attempt to store more than 1 megabyte +in a single row, this is the return code you get. +

    +
    SQLITE_CONSTRAINT
    +

    This constant is returned if the SQL statement would have violated +a database constraint. +

    +
    SQLITE_MISMATCH
    +

    This error occurs when there is an attempt to insert non-integer +data into a column labeled INTEGER PRIMARY KEY. For most columns, SQLite +ignores the data type and allows any kind of data to be stored. But +an INTEGER PRIMARY KEY column is only allowed to store integer data. +

    +
    SQLITE_MISUSE
    +

    This error might occur if one or more of the SQLite API routines +is used incorrectly. Examples of incorrect usage include calling +sqlite_exec after the database has been closed using +sqlite_close or +calling sqlite_exec with the same +database pointer simultaneously from two separate threads. +

    +
    SQLITE_NOLFS
    +

This error means that you have attempted to create or access a +database file that is larger than 2GB on a legacy Unix machine that +lacks large file support. +

    +
    SQLITE_AUTH
    +

    This error indicates that the authorizer callback +has disallowed the SQL you are attempting to execute. +

    +
    SQLITE_ROW
    +

    This is one of the return codes from the +sqlite_step routine which is part of the non-callback API. +It indicates that another row of result data is available. +

    +
    SQLITE_DONE
    +

    This is one of the return codes from the +sqlite_step routine which is part of the non-callback API. +It indicates that the SQL statement has been completely executed and +the sqlite_finalize routine is ready to be called. +

    +
    +
    + +

    2.0 Accessing Data Without Using A Callback Function

    + +

    +The sqlite_exec routine described above used to be the only +way to retrieve data from an SQLite database. But many programmers found +it inconvenient to use a callback function to obtain results. So beginning +with SQLite version 2.7.7, a second access interface is available that +does not use callbacks. +

    + +

    +The new interface uses three separate functions to replace the single +sqlite_exec function. +

    + +
    +typedef struct sqlite_vm sqlite_vm;
    +
    +int sqlite_compile(
    +  sqlite *db,              /* The open database */
    +  const char *zSql,        /* SQL statement to be compiled */
    +  const char **pzTail,     /* OUT: uncompiled tail of zSql */
    +  sqlite_vm **ppVm,        /* OUT: the virtual machine to execute zSql */
    +  char **pzErrmsg          /* OUT: Error message. */
    +);
    +
    +int sqlite_step(
    +  sqlite_vm *pVm,          /* The virtual machine to execute */
    +  int *pN,                 /* OUT: Number of columns in result */
    +  const char ***pazValue,  /* OUT: Column data */
    +  const char ***pazColName /* OUT: Column names and datatypes */
    +);
    +
    +int sqlite_finalize(
    +  sqlite_vm *pVm,          /* The virtual machine to be finalized */
    +  char **pzErrMsg          /* OUT: Error message */
    +);
    +
    + +

    +The strategy is to compile a single SQL statement using +sqlite_compile then invoke sqlite_step multiple times, +once for each row of output, and finally call sqlite_finalize +to clean up after the SQL has finished execution. +
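A minimal sketch of that strategy is shown below. The helper name runStatement and the sleep interval are illustrative only; the retry on SQLITE_BUSY follows the advice given in section 2.2.

#include <stdio.h>
#include <unistd.h>   /* usleep(), for the SQLITE_BUSY retry (Unix only) */
#include "sqlite.h"

/* Execute one SQL statement using the non-callback interface. */
static int runStatement(sqlite *db, const char *zSql){
  sqlite_vm *pVm = 0;
  const char *zTail = 0;
  char *zErrMsg = 0;
  const char **azValue, **azColName;
  int rc, nCol, i;

  rc = sqlite_compile(db, zSql, &zTail, &pVm, &zErrMsg);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "compile error: %s\n", zErrMsg ? zErrMsg : "?");
    if( zErrMsg ) sqlite_freemem(zErrMsg);
    return rc;
  }
  for(;;){
    rc = sqlite_step(pVm, &nCol, &azValue, &azColName);
    if( rc==SQLITE_BUSY ){ usleep(100000); continue; }  /* wait for the lock */
    if( rc!=SQLITE_ROW ) break;                         /* SQLITE_DONE or error */
    for(i=0; i<nCol; i++){
      printf("%s = %s\n", azColName[i], azValue[i] ? azValue[i] : "NULL");
    }
  }
  /* sqlite_finalize() reports the overall result, as sqlite_exec() would. */
  rc = sqlite_finalize(pVm, &zErrMsg);
  if( rc!=SQLITE_OK && zErrMsg ){
    fprintf(stderr, "error: %s\n", zErrMsg);
    sqlite_freemem(zErrMsg);
  }
  return rc;
}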

    + +

    2.1 Compiling An SQL Statement Into A Virtual Machine

    + +

+The sqlite_compile function "compiles" a single SQL statement (specified +by the second parameter) and generates a virtual machine that is able +to execute that statement. +As with most interface routines, the first parameter must be a pointer +to an sqlite structure that was obtained from a prior call to +sqlite_open. + +

+A pointer to the virtual machine is stored in the location that the +4th parameter points to. +Space to hold the virtual machine is dynamically allocated. To avoid +a memory leak, the calling function must invoke +sqlite_finalize on the virtual machine after it has finished +with it. +The pointer written through the 4th parameter may be set to NULL if an +error is encountered during compilation. +

    + +

    +If any errors are encountered during compilation, an error message is +written into memory obtained from malloc and the 5th parameter +is made to point to that memory. If the 5th parameter is NULL, then +no error message is generated. If the 5th parameter is not NULL, then +the calling function should dispose of the memory containing the error +message by calling sqlite_freemem. +

    + +

    +If the 2nd parameter actually contains two or more statements of SQL, +only the first statement is compiled. (This is different from the +behavior of sqlite_exec which executes all SQL statements +in its input string.) The 3rd parameter to sqlite_compile +is made to point to the first character beyond the end of the first +statement of SQL in the input. If the 2nd parameter contains only +a single SQL statement, then the 3rd parameter will be made to point +to the '\000' terminator at the end of the 2nd parameter. +

    + +

+On success, sqlite_compile returns SQLITE_OK. +Otherwise an error code is returned. +

    + +

    2.2 Step-By-Step Execution Of An SQL Statement

    + +

+After a virtual machine has been generated using sqlite_compile, +it is executed by one or more calls to sqlite_step. Each +invocation of sqlite_step, except the last one, +returns a single row of the result. +The number of columns in the result is stored in the integer that +the 2nd parameter points to. +The pointer specified by the 3rd parameter is made to point +to an array of pointers to column values. +The pointer in the 4th parameter is made to point to an array +of pointers to column names and datatypes. +The 2nd through 4th parameters to sqlite_step convey the +same information as the 2nd through 4th parameters of the +callback routine when using +the sqlite_exec interface, except that with sqlite_step +the column datatype information is always included in the +4th parameter regardless of whether or not the +SHOW_DATATYPES pragma +is on or off. +

    + +

    +Each invocation of sqlite_step returns an integer code that +indicates what happened during that step. This code may be +SQLITE_BUSY, SQLITE_ROW, SQLITE_DONE, SQLITE_ERROR, or +SQLITE_MISUSE. +

    + +

    +If the virtual machine is unable to open the database file because +it is locked by another thread or process, sqlite_step +will return SQLITE_BUSY. The calling function should do some other +activity, or sleep, for a short amount of time to give the lock a +chance to clear, then invoke sqlite_step again. This can +be repeated as many times as desired. +

    + +

+Whenever another row of result data is available, +sqlite_step will return SQLITE_ROW. The row data is +stored in an array of pointers to strings and the 3rd parameter +is made to point to this array. +

    + +

    +When all processing is complete, sqlite_step will return +either SQLITE_DONE or SQLITE_ERROR. SQLITE_DONE indicates that the +statement completed successfully and SQLITE_ERROR indicates that there +was a run-time error. (The details of the error are obtained from +sqlite_finalize.) It is a misuse of the library to attempt +to call sqlite_step again after it has returned SQLITE_DONE +or SQLITE_ERROR. +

    + +

+When sqlite_step returns SQLITE_DONE or SQLITE_ERROR, +the *pN and *pazColName values are set to the number of columns +in the result set and to the names of the columns, just as they +are for an SQLITE_ROW return. This allows the calling code to +find the number of result columns and the column names and datatypes +even if the result set is empty. The *pazValue parameter is always +set to NULL when the return code is SQLITE_DONE or SQLITE_ERROR. +If the SQL being executed is a statement that does not +return a result (such as an INSERT or an UPDATE) then *pN will +be set to zero and *pazColName will be set to NULL. +

    + +

+If you abuse the library by trying to call sqlite_step +inappropriately, it will attempt to return SQLITE_MISUSE. +This can happen if you call sqlite_step() on the same virtual machine +at the same +time from two or more threads or if you call sqlite_step() +again after it returned SQLITE_DONE or SQLITE_ERROR or if you +pass in an invalid virtual machine pointer to sqlite_step(). +You should not depend on the SQLITE_MISUSE return code to indicate +an error. It is possible that a misuse of the interface will go +undetected and result in a program crash. The SQLITE_MISUSE return code is +intended as a debugging aid only - to help you detect incorrect +usage prior to a mishap. The misuse detection logic is not guaranteed +to work in every case. +

    + +

    2.3 Deleting A Virtual Machine

    + +

    +Every virtual machine that sqlite_compile creates should +eventually be handed to sqlite_finalize. The sqlite_finalize() +procedure deallocates the memory and other resources that the virtual +machine uses. Failure to call sqlite_finalize() will result in +resource leaks in your program. +

    + +

    +The sqlite_finalize routine also returns the result code +that indicates success or failure of the SQL operation that the +virtual machine carried out. +The value returned by sqlite_finalize() will be the same as would +have been returned had the same SQL been executed by sqlite_exec. +The error message returned will also be the same. +

    + +

    +It is acceptable to call sqlite_finalize on a virtual machine +before sqlite_step has returned SQLITE_DONE. Doing so has +the effect of interrupting the operation in progress. Partially completed +changes will be rolled back and the database will be restored to its +original state (unless an alternative recovery algorithm is selected using +an ON CONFLICT clause in the SQL being executed.) The effect is the +same as if a callback function of sqlite_exec had returned +non-zero. +

    + +

    +It is also acceptable to call sqlite_finalize on a virtual machine +that has never been passed to sqlite_step even once. +

    + +

    3.0 The Extended API

    + +

    Only the three core routines described in section 1.0 are required to use +SQLite. But there are many other functions that provide +useful interfaces. These extended routines are as follows: +

    + +
    +int sqlite_last_insert_rowid(sqlite*);
    +
    +int sqlite_changes(sqlite*);
    +
    +int sqlite_get_table(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg
    +);
    +
    +void sqlite_free_table(char**);
    +
    +void sqlite_interrupt(sqlite*);
    +
    +int sqlite_complete(const char *sql);
    +
    +void sqlite_busy_handler(sqlite*, int (*)(void*,const char*,int), void*);
    +
    +void sqlite_busy_timeout(sqlite*, int ms);
    +
    +const char sqlite_version[];
    +
    +const char sqlite_encoding[];
    +
    +int sqlite_exec_printf(
    +  sqlite*,
    +  char *sql,
    +  int (*)(void*,int,char**,char**),
    +  void*,
    +  char **errmsg,
    +  ...
    +);
    +
    +int sqlite_exec_vprintf(
    +  sqlite*,
    +  char *sql,
    +  int (*)(void*,int,char**,char**),
    +  void*,
    +  char **errmsg,
    +  va_list
    +);
    +
    +int sqlite_get_table_printf(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg,
    +  ...
    +);
    +
    +int sqlite_get_table_vprintf(
    +  sqlite*,
    +  char *sql,
    +  char ***result,
    +  int *nrow,
    +  int *ncolumn,
    +  char **errmsg,
    +  va_list
    +);
    +
    +char *sqlite_mprintf(const char *zFormat, ...);
    +
    +char *sqlite_vmprintf(const char *zFormat, va_list);
    +
    +void sqlite_freemem(char*);
    +
    +void sqlite_progress_handler(sqlite*, int, int (*)(void*), void*);
    +
    +
    + +

    All of the above definitions are included in the "sqlite.h" +header file that comes in the source tree.

    + +

    3.1 The ROWID of the most recent insert

    + +

    Every row of an SQLite table has a unique integer key. If the +table has a column labeled INTEGER PRIMARY KEY, then that column +serves as the key. If there is no INTEGER PRIMARY KEY column then +the key is a unique integer. The key for a row can be accessed in +a SELECT statement or used in a WHERE or ORDER BY clause using any +of the names "ROWID", "OID", or "_ROWID_".

    + +

    When you do an insert into a table that does not have an INTEGER PRIMARY +KEY column, or if the table does have an INTEGER PRIMARY KEY but the value +for that column is not specified in the VALUES clause of the insert, then +the key is automatically generated. You can find the value of the key +for the most recent INSERT statement using the +sqlite_last_insert_rowid API function.
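For example, the following hypothetical helper inserts a row using sqlite_exec_printf (described in section 3.9) and then reports the key that was generated for it. The table and column names are placeholders.

#include <stdio.h>
#include "sqlite.h"

static void addUser(sqlite *db, const char *zName){
  char *zErrMsg = 0;
  int rc = sqlite_exec_printf(db,
      "INSERT INTO users(employee_name) VALUES('%q')",
      0, 0, &zErrMsg, zName);
  if( rc==SQLITE_OK ){
    printf("new ROWID is %d\n", sqlite_last_insert_rowid(db));
  }else{
    fprintf(stderr, "insert failed: %s\n", zErrMsg ? zErrMsg : "?");
    if( zErrMsg ) sqlite_freemem(zErrMsg);
  }
}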

    + +

    3.2 The number of rows that changed

    + +

The sqlite_changes API function returns the number of rows +that have been inserted, deleted, or modified since the database was +last quiescent. A "quiescent" database is one in which there are +no outstanding calls to sqlite_exec and no VMs created by +sqlite_compile that have not been finalized by sqlite_finalize. +In common usage, sqlite_changes returns the number +of rows inserted, deleted, or modified by the most recent sqlite_exec +call or since the most recent sqlite_compile. But if you have +nested calls to sqlite_exec (that is, if the callback routine +of one sqlite_exec invokes another sqlite_exec) or if +you invoke sqlite_compile to create a new VM while there is +still another VM in existence, then +the meaning of the number returned by sqlite_changes is more +complex. +The number reported includes any changes +that were later undone by a ROLLBACK or ABORT. But rows that are +deleted because of a DROP TABLE are not counted.

    + +

    SQLite implements the command "DELETE FROM table" (without +a WHERE clause) by dropping the table then recreating it. +This is much faster than deleting the elements of the table individually. +But it also means that the value returned from sqlite_changes +will be zero regardless of the number of elements that were originally +in the table. If an accurate count of the number of elements deleted +is necessary, use "DELETE FROM table WHERE 1" instead.

    + +

    3.3 Querying into memory obtained from malloc()

    + +

    The sqlite_get_table function is a wrapper around +sqlite_exec that collects all the information from successive +callbacks and writes it into memory obtained from malloc(). This +is a convenience function that allows the application to get the +entire result of a database query with a single function call.

    + +

    The main result from sqlite_get_table is an array of pointers +to strings. There is one element in this array for each column of +each row in the result. NULL results are represented by a NULL +pointer. In addition to the regular data, there is an added row at the +beginning of the array that contains the name of each column of the +result.

    + +

    As an example, consider the following query:

    + +
    +SELECT employee_name, login, host FROM users WHERE login LIKE 'd%'; +
    + +

    This query will return the name, login and host computer name +for every employee whose login begins with the letter "d". If this +query is submitted to sqlite_get_table the result might +look like this:

    + +
    +nrow = 2
    +ncolumn = 3
    +result[0] = "employee_name"
    +result[1] = "login"
    +result[2] = "host"
    +result[3] = "dummy"
    +result[4] = "No such user"
    +result[5] = 0
    +result[6] = "D. Richard Hipp"
    +result[7] = "drh"
    +result[8] = "zadok" +
    + +

    Notice that the "host" value for the "dummy" record is NULL so +the result[] array contains a NULL pointer at that slot.

    + +

If the result set of a query is empty, then by default +sqlite_get_table will set nrow to 0 and leave its +result parameter set to NULL. But if the EMPTY_RESULT_CALLBACKS +pragma is ON then the result parameter is initialized to the names +of the columns only. For example, consider this query which has +an empty result set:

    + +
    +SELECT employee_name, login, host FROM users WHERE employee_name IS NULL; +
    + +

+The default behavior gives these results: +

    + +
    +nrow = 0
    +ncolumn = 0
    +result = 0
    +
    + +

    +But if the EMPTY_RESULT_CALLBACKS pragma is ON, then the following +is returned: +

    + +
    +nrow = 0
    +ncolumn = 3
    +result[0] = "employee_name"
    +result[1] = "login"
    +result[2] = "host"
    +
    + +

    Memory to hold the information returned by sqlite_get_table +is obtained from malloc(). But the calling function should not try +to free this information directly. Instead, pass the complete table +to sqlite_free_table when the table is no longer needed. +It is safe to call sqlite_free_table with a NULL pointer such +as would be returned if the result set is empty.

    + +

    The sqlite_get_table routine returns the same integer +result code as sqlite_exec.
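Putting this together, the sketch below runs the query from the example above, walks the malloc()ed table, and releases it. Remember that the first ncolumn entries hold the column names, so data row i begins at index i*ncolumn.

#include <stdio.h>
#include "sqlite.h"

static void listUsers(sqlite *db){
  char **azResult = 0;
  char *zErrMsg = 0;
  int nRow = 0, nCol = 0, i, j;
  int rc = sqlite_get_table(db,
      "SELECT employee_name, login, host FROM users WHERE login LIKE 'd%';",
      &azResult, &nRow, &nCol, &zErrMsg);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "query failed: %s\n", zErrMsg ? zErrMsg : "?");
    if( zErrMsg ) sqlite_freemem(zErrMsg);
    return;
  }
  /* Row 0 of azResult holds the column names; data rows follow. */
  for(i=1; i<=nRow; i++){
    for(j=0; j<nCol; j++){
      char *zVal = azResult[i*nCol + j];
      printf("%s = %s\n", azResult[j], zVal ? zVal : "NULL");
    }
  }
  sqlite_free_table(azResult);  /* release the whole table in one call */
}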

    + +

    3.4 Interrupting an SQLite operation

    + +

    The sqlite_interrupt function can be called from a +different thread or from a signal handler to cause the current database +operation to exit at its first opportunity. When this happens, +the sqlite_exec routine (or the equivalent) that started +the database operation will return SQLITE_INTERRUPT.

    + +

    3.5 Testing for a complete SQL statement

    + +

The next interface routine to SQLite is a convenience function used +to test whether or not a string forms a complete SQL statement. +If the sqlite_complete function returns true when passed +a string, then that string forms a complete SQL statement. +There are no guarantees that the syntax of that statement is correct, +but we at least know the statement is complete. If sqlite_complete +returns false, then more text is required to complete the SQL statement.

    + +

    For the purpose of the sqlite_complete function, an SQL +statement is complete if it ends in a semicolon.

    + +

    The sqlite command-line utility uses the sqlite_complete +function to know when it needs to call sqlite_exec. After each +line of input is received, sqlite calls sqlite_complete +on all input in its buffer. If sqlite_complete returns true, +then sqlite_exec is called and the input buffer is reset. If +sqlite_complete returns false, then the prompt is changed to +the continuation prompt and another line of text is read and added to +the input buffer.
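A stripped-down version of that loop might look like the following sketch. The fixed-size buffer and the ignored error returns are simplifications for illustration only.

#include <stdio.h>
#include <string.h>
#include "sqlite.h"

static void simpleShell(sqlite *db){
  char zBuf[1000];
  char zLine[200];
  zBuf[0] = 0;
  while( fgets(zLine, sizeof(zLine), stdin) ){
    if( strlen(zBuf)+strlen(zLine) >= sizeof(zBuf) ) break;  /* sketch only */
    strcat(zBuf, zLine);            /* accumulate another line of input */
    if( sqlite_complete(zBuf) ){
      sqlite_exec(db, zBuf, 0, 0, 0);  /* errors ignored in this sketch */
      zBuf[0] = 0;                     /* reset the statement buffer */
    }
  }
}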

    + +

    3.6 Library version string

    + +

    The SQLite library exports the string constant named +sqlite_version which contains the version number of the +library. The header file contains a macro SQLITE_VERSION +with the same information. If desired, a program can compare +the SQLITE_VERSION macro against the sqlite_version +string constant to verify that the version number of the +header file and the library match.
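Such a check might look like this:

#include <stdio.h>
#include <string.h>
#include "sqlite.h"

/* Warn if the header used at compile time and the linked library disagree. */
static void checkVersion(void){
  if( strcmp(SQLITE_VERSION, sqlite_version)!=0 ){
    fprintf(stderr, "header is %s but library is %s\n",
            SQLITE_VERSION, sqlite_version);
  }
}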

    + +

    3.7 Library character encoding

    + +

    By default, SQLite assumes that all data uses a fixed-size +8-bit character (iso8859). But if you give the --enable-utf8 option +to the configure script, then the library assumes UTF-8 variable +sized characters. This makes a difference for the LIKE and GLOB +operators and the LENGTH() and SUBSTR() functions. The static +string sqlite_encoding will be set to either "UTF-8" or +"iso8859" to indicate how the library was compiled. In addition, +the sqlite.h header file will define one of the +macros SQLITE_UTF8 or SQLITE_ISO8859, as appropriate.

    + +

    Note that the character encoding mechanism used by SQLite cannot +be changed at run-time. This is a compile-time option only. The +sqlite_encoding character string just tells you how the library +was compiled.

    + +

    3.8 Changing the library's response to locked files

    + +

    The sqlite_busy_handler procedure can be used to register +a busy callback with an open SQLite database. The busy callback will +be invoked whenever SQLite tries to access a database that is locked. +The callback will typically do some other useful work, or perhaps sleep, +in order to give the lock a chance to clear. If the callback returns +non-zero, then SQLite tries again to access the database and the cycle +repeats. If the callback returns zero, then SQLite aborts the current +operation and returns SQLITE_BUSY.

    + +

    The arguments to sqlite_busy_handler are the opaque +structure returned from sqlite_open, a pointer to the busy +callback function, and a generic pointer that will be passed as +the first argument to the busy callback. When SQLite invokes the +busy callback, it sends it three arguments: the generic pointer +that was passed in as the third argument to sqlite_busy_handler, +the name of the database table or index that the library is trying +to access, and the number of times that the library has attempted to +access the database table or index.

    + +

    For the common case where we want the busy callback to sleep, +the SQLite library provides a convenience routine sqlite_busy_timeout. +The first argument to sqlite_busy_timeout is a pointer to +an open SQLite database and the second argument is a number of milliseconds. +After sqlite_busy_timeout has been executed, the SQLite library +will wait for the lock to clear for at least the number of milliseconds +specified before it returns SQLITE_BUSY. Specifying zero milliseconds for +the timeout restores the default behavior.
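The sketch below shows both approaches; the retry limit of 10 and the 2000 millisecond timeout are arbitrary values chosen for illustration.

#include "sqlite.h"

/* Retry while a lock is held, but give up after 10 attempts.
** Returning non-zero asks SQLite to try again; zero yields SQLITE_BUSY. */
static int busyCallback(void *pData, const char *zTable, int nTries){
  return nTries < 10;
}

static void configureLocking(sqlite *db){
  /* Either install the callback above ... */
  sqlite_busy_handler(db, busyCallback, 0);
  /* ... or simply wait up to 2000 ms before returning SQLITE_BUSY. */
  sqlite_busy_timeout(db, 2000);
}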

    + +

    3.9 Using the _printf() wrapper functions

    + +

    The four utility functions

    + +

    +

• sqlite_exec_printf()
• sqlite_exec_vprintf()
• sqlite_get_table_printf()
• sqlite_get_table_vprintf()

    + +

    implement the same query functionality as sqlite_exec +and sqlite_get_table. But instead of taking a complete +SQL statement as their second argument, the four _printf +routines take a printf-style format string. The SQL statement to +be executed is generated from this format string and from whatever +additional arguments are attached to the end of the function call.

    + +

There are two advantages to using the SQLite printf +functions instead of sprintf. First of all, with the +SQLite printf routines, there is never a danger of overflowing a +static buffer as there is with sprintf. The SQLite +printf routines automatically allocate (and later free) +as much memory as is +necessary to hold the SQL statements generated.

    + +

The second advantage the SQLite printf routines have over +sprintf is two new formatting options specifically designed +to support string literals in SQL. Within the format string, +the %q formatting option works very much like %s in that it +reads a null-terminated string from the argument list and inserts +it into the result. But %q translates the inserted string by +making two copies of every single-quote (') character in the +substituted string. This has the effect of escaping the end-of-string +meaning of single-quote within a string literal. The %Q formatting +option works similarly; it translates the single-quotes like %q and +additionally encloses the resulting string in single-quotes. +If the argument for the %Q formatting option is a NULL pointer, +the resulting text is the keyword NULL with no surrounding quotes. +

    + +

    Consider an example. Suppose you are trying to insert a string +value into a database table where the string value was obtained from +user input. Suppose the string to be inserted is stored in a variable +named zString. The code to do the insertion might look like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES('%s')",
    +  0, 0, 0, zString);
    +
    + +

    If the zString variable holds text like "Hello", then this statement +will work just fine. But suppose the user enters a string like +"Hi y'all!". The SQL statement generated reads as follows: + +

    +INSERT INTO table1 VALUES('Hi y'all')
    +
    + +

This is not valid SQL because of the apostrophe in the word "y'all". +But if the %q formatting option is used instead of %s, like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES('%q')",
    +  0, 0, 0, zString);
    +
    + +

    Then the generated SQL will look like the following:

    + +
    +INSERT INTO table1 VALUES('Hi y''all')
    +
    + +

Here the apostrophe has been escaped and the SQL statement is well-formed. +When generating SQL on-the-fly from data that might contain a +single-quote character ('), it is always a good idea to use the +SQLite printf routines and the %q formatting option instead of sprintf. +

    + +

    If the %Q formatting option is used instead of %q, like this:

    + +
    +sqlite_exec_printf(db,
    +  "INSERT INTO table1 VALUES(%Q)",
    +  0, 0, 0, zString);
    +
    + +

    Then the generated SQL will look like the following:

    + +
    +INSERT INTO table1 VALUES('Hi y''all')
    +
    + +

    If the value of the zString variable is NULL, the generated SQL +will look like the following:

    + +
    +INSERT INTO table1 VALUES(NULL)
    +
    + +

    All of the _printf() routines above are built around the following +two functions:

    + +
    +char *sqlite_mprintf(const char *zFormat, ...);
    +char *sqlite_vmprintf(const char *zFormat, va_list);
    +
    + +

The sqlite_mprintf() routine works like the standard library +sprintf() except that it writes its results into memory obtained +from malloc() and returns a pointer to the malloced buffer. +sqlite_mprintf() also understands the %q and %Q extensions described +above. The sqlite_vmprintf() routine is a varargs version of the same +routine. The string pointer that these routines return should be freed +by passing it to sqlite_freemem(). +
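For instance, a hypothetical helper might build and run a statement like this, releasing the malloc()ed string afterwards. The table and column names are placeholders.

#include "sqlite.h"

static int deleteUser(sqlite *db, const char *zLogin){
  /* %Q quotes the value, or substitutes the keyword NULL for a NULL pointer. */
  char *zSql = sqlite_mprintf("DELETE FROM users WHERE login=%Q", zLogin);
  int rc = sqlite_exec(db, zSql, 0, 0, 0);
  sqlite_freemem(zSql);   /* release the generated SQL text */
  return rc;
}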

    + +

    3.10 Performing background jobs during large queries

    + +

    The sqlite_progress_handler() routine can be used to register a +callback routine with an SQLite database to be invoked periodically during long +running calls to sqlite_exec(), sqlite_step() and the various +wrapper functions. +

    + +

    The callback is invoked every N virtual machine operations, where N is +supplied as the second argument to sqlite_progress_handler(). The third +and fourth arguments to sqlite_progress_handler() are a pointer to the +routine to be invoked and a void pointer to be passed as the first argument to +it. +

    + +

The time taken to execute each virtual machine operation can vary based on +many factors. A typical value for a 1 GHz PC is between half a million and +three million operations per second but may be much higher or lower, depending +on the query. As such it +is difficult to schedule background operations based on virtual machine +operations. Instead, it is recommended that a callback be scheduled relatively +frequently (say every 1000 instructions) and external timer routines used to +determine whether or not background jobs need to be run. +
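Registering such a callback might look like the following sketch; printing a dot is just a stand-in for whatever background work (GUI events, timer checks) an application actually needs to do.

#include <stdio.h>
#include "sqlite.h"

/* Called every 1000 virtual machine operations once registered below. */
static int showProgress(void *pData){
  fputc('.', stderr);
  return 0;
}

static void enableProgress(sqlite *db){
  sqlite_progress_handler(db, 1000, showProgress, 0);
}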

    + + +

    4.0 Adding New SQL Functions

    + +

    Beginning with version 2.4.0, SQLite allows the SQL language to be +extended with new functions implemented as C code. The following interface +is used: +

    + +
    +typedef struct sqlite_func sqlite_func;
    +
    +int sqlite_create_function(
    +  sqlite *db,
    +  const char *zName,
    +  int nArg,
    +  void (*xFunc)(sqlite_func*,int,const char**),
    +  void *pUserData
    +);
    +int sqlite_create_aggregate(
    +  sqlite *db,
    +  const char *zName,
    +  int nArg,
    +  void (*xStep)(sqlite_func*,int,const char**),
    +  void (*xFinalize)(sqlite_func*),
    +  void *pUserData
    +);
    +
    +char *sqlite_set_result_string(sqlite_func*,const char*,int);
    +void sqlite_set_result_int(sqlite_func*,int);
    +void sqlite_set_result_double(sqlite_func*,double);
    +void sqlite_set_result_error(sqlite_func*,const char*,int);
    +
    +void *sqlite_user_data(sqlite_func*);
    +void *sqlite_aggregate_context(sqlite_func*, int nBytes);
    +int sqlite_aggregate_count(sqlite_func*);
    +
    + +

    +The sqlite_create_function() interface is used to create +regular functions and sqlite_create_aggregate() is used to +create new aggregate functions. In both cases, the db +parameter is an open SQLite database on which the functions should +be registered, zName is the name of the new function, +nArg is the number of arguments, and pUserData is +a pointer which is passed through unchanged to the C implementation +of the function. Both routines return 0 on success and non-zero +if there are any errors. +

    + +

    +The length of a function name may not exceed 255 characters. +Any attempt to create a function whose name exceeds 255 characters +in length will result in an error. +

    + +

    +For regular functions, the xFunc callback is invoked once +for each function call. The implementation of xFunc should call +one of the sqlite_set_result_... interfaces to return its +result. The sqlite_user_data() routine can be used to +retrieve the pUserData pointer that was passed in when the +function was registered. +
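As an illustration, here is a hypothetical scalar function half(X) that returns its argument divided by two. Arguments arrive as strings through the const char** parameter (with a NULL pointer presumably standing for an SQL NULL, as in the query callback), so the sketch converts with atof() and reports an error, giving its length explicitly, when the argument is missing.

#include <stdlib.h>
#include <string.h>
#include "sqlite.h"

/* half(X): return X/2 as a floating point value. */
static void halfFunc(sqlite_func *context, int argc, const char **argv){
  if( argc!=1 || argv[0]==0 ){
    const char *zErr = "half() requires one non-NULL argument";
    sqlite_set_result_error(context, zErr, (int)strlen(zErr));
    return;
  }
  sqlite_set_result_double(context, atof(argv[0])/2.0);
}

/* Make half() available to SQL statements on this connection. */
static int registerHalf(sqlite *db){
  return sqlite_create_function(db, "half", 1, halfFunc, 0);
}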

    + +

    +For aggregate functions, the xStep callback is invoked once +for each row in the result and then xFinalize is invoked at the +end to compute a final answer. The xStep routine can use the +sqlite_aggregate_context() interface to allocate memory that +will be unique to that particular instance of the SQL function. +This memory will be automatically deleted after xFinalize is called. +The sqlite_aggregate_count() routine can be used to find out +how many rows of data were passed to the aggregate. The xFinalize +callback should invoke one of the sqlite_set_result_... +interfaces to set the final result of the aggregate. +

    + +

    +SQLite now implements all of its built-in functions using this +interface. For additional information and examples on how to create +new SQL functions, review the SQLite source code in the file +func.c. +

    + +

    5.0 Multi-Threading And SQLite

    + +

    +If SQLite is compiled with the THREADSAFE preprocessor macro set to 1, +then it is safe to use SQLite from two or more threads of the same process +at the same time. But each thread should have its own sqlite* +pointer returned from sqlite_open. It is never safe for two +or more threads to access the same sqlite* pointer at the same time. +

    + +

+In precompiled SQLite libraries available on the website, the Unix +versions are compiled with THREADSAFE turned off but the Windows +versions are compiled with THREADSAFE turned on. If you need something +different from this you will have to recompile. +

    + +

    +Under Unix, an sqlite* pointer should not be carried across a +fork() system call into the child process. The child process +should open its own copy of the database after the fork(). +

    + +

    6.0 Usage Examples

    + +

    For examples of how the SQLite C/C++ interface can be used, +refer to the source code for the sqlite program in the +file src/shell.c of the source tree. +Additional information about sqlite is available at +sqlite.html. +See also the sources to the Tcl interface for SQLite in +the source file src/tclsqlite.c.

    +
    +This page last modified 2008/11/01 13:26:49 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/c_interface.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/c_interface.tcl --- sqlite3-3.4.2/www/c_interface.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/c_interface.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,1116 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: c_interface.tcl,v 1.43 2004/11/19 11:59:24 danielk1977 Exp $} -source common.tcl -header {The C language interface to the SQLite library} -puts { -

    The C language interface to the SQLite library

    - -

    The SQLite library is designed to be very easy to use from -a C or C++ program. This document gives an overview of the C/C++ -programming interface.

    - -

    1.0 The Core API

    - -

    The interface to the SQLite library consists of three core functions, -one opaque data structure, and some constants used as return values. -The core interface is as follows:

    - -
    -typedef struct sqlite sqlite;
    -#define SQLITE_OK           0   /* Successful result */
    -
    -sqlite *sqlite_open(const char *dbname, int mode, char **errmsg);
    -
    -void sqlite_close(sqlite *db);
    -
    -int sqlite_exec(
    -  sqlite *db,
    -  char *sql,
    -  int (*xCallback)(void*,int,char**,char**),
    -  void *pArg,
    -  char **errmsg
    -);
    -
    - -

    -The above is all you really need to know in order to use SQLite -in your C or C++ programs. There are other interface functions -available (and described below) but we will begin by describing -the core functions shown above. -

    - - -

    1.1 Opening a database

    - -

    Use the sqlite_open function to open an existing SQLite -database or to create a new SQLite database. The first argument -is the database name. The second argument is intended to signal -whether the database is going to be used for reading and writing -or just for reading. But in the current implementation, the -second argument to sqlite_open is ignored. -The third argument is a pointer to a string pointer. -If the third argument is not NULL and an error occurs -while trying to open the database, then an error message will be -written to memory obtained from malloc() and *errmsg will be made -to point to this error message. The calling function is responsible -for freeing the memory when it has finished with it.

    - -

    The name of an SQLite database is the name of a file that will -contain the database. If the file does not exist, SQLite attempts -to create and initialize it. If the file is read-only (due to -permission bits or because it is located on read-only media like -a CD-ROM) then SQLite opens the database for reading only. The -entire SQL database is stored in a single file on the disk. But -additional temporary files may be created during the execution of -an SQL command in order to store the database rollback journal or -temporary and intermediate results of a query.

    - -

    The return value of the sqlite_open function is a -pointer to an opaque sqlite structure. This pointer will -be the first argument to all subsequent SQLite function calls that -deal with the same database. NULL is returned if the open fails -for any reason.

    - -
    -

    1.2 Closing the database

    - -

    To close an SQLite database, call the sqlite_close -function passing it the sqlite structure pointer that was obtained -from a prior call to sqlite_open. -If a transaction is active when the database is closed, the transaction -is rolled back.

    - -
    -

    1.3 Executing SQL statements

    - -

    The sqlite_exec function is used to process SQL statements -and queries. This function requires 5 parameters as follows:

    - -
      -
    1. A pointer to the sqlite structure obtained from a prior call - to sqlite_open.

    2. -
    3. A null-terminated string containing the text of one or more - SQL statements and/or queries to be processed.

    4. -
    5. A pointer to a callback function which is invoked once for each - row in the result of a query. This argument may be NULL, in which - case no callbacks will ever be invoked.

    6. -
    7. A pointer that is forwarded to become the first argument - to the callback function.

    8. -
    9. A pointer to an error string. Error messages are written to space - obtained from malloc() and the error string is made to point to - the malloced space. The calling function is responsible for freeing - this space when it has finished with it. - This argument may be NULL, in which case error messages are not - reported back to the calling function.

    10. -
    - -

    -The callback function is used to receive the results of a query. A -prototype for the callback function is as follows:

    - -
    -int Callback(void *pArg, int argc, char **argv, char **columnNames){
    -  return 0;
    -}
    -
    - -
    -

    The first argument to the callback is just a copy of the fourth argument -to sqlite_exec This parameter can be used to pass arbitrary -information through to the callback function from client code. -The second argument is the number of columns in the query result. -The third argument is an array of pointers to strings where each string -is a single column of the result for that record. Note that the -callback function reports a NULL value in the database as a NULL pointer, -which is very different from an empty string. If the i-th parameter -is an empty string, we will get:

    -
    -argv[i][0] == 0
    -
    -

    But if the i-th parameter is NULL we will get:

    -
    -argv[i] == 0
    -
    - -

    The names of the columns are contained in first argc -entries of the fourth argument. -If the SHOW_DATATYPES pragma -is on (it is off by default) then -the second argc entries in the 4th argument are the datatypes -for the corresponding columns. -

    - -

    If the -EMPTY_RESULT_CALLBACKS pragma is set to ON and the result of -a query is an empty set, then the callback is invoked once with the -third parameter (argv) set to 0. In other words -

    -argv == 0
    -
    -The second parameter (argc) -and the fourth parameter (columnNames) are still valid -and can be used to determine the number and names of the result -columns if there had been a result. -The default behavior is not to invoke the callback at all if the -result set is empty.

    - - -

    The callback function should normally return 0. If the callback -function returns non-zero, the query is immediately aborted and -sqlite_exec will return SQLITE_ABORT.

    - -

    1.4 Error Codes

    - -

    -The sqlite_exec function normally returns SQLITE_OK. But -if something goes wrong it can return a different value to indicate -the type of error. Here is a complete list of the return codes: -

    - -
    -#define SQLITE_OK           0   /* Successful result */
    -#define SQLITE_ERROR        1   /* SQL error or missing database */
    -#define SQLITE_INTERNAL     2   /* An internal logic error in SQLite */
    -#define SQLITE_PERM         3   /* Access permission denied */
    -#define SQLITE_ABORT        4   /* Callback routine requested an abort */
    -#define SQLITE_BUSY         5   /* The database file is locked */
    -#define SQLITE_LOCKED       6   /* A table in the database is locked */
    -#define SQLITE_NOMEM        7   /* A malloc() failed */
    -#define SQLITE_READONLY     8   /* Attempt to write a readonly database */
    -#define SQLITE_INTERRUPT    9   /* Operation terminated by sqlite_interrupt() */
    -#define SQLITE_IOERR       10   /* Some kind of disk I/O error occurred */
    -#define SQLITE_CORRUPT     11   /* The database disk image is malformed */
    -#define SQLITE_NOTFOUND    12   /* (Internal Only) Table or record not found */
    -#define SQLITE_FULL        13   /* Insertion failed because database is full */
    -#define SQLITE_CANTOPEN    14   /* Unable to open the database file */
    -#define SQLITE_PROTOCOL    15   /* Database lock protocol error */
    -#define SQLITE_EMPTY       16   /* (Internal Only) Database table is empty */
    -#define SQLITE_SCHEMA      17   /* The database schema changed */
    -#define SQLITE_TOOBIG      18   /* Too much data for one row of a table */
    -#define SQLITE_CONSTRAINT  19   /* Abort due to contraint violation */
    -#define SQLITE_MISMATCH    20   /* Data type mismatch */
    -#define SQLITE_MISUSE      21   /* Library used incorrectly */
    -#define SQLITE_NOLFS       22   /* Uses OS features not supported on host */
    -#define SQLITE_AUTH        23   /* Authorization denied */
    -#define SQLITE_ROW         100  /* sqlite_step() has another row ready */
    -#define SQLITE_DONE        101  /* sqlite_step() has finished executing */
    -
    - -

    -The meanings of these various return values are as follows: -

    - -
    -
    -
    SQLITE_OK
    -

    This value is returned if everything worked and there were no errors. -

    -
    SQLITE_INTERNAL
    -

    This value indicates that an internal consistency check within -the SQLite library failed. This can only happen if there is a bug in -the SQLite library. If you ever get an SQLITE_INTERNAL reply from -an sqlite_exec call, please report the problem on the SQLite -mailing list. -

    -
    SQLITE_ERROR
    -

    This return value indicates that there was an error in the SQL -that was passed into the sqlite_exec. -

    -
    SQLITE_PERM
    -

    This return value says that the access permissions on the database -file are such that the file cannot be opened. -

    -
    SQLITE_ABORT
    -

    This value is returned if the callback function returns non-zero. -

    -
    SQLITE_BUSY
    -

    This return code indicates that another program or thread has -the database locked. SQLite allows two or more threads to read the -database at the same time, but only one thread can have the database -open for writing at the same time. Locking in SQLite is on the -entire database.

    -

    -
    SQLITE_LOCKED
    -

    This return code is similar to SQLITE_BUSY in that it indicates -that the database is locked. But the source of the lock is a recursive -call to sqlite_exec. This return can only occur if you attempt -to invoke sqlite_exec from within a callback routine of a query -from a prior invocation of sqlite_exec. Recursive calls to -sqlite_exec are allowed as long as they do -not attempt to write the same table. -

    -
    SQLITE_NOMEM
    -

    This value is returned if a call to malloc fails. -

    -
    SQLITE_READONLY
    -

    This return code indicates that an attempt was made to write to -a database file that is opened for reading only. -

    -
    SQLITE_INTERRUPT
    -

    This value is returned if a call to sqlite_interrupt -interrupts a database operation in progress. -

    -
    SQLITE_IOERR
    -

    This value is returned if the operating system informs SQLite -that it is unable to perform some disk I/O operation. This could mean -that there is no more space left on the disk. -

    -
    SQLITE_CORRUPT
    -

    This value is returned if SQLite detects that the database it is -working on has become corrupted. Corruption might occur due to a rogue -process writing to the database file or it might happen due to an -perviously undetected logic error in of SQLite. This value is also -returned if a disk I/O error occurs in such a way that SQLite is forced -to leave the database file in a corrupted state. The latter should only -happen due to a hardware or operating system malfunction. -

    -
    SQLITE_FULL
    -

    This value is returned if an insertion failed because there is -no space left on the disk, or the database is too big to hold any -more information. The latter case should only occur for databases -that are larger than 2GB in size. -

    -
    SQLITE_CANTOPEN
    -

    This value is returned if the database file could not be opened -for some reason. -

    -
    SQLITE_PROTOCOL
    -

    This value is returned if some other process is messing with -file locks and has violated the file locking protocol that SQLite uses -on its rollback journal files. -

    -
    SQLITE_SCHEMA
    -

    When the database first opened, SQLite reads the database schema -into memory and uses that schema to parse new SQL statements. If another -process changes the schema, the command currently being processed will -abort because the virtual machine code generated assumed the old -schema. This is the return code for such cases. Retrying the -command usually will clear the problem. -

    -
    SQLITE_TOOBIG
    -

    SQLite will not store more than about 1 megabyte of data in a single -row of a single table. If you attempt to store more than 1 megabyte -in a single row, this is the return code you get. -

    -
    SQLITE_CONSTRAINT
    -

    This constant is returned if the SQL statement would have violated -a database constraint. -

    -
    SQLITE_MISMATCH
    -

    This error occurs when there is an attempt to insert non-integer -data into a column labeled INTEGER PRIMARY KEY. For most columns, SQLite -ignores the data type and allows any kind of data to be stored. But -an INTEGER PRIMARY KEY column is only allowed to store integer data. -

    -
    SQLITE_MISUSE
    -

    This error might occur if one or more of the SQLite API routines -is used incorrectly. Examples of incorrect usage include calling -sqlite_exec after the database has been closed using -sqlite_close or -calling sqlite_exec with the same -database pointer simultaneously from two separate threads. -

    -
    SQLITE_NOLFS
    -

    This error means that you have attempts to create or access a file -database file that is larger that 2GB on a legacy Unix machine that -lacks large file support. -

    -
    SQLITE_AUTH
    -

    This error indicates that the authorizer callback -has disallowed the SQL you are attempting to execute. -

    -
    SQLITE_ROW
    -

    This is one of the return codes from the -sqlite_step routine which is part of the non-callback API. -It indicates that another row of result data is available. -

    -
    SQLITE_DONE
    -

    This is one of the return codes from the -sqlite_step routine which is part of the non-callback API. -It indicates that the SQL statement has been completely executed and -the sqlite_finalize routine is ready to be called. -

    -
    -
    - -

    2.0 Accessing Data Without Using A Callback Function

    - -

    -The sqlite_exec routine described above used to be the only -way to retrieve data from an SQLite database. But many programmers found -it inconvenient to use a callback function to obtain results. So beginning -with SQLite version 2.7.7, a second access interface is available that -does not use callbacks. -

    - -

    -The new interface uses three separate functions to replace the single -sqlite_exec function. -

    - -
    -typedef struct sqlite_vm sqlite_vm;
    -
    -int sqlite_compile(
    -  sqlite *db,              /* The open database */
    -  const char *zSql,        /* SQL statement to be compiled */
    -  const char **pzTail,     /* OUT: uncompiled tail of zSql */
    -  sqlite_vm **ppVm,        /* OUT: the virtual machine to execute zSql */
    -  char **pzErrmsg          /* OUT: Error message. */
    -);
    -
    -int sqlite_step(
    -  sqlite_vm *pVm,          /* The virtual machine to execute */
    -  int *pN,                 /* OUT: Number of columns in result */
    -  const char ***pazValue,  /* OUT: Column data */
    -  const char ***pazColName /* OUT: Column names and datatypes */
    -);
    -
    -int sqlite_finalize(
    -  sqlite_vm *pVm,          /* The virtual machine to be finalized */
    -  char **pzErrMsg          /* OUT: Error message */
    -);
    -
    - -

    -The strategy is to compile a single SQL statement using -sqlite_compile then invoke sqlite_step multiple times, -once for each row of output, and finally call sqlite_finalize -to clean up after the SQL has finished execution. -

    - -

    2.1 Compiling An SQL Statement Into A Virtual Machine

    - -

    -The sqlite_compile "compiles" a single SQL statement (specified -by the second parameter) and generates a virtual machine that is able -to execute that statement. -As with must interface routines, the first parameter must be a pointer -to an sqlite structure that was obtained from a prior call to -sqlite_open. - -

    -A pointer to the virtual machine is stored in a pointer which is passed -in as the 4th parameter. -Space to hold the virtual machine is dynamically allocated. To avoid -a memory leak, the calling function must invoke -sqlite_finalize on the virtual machine after it has finished -with it. -The 4th parameter may be set to NULL if an error is encountered during -compilation. -

    - -

    -If any errors are encountered during compilation, an error message is -written into memory obtained from malloc and the 5th parameter -is made to point to that memory. If the 5th parameter is NULL, then -no error message is generated. If the 5th parameter is not NULL, then -the calling function should dispose of the memory containing the error -message by calling sqlite_freemem. -

    - -

    -If the 2nd parameter actually contains two or more statements of SQL, -only the first statement is compiled. (This is different from the -behavior of sqlite_exec which executes all SQL statements -in its input string.) The 3rd parameter to sqlite_compile -is made to point to the first character beyond the end of the first -statement of SQL in the input. If the 2nd parameter contains only -a single SQL statement, then the 3rd parameter will be made to point -to the '\000' terminator at the end of the 2nd parameter. -

    - -

On success, sqlite_compile returns SQLITE_OK. Otherwise an error code is returned.

    - -

    2.2 Step-By-Step Execution Of An SQL Statement

    - -

After a virtual machine has been generated using sqlite_compile, it is executed by one or more calls to sqlite_step. Each invocation of sqlite_step, except the last one, returns a single row of the result. The number of columns in the result is stored in the integer that the 2nd parameter points to. The pointer specified by the 3rd parameter is made to point to an array of pointers to column values. The pointer in the 4th parameter is made to point to an array of pointers to column names and datatypes. The 2nd through 4th parameters to sqlite_step convey the same information as the 2nd through 4th parameters of the callback routine when using the sqlite_exec interface, except that with sqlite_step the column datatype information is always included in the 4th parameter regardless of whether the SHOW_DATATYPES pragma is on or off.

    - -

    -Each invocation of sqlite_step returns an integer code that -indicates what happened during that step. This code may be -SQLITE_BUSY, SQLITE_ROW, SQLITE_DONE, SQLITE_ERROR, or -SQLITE_MISUSE. -

    - -

    -If the virtual machine is unable to open the database file because -it is locked by another thread or process, sqlite_step -will return SQLITE_BUSY. The calling function should do some other -activity, or sleep, for a short amount of time to give the lock a -chance to clear, then invoke sqlite_step again. This can -be repeated as many times as desired. -

    - -

    -Whenever another row of result data is available, -sqlite_step will return SQLITE_ROW. The row data is -stored in an array of pointers to strings and the 2nd parameter -is made to point to this array. -

    - -

    -When all processing is complete, sqlite_step will return -either SQLITE_DONE or SQLITE_ERROR. SQLITE_DONE indicates that the -statement completed successfully and SQLITE_ERROR indicates that there -was a run-time error. (The details of the error are obtained from -sqlite_finalize.) It is a misuse of the library to attempt -to call sqlite_step again after it has returned SQLITE_DONE -or SQLITE_ERROR. -

    - -

When sqlite_step returns SQLITE_DONE or SQLITE_ERROR, the *pN and *pazColName values are set to the number of columns in the result set and to the names of the columns, just as they are for an SQLITE_ROW return. This allows the calling code to find the number of result columns and the column names and datatypes even if the result set is empty. The *pazValue parameter is always set to NULL when the return code is SQLITE_DONE or SQLITE_ERROR. If the SQL being executed is a statement that does not return a result (such as an INSERT or an UPDATE) then *pN will be set to zero and *pazColName will be set to NULL.

    - -

If you abuse the library by trying to call sqlite_step inappropriately, it will attempt to return SQLITE_MISUSE. This can happen if you call sqlite_step() on the same virtual machine at the same time from two or more threads, if you call sqlite_step() again after it returned SQLITE_DONE or SQLITE_ERROR, or if you pass an invalid virtual machine pointer to sqlite_step(). You should not depend on the SQLITE_MISUSE return code to indicate an error. It is possible that a misuse of the interface will go undetected and result in a program crash. The SQLITE_MISUSE return code is intended as a debugging aid only, to help you detect incorrect usage prior to a mishap. The misuse detection logic is not guaranteed to work in every case.

    - -

    2.3 Deleting A Virtual Machine

    - -

    -Every virtual machine that sqlite_compile creates should -eventually be handed to sqlite_finalize. The sqlite_finalize() -procedure deallocates the memory and other resources that the virtual -machine uses. Failure to call sqlite_finalize() will result in -resource leaks in your program. -

    - -

    -The sqlite_finalize routine also returns the result code -that indicates success or failure of the SQL operation that the -virtual machine carried out. -The value returned by sqlite_finalize() will be the same as would -have been returned had the same SQL been executed by sqlite_exec. -The error message returned will also be the same. -

    - -

    -It is acceptable to call sqlite_finalize on a virtual machine -before sqlite_step has returned SQLITE_DONE. Doing so has -the effect of interrupting the operation in progress. Partially completed -changes will be rolled back and the database will be restored to its -original state (unless an alternative recovery algorithm is selected using -an ON CONFLICT clause in the SQL being executed.) The effect is the -same as if a callback function of sqlite_exec had returned -non-zero. -

    - -

    -It is also acceptable to call sqlite_finalize on a virtual machine -that has never been passed to sqlite_step even once. -

    - -

    3.0 The Extended API

    - -

    Only the three core routines described in section 1.0 are required to use -SQLite. But there are many other functions that provide -useful interfaces. These extended routines are as follows: -

    - -
    -int sqlite_last_insert_rowid(sqlite*);
    -
    -int sqlite_changes(sqlite*);
    -
    -int sqlite_get_table(
    -  sqlite*,
    -  char *sql,
    -  char ***result,
    -  int *nrow,
    -  int *ncolumn,
    -  char **errmsg
    -);
    -
    -void sqlite_free_table(char**);
    -
    -void sqlite_interrupt(sqlite*);
    -
    -int sqlite_complete(const char *sql);
    -
    -void sqlite_busy_handler(sqlite*, int (*)(void*,const char*,int), void*);
    -
    -void sqlite_busy_timeout(sqlite*, int ms);
    -
    -const char sqlite_version[];
    -
    -const char sqlite_encoding[];
    -
    -int sqlite_exec_printf(
    -  sqlite*,
    -  char *sql,
    -  int (*)(void*,int,char**,char**),
    -  void*,
    -  char **errmsg,
    -  ...
    -);
    -
    -int sqlite_exec_vprintf(
    -  sqlite*,
    -  char *sql,
    -  int (*)(void*,int,char**,char**),
    -  void*,
    -  char **errmsg,
    -  va_list
    -);
    -
    -int sqlite_get_table_printf(
    -  sqlite*,
    -  char *sql,
    -  char ***result,
    -  int *nrow,
    -  int *ncolumn,
    -  char **errmsg,
    -  ...
    -);
    -
    -int sqlite_get_table_vprintf(
    -  sqlite*,
    -  char *sql,
    -  char ***result,
    -  int *nrow,
    -  int *ncolumn,
    -  char **errmsg,
    -  va_list
    -);
    -
    -char *sqlite_mprintf(const char *zFormat, ...);
    -
    -char *sqlite_vmprintf(const char *zFormat, va_list);
    -
    -void sqlite_freemem(char*);
    -
    -void sqlite_progress_handler(sqlite*, int, int (*)(void*), void*);
    -
    -
    - -

    All of the above definitions are included in the "sqlite.h" -header file that comes in the source tree.

    - -

    3.1 The ROWID of the most recent insert

    - -

    Every row of an SQLite table has a unique integer key. If the -table has a column labeled INTEGER PRIMARY KEY, then that column -serves as the key. If there is no INTEGER PRIMARY KEY column then -the key is a unique integer. The key for a row can be accessed in -a SELECT statement or used in a WHERE or ORDER BY clause using any -of the names "ROWID", "OID", or "_ROWID_".

    - -

When you do an insert into a table that does not have an INTEGER PRIMARY KEY column, or if the table does have an INTEGER PRIMARY KEY but the value for that column is not specified in the VALUES clause of the insert, then the key is automatically generated. You can find the value of the key for the most recent INSERT statement using the sqlite_last_insert_rowid API function.
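For example, a sketch only (the table tbl1 and its column are illustrative):

    char *zErr = 0;
    if( sqlite_exec(db, "INSERT INTO tbl1(data) VALUES('hello')", 0, 0, &zErr)==SQLITE_OK ){
      printf("new ROWID is %d\n", sqlite_last_insert_rowid(db));
    }else{
      fprintf(stderr, "INSERT failed: %s\n", zErr);
      sqlite_freemem(zErr);
    }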

    - -

    3.2 The number of rows that changed

    - -

The sqlite_changes API function returns the number of rows that have been inserted, deleted, or modified since the database was last quiescent. A "quiescent" database is one in which there are no outstanding calls to sqlite_exec and no VMs created by sqlite_compile that have not been finalized by sqlite_finalize. In common usage, sqlite_changes returns the number of rows inserted, deleted, or modified by the most recent sqlite_exec call or since the most recent sqlite_compile. But if you have nested calls to sqlite_exec (that is, if the callback routine of one sqlite_exec invokes another sqlite_exec), or if you invoke sqlite_compile to create a new VM while another VM is still in existence, then the meaning of the number returned by sqlite_changes is more complex. The number reported includes any changes that were later undone by a ROLLBACK or ABORT. But rows that are deleted because of a DROP TABLE are not counted.

    - -

    SQLite implements the command "DELETE FROM table" (without -a WHERE clause) by dropping the table then recreating it. -This is much faster than deleting the elements of the table individually. -But it also means that the value returned from sqlite_changes -will be zero regardless of the number of elements that were originally -in the table. If an accurate count of the number of elements deleted -is necessary, use "DELETE FROM table WHERE 1" instead.

    - -

    3.3 Querying into memory obtained from malloc()

    - -

    The sqlite_get_table function is a wrapper around -sqlite_exec that collects all the information from successive -callbacks and writes it into memory obtained from malloc(). This -is a convenience function that allows the application to get the -entire result of a database query with a single function call.

    - -

    The main result from sqlite_get_table is an array of pointers -to strings. There is one element in this array for each column of -each row in the result. NULL results are represented by a NULL -pointer. In addition to the regular data, there is an added row at the -beginning of the array that contains the name of each column of the -result.

    - -

    As an example, consider the following query:

    - -
    -SELECT employee_name, login, host FROM users WHERE login LIKE 'd%'; -
    - -

    This query will return the name, login and host computer name -for every employee whose login begins with the letter "d". If this -query is submitted to sqlite_get_table the result might -look like this:

    - -
    -nrow = 2
    -ncolumn = 3
    -result[0] = "employee_name"
    -result[1] = "login"
    -result[2] = "host"
    -result[3] = "dummy"
    -result[4] = "No such user"
    -result[5] = 0
    -result[6] = "D. Richard Hipp"
    -result[7] = "drh"
    -result[8] = "zadok" -
    - -

    Notice that the "host" value for the "dummy" record is NULL so -the result[] array contains a NULL pointer at that slot.

    - -

If the result set of a query is empty, then by default sqlite_get_table will set nrow to 0 and leave its result parameter set to NULL. But if the EMPTY_RESULT_CALLBACKS pragma is ON then the result parameter is initialized to the names of the columns only. For example, consider this query which has an empty result set:

    - -
    -SELECT employee_name, login, host FROM users WHERE employee_name IS NULL; -
    - -

The default behavior gives this result:

    - -
    -nrow = 0
    -ncolumn = 0
    -result = 0
    -
    - -

    -But if the EMPTY_RESULT_CALLBACKS pragma is ON, then the following -is returned: -

    - -
    -nrow = 0
    -ncolumn = 3
    -result[0] = "employee_name"
    -result[1] = "login"
    -result[2] = "host"
    -
    - -

Memory to hold the information returned by sqlite_get_table is obtained from malloc(). But the calling function should not try to free this information directly. Instead, pass the complete table to sqlite_free_table when the table is no longer needed. It is safe to call sqlite_free_table with a NULL pointer such as would be returned if the result set is empty.
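Putting these pieces together, a hedged sketch of a complete sqlite_get_table() call for the query above might look like this (row 0 of the array holds the column names; data rows follow):

    char **azResult = 0;
    char *zErr = 0;
    int nRow, nCol, i, j;

    int rc = sqlite_get_table(db,
        "SELECT employee_name, login, host FROM users WHERE login LIKE 'd%'",
        &azResult, &nRow, &nCol, &zErr);
    if( rc==SQLITE_OK ){
      for(i=1; i<=nRow; i++){
        for(j=0; j<nCol; j++){
          const char *zVal = azResult[i*nCol + j];
          printf("%s = %s\n", azResult[j], zVal ? zVal : "NULL");
        }
      }
    }else{
      fprintf(stderr, "query failed: %s\n", zErr);
      sqlite_freemem(zErr);
    }
    sqlite_free_table(azResult);  /* safe even when azResult is NULL */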

    - -

    The sqlite_get_table routine returns the same integer -result code as sqlite_exec.

    - -

    3.4 Interrupting an SQLite operation

    - -

    The sqlite_interrupt function can be called from a -different thread or from a signal handler to cause the current database -operation to exit at its first opportunity. When this happens, -the sqlite_exec routine (or the equivalent) that started -the database operation will return SQLITE_INTERRUPT.

    - -

    3.5 Testing for a complete SQL statement

    - -

    The next interface routine to SQLite is a convenience function used -to test whether or not a string forms a complete SQL statement. -If the sqlite_complete function returns true when its input -is a string, then the argument forms a complete SQL statement. -There are no guarantees that the syntax of that statement is correct, -but we at least know the statement is complete. If sqlite_complete -returns false, then more text is required to complete the SQL statement.

    - -

    For the purpose of the sqlite_complete function, an SQL -statement is complete if it ends in a semicolon.

    - -

The sqlite command-line utility uses the sqlite_complete function to know when it needs to call sqlite_exec. After each line of input is received, sqlite calls sqlite_complete on all input in its buffer. If sqlite_complete returns true, then sqlite_exec is called and the input buffer is reset. If sqlite_complete returns false, then the prompt is changed to the continuation prompt and another line of text is read and added to the input buffer.
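A stripped-down sketch of that read-buffer-execute loop (fixed-size buffers, no overflow checking, and trivial error handling, all just for illustration):

    char zBuf[10000];
    char zLine[1000];
    zBuf[0] = 0;
    while( fgets(zLine, sizeof(zLine), stdin) ){
      strcat(zBuf, zLine);
      if( sqlite_complete(zBuf) ){
        char *zErr = 0;
        if( sqlite_exec(db, zBuf, 0, 0, &zErr)!=SQLITE_OK ){
          fprintf(stderr, "SQL error: %s\n", zErr);
          sqlite_freemem(zErr);
        }
        zBuf[0] = 0;   /* statement consumed; start collecting the next one */
      }
    }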

    - -

    3.6 Library version string

    - -

    The SQLite library exports the string constant named -sqlite_version which contains the version number of the -library. The header file contains a macro SQLITE_VERSION -with the same information. If desired, a program can compare -the SQLITE_VERSION macro against the sqlite_version -string constant to verify that the version number of the -header file and the library match.

    - -

    3.7 Library character encoding

    - -

    By default, SQLite assumes that all data uses a fixed-size -8-bit character (iso8859). But if you give the --enable-utf8 option -to the configure script, then the library assumes UTF-8 variable -sized characters. This makes a difference for the LIKE and GLOB -operators and the LENGTH() and SUBSTR() functions. The static -string sqlite_encoding will be set to either "UTF-8" or -"iso8859" to indicate how the library was compiled. In addition, -the sqlite.h header file will define one of the -macros SQLITE_UTF8 or SQLITE_ISO8859, as appropriate.

    - -

    Note that the character encoding mechanism used by SQLite cannot -be changed at run-time. This is a compile-time option only. The -sqlite_encoding character string just tells you how the library -was compiled.

    - -

    3.8 Changing the library's response to locked files

    - -

    The sqlite_busy_handler procedure can be used to register -a busy callback with an open SQLite database. The busy callback will -be invoked whenever SQLite tries to access a database that is locked. -The callback will typically do some other useful work, or perhaps sleep, -in order to give the lock a chance to clear. If the callback returns -non-zero, then SQLite tries again to access the database and the cycle -repeats. If the callback returns zero, then SQLite aborts the current -operation and returns SQLITE_BUSY.

    - -

    The arguments to sqlite_busy_handler are the opaque -structure returned from sqlite_open, a pointer to the busy -callback function, and a generic pointer that will be passed as -the first argument to the busy callback. When SQLite invokes the -busy callback, it sends it three arguments: the generic pointer -that was passed in as the third argument to sqlite_busy_handler, -the name of the database table or index that the library is trying -to access, and the number of times that the library has attempted to -access the database table or index.

    - -

For the common case where we want the busy callback to sleep, the SQLite library provides a convenience routine sqlite_busy_timeout. The first argument to sqlite_busy_timeout is a pointer to an open SQLite database and the second argument is a number of milliseconds. After sqlite_busy_timeout has been executed, the SQLite library will wait for the lock to clear for at least the number of milliseconds specified before it returns SQLITE_BUSY. Specifying zero milliseconds for the timeout restores the default behavior.
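As an illustration, a busy callback that retries a handful of times before giving up might be registered as sketched below; the callback name and the retry count are arbitrary choices, not part of the API:

    static int retryBusy(void *pArg, const char *zTable, int nTries){
      return nTries<5;   /* non-zero tells SQLite to try the lock again */
    }

    /* after opening the database: */
    sqlite_busy_handler(db, retryBusy, 0);

    /* or, for the common sleep-and-retry case, just set a timeout: */
    sqlite_busy_timeout(db, 2000);   /* wait up to 2000 ms before returning SQLITE_BUSY */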

    - -

    3.9 Using the _printf() wrapper functions

    - -

    The four utility functions

    - -

    -

      -
• sqlite_exec_printf()
    • sqlite_exec_vprintf()
    • sqlite_get_table_printf()
    • sqlite_get_table_vprintf()
    -

    - -

    implement the same query functionality as sqlite_exec -and sqlite_get_table. But instead of taking a complete -SQL statement as their second argument, the four _printf -routines take a printf-style format string. The SQL statement to -be executed is generated from this format string and from whatever -additional arguments are attached to the end of the function call.

    - -

There are two advantages to using the SQLite printf functions instead of sprintf. First of all, with the SQLite printf routines, there is never a danger of overflowing a static buffer as there is with sprintf. The SQLite printf routines automatically allocate (and later free) as much memory as is necessary to hold the SQL statements generated.

    - -

The second advantage the SQLite printf routines have over sprintf is two new formatting options specifically designed to support string literals in SQL. Within the format string, the %q formatting option works very much like %s in that it reads a null-terminated string from the argument list and inserts it into the result. But %q translates the inserted string by making two copies of every single-quote (') character in the substituted string. This has the effect of escaping the end-of-string meaning of single-quote within a string literal. The %Q formatting option works similarly; it translates the single-quotes like %q and additionally encloses the resulting string in single-quotes. If the argument for the %Q formatting option is a NULL pointer, the resulting string is NULL without single quotes.

    - -

    Consider an example. Suppose you are trying to insert a string -value into a database table where the string value was obtained from -user input. Suppose the string to be inserted is stored in a variable -named zString. The code to do the insertion might look like this:

    - -
    -sqlite_exec_printf(db,
    -  "INSERT INTO table1 VALUES('%s')",
    -  0, 0, 0, zString);
    -
    - -

    If the zString variable holds text like "Hello", then this statement -will work just fine. But suppose the user enters a string like -"Hi y'all!". The SQL statement generated reads as follows: - -

    -INSERT INTO table1 VALUES('Hi y'all')
    -
    - -

This is not valid SQL because of the apostrophe in the word "y'all". But if the %q formatting option is used instead of %s, like this:

    - -
    -sqlite_exec_printf(db,
    -  "INSERT INTO table1 VALUES('%q')",
    -  0, 0, 0, zString);
    -
    - -

    Then the generated SQL will look like the following:

    - -
    -INSERT INTO table1 VALUES('Hi y''all')
    -
    - -

Here the apostrophe has been escaped and the SQL statement is well-formed. When generating SQL on-the-fly from data that might contain a single-quote character ('), it is always a good idea to use the SQLite printf routines and the %q formatting option instead of sprintf.

    - -

    If the %Q formatting option is used instead of %q, like this:

    - -
    -sqlite_exec_printf(db,
    -  "INSERT INTO table1 VALUES(%Q)",
    -  0, 0, 0, zString);
    -
    - -

    Then the generated SQL will look like the following:

    - -
    -INSERT INTO table1 VALUES('Hi y''all')
    -
    - -

    If the value of the zString variable is NULL, the generated SQL -will look like the following:

    - -
    -INSERT INTO table1 VALUES(NULL)
    -
    - -

    All of the _printf() routines above are built around the following -two functions:

    - -
    -char *sqlite_mprintf(const char *zFormat, ...);
    -char *sqlite_vmprintf(const char *zFormat, va_list);
    -
    - -

The sqlite_mprintf() routine works like the standard library sprintf() except that it writes its results into memory obtained from malloc() and returns a pointer to the malloced buffer. sqlite_mprintf() also understands the %q and %Q extensions described above. The sqlite_vmprintf() routine is a varargs version of the same routine. The string pointer that these routines return should be freed by passing it to sqlite_freemem().
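For instance, the earlier %q example can be rewritten in terms of sqlite_mprintf(); this is only a sketch, reusing the zString variable and table1 from the examples above:

    char *zSql = sqlite_mprintf("INSERT INTO table1 VALUES('%q')", zString);
    char *zErr = 0;
    if( sqlite_exec(db, zSql, 0, 0, &zErr)!=SQLITE_OK ){
      fprintf(stderr, "INSERT failed: %s\n", zErr);
      sqlite_freemem(zErr);
    }
    sqlite_freemem(zSql);   /* release the buffer allocated by sqlite_mprintf() */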

    - -

    3.10 Performing background jobs during large queries

    - -

    The sqlite_progress_handler() routine can be used to register a -callback routine with an SQLite database to be invoked periodically during long -running calls to sqlite_exec(), sqlite_step() and the various -wrapper functions. -

    - -

    The callback is invoked every N virtual machine operations, where N is -supplied as the second argument to sqlite_progress_handler(). The third -and fourth arguments to sqlite_progress_handler() are a pointer to the -routine to be invoked and a void pointer to be passed as the first argument to -it. -

    - -

The time taken to execute each virtual machine operation can vary based on many factors. A typical rate for a 1 GHz PC is between half a million and three million operations per second, but the rate may be much higher or lower depending on the query. As such, it is difficult to schedule background operations based on virtual machine operations. Instead, it is recommended that the callback be scheduled relatively frequently (say every 1000 operations) and that external timer routines be used to determine whether or not background jobs need to be run.
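A sketch of such a registration follows; processUiEvents() is a hypothetical application routine, not part of SQLite:

    static int progressCallback(void *pArg){
      processUiEvents();   /* hypothetical: keep the user interface responsive */
      return 0;            /* returning 0 lets the query continue */
    }

    /* invoke the callback roughly every 1000 virtual machine operations */
    sqlite_progress_handler(db, 1000, progressCallback, 0);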

    - - -

    4.0 Adding New SQL Functions

    - -

    Beginning with version 2.4.0, SQLite allows the SQL language to be -extended with new functions implemented as C code. The following interface -is used: -

    - -
    -typedef struct sqlite_func sqlite_func;
    -
    -int sqlite_create_function(
    -  sqlite *db,
    -  const char *zName,
    -  int nArg,
    -  void (*xFunc)(sqlite_func*,int,const char**),
    -  void *pUserData
    -);
    -int sqlite_create_aggregate(
    -  sqlite *db,
    -  const char *zName,
    -  int nArg,
    -  void (*xStep)(sqlite_func*,int,const char**),
    -  void (*xFinalize)(sqlite_func*),
    -  void *pUserData
    -);
    -
    -char *sqlite_set_result_string(sqlite_func*,const char*,int);
    -void sqlite_set_result_int(sqlite_func*,int);
    -void sqlite_set_result_double(sqlite_func*,double);
    -void sqlite_set_result_error(sqlite_func*,const char*,int);
    -
    -void *sqlite_user_data(sqlite_func*);
    -void *sqlite_aggregate_context(sqlite_func*, int nBytes);
    -int sqlite_aggregate_count(sqlite_func*);
    -
    - -

    -The sqlite_create_function() interface is used to create -regular functions and sqlite_create_aggregate() is used to -create new aggregate functions. In both cases, the db -parameter is an open SQLite database on which the functions should -be registered, zName is the name of the new function, -nArg is the number of arguments, and pUserData is -a pointer which is passed through unchanged to the C implementation -of the function. Both routines return 0 on success and non-zero -if there are any errors. -

    - -

    -The length of a function name may not exceed 255 characters. -Any attempt to create a function whose name exceeds 255 characters -in length will result in an error. -

    - -

    -For regular functions, the xFunc callback is invoked once -for each function call. The implementation of xFunc should call -one of the sqlite_set_result_... interfaces to return its -result. The sqlite_user_data() routine can be used to -retrieve the pUserData pointer that was passed in when the -function was registered. -

    - -

    -For aggregate functions, the xStep callback is invoked once -for each row in the result and then xFinalize is invoked at the -end to compute a final answer. The xStep routine can use the -sqlite_aggregate_context() interface to allocate memory that -will be unique to that particular instance of the SQL function. -This memory will be automatically deleted after xFinalize is called. -The sqlite_aggregate_count() routine can be used to find out -how many rows of data were passed to the aggregate. The xFinalize -callback should invoke one of the sqlite_set_result_... -interfaces to set the final result of the aggregate. -

    - -

SQLite now implements all of its built-in functions using this interface. For additional information and examples on how to create new SQL functions, review the SQLite source code in the file func.c.
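As a hedged sketch of a scalar function, the following registers half(X), which returns its argument divided by two. The function name is invented for illustration; in this API argument values arrive as strings:

    #include <stdlib.h>   /* for atof() */

    static void halfFunc(sqlite_func *context, int argc, const char **argv){
      if( argc<1 || argv[0]==0 ){
        sqlite_set_result_error(context, "half() requires one non-NULL argument", -1);
        return;
      }
      sqlite_set_result_double(context, atof(argv[0])/2.0);
    }

    /* register as an SQL function taking exactly one argument */
    sqlite_create_function(db, "half", 1, halfFunc, 0);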

    - -

    5.0 Multi-Threading And SQLite

    - -

    -If SQLite is compiled with the THREADSAFE preprocessor macro set to 1, -then it is safe to use SQLite from two or more threads of the same process -at the same time. But each thread should have its own sqlite* -pointer returned from sqlite_open. It is never safe for two -or more threads to access the same sqlite* pointer at the same time. -

    - -

In precompiled SQLite libraries available on the website, the Unix versions are compiled with THREADSAFE turned off but the Windows versions are compiled with THREADSAFE turned on. If you need something different than this you will have to recompile.

    - -

    -Under Unix, an sqlite* pointer should not be carried across a -fork() system call into the child process. The child process -should open its own copy of the database after the fork(). -

    - -

    6.0 Usage Examples

    - -

    For examples of how the SQLite C/C++ interface can be used, -refer to the source code for the sqlite program in the -file src/shell.c of the source tree. -Additional information about sqlite is available at -sqlite.html. -See also the sources to the Tcl interface for SQLite in -the source file src/tclsqlite.c.

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/cintro.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/cintro.html --- sqlite3-3.4.2/www/cintro.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/cintro.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,438 @@ + + +An Introduction To The SQLite C/C++ Interface + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    An Introduction To The SQLite C/C++ Interface

    + +

    + This article provides an overview and roadmap to the C/C++ interface + to SQLite. +

    + +

    + Early versions of SQLite were very easy to learn since they only + supported 5 C/C++ interfaces. But as SQLite has grown in capability + new C/C++ interfaces have been added so that now there + are over 150 distinct APIs. This can be overwhelming to a new programmer. + Fortunately, most of the C/C++ interfaces in SQLite are very specialized + and never need to be used. Despite having so many + entry points, the core API is still relatively simple and easy to code to. + This article aims to provide all of the background information needed to + easily understand how SQLite works. +

    + +

A separate document, The SQLite C/C++ Interface, provides detailed specifications for all of the various C/C++ APIs for SQLite. Once the reader understands the basic principles of operation for SQLite, that document should be used as a reference guide. This article is intended as an introduction only and is neither a complete nor an authoritative reference for the SQLite API.

    + +

    1.0 Core Objects And Interfaces

    + +

    + The principal task of an SQL database engine is to evaluate statements + of SQL. In order to accomplish this purpose, the developer needs + to know about two objects: +

    + +

  • The database connection object: sqlite3
  • The prepared statement object: sqlite3_stmt

    + Strictly speaking, the prepared statement object is not required since + the convenience wrapper interfaces, sqlite3_exec or + sqlite3_get_table, can be used and these convenience wrappers + encapsulate and hide the prepared statement object. + Nevertheless, an understanding of + prepared statements is needed to make full use of SQLite. +

    + +

The database connection and prepared statement objects are controlled by a small set of C/C++ interface routines listed below.

    + +

  • sqlite3_open()
  • sqlite3_prepare()
  • sqlite3_step()
  • sqlite3_column()
  • sqlite3_finalize()
  • sqlite3_close()

    + The six C/C++ interface routines and two objects listed above form the core + functionality of SQLite. The developer who understands them + will have a good foundation for using SQLite. +

    + +

Note that the list of routines is conceptual rather than actual. Many of these routines come in multiple versions. For example, the list above shows a single routine named sqlite3_open() when in fact there are three separate routines that accomplish the same thing in slightly different ways: sqlite3_open(), sqlite3_open16() and sqlite3_open_v2(). The list mentions sqlite3_column() when in fact no such routine exists. The "sqlite3_column()" shown in the list is a placeholder for an entire family of routines used to extract column data in various datatypes.

    + +

    + Here is a summary of what the core interfaces do: +

    + + + + + + + + + + + + + + + + + + + + + + +
    sqlite3_open() + This routine + opens a connection to an SQLite database file and returns a + database connection object. This is often the first SQLite API + call that an application makes and is a prerequisite for most other + SQLite APIs. Many SQLite interfaces require a pointer to + the database connection object as their first parameter and can + be thought of as methods on the database connection object. + This routine is the constructor for the database connection object. +
    sqlite3_prepare() + This routine + converts SQL text into a prepared statement object and returns a pointer + to that object. This interface requires a database connection pointer + created by a prior call to sqlite3_open() and a text string containing + the SQL statement to be prepared. This API does not actually evaluate + the SQL statement. It merely prepares the SQL statement for evaluation. + +

    Note that the use of sqlite3_prepare() is not recommended for new + applications. The alternative routine sqlite3_prepare_v2() should + be used instead.

    +
    sqlite3_step() + This routine is used to evaluate a prepared statement that has been + previously created by the sqlite3_prepare() interface. The statement + is evaluated up to the point where the first row of results are available. + To advance to the second row of results, invoke sqlite3_step() again. + Continue invoking sqlite3_step() until the statement is complete. + Statements that do not return results (ex: INSERT, UPDATE, or DELETE + statements) run to completion on a single call to sqlite3_step(). +
    sqlite3_column() + This routine returns a single column from the current row of a result + set for a prepared statement that is being evaluated by sqlite3_step(). + Each time sqlite3_step() stops with a new result set row, this routine + can be called multiple times to find the values of all columns in that row. + As noted above, there really is no such thing as a "sqlite3_column()" + function in the SQLite API. Instead, what we here call "sqlite3_column()" + is really a place-holder for an entire family of functions that return + a value from the result set in various data types. There are also routines + in this family that return the size of the result (if it is a string or + BLOB) and the number of columns in the result set. + +

    +
    sqlite3_finalize() + This routine destroys a prepared statement created by a prior call + to sqlite3_prepare(). Every prepared statement must be destroyed using + a call to this routine in order to avoid memory leaks. +
    sqlite3_close() + This routine closes a database connection previously opened by a call + to sqlite3_open(). All prepared statements associated with the + connection should be finalized prior to closing the + connection. +
    + +

    1.1 Typical Usage Of Core Routines And Objects

    + +

An application that wants to use SQLite will typically use sqlite3_open() to create a single database connection during initialization. Note that sqlite3_open() can be used either to open existing database files or to create and open new database files. While many applications use only a single database connection, there is no reason why an application cannot call sqlite3_open() multiple times in order to open multiple database connections - either to the same database or to different databases. Sometimes a multi-threaded application will create separate database connections for each thread. Note too that it is not necessary to open separate database connections in order to access two or more databases. A single database connection can be made to access two or more databases at one time using the ATTACH SQL command.

    + +

    + Many applications destroy their database connections using calls to + sqlite3_close() at shutdown. Or, for example, an application might + open database connections in response to a File->Open menu action + and then destroy the corresponding database connection in response + to the File->Close menu. +

    + +

    + To run an SQL statement, the application follows these steps: +

    + +

      +
  1. Create a prepared statement using sqlite3_prepare().
  2. Evaluate the prepared statement by calling sqlite3_step() one or more times.
  3. For queries, extract results by calling sqlite3_column() in between two calls to sqlite3_step().
  4. Destroy the prepared statement using sqlite3_finalize().

    + +

The foregoing is all one really needs to know in order to use SQLite effectively. All the rest is just ornamentation and detail.
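A minimal sketch of those four steps, assuming an already-open database connection named db and a hypothetical table people(name, age):

    sqlite3_stmt *pStmt = 0;
    int rc = sqlite3_prepare_v2(db, "SELECT name, age FROM people", -1, &pStmt, 0);
    if( rc==SQLITE_OK ){
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s is %d years old\n",
               (const char*)sqlite3_column_text(pStmt, 0),
               sqlite3_column_int(pStmt, 1));
      }
    }
    sqlite3_finalize(pStmt);   /* destroy the prepared statement */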

    + +

    2.0 Convenience Wrappers Around Core Routines

    + +

    + The sqlite3_exec() interface is a convenience wrapper that carries out + all four of the above steps with a single function call. A callback + function passed into sqlite3_exec() is used to process each row of + the result set. The sqlite3_get_table() is another convenience wrapper + that does all four of the above steps. The sqlite3_get_table() interface + differs from sqlite3_exec() in that it stores the results of queries + in heap memory rather than invoking a callback. +

    + +

It is important to realize that neither sqlite3_exec() nor sqlite3_get_table() does anything that cannot be accomplished using the core routines. In fact, these wrappers are implemented purely in terms of the core routines.
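For comparison, here is a sketch of the same hypothetical people query run through sqlite3_exec(); the callback is invoked once per result row:

    static int rowCallback(void *pArg, int nCol, char **azVal, char **azColName){
      int i;
      for(i=0; i<nCol; i++){
        printf("%s = %s\n", azColName[i], azVal[i] ? azVal[i] : "NULL");
      }
      return 0;   /* returning non-zero aborts the query */
    }

    char *zErr = 0;
    if( sqlite3_exec(db, "SELECT name, age FROM people", rowCallback, 0, &zErr)!=SQLITE_OK ){
      fprintf(stderr, "query failed: %s\n", zErr);
      sqlite3_free(zErr);
    }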

    + + +

    3.0 Binding Parameters and Reusing Prepared Statements

    + +

In the prior discussion, it was assumed that each SQL statement is prepared once, evaluated, then destroyed. However, SQLite allows the same prepared statement to be evaluated multiple times. This is accomplished using the following routines:

    + +

  • sqlite3_reset()
  • sqlite3_bind()

After a prepared statement has been evaluated by one or more calls to sqlite3_step(), it can be reset in order to be evaluated again by a call to sqlite3_reset(). Using sqlite3_reset() on an existing prepared statement rather than creating a new prepared statement avoids unnecessary calls to sqlite3_prepare(). For many SQL statements, the time needed to run sqlite3_prepare() equals or exceeds the time needed by sqlite3_step(). So avoiding calls to sqlite3_prepare() can result in a significant performance improvement.

    + +

Usually, though, it is not useful to evaluate exactly the same SQL statement more than once. More often, one wants to evaluate similar statements. For example, you might want to evaluate an INSERT statement multiple times, though with different values to insert. To accommodate this kind of flexibility, SQLite allows SQL statements to contain parameters which are "bound" to values prior to being evaluated. These values can later be changed and the same prepared statement can be evaluated a second time using the new values.

    + +

In SQLite, wherever it is valid to include a string literal, one can use a parameter in one of the following forms:

    + +

      +
  • ?
  • ?NNN
  • :AAA
  • $AAA
  • @AAA

    + +

In the examples above, NNN is an integer value and AAA is an identifier. A parameter initially has a value of NULL. Prior to calling sqlite3_step() for the first time or immediately after sqlite3_reset(), the application can invoke one of the sqlite3_bind() interfaces to attach values to the parameters. Each call to sqlite3_bind() overrides prior bindings on the same parameter.
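A sketch of reusing one parameterized INSERT with different bound values (the people table and the value arrays are again hypothetical):

    const char *azName[] = { "Alice", "Bob" };
    int aAge[] = { 30, 41 };
    sqlite3_stmt *pStmt = 0;
    int i;

    sqlite3_prepare_v2(db, "INSERT INTO people(name, age) VALUES(?, ?)", -1, &pStmt, 0);
    for(i=0; i<2; i++){
      sqlite3_bind_text(pStmt, 1, azName[i], -1, SQLITE_STATIC);
      sqlite3_bind_int(pStmt, 2, aAge[i]);
      sqlite3_step(pStmt);    /* an INSERT runs to completion in a single step */
      sqlite3_reset(pStmt);   /* ready the statement for the next set of bindings */
    }
    sqlite3_finalize(pStmt);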

    + +

An application is allowed to prepare multiple SQL statements in advance and evaluate them as needed. There is no arbitrary limit to the number of outstanding prepared statements.

    + +

    4.0 Extending SQLite

    + +

SQLite includes interfaces that can be used to extend its functionality. Such routines include:

    + +

  • sqlite3_create_function()
  • sqlite3_create_collation()
  • sqlite3_create_module()

The sqlite3_create_collation() interface is used to create new collating sequences for sorting text. The sqlite3_create_module() interface is used to register new virtual table implementations.

    + +

    + The sqlite3_create_function() interface creates new SQL functions - + either scalar or aggregate. The new function implementation typically + makes use of the following additional interfaces: +

    + +

  • sqlite3_aggregate_context()
  • sqlite3_result()
  • sqlite3_user_data()
  • sqlite3_value()

    + All of the built-in SQL functions of SQLite are created using exactly + these same interfaces. Refer to the SQLite source code, and in particular + the date.c and func.c source files for examples. +

    + +

    5.0 Other Interfaces

    + +

    + This article only mentions the foundational SQLite interfaces. + The SQLite library includes many other APIs implementing useful + features that are not described here. + A complete list of functions that form the SQLite + application program interface is found at the + C/C++ Interface Specification. + Refer to that document for complete and authoritative information about + all SQLite interfaces. +

    +
    +This page last modified 2009/03/19 00:04:36 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/common.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/common.tcl --- sqlite3-3.4.2/www/common.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/common.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -# This file contains TCL procedures used to generate standard parts of -# web pages. -# - -proc header {txt} { - puts "$txt" - puts {
    } - puts \ -{ - - - - - - - - -
    - - - - - - -
    - - -
    } - puts
    -} - -proc footer {{rcsid {}}} { - puts { - - -
    } - set date [lrange $rcsid 3 4] - if {$date!=""} { - puts "This page last modified on $date" - } - puts {} -} - - -# The following proc is used to ensure consistent formatting in the -# HTML generated by lang.tcl and pragma.tcl. -# -proc Syntax {args} { - puts {} - foreach {rule body} $args { - puts "" - regsub -all < $body {%LT} body - regsub -all > $body {%GT} body - regsub -all %LT $body {} body - regsub -all %GT $body {} body - regsub -all {[]|[*?]} $body {&} body - regsub -all "\n" [string trim $body] "
    \n" body - regsub -all "\n *" $body "\n\\ \\ \\ \\ " body - regsub -all {[|,.*()]} $body {&} body - regsub -all { = } $body { = } body - regsub -all {STAR} $body {*} body - ## These metacharacters must be handled to undo being - ## treated as SQL punctuation characters above. - regsub -all {RPPLUS} $body {
    )+} body - regsub -all {LP} $body {(} body - regsub -all {RP} $body {)} body - ## Place the left-hand side of the rule in the 2nd table column. - puts "" - } - puts {
    " - puts "$rule ::=$body
    } -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/compile.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/compile.html --- sqlite3-3.4.2/www/compile.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/compile.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,890 @@ + + +Compilation Options For SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    1.0 Compilation Options For SQLite

    + +

    +For most purposes, SQLite can be built just fine using the default +compilation options. However, if required, the compile-time options +documented below can be used to +omit SQLite features (resulting in +a smaller compiled library size) or to change the +default values of some parameters. +

    + +

    +Every effort has been made to ensure that the various combinations +of compilation options work harmoniously and produce a working library. +Nevertheless, it is strongly recommended that the SQLite test-suite +be executed to check for errors before using an SQLite library built +with non-standard compilation options. +

    + +

    1.1 Options To Set Default Parameter Values

    + + +

    SQLITE_DEFAULT_AUTOVACUUM=<1 or 0>

    + This macro determines if SQLite creates databases with the + auto_vacuum flag set by default. The default value is 0 + (do not create auto-vacuum databases). In any case the + compile-time default may be overridden by the + PRAGMA auto_vacuum command. +

    +

    SQLITE_DEFAULT_CACHE_SIZE=<pages>

    + This macro sets the default size of the page-cache for each attached + database, in pages. This can be overridden by the + PRAGMA cache_size command. The default value is 2000. +

    +

    SQLITE_DEFAULT_FILE_FORMAT=<1 or 4>

    + The default schema-level file format used by SQLite when creating + new database files is set by this macro. The file formats are all + very similar. The difference between formats 1 and 4 is that format + 4 understands descending indices and has a tighter encoding for + boolean values.

    + +

    SQLite (as of version 3.6.0) can read and write any file format + between 1 and 4. But older versions of SQLite might not be able to + read formats greater than 1. So that older versions of SQLite will + be able to read and write database files created by newer versions + of SQLite, the default file format is set to 1 for maximum + compatibility.

    + +

    The file format for a new database can be set at runtime using + the PRAGMA legacy_file_format command. +

    +

    SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT=<bytes>

    + This option sets the size limit on rollback journal files in + persistent journal mode. When this + compile-time option is omitted there is no upper bound on the + size of the rollback journal file. The journal file size limit + can be changed at run-time using the journal_size_limit pragma. +

    +

    SQLITE_DEFAULT_MEMSTATUS=<1 or 0>

    + This macro is used to determine whether or not the features enabled and + disabled using the SQLITE_CONFIG_MEMSTATUS argument to sqlite3_config() + are available by default. The default value is 1 (SQLITE_CONFIG_MEMSTATUS + related features enabled). +

    +

    SQLITE_DEFAULT_PAGE_SIZE=<bytes>

    + This macro is used to set the default page-size used when a + database is created. The value assigned must be a power of 2. The + default value is 1024. The compile-time default may be overridden at + runtime by the PRAGMA page_size command. +

    +

    SQLITE_DEFAULT_TEMP_CACHE_SIZE=<pages>

    + This macro sets the default size of the page-cache for temporary files + created by SQLite to store intermediate results, in pages. It does + not affect the page-cache for the temp database, where tables created + using CREATE TEMP TABLE are stored. The default value + is 500. +

    +

    YYSTACKDEPTH=<max_depth>

    + This macro sets the maximum depth of the LALR(1) stack used by + the SQL parser within SQLite. The default value is 100. A typical + application will use less than about 20 levels of the stack. + Developers whose applications contain SQL statements that + need more than 100 LALR(1) stack entries should seriously + consider refactoring their SQL as it is likely to be well beyond + the ability of any human to comprehend. +

    + +

    1.2 Options To Set Size Limits

    + +

    There are compile-time options that will set upper bounds +on the sizes of various structures in SQLite. The compile-time +options normally set a hard upper bound that can be changed +at run-time on individual database connections using the +sqlite3_limit() interface.

    + +

    The compile-time options for setting upper bounds are +documented separately. The following is a list of +the available settings:

    + + + + +

    1.3 Options To Control Operating Characteristics

    + + +

    SQLITE_HAVE_ISNAN

    + If this option is present, then SQLite will use the isnan() function from + the system math library. Without this option (the default behavior) + SQLite uses its own internal implementation of isnan(). SQLite uses + its own internal isnan() implementation by default because of past + problems with system isnan() functions. +

    +

    SQLITE_OS_OTHER=<0 or 1>

This option causes SQLite to omit its built-in operating system interfaces for Unix, Windows, and OS/2. The resulting library will have no default operating system interface. Applications must use sqlite3_vfs_register() to register an appropriate interface before using SQLite. Applications must also supply implementations for the sqlite3_os_init() and sqlite3_os_end() interfaces. The usual practice is for the supplied sqlite3_os_init() to invoke sqlite3_vfs_register(). SQLite will automatically invoke sqlite3_os_init() when it initializes.

    + +

    This option is typically used when building SQLite for an embedded + platform with a custom operating system. +

    +

    SQLITE_SECURE_DELETE

    + This compile-time option causes SQLite to overwrite deleted + information with zeros in addition to marking the space + as available for reuse. Without this option, deleted data + might be recoverable from a database using a binary editor. + However, there is a performance penalty for using this option.

    + +

This option does not cause deleted data to be securely removed from the underlying storage media.

    +

    SQLITE_THREADSAFE=<0 or 1 or 2>

    + This option controls whether or not code is included in SQLite to + enable it to operate safely in a multithreaded environment. The + default is SQLITE_THREADSAFE=1 which is safe for use in a multithreaded + environment. When compiled with SQLITE_THREADSAFE=0 all mutexing code + is omitted and it is unsafe to use SQLite in a multithreaded program. + When compiled with SQLITE_THREADSAFE=2, SQLite can be used in a multithreaded + program so long as no two threads attempt to use the same + database connection at the same time.

    + +

    To put it another way, SQLITE_THREADSAFE=1 sets the default + threading mode to Serialized. SQLITE_THREADSAFE=2 sets the default + threading mode to Multi-threaded. And SQLITE_THREADSAFE=0 sets the + threading mode to Single-threaded.

    + +

    The value of SQLITE_THREADSAFE can be determined at run-time + using the sqlite3_threadsafe() interface.
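For example, an application that requires a thread-safe build could perform a check along these lines at startup (a sketch only):

    if( sqlite3_threadsafe()==0 ){
      fprintf(stderr, "SQLite was compiled with SQLITE_THREADSAFE=0; "
                      "refusing to start worker threads\n");
    }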

    + +

    When SQLite has been compiled with SQLITE_THREADSAFE=1 or + SQLITE_THREADSAFE=2 then the threading mode + can be altered at run-time using the sqlite3_config() interface together + with one of these verbs:

    + +

  • SQLITE_CONFIG_SINGLETHREAD
  • SQLITE_CONFIG_MULTITHREAD
  • SQLITE_CONFIG_SERIALIZED

    The SQLITE_OPEN_NOMUTEX and + SQLITE_OPEN_FULLMUTEX flags to sqlite3_open_v2() can also be used + to adjust the threading mode of individual database connections + at run-time.

    + +

    Note that when SQLite is compiled with SQLITE_THREADSAFE=0, the code + to make SQLite threadsafe is omitted from the build. When this occurs, + it is impossible to change the threading mode at start-time or run-time.

    + +

    See the threading mode documentation for additional information + on aspects of using SQLite in a multithreaded environment. +

    +

    SQLITE_TEMP_STORE=<0 through 3>

    + This option controls whether temporary files are stored on disk or + in memory. The meanings for various settings of this compile-time + option are as follows:

    + +

    + + + + + +
    SQLITE_TEMP_STOREMeaning
    0Always use temporary files
    1Use files by default but allow the + PRAGMA temp_store command to override
    2Use memory by default but allow the + PRAGMA temp_store command to override
    3Always use memory

    + +

    The default setting is 1. + Additional information can be found in tempfiles.html. +

    + + +

    1.4 Options To Enable Features Normally Turned Off

    + + +

    SQLITE_ENABLE_ATOMIC_WRITE

    + If this C-preprocessor macro is defined and if the + xDeviceCharacteristics method of sqlite3_io_methods object for + a database file reports (via one of the SQLITE_IOCAP_ATOMIC bits) + that the filesystem supports atomic writes and if a transaction + involves a change to only a single page of the database file, + then the transaction commits with just a single write request of + a single page of the database and no rollback journal is created + or written. On filesystems that support atomic writes, this + optimization can result in significant speed improvements for + small updates. However, few filesystems support this capability + and the code paths that check for this capability slow down write + performance on systems that lack atomic write capability, so this + feature is disabled by default. +

    +

    SQLITE_ENABLE_COLUMN_METADATA

    + When this C-preprocessor macro is defined, SQLite includes some + additional APIs that provide convenient access to meta-data about + tables and queries. The APIs that are enabled by this option are:

    + +

    +

    +

    SQLITE_ENABLE_FTS3

    + When this option is defined in the amalgamation, version 3 + of the full-text search engine is added to the build automatically. +

    +

    SQLITE_ENABLE_FTS3_PARENTHESIS

This option modifies the query pattern parser in FTS3 such that it supports operators AND and NOT (in addition to the usual OR and NEAR) and also allows query expressions to contain nested parentheses.

    +

    SQLITE_ENABLE_ICU

    + This option causes the + International Components for Unicode + or "ICU" extension to SQLite to be added to the build. +

    +

    SQLITE_ENABLE_IOTRACE

When both the SQLite core and the Command Line Interface (CLI) are compiled with this option, the CLI provides an extra command named ".iotrace" that provides a low-level log of I/O activity. This option is experimental and may be discontinued in a future release.

    +

    SQLITE_ENABLE_LOCKING_STYLE

This option enables additional logic in the OS interface layer for Mac OS X. The additional logic attempts to determine the type of the underlying filesystem and choose an alternative locking strategy that works correctly for that filesystem type. Five locking strategies are available:

    + +

      +
    • POSIX locking style. This is the default locking style and the + style used by other (non Mac OS X) Unixes. Locks are obtained and + released using the fcntl() system call.

      + +

    • AFP locking style. This locking style is used for network file + systems that use the AFP (Apple Filing Protocol) protocol. Locks + are obtained by calling the library function _AFPFSSetLock().

      + +

    • Flock locking style. This is used for file-systems that do not + support POSIX locking style. Locks are obtained and released using + the flock() system call.

      + +

• Dot-file locking style. This locking style is used when neither flock nor POSIX locking styles are supported by the file system. Database locks are obtained by creating an entry in the file-system at a well-known location relative to the database file (a "dot-file") and relinquished by deleting the same file.

      + +

    • No locking style. If none of the above can be supported, this + locking style is used. No database locking mechanism is used. When + this system is used it is not safe for a single database to be + accessed by multiple clients. +

    + +

    Additionally, five extra VFS implementations are provided as well as the + default. By specifying one of the extra VFS implementations + when calling sqlite3_open_v2(), an application may bypass the file-system + detection logic and explicitly select one of the above locking styles. The + five extra VFS implementations are called "unix-posix", "unix-afp", + "unix-flock", "unix-dotfile" and "unix-none". +

    +

    SQLITE_ENABLE_MEMORY_MANAGEMENT

    + This option adds extra logic to SQLite that allows it to release unused + memory upon request. This option must be enabled in order for the + sqlite3_release_memory() interface to work. If this compile-time + option is not used, the sqlite3_release_memory() interface is a + no-op. Since sqlite3_soft_heap_limit() depends on + sqlite3_release_memory(), this option is also necessary for + the correct operation of sqlite3_soft_heap_limit(). +

    +

    SQLITE_ENABLE_MEMSYS3

    + This option includes code in SQLite that implements an alternative + memory allocator. This alternative memory allocator is only engaged + when the SQLITE_CONFIG_HEAP option to sqlite3_config() is used to + supply a large chunk of memory from which all memory allocations are + taken. + The MEMSYS3 memory allocator uses a hybrid allocation algorithm + patterned after dlmalloc(). Only one of SQLITE_ENABLE_MEMSYS3 and + SQLITE_ENABLE_MEMSYS5 may be enabled at once. +

    +

    SQLITE_ENABLE_MEMSYS5

    + This option includes code in SQLite that implements an alternative + memory allocator. This alternative memory allocator is only engaged + when the SQLITE_CONFIG_HEAP option to sqlite3_config() is used to + supply a large chunk of memory from which all memory allocations are + taken. + The MEMSYS5 module rounds all allocations up to the next power + of two and uses a first-fit, buddy-allocator algorithm + that provides strong guarantees against fragmentation and breakdown + subject to certain operating constraints. +

    +

    SQLITE_ENABLE_RTREE

    + This option causes SQLite to include support for the + R*Tree index extension. +

    +

    SQLITE_ENABLE_UPDATE_DELETE_LIMIT

    + This option enables an optional ORDER BY and LIMIT clause on + UPDATE and DELETE statements.

    + +

    If this option is defined, then it must also be + defined when using the 'lemon' tool to generate a parse.c + file. Because of this, this option may only be used when the library is built + from source, not from the amalgamation or from the collection of + pre-packaged C files provided for non-Unix like platforms on the website. +

    +

    +

    SQLITE_ENABLE_UNLOCK_NOTIFY

    + This option enables the sqlite3_unlock_notify() interface and + its associated functionality. See the documentation titled + Using the SQLite Unlock Notification Feature for additional + information. +

    +

    YYTRACKMAXSTACKDEPTH

    + This option causes the LALR(1) parser stack depth to be tracked + and reported using the sqlite3_status(SQLITE_STATUS_PARSER_STACK,...) + interface. SQLite's LALR(1) parser has a fixed stack depth + (determined at compile-time using the YYSTACKDEPTH options). + This option can be used to help determine if an application is + getting close to exceeding the maximum LALR(1) stack depth. +

    + + +

    1.5 Options To Disable Features Normally Turned On

    + + +

    SQLITE_DISABLE_LFS

    + If this C-preprocessor macro is defined, large file support + is disabled. +

    +

    SQLITE_DISABLE_DIRSYNC

    + If this C-preprocessor macro is defined, directory syncs + are disabled. SQLite typically attempts to sync the parent + directory when a file is deleted to ensure the directory + entries are updated immediately on disk. +

    + + + +

    1.6 Options To Omit Features

    + +

The following options can be used to reduce the size of the compiled library by omitting optional features. This is probably only useful in embedded systems where space is especially tight, as even with all features included the SQLite library is relatively small. Don't forget to tell your compiler to optimize for binary size (the -Os option if using GCC). Telling your compiler to optimize for size usually has a much larger impact on library footprint than employing any of these compile-time options. You should also verify that debugging options are disabled.

    + +

The macros in this section do not require values. The following compilation switches all have the same effect:
-DSQLITE_OMIT_ALTERTABLE
-DSQLITE_OMIT_ALTERTABLE=1
-DSQLITE_OMIT_ALTERTABLE=0

    + +

If any of these options are defined, then the same set of SQLITE_OMIT_* options must also be defined when using the 'lemon' tool to generate the parse.c file and when compiling the 'mkkeywordhash' tool which generates the keywordhash.h file. Because of this, these options may only be used when the library is built from canonical source, not from the amalgamation or from the collection of pre-packaged C files provided for non-Unix like platforms on the website. Any SQLITE_OMIT_* options which can be used directly with the amalgamation are listed below; however, the warnings in the following paragraph should be noted.

    + +
Important Note: The SQLITE_OMIT_* options do not work with the amalgamation or with pre-packaged C code files. SQLITE_OMIT_* compile-time options only work correctly when SQLite is built from canonical source files.
    + + +

Special versions of the SQLite amalgamation that do work with a predetermined set of SQLITE_OMIT_* options can be generated. To do so, make a copy of the Makefile.linux-gcc makefile template in the canonical source code distribution. Change the name of your copy to simply "Makefile". Then edit "Makefile" to set up appropriate compile-time options. Then type:

    make clean; make sqlite3.c
The resulting "sqlite3.c" amalgamation code file (and its associated header file "sqlite3.h") can then be moved to a non-Unix platform for final compilation using a native compiler.

    + +

    +The following SQLITE_OMIT_* options are available: +

    + +


    + + +

    All of the SQLITE_OMIT_* options are unsupported.

    + +
    +Important Note: +The SQLITE_OMIT_* compile-time options are unsupported. +
    + +

The SQLITE_OMIT_* compile-time options are usually untested and are almost certainly untested in combination. Any or all of these options may be removed from the code in future releases and without warning. For any particular release, some of these options may cause compile-time or run-time failures, particularly when used in combination with other options.

    + + +

    SQLITE_OMIT_ALTERTABLE

    + When this option is defined, the + ALTER TABLE command is not included in the + library. Executing an ALTER TABLE statement causes a parse error. +

    +

    SQLITE_OMIT_ANALYZE

    + When this option is defined, the ANALYZE command is omitted from + the build. +

    +

    SQLITE_OMIT_ATTACH

    + When this option is defined, the ATTACH and DETACH commands are + omitted from the build. +

    +

    SQLITE_OMIT_AUTHORIZATION

    + Defining this option omits the authorization callback feature from the + library. The sqlite3_set_authorizer() API function is not present + in the library. +

    +

    SQLITE_OMIT_AUTOINCREMENT

This option is used to omit the AUTOINCREMENT functionality. When this macro is defined, columns declared as "INTEGER PRIMARY KEY AUTOINCREMENT" behave in the same way as columns declared as "INTEGER PRIMARY KEY" when a NULL is inserted. The sqlite_sequence system table is neither created, nor respected if it already exists.

    +

    SQLITE_OMIT_AUTOINIT

For backwards compatibility with older versions of SQLite that lack the sqlite3_initialize() interface, the sqlite3_initialize() interface is called automatically upon entry to certain key interfaces such as sqlite3_open(), sqlite3_vfs_register(), and sqlite3_mprintf(). The overhead of invoking sqlite3_initialize() automatically in this way may be omitted by building SQLite with the SQLITE_OMIT_AUTOINIT C-preprocessor macro. When built using SQLITE_OMIT_AUTOINIT, SQLite will not automatically initialize itself and the application is required to invoke sqlite3_initialize() directly prior to beginning use of the SQLite library.
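A minimal sketch of the extra call such a build requires; the helper name open_db() is invented for the example.

#include "sqlite3.h"

static int open_db(const char *zFilename, sqlite3 **pDb){
  int rc = sqlite3_initialize();     /* no longer implicit in sqlite3_open() */
  if( rc!=SQLITE_OK ) return rc;
  return sqlite3_open(zFilename, pDb);
}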

    +

    SQLITE_OMIT_AUTOVACUUM

    + If this option is defined, the library cannot create or write to + databases that support auto_vacuum. + Executing a PRAGMA auto_vacuum statement is not an error + (since unknown PRAGMAs are silently ignored), but does not return a value + or modify the auto-vacuum flag in the database file. If a database that + supports auto-vacuum is opened by a library compiled with this option, it + is automatically opened in read-only mode. +

    +

    SQLITE_OMIT_BETWEEN_OPTIMIZATION

    + This option disables the use of indices with WHERE clause terms + that employ the BETWEEN operator. +

    +

    SQLITE_OMIT_BLOB_LITERAL

    + When this option is defined, it is not possible to specify a blob in + an SQL statement using the X'ABCD' syntax. +

    +

    SQLITE_OMIT_BUILTIN_TEST

    + A standard SQLite build includes a small amount of logic controlled + by the sqlite3_test_control() interface that is used to exercise + parts of the SQLite core that are difficult to control and measure using + the standard API. This option omits that built-in test logic. +

    +

    SQLITE_OMIT_CAST

    + This option causes SQLite to omit support for the CAST operator. +

    +

    SQLITE_OMIT_CHECK

This option causes SQLite to omit support for CHECK constraints. The parser will still accept CHECK constraints in SQL statements; they will just not be enforced.

    +

    SQLITE_OMIT_COMPLETE

    + This option causes the sqlite3_complete() and sqlite3_complete16() + interfaces to be omitted. +

    +

    SQLITE_OMIT_COMPOUND_SELECT

    + This option is used to omit the compound SELECT functionality. + SELECT statements that use the + UNION, UNION ALL, INTERSECT or EXCEPT compound SELECT operators will + cause a parse error. +

    +

    SQLITE_OMIT_CONFLICT_CLAUSE

    + In the future, this option will be used to omit the + ON CONFLICT clause from the library. +

    +

    SQLITE_OMIT_DATETIME_FUNCS

If this option is defined, SQLite's built-in date and time manipulation functions are omitted. Specifically, the SQL functions julianday(), date(), time(), datetime() and strftime() are not available. The default column values CURRENT_TIME, CURRENT_DATE and CURRENT_TIMESTAMP are still available.

    +

    SQLITE_OMIT_DECLTYPE

    + This option causes SQLite to omit support for the + sqlite3_column_decltype() and sqlite3_column_decltype16() + interfaces. +

    +

    SQLITE_OMIT_DEPRECATED

    + This option causes SQLite to omit support for interfaces + marked as deprecated. This includes + sqlite3_aggregate_count(), + sqlite3_expired(), + sqlite3_transfer_bindings(), + sqlite3_global_recover(), + sqlite3_thread_cleanup() and + sqlite3_memory_alarm() interfaces. +

    +

    SQLITE_OMIT_DISKIO

    + This option omits all support for writing to the disk and forces + databases to exist in memory only. +

    +

    SQLITE_OMIT_EXPLAIN

    + Defining this option causes the EXPLAIN command to be omitted from the + library. Attempting to execute an EXPLAIN statement will cause a parse + error. +

    +

    SQLITE_OMIT_FLAG_PRAGMAS

    + This option omits support for a subset of PRAGMA commands that + query and set boolean properties. +

    +

    SQLITE_OMIT_FLOATING_POINT

This option is used to omit floating-point number support from the SQLite library. When specified, specifying a floating point number as a literal (e.g. "1.01") results in a parse error.

    + +

    In the future, this option may also disable other floating point + functionality, for example the sqlite3_result_double(), + sqlite3_bind_double(), sqlite3_value_double() and + sqlite3_column_double() API functions. +

    +

    +

    SQLITE_OMIT_FOREIGN_KEY

    + If this option is defined, FOREIGN KEY clauses in column declarations are + ignored. +

    +

    SQLITE_OMIT_GET_TABLE

This option causes support for sqlite3_get_table() and sqlite3_free_table() to be omitted.
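For reference, a minimal sketch of the convenience interface that this option removes; the query text and helper name are invented for the example.

#include <stdio.h>
#include "sqlite3.h"

static void dump_schema_names(sqlite3 *db){
  char **azResult = 0;
  char *zErr = 0;
  int nRow = 0, nCol = 0, i;
  int rc = sqlite3_get_table(db, "SELECT name FROM sqlite_master;",
                             &azResult, &nRow, &nCol, &zErr);
  if( rc==SQLITE_OK ){
    /* azResult[0..nCol-1] hold column names; data rows follow. */
    for(i=1; i<=nRow; i++) printf("%s\n", azResult[i*nCol]);
  }else{
    fprintf(stderr, "error: %s\n", zErr ? zErr : "?");
    sqlite3_free(zErr);
  }
  sqlite3_free_table(azResult);
}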

    +

    SQLITE_OMIT_INCRBLOB

This option causes support for incremental BLOB I/O to be omitted.
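For reference, a minimal sketch of the incremental BLOB I/O interface that this option removes; the database, table, and column names are invented for the example.

#include "sqlite3.h"

static int read_blob_prefix(sqlite3 *db, sqlite3_int64 rowid,
                            void *buf, int nBuf){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "images", "data", rowid,
                             0 /* read-only */, &pBlob);
  if( rc==SQLITE_OK ){
    rc = sqlite3_blob_read(pBlob, buf, nBuf, 0);   /* nBuf bytes from offset 0 */
    sqlite3_blob_close(pBlob);
  }
  return rc;
}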

    +

    SQLITE_OMIT_INTEGRITY_CHECK

    + This option omits support for the integrity_check pragma. +

    +

    SQLITE_OMIT_LIKE_OPTIMIZATION

    + This option disables the ability of SQLite to use indices to help + resolve LIKE and GLOB operators in a WHERE clause. +

    +

    SQLITE_OMIT_LOAD_EXTENSION

This option omits the entire extension loading mechanism from SQLite, including the sqlite3_enable_load_extension() and sqlite3_load_extension() interfaces.
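For reference, a minimal sketch of the calls that this option removes; the shared-library path "./myext.so" is a placeholder.

#include <stdio.h>
#include "sqlite3.h"

static int load_my_extension(sqlite3 *db){
  char *zErr = 0;
  int rc;
  sqlite3_enable_load_extension(db, 1);      /* loading is off by default */
  rc = sqlite3_load_extension(db, "./myext.so", 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "load_extension: %s\n", zErr ? zErr : "unknown error");
    sqlite3_free(zErr);
  }
  sqlite3_enable_load_extension(db, 0);      /* re-disable when done */
  return rc;
}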

    +

    SQLITE_OMIT_LOCALTIME

    + This option omits the "localtime" modifier from the date and time + functions. This option is sometimes useful when trying to compile + the date and time functions on a platform that does not support the + concept of local time. +

    +

    SQLITE_OMIT_MEMORYDB

When this is defined, the library does not respect the special database name ":memory:" (normally used to create an in-memory database). If ":memory:" is passed to sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(), a file with this name will be opened or created.
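For contrast, a minimal sketch of the normal behavior of the special name; with SQLITE_OMIT_MEMORYDB defined, the same call would instead create an ordinary disk file literally named ":memory:".

#include "sqlite3.h"

static int open_scratch_db(sqlite3 **pDb){
  /* In a default build this opens a private, temporary in-memory database. */
  return sqlite3_open(":memory:", pDb);
}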

    +

    SQLITE_OMIT_OR_OPTIMIZATION

    + This option disables the ability of SQLite to use an index together + with terms of a WHERE clause connected by the OR operator. +

    +

    SQLITE_OMIT_PAGER_PRAGMAS

    + Defining this option omits pragmas related to the pager subsystem from + the build. +

    +

    SQLITE_OMIT_PRAGMA

    + This option is used to omit the PRAGMA command + from the library. Note that it is useful to define the macros that omit + specific pragmas in addition to this, as they may also remove supporting code + in other sub-systems. This macro removes the PRAGMA command only. +

    +

    SQLITE_OMIT_PROGRESS_CALLBACK

This option may be defined to omit the capability to issue "progress" callbacks during long-running SQL statements. The sqlite3_progress_handler() API function is not present in the library.
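For reference, a minimal sketch of the interface that this option removes: a handler invoked every N virtual-machine opcodes, where returning non-zero interrupts the running statement. The opcode count and cancel-flag arrangement are invented for the example.

#include "sqlite3.h"

static int progress_cb(void *pArg){
  int *pCancel = (int*)pArg;
  return *pCancel;                   /* non-zero => abort the current statement */
}

static void install_progress_handler(sqlite3 *db, int *pCancelFlag){
  sqlite3_progress_handler(db, 1000 /* opcodes between callbacks */,
                           progress_cb, pCancelFlag);
}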

    +

    SQLITE_OMIT_QUICKBALANCE

    + This option omits an alternative, faster B-Tree balancing routine. + Using this option makes SQLite slightly smaller at the expense of + making it run slightly slower. +

    +

    SQLITE_OMIT_REINDEX

    + When this option is defined, the REINDEX + command is not included in the library. + Executing a REINDEX statement causes + a parse error. +

    +

    SQLITE_OMIT_SCHEMA_PRAGMAS

    + Defining this option omits pragmas for querying the database schema from + the build. +

    +

    SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS

    + Defining this option omits pragmas for querying and modifying the + database schema version and user version from the build. Specifically, the + schema_version and user_version PRAGMAs are omitted. +

    +

    SQLITE_OMIT_SHARED_CACHE

    + This option builds SQLite without support for shared-cache mode. + The sqlite3_enable_shared_cache() is omitted along with a fair + amount of logic within the B-Tree subsystem associated with shared + cache management. +

    +

    SQLITE_OMIT_SUBQUERY

    + If defined, support for sub-selects and the IN() operator are omitted. +

    +

    SQLITE_OMIT_TCL_VARIABLE

    + If this macro is defined, then the special "$" syntax + used to automatically bind SQL variables to TCL variables is omitted. +

    +

    SQLITE_OMIT_TEMPDB

    + This option omits support for TEMP or TEMPORARY tables. +

    +

    SQLITE_OMIT_TRACE

This option omits support for the sqlite3_profile() and sqlite3_trace() interfaces and their associated logic.
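For reference, a minimal sketch of the tracing and profiling hooks that this option removes; the output format is invented for the example.

#include <stdio.h>
#include "sqlite3.h"

static void trace_cb(void *pArg, const char *zSql){
  fprintf(stderr, "SQL: %s\n", zSql);            /* each statement as it runs */
}

static void profile_cb(void *pArg, const char *zSql, sqlite3_uint64 nNs){
  fprintf(stderr, "%llu ns: %s\n", (unsigned long long)nNs, zSql);
}

static void install_tracing(sqlite3 *db){
  sqlite3_trace(db, trace_cb, 0);
  sqlite3_profile(db, profile_cb, 0);
}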

    +

    SQLITE_OMIT_TRIGGER

Defining this option omits support for TRIGGER objects. Neither the CREATE TRIGGER nor DROP TRIGGER commands are available in this case, and attempting to execute either will result in a parse error.

    + +

    WARNING: If this macro is defined, it will not be possible to open a database + for which the schema contains TRIGGER objects. +

    +

    SQLITE_OMIT_TRUNCATE_OPTIMIZATION

In a default build of SQLite, if a DELETE statement has no WHERE clause and operates on a table with no triggers, an optimization occurs that causes the DELETE to occur by dropping and recreating the table. Dropping and recreating a table is usually much faster than deleting the table content row by row. This is the "truncate optimization". Defining this option disables the truncate optimization.

    +

    SQLITE_OMIT_UTF16

    + This macro is used to omit support for UTF16 text encoding. When this is + defined all API functions that return or accept UTF16 encoded text are + unavailable. These functions can be identified by the fact that they end + with '16', for example sqlite3_prepare16(), sqlite3_column_text16() and + sqlite3_bind_text16(). +

    +

    SQLITE_OMIT_VACUUM

    + When this option is defined, the VACUUM + command is not included in the library. + Executing a VACUUM statement causes + a parse error. +

    +

    SQLITE_OMIT_VIEW

    + Defining this option omits support for VIEW objects. Neither the + CREATE VIEW nor the DROP VIEW + commands are available in this case, and + attempting to execute either will result in a parse error.

    + +

    WARNING: If this macro is defined, it will not be possible to open a database + for which the schema contains VIEW objects. +

    +

    SQLITE_OMIT_VIRTUALTABLE

    + This option omits support for the Virtual Table + mechanism in SQLite. +

    +

    SQLITE_OMIT_WSD

This option builds a version of the SQLite library that contains no Writable Static Data (WSD). WSD is global variables and/or static variables. Some platforms do not support WSD, and this option is necessary in order for SQLite to work on those platforms.

    + +

    Unlike other OMIT options which make the SQLite library smaller, + this option actually increases the size of SQLite and makes it run + a little slower. Only use this option if SQLite is being built for an + embedded target that does not support WSD. +

    +

    SQLITE_OMIT_XFER_OPT

    + This option omits support for optimizations that help statements + of the form "INSERT INTO ... SELECT ..." run faster. +

    +

    SQLITE_ZERO_MALLOC

This option omits both the default memory allocator and the debugging memory allocator from the build and substitutes a stub memory allocator that always fails. SQLite will not run with this stub memory allocator since it will be unable to allocate memory. But this stub can be replaced at start-time using sqlite3_config(SQLITE_CONFIG_MALLOC,...) or sqlite3_config(SQLITE_CONFIG_HEAP,...). So the net effect of this compile-time option is that it allows SQLite to be compiled and linked against a system library that does not support malloc(), free(), and/or realloc().
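As a rough sketch of replacing the stub allocator at start-time, the fragment below shows the shape of an SQLITE_CONFIG_MALLOC registration. The my* routines are placeholders standing in for whatever working allocator the platform actually provides; as written they always fail, exactly like the stub, and must be replaced.

#include "sqlite3.h"

static void *myMalloc(int n){ return 0; }             /* placeholder */
static void  myFree(void *p){ }                       /* placeholder */
static void *myRealloc(void *p, int n){ return 0; }   /* placeholder */
static int   mySize(void *p){ return 0; }             /* placeholder */
static int   myRoundup(int n){ return (n+7)&~7; }     /* round up to 8 bytes */
static int   myInit(void *pApp){ return SQLITE_OK; }
static void  myShutdown(void *pApp){ }

static int install_allocator(void){
  static sqlite3_mem_methods m = {
    myMalloc, myFree, myRealloc, mySize, myRoundup, myInit, myShutdown, 0
  };
  /* Must run before sqlite3_initialize() or any database is opened. */
  return sqlite3_config(SQLITE_CONFIG_MALLOC, &m);
}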

    + +

    1.7 Analysis and Debugging Options

    + +

    SQLITE_DEBUG

    + The SQLite source code contains literally thousands of assert() statements + used to verify internal assumptions and subroutine preconditions and + postconditions. These assert() statements are normally turned off + (they generate no code) since turning them on makes SQLite run approximately + three times slower. But for testing and analysis, it is useful to turn + the assert() statements on. The SQLITE_DEBUG compile-time option does this. + SQLITE_DEBUG also turns on some other debugging features. +

    +

    SQLITE_MEMDEBUG

    + The SQLITE_MEMDEBUG option causes an instrumented + debugging memory allocator + to be used as the default memory allocator within SQLite. The + instrumented memory allocator checks for misuse of dynamically allocated + memory. Examples of misuse include using memory after it is freed, + writing off the ends of a memory allocation, freeing memory not previously + obtained from the memory allocator, or failing to initialize newly + allocated memory. +

    +
    +This page last modified 2009/06/06 20:41:03 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/compile.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/compile.tcl --- sqlite3-3.4.2/www/compile.tcl 2005-03-19 15:10:45.000000000 +0000 +++ sqlite3-3.6.16/www/compile.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,278 +0,0 @@ -# -# Run this Tcl script to generate the compile.html file. -# -set rcsid {$Id: compile.tcl,v 1.5 2005/03/19 15:10:45 drh Exp $ } -source common.tcl -header {Compilation Options For SQLite} - -puts { -

    Compilation Options For SQLite

    - -

    -For most purposes, SQLite can be built just fine using the default -compilation options. However, if required, the compile-time options -documented below can be used to -omit SQLite features (resulting in -a smaller compiled library size) or to change the -default values of some parameters. -

    -

    -Every effort has been made to ensure that the various combinations -of compilation options work harmoniously and produce a working library. -Nevertheless, it is strongly recommended that the SQLite test-suite -be executed to check for errors before using an SQLite library built -with non-standard compilation options. -

    - -

    Options To Set Default Parameter Values

    - -

    SQLITE_DEFAULT_AUTOVACUUM=<1 or 0>
    -This macro determines if SQLite creates databases with the -auto-vacuum -flag set by default. The default value is 0 (do not create auto-vacuum -databases). In any case the compile-time default may be overridden by the -"PRAGMA auto_vacuum" command. -

    - -

    SQLITE_DEFAULT_CACHE_SIZE=<pages>
    -This macro sets the default size of the page-cache for each attached -database, in pages. This can be overridden by the "PRAGMA cache_size" -comamnd. The default value is 2000. -

    - -

    SQLITE_DEFAULT_PAGE_SIZE=<bytes>
    -This macro is used to set the default page-size used when a -database is created. The value assigned must be a power of 2. The -default value is 1024. The compile-time default may be overridden at -runtime by the "PRAGMA page_size" command. -

    - -

    SQLITE_DEFAULT_TEMP_CACHE_SIZE=<pages>
    -This macro sets the default size of the page-cache for temporary files -created by SQLite to store intermediate results, in pages. It does -not affect the page-cache for the temp database, where tables created -using "CREATE TEMP TABLE" are stored. The default value is 500. -

    - -

    SQLITE_MAX_PAGE_SIZE=<bytes>
    -This is used to set the maximum allowable page-size that can -be specified by the "PRAGMA page_size" command. The default value -is 8192. -

    - - -

    Options To Omit Features

    - -

    The following options are used to reduce the size of the compiled -library by omiting optional features. This is probably only useful -in embedded systems where space is especially tight, as even with all -features included the SQLite library is relatively small. Don't forget -to tell your compiler to optimize for binary size! (the -Os option if -using GCC).

    - -

    The macros in this section do not require values. The following -compilation switches all have the same effect:
    --DSQLITE_OMIT_ALTERTABLE
    --DSQLITE_OMIT_ALTERTABLE=1
    --DSQLITE_OMIT_ALTERTABLE=0 -

    - -

    If any of these options are defined, then the same set of SQLITE_OMIT_XXX -options must also be defined when using the 'lemon' tool to generate a parse.c -file. Because of this, these options may only used when the library is built -from source, not from the collection of pre-packaged C files provided for -non-UNIX like platforms on the website. -

    - -

    SQLITE_OMIT_ALTERTABLE
    -When this option is defined, the -ALTER TABLE command is not included in the -library. Executing an ALTER TABLE statement causes a parse error. -

    - -

    SQLITE_OMIT_AUTHORIZATION
    -Defining this option omits the authorization callback feature from the -library. The -sqlite3_set_authorizer() API function is not present in the library. -

    - -

    SQLITE_OMIT_AUTOVACUUM
    -If this option is defined, the library cannot create or write to -databases that support -auto-vacuum. Executing a -"PRAGMA auto_vacuum" statement is not an error, but does not return a value -or modify the auto-vacuum flag in the database file. If a database that -supports auto-vacuum is opened by a library compiled with this option, it -is automatically opened in read-only mode. -

    - -

    SQLITE_OMIT_AUTOINCREMENT
    -This option is used to omit the AUTOINCREMENT functionality. When this -is macro is defined, columns declared as "INTEGER PRIMARY KEY AUTOINCREMENT" -behave in the same way as columns declared as "INTEGER PRIMARY KEY" when a -NULL is inserted. The sqlite_sequence system table is neither created, nor -respected if it already exists. -

    -

    TODO: Need a link here - AUTOINCREMENT is not yet documented

    - -

    SQLITE_OMIT_BLOB_LITERAL
    -When this option is defined, it is not possible to specify a blob in -an SQL statement using the X'ABCD' syntax.

    -} -#

    WARNING: The VACUUM command depends on this syntax for vacuuming databases -#that contain blobs, so disabling this functionality may render a database -#unvacuumable. -#

    -#

    TODO: Need a link here - is that syntax documented anywhere?

    -puts { - -

    SQLITE_OMIT_COMPLETE
    -This option causes the -sqlite3_complete API to be omitted. -

    - -

    SQLITE_OMIT_COMPOUND_SELECT
    -This option is used to omit the compound SELECT functionality. -SELECT statements that use the -UNION, UNION ALL, INTERSECT or EXCEPT compound SELECT operators will -cause a parse error. -

    - -

    SQLITE_OMIT_CONFLICT_CLAUSE
    -In the future, this option will be used to omit the -ON CONFLICT clause from the library. -

    - -

    SQLITE_OMIT_DATETIME_FUNCS
    -If this option is defined, SQLite's built-in date and time manipulation -functions are omitted. Specifically, the SQL functions julianday(), date(), -time(), datetime() and strftime() are not available. The default column -values CURRENT_TIME, CURRENT_DATE and CURRENT_DATETIME are still available. -

    - -

    SQLITE_OMIT_EXPLAIN
    -Defining this option causes the EXPLAIN command to be omitted from the -library. Attempting to execute an EXPLAIN statement will cause a parse -error. -

    - -

    SQLITE_OMIT_FLOATING_POINT
    -This option is used to omit floating-point number support from the SQLite -library. When specified, specifying a floating point number as a literal -(i.e. "1.01") results in a parse error. -

    -

    In the future, this option may also disable other floating point -functionality, for example the sqlite3_result_double(), -sqlite3_bind_double(), sqlite3_value_double() and sqlite3_column_double() -API functions. -

    - -

    SQLITE_OMIT_FOREIGN_KEY
    -If this option is defined, FOREIGN KEY clauses in column declarations are -ignored. -

    - -

    SQLITE_OMIT_INTEGRITY_CHECK
    -This option may be used to omit the -"PRAGMA integrity_check" -command from the compiled library. -

    - -

    SQLITE_OMIT_MEMORYDB
    -When this is defined, the library does not respect the special database -name ":memory:" (normally used to create an in-memory database). If -":memory:" is passed to sqlite3_open(), a file with this name will be -opened or created. -

    - -

    SQLITE_OMIT_PAGER_PRAGMAS
    -Defining this option omits pragmas related to the pager subsystem from -the build. Currently, the -default_cache_size and -cache_size pragmas are omitted. -

    - -

    SQLITE_OMIT_PRAGMA
    -This option is used to omit the PRAGMA command -from the library. Note that it is useful to define the macros that omit -specific pragmas in addition to this, as they may also remove supporting code -in other sub-systems. This macro removes the PRAGMA command only. -

    - -

    SQLITE_OMIT_PROGRESS_CALLBACK
    -This option may be defined to omit the capability to issue "progress" -callbacks during long-running SQL statements. The -sqlite3_progress_handler() -API function is not present in the library. - -

    SQLITE_OMIT_REINDEX
    -When this option is defined, the REINDEX -command is not included in the library. Executing a REINDEX statement causes -a parse error. -

    - -

    SQLITE_OMIT_SCHEMA_PRAGMAS
    -Defining this option omits pragmas for querying the database schema from -the build. Currently, the -table_info, -index_info, -index_list and -database_list -pragmas are omitted. -

    - -

    SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
    -Defining this option omits pragmas for querying and modifying the -database schema version and user version from the build. Specifically, the -schema_version and -user_version -pragmas are omitted. - -

    SQLITE_OMIT_SUBQUERY
    -

    If defined, support for sub-selects and the IN() operator are omitted. -

    - -

    SQLITE_OMIT_TCL_VARIABLE
    -

    If this macro is defined, then the special "$" syntax -used to automatically bind SQL variables to TCL variables is omitted. -

    - -

    SQLITE_OMIT_TRIGGER
    -Defining this option omits support for VIEW objects. Neither the -CREATE TRIGGER or -DROP TRIGGER -commands are available in this case, attempting to execute either will result -in a parse error. -

    -

    -WARNING: If this macro is defined, it will not be possible to open a database -for which the schema contains TRIGGER objects. -

    - -

    SQLITE_OMIT_UTF16
    -This macro is used to omit support for UTF16 text encoding. When this is -defined all API functions that return or accept UTF16 encoded text are -unavailable. These functions can be identified by the fact that they end -with '16', for example sqlite3_prepare16(), sqlite3_column_text16() and -sqlite3_bind_text16(). -

    - -

    SQLITE_OMIT_VACUUM
    -When this option is defined, the VACUUM -command is not included in the library. Executing a VACUUM statement causes -a parse error. -

    - -

    SQLITE_OMIT_VIEW
    -Defining this option omits support for VIEW objects. Neither the -CREATE VIEW or -DROP VIEW -commands are available in this case, attempting to execute either will result -in a parse error. -

    -

    -WARNING: If this macro is defined, it will not be possible to open a database -for which the schema contains VIEW objects. -

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/conflict.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/conflict.html --- sqlite3-3.4.2/www/conflict.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/conflict.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,134 @@ + + +Constraint Conflict Resolution in SQLite + + + + + +

    Constraint Conflict Resolution in SQLite

    + +

In most SQL databases, if you have a UNIQUE constraint on a table and you try to do an UPDATE or INSERT that violates the constraint, the database will abort the operation in progress, back out any prior changes associated with the same UPDATE or INSERT statement, and return an error. This is the default behavior of SQLite, though SQLite also allows one to define alternative ways for dealing with constraint violations. This article describes those alternatives and how to use them.

    + +

    Conflict Resolution Algorithms

    + +

    +SQLite defines five constraint conflict resolution algorithms +as follows: +

    + +
    +
    ROLLBACK
    +

    When a constraint violation occurs, an immediate ROLLBACK +occurs, thus ending the current transaction, and the command aborts +with a return code of SQLITE_CONSTRAINT. If no transaction is +active (other than the implied transaction that is created on every +command) then this algorithm works the same as ABORT.

    + +
    ABORT
    +

    When a constraint violation occurs, the command backs out +any prior changes it might have made and aborts with a return code +of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes +from prior commands within the same transaction +are preserved. This is the default behavior for SQLite.

    + +
    FAIL
    +

When a constraint violation occurs, the command aborts with a return code SQLITE_CONSTRAINT. But any changes to the database that the command made prior to encountering the constraint violation are preserved and are not backed out. For example, if an UPDATE statement encountered a constraint violation on the 100th row that it attempts to update, then the first 99 row changes are preserved but changes to rows 100 and beyond never occur.

    + +
    IGNORE
    +

    When a constraint violation occurs, the one row that contains +the constraint violation is not inserted or changed. But the command +continues executing normally. Other rows before and after the row that +contained the constraint violation continue to be inserted or updated +normally. No error is returned.

    + +
    REPLACE
    +

When a UNIQUE constraint violation occurs, the pre-existing row that caused the constraint violation is removed prior to inserting or updating the current row. Thus the insert or update always occurs. The command continues executing normally. No error is returned.

    +
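As a brief, hedged illustration of choosing one of these algorithms for a single statement, the fragment below uses the OR clause of INSERT; the table and column names are invented for the example.

#include "sqlite3.h"

static int import_row(sqlite3 *db){
  /* IGNORE: a row that would violate a UNIQUE constraint is silently skipped.
  ** "INSERT OR REPLACE" would instead delete the pre-existing row first. */
  return sqlite3_exec(db,
      "INSERT OR IGNORE INTO users(id, name) VALUES(1, 'alice');",
      0, 0, 0);
}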
    +
    +This page last modified 2007/12/16 00:35:09 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/conflict.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/conflict.tcl --- sqlite3-3.4.2/www/conflict.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/conflict.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -# -# Run this Tcl script to generate the constraint.html file. -# -set rcsid {$Id: conflict.tcl,v 1.4 2004/10/10 17:24:55 drh Exp $ } -source common.tcl -header {Constraint Conflict Resolution in SQLite} -puts { -

    Constraint Conflict Resolution in SQLite

    - -

    -In most SQL databases, if you have a UNIQUE constraint on -a table and you try to do an UPDATE or INSERT that violates -the constraint, the database will abort the operation in -progress, back out any prior changes associated with -UPDATE or INSERT command, and return an error. -This is the default behavior of SQLite. -Beginning with version 2.3.0, though, SQLite allows you to -define alternative ways for dealing with constraint violations. -This article describes those alternatives and how to use them. -

    - -

    Conflict Resolution Algorithms

    - -

    -SQLite defines five constraint conflict resolution algorithms -as follows: -

    - -
    -
    ROLLBACK
    -

    When a constraint violation occurs, an immediate ROLLBACK -occurs, thus ending the current transaction, and the command aborts -with a return code of SQLITE_CONSTRAINT. If no transaction is -active (other than the implied transaction that is created on every -command) then this algorithm works the same as ABORT.

    - -
    ABORT
    -

    When a constraint violation occurs, the command backs out -any prior changes it might have made and aborts with a return code -of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes -from prior commands within the same transaction -are preserved. This is the default behavior for SQLite.

    - -
    FAIL
    -

    When a constraint violation occurs, the command aborts with a -return code SQLITE_CONSTRAINT. But any changes to the database that -the command made prior to encountering the constraint violation -are preserved and are not backed out. For example, if an UPDATE -statement encountered a constraint violation on the 100th row that -it attempts to update, then the first 99 row changes are preserved -by change to rows 100 and beyond never occur.

    - -
    IGNORE
    -

    When a constraint violation occurs, the one row that contains -the constraint violation is not inserted or changed. But the command -continues executing normally. Other rows before and after the row that -contained the constraint violation continue to be inserted or updated -normally. No error is returned.

    - -
    REPLACE
    -

    When a UNIQUE constraint violation occurs, the pre-existing row -that caused the constraint violation is removed prior to inserting -or updating the current row. Thus the insert or update always occurs. -The command continues executing normally. No error is returned.

    -
    - -

    Why So Many Choices?

    - -

    SQLite provides multiple conflict resolution algorithms for a -couple of reasons. First, SQLite tries to be roughly compatible with as -many other SQL databases as possible, but different SQL database -engines exhibit different conflict resolution strategies. For -example, PostgreSQL always uses ROLLBACK, Oracle always uses ABORT, and -MySQL usually uses FAIL but can be instructed to use IGNORE or REPLACE. -By supporting all five alternatives, SQLite provides maximum -portability.

    - -

    Another reason for supporting multiple algorithms is that sometimes -it is useful to use an algorithm other than the default. -Suppose, for example, you are -inserting 1000 records into a database, all within a single -transaction, but one of those records is malformed and causes -a constraint error. Under PostgreSQL or Oracle, none of the -1000 records would get inserted. In MySQL, some subset of the -records that appeared before the malformed record would be inserted -but the rest would not. Neither behavior is especially helpful. -What you really want is to use the IGNORE algorithm to insert -all but the malformed record.

    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/consortium_agreement-20071201.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/consortium_agreement-20071201.html --- sqlite3-3.4.2/www/consortium_agreement-20071201.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/consortium_agreement-20071201.html 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,558 @@ + + +SQLite Consortium Agreement + + +

    +SQLite Consortium Agreement +

    + +

    This SQLite Consortium Agreement ("Agreement") is made and +entered into as of the _____ day of ______________, 2007 ("Effective Date") +by and between ___________________ (the "Company"), and +Hipp, Wyrick & Company, Inc., a Georgia Corporation with +headquarters at 6200 Maple Cove Lane, Charlotte, NC ("Hwaci").

    + +

    WHEREAS, Hwaci is the developer of a serverless, embeddable, public +domain SQL database engine available at +http://www.sqlite.org/ +("SQLite"); and

    + +

    WHEREAS, Company wishes to support the independent development of +the SQLite database engine to insure the continuing reliability, +vitality, and independence of SQLite, and in connection therewith, +Company wishes to provide funds for ongoing research, development, +and maintenance of the SQLite code base, operation of the SQLite +website, protection and maintenance of the SQLite +trademark, as well as for promotion of SQLite at conferences and +trade shows; and

    + +

    WHEREAS, Company wishes to receive benefits in the form of +services from Hwaci in consideration for their support of SQLite; and + +

    WHEREAS, Company and Hwaci each wish SQLite to be supported and +maintained as an independent, community-driven project, and to +ensure the SQLite development team remains independent.

    + +

    NOW, THEREFORE, in consideration of the mutual promises contained +herein, the parties agree as follows:

    + +

    1.0 Definitions

    + +

    1.1 SQLite Developers

    +

    The "SQLite Developers" are the employees or contractors of Hwaci engaged +in the development of SQLite, including without limitation the +SQLite Architect.

    + +

    1.2 SQLite Architect

    +

    The "SQLite Architect" is the SQLite Developer with ultimate +responsibility and authority over changes and maintenance of the +SQLite code. The current SQLite Architect is the original designer +and developer of SQLite, D. Richard Hipp. Hwaci may change the +SQLite Architect from time to time upon approval by Company.

    + +

    1.3 Consortium Member

    +

    A "Consortium Member" is a company or organization, including +but not limited to Company, which has entered into an agreement +with Hwaci which is substantially similar to this Agreement. +

    + +

    2.0 Agreement

    + +

    2.1 Responsibilities And Obligations Of SQLite Developers

    + +

    2.1.1 General Support

    +

    The SQLite Developers shall provide, without limit, +timely and accurate answers, advice, and instruction +in response to questions and comments from Company about SQLite +submitted by email or by telephone at any time day or night. +

    + +

    2.1.2 Debugging Support

    +The SQLite Developers shall at Company's request provide +Company with direct assistance +in debugging applications that are built upon or use SQLite. +The combined time for Debugging Support and Custom +Development for Company is limited to 330 hours per year. +

    + +

    2.1.3 Custom Development

    +

The SQLite Developers shall at the request of Company write new extensions or enhancements to SQLite according to Company's specifications. These extensions and enhancements may be proprietary to Company or open source, or, with the consent of both Company and the SQLite Architect, may be folded into the public domain SQLite source tree. The time devoted to Custom Development for Company is limited to 220 hours per year.

    + +

    2.1.4 Custom Builds

    +The SQLite Developers shall at Company's request provide Company +with specialized builds of SQLite according to Company's specifications. +

    + +

    2.1.5 Legacy Support

    +

    The SQLite Developers shall at Company's request provide support +and bug-fix patches for any historical version of SQLite. +

    + +

    2.1.6 Custom Testing

    +

The SQLite Developers shall at Company's request set up and run acceptance tests according to Company's specifications that are specifically designed to exercise SQLite as configured and used by Company. These acceptance tests shall be run and shall pass prior to any release of SQLite.

    + +

    2.1.7 Priority Bug Fixes

    +

    The SQLite Developers shall assign highest priority to fixing bugs +in SQLite that are reported by Company or in which Company has +expressed a specific interest. +

    + +

    2.1.8 Code Quality

    +

    The SQLite Developers shall at all times keep the SQLite source +code well commented and documented and clearly structured and +organized so that an experienced and competent programmer can +understand it and support it after no more than a few months of study. +

    + +

    2.1.9 Backwards Compatibility

    +

    The SQLite Developers recognize that Company uses SQLite +in mission-critical applications and therefore shall work diligently +to ensure continuing bug-free correct operation and backwards +compatibility with prior releases. +

    + +

    2.1.10 Test Coverage

    +

    The SQLite Developers shall maintain the SQLite test suite such +that no less than 95% source code coverage is provided. +

    + +

    2.1.11 Website

    +

    The SQLite Developers shall maintain the SQLite website at +http://www.sqlite.org/ +in good working order and all SQLite source code shall +be made freely available at such website. +

    + + +

    2.2 Responsibilities And Obligations Of The SQLite Architect

    + +

    2.2.1 Private Briefings

    +

The SQLite Architect shall, at the request of Company, provide Company with private notification and briefings of any new bugs or new features in SQLite that might affect Company's products.

    + +

    2.2.2 Direction

    +

    The SQLite Architect shall coordinate the activities of SQLite +Developers and shall publish from time to time a roadmap of +proposed future changes and enhancements to SQLite. +

    + +

    2.2.3 Oversight

    +

    The SQLite Architect shall personally review all changes to +the SQLite Source Code and take responsibility for the quality +and correctness of those changes. +

    + +

    2.2.4 Continuity

    +

    The initial SQLite Architect shall be the original developer +of SQLite, D. Richard Hipp. The role of SQLite Architect shall +not be assigned to another during the term of this Agreement except +due to the disability of Dr. Hipp and with approval of Company. +

    + +

    2.3 Responsibilities And Obligations Of Hwaci

    + +

    2.3.1 Employer

    +

    Hwaci shall act as the employer of the SQLite Developers and +SQLite Architect and shall make all tax and legal filings +and tax withholding required by employers. Hwaci shall provide +Company with W-9 information upon request. +

    + +

    2.3.2 Responsible Party

    +

    All obligations of the SQLite Developers set forth in this +agreement are deemed obligations of Hwaci. Hwaci shall recruit, +employ, and supervise SQLite Developers in such a way that the +responsibilities and obligations of the SQLite Developers set +forth in this agreement are upheld.

    + +

    2.3.3 Multiple Developers

    +

    Hwaci shall recruit and employ as many SQLite Developers for as +many hours as can be reasonably achieved using the funds received +from Consortium Members under this agreement. +So that support for SQLite will not be interrupted by the +disability or withdrawal of any one SQLite Developer, Hwaci will +keep at least two knowledgeable and competent SQLite Developers +on staff.

    + +

    2.3.4 Mentorship

    +

    Hwaci shall work to encourage independent +programmers from around the world +to become familiar with SQLite internals so that there will be a +pool of talent able to support SQLite in the future. +

    + +

    2.3.5 Audits

    +

    Hwaci shall keep and maintain complete and accurate records +of the use of development funds provided by Company +and shall allow Company, or its representative, a certified +public accountant mutually acceptable to Hwaci and Company, +during office hours and at reasonable intervals, no more than +once every 12 months, to inspect and make extracts or copies +of such records solely for the purpose of ascertaining Hwaci's +compliance with the objectives and requirements of this agreement. +

    + +

    2.3.6 Disaster Planning

    +

    Hwaci shall maintain backup copies of all +SQLite source files and documentation, current and historical, +at at least two separate locations separated from each +other and from the primary on-line SQLite repository +by at least 400 kilometers. +

    + +

    2.3.7 Trademark

    +

    Hwaci shall maintain ownership of the SQLite trademark and +the sqlite.org domain name and shall purchase bandwidth and +server space for the +http://www.sqlite.org/ website. +

    + +

    2.3.8 No Take-overs or Buy-outs

    +

    Neither Hwaci nor the SQLite Architect shall relinquish development +control of SQLite during the term of this Agreement, by acquisition or merger +or by any other means, +except with the consent of Company. +

    + +

    2.3.9 New Consortium Members

    +

    New Consortium Members may be accepted into the consortium from +time to time under identical terms as this agreement, or under +substantially similar terms that have been approved by existing +Consortium Members.

    + +

    2.3.10 Adequate Staff

    +Hwaci shall recruit and employ a sufficient number of qualified +SQLite Developers to easily cover all custom development, +debugging, and general support service obligations for all +Consortium Members while still providing ample time for +the SQLite Developers to engage in general maintenance +and extension of SQLite. +

    + +

    2.3.11 Use Of Funds

    +

    Hwaci shall use the funds provided hereunder +solely for the development and maintenance of SQLite as set forth in +this Agreement. +Interest on funds received in advance and held in trust will +be reinvested and used for the same purposes as the principal. +

    + +

    2.4 Responsibilities And Obligations Of Company

    + +

    2.4.1 Funding

    +

    Company shall provide funding for the ongoing support and +maintenance of SQLite as set forth in section 3.0 "Fees". +

    + +

    2.4.2 Copyright Disclaimer

    +

    Company acknowledges that SQLite source code and documentation +published on the SQLite website +is in the public domain and that nothing in this agreement shall change +that fact. +

    + +

    3.0 Fees

    + +

    In consideration of the performance +by Hwaci, the SQLite Developers, and the SQLite Architect +of the obligations described herein, +Company shall pay Hwaci at least US $75,000 per year in advance +either annually, quarterly, or monthly, at Company's discretion.

    + +

    4.0 Confidentiality

    + +

    4.1 Definition of Confidential Information

    + +

    "Confidential Information" means any Company proprietary information, +technical data, trade secrets or know-how, including, but not limited to, +research, product plans, products, services, customers, customer lists, +markets, software, developments, inventions, processes, formulas, +technology, designs, drawings, engineering, hardware configuration +information, marketing, finances or other business information +disclosed by Company either directly or indirectly in writing, +orally or by drawings or inspection of parts or equipment.

    + +

    4.2 Non-Use and Non-Disclosure.

    + +

    Hwaci shall not, during or subsequent to the term of this Agreement, +use Company's Confidential Information for any purpose whatsoever +other than the performance of the Services or disclose +Company's Confidential Information to any third party. The parties +acknowledge that Confidential Information will remain the sole +property of Company. Hwaci shall take all reasonable precautions +to prevent any unauthorized disclosure of Confidential Information +including, but not limited to, having each employee or +consultant of Hwaci, if any, with access to any +Confidential Information, execute a nondisclosure agreement +containing provisions in Company's favor substantially similar +to this Agreement. Confidential Information does not include +information that: (i) is known to Hwaci at the time of +disclosure to Hwaci by Company as evidenced by written +records of Hwaci; (ii) has become publicly known and made +generally available through no wrongful act of Hwaci; or +(iii) has been received by Hwaci from a third party who is +authorized to make such disclosure.

    + +

    4.2.1 Disclosure Required by Law

    +In the event any Confidential Information is required to be disclosed +by Hwaci under the terms of a valid and effective subpoena or order +issued by a court of competent jurisdiction, or by a demand or +information request from an executive or administrative agency +or other governmental authority, Hwaci shall, unless prohibited +by the terms of a subpoena, order, or demand, promptly notify +Company of the existence, terms and circumstances surrounding +such demand or request, shall consult with Company on the +advisability of taking legally available steps to resist +or narrow such demand or request, and, if disclosure of +such Confidential Information is required, shall exercise +its reasonable best efforts to narrow the scope of disclosure +and obtain an order or other reliable assurance that +confidential treatment will be accorded to such +Confidential Information. To the extent that Hwaci +is prohibited from notifying Company of a subpoena, +order or demand, by the terms of same, Hwaci shall exercise +its reasonable efforts to narrow the scope of disclosure.

    + +

    4.3 Return of Materials.

    + +

    Upon the termination of this Agreement, or upon Company's +earlier request, Hwaci shall deliver to Company all of +Company's property or Confidential Information that Hwaci +may have in Hwaci's possession or control.

    + +

    5.0 Intellectual Property

    + +

    5.1 No Assignment

    + +

    Company acknowledges that all copyrightable material, notes, +records, drawings, designs, inventions, improvements, developments, +discoveries and trade secrets made, generated, conceived, or +reduced to practice by Hwaci related to SQLite +will remain the property of Hwaci. Nothing in this Agreement +will be construed to transfer any intellectual property right +of Hwaci to Company.

    + +

    5.2 Availability and Public Domain Dedication

    + +

    The SQLite Developers and Hwaci shall, +subject to their discretion as to the quality +and suitability of the SQLite source code and documentation +for public release, make +the SQLite source code and documentation +publicly available as downloadable files +and make a public statement ceding all intellectual +property rights, including but not limited to copyright +and patent rights, in the SQLite source code and documentation +to the public domain. +To the extent that the SQLite Developers and Hwaci +elect not to release the SQLite +source code and documentation +publicly, they shall provide copies thereof to +Company and hereby grants to Company, under all of the +SQLite Developers' and Hwaci's +rights +including but not limited to copyright and patent rights, +in and to the SQLite source code and documentation, +perpetual, irrevocable, +worldwide, non-exclusive, sublicenseable license to use, +copy, prepare derivative works of, publicly perform and +display the SQLite source code and documentation +and derivative works thereof.

    + +

    5.3 Trademark

    + +

    Hwaci shall use the name "SQLite" only to apply to the +publicly available project known by such name as of the +Effective Date. Hwaci may in its discretion file such +trademark applications or registrations as it deems +appropriate to protect or record its rights therein, +and may set such policies as it deems appropriate for +licensing the use of the trademark.

    + +

    6.0 Representations And Warranties

    + +

    6.1 Intellectual Property Clearances.

    + +

    Hwaci hereby represents and warrants that Hwaci shall +enter into agreements with the SQLite Developers sufficient +to enable Hwaci to undertake the obligations of Section 5.

    + +

    6.2 Disclaimer.

    + +

    THE WORK PRODUCT AND ALL MATERIAL PROVIDED BY HWACI AND +COMPANY ARE PROVIDED "AS IS." NEITHER PARTY MAKES ANY +REPRESENTATIONS OR WARRANTIES OF ANY KIND, WHETHER ORAL +OR WRITTEN, WHETHER EXPRESS, IMPLIED, OR ARISING BY STATUTE, +CUSTOM, COURSE OF DEALING OR TRADE USAGE, WITH RESPECT TO +THE SUBJECT MATTER HEREOF, IN CONNECTION WITH THIS AGREEMENT. +EACH PARTY SPECIFICALLY DISCLAIMS ANY AND ALL IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.

    + +

    7.0 Term And Termination

    + +

    7.1 Term.

    + +

    This Agreement will commence on the Effective Date and +will continue until 12 months after the Effective Date. +Thereafter, the parties may by mutual consent renew +this Agreement subject to agreement on fees to be paid by +Company for sponsorship for additional periods.

    + +

    7.2 Termination.

    + +

    If either party materially defaults in the performance +of any of its material obligations hereunder and if any +such default is not corrected within 30 days after notice +in writing, then the non-defaulting party, at its option, +may, in addition to any other remedies it may have, thereupon +terminate this Agreement by giving written notice of +termination to the defaulting party.

    + +

    7.3 Survival.

    + +

    Upon such termination all rights and duties of the +parties toward each other will cease except: +Sections 4 (Confidentiality), 5 (Intellectual Property), +and 8 (Miscellaneous) will survive termination of this Agreement.

    + +

    8. Miscellaneous

    + +

    8.1 Nonassignment/Binding Agreement.

    + +

    The parties acknowledge that the unique nature of +Hwaci's services are substantial consideration for the +parties' entering into this Agreement. Neither this +Agreement nor any rights under this Agreement may be +assigned or otherwise transferred by Hwaci, in whole +or in part, whether voluntarily or by operation of law, +without the prior written consent of Company, which +consent will not be unreasonably withheld. +Subject to the foregoing, this Agreement will be binding +upon and will inure to the benefit of the parties and +their respective successors and assigns. Any assignment +in violation of the foregoing will be null and void.

    + +

    8.2 Notices.

    + +

    Any notice required or permitted under the terms of +this Agreement or required by law must be in writing +and must be: (a) delivered in person; (b) sent by first +class registered mail, or air mail, as appropriate; or +(c) sent by overnight air courier, in each case properly +posted and fully prepaid to the appropriate address set +forth in the preamble to this Agreement. Either party +may change its address for notice by notice to the other +party given in accordance with this Section. Notices +will be considered to have been given at the time of +actual delivery in person, three business days after +deposit in the mail as set forth above, or one day after +delivery to an overnight air courier service.

    + +

    8.3 Waiver.

    + +

    Any waiver of the provisions of this Agreement or +of a party's rights or remedies under this Agreement +must be in writing to be effective. Failure, neglect, +or delay by a party to enforce the provisions of this +Agreement or its rights or remedies at any time, will +not be construed as a waiver of such party's rights +under this Agreement and will not in any way affect +the validity of the whole or any part of this Agreement +or prejudice such party's right to take subsequent action. +No exercise or enforcement by either party of any right +or remedy under this Agreement will preclude the +enforcement by such party of any other right or remedy +under this Agreement or that such party is entitled by +law to enforce.

    + +

    8.4 Severability.

    + +

    If any term, condition, or provision in this Agreement +is found to be invalid, unlawful or unenforceable to +any extent, the parties shall endeavor in good faith +to agree to such amendments that will preserve, as far +as possible, the intentions expressed in this Agreement. +If the parties fail to agree on such an amendment, +such invalid term, condition or provision will be +severed from the remaining terms, conditions and +provisions, which will continue to be valid and +enforceable to the fullest extent permitted by law.

    + +

    8.5 Integration.

    + +

    This Agreement contains the entire agreement of the +parties with respect to the subject matter of this +Agreement and supersedes all previous communications, +representations, understandings and agreements, +either oral or written, between the parties with +respect to said subject matter. This Agreement +may not be amended, except by a writing signed by both parties.

    + +

    8.6 Counterparts.

    + +

    This Agreement may be executed in counterparts, +each of which so executed will be deemed to be an +original and such counterparts together will constitute +one and the same agreement.

    + +

    8.7 Governing Law.

    + +

    This Agreement will be interpreted and construed in +accordance with the laws of the State of North Carolina +and the United States of America, without regard to +conflict of law principles. All disputes arising out +of this Agreement will be subject to the exclusive +jurisdiction of the state and federal courts located in +North Carolina, and each party hereby consents to the +personal jurisdiction thereof.

    + +

    8.8 Independent Contractors.

    + +

    It is the intention of the parties that Hwaci is an +independent contractor. Nothing in this Agreement will +in any way be construed to constitute Hwaci or any of +its employees as an agent, employee or representative +of Company.

    + +

    9.0 Signatures

    + +

    The parties have executed this Agreement below to +indicate their acceptance of its terms.

    + + + +
    +HWACI
    +By:
    +

     

    +Print Name:
    +

     

    +Title:
    +

     

    +
    +COMPANY
    +By:
    +

     

    +Print Name:
    +

     

    +Title:
    +

     

    +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/consortium.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/consortium.html --- sqlite3-3.4.2/www/consortium.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/consortium.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,205 @@ + + +SQLite Consortium + + + + + +

    The SQLite Consortium

    + +

    +The SQLite Consortium is a membership association dedicated to +insuring the continuing vitality and independence of SQLite. +SQLite is high-quality, public domain software. The goal of +the SQLite Consortium is to make sure it stays that way. +

    + +

    +Consortium members contribute funding to support the ongoing +development and maintenance of SQLite and in return receive +enterprise-level technical support and legal assurances that +SQLite will remain independent and open-source. Consortium +members have direct access to +the SQLite developers and receive priority consideration for +any bug fixes or feature requests. However, technical control +and direction of SQLite remains with the SQLite architect and +developers. An important goal of the SQLite Consortium, after +all, is to make sure that SQLite does not fall under the governance +of any single company but remains independent and fully under the +control of the SQLite developers. +

    + +

+Companies that use SQLite as a critical component in their +products or infrastructure may want to consider becoming +SQLite Consortium Members as inexpensive insurance +of the future vitality and independence of SQLite. The SQLite Consortium +membership fee is a fraction of the cost of devoting internal +staff to supporting the SQLite code base. Benefits of +membership include: +

    + +
      +
• +Consortium members have the guaranteed, undivided attention of the SQLite +developers for 23 staff-days per year and for as much additional time +above and beyond that amount as the core developers have available. +There are no arbitrary limits on contact time. +The consortium will never be over-subscribed. New SQLite developers +will be recruited and trained as necessary to cover the 23 day/year +support commitment. +

    • + +
    • +Consortium members can call any developer at any time, day or +night, and expect to get their full and immediate attention. +Consortium members have access to the home telephone numbers +and cellphone numbers and work schedules of the core developers +so that the developers can be tracked down quickly in a crisis. +

    • + +
    • +The SQLite developers are eager and happy to write new code +or debug existing code for consortium members. +Consortium members can request and expect to receive significant +new enhancements to the public SQLite code base. Consortium +members can also request their own private, proprietary extensions +and enhancements. +

    • + +
    • +Consortium members can receive support for any +version of SQLite no matter how old. +Bug fixes will be back ported to older versions of SQLite upon +request. +

    • + +
    • +Consortium members can receive private +email and/or telephone notifications and briefings covering +new bugs or recently added features. +The core developers are constantly watching out for the +interests of consortium members and will attempt to protect +those interests in every way possible and alert members +when their interests are threatened in any way. +

    • + +
    • +The SQLite developers will set up customized regression tests +to fully and completely test SQLite as configured by consortium +members. Normally, only the standard SQLite configuration is fully +tested at each release. Consortium members that use a non-standard +configuration can request that their configuration be tested to the +same level of detail. +

    • + +
    • +Support requests are accepted from any employee or contractor working +for a consortium member. +Support is not limited to a single development +team as it is with a maintenance subscription or technical support +agreement. +

    • + +
    • +Support requests, comments, and suggestions for future +changes to SQLite coming from consortium members take priority +over requests from all other sources. +Consortium members go to the front of the line. +

    • + +
    • +We will be happy to recognize consortium members on the SQLite website +with a logo and/or a brief acknowledgment of their contribution to the +project. This is an opportunity for companies to build good will by +demonstrating that they are giving back to the community. Or, members +can remain anonymous. +

    • + +
    + + +

    How To Become A Consortium Member

    + +

    +Membership in the SQLite Consortium is via +contract with +Hipp, Wyrick & Company, Inc.. +Please call +1.704.948.4565 during US east-coast business hours +for additional information. +

    + + + + +
    +This page last modified 2008/11/12 14:30:43 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/copyright.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/copyright.html --- sqlite3-3.4.2/www/copyright.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/copyright.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,190 @@ + + +SQLite Copyright + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Copyright

    + + + +
    +
    +SQLite is in the
    +Public Domain +
    + +

    +All of the deliverable code in SQLite has been dedicated to the +public domain +by the authors. +All code authors, and representatives of the companies they work for, +have signed affidavits dedicating their contributions to +the public domain and originals of +those signed affidavits are stored in a firesafe at the main offices +of Hwaci. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute +the original SQLite code, either in source code form or as a compiled binary, +for any purpose, commercial or non-commercial, and by any means. +

    + +

    +The previous paragraph applies to the deliverable code in SQLite - +those parts of the SQLite library that you actually bundle and +ship with a larger application. Portions of the documentation and +some code used as part of the build process might fall under +other licenses. The details here are unclear. We do not worry +about the licensing of the documentation and build code so much +because none of these things are part of the core deliverable +SQLite library. +

    + +

    +All of the deliverable code in SQLite has been written from scratch. +No code has been taken from other projects or from the open +internet. Every line of code can be traced back to its original +author, and all of those authors have public domain dedications +on file. So the SQLite code base is clean and is +uncontaminated with licensed code from other projects. +

    + +

    Obtaining An Explicit License To Use SQLite

    + +

    +Even though SQLite is in the public domain and does not require +a license, some users want to obtain a license anyway. Some reasons +for obtaining a license include: +

    + +
      +
    • You are using SQLite in a jurisdiction that does not recognize + the public domain.
    • +
    • You are using SQLite in a jurisdiction that does not recognize + the right of an author to dedicate their work to the public + domain.
    • +
    • You want to hold a tangible legal document + as evidence that you have the legal right to use and distribute + SQLite.
    • +
    • Your legal department tells you that you have to purchase a license. +
    • +
    + +

    +If you feel like you really have to purchase a license for SQLite, +Hwaci, the company that employs +the architect and principal developers of SQLite, will +sell you +one. +

    + +

    Contributed Code

    + +

    +In order to keep SQLite completely free and unencumbered by copyright, +all new contributors to the SQLite code base are asked to dedicate +their contributions to the public domain. +If you want to send a patch or enhancement for possible inclusion in the +SQLite source tree, please accompany the patch with the following statement: +

    + +
    +The author or authors of this code dedicate any and all copyright interest +in this code to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and successors. +We intend this dedication to be an overt act of relinquishment in +perpetuity of all present and future rights to this code under copyright law. +
    + +

    +We are not able to accept patches or changes to +SQLite that are not accompanied by a statement such as the above. +In addition, if you make +changes or enhancements as an employee, then a simple statement such as the +above is insufficient. You must also send by surface mail a copyright release +signed by a company officer. +A signed original of the copyright release should be mailed to:

    + +
    +Hwaci
    +6200 Maple Cove Lane
    +Charlotte, NC 28269
    +USA +
    + +

    +A template copyright release is available +in PDF or +HTML. +You can use this release to make future changes. +

    +
    +This page last modified 2007/11/14 15:25:42 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/copyright.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/copyright.tcl --- sqlite3-3.4.2/www/copyright.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/copyright.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -set rcsid {$Id: copyright.tcl,v 1.7 2007/05/06 21:20:43 drh Exp $} -source common.tcl -header {SQLite Copyright} -puts { -

    SQLite Copyright

    - - - -
    -
    -SQLite is in the
    -Public Domain -
    - -

    -All of the deliverable code in SQLite has been dedicated to the -public domain -by the authors. -All code authors, and representatives of the companies they work for, -have signed affidavits dedicating their contributions to -the public domain and originals of -those signed affidavits are stored in a firesafe at the main offices -of Hwaci. -Anyone is free to copy, modify, publish, use, compile, sell, or distribute -the original SQLite code, either in source code form or as a compiled binary, -for any purpose, commercial or non-commercial, and by any means. -

    - -

    -The previous paragraph applies to the deliverable code in SQLite - -those parts of the SQLite library that you actually bundle and -ship with a larger application. Portions of the documentation and -some code used as part of the build process might fall under -other licenses. The details here are unclear. We do not worry -about the licensing of the documentation and build code so much -because none of these things are part of the core deliverable -SQLite library. -

    - -

    -All of the deliverable code in SQLite has been written from scratch. -No code has been taken from other projects or from the open -internet. Every line of code can be traced back to its original -author, and all of those authors have public domain dedications -on file. So the SQLite code base is clean and is -uncontaminated with licensed code from other projects. -

    - -

    Obtaining An Explicit License To Use SQLite

    - -

    -Even though SQLite is in the public domain and does not require -a license, some users want to obtain a license anyway. Some reasons -for obtaining a license include: -

    - -
      -
    • You are using SQLite in a jurisdiction that does not recognize - the public domain.
    • -
    • You are using SQLite in a jurisdiction that does not recognize - the right of an author to dedicate their work to the public - domain.
    • -
    • You want to hold a tangible legal document - as evidence that you have the legal right to use and distribute - SQLite.
    • -
    • Your legal department tells you that you have to purchase a license. -
    • -
    - -

    -If you feel like you really have to purchase a license for SQLite, -Hwaci, the company that employs -the architect and principal developers of SQLite, will sell you -one. -Please contact: -

    - -
    -D. Richard Hipp
    -Hwaci - Applied Software Research
    -704.948.4565
    -drh@hwaci.com -
    - -

    Contributed Code

    - -

    -In order to keep SQLite completely free and unencumbered by copyright, -all new contributors to the SQLite code base are asked to dedicate -their contributions to the public domain. -If you want to send a patch or enhancement for possible inclusion in the -SQLite source tree, please accompany the patch with the following statement: -

    - -
    -The author or authors of this code dedicate any and all copyright interest -in this code to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and successors. -We intend this dedication to be an overt act of relinquishment in -perpetuity of all present and future rights to this code under copyright law. -
    - -

    -We are not able to accept patches or changes to -SQLite that are not accompanied by a statement such as the above. -In addition, if you make -changes or enhancements as an employee, then a simple statement such as the -above is insufficient. You must also send by surface mail a copyright release -signed by a company officer. -A signed original of the copyright release should be mailed to:

    - -
    -Hwaci
    -6200 Maple Cove Lane
    -Charlotte, NC 28269
    -USA -
    - -

    -A template copyright release is available -in PDF or -HTML. -You can use this release to make future changes. -

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/crew.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/crew.html --- sqlite3-3.4.2/www/crew.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/crew.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,126 @@ + + +SQLite Developers + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    The SQLite Development Team

    + + +

+D. Richard Hipp + began the SQLite project on 2000-May-29 +and continues to serve as the project architect. Richard was born, +lives, and works in +Charlotte, +North Carolina. He holds degrees from +Georgia Tech (MSEE, 1984) and +Duke University (PhD, 1992) and is +the founder of the consulting firm +Hwaci.

    + +

    +
    + + +

    +Dan Kennedy is an Australian currently based in South-East Asia. +He holds a degree in Computer System Engineering from the University of +Queensland and has worked in a variety of fields, including industrial +automation, computer graphics and embedded software development. +Dan has been a key contributor to SQLite since 2002. +

    + +

    +
    + + +

    +Shane Harrelson +graduated from Clemson University, S.C. in 1995 with a BS +in Mechanical Engineering and an MS in Computer Science. Shane has worked +in several fields including retail/financial systems software, device driver +development, and embedded systems software.

    + +

    +
    + + +

    +Christian Werner +is a software developer located in Germany. +He has been active in industrial automation, +communications, and embedded software development since 1989.

    + +

    +
    +This page last modified 2008/11/17 14:10:22 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/custombuild.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/custombuild.html --- sqlite3-3.4.2/www/custombuild.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/custombuild.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,375 @@ + + +Custom Builds Of SQLite 3.6 + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +Custom Builds Of SQLite Version 3.6
    +or
    +Porting SQLite 3.6 To New Operating Systems Or Unusual Platforms +

    + +

    1.0 Introduction

    + +

For most applications, the recommended method for building +SQLite is to use the amalgamation code +file, sqlite3.c, and its corresponding header file +sqlite3.h. The sqlite3.c code file should compile and +run on any Unix, Windows, OS/2, or Mac OS X system without any changes +or special compiler options. Most applications can simply include +the sqlite3.c file together with the other C code files that make +up the application, compile them all together, and have a working +and well-configured version of SQLite.

    + +
    Most applications work great with SQLite in its +default configuration and with no special compile-time configuration. +Most developers should be able to completely ignore this document +and simply build SQLite from +the amalgamation without any +special knowledge and without taking any special actions.
    + +

However, highly tuned and specialized +applications may want or need to replace some of +SQLite's built-in system interfaces with alternative implementations +more suitable for the needs of the application. SQLite is designed +to be easily reconfigured at compile-time to meet the specific +needs of individual projects. Among the compile-time configuration +options for SQLite are these:

    + +
      +
    • Replace the built-in mutex subsystem with an alternative + implementation.

    • + +
    • Completely disable all mutexing for use in single-threaded + applications.

    • + +
• Reconfigure the memory allocation subsystem to use a memory + allocator other than the malloc() implementation from the standard + library.

    • + +
    • Realign the memory allocation subsystem so that it never calls + malloc() at all but instead satisfies all memory requests using + a fixed-size memory buffer assigned to SQLite at startup.

    • + +
    • Replace the interface to the file system with an alternative + design. In other words, override all of the system calls that + SQLite makes in order to talk to the disk with a completely different + set of system calls.

    • + +
    • Override other operating system interfaces such as calls to obtain + Zulu or local time.

    • +
    + +

    Generally speaking, there are three separate subsystems within +SQLite that can be modified or overridden at compile-time. The +mutex subsystem is used to serialize access to SQLite resources that +are shared among threads. The memory allocation subsystem is used +to allocate memory required by SQLite objects and for the database +cache. Finally, the Virtual File System subsystem is +used to provide a portable interface between SQLite and the underlying +operating system and especially the file system. We call these three +subsystems the "interface" subsystems of SQLite.

    + +

We emphasize that most applications are well-served by the +built-in default implementations of the SQLite interface subsystems. +Developers are encouraged to use the +default built-in implementations whenever possible +and to build SQLite without any special compile-time options or parameters. +However, some highly specialized applications may benefit from +substituting or modifying one or more of these built-in SQLite +interface subsystems. +Or, if SQLite is used on an operating system other than +Unix (Linux or Mac OS X), Windows (Win32 or WinCE), or OS/2, then none +of the interface subsystems that come built into SQLite will work +and the application will need to provide alternative implementations +suitable for the target platform.

    + +

    2.0 Configuring Or Replacing The Mutex Subsystem

    + +

    In a multithreaded environment, SQLite uses mutexes to serialize +access to shared resources. +The mutex subsystem is only required for applications that access +SQLite from multiple threads. For single-threaded applications, or +applications which only call SQLite from a single thread, the mutex +subsystem can be completely disabled by recompiling with the following +option:

    + +
    +-DSQLITE_THREADSAFE=0
    +
    + +

Mutexes are cheap but they are not free, so performance will be better +when mutexes are completely disabled. The resulting library footprint +will also be a little smaller. Disabling the mutexes at compile-time +is a recommended optimization for applications where it makes sense.

    + +

When using SQLite as a shared library, an application can test to see +whether or not mutexes have been disabled using the +sqlite3_threadsafe() API. Applications that link against SQLite at +run-time and use SQLite from multiple threads should probably check this +API to make sure they did not accidentally get linked against a version of +the SQLite library that has its mutexes disabled. Single-threaded +applications will, of course, work correctly regardless of whether or +not SQLite is configured to be threadsafe, though they will be a little +bit faster when using versions of SQLite with mutexes disabled.
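For illustration, a minimal sketch of such a startup check; the helper name and the error handling are our own, not part of SQLite:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sqlite3.h>

    /* Abort early if this process was linked against a build of SQLite
    ** that was compiled with -DSQLITE_THREADSAFE=0 (no mutexes). */
    static void require_threadsafe_sqlite(void){
      if( sqlite3_threadsafe()==0 ){
        fprintf(stderr, "SQLite library was built without mutexes\n");
        exit(1);
      }
    }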

    + +

SQLite mutexes can also be disabled at run-time using +the sqlite3_config() interface. To completely disable all mutexing, +the application can invoke:

    + +
    +sqlite3_config(SQLITE_CONFIG_SINGLETHREAD);
    +
    + +

    Disabling mutexes at run-time is not as effective as disabling them +at compile-time since SQLite still must do a test of a boolean variable +to see if mutexes are enabled or disabled at each point where a mutex +might be required. But there is still a performance advantage for +disabling mutexes at run-time.

    + +

For multi-threaded applications that are careful about how they +manage threads, SQLite supports an alternative run-time configuration +that is halfway between not using any mutexes and the default situation +of mutexing everything in sight. This intermediate mutex configuration can +be established as follows:

    + +
    +sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
    +sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);
    +
    + +

There are two separate configuration changes here which can +be used either together or separately. The +SQLITE_CONFIG_MULTITHREAD setting disables the mutexes that +serialize access to database connection objects and +prepared statement objects. With this setting, the application +is free to use SQLite from multiple threads, but it must make sure +that no two threads try to access the same database connection +or any prepared statements associated with the same +database connection at the same time. Two threads can use SQLite +at the same time, but they must use separate database connections. +The second SQLITE_CONFIG_MEMSTATUS setting disables the mechanism +in SQLite that tracks the total size of all outstanding memory +allocation requests. This eliminates the need to mutex each call +to sqlite3_malloc() and sqlite3_free(), which saves a huge +number of mutex operations. But a consequence of disabling the +memory statistics mechanism is that the +sqlite3_memory_used(), sqlite3_memory_highwater(), and +sqlite3_soft_heap_limit() interfaces cease to work. +
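As a sketch of how an application might apply these two settings together (the wrapper function and error handling are illustrative only; sqlite3_config() is only accepted before the library has been initialized, so calls like these belong at the very start of the program):

    #include <stdio.h>
    #include <sqlite3.h>

    static int configure_sqlite_for_threads(void){
      /* Drop the per-connection serialization mutexes; the application
      ** promises never to use one connection from two threads at once. */
      int rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
      if( rc==SQLITE_OK ){
        /* Stop tracking total outstanding allocations, which removes
        ** the mutex around sqlite3_malloc()/sqlite3_free(). */
        rc = sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);
      }
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "sqlite3_config() failed with code %d\n", rc);
      }
      return rc;
    }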

    + +

SQLite uses pthreads for its mutex implementation on Unix and +SQLite requires a recursive mutex. Most modern pthread implementations +support recursive mutexes, but not all do. For systems that do not +support recursive mutexes, it is recommended that applications operate +in single-threaded mode only. If this is not possible, SQLite provides +an alternative recursive mutex implementation built on top of the +standard "fast" mutexes of pthreads. This alternative +implementation should work correctly as long as pthread_equal() is +atomic and the processor has a coherent data cache. The alternative +recursive mutex implementation is enabled by the following +compiler command-line switch:

    + +
    +-DSQLITE_HOMEGROWN_RECURSIVE_MUTEX=1
    +
    + +

    When porting SQLite to a new operating system, it is usually necessary +to completely replace the built-in mutex subsystem with an alternative +built around the mutex primitives of the new operating system. This +is accomplished by compiling SQLite with the following option:

    + +
    +-DSQLITE_MUTEX_APPDEF=1
    +
    + +

    When SQLite is compiled with the SQLITE_MUTEX_APPDEF=1 option, it +completely omits the implementation of its +mutex primitive functions. But the SQLite +library still attempts to call these functions where necessary, so the +application must itself implement the +mutex primitive functions and link them together +with SQLite.
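For reference, the mutex primitives in question are the ones declared in sqlite3.h; a sketch of the signatures that an application-supplied implementation would have to provide (declarations only, not an implementation):

    #include <sqlite3.h>

    /* Allocate or recycle a mutex of the requested type, and release it. */
    sqlite3_mutex *sqlite3_mutex_alloc(int mutexType);
    void sqlite3_mutex_free(sqlite3_mutex *p);

    /* Enter (blocking), try to enter (non-blocking), and leave a mutex. */
    void sqlite3_mutex_enter(sqlite3_mutex *p);
    int  sqlite3_mutex_try(sqlite3_mutex *p);
    void sqlite3_mutex_leave(sqlite3_mutex *p);

    /* Used only inside assert() statements in debugging builds. */
    int sqlite3_mutex_held(sqlite3_mutex *p);
    int sqlite3_mutex_notheld(sqlite3_mutex *p);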

    + +

    3.0 Configuring Or Replacing The Memory Allocation Subsystem

    + +

    By default, SQLite obtains the memory it needs for objects and +cache from the malloc()/free() implementation of the standard library. +There is also on-going work with experimental memory allocators that +satisfy all memory requests from a single fixed memory buffer handed +to SQLite at application start. Additional information on these +experimental memory allocators will be provided in a future revision +of this document.

    + +

    SQLite supports the ability of an application to specify an alternative +memory allocator at run-time using the sqlite3_config() interface. +For example:

    + +
    +sqlite3_config(SQLITE_CONFIG_MALLOC, dlmalloc, dlfree, dlrealloc, dlmalloc_usable_size);
    +
    + +

The SQLITE_CONFIG_MALLOC setting to sqlite3_config() passes four +function pointers into SQLite. The first three functions work exactly +like malloc(), free(), and realloc(), respectively, from the standard +C library. The fourth function pointer must be for a routine that +returns the size of a memory allocation given a pointer to that allocation. +In the default memory allocator implementation for SQLite, this fourth +"memsize" function is implemented by prepending an 8-byte size integer +to the beginning of every allocation. The memsize function is not a +standard part of most memory allocation libraries and so must be implemented +in this way. However, Doug Lea's dlmalloc implementation, as shown in the +example above, does provide an implementation of memsize which, if used, +reduces the size overhead and execution time of every memory allocation +and deallocation.

    + +

    TBD: Talk about alternative zero-malloc implementations and how to +select them at compile-time.

    + +

    TBD: Talk about how to disable any built-in memory allocator so that +an application is required to register the memory allocator at +startup.

    + +

    4.0 Adding New Virtual File Systems

    + +

Since version 3.5.0, SQLite has supported an interface called the +virtual file system or "VFS". +This object is somewhat misnamed since it +is really an interface to the whole underlying operating system, +not just the filesystem.

    + +

    One of the interesting features +of the VFS interface is that SQLite can support multiple VFSes at the +same time. Each database connection has to choose a single VFS for its +use when the connection is first opened using sqlite3_open_v2(). +But if a process contains multiple database connections each can choose +a different VFS. VFSes can be added at run-time using the +sqlite3_vfs_register() interface.
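For example, a connection selects a VFS by name through the fourth argument of sqlite3_open_v2(). In this sketch, "demovfs" stands for whatever name the application gave its VFS when registering it; passing a null pointer instead selects the default VFS:

    #include <sqlite3.h>

    /* Open test.db through the previously registered "demovfs" VFS. */
    static int open_with_demovfs(sqlite3 **ppDb){
      return sqlite3_open_v2("test.db", ppDb,
                             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                             "demovfs");
    }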

    + +

    The default builds for SQLite on Unix, Windows, and OS/2 include +a VFS appropriate for the target platform. SQLite builds for other +operating systems do not contain a VFS by default, but the application +can register one or more at run-time.

    + +

    5.0 Porting SQLite To A New Operating System

    + +

    In order to port SQLite to a new operating system - an operating +system not supported by default - the application +must provide...

    + +
      +
• a working mutex subsystem (but only if the application is multithreaded),
    • +
    • a working memory allocation subsystem (assuming it lacks malloc() +in its standard library), and
    • +
    • a working VFS implementation.
    • +
    + +

    All of these things can be provided in a single auxiliary C code file +and then linked with the stock "sqlite3.c" code file to generate a working +SQLite build for the target operating system. In addition to the +alternative mutex and memory allocation subsystems and the new VFS, +the auxiliary C code file should contain implementations for the +following two routines:

+
• sqlite3_initialize()
• sqlite3_shutdown()
+

    The "sqlite3.c" code file contains default implementations of a VFS +and of the sqlite3_initialize() and sqlite3_shutdown() functions that +are appropriate for Unix, Windows, and OS/2. +To prevent one of these default components from being loaded when sqlite3.c +is compiled, it is necessary to add the following compile-time +option:

    + +
    +-DSQLITE_OS_OTHER=1
    +
    + +

    + +

    The SQLite core will call sqlite3_initialize() early. The auxiliary +C code file can contain an implementation of sqlite3_initialize() that +registers an appropriate VFS and also perhaps initializes an alternative +mutex system (if mutexes are required) or does any memory allocation +subsystem initialization that is required. +The SQLite core never calls sqlite3_shutdown() but it is part of the +official SQLite API and is not otherwise provided when compiled with +-DSQLITE_OS_OTHER=1, so the auxiliary C code file should probably provide +it for completeness.

    + + +

    +


    +This page last modified 2008/11/01 13:26:49 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/cvstrac.css /tmp/3ARg2Grji7/sqlite3-3.6.16/www/cvstrac.css --- sqlite3-3.4.2/www/cvstrac.css 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/cvstrac.css 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,315 @@ + +body { + margin: auto; + font-family: "Verdana" "sans-serif"; + padding: 8px; +} + +.sqlite_header { + margin: auto; +} + +a { color: #45735f } +a:visited { color: #734559 } + +.logo { position:absolute; margin:3px; } +.tagline { + float:right; + text-align:right; + font-style:italic; + width:240px; + margin:12px; + margin-top:58px; +} + +.toolbar { + font-variant: small-caps; + text-align: center; + line-height: 1.6em; + margin: 0; + padding:1px 8px; +} +.toolbar a { color: white; text-decoration: none; padding: 6px 12px; } +.toolbar a:visited { color: white; } +.toolbar a:hover { color: #80a796; background: white; } + +.content { margin: 5%; } +.content dt { font-weight:bold; } +.content dd { margin-bottom: 25px; margin-left:20%; } +.content ul { padding:0px; padding-left: 15px; margin:0px; } + +/* rounded corners */ +.se { background: url(/images/se.png) 100% 100% no-repeat #80a796} +.sw { background: url(/images/sw.png) 0% 100% no-repeat } +.ne { background: url(/images/ne.png) 100% 0% no-repeat } +.nw { background: url(/images/nw.png) 0% 0% no-repeat } +/* CVSTrac default stylesheet. +** This more or less replicates the "original" CVSTrac style. +*/ + +/* ********* Standard/default header and footer ************************* */ +body {background-color: white} +#footer {font-size: .67em; clear: both;} + +hr {clear: both;} + +/* ********* Page header ************************************************ */ + +#header { + background: #f0ffff; + padding: 0px; + border: 2px #80a796 solid; + margin: 0px; + display: block; + position: relative; + } + +#title {font-weight: bold; + font-size: 1.17em; + margin: 2px; + max-width: 30%; + } + +#identity {font-size: .83em; + font-weight: normal; + margin: 2px; + max-width: 30%; + } + +/* The "current" item is traditionally omitted from the navigation bar */ +#navigation #current {display: none;} + +#navigation li {display: inline; margin: 1px; white-space: nowrap;} +#navigation li :before {content: "["; color: black;} +#navigation li :after {content: "]"; color: black;} +#navigation {text-align: right; list-style-type: none; + padding: 0; + margin: 0; + float: right; + max-width: 50%; + position: absolute; + top: 0em; + right: 0em; + } + +#action li {display: inline; margin: 1px; white-space: nowrap;} +#action li :before {content: "["; color: black;} +#action li :after {content: "]"; color: black;} +#action {list-style-type: none; text-align: right; background: #f0f0f0; + margin: 1px; + padding: 1px; + padding: 1px; + clear: both; + } + +#content {clear: both;} + +/* File breadcrumb off by default in stock CVSTrac */ +#breadcrumb {display: none} + +/* ********* Bits and boxes ******************************************** */ + +/* wiki history */ +#history { + float: right; + border: 2px #a0b5f4 solid; + white-space: nowrap; + margin: 2px; padding: 0px; + } +#history h3 { margin: 2px; padding: 0px; background: #d0d9f4; + font-size: 1em; font-weight: bold; + text-align: center; + } +#history ul { list-style-type: none; text-align: left; + margin: 2px; padding: 0px; + } +#history .current { font-weight: bold; } +#history .off { font-weight: bold; } +#history p { margin: 2px; padding: 0px; } + +#wikitoc {border-spacing: 20px 0px;} +#wikitoc th {background: #d0d0d0;} + +/* ********* Timeline 
page ******************************************** */ +#timeline {text-align: left; + margin: .5em 0 0 0; + padding: 0; + } + +/* date and milestone dividers */ +#timeline .dt {background: #f0ffff; + border: 2px #80a796 solid; + font-size: 1em; font-weight: normal; + padding: .25em; + margin: .2em 0 .2em 0; + float: left; + clear: left; + } +#timeline .dm {background: #f7c0c0; + border: 2px #ec9898 solid; + font-size: 1em; font-weight: normal; + padding: .25em; + margin: .2em 0 .2em 0; + float: left; + clear: left; + } + +#timeline .entries { + vertical-align: top; + clear: both; + margin: 0; padding: 0; + margin-left: 1em; + } +#timeline .entries dt { + clear: both; + float: left; + margin: 0; padding: 0; + } +#timeline .entries dd { + float: left; + margin: 0; padding: 0; + } +#timeline .entries .datetime {text-align: right; + width: 10%; + } +#timeline .entries .time {text-align: center; /* width: 8%; */} +#timeline .entries .hint {text-align: center; width: 5%;} +#timeline .entries .branch { + background: #dddddd; + width: 85%; + } +#timeline .entries .action { + width: 85%; + } + +/* timeline options, down at the bottom */ +#timelineopts { + margin-top: 1em; + padding: 3px; + padding-top: 1em; + clear: both; + border-top: 1px solid black; + } +#timelineopts fieldset { + margin: 1px; + padding: 3px; + border: 1px solid black; + } +#timelineopts #days {} +#timelineopts #checkout {width: 45%; float: left; padding: 3px;} +#timelineopts #ticket {width: 45%; float: left; padding: 3px;} +#timelineopts #timeline {width: 45%; clear: left; float: left; padding: 3px;} +#timelineopts #other {width: 45%; float: left; padding: 3px;} +#timelineopts #show {clear: both;} + +/* ********* Setup ******************************************** */ +/* setup menu */ +#index, #setup {margin: 1em 0; padding: 0;} + +#index dt, #setup dt { + display: block; + width: 30%; + float: left; + margin: 0 0 0 0; + padding: .5em; + font-weight: bold; + clear: left; + white-space: nowrap; + } +#index dd, #setup dd { + width: 55%; + float: left; + margin: 0 0 0 0; + padding: .5em; + } + +/* ********* Wiki formatting ******************************************** */ +/* ticket markup... various forms of "fixed" are shown as striked. This list +** would need to be updated if the user changes the ticket states +*/ +/* .ticket {font-weight: bold} */ +.ticket .closed, .ticket .fixed, .ticket .tested, .ticket .defer + {text-decoration: line-through} + +/* chng markup. Check-ins and milestones. */ +/* .chng {font-weight: bold} */ +/* .chng .event {color: orange} */ +/* .chng .release {color: orange} */ +/* .chng .checkin {color: red} */ +.chng .branch {background: #dddddd;} + +/* links to Wiki pages */ +/* .wiki {font-weight: bold} */ +.wiki .missing {color: gray; font-style: italic; font-weight: normal;} + +/* .diff {background: #e0e0e0} */ +/* hr.diff {visibility: hidden} */ +div.difftop {float: right} + +/* raw HTML in wiki */ +/* div.restricted {background-color: #e0e0e0} */ + +/* URL's in wiki markup. This puts a special character in front, but +** you could also use a link to an image or something... +*/ +a.external:before {content: "\0000a4"; color: #a0a0a0;} + +/* Path's to code mentioned in wiki markup. */ +a.file {font-family: monospace;} + +/* two and three asterisks... 
The class names are a bit lame, but +** I'm not sure what else to call 'em +*/ +.two {font-size: 1.17em} +.three {font-size: 1.33em} + +/* user id */ +/* .user {font-weight: bold;} */ + +/* revision */ +/* .vers {font-style: italic;} */ + +/* ********* Icons ****************************************************** */ +.icon:before {content: "\002022"; color: black;} + +.icon.milestone:before {content: "\0000a4"; color: #007878;} +.icon.del:before {content: "\0000d7"; color: red;} +.icon.file:before {content: "\002022"; color: black;} +.icon.dir:before {content: "\0000bb"; color: green;} +.icon.backup:before {content: "\0000ab"; color: black;} +.icon.checkin:before {content: "\002022"; color: blue;} +.icon.ticket:before {content: "\00203a"; color: blue;} +.icon.edit:before {content: "\00002a"; color: #8C80A3;} +.icon.attach:before {content: "\0000bb"; color: blue;} +.icon.inspect:before {content: "\002022"; color: blue;} + +/* each ticket type can have a timeline icon. This is usually +** used for the ticket creation. +*/ +.icon.type:before {content: "\0000bb"; color: purple;} +.icon.type.code:before {content: "\0000d7"; color: red;} + +/* each ticket state can also have a timeline icon */ +.icon.state:before {content: "\002666"; color: #ffa000;} +.icon.state.new:before {content: "\0000d7"; color: red;} +.icon.state.active:before {content: "\0000d7"; color: red;} +.icon.state.fixed:before {content: "\00221a"; color: green;} +.icon.state.closed:before {content: "\00221a"; color: green;} +.icon.state.defer:before {content: "\00221a"; color: green;} + +/* deprecated icons... they'll disappear shortly, hopefully */ +.icon.dot:before {content: "\002022"; color: blue;} +.icon.ptr1:before {content: "\0000bb"; color: purple;} + +/* ********* Other ****************************************************** */ + +.rlog {padding: 0; margin: 0; border-width: 0;} +.rlog td, .rlog th {text-align: left;} +.rlog .version {width: 80px; text-align: center;} +.rlog .date {white-space: nowrap; text-align: left;} + +/* Zebra stripe background effect */ +.rlog .odd {background: #efefff;} +.rlog .milestone {background: #f7c0c0;} + +.rlog .branch {background: #dddddd;} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/datatype3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/datatype3.html --- sqlite3-3.4.2/www/datatype3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/datatype3.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,551 @@ + + +Datatypes In SQLite Version 3 + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    Datatypes In SQLite Version 3

    + +

Most SQL database engines (every SQL database engine other than SQLite, +as far as we know) use static typing. With static typing, the datatype +of a value is determined by its container - the particular column the value +is stored in.

    + +

SQLite uses a more general dynamic type system. In SQLite, the datatype +of a value is associated with the value itself, not with the container in +which it is stored. The dynamic type system of SQLite is backwards +compatible with the more common static type systems of other database engines +in the sense that SQL statements that work on statically typed databases should +work the same way in SQLite. However, the dynamic typing in SQLite allows +it to do things that are not possible in traditional statically typed +databases.

    + +

    1. Storage Classes

    + +

    Each value stored in an SQLite database (or manipulated by the +database engine) has one of the following storage classes:

    +
      +
    • NULL. The value is a NULL value.

      +
    • INTEGER. The value is a signed integer, stored in 1, + 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.

      +
    • REAL. The value is a floating point value, stored as + an 8-byte IEEE floating point number.

      +
• TEXT. The value is a text string, stored using the + database encoding (UTF-8, UTF-16BE or UTF-16LE).

      +
    • BLOB. The value is a blob of data, stored exactly as + it was input.

      +
    + +

    Any column in a version 3 database, except an INTEGER PRIMARY KEY +column, may be used to store any type of value.

    + +

    All values supplied to SQLite, whether as literals embedded in SQL +statements or values bound to pre-compiled SQL statements +are assigned a storage class before the SQL statement is executed. +Under circumstances described below, the +database engine may convert values between numeric storage classes +(INTEGER and REAL) and TEXT during query execution. +

    + +

    Storage classes are initially assigned as follows:

    +
      +
    • Values specified as literals as part of SQL statements are + assigned storage class TEXT if they are enclosed by single or double + quotes, INTEGER if the literal is specified as an unquoted number + with no decimal point or exponent, REAL if the literal is an + unquoted number with a decimal point or exponent and NULL if the + value is a NULL. Literals with storage class BLOB are specified + using the X'ABCD' notation.

      + +
    • Values supplied using the sqlite3_bind_* + APIs are assigned + the storage class that most closely matches the native type bound + (i.e. sqlite3_bind_blob() binds a value with storage class BLOB).

      +
    +
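As an illustration, the built-in typeof() function reports the storage class assigned to each kind of literal:

    SELECT typeof(500), typeof(500.0), typeof('500'), typeof(X'0500'), typeof(NULL);
    -- integer|real|text|blob|null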

    The storage class of a value that is the result of an SQL scalar +operator depends on the outermost operator of the expression. +User-defined functions may return values with any storage class. It +is not generally possible to determine the storage class of the +result of an expression at compile time.

    + + + +

    2. Column Affinity

    + +

+In SQLite version 3, the type of a value is associated with the value +itself, not with the column or variable in which the value is stored. +(This is sometimes called + +manifest typing or +duck typing.) +All other SQL database engines that we are aware of use the more +restrictive system of static typing where the type is associated with +the container, not the value. +To look at it another way, SQLite provides dynamic datatypes such as +one finds in "script" programming languages such as Awk, Perl, Tcl, +Python, and Ruby, whereas other SQL database engines provide only +compile-time fixed, static typing such as found in Pascal, C++, and Java. +

    + +

+In order to maximize compatibility between SQLite and other database +engines, SQLite supports the concept of "type affinity" on columns. +The type affinity of a column is the recommended type for data stored +in that column. The key here is that the type is recommended, not +required. Any column can still store any type of data, in theory. +It is just that some columns, given the choice, will prefer to use +one storage class over another. The preferred storage class for +a column is called its "affinity". +

    + +

    Each column in an SQLite 3 database is assigned one of the +following type affinities:

    +
      +
    • TEXT
    • +
    • NUMERIC
    • +
    • INTEGER
    • +
    • REAL
    • +
    • NONE
    • +
    + +

    A column with TEXT affinity stores all data using storage classes +NULL, TEXT or BLOB. If numerical data is inserted into a column with +TEXT affinity it is converted to text form before being stored.

    + +

    A column with NUMERIC affinity may contain values using all five +storage classes. When text data is inserted into a NUMERIC column, an +attempt is made to convert it to an integer or real number before it +is stored. If the conversion is successful (meaning that the conversion +occurs without loss of information), then the value is stored +using the INTEGER or REAL storage class. If the conversion cannot be +performed without loss of information then +the value is stored using the TEXT storage class. No +attempt is made to convert NULL or blob values.

    + +

    A column that uses INTEGER affinity behaves in the same way as a +column with NUMERIC affinity, except that if a real value with no +fractional component and a magnitude that is less than or equal to the +largest possible integer (or text value that converts to such) is +inserted it is converted to an integer and stored using the INTEGER +storage class.

    + +

    A column with REAL affinity behaves like a column with NUMERIC +affinity except that it forces integer values into floating point +representation. (As an internal optimization, small floating point +values with no fractional component are stored on +disk as integers in order to take up less space and are converted +back into floating point as the value is read out.)

    + +

    A column with affinity NONE does not prefer one storage class over +another. No attempt is made to coerce data from one storage class into +another. The data is stored on disk exactly as specified.

    + +

    2.1 Determination Of Column Affinity

    + +

    The type affinity of a column is determined by the declared type +of the column, according to the following rules:

    +
      +
    1. If the datatype contains the string "INT" then it + is assigned INTEGER affinity.

      + +
    2. If the datatype of the column contains any of the strings + "CHAR", "CLOB", or "TEXT" then that + column has TEXT affinity. Notice that the type VARCHAR contains the + string "CHAR" and is thus assigned TEXT affinity.

      + +
    3. If the datatype for a column + contains the string "BLOB" or if + no datatype is specified then the column has affinity NONE.

      + +
    4. If the datatype for a column + contains any of the strings "REAL", "FLOA", + or "DOUB" then the column has REAL affinity

      + +
    5. Otherwise, the affinity is NUMERIC.

      +
    + +

    If a table is created using a "CREATE TABLE <table> AS +SELECT..." statement, then all columns have no datatype specified +and they are given no affinity.

    + +

    2.2 Column Affinity Example

    + +
    +
    CREATE TABLE t1(
    +    t  TEXT,
    +    nu NUMERIC, 
    +    i  INTEGER,
    +    no BLOB
    +);
    +
    +-- Storage classes for the following row:
    +-- TEXT, REAL, INTEGER, TEXT
    +INSERT INTO t1 VALUES('500.0', '500.0', '500.0', '500.0');
    +
    +-- Storage classes for the following row:
    +-- TEXT, REAL, INTEGER, REAL
    +INSERT INTO t1 VALUES(500.0, 500.0, 500.0, 500.0);
    +
    +
    + + +

    3. Comparison Expressions

    + +

    Like SQLite version 2, version 3 +features the binary comparison operators '=', +'<', '<=', '>=' and '!=', an operation to test for set +membership, 'IN', and the ternary comparison operator 'BETWEEN'.

    +

    The results of a comparison depend on the storage classes of the +two values being compared, according to the following rules:

    +
      +
    • A value with storage class NULL is considered less than any + other value (including another value with storage class NULL).

      + +
    • An INTEGER or REAL value is less than any TEXT or BLOB value. + When an INTEGER or REAL is compared to another INTEGER or REAL, a + numerical comparison is performed.

      + +
    • A TEXT value is less than a BLOB value. When two TEXT values + are compared, the C library function memcmp() is usually used to + determine the result. However this can be overridden, as described + under 'User-defined collation Sequences' below.

      + +
    • When two BLOB values are compared, the result is always + determined using memcmp().

      +
    + +

    SQLite may attempt to convert values between the numeric storage +classes (INTEGER and REAL) and TEXT before performing a comparison. + +Whether or not any conversions are attempted before the comparison takes +place depends on the nominal affinity assigned to the expressions on +either side of the binary operator. Affinities are assigned to expressions +in the following cases: +

      +
    • An expression that is a simple reference to a column value + has the same affinity as the column it refers to. Note that if X and Y.Z + are column names, then +X and +Y.Z are considered expressions.

      +
    • An expression of the form "CAST(<expr> TO <type>)" + is assigned an affinity as if it were a reference to a column declared + with type <type> +

    + +

Conversions are applied before the comparison as described below. +In the following bullet points, the two operands are +referred to as expression A and expression B. Expressions A and B may +appear as either the left or right operands - the following statements +are true when considering both "A <op> B" and "B <op> A". +

      +
    • When two expressions are compared, if expression A has + INTEGER or REAL or NUMERIC affinity and expression B does not, + then NUMERIC affinity is applied to the value of expression B + before the comparison takes place. +

    • When two expressions are compared, if expression A has + been assigned an affinity and expression B has not, then the + affinity of expression A is applied to the value of expression B + before the comparison takes place. +

    • Otherwise, if neither of the above applies, no conversions + occur. The results are compared as is. If a string is compared to a + number, the number will always be less than the string. +

    + +

    +In SQLite, the expression "a BETWEEN b AND c" is equivalent to "a >= b +AND a <= c", even if this means that different affinities are applied to +'a' in each of the comparisons required to evaluate the expression. +

    + +

Expressions of the type "a IN (SELECT b ....)" are handled by the three +rules enumerated above for binary comparisons (i.e. in a +similar manner to "a = b"). For example, if 'b' is a column value +and 'a' is an expression, then the affinity of 'b' is applied to 'a' +before any comparisons take place.

    + +

SQLite treats the expression "a IN (x, y, z)" as equivalent to "a = x OR a = y OR a = z". The values to the right of the IN operator (the "x", "y", +and "z" values in this example) are considered to be expressions, even if they +happen to be column values. If the value on the left of the IN operator is +a column, then the affinity of that column is used. If the value is an +expression then no conversions occur. +

    + +

    3.1 Comparison Example

    + +
    +
    +CREATE TABLE t1(
    +    a TEXT,
    +    b NUMERIC,
    +    c BLOB
    +);
    +
    +-- Storage classes for the following row:
    +-- TEXT, REAL, TEXT
    +INSERT INTO t1 VALUES('500', '500', '500');
    +
    +-- 60 and 40 are converted to '60' and '40' and values are compared as TEXT.
    +SELECT a < 60, a < 40 FROM t1;
    +1|0
    +
    +-- Comparisons are numeric. No conversions are required.
    +SELECT b < 60, b < 600 FROM t1;
    +0|1
    +
    +-- Both 60 and 600 (storage class NUMERIC) are less than '500'
    +-- (storage class TEXT).
    +SELECT c < 60, c < 600 FROM t1;
    +0|0
    +
    +
    +

    4. Operators

    + +

    All mathematical operators (which is to say, all operators other +than the concatenation operator "||") apply NUMERIC +affinity to all operands prior to being carried out. If one or both +operands cannot be converted to NUMERIC then the result of the +operation is NULL.

    + +

    For the concatenation operator, TEXT affinity is applied to both +operands. If either operand cannot be converted to TEXT (because it +is NULL or a BLOB) then the result of the concatenation is NULL.
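A short illustration of both rules, with results as the sqlite3 shell would display them:

    -- NUMERIC affinity converts '5' to 5 before the addition.
    SELECT '5' + 2;        -- 7
    -- TEXT affinity converts 5 to '5' before the concatenation.
    SELECT 5 || '0';       -- 50
    -- A NULL operand makes the concatenation result NULL.
    SELECT 'abc' || NULL;  -- NULL (displayed as an empty line)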

    + +

    5. Sorting, Grouping and Compound SELECTs

    + +

When values are sorted by an ORDER BY clause, values with storage +class NULL come first, followed by INTEGER and REAL values +interspersed in numeric order, followed by TEXT values usually in +memcmp() order, and finally BLOB values in memcmp() order. No storage +class conversions occur before the sort.

    + +

When grouping values with the GROUP BY clause, values with +different storage classes are considered distinct, except for INTEGER +and REAL values, which are considered equal if they are numerically +equal. No affinities are applied to any values as the result of a +GROUP BY clause.

    + +

The compound SELECT operators UNION, +INTERSECT and EXCEPT perform implicit comparisons between values. +Before these comparisons are performed, an affinity may be applied to +each value. The same affinity, if any, is applied to all values that +may be returned in a single column of the compound SELECT result set. +The affinity applied is the affinity of the column returned by the +leftmost component SELECT that has a column value (and not some +other kind of expression) in that position. If for a given compound +SELECT column none of the component SELECTs return a column value, no +affinity is applied to the values from that column before they are +compared.

    + +

    6. Other Affinity Modes

    + +

    The above sections describe the operation of the database engine +in 'normal' affinity mode. SQLite version 3 will feature two other affinity +modes, as follows:

    +
      +
    • Strict affinity mode. In this mode if a conversion + between storage classes is ever required, the database engine + returns an error and the current statement is rolled back.

      + +
    • No affinity mode. In this mode no conversions between + storage classes are ever performed. Comparisons between values of + different storage classes (except for INTEGER and REAL) are always + false.

      +
    + + + +

    7. User-defined Collation Sequences

    + +

    +By default, when SQLite compares two text values, the result of the +comparison is determined using memcmp(), regardless of the encoding of the +string. SQLite v3 provides the ability for users to supply arbitrary +comparison functions, known as user-defined "collation sequences" or +"collating functions", to be used instead of memcmp(). +

    +

    +Aside from the default collation sequence BINARY, implemented using +memcmp(), SQLite features two extra built-in collation sequences +intended for testing purposes, the NOCASE and RTRIM collations: +

    +
      +
    • BINARY - Compares string data using memcmp(), regardless + of text encoding.
    • +
    • NOCASE - The same as binary, except the 26 upper case + characters of ASCII are + folded to their lower case equivalents before + the comparison is performed. Note that only ASCII characters + are case folded. SQLite does not attempt to do full + UTF case folding due to the size of the tables required.
    • + +
    • RTRIM - The same as binary, except that trailing space + characters are ignored.
    • +
    + + + +

    7.1 Assigning Collation Sequences from SQL

    + +

    +Each column of each table has a default collation type. If a collation type +other than BINARY is required, a COLLATE clause is specified as part of the +column definition to define it. +

    + +

    +Whenever two text values are compared by SQLite, a collation sequence is +used to determine the results of the comparison according to the following +rules. Sections 3 and 5 of this document describe the circumstances under +which such a comparison takes place. +

    + +

    +For binary comparison operators (=, <, >, <= and >=) if either +operand is a column, then the default collation type of the column determines +the collation sequence to use for the comparison. If both operands are +columns, then the collation type for the left operand determines the collation +sequence used. If neither operand is a column, then the BINARY collation +sequence is used. For the purposes of this paragraph, a column name +preceded by one or more unary "+" operators is considered a column name. +

    + +

    +The expression "x BETWEEN y and z" is equivalent to "x >= y AND x <= +z". The expression "x IN (SELECT y ...)" is handled in the same way as the +expression "x = y" for the purposes of determining the collation sequence +to use. The collation sequence used for expressions of the form "x IN (y, z +...)" is the default collation type of x if x is a column, or BINARY +otherwise. +

    + +

    +An ORDER BY clause that is part of a SELECT +statement may be assigned a collation sequence to be used for the sort +operation explicitly. In this case the explicit collation sequence is +always used. Otherwise, if the expression sorted by an ORDER BY clause is +a column, then the default collation type of the column is used to +determine sort order. If the expression is not a column, then the BINARY +collation sequence is used. +

    + +

    7.2 Collation Sequences Example

    +

    +The examples below identify the collation sequences that would be used to +determine the results of text comparisons that may be performed by various +SQL statements. Note that a text comparison may not be required, and no +collation sequence used, in the case of numeric, blob or NULL values. +

    +
    +
    +CREATE TABLE t1(
    +    a,                 -- default collation type BINARY
    +    b COLLATE BINARY,  -- default collation type BINARY
    +    c COLLATE REVERSE, -- default collation type REVERSE
    +    d COLLATE NOCASE   -- default collation type NOCASE
    +);
    +
    +-- Text comparison is performed using the BINARY collation sequence.
    +SELECT (a = b) FROM t1;
    +
    +-- Text comparison is performed using the NOCASE collation sequence.
    +SELECT (d = a) FROM t1;
    +
    +-- Text comparison is performed using the BINARY collation sequence.
    +SELECT (a = d) FROM t1;
    +
    +-- Text comparison is performed using the REVERSE collation sequence.
    +SELECT ('abc' = c) FROM t1;
    +
    +-- Text comparison is performed using the REVERSE collation sequence.
    +SELECT (c = 'abc') FROM t1;
    +
    +-- Grouping is performed using the NOCASE collation sequence (i.e. values
    +-- 'abc' and 'ABC' are placed in the same group).
    +SELECT count(*) FROM t1 GROUP BY d;
    +
    +-- Grouping is performed using the BINARY collation sequence.
    +SELECT count(*) FROM t1 GROUP BY (d || '');
    +
    +-- Sorting is performed using the REVERSE collation sequence.
    +SELECT * FROM t1 ORDER BY c;
    +
    +-- Sorting is performed using the BINARY collation sequence.
    +SELECT * FROM t1 ORDER BY (c || '');
    +
    +-- Sorting is performed using the NOCASE collation sequence.
    +SELECT * FROM t1 ORDER BY c COLLATE NOCASE;
    +
    +
    +
    +
    +This page last modified 2009/06/08 12:57:30 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/datatype3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/datatype3.tcl --- sqlite3-3.4.2/www/datatype3.tcl 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/www/datatype3.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,440 +0,0 @@ -set rcsid {$Id: datatype3.tcl,v 1.17 2007/06/20 16:13:23 drh Exp $} -source common.tcl -header {Datatypes In SQLite Version 3} -puts { -

    Datatypes In SQLite Version 3

    - -

    1. Storage Classes

    - -

    Version 2 of SQLite stores all column values as ASCII text. -Version 3 enhances this by providing the ability to store integer and -real numbers in a more compact format and the capability to store -BLOB data.

    - -

    Each value stored in an SQLite database (or manipulated by the -database engine) has one of the following storage classes:

    -
      -
    • NULL. The value is a NULL value.

      -
    • INTEGER. The value is a signed integer, stored in 1, - 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.

      -
    • REAL. The value is a floating point value, stored as - an 8-byte IEEE floating point number.

      -
    • TEXT. The value is a text string, stored using the - database encoding (UTF-8, UTF-16BE or UTF-16LE).

      -
    • BLOB. The value is a blob of data, stored exactly as - it was input.

      -
    - -

    As in SQLite version 2, any column in a version 3 database except an INTEGER -PRIMARY KEY may be used to store any type of value. The exception to -this rule is described below under 'Strict Affinity Mode'.

    - -

    All values supplied to SQLite, whether as literals embedded in SQL -statements or values bound to pre-compiled SQL statements -are assigned a storage class before the SQL statement is executed. -Under circumstances described below, the -database engine may convert values between numeric storage classes -(INTEGER and REAL) and TEXT during query execution. -

    - -

    Storage classes are initially assigned as follows:

    -
      -
    • Values specified as literals as part of SQL statements are - assigned storage class TEXT if they are enclosed by single or double - quotes, INTEGER if the literal is specified as an unquoted number - with no decimal point or exponent, REAL if the literal is an - unquoted number with a decimal point or exponent and NULL if the - value is a NULL. Literals with storage class BLOB are specified - using the X'ABCD' notation.

      -
    • Values supplied using the sqlite3_bind_* APIs are assigned - the storage class that most closely matches the native type bound - (i.e. sqlite3_bind_blob() binds a value with storage class BLOB).

      -
    -

    The storage class of a value that is the result of an SQL scalar -operator depends on the outermost operator of the expression. -User-defined functions may return values with any storage class. It -is not generally possible to determine the storage class of the -result of an expression at compile time.
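    The typeof() SQL function returns the name of the storage class of its argument, which is a convenient way to observe these assignments (an illustrative sketch; the results are shown as comments):

    SELECT typeof(NULL);          -- null
    SELECT typeof(500);           -- integer
    SELECT typeof(500.0);         -- real
    SELECT typeof('500');         -- text
    SELECT typeof(X'0500');       -- blob
    SELECT typeof('abc' || 500);  -- text: determined by the outermost operator, ||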

    - - -

    2. Column Affinity

    - -

    -In SQLite version 3, the type of a value is associated with the value -itself, not with the column or variable in which the value is stored. -(This is sometimes called - -manifest typing.) -All other SQL database engines that we are aware of use the more -restrictive system of static typing where the type is associated with -the container, not the value. -

    - -

    -In order to maximize compatibility between SQLite and other database -engines, SQLite supports the concept of "type affinity" on columns. -The type affinity of a column is the recommended type for data stored -in that column. The key here is that the type is recommended, not -required. Any column can still store any type of data, in theory. -It is just that some columns, given the choice, will prefer to use -one storage class over another. The preferred storage class for -a column is called its "affinity". -

    - -

    Each column in an SQLite 3 database is assigned one of the -following type affinities:

    -
      -
    • TEXT
    • -
    • NUMERIC
    • -
    • INTEGER
    • -
    • REAL
    • -
    • NONE
    • -
    - -

    A column with TEXT affinity stores all data using storage classes -NULL, TEXT or BLOB. If numerical data is inserted into a column with -TEXT affinity it is converted to text form before being stored.

    - -

    A column with NUMERIC affinity may contain values using all five -storage classes. When text data is inserted into a NUMERIC column, an -attempt is made to convert it to an integer or real number before it -is stored. If the conversion is successful, then the value is stored -using the INTEGER or REAL storage class. If the conversion cannot be -performed the value is stored using the TEXT storage class. No -attempt is made to convert NULL or blob values.

    - -

    A column that uses INTEGER affinity behaves in the same way as a -column with NUMERIC affinity, except that if a real value with no -floating point component (or text value that converts to such) is -inserted it is converted to an integer and stored using the INTEGER -storage class.

    - -

    A column with REAL affinity behaves like a column with NUMERIC -affinity except that it forces integer values into floating point -representation. (As an optimization, integer values are stored on -disk as integers in order to take up less space and are only converted -to floating point as the value is read out of the table.)

    - -

    A column with affinity NONE does not prefer one storage class over -another. It makes no attempt to coerce data before -it is inserted.

    - -

    2.1 Determination Of Column Affinity

    - -

    The type affinity of a column is determined by the declared type -of the column, according to the following rules:

    -
      -
    1. If the datatype contains the string "INT" then it - is assigned INTEGER affinity.

      - -
    2. If the datatype of the column contains any of the strings - "CHAR", "CLOB", or "TEXT" then that - column has TEXT affinity. Notice that the type VARCHAR contains the - string "CHAR" and is thus assigned TEXT affinity.

      - -
    3. If the datatype for a column - contains the string "BLOB" or if - no datatype is specified then the column has affinity NONE.

      - -
    4. If the datatype for a column - contains any of the strings "REAL", "FLOA", - or "DOUB" then the column has REAL affinity

      - -
    5. Otherwise, the affinity is NUMERIC.

      -
    - -

    If a table is created using a "CREATE TABLE <table> AS -SELECT..." statement, then all columns have no datatype specified -and they are given no affinity.
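    The following sketch applies the five rules above to a handful of declared types (the table and type names are chosen only to exercise each rule and are not part of the original example):

    CREATE TABLE affinity_demo(
        a INT,               -- rule 1: contains "INT"    -> INTEGER affinity
        b VARCHAR(30),       -- rule 2: contains "CHAR"   -> TEXT affinity
        c BLOB,              -- rule 3: contains "BLOB"   -> NONE affinity
        d DOUBLE PRECISION,  -- rule 4: contains "DOUB"   -> REAL affinity
        e DATETIME,          -- rule 5: no string matches -> NUMERIC affinity
        f CHARINT            -- rule 1 is tested before rule 2 -> INTEGER affinity
    );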

    - -

    2.2 Column Affinity Example

    - -
    -
    CREATE TABLE t1(
    -    t  TEXT,
    -    nu NUMERIC, 
    -    i  INTEGER,
    -    no BLOB
    -);
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, INTEGER, TEXT
    -INSERT INTO t1 VALUES('500.0', '500.0', '500.0', '500.0');
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, INTEGER, REAL
    -INSERT INTO t1 VALUES(500.0, 500.0, 500.0, 500.0);
    -
    -
    - - -

    3. Comparison Expressions

    - -

    Like SQLite version 2, version 3 -features the binary comparison operators '=', -'<', '>', '<=', '>=' and '!=', an operation to test for set -membership, 'IN', and the ternary comparison operator 'BETWEEN'.

    -

    The results of a comparison depend on the storage classes of the -two values being compared, according to the following rules:

    -
      -
    • A value with storage class NULL is considered less than any - other value (including another value with storage class NULL).

      - -
    • An INTEGER or REAL value is less than any TEXT or BLOB value. - When an INTEGER or REAL is compared to another INTEGER or REAL, a - numerical comparison is performed.

      - -
    • A TEXT value is less than a BLOB value. When two TEXT values - are compared, the C library function memcmp() is usually used to - determine the result. However this can be overridden, as described - under 'User-defined collation Sequences' below.

      - -
    • When two BLOB values are compared, the result is always - determined using memcmp().

      -
    - -

    SQLite may attempt to convert values between the numeric storage -classes (INTEGER and REAL) and TEXT before performing a comparison. -For binary comparisons, this is done in the cases enumerated below. -The term "expression" used in the bullet points below means any -SQL scalar expression or literal other than a column value. Note that -if X and Y.Z are column names, then +X and +Y.Z are considered -expressions.

    -
      -
    • When a column value is compared to the result of an - expression, the affinity of the column is applied to the result of - the expression before the comparison takes place.

      - -
    • When two column values are compared, if one column has - INTEGER or REAL or NUMERIC affinity and the other does not, - then NUMERIC affinity is applied to any values with storage - class TEXT extracted from the non-NUMERIC column.

      - -
    • When the results of two expressions are compared, no - conversions occur. The results are compared as is. If a string - is compared to a number, the number will always be less than the - string.

      -
    - -

    -In SQLite, the expression "a BETWEEN b AND c" is equivalent to "a >= b -AND a <= c", even if this means that different affinities are applied to -'a' in each of the comparisons required to evaluate the expression. -

    - -

    Expressions of the type "a IN (SELECT b ....)" are handled by the three -rules enumerated above for binary comparisons (e.g. in a -similar manner to "a = b"). For example if 'b' is a column value -and 'a' is an expression, then the affinity of 'b' is applied to 'a' -before any comparisons take place.

    - -

    SQLite treats the expression "a IN (x, y, z)" as equivalent to "a = +x OR -a = +y OR a = +z". The values to the right of the IN operator (the "x", "y", -and "z" values in this example) are considered to be expressions, even if they -happen to be column values. If the value of the left of the IN operator is -a column, then the affinity of that column is used. If the value is an -expression then no conversions occur. -

    - -

    3.1 Comparison Example

    - -
    -
    -CREATE TABLE t1(
    -    a TEXT,
    -    b NUMERIC,
    -    c BLOB
    -);
    -
    --- Storage classes for the following row:
    --- TEXT, REAL, TEXT
    -INSERT INTO t1 VALUES('500', '500', '500');
    -
    --- 60 and 40 are converted to '60' and '40' and values are compared as TEXT.
    -SELECT a < 60, a < 40 FROM t1;
    -1|0
    -
    --- Comparisons are numeric. No conversions are required.
    -SELECT b < 60, b < 600 FROM t1;
    -0|1
    -
    --- Both 60 and 600 (storage class INTEGER) are less than '500'
    --- (storage class TEXT).
    -SELECT c < 60, c < 600 FROM t1;
    -0|0
    -
    -
    -

    4. Operators

    - -

    All mathematical operators (which is to say, all operators other -than the concatenation operator "||") apply NUMERIC -affinity to all operands prior to being carried out. If one or both -operands cannot be converted to NUMERIC then the result of the -operation is NULL.

    - -

    For the concatenation operator, TEXT affinity is applied to both -operands. If either operand cannot be converted to TEXT (because it -is NULL or a BLOB) then the result of the concatenation is NULL.

    - -

    5. Sorting, Grouping and Compound SELECTs

    - -

    When values are sorted by an ORDER BY clause, values with storage -class NULL come first, followed by INTEGER and REAL values -interspersed in numeric order, followed by TEXT values usually in -memcmp() order, and finally BLOB values in memcmp() order. No storage -class conversions occur before the sort.
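    For example, assuming a single untyped column populated with one value of each storage class (an illustrative sketch):

    CREATE TABLE sort_demo(x);
    INSERT INTO sort_demo VALUES(NULL);
    INSERT INTO sort_demo VALUES('xyz');
    INSERT INTO sort_demo VALUES(10);
    INSERT INTO sort_demo VALUES(2.5);
    INSERT INTO sort_demo VALUES(X'41');

    -- Rows come back as NULL, 2.5, 10, 'xyz', X'41':
    -- NULL first, then numbers in numeric order, then TEXT, then BLOB.
    SELECT x FROM sort_demo ORDER BY x;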

    - -

    When grouping values with the GROUP BY clause, values with -different storage classes are considered distinct, except for INTEGER -and REAL values which are considered equal if they are numerically -equal. No affinities are applied to any values as the result of a -GROUP BY clause.
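    Continuing the same style of sketch for GROUP BY:

    CREATE TABLE group_demo(x);
    INSERT INTO group_demo VALUES(1);     -- INTEGER
    INSERT INTO group_demo VALUES(1.0);   -- REAL
    INSERT INTO group_demo VALUES('1');   -- TEXT

    -- Two groups are returned: the numerically equal values 1 and 1.0 fall
    -- into one group, while the TEXT value '1' forms a group of its own.
    SELECT x, count(*) FROM group_demo GROUP BY x;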

    - -

    The compound SELECT operators UNION, -INTERSECT and EXCEPT perform implicit comparisons between values. -Before these comparisons are performed an affinity may be applied to -each value. The same affinity, if any, is applied to all values that -may be returned in a single column of the compound SELECT result set. -The affinity applied is the affinity of the column returned by the -left-most component SELECT that has a column value (and not some -other kind of expression) in that position. If for a given compound -SELECT column none of the component SELECTs return a column value, no -affinity is applied to the values from that column before they are -compared.

    - -

    6. Other Affinity Modes

    - -

    The above sections describe the operation of the database engine -in 'normal' affinity mode. SQLite version 3 will feature two other affinity -modes, as follows:

    -
      -
    • Strict affinity mode. In this mode if a conversion - between storage classes is ever required, the database engine - returns an error and the current statement is rolled back.

      - -
    • No affinity mode. In this mode no conversions between - storage classes are ever performed. Comparisons between values of - different storage classes (except for INTEGER and REAL) are always - false.

      -
    - -
    -

    7. User-defined Collation Sequences

    - -

    -By default, when SQLite compares two text values, the result of the -comparison is determined using memcmp(), regardless of the encoding of the -string. SQLite v3 provides the ability for users to supply arbitrary -comparison functions, known as user-defined collation sequences, to be used -instead of memcmp(). -

    -

    -Aside from the default collation sequence BINARY, implemented using -memcmp(), SQLite features one extra built-in collation sequence -intended for testing purposes, the NOCASE collation: -

    -
      -
    • BINARY - Compares string data using memcmp(), regardless - of text encoding.
    • -
    • NOCASE - The same as binary, except the 26 upper case - characters used by the English language are - folded to their lower case equivalents before - the comparison is performed.
    - - -

    7.1 Assigning Collation Sequences from SQL

    - -

    -Each column of each table has a default collation type. If a collation type -other than BINARY is required, a COLLATE clause is specified as part of the -column definition to define it. -

    - -

    -Whenever two text values are compared by SQLite, a collation sequence is -used to determine the results of the comparison according to the following -rules. Sections 3 and 5 of this document describe the circumstances under -which such a comparison takes place. -

    - -

    -For binary comparison operators (=, <, >, <= and >=) if either operand is a -column, then the default collation type of the column determines the -collation sequence to use for the comparison. If both operands are columns, -then the collation type for the left operand determines the collation -sequence used. If neither operand is a column, then the BINARY collation -sequence is used. For the purposes of this paragraph, a column name -preceded by one or more unary "+" operators is considered a column name. -

    - -

    -The expression "x BETWEEN y and z" is equivalent to "x >= y AND x <= -z". The expression "x IN (SELECT y ...)" is handled in the same way as the -expression "x = y" for the purposes of determining the collation sequence -to use. The collation sequence used for expressions of the form "x IN (y, z -...)" is the default collation type of x if x is a column, or BINARY -otherwise. -

    - -

    -An ORDER BY clause that is part of a SELECT -statement may be assigned a collation sequence to be used for the sort -operation explicitly. In this case the explicit collation sequence is -always used. Otherwise, if the expression sorted by an ORDER BY clause is -a column, then the default collation type of the column is used to -determine sort order. If the expression is not a column, then the BINARY -collation sequence is used. -

    - -

    7.2 Collation Sequences Example

    -

    -The examples below identify the collation sequences that would be used to -determine the results of text comparisons that may be performed by various -SQL statements. Note that a text comparison may not be required, and no -collation sequence used, in the case of numeric, blob or NULL values. -

    -
    -
    -CREATE TABLE t1(
    -    a,                 -- default collation type BINARY
    -    b COLLATE BINARY,  -- default collation type BINARY
    -    c COLLATE REVERSE, -- default collation type REVERSE
    -    d COLLATE NOCASE   -- default collation type NOCASE
    -);
    -
    --- Text comparison is performed using the BINARY collation sequence.
    -SELECT (a = b) FROM t1;
    -
    --- Text comparison is performed using the NOCASE collation sequence.
    -SELECT (d = a) FROM t1;
    -
    --- Text comparison is performed using the BINARY collation sequence.
    -SELECT (a = d) FROM t1;
    -
    --- Text comparison is performed using the REVERSE collation sequence.
    -SELECT ('abc' = c) FROM t1;
    -
    --- Text comparison is performed using the REVERSE collation sequence.
    -SELECT (c = 'abc') FROM t1;
    -
    --- Grouping is performed using the NOCASE collation sequence (i.e. values
    --- 'abc' and 'ABC' are placed in the same group).
    -SELECT count(*) GROUP BY d FROM t1;
    -
    --- Grouping is performed using the BINARY collation sequence.
    -SELECT count(*) GROUP BY (d || '') FROM t1;
    -
    --- Sorting is performed using the REVERSE collation sequence.
    -SELECT * FROM t1 ORDER BY c;
    -
    --- Sorting is performed using the BINARY collation sequence.
    -SELECT * FROM t1 ORDER BY (c || '');
    -
    --- Sorting is performed using the NOCASE collation sequence.
    -SELECT * FROM t1 ORDER BY c COLLATE NOCASE;
    -
    -
    -
    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/datatypes.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/datatypes.html --- sqlite3-3.4.2/www/datatypes.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/datatypes.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,311 @@ + + +Datatypes In SQLite version 2 + + + + + +

    Datatypes In SQLite Version 2

    + +

    1.0   Typelessness

    +

    +SQLite is "typeless". This means that you can store any +kind of data you want in any column of any table, regardless of the +declared datatype of that column. +(See the one exception to this rule in section 2.0 below.) +This behavior is a feature, not +a bug. A database is suppose to store and retrieve data and it +should not matter to the database what format that data is in. +The strong typing system found in most other SQL engines and +codified in the SQL language spec is a misfeature - +it is an example of the implementation showing through into the +interface. SQLite seeks to overcome this misfeature by allowing +you to store any kind of data into any kind of column and by +allowing flexibility in the specification of datatypes. +

    + +

    +A datatype to SQLite is any sequence of zero or more names +optionally followed by a parenthesized list of one or two +signed integers. Notice in particular that a datatype may +be zero or more names. That means that an empty +string is a valid datatype as far as SQLite is concerned. +So you can declare tables where the datatype of each column +is left unspecified, like this: +

    + +
    +CREATE TABLE ex1(a,b,c);
    +
    + +

    +Even though SQLite allows the datatype to be omitted, it is +still a good idea to include it in your CREATE TABLE statements, +since the data type often serves as a good hint to other +programmers about what you intend to put in the column. And +if you ever port your code to another database engine, that +other engine will probably require a datatype of some kind. +SQLite accepts all the usual datatypes. For example: +

    + +
    +CREATE TABLE ex2(
    +  a VARCHAR(10),
    +  b NVARCHAR(15),
    +  c TEXT,
    +  d INTEGER,
    +  e FLOAT,
    +  f BOOLEAN,
    +  g CLOB,
    +  h BLOB,
    +  i TIMESTAMP,
    +  j NUMERIC(10,5),
    +  k VARYING CHARACTER (24),
    +  l NATIONAL VARYING CHARACTER(16)
    +);
    +
    + +

    +And so forth. Basically any sequence of names optionally followed by +one or two signed integers in parentheses will do. +

    + +

    2.0   The INTEGER PRIMARY KEY

    + +

    +One exception to the typelessness of SQLite is a column whose type +is INTEGER PRIMARY KEY. (And you must use "INTEGER" not "INT". +A column of type INT PRIMARY KEY is typeless just like any other.) +INTEGER PRIMARY KEY columns must contain a 32-bit signed integer. Any +attempt to insert non-integer data will result in an error. +

    + +

    +INTEGER PRIMARY KEY columns can be used to implement the equivalent +of AUTOINCREMENT. If you try to insert a NULL into an INTEGER PRIMARY +KEY column, the column will actually be filled with an integer that is +one greater than the largest key already in the table. Or if the +largest key is 2147483647, then the column will be filled with a +random integer. Either way, the INTEGER PRIMARY KEY column will be +assigned a unique integer. You can retrieve this integer using +the sqlite_last_insert_rowid() API function or using the +last_insert_rowid() SQL function in a subsequent SELECT statement. +
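    A minimal sketch of this behaviour (the table and values are illustrative):

    CREATE TABLE ex3(id INTEGER PRIMARY KEY, data);
    INSERT INTO ex3(id, data) VALUES(NULL, 'first');    -- id becomes 1
    INSERT INTO ex3(id, data) VALUES(NULL, 'second');   -- id becomes 2
    SELECT last_insert_rowid();                         -- returns 2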

    + +

    3.0   Comparison and Sort Order

    + +

    +SQLite is typeless for the purpose of deciding what data is allowed +to be stored in a column. But some notion of type comes into play +when sorting and comparing data. For these purposes, a column or +an expression can be one of two types: numeric and text. +The sort or comparison may give different results depending on which +type of data is being sorted or compared. +

    + +

    +If data is of type text then the comparison is determined by +the standard C data comparison functions memcmp() or +strcmp(). The comparison looks at bytes from two inputs one +by one and returns the first non-zero difference. +Strings are '\000' terminated so shorter +strings sort before longer strings, as you would expect. +

    + +

    +For numeric data, this situation is more complex. If both inputs +look like well-formed numbers, then they are converted +into floating point values using atof() and compared numerically. +If one input is not a well-formed number but the other is, then the +number is considered to be less than the non-number. If neither input +is a well-formed number, then strcmp() is used to do the +comparison. +

    + +

    +Do not be confused by the fact that a column might have a "numeric" +datatype. This does not mean that the column can contain only numbers. +It merely means that if the column does contain a number, that number +will sort in numerical order. +

    + +

    +For both text and numeric values, NULL sorts before any other value. +A comparison of any value against NULL using operators like "<" or +">=" is always false. +

    + +

    4.0   How SQLite Determines Datatypes

    + +

    +For SQLite version 2.6.3 and earlier, all values used the numeric datatype. +The text datatype appears in version 2.7.0 and later. In the sequel it +is assumed that you are using version 2.7.0 or later of SQLite. +

    + +

    +For an expression, the datatype of the result is often determined by +the outermost operator. For example, arithmetic operators ("+", "*", "%") +always return a numeric result. The string concatenation operator +("||") returns a text result. And so forth. If you are ever in doubt +about the datatype of an expression you can use the special typeof() +SQL function to determine what the datatype is. For example: +

    + +
    +sqlite> SELECT typeof('abc'+123);
    +numeric
    +sqlite> SELECT typeof('abc'||123);
    +text
    +
    + +

    +For table columns, the datatype is determined by the type declaration +of the CREATE TABLE statement. The datatype is text if and only if +the type declaration contains one or more of the following strings: +

    + +
    +BLOB
    +CHAR
    +CLOB
    +TEXT +
    + +

    +The search for these strings in the type declaration is case insensitive, +of course. If any of the above strings occur anywhere in the type +declaration, then the datatype of the column is text. Notice that +the type "VARCHAR" contains "CHAR" as a substring so it is considered +text.

    + +

    If none of the strings above occur anywhere in the type declaration, +then the datatype is numeric. Note in particular that the datatype for columns +with an empty type declaration is numeric. +

    + +

    5.0   Examples

    + +

    +Consider the following two command sequences: +

    + +
    +CREATE TABLE t1(a INTEGER UNIQUE);        CREATE TABLE t2(b TEXT UNIQUE);
    +INSERT INTO t1 VALUES('0');               INSERT INTO t2 VALUES(0);
    +INSERT INTO t1 VALUES('0.0');             INSERT INTO t2 VALUES(0.0);
    +
    + +

    In the sequence on the left, the second insert will fail. In this case, +the strings '0' and '0.0' are treated as numbers since they are being +inserted into a numeric column but 0==0.0 which violates the uniqueness +constraint. However, the second insert in the right-hand sequence works. In +this case, the constants 0 and 0.0 are treated as strings which means that +they are distinct.

    + +

    SQLite always converts numbers into double-precision (64-bit) floats +for comparison purposes. This means that a long sequence of digits that +differ only in insignificant digits will compare equal if they +are in a numeric column but will compare unequal if they are in a text +column. We have:

    + +
    +INSERT INTO t1                            INSERT INTO t2
    +   VALUES('12345678901234567890');           VALUES(12345678901234567890);
    +INSERT INTO t1                            INSERT INTO t2
    +   VALUES('12345678901234567891');           VALUES(12345678901234567891);
    +
    + +

    As before, the second insert on the left will fail because the comparison +will convert both strings into floating-point numbers first and the only +difference in the strings is in the 20th digit which exceeds the resolution +of a 64-bit float. In contrast, the second insert on the right will work +because in that case, the numbers being inserted are strings and are +compared using memcmp().

    + +

    +Numeric and text types make a difference for the DISTINCT keyword too: +

    + +
    +CREATE TABLE t3(a INTEGER);               CREATE TABLE t4(b TEXT);
    +INSERT INTO t3 VALUES('0');               INSERT INTO t4 VALUES(0);
    +INSERT INTO t3 VALUES('0.0');             INSERT INTO t4 VALUES(0.0);
    +SELECT DISTINCT * FROM t3;                SELECT DISTINCT * FROM t4;
    +
    + +

    +The SELECT statement on the left returns a single row since '0' and '0.0' +are treated as numbers and are therefore indistinct. But the SELECT +statement on the right returns two rows since 0 and 0.0 are treated +as strings which are different.

    +
    +This page last modified 2008/10/28 18:18:52 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/datatypes.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/datatypes.tcl --- sqlite3-3.4.2/www/datatypes.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/datatypes.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,243 +0,0 @@ -# -# Run this script to generated a datatypes.html output file -# -set rcsid {$Id: datatypes.tcl,v 1.8 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {Datatypes In SQLite version 2} -puts { -

    Datatypes In SQLite Version 2

    - -

    1.0   Typelessness

    -

    -SQLite is "typeless". This means that you can store any -kind of data you want in any column of any table, regardless of the -declared datatype of that column. -(See the one exception to this rule in section 2.0 below.) -This behavior is a feature, not -a bug. A database is suppose to store and retrieve data and it -should not matter to the database what format that data is in. -The strong typing system found in most other SQL engines and -codified in the SQL language spec is a misfeature - -it is an example of the implementation showing through into the -interface. SQLite seeks to overcome this misfeature by allowing -you to store any kind of data into any kind of column and by -allowing flexibility in the specification of datatypes. -

    - -

    -A datatype to SQLite is any sequence of zero or more names -optionally followed by a parenthesized lists of one or two -signed integers. Notice in particular that a datatype may -be zero or more names. That means that an empty -string is a valid datatype as far as SQLite is concerned. -So you can declare tables where the datatype of each column -is left unspecified, like this: -

    - -
    -CREATE TABLE ex1(a,b,c);
    -
    - -

    -Even though SQLite allows the datatype to be omitted, it is -still a good idea to include it in your CREATE TABLE statements, -since the data type often serves as a good hint to other -programmers about what you intend to put in the column. And -if you ever port your code to another database engine, that -other engine will probably require a datatype of some kind. -SQLite accepts all the usual datatypes. For example: -

    - -
    -CREATE TABLE ex2(
    -  a VARCHAR(10),
    -  b NVARCHAR(15),
    -  c TEXT,
    -  d INTEGER,
    -  e FLOAT,
    -  f BOOLEAN,
    -  g CLOB,
    -  h BLOB,
    -  i TIMESTAMP,
    -  j NUMERIC(10,5)
    -  k VARYING CHARACTER (24),
    -  l NATIONAL VARYING CHARACTER(16)
    -);
    -
    - -

    -And so forth. Basically any sequence of names optionally followed by -one or two signed integers in parentheses will do. -

    - -

    2.0   The INTEGER PRIMARY KEY

    - -

    -One exception to the typelessness of SQLite is a column whose type -is INTEGER PRIMARY KEY. (And you must use "INTEGER" not "INT". -A column of type INT PRIMARY KEY is typeless just like any other.) -INTEGER PRIMARY KEY columns must contain a 32-bit signed integer. Any -attempt to insert non-integer data will result in an error. -

    - -

    -INTEGER PRIMARY KEY columns can be used to implement the equivalent -of AUTOINCREMENT. If you try to insert a NULL into an INTEGER PRIMARY -KEY column, the column will actually be filled with a integer that is -one greater than the largest key already in the table. Or if the -largest key is 2147483647, then the column will be filled with a -random integer. Either way, the INTEGER PRIMARY KEY column will be -assigned a unique integer. You can retrieve this integer using -the sqlite_last_insert_rowid() API function or using the -last_insert_rowid() SQL function in a subsequent SELECT statement. -

    - -

    3.0   Comparison and Sort Order

    - -

    -SQLite is typeless for the purpose of deciding what data is allowed -to be stored in a column. But some notion of type comes into play -when sorting and comparing data. For these purposes, a column or -an expression can be one of two types: numeric and text. -The sort or comparison may give different results depending on which -type of data is being sorted or compared. -

    - -

    -If data is of type text then the comparison is determined by -the standard C data comparison functions memcmp() or -strcmp(). The comparison looks at bytes from two inputs one -by one and returns the first non-zero difference. -Strings are '\000' terminated so shorter -strings sort before longer strings, as you would expect. -

    - -

    -For numeric data, this situation is more complex. If both inputs -look like well-formed numbers, then they are converted -into floating point values using atof() and compared numerically. -If one input is not a well-formed number but the other is, then the -number is considered to be less than the non-number. If neither inputs -is a well-formed number, then strcmp() is used to do the -comparison. -

    - -

    -Do not be confused by the fact that a column might have a "numeric" -datatype. This does not mean that the column can contain only numbers. -It merely means that if the column does contain a number, that number -will sort in numerical order. -

    - -

    -For both text and numeric values, NULL sorts before any other value. -A comparison of any value against NULL using operators like "<" or -">=" is always false. -

    - -

    4.0   How SQLite Determines Datatypes

    - -

    -For SQLite version 2.6.3 and earlier, all values used the numeric datatype. -The text datatype appears in version 2.7.0 and later. In the sequel it -is assumed that you are using version 2.7.0 or later of SQLite. -

    - -

    -For an expression, the datatype of the result is often determined by -the outermost operator. For example, arithmetic operators ("+", "*", "%") -always return a numeric results. The string concatenation operator -("||") returns a text result. And so forth. If you are ever in doubt -about the datatype of an expression you can use the special typeof() -SQL function to determine what the datatype is. For example: -

    - -
    -sqlite> SELECT typeof('abc'+123);
    -numeric
    -sqlite> SELECT typeof('abc'||123);
    -text
    -
    - -

    -For table columns, the datatype is determined by the type declaration -of the CREATE TABLE statement. The datatype is text if and only if -the type declaration contains one or more of the following strings: -

    - -
    -BLOB
    -CHAR
    -CLOB
    -TEXT -
    - -

    -The search for these strings in the type declaration is case insensitive, -of course. If any of the above strings occur anywhere in the type -declaration, then the datatype of the column is text. Notice that -the type "VARCHAR" contains "CHAR" as a substring so it is considered -text.

    - -

    If none of the strings above occur anywhere in the type declaration, -then the datatype is numeric. Note in particular that the datatype for columns -with an empty type declaration is numeric. -

    - -

    5.0   Examples

    - -

    -Consider the following two command sequences: -

    - -
    -CREATE TABLE t1(a INTEGER UNIQUE);        CREATE TABLE t2(b TEXT UNIQUE);
    -INSERT INTO t1 VALUES('0');               INSERT INTO t2 VALUES(0);
    -INSERT INTO t1 VALUES('0.0');             INSERT INTO t2 VALUES(0.0);
    -
    - -

    In the sequence on the left, the second insert will fail. In this case, -the strings '0' and '0.0' are treated as numbers since they are being -inserted into a numeric column but 0==0.0 which violates the uniqueness -constraint. However, the second insert in the right-hand sequence works. In -this case, the constants 0 and 0.0 are treated a strings which means that -they are distinct.

    - -

    SQLite always converts numbers into double-precision (64-bit) floats -for comparison purposes. This means that a long sequence of digits that -differ only in insignificant digits will compare equal if they -are in a numeric column but will compare unequal if they are in a text -column. We have:

    - -
    -INSERT INTO t1                            INSERT INTO t2
    -   VALUES('12345678901234567890');           VALUES(12345678901234567890);
    -INSERT INTO t1                            INSERT INTO t2
    -   VALUES('12345678901234567891');           VALUES(12345678901234567891);
    -
    - -

    As before, the second insert on the left will fail because the comparison -will convert both strings into floating-point number first and the only -difference in the strings is in the 20-th digit which exceeds the resolution -of a 64-bit float. In contrast, the second insert on the right will work -because in that case, the numbers being inserted are strings and are -compared using memcmp().

    - -

    -Numeric and text types make a difference for the DISTINCT keyword too: -

    - -
    -CREATE TABLE t3(a INTEGER);               CREATE TABLE t4(b TEXT);
    -INSERT INTO t3 VALUES('0');               INSERT INTO t4 VALUES(0);
    -INSERT INTO t3 VALUES('0.0');             INSERT INTO t4 VALUES(0.0);
    -SELECT DISTINCT * FROM t3;                SELECT DISTINCT * FROM t4;
    -
    - -

    -The SELECT statement on the left returns a single row since '0' and '0.0' -are treated as numbers and are therefore indistinct. But the SELECT -statement on the right returns two rows since 0 and 0.0 are treated -a strings which are different.

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/different.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/different.html --- sqlite3-3.4.2/www/different.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/different.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,308 @@ + + +Distinctive Features Of SQLite + + + + + +

    +This page highlights some of the characteristics of SQLite that are +unusual and which make SQLite different from many other SQL +database engines. +

    + + +

    Zero-Configuration

    +
    + SQLite does not need to be "installed" before it is used. + There is no "setup" procedure. There is no + server process that needs to be started, stopped, or configured. + There is + no need for an administrator to create a new database instance or assign + access permissions to users. + SQLite uses no configuration files. + Nothing needs to be done to tell the system that SQLite is running. + No actions are required to recover after a system crash or power failure. + There is nothing to troubleshoot. +

    + SQLite just works. +

    + Other more familiar database engines run great once you get them going. + But doing the initial installation and configuration can be + intimidatingly complex. +

    + +

    Serverless

    +
    + Most SQL database engines are implemented as a separate server + process. Programs that want to access the database communicate + with the server using some kind of interprocess communication + (typically TCP/IP) to send requests to the server and to receive + back results. SQLite does not work this way. With SQLite, the + process that wants to access the database reads and writes + directly from the database files on disk. There is no intermediary + server process. +

    + There are advantages and disadvantages to being serverless. The + main advantage is that there is no separate server process + to install, setup, configure, initialize, manage, and troubleshoot. + This is one reason why SQLite is a "zero-configuration" database + engine. Programs that use SQLite require no administrative support + for setting up the database engine before they are run. Any program + that is able to access the disk is able to use an SQLite database. +

    + On the other hand, a database engine that uses a server can provide + better protection from bugs in the client application - stray pointers + in a client cannot corrupt memory on the server. And because a server + is a single persistent process, it is able to control database access with + more precision, allowing for finer grain locking and better concurrency. +

    + Most SQL database engines are client/server based. Of those that are + serverless, SQLite is the only one that this author knows of that + allows multiple applications to access the same database at the same time. +

    + +

    Single Database File

    +
    + An SQLite database is a single ordinary disk file that can be located + anywhere in the directory hierarchy. If SQLite can read + the disk file then it can read anything in the database. If the disk + file and its directory are writable, then SQLite can change anything + in the database. Database files can easily be copied onto a USB + memory stick or emailed for sharing. +

    + Other SQL database engines tend to store data as a large collection of + files. Often these files are in a standard location that only the + database engine itself can access. This makes the data more secure, + but also makes it harder to access. Some SQL database engines provide + the option of writing directly to disk and bypassing the filesystem + all together. This provides added performance, but at the cost of + considerable setup and maintenance complexity. +

    + +

    Stable Cross-Platform Database File

    +
    + The SQLite file format is cross-platform. A database file written + on one machine can be copied to and used on a different machine with + a different architecture. Big-endian or little-endian, 32-bit or + 64-bit does not matter. All machines use the same file format. + Furthermore, the developers have pledged to keep the file format + stable and backwards compatible, so newer versions of SQLite can + read and write older database files. +

    + Most other SQL database engines require you to dump and restore + the database when moving from one platform to another and often + when upgrading to a newer version of the software. +

    + +

    Compact

    +
    + When optimized for size, the whole SQLite library with everything enabled + is less than 225KiB in size (as measured on an ix86 using the "size" + utility from the GNU compiler suite.) Unneeded features can be disabled + at compile-time to further reduce the size of the library to under + 170KiB if desired. +

    + Most other SQL database engines are much larger than this. IBM boasts + that its recently released CloudScape database engine is "only" a 2MiB + jar file - 10 times larger than SQLite even after it is compressed! + Firebird boasts that its client-side library is only 350KiB. That's + 50% larger than SQLite and does not even contain the database engine. + The Berkeley DB library from Sleepycat is 450KiB and it omits SQL + support, providing the programmer with only simple key/value pairs. +

    + +

    Manifest typing

    +
    + Most SQL database engines use static typing. A datatype is associated + with each column in a table and only values of that particular datatype + are allowed to be stored in that column. SQLite relaxes this restriction + by using manifest typing. + In manifest typing, the datatype is a property of the value itself, not + of the column in which the value is stored. + SQLite thus allows the user to store + any value of any datatype into any column regardless of the declared type + of that column. (There are some exceptions to this rule: An INTEGER + PRIMARY KEY column may only store integers. And SQLite attempts to coerce + values into the declared datatype of the column when it can.) +

    + As far as we can tell, the SQL language specification allows the use + of manifest typing. Nevertheless, most other SQL database engines are + statically typed and so some people + feel that the use of manifest typing is a bug in SQLite. But the authors + of SQLite feel very strongly that this is a feature. The use of manifest + typing in SQLite is a deliberate design decision which has proven in practice + to make SQLite more reliable and easier to use, especially when used in + combination with dynamically typed programming languages such as Tcl and + Python. +

    + +

    Variable-length records

    +
    + Most other SQL database engines allocate a fixed amount of disk space + for each row in most tables. They play special tricks for handling + BLOBs and CLOBs which can be of wildly varying length. But for most + tables, if you declare a column to be a VARCHAR(100) then the database + engine will allocate + 100 bytes of disk space regardless of how much information you actually + store in that column. +

    + SQLite, in contrast, uses only the amount of disk space actually + needed to store the information in a row. If you store a single + character in a VARCHAR(100) column, then only a single byte of disk + space is consumed. (Actually two bytes - there is some overhead at + the beginning of each column to record its datatype and length.) +

    + The use of variable-length records by SQLite has a number of advantages. + It results in smaller database files, obviously. It also makes the + database run faster, since there is less information to move to and from + disk. And, the use of variable-length records makes it possible for + SQLite to employ manifest typing instead of static typing. +

    + +

    Readable source code

    +
    + The source code to SQLite is designed to be readable and accessible to + the average programmer. All procedures and data structures and many + automatic variables are carefully commented with useful information about + what they do. Boilerplate commenting is omitted. +
    + +

    SQL statements compile into virtual machine code

    +
    + Every SQL database engine compiles each SQL statement into some kind of + internal data structure which is then used to carry out the work of the + statement. But in most SQL engines that internal data structure is a + complex web of interlinked structures and objects. In SQLite, the compiled + form of statements is a short program in a machine-language like + representation. Users of the database can view this + virtual machine language + by prepending the EXPLAIN keyword + to a query. +
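    For instance, prefixing a statement with EXPLAIN returns the compiled program rather than executing it; each row of the result describes one virtual machine instruction (a hedged sketch, the exact opcodes produced vary between SQLite versions):

    -- Show the virtual machine program for a query instead of running it.
    EXPLAIN SELECT name FROM sqlite_master WHERE type = 'table';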

    + The use of a virtual machine in SQLite has been a great benefit to the + library's development. The virtual machine provides a crisp, well-defined + junction between the front-end of SQLite (the part that parses SQL + statements and generates virtual machine code) and the back-end (the + part that executes the virtual machine code and computes a result.) + The virtual machine allows the developers to see clearly and in an + easily readable form what SQLite is trying to do with each statement + it compiles, which is a tremendous help in debugging. + Depending on how it is compiled, SQLite also has the capability of + tracing the execution of the virtual machine - printing each + virtual machine instruction and its result as it executes. +

    + +

    Public domain

    +
    + The source code for SQLite is in the public domain. No claim of copyright + is made on any part of the core source code. (The documentation and test + code is a different matter - some sections of documentation and test logic + are governed by open-source licenses.) All contributors to the + SQLite core software have signed affidavits specifically disavowing any + copyright interest in the code. This means that anybody is able to legally + do anything they want with the SQLite source code. +

    + There are other SQL database engines with liberal licenses that allow + the code to be broadly and freely used. But those other engines are + still governed by copyright law. SQLite is different in that copyright + law simply does not apply. +

    + The source code files for other SQL database engines typically begin + with a comment describing your license rights to view and copy that file. + The SQLite source code contains no license since it is not governed by + copyright. Instead of a license, the SQLite source code offers a blessing: +

    + May you do good and not evil
    + May you find forgiveness for yourself and forgive others
    + May you share freely, never taking more than you give.
    +
    +
    + +

    SQL language extensions

    +
    + SQLite provides a number of enhancements to the SQL language + not normally found in other database engines. + The EXPLAIN keyword and manifest typing have already been mentioned + above. SQLite also provides statements such as + REPLACE and the + ON CONFLICT clause that allow for + added control over the resolution of constraint conflicts. + SQLite supports ATTACH and + DETACH commands that allow multiple + independent databases to be used together in the same query. + And SQLite defines APIs that allow the user to add new + SQL functions + and collating sequences. +
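    A brief hedged sketch of two of these extensions (the file name 'archive.db' and its contents are hypothetical):

    -- INSERT OR REPLACE resolves a uniqueness conflict by replacing the old row.
    CREATE TABLE settings(name TEXT PRIMARY KEY, value);
    INSERT OR REPLACE INTO settings VALUES('volume', 11);

    -- ATTACH and DETACH let one connection work with several database files,
    -- assuming archive.db already contains a compatible settings table.
    ATTACH DATABASE 'archive.db' AS archive;
    INSERT INTO archive.settings SELECT * FROM settings;
    DETACH DATABASE archive;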
    + +
    +This page last modified 2008/03/03 14:21:11 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/different.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/different.tcl --- sqlite3-3.4.2/www/different.tcl 2007-06-12 12:39:39.000000000 +0100 +++ sqlite3-3.6.16/www/different.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,224 +0,0 @@ -set rcsid {$Id: different.tcl,v 1.8 2006/12/18 14:12:21 drh Exp $} -source common.tcl -header {Distinctive Features Of SQLite} -puts { -

    -This page highlights some of the characteristics of SQLite that are -unusual and which make SQLite different from many other SQL -database engines. -

    -} -proc feature {tag name text} { - puts "" - puts "

    $name

    \n" - puts "
    $text
    \n" -} - -feature zeroconfig {Zero-Configuration} { - SQLite does not need to be "installed" before it is used. - There is no "setup" procedure. There is no - server process that needs to be started, stopped, or configured. - There is - no need for an administrator to create a new database instance or assign - access permissions to users. - SQLite uses no configuration files. - Nothing needs to be done to tell the system that SQLite is running. - No actions are required to recover after a system crash or power failure. - There is nothing to troubleshoot. -

    - SQLite just works. -

    - Other more familiar database engines run great once you get them going. - But doing the initial installation and configuration can be - intimidatingly complex. -} - -feature serverless {Serverless} { - Most SQL database engines are implemented as a separate server - process. Programs that want to access the database communicate - with the server using some kind of interprocess communcation - (typically TCP/IP) to send requests to the server and to receive - back results. SQLite does not work this way. With SQLite, the - process that wants to access the database reads and writes - directly from the database files on disk. There is no intermediary - server process. -

    - There are advantages and disadvantages to being serverless. The - main advantage is that there is no separate server process - to install, setup, configure, initialize, manage, and troubleshoot. - This is one reason why SQLite is a "zero-configuration" database - engine. Programs that use SQLite require no administrative support - for setting up the database engine before they are run. Any program - that is able to access the disk is able to use an SQLite database. -

    - On the other hand, a database engine that uses a server can provide - better protection from bugs in the client application - stray pointers - in a client cannot corrupt memory on the server. And because a server - is a single persistent process, it is able control database access with - more precision, allowing for finer grain locking and better concurrancy. -

    - Most SQL database engines are client/server based. Of those that are - serverless, SQLite is the only one that this author knows of that - allows multiple applications to access the same database at the same time. -} - -feature onefile {Single Database File} { - An SQLite database is a single ordinary disk file that can be located - anywhere in the directory hierarchy. If SQLite can read - the disk file then it can read anything in the database. If the disk - file and its directory are writable, then SQLite can change anything - in the database. Database files can easily be copied onto a USB - memory stick or emailed for sharing. -

    - Other SQL database engines tend to store data as a large collection of - files. Often these files are in a standard location that only the - database engine itself can access. This makes the data more secure, - but also makes it harder to access. Some SQL database engines provide - the option of writing directly to disk and bypassing the filesystem - all together. This provides added performance, but at the cost of - considerable setup and maintenance complexity. -} - -feature small {Compact} { - When optimized for size, the whole SQLite library with everything enabled - is less than 225KiB in size (as measured on an ix86 using the "size" - utility from the GNU compiler suite.) Unneeded features can be disabled - at compile-time to further reduce the size of the library to under - 170KiB if desired. -

    - Most other SQL database engines are much larger than this. IBM boasts - that it's recently released CloudScape database engine is "only" a 2MiB - jar file - 10 times larger than SQLite even after it is compressed! - Firebird boasts that it's client-side library is only 350KiB. That's - 50% larger than SQLite and does not even contain the database engine. - The Berkeley DB library from Sleepycat is 450KiB and it omits SQL - support, providing the programmer with only simple key/value pairs. -} - -feature typing {Manifest typing} { - Most SQL database engines use static typing. A datatype is associated - with each column in a table and only values of that particular datatype - are allowed to be stored in that column. SQLite relaxes this restriction - by using manifest typing. - In manifest typing, the datatype is a property of the value itself, not - of the column in which the value is stored. - SQLite thus allows the user to store - any value of any datatype into any column regardless of the declared type - of that column. (There are some exceptions to this rule: An INTEGER - PRIMARY KEY column may only store integers. And SQLite attempts to coerce - values into the declared datatype of the column when it can.) -

    - As far as we can tell, the SQL language specification allows the use - of manifest typing. Nevertheless, most other SQL database engines are - statically typed and so some people - feel that the use of manifest typing is a bug in SQLite. But the authors - of SQLite feel very strongly that this is a feature. The use of manifest - typing in SQLite is a deliberate design decision which has proven in practice - to make SQLite more reliable and easier to use, especially when used in - combination with dynamically typed programming languages such as Tcl and - Python. -} - -feature flex {Variable-length records} { - Most other SQL database engines allocated a fixed amount of disk space - for each row in most tables. They play special tricks for handling - BLOBs and CLOBs which can be of wildly varying length. But for most - tables, if you declare a column to be a VARCHAR(100) then the database - engine will allocate - 100 bytes of disk space regardless of how much information you actually - store in that column. -

    - SQLite, in contrast, use only the amount of disk space actually - needed to store the information in a row. If you store a single - character in a VARCHAR(100) column, then only a single byte of disk - space is consumed. (Actually two bytes - there is some overhead at - the beginning of each column to record its datatype and length.) -

    - The use of variable-length records by SQLite has a number of advantages. - It results in smaller database files, obviously. It also makes the - database run faster, since there is less information to move to and from - disk. And, the use of variable-length records makes it possible for - SQLite to employ manifest typing instead of static typing. -} - -feature readable {Readable source code} { - The source code to SQLite is designed to be readable and accessible to - the average programmer. All procedures and data structures and many - automatic variables are carefully commented with useful information about - what they do. Boilerplate commenting is omitted. -} - -feature vdbe {SQL statements compile into virtual machine code} { - Every SQL database engine compiles each SQL statement into some kind of - internal data structure which is then used to carry out the work of the - statement. But in most SQL engines that internal data structure is a - complex web of interlinked structures and objects. In SQLite, the compiled - form of statements is a short program in a machine-language like - representation. Users of the database can view this - virtual machine language - by prepending the EXPLAIN keyword - to a query. -

- The use of a virtual machine in SQLite has been a great benefit to - the library's development. The virtual machine provides a crisp, well-defined - junction between the front-end of SQLite (the part that parses SQL - statements and generates virtual machine code) and the back-end (the - part that executes the virtual machine code and computes a result.) - The virtual machine allows the developers to see clearly and in an - easily readable form what SQLite is trying to do with each statement - it compiles, which is a tremendous help in debugging. - Depending on how it is compiled, SQLite also has the capability of - tracing the execution of the virtual machine - printing each - virtual machine instruction and its result as it executes. -} - -#feature binding {Tight bindings to dynamic languages} { -# Because it is embedded, SQLite can have a much tighter and more natural -# binding to high-level dynamic languages such as Tcl, Perl, Python, -# PHP, and Ruby. -# For example, -#} - -feature license {Public domain} { - The source code for SQLite is in the public domain. No claim of copyright - is made on any part of the core source code. (The documentation and test - code is a different matter - some sections of documentation and test logic - are governed by open-source licenses.) All contributors to the - SQLite core software have signed affidavits specifically disavowing any - copyright interest in the code. This means that anybody is able to legally - do anything they want with the SQLite source code. -

    - There are other SQL database engines with liberal licenses that allow - the code to be broadly and freely used. But those other engines are - still governed by copyright law. SQLite is different in that copyright - law simply does not apply. -

    - The source code files for other SQL database engines typically begin - with a comment describing your license rights to view and copy that file. - The SQLite source code contains no license since it is not governed by - copyright. Instead of a license, the SQLite source code offers a blessing: -

    - May you do good and not evil
    - May you find forgiveness for yourself and forgive others
    - May you share freely, never taking more than you give.
    -
    -} - -feature extensions {SQL language extensions} { - SQLite provides a number of enhancements to the SQL language - not normally found in other database engines. - The EXPLAIN keyword and manifest typing have already been mentioned - above. SQLite also provides statements such as - REPLACE and the - ON CONFLICT clause that allow for - added control over the resolution of constraint conflicts. - SQLite supports ATTACH and - DETACH commands that allow multiple - independent databases to be used together in the same query. - And SQLite defines APIs that allows the user to add new - SQL functions - and collating sequences. -} - - -footer $rcsid Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/direct1b.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/direct1b.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/doc_backlink_crossref.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/doc_backlink_crossref.html --- sqlite3-3.4.2/www/doc_backlink_crossref.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/doc_backlink_crossref.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,75 @@ + + +Backlink Crossreference + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +
    +This page last modified 2009/06/08 13:38:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/doc_keyword_crossref.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/doc_keyword_crossref.html --- sqlite3-3.4.2/www/doc_keyword_crossref.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/doc_keyword_crossref.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,75 @@ + + +Keyword Crossreference + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +
    +This page last modified 2009/06/08 13:38:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/doc_pagelink_crossref.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/doc_pagelink_crossref.html --- sqlite3-3.4.2/www/doc_pagelink_crossref.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/doc_pagelink_crossref.html 2009-06-27 15:07:46.000000000 +0100 @@ -0,0 +1,75 @@ + + +Pagelink Crossreference + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    Target Page - Which pages reference it.

    Pages matching (news|changes|releaselog|[0-9]to[0-9]|^doc_.*_crossref) are skipped.


    +This page last modified 2009/06/08 13:38:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/docs.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/docs.html --- sqlite3-3.4.2/www/docs.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/docs.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,217 @@ + + +SQLite Documentation + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    Available Documentation

    + + + +
    Appropriate Uses For SQLite + This document describes situations where SQLite is an appropriate + database engine to use versus situations where a client/server + database engine might be a better choice. +
    Distinctive Features + This document enumerates and describes some of the features of + SQLite that make it different from other SQL database engines. +
    How SQLite Is Tested + The reliability and robustness of SQLite is achieved in large part + by thorough and careful testing. This document identifies the + many tests that occur before every release of SQLite. +
    Copyright + SQLite is in the public domain. This document describes what that means + and the implications for contributors. +
Frequently Asked Questions + The title of the document says it all... +
    SQLite Programming Interfaces
    + Documentation describing the APIs used to program SQLite, and the SQL + dialect that it interprets. +
    SQLite In 5 Minutes Or Less + A very quick introduction to programming with SQLite. +
    Introduction to the C/C++ API  + This document introduces the C/C++ API. Users should read this document + before the C/C++ API Reference Guide linked below. +
    C/C++ API Reference + This document describes each API function separately. +
    Tcl API + A description of the TCL interface bindings for SQLite. +
    SQL Syntax + This document describes the SQL language that is understood by + SQLite. +
    Pragma commands + This document describes SQLite performance tuning options and other + special purpose database commands. +
    Version 3 DataTypes  + SQLite version 3 introduces the concept of manifest typing, where the + type of a value is associated with the value itself, not the column that + it is stored in. + This page describes data typing for SQLite version 3 in further detail. +
    Null Handling + Different SQL database engines handle NULLs in different ways. The + SQL standards are ambiguous. This document describes how SQLite handles + NULLs in comparison with other SQL database engines. +
    Unsupported SQL + This page describes features of SQL that SQLite does not support. +
    SQLite Features and Extensions
    + Pages describing specific features or extension modules of SQLite. +
    Using The Online Backup Interface + The online-backup interface can be used to + copy content from a disk file into an in-memory database or vice + versa and it can make a hot backup of a live database. This application + note gives examples of how. +
    Sharing Cache Mode + Version 3.3.0 and later supports the ability for two or more + database connections to share the same page and schema cache. + This feature is useful for certain specialized applications. +
Unlock Notify + The "unlock notify" feature can be used in conjunction with shared + cache mode to more efficiently manage resource conflicts (database + table locks). +
    Asynchronous IO Mode + This page describes the asynchronous IO extension developed alongside + SQLite. Using asynchronous IO can cause SQLite to appear more responsive + by delegating database writes to a background thread. +
    Virtual R-Tree Tables + A description of the SQLite R-Tree extension. An R-Tree is a specialized + data structure that supports fast multi-dimensional range queries often + used in geo-spatial systems. +
    Upgrading SQLite, Backwards Compatibility
    Moving From SQLite 3.5 to 3.6 + A document describing the differences between SQLite version 3.5.9 + and 3.6.0. +
    Moving From SQLite 3.4 to 3.5 + A document describing the differences between SQLite version 3.4.2 + and 3.5.0. +
    Release History + A chronology of SQLite releases going back to version 1.0.0 +
    Backwards Compatibility + This document details all of the incompatible changes to the SQLite + file format that have occurred since version 1.0.0. +
    SQLite Technical/Design Documentation
    Temporary Files Used By SQLite + SQLite can potentially use many different temporary files when + processing certain SQL statements. This document describes the + many kinds of temporary files that SQLite uses and offers suggestions + for avoiding them on systems where creating a temporary file is an + expensive operation. +
    How SQLite Implements Atomic Commit + A description of the logic within SQLite that implements + transactions with atomic commit, even in the face of power + failures. +
    Locking And Concurrency
    In SQLite Version 3
    + A description of how the new locking code in version 3 increases + concurrency and decreases the problem of writer starvation. +
    Overview Of The Optimizer + A quick overview of the various query optimizations that are + attempted by the SQLite code generator. +
    Architecture + An architectural overview of the SQLite library, useful for those who want + to hack the code. +
    VDBE Opcodes + This document is an automatically generated description of the various + opcodes that the VDBE understands. Programmers can use this document as + a reference to better understand the output of EXPLAIN listings from + SQLite. +
    SQLite File Format + A description of the format used for SQLite database and journal files, and + other details required to create software to read and write SQLite + databases without using SQLite. +
    Compilation Options + This document describes the compile time options that may be set to + modify the default behavior of the library or omit optional features + in order to reduce binary size. +
    Old Documents
    + These documents either pertain to SQLite version 2 or were written + during the transition period between versions 2 and 3. They are no longer + strictly applicable to recent versions of SQLite, but the information + found within may still be useful. +
    Version 2 C/C++ API + A description of the C/C++ interface bindings for SQLite through version + 2.8 +
    Version 2 DataTypes  + A description of how SQLite version 2 handles SQL datatypes. + Short summary: Everything is a string. +
VDBE Tutorial + The VDBE is the subsystem within SQLite that does the actual work of + executing SQL statements. This page describes the principles of operation + for the VDBE in SQLite version 2.7. This is essential reading for anyone + who wants to modify the SQLite sources. +
SQLite Version 3 + A summary of the changes between SQLite version 2.8 and SQLite version 3.0. +
Version 3 C/C++ API + A summary of the API-related changes between SQLite version 2.8 and + SQLite version 3.0. +
    Speed Comparison + The speed of version 2.7.6 of SQLite is compared against PostgreSQL and + MySQL. +
    +
    +This page last modified 2009/06/02 19:16:23 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/docs.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/docs.tcl --- sqlite3-3.4.2/www/docs.tcl 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/www/docs.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,149 +0,0 @@ -# This script generates the "docs.html" page that describes various -# sources of documentation available for SQLite. -# -set rcsid {$Id: docs.tcl,v 1.14 2006/01/30 16:20:30 drh Exp $} -source common.tcl -header {SQLite Documentation} -puts { -

    Available Documentation

    - -} - -proc doc {name url desc} { - puts {" - puts {} - puts {} -} - -doc {Appropriate Uses For SQLite} {whentouse.html} { - This document describes situations where SQLite is an approriate - database engine to use versus situations where a client/server - database engine might be a better choice. -} - -doc {Distinctive Features} {different.html} { - This document enumerates and describes some of the features of - SQLite that make it different from other SQL database engines. -} - -doc {SQLite In 5 Minutes Or Less} {quickstart.html} { - A very quick introduction to programming with SQLite. -} - -doc {SQL Syntax} {lang.html} { - This document describes the SQL language that is understood by - SQLite. -} -doc {Version 3 C/C++ API
    Reference} {capi3ref.html} { - This document describes each API function separately. -} -doc {Sharing Cache Mode} {sharedcache.html} { - Version 3.3.0 and later supports the ability for two or more - database connections to share the same page and schema cache. - This feature is useful for certain specialized applications. -} -doc {Tcl API} {tclsqlite.html} { - A description of the TCL interface bindings for SQLite. -} - -doc {Pragma commands} {pragma.html} { - This document describes SQLite performance tuning options and other - special purpose database commands. -} -doc {SQLite Version 3} {version3.html} { - A summary of of the changes between SQLite version 2.8 and SQLite version 3.0. -} -doc {Version 3 C/C++ API} {capi3.html} { - A description of the C/C++ interface bindings for SQLite version 3.0.0 - and following. -} -doc {Version 3 DataTypes } {datatype3.html} { - SQLite version 3 introduces the concept of manifest typing, where the - type of a value is associated with the value itself, not the column that - it is stored in. - This page describes data typing for SQLite version 3 in further detail. -} - -doc {Locking And Concurrency
    In SQLite Version 3} {lockingv3.html} { - A description of how the new locking code in version 3 increases - concurrancy and decreases the problem of writer starvation. -} - -doc {Overview Of The Optimizer} {optoverview.html} { - A quick overview of the various query optimizations that are - attempted by the SQLite code generator. -} - - -doc {Null Handling} {nulls.html} { - Different SQL database engines handle NULLs in different ways. The - SQL standards are ambiguous. This document describes how SQLite handles - NULLs in comparison with other SQL database engines. -} - -doc {Copyright} {copyright.html} { - SQLite is in the public domain. This document describes what that means - and the implications for contributors. -} - -doc {Unsupported SQL} {omitted.html} { - This page describes features of SQL that SQLite does not support. -} - -doc {Version 2 C/C++ API} {c_interface.html} { - A description of the C/C++ interface bindings for SQLite through version - 2.8 -} - - -doc {Version 2 DataTypes } {datatypes.html} { - A description of how SQLite version 2 handles SQL datatypes. - Short summary: Everything is a string. -} - -doc {Release History} {changes.html} { - A chronology of SQLite releases going back to version 1.0.0 -} - - -doc {Speed Comparison} {speed.html} { - The speed of version 2.7.6 of SQLite is compared against PostgreSQL and - MySQL. -} - -doc {Architecture} {arch.html} { - An architectural overview of the SQLite library, useful for those who want - to hack the code. -} - -doc {VDBE Tutorial} {vdbe.html} { - The VDBE is the subsystem within SQLite that does the actual work of - executing SQL statements. This page describes the principles of operation - for the VDBE in SQLite version 2.7. This is essential reading for anyone - who want to modify the SQLite sources. -} - -doc {VDBE Opcodes} {opcode.html} { - This document is an automatically generated description of the various - opcodes that the VDBE understands. Programmers can use this document as - a reference to better understand the output of EXPLAIN listings from - SQLite. -} - -doc {Compilation Options} {compile.html} { - This document describes the compile time options that may be set to - modify the default behaviour of the library or omit optional features - in order to reduce binary size. -} - -doc {Backwards Compatibility} {formatchng.html} { - This document details all of the incompatible changes to the SQLite - file format that have occurred since version 1.0.0. -} - -puts {
    } - regsub -all { +} $name {\ } name - puts "$name} - puts $desc - puts {
    } -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/doc_target_crossref.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/doc_target_crossref.html --- sqlite3-3.4.2/www/doc_target_crossref.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/doc_target_crossref.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,75 @@ + + +Target Crossreference + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +
    +This page last modified 2009/06/08 13:38:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/download.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/download.html --- sqlite3-3.4.2/www/download.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/download.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,130 @@ + + +SQLite Download Page + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Download Page

    + + + +
    + + +

    Direct Access To The Sources

    + +

    +All SQLite source code is maintained in a +CVS repository that is +available for read-only access by anyone. You can +interactively view the +repository contents and download individual files +by visiting

    + +
    + +http://www.sqlite.org/cvstrac/dir?d=sqlite. +
    + +

    +To access the repository directly, use the following +commands: +

    + +
    +cvs -d :pserver:anonymous@www.sqlite.org:/sqlite login
    +cvs -d :pserver:anonymous@www.sqlite.org:/sqlite checkout sqlite
    +
    + +

    +When the first command prompts you for a password, enter "anonymous". +

    + +

    +To access the SQLite version 2.8 sources, begin by getting the 3.0 +tree as described above. Then update to the "version_2" branch +as follows: +

    + +
    +cvs update -r version_2
    +
    + +

    The documentation is maintained in a +fossil repository located +at:

    + +
    +http://www.sqlite.org/docsrc +
    +
    +This page last modified 2009/06/02 19:16:32 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/download.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/download.tcl --- sqlite3-3.4.2/www/download.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/download.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,236 +0,0 @@ -# -# Run this TCL script to generate HTML for the download.html file. -# -set rcsid {$Id: download.tcl,v 1.27 2007/05/08 18:30:36 drh Exp $} -source common.tcl -header {SQLite Download Page} - -puts { -

    SQLite Download Page

    - -} - -proc Product {pattern desc} { - regsub {V[23]} $pattern {*} p3 - regsub V2 $pattern {(2[0-9a-z._]+)} pattern - regsub V3 $pattern {(3[0-9a-z._]+)} pattern - set p2 [string map {* .*} $pattern] - set flist [glob -nocomplain $p3] - foreach file [lsort -dict $flist] { - if {![regexp ^$p2\$ $file all version]} continue - regsub -all _ $version . version - set size [file size $file] - set units bytes - if {$size>1024*1024} { - set size [format %.2f [expr {$size/(1024.0*1024.0)}]] - set units MiB - } elseif {$size>1024} { - set size [format %.2f [expr {$size/(1024.0)}]] - set units KiB - } - puts "" - puts "" - puts "" - regsub -all VERSION $desc $version d2 - puts "" - } -} -cd doc - -proc Heading {title} { - puts "" -} - -Heading {Precompiled Binaries for Linux} - -Product sqlite3-V3.bin.gz { - A command-line program for accessing and modifying - SQLite version 3.* databases. - See the documentation for additional information. -} - -Product sqlite-V3.bin.gz { - A command-line program for accessing and modifying - SQLite databases. - See the documentation for additional information. -} - -Product tclsqlite-V3.so.gz { - Bindings for Tcl/Tk. - You can import this shared library into either - tclsh or wish to get SQLite database access from Tcl/Tk. - See the documentation for details. -} - -Product sqlite-V3.so.gz { - A precompiled shared-library for Linux without the TCL bindings. -} - -Product fts1-V3.so.gz { - A precompiled - FTS1 Module - for Linux. -} - -Product fts2-V3.so.gz { - A precompiled - FTS2 Module - for Linux. -} - -Product sqlite-devel-V3.i386.rpm { - RPM containing documentation, header files, and static library for - SQLite version VERSION. -} -Product sqlite-V3-1.i386.rpm { - RPM containing shared libraries and the sqlite command-line - program for SQLite version VERSION. -} - -Product sqlite*_analyzer-V3.bin.gz { - An analysis program for database files compatible with SQLite - version VERSION and later. -} - -Heading {Precompiled Binaries For Windows} - -Product sqlite-V3.zip { - A command-line program for accessing and modifing SQLite databases. - See the documentation for additional information. -} -Product tclsqlite-V3.zip { - Bindings for Tcl/Tk. - You can import this shared library into either - tclsh or wish to get SQLite database access from Tcl/Tk. - See the documentation for details. -} -Product sqlitedll-V3.zip { - This is a DLL of the SQLite library without the TCL bindings. - The only external dependency is MSVCRT.DLL. -} - -Product fts1dll-V3.zip { - A precompiled - FTS1 Module - for win32. -} - -Product fts2dll-V3.zip { - A precompiled - FTS2 Module - for win32. -} - -Product sqlite*_analyzer-V3.zip { - An analysis program for database files compatible with SQLite version - VERSION and later. -} - - -Heading {Source Code} - -Product {sqlite-V3.tar.gz} { - A tarball of the complete source tree for SQLite version VERSION - including all of the documentation. -} - -Product {sqlite-source-V3.zip} { - This ZIP archive contains preprocessed C code for the SQLite library as - individual source files. - Unlike the tarballs below, all of the preprocessing and automatic - code generation has already been done on these C code files, so they - can be converted to object code directly with any ordinary C compiler. -} - -Product {sqlite-amalgamation-V3.zip} { - This ZIP archive contains all preprocessed C code combined into a - single source file (the - - amalgamation). 
-} - -Product {sqlite-V3-tea.tar.gz} { - A tarball of proprocessed source code together with a - Tcl Extension Architecture (TEA) - compatible configure script and makefile. -} - -Product {sqlite-V3.src.rpm} { - An RPM containing complete source code for SQLite version VERSION -} - -Heading {Cross-Platform Binaries} - -Product {sqlite-V3.kit} { - A starkit containing - precompiled SQLite binaries and Tcl bindings for Linux-x86, Windows, - and Mac OS-X ppc and x86. -} - -Heading {Historical Binaries And Source Code} - -Product sqlite-V2.bin.gz { - A command-line program for accessing and modifying - SQLite version 2.* databases on Linux-x86. -} -Product sqlite-V2.zip { - A command-line program for accessing and modifying - SQLite version 2.* databases on win32. -} - -Product sqlite*_analyzer-V2.bin.gz { - An analysis program for version 2.* database files on Linux-x86 -} -Product sqlite*_analyzer-V2.zip { - An analysis program for version 2.* database files on win32. -} -Product {sqlite-source-V2.zip} { - This ZIP archive contains C source code for the SQLite library - version VERSION. -} - - - - -puts { -
    " - puts "$file
    ($size $units)
    [string trim $d2]
    $title
    - - -

    Direct Access To The Sources Via Anonymous CVS

    - -

    -All SQLite source code is maintained in a -CVS repository that is -available for read-only access by anyone. You can -interactively view the -repository contents and download individual files -by visiting - -http://www.sqlite.org/cvstrac/dir?d=sqlite. -To access the repository directly, use the following -commands: -

    - -
    -cvs -d :pserver:anonymous@www.sqlite.org:/sqlite login
    -cvs -d :pserver:anonymous@www.sqlite.org:/sqlite checkout sqlite
    -
    - -

    -When the first command prompts you for a password, enter "anonymous". -

    - -

    -To access the SQLite version 2.8 sources, begin by getting the 3.0 -tree as described above. Then update to the "version_2" branch -as follows: -

    - -
    -cvs update -r version_2
    -
    - -} - -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/dynload.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/dynload.html --- sqlite3-3.4.2/www/dynload.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/dynload.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,136 @@ + + +How to build a dynamically loaded Tcl extension for SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +How To Build A Dynamically Loaded Tcl Extension +

    +

    +This note was contributed by +Bill Saunders. Thanks, Bill! +

    + +

    +To compile the SQLite Tcl extension into a dynamically loaded module +I did the following: +

    + +
      +
    1. Do a standard compile +(I had a dir called bld at the same level as sqlite ie + /root/bld + /root/sqlite +I followed the directions and did a standard build in the bld +directory)

    2. + +
    3. +Now do the following in the bld directory +

      +gcc -shared -I. -lgdbm ../sqlite/src/tclsqlite.c libsqlite.a -o sqlite.so
      +

    4. + +
    5. +This should produce the file sqlite.so in the bld directory

    6. + +
    7. +Create a pkgIndex.tcl file that contains this line + +

      +package ifneeded sqlite 1.0 [list load [file join $dir sqlite.so]]
      +

    8. + +
    9. +To use this put sqlite.so and pkgIndex.tcl in the same directory

    10. + +
    11. +From that directory start wish

    12. + +
13. +Execute the following tcl command (tells tcl where to find loadable +modules) +

      +lappend auto_path [exec pwd]
      +

    14. + +
    15. +Load the package +

      +package require sqlite
      +

    16. + +
    17. +Have fun....

    18. + +
      +This page last modified 2007/12/20 02:23:05 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/dynload.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/dynload.tcl --- sqlite3-3.4.2/www/dynload.tcl 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/www/dynload.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -# -# Run this Tcl script to generate the dynload.html file. -# -set rcsid {$Id: dynload.tcl,v 1.1 2001/02/11 16:58:22 drh Exp $} - -puts { - - How to build a dynamically loaded Tcl extension for SQLite - - -

    -How To Build A Dynamically Loaded Tcl Extension -

    } -puts {

    -This note was contributed by -Bill Saunders. Thanks, Bill! - -

    -To compile the SQLite Tcl extension into a dynamically loaded module -I did the following: -

    - -
      -
    1. Do a standard compile -(I had a dir called bld at the same level as sqlite ie - /root/bld - /root/sqlite -I followed the directions and did a standard build in the bld -directory)

    2. - -
    3. -Now do the following in the bld directory -

      -gcc -shared -I. -lgdbm ../sqlite/src/tclsqlite.c libsqlite.a -o sqlite.so
      -

    4. - -
    5. -This should produce the file sqlite.so in the bld directory

    6. - -
    7. -Create a pkgIndex.tcl file that contains this line - -

      -package ifneeded sqlite 1.0 [list load [file join $dir sqlite.so]]
      -

    8. - -
    9. -To use this put sqlite.so and pkgIndex.tcl in the same directory

    10. - -
    11. -From that directory start wish

    12. - -
13. -Execute the following tcl command (tells tcl where to find loadable -modules) -

      -lappend auto_path [exec pwd]
      -

    14. - -
    15. -Load the package -

      -package require sqlite
      -

    16. - -
    17. -Have fun....

    18. - - -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/errlist.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/errlist.html --- sqlite3-3.4.2/www/errlist.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/errlist.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,81 @@ + + +SQLite Error List + + + + + +
      + + + +
      +
      Small. Fast. Reliable.
      Choose any three.
      + +
      + +
      + + + +

      Error List

      +

This is a placeholder for upcoming, comprehensive documentation +on the errors returned by SQLite.

      + +
      +This page last modified 2009/05/11 17:40:08 UTC +
      diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/famous.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/famous.html --- sqlite3-3.4.2/www/famous.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/famous.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,287 @@ + + +Well-Known Users Of SQLite + + + + + +
      + + + +
      +
      Small. Fast. Reliable.
      Choose any three.
      + +
      + +
      + + + +

      Well-Known Users of SQLite:

      + +

      +A few of the better-known users of SQLite are shown below +in alphabetical order. +There is no complete list of projects and companies +that use SQLite. +SQLite is in the +public domain and so many people use it +in their projects without ever telling us. +

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +Adobe uses SQLite as the +application +file format for their + +Photoshop Lightroom product. +This is publicly acknowledged on the Lightroom/About_Lightroom menu popup. +Adobe has also announced that their +AIR project +will include SQLite. It +is reported that +Acrobat +Reader also uses SQLite. +
      + + + +Apple uses SQLite for many functions +within Mac OS-X, including +Apple Mail, +Safari, +and in Aperture. +There are unconfirmed reports on the internet that Apple also uses SQLite in +the iPhone and in the +iPod touch. +
      + + + + +The Firefox Web Browser from Mozilla +has been slowly replacing their legacy file format "mork" with SQLite +for about two years. At this point, SQLite is used to store most +persistent metadata in Firefox. +
+ + + +We believe that General Electric +uses SQLite in some product or +another because they twice wrote to the SQLite developers requesting +the US Export Control Number for SQLite. So presumably GE is using +SQLite in something that they are exporting. But nobody +(outside of GE) seems to know what that might be. +
      + + + +It is known that +Google +uses SQLite in their +Desktop for Mac, +in Google +Gears, and in the +Android cell-phone +operating system. +People are suspicious that Google uses SQLite for lots of other things +that we do not know about yet. +Engineers at Google have made extensive contributions to the +full-text search subsystem within SQLite. +
      + + +McAfee uses SQLite in its antivirus +programs. +Mentioned here and implied +here. +
+ + + +It can be inferred from + +traffic on the SQLite mailing list that at least +one group within +Microsoft +is using SQLite in the development of a +game program. No word yet if this game has actually been released or +if they are still using SQLite. +
      + + + +The +Monotone + configuration management system stores an entire project history in +an SQLite database. Each file is a separate BLOB. +
      + + + +It is reported that +Philips MP3 Players +use SQLite to store metadata about the music they hold. +Apparently, if you plug a Philips MP3 player into your USB port, you +can see the SQLite database file there in plain sight. +
      + + + +The popular +PHP +programming language comes with both SQLite2 and SQLite3 built in. +
      + + + +SQLite comes bundled with the +Python +programming language since Python 2.5. +
      + + + +The REALbasic +programming environment comes bundled with an enhanced version of +SQLite that supports AES encryption. +
      + + + +There are + +multiple + +sightings of SQLite in the Skype client for Mac OS X and Windows. +
      + + + +Solaris 10 +uses SQLite as the storage format for its Service Management Facility. +Thus, Sun has essentially replaced the traditional unix /etc/inittab +file with an SQLite database. +
      + + + +SQLite is an integral part of +Symbian's operating system commonly +found on high-end cellphones. +
      + + + +A representative of +Toshiba wrote to the SQLite developers +and requested the US Export Control Number for SQLite. We infer from this +that Toshiba is exporting something from the US that uses SQLite, but we +do not know what that something is. +
      +
      +This page last modified 2008/11/01 13:26:49 UTC +
      diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/faq.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/faq.html --- sqlite3-3.4.2/www/faq.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/faq.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,611 @@ + + +SQLite Frequently Asked Questions + + + + + +
      + + + +
      +
      Small. Fast. Reliable.
      Choose any three.
      + +
      + +
      + + + +

      Frequently Asked Questions

      See also SqliteWikiFaq.

      1. How do I create an AUTOINCREMENT field.
      2. What datatypes does SQLite support?
      3. SQLite lets me insert a string into a database column of type integer!
      4. Why doesn't SQLite allow me to use '0' and '0.0' as the primary + key on two different rows of the same table?
      5. Can multiple applications or multiple instances of the same + application access a single database file at the same time?
      6. Is SQLite threadsafe?
      7. How do I list all tables/indices contained in an SQLite database
      8. Are there any known size limits to SQLite databases?
      9. What is the maximum size of a VARCHAR in SQLite?
      10. Does SQLite support a BLOB type?
      11. How do I add or delete columns from an existing table in SQLite.
      12. I deleted a lot of data but the database file did not get any + smaller. Is this a bug?
      13. Can I use SQLite in my commercial product without paying royalties?
      14. How do I use a string literal that contains an embedded single-quote (') + character?
      15. What is an SQLITE_SCHEMA error, and why am I getting one?
      16. Why does ROUND(9.95,1) return 9.9 instead of 10.0? + Shouldn't 9.95 round up?
      17. I get hundreds of compiler warnings when I compile SQLite. + Isn't this a problem? Doesn't it indicate poor code quality?
      18. Case-insensitive matching of Unicode characters does not work.
19. INSERT is really slow - I can only do a few dozen INSERTs per second
      20. I accidentally deleted some important information from my SQLite database. + How can I recover it?
      21. What is an SQLITE_CORRUPT error? What does it mean for the database + to be "malformed"? Why am I getting this error?
      22. Does SQLite support foreign keys?
      23. I get a compiler error if I use the SQLITE_OMIT_... + compile-time options when building SQLite.
      24. My WHERE clause expression column1="column1" does not work. + It causes every row of the table to be returned, not just the rows + where column1 has the value "column1".
      25. How are the syntax diagrams (a.k.a. "railroad" diagrams) for + SQLite generated?
      +

      (1) How do I create an AUTOINCREMENT field.

      +

      Short answer: A column declared INTEGER PRIMARY KEY will + autoincrement.

      + +

Here is the long answer: + If you declare a column of a table to be INTEGER PRIMARY KEY, then + whenever you insert a NULL + into that column of the table, the NULL is automatically converted + into an integer which is one greater than the largest value of that + column over all other rows in the table, or 1 if the table is empty. + (If the largest possible integer key, 9223372036854775807, is already in use, then an + unused key value is chosen at random.) + For example, suppose you have a table like this: +

      +CREATE TABLE t1(
      +  a INTEGER PRIMARY KEY,
      +  b INTEGER
      +);
      +
      +

      With this table, the statement

      +
      +INSERT INTO t1 VALUES(NULL,123);
      +
      +

      is logically equivalent to saying:

      +
      +INSERT INTO t1 VALUES((SELECT max(a) FROM t1)+1,123);
      +
      + +

      There is a function named + sqlite3_last_insert_rowid() which will return the integer key + for the most recent insert operation.
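+For example, a minimal C sketch (error checking omitted; it assumes an open database handle and the table t1 created above) that inserts a row and prints the key SQLite assigned:
+
+#include <stdio.h>
+#include "sqlite3.h"
+
+/* Insert a row with a NULL key, then read back the chosen key. */
+static void insert_and_report(sqlite3 *db){
+  sqlite3_exec(db, "INSERT INTO t1 VALUES(NULL,123);", 0, 0, 0);
+  printf("new key = %lld\n", (long long)sqlite3_last_insert_rowid(db));
+}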

      + +

Note that the integer key is one greater than the largest + key that was in the table just prior to the insert. The new key + will be unique over all keys currently in the table, but it might + overlap with keys that have been previously deleted from the + table. To create keys that are unique over the lifetime of the + table, add the AUTOINCREMENT keyword to the INTEGER PRIMARY KEY + declaration. Then the key chosen will be one more than the + largest key that has ever existed in that table. If the largest + possible key has previously existed in that table, then the INSERT + will fail with an SQLITE_FULL error code.

      + +

      (2) What datatypes does SQLite support?

      +
      SQLite uses dynamic typing. Content can be stored as INTEGER, + REAL, TEXT, BLOB, or as NULL.
      + +

      (3) SQLite lets me insert a string into a database column of type integer!

      +

      This is a feature, not a bug. SQLite uses dynamic typing. + It does not enforce data type constraints. Any data can be + inserted into any column. You can put arbitrary length strings into + integer columns, floating point numbers in boolean columns, or dates + in character columns. The datatype you assign to a column in the + CREATE TABLE command does not restrict what data can be put into + that column. Every column is able to hold + an arbitrary length string. (There is one exception: Columns of + type INTEGER PRIMARY KEY may only hold a 64-bit signed integer. + An error will result + if you try to put anything other than an integer into an + INTEGER PRIMARY KEY column.)

      + +

      But SQLite does use the declared type of a column as a hint + that you prefer values in that format. So, for example, if a + column is of type INTEGER and you try to insert a string into + that column, SQLite will attempt to convert the string into an + integer. If it can, it inserts the integer instead. If not, + it inserts the string. This feature is called type affinity. +

      + +

      (4) Why doesn't SQLite allow me to use '0' and '0.0' as the primary + key on two different rows of the same table?

      +

      This problem occurs when your primary key is a numeric type. Change the + datatype of your primary key to TEXT and it should work.

      + +

      Every row must have a unique primary key. For a column with a + numeric type, SQLite thinks that '0' and '0.0' are the + same value because they compare equal to one another numerically. + (See the previous question.) Hence the values are not unique.

      + +

      (5) Can multiple applications or multiple instances of the same + application access a single database file at the same time?

      +

Multiple processes can have the same database open at the same + time. Multiple processes can be doing a SELECT + at the same time. But only one process can be making changes to + the database at any moment in time.

      + +

      SQLite uses reader/writer locks to control access to the database. + (Under Win95/98/ME which lacks support for reader/writer locks, a + probabilistic simulation is used instead.) + But use caution: this locking mechanism might + not work correctly if the database file is kept on an NFS filesystem. + This is because fcntl() file locking is broken on many NFS implementations. + You should avoid putting SQLite database files on NFS if multiple + processes might try to access the file at the same time. On Windows, + Microsoft's documentation says that locking may not work under FAT + filesystems if you are not running the Share.exe daemon. People who + have a lot of experience with Windows tell me that file locking of + network files is very buggy and is not dependable. If what they + say is true, sharing an SQLite database between two or more Windows + machines might cause unexpected problems.

      + +

      We are aware of no other embedded SQL database engine that + supports as much concurrency as SQLite. SQLite allows multiple processes + to have the database file open at once, and for multiple processes to + read the database at once. When any process wants to write, it must + lock the entire database file for the duration of its update. But that + normally only takes a few milliseconds. Other processes just wait on + the writer to finish then continue about their business. Other embedded + SQL database engines typically only allow a single process to connect to + the database at once.

      + +

      However, client/server database engines (such as PostgreSQL, MySQL, + or Oracle) usually support a higher level of concurrency and allow + multiple processes to be writing to the same database at the same time. + This is possible in a client/server database because there is always a + single well-controlled server process available to coordinate access. + If your application has a need for a lot of concurrency, then you should + consider using a client/server database. But experience suggests that + most applications need much less concurrency than their designers imagine. +

      + +

      When SQLite tries to access a file that is locked by another + process, the default behavior is to return SQLITE_BUSY. You can + adjust this behavior from C code using the + sqlite3_busy_handler() or sqlite3_busy_timeout() + API functions.
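+For example, the following C sketch asks SQLite to keep retrying for up to 2000 milliseconds (an arbitrary value chosen for illustration) before giving up with SQLITE_BUSY:
+
+#include "sqlite3.h"
+
+/* Retry a locked database for up to two seconds instead of
+** returning SQLITE_BUSY immediately. */
+static void set_busy_wait(sqlite3 *db){
+  sqlite3_busy_timeout(db, 2000);
+}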

      + +

      (6) Is SQLite threadsafe?

      +

      + Threads are evil. Avoid them. + +

      SQLite is threadsafe. We make this concession since many users choose + to ignore the advice given in the previous paragraph. + But in order to be thread-safe, SQLite must be compiled + with the SQLITE_THREADSAFE preprocessor macro set to 1. Both the Windows + and Linux precompiled binaries in the distribution are compiled this way. + If you are unsure if the SQLite library you are linking against is compiled + to be threadsafe you can call the sqlite3_threadsafe() + interface to find out. +
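+For example, a program can check at startup whether the library it linked against was built threadsafe (a minimal sketch):
+
+#include <stdio.h>
+#include "sqlite3.h"
+
+int main(void){
+  /* Returns zero if SQLITE_THREADSAFE was 0 at compile time. */
+  printf("threadsafe: %d\n", sqlite3_threadsafe());
+  return 0;
+}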

      + +

Prior to version 3.3.1, + an sqlite3 structure could only be used in the same thread + that called sqlite3_open() to create it. + You could not open a + database in one thread then pass the handle off to another thread for + it to use. This was due to limitations (bugs?) in many common threading + implementations such as on RedHat9. Specifically, an fcntl() lock + created by one thread cannot be removed or modified by a different + thread on the troublesome systems. And since SQLite uses fcntl() + locks heavily for concurrency control, serious problems arose if you + started moving database connections across threads.

      + +

      The restriction on moving database connections across threads + was relaxed somewhat in version 3.3.1. With that and subsequent + versions, it is safe to move a connection handle across threads + as long as the connection is not holding any fcntl() locks. You + can safely assume that no locks are being held if no + transaction is pending and all + statements have been finalized.

      + +

      Under Unix, you should not carry an open SQLite database across + a fork() system call into the child process. Problems will result + if you do.

      + +

      (7) How do I list all tables/indices contained in an SQLite database

      +

      If you are running the sqlite3 command-line access program + you can type ".tables" to get a list of all tables. Or you + can type ".schema" to see the complete database schema including + all tables and indices. Either of these commands can be followed by + a LIKE pattern that will restrict the tables that are displayed.

      + +

      From within a C/C++ program (or a script using Tcl/Ruby/Perl/Python + bindings) you can get access to table and index names by doing a SELECT + on a special table named "SQLITE_MASTER". Every SQLite database + has an SQLITE_MASTER table that defines the schema for the database. + The SQLITE_MASTER table looks like this:

      +
      +CREATE TABLE sqlite_master (
      +  type TEXT,
      +  name TEXT,
      +  tbl_name TEXT,
      +  rootpage INTEGER,
      +  sql TEXT
      +);
      +
      +

      For tables, the type field will always be 'table' and the + name field will be the name of the table. So to get a list of + all tables in the database, use the following SELECT command:

      +
      +SELECT name FROM sqlite_master
      +WHERE type='table'
      +ORDER BY name;
      +
      +

      For indices, type is equal to 'index', name is the + name of the index and tbl_name is the name of the table to which + the index belongs. For both tables and indices, the sql field is + the text of the original CREATE TABLE or CREATE INDEX statement that + created the table or index. For automatically created indices (used + to implement the PRIMARY KEY or UNIQUE constraints) the sql field + is NULL.

      + +

      The SQLITE_MASTER table is read-only. You cannot change this table + using UPDATE, INSERT, or DELETE. The table is automatically updated by + CREATE TABLE, CREATE INDEX, DROP TABLE, and DROP INDEX commands.

      + +

      Temporary tables do not appear in the SQLITE_MASTER table. Temporary + tables and their indices and triggers occur in another special table + named SQLITE_TEMP_MASTER. SQLITE_TEMP_MASTER works just like SQLITE_MASTER + except that it is only visible to the application that created the + temporary tables. To get a list of all tables, both permanent and + temporary, one can use a command similar to the following: +

      +SELECT name FROM 
      +   (SELECT * FROM sqlite_master UNION ALL
      +    SELECT * FROM sqlite_temp_master)
      +WHERE type='table'
      +ORDER BY name
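+From C, the same query can be run through the prepared-statement interface. A minimal sketch (error handling omitted) that prints the name of each permanent table:
+
+#include <stdio.h>
+#include "sqlite3.h"
+
+static void list_tables(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  sqlite3_prepare_v2(db,
+    "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name",
+    -1, &pStmt, 0);
+  while( sqlite3_step(pStmt)==SQLITE_ROW ){
+    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
+  }
+  sqlite3_finalize(pStmt);
+}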
      +
      + +

      (8) Are there any known size limits to SQLite databases?

      +

      See limits.html for a full discussion of + the limits of SQLite.

      + +

      (9) What is the maximum size of a VARCHAR in SQLite?

      +

      SQLite does not enforce the length of a VARCHAR. You can declare + a VARCHAR(10) and SQLite will be happy to let you put 500 characters + in it. And it will keep all 500 characters intact - it never truncates. +

      + +

      (10) Does SQLite support a BLOB type?

      +

      SQLite versions 3.0 and later allow you to store BLOB data in any + column, even columns that are declared to hold some other type.

      + +

      (11) How do I add or delete columns from an existing table in SQLite.

      +

      SQLite has limited + ALTER TABLE support that you can + use to add a column to the end of a table or to change the name of + a table. + If you want to make more complex changes in the structure of a table, + you will have to recreate the + table. You can save existing data to a temporary table, drop the + old table, create the new table, then copy the data back in from + the temporary table.

      + +

For example, suppose you have a table named "t1" with columns + named "a", "b", and "c" and that you want to delete column "c" from + this table. The following steps illustrate how this could be done: +

      + +
      +BEGIN TRANSACTION;
      +CREATE TEMPORARY TABLE t1_backup(a,b);
      +INSERT INTO t1_backup SELECT a,b FROM t1;
      +DROP TABLE t1;
      +CREATE TABLE t1(a,b);
      +INSERT INTO t1 SELECT a,b FROM t1_backup;
      +DROP TABLE t1_backup;
      +COMMIT;
      +
      + +

      (12) I deleted a lot of data but the database file did not get any + smaller. Is this a bug?

      +

      No. When you delete information from an SQLite database, the + unused disk space is added to an internal "free-list" and is reused + the next time you insert data. The disk space is not lost. But + neither is it returned to the operating system.

      + +

      If you delete a lot of data and want to shrink the database file, + run the VACUUM command. + VACUUM will reconstruct + the database from scratch. This will leave the database with an empty + free-list and a file that is minimal in size. Note, however, that the + VACUUM can take some time to run (around a half second per megabyte + on the Linux box where SQLite is developed) and it can use up to twice + as much temporary disk space as the original file while it is running. +

      + +

      As of SQLite version 3.1, an alternative to using the VACUUM command + is auto-vacuum mode, enabled using the + auto_vacuum pragma.

      + +

      (13) Can I use SQLite in my commercial product without paying royalties?

      +

      Yes. SQLite is in the + public domain. No claim of ownership is made + to any part of the code. You can do anything you want with it.

      + +

      (14) How do I use a string literal that contains an embedded single-quote (') + character?

      +

The SQL standard specifies that single-quotes in strings are escaped + by putting two single quotes in a row. SQL works like the Pascal programming + language in this regard. SQLite follows this standard. Example: +

      + +
      +    INSERT INTO xyz VALUES('5 O''clock');
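+From C code, the sqlite3_mprintf() routine with the %q format specifier performs this quote-doubling automatically. A minimal sketch (the table name is the same illustrative "xyz" as above):
+
+#include "sqlite3.h"
+
+/* Build an INSERT statement from an arbitrary C string.  %q doubles
+** any embedded single-quotes; the caller frees the result with
+** sqlite3_free(). */
+static char *make_insert(const char *zValue){
+  return sqlite3_mprintf("INSERT INTO xyz VALUES('%q');", zValue);
+}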
      +  
      + +

      (15) What is an SQLITE_SCHEMA error, and why am I getting one?

      +

An SQLITE_SCHEMA error is returned when a + prepared SQL statement is no longer valid and cannot be executed. + When this occurs, the statement must be recompiled from SQL using + the sqlite3_prepare() API. + An SQLITE_SCHEMA error can only occur when using the sqlite3_prepare() + and sqlite3_step() interfaces to run SQL. + You will never receive an SQLITE_SCHEMA error from + sqlite3_exec(). Nor will you receive the error if you + prepare statements using sqlite3_prepare_v2() instead of + sqlite3_prepare().

      + +

      The sqlite3_prepare_v2() interface creates a + prepared statement that will automatically recompile itself if + the schema changes. The easiest way to deal with + SQLITE_SCHEMA errors is to always use sqlite3_prepare_v2() + instead of sqlite3_prepare().
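+For example, a C sketch (error handling abbreviated; the query is illustrative) that uses sqlite3_prepare_v2() so a schema change never surfaces as an SQLITE_SCHEMA error:
+
+#include <stdio.h>
+#include "sqlite3.h"
+
+static void count_rows(sqlite3 *db){
+  sqlite3_stmt *pStmt;
+  /* Statements from sqlite3_prepare_v2() recompile themselves
+  ** automatically when the database schema changes. */
+  if( sqlite3_prepare_v2(db, "SELECT count(*) FROM t1", -1, &pStmt, 0)==SQLITE_OK ){
+    if( sqlite3_step(pStmt)==SQLITE_ROW ){
+      printf("%d rows\n", sqlite3_column_int(pStmt, 0));
+    }
+    sqlite3_finalize(pStmt);
+  }
+}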

      + +

      (16) Why does ROUND(9.95,1) return 9.9 instead of 10.0? + Shouldn't 9.95 round up?

      +

SQLite uses binary arithmetic and in binary, there is no + way to write 9.95 in a finite number of bits. The closest + you can get to 9.95 in a 64-bit IEEE float (which is what + SQLite uses) is 9.949999999999999289457264239899814128875732421875. + So when you type "9.95", SQLite really understands the number to be + the much longer value shown above. And that value rounds down.

      + +

      This kind of problem comes up all the time when dealing with + floating point binary numbers. The general rule to remember is + that most fractional numbers that have a finite representation in decimal + (a.k.a "base-10") + do not have a finite representation in binary (a.k.a "base-2"). + And so they are + approximated using the closest binary number available. That + approximation is usually very close, but it will be slightly off + and in some cases can cause your results to be a little different + from what you might expect.

      + +

      (17) I get hundreds of compiler warnings when I compile SQLite. + Isn't this a problem? Doesn't it indicate poor code quality?

      +

      Quality assurance in SQLite is done using full-coverage + testing, not by compiler warnings or other static code analysis + tools. In other words, we verify that SQLite actually gets the + correct answer, not that it merely satisfies stylistic constraints. + Over two-thirds of the SQLite code base is devoted purely to testing. + The SQLite test suite runs many thousands of separate test cases and + many of those test cases are parameterized so that hundreds of thousands + of tests involving millions of SQL statements are run and evaluated + for correctness prior to every release. The developers use code + coverage tools to verify that all paths through the code are tested. + Whenever a bug is found in SQLite, new test cases are written to + exhibit the bug so that the bug cannot recur undetected in the future.

      + +

During testing, the SQLite library is compiled with special + instrumentation that allows the test scripts to simulate a wide + variety of failures in order to verify that SQLite recovers + correctly. Memory allocation is carefully tracked and no memory + leaks occur, even following memory allocation failures. A custom + VFS layer is used to simulate operating system crashes and power + failures in order to ensure that transactions are atomic across + these events. A mechanism for deliberately injecting I/O errors + shows that SQLite is resilient to such malfunctions. (As an + experiment, try inducing these kinds of errors on other SQL database + engines and see what happens!)

      + +

      We also run SQLite using valgrind + on Linux and verify that it detects no problems.

      + +

      Some people say that we should eliminate all warnings because + benign warnings mask real warnings that might arise in future changes. + This is true enough. But in reply, the developers observe that all + warnings have already been fixed in the + compilers used for SQLite development (various versions of GCC). + Compiler warnings only arise from compilers that the developers do + not have access to.

      + +

      (18) Case-insensitive matching of Unicode characters does not work.

      +
      The default configuration of SQLite only supports case-insensitive + comparisons of ASCII characters. The reason for this is that doing + full Unicode case-insensitive comparisons and case conversions + requires tables and logic that would nearly double the size of + the SQLite library. The + SQLite developers reason that any application that needs full + Unicode case support probably already has the necessary tables and + functions and so SQLite should not take up space to + duplicate this ability.

      + +

      Instead of providing full Unicode case support by default, + SQLite provides the ability to link against external + Unicode comparison and conversion routines. + The application can overload the built-in NOCASE collating + sequence (using sqlite3_create_collation()) and the built-in + like(), upper(), and lower() functions + (using sqlite3_create_function()). + The SQLite source code includes an "ICU" extension that does + these overloads. Or, developers can write their own overloads + based on their own Unicode-aware comparison routines already + contained within their project.
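+As a sketch of the mechanism only (the comparison function below is deliberately trivial and is not a real Unicode-aware comparison), an application might overload NOCASE like this:
+
+#include <string.h>
+#include "sqlite3.h"
+
+/* Placeholder comparison; a real application would call into its own
+** Unicode library (for example ICU) here instead. */
+static int myCompare(void *pArg, int n1, const void *z1,
+                     int n2, const void *z2){
+  int n = n1<n2 ? n1 : n2;
+  int c = memcmp(z1, z2, n);
+  (void)pArg;
+  return c ? c : n1-n2;
+}
+
+static void install_collation(sqlite3 *db){
+  /* Replace the built-in NOCASE collating sequence. */
+  sqlite3_create_collation(db, "NOCASE", SQLITE_UTF8, 0, myCompare);
+}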

      + +

(19) INSERT is really slow - I can only do a few dozen INSERTs per second

      +
      Actually, SQLite will easily do 50,000 or more INSERT statements per second + on an average desktop computer. But it will only do a few dozen transactions + per second. Transaction speed is limited by the rotational speed of + your disk drive. A transaction normally requires two complete rotations + of the disk platter, which on a 7200RPM disk drive limits you to about + 60 transactions per second. + +

Transaction speed is limited by disk drive speed because (by default) + SQLite actually waits until the data really is safely stored on the disk + surface before the transaction is complete. That way, if you suddenly lose + power or if your OS crashes, your data is still safe. For details, + read about atomic commit in SQLite. + 

      By default, each INSERT statement is its own transaction. But if you + surround multiple INSERT statements with BEGIN...COMMIT then all the + inserts are grouped into a single transaction. The time needed to commit + the transaction is amortized over all the enclosed insert statements and + so the time per insert statement is greatly reduced. + +
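+For example, a C sketch (error handling omitted; the table and values are illustrative) that groups many INSERTs under a single commit:
+
+#include "sqlite3.h"
+
+/* All of the INSERTs below share one commit, so only one synchronous
+** write to the disk surface is needed. */
+static void bulk_insert(sqlite3 *db, int n){
+  int i;
+  sqlite3_exec(db, "BEGIN", 0, 0, 0);
+  for(i=0; i<n; i++){
+    char *zSql = sqlite3_mprintf("INSERT INTO t1 VALUES(NULL,%d);", i);
+    sqlite3_exec(db, zSql, 0, 0, 0);
+    sqlite3_free(zSql);
+  }
+  sqlite3_exec(db, "COMMIT", 0, 0, 0);
+}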

Another option is to run PRAGMA synchronous=OFF. This command will + cause SQLite to not wait on data to reach the disk surface, which will make + write operations appear to be much faster. But if you lose power in the + middle of a transaction, your database file might become corrupt.

      + +

      (20) I accidentally deleted some important information from my SQLite database. + How can I recover it?

      +
      If you have a backup copy of your database file, recover the information + from your backup. + +

      If you do not have a backup, recovery is very difficult. You might + be able to find partial string data in a binary dump of the raw database + file. Recovering numeric data might also be possible given special tools, + though to our knowledge no such tools exist. SQLite is sometimes compiled + with the SQLITE_SECURE_DELETE option which overwrites all deleted content + with zeros. If that is the case then recovery is clearly impossible. + Recovery is also impossible if you have run VACUUM since the data was + deleted. If SQLITE_SECURE_DELETE is not used and VACUUM has not been run, + then some of the deleted content might still be in the database file, in + areas marked for reuse. But, again, there exist no procedures or tools + that we know of to help you recover that data.

      + +

      (21) What is an SQLITE_CORRUPT error? What does it mean for the database + to be "malformed"? Why am I getting this error?

      +

      An SQLITE_CORRUPT error is returned when SQLite detects an error + in the structure, format, or other control elements of the + database file.

      + +

      SQLite does not corrupt database files, except in the case of very + rare bugs (see + DatabaseCorruption) + and even then the bugs are normally difficult to + reproduce. Even if your application crashes in the middle of an + update, your database is safe. The database is safe even if your OS + crashes or takes a power loss. The crash-resistance of SQLite has + been extensively studied and tested and is attested by years of real-world + experience by millions of users.

      + +

      That said, there are a number of things that external programs or bugs + in your hardware or OS can do to corrupt a database file. Details + can be found in the discussions on the + atomic commit and + locking support in SQLite + as well as in the mailing list archives.

      + +

      You can use PRAGMA integrity_check + to do a thorough but time-intensive test of the database integrity.

      + +

      You can use PRAGMA quick_check to do a faster + but less thorough test of the database integrity.
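      As a sketch, the following C fragment runs the integrity check and prints
      each line of the report; a healthy database produces a single row
      containing the text "ok". The function name check_integrity() is
      illustrative only.

        #include <stdio.h>
        #include <sqlite3.h>

        int check_integrity(sqlite3 *db){
          sqlite3_stmt *pStmt = 0;
          int rc = sqlite3_prepare_v2(db, "PRAGMA integrity_check", -1, &pStmt, 0);
          if( rc!=SQLITE_OK ) return rc;
          while( sqlite3_step(pStmt)==SQLITE_ROW ){
            printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
          }
          return sqlite3_finalize(pStmt);
        }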

      + +

      Depending on how badly your database is corrupted, you may be able to + recover some of the data by using the CLI to dump the schema and contents + to a file and then recreate the database from that dump. Unfortunately, once humpty-dumpty falls off + the wall, it is generally not possible to put him back together again.

      + +

      (22) Does SQLite support foreign keys?

      +

      FOREIGN KEY constraints are parsed but are not enforced. + However, the equivalent constraint enforcement can be + achieved using + triggers. + The SQLite source tree contains + source code and documentation for a C program (genfkey) that will + read an SQLite database, analyze the foreign key constraints, + and generate appropriate triggers automatically.
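      As an illustration of the trigger technique (a hand-written sketch, not
      the exact output of genfkey), the following C fragment installs a trigger
      that rejects INSERTs into a hypothetical child(parent_id) table when no
      matching row exists in parent(id).

        #include <sqlite3.h>

        int install_fk_trigger(sqlite3 *db){
          static const char *zSql =
            "CREATE TRIGGER fk_child_parent_insert "
            "BEFORE INSERT ON child FOR EACH ROW "
            "WHEN (SELECT id FROM parent WHERE id=NEW.parent_id) IS NULL "
            "BEGIN "
            "  SELECT RAISE(ABORT, 'foreign key violation on child.parent_id'); "
            "END;";
          return sqlite3_exec(db, zSql, 0, 0, 0);
        }

      Complete enforcement also requires similar triggers for UPDATEs of the
      child table and for UPDATEs and DELETEs of the parent table.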

      + +

      The readme + for the genfkey utility contains more information.

      + +

      As of Version 3.6.12 this feature is incorporated into the CLI.

      + +

      You can read about other possible solutions for foreign key + support in the + SQLite Wiki.

      + +

      (23) I get a compiler error if I use the SQLITE_OMIT_... + compile-time options when building SQLite.

      +
      The SQLITE_OMIT_... compile-time options only work + when building from canonical source files. They do not work + when you build from the SQLite amalgamation or from the pre-processed + source files. + +

      It is possible to build a special amalgamation that will work with + a predetermined set of SQLITE_OMIT_... options. Instructions for doing + so can be found with the SQLITE_OMIT_... documentation.

      + +

      (24) My WHERE clause expression column1="column1" does not work. + It causes every row of the table to be returned, not just the rows + where column1 has the value "column1".

      +
      Use single-quotes, not double-quotes, around string literals in SQL. + This is what the SQL standard requires. Your WHERE clause expression + should read: column1='column1' + +

      SQL uses double-quotes around identifiers (column or table names) that + contain special characters or which are keywords. So double-quotes are + a way of escaping identifier names. Hence, when you say + column1="column1" that is equivalent to + column1=column1 which is obviously always true.

      + +

      (25) How are the syntax diagrams (a.k.a. "railroad" diagrams) for + SQLite generated?

      +
      The process is explained at + http://wiki.tcl.tk/21708.
      +
    +
    +This page last modified 2009/05/11 17:40:08 UTC + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/faq.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/faq.tcl --- sqlite3-3.4.2/www/faq.tcl 2007-08-05 22:15:14.000000000 +0100 +++ sqlite3-3.6.16/www/faq.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,478 +0,0 @@ -# -# Run this script to generated a faq.html output file -# -set rcsid {$Id: faq.tcl,v 1.39 2007/08/03 08:18:45 danielk1977 Exp $} -source common.tcl -header {SQLite Frequently Asked Questions} - -set cnt 1 -proc faq {question answer} { - set ::faq($::cnt) [list [string trim $question] [string trim $answer]] - incr ::cnt -} - -############# -# Enter questions and answers here. - -faq { - How do I create an AUTOINCREMENT field. -} { -

    Short answer: A column declared INTEGER PRIMARY KEY will - autoincrement.

    - -

    Here is the long answer: - If you declare a column of a table to be INTEGER PRIMARY KEY, then - whenever you insert a NULL - into that column of the table, the NULL is automatically converted - into an integer which is one greater than the largest value of that - column over all other rows in the table, or 1 if the table is empty. - (If the largest possible integer key, 9223372036854775807, is already in use, then an - unused key value is chosen at random.) - For example, suppose you have a table like this: -

    -CREATE TABLE t1(
    -  a INTEGER PRIMARY KEY,
    -  b INTEGER
    -);
    -
    -

    With this table, the statement

    -
    -INSERT INTO t1 VALUES(NULL,123);
    -
    -

    is logically equivalent to saying:

    -
    -INSERT INTO t1 VALUES((SELECT max(a) FROM t1)+1,123);
    -
    - -

    There is a new API function named - - sqlite3_last_insert_rowid() which will return the integer key - for the most recent insert operation.

    - -

    Note that the integer key is one greater than the largest - key that was in the table just prior to the insert. The new key - will be unique over all keys currently in the table, but it might - overlap with keys that have been previously deleted from the - table. To create keys that are unique over the lifetime of the - table, add the AUTOINCREMENT keyword to the INTEGER PRIMARY KEY - declaration. Then the key chosen will be one more than the - largest key that has ever existed in that table. If the largest - possible key has previously existed in that table, then the INSERT - will fail with an SQLITE_FULL error code.

    -} - -faq { - What datatypes does SQLite support? -} { -

    See http://www.sqlite.org/datatype3.html.

    -} - -faq { - SQLite lets me insert a string into a database column of type integer! -} { -

    This is a feature, not a bug. SQLite does not enforce data type - constraints. Any data can be - inserted into any column. You can put arbitrary length strings into - integer columns, floating point numbers in boolean columns, or dates - in character columns. The datatype you assign to a column in the - CREATE TABLE command does not restrict what data can be put into - that column. Every column is able to hold - an arbitrary length string. (There is one exception: Columns of - type INTEGER PRIMARY KEY may only hold a 64-bit signed integer. - An error will result - if you try to put anything other than an integer into an - INTEGER PRIMARY KEY column.)

    - -

    But SQLite does use the declared type of a column as a hint - that you prefer values in that format. So, for example, if a - column is of type INTEGER and you try to insert a string into - that column, SQLite will attempt to convert the string into an - integer. If it can, it inserts the integer instead. If not, - it inserts the string. This feature is sometimes - called type or column affinity. -

    -} - -faq { - Why doesn't SQLite allow me to use '0' and '0.0' as the primary - key on two different rows of the same table? -} { -

    Your primary key must have a numeric type. Change the datatype of - your primary key to TEXT and it should work.

    - -

    Every row must have a unique primary key. For a column with a - numeric type, SQLite thinks that '0' and '0.0' are the - same value because they compare equal to one another numerically. - (See the previous question.) Hence the values are not unique.

    -} - - -faq { - Can multiple applications or multiple instances of the same - application access a single database file at the same time? -} { -

    Multiple processes can have the same database open at the same - time. Multiple processes can be doing a SELECT - at the same time. But only one process can be making changes to - the database at any moment in time.

    - -

    SQLite uses reader/writer locks to control access to the database. - (Under Win95/98/ME which lacks support for reader/writer locks, a - probabilistic simulation is used instead.) - But use caution: this locking mechanism might - not work correctly if the database file is kept on an NFS filesystem. - This is because fcntl() file locking is broken on many NFS implementations. - You should avoid putting SQLite database files on NFS if multiple - processes might try to access the file at the same time. On Windows, - Microsoft's documentation says that locking may not work under FAT - filesystems if you are not running the Share.exe daemon. People who - have a lot of experience with Windows tell me that file locking of - network files is very buggy and is not dependable. If what they - say is true, sharing an SQLite database between two or more Windows - machines might cause unexpected problems.

    - -

    We are aware of no other embedded SQL database engine that - supports as much concurrency as SQLite. SQLite allows multiple processes - to have the database file open at once, and for multiple processes to - read the database at once. When any process wants to write, it must - lock the entire database file for the duration of its update. But that - normally only takes a few milliseconds. Other processes just wait on - the writer to finish then continue about their business. Other embedded - SQL database engines typically only allow a single process to connect to - the database at once.

    - -

    However, client/server database engines (such as PostgreSQL, MySQL, - or Oracle) usually support a higher level of concurrency and allow - multiple processes to be writing to the same database at the same time. - This is possible in a client/server database because there is always a - single well-controlled server process available to coordinate access. - If your application has a need for a lot of concurrency, then you should - consider using a client/server database. But experience suggests that - most applications need much less concurrency than their designers imagine. -

    - -

    When SQLite tries to access a file that is locked by another - process, the default behavior is to return SQLITE_BUSY. You can - adjust this behavior from C code using the - sqlite3_busy_handler() or - sqlite3_busy_timeout() - API functions.

    -} - -faq { - Is SQLite threadsafe? -} { -

    Yes. Sometimes. In order to be thread-safe, SQLite must be compiled - with the THREADSAFE preprocessor macro set to 1. In the default - distribution, the windows binaries are compiled to be threadsafe but - the linux binaries are not. If you want to change this, you'll have to - recompile.

    - -

    "Threadsafe" in the previous paragraph means that two or more threads - can run SQLite at the same time on different "sqlite3" structures - returned from separate calls to - sqlite3_open(). It is never safe - to use the same sqlite3 structure pointer in two - or more threads.

    - -

    Prior to version 3.3.1, - an sqlite3 structure could only be used in the same thread - that called sqlite3_open - to create it. - You could not open a - database in one thread then pass the handle off to another thread for - it to use. This was due to limitations (bugs?) in many common threading - implementations such as on RedHat9. Specifically, an fcntl() lock - created by one thread cannot be removed or modified by a different - thread on the troublesome systems. And since SQLite uses fcntl() - locks heavily for concurrency control, serious problems arose if you - start moving database connections across threads.

    - -

    The restriction on moving database connections across threads - was relaxed somewhat in version 3.3.1. With that and subsequent - versions, it is safe to move a connection handle across threads - as long as the connection is not holding any fcntl() locks. You - can safely assume that no locks are being held if no - transaction is pending and all statements have been finalized.

    - -

    If you turn on - shared cache - mode or if you compile with the -DSQLITE_ENABLE_MEMORY_MANAGEMENT=1 - option, then you can never move an sqlite3 pointer across - threads. The sqlite3 pointer must only be used in the same - thread in which it was created by - sqlite3_open(). If you - break the rules and use an sqlite3 handle in more than one - thread under these circumstances you will likely corrupt some - internal data structures, resulting in a crash.

    - -

    Under UNIX, you should not carry an open SQLite database across - a fork() system call into the child process. Problems will result - if you do.

    -} - -faq { - How do I list all tables/indices contained in an SQLite database -} { -

    If you are running the sqlite3 command-line access program - you can type ".tables" to get a list of all tables. Or you - can type ".schema" to see the complete database schema including - all tables and indices. Either of these commands can be followed by - a LIKE pattern that will restrict the tables that are displayed.

    - -

    From within a C/C++ program (or a script using Tcl/Ruby/Perl/Python - bindings) you can get access to table and index names by doing a SELECT - on a special table named "SQLITE_MASTER". Every SQLite database - has an SQLITE_MASTER table that defines the schema for the database. - The SQLITE_MASTER table looks like this:

    -
    -CREATE TABLE sqlite_master (
    -  type TEXT,
    -  name TEXT,
    -  tbl_name TEXT,
    -  rootpage INTEGER,
    -  sql TEXT
    -);
    -
    -

    For tables, the type field will always be 'table' and the - name field will be the name of the table. So to get a list of - all tables in the database, use the following SELECT command:

    -
    -SELECT name FROM sqlite_master
    -WHERE type='table'
    -ORDER BY name;
    -
    -

    For indices, type is equal to 'index', name is the - name of the index and tbl_name is the name of the table to which - the index belongs. For both tables and indices, the sql field is - the text of the original CREATE TABLE or CREATE INDEX statement that - created the table or index. For automatically created indices (used - to implement the PRIMARY KEY or UNIQUE constraints) the sql field - is NULL.

    - -

    The SQLITE_MASTER table is read-only. You cannot change this table - using UPDATE, INSERT, or DELETE. The table is automatically updated by - CREATE TABLE, CREATE INDEX, DROP TABLE, and DROP INDEX commands.

    - -

    Temporary tables do not appear in the SQLITE_MASTER table. Temporary - tables and their indices and triggers occur in another special table - named SQLITE_TEMP_MASTER. SQLITE_TEMP_MASTER works just like SQLITE_MASTER - except that it is only visible to the application that created the - temporary tables. To get a list of all tables, both permanent and - temporary, one can use a command similar to the following: -

    -SELECT name FROM 
    -   (SELECT * FROM sqlite_master UNION ALL
    -    SELECT * FROM sqlite_temp_master)
    -WHERE type='table'
    -ORDER BY name
    -
    -} - -faq { - Are there any known size limits to SQLite databases? -} { -

    See limits.html for a full discussion of - the limits of SQLite.

    -} - -faq { - What is the maximum size of a VARCHAR in SQLite? -} { -

    SQLite does not enforce the length of a VARCHAR. You can declare - a VARCHAR(10) and SQLite will be happy to let you put 500 characters - in it. And it will keep all 500 characters intact - it never truncates. -

    -} - -faq { - Does SQLite support a BLOB type? -} { -

    SQLite versions 3.0 and later allow you to store BLOB data in any - column, even columns that are declared to hold some other type.

    -} - -faq { - How do I add or delete columns from an existing table in SQLite. -} { -

    SQLite has limited - ALTER TABLE support that you can - use to add a column to the end of a table or to change the name of - a table. - If you want to make more complex changes to the structure of a table, - you will have to recreate the - table. You can save existing data to a temporary table, drop the - old table, create the new table, then copy the data back in from - the temporary table.

    - -

    For example, suppose you have a table named "t1" with columns - names "a", "b", and "c" and that you want to delete column "c" from - this table. The following steps illustrate how this could be done: -

    - -
    -BEGIN TRANSACTION;
    -CREATE TEMPORARY TABLE t1_backup(a,b);
    -INSERT INTO t1_backup SELECT a,b FROM t1;
    -DROP TABLE t1;
    -CREATE TABLE t1(a,b);
    -INSERT INTO t1 SELECT a,b FROM t1_backup;
    -DROP TABLE t1_backup;
    -COMMIT;
    -
    -} - -faq { - I deleted a lot of data but the database file did not get any - smaller. Is this a bug? -} { -

    No. When you delete information from an SQLite database, the - unused disk space is added to an internal "free-list" and is reused - the next time you insert data. The disk space is not lost. But - neither is it returned to the operating system.

    - -

    If you delete a lot of data and want to shrink the database file, - run the VACUUM command. - VACUUM will reconstruct - the database from scratch. This will leave the database with an empty - free-list and a file that is minimal in size. Note, however, that the - VACUUM can take some time to run (around a half second per megabyte - on the Linux box where SQLite is developed) and it can use up to twice - as much temporary disk space as the original file while it is running. -

    - -

    As of SQLite version 3.1, an alternative to using the VACUUM command - is auto-vacuum mode, enabled using the - auto_vacuum pragma.

    -} - -faq { - Can I use SQLite in my commercial product without paying royalties? -} { -

    Yes. SQLite is in the - public domain. No claim of ownership is made - to any part of the code. You can do anything you want with it.

    -} - -faq { - How do I use a string literal that contains an embedded single-quote (') - character? -} { -

    The SQL standard specifies that single-quotes in strings are escaped - by putting two single quotes in a row. SQL works like the Pascal programming - language in this regard. SQLite follows this standard. Example: -

    - -
    -    INSERT INTO xyz VALUES('5 O''clock');
    -  
    -} - -faq {What is an SQLITE_SCHEMA error, and why am I getting one?} { -

    An SQLITE_SCHEMA error is returned when a - prepared SQL statement is no longer valid and cannot be executed. - When this occurs, the statement must be recompiled from SQL using - the - sqlite3_prepare() API. - In SQLite version 3, an SQLITE_SCHEMA error can - only occur when using the - sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() - API to execute SQL, not when using the - sqlite3_exec(). This was not - the case in version 2.

    - -

    The most common reason for a prepared statement to become invalid - is that the schema of the database was modified after the SQL was - prepared (possibly by another process). The other reasons this can - happen are:

    -
      -
    • A database was DETACHed. -
    • The database was VACUUMed -
    • A user-function definition was deleted or changed. -
    • A collation sequence definition was deleted or changed. -
    • The authorization function was changed. -
    - -

    In all cases, the solution is to recompile the statement from SQL - and attempt to execute it again. Because a prepared statement can be - invalidated by another process changing the database schema, all code - that uses the - sqlite3_prepare()/sqlite3_step()/sqlite3_finalize() - API should be prepared to handle SQLITE_SCHEMA errors. An example - of one approach to this follows:

    - -
    -
    -    int rc;
    -    sqlite3_stmt *pStmt;
    -    char zSql[] = "SELECT .....";
    -
    -    do {
    -      /* Compile the statement from SQL. Assume success. */
    -      sqlite3_prepare(pDb, zSql, -1, &pStmt, 0);
    -
    -      while( SQLITE_ROW==sqlite3_step(pStmt) ){
    -        /* Do something with the row of available data */
    -      }
    -
    -      /* Finalize the statement. If an SQLITE_SCHEMA error has
    -      ** occurred, then the above call to sqlite3_step() will have
    -      ** returned SQLITE_ERROR. sqlite3_finalize() will return
    -      ** SQLITE_SCHEMA. In this case the loop will execute again.
    -      */
    -      rc = sqlite3_finalize(pStmt);
    -    } while( rc==SQLITE_SCHEMA );
    -    
    -  
    -} - -faq {Why does ROUND(9.95,1) return 9.9 instead of 10.0? - Shouldn't 9.95 round up?} { -

    SQLite uses binary arithmetic and in binary, there is no - way to write 9.95 in a finite number of bits. The closest - you can get to 9.95 in a 64-bit IEEE float (which is what - SQLite uses) is 9.949999999999999289457264239899814128875732421875. - So when you type "9.95", SQLite really understands the number to be - the much longer value shown above. And that value rounds down.

    - -

    This kind of problem comes up all the time when dealing with - floating point binary numbers. The general rule to remember is - that most fractional numbers that have a finite representation in decimal - (a.k.a "base-10") - do not have a finite representation in binary (a.k.a "base-2"). - And so they are - approximated using the closest binary number available. That - approximation is usually very close, but it will be slightly off - and in some cases can cause your results to be a little different - from what you might expect.

    -} - -# End of questions and answers. -############# - -puts {

    Frequently Asked Questions

    } - -# puts {
    } -# for {set i 1} {$i<$cnt} {incr i} { -# puts "
    ($i)
    " -# puts "
    [lindex $faq($i) 0]
    " -# } -# puts {
    } -puts {
      } -for {set i 1} {$i<$cnt} {incr i} { - puts "
    1. [lindex $faq($i) 0]
    2. " -} -puts {
    } - -for {set i 1} {$i<$cnt} {incr i} { - puts "
    " - puts "

    ($i) [lindex $faq($i) 0]

    \n" - puts "
    [lindex $faq($i) 1]
    \n" -} - -puts {} -footer $rcsid Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/favicon.ico and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/favicon.ico differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/features.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/features.html --- sqlite3-3.4.2/www/features.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/features.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,148 @@ + + +SQLite Features + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    About SQLite

    + +

    SQLite Features:

    + +

      +
    • Transactions + are atomic, consistent, isolated, and durable (ACID) + even after system crashes and power failures.
    • +
    • Zero-configuration + - no setup or administration needed.
    • +
    • Implements most of SQL92. + (Features not supported)
    • +
    • A complete database is stored in a + single cross-platform disk file.
    • +
    • Supports terabyte-sized databases and gigabyte-sized strings + and blobs. (See limits.html.) +
    • Small code footprint: + + less than 300KiB fully configured or less + than 180KiB with optional features omitted.
    • +
    • Faster than popular client/server database + engines for most common operations.
    • +
    • Simple, easy to use API.
    • +
    • Written in ANSI-C. TCL bindings included. + Bindings for dozens of other languages + + available separately.
    • +
    • Well-commented source code with over 99% statement test coverage.
    • +
    • Available as a + single ANSI-C source-code file + that you can easily drop into another project. +
    • Self-contained: + no external dependencies.
    • +
    • Cross-platform: Unix (Linux and Mac OS X), OS/2, and Windows + (Win32 and WinCE) + are supported out of the box. Easy to port to other systems. +
    • Sources are in the public domain. + Use for any purpose.
    • +
    • Comes with a standalone command-line interface + (CLI) client that can be used to administer SQLite databases.
    • +
    +

    + +

    Suggested Uses For SQLite:

    + +

      +
    • Application File Format. +Rather than using fopen() to write XML or some proprietary format into +disk files used by your application, use an SQLite database instead. +You'll avoid having to write and troubleshoot a parser, your data +will be more easily accessible and cross-platform, and your updates +will be transactional.

    • + +
    • Database For Gadgets. +SQLite is a popular choice for the database engine in cellphones, +PDAs, MP3 players, set-top boxes, and other electronic gadgets. +SQLite has a small code footprint, makes efficient use of memory, +disk space, and disk bandwidth, is highly reliable, and requires +no maintenance from a Database Administrator.

    • + +
    • Website Database. +Because it requires no configuration and stores information in ordinary +disk files, SQLite is a popular choice as the database to back small +to medium-sized websites.

    • + +
    • Stand-in For An Enterprise RDBMS. +SQLite is often used as a surrogate for an enterprise RDBMS for +demonstration purposes or for testing. SQLite is fast and requires +no setup, which takes a lot of the hassle out of testing and which +makes demos perky and easy to launch.

    • +
    +
    +This page last modified 2009/03/19 00:04:36 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/fileformat.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/fileformat.html --- sqlite3-3.4.2/www/fileformat.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/fileformat.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,3921 @@ + + +SQLite Database File Format + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + + + + + +
    SQLite Database File Format
    +
    Table Of Contents
    + + + + +

    1 Document Overview

    + + +

    1.1 Scope and Purpose

    + + +

    + + This document provides an engineering guide to the file formats used by + SQLite to store databases on disk. It also contains a description of the + file locking protocol used by SQLite to control read and write access to + the files and other protocols for safely modifying the database in a live + system (one that may contain other database clients). It is intended that + this document shall provide all the information required to create a + system that reads and writes SQLite databases in a way that is completely + compatible with SQLite itself. There are two broad purposes for providing + this information: +

      +
    • To make it easier to maintain, test and improve the SQLite + software library. + +

    • To facilitate the development of external (non-SQLite) software that may + operate directly on SQLite databases stored within a file-system. For + example a database space analysis utility or a competing database + client implementation. +

    + +

    + The availability of this information makes an SQLite database an even safer + choice for long-term data storage. If at some point in the future the + SQLite software library cannot be used to access an SQLite database that + contains useful data, a procedure or software module may be developed based + on the content of this document to extract the required data. + +

    + None of the information contained in this document is required by programmers + wishing to use the SQLite library in applications. The intended audience is + engineers working on SQLite itself or those interested in creating alternative + methods of accessing SQLite databases (without using SQLite). + + +

    1.2 Document and Requirements Organization

    + + +

    + The content of this document is divided into three sections. + +

    + Section 2 describes the format + of a database image. A database image is the serialized form of an + SQLite database that is stored on disk. + +

    + Usually, an SQLite database image is stored in a single file on disk, + an SQLite database file. However, while the database image as stored + on disk is being modified, it may be temporarily stored in a more + convoluted format, distributed between two files, the database file + and a journal file. If a failure occurs while modifying a database image + in this fashion, then the database image must be extracted from the + database and journal files found in the file-system following recovery + (other documentation refers to this as "hot journal rollback"). Section + 3 describes the format used by the + journal file and the rules for correctly reading a database image from + the combination of a database file and journal file. + + +

    Section 4 contains + descriptions of and software requirements related to other protocols that + must be observed by software that reads and writes SQLite databases + within a live system, including: + + +

      +
    • requirements governing the integrity of database file-system representations, +
    • the locking protocol used by SQLite to manage read and write access + to the database and journal files within the file-system, and +
    • the change-counter and schema-cookie protocols that must be followed + by all database writers to facilitate the implementation of + efficient in-memory caches of the database schema and content by + readers and writers. +
    + +

    1.3 Glossary

    + + +
    Auto-vacuum last root-page + A page number stored as 32-bit integer at byte offset 52 of the + database header (see section 2.2.1). In + an auto-vacuum database, this is the numerically largest + root-page number in the database. Additionally, all pages that + occur before this page in the database are either B-Tree root + pages, pointer-map pages or the locking page. + +
    Auto-vacuum database + Each database is either an auto-vacuum database or a non auto-vacuum + database. Auto-vacuum databases feature pointer-map pages (section + 2.5) and have a non-zero value stored + as a 4-byte big-endian integer at offset 52 of the database header (section + 2.2.1). +
    B-Tree + A B-Tree is a tree structure optimized for offline storage. The table + and index data in an SQLite database file is stored in B-Tree + structures. + +
    B-Tree cell + Each database page that is part of a B-Tree structure contains zero + or more B-Tree cells. A B-Tree cell contains a single B-Tree key value + (either an integer or database record) and optionally an associated + database record value. + +
    B-Tree page + A database page that is part of a B-Tree tree structure (not an + overflow page). + +
    (B-Tree) page header + The 8 (leaf pages) or 12 (internal node pages) byte header that + occurs at the start of each B-Tree page. + +
    Cell content area + The area within a B-Tree page in which the B-Tree cells are stored. + +
    (Database) text encoding + The text encoding used for all text values in the database file. One + of UTF-8, big-endian UTF-16 and little-endian UTF-16. The database + text encoding is defined by a 4 byte field stored at byte offset + 56 of the database header (see section 2.2.1). + +
    Database header + The first 100 bytes of an SQLite database image constitute the + database header. See section 2.2.1 for details. + + +
    (Database) page size + An SQLite database file is divided into one or more pages of + page-size bytes each. + +
    Database record + A database record is a blob of data containing the serialized + representation of an ordered list of one or more SQL values. + +
    Database record header + The first part of each database record contains the database + record header. It encodes the types and lengths of values stored + in the record (see section 2.3.2). + +
    Database record data area + Following the database record header in each database record is + the database record data area. It contains the actual data (string + content, numeric value etc.) of all values in the record + (see section 2.3.2). + +
    Default pager cache size + A 32-bit integer field stored at byte offset 48 of the database file + header (see section 2.2.1). + +
    (Database) usable page size + The number of bytes of each database page that is usable. This + is the page-size less the number of bytes left unused at the end + of each page. The number of bytes left unused is governed by the + value stored at offset 20 of the database header (see section + 2.2.1). + +
    File format read version + Single byte field stored at byte offset 20 of the database header + (see section 2.2.1). + +
    File format write version + Single byte field stored at byte offset 19 of the database header + (see section 2.2.1). + +
    File change counter + A 32-bit integer field stored at byte offset 24 of the database file + header (see section 2.2.1). Normally, SQLite + increments this value each time it commits a transaction. + +
    Fragment + A block of 3 or less bytes of unused space within the cell content + area of a B-Tree page. + +
    Free block + A block of 4 or more bytes of unused space within the cell content + area of a B-Tree page. + +
    Free block list + The linked list of all free blocks on a single B-Tree page (see + section 2.3.3.3). + +
    Free page + A page that is not currently being used to store any database data + or meta data. Part of the free-page list. + +
    Free page list + A data structure within an SQLite database file that links all the + free-pages together. + +
    Index B-Tree + One of two variants on the B-Tree data structure used within SQLite + database files. An index B-Tree (section 2.3.3) + uses database records as keys. + +
    Incremental Vacuum flag + A 32-bit integer field stored at byte offset 64 of the database file + header (see section 2.2.1). In auto-vacuum + databases, if this field is non-zero then the database is not + automatically compacted at the end of each transaction. + +
    Locking page + The database page that begins at the 1GB (230 byte) + boundary. This page is always left unused. + +
    Logical database + An SQLite database file is a serialized representation of a logical + database. A logical database consists of the SQL database schema, + the content of the various tables in the database, and assorted + database properties that may be set by the user (auto-vacuum, + page-size, user-cookie value etc.). +
    Non-auto-vacuum database + Any database that is not an auto-vacuum database. A non-auto-vacuum + database contains no pointer-map pages and has a zero value stored + in the 4-byte big-endian integer field at offset 52 of the + database header (section 2.2.1). +
    Overflow chain + A linked list of overflow pages across which a single (large) + database record is stored (see section + 2.3.5). + +
    Overflow page + If a B-Tree cell is too large to store within a B-Tree page, a + portion of it is stored using a chain of one or more overflow pages + (see section 2.3.5). + +
    Pointer-map page + A database page used to store meta data only present in auto-vacuum + databases (see section 2.5). + +
    Right child page + Each internal B-Tree node page has one or more child pages. The + rightmost of these (the one containing the largest key values) is + known as the right child page. + +
    Root page + A root page is a database page used to store the root node of a + B-Tree data structure. + +
    Schema layer file format + An integer between 1 and 4 stored as a 4 byte big-endian integer at + offset 44 of the database header (section 2.2.1). + Certain file format constructions may only be present in databases + with a certain minimum schema layer file format value. + +
    Schema table + The table B-Tree with root-page 1 used to store database records + describing the database schema. Accessible as the "sqlite_master" + table from within SQLite. + +
    Schema version + A 32-bit integer field stored at byte offset 40 of the database file + header (see section 2.2.1). Normally, SQLite + increments this value each time it modifies the database schema. +
    Table B-Tree + One of two variants on the B-Tree data structure used within SQLite + database files. A table B-Tree (section 2.3.4) + uses 64 bit integers as key values and stores an associated database + record along with each key value. + +
    User cookie + A 32-bit integer field stored at byte offset 60 of the database file + header (see section 2.2.1). Applications may read and + write this field for any purpose. +
    Variable Length Integer + A format used for storing 64-bit signed integer values in SQLite + database files. Consumes between 1 and 9 bytes of space, depending + on the precise value being stored. A decoding sketch appears at the + end of this glossary. + +
    Well formed database file + An SQLite database file that meets all the criteria laid out in + section 2 of this document. + +
    Database image + A serialized blob of data representing an SQLite database. The + contents of a database file are usually a valid database image. + +
    Database file + A database file is a file on disk that usually, but not always, + contains a well-formed database image. + +
    Journal file + For each database file, there may exist an associated journal file + stored in the same file-system directory. Under some circumstances, + the database image may be distributed between the database and journal + files (instead of being stored wholly within the database file). + +
    Page size + An SQLite database image is divided into fixed size pages, each + "page size" bytes in size. + +
    Sector size + In this document, the term "sector size" refers to a field in a + journal header which determines some aspects of the layout of the + journal file. It is set by SQLite (or a compatible) application + based on the properties of the underlying file-system that the journal + file is being written to. + +
    Journal Section + A journal file may contain multiple journal sections. A journal section + consists of a journal header followed by zero or more journal records. + +
    Journal Header + A journal header is a control block sector-size bytes in size that + appears at the start of each journal section within a journal file. + +
    Journal Record + A journal record is a structure used to store data for a single + database page within a journal file. A single journal file may contain + many journal records. + +
    Master Journal Pointer + A master journal pointer is a structure that may appear at the end of + a journal file. It contains a full file-system path identifying + a master-journal file. + +
    Database File-System Representation + A file or files within the file-system used to store an SQLite + database image. + + +
    Database user-cookie + An SQLite database contains a single 32-bit signed integer field known + as the database user-cookie. Applications may read and write this field + for any purpose. + + +
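    As a convenience for readers, here is a short, unofficial C sketch of
    decoding the variable length integer format mentioned in the glossary
    above: each of the first eight bytes contributes seven bits (most
    significant group first) with the high bit acting as a continuation
    flag, and a ninth byte, if present, contributes a full eight bits.

      #include <stdint.h>

      /* Decode an SQLite variable length integer stored at p[].  Writes the
      ** decoded value to *pVal and returns the number of bytes consumed (1-9).
      */
      int decode_varint(const unsigned char *p, int64_t *pVal){
        uint64_t v = 0;
        int i;
        for(i=0; i<8; i++){
          v = (v<<7) | (p[i] & 0x7f);
          if( (p[i] & 0x80)==0 ){
            *pVal = (int64_t)v;
            return i+1;
          }
        }
        v = (v<<8) | p[8];   /* the ninth byte contributes all 8 bits */
        *pVal = (int64_t)v;
        return 9;
      }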
    + + + + + + +

    2 Database Image Format Details

    + + +

    + This section describes the various fields and sub-structures that make up + the format used by SQLite to serialize a logical SQL database. A serialized + logical database is referred to as a database image. Section + 3 describes the way a database image is stored + in the file-system. Most of the time a database image is stored in a single + file, the database file. So while reading this section, the term database + image may be understood to mean "contents of the database file". However, + it is important to remember that there are exceptions to this. + +

    + This section does not contain requirements governing the behaviour of any + software system. Instead, along with the plain language description of the + file format are a series of succinct, testable statements describing the + properties of "well-formed SQLite database files". Some of these + statements describe the contents of the database file in terms of the + contents of the logical SQL database that it is a serialization of. e.g. + "For each SQL table in the database, the database file shall...". The + contents of a logical database consist of: + +

      +
    • The database schema: The set of database tables, virtual tables, + indexes, triggers and views stored in the database. + +
    • The database contents: The set of tuples (rows) stored in + each database table. + +
    • Other database properties, as follows: +
        +
      1. The page-size of the database. +
      2. The text-encoding of the database. +
      3. A flag indicating whether or not the database is an auto-vacuum + database. +
      4. The value of the database user-cookie. +
      5. If the database is an auto-vacuum database, a flag indicating + whether or not the database is in incremental vacuum mode or not. +
      6. The default page cache size in pages to use with the database (an + integer field). +
      +
    + +

    + Of the six database properties enumerated above, the values taken by the + initial three dramatically affect the structure of the database image. Any + software system that handles SQLite database images will need to understand + and interpret them. Properties 4 to 6 may be considered advisory. Although + properties 5 and 6 modify the operation of the SQLite library in + well-defined manners, an alternative SQLite database client is free to + interpret them differently, or not interpret them at all. + +

    + The concept of a logical database and its contents should be defined + properly in some requirements document so that it can be referenced from + here and other places. The definition will be something like the list of + bullet points above. + +

    + Many of the numbered requirements in the following sub-sections describe + the relationship between the contents of the logical database, as itemized + above, and the contents of the serialized database image. Others describe + the relationships between various database image substructures, invariants + that are true for all well-formed database images. + +

    + A well-formed SQLite database image is defined as an image for which + all of the statements itemized as requirements within this section + are true. mention the requirements numbering scheme + here. A software system that wishes to interoperate with other + systems using the SQLite database image format should only ever + output well-formed SQLite databases. In the case of SQLite itself, + the system should ensure that the database file contains a well-formed + database image at the conclusion of each transaction. +

    2.1 Image Format Overview

    + +

    + A B-Tree is a data structure designed for offline storage of a set of + unique key values. It is structured so as to support fast querying + for a single key or range of keys. As implemented in SQLite, each + entry may be associated with a blob of data that is not part of the + key. For the canonical introduction to the B-Tree and its variants, + refer to reference [1]. The B-Tree + implementation in SQLite also adopts some of the enhancements + suggested in [2]. +

    + An SQLite database image contains one or more B-Tree structures. Each + B-Tree structure stores the data for a single database table or + index. Hence each database file contains a single B-Tree to store + the contents of the sqlite_master table, and one B-Tree + for each database table or index created by the user. If the database + uses auto-increment integer primary keys, then the database file + also contains a B-Tree to store the contents of the automatically + created sqlite_sequence table. +

    + SQLite uses two distinct variants of the B-Tree structure. One variant, + hereafter referred to as a "table B-Tree" uses signed 64-bit integer + values as keys. Each entry has an associated variable length blob of + data used to store a database record (see section + 2.3.2). Each SQLite database file contains one + table B-Tree for the schema table and one table B-Tree for each + additional database table created by the user. If it is present, the + sqlite_sequence table is also stored as a table B-Tree.

    + A database record is a blob of data containing an ordered list of + SQL values (integers, real numbers, NULL values, blobs or strings). + For each row in each table in the logical database, there is an + entry in the corresponding table B-Tree structure in the database + image. The entry's integer key value is the same as the SQL "rowid" or + "integer primary key" field of the table row. The associated database + record is made up of the row's column values, in declaration (CREATE + TABLE) order.

    + The other B-Tree variant used by SQLite, hereafter an "index B-Tree" + uses database records (section 2.3.2) as keys. + For this kind of B-Tree, there is no additional data associated with + each entry. SQLite databases contain an index B-Tree for each database + index created by the user. Database indexes may be created by CREATE + INDEX statements, or by UNIQUE or PRIMARY KEY (but not INTEGER PRIMARY + KEY) clauses added to CREATE TABLE statements. +

    + Index B-Tree structures contain one entry for each row in the + associated table in the logical SQL database. The database record used + as the key consists of the row's value for each of the indexed columns in + declaration (CREATE INDEX) order, followed by the row's "rowid" or + "integer primary key" column value. +

    + For example, the following SQL script: +

    +      CREATE TABLE t1(a INTEGER PRIMARY KEY, b, c, d, e);
    +      CREATE INDEX i1 ON t1(d, c);
    +
    +      INSERT INTO t1 VALUES(1, 'triangle', 3, 180, 'green');
    +      INSERT INTO t1 VALUES(2, 'square',   4, 360, 'gold');
    +      INSERT INTO t1 VALUES(3, 'pentagon', 5, 540, 'grey');
    +      ...
    +

    + Creates a database image containing three B-Tree structures: one table + B-Tree to store the sqlite_master table, one table B-Tree to + store table "t1", and one index B-Tree to store index "i1". The + B-Tree structures created for the user table and index are populated + as shown in figure 1. + + +

    + + +

    Figure 1 - Example B-Tree Data +

    + + +

    + The following sections and sub-sections describe precisely the format + used to serialize the B-Tree structures within an SQLite database image. + +

    2.2 Global Structure

    + + +

    2.2.1 Database Header

    + +

    + An SQLite database image begins with a 100-byte database header. The database + header consists of a well known 16-byte sequence followed by a series of + 1, 2 and 4 byte unsigned integers. All integers in the database header (as + well as the rest of the database file) are stored in big-endian format. + +

    + The well known 16-byte sequence that begins every SQLite database file + is: +

    +          0x53 0x51 0x4c 0x69 0x74 0x65 0x20 0x66 0x6f 0x72 0x6d 0x61 0x74 0x20 0x33 0x00
    + +

    + Interpreted as UTF-8 encoded text, this byte sequence corresponds + to the string "SQLite format 3" followed by a nul-terminator byte. + +

    The first 16 bytes of a well-formed database file shall contain +the UTF-8 encoding of the string "SQLite format 3" followed by a +single nul-terminator byte.

    + +

    + The 1, 2 and 4 byte unsigned integers that make up the rest of the + database header are described in the following table. + + +
    Byte Range Byte Size Description Reqs +
    16..17 2 + Database page size in bytes. See section + 2.2.2 for details. + H30190 + +
    18 1 +

    + File-format "write version". Currently, this field + is always set to 1. If a value greater than 1 is read by SQLite, + then the library will only open the file for read-only access. + +

    + This field and the next one are intended to be used for + forwards compatibility, should the need ever arise. If in the + future a version of SQLite is created that uses a file format + that may be safely read but not written by older versions of + SQLite, then this field will be set to a value greater than 1 + to prevent older SQLite versions from writing to a file that + uses the new format. +

    H30040 + +
    19 1 +

    + File-format "read version". Currently, this + field is always set to 1. If a value greater than 1 is read + by SQLite, then the library will refuse to open the database + +

    + Like the "write version" described above, this field exists + to facilitate some degree of forwards compatibility, in case + it is ever required. If a version of SQLite created in the + future uses a file format that may not be safely read by older + SQLite versions, then this field will be set to a value greater + than 1. +

    H30040 + +
    20 1 + Number of bytes of unused space at the end of each database + page. Usually this field is set to 0. If it is non-zero, then + it contains the number of bytes that are left unused at the + end of every database page (see section + 2.2.2 for a description of a + database page). + H30040 + +
    21 1 + Maximum fraction of an index tree page to use for + embedded content. This value is used to determine the maximum + size of a B-Tree cell to store as embedded content on a + page that is part of an index B-Tree. Refer to section + 2.3.3.4 for details. + H30040 + +
    22 1 + Minimum fraction of an index B-Tree page to use for + embedded content when an entry uses one or more overflow pages. + This value is used to determine the portion of a B-Tree cell + that requires one or more overflow pages to store as embedded + content on a page that is part of an index B-Tree. Refer to + section 2.3.3.4 for details. + H30040 + +
    23 1 + Minimum fraction of an table B-Tree leaf page to use for + embedded content when an entry uses one or more overflow pages. + This value is used to determine the portion of a B-Tree cell + that requires one or more overflow pages to store as embedded + content on a page that is a leaf of a table B-Tree. Refer to + section 2.3.4.3 for details. + H30040 + +
    24..27 4 +

    + The file change counter. Each time a database transaction is + committed, the value of the 32-bit unsigned integer stored in + this field is incremented. +

    + SQLite uses this field to test the validity of its internal + cache. After unlocking the database file, SQLite may retain + a portion of the file cached in memory. However, since the file + is unlocked, another process may use SQLite to modify the + contents of the file, invalidating the internal cache of the + first process. When the file is relocked, the first process can + check if the value of the file change counter has been modified + since the file was unlocked. If it has not, then the internal + cache may be assumed to be valid and may be reused. +

    H33040 + +
    32..35 4 + Page number of first freelist trunk page. + For more details, refer to section 2.4. + H31320 + +
    36..39 4 + Number of free pages in the database file. + For more details, refer to section 2.4. + H31310 + +
    40..43 4 + The schema version. Each time the database schema is modified (by + creating or deleting a database table, index, trigger or view) + the value of the 32-bit unsigned integer stored in this field + is incremented. + H33050 + +
    44..47 4 +

    + Schema layer file-format. This value is similar to the + "read-version" and "write-version" fields at offsets 18 and 19 + of the database header. If SQLite encounters a database + with a schema layer file-format value greater than the file-format + that it understands (currently 4), then SQLite will refuse to + access the database. +

    + Usually, this value is set to 1. However, if any of the following + file-format features are used, then the schema layer file-format + must be set to the corresponding value or greater: +

      +
    1. Implicit NULL values at the end of table records + (see section 2.3.4.1). +
    2. Implicit default (non-NULL) values at the end of table + records (see section 2.3.4.1). +
    3. Descending indexes (see section + 2.3.3.2) and Boolean values + in database records (see section 2.3.2, + serial types 8 and 9). +
    + +

    + Turns out SQLite can be tricked into violating this. If you delete + all tables from a database and then VACUUM the database, the + schema layer file-format field somehow gets set to 0. +

    H30120 + +
    48..51 4 + Default pager cache size. This field is used by SQLite to store + the recommended pager cache size to use for the database. + H30130 + +
    52..55 4 + For auto-vacuum capable databases, the numerically largest + root-page number in the database. Since page 1 is always the + root-page of the schema table (section 2.2.3), + this value is always non-zero for auto-vacuum databases. For + non-auto-vacuum databases, this value is always zero. + H30140, H30141 + +
    56..59 4 + (constant) Database text encoding. A value of 1 means all + text values are stored using UTF-8 encoding. 2 indicates + little-endian UTF-16 text. A value of 3 means that the database + contains big-endian UTF-16 text. + H30150 + +
    60..63 4 + The user-cookie value. A 32-bit integer value available to the + user for read/write access. + H30160 + +
    64..67 4 + The incremental-vacuum flag. In non-auto-vacuum databases this + value is always zero. In auto-vacuum databases, this field is + set to 1 if "incremental vacuum" mode is enabled. If incremental + vacuum mode is not enabled, then the database file is reorganized + so that it contains no free pages (section + 2.4) at the end of each database + transaction. If incremental vacuum mode is enabled, then the + reorganization is not performed until explicitly requested + by the user. + H30171 + +
    + +

    + The four-byte block beginning at offset 28 is unused, as is the + 32-byte block beginning at offset 68.

    + +

    + The following requirements state that certain database header + fields must contain defined constant values, even though the sqlite + database file format is designed to allow various values. These fields + were intended to be flexible when the SQLite database image format + was designed, but it has since been determined that it is faster and + safer to require these parameters to be populated with well-known + values. Specifically, in a well-formed database, the following header + fields are always set to well-known values: + +

      +
    • The file-format write version (single byte field, byte offset 18), + is always set to 0x01. +
    • The file-format read version (single byte field, byte offset 19), + is always set to 0x01. +
    • The number of unused bytes on each page (single byte field, byte + offset 20), is always set to 0x00. +
    • The maximum fraction of an index B-Tree page to use for embedded content + (single byte field, byte offset 21), is always set to 0x40.
    • + The minimum fraction of an index B-Tree page to use for embedded + content when using overflow pages (single byte field, byte + offset 22), is always set to 0x20. +
    • The minimum fraction of a table B-Tree page to use for embedded + content when using overflow pages (single byte field, byte offset 23), + is always set to 0x20. +
    + +

    + The following requirement encompasses all of the above. + +

    The 6 bytes beginning at byte offset 18 of a well-formed database +image shall contain the values 0x01, 0x01, 0x00, 0x40, 0x20 and +0x20, respectively.

    + +

    + Section 2 identifies six persistent + user-visible properties of an SQLite database. The following + requirements describe the way in which these properties are stored. + +

    The 2-byte big-endian unsigned integer field at byte offset 16 of a +well-formed database image shall be set to the value of the database +page-size.

    +

    The page-size of an SQLite database in bytes shall be an integer power +of 2 between 512 and 32768, inclusive.
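    The following C sketch shows how a reader might apply the header
    requirements above: it verifies the 16-byte magic string and decodes the
    big-endian page-size field at byte offset 16. The buffer aHdr is assumed
    to already contain at least the first 18 bytes of the database image.

      #include <string.h>

      /* Returns 0 and writes the page size to *pPageSize if aHdr begins with
      ** a plausible SQLite database header; returns non-zero otherwise.
      */
      int read_page_size(const unsigned char *aHdr, int *pPageSize){
        static const unsigned char aMagic[16] = "SQLite format 3";
        int pgsz;
        if( memcmp(aHdr, aMagic, 16)!=0 ) return 1;        /* wrong magic string */
        pgsz = (aHdr[16]<<8) | aHdr[17];                   /* big-endian 16-bit  */
        if( pgsz<512 || pgsz>32768 || (pgsz & (pgsz-1))!=0 ) return 1;
        *pPageSize = pgsz;
        return 0;
      }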

    +

    The 4 byte big-endian unsigned integer field at byte offset 56 of a +well-formed database image shall be set to 1 if the database text-encoding +is UTF-8, 2 if the database text-encoding is little-endian UTF-16, and 3 +if the database text-encoding is big-endian UTF-16.

    +

    If the database is not an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the value 0.

    +

    If the database is an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the numerically largest root-page number +of any table or index B-Tree within the database image.

    +

    The 4-byte big-endian unsigned integer field at byte offset 60 of a +well-formed database image shall be set to the value of the +database user-cookie.

    +

    The 4-byte big-endian unsigned integer field at byte offset 64 of a +well-formed database image shall be set to the value of the database +incremental-vacuum flag.

    +

    The value of the incremental-vacuum flag of an SQLite database shall be +either 0 or 1.

    +

    The 4-byte big-endian unsigned integer field at byte offset 48 of a +well-formed database image shall be set to the value of the +database default page-cache size.

    + +

    + The following requirement describes the valid range of values for the + schema layer file format field. + +

    The 4-byte big-endian signed integer field at byte offset 44 of a +well-formed database image, the schema layer file format field, +shall be set to an integer value between 1 and 4, inclusive.

    + +

    + See the note regarding the schema file format version above. It turns + out this field may also be set to 0 by SQLite. + +
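
    As a concrete illustration of the header layout described in this section, the following Python sketch reads the 100-byte file header and decodes the fields discussed above. It is a minimal sketch, not part of the specification: the function name and dictionary keys are invented for illustration, and integer signedness follows the field descriptions given above.

        import struct

        def read_header_fields(path):
            # Read the 100-byte database file header from the start of page 1.
            with open(path, "rb") as f:
                hdr = f.read(100)

            # Bytes 18..23 hold constant values in a well-formed database.
            if hdr[18:24] != bytes([0x01, 0x01, 0x00, 0x40, 0x20, 0x20]):
                raise ValueError("bytes 18..23 do not hold the required constants")

            return {
                # 2-byte big-endian page size at offset 16 (power of 2, 512..32768).
                "page_size":            struct.unpack(">H", hdr[16:18])[0],
                # 4-byte big-endian schema layer file format at offset 44 (1..4).
                "schema_file_format":   struct.unpack(">i", hdr[44:48])[0],
                # 4-byte big-endian default page-cache size at offset 48.
                "default_cache_size":   struct.unpack(">I", hdr[48:52])[0],
                # 4-byte big-endian auto-vacuum last root-page field at offset 52.
                "autovacuum_last_root": struct.unpack(">I", hdr[52:56])[0],
                # 4-byte big-endian text encoding at offset 56 (1, 2 or 3).
                "text_encoding":        struct.unpack(">I", hdr[56:60])[0],
                # 4-byte big-endian user-cookie at offset 60.
                "user_cookie":          struct.unpack(">I", hdr[60:64])[0],
                # 4-byte big-endian incremental-vacuum flag at offset 64 (0 or 1).
                "incremental_vacuum":   struct.unpack(">I", hdr[64:68])[0],
            }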

    2.2.2 Pages and Page Types

    + +

    + The entire database file is divided into pages, each page consisting + of page-size bytes, where page-size is the 2-byte + integer value stored at offset 16 of the database header (see above). + The page-size is always a power of two between 512 + (2^9) and 32768 (2^15). SQLite database files + always consist of an exact number of pages. +

    + Pages are numbered beginning from 1, not 0. Page 1 consists of + the first page-size bytes of the database file. The database header + described in the previous section consumes the first 100 bytes of page + 1. +

    + Each page of the database file is one of the following: +

      +
    • A B-Tree page. B-Tree pages are part of the tree + structures used to store database tables and indexes. +
    • An overflow page. Overflow pages are used by particularly + large database records that do not fit on a single B-Tree page. +
    • A free page. Free pages are pages within the database file + that are not being used to store meaningful data. +
    • A "pointer-map" page. In auto-vacuum capable databases + (databases for which the 4 byte big-endian integer stored at + byte offset 52 of the database header is non-zero) some pages are + permanently designated "pointer-map" pages. See section + 2.5 for details. +
    • The locking page. The database page that starts at + byte offset 2^30, if it is large enough to contain + such a page, is always left unused. +
    + +

    The size of a well formed database image shall be an integer +multiple of the database page size.

    +

    Each page of a well formed database image shall be exactly one of a +B-Tree page, an overflow page, a free page, a +pointer-map page or the locking page.

    +

    The database page that starts at byte offset 2^30, the +locking page, shall never be used for any purpose.
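
    Since the locking page is defined by the fixed byte offset 2^30 rather than by a fixed page number, its page number depends on the page size. A minimal sketch of the computation (assuming the 1-based page numbering described above; databases smaller than 2^30 bytes simply contain no locking page):

        def locking_page_number(page_size):
            # The locking page is the page that starts at byte offset 2^30.
            # Page N starts at byte offset (N - 1) * page_size, so:
            return (1 << 30) // page_size + 1

        # With 1024-byte pages, the locking page is page 1048577.
        assert locking_page_number(1024) == 1048577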

    + + +

    2.2.3 The Schema Table

    + +

    + Apart from being the page that contains the file-header, page 1 of + a database image is special because it is the root page of the + B-Tree structure that contains the schema table data. From the SQL + level, the schema table is accessible via the name "sqlite_master". +

    + The exact format of the B-Tree structure and the meaning of the term + "root page" is discussed in section 2.3. + For now, it is sufficient to know that the B-Tree structure is a + data structure that stores a set of records. Each record is an + ordered set of SQL values (the format of which is described in + section 2.3.2). Given the root page number of + the B-Tree structure (which is well known for the schema table), it + is possible to iterate through the set of records. +

    + The schema table contains a record for each SQL table (including + virtual tables) except for sqlite_master, and for each index, trigger + and view in the logical database. There is also an entry for each + UNIQUE or PRIMARY KEY clause present in the definition of a database + table. Each record in the schema table contains exactly 5 values, in + the following order: + + +
    FieldDescription +
    Schema item type. + A string value. One of "table", "index", "trigger" or "view", + according to the schema item type. Entries associated with + UNIQUE or PRIMARY KEY clauses have this field set to "index". +
    Schema item name. + A string value. The name of the database schema item (table, + index, trigger or view) associated with this record, if any. + Entries associated with UNIQUE or PRIMARY KEY clauses have + this field set to a string of the form + "sqlite_autoindex_<name>_<idx>" where <name> + is the name of the SQL table and <idx> is an integer + value. + +
    Associated table name. + A string value. For "table" + or "view" records this is a copy of the second (previous) value. + For "index" and "trigger" records, this field is set to the name + of the associated database table. +
    The "root page" number. + For "trigger" and "view" records, as well as "table" records + associated with virtual tables, this is set to integer value 0. + For other "table" and "index" records (including those associated + with UNIQUE or PRIMARY KEY clauses), this field contains the root + page number (an integer) of the B-Tree structure that contains + the table or index data. +
    The SQL statement. + A string value. The SQL statement used to create the schema + item (i.e. the complete text of an SQL "CREATE TABLE" + statement). This field contains an empty string for table + entries associated with PRIMARY KEY or UNIQUE clauses. + Refer to some document that describes these + SQL statements more precisely. +
    +

    + Logical database schema items other than non-virtual tables and indexes + (including indexes created by UNIQUE or PRIMARY key constraints) do not + require any other data structures to be created within the database + file. + +

    + Tables and indexes on the other hand, are represented within the + database file by both an entry in the schema table and a B-Tree + structure stored elsewhere in the file. The specific B-Tree associated + with each database table or index is identified by its root page + number, which is stored in the 4th field of the schema table record. + In a non-auto-vacuum database, the B-Tree root pages may be stored + anywhere within the database file. For an auto-vacuum database, all + B-Tree root pages must at all times form a contiguous set starting + at page 3 of the database file, skipping any pages that are required to + be used as pointer-map pages (see section + 2.5). +

    + As noted in section 2.2.1, in an auto-vacuum + database the page number of the page immediately following the + final root page in the contiguous set of root pages is stored + as a 4 byte big-endian integer at byte offset 52 of the database + header. Unless that page is itself a pointer-map page, in which + case the page number of the page following it is stored instead. + +

    + For example, if the schema of a logical database is created using + the following SQL statements: +

    +          CREATE TABLE abc(a, b, c);
    +          CREATE INDEX i1 ON abc(b, c);
    +          CREATE TABLE main.def(a PRIMARY KEY, b, c, UNIQUE(b, c));
    +          CREATE VIEW v1 AS SELECT * FROM abc;
    +      
    +

    + Then the schema table would contain a total of 6 records, as follows: + + +
    Field 1Field 2Field 3Field 4Field 5 +
    table abc abc 2 CREATE TABLE abc(a, b, c) +
    index i1 abc 3 CREATE INDEX i1 ON abc(b, c) +
    table def def 4 CREATE TABLE def(a PRIMARY KEY, b, c, UNIQUE(b, c)) +
    index sqlite_autoindex_def_1 def 5 +
    index sqlite_autoindex_def_2 def 6 +
    view v1 v1 0 CREATE VIEW v1 AS SELECT * FROM abc +
    + +
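
    The five schema-table fields correspond directly to the columns exposed at the SQL level as sqlite_master. The following sketch uses Python's built-in sqlite3 module and an in-memory database to reproduce the example schema above and print the resulting schema-table records (the root-page numbers assigned by a live library may differ from the example values shown in the table):

        import sqlite3

        con = sqlite3.connect(":memory:")
        con.executescript("""
            CREATE TABLE abc(a, b, c);
            CREATE INDEX i1 ON abc(b, c);
            CREATE TABLE def(a PRIMARY KEY, b, c, UNIQUE(b, c));
            CREATE VIEW v1 AS SELECT * FROM abc;
        """)
        # The columns map onto the five record fields described above:
        # type, name, tbl_name, rootpage and sql.
        for row in con.execute(
                "SELECT type, name, tbl_name, rootpage, sql FROM sqlite_master"):
            print(row)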

    In a well-formed database file, the portion of the first +database page not consumed by the database file-header (all but the +first 100 bytes) contains the root node of a table B-Tree, +the schema table.

    +

    All records stored in the schema table contain exactly five +fields.

    + +

    The following requirements describe "table" records. + +

    For each SQL table in the database apart from the schema table itself +("sqlite_master"), the schema table of a well-formed +database file contains an associated record.

    +

    The first field of each schema table record associated with an +SQL table shall be the text value "table".

    +

    The second field of each schema table record associated with an +SQL table shall be a text value set to the name of the SQL table.

    +

    In a well-formed database file, the third field of all +schema table records associated with SQL tables shall contain +the same value as the second field.

    +

    In a well-formed database file, the fourth field of all +schema table records associated with SQL tables that are not +virtual tables contains the page number (an integer value) of the root +page of the associated table B-Tree structure within the +database file.

    +

    If the associated database table is a virtual table, the fourth +field of the schema table record shall contain the integer +value 0 (zero).

    +

    In a well-formed database, the fifth field of all schema table +records associated with SQL tables shall contain a "CREATE TABLE" +or "CREATE VIRTUAL TABLE" statement (a text value). The details +of the statement shall be such that executing the statement +would create a table of precisely the same name and schema as the +existing database table.

    + +

    The following requirements describe "implicit index" records. + +

    For each PRIMARY KEY or UNIQUE constraint present in the definition +of each SQL table in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index", and the second field set to a text value containing a +string of the form "sqlite_autoindex_<name>_<idx>", where +<name> is the name of the SQL table and <idx> is an +integer value.

    +

    In a well-formed database, the third field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the name of the table to which the constraint applies (a +text value).

    +

    In a well-formed database, the fourth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the page number (an integer value) of the root page of the +associated index B-Tree structure.

    +

    In a well-formed database, the fifth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain an SQL NULL value.

    + +

    The following requirements describe "explicit index" records. + +

    For each SQL index in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index" and the second field set to a text value containing the +name of the SQL index.

    +

    In a well-formed database, the third field of all schema table +records associated with SQL indexes shall contain the name of the +SQL table that the index applies to.

    +

    In a well-formed database, the fourth field of all schema table +records associated with SQL indexes shall contain the page number +(an integer value) of the root page of the associated index B-Tree +structure.

    +

    In a well-formed database, the fifth field of all schema table +records associated with SQL indexes shall contain an SQL "CREATE +INDEX" statement (a text value). The details of the statement shall +be such that executing the statement would create an index of +precisely the same name and content as the existing database index.

    + +

    The following requirements describe "view" records. + +

    For each SQL view in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "view" and the second field set to a text value containing the +name of the SQL view.

    +

    In a well-formed database, the third field of all schema table +records associated with SQL views shall contain the same value as +the second field.

    +

    In a well-formed database, the fourth field of all schema table +records associated with SQL views shall contain the integer value 0.

    +

    In a well-formed database, the fifth field of all schema table +records associated with SQL views shall contain an SQL "CREATE +VIEW" statement (a text value). The details of the statement shall +be such that executing the statement would create a view of +precisely the same name and definition as the existing database view.

    + +

    The following requirements describe "trigger" records. + +

    For each SQL trigger in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "trigger" and the second field set to a text value containing the +name of the SQL trigger.

    +

    In a well-formed database, the third field of all schema table +records associated with SQL triggers shall contain the name of the +database table or view to which the trigger applies.

    +

    In a well-formed database, the fourth field of all schema table +records associated with SQL triggers shall contain the integer value 0.

    +

    In a well-formed database, the fifth field of all schema table +records associated with SQL triggers shall contain an SQL "CREATE +TRIGGER" statement (a text value). The details of the statement shall +be such that executing the statement would create a trigger of +precisely the same name and definition as the existing database trigger.

    + +

    The following requirements describe the placement of B-Tree root + pages in auto-vacuum databases. + +

    In an auto-vacuum database, all pages that occur before the page +number stored in the auto-vacuum last root-page field +of the database file header (see H30140) must be either B-Tree root +pages, pointer-map pages or the locking page.

    +

    In an auto-vacuum database, no B-Tree root pages may occur +on or after the page number stored in the auto-vacuum last root-page field +of the database file header (see H30140).

    + + +

    2.3 B-Tree Structures

    + +

    + A large part of any SQLite database file is given over to one or more + B-Tree structures. A single B-Tree structure is stored using one or more + database pages. Each page contains a single B-Tree node. + The pages used to store a single B-Tree structure need not form a + contiguous block. The page that contains the root node of a B-Tree + structure is known as the "root page". + +

    + SQLite uses two distinct variants of the B-Tree structure: +

      +
    • The table B-Tree, which uses 64-bit integer values for keys. + In a table B-Tree, an associated database record (section + 2.3.2) is stored along with each entry. Table + B-Tree structures are described in detail in section + 2.3.4. +
    • The index B-Tree, which uses database records as keys. Index + B-Tree structures are described in detail in section + 2.3.3. +
    + +

    As well as the schema table, a well-formed database file +contains N table B-Tree structures, where N is the +number of non-virtual tables in the logical database, excluding the +sqlite_master table but including sqlite_sequence and other system +tables.

    +

    A well-formed database file contains N index B-Tree structures, +where N is the number of indexes in the logical database, +including indexes created by UNIQUE or PRIMARY KEY clauses in the +declaration of SQL tables.

    + +

    2.3.1 Variable Length Integer Format

    + +

    + In several parts of the B-Tree structure, 64-bit twos-complement signed + integer values are stored in the "variable length integer format" + described here. +

    + A variable length integer consumes from one to nine bytes of space, + depending on the value stored. Seven bits are used from each of + the first eight bytes present, and, if present, all eight from + the final ninth byte. Unless the full nine byte format is used, the + serialized form consists of all bytes up to and including the first + byte with the 0x80 bit cleared. +

    + The number of bytes present depends on the position of the most + significant set bit in the 64-bit word. Negative numbers always have + the most significant bit of the word (the sign bit) set and so are + always encoded using the full nine bytes. Positive integers may be + encoded using less space. The following table shows the 9 different + length formats available for storing a variable length integer + value. + + +
    BytesValue RangeBit Pattern +
    17 bit0xxxxxxx +
    214 bit1xxxxxxx 0xxxxxxx +
    321 bit1xxxxxxx 1xxxxxxx 0xxxxxxx +
    428 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 0xxxxxxx +
    535 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 0xxxxxxx +
    642 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 0xxxxxxx +
    749 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 0xxxxxxx +
    856 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 0xxxxxxx +
    964 bit1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx 1xxxxxxx xxxxxxxx +
    +

    + When using the full 9 byte representation, the first byte contains + the 7 most significant bits of the 64-bit value. The final byte of + the 9 byte representation contains the 8 least significant bits of + the 64-bit value. When using one of the other representations, the + final byte contains the 7 least significant bits of the 64-bit value. + The second-to-last byte, if present, contains the next 7 least significant + bits of the value, and so on. The significant bits of the 64-bit + value for which no storage is provided are assumed to be zero. +

    + When encoding a variable length integer, SQLite usually selects the + most compact representation that provides enough storage to accommodate + the most significant set bit of the value. This is not required, + however; using more bytes than strictly necessary when encoding + an integer is also valid. + + +
    DecimalHexadecimal Variable Length Integer +
    43 0x000000000000002B 0x2B +
    200815 0x000000000003106F 0x8C 0xA0 0x6F +
    -1 0xFFFFFFFFFFFFFFFF + 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF +
    -78506 0xFFFFFFFFFFFECD56 + 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF 0xFD 0xCD 0x56 +
    + +

    A 64-bit signed integer value stored in variable length integer +format consumes from 1 to 9 bytes of space.

    +

    The most significant bit of all bytes except the last in a serialized +variable length integer is always set. Unless the serialized +form consumes the maximum 9 bytes available, the most significant +bit of the final byte of the representation is always cleared.

    +

    The eight least significant bits of the 64-bit twos-complement +representation of a value stored in a 9 byte variable length +integer are stored in the final byte (byte offset 8) of the +serialized variable length integer. The other 56 bits are +stored in the 7 least significant bits of each of the first 8 bytes +of the serialized variable length integer, in order from +most significant to least significant.

    +

    A variable length integer that consumes less than 9 bytes of +space contains a value represented as an N-bit unsigned +integer, where N is equal to the number of bytes consumed by +the serial representation (between 1 and 8) multiplied by 7. The +N bits are stored in the 7 least significant bits of each +byte of the serial representation, from most to least significant.
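
    The serialization rules above translate into a short decoding routine. The following sketch (the function names are invented for illustration) decodes a variable length integer from a byte buffer, returning the signed 64-bit value and the number of bytes consumed; the assertions use values taken from the example table above.

        def decode_varint(buf, offset=0):
            # Accumulate 7 bits from each of up to 8 bytes; a cleared high
            # bit terminates the encoding early.
            value = 0
            for i in range(8):
                byte = buf[offset + i]
                value = (value << 7) | (byte & 0x7F)
                if byte < 0x80:
                    return _to_signed64(value), i + 1
            # Nine-byte form: the final byte contributes all 8 bits.
            value = (value << 8) | buf[offset + 8]
            return _to_signed64(value), 9

        def _to_signed64(value):
            # Interpret the accumulated bits as a 64-bit twos-complement value.
            return value - (1 << 64) if value & (1 << 63) else value

        assert decode_varint(bytes([0x2B])) == (43, 1)
        assert decode_varint(bytes([0x8C, 0xA0, 0x6F])) == (200815, 3)
        assert decode_varint(bytes([0xFF] * 9)) == (-1, 9)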

    + + +

    2.3.2 Database Record Format

    + +

    + A database record is a blob of data that represents an ordered + list of one or more SQL values. Database records are used in two + places in SQLite database files - as the associated data for entries + in table B-Tree structures, and as the key values in index B-Tree + structures. The size of a database record (the number of bytes it + consumes) depends on the values it contains. +

    + Each database record consists of a short record header followed by + a data area. The record header consists of N+1 variable + length integers (see section 2.3.1), where + N is the number of values stored in the record. +

    + The first variable length integer in a record header contains the + size of the record header in bytes. The following N variable + length integer values each describe the type and size of the + corresponding SQL value within the record (the second integer in the + record header describes the first value in the record, etc.). The + second and subsequent integer values in a record header are interpreted + according to the following table: + +
    Header Value Data type and size +
    0 + An SQL NULL value (type SQLITE_NULL). This value + consumes zero bytes of space in the record's data area. +
    1 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 1-byte signed integer. +
    2 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 2-byte signed integer. +
    3 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 3-byte signed integer. +
    4 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 4-byte signed integer. +
    5 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 6-byte signed integer. +
    6 + An SQL integer value (type SQLITE_INTEGER), stored as a + big-endian 8-byte signed integer. +
    7 + An SQL real value (type SQLITE_FLOAT), stored as an + 8-byte IEEE floating point value. +
    8 + The literal SQL integer 0 (type SQLITE_INTEGER). The value + consumes zero bytes of space in the record's data area. + Values of this type are only present in databases with + a schema file format (the 32-bit integer at byte offset 44 + of the database header) value of 4 or greater. + +
    9 + The literal SQL integer 1 (type SQLITE_INTEGER). The value + consumes zero bytes of space in the record's data area. + Values of this type are only present in databases with + a schema file format (the 32-bit integer at byte offset 44 + of the database header) value of 4 or greater. + +
    bytes * 2 + 12 + Even values greater than or equal to 12 are used to signify a + blob of data (type SQLITE_BLOB) that is (n-12)/2 bytes in length, + where n is the integer value stored in the record header. + +
    bytes * 2 + 13 + Odd values greater than or equal to 13 are used to signify a string + (type SQLITE_TEXT) that is (n-13)/2 bytes in length, where + n is the integer value stored in the record header. +
    +

    + Immediately following the record header is the data for each + of the record's values. A record containing N values is + depicted in figure 2. + + +

    + + +

    Figure 2 - Database Record Format +

    + + +

    + For each SQL value in the record, there is a blob of data stored + in the record's data area. If the corresponding integer type value + in the record header is 0 (NULL), 8 (integer value 0) or 9 (integer + value 1), then the blob of data is zero bytes in length. Otherwise, + the length of the data field is as described in the table above. +

    + The data field associated with a string value contains the string + encoded using the database encoding, as defined in the database + header (see section 2.2.1). No + nul-terminator character is stored in the database. + +
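
    The mapping from record-header values ("serial types") to content sizes given in the table above can be written as a small helper. This is a minimal sketch; the function name and the descriptive strings are only for illustration.

        def serial_type_info(n):
            # Returns (content size in bytes, description) for a record
            # header value, following the table above.
            fixed = {
                0: (0, "NULL"),
                1: (1, "1-byte signed integer"),
                2: (2, "2-byte signed integer"),
                3: (3, "3-byte signed integer"),
                4: (4, "4-byte signed integer"),
                5: (6, "6-byte signed integer"),
                6: (8, "8-byte signed integer"),
                7: (8, "8-byte IEEE float"),
                8: (0, "integer constant 0"),
                9: (0, "integer constant 1"),
            }
            if n in fixed:
                return fixed[n]
            if n >= 12 and n % 2 == 0:
                return ((n - 12) // 2, "blob")
            if n >= 13 and n % 2 == 1:
                return ((n - 13) // 2, "text")
            raise ValueError("values 10, 11 and negative values are not valid")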

    A database record consists of a database record header, +followed by database record data. The first part of the +database record header is a variable length integer +containing the total size (including itself) of the header in bytes.

    +

    Following the length field, the remainder of the database record +header is populated with N variable length integer +fields, where N is the number of database values stored in +the record.

    +

    Following the database record header, the database record +data is made up of N variable length blobs of data, where +N is again the number of database values stored in the record. +The nth blob contains the data for the nth value in +the database record. The size and format of each blob of data is +encoded in the corresponding variable length integer field +in the database record header.

    +

    A value of 0 stored within the database record header indicates +that the corresponding database value is an SQL NULL. In this case +the blob of data in the data area is 0 bytes in size.

    +

    A value of 1 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 1-byte +big-endian signed integer.

    +

    A value of 2 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 2-byte +big-endian signed integer.

    +

    A value of 3 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 3-byte +big-endian signed integer.

    +

    A value of 4 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 4-byte +big-endian signed integer.

    +

    A value of 5 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 6-byte +big-endian signed integer.

    +

    A value of 6 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 8-byte +big-endian signed integer.

    +

    A value of 7 stored within the database record header indicates +that the corresponding database value is an SQL real (floating +point number). In this case the blob of data contains an 8-byte +IEEE floating point number, stored in big-endian byte order.

    +

    A value of 8 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 0. +In this case the blob of data in the data area is 0 bytes in size.

    +

    A value of 9 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 1. +In this case the blob of data in the data area is 0 bytes in size.

    +

    An even value greater than or equal to 12 stored within the +database record header indicates that the corresponding +database value is an SQL blob field. The blob of data contains the +value data. The blob of data is exactly (n-12)/2 bytes +in size, where n is the integer value stored in the +database record header.

    +

    An odd value greater than or equal to 13 stored within the +database record header indicates that the corresponding +database value is an SQL text field. The blob of data contains the +value text stored using the database encoding, with no +nul-terminator. The blob of data is exactly (n-13)/2 bytes +in size, where n is the integer value stored in the +database record header.

    + +

    + The following database file properties define restrictions on the + integer values that may be stored within a + database record header. + +

    In a well-formed database file, if the values 8 or 9 appear within +any database record header within the database, then the +schema-layer file format (stored at byte offset 44 of the +database file header) must be set to 4.

    +

    In a well-formed database file, the values 10 and 11, and all +negative values may not appear within any database record header +in the database.
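
    Combining the pieces, an entire database record can be decoded by reading the header-size varint, the list of serial types, and finally the data area. The sketch below reuses the hypothetical decode_varint() and serial_type_info() helpers sketched earlier and assumes a UTF-8 database text encoding; it is illustrative only.

        import struct

        def decode_record(buf):
            # First varint: total size of the record header in bytes, including itself.
            header_size, n = decode_varint(buf)
            serial_types = []
            while n < header_size:
                st, used = decode_varint(buf, n)
                serial_types.append(st)
                n += used

            values, pos = [], header_size
            for st in serial_types:
                size, _ = serial_type_info(st)
                data = buf[pos:pos + size]
                if st == 0:
                    values.append(None)                      # SQL NULL
                elif 1 <= st <= 6:
                    values.append(int.from_bytes(data, "big", signed=True))
                elif st == 7:
                    values.append(struct.unpack(">d", data)[0])
                elif st in (8, 9):
                    values.append(st - 8)                    # literal 0 or 1
                elif st % 2 == 0:
                    values.append(data)                      # blob
                else:
                    values.append(data.decode("utf-8"))      # text (UTF-8 assumed)
                pos += size
            return values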

    + +

    2.3.3 Index B-Trees

    + +

    + As specified in section 2.1, index + B-Tree structures store a unique set of the database records described + in the previous section. While in some cases, when there are very + few entries in the B-Tree, the entire structure may fit on a single + database page, usually the database records must be spread across + two or more pages. In this case, the pages are organized into a + tree structure with a single "root" page at the head of the tree. +

    + Within the tree structure, each page is either an internal tree + node containing an ordered list of N references to child nodes + (page numbers) and N-1 database records, or a leaf node containing + M database records. The value of N may be different for each page, but + is always two or greater. Similarly, each leaf page may have a + different non-zero positive value for M. The tree is always of + uniform height, meaning the number of intermediate levels between + each leaf node page and the root page is the same. +

    + Within both internal and leaf node pages, the records are stored in + sorted order. The comparison function used to determine the sort order + is described in section 2.3.3.2. +

    + Records are distributed throughout the tree such that for each + internal node, all records stored in the sub-tree headed by + the first child node ( C(0) ) are considered less than + the first record stored on the internal node ( R(0) ) by the + comparison function described in section + 2.3.3.2. Similarly all records stored + in the sub-tree headed by C(n) are considered greater than R(n-1) but + less than R(n) for values of n between 1 and N-2, inclusive. All + records in the sub-tree headed by C(N-1) are greater than the + largest record stored on the internal node. + + +

    + + +

    Figure 3 - Index B-Tree Tree Structure +

    + + +

    + Figure 3 depicts one possible record + distribution for an index B-Tree containing records R1 to R26, assuming + that for all values of N, R(N+1)>R(N). In total the B-Tree + structure uses 11 database file pages. Internal tree nodes contain + database records and references to child node pages. Leaf nodes contain + database records only. + +

    The pages in an index B-Tree structures are arranged into a tree +structure such that all leaf pages are at the same depth.

    +

    Each leaf node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a database record.

    +

    Each internal node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a child page number, C, +and a database record R. All database records stored within +the sub-tree headed by page C are smaller than record R, +according to the index sort order (see below). Additionally, unless +R is the smallest database record stored on the internal node +page, all database records within the sub-tree headed by C are +greater than R-1, where R-1 is the +largest database record on the internal node page that is smaller +than R.

    +

    As well as child page numbers associated with B-Tree cells, each +internal node page in an index B-Tree contains the page number +of an extra child page, the right-child page. All database +records stored in all B-Tree cells within the sub-tree headed by the +right-child page are greater than all database records +stored within B-Tree cells on the internal node page.

    + +

    + The precise way in which index B-Tree pages and cells are formatted is + described in subsequent sections. + + +

    2.3.3.1 Index B-Tree Content

    + +

    + The database file contains one index B-Tree for each database index + in the logical database, including those created by UNIQUE or + PRIMARY KEY clauses in table declarations. Each record stored in + an index B-Tree contains the same number of fields, the number of + indexed columns in the database index declaration plus one. +

    + An index B-Tree contains an entry for each row in its associated + database table. The fields of the record used as the index B-Tree + key are copies of each of the indexed columns of the associated + database row, in order, followed by the rowid value of the same + row. See figure 1 for an example. + +

    In a well-formed database, each index B-Tree contains a single entry +for each row in the indexed logical database table.

    +

    Each database record (key) stored by an index B-Tree in a +well-formed database contains the same number of values, the number +of indexed columns plus one.

    +

    The final value in each database record (key) stored by an +index B-Tree in a well-formed database contains the rowid (an integer +value) of the corresponding logical database row.

    +

    The first N values in each database record (key) +stored in an index B-Tree where N is the number of indexed +columns, contain the values of the indexed columns from the +corresponding logical database row, in the order specified for the +index.

    + +

    2.3.3.2 Record Sort Order

    + +

    + This section defines the comparison function used when database + records are used as B-Tree keys for index B-Trees. The comparison + function is only defined when both database records contain the same + number of fields. +

    + When comparing two database records, the first field of one + record is compared to the first field of the other. If they + are not equal, the next pair of fields are compared, and so + on. If all the fields in the database records are equal, then + the two records are considered equal. Otherwise, the result + of the comparison is determined by the first pair of unequal + fields. +

    + Two database record fields (SQL values) are compared using the + following rules: +

      +
    1. If both values are NULL, then they are considered equal. +
    2. If one value is a NULL and the other is not, it is considered + the lesser of the two. +
    3. If both values are either real or integer values, then the + comparison is done numerically. +
    4. If one value is a real or integer value, and the other is + a text or blob value, then the numeric value is considered + lesser. +
    5. If both values are text, then the collation function is used + to compare them. The collation function is a property of the + index column in which the values are found. + Link to document with CREATE INDEX syntax. +
    6. If one value is text and the other a blob, the text value + is considered lesser. +
    7. If both values are blobs, memcmp() is used to determine the + results of the comparison function. If one blob is a prefix + of the other, the shorter blob is considered lesser. +
    +

    + Each column of a database index may be declared as "descending". + Link to document with CREATE INDEX syntax. + In SQLite database files with a schema layer file-format equal + to 4, this modifies the order in which the records are stored in + the corresponding index B-Tree structure. For each index column + declared as descending, the results of the above comparison + procedure are inverted. +

    + The columns of database indexes created by UNIQUE or PRIMARY + KEY clauses are never treated as descending. + +
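
    The comparison rules listed above can be summarized in a short sketch. Collation is simplified here to plain string comparison and descending columns are ignored, so this is only an approximation of the behaviour described in this section; the function names are invented for illustration.

        def compare_values(a, b):
            # Rank: NULL < numeric < text < blob, per the rules above.
            def rank(v):
                if v is None:
                    return 0
                if isinstance(v, (int, float)):
                    return 1
                if isinstance(v, str):
                    return 2
                return 3  # bytes / blob
            ra, rb = rank(a), rank(b)
            if ra != rb:
                return -1 if ra < rb else 1
            if a == b:
                return 0
            return -1 if a < b else 1

        def compare_records(rec_a, rec_b):
            # Field-by-field comparison; the first unequal pair decides.
            for a, b in zip(rec_a, rec_b):
                c = compare_values(a, b)
                if c != 0:
                    return c
            return 0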

    + Need requirements style statements for this information. Easier + to do once collation sequences have been defined somewhere. + + +

    2.3.3.3 Index B-Tree Page Format

    + +

    + Each index B-Tree page is divided into four sections that occur + in order on the page: +

      +
    • The 8 (leaf node pages) or 12 (internal tree node pages) + byte page-header. +
    • The cell offset array. This is a series of N big-endian 2-byte + integer values, where N is the number of records stored on + the page. +
    • A block of unused space. This may be 0 bytes in size. +
    • The cell content area consumes the remaining space on the page. +
    + +
    + + +

    Figure 4 - Index B-Tree Page Data +

    + +

    + The 8 (leaf node pages) or 12 (internal tree node pages) byte page + header that begins each index B-Tree page is made up of a series of + 1, 2 and 4 byte unsigned integer values as shown in the following + table. All values are stored in big-endian byte order. + + +
    Byte Range Byte Size Description +
    0 1B-Tree page flags. For an index B-Tree internal + tree node page, this is set to 0x02. For a + leaf node page, 0x0A. +
    1..2 2Byte offset of first block of free space on + this page. If there are no free blocks on this + page, this field is set to 0. +
    3..4 2Number of cells (entries) on this page. +
    5..6 2Byte offset of the first byte of the cell + content area (see figure + 4), relative to the + start of the page. +
    7 1Number of fragmented free bytes on page. +
    8..11 4Page number of rightmost child-page (the + child-page that heads the sub-tree in which all + records are larger than all records stored on + this page). This field is not present for leaf + node pages. +
    +
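
    The fixed-size page header and the cell offset array described above can be decoded directly from a page image. A minimal sketch (it takes the whole page as a byte string; for page 1 the header begins at byte offset 100, which this sketch does not handle):

        import struct

        def parse_btree_page_header(page):
            flags = page[0]
            first_freeblock, cell_count, content_start = struct.unpack(">HHH", page[1:7])
            header = {
                "flags": flags,                      # 0x02/0x0A for index, 0x05/0x0D for table pages
                "first_freeblock": first_freeblock,  # 0 means the free block list is empty
                "cell_count": cell_count,
                "content_start": content_start,
                "fragmented_bytes": page[7],
            }
            if flags in (0x02, 0x05):                # internal nodes have a 12-byte header
                header["right_child"] = struct.unpack(">I", page[8:12])[0]
            return header

        def cell_offsets(page):
            # The cell offset array follows the 8 or 12 byte page header.
            h = parse_btree_page_header(page)
            start = 12 if "right_child" in h else 8
            return [struct.unpack(">H", page[start + 2 * i:start + 2 * i + 2])[0]
                    for i in range(h["cell_count"])]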

    + The cell content area, which occurs last on the page, contains one + B-Tree cell for each record stored on the B-Tree page. On a leaf node + page, each cell is responsible for storing a database record only. On + an internal tree node page, each cell contains a database record and + the corresponding child page number (R(0) and C(0) are stored + together, for example); the cell record is considered greater than + all records stored in the sub-tree headed by the child page. The + final child page number is stored as part of the page header. +

    + The B-Tree cells may be distributed throughout the cell content area + and may be interspersed with blocks of unused space. They are not + sorted within the cell content area in any particular order. The + serialized format of a B-Tree cell is described in detail in + section 2.3.3.4. +

    + The byte offset of each cell in the cell content area, relative + to the start of the page, is stored in the cell offset array. The + offsets are in sorted order according to the database records stored + in the corresponding cells. The first offset in the array is the + offset of the cell containing the smallest record on the page, + according to the comparison function defined in section + 2.3.3.2. +

    + As well as the block of unused space between the cell offset array and + the cell content area, which may be any size, there may be small blocks + of free space interspersed with the B-Tree cells within the cell + content area. These are classified into two classes, depending on their + size: +

      +
    • Blocks of free-space consisting of 3 bytes or less are called + fragments. The total number of bytes consumed by all + fragments on a page is stored in the 1 byte unsigned integer at + byte offset 7 of the page header. The total number of fragmented + bytes on a single page is never greater than 255. +
    • Blocks of free-space consisting of more than 3 bytes of contiguous + space are called free blocks. All free blocks on a single + page are linked together into a singly linked list. The byte + offset (relative to the start of the page) of the first block in + the list is stored in the 2 byte unsigned integer stored at byte + offset 1 of the page header. The first two bytes of each free + block contain the byte offset (again relative to the start of + the page) of the next block in the list stored as a big-endian + unsigned integer. The first two bytes of the final block in the + list are set to zero. The third and fourth bytes of each free + block contain the total size of the free block in bytes, stored + as a 2 byte big-endian unsigned integer. +
    + +

    + The list of free blocks is kept in order, sorted by offset. Right? + Later: True statement. SQLite function sqlite3BtreeInitPage() returns + SQLITE_CORRUPT if they are not. + +
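
    Traversing the free block list uses the 2-byte field at byte offset 1 of the page header and the first four bytes of each free block, as described above. A minimal sketch (header_offset would be 100 for page 1 and 0 otherwise; free block offsets are always relative to the start of the page):

        import struct

        def walk_freeblocks(page, header_offset=0):
            # Returns a list of (offset, size) pairs for the free blocks on a page.
            blocks = []
            offset = struct.unpack(">H", page[header_offset + 1:header_offset + 3])[0]
            while offset != 0:
                next_offset, size = struct.unpack(">HH", page[offset:offset + 4])
                blocks.append((offset, size))
                offset = next_offset
            return blocks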

    The b-tree page flags field (the first byte) of each database +page used as an internal node of an index B-Tree structure is set to +0x02.

    +

    The b-tree page flags field (the first byte) of each database +page used as a leaf node of an index B-Tree structure is set to 0x0A.

    + +

    + The following requirements describe the B-Tree page header + present at the start of both index and table B-Tree pages. + +

    The first byte of each database page used as a B-Tree page contains +the b-tree page flags field. On page 1, the b-tree page +flags field is stored directly after the 100 byte file header +at byte offset 100.

    +

    The number of B-Tree cells stored on a B-Tree page is stored as a +2-byte big-endian integer starting at byte offset 3 of the B-Tree +page. On page 1, this field is stored at byte offset 103.

    +

    The 2-byte big-endian integer starting at byte offset 5 of each +B-Tree page contains the byte-offset from the start of the page +to the start of the cell content area, which consumes all space +from this offset to the end of the usable region of the page. +On page 1, this field is stored at byte offset 105. All B-Tree +cells on the page are stored within the cell-content area.

    +

    On each page used as an internal node of a B-Tree structure, the +page number of the rightmost child node in the B-Tree structure is +stored as a 4-byte big-endian unsigned integer beginning at byte +offset 8 of the database page, or byte offset 108 on page 1.

    + +

    + This requirement describes the cell content offset array. It applies + to both B-Tree variants. + +

    Immediately following the page header on each B-Tree page is the +cell offset array, consisting of N 2-byte big-endian +unsigned integers, where N is the number of cells stored +on the B-Tree page (H30840). On an internal node B-Tree page, +the cell offset array begins at byte offset 12, or on a leaf +page, byte offset 8. For the B-Tree node on page 1, these +offsets are 112 and 108, respectively.

    +

    The cell offset array and the cell content area (H30850) +may not overlap.

    +

    Each value stored in the cell offset array must be greater +than or equal to the offset to the cell content area (H30850), +and less than the database page size.

    +

    The N values stored within the cell offset array are the +byte offsets from the start of the B-Tree page to the beginning of +each of the N cells stored on the page.

    +

    No two B-Tree cells may overlap.

    + +

    + The following requirements govern management of free-space within the + page content area (both table and index B-Tree pages). + +

    Within the cell content area, all blocks of contiguous +free-space (space not used by B-Tree cells) greater than 3 bytes in +size are linked together into a linked list, the free block list. +Such blocks of free space are known as free blocks.

    +

    The first two bytes of each free block contain the offset +of the next free block in the free block list formatted +as a 2-byte big-endian integer, relative to the start of the database +page. If there is no next free block, then the first two +bytes are set to 0x00.

    +

    The second two bytes (byte offsets 2 and 3) of each free block +contain the total size of the free block, formatted as a 2-byte +big-endian integer.

    +

    On all B-Tree pages, the offset of the first free block in the +free block list, relative to the start of the database page, +is stored as a 2-byte big-endian integer starting at byte offset +1 of the database page. If there is no first free block +(because the free block list is empty), then the two bytes +at offsets 1 and 2 of the database page are set to 0x00. On page 1, +this field is stored at byte offset 101 of the page.

    +

    Within the cell-content area, all blocks of contiguous free-space +(space not used by B-Tree cells) less than or equal to 3 bytes in +size are known as fragments. The total size of all +fragments on a B-Tree page is stored as a 1-byte unsigned +integer at byte offset 7 of the database page. On page 1, this +field is stored at byte offset 107.

    + +

    2.3.3.4 Index B-Tree Cell Format

    + +

    + For index B-Tree internal tree node pages, each B-Tree cell begins + with a child page-number, stored as a 4-byte big-endian unsigned + integer. This field is omitted for leaf pages, which have no + children. +

    + Following the child page number is the total number of bytes + consumed by the cell's record, stored as a variable length integer + (see section 2.3.1). +

    + If the record is small enough, it is stored verbatim in the cell. + A record is deemed to be small enough to be completely stored in + the cell if it consists of less than or equal to: +

    +            max-local := (usable-size - 12) * max-embedded-fraction / 255 - 23
    +
    +

    + bytes. In the formula above, usable-size is the page-size + in bytes less the number of unused bytes left at the end of every + page (as read from byte offset 20 of the database header), and + max-embedded-fraction is the value read from byte offset + 21 of the database header. + +

    + + +

    Figure 5 - Small Record Index B-Tree Cell +

    + +

    + If the cell record is larger than the maximum size identified by + the formula above, then only the first part of the record is stored + within the cell. The remainder is stored in an overflow-chain (see + section 2.3.5 for details). Following + the part of the record stored within the cell is the page number + of the first page in the overflow chain, stored as a 4 byte + big-endian unsigned integer. The size of the part of the record + stored within the B-Tree cell (local-size in figure + 6) is calculated according to the + following algorithm: +

    +            min-local := (usable-size - 12) * min-embedded-fraction / 255 - 23
    +            max-local := (usable-size - 12) * max-embedded-fraction / 255 - 23
    +            local-size := min-local + (record-size - min-local) % (usable-size - 4)
    +            if( local-size > max-local )
    +                local-size := min-local
    +
    +

    + In the formula above, usable-size is the page-size + in bytes less the number of unused bytes left at the end of every + page (as read from byte offset 20 of the database header), and + max-embedded-fraction and min-embedded-fraction are + the values read from byte offsets 21 and 22 of the database header, + respectively. + +

    + + +

    Figure 6 - Large Record Index B-Tree Cell +

    + + +

    Each B-Tree cell belonging to an internal node page of an index +B-Tree consists of a 4-byte big-endian unsigned integer, the +child page number, followed by a variable length integer +field, followed by a database record. The +variable length integer field contains the length of the +database record in bytes.

    +

    Each B-Tree cell belonging to an leaf page of an index B-Tree +consists of a variable length integer field, followed by +a database record. The variable length integer field +contains the length of the database record in bytes.

    +

    If the database record stored in an index B-Tree page is +sufficiently small, then the entire cell is stored within the +index B-Tree page. Sufficiently small is defined as equal to or +less than max-local, where: + +max-local := (usable-size - 12) * 64 / 255 - 23

    +

    If the database record stored as part of an index B-Tree cell is too +large to be stored entirely within the B-Tree page (as defined by +H30520), then only a prefix of the database record is stored +within the B-Tree page and the remainder stored in an overflow +chain. In this case, the database record prefix is immediately +followed by the page number of the first page of the +overflow chain, formatted as a 4-byte big-endian unsigned +integer.

    +

    When a database record belonging to an index B-Tree cell is +stored partially within an overflow page chain, the size +of the prefix stored within the index B-Tree page is N bytes, +where N is calculated using the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 12) * 64 / 255 - 23 +N := min-local + ((record-size - min-local) % (usable-size - 4)) +if( N > max-local ) N := min-local

    + +

    + Requirements H31010 and H30990 are similar to the algorithms + presented in the text above. However, instead of + min-embedded-fraction and max-embedded-fraction the + requirements use the constant values 32 and 64, as well-formed + database files are required by H30080 and H30070 to store these + values in the relevant database header fields. +
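
    The threshold and prefix-size computation for index B-Tree cells described above (using the constant fractions 32 and 64) can be expressed as a short function. A minimal sketch assuming integer arithmetic; the function name is invented for illustration.

        def index_cell_local_size(record_size, usable_size):
            # usable-size is the page size minus the unused-bytes-per-page value
            # stored at byte offset 20 of the database header (always 0 here).
            max_local = (usable_size - 12) * 64 // 255 - 23
            if record_size <= max_local:
                return record_size          # the whole record fits in the cell
            min_local = (usable_size - 12) * 32 // 255 - 23
            local = min_local + (record_size - min_local) % (usable_size - 4)
            return local if local <= max_local else min_local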

    2.3.4 Table B-Trees

    + +

    + As noted in section 2.1, table B-Trees + store a set of unique 64-bit signed integer keys. Associated with + each key is a database record. As with index B-Trees, the database + file pages that make up a table B-Tree are organized into a tree + structure with a single "root" page at the head of the tree. +

    + Unlike index B-Tree structures, where entries are stored on both + internal and leaf nodes, all entries in a table B-Tree are stored + in the leaf nodes. Within each leaf node, keys are stored in sorted + order. +

    + Each internal tree node contains an ordered list of N references + to child pages, where N is some number greater than one. In a + similar manner to the way in which an index B-Tree page would + contain N-1 records, each internal table B-Tree node page also + contains a list of N-1 64-bit signed integer values in sorted order. + The keys are distributed throughout the tree such that for all internal + tree nodes, integer I(n) is equal to the largest key value stored in + the sub-tree headed by child page C(n) for values of n between 0 and + N-2, inclusive. Additionally, all keys stored in the sub-tree headed + by child page C(n+1) have values larger than that of I(n), for values + of n in the same range. + + +

    + + +

    Figure 7 - Table B-Tree Tree Structure +

    + + +

    + Figure 7 depicts a table B-Tree containing + a contiguous set of 14 integer keys starting with 1. Each key n + has an associated database record Rn. All the keys and their + associated records are stored in the leaf pages. The internal node + pages contain no database data; their only purpose is to provide + a way to navigate the tree structure. +

    The pages in a table B-Tree structures are arranged into a tree +structure such that all leaf pages are at the same depth.

    +

    Each leaf page in a table B-Tree structure contains one or more +B-Tree cells, where each cell contains a 64-bit signed integer key +value and a database record.

    +

    Each internal node page in a table B-Tree structure contains one or +more B-Tree cells, where each cell contains a 64-bit signed integer +key value, K, and a child page number, C. All integer key +values in all B-Tree cells within the sub-tree headed by page C +are less than or equal to K. Additionally, unless K +is the smallest integer key value stored on the internal node page, +all integer keys within the sub-tree headed by C are greater +than K-1, where K-1 is the largest +integer key on the internal node page that is smaller than K.

    +

    As well as child page numbers associated with B-Tree cells, each +internal node page in a table B-Tree contains the page number +of an extra child page, the right-child page. All key values +in all B-Tree cells within the sub-tree headed by the right-child +page are greater than all key values stored within B-Tree cells +on the internal node page.

    + +

    + The special case for root page 1. Root page 1 may contain zero cells, + just a right-child pointer to the only other b-tree page in the tree. + +

    + The precise way in which table B-Tree pages and cells are formatted is + described in subsequent sections. + +

    2.3.4.1 Table B-Tree Content

    + +

    + The database file contains one table B-Tree for each database table + in the logical database. Although some data may be duplicated in + index B-Tree structures, the table B-Tree is the primary location + of table data. +

    + The table B-Tree contains exactly one entry for each row in the + database table. The integer key value used for the B-Tree entry is + the value of the "rowid" field of the corresponding logical row + in the database table. The database row fields are stored in the + record associated with the table B-Tree entry, in the same order + as they appear in the logical database table. The first field in + the record (see section 2.3.2) contains the + value of the leftmost field in the database row, and so on. +

    + If a database table column is declared as an INTEGER PRIMARY KEY, + then it is an alias for the rowid field, which is stored as the + table B-Tree key value. Instead of duplicating the integer value + in the associated record, the record field associated with the + INTEGER PRIMARY KEY column is always set to an SQL NULL. +

    + Finally, if the schema layer file-format is greater than or equal + to 2, some of the records stored in table B-Trees may contain + fewer fields than the associated logical database table has columns. + If the schema layer file-format is exactly 2, then the logical + database table column values associated with the "missing" fields + are SQL NULL. If the schema layer file-format is greater than + 2, then the values associated with the "missing" fields are + determined by the default value of the associated database table + columns. + Reference to CREATE TABLE syntax. How are default + values determined? +

    In a well-formed database, each table B-Tree contains a single entry +for each row in the corresponding logical database table.

    +

    The key value (a 64-bit signed integer) for each B-Tree entry is +the same as the value of the rowid field of the corresponding +logical database row.

    +

    The SQL values serialized to make up each database record +stored as ancillary data in a table B-Tree shall be equal to the +values taken by the N leftmost columns of the corresponding +logical database row, where N is the number of values in the +database record.

    +

    If a logical database table column is declared as an "INTEGER +PRIMARY KEY", then instead of its integer value, an SQL NULL +shall be stored in its place in any database records used as +ancillary data in a table B-Tree.

    + +

    The following database properties discuss table B-Tree records + with implicit (default) values. + +

    If the database schema layer file-format (the value stored +as a 4-byte integer at byte offset 44 of the file header) is 1, +then all database records stored as ancillary data in a table +B-Tree structure have the same number of fields as there are +columns in the corresponding logical database table.

    +

    If the database schema layer file-format value is two or +greater and the rightmost M columns of a row contain SQL NULL +values, then the corresponding record stored as ancillary data in +the table B-Tree has between N-M and N fields, +where N is the number of columns in the logical database +table.

    +

    If the database schema layer file-format value is three or +greater and the rightmost M columns of a row contain their +default values according to the logical table declaration, then the +corresponding record stored as ancillary data in the table B-Tree +may have as few as N-M fields, where N is the +number of columns in the logical database table.

    + +

    2.3.4.2 Table B-Tree Page Format

    + +

    + Table B-Tree structures use the same page format as index B-Tree + structures, described in section 2.3.3.3, + with the following differences: +

      +
    • The first byte of the page-header, the "flags" field, is set to + 0x05 for internal tree node pages, and 0x0D for leaf pages. +
    • The content and format of the B-Tree cells is different. See + section 2.3.4.3 for details. +
    • The format of page 1 is the same as any other table B-Tree, + except that 100 bytes less than usual is available for content. + The first 100 bytes of page 1 is consumed by the database + header. +
    + +

    In a well-formed database file, the first byte of each page used +as an internal node of a table B-Tree structure is set to 0x05.

    +

    In a well-formed database file, the first byte of each page used +as a leaf node of a table B-Tree structure is set to 0x0D.

    + +

    + Most of the requirements specified in section + 2.3.3.3 also apply to table B-Tree + pages. The wording of the requirements makes it clear when this is + the case, either by referring to generic "B-Tree pages" or by + explicitly stating that the statement applies to both "table and + index B-Tree pages". +

    2.3.4.3 Table B-Tree Cell Format

    + +

    + Cells stored on internal table B-Tree nodes consist of exactly two + fields. The associated child page number, stored as a 4-byte + big-endian unsigned integer, followed by the 64-bit signed integer + value, stored as a variable length integer (section + 2.3.1). This is depicted graphically in figure + 8. + +

    + + +

    Figure 8 - Table B-Tree Internal Node Cell +

    + +

    + Cells of table B-Tree leaf pages are required to store a 64-bit + signed integer key and its associated database record. The first + two fields of all table B-Tree leaf page cells are the size of + the database record, stored as a variable length integer + (see section 2.3.1), followed by the key + value, also stored as a variable length integer. For + sufficiently small records, the entire record is stored in the + B-Tree cell following the record-size field. In this case, + sufficiently small is defined as less than or equal to: +

    +          max-local := usable-size - 35
    +
    +

bytes, where usable-size is defined as the page-size in bytes less the number of unused bytes left at the end of every page (as read from byte offset 20 of the database header). This scenario, where the entire record is stored within the B-Tree cell, is depicted in figure 9.

    + + +

    Figure 9 - Table B-Tree Small Record Leaf Node Cell +

    + + +

    + If the record is too large to be stored entirely within the B-Tree + cell, then the first part of it is stored within the cell and the + remainder in an overflow chain (see section + 2.3.5). The size of the part of the + record stored within the B-Tree cell (local-size in figure + 10) is calculated according to + the following algorithm (a similar procedure to that used to + calculate the portion of an index B-Tree key to store within the cell + when an overflow chain is required): +

    +            min-local := (usable-size - 12) * min-embedded-fraction / 255 - 23
    +            max-local := usable-size - 35
    +            local-size := min-local + (record-size - min-local) % (usable-size - 4)
    +            if( local-size > max-local )
    +                local-size := min-local
    +
    +

    + In this case, min-embedded-fraction is the value read from + byte offset 22 of the database header. The layout of the cell in this + case, when an overflow-chain is required, is shown in figure + 10. + + +

    + + +

    Figure 10 - Table B-Tree Large Record Leaf Node Cell +

    + + +

    + If the leaf page is page 1, then the value of usable-size is + as it would be for any other B-Tree page, even though the actual + usable size is 100 bytes less than this for page 1 (because the + first 100 bytes of the page is consumed by the database file + header). + +

    + The following requirements describe the format of table B-Tree + cells, and the distribution thereof between B-Tree and overflow + pages. + +

    B-Tree cells belonging to table B-Tree internal node pages consist +of exactly two fields, a 4-byte big-endian unsigned integer +immediately followed by a variable length integer. These +fields contain the child page number and key value respectively +(see H31030).

    +

    B-Tree cells belonging to table B-Tree leaf node pages consist +of three fields, two variable length integer values +followed by a database record. The size of the database record +in bytes is stored in the first of the two +variable length integer fields. The second of the two +variable length integer fields contains the 64-bit signed +integer key (see H31030).

    +

    If the size of the record stored in a table B-Tree leaf page cell +is less than or equal to (usable page size-35) bytes, then +the entire cell is stored on the B-Tree leaf page. In a well-formed +database, usable page size is the same as the database +page size.

    +

    If a table B-Tree cell is too large to be stored entirely on +a leaf page (as defined by H31170), then a prefix of the cell +is stored on the leaf page, and the remainder stored in an +overflow page chain. In this case the cell prefix +stored on the B-Tree leaf page is immediately followed by a +4-byte big-endian unsigned integer containing the page number +of the first overflow page in the chain.

    +

    When a table B-Tree cell is stored partially in an +overflow page chain, the prefix stored on the B-Tree +leaf page consists of the two variable length integer fields, +followed by the first N bytes of the database record, where +N is determined by the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 35) +N := min-local + (record-size - min-local) % (usable-size - 4) +if( N > max-local ) N := min-local +

    + +

    + Requirement H31190 is very similar to the algorithm presented in + the text above. Instead of min-embedded-fraction, it uses + the constant value 32, as well-formed database files are required + by H30090 to store this value in the relevant database file + header field. + +
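The calculation can be expressed more directly in C. The following is a minimal sketch of the algorithm described above and by requirement H31190 (with min-embedded-fraction fixed at 32); it is not taken from the SQLite sources.

    /* Return the number of bytes of a table B-Tree leaf cell's record that are
    ** stored locally on the leaf page. Any remaining bytes go to an overflow chain. */
    static int tableLeafLocalSize(int usableSize, int recordSize){
      int minLocal = (usableSize - 12) * 32 / 255 - 23;   /* min-embedded-fraction == 32 */
      int maxLocal = usableSize - 35;
      int localSize;

      if( recordSize<=maxLocal ){
        return recordSize;                  /* the entire record fits within the cell */
      }
      localSize = minLocal + (recordSize - minLocal) % (usableSize - 4);
      if( localSize>maxLocal ) localSize = minLocal;
      return localSize;
    }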

    2.3.5 Overflow Page Chains

    + +

Sometimes, a database record stored in either an index or table B-Tree is too large to fit entirely within a B-Tree cell. In this case part of the record is stored within the B-Tree cell and the remainder stored on one or more overflow pages. The overflow pages are chained together using a singly linked list. The first 4 bytes of each overflow page contain a big-endian unsigned integer value holding the page number of the next page in the list. The remaining usable database page space is available for record data.

    + + +

    Figure 11 - Overflow Page Format +

    + + +

    + The scenarios in which overflow pages are required and the number + of bytes stored within the B-Tree cell in each are described for + index and table B-Trees in sections + 2.3.3.4 and + 2.3.4.3 respectively. In each case + the B-Tree cell also stores the page number of the first page in + a linked list of overflow pages. +

    + The amount of space available for record data on each overflow + page is: +

    +        available-space := usable-size - 4
    +
    +

    + Where usable-size is defined as the page-size in bytes less the + number of unused bytes left at the end of every page (as read from + byte offset 20 of the database header). +

    + Each overflow page except for the last one in the linked list + contains available-space bytes of record data. The last + page in the list contains the remaining data, starting at byte + offset 4. The value of the "next page" field on the last page + in an overflow chain is undefined. + +

    A single overflow page may store up to available-space +bytes of database record data, where available-space is equal +to (usable-size - 4).

    +

    When a database record is too large to store within a B-Tree page +(see H31170 and H31000), a prefix of the record is stored within +the B-Tree page and the remainder stored across N overflow +pages. In this case N is the minimum number of pages required +to store the portion of the record not stored on the B-Tree page, +given the maximum payload per overflow page defined by H31200.

    +

    The list of overflow pages used to store a single database record +are linked together in a singly linked list known as an +overflow chain. The first four bytes of each page except the +last in an overflow chain are used to store the page number +of the next page in the linked list, formatted as an unsigned +big-endian integer. The first four bytes of the last page in an +overflow chain are set to 0x00.

    +

    Each overflow page except the last in an overflow chain +contains N bytes of record data starting at byte offset 4 of +the page, where N is the maximum payload per overflow page, +as defined by H31200. The final page in an overflow chain +contains the remaining data, also starting at byte offset 4.

    + +
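As a hedged illustration of the two requirements above, the following C fragment computes how many overflow pages a record needs once the locally stored prefix has been determined (for example by the tableLeafLocalSize() sketch shown earlier). It is an interpretation of the text, not SQLite source code.

    /* Number of overflow pages needed to hold the non-local part of a record. */
    static int overflowPageCount(int recordSize, int localSize, int usableSize){
      int remainder = recordSize - localSize;   /* bytes not stored on the B-Tree page */
      int availableSpace = usableSize - 4;      /* payload bytes per overflow page */
      if( remainder<=0 ) return 0;              /* no overflow chain required */
      return (remainder + availableSpace - 1) / availableSpace;   /* round up */
    }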

    2.4 The Free Page List

    + +

    + Sometimes, after deleting data from the database, SQLite removes pages + from B-Tree structures. If these pages are not immediately required + for some other purpose, they are placed on the free page list. The + free page list contains those pages that are not currently being + used to store any valid data. +

    + Each page in the free-list is classified as a free-list trunk page + or a free-list leaf page. All trunk pages are linked together into + a singly linked list (in the same way as pages in an overflow chain + are - see section 2.3.5). The first four + bytes of each trunk page contain the page number of the next trunk + page in the list, formatted as an unsigned big-endian integer. If + the trunk page is the last page in the linked list, the first four + bytes are set to zero. +

    + Bytes 4 to 7 of each free-list trunk page contain the number of + references to free-list leaf pages (page numbers) stored on the + free-list trunk page. Each leaf page on the free-list is referenced + by exactly one trunk page. +

    + The remaining space on a free-list trunk page is used to store the + page numbers of free-list leaf pages as 4 byte big-endian integers. + Each free-list trunk page contains up to: +

    +        max-leaf-pointers := (usable-size - 8) / 4
    +
    +

    + pointers, where usable-size is defined as the page-size in bytes + less the number of unused bytes left at the end of every page (as read + from byte offset 20 of the database header). + + +

    + + +

    Figure 12 - Free List Trunk Page Format +

    + +

Trunk pages in the free-list other than the first often contain the maximum possible number of references to leaf pages, but this is not required and is frequently not the case (particularly in auto-vacuum capable databases). The page number of the first page in the linked list of free-list trunk pages is stored as a 4-byte big-endian unsigned integer at offset 32 of the database header (section 2.2.1).

    All free pages in a well-formed database file are part of +the database free page list.

    +

    Each free page is either a free list trunk page or a +free list leaf page.

    +

    All free list trunk pages are linked together into a singly +linked list. The first 4 bytes of each page in the linked list +contains the page number of the next page in the list, formatted +as an unsigned big-endian integer. The first 4 bytes of the last +page in the linked list are set to 0x00.

    +

    The second 4 bytes of each free list trunk page contains +the number of free list leaf page numbers stored on the free list +trunk page, formatted as an unsigned big-endian integer.

    +

Beginning at byte offset 8 of each free list trunk page are N page numbers, each formatted as a 4-byte unsigned big-endian integer, where N is the value described in requirement H31270.

    +

    All page numbers stored on all free list trunk pages refer to +database pages that are free list leaves.

    +

The page number of each free list leaf page in a well-formed database file appears exactly once within the set of page numbers stored on free list trunk pages.

    + +

    The following statements govern the two 4-byte big-endian integers + associated with the free page list structure in the database + header. + +

    The total number of pages in the free list, including all free list +trunk and free list leaf pages, is stored as a 4-byte unsigned +big-endian integer at offset 36 of the database file header.

    +

    The page number of the first page in the linked list of free list +trunk pages is stored as a 4-byte big-endian unsigned integer at +offset 32 of the database file header. If there are no free list +trunk pages in the database file, then the value stored at +offset 32 of the database file header is 0.

    + + +
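To make the structure concrete, the following sketch walks the free page list as described above and counts its pages. The readPage() helper is an assumed stand-in for "return the content of database page pgno"; it is not a real API, and no error handling is shown.

    extern unsigned char *readPage(unsigned int pgno);   /* assumed helper */

    static unsigned int get4byte(const unsigned char *p){
      return ((unsigned int)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
    }

    /* firstTrunk is the page number read from offset 32 of the database header. */
    static unsigned int countFreeListPages(unsigned int firstTrunk){
      unsigned int nFree = 0;
      unsigned int pgno = firstTrunk;
      while( pgno!=0 ){
        unsigned char *aPage = readPage(pgno);
        nFree += 1 + get4byte(&aPage[4]);     /* the trunk page plus its leaf references */
        pgno = get4byte(&aPage[0]);           /* next trunk page; 0 terminates the list */
      }
      return nFree;    /* should match the count stored at offset 36 of the header */
    }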

    2.5 Pointer Map Pages

    + +

    + Pointer map pages are only present in auto-vacuum capable databases. + A database is an auto-vacuum capable database if the value stored + at byte offset 52 of the file-header is non-zero. +

    + If they are present, the pointer-map pages together form a lookup + table that can be used to determine the type and "parent page" of + any page in the database, given its page number. The lookup table + classifies pages into the following categories: + +
• B-Tree Root Page (0x01): The page is the root page of a table or index B-Tree structure. There is no parent page in this case; the value stored in the pointer map lookup table is always zero.
• Free Page (0x02): The page is part of the free page list (section 2.4). There is no parent page in this case; zero is stored in the lookup table instead of a parent page number.
• Overflow type 1 (0x03): The page is the first page in an overflow chain. The parent page is the B-Tree page containing the B-Tree cell to which the overflow chain belongs.
• Overflow type 2 (0x04): The page is part of an overflow chain, but is not the first page in that chain. The parent page is the previous page in the overflow chain linked-list.
• B-Tree Page (0x05): The page is part of a table or index B-Tree structure, and is not an overflow page or root page. The parent page is the page containing the parent tree node in the B-Tree structure.

    + Pointer map pages themselves do not appear in the pointer-map lookup + table. Page 1 does not appear in the pointer-map lookup table either. + + +

    + + +

    Figure 13 - Pointer Map Entry Format +

    + +

    + Each pointer-map lookup table entry consumes 5 bytes of space. + The first byte of each entry indicates the page type, according to the + key described in the table above. The following 4 bytes store the + parent page number as a big-endian unsigned integer. This format is + depicted in figure 13. Each + pointer-map page may therefore contain: +

    +        num-entries := usable-size / 5
    +
    +

    + entries, where usable-size is defined as the page-size in bytes + less the number of unused bytes left at the end of every page (as read + from byte offset 20 of the database header). +

    + Assuming the database is auto-vacuum capable, page 2 is always a + pointer map page. It contains the pointer map lookup table entries for + pages 3 through (2 + num-entries), inclusive. The first 5 bytes + of page 2 contain the pointer map lookup table entry for page 3. Bytes + 5 through 9, inclusive, contain the pointer map lookup table entry + for page 4, and so on. +

    + The next pointer map page in the database is page number (3 + + num-entries), which contains the pointer map entries for pages + (4 + num-entries) through (3 + 2 * num-entries) + inclusive. In general, for any value of n greater than zero, + the following page is a pointer-map page that contains lookup + table entries for the num-entries pages that follow it in the + database file: +

        pointer-map-page-number := 2 + n * (num-entries + 1)
    +
    + + +
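Putting the formulas above together (and accounting for the fact that each pointer-map page covers only the num-entries pages that follow it), a reader can locate the pointer-map entry for an arbitrary page with a calculation along the following lines. This is an illustrative sketch, not SQLite source code.

    /* Return the page number of the pointer-map page holding the entry for pgno.
    ** pgno must be greater than 2 and must not itself be a pointer-map page. */
    static unsigned int ptrmapPageFor(unsigned int pgno, unsigned int usableSize){
      unsigned int nEntries = usableSize / 5;     /* entries per pointer-map page */
      unsigned int nGroup = nEntries + 1;         /* a map page plus the pages it covers */
      unsigned int iGroup = (pgno - 2) / nGroup;  /* which group pgno falls into */
      return 2 + iGroup*nGroup;
    }
    /* The 5-byte entry for pgno then begins at byte offset
    ** 5 * (pgno - ptrmapPageFor(pgno, usableSize) - 1) of that pointer-map page. */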

    Non auto-vacuum databases do not contain pointer map pages.

    +

    In an auto-vacuum database file, every (num-entries + 1)th +page beginning with page 2 is designated a pointer-map page, where +num-entries is calculated as: + +num-entries := database-usable-page-size / 5 +

    +

    In an auto-vacuum database file, each pointer-map page contains +a pointer map entry for each of the num-entries (defined by +H31340) pages that follow it, if they exist.

    +

    Each pointer-map page entry consists of a 1-byte page type and a +4-byte page parent number, 5 bytes in total.

    +

Pointer-map entries are packed into the pointer-map page in order, starting at offset 0. The entry associated with the database page that immediately follows the pointer-map page is located at offset 0, the entry for the next page after that at offset 5, and so on.

    + +

    + The following requirements govern the content of pointer-map entries. + +

    For each page except page 1 in an auto-vacuum database file that is +the root page of a B-Tree structure, the page type of the +corresponding pointer-map entry is set to the value 0x01 and the +parent page number is zero.

    +

    For each page that is a part of an auto-vacuum database file free-list, +the page type of the corresponding pointer-map entry is set to the +value 0x02 and the parent page number is zero.

    +

    For each page in a well-formed auto-vacuum database that is the first +page in an overflow chain, the page type of the corresponding +pointer-map entry is set to 0x03 and the parent page number field +is set to the page number of the B-Tree page that contains the start +of the B-Tree cell stored in the overflow-chain.

    +

    For each page that is the second or a subsequent page in an overflow +chain, the page type of the corresponding pointer-map entry is set to +0x04 and the parent page number field is set to the page number of the +preceding page in the overflow chain.

    +

For each page that is not a root page but is part of a B-Tree structure (and not part of an overflow chain), the page type of the corresponding pointer-map entry is set to the value 0x05 and the parent page number field is set to the page number of the parent node in the B-Tree structure.

    + +

    3 Database File-System Representation

    + + +

The previous section, section 2, describes the format of an SQLite database image. A database image is the serialized form of a logical SQLite database. Normally, a database image is stored within the file-system in a single file, a database file. In this case no other data is stored within the database file. The first byte of the database file is the first byte of the database image, and the last byte of the database file is the last byte of the database image. For this reason, SQLite is often described as a "single-file database system". However, an SQLite database image is not always stored in a single file within the file-system. It is also possible for it to be distributed between the database file and a journal file. A third file, a master-journal file, may also be part of the file-system representation. Although a master-journal file never contains any part of the database image, it can contain meta-data that helps determine which parts of the database image are stored within the database file, and which parts are stored within the journal file.

    + In other words, the file-system representation of an SQLite database + consists of the following: + +

• The database file itself.
• Optionally, a journal file.
• Optionally, a master-journal file.

    + Usually, a database image is stored entirely within the database + file. Other configurations, where the database image data + is distributed between the database file and its journal + file, are used as interim states when modifying the contents of + the database image to commit a database transaction. In practice, + a database reader only encounters such a configuration if a previous + attempt to modify the database image on disk was interrupted by an + application, OS or power failure. The most practical approach (and + that taken by SQLite) is to extract the subset of the database image + currently stored within the journal file and write it into the database + file, thus restoring the system to a state where the database file + contains the entire database image. Other SQLite documentation, and + the comments in the SQLite source code, identify this process as hot + journal rollback. Instead of focusing on the hot journal + rollback process, this document describes how journal and + master-journal files must be interpreted in order to extract the + current database image from the file-system representation in the + general case. + +

    + Sub-section 3.1 describes the formats + used by journal and master-journal files. + +

    + Sub-section 3.2 contains a precise + description of the various ways a database image may be + distributed between the database file and journal file, + and the rules that must be followed to extract it. In other words, a + description of how SQLite or compatible software reads the database + image from the file-system. + +

    3.1 Journal File Formats

    + + +

    + The following sub-sections describe the formats used by SQLite journal + files (section 3.1.1) and master journal files + (section 3.1.2). + + +

    3.1.1 Journal File Details

    + + +

    + This section describes the format used by an SQLite journal file. + +

    + A journal file consists of one or more journal sections, optionally + followed by a master journal pointer field. The first journal section + starts at the beginning of the journal file. There is no limit to the + number of journal sections that may be present in a single journal file. + +

Each journal section consists of a journal header immediately followed by zero or more journal records. The formats of journal headers and journal records are described in sections 3.1.1.1 and 3.1.1.2 respectively. One of the numeric fields stored in a journal header is the sector size field. Each journal section in a journal file must be an integer multiple of the sector size stored in the first journal header of the journal file (the value of the sector size field in the second and subsequent journal headers is not used). If the sum of the sizes of the journal header and journal records in a journal section is not an integer multiple of the sector size, then up to (sector-size - 1) bytes of unused space (padding) follow the end of the last journal record to make up the required length.

    + Figure 14 illustrates a journal file that + contains N journal sections and a master journal pointer. The first + journal section in the file is depicted as containing M journal + records. + + +

    + + +

    Figure 14 - Journal File Format +

    + + +

    + The following requirements define a well-formed journal section. This concept + is used in section 3.2. + +

    A buffer shall be considered to contain a well-formed journal section +if it is not excluded from this category by requirements H32220, H32230 or +H32240.

    +

    A buffer shall only be considered to contain a well-formed journal section +if the first 28 bytes of it contain a well-formed journal header.

    +

    A buffer shall only be considered to contain a well-formed journal section +if, beginning at byte offset sector-size, it contains a sequence of +record-count well-formed journal records. In this case sector-size and +record-count are the integer values stored in the sector size and record +count fields of the journal section's journal header.

    +

    A buffer shall only be considered to contain a well-formed journal section +if it is an integer multiple of sector-size bytes in size, where sector-size +is the value stored in the sector size field of the journal section's journal +header.

    + +

    + Note that a journal section that is not strictly speaking a well-formed + journal section often contains important data. For example, many journal + files created by SQLite that consist of a single journal section and no + master journal pointer contain a journal section that is not well-formed + according to requirement H32240. See section 3.2 + for details on when well-formedness is an important property of journal + sections and when it is not. + +

    3.1.1.1 Journal Header Format

    + + +

A journal header is sector-size bytes in size, where sector-size is the value stored as a 32-bit big-endian unsigned integer at byte offset 20 of the first journal header that occurs in the journal file. The sector-size must be an integer power of two greater than or equal to 512. The sector-size is chosen by the process that creates the journal file based on the considerations described in section writing_to_files. Only the first 28 bytes of the journal header are used; the remainder may contain garbage data. The first 28 bytes of each journal header consist of an eight byte block set to a well-known value, followed by five big-endian 32-bit unsigned integer fields.

    + + +

    Figure 15 - Journal Header Format +

    + + +

    + Figure 15 graphically depicts the layout + of a journal header. The individual fields are described in + the following table. The offsets in the 'byte offset' column of the + table are relative to the start of the journal header. + + +
• Byte offset 0, 8 bytes: The journal magic field always contains a well-known 8-byte string value used to identify SQLite journal files. The well-known sequence of byte values is:
        0xd9 0xd5 0x05 0xf9 0x20 0xa1 0x63 0xd7
• Byte offset 8, 4 bytes: This field, the record count, is set to the number of journal records that follow this journal header in the journal file.
• Byte offset 12, 4 bytes: The checksum initializer field is set to a pseudo-random value. It is used as part of the algorithm to calculate the checksum for all journal records that follow this journal header.
• Byte offset 16, 4 bytes: This field, the database page count, is set to the number of pages that the database file contained before any modifications associated with the write transaction are applied.
• Byte offset 20, 4 bytes: This field, the sector size, is set to the sector size of the device on which the journal file was created, in bytes. This value is required when reading the journal file to determine the size of each journal header.
• Byte offset 24, 4 bytes: The page size field contains the database page size used by the corresponding database file when the journal file was created, in bytes.
    + +

    + Because a journal header always occurs at the start of a journal + section, and because the size of each journal section is always a + multiple of sector-size bytes, journal headers are always positioned + in the file such that they start at a sector-size aligned offset. + +

    + The following requirements define a "well-formed journal header". This + concept is used in the following sections. A well-formed journal header + is defined as a blob of 28 bytes for which the journal magic field is set + correctly and for which both the page size and sector size fields are set + to power of two values greater than 512. Because there are no + restrictions on the values that may be stored in the record count, + checksum initializer or database page count fields, they do not enter + into the definition of a well-formed journal header. + +

    A buffer of 28 bytes shall be considered a well-formed journal +header if it is not excluded by requirements H32180, H32190 or H32200.

    +

    A buffer of 28 bytes shall only be considered a well-formed journal +header if the first eight bytes of the buffer contain the values 0xd9, +0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.

    +

    A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the sector size field (the 4-byte big-endian +unsigned integer at offset 20 of the buffer) contains a value that +is an integer power of two greater than 512.

    +

    A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the page size field (the 4-byte big-endian +unsigned integer at offset 24 of the buffer) contains a value that +is an integer power of two greater than 512.

    + +
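A minimal C sketch of the well-formedness test defined by the requirements above follows. It assumes aBuf points to the 28 bytes under consideration; helper names are illustrative only.

    #include <string.h>

    static unsigned int getU32(const unsigned char *p){
      return ((unsigned int)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
    }

    static int isPowerOfTwoOver512(unsigned int v){
      return v>512 && (v & (v-1))==0;
    }

    static int isWellFormedJournalHeader(const unsigned char *aBuf){
      static const unsigned char aMagic[8] =
          { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };
      if( memcmp(aBuf, aMagic, 8)!=0 ) return 0;              /* journal magic, offset 0 */
      if( !isPowerOfTwoOver512(getU32(&aBuf[20])) ) return 0; /* sector size field */
      if( !isPowerOfTwoOver512(getU32(&aBuf[24])) ) return 0; /* page size field */
      return 1;  /* record count, checksum initializer and page count are unconstrained */
    }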

    3.1.1.2 Journal Record Format

    + + +

    + Each journal record contains the data for a single database page, + a page number identifying the page, and a checksum value used to help + detect journal file corruption. + + +

    + + +

    Figure 16 - Journal Record Format +

    + + +

    + A journal record, depicted graphically by figure + 16, contains three fields, as described + in the following table. Byte offsets are relative to the start of the + journal record. + + +
• Byte offset 0, 4 bytes: The page number of the database page associated with this journal record, stored as a 4-byte big-endian unsigned integer.
• Byte offset 4, page-size bytes: This field contains the original data for the page, exactly as it appeared in the database file before the write transaction began.
• Byte offset (4 + page-size), 4 bytes: This field contains a checksum value, calculated based on the contents of the journaled database page data (the previous field) and the value stored in the checksum initializer field of the preceding journal header.
    + +

    + The checksum value stored in each journal record is calculated based + on the contents of the page data field of the record and the value + stored in the checksum initializer field of the journal header that + occurs immediately before the journal record. The checksum initializer + field is interpreted as a 32-bit unsigned integer. To this value is + added the value stored in every 200th byte of the page data field, + interpreted as an 8-bit unsigned integer, beginning with the byte + at offset (page-size % 200). The sum is accumulated in a 32-bit + unsigned integer. Overflow is handled by wrapping around to zero. + +

    +
    + Example Checksum Calculation: +
    +  Sum of values:
    +       0xFFFFFFE1 + 
    +       0x00000023 +
    +       0x00000032 +
    +       0x0000009E +
    +       0x00000062 +
    +       0x0000001F
    +      -----------
    +      0x100000155
    +
    +  Truncated to 32-bits: 
    +       0x00000155
    +
    + +

    + For example, if the page-size is 1024 bytes, then the offsets within + the page of the bytes added to the checksum initializer value are + 24, 224, 424, 624 and 824 (the first byte of the page is offset 0, the + last byte is offset 1023). If the values of the bytes at these offsets + are 0x23, 0x32, 0x9E, 0x62 and 0x1F, and the value of the checksum + initializer field is 0xFFFFFFE1, then the value stored in the checksum + field of the journal record is 0x00000155. + + +
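In C, the calculation described above might be sketched as follows (an illustration of the text, not code from SQLite). The same routine reproduces the worked example: with page-size 1024 and a checksum initializer of 0xFFFFFFE1, it returns 0x00000155 for the byte values listed.

    /* Compute the checksum field of a journal record from the journaled page
    ** data and the checksum initializer of the preceding journal header. */
    static unsigned int journalRecordChecksum(
      const unsigned char *aData,     /* page data field (pageSize bytes) */
      int pageSize,                   /* database page size in bytes */
      unsigned int cksumInit          /* checksum initializer field */
    ){
      unsigned int cksum = cksumInit;
      int i;
      for(i = pageSize % 200; i < pageSize; i += 200){
        cksum += aData[i];            /* unsigned 32-bit arithmetic wraps on overflow */
      }
      return cksum;
    }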

    + The set of journal records that follow a journal header + in a journal file are packed tightly together. There are no alignment + requirements for journal records. + +

    A buffer of (8 + page size) bytes shall be considered a well-formed journal +record if it is not excluded by requirements H32110 or H32120.

    +

A journal record shall only be considered to be well-formed if the page number field contains a value other than zero and other than the page number of the locking page, calculated using the page size found in the first journal header of the journal file that contains the journal record.

    +

A journal record shall only be considered to be well-formed if the checksum field contains a value equal to the sum of the value stored in the checksum-initializer field of the journal header that precedes the record and the values stored in every 200th byte of the page data field (each interpreted as an 8-bit unsigned integer), starting with the byte at offset (page-size % 200) and ending with the byte at offset (page-size - 200).

    + + +

    3.1.1.3 Master Journal Pointer

    + + +

    + If present, a master journal pointer occurs at the end of a journal file. + There may or may not be unused space between the end of the final journal + section and the start of the master journal pointer. + +

    + A master journal pointer contains the full path of a + master journal-file along with a check-sum and some well-known values + that allow the master journal pointer to be unambiguously distinguished + from a journal record or journal header. + + +

    + + +

    Figure 17 - Master Journal Pointer Format +

    + + +

    + A master journal pointer, depicted graphically by figure + 17, contains five fields, as + described in the following table. Byte offsets are relative to the + start of the master journal pointer. + + +
• Byte offset 0, 4 bytes: This field, the locking page number, is always set to the page number of the database locking page, stored as a 4-byte big-endian integer. The locking page is the page that begins at byte offset 2^30 of the database file. Even if the database file is large enough to contain the locking page, the locking page is never used to store any data, and so the first four bytes of a valid journal record will never contain this value.
• Byte offset 4, name-length bytes: The master journal name field contains the name of the master journal file, encoded as a UTF-8 string. There is no nul-terminator appended to the string.
• Byte offset (4 + name-length), 4 bytes: The name-length field contains the length of the previous field in bytes, formatted as a 4-byte big-endian unsigned integer.
• Byte offset (8 + name-length), 4 bytes: The checksum field contains a checksum value stored as a 4-byte big-endian integer. The checksum value is calculated as the sum of the bytes that make up the master journal name field.
• Byte offset (12 + name-length), 8 bytes: Finally, the journal magic field always contains a well-known 8-byte string value; the same value stored in the first 8 bytes of a journal header. The well-known sequence of bytes is:
        0xd9 0xd5 0x05 0xf9 0x20 0xa1 0x63 0xd7
    + +

    A buffer shall only be considered to be a well-formed master journal pointer +if the final eight bytes of the buffer contain the values 0xd9, 0xd5, 0x05, +0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.

    +

    A buffer shall only be considered to be a well-formed master journal pointer +if the size of the buffer in bytes is equal to the value stored as a 4-byte +big-endian unsigned integer starting 16 bytes before the end of the buffer.

    +

A buffer shall only be considered to be a well-formed master journal pointer if the first four bytes of the buffer, interpreted as a big-endian unsigned integer, contain the page number of the locking page (the value (1 + 2^30 / page-size), where page-size is the value stored in the page-size field of the first journal header of the journal file).

    +

A buffer shall only be considered to be a well-formed master journal pointer if the value stored as a 4-byte big-endian integer starting 12 bytes before the end of the buffer is equal to the sum of all bytes, each interpreted as an 8-bit unsigned integer, starting at offset 4 of the buffer and continuing until offset (buffer-size - 17) (the 17th last byte of the buffer).

    + +
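The following C sketch checks the size, magic and checksum conditions stated above for a candidate master journal pointer of nBuf bytes (so nBuf should equal 20 + name-length). The locking-page test is omitted because it depends on the page size from the first journal header. The code is illustrative only.

    #include <string.h>

    static unsigned int readBE32(const unsigned char *p){
      return ((unsigned int)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3];
    }

    static int checkMasterJournalPtr(const unsigned char *aBuf, unsigned int nBuf){
      static const unsigned char aMagic[8] =
          { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };
      unsigned int nName, cksum, sum = 0, i;

      if( nBuf<20 ) return 0;                               /* too small to be valid */
      if( memcmp(&aBuf[nBuf-8], aMagic, 8)!=0 ) return 0;   /* trailing journal magic */
      nName = readBE32(&aBuf[nBuf-16]);                     /* name-length field */
      if( nBuf!=20+nName ) return 0;                        /* size must be consistent */
      cksum = readBE32(&aBuf[nBuf-12]);                     /* stored checksum */
      for(i=4; i<4+nName; i++) sum += aBuf[i];              /* sum of name bytes (unsigned) */
      return sum==cksum;
    }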

    3.1.2 Master-Journal File Details

    + + +

    + A master-journal file contains the full paths to two or more + journal files, each encoded using UTF-8 encoding and terminated + by a single nul character (byte value 0x00). There is no padding + between the journal paths, each UTF-8 encoded path begins immediately + after the nul character that terminates the previous one. + +

Note that the contents of a master-journal file are not required in order to read the database image; they are used only when cleaning up (deleting) stale journal files.

    3.2 Reading an SQLite Database

    + + +

As described in section 2.2.2 of this document, an SQLite database image is a set of contiguously numbered fixed size pages. The numbering starts at 1, not 0. Page 1 contains the database header and the root page of the schema table, and all other pages within the database image are referenced by page number, either directly or indirectly, from page 1. In order to be able to read the database image from within the file-system, a database reader needs to be able to ascertain:

      +
    1. The page-size used by the database image, +
    2. The number of pages in the database image, and +
    3. The content of each database page. +
    + +

    + Usually, the database image is simply the contents of the database file. + In this case, reading the database image is straightforward. The + page-size used by the database image can be read from the 2-byte + big-endian integer field stored at byte offset 16 of + the database file (see section 2.2.1). The number of + pages in the database image can be determined by querying the size of + the database file in bytes and then dividing by the page-size. + Reading the contents of a database page is a simple matter of + reading a block of page-size bytes from an offset calculated from + the page-number of the required page: +

    +        offset := (page-number - 1) * page-size
    +
    + +
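For the simple case described above (no valid journal file), reading a page reduces to a seek and a read at the computed offset. The sketch below uses standard C I/O and is purely illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    /* Read database page pgno (1-based) from an open database file. Returns a
    ** malloc'd buffer of pageSize bytes, or NULL on error. */
    static unsigned char *readDbPage(FILE *pDb, long pageSize, long pgno){
      unsigned char *aPage = malloc((size_t)pageSize);
      if( aPage==0 ) return 0;
      if( fseek(pDb, (pgno-1)*pageSize, SEEK_SET)!=0
       || fread(aPage, 1, (size_t)pageSize, pDb)!=(size_t)pageSize ){
        free(aPage);
        return 0;
      }
      return aPage;
    }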

    + However, if there is a valid journal file corresponding to the + database file present within the file-system then the situation + is more complicated. The file-system is considered to contain a valid + journal file if each of the following conditions are met: + +

• A journal file with the corresponding name exists within the file-system.
• The first 28 bytes of the journal file contain a well-formed journal header (see section 3.1.1.1).
• If the journal file contains a well-formed master-journal pointer, then the master-journal file that it names exists within the file-system.

If the file system contains a valid journal file, then the page-size used by and the number of pages in the database image are stored in the first journal header of the journal file. Specifically, the page-size is stored as a 4-byte big-endian unsigned integer at byte offset 24 of the journal file, and the number of pages in the database image is stored as a 4-byte big-endian unsigned integer at byte offset 16 of the same file.

    + The current data for each page of the database image may be stored + within the database file at a file offset based on its page number as + it normally is, or the current version of the data may be stored + somewhere within the journal file. For each page within the database + image, if the journal file contains a valid journal record for the + corresponding page-number, then the current content of the database + image page is the blob of data stored in the page data field of the + journal record. If the journal file does not contain a valid journal + record for a page, then the current content of the database image page + is the blob of data currently stored in the corresponding region of + the database file. + +

    + Not all journal records within a journal file are valid. A journal + record is said to be valid if: + +

• The journal record itself, and all journal records that occur before it within the same journal section, are well-formed.
• The journal section to which the journal record belongs begins with a well-formed journal header.
• All journal sections that occur before the journal section containing the journal record are well-formed journal sections.

    + Note that it is not necessary for a journal record to be part of a + well-formed journal section to be considered valid. + +

    + Figure 18 illustrates two distinct ways + to store a database image within the file system. In this example, the + database image consists of 4 pages of page-size bytes each. The + content of each of the 4 pages is designated A, B, C and D, respectively. + Representation 1 uses only the database file. In this case the entire + database image is stored in the database file. + +

In representation 2 of figure 18, the current database image is stored using both the journal file and the database file. The size and page-size of the database image are both stored in the first (in this case only) journal header in the journal file. Following the journal header are two valid journal records. These contain the data for pages 3 and 4 of the database image. Because there are no valid journal records for pages 1 and 2 of the database image, the content of each of these is stored in the database file. Even though the contents of the file-system are quite different in representation 2 than in representation 1, the stored database image is the same in each case: 4 pages of page-size bytes each, with content A, B, C and D respectively.

    + + +

    Figure 18 - Two ways to store the same database image +

    + + +

The requirements that follow refer to "well-formed" journal sections, journal records and master-journal pointers. These terms are defined in sections 3.1.1, 3.1.1.2 and 3.1.1.3 respectively (well-formed journal headers are defined in section 3.1.1.1).

    + These requirements describe the way a database reader must determine + whether or not there is a valid journal file within the + file-system. + +

    If a journal file contains a well-formed master-journal pointer and the +named master-journal file does not exist then the journal file shall be +considered invalid.

    +

    If the first 28 bytes of a journal file do not contain a well-formed +journal header, then the journal file shall be considered +invalid.

    +

If the journal file exists within the file-system and none of requirements H32000, H32010 or H33080 apply, then the journal file shall be considered valid.

    + +

    + If there is a valid journal file within the file-system, the + following requirements govern how a reader should determine the set + of valid journal records that it contains. + +

    A journal record found within a valid journal file shall be considered a valid +journal record if it is not excluded from this category by requirement H32260, +H32270 or H32280.

    +

    A journal record shall only be considered a valid journal record if it and any +other journal records that occur before it within the same journal section are +well-formed.

    +

    A journal record shall only be considered a valid journal record if the journal +section to which it belongs begins with a well-formed journal header.

    +

    A journal record shall only be considered a valid journal record if all journal +sections that occur before the journal section containing the journal record +are well-formed journal sections.

    + +

    + The following requirements dictate the way in which database + page-size and the number of pages in the database image + should be determined by the reader. + +

    If there exists a valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 4-byte big-endian unsigned integer at byte +offset 24 of the journal file.

    +

If there exists a valid journal file in the file-system, then the number of pages in the database image shall be the value stored as a 4-byte big-endian unsigned integer at byte offset 16 of the journal file.

    +

    If there is no valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 2-byte big-endian unsigned integer at byte +offset 16 of the database file.

    +

    If there is no valid journal file in the file-system, then the +number of pages in the database image shall be calculated by dividing +the size of the database file in bytes by the database page-size.

    + +
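The four requirements above can be summarised by the following C sketch. The readJournalField(), readDbField() and databaseFileSize() helpers are assumed stand-ins for reading big-endian integer fields from the journal and database files and for querying the database file size; they are not real APIs.

    extern unsigned int readJournalField(long offset, int nByte);  /* assumed helper */
    extern unsigned int readDbField(long offset, int nByte);       /* assumed helper */
    extern long databaseFileSize(void);                            /* assumed helper */

    static void getImageGeometry(
      int hasValidJournal,          /* true if a valid journal file exists */
      unsigned int *pPageSize,      /* OUT: page size of the database image */
      unsigned int *pPageCount      /* OUT: number of pages in the database image */
    ){
      if( hasValidJournal ){
        *pPageSize  = readJournalField(24, 4);   /* from the first journal header */
        *pPageCount = readJournalField(16, 4);
      }else{
        *pPageSize  = readDbField(16, 2);        /* from the database file header */
        *pPageCount = (unsigned int)(databaseFileSize() / *pPageSize);
      }
    }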

    + The following requirements dictate the way in which the data for each + page of the database image can be located within the file-system + by a database reader. + +

    If there exists a valid journal file in the file-system, then the +contents of each page of the database image for which there is a valid +journal record in the journal file shall be read from the +corresponding journal record.

    +

    The contents of all database image pages for which there is no valid +journal record shall be read from the database file.

    + +

4 SQLite Interoperability Requirements

    + + +

This section contains requirements that further constrain the behaviour of software that accesses (reads and/or writes) SQLite databases stored within the file-system. These requirements need only be implemented by systems that access databases while other clients may also be doing so. More specifically, they need only be implemented by software operating within a system where one or more of the database clients writes to the database. If the database file-system representation remains constant at all times, or if there is only ever a single database client for each database within the system, the requirements in this section can be ignored.

    + The requirements in this section fall into three categories: + +

      +
    • Database Writer Requirements. Section 4.1 + contains notes on and requirements that must be observed by software + systems that update an existing SQLite database image within the file-system. + +

    • Locking Requirements. Section 4.2 + contains a description of the file-system locks that must be obtained + on the database file, and how locks placed by other database clients + should be interpreted. + +

    • Header Cookie Requirements. An SQLite database image header + (see section 2.2.1) contains two "cookie" values + that must sometimes be incremented when the database image stored in + the file-system is updated. Section + 4.3 contains requirements + identifying exactly when the cookie values must be incremented, and + how they can be used by a database client to determine if cached + data is valid or not. +

    + +

    4.1 Writing to an SQLite Database File

    + + +

    + When writing to an SQLite database, the database representation on disk + must be modified to reflect the new, modified, database image. Exactly + how this is done in terms of raw IO operations depends on the + characteristics of the file-system in which the database is stored and + the degree to which the application is required to handle failures within + the system. A failure may be an application crash, an operating system + crash, a power failure or other unexpected event that terminates + processing. For example, SQLite itself runs in several different modes + with various levels of guarantees on how failures are handled as follows: + +

      +
    • In-memory journal mode (PRAGMA journal_mode=memory). In this + mode any failure may cause database file-system corruption, including an + application crash or unexpected exit. +
• Non-synchronous mode (PRAGMA synchronous=off). In this mode an application crash or unexpected exit may not cause database corruption; however, an operating system crash or power failure may.
• Synchronous mode (PRAGMA synchronous=full). In this mode neither an application crash, an operating system crash nor a power failure may cause database file-system corruption.
    + +

    + If a process attempts to modify a database so as to replace database + image A with database image B and a failure occurs while doing so, + then following recovery the file-system must contain a database image + equivalent to A or B. Otherwise, the database file-system is considered + corrupt. + +

    + Two database images are considered to be equivalent if each of the + following are true: + +

• Both database images have the same page size.
• Both database images contain the same number of pages.
• The content of each page that is not a free-list leaf page is the same in both database images.

    + The exception for free-list leaf pages (see section + 2.4) in the third bullet point above is made + because free-list leaf pages contain no valid data and are never read + by SQLite database readers. Since the blob of data stored on such a + page is never read for any purpose, two database images may have a + different blob stored on a free-list leaf page and still be considered + equivalent. This concept can sometimes be exploited to more efficiently + update an SQLite database file-system representation. + +

    Two database images shall be considered to be equivalent if they (a) have the +same page size, (b) contain the same number of pages and (c) the content of +each page of the first database image that is not a free-list leaf page is +the same as the content of the corresponding page in the second database image.

    + +

    + The following requirement constrains the way in which a database + file-system representation may be updated. In many ways, it is + equivalent to "do not corrupt the database file-system representation + under those conditions where the file-system should not be corrupted". + The definition of "handled failure" depends on the mode that SQLite + is running in (or on the requirements of the external system accessing + the database file-system representation). + +

    If, while writing to an SQLite database file-system representation in +order to replace database image A with database image B, a failure that +should be handled gracefully occurs, then following recovery the database +file-system representation shall contain a database image equivalent to +either A or B.

    + +

    + The following two sections, 4.1.1 + and 4.1.2, are somewhat advisory in nature. + They contain descriptions of two different methods used by SQLite to + modify a database image within a database file-system representation in + accordance with the above requirements. They are not the only methods + that can be used. So long as the above requirements (and + those in sections 4.2 and + 4.3) are honoured, any method may + be used by an SQLite database writer to update the database file-system + representation. Sections 4.1.1 and + 4.1.2 do not contain formal requirements. Formal + requirements governing the way in which SQLite safely updates database + file-system representations may be found in Not available yet!. + An informal description is available in [3]. + +

    4.1.1 The Rollback-Journal Method

    + + +

    + This section describes the method usually used by SQLite to update a database + image within a database file-system representation. This is one way + to modify a database image in accordance with the requirements in the + parent and other sections. When overwriting database image A with database + image B using this method, assuming that to begin with database image A is + entirely contained within the database file and that the page-size of + database image B is the same as that of database image A, the following + steps are taken: + +

      +
    1. The start of the journal file is populated with data that is not + a valid journal header. +

    2. For each page in database image A that is not a free-list leaf + page and either does not exist in database image B or exists but + is populated with different content, a record is written to the + journal file. The record contains a copy of the original database + image A page. +

    3. The start of the journal file is populated with a valid journal + header. The page-count field of the journal header is set to the + number of pages in database image A. The record-count is set to the + number of records written to the journal file in step 2. +

    4. The content of each page of database image B that is either not + present or populated differently in database image A is copied + into the database file. If database image B is smaller than database + image A, the database file is truncated to the size required by + database image B. +

    5. One of several file-system operations that cause the journal file + to become invalid is performed. For example: +

• Deleting the journal file from the file-system.
• Truncating the journal file to zero bytes in length.
• Overwriting the start of the journal file so that it no longer begins with a well-formed journal header.
    + +

During steps 1 and 2 of the above procedure, the database file-system representation clearly contains database image A. The database file itself has not been modified, and the journal file is not valid (since it does not begin with a valid journal file header). Following step 3, the database file-system representation still contains database image A. The number of pages in the database image and the content of some pages now also reside in the journal file, but the database image remains unchanged. During and following step 4, the current database image is still database image A. Although some or all of the pages in the database file may have been overwritten or truncated away, a valid journal record containing the original database image A data exists for each such page that was not a free-list leaf page in database image A. And although the size of the database file may have been modified, the size of the current database image, database image A, is stored in the journal header.

    + Once step 5 of the above procedure is performed, the database file-system + representation contains database image B. The journal file is no longer + valid, so the database image consists of the contents of the database + file, database image B. +

Figure 19 depicts a possible interim state of the database file-system representation used while committing a transaction that replaces a four page database image with a three page database image. The contents of the initial database image pages are A, B, C and D respectively. The final database image content is A, E and C. The interim state depicted is that reached at the end of step 4 in the above procedure. In this state, the file-system contains the initial database image, ABCD. However, if the journal file were to be somehow invalidated, then the file-system would contain the final database image, AEC.

    + + +

    Figure 19 - Interim file-system state used to atomically overwrite database image ABCD with AEC +

    + + +

The procedure described above can be onerous to implement, as it requires that the data for all modified pages of database image B be available (presumably in main memory) at the same time, when step 4 is performed. For transactions that write to a large number of database pages, this may be undesirable. A solution is to create a journal file containing two or more journal headers. If, while modifying a database image within main memory, a client wishes to reduce the amount of data held in memory, it may perform steps 3 and 4 of the above procedure in order to write modified content out to the file-system. Once the modified pages have been written into the database file, the in-memory copies may be discarded. The writer process may then continue accumulating changes in memory. When it is ready to write these changes out to the file-system, either to free up main memory or because all changes associated with the transaction have been prepared, it adds a second (or subsequent) journal header to the journal file, followed by journal records containing the original data for pages about to be modified. It may then write the changes accumulated in memory to the database file, as described in step 4 above.

    + This technique can also be modified to support atomic modification of + multiple databases. In this case the first 4 steps of the procedure outlined + above are followed for each individual database. Following this a + master-journal file is created somewhere within the file-system and a + master-journal pointer added to each individual journal file. Since + a journal-file that contains a master-journal pointer to a master-journal + file that does not exist is considered invalid (requirement H32000), + all journal-files may be simultaneously invalidated by deleting the + master-journal file from the file-system. This delete operation takes the + place of step 5 of the procedure as outlined above. + +

    4.1.2 The Atomic-Write Method

    + + +

    + On some systems, SQLite is able to overwrite a single page of the + database file as an atomic operation. If, while updating the page, + a failure occurs, the system guarantees that following recovery, the + page will be found to have been correctly and completely updated or + not modified at all. When running in such an environment, if SQLite + is required to update a database image so that only a single page + is modified, it can do so simply by overwriting the page. + +

Assuming the database page being updated is not page 1, if requirement H33040 requires that the database header change counter be updated, then the database image modification is no longer confined to a single page. In this case it can be split in two: SQLite first atomically updates page 1 of the database file to increment the database header change counter, then updates the page that it is actually required to update using a second atomic write operation. If a failure occurs some time between the two write operations, following recovery the database image may be found to be in its original state except for the value of the database header change counter. (Arguably, the definition of equivalent database images should be extended to explicitly permit this.)

    4.2 SQLite Locking Protocol

    + + +

    + An SQLite database client may hold at any time one of four different types + of locks on a database file-system representation. This document does not + describe how these locks are to be implemented. Possible implementation + techniques include mapping the four SQLite locks to operating system file + locks, using an external software module to manage locks, or by creating + special "lock files" within the file-system. Regardless of how the locks + are implemented, it is important that all database clients in a system + use the same implementation. The following table summarizes the four + types of locks used by SQLite: + + +
• SHARED: It is only possible to obtain a SHARED lock if no other client is holding a PENDING or EXCLUSIVE lock. Holding a SHARED lock prevents any other client from obtaining an EXCLUSIVE lock. (Blocks: EXCLUSIVE. Blocked by: PENDING, EXCLUSIVE.)
• RESERVED: A RESERVED lock may only be obtained if no other client holds a RESERVED, PENDING or EXCLUSIVE lock on the database. While a client holds a RESERVED lock, other clients may obtain new SHARED locks, but may not obtain new RESERVED, PENDING or EXCLUSIVE locks. (Blocks: RESERVED, PENDING, EXCLUSIVE. Blocked by: RESERVED, PENDING, EXCLUSIVE.)
• PENDING: It is only possible to obtain a PENDING lock if no other client holds a RESERVED, PENDING or EXCLUSIVE lock. While a database client is holding a PENDING lock, no other client may obtain any new lock. (Blocks: all. Blocked by: RESERVED, PENDING, EXCLUSIVE.)
• EXCLUSIVE: An EXCLUSIVE lock may only be obtained if no other client holds any lock on the database. While an EXCLUSIVE lock is held, no other client may obtain any kind of lock on the database. (Blocks: all. Blocked by: all.)
    + +
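As an illustration only, the blocking rules in the table above can be captured in a few lines of C; the enumeration and the lock_compatible() function are hypothetical and are not part of the SQLite API.

typedef enum { NO_LOCK = 0, SHARED, RESERVED, PENDING, EXCLUSIVE } LockType;

/* Returns non-zero if a client may obtain lock `want` while another
** client currently holds lock `held`. */
int lock_compatible(LockType want, LockType held)
{
    switch (want) {
        case SHARED:    return held < PENDING;    /* blocked by PENDING, EXCLUSIVE */
        case RESERVED:  return held < RESERVED;   /* blocked by RESERVED, PENDING, EXCLUSIVE */
        case PENDING:   return held < RESERVED;   /* blocked by RESERVED, PENDING, EXCLUSIVE */
        case EXCLUSIVE: return held == NO_LOCK;   /* blocked by any existing lock */
        default:        return 1;
    }
}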

    + The most important types of locks are SHARED and EXCLUSIVE. Before any + part of the database file is read, a database client must obtain a SHARED + lock or greater. + +

Before reading from a database file, a database reader shall establish a SHARED or greater lock on the database file-system representation.

    + +

    + Before the database file may be written to, a database client must + be holding an EXCLUSIVE lock. Because holding an EXCLUSIVE lock + guarantees that no other client is holding a SHARED lock, it also + guarantees that no other client may be reading from the database file + as it is being written. + +

    Before writing to a database file, a database writer shall establish +an EXCLUSIVE lock on the database file-system representation.

    + +

    + The two requirements above govern reading from and writing to the + database file. In order to write to a journal file, a database client + must obtain at least a RESERVED lock. + +

    Before writing to a journal file, a database writer shall establish +a RESERVED, PENDING or EXCLUSIVE lock on the database file-system +representation.

    + +

    + The requirement above implies that a database writer may write to the + journal file at the same time as a reader is reading from the database + file. This improves concurrency in environments that feature multiple + clients, as a database writer may perform part of its IO before locking + the database file-system representation with an EXCLUSIVE lock. In order + for this to work though, the following must be true: + +

+ • The database file must contain a valid database image at the point the writer obtains its RESERVED or PENDING lock, since readers may still be reading from it while the journal is written.
+ • Any journal file already present at that point must not be a valid journal file, and other clients must treat any journal file as invalid while the writer holds its RESERVED or PENDING lock.

    + The following requirements formally restate the above bullet points. + +

    Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that the database file contains a valid +database image.

    +

    Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that any journal file that may be present +is not a valid journal file.

    +

    If another database client holds either a RESERVED or PENDING lock on the +database file-system representation, then any journal file that exists within +the file system shall be considered invalid.

    + +

    4.3 SQLite Database Header Cookie Protocol

    + + +

    + While a database reader is holding a SHARED lock on the database + file-system representation, it may freely cache data in main memory + since there is no way that another client can modify the database + image. However, if a client relinquishes all locks on a database + file-system representation and then re-establishes a SHARED lock + at some point in the future, any cached data may or may not be + valid (as the database image may have been modified while the client + was not holding a lock). The requirements in this section dictate + the way in which database writers must update two fields of the database + image header (the "cookies") in order to enable readers to determine + when cached data can be safely reused and when it must be discarded. + +

    + SQLite clients may cache two types of data from a database image in + main-memory: + +

      +
    • The database schema. In order to access database content, + the contents of the schema table must be parsed (see section + 2.2.3). Since this is a relatively expensive + process, it is advantageous for clients to cache the parsed + representation in memory. + +

    • Database image page content. Clients may also cache raw + page content in order to reduce the number of file-system read + operations required when reading the database image. +

    + +

    + Similar mechanisms are used to support cache validation for each class + of data. If a database writer changes the database schema in any way, it + is also required to increment the value stored in the database schema + version field of the database image header (see section + 2.2.1). This way, when a database reader establishes + a SHARED lock on a database file-system representation, it may validate + any cached schema data by checking if the value of the database schema + version field has changed since the data was cached. If the value has not + changed, then the cached schema data may be retained and reused. + Otherwise, if the value of the database schema version field is not the + same as it was when the schema data was last cached, then the reader + can deduce that some other database client has modified the database + schema in some way and it must be reparsed. + +

    + Each time a database image stored within a database file-system + representation is modified, the database writer is required to increment + the value stored in the change counter field of the database image header + (see section 2.2.1). This allows database readers to + validate any cache of raw database image page content that may be present + when a database reader establishes a SHARED (or other) lock on the + database file-system representation. If the value stored in the change + counter field of the database image has not changed since the cached + data was read, then it may be safely reused. Otherwise, if the change + counter value has changed, then any cached page content data must be + deemed untrustworthy and discarded. + +

    + If a database image is modified more than once while a writer is holding + an EXCLUSIVE lock, then each header value need only be updated once, as + part of the first image modification that modifies the associated class + of data. Specifically, the change counter field need only be incremented + as part of the first image modification that takes place, and the + database schema version need only be incremented as part of the first + modification that includes a schema change. + +

    A database writer shall increment the value of the database header change +counter field, a 4-byte big-endian unsigned integer field stored at byte offset 24 +of the database header, as part of the first database image modification +that it performs after obtaining an EXCLUSIVE lock.

    +

    A database writer shall increment the value of the database schema version +field, a 4-byte big-endian unsigned integer field stored at byte offset 40 +of the database header, as part of the first database image modification that +includes a schema change that it performs after obtaining an EXCLUSIVE lock.

    +

    If a database writer is required by either H33050 or H33040 to increment a +database header field, and that header field already contains the maximum +value possible (0xFFFFFFFF, or 4294967295 for 32-bit unsigned integer +fields), "incrementing" the field shall be interpreted to mean setting it to +zero.
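On the reader side, the two cookies named in the requirements above might be used as in the following C sketch. The helper and structure names are hypothetical; only the byte offsets (24 and 40) and the big-endian encoding are taken from the requirements.

#include <stdint.h>

static uint32_t read_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

typedef struct {
    uint32_t cached_change_counter;   /* value when page content was last cached */
    uint32_t cached_schema_version;   /* value when the schema was last parsed */
} ClientCache;

/* Call after re-establishing a SHARED lock, passing the current database
** header (the start of page 1). Sets flags telling the caller whether its
** cached pages and parsed schema may be reused. */
void validate_caches(ClientCache *c, const uint8_t *hdr,
                     int *discard_pages, int *reparse_schema)
{
    uint32_t change_counter = read_be32(&hdr[24]);  /* byte offset 24 */
    uint32_t schema_version = read_be32(&hdr[40]);  /* byte offset 40 */

    *discard_pages  = (change_counter != c->cached_change_counter);
    *reparse_schema = (schema_version != c->cached_schema_version);

    c->cached_change_counter = change_counter;
    c->cached_schema_version = schema_version;
}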

    + + +

    5 References

    + + + +
[1] Douglas Comer, Ubiquitous B-Tree, ACM Computing Surveys (CSUR), v.11 n.2, pages 121-137, June 1979.
[2] Donald E. Knuth, The Art Of Computer Programming, Volume 3: "Sorting And Searching", pages 473-480. Addison-Wesley Publishing Company, Reading, Massachusetts.
[3] SQLite Online Documentation, How SQLite Implements Atomic Commit, http://www.sqlite.org/atomiccommit.html.
    + + + +
    +This page last modified 2009/06/23 14:18:53 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/fileformat.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/fileformat.tcl --- sqlite3-3.4.2/www/fileformat.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/fileformat.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,785 +0,0 @@ -# -# Run this script to generated a fileformat.html output file -# -set rcsid {$Id: fileformat.tcl,v 1.13 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {SQLite Database File Format (Version 2)} -puts { -

    SQLite 2.X Database File Format

    - -

    -This document describes the disk file format for SQLite versions 2.1 -through 2.8. SQLite version 3.0 and following uses a very different -format which is described separately. -

    - -

    1.0   Layers

    - -

    -SQLite is implemented in layers. -(See the architecture description.) -The format of database files is determined by three different -layers in the architecture. -

    - -
      -
• The schema layer implemented by the VDBE.
• The b-tree layer implemented by btree.c
• The pager layer implemented by pager.c
    - -

    -We will describe each layer beginning with the bottom (pager) -layer and working upwards. -

    - -

    2.0   The Pager Layer

    - -

    -An SQLite database consists of -"pages" of data. Each page is 1024 bytes in size. -Pages are numbered beginning with 1. -A page number of 0 is used to indicate "no such page" in the -B-Tree and Schema layers. -

    - -

    -The pager layer is responsible for implementing transactions -with atomic commit and rollback. It does this using a separate -journal file. Whenever a new transaction is started, a journal -file is created that records the original state of the database. -If the program terminates before completing the transaction, the next -process to open the database can use the journal file to restore -the database to its original state. -

    - -

    -The journal file is located in the same directory as the database -file and has the same name as the database file but with the -characters "-journal" appended. -

    - -

    -The pager layer does not impose any content restrictions on the -main database file. As far as the pager is concerned, each page -contains 1024 bytes of arbitrary data. But there is structure to -the journal file. -

    - -

-A journal file begins with 8 bytes as follows: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd6. Processes that are attempting to roll back a journal use these 8 bytes as a sanity check to make sure the file they think is a journal really is a valid journal. Prior versions of SQLite used different journal file formats. The magic numbers for these prior formats are different so that if a new version of the library attempts to roll back a journal created by an earlier version, it can detect that the journal uses an obsolete format and make the necessary adjustments. This article describes only the newest journal format - supported as of version 2.8.0.

    - -

-Following the 8 byte prefix are three 4-byte integers that tell us the number of pages that have been committed to the journal, a magic number used for sanity checking each page, and the original size of the main database file before the transaction was started. The number of committed pages is used to limit how far into the journal to read. The use of the checksum magic number is described below. The original size of the database is used to restore the database file back to its original size. The size is expressed in pages (1024 bytes per page).

    - -

    -All three integers in the journal header and all other multi-byte -numbers used in the journal file are big-endian. -That means that the most significant byte -occurs first. That way, a journal file that is -originally created on one machine can be rolled back by another -machine that uses a different byte order. So, for example, a -transaction that failed to complete on your big-endian SparcStation -can still be rolled back on your little-endian Linux box. -

    - -

-After the 8-byte prefix and the three 4-byte integers, the journal file consists of zero or more page records. Each page record is a 4-byte (big-endian) page number followed by 1024 bytes of data and a 4-byte checksum. The data is the original content of the database page before the transaction was started. So to roll back the transaction, the data is simply written into the corresponding page of the main database file. Pages can appear in the journal in any order, but they are guaranteed to appear only once. All page numbers will be between 1 and the maximum specified by the original database size integer that appeared at the beginning of the journal.

    - -

-The so-called checksum at the end of each record is not really a checksum - it is the sum of the page number and the magic number which was the second integer in the journal header. The purpose of this value is to try to detect journal corruption that might have occurred because of a power loss or OS crash that occurred while the journal file was being written to disk. It could have been the case that the meta-data for the journal file, specifically the size of the file, had been written to the disk so that when the machine reboots it appears that the file is large enough to hold the current record. But even though the file size has changed, the data for the file might not have made it to the disk surface at the time of the OS crash or power loss. This means that after reboot, the end of the journal file will contain quasi-random garbage data. The checksum is an attempt to detect such corruption. If the checksum does not match, that page of the journal is not rolled back.

    - -

    -Here is a summary of the journal file format: -

    - -
      -
• 8 byte prefix: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd6
• 4 byte number of records in journal
• 4 byte magic number used for page checksums
• 4 byte initial database page count
• Zero or more instances of the following:
  • 4 byte page number
  • 1024 bytes of original data for the page
  • 4 byte checksum
    - -
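For illustration, the journal layout summarized above could be described by the C structures below. The names are hypothetical, and in the file itself every multi-byte value is stored big-endian, so each field would be decoded accordingly when read.

#include <stdint.h>
#include <string.h>

static const uint8_t JOURNAL_MAGIC[8] =
    { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd6 };

typedef struct {
    uint32_t nRec;       /* number of page records committed to the journal */
    uint32_t cksumInit;  /* magic number used for page checksums */
    uint32_t origPages;  /* original database size, in 1024-byte pages */
} JournalHeader;

typedef struct {
    uint32_t pgno;        /* page number, starting from 1 */
    uint8_t  data[1024];  /* original content of the page */
    uint32_t cksum;       /* pgno + cksumInit */
} JournalRecord;

/* The 8-byte prefix must match before the file is treated as a journal. */
int prefix_ok(const uint8_t *first8)
{
    return memcmp(first8, JOURNAL_MAGIC, sizeof(JOURNAL_MAGIC)) == 0;
}

/* A record is only rolled back if its "checksum" (page number plus the
** magic number from the header) matches. */
int record_ok(const JournalHeader *h, const JournalRecord *r)
{
    return r->cksum == r->pgno + h->cksumInit;
}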

    3.0   The B-Tree Layer

    - -

    -The B-Tree layer builds on top of the pager layer to implement -one or more separate b-trees all in the same disk file. The -algorithms used are taken from Knuth's The Art Of Computer -Programming.

    - -

    -Page 1 of a database contains a header string used for sanity -checking, a few 32-bit words of configuration data, and a pointer -to the beginning of a list of unused pages in the database. -All other pages in the -database are either pages of a b-tree, overflow pages, or unused -pages on the freelist. -

    - -

-Each b-tree page contains zero or more database entries. Each entry has a unique key of one or more bytes and data of zero or more bytes. Both the key and data are arbitrary byte sequences. The combination of key and data is collectively known as "payload". The current implementation limits the amount of payload in a single entry to 1048576 bytes. This limit can be raised to 16777216 by adjusting a single #define in the source code and recompiling. But most entries contain less than a hundred bytes of payload so a megabyte limit seems more than enough.

    - -

    -Up to 238 bytes of payload for an entry can be held directly on -a b-tree page. Any additional payload is contained on a linked list -of overflow pages. This limit on the amount of payload held directly -on b-tree pages guarantees that each b-tree page can hold at least -4 entries. In practice, most entries are smaller than 238 bytes and -thus most pages can hold more than 4 entries. -

    - -

    -A single database file can hold any number of separate, independent b-trees. -Each b-tree is identified by its root page, which never changes. -Child pages of the b-tree may change as entries are added and removed -and pages split and combine. But the root page always stays the same. -The b-tree itself does not record which pages are root pages and which -are not. That information is handled entirely at the schema layer. -

    - -

    3.1   B-Tree Page 1 Details

    - -

    -Page 1 begins with the following 48-byte string: -

    - -
    -** This file contains an SQLite 2.1 database **
    -
    - -

    -If you count the number of characters in the string above, you will -see that there are only 47. A '\000' terminator byte is added to -bring the total to 48. -

    - -

-A frequent question is why the string says version 2.1 when (as of this writing) we are up to version 2.7.0 of SQLite and any change to the second digit of the version is supposed to represent a database format change. The answer to this is that the B-tree layer has not changed since version 2.1. There have been database format changes since version 2.1 but those changes have all been in the schema layer. Because the format of the b-tree layer is unchanged since version 2.1.0, the header string still says version 2.1.

    - -

    -After the format string is a 4-byte integer used to determine the -byte-order of the database. The integer has a value of -0xdae37528. If this number is expressed as 0xda, 0xe3, 0x75, 0x28, then -the database is in a big-endian format and all 16 and 32-bit integers -elsewhere in the b-tree layer are also big-endian. If the number is -expressed as 0x28, 0x75, 0xe3, and 0xda, then the database is in a -little-endian format and all other multi-byte numbers in the b-tree -layer are also little-endian. -Prior to version 2.6.3, the SQLite engine was only able to read databases -that used the same byte order as the processor they were running on. -But beginning with 2.6.3, SQLite can read or write databases in any -byte order. -
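A minimal sketch of this byte-order test follows; the function and enumeration names are illustrative and are not taken from the SQLite sources.

#include <stdint.h>

typedef enum { BYTEORDER_BIG, BYTEORDER_LITTLE, BYTEORDER_INVALID } ByteOrder;

/* `code` is the 4-byte integer that follows the 48-byte header string. */
ByteOrder detect_byte_order(const uint8_t code[4])
{
    if (code[0] == 0xda && code[1] == 0xe3 && code[2] == 0x75 && code[3] == 0x28)
        return BYTEORDER_BIG;     /* all other multi-byte integers are big-endian */
    if (code[0] == 0x28 && code[1] == 0x75 && code[2] == 0xe3 && code[3] == 0xda)
        return BYTEORDER_LITTLE;  /* all other multi-byte integers are little-endian */
    return BYTEORDER_INVALID;
}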

    - -

-After the byte-order code are eleven more 4-byte integers. Each integer is in the byte order determined by the byte-order code. The first integer is the page number for the first page of the freelist. If there are no unused pages in the database, then this integer is 0. The second integer is the number of unused pages in the database. The remaining nine integers are not used by the b-tree layer. These are the so-called "meta" values that are passed up to the schema layer and used there for configuration and format version information. All bytes of page 1 beyond the meta-value integers are unused and are initialized to zero.

    - -

    -Here is a summary of the information contained on page 1 in the b-tree layer: -

    - -
      -
• 48 byte header string
• 4 byte integer used to determine the byte-order
• 4 byte integer which is the first page of the freelist
• 4 byte integer which is the number of pages on the freelist
• 36 bytes of meta-data arranged as nine 4-byte integers
• 928 bytes of unused space
    - -

    3.2   Structure Of A Single B-Tree Page

    - -

    -Conceptually, a b-tree page contains N database entries and N+1 pointers -to other b-tree pages. -

    - -
Ptr 0 | Entry 0 | Ptr 1 | Entry 1 | ... | Ptr N-1 | Entry N-1 | Ptr N
    - -

    -The entries are arranged in increasing order. That is, the key to -Entry 0 is less than the key to Entry 1, and the key to Entry 1 is -less than the key of Entry 2, and so forth. The pointers point to -pages containing additional entries that have keys in between the -entries on either side. So Ptr 0 points to another b-tree page that -contains entries that all have keys less than Key 0, and Ptr 1 -points to a b-tree pages where all entries have keys greater than Key 0 -but less than Key 1, and so forth. -

    - -

    -Each b-tree page in SQLite consists of a header, zero or more "cells" -each holding a single entry and pointer, and zero or more "free blocks" -that represent unused space on the page. -

    - -

    -The header on a b-tree page is the first 8 bytes of the page. -The header contains the value -of the right-most pointer (Ptr N) and the byte offset into the page -of the first cell and the first free block. The pointer is a 32-bit -value and the offsets are each 16-bit values. We have: -

    - -
Bytes 0-3: Ptr N | Bytes 4-5: Cell 0 | Bytes 6-7: Freeblock 0
    - -

    -The 1016 bytes of a b-tree page that come after the header contain -cells and freeblocks. All 1016 bytes are covered by either a cell -or a freeblock. -

    - -

-The cells are connected in a linked list. Cell 0 contains Ptr 0 and Entry 0. Bytes 4 and 5 of the header point to Cell 0. Cell 0 then points to Cell 1 which contains Ptr 1 and Entry 1. And so forth. Cells vary in size. Every cell has a 12-byte header and at least 4 bytes of payload space. Space is allocated to payload in increments of 4 bytes. Thus the minimum size of a cell is 16 bytes and up to 63 cells can fit on a single page. The size of a cell is always a multiple of 4 bytes. A cell can have up to 238 bytes of payload space. If the payload is more than 238 bytes, then an additional 4 byte page number is appended to the cell which is the page number of the first overflow page containing the additional payload. The maximum size of a cell is thus 254 bytes, meaning that at least 4 cells can fit into the 1016 bytes of space available on a b-tree page. An average cell is usually around 52 to 100 bytes in size with about 10 or 20 cells to a page.

    - -

    -The data layout of a cell looks like this: -

    - -
Bytes 0-3:     Ptr
Bytes 4-5:     Keysize (low)
Bytes 6-7:     Next
Byte 8:        Ksz (hi)
Byte 9:        Dsz (hi)
Bytes 10-11:   Datasize (low)
Bytes 12-249:  Payload
Bytes 250-253: Overflow Pointer
    - -

-The first four bytes are the pointer. The size of the key is a 24-bit value where the upper 8 bits are taken from byte 8 and the lower 16 bits are taken from bytes 4 and 5 (or bytes 5 and 4 on little-endian machines.) The size of the data is another 24-bit value where the upper 8 bits are taken from byte 9 and the lower 16 bits are taken from bytes 10 and 11 or 11 and 10, depending on the byte order. Bytes 6 and 7 are the offset to the next cell in the linked list of all cells on the current page. This offset is 0 for the last cell on the page.
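A hypothetical decoder for this 12-byte cell header, assuming a big-endian database image (for a little-endian image the 16- and 32-bit fields are byte-swapped), might look like this:

#include <stdint.h>

typedef struct {
    uint32_t leftChild;  /* bytes 0-3: Ptr, page number of the child page */
    uint32_t keySize;    /* 24 bits: high 8 from byte 8, low 16 from bytes 4-5 */
    uint32_t dataSize;   /* 24 bits: high 8 from byte 9, low 16 from bytes 10-11 */
    uint16_t nextCell;   /* bytes 6-7: page offset of the next cell, 0 for the last */
} CellHeader;

void decode_cell_header(const uint8_t *c, CellHeader *out)
{
    out->leftChild = ((uint32_t)c[0] << 24) | ((uint32_t)c[1] << 16)
                   | ((uint32_t)c[2] << 8)  |  (uint32_t)c[3];
    out->keySize   = ((uint32_t)c[8] << 16) | ((uint32_t)c[4] << 8) | c[5];
    out->dataSize  = ((uint32_t)c[9] << 16) | ((uint32_t)c[10] << 8) | c[11];
    out->nextCell  = (uint16_t)(((uint16_t)c[6] << 8) | c[7]);
}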

    - -

    -The payload itself can be any number of bytes between 1 and 1048576. -But space to hold the payload is allocated in 4-byte chunks up to -238 bytes. If the entry contains more than 238 bytes of payload, then -additional payload data is stored on a linked list of overflow pages. -A 4 byte page number is appended to the cell that contains the first -page of this linked list. -

    - -

    -Each overflow page begins with a 4-byte value which is the -page number of the next overflow page in the list. This value is -0 for the last page in the list. The remaining -1020 bytes of the overflow page are available for storing payload. -Note that a full page is allocated regardless of the number of overflow -bytes stored. Thus, if the total payload for an entry is 239 bytes, -the first 238 are stored in the cell and the overflow page stores just -one byte. -

    - -

    -The structure of an overflow page looks like this: -

    - -
Bytes 0-3: Next Page | Bytes 4-1023: Overflow Data
    - -

    -All space on a b-tree page which is not used by the header or by cells -is filled by freeblocks. Freeblocks, like cells, are variable in size. -The size of a freeblock is at least 4 bytes and is always a multiple of -4 bytes. -The first 4 bytes contain a header and the remaining bytes -are unused. The structure of the freeblock is as follows: -

    - -
Bytes 0-1: Size | Bytes 2-3: Next | Bytes 4-1015: Unused
    - -

    -Freeblocks are stored in a linked list in increasing order. That is -to say, the first freeblock occurs at a lower index into the page than -the second free block, and so forth. The first 2 bytes of the header -are an integer which is the total number of bytes in the freeblock. -The second 2 bytes are the index into the page of the next freeblock -in the list. The last freeblock has a Next value of 0. -

    - -

    -When a new b-tree is created in a database, the root page of the b-tree -consist of a header and a single 1016 byte freeblock. As entries are -added, space is carved off of that freeblock and used to make cells. -When b-tree entries are deleted, the space used by their cells is converted -into freeblocks. Adjacent freeblocks are merged, but the page can still -become fragmented. The b-tree code will occasionally try to defragment -the page by moving all cells to the beginning and constructing a single -freeblock at the end to take up all remaining space. -

    - -

    3.3   The B-Tree Free Page List

    - -

    -When information is removed from an SQLite database such that one or -more pages are no longer needed, those pages are added to a list of -free pages so that they can be reused later when new information is -added. This subsection describes the structure of this freelist. -

    - -

    -The 32-bit integer beginning at byte-offset 52 in page 1 of the database -contains the address of the first page in a linked list of free pages. -If there are no free pages available, this integer has a value of 0. -The 32-bit integer at byte-offset 56 in page 1 contains the number of -free pages on the freelist. -

    - -

    -The freelist contains a trunk and many branches. The trunk of -the freelist is composed of overflow pages. That is to say, each page -contains a single 32-bit integer at byte offset 0 which -is the page number of the next page on the freelist trunk. -The payload area -of each trunk page is used to record pointers to branch pages. -The first 32-bit integer in the payload area of a trunk page -is the number of branch pages to follow (between 0 and 254) -and each subsequent 32-bit integer is a page number for a branch page. -The following diagram shows the structure of a trunk freelist page: -

    - -
Bytes 0-3: Next trunk page | Bytes 4-7: # of branch pages | Bytes 8-1023: Page numbers for branch pages
    - -
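A small C sketch of reading the three trunk-page fields shown above (the helper assumes a big-endian database image; a little-endian image would reverse the byte order):

#include <stdint.h>

static uint32_t read_u32_be(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

typedef struct {
    uint32_t nextTrunk;       /* bytes 0-3: next trunk page, 0 if this is the last */
    uint32_t nBranch;         /* bytes 4-7: number of branch page numbers that follow (0..254) */
    const uint8_t *branches;  /* bytes 8 onward: nBranch 4-byte branch page numbers */
} TrunkPage;

void decode_trunk_page(const uint8_t *page, TrunkPage *out)
{
    out->nextTrunk = read_u32_be(&page[0]);
    out->nBranch   = read_u32_be(&page[4]);
    out->branches  = &page[8];
}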

-It is important to note that only the pages on the trunk of the freelist contain pointers to other pages. The branch pages contain no data whatsoever. The fact that the branch pages are completely blank allows for an important optimization in the paging layer. When a branch page is removed from the freelist to be reused, it is not necessary to write the original content of that page into the rollback journal. The branch page contained no data to begin with, so there is no need to restore the page in the event of a rollback. Similarly, when a page is no longer needed and is added to the freelist as a branch page, it is not necessary to write the content of that page into the database file. Again, the page contains no real data so it is not necessary to record the content of that page. By reducing the amount of disk I/O required, these two optimizations allow some database operations to go four to six times faster than they would otherwise.

    - -

    4.0   The Schema Layer

    - -

    -The schema layer implements an SQL database on top of one or more -b-trees and keeps track of the root page numbers for all b-trees. -Where the b-tree layer provides only unformatted data storage with -a unique key, the schema layer allows each entry to contain multiple -columns. The schema layer also allows indices and non-unique key values. -

    - -

    -The schema layer implements two separate data storage abstractions: -tables and indices. Each table and each index uses its own b-tree -but they use the b-tree capabilities in different ways. For a table, -the b-tree key is a unique 4-byte integer and the b-tree data is the -content of the table row, encoded so that columns can be separately -extracted. For indices, the b-tree key varies in size depending on the -size of the fields being indexed and the b-tree data is empty. -

    - -

    4.1   SQL Table Implementation Details

    - -

Each row of an SQL table is stored in a single b-tree entry. The b-tree key is a 4-byte big-endian integer that is the ROWID or INTEGER PRIMARY KEY for that table row. The key is stored in a big-endian format so that keys will sort in numerical order using the memcmp() function.

    - -

    The content of a table row is stored in the data portion of -the corresponding b-tree table. The content is encoded to allow -individual columns of the row to be extracted as necessary. Assuming -that the table has N columns, the content is encoded as N+1 offsets -followed by N column values, as follows: -

    - -
offset 0 | offset 1 | ... | offset N-1 | offset N | value 0 | value 1 | ... | value N-1
    - -

    -The offsets can be either 8-bit, 16-bit, or 24-bit integers depending -on how much data is to be stored. If the total size of the content -is less than 256 bytes then 8-bit offsets are used. If the total size -of the b-tree data is less than 65536 then 16-bit offsets are used. -24-bit offsets are used otherwise. Offsets are always little-endian, -which means that the least significant byte occurs first. -

    - -

-Data is stored as a nul-terminated string. An empty string consists of just the nul terminator. A NULL value is an empty string with no nul-terminator. Thus a NULL value occupies zero bytes and an empty string occupies 1 byte.

    - -

    -Column values are stored in the order that they appear in the CREATE TABLE -statement. The offsets at the beginning of the record contain the -byte index of the corresponding column value. Thus, Offset 0 contains -the byte index for Value 0, Offset 1 contains the byte offset -of Value 1, and so forth. The number of bytes in a column value can -always be found by subtracting offsets. This allows NULLs to be -recovered from the record unambiguously. -
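As a sketch, extracting column i from a record that uses 8-bit offsets could be done as follows; the 16-bit and 24-bit cases work identically except that each offset is a little-endian multi-byte value. The structure and function names are illustrative only.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    const uint8_t *data;  /* first byte of the value */
    size_t n;             /* 0 bytes means SQL NULL; 1 byte (just the nul) means '' */
} ColumnValue;

/* rec[0..nCol] hold the N+1 one-byte offsets; the values follow them. */
ColumnValue record_column(const uint8_t *rec, int nCol, int i)
{
    ColumnValue v;
    assert(i >= 0 && i < nCol);
    v.data = rec + rec[i];
    v.n    = (size_t)(rec[i + 1] - rec[i]);  /* includes the nul terminator when present */
    return v;
}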

    - -

-Most columns are stored in the b-tree data as described above. The one exception is a column that has type INTEGER PRIMARY KEY. INTEGER PRIMARY KEY columns correspond to the 4-byte b-tree key. When an SQL statement attempts to read the INTEGER PRIMARY KEY, the 4-byte b-tree key is read rather than information out of the b-tree data. There is still an Offset associated with the INTEGER PRIMARY KEY, just like any other column, but the Value associated with that offset is always NULL.

    - -

    4.2   SQL Index Implementation Details

    - -

-SQL indices are implemented using a b-tree in which the key is used but the data is always empty. The purpose of an index is to map one or more column values into the ROWID for the table entry that contains those column values.

    - -

-Each b-tree key in an index consists of one or more column values followed by a 4-byte ROWID. Each column value is nul-terminated (even NULL values) and begins with a single character that indicates the datatype for that column value. Only three datatypes are supported: NULL, Number, and Text. NULL values are encoded as the character 'a' followed by the nul terminator. Numbers are encoded as the character 'b' followed by a string that has been crafted so that sorting the string using memcmp() will sort the corresponding numbers in numerical order. (See the sqliteRealToSortable() function in util.c of the SQLite sources for additional information on this encoding.) Numbers are also nul-terminated. Text values consist of the character 'c' followed by a copy of the text string and a nul-terminator. These encoding rules result in NULLs being sorted first, followed by numerical values in numerical order, followed by text values in lexicographical order.
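The following hypothetical helpers illustrate the type-prefix scheme for NULL and text key components; the numeric ('b') encoding is omitted because it relies on the sqliteRealToSortable() transformation mentioned above.

#include <stddef.h>
#include <string.h>

/* Appends a text column value at byte offset `off` of the key buffer:
** the character 'c', the text, and a nul terminator. Returns the number
** of bytes written. */
size_t encode_text_component(char *key, size_t off, const char *text)
{
    size_t n = strlen(text);
    key[off] = 'c';
    memcpy(&key[off + 1], text, n + 1);  /* copy the text plus its nul terminator */
    return 1 + n + 1;
}

/* A NULL column value is simply the character 'a' followed by a nul. */
size_t encode_null_component(char *key, size_t off)
{
    key[off] = 'a';
    key[off + 1] = '\0';
    return 2;
}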

    - -

4.3   SQL Schema Storage And Root B-Tree Page Numbers

    - -

-The database schema is stored in the database in a special table named "sqlite_master", which always has a root b-tree page number of 2. This table contains the original CREATE TABLE, CREATE INDEX, CREATE VIEW, and CREATE TRIGGER statements used to define the database to begin with. Whenever an SQLite database is opened, the sqlite_master table is scanned from beginning to end and all the original CREATE statements are played back through the parser in order to reconstruct an in-memory representation of the database schema for use in subsequent command parsing. For each CREATE TABLE and CREATE INDEX statement, the root page number for the corresponding b-tree is also recorded in the sqlite_master table so that SQLite will know where to look for the appropriate b-tree.

    - -

    -SQLite users can query the sqlite_master table just like any other table -in the database. But the sqlite_master table cannot be directly written. -The sqlite_master table is automatically updated in response to CREATE -and DROP statements but it cannot be changed using INSERT, UPDATE, or -DELETE statements as that would risk corrupting the database. -

    - -

-SQLite stores temporary tables and indices in a separate file from the main database file. The temporary table database file has the same structure as the main database file. The schema table for the temporary tables is stored on page 2 just as in the main database. But the schema table for the temporary database is named "sqlite_temp_master" instead of "sqlite_master". Other than the name change, it works exactly the same.

    - -

    4.4   Schema Version Numbering And Other Meta-Information

    - -

    -The nine 32-bit integers that are stored beginning at byte offset -60 of Page 1 in the b-tree layer are passed up into the schema layer -and used for versioning and configuration information. The meaning -of the first four integers is shown below. The other five are currently -unused. -

    - -
      -
1. The schema version number
2. The format version number
3. The recommended pager cache size
4. The safety level
    - -

    -The first meta-value, the schema version number, is used to detect when -the schema of the database is changed by a CREATE or DROP statement. -Recall that when a database is first opened the sqlite_master table is -scanned and an internal representation of the tables, indices, views, -and triggers for the database is built in memory. This internal -representation is used for all subsequent SQL command parsing and -execution. But what if another process were to change the schema -by adding or removing a table, index, view, or trigger? If the original -process were to continue using the old schema, it could potentially -corrupt the database by writing to a table that no longer exists. -To avoid this problem, the schema version number is changed whenever -a CREATE or DROP statement is executed. Before each command is -executed, the current schema version number for the database file -is compared against the schema version number from when the sqlite_master -table was last read. If those numbers are different, the internal -schema representation is erased and the sqlite_master table is reread -to reconstruct the internal schema representation. -(Calls to sqlite_exec() generally return SQLITE_SCHEMA when this happens.) -

    - -

    -The second meta-value is the schema format version number. This -number tells what version of the schema layer should be used to -interpret the file. There have been changes to the schema layer -over time and this number is used to detect when an older database -file is being processed by a newer version of the library. -As of this writing (SQLite version 2.7.0) the current format version -is "4". -

    - -

-The third meta-value is the recommended pager cache size as set by the DEFAULT_CACHE_SIZE pragma. If the value is positive it means that synchronous behavior is enabled (via the DEFAULT_SYNCHRONOUS pragma) and if negative it means that synchronous behavior is disabled.

    - -

-The fourth meta-value is the safety level, added in version 2.8.0. A value of 1 corresponds to a SYNCHRONOUS setting of OFF. In other words, SQLite does not pause to wait for journal data to reach the disk surface before overwriting pages of the database. A value of 2 corresponds to a SYNCHRONOUS setting of NORMAL. A value of 3 corresponds to a SYNCHRONOUS setting of FULL. If the value is 0, that means it has not been initialized so the default synchronous setting of NORMAL is used.

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/fileio.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/fileio.html --- sqlite3-3.4.2/www/fileio.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/fileio.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,1960 @@ + + +No Title + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + + + +
    SQLite File IO Specification
    +
    Table Of Contents
    +
    + +

    Overview

    +

+ SQLite stores an entire database within a single file, the format of which is described in the SQLite Database File Format document ff_sqlitert_requirements. Each database file is stored within a file system, presumably provided by the host operating system. Instead of interfacing with the operating system directly, the host application is required to supply an adaptor component that implements the SQLite Virtual File System interface (described in capi_sqlitert_requirements). The adaptor component is responsible for translating the calls made by SQLite to the VFS interface into calls to the file-system interface provided by the operating system. This arrangement is depicted in figure figure_vfs_role.

    +

    Figure - Virtual File System (VFS) Adaptor +

    +

    + Although it would be easy to design a system that uses the VFS + interface to read and update the content of a database file stored + within a file-system, there are several complicated issues that need + to be addressed by such a system: +

      +
    1. SQLite is required to implement atomic and durable + transactions (the 'A' and 'D' from the ACID acronym), even if an + application, operating system or power failure occurs midway through or + shortly after updating a database file. +

      To implement atomic transactions in the face of potential + application, operating system or power failures, database writers write + a copy of those portions of the database file that they are going to + modify into a second file, the journal file, before writing + to the database file. If a failure does occur while modifying the + database file, SQLite can reconstruct the original database + (before the modifications were attempted) based on the contents of + the journal file. +

    2. SQLite is required to implement isolated transactions (the 'I' + from the ACID acronym). +

  This is done by using the file locking facilities provided by the VFS adaptor to serialize writers (write transactions) and to prevent readers (read transactions) from accessing database files while writers are midway through updating them.

    3. For performance reasons, it is advantageous to minimize the + quantity of data read and written to and from the file-system. +

      As one might expect, the amount of data read from the database + file is minimized by caching portions of the database file in main + memory. Additionally, multiple updates to the database file that + are part of the same write transaction may be cached in + main memory and written to the file together, allowing for + more efficient IO patterns and eliminating the redundant write + operations that could take place if part of the database file is + modified more than once within a single write transaction. +

    +

    + System requirement references for the above points. +

    + This document describes in detail the way that SQLite uses the API + provided by the VFS adaptor component to solve the problems and implement + the strategies enumerated above. It also specifies the assumptions made + about the properties of the system that the VFS adaptor provides + access to. For example, specific assumptions about the extent of + data corruption that may occur if a power failure occurs while a + database file is being updated are presented in section + fs_characteristics. +

    + This document does not specify the details of the interface that must + be implemented by the VFS adaptor component, that is left to + capi_sqlitert_requirements. +

    Relationship to Other Documents

    +

    + Related to C-API requirements: +

      +
    1. Opening a connection. +
    2. Closing a connection. +
    +

    + Related to SQL requirements: +

      +
    1. Opening a read-only transaction. +
    2. Terminating a read-only transaction. +
    3. Opening a read-write transaction. +
    4. Committing a read-write transaction. +
    5. Rolling back a read-write transaction. +
    6. Opening a statement transaction. +
    7. Committing a statement transaction. +
    8. Rolling back a statement transaction. +
    9. Committing a multi-file transaction. +
    +

    + Related to file-format requirements: +

      +
    1. Pinning (reading) a database page. +
    2. Unpinning a database page. +
    3. Modifying the contents of a database page. +
    4. Appending a new page to the database file. +
    5. Truncating a page from the end of the database file. +
    +

    Document Structure

    +

+ Section vfs_assumptions of this document describes the various assumptions made about the system to which the VFS adaptor component provides access. The basic capabilities and functions required from the VFS implementation are presented along with the description of the VFS interface in capi_sqlitert_requirements. Section vfs_assumptions complements this by describing in more detail the assumptions made about VFS implementations on which the algorithms presented in this document depend. Some of these assumptions relate to performance issues, but most concern the expected state of the file-system following a failure that occurs midway through modifying a database file.

    + Section database_connections introduces the concept of + a database connection, a combination of a file-handle and + in-memory cache used to access a database file. It also describes the + VFS operations required when a new database connection is + created (opened), and when one is destroyed (closed). +

    + Section reading_data describes the steps required to + open a read transaction and read data from a database file. +

    + Section writing_data describes the steps required to + open a write transaction and write data to a database file. +

+ Section rollback describes the way in which aborted write transactions may be rolled back (reverted), either as a result of an explicit user directive or because an application, operating system or power failure occurred while SQLite was midway through updating a database file.

    + Section page_cache_algorithms describes some of the + algorithms used to determine exactly which portions of the database + file are cached by a page cache, and the effect that they + have on the quantity and nature of the required VFS operations. + It may at first seem odd to include the page cache, which is + primarily an implementation detail, in this document. However, it is + necessary to acknowledge and describe the page cache in order to + provide a more complete explanation of the nature and quantity of IO + performed by SQLite. +

    Glossary

    +

    + After this document is ready, make the vocabulary consistent and + then add a glossary here. +

    VFS Adaptor Related Assumptions

    +

    + This section documents those assumptions made about the system that + the VFS adaptor provides access to. The assumptions noted in section + fs_characteristics are particularly important. If these + assumptions are not true, then a power or operating system failure + may cause SQLite databases to become corrupted. +

    Performance Related Assumptions

    +

    + SQLite uses the assumptions in this section to try to speed up + reading from and writing to the database file. +

    + It is assumed that writing a series of sequential blocks of data to + a file in order is faster than writing the same blocks in an arbitrary + order. +

    System Failure Related Assumptions

    +

+ In the event of an operating system or power failure, the various combinations of file-system software and storage hardware available provide varying levels of guarantee as to the integrity of the data written to the file system just before or during the failure. The exact combination of IO operations that SQLite is required to perform in order to safely modify a database file depends on the exact characteristics of the target platform.

+ This section describes the assumptions that SQLite makes about the content of a file-system following a power or system failure. In other words, it describes the extent of file and file-system corruption that such an event may cause.

    + SQLite queries an implementation for file-system characteristics + using the xDeviceCharacteristics() and xSectorSize() methods of the + database file file-handle. These two methods are only ever called + on file-handles open on database files. They are not called for + journal files, master-journal files or + temporary database files. +

+ The file-system sector size value determined by calling the xSectorSize() method is a power of 2 value between 512 and 32768, inclusive. (Reference to exactly how this is determined.) SQLite assumes that the underlying storage device stores data in blocks of sector-size bytes each, known as sectors. It is also assumed that each aligned block of sector-size bytes of each file is stored in a single device sector. If the file is not an exact multiple of sector-size bytes in size, then the final device sector is partially empty.

    + Normally, SQLite assumes that if a power failure occurs while + updating any portion of a sector then the contents of the entire + device sector is suspect following recovery. After writing to + any part of a sector within a file, it is assumed that the modified + sector contents are held in a volatile buffer somewhere within + the system (main memory, disk cache etc.). SQLite does not assume + that the updated data has reached the persistent storage media, until + after it has successfully synced the corresponding file by + invoking the VFS xSync() method. Syncing a file causes all + modifications to the file up until that point to be committed to + persistent storage. +

    + Based on the above, SQLite is designed around a model of the + file-system whereby any sector of a file written to is considered to be + in a transient state until after the file has been successfully + synced. Should a power or system failure occur while a sector + is in a transient state, it is impossible to predict its contents + following recovery. It may be written correctly, not written at all, + overwritten with random data, or any combination thereof. +

+ For example, if the sector-size of a given file-system is 2048 bytes, and SQLite opens a file and writes a 1024 byte block of data to offset 3072 of the file, then according to the model the second sector of the file is in the transient state. If a power failure or operating system crash occurs before or during the next call to xSync() on the file handle, then following system recovery SQLite assumes that all file data between byte offsets 2048 and 4095, inclusive, is invalid. It also assumes that the first sector of the file, containing the data from byte offset 0 to 2047 inclusive, is valid, since it was not in a transient state when the crash occurred.
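The sector arithmetic behind this example can be written as a small, purely illustrative C helper:

#include <stdint.h>

/* Computes the range of aligned sectors that a write of `amount` bytes at
** byte offset `offset` puts into the transient state. `sector_size` must
** be a power of two between 512 and 32768. */
void transient_sector_range(int64_t offset, int64_t amount, int64_t sector_size,
                            int64_t *first_sector, int64_t *last_sector)
{
    *first_sector = offset / sector_size;
    *last_sector  = (offset + amount - 1) / sector_size;
}

/* With the values from the example (offset 3072, amount 1024, sector size
** 2048) both results are sector 1, i.e. byte offsets 2048 through 4095 are
** suspect until the file is successfully synced. */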

    + Assuming that any and all sectors in the transient state may be + corrupted following a power or system failure is a very pessimistic + approach. Some modern systems provide more sophisticated guarantees + than this. SQLite allows the VFS implementation to specify at runtime + that the current platform supports zero or more of the following + properties: +

      +
    • The safe-append property. If a system supports the + safe-append property, it means that when a file is extended + the new data is written to the persistent media before the size + of the file itself is updated. This guarantees that if a failure + occurs after a file has been extended, following recovery + the write operations that extended the file will appear to have + succeeded or not occurred at all. It is not possible for invalid + or garbage data to appear in the extended region of the file. +

    • The atomic-write property. A system that supports this + property also specifies the size or sizes of the blocks that it + is capable of writing. Valid sizes are powers of two greater than + 512. If a write operation modifies a block of n bytes, + where n is one of the block sizes for which atomic-write + is supported, then it is impossible for an aligned write of n + bytes to cause data corruption. If a failure occurs after such + a write operation and before the applicable file handle is + synced, then following recovery it will appear as if the + write operation succeeded or did not take place at all. It is not + possible that only part of the data specified by the write operation + was written to persistent media, nor is it possible for any content + of the sectors spanned by the write operation to be replaced with + garbage data, as it is normally assumed to be. +

    • The sequential-write property. A system that supports the + sequential-write property guarantees that the various write + operations on files within the same file-system are written to the + persistent media in the same order that they are performed by the + application and that each operation is concluded before the next + is begun. If a system supports the sequential-write + property, then the model used to determine the possible states of + the file-system following a failure is different. +

  If a system supports sequential-write it is assumed that syncing any file within the file system flushes all write operations on all files (not just the synced file) to the persistent media. If a failure does occur, it is not known whether or not any of the write operations performed by SQLite since the last time a file was synced have reached the persistent media. SQLite is able to assume that if the write operations of unknown status are arranged in the order that they occurred:

        +
      1. the first n operations will have been executed + successfully, +
      2. the next operation puts all device sectors that it modifies + into the transient state, so that following recovery each + sector may be partially written, completely written, not + written at all or populated with garbage data, +
      3. the remaining operations will not have had any effect on + the contents of the file-system. +
      +
    +

    Failure Related Assumption Details

    +

    + This section describes how the assumptions presented in the parent + section apply to the individual API functions and operations provided + by the VFS to SQLite for the purposes of modifying the contents of the + file-system. +

    + SQLite manipulates the contents of the file-system using a combination + of the following four types of operation: +

      +
    • Create file operations. SQLite may create new files + within the file-system by invoking the xOpen() method of + the sqlite3_io_methods object. +
    • Delete file operations. SQLite may remove files from the + file system by calling the xDelete() method of the + sqlite3_io_methods object. +
    • Truncate file operations. SQLite may truncate existing + files by invoking the xTruncate() method of the sqlite3_file + object. +
• Write file operations. SQLite may modify the contents and increase the size of a file by invoking the xWrite() method of the sqlite3_file object.
    +

    + Additionally, all VFS implementations are required to provide the + sync file operation, accessed via the xSync() method of the + sqlite3_file object, used to flush create, write and truncate operations + on a file to the persistent storage medium. +

    + The formalized assumptions in this section refer to system failure + events. In this context, this should be interpreted as any failure that + causes the system to stop operating. For example a power failure or + operating system crash. +

+ SQLite does not assume that a create file operation has actually modified the file-system records within persistent storage until after the file has been successfully synced.

    + If a system failure occurs during or after a "create file" + operation, but before the created file has been synced, then + SQLite assumes that it is possible that the created file may not + exist following system recovery. +

    + Of course, it is also possible that it does exist following system + recovery. +

    + If a "create file" operation is executed by SQLite, and then the + created file synced, then SQLite assumes that the file-system + modifications corresponding to the "create file" operation have been + committed to persistent media. It is assumed that if a system + failure occurs any time after the file has been successfully + synced, then the file is guaranteed to appear in the file-system + following system recovery. +

    + A delete file operation (invoked by a call to the VFS xDelete() + method) is assumed to be an atomic and durable operation. +

    +

    + If a system failure occurs at any time after a "delete file" + operation (call to the VFS xDelete() method) returns successfully, it is + assumed that the file-system will not contain the deleted file following + system recovery. +

+ If a system failure occurs during a "delete file" operation, it is assumed that following system recovery the file-system will either contain the file being deleted in the state it was in before the operation was attempted, or not contain the file at all. It is assumed that it is not possible for the file to have become corrupted purely as a result of a failure occurring during a "delete file" operation.

    + The effects of a truncate file operation are not assumed to + be made persistent until after the corresponding file has been + synced. +

    + If a system failure occurs during or after a "truncate file" + operation, but before the truncated file has been synced, then + SQLite assumes that the size of the truncated file is either as large + or larger than the size that it was to be truncated to. +

    + If a system failure occurs during or after a "truncate file" + operation, but before the truncated file has been synced, then + it is assumed that the contents of the file up to the size that the + file was to be truncated to are not corrupted. +

    + The above two assumptions may be interpreted to mean that if a + system failure occurs after file truncation but before the truncated + file is synced, the contents of the file following the point + at which it was to be truncated may not be trusted. They may contain + the original file data, or may contain garbage. +

    + If a "truncate file" operation is executed by SQLite, and then the + truncated file synced, then SQLite assumes that the file-system + modifications corresponding to the "truncate file" operation have been + committed to persistent media. It is assumed that if a system + failure occurs any time after the file has been successfully + synced, then the effects of the file truncation are guaranteed + to appear in the file system following recovery. +

    + A write file operation modifies the contents of an existing file + within the file-system. It may also increase the size of the file. + The effects of a write file operation are not assumed to + be made persistent until after the corresponding file has been + synced. +

    + If a system failure occurs during or after a "write file" + operation, but before the corresponding file has been synced, + then it is assumed that the content of all sectors spanned by the + write file operation are untrustworthy following system + recovery. This includes regions of the sectors that were not + actually modified by the write file operation. +

+ If a system failure occurs on a system that supports the atomic-write property for blocks of size N bytes following an aligned write of N bytes to a file but before the file has been successfully synced, then it is assumed following recovery that all sectors spanned by the write operation were correctly updated, or that none of the sectors were modified at all.

+ If a system failure occurs on a system that supports the safe-append property following a write operation that appends data to the end of the file without modifying any of the existing file content but before the file has been successfully synced, then it is assumed following recovery that either the data was correctly appended to the file, or that the file size remains unchanged. It is assumed that it is impossible that the file be extended but populated with incorrect data.

    + Following a system recovery, if a device sector is deemed to be untrustworthy as defined by A21008 and neither A21011 nor A21012 applies to the range of bytes written, then no assumption can be made about the content of the sector following recovery. It is assumed that it is possible for such a sector to be written correctly, not written at all, populated with garbage data, or any combination thereof. +

    + If a system failure occurs during or after a "write file" + operation that causes the file to grow, but before the corresponding + file has been synced, then it is assumed that the size of + the file following recovery is as large or larger than it was when + it was most recently synced. +

    + If a system supports the sequential-write property, then further + assumptions may be made with respect to the state of the file-system + following recovery from a system failure. Specifically, it is + assumed that create, truncate, delete and write file operations are + applied to the persistent representation in the same order as they + are performed by SQLite. Furthermore, it is assumed that the + file-system waits until one operation is safely written to the + persistent media before the next is attempted, just as if the relevant + file were synced following each operation. +

    + If a system failure occurs on a system that supports the + sequential-write property, then it is assumed that all + operations completed before the last time any file was synced + have been successfully committed to persistent media. +

    + If a system failure occurs on a system that supports the sequential-write property, then it is assumed that the set of possible states that the file-system may be in following recovery is the same as if each of the write operations performed since the most recent time a file was synced was itself followed by a sync file operation, and that the system failure may have occurred during any of the write or sync file operations. +

    +

    Database Connections

    +

    + Within this document, the term database connection has a slightly + different meaning from that which one might assume. The handles returned + by the sqlite3_open() and sqlite3_open16() + APIs (reference) are referred to as database + handles. A database connection is a connection to a single + database file using a single file-handle, which is held open for the + lifetime of the connection. Using the SQL ATTACH syntax, multiple + database connections may be accessed via a single database + handle. Or, using SQLite's shared-cache mode feature, multiple + database handles may access a single database connection. +
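    + The distinction can be illustrated with a short snippet using the public API. In the following sketch (the file names are hypothetical), a single database handle ends up using two database connections in the sense defined above: one on the main database file and, after the ATTACH, a second on the attached file. +

    #include <sqlite3.h>
    #include <stdio.h>

    /* Illustrative only: one database *handle* (db) using two database
    ** *connections* - one on "main.db" and, after ATTACH, one on "aux.db". */
    int open_two_connections(void){
      sqlite3 *db = 0;
      int rc = sqlite3_open("main.db", &db);                    /* first connection */
      if( rc==SQLITE_OK ){
        rc = sqlite3_exec(db, "ATTACH 'aux.db' AS aux", 0, 0, 0);    /* second */
      }
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", sqlite3_errmsg(db));
      }
      sqlite3_close(db);
      return rc;
    }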

    + Usually, a new database connection is opened whenever the user opens a new database handle on a real database file (not an in-memory database) or when a database file is attached to an existing database connection using the SQL ATTACH syntax. However, if the shared-cache mode feature is enabled, then the database file may be accessed through an existing database connection. For more information on shared-cache mode, refer to Reference. The various IO operations required to open a new connection are detailed in section open_new_connection of this document. +

    + Similarly, a database connection is usually closed when the user + closes a database handle that is open on a real database file or + has had one or more real database files attached to it using the ATTACH + mechanism, or when a real database file is detached from a database + connection using the DETACH syntax. Again, the exception is if + shared-cache mode is enabled. In this case, a database + connection is not closed until its number of users reaches zero. + The IO related steps required to close a database connection are + described in section closing_database_connection. +

    + After sections 4 and 5 are finished, come back here and see if we can add a + list of state items associated with each database connection to make things + easier to understand. i.e each database connection has a file handle, a set + of entries in the page cache, an expected page size etc. +

    Opening a New Connection

    +

    + This section describes the VFS operations that take place when a + new database connection is created. +

    + Opening a new database connection is a two-step process: +

      +
    1. A file-handle is opened on the database file. +
    2. If step 1 was successful, an attempt is made to read the + database file header from the database file using the + new file-handle. +
    +

    + In step 2 of the procedure above, the database file is not locked + before it is read from. This is the only exception to the locking + rules described in section reading_data. +

    + The reason for attempting to read the database file header + is to determine the page-size used by the database file. + Because it is not possible to be certain as to the page-size + without holding at least a shared lock on the database file + (because some other database connection might have changed it + since the database file header was read), the value read from the + database file header is known as the expected page size. +

    +When a new database connection is required, SQLite shall attempt +to open a file-handle on the database file. If the attempt fails, then +no new database connection is created and an error returned. +

    +When a new database connection is required, after opening the +new file-handle, SQLite shall attempt to read the first 100 bytes +of the database file. If the attempt fails for any other reason than +that the opened file is less than 100 bytes in size, then +the file-handle is closed, no new database connection is created +and an error returned instead. +

    +If the database file header is successfully read from a newly +opened database file, the connections expected page-size shall +be set to the value stored in the page-size field of the +database header. +

    +If the database file header cannot be read from a newly opened +database file (because the file is less than 100 bytes in size), the +connections expected page-size shall be set to the compile time +value of the SQLITE_DEFAULT_PAGESIZE option. +
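    + The two requirements above may be illustrated by the following sketch, which reads the 100 byte header through an already-opened VFS file-handle. It is illustrative only: the helper name is hypothetical, the page-size field is assumed to be the 2-byte big-endian value at byte offset 16 of the database header, and the fallback macro name follows the requirement above. +

    #include <sqlite3.h>

    #ifndef SQLITE_DEFAULT_PAGESIZE
    # define SQLITE_DEFAULT_PAGESIZE 1024   /* assumed compile-time default */
    #endif

    /* Sketch: read the first 100 bytes of the database file through an
    ** already-opened file-handle and derive the expected page size. */
    static int get_expected_page_size(sqlite3_file *pDb, int *pPageSize){
      unsigned char aHdr[100];
      int rc = pDb->pMethods->xRead(pDb, aHdr, 100, 0);
      if( rc==SQLITE_IOERR_SHORT_READ ){
        /* File is smaller than 100 bytes: fall back to the default value. */
        *pPageSize = SQLITE_DEFAULT_PAGESIZE;
        return SQLITE_OK;
      }
      if( rc!=SQLITE_OK ) return rc;   /* caller closes the file-handle */
      /* Page-size: 2-byte big-endian integer at offset 16 (assumed). */
      *pPageSize = (aHdr[16]<<8) | aHdr[17];
      return SQLITE_OK;
    }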

    Closing a Connection

    +

    + This section describes the VFS operations that take place when an + existing database connection is closed (destroyed). +

    + Closing a database connection is a simple matter. The open VFS + file-handle is closed and in-memory page cache related resources + are released. +

    +When a database connection is closed, SQLite shall close the +associated file handle at the VFS level. +

    +When a database connection is closed, all associated page +cache entries shall be discarded. +

    The Page Cache

    +

    + The contents of an SQLite database file are formatted as a set of + fixed size pages. See ff_sqlitert_requirements for a + complete description of the format used. The page size used + for a particular database is stored as part of the database file + header at a well-known offset within the first 100 bytes of the + file. Almost all read and write operations performed by SQLite on + database files are done on blocks of data page-size bytes + in size. +

    + All SQLite database connections running within a single process share a single page cache. The page cache caches data read from database files in main-memory on a per-page basis. When SQLite requires data from a database file to satisfy a database query, it checks the page cache for usable cached versions of the required database pages before loading them from the database file. If no usable cache entry can be found and the database page data is loaded from the database file, it is cached in the page cache in case the same data is needed again later. Because reading from the database file is assumed to be an order of magnitude slower than reading from main-memory, caching database page content in the page cache to minimize the number of read operations performed on the database file is a significant performance enhancement. +

    + The page cache is also used to buffer database write operations. When SQLite is required to modify one or more of the database pages that make up a database file, it first modifies the cached version of the page in the page cache. At that point the page is considered a "dirty" page. At some point later on, the new content of the "dirty" page is copied from the page cache into the database file via the VFS interface. Buffering writes in the page cache can reduce the number of write operations required on the database file (in cases where the same page is updated twice) and allows optimizations based on the assumptions outlined in section fs_performance. +

    + Database read and write operations, and the way in which they interact + with and use the page cache, are described in detail in sections + reading_data and writing_data of this document, + respectively. +

    + At any one time, the page cache contains zero or more page cache + entries, each of which has the following data associated with it: +

      +
    • + A reference to the associated database connection. Each + entry in the page cache is associated with a single database + connection; the database connection that created the entry. + A page cache entry is only ever used by the database + connection that created it. Page cache entries are not shared between + database connections. +

    • + The page number of the cached page. Pages are sequentially + numbered within a database file starting from page 1 (page 1 begins at + byte offset 0). Refer to ff_sqlitert_requirements for + details. +

    • + The cached data; a blob of data page-size bytes in size. +

    +

    + The first two elements in the list above, the associated database + connection and the page number, uniquely identify the + page cache entry. At no time may the page cache contain two + entries for which both the database connection and page + number are identical. Or, put another way, a single database + connection never caches more than one copy of a database page + within the page cache. +

    + At any one time, each page cache entry may be said to be a clean + page, a non-writable dirty page or a writable dirty page, + according to the following definitions: +

      +
    • A clean page is one for which the cached data + currently matches the contents of the corresponding page of + the database file. The page has not been modified since it was + loaded from the file. +

    • A dirty page is a page cache entry for which + the cached data has been modified since it was loaded from the database + file, and so no longer matches the current contents of the + corresponding database file page. A dirty page is one that is + currently buffering a modification made to the database file as part + of a write transaction. +

    • Within this document, the term non-writable dirty page is used specifically to refer to a page cache entry with modified content that it is not yet safe to write into the database file. It is not safe to update a database file with a buffered write if a power or system failure that occurs during or soon after the update may cause the database to become corrupt following system recovery, according to the assumptions made in section fs_assumption_details. +

    • A dirty page for which it would be safe to update the corresponding database file page with the modified contents without risking database corruption is known as a writable dirty page. +

    +

    + The exact logic used to determine whether a page cache entry with modified content is a non-writable dirty page or a writable dirty page is presented in section page_cache_algorithms. +
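    + The following is a minimal sketch of the per-entry state described in this section. The structure and field names are hypothetical and do not correspond to SQLite's internal data structures. +

    typedef struct DbConnection DbConnection;  /* hypothetical connection type */

    /* Hypothetical representation of one page cache entry and its state. */
    typedef enum {
      PAGE_CLEAN,               /* matches the corresponding database file page */
      PAGE_DIRTY_NON_WRITABLE,  /* modified, not yet safe to write out */
      PAGE_DIRTY_WRITABLE       /* modified, safe to write to the database file */
    } PageState;

    typedef struct PageCacheEntry PageCacheEntry;
    struct PageCacheEntry {
      DbConnection *pConn;      /* Database connection that created the entry */
      unsigned int pgno;        /* Page number, numbered from 1 */
      PageState eState;         /* Clean / non-writable dirty / writable dirty */
      unsigned char *aData;     /* page-size bytes of cached page content */
    };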

    + Because main-memory is a limited resource, the page cache cannot + be allowed to grow indefinitely. As a result, unless all database files + opened by database connections within the process are quite small, + sometimes data must be discarded from the page cache. In practice + this means page cache entries must be purged to make room + for new ones. If a page cache entry being removed from the page + cache to free main-memory is a dirty page, then its contents + must be saved into the database file before it can be discarded without + data loss. The following two sub-sections describe the algorithms used by + the page cache to determine exactly when existing page cache + entries are purged (discarded). +

    Page Cache Configuration

    +

    + Describe the parameters set to configure the page cache limits. +

    Page Cache Algorithms

    +

    + Requirements describing the way in which the configuration parameters + are used. About LRU etc. +

    Reading Data

    +

    + In order to return data from the database to the user, for example as + the results of a SELECT query, SQLite must at some point read data + from the database file. Usually, data is read from the database file in + aligned blocks of page-size bytes. The exception is when the + database file header fields are being inspected, before the + page-size used by the database can be known. +

    + With two exceptions, a database connection must have an open + transaction (either a read-only transaction or a + read/write transaction) on the database before data may be + read from the database file. +

    + The two exceptions are: +

      +
    • When an attempt is made to read the 100 byte database file + header immediately after opening the database connection + (see section open_new_connection). When this occurs + no lock is held on the database file. +
    • Data read while in the process of opening a read-only transaction + (see section open_read_only_trans). These read + operations occur after a shared lock is held on the database + file. +
    +

    + Once a transaction has been opened, reading data from a database + connection is a simple operation. Using the xRead() method of the + file-handle open on the database file, the required database file + pages are read one at a time. SQLite never reads partial pages and + always uses a single call to xRead() for each required page. +

    + After reading the data for a database page, SQLite stores the raw page of data in the page cache. Each time a page of data is required by the upper layers, the page cache is queried to see if it contains a copy of the required page stored by the current database connection. If such an entry can be found, then the required data is read from the page cache instead of the database file. Only a connection with an open transaction (either a read-only transaction or a read/write transaction) on the database may read data from the page cache. In this sense reading from the page cache is no different from reading from the database file. +

    + Refer to section page_cache_algorithms for a description + of exactly how and for how long page data is stored in the + page cache. +

    +Except for the read operation required by H35070 and those reads made +as part of opening a read-only transaction, SQLite shall ensure that +a database connection has an open read-only or read/write +transaction when any data is read from the database file. +

    +Aside from those read operations described by H35070 and H21XXX, SQLite +shall read data from the database file in aligned blocks of +page-size bytes, where page-size is the database page size +used by the database file. +

    +SQLite shall ensure that a database connection has an open +read-only or read/write transaction before using data stored in the page +cache to satisfy user queries. +
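    + For example, assuming that pages are numbered from 1 and that page N begins at byte offset (N-1)*page-size, a single page read through the VFS interface might be sketched as follows. +

    #include <sqlite3.h>

    /* Sketch: read database page pgno (numbered from 1) with a single
    ** aligned xRead() call of page-size bytes. */
    static int read_db_page(
      sqlite3_file *pDb,          /* Open file-handle on the database file */
      unsigned int pgno,          /* Page to read (1-based) */
      int pageSize,               /* Database page size in bytes */
      unsigned char *aBuf         /* OUT: pageSize bytes of page content */
    ){
      sqlite3_int64 iOfst = (sqlite3_int64)(pgno-1) * pageSize;
      return pDb->pMethods->xRead(pDb, aBuf, pageSize, iOfst);
    }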

    Opening a Read-Only Transaction

    +

    + Before data may be read from a database file or queried from + the page cache, a read-only transaction must be + successfully opened by the associated database connection (this is true + even if the connection will eventually write to the database, as a + read/write transaction may only be opened by upgrading from a + read-only transaction). This section describes the procedure + for opening a read-only transaction. +

    + The key element of a read-only transaction is that the + file-handle open on the database file obtains and holds a + shared-lock on the database file. Because a connection requires + an exclusive-lock before it may actually modify the contents + of the database file, and by definition while one connection is holding + a shared-lock no other connection may hold an + exclusive-lock, holding a shared-lock guarantees that + no other process may modify the database file while the read-only + transaction remains open. This ensures that read-only + transactions are sufficiently isolated from the transactions of + other database users (see section overview). +

    Obtaining the shared lock itself on the database file is quite simple: SQLite just calls the xLock() method of the database file handle. Some of the other processes that take place as part of opening the read-only transaction are quite complex. The steps that SQLite is required to take to open a read-only transaction, in the order in which they must occur, are as follows: +

      +
    1. A shared-lock is obtained on the database file. +
    2. The connection checks if a hot journal file exists in the + file-system. If one does, then it is rolled back before continuing. +
    3. The connection checks if the data in the page cache may + still be trusted. If not, all page cache data is discarded. +
    4. If the file-size is not zero bytes and the page cache does not + contain valid data for the first page of the database, then the + data for the first page must be read from the database. +
    +

    + Of course, an error may occur while attempting any of the 4 steps + enumerated above. If this happens, then the shared-lock is + released (if it was obtained) and an error returned to the user. + Step 2 of the procedure above is described in more detail in section + hot_journal_detection. Section cache_validation + describes the process identified by step 3 above. Further detail + on step 4 may be found in section read_page_one. +

    +When required to open a read-only transaction using a +database connection, SQLite shall first attempt to obtain +a shared-lock on the file-handle open on the database file. +

    +If, while opening a read-only transaction, SQLite fails to obtain +the shared-lock on the database file, then the process is +abandoned, no transaction is opened and an error returned to the user. +

    + The most common reason an attempt to obtain a shared-lock may + fail is that some other connection is holding an exclusive or + pending lock. However it may also fail because some other + error (e.g. an IO or comms related error) occurs within the call to the + xLock() method. +

    +While opening a read-only transaction, after successfully +obtaining a shared lock on the database file, SQLite shall +attempt to detect and roll back a hot journal file associated +with the same database file. +

    +If, while opening a read-only transaction, SQLite encounters +an error while attempting to detect or roll back a hot journal +file, then the shared-lock on the database file is released, +no transaction is opened and an error returned to the user. +

    + Section hot_journal_detection contains a description of and requirements governing the detection of a hot-journal file referred to in the above requirements. +

    +Assuming no errors have occurred, then after attempting to detect and roll back a hot journal file, if the page cache contains any entries associated with the current database connection, then SQLite shall validate the contents of the page cache by testing the file change counter. This procedure is known as cache validation. +

    + The cache validation process is described in detail in section cache_validation. +

    +If the cache validation procedure prescribed by H35040 is required and does not prove that the page cache entries associated with the current database connection are valid, then SQLite shall discard all entries associated with the current database connection from the page cache. +

    + The numbered list above notes that the data for the first page of the + database file, if it exists and is not already loaded into the page + cache, must be read from the database file before the read-only + transaction may be considered opened. This is handled by + requirement H35240. +

    Hot Journal Detection

    +

    + This section describes the procedure that SQLite uses to detect a + hot journal file. If a hot journal file is detected, + this indicates that at some point the process of writing a + transaction to the database was interrupted and a recovery operation + (hot journal rollback) needs to take place. This section does + not describe the process of hot journal rollback (see section + hot_journal_rollback) or the processes by which a + hot journal file may be created (see section + writing_data). +

    + The procedure used to detect a hot-journal file is quite + complex. The following steps take place: +

      +
    1. Using the VFS xAccess() method, SQLite queries the file-system + to see if the journal file associated with the database exists. + If it does not, then there is no hot-journal file. +
    2. By invoking the xCheckReservedLock() method of the file-handle + opened on the database file, SQLite checks if some other connection + holds a reserved lock or greater. If some other connection + does hold a reserved lock, this indicates that the other + connection is midway through a read/write transaction (see + section writing_data). In this case the + journal file is not a hot-journal and must not be + rolled back. +
    3. Using the xFileSize() method of the file-handle opened + on the database file, SQLite checks if the database file is + 0 bytes in size. If it is, the journal file is not considered + to be a hot journal file. Instead of rolling back the + journal file, in this case it is deleted from the file-system + by calling the VFS xDelete() method. Technically, + there is a race condition here. This step should be moved to + after the exclusive lock is held. +
    4. An attempt is made to upgrade to an exclusive lock on the + database file. If the attempt fails, then all locks, including + the recently obtained shared lock are dropped. The attempt + to open a read-only transaction has failed. This occurs + when some other connection is also attempting to open a + read-only transaction and the attempt to gain the + exclusive lock fails because the other connection is also + holding a shared lock. It is left to the other connection + to roll back the hot journal. +
      + It is important that the file-handle lock is upgraded + directly from shared to exclusive in this case, + instead of first upgrading to reserved or pending + locks as is required when obtaining an exclusive lock to + write to the database file (section writing_data). + If SQLite were to first upgrade to a reserved or + pending lock in this scenario, then a second process also + trying to open a read-transaction on the database file might + detect the reserved lock in step 2 of this process, + conclude that there was no hot journal, and commence + reading data from the database file. +
    5. The xAccess() method is invoked again to detect if the journal + file is still in the file system. If it is, then it is a + hot-journal file and SQLite tries to roll it back (see section + rollback). +
    +
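    + The following sketch outlines the detection procedure in terms of the VFS interface, following the detailed requirements below rather than the summary above. It is illustrative only: error handling is reduced to returning the error code, and the caller is assumed to release locks and close the journal file-handle as appropriate. +

    #include <sqlite3.h>

    /* Outline of hot-journal detection. On success *pIsHot is set to
    ** non-zero if a hot journal that must be rolled back was found. */
    static int detect_hot_journal(
      sqlite3_vfs *pVfs,
      const char *zJournal,       /* Name of the journal file */
      sqlite3_file *pDb,          /* File-handle holding the shared lock */
      sqlite3_file *pJrnl,        /* File-handle to open on the journal file */
      int *pIsHot
    ){
      int rc, exists, locked, outFlags;
      sqlite3_int64 sz;

      *pIsHot = 0;
      /* Does a journal file exist at all? */
      rc = pVfs->xAccess(pVfs, zJournal, SQLITE_ACCESS_EXISTS, &exists);
      if( rc!=SQLITE_OK || !exists ) return rc;

      /* Is some other connection holding a reserved or greater lock? */
      rc = pDb->pMethods->xCheckReservedLock(pDb, &locked);
      if( rc!=SQLITE_OK || locked ) return rc;

      /* Open the potentially hot journal and check its size; a zero byte
      ** journal is not hot and is simply deleted. */
      rc = pVfs->xOpen(pVfs, zJournal, pJrnl,
                       SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL, &outFlags);
      if( rc!=SQLITE_OK ) return rc;
      rc = pJrnl->pMethods->xFileSize(pJrnl, &sz);
      if( rc!=SQLITE_OK ) return rc;
      if( sz==0 ){
        pJrnl->pMethods->xClose(pJrnl);
        return pVfs->xDelete(pVfs, zJournal, 0);
      }

      /* Upgrade directly from shared to exclusive (no reserved/pending). */
      rc = pDb->pMethods->xLock(pDb, SQLITE_LOCK_EXCLUSIVE);
      if( rc!=SQLITE_OK ) return rc;   /* another connection will roll back */

      /* Re-check that the journal file is still present. */
      rc = pVfs->xAccess(pVfs, zJournal, SQLITE_ACCESS_EXISTS, &exists);
      if( rc==SQLITE_OK ) *pIsHot = exists;
      return rc;
    }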

    Master journal file pointers? +

    + The following requirements describe step 1 of the above procedure in + more detail. +

    +When required to attempt to detect a hot-journal file, SQLite +shall first use the xAccess() method of the VFS layer to check if a +journal file exists in the file-system. +

    +If the call to xAccess() required by H35140 fails (due to an IO error or +similar), then SQLite shall abandon the attempt to open a read-only +transaction, relinquish the shared lock held on the database +file and return an error to the user. +

    +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file does +not exist, then SQLite shall conclude that there is no hot-journal +file in the file system and therefore that no hot journal +rollback is required. +

    + The following requirements describe step 2 of the above procedure in + more detail. +

    +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file +is present, then the xCheckReservedLock() method of the database file +file-handle is invoked to determine whether or not some other +process is holding a reserved or greater lock on the database +file. +

    +If the call to xCheckReservedLock() required by H35160 fails (due to an +IO or other internal VFS error), then SQLite shall abandon the attempt +to open a read-only transaction, relinquish the shared lock +held on the database file and return an error to the user. +

    +If the call to xCheckReservedLock() required by H35160 indicates that +some other database connection is holding a reserved +or greater lock on the database file, then SQLite shall conclude that +there is no hot journal file. In this case the attempt to detect +a hot journal file is concluded. +

    + The following requirements describe step 3 of the above procedure in + more detail. +

    +If while attempting to detect a hot-journal file the call to +xCheckReservedLock() indicates that no process holds a reserved +or greater lock on the database file, then SQLite shall open +a file handle on the potentially hot journal file using the VFS xOpen() +method. +

    +If the call to xOpen() required by H35440 fails (due to an IO or other +internal VFS error), then SQLite shall abandon the attempt to open a +read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. +

    +After successfully opening a file-handle on a potentially hot journal +file, SQLite shall query the file for its size in bytes using the +xFileSize() method of the open file handle. +

    +If the call to xFileSize() required by H35450 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file, close the file handle opened on the journal file and +return an error to the user. +

    +If the size of a potentially hot journal file is revealed to be zero +bytes by a query required by H35450, then SQLite shall close the +file handle opened on the journal file and delete the journal file using +a call to the VFS xDelete() method. In this case SQLite shall conclude +that there is no hot journal file. +

    +If the call to xDelete() required by H35450 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. +

    + The following requirements describe step 4 of the above procedure in + more detail. +

    +If the size of a potentially hot journal file is revealed to be greater +than zero bytes by a query required by H35450, then SQLite shall attempt +to upgrade the shared lock held by the database connection +on the database file directly to an exclusive lock. +

    +If an attempt to upgrade to an exclusive lock prescribed by +H35470 fails for any reason, then SQLite shall release all locks held by +the database connection and close the file handle opened on the +journal file. The attempt to open a read-only transaction +shall be deemed to have failed and an error returned to the user. +

    + Finally, the following requirements describe step 5 of the above + procedure in more detail. +

    +If, as part of the hot journal file detection process, the +attempt to upgrade to an exclusive lock mandated by H35470 is +successful, then SQLite shall query the file-system using the xAccess() +method of the VFS implementation to test whether or not the journal +file is still present in the file-system. +

    +If the call to xAccess() required by H35490 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the lock held on the +database file, close the file handle opened on the journal file and +return an error to the user. +

    +If the call to xAccess() required by H35490 reveals that the journal +file is no longer present in the file system, then SQLite shall abandon +the attempt to open a read-only transaction, relinquish the +lock held on the database file, close the file handle opened on the +journal file and return an SQLITE_BUSY error to the user. +

    +If the xAccess() query required by H35490 reveals that the journal +file is still present in the file system, then SQLite shall conclude +that the journal file is a hot journal file that needs to +be rolled back. SQLite shall immediately begin hot journal +rollback. +

    Cache Validation

    +

    + When a database connection opens a read transaction, the + page cache may already contain data associated with the + database connection. However, if another process has modified + the database file since the cached pages were loaded it is possible that + the cached data is invalid. +

    + SQLite determines whether or not the page cache entries belonging + to the database connection are valid or not using the file + change counter, a field in the database file header. The + file change counter is a 4-byte big-endian integer field stored + starting at byte offset 24 of the database file header. Before the + conclusion of a read/write transaction that modifies the contents + of the database file in any way (see section writing_data), + the value stored in the file change counter is incremented. When + a database connection unlocks the database file, it stores the + current value of the file change counter. Later, while opening a + new read-only transaction, SQLite checks the value of the file + change counter stored in the database file. If the value has not + changed since the database file was unlocked, then the page cache + entries can be trusted. If the value has changed, then the page + cache entries cannot be trusted and all entries associated with + the current database connection are discarded. +

    +When a file-handle open on a database file is unlocked, if the +page cache contains one or more entries belonging to the +associated database connection, SQLite shall store the value +of the file change counter internally. +

    +When required to perform cache validation as part of opening +a read transaction, SQLite shall read a 16 byte block +starting at byte offset 24 of the database file using the xRead() +method of the database connections file handle. +

    + Why a 16 byte block? Why not 4? (something to do with encrypted + databases). +

    +While performing cache validation, after loading the 16 byte +block as required by H35190, SQLite shall compare the 32-bit big-endian +integer stored in the first 4 bytes of the block to the most +recently stored value of the file change counter (see H35180). +If the values are not the same, then SQLite shall conclude that +the contents of the cache are invalid. +
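    + The comparison described by H35190 and the requirement above might be sketched as follows, assuming savedChangeCounter holds the value stored when the database file was last unlocked (H35180). +

    #include <sqlite3.h>

    /* Sketch: test the file change counter against the value saved when
    ** the database file was last unlocked. */
    static int cache_is_valid(
      sqlite3_file *pDb,                /* Open file-handle on the database file */
      unsigned int savedChangeCounter,  /* Value stored at last unlock */
      int *pIsValid                     /* OUT: true if cached pages can be trusted */
    ){
      unsigned char aBuf[16];
      unsigned int current;
      /* Read the 16 byte block beginning at byte offset 24 of the file. */
      int rc = pDb->pMethods->xRead(pDb, aBuf, 16, 24);
      if( rc!=SQLITE_OK ) return rc;
      /* The change counter is the 32-bit big-endian value in the first 4 bytes. */
      current = ((unsigned)aBuf[0]<<24) | (aBuf[1]<<16) | (aBuf[2]<<8) | aBuf[3];
      *pIsValid = (current==savedChangeCounter);
      return SQLITE_OK;
    }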

    + Requirement H35050 (section open_read_only_trans) + specifies the action SQLite is required to take upon determining that + the cache contents are invalid. +

    Page 1 and the Expected Page Size

    +

    + As the last step in opening a read transaction on a database file that is more than 0 bytes in size, SQLite is required to load data for page 1 of the database into the page cache, if it is not already there. This is slightly more complicated than it seems, as the database page-size is not known at this point. +

    + Even though the database page-size cannot be known for sure, SQLite is usually able to guess correctly by assuming it to be equal to the connections expected page size. The expected page size is the value of the page-size field read from the database file header while opening the database connection (see section open_new_connection), or the page-size of the database file when the most recent read transaction was concluded. +

    +During the conclusion of a read transaction, before unlocking +the database file, SQLite shall set the connections +expected page size to the current database page-size. +

    +As part of opening a new read transaction, immediately after +performing cache validation, if there is no data for database +page 1 in the page cache, SQLite shall read N bytes from +the start of the database file using the xRead() method of the +connections file handle, where N is the connections current +expected page size value. +

    +If page 1 data is read as required by H35230 and the value of the page-size field that appears in the database file header that occupies the first 100 bytes of the read block is not the same as the connections current expected page size, then the expected page size is set to this value, the database file is unlocked and the entire procedure to open a read transaction is repeated. +

    +If page 1 data is read as required by H35230 and the value of the page-size field that appears in the database file header that occupies the first 100 bytes of the read block is the same as the connections current expected page size, then the block of data read is stored in the page cache as page 1. +
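    + The restart logic described by the two requirements above might be sketched as follows. The helper is hypothetical and simplified: a complete implementation would also unlock the database file and repeat the whole procedure for opening a read transaction when the guess proves wrong, and would handle short reads. +

    #include <sqlite3.h>
    #include <stdlib.h>

    /* Hypothetical sketch: load page 1 using the expected page size,
    ** updating the guess if the header says otherwise. */
    static int load_page1(
      sqlite3_file *pDb,            /* Open file-handle on the database file */
      int *pExpectedPageSize,       /* IN/OUT: expected page size */
      unsigned char **paPage1       /* OUT: malloc'd page 1 content */
    ){
      for(;;){
        int pgsz = *pExpectedPageSize;
        int realPgsz, rc;
        unsigned char *aBuf = malloc(pgsz);
        if( aBuf==0 ) return SQLITE_NOMEM;
        rc = pDb->pMethods->xRead(pDb, aBuf, pgsz, 0);
        if( rc!=SQLITE_OK ){ free(aBuf); return rc; }
        /* Page-size field at byte offset 16 of the 100 byte header (assumed). */
        realPgsz = (aBuf[16]<<8) | aBuf[17];
        if( realPgsz==pgsz ){
          *paPage1 = aBuf;          /* stored in the page cache as page 1 */
          return SQLITE_OK;
        }
        /* Wrong guess: remember the real page size and try again. A full
        ** implementation unlocks the database file and reopens the read
        ** transaction from the start at this point. */
        *pExpectedPageSize = realPgsz;
        free(aBuf);
      }
    }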

    Reading Database Data

    +

    + Add something about checking the page-cache first etc. +

    Ending a Read-only Transaction

    +

    + To end a read-only transaction, SQLite simply relinquishes the + shared lock on the file-handle open on the database file. No + other action is required. +

    +When required to end a read-only transaction, SQLite shall +relinquish the shared lock held on the database file by +calling the xUnlock() method of the file-handle. +

    + See also requirements H35180 and H35210 above. +

    Writing Data

    +

    + Using DDL or DML SQL statements, SQLite users may modify the contents and + size of a database file. Exactly how changes to the logical database are + translated to modifications to the database file is described in + ff_sqlitert_requirements. From the point of view of the + sub-systems described in this document, each DDL or DML statement executed + results in the contents of zero or more database file pages being + overwritten with new data. A DDL or DML statement may also append or + truncate one or more pages to or from the end of the database file. One + or more DDL and/or DML statements are grouped together to make up a + single write transaction. A write transaction is required + to have the special properties described in section overview; + a write transaction must be isolated, durable and atomic. +

    + SQLite accomplishes these goals using the following techniques: +

      +
    • + To ensure that write transactions are isolated, before + beginning to modify the contents of the database file to reflect the + results of a write transaction, SQLite obtains an exclusive + lock on the database file. The lock is not relinquished + until the write transaction is concluded. Because reading from + the database file requires a shared lock (see section + reading_data) and holding an exclusive + lock guarantees that no other database connection is holding + or can obtain a shared lock, this ensures that no other + connection may read data from the database file at a point when + a write transaction has been partially applied. +

    • Ensuring that write transactions are atomic is the most complex task required of the system. In this case, atomic means that even if a system failure occurs, an attempt to commit a write transaction to the database file either results in all changes that are a part of the transaction being successfully applied to the database file, or none of the changes being applied. There is no chance that only a subset of the changes is applied. Hence from the point of view of an external observer, the write transaction appears to be an atomic event. +

      + Of course, it is usually not possible to atomically apply all the + changes required by a write transaction to a database file + within the file-system. For example, if a write transaction + requires ten pages of a database file to be modified, and a power + outage causes a system failure after sqlite has modified only five + pages, then the database file will almost certainly be in an + inconsistent state following system recovery. +

    + SQLite solves this problem by using a journal file. In almost all cases, before the database file is modified in any way, SQLite stores sufficient information in the journal file to allow the original database file to be reconstructed if a system failure occurs while the database file is being updated to reflect the modifications made by the write transaction. Each time SQLite opens a database file, it checks if such a system failure has occurred and, if so, reconstructs the database file based on the contents of the journal file. The procedure used to detect whether or not this process, known as hot journal rollback, is required is described in section hot_journal_detection. Hot journal rollback itself is described in section hot_journal_rollback. +

      + The same technique ensures that an SQLite database file cannot be + corrupted by a system failure that occurs at an inopportune moment. + If a system failure does occur before SQLite has had a chance to + execute sufficient sync file operations to ensure that the + changes that make up a write transaction have made it safely + to persistent storage, then the journal file will be used + to restore the database to a known good state following system + recovery. +

    • + So that write transactions are durable in the face of + a system failure, SQLite executes a sync file operation on the + database file before concluding the write transaction +

    +

    + The page cache is used to buffer modifications to the database file image before they are written to the database file. When the contents of a page are required to be modified as the result of an operation within a write transaction, the modified copy is stored in the page cache. Similarly, if new pages are appended to the end of a database file, they are added to the page cache instead of being immediately written to the database file within the file-system. +

    + Ideally, all changes for an entire write transaction are buffered in + the page cache until the end of the transaction. When the user commits + the transaction, all changes are applied to the database file in the + most efficient way possible, taking into account the assumptions + enumerated in section fs_performance. Unfortunately, since + main-memory is a limited resource, this is not always possible for + large transactions. In this case changes are buffered in the page + cache until some internal condition or limit is reached, + then written out to the database file in order to free resources + as they are required. Section page_cache_algorithms + describes the circumstances under which changes are flushed through + to the database file mid-transaction to free page cache resources. +

    + Even if an application or system failure does not occur while a + write transaction is in progress, a rollback operation to restore + the database file and page cache to the state that it was in before + the transaction started may be required. This may occur if the user + explicitly requests transaction rollback (by issuing a "ROLLBACK" command), + or automatically, as a result of encountering an SQL constraint (see + sql_sqlitert_requirements). For this reason, the original page + content is stored in the journal file before the page is even + modified within the page cache. +

    + Introduce the following sub-sections. +

    Journal File Format

    +

    + This section describes the format used by an SQLite journal file. +

    + A journal file consists of one or more journal headers, zero + or more journal records and optionally a master journal + pointer. Each journal file always begins with a + journal header, followed by zero or more journal records. + Following this may be a second journal header followed by a + second set of zero or more journal records and so on. There + is no limit to the number of journal headers a journal file + may contain. Following the journal headers and their accompanying + sets of journal records may be the optional master journal + pointer. Or, the file may simply end following the final journal + record. +

    + This section only describes the format of the journal file and the + various objects that make it up. But because a journal file may be + read by an SQLite process following recovery from a system failure + (hot journal rollback, see section + hot_journal_rollback) it is also important to describe + the way the file is created and populated within the file-system + using a combination of write file, sync file and + truncate file operations. These are described in section + write_transactions. +

    Journal Header Format

    +

    + A journal header is sector-size bytes in size, where sector-size is the value returned by the xSectorSize method of the file handle opened on the database file. Only the first 28 bytes of the journal header are used; the remainder may contain garbage data. The first 28 bytes of each journal header consist of an eight byte block set to a well-known value, followed by five big-endian 32-bit unsigned integer fields. +

    +

    Figure - Journal Header Format +

    +

    + Figure figure_journal_header graphically depicts the layout + of a journal header. The individual fields are described in + the following table. The offsets in the 'byte offset' column of the + table are relative to the start of the journal header. + +
    Byte offsetSize in bytesDescription +
    08The journal magic field always contains a + well-known 8-byte string value used to identify SQLite + journal files. The well-known sequence of byte values + is: +
    0xd9 0xd5 0x05 0xf9 0x20 0xa1 0x63 0xd7
    +
    84This field, the record count, is set to the + number of journal records that follow this + journal header in the journal file. +
    124The checksum initializer field is set to a + pseudo-random value. It is used as part of the + algorithm to calculate the checksum for all journal + records that follow this journal header. +
    164This field, the database page count, is set to the number of pages that the database file contained before any modifications associated with the write transaction are applied. +
    204This field, the sector size, is set to the + sector size of the device on which the + journal file was created, in bytes. This value + is required when reading the journal file to determine + the size of each journal header. +
    244The page size field contains the database page + size used by the corresponding database file + when the journal file was created, in bytes. +
    +

    + All journal headers are positioned in the file so that they start at a sector size aligned offset. To achieve this, unused space may be left between the end of the journal records associated with the previous header and the start of the second and subsequent journal headers. +

    Journal Record Format

    +

    + Each journal record contains the original data for a database page + modified by the write transaction. If a rollback is required, then + this data may be used to restore the contents of the database page to the + state it was in before the write transaction was started. +

    +

    Figure - Journal Record Format +

    +

    + A journal record, depicted graphically by figure + figure_journal_record, contains three fields, as described + in the following table. Byte offsets are relative to the start of the + journal record. + +
    Byte offsetSize in bytesDescription +
    04The page number of the database page associated with + this journal record, stored as a 4 byte + big-endian unsigned integer. +
    4page-size + This field contains the original data for the page, + exactly as it appeared in the database file before the + write transaction began. +
    4 + page-size4 + This field contains a checksum value, calculated based + on the contents of the journaled database page data + (the previous field) and the values stored in the + checksum initializer field of the preceding + journal header. +
    +

    + The set of journal records that follow a journal header + in a journal file are packed tightly together. There are no + alignment requirements for journal records as there are for + journal headers. +

    Master Journal Pointer

    +

    + To support atomic transactions that modify more than one + database file, SQLite sometimes includes a master journal pointer + record in a journal file. Multiple file transactions are + described in section multifile_transactions. A + master journal pointer contains the name of a master journal-file + along with a check-sum and some well known values that allow + the master journal pointer to be recognized as such when + the journal file is read during a rollback operation (section + rollback). +

    + As is the case for a journal header, the start of a master + journal pointer is always positioned at a sector size + aligned offset. If the journal record or journal header + that appears immediately before the master journal pointer does + not end at an aligned offset, then unused space is left between the + end of the journal record or journal header and the start + of the master journal pointer. +

    +

    Figure - Master Journal Pointer Format +

    +

    + A master journal pointer, depicted graphically by figure + figure_master_journal_ptr, contains five fields, as + described in the following table. Byte offsets are relative to the + start of the master journal pointer. + +
    Byte offsetSize in bytesDescription +
    04This field, the locking page number, is always set to the page number of the database locking page stored as a 4-byte big-endian integer. The locking page is the page that begins at byte offset 2^30 of the database file. Even if the database file is large enough to contain the locking page, the locking page is never used to store any data and so the first four bytes of a valid journal record will never contain this value. For further description of the locking page, refer to ff_sqlitert_requirements. +
    4name-length + The master journal name field contains the name of the + master journal file, encoded as a utf-8 string. There is no + nul-terminator appended to the string. +
    4 + name-length4 + The name-length field contains the length of the + previous field in bytes, formatted as a 4-byte big-endian + unsigned integer. +
    8 + name-length4 + The checksum field contains a checksum value stored as + a 4-byte big-endian signed integer. The checksum value is + calculated as the sum of the bytes that make up the + master journal name field, interpreting each byte as + an 8-bit signed integer. +
    12 + name-length8 + Finally, the journal magic field always contains a + well-known 8-byte string value; the same value stored in the + first 8 bytes of a journal header. The well-known + sequence of bytes is: +
    0xd9 0xd5 0x05 0xf9 0x20 0xa1 0x63 0xd7
    +
    +
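    + The checksum field described in the table above can be computed in a few lines; this sketch follows the description given there, interpreting each byte of the master journal name as an 8-bit signed integer. +

    /* Sketch: master journal name checksum as described in the table above. */
    static int master_journal_cksum(const char *zName, int nName){
      int cksum = 0;
      int i;
      for(i=0; i<nName; i++){
        cksum += (signed char)zName[i];   /* each byte as an 8-bit signed int */
      }
      return cksum;
    }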

    Write Transactions

    +

    + This section describes the progression of an SQLite write + transaction. From the point of view of the systems described in + this document, most write transactions consist of three steps: +

      +
    1. The write transaction is opened. This process is described + in section opening_a_write_transaction. +

    2. The end-user executes DML or DDL SQL statements that require the structure of the database file to be modified. These modifications may be any combination of operations to +

      • modify the content of an existing database page, +
      • append a new database page to the database file image, or +
      • truncate (discard) a database page from the end of the + database file. +
      + These operations are described in detail in section + modifying_appending_truncating. How user DDL or DML + SQL statements are mapped to combinations of these three operations + is described in ff_sqlitert_requirements. +
    3. The write transaction is concluded and the changes made + permanently committed to the database. The process required to + commit a transaction is described in section + committing_a_transaction. +

    +

    + As an alternative to step 3 above, the transaction may be rolled back. + Transaction rollback is described in section rollback. + Finally, it is also important to remember that a write transaction + may be interrupted by a system failure at any point. In this + case, the contents of the file-system (the database file and + journal file) must be left in such a state so as to enable + the database file to be restored to the state it was in before + the interrupted write transaction was started. This is known + as hot journal rollback, and is described in section + hot_journal_rollback. Section + fs_assumption_details describes the assumptions made + regarding the effects of a system failure on the file-system + contents following recovery. +

    Beginning a Write Transaction

    +

    + Before any database pages may be modified within the page cache, the database connection must open a write transaction. Opening a write transaction requires that the database connection obtains a reserved lock (or greater) on the database file. Because obtaining a reserved lock on a database file guarantees that no other database connection may hold or obtain a reserved lock or greater, it follows that no other database connection may have an open write transaction. +

    + A reserved lock on the database file may be thought of + as an exclusive lock on the journal file. No + database connection may read from or write to a journal + file without a reserved or greater lock on the corresponding + database file. +

    + Before opening a write transaction, a database connection + must have an open read transaction, opened via the procedure + described in section open_read_only_trans. This ensures + that there is no hot-journal file that needs to be rolled back + and that any data stored in the page cache can be trusted. +

    + Once a read transaction has been opened, upgrading to a + write transaction is a two step process, as follows: +

      +
    1. A reserved lock is obtained on the database file. +
    2. The journal file is opened and created if necessary (using + the VFS xOpen method), and a journal file header written + to the start of it using a single call to the file handles xWrite + method. +
    +

    + Requirements describing step 1 of the above procedure in detail: +

    +When required to open a write transaction on the database, +SQLite shall first open a read transaction, if the database +connection in question has not already opened one. +

    +When required to open a write transaction on the database, after +ensuring a read transaction has already been opened, SQLite +shall obtain a reserved lock on the database file by calling +the xLock method of the file-handle open on the database file. +

    +If an attempt to acquire a reserved lock prescribed by +requirement H35360 fails, then SQLite shall deem the attempt to +open a write transaction to have failed and return an error +to the user. +

    + Requirements describing step 2 of the above procedure in detail: +

    +When required to open a write transaction on the database, after +obtaining a reserved lock on the database file, SQLite shall +open a read/write file-handle on the corresponding journal file. +

    +When required to open a write transaction on the database, after +opening a file-handle on the journal file, SQLite shall append +a journal header to the (currently empty) journal file. +
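    + In terms of the VFS interface, the two steps above reduce to something like the following sketch. The helper name is hypothetical and error handling is minimal; appending the journal header itself is covered in the next section. +

    #include <sqlite3.h>

    /* Sketch: upgrade an open read transaction to a write transaction. */
    static int begin_write_transaction(
      sqlite3_vfs *pVfs,
      sqlite3_file *pDb,          /* File-handle holding the shared lock */
      sqlite3_file *pJrnl,        /* OUT: opened on the journal file */
      const char *zJournal        /* Journal file name */
    ){
      int rc, outFlags;

      /* Step 1: obtain a reserved lock on the database file. */
      rc = pDb->pMethods->xLock(pDb, SQLITE_LOCK_RESERVED);
      if( rc!=SQLITE_OK ) return rc;

      /* Step 2: open the journal file, creating it if necessary. */
      rc = pVfs->xOpen(pVfs, zJournal, pJrnl,
            SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_JOURNAL,
            &outFlags);
      if( rc!=SQLITE_OK ) return rc;

      /* Step 2 (continued): a journal header is now appended to the new,
      ** empty journal file - see "Writing a Journal Header" below. */
      return SQLITE_OK;
    }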

    Writing a Journal Header

    +

    + Requirements describing how a journal header is appended to + a journal file: +

    +When required to append a journal header to the journal +file, SQLite shall do so by writing a block of sector-size +bytes using a single call to the xWrite method of the file-handle +open on the journal file. The block of data written shall begin +at the smallest sector-size aligned offset at or following the current +end of the journal file. +

    +The first 8 bytes of the journal header required to be written +by H35680 shall contain the following values, in order from byte offset 0 +to 7: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63 and 0xd7. +

    +Bytes 8-11 of the journal header required to be written by +H35680 shall contain 0x00. +

    +Bytes 12-15 of the journal header required to be written by H35680 shall contain the checksum initializer, a pseudo-randomly generated value. +

    +Bytes 16-19 of the journal header required to be written by H35680 shall contain the number of pages that the database file contained when the current write-transaction was started, formatted as a 4-byte big-endian unsigned integer. +

    +Bytes 20-23 of the journal header required to be written by +H35680 shall contain the sector size used by the VFS layer, +formatted as a 4-byte big-endian unsigned integer. +

    +Bytes 24-27 of the journal header required to be written by +H35680 shall contain the page size used by the database at +the start of the write transaction, formatted as a 4-byte +big-endian unsigned integer. +
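    + Taken together, H35680 and the requirements that follow it describe a 28 byte header written at the start of a sector-size byte block. The following sketch is illustrative only; the caller is assumed to supply a sector-aligned offset and a pseudo-random checksum initializer. +

    #include <sqlite3.h>
    #include <stdlib.h>
    #include <string.h>

    /* Store a 32-bit value as 4 big-endian bytes. */
    static void put32(unsigned char *p, unsigned int v){
      p[0] = (v>>24)&0xff; p[1] = (v>>16)&0xff; p[2] = (v>>8)&0xff; p[3] = v&0xff;
    }

    /* Sketch: append a journal header at iOfst, assumed to be the smallest
    ** sector-size aligned offset at or past the current end of the file. */
    static int append_journal_header(
      sqlite3_file *pJrnl,        /* Open file-handle on the journal file */
      sqlite3_int64 iOfst,        /* Sector-aligned write offset */
      int sectorSize,             /* VFS sector size */
      unsigned int cksumInit,     /* Pseudo-random checksum initializer */
      unsigned int nDbPage,       /* Database size in pages at transaction start */
      unsigned int pageSize       /* Database page size */
    ){
      static const unsigned char aMagic[8] =
          { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };
      unsigned char *aHdr = calloc(1, sectorSize);
      int rc;
      if( aHdr==0 ) return SQLITE_NOMEM;
      memcpy(&aHdr[0], aMagic, 8);                /* bytes 0-7:   journal magic        */
      put32(&aHdr[8],  0);                        /* bytes 8-11:  record count         */
      put32(&aHdr[12], cksumInit);                /* bytes 12-15: checksum initializer */
      put32(&aHdr[16], nDbPage);                  /* bytes 16-19: database page count  */
      put32(&aHdr[20], (unsigned int)sectorSize); /* bytes 20-23: sector size          */
      put32(&aHdr[24], pageSize);                 /* bytes 24-27: page size            */
      rc = pJrnl->pMethods->xWrite(pJrnl, aHdr, sectorSize, iOfst);
      free(aHdr);
      return rc;
    }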

    + Modifying, Adding or Truncating a Database Page +

    +

    + When the end-user executes a DML or DDL SQL statement to modify the database schema or content, SQLite is required to update the database file image to reflect the new database state. This involves modifying the content of, appending, or truncating one or more database file pages. Instead of modifying the database file directly using the VFS interface, changes are first buffered within the page cache. +

    + Before modifying a database page within the page cache that may need to be restored by a rollback operation, the page must be journalled. Journalling a page is the process of copying that page's original data into the journal file so that it can be recovered if the write transaction is rolled back. The process of journalling a page is described in section journalling_a_page. +

    +When required to modify the contents of an existing database page that +existed and was not a free-list leaf page when the write +transaction was opened, SQLite shall journal the page if it has not +already been journalled within the current write transaction. +

    +When required to modify the contents of an existing database page, +SQLite shall update the cached version of the database page content +stored as part of the page cache entry associated with the page. +

    + When a new database page is appended to a database file, there is + no requirement to add a record to the journal file. If a + rollback is required the database file will simply be truncated back + to its original size based on the value stored at byte offset 12 + of the journal file. +

    +When required to append a new database page to the database file, +SQLite shall create a new page cache entry corresponding to +the new page and insert it into the page cache. The dirty +flag of the new page cache entry shall be set. +

    + If required to truncate a database page from the end of the database + file, the associated page cache entry is discarded. The adjusted + size of the database file is stored internally. The database file + is not actually truncated until the current write transaction + is committed (see section committing_a_transaction). +

    +When required to truncate (remove) a database page that existed and was +not a free-list leaf page when the write transaction was +opened from the end of a database file, SQLite shall journal the page if +it has not already been journalled within the current write +transaction. +

    +When required to truncate a database page from the end of the database +file, SQLite shall discard the associated page cache entry +from the page cache. +

    Journalling a Database Page

    +

    + A page is journalled by adding a journal record to the + journal file. The format of a journal record is described + in section journal_record_format. +

    +When required to journal a database page, SQLite shall first +append the page number of the page being journalled to the +journal file, formatted as a 4-byte big-endian unsigned integer, +using a single call to the xWrite method of the file-handle opened +on the journal file. +

    +When required to journal a database page, if the attempt to +append the page number to the journal file is successful, +then the current page data (page-size bytes) shall be appended +to the journal file, using a single call to the xWrite method of the +file-handle opened on the journal file. +

+When required to journal a database page, if the attempt to +append the current page data to the journal file is successful, +then SQLite shall append a 4-byte big-endian integer checksum value +to the journal file, using a single call to the xWrite method +of the file-handle opened on the journal file. +

+ The checksum value written to the journal file immediately after + the page data (requirement H35290) is a function of both the page + data and the checksum initializer field stored in the + journal header (see section journal_header_format). + Specifically, it is the sum of the checksum initializer and + the value of every 200th byte of page data interpreted as an 8-bit + unsigned integer, starting with the (page-size % 200)'th + byte of page data. For example, if the page-size is 1024 bytes, + then a checksum is calculated by adding the values of the bytes at + offsets 23, 223, 423, 623, 823 and 1023 (the last byte of the page) + together with the value of the checksum initializer. +

    +The checksum value written to the journal file by the write +required by H35290 shall be equal to the sum of the checksum +initializer field stored in the journal header (H35700) and +every 200th byte of the page data, beginning with the +(page-size % 200)th byte. +

    + The '%' character is used in requirement H35300 to represent the + modulo operator, just as it is in programming languages such as C, Java + and Javascript. +
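The C sketch below is illustrative only and is not the SQLite pager implementation; the function and parameter names are invented for this example. It appends a single journal record using three xWrite calls and computes the checksum exactly as the requirements above describe.

    #include "sqlite3.h"

    static void put32(unsigned char *p, unsigned int v){
      p[0] = (unsigned char)(v>>24);
      p[1] = (unsigned char)(v>>16);
      p[2] = (unsigned char)(v>>8);
      p[3] = (unsigned char)(v);
    }

    /* Checksum described by H35300: the checksum initializer plus every
    ** 200th byte of page data, starting with the (page-size % 200)'th byte
    ** and ending with the last byte of the page.  Legal SQLite page sizes
    ** are powers of two between 512 and 32768, so (pgsz % 200) is never 0. */
    static unsigned int journalChecksum(const unsigned char *aData, int pgsz,
                                        unsigned int cksumInit){
      unsigned int cksum = cksumInit;
      int i;
      for(i=(pgsz%200)-1; i<pgsz; i+=200){
        cksum += aData[i];
      }
      return cksum;
    }

    static int journalPage(
      sqlite3_file *pJfd,           /* File-handle open on the journal file */
      sqlite3_int64 iOff,           /* Current end of the journal file */
      unsigned int pgno,            /* Page number being journalled */
      const unsigned char *aData,   /* Original page content (pgsz bytes) */
      int pgsz,                     /* Database page size */
      unsigned int cksumInit        /* Checksum initializer from the header */
    ){
      unsigned char a4[4];
      int rc;

      /* Append the page number as a 4-byte big-endian integer. */
      put32(a4, pgno);
      rc = pJfd->pMethods->xWrite(pJfd, a4, 4, iOff);
      if( rc!=SQLITE_OK ) return rc;

      /* Append the original page data in a single xWrite call. */
      rc = pJfd->pMethods->xWrite(pJfd, aData, pgsz, iOff+4);
      if( rc!=SQLITE_OK ) return rc;

      /* Append the 4-byte big-endian checksum (H35290/H35300). */
      put32(a4, journalChecksum(aData, pgsz, cksumInit));
      return pJfd->pMethods->xWrite(pJfd, a4, 4, iOff+4+pgsz);
    }

For a 1024-byte page the checksum loop visits byte offsets 23, 223, 423, 623, 823 and 1023, matching the worked example above.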

    Syncing the Journal File

    +

    + Even after the original data of a database page has been written into + the journal file using calls to the journal file file-handle xWrite + method (section journalling_a_page), it is still not + safe to write to the page within the database file. This is because + in the event of a system failure the data written to the journal file + may still be corrupted (see section fs_characteristics). + Before the page can be updated within the database itself, the + following procedure takes place: +

      +
    1. The xSync method of the file-handle opened on the journal file + is called. This operation ensures that all journal records + in the journal file have been written to persistent storage, and + that they will not become corrupted as a result of a subsequent + system failure. +
    2. The journal record count field (see section + journal_header_format) of the most recently written + journal header in the journal file is updated to contain the + number of journal records added to the journal file since + the header was written. +
    3. The xSync method is called again, to ensure that the update to + the journal record count has been committed to persistent + storage. +
    +

+ If all three of the steps enumerated above are executed successfully, + then it is safe to modify the content of the journalled + database pages within the database file itself. The combination of + the three steps above is referred to as syncing the journal file. +

    +When required to sync the journal file, SQLite shall invoke the +xSync method of the file handle open on the journal file. +

    +When required to sync the journal file, after invoking the +xSync method as required by H35750, SQLite shall update the record +count of the journal header most recently written to the +journal file. The 4-byte field shall be updated to contain +the number of journal records that have been written to the +journal file since the journal header was written, +formatted as a 4-byte big-endian unsigned integer. +

    +When required to sync the journal file, after updating the +record count field of a journal header as required by +H35760, SQLite shall invoke the xSync method of the file handle open +on the journal file. +
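As a hedged illustration only (the helper name is invented, and the record-count field is assumed here to occupy bytes 8-11 of the journal header, immediately after the 8-byte magic string), the three-step sync can be pictured as:

    #include "sqlite3.h"

    static int syncJournal(
      sqlite3_file *pJfd,        /* File-handle open on the journal file */
      sqlite3_int64 iHdrOff,     /* Offset of the most recent journal header */
      unsigned int nRec          /* Journal records written since that header */
    ){
      unsigned char a4[4];
      int rc;

      /* Step 1: flush all journal records to persistent storage. */
      rc = pJfd->pMethods->xSync(pJfd, SQLITE_SYNC_NORMAL);
      if( rc!=SQLITE_OK ) return rc;

      /* Step 2: update the record-count field of the most recent header
      ** (4-byte big-endian). */
      a4[0] = (unsigned char)(nRec>>24);
      a4[1] = (unsigned char)(nRec>>16);
      a4[2] = (unsigned char)(nRec>>8);
      a4[3] = (unsigned char)(nRec);
      rc = pJfd->pMethods->xWrite(pJfd, a4, 4, iHdrOff+8);
      if( rc!=SQLITE_OK ) return rc;

      /* Step 3: sync again so the updated record count is durable. */
      return pJfd->pMethods->xSync(pJfd, SQLITE_SYNC_NORMAL);
    }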

    Upgrading to an Exclusive Lock

    +

+ Before the content of a page modified within the page cache may + be written to the database file, an exclusive lock must be held + on the database file. The purpose of this lock is to prevent another + connection from reading from the database file while the first + connection is midway through writing to it. Whether the database file + is being written because a transaction is being committed or to free + up space within the page cache, upgrading to an + exclusive lock always occurs immediately after + syncing the journal file. +

    +When required to upgrade to an exclusive lock as part of a write +transaction, SQLite shall first attempt to obtain a pending lock +on the database file if one is not already held by invoking the xLock +method of the file handle opened on the database file. +

    +When required to upgrade to an exclusive lock as part of a write +transaction, after successfully obtaining a pending lock SQLite +shall attempt to obtain an exclusive lock by invoking the +xLock method of the file handle opened on the database file. +
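A minimal sketch of the lock upgrade (hypothetical helper; not the actual pager code). The eLock parameter is the lock currently held on the database file:

    #include "sqlite3.h"

    static int upgradeToExclusive(sqlite3_file *pDbFd, int eLock){
      int rc = SQLITE_OK;
      if( eLock<SQLITE_LOCK_PENDING ){
        rc = pDbFd->pMethods->xLock(pDbFd, SQLITE_LOCK_PENDING);
      }
      if( rc==SQLITE_OK ){
        /* May return SQLITE_BUSY while other connections still hold
        ** shared locks; the pending lock blocks new readers meanwhile. */
        rc = pDbFd->pMethods->xLock(pDbFd, SQLITE_LOCK_EXCLUSIVE);
      }
      return rc;
    }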

    + What happens if the exclusive lock cannot be obtained? It is not + possible for the attempt to upgrade from a reserved to a pending + lock to fail. +

    Committing a Transaction

    +

+ Committing a write transaction is the final step in updating the + database file. Committing a transaction is a seven-step process, + summarized as follows (a brief code sketch follows the list): +

      +
1. + The database file header change counter field is incremented. + The change counter, described in + ff_sqlitert_requirements, is used by the cache + validation procedure described in section + cache_validation. +

    2. + The journal file is synced. The steps required to sync the + journal file are described in section + syncing_journal_file. +

    3. + Upgrade to an exclusive lock on the database file, if an + exclusive lock is not already held. Upgrading to an + exclusive lock is described in section + upgrading_to_exclusive_lock. +

4. + Copy the contents of all dirty pages stored in the page + cache into the database file. The set of dirty pages is written + to the database file in page-number order to improve + performance (see the assumptions in section fs_performance + for details). +

    5. + The database file is synced to ensure that all updates are stored + safely on the persistent media. +

6. + The file-handle open on the journal file is closed and the + journal file itself deleted. At this point the write + transaction has been irrevocably committed. +

    7. + The database file is unlocked. +

    +
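Purely as an illustration of the sequence above, the following sketch uses an invented WriteTxn structure and hypothetical helper functions; the real pager code is considerably more involved.

    #include "sqlite3.h"

    /* Hypothetical transaction state used only for this sketch. */
    typedef struct WriteTxn WriteTxn;
    struct WriteTxn {
      sqlite3_file *pDbFd;      /* File-handle open on the database file */
      sqlite3_file *pJfd;       /* File-handle open on the journal file */
      sqlite3_vfs *pVfs;        /* VFS used to delete the journal file */
      const char *zJournal;     /* Path of the journal file */
      int hasExclusiveLock;     /* True if an exclusive lock is already held */
    };

    /* Hypothetical helpers for operations described elsewhere in this document. */
    int incrementChangeCounter(WriteTxn*);      /* Step 1 */
    int syncJournalFile(WriteTxn*);             /* Step 2 */
    int upgradeToExclusiveLock(WriteTxn*);      /* Step 3 */
    int writeDirtyPagesInPageOrder(WriteTxn*);  /* Step 4 */

    static int commitWriteTransaction(WriteTxn *p){
      int rc;
      if( (rc = incrementChangeCounter(p))!=SQLITE_OK ) return rc;       /* 1 */
      if( (rc = syncJournalFile(p))!=SQLITE_OK ) return rc;              /* 2 */
      if( !p->hasExclusiveLock                                           /* 3 */
       && (rc = upgradeToExclusiveLock(p))!=SQLITE_OK ) return rc;
      if( (rc = writeDirtyPagesInPageOrder(p))!=SQLITE_OK ) return rc;   /* 4 */
      rc = p->pDbFd->pMethods->xSync(p->pDbFd, SQLITE_SYNC_NORMAL);      /* 5 */
      if( rc!=SQLITE_OK ) return rc;
      p->pJfd->pMethods->xClose(p->pJfd);                                /* 6 */
      rc = p->pVfs->xDelete(p->pVfs, p->zJournal, 1);
      if( rc!=SQLITE_OK ) return rc;
      return p->pDbFd->pMethods->xUnlock(p->pDbFd, SQLITE_LOCK_NONE);    /* 7 */
    }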

    + Expand on and explain the above a bit. +

    + The following requirements describe the steps enumerated above in more + detail. +

    +When required to commit a write-transaction, SQLite shall +modify page 1 to increment the value stored in the change counter +field of the database file header. +

    + The change counter is a 4-byte big-endian integer field stored + at byte offset 24 of the database file. The modification to page 1 + required by H35800 is made using the process described in section + modifying_appending_truncating. If page 1 has not already + been journalled as a part of the current write-transaction, then + incrementing the change counter may require that page 1 be + journalled. In all cases the page cache entry corresponding to + page 1 becomes a dirty page as part of incrementing the change + counter value. +

    +When required to commit a write-transaction, after incrementing +the change counter field, SQLite shall sync the journal +file. +

    +When required to commit a write-transaction, after syncing +the journal file as required by H35810, if an exclusive lock +on the database file is not already held, SQLite shall attempt to +upgrade to an exclusive lock. +

+When required to commit a write-transaction, after syncing +the journal file as required by H35810 and ensuring that an +exclusive lock is held on the database file as required by +H35830, SQLite shall copy the contents of all dirty pages +stored in the page cache into the database file using +calls to the xWrite method of the database connection file +handle. Each call to xWrite shall write the contents of a single +dirty page (page-size bytes of data) to the database +file. Dirty pages shall be written in order of page number, +from lowest to highest. +

    +When required to commit a write-transaction, after copying the +contents of any dirty pages to the database file as required +by H35830, SQLite shall sync the database file by invoking the xSync +method of the database connection file handle. +

    +When required to commit a write-transaction, after syncing +the database file as required by H35840, SQLite shall close the +file-handle opened on the journal file and delete the +journal file from the file system via a call to the VFS +xDelete method. +

    +When required to commit a write-transaction, after deleting +the journal file as required by H35850, SQLite shall relinquish +all locks held on the database file by invoking the xUnlock +method of the database connection file handle. +

    + Is the shared lock held after committing a write transaction? +

    Purging a Dirty Page

    +

    + Usually, no data is actually written to the database file until the + user commits the active write transaction. The exception is + if a single write transaction contains too many modifications + to be stored in the page cache. In this case, some of the + database file modifications stored in the page cache must be + applied to the database file before the transaction is committed so + that the associated page cache entries can be purged from the + page cache to free memory. Exactly when this condition is reached and + dirty pages must be purged is described in section + page_cache_algorithms. +

+ Before the contents of the page cache entry can be written into + the database file, the page cache entry must meet the criteria + for a writable dirty page, as defined in section + page_cache_algorithms. If the dirty page selected for purging by the + algorithms in section page_cache_algorithms is not writable, + SQLite is required to sync the journal file. Immediately after + the journal file is synced, all dirty pages associated with the + database connection are classified as writable dirty pages. +

+When required to purge a non-writable dirty page from the +page cache, SQLite shall sync the journal file before +proceeding with the write operation required by H35670. +

+After syncing the journal file as required by H35640, SQLite +shall append a new journal header to the journal file +before proceeding with the write operation required by H35670. +

    + Appending a new journal header to the journal file is described + in section writing_journal_header. +

    + Once the dirty page being purged is writable, it is simply written + into the database file. +

+When required to purge a page cache entry that is a +dirty page, SQLite shall write the page data into the database +file, using a single call to the xWrite method of the database +connection file handle. +
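A sketch of the purge operation using hypothetical helpers for the journal sync and journal header append described above (not the real page-cache code). Page P of the database file is assumed to begin at byte offset (P-1)*page-size:

    #include "sqlite3.h"

    int syncJournalFile(void);       /* hypothetical: sync the journal file */
    int appendJournalHeader(void);   /* hypothetical: append a new journal header */

    static int purgeDirtyPage(
      sqlite3_file *pDbFd,           /* File-handle open on the database file */
      unsigned int pgno,             /* Page number being purged */
      const unsigned char *aData,    /* Page content from the page cache entry */
      int pgsz,                      /* Database page size */
      int isWritable                 /* True if the page is already writable */
    ){
      int rc = SQLITE_OK;
      if( !isWritable ){
        rc = syncJournalFile();      /* makes every dirty page writable */
        if( rc==SQLITE_OK ) rc = appendJournalHeader();
      }
      if( rc==SQLITE_OK ){
        /* Single xWrite of page-size bytes at the page's offset. */
        rc = pDbFd->pMethods->xWrite(pDbFd, aData, pgsz,
                                     (sqlite3_int64)(pgno-1)*pgsz);
      }
      return rc;
    }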

    Multi-File Transactions

    +

    Statement Transactions

    +

    Rollback

    +

    Hot Journal Rollback

    +

    Transaction Rollback

    +

    Statement Rollback

    +

    References

    + +
    [1] + C API Requirements Document. +
    [2] + SQL Requirements Document. +
    [3] + File Format Requirements Document. +
    + +
    +This page last modified 2009/02/20 14:03:56 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/formatchng.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/formatchng.html --- sqlite3-3.4.2/www/formatchng.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/formatchng.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,366 @@ + + +File Format Changes in SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    File Format Changes in SQLite

    + +

+Every effort is made to keep SQLite fully backwards compatible from +one release to the next. Rarely, however, some +enhancements or bug fixes may require a change to +the underlying file format. When this happens, you +must convert the contents of your +databases into a portable ASCII representation using the old version +of the library and then reload the data using the new version of the +library. +

    + +

    +You can tell if you should reload your databases by comparing the +version numbers of the old and new libraries. If the first digit +of the version number is different, then a reload of the database will +be required. If the second digit changes, newer versions of SQLite +will be able to read and write older database files, but older versions +of the library may have difficulty reading or writing newer database +files. +For example, upgrading from +version 2.8.14 to 3.0.0 requires a reload. Going from +version 3.0.8 to 3.1.0 is backwards compatible but not necessarily +forwards compatible. +

    + +

    +The following table summarizes the SQLite file format changes that have +occurred since version 1.0.0: +

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Version Change | Approx. Date | Description Of File Format Change
1.0.32 to 2.0.0 | 2001-Sep-20 | Version 1.0.X of SQLite used the GDBM library as its backend + interface to the disk. Beginning in version 2.0.0, GDBM was replaced + by a custom B-Tree library written especially for SQLite. The new + B-Tree backend is twice as fast as GDBM, supports atomic commits and + rollback, and stores an entire database in a single disk file instead + of using a separate file for each table as GDBM does. The two + file formats are not even remotely similar.
2.0.8 to 2.1.0 | 2001-Nov-12 | The same basic B-Tree format is used but the details of the + index keys were changed in order to provide better query + optimization opportunities. Some of the headers were also changed in order + to increase the maximum size of a row from 64KB to 24MB.

+ + This change is an exception to the version number rule described above + in that it is neither forwards nor backwards compatible. A complete + reload of the database is required. This is the only exception.

2.1.7 to 2.2.0 | 2001-Dec-21 | Beginning with version 2.2.0, SQLite no longer builds an index for + an INTEGER PRIMARY KEY column. Instead, it uses that column as the actual + B-Tree key for the main table.

    Version 2.2.0 and later of the library + will automatically detect when it is reading a 2.1.x database and will + disable the new INTEGER PRIMARY KEY feature. In other words, version + 2.2.x is backwards compatible to version 2.1.x. But version 2.1.x is not + forward compatible with version 2.2.x. If you try to open + a 2.2.x database with an older 2.1.x library and that database contains + an INTEGER PRIMARY KEY, you will likely get a coredump. If the database + schema does not contain any INTEGER PRIMARY KEYs, then the version 2.1.x + and version 2.2.x database files will be identical and completely + interchangeable.

    +
2.2.5 to 2.3.0 | 2002-Jan-30 | Beginning with version 2.3.0, SQLite supports some additional syntax + (the "ON CONFLICT" clause) in the CREATE TABLE and CREATE INDEX statements + that are stored in the SQLITE_MASTER table. If you create a database that + contains this new syntax, then try to read that database using version 2.2.5 + or earlier, the parser will not understand the new syntax and you will get + an error. Otherwise, databases for 2.2.x and 2.3.x are interchangeable.
2.3.3 to 2.4.0 | 2002-Mar-10 | Beginning with version 2.4.0, SQLite added support for views. + Information about views is stored in the SQLITE_MASTER table. If an older + version of SQLite attempts to read a database that contains VIEW information + in the SQLITE_MASTER table, the parser will not understand the new syntax + and initialization will fail. Also, the + way SQLite keeps track of unused disk blocks in the database file + changed slightly. + If an older version of SQLite attempts to write a database that + was previously written by version 2.4.0 or later, then it may leak disk + blocks.
2.4.12 to 2.5.0 | 2002-Jun-17 | Beginning with version 2.5.0, SQLite added support for triggers. + Information about triggers is stored in the SQLITE_MASTER table. If an older + version of SQLite attempts to read a database that contains a CREATE TRIGGER + in the SQLITE_MASTER table, the parser will not understand the new syntax + and initialization will fail. +
2.5.6 to 2.6.0 | 2002-July-17 | A design flaw in the layout of indices required a file format change + to correct. This change appeared in version 2.6.0.

    + + If you use version 2.6.0 or later of the library to open a database file + that was originally created by version 2.5.6 or earlier, an attempt to + rebuild the database into the new format will occur automatically. + This can take some time for a large database. (Allow 1 or 2 seconds + per megabyte of database under Unix - longer under Windows.) This format + conversion is irreversible. It is strongly suggested + that you make a backup copy of older database files prior to opening them + with version 2.6.0 or later of the library, in case there are errors in + the format conversion logic.

    + + Version 2.6.0 or later of the library cannot open read-only database + files from version 2.5.6 or earlier, since read-only files cannot be + upgraded to the new format.

    +
2.6.3 to 2.7.0 | 2002-Aug-13

    Beginning with version 2.7.0, SQLite understands two different + datatypes: text and numeric. Text data sorts in memcmp() order. + Numeric data sorts in numerical order if it looks like a number, + or in memcmp() order if it does not.

    + +

    When SQLite version 2.7.0 or later opens a 2.6.3 or earlier database, + it assumes all columns of all tables have type "numeric". For 2.7.0 + and later databases, columns have type "text" if their datatype + string contains the substrings "char" or "clob" or "blob" or "text". + Otherwise they are of type "numeric".

    + +

Because "text" columns have a different sort order from numeric, + indices on "text" columns occur in a different order for version + 2.7.0 and later databases. Hence version 2.6.3 and earlier of SQLite + will be unable to read a 2.7.0 or later database. But version 2.7.0 + and later of SQLite will read earlier databases.

    +
2.7.6 to 2.8.0 | 2003-Feb-14

    Version 2.8.0 introduces a change to the format of the rollback + journal file. The main database file format is unchanged. Versions + 2.7.6 and earlier can read and write 2.8.0 databases and vice versa. + Version 2.8.0 can rollback a transaction that was started by version + 2.7.6 and earlier. But version 2.7.6 and earlier cannot rollback a + transaction started by version 2.8.0 or later.

    + +

    The only time this would ever be an issue is when you have a program + using version 2.8.0 or later that crashes with an incomplete + transaction, then you try to examine the database using version 2.7.6 or + earlier. The 2.7.6 code will not be able to read the journal file + and thus will not be able to rollback the incomplete transaction + to restore the database.

    +
2.8.14 to 3.0.0 | 2004-Jun-18

    Version 3.0.0 is a major upgrade for SQLite that incorporates + support for UTF-16, BLOBs, and a more compact encoding that results + in database files that are typically 25% to 50% smaller. The new file + format is very different and is completely incompatible with the + version 2 file format.

    +
3.0.8 to 3.1.0 | 2005-Jan-21

Version 3.1.0 adds support for + autovacuum mode. + Prior versions of SQLite will be able to read an autovacuumed + database but will not be able to write it. If autovacuum is disabled + (which is the default condition) + then databases are fully forwards and backwards compatible.

    +
3.1.6 to 3.2.0 | 2005-Mar-19

    Version 3.2.0 adds support for the + ALTER TABLE ADD COLUMN + command. A database that has been modified by this command can + not be read by a version of SQLite prior to 3.1.4. Running + VACUUM + after the ALTER TABLE + restores the database to a format such that it can be read by earlier + SQLite versions.

    +
3.2.8 to 3.3.0 | 2006-Jan-10

Version 3.3.0 adds support for descending indices and + uses a new encoding for boolean values that requires + less disk space. Version 3.3.0 can read and write database + files created by prior versions of SQLite. But prior versions + of SQLite will not be able to read or write databases created + by Version 3.3.0.

    +

If you need backwards and forwards compatibility, you can + compile with -DSQLITE_DEFAULT_FILE_FORMAT=1. Or at runtime + you can say "PRAGMA legacy_file_format=ON" prior to creating + a new database file.

    +

    Once a database file is created, its format is fixed. So + a database file created by SQLite 3.2.8 and merely modified + by version 3.3.0 or later will retain the old format. Except, + the VACUUM command recreates the database so running VACUUM + on 3.3.0 or later will change the file format to the latest + edition.

    +
3.3.6 to 3.3.7 | 2006-Aug-12

The previous file format change has caused so much + grief that the default behavior has been changed back to + the original file format. This means that the DESC option on + indices is ignored by default and that the more efficient encoding + of boolean values is not used. In that way, older versions + of SQLite can read and write databases created by newer + versions. If the new features are desired, they can be + enabled using pragma: "PRAGMA legacy_file_format=OFF".

    +

    To be clear: both old and new file formats continue to + be understood and continue to work. But the old file format + is used by default instead of the new. This might change + again in some future release - we may go back to generating + the new file format by default - but probably not until + all users have upgraded to a version of SQLite that will + understand the new file format. That might take several + years.

3.4.2 to 3.5.0 | 2007-Sep-3

The design of the OS interface layer was changed for + release 3.5.0. Applications that implemented a custom OS + interface will need to be modified in order to upgrade. + There are also some subtly different semantics in a few obscure + APIs. An article is available which + describes the changes in detail.

    + +

    The on-disk file format is unchanged.

    +
3.5.9 to 3.6.0 | 2008-July-16

There are minor tweaks to the new OS interface layer that + was added in version 3.5.0. + Applications that implemented a custom OS + interface will need to be adjusted. + An article is available which + describes the changes in detail.

    + +

    The on-disk file format is unchanged.

    +
    +
    + +

    +To perform a database reload, have ready versions of the +sqlite command-line utility for both the old and new +version of SQLite. Call these two executables "sqlite-old" +and "sqlite-new". Suppose the name of your old database +is "old.db" and you want to create a new database with +the same information named "new.db". The command to do +this is as follows: +

    + +
    + sqlite-old old.db .dump | sqlite-new new.db +
    +
    +This page last modified 2008/06/26 13:09:13 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/formatchng.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/formatchng.tcl --- sqlite3-3.4.2/www/formatchng.tcl 2007-06-15 13:03:40.000000000 +0100 +++ sqlite3-3.6.16/www/formatchng.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,272 +0,0 @@ -# -# Run this Tcl script to generate the formatchng.html file. -# -set rcsid {$Id: formatchng.tcl,v 1.19 2006/08/12 14:38:47 drh Exp $ } -source common.tcl -header {File Format Changes in SQLite} -puts { -

    File Format Changes in SQLite

    - -

    -Every effort is made to keep SQLite fully backwards compatible from -one release to the next. Rarely, however, some -enhancements or bug fixes may require a change to -the underlying file format. When this happens and you -must convert the contents of your -databases into a portable ASCII representation using the old version -of the library then reload the data using the new version of the -library. -

    - -

    -You can tell if you should reload your databases by comparing the -version numbers of the old and new libraries. If the first digit -of the version number is different, then a reload of the database will -be required. If the second digit changes, newer versions of SQLite -will be able to read and write older database files, but older versions -of the library may have difficulty reading or writing newer database -files. -For example, upgrading from -version 2.8.14 to 3.0.0 requires a reload. Going from -version 3.0.8 to 3.1.0 is backwards compatible but not necessarily -forwards compatible. -

    - -

    -The following table summarizes the SQLite file format changes that have -occurred since version 1.0.0: -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Version ChangeApprox. DateDescription Of File Format Change
    1.0.32 to 2.0.02001-Sep-20Version 1.0.X of SQLite used the GDBM library as its backend - interface to the disk. Beginning in version 2.0.0, GDBM was replaced - by a custom B-Tree library written especially for SQLite. The new - B-Tree backend is twice as fast as GDBM, supports atomic commits and - rollback, and stores an entire database in a single disk file instead - using a separate file for each table as GDBM does. The two - file formats are not even remotely similar.
    2.0.8 to 2.1.02001-Nov-12The same basic B-Tree format is used but the details of the - index keys were changed in order to provide better query - optimization opportunities. Some of the headers were also changed in order - to increase the maximum size of a row from 64KB to 24MB.

    - - This change is an exception to the version number rule described above - in that it is neither forwards or backwards compatible. A complete - reload of the database is required. This is the only exception.

    2.1.7 to 2.2.02001-Dec-21Beginning with version 2.2.0, SQLite no longer builds an index for - an INTEGER PRIMARY KEY column. Instead, it uses that column as the actual - B-Tree key for the main table.

    Version 2.2.0 and later of the library - will automatically detect when it is reading a 2.1.x database and will - disable the new INTEGER PRIMARY KEY feature. In other words, version - 2.2.x is backwards compatible to version 2.1.x. But version 2.1.x is not - forward compatible with version 2.2.x. If you try to open - a 2.2.x database with an older 2.1.x library and that database contains - an INTEGER PRIMARY KEY, you will likely get a coredump. If the database - schema does not contain any INTEGER PRIMARY KEYs, then the version 2.1.x - and version 2.2.x database files will be identical and completely - interchangeable.

    -
    2.2.5 to 2.3.02002-Jan-30Beginning with version 2.3.0, SQLite supports some additional syntax - (the "ON CONFLICT" clause) in the CREATE TABLE and CREATE INDEX statements - that are stored in the SQLITE_MASTER table. If you create a database that - contains this new syntax, then try to read that database using version 2.2.5 - or earlier, the parser will not understand the new syntax and you will get - an error. Otherwise, databases for 2.2.x and 2.3.x are interchangeable.
    2.3.3 to 2.4.02002-Mar-10Beginning with version 2.4.0, SQLite added support for views. - Information about views is stored in the SQLITE_MASTER table. If an older - version of SQLite attempts to read a database that contains VIEW information - in the SQLITE_MASTER table, the parser will not understand the new syntax - and initialization will fail. Also, the - way SQLite keeps track of unused disk blocks in the database file - changed slightly. - If an older version of SQLite attempts to write a database that - was previously written by version 2.4.0 or later, then it may leak disk - blocks.
    2.4.12 to 2.5.02002-Jun-17Beginning with version 2.5.0, SQLite added support for triggers. - Information about triggers is stored in the SQLITE_MASTER table. If an older - version of SQLite attempts to read a database that contains a CREATE TRIGGER - in the SQLITE_MASTER table, the parser will not understand the new syntax - and initialization will fail. -
    2.5.6 to 2.6.02002-July-17A design flaw in the layout of indices required a file format change - to correct. This change appeared in version 2.6.0.

    - - If you use version 2.6.0 or later of the library to open a database file - that was originally created by version 2.5.6 or earlier, an attempt to - rebuild the database into the new format will occur automatically. - This can take some time for a large database. (Allow 1 or 2 seconds - per megabyte of database under Unix - longer under Windows.) This format - conversion is irreversible. It is strongly suggested - that you make a backup copy of older database files prior to opening them - with version 2.6.0 or later of the library, in case there are errors in - the format conversion logic.

    - - Version 2.6.0 or later of the library cannot open read-only database - files from version 2.5.6 or earlier, since read-only files cannot be - upgraded to the new format.

    -
    2.6.3 to 2.7.02002-Aug-13

    Beginning with version 2.7.0, SQLite understands two different - datatypes: text and numeric. Text data sorts in memcmp() order. - Numeric data sorts in numerical order if it looks like a number, - or in memcmp() order if it does not.

    - -

    When SQLite version 2.7.0 or later opens a 2.6.3 or earlier database, - it assumes all columns of all tables have type "numeric". For 2.7.0 - and later databases, columns have type "text" if their datatype - string contains the substrings "char" or "clob" or "blob" or "text". - Otherwise they are of type "numeric".

    - -

    Because "text" columns have a different sort order from numeric, - indices on "text" columns occur in a different order for version - 2.7.0 and later database. Hence version 2.6.3 and earlier of SQLite - will be unable to read a 2.7.0 or later database. But version 2.7.0 - and later of SQLite will read earlier databases.

    -
    2.7.6 to 2.8.02003-Feb-14

    Version 2.8.0 introduces a change to the format of the rollback - journal file. The main database file format is unchanged. Versions - 2.7.6 and earlier can read and write 2.8.0 databases and vice versa. - Version 2.8.0 can rollback a transaction that was started by version - 2.7.6 and earlier. But version 2.7.6 and earlier cannot rollback a - transaction started by version 2.8.0 or later.

    - -

    The only time this would ever be an issue is when you have a program - using version 2.8.0 or later that crashes with an incomplete - transaction, then you try to examine the database using version 2.7.6 or - earlier. The 2.7.6 code will not be able to read the journal file - and thus will not be able to rollback the incomplete transaction - to restore the database.

    -
    2.8.14 to 3.0.02004-Jun-18

    Version 3.0.0 is a major upgrade for SQLite that incorporates - support for UTF-16, BLOBs, and a more compact encoding that results - in database files that are typically 25% to 50% smaller. The new file - format is very different and is completely incompatible with the - version 2 file format.

    -
    3.0.8 to 3.1.02005-Jan-21

    Version 3.1.0 adds support for - autovacuum mode. - Prior versions of SQLite will be able to read an autovacuumed - database but will not be able to write it. If autovaccum is disabled - (which is the default condition) - then databases are fully forwards and backwards compatible.

    -
    3.1.6 to 3.2.02005-Mar-19

    Version 3.2.0 adds support for the - ALTER TABLE ADD COLUMN - command. A database that has been modified by this command can - not be read by a version of SQLite prior to 3.1.4. Running - VACUUM - after the ALTER TABLE - restores the database to a format such that it can be read by earlier - SQLite versions.

    -
    3.2.8 to 3.3.02006-Jan-10

    Version 3.3.0 adds support for descending indices and - uses a new encoding for boolean values that requires - less disk space. Version 3.3.0 can read and write database - files created by prior versions of SQLite. But prior versions - of SQLite will not be able to read or write databases created - by Version 3.3.0

    -

    If you need backwards and forwards capatibility, you can - compile with -DSQLITE_DEFAULT_FILE_FORMAT=1. Or at runtime - you can say "PRAGMA legacy_file_format=ON" prior to creating - a new database file

    -

    Once a database file is created, its format is fixed. So - a database file created by SQLite 3.2.8 and merely modified - by version 3.3.0 or later will retain the old format. Except, - the VACUUM command recreates the database so running VACUUM - on 3.3.0 or later will change the file format to the latest - edition.

    -
    3.3.6 to 3.3.72006-Aug-12

    The previous file format change has caused so much - grief that the default behavior has been changed back to - the original file format. This means that DESC option on - indices is ignored by default that the more efficient encoding - of boolean values is not used. In that way, older versions - of SQLite can read and write databases created by newer - versions. If the new features are desired, they can be - enabled using pragma: "PRAGMA legacy_file_format=OFF".

    -

    To be clear: both old and new file formats continue to - be understood and continue to work. But the old file format - is used by default instead of the new. This might change - again in some future release - we may go back to generating - the new file format by default - but probably not until - all users have upgraded to a version of SQLite that will - understand the new file format. That might take several - years.

    -
    - -

    -To perform a database reload, have ready versions of the -sqlite command-line utility for both the old and new -version of SQLite. Call these two executables "sqlite-old" -and "sqlite-new". Suppose the name of your old database -is "old.db" and you want to create a new database with -the same information named "new.db". The command to do -this is as follows: -

    - -
    - sqlite-old old.db .dump | sqlite-new new.db -
    -} -footer $rcsid Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/fullscanb.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/fullscanb.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/hlr10000.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/hlr10000.html --- sqlite3-3.4.2/www/hlr10000.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/hlr10000.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,2424 @@ + + +SQLite Application C-langauge Interface Requirements + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +Requirements for the SQLite C-language Application Interface +

    + +

    This document details requirements for the primary C-language interface +to SQLite.

    + + +

    H10011: +The SQLITE_VERSION #define in the sqlite3.h header file shall +evaluate to a string literal that is the SQLite version +with which the header file is associated. + + +

    H10014: +The SQLITE_VERSION_NUMBER #define shall resolve to an integer +with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z +are the major version, minor version, and release number. + + +

    H10021: +The sqlite3_libversion_number() interface shall return +an integer equal to SQLITE_VERSION_NUMBER. + + +

    H10022: +The sqlite3_version string constant shall contain +the text of the SQLITE_VERSION string. + + +

    H10023: +The sqlite3_libversion() function shall return +a pointer to the sqlite3_version string constant. + + +
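For example, version 3.6.16 has X=3, Y=6 and Z=16, so SQLITE_VERSION_NUMBER is 3*1000000 + 6*1000 + 16 = 3006016. The following illustrative check (not itself a requirement) should hold whenever the sqlite3.h header matches the linked library:

    #include <assert.h>
    #include <string.h>
    #include "sqlite3.h"

    void checkVersionConsistency(void){
      /* H10021, H10022, H10023: the run-time library reports the same
      ** version information that the sqlite3.h header declares. */
      assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
      assert( strcmp(sqlite3_version, SQLITE_VERSION)==0 );
      assert( sqlite3_libversion()==sqlite3_version );
    }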

    H10101: +The sqlite3_threadsafe() function shall return zero if +and only if SQLite was compiled with mutexing code omitted. + + +

    H10102: +The value returned by sqlite3_threadsafe() shall remain the same +across calls to sqlite3_config(). + + +

    H10201: +The sqlite_int64 and sqlite3_int64 types shall specify +a 64-bit signed integer. + + +

    H10202: +The sqlite_uint64 and sqlite3_uint64 types shall specify +a 64-bit unsigned integer. + + +

H10331: +A successful invocation of sqlite3_enable_shared_cache(B) +will enable or disable shared cache mode for subsequently +created database connections in the same process. + + +

    H10336: +When shared cache is enabled, the sqlite3_create_module() +interface will always return an error. + + +

    H10337: +The sqlite3_enable_shared_cache(B) interface returns +SQLITE_OK if shared cache was enabled or disabled successfully. + + +

    H10339: +Shared cache is disabled by default. + + +

H10511: +A successful evaluation of the sqlite3_complete() or +sqlite3_complete16() functions shall +return a numeric 1 if and only if the input string contains +one or more non-whitespace tokens and the last non-whitespace +token is a semicolon that is not in the middle of +a CREATE TRIGGER statement. + + +

    H10512: +If a memory allocation error occurs during an invocation +of sqlite3_complete() or sqlite3_complete16() then the +routine shall return SQLITE_NOMEM. + + +

    H10533: +The sqlite3_sleep(M) interface invokes the xSleep +method of the default VFS in order to +suspend execution of the current thread for at least +M milliseconds. + + +

    H10536: +The sqlite3_sleep(M) interface returns the number of +milliseconds of sleep actually requested of the operating +system, which might be larger than the parameter M. + + +

    H11203: +The sqlite3_vfs_find(N) interface returns a pointer to the +registered sqlite3_vfs object whose name exactly matches +the zero-terminated UTF-8 string N, or it returns NULL if +there is no match. + + +

    H11206: +If the N parameter to sqlite3_vfs_find(N) is NULL then +the function returns a pointer to the default sqlite3_vfs +object if there is one, or NULL if there is no default +sqlite3_vfs object. + + +

    H11209: +The sqlite3_vfs_register(P,F) interface registers the +well-formed sqlite3_vfs object P using the name given +by the zName field of the object P. + + +

    H11212: +Using the sqlite3_vfs_register(P,F) interface to register +the same sqlite3_vfs object multiple times is a harmless no-op. + + +

    H11215: +The sqlite3_vfs_register(P,F) interface makes the sqlite3_vfs +object P the default sqlite3_vfs object if F is non-zero. + + +

    H11218: +The sqlite3_vfs_unregister(P) interface unregisters the +sqlite3_vfs object P so that it is no longer returned by +subsequent calls to sqlite3_vfs_find(). + + +

    H11302: +The sqlite3_finalize(S) interface destroys the +prepared statement S and releases all +memory and file resources held by that object. + + +

    H11304: +If the most recent call to sqlite3_step(S) for the +prepared statement S returned an error, +then sqlite3_finalize(S) returns that same error. + + +

    H12011: +A successful call to sqlite3_close(C) shall destroy the +database connection object C. + + +

    H12012: +A successful call to sqlite3_close(C) shall return SQLITE_OK. + + +

    H12013: +A successful call to sqlite3_close(C) shall release all +memory and system resources associated with database connection C. + + +

    H12014: +A call to sqlite3_close(C) on a database connection C that +has one or more open prepared statements shall fail with +an SQLITE_BUSY error code. + + +

    H12015: +A call to sqlite3_close(C) where C is a NULL pointer shall +be a harmless no-op returning SQLITE_OK. + + +

    H12019: +When sqlite3_close(C) is invoked on a database connection C +that has a pending transaction, the transaction shall be +rolled back. + + +

    H12101: +A successful invocation of sqlite3_exec(D,S,C,A,E) +shall sequentially evaluate all of the UTF-8 encoded, +semicolon-separated SQL statements in the zero-terminated +string S within the context of the database connection D. + + +

    H12102: +If the S parameter to sqlite3_exec(D,S,C,A,E) is NULL then +the actions of the interface shall be the same as if the +S parameter were an empty string. + + +

    H12104: +The return value of sqlite3_exec() shall be SQLITE_OK if all +SQL statements run successfully and to completion. + + +

    H12105: +The return value of sqlite3_exec() shall be an appropriate +non-zero error code if any SQL statement fails. + + +

    H12107: +If one or more of the SQL statements handed to sqlite3_exec() +return results and the 3rd parameter is not NULL, then +the callback function specified by the 3rd parameter shall be +invoked once for each row of result. + + +

    H12110: +If the callback returns a non-zero value then sqlite3_exec() +shall abort the SQL statement it is currently evaluating, +skip all subsequent SQL statements, and return SQLITE_ABORT. + + +

    H12113: +The sqlite3_exec() routine shall pass its 4th parameter through +as the 1st parameter of the callback. + + +

    H12116: +The sqlite3_exec() routine shall set the 2nd parameter of its +callback to be the number of columns in the current row of +result. + + +

    H12119: +The sqlite3_exec() routine shall set the 3rd parameter of its +callback to be an array of pointers to strings holding the +values for each column in the current result set row as +obtained from sqlite3_column_text(). + + +

    H12122: +The sqlite3_exec() routine shall set the 4th parameter of its +callback to be an array of pointers to strings holding the +names of result columns as obtained from sqlite3_column_name(). + + +

    H12125: +If the 3rd parameter to sqlite3_exec() is NULL then +sqlite3_exec() shall silently discard query results. + + +

    H12131: +If an error occurs while parsing or evaluating any of the SQL +statements in the S parameter of sqlite3_exec(D,S,C,A,E) and if +the E parameter is not NULL, then sqlite3_exec() shall store +in *E an appropriate error message written into memory obtained +from sqlite3_malloc(). + + +

    H12134: +The sqlite3_exec(D,S,C,A,E) routine shall set the value of +*E to NULL if E is not NULL and there are no errors. + + +

    H12137: +The sqlite3_exec(D,S,C,A,E) function shall set the error code +and message accessible via sqlite3_errcode(), +sqlite3_extended_errcode(), +sqlite3_errmsg(), and sqlite3_errmsg16(). + + +

    H12138: +If the S parameter to sqlite3_exec(D,S,C,A,E) is NULL or an +empty string or contains nothing other than whitespace, comments, +and/or semicolons, then results of sqlite3_errcode(), +sqlite3_extended_errcode(), +sqlite3_errmsg(), and sqlite3_errmsg16() +shall reset to indicate no errors. + + +
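A minimal usage sketch tying the sqlite3_exec() callback requirements together; the SQL text and table name are invented for the example:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Invoked once per result row (H12107); its parameters follow
    ** H12113, H12116, H12119 and H12122. */
    static int printRow(void *pArg, int nCol, char **azVal, char **azName){
      int i;
      (void)pArg;                    /* copy of the 4th sqlite3_exec() argument */
      for(i=0; i<nCol; i++){
        printf("%s = %s\n", azName[i], azVal[i] ? azVal[i] : "NULL");
      }
      return 0;                      /* non-zero would abort with SQLITE_ABORT */
    }

    int dumpTable(sqlite3 *db){
      char *zErr = 0;
      int rc = sqlite3_exec(db, "SELECT * FROM t1;", printRow, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", zErr ? zErr : "unknown");
        sqlite3_free(zErr);          /* message is from sqlite3_malloc() (H12131) */
      }
      return rc;
    }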

    H12201: +Each new database connection shall have the +extended result codes feature disabled by default. + + +

    H12202: +The sqlite3_extended_result_codes(D,F) interface shall enable +extended result codes for the database connection D +if the F parameter is true, or disable them if F is false. + + +

    H12221: +The sqlite3_last_insert_rowid() function shall return the rowid +of the most recent successful INSERT performed on the same +database connection and within the same or higher level +trigger context, or zero if there have been no qualifying +INSERT statements. + + +

    H12223: +The sqlite3_last_insert_rowid() function shall return the +same value when called from the same trigger context +immediately before and after a ROLLBACK. + + +

    H12241: +The sqlite3_changes() function shall return the number of +row changes caused by the most recent INSERT, UPDATE, +or DELETE statement on the same database connection and +within the same or higher trigger context, or zero if there have +not been any qualifying row changes. + + +

    H12243: +Statements of the form "DELETE FROM tablename" with no +WHERE clause shall cause subsequent calls to +sqlite3_changes() to return zero, regardless of the +number of rows originally in the table. + + +

H12261: +The sqlite3_total_changes() function returns the total number +of row changes caused by INSERT, UPDATE, and/or DELETE +statements on the same database connection, in any +trigger context, since the database connection was created. + + +

    H12263: +Statements of the form "DELETE FROM tablename" with no +WHERE clause shall not change the value returned +by sqlite3_total_changes(). + + +

    H12271: +The sqlite3_interrupt() interface will force all running +SQL statements associated with the same database connection +to halt after processing at most one additional row of data. + + +

    H12272: +Any SQL statement that is interrupted by sqlite3_interrupt() +will return SQLITE_INTERRUPT. + + +

    H12281: +The callback function registered by sqlite3_trace() +shall be invoked +whenever an SQL statement first begins to execute and +whenever a trigger subprogram first begins to run. + + +

    H12282: +Each call to sqlite3_trace() shall override the previously +registered trace callback. + + +

    H12283: +A NULL trace callback shall disable tracing. + + +

    H12284: +The first argument to the trace callback shall be a copy of +the pointer which was the 3rd argument to sqlite3_trace(). + + +

    H12285: +The second argument to the trace callback is a +zero-terminated UTF-8 string containing the original text +of the SQL statement as it was passed into sqlite3_prepare_v2() +or the equivalent, or an SQL comment indicating the beginning +of a trigger subprogram. + + +

    H12287: +The callback function registered by sqlite3_profile() is invoked +as each SQL statement finishes. + + +

    H12288: +The first parameter to the profile callback is a copy of +the 3rd parameter to sqlite3_profile(). + + +

    H12289: +The second parameter to the profile callback is a +zero-terminated UTF-8 string that contains the complete text of +the SQL statement as it was processed by sqlite3_prepare_v2() +or the equivalent. + + +

    H12290: +The third parameter to the profile callback is an estimate +of the number of nanoseconds of wall-clock time required to +run the SQL statement from start to finish. + + +

H12311: +The sqlite3_busy_handler(D,C,A) function shall replace the +busy callback in the database connection D with +a new busy handler C and application data pointer A. + + +

    H12312: +Newly created database connections shall have a busy +handler of NULL. + + +

    H12314: +When two or more database connections share a +common cache, +the busy handler for the database connection currently using +the cache shall be invoked when the cache encounters a lock. + + +

    H12316: +If a busy handler callback returns zero, then the SQLite interface +that provoked the locking event shall return SQLITE_BUSY. + + +

H12318: +SQLite shall invoke the busy handler with two arguments which +are a copy of the pointer supplied by the 3rd parameter to +sqlite3_busy_handler() and a count of the number of prior +invocations of the busy handler for the same locking event. + + +

    H12341: +The sqlite3_busy_timeout() function shall override any prior +sqlite3_busy_timeout() or sqlite3_busy_handler() setting +on the same database connection. + + +

    H12343: +If the 2nd parameter to sqlite3_busy_timeout() is less than +or equal to zero, then the busy handler shall be cleared so that +all subsequent locking events immediately return SQLITE_BUSY. + + +

    H12344: +If the 2nd parameter to sqlite3_busy_timeout() is a positive +number N, then a busy handler shall be set that repeatedly calls +the xSleep() method in the VFS interface until +either the lock clears or until the cumulative sleep time +reported back by xSleep() exceeds N milliseconds. + + +
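An illustrative sketch of the two ways of configuring busy handling described above; the retry policy is an arbitrary example, and each call replaces whatever handler was installed before it:

    #include "sqlite3.h"

    /* Return non-zero to retry, zero to let the interface return SQLITE_BUSY. */
    static int retryUpToTenTimes(void *pArg, int nPrior){
      (void)pArg;
      return nPrior<10;
    }

    void configureBusyHandling(sqlite3 *db){
      /* Simplest option: keep retrying for up to 2000 ms (H12344). */
      sqlite3_busy_timeout(db, 2000);

      /* Or install an application-defined handler instead. */
      sqlite3_busy_handler(db, retryUpToTenTimes, 0);
    }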

    H12371: +If a sqlite3_get_table() fails a memory allocation, then +it shall free the result table under construction, abort the +query in process, skip any subsequent queries, set the +*pazResult output pointer to NULL and return SQLITE_NOMEM. + + +

    H12373: +If the pnColumn parameter to sqlite3_get_table() is not NULL +then a successful invocation of sqlite3_get_table() shall +write the number of columns in the +result set of the query into *pnColumn. + + +

H12374: +If the pnRow parameter to sqlite3_get_table() is not NULL +then a successful invocation of sqlite3_get_table() shall +write the number of rows in the +result set of the query into *pnRow. + + +

    H12376: +A successful invocation of sqlite3_get_table() that computes +N rows of result with C columns per row shall make *pazResult +point to an array of pointers to (N+1)*C strings where the first +C strings are column names as obtained from +sqlite3_column_name() and the rest are column result values +obtained from sqlite3_column_text(). + + +

    H12379: +The values in the pazResult array returned by sqlite3_get_table() +shall remain valid until cleared by sqlite3_free_table(). + + +

H12382: +When an error other than SQLITE_NOMEM occurs during evaluation +of sqlite3_get_table() the function shall set *pazResult to NULL, +write an error message into memory obtained from sqlite3_malloc(), make +*pzErrmsg point to that error message, and return an +appropriate error code. + + +
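A sketch showing how the (N+1)*C string array produced by sqlite3_get_table() is laid out and released; the query text is invented for the example:

    #include <stdio.h>
    #include "sqlite3.h"

    int printWholeTable(sqlite3 *db){
      char **azResult = 0;
      char *zErr = 0;
      int nRow = 0, nCol = 0, i, rc;

      rc = sqlite3_get_table(db, "SELECT * FROM t1;",
                             &azResult, &nRow, &nCol, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", zErr ? zErr : "unknown");
        sqlite3_free(zErr);
        return rc;
      }
      /* Entries 0..nCol-1 are the column names (H12376); the value of
      ** row R, column C is at azResult[(R+1)*nCol + C]. */
      for(i=0; i<(nRow+1)*nCol; i++){
        printf("azResult[%d] = %s\n", i, azResult[i] ? azResult[i] : "NULL");
      }
      sqlite3_free_table(azResult);  /* values remain valid until this call (H12379) */
      return SQLITE_OK;
    }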

H12501: +The sqlite3_set_authorizer(D,...) interface registers an +authorizer callback with database connection D. + + +

H12502: +The authorizer callback is invoked as SQL statements are +being parsed and compiled. + + +

    H12503: +If the authorizer callback returns any value other than +SQLITE_IGNORE, SQLITE_OK, or SQLITE_DENY, then +the application interface call that caused +the authorizer callback to run shall fail with an +SQLITE_ERROR error code and an appropriate error message. + + +

    H12504: +When the authorizer callback returns SQLITE_OK, the operation +described is processed normally. + + +

    H12505: +When the authorizer callback returns SQLITE_DENY, the +application interface call that caused the +authorizer callback to run shall fail +with an SQLITE_ERROR error code and an error message +explaining that access is denied. + + +

    H12506: +If the authorizer code (the 2nd parameter to the authorizer +callback) is SQLITE_READ and the authorizer callback returns +SQLITE_IGNORE, then the prepared statement is constructed to +insert a NULL value in place of the table column that would have +been read if SQLITE_OK had been returned. + + +

    H12507: +If the authorizer code (the 2nd parameter to the authorizer +callback) is anything other than SQLITE_READ, then +a return of SQLITE_IGNORE has the same effect as SQLITE_DENY. + + +

    H12510: +The first parameter to the authorizer callback is a copy of +the third parameter to the sqlite3_set_authorizer() interface. + + +

    H12511: +The second parameter to the callback is an integer +action code that specifies the particular action +to be authorized. + + +

    H12512: +The third through sixth parameters to the callback are +zero-terminated strings that contain +additional details about the action to be authorized. + + +

    H12520: +Each call to sqlite3_set_authorizer() overrides +any previously installed authorizer. + + +

    H12521: +A NULL authorizer means that no authorization +callback is invoked. + + +

    H12522: +The default authorizer is NULL. + + +

    H12551: +The second parameter to an +authorizer callback shall be an integer +authorizer code that specifies what action +is being authorized. + + +

    H12552: +The 3rd and 4th parameters to the +authorization callback +shall be parameters or NULL depending on which +authorizer code is used as the second parameter. + + +

    H12553: +The 5th parameter to the +authorizer callback shall be the name +of the database (example: "main", "temp", etc.) if applicable. + + +

    H12554: +The 6th parameter to the +authorizer callback shall be the name +of the inner-most trigger or view that is responsible for +the access attempt or NULL if this access attempt is directly from +top-level SQL code. + + +
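An illustrative authorizer; the column name checked here ("password") is invented for the example:

    #include <string.h>
    #include "sqlite3.h"

    static int exampleAuth(void *pArg, int code, const char *z3, const char *z4,
                           const char *zDb, const char *zTrigger){
      (void)pArg; (void)z3; (void)zDb; (void)zTrigger;
      /* For SQLITE_READ the 3rd parameter is the table name and the 4th
      ** is the column name. */
      if( code==SQLITE_READ && z4 && strcmp(z4, "password")==0 ){
        return SQLITE_IGNORE;   /* H12506: the column reads back as NULL */
      }
      if( code==SQLITE_DELETE ){
        return SQLITE_DENY;     /* H12505: the statement fails with SQLITE_ERROR */
      }
      return SQLITE_OK;         /* H12504: everything else proceeds normally */
    }

    void installAuthorizer(sqlite3 *db){
      sqlite3_set_authorizer(db, exampleAuth, 0);
    }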

    H12701: +The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces create a new +database connection associated with +the database file given in their first parameter. + + +

    H12702: +The filename argument is interpreted as UTF-8 +for sqlite3_open() and sqlite3_open_v2() and as UTF-16 +in the native byte order for sqlite3_open16(). + + +

    H12703: +A successful invocation of sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2() writes a pointer to a new +database connection into *ppDb. + + +

    H12704: +The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces return SQLITE_OK upon success, +or an appropriate error code on failure. + + +

    H12706: +The default text encoding for a new database created using +sqlite3_open() or sqlite3_open_v2() will be UTF-8. + + +

    H12707: +The default text encoding for a new database created using +sqlite3_open16() will be UTF-16. + + +

    H12709: +The sqlite3_open(F,D) interface is equivalent to +sqlite3_open_v2(F,D,G,0) where the G parameter is +SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE. + + +

    H12711: +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_READONLY then the database is opened +for reading only. + + +

    H12712: +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_READWRITE then the database is opened +reading and writing if possible, or for reading only if the +file is write protected by the operating system. + + +

    H12713: +If the G parameter to sqlite3_open_v2(F,D,G,V) omits the +bit value SQLITE_OPEN_CREATE and the database does not +previously exist, an error is returned. + + +

    H12714: +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_CREATE and the database does not +previously exist, then an attempt is made to create and +initialize the database. + + +

H12717: +If the filename argument to sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2() is ":memory:", then a private, +ephemeral, in-memory database is created for the connection. +Is SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE required +in sqlite3_open_v2()? + + +

    H12719: +If the filename is NULL or an empty string, then a private, +ephemeral on-disk database will be created. +Is SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE required +in sqlite3_open_v2()? + + +

    H12721: +The database connection created by sqlite3_open_v2(F,D,G,V) +will use the sqlite3_vfs object identified by the V parameter, +or the default sqlite3_vfs object if V is a NULL pointer. + + +

    H12723: +Two database connections will share a common cache if both were +opened with the same VFS while shared cache mode was enabled and +if both filenames compare equal using memcmp() after having been +processed by the xFullPathname method of the VFS. + + +
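A sketch of the open interfaces and flag combinations described above; the file names are examples only:

    #include "sqlite3.h"

    int openExamples(sqlite3 **ppRw, sqlite3 **ppRo, sqlite3 **ppMem){
      int rc;

      /* Equivalent to sqlite3_open(): read/write, create if missing (H12709). */
      rc = sqlite3_open_v2("app.db", ppRw,
                           SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
      if( rc!=SQLITE_OK ) return rc;

      /* Read-only: fails if the file does not already exist (H12711, H12713). */
      rc = sqlite3_open_v2("app.db", ppRo, SQLITE_OPEN_READONLY, 0);
      if( rc!=SQLITE_OK ) return rc;

      /* A private, ephemeral in-memory database (H12717). */
      return sqlite3_open(":memory:", ppMem);
    }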

    H12762: +A successful call to sqlite3_limit(D,C,V) where V is +positive changes the limit on the size of construct C in the +database connection D to the lesser of V and the hard upper +bound on the size of C that is set at compile-time. + + +

    H12766: +A successful call to sqlite3_limit(D,C,V) where V is negative +leaves the state of the database connection D unchanged. + + +

    H12769: +A successful call to sqlite3_limit(D,C,V) returns the +value of the limit on the size of construct C in the +database connection D as it was prior to the call. + + +

    H12801: +The sqlite3_errcode(D) interface returns the numeric +result code or extended result code for the most recently +failed interface call associated with the database connection D. + + +

    H12802: +The sqlite3_extended_errcode(D) interface returns the numeric +extended result code for the most recently +failed interface call associated with the database connection D. + + +

H12803: +The sqlite3_errmsg(D) and sqlite3_errmsg16(D) +interfaces return English-language text that describes +the error in the most recently failed interface call, +encoded as either UTF-8 or UTF-16 respectively. + + +

    H12807: +The strings returned by sqlite3_errmsg() and sqlite3_errmsg16() +are valid until the next SQLite interface call. + + +

    H12808: +Calls to API routines that do not return an error code +(example: sqlite3_data_count()) do not +change the error code or message returned by +sqlite3_errcode(), sqlite3_extended_errcode(), +sqlite3_errmsg(), or sqlite3_errmsg16(). + + +

    H12809: +Interfaces that are not associated with a specific +database connection (examples: +sqlite3_mprintf() or sqlite3_enable_shared_cache()) +do not change the values returned by +sqlite3_errcode(), sqlite3_extended_errcode(), +sqlite3_errmsg(), or sqlite3_errmsg16(). + + +
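Editorial illustration (not part of the diff): a small helper, assuming db is the connection whose call just failed, showing how the error-reporting interfaces above might be used together.

#include <stdio.h>
#include <sqlite3.h>

/* Report the most recent error on connection db after a failed call. */
static void report_error(sqlite3 *db){
  int code  = sqlite3_errcode(db);           /* primary result code  */
  int xcode = sqlite3_extended_errcode(db);  /* extended result code */
  /* The message is only guaranteed valid until the next SQLite call. */
  fprintf(stderr, "error %d (extended %d): %s\n",
          code, xcode, sqlite3_errmsg(db));
}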

    H12911: +The callback function registered by sqlite3_progress_handler() +is invoked periodically during long running calls to +sqlite3_step(). + + +

    H12912: +The progress callback is invoked once for every N virtual +machine opcodes, where N is the second argument to +the sqlite3_progress_handler() call that registered +the callback. If N is less than 1, sqlite3_progress_handler() +acts as if a NULL progress handler had been specified. + + +

    H12913: +The progress callback itself is identified by the third +argument to sqlite3_progress_handler(). + + +

    H12914: +The fourth argument to sqlite3_progress_handler() is a +void pointer passed to the progress callback +function each time it is invoked. + + +

    H12915: +If a call to sqlite3_step() results in fewer than N opcodes +being executed, then the progress callback is never invoked. + + +

    H12916: +Every call to sqlite3_progress_handler() +overwrites any previously registered progress handler. + + +

    H12917: +If the progress handler callback is NULL then no progress +handler is invoked. + + +

    H12918: +If the progress callback returns a result other than 0, then +the behavior is as if sqlite3_interrupt() had been called. + + +
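Editorial illustration (not part of the diff): a sketch of registering a progress handler; the opcode interval of 1000 and the cancelFlag variable are illustrative assumptions.

#include <sqlite3.h>

/* Progress callback: returning non-zero behaves as if sqlite3_interrupt()
** had been called, aborting the current sqlite3_step(). */
static int on_progress(void *pArg){
  volatile int *pCancel = (volatile int*)pArg;
  return *pCancel;          /* non-zero => abort the statement */
}

static volatile int cancelFlag = 0;

static void install_progress(sqlite3 *db){
  /* Invoke the callback roughly every 1000 virtual machine opcodes. */
  sqlite3_progress_handler(db, 1000, on_progress, (void*)&cancelFlag);
}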

    H12931: +The sqlite3_get_autocommit(D) interface returns non-zero or +zero if the database connection D is or is not in autocommit +mode, respectively. + + +

    H12932: +Autocommit mode is on by default. + + +

    H12933: +Autocommit mode is disabled by a successful BEGIN statement. + + +

    H12934: +Autocommit mode is enabled by a successful COMMIT or ROLLBACK +statement. + + +

    H12951: +The sqlite3_commit_hook(D,F,P) interface registers the +callback function F to be invoked with argument P whenever +a transaction commits on the database connection D. + + +

    H12952: +The sqlite3_commit_hook(D,F,P) interface returns the P argument +from the previous call with the same database connection D, +or NULL on the first call for a particular database connection D. + + +

    H12953: +Each call to sqlite3_commit_hook() overwrites the callback +registered by prior calls. + + +

    H12954: +If the F argument to sqlite3_commit_hook(D,F,P) is NULL +then the commit hook callback is canceled and no callback +is invoked when a transaction commits. + + +

    H12955: +If the commit callback returns non-zero then the commit is +converted into a rollback. + + +

    H12961: +The sqlite3_rollback_hook(D,F,P) interface registers the +callback function F to be invoked with argument P whenever +a transaction rolls back on the database connection D. + + +

    H12962: +The sqlite3_rollback_hook(D,F,P) interface returns the P +argument from the previous call with the same +database connection D, or NULL on the first call +for a particular database connection D. + + +

    H12963: +Each call to sqlite3_rollback_hook() overwrites the callback +registered by prior calls. + + +

    H12964: +If the F argument to sqlite3_rollback_hook(D,F,P) is NULL +then the rollback hook callback is canceled and no callback +is invoked when a transaction rolls back. + + +
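Editorial illustration (not part of the diff): a sketch registering commit and rollback hooks; returning non-zero from the commit hook converts the commit into a rollback, as stated above.

#include <stdio.h>
#include <sqlite3.h>

/* Returning non-zero from the commit hook turns the commit into a rollback. */
static int on_commit(void *pArg){
  (void)pArg;
  printf("about to commit\n");
  return 0;                 /* allow the commit to proceed */
}

static void on_rollback(void *pArg){
  (void)pArg;
  printf("transaction rolled back\n");
}

static void install_hooks(sqlite3 *db){
  /* Each call replaces any previously registered hook; the prior
  ** argument pointer (or NULL on first use) is returned. */
  sqlite3_commit_hook(db, on_commit, NULL);
  sqlite3_rollback_hook(db, on_rollback, NULL);
}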

    H12971: +The sqlite3_update_hook(D,F,P) interface causes the callback +function F to be invoked with first parameter P whenever +a table row is modified, inserted, or deleted on +the database connection D. + + +

    H12973: +The sqlite3_update_hook(D,F,P) interface returns the value +of P for the previous call on the same database connection D, +or NULL for the first call. + + +

    H12975: +If the update hook callback F in sqlite3_update_hook(D,F,P) +is NULL then no update callbacks are made. + + +

    H12977: +Each call to sqlite3_update_hook(D,F,P) overrides prior calls +to the same interface on the same database connection D. + + +

    H12979: +The update hook callback is not invoked when internal system +tables such as sqlite_master and sqlite_sequence are modified. + + +

    H12981: +The second parameter to the update callback +is one of SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE, +depending on the operation that caused the callback to be invoked. + + +

    H12983: +The third and fourth arguments to the callback contain pointers +to zero-terminated UTF-8 strings which are the names of the +database and table that is being updated. + + +

    H12986: +The final callback parameter is the rowid of the row after +the change occurs. + + +
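Editorial illustration (not part of the diff): a sketch of an update hook that logs the operation, database, table, and rowid passed to the callback.

#include <stdio.h>
#include <sqlite3.h>

/* Called after each INSERT, UPDATE, or DELETE on ordinary tables. */
static void on_update(void *pArg, int op, const char *zDb,
                      const char *zTable, sqlite3_int64 rowid){
  const char *zOp = (op==SQLITE_INSERT) ? "INSERT" :
                    (op==SQLITE_DELETE) ? "DELETE" : "UPDATE";
  (void)pArg;
  printf("%s on %s.%s rowid=%lld\n", zOp, zDb, zTable, (long long)rowid);
}

static void install_update_hook(sqlite3 *db){
  sqlite3_update_hook(db, on_update, NULL);
}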

    H13011: +The sqlite3_prepare(db,zSql,...) and +sqlite3_prepare_v2(db,zSql,...) interfaces interpret the +text in their zSql parameter as UTF-8. + + +

    H13012: +The sqlite3_prepare16(db,zSql,...) and +sqlite3_prepare16_v2(db,zSql,...) interfaces interpret the +text in their zSql parameter as UTF-16 in the native byte order. + + +

    H13013: +If the nByte argument to sqlite3_prepare_v2(db,zSql,nByte,...) +and its variants is less than zero, the SQL text is +read from zSql up to the first zero terminator. + + +

    H13014: +If the nByte argument to sqlite3_prepare_v2(db,zSql,nByte,...) +and its variants is non-negative, then at most nByte bytes of +SQL text are read from zSql. + + +

    H13015: +In sqlite3_prepare_v2(db,zSql,N,P,pzTail) and its variants +if the zSql input text contains more than one SQL statement +and pzTail is not NULL, then *pzTail is made to point to the +first byte past the end of the first SQL statement in zSql. +What does *pzTail point to if there is one statement? + + +

    H13016: +A successful call to sqlite3_prepare_v2(db,zSql,N,ppStmt,...) +or one of its variants writes into *ppStmt a pointer to a new +prepared statement or a NULL pointer if zSql contains +nothing other than whitespace or comments. + + +

    H13019: +The sqlite3_prepare_v2() interface and its variants return +SQLITE_OK or an appropriate error code upon failure. + + +

    H13021: +Before sqlite3_prepare(db,zSql,nByte,ppStmt,pzTail) or its +variants return an error (any value other than SQLITE_OK), +they first set *ppStmt to NULL. + + +
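Editorial illustration (not part of the diff): a sketch that uses pzTail to compile and run each statement of a multi-statement script in turn, relying on the NULL-*ppStmt behavior described above.

#include <sqlite3.h>

/* Prepare and run each statement in a semicolon-separated SQL script,
** using pzTail to advance past the statement just compiled. */
static int run_script(sqlite3 *db, const char *zSql){
  int rc = SQLITE_OK;
  while( zSql && zSql[0] && rc==SQLITE_OK ){
    sqlite3_stmt *pStmt = 0;
    const char *zTail = 0;
    rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
    if( rc!=SQLITE_OK ) break;        /* *ppStmt was set to NULL on error */
    if( pStmt ){                      /* NULL if zSql was only whitespace */
      while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* consume any rows */ }
      rc = sqlite3_finalize(pStmt);
    }
    zSql = zTail;                     /* first byte past the statement */
  }
  return rc;
}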

    H13101: +If the prepared statement passed as the argument to +sqlite3_sql() was compiled using either sqlite3_prepare_v2() or +sqlite3_prepare16_v2(), then sqlite3_sql() returns +a pointer to a zero-terminated string containing a UTF-8 rendering +of the original SQL statement. + + +

    H13102: +If the prepared statement passed as the argument to +sqlite3_sql() was compiled using either sqlite3_prepare() or +sqlite3_prepare16(), then sqlite3_sql() returns a NULL pointer. + + +

    H13103: +The string returned by sqlite3_sql(S) is valid until the +prepared statement S is deleted using sqlite3_finalize(S). + + +

    H13123: +The sqlite3_db_handle(S) interface returns a pointer +to the database connection associated with the +prepared statement S. + + +

    H13143: +If D is a database connection that holds one or more +unfinalized prepared statements and S is a NULL pointer, +then the sqlite3_next_stmt(D, S) routine shall return a pointer +to one of the prepared statements associated with D. + + +

    H13146: +If D is a database connection that holds no unfinalized +prepared statements and S is a NULL pointer, then the +sqlite3_next_stmt(D, S) routine shall return a NULL pointer. + + +

    H13149: +If S is a prepared statement in the database connection D +and S is not the last prepared statement in D, then the +sqlite3_next_stmt(D, S) routine shall return a pointer +to the next prepared statement in D after S. + + +

    H13152: +If S is the last prepared statement in the +database connection D then the sqlite3_next_stmt(D, S) +routine shall return a NULL pointer. + + +

    H13202: +If the prepared statement S is ready to be run, then +sqlite3_step(S) advances that prepared statement until +completion or until it is ready to return another row of the +result set, or until an interrupt +or a run-time error occurs. + + +

    H13506: +The SQL statement compiler recognizes tokens of the forms +"?", "?NNN", "$VVV", ":VVV", and "@VVV" as SQL parameters, +where NNN is any sequence of one or more digits +and where VVV is any sequence of one or more alphanumeric +characters or "::" optionally followed by a string containing +no spaces and contained within parentheses. + + +

    H13509: +The initial value of an SQL parameter is NULL. + + +

    H13512: +The index of an "?" SQL parameter is one larger than the +largest index of SQL parameter to the left, or 1 if +the "?" is the leftmost SQL parameter. + + +

    H13515: +The index of an "?NNN" SQL parameter is the integer NNN. + + +

    H13518: +The index of an ":VVV", "$VVV", or "@VVV" SQL parameter is +the same as the index of the leftmost occurrence of the same +parameter, or one more than the largest index over all +parameters to the left if this is the first occurrence +of this parameter, or 1 if this is the leftmost parameter. + + +

    H13521: +The SQL statement compiler fails with an SQLITE_RANGE +error if the index of an SQL parameter is less than 1 +or greater than the compile-time SQLITE_MAX_VARIABLE_NUMBER +parameter. + + +

    H13524: +Calls to sqlite3_bind(S,N,V,...) +associate the value V with all SQL parameters having an +index of N in the prepared statement S. + + +

    H13527: +Calls to sqlite3_bind(S,N,...) +override prior calls with the same values of S and N. + + +

    H13530: +Bindings established by sqlite3_bind(S,...) +persist across calls to sqlite3_reset(S). + + +

    H13533: +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) SQLite binds the first L +bytes of the BLOB or string pointed to by V, when L +is non-negative. + + +

    H13536: +In calls to sqlite3_bind_text(S,N,V,L,D) or +sqlite3_bind_text16(S,N,V,L,D) SQLite binds characters +from V through the first zero character when L is negative. + + +

    H13539: +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is the special +constant SQLITE_STATIC, SQLite assumes that the value V +is held in static unmanaged space that will not change +during the lifetime of the binding. + + +

    H13542: +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is the special +constant SQLITE_TRANSIENT, the routine makes a +private copy of the value V before it returns. + + +

    H13545: +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is a pointer to +a function, SQLite invokes that function to destroy the +value V after it has finished using the value V. + + +

    H13548: +In calls to sqlite3_bind_zeroblob(S,N,V,L) the value bound +is a BLOB of L bytes, or a zero-length BLOB if L is negative. + + +

    H13551: +In calls to sqlite3_bind_value(S,N,V) the V argument may +be either a protected sqlite3_value object or an +unprotected sqlite3_value object. + + +

    H13601: +The sqlite3_bind_parameter_count(S) interface returns +the largest index of all SQL parameters in the +prepared statement S, or 0 if S contains no SQL parameters. + + +

    H13621: +The sqlite3_bind_parameter_name(S,N) interface returns +a UTF-8 rendering of the name of the SQL parameter in +the prepared statement S having index N, or +NULL if there is no SQL parameter with index N or if the +parameter with index N is an anonymous parameter "?". + + +

    H13641: +The sqlite3_bind_parameter_index(S,N) interface returns +the index of the SQL parameter in the prepared statement +S whose name matches the UTF-8 string N, or 0 if there is +no match. + + +

    H13661: +The sqlite3_clear_bindings(S) interface resets all SQL +parameter bindings in the prepared statement S back to NULL. + + +
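Editorial illustration (not part of the diff): a binding sketch; the table people and its columns are hypothetical. It looks up a named parameter with sqlite3_bind_parameter_index() and uses SQLITE_TRANSIENT so that SQLite makes its own copy of the text.

#include <sqlite3.h>

/* Bind values to named and positional parameters of a prepared statement. */
static int insert_person(sqlite3 *db, const char *zName, int age){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO people(name, age) VALUES(:name, ?2)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;

  /* Look up the index of the named parameter rather than hard-coding it. */
  int iName = sqlite3_bind_parameter_index(pStmt, ":name");
  /* SQLITE_TRANSIENT forces SQLite to make a private copy of the string. */
  sqlite3_bind_text(pStmt, iName, zName, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int(pStmt, 2, age);

  rc = sqlite3_step(pStmt);
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}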

    H13711: +The sqlite3_column_count(S) interface returns the number of +columns in the result set generated by the prepared statement S, +or 0 if S does not generate a result set. + + +

    H13721: +A successful invocation of the sqlite3_column_name(S,N) +interface returns the name of the Nth column (where 0 is +the leftmost column) for the result set of the +prepared statement S as a zero-terminated UTF-8 string. + + +

    H13723: +A successful invocation of the sqlite3_column_name16(S,N) +interface returns the name of the Nth column (where 0 is +the leftmost column) for the result set of the +prepared statement S as a zero-terminated UTF-16 string +in the native byte order. + + +

    H13724: +The sqlite3_column_name() and sqlite3_column_name16() +interfaces return a NULL pointer if they are unable to +allocate memory to hold their normal return strings. + + +

    H13725: +If the N parameter to sqlite3_column_name(S,N) or +sqlite3_column_name16(S,N) is out of range, then the +interfaces return a NULL pointer. + + +

    H13726: +The strings returned by sqlite3_column_name(S,N) and +sqlite3_column_name16(S,N) are valid until the next +call to either routine with the same S and N parameters +or until sqlite3_finalize(S) is called. + + +

    H13727: +When a result column of a SELECT statement contains +an AS clause, the name of that column is the identifier +to the right of the AS keyword. + + +

    H13741: +The sqlite3_column_database_name(S,N) interface returns either +the UTF-8 zero-terminated name of the database from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. + + +

    H13742: +The sqlite3_column_database_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the database +from which the Nth result column of the prepared statement S is +extracted, or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. + + +

    H13743: +The sqlite3_column_table_name(S,N) interface returns either +the UTF-8 zero-terminated name of the table from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. + + +

    H13744: +The sqlite3_column_table_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the table +from which the Nth result column of the prepared statement S is +extracted, or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. + + +

    H13745: +The sqlite3_column_origin_name(S,N) interface returns either +the UTF-8 zero-terminated name of the table column from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. + + +

    H13746: +The sqlite3_column_origin_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the table +column from which the Nth result column of the +prepared statement S is extracted, or NULL if the Nth column +of S is a general expression or if unable to allocate memory +to store the name. + + +

    H13748: +The return values from +column metadata interfaces +are valid for the lifetime of the prepared statement +or until the encoding is changed by another metadata +interface call for the same prepared statement and column. + + +

    H13761: +A successful call to sqlite3_column_decltype(S,N) returns a +zero-terminated UTF-8 string containing the declared datatype +of the table column that appears as the Nth column (numbered +from 0) of the result set to the prepared statement S. + + +

    H13762: +A successful call to sqlite3_column_decltype16(S,N) +returns a zero-terminated UTF-16 native byte order string +containing the declared datatype of the table column that appears +as the Nth column (numbered from 0) of the result set to the +prepared statement S. + + +

    H13763: +If N is less than 0 or N is greater than or equal to +the number of columns in the prepared statement S, +or if the Nth column of S is an expression or subquery rather +than a table column, or if a memory allocation failure +occurs during encoding conversions, then +calls to sqlite3_column_decltype(S,N) or +sqlite3_column_decltype16(S,N) return NULL. + + +

    H13771: +After a call to sqlite3_step(S) that returns SQLITE_ROW, +the sqlite3_data_count(S) routine will return the same value +as the sqlite3_column_count(S) function. + + +

    H13772: +After sqlite3_step(S) has returned any value other than +SQLITE_ROW or before sqlite3_step(S) has been called on the +prepared statement for the first time since it was +prepared or reset, +the sqlite3_data_count(S) routine returns zero. + + +

    H13803: +The sqlite3_column_blob(S,N) interface converts the +Nth column in the current row of the result set for +the prepared statement S into a BLOB and then returns a +pointer to the converted value. + + +

    H13806: +The sqlite3_column_bytes(S,N) interface returns the +number of bytes in the BLOB or string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_column_blob(S,N) or +sqlite3_column_text(S,N). + + +

    H13809: +The sqlite3_column_bytes16(S,N) interface returns the +number of bytes in the string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_column_text16(S,N). + + +

    H13812: +The sqlite3_column_double(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a floating point value and +returns a copy of that value. + + +

    H13815: +The sqlite3_column_int(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a 64-bit signed integer and +returns the lower 32 bits of that integer. + + +

    H13818: +The sqlite3_column_int64(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a 64-bit signed integer and +returns a copy of that integer. + + +

    H13821: +The sqlite3_column_text(S,N) interface converts the +Nth column in the current row of the result set for +the prepared statement S into a zero-terminated UTF-8 +string and returns a pointer to that string. + + +

    H13824: +The sqlite3_column_text16(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a zero-terminated 2-byte +aligned UTF-16 native byte order string and returns +a pointer to that string. + + +

    H13827: +The sqlite3_column_type(S,N) interface returns +one of SQLITE_NULL, SQLITE_INTEGER, SQLITE_FLOAT, +SQLITE_TEXT, or SQLITE_BLOB as appropriate for +the Nth column in the current row of the result set for +the prepared statement S. + + +

    H13830: +The sqlite3_column_value(S,N) interface returns a +pointer to an unprotected sqlite3_value object for the +Nth column in the current row of the result set for +the prepared statement S. + + +
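Editorial illustration (not part of the diff): a sketch that walks a result set and dispatches on sqlite3_column_type() to the appropriate sqlite3_column_* accessor.

#include <stdio.h>
#include <sqlite3.h>

/* Step through a query and print each column of each row according
** to its dynamic type. */
static void print_rows(sqlite3_stmt *pStmt){
  int nCol = sqlite3_column_count(pStmt);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    for(int i=0; i<nCol; i++){
      switch( sqlite3_column_type(pStmt, i) ){
        case SQLITE_INTEGER:
          printf("%lld ", (long long)sqlite3_column_int64(pStmt, i)); break;
        case SQLITE_FLOAT:
          printf("%g ", sqlite3_column_double(pStmt, i));             break;
        case SQLITE_TEXT:
          printf("%s ", (const char*)sqlite3_column_text(pStmt, i));  break;
        case SQLITE_BLOB:
          printf("<%d-byte blob> ", sqlite3_column_bytes(pStmt, i));  break;
        default:
          printf("NULL ");                                            break;
      }
    }
    printf("\n");
  }
}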

    H14103: +A successful invocation of sqlite3_config() shall return +SQLITE_OK. + + +

    H14106: +The sqlite3_config() interface shall return SQLITE_MISUSE +if it is invoked in between calls to sqlite3_initialize() and +sqlite3_shutdown(). + + +

    H14120: +A successful call to sqlite3_config(SQLITE_CONFIG_SINGLETHREAD) +shall set the default threading mode to Single-thread. + + +

    H14123: +A successful call to sqlite3_config(SQLITE_CONFIG_MULTITHREAD) +shall set the default threading mode to Multi-thread. + + +

    H14126: +A successful call to sqlite3_config(SQLITE_CONFIG_SERIALIZED) +shall set the default threading mode to Serialized. + + +

    H14129: +A successful call to sqlite3_config(SQLITE_CONFIG_MUTEX,X) +where X is a pointer to an initialized sqlite3_mutex_methods +object shall cause all subsequent mutex operations performed +by SQLite to use the mutex methods that were present in X +during the call to sqlite3_config(). + + +

    H14132: +A successful call to sqlite3_config(SQLITE_CONFIG_GETMUTEX,X) +where X is a pointer to an sqlite3_mutex_methods object +shall overwrite the content of sqlite3_mutex_methods object +with the mutex methods currently in use by SQLite. + + +

    H14135: +A successful call to sqlite3_config(SQLITE_CONFIG_MALLOC,M) +where M is a pointer to an initialized sqlite3_mem_methods +object shall cause all subsequent memory allocation operations +performed by SQLite to use the methods that were present in +M during the call to sqlite3_config(). + + +

    H14138: +A successful call to sqlite3_config(SQLITE_CONFIG_GETMALLOC,M) +where M is a pointer to an sqlite3_mem_methods object shall +overwrite the content of sqlite3_mem_methods object with +the memory allocation methods currently in use by +SQLite. + + +

    H14141: +A successful call to sqlite3_config(SQLITE_CONFIG_MEMSTATUS,1) +shall enable the memory allocation status collection logic. + + +

    H14144: +A successful call to sqlite3_config(SQLITE_CONFIG_MEMSTATUS,0) +shall disable the memory allocation status collection logic. + + +

    H14147: +The memory allocation status collection logic shall be +enabled by default. + + +

    H14150: +A successful call to sqlite3_config(SQLITE_CONFIG_SCRATCH,S,Z,N) +where Z and N are non-negative integers and +S is a pointer to an aligned memory buffer not less than +Z*N bytes in size shall cause S to be used by the +scratch memory allocator for as many as N simultaneous +allocations each of size Z. + + +

    H14153: +A successful call to sqlite3_config(SQLITE_CONFIG_SCRATCH,S,Z,N) +where S is a NULL pointer shall disable the +scratch memory allocator. + + +

    H14156: +A successful call to +sqlite3_config(SQLITE_CONFIG_PAGECACHE,S,Z,N) +where Z and N are non-negative integers and +S is a pointer to an aligned memory buffer not less than +Z*N bytes in size shall cause S to be used by the +pagecache memory allocator for as many as N simultaneous +allocations each of size Z. + + +

    H14159: +A successful call to +sqlite3_config(SQLITE_CONFIG_PAGECACHE,S,Z,N) +where S is a NULL pointer shall disable the +pagecache memory allocator. + + +

    H14162: +A successful call to sqlite3_config(SQLITE_CONFIG_HEAP,H,Z,N) +where Z and N are non-negative integers and +H is a pointer to an aligned memory buffer not less than +Z bytes in size shall enable the memsys5 memory allocator +and cause it to use buffer H as its memory source and to use +a minimum allocation size of N. + + +

    H14165: +A successful call to sqlite3_config(SQLITE_CONFIG_HEAP,H,Z,N) +where H is a NULL pointer shall disable the +memsys5 memory allocator. + + +

    H14168: +A successful call to sqlite3_config(SQLITE_CONFIG_LOOKASIDE,Z,N) +shall cause the default lookaside memory allocator configuration +for new database connections to be N slots of Z bytes each. + + +
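Editorial illustration (not part of the diff): a sketch of process-wide configuration before library initialization; the Serialized threading mode and the 256-byte/64-slot lookaside values are illustrative choices.

#include <sqlite3.h>

/* Process-wide configuration must happen before sqlite3_initialize()
** (or any API call that initializes the library implicitly). */
static int configure_library(void){
  int rc;

  /* Serialized threading mode: a single connection may be used from
  ** multiple threads at the same time. */
  rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
  if( rc!=SQLITE_OK ) return rc;

  /* Default lookaside configuration for new connections:
  ** 64 slots of 256 bytes each. */
  rc = sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 256, 64);
  if( rc!=SQLITE_OK ) return rc;

  return sqlite3_initialize();
}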

    H14203: +A call to sqlite3_db_config(D,V,...) shall return SQLITE_OK +if and only if the call is successful. + + +

    H14206: +If one or more slots of the lookaside memory allocator for +database connection D are in use, then a call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,...) shall +fail with an SQLITE_BUSY return code. + + +

    H14209: +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are positive +integers and B is an aligned buffer at least Z*N bytes in size +shall cause the lookaside memory allocator for D to use buffer B +with N slots of Z bytes each. + + +

    H14212: +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are positive +integers and B is a NULL pointer shall cause the +lookaside memory allocator for D to obtain a Z*N byte buffer +from the primary memory allocator and use that buffer +with N lookaside slots of Z bytes each. + + +

    H14215: +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are zero shall +disable the lookaside memory allocator for D. + + +

    H15103: +The sqlite3_value_blob(V) interface converts the +protected sqlite3_value object V into a BLOB and then +returns a pointer to the converted value. + + +

    H15106: +The sqlite3_value_bytes(V) interface returns the +number of bytes in the BLOB or string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_value_blob(V) or +sqlite3_value_text(V). + + +

    H15109: +The sqlite3_value_bytes16(V) interface returns the +number of bytes in the string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_value_text16(V), +sqlite3_value_text16be(V), or sqlite3_value_text16le(V). + + +

    H15112: +The sqlite3_value_double(V) interface converts the +protected sqlite3_value object V into a floating point value and +returns a copy of that value. + + +

    H15115: +The sqlite3_value_int(V) interface converts the +protected sqlite3_value object V into a 64-bit signed integer and +returns the lower 32 bits of that integer. + + +

    H15118: +The sqlite3_value_int64(V) interface converts the +protected sqlite3_value object V into a 64-bit signed integer and +returns a copy of that integer. + + +

    H15121: +The sqlite3_value_text(V) interface converts the +protected sqlite3_value object V into a zero-terminated UTF-8 +string and returns a pointer to that string. + + +

    H15124: +The sqlite3_value_text16(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 native byte order +string and returns a pointer to that string. + + +

    H15127: +The sqlite3_value_text16be(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 big-endian +string and returns a pointer to that string. + + +

    H15130: +The sqlite3_value_text16le(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 little-endian +string and returns a pointer to that string. + + +

    H15133: +The sqlite3_value_type(V) interface returns +one of SQLITE_NULL, SQLITE_INTEGER, SQLITE_FLOAT, +SQLITE_TEXT, or SQLITE_BLOB as appropriate for +the sqlite3_value object V. + + +

    H15136: +The sqlite3_value_numeric_type(V) interface converts +the protected sqlite3_value object V into either an integer or +a floating point value if it can do so without loss of +information, and returns one of SQLITE_NULL, +SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, or +SQLITE_BLOB as appropriate for the +protected sqlite3_value object V after the conversion attempt. + + +

    H15304: +When a call to sqlite3_step(S) causes the prepared statement +S to run to completion, the function returns SQLITE_DONE. + + +

    H15306: +When a call to sqlite3_step(S) stops because it is ready to +return another row of the result set, it returns SQLITE_ROW. + + +

    H15308: +If a call to sqlite3_step(S) encounters an +interrupt or a run-time error, +it returns an appropriate error code that is not one of +SQLITE_OK, SQLITE_ROW, or SQLITE_DONE. + + +

    H15310: +If an interrupt or a run-time error +occurs during a call to sqlite3_step(S) +for a prepared statement S created using +legacy interfaces sqlite3_prepare() or +sqlite3_prepare16(), then the function returns either +SQLITE_ERROR, SQLITE_BUSY, or SQLITE_MISUSE. + + +

    H16103: +The sqlite3_create_function16(D,X,...) interface shall behave +as sqlite3_create_function(D,X,...) in every way except that it +interprets the X argument as zero-terminated UTF-16 +native byte order instead of as zero-terminated UTF-8. + + +

    H16106: +A successful invocation of the +sqlite3_create_function(D,X,N,E,...) interface shall register +or replace callback functions in the database connection D +used to implement the SQL function named X with N parameters +and having a preferred text encoding of E. + + +

    H16109: +A successful call to sqlite3_create_function(D,X,N,E,P,F,S,L) +shall replace the P, F, S, and L values from any prior calls with +the same D, X, N, and E values. + + +

    H16112: +The sqlite3_create_function(D,X,...) interface shall fail +if the SQL function name X is +longer than 255 bytes exclusive of the zero terminator. + + +

    H16118: +The sqlite3_create_function(D,X,N,E,P,F,S,L) interface +shall fail unless either F is NULL and S and L are non-NULL or +F is non-NULL and S and L are NULL. + + +

    H16121: +The sqlite3_create_function(D,...) interface shall fail with an +error code of SQLITE_BUSY if there exist prepared statements +associated with the database connection D. + + +

    H16127: +When N is non-negative, the sqlite3_create_function(D,X,N,...) +interface shall register callbacks to be invoked for the +SQL function +named X when the number of arguments to the SQL function is +exactly N. + + +

    H16130: +When N is -1, the sqlite3_create_function(D,X,N,...) +interface shall register callbacks to be invoked for the SQL +function named X with any number of arguments. + + +

    H16133: +When calls to sqlite3_create_function(D,X,N,...) +specify multiple implementations of the same function X +and when one implementation has N>=0 and the other has N=(-1) +the implementation with a non-negative N shall be preferred. + + +

    H16136: +When calls to sqlite3_create_function(D,X,N,E,...) +specify multiple implementations of the same function X with +the same number of arguments N but with different +encodings E, then the implementation where E matches the +database encoding shall be preferred. + + +

    H16139: +For an aggregate SQL function created using +sqlite3_create_function(D,X,N,E,P,0,S,L) the finalizer +function L shall always be invoked exactly once if the +step function S is called one or more times. + + +

    H16142: +When SQLite invokes either the xFunc or xStep function of +an application-defined SQL function or aggregate created +by sqlite3_create_function() or sqlite3_create_function16(), +then the array of sqlite3_value objects passed as the +third parameter shall be protected sqlite3_value objects. + + +
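Editorial illustration (not part of the diff): a sketch of a hypothetical scalar SQL function half(X) registered with sqlite3_create_function(); the argv[] entries it receives are protected sqlite3_value objects, as required above.

#include <sqlite3.h>

/* xFunc implementation for a one-argument SQL function half(X) = X/2.0. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);            /* NULL in, NULL out */
  }else{
    sqlite3_result_double(ctx, sqlite3_value_double(argv[0]) / 2.0);
  }
}

static int register_half(sqlite3 *db){
  /* Scalar function: xFunc set, xStep and xFinal NULL. */
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 halfFunc, 0, 0);
}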

    H16211: +The first invocation of sqlite3_aggregate_context(C,N) for +a particular instance of an aggregate function (for a particular +context C) causes SQLite to allocate N bytes of memory, +zero that memory, and return a pointer to the allocated memory. + + +

    H16213: +If a memory allocation error occurs during +sqlite3_aggregate_context(C,N) then the function returns 0. + + +

    H16215: +Second and subsequent invocations of +sqlite3_aggregate_context(C,N) for the same context pointer C +ignore the N parameter and return a pointer to the same +block of memory returned by the first invocation. + + +

    H16217: +The memory allocated by sqlite3_aggregate_context(C,N) is +automatically freed on the next call to sqlite3_reset() +or sqlite3_finalize() for the prepared statement containing +the aggregate function associated with context C. + + +
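Editorial illustration (not part of the diff): a sketch of a hypothetical aggregate mysum() that keeps its running total in memory obtained from sqlite3_aggregate_context(), which is freed automatically when the statement is reset or finalized.

#include <sqlite3.h>

typedef struct SumCtx { double total; } SumCtx;

/* xStep: the first call allocates and zeroes sizeof(SumCtx) bytes;
** later calls return the same block. */
static void mysumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(SumCtx));
  (void)argc;
  if( p ) p->total += sqlite3_value_double(argv[0]);
}

/* xFinal: invoked exactly once if xStep ran one or more times. */
static void mysumFinal(sqlite3_context *ctx){
  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->total : 0.0);
}

static int register_mysum(sqlite3 *db){
  return sqlite3_create_function(db, "mysum", 1, SQLITE_UTF8, 0,
                                 0, mysumStep, mysumFinal);
}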

    H16243: +The sqlite3_user_data(C) interface returns a copy of the +P pointer from the sqlite3_create_function(D,X,N,E,P,F,S,L) +or sqlite3_create_function16(D,X,N,E,P,F,S,L) call that +registered the SQL function associated with sqlite3_context C. + + +

    H16253: +The sqlite3_context_db_handle(C) interface returns a copy of the +D pointer from the sqlite3_create_function(D,X,N,E,P,F,S,L) +or sqlite3_create_function16(D,X,N,E,P,F,S,L) call that +registered the SQL function associated with sqlite3_context C. + + +

    H16272: +The sqlite3_get_auxdata(C,N) interface returns a pointer +to metadata associated with the Nth parameter of the SQL function +whose context is C, or NULL if there is no metadata associated +with that parameter. + + +

    H16274: +The sqlite3_set_auxdata(C,N,P,D) interface assigns a metadata +pointer P to the Nth parameter of the SQL function with context C. + + +

    H16276: +SQLite will invoke the destructor D with a single argument +which is the metadata pointer P following a call to +sqlite3_set_auxdata(C,N,P,D) when SQLite ceases to hold +the metadata. + + +

    H16277: +SQLite ceases to hold metadata for an SQL function parameter +when the value of that parameter changes. + + +

    H16278: +When sqlite3_set_auxdata(C,N,P,D) is invoked, the destructor +is called for any prior metadata associated with the same function +context C and parameter N. + + +

    H16279: +SQLite will call destructors for any metadata it is holding +in a particular prepared statement S when either +sqlite3_reset(S) or sqlite3_finalize(S) is called. + + +

    H16351: +The sqlite3_soft_heap_limit(N) interface places a soft limit +of N bytes on the amount of heap memory that may be allocated +using sqlite3_malloc() or sqlite3_realloc() at any point +in time. + + +

    H16352: +If a call to sqlite3_malloc() or sqlite3_realloc() would +cause the total amount of allocated memory to exceed the +soft heap limit, then sqlite3_release_memory() is invoked +in an attempt to reduce the memory usage prior to proceeding +with the memory allocation attempt. + + +

    H16353: +Calls to sqlite3_malloc() or sqlite3_realloc() that trigger +attempts to reduce memory usage through the soft heap limit +mechanism continue even if the attempt to reduce memory +usage is unsuccessful. + + +

    H16354: +A negative or zero value for N in a call to +sqlite3_soft_heap_limit(N) means that there is no soft +heap limit and sqlite3_release_memory() will only be +called when memory is completely exhausted. + + +

    H16355: +The default value for the soft heap limit is zero. + + +

    H16358: +Each call to sqlite3_soft_heap_limit(N) overrides the +values set by all prior calls. + + +

    H16403: +The default return value from any SQL function is NULL. + + +

    H16406: +The sqlite3_result_blob(C,V,N,D) interface changes the +return value of function C to be a BLOB that is N bytes +in length and with content pointed to by V. + + +

    H16409: +The sqlite3_result_double(C,V) interface changes the +return value of function C to be the floating point value V. + + +

    H16412: +The sqlite3_result_error(C,V,N) interface changes the return +value of function C to be an exception with error code +SQLITE_ERROR and a UTF-8 error message copied from V up to the +first zero byte or until N bytes are read if N is positive. + + +

    H16415: +The sqlite3_result_error16(C,V,N) interface changes the return +value of function C to be an exception with error code +SQLITE_ERROR and a UTF-16 native byte order error message +copied from V up to the first zero terminator or until N bytes +are read if N is positive. + + +

    H16418: +The sqlite3_result_error_toobig(C) interface changes the return +value of the function C to be an exception with error code +SQLITE_TOOBIG and an appropriate error message. + + +

    H16421: +The sqlite3_result_error_nomem(C) interface changes the return +value of the function C to be an exception with error code +SQLITE_NOMEM and an appropriate error message. + + +

    H16424: +The sqlite3_result_error_code(C,E) interface changes the return +value of the function C to be an exception with error code E. +The error message text is unchanged. + + +

    H16427: +The sqlite3_result_int(C,V) interface changes the +return value of function C to be the 32-bit integer value V. + + +

    H16430: +The sqlite3_result_int64(C,V) interface changes the +return value of function C to be the 64-bit integer value V. + + +

    H16433: +The sqlite3_result_null(C) interface changes the +return value of function C to be NULL. + + +

    H16436: +The sqlite3_result_text(C,V,N,D) interface changes the +return value of function C to be the UTF-8 string +V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. + + +

    H16439: +The sqlite3_result_text16(C,V,N,D) interface changes the +return value of function C to be the UTF-16 native byte order +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. + + +

    H16442: +The sqlite3_result_text16be(C,V,N,D) interface changes the +return value of function C to be the UTF-16 big-endian +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. + + +

    H16445: +The sqlite3_result_text16le(C,V,N,D) interface changes the +return value of function C to be the UTF-16 little-endian +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. + + +

    H16448: +The sqlite3_result_value(C,V) interface changes the +return value of function C to be the unprotected sqlite3_value +object V. + + +

    H16451: +The sqlite3_result_zeroblob(C,N) interface changes the +return value of function C to be an N-byte BLOB of all zeros. + + +

    H16454: +The sqlite3_result_error() and sqlite3_result_error16() +interfaces make a copy of their error message strings before +returning. + + +

    H16457: +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is the constant SQLITE_STATIC +then no destructor is ever called on the pointer V and SQLite +assumes that V is immutable. + + +

    H16460: +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is the constant +SQLITE_TRANSIENT then the interface makes a copy of the +content of V and retains the copy. + + +

    H16463: +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is some value other than +the constants SQLITE_STATIC and SQLITE_TRANSIENT then +SQLite will invoke the destructor D with V as its only argument +when it has finished with the V value. + + +

    H16603: +A successful call to the +sqlite3_create_collation_v2(B,X,E,P,F,D) interface +registers function F as the comparison function used to +implement collation X on the database connection B for +databases having encoding E. + + +

    H16604: +SQLite understands the X parameter to +sqlite3_create_collation_v2(B,X,E,P,F,D) as a zero-terminated +UTF-8 string in which letter case is ignored for ASCII characters and +is significant for non-ASCII characters. + + +

    H16606: +Successive calls to sqlite3_create_collation_v2(B,X,E,P,F,D) +with the same values for B, X, and E, override prior values +of P, F, and D. + + +

    H16609: +If the destructor D in sqlite3_create_collation_v2(B,X,E,P,F,D) +is not NULL then it is called with argument P when the +collating function is dropped by SQLite. + + +

    H16612: +A collating function is dropped when it is overloaded. + + +

    H16615: +A collating function is dropped when the database connection +is closed using sqlite3_close(). + + +

    H16618: +The pointer P in sqlite3_create_collation_v2(B,X,E,P,F,D) +is passed through as the first parameter to the comparison +function F for all subsequent invocations of F. + + +

    H16621: +A call to sqlite3_create_collation(B,X,E,P,F) is exactly +the same as a call to sqlite3_create_collation_v2() with +the same parameters and a NULL destructor. + + +

    H16624: +Following a sqlite3_create_collation_v2(B,X,E,P,F,D), +SQLite uses the comparison function F for all text comparison +operations on the database connection B on text values that +use the collating sequence named X. + + +

    H16627: +The sqlite3_create_collation16(B,X,E,P,F) works the same +as sqlite3_create_collation(B,X,E,P,F) except that the +collation name X is understood as UTF-16 in native byte order +instead of UTF-8. + + +

    H16630: +When multiple comparison functions are available for the same +collating sequence, SQLite chooses the one whose text encoding +requires the least amount of conversion from the default +text encoding of the database. + + +
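Editorial illustration (not part of the diff): a sketch of a hypothetical collation mynocase restricted to ASCII case folding, registered with sqlite3_create_collation(), which behaves like the _v2 form with a NULL destructor.

#include <ctype.h>
#include <sqlite3.h>

/* Case-insensitive comparison over UTF-8 text, ASCII case folding only. */
static int nocaseCmp(void *pArg, int n1, const void *z1,
                     int n2, const void *z2){
  int n = n1<n2 ? n1 : n2;
  const unsigned char *a = (const unsigned char*)z1;
  const unsigned char *b = (const unsigned char*)z2;
  (void)pArg;
  for(int i=0; i<n; i++){
    int c = tolower(a[i]) - tolower(b[i]);
    if( c ) return c;
  }
  return n1 - n2;
}

static int register_collation(sqlite3 *db){
  return sqlite3_create_collation(db, "mynocase", SQLITE_UTF8, 0, nocaseCmp);
}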

    H16702: +A successful call to sqlite3_collation_needed(D,P,F) +or sqlite3_collation_needed16(D,P,F) causes +the database connection D to invoke callback F with first +parameter P whenever it needs a comparison function for a +collating sequence that it does not know about. + + +

    H16704: +Each successful call to sqlite3_collation_needed() or +sqlite3_collation_needed16() overrides the callback registered +on the same database connection by prior calls to either +interface. + + +

    H16706: +The name of the requested collating function passed in the +4th parameter to the callback is in UTF-8 if the callback +was registered using sqlite3_collation_needed() and +is in UTF-16 native byte order if the callback was +registered using sqlite3_collation_needed16(). + + +

    H17303: +The sqlite3_malloc(N) interface returns either a pointer to +a newly checked-out block of at least N bytes of memory +that is 8-byte aligned, or it returns NULL if it is unable +to fulfill the request. + + +

    H17304: +The sqlite3_malloc(N) interface returns a NULL pointer if +N is less than or equal to zero. + + +

    H17305: +The sqlite3_free(P) interface releases memory previously +returned from sqlite3_malloc() or sqlite3_realloc(), +making it available for reuse. + + +

    H17306: +A call to sqlite3_free(NULL) is a harmless no-op. + + +

    H17310: +A call to sqlite3_realloc(0,N) is equivalent to a call +to sqlite3_malloc(N). + + +

    H17312: +A call to sqlite3_realloc(P,0) is equivalent to a call +to sqlite3_free(P). + + +

    H17315: +The SQLite core uses sqlite3_malloc(), sqlite3_realloc(), +and sqlite3_free() for all of its memory allocation and +deallocation needs. + + +

    H17318: +The sqlite3_realloc(P,N) interface returns either a pointer +to a block of checked-out memory of at least N bytes in size +that is 8-byte aligned, or a NULL pointer. + + +

    H17321: +When sqlite3_realloc(P,N) returns a non-NULL pointer, it first +copies the first K bytes of content from P into the newly +allocated block, where K is the lesser of N and the size of +the buffer P. + + +

    H17322: +When sqlite3_realloc(P,N) returns a non-NULL pointer, it first +releases the buffer P. + + +

    H17323: +When sqlite3_realloc(P,N) returns NULL, the buffer P is +not modified or released. + + +
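Editorial illustration (not part of the diff): a sketch that grows a buffer with sqlite3_realloc(), relying on the rule above that the original buffer is untouched when the call returns NULL.

#include <string.h>
#include <sqlite3.h>

/* Append zAdd to a zero-terminated buffer of length nOld allocated with
** the SQLite allocator; on failure the buffer is released and NULL returned. */
static char *append_text(char *zBuf, int nOld, const char *zAdd){
  int nAdd = (int)strlen(zAdd);
  char *zNew = sqlite3_realloc(zBuf, nOld + nAdd + 1);
  if( zNew==0 ){
    sqlite3_free(zBuf);      /* realloc failed: old buffer is still valid */
    return 0;
  }
  memcpy(&zNew[nOld], zAdd, nAdd+1);
  return zNew;
}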

    H17341: +The sqlite3_release_memory(N) interface attempts to +free N bytes of heap memory by deallocating non-essential +memory allocations held by the database library. + + +

    H17342: +The sqlite3_release_memory(N) returns the number +of bytes actually freed, which might be more or less +than the amount requested. + + +

    H17371: +The sqlite3_memory_used() routine returns the number of bytes +of memory currently outstanding (malloced but not freed). + + +

    H17373: +The sqlite3_memory_highwater() routine returns the maximum +value of sqlite3_memory_used() since the high-water mark +was last reset. + + +

    H17374: +The values returned by sqlite3_memory_used() and +sqlite3_memory_highwater() include any overhead +added by SQLite in its implementation of sqlite3_malloc(), +but not overhead added by any underlying system library +routines that sqlite3_malloc() may call. + + +

    H17375: +The memory high-water mark is reset to the current value of +sqlite3_memory_used() if and only if the parameter to +sqlite3_memory_highwater() is true. The value returned +by sqlite3_memory_highwater(1) is the high-water mark +prior to the reset. + + +

    H17392: +The sqlite3_randomness(N,P) interface writes N bytes of +high-quality pseudo-randomness into buffer P. + + +

    H17403: +The sqlite3_mprintf() and sqlite3_vmprintf() interfaces +return either pointers to zero-terminated UTF-8 strings held in +memory obtained from sqlite3_malloc() or NULL pointers if +a call to sqlite3_malloc() fails. + + +

    H17406: +The sqlite3_snprintf() interface writes a zero-terminated +UTF-8 string into the buffer pointed to by the second parameter +provided that the first parameter is greater than zero. + + +

    H17407: +The sqlite3_snprintf() interface does not write slots of +its output buffer (the second parameter) outside the range +of 0 through N-1 (where N is the first parameter) +regardless of the length of the string +requested by the format specification. + + +

    H17813: +A successful invocation of the sqlite3_blob_open(D,B,T,C,R,F,P) +interface shall open an sqlite3_blob object P on the BLOB +in column C of the table T in the database B on +the database connection D. + + +

    H17814: +A successful invocation of sqlite3_blob_open(D,...) shall start +a new transaction on the database connection D if that +connection is not already in a transaction. + + +

    H17816: +The sqlite3_blob_open(D,B,T,C,R,F,P) interface shall open +the BLOB for read and write access if and only if the F +parameter is non-zero. + + +

    H17819: +The sqlite3_blob_open() interface shall return SQLITE_OK on +success and an appropriate error code on failure. + + +

    H17821: +If an error occurs during evaluation of sqlite3_blob_open(D,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error. + + +

    H17824: +If any column in the row that a sqlite3_blob has open is +changed by a separate UPDATE or DELETE statement or by +an ON CONFLICT side effect, then the sqlite3_blob shall +be marked as invalid. + + +

    H17833: +The sqlite3_blob_close(P) interface closes an sqlite3_blob +object P previously opened using sqlite3_blob_open(). + + +

    H17836: +Closing an sqlite3_blob object using +sqlite3_blob_close() shall cause the current transaction to +commit if there are no other open sqlite3_blob objects +or prepared statements on the same database connection and +the database connection is in autocommit mode. + + +

    H17839: +The sqlite3_blob_close(P) interface shall close the +sqlite3_blob object P unconditionally, even if +sqlite3_blob_close(P) returns something other than SQLITE_OK. + + +

    H17843: +The sqlite3_blob_bytes(P) interface returns the size +in bytes of the BLOB that the sqlite3_blob object P +refers to. + + +

    H17853: +A successful invocation of sqlite3_blob_read(P,Z,N,X) +shall read N bytes of data out of the BLOB referenced by +BLOB handle P beginning at offset X and store those bytes +into buffer Z. + + +

    H17856: +In sqlite3_blob_read(P,Z,N,X) if the size of the BLOB +is less than N+X bytes, then the function shall leave the +Z buffer unchanged and return SQLITE_ERROR. + + +

    H17859: +In sqlite3_blob_read(P,Z,N,X) if X or N is less than zero +then the function shall leave the Z buffer unchanged +and return SQLITE_ERROR. + + +

    H17862: +The sqlite3_blob_read(P,Z,N,X) interface shall return SQLITE_OK +if N bytes are successfully read into buffer Z. + + +

    H17863: +If the BLOB handle P is expired and X and N are within bounds +then sqlite3_blob_read(P,Z,N,X) shall leave the Z buffer +unchanged and return SQLITE_ABORT. + + +

    H17865: +If the requested read could not be completed, +the sqlite3_blob_read(P,Z,N,X) interface shall return an +appropriate error code or extended error code. + + +

    H17868: +If an error occurs during evaluation of sqlite3_blob_read(P,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error, where D is the +database connection that was used to open the BLOB handle P. + + +

    H17873: +A successful invocation of sqlite3_blob_write(P,Z,N,X) +shall write N bytes of data from buffer Z into the BLOB +referenced by BLOB handle P beginning at offset X into +the BLOB. + + +

    H17874: +In the absence of other overriding changes, the changes +written to a BLOB by sqlite3_blob_write() shall +remain in effect after the associated BLOB handle expires. + + +

    H17875: +If the BLOB handle P was opened for reading only then +an invocation of sqlite3_blob_write(P,Z,N,X) shall leave +the referenced BLOB unchanged and return SQLITE_READONLY. + + +

    H17876: +If the size of the BLOB referenced by BLOB handle P is +less than N+X bytes then sqlite3_blob_write(P,Z,N,X) shall +leave the BLOB unchanged and return SQLITE_ERROR. + + +

    H17877: +If the BLOB handle P is expired and X and N are within bounds +then sqlite3_blob_write(P,Z,N,X) shall leave the BLOB +unchanged and return SQLITE_ABORT. + + +

    H17879: +If X or N are less than zero then sqlite3_blob_write(P,Z,N,X) +shall leave the BLOB referenced by BLOB handle P unchanged +and return SQLITE_ERROR. + + +

    H17882: +The sqlite3_blob_write(P,Z,N,X) interface shall return +SQLITE_OK if N bytes were successfully written into the BLOB. + + +

    H17885: +If the requested write could not be completed, +the sqlite3_blob_write(P,Z,N,X) interface shall return an +appropriate error code or extended error code. + + +

    H17888: +If an error occurs during evaluation of sqlite3_blob_write(P,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error, where D is the +database connection that was used to open the BLOB handle P. + + +
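Editorial illustration (not part of the diff): a sketch of incremental BLOB reading; the table docs, column body, and the attached database name "main" are hypothetical.

#include <sqlite3.h>

/* Read the content of a BLOB stored in column "body" of table "docs"
** at rowid iRow into a caller-supplied buffer. */
static int read_doc(sqlite3 *db, sqlite3_int64 iRow,
                    void *pBuf, int nBuf){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "docs", "body", iRow,
                             0 /* read-only */, &pBlob);
  if( rc!=SQLITE_OK ) return rc;

  int nBlob = sqlite3_blob_bytes(pBlob);
  if( nBlob>nBuf ) nBlob = nBuf;            /* keep N+X within bounds */
  rc = sqlite3_blob_read(pBlob, pBuf, nBlob, 0);

  /* The handle is closed unconditionally even if close reports an error. */
  sqlite3_blob_close(pBlob);
  return rc;
}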


    +This page last modified 2009/02/19 14:27:55 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/hlr20000.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/hlr20000.html --- sqlite3-3.4.2/www/hlr20000.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/hlr20000.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,89 @@ + + +C-Language Interfaces For Extending SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +Requirements for the C-Language Interfaces that Extend SQLite +

    + +

    +This document contains detailed requirements for the C-language +interfaces to SQLite that are used to extend SQLite. +

    + +
    + + +
    +This page last modified 2009/02/19 14:35:45 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/hlr30000.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/hlr30000.html --- sqlite3-3.4.2/www/hlr30000.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/hlr30000.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,1919 @@ + + +SQLite Database File Format Requirements + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +Requirements for the SQLite Database File Format +

    + +

    +This document contains detailed requirements for the database +file format and the file I/O characteristics of SQLite. +

    + +
    + + +

    H30010: +The system shall ensure that at the successful conclusion of a +database transaction the contents of the database file constitute +a well-formed SQLite database file. + + +

    H30020: +The system shall ensure that at the successful conclusion of a +database transaction the contents of the database file are a valid +serialization of the contents of the logical SQL database produced +by the transaction. + + +

    H30030: +The first 16 bytes of a well-formed database file shall contain +the UTF-8 encoding of the string "SQLite format 3" followed by a +single nul-terminator byte. + + +

    H30040: +The 6 bytes beginning at byte offset 18 of a well-formed database +image shall contain the values 0x01, 0x01, 0x00, 0x40, 0x20 and +0x20, respectively. + + +

    H30120: +The 4-byte big-endian signed integer field at byte offset 44 of a +well-formed database image, the schema layer file format field, +shall be set to an integer value between 1 and 4, inclusive. + + +

    H30130: +The 4-byte big-endian unsigned integer field at byte offset 48 of a +well-formed database image shall be set to the value of the +database default page-cache size. + + +

    H30140: +If the database is not an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the value 0. + + +

    H30141: +If the database is an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the numerically largest root-page number +of any table or index B-Tree within the database image. + + +

    H30150: +The 4 byte big-endian unsigned integer field at byte offset 56 of a +well-formed database image shall be set to 1 if the database text-encoding +is UTF-8, 2 if the database text-encoding is little-endian UTF-16, and 3 +if the database text-encoding is big-endian UTF-16. + + +

    H30160: +The 4-byte big-endian unsigned integer field at byte offset 60 of a +well-formed database image shall be set to the value of the +database user-cookie. + + +

    H30190: +The 2-byte big-endian unsigned integer field at byte offset 16 of a +well-formed database image shall be set to the value of the database +page-size. + + +

    H30191: +The page-size of an SQLite database in bytes shall be an integer power +of 2 between 512 and 32768, inclusive. + + +

    H30170: +The 4-byte big-endian unsigned integer field at byte offset 64 of a +well-formed database image shall be set to the value of the database +incremental-vacuum flag. + + +

    H30171: +The value of the incremental-vacuum flag of an SQLite database shall be +either 0 or 1. + + +
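Editorial illustration (not part of the diff): a sketch that decodes the big-endian header fields at the offsets listed above from a 100-byte buffer already read from the start of a database file.

#include <stdio.h>

/* Decode a 4-byte big-endian unsigned integer. */
static unsigned be32(const unsigned char *p){
  return ((unsigned)p[0]<<24) | ((unsigned)p[1]<<16)
       | ((unsigned)p[2]<<8)  |  (unsigned)p[3];
}

/* aHdr points to the first 100 bytes of the database file. */
static void dump_header(const unsigned char *aHdr){
  unsigned pageSize   = ((unsigned)aHdr[16]<<8) | aHdr[17]; /* offset 16 */
  unsigned schemaFmt  = be32(&aHdr[44]);   /* offset 44: format 1..4      */
  unsigned cacheSize  = be32(&aHdr[48]);   /* offset 48: default cache    */
  unsigned lastRoot   = be32(&aHdr[52]);   /* offset 52: auto-vacuum root */
  unsigned encoding   = be32(&aHdr[56]);   /* offset 56: 1=UTF-8, 2/3=UTF-16 */
  unsigned userCookie = be32(&aHdr[60]);   /* offset 60: user cookie      */
  unsigned incrVac    = be32(&aHdr[64]);   /* offset 64: 0 or 1           */
  printf("page size %u, schema format %u, cache %u, last root %u,\n"
         "text encoding %u, user cookie %u, incremental vacuum %u\n",
         pageSize, schemaFmt, cacheSize, lastRoot,
         encoding, userCookie, incrVac);
}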

    H30180: +In a well-formed non-autovacuum database (one with a zero stored +in the 4-byte big-endian integer value beginning at byte offset +52 of the database file header), the incremental vacuum flag is +set to 0. + + +

    H30200: +The size of a well formed database image shall be an integer +multiple of the database page size. + + +

    H30210: +Each page of a well formed database image shall be exactly one of a +B-Tree page, an overflow page, a free page, a +pointer-map page or the locking page. + + +

    H30220: +The database page that starts at byte offset 2^30 (1,073,741,824), +the locking page, shall never be used for any purpose. + + +

    H30230: +In a well-formed database file, the portion of the first +database page not consumed by the database file-header (all but the +first 100 bytes) contains the root node of a table B-Tree, +the schema table. + + +

    H30240: +All records stored in the schema table contain exactly five +fields. + + +

    H30250: +For each SQL table in the database apart from the schema table +itself ("sqlite_master"), the schema table of a well-formed +database file contains an associated record. + + +

    H30260: +The first field of each schema table record associated with an +SQL table shall be the text value "table". + + +

    H30270: +The second field of each schema table record associated with an +SQL table shall be a text value set to the name of the SQL table. + + +

    H30280: +In a well-formed database file, the third field of all +schema table records associated with SQL tables shall contain +the same value as the second field. + + +

    H30290: +In a well-formed database file, the fourth field of all +schema table records associated with SQL tables that are not +virtual tables contains the page number (an integer value) of the root +page of the associated table B-Tree structure within the +database file. + + +

    H30300: +If the associated database table is a virtual table, the fourth +field of the schema table record shall contain the integer +value 0 (zero). + + +

    H30310: +In a well-formed database, the fifth field of all schema table +records associated with SQL tables shall contain a "CREATE TABLE" +or "CREATE VIRTUAL TABLE" statement (a text value). The details +of the statement shall be such that executing the statement +would create a table of precisely the same name and schema as the +existing database table. + + +

    H30320: +For each PRIMARY KEY or UNIQUE constraint present in the definition +of each SQL table in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index", and the second field set to a text value containing a +string of the form "sqlite_autoindex_<name>_<idx>", where +<name> is the name of the SQL table and <idx> is an +integer value. + + +

    H30330: +In a well-formed database, the third field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the name of the table to which the constraint applies (a +text value). + + +

    H30340: +In a well-formed database, the fourth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the page number (an integer value) of the root page of the +associated index B-Tree structure. + + +

    H30350: +In a well-formed database, the fifth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain an SQL NULL value. + + +

    H30360: +For each SQL index in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index" and the second field set to a text value containing the +name of the SQL index. + + +

    H30370: +In a well-formed database, the third field of all schema table +records associated with SQL indexes shall contain the name of the +SQL table that the index applies to. + + +

    H30380: +In a well-formed database, the fourth field of all schema table +records associated with SQL indexes shall contain the page number +(an integer value) of the root page of the associated index B-Tree +structure. + + +

    H30390: +In a well-formed database, the fifth field of all schema table +records associated with SQL indexes shall contain an SQL "CREATE +INDEX" statement (a text value). The details of the statement shall +be such that executing the statement would create an index of +precisely the same name and content as the existing database index. + + +

    H30400: +For each SQL view in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "view" and the second field set to a text value containing the +name of the SQL view. + + +

    H30410: +In a well-formed database, the third field of all schema table +records associated with SQL views shall contain the same value as +the second field. + + +

    H30420: +In a well-formed database, the fourth field of all schema table +records associated with SQL views shall contain the integer value 0. + + +

    H30430: +In a well-formed database, the fifth field of all schema table +records associated with SQL views shall contain an SQL "CREATE +VIEW" statement (a text value). The details of the statement shall +be such that executing the statement would create a view of +precisely the same name and definition as the existing database view. + + +

    H30440: +For each SQL trigger in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "trigger" and the second field set to a text value containing the +name of the SQL trigger. + + +

    H30450: +In a well-formed database, the third field of all schema table +records associated with SQL triggers shall contain the name of the +database table or view to which the trigger applies. + + +

    H30460: +In a well-formed database, the fourth field of all schema table +records associated with SQL triggers shall contain the integer value 0. + + +

    H30470: +In a well-formed database, the fifth field of all schema table +records associated with SQL triggers shall contain an SQL "CREATE +TRIGGER" statement (a text value). The details of the statement shall +be such that executing the statement would create a trigger of +precisely the same name and definition as the existing database trigger. + + +

    H30480: +In an auto-vacuum database, all pages that occur before the page +number stored in the auto-vacuum last root-page field +of the database file header (see H30140) must be either B-Tree root +pages, pointer-map pages or the locking page. + + +

    H30490: +In an auto-vacuum database, no B-Tree root pages may occur +on or after the page number stored in the auto-vacuum last root-page field +of the database file header (see H30140). + + +

    H30500: +As well as the schema table, a well-formed database file +contains N table B-Tree structures, where N is the +number of non-virtual tables in the logical database, excluding the +sqlite_master table but including sqlite_sequence and other system +tables. + + +

    H30510: +A well-formed database file contains N index B-Tree structures, +where N is the number of indexes in the logical database, +including indexes created by UNIQUE or PRIMARY KEY clauses in the +declaration of SQL tables. + + +

    H30520: +A 64-bit signed integer value stored in variable length integer +format consumes from 1 to 9 bytes of space. + + +

    H30530: +The most significant bit of all bytes except the last in a serialized +variable length integer is always set. Unless the serialized +form consumes the maximum 9 bytes available, the most significant +bit of the final byte of the representation is always cleared. + + +

    H30540: +The eight least significant bits of the 64-bit twos-complement +representation of a value stored in a 9 byte variable length +integer are stored in the final byte (byte offset 8) of the +serialized variable length integer. The other 56 bits are +stored in the 7 least significant bits of each of the first 8 bytes +of the serialized variable length integer, in order from +most significant to least significant. + + +

    H30550: +A variable length integer that consumes less than 9 bytes of +space contains a value represented as an N-bit unsigned +integer, where N is equal to the number of bytes consumed by +the serial representation (between 1 and 8) multiplied by 7. The +N bits are stored in the 7 least significant bits of each +byte of the serial representation, from most to least significant. + + +
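
The encoding described by H30520 through H30550 can be decoded with a short loop. An informative C sketch, not part of the requirements (the function name is invented): the high bit of each of the first eight bytes marks continuation, and a ninth byte, if present, contributes all eight of its bits.

  #include <stdint.h>

  /* Decode a variable length integer (H30520-H30550). Returns the number of
  ** bytes consumed (1 to 9) and stores the decoded value in *v. */
  static int varint_get(const uint8_t *p, int64_t *v){
    uint64_t x = 0;
    int i;
    for(i=0; i<8; i++){
      x = (x<<7) | (p[i] & 0x7f);
      if( (p[i] & 0x80)==0 ){ *v = (int64_t)x; return i+1; }
    }
    x = (x<<8) | p[8];     /* ninth byte: all 8 of its bits are significant (H30540) */
    *v = (int64_t)x;
    return 9;
  }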

    H30560: +A database record consists of a database record header, +followed by database record data. The first part of the +database record header is a variable length integer +containing the total size (including itself) of the header in bytes. + + +

    H30570: +Following the length field, the remainder of the database record +header is populated with N variable length integer +fields, where N is the number of database values stored in +the record. + + +

    H30580: +Following the database record header, the database record +data is made up of N variable length blobs of data, where +N is again the number of database values stored in the record. +The nth blob contains the data for the nth value in +the database record. The size and format of each blob of data is +encoded in the corresponding variable length integer field +in the database record header. + + +

    H30590: +A value of 0 stored within the database record header indicates +that the corresponding database value is an SQL NULL. In this case +the blob of data in the data area is 0 bytes in size. + + +

    H30600: +A value of 1 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 1-byte +big-endian signed integer. + + +

    H30610: +A value of 2 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 2-byte +big-endian signed integer. + + +

    H30620: +A value of 3 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 3-byte +big-endian signed integer. + + +

    H30630: +A value of 4 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 4-byte +big-endian signed integer. + + +

    H30640: +A value of 5 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 6-byte +big-endian signed integer. + + +

    H30650: +A value of 6 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as an 8-byte +big-endian signed integer. + + +

    H30660: +A value of 7 stored within the database record header indicates +that the corresponding database value is an SQL real (floating +point number). In this case the blob of data contains an 8-byte +IEEE floating point number, stored in big-endian byte order. + + +

    H30670: +A value of 8 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 0. +In this case the blob of data in the data area is 0 bytes in size. + + +

    H30680: +A value of 9 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 1. +In this case the blob of data in the data area is 0 bytes in size. + + +

    H30690: +An even value greater than or equal to 12 stored within the +database record header indicates that the corresponding +database value is an SQL blob field. The blob of data contains the +value data. The blob of data is exactly (n-12)/2 bytes +in size, where n is the integer value stored in the +database record header. + + +

    H30700: +An odd value greater than or equal to 13 stored within the +database record header indicates that the corresponding +database value is an SQL text field. The blob of data contains the +value text stored using the database encoding, with no +nul-terminator. The blob of data is exactly (n-13)/2 bytes +in size, where n is the integer value stored in the +database record header. + + +

    H30710: +In a well-formed database file, if the values 8 or 9 appear within +any database record header within the database, then the +schema-layer file format (stored at byte offset 44 of the +database file header) must be set to 4. + + +

    H30720: +In a well-formed database file, the values 10 and 11, and all +negative values may not appear within any database record header +in the database. + + +
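
The size of each blob of data in the record data area follows directly from the corresponding header value, as described by H30590 through H30720. An informative C transcription, not part of the requirements (the function name is invented):

  #include <stdint.h>

  /* Bytes of record data consumed by a value with the given header value
  ** (serial type), per H30590-H30700. Types 10 and 11 never appear (H30720). */
  static int64_t serial_type_size(int64_t t){
    static const int64_t small[12] = {0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0};
    if( t>=12 ){
      if( (t%2)==0 ) return (t-12)/2;   /* blob (H30690) */
      return (t-13)/2;                  /* text (H30700) */
    }
    return small[t];
  }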

    H30730: +The pages in an index B-Tree structure are arranged into a tree +structure such that all leaf pages are at the same depth. + + +

    H30740: +Each leaf node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a database record. + + +

    H30750: +Each internal node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a child page number, C, +and a database record R. All database records stored within +the sub-tree headed by page C are smaller than record R, +according to the index sort order (see below). Additionally, unless +R is the smallest database record stored on the internal node +page, all database records within the sub-tree headed by C are +greater than R-1, where R-1 is the +largest database record on the internal node page that is smaller +than R. + + +

    H30760: +As well as child page numbers associated with B-Tree cells, each +internal node page in an index B-Tree contains the page number +of an extra child page, the right-child page. All database +records stored in all B-Tree cells within the sub-tree headed by the +right-child page are greater than all database records +stored within B-Tree cells on the internal node page. + + +

    H30770: +In a well-formed database, each index B-Tree contains a single entry +for each row in the indexed logical database table. + + +

    H30780: +Each database record (key) stored by an index B-Tree in a +well-formed database contains the same number of values, the number +of indexed columns plus one. + + +

    H30790: +The final value in each database record (key) stored by an +index B-Tree in a well-formed database contains the rowid (an integer +value) of the corresponding logical database row. + + +

    H30800: +The first N values in each database record (key) +stored in an index B-Tree, where N is the number of indexed +columns, contain the values of the indexed columns from the +corresponding logical database row, in the order specified for the +index. + + +

    H30810: +The b-tree page flags field (the first byte) of each database +page used as an internal node of an index B-Tree structure is set to +0x02. + + +

    H30820: +The b-tree page flags field (the first byte) of each database +page used as a leaf node of an index B-Tree structure is set to 0x0A. + + +

    H30830: +The first byte of each database page used as a B-Tree page contains +the b-tree page flags field. On page 1, the b-tree page +flags field is stored directly after the 100 byte file header +at byte offset 100. + + +

    H30840: +The number of B-Tree cells stored on a B-Tree page is stored as a +2-byte big-endian integer starting at byte offset 3 of the B-Tree +page. On page 1, this field is stored at byte offset 103. + + +

    H30850: +The 2-byte big-endian integer starting at byte offset 5 of each +B-Tree page contains the byte-offset from the start of the page +to the start of the cell content area, which consumes all space +from this offset to the end of the usable region of the page. +On page 1, this field is stored at byte offset 105. All B-Tree +cells on the page are stored within the cell-content area. + + +

    H30860: +On each page used as an internal node of a B-Tree structure, the +page number of the rightmost child node in the B-Tree structure is +stored as a 4-byte big-endian unsigned integer beginning at byte +offset 8 of the database page, or byte offset 108 on page 1. + + +

    H30870: +Immediately following the page header on each B-Tree page is the +cell offset array, consisting of N 2-byte big-endian +unsigned integers, where N is the number of cells stored +on the B-Tree page (H30840). On an internal node B-Tree page, +the cell offset array begins at byte offset 12, or on a leaf +page, byte offset 8. For the B-Tree node on page 1, these +offsets are 112 and 108, respectively. + + +
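
Requirements H30830 through H30870 fix the layout of the b-tree page header and the cell offset array. An informative C reader, not part of the requirements (names invented); pg points to the start of the page and pgno is its page number:

  #include <stdint.h>

  static uint32_t rd16(const uint8_t *p){ return ((uint32_t)p[0]<<8)|p[1]; }
  static uint32_t rd32(const uint8_t *p){
    return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
  }

  static void read_btree_page_header(const uint8_t *pg, uint32_t pgno){
    const uint8_t *h = pg + (pgno==1 ? 100 : 0);   /* header follows the file header on page 1 (H30830) */
    uint8_t  flags       = h[0];                   /* b-tree page flags (H30830)              */
    uint32_t ncell       = rd16(&h[3]);            /* number of B-Tree cells (H30840)         */
    uint32_t content_off = rd16(&h[5]);            /* start of the cell content area (H30850) */
    int      leaf        = (flags==0x0A || flags==0x0D);
    uint32_t right_child = leaf ? 0 : rd32(&h[8]); /* internal nodes only (H30860)            */
    const uint8_t *cell_offsets = h + (leaf ? 8 : 12);  /* cell offset array (H30870)         */
    (void)ncell; (void)content_off; (void)right_child; (void)cell_offsets;
  }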

    H30880: +The cell offset array and the cell content area (H30850) +may not overlap. + + +

    H30890: +Each value stored in the cell offset array must be greater +than or equal to the offset to the cell content area (H30850), +and less than the database page size. + + +

    H30900: +The N values stored within the cell offset array are the +byte offsets from the start of the B-Tree page to the beginning of +each of the N cells stored on the page. + + +

    H30910: +No two B-Tree cells may overlap. + + +

    H30920: +Within the cell content area, all blocks of contiguous +free-space (space not used by B-Tree cells) greater than 3 bytes in +size are linked together into a linked list, the free block list. +Such blocks of free space are known as free blocks. + + +

    H30930: +The first two bytes of each free block contain the offset +of the next free block in the free block list formatted +as a 2-byte big-endian integer, relative to the start of the database +page. If there is no next free block, then the first two +bytes are set to 0x00. + + +

    H30940: +The second two bytes (byte offsets 2 and 3) of each free block +contain the total size of the free block, formatted as a 2-byte +big-endian integer. + + +

    H30950: +On all B-Tree pages, the offset of the first free block in the +free block list, relative to the start of the database page, +is stored as a 2-byte big-endian integer starting at byte offset +1 of the database page. If there is no first free block +(because the free block list is empty), then the two bytes +at offsets 1 and 2 of the database page are set to 0x00. On page 1, +this field is stored at byte offset 101 of the page. + + +
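
The free blocks described by H30920 through H30950 form a simple singly linked list of 2-byte offsets. An informative C walk, not part of the requirements (names invented); hdr_off is 100 for page 1 and 0 otherwise:

  #include <stdint.h>

  static uint32_t fb16(const uint8_t *p){ return ((uint32_t)p[0]<<8)|p[1]; }

  /* Sum the sizes of all free blocks on a B-Tree page (H30920-H30950). */
  static uint32_t total_free_block_bytes(const uint8_t *pg, uint32_t hdr_off){
    uint32_t total = 0;
    uint32_t off = fb16(&pg[hdr_off+1]);    /* first free block, or 0 if none (H30950) */
    while( off!=0 ){
      total += fb16(&pg[off+2]);            /* size of this free block (H30940)        */
      off = fb16(&pg[off]);                 /* next free block, or 0 (H30930)          */
    }
    return total;
  }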

    H30960: +Within the cell-content area, all blocks of contiguous free-space +(space not used by B-Tree cells) less than or equal to 3 bytes in +size are known as fragments. The total size of all +fragments on a B-Tree page is stored as a 1-byte unsigned +integer at byte offset 7 of the database page. On page 1, this +field is stored at byte offset 107. + + +

    H30970: +Each B-Tree cell belonging to an internal node page of an index +B-Tree consists of a 4-byte big-endian unsigned integer, the +child page number, followed by a variable length integer +field, followed by a database record. The +variable length integer field contains the length of the +database record in bytes. + + +

    H30980: +Each B-Tree cell belonging to a leaf page of an index B-Tree +consists of a variable length integer field, followed by +a database record. The variable length integer field +contains the length of the database record in bytes. + + +

    H30990: +If the database record stored in an index B-Tree page is +sufficiently small, then the entire cell is stored within the +index B-Tree page. Sufficiently small is defined as equal to or +less than max-local, where: + +max-local := (usable-size - 12) * 64 / 255 - 23 + + +

    H31000: +If the database record stored as part of an index B-Tree cell is too +large to be stored entirely within the B-Tree page (as defined by +H30990), then only a prefix of the database record is stored +within the B-Tree page and the remainder stored in an overflow +chain. In this case, the database record prefix is immediately +followed by the page number of the first page of the +overflow chain, formatted as a 4-byte big-endian unsigned +integer. + + +

    H31010: +When a database record belonging to an index B-Tree cell is +stored partially within an overflow page chain, the size +of the prefix stored within the index B-Tree page is N bytes, +where N is calculated using the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 12) * 64 / 255 - 23 +N := min-local + ((record-size - min-local) % (usable-size - 4)) +if( N > max-local ) N := min-local + + +
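
The arithmetic of H30990 and H31010 can be transcribed directly; integer (truncating) division is intended throughout. An informative C version, not part of the requirements (function name invented):

  /* Bytes of an index B-Tree record stored locally on the page (H30990, H31010). */
  static int index_local_size(int usable_size, int record_size){
    int max_local = (usable_size - 12) * 64 / 255 - 23;
    int min_local = (usable_size - 12) * 32 / 255 - 23;
    int n;
    if( record_size<=max_local ) return record_size;  /* entire cell fits (H30990) */
    n = min_local + (record_size - min_local) % (usable_size - 4);
    if( n>max_local ) n = min_local;                   /* H31010 */
    return n;
  }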

    H31020: +The pages in a table B-Tree structure are arranged into a tree +structure such that all leaf pages are at the same depth. + + +

    H31030: +Each leaf page in a table B-Tree structure contains one or more +B-Tree cells, where each cell contains a 64-bit signed integer key +value and a database record. + + +

    H31040: +Each internal node page in a table B-Tree structure contains one or +more B-Tree cells, where each cell contains a 64-bit signed integer +key value, K, and a child page number, C. All integer key +values in all B-Tree cells within the sub-tree headed by page C +are less than or equal to K. Additionally, unless K +is the smallest integer key value stored on the internal node page, +all integer keys within the sub-tree headed by C are greater +than K-1, where K-1 is the largest +integer key on the internal node page that is smaller than K. + + +

    H31050: +As well as child page numbers associated with B-Tree cells, each +internal node page in a table B-Tree contains the page number +of an extra child page, the right-child page. All key values +in all B-Tree cells within the sub-tree headed by the right-child +page are greater than all key values stored within B-Tree cells +on the internal node page. + + +

    H31060: +In a well-formed database, each table B-Tree contains a single entry +for each row in the corresponding logical database table. + + +

    H31070: +The key value (a 64-bit signed integer) for each B-Tree entry is +the same as the value of the rowid field of the corresponding +logical database row. + + +

    H31080: +The SQL values serialized to make up each database record +stored as ancillary data in a table B-Tree shall be equal to the +values taken by the N leftmost columns of the corresponding +logical database row, where N is the number of values in the +database record. + + +

    H31090: +If a logical database table column is declared as an "INTEGER +PRIMARY KEY", then instead of its integer value, an SQL NULL +shall be stored in its place in any database records used as +ancillary data in a table B-Tree. + + +

    H31100: +If the database schema layer file-format (the value stored +as a 4-byte integer at byte offset 44 of the file header) is 1, +then all database records stored as ancillary data in a table +B-Tree structure have the same number of fields as there are +columns in the corresponding logical database table. + + +

    H31110: +If the database schema layer file-format value is two or +greater and the rightmost M columns of a row contain SQL NULL +values, then the corresponding record stored as ancillary data in +the table B-Tree has between N-M and N fields, +where N is the number of columns in the logical database +table. + + +

    H31120: +If the database schema layer file-format value is three or +greater and the rightmost M columns of a row contain their +default values according to the logical table declaration, then the +corresponding record stored as ancillary data in the table B-Tree +may have as few as N-M fields, where N is the +number of columns in the logical database table. + + +

    H31130: +In a well-formed database file, the first byte of each page used +as an internal node of a table B-Tree structure is set to 0x05. + + +

    H31140: +In a well-formed database file, the first byte of each page used +as a leaf node of a table B-Tree structure is set to 0x0D. + + +

    H31150: +B-Tree cells belonging to table B-Tree internal node pages consist +of exactly two fields, a 4-byte big-endian unsigned integer +immediately followed by a variable length integer. These +fields contain the child page number and key value respectively +(see H31030). + + +

    H31160: +B-Tree cells belonging to table B-Tree leaf node pages consist +of three fields, two variable length integer values +followed by a database record. The size of the database record +in bytes is stored in the first of the two +variable length integer fields. The second of the two +variable length integer fields contains the 64-bit signed +integer key (see H31030). + + +

    H31170: +If the size of the record stored in a table B-Tree leaf page cell +is less than or equal to (usable page size - 35) bytes, then +the entire cell is stored on the B-Tree leaf page. In a well-formed +database, usable page size is the same as the database +page size. + + +

    H31180: +If a table B-Tree cell is too large to be stored entirely on +a leaf page (as defined by H31170), then a prefix of the cell +is stored on the leaf page, and the remainder stored in an +overflow page chain. In this case the cell prefix +stored on the B-Tree leaf page is immediately followed by a +4-byte big-endian unsigned integer containing the page number +of the first overflow page in the chain. + + +

    H31190: +When a table B-Tree cell is stored partially in an +overflow page chain, the prefix stored on the B-Tree +leaf page consists of the two variable length integer fields, +followed by the first N bytes of the database record, where +N is determined by the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 35) +N := min-local + (record-size - min-local) % (usable-size - 4) +if( N > max-local ) N := min-local + + + +
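
The table B-Tree split rule of H31170 and H31190 differs from the index rule only in the max-local term. An informative C version, not part of the requirements (function name invented):

  /* Bytes of a table B-Tree leaf record stored locally on the page (H31170, H31190). */
  static int table_local_size(int usable_size, int record_size){
    int max_local = usable_size - 35;
    int min_local = (usable_size - 12) * 32 / 255 - 23;
    int n;
    if( record_size<=max_local ) return record_size;  /* entire cell fits (H31170) */
    n = min_local + (record_size - min_local) % (usable_size - 4);
    if( n>max_local ) n = min_local;                   /* H31190 */
    return n;
  }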

    H31200: +A single overflow page may store up to available-space +bytes of database record data, where available-space is equal +to (usable-size - 4). + + +

    H31210: +When a database record is too large to store within a B-Tree page +(see H31170 and H31000), a prefix of the record is stored within +the B-Tree page and the remainder stored across N overflow +pages. In this case N is the minimum number of pages required +to store the portion of the record not stored on the B-Tree page, +given the maximum payload per overflow page defined by H31200. + + +

    H31220: +The list of overflow pages used to store a single database record +are linked together in a singly linked list known as an +overflow chain. The first four bytes of each page except the +last in an overflow chain are used to store the page number +of the next page in the linked list, formatted as an unsigned +big-endian integer. The first four bytes of the last page in an +overflow chain are set to 0x00. + + +

    H31230: +Each overflow page except the last in an overflow chain +contains N bytes of record data starting at byte offset 4 of +the page, where N is the maximum payload per overflow page, +as defined by H31200. The final page in an overflow chain +contains the remaining data, also starting at byte offset 4. + + +
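
An overflow chain (H31200 through H31230) is a linked list of pages, each holding up to usable-size minus 4 bytes of payload after its 4-byte next-page field. An informative C sketch, not part of the requirements; read_page() is a hypothetical helper that returns a pointer to the content of a page given its number:

  #include <stdint.h>
  #include <string.h>

  static uint32_t ov32(const uint8_t *p){
    return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
  }
  extern const uint8_t *read_page(uint32_t pgno);   /* hypothetical page reader */

  /* Copy nByte bytes of overflow payload into buf, starting at the first
  ** page of the chain (H31200-H31230). */
  static void read_overflow_chain(uint32_t pgno, int usable_size,
                                  uint8_t *buf, int nByte){
    int avail = usable_size - 4;            /* payload per overflow page (H31200)     */
    while( nByte>0 && pgno!=0 ){
      const uint8_t *pg = read_page(pgno);
      int n = nByte<avail ? nByte : avail;
      memcpy(buf, &pg[4], n);               /* data begins at byte offset 4 (H31230)  */
      buf += n; nByte -= n;
      pgno = ov32(&pg[0]);                  /* next page, 0 on the last page (H31220) */
    }
  }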

    H31240: +All free pages in a well-formed database file are part of +the database free page list. + + +

    H31250: +Each free page is either a free list trunk page or a +free list leaf page. + + +

    H31260: +All free list trunk pages are linked together into a singly +linked list. The first 4 bytes of each page in the linked list +contains the page number of the next page in the list, formatted +as an unsigned big-endian integer. The first 4 bytes of the last +page in the linked list are set to 0x00. + + +

    H31270: +The second 4 bytes of each free list trunk page contains +the number of free list leaf page numbers stored on the free list +trunk page, formatted as an unsigned big-endian integer. + + +

    H31280: +Beginning at byte offset 8 of each free list trunk page are +N page numbers, each formatted as a 4-byte unsigned big-endian +integer, where N is the value described in requirement H31270. + + +

    H31290: +All page numbers stored on all free list trunk pages refer to +database pages that are free list leaves. + + +

    H31300: +The page number of each free list leaf page in a well-formed +database file appears exactly once within the set of page numbers +stored on free list trunk pages. + + +

    H31310: +The total number of pages in the free list, including all free list +trunk and free list leaf pages, is stored as a 4-byte unsigned +big-endian integer at offset 36 of the database file header. + + +

    H31320: +The page number of the first page in the linked list of free list +trunk pages is stored as a 4-byte big-endian unsigned integer at +offset 32 of the database file header. If there are no free list +trunk pages in the database file, then the value stored at +offset 32 of the database file header is 0. + + +
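
The free-list structure of H31260 through H31320 can be traversed from the header field at offset 32. An informative C sketch, not part of the requirements, reusing the hypothetical read_page() helper; it counts the leaf page numbers recorded on the trunk pages:

  #include <stdint.h>

  static uint32_t fl32(const uint8_t *p){
    return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
  }
  extern const uint8_t *read_page(uint32_t pgno);   /* hypothetical page reader */

  /* Count free-list leaf pages by walking the trunk pages (H31260-H31280).
  ** first_trunk is the value stored at offset 32 of the file header (H31320). */
  static uint32_t count_free_leaves(uint32_t first_trunk){
    uint32_t n = 0, pgno = first_trunk;
    while( pgno!=0 ){
      const uint8_t *pg = read_page(pgno);
      n += fl32(&pg[4]);      /* number of leaf page numbers on this trunk page (H31270) */
      pgno = fl32(&pg[0]);    /* next trunk page, or 0 (H31260)                           */
    }
    return n;
  }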

    H31330: +Non-autovacuum databases do not contain pointer-map pages. + + +

    H31340: +In an auto-vacuum database file, every (num-entries + 1)th +page beginning with page 2 is designated a pointer-map page, where +num-entries is calculated as: + +num-entries := database-usable-page-size / 5 + + + +

    H31350: +In an auto-vacuum database file, each pointer-map page contains +a pointer map entry for each of the num-entries (defined by +H31340) pages that follow it, if they exist. + + +

    H31360: +Each pointer-map page entry consists of a 1-byte page type and a +4-byte page parent number, 5 bytes in total. + + +

    H31370: +Pointer-map entries are packed into the pointer-map page in order, +starting at offset 0. The entry associated with the database +page that immediately follows the pointer-map page is located at +offset 0. The entry for the next page after that is located at +offset 5, and so on. + + +
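
The placement rules of H31340 through H31370 locate the pointer-map entry for any page that is not itself a pointer-map page (and is not page 1 or the locking page). An informative C version, not part of the requirements (names invented):

  #include <stdint.h>

  /* Locate the pointer-map entry for database page pgno (H31340-H31370).
  ** pgno must not be page 1, a pointer-map page, or the locking page.
  ** On return *map_pgno is the pointer-map page holding the entry and
  ** *offset is the byte offset of the 5-byte entry within that page. */
  static void ptrmap_locate(uint32_t usable_size, uint32_t pgno,
                            uint32_t *map_pgno, uint32_t *offset){
    uint32_t num_entries = usable_size / 5;             /* entries per map page (H31340)  */
    uint32_t group = (pgno - 2) / (num_entries + 1);
    *map_pgno = 2 + group * (num_entries + 1);          /* every (num-entries+1)th page   */
    *offset = (pgno - *map_pgno - 1) * 5;               /* packed 5-byte entries (H31370) */
  }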

    H31380: +For each page except page 1 in an auto-vacuum database file that is +the root page of a B-Tree structure, the page type of the +corresponding pointer-map entry is set to the value 0x01 and the +parent page number is zero. + + +

    H31390: +For each page that is a part of an auto-vacuum database file free-list, +the page type of the corresponding pointer-map entry is set to the +value 0x02 and the parent page number is zero. + + +

    H31400: +For each page in a well-formed auto-vacuum database that is the first +page in an overflow chain, the page type of the corresponding +pointer-map entry is set to 0x03 and the parent page number field +is set to the page number of the B-Tree page that contains the start +of the B-Tree cell stored in the overflow-chain. + + +

    H31410: +For each page that is the second or a subsequent page in an overflow +chain, the page type of the corresponding pointer-map entry is set to +0x04 and the parent page number field is set to the page number of the +preceding page in the overflow chain. + + +

    H31420: +For each page that is not a root page but is a part of a B-Tree tree +structure (not part of an overflow chain), the page type of the +corresponding pointer-map entry is set to the value 0x05 and the parent +page number field is set to the page number of the parent node in the +B-Tree structure. + + +

    H32000: +If a journal file contains a well-formed master-journal pointer and the +named master-journal file does not exist then the journal file shall be +considered invalid. + + +

    H32010: +If the first 28 bytes of a journal file do not contain a well-formed +journal header, then the journal file shall be considered +invalid. + + +

    H32020: +If the journal file exists within the file-system and none of H32000, +H32010 or H33080 apply, then the journal file shall be considered valid. + + +

    H32030: +If there exists a valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 4-byte big-endian unsigned integer at byte +offset 24 of the journal file. + + +

    H32040: +If there exists a valid journal file in the file-system, then the +number of pages in the database image shall be the value stored as +a 4-byte big-endian unsigned integer at byte offset 16 of the +journal file. + + +

    H32050: +If there is no valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 2-byte big-endian unsigned integer at byte +offset 16 of the database file. + + +

    H32060: +If there is no valid journal file in the file-system, then the +number of pages in the database image shall be calculated by dividing +the size of the database file in bytes by the database page-size. + + +

    H32070: +If there exists a valid journal file in the file-system, then the +contents of each page of the database image for which there is a valid +journal record in the journal file shall be read from the +corresponding journal record. + + +

    H32080: +The contents of all database image pages for which there is no valid +journal record shall be read from the database file. + + +

    H32090: +A buffer of 28 bytes shall be considered a well-formed journal +header if it is not excluded by requirements H32180, H32190 or H32200. + + +

    H32180: +A buffer of 28 bytes shall only be considered a well-formed journal +header if the first eight bytes of the buffer contain the values 0xd9, +0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively. + + +

    H32190: +A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the sector size field (the 4-byte big-endian +unsigned integer at offset 20 of the buffer) contains a value that +is an integer power of two greater than 512. + + +

    H32200: +A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the page size field (the 4-byte big-endian +unsigned integer at offset 24 of the buffer) contains a value that +is an integer power of two greater than 512. + + +

    H32100: +A buffer of (8 + page size) bytes shall be considered a well-formed journal +record if it is not excluded by requirements H32110 or H32120. + + +

    H32110: +A journal record shall only be considered to be well-formed if the page number +field contains a value other than zero and other than the locking-page number, +calculated using the page size found in the first journal header of the journal +file that contains the journal record. + + +

    H32120: +A journal record shall only be considered to be well-formed if the checksum +field contains a value equal to the sum of the value stored in the +checksum-initializer field of the journal header that precedes the record +and the values stored in every 200th byte of the page data field (each interpreted +as an 8-bit unsigned integer), starting with byte offset (page-size % 200) and +ending with the byte at byte offset (page-size - 200). + + +
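
The checksum rule of H32120 (and of H35300 below) reduces to a short loop over every 200th byte of the page data. An informative C transcription, not part of the requirements (function name invented):

  #include <stdint.h>

  /* Journal record checksum per H32120: the checksum-initializer value from
  ** the preceding journal header plus every 200th byte of the page data,
  ** starting at byte offset (page-size % 200). */
  static uint32_t journal_record_cksum(uint32_t cksum_init,
                                       const uint8_t *data, int page_size){
    uint32_t sum = cksum_init;
    int i;
    for(i = page_size % 200; i <= page_size - 200; i += 200){
      sum += data[i];
    }
    return sum;
  }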

    H32130: +A buffer shall be considered to contain a well-formed master journal pointer +record if it is not excluded from this category by requirements H32140, +H32150, H32160 or H32170. + + +

    H32140: +A buffer shall only be considered to be a well-formed master journal pointer +if the final eight bytes of the buffer contain the values 0xd9, 0xd5, 0x05, +0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively. + + +

    H32150: +A buffer shall only be considered to be a well-formed master journal pointer +if the size of the buffer in bytes is equal to the value stored as a 4-byte +big-endian unsigned integer starting 16 bytes before the end of the buffer, +plus 20. + + +

    H32160: +A buffer shall only be considered to be a well-formed master journal pointer +if the first four bytes of the buffer, interpreted as a big-endian unsigned +integer, contain the page number of the locking page (the value +(1 + 2^30 / page-size), where page-size is the value stored in +the page-size field of the first journal header of the journal file). + + +

    H32170: +A buffer shall only be considered to be a well-formed master journal pointer +if the value stored as a 4-byte big-endian integer starting 12 bytes before +the end of the buffer is equal to the sum of all bytes, each interpreted +as an 8-bit unsigned integer, starting at offset 4 of the buffer and continuing +until offset (buffer-size - 16) (the 17th last byte of the buffer). + + +
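
Requirements H32130 through H32170 amount to four checks on a candidate buffer. An informative C sketch, not part of the requirements (names invented); locking_pgno is the value (1 + 2^30 / page-size) described by H32160:

  #include <stdint.h>
  #include <string.h>

  static uint32_t mj32(const uint8_t *p){
    return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
  }

  /* Return non-zero if buf/n is a well-formed master journal pointer (H32130-H32170). */
  static int is_master_journal_pointer(const uint8_t *buf, uint32_t n,
                                       uint32_t locking_pgno){
    static const uint8_t magic[8] = {0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7};
    uint32_t name_len, cksum, i;
    if( n<20 || memcmp(&buf[n-8], magic, 8)!=0 ) return 0;   /* H32140 */
    name_len = mj32(&buf[n-16]);
    if( n!=name_len+20 ) return 0;                           /* H32150 */
    if( mj32(&buf[0])!=locking_pgno ) return 0;              /* H32160 */
    cksum = 0;
    for(i=4; i<=n-17; i++) cksum += buf[i];                  /* sum of the name bytes */
    return mj32(&buf[n-12])==cksum;                          /* H32170 */
  }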

    H32210: +A buffer shall be considered to contain a well-formed journal section +if it is not excluded from this category by requirements H32220, H32230 or +H32240. + + +

    H32220: +A buffer shall only be considered to contain a well-formed journal section +if the first 28 bytes of it contain a well-formed journal header. + + +

    H32230: +A buffer shall only be considered to contain a well-formed journal section +if, beginning at byte offset sector-size, it contains a sequence of +record-count well-formed journal records. In this case sector-size and +record-count are the integer values stored in the sector size and record +count fields of the journal section's journal header. + + +

    H32240: +A buffer shall only be considered to contain a well-formed journal section +if it is an integer multiple of sector-size bytes in size, where sector-size +is the value stored in the sector size field of the journal section's journal +header. + + +

    H32250: +A journal record found within a valid journal file shall be considered a valid +journal record if it is not excluded from this category by requirement H32260, +H32270 or H32280. + + +

    H32260: +A journal record shall only be considered a valid journal record if it and any +other journal records that occur before it within the same journal section are +well-formed. + + +

    H32270: +A journal record shall only be considered a valid journal record if the journal +section to which it belongs begins with a well-formed journal header. + + +

    H32280: +A journal record shall only be considered a valid journal record if all journal +sections that occur before the journal section containing the journal record +are well-formed journal sections. + + +

    H32290: +Two database images shall be considered to be equivalent if they (a) have the +same page size, (b) contain the same number of pages and (c) the content of +each page of the first database image that is not a free-list leaf page is +the same as the content of the corresponding page in the second database image. + + +

    H32300: +If, while writing to an SQLite database file-system representation in +order to replace database image A with database image B, a failure that +should be handled gracefully occurs, then following recovery the database +file-system representation shall contain a database image equivalent to +either A or B. + + +

    H32320: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before the size of +the database file is modified, the first 28 bytes of the journal file contain a +stable valid journal header with the page-size and page-count fields set to +values corresponding to the original database image. + + +

    H32330: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that the first 28 bytes +of the journal file does not become unstable at any point after the size of the +database file is modified until the journal file is invalidated to commit the +transaction. + + +

    H32340: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before any part of +the database file that contained a page of the original database image that was +not a free-list leaf page is overwritten or made unstable the journal file +contains a valid and stable journal record containing the original page data. + + +

    H32350: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that after any part of +the database file that contained a page of the original database image that was +not a free-list leaf page has been overwritten or made unstable the corresponding +journal record (see H32340) is not modified or made unstable. + + +

    H32360: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before the database +file is truncated, the journal file contains stable valid journal records +corresponding to all pages of the original database image that were part of the +region being discarded by the truncate operation and were not free-list leaf +pages. + + +

    H32370: +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that after the database +file has been truncated the journal records corresponding to pages from the +original database image that were part of the truncated region and were not +free-list leaf pages are not modified or made unstable. + + +

    H33000: +Before reading from a database file, a database reader shall establish a +SHARED or greater lock on the database file-system representation. + + +

    H33010: +Before writing to a database file, a database writer shall establish +an EXCLUSIVE lock on the database file-system representation. + + +

    H33020: +Before writing to a journal file, a database writer shall establish +a RESERVED, PENDING or EXCLUSIVE lock on the database file-system +representation. + + +

    H33030: +Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that the database file contains a valid +database image. + + +

    H33060: +Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that any journal file that may be present +is not a valid journal file. + + +

    H33080: +If another database client holds either a RESERVED or PENDING lock on the +database file-system representation, then any journal file that exists within +the file system shall be considered invalid. + + +

    H33040: +A database writer shall increment the value of the database header change +counter field, a 4-byte big-endian unsigned integer field stored at byte offset 24 +of the database header, as part of the first database image modification +that it performs after obtaining an EXCLUSIVE lock. + + +

    H33050: +A database writer shall increment the value of the database schema version +field, a 4-byte big-endian unsigned integer field stored at byte offset 40 +of the database header, as part of the first database image modification that +includes a schema change that it performs after obtaining an EXCLUSIVE lock. + + +

    H33070: +If a database writer is required by either H33050 or H33040 to increment a +database header field, and that header field already contains the maximum +value possible (0xFFFFFFFF, or 4294967295 for 32-bit unsigned integer +fields), "incrementing" the field shall be interpreted to mean setting it to +zero. + + +

    H35010: +Except for the read operation required by H35070 and those reads made +as part of opening a read-only transaction, SQLite shall ensure that +a database connection has an open read-only or read/write +transaction when any data is read from the database file. + + +

    H35020: +Aside from those read operations described by H35070 and H21XXX, SQLite +shall read data from the database file in aligned blocks of +page-size bytes, where page-size is the database page size +used by the database file. + + +

    H35030: +While opening a read-only transaction, after successfully +obtaining a shared lock on the database file, SQLite shall +attempt to detect and roll back a hot journal file associated +with the same database file. + + +

    H35040: +Assuming no errors have occurred, then after attempting to detect and +roll back a hot journal file, if the page cache contains +any entries associated with the current database connection, +then SQLite shall validate the contents of the page cache by +testing the file change counter. This procedure is known as +cache validation. + + +

    H35050: +If the cache validation procedure prescribed by H35040 is required and +does not prove that the page cache entries associated with the +current database connection are valid, then SQLite shall discard +all entries associated with the current database connection from +the page cache. + + +

    H35060: +When a new database connection is required, SQLite shall attempt +to open a file-handle on the database file. If the attempt fails, then +no new database connection is created and an error returned. + + +

    H35070: +When a new database connection is required, after opening the +new file-handle, SQLite shall attempt to read the first 100 bytes +of the database file. If the attempt fails for any reason other than +that the opened file is less than 100 bytes in size, then +the file-handle is closed, no new database connection is created +and an error returned instead. + + +

    H35080: +If the database file header is successfully read from a newly +opened database file, the connection's expected page-size shall +be set to the value stored in the page-size field of the +database header. + + +

    H35090: +If the database file header cannot be read from a newly opened +database file (because the file is less than 100 bytes in size), the +connection's expected page-size shall be set to the compile time +value of the SQLITE_DEFAULT_PAGESIZE option. + + +

    H35100: +When required to open a read-only transaction using a +database connection, SQLite shall first attempt to obtain +a shared-lock on the file-handle open on the database file. + + +

    H35110: +If, while opening a read-only transaction, SQLite fails to obtain +the shared-lock on the database file, then the process is +abandoned, no transaction is opened and an error returned to the user. + + +

    H35120: +If, while opening a read-only transaction, SQLite encounters +an error while attempting to detect or roll back a hot journal +file, then the shared-lock on the database file is released, +no transaction is opened and an error returned to the user. + + +

    H35130: +When required to end a read-only transaction, SQLite shall +relinquish the shared lock held on the database file by +calling the xUnlock() method of the file-handle. + + +

    H35140: +When required to attempt to detect a hot-journal file, SQLite +shall first use the xAccess() method of the VFS layer to check if a +journal file exists in the file-system. + + +

    H35150: +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file does +not exist, then SQLite shall conclude that there is no hot-journal +file in the file system and therefore that no hot journal +rollback is required. + + +

    H35160: +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file +is present, then the xCheckReservedLock() method of the database file +file-handle is invoked to determine whether or not some other +process is holding a reserved or greater lock on the database +file. + + +

    H35170: +If the call to xCheckReservedLock() required by H35160 indicates that +some other database connection is holding a reserved +or greater lock on the database file, then SQLite shall conclude that +there is no hot journal file. In this case the attempt to detect +a hot journal file is concluded. + + +

    H35180: +When a file-handle open on a database file is unlocked, if the +page cache contains one or more entries belonging to the +associated database connection, SQLite shall store the value +of the file change counter internally. + + +

    H35190: +When required to perform cache validation as part of opening +a read transaction, SQLite shall read a 16 byte block +starting at byte offset 24 of the database file using the xRead() +method of the database connection's file handle. + + +

    H35200: +While performing cache validation, after loading the 16 byte +block as required by H35190, SQLite shall compare the 32-bit big-endian +integer stored in the first 4 bytes of the block to the most +recently stored value of the file change counter (see H35180). +If the values are not the same, then SQLite shall conclude that +the contents of the cache are invalid. + + +
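
The validation step of H35190 and H35200 compares the stored change counter with the first four bytes of the 16-byte block read from offset 24. A minimal informative C check, not part of the requirements (names invented):

  #include <stdint.h>

  /* block is the 16 bytes read from byte offset 24 of the database file
  ** (H35190). Returns non-zero if the cached pages may still be used (H35200). */
  static int cache_is_valid(const uint8_t block[16], uint32_t stored_change_counter){
    uint32_t current = ((uint32_t)block[0]<<24)|((uint32_t)block[1]<<16)
                     | ((uint32_t)block[2]<<8)|block[3];
    return current==stored_change_counter;
  }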

    H35210: +During the conclusion of a read transaction, before unlocking +the database file, SQLite shall set the connection's +expected page size to the current database page-size. + + +

    H35220: +As part of opening a new read transaction, immediately after +performing cache validation, if there is no data for database +page 1 in the page cache, SQLite shall read N bytes from +the start of the database file using the xRead() method of the +connection's file handle, where N is the connection's current +expected page size value. + + +

    H35230: +If page 1 data is read as required by H35220, and the value of the +page-size field that appears in the database file header that +consumes the first 100 bytes of the read block is not the same as the +connection's current expected page size, then the +expected page size is set to this value, the database file is +unlocked and the entire procedure to open a read transaction +is repeated. + + +

    H35240: +If page 1 data is read as required by H35220, and the value of the +page-size field that appears in the database file header that +consumes the first 100 bytes of the read block is the same as the +connection's current expected page size, then the block of data +read is stored in the page cache as page 1. + + +

    H35270: +When required to journal a database page, SQLite shall first +append the page number of the page being journalled to the +journal file, formatted as a 4-byte big-endian unsigned integer, +using a single call to the xWrite method of the file-handle opened +on the journal file. + + +

    H35280: +When required to journal a database page, if the attempt to +append the page number to the journal file is successful, +then the current page data (page-size bytes) shall be appended +to the journal file, using a single call to the xWrite method of the +file-handle opened on the journal file. + + +

    H35290: +When required to journal a database page, if the attempt to +append the current page data to the journal file is successful, +then SQLite shall append a 4-byte big-endian integer checksum value +to the journal file, using a single call to the xWrite method +of the file-handle opened on the journal file. + + +

    H35300: +The checksum value written to the journal file by the write +required by H35290 shall be equal to the sum of the checksum +initializer field stored in the journal header (H35710) and +every 200th byte of the page data, beginning with the +(page-size % 200)th byte. + + +

    H35350: +When required to open a write transaction on the database, +SQLite shall first open a read transaction, if the database +connection in question has not already opened one. + + +

    H35360: +When required to open a write transaction on the database, after +ensuring a read transaction has already been opened, SQLite +shall obtain a reserved lock on the database file by calling +the xLock method of the file-handle open on the database file. + + +

    H35370: +When required to open a write transaction on the database, after +obtaining a reserved lock on the database file, SQLite shall +open a read/write file-handle on the corresponding journal file. + + +

    H35380: +When required to open a write transaction on the database, after +opening a file-handle on the journal file, SQLite shall append +a journal header to the (currently empty) journal file. + + +

    H35400: +When a database connection is closed, SQLite shall close the +associated file handle at the VFS level. + + +

    H35420: +SQLite shall ensure that a database connection has an open +read-only or read/write transaction before using data stored in the page +cache to satisfy user queries. + + +

    H35430: +When a database connection is closed, all associated page +cache entries shall be discarded. + + +

    H35440: +If while attempting to detect a hot-journal file the call to +xCheckReservedLock() indicates that no process holds a reserved +or greater lock on the database file, then SQLite shall open +a file handle on the potentially hot journal file using the VFS xOpen() +method. + + +

    H35450: +After successfully opening a file-handle on a potentially hot journal +file, SQLite shall query the file for its size in bytes using the +xFileSize() method of the open file handle. + + +

    H35460: +If the size of a potentially hot journal file is revealed to be zero +bytes by a query required by H35450, then SQLite shall close the +file handle opened on the journal file and delete the journal file using +a call to the VFS xDelete() method. In this case SQLite shall conclude +that there is no hot journal file. + + +

    H35470: +If the size of a potentially hot journal file is revealed to be greater +than zero bytes by a query required by H35450, then SQLite shall attempt +to upgrade the shared lock held by the database connection +on the database file directly to an exclusive lock. + + +

    H35480: +If an attempt to upgrade to an exclusive lock prescribed by +H35470 fails for any reason, then SQLite shall release all locks held by +the database connection and close the file handle opened on the +journal file. The attempt to open a read-only transaction +shall be deemed to have failed and an error returned to the user. + + +

    H35490: +If, as part of the hot journal file detection process, the +attempt to upgrade to an exclusive lock mandated by H35470 is +successful, then SQLite shall query the file-system using the xAccess() +method of the VFS implementation to test whether or not the journal +file is still present in the file-system. + + +

    H35500: +If the xAccess() query required by H35490 reveals that the journal +file is still present in the file system, then SQLite shall conclude +that the journal file is a hot journal file that needs to +be rolled back. SQLite shall immediately begin hot journal +rollback. + + +

    H35510: +If the call to xAccess() required by H35140 fails (due to an IO error or +similar), then SQLite shall abandon the attempt to open a read-only +transaction, relinquish the shared lock held on the database +file and return an error to the user. + + +

    H35520: +If the call to xCheckReservedLock() required by H35160 fails (due to an +IO or other internal VFS error), then SQLite shall abandon the attempt +to open a read-only transaction, relinquish the shared lock +held on the database file and return an error to the user. + + +

    H35530: +If the call to xOpen() required by H35440 fails (due to an IO or other +internal VFS error), then SQLite shall abandon the attempt to open a +read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. + + +

    H35540: +If the call to xFileSize() required by H35450 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file, close the file handle opened on the journal file and +return an error to the user. + + +

    H35550: +If the call to xDelete() required by H35460 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. + + +

    H35560: +If the call to xAccess() required by H35490 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the lock held on the +database file, close the file handle opened on the journal file and +return an error to the user. + + +

    H35570: +If the call to xAccess() required by H35490 reveals that the journal +file is no longer present in the file system, then SQLite shall abandon +the attempt to open a read-only transaction, relinquish the +lock held on the database file, close the file handle opened on the +journal file and return an SQLITE_BUSY error to the user. + + +

    H35580: +If an attempt to acquire a reserved lock prescribed by +requirement H35360 fails, then SQLite shall deem the attempt to +open a write transaction to have failed and return an error +to the user. + + +

    H35590: +When required to modify the contents of an existing database page that +existed and was not a free-list leaf page when the write +transaction was opened, SQLite shall journal the page if it has not +already been journalled within the current write transaction. + + +

    H35600: +When required to modify the contents of an existing database page, +SQLite shall update the cached version of the database page content +stored as part of the page cache entry associated with the page. + + +

    H35610: +When required to append a new database page to the database file, +SQLite shall create a new page cache entry corresponding to +the new page and insert it into the page cache. The dirty +flag of the new page cache entry shall be set. + + +

    H35620: +When required to truncate (remove) a database page that existed and was +not a free-list leaf page when the write transaction was +opened from the end of a database file, SQLite shall journal the page if +it has not already been journalled within the current write +transaction. + + +

    H35630: +When required to truncate a database page from the end of the database +file, SQLite shall discard the associated page cache entry +from the page cache. + + +

    H35640: +When required to purge a non-writable dirty page from the +page cache, SQLite shall sync the journal file before +proceeding with the write operation required by H35670. + + +

    H35660: +After syncing the journal file as required by H35640, SQLite +shall append a new journal header to the journal file +before proceeding with the write operation required by H35670. + + +

    H35670: +When required to purge a page cache entry that is a +dirty page, SQLite shall write the page data into the database +file, using a single call to the xWrite method of the database +connection file handle. + + +

    H35680: +When required to append a journal header to the journal +file, SQLite shall do so by writing a block of sector-size +bytes using a single call to the xWrite method of the file-handle +open on the journal file. The block of data written shall begin +at the smallest sector-size aligned offset at or following the current +end of the journal file. + + +

    H35690: +The first 8 bytes of the journal header required to be written +by H35680 shall contain the following values, in order from byte offset 0 +to 7: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63 and 0xd7. + + +

    H35700: +Bytes 8-11 of the journal header required to be written by +H35680 shall contain 0x00. + + +

    H35710: +Bytes 12-15 of the journal header required to be written by +H35680 shall contain pseudo-randomly generated values (the checksum +initializer referred to by H32120 and H35300). + + +

    H35720: +Bytes 16-19 of the journal header required to be written by +H35680 shall contain the number of pages that the database file +contained when the current write-transaction was started, +formatted as a 4-byte big-endian unsigned integer. + + +

    H35730: +Bytes 20-23 of the journal header required to be written by +H35680 shall contain the sector size used by the VFS layer, +formatted as a 4-byte big-endian unsigned integer. + + +

    H35740: +Bytes 24-27 of the journal header required to be written by +H35680 shall contain the page size used by the database at +the start of the write transaction, formatted as a 4-byte +big-endian unsigned integer. + + +
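
The 28 bytes described by H35690 through H35740 can be assembled as below. An informative C sketch, not part of the requirements (names invented); the remainder of the sector-size block written by H35680 is not specified by these requirements:

  #include <stdint.h>
  #include <string.h>

  static void wr32(uint8_t *p, uint32_t v){
    p[0]=(uint8_t)(v>>24); p[1]=(uint8_t)(v>>16); p[2]=(uint8_t)(v>>8); p[3]=(uint8_t)v;
  }

  /* Fill in the first 28 bytes of a journal header (H35690-H35740). */
  static void build_journal_header(uint8_t *h, uint32_t cksum_init,
                                   uint32_t orig_page_count,
                                   uint32_t sector_size, uint32_t page_size){
    static const uint8_t magic[8] = {0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7};
    memcpy(h, magic, 8);               /* H35690 */
    wr32(&h[8],  0);                   /* record count, updated when syncing (H35700, H35760) */
    wr32(&h[12], cksum_init);          /* pseudo-random checksum initializer (H35710)         */
    wr32(&h[16], orig_page_count);     /* page count at the start of the transaction (H35720) */
    wr32(&h[20], sector_size);         /* H35730 */
    wr32(&h[24], page_size);           /* H35740 */
  }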

    H35750: +When required to sync the journal file, SQLite shall invoke the +xSync method of the file handle open on the journal file. + + +

    H35760: +When required to sync the journal file, after invoking the +xSync method as required by H35750, SQLite shall update the record +count of the journal header most recently written to the +journal file. The 4-byte field shall be updated to contain +the number of journal records that have been written to the +journal file since the journal header was written, +formatted as a 4-byte big-endian unsigned integer. + + +

    H35770: +When required to sync the journal file, after updating the +record count field of a journal header as required by +H35760, SQLite shall invoke the xSync method of the file handle open +on the journal file. + + +

    H35780: +When required to upgrade to an exclusive lock as part of a write +transaction, SQLite shall first attempt to obtain a pending lock +on the database file if one is not already held by invoking the xLock +method of the file handle opened on the database file. + + +

    H35790: +When required to upgrade to an exclusive lock as part of a write +transaction, after successfully obtaining a pending lock SQLite +shall attempt to obtain an exclusive lock by invoking the +xLock method of the file handle opened on the database file. + + +

    H35800: +When required to commit a write-transaction, SQLite shall +modify page 1 to increment the value stored in the change counter +field of the database file header. + + +

    H35810: +When required to commit a write-transaction, after incrementing +the change counter field, SQLite shall sync the journal +file. + + +

    H35820: +When required to commit a write-transaction, after syncing +the journal file as required by H35810, if an exclusive lock +on the database file is not already held, SQLite shall attempt to +upgrade to an exclusive lock. + + +

    H35830: +When required to commit a write-transaction, after syncing +the journal file as required by H35810 and ensuring that an +exclusive lock is held on the database file as required by +H35820, SQLite shall copy the contents of all dirty pages +stored in the page cache into the database file using +calls to the xWrite method of the database connection file +handle. Each call to xWrite shall write the contents of a single +dirty page (page-size bytes of data) to the database +file. Dirty pages shall be written in order of page number, +from lowest to highest. + + +

    H35840: +When required to commit a write-transaction, after copying the +contents of any dirty pages to the database file as required +by H35830, SQLite shall sync the database file by invoking the xSync +method of the database connection file handle. + + +

    H35850: +When required to commit a write-transaction, after syncing +the database file as required by H35840, SQLite shall close the +file-handle opened on the journal file and delete the +journal file from the file system via a call to the VFS +xDelete method. + + +

    H35860: +When required to commit a write-transaction, after deleting +the journal file as required by H35850, SQLite shall relinquish +all locks held on the database file by invoking the xUnlock +method of the database connection file handle. + + +
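From an application's point of view, the entire sequence described by H35800 through H35860 is driven by a single COMMIT. The sketch below uses only the public API and is illustrative; the database name test.db and the table name t1 are arbitrary examples.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db = 0;
      char *zErr = 0;
      if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;
      /* The COMMIT below triggers the change-counter update, journal sync,
      ** lock upgrade, dirty-page write-out, database sync, journal delete
      ** and unlock steps described by H35800 through H35860. */
      if( sqlite3_exec(db,
            "BEGIN;"
            "CREATE TABLE IF NOT EXISTS t1(x);"
            "INSERT INTO t1 VALUES(1);"
            "COMMIT;", 0, 0, &zErr)!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", zErr);
        sqlite3_free(zErr);
      }
      sqlite3_close(db);
      return 0;
    }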


    +This page last modified 2009/02/19 14:35:32 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/hlr40000.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/hlr40000.html --- sqlite3-3.4.2/www/hlr40000.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/hlr40000.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,1129 @@ + + +SQLite SQL Language Specification + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    +6.0 SQL Language Specification +

    + +

    +This document details the requirements for the SQL language understood +by SQLite. +

    +
    + + + +

    H41010: +SQLite shall divide input SQL text into tokens working from left to +right. + + +

    H41020: +At each step in the SQL tokenization process, SQLite shall extract +the longest possible token from the remaining input text. + + +

    H41030: +The tokenizer shall pass each non-WHITESPACE token seen on to the +parser in the order in which the tokens are seen. + + +

    H41040: +When the tokenizer reaches the end of input where the last token sent +to the parser was not a SEMI token, it shall +send a SEMI token to the parser. + + +

    H41050: +When the tokenizer encounters text that is not a valid token, it shall +cause an error to be returned to the application. + + +
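As an illustration of H41050 (and of H41900 further below), preparing SQL that contains text which cannot form a valid token fails with an error. The following sketch is not part of the specification; the stray backslash it prepares is not a valid SQLite token.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db = 0;
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_open(":memory:", &db);
      if( rc==SQLITE_OK ){
        /* The backslash cannot form a valid token, so preparation fails. */
        rc = sqlite3_prepare_v2(db, "SELECT \\", -1, &pStmt, 0);
        if( rc!=SQLITE_OK ){
          printf("prepare failed: %s\n", sqlite3_errmsg(db));
        }
        sqlite3_finalize(pStmt);
      }
      sqlite3_close(db);
      return 0;
    }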

    H41100: +SQLite shall recognize a sequence of one or more WHITESPACE characters +as a WHITESPACE token. + + +

    H41110: +SQLite shall recognize as a WHITESPACE token the two-character sequence "--" +(u002d, u002d) followed by any sequence of non-zero characters up through and +including the first u000a character or until end of input. + + +

    H41120: +SQLite shall recognize as a WHITESPACE token the two-character sequence "/*" +(u002f, u002a) followed by any sequence of zero or more +non-zero characters up through and including the first "*/" (u002a, u002f) sequence or +until end of input. + + +

    H41130: +SQLite shall recognize as an ID token +any sequence of characters that begins with +an ALPHABETIC character and continues with zero or more +ALPHANUMERIC characters and/or "$" (u0024) characters and which is +not a keyword token. + + +

    H41140: +SQLite shall recognize as an ID token +any sequence of non-zero characters that begins with "[" (u005b) and +continues through the first "]" (u005d) character. + + +

    H41150: +SQLite shall recognize as an ID token +any sequence of characters +that begins with a double-quote (u0022), is followed by zero or +more non-zero characters and/or pairs of double-quotes (u0022) +and terminates with a double-quote (u0022) that +is not part of a pair. + + +

    H41160: +SQLite shall recognize as an ID token +any sequence of characters +that begins with a grave accent (u0060), is followed by zero or +more non-zero characters and/or pairs of grave accents (u0060) +and terminates with a grave accent (u0060) that +is not part of a pair. + + +

    H41200: +SQLite shall recognize as a STRING token a sequence of characters +that begins with a single-quote (u0027), is followed by zero or +more non-zero characters and/or pairs of single-quotes (u0027) +and terminates with a single-quote (u0027) that +is not part of a pair. + + +
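The quoting forms described by H41140 through H41200 can be contrasted with a small illustrative sketch (the table and column names below are arbitrary): square brackets, double-quotes and grave accents all produce ID tokens naming the same column, while single-quotes produce a STRING literal.

    #include <sqlite3.h>

    int quoting_demo(sqlite3 *db){
      /* [..], ".." and `..` are identifier quoting forms (ID tokens);
      ** '..' is a string literal (STRING token). */
      return sqlite3_exec(db,
        "CREATE TABLE t2([column one] TEXT);"
        "INSERT INTO t2(\"column one\") VALUES('a string literal');"
        "SELECT `column one` FROM t2;",
        0, 0, 0);
    }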

    H41210: +SQLite shall recognize as a BLOB token an upper or lower-case "X" +(u0058 or u0078) followed by a single-quote (u0027) followed by +a number of HEXADECIMAL characters that is a multiple of two and +terminated by a single-quote (u0027). + + +

    H41220: +SQLite shall recognize as an INTEGER token any sequence of +one or more NUMERIC characters. + + +

    H41230: +SQLite shall recognize as a FLOAT token a sequence of one +or more NUMERIC characters together with zero or one period +(u002e) and followed by an exponentiation suffix. + + +

    H41240: +SQLite shall recognize as a FLOAT token a sequence of one +or more NUMERIC characters that includes exactly one period +(u002e) character. + + +

    H41403: +SQLite shall recognize the 1-character sequence "-" (u002d) as token MINUS. + + +

    H41406: +SQLite shall recognize the 1-character sequence "(" (u0028) as token LP. + + +

    H41409: +SQLite shall recognize the 1-character sequence ")" (u0029) as token RP. + + +

    H41412: +SQLite shall recognize the 1-character sequence ";" (u003b) as token SEMI. + + +

    H41415: +SQLite shall recognize the 1-character sequence "+" (u002b) as token PLUS. + + +

    H41418: +SQLite shall recognize the 1-character sequence "*" (u002a) as token STAR. + + +

    H41421: +SQLite shall recognize the 1-character sequence "/" (u002f) as token SLASH. + + +

    H41424: +SQLite shall recognize the 1-character sequence "%" (u0025) as token REM. + + +

    H41427: +SQLite shall recognize the 1-character sequence "=" (u003d) as token EQ. + + +

    H41430: +SQLite shall recognize the 2-character sequence "==" (u003d u003d) as token EQ. + + +

    H41433: +SQLite shall recognize the 2-character sequence "<=" (u003c u003d) as token LE. + + +

    H41436: +SQLite shall recognize the 2-character sequence "<>" (u003c u003e) as token NE. + + +

    H41439: +SQLite shall recognize the 2-character sequence "<<" (u003c u003c) as token LSHIFT. + + +

    H41442: +SQLite shall recognize the 1-character sequence "<" (u003c) as token LT. + + +

    H41445: +SQLite shall recognize the 2-character sequence ">=" (u003e u003d) as token GE. + + +

    H41448: +SQLite shall recognize the 2-character sequence ">>" (u003e u003e) as token RSHIFT. + + +

    H41451: +SQLite shall recognize the 1-character sequence ">" (u003e) as token GT. + + +

    H41454: +SQLite shall recognize the 2-character sequence "!=" (u0021 u003d) as token NE. + + +

    H41457: +SQLite shall recognize the 1-character sequence "," (u002c) as token COMMA. + + +

    H41460: +SQLite shall recognize the 1-character sequence "&" (u0026) as token BITAND. + + +

    H41463: +SQLite shall recognize the 1-character sequence "~" (u007e) as token BITNOT. + + +

    H41466: +SQLite shall recognize the 1-character sequence "|" (u007c) as token BITOR. + + +

    H41469: +SQLite shall recognize the 2-character sequence "||" (u007c u007c) as token CONCAT. + + +

    H41472: +SQLite shall recognize the 1-character sequence "." (u002e) as token DOT. + + +

    H41503: +SQLite shall recognize the 5-character sequence "ABORT" in any combination of upper and lower case letters as the keyword token ABORT. + + +

    H41506: +SQLite shall recognize the 3-character sequence "ADD" in any combination of upper and lower case letters as the keyword token ADD. + + +

    H41509: +SQLite shall recognize the 5-character sequence "AFTER" in any combination of upper and lower case letters as the keyword token AFTER. + + +

    H41512: +SQLite shall recognize the 3-character sequence "ALL" in any combination of upper and lower case letters as the keyword token ALL. + + +

    H41515: +SQLite shall recognize the 5-character sequence "ALTER" in any combination of upper and lower case letters as the keyword token ALTER. + + +

    H41518: +SQLite shall recognize the 7-character sequence "ANALYZE" in any combination of upper and lower case letters as the keyword token ANALYZE. + + +

    H41521: +SQLite shall recognize the 3-character sequence "AND" in any combination of upper and lower case letters as the keyword token AND. + + +

    H41524: +SQLite shall recognize the 2-character sequence "AS" in any combination of upper and lower case letters as the keyword token AS. + + +

    H41527: +SQLite shall recognize the 3-character sequence "ASC" in any combination of upper and lower case letters as the keyword token ASC. + + +

    H41530: +SQLite shall recognize the 6-character sequence "ATTACH" in any combination of upper and lower case letters as the keyword token ATTACH. + + +

    H41533: +SQLite shall recognize the 13-character sequence "AUTOINCREMENT" in any combination of upper and lower case letters as the keyword token AUTOINCR. + + +

    H41536: +SQLite shall recognize the 6-character sequence "BEFORE" in any combination of upper and lower case letters as the keyword token BEFORE. + + +

    H41539: +SQLite shall recognize the 5-character sequence "BEGIN" in any combination of upper and lower case letters as the keyword token BEGIN. + + +

    H41542: +SQLite shall recognize the 7-character sequence "BETWEEN" in any combination of upper and lower case letters as the keyword token BETWEEN. + + +

    H41545: +SQLite shall recognize the 2-character sequence "BY" in any combination of upper and lower case letters as the keyword token BY. + + +

    H41548: +SQLite shall recognize the 7-character sequence "CASCADE" in any combination of upper and lower case letters as the keyword token CASCADE. + + +

    H41551: +SQLite shall recognize the 4-character sequence "CASE" in any combination of upper and lower case letters as the keyword token CASE. + + +

    H41554: +SQLite shall recognize the 4-character sequence "CAST" in any combination of upper and lower case letters as the keyword token CAST. + + +

    H41557: +SQLite shall recognize the 5-character sequence "CHECK" in any combination of upper and lower case letters as the keyword token CHECK. + + +

    H41560: +SQLite shall recognize the 7-character sequence "COLLATE" in any combination of upper and lower case letters as the keyword token COLLATE. + + +

    H41563: +SQLite shall recognize the 6-character sequence "COLUMN" in any combination of upper and lower case letters as the keyword token COLUMNKW. + + +

    H41566: +SQLite shall recognize the 6-character sequence "COMMIT" in any combination of upper and lower case letters as the keyword token COMMIT. + + +

    H41569: +SQLite shall recognize the 8-character sequence "CONFLICT" in any combination of upper and lower case letters as the keyword token CONFLICT. + + +

    H41572: +SQLite shall recognize the 10-character sequence "CONSTRAINT" in any combination of upper and lower case letters as the keyword token CONSTRAINT. + + +

    H41575: +SQLite shall recognize the 6-character sequence "CREATE" in any combination of upper and lower case letters as the keyword token CREATE. + + +

    H41578: +SQLite shall recognize the 5-character sequence "CROSS" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41581: +SQLite shall recognize the 12-character sequence "CURRENT_DATE" in any combination of upper and lower case letters as the keyword token CTIME_KW. + + +

    H41584: +SQLite shall recognize the 12-character sequence "CURRENT_TIME" in any combination of upper and lower case letters as the keyword token CTIME_KW. + + +

    H41587: +SQLite shall recognize the 17-character sequence "CURRENT_TIMESTAMP" in any combination of upper and lower case letters as the keyword token CTIME_KW. + + +

    H41590: +SQLite shall recognize the 8-character sequence "DATABASE" in any combination of upper and lower case letters as the keyword token DATABASE. + + +

    H41593: +SQLite shall recognize the 7-character sequence "DEFAULT" in any combination of upper and lower case letters as the keyword token DEFAULT. + + +

    H41596: +SQLite shall recognize the 8-character sequence "DEFERRED" in any combination of upper and lower case letters as the keyword token DEFERRED. + + +

    H41599: +SQLite shall recognize the 10-character sequence "DEFERRABLE" in any combination of upper and lower case letters as the keyword token DEFERRABLE. + + +

    H41602: +SQLite shall recognize the 6-character sequence "DELETE" in any combination of upper and lower case letters as the keyword token DELETE. + + +

    H41605: +SQLite shall recognize the 4-character sequence "DESC" in any combination of upper and lower case letters as the keyword token DESC. + + +

    H41608: +SQLite shall recognize the 6-character sequence "DETACH" in any combination of upper and lower case letters as the keyword token DETACH. + + +

    H41611: +SQLite shall recognize the 8-character sequence "DISTINCT" in any combination of upper and lower case letters as the keyword token DISTINCT. + + +

    H41614: +SQLite shall recognize the 4-character sequence "DROP" in any combination of upper and lower case letters as the keyword token DROP. + + +

    H41617: +SQLite shall recognize the 3-character sequence "END" in any combination of upper and lower case letters as the keyword token END. + + +

    H41620: +SQLite shall recognize the 4-character sequence "EACH" in any combination of upper and lower case letters as the keyword token EACH. + + +

    H41623: +SQLite shall recognize the 4-character sequence "ELSE" in any combination of upper and lower case letters as the keyword token ELSE. + + +

    H41626: +SQLite shall recognize the 6-character sequence "ESCAPE" in any combination of upper and lower case letters as the keyword token ESCAPE. + + +

    H41629: +SQLite shall recognize the 6-character sequence "EXCEPT" in any combination of upper and lower case letters as the keyword token EXCEPT. + + +

    H41632: +SQLite shall recognize the 9-character sequence "EXCLUSIVE" in any combination of upper and lower case letters as the keyword token EXCLUSIVE. + + +

    H41635: +SQLite shall recognize the 6-character sequence "EXISTS" in any combination of upper and lower case letters as the keyword token EXISTS. + + +

    H41638: +SQLite shall recognize the 7-character sequence "EXPLAIN" in any combination of upper and lower case letters as the keyword token EXPLAIN. + + +

    H41641: +SQLite shall recognize the 4-character sequence "FAIL" in any combination of upper and lower case letters as the keyword token FAIL. + + +

    H41644: +SQLite shall recognize the 3-character sequence "FOR" in any combination of upper and lower case letters as the keyword token FOR. + + +

    H41647: +SQLite shall recognize the 7-character sequence "FOREIGN" in any combination of upper and lower case letters as the keyword token FOREIGN. + + +

    H41650: +SQLite shall recognize the 4-character sequence "FROM" in any combination of upper and lower case letters as the keyword token FROM. + + +

    H41653: +SQLite shall recognize the 4-character sequence "FULL" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41656: +SQLite shall recognize the 4-character sequence "GLOB" in any combination of upper and lower case letters as the keyword token LIKE_KW. + + +

    H41659: +SQLite shall recognize the 5-character sequence "GROUP" in any combination of upper and lower case letters as the keyword token GROUP. + + +

    H41662: +SQLite shall recognize the 6-character sequence "HAVING" in any combination of upper and lower case letters as the keyword token HAVING. + + +

    H41665: +SQLite shall recognize the 2-character sequence "IF" in any combination of upper and lower case letters as the keyword token IF. + + +

    H41668: +SQLite shall recognize the 6-character sequence "IGNORE" in any combination of upper and lower case letters as the keyword token IGNORE. + + +

    H41671: +SQLite shall recognize the 9-character sequence "IMMEDIATE" in any combination of upper and lower case letters as the keyword token IMMEDIATE. + + +

    H41674: +SQLite shall recognize the 2-character sequence "IN" in any combination of upper and lower case letters as the keyword token IN. + + +

    H41677: +SQLite shall recognize the 5-character sequence "INDEX" in any combination of upper and lower case letters as the keyword token INDEX. + + +

    H41680: +SQLite shall recognize the 9-character sequence "INITIALLY" in any combination of upper and lower case letters as the keyword token INITIALLY. + + +

    H41683: +SQLite shall recognize the 5-character sequence "INNER" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41686: +SQLite shall recognize the 6-character sequence "INSERT" in any combination of upper and lower case letters as the keyword token INSERT. + + +

    H41689: +SQLite shall recognize the 7-character sequence "INSTEAD" in any combination of upper and lower case letters as the keyword token INSTEAD. + + +

    H41692: +SQLite shall recognize the 9-character sequence "INTERSECT" in any combination of upper and lower case letters as the keyword token INTERSECT. + + +

    H41695: +SQLite shall recognize the 4-character sequence "INTO" in any combination of upper and lower case letters as the keyword token INTO. + + +

    H41698: +SQLite shall recognize the 2-character sequence "IS" in any combination of upper and lower case letters as the keyword token IS. + + +

    H41701: +SQLite shall recognize the 6-character sequence "ISNULL" in any combination of upper and lower case letters as the keyword token ISNULL. + + +

    H41704: +SQLite shall recognize the 4-character sequence "JOIN" in any combination of upper and lower case letters as the keyword token JOIN. + + +

    H41707: +SQLite shall recognize the 3-character sequence "KEY" in any combination of upper and lower case letters as the keyword token KEY. + + +

    H41710: +SQLite shall recognize the 4-character sequence "LEFT" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41713: +SQLite shall recognize the 4-character sequence "LIKE" in any combination of upper and lower case letters as the keyword token LIKE_KW. + + +

    H41716: +SQLite shall recognize the 5-character sequence "LIMIT" in any combination of upper and lower case letters as the keyword token LIMIT. + + +

    H41719: +SQLite shall recognize the 5-character sequence "MATCH" in any combination of upper and lower case letters as the keyword token MATCH. + + +

    H41722: +SQLite shall recognize the 7-character sequence "NATURAL" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41725: +SQLite shall recognize the 3-character sequence "NOT" in any combination of upper and lower case letters as the keyword token NOT. + + +

    H41728: +SQLite shall recognize the 7-character sequence "NOTNULL" in any combination of upper and lower case letters as the keyword token NOTNULL. + + +

    H41731: +SQLite shall recognize the 4-character sequence "NULL" in any combination of upper and lower case letters as the keyword token NULL. + + +

    H41734: +SQLite shall recognize the 2-character sequence "OF" in any combination of upper and lower case letters as the keyword token OF. + + +

    H41737: +SQLite shall recognize the 6-character sequence "OFFSET" in any combination of upper and lower case letters as the keyword token OFFSET. + + +

    H41740: +SQLite shall recognize the 2-character sequence "ON" in any combination of upper and lower case letters as the keyword token ON. + + +

    H41743: +SQLite shall recognize the 2-character sequence "OR" in any combination of upper and lower case letters as the keyword token OR. + + +

    H41746: +SQLite shall recognize the 5-character sequence "ORDER" in any combination of upper and lower case letters as the keyword token ORDER. + + +

    H41749: +SQLite shall recognize the 5-character sequence "OUTER" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41752: +SQLite shall recognize the 4-character sequence "PLAN" in any combination of upper and lower case letters as the keyword token PLAN. + + +

    H41755: +SQLite shall recognize the 6-character sequence "PRAGMA" in any combination of upper and lower case letters as the keyword token PRAGMA. + + +

    H41758: +SQLite shall recognize the 7-character sequence "PRIMARY" in any combination of upper and lower case letters as the keyword token PRIMARY. + + +

    H41761: +SQLite shall recognize the 5-character sequence "QUERY" in any combination of upper and lower case letters as the keyword token QUERY. + + +

    H41764: +SQLite shall recognize the 5-character sequence "RAISE" in any combination of upper and lower case letters as the keyword token RAISE. + + +

    H41767: +SQLite shall recognize the 10-character sequence "REFERENCES" in any combination of upper and lower case letters as the keyword token REFERENCES. + + +

    H41770: +SQLite shall recognize the 6-character sequence "REGEXP" in any combination of upper and lower case letters as the keyword token LIKE_KW. + + +

    H41773: +SQLite shall recognize the 7-character sequence "REINDEX" in any combination of upper and lower case letters as the keyword token REINDEX. + + +

    H41776: +SQLite shall recognize the 6-character sequence "RENAME" in any combination of upper and lower case letters as the keyword token RENAME. + + +

    H41779: +SQLite shall recognize the 7-character sequence "REPLACE" in any combination of upper and lower case letters as the keyword token REPLACE. + + +

    H41782: +SQLite shall recognize the 8-character sequence "RESTRICT" in any combination of upper and lower case letters as the keyword token RESTRICT. + + +

    H41785: +SQLite shall recognize the 5-character sequence "RIGHT" in any combination of upper and lower case letters as the keyword token JOIN_KW. + + +

    H41788: +SQLite shall recognize the 8-character sequence "ROLLBACK" in any combination of upper and lower case letters as the keyword token ROLLBACK. + + +

    H41791: +SQLite shall recognize the 3-character sequence "ROW" in any combination of upper and lower case letters as the keyword token ROW. + + +

    H41794: +SQLite shall recognize the 6-character sequence "SELECT" in any combination of upper and lower case letters as the keyword token SELECT. + + +

    H41797: +SQLite shall recognize the 3-character sequence "SET" in any combination of upper and lower case letters as the keyword token SET. + + +

    H41800: +SQLite shall recognize the 5-character sequence "TABLE" in any combination of upper and lower case letters as the keyword token TABLE. + + +

    H41803: +SQLite shall recognize the 4-character sequence "TEMP" in any combination of upper and lower case letters as the keyword token TEMP. + + +

    H41806: +SQLite shall recognize the 9-character sequence "TEMPORARY" in any combination of upper and lower case letters as the keyword token TEMP. + + +

    H41809: +SQLite shall recognize the 4-character sequence "THEN" in any combination of upper and lower case letters as the keyword token THEN. + + +

    H41812: +SQLite shall recognize the 2-character sequence "TO" in any combination of upper and lower case letters as the keyword token TO. + + +

    H41815: +SQLite shall recognize the 11-character sequence "TRANSACTION" in any combination of upper and lower case letters as the keyword token TRANSACTION. + + +

    H41818: +SQLite shall recognize the 7-character sequence "TRIGGER" in any combination of upper and lower case letters as the keyword token TRIGGER. + + +

    H41821: +SQLite shall recognize the 5-character sequence "UNION" in any combination of upper and lower case letters as the keyword token UNION. + + +

    H41824: +SQLite shall recognize the 6-character sequence "UNIQUE" in any combination of upper and lower case letters as the keyword token UNIQUE. + + +

    H41827: +SQLite shall recognize the 6-character sequence "UPDATE" in any combination of upper and lower case letters as the keyword token UPDATE. + + +

    H41830: +SQLite shall recognize the 5-character sequence "USING" in any combination of upper and lower case letters as the keyword token USING. + + +

    H41833: +SQLite shall recognize the 6-character sequence "VACUUM" in any combination of upper and lower case letters as the keyword token VACUUM. + + +

    H41836: +SQLite shall recognize the 6-character sequence "VALUES" in any combination of upper and lower case letters as the keyword token VALUES. + + +

    H41839: +SQLite shall recognize the 4-character sequence "VIEW" in any combination of upper and lower case letters as the keyword token VIEW. + + +

    H41842: +SQLite shall recognize the 7-character sequence "VIRTUAL" in any combination of upper and lower case letters as the keyword token VIRTUAL. + + +

    H41845: +SQLite shall recognize the 4-character sequence "WHEN" in any combination of upper and lower case letters as the keyword token WHEN. + + +

    H41848: +SQLite shall recognize the 5-character sequence "WHERE" in any combination of upper and lower case letters as the keyword token WHERE. + + +

    H41900: +The preparation of an SQL statement that is not accepted by +the SQLite parser shall fail with an error. + + +

    H41910: +SQLite shall use the built-in NOCASE collating sequence when comparing +identifiers and datatype names within SQL statements during +statement preparation. + + +

    H41920: +A token received by the parser shall be converted into an ID token +if the original token value would have resulted in a syntax error, +a token value of ID will allow the parse to continue, +and if the original token value was one of: +ABORT +AFTER +ANALYZE +ASC +ATTACH +BEFORE +BEGIN +CASCADE +CAST +CONFLICT +CTIME_KW +DATABASE +DEFERRED +DESC +DETACH +EACH +END +EXCEPT +EXCLUSIVE +EXPLAIN +FAIL +FOR +IF +IGNORE +IMMEDIATE +INITIALLY +INSTEAD +INTERSECT +KEY +LIKE_KW +MATCH +OF +OFFSET +PLAN +PRAGMA +QUERY +RAISE +REINDEX +RENAME +REPLACE +RESTRICT +ROW +TEMP +TRIGGER +UNION +VACUUM +VIEW +VIRTUAL + + +
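For example, keywords from the H41920 list may be used as ordinary identifiers whenever treating them as ID tokens is the only way the parse can continue. A small illustrative sketch (the table and column names are arbitrary):

    #include <sqlite3.h>

    int fallback_demo(sqlite3 *db){
      /* PLAN, KEY and TEMP are all in the H41920 fallback list, so they
      ** may be used as table and column names without quoting. */
      return sqlite3_exec(db,
        "CREATE TABLE plan(key TEXT, temp TEXT);"
        "INSERT INTO plan(key, temp) VALUES('k', 't');",
        0, 0, 0);
    }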

    H41930: +A token received by the parser shall be converted into an ANY token +if the original token value would have resulted in a syntax error +and if a token value of ANY will allow the parse to continue. + + +

    H42000: +In the absence of semantic or other errors, the SQLite parser shall +accept a "sql-stmt-list" that conforms to the following syntax: +

    + + +

    H42100: +In the absence of semantic or other errors, the SQLite parser shall +accept a "sql-stmt" that conforms to the following syntax: +

    + + +

    H42200: +In the absence of semantic or other errors, the SQLite parser shall +accept an "alter-table-stmt" that conforms to the following syntax: +

    + + +

    H42300: +In the absence of semantic or other errors, the SQLite parser shall +accept an "analyze-stmt" that conforms to the following syntax: +

    + + +

    H42400: +In the absence of semantic or other errors, the SQLite parser shall +accept an "attach-stmt" that conforms to the following syntax: +

    + + +

    H42500: +In the absence of semantic or other errors, the SQLite parser shall +accept a "begin-stmt" that conforms to the following syntax: +

    + + +

    H42600: +In the absence of semantic or other errors, the SQLite parser shall +accept a "commit-stmt" that conforms to the following syntax: +

    + + +

    H42700: +In the absence of semantic or other errors, the SQLite parser shall +accept a "rollback-stmt" that conforms to the following syntax: +

    + + +

    H42800: +In the absence of semantic or other errors, the SQLite parser shall +accept a "savepoint-stmt" that conforms to the following syntax: +

    + + +

    H42900: +In the absence of semantic or other errors, the SQLite parser shall +accept a "release-stmt" that conforms to the following syntax: +

    + + +

    H43000: +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-index-stmt" that conforms to the following syntax: +

    + + +

    H43100: +In the absence of semantic or other errors, the SQLite parser shall +accept an "indexed-column" that conforms to the following syntax: +

    + + +

    H43200: +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-table-stmt" that conforms to the following syntax: +

    + + +

    H43300: +In the absence of semantic or other errors, the SQLite parser shall +accept a "column-def" that conforms to the following syntax: +

    + + +

    H43400: +In the absence of semantic or other errors, the SQLite parser shall +accept a "type-name" that conforms to the following syntax: +

    + + +

    H43500: +In the absence of semantic or other errors, the SQLite parser shall +accept a "column-constraint" that conforms to the following syntax: +

    + + +

    H43600: +In the absence of semantic or other errors, the SQLite parser shall +accept a "signed-number" that conforms to the following syntax: +

    + + +

    H43700: +In the absence of semantic or other errors, the SQLite parser shall +accept a "table-constraint" that conforms to the following syntax: +

    + + +

    H43800: +In the absence of semantic or other errors, the SQLite parser shall +accept a "foreign-key-clause" that conforms to the following syntax: +

    + + +

    H43900: +In the absence of semantic or other errors, the SQLite parser shall +accept a "conflict-clause" that conforms to the following syntax: +

    + + +

    H44000: +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-trigger-stmt" that conforms to the following syntax: +

    + + +

    H44100: +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-view-stmt" that conforms to the following syntax: +

    + + +

    H44200: +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-virtual-table-stmt" that conforms to the following syntax: +

    + + +

    H44300: +In the absence of semantic or other errors, the SQLite parser shall +accept a "delete-stmt" that conforms to the following syntax: +

    + + +

    H44400: +In the absence of semantic or other errors, the SQLite parser shall +accept a "delete-stmt-limited" that conforms to the following syntax: +

    + + +

    H44500: +In the absence of semantic or other errors, the SQLite parser shall +accept a "detach-stmt" that conforms to the following syntax: +

    + + +

    H44600: +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-index-stmt" that conforms to the following syntax: +

    + + +

    H44700: +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-table-stmt" that conforms to the following syntax: +

    + + +

    H44800: +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-trigger-stmt" that conforms to the following syntax: +

    + + +

    H44900: +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-view-stmt" that conforms to the following syntax: +

    + + +

    H45000: +In the absence of semantic or other errors, the SQLite parser shall +accept an "expr" that conforms to the following syntax: +

    + + +

    H45100: +In the absence of semantic or other errors, the SQLite parser shall +accept a "raise-function" that conforms to the following syntax: +

    + + +

    H45200: +In the absence of semantic or other errors, the SQLite parser shall +accept a "literal-value" that conforms to the following syntax: +

    + + +

    H45300: +In the absence of semantic or other errors, the SQLite parser shall +accept an "insert-stmt" that conforms to the following syntax: +

    + + +

    H45400: +In the absence of semantic or other errors, the SQLite parser shall +accept a "pragma-stmt" that conforms to the following syntax: +

    + + +

    H45500: +In the absence of semantic or other errors, the SQLite parser shall +accept a "pragma-value" that conforms to the following syntax: +

    + + +

    H45600: +In the absence of semantic or other errors, the SQLite parser shall +accept a "reindex-stmt" that conforms to the following syntax: +

    + + +

    H45700: +In the absence of semantic or other errors, the SQLite parser shall +accept a "select-stmt" that conforms to the following syntax: +

    + + +

    H45800: +In the absence of semantic or other errors, the SQLite parser shall +accept a "select-core" that conforms to the following syntax: +

    + + +

    H45900: +In the absence of semantic or other errors, the SQLite parser shall +accept a "result-column" that conforms to the following syntax: +

    + + +

    H46000: +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-source" that conforms to the following syntax: +

    + + +

    H46100: +In the absence of semantic or other errors, the SQLite parser shall +accept a "single-source" that conforms to the following syntax: +

    + + +

    H46200: +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-op" that conforms to the following syntax: +

    + + +

    H46300: +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-constraint" that conforms to the following syntax: +

    + + +

    H46400: +In the absence of semantic or other errors, the SQLite parser shall +accept an "ordering-term" that conforms to the following syntax: +

    + + +

    H46500: +In the absence of semantic or other errors, the SQLite parser shall +accept a "compound-operator" that conforms to the following syntax: +

    + + +

    H46600: +In the absence of semantic or other errors, the SQLite parser shall +accept an "update-stmt" that conforms to the following syntax: +

    + + +

    H46700: +In the absence of semantic or other errors, the SQLite parser shall +accept an "update-stmt-limited" that conforms to the following syntax: +

    + + +

    H46800: +In the absence of semantic or other errors, the SQLite parser shall +accept a "qualified-table-name" that conforms to the following syntax: +

    + + +

    H46900: +In the absence of semantic or other errors, the SQLite parser shall +accept a "vacuum-stmt" that conforms to the following syntax: +

    + + +
    +This page last modified 2009/02/19 14:35:30 UTC +
    Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/2005osaward.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/2005osaward.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-0.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-0.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-1.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-1.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-2.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-3.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-3.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-4.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-4.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-5.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-5.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-6.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-6.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-7.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-7.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-8.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-8.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-9.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-9.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-A.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-A.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/commit-B.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/commit-B.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-0.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-0.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-1.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-1.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-2.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-3.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-3.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-4.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-4.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/multi-5.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/multi-5.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-0.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-0.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-1.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-1.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-2.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-3.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-3.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-4.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-4.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/ac/rollback-5.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/ac/rollback-5.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/arch2.gif 
and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/arch2.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/arch.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/arch.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/arch.png and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/arch.png differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/btreemodule_balance_deeper.svg /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/btreemodule_balance_deeper.svg --- sqlite3-3.4.2/www/images/btreemodule_balance_deeper.svg 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/btreemodule_balance_deeper.svg 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,211 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + Right-child pointer + + + + + + Rest of Tree Content + Root page of b-tree + + + + + + Rest of Tree Content + Root page of b-tree + + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/btreemodule_balance_quick.svg /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/btreemodule_balance_quick.svg --- sqlite3-3.4.2/www/images/btreemodule_balance_quick.svg 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/btreemodule_balance_quick.svg 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,328 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + Leaf page + + + + Other leaf pages + + Right-child pointer + + + Right-child pointer + + + + Leaf page + + + + Other leaf pages + + Right-child pointer + + + Right-child pointer + + + New Leaf page + + + + + + + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/btreemodule_balance_shallower.svg /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/btreemodule_balance_shallower.svg --- sqlite3-3.4.2/www/images/btreemodule_balance_shallower.svg 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/btreemodule_balance_shallower.svg 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,211 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + Right-child pointer + + + + + + Rest of Tree Content + Root page of b-tree + + + + + + Rest of Tree Content + Root page of b-tree + + + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/btreemodule_delete1.svg /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/btreemodule_delete1.svg --- sqlite3-3.4.2/www/images/btreemodule_delete1.svg 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/btreemodule_delete1.svg 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,238 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + The blue cell has been removed from leaf nodeand used to replace thecell deleted from the internal node. + The green cell is to be deleted froman internal tree node. The blue cellis the cell with the largest key inthe sub-tree headed by the child-page of the green cell. 
+ + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/btreemodule_overview.svg /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/btreemodule_overview.svg --- sqlite3-3.4.2/www/images/btreemodule_overview.svg 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/btreemodule_overview.svg 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,385 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + In-Memory Page Cache + + + + + + + + + + + + + + + + + + + Journal File + Database File + + Client 1 + + + Client 2 + + IO using VFS interface + Data Exchanged Using B-Tree Interface + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/chw.jpg and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/chw.jpg differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/dan1.jpg and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/dan1.jpg differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/direct1b.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/direct1b.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/drh1.jpg and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/drh1.jpg differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/db_connection.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/db_connection.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/examplepop.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/examplepop.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/fileio_diagrams.odg and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/fileio_diagrams.odg differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/filesystem1.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/filesystem1.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/filesystem2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/filesystem2.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/freelistpage.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/freelistpage.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/indexlongrecord.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/indexlongrecord.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/indexpage.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/indexpage.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/indexpage.odg and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/indexpage.odg differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/indexshortrecord.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/indexshortrecord.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/indextree.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/indextree.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/journal_format.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/journal_format.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/journal_header.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/journal_header.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/journal_record.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/journal_record.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/master_journal_ptr.gif and 
/tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/master_journal_ptr.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/overflowpage.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/overflowpage.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/pointermapentry.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/pointermapentry.gif differ Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/recordformat.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/recordformat.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/rtdocs.css /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/rtdocs.css --- sqlite3-3.4.2/www/images/fileformat/rtdocs.css 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/fileformat/rtdocs.css 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,81 @@ + /* Style for requirements paragraph. */ + .req { margin: 1.0em 10ex; } + .req span { color: darkblue } + + .subreq { margin: 1.0em 5ex 1.0em 15ex; color: darkblue } + .subsubreq { margin: 1.0em 5ex 1.0em 20ex; color: darkblue } + + .req:before { color: black; content: "[" attr(id) "] "} + .subreq:before { color: black; content: "[" attr(id) "] "} + .subsubreq:before { color: black; content: "[SUBSUBREQ] " } + + /* The dark-green color used for headings. */ + h1,h2,h3,h4,#toc_header,#document_title { color: #80a796 } + + .req code {display: block; margin: 0.5em 5ex} + code {white-space: pre} + + /* Style for content headings */ + h2 { margin-left: 10px } + h3 { margin-left: 20px } + h4 { margin-left: 30px } + h1,h2,h3,h4 { font-weight: normal } + + /* Style for document and toc headings */ + #toc_header { font-size: 1.5em; margin: 1.0em; } + #document_title { font-size: 2em; text-align: center } + + #toc a { color: darkblue ; text-decoration: none } + + /* Document font */ + body { font-family: sans-serif ; font-size:14px ; } + + /* Margins for block boxes that occur in the document flow. */ + p,ul,ol { margin: 1em 5ex } + td p, td ul, td ol { margin: 1em auto } + + /* Table style */ + table.striped, table#glossary { margin: 1em auto; width: 80% ; border-spacing: 0} + .striped th, #glossary th { + white-space:nowrap; + text-align:left; + border-bottom: solid 1px #444444; + padding: 0.2em 1ex; + } + .striped td, #glossary td { vertical-align: top } + .striped td, #glossary td { padding: 0.2em 1ex; } + + .spacedlist li { margin-top: 0.5em ; margin-bottom: 0.5em } + + li p { margin: 1em auto ; padding: 0 } + + /* Style for "todo" notes. These are represented by markup like: + ** + ** Fix this bit! + **

    Longer todo note.

    + */ + .todo { color: #AA3333 ; font-style : italic } + .todo:before { content: 'TODO:' } + p.todo { border: solid #AA3333 1px; padding: 1ex } + + cite a, cite a:visited { color: inherit; text-decoration: none ; font-style: normal; } + .defnlink, .reqlink { + color: inherit; + text-decoration: none; + } + .defnlink { font-style: italic } + .defnlink:visited,.reqlink:visited { color: inherit } + h1 .defnlink, h2 .defnlink, h3 .defnlink, h4 .defnlink, .defn .defnlink{ font-style: inherit } + :link:hover,:visited:hover { background: wheat } + + .defn { white-space: nowrap } + + img { + display:block; + } + + pre.api { + margin: 1em 15ex; + font-size: 16px; + } + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/images/fileformat/rtdocs.js /tmp/3ARg2Grji7/sqlite3-3.6.16/www/images/fileformat/rtdocs.js --- sqlite3-3.4.2/www/images/fileformat/rtdocs.js 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/images/fileformat/rtdocs.js 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,116 @@ + + +function populate_toc () { + var children = document.getElementsByTagName("h1").item(0).parentNode.childNodes + var toc = "" + + var counters = new Array() + counters[1] = 0 + counters[2] = 0 + counters[3] = 0 + counters[4] = 0 + + /* Generate the table of contents */ + for(var ii=0; ii0 ){ + var anchor = "tocentry_" + ii + + for(var jj=iHeader+1; jj<=4; jj++){ counters[jj] = 0 } + counters[iHeader]++ + + var number = "" + for(var jj=1; jj<=iHeader; jj++){ number += counters[jj] + "." } + + toc += '" + + var a = '' + number + '' + node.innerHTML = a + " " + node.innerHTML + } + } + document.getElementById("toc").innerHTML = toc +} + +function number_figs () { + /* Number the figures in this document */ + var figcounter = 1 + var spans = document.getElementsByTagName("span") + for(var ii=0; ii references */ + var cites = document.getElementsByTagName("cite") + for(var ii=0; ii' + label + '' + } +} + +function decorate_tables () { + /* Decorate tables */ + var tables = document.getElementsByTagName("table") + for(var ii=0; ii + +SQLite Home Page + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + + +
    +

    Welcome.

    +

    SQLite is a software library that implements a +self-contained, +serverless, +zero-configuration, +transactional +SQL database engine. +SQLite is the +most widely deployed +SQL database engine in the world. +The source code for SQLite is in the +public domain.

    + +
    + +

    Sponsors

    +

    Ongoing development and maintenance of SQLite is +sponsored in part by SQLite Consortium +members, including:

    + +
    + + + + + + +
    +
    + + +
    +

    Current Status

    + +

      +
    • Version 3.6.16 +of SQLite is recommended for all new development. Upgrading from +version 3.6.12, 3.6.13, and 3.6.14.2 is optional. +Upgrading from other SQLite versions is recommended.
    • +

    + +

    Common Links

    + +

    + +
    +
    +This page last modified 2009/06/23 14:19:57 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/index.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/index.tcl --- sqlite3-3.4.2/www/index.tcl 2007-08-13 17:15:29.000000000 +0100 +++ sqlite3-3.6.16/www/index.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -#!/usr/bin/tclsh -source common.tcl -header {SQLite home page} -puts { - - - - -
    -

    About SQLite

    -

    -
    - - -
    -SQLite is a small -C library that implements a self-contained, embeddable, -zero-configuration -SQL database engine. -Features include: -

    - -

      -
    • Transactions are atomic, consistent, isolated, and durable (ACID) - even after system crashes and power failures. -
    • Zero-configuration - no setup or administration needed.
    • -
    • Implements most of SQL92. - (Features not supported)
    • -
    • A complete database is stored in a single disk file.
    • -
    • Database files can be freely shared between machines with - different byte orders.
    • -
    • Supports terabyte-sized databases and gigabyte-sized strings - and blobs. (See limits.html.) -
    • Small code footprint: - - less than 250KiB fully configured or less - than 150KiB with optional features omitted.
    • -
    • Faster than popular client/server database - engines for most common operations.
    • -
    • Simple, easy to use API.
    • -
    • TCL bindings included. - Bindings for many other languages - - available separately.
    • -
    • Well-commented source code with over 98% test coverage.
    • -
    • Available as a - - single ANSI-C source-code file that you can easily drop into - another project. -
    • Self-contained: no external dependencies.
    • -
    • Sources are in the public domain. - Use for any purpose.
    • -
    -

    - -

    -The SQLite distribution comes with a standalone command-line -access program (sqlite) that can -be used to administer an SQLite database and which serves as -an example of how to use the SQLite library. -

    - -
    -

    News

    -} - -proc newsitem {date title text} { - puts "

    $date - $title

    " - regsub -all "\n( *\n)+" $text "

    \n\n

    " txt - puts "

    $txt

    " - puts "
    " -} - -newsitem {2007-Aug-13} {Version 3.4.2} { - While stress-testing the - soft_heap_limit - feature, a bug that could lead to - database - corruption was - discovered and fixed. - Though the consequences of this bug are severe, the chances of hitting - it in a typical application are remote. Upgrading is recommended - only if you use the - sqlite3_soft_heap_limit - interface. -} - -newsitem {2007-Jly-20} {Version 3.4.1} { - This release fixes a bug in VACUUM that - can lead to - database corruption. The bug was introduced in version - 3.3.14. - Upgrading is recommended for all users. Also included are a slew of - other more routine - enhancements and bug fixes. -} - -newsitem {2007-Jun-18} {Version 3.4.0} { - This release fixes two separate bugs either of which - can lead to database corruption. Upgrading - is strongly recommended. If you must continue using an older version - of SQLite, please at least read about how to avoid these bugs - at - - CorruptionFollowingBusyError and - ticket #2418 -

    - This release also adds explicit limits on the - sizes and quantities of things SQLite will handle. The new limits might - causes compatibility problems for existing applications that - use excessively large strings, BLOBs, tables, or SQL statements. - The new limits can be increased at compile-time to work around any problems - that arise. Nevertheless, the version number of this release is - 3.4.0 instead of 3.3.18 in order to call attention to the possible - incompatibility. -

    - There are also new features, including - incremental BLOB I/O and - incremental vacuum. - See the change log - for additional information. -} - -puts { -

    Old news...

    -
    -} -footer {$Id: index.tcl,v 1.159 2007/08/13 16:15:29 drh Exp $} Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/indirect1b1.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/indirect1b1.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/inmemorydb.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/inmemorydb.html --- sqlite3-3.4.2/www/inmemorydb.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/inmemorydb.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,149 @@ + + +In-Memory Databases + + + + + +

    In-Memory Databases

    + +

    An SQLite database is normally stored in a single ordinary disk +file. However, in certain circumstances, the database might be stored in +memory.

    + +

    The most common way to force an SQLite database to exist purely +in memory is to open the database using the special filename +":memory:". In other words, instead of passing the name of +a real disk file into one of the sqlite3_open(), sqlite3_open16(), or +sqlite3_open_v2() functions, pass in the string ":memory:". For +example:

    + +
    +rc = sqlite3_open(":memory:", &db);
    +
    + +

    When this is done, no disk file is opened. +Instead, a new database is created +purely in memory. The database ceases to exist as soon as the database +connection is closed. Every :memory: database is distinct from every +other. So, opening two database connections each with the filename +":memory:" will create two independent in-memory databases.

    + +

    The special filename ":memory:" can be used anywhere that a database +filename is permitted. For example, it can be used as the +filename in an ATTACH command:

    + +
    +ATTACH DATABASE ':memory:' AS aux1; +
    + +

    Note that in order for the special ":memory:" name to apply and to +create a pure in-memory database, there must be no additional text in the +filename. Thus, a disk-based database can be created in a file by prepending +a pathname, like this: "./:memory:".

    + + + +

    Temporary Databases

    + +

    When the name of the database file handed to sqlite3_open() or to +ATTACH is an empty string, then a new temporary file is created to hold +the database.

    + +
    +rc = sqlite3_open("", &db);
    +
    + +
    +ATTACH DATABASE '' AS aux2; +
    + +

A different temporary file is created each time, so that, just as with the special ":memory:" string, two database connections to temporary databases each have their own private database. Temporary databases are automatically deleted when the connection that created them closes.

    + +

Even though a disk file is allocated for each temporary database, in practice the temporary database usually resides in the in-memory pager cache, and hence there is very little difference between a pure in-memory database created by ":memory:" and a temporary database created by an empty filename. The sole difference is that a ":memory:" database must remain in memory at all times, whereas parts of a temporary database might be flushed to disk if the database becomes large or if SQLite comes under memory pressure.

    + +

    The previous paragraphs describe the behavior of temporary databases +under the default SQLite configuration. An application can use the +temp_store pragma and the SQLITE_TEMP_STORE compile-time parameter to +force temporary databases to behave as pure in-memory databases, if desired. +
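For example, a minimal sketch of forcing temporary tables and indices into memory for the current connection (whether temporary content can ever spill to disk also depends on the SQLITE_TEMP_STORE compile-time setting; the table name is hypothetical):

    PRAGMA temp_store = MEMORY;    -- keep temporary tables and indices in memory
    CREATE TEMP TABLE scratch(x);  -- this temporary table now lives purely in memory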

    +
    +This page last modified 2009/02/05 02:02:44 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_aggfunc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_aggfunc.html --- sqlite3-3.4.2/www/lang_aggfunc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_aggfunc.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,153 @@ + + +SQLite Query Language: Aggregate Functions + + + + + +

    SQL As Understood By SQLite

    Aggregate Functions

    + +

    +The aggregate functions shown below are available by default. Additional +aggregate functions written in C may be added using the +sqlite3_create_function() +API.

    + +

    +In any aggregate function that takes a single argument, that argument +can be preceded by the keyword DISTINCT. In such cases, duplicate +elements are filtered before being passed into the aggregate function. +For example, the function "count(distinct X)" will return the number +of distinct values of column X instead of the total number of non-null +values in column X. +

    + + + +
avg(X)  Return the average value of all non-NULL X within a group. String and BLOB values that do not look like numbers are interpreted as 0. The result of avg() is always a floating point value as long as there is at least one non-NULL input, even if all inputs are integers. The result of avg() is NULL if and only if there are no non-NULL inputs.
    count(X)

    count(*)
The first form returns a count of the number of times that X is not NULL in a group. The second form (with no argument) returns the total number of rows in the group.
    group_concat(X)

    group_concat(X,Y)
The result is a string which is the concatenation of all non-NULL values of X. If parameter Y is present, it is used as the separator between instances of X. A comma (",") is used as the separator if Y is omitted. The order of the concatenated elements is arbitrary.
    max(X) + + Return the maximum value of all values in the group. + The maximum value is the value that would be returned last in an + ORDER BY on the same column. NULL is returned if and only if there are + no non-NULL values in the group. +
min(X)  Return the minimum non-NULL value of all values in the group. The minimum value is the first non-NULL value that would appear in an ORDER BY of the column. NULL is returned if and only if there are no non-NULL values in the group.
    sum(X)

    total(X)
    + + Return the numeric sum of all non-NULL values in the group. + If there are no non-NULL input rows then sum() returns + NULL but total() returns 0.0. + NULL is not normally a helpful result for the sum of no rows + but the SQL standard requires it and most other + SQL database engines implement sum() that way so SQLite does it in the + same way in order to be compatible. The non-standard total() function + is provided as a convenient way to work around this design problem + in the SQL language.

    + +

The result of total() is always a floating point value. The result of sum() is an integer value if all non-NULL inputs are integers. If any input to sum() is neither an integer nor a NULL, then sum() returns a floating point value which might be an approximation to the true sum.

    + +

Sum() will throw an "integer overflow" exception if all inputs are integers or NULL and an integer overflow occurs at any point during the computation. Total() never throws an integer overflow.
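As a brief illustration of the sum()/total() distinction over an empty group and of DISTINCT filtering (the table and column names are hypothetical):

    SELECT sum(x), total(x) FROM t WHERE 0;   -- NULL and 0.0 when no rows qualify
    SELECT count(DISTINCT x) FROM t;          -- number of distinct non-NULL values of x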

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_altertable.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_altertable.html --- sqlite3-3.4.2/www/lang_altertable.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_altertable.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,127 @@ + + +SQLite Query Language: ALTER TABLE + + + + + +

    SQL As Understood By SQLite

    ALTER TABLE

    alter-table-stmt:

    + + +

SQLite supports a limited subset of ALTER TABLE. The ALTER TABLE command in SQLite allows the user to rename a table or to add a new column to an existing table. It is not possible to rename a column, remove a column, or add or remove constraints from a table.

    + +

    The RENAME TO syntax is used to rename the table identified by +[database-name.]table-name to new-table-name. +This command +cannot be used to move a table between attached databases, only to rename +a table within the same database.

    + +

    If the table being renamed has triggers or indices, then these remain +attached to the table after it has been renamed. However, if there are +any view definitions, or statements executed by triggers that refer to +the table being renamed, these are not automatically modified to use the new +table name. If this is required, the triggers or view definitions must be +dropped and recreated to use the new table name by hand. +

    + +

The ADD COLUMN syntax is used to add a new column to an existing table. The new column is always appended to the end of the list of existing columns. The column-def rule defines the characteristics of the new column. The new column may take any of the forms permissible in a CREATE TABLE statement, with the following restrictions:

      +
    • The column may not have a PRIMARY KEY or UNIQUE constraint.
    • +
    • The column may not have a default value of CURRENT_TIME, CURRENT_DATE + or CURRENT_TIMESTAMP.
    • +
    • If a NOT NULL constraint is specified, then the column must have a + default value other than NULL. +
    + +

    Note also that when adding a CHECK constraint, the CHECK constraint +is not tested against preexisting rows of the table. +This can result in a table that contains data that +is in violation of the CHECK constraint. Future versions of SQLite might +change to validate CHECK constraints as they are added.
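As a brief illustration of the two supported forms described above (the table and column names are hypothetical):

    ALTER TABLE orders RENAME TO customer_orders;
    ALTER TABLE customer_orders ADD COLUMN note TEXT DEFAULT '';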

    + +

    The execution time of the ALTER TABLE command is independent of +the amount of data in the table. The ALTER TABLE command runs as quickly +on a table with 10 million rows as it does on a table with 1 row. +

    + +

    After ADD COLUMN has been run on a database, that database will not +be readable by SQLite version 3.1.3 and earlier.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_analyze.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_analyze.html --- sqlite3-3.4.2/www/lang_analyze.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_analyze.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,95 @@ + + +SQLite Query Language: ANALYZE + + + + + +

    SQL As Understood By SQLite

    ANALYZE

    analyze-stmt:

    + + +

The ANALYZE command gathers statistics about indices and stores them in special tables in the database where the query optimizer can use them to help make better index choices. If no arguments are given, all indices in all attached databases are analyzed. If a database name is given as the argument, all indices in that one database are analyzed. If the argument is a table name, then only indices associated with that one table are analyzed.
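For example (the database and table names are hypothetical):

    ANALYZE;                 -- all indices in all attached databases
    ANALYZE main;            -- only indices in the "main" database
    ANALYZE main.customers;  -- only indices associated with the customers table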

    + +

    The initial implementation stores all statistics in a single +table named sqlite_stat1. Future enhancements may create +additional tables with the same name pattern except with the "1" +changed to a different digit. The DROP TABLE command does +not work on the sqlite_stat1 table, +but all the content can be removed using the DELETE command, +which has the same effect.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_attach.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_attach.html --- sqlite3-3.4.2/www/lang_attach.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_attach.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,114 @@ + + +SQLite Query Language: ATTACH DATABASE + + + + + +

    SQL As Understood By SQLite

    ATTACH DATABASE

    attach-stmt:

    + + +

    The ATTACH DATABASE statement adds another database +file to the current database connection. If the filename contains +punctuation characters it must be quoted. The database-names 'main' and +'temp' refer to the main database and the database used for +temporary tables. These cannot be detached. Attached databases +are removed using the DETACH statement.

    + +

    You cannot create a new table with the same name as a table in +an attached database, but you can attach a database which contains +tables whose names are duplicates of tables in the main database. It is +also permissible to attach the same database file multiple times.

    + +

    Tables in an attached database can be referred to using the syntax +database-name.table-name. If an attached table doesn't have +a duplicate table name in the main database, it does not require a +database-name prefix. When a database is attached, all of its +tables which don't have duplicate names become the default table +of that name. Any tables of that name attached afterwards require the +database prefix. If the default table of a given name is detached, then +the last table of that name attached becomes the new default.
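A short illustrative sequence (the file and table names are hypothetical):

    ATTACH DATABASE 'archive.db' AS archive;
    SELECT * FROM archive.orders;   -- the qualified name always works
    DETACH DATABASE archive;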

    + +

    +Transactions involving multiple attached databases are atomic, +assuming that the main database is not ":memory:". If the main +database is ":memory:" then +transactions continue to be atomic within each individual +database file. But if the host computer crashes in the middle +of a COMMIT where two or more database files are updated, +some of those files might get the changes where others +might not. +

    + +

    There is a compile-time limit of SQLITE_MAX_ATTACHED +attached database files.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_comment.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_comment.html --- sqlite3-3.4.2/www/lang_comment.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_comment.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,101 @@ + + +SQLite Query Language: comment + + + + + +

    SQL As Understood By SQLite

    comment

    comment-syntax:

    + + +

Comments aren't SQL commands, but they can occur within the text of SQL queries passed to sqlite3_prepare_v2() and related interfaces. Comments are treated as whitespace by the parser. They can begin anywhere whitespace can be found, including inside expressions that span multiple lines.

    + +

    SQL comments begin with two consecutive "-" characters (ASCII 0x2d) +and extend up to and including the next newline character (ASCII 0x0a) +or until the end of input, whichever comes first.

    + +

C-style comments begin with "/*" and extend up to and including the next "*/" character pair or until the end of input, whichever comes first. C-style comments can span multiple lines.
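For example, both comment styles can appear inside a single statement (the table and column names are hypothetical):

    SELECT price,          -- SQL-style comment runs to the end of the line
           qty * price     /* C-style comment,
                              possibly spanning several lines */
      FROM line_items;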

    + +

    Comments can appear anywhere whitespace can occur, +including inside expressions and in the middle of other SQL statements. +Comments do not nest. +

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_conflict.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_conflict.html --- sqlite3-3.4.2/www/lang_conflict.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_conflict.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,149 @@ + + +SQLite Query Language: ON CONFLICT clause + + + + + +

    SQL As Understood By SQLite

    ON CONFLICT clause

    conflict-clause:

    + + +

    The ON CONFLICT clause is not a separate SQL command. It is a +non-standard clause that can appear in many other SQL commands. +It is given its own section in this document because it is not +part of standard SQL and therefore might not be familiar.

    + +

    The syntax for the ON CONFLICT clause is as shown above for +the CREATE TABLE command. For the INSERT and +UPDATE commands, the keywords "ON CONFLICT" are replaced by "OR", to make +the syntax seem more natural. For example, instead of +"INSERT ON CONFLICT IGNORE" we have "INSERT OR IGNORE". +The keywords change but the meaning of the clause is the same +either way.

    + +

    The ON CONFLICT clause specifies an algorithm used to resolve +constraint conflicts. There are five choices: ROLLBACK, ABORT, +FAIL, IGNORE, and REPLACE. The default algorithm is ABORT. This +is what they mean:

    + +
    +
    ROLLBACK
    +

    When a constraint violation occurs, an immediate ROLLBACK +occurs, thus ending the current transaction, and the command aborts +with a return code of SQLITE_CONSTRAINT. If no transaction is +active (other than the implied transaction that is created on every +command) then this algorithm works the same as ABORT.

    + +
    ABORT
    +

    When a constraint violation occurs, the command backs out +any prior changes it might have made and aborts with a return code +of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes +from prior commands within the same transaction +are preserved. This is the default behavior.

    + +
    FAIL
    +

When a constraint violation occurs, the command aborts with a return code SQLITE_CONSTRAINT. But any changes to the database that the command made prior to encountering the constraint violation are preserved and are not backed out. For example, if an UPDATE statement encounters a constraint violation on the 100th row that it attempts to update, then the first 99 row changes are preserved but changes to rows 100 and beyond never occur.

    + +
    IGNORE
    +

    When a constraint violation occurs, the one row that contains +the constraint violation is not inserted or changed. But the command +continues executing normally. Other rows before and after the row that +contained the constraint violation continue to be inserted or updated +normally. No error is returned.

    + +
    REPLACE
    +

    When a UNIQUE constraint violation occurs, the pre-existing rows +that are causing the constraint violation are removed prior to inserting +or updating the current row. Thus the insert or update always occurs. +The command continues executing normally. No error is returned. +If a NOT NULL constraint violation occurs, the NULL value is replaced +by the default value for that column. If the column has no default +value, then the ABORT algorithm is used. If a CHECK constraint violation +occurs then the IGNORE algorithm is used.

    + +

    When this conflict resolution strategy deletes rows in order to +satisfy a constraint, it does not invoke delete triggers +on those rows. Nor is the update hook invoked. +The exceptional behaviors defined in this paragraph might change +in a future release.

    +
    + +

The algorithm specified in the OR clause of an INSERT or UPDATE overrides any algorithm specified in a CREATE TABLE. If no algorithm is specified anywhere, the ABORT algorithm is used.
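For instance, assuming a table with a UNIQUE column (all names and values are hypothetical):

    CREATE TABLE users(id INTEGER PRIMARY KEY, email TEXT UNIQUE ON CONFLICT IGNORE);
    INSERT OR REPLACE INTO users(id, email) VALUES(1, 'a@example.com');
    INSERT OR IGNORE  INTO users(id, email) VALUES(2, 'a@example.com');  -- silently skipped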

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_corefunc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_corefunc.html --- sqlite3-3.4.2/www/lang_corefunc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_corefunc.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,285 @@ + + +SQLite Query Language: Core Functions + + + + + +

    SQL As Understood By SQLite

    Core Functions

    + +

    The core functions shown below are available by default. +Date & Time functions and +aggregate functions are documented separately. An +application may define additional +functions written in C and added to the database engine using +the sqlite3_create_function() API.

    + + + + + +
    abs(X) + + Return the absolute value of the numeric + argument X. Return NULL if X is NULL. Return 0.0 if + X is not a numeric value. +
changes()  Return the number of database rows that were changed or inserted or deleted by the most recently completed SQL statement. This is a wrapper around the sqlite3_changes() C/C++ function.
    coalesce(X,Y,...) + + Return a copy of the first non-NULL argument. If + all arguments are NULL then NULL is returned. There must be at least + 2 arguments. +
    glob(X,Y) + + This function is used to implement the + "Y GLOB X" syntax of SQLite. + Note that the X and Y arguments are reversed in the glob() function + relative to the infix GLOB operator. + The sqlite3_create_function() + interface can + be used to override this function and thereby change the operation + of the GLOB operator.
ifnull(X,Y)  Return a copy of the first non-NULL argument. If both arguments are NULL then NULL is returned. The ifnull() function and coalesce() with two arguments are interchangeable.
    hex(X) + + The argument is interpreted as a BLOB. The result + is a hexadecimal rendering of the content of that blob. +
last_insert_rowid()  Return the ROWID of the last row inserted from the current connection to the database. This is the same value that would be returned from the sqlite3_last_insert_rowid() API function.
    length(X) + + Return the string length of X in characters if X is a string, + or in bytes if X is a blob. +
    like(X,Y)

    like(X,Y,Z)
    + + This function is used to implement the "Y LIKE X [ESCAPE Z]" + syntax of SQL. If the optional ESCAPE clause is present, then the + user-function is invoked with three arguments. Otherwise, it is + invoked with two arguments only. Note that the X and Y parameters are + reversed in the like() function relative to the infix LIKE operator. + The sqlite3_create_function() interface can be used to override this + function and thereby change the operation of the + LIKE operator. When doing this, it may be important + to override both the two and three argument versions of the like() + function. Otherwise, different code may be called to implement the + LIKE operator depending on whether or not an ESCAPE clause was + specified. +
    load_extension(X)

    load_extension(X,Y)
    + + Load SQLite extensions out of the shared library + file named X using the entry point Y. The result + is a NULL. If Y is omitted then the default entry point + of sqlite3_extension_init is used. This function raises + an exception if the extension fails to load or initialize correctly. + +

    This function will fail if the extension attempts to modify + or delete a SQL function or collating sequence. The + extension can add new functions or collating sequences, but cannot + modify or delete existing functions or collating sequences because + those functions and/or collating sequences might be used elsewhere + in the currently running SQL statement. To load an extension that + changes or deletes functions or collating sequences, use the + sqlite3_load_extension() C-language API.

    +
    lower(X) + + Return a copy of string X with all ASCII characters + converted to lower case. The default built-in lower() function works + for ASCII characters only. To do case conversions on non-ASCII + characters, load the ICU extension. +
    ltrim(X)

    ltrim(X,Y)
    + + Return a string formed by removing any and all + characters that appear in Y from the left side of X. + If the Y argument is omitted, spaces are removed. +
max(X,Y,...)  Return the argument with the maximum value, or NULL if any argument is NULL. Note that max() is a simple function when it has 2 or more arguments but converts to an aggregate function if given only a single argument.
    min(X,Y,...) + + Return the argument with the minimum value. + Note that min() is a simple function when + it has 2 or more arguments but converts to an aggregate function if given + only a single argument. +
    nullif(X,Y) + + Return the first argument if the arguments are different, + otherwise return NULL. +
quote(X)  This routine returns a string which is the value of its argument suitable for inclusion into another SQL statement. Strings are surrounded by single-quotes with escapes on interior quotes as needed. BLOBs are encoded as hexadecimal literals. SQLite uses this function internally in its implementation of VACUUM, so if this function is overloaded to provide incompatible behavior, the VACUUM command will likely cease to work.
    random() + + Return a pseudo-random integer + between -9223372036854775808 and +9223372036854775807. +
randomblob(N)  Return an N-byte blob containing pseudo-random bytes. N should be a positive integer.

    Hint: applications can generate globally unique identifiers + using this function together with hex() and/or + lower() like this:

    + +
    + hex(randomblob(16))

    + lower(hex(randomblob(16))) +
    +
replace(X,Y,Z)  Return a string formed by substituting string Z for every occurrence of string Y in string X. The BINARY collating sequence is used for comparisons. If Y is an empty string then return X unchanged.
    round(X)

    round(X,Y)
    + + Round off the number X to Y digits to the + right of the decimal point. If the Y argument is omitted, 0 is + assumed. +
    rtrim(X)

    rtrim(X,Y)
    + + Return a string formed by removing any and all + characters that appear in Y from the right side of X. + If the Y argument is omitted, spaces are removed. +
soundex(X)  Compute the soundex encoding of the string X. The string "?000" is returned if the argument is NULL. This function is omitted from SQLite by default. It is only available if the -DSQLITE_SOUNDEX=1 compiler option is used when SQLite is built.
    sqlite_version() + + Return the version string for the SQLite library + that is running. Example: "3.5.9" +
    substr(X,Y,Z)

    substr(X,Y)
Return a substring of input string X that begins with the Y-th character and which is Z characters long. If Z is omitted then all characters through the end of the string are returned. The left-most character of X is number 1. If Y is negative then the first character of the substring is found by counting from the right rather than the left. If X is a string then character indices refer to actual UTF-8 characters. If X is a BLOB then the indices refer to bytes.
    total_changes() + + Return the number of row changes caused by INSERT, UPDATE or DELETE + statements since the current database connection was opened. + This function is a wrapper around the sqlite3_total_changes() + C/C++ interface. +
    trim(X)

    trim(X,Y)
    + + Return a string formed by removing any and all + characters that appear in Y from both ends of X. + If the Y argument is omitted, spaces are removed. +
    typeof(X) + + Return the datatype of the expression X. The only + return values are "null", "integer", "real", "text", and "blob". +
    upper(X) + + Return a copy of input string X converted to all + upper-case letters. The implementation of this function uses the C library + routine toupper() which means it may not work correctly on + non-ASCII UTF-8 strings. +
    zeroblob(N) + + Return a BLOB consisting of N bytes of 0x00. SQLite + manages these zeroblobs very efficiently. Zeroblobs can be used to + reserve space for a BLOB that is later written using + incremental BLOB I/O. + This SQL function is implemented using the sqlite3_result_zeroblob() + routine from the C/C++ interface. +
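A few of the functions above in action (the values are purely illustrative):

    SELECT abs(-7), coalesce(NULL, 'fallback'), length('hello');
    SELECT lower(hex(randomblob(16)));   -- a random 32-character hexadecimal identifier
    SELECT quote('it''s');               -- renders the literal 'it''s'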
    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_createindex.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_createindex.html --- sqlite3-3.4.2/www/lang_createindex.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_createindex.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,131 @@ + + +SQLite Query Language: CREATE INDEX + + + + + +

    SQL As Understood By SQLite

    CREATE INDEX

    create-index-stmt:

    +

    indexed-column:

    + + +

    The CREATE INDEX command consists of the keywords "CREATE INDEX" followed +by the name of the new index, the keyword "ON", the name of a previously +created table that is to be indexed, and a parenthesized list of names of +columns in the table that are used for the index key.
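For example (the index, table, and column names are hypothetical):

    CREATE UNIQUE INDEX idx_customer_email ON customers(email);
    CREATE INDEX idx_orders_date ON orders(order_date DESC, customer_id);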

    + +

    Each column name can be followed by one of the "ASC" or "DESC" keywords +to indicate sort order. The sort order may or may not be ignored depending +on the database file format. The "legacy" file format ignores index +sort order. The descending index file format takes index sort order +into account. Only copies of SQLite newer than version 3.3.0 +(released on 2006-01-10) are able to understand the newer descending +index file format and so for compatibility with older versions of +SQLite, the legacy file format is generated by default. Use the +legacy_file_format pragma to modify this behavior and generate +databases that use the newer file format. Future versions of SQLite +may begin to generate the newer file format by default.

    + +

    The COLLATE clause following each column name defines a collating +sequence used for text entries in that column. The default collating +sequence is the collating sequence defined for that column in the +CREATE TABLE statement. Or if no collating sequence is otherwise defined, +the built-in BINARY collating sequence is used.

    + +

    There are no arbitrary limits on the number of indices that can be +attached to a single table. The number of columns in an index is +limited to SQLITE_MAX_COLUMN.

    + +

If the UNIQUE keyword appears between CREATE and INDEX then duplicate index entries are not allowed. Any attempt to insert a duplicate entry will result in an error. For the purposes of unique indices, all NULL values are considered to be different from all other NULL values and are thus unique. This is one of the two possible interpretations of the SQL-92 standard (the language in the standard is ambiguous) and is the interpretation followed by PostgreSQL, MySQL, Firebird, and Oracle. Informix and Microsoft SQL Server follow the other interpretation of the standard.

    + +

    The text +of each CREATE INDEX statement is stored in the sqlite_master +or sqlite_temp_master table, depending on whether the table +being indexed is temporary. Every time the database is opened, +all CREATE INDEX statements +are read from the sqlite_master table and used to regenerate +SQLite's internal representation of the index layout.

    + +

If the optional IF NOT EXISTS clause is present and another index with the same name already exists, then this command becomes a no-op.

    + +

    Indexes are removed with the DROP INDEX +command.

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_createtable.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_createtable.html --- sqlite3-3.4.2/www/lang_createtable.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_createtable.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,263 @@ + + +SQLite Query Language: CREATE TABLE + + + + + +

    SQL As Understood By SQLite

    CREATE TABLE

    create-table-stmt:

    +

    column-def:

    +

    type-name:

    +

    column-constraint:

    +

    table-constraint:

    +

    foreign-key-clause:

    + + +

A CREATE TABLE statement is basically the keywords "CREATE TABLE" followed by the name of a new table and a parenthesized list of column definitions and constraints. Table names that begin with "sqlite_" are reserved for use by the engine.

    + +

Each column definition is the name of the column optionally followed by the datatype for that column, then one or more optional column constraints. SQLite uses dynamic typing; the datatype for the column does not restrict what data may be put in that column. The UNIQUE constraint causes a unique index to be created on the specified columns. All NULL values are considered different from each other and from all other values for the purpose of determining uniqueness, hence a UNIQUE column may contain multiple entries with the value of NULL. The COLLATE clause specifies what text collating function to use when comparing text entries for the column. The built-in BINARY collating function is used by default.

The DEFAULT constraint specifies a default value to use when doing an INSERT. The value may be NULL, a string constant or a number. The default value may also be one of the special case-independent keywords CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP. If the value is NULL, a string constant or number, it is literally inserted into the column whenever an INSERT statement that does not specify a value for the column is executed. If the value is CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP, then the current UTC date and/or time is inserted into the column. For CURRENT_TIME, the format is HH:MM:SS. For CURRENT_DATE, YYYY-MM-DD. The format for CURRENT_TIMESTAMP is "YYYY-MM-DD HH:MM:SS".
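A small sketch pulling these pieces together (all names are hypothetical):

    CREATE TABLE invoices(
      id         INTEGER PRIMARY KEY,
      customer   TEXT NOT NULL,
      status     TEXT DEFAULT 'open' COLLATE NOCASE,
      created_at TEXT DEFAULT CURRENT_TIMESTAMP,
      UNIQUE(customer, created_at)
    );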

    + +

    The PRIMARY KEY attribute normally creates a UNIQUE index on +the column or columns that are specified as the PRIMARY KEY. The only +exception to this behavior is special INTEGER PRIMARY KEY column, +described below. +According to the SQL standard, PRIMARY KEY should imply NOT NULL. +Unfortunately, due to a long-standing coding oversight, this is not +the case in SQLite. SQLite allows NULL values +in a PRIMARY KEY column. We could change SQLite to conform to the +standard (and we might do so in the future), but by the time the +oversight was discovered, SQLite was in such wide use that we feared +breaking legacy code if we fixed the problem. So for now we have +chosen to continue allowing NULLs in PRIMARY KEY columns. +Developers should be aware, however, that we may change SQLite to +conform to the SQL standard in future and should design new programs +accordingly.

    + +

    SQLite uses dynamic typing instead of static typing. Except for the +special case of INTEGER PRIMARY KEY, SQLite will allow values of any +type to be stored in any column regardless of the declared datatype of +that column. The declared datatype is a type affinity that +SQLite attempts to comply with, but the operation will proceed even if +compliance is not possible.

    + +

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" +and "TABLE" then the table that is created is only visible +within that same database connection +and is automatically deleted when +the database connection is closed. Any indices created on a temporary table +are also temporary. Temporary tables and indices are stored in a +separate file distinct from the main database file.

    + +

    If a <database-name> is specified, then the table is created in +the named database. It is an error to specify both a <database-name> +and the TEMP keyword, unless the <database-name> is "temp". If no +database name is specified, and the TEMP keyword is not present, +the table is created in the main database.

    + +

The optional conflict clause following each constraint allows the specification of an alternative default constraint conflict resolution algorithm for that constraint. The default is ABORT. Different constraints within the same table may have different default conflict resolution algorithms. If an INSERT or UPDATE statement specifies a different conflict resolution algorithm, then that algorithm is used in place of the default algorithm specified in the CREATE TABLE statement. See the section titled ON CONFLICT for additional information.

    + +

    CHECK constraints are supported as of version 3.3.0. Prior +to version 3.3.0, CHECK constraints were parsed but not enforced.

    + +

    The number of columns in a table is limited by the +SQLITE_MAX_COLUMN compile-time parameter. +A single row of a table cannot store more than +SQLITE_MAX_LENGTH bytes of data. +Both of these limits can be lowered at runtime using the +sqlite3_limit() C/C++ interface.

    + + +

    The CREATE TABLE AS form defines the table to be +the result set of a query. The names of the table columns are +the names of the columns in the result.

    + +

The text of each CREATE TABLE statement is stored in the sqlite_master table. Every time the database is opened, all CREATE TABLE statements are read from the sqlite_master table and used to regenerate SQLite's internal representation of the table layout. If the original command was a CREATE TABLE AS, then an equivalent CREATE TABLE statement is synthesized and stored in sqlite_master in place of the original command. The text of CREATE TEMPORARY TABLE statements is stored in the sqlite_temp_master table.

    + +

If the optional IF NOT EXISTS clause is present and another table with the same name already exists, then this command becomes a no-op.

    + +

    Tables are removed using the DROP TABLE +statement.

    + + + +

    ROWIDs and the INTEGER PRIMARY KEY

    + +

    Every row of every SQLite table has a 64-bit signed integer key +that is unique within the same table. +This integer is usually called the "rowid". The rowid is the actual key used +in the B-Tree that implements an SQLite table. Rows are stored in +rowid order. The +rowid value can be accessed using one of the special names +"ROWID", "OID", or "_ROWID_".

    + +

    +If a column is declared to be an INTEGER PRIMARY KEY, then that column is not +a "real" database column but instead becomes +an alias for the rowid. Unlike normal SQLite columns, the rowid +must be a non-NULL integer value. The rowid is not able to hold +floating point values, strings, BLOBs, or NULLs.
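As a small illustration (the names are hypothetical), the declared column and the rowid are the same value:

    CREATE TABLE notes(id INTEGER PRIMARY KEY, body TEXT);
    INSERT INTO notes(body) VALUES('hello');
    SELECT rowid, id FROM notes;   -- both columns return the same integer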

    + +
    +An INTEGER PRIMARY KEY column is an alias for the 64-bit signed integer rowid. +
    + +

An INTEGER PRIMARY KEY column can also include the keyword AUTOINCREMENT. The AUTOINCREMENT keyword modifies the way that B-Tree keys are automatically generated. Additional detail on automatic B-Tree key generation is available separately.

    + +

The special behavior of INTEGER PRIMARY KEY is only available if the type name is exactly "INTEGER" (in any mixture of upper and lower case.) Other integer type names like "INT" or "BIGINT" or "SHORT INTEGER" or "UNSIGNED INTEGER" cause the primary key column to behave as an ordinary table column with integer affinity and a unique index, not as an alias for the rowid. The special behavior of INTEGER PRIMARY KEY is only available if the primary key is a single column. Multi-column primary keys do not become aliases for the rowid. The AUTOINCREMENT keyword only works on a column that is an alias for the rowid.

    + +

    Note that searches against a rowid are generally about twice as +fast as searches against any other PRIMARY KEY or indexed value. +

    + +

Goofy behavior alert: The following three declarations all cause the column "x" to be an alias for the rowid:

    + +
      +
    • CREATE TABLE t(x INTEGER PRIMARY KEY ASC, y, z); +
    • CREATE TABLE t(x INTEGER, y, z, PRIMARY KEY(x ASC)); +
    • CREATE TABLE t(x INTEGER, y, z, PRIMARY KEY(x DESC)); +
    + +

    But, in contrast, the following declaration does not result in +"x" being an alias for the rowid:

    + +
      +
    • CREATE TABLE t(x INTEGER PRIMARY KEY DESC, y, z); +
    + +

This asymmetrical behavior is unfortunate and is really due to a bug in the parser in early versions of SQLite. But fixing the bug would result in very serious backwards incompatibilities. The SQLite developers feel that goofy behavior in a corner case is far better than a compatibility break, so the original behavior is retained.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_createtrigger.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_createtrigger.html --- sqlite3-3.4.2/www/lang_createtrigger.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_createtrigger.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,220 @@ + + +SQLite Query Language: CREATE TRIGGER + + + + + +

    SQL As Understood By SQLite

    CREATE TRIGGER

    create-trigger-stmt:

    + + +

    The CREATE TRIGGER statement is used to add triggers to the +database schema. Triggers are database operations +that are automatically performed when a specified database event +occurs.

    + +

A trigger may be specified to fire whenever a DELETE, INSERT, or UPDATE of a particular database table occurs, or whenever an UPDATE of one or more specified columns of a table occurs.

    + +

    At this time SQLite supports only FOR EACH ROW triggers, not FOR EACH +STATEMENT triggers. Hence explicitly specifying FOR EACH ROW is optional. FOR +EACH ROW implies that the SQL statements specified in the trigger +may be executed (depending on the WHEN clause) for each database row being +inserted, updated or deleted by the statement causing the trigger to fire.

    + +

    Both the WHEN clause and the trigger actions may access elements of +the row being inserted, deleted or updated using references of the form +"NEW.column-name" and "OLD.column-name", where +column-name is the name of a column from the table that the trigger +is associated with. OLD and NEW references may only be used in triggers on +events for which they are relevant, as follows:

    + + + + + + + + + + + + + + +
    INSERTNEW references are valid
    UPDATENEW and OLD references are valid
    DELETEOLD references are valid
    +

    + +

    If a WHEN clause is supplied, the SQL statements specified +are only executed for rows for which the WHEN +clause is true. If no WHEN clause is supplied, the SQL statements +are executed for all rows.

    + +

    The BEFORE or AFTER keyword determines when the trigger actions +will be executed relative to the insertion, modification or removal of the +associated row.

    + +

    An ON CONFLICT clause may be specified as part of an UPDATE or INSERT +action within the body of the trigger. +However if an ON CONFLICT clause is specified as part of +the statement causing the trigger to fire, then this conflict handling +policy is used instead.

    + +

Triggers are automatically dropped when the table that they are associated with (the table-name table) is dropped. However, if the trigger actions reference other tables, the trigger is not dropped or modified if those other tables are dropped or modified.

    + +

    Triggers are removed using the DROP TRIGGER statement.

    + + + + + +

    INSTEAD OF trigger

    + +

    Triggers may be created on views, as well as ordinary tables, by +specifying INSTEAD OF in the CREATE TRIGGER statement. +If one or more ON INSERT, ON DELETE +or ON UPDATE triggers are defined on a view, then it is not an +error to execute an INSERT, DELETE or UPDATE statement on the view, +respectively. Thereafter, +executing an INSERT, DELETE or UPDATE on the view causes the associated +triggers to fire. The real tables underlying the view are not modified +(except possibly explicitly, by a trigger program).

    + +

Note that the sqlite3_changes() and sqlite3_total_changes() interfaces do not count INSTEAD OF trigger firings, but the count_changes pragma does count INSTEAD OF trigger firings.

    + +

    Examples

    + +

    Assuming that customer records are stored in the "customers" table, and +that order records are stored in the "orders" table, the following trigger +ensures that all associated orders are redirected when a customer changes +his or her address:

    + +
    +CREATE TRIGGER update_customer_address UPDATE OF address ON customers 
    +  BEGIN
    +    UPDATE orders SET address = new.address WHERE customer_name = old.name;
    +  END;
    +
    + +

    With this trigger installed, executing the statement:

    + +
    +UPDATE customers SET address = '1 Main St.' WHERE name = 'Jack Jones';
    +
    + +

    causes the following to be automatically executed:

    + +
    +UPDATE orders SET address = '1 Main St.' WHERE customer_name = 'Jack Jones';
    +
    + +

    Rowids and BEFORE triggers

    + +

    Note that currently, triggers may behave oddly when created on tables + with INTEGER PRIMARY KEY fields. If a BEFORE trigger program modifies the + INTEGER PRIMARY KEY field of a row that will be subsequently updated by the + statement that causes the trigger to fire, then the update may not occur. + The workaround is to declare the table with a PRIMARY KEY column instead + of an INTEGER PRIMARY KEY column.

    + +

    The RAISE() function

    + +

    A special SQL function RAISE() may be used within a trigger-program, +with the following syntax

    + +

    raise-function:

    + + +

    When one of the first three forms is called during trigger-program +execution, the specified ON CONFLICT processing is performed +(either ABORT, FAIL or ROLLBACK) and the current query terminates. +An error code of SQLITE_CONSTRAINT is returned to the application, +along with the specified error message.

    + +

When RAISE(IGNORE) is called, the remainder of the current trigger program, the statement that caused the trigger program to execute, and any subsequent trigger programs that would have been executed are abandoned. No database changes are rolled back. If the statement that caused the trigger program to execute is itself part of a trigger program, then that trigger program resumes execution at the beginning of the next step.
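A minimal sketch of RAISE() inside a trigger body (the table, trigger, and message text are hypothetical):

    CREATE TRIGGER orders_no_negative BEFORE INSERT ON orders
    BEGIN
      SELECT RAISE(ABORT, 'quantity must be positive') WHERE new.qty <= 0;
    END;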

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_createview.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_createview.html --- sqlite3-3.4.2/www/lang_createview.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_createview.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,102 @@ + + +SQLite Query Language: CREATE VIEW + + + + + +

    SQL As Understood By SQLite

    CREATE VIEW

    create-view-stmt:

    + + +

    The CREATE VIEW command assigns a name to a pre-packaged +SELECT +statement. Once the view is created, it can be used in the FROM clause +of another SELECT in place of a table name. +
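For example (the view, table, and column names are hypothetical):

    CREATE VIEW v_open_orders AS
      SELECT id, customer, total FROM orders WHERE status = 'open';
    SELECT count(*) FROM v_open_orders;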

    + +

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" +and "VIEW" then the view that is created is only visible to the +process that opened the database and is automatically deleted when +the database is closed.

    + +

If a <database-name> is specified, then the view is created in the named database. It is an error to specify both a <database-name> and the TEMP keyword, unless the <database-name> is "temp". If no database name is specified, and the TEMP keyword is not present, the view is created in the main database.

    + +

    You cannot DELETE, INSERT, or UPDATE a view. Views are read-only +in SQLite. However, in many cases you can use an +INSTEAD OF trigger on the view to accomplish +the same thing. Views are removed +with the DROP VIEW command.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_createvtab.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_createvtab.html --- sqlite3-3.4.2/www/lang_createvtab.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_createvtab.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,108 @@ + + +SQLite Query Language: CREATE VIRTUAL TABLE + + + + + +

    SQL As Understood By SQLite

    CREATE VIRTUAL TABLE

    create-virtual-table-stmt:

    + + +

    A virtual table is an interface to an external storage or computation +engine that appears to be a table but does not actually store information +in the database file.

    + +

    In general, you can do anything with a virtual table that can be done +with an ordinary table, except that you cannot create indices or triggers on a +virtual table. Some virtual table implementations might impose additional +restrictions. For example, many virtual tables are read-only. +Virtual tables cannot be used in +shared cache mode.

    + +

    The <module-name> is the name of an object that implements +the virtual table. The <module-name> must be registered with +the SQLite database connection using +sqlite3_create_module() or sqlite3_create_module_v2() +prior to issuing the CREATE VIRTUAL TABLE statement. +The module takes zero or more comma-separated arguments. +The arguments can be just about any text as long as it has balanced +parentheses. The argument syntax is sufficiently general that the +arguments can be made to appear as column definitions in a traditional +CREATE TABLE statement. +SQLite passes the module arguments directly +to the module without any interpretation. It is the responsibility +of the module implementation to parse and interpret its own arguments.
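For example, assuming the FTS3 full-text module is compiled in or has been registered on the connection (the table and column names are hypothetical):

    CREATE VIRTUAL TABLE docs USING fts3(title, body);
    INSERT INTO docs(title, body) VALUES('greeting', 'hello full-text search');
    SELECT title FROM docs WHERE body MATCH 'hello';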

    + +

    A virtual table is destroyed using the ordinary +DROP TABLE statement. There is no +DROP VIRTUAL TABLE statement.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_datefunc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_datefunc.html --- sqlite3-3.4.2/www/lang_datefunc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_datefunc.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,338 @@ + + +SQLite Query Language: Date And Time Functions + + + + + +

    SQL As Understood By SQLite

    Date And Time Functions

    + +

    +SQLite supports five date and time functions as follows: +

    + +

    +

      +
    1. date(timestring, modifier, modifier, ...)
    2. +
    3. time(timestring, modifier, modifier, ...)
    4. +
    5. datetime(timestring, modifier, modifier, ...)
    6. +
    7. julianday(timestring, modifier, modifier, ...)
    8. +
    9. strftime(format, timestring, modifier, modifier, ...)
    10. +
    + +

    +All five functions take a time string as an argument. The time string +is followed by zero or more modifiers. +The strftime() function also takes a format string as its first argument. +

    + +

    +The date() function returns the date in this format: YYYY-MM-DD. +The time() function returns the time as HH:MM:SS. +The datetime() function returns "YYYY-MM-DD HH:MM:SS". +The julianday() function returns the +Julian day - +number of days since noon in Greenwich on November 24, 4714 B.C. +(Proleptic +Gregorian calendar). +The strftime() routine returns the date formatted according to +the format string specified as the first argument. +The format string supports the most common substitutions found in the + +strftime() function from +the standard C library plus two new substitutions, %f and %J. +The following is a complete list of valid strftime() substitutions: +

    + +
    + + + +
    %d day of month: 00 +
    %f fractional seconds: SS.SSS +
    %H hour: 00-24 +
    %j day of year: 001-366 +
    %J Julian day number +
    %m month: 01-12 +
    %M minute: 00-59 +
    %s seconds since 1970-01-01 +
    %S seconds: 00-59 +
%w day of week 0-6 with Sunday==0
    %W week of year: 00-53 +
    %Y year: 0000-9999 +
    %% % +
    +
    + +

    +Notice that all other date and time functions can be expressed +in terms of strftime(): +

    + +
    + +
    FunctionEquivalent strftime() +
    date(...) strftime('%Y-%m-%d', ...) +
    time(...) strftime('%H:%M:%S', ...) +
    datetime(...) strftime('%Y-%m-%d %H:%M:%S', ...) +
    julianday(...) strftime('%J', ...) +
    +
    + +

The only reasons for providing functions other than strftime() are convenience and efficiency.

    + +

    Time Strings

    + +

    A time string can be in any of the following formats:

    + +
      +
    1. YYYY-MM-DD +
    2. YYYY-MM-DD HH:MM +
    3. YYYY-MM-DD HH:MM:SS +
    4. YYYY-MM-DD HH:MM:SS.SSS +
    5. YYYY-MM-DDTHH:MM +
    6. YYYY-MM-DDTHH:MM:SS +
    7. YYYY-MM-DDTHH:MM:SS.SSS +
    8. HH:MM +
    9. HH:MM:SS +
    10. HH:MM:SS.SSS +
    11. now +
    12. DDDDDDDDDD +
    + +

    +In formats 5 through 7, the "T" is a literal character separating +the date and the time, as required by +ISO-8601. +Formats 8 through 10 that specify only a time assume a date of +2000-01-01. Format 11, the string 'now', is converted into the +current date and time as obtained from the xCurrentTime method +of the sqlite3_vfs object in use. + +Universal Coordinated Time (UTC) is used. +Format 12 is the +Julian day number +expressed as a floating point value. +

    + +

    Modifiers

    + +

    The time string can be followed by zero or more modifiers that +alter the date and time string. Each modifier +is a transformation that is applied to the time string to its left. +Modifiers are applied from left to right; order is important. +The available modifiers are as follows.

    + +
      +
    1. NNN days +
    2. NNN hours +
    3. NNN minutes +
    4. NNN.NNNN seconds +
    5. NNN months +
    6. NNN years +
    7. start of month +
    8. start of year +
    9. start of day +
    10. weekday N +
    11. unixepoch +
    12. localtime +
    13. utc +
    + +

The first six modifiers (1 through 6) simply add the specified amount of time to the date specified by the preceding timestring. Note that "±NNN months" works by rendering the original date into the YYYY-MM-DD format, adding the ±NNN to the MM month value, then normalizing the result. Thus, for example, the date 2001-03-31 modified by '+1 month' initially yields 2001-04-31, but April only has 30 days so the date is normalized to 2001-05-01. A similar effect occurs when the original date is February 29 of a leap year and the modifier is ±N years where N is not a multiple of four.
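The normalization described above can be observed directly, for example:

    SELECT date('2001-03-31', '+1 month');   -- 2001-05-01
    SELECT date('2004-02-29', '+1 year');    -- 2005-03-01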

    + +

    The "start of" modifiers (7 through 9) shift the date backwards +to the beginning of the current month, year or day.

    + +

    The "weekday" modifier advances the date forward to the next date +where the weekday number is N. Sunday is 0, Monday is 1, and so forth.

    + +

    The "unixepoch" modifier (11) only works if it immediately follows +a timestring in the DDDDDDDDDD format. +This modifier causes the DDDDDDDDDD to be interpreted not +as a Julian day number as it normally would be, but as +Unix Time - the +number of seconds since 1970. If the "unixepoch" modifier does not +follow a timestring of the form DDDDDDDDDD which expresses the number +of seconds since 1970 or if other modifiers +separate the "unixepoch" modifier from prior DDDDDDDDDD then the +behavior is undefined.

    + +

    The "localtime" modifier (12) assumes the time string to its left is in +Universal Coordinated Time (UTC) and adjusts the time +string so that it displays localtime. If "localtime" +follows a time that is not UTC, then the behavior is undefined. +The "utc" is the opposite of "localtime". "utc" assumes that the string +to its left is in the local timezone and adjusts that string to be in UTC. +If the prior string is not in localtime, then the result of "utc" is +undefined.

    + +

    Examples

    + +

    Compute the current date.

    + +

    SELECT date('now');
    + +

    Compute the last day of the current month.

    + +
    SELECT date('now','start of month','+1 month','-1 day'); +
    + +

    Compute the date and time given a unix timestamp 1092941466.

    + +
    + SELECT datetime(1092941466, 'unixepoch'); +
    + +

    Compute the date and time given a unix timestamp 1092941466, and +compensate for your local timezone.

    + +
    + SELECT datetime(1092941466, 'unixepoch', 'localtime'); +
    + +

    Compute the current unix timestamp.

    + +
    + SELECT strftime('%s','now'); +
    + +

Compute the number of days since the signing of the US Declaration of Independence.

    + +
    + SELECT julianday('now') - julianday('1776-07-04'); +
    + +

    Compute the number of seconds since a particular moment in 2004:

    + +
    + SELECT strftime('%s','now') - strftime('%s','2004-01-01 02:34:56'); +
    + +

    +Compute the date of the first Tuesday in October +for the current year. +

    + +
    + SELECT date('now','start of year','+9 months','weekday 2'); +
    + +

    Compute the time since the unix epoch in seconds +(like strftime('%s','now') except includes fractional part):

    + +
    + SELECT (julianday('now') - 2440587.5)*86400.0; +
    + +

    Caveats And Bugs

    + +

    The computation of local time depends heavily on the whim +of politicians and is thus difficult to get correct for +all locales. In this implementation, the standard C library +function localtime_r() is used to assist in the calculation of +local time. The +localtime_r() C function normally only works for years +between 1970 and 2037. For dates outside this range, SQLite +attempts to map the year into an equivalent year within +this range, do the calculation, then map the year back.

    + + +

    Date computations do not give correct results for dates +before Julian day number 0 (-4713-11-24 12:00:00).

    + +

    Non-Vista Windows platforms only support one set of DST rules. +Vista only supports two. Therefore, on these platforms, +historical DST calculations will be incorrect. +For example, in the US, in 2007 the DST rules changed. +Non-Vista Windows platforms apply the new 2007 DST rules +to all previous years as well. Vista does somewhat better +getting results correct back to 1986, when the rules were also changed.

    + +

All internal computations assume the +Gregorian calendar system. It is also assumed that every +day is exactly 86400 seconds in duration.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_delete.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_delete.html --- sqlite3-3.4.2/www/lang_delete.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_delete.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,133 @@ + + +SQLite Query Language: DELETE + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DELETE

    delete-stmt:

    +

    qualified-table-name:

    + + +

    The DELETE command is used to remove records from a table. +The command consists of the "DELETE FROM" keywords followed by +the name of the table from which records are to be removed. +

    + +

    Without a WHERE clause, all rows of the table are removed. +If a WHERE clause is supplied, then only those rows that match +the expression are removed.
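A brief sketch of both forms; the table and column names here are hypothetical, not taken from this page:

CREATE TABLE log(id INTEGER PRIMARY KEY, created TEXT, msg TEXT);
DELETE FROM log WHERE created < '2009-01-01';   -- removes only the matching rows
DELETE FROM log;                                -- no WHERE clause: removes every row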

    + + + +

    The Truncate Optimization

    + +

When the WHERE is omitted from a DELETE statement and the table +being deleted has no triggers, +SQLite uses an optimization to erase the entire table content +without having to visit each row of the table individually. +This "truncate" optimization makes the delete run much faster. +Prior to SQLite version 3.6.5, the truncate optimization +also meant that the sqlite3_changes() and +sqlite3_total_changes() interfaces +and the count_changes pragma +would not actually return the number of deleted rows. +That problem has been fixed as of version 3.6.5. + +

    The truncate optimization can be permanently disabled for all queries +by recompiling +SQLite with the SQLITE_OMIT_TRUNCATE_OPTIMIZATION compile-time switch.

    + +

    The truncate optimization can also be disabled at runtime using +the sqlite3_set_authorizer() interface. If an authorizer callback +returns SQLITE_IGNORE for an SQLITE_DELETE action code, then +the DELETE operation will proceed but the truncate optimization will +be bypassed and rows will be deleted one by one.

    + +

    Use Of LIMIT

    + +

    If SQLite is compiled with the SQLITE_ENABLE_UPDATE_DELETE_LIMIT +compile-time option, then the syntax of the DELETE statement is +extended by the addition of optional ORDER BY and LIMIT clauses:

    + +

    delete-stmt-limited:

    + + +

    The optional LIMIT clause can be used to limit the number of +rows deleted, and thereby limit the size of the transaction. +The ORDER BY clause is used only to determine which rows fall +within the LIMIT. The order in which rows are deleted is arbitrary +and is not determined by the ORDER BY clause.
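As a sketch, assuming a build compiled with SQLITE_ENABLE_UPDATE_DELETE_LIMIT and a hypothetical log table:

CREATE TABLE log(created TEXT, msg TEXT);
-- Delete at most 100 rows; ORDER BY only chooses which rows fall within the LIMIT
DELETE FROM log ORDER BY created LIMIT 100;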

    + +

    The presence of a LIMIT clause defeats the truncate optimization +causing all rows being deleted to be visited.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_detach.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_detach.html --- sqlite3-3.4.2/www/lang_detach.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_detach.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,88 @@ + + +SQLite Query Language: DETACH DATABASE + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DETACH DATABASE

    detach-stmt:

    + + +

    This statement detaches an additional database connection previously +attached using the ATTACH statement. +It is possible to have the same database file attached multiple times using +different names, and detaching one connection to a file will leave the +others intact.
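For example (the file name and schema name are hypothetical):

ATTACH DATABASE 'other.db' AS other;   -- attach a second database file
DETACH DATABASE other;                 -- detach it again by its schema name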

    + +

    This statement will fail if SQLite is in the middle of a transaction.

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_dropindex.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_dropindex.html --- sqlite3-3.4.2/www/lang_dropindex.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_dropindex.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,92 @@ + + +SQLite Query Language: DROP INDEX + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DROP INDEX

    drop-index-stmt:

    + + +

    The DROP INDEX statement removes an index added +with the CREATE INDEX statement. The index named is completely removed from +the disk. The only way to recover the index is to reenter the +appropriate CREATE INDEX command.

    + +

    The DROP INDEX statement does not reduce the size of the database +file in the default mode. +Empty space in the database is retained for later INSERT statements. To +remove free space in the database, use the VACUUM +command. If auto_vacuum mode is enabled for a database then space +will be freed automatically by DROP INDEX.

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_droptable.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_droptable.html --- sqlite3-3.4.2/www/lang_droptable.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_droptable.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,96 @@ + + +SQLite Query Language: DROP TABLE + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DROP TABLE

    drop-table-stmt:

    + + +

    The DROP TABLE statement removes a table added with the +CREATE TABLE statement. The name specified is the +table name. It is completely removed from the database schema and the +disk file. The table can not be recovered. All indices and triggers +associated with the table are also deleted.

    + +

    The DROP TABLE statement does not reduce the size of the database +file in the default mode. Empty space in the database is retained for +later INSERT statements. To +remove free space in the database, +use the VACUUM statement. +If auto_vacuum mode is enabled for a database then space +will be freed automatically by DROP TABLE.

    + +

    The optional IF EXISTS clause suppresses the error that would normally +result if the table does not exist.
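A minimal sketch (hypothetical table name):

CREATE TABLE t1(x);
DROP TABLE t1;              -- removes the table along with its indices and triggers
DROP TABLE IF EXISTS t1;    -- IF EXISTS suppresses the error now that t1 is gone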

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_droptrigger.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_droptrigger.html --- sqlite3-3.4.2/www/lang_droptrigger.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_droptrigger.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,84 @@ + + +SQLite Query Language: DROP TRIGGER + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DROP TRIGGER

    drop-trigger-stmt:

    + + +

    The DROP TRIGGER statement removes a trigger created by the +CREATE TRIGGER statement. The trigger is +deleted from the database schema. Note that triggers are automatically +dropped when the associated table is dropped.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_dropview.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_dropview.html --- sqlite3-3.4.2/www/lang_dropview.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_dropview.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,84 @@ + + +SQLite Query Language: DROP VIEW + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    DROP VIEW

    drop-view-stmt:

    + + +

    The DROP VIEW statement removes a view created by the +CREATE VIEW statement. The name specified is the +view name. It is removed from the database schema, but no actual data +in the underlying base tables is modified.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_explain.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_explain.html --- sqlite3-3.4.2/www/lang_explain.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_explain.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,98 @@ + + +SQLite Query Language: EXPLAIN + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    EXPLAIN

    sql-stmt:

    + + +

    An SQL statement can be preceded by the keyword "EXPLAIN" or +by the phrase "EXPLAIN QUERY PLAN". Either modification causes the +SQL statement to behave as a query and to return information about +how the SQL statement would have operated if the EXPLAIN keyword or +phrase had been omitted.

    + +

    When the EXPLAIN keyword appears by itself it causes the statement +to behave as a query that returns the sequence of +virtual machine instructions it would have +used to execute the command had the EXPLAIN keyword not been present. +When the EXPLAIN QUERY PLAN phrase appears, the statement returns +high-level information about what indices would have been used.
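For instance, assuming a hypothetical table t1 with an index on column a:

CREATE TABLE t1(a, b);
CREATE INDEX i1 ON t1(a);
EXPLAIN SELECT b FROM t1 WHERE a=5;              -- returns the virtual machine instructions
EXPLAIN QUERY PLAN SELECT b FROM t1 WHERE a=5;   -- returns a high-level summary of index usage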

    + +

    The output from EXPLAIN and EXPLAIN QUERY PLAN is intended for +interactive analysis and troubleshooting only. The details of the +output format are subject to change from one release of SQLite to the next. +Applications should not use EXPLAIN or EXPLAIN QUERY PLAN since +their behavior is undocumented, unspecified, and variable.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_expr.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_expr.html --- sqlite3-3.4.2/www/lang_expr.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_expr.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,339 @@ + + +SQLite Query Language: expression + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    expression

    expr:

    +

    literal-value:

    +

    signed-number:

    +

    raise-function:

    + + +

This section is different from the others. Most other sections of +this document talk about a particular SQL command. This section does +not talk about a standalone command but about "expressions" which are +subcomponents of most other commands.

    + + + +

    Operators

    +

    SQLite understands the following binary operators, in order from +highest to lowest precedence:

    + +
    +||
    +*    /    %
    ++    -
    +<<   >>   &    |
    +<    <=   >    >=
    +=    ==   !=   <>   IN  LIKE  GLOB  MATCH  REGEXP
    +AND   
    +OR
    +
    + +

    Supported unary prefix operators are these:

    + +
    +-    +    ~    NOT
    +
    + +

    The COLLATE operator can be thought of as a unary postfix +operator. The COLLATE operator has the highest precedence. +It always binds more tightly than any prefix unary operator or +any binary operator.

    + + +

    The unary operator + is a no-op. It can be applied +to strings, numbers, or blobs and it always gives as its result the +value of the operand.

    + +

Note that there are two variations of the equals and not equals +operators. Equals can be either = or ==. +The non-equals operator can be either != or <>. +The || operator is "concatenate" - it joins together +the two strings of its operands. +The operator % outputs the remainder of its left +operand modulo its right operand.

    + +

    The result of any binary operator is a numeric value, except +for the || concatenation operator which gives a string +result.

    + + + +

    Literal Values

    +

    +A literal value is a constant of some kind. +Literal values may be integers, floating point numbers, strings, +BLOBs, or NULLs. +Scientific notation is supported for floating point literal values. +The "." character is always used +as the decimal point even if the locale setting specifies "," for +this role - the use of "," for the decimal point would result in +syntactic ambiguity. A string constant is formed by enclosing the +string in single quotes ('). A single quote within the string can +be encoded by putting two single quotes in a row - as in Pascal. +C-style escapes using the backslash character are not supported because +they are not standard SQL. +BLOB literals are string literals containing hexadecimal data and +preceded by a single "x" or "X" character. For example:

    + +
    +X'53514C697465'
    +
    + +

    +A literal value can also be the token "NULL". +

    + + + +

    Parameters

    +

    +A "variable" or "parameter" token +specifies a placeholder in the expression for a literal +value that is filled in at runtime using the +sqlite3_bind() family of C/C++ interfaces. +Parameters can take several forms: +

    + + + + + + + + + + + + + + + + + + + + + +
    ?NNNA question mark followed by a number NNN holds a spot for the +NNN-th parameter. NNN must be between 1 and SQLITE_MAX_VARIABLE_NUMBER.
    ?A question mark that is not followed by a number holds a spot for +the next unused parameter.
    :AAAAA colon followed by an identifier name holds a spot for a named +parameter with the name AAAA. Named parameters are also numbered. +The number assigned is the next unused number. To avoid confusion, +it is best to avoid mixing named and numbered parameters.
    @AAAAAn "at" sign works exactly like a colon.
$AAAAA dollar-sign followed by an identifier name also holds a spot for a named +parameter with the name AAAA. The identifier name in this case can include +one or more occurrences of "::" and a suffix enclosed in "(...)" containing +any text at all. This syntax is the form of a variable name in the +Tcl programming language. The presence +of this syntax results from the fact that SQLite is really a +Tcl extension that has escaped into the wild.
    + + +

    Parameters that are not assigned values using +sqlite3_bind() are treated +as NULL.
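A sketch of the parameter forms as they appear in SQL text (the table is hypothetical; the values themselves are supplied later through sqlite3_bind()):

CREATE TABLE t1(a);
SELECT * FROM t1 WHERE a = ?;       -- next unused parameter number
SELECT * FROM t1 WHERE a = ?3;      -- explicitly parameter number 3
SELECT * FROM t1 WHERE a = :name;   -- named parameter ":name"
SELECT * FROM t1 WHERE a = @name;   -- "@" works exactly like ":"
SELECT * FROM t1 WHERE a = $name;   -- Tcl-style named parameter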

    + + + +

    The LIKE and GLOB operators

    +

The LIKE operator does a pattern matching comparison. The operand +to the right contains the pattern, the left hand operand contains the +string to match against the pattern. + +A percent symbol ("%") in the pattern matches any +sequence of zero or more characters in the string. An underscore +("_") in the pattern matches any single character in the +string. Any other character matches itself or its lower/upper case +equivalent (i.e. case-insensitive matching). (A bug: SQLite only +understands upper/lower case for ASCII characters. The +LIKE operator is case sensitive for unicode characters that are beyond +the ASCII range. For example, the expression 'a' LIKE 'A' +is TRUE but 'æ' LIKE 'Æ' is FALSE.).

    + +

    If the optional ESCAPE clause is present, then the expression +following the ESCAPE keyword must evaluate to a string consisting of +a single character. This character may be used in the LIKE pattern +to include literal percent or underscore characters. The escape +character followed by a percent symbol, underscore or itself matches a +literal percent symbol, underscore or escape character in the string, +respectively. + +
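For example (illustrative only; the escape character is chosen arbitrarily):

SELECT 'SQLite' LIKE 'sql%';                  -- 1: '%' matches any sequence, case-insensitively for ASCII
SELECT '50% done' LIKE '50\% %' ESCAPE '\';   -- 1: the escaped '%' matches a literal percent sign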

    The infix LIKE operator is implemented by calling the +application-defined SQL functions like(Y,X) or +like(Y,X,Z).

    + +

    The LIKE operator can be made case sensitive using the +case_sensitive_like pragma.

    + + + +

    The GLOB operator is similar to LIKE but uses the Unix +file globbing syntax for its wildcards. Also, GLOB is case +sensitive, unlike LIKE. Both GLOB and LIKE may be preceded by +the NOT keyword to invert the sense of the test. The infix GLOB +operator is implemented by calling the function +glob(Y,X) and can be modified by overriding +that function.

    + + + +

    The REGEXP operator is a special syntax for the regexp() +user function. No regexp() user function is defined by default +and so use of the REGEXP operator will normally result in an +error message. If a user-defined function named "regexp" +is added at run-time, that function will be called in order +to implement the REGEXP operator.

    + + + +

The MATCH operator is a special syntax for the match() +user function. The default match() function implementation +raises an exception and is not really useful for anything. +But extensions can override the match() function with more +helpful logic.

    + + + +

    The BETWEEN operator

    +

The BETWEEN operator is equivalent to a pair of comparisons. +"a BETWEEN b AND c" is +equivalent to +"a>=b AND a<=c". +The precedence of the BETWEEN operator is the same as the precedence +of operators == and != and LIKE, and it groups left to right. + +
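The equivalence can be seen directly:

SELECT 5 BETWEEN 1 AND 10;    -- 1
SELECT 5 >= 1 AND 5 <= 10;    -- 1, the same test written as a pair of comparisons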

    Table Column Names

    +

    A column name can be any of the names defined in the CREATE TABLE +statement or one of the following special identifiers: "ROWID", +"OID", or "_ROWID_". +These special identifiers all describe the +unique integer key (the rowid) associated with every +row of every table. +The special identifiers only refer to the row key if the CREATE TABLE +statement does not define a real column with the same name. Row keys +act like read-only columns. A row key can be used anywhere a regular +column can be used, except that you cannot change the value +of a row key in an UPDATE or INSERT statement. +"SELECT * ..." does not return the row key.

    + +

    Subqueries

    +

SELECT statements can appear in expressions as either the +right-hand operand of the IN operator, as a scalar quantity, or +as the operand of an EXISTS operator. +As a scalar quantity or the operand of an IN operator, +the SELECT should have only a single column in its +result. Compound SELECTs (connected with keywords like UNION or +EXCEPT) are allowed. +With the EXISTS operator, the columns in the result set of the SELECT are +ignored and the expression returns TRUE if one or more rows exist +and FALSE if the result set is empty. +If no terms in the SELECT expression refer to values in the containing +query, then the expression is evaluated once prior to any other +processing and the result is reused as necessary. If the SELECT expression +does contain variables from the outer query, then the SELECT is reevaluated +every time it is needed.

    + +

    When a SELECT is the right operand of the IN operator, the IN +operator returns TRUE if the result of the left operand is any of +the values generated by the select. The IN operator may be preceded +by the NOT keyword to invert the sense of the test.

    + +

    When a SELECT appears within an expression but is not the right +operand of an IN operator, then the first row of the result of the +SELECT becomes the value used in the expression. If the SELECT yields +more than one result row, all rows after the first are ignored. If +the SELECT yields no rows, then the value of the SELECT is NULL.

    + +

    CAST expressions

    +

A CAST expression changes the datatype of the expression into the +type specified by <type>. +<type> can be any non-empty type name that is valid +for the type in a column definition of a CREATE TABLE statement.
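For example:

SELECT CAST('123' AS INTEGER);   -- 123
SELECT CAST(3.9 AS INTEGER);     -- 3
SELECT CAST(42 AS TEXT);         -- '42'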

    + +

    Functions

    +

    Both simple and aggregate functions are supported. +(For presentation purposes, simple functions are further subdivided into +core functions and date-time functions.) +A simple function can be used in any expression. Simple functions return +a result immediately based on their inputs. Aggregate functions +may only be used in a SELECT statement. Aggregate functions compute +their result across all rows of the result set.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang.html --- sqlite3-3.4.2/www/lang.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,119 @@ + + +Query Language Understood by SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQL As Understood By SQLite

    + +

    SQLite understands most of the standard SQL +language. But it does omit some features +while at the same time +adding a few features of its own. This document attempts to +describe precisely what parts of the SQL language SQLite does +and does not support. A list of keywords is +also provided. The SQL language syntax is described by +syntax diagrams.

    + +

    In all of the syntax diagrams that follow, literal text is shown in +bold blue. Non-terminal symbols are shown in italic red. Operators +that are part of the syntactic markup itself are shown in black roman.

    + +

    This document is just an overview of the SQL syntax implemented +by SQLite. Many low-level productions are omitted. For detailed information +on the language that SQLite understands, refer to the source code and +the grammar file "parse.y".

    + +

    The following syntax documentation topics are available:

    + + +
    + +

    The routines sqlite3_prepare_v2(), sqlite3_prepare(), +sqlite3_prepare16(), sqlite3_exec(), and sqlite3_get_table() accept +an SQL statement list (sql-stmt-list) which is a semicolon-separated +list of statements.

    + +

    sql-stmt-list:

    + + +

    Each SQL statement in the statement list is an instance of the +following:

    + +

    sql-stmt:

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_indexedby.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_indexedby.html --- sqlite3-3.4.2/www/lang_indexedby.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_indexedby.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,126 @@ + + +SQLite Query Language: INDEXED BY + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    INDEXED BY

    +

    The INDEXED BY phrase is a SQL extension found only in SQLite which can +be used to verify that the correct indices are being used on a DELETE, +SELECT, or UPDATE statement. +The INDEXED BY phrase always follows the name of a table that SQLite will +be reading. The INDEXED BY phrase can be seen in the following syntax +diagrams:

    + +

    qualified-table-name:

    +

    single-source:

    + + +

    The "INDEXED BY index-name" clause specifies that the named index +must be used in order to look up values on the preceding table. +If index-name does not exist or cannot be used for the query, then +the preparation of the SQL statement fails. +The "NOT INDEXED" clause specifies that no index shall be used when +accessing the preceding table, including implied indices create by +UNIQUE and PRIMARY KEY constraints. However, the INTEGER PRIMARY KEY +can still be used to look up entries even when "NOT INDEXED" is specified.

    + +

Some SQL database engines provide non-standard "hint" mechanisms which +can be used to give the query optimizer clues about what indices it should +use for a particular statement. The INDEXED BY clause of SQLite is +not a hinting mechanism and it should not be used as such. +The INDEXED BY clause does not give the optimizer hints about which index +to use; it gives the optimizer a requirement of which index to use. +If the query optimizer is unable to use the index specified by the +INDEXED BY clause, then the query will fail with an error.

    + +

The INDEXED BY clause is not intended for use in tuning +the performance of a query. The intent of the INDEXED BY clause is +to raise a run-time error if a schema change, such as dropping or +creating an index, causes the query plan for a time-sensitive query +to change. The INDEXED BY clause is designed to help detect +undesirable query plan changes during regression testing. +Developers are admonished to omit all use of INDEXED BY during +application design, implementation, testing, and tuning. If +INDEXED BY is to be used at all, it should be inserted at the very +end of the development process when "locking down" a design.

    + +

    See Also:

    + +

    The sqlite3_stmt_status() C/C++ interface together with the +SQLITE_STMTSTATUS_FULLSCAN_STEP and SQLITE_STMTSTATUS_SORT verbs +can be used to detect at run-time when an SQL statement is not +making effective use of indices. Many applications may prefer to +use the sqlite3_stmt_status() interface to detect index misuse +rather than the INDEXED BY phrase described here.

    + +

    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_insert.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_insert.html --- sqlite3-3.4.2/www/lang_insert.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_insert.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,106 @@ + + +SQLite Query Language: INSERT + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    INSERT

    insert-stmt:

    + + +

    The INSERT statement comes in two basic forms. The first form +(with the "VALUES" keyword) creates a single new row in an existing table. +If no column-list is specified then the number of values must +be the same as the number of columns in the table. If a column-list +is specified, then the number of values must match the number of +specified columns. Columns of the table that do not appear in the +column list are filled with the default value, or with NULL if no +default value is specified. +

    + +

The second form of the INSERT statement takes its data from a +SELECT statement. The number of columns in the result of the +SELECT must exactly match the number of columns in the table if +no column list is specified, or it must match the number of columns +named in the column list. A new entry is made in the table +for every row of the SELECT result. The SELECT may be simple +or compound.

    + +

    The optional conflict-clause allows the specification of an alternative +constraint conflict resolution algorithm to use during this one command. +See the section titled +ON CONFLICT for additional information. +For compatibility with MySQL, the parser allows the use of the +single keyword REPLACE as an +alias for "INSERT OR REPLACE". +
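A sketch of both forms and of the REPLACE alias (hypothetical tables):

CREATE TABLE t1(a, b, c DEFAULT 0);
INSERT INTO t1(a, b) VALUES(1, 'one');      -- first form: column c gets its default value
CREATE TABLE t2(a, b, c);
INSERT INTO t2 SELECT a, b, c FROM t1;      -- second form: rows come from a SELECT
REPLACE INTO t1(a, b) VALUES(1, 'uno');     -- parsed as INSERT OR REPLACE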

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_keywords.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_keywords.html --- sqlite3-3.4.2/www/lang_keywords.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_keywords.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,178 @@ + + +SQLite Query Language: SQLite Keywords + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    SQLite Keywords

    + +

    The SQL standard specifies a huge number of keywords which may not +be used as the names of tables, indices, columns, databases, user-defined +functions, collations, virtual table modules, or any other named object. +The list of keywords is so long that few people can remember them all. +For most SQL code, your safest bet is to never use any English language +word as the name of a user-defined object.

    + +

    If you want to use a keyword as a name, you need to quote it. There +are three ways of quoting keywords in SQLite:

    + +

    +

    + + + + + + + + + + + + +
    'keyword'A keyword in single quotes is a string literal.
    "keyword"A keyword in double-quotes is an identifier
    [keyword]A keyword enclosed in square brackets is + an identifier. This is not standard SQL. This quoting mechanism + is used by MS Access and SQL Server and is included in SQLite for + compatibility.
    `keyword`A keyword enclosed in grave accents (ASCII code 96) is + an identifier. This is not standard SQL. This quoting mechanism + is used by MySQL and is included in SQLite for + compatibility.
    +
    +
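For example, each quoting style can be used to name an object after a keyword (illustrative only):

CREATE TABLE "group"(id INTEGER PRIMARY KEY, [order] INTEGER, `select` TEXT);
SELECT "order", `select` FROM "group";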

    + +

    For resilience when confronted with historical SQL statements, SQLite +will sometimes bend the quoting rules above:

    + +
      +
    • If a keyword in single +quotes (ex: 'key' or 'glob') is used in a context where +an identifier is allowed but where a string literal is not allowed, then +the token is understood to be an identifier instead of a string literal. +

    • + +
• If a keyword in double +quotes (ex: "key" or "glob") is used in a context where +it cannot be resolved to an identifier but where a string literal +is allowed, then the token is understood to be a string literal instead +of an identifier.

    • +
    + +

    Programmers are cautioned not to use the two exceptions described in +the previous bullets. We emphasize that they exist only so that old +and ill-formed SQL statements will run correctly. Future versions of +SQLite might change to raise errors instead of accepting the malformed +statements covered by the exceptions above.

    + +

    SQLite is extended to allow many keywords to be used unquoted +as the names of databases, tables, indices, triggers, views, columns, +user-defined functions, collations, attached databases, and virtual +function modules. +In the list of keywords that follows, those that can be used as identifiers +are shown in an italic font. Keywords that must be quoted in order to be +used as identifiers are shown in bold.

    + +

    +SQLite adds new keywords from time to time when it takes on new features. +So to prevent your code from being broken by future enhancements, you should +normally quote any identifier that is an English language word, even if +you do not have to. +

    + +

    +The following are the keywords currently recognized by SQLite: +

    + +
    + + +
    + +ABORT
    ADD
    AFTER
    ALL
    ALTER
    ANALYZE
    AND
    AS
    ASC
    ATTACH
    AUTOINCREMENT
    BEFORE
    BEGIN
    BETWEEN
    BY
    CASCADE
    CASE
    CAST
    CHECK
    COLLATE
    COLUMN
    COMMIT
    CONFLICT
    CONSTRAINT
    CREATE
    CROSS
    CURRENT_DATE
    CURRENT_TIME
    CURRENT_TIMESTAMP
    DATABASE
    DEFAULT
    DEFERRABLE
    DEFERRED
    DELETE
    DESC
    DETACH
    DISTINCT
    DROP
    EACH
    ELSE
    END
    ESCAPE
    EXCEPT
    EXCLUSIVE
    EXISTS
    EXPLAIN
    FAIL
    FOR
    FOREIGN
    FROM
    FULL
    GLOB
    GROUP
    HAVING
    IF
    IGNORE
    IMMEDIATE
    IN
    INDEX
    INDEXED
    INITIALLY
    INNER
    INSERT
    INSTEAD
    INTERSECT
    INTO
    IS
    ISNULL
    JOIN
    KEY
    LEFT
    LIKE
    LIMIT
    MATCH
    NATURAL
    NOT
    NOTNULL
    NULL
    OF
    OFFSET
    ON
    OR
    ORDER
    OUTER
    PLAN
    PRAGMA
    PRIMARY
    QUERY
    RAISE
    REFERENCES
    REGEXP
    REINDEX
    RELEASE
    RENAME
    REPLACE
    RESTRICT
    RIGHT
    ROLLBACK
    ROW
    SAVEPOINT
    SELECT
    SET
    TABLE
    TEMP
    TEMPORARY
    THEN
    TO
    TRANSACTION
    TRIGGER
    UNION
    UNIQUE
    UPDATE
    USING
    VACUUM
    VALUES
    VIEW
    VIRTUAL
    WHEN
    WHERE
    +
    + +

    Special names

    + +

    The following are not keywords in SQLite, but are used as names of +system objects. They can be used as an identifier for a different +type of object.

    + +
    + _ROWID_
    + MAIN
    + OID
    + ROWID
    + SQLITE_MASTER
    + SQLITE_SEQUENCE
    + SQLITE_TEMP_MASTER
    + TEMP
    +
    +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_reindex.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_reindex.html --- sqlite3-3.4.2/www/lang_reindex.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_reindex.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,97 @@ + + +SQLite Query Language: REINDEX + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    REINDEX

    reindex-stmt:

    + + +

    The REINDEX command is used to delete and recreate indices from scratch. +This is useful when the definition of a collation sequence has changed. +

    + +

    In the first form, all indices in all attached databases that use the +named collation sequence are recreated. In the second form, if +[database-name.]table/index-name identifies a table, +then all indices +associated with the table are rebuilt. If an index is identified, then only +this specific index is deleted and recreated. +

    + +

    If no database-name is specified and there exists both a table or +index and a collation sequence of the specified name, then indices associated +with the collation sequence only are reconstructed. This ambiguity may be +dispelled by always specifying a database-name when reindexing a +specific table or index. + +


    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_replace.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_replace.html --- sqlite3-3.4.2/www/lang_replace.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_replace.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Query Language: REPLACE + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    REPLACE

    + +

    The REPLACE command is an alias for the "INSERT OR REPLACE" variant +of the INSERT command. +This alias is provided for +compatibility with MySQL. See the +INSERT command documentation for additional +information.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_savepoint.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_savepoint.html --- sqlite3-3.4.2/www/lang_savepoint.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_savepoint.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,187 @@ + + +SQLite Query Language: SAVEPOINT + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    SAVEPOINT

    savepoint-stmt:

    +

    release-stmt:

    +

    rollback-stmt:

    + + +

    SAVEPOINTs are a method of creating transactions, similar to +BEGIN and COMMIT, except that the SAVEPOINT and RELEASE commands +are named and may be nested.

    + +

    The SAVEPOINT command starts a new transaction with a name. +The transaction names need not be unique. +A SAVEPOINT can be started either within or outside of +a BEGIN...COMMIT. When a SAVEPOINT is the outer-most savepoint +and it is not within a BEGIN...COMMIT then the behavior is the +same as BEGIN DEFERRED TRANSACTION.

    + +

The ROLLBACK TO command reverts the state of the database back to what +it was just after the corresponding SAVEPOINT. Note that unlike the +plain ROLLBACK command (without the TO keyword) the ROLLBACK TO command +does not cancel the transaction. Instead of cancelling the transaction, +the ROLLBACK TO command restarts the transaction again at the beginning. +All intervening SAVEPOINTs are cancelled, however.

    + +

The RELEASE command is like a COMMIT for a SAVEPOINT. +The RELEASE command causes all savepoints back to and including the first +savepoint with a matching name to be removed from the transaction stack. The +RELEASE of an inner transaction +does not cause any changes to be written to the database file; it merely +removes savepoints from the transaction stack such that it is +no longer possible to ROLLBACK TO those savepoints. +If a RELEASE command releases the outermost savepoint, so +that the transaction stack becomes empty, then RELEASE is the same +as COMMIT. +The COMMIT command may be used to release all savepoints and +commit the transaction even if the transaction was originally started +by a SAVEPOINT command instead of a BEGIN command.
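A brief sketch of nested savepoints (the table is hypothetical):

CREATE TABLE t1(x);
SAVEPOINT outer_sp;
INSERT INTO t1 VALUES(1);
SAVEPOINT inner_sp;
INSERT INTO t1 VALUES(2);
ROLLBACK TO inner_sp;   -- undoes the insert of 2; the transaction remains open
RELEASE outer_sp;       -- releasing the outermost savepoint works like COMMIT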

    + +

If the savepoint-name in a RELEASE command does not match any +savepoint currently in the transaction stack, then no savepoints are +released, the database is unchanged, and the RELEASE command returns +an error.

    + +

    Note that an inner transaction might commit (using the RELEASE command) +but then later have its work undone by a ROLLBACK in an outer transaction. +A power failure or program crash or OS crash will cause the outer-most +transaction to rollback, undoing all changes that have occurred within +that outer transaction, even changes that have supposedly been "committed" +by the RELEASE command. Content is not actually committed on the disk +until the outermost transaction commits.

    + +

    There are several ways of thinking about the RELEASE command:

    + +
      +
    • +Some people view RELEASE as the equivalent of COMMIT for a SAVEPOINT. +This is an acceptable point of view as long as one remembers that the +changes committed by an inner transaction might later be undone by a +rollback in an outer transaction.

    • + +
    • +Another view of RELEASE is that it merges a named transaction into its +parent transaction, so that the named transaction and its parent become +the same transaction. After RELEASE, the named transaction and its parent +will commit or rollback together, whatever their fate may be. +

    • + +
    • +One can also think of savepoints as +"marks" in the transaction timeline. In this view, the SAVEPOINT command +creates a new mark, the ROLLBACK TO command rewinds the timeline back +to a point just after the named mark, and the RELEASE command +erases marks from the timeline without actually making any +changes to the database. +

    • +
    + + + +

    Transaction Nesting Rules

    + +

    Transactions stack. The last transaction started will be the first +transaction committed or rolled back.

    + +

    The BEGIN command only works if the transaction stack is empty, or +in other words if there are no pending transactions. If the transaction +stack is not empty when the BEGIN command is invoked, then the command +fails with an error.

    + +

    The COMMIT command commits all outstanding transactions and leaves +the transaction stack empty.

    + +

    The RELEASE command starts with the most recent addition to the +transaction stack and releases savepoints backwards +in time until it releases a savepoint mark with a matching savepoint-name. +Prior savepoints, even savepoints with matching savepoint-names, are +unchanged. +If the RELEASE command causes the +transaction stack to become empty (if the RELEASE command releases the +outermost transaction from the stack) then the transaction commits.

    + +

The ROLLBACK command without a TO clause rolls back all transactions +and leaves the transaction stack empty.

    + +

    The ROLLBACK command with a TO clause rolls back transactions going +backwards in time back to the most recent SAVEPOINT with a matching name. +The SAVEPOINT with the matching name remains on the transaction stack, +but all database changes that occurred after that SAVEPOINT was created +are rolled back. If the savepoint-name in a ROLLBACK TO command does not +match any SAVEPOINT on the stack, then the ROLLBACK command fails with an +error and leaves the state of the database unchanged.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_select.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_select.html --- sqlite3-3.4.2/www/lang_select.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_select.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,182 @@ + + +SQLite Query Language: SELECT + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    SELECT

    select-stmt:

    +

    select-core:

    +

    result-column:

    +

    join-source:

    +

    single-source:

    +

    join-op:

    +

    join-constraint:

    +

    ordering-term:

    +

    compound-operator:

    + + +

    The SELECT statement is used to query the database. The +result of a SELECT is zero or more rows of data where each row +has a fixed number of columns. The number of columns in the +result is specified by the expression list in between the +SELECT and FROM keywords. Any arbitrary expression can be used +as a result. If a result expression is * then all columns of all tables are substituted +for that one expression. If the expression is the name of +a table followed by .* then the result is all columns +in that one table.

    + +

    The DISTINCT keyword causes a subset of result rows to be returned, +in which each result row is different. NULL values are not treated as +distinct from each other. The default behavior is that all result rows +be returned, which can be made explicit with the keyword ALL.

    + +

The query is executed against one or more tables specified after +the FROM keyword. If multiple table names are separated by commas, +then the query is against the cross join of the various tables. +The full SQL-92 join syntax can also be used to specify joins. +A sub-query +in parentheses may be substituted for any table name in the FROM clause. +The entire FROM clause may be omitted, in which case the result is a +single row consisting of the values of the expression list. +

    + +

    The WHERE clause can be used to limit the number of rows over +which the query operates.

    + +

    The GROUP BY clause causes one or more rows of the result to +be combined into a single row of output. This is especially useful +when the result contains aggregate functions. The expressions in +the GROUP BY clause do not have to be expressions that +appear in the result. The HAVING clause is similar to WHERE except +that HAVING applies after grouping has occurred. The HAVING expression +may refer to values, even aggregate functions, that are not in the result.

    + +

    The ORDER BY clause causes the output rows to be sorted. +The argument to ORDER BY is a list of expressions that are used as the +key for the sort. The expressions do not have to be part of the +result for a simple SELECT, but in a compound SELECT each sort +expression must exactly match one of the result columns. Each +sort expression may be optionally followed by a COLLATE keyword and +the name of a collating function used for ordering text and/or +keywords ASC or DESC to specify the sort order.

    + +

    Each term of an ORDER BY expression is processed as follows:

    + +
      +
    1. If the ORDER BY expression is a constant integer K then the +output is ordered by the K-th column of the result set.

    2. +
    3. If the ORDER BY expression is an identifier and one of the +output columns has an alias by the same name, then the output is +ordered by the identified column.

    4. +
    5. Otherwise, the ORDER BY expression is evaluated and the output +is ordered by the value of that expression.

    6. +
    + +

    In a compound SELECT statement, the third ORDER BY matching rule +requires that the expression be identical to one of the columns in +the result set. The three rules are first applied to the left-most +SELECT in the compound. If a match is found, the search stops. Otherwise, +the next SELECT to the right is tried. This continues until a match +is found. Each term of the ORDER BY clause is processed separately +and may come from different SELECT statements in the compound.

    + +

The LIMIT clause places an upper bound on the number of rows +returned in the result. A negative LIMIT indicates no upper bound. +The optional OFFSET following LIMIT specifies how many +rows to skip at the beginning of the result set. +In a compound query, the LIMIT clause may only appear on the +final SELECT statement. +The limit is applied to the entire query, not +to the individual SELECT statement to which it is attached. +Note that if the OFFSET keyword is used in the LIMIT clause, then the +limit is the first number and the offset is the second number. If a +comma is used instead of the OFFSET keyword, then the offset is the +first number and the limit is the second number. This seeming +contradiction is intentional - it maximizes compatibility with legacy +SQL database systems. +
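For instance, the following two statements skip the same five rows and return the same ten rows (t1 is hypothetical):

CREATE TABLE t1(a);
SELECT * FROM t1 ORDER BY a LIMIT 10 OFFSET 5;   -- limit first, then offset
SELECT * FROM t1 ORDER BY a LIMIT 5, 10;         -- comma form: offset first, then limit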

    + +

A compound SELECT is formed from two or more simple SELECTs connected +by one of the operators UNION, UNION ALL, INTERSECT, or EXCEPT. In +a compound SELECT, all the constituent SELECTs must specify the +same number of result columns. There may be only a single ORDER BY +clause at the end of the compound SELECT. The UNION and UNION ALL +operators combine the results of the SELECTs to the right and left into +a single big table. The difference is that in UNION all result rows +are distinct whereas in UNION ALL there may be duplicates. +The INTERSECT operator takes the intersection of the results of the +left and right SELECTs. EXCEPT takes the result of the left SELECT after +removing the results of the right SELECT. When three or more SELECTs +are connected into a compound, they group from left to right.

    + + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang.tcl --- sqlite3-3.4.2/www/lang.tcl 2007-08-09 01:00:26.000000000 +0100 +++ sqlite3-3.6.16/www/lang.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,2156 +0,0 @@ -# -# Run this Tcl script to generate the lang-*.html files. -# -set rcsid {$Id: lang.tcl,v 1.133 2007/08/09 00:00:26 drh Exp $} -source common.tcl - -if {[llength $argv]>0} { - set outputdir [lindex $argv 0] -} else { - set outputdir "" -} - -header {Query Language Understood by SQLite} -puts { -

    SQL As Understood By SQLite

    - -

    The SQLite library understands most of the standard SQL -language. But it does omit some features -while at the same time -adding a few features of its own. This document attempts to -describe precisely what parts of the SQL language SQLite does -and does not support. A list of keywords is -also provided.

    - -

    In all of the syntax diagrams that follow, literal text is shown in -bold blue. Non-terminal symbols are shown in italic red. Operators -that are part of the syntactic markup itself are shown in black roman.

    - -

    This document is just an overview of the SQL syntax implemented -by SQLite. Many low-level productions are omitted. For detailed information -on the language that SQLite understands, refer to the source code and -the grammar file "parse.y".

    - -
    -

    SQLite implements the follow syntax:

    -

      -} - -proc slink {label} { - if {[string match *.html $label]} { - return $label - } - if {[string length $::outputdir]==0} { - return #$label - } else { - return lang_$label.html - } -} - -foreach {section} [lsort -index 0 -dictionary { - {{CREATE TABLE} createtable} - {{CREATE VIRTUAL TABLE} createvtab} - {{CREATE INDEX} createindex} - {VACUUM vacuum} - {{DROP TABLE} droptable} - {{DROP INDEX} dropindex} - {INSERT insert} - {REPLACE replace} - {DELETE delete} - {UPDATE update} - {SELECT select} - {comment comment} - {COPY copy} - {EXPLAIN explain} - {expression expr} - {{BEGIN TRANSACTION} transaction} - {{COMMIT TRANSACTION} transaction} - {{END TRANSACTION} transaction} - {{ROLLBACK TRANSACTION} transaction} - {PRAGMA pragma.html} - {{ON CONFLICT clause} conflict} - {{CREATE VIEW} createview} - {{DROP VIEW} dropview} - {{CREATE TRIGGER} createtrigger} - {{DROP TRIGGER} droptrigger} - {{ATTACH DATABASE} attach} - {{DETACH DATABASE} detach} - {REINDEX reindex} - {{ALTER TABLE} altertable} - {{ANALYZE} analyze} -}] { - foreach {s_title s_tag} $section {} - puts "
    • $s_title
    • " -} -puts {

    -
    - -

    Details on the implementation of each command are provided in -the sequel.

    -} - -proc Operator {name} { - return "$name" -} -proc Nonterminal {name} { - return "$name" -} -proc Keyword {name} { - return "$name" -} -proc Example {text} { - puts "
    $text
    " -} - -proc Section {name label} { - global outputdir - - if {[string length $outputdir]!=0} { - if {[llength [info commands puts_standard]]>0} { - footer $::rcsid - } - - if {[string length $label]>0} { - rename puts puts_standard - proc puts {str} { - regsub -all {href="#([a-z]+)"} $str {href="lang_\1.html"} str - puts_standard $::section_file $str - } - rename footer footer_standard - proc footer {id} { - footer_standard $id - rename footer "" - rename puts "" - rename puts_standard puts - rename footer_standard footer - } - set ::section_file [open [file join $outputdir lang_$label.html] w] - header "Query Language Understood by SQLite: $name" - puts "

    SQL As Understood By SQLite

    " - puts "\[Contents\]" - puts "

    $name

    " - return - } - } - puts "\n
    " - if {$label!=""} { - puts "" - } - puts "

    $name

    \n" -} - -Section {ALTER TABLE} altertable - -Syntax {sql-statement} { -ALTER TABLE [ .] -} {alteration} { -RENAME TO -} {alteration} { -ADD [COLUMN] -} - -puts { -

    SQLite's version of the ALTER TABLE command allows the user to -rename or add a new column to an existing table. It is not possible -to remove a column from a table. -

    - -

    The RENAME TO syntax is used to rename the table identified by -[database-name.]table-name to new-table-name. This command -cannot be used to move a table between attached databases, only to rename -a table within the same database.

    - -

    If the table being renamed has triggers or indices, then these remain -attached to the table after it has been renamed. However, if there are -any view definitions, or statements executed by triggers that refer to -the table being renamed, these are not automatically modified to use the new -table name. If this is required, the triggers or view definitions must be -dropped and recreated to use the new table name by hand. -

    - -

The ADD [COLUMN] syntax is used to add a new column to an existing table. -The new column is always appended to the end of the list of existing columns. -Column-def may take any of the forms permissible in a CREATE TABLE -statement, with the following restrictions: -

      -
    • The column may not have a PRIMARY KEY or UNIQUE constraint.
    • -
    • The column may not have a default value of CURRENT_TIME, CURRENT_DATE - or CURRENT_TIMESTAMP.
    • -
    • If a NOT NULL constraint is specified, then the column must have a - default value other than NULL. -
    - -

    The execution time of the ALTER TABLE command is independent of -the amount of data in the table. The ALTER TABLE command runs as quickly -on a table with 10 million rows as it does on a table with 1 row. -

    - -

    After ADD COLUMN has been run on a database, that database will not -be readable by SQLite version 3.1.3 and earlier until the database -is VACUUMed.

    -} - -Section {ANALYZE} analyze - -Syntax {sql-statement} { - ANALYZE -} -Syntax {sql-statement} { - ANALYZE -} -Syntax {sql-statement} { - ANALYZE [ .] -} - -puts { -

The ANALYZE command gathers statistics about indices and stores them -in special tables in the database where the query optimizer can use -them to help make better index choices. -If no arguments are given, all indices in all attached databases are -analyzed. If a database name is given as the argument, all indices -in that one database are analyzed. If the argument is a table name, -then only indices associated with that one table are analyzed.

    - -

    The initial implementation stores all statistics in a single -table named sqlite_stat1. Future enhancements may create -additional tables with the same name pattern except with the "1" -changed to a different digit. The sqlite_stat1 table cannot -be DROPped, -but all the content can be DELETEd which has the -same effect.

    -} - -Section {ATTACH DATABASE} attach - -Syntax {sql-statement} { -ATTACH [DATABASE] AS -} - -puts { -

    The ATTACH DATABASE statement adds another database -file to the current database connection. If the filename contains -punctuation characters it must be quoted. The names 'main' and -'temp' refer to the main database and the database used for -temporary tables. These cannot be detached. Attached databases -are removed using the DETACH DATABASE -statement.

    - -

    You can read from and write to an attached database and you -can modify the schema of the attached database. This is a new -feature of SQLite version 3.0. In SQLite 2.8, schema changes -to attached databases were not allowed.

    - -

    You cannot create a new table with the same name as a table in -an attached database, but you can attach a database which contains -tables whose names are duplicates of tables in the main database. It is -also permissible to attach the same database file multiple times.

    - -

    Tables in an attached database can be referred to using the syntax -database-name.table-name. If an attached table doesn't have -a duplicate table name in the main database, it doesn't require a -database name prefix. When a database is attached, all of its -tables which don't have duplicate names become the default table -of that name. Any tables of that name attached afterwards require the table -prefix. If the default table of a given name is detached, then -the last table of that name attached becomes the new default.

    - -

    -Transactions involving multiple attached databases are atomic, -assuming that the main database is not ":memory:". If the main -database is ":memory:" then -transactions continue to be atomic within each individual -database file. But if the host computer crashes in the middle -of a COMMIT where two or more database files are updated, -some of those files might get the changes where others -might not. -Atomic commit of attached databases is a new feature of SQLite version 3.0. -In SQLite version 2.8, all commits to attached databases behaved as if -the main database were ":memory:". -

    - -

    There is a compile-time limit of 10 attached database files.

    -} - - -Section {BEGIN TRANSACTION} transaction - -Syntax {sql-statement} { -BEGIN [ DEFERRED | IMMEDIATE | EXCLUSIVE ] [TRANSACTION []] -} -Syntax {sql-statement} { -END [TRANSACTION []] -} -Syntax {sql-statement} { -COMMIT [TRANSACTION []] -} -Syntax {sql-statement} { -ROLLBACK [TRANSACTION []] -} - -puts { -

    Beginning in version 2.0, SQLite supports transactions with -rollback and atomic commit.

    - -

    The optional transaction name is ignored. SQLite currently -does not allow nested transactions.

    - -

    -No changes can be made to the database except within a transaction. -Any command that changes the database (basically, any SQL command -other than SELECT) will automatically start a transaction if -one is not already in effect. Automatically started transactions -are committed at the conclusion of the command. -

    - -

    -Transactions can be started manually using the BEGIN -command. Such transactions usually persist until the next -COMMIT or ROLLBACK command. But a transaction will also -ROLLBACK if the database is closed or if an error occurs -and the ROLLBACK conflict resolution algorithm is specified. -See the documentation on the ON CONFLICT -clause for additional information about the ROLLBACK -conflict resolution algorithm. -

    - -

    -In SQLite version 3.0.8 and later, transactions can be deferred, -immediate, or exclusive. Deferred means that no locks are acquired -on the database until the database is first accessed. Thus with a -deferred transaction, the BEGIN statement itself does nothing. Locks -are not acquired until the first read or write operation. The first read -operation against a database creates a SHARED lock and the first -write operation creates a RESERVED lock. Because the acquisition of -locks is deferred until they are needed, it is possible that another -thread or process could create a separate transaction and write to -the database after the BEGIN on the current thread has executed. -If the transaction is immediate, then RESERVED locks -are acquired on all databases as soon as the BEGIN command is -executed, without waiting for the -database to be used. After a BEGIN IMMEDIATE, you are guaranteed that -no other thread or process will be able to write to the database or -do a BEGIN IMMEDIATE or BEGIN EXCLUSIVE. Other processes can continue -to read from the database, however. An exclusive transaction causes -EXCLUSIVE locks to be acquired on all databases. After a BEGIN -EXCLUSIVE, you are guaranteed that no other thread or process will -be able to read or write the database until the transaction is -complete. -

    - -

    -A description of the meaning of SHARED, RESERVED, and EXCLUSIVE locks -is available separately. -

    - -

    -The default behavior for SQLite version 3.0.8 is a -deferred transaction. For SQLite version 3.0.0 through 3.0.7, -deferred is the only kind of transaction available. For SQLite -version 2.8 and earlier, all transactions are exclusive. -

    - -

    -The COMMIT command does not actually perform a commit until all -pending SQL commands finish. Thus if two or more SELECT statements -are in the middle of processing and a COMMIT is executed, the commit -will not actually occur until all SELECT statements finish. -

    - -

    -An attempt to execute COMMIT might result in an SQLITE_BUSY return code. -This indicates that another thread or process had a read lock on the database -that prevented the database from being updated. When COMMIT fails in this -way, the transaction remains active and the COMMIT can be retried later -after the reader has had a chance to clear. -

    -} - - -Section comment comment - -Syntax {comment} { | -} {SQL-comment} {-- -} {C-comment} {/STAR [STAR/] -} - -puts { -

    Comments aren't SQL commands, but can occur in SQL queries. They are -treated as whitespace by the parser. They can begin anywhere whitespace -can be found, including inside expressions that span multiple lines. -

    - -

    SQL comments only extend to the end of the current line.

    - -

    C comments can span any number of lines. If there is no terminating -delimiter, they extend to the end of the input. This is not treated as -an error. A new SQL statement can begin on a line after a multiline -comment ends. C comments can be embedded anywhere whitespace can occur, -including inside expressions, and in the middle of other SQL statements. -C comments do not nest. SQL comments inside a C comment will be ignored. -
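-For example (the "parts" table is hypothetical), both comment styles can appear inside a single statement:
-} -Example {
-SELECT price,      -- SQL comment: runs to the end of this line
-       /* C comment: may span
-          several lines */ qty
-  FROM parts;      -- 'parts' is a hypothetical table
-} -puts { -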

    -} - - -Section COPY copy - -Syntax {sql-statement} { -COPY [ OR ] [ .] FROM -[ USING DELIMITERS ] -} - -puts { -

    The COPY command is available in SQLite version 2.8 and earlier. -The COPY command has been removed from SQLite version 3.0 due to -complications in trying to support it in a mixed UTF-8/16 environment. -In version 3.0, the command-line shell -contains a new command .import that can be used as a substitute -for COPY. -

    - -

    The COPY command is an extension used to load large amounts of -data into a table. It is modeled after a similar command found -in PostgreSQL. In fact, the SQLite COPY command is specifically -designed to be able to read the output of the PostgreSQL dump -utility pg_dump so that data can be easily transferred from -PostgreSQL into SQLite.

    - -

The table-name is the name of an existing table which is to -be filled with data. The filename is a string or identifier that -names a file from which data will be read. The filename can be -STDIN to read data from standard input.

    - -

Each line of the input file is converted into a single record -in the table. Columns are separated by tabs. If a tab occurs as -data within a column, then that tab is preceded by a backslash "\" -character. A backslash in the data appears as two backslashes in -a row. The optional USING DELIMITERS clause can specify a delimiter -other than tab.
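-A short sketch (the table and file names are hypothetical) of loading pipe-delimited data with COPY under SQLite 2.8:
-} -Example {
-COPY parts FROM 'parts.txt' USING DELIMITERS '|';   -- 'parts' and 'parts.txt' are example names
-} -puts { -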

    - -

    If a column consists of the character "\N", that column is filled -with the value NULL.

    - -

    The optional conflict-clause allows the specification of an alternative -constraint conflict resolution algorithm to use for this one command. -See the section titled -ON CONFLICT for additional information.

    - -

When the input data source is STDIN, the input can be terminated -by a line that contains only a backslash and a dot:} -puts "\"[Operator \\.]\".

    " - - -Section {CREATE INDEX} createindex - -Syntax {sql-statement} { -CREATE [UNIQUE] INDEX [IF NOT EXISTS] [ .] -ON ( [, ]* ) -} {column-name} { - [ COLLATE ] [ ASC | DESC ] -} - -puts { -

    The CREATE INDEX command consists of the keywords "CREATE INDEX" followed -by the name of the new index, the keyword "ON", the name of a previously -created table that is to be indexed, and a parenthesized list of names of -columns in the table that are used for the index key. -Each column name can be followed by one of the "ASC" or "DESC" keywords -to indicate sort order, but the sort order is ignored in the current -implementation. Sorting is always done in ascending order.

    - -

The COLLATE clause following each column name defines a collating -sequence used for text entries in that column. The default collating -sequence is the collating sequence defined for that column in the -CREATE TABLE statement. Or if no collating sequence is otherwise defined, -the built-in BINARY collating sequence is used.
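-For example (the table, column, and index names are hypothetical), an index using the built-in NOCASE collating sequence might be created like this:
-} -Example {
-CREATE INDEX idx_parts_description ON parts(description COLLATE NOCASE);   -- hypothetical names
-} -puts { -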

    - -

    There are no arbitrary limits on the number of indices that can be -attached to a single table, nor on the number of columns in an index.

    - -

    If the UNIQUE keyword appears between CREATE and INDEX then duplicate -index entries are not allowed. Any attempt to insert a duplicate entry -will result in an error.

    - -

    The exact text -of each CREATE INDEX statement is stored in the sqlite_master -or sqlite_temp_master table, depending on whether the table -being indexed is temporary. Every time the database is opened, -all CREATE INDEX statements -are read from the sqlite_master table and used to regenerate -SQLite's internal representation of the index layout.

    - -

If the optional IF NOT EXISTS clause is present and another index -with the same name already exists, then this command becomes a no-op.

    - -

    Indexes are removed with the DROP INDEX -command.

    -} - - -Section {CREATE TABLE} {createtable} - -Syntax {sql-command} { -CREATE [TEMP | TEMPORARY] TABLE [IF NOT EXISTS] [ .] ( - [, ]* - [, ]* -) -} {sql-command} { -CREATE [TEMP | TEMPORARY] TABLE [.] AS -} {column-def} { - [] [[CONSTRAINT ] ]* -} {type} { - | - ( ) | - ( , ) -} {column-constraint} { -NOT NULL [ ] | -PRIMARY KEY [] [ ] [AUTOINCREMENT] | -UNIQUE [ ] | -CHECK ( ) | -DEFAULT | -COLLATE -} {constraint} { -PRIMARY KEY ( ) [ ] | -UNIQUE ( ) [ ] | -CHECK ( ) -} {conflict-clause} { -ON CONFLICT -} - -puts { -

A CREATE TABLE statement is basically the keywords "CREATE TABLE" -followed by the name of a new table and a parenthesized list of column -definitions and constraints. The table name can be either an identifier -or a string. Table names that begin with "sqlite_" are reserved -for use by the engine.

    - -

    Each column definition is the name of the column followed by the -datatype for that column, then one or more optional column constraints. -The datatype for the column does not restrict what data may be put -in that column. -See Datatypes In SQLite Version 3 for -additional information. -The UNIQUE constraint causes an index to be created on the specified -columns. This index must contain unique keys. -The COLLATE clause specifies what text -collating function to use when comparing text entries for the column. -The built-in BINARY collating function is used by default. -

-The DEFAULT constraint specifies a default value to use when doing an INSERT. -The value may be NULL, a string constant or a number. Starting with version -3.1.0, the default value may also be one of the special case-independent -keywords CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP. If the value is -NULL, a string constant or number, it is literally inserted into the column -whenever an INSERT statement that does not specify a value for the column is -executed. If the value is CURRENT_TIME, CURRENT_DATE or CURRENT_TIMESTAMP, then -the current UTC date and/or time is inserted into the column. For -CURRENT_TIME, the format is HH:MM:SS. For CURRENT_DATE, YYYY-MM-DD. The format -for CURRENT_TIMESTAMP is "YYYY-MM-DD HH:MM:SS". -

    - -

Specifying a PRIMARY KEY normally just creates a UNIQUE index -on the corresponding columns. However, if the primary key is on a single column -that has datatype INTEGER, then that column is used internally -as the actual key of the B-Tree for the table. This means that the column -may only hold unique integer values. (Except for this one case, -SQLite ignores the datatype specification of columns and allows -any kind of data to be put in a column regardless of its declared -datatype.) If a table does not have an INTEGER PRIMARY KEY column, -then the B-Tree key will be an automatically generated integer. - The -B-Tree key for a row can always be accessed using one of the -special names "ROWID", "OID", or "_ROWID_". -This is true regardless of whether or not there is an INTEGER -PRIMARY KEY. An INTEGER PRIMARY KEY column can also include the -keyword AUTOINCREMENT. The AUTOINCREMENT keyword modifies the way -that B-Tree keys are automatically generated. Additional detail -on automatic B-Tree key generation is available -separately.
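-A small sketch (the table and column names are hypothetical) combining an INTEGER PRIMARY KEY, DEFAULT values, and a column constraint:
-} -Example {
-CREATE TABLE orders(                            -- 'orders' is a hypothetical table
-  id INTEGER PRIMARY KEY AUTOINCREMENT,         -- becomes the B-Tree key
-  item TEXT NOT NULL,
-  qty INTEGER DEFAULT 1,
-  placed_at TEXT DEFAULT CURRENT_TIMESTAMP      -- requires version 3.1.0 or later
-);
-} -puts { -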

    - -

According to the SQL standard, PRIMARY KEY should imply NOT NULL. -Unfortunately, due to a long-standing coding oversight, this is not -the case in SQLite. SQLite allows NULL values -in a PRIMARY KEY column. We could change SQLite to conform to the -standard (and we might do so in the future), but by the time the -oversight was discovered, SQLite was in such wide use that we feared -breaking legacy code if we fixed the problem. So for now we have -chosen to continue allowing NULLs in PRIMARY KEY columns. -Developers should be aware, however, that we may change SQLite to -conform to the SQL standard in the future and should design new programs -accordingly.

    - -

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" -and "TABLE" then the table that is created is only visible -within that same database connection -and is automatically deleted when -the database connection is closed. Any indices created on a temporary table -are also temporary. Temporary tables and indices are stored in a -separate file distinct from the main database file.

    - -

    If a <database-name> is specified, then the table is created in -the named database. It is an error to specify both a <database-name> -and the TEMP keyword, unless the <database-name> is "temp". If no -database name is specified, and the TEMP keyword is not present, -the table is created in the main database.

    - -

The optional conflict-clause following each constraint -allows the specification of an alternative default -constraint conflict resolution algorithm for that constraint. -The default is ABORT. Different constraints within the same -table may have different default conflict resolution algorithms. -If a COPY, INSERT, or UPDATE command specifies a different conflict -resolution algorithm, then that algorithm is used in place of the -default algorithm specified in the CREATE TABLE statement. -See the section titled -ON CONFLICT for additional information.

    - -

    CHECK constraints are supported as of version 3.3.0. Prior -to version 3.3.0, CHECK constraints were parsed but not enforced.

    - -

There are no arbitrary limits on the number -of columns or on the number of constraints in a table. -The total amount of data in a single row is limited to about -1 megabyte in version 2.8. In version 3.0 there is no arbitrary -limit on the amount of data in a row.

    - - -

    The CREATE TABLE AS form defines the table to be -the result set of a query. The names of the table columns are -the names of the columns in the result.

    - -

The exact text -of each CREATE TABLE statement is stored in the sqlite_master -table. Every time the database is opened, all CREATE TABLE statements -are read from the sqlite_master table and used to regenerate -SQLite's internal representation of the table layout. -If the original command was a CREATE TABLE AS then an equivalent -CREATE TABLE statement is synthesized and stored in sqlite_master -in place of the original command. -The text of CREATE TEMPORARY TABLE statements is stored in the -sqlite_temp_master table. -

    - -

If the optional IF NOT EXISTS clause is present and another table -with the same name already exists, then this command becomes a no-op.

    - -

    Tables are removed using the DROP TABLE -statement.

    -} - - -Section {CREATE TRIGGER} createtrigger - -Syntax {sql-statement} { -CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] [ BEFORE | AFTER ] - ON [ .] - -} - -Syntax {sql-statement} { -CREATE [TEMP | TEMPORARY] TRIGGER [IF NOT EXISTS] INSTEAD OF - ON [ .] - -} - -Syntax {database-event} { -DELETE | -INSERT | -UPDATE | -UPDATE OF -} - -Syntax {trigger-action} { -[ FOR EACH ROW ] [ WHEN ] -BEGIN - ; [ ; ]* -END -} - -Syntax {trigger-step} { - | | - | -} - -puts { -

    The CREATE TRIGGER statement is used to add triggers to the -database schema. Triggers are database operations (the trigger-action) -that are automatically performed when a specified database event (the -database-event) occurs.

    - -

A trigger may be specified to fire whenever a DELETE, INSERT or UPDATE of a -particular database table occurs, or whenever one or more -specified columns of a table are updated.

    - -

    At this time SQLite supports only FOR EACH ROW triggers, not FOR EACH -STATEMENT triggers. Hence explicitly specifying FOR EACH ROW is optional. FOR -EACH ROW implies that the SQL statements specified as trigger-steps -may be executed (depending on the WHEN clause) for each database row being -inserted, updated or deleted by the statement causing the trigger to fire.

    - -

    Both the WHEN clause and the trigger-steps may access elements of -the row being inserted, deleted or updated using references of the form -"NEW.column-name" and "OLD.column-name", where -column-name is the name of a column from the table that the trigger -is associated with. OLD and NEW references may only be used in triggers on -trigger-events for which they are relevant, as follows:

    - - - - - - - - - - - - - - -
INSERT: NEW references are valid
UPDATE: NEW and OLD references are valid
DELETE: OLD references are valid
    -

    - -

    If a WHEN clause is supplied, the SQL statements specified as trigger-steps are only executed for rows for which the WHEN clause is true. If no WHEN clause is supplied, the SQL statements are executed for all rows.

    - -

    The specified trigger-time determines when the trigger-steps -will be executed relative to the insertion, modification or removal of the -associated row.

    - -

    An ON CONFLICT clause may be specified as part of an UPDATE or INSERT -trigger-step. However if an ON CONFLICT clause is specified as part of -the statement causing the trigger to fire, then this conflict handling -policy is used instead.

    - -

    Triggers are automatically dropped when the table that they are -associated with is dropped.

    - -

    Triggers may be created on views, as well as ordinary tables, by specifying -INSTEAD OF in the CREATE TRIGGER statement. If one or more ON INSERT, ON DELETE -or ON UPDATE triggers are defined on a view, then it is not an error to execute -an INSERT, DELETE or UPDATE statement on the view, respectively. Thereafter, -executing an INSERT, DELETE or UPDATE on the view causes the associated - triggers to fire. The real tables underlying the view are not modified - (except possibly explicitly, by a trigger program).

    - -

    Example:

    - -

    Assuming that customer records are stored in the "customers" table, and -that order records are stored in the "orders" table, the following trigger -ensures that all associated orders are redirected when a customer changes -his or her address:

    -} -Example { -CREATE TRIGGER update_customer_address UPDATE OF address ON customers - BEGIN - UPDATE orders SET address = new.address WHERE customer_name = old.name; - END; -} -puts { -

    With this trigger installed, executing the statement:

    -} - -Example { -UPDATE customers SET address = '1 Main St.' WHERE name = 'Jack Jones'; -} -puts { -

    causes the following to be automatically executed:

    -} -Example { -UPDATE orders SET address = '1 Main St.' WHERE customer_name = 'Jack Jones'; -} - -puts { -

    Note that currently, triggers may behave oddly when created on tables - with INTEGER PRIMARY KEY fields. If a BEFORE trigger program modifies the - INTEGER PRIMARY KEY field of a row that will be subsequently updated by the - statement that causes the trigger to fire, then the update may not occur. - The workaround is to declare the table with a PRIMARY KEY column instead - of an INTEGER PRIMARY KEY column.

    -} - -puts { -

    A special SQL function RAISE() may be used within a trigger-program, with the following syntax

    -} -Syntax {raise-function} { -RAISE ( ABORT, ) | -RAISE ( FAIL, ) | -RAISE ( ROLLBACK, ) | -RAISE ( IGNORE ) -} -puts { -

    When one of the first three forms is called during trigger-program execution, the specified ON CONFLICT processing is performed (either ABORT, FAIL or - ROLLBACK) and the current query terminates. An error code of SQLITE_CONSTRAINT is returned to the user, along with the specified error message.
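-For instance (the trigger and table names are hypothetical), a trigger might use RAISE(ABORT, ...) to reject deletes:
-} -Example {
-CREATE TRIGGER no_customer_deletes BEFORE DELETE ON customers   -- hypothetical names
-BEGIN
-  SELECT RAISE(ABORT, 'customers may not be deleted');
-END;
-} -puts { -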

    - -

When RAISE(IGNORE) is called, the remainder of the current trigger program, -the statement that caused the trigger program to execute and any subsequent - trigger programs that would have been executed are abandoned. No database - changes are rolled back. If the statement that caused the trigger program - to execute is itself part of a trigger program, then that trigger program - resumes execution at the beginning of the next step. -

    - -

    Triggers are removed using the DROP TRIGGER -statement.

    -} - - -Section {CREATE VIEW} {createview} - -Syntax {sql-command} { -CREATE [TEMP | TEMPORARY] VIEW [IF NOT EXISTS] [.] AS -} - -puts { -

    The CREATE VIEW command assigns a name to a pre-packaged -SELECT -statement. Once the view is created, it can be used in the FROM clause -of another SELECT in place of a table name. -
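-For example (the view, table, and column names are hypothetical):
-} -Example {
-CREATE VIEW big_orders AS SELECT id, item, qty FROM orders WHERE qty > 100;   -- hypothetical names
-SELECT item FROM big_orders;
-} -puts { -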

    - -

    If the "TEMP" or "TEMPORARY" keyword occurs in between "CREATE" -and "VIEW" then the view that is created is only visible to the -process that opened the database and is automatically deleted when -the database is closed.

    - -

If a <database-name> is specified, then the view is created in -the named database. It is an error to specify both a <database-name> -and the TEMP keyword, unless the <database-name> is "temp". If no -database name is specified, and the TEMP keyword is not present, -the view is created in the main database.

    - -

    You cannot COPY, DELETE, INSERT or UPDATE a view. Views are read-only -in SQLite. However, in many cases you can use a -TRIGGER on the view to accomplish the same thing. Views are removed -with the DROP VIEW -command.

    -} - -Section {CREATE VIRTUAL TABLE} {createvtab} - -Syntax {sql-command} { -CREATE VIRTUAL TABLE [ .] USING [( )] -} - -puts { -

    A virtual table is an interface to an external storage or computation -engine that appears to be a table but does not actually store information -in the database file.

    - -

    In general, you can do anything with a virtual table that can be done -with an ordinary table, except that you cannot create triggers on a -virtual table. Some virtual table implementations might impose additional -restrictions. For example, many virtual tables are read-only.

    - -

    The <module-name> is the name of an object that implements -the virtual table. The <module-name> must be registered with -the SQLite database connection using -sqlite3_create_module -prior to issuing the CREATE VIRTUAL TABLE statement. -The module takes zero or more comma-separated arguments. -The arguments can be just about any text as long as it has balanced -parentheses. The argument syntax is sufficiently general that the -arguments can be made to appear as column definitions in a traditional -CREATE TABLE statement. -SQLite passes the module arguments directly -to the module without any interpretation. It is the responsibility -of the module implementation to parse and interpret its own arguments.
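-As a sketch, assuming a module named example_module has already been registered with sqlite3_create_module (the module name, table name, and arguments below are hypothetical), the statement might look like this:
-} -Example {
-CREATE VIRTUAL TABLE vtab1 USING example_module(width INTEGER, height INTEGER);   -- hypothetical module and arguments
-} -puts { -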

    - -

    A virtual table is destroyed using the ordinary -DROP TABLE statement. There is no -DROP VIRTUAL TABLE statement.

    -} - -Section DELETE delete - -Syntax {sql-statement} { -DELETE FROM [ .] [WHERE ] -} - -puts { -

    The DELETE command is used to remove records from a table. -The command consists of the "DELETE FROM" keywords followed by -the name of the table from which records are to be removed. -

    - -

    Without a WHERE clause, all rows of the table are removed. -If a WHERE clause is supplied, then only those rows that match -the expression are removed.
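-For example (the table and column names are hypothetical):
-} -Example {
-DELETE FROM orders WHERE placed_at < '2007-01-01';   -- 'orders' is a hypothetical table
-DELETE FROM orders;                                  -- no WHERE clause: removes every row
-} -puts { -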

    -} - - -Section {DETACH DATABASE} detach - -Syntax {sql-command} { -DETACH [DATABASE] -} - -puts { -

    This statement detaches an additional database connection previously -attached using the ATTACH DATABASE statement. It -is possible to have the same database file attached multiple times using -different names, and detaching one connection to a file will leave the -others intact.

    - -

    This statement will fail if SQLite is in the middle of a transaction.

    -} - - -Section {DROP INDEX} dropindex - -Syntax {sql-command} { -DROP INDEX [IF EXISTS] [ .] -} - -puts { -

    The DROP INDEX statement removes an index added -with the -CREATE INDEX statement. The index named is completely removed from -the disk. The only way to recover the index is to reenter the -appropriate CREATE INDEX command.

    - -

    The DROP INDEX statement does not reduce the size of the database -file in the default mode. -Empty space in the database is retained for later INSERTs. To -remove free space in the database, use the VACUUM -command. If AUTOVACUUM mode is enabled for a database then space -will be freed automatically by DROP INDEX.

    -} - - -Section {DROP TABLE} droptable - -Syntax {sql-command} { -DROP TABLE [IF EXISTS] [.] -} - -puts { -

    The DROP TABLE statement removes a table added with the CREATE TABLE statement. The name specified is the -table name. It is completely removed from the database schema and the -disk file. The table can not be recovered. All indices associated -with the table are also deleted.

    - -

    The DROP TABLE statement does not reduce the size of the database -file in the default mode. Empty space in the database is retained for -later INSERTs. To -remove free space in the database, use the VACUUM -command. If AUTOVACUUM mode is enabled for a database then space -will be freed automatically by DROP TABLE.

    - -

    The optional IF EXISTS clause suppresses the error that would normally -result if the table does not exist.

    -} - - -Section {DROP TRIGGER} droptrigger -Syntax {sql-statement} { -DROP TRIGGER [IF EXISTS] [ .] -} -puts { -

    The DROP TRIGGER statement removes a trigger created by the -CREATE TRIGGER statement. The trigger is -deleted from the database schema. Note that triggers are automatically -dropped when the associated table is dropped.

    -} - - -Section {DROP VIEW} dropview - -Syntax {sql-command} { -DROP VIEW [IF EXISTS] -} - -puts { -

    The DROP VIEW statement removes a view created by the CREATE VIEW statement. The name specified is the -view name. It is removed from the database schema, but no actual data -in the underlying base tables is modified.

    -} - - -Section EXPLAIN explain - -Syntax {sql-statement} { -EXPLAIN -} - -puts { -

    The EXPLAIN command modifier is a non-standard extension. The -idea comes from a similar command found in PostgreSQL, but the operation -is completely different.

    - -

    If the EXPLAIN keyword appears before any other SQLite SQL command -then instead of actually executing the command, the SQLite library will -report back the sequence of virtual machine instructions it would have -used to execute the command had the EXPLAIN keyword not been present. -For additional information about virtual machine instructions see -the architecture description or the documentation -on available opcodes for the virtual machine.

    -} - - -Section expression expr - -Syntax {expr} { - | - [NOT] [ESCAPE ] | - | -( ) | - | - . | - . . | - | - | - ( | STAR ) | - ISNULL | - NOTNULL | - [NOT] BETWEEN AND | - [NOT] IN ( ) | - [NOT] IN ( ) | - [NOT] IN [ .] | -[EXISTS] ( ) | -CASE [] LP WHEN THEN RPPLUS [ELSE ] END | -CAST ( AS ) | - COLLATE -} {like-op} { -LIKE | GLOB | REGEXP | MATCH -} - -puts { -

This section is different from the others. Most other sections of -this document talk about a particular SQL command. This section does -not talk about a standalone command but about "expressions" which are -subcomponents of most other commands.

    - -

    SQLite understands the following binary operators, in order from -highest to lowest precedence:

    - -
    -||
    -*    /    %
    -+    -
    -<<   >>   &    |
    -<    <=   >    >=
    -=    ==   !=   <>   IN
    -AND   
    -OR
    -
    - -

    Supported unary prefix operators are these:

    - -
    --    +    !    ~    NOT
    -
    - -

    The COLLATE operator can be thought of as a unary postfix -operator. The COLLATE operator has the highest precedence. -It always binds more tightly than any prefix unary operator or -any binary operator.

    - -

    The unary operator [Operator +] is a no-op. It can be applied -to strings, numbers, or blobs and it always gives as its result the -value of the operand.

    - -

    Note that there are two variations of the equals and not equals -operators. Equals can be either} -puts "[Operator =] or [Operator ==]. -The non-equals operator can be either -[Operator !=] or [Operator {<>}]. -The [Operator ||] operator is \"concatenate\" - it joins together -the two strings of its operands. -The operator [Operator %] outputs the remainder of its left -operand modulo its right operand.

    - -

    The result of any binary operator is a numeric value, except -for the [Operator ||] concatenation operator which gives a string -result.

    " - -puts { - - -

    -A literal value is an integer number or a floating point number. -Scientific notation is supported. The "." character is always used -as the decimal point even if the locale setting specifies "," for -this role - the use of "," for the decimal point would result in -syntactic ambiguity. A string constant is formed by enclosing the -string in single quotes ('). A single quote within the string can -be encoded by putting two single quotes in a row - as in Pascal. -C-style escapes using the backslash character are not supported because -they are not standard SQL. -BLOB literals are string literals containing hexadecimal data and -preceded by a single "x" or "X" character. For example:

    - -
    -X'53514C697465'
    -
    - -

    -A literal value can also be the token "NULL". -

    - -

    -A parameter specifies a placeholder in the expression for a literal -value that is filled in at runtime using the -sqlite3_bind API. -Parameters can take several forms: -

    - - - - - - - - - - - - - - - - - - - - - -
?NNN: A question mark followed by a number NNN holds a spot for the NNN-th parameter. NNN must be between 1 and 999.
?: A question mark that is not followed by a number holds a spot for the next unused parameter.
:AAAA: A colon followed by an identifier name holds a spot for a named parameter with the name AAAA. Named parameters are also numbered. The number assigned is the next unused number. To avoid confusion, it is best to avoid mixing named and numbered parameters.
@AAAA: An "at" sign works exactly like a colon.
$AAAA: A dollar-sign followed by an identifier name also holds a spot for a named parameter with the name AAAA. The identifier name in this case can include one or more occurrences of "::" and a suffix enclosed in "(...)" containing any text at all. This syntax is the form of a variable name in the Tcl programming language.
    - - -

    Parameters that are not assigned values using -sqlite3_bind are treated -as NULL.
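-A short sketch (the table, column, and parameter names are hypothetical) showing a numbered and a named parameter in one statement; the actual values would be supplied through sqlite3_bind before the statement runs:
-} -Example {
-SELECT item, qty FROM orders WHERE qty > ?1 AND item = :item_name;   -- hypothetical names
-} -puts { -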

    - - -

The LIKE operator does a pattern matching comparison. The operand -to the right contains the pattern, the left hand operand contains the -string to match against the pattern. -} -puts "A percent symbol [Operator %] in the pattern matches any -sequence of zero or more characters in the string. An underscore -[Operator _] in the pattern matches any single character in the -string. Any other character matches itself or its lower/upper case -equivalent (i.e. case-insensitive matching). (A bug: SQLite only -understands upper/lower case for 7-bit Latin characters. Hence the -LIKE operator is case sensitive for 8-bit iso8859 characters or UTF-8 -characters. For example, the expression 'a' LIKE 'A' -is TRUE but 'æ' LIKE 'Æ' is FALSE.).

    " - -puts { -

    If the optional ESCAPE clause is present, then the expression -following the ESCAPE keyword must evaluate to a string consisting of -a single character. This character may be used in the LIKE pattern -to include literal percent or underscore characters. The escape -character followed by a percent symbol, underscore or itself matches a -literal percent symbol, underscore or escape character in the string, -respectively. The infix LIKE operator is implemented by calling the -user function like(X,Y).
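-For example (the table and column names are hypothetical), a literal percent sign can be matched by naming an escape character:
-} -Example {
-SELECT description FROM parts WHERE discount LIKE '50\%' ESCAPE '\';   -- hypothetical table and columns
-} -puts { -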

    -} - -puts { -The LIKE operator is not case sensitive and will match upper case -characters on one side against lower case characters on the other. -(A bug: SQLite only understands upper/lower case for 7-bit Latin -characters. Hence the LIKE operator is case sensitive for 8-bit -iso8859 characters or UTF-8 characters. For example, the expression -'a' LIKE 'A' is TRUE but -'æ' LIKE 'Æ' is FALSE.).

    - -

The infix LIKE -operator is implemented by calling the user function -like(X,Y). If an ESCAPE clause is present, it adds -a third parameter to the function call. The functionality of LIKE can be -overridden by defining an alternative implementation of the -like() SQL function.

    -

    - - -

    The GLOB operator is similar to LIKE but uses the Unix -file globbing syntax for its wildcards. Also, GLOB is case -sensitive, unlike LIKE. Both GLOB and LIKE may be preceded by -the NOT keyword to invert the sense of the test. The infix GLOB -operator is implemented by calling the user function -glob(X,Y) and can be modified by overriding -that function.

    - - -

    The REGEXP operator is a special syntax for the regexp() -user function. No regexp() user function is defined by default -and so use of the REGEXP operator will normally result in an -error message. If a user-defined function named "regexp" -is added at run-time, that function will be called in order -to implement the REGEXP operator.

    - - -

The MATCH operator is a special syntax for the match() -user function. The default match() function implementation -raises an exception and is not really useful for anything. -But extensions can override the match() function with more -helpful logic.

    - -

    A column name can be any of the names defined in the CREATE TABLE -statement or one of the following special identifiers: "ROWID", -"OID", or "_ROWID_". -These special identifiers all describe the -unique integer key (the "row key") associated with every -row of every table. -The special identifiers only refer to the row key if the CREATE TABLE -statement does not define a real column with the same name. Row keys -act like read-only columns. A row key can be used anywhere a regular -column can be used, except that you cannot change the value -of a row key in an UPDATE or INSERT statement. -"SELECT * ..." does not return the row key.

    - -

SELECT statements can appear in expressions as either the -right-hand operand of the IN operator, as a scalar quantity, or -as the operand of an EXISTS operator. -As a scalar quantity or the operand of an IN operator, -the SELECT should have only a single column in its -result. Compound SELECTs (connected with keywords like UNION or -EXCEPT) are allowed. -With the EXISTS operator, the columns in the result set of the SELECT are -ignored and the expression returns TRUE if one or more rows exist -and FALSE if the result set is empty. -If no terms in the SELECT expression refer to values in the containing -query, then the expression is evaluated once prior to any other -processing and the result is reused as necessary. If the SELECT expression -does contain variables from the outer query, then the SELECT is reevaluated -every time it is needed.

    - -

    When a SELECT is the right operand of the IN operator, the IN -operator returns TRUE if the result of the left operand is any of -the values generated by the select. The IN operator may be preceded -by the NOT keyword to invert the sense of the test.

    - -

    When a SELECT appears within an expression but is not the right -operand of an IN operator, then the first row of the result of the -SELECT becomes the value used in the expression. If the SELECT yields -more than one result row, all rows after the first are ignored. If -the SELECT yields no rows, then the value of the SELECT is NULL.

    - -

A CAST expression changes the datatype of the expression into the -type specified by <type>. -<type> can be any non-empty type name that is valid -for the type in a column definition of a CREATE TABLE statement.
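-For example, a value can be converted to a different declared type:
-} -Example {
-SELECT CAST('123' AS INTEGER);   -- yields the integer 123
-SELECT CAST(456 AS TEXT);        -- yields the text value '456'
-} -puts { -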

    - -

    Both simple and aggregate functions are supported. A simple -function can be used in any expression. Simple functions return -a result immediately based on their inputs. Aggregate functions -may only be used in a SELECT statement. Aggregate functions compute -their result across all rows of the result set.

    - - -Core Functions - -

    The core functions shown below are available by default. Additional -functions may be written in C and added to the database engine using -the sqlite3_create_function() -API.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    abs(X)Return the absolute value of argument X.
    coalesce(X,Y,...)Return a copy of the first non-NULL argument. If -all arguments are NULL then NULL is returned. There must be at least -2 arguments.
    - -glob(X,Y)This function is used to implement the -"X GLOB Y" syntax of SQLite. The -sqlite3_create_function() -interface can -be used to override this function and thereby change the operation -of the GLOB operator.
    ifnull(X,Y)Return a copy of the first non-NULL argument. If -both arguments are NULL then NULL is returned. This behaves the same as -coalesce() above.
    - -hex(X)The argument is interpreted as a BLOB. The result -is a hexadecimal rendering of the content of that blob.
last_insert_rowid()Return the ROWID -of the last row inserted from this -connection to the database. This is the same value that would be returned -from the sqlite_last_insert_rowid() API function.
    length(X)Return the string length of X in characters. -If SQLite is configured to support UTF-8, then the number of UTF-8 -characters is returned, not the number of bytes.
    - -like(X,Y)
    -like(X,Y,Z)
    -This function is used to implement the "X LIKE Y [ESCAPE Z]" -syntax of SQL. If the optional ESCAPE clause is present, then the -user-function is invoked with three arguments. Otherwise, it is -invoked with two arguments only. The - -sqlite_create_function() interface can be used to override this -function and thereby change the operation of the LIKE operator. When doing this, it may be important -to override both the two and three argument versions of the like() -function. Otherwise, different code may be called to implement the -LIKE operator depending on whether or not an ESCAPE clause was -specified.
    load_extension(X)
    -load_extension(X,Y)
    Load SQLite extensions out of the shared library -file named X using the entry point Y. The result -is a NULL. If Y is omitted then the default entry point -of sqlite3_extension_init is used. This function raises -an exception if the extension fails to load or initialize correctly. -
lower(X)Return a copy of string X with all characters -converted to lower case. The C library tolower() routine is used -for the conversion, which means that this function might not -work correctly on UTF-8 characters.
    - -ltrim(X)
    ltrim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from the left side of X. -If the Y argument is omitted, spaces are removed.
    max(X,Y,...)Return the argument with the maximum value. Arguments -may be strings in addition to numbers. The maximum value is determined -by the usual sort order. Note that max() is a simple function when -it has 2 or more arguments but converts to an aggregate function if given -only a single argument.
    min(X,Y,...)Return the argument with the minimum value. Arguments -may be strings in addition to numbers. The minimum value is determined -by the usual sort order. Note that min() is a simple function when -it has 2 or more arguments but converts to an aggregate function if given -only a single argument.
    nullif(X,Y)Return the first argument if the arguments are different, -otherwise return NULL.
    quote(X)This routine returns a string which is the value of -its argument suitable for inclusion into another SQL statement. -Strings are surrounded by single-quotes with escapes on interior quotes -as needed. BLOBs are encoded as hexadecimal literals. -The current implementation of VACUUM uses this function. The function -is also useful when writing triggers to implement undo/redo functionality. -
    random(*)Return a pseudo-random integer -between -9223372036854775808 and +9223372036854775807.
- -replace(X,Y,Z)Return a string formed by substituting string Z for -every occurrence of string Y in string X. The BINARY -collating sequence is used for comparisons.
- -randomblob(N)Return an N-byte blob containing pseudo-random bytes. -N should be a positive integer.
    round(X)
    round(X,Y)
    Round off the number X to Y digits to the -right of the decimal point. If the Y argument is omitted, 0 is -assumed.
    - -rtrim(X)
    rtrim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from the right side of X. -If the Y argument is omitted, spaces are removed.
soundex(X)Compute the soundex encoding of the string X. -The string "?000" is returned if the argument is NULL. -This function is omitted from SQLite by default. -It is only available if the -DSQLITE_SOUNDEX=1 compiler option -is used when SQLite is built.
    sqlite_version(*)Return the version string for the SQLite library -that is running. Example: "2.8.0"
substr(X,Y,Z)Return a substring of input string X that begins -with the Y-th character and which is Z characters long. -The left-most character of X is number 1. If Y is negative -then the first character of the substring is found by counting from the -right rather than the left. If X is a string -then character indices refer to actual UTF-8 characters. If -X is a BLOB then the indices refer to bytes.
    - -trim(X)
    trim(X,Y)
    Return a string formed by removing any and all -characters that appear in Y from both ends of X. -If the Y argument is omitted, spaces are removed.
    typeof(X)Return the type of the expression X. The only -return values are "null", "integer", "real", "text", and "blob". -SQLite's type handling is -explained in Datatypes in SQLite Version 3.
    upper(X)Return a copy of input string X converted to all -upper-case letters. The implementation of this function uses the C library -routine toupper() which means it may not work correctly on -UTF-8 strings.
    zeroblob(N) -Return a BLOB consisting of N bytes of 0x00. SQLite -manages these zeroblobs very efficiently. Zeroblobs can be used to -reserve space for a BLOB that is later written using -incremental BLOB I/O.
    - -Date And Time Functions - -

    Date and time functions are documented in the - -SQLite Wiki.

    - - -Aggregate Functions - -

    -The aggregate functions shown below are available by default. Additional -aggregate functions written in C may be added using the -sqlite3_create_function() -API.

    - -

-In any aggregate function that takes a single argument, that argument -can be preceded by the keyword DISTINCT. In such cases, duplicate -elements are filtered before being passed into the aggregate function. -For example, the function "count(distinct X)" will return the number -of distinct values of column X instead of the total number of non-null -values in column X. -
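-For example (the table and column names are hypothetical), the DISTINCT keyword changes what is counted:
-} -Example {
-SELECT count(item), count(DISTINCT item) FROM orders;   -- hypothetical table and column
-} -puts { -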

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    avg(X)Return the average value of all non-NULL X within a -group. String and BLOB values that do not look like numbers are -interpreted as 0. -The result of avg() is always a floating point value even if all -inputs are integers.

    count(X)
    count(*)
The first form returns a count of the number of times -that X is not NULL in a group. The second form (with no argument) -returns the total number of rows in the group.
    max(X)Return the maximum value of all values in the group. -The usual sort order is used to determine the maximum.
    min(X)Return the minimum non-NULL value of all values in the group. -The usual sort order is used to determine the minimum. NULL is only returned -if all values in the group are NULL.
    sum(X)
    total(X)
    Return the numeric sum of all non-NULL values in the group. - If there are no non-NULL input rows then sum() returns - NULL but total() returns 0.0. - NULL is not normally a helpful result for the sum of no rows - but the SQL standard requires it and most other - SQL database engines implement sum() that way so SQLite does it in the - same way in order to be compatible. The non-standard total() function - is provided as a convenient way to work around this design problem - in the SQL language.

    - -

The result of total() is always a floating point value. - The result of sum() is an integer value if all non-NULL inputs are integers. - If any input to sum() is neither an integer nor a NULL - then sum() returns a floating point value - which might be an approximation to the true sum.

    - -

    Sum() will throw an "integer overflow" exception if all inputs - are integers or NULL - and an integer overflow occurs at any point during the computation. - Total() never throws an exception.

    -
    -} - - -Section INSERT insert - -Syntax {sql-statement} { -INSERT [OR ] INTO [ .] [()] VALUES() | -INSERT [OR ] INTO [ .] [()] -} - -puts { -

    The INSERT statement comes in two basic forms. The first form -(with the "VALUES" keyword) creates a single new row in an existing table. -If no column-list is specified then the number of values must -be the same as the number of columns in the table. If a column-list -is specified, then the number of values must match the number of -specified columns. Columns of the table that do not appear in the -column list are filled with the default value, or with NULL if no -default value is specified. -

    - -

The second form of the INSERT statement takes its data from a -SELECT statement. The number of columns in the result of the -SELECT must exactly match the number of columns in the table if -no column list is specified, or it must match the number of columns -named in the column list. A new entry is made in the table -for every row of the SELECT result. The SELECT may be simple -or compound.
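-Brief sketches of both forms (the table and column names are hypothetical):
-} -Example {
-INSERT INTO orders(item, qty) VALUES('widget', 5);            -- first form, hypothetical table
-INSERT INTO orders_archive SELECT * FROM orders WHERE qty = 0; -- second form
-} -puts { -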

    - -

    The optional conflict-clause allows the specification of an alternative -constraint conflict resolution algorithm to use during this one command. -See the section titled -ON CONFLICT for additional information. -For compatibility with MySQL, the parser allows the use of the -single keyword REPLACE as an alias for "INSERT OR REPLACE". -

    -} - - -Section {ON CONFLICT clause} conflict - -Syntax {conflict-clause} { -ON CONFLICT -} {conflict-algorithm} { -ROLLBACK | ABORT | FAIL | IGNORE | REPLACE -} - -puts { -

    The ON CONFLICT clause is not a separate SQL command. It is a -non-standard clause that can appear in many other SQL commands. -It is given its own section in this document because it is not -part of standard SQL and therefore might not be familiar.

    - -

    The syntax for the ON CONFLICT clause is as shown above for -the CREATE TABLE command. For the INSERT and -UPDATE commands, the keywords "ON CONFLICT" are replaced by "OR", to make -the syntax seem more natural. For example, instead of -"INSERT ON CONFLICT IGNORE" we have "INSERT OR IGNORE". -The keywords change but the meaning of the clause is the same -either way.
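-For example (the table and column names are hypothetical), a conflict algorithm can be named either in the table definition or in an individual INSERT:
-} -Example {
-CREATE TABLE parts(sku TEXT UNIQUE ON CONFLICT IGNORE, description TEXT);   -- hypothetical table
-INSERT OR IGNORE INTO parts(sku, description) VALUES('AB-1', 'hex bolt');
-} -puts { -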

    - -

    The ON CONFLICT clause specifies an algorithm used to resolve -constraint conflicts. There are five choices: ROLLBACK, ABORT, -FAIL, IGNORE, and REPLACE. The default algorithm is ABORT. This -is what they mean:

    - -
    -
    ROLLBACK
    -

    When a constraint violation occurs, an immediate ROLLBACK -occurs, thus ending the current transaction, and the command aborts -with a return code of SQLITE_CONSTRAINT. If no transaction is -active (other than the implied transaction that is created on every -command) then this algorithm works the same as ABORT.

    - -
    ABORT
    -

    When a constraint violation occurs, the command backs out -any prior changes it might have made and aborts with a return code -of SQLITE_CONSTRAINT. But no ROLLBACK is executed so changes -from prior commands within the same transaction -are preserved. This is the default behavior.

    - -
    FAIL
    -

    When a constraint violation occurs, the command aborts with a -return code SQLITE_CONSTRAINT. But any changes to the database that -the command made prior to encountering the constraint violation -are preserved and are not backed out. For example, if an UPDATE -statement encountered a constraint violation on the 100th row that -it attempts to update, then the first 99 row changes are preserved -but changes to rows 100 and beyond never occur.

    - -
    IGNORE
    -

    When a constraint violation occurs, the one row that contains -the constraint violation is not inserted or changed. But the command -continues executing normally. Other rows before and after the row that -contained the constraint violation continue to be inserted or updated -normally. No error is returned.

    - -
    REPLACE
    -

    When a UNIQUE constraint violation occurs, the pre-existing rows -that are causing the constraint violation are removed prior to inserting -or updating the current row. Thus the insert or update always occurs. -The command continues executing normally. No error is returned. -If a NOT NULL constraint violation occurs, the NULL value is replaced -by the default value for that column. If the column has no default -value, then the ABORT algorithm is used. If a CHECK constraint violation -occurs then the IGNORE algorithm is used.

    - -

    When this conflict resolution strategy deletes rows in order to -satisfy a constraint, it does not invoke delete triggers on those -rows. This behavior might change in a future release.

    -
    - -

The algorithm specified in the OR clause of an INSERT or UPDATE -overrides any algorithm specified in a CREATE TABLE. -If no algorithm is specified anywhere, the ABORT algorithm is used.

    -} - -Section REINDEX reindex - -Syntax {sql-statement} { - REINDEX -} -Syntax {sql-statement} { - REINDEX [ .] -} - -puts { -

    The REINDEX command is used to delete and recreate indices from scratch. -This is useful when the definition of a collation sequence has changed. -

    - -

    In the first form, all indices in all attached databases that use the -named collation sequence are recreated. In the second form, if -[database-name.]table/index-name identifies a table, then all indices -associated with the table are rebuilt. If an index is identified, then only -this specific index is deleted and recreated. -

    - -

    If no database-name is specified and there exists both a table or -index and a collation sequence of the specified name, then indices associated -with the collation sequence only are reconstructed. This ambiguity may be -dispelled by always specifying a database-name when reindexing a -specific table or index. -} - -Section REPLACE replace - -Syntax {sql-statement} { -REPLACE INTO [ .] [( )] VALUES ( ) | -REPLACE INTO [ .] [( )] -} - -puts { -

    The REPLACE command is an alias for the "INSERT OR REPLACE" variant -of the INSERT command. This alias is provided for -compatibility with MySQL. See the -INSERT command documentation for additional -information.

    -} - - -Section SELECT select - -Syntax {sql-statement} { -SELECT [ALL | DISTINCT] [FROM ] -[WHERE ] -[GROUP BY ] -[HAVING ] -[
    [
    ]* -} {table} { - [AS ] | -(
    - - - - - - -
    'keyword'A keyword in single quotes is interpreted as a literal string - if it occurs in a context where a string literal is allowed, otherwise - it is understood as an identifier.
    "keyword"A keyword in double-quotes is interpreted as an identifier if - it matches a known identifier. Otherwise it is interpreted as a - string literal.
    [keyword]A keyword enclosed in square brackets is always understood as - an identifier. This is not standard SQL. This quoting mechanism - is used by MS Access and SQL Server and is included in SQLite for - compatibility.
    - -

    - -

    Quoted keywords are unaesthetic. -To help you avoid them, SQLite allows many keywords to be used unquoted -as the names of databases, tables, indices, triggers, views, columns, -user-defined functions, collations, attached databases, and virtual -function modules. -In the list of keywords that follows, those that can be used as identifiers -are shown in an italic font. Keywords that must be quoted in order to be -used as identifiers are shown in bold.

    - -

-SQLite adds new keywords from time to time when it takes on new features. -So to prevent your code from being broken by future enhancements, you should -normally quote any identifier that is an English language word, even if -you do not have to. -

    - -

    -The following are the keywords currently recognized by SQLite: -

    - -
    - - -
    -} - -set n [llength $keyword_list] -set nCol 5 -set nRow [expr {($n+$nCol-1)/$nCol}] -set i 0 -foreach word $keyword_list { - if {[string index $word end]=="*"} { - set word [string range $word 0 end-1] - set font i - } else { - set font b - } - if {$i==$nRow} { - puts "" - set i 1 - } else { - incr i - } - puts "<$font>$word
    " -} - -puts { -
    - -

    Special names

    - -

    The following are not keywords in SQLite, but are used as names of -system objects. They can be used as an identifier for a different -type of object.

    - -
    - _ROWID_
    - MAIN
    - OID
    - ROWID
    - SQLITE_MASTER
    - SQLITE_SEQUENCE
    - SQLITE_TEMP_MASTER
    - TEMP
    -
    -} - -puts {
    } -footer $rcsid -if {[string length $outputdir]} { - footer $rcsid -} -puts {
    } diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_transaction.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_transaction.html --- sqlite3-3.4.2/www/lang_transaction.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_transaction.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,216 @@ + + +SQLite Query Language: BEGIN TRANSACTION + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    BEGIN TRANSACTION

    begin-stmt:

    +

    commit-stmt:

    +

    rollback-stmt:

    + + +

    +No changes can be made to the database except within a transaction. +Any command that changes the database (basically, any SQL command +other than SELECT) will automatically start a transaction if +one is not already in effect. Automatically started transactions +are committed when the last query finishes. +

    + +

    +Transactions can be started manually using the BEGIN +command. Such transactions usually persist until the next +COMMIT or ROLLBACK command. But a transaction will also +ROLLBACK if the database is closed or if an error occurs +and the ROLLBACK conflict resolution algorithm is specified. +See the documentation on the ON CONFLICT +clause for additional information about the ROLLBACK +conflict resolution algorithm. +

    + +

    +END TRANSACTION is an alias for COMMIT. +

    + +

    Transactions created using BEGIN...COMMIT do not nest. +For nested transactions, use the SAVEPOINT and RELEASE commands. +The "TO SAVEPOINT name" clause of the ROLLBACK command shown +in the syntax diagram above is only applicable to SAVEPOINT +transactions. An attempt to invoke the BEGIN command within +a transaction will fail with an error, regardless of whether +the transaction was started by SAVEPOINT or a prior BEGIN. +The COMMIT command and the ROLLBACK command without the TO clause +work the same on SAVEPOINT transactions as they do with transactions +started by BEGIN.

    + +

    +Transactions can be deferred, immediate, or exclusive. +The default transaction behavior is deferred. +Deferred means that no locks are acquired +on the database until the database is first accessed. Thus with a +deferred transaction, the BEGIN statement itself does nothing. Locks +are not acquired until the first read or write operation. The first read +operation against a database creates a SHARED lock and the first +write operation creates a RESERVED lock. Because the acquisition of +locks is deferred until they are needed, it is possible that another +thread or process could create a separate transaction and write to +the database after the BEGIN on the current thread has executed. +If the transaction is immediate, then RESERVED locks +are acquired on all databases as soon as the BEGIN command is +executed, without waiting for the +database to be used. After a BEGIN IMMEDIATE, you are guaranteed that +no other thread or process will be able to write to the database or +do a BEGIN IMMEDIATE or BEGIN EXCLUSIVE. Other processes can continue +to read from the database, however. An exclusive transaction causes +EXCLUSIVE locks to be acquired on all databases. After a BEGIN +EXCLUSIVE, you are guaranteed that no other thread or process will +be able to read or write the database until the transaction is +complete. +

    + +

    +An implicit transaction (a transaction that is started automatically, +not a transaction started by BEGIN) is committed automatically when +the last active statement finishes. A statement finishes when its +prepared statement is reset or +finalized. An open sqlite3_blob used for +incremental BLOB I/O counts as an unfinished statement. The sqlite3_blob +finishes when it is closed. +

    + +

+The explicit COMMIT command runs immediately, even if there are +pending SELECT statements. However, if there are pending +write operations, the COMMIT command +will fail with an error code SQLITE_BUSY. +

    + +

+An attempt to execute COMMIT might also result in an SQLITE_BUSY return code +if another thread or process has a shared lock on the database +that prevented the database from being updated. When COMMIT fails in this +way, the transaction remains active and the COMMIT can be retried later +after the reader has had a chance to clear. +

    + +

+The ROLLBACK will fail with an error code SQLITE_BUSY if there +are any pending queries. Both read-only and read/write queries will +cause a ROLLBACK to fail. A ROLLBACK must fail if there are pending +read operations (unlike COMMIT which can succeed) because bad things +will happen if the memory image of the database is changed out from under +an active query. +

    + +

    +If PRAGMA journal_mode is set to OFF (thus disabling the rollback journal +file) then the behavior of the ROLLBACK command is undefined. +

    + +

    Response To Errors Within A Transaction

    + +

    If certain kinds of errors occur within a transaction, the +transaction may or may not be rolled back automatically. The +errors that cause the behavior include:

    + + + +

    +For all of these errors, SQLite attempts to undo just the one statement +it was working on and leave changes from prior statements within the +same transaction intact and continue with the transaction. However, +depending on the statement being evaluated and the point at which the +error occurs, it might be necessary for SQLite to rollback and +cancel the entire transaction. An application can tell which +course of action SQLite took by using the +sqlite3_get_autocommit() C-language interface.

    + +

    It is recommended that applications respond to the errors +listed above by explicitly issuing a ROLLBACK command. If the +transaction has already been rolled back automatically +by the error response, then the ROLLBACK command will fail with an +error, but no harm is caused by this.

    + +

    Future versions of SQLite may extend the list of errors which +might cause automatic transaction rollback. Future versions of +SQLite might change the error response. In particular, we may +choose to simplify the interface in future versions of SQLite by +causing the errors above to force an unconditional rollback.

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_update.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_update.html --- sqlite3-3.4.2/www/lang_update.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_update.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,105 @@ + + +SQLite Query Language: UPDATE + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    UPDATE

    update-stmt:

    +

    qualified-table-name:

    + + +

    The UPDATE statement is used to change the value of columns in +selected rows of a table. Each assignment in an UPDATE specifies +a column name to the left of the equals sign and an arbitrary expression +to the right. The expressions may use the values of other columns. +All expressions are evaluated before any assignments are made. +A WHERE clause can be used to restrict which rows are updated.
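For example (the inventory table and its columns are hypothetical), the following UPDATE assigns two columns whose right-hand expressions read the old values of the same row:

    #include <sqlite3.h>

    /* Both right-hand expressions are evaluated before either assignment
    ** is made, so price and qty see the row's original values. */
    static int discount_widgets(sqlite3 *db){
      return sqlite3_exec(db,
        "UPDATE inventory SET price = price*0.90, qty = qty-1"
        " WHERE name = 'widget'",
        0, 0, 0);
    }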

    + +

    The optional conflict-clause allows the specification of an alternative +constraint conflict resolution algorithm to use during this one command. +See the section titled +ON CONFLICT for additional information.

    + +

    If SQLite is built with the SQLITE_ENABLE_UPDATE_DELETE_LIMIT +compile-time option then the syntax of the UPDATE statement is extended +with optional ORDER BY and LIMIT clauses as follows:

    + +

    update-stmt-limited:

    + + +

The optional LIMIT clause can be used to limit the number of +rows modified, and thereby limit the size of the transaction. +The ORDER BY clause is used only to determine which rows fall +within the LIMIT. The order in which rows are modified is arbitrary +and is not determined by the ORDER BY clause.
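A sketch of the extended form (only meaningful in a build compiled with -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT; the queue table and columns are hypothetical):

    #include <sqlite3.h>

    /* Expire at most 100 of the oldest pending rows per call, keeping
    ** each transaction small. */
    static int expire_some(sqlite3 *db){
      return sqlite3_exec(db,
        "UPDATE queue SET state = 'expired'"
        " WHERE state = 'pending'"
        " ORDER BY created LIMIT 100",
        0, 0, 0);
    }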

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lang_vacuum.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lang_vacuum.html --- sqlite3-3.4.2/www/lang_vacuum.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lang_vacuum.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,117 @@ + + +SQLite Query Language: VACUUM + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + +

    SQL As Understood By SQLite

    VACUUM

    vacuum-stmt:

    + + +

When an object (table, index, or trigger) is dropped from the +database, it leaves behind empty space. +This empty space will be reused the next time new information is +added to the database. But in the meantime, the database file might +be larger than strictly necessary. Also, frequent inserts, updates, +and deletes can cause the information in the database to become +fragmented - scattered out all across the database file rather +than clustered together in one place.

    + +

    The VACUUM command cleans +the main database by copying its contents to a temporary database file and +reloading the original database file from the copy. This eliminates +free pages, aligns table data to be contiguous, and otherwise cleans +up the database file structure.

    + +

    The VACUUM command may change the +ROWIDs of entries in tables that do +not have an explicit INTEGER PRIMARY KEY.

    + +

    VACUUM only works on the main database. +It is not possible to VACUUM an attached database file.

    + +

    The VACUUM command will fail if there is an active transaction. +The VACUUM command is a no-op for in-memory databases.

    + +

    As of SQLite version 3.1, an alternative to using the VACUUM command +is auto-vacuum mode, enabled using the +auto_vacuum pragma. When auto_vacuum is enabled for a database, +large deletes cause +the size of the database file to shrink. However, auto_vacuum +also causes excess fragmentation of the database file. And auto_vacuum +does not compact partially filled pages of the database as VACUUM +does.

    + +

    The page_size and/or auto_vacuum mode of a database can be changed +by invoking the page_size pragma and/or auto_vacuum pragma and then +immediately VACUUMing the database.
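For instance (a sketch; 4096 bytes is just an example value), an application might rebuild an existing database with a different page size like this:

    #include <sqlite3.h>

    /* Set the new page size, then VACUUM so the file is rewritten
    ** using that page size. */
    static int rebuild_with_page_size(sqlite3 *db){
      int rc = sqlite3_exec(db, "PRAGMA page_size=4096", 0, 0, 0);
      if( rc==SQLITE_OK ) rc = sqlite3_exec(db, "VACUUM", 0, 0, 0);
      return rc;
    }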

    + +
    +This page last modified 2009/05/14 23:56:17 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/limits.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/limits.html --- sqlite3-3.4.2/www/limits.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/limits.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,451 @@ + + +Implementation Limits For SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    Limits In SQLite

    + +

    +"Limits" in the context of this article means sizes or +quantities that can not be exceeded. We are concerned +with things like the maximum number of bytes in a +BLOB or the maximum number of columns in a table. +

    + +

    +SQLite was originally designed with a policy of avoiding +arbitrary limits. +Of course, every program that runs on a machine with finite +memory and disk space has limits of some kind. But in SQLite, +those limits +were not well defined. The policy was that if it would fit +in memory and you could count it with a 32-bit integer, then +it should work. +

    + +

    +Unfortunately, the no-limits policy has been shown to create +problems. Because the upper bounds were not well +defined, they were not tested, and bugs (including possible +security exploits) were often found when pushing SQLite to +extremes. For this reason, newer versions of SQLite have +well-defined limits and those limits are tested as part of +the test suite. +

    + +

+This article defines what the limits of SQLite are and how they +can be customized for specific applications. The default settings +for limits are normally quite large and adequate for almost every +application. Some applications may want to increase a limit here +or there, but we expect such needs to be rare. More commonly, +an application might want to recompile SQLite with much lower +limits to avoid excess resource utilization in the event of a +bug in higher-level SQL statement generators or to help thwart +attackers who inject malicious SQL statements. +

    + +

    +Some limits can be changed at run-time on a per-connection basis +using the sqlite3_limit() interface with one of the +limit categories defined for that interface. +Run-time limits are designed for applications that have multiple +databases, some of which are for internal use only and others which +can be influenced or controlled by potentially hostile external agents. +For example, a web browser application might use an internal database +to track historical page views but have one or more separate databases +that are created and controlled by javascript applications that are +downloaded from the internet. +The sqlite3_limit() interface allows internal databases managed by +trusted code to be unconstrained while simultaneously placing tight +limitations on databases created or controlled by untrusted external +code in order to help prevent a denial of service attack. +
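A sketch of that pattern (the particular limit values below are illustrative only, not recommendations):

    #include <sqlite3.h>

    /* Tighten a few run-time limits on a connection whose SQL can be
    ** influenced by untrusted content.  Each call returns the prior value. */
    static void harden_connection(sqlite3 *db){
      sqlite3_limit(db, SQLITE_LIMIT_LENGTH,               1000000);
      sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH,            100000);
      sqlite3_limit(db, SQLITE_LIMIT_LIKE_PATTERN_LENGTH,      500);
      sqlite3_limit(db, SQLITE_LIMIT_EXPR_DEPTH,               100);
    }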

    + + +
      + +
    1. Maximum length of a string or BLOB

      + +

      +The maximum number of bytes in a string or BLOB in SQLite is defined +by the preprocessor macro SQLITE_MAX_LENGTH. The default value +of this macro is 1 billion (1 thousand million or 1,000,000,000). +You can raise or lower this value at compile-time using a command-line +option like this: +

      + +
      -DSQLITE_MAX_LENGTH=123456789
      + +

      +The current implementation will only support a string or BLOB +length up to 231-1 or 2147483647. And +some built-in functions such as hex() might fail well before that +point. In security-sensitive applications it is best not to +try to increase the maximum string and blob length. In fact, +you might do well to lower the maximum string and blob length +to something more in the range of a few million if that is +possible. +

      + +

      +During part of SQLite's INSERT and SELECT processing, the complete +content of each row in the database is encoded as a single BLOB. +So the SQLITE_MAX_LENGTH parameter also determines the maximum +number of bytes in a row. +

      + +

      +The maximum string or BLOB length can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_LENGTH,size) interface. +

      +
    2. +
    3. Maximum Number Of Columns

      + +

      +The SQLITE_MAX_COLUMN compile-time parameter is used to set an upper +bound on: +

      + +
        +
      • The number of columns in a table
      • +
      • The number of columns in an index
      • +
      • The number of columns in a view
      • +
      • The number of terms in the SET clause of an UPDATE statement
      • +
      • The number of columns in the result set of a SELECT statement
      • +
      • The number of terms in a GROUP BY or ORDER BY clause
      • +
      • The number of values in an INSERT statement
      • +
      + +

      +The default setting for SQLITE_MAX_COLUMN is 2000. You can change it +at compile time to values as large as 32767. On the other hand, many +experienced database designers will argue that a well-normalized database +will never need more than 100 columns in a table. +

      + +

      +In most applications, the number of columns is small - a few dozen. +There are places in the SQLite code generator that use algorithms +that are O(N²) where N is the number of columns. +So if you redefine SQLITE_MAX_COLUMN to be a +really huge number and you generate SQL that uses a large number of +columns, you may find that sqlite3_prepare_v2() +runs slowly.

      + + +

      +The maximum number of columns can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_COLUMN,size) interface. +

      + +
    4. +
    5. Maximum Length Of An SQL Statement

      + +

      +The maximum number of bytes in the text of an SQL statement is +limited to SQLITE_MAX_SQL_LENGTH which defaults to 1000000. You +can redefine this limit to be as large as the smaller of SQLITE_MAX_LENGTH +and 1073741824. +

      + +

      +If an SQL statement is limited to be a million bytes in length, then +obviously you will not be able to insert multi-million byte strings +by embedding them as literals inside of INSERT statements. But +you should not do that anyway. Use host parameters +for your data. Prepare short SQL statements like this: +

      + +
      +INSERT INTO tab1 VALUES(?,?,?); +
      + +

+Then use the sqlite3_bind_XXXX() functions +to bind your large string values to the SQL statement. The use of binding +obviates the need to escape quote characters in the string, reducing the +risk of SQL injection attacks. It also runs faster since the large +string does not need to be parsed or copied as much. +
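As a sketch (the three-column tab1 schema and the helper name are assumed purely for illustration), binding a large value to the statement above looks like this:

    #include <sqlite3.h>

    /* Insert one large string without embedding it in the SQL text. */
    static int insert_big(sqlite3 *db, const char *zBig, int nBig){
      sqlite3_stmt *pStmt;
      int rc = sqlite3_prepare_v2(db,
                 "INSERT INTO tab1 VALUES(?,?,?)", -1, &pStmt, 0);
      if( rc!=SQLITE_OK ) return rc;
      sqlite3_bind_int (pStmt, 1, 1);
      sqlite3_bind_text(pStmt, 2, "label", -1, SQLITE_STATIC);
      sqlite3_bind_text(pStmt, 3, zBig, nBig, SQLITE_TRANSIENT);
      rc = sqlite3_step(pStmt);                 /* SQLITE_DONE on success */
      sqlite3_finalize(pStmt);
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }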

      + +

      +The maximum length of an SQL statement can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_SQL_LENGTH,size) interface. +

      +
    6. Maximum Number Of Tables In A Join

      + +

      +SQLite does not support joins containing more than 64 tables. +This limit arises from the fact that the SQLite code generator +uses bitmaps with one bit per join-table in the query optimizer. +

      + +

      +SQLite uses a very efficient O(N²) greedy algorithm for determining +the order of tables in a join and so a large join can be +prepared quickly. +Hence, there is no mechanism to raise or lower the limit on the +number of tables in a join. +

      +
    7. +
    8. Maximum Depth Of An Expression Tree

      + +

      +SQLite parses expressions into a tree for processing. During +code generation, SQLite walks this tree recursively. The depth +of expression trees is therefore limited in order to avoid +using too much stack space. +

      + +

      +The SQLITE_MAX_EXPR_DEPTH parameter determines the maximum expression +tree depth. If the value is 0, then no limit is enforced. The +current implementation has a default value of 1000. +

      + +

      +The maximum depth of an expression tree can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_EXPR_DEPTH,size) interface if the +SQLITE_MAX_EXPR_DEPTH is initially positive. In other words, the maximum +expression depth can be lowered at run-time if there is already a +compile-time limit on the expression depth. If SQLITE_MAX_EXPR_DEPTH is +set to 0 at compile time (if the depth of expressions is unlimited) then +the sqlite3_limit(db,SQLITE_LIMIT_EXPR_DEPTH,size) is a no-op. +

      + + +
    9. +
    10. Maximum Number Of Arguments On A Function

      + +

      +The SQLITE_MAX_FUNCTION_ARG parameter determines the maximum number +of parameters that can be passed to an SQL function. The default value +of this limit is 100. SQLite should work with functions that have +thousands of parameters. However, we suspect that anybody who tries +to invoke a function with more than a few parameters is really +trying to find security exploits in systems that use SQLite, +not do useful work, +and so for that reason we have set this parameter relatively low.

      + +

      The number of arguments to a function is sometimes stored in a signed +character. So there is a hard upper bound on SQLITE_MAX_FUNCTION_ARG +of 127.

      + +

      +The maximum number of arguments in a function can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_FUNCTION_ARG,size) interface. +

      +
    11. +
    12. Maximum Number Of Terms In A Compound SELECT Statement

      + +

      +A compound SELECT statement is two or more SELECT statements connected +by operators UNION, UNION ALL, EXCEPT, or INTERSECT. We call each +individual SELECT statement within a compound SELECT a "term". +

      + +

      +The code generator in SQLite processes compound SELECT statements using +a recursive algorithm. In order to limit the size of the stack, we +therefore limit the number of terms in a compound SELECT. The maximum +number of terms is SQLITE_MAX_COMPOUND_SELECT which defaults to 500. +We think this is a generous allotment since in practice we almost +never see the number of terms in a compound select exceed single digits. +

      + +

      +The maximum number of compound SELECT terms can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_COMPOUND_SELECT,size) interface. +

      + + +
    13. +
    14. Maximum Length Of A LIKE Or GLOB Pattern

      + +

      +The pattern matching algorithm used in the default LIKE and GLOB +implementation of SQLite can exhibit O(N²) performance (where +N is the number of characters in the pattern) for certain pathological +cases. To avoid denial-of-service attacks from miscreants who are able +to specify their own LIKE or GLOB patterns, the length of the LIKE +or GLOB pattern is limited to SQLITE_MAX_LIKE_PATTERN_LENGTH bytes. +The default value of this limit is 50000. A modern workstation can +evaluate even a pathological LIKE or GLOB pattern of 50000 bytes +relatively quickly. The denial of service problem only comes into +play when the pattern length gets into millions of bytes. Nevertheless, +since most useful LIKE or GLOB patterns are at most a few dozen bytes +in length, paranoid application developers may want to reduce this +parameter to something in the range of a few hundred if they know that +external users are able to generate arbitrary patterns. +

      + +

      +The maximum length of a LIKE or GLOB pattern can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_LIKE_PATTERN_LENGTH,size) interface. +

      +
    15. +
    16. Maximum Number Of Host Parameters In A Single SQL Statement

      + +

      +A host parameter is a place-holder in an SQL statement that is filled +in using one of the +sqlite3_bind_XXXX() interfaces. +Many SQL programmers are familiar with using a question mark ("?") as a +host parameter. SQLite also supports named host parameters prefaced +by ":", "$", or "@" and numbered host parameters of the form "?123". +

      + +

      +Each host parameter in an SQLite statement is assigned a number. The +numbers normally begin with 1 and increase by one with each new +parameter. However, when the "?123" form is used, the host parameter +number is the number that follows the question mark. +
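For example (a hypothetical statement used only to show the numbering):

    /* A bare "?" takes one more than the largest number assigned so far;
    ** "?NNN" forces a specific number. */
    static const char *zSql =
      "INSERT INTO t(a,b,c) VALUES("
      "?,"      /* parameter 1                                  */
      "?5,"     /* parameter 5 (explicitly numbered)            */
      "?)";     /* parameter 6 (one more than the largest, 5)   */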

      + +

      +SQLite allocates space to hold all host parameters between 1 and the +largest host parameter number used. Hence, an SQL statement that contains +a host parameter like ?1000000000 would require gigabytes of storage. +This could easily overwhelm the resources of the host machine. +To prevent excessive memory allocations, +the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER, +which defaults to 999. +

      + +

      +The maximum host parameter number can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_VARIABLE_NUMBER,size) interface. +

      +
    17. +
    18. Maximum Number Of Attached Databases

      + +

+The ATTACH statement is an SQLite extension +that allows two or more databases to be associated to the same database +connection and to operate as if they were a single database. The number +of simultaneously attached databases is limited to SQLITE_MAX_ATTACHED +which is set to 10 by default. +The code generator in SQLite uses bitmaps +to keep track of attached databases. That means that the number of +attached databases cannot be increased above 30 on a machine with +a 32-bit integer.
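For reference (the file name and schema name are made up), attaching a second database looks like this:

    #include <sqlite3.h>

    /* Attach a second database file under the schema name "aux1". */
    static int attach_other(sqlite3 *db){
      return sqlite3_exec(db, "ATTACH DATABASE 'other.db' AS aux1", 0, 0, 0);
    }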

      + +

      +The maximum number of attached databases can be lowered at run-time using +the sqlite3_limit(db,SQLITE_LIMIT_ATTACHED,size) interface. +

      +
    19. +
    20. Maximum Database Page Size

      + +

      +An SQLite database file is organized as pages. The size of each +page is a power of 2 between 512 and SQLITE_MAX_PAGE_SIZE. +The default value for SQLITE_MAX_PAGE_SIZE is 32768. The current +implementation will not support a larger value. +

      + +

      +It used to be the case that SQLite would allocate some stack +structures whose size was proportional to the maximum page size. +For this reason, SQLite would sometimes be compiled with a smaller +maximum page size on embedded devices with limited stack memory. But +more recent versions of SQLite put these large structures on the +heap, not on the stack, so reducing the maximum page size is no +longer necessary on embedded devices. There is no longer any +real reason to lower the maximum page size. +

      +
    21. +
    22. Maximum Number Of Pages In A Database File

      + +

      +SQLite is able to limit the size of a database file to prevent +the database file from growing too large and consuming too much +disk space. +The SQLITE_MAX_PAGE_COUNT parameter, which is normally set to +1073741823, is the maximum number of pages allowed in a single +database file. An attempt to insert new data that would cause +the database file to grow larger than this will return +SQLITE_FULL. +

      + +

      +The +max_page_count PRAGMA can be used to raise or lower this +limit at run-time. +
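A brief sketch (the figure of 10000 pages is an arbitrary example):

    #include <sqlite3.h>

    /* Cap the main database at 10000 pages; with 4096-byte pages that
    ** would be roughly 40MB. */
    static int cap_database_size(sqlite3 *db){
      return sqlite3_exec(db, "PRAGMA main.max_page_count=10000", 0, 0, 0);
    }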

      +
    + +
    +This page last modified 2009/05/24 11:16:05 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/limits.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/limits.tcl --- sqlite3-3.4.2/www/limits.tcl 2007-08-09 01:00:26.000000000 +0100 +++ sqlite3-3.6.16/www/limits.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,318 +0,0 @@ -# -# Run this script to generate the limits.html output file -# -set rcsid {$Id: limits.tcl,v 1.5 2007/08/09 00:00:26 drh Exp $} -source common.tcl -header {Implementation Limits For SQLite} -puts { -

    Limits In SQLite

    - -

    -"Limits" in the context of this article means sizes or -quantities that can not be exceeded. We are concerned -with things like the maximum number of bytes in a -BLOB or the maximum number of columns in a table. -

    - -

    -SQLite was originally designed with a policy of avoiding -arbitrary limits. -Of course, every program that runs on a machine with finite -memory and disk space has limits of some kind. But in SQLite, -those limits -were not well defined. The policy was that if it would fit -in memory and you could count it with a 32-bit integer, then -it should work. -

    - -

    -Unfortunately, the no-limits policy has been shown to create -problems. Because the upper bounds were not well -defined, they were not tested, and bugs (including possible -security exploits) were often found when pushing SQLite to -extremes. For this reason, newer versions of SQLite have -well-defined limits and those limits are tested as part of -the test suite. -

    - -

    -This article defines what the limits of SQLite are and how they -can be customized for specific applications. The default settings -for limits are normally quite large and adequate for almost every -application. Some applications may what to increase a limit here -or there, but we expect such needs to be rare. More commonly, -an application might want to recompile SQLite with much lower -limits to avoid excess resource utilization in the event of -bug in higher-level SQL statement generators or to help thwart -attackers who inject malicious SQL statements. -

    -} -proc limititem {title text} { - puts "
  • $title

    \n$text
  • " -} -puts { -
      -} - -limititem {Maximum length of a string or BLOB} { -

      -The maximum number of bytes in a string or BLOB in SQLite is defined -by the preprocessor macro SQLITE_MAX_LENGTH. The default value -of this macro is 1 billion (1 thousand million or 1,000,000,000). -You can raise or lower this value at compile-time using a command-line -option like this: -

      - -
      -DSQLITE_MAX_LENGTH=123456789
      - -

      -The current implementation will only support a string or BLOB -length up to 231-1 or 2147483647. And -some built-in functions such as hex() might fail well before that -point. In security-sensitive applications it is best not to -try to increase the maximum string and blob length. In fact, -you might do well to lower the maximum string and blob length -to something more in the range of a few million if that is -possible. -

      - -

      -During part of SQLite's INSERT and SELECT processing, the complete -content of each row in the database is encoded as a single BLOB. -So the SQLITE_MAX_LENGTH parameter also determines the maximum -number of bytes in a row. -

      -} - -limititem {Maximum Number Of Columns} { -

      -The SQLITE_MAX_COLUMN compile-time parameter is used to set an upper -bound on: -

      - -
        -
      • The number of columns in a table
      • -
      • The number of columns in an index
      • -
      • The number of columns in a view
      • -
      • The number of terms in the SET clause of an UPDATE statement
      • -
      • The number of columns in the result set of a SELECT statement
      • -
      • The number of terms in a GROUP BY or ORDER BY clause
      • -
      • The number of values in an INSERT statement
      • -
      - -

      -The default setting for SQLITE_MAX_COLUMN is 2000. You can change it -at compile time to values as large as 32676. You might be able to -redefine this value to be as large as billions, though nobody has ever -tried doing that so we do not know if it will work. On the other hand, there -are people who will argue that a well-normalized database design -will never need a value larger than about 100. -

      - -

      -In most applications, the number of columns is small - a few dozen. -There are places in the SQLite code generator that use algorithms -that are O(N²) where N is the number of columns. -So if you redefine SQLITE_MAX_COLUMN to be a -really huge number and you generate SQL that uses a large number of -columns, you may find that -sqlite3_prepare_v2() -runs slowly. -} - -limititem {Maximum Length Of An SQL Statement} { -

      -The maximum number of bytes in the text of an SQL statement is -limited to SQLITE_MAX_SQL_LENGTH which defaults to 1000000. You -can redefine this limit to be as large as the smaller of SQLITE_MAX_LENGTH -and 1073741824. -

      - -

      -If an SQL statement is limited to be a million bytes in length, then -obviously you will not be able to insert multi-million byte strings -by embedding them as literals inside of INSERT statements. But -you should not do that anyway. Use host parameters -for your data. Prepare short SQL statements like this: -

      - -
      -INSERT INTO tab1 VALUES(?,?,?); -
      - -

      -Then use the -sqlite3_bind_XXXX() functions -to bind your large string values to the SQL statement. The use of binding -obviates the need to escape quote characters in the string, reducing the -risk of SQL injection attacks. It is also runs faster since the large -string does not need to be parsed or copied as much. -

      -} - -limititem {Maximum Number Of Tables In A Join} { -

      -SQLite does not support joins containing more than 64 tables. -This limit arises from the fact that the SQLite code generator -uses bitmaps with one bit per join-table in the query optimizer. -

      -} - -limititem {Maximum Depth Of An Expression Tree} { -

      -SQLite parses expressions into a tree for processing. During -code generation, SQLite walks this tree recursively. The depth -of expression trees is therefore limited in order to avoid -using too much stack space. -

      - -

      -The SQLITE_MAX_EXPR_DEPTH parameter determines the maximum expression -tree depth. If the value is 0, then no limit is enforced. The -current implementation has a default value of 1000. -

      -} - -limititem {Maximum Number Of Arguments On A Function} { -

      -The SQLITE_MAX_FUNCTION_ARG parameter determines the maximum number -of parameters that can be passed to an SQL function. The default value -of this limit is 100. We know of no -technical reason why SQLite would not work with functions that have -millions of parameters. However, we suspect that anybody who tries -to invoke a function with millions of parameters is really -trying to find security exploits in systems that use SQLite, -not do useful work, -and so for that reason we have set this parameter relatively low. -} - -limititem {Maximum Number Of Terms In A Compound SELECT Statement} { -

      -A compound SELECT statement is two or more SELECT statements connected -by operators UNION, UNION ALL, EXCEPT, or INTERSECT. We call each -individual SELECT statement within a compound SELECT a "term". -

      - -

      -The code generator in SQLite processes compound SELECT statements using -a recursive algorithm. In order to limit the size of the stack, we -therefore limit the number of terms in a compound SELECT. The maximum -number of terms is SQLITE_MAX_COMPOUND_SELECT which defaults to 500. -We think this is a generous allotment since in practice we almost -never see the number of terms in a compound select exceed single digits. -

      -} - -limititem {Maximum Length Of A LIKE Or GLOB Pattern} { -

      -The pattern matching algorithm used in the default LIKE and GLOB -implementation of SQLite can exhibit O(N²) performance (where -N is the number of characters in the pattern) for certain pathological -cases. To avoid denial-of-service attacks from miscreants who are able -to specify their own LIKE or GLOB patterns, the length of the LIKE -or GLOB pattern is limited to SQLITE_MAX_LIKE_PATTERN_LENGTH bytes. -The default value of this limit is 50000. A modern workstation can -evaluate even a pathological LIKE or GLOB pattern of 50000 bytes -relatively quickly. The denial of service problem only comes into -play when the pattern length gets into millions of bytes. Nevertheless, -since most useful LIKE or GLOB patterns are at most a few dozen bytes -in length, paranoid application developers may want to reduce this -parameter to something in the range of a few hundred if they know that -external users are able to generate arbitrary patterns. -

      -} - -limititem {Maximum Number Of Host Parameters In A Single SQL Statement} { -

      -A host parameter is a place-holder in an SQL statement that is filled -in using one of the -sqlite3_bind_XXXX() interfaces. -Many SQL programmers are familiar with using a question mark ("?") as a -host parameter. SQLite also supports named host parameters prefaced -by ":", "$", or "@" and numbered host parameters of the form "?123". -

      - -

      -Each host parameter in an SQLite statement is assigned a number. The -numbers normally begin with 1 and increase by one with each new -parameter. However, when the "?123" form is used, the host parameter -number is the number that follows the question mark. -

      - -

      -The maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER. -This setting defaults to 999. -

      -} - -limititem {Maximum Number Of Attached Databases} { -

      -The ATTACH statement is an SQLite extension -that allows two or more databases to be associated to the same database -connection and to operate as if they were a single database. The number -of simulataneously attached databases is limited to SQLITE_MAX_ATTACHED -which is set to 10 by default. -The code generator in SQLite uses bitmaps -to keep track of attached databases. That means that the number of -attached databases cannot be increased above 30 on a 32-bit machine -or 62 on a 64-bit machine. -} - -limititem {Maximum Database Page Size} { -

      -An SQLite database file is organized as pages. The size of each -page is a power of 2 between 512 and SQLITE_MAX_PAGE_SIZE. -The default value for SQLITE_MAX_PAGE_SIZE is 32768. The current -implementation will not support a larger value. -

      - -

      -It used to be the case that SQLite would allocate some stack -structures whose size was proportional to the maximum page size. -For this reason, SQLite would sometimes be compiled with a smaller -maximum page size on embedded devices with limited stack memory. But -more recent versions of SQLite put these large structures on the -heap, not on the stack, so reducing the maximum page size is no -longer necessary on embedded devices. -

      -} - -limititem {Maximum Number Of Pages In A Database File} { -

      -SQLite is able to limit the size of a database file to prevent -the database file from growing too large and consuming too much -disk or flash space. -The SQLITE_MAX_PAGE_COUNT parameter, which is normally set to -1073741823, is the maximum number of pages allowed in a single -database file. An attempt to insert new data that would cause -the database file to grow larger than this will return -SQLITE_FULL. -

      - -

      -The -max_page_count PRAGMA can be used to raise or lower this -limit at run-time. -

      - -

      -Note that the transaction processing in SQLite requires two bits -of heap memory for every page in the database file. For databases -of a few megabytes in size, this amounts to only a few hundred -bytes of heap memory. But for gigabyte-sized databases the amount -of heap memory required is getting into the kilobyte range and -for terabyte-sized databases, megabytes of heap memory must be -allocated and zeroed at each transaction. SQLite will -support very large databases in theory, but the current implementation -is optimized for the common SQLite use cases of embedded devices -and persistent stores for desktop applications. In other words, -SQLite is designed for use with databases sized in kilobytes or -megabytes not gigabytes. If you are building an application to -work with databases that are hundreds of gigabytes or more -in size, then you should perhaps consider using a different database -engine that is explicitly designed for such large data sets. -

      -} - -puts {
    } -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lockingv3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lockingv3.html --- sqlite3-3.4.2/www/lockingv3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/lockingv3.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,581 @@ + + +File Locking And Concurrency In SQLite Version 3 + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    1.0 File Locking And Concurrency In SQLite Version 3

    +

    Version 3 of SQLite introduces a more complex locking and journaling +mechanism designed to improve concurrency and reduce the writer starvation +problem. The new mechanism also allows atomic commits of transactions +involving multiple database files. +This document describes the new locking mechanism. +The intended audience is programmers who want to understand and/or modify +the pager code and reviewers working to verify the design +of SQLite version 3. +

    +

    2.0 Overview

    +

+Locking and concurrency control are handled by the + +pager module. +The pager module is responsible for making SQLite "ACID" (Atomic, +Consistent, Isolated, and Durable). The pager module makes sure changes +happen all at once, that either all changes occur or none of them do, +that two or more processes do not try to access the database +in incompatible ways at the same time, and that once changes have been +written they persist until explicitly deleted. The pager also provides +a memory cache of some of the contents of the disk file.

    + +

The pager is unconcerned +with the details of B-Trees, text encodings, indices, and so forth. +From the point of view of the pager the database consists of +a single file of uniform-sized blocks. Each block is called a +"page" and is usually 1024 bytes in size. The pages are numbered +beginning with 1. So the first 1024 bytes of the database are called +"page 1" and the second 1024 bytes are called "page 2" and so forth. All +other encoding details are handled by higher layers of the library. +The pager communicates with the operating system using one of several +modules +(Examples: + +os_unix.c, + +os_win.c) +that provides a uniform abstraction for operating system services. +

    + +

    The pager module effectively controls access for separate threads, or +separate processes, or both. Throughout this document whenever the +word "process" is written you may substitute the word "thread" without +changing the truth of the statement.

    +

    3.0 Locking

    +

    +From the point of view of a single process, a database file +can be in one of five locking states: +

    + +

    + + + + + + + + + + + + + + + +
    UNLOCKED +No locks are held on the database. The database may be neither read nor +written. Any internally cached data is considered suspect and subject to +verification against the database file before being used. Other +processes can read or write the database as their own locking states +permit. This is the default state. +
    SHARED + + +The database may be read but not written. Any number of +processes can hold SHARED locks at the same time, hence there can be +many simultaneous readers. But no other thread or process is allowed +to write to the database file while one or more SHARED locks are active. +
    RESERVED + + +A RESERVED lock means that the process is planning on writing to the +database file at some point in the future but that it is currently just +reading from the file. Only a single RESERVED lock may be active at one +time, though multiple SHARED locks can coexist with a single RESERVED lock. +RESERVED differs from PENDING in that new SHARED locks can be acquired +while there is a RESERVED lock. +
    PENDING + + +A PENDING lock means that the process holding the lock wants to write +to the database as soon as possible and is just waiting on all current +SHARED locks to clear so that it can get an EXCLUSIVE lock. No new +SHARED locks are permitted against the database if +a PENDING lock is active, though existing SHARED locks are allowed to +continue. +
    EXCLUSIVE + + +An EXCLUSIVE lock is needed in order to write to the database file. +Only one EXCLUSIVE lock is allowed on the file and no other locks of +any kind are allowed to coexist with an EXCLUSIVE lock. In order to +maximize concurrency, SQLite works to minimize the amount of time that +EXCLUSIVE locks are held. +
    +

    + +

    +The operating system interface layer understands and tracks all five +locking states described above. +The pager module only tracks four of the five locking states. +A PENDING lock is always just a temporary +stepping stone on the path to an EXCLUSIVE lock and so the pager module +does not track PENDING locks. +

    +

    4.0 The Rollback Journal

    +

Any time a process wants to make changes to a database file, it +first records enough information in the rollback journal to +restore the database file back to its initial condition. Thus, before +altering any page of the database, the original contents of that page +must be written into the journal. The journal also records the initial +size of the database so that if the database file grows it can be truncated +back to its original size on a rollback.

    + +

The rollback journal is an ordinary disk file that has the same name as +the database file with the suffix "-journal" added.

    + +

If SQLite is working with multiple databases at the same time +(using the ATTACH command) then each database has its own journal. +But there is also a separate aggregate journal +called the master journal. +The master journal does not contain page data used for rolling back +changes. Instead the master journal contains the names of the +individual file journals for each of the ATTACHed databases. Each of +the individual file journals also contains the name of the master journal. +If there are no ATTACHed databases (or if none of the ATTACHed databases +is participating in the current transaction) no master journal is +created and the normal rollback journal contains an empty string +in the place normally reserved for recording the name of the master +journal.

    + +

An individual file journal is said to be hot +if it needs to be rolled back +in order to restore the integrity of its database. +A hot journal is created when a process is in the middle of a database +update and a program or operating system crash or power failure prevents +the update from completing. +Hot journals are an exception condition. +Hot journals exist to recover from crashes and power failures. +If everything is working correctly +(that is, if there are no crashes or power failures) +you will never get a hot journal. +

    + +

    +If no master journal is involved, then +a journal is hot if it exists and its corresponding database file +does not have a RESERVED lock. +If a master journal is named in the file journal, then the file journal +is hot if its master journal exists and there is no RESERVED +lock on the corresponding database file. +It is important to understand when a journal is hot so the +preceding rules will be repeated in bullets: +

    + +
      +
    • A journal is hot if... +
        +
      • It exists, and
      • +
      • Its master journal exists or the master journal name is an + empty string, and
      • +
      • There is no RESERVED lock on the corresponding database file.
      • +
      +
    • +
    +

    4.1 Dealing with hot journals

    +

+Before reading from a database file, SQLite always checks to see if that +database file has a hot journal. If the file does have a hot journal, then +the journal is rolled back before the file is read. In this way, we ensure +that the database file is in a consistent state before it is read. +

    + +

When a process wants to read from a database file, it follows +the following sequence of steps: +

    + +
      +
    1. Open the database file and obtain a SHARED lock. If the SHARED lock + cannot be obtained, fail immediately and return SQLITE_BUSY.
    2. +
    3. Check to see if the database file has a hot journal. If the file + does not have a hot journal, we are done. Return immediately. + If there is a hot journal, that journal must be rolled back by + the subsequent steps of this algorithm.
    4. +
    5. Acquire a PENDING lock then an EXCLUSIVE lock on the database file. + (Note: Do not acquire a RESERVED lock because that would make + other processes think the journal was no longer hot.) If we + fail to acquire these locks it means another process + is already trying to do the rollback. In that case, + drop all locks, close the database, and return SQLITE_BUSY.
    6. +
    7. Read the journal file and roll back the changes.
    8. +
    9. Wait for the rolled back changes to be written onto + the surface of the disk. This protects the integrity of the database + in case another power failure or crash occurs.
    10. +
    11. Delete the journal file.
    12. +
    13. Delete the master journal file if it is safe to do so. + This step is optional. It is here only to prevent stale + master journals from cluttering up the disk drive. + See the discussion below for details.
    14. +
    15. Drop the EXCLUSIVE and PENDING locks but retain the SHARED lock.
    16. +
    + +

    After the algorithm above completes successfully, it is safe to +read from the database file. Once all reading has completed, the +SHARED lock is dropped.

    +

    4.2 Deleting stale master journals

    +

    A stale master journal is a master journal that is no longer being +used for anything. There is no requirement that stale master journals +be deleted. The only reason for doing so is to free up disk space.

    + +

    A master journal is stale if no individual file journals are pointing +to it. To figure out if a master journal is stale, we first read the +master journal to obtain the names of all of its file journals. Then +we check each of those file journals. If any of the file journals named +in the master journal exists and points back to the master journal, then +the master journal is not stale. If all file journals are either missing +or refer to other master journals or no master journal at all, then the +master journal we are testing is stale and can be safely deleted.

    +

    5.0 Writing to a database file

    +

    To write to a database, a process must first acquire a SHARED lock +as described above (possibly rolling back incomplete changes if there +is a hot journal). +After a SHARED lock is obtained, a RESERVED lock must be acquired. +The RESERVED lock signals that the process intends to write to the +database at some point in the future. Only one process at a time +can hold a RESERVED lock. But other processes can continue to read +the database while the RESERVED lock is held. +

    + +

    If the process that wants to write is unable to obtain a RESERVED +lock, it must mean that another process already has a RESERVED lock. +In that case, the write attempt fails and returns SQLITE_BUSY.

    + +

    After obtaining a RESERVED lock, the process that wants to write +creates a rollback journal. The header of the journal is initialized +with the original size of the database file. Space in the journal header +is also reserved for a master journal name, though the master journal +name is initially empty.

    + +

    Before making changes to any page of the database, the process writes +the original content of that page into the rollback journal. Changes +to pages are held in memory at first and are not written to the disk. +The original database file remains unaltered, which means that other +processes can continue to read the database.

    + +

    Eventually, the writing process will want to update the database +file, either because its memory cache has filled up or because it is +ready to commit its changes. Before this happens, the writer must +make sure no other process is reading the database and that the rollback +journal data is safely on the disk surface so that it can be used to +rollback incomplete changes in the event of a power failure. +The steps are as follows:

    + +
      +
1. Make sure all rollback journal data has actually been written to + the surface of the disk (and is not just being held in the operating + system's or disk controller's cache) so that if a power failure occurs + the data will still be there after power is restored.
    2. +
    3. Obtain a PENDING lock and then an EXCLUSIVE lock on the database file. + If other processes still have SHARED locks, the writer might have + to wait until those SHARED locks clear before it is able to obtain + an EXCLUSIVE lock.
    4. +
    5. Write all page modifications currently held in memory out to the + original database disk file.
    6. +
    + +

    +If the reason for writing to the database file is because the memory +cache was full, then the writer will not commit right away. Instead, +the writer might continue to make changes to other pages. Before +subsequent changes are written to the database file, the rollback +journal must be flushed to disk again. Note also that the EXCLUSIVE +lock that the writer obtained in order to write to the database initially +must be held until all changes are committed. That means that no other +processes are able to access the database from the +time the memory cache first spills to disk until the transaction +commits. +

    + +

    +When a writer is ready to commit its changes, it executes the following +steps: +

    + +
      +
    1. + Obtain an EXCLUSIVE lock on the database file and + make sure all memory changes have been written to the database file + using the algorithm of steps 1-3 above.
    2. +
    3. Flush all database file changes to the disk. Wait for those changes + to actually be written onto the disk surface.
    4. +
    5. Delete the journal file. This is the instant when the changes are + committed. Prior to deleting the journal file, if a power failure + or crash occurs, the next process to open the database will see that + it has a hot journal and will roll the changes back. + After the journal is deleted, there will no longer be a hot journal + and the changes will persist. +
    6. +
    7. Drop the EXCLUSIVE and PENDING locks from the database file. +
    8. +
    + +

As soon as the PENDING lock is released from the database file, other +processes can begin reading the database again. In the current implementation, +the RESERVED lock is also released, but that is not essential. Future +versions of SQLite might provide a "CHECKPOINT" SQL command that will +commit all changes made so far within a transaction but retain the +RESERVED lock so that additional changes can be made without giving +any other process an opportunity to write.

    + +

    If a transaction involves multiple databases, then a more complex +commit sequence is used, as follows:

    + +
      +
    1. + Make sure all individual database files have an EXCLUSIVE lock and a + valid journal. +
    2. Create a master-journal. The name of the master-journal is arbitrary. + (The current implementation appends random suffixes to the name of the + main database file until it finds a name that does not previously exist.) + Fill the master journal with the names of all the individual journals + and flush its contents to disk. +
    3. Write the name of the master journal into + all individual journals (in space set aside for that purpose in the + headers of the individual journals) and flush the contents of the + individual journals to disk and wait for those changes to reach the + disk surface. +
    4. Flush all database file changes to the disk. Wait for those changes + to actually be written onto the disk surface.
    5. +
    6. Delete the master journal file. This is the instant when the changes are + committed. Prior to deleting the master journal file, if a power failure + or crash occurs, the individual file journals will be considered hot + and will be rolled back by the next process that + attempts to read them. After the master journal has been deleted, + the file journals will no longer be considered hot and the changes + will persist. +
    7. +
    8. Delete all individual journal files. +
    9. Drop the EXCLUSIVE and PENDING locks from all database files. +
    10. +
    +

    5.1 Writer starvation

    +

    In SQLite version 2, if many processes are reading from the database, +it might be the case that there is never a time when there are +no active readers. And if there is always at least one read lock on the +database, no process would ever be able to make changes to the database +because it would be impossible to acquire a write lock. This situation +is called writer starvation.

    + +

    SQLite version 3 seeks to avoid writer starvation through the use of +the PENDING lock. The PENDING lock allows existing readers to continue +but prevents new readers from connecting to the database. So when a +process wants to write a busy database, it can set a PENDING lock which +will prevent new readers from coming in. Assuming existing readers do +eventually complete, all SHARED locks will eventually clear and the +writer will be given a chance to make its changes.

    +

    6.0 How To Corrupt Your Database Files

    +

    The pager module is robust but it is not completely failsafe. +It can be subverted. This section attempts to identify and explain +the risks.

    + +

    +Clearly, a hardware or operating system fault that introduces incorrect data +into the middle of the database file or journal will cause problems. +Likewise, +if a rogue process opens a database file or journal and writes malformed +data into the middle of it, then the database will become corrupt. +There is not much that can be done about these kinds of problems +so they are given no further attention. +

    + +

    +SQLite uses POSIX advisory locks to implement locking on Unix. On +Windows it uses the LockFile(), LockFileEx(), and UnlockFile() system +calls. SQLite assumes that these system calls all work as advertised. If +that is not the case, then database corruption can result. One should +note that POSIX advisory locking is known to be buggy or even unimplemented +on many NFS implementations (including recent versions of Mac OS X) +and that there are reports of locking problems +for network filesystems under Windows. Your best defense is to not +use SQLite for files on a network filesystem. +

    + +

+SQLite uses the fsync() system call to flush data to the disk under Unix and +it uses FlushFileBuffers() to do the same under Windows. Once again, +SQLite assumes that these operating system services function as advertised. +But it has been reported that fsync() and FlushFileBuffers() do not always +work correctly, especially with inexpensive IDE disks. Apparently some +manufacturers of IDE disks have defective controller chips that report +that data has reached the disk surface when in fact the data is still +in volatile cache memory in the disk drive electronics. There are also +reports that Windows sometimes chooses to ignore FlushFileBuffers() for +unspecified reasons. The author cannot verify any of these reports. +But if they are true, it means that database corruption is a possibility +following an unexpected power loss. These are hardware and/or operating +system bugs that SQLite is unable to defend against. +

    + +

    +If a crash or power failure occurs and results in a hot journal but that +journal is deleted, the next process to open the database will not +know that it contains changes that need to be rolled back. The rollback +will not occur and the database will be left in an inconsistent state. +Rollback journals might be deleted for any number of reasons: +

    + +
      +
    • An administrator might be cleaning up after an OS crash or power failure, + see the journal file, think it is junk, and delete it.
    • +
    • Someone (or some process) might rename the database file but fail to + also rename its associated journal.
    • +
    • If the database file has aliases (hard or soft links) and the file + is opened by a different alias than the one used to create the journal, + then the journal will not be found. To avoid this problem, you should + not create links to SQLite database files.
    • +
    • Filesystem corruption following a power failure might cause the + journal to be renamed or deleted.
    • +
    + +

+The last (fourth) bullet above merits additional comment. When SQLite creates +a journal file on Unix, it opens the directory that contains that file and +calls fsync() on the directory, in an effort to push the directory information +to disk. But suppose some other process is adding or removing unrelated +files in the directory that contains the database and journal at the +moment of a power failure. The supposedly unrelated actions of this other +process might result in the journal file being dropped from the directory and +moved into "lost+found". This is an unlikely scenario, but it could happen. +The best defenses are to use a journaling filesystem or to keep the +database and journal in a directory by themselves. +

    + +

    +For a commit involving multiple databases and a master journal, if the +various databases were on different disk volumes and a power failure occurs +during the commit, then when the machine comes back up the disks might +be remounted with different names. Or some disks might not be mounted +at all. When this happens the individual file journals and the master +journal might not be able to find each other. The worst outcome from +this scenario is that the commit ceases to be atomic. +Some databases might be rolled back and others might not. +All databases will continue to be self-consistent. +To defend against this problem, keep all databases +on the same disk volume and/or remount disks using exactly the same names +after a power failure. +

    +

    7.0 Transaction Control At The SQL Level

    +

    +The changes to locking and concurrency control in SQLite version 3 also +introduce some subtle changes in the way transactions work at the SQL +language level. +By default, SQLite version 3 operates in autocommit mode. +In autocommit mode, +all changes to the database are committed as soon as all operations associated +with the current database connection complete.

    + +

The SQL command "BEGIN TRANSACTION" (the TRANSACTION keyword +is optional) is used to take SQLite out of autocommit mode. +Note that the BEGIN command does not acquire any locks on the database. +After a BEGIN command, a SHARED lock will be acquired when the first +SELECT statement is executed. A RESERVED lock will be acquired when +the first INSERT, UPDATE, or DELETE statement is executed. No EXCLUSIVE +lock is acquired until either the memory cache fills up and must +be spilled to disk or until the transaction commits. In this way, +the system delays blocking read access to the database file until the +last possible moment. +
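The sequence below sketches where each lock is taken (table t1 is hypothetical and error checking is omitted for brevity):

    #include <sqlite3.h>

    static void show_lock_escalation(sqlite3 *db){
      sqlite3_exec(db, "BEGIN", 0, 0, 0);                   /* no lock yet  */
      sqlite3_exec(db, "SELECT count(*) FROM t1", 0, 0, 0); /* SHARED       */
      sqlite3_exec(db, "UPDATE t1 SET x = x+1", 0, 0, 0);   /* RESERVED     */
      sqlite3_exec(db, "COMMIT", 0, 0, 0);   /* EXCLUSIVE while committing,
                                             ** then all locks are released */
    }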

    + +

    The SQL command "COMMIT" does not actually commit the changes to +disk. It just turns autocommit back on. Then, at the conclusion of +the command, the regular autocommit logic takes over and causes the +actual commit to disk to occur. +The SQL command "ROLLBACK" also operates by turning autocommit back on, +but it also sets a flag that tells the autocommit logic to rollback rather +than commit.

    + +

If the SQL COMMIT command turns autocommit on and the autocommit logic +then tries to commit the changes but fails because some other process is holding +a SHARED lock, then autocommit is turned back off automatically. This +allows the user to retry the COMMIT at a later time after the SHARED lock +has had an opportunity to clear.

    + +

    If multiple commands are being executed against the same SQLite database +connection at the same time, the autocommit is deferred until the very +last command completes. For example, if a SELECT statement is being +executed, the execution of the command will pause as each row of the +result is returned. During this pause other INSERT, UPDATE, or DELETE +commands can be executed against other tables in the database. But none +of these changes will commit until the original SELECT statement finishes. +

    + +
    +This page last modified 2009/03/19 00:04:36 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/lockingv3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/lockingv3.tcl --- sqlite3-3.4.2/www/lockingv3.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/lockingv3.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,570 +0,0 @@ -# -# Run this script to generated a lockingv3.html output file -# -set rcsid {$Id: } -source common.tcl -header {File Locking And Concurrency In SQLite Version 3} - -proc HEADING {level title {label {}}} { - global pnum - incr pnum($level) - foreach i [array names pnum] { - if {$i>$level} {set pnum($i) 0} - } - set h [expr {$level+1}] - if {$h>6} {set h 6} - set n $pnum(1).$pnum(2) - for {set i 3} {$i<=$level} {incr i} { - append n .$pnum($i) - } - if {$label!=""} { - puts "" - } - puts "$n $title" -} -set pnum(1) 0 -set pnum(2) 0 -set pnum(3) 0 -set pnum(4) 0 -set pnum(5) 0 -set pnum(6) 0 -set pnum(7) 0 -set pnum(8) 0 - -HEADING 1 {File Locking And Concurrency In SQLite Version 3} - -puts { -

    Version 3 of SQLite introduces a more complex locking and journaling -mechanism designed to improve concurrency and reduce the writer starvation -problem. The new mechanism also allows atomic commits of transactions -involving multiple database files. -This document describes the new locking mechanism. -The intended audience is programmers who want to understand and/or modify -the pager code and reviewers working to verify the design -of SQLite version 3. -

    -} - -HEADING 1 {Overview} overview - -puts { -

-Locking and concurrency control are handled by the - -pager module. -The pager module is responsible for making SQLite "ACID" (Atomic, -Consistent, Isolated, and Durable). The pager module makes sure changes -happen all at once, that either all changes occur or none of them do, -that two or more processes do not try to access the database -in incompatible ways at the same time, and that once changes have been -written they persist until explicitly deleted. The pager also provides -a memory cache of some of the contents of the disk file.

    - -

The pager is unconcerned -with the details of B-Trees, text encodings, indices, and so forth. -From the point of view of the pager the database consists of -a single file of uniform-sized blocks. Each block is called a -"page" and is usually 1024 bytes in size. The pages are numbered -beginning with 1. So the first 1024 bytes of the database are called -"page 1" and the second 1024 bytes are called "page 2" and so forth. All -other encoding details are handled by higher layers of the library. -The pager communicates with the operating system using one of several -modules -(Examples: - -os_unix.c, - -os_win.c) -that provides a uniform abstraction for operating system services. -

    - -

    The pager module effectively controls access for separate threads, or -separate processes, or both. Throughout this document whenever the -word "process" is written you may substitute the word "thread" without -changing the truth of the statement.

    -} - -HEADING 1 {Locking} locking - -puts { -

    -From the point of view of a single process, a database file -can be in one of five locking states: -

    - -

    - - - - - - - - - - - - - - - -
    UNLOCKED -No locks are held on the database. The database may be neither read nor -written. Any internally cached data is considered suspect and subject to -verification against the database file before being used. Other -processes can read or write the database as their own locking states -permit. This is the default state. -
    SHARED -The database may be read but not written. Any number of -processes can hold SHARED locks at the same time, hence there can be -many simultaneous readers. But no other thread or process is allowed -to write to the database file while one or more SHARED locks are active. -
    RESERVED -A RESERVED lock means that the process is planning on writing to the -database file at some point in the future but that it is currently just -reading from the file. Only a single RESERVED lock may be active at one -time, though multiple SHARED locks can coexist with a single RESERVED lock. -RESERVED differs from PENDING in that new SHARED locks can be acquired -while there is a RESERVED lock. -
    PENDING -A PENDING lock means that the process holding the lock wants to write -to the database as soon as possible and is just waiting on all current -SHARED locks to clear so that it can get an EXCLUSIVE lock. No new -SHARED locks are permitted against the database if -a PENDING lock is active, though existing SHARED locks are allowed to -continue. -
    EXCLUSIVE -An EXCLUSIVE lock is needed in order to write to the database file. -Only one EXCLUSIVE lock is allowed on the file and no other locks of -any kind are allowed to coexist with an EXCLUSIVE lock. In order to -maximize concurrency, SQLite works to minimize the amount of time that -EXCLUSIVE locks are held. -
    -

    - -

    -The operating system interface layer understands and tracks all five -locking states described above. -The pager module only tracks four of the five locking states. -A PENDING lock is always just a temporary -stepping stone on the path to an EXCLUSIVE lock and so the pager module -does not track PENDING locks. -

    -} - -HEADING 1 {The Rollback Journal} rollback - -puts { -

Any time a process wants to make a change to a database file, it -first records enough information in the rollback journal to -restore the database file back to its initial condition. Thus, before -altering any page of the database, the original contents of that page -must be written into the journal. The journal also records the initial -size of the database so that if the database file grows it can be truncated -back to its original size on a rollback.

    - -

The rollback journal is an ordinary disk file that has the same name as -the database file with the suffix "-journal" added.

    - -

If SQLite is working with multiple databases at the same time -(using the ATTACH command) then each database has its own journal. -But there is also a separate aggregate journal -called the master journal. -The master journal does not contain page data used for rolling back -changes. Instead the master journal contains the names of the -individual file journals for each of the ATTACHed databases. Each of -the individual file journals also contains the name of the master journal. -If there are no ATTACHed databases (or if none of the ATTACHed databases -is participating in the current transaction) no master journal is -created and the normal rollback journal contains an empty string -in the place normally reserved for recording the name of the master -journal.

    - -

An individual file journal is said to be hot -if it needs to be rolled back -in order to restore the integrity of its database. -A hot journal is created when a process is in the middle of a database -update and a program or operating system crash or power failure prevents -the update from completing. -Hot journals are an exception condition. -Hot journals exist to recover from crashes and power failures. -If everything is working correctly -(that is, if there are no crashes or power failures) -you will never get a hot journal. -

    - -

    -If no master journal is involved, then -a journal is hot if it exists and its corresponding database file -does not have a RESERVED lock. -If a master journal is named in the file journal, then the file journal -is hot if its master journal exists and there is no RESERVED -lock on the corresponding database file. -It is important to understand when a journal is hot so the -preceding rules will be repeated in bullets: -

    - -
      -
    • A journal is hot if... -
        -
      • It exists, and
      • -
• Its master journal exists or the master journal name is an - empty string, and
      • -
      • There is no RESERVED lock on the corresponding database file.
      • -
      -
    • -
    -} - -HEADING 2 {Dealing with hot journals} hot_journals - -puts { -

-Before reading from a database file, SQLite always checks to see if that -database file has a hot journal. If the file does have a hot journal, then -the journal is rolled back before the file is read. In this way, we ensure -that the database file is in a consistent state before it is read. -

    - -

When a process wants to read from a database file, it follows -the following sequence of steps: -

    - -
      -
    1. Open the database file and obtain a SHARED lock. If the SHARED lock - cannot be obtained, fail immediately and return SQLITE_BUSY.
    2. -
    3. Check to see if the database file has a hot journal. If the file - does not have a hot journal, we are done. Return immediately. - If there is a hot journal, that journal must be rolled back by - the subsequent steps of this algorithm.
    4. -
    5. Acquire a PENDING lock then an EXCLUSIVE lock on the database file. - (Note: Do not acquire a RESERVED lock because that would make - other processes think the journal was no longer hot.) If we - fail to acquire these locks it means another process - is already trying to do the rollback. In that case, - drop all locks, close the database, and return SQLITE_BUSY.
    6. -
    7. Read the journal file and roll back the changes.
    8. -
    9. Wait for the rolled back changes to be written onto - the surface of the disk. This protects the integrity of the database - in case another power failure or crash occurs.
    10. -
    11. Delete the journal file.
    12. -
    13. Delete the master journal file if it is safe to do so. - This step is optional. It is here only to prevent stale - master journals from cluttering up the disk drive. - See the discussion below for details.
    14. -
    15. Drop the EXCLUSIVE and PENDING locks but retain the SHARED lock.
    16. -
    - -

    After the algorithm above completes successfully, it is safe to -read from the database file. Once all reading has completed, the -SHARED lock is dropped.

    -} - -HEADING 2 {Deleting stale master journals} stale_master_journals - -puts { -

    A stale master journal is a master journal that is no longer being -used for anything. There is no requirement that stale master journals -be deleted. The only reason for doing so is to free up disk space.

    - -

    A master journal is stale if no individual file journals are pointing -to it. To figure out if a master journal is stale, we first read the -master journal to obtain the names of all of its file journals. Then -we check each of those file journals. If any of the file journals named -in the master journal exists and points back to the master journal, then -the master journal is not stale. If all file journals are either missing -or refer to other master journals or no master journal at all, then the -master journal we are testing is stale and can be safely deleted.

    -} - -HEADING 1 {Writing to a database file} writing - -puts { -

    To write to a database, a process must first acquire a SHARED lock -as described above (possibly rolling back incomplete changes if there -is a hot journal). -After a SHARED lock is obtained, a RESERVED lock must be acquired. -The RESERVED lock signals that the process intends to write to the -database at some point in the future. Only one process at a time -can hold a RESERVED lock. But other processes can continue to read -the database while the RESERVED lock is held. -

    - -

    If the process that wants to write is unable to obtain a RESERVED -lock, it must mean that another process already has a RESERVED lock. -In that case, the write attempt fails and returns SQLITE_BUSY.

    - -

    After obtaining a RESERVED lock, the process that wants to write -creates a rollback journal. The header of the journal is initialized -with the original size of the database file. Space in the journal header -is also reserved for a master journal name, though the master journal -name is initially empty.

    - -

    Before making changes to any page of the database, the process writes -the original content of that page into the rollback journal. Changes -to pages are held in memory at first and are not written to the disk. -The original database file remains unaltered, which means that other -processes can continue to read the database.

    - -

    Eventually, the writing process will want to update the database -file, either because its memory cache has filled up or because it is -ready to commit its changes. Before this happens, the writer must -make sure no other process is reading the database and that the rollback -journal data is safely on the disk surface so that it can be used to -rollback incomplete changes in the event of a power failure. -The steps are as follows:

    - -
      -
1. Make sure all rollback journal data has actually been written to - the surface of the disk (and is not just being held in the operating - system's or disk controller's cache) so that if a power failure occurs - the data will still be there after power is restored.
    2. -
3. Obtain a PENDING lock and then an EXCLUSIVE lock on the database file. - If other processes still have SHARED locks, the writer might have - to wait until those SHARED locks clear before it is able to obtain - an EXCLUSIVE lock.
    4. -
    5. Write all page modifications currently held in memory out to the - original database disk file.
    6. -
    - -

    -If the reason for writing to the database file is because the memory -cache was full, then the writer will not commit right away. Instead, -the writer might continue to make changes to other pages. Before -subsequent changes are written to the database file, the rollback -journal must be flushed to disk again. Note also that the EXCLUSIVE -lock that the writer obtained in order to write to the database initially -must be held until all changes are committed. That means that no other -processes are able to access the database from the -time the memory cache first spills to disk until the transaction -commits. -

    - -

    -When a writer is ready to commit its changes, it executes the following -steps: -

    - -
      -
    1. - Obtain an EXCLUSIVE lock on the database file and - make sure all memory changes have been written to the database file - using the algorithm of steps 1-3 above.
    2. -
    3. Flush all database file changes to the disk. Wait for those changes - to actually be written onto the disk surface.
    4. -
    5. Delete the journal file. This is the instant when the changes are - committed. Prior to deleting the journal file, if a power failure - or crash occurs, the next process to open the database will see that - it has a hot journal and will roll the changes back. - After the journal is deleted, there will no longer be a hot journal - and the changes will persist. -
    6. -
    7. Drop the EXCLUSIVE and PENDING locks from the database file. -
    8. -
    - -

As soon as the PENDING lock is released from the database file, other -processes can begin reading the database again. In the current implementation, -the RESERVED lock is also released, but that is not essential. Future -versions of SQLite might provide a "CHECKPOINT" SQL command that will -commit all changes made so far within a transaction but retain the -RESERVED lock so that additional changes can be made without giving -any other process an opportunity to write.

    - -

    If a transaction involves multiple databases, then a more complex -commit sequence is used, as follows:

    - -
      -
    1. - Make sure all individual database files have an EXCLUSIVE lock and a - valid journal. -
    2. Create a master-journal. The name of the master-journal is arbitrary. - (The current implementation appends random suffixes to the name of the - main database file until it finds a name that does not previously exist.) - Fill the master journal with the names of all the individual journals - and flush its contents to disk. -
    3. Write the name of the master journal into - all individual journals (in space set aside for that purpose in the - headers of the individual journals) and flush the contents of the - individual journals to disk and wait for those changes to reach the - disk surface. -
    4. Flush all database file changes to the disk. Wait for those changes - to actually be written onto the disk surface.
    5. -
    6. Delete the master journal file. This is the instant when the changes are - committed. Prior to deleting the master journal file, if a power failure - or crash occurs, the individual file journals will be considered hot - and will be rolled back by the next process that - attempts to read them. After the master journal has been deleted, - the file journals will no longer be considered hot and the changes - will persist. -
    7. -
    8. Delete all individual journal files. -
    9. Drop the EXCLUSIVE and PENDING locks from all database files. -
    10. -
    -} - -HEADING 2 {Writer starvation} writer_starvation - -puts { -

    In SQLite version 2, if many processes are reading from the database, -it might be the case that there is never a time when there are -no active readers. And if there is always at least one read lock on the -database, no process would ever be able to make changes to the database -because it would be impossible to acquire a write lock. This situation -is called writer starvation.

    - -

    SQLite version 3 seeks to avoid writer starvation through the use of -the PENDING lock. The PENDING lock allows existing readers to continue -but prevents new readers from connecting to the database. So when a -process wants to write a busy database, it can set a PENDING lock which -will prevent new readers from coming in. Assuming existing readers do -eventually complete, all SHARED locks will eventually clear and the -writer will be given a chance to make its changes.

    -} - -HEADING 1 {How To Corrupt Your Database Files} how_to_corrupt - -puts { -

    The pager module is robust but it is not completely failsafe. -It can be subverted. This section attempts to identify and explain -the risks.

    - -

    -Clearly, a hardware or operating system fault that introduces incorrect data -into the middle of the database file or journal will cause problems. -Likewise, -if a rogue process opens a database file or journal and writes malformed -data into the middle of it, then the database will become corrupt. -There is not much that can be done about these kinds of problems -so they are given no further attention. -

    - -

    -SQLite uses POSIX advisory locks to implement locking on Unix. On -windows it uses the LockFile(), LockFileEx(), and UnlockFile() system -calls. SQLite assumes that these system calls all work as advertised. If -that is not the case, then database corruption can result. One should -note that POSIX advisory locking is known to be buggy or even unimplemented -on many NFS implementations (including recent versions of Mac OS X) -and that there are reports of locking problems -for network filesystems under windows. Your best defense is to not -use SQLite for files on a network filesystem. -

    - -

-SQLite uses the fsync() system call to flush data to the disk under Unix and -it uses FlushFileBuffers() to do the same under windows. Once again, -SQLite assumes that these operating system services function as advertised. -But it has been reported that fsync() and FlushFileBuffers() do not always -work correctly, especially with inexpensive IDE disks. Apparently some -manufacturers of IDE disks have defective controller chips that report -that data has reached the disk surface when in fact the data is still -in volatile cache memory in the disk drive electronics. There are also -reports that windows sometimes chooses to ignore FlushFileBuffers() for -unspecified reasons. The author cannot verify any of these reports. -But if they are true, it means that database corruption is a possibility -following an unexpected power loss. These are hardware and/or operating -system bugs that SQLite is unable to defend against. -

    - -

    -If a crash or power failure occurs and results in a hot journal but that -journal is deleted, the next process to open the database will not -know that it contains changes that need to be rolled back. The rollback -will not occur and the database will be left in an inconsistent state. -Rollback journals might be deleted for any number of reasons: -

    - -
      -
    • An administrator might be cleaning up after an OS crash or power failure, - see the journal file, think it is junk, and delete it.
    • -
    • Someone (or some process) might rename the database file but fail to - also rename its associated journal.
    • -
    • If the database file has aliases (hard or soft links) and the file - is opened by a different alias than the one used to create the journal, - then the journal will not be found. To avoid this problem, you should - not create links to SQLite database files.
    • -
    • Filesystem corruption following a power failure might cause the - journal to be renamed or deleted.
    • -
    - -

-The last (fourth) bullet above merits additional comment. When SQLite creates -a journal file on Unix, it opens the directory that contains that file and -calls fsync() on the directory, in an effort to push the directory information -to disk. But suppose some other process is adding or removing unrelated -files to the directory that contains the database and journal at the -moment of a power failure. The supposedly unrelated actions of this other -process might result in the journal file being dropped from the directory and -moved into "lost+found". This is an unlikely scenario, but it could happen. -The best defenses are to use a journaling filesystem or to keep the -database and journal in a directory by themselves. -

    - -

    -For a commit involving multiple databases and a master journal, if the -various databases were on different disk volumes and a power failure occurs -during the commit, then when the machine comes back up the disks might -be remounted with different names. Or some disks might not be mounted -at all. When this happens the individual file journals and the master -journal might not be able to find each other. The worst outcome from -this scenario is that the commit ceases to be atomic. -Some databases might be rolled back and others might not. -All databases will continue to be self-consistent. -To defend against this problem, keep all databases -on the same disk volume and/or remount disks using exactly the same names -after a power failure. -

    -} - -HEADING 1 {Transaction Control At The SQL Level} transaction_control - -puts { -

    -The changes to locking and concurrency control in SQLite version 3 also -introduce some subtle changes in the way transactions work at the SQL -language level. -By default, SQLite version 3 operates in autocommit mode. -In autocommit mode, -all changes to the database are committed as soon as all operations associated -with the current database connection complete.

    - -

    The SQL command "BEGIN TRANSACTION" (the TRANSACTION keyword -is optional) is used to take SQLite out of autocommit mode. -Note that the BEGIN command does not acquire any locks on the database. -After a BEGIN command, a SHARED lock will be acquired when the first -SELECT statement is executed. A RESERVED lock will be acquired when -the first INSERT, UPDATE, or DELETE statement is executed. No EXCLUSIVE -lock is acquired until either the memory cache fills up and must -be spilled to disk or until the transaction commits. In this way, -the system delays blocking read access to the file file until the -last possible moment. -

    - -

    The SQL command "COMMIT" does not actually commit the changes to -disk. It just turns autocommit back on. Then, at the conclusion of -the command, the regular autocommit logic takes over and causes the -actual commit to disk to occur. -The SQL command "ROLLBACK" also operates by turning autocommit back on, -but it also sets a flag that tells the autocommit logic to rollback rather -than commit.

    - -

If the SQL COMMIT command turns autocommit on and the autocommit logic -then tries to commit the changes but fails because some other process is holding -a SHARED lock, then autocommit is turned back off automatically. This -allows the user to retry the COMMIT at a later time after the SHARED lock -has had an opportunity to clear.

    - -

    If multiple commands are being executed against the same SQLite database -connection at the same time, the autocommit is deferred until the very -last command completes. For example, if a SELECT statement is being -executed, the execution of the command will pause as each row of the -result is returned. During this pause other INSERT, UPDATE, or DELETE -commands can be executed against other tables in the database. But none -of these changes will commit until the original SELECT statement finishes. -

    -} - - -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/malloc.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/malloc.html --- sqlite3-3.4.2/www/malloc.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/malloc.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,1044 @@ + + +Dynamic Memory Allocation In SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    Dynamic Memory Allocation In SQLite

    + + +

SQLite uses dynamic memory allocation to obtain +memory for storing various objects +(ex: database connections and prepared statements) and to build +a memory cache of the database file and to hold the results of queries. +Much effort has gone into making the dynamic memory allocation subsystem +of SQLite reliable, predictable, robust, and efficient.

    + +

    This document provides an overview of dynamic memory allocation within +SQLite. The target audience is software engineers who are tuning their +use of SQLite for peak performance in demanding environments. +Nothing in this document is required knowledge for using SQLite. The +default settings and configuration for SQLite will work well in most +applications. However, the information contained in this document may +be useful to engineers who are tuning SQLite to comply with special +requirements or to run under unusual circumstances.

    + +

    This report is a work in progress...

    + + +

    1.0 Features

    + +

The SQLite core and its memory allocation subsystem provide the +following capabilities:

    + +
      +
• +Robust against allocation failures. +If a memory allocation ever fails (that is to say, +if malloc() or realloc() ever return NULL) +then SQLite will recover gracefully. SQLite will first attempt +to free memory from unpinned cache pages and then retry the allocation +request. +Failing that, SQLite will either stop what +it is doing and return the +SQLITE_NOMEM error code back up to the application or it will +make do without the requested memory. +

    • + +
    • +No memory leaks. +The application is responsible for destroying any objects it allocates. +(For example, the application must use sqlite3_finalize() on +every prepared statement and sqlite3_close() on every +database connection.) But as long as +the application cooperates, SQLite will never leak memory. This is +true even in the face of memory allocation failures or other system +errors. +

    • + +
• +Memory usage limits. +The sqlite3_soft_heap_limit() mechanism allows the application to +set a memory usage limit that SQLite strives to stay below. SQLite +will attempt to reuse memory from its caches rather than allocating new +memory as it approaches the soft limit. +

    • + +
    • +Zero-malloc option +The application can provide SQLite with several buffers of bulk memory +at startup and SQLite will then use those provided buffers for all of +its memory allocation needs and never call system malloc() or free(). +

    • + +
    • +Application-supplied memory allocators. +The application can provide SQLite with pointers to alternative +memory allocators at start-time. The alternative memory allocator +will be used in place of system malloc() and free(). +

    • + +
    • +Proof against breakdown and fragmentation. +SQLite can be configured so that, subject to certain usage constraints +detailed below, it is guaranteed to never fail a memory allocation +or fragment the heap. +This property is important to long-running, high-reliability +embedded systems where a memory allocation error could contribute +to an overall system failure. +

    • + +
    • +Memory usage statistics. +Applications can see how much memory they are using and detect when +memory usage is approaching or exceeding design boundaries. +

    • + +
    • +Minimal calls to the allocator. +The system malloc() and free() implementations are inefficient +on many systems. SQLite strives to reduce overall processing time +by minimizing its use of malloc() and free(). +

    • + +
• +Open access. +Pluggable SQLite extensions or even the application itself can +have access to the same underlying memory allocation +routines used by SQLite through the +sqlite3_malloc(), sqlite3_realloc(), and sqlite3_free() interfaces. +

    • + +
    + + +

    2.0 Testing

    + +

    Over +75% of the code in the SQLite source tree is devoted purely to testing +and verification. Reliability is important to SQLite. +Among the tasks of the test infrastructure is to insure that +SQLite does not misuse dynamically allocated memory, that SQLite +does not leak memory, and that SQLite responds +correctly to a dynamic memory allocation failure.

    + +

    The test infrastructure verifies that SQLite does not misuse +dynamically allocated memory by using a specially instrumented +memory allocator. The instrumented memory allocator is enabled +at compile-time using the SQLITE_MEMDEBUG option. The instrumented +memory allocator is much slower than the default memory allocator and +so its use is not recommended in production. But when +enabled during testing, +the instrumented memory allocator performs the following checks:

    + +
      +
    • Bounds checking. +The instrumented memory allocator places sentinel values at both ends +of each memory allocation to verify that nothing within SQLite writes +outside the bounds of the allocation.

    • + +
    • Use of memory after freeing. +When each block of memory is freed, every byte is overwritten with a +nonsense bit pattern. This helps to insure that no memory is ever +used after having been freed.

    • + +
• Freeing memory not obtained from malloc. +Each memory allocation from the instrumented memory allocator contains +sentinels used to verify that every allocation freed came +from a prior malloc().

    • + +
• Uninitialized memory. +The instrumented memory allocator initializes each memory allocation +to a nonsense bit pattern to help insure that the user makes no +assumptions about the content of newly allocated memory.

    • +
    + +

    Regardless of whether or not the instrumented memory allocator is +used, SQLite keeps track of how much memory is currently checked out. +There are hundreds of test scripts used for testing SQLite. At the +end of each script, all objects are destroyed and a test is made to +insure that all memory has been freed. This is how memory +leaks are detected. Notice that memory leak detection is in force at +all times, during test builds and during production builds. Whenever +one of the developers runs any individual test script, memory leak +detection is active. Hence memory leaks that do arise during development +are quickly detected and fixed.

    + + +

The response of SQLite to out-of-memory (OOM) errors is tested using +a specialized memory allocator overlay that can simulate memory failures. +The overlay is a layer that is inserted in between the memory allocator +and the rest of SQLite. The overlay passes most memory allocation +requests straight through to the underlying allocator and passes the +results back up to the requester. But the overlay can be set to +cause the Nth memory allocation to fail. To run an OOM test, the overlay +is first set to fail on the first allocation attempt. Then some test +script is run and it is verified that the allocation failure was correctly caught +and handled. Then the overlay is set to fail on the second +allocation and the test repeats. The failure point continues to advance +one allocation at a time until the entire test procedure runs to +completion without hitting a memory allocation error. This whole +test sequence is run twice. On the first pass, the +overlay is set to fail only the Nth allocation. On the second pass, +the overlay is set to fail the Nth and all subsequent allocations.

    + +

    Note that the memory leak detection logic continues to work even +when the OOM overlay is being used. This verifies that SQLite +does not leak memory even when it encounters memory allocation errors. +Note also that the OOM overlay can work with any underlying memory +allocator, including the instrumented memory allocator that checks +for memory allocation misuse. In this way it is verified that +OOM errors do not induce other kinds of memory usage errors.

    + +

    Finally, we observe that the instrumented memory allocator and the +memory leak detector both work over the entire SQLite test suite and +the test suite provides over 99% statement test coverage. This is +strong evidence that dynamic memory allocation is used correctly +everywhere within SQLite.

    + + +

    3.0 Configuration

    + +

    The default memory allocation settings in SQLite are appropriate +for most applications. However, applications with unusual or particularly +strict requirements may want to adjust the configuration to more closely +align SQLite to their needs. +Both compile-time and start-time configuration options are available.

    + + +

    3.1 Alternative low-level memory allocators

    + +

    The SQLite source code includes several different memory allocation +modules that can be selected at compile-time, or to a limited extent +at start-time.

    + + + +

    3.1.1 The default memory allocator

    + +

    By default, SQLite uses the malloc(), realloc(), and free() routines +from the standard C library for its memory allocation needs. These routines +are surrounded by a thin wrapper that also provides a "memsize()" function +that will return the size of an existing allocation. The memsize() function +is needed to keep an accurate count of the number of bytes of outstanding +memory; memsize() determines how many bytes to remove from the outstanding +count when an allocation is freed. The default allocator implements +memsize() by always allocating 8 extra bytes on each malloc() request and +storing the size of the allocation in that 8-byte header.
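The size-header idea can be sketched in a few lines of C. This is an editorial illustration of the technique, not SQLite's actual wrapper code, and the function names are hypothetical.

    #include <stdlib.h>
    #include <stdint.h>

    /* Over-allocate by 8 bytes and store the requested size in that header. */
    static void *sizeMalloc(int nByte){
      int64_t *p = malloc(nByte + 8);
      if( p==0 ) return 0;
      p[0] = nByte;                 /* remember the size */
      return (void*)&p[1];          /* hand back the space after the header */
    }
    /* memsize()-style lookup: read the size back out of the header. */
    static int sizeOf(void *pPrior){
      return pPrior ? (int)((int64_t*)pPrior)[-1] : 0;
    }
    static void sizeFree(void *pPrior){
      if( pPrior ) free((int64_t*)pPrior - 1);
    }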

    + +

    The default memory allocator is recommended for most applications. +If you do not have a compelling need to use an alternative memory +allocator, then use the default.

    + + + +

    3.1.2 The debugging memory allocator

    + +

If SQLite is compiled with the SQLITE_MEMDEBUG compile-time option, +then a different, heavy wrapper is used around system malloc(), realloc(), +and free(). +The heavy wrapper allocates around 100 bytes of extra space +with each allocation. The extra space is used to place sentinel values +at both ends of the allocation returned to the SQLite core. When an +allocation is freed, +these sentinels are checked to make sure the SQLite core did not overrun +the buffer in either direction. When the system library is GLIBC, the +heavy wrapper also makes use of the GNU backtrace() function to examine +the stack and record the ancestor functions of the malloc() call. When +running the SQLite test suite, the heavy wrapper also records the name of +the current test case. These latter two features are useful for +tracking down the source of memory leaks detected by the test suite.

    + +

The heavy wrapper that is used when SQLITE_MEMDEBUG is set also +makes sure each new allocation is filled with nonsense data prior to +returning the allocation to the caller. And as soon as an allocation +is freed, it is again filled with nonsense data. These two actions help +to insure that the SQLite core does not make assumptions about the state +of newly allocated memory and that memory allocations are not used after +they have been freed.

    + +

    The heavy wrapper employed by SQLITE_MEMDEBUG is intended for use +only during testing, analysis, and debugging of SQLite. The heavy wrapper +has a significant performance and memory overhead and probably should not +be used in production.

    + + + +

    3.1.3 Zero-malloc memory allocator

    + +

    When SQLite is compiled with the SQLITE_ENABLE_MEMSYS5 option, an +alternative memory allocator that does not use malloc() is included in the +build. The SQLite developers refer to this alternative memory allocator +as "memsys5". Even when it is included in the build, memsys5 is +disabled by default. +To enable memsys5, the application must invoke the following SQLite +interface at start-time:

    + +
    +sqlite3_config(SQLITE_CONFIG_HEAP, pBuf, szBuf, mnReq);
    +
    + +

    In the call above, pBuf is a pointer to a large, contiguous chunk +of memory space that SQLite will use to satisfy all of its memory +allocation needs. pBuf might point to a static array or it might +be memory obtained from some other application-specific mechanism. +szBuf is an integer that is the number of bytes of memory space +pointed to by pBuf. mnReq is another integer that is the +minimum size of an allocation. Any call to sqlite3_malloc(N) where +N is less than mnReq will be rounded up to mnReq. mnReq must be +a power of two. We shall see later that the mnReq parameter is +important in reducing the value of n and hence the minimum memory +size requirement in the Robson proof.
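A minimal start-time call might look like the following sketch. The 2 MB buffer and the 64-byte minimum allocation are illustrative values only, and the call must be made before sqlite3_initialize() or any other use of the library.

    static char aHeap[2*1024*1024];   /* memory handed over to SQLite */
    int rc = sqlite3_config(SQLITE_CONFIG_HEAP, aHeap, (int)sizeof(aHeap), 64);
    if( rc!=SQLITE_OK ){
      /* this build does not include memsys5 (or memsys3) */
    }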

    + +

The memsys5 allocator is designed for use on embedded systems, +though there is nothing to prevent its use on workstations. +The szBuf typically ranges from a few hundred kilobytes up to a few +dozen megabytes, depending on system requirements and memory budget.

    + +

    The algorithm used by memsys5 can be called "power-of-two, +first-fit". The sizes of all memory allocation +requests are rounded up to a power of two and the request is satisfied +by the first free slot in pBuf that is large enough. Adjacent freed +allocations are coalesced using a buddy system. When used appropriately, +this algorithm provides mathematical guarantees against fragmentation and +breakdown, as described further below.

    + + + +

    3.1.4 Experimental memory allocators

    + +

    The name "memsys5" used for the zero-malloc memory allocator implies +that there are several additional memory allocators available, and indeed +there are. The default memory allocator is "memsys1". The debugging +memory allocator is "memsys2". Those have already been covered.

    + +

If SQLite is compiled with SQLITE_ENABLE_MEMSYS3 then another +zero-malloc memory allocator, similar to memsys5, is included in the +source tree. The memsys3 allocator, like memsys5, must be activated +by a call to sqlite3_config(SQLITE_CONFIG_HEAP,...). Memsys3 +uses the memory buffer supplied as its source for all memory allocations. +The difference between memsys3 and memsys5 is that memsys3 uses a +different memory allocation algorithm that seems to work well in +practice, but which does not provide mathematical +guarantees against memory fragmentation and breakdown. Memsys3 was +a predecessor to memsys5. The SQLite developers now believe that +memsys5 is superior to +memsys3 and that all applications that need a zero-malloc memory +allocator should use memsys5 in preference to memsys3. Memsys3 is +considered both experimental and deprecated and will likely be removed +from the source tree in a future release of SQLite.

    + +

    Code for memsys4 is still in the SQLite source tree (as of this writing - +SQLite version 3.6.1), but it has not been maintained for several release +cycles and probably does not work. (Update: memsys4 was removed as +of version 3.6.5) Memsys4 was an attempt to use mmap() +to obtain memory and then use madvise() to release unused pages +back to the operating system so that they could be reused by other +processes. The work on memsys4 has been abandoned and the memsys4 module +will likely be removed from the source tree in the near future.

    + +

Memsys6 uses system malloc() and free() to obtain the memory it needs. +Memsys6 serves as an aggregator. Memsys6 only calls system malloc() to obtain +large allocations. It then subdivides those large allocations to service +multiple smaller memory allocation requests coming from the SQLite core. +Memsys6 is intended for use in systems where +system malloc() is particularly inefficient. The idea behind memsys6 is +to reduce the number of calls to system malloc() by a factor of 10 or more.

    + +

    Memsys6 is made available by compiling SQLite with the SQLITE_ENABLE_MEMSYS6 +compile-time option and then at start-time invoking:

    + +
    +sqlite3_config(SQLITE_CONFIG_CHUNKALLOC);
    +
    + +

    Memsys6 was added in SQLite version 3.6.1. +It is very experimental. Its future is uncertain and it may be removed +in a subsequent release. Update: Memsys6 was removed as of +version 3.6.5.

    + +

Other experimental memory allocators might be added in future releases +of SQLite. One may anticipate that these will be called memsys7, memsys8, +and so forth.

    + + +

    3.1.5 Application-defined memory allocators

    + +

    New memory allocators do not have to be part of the SQLite source tree +nor included in the sqlite3.c amalgamation. Individual applications can +supply their own memory allocators to SQLite at start-time.

    + +

    To cause SQLite to use a new memory allocator, the application +simply calls:

    + +
    +sqlite3_config(SQLITE_CONFIG_MALLOC, pMem);
    +
    + +

    In the call above, pMem is a pointer to an sqlite3_mem_methods object +that defines the interface to the application-specific memory allocator. +The sqlite3_mem_methods object is really just a structure containing +pointers to functions to implement the various memory allocation primitives. +
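A minimal sqlite3_mem_methods implementation built on the C library might look like the sketch below. The "demo" names are hypothetical, and malloc_usable_size() is a glibc extension used here only to keep the sketch short; a portable allocator would track allocation sizes itself, for example with the 8-byte header shown earlier.

    #include <stdlib.h>
    #include <malloc.h>      /* malloc_usable_size() (glibc extension) */
    #include "sqlite3.h"

    static void *demoMalloc(int n){ return malloc(n); }
    static void demoFree(void *p){ free(p); }
    static void *demoRealloc(void *p, int n){ return realloc(p, n); }
    static int demoSize(void *p){ return p ? (int)malloc_usable_size(p) : 0; }
    static int demoRoundup(int n){ return (n+7)&~7; }  /* 8-byte granularity */
    static int demoInit(void *pAppData){ return SQLITE_OK; }
    static void demoShutdown(void *pAppData){ }

    static sqlite3_mem_methods demoMethods = {
      demoMalloc, demoFree, demoRealloc,
      demoSize, demoRoundup, demoInit, demoShutdown, 0
    };

    int installDemoAllocator(void){
      /* Must run before sqlite3_initialize() or any other SQLite call. */
      return sqlite3_config(SQLITE_CONFIG_MALLOC, &demoMethods);
    }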

    + +

    In a multi-threaded application, access to the sqlite3_mem_methods +is serialized if and only if SQLITE_CONFIG_MEMSTATUS is enabled. +If SQLITE_CONFIG_MEMSTATUS is disabled then the methods in +sqlite3_mem_methods must take care of their own serialization needs.

    + + +

    3.1.6 Memory allocator overlays

    + +

    An application can insert layers or "overlays" in between the +SQLite core and the underlying memory allocator. +For example, the out-of-memory test logic +for SQLite uses an overlay that can simulate memory allocation +failures.

    + +

    An overlay can be created by using the

    + +
    +sqlite3_config(SQLITE_CONFIG_GETMALLOC, pOldMem);
    +
    + +

    interface to obtain pointers to the existing memory allocator. +The existing allocator is saved by the overlay and is used as +a fallback to do real memory allocation. Then the overlay is +inserted in place of the existing memory allocator using +the sqlite3_config(SQLITE_CONFIG_MALLOC,...) as described +above. + + +
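Those two calls can be combined into a small fault-injection overlay of the kind described in the testing section. This is an editorial sketch only; the function names and the countdown scheme are invented for illustration.

    static sqlite3_mem_methods realMethods;    /* saved underlying allocator */
    static sqlite3_mem_methods overlayMethods;
    static int failCountdown = -1;             /* fail one allocation when this reaches 0 */

    static void *faultMalloc(int n){
      if( failCountdown>=0 && failCountdown--==0 ) return 0;  /* simulated OOM */
      return realMethods.xMalloc(n);
    }

    int installFaultOverlay(void){
      int rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &realMethods);
      if( rc!=SQLITE_OK ) return rc;
      overlayMethods = realMethods;        /* pass everything else straight through */
      overlayMethods.xMalloc = faultMalloc;
      return sqlite3_config(SQLITE_CONFIG_MALLOC, &overlayMethods);
    }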

    3.1.7 No-op memory allocator stub

    + +

    If SQLite is compiled with the SQLITE_ZERO_MALLOC option, then +the default memory allocator is omitted and replaced by a stub +memory allocator that never allocates any memory. Any calls to the +stub memory allocator will report back that no memory is available.

    + +

    The no-op memory allocator is not useful by itself. It exists only +as a placeholder so that SQLite has a memory allocator to link against +on systems that may not have malloc(), free(), or realloc() in their +standard library. +An application that is compiled with SQLITE_ZERO_MALLOC will need to +use sqlite3_config() together with SQLITE_CONFIG_MALLOC or +SQLITE_CONFIG_HEAP to specify a new alternative memory allocator +before beginning to use SQLite.

    + + + +

    3.2 Scratch memory

    + +

    SQLite occasionally needs a large chunk of "scratch" memory to +perform some transient calculation. Scratch memory is used, for example, +as temporary storage when rebalancing a B-Tree. These scratch memory +allocations are typically about 10 kilobytes in size and are +transient - lasting +only for the duration of a single, short-lived function call.

    + +

    In older versions of SQLite, the scratch memory was obtained from +the processor stack. That works great on workstations with a large stack. +But pulling large buffers from the stack +caused problems on embedded systems with a +small processor stack (typically 4K or 8K). And so SQLite was modified +to allocate scratch memory from the heap.

    + +

    However, doing occasional large transient allocations from the heap can +lead to memory fragmentation in embedded systems. To work around this +problem, a separate memory allocation system for scratch memory has been +created.

    + +

    The scratch memory allocator is set up as follows:

    + +
    +sqlite3_config(SQLITE_CONFIG_SCRATCH, pBuf, sz, N);
    +
    + +

    The pBuf parameter is a pointer to a contiguous range of bytes that +SQLite will use for all scratch memory allocations. The buffer must be +at least sz*N bytes in size. The "sz" parameter +is the maximum size of each scratch allocation. N is the maximum +number of simultaneous scratch allocations. The "sz" parameter should +be approximately 6 times the maximum database page size. N should +be the number of threads running in the system. No single thread will +ever request more than one scratch allocation at a time so if there +are never more than N threads, then there will always be enough scratch +memory available.

    + +

    If the scratch memory setup does not define enough memory, then +SQLite falls back to using the regular memory allocator for its scratch +memory allocations. The default setup is sz=0 and N=0 so the use +of the regular memory allocator is the default behavior.
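For example, with a 1024-byte page size, an application that never runs more than four threads inside SQLite might set aside four 6 KB slots at start-time. The numbers below are illustrative, not a recommendation from the original page.

    #define SCRATCH_SZ  (6*1024)     /* roughly 6 x the 1024-byte page size */
    #define SCRATCH_N   4            /* one slot per thread */
    static char aScratch[SCRATCH_SZ*SCRATCH_N];
    sqlite3_config(SQLITE_CONFIG_SCRATCH, aScratch, SCRATCH_SZ, SCRATCH_N);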

    + + + +

    3.3 Page cache memory

    + +

In most applications, the database page cache subsystem within +SQLite uses more dynamically allocated memory than all other parts +of SQLite combined. It is not unusual to see the database page cache +consume over 10 times more memory than the rest of SQLite combined.

    + +

    SQLite can be configured to make page cache memory allocations from +a separate and distinct memory pool of fixed-size +slots. This can have two advantages:

    + +
      +
    • +Because allocations are all the same size, the memory allocator can +operate much faster. The allocator need not bother with coalescing +adjacent free slots or searching for a slot +of an appropriate size. All unallocated memory slots can be stored on +a linked list. Allocating consists of removing the first entry from the +list. Deallocating is simply adding an entry to the beginning of the list. +

    • + +
    • +With a single allocation size, the n parameter in the +Robson proof is 1, and the total memory space required by the allocator +(N) is exactly equal to maximum memory used (M). +No additional memory is required to cover fragmentation overhead, thus +reducing memory requirements. This is particularly important for the +page cache memory since the page cache constitutes the largest component +of the memory needs of SQLite. +

    • +
    + +

The page-cache memory allocator is disabled by default. +An application can enable it at start-time as follows:

    + +
    +sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N);
    +
    + +

    The pBuf parameter is a pointer to a contiguous range of bytes that +SQLite will use for page-cache memory allocations. The buffer must be +at least sz*N bytes in size. The "sz" parameter +is the size of each page-cache allocation. N is the maximum +number of available allocations.

    + +

    If SQLite needs a page-cache entry that is larger than "sz" bytes or +if it needs more than N entries, it falls back to using the +general-purpose memory allocator.
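A start-time setup for a database using 1024-byte pages might therefore look like the sketch below. The 64-byte per-entry allowance and the 200-entry count are illustrative guesses, since the exact per-page overhead depends on the SQLite version.

    #define PCACHE_SZ  (1024 + 64)   /* page size plus per-page overhead */
    #define PCACHE_N   200           /* up to 200 cached pages from this pool */
    static char aPageCache[PCACHE_SZ*PCACHE_N];
    sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageCache, PCACHE_SZ, PCACHE_N);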

    + + + +

    3.4 Lookaside memory allocator

    + +

    SQLite database connections make many +small and short-lived memory allocations. +This occurs most commonly when compiling SQL statements using +sqlite3_prepare_v2() but also to a lesser extent when running +prepared statements using sqlite3_step(). These small memory +allocations are used to hold things such as the names of tables +and columns, parse tree nodes, individual query results values, +and B-Tree cursor objects. There are consequently +many calls to malloc() and free() - so many calls that malloc() and +free() end up using a significant fraction of the CPU time assigned +to SQLite.

    + +

SQLite version 3.6.1 introduced the lookaside memory allocator to +help reduce the memory allocation load. In the lookaside allocator, +each database connection preallocates a single large chunk of memory +(typically in the range of 50 to 100 kilobytes) and divides that chunk +up into small fixed-size "slots" of around 50 to 200 bytes each. This +becomes the lookaside memory pool. Thereafter, memory allocations +associated with the database connection and that are not too large +are satisfied using one of the lookaside pool slots rather than by calling +the general-purpose memory allocator. Larger allocations continue to +use the general-purpose memory allocator, as do allocations that occur +when the lookaside pool slots are all checked out. +But in many cases, the memory +allocations are small enough and there are few enough outstanding that +the new memory requests can be satisfied from the lookaside +pool.

    + +

Because lookaside allocations are always the same size, the allocation +and deallocation algorithms are very quick. There is no +need to coalesce adjacent free slots or search for a slot +of a particular size. Each database connection maintains a singly-linked +list of unused slots. Allocation requests simply pull the first +element of this list. Deallocations simply push the element back onto +the front of the list. +Furthermore, each database connection is assumed to already be +running in a single thread (there are mutexes already in +place to enforce this) so no additional mutexing is required to +serialize access to the lookaside slot freelist. +Consequently, lookaside memory +allocations and deallocations are very fast. In speed tests on +Linux and Mac OS X workstations, SQLite has shown overall performance +improvements as high as 10% to 15%, depending on the workload and how +lookaside is configured.

    + +

    The size of the lookaside memory pool has a global default value +but can also be configured on a connection-by-connection basis. +To change the default size of the lookaside memory pool use the +following interface at start-time:

    + +
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, sz, cnt);
    +
    + +

    The "sz" parameter is the size in bytes of each lookaside slot. +The default is 100 bytes. The "cnt" parameter is +the total number of lookaside memory slots per database connection. +The default value is 500 slots. The total amount +of lookaside memory allocated to each database connection is +sz*cnt bytes. Hence the lookaside memory pool allocated per database +connection is 50 kilobytes by default. +(Note: these default values are for SQLite version 3.6.1 and are subject +to changes in future releases.) +

    + +

    The lookaside pool can be changed for an individual +database connection "db" using this call:

    + +
    +sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, pBuf, sz, cnt);
    +
    + +

    The "pBuf" parameter is a pointer to memory space that will be +used for the lookaside memory pool. If pBuf is NULL, then SQLite +will obtain its own space for the memory pool using sqlite3_malloc(). +The "sz" and "cnt" parameters are the size of each lookaside slot +and the number of slots, respectively. If pBuf is not NULL, then it +must point to at least sz*cnt bytes of memory.

    + +

    The lookaside configuration can only be changed while there are +no outstanding lookaside allocations for the database connection. +Hence, the configuration should be set immediately after creating the +database connection using sqlite3_open() (or equivalent) and before +evaluating any SQL statements on the connection.
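A typical sequence is sketched below; the filename and the 256-byte/500-slot configuration are illustrative values, and passing NULL for pBuf lets SQLite allocate the pool itself.

    sqlite3 *db;
    if( sqlite3_open("app.db", &db)==SQLITE_OK ){
      /* Reconfigure lookaside before any SQL is evaluated on this connection. */
      sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 256, 500);
    }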

    + + + +

    3.5 Memory status

    + +

    By default, SQLite keeps statistics on its memory usage. These +statistics are useful in helping to determine how much memory an +application really needs. The statistics can also be used in +high-reliability system to determine +if the memory usage is coming close to or exceeding the limits +of the Robson proof and hence that the memory allocation subsystem is +liable to breakdown.

    + +

    Most memory statistics are global, and therefore the tracking of +statistics must be serialized with a mutex. Statistics are turned +on by default, but an option exists to disable them. By disabling +memory statistics, +SQLite avoids entering and leaving a mutex on each memory allocation +and deallocation. That savings can be noticeable on systems where +mutex operations are expensive. To disable memory statistics, the +following interface is used at start-time:

    + +
    +sqlite3_config(SQLITE_CONFIG_MEMSTATUS, onoff);
    +
    + +

    The "onoff" parameter is true to enable the tracking of memory +statistics and false to disable statistics tracking.

    + +

    Assuming statistics are enabled, the following routine can be used +to access them:

    + +
+sqlite3_status(verb, &current, &highwater, resetflag);
    +
    + +

    The "verb" argument determines what statistic is accessed. +There are various verbs defined. The +list is expected to grow as the sqlite3_status() interface matures. +The current value the selected parameter is written into integer +"current" and the highest historical value +is written into integer "highwater". If resetflag is true, then +the high-water mark is reset down to the current value after the call +returns.

    + +

    A different interface is used to find statistics associated with a +single database connection:

    + +
+sqlite3_db_status(db, verb, &current, &highwater, resetflag);
    +
    + +

    This interface is similar except that it takes a pointer to +a database connection as its first argument and returns statistics about +that one object rather than about the entire SQLite library. +The sqlite3_db_status() interface currently only recognizes a +single verb SQLITE_DBSTATUS_LOOKASIDE_USED, though additional verbs +may be added in the future.

    + +

    The per-connection statistics do not use global variables and hence +do not require mutexes to update or access. Consequently the +per-connection statistics continue to function even if +SQLITE_CONFIG_MEMSTATUS is turned off.
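For instance, the following fragment (editorial, not from the original page) reads the library-wide heap usage and the lookaside usage of one open connection db, using the existing SQLITE_STATUS_MEMORY_USED and SQLITE_DBSTATUS_LOOKASIDE_USED verbs.

    int cur, hi;
    /* Bytes of heap memory currently in use by SQLite, plus the high-water mark. */
    sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0);
    /* Lookaside slots in use on connection db; resetflag=1 also resets the peak. */
    sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &cur, &hi, 1);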

    + + +

    3.6 Setting memory usage limits

    + +

The sqlite3_soft_heap_limit() interface can be used to set an +upper bound on the total amount of memory that the +general-purpose memory allocator for SQLite will allow to be outstanding +at one time. If attempts are made to allocate more memory than specified +by the soft heap limit, then SQLite will first attempt to free cache +memory before continuing with the allocation request. The soft heap +limit mechanism only works if memory statistics are enabled and +if the SQLite library is compiled with the SQLITE_ENABLE_MEMORY_MANAGEMENT +compile-time option.

    + +

The soft heap limit is "soft" in this sense: If SQLite is not able +to free up enough auxiliary memory to stay below the limit, it goes +ahead and allocates the extra memory and exceeds its limit. This occurs +under the theory that it is better to use additional memory than to fail +outright.

    + +

    As of SQLite version 3.6.1, the soft heap limit only applies to the +general-purpose memory allocator. The soft heap limit does not know +about or interact with the scratch memory allocator, +the pagecache memory allocator, or the lookaside memory allocator. +This deficiency will likely be addressed in a future release.
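As a start-time sketch (the 8 MB figure below is arbitrary), the limit is set with a single call:

    /* Ask SQLite to try to keep general-purpose heap usage below 8 MB.
    ** Requires memory statistics and, in this era of SQLite, the
    ** SQLITE_ENABLE_MEMORY_MANAGEMENT compile-time option. */
    sqlite3_soft_heap_limit(8*1024*1024);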

    + + + +

    4.0 Mathematical Guarantees Against Memory Allocation Failures

    + +

    The problem of dynamic memory allocation, and specifically the +problem of a memory allocator breakdown, has been studied by +J. M. Robson and the results published as:

    + +
    +J. M. Robson. "Bounds for Some Functions Concerning Dynamic +Storage Allocation". Journal of the Association for +Computing Machinery, Volume 21, Number 8, July 1974, +pages 491-499. +
    + +

    Let us use the following notation (similar but not identical to +Robson's notation):

    + +
    + + + + + + + +
    N +The amount of raw memory needed by the memory allocation system +in order to guarantee that no memory allocation will ever fail. +
    M +The maximum amount of memory that the application ever has checked out +at any point in time. +
    n +The ratio of the largest memory allocation to the smallest. We assume +that every memory allocation size is an integer multiple of the smallest memory +allocation size. +
    +
    + +

    Robson proves the following result:

    + +
    +N = M*(1 + (log2 n)/2) - n + 1 +
    + +

Colloquially, the Robson proof shows that in order to guarantee +breakdown-free operation, any memory allocator must use a memory pool +of size N which exceeds the maximum amount of memory ever +used M by a multiplier that depends on n, +the ratio of the largest to the smallest allocation size. In other +words, unless all memory allocations are of exactly the same size +(n=1) then the system needs access to more memory than it will +ever use at one time. Furthermore, we see that the amount of surplus +memory required grows rapidly as the ratio of largest to smallest +allocations increases, and so there is strong incentive to keep all +allocations as near to the same size as possible.
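As an editorial illustration of the formula: if the smallest allocation is 64 bytes and the largest is 4096 bytes, then n = 4096/64 = 64 and log2(n) = 6, so N = M*(1 + 6/2) - 64 + 1 = 4*M - 63. An application that never has more than M = 1 megabyte of memory checked out at once would therefore need roughly a 4 megabyte pool before fragmentation-induced allocation failures could be ruled out entirely.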

    + +

    Robson's proof is constructive. +He provides an algorithm for computing a sequence of allocation +and deallocation operations that will lead to an allocation failure due to +memory fragmentation if available memory is as much as one byte +less than N. +And, Robson shows that a power-of-two first-fit memory allocator +(such as implemented by memsys5) will never fail a memory allocation +provided that available memory is N or more bytes.

    + +

    The values M and n are properties of the application. +If an application is constructed in such a way that both M and +n are known, or at least have known upper bounds, and if the +application uses +the memsys5 memory allocator and is provided with N bytes of +available memory space using SQLITE_CONFIG_HEAP +then Robson proves that no memory allocation request will ever fail +within the application. +To put this another way, the application developer can select a value +for N that will guarantee that no call to any SQLite interface +will ever return SQLITE_NOMEM. The memory pool will never become +so fragmented that a new memory allocation request cannot be satisfied. +This is an important property for +applications where a software fault could cause injury, physical harm, or +loss of irreplaceable data.
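    The following sketch shows what providing those N bytes might look like in practice; the 1MB pool size and 512-byte minimum allocation are placeholder values that a real application would derive from its own M and n (an illustration added here, not text from the original page):

    #include "sqlite3.h"

    /* A fixed, statically allocated pool handed to memsys5 at startup.
    ** Requires a library built with SQLITE_ENABLE_MEMSYS5, and must be
    ** done before any other SQLite interface is used. */
    static char aMemPool[1024*1024];     /* the "N bytes" of available memory */

    static int configure_heap(void){
      int rc = sqlite3_config(SQLITE_CONFIG_HEAP, aMemPool, sizeof(aMemPool), 512);
      if( rc!=SQLITE_OK ) return rc;
      return sqlite3_initialize();
    }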

    + +

    4.1 Computing and controlling parameters M and n

    + +

    The Robson proof applies separately to each of the memory allocators +used by SQLite:

      the general-purpose memory allocator (memsys5),
      the scratch memory allocator,
      the pagecache memory allocator, and
      the lookaside memory allocator.

    For allocators other than memsys5, +all memory allocations are of the same size. Hence, n=1 +and therefore N=M. In other words, the memory pool need +be no larger than the largest amount of memory in use at any given moment.

    + +

    SQLite guarantees that no thread will ever use more than a single +scratch memory slot at one time. So if an application allocates as many +scratch memory slots as there are threads, and assuming the size of +each slot is large enough, there is never a chance of overflowing the +scratch memory allocator. An upper bound on the size of scratch memory +allocations is six times the largest page size. It is easy, therefore, +to guarantee breakdown-free operation of the scratch memory allocator.
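    As an illustration of that rule of thumb (not from the original page), a build that never uses more than four threads and a 4096-byte page size might reserve scratch memory like this:

    #include "sqlite3.h"

    /* Four scratch slots (one per thread), each six times the 4096-byte
    ** page size, so scratch requests never spill into the general-purpose
    ** allocator.  Must be configured before sqlite3_initialize(). */
    #define SLOT_SIZE  (6*4096)
    #define N_SLOT     4
    static char aScratch[SLOT_SIZE*N_SLOT];

    static int configure_scratch(void){
      return sqlite3_config(SQLITE_CONFIG_SCRATCH, aScratch, SLOT_SIZE, N_SLOT);
    }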

    + +

    The usage of pagecache memory is somewhat harder to control in +SQLite version 3.6.1, though mechanisms are planned for subsequent +releases that will make controlling pagecache memory much easier. +Prior to the introduction of these new mechanisms, the only way +to control pagecache memory is using the cache_size pragma.
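    For example (an illustrative sketch, not from the original page), an application might cap the page cache of a connection as soon as it is opened:

    #include "sqlite3.h"

    /* Limit this connection's page cache to 50 pages instead of the
    ** default 2000, bounding how much pagecache memory it can demand. */
    static int limit_page_cache(sqlite3 *db){
      char *zErr = 0;
      int rc = sqlite3_exec(db, "PRAGMA cache_size=50;", 0, 0, &zErr);
      sqlite3_free(zErr);
      return rc;
    }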

    + +

    Safety-critical applications will usually want to modify the default lookaside memory configuration so that when the initial lookaside memory buffer is allocated during sqlite3_open() the resulting memory allocation is not so large as to force the n parameter to be too large. In order to keep n under control, it is best to keep the largest memory allocation below 2 or 4 kilobytes. Hence, a reasonable default setup for the lookaside memory allocator might be any one of the following:

    + +
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 32, 32);  /* 1K */
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 64, 32);  /* 2K */
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 32, 64);  /* 2K */
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 64, 64);  /* 4K */
    +
    + +

    Another approach is to initially disable the lookaside memory +allocator:

    + +
    +sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 0, 0);
    +
    + +

    Then let the application maintain a separate pool of larger +lookaside memory buffers that it can distribute to database connections +as they are created. In the common case, the application will only +have a single database connection and so the lookaside memory pool +can consist of a single large buffer.

    + +
    +sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, aStatic, 256, 500);
    +
    + +

    The lookaside memory allocator is really intended as a performance optimization, not as a method for assuring breakdown-free memory allocation, so it is not unreasonable to completely disable the lookaside memory allocator for safety-critical operations.

    + +

    The general purpose memory allocator is the most difficult memory pool +to manage because it supports allocations of varying sizes. Since +n is a multiplier on M we want to keep n as small +as possible. This argues for keeping the minimum allocation size for +memsys5 as large as possible. In most applications, the +lookaside memory allocator is able to handle small allocations. So +it is reasonable to set the minimum allocation size for memsys5 to +2, 4 or even 8 times the maximum size of a lookaside allocation. +A minimum allocation size of 512 is a reasonable setting.

    + +

    In addition to keeping n small, one also wants to keep the size of the largest memory allocations under control. Large requests to the general-purpose memory allocator might come from several sources:

    + +
    1. SQL table rows that contain large strings or BLOBs.
    2. Complex SQL queries that compile down to large prepared statements.
    3. SQL parser objects used internally by sqlite3_prepare_v2().
    4. Storage space for database connection objects.
    5. Scratch memory allocations that overflow into the general-purpose memory allocator.
    6. Page cache memory allocations that overflow into the general-purpose memory allocator.
    7. Lookaside buffer allocations for new database connections.
    + +

    The last three allocations can be controlled and/or eliminated by +configuring the scratch memory allocator, pagecache memory allocator, +and lookaside memory allocator appropriately, as described above. +The storage space required for database connection objects depends +to some extent on the length of the filename of the database file, but +rarely exceeds 2KB on 32-bit systems. (More space is required on +64-bit systems due to the increased size of pointers.) +Each parser object uses about 1.6KB of memory. Thus, elements 3 through 7 +above can easily be controlled to keep the maximum memory allocation +size below 2KB.

    + +

    If the application is designed to manage data in small pieces, then the database should never contain any large strings or BLOBs and hence element 1 above should not be a factor. If the database does contain large strings or BLOBs, they should be read using incremental BLOB I/O, and rows that contain the large strings or BLOBs should never be updated by any means other than incremental BLOB I/O. Otherwise, the sqlite3_step() routine will need to read the entire row into contiguous memory at some point, and that will involve at least one large memory allocation.
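    A sketch of the incremental read side (the table and column names, "docs" and "body", are hypothetical, and error handling is abbreviated):

    #include "sqlite3.h"

    /* Read one large BLOB in 4KB pieces so sqlite3_step() never has to pull
    ** the whole row into a single contiguous allocation. */
    static int read_large_blob(sqlite3 *db, sqlite3_int64 rowid){
      sqlite3_blob *pBlob = 0;
      char buf[4096];
      int i, n, rc;

      rc = sqlite3_blob_open(db, "main", "docs", "body", rowid, 0, &pBlob);
      if( rc!=SQLITE_OK ) return rc;
      n = sqlite3_blob_bytes(pBlob);
      for(i=0; i<n && rc==SQLITE_OK; i+=sizeof(buf)){
        int amt = (n-i < (int)sizeof(buf)) ? n-i : (int)sizeof(buf);
        rc = sqlite3_blob_read(pBlob, buf, amt, i);
        /* ...process amt bytes of buf here... */
      }
      sqlite3_blob_close(pBlob);
      return rc;
    }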

    + +

    The final source of large memory allocations is the space to hold +the prepared statements that result from compiling complex SQL +operations. Ongoing work by the SQLite developers is reducing the +amount of space required here. But large and complex queries might +still require prepared statements that are several kilobytes in +size. The only workaround at the moment is for the application to +break complex SQL operations up into two or more smaller and simpler +operations contained in separate prepared statements.

    + +

    All things considered, applications should normally be able to +hold their maximum memory allocation size below 2K or 4K. This +gives a value for log2(n) of 2 or 3. This will +limit N to between 2 and 2.5 times M.

    + +

    The maximum amount of general-purpose memory needed by the application is determined by such factors as how many database connection and prepared statement objects the application holds open simultaneously, and by the complexity of those prepared statements. For any given application, these factors are normally fixed and can be determined experimentally using SQLITE_STATUS_MEMORY_USED. A typical application might only use about 40KB of general-purpose memory. This gives a value of N of around 100KB.
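    As a worked check of that estimate (an illustration added here, using the numbers above): with a 512-byte minimum allocation and a 4K maximum allocation, n = 8 and log2(n) = 3, so

    N = M*(1 + (log2 n)/2) = 40KB * (1 + 3/2) = 100KB

    The small "- n + 1" correction term in the Robson formula is negligible at this scale.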

    + +

    4.2 Ductile failure

    + +

    If the memory allocation subsystems within SQLite are configured for breakdown-free operation but the actual memory usage exceeds the design limits set by the Robson proof, SQLite will usually continue to operate normally. The scratch memory allocator, the pagecache memory allocator, and the lookaside memory allocator all automatically fail over to the memsys5 general-purpose memory allocator. And it is usually the case that the memsys5 memory allocator will continue to function without fragmentation even if M and/or n exceeds the limits imposed by the Robson proof. The Robson proof shows that it is possible for a memory allocation to break down and fail in this circumstance, but such a failure requires an especially despicable sequence of allocations and deallocations - a sequence that SQLite has never been observed to follow. So in practice it is usually the case that the limits imposed by Robson can be exceeded by a considerable margin with no ill effect.

    + +

    Nevertheless, application developers are admonished to monitor the state of the memory allocation subsystems and raise alarms when memory usage approaches or exceeds Robson limits. In this way, the application will provide operators with abundant warning well in advance of failure. The memory statistics interfaces of SQLite provide the application with all of the mechanisms necessary to complete the monitoring portion of this task.
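    A minimal monitoring sketch (the 100KB design limit and the 80% alarm threshold are stand-in values chosen for illustration, not part of the original page):

    #include <stdio.h>
    #include "sqlite3.h"

    #define DESIGN_LIMIT  (100*1024)   /* application-chosen Robson bound N */

    /* Warn the operator when the library-wide memory high-water mark gets
    ** close to the design limit.  Final 0 = do not reset the high-water mark. */
    static void check_memory_budget(void){
      int nCur = 0, nHiwtr = 0;
      sqlite3_status(SQLITE_STATUS_MEMORY_USED, &nCur, &nHiwtr, 0);
      if( nHiwtr > (DESIGN_LIMIT/100)*80 ){
        fprintf(stderr, "warning: peak SQLite memory %d near design limit %d\n",
                nHiwtr, DESIGN_LIMIT);
      }
    }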

    + + +

    5.0 Stability Of Memory Interfaces

    + +

    As of this writing (circa SQLite version 3.6.1) all of the alternative +memory allocators and mechanisms for manipulating, controlling, and +measuring memory allocation in SQLite are considered experimental and +subject to change from one release to the next. These interfaces are +in the process of being refined to work on a wide variety of systems +under a range of constraints. The SQLite developers need the flexibility +to change the memory allocator interfaces in order to best meet the +needs of a wide variety of systems.

    + +

    One may anticipate that the memory allocator interfaces will +eventually stabilize. Appropriate notice will be given when that +occurs. In the meantime, applications developers who make use of +these interfaces need to be prepared to modify their applications +to accommodate changes in the SQLite interface.

    + + +

    6.0 Summary Of Memory Allocator Interfaces

    + +

    To be completed...

    +
    +This page last modified 2009/01/12 14:56:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/mingw.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/mingw.html --- sqlite3-3.4.2/www/mingw.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/mingw.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,200 @@ + + +Notes On How To Build MinGW As A Cross-Compiler + + + + + +

    +Notes On How To Build MinGW As A Cross-Compiler +

    + +

    MinGW or +Minimalist GNU For Windows +is a version of the popular GCC compiler that builds Win95/Win98/WinNT +binaries. See the website for details.

    + +

    This page describes how you can build MinGW +from sources as a cross-compiler +running under Linux. Doing so will allow you to construct +WinNT binaries from the comfort and convenience of your +Unix desktop.

    + + + +

    Here are the steps:

    + +
      +
    1.

      Get a copy of the source code. You will need the binutils, the compiler, and the MinGW runtime. Each is available separately. As of this writing, Mumit Khan has collected everything you need together in one FTP site: ftp://ftp.nanotech.wisc.edu/pub/khan/gnu-win32/mingw32/snapshots/gcc-2.95.2-1 The three files you will need are:

        binutils-19990818-1-src.tar.gz
        gcc-2.95.2-1-src.tar.gz
        mingw-20000203.zip

      Put all the downloads in a directory out of the way. The sequel +will assume all downloads are in a directory named +~/mingw/download.

      +
    2.

      +Create a directory in which to install the new compiler suite and make +the new directory writable. +Depending on what directory you choose, you might need to become +root. The example shell commands that follow +will assume the installation directory is +/opt/mingw and that your user ID is drh.

      +
      +su
      +mkdir /opt/mingw
      +chown drh /opt/mingw
      +exit
      +
      +
    3.

      Unpack the source tarballs into a separate directory.

      +
      +mkdir ~/mingw/src
      +cd ~/mingw/src
      +tar xzf ../download/binutils-*.tar.gz
      +tar xzf ../download/gcc-*.tar.gz
      +unzip ../download/mingw-*.zip
      +
      +
    4.

      Create a directory in which to put all the build products.

      +
      +mkdir ~/mingw/bld
      +
      +
    5.

      Configure and build binutils and add the results to your PATH.

      +
      +mkdir ~/mingw/bld/binutils
      +cd ~/mingw/bld/binutils
      +../../src/binutils/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      +make 2>&1 | tee make.out
      +make install 2>&1 | tee make-install.out
      +export PATH=$PATH:/opt/mingw/bin
      +
      +
    6.

      Manually copy the runtime include files into the installation directory +before trying to build the compiler.

      +
      +mkdir /opt/mingw/i386-mingw32/include
      +cd ~/mingw/src/mingw-runtime*/mingw/include
      +cp -r * /opt/mingw/i386-mingw32/include
      +
      +
    7.

      Configure and build the compiler

      +
      +mkdir ~/mingw/bld/gcc
      +cd ~/mingw/bld/gcc
      +../../src/gcc-*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      +cd gcc
      +make installdirs
      +cd ..
      +make 2>&1 | tee make.out
      +make install
      +
      +
    8.

      Configure and build the MinGW runtime

      +
      +mkdir ~/mingw/bld/runtime
      +cd ~/mingw/bld/runtime
      +../../src/mingw-runtime*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      +make install-target-w32api
      +make install
      +
      +
    + +

    And you are done...

    +
    +This page last modified 2007/12/20 02:07:07 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/mingw.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/mingw.tcl --- sqlite3-3.4.2/www/mingw.tcl 2005-04-23 23:45:22.000000000 +0100 +++ sqlite3-3.6.16/www/mingw.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,160 +0,0 @@ -# -# Run this Tcl script to generate the mingw.html file. -# -set rcsid {$Id: mingw.tcl,v 1.4 2003/03/30 18:58:58 drh Exp $} - -puts { - - Notes On How To Build MinGW As A Cross-Compiler - - -

    -Notes On How To Build MinGW As A Cross-Compiler -

    } -puts "

    -(This page was last modified on [lrange $rcsid 3 4] UTC) -

    " - -puts { -

    MinGW or -Minimalist GNU For Windows -is a version of the popular GCC compiler that builds Win95/Win98/WinNT -binaries. See the website for details.

    - -

    This page describes how you can build MinGW -from sources as a cross-compiler -running under Linux. Doing so will allow you to construct -WinNT binaries from the comfort and convenience of your -Unix desktop.

    -} - -proc Link {path {file {}}} { - if {$file!=""} { - set path $path/$file - } else { - set file $path - } - puts "$file" -} - -puts { -

    Here are the steps:

    - -
      -
    1. -

      Get a copy of source code. You will need the binutils, the -compiler, and the MinGW runtime. Each are available separately. -As of this writing, Mumit Khan has collected everything you need -together in one FTP site: -} -set ftpsite \ - ftp://ftp.nanotech.wisc.edu/pub/khan/gnu-win32/mingw32/snapshots/gcc-2.95.2-1 -Link $ftpsite -puts { -The three files you will need are:

      -
        -
      • } -Link $ftpsite binutils-19990818-1-src.tar.gz -puts
      • -Link $ftpsite gcc-2.95.2-1-src.tar.gz -puts
      • -Link $ftpsite mingw-20000203.zip -puts {
      • -
      - -

      Put all the downloads in a directory out of the way. The sequel -will assume all downloads are in a directory named -~/mingw/download.

      -
    2. - -
    3. -

      -Create a directory in which to install the new compiler suite and make -the new directory writable. -Depending on what directory you choose, you might need to become -root. The example shell commands that follow -will assume the installation directory is -/opt/mingw and that your user ID is drh.

      -
      -su
      -mkdir /opt/mingw
      -chown drh /opt/mingw
      -exit
      -
      -
    4. - -
    5. -

      Unpack the source tarballs into a separate directory.

      -
      -mkdir ~/mingw/src
      -cd ~/mingw/src
      -tar xzf ../download/binutils-*.tar.gz
      -tar xzf ../download/gcc-*.tar.gz
      -unzip ../download/mingw-*.zip
      -
      -
    6. - -
    7. -

      Create a directory in which to put all the build products.

      -
      -mkdir ~/mingw/bld
      -
      -
    8. - -
    9. -

      Configure and build binutils and add the results to your PATH.

      -
      -mkdir ~/mingw/bld/binutils
      -cd ~/mingw/bld/binutils
      -../../src/binutils/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -make 2>&1 | tee make.out
      -make install 2>&1 | tee make-install.out
      -export PATH=$PATH:/opt/mingw/bin
      -
      -
    10. - -
    11. -

      Manually copy the runtime include files into the installation directory -before trying to build the compiler.

      -
      -mkdir /opt/mingw/i386-mingw32/include
      -cd ~/mingw/src/mingw-runtime*/mingw/include
      -cp -r * /opt/mingw/i386-mingw32/include
      -
      -
    12. - -
    13. -

      Configure and build the compiler

      -
      -mkdir ~/mingw/bld/gcc
      -cd ~/mingw/bld/gcc
      -../../src/gcc-*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -cd gcc
      -make installdirs
      -cd ..
      -make 2>&1 | tee make.out
      -make install
      -
      -
    14. - -
    15. -

      Configure and build the MinGW runtime

      -
      -mkdir ~/mingw/bld/runtime
      -cd ~/mingw/bld/runtime
      -../../src/mingw-runtime*/configure --prefix=/opt/mingw --target=i386-mingw32 -v
      -make install-target-w32api
      -make install
      -
      -
    16. -
    - -

    And you are done...

    -} -puts { -


    -

    -Back to the SQLite Home Page -

    - -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/mkapidoc.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/mkapidoc.tcl --- sqlite3-3.4.2/www/mkapidoc.tcl 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/www/mkapidoc.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,176 +0,0 @@ -#!/usr/bin/tclsh -# -# Run this script redirecting the sqlite3.h file as standard -# inputs and this script will generate API documentation. -# -set rcsid {$Id: mkapidoc.tcl,v 1.2 2007/06/20 09:09:48 danielk1977 Exp $} -source common.tcl -header {C/C++ Interface For SQLite Version 3} -puts { -

    C/C++ Interface For SQLite Version 3

    -} - -# Scan standard input to extract the information we need -# to build the documentation. -# -set title {} -set type {} -set body {} -set code {} -set phase 0 -set content {} -while {![eof stdin]} { - set line [gets stdin] - if {$phase==0} { - # Looking for the CAPI3REF: keyword - if {[regexp {^\*\* CAPI3REF: +(.*)} $line all tx]} { - set title $tx - set phase 1 - } - } elseif {$phase==1} { - if {[string range $line 0 1]=="**"} { - set lx [string trim [string range $line 3 end]] - if {[regexp {^CATEGORY: +([a-z]*)} $lx all cx]} { - set type $cx - } elseif {[regexp {^KEYWORDS: +(.*)} $lx all kx]} { - foreach k $kx { - set keyword($k) 1 - } - } else { - append body $lx\n - } - } elseif {[string range $line 0 1]=="*/"} { - set phase 2 - } - } elseif {$phase==2} { - if {$line==""} { - set kwlist [lsort [array names keyword]] - unset -nocomplain keyword - set key $type:$kwlist - lappend content [list $key $title $type $kwlist $body $code] - set title {} - set keywords {} - set type {} - set body {} - set code {} - set phase 0 - } else { - if {[regexp {^#define (SQLITE_[A-Z0-9_]+)} $line all kx]} { - set type constant - set keyword($kx) 1 - } elseif {[regexp {^typedef .* (sqlite[0-9a-z_]+);} $line all kx]} { - set type datatype - set keyword($kx) 1 - } elseif {[regexp {^[a-z].*[ *](sqlite3_[a-z0-9_]+)\(} $line all kx]} { - set type function - set keyword($kx) 1 - } - append code $line\n - } - } -} - -# Output HTML that displays the given list in N columns -# -proc output_list {N lx} { - puts {} - set len [llength $lx] - set n [expr {($len + $N - 1)/$N}] - for {set i 0} {$i<$N} {incr i} { - set start [expr {$i*$n}] - set end [expr {($i+1)*$n}] - puts {} - } - puts {
      } - for {set j $start} {$j<$end} {incr j} { - set entry [lindex $lx $j] - if {$entry!=""} { - foreach {link label} $entry break - puts "
    • $label
    • " - } - } - puts {
    } -} - -# Do a table of contents for objects -# -set objlist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="datatype"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend objlist [list $k $kw] - } -} -puts {

    Datatypes:

    } -output_list 3 $objlist -puts {
    } - -# Do a table of contents for constants -# -set clist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="constant"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend clist [list $k $kw] - } -} -puts {

    Constants:

    } -set clist [lsort -index 1 $clist] -output_list 3 $clist -puts {
    } - - -# Do a table of contents for functions -# -set funclist {} -foreach c $content { - foreach {key title type keywords body code} $c break - if {$type!="function"} continue - set keywords [lsort $keywords] - set k [lindex $keywords 0] - foreach kw $keywords { - lappend funclist [list $k $kw] - } -} -puts {

    Functions:

    } -set funclist [lsort -index 1 $funclist] -output_list 3 $funclist -puts {
    } - -# Resolve links -# -proc resolve_links {args} { - set tag [lindex $args 0] - regsub -all {[^a-zA-Z0-9_]} $tag {} tag - set x "" - if {[llength $args]>2} { - append x [lrange $args 2 end] - } else { - append x [lindex $args 0] - } - return $x -} - -# Output all the records -# -foreach c [lsort $content] { - foreach {key title type keywords body code} $c break - foreach k $keywords { - puts "" - } - puts "

    $title

    " - puts "
    "
    -  puts "$code"
    -  puts "
    " - regsub -all "\n\n+" $body {

    \1

    } body - regsub -all {\[}

    $body

    {[resolve_links } body - set body [subst -novar -noback $body] - puts "$body" - puts "
    " -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/mostdeployed.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/mostdeployed.html --- sqlite3-3.4.2/www/mostdeployed.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/mostdeployed.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,159 @@ + + +SQLite: Most Widely Deployed SQL Database + + + + + +

    Most Widely Deployed SQL Database

    + +

    We believe that there are more copies of SQLite +in use around the world than any other SQL database engine, +and possibly all other SQL database engines combined. We cannot +be certain of this since we have no way of measuring either +the number of SQLite deployments nor the number of deployments +of other databases. But we believe the claim is defensible.

    + +

    The belief that SQLite is the most widely deployed SQL database engine stems from its use as an embedded database. Other database engines, such as MySQL, PostgreSQL, or Oracle, are typically found one to a server. And usually a single server can serve multiple users. With SQLite, on the other hand, a single user will typically have exclusive use of multiple copies of SQLite. SQLite is used on servers, but it is also used on desktop PCs, and in cellphones, PDAs, MP3 players, and set-top boxes.

    + +

    Estimates

    + +

    At the end of 2006, there were 100 million websites on the internet. [1] Let us use that number as a proxy for the number of deployed SQL database engines other than SQLite. Not every website runs an SQL database engine, and not every SQL database engine runs a website. Larger websites run multiple database engines. But the vast majority of smaller websites (the long tail) share a database engine with several other websites, if they use a database engine at all. And many large SQL database installations have nothing to do with websites. So using the number of websites as a surrogate for the number of operational SQL database engines is a crude approximation, but it is the best we have, so we will go with it. (Readers are encouraged to submit better estimates.)

    + +

    Now let's consider where SQLite is used:

    + +
      +
    • 125 million copies of Mozilla Firefox +[2]
    • +
    • 20 million Mac computers, each of which contains multiple +copies of SQLite
    • +
    • 20 million websites run PHP which has SQLite built in. +[3] We have no +way of estimating what fraction of those sites actively use +SQLite, but we think it is a significant fraction.
    • +
    • 300 million downloads of the Skype +client software and 100 million registered users +[4]. All recent versions of the Skype +client use SQLite internally.
    • +
    • 20 million Symbian smartphones shipped in Q3 2007 +[5] +Newer versions of the SymbianOS have SQLite built in. It is unclear +exactly how many Symbian phones actually contain SQLite, so we will +use a single quarter's sales as a lower bound.
    • +
    • 10 million AOL subscribers use SQLite in the AOL email client that +comes bundled with their subscription.
    • +
    • 10 million Solaris 10 installations, all of which require SQLite in +order to boot.
    • +
    • Millions and millions of copies of +McAfee anti-virus software all +use SQLite internally.
    • +
    • Millions of iPhones use SQLite
    • +
    • Millions and millions of other cellphones from manufacturers other than Symbian and Apple use SQLite. This has not been publicly acknowledged by the manufacturers but it is known to the SQLite developers.
    • +
    • There are perhaps millions of additional deployments of +SQLite that the SQLite developers do not know about.
    • +
    + +

    +By these estimates, we see 200 or 300 million SQLite deployments +and about 100 million deployments of other SQL database engines. These +estimates are obviously very rough and may be off significantly. +But there is a wide margin. So the SQLite +developers think it is likely that SQLite is the most widely deployed +SQL database engine in the world. +

    +
    +This page last modified 2008/03/03 13:41:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/news.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/news.html --- sqlite3-3.4.2/www/news.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/news.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,229 @@ + + +Recent SQLite News + + + + + +

    Recent News

    + +

    2009-Jun-27 - Version 3.6.16

    + SQLite version 3.6.16 is another general maintenance release containing performance and robustness enhancements. A single notable bug was fixed (ticket #3929). This bug could cause INSERT or UPDATE statements to fail on indexed tables that have AFTER triggers that modify the same table and index.


    2009-Jun-15 - Version 3.6.15

    + SQLite version 3.6.15 is a general maintenance release containing + performance and robustness enhancements and fixes for various obscure + bugs. +


    2009-May-25 - Version 3.6.14.2

    + SQLite version 3.6.14.2 fixes an obscure bug (ticket #3879) in the code generator section of SQLite which can potentially cause incorrect query results. The changes from the prior release consist of only this one bug fix, check-in [6676], and a change to the version number text.

    + +

    The bug was introduced in version 3.6.14. It is recommended that users of versions 3.6.14 and 3.6.14.1 upgrade to this release. Applications are unlikely to hit this bug, but since it is difficult to predict which applications might hit it and which might not, we recommend that all users of 3.6.14 and 3.6.14.1 upgrade to this release.


    2009-May-19 - Version 3.6.14.1

    + SQLite version 3.6.14.1 is a patch release to version 3.6.14 with + minimal changes that fixes three bugs. Upgrading is only necessary + for users who are impacted by one or more of those bugs. +


    2009-May-07 - Version 3.6.14

    + SQLite version 3.6.14 provides new performance enhancements in + the btree and pager layers and in the query optimizer. Certain + workloads can be as much as twice as fast as the previous release, + though 10% faster is a more typical result.

    + +

    Queries against virtual tables that contain OR and IN operators + in the WHERE clause are now able to use indexing.

    + +

    A new optional asynchronous I/O backend is available for + unix and windows. The asynchronous backend gives the illusion of faster + response time by pushing slow write operations into a background thread. + The tradeoff for faster response time is that more memory is required + (to hold the content of the pending writes) and if a power failure or + program crash occurs, some transactions that appeared to have committed + might end up being rolled back upon restart.

    + +

    This release also contains many minor bug fixes, documentation enhancements, + new test cases, and cleanups and simplifications to the source code.

    + +

    There is no compelling reason to upgrade from versions 3.6.12 or + 3.6.13 if those prior versions are working. Though many users may + benefit from the improved performance. +


    2009-Apr-14 - Version 3.6.13

    + SQLite version 3.6.13 fixes several minor issues that appeared in previous versions, including tickets #3774, #3791, and #3777. This is a bug-fix release only. There are no new features or enhancements.


    2009-Mar-31 - Version 3.6.12

    + SQLite version 3.6.12 fixes a database corruption bug. If an + incremental_vacuum is rolled back in an in-memory database, the + database will often go corrupt. This only happens for in-memory + databases. On-disk databases are unaffected. And the corruption + only appears if an incremental vacuum is rolled back. Nevertheless, + upgrading is recommended for all applications, especially those that + make use of in-memory databases and/or incremental vacuum. See ticket #3761.

    + +

    SQLite version 3.6.12 also adds support for the sqlite3_unlock_notify() + interface and the reverse_unordered_selects pragma and the new + ".genfkey" command in the CLI. There are also performance improvements + in many count(*) SQL statements.

    + +

    During testing of version 3.6.12, a bug in the lookaside memory allocator as it relates to shared cache mode was found that affects all prior versions of SQLite back to version 3.6.1. If you are using shared cache mode you should either disable lookaside memory allocation or upgrade to version 3.6.12. See ticket #3743.


    2009-Feb-18 - Version 3.6.11

    + SQLite version 3.6.11 adds support for the + hot-backup interface. This interface can be + used to create a backup copy of an SQLite database while it is in use. + The same interface can be used to initialize an in-memory database from + a persistent disk image or to save an in-memory database into a + persistent disk image. Usage examples can be found at + Using the SQLite Online Backup API. +


    2009-Jan-15 - Version 3.6.10

    + SQLite version 3.6.10 fixes a cache coherency bug (Ticket #3584) + introduced by check-in + [5864] + which was part of version 3.6.5. This bug might lead to database + corruption, hence we felt it was important to get it out as quickly + as possible, even though there had already been two prior releases + this week.

    + +

    Some concern has been expressed that we are releasing too frequently. (Three releases in one week is a lot!) The concern is that this creates the impression of volatility and unreliability. We have been told that we should delay releases in order to create the impression of stability. But the SQLite developers feel that truth is more important than perception, not the other way around. We think it is important to make the highest quality and most stable version of SQLite available to users at all times. This week has seen two important bugs being discovered shortly after a major release, and so we have issued two emergency patch releases after the regularly scheduled major release. This makes us look bad. This puts "egg on our face." We do not like that. But, three releases also ensures that the best quality SQLite code base is available to you at all times.

    + +

    It has been suggested that "beta" releases might find these kinds of bugs prior to a major release. But our experience indicates otherwise. The two issues that prompted releases 3.6.9 and 3.6.10 were both discovered by internal testing and review - not by external users. And, indeed, most of the problems found in SQLite these days are discovered by our rigorous internal testing protocol, not by bug reports from the field.

    + +

    It has also been argued that we should withhold releases "until testing + is finished." The fallacy there is that we never finish testing. We + are constantly writing new test cases for SQLite and thinking of new + ways to stress and potentially break the code. This is a continuous, + never-ending, and on-going process. All existing tests pass before each + release. But we will always be writing new tests the day after a release, + regardless of how long we delay that release. And sometimes those new + tests will uncover new problems.

    + +

    All this is to say that we believe that SQLite version 3.6.10 is the + most stable, most thoroughly tested, and bug-free version of SQLite + that has ever existed. Please do not be freaked out by three releases + occurring in one week. +


    2009-Jan-14 - Version 3.6.9

    + Internal stress testing revealed a corner case where the cost function in the query optimizer might mislead the query optimizer into making a poor indexing choice. That choice could then tickle another bug in the VDBE which might result in an incorrect query result. This release fixes both problems. The chances of actually hitting this combination of problems in a real application seem remote. Nevertheless, upgrading is recommended.


    2009-Jan-12 - Version 3.6.8

    + SQLite version 3.6.8 adds support for + nested transactions and improved optimization of + WHERE clauses with + OR-connected terms. There is also + a new compile-time option that changes + the way full-text search patterns are parsed so that they can contain + nested parentheses.

    + +

    These are substantial changes. Even so, the + release testing for SQLite + has become so extensive that the developers have high confidence that + this release is stable and ready for production use. +


    + +Old news... +
    +This page last modified 2009/06/27 13:41:53 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/nulls.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/nulls.html --- sqlite3-3.4.2/www/nulls.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/nulls.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,396 @@ + + +NULL Handling in SQLite + + + + + +

    NULL Handling in SQLite Versus Other Database Engines

    + +

    +The goal is +to make SQLite handle NULLs in a standards-compliant way. +But the descriptions in the SQL standards on how to handle +NULLs seem ambiguous. +It is not clear from the standards documents exactly how NULLs should +be handled in all circumstances. +

    + +

    +So instead of going by the standards documents, various popular +SQL engines were tested to see how they handle NULLs. The idea +was to make SQLite work like all the other engines. +A SQL test script was developed and run by volunteers on various +SQL RDBMSes and the results of those tests were used to deduce +how each engine processed NULL values. +The original tests were run in May of 2002. +A copy of the test script is found at the end of this document. +

    + +

    +SQLite was originally coded in such a way that the answer to all questions in the chart below would be "Yes". But the experiments run on other SQL engines showed that none of them worked this way. So SQLite was modified to work the same as Oracle, PostgreSQL, and DB2. This involved making NULLs indistinct for the purposes of the SELECT DISTINCT statement and for the UNION operator in a SELECT. NULLs are still distinct in a UNIQUE column. This seems somewhat arbitrary, but the desire to be compatible with other engines outweighed that objection.

    + +

    +It is possible to make SQLite treat NULLs as distinct for the +purposes of the SELECT DISTINCT and UNION. To do so, one should +change the value of the NULL_ALWAYS_DISTINCT #define in the +sqliteInt.h source file and recompile. +

    + +
    +

    +Update 2003-07-13: Since this document was originally written some of the database engines tested have been updated and users have been kind enough to send in corrections to the chart below. The original data showed a wide variety of behaviors, but over time the range of behaviors has converged toward the PostgreSQL/Oracle model. The only significant difference is that Informix and MS-SQL both treat NULLs as indistinct in a UNIQUE column.

    + +

    +The fact that NULLs are distinct for UNIQUE columns but are indistinct for +SELECT DISTINCT and UNION continues to be puzzling. It seems that NULLs +should be either distinct everywhere or nowhere. And the SQL standards +documents suggest that NULLs should be distinct everywhere. Yet as of +this writing, no SQL engine tested treats NULLs as distinct in a SELECT +DISTINCT statement or in a UNION. +

    +
    + + +

    +The following table shows the results of the NULL handling experiments. +

                                                  SQLite  PostgreSQL  Oracle  Informix  DB2       MS-SQL  OCELOT
    Adding anything to null gives null            Yes     Yes         Yes     Yes       Yes       Yes     Yes
    Multiplying null by zero gives null           Yes     Yes         Yes     Yes       Yes       Yes     Yes
    nulls are distinct in a UNIQUE column         Yes     Yes         Yes     No        (Note 4)  No      Yes
    nulls are distinct in SELECT DISTINCT         No      No          No      No        No        No      No
    nulls are distinct in a UNION                 No      No          No      No        No        No      No
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?      Yes     Yes         Yes     Yes       Yes       Yes     Yes
    "null OR true" is true                        Yes     Yes         Yes     Yes       Yes       Yes     Yes
    "not (null AND false)" is true                Yes     Yes         Yes     Yes       Yes       Yes     Yes

                                                  MySQL 3.23.41  MySQL 4.0.16  Firebird     SQL Anywhere  Borland Interbase
    Adding anything to null gives null            Yes            Yes           Yes          Yes           Yes
    Multiplying null by zero gives null           Yes            Yes           Yes          Yes           Yes
    nulls are distinct in a UNIQUE column         Yes            Yes           Yes          (Note 4)      (Note 4)
    nulls are distinct in SELECT DISTINCT         No             No            No (Note 1)  No            No
    nulls are distinct in a UNION                 (Note 3)       No            No (Note 1)  No            No
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?      Yes            Yes           Yes          Yes           (Note 5)
    "null OR true" is true                        Yes            Yes           Yes          Yes           Yes
    "not (null AND false)" is true                No             Yes           Yes          Yes           Yes

    Notes:
    1. Older versions of Firebird omit all NULLs from SELECT DISTINCT and from UNION.
    2. Test data unavailable.
    3. MySQL version 3.23.41 does not support UNION.
    4. DB2, SQL Anywhere, and Borland Interbase do not allow NULLs in a UNIQUE column.
    5. Borland Interbase does not support CASE expressions.
    +
    + +

     

    +

    +The following script was used to gather information for the table +above. +

    + +
    +-- I have about decided that SQL's treatment of NULLs is capricious and cannot be
    +-- deduced by logic.  It must be discovered by experiment.  To that end, I have 
    +-- prepared the following script to test how various SQL databases deal with NULL.
    +-- My aim is to use the information gathered from this script to make SQLite as much
    +-- like other databases as possible.
    +--
    +-- If you could please run this script in your database engine and mail the results
    +-- to me at drh@hwaci.com, that will be a big help.  Please be sure to identify the
    +-- database engine you use for this test.  Thanks.
    +--
    +-- If you have to change anything to get this script to run with your database
    +-- engine, please send your revised script together with your results.
    +--
    +
    +-- Create a test table with data
    +create table t1(a int, b int, c int);
    +insert into t1 values(1,0,0);
    +insert into t1 values(2,0,1);
    +insert into t1 values(3,1,0);
    +insert into t1 values(4,1,1);
    +insert into t1 values(5,null,0);
    +insert into t1 values(6,null,1);
    +insert into t1 values(7,null,null);
    +
    +-- Check to see what CASE does with NULLs in its test expressions
    +select a, case when b<>0 then 1 else 0 end from t1;
    +select a+10, case when not b<>0 then 1 else 0 end from t1;
    +select a+20, case when b<>0 and c<>0 then 1 else 0 end from t1;
    +select a+30, case when not (b<>0 and c<>0) then 1 else 0 end from t1;
    +select a+40, case when b<>0 or c<>0 then 1 else 0 end from t1;
    +select a+50, case when not (b<>0 or c<>0) then 1 else 0 end from t1;
    +select a+60, case b when c then 1 else 0 end from t1;
    +select a+70, case c when b then 1 else 0 end from t1;
    +
    +-- What happens when you multiply a NULL by zero?
    +select a+80, b*0 from t1;
    +select a+90, b*c from t1;
    +
    +-- What happens to NULL for other operators?
    +select a+100, b+c from t1;
    +
    +-- Test the treatment of aggregate operators
    +select count(*), count(b), sum(b), avg(b), min(b), max(b) from t1;
    +
    +-- Check the behavior of NULLs in WHERE clauses
    +select a+110 from t1 where b<10;
    +select a+120 from t1 where not b>10;
    +select a+130 from t1 where b<10 OR c=1;
    +select a+140 from t1 where b<10 AND c=1;
    +select a+150 from t1 where not (b<10 AND c=1);
    +select a+160 from t1 where not (c=1 AND b<10);
    +
    +-- Check the behavior of NULLs in a DISTINCT query
    +select distinct b from t1;
    +
    +-- Check the behavior of NULLs in a UNION query
    +select b from t1 union select b from t1;
    +
    +-- Create a new table with a unique column.  Check to see if NULLs are considered
    +-- to be distinct.
    +create table t2(a int, b int unique);
    +insert into t2 values(1,1);
    +insert into t2 values(2,null);
    +insert into t2 values(3,null);
    +select * from t2;
    +
    +drop table t1;
    +drop table t2;
    +
    +
    +This page last modified 2007/11/12 14:46:23 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/nulls.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/nulls.tcl --- sqlite3-3.4.2/www/nulls.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/nulls.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,329 +0,0 @@ -# -# Run this script to generated a nulls.html output file -# -set rcsid {$Id: nulls.tcl,v 1.8 2004/10/10 17:24:55 drh Exp $} -source common.tcl -header {NULL Handling in SQLite} -puts { -

    NULL Handling in SQLite Versus Other Database Engines

    - -

    -The goal is -to make SQLite handle NULLs in a standards-compliant way. -But the descriptions in the SQL standards on how to handle -NULLs seem ambiguous. -It is not clear from the standards documents exactly how NULLs should -be handled in all circumstances. -

    - -

    -So instead of going by the standards documents, various popular -SQL engines were tested to see how they handle NULLs. The idea -was to make SQLite work like all the other engines. -A SQL test script was developed and run by volunteers on various -SQL RDBMSes and the results of those tests were used to deduce -how each engine processed NULL values. -The original tests were run in May of 2002. -A copy of the test script is found at the end of this document. -

    - -

    -SQLite was originally coded in such a way that the answer to -all questions in the chart below would be "Yes". But the -experiments run on other SQL engines showed that none of them -worked this way. So SQLite was modified to work the same as -Oracle, PostgreSQL, and DB2. This involved making NULLs -indistinct for the purposes of the SELECT DISTINCT statement and -for the UNION operator in a SELECT. NULLs are still distinct -in a UNIQUE column. This seems somewhat arbitrary, but the desire -to be compatible with other engines outweighted that objection. -

    - -

    -It is possible to make SQLite treat NULLs as distinct for the -purposes of the SELECT DISTINCT and UNION. To do so, one should -change the value of the NULL_ALWAYS_DISTINCT #define in the -sqliteInt.h source file and recompile. -

    - -
    -

    -Update 2003-07-13: -Since this document was originally written some of the database engines -tested have been updated and users have been kind enough to send in -corrections to the chart below. The original data showed a wide variety -of behaviors, but over time the range of behaviors has converged toward -the PostgreSQL/Oracle model. The only significant difference -is that Informix and MS-SQL both threat NULLs as -indistinct in a UNIQUE column. -

    - -

    -The fact that NULLs are distinct for UNIQUE columns but are indistinct for -SELECT DISTINCT and UNION continues to be puzzling. It seems that NULLs -should be either distinct everywhere or nowhere. And the SQL standards -documents suggest that NULLs should be distinct everywhere. Yet as of -this writing, no SQL engine tested treats NULLs as distinct in a SELECT -DISTINCT statement or in a UNION. -

    -
    - - -

    -The following table shows the results of the NULL handling experiments. -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      SQLitePostgreSQLOracleInformixDB2MS-SQLOCELOT
    Adding anything to null gives nullYesYesYesYesYesYesYes
    Multiplying null by zero gives nullYesYesYesYesYesYesYes
    nulls are distinct in a UNIQUE columnYesYesYesNo(Note 4)NoYes
    nulls are distinct in SELECT DISTINCTNoNoNoNoNoNoNo
    nulls are distinct in a UNIONNoNoNoNoNoNoNo
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYesYesYesYes
    "null OR true" is trueYesYesYesYesYesYesYes
    "not (null AND false)" is trueYesYesYesYesYesYesYes
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      MySQL
    3.23.41
    MySQL
    4.0.16
    FirebirdSQL
    Anywhere
    Borland
    Interbase
    Adding anything to null gives nullYesYesYesYesYes
    Multiplying null by zero gives nullYesYesYesYesYes
    nulls are distinct in a UNIQUE columnYesYesYes(Note 4)(Note 4)
    nulls are distinct in SELECT DISTINCTNoNoNo (Note 1)NoNo
    nulls are distinct in a UNION(Note 3)NoNo (Note 1)NoNo
    "CASE WHEN null THEN 1 ELSE 0 END" is 0?YesYesYesYes(Note 5)
    "null OR true" is trueYesYesYesYesYes
    "not (null AND false)" is trueNoYesYesYesYes
    - - - - - - - - - - - - - - - - - - - -
    Notes:  1. Older versions of firebird omits all NULLs from SELECT DISTINCT -and from UNION.
    2. Test data unavailable.
    3. MySQL version 3.23.41 does not support UNION.
    4. DB2, SQL Anywhere, and Borland Interbase -do not allow NULLs in a UNIQUE column.
    5. Borland Interbase does not support CASE expressions.
    -
    - -

     

    -

    -The following script was used to gather information for the table -above. -

    - -
    --- I have about decided that SQL's treatment of NULLs is capricious and cannot be
    --- deduced by logic.  It must be discovered by experiment.  To that end, I have 
    --- prepared the following script to test how various SQL databases deal with NULL.
    --- My aim is to use the information gather from this script to make SQLite as much
    --- like other databases as possible.
    ---
    --- If you could please run this script in your database engine and mail the results
    --- to me at drh@hwaci.com, that will be a big help.  Please be sure to identify the
    --- database engine you use for this test.  Thanks.
    ---
    --- If you have to change anything to get this script to run with your database
    --- engine, please send your revised script together with your results.
    ---
    -
    --- Create a test table with data
    -create table t1(a int, b int, c int);
    -insert into t1 values(1,0,0);
    -insert into t1 values(2,0,1);
    -insert into t1 values(3,1,0);
    -insert into t1 values(4,1,1);
    -insert into t1 values(5,null,0);
    -insert into t1 values(6,null,1);
    -insert into t1 values(7,null,null);
    -
    --- Check to see what CASE does with NULLs in its test expressions
    -select a, case when b<>0 then 1 else 0 end from t1;
    -select a+10, case when not b<>0 then 1 else 0 end from t1;
    -select a+20, case when b<>0 and c<>0 then 1 else 0 end from t1;
    -select a+30, case when not (b<>0 and c<>0) then 1 else 0 end from t1;
    -select a+40, case when b<>0 or c<>0 then 1 else 0 end from t1;
    -select a+50, case when not (b<>0 or c<>0) then 1 else 0 end from t1;
    -select a+60, case b when c then 1 else 0 end from t1;
    -select a+70, case c when b then 1 else 0 end from t1;
    -
    --- What happens when you multiple a NULL by zero?
    -select a+80, b*0 from t1;
    -select a+90, b*c from t1;
    -
    --- What happens to NULL for other operators?
    -select a+100, b+c from t1;
    -
    --- Test the treatment of aggregate operators
    -select count(*), count(b), sum(b), avg(b), min(b), max(b) from t1;
    -
    --- Check the behavior of NULLs in WHERE clauses
    -select a+110 from t1 where b<10;
    -select a+120 from t1 where not b>10;
    -select a+130 from t1 where b<10 OR c=1;
    -select a+140 from t1 where b<10 AND c=1;
    -select a+150 from t1 where not (b<10 AND c=1);
    -select a+160 from t1 where not (c=1 AND b<10);
    -
    --- Check the behavior of NULLs in a DISTINCT query
    -select distinct b from t1;
    -
    --- Check the behavior of NULLs in a UNION query
    -select b from t1 union select b from t1;
    -
    --- Create a new table with a unique column.  Check to see if NULLs are considered
    --- to be distinct.
    -create table t2(a int, b int unique);
    -insert into t2 values(1,1);
    -insert into t2 values(2,null);
    -insert into t2 values(3,null);
    -select * from t2;
    -
    -drop table t1;
    -drop table t2;
    -
    -} - -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/oldnews.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/oldnews.html --- sqlite3-3.4.2/www/oldnews.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/oldnews.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,645 @@ + + +SQLite Older News + + + + + +

    2008-Dec-16 - Version 3.6.7

    + SQLite version 3.6.7 contains a major cleanup of the Unix driver, + and support for the new Proxy Locking mechanism on MacOSX. Though + the Unix driver is reorganized, its functionality is the same and so + applications should not notice a difference. +


    2008-Nov-26 - Version 3.6.6.2

    + This release fixes a bug that was introduced into SQLite version 3.6.6 and which seems like it might be able to cause database corruption. This bug was detected during stress testing. It has not been seen in the wild. An analysis of the problem suggests that the bug might be able to cause database corruption, however focused efforts to find a real-world test case that actually causes database corruption have so far been unsuccessful. Hence, the likelihood of this bug causing problems is low. Nevertheless, we have decided to do an emergency branch release out of an abundance of caution.

    + +

    The version 3.6.6.2 release also fixes an obscure memory leak that can occur following a disk I/O error.


    2008-Nov-22 - Version 3.6.6.1

    + This release fixes a bug that was introduced into SQLite version 3.6.4 + and that can cause database corruption in obscure cases. This bug has + never been seen in the wild; it was first detected by internal stress + tests and required substantial analysis before it could be shown to + potentially lead to corruption. So we feel that SQLite versions 3.6.4, + 3.6.5, and 3.6.6 are safe to use for development work. But upgrading + to this patch release or later is recommended prior to deploying + products that incorporate SQLite.

    + +

    We have taken the unusual step of issuing a patch release in order to + get the fix for this bug into circulation quickly. SQLite version 3.6.7 + will continue on its normal path of development with an anticipated + release in mid December. +


    2008-Nov-19 - Version 3.6.6

    + SQLite version 3.6.6 is released. This is a quick turn-around release that fixes a bug in virtual tables and FTS3 that snuck into version 3.6.5. This release also adds the new application-defined page cache mechanism.


    2008-Nov-12 - Version 3.6.5

    + SQLite version 3.6.5 is released. There are various minor feature + enhancements and numerous obscure bug fixes. + The change log contains the details. Upgrading is + optional. +


    2008-Nov-01 - Bloomberg Joins SQLite Consortium

    + The SQLite developers are honored to announce that + Bloomberg has joined the + SQLite Consortium. +


    2008-Oct-15 - Version 3.6.4

    + SQLite version 3.6.4 adds new features designed to help applications detect when indices are not being used in queries. There are also some important performance improvements. Upgrading is optional.


    2008-Sep-22 - Version 3.6.3

    + SQLite version 3.6.3 fixes a bug in SELECT DISTINCT that was introduced + by the previous version. No new features are added. Upgrading is + recommended for all applications that make use of DISTINCT. +


    2008-Aug-30 - Version 3.6.2

    + SQLite version 3.6.2 contains rewrites of the page-cache subsystem and + the procedures for matching identifiers to table columns in SQL statements. + These changes are designed to better modularize the code and make it more + maintainable and reliable moving forward. Nearly 5000 non-comment lines + of core code (about 11.3%) have changed + from the previous release. Nevertheless, there should be no + application-visible changes, other than bug fixes. +


    2008-Aug-06 - Version 3.6.1

    + SQLite version 3.6.1 is a stabilization and performance enhancement + release. +


    2008-July-16 - Version 3.6.0 beta

    + Version 3.6.0 makes changes to the VFS object in order + to make SQLite more easily portable to a wider variety of platforms. + There are potential incompatibilities with some legacy applications. + See the 35to36.html document for details.

    + +

    Many new interfaces are introduced in version 3.6.0. The code is + very well tested and is appropriate for use in stable systems. We + have attached the "beta" designation only so that we can make tweaks to + the new interfaces in the next release without having to declare an + incompatibility. +


    2008-May-12 - Version 3.5.9

    + Version 3.5.9 adds a new experimental PRAGMA: journal_mode. Setting the journal mode to PERSIST can provide a performance improvement on systems where deleting a file is expensive. The PERSIST journal mode is still considered experimental and should be used with caution pending further testing.

    + +

    Version 3.5.9 is intended to be the last stable release prior to version 3.6.0. Version 3.6.0 will make incompatible changes to the sqlite3_vfs VFS layer in order to address deficiencies in the original design. These incompatibilities will only affect programmers who write their own custom VFS layers (typically embedded device builders). The planned VFS changes will be much smaller than the changes that occurred in the 3.4.2 to 3.5.0 transition last September.

    + +

    This release of SQLite is considered stable and ready for production use. +


    2008-Apr-16 - Version 3.5.8

    + Version 3.5.8 includes some important new performance optimizations + in the virtual machine code generator, including constant subexpression + factoring and common subexpression elimination. This release also + creates new public interfaces: + sqlite3_randomness() provides access to SQLite's internal + pseudo-random number generator, sqlite3_limit() allows size + limits to be set at run-time on a per-connection basis, and + sqlite3_context_db_handle() is a convenience routine that allows + an application-defined SQL function implementation to retrieve + its database connection handle.
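    + A short sketch, assuming only what the paragraph above states, of the two
    + simplest of these interfaces: sqlite3_limit() to cap sizes per connection
    + and sqlite3_randomness() to read bytes from the internal PRNG. The limit
    + value chosen here is arbitrary.
    +
    +#include <stdio.h>
    +#include <sqlite3.h>
    +
    +int main(void){
    +  sqlite3 *db;
    +  unsigned char buf[16];
    +  int prev;
    +  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
    +  /* Cap the length of any string or BLOB on this connection at 1 MiB.
    +  ** The return value is the limit that was previously in effect. */
    +  prev = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1024*1024);
    +  printf("previous SQLITE_LIMIT_LENGTH was %d\n", prev);
    +  /* Fill buf[] with bytes from SQLite's internal pseudo-random generator. */
    +  sqlite3_randomness((int)sizeof(buf), buf);
    +  printf("first random byte: %u\n", buf[0]);
    +  sqlite3_close(db);
    +  return 0;
    +}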

    + +

    This release of SQLite is considered stable and ready for production use. +


    2008-Mar-17 - Version 3.5.7

    + Version 3.5.7 fixes several minor and obscure bugs, especially + in the autoconf-generated makefile. Upgrading is optional. + This release of SQLite is considered stable and ready for production use. +


    2008-Feb-6 - Version 3.5.6

    + Version 3.5.6 fixes a minor regression in 3.5.5 - a regression that + had nothing to do with the massive change of the virtual machine + to a register-based design. + No problems have been reported with the new virtual machine. This + release of SQLite is considered stable and ready for production use. +


    2008-Jan-31 - Version 3.5.5

    + Version 3.5.5 changes over 8% of the core source code of SQLite in order + to convert the internal virtual machine from a stack-based design into + a register-based design. This change will allow future optimizations + and will avoid an entire class of stack overflow bugs that have caused + problems in the past. Even though this change is large, extensive testing + has found zero errors in the new virtual machine and so we believe this + to be a very stable release. +


    2007-Dec-14 - Version 3.5.4

    + Version 3.5.4 fixes a long-standing but obscure bug in UPDATE and + DELETE which might cause database corruption. (See ticket #2832.) + Upgrading is recommended for all users.

    + +

    This release also brings the processing of ORDER BY statements into + compliance with standard SQL. This could, in theory, cause problems + for existing applications that depend on the older, buggy behavior. + See ticket #2822 for additional information. +


    2007-Dec-12 - SQLite Consortium Announced

    + The SQLite Consortium was launched + today with Mozilla and + Symbian as charter members. + As noted in the press release, + the Consortium's goal is to promote the continuing vitality and + independence of SQLite. +


    2007-Nov-27 - Version 3.5.3

    + This is an incremental release that fixes several minor problems. + Upgrading is optional. If Version 3.5.2 or 3.5.1 is working fine + for you, then there is no pressing need to change to 3.5.3.

    + +

    The prebuilt binaries and the amalgamation found on the + download page include the FTS3 fulltext + search extension module. We are doing this on an experimental + basis and are not promising to provide prebuilt binaries with + FTS3 in the future. +


    2007-Nov-05 - Version 3.5.2

    + This is an incremental release that fixes several minor problems, + adds some obscure features, and provides some performance tweaks. + Upgrading is optional.

    + +

    The experimental compile-time option + SQLITE_OMIT_MEMORY_ALLOCATION is no longer supported. On the other + hand, it is now possible to compile SQLite so that it uses a static + array for all its dynamic memory allocation needs and never calls + malloc. Expect to see additional radical changes to the memory + allocation subsystem in future releases. +


    2007-Oct-04 - Version 3.5.1

    + Fix a long-standing bug that might cause database corruption if a + disk-full error occurs in the middle of a transaction and that + transaction is not rolled back. + Ticket #2686.

    + +

    The new VFS layer is stable. However, we still reserve the right to + make tweaks to the interface definition of the VFS if necessary. +


    2007-Sep-04 - Version 3.5.0 alpha

    + The OS interface layer and the memory allocation subsystems in + SQLite have been reimplemented. The published API is largely unchanged + but the (unpublished) OS interface has been modified extensively. + Applications that implement their own OS interface will require + modification. See + 34to35.html for details.

    + +

    This is a large change. Approximately 10% of the source code was + modified. We are calling this first release "alpha" in order to give + the user community time to test and evaluate the changes before we + freeze the new design. +


    2007-Aug-13 - Version 3.4.2

    + While stress-testing the + soft_heap_limit + feature, a bug that could lead to + database + corruption was + discovered and fixed. + Though the consequences of this bug are severe, the chances of hitting + it in a typical application are remote. Upgrading is recommended + only if you use the + sqlite3_soft_heap_limit + interface. +


    2007-Jly-20 - Version 3.4.1

    + This release fixes a bug in VACUUM that + can lead to + database corruption. The bug was introduced in version + 3.3.14. + Upgrading is recommended for all users. Also included are a slew of + other more routine + enhancements and bug fixes. +


    2007-Jun-18 - Version 3.4.0

    + This release fixes two separate bugs either of which + can lead to database corruption. Upgrading + is strongly recommended. If you must continue using an older version + of SQLite, please at least read about how to avoid these bugs + at + + CorruptionFollowingBusyError and + ticket #2418 +

    + This release also adds explicit limits on the + sizes and quantities of things SQLite will handle. The new limits might + cause compatibility problems for existing applications that + use excessively large strings, BLOBs, tables, or SQL statements. + The new limits can be increased at compile-time to work around any problems + that arise. Nevertheless, the version number of this release is + 3.4.0 instead of 3.3.18 in order to call attention to the possible + incompatibility. +

    + There are also new features, including + incremental BLOB I/O and + incremental vacuum. + See the change log + for additional information. +


    2007-Apr-25 - Version 3.3.17

    + This version fixes a bug in the forwards-compatibility logic of SQLite + that was causing a database to become unreadable when it should have + been read-only. Upgrade from 3.3.16 only if you plan to deploy into + a product that might need to be upgraded in the future. For day to day + use, it probably does not matter. +


    2007-Apr-18 - Version 3.3.16

    + Performance improvements added in 3.3.14 but mistakenly turned off + in 3.3.15 have been reinstated. A bug has been fixed that prevented + VACUUM from running if a NULL value was in a UNIQUE column. +


    2007-Apr-09 - Version 3.3.15

    + An annoying bug introduced in 3.3.14 has been fixed. There are + also many enhancements to the test suite. +


    2007-Apr-02 - Version 3.3.14

    + This version focuses on performance improvements. If you recompile + + the amalgamation using GCC option -O3 (the precompiled binaries + use -O2) you may see performance + improvements of 35% or more over version 3.3.13 depending on your + workload. This version also + adds support for + exclusive access mode. +


    2007-Feb-13 - Version 3.3.13

    + This version fixes a subtle bug in the ORDER BY optimizer that can + occur when using joins. There are also a few minor enhancements. + Upgrading is recommended. +


    2007-Jan-27 - Version 3.3.12

    + The first published build of the previous version used the wrong + set of source files. Consequently, many people downloaded a build + that was labeled as "3.3.11" but was really 3.3.10. Version 3.3.12 + is released to clear up the ambiguity. A couple more bugs have + also been fixed and + PRAGMA integrity_check has been enhanced. +


    2007-Jan-22 - Version 3.3.11

    + Version 3.3.11 fixes a few more problems in version 3.3.9 that + version 3.3.10 failed to catch. Upgrading is recommended. +


    2007-Jan-9 - Version 3.3.10

    + Version 3.3.10 fixes several bugs that were introduced by the previous + release. Upgrading is recommended. +


    2007-Jan-4 - Version 3.3.9

    + Version 3.3.9 fixes bugs that can lead to database corruption under + obscure and difficult to reproduce circumstances. See + + DatabaseCorruption in the + wiki for details. + This release also adds the new + sqlite3_prepare_v2() + API and includes important bug fixes in the command-line + shell and enhancements to the query optimizer. Upgrading is + recommended. +
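    + A brief sketch of the new interface in use; the table and query are
    + invented. The main practical difference from the older sqlite3_prepare()
    + is that errors encountered while running the statement are reported
    + directly by sqlite3_step() and described by sqlite3_errmsg(), rather than
    + surfacing only after a reset or finalize.
    +
    +#include <stdio.h>
    +#include <sqlite3.h>
    +
    +int main(void){
    +  sqlite3 *db;
    +  sqlite3_stmt *stmt;
    +  int rc;
    +  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
    +  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES('hello');", 0, 0, 0);
    +  rc = sqlite3_prepare_v2(db, "SELECT x FROM t", -1, &stmt, 0);
    +  if( rc!=SQLITE_OK ){
    +    fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
    +    sqlite3_close(db);
    +    return 1;
    +  }
    +  while( (rc = sqlite3_step(stmt))==SQLITE_ROW ){
    +    printf("x = %s\n", (const char*)sqlite3_column_text(stmt, 0));
    +  }
    +  if( rc!=SQLITE_DONE ){   /* with _v2, the real error code comes back here */
    +    fprintf(stderr, "step failed: %s\n", sqlite3_errmsg(db));
    +  }
    +  sqlite3_finalize(stmt);
    +  sqlite3_close(db);
    +  return 0;
    +}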


    2006-Oct-9 - Version 3.3.8

    + Version 3.3.8 adds support for full-text search using the + FTS1 + module. There are also minor bug fixes. Upgrade only if + you want to try out the new full-text search capabilities or if + you are having problems with 3.3.7. +


    2006-Aug-12 - Version 3.3.7

    + Version 3.3.7 includes support for loadable extensions and virtual + tables. But both features are still considered "beta" and their + APIs are subject to change in a future release. This release is + mostly to make available the minor bug fixes that have accumulated + since 3.3.6. Upgrading is not necessary. Do so only if you encounter + one of the obscure bugs that have been fixed or if you want to try + out the new features. +


    2006-Jun-19 - New Book About SQLite

    + + The Definitive Guide to SQLite, a new book by + Mike Owens, + is now available from Apress. + The book covers the latest SQLite internals as well as + the native C interface and bindings for PHP, Python, + Perl, Ruby, Tcl, and Java. Recommended. +


    2006-Jun-6 - Version 3.3.6

    + Changes include improved tolerance for Windows virus scanners + and faster :memory: databases. There are also fixes for several + obscure bugs. Upgrade if you are having problems. +


    2006-Apr-5 - Version 3.3.5

    + This release fixes many minor bugs and documentation typos and + provides some minor new features and performance enhancements. + Upgrade only if you are having problems or need one of the new features. +


    2006-Feb-11 - Version 3.3.4

    + This release fixes several bugs, including a + blunder that might cause a deadlock on multithreaded systems. + Anyone using SQLite in a multithreaded environment should probably upgrade. +


    2006-Jan-31 - Version 3.3.3 stable

    + There have been no major problems discovered in version 3.3.2, so + we hereby declare the new APIs and language features to be stable + and supported. +


    2006-Jan-24 - Version 3.3.2 beta

    + More bug fixes and performance improvements as we move closer to + a production-ready version 3.3.x. +


    2006-Jan-16 - Version 3.3.1 alpha

    + Many bugs found in last week's alpha release have now been fixed and + the library is running much faster again.

    + +

    Database connections can now be moved between threads as long as the + connection holds no locks at the time it is moved. Thus the common + paradigm of maintaining a pool of database connections and handing + them off to transient worker threads is now supported. + Please help test this new feature. + See + the MultiThreading wiki page for additional + information. +
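    + A deliberately tiny sketch of the handoff rule described above: one
    + connection, one worker thread, no pool. The connection holds no locks
    + (no open transaction) when it is handed over, and only the worker touches
    + it until the join. The table name is invented.
    +
    +#include <pthread.h>
    +#include <sqlite3.h>
    +
    +static void *worker(void *arg){
    +  sqlite3 *db = (sqlite3*)arg;   /* this thread now owns the connection */
    +  sqlite3_exec(db, "INSERT INTO log(msg) VALUES('hello from worker')", 0, 0, 0);
    +  return 0;
    +}
    +
    +int main(void){
    +  sqlite3 *db;
    +  pthread_t tid;
    +  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
    +  sqlite3_exec(db, "CREATE TABLE log(msg)", 0, 0, 0);
    +  /* No transaction is open here, so the connection holds no locks and
    +  ** may be moved to another thread. */
    +  pthread_create(&tid, 0, worker, db);
    +  pthread_join(tid, 0);   /* main does not use db while the worker has it */
    +  sqlite3_close(db);
    +  return 0;
    +}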


    2006-Jan-10 - Version 3.3.0 alpha

    + Version 3.3.0 adds support for CHECK constraints, DESC indices, + separate REAL and INTEGER column affinities, a new OS interface layer + design, and many other changes. The code passed a regression + test but should still be considered alpha. Please report any + problems.

    + +

    + The file format for version 3.3.0 has changed slightly to support + descending indices and + a more efficient encoding of boolean values. SQLite 3.3.0 will read and + write legacy databases created with any prior version of SQLite 3. But + databases created by version 3.3.0 will not be readable or writable + by earlier versions of SQLite. The older file format can be + specified at compile-time for those rare cases where it is needed. +


    2005-Dec-19 - Versions 3.2.8 and 2.8.17

    + These versions contain one-line changes to 3.2.7 and 2.8.16 to fix a bug + that has been present since March of 2002 and version 2.4.0. + That bug might possibly cause database corruption if a large INSERT or + UPDATE statement within a multi-statement transaction fails due to a + uniqueness constraint but the containing transaction commits. +


    2005-Sep-24 - Version 3.2.7

    + This version fixes several minor and obscure bugs. + Upgrade only if you are having problems. +


    2005-Sep-16 - Version 3.2.6 - Critical Bug Fix

    + This version fixes a bug that can result in database + corruption if a VACUUM of a 1 gibibyte or larger database fails + (perhaps due to running out of disk space or an unexpected power loss) + and is later rolled back. +

    + Also in this release: + The ORDER BY and GROUP BY processing was rewritten to use less memory. + Support for COUNT(DISTINCT) was added. The LIKE operator can now be + used by the optimizer on columns with COLLATE NOCASE. +


    2005-Aug-27 - Version 3.2.5

    + This release fixes a few more lingering bugs in the new code. + We expect that this release will be stable and ready for production use. +


    2005-Aug-24 - Version 3.2.4

    + This release fixes a bug in the new optimizer that can lead to segfaults + when parsing very complex WHERE clauses. +


    2005-Aug-21 - Version 3.2.3

    + This release adds the ANALYZE command, + the CAST operator, and many + very substantial improvements to the query optimizer. See the + change log for additional + information. +


    2005-Aug-2 - 2005 Open Source Award for SQLite

    + SQLite and its primary author D. Richard Hipp have been honored with + a 2005 Open Source + Award from Google and O'Reilly.
    +


    2005-Jun-13 - Version 3.2.2

    + This release includes numerous minor bug fixes, speed improvements, + and code size reductions. There is no reason to upgrade unless you + are having problems or unless you just want to. +


    2005-Mar-29 - Version 3.2.1

    + This release fixes a memory allocation problem in the new + ALTER TABLE ADD COLUMN + command. +


    2005-Mar-21 - Version 3.2.0

    + The primary purpose for version 3.2.0 is to add support for + ALTER TABLE ADD COLUMN. + The new ADD COLUMN capability is made + possible by AOL developers supporting and embracing great + open-source software. Thanks, AOL!

    + +

    + Version 3.2.0 also fixes an obscure but serious bug that was discovered + just prior to release. If you have a multi-statement transaction and + within that transaction an UPDATE or INSERT statement fails due to a + constraint, and you then try to roll back the whole transaction, the rollback + might not work correctly. See + Ticket #1171 + for details. Upgrading is recommended for all users. +


    2005-Mar-16 - Version 3.1.6

    + Version 3.1.6 fixes a critical bug that can cause database corruption + when inserting rows into tables with around 125 columns. This bug was + introduced in version 3.0.0. See + Ticket #1163 + for additional information. +


    2005-Mar-11 - Versions 3.1.4 and 3.1.5 Released

    + Version 3.1.4 fixes a critical bug that could cause database corruption + if the autovacuum mode of version 3.1.0 is turned on (it is off by + default) and a CREATE UNIQUE INDEX is executed within a transaction but + fails because the indexed columns are not unique. Anyone using the + autovacuum feature and unique indices should upgrade.

    + +

    Version 3.1.5 adds the ability to disable + the F_FULLFSYNC ioctl() in OS-X by setting "PRAGMA synchronous=on" instead + of the default "PRAGMA synchronous=full". There was an attempt to add + this capability in 3.1.4 but it did not work due to a spelling error. +


    2005-Feb-19 - Version 3.1.3 Released

    + Version 3.1.3 cleans up some minor issues discovered in version 3.1.2. +


    2005-Feb-15 - Versions 2.8.16 and 3.1.2 Released

    + A critical bug in the VACUUM command that can lead to database + corruption has been fixed in both the 2.x branch and the main + 3.x line. This bug has existed in all prior versions of SQLite. + Even though it is unlikely you will ever encounter this bug, + it is suggested that all users upgrade. See + + ticket #1116 for additional information.

    + +

    Version 3.1.2 is also the first stable release of the 3.1 + series. SQLite 3.1 features added support for correlated + subqueries, autovacuum, autoincrement, ALTER TABLE, and + other enhancements. See the + release notes + for version 3.1.0 for a detailed description of the + changes available in the 3.1 series. +


    2005-Feb-01 - Version 3.1.1 (beta) Released

    + Version 3.1.1 (beta) is now available on the + website. Version 3.1.1 is fully backwards compatible with the 3.0 series + and adds many new features, including autovacuum and correlated + subqueries. The + release notes + from version 3.1.0 apply equally to this beta release. A stable release + is expected within a couple of weeks. +


    2005-Jan-21 - Version 3.1.0 (alpha) Released

    + Version 3.1.0 (alpha) is now available on the + website. Version 3.1.0 is fully backwards compatible with the 3.0 series + and adds many new features, including autovacuum and correlated + subqueries. See the + release notes + for details.

    + +

    This is an alpha release. A beta release is expected in about a week + with the first stable release to follow after two more weeks. +


    2004-Nov-09 - SQLite at the 2004 International PHP Conference

    + There was a talk on the architecture of SQLite and how to optimize + SQLite queries at the 2004 International PHP Conference in Frankfurt, + Germany. + + Slides from that talk are available. +


    2004-Oct-11 - Version 3.0.8

    + Version 3.0.8 of SQLite contains several code optimizations and minor + bug fixes and adds support for DEFERRED, IMMEDIATE, and EXCLUSIVE + transactions. This is an incremental release. There is no reason + to upgrade from version 3.0.7 if that version is working for you. +


    2004-Oct-10 - SQLite at the 11th +Annual Tcl/Tk Conference

    + There will be a talk on the use of SQLite in Tcl/Tk at the + 11th Tcl/Tk Conference this week in + New Orleans. Visit + http://www.tcl.tk/ for details. + + Slides from the talk are available. +


    2004-Sep-18 - Version 3.0.7

    + Version 3.0 has now been in use by multiple projects for several + months with no major difficulties. We consider it stable and + ready for production use. +


    2004-Sep-02 - Version 3.0.6 (beta)

    + Because of some important changes to sqlite3_step(), + we have decided to + do an additional beta release prior to the first "stable" release. + If no serious problems are discovered in this version, we will + release version 3.0 "stable" in about a week. +


    2004-Aug-29 - Version 3.0.5 (beta)

    + The fourth beta release of SQLite version 3.0 is now available. + The next release is expected to be called "stable". +


    2004-Aug-08 - Version 3.0.4 (beta)

    + The third beta release of SQLite version 3.0 is now available. + This new beta fixes several bugs including a database corruption + problem that can occur when doing a DELETE while a SELECT is pending. + Expect at least one more beta before version 3.0 goes final. +


    2004-July-22 - Version 3.0.3 (beta)

    + The second beta release of SQLite version 3.0 is now available. + This new beta fixes many bugs and adds support for databases with + varying page sizes. The next 3.0 release will probably be called + a final or stable release.

    + +

    Version 3.0 adds support for internationalization and a new + more compact file format. + Details. + The API and file format have been fixed since 3.0.2. All + regression tests pass (over 100000 tests) and the test suite + exercises over 95% of the code.

    + +

    SQLite version 3.0 is made possible in part by AOL + developers supporting and embracing great Open-Source Software. +


    2004-Jly-22 - Version 2.8.15

    + SQLite version 2.8.15 is a maintenance release for the version 2.8 + series. Version 2.8 continues to be maintained with bug fixes, but + no new features will be added to version 2.8. All the changes in + this release are minor. If you are not having problems, there is + no reason to upgrade. +


    2004-Jun-30 - Version 3.0.2 (beta) Released

    + The first beta release of SQLite version 3.0 is now available. + Version 3.0 adds support for internationalization and a new + more compact file format. + Details. + As of this release, the API and file format are frozen. All + regression tests pass (over 100000 tests) and the test suite + exercises over 95% of the code.

    + +

    SQLite version 3.0 is made possible in part by AOL + developers supporting and embracing great Open-Source Software. +


    2004-Jun-25 - Website hacked

    + The www.sqlite.org website was hacked sometime around 2004-Jun-22 + because the lead SQLite developer failed to properly patch CVS. + Evidence suggests that the attacker was unable to elevate privileges + above user "cvs". Nevertheless, as a precaution the entire website + has been reconstructed from scratch on a fresh machine. All services + should be back to normal as of 2004-Jun-28. +


    2004-Jun-18 - Version 3.0.0 (alpha) Released

    + The first alpha release of SQLite version 3.0 is available for + public review and comment. Version 3.0 enhances internationalization support + through the use of UTF-16 and user-defined text collating sequences. + BLOBs can now be stored directly, without encoding. + A new file format results in databases that are 25% smaller (depending + on content). The code is also a little faster. In spite of the many + new features, the library footprint is still less than 240KB + (x86, gcc -O1). + Additional information.

    + +

    Our intent is to freeze the file format and API on 2004-Jul-01. + Users are encouraged to review and evaluate this alpha release carefully + and submit any feedback prior to that date.

    + +

    The 2.8 series of SQLite will continue to be supported with bug + fixes for the foreseeable future. +


    2004-Jun-09 - Version 2.8.14 Released

    + SQLite version 2.8.14 is a patch release to the stable 2.8 series. + There is no reason to upgrade if 2.8.13 is working ok for you. + This is only a bug-fix release. Most development effort is + going into version 3.0.0 which is due out soon. +


    2004-May-31 - CVS Access Temporarily Disabled

    + Anonymous access to the CVS repository will be suspended + for 2 weeks beginning on 2004-June-04. Everyone will still + be able to download + prepackaged source bundles, create or modify trouble tickets, or view + change logs during the CVS service interruption. Full open access to the + CVS repository will be restored on 2004-June-18. +


    2004-Apr-23 - Work Begins On SQLite Version 3

    + Work has begun on version 3 of SQLite. Version 3 is a major + change to both the C-language API and the underlying file format + that will enable SQLite to better support internationalization. + The first beta is scheduled for release on 2004-July-01.

    + +

    Plans are to continue to support SQLite version 2.8 with + bug fixes. But all new development will occur in version 3.0. +


    +
    +This page last modified 2009/06/08 13:35:22 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/oldnews.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/oldnews.tcl --- sqlite3-3.4.2/www/oldnews.tcl 2007-08-13 17:15:29.000000000 +0100 +++ sqlite3-3.6.16/www/oldnews.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,460 +0,0 @@ -#!/usr/bin/tclsh -source common.tcl -header {SQLite Older News} - -proc newsitem {date title text} { - puts "

    $date - $title

    " - regsub -all "\n( *\n)+" $text "

    \n\n

    " txt - puts "

    $txt

    " - puts "
    " -} - - -newsitem {2007-Apr-25} {Version 3.3.17} { - This version fixes a bug in the forwards-compatibility logic of SQLite - that was causing a database to become unreadable when it should have - been read-only. Upgrade from 3.3.16 only if you plan to deploy into - a product that might need to be upgraded in the future. For day to day - use, it probably does not matter. -} - -newsitem {2007-Apr-18} {Version 3.3.16} { - Performance improvements added in 3.3.14 but mistakenly turned off - in 3.3.15 have been reinstated. A bug has been fixed that prevented - VACUUM from running if a NULL value was in a UNIQUE column. -} - -newsitem {2007-Apr-09} {Version 3.3.15} { - An annoying bug introduced in 3.3.14 has been fixed. There are - also many enhancements to the test suite. -} - -newsitem {2007-Apr-02} {Version 3.3.14} { - This version focuses on performance improvements. If you recompile - - the amalgamation using GCC option -O3 (the precompiled binaries - use -O2) you may see performance - improvements of 35% or more over version 3.3.13 depending on your - workload. This version also - adds support for - exclusive access mode. -} - -newsitem {2007-Feb-13} {Version 3.3.13} { - This version fixes a subtle bug in the ORDER BY optimizer that can - occur when using joins. There are also a few minor enhancements. - Upgrading is recommended. -} - -newsitem {2007-Jan-27} {Version 3.3.12} { - The first published build of the previous version used the wrong - set of source files. Consequently, many people downloaded a build - that was labeled as "3.3.11" but was really 3.3.10. Version 3.3.12 - is released to clear up the ambiguity. A couple more bugs have - also been fixed and - PRAGMA integrity_check has been enhanced. -} - -newsitem {2007-Jan-22} {Version 3.3.11} { - Version 3.3.11 fixes for a few more problems in version 3.3.9 that - version 3.3.10 failed to catch. Upgrading is recommended. -} - -newsitem {2007-Jan-9} {Version 3.3.10} { - Version 3.3.10 fixes several bugs that were introduced by the previous - release. Upgrading is recommended. -} - -newsitem {2007-Jan-4} {Version 3.3.9} { - Version 3.3.9 fixes bugs that can lead to database corruption under - obscure and difficult to reproduce circumstances. See - - DatabaseCorruption in the - wiki for details. - This release also adds the new - sqlite3_prepare_v2() - API and includes important bug fixes in the command-line - shell and enhancements to the query optimizer. Upgrading is - recommended. -} - -newsitem {2006-Oct-9} {Version 3.3.8} { - Version 3.3.8 adds support for full-text search using the - FTS1 - module. There are also minor bug fixes. Upgrade only if - you want to try out the new full-text search capabilities or if - you are having problems with 3.3.7. -} - -newsitem {2006-Aug-12} {Version 3.3.7} { - Version 3.3.7 includes support for loadable extensions and virtual - tables. But both features are still considered "beta" and their - APIs are subject to change in a future release. This release is - mostly to make available the minor bug fixes that have accumulated - since 3.3.6. Upgrading is not necessary. Do so only if you encounter - one of the obscure bugs that have been fixed or if you want to try - out the new features. -} - -newsitem {2006-Jun-19} {New Book About SQLite} { - - The Definitive Guide to SQLite, a new book by - Mike Owens. - is now available from Apress. - The books covers the latest SQLite internals as well as - the native C interface and bindings for PHP, Python, - Perl, Ruby, Tcl, and Java. Recommended. 
-} - - -newsitem {2006-Jun-6} {Version 3.3.6} { - Changes include improved tolerance for windows virus scanners - and faster :memory: databases. There are also fixes for several - obscure bugs. Upgrade if you are having problems. -} - -newsitem {2006-Apr-5} {Version 3.3.5} { - This release fixes many minor bugs and documentation typos and - provides some minor new features and performance enhancements. - Upgrade only if you are having problems or need one of the new features. -} - -newsitem {2006-Feb-11} {Version 3.3.4} { - This release fixes several bugs, including a - a blunder that might cause a deadlock on multithreaded systems. - Anyone using SQLite in a multithreaded environment should probably upgrade. -} - -newsitem {2006-Jan-31} {Version 3.3.3 stable} { - There have been no major problems discovered in version 3.3.2, so - we hereby declare the new APIs and language features to be stable - and supported. -} - -newsitem {2006-Jan-24} {Version 3.3.2 beta} { - More bug fixes and performance improvements as we move closer to - a production-ready version 3.3.x. -} - -newsitem {2006-Jan-16} {Version 3.3.1 alpha} { - Many bugs found in last week's alpha release have now been fixed and - the library is running much faster again. - - Database connections can now be moved between threads as long as the - connection holds no locks at the time it is moved. Thus the common - paradigm of maintaining a pool of database connections and handing - them off to transient worker threads is now supported. - Please help test this new feature. - See - the MultiThreading wiki page for additional - information. -} - -newsitem {2006-Jan-10} {Version 3.3.0 alpha} { - Version 3.3.0 adds support for CHECK constraints, DESC indices, - separate REAL and INTEGER column affinities, a new OS interface layer - design, and many other changes. The code passed a regression - test but should still be considered alpha. Please report any - problems. - - The file format for version 3.3.0 has changed slightly to support - descending indices and - a more efficient encoding of boolean values. SQLite 3.3.0 will read and - write legacy databases created with any prior version of SQLite 3. But - databases created by version 3.3.0 will not be readable or writable - by earlier versions of the SQLite. The older file format can be - specified at compile-time for those rare cases where it is needed. -} - -newsitem {2005-Dec-19} {Versions 3.2.8 and 2.8.17} { - These versions contain one-line changes to 3.2.7 and 2.8.16 to fix a bug - that has been present since March of 2002 and version 2.4.0. - That bug might possibly cause database corruption if a large INSERT or - UPDATE statement within a multi-statement transaction fails due to a - uniqueness constraint but the containing transaction commits. -} - - -newsitem {2005-Sep-24} {Version 3.2.7} { - This version fixes several minor and obscure bugs. - Upgrade only if you are having problems. -} - -newsitem {2005-Sep-16} {Version 3.2.6 - Critical Bug Fix} { - This version fixes a bug that can result in database - corruption if a VACUUM of a 1 gibibyte or larger database fails - (perhaps do to running out of disk space or an unexpected power loss) - and is later rolled back. -

    - Also in this release: - The ORDER BY and GROUP BY processing was rewritten to use less memory. - Support for COUNT(DISTINCT) was added. The LIKE operator can now be - used by the optimizer on columns with COLLATE NOCASE. -} - -newsitem {2005-Aug-27} {Version 3.2.5} { - This release fixes a few more lingering bugs in the new code. - We expect that this release will be stable and ready for production use. -} - -newsitem {2005-Aug-24} {Version 3.2.4} { - This release fixes a bug in the new optimizer that can lead to segfaults - when parsing very complex WHERE clauses. -} - -newsitem {2005-Aug-21} {Version 3.2.3} { - This release adds the ANALYZE command, - the CAST operator, and many - very substantial improvements to the query optimizer. See the - change log for additional - information. -} - -newsitem {2005-Aug-2} {2005 Open Source Award for SQLite} { - SQLite and its primary author D. Richard Hipp have been honored with - a 2005 Open Source - Award from Google and O'Reilly.
    -} - - -newsitem {2005-Jun-13} {Version 3.2.2} { - This release includes numerous minor bug fixes, speed improvements, - and code size reductions. There is no reason to upgrade unless you - are having problems or unless you just want to. -} - -newsitem {2005-Mar-29} {Version 3.2.1} { - This release fixes a memory allocation problem in the new - ALTER TABLE ADD COLUMN - command. -} - -newsitem {2005-Mar-21} {Version 3.2.0} { - The primary purpose for version 3.2.0 is to add support for - ALTER TABLE ADD COLUMN. - The new ADD COLUMN capability is made - possible by AOL developers supporting and embracing great - open-source software. Thanks, AOL! - - Version 3.2.0 also fixes an obscure but serious bug that was discovered - just prior to release. If you have a multi-statement transaction and - within that transaction an UPDATE or INSERT statement fails due to a - constraint, then you try to rollback the whole transaction, the rollback - might not work correctly. See - Ticket #1171 - for details. Upgrading is recommended for all users. -} - -newsitem {2005-Mar-16} {Version 3.1.6} { - Version 3.1.6 fixes a critical bug that can cause database corruption - when inserting rows into tables with around 125 columns. This bug was - introduced in version 3.0.0. See - Ticket #1163 - for additional information. -} - -newsitem {2005-Mar-11} {Versions 3.1.4 and 3.1.5 Released} { - Version 3.1.4 fixes a critical bug that could cause database corruption - if the autovacuum mode of version 3.1.0 is turned on (it is off by - default) and a CREATE UNIQUE INDEX is executed within a transaction but - fails because the indexed columns are not unique. Anyone using the - autovacuum feature and unique indices should upgrade. - - Version 3.1.5 adds the ability to disable - the F_FULLFSYNC ioctl() in OS-X by setting "PRAGMA synchronous=on" instead - of the default "PRAGMA synchronous=full". There was an attempt to add - this capability in 3.1.4 but it did not work due to a spelling error. -} - -newsitem {2005-Feb-19} {Version 3.1.3 Released} { - Version 3.1.3 cleans up some minor issues discovered in version 3.1.2. -} - -newsitem {2005-Feb-15} {Versions 2.8.16 and 3.1.2 Released} { - A critical bug in the VACUUM command that can lead to database - corruption has been fixed in both the 2.x branch and the main - 3.x line. This bug has existed in all prior versions of SQLite. - Even though it is unlikely you will ever encounter this bug, - it is suggested that all users upgrade. See - - ticket #1116. for additional information. - - Version 3.1.2 is also the first stable release of the 3.1 - series. SQLite 3.1 features added support for correlated - subqueries, autovacuum, autoincrement, ALTER TABLE, and - other enhancements. See the - release notes - for version 3.1.0 for a detailed description of the - changes available in the 3.1 series. -} - -newsitem {2005-Feb-01} {Version 3.1.1 (beta) Released} { - Version 3.1.1 (beta) is now available on the - website. Verison 3.1.1 is fully backwards compatible with the 3.0 series - and features many new features including Autovacuum and correlated - subqueries. The - release notes - From version 3.1.0 apply equally to this release beta. A stable release - is expected within a couple of weeks. -} - -newsitem {2005-Jan-21} {Version 3.1.0 (alpha) Released} { - Version 3.1.0 (alpha) is now available on the - website. Verison 3.1.0 is fully backwards compatible with the 3.0 series - and features many new features including Autovacuum and correlated - subqueries. 
See the - release notes - for details. - - This is an alpha release. A beta release is expected in about a week - with the first stable release to follow after two more weeks. -} - -newsitem {2004-Nov-09} {SQLite at the 2004 International PHP Conference} { - There was a talk on the architecture of SQLite and how to optimize - SQLite queries at the 2004 International PHP Conference in Frankfurt, - Germany. - - Slides from that talk are available. -} - -newsitem {2004-Oct-11} {Version 3.0.8} { - Version 3.0.8 of SQLite contains several code optimizations and minor - bug fixes and adds support for DEFERRED, IMMEDIATE, and EXCLUSIVE - transactions. This is an incremental release. There is no reason - to upgrade from version 3.0.7 if that version is working for you. -} - - -newsitem {2004-Oct-10} {SQLite at the 11th -Annual Tcl/Tk Conference} { - There will be a talk on the use of SQLite in Tcl/Tk at the - 11th Tcl/Tk Conference this week in - New Orleans. Visit - http://www.tcl.tk/ for details. - - Slides from the talk are available. -} - -newsitem {2004-Sep-18} {Version 3.0.7} { - Version 3.0 has now been in use by multiple projects for several - months with no major difficulties. We consider it stable and - ready for production use. -} - -newsitem {2004-Sep-02} {Version 3.0.6 (beta)} { - Because of some important changes to sqlite3_step(), - we have decided to - do an additional beta release prior to the first "stable" release. - If no serious problems are discovered in this version, we will - release version 3.0 "stable" in about a week. -} - - -newsitem {2004-Aug-29} {Version 3.0.5 (beta)} { - The fourth beta release of SQLite version 3.0 is now available. - The next release is expected to be called "stable". -} - - -newsitem {2004-Aug-08} {Version 3.0.4 (beta)} { - The third beta release of SQLite version 3.0 is now available. - This new beta fixes several bugs including a database corruption - problem that can occur when doing a DELETE while a SELECT is pending. - Expect at least one more beta before version 3.0 goes final. -} - -newsitem {2004-July-22} {Version 3.0.3 (beta)} { - The second beta release of SQLite version 3.0 is now available. - This new beta fixes many bugs and adds support for databases with - varying page sizes. The next 3.0 release will probably be called - a final or stable release. - - Version 3.0 adds support for internationalization and a new - more compact file format. - Details. - The API and file format have been fixed since 3.0.2. All - regression tests pass (over 100000 tests) and the test suite - exercises over 95% of the code. - - SQLite version 3.0 is made possible in part by AOL - developers supporting and embracing great Open-Source Software. -} - -newsitem {2004-Jly-22} {Version 2.8.15} { - SQLite version 2.8.15 is a maintenance release for the version 2.8 - series. Version 2.8 continues to be maintained with bug fixes, but - no new features will be added to version 2.8. All the changes in - this release are minor. If you are not having problems, there is - there is no reason to upgrade. -} - -newsitem {2004-Jun-30} {Version 3.0.2 (beta) Released} { - The first beta release of SQLite version 3.0 is now available. - Version 3.0 adds support for internationalization and a new - more compact file format. - Details. - As of this release, the API and file format are frozen. All - regression tests pass (over 100000 tests) and the test suite - exercises over 95% of the code. 
- - SQLite version 3.0 is made possible in part by AOL - developers supporting and embracing great Open-Source Software. -} - - -newsitem {2004-Jun-25} {Website hacked} { - The www.sqlite.org website was hacked sometime around 2004-Jun-22 - because the lead SQLite developer failed to properly patch CVS. - Evidence suggests that the attacker was unable to elevate privileges - above user "cvs". Nevertheless, as a precaution the entire website - has been reconstructed from scratch on a fresh machine. All services - should be back to normal as of 2004-Jun-28. -} - - -newsitem {2004-Jun-18} {Version 3.0.0 (alpha) Released} { - The first alpha release of SQLite version 3.0 is available for - public review and comment. Version 3.0 enhances internationalization support - through the use of UTF-16 and user-defined text collating sequences. - BLOBs can now be stored directly, without encoding. - A new file format results in databases that are 25% smaller (depending - on content). The code is also a little faster. In spite of the many - new features, the library footprint is still less than 240KB - (x86, gcc -O1). - Additional information. - - Our intent is to freeze the file format and API on 2004-Jul-01. - Users are encouraged to review and evaluate this alpha release carefully - and submit any feedback prior to that date. - - The 2.8 series of SQLite will continue to be supported with bug - fixes for the foreseeable future. -} - -newsitem {2004-Jun-09} {Version 2.8.14 Released} { - SQLite version 2.8.14 is a patch release to the stable 2.8 series. - There is no reason to upgrade if 2.8.13 is working ok for you. - This is only a bug-fix release. Most development effort is - going into version 3.0.0 which is due out soon. -} - -newsitem {2004-May-31} {CVS Access Temporarily Disabled} { - Anonymous access to the CVS repository will be suspended - for 2 weeks beginning on 2004-June-04. Everyone will still - be able to download - prepackaged source bundles, create or modify trouble tickets, or view - change logs during the CVS service interruption. Full open access to the - CVS repository will be restored on 2004-June-18. -} - -newsitem {2004-Apr-23} {Work Begins On SQLite Version 3} { - Work has begun on version 3 of SQLite. Version 3 is a major - changes to both the C-language API and the underlying file format - that will enable SQLite to better support internationalization. - The first beta is schedule for release on 2004-July-01. - - Plans are to continue to support SQLite version 2.8 with - bug fixes. But all new development will occur in version 3.0. -} -footer {$Id: oldnews.tcl,v 1.22 2007/08/13 16:15:29 drh Exp $} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/omitted.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/omitted.html --- sqlite3-3.4.2/www/omitted.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/omitted.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,138 @@ + + +SQL Features That SQLite Does Not Implement + + + + + +

    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQL Features That SQLite Does Not Implement

    + +

    +Rather than try to list all the features of SQL92 that SQLite does +support, it is much easier to list those that it does not. +Unsupported features of SQL92 are shown below.

    + +

    +The order of this list gives some hint as to when a feature might +be added to SQLite. Those features near the top of the list are +likely to be added in the near future. There are no immediate +plans to add features near the bottom of the list. +

    + + + + +
    FOREIGN KEY constraints  + FOREIGN KEY constraints are parsed but are not enforced. + However, the equivalent constraint enforcement can be achieved + using triggers. The SQLite source tree contains + + source code and + + documentation for a C program that will read an SQLite database, + analyze the foreign key constraints, and generate appropriate triggers + automatically. A small hand-written sketch of the trigger technique + appears just below this table. +
    Complete trigger support  + There is some support for triggers but it is not complete. Missing + subfeatures include FOR EACH STATEMENT triggers (currently all triggers + must be FOR EACH ROW), INSTEAD OF triggers on tables (currently + INSTEAD OF triggers are only allowed on views), and recursive + triggers - triggers that trigger themselves. +
    Complete ALTER TABLE support  + Only the RENAME TABLE and ADD COLUMN variants of the + ALTER TABLE command are supported. Other kinds of ALTER TABLE operations + such as + DROP COLUMN, ALTER COLUMN, ADD CONSTRAINT, and so forth are omitted. +
    RIGHT and FULL OUTER JOIN  + LEFT OUTER JOIN is implemented, but not RIGHT OUTER JOIN or + FULL OUTER JOIN. +
    Writing to VIEWs  + VIEWs in SQLite are read-only. You may not execute a DELETE, INSERT, or + UPDATE statement on a view. But you can create a trigger + that fires on an attempt to DELETE, INSERT, or UPDATE a view and do + what you need in the body of the trigger. +
    GRANT and REVOKE  + Since SQLite reads and writes an ordinary disk file, the + only access permissions that can be applied are the normal + file access permissions of the underlying operating system. + The GRANT and REVOKE commands commonly found on client/server + RDBMSes are not implemented because they would be meaningless + for an embedded database engine. +
    + +
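    + For the FOREIGN KEY item above, here is a small hand-written sketch of the
    + trigger technique. It covers only the INSERT side (UPDATE of the child and
    + UPDATE or DELETE of the parent would need similar triggers), and the table
    + and trigger names are invented; the generated triggers from the tool
    + mentioned above are more complete.
    +
    +#include <stdio.h>
    +#include <sqlite3.h>
    +
    +int main(void){
    +  sqlite3 *db;
    +  char *err = 0;
    +  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
    +  sqlite3_exec(db,
    +    "CREATE TABLE parent(id INTEGER PRIMARY KEY);"
    +    "CREATE TABLE child(id INTEGER PRIMARY KEY,"
    +    "                   parent_id INTEGER REFERENCES parent(id));"
    +    /* The REFERENCES clause above is parsed but ignored; this trigger
    +    ** supplies the INSERT-side enforcement by hand. */
    +    "CREATE TRIGGER child_fk_insert BEFORE INSERT ON child FOR EACH ROW"
    +    " WHEN NEW.parent_id IS NOT NULL AND"
    +    "      (SELECT id FROM parent WHERE id=NEW.parent_id) IS NULL"
    +    " BEGIN SELECT RAISE(ABORT, 'foreign key violation on child.parent_id'); END;",
    +    0, 0, &err);
    +  /* This insert is rejected because parent row 99 does not exist. */
    +  if( sqlite3_exec(db, "INSERT INTO child(parent_id) VALUES(99)", 0, 0, &err)!=SQLITE_OK ){
    +    printf("rejected as expected: %s\n", err);
    +    sqlite3_free(err);
    +  }
    +  sqlite3_close(db);
    +  return 0;
    +}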

    +If you find other SQL92 features that SQLite does not support, please +add them to the Wiki page at + +http://www.sqlite.org/cvstrac/wiki?p=Unsupported +

    +
    +This page last modified 2009/01/03 15:03:50 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/omitted.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/omitted.tcl --- sqlite3-3.4.2/www/omitted.tcl 2006-12-15 21:21:28.000000000 +0000 +++ sqlite3-3.6.16/www/omitted.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -# -# Run this script to generated a omitted.html output file -# -set rcsid {$Id: omitted.tcl,v 1.10 2005/11/03 00:41:18 drh Exp $} -source common.tcl -header {SQL Features That SQLite Does Not Implement} -puts { -

    SQL Features That SQLite Does Not Implement

    - -

    -Rather than try to list all the features of SQL92 that SQLite does -support, it is much easier to list those that it does not. -Unsupported features of SQL92 are shown below.

    - -

    -The order of this list gives some hint as to when a feature might -be added to SQLite. Those features near the top of the list are -likely to be added in the near future. There are no immediate -plans to add features near the bottom of the list. -

    - - -} - -proc feature {name desc} { - puts "" - puts "" -} - -feature {FOREIGN KEY constraints} { - FOREIGN KEY constraints are parsed but are not enforced. -} - -feature {Complete trigger support} { - There is some support for triggers but it is not complete. Missing - subfeatures include FOR EACH STATEMENT triggers (currently all triggers - must be FOR EACH ROW), INSTEAD OF triggers on tables (currently - INSTEAD OF triggers are only allowed on views), and recursive - triggers - triggers that trigger themselves. -} - -feature {Complete ALTER TABLE support} { - Only the RENAME TABLE and ADD COLUMN variants of the - ALTER TABLE command are supported. Other kinds of ALTER TABLE operations - such as - DROP COLUMN, ALTER COLUMN, ADD CONSTRAINT, and so forth are omitted. -} - -feature {Nested transactions} { - The current implementation only allows a single active transaction. -} - -feature {RIGHT and FULL OUTER JOIN} { - LEFT OUTER JOIN is implemented, but not RIGHT OUTER JOIN or - FULL OUTER JOIN. -} - -feature {Writing to VIEWs} { - VIEWs in SQLite are read-only. You may not execute a DELETE, INSERT, or - UPDATE statement on a view. But you can create a trigger - that fires on an attempt to DELETE, INSERT, or UPDATE a view and do - what you need in the body of the trigger. -} - -feature {GRANT and REVOKE} { - Since SQLite reads and writes an ordinary disk file, the - only access permissions that can be applied are the normal - file access permissions of the underlying operating system. - The GRANT and REVOKE commands commonly found on client/server - RDBMSes are not implemented because they would be meaningless - for an embedded database engine. -} - -puts { -
    $name " - puts "$desc
    - -

    -If you find other SQL92 features that SQLite does not support, please -add them to the Wiki page at - -http://www.sqlite.org/cvstrac/wiki?p=Unsupported -

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/onefile.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/onefile.html --- sqlite3-3.4.2/www/onefile.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/onefile.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,111 @@ + + +SQLite: Single File Database + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    Single-file Cross-platform Database

    + +

    +A database in SQLite is a single disk file. +Furthermore, the file format is cross-platform. +A database that is created on one machine can be +copied and used on a different machine with +a different architecture. SQLite databases +are portable across 32-bit and 64-bit machines +and between +big-endian and +little-endian +architectures. +

    + +

    +The SQLite database file format is also stable. +All releases of SQLite version 3 can read and write database +files created by the very first SQLite 3 release (version 3.0.0) +going back to 2004-06-18. This is "backwards compatibility". +The developers promise to maintain backwards compatibility of +the database file format for all future releases of SQLite 3. +"Forwards compatibility" means that older releases +of SQLite can also read and write databases created by newer +releases. SQLite is usually, but not completely, forwards +compatible. +

    + +

    +The stability of the SQLite database file format and the fact +that the file format is cross-platform combine to make SQLite +database files an excellent choice as an +Application File Format.

    +

    +
    +This page last modified 2008/03/03 13:41:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/opcode.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/opcode.html --- sqlite3-3.4.2/www/opcode.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/opcode.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,1055 @@ + + +SQLite Virtual Machine Opcodes + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Virtual Machine Opcodes

    + + + +

    Introduction

    + +

    In order to execute an SQL statement, the SQLite library first parses +the SQL, analyzes the statement, then generates a short program to execute +the statement. The program is generated for a "virtual machine" implemented +by the SQLite library. This document describes the operation of that +virtual machine.

    + +

    This document is intended as a reference, not a tutorial. +A separate Virtual Machine Tutorial is +available. If you are looking for a narrative description +of how the virtual machine works, you should read the tutorial +and not this document. Once you have a basic idea of what the +virtual machine does, you can refer back to this document for +the details on a particular opcode. +Unfortunately, the virtual machine tutorial was written for +SQLite version 1.0. There are substantial changes in the virtual +machine for version 2.0, again for version 3.0.0, and again +for version 3.5.5, and the document has not been updated. But the +basic concepts behind the virtual machine still apply. +

    + +

    The source code to the virtual machine is in the vdbe.c source +file. All of the opcode definitions further down in this document are +contained in comments in the source file. In fact, the opcode table +in this document +was generated by scanning the vdbe.c source file +and extracting the necessary information from comments. So the +source code comments are really the canonical source of information +about the virtual machine. When in doubt, refer to the source code.

    + +

    Each instruction in the virtual machine consists of an opcode and +up to five operands named P1, P2, P3, P4, and P5. P1, P2, and P3 +are 32-bit signed integers. These operands often refer to registers. +P2 is always the +jump destination in any operation that might cause a jump. +P4 may be a 32-bit signed integer, a 64-bit signed integer, a +64-bit floating point value, a string literal, a Blob literal, +a pointer to a collating sequence comparison function, or a +pointer to the implementation of an application-defined SQL +function, or various other things. P5 is an unsigned character +normally used as a flag. +Some operators use all five operands. Some use +one or two. Some operators use none of the operands.
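    + For readers who think in C, the struct below restates the instruction
    + layout informally. It is an illustration only, not the actual declaration
    + used inside the SQLite source (whose field names, types, and P4
    + representation differ), but it captures the opcode-plus-five-operand
    + shape described above.
    +
    +typedef struct VmInstruction {
    +  unsigned char opcode;  /* which operation to perform                      */
    +  int p1, p2, p3;        /* 32-bit signed operands; P2 is the jump target   */
    +  void *p4;              /* pointer to whatever P4 carries: string,         */
    +                         /* collation, function definition, and so on       */
    +  unsigned char p5;      /* unsigned character, normally used as a flag     */
    +} VmInstruction;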

    + +

    The virtual machine begins execution on instruction number 0. +Execution continues until a Halt instruction is seen, or +the program counter becomes one greater than the address of +the last instruction, or there is an execution error. +When the virtual machine halts, all memory +that it allocated is released and all database cursors it may +have had open are closed. If the execution stopped due to an +error, any pending transactions are terminated and changes made +to the database are rolled back.

    + +

    The virtual machine can have zero or more cursors. Each cursor +is a pointer into a single table or index within the database. +There can be multiple cursors pointing at the same index or table. +All cursors operate independently, even cursors pointing to the same +indices or tables. +The only way for the virtual machine to interact with a database +file is through a cursor. +Instructions in the virtual +machine can create a new cursor (OpenRead or OpenWrite), +read data from a cursor +(Column), advance the cursor to the next entry in the table +(Next) or index (NextIdx), and perform many other operations. +All cursors are automatically +closed when the virtual machine terminates.

    + +

    The virtual machine contains an arbitrary number of register +locations with addresses beginning at one and growing upward. +Each memory location can hold an arbitrary string. The registers +hold all intermediate results of a calculation.

    + +

    Viewing Programs Generated By SQLite

    + +

    Every SQL statement that SQLite interprets results in a program +for the virtual machine. But if you precede the SQL statement with +the keyword EXPLAIN the virtual machine will not execute the +program. Instead, the instructions of the program will be returned +like a query result. This feature is useful for debugging and +for learning how the virtual machine operates.

    + +

    You can use the sqlite3.exe command-line interface (CLI) +tool to see the +instructions generated by an SQL statement. The following is +an example:

    + +
    sqlite3 ex1.db
    +sqlite> .explain
    +sqlite> explain delete from tbl1 where two<20;
    +addr  opcode         p1    p2    p3    p4         p5  comment
    +----  -------------  ----  ----  ----  ---------  --  -------
    +0     Trace          0     0     0     explain..  00         
    +1     Goto           0     20    0                00         
    +2     OpenRead       0     2     0                00  tbl    
    +3     SetNumColumns  0     2     0                00         
    +4     Rewind         0     11    0                00         
    +5     Column         0     1     2                00  tbl.two
    +6     Integer        20    3     0                00         
    +7     Ge             3     10    2     cs(BINARY) 6a         
    +8     Rowid          0     1     0                00         
    +9     FifoWrite      1     0     0                00         
    +10    Next           0     5     0                00         
    +11    Close          0     0     0                00         
    +12    OpenWrite      0     2     0                00  tbl    
    +13    SetNumColumns  0     2     0                00         
    +14    FifoRead       1     18    0                00         
    +15    NotExists      0     17    1                00         
    +16    Delete         0     1     0     tbl        00         
    +17    Goto           0     14    0                00         
    +18    Close          0     0     0                00         
    +19    Halt           0     0     0                00         
    +20    Transaction    0     1     0                00         
    +21    VerifyCookie   0     1     0                00         
    +22    TableLock      -1    2     0     tbl        00         
    +23    Goto           0     2     0                00
    + +

    All you have to do is add the EXPLAIN keyword to the front of the +SQL statement. But if you use the ".explain" command in the CLI, +it will set up the output mode to make the program more easily +viewable.

    + +

    Depending on compile-time options, you +can put the SQLite virtual machine in a mode where it will trace its +execution by writing messages to standard output. The non-standard +SQL "PRAGMA" commands can be used to turn tracing on and off. To +turn tracing on, enter: +

    + +
    +PRAGMA vdbe_trace=on;
    +
    + +

    +You can turn tracing back off by entering a similar statement but +changing the value "on" to "off".

    + +

    The Opcodes

    + +

    There are currently 137 +opcodes defined by the virtual machine. +All currently defined opcodes are described in the table below. +This table was generated automatically by scanning the source code +from the file vdbe.c.

    + +

    + + + +
    Opcode NameDescription

    Add

    Add the value in register P1 to the value in register P2 +and store the result in register P3. +If either input is NULL, the result is NULL.

    AddImm

    Add the constant P2 to the value in register P1. +The result is always an integer.

    + +

    To force any register to be an integer, just add 0.

    Affinity

    Apply affinities to a range of P2 registers starting with P1.

    + +

    P4 is a string that is P2 characters long. The nth character of the +string indicates the column affinity that should be used for the nth +memory cell in the range.

    AggFinal

    Execute the finalizer function for an aggregate. P1 is +the memory location that is the accumulator for the aggregate.

    + +

    P2 is the number of arguments that the step function takes and +P4 is a pointer to the FuncDef for this function. The P2 +argument is not used by this opcode. It is only there to disambiguate +functions that can take varying numbers of arguments. The +P4 argument is only needed for the degenerate case where +the step function was not previously called.

    AggStep

    Execute the step function for an aggregate. The +function has P5 arguments. P4 is a pointer to the FuncDef +structure that specifies the function. Use register +P3 as the accumulator.

    + +

    The P5 arguments are taken from register P2 and its +successors.

    And

    Take the logical AND of the values in registers P1 and P2 and +write the result into register P3.

    + +

    If either P1 or P2 is 0 (false) then the result is 0 even if +the other input is NULL. A NULL and true or two NULLs give +a NULL output.
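    This three-valued logic can be seen directly from SQL, without looking at the generated program; for example:

    SELECT 1 AND NULL;     -- NULL: true AND unknown is unknown
    SELECT 0 AND NULL;     -- 0: a false operand forces a false result
    SELECT NULL AND NULL;  -- NULL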

    AutoCommit

    Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll +back any currently active btree transactions. If there are any active +VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if +there are active writing VMs or active VMs that use shared cache.

    + +

    This instruction causes the VM to halt.

    BitAnd

    Take the bit-wise AND of the values in register P1 and P2 and +store the result in register P3. +If either input is NULL, the result is NULL.

    BitNot

    Interpret the content of register P1 as an integer. Store the +ones-complement of the P1 value into register P2. If P1 holds +a NULL then store a NULL in P2.

    BitOr

    Take the bit-wise OR of the values in register P1 and P2 and +store the result in register P3. +If either input is NULL, the result is NULL.

    Blob

    P4 points to a blob of data P1 bytes long. Store this +blob in register P2. This instruction is not coded directly +by the compiler. Instead, the compiler layer specifies +an OP_HexBlob opcode, with the hex string representation of +the blob as P4. This opcode is transformed to an OP_Blob +the first time it is executed.

    Clear

    Delete all contents of the database table or index whose root page +in the database file is given by P1. But, unlike Destroy, do not +remove the table or index from the database file.

    + +

The table being cleared is in the main database file if P2==0. If P2==1 then the table to be cleared is in the auxiliary database file that is used to store tables created using CREATE TEMPORARY TABLE.

    + +

    If the P3 value is non-zero, then the table referred to must be an +intkey table (an SQL table, not an index). In this case the row change +count is incremented by the number of rows in the table being cleared. +If P3 is greater than zero, then the value stored in register P3 is +also incremented by the number of rows in the table being cleared.

    + +

    See also: Destroy

    Close

    Close a cursor previously opened as P1. If P1 is not +currently open, this instruction is a no-op.

    CollSeq

    P4 is a pointer to a CollSeq struct. If the next call to a user function +or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will +be returned. This is used by the built-in min(), max() and nullif() +functions.

    + +

    The interface used by the implementation of the aforementioned functions +to retrieve the collation sequence set by this opcode is not available +publicly, only to user functions defined in func.c.

    Column

Interpret the data that cursor P1 points to as a structure built using the MakeRecord instruction. (See the MakeRecord opcode for additional information about the format of the data.) Extract the P2-th column from this record. If there are fewer than (P2+1) values in the record, extract a NULL.

    + +

    The value extracted is stored in register P3.

    + +

    If the column contains fewer than P2 fields, then extract a NULL. Or, +if the P4 argument is a P4_MEM use the value of the P4 argument as +the result.

    Compare

Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of the comparison for use by the next OP_Jump instruction.

    + +

    P4 is a KeyInfo structure that defines collating sequences and sort +orders for the comparison. The permutation applies to registers +only. The KeyInfo elements are used sequentially.

    + +

    The comparison is a sort comparison, so NULLs compare equal, +NULLs are less than numbers, numbers are less than strings, +and strings are less than blobs.

    Concat

    Add the text in register P1 onto the end of the text in +register P2 and store the result in register P3. +If either the P1 or P2 text are NULL then store NULL in P3.

    + +

    P3 = P2 || P1

    + +

    It is illegal for P1 and P3 to be the same register. Sometimes, +if P3 is the same register as P2, the implementation is able +to avoid a memcpy().
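    At the SQL level this opcode implements the || operator; a quick illustration:

    SELECT 'ab' || 'cd';   -- 'abcd'
    SELECT 'ab' || NULL;   -- NULL, because a NULL operand makes the whole result NULL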

    ContextPop

    Restore the Vdbe context to the state it was in when contextPush was last +executed. The context stores the last insert row id, the last statement +change count, and the current statement change count.

    ContextPush

    Save the current Vdbe context such that it can be restored by a ContextPop +opcode. The context stores the last insert row id, the last statement change +count, and the current statement change count.

    Copy

    Make a copy of register P1 into register P2.

    + +

    This instruction makes a deep copy of the value. A duplicate +is made of any string or blob constant. See also OP_SCopy.

    Count

    Store the number of entries (an integer value) in the table or index +opened by cursor P1 in register P2

    CreateIndex

    Allocate a new index in the main database file if P1==0 or in the +auxiliary database file if P1==1 or in an attached database if +P1>1. Write the root page number of the new table into +register P2.

    + +

    See documentation on OP_CreateTable for additional information.

    CreateTable

    Allocate a new table in the main database file if P1==0 or in the +auxiliary database file if P1==1 or in an attached database if +P1>1. Write the root page number of the new table into +register P2

    + +

    The difference between a table and an index is this: A table must +have a 4-byte integer key and can have arbitrary data. An index +has an arbitrary key but no data.

    + +

    See also: CreateIndex

    Delete

    Delete the record at which the P1 cursor is currently pointing.

    + +

The cursor will be left pointing at either the next or the previous record in the table. If it is left pointing at the next record, then the next Next instruction will be a no-op. Hence it is OK to delete a record from within a Next loop.

    + +

    If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is +incremented (otherwise not).

    + +

P1 must not be a pseudo-table. It has to be a real table with multiple rows.

    + +

    If P4 is not NULL, then it is the name of the table that P1 is +pointing to. The update hook will be invoked, if it exists. +If P4 is not NULL then the P1 cursor must have been positioned +using OP_NotFound prior to invoking this opcode.

    Destroy

    Delete an entire database table or index whose root page in the database +file is given by P1.

    + +

The table being destroyed is in the main database file if P3==0. If P3==1 then the table to be destroyed is in the auxiliary database file that is used to store tables created using CREATE TEMPORARY TABLE.

    + +

    If AUTOVACUUM is enabled then it is possible that another root page +might be moved into the newly deleted root page in order to keep all +root pages contiguous at the beginning of the database. The former +value of the root page that moved - its value before the move occurred - +is stored in register P2. If no page +movement was required (because the table being dropped was already +the last one in the database) then a zero is stored in register P2. +If AUTOVACUUM is disabled then a zero is stored in register P2.

    + +

    See also: Clear

    Divide

    Divide the value in register P1 by the value in register P2 +and store the result in register P3. If the value in register P2 +is zero, then the result is NULL. +If either input is NULL, the result is NULL.

    DropIndex

    Remove the internal (in-memory) data structures that describe +the index named P4 in database P1. This is called after an index +is dropped in order to keep the internal representation of the +schema consistent with what is on disk.

    DropTable

    Remove the internal (in-memory) data structures that describe +the table named P4 in database P1. This is called after a table +is dropped in order to keep the internal representation of the +schema consistent with what is on disk.

    DropTrigger

    Remove the internal (in-memory) data structures that describe +the trigger named P4 in database P1. This is called after a trigger +is dropped in order to keep the internal representation of the +schema consistent with what is on disk.

    Eq

    This works just like the Lt opcode except that the jump is taken if +the operands in registers P1 and P3 are equal. +See the Lt opcode for additional information.

    Expire

    Cause precompiled statements to become expired. An expired statement +fails with an error code of SQLITE_SCHEMA if it is ever executed +(via sqlite3_step()).

    + +

    If P1 is 0, then all SQL statements become expired. If P1 is non-zero, +then only the currently executing statement is affected.

    Found

Register P3 holds a blob constructed by MakeRecord. P1 is an index. If an entry that matches the value in register P3 exists in P1 then jump to P2. If the P3 value does not match any entry in P1 then fall through. The P1 cursor is left pointing at the matching entry if it exists.

    + +

    This instruction is used to implement the IN operator where the +left-hand side is a SELECT statement. P1 may be a true index, or it +may be a temporary index that holds the results of the SELECT +statement. This instruction is also used to implement the +DISTINCT keyword in SELECT statements.

    + +

    This instruction checks if index P1 contains a record for which +the first N serialized values exactly match the N serialized values +in the record in register P3, where N is the total number of values in +the P3 record (the P3 record is a prefix of the P1 record).

    + +

    See also: NotFound, IsUnique, NotExists

    Function

    Invoke a user function (P4 is a pointer to a Function structure that +defines the function) with P5 arguments taken from register P2 and +successors. The result of the function is stored in register P3. +Register P3 must not be one of the function inputs.

    + +

    P1 is a 32-bit bitmask indicating whether or not each argument to the +function was determined to be constant at compile time. If the first +argument was constant then bit 0 of P1 is set. This is used to determine +whether meta data associated with a user function argument using the +sqlite3_set_auxdata() API may be safely retained until the next +invocation of this opcode.

    + +

    See also: AggStep and AggFinal

    Ge

    This works just like the Lt opcode except that the jump is taken if +the content of register P3 is greater than or equal to the content of +register P1. See the Lt opcode for additional information.

    Gosub

    Write the current address onto register P1 +and then jump to address P2.

    Goto

    An unconditional jump to address P2. +The next instruction executed will be +the one at index P2 from the beginning of +the program.

    Gt

    This works just like the Lt opcode except that the jump is taken if +the content of register P3 is greater than the content of +register P1. See the Lt opcode for additional information.

    Halt

    Exit immediately. All open cursors, etc are closed +automatically.

    + +

    P1 is the result code returned by sqlite3_exec(), sqlite3_reset(), +or sqlite3_finalize(). For a normal halt, this should be SQLITE_OK (0). +For errors, it can be some other value. If P1!=0 then P2 will determine +whether or not to rollback the current transaction. Do not rollback +if P2==OE_Fail. Do the rollback if P2==OE_Rollback. If P2==OE_Abort, +then back out all changes that have occurred during this execution of the +VDBE, but do not rollback the transaction.

    + +

    If P4 is not null then it is an error message string.

    + +

    There is an implied "Halt 0 0 0" instruction inserted at the very end of +every program. So a jump past the last instruction of the program +is the same as executing Halt.

    HaltIfNull

Check the value in register P3. If it is NULL then Halt using parameters P1, P2, and P4 as if this were a Halt instruction. If the value in register P3 is not NULL, then this routine is a no-op.

    IdxDelete

    The content of P3 registers starting at register P2 form +an unpacked index key. This opcode removes that entry from the +index opened by cursor P1.

    IdxGE

    The P4 register values beginning with P3 form an unpacked index +key that omits the ROWID. Compare this key value against the index +that P1 is currently pointing to, ignoring the ROWID on the P1 index.

    + +

    If the P1 index entry is greater than or equal to the key value +then jump to P2. Otherwise fall through to the next instruction.

    + +

If P5 is non-zero then the key value is increased by an epsilon prior to the comparison. This makes the opcode work like IdxGT except that if the key from register P3 is a prefix of the key in the cursor, the result is false whereas it would be true with IdxGT.

    IdxInsert

    Register P2 holds a SQL index key made using the +MakeRecord instructions. This opcode writes that key +into the index P1. Data for the entry is nil.

    + +

    P3 is a flag that provides a hint to the b-tree layer that this +insert is likely to be an append.

    + +

    This instruction only works for indices. The equivalent instruction +for tables is OP_Insert.

    IdxLT

    The P4 register values beginning with P3 form an unpacked index +key that omits the ROWID. Compare this key value against the index +that P1 is currently pointing to, ignoring the ROWID on the P1 index.

    + +

    If the P1 index entry is less than the key value then jump to P2. +Otherwise fall through to the next instruction.

    + +

    If P5 is non-zero then the key value is increased by an epsilon prior +to the comparison. This makes the opcode work like IdxLE.

    IdxRowid

    Write into register P2 an integer which is the last entry in the record at +the end of the index key pointed to by cursor P1. This integer should be +the rowid of the table entry to which this index entry points.

    + +

    See also: Rowid, MakeRecord.

    If

Jump to P2 if the value in register P1 is true. The value is considered true if it is numeric and non-zero. If the value in P1 is NULL then take the jump if P3 is true.

    IfNeg

    If the value of register P1 is less than zero, jump to P2.

    + +

    It is illegal to use this instruction on a register that does +not contain an integer. An assertion fault will result if you try.

    IfNot

Jump to P2 if the value in register P1 is false. The value is considered false if it has a numeric value of zero. If the value in P1 is NULL then take the jump if P3 is true.

    IfPos

    If the value of register P1 is 1 or greater, jump to P2.

    + +

    It is illegal to use this instruction on a register that does +not contain an integer. An assertion fault will result if you try.

    IfZero

    If the value of register P1 is exactly 0, jump to P2.

    + +

    It is illegal to use this instruction on a register that does +not contain an integer. An assertion fault will result if you try.

    IncrVacuum

    Perform a single step of the incremental vacuum procedure on +the P1 database. If the vacuum has finished, jump to instruction +P2. Otherwise, fall through to the next instruction.

    Insert

Write an entry into the table of cursor P1. A new entry is created if it doesn't already exist or the data for an existing entry is overwritten. The data is the value stored in register number P2. The key is stored in register P3. The key must be an integer.

    + +

    If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is +incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set, +then rowid is stored for subsequent return by the +sqlite3_last_insert_rowid() function (otherwise it is unmodified).

    + +

    Parameter P4 may point to a string containing the table-name, or +may be NULL. If it is not NULL, then the update-hook +(sqlite3.xUpdateCallback) is invoked following a successful insert.

    + +

    (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically +allocated, then ownership of P2 is transferred to the pseudo-cursor +and register P2 becomes ephemeral. If the cursor is changed, the +value of register P2 will then change. Make sure this does not +cause any problems.)

    + +

    This instruction only works on tables. The equivalent instruction +for indices is OP_IdxInsert.

    Int64

    P4 is a pointer to a 64-bit integer value. +Write that value into register P2.

    Integer

    The 32-bit integer value P1 is written into register P2.

    IntegrityCk

    Do an analysis of the currently open database. Store in +register P1 the text of an error message describing any problems. +If no problems are found, store a NULL in register P1.

    + +

    The register P3 contains the maximum number of allowed errors. +At most reg(P3) errors will be reported. +In other words, the analysis stops as soon as reg(P1) errors are +seen. Reg(P1) is updated with the number of errors remaining.

    + +

The root page numbers of all tables in the database are integers stored in reg(P1), reg(P1+1), reg(P1+2), .... There are P2 tables total.

    + +

    If P5 is not zero, the check is done on the auxiliary database +file, not the main database file.

    + +

    This opcode is used to implement the integrity_check pragma.
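    From SQL the check is reached through that pragma, for example:

    PRAGMA integrity_check;      -- returns the single row 'ok' when no problems are found
    PRAGMA integrity_check(10);  -- stop after reporting at most 10 problems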

    IsNull

    Jump to P2 if the value in register P1 is NULL.

    IsUnique

    Cursor P1 is open on an index. So it has no data and its key consists +of a record generated by OP_MakeRecord where the last field is the +rowid of the entry that the index refers to.

    + +

    The P3 register contains an integer record number. Call this record +number R. Register P4 is the first in a set of N contiguous registers +that make up an unpacked index key that can be used with cursor P1. +The value of N can be inferred from the cursor. N includes the rowid +value appended to the end of the index record. This rowid value may +or may not be the same as R.

    + +

    If any of the N registers beginning with register P4 contains a NULL +value, jump immediately to P2.

    + +

    Otherwise, this instruction checks if cursor P1 contains an entry +where the first (N-1) fields match but the rowid value at the end +of the index entry is not R. If there is no such entry, control jumps +to instruction P2. Otherwise, the rowid of the conflicting index +entry is copied to register P3 and control falls through to the next +instruction.

    + +

    See also: NotFound, NotExists, Found

    Jump

Jump to the instruction at address P1, P2, or P3 depending on whether, in the most recent OP_Compare instruction, the P1 vector was less than, equal to, or greater than the P2 vector, respectively.

    Last

    The next use of the Rowid or Column or Next instruction for P1 +will refer to the last entry in the database table or index. +If the table or index is empty and P2>0, then jump immediately to P2. +If P2 is 0 or if the table or index is not empty, fall through +to the following instruction.

    Le

    This works just like the Lt opcode except that the jump is taken if +the content of register P3 is less than or equal to the content of +register P1. See the Lt opcode for additional information.

    LoadAnalysis

    Read the sqlite_stat1 table for database P1 and load the content +of that table into the internal index hash table. This will cause +the analysis to be used when preparing all subsequent queries.

    Lt

    Compare the values in register P1 and P3. If reg(P3)<reg(P1) then +jump to address P2.

    + +

    If the SQLITE_JUMPIFNULL bit of P5 is set and either reg(P1) or +reg(P3) is NULL then take the jump. If the SQLITE_JUMPIFNULL +bit is clear then fall thru if either operand is NULL.

    + +

    The SQLITE_AFF_MASK portion of P5 must be an affinity character - +SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made +to coerce both inputs according to this affinity before the +comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric +affinity is used. Note that the affinity conversions are stored +back into the input registers P1 and P3. So this opcode can cause +persistent changes to registers P1 and P3.

    + +

Once any conversions have taken place, and neither value is NULL, the values are compared. If both values are blobs then memcmp() is used to determine the results of the comparison. If both values are text, then the appropriate collating function specified in P4 is used to do the comparison. If P4 is not specified then memcmp() is used to compare text strings. If both values are numeric, then a numeric comparison is used. If the two values are of different types, then numbers are considered less than strings and strings are considered less than blobs.

    + +

    If the SQLITE_STOREP2 bit of P5 is set, then do not jump. Instead, +store a boolean result (either 0, or 1, or NULL) in register P2.
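    A bare comparison used as a value in a SELECT typically takes this SQLITE_STOREP2 form, and its NULL handling is easy to observe from SQL:

    SELECT 1 < 2;     -- 1
    SELECT 2 < 1;     -- 0
    SELECT 1 < NULL;  -- NULL: comparing against NULL yields NULL, not 0 or 1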

    MakeRecord

    Convert P2 registers beginning with P1 into a single entry +suitable for use as a data record in a database table or as a key +in an index. The details of the format are irrelevant as long as +the OP_Column opcode can decode the record later. +Refer to source code comments for the details of the record +format.

    + +

    P4 may be a string that is P2 characters long. The nth character of the +string indicates the column affinity that should be used for the nth +field of the index key.

    + +

    The mapping from character to affinity is given by the SQLITE_AFF_ +macros defined in sqliteInt.h.

    + +

    If P4 is NULL then all index fields have the affinity NONE.
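    To see MakeRecord and its P4 affinity string in context, EXPLAIN an INSERT on a throwaway table; the exact registers and affinity string depend on the schema and the SQLite version, so no listing is reproduced here:

    CREATE TABLE t1(a INTEGER, b TEXT);
    EXPLAIN INSERT INTO t1 VALUES(1, 'hello');  -- the row is assembled with MakeRecord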

    MemMax

    Set the value of register P1 to the maximum of its current value +and the value in register P2.

    + +

    This instruction throws an error if the memory cell is not initially +an integer.

    Move

Move the values in registers P1..P1+P3-1 over into registers P2..P2+P3-1. Registers P1..P1+P3-1 are left holding a NULL. It is an error for register ranges P1..P1+P3-1 and P2..P2+P3-1 to overlap.

    Multiply

    Multiply the value in register P1 by the value in register P2 +and store the result in register P3. +If either input is NULL, the result is NULL.

    MustBeInt

    Force the value in register P1 to be an integer. If the value +in P1 is not an integer and cannot be converted into an integer +without data loss, then jump immediately to P2, or if P2==0 +raise an SQLITE_MISMATCH exception.

    Ne

    This works just like the Lt opcode except that the jump is taken if +the operands in registers P1 and P3 are not equal. See the Lt opcode for +additional information.

    NewRowid

Get a new integer record number (a.k.a. "rowid") used as the key to a table. The record number has not previously been used as a key in the database table that cursor P1 points to. The new record number is written to register P2.

    + +

    If P3>0 then P3 is a register that holds the largest previously +generated record number. No new record numbers are allowed to be less +than this value. When this value reaches its maximum, a SQLITE_FULL +error is generated. The P3 register is updated with the generated +record number. This P3 mechanism is used to help implement the +AUTOINCREMENT feature.
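    The P3 mechanism described above is what supports tables declared with AUTOINCREMENT; a minimal sketch using a hypothetical table:

    CREATE TABLE log(id INTEGER PRIMARY KEY AUTOINCREMENT, msg TEXT);
    INSERT INTO log(msg) VALUES('first');  -- rowids for log never reuse previously issued values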

    Next

    Advance cursor P1 so that it points to the next key/data pair in its +table or index. If there are no more key/value pairs then fall through +to the following instruction. But if the cursor advance was successful, +jump immediately to P2.

    + +

    The P1 cursor must be for a real table, not a pseudo-table.

    + +

    See also: Prev

    Noop

    Do nothing. This instruction is often useful as a jump +destination.

    Not

    Interpret the value in register P1 as a boolean value. Store the +boolean complement in register P2. If the value in register P1 is +NULL, then a NULL is stored in P2.

    NotExists

Use the content of register P3 as an integer key. If a record with that key does not exist in the table of P1, then jump to P2. If the record does exist, then fall through. The cursor is left pointing to the record if it exists.

    + +

    The difference between this operation and NotFound is that this +operation assumes the key is an integer and that P1 is a table whereas +NotFound assumes key is a blob constructed from MakeRecord and +P1 is an index.

    + +

    See also: Found, NotFound, IsUnique

    NotFound

Register P3 holds a blob constructed by MakeRecord. P1 is an index. If no entry exists in P1 that matches the blob then jump to P2. If an entry does exist, fall through. The cursor is left pointing to the entry that matches.

    + +

    See also: Found, NotExists, IsUnique

    NotNull

    Jump to P2 if the value in register P1 is not NULL.

    Null

    Write a NULL into register P2.

    NullRow

    Move the cursor P1 to a null row. Any OP_Column operations +that occur while the cursor is on the null row will always +write a NULL.

    OpenEphemeral

    Open a new cursor P1 to a transient table. +The cursor is always opened read/write even if +the main database is read-only. The transient or virtual +table is deleted automatically when the cursor is closed.

    + +

    P2 is the number of columns in the virtual table. +The cursor points to a BTree table if P4==0 and to a BTree index +if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure +that defines the format of keys in the index.

    + +

This opcode was once called OpenTemp. But that created confusion because the term "temp table" might refer either to a TEMP table at the SQL level, or to a table opened by this opcode. Then this opcode was called OpenVirtual. But that created confusion with the whole virtual-table idea.

    OpenPseudo

    Open a new cursor that points to a fake table that contains a single +row of data. Any attempt to write a second row of data causes the +first row to be deleted. All data is deleted when the cursor is +closed.

    + +

A pseudo-table created by this opcode is useful for holding the NEW or OLD tables in a trigger. It is also used to hold a single row of output from the sorter so that the row can be decomposed into individual columns using the OP_Column opcode.

    + +

When OP_Insert is executed to insert a row into the pseudo table, the pseudo-table cursor may or may not make its own copy of the original row data. If P2 is 0, then the pseudo-table will copy the original row data. Otherwise, a pointer to the original memory cell is stored. In this case, the vdbe program must ensure that the memory cell containing the row data is not overwritten until the pseudo table is closed (or a new row is inserted into it).

    + +

    P3 is the number of fields in the records that will be stored by +the pseudo-table.

    OpenRead

Open a read-only cursor for the database table whose root page is P2 in a database file. The database file is determined by P3. P3==0 means the main database, P3==1 means the database used for temporary tables, and P3>1 means use the corresponding attached database. Give the new cursor an identifier of P1. The P1 values need not be contiguous but all P1 values should be small integers. It is an error for P1 to be negative.

    + +

    If P5!=0 then use the content of register P2 as the root page, not +the value of P2 itself.

    + +

    There will be a read lock on the database whenever there is an +open cursor. If the database was unlocked prior to this instruction +then a read lock is acquired as part of this instruction. A read +lock allows other processes to read the database but prohibits +any other process from modifying the database. The read lock is +released when all cursors are closed. If this instruction attempts +to get a read lock but fails, the script terminates with an +SQLITE_BUSY error code.

    + +

    The P4 value may be either an integer (P4_INT32) or a pointer to +a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo +structure, then said structure defines the content and collating +sequence of the index being opened. Otherwise, if P4 is an integer +value, it is set to the number of columns in the table.

    + +

    See also OpenWrite.

    OpenWrite

    Open a read/write cursor named P1 on the table or index whose root +page is P2. Or if P5!=0 use the content of register P2 to find the +root page.

    + +

    The P4 value may be either an integer (P4_INT32) or a pointer to +a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo +structure, then said structure defines the content and collating +sequence of the index being opened. Otherwise, if P4 is an integer +value, it is set to the number of columns in the table, or to the +largest index of any column of the table that is actually used.

    + +

    This instruction works just like OpenRead except that it opens the cursor +in read/write mode. For a given table, there can be one or more read-only +cursors or a single read/write cursor but not both.

    + +

    See also OpenRead.

    Or

    Take the logical OR of the values in register P1 and P2 and +store the answer in register P3.

    + +

    If either P1 or P2 is nonzero (true) then the result is 1 (true) +even if the other input is NULL. A NULL and false or two NULLs +give a NULL output.
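    As with And, the behaviour is visible directly from SQL:

    SELECT 1 OR NULL;   -- 1: a true operand forces a true result
    SELECT 0 OR NULL;   -- NULL: false OR unknown is unknown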

    Pagecount

    Write the current number of pages in database P1 to memory cell P2.

    ParseSchema

    Read and parse all entries from the SQLITE_MASTER table of database P1 +that match the WHERE clause P4. P2 is the "force" flag. Always do +the parsing if P2 is true. If P2 is false, then this routine is a +no-op if the schema is not currently loaded. In other words, if P2 +is false, the SQLITE_MASTER table is only parsed if the rest of the +schema is already loaded into the symbol table.

    + +

    This opcode invokes the parser to create a new virtual machine, +then runs the new virtual machine. It is thus a re-entrant opcode.

    Permutation

    Set the permutation used by the OP_Compare operator to be the array +of integers in P4.

    + +

    The permutation is only valid until the next OP_Permutation, OP_Compare, +OP_Halt, or OP_ResultRow. Typically the OP_Permutation should occur +immediately prior to the OP_Compare.

    Prev

Back up cursor P1 so that it points to the previous key/data pair in its table or index. If there are no previous key/value pairs then fall through to the following instruction. But if the cursor backup was successful, jump immediately to P2.

    + +

    The P1 cursor must be for a real table, not a pseudo-table.

    ReadCookie

    Read cookie number P3 from database P1 and write it into register P2. +P3==1 is the schema version. P3==2 is the database format. +P3==3 is the recommended pager cache size, and so forth. P1==0 is +the main database file and P1==1 is the database file used to store +temporary tables.

    + +

    There must be a read-lock on the database (either a transaction +must be started or there must be an open cursor) before +executing this instruction.

    Real

    P4 is a pointer to a 64-bit floating point value. +Write that value into register P2.

    RealAffinity

    If register P1 holds an integer convert it to a real value.

    + +

    This opcode is used when extracting information from a column that +has REAL affinity. Such column values may still be stored as +integers, for space efficiency, but after extraction we want them +to have only a real value.

    Remainder

    Compute the remainder after integer division of the value in +register P1 by the value in register P2 and store the result in P3. +If the value in register P2 is zero the result is NULL. +If either operand is NULL, the result is NULL.

    ResetCount

This opcode resets the VM's internal change counter to 0. If P1 is true, then the value of the change counter is copied to the database handle change counter (returned by subsequent calls to sqlite3_changes()) before it is reset. This is used by trigger programs.

    ResultRow

    The registers P1 through P1+P2-1 contain a single row of +results. This opcode causes the sqlite3_step() call to terminate +with an SQLITE_ROW return code and it sets up the sqlite3_stmt +structure to provide access to the top P1 values as the result +row.

    Return

    Jump to the next instruction after the address in register P1.

    Rewind

    The next use of the Rowid or Column or Next instruction for P1 +will refer to the first entry in the database table or index. +If the table or index is empty and P2>0, then jump immediately to P2. +If P2 is 0 or if the table or index is not empty, fall through +to the following instruction.

    RowData

    Write into register P2 the complete row data for cursor P1. +There is no interpretation of the data. +It is just copied onto the P2 register exactly as +it is found in the database file.

    + +

The P1 cursor must be pointing to a valid row (not a NULL row) of a real table, not a pseudo-table.

    Rowid

Store in register P2 an integer which is the key of the table entry that P1 is currently pointing to.

    + +

    P1 can be either an ordinary table or a virtual table. There used to +be a separate OP_VRowid opcode for use with virtual tables, but this +one opcode now works for both table types.

    RowKey

Write into register P2 the complete row key for cursor P1. There is no interpretation of the data. The key is copied into the P2 register exactly as it is found in the database file.

    + +

The P1 cursor must be pointing to a valid row (not a NULL row) of a real table, not a pseudo-table.

    RowSetAdd

    Insert the integer value held by register P2 into a boolean index +held in register P1.

    + +

    An assertion fails if P2 is not an integer.

    RowSetRead

    Extract the smallest value from boolean index P1 and put that value into +register P3. Or, if boolean index P1 is initially empty, leave P3 +unchanged and jump to instruction P2.

    RowSetTest

Register P3 is assumed to hold a 64-bit integer value. If register P1 contains a RowSet object and that RowSet object contains the value held in P3, jump to instruction P2. Otherwise, insert the integer in P3 into the RowSet and continue on to the next opcode.

    + +

The RowSet object is optimized for the case where successive sets of integers are inserted, where each set contains no duplicates. Each set of values is identified by a unique P4 value. The first set must have P4==0, the final set P4==-1. P4 must be either -1 or non-negative. For non-negative values of P4 only the lower 4 bits are significant.

    + +

    This allows optimizations: (a) when P4==0 there is no need to test +the rowset object for P3, as it is guaranteed not to contain it, +(b) when P4==-1 there is no need to insert the value, as it will +never be tested for, and (c) when a value that is part of set X is +inserted, there is no need to search to see if the same value was +previously inserted as part of set X (only if it was previously +inserted as part of some other set).

    Savepoint

    Open, release or rollback the savepoint named by parameter P4, depending +on the value of P1. To open a new savepoint, P1==0. To release (commit) an +existing savepoint, P1==1, or to rollback an existing savepoint P1==2.
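    These P1 values correspond to the SQL savepoint statements; for example (tbl1 is the example table used earlier):

    SAVEPOINT sp1;                  -- compiles to Savepoint with P1==0 (open)
    UPDATE tbl1 SET two = two + 1;
    ROLLBACK TO sp1;                -- Savepoint with P1==2 (rollback)
    RELEASE sp1;                    -- Savepoint with P1==1 (release/commit)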

    SCopy

    Make a shallow copy of register P1 into register P2.

    + +

    This instruction makes a shallow copy of the value. If the value +is a string or blob, then the copy is only a pointer to the +original and hence if the original changes so will the copy. +Worse, if the original is deallocated, the copy becomes invalid. +Thus the program must guarantee that the original will not change +during the lifetime of the copy. Use OP_Copy to make a complete +copy.

    Seek

    P1 is an open table cursor and P2 is a rowid integer. Arrange +for P1 to move so that it points to the rowid given by P2.

    + +

    This is actually a deferred seek. Nothing actually happens until +the cursor is used to read a record. That way, if no reads +occur, no unnecessary I/O happens.

    SeekGe

    If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +use the value in register P3 as the key. If cursor P1 refers +to an SQL index, then P3 is the first in an array of P4 registers +that are used as an unpacked index key.

    + +

    Reposition cursor P1 so that it points to the smallest entry that +is greater than or equal to the key value. If there are no records +greater than or equal to the key and P2 is not zero, then jump to P2.

    + +

    See also: Found, NotFound, Distinct, SeekLt, SeekGt, SeekLe

    SeekGt

    If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +use the value in register P3 as a key. If cursor P1 refers +to an SQL index, then P3 is the first in an array of P4 registers +that are used as an unpacked index key.

    + +

    Reposition cursor P1 so that it points to the smallest entry that +is greater than the key value. If there are no records greater than +the key and P2 is not zero, then jump to P2.

    + +

    See also: Found, NotFound, Distinct, SeekLt, SeekGe, SeekLe

    SeekLe

    If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +use the value in register P3 as a key. If cursor P1 refers +to an SQL index, then P3 is the first in an array of P4 registers +that are used as an unpacked index key.

    + +

    Reposition cursor P1 so that it points to the largest entry that +is less than or equal to the key value. If there are no records +less than or equal to the key and P2 is not zero, then jump to P2.

    + +

    See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLt

    SeekLt

    If cursor P1 refers to an SQL table (B-Tree that uses integer keys), +use the value in register P3 as a key. If cursor P1 refers +to an SQL index, then P3 is the first in an array of P4 registers +that are used as an unpacked index key.

    + +

    Reposition cursor P1 so that it points to the largest entry that +is less than the key value. If there are no records less than +the key and P2 is not zero, then jump to P2.

    + +

    See also: Found, NotFound, Distinct, SeekGt, SeekGe, SeekLe

    Sequence

    Find the next available sequence number for cursor P1. +Write the sequence number into register P2. +The sequence number on the cursor is incremented after this +instruction.

    SetCookie

    Write the content of register P3 (interpreted as an integer) +into cookie number P2 of database P1. P2==1 is the schema version. +P2==2 is the database format. P2==3 is the recommended pager cache +size, and so forth. P1==0 is the main database file and P1==1 is the +database file used to store temporary tables.

    + +

    A transaction must be started before executing this opcode.

    SetNumColumns

    This opcode sets the number of columns for the cursor opened by the +following instruction to P2.

    + +

    An OP_SetNumColumns is only useful if it occurs immediately before +one of the following opcodes:

    + +

    OpenRead +OpenWrite +OpenPseudo

    + +

    If the OP_Column opcode is to be executed on a cursor, then +this opcode must be present immediately before the opcode that +opens the cursor.

    ShiftLeft

Shift the integer value in register P2 to the left by the number of bits specified by the integer in register P1. Store the result in register P3. If either input is NULL, the result is NULL.

    ShiftRight

    Shift the integer value in register P2 to the right by the +number of bits specified by the integer in register P1. +Store the result in register P3. +If either input is NULL, the result is NULL.

    Sort

    This opcode does exactly the same thing as OP_Rewind except that +it increments an undocumented global variable used for testing.

    + +

    Sorting is accomplished by writing records into a sorting index, +then rewinding that index and playing it back from beginning to +end. We use the OP_Sort opcode instead of OP_Rewind to do the +rewinding so that the global variable will be incremented and +regression tests can determine whether or not the optimizer is +correctly optimizing out sorts.

    Statement

    Begin an individual statement transaction which is part of a larger +transaction. This is needed so that the statement +can be rolled back after an error without having to roll back the +entire transaction. The statement transaction will automatically +commit when the VDBE halts.

    + +

If the database connection is currently in autocommit mode (that is to say, if it is not in the middle of an explicit BEGIN...COMMIT) and if there are no other active statements on the same database connection, then this operation is a no-op. No statement transaction is needed since any error can use the normal ROLLBACK process to undo changes.

    + +

    If a statement transaction is started, then a statement journal file +will be allocated and initialized.

    + +

    The statement is begun on the database file with index P1. The main +database file has an index of 0 and the file used for temporary tables +has an index of 1.

    String

    The string value P4 of length P1 (bytes) is stored in register P2.

    String8

    P4 points to a nul terminated UTF-8 string. This opcode is transformed +into an OP_String before it is executed for the first time.

    Subtract

    Subtract the value in register P1 from the value in register P2 +and store the result in register P3. +If either input is NULL, the result is NULL.

    TableLock

    Obtain a lock on a particular table. This instruction is only used when +the shared-cache feature is enabled.

    + +

P1 is the index in sqlite3.aDb[] of the database on which the lock is acquired. A read lock is obtained if P3==0 or a write lock if P3==1.

    + +

    P2 contains the root-page of the table to lock.

    + +

    P4 contains a pointer to the name of the table being locked. This is only +used to generate an error message if the lock cannot be obtained.

    ToBlob

    Force the value in register P1 to be a BLOB. +If the value is numeric, convert it to a string first. +Strings are simply reinterpreted as blobs with no change +to the underlying data.

    + +

    A NULL value is not changed by this routine. It remains NULL.

    ToInt

Force the value in register P1 to be an integer. If the value is currently a real number, drop its fractional part. If the value is text or blob, try to convert it to an integer using the equivalent of atoi() and store 0 if no such conversion is possible.

    + +

    A NULL value is not changed by this routine. It remains NULL.

    ToNumeric

Force the value in register P1 to be numeric (either an integer or a floating-point number). If the value is text or blob, try to convert it to a number using the equivalent of atoi() or atof() and store 0 if no such conversion is possible.

    + +

    A NULL value is not changed by this routine. It remains NULL.

    ToReal

Force the value in register P1 to be a floating point number. If the value is currently an integer, convert it. If the value is text or blob, try to convert it to a real number using the equivalent of atof() and store 0.0 if no such conversion is possible.

    + +

    A NULL value is not changed by this routine. It remains NULL.

    ToText

    Force the value in register P1 to be text. +If the value is numeric, convert it to a string using the +equivalent of printf(). Blob values are unchanged and +are afterwards simply interpreted as text.

    + +

    A NULL value is not changed by this routine. It remains NULL.
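    These To* conversions are essentially what a SQL CAST performs, so their effect can be checked from SQL:

    SELECT CAST(123 AS TEXT);         -- '123'
    SELECT CAST('45abc' AS INTEGER);  -- 45: the numeric prefix is kept, as with atoi()
    SELECT CAST(NULL AS BLOB);        -- NULL: a cast never changes a NULL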

    Trace

If tracing is enabled (by the sqlite3_trace() interface), then the UTF-8 string contained in P4 is emitted on the trace callback.

    Transaction

    Begin a transaction. The transaction ends when a Commit or Rollback +opcode is encountered. Depending on the ON CONFLICT setting, the +transaction might also be rolled back if an error is encountered.

    + +

    P1 is the index of the database file on which the transaction is +started. Index 0 is the main database file and index 1 is the +file used for temporary tables. Indices of 2 or more are used for +attached databases.

    + +

    If P2 is non-zero, then a write-transaction is started. A RESERVED lock is +obtained on the database file when a write-transaction is started. No +other process can start another write transaction while this transaction is +underway. Starting a write transaction also creates a rollback journal. A +write transaction must be started before any changes can be made to the +database. If P2 is 2 or greater then an EXCLUSIVE lock is also obtained +on the file.

    + +

    If P2 is zero, then a read-lock is obtained on the database file.
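    One way to observe the P2 distinction is to EXPLAIN a read-only statement and a writing statement against the same database; the Transaction instruction's P2 operand differs (exact listings vary between versions, so none is reproduced here):

    EXPLAIN SELECT * FROM tbl1;             -- read-only: Transaction with P2==0
    EXPLAIN DELETE FROM tbl1 WHERE two<20;  -- write: Transaction with P2==1, as in the listing near the top of this page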

    Vacuum

    Vacuum the entire database. This opcode will cause other virtual +machines to be created and run. It may not be called from within +a transaction.

    Variable

    Transfer the values of bound parameters P1..P1+P3-1 into registers +P2..P2+P3-1.

    + +

    If the parameter is named, then its name appears in P4 and P3==1. +The P4 value is used by sqlite3_bind_parameter_name().

    VBegin

    P4 may be a pointer to an sqlite3_vtab structure. If so, call the +xBegin method for that table.

    + +

    Also, whether or not P4 is set, check that this is not being called from +within a callback to a virtual table xSync() method. If it is, the error +code will be set to SQLITE_LOCKED.

    VColumn

    Store the value of the P2-th column of +the row of the virtual-table that the +P1 cursor is pointing to into register P3.

    VCreate

    P4 is the name of a virtual table in database P1. Call the xCreate method +for that table.

    VDestroy

    P4 is the name of a virtual table in database P1. Call the xDestroy method +of that table.

    VerifyCookie

    Check the value of global database parameter number 0 (the +schema version) and make sure it is equal to P2. +P1 is the database number which is 0 for the main database file +and 1 for the file holding temporary tables and some higher number +for auxiliary databases.

    + +

The cookie changes its value whenever the database schema changes. This operation is used to detect when the cookie has changed and the current process needs to reread the schema.

    + +

    Either a transaction needs to have been started or an OP_Open needs +to be executed (to establish a read lock) before this opcode is +invoked.

    VFilter

    P1 is a cursor opened using VOpen. P2 is an address to jump to if +the filtered result set is empty.

    + +

    P4 is either NULL or a string that was generated by the xBestIndex +method of the module. The interpretation of the P4 string is left +to the module implementation.

    + +

    This opcode invokes the xFilter method on the virtual table specified +by P1. The integer query plan parameter to xFilter is stored in register +P3. Register P3+1 stores the argc parameter to be passed to the +xFilter method. Registers P3+2..P3+1+argc are the argc +additional parameters which are passed to +xFilter as argv. Register P3+2 becomes argv[0] when passed to xFilter.

    + +

    A jump is made to P2 if the result set after filtering would be empty.

    VNext

    Advance virtual table P1 to the next row in its result set and +jump to instruction P2. Or, if the virtual table has reached +the end of its result set, then fall through to the next instruction.

    VOpen

    P4 is a pointer to a virtual table object, an sqlite3_vtab structure. +P1 is a cursor number. This opcode opens a cursor to the virtual +table and stores that cursor in P1.

    VRename

    P4 is a pointer to a virtual table object, an sqlite3_vtab structure. +This opcode invokes the corresponding xRename method. The value +in register P1 is passed as the zName argument to the xRename method.

    VUpdate

P4 is a pointer to a virtual table object, an sqlite3_vtab structure. This opcode invokes the corresponding xUpdate method. P2 values are contiguous memory cells starting at P3 to pass to the xUpdate invocation. The value in register (P3+P2-1) corresponds to the P2-th element of the argv array passed to xUpdate.

    + +

    The xUpdate method will do a DELETE or an INSERT or both. +The argv[0] element (which corresponds to memory cell P3) +is the rowid of a row to delete. If argv[0] is NULL then no +deletion occurs. The argv[1] element is the rowid of the new +row. This can be NULL to have the virtual table select the new +rowid for itself. The subsequent elements in the array are +the values of columns in the new row.

    + +

    If P2==1 then no insert is performed. argv[0] is the rowid of +a row to delete.

    + +

    P1 is a boolean flag. If it is set to true and the xUpdate call +is successful, then the value returned by sqlite3_last_insert_rowid() +is set to the value of the rowid for the row just inserted.

    Yield

    Swap the program counter with the value in register P1.

    +
    +This page last modified 2008/10/03 02:14:01 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/opcode.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/opcode.tcl --- sqlite3-3.4.2/www/opcode.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/opcode.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,243 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: opcode.tcl,v 1.15 2005/03/09 12:26:51 danielk1977 Exp $} -source common.tcl -header {SQLite Virtual Machine Opcodes} -puts { -

    SQLite Virtual Machine Opcodes

    -} - -set fd [open [lindex $argv 0] r] -set file [read $fd [file size [lindex $argv 0]]] -close $fd -set current_op {} -foreach line [split $file \n] { - set line [string trim $line] - if {[string index $line 1]!="*"} { - set current_op {} - continue - } - if {[regexp {^/\* Opcode: } $line]} { - set current_op [lindex $line 2] - set txt [lrange $line 3 end] - regsub -all {>} $txt {\>} txt - regsub -all {<} $txt {\<} txt - set Opcode($current_op:args) $txt - lappend OpcodeList $current_op - continue - } - if {$current_op==""} continue - if {[regexp {^\*/} $line]} { - set current_op {} - continue - } - set line [string trim [string range $line 3 end]] - if {$line==""} { - append Opcode($current_op:text) \n

    - } else { - regsub -all {>} $line {\>} line - regsub -all {<} $line {\<} line - append Opcode($current_op:text) \n$line - } -} -unset file - -puts { -

    Introduction

    - -

    In order to execute an SQL statement, the SQLite library first parses -the SQL, analyzes the statement, then generates a short program to execute -the statement. The program is generated for a "virtual machine" implemented -by the SQLite library. This document describes the operation of that -virtual machine.

    - -

    This document is intended as a reference, not a tutorial. -A separate Virtual Machine Tutorial is -available. If you are looking for a narrative description -of how the virtual machine works, you should read the tutorial -and not this document. Once you have a basic idea of what the -virtual machine does, you can refer back to this document for -the details on a particular opcode. -Unfortunately, the virtual machine tutorial was written for -SQLite version 1.0. There are substantial changes in the virtual -machine for version 2.0 and the document has not been updated. -

    - -

    The source code to the virtual machine is in the vdbe.c source -file. All of the opcode definitions further down in this document are -contained in comments in the source file. In fact, the opcode table -in this document -was generated by scanning the vdbe.c source file -and extracting the necessary information from comments. So the -source code comments are really the canonical source of information -about the virtual machine. When in doubt, refer to the source code.

    - -

    Each instruction in the virtual machine consists of an opcode and -up to three operands named P1, P2 and P3. P1 may be an arbitrary -integer. P2 must be a non-negative integer. P2 is always the -jump destination in any operation that might cause a jump. -P3 is a null-terminated -string or NULL. Some operators use all three operands. Some use -one or two. Some operators use none of the operands.

    - -

    The virtual machine begins execution on instruction number 0. -Execution continues until (1) a Halt instruction is seen, or -(2) the program counter becomes one greater than the address of -last instruction, or (3) there is an execution error. -When the virtual machine halts, all memory -that it allocated is released and all database cursors it may -have had open are closed. If the execution stopped due to an -error, any pending transactions are terminated and changes made -to the database are rolled back.

    - -

    The virtual machine also contains an operand stack of unlimited -depth. Many of the opcodes use operands from the stack. See the -individual opcode descriptions for details.

    - -

    The virtual machine can have zero or more cursors. Each cursor -is a pointer into a single table or index within the database. -There can be multiple cursors pointing at the same index or table. -All cursors operate independently, even cursors pointing to the same -indices or tables. -The only way for the virtual machine to interact with a database -file is through a cursor. -Instructions in the virtual -machine can create a new cursor (Open), read data from a cursor -(Column), advance the cursor to the next entry in the table -(Next) or index (NextIdx), and many other operations. -All cursors are automatically -closed when the virtual machine terminates.

    - -

    The virtual machine contains an arbitrary number of fixed memory -locations with addresses beginning at zero and growing upward. -Each memory location can hold an arbitrary string. The memory -cells are typically used to hold the result of a scalar SELECT -that is part of a larger expression.

    - -

    The virtual machine contains a single sorter. -The sorter is able to accumulate records, sort those records, -then play the records back in sorted order. The sorter is used -to implement the ORDER BY clause of a SELECT statement.

    - -

    The virtual machine contains a single "List". -The list stores a list of integers. The list is used to hold the -rowids for records of a database table that needs to be modified. -The WHERE clause of an UPDATE or DELETE statement scans through -the table and writes the rowid of every record to be modified -into the list. Then the list is played back and the table is modified -in a separate step.

    - -

    The virtual machine can contain an arbitrary number of "Sets". -Each set holds an arbitrary number of strings. Sets are used to -implement the IN operator with a constant right-hand side.

    - -

    The virtual machine can open a single external file for reading. -This external read file is used to implement the COPY command.

    - -

    Finally, the virtual machine can have a single set of aggregators. -An aggregator is a device used to implement the GROUP BY clause -of a SELECT. An aggregator has one or more slots that can hold -values being extracted by the select. The number of slots is the -same for all aggregators and is defined by the AggReset operation. -At any point in time a single aggregator is current or "has focus". -There are operations to read or write to memory slots of the aggregator -in focus. There are also operations to change the focus aggregator -and to scan through all aggregators.

    - -

    Viewing Programs Generated By SQLite

    - -

    Every SQL statement that SQLite interprets results in a program -for the virtual machine. But if you precede the SQL statement with -the keyword "EXPLAIN" the virtual machine will not execute the -program. Instead, the instructions of the program will be returned -like a query result. This feature is useful for debugging and -for learning how the virtual machine operates.

    - -

    You can use the sqlite command-line tool to see the -instructions generated by an SQL statement. The following is -an example:

    } - -proc Code {body} { - puts {
    } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
    \n body - puts $body - puts {
} -} -
-Code {
-$ (((sqlite ex1)))
-sqlite> (((.explain)))
-sqlite> (((explain delete from tbl1 where two<20;)))
-addr  opcode        p1     p2     p3
-----  ------------  -----  -----  ----------------------------------------
-0     Transaction   0      0
-1     VerifyCookie  219    0
-2     ListOpen      0      0
-3     Open          0      3      tbl1
-4     Rewind        0      0
-5     Next          0      12
-6     Column        0      1
-7     Integer       20     0
-8     Ge            0      5
-9     Recno         0      0
-10    ListWrite     0      0
-11    Goto          0      5
-12    Close         0      0
-13    ListRewind    0      0
-14    OpenWrite     0      3
-15    ListRead      0      19
-16    MoveTo        0      0
-17    Delete        0      0
-18    Goto          0      15
-19    ListClose     0      0
-20    Commit        0      0
-}
-
-puts {
-

    All you have to do is add the "EXPLAIN" keyword to the front of the -SQL statement. But if you use the ".explain" command to sqlite -first, it will set up the output mode to make the program more easily -viewable.

    - -

    If sqlite has been compiled without the "-DNDEBUG=1" option -(that is, with the NDEBUG preprocessor macro not defined) then you -can put the SQLite virtual machine in a mode where it will trace its -execution by writing messages to standard output. The non-standard -SQL "PRAGMA" comments can be used to turn tracing on and off. To -turn tracing on, enter: -

    - -
    -PRAGMA vdbe_trace=on;
    -
    - -

    -You can turn tracing back off by entering a similar statement but -changing the value "on" to "off".

    - -

    The Opcodes

    -} - -puts "

    There are currently [llength $OpcodeList] opcodes defined by -the virtual machine." -puts {All currently defined opcodes are described in the table below. -This table was generated automatically by scanning the source code -from the file vdbe.c.

    } - -puts { -

    -} -foreach op [lsort -dictionary $OpcodeList] { - puts {" -} -puts {
    Opcode NameDescription
    } - puts "$op" - puts "[string trim $Opcode($op:text)]

    } -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/optimizer.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/optimizer.tcl --- sqlite3-3.4.2/www/optimizer.tcl 2005-08-30 23:44:06.000000000 +0100 +++ sqlite3-3.6.16/www/optimizer.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,265 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: optimizer.tcl,v 1.1 2005/08/30 22:44:06 drh Exp $} -source common.tcl -header {The SQLite Query Optimizer} - -proc CODE {text} { - puts "
    "
    -  puts $text
    -  puts "
    " -} -proc IMAGE {name {caption {}}} { - puts "
    " - if {$caption!=""} { - puts "
    $caption" - } - puts "
    " -} -proc PARAGRAPH {text} { - puts "

    $text

    \n" -} -proc HEADING {level name} { - puts "$name" -} - -HEADING 1 {The SQLite Query Optimizer} - -PARAGRAPH { - This article describes how the SQLite query optimizer works. - This is not something you have to know in order to use SQLite - many - programmers use SQLite successfully without the slightest hint of what - goes on in the inside. - But a basic understanding of what SQLite is doing - behind the scenes will help you to write more efficient SQL. And the - knowledge gained by studying the SQLite query optimizer has broad - application since most other relational database engines operate - similarly. - A solid understanding of how the query optimizer works is also - required before making meaningful changes or additions to the SQLite, so - this article should be read closely by anyone aspiring - to hack the source code. -} - -HEADING 2 Background - -PARAGRAPH { - It is important to understand that SQL is a programming language. - SQL is a perculiar programming language in that it - describes what the programmer wants to compute not how - to compute it as most other programming languages do. - But perculiar or not, SQL is still just a programming language. -} - -PARAGRAPH { - It is very helpful to think of each SQL statement as a separate - program. - An important job of the SQL database engine is to translate each - SQL statement from its descriptive form that specifies what the - information is desired (the what) - into a procedural form that specifies how to go - about acquiring the desired information (the how). - The task of translating the what into a - how is assigned to the query optimizer. -} - -PARAGRAPH { - The beauty of SQL comes from the fact that the optimizer frees the programmer - from having to worry over the details of how. The programmer - only has to specify the what and then leave the optimizer - to deal with all of the minutae of implementing the - how. Thus the programmer is able to think and work at a - much higher level and leave the optimizer to stress over the low-level - work. -} - -HEADING 2 {Database Layout} - -PARAGRAPH { - An SQLite database consists of one or more "b-trees". - Each b-tree contains zero or more "rows". - A single row contains a "key" and some "data". - In general, both the key and the data are arbitrary binary - data of any length. - The keys must all be unique within a single b-tree. - Rows are stored in order of increasing key values - each - b-tree has a comparision functions for keys that determines - this order. -} - -PARAGRAPH { - In SQLite, each SQL table is stored as a b-tree where the - key is a 64-bit integer and the data is the content of the - table row. The 64-bit integer key is the ROWID. And, of course, - if the table has an INTEGER PRIMARY KEY, then that integer is just - an alias for the ROWID. -} - -PARAGRAPH { - Consider the following block of SQL code: -} - -CODE { - CREATE TABLE ex1( - id INTEGER PRIMARY KEY, - x VARCHAR(30), - y INTEGER - ); - INSERT INTO ex1 VALUES(NULL,'abc',12345); - INSERT INTO ex1 VALUES(NULL,456,'def'); - INSERT INTO ex1 VALUES(100,'hello','world'); - INSERT INTO ex1 VALUES(-5,'abc','xyz'); - INSERT INTO ex1 VALUES(54321,NULL,987); -} - -PARAGRAPH { - This code generates a new b-tree (named "ex1") containing 5 rows. - This table can be visualized as follows: -} -IMAGE table-ex1b2.gif - -PARAGRAPH { - Note that the key for each row if the b-tree is the INTEGER PRIMARY KEY - for that row. (Remember that the INTEGER PRIMARY KEY is just an alias - for the ROWID.) 
The other fields of the table form the data for each - entry in the b-tree. Note also that the b-tree entries are in ROWID order - which is different from the order that they were originally inserted. -} - -PARAGRAPH { - Now consider the following SQL query: -} -CODE { - SELECT y FROM ex1 WHERE x=456; -} - -PARAGRAPH { - When the SQLite parser and query optimizer are handed this query, they - have to translate it into a procedure that will find the desired result. - In this case, they do what is call a "full table scan". They start - at the beginning of the b-tree that contains the table and visit each - row. Within each row, the value of the "x" column is tested and when it - is found to match 456, the value of the "y" column is output. - We can represent this procedure graphically as follows: -} -IMAGE fullscanb.gif - -PARAGRAPH { - A full table scan is the access method of last resort. It will always - work. But if the table contains millions of rows and you are only looking - a single one, it might take a very long time to find the particular row - you are interested in. - In particular, the time needed to access a single row of the table is - proportional to the total number of rows in the table. - So a big part of the job of the optimizer is to try to find ways to - satisfy the query without doing a full table scan. -} -PARAGRAPH { - The usual way to avoid doing a full table scan is use a binary search - to find the particular row or rows of interest in the table. - Consider the next query which searches on rowid instead of x: -} -CODE { - SELECT y FROM ex1 WHERE rowid=2; -} - -PARAGRAPH { - In the previous query, we could not use a binary search for x because - the values of x were not ordered. But the rowid values are ordered. - So instead of having to visit every row of the b-tree looking for one - that has a rowid value of 2, we can do a binary search for that particular - row and output its corresponding y value. We show this graphically - as follows: -} -IMAGE direct1b.gif - -PARAGRAPH { - When doing a binary search, we only have to look at a number of - rows with is proportional to the logorithm of the number of entries - in the table. For a table with just 5 entires as in the example above, - the difference between a full table scan and a binary search is - negligible. In fact, the full table scan might be faster. But in - a database that has 5 million rows, a binary search will be able to - find the desired row in only about 23 tries, whereas the full table - scan will need to look at all 5 million rows. So the binary search - is about 200,000 times faster in that case. -} -PARAGRAPH { - A 200,000-fold speed improvement is huge. So we always want to do - a binary search rather than a full table scan when we can. -} -PARAGRAPH { - The problem with a binary search is that the it only works if the - fields you are search for are in sorted order. So we can do a binary - search when looking up the rowid because the rows of the table are - sorted by rowid. But we cannot use a binary search when looking up - x because the values in the x column are in no particular order. -} -PARAGRAPH { - The way to work around this problem and to permit binary searching on - fields like x is to provide an index. - An index is another b-tree. - But in the index b-tree the key is not the rowid but rather the field - or fields being indexed followed by the rowid. - The data in an index b-tree is empty - it is not needed or used. 
- The following diagram shows an index on the x field of our example table: -} -IMAGE index-ex1-x-b.gif - -PARAGRAPH { - An important point to note in the index are that they keys of the - b-tree are in sorted order. (Recall that NULL values in SQLite sort - first, followed by numeric values in numerical order, then strings, and - finally BLOBs.) This is the property that will allow use to do a - binary search for the field x. The rowid is also included in every - key for two reasons. First, by including the rowid we guarantee that - every key will be unique. And second, the rowid will be used to look - up the actual table entry after doing the binary search. Finally, note - that the data portion of the index b-tree serves no purpose and is thus - kept empty to save space in the disk file. -} -PARAGRAPH { - Remember what the original query example looked like: -} -CODE { - SELECT y FROM ex1 WHERE x=456; -} - -PARAGRAPH { - The first time this query was encountered we had to do a full table - scan. But now that we have an index on x, we can do a binary search - on that index for the entry where x==456. Then from that entry we - can find the rowid value and use the rowid to look up the corresponding - entry in the original table. From the entry in the original table, - we can find the value y and return it as our result. The following - diagram shows this process graphically: -} -IMAGE indirect1b1.gif - -PARAGRAPH { - With the index, we are able to look up an entry based on the value of - x after visiting only a logorithmic number of b-tree entries. Unlike - the case where we were searching using rowid, we have to do two binary - searches for each output row. But for a 5-million row table, that is - still only 46 searches instead of 5 million for a 100,000-fold speedup. -} - -HEADING 3 {Parsing The WHERE Clause} - - - -# parsing the where clause -# rowid lookup -# index lookup -# index lookup without the table -# how an index is chosen -# joins -# join reordering -# order by using an index -# group by using an index -# OR -> IN optimization -# Bitmap indices -# LIKE and GLOB optimization -# subquery flattening -# MIN and MAX optimizations diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/optimizing.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/optimizing.tcl --- sqlite3-3.4.2/www/optimizing.tcl 2005-01-17 03:42:52.000000000 +0000 +++ sqlite3-3.6.16/www/optimizing.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -set rcsid {$Id: optimizing.tcl,v 1.1 2005/01/17 03:42:52 drh Exp $} -source common.tcl -header {Hints For Optimizing Queries In SQLite} -proc section {level tag name} { - incr level - if {$level>6} {set level 6} - puts "\n"" - puts "$name\n" -} -section 1 recompile {Recompile the library for optimal performance} -section 2 avoidtrans {Minimize the number of transactions} -section 3 usebind {Use sqlite3_bind to insert large chunks of data} -section 4 useindices {Use appropriate indices} -section 5 recordjoin {Reorder the tables in a join} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/optoverview.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/optoverview.html --- sqlite3-3.4.2/www/optoverview.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/optoverview.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,536 @@ + + +The SQLite Query Optimizer Overview + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    The SQLite Query Optimizer Overview

    + This document provides a terse overview of how the query optimizer + for SQLite works. This is not a tutorial. The reader is likely to + need some prior knowledge of how database engines operate + in order to fully understand this text. +

    + +

    1.0 WHERE clause analysis

    + The WHERE clause on a query is broken up into "terms" where each term + is separated from the others by an AND operator. +

    +

    + All terms of the WHERE clause are analyzed to see if they can be + satisfied using indices. + Terms that cannot be satisfied through the use of indices become + tests that are evaluated against each row of the relevant input + tables. No tests are done for terms that are completely satisfied by + indices. Sometimes + one or more terms will provide hints to indices but still must be + evaluated against each row of the input tables. +

    +

    + The analysis of a term might cause new "virtual" terms to + be added to the WHERE clause. Virtual terms can be used with + indices to restrict a search. But virtual terms never generate code + that is tested against input rows. +

    +

    + To be usable by an index a term must be of one of the following + forms: +

    +
    
    +  column = expression
    +  column > expression
    +  column >= expression
    +  column < expression
    +  column <= expression
    +  expression = column
    +  expression > column
    +  expression >= column
    +  expression < column
    +  expression <= column
    +  column IN (expression-list)
    +  column IN (subquery)
    +  column IS NULL
    +

    + If an index is created using a statement like this: +

    +
    +  CREATE INDEX idx_ex1 ON ex1(a,b,c,d,e,...,y,z);
    +

    + Then the index might be used if the initial columns of the index + (columns a, b, and so forth) appear in WHERE clause terms. + All index columns must be used with + the = or IN operators except for + the right-most column which can use inequalities. For the right-most + column of an index that is used, there can be up to two inequalities + that must sandwich the allowed values of the column between two extremes. +

    +

+ It is not necessary for every column of an index to appear in a + WHERE clause term in order for that index to be used. + But there cannot be gaps in the columns of the index that are used. + Thus for the example index above, if there is no WHERE clause term + that constrains column c, then terms that constrain columns a and b can + be used with the index but not terms that constrain columns d through z. + Similarly, no index column will be used (for indexing purposes) + that is to the right of a + column that is constrained only by inequalities. + For the index above and a WHERE clause like this: +

    +
    +  ... WHERE a=5 AND b IN (1,2,3) AND c>12 AND d='hello'
    +

    + Only columns a, b, and c of the index would be usable. The d column + would not be usable because it occurs to the right of c and c is + constrained only by inequalities. +

    + +

    2.0 The BETWEEN optimization

    + If a term of the WHERE clause is of the following form: +

    +
    
    +  expr1 BETWEEN expr2 AND expr3
    +

    + Then two virtual terms are added as follows: +

    +
    
    +  expr1 >= expr2 AND expr1 <= expr3
    +

+ If both virtual terms end up being used as constraints on an index, + then the original BETWEEN term is omitted and the corresponding test + is not performed on input rows. + Thus if the BETWEEN term ends up being used as an index constraint, + no tests are ever performed on that term. + On the other hand, the + virtual terms themselves never cause tests to be performed on + input rows. + Thus if the BETWEEN term is not used as an index constraint and + instead must be used to test input rows, the expr1 expression is + only evaluated once. +
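+ As a minimal sketch (the table t1 and index idx_t1_a below are hypothetical,
+ not taken from this document), a BETWEEN term is analyzed as if the two
+ virtual inequality terms were present, which lets the index be used:
+
+  CREATE INDEX idx_t1_a ON t1(a);
+  SELECT * FROM t1 WHERE a BETWEEN 10 AND 20;
+  -- analyzed as if it also contained:  a >= 10 AND a <= 20
+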

    + +

    3.0 The OR optimization

    + If a term consists of multiple subterms containing a common column + name and separated by OR, like this: +

    +
    
    +  column = expr1 OR column = expr2 OR column = expr3 OR ...
    +

    + Then the term is rewritten as follows: +

    +
    
    +  column IN (expr1,expr2,expr3,expr4,...)
    +

+ The rewritten term then might go on to constrain an index using the + normal rules for IN operators. + Note that column must be the same column in every OR-connected subterm, + although the column can occur on either the left or the right side of + the = operator. +
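+ For example (a hypothetical sketch; the table t1 and column a are not part of
+ this document), OR-connected equality tests on one column are treated as an
+ IN operator, so an index on that column can constrain the search:
+
+  SELECT * FROM t1 WHERE a=5 OR a=7 OR a=9;
+  -- analyzed as:  SELECT * FROM t1 WHERE a IN (5,7,9);
+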

    +

    + Suppose the OR clause consists of multiple subterms as follows: +

    +
    
    +  expr1 OR expr2 OR expr3
    +

    + If every subterm of an OR clause is separately indexable and the + transformation to an IN operator described above does not apply, + then the OR clause is coded so that it logically works the same as + the following: +

    +
    
    +  rowid IN (SELECT rowid FROM table WHERE expr1
    +            UNION SELECT rowid FROM table WHERE expr2
    +            UNION SELECT rowid FROM table WHERE expr3)
    +

+ The implementation of the OR clause does not really use subqueries. + A more efficient internal mechanism is employed. The implementation + also works even for tables where the "rowid" column name has been + overloaded for other uses and no longer refers to the real rowid. + But the essence of the implementation is captured by the statement + above: Separate indices are used to find rowids that satisfy each + subterm of the OR clause and then the union of those rowids is used + to find all matching rows in the database. +

    + +

    4.0 The LIKE optimization

    + Terms that are composed of the LIKE or GLOB operator + can sometimes be used to constrain indices. + There are many conditions on this use: +

    +

    +

      +
    1. The left-hand side of the LIKE or GLOB operator must be the name + of an indexed column.
    2. +
    3. The right-hand side of the LIKE or GLOB must be a string literal + that does not begin with a wildcard character.
    4. +
    5. The ESCAPE clause cannot appear on the LIKE operator.
    6. +
7. The built-in functions used to implement LIKE and GLOB must not + have been overloaded using the sqlite3_create_function() API.
    8. +
    9. For the GLOB operator, the column must use the default BINARY + collating sequence.
    10. +
    11. For the LIKE operator, if case_sensitive_like mode is enabled then + the column must use the default BINARY collating sequence, or if + case_sensitive_like mode is disabled then the column must use the + built-in NOCASE collating sequence.
    12. +
    +

    +

    + The LIKE operator has two modes that can be set by a pragma. The + default mode is for LIKE comparisons to be insensitive to differences + of case for latin1 characters. Thus, by default, the following + expression is true: +

    +
    +  'a' LIKE 'A'
    +

    + By turning on the case_sensitive_like pragma as follows: +

    +
    +  PRAGMA case_sensitive_like=ON;
    +

    + Then the LIKE operator pays attention to case and the example above would + evaluate to false. Note that case insensitivity only applies to + latin1 characters - basically the upper and lower case letters of English + in the lower 127 byte codes of ASCII. International character sets + are case sensitive in SQLite unless a user-supplied collating + sequence is used. But if you employ a user-supplied collating sequence, + the LIKE optimization described here will never be taken. +

    +

    + The LIKE operator is case insensitive by default because this is what + the SQL standard requires. You can change the default behavior at + compile time by using the -DSQLITE_CASE_SENSITIVE_LIKE command-line option + to the compiler. +

    +

+ The LIKE optimization might occur if the column named on the left of the + operator uses the BINARY collating sequence (which is the default) and + case_sensitive_like is turned on. Or the optimization might occur if + the column uses the built-in NOCASE collating sequence and the + case_sensitive_like mode is off. These are the only two combinations + under which LIKE operators will be optimized. If the column on the + left-hand side of the LIKE operator uses any collating sequence other + than the built-in BINARY and NOCASE collating sequences, then no optimizations + will ever be attempted on the LIKE operator. +

    +

    + The GLOB operator is always case sensitive. The column on the left side + of the GLOB operator must always use the built-in BINARY collating sequence + or no attempt will be made to optimize that operator with indices. +

    +

    + The right-hand side of the GLOB or LIKE operator must be a literal string + value that does not begin with a wildcard. If the right-hand side is a + parameter that is bound to a string, then no optimization is attempted. + If the right-hand side begins with a wildcard character then no + optimization is attempted. +

    +

+ Suppose the initial sequence of non-wildcard characters on the right-hand + side of the LIKE or GLOB operator is x. We are using a single + character to denote this non-wildcard prefix but the reader should + understand that the prefix can consist of more than 1 character. + Let y be the smallest string that is the same length as x but which + compares greater than x. For example, if x is hello then + y would be hellp. + The LIKE and GLOB optimizations consist of adding two virtual terms + like this: +

    +
    
    +  column >= x AND column < y
    +

    + Under most circumstances, the original LIKE or GLOB operator is still + tested against each input row even if the virtual terms are used to + constrain an index. This is because we do not know what additional + constraints may be imposed by characters to the right + of the x prefix. However, if there is only a single global wildcard + to the right of x, then the original LIKE or GLOB test is disabled. + In other words, if the pattern is like this: +

    +
    
    +  column LIKE x%
    +  column GLOB x*
    +

    + Then the original LIKE or GLOB tests are disabled when the virtual + terms constrain an index because in that case we know that all of the + rows selected by the index will pass the LIKE or GLOB test. +
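+ A concrete sketch (hypothetical table t1 with an index on column a using the
+ default BINARY collating sequence) of the virtual terms described above:
+
+  SELECT * FROM t1 WHERE a LIKE 'hel%';
+  -- virtual terms added:  a >= 'hel' AND a < 'hem'
+  -- the original LIKE test is skipped because the only wildcard is the trailing %
+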

    + +

    5.0 Joins

    + The current implementation of + SQLite uses only loop joins. That is to say, joins are implemented as + nested loops. +

    +

    + The default order of the nested loops in a join is for the left-most + table in the FROM clause to form the outer loop and the right-most + table to form the inner loop. + However, SQLite will nest the loops in a different order if doing so + will help it to select better indices. +

    +

    + Inner joins can be freely reordered. However a left outer join is + neither commutative nor associative and hence will not be reordered. + Inner joins to the left and right of the outer join might be reordered + if the optimizer thinks that is advantageous but the outer joins are + always evaluated in the order in which they occur. +

    +

    + When selecting the order of tables in a join, SQLite uses a greedy + algorithm that runs in polynomial time. +

    +

+ The ON and USING clauses of a join are converted into additional + terms of the WHERE clause prior to the WHERE clause analysis described + above in paragraph 1.0. Thus + with SQLite, there is no advantage to using the newer SQL92 join syntax + over the older SQL89 comma-join syntax. They both end up accomplishing + exactly the same thing. +
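+ For instance (hypothetical tables t1 and t2), both of the following statements
+ are analyzed identically once the ON clause has been folded into the WHERE
+ clause:
+
+  SELECT * FROM t1 JOIN t2 ON t1.a = t2.b;
+  SELECT * FROM t1, t2 WHERE t1.a = t2.b;
+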

    +

+ Join reordering is automatic and usually works well enough that + programmers do not have to think about it. But occasionally some + hints from the programmer are needed. For a description of when + hints might be necessary and how to provide those hints, see the + QueryPlans + page in the Wiki. +

    + +

    6.0 Choosing between multiple indices

    + Each table in the FROM clause of a query can use at most one index, + and SQLite strives to use at least one index on each table. Sometimes, + two or more indices might be candidates for use on a single table. + For example: +

    +
    +  CREATE TABLE ex2(x,y,z);
    +  CREATE INDEX ex2i1 ON ex2(x);
    +  CREATE INDEX ex2i2 ON ex2(y);
    +  SELECT z FROM ex2 WHERE x=5 AND y=6;
    +

+ For the SELECT statement above, the optimizer can use the ex2i1 index + to look up rows of ex2 that contain x=5 and then test each row against + the y=6 term. Or it can use the ex2i2 index to look up rows + of ex2 that contain y=6 and then test each of those rows against the + x=5 term. +

    +

    + When faced with a choice of two or more indices, SQLite tries to estimate + the total amount of work needed to perform the query using each option. + It then selects the option that gives the least estimated work. +

    +

+ To help the optimizer get a more accurate estimate of the work involved + in using various indices, the user may optionally run the ANALYZE command. + The ANALYZE command scans all indices of the database where there might + be a choice between two or more indices and gathers statistics on the + selectiveness of those indices. The results of this scan are stored + in the sqlite_stat1 table. + The contents of the sqlite_stat1 table are not updated as the database + changes, so after making significant changes it might be prudent to + rerun ANALYZE. + The results of an ANALYZE command are only available to database connections + that are opened after the ANALYZE command completes. +
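+ A short sketch of the workflow described above: run ANALYZE after significant
+ changes to the data, and inspect the gathered statistics if desired.
+
+  ANALYZE;
+  SELECT * FROM sqlite_stat1;
+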

    +

    + Once created, the sqlite_stat1 table cannot be dropped. But its + content can be viewed, modified, or erased. Erasing the entire content + of the sqlite_stat1 table has the effect of undoing the ANALYZE command. + Changing the content of the sqlite_stat1 table can get the optimizer + deeply confused and cause it to make silly index choices. Making + updates to the sqlite_stat1 table (except by running ANALYZE) is + not recommended. +

    +

    + Terms of the WHERE clause can be manually disqualified for use with + indices by prepending a unary + operator to the column name. The + unary + is a no-op and will not slow down the evaluation of the test + specified by the term. + But it will prevent the term from constraining an index. + So, in the example above, if the query were rewritten as: +

    +
    +  SELECT z FROM ex2 WHERE +x=5 AND y=6;
    +

    + The + operator on the x column will prevent that term from + constraining an index. This would force the use of the ex2i2 index. +

    +

    + Note that the unary + operator also removes + type affinity from + an expression, and in some cases this can cause subtle changes in + the meaning of an expression. + In the example above, + if column x has TEXT affinity + then the comparison "x=5" will be done as text. But the + operator + removes the affinity. So the comparison "+x=5" will compare the text + in column x with the numeric value 5 and will always be false. +

    + +

    7.0 Avoidance of table lookups

    + When doing an indexed lookup of a row, the usual procedure is to + do a binary search on the index to find the index entry, then extract + the rowid from the index and use that rowid to do a binary search on + the original table. Thus a typical indexed lookup involves two + binary searches. + If, however, all columns that were to be fetched from the table are + already available in the index itself, SQLite will use the values + contained in the index and will never look up the original table + row. This saves one binary search for each row and can make many + queries run twice as fast. +
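+ As a hedged sketch using the ex2 table from section 6.0 (the index ex2i3 below
+ is hypothetical), an index that contains every fetched column avoids the second
+ binary search into the table:
+
+  CREATE INDEX ex2i3 ON ex2(x, z);
+  SELECT z FROM ex2 WHERE x=5;
+  -- z is read directly out of ex2i3; the ex2 table itself is never consulted
+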

    + +

    8.0 ORDER BY optimizations

    + SQLite attempts to use an index to satisfy the ORDER BY clause of a + query when possible. + When faced with the choice of using an index to satisfy WHERE clause + constraints or satisfying an ORDER BY clause, SQLite does the same + work analysis described in section 6.0 + and chooses the index that it believes will result in the fastest answer. + +
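+ A minimal sketch, reusing the ex2i1 index from section 6.0: because ex2i1
+ stores entries in order of x, SQLite can walk the index instead of performing
+ a separate sorting pass.
+
+  SELECT y FROM ex2 ORDER BY x;
+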

    + +

    9.0 Subquery flattening

    + When a subquery occurs in the FROM clause of a SELECT, the default + behavior is to evaluate the subquery into a transient table, then run + the outer SELECT against the transient table. + This is problematic since the transient table will not have any indices + and the outer query (which is likely a join) will be forced to do a + full table scan on the transient table. +

    +

    + To overcome this problem, SQLite attempts to flatten subqueries in + the FROM clause of a SELECT. + This involves inserting the FROM clause of the subquery into the + FROM clause of the outer query and rewriting expressions in + the outer query that refer to the result set of the subquery. + For example: +

    +
    +  SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5
    +

    + Would be rewritten using query flattening as: +

    +
    +  SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
    +

    + There is a long list of conditions that must all be met in order for + query flattening to occur. +

    +

    +

      +
    1. The subquery and the outer query do not both use aggregates.
    2. +
    3. The subquery is not an aggregate or the outer query is not a join.
    4. +
    5. The subquery is not the right operand of a left outer join, or + the subquery is not itself a join.
    6. +
    7. The subquery is not DISTINCT or the outer query is not a join.
    8. +
    9. The subquery is not DISTINCT or the outer query does not use + aggregates.
    10. +
    11. The subquery does not use aggregates or the outer query is not + DISTINCT.
    12. +
    13. The subquery has a FROM clause.
    14. +
    15. The subquery does not use LIMIT or the outer query is not a join.
    16. +
    17. The subquery does not use LIMIT or the outer query does not use + aggregates.
    18. +
    19. The subquery does not use aggregates or the outer query does not + use LIMIT.
    20. +
    21. The subquery and the outer query do not both have ORDER BY clauses.
    22. +
    23. The subquery is not the right term of a LEFT OUTER JOIN or the + subquery has no WHERE clause.
    24. +
    +

    +

+ The proof that query flattening may safely occur if all of the + above conditions are met is left as an exercise to the reader. +

    +

    + Query flattening is an important optimization when views are used as + each use of a view is translated into a subquery. +
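+ A hypothetical sketch of the view case, reusing the t1 example above: the view
+ is expanded into a subquery and then flattened exactly as in the previous
+ example.
+
+  CREATE VIEW v1 AS SELECT x+y AS a FROM t1 WHERE z<100;
+  SELECT a FROM v1 WHERE a>5;
+  -- runs as:  SELECT x+y AS a FROM t1 WHERE z<100 AND a>5
+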

    + +

    10.0 The MIN/MAX optimization

    + Queries of the following forms will be optimized to run in logarithmic + time assuming appropriate indices exist: +

    +
    +  SELECT MIN(x) FROM table;
    +  SELECT MAX(x) FROM table;
    +

    + In order for these optimizations to occur, they must appear in exactly + the form shown above - changing only the name of the table and column. + It is not permissible to add a WHERE clause or do any arithmetic on the + result. The result set must contain a single column. + The column in the MIN or MAX function must be an indexed column. +
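+ A minimal sketch, reusing ex2 and the ex2i1 index from section 6.0: with the
+ index present, each statement becomes a single seek to one end of the index
+ b-tree.
+
+  SELECT MIN(x) FROM ex2;
+  SELECT MAX(x) FROM ex2;
+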

    + +
    +This page last modified 2009/03/19 00:04:36 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/optoverview.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/optoverview.tcl --- sqlite3-3.4.2/www/optoverview.tcl 2006-12-15 21:21:28.000000000 +0000 +++ sqlite3-3.6.16/www/optoverview.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,516 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: optoverview.tcl,v 1.5 2005/11/24 13:15:34 drh Exp $} -source common.tcl -header {The SQLite Query Optimizer Overview} - -proc CODE {text} { - puts "
    "
    -  puts $text
    -  puts "
    " -} -proc SYNTAX {text} { - puts "
    " -} -proc IMAGE {name {caption {}}} { - puts "
    " - if {$caption!=""} { - puts "
    $caption" - } - puts "
    " -} -proc PARAGRAPH {text} { - # regsub -all "/(\[a-zA-Z0-9\]+)/" $text {\1} t2 - regsub -all "\\*(\[^\n*\]+)\\*" $text {\1} t3 - puts "

    $t3

    \n" -} -set level(0) 0 -set level(1) 0 -proc HEADING {n name {tag {}}} { - if {$tag!=""} { - puts "" - } - global level - incr level($n) - for {set i [expr {$n+1}]} {$i<10} {incr i} { - set level($i) 0 - } - if {$n==0} { - set num {} - } elseif {$n==1} { - set num $level(1).0 - } else { - set num $level(1) - for {set i 2} {$i<=$n} {incr i} { - append num .$level($i) - } - } - incr n 1 - puts "$num $name" -} - -HEADING 0 {The SQLite Query Optimizer Overview} - -PARAGRAPH { - This document provides a terse overview of how the query optimizer - for SQLite works. This is not a tutorial. The reader is likely to - need some prior knowledge of how database engines operate - in order to fully understand this text. -} - -HEADING 1 {WHERE clause analysis} where_clause - -PARAGRAPH { - The WHERE clause on a query is broken up into "terms" where each term - is separated from the others by an AND operator. -} -PARAGRAPH { - All terms of the WHERE clause are analyzed to see if they can be - satisfied using indices. - Terms that cannot be satisfied through the use of indices become - tests that are evaluated against each row of the relevant input - tables. No tests are done for terms that are completely satisfied by - indices. Sometimes - one or more terms will provide hints to indices but still must be - evaluated against each row of the input tables. -} - -PARAGRAPH { - The analysis of a term might cause new "virtual" terms to - be added to the WHERE clause. Virtual terms can be used with - indices to restrict a search. But virtual terms never generate code - that is tested against input rows. -} - -PARAGRAPH { - To be usable by an index a term must be of one of the following - forms: -} -SYNTAX { - /column/ = /expression/ - /column/ > /expression/ - /column/ >= /expression/ - /column/ < /expression/ - /column/ <= /expression/ - /expression/ = /column/ - /expression/ > /column/ - /expression/ >= /column/ - /expression/ < /column/ - /expression/ <= /column/ - /column/ IN (/expression-list/) - /column/ IN (/subquery/) -} -PARAGRAPH { - If an index is created using a statement like this: -} -CODE { - CREATE INDEX idx_ex1 ON ex1(a,b,c,d,e,...,y,z); -} -PARAGRAPH { - Then the index might be used if the initial columns of the index - (columns a, b, and so forth) appear in WHERE clause terms. - All index columns must be used with - the *=* or *IN* operators except for - the right-most column which can use inequalities. For the right-most - column of an index that is used, there can be up to two inequalities - that must sandwich the allowed values of the column between two extremes. -} -PARAGRAPH { - It is not necessary for every column of an index to appear in a - WHERE clause term in order for that index to be used. - But there can not be gaps in the columns of the index that are used. - Thus for the example index above, if there is no WHERE clause term - that constraints column c, then terms that constraint columns a and b can - be used with the index but not terms that constraint columns d through z. - Similarly, no index column will be used (for indexing purposes) - that is to the right of a - column that is constrained only by inequalities. - For the index above and WHERE clause like this: -} -CODE { - ... WHERE a=5 AND b IN (1,2,3) AND c>12 AND d='hello' -} -PARAGRAPH { - Only columns a, b, and c of the index would be usable. The d column - would not be usable because it occurs to the right of c and c is - constrained only by inequalities. 
-} - -HEADING 1 {The BETWEEN optimization} between_opt - -PARAGRAPH { - If a term of the WHERE clause is of the following form: -} -SYNTAX { - /expr1/ BETWEEN /expr2/ AND /expr3/ -} -PARAGRAPH { - Then two virtual terms are added as follows: -} -SYNTAX { - /expr1/ >= /expr2/ AND /expr1/ <= /expr3/ -} -PARAGRAPH { - If both virtual terms end up being used as constraints on an index, - then the original BETWEEN term is omitted and the corresponding test - is not performed on input rows. - Thus if the BETWEEN term ends up being used as an index constraint - no tests are ever performed on that term. - On the other hand, the - virtual terms themselves never causes tests to be performed on - input rows. - Thus if the BETWEEN term is not used as an index constraint and - instead must be used to test input rows, the expr1 expression is - only evaluated once. -} - -HEADING 1 {The OR optimization} or_opt - -PARAGRAPH { - If a term consists of multiple subterms containing a common column - name and separated by OR, like this: -} -SYNTAX { - /column/ = /expr1/ OR /column/ = /expr2/ OR /column/ = /expr3/ OR ... -} -PARAGRAPH { - Then the term is rewritten as follows: -} -SYNTAX { - /column/ IN (/expr1/,/expr2/,/expr3/,/expr4/,...) -} -PARAGRAPH { - The rewritten term then might go on to constraint an index using the - normal rules for *IN* operators. - Note that column must be the same column in every OR-connected subterm, - although the column can occur on either the left or the right side of - the *=* operator. -} - -HEADING 1 {The LIKE optimization} like_opt - -PARAGRAPH { - Terms that are composed of the LIKE or GLOB operator - can sometimes be used to constrain indices. - There are many conditions on this use: -} -PARAGRAPH { -
      -
    1. The left-hand side of the LIKE or GLOB operator must be the name - of an indexed column.
    2. -
    3. The right-hand side of the LIKE or GLOB must be a string literal - that does not begin with a wildcard character.
    4. -
    5. The ESCAPE clause cannot appear on the LIKE operator.
    6. -
    7. The build-in functions used to implement LIKE and GLOB must not - have been overloaded using the sqlite3_create_function() API.
    8. -
    9. For the GLOB operator, the column must use the default BINARY - collating sequence.
    10. -
    11. For the LIKE operator, if case_sensitive_like mode is enabled then - the column must use the default BINARY collating sequence, or if - case_sensitive_like mode is disabled then the column must use the - built-in NOCASE collating sequence.
    12. -
    -} -PARAGRAPH { - The LIKE operator has two modes that can be set by a pragma. The - default mode is for LIKE comparisons to be insensitive to differences - of case for latin1 characters. Thus, by default, the following - expression is true: -} -CODE { - 'a' LIKE 'A' -} -PARAGRAPH { - By turned on the case_sensitive_like pragma as follows: -} -CODE { - PRAGMA case_sensitive_like=ON; -} -PARAGRAPH { - Then the LIKE operator pays attention to case and the example above would - evaluate to false. Note that case insensitivity only applies to - latin1 characters - basically the upper and lower case letters of English - in the lower 127 byte codes of ASCII. International character sets - are case sensitive in SQLite unless a user-supplied collating - sequence is used. But if you employ a user-supplied collating sequence, - the LIKE optimization describe here will never be taken. -} -PARAGRAPH { - The LIKE operator is case insensitive by default because this is what - the SQL standard requires. You can change the default behavior at - compile time by using the -DSQLITE_CASE_SENSITIVE_LIKE command-line option - to the compiler. -} -PARAGRAPH { - The LIKE optimization might occur if the column named on the left of the - operator uses the BINARY collating sequence (which is the default) and - case_sensitive_like is turned on. Or the optimization might occur if - the column uses the built-in NOCASE collating sequence and the - case_sensitive_like mode is off. These are the only two combinations - under which LIKE operators will be optimized. If the column on the - right-hand side of the LIKE operator uses any collating sequence other - than the built-in BINARY and NOCASE collating sequences, then no optimizations - will ever be attempted on the LIKE operator. -} -PARAGRAPH { - The GLOB operator is always case sensitive. The column on the left side - of the GLOB operator must always use the built-in BINARY collating sequence - or no attempt will be made to optimize that operator with indices. -} -PARAGRAPH { - The right-hand side of the GLOB or LIKE operator must be a literal string - value that does not begin with a wildcard. If the right-hand side is a - parameter that is bound to a string, then no optimization is attempted. - If the right-hand side begins with a wildcard character then no - optimization is attempted. -} -PARAGRAPH { - Suppose the initial sequence of non-wildcard characters on the right-hand - side of the LIKE or GLOB operator is x. We are using a single - character to denote this non-wildcard prefix but the reader should - understand that the prefix can consist of more than 1 character. - Let y the smallest string that is the same length as /x/ but which - compares greater than x. For example, if x is *hello* then - y would be *hellp*. - The LIKE and GLOB optimizations consist of adding two virtual terms - like this: -} -SYNTAX { - /column/ >= /x/ AND /column/ < /y/ -} -PARAGRAPH { - Under most circumstances, the original LIKE or GLOB operator is still - tested against each input row even if the virtual terms are used to - constrain an index. This is because we do not know what additional - constraints may be imposed by characters to the right - of the x prefix. However, if there is only a single global wildcard - to the right of x, then the original LIKE or GLOB test is disabled. 
- In other words, if the pattern is like this: -} -SYNTAX { - /column/ LIKE /x/% - /column/ GLOB /x/* -} -PARAGRAPH { - Then the original LIKE or GLOB tests are disabled when the virtual - terms constrain an index because in that case we know that all of the - rows selected by the index will pass the LIKE or GLOB test. -} - -HEADING 1 {Joins} joins - -PARAGRAPH { - The current implementation of - SQLite uses only loop joins. That is to say, joins are implemented as - nested loops. -} -PARAGRAPH { - The default order of the nested loops in a join is for the left-most - table in the FROM clause to form the outer loop and the right-most - table to form the inner loop. - However, SQLite will nest the loops in a different order if doing so - will help it to select better indices. -} -PARAGRAPH { - Inner joins can be freely reordered. However a left outer join is - neither commutative nor associative and hence will not be reordered. - Inner joins to the left and right of the outer join might be reordered - if the optimizer thinks that is advantageous but the outer joins are - always evaluated in the order in which they occur. -} -PARAGRAPH { - When selecting the order of tables in a join, SQLite uses a greedy - algorithm that runs in polynomial time. -} -PARAGRAPH { - The ON and USING clauses of a join are converted into additional - terms of the WHERE clause prior to WHERE clause analysis described - above in paragraph 1.0. Thus - with SQLite, there is no advantage to use the newer SQL92 join syntax - over the older SQL89 comma-join syntax. They both end up accomplishing - exactly the same thing. -} -PARAGRAPH { - Join reordering is automatic and usually works well enough that - programmer do not have to think about it. But occasionally some - hints from the programmer are needed. For a description of when - hints might be necessary and how to provide those hints, see the -
    QueryPlans - page in the Wiki. -} - -HEADING 1 {Choosing between multiple indices} multi_index - -PARAGRAPH { - Each table in the FROM clause of a query can use at most one index, - and SQLite strives to use at least one index on each table. Sometimes, - two or more indices might be candidates for use on a single table. - For example: -} -CODE { - CREATE TABLE ex2(x,y,z); - CREATE INDEX ex2i1 ON ex2(x); - CREATE INDEX ex2i2 ON ex2(y); - SELECT z FROM ex2 WHERE x=5 AND y=6; -} -PARAGRAPH { - For the SELECT statement above, the optimizer can use the ex2i1 index - to lookup rows of ex2 that contain x=5 and then test each row against - the y=6 term. Or it can use the ex2i2 index to lookup rows - of ex2 that contain y=6 then test each of those rows against the - x=5 term. -} -PARAGRAPH { - When faced with a choice of two or more indices, SQLite tries to estimate - the total amount of work needed to perform the query using each option. - It then selects the option that gives the least estimated work. -} -PARAGRAPH { - To help the optimizer get a more accurate estimate of the work involved - in using various indices, the user may optional run the ANALYZE command. - The ANALYZE command scans all indices of database where there might - be a choice between two or more indices and gathers statistics on the - selectiveness of those indices. The results of this scan are stored - in the sqlite_stat1 table. - The contents of the sqlite_stat1 table are not updated as the database - changes so after making significant changes it might be prudent to - rerun ANALYZE. - The results of an ANALYZE command are only available to database connections - that are opened after the ANALYZE command completes. -} -PARAGRAPH { - Once created, the sqlite_stat1 table cannot be dropped. But its - content can be viewed, modified, or erased. Erasing the entire content - of the sqlite_stat1 table has the effect of undoing the ANALYZE command. - Changing the content of the sqlite_stat1 table can get the optimizer - deeply confused and cause it to make silly index choices. Making - updates to the sqlite_stat1 table (except by running ANALYZE) is - not recommended. -} -PARAGRAPH { - Terms of the WHERE clause can be manually disqualified for use with - indices by prepending a unary *+* operator to the column name. The - unary *+* is a no-op and will not slow down the evaluation of the test - specified by the term. - But it will prevent the term from constraining an index. - So, in the example above, if the query were rewritten as: -} -CODE { - SELECT z FROM ex2 WHERE +x=5 AND y=6; -} -PARAGRAPH { - The *+* operator on the *x* column would prevent that term from - constraining an index. This would force the use of the ex2i2 index. -} - -HEADING 1 {Avoidance of table lookups} index_only - -PARAGRAPH { - When doing an indexed lookup of a row, the usual procedure is to - do a binary search on the index to find the index entry, then extract - the rowid from the index and use that rowid to do a binary search on - the original table. Thus a typical indexed lookup involves two - binary searches. - If, however, all columns that were to be fetched from the table are - already available in the index itself, SQLite will use the values - contained in the index and will never look up the original table - row. This saves one binary search for each row and can make many - queries run twice as fast. 
-} - -HEADING 1 {ORDER BY optimizations} order_by - -PARAGRAPH { - SQLite attempts to use an index to satisfy the ORDER BY clause of a - query when possible. - When faced with the choice of using an index to satisfy WHERE clause - constraints or satisfying an ORDER BY clause, SQLite does the same - work analysis described in section 6.0 - and chooses the index that it believes will result in the fastest answer. - -} - -HEADING 1 {Subquery flattening} flattening - -PARAGRAPH { - When a subquery occurs in the FROM clause of a SELECT, the default - behavior is to evaluate the subquery into a transient table, then run - the outer SELECT against the transient table. - This is problematic since the transient table will not have any indices - and the outer query (which is likely a join) will be forced to do a - full table scan on the transient table. -} -PARAGRAPH { - To overcome this problem, SQLite attempts to flatten subqueries in - the FROM clause of a SELECT. - This involves inserting the FROM clause of the subquery into the - FROM clause of the outer query and rewriting expressions in - the outer query that refer to the result set of the subquery. - For example: -} -CODE { - SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 -} -PARAGRAPH { - Would be rewritten using query flattening as: -} -CODE { - SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 -} -PARAGRAPH { - There is a long list of conditions that must all be met in order for - query flattening to occur. -} -PARAGRAPH { -
      -
    1. The subquery and the outer query do not both use aggregates.
    2. -
    3. The subquery is not an aggregate or the outer query is not a join.
    4. -
    5. The subquery is not the right operand of a left outer join, or - the subquery is not itself a join.
    6. -
    7. The subquery is not DISTINCT or the outer query is not a join.
    8. -
    9. The subquery is not DISTINCT or the outer query does not use - aggregates.
    10. -
    11. The subquery does not use aggregates or the outer query is not - DISTINCT.
    12. -
    13. The subquery has a FROM clause.
    14. -
    15. The subquery does not use LIMIT or the outer query is not a join.
    16. -
    17. The subquery does not use LIMIT or the outer query does not use - aggregates.
    18. -
    19. The subquery does not use aggregates or the outer query does not - use LIMIT.
    20. -
    21. The subquery and the outer query do not both have ORDER BY clauses.
    22. -
    23. The subquery is not the right term of a LEFT OUTER JOIN or the - subquery has no WHERE clause.
    24. -
    -} -PARAGRAPH { - The proof that query flattening may safely occur if all of the the - above conditions are met is left as an exercise to the reader. -} -PARAGRAPH { - Query flattening is an important optimization when views are used as - each use of a view is translated into a subquery. -} - -HEADING 1 {The MIN/MAX optimization} minmax - -PARAGRAPH { - Queries of the following forms will be optimized to run in logarithmic - time assuming appropriate indices exist: -} -CODE { - SELECT MIN(x) FROM table; - SELECT MAX(x) FROM table; -} -PARAGRAPH { - In order for these optimizations to occur, they must appear in exactly - the form shown above - changing only the name of the table and column. - It is not permissible to add a WHERE clause or do any arithmetic on the - result. The result set must contain a single column. - The column in the MIN or MAX function must be an indexed column. -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/pragma.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/pragma.html --- sqlite3-3.4.2/www/pragma.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/pragma.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,954 @@ + + +Pragma statements supported by SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + +

    The PRAGMA statement is a SQL extension specific to SQLite and used to +modify the operation of the SQLite library or to query the SQLite library for +internal (non-table) data. The PRAGMA statement is issued using the same +interface as other SQLite commands (e.g. SELECT, INSERT) but is +different in the following important respects: +

    +
      +
• Specific pragma statements may be removed and others added in future + releases of SQLite. There is no guarantee of backwards compatibility.
    • No error messages are generated if an unknown pragma is issued. + Unknown pragmas are simply ignored. This means if there is a typo in + a pragma statement the library does not inform the user of the fact. +
    • Some pragmas take effect during the SQL compilation stage, not the + execution stage. This means if using the C-language sqlite3_prepare(), + sqlite3_step(), sqlite3_finalize() API (or similar in a wrapper + interface), the pragma may run during the sqlite3_prepare() call, + not during the sqlite3_step() call as normal SQL statements do. + Or the pragma might run during sqlite3_step() just like normal + SQL statements. Whether or not the pragma runs during sqlite3_prepare() + or sqlite3_step() depends on the pragma and on the specific release + of SQLite. +
    • The pragma command is specific to SQLite and is very unlikely + to be compatible with any other SQL database engine. +
    + +

    The available pragmas fall into four basic categories:

    + + + +
    +

    PRAGMA command syntax

    +

    pragma-stmt:

    +

    pragma-value:

    + + +

+A pragma can take either zero or one argument. The argument may be either +in parentheses or it may be separated from the pragma name by an equal sign. +The two syntaxes yield identical results. +In many pragmas, the argument is a boolean. The boolean can be one of: +

    + +
    +1 yes true on
    0 no false off
    +
    + +

Keyword arguments can optionally appear in quotes. +(Example: 'yes' [FALSE].) Some pragmas +take a string literal as their argument. When a pragma takes a keyword +argument, it will usually also take a numeric equivalent as well. +For example, "0" and "no" mean the same thing, as do "1" and "yes". +When querying the value of a setting, many pragmas return the number +rather than the keyword.

    + +

    A pragma may have an optional database name before the pragma name. +The database name is the name of an ATTACH-ed database. Or it can be +"main" or "temp" for the main and the TEMP databases. If the optional +database name is omitted, "main" is assumed. In some pragmas, the database +name is meaningless and is simply ignored.
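+ A brief sketch of the syntax rules above (the attached database name "aux1" is
+ hypothetical):
+
+  PRAGMA cache_size = 2000;   -- argument after an equal sign
+  PRAGMA cache_size(2000);    -- argument in parentheses; identical effect
+  PRAGMA aux1.cache_size;     -- query the setting for an ATTACH-ed database
+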

    + + +
    +

    Pragmas to modify library operation

    + + + +
      + + +
    • PRAGMA auto_vacuum;
      + PRAGMA auto_vacuum =
      + 0 | NONE | 1 | FULL | 2 | INCREMENTAL;

      + +

      Query or set the auto-vacuum status in the database.

      + +

The default setting for auto-vacuum is 0 or "none", + unless the SQLITE_DEFAULT_AUTOVACUUM compile-time option is used. + The "none" setting means that auto-vacuum is disabled. + When auto-vacuum is disabled and data is deleted from a database, + the database file remains the same size. Unused database file + pages are added to a "freelist" and reused for subsequent inserts. So + no database file space is lost. However, the database file does not + shrink. In this mode the VACUUM + command can be used to rebuild the entire database file and + thus reclaim unused disk space.

      + +

      When the auto-vacuum mode is 1 or "full", the freelist pages are + moved to the end of the database file and the database file is truncated + to remove the freelist pages at every transaction commit. + Note, however, that auto-vacuum only truncates the freelist pages + from the file. Auto-vacuum does not defragment the database nor + repack individual database pages the way that the + VACUUM command does. In fact, because + it moves pages around within the file, auto-vacuum can actually + make fragmentation worse.

      + +

Auto-vacuuming is only possible if the database stores some + additional information that allows each database page to be + traced backwards to its referrer. Therefore, auto-vacuuming must + be turned on before any tables are created. It is not possible + to enable or disable auto-vacuum after a table has been created.

      + +

      When the value of auto-vacuum is 2 or "incremental" then the additional + information needed to do auto-vacuuming is stored in the database file + but auto-vacuuming does not occur automatically at each commit as it + does with auto_vacuum=full. In incremental mode, the separate + incremental_vacuum pragma must + be invoked to cause the auto-vacuum to occur.

      + +

      The database connection can be changed between full and incremental + autovacuum mode at any time. However, the connection can only be changed + from "none" to "full" or "incremental" when the database is empty (no tables + have yet been created) or by running the VACUUM command. To + change auto-vacuum modes, first use the auto_vacuum pragma to set + the new desired mode, then invoke the VACUUM command to + reorganize the entire database file. Changing from "full" or + "incremental" back to "none" always requires running VACUUM, even + on an empty database. +

      + +

      When the auto_vacuum pragma is invoked with no arguments, it + returns the current auto_vacuum mode.
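      A minimal sketch of changing the mode, following the procedure described above:

      PRAGMA auto_vacuum;                  -- query: returns 0, 1, or 2
      PRAGMA auto_vacuum = INCREMENTAL;    -- request the new mode ...
      VACUUM;                              -- ... then rebuild the file so the change takes
                                           -- effect when switching into or out of "none"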

      +
    • + + + +
    • PRAGMA cache_size; +
      PRAGMA cache_size =
      Number-of-pages;

      +

      Query or change the suggested maximum number of database disk pages + that SQLite will hold in memory at once per open database file. Whether + or not this suggestion is honored is at the discretion of the + Application Defined Page Cache. The default + page cache implementation that is built into SQLite does honor the + suggestion if it can, but alternative page cache implementations + set by the application at run-time may choose to ignore this suggestion. + The default suggested cache size is 2000.

      + +

      When you change the cache size using the cache_size pragma, the + change only endures for the current session. The cache size reverts + to the default value when the database is closed and reopened. Use + the default_cache_size + pragma to set the cache size permanently.
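      A minimal sketch of the difference between the two pragmas:

      PRAGMA cache_size;                   -- query the suggested cache size (default 2000 pages)
      PRAGMA cache_size = 4000;            -- suggestion for the current connection only
      PRAGMA default_cache_size = 4000;    -- stored in the database file, so it persists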

    • + + + +
    • PRAGMA case_sensitive_like = boolean;

      +

      The default behavior of the LIKE operator is to ignore case + for ASCII characters. Hence, by default 'a' LIKE 'A' is + true. The case_sensitive_like pragma installs a new application-defined + LIKE function that can change + this behavior. When case_sensitive_like is enabled, + 'a' LIKE 'A' is false but 'a' LIKE 'a' is still true.
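      A minimal sketch of the effect on LIKE comparisons:

      SELECT 'a' LIKE 'A';                 -- 1 with the default, case-insensitive LIKE
      PRAGMA case_sensitive_like = 1;
      SELECT 'a' LIKE 'A';                 -- now 0
      SELECT 'a' LIKE 'a';                 -- still 1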

      +
    • + + + +
    • PRAGMA count_changes; +
      PRAGMA count_changes =
      boolean;

      +

      Query or change the count-changes flag. Normally, when the + count-changes flag is not set, INSERT, UPDATE and DELETE statements + return no data. When count-changes is set, each of these commands + returns a single row of data consisting of one integer value - the + number of rows inserted, modified or deleted by the command. The + returned change count does not include any insertions, modifications + or deletions performed by triggers.

      + +

      Another way to get the row change counts is to use the + sqlite3_changes() or sqlite3_total_changes() interfaces. + There is a subtle difference, though. When an INSERT, UPDATE, or + DELETE is run against a view using an INSTEAD OF trigger, + the count_changes pragma reports the number of rows in the view + that fired the trigger, whereas sqlite3_changes() and + sqlite3_total_changes() do not.
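      A brief sketch; the table t1 and its column x are purely illustrative:

      PRAGMA count_changes = 1;
      UPDATE t1 SET x = x + 1;             -- hypothetical table; the statement now returns a
                                           -- single row holding the number of rows updated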

      + + + +
    • PRAGMA default_cache_size; +
      PRAGMA default_cache_size =
      Number-of-pages;

      +

      This pragma queries or sets the suggested maximum number of pages + of disk cache that will be allocated per open database file. + The difference between this pragma and cache_size is that the + value set here persists across database connections. +

    • + + + + +
    • PRAGMA empty_result_callbacks; +
      PRAGMA empty_result_callbacks =
      boolean;

      + +

      Query or change the empty-result-callbacks flag.

      + +

      The empty-result-callbacks flag affects the sqlite3_exec() API only. + Normally, when the empty-result-callbacks flag is cleared, the + callback function supplied to the sqlite3_exec() call is not invoked + for commands that return zero rows of data. When empty-result-callbacks + is set in this situation, the callback function is invoked exactly once, + with the third parameter set to 0 (NULL). This is to enable programs + that use the sqlite3_exec() API to retrieve column-names even when + a query returns no data.

      + +

      This pragma is a legacy feature. It was created long ago in the early + days of SQLite before the prepared statement interface was available. + Do not use this pragma. It is likely to go away in a future release.

      + + + + + +
    • PRAGMA encoding; +
      PRAGMA encoding = "UTF-8"; +
      PRAGMA encoding = "UTF-16"; +
      PRAGMA encoding = "UTF-16le"; +
      PRAGMA encoding = "UTF-16be";

      +

      In the first form, if the main database has already been + created, then this pragma returns the text encoding used by the + main database, one of "UTF-8", "UTF-16le" (little-endian UTF-16 + encoding) or "UTF-16be" (big-endian UTF-16 encoding). If the main + database has not already been created, then the value returned is the + text encoding that will be used to create the main database, if + it is created by this session.

      +

      The second and subsequent forms of this pragma are only useful if + the main database has not already been created. In this case the + pragma sets the encoding that the main database will be created with if + it is created by this session. The string "UTF-16" is interpreted + as "UTF-16 encoding using native machine byte-ordering". If the second + and subsequent forms are used after the database file has already + been created, they have no effect and are silently ignored.

      + +

      Once an encoding has been set for a database, it cannot be changed.

      + +

      Databases created by the ATTACH command always use the same encoding + as the main database.
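      A minimal sketch; the CREATE TABLE statement and its table name are illustrative only:

      PRAGMA encoding = "UTF-16";          -- only effective before the main database is created
      CREATE TABLE t1(x);                  -- the database is now created using UTF-16le or
                                           -- UTF-16be, depending on the machine byte order
      PRAGMA encoding;                     -- reports the encoding actually in use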

      +
    • + + + +
    • PRAGMA full_column_names; +
      PRAGMA full_column_names =
      boolean;

      +

      Query or change the full_column_names flag. This flag together + with the short_column_names flag determine + the way SQLite assigns names to results returned by SELECT statements. + Result columns are named by applying the following rules in order: +

      1. If there is an AS clause on the result, then the name of
         the column is the right-hand side of the AS clause.

      2. If the result is a general expression, not just the name of
         a source table column, then the name of the result is a
         copy of the expression text.

      3. If the short_column_names pragma is ON, then the name of the
         result is the name of the source table column without the
         source table name prefix: COLUMN.

      4. If both pragmas short_column_names and full_column_names
         are OFF then case (2) applies.

      5. The name of the result column is a combination of the source
         table and source column name: TABLE.COLUMN
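      A hedged sketch of the naming rules, assuming a hypothetical table t1 with a column x:

      PRAGMA short_column_names = 0;
      PRAGMA full_column_names = 1;
      SELECT x FROM t1;                    -- result column is named "t1.x" (rule 5)
      SELECT x AS col FROM t1;             -- an AS clause always takes precedence: "col" (rule 1)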
    • + + + +
    • PRAGMA fullfsync +
      PRAGMA fullfsync =
      boolean;

      +

      Query or change the fullfsync flag. This flag + determines whether or not the F_FULLFSYNC syncing method is used + on systems that support it. The default value is off. As of this + writing (2006-02-10) only Mac OS X supports F_FULLFSYNC. +

      +
    • + + + +
    • PRAGMA incremental_vacuum(N);

      +

      The incremental_vacuum pragma causes up to N pages to + be removed from the freelist. The database file is truncated by + the same amount. The incremental_vacuum pragma has no effect if + the database is not in + auto_vacuum==incremental mode + or if there are no pages on the freelist. If there are fewer than + N pages on the freelist, or if N is less than 1, or + if N is omitted entirely, then the entire freelist is cleared.

      + +

      As of version 3.4.0 (the first version that supports + incremental_vacuum) this feature is still experimental. Possible + future changes include enhancing incremental vacuum to do + defragmentation and node repacking just as the full-blown + VACUUM command does. And + incremental vacuum may be promoted from a pragma to a separate + SQL command, or perhaps some variation on the VACUUM command. + Programmers are cautioned to not become enamored with the + current syntax or functionality as it is likely to change.
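      A minimal sketch, assuming the database was created with auto_vacuum set to incremental:

      PRAGMA freelist_count;               -- how many unused pages could be reclaimed
      PRAGMA incremental_vacuum(50);       -- remove up to 50 freelist pages and truncate the file
      PRAGMA incremental_vacuum;           -- with no argument, clear the entire freelist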

      +
    • + + + +
    • PRAGMA journal_mode; +
      PRAGMA
      database.journal_mode; +
      PRAGMA journal_mode + = DELETE | TRUNCATE | PERSIST | MEMORY | OFF +
      PRAGMA
      database.journal_mode + = DELETE | TRUNCATE | PERSIST | MEMORY | OFF

      + +

      This pragma queries or sets the journal mode for databases + associated with the current database connection.

      + +

      The first two forms of this pragma query the current journaling + mode. In the first form, the default journal_mode is returned. + The default journaling mode is the mode used by databases added + to the connection by subsequent ATTACH statements. The second + form returns the current journaling mode for a specific database.

      + +

      The last two forms change the journaling mode. The 4th form + changes the journaling mode for a specific database. + Use "main" for the main database (the database that was opened by + the original sqlite3_open(), sqlite3_open16(), or + sqlite3_open_v2() interface call) and use "temp" for the database + that holds TEMP tables. The 3rd form changes the journaling mode + on all databases and it changes the default journaling mode that + will be used for new databases added by subsequent ATTACH + commands. The new journal mode is returned. If the journal mode + could not be changed, the original journal mode is returned.

      + +

      The DELETE journaling mode is the normal behavior. In the DELETE + mode, the rollback journal is deleted at the conclusion of each + transaction. Indeed, the delete operation is the action that causes + the transaction to commit. + (See the document titled + Atomic Commit In SQLite for additional detail.)

      + +

      The TRUNCATE journaling mode commits transactions by truncating + the rollback journal to zero-length instead of deleting it. On many + systems, truncating a file is much faster than deleting the file since + the containing directory does not need to be changed.

      + +

      The PERSIST journaling mode prevents the rollback journal from + being deleted at the end of each transaction. Instead, the header + of the journal is overwritten with zeros. This will prevent other + database connections from rolling the journal back. The PERSIST + journaling mode is useful as an optimization on platforms where + deleting or truncating a file is much more expensive than overwriting + the first block of a file with zeros.

      + +

      The MEMORY journaling mode stores the rollback journal in + volatile RAM. This saves disk I/O but at the expense of database + safety and integrity. If the application using SQLite crashes in + the middle of a transaction when the MEMORY journaling mode is set, + then the database file will very likely go corrupt.

      + +

      The OFF journaling mode disables the rollback journal completely. + No rollback journal is ever created and hence there is never a rollback + journal to delete. The OFF journaling mode disables the atomic + commit and rollback capabilities of SQLite. The ROLLBACK command + no longer works; it behaves in an undefined way. Applications must + avoid using the ROLLBACK command when the journal mode is OFF. + If the application crashes + in the middle of a transaction when the OFF journaling mode is + set, then the database file will very likely go corrupt.

      + +

      Note that the journal_mode for an in-memory database + is either MEMORY or OFF and can not be changed to a different value. + An attempt to change the journal_mode of an in-memory database to + any setting other than MEMORY or OFF is ignored. Note also that + the journal_mode cannot be changed while a transaction is active.
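      A minimal sketch of the query and set forms:

      PRAGMA journal_mode;                     -- query the default journaling mode
      PRAGMA main.journal_mode = TRUNCATE;     -- change only the main database
      PRAGMA journal_mode = PERSIST;           -- change all databases and the default used for
                                               -- databases attached later; returns the new mode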

      +
    • + + + +
    • + PRAGMA journal_size_limit
      + PRAGMA journal_size_limit =
      N ; + +

      If a database connection is operating in either "exclusive mode" + (PRAGMA locking_mode=exclusive) or "persistent journal mode" + (PRAGMA journal_mode=persist) then under certain circumstances + after committing a transaction the journal file may remain in + the file-system. This increases efficiency but also consumes + space in the file-system. After a large transaction (e.g. a VACUUM), + it may consume a very large amount of space. + +

      This pragma may be used to limit the size of journal files left + in the file-system after transactions are committed on a per database + basis. Each time a transaction is committed, SQLite compares the + size of the journal file left in the file-system to the size limit + configured using this pragma. If the journal file is larger than the + limit allows for, it is truncated to the limit. + +

      The second form of the pragma listed above is used to set a new limit + in bytes for the specified database. A negative number implies no limit. + Both the first and second forms of the pragma listed above return a single + result row containing a single integer column - the value of the journal + size limit in bytes. The default limit value is -1 (no limit), which + may be overridden by defining the preprocessor macro + SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT at compile time.

      + +

      This pragma only operates on the single database specified prior + to the pragma name (or on the "main" database if no database is specified.) + There is no way to operate on all attached databases using a single + PRAGMA statement, nor is there a way to set the limit to use for databases + that will be attached in the future. +
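      A minimal sketch, assuming persistent-journal or exclusive locking mode is in use:

      PRAGMA main.journal_size_limit = 1048576;   -- cap the main database's leftover journal at 1 MiB
      PRAGMA main.journal_size_limit;             -- query: returns the limit in bytes
      PRAGMA main.journal_size_limit = -1;        -- a negative value means no limit (the default)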

    • + + + + +
    • PRAGMA legacy_file_format; +
      PRAGMA legacy_file_format = boolean

      +

      This pragma sets or queries the value of the legacy_file_format + flag. When this flag is on, new SQLite databases are created in + a file format that is readable and writable by all versions of + SQLite going back to 3.0.0. When the flag is off, new databases + are created using the latest file format which might not be + readable or writable by versions of SQLite prior to 3.3.0.

      + +

      When the pragma is issued with no argument, it returns the + setting of the flag. This pragma does not tell which + file format the current database is using. It tells what format + will be used by any newly created databases.

      + +

      This flag only affects newly created databases. It has no + effect on databases that already exist.

      + +

      The default file format is set by the + SQLITE_DEFAULT_FILE_FORMAT compile-time option.

      +
    • + + + +
    • PRAGMA locking_mode; +
      PRAGMA locking_mode = NORMAL | EXCLUSIVE

      +

      This pragma sets or queries the database connection locking-mode. + The locking-mode is either NORMAL or EXCLUSIVE. + +

      In NORMAL locking-mode (the default), a database connection + unlocks the database file at the conclusion of each read or + write transaction. When the locking-mode is set to EXCLUSIVE, the + database connection never releases file-locks. The first time the + database is read in EXCLUSIVE mode, a shared lock is obtained and + held. The first time the database is written, an exclusive lock is + obtained and held.

      + +

      Database locks obtained by a connection in EXCLUSIVE mode may be + released either by closing the database connection, or by setting the + locking-mode back to NORMAL using this pragma and then accessing the + database file (for read or write). Simply setting the locking-mode to + NORMAL is not enough - locks are not released until the next time + the database file is accessed.

      + +

      There are two reasons to set the locking-mode to EXCLUSIVE. One + is if the application actually wants to prevent other processes from + accessing the database file. The other is that a small number of + filesystem operations are saved by optimizations enabled in this + mode. This may be significant in embedded environments.

      + +

      When the locking_mode pragma specifies a particular database, + for example:

      + +
      +PRAGMA main.locking_mode=EXCLUSIVE; +
      + +

      Then the locking mode applies only to the named database. If no + database name qualifier precedes the "locking_mode" keyword then + the locking mode is applied to all databases, including any new + databases added by subsequent ATTACH commands.

      + +

      The "temp" database (in which TEMP tables and indices are stored) + and in-memory databases + always uses exclusive locking mode. The locking mode of temp and + in-memory databases cannot + be changed. All other databases use the normal locking mode by default + and are affected by this pragma.

      +
    • + + + +
    • PRAGMA page_size; +
      PRAGMA page_size =
      bytes;

      +

      Query or set the page size of the database. The page size + may only be set if the database has not yet been created. The page + size must be a power of two greater than or equal to 512 and less + than or equal to SQLITE_MAX_PAGE_SIZE. + The maximum value for SQLITE_MAX_PAGE_SIZE is 32768. +

      + +

      When a new database is created, SQLite assigns a default page size + based on information received from the xSectorSize and + xDeviceCharacteristics methods of the sqlite3_io_methods object + of the newly created database file. The page_size pragma will only + cause an immediate change in the + page size if it is issued while the database is still empty, prior + to the first CREATE TABLE statement. As of version 3.5.8, if + the page_size pragma is used to specify a new page size just prior to + running the VACUUM command then VACUUM will change the page + size to the new value.

      + +

      If SQLite is compiled with the SQLITE_ENABLE_ATOMIC_WRITE option, + then the default page size is chosen to be the largest page size + less than or equal to SQLITE_MAX_DEFAULT_PAGE_SIZE for which atomic + write is enabled according to the + xDeviceCharacteristics method of the sqlite3_io_methods object for + the database file. If the SQLITE_ENABLE_ATOMIC_WRITE option is + disabled or if xDeviceCharacteristics reports no suitable atomic + write page sizes, then the default page size is the larger of + SQLITE_DEFAULT_PAGE_SIZE + and the sector size as reported by the xSectorSize method of the + sqlite3_io_methods object, but not more than + SQLITE_MAX_DEFAULT_PAGE_SIZE. The normal configuration for SQLite + running on workstations is for atomic write to be + disabled, for the maximum page size to be set to 32768, for + SQLITE_DEFAULT_PAGE_SIZE to be 1024, and for the + maximum default page size to be set to 8192. The default xSectorSize + method on workstation implementations always reports a sector size + of 512 bytes. Hence, + the default page size chosen by SQLite is usually 1024 bytes.
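      A minimal sketch; the 4096-byte value is illustrative and must satisfy the limits above:

      PRAGMA page_size = 4096;             -- only takes immediate effect while the database is empty
      PRAGMA page_size;                    -- query the current page size
      -- As of 3.5.8, an existing database can be converted by setting the size just before VACUUM:
      PRAGMA page_size = 4096;
      VACUUM;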

      +
    • + + + +
    • PRAGMA max_page_count; +
      PRAGMA max_page_count =
      N;

      +

      Query or set the maximum number of pages in the database file. + Both forms of the pragma return the maximum page count. The second + form attempts to modify the maximum page count. The maximum page + count cannot be reduced below the current database size. +

      +
    • + + + +
    • PRAGMA read_uncommitted; +
      PRAGMA read_uncommitted =
      boolean;

      +

      Query, set, or clear READ UNCOMMITTED isolation. The default isolation + level for SQLite is SERIALIZABLE. Any process or thread can select + READ UNCOMMITTED isolation, but SERIALIZABLE will still be used except + between connections that share a common page and schema cache. + Cache sharing is enabled using the sqlite3_enable_shared_cache() API. + Cache sharing is disabled by default. +

      + +

      See SQLite Shared-Cache Mode for additional information.

      +
    • + + + +
    • PRAGMA reverse_unordered_selects; +
      PRAGMA reverse_unordered_selects =
      boolean;

      +

      When enabled, this PRAGMA causes SELECT statements without + an ORDER BY clause to emit their results in the reverse order of what + they normally would. This can help debug applications that are + making invalid assumptions about the result order.

      SQLite makes no + guarantees about the order of results if a SELECT omits the ORDER BY + clause. Even so, the order of results usually does not change from one + run to the next, and so many applications mistakenly come to depend + on the arbitrary output order, whatever that order happens to be. However, + sometimes new versions of SQLite will contain optimizer enhancements + that will cause the output order of queries without ORDER BY clauses + to shift. When that happens, applications that depend on a certain + output order might malfunction. By running the application multiple + times with this pragma both disabled and enabled, cases where the + application makes faulty assumptions about output order can be + identified and fixed early, reducing problems + that might be caused by linking against a different version of SQLite. +

      +
    • + + + +
    • PRAGMA short_column_names; +
      PRAGMA short_column_names =
      boolean;

      +

      Query or change the short-column-names flag. This flag affects + the way SQLite names columns of data returned by SELECT statements. + See the full_column_names pragma for full details. +

      +
    • + + + +
    • PRAGMA synchronous; +
      PRAGMA synchronous =
      + 0 | OFF | 1 | NORMAL | 2 | FULL;

      + +

      Query or change the setting of the "synchronous" flag. + The first (query) form will return the setting as an + integer. When synchronous is FULL (2), the SQLite database engine will + pause at critical moments to make sure that data has actually been + written to the disk surface before continuing. This ensures that if + the operating system crashes or if there is a power failure, the database + will be uncorrupted after rebooting. FULL synchronous is very + safe, but it is also slower. + When synchronous is NORMAL, the SQLite database + engine will still pause at the most critical moments, but less often + than in FULL mode. There is a very small (though non-zero) chance that + a power failure at just the wrong time could corrupt the database in + NORMAL mode. But in practice, you are more likely to suffer + a catastrophic disk failure or some other unrecoverable hardware + fault. + With synchronous OFF (0), SQLite continues without pausing + as soon as it has handed data off to the operating system. + If the application running SQLite crashes, the data will be safe, but + the database might become corrupted if the operating system + crashes or the computer loses power before that data has been written + to the disk surface. On the other hand, some + operations are as much as 50 or more times faster with synchronous OFF. +

      +

      The default setting is synchronous=FULL. +
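      A minimal sketch showing the keyword and numeric forms:

      PRAGMA synchronous;                  -- query: returns 0 (OFF), 1 (NORMAL), or 2 (FULL)
      PRAGMA synchronous = NORMAL;
      PRAGMA synchronous = 2;              -- equivalent to FULL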

      +
    • + + + + +
    • PRAGMA temp_store; +
      PRAGMA temp_store =
      + 0 | DEFAULT | 1 | FILE | 2 | MEMORY;

      + +

      Query or change the setting of the "temp_store" parameter. + When temp_store is DEFAULT (0), the compile-time C preprocessor macro + SQLITE_TEMP_STORE is used to determine where temporary tables and indices + are stored. When + temp_store is MEMORY (2) temporary tables and indices are kept in + as if they were pure in-memory databases memory. + When temp_store is FILE (1) temporary tables and indices are stored + in a file. The temp_store_directory pragma can be used to specify + the directory containing temporary files when + FILE is specified. When the temp_store setting is changed, + all existing temporary tables, indices, triggers, and views are + immediately deleted.

      + +

      It is possible for the library compile-time C preprocessor symbol + SQLITE_TEMP_STORE to override this pragma setting. + The following table summarizes + the interaction of the SQLITE_TEMP_STORE preprocessor macro and the + temp_store pragma:

      + +
      SQLITE_TEMP_STORE   PRAGMA temp_store   Storage used for TEMP tables and indices
      -----------------   -----------------   ----------------------------------------
      0                   any                 file
      1                   0                   file
      1                   1                   file
      1                   2                   memory
      2                   0                   memory
      2                   1                   file
      2                   2                   memory
      3                   any                 memory
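      A minimal sketch (remember that changing the setting deletes existing temporary objects):

      PRAGMA temp_store = MEMORY;          -- keep temporary tables and indices in memory
      PRAGMA temp_store;                   -- query: returns 0, 1, or 2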
    • +
      + + + +
    • PRAGMA temp_store_directory; +
      PRAGMA temp_store_directory = '
      directory-name';

      +

      Query or change the setting of the "temp_store_directory" - the + directory where files used for storing temporary tables and indices + are kept.

      + +

      When the temp_store_directory setting is changed, all existing temporary + tables, indices, triggers, and views are immediately deleted. In + practice, temp_store_directory should be set immediately after the + database is opened.

      + +

      Changing the temp_store_directory setting is not threadsafe. + Never change the temp_store_directory setting if another thread + within the application is running any SQLite interface at the same time. + Doing so results in undefined behavior. Changing the temp_store_directory + setting writes to the sqlite3_temp_directory global + variable and that global variable is not protected by a mutex.

      + +

      The value directory-name should be enclosed in single quotes. + To revert the directory to the default, set the directory-name to + an empty string, e.g., PRAGMA temp_store_directory = ''. An + error is raised if directory-name is not found or is not + writable.

      + +

      The default directory for temporary files depends on the OS. Some + OS interfaces may choose to ignore this variable and place temporary + files in some other directory different from the directory specified + here. In that sense, this pragma is only advisory.
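      A brief sketch; the directory name is purely illustrative and must exist and be writable:

      PRAGMA temp_store_directory = '/var/tmp/sqlite';
      PRAGMA temp_store_directory = '';    -- revert to the default location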

      +
    • +
    + + +
    +

    Pragmas to query the database schema

    + + +
      + + +
    • PRAGMA collation_list;

      +

      Return a list of the collating sequences defined for the current + database connection.

    • + + + +
    • PRAGMA database_list;

      +

      For each open database, invoke the callback function once with + information about that database. Arguments include the index and + the name the database was attached with. The first row will be for + the main database. The second row will be for the database used to + store temporary tables.

    • + + + +
    • PRAGMA foreign_key_list(table-name);

      +

      For each foreign key that references a column in the argument + table, invoke the callback function with information about that + foreign key. The callback function will be invoked once for each + column in each foreign key.

    • + + + +
    • PRAGMA freelist_count;

      +

      Return the number of unused pages in the database file. Running + a "PRAGMA incremental_vacuum(N);" + command with a large value of N will shrink the database file by this + number of pages.

    • + + + +
    • PRAGMA index_info(index-name);

      +

      For each column that the named index references, invoke the + callback function + once with information about that column, including the column name, + and the column number.

    • + + + +
    • PRAGMA index_list(table-name);

      +

      For each index on the named table, invoke the callback function + once with information about that index. Arguments include the + index name and a flag to indicate whether or not the index must be + unique.

    • + + + +
    • PRAGMA page_count;

      +

      Return the total number of pages in the database file.

    • + + + +
    • PRAGMA table_info(table-name);

      +

      For each column in the named table, invoke the callback function + once with information about that column, including the column name, + data type, whether or not the column can be NULL, and the default + value for the column.
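      A hedged sketch of these schema-query pragmas against a hypothetical table; the exact
      result columns (such as cid, name, type, notnull, dflt_value, pk for table_info) can
      vary between SQLite releases:

      CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT NOT NULL DEFAULT 'x');
      PRAGMA table_info(t1);               -- one row per column of t1
      PRAGMA index_list(t1);               -- one row per index on t1
      PRAGMA database_list;                -- one row per attached database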

    • +
    + + +
    +

    Pragmas to query/modify version values

    + + +
      + + +
    • PRAGMA schema_version; +
      PRAGMA schema_version =
      integer ; +
      PRAGMA user_version; +
      PRAGMA user_version =
      integer ; + + +

      The pragmas schema_version and user_version are used to set or get + the value of the schema-version and user-version, respectively. Both + the schema-version and the user-version are 32-bit signed integers + stored in the database header.

      + +

      The schema-version is usually only manipulated internally by SQLite. + It is incremented by SQLite whenever the database schema is modified + (by creating or dropping a table or index). The schema version is + used by SQLite each time a query is executed to ensure that the + internal cache of the schema used when compiling the SQL query matches + the schema of the database against which the compiled query is actually + executed. Subverting this mechanism by using "PRAGMA schema_version" + to modify the schema-version is potentially dangerous and may lead + to program crashes or database corruption. Use with caution!

      + +

      The user-version is not used internally by SQLite. It may be used by + applications for any purpose.
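      A minimal sketch of using user_version as an application-defined schema revision number:

      PRAGMA user_version;                 -- 0 in a freshly created database
      PRAGMA user_version = 7;
      PRAGMA user_version;                 -- now returns 7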

      +
    • +
    + + +
    +

    Pragmas to debug the library

    + + +
      + + +
    • PRAGMA integrity_check; +
      PRAGMA integrity_check(
      integer)

      +

      This pragma does an integrity check of the entire database. It + looks for out-of-order records, missing pages, malformed records, and + corrupt indices. + If any problems are found, then strings are returned (as multiple + rows with a single column per row) which describe + the problems. At most integer errors will be reported + before the analysis quits. The default value for integer + is 100. If no errors are found, a single row with the value "ok" is + returned.

    • + + + +
    • PRAGMA quick_check; +
      PRAGMA quick_check(
      integer)

      +

      The pragma is like integrity_check except that it does not verify + that index content matches table content. By skipping the verification + of index content, quick_check is able to run much faster than + integrity_check. Otherwise the two pragmas are the same. +
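      A minimal sketch of both checks:

      PRAGMA integrity_check;              -- returns a single row "ok" if no problems are found
      PRAGMA integrity_check(10);          -- stop after reporting at most 10 problems
      PRAGMA quick_check;                  -- same, but skips matching index content to table content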

    • + + + +
    • PRAGMA parser_trace = boolean;

      + +

      Turn tracing of the SQL parser inside of the + SQLite library on and off. This is used for debugging. + This only works if the library is compiled with the SQLITE_DEBUG + compile-time option. +

    • + + + +
    • PRAGMA vdbe_trace = boolean;

      + +

      Turn tracing of the virtual database engine inside of the + SQLite library on and off. This is used for debugging. See the + VDBE documentation for more + information.

    • + + + +
    • PRAGMA vdbe_listing = boolean;

      + +

      Turn listings of virtual machine programs on and off. + When listing is on, the entire content of a program is printed + just prior to beginning execution. The statement + executes normally after the listing is printed. + This is used for debugging. See the + VDBE documentation for more + information.

    • +
    +
    +This page last modified 2009/06/09 00:53:57 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/pragma.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/pragma.tcl --- sqlite3-3.4.2/www/pragma.tcl 2007-07-24 11:22:58.000000000 +0100 +++ sqlite3-3.6.16/www/pragma.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,635 +0,0 @@ -# -# Run this Tcl script to generate the pragma.html file. -# -set rcsid {$Id: pragma.tcl,v 1.27 2007/07/24 10:22:58 drh Exp $} -source common.tcl -header {Pragma statements supported by SQLite} - -proc Section {name {label {}}} { - puts "\n
    " - if {$label!=""} { - puts "" - } - puts "

    $name

    \n" -} - -puts { -

    The PRAGMA command is a special command used to -modify the operation of the SQLite library or to query the library for -internal (non-table) data. The PRAGMA command is issued using the same -interface as other SQLite commands (e.g. SELECT, INSERT) but is -different in the following important respects: -

    -
      -
    • Specific pragma statements may be removed and others added in future - releases of SQLite. Use with caution! -
    • No error messages are generated if an unknown pragma is issued. - Unknown pragmas are simply ignored. This means if there is a typo in - a pragma statement the library does not inform the user of the fact. -
    • Some pragmas take effect during the SQL compilation stage, not the - execution stage. This means if using the C-language sqlite3_prepare(), - sqlite3_step(), sqlite3_finalize() API (or similar in a wrapper - interface), the pragma may be applied to the library during the - sqlite3_prepare() call. -
    • The pragma command is unlikely to be compatible with any other SQL - engine. -
    - -

    The available pragmas fall into four basic categories:

    - -} - -Section {PRAGMA command syntax} syntax - -Syntax {sql-statement} { -PRAGMA [= ] | -PRAGMA () -} - -puts { -

    The pragmas that take an integer value also accept -symbolic names. The strings "on", "true", and "yes" -are equivalent to 1. The strings "off", "false", -and "no" are equivalent to 0. These strings are case- -insensitive, and do not require quotes. An unrecognized string will be -treated as 1, and will not generate an error. When the value -is returned it is as an integer.

    -} - -Section {Pragmas to modify library operation} modify - -puts { -
      - -
    • PRAGMA auto_vacuum;
      - PRAGMA auto_vacuum =
      - 0 | none | 1 | full | 2 | incremental;

      -

      Query or set the auto-vacuum flag in the database.

      - -

      Normally, (that is to say when auto_vacuum is 0 or "none") - when a transaction that deletes data from a database is - committed, the database file remains the same size. Unused database file - pages are added to a "freelist" are reused for subsequent inserts. The - database file does not shrink. - In this mode the VACUUM - command can be used to reclaim unused space.

      - -

      When the auto-vacuum flag is 1 (full), the freelist pages are - moved to the end of the file and the file is truncated to remove - the freelist pages at every commit. - Note, however, that auto-vacuum only truncates the freelist pages - from the file. Auto-vacuum does not defragment the database nor - repack individual database pages the way that the - VACUUM command does. In fact, because - it moves pages around within the file, auto-vacuum can actually - make fragmentation worse.

      - -

      Auto-vacuuming is only possible if the database stores some - additional information that allows each database page to be - traced backwards to its referer. Therefore, auto-vacuuming must - be turned on before any tables are created. It is not possible - to enable or disable auto-vacuum after a table has been created.

      - -

      When the value of auto-vacuum is 2 (incremental) then the additional - information needed to do autovacuuming is stored in the database file - but autovacuuming does not occur automatically at each commit as it - does with auto_vacuum==full. In incremental mode, the separate - incremental_vacuum pragma must - be invoked to cause the vacuum to occur.

      - -

      The database connection can be changed between full and incremental - autovacuum mode at will. However, the connection cannot be changed - in and out of the "none" mode after any table has been created in the - database. -

    • - - -
    • PRAGMA cache_size; -
      PRAGMA cache_size =
      Number-of-pages;

      -

      Query or change the maximum number of database disk pages that SQLite - will hold in memory at once. Each page uses about 1.5K of memory. - The default cache size is 2000. If you are doing UPDATEs or DELETEs - that change many rows of a database and you do not mind if SQLite - uses more memory, you can increase the cache size for a possible speed - improvement.

      -

      When you change the cache size using the cache_size pragma, the - change only endures for the current session. The cache size reverts - to the default value when the database is closed and reopened. Use - the default_cache_size - pragma to check the cache size permanently.

    • - - -
    • PRAGMA case_sensitive_like; -
      PRAGMA case_sensitive_like =
      0 | 1;

      -

      The default behavior of the LIKE operator is to ignore case - for latin1 characters. Hence, by default 'a' LIKE 'A' is - true. The case_sensitive_like pragma can be turned on to change - this behavior. When case_sensitive_like is enabled, - 'a' LIKE 'A' is false but 'a' LIKE 'a' is still true.

      -
    • - - -
    • PRAGMA count_changes; -
      PRAGMA count_changes =
      0 | 1;

      -

      Query or change the count-changes flag. Normally, when the - count-changes flag is not set, INSERT, UPDATE and DELETE statements - return no data. When count-changes is set, each of these commands - returns a single row of data consisting of one integer value - the - number of rows inserted, modified or deleted by the command. The - returned change count does not include any insertions, modifications - or deletions performed by triggers.

      - - -
    • PRAGMA default_cache_size; -
      PRAGMA default_cache_size =
      Number-of-pages;

      -

      Query or change the maximum number of database disk pages that SQLite - will hold in memory at once. Each page uses 1K on disk and about - 1.5K in memory. - This pragma works like the - cache_size - pragma with the additional - feature that it changes the cache size persistently. With this pragma, - you can set the cache size once and that setting is retained and reused - every time you reopen the database.

    • - - -
    • PRAGMA default_synchronous;

      -

      This pragma was available in version 2.8 but was removed in version - 3.0. It is a dangerous pragma whose use is discouraged. To help - dissuide users of version 2.8 from employing this pragma, the documentation - will not tell you what it does.

    • - - - -
    • PRAGMA empty_result_callbacks; -
      PRAGMA empty_result_callbacks =
      0 | 1;

      -

      Query or change the empty-result-callbacks flag.

      -

      The empty-result-callbacks flag affects the sqlite3_exec API only. - Normally, when the empty-result-callbacks flag is cleared, the - callback function supplied to the sqlite3_exec() call is not invoked - for commands that return zero rows of data. When empty-result-callbacks - is set in this situation, the callback function is invoked exactly once, - with the third parameter set to 0 (NULL). This is to enable programs - that use the sqlite3_exec() API to retrieve column-names even when - a query returns no data. -

      - - -
    • PRAGMA encoding; -
      PRAGMA encoding = "UTF-8"; -
      PRAGMA encoding = "UTF-16"; -
      PRAGMA encoding = "UTF-16le"; -
      PRAGMA encoding = "UTF-16be";

      -

      In first form, if the main database has already been - created, then this pragma returns the text encoding used by the - main database, one of "UTF-8", "UTF-16le" (little-endian UTF-16 - encoding) or "UTF-16be" (big-endian UTF-16 encoding). If the main - database has not already been created, then the value returned is the - text encoding that will be used to create the main database, if - it is created by this session.

      -

      The second and subsequent forms of this pragma are only useful if - the main database has not already been created. In this case the - pragma sets the encoding that the main database will be created with if - it is created by this session. The string "UTF-16" is interpreted - as "UTF-16 encoding using native machine byte-ordering". If the second - and subsequent forms are used after the database file has already - been created, they have no effect and are silently ignored.

      - -

      Once an encoding has been set for a database, it cannot be changed.

      - -

      Databases created by the ATTACH command always use the same encoding - as the main database.

      -
    • - - -
    • PRAGMA full_column_names; -
      PRAGMA full_column_names =
      0 | 1;

      -

      Query or change the full-column-names flag. This flag affects - the way SQLite names columns of data returned by SELECT statements - when the expression for the column is a table-column name or the - wildcard "*". Normally, such result columns are named - <table-name/alias><column-name> if the SELECT statement joins - two or - more tables together, or simply <column-name> if the SELECT - statement queries a single table. When the full-column-names flag - is set, such columns are always named <table-name/alias> - <column-name> regardless of whether or not a join is performed. -

      -

      If both the short-column-names and full-column-names are set, - then the behaviour associated with the full-column-names flag is - exhibited. -

      -
    • - - -
    • PRAGMA fullfsync -
      PRAGMA fullfsync =
      0 | 1;

      -

      Query or change the fullfsync flag. This flag affects - determines whether or not the F_FULLFSYNC syncing method is used - on systems that support it. The default value is off. As of this - writing (2006-02-10) only Mac OS X supports F_FULLFSYNC. -

      -
    • - - -
    • PRAGMA incremental_vacuum(N);

      -

      The incremental_vacuum pragma causes up to N pages to - be removed from the freelist. The database file is truncated by - the same amount. The incremental_vacuum pragma has no effect if - the database is not in - auto_vacuum==incremental mode - or if there are no pages on the freelist. If there are fewer than - N pages on the freelist, then the entire freelist is cleared.

      - -

      As of version 3.4.0 (the first version that supports - incremental_vacuum) this feature is still experimental. Possible - future changes include enhancing incremental vacuum to do - defragmentation and node repacking just as the full-blown - VACUUM command does. And - incremental vacuum may be promoted from a pragma to a separate - SQL command, or perhaps some variation on the VACUUM command. - Programmers are cautioned to not become enamored with the - current syntax or functionality as it is likely to change.

      -
    • - - - -
    • PRAGMA legacy_file_format; -
      PRAGMA legacy_file_format = ON | OFF

      -

      This pragma sets or queries the value of the legacy_file_format - flag. When this flag is on, new SQLite databases are created in - a file format that is readable and writable by all versions of - SQLite going back to 3.0.0. When the flag is off, new databases - are created using the latest file format which might not be - readable or writable by older versions of SQLite.

      - -

      This flag only affects newly created databases. It has no - effect on databases that already exist.

      -
    • - - -
    • PRAGMA locking_mode; -
      PRAGMA locking_mode = NORMAL | EXCLUSIVE

      -

      This pragma sets or queries the database connection locking-mode. - The locking-mode is either NORMAL or EXCLUSIVE. - -

      In NORMAL locking-mode (the default), a database connection - unlocks the database file at the conclusion of each read or - write transaction. When the locking-mode is set to EXCLUSIVE, the - database connection never releases file-locks. The first time the - database is read in EXCLUSIVE mode, a shared lock is obtained and - held. The first time the database is written, an exclusive lock is - obtained and held.

      - -

      Database locks obtained by a connection in EXCLUSIVE mode may be - released either by closing the database connection, or by setting the - locking-mode back to NORMAL using this pragma and then accessing the - database file (for read or write). Simply setting the locking-mode to - NORMAL is not enough - locks are not be released until the next time - the database file is accessed.

      - -

      There are two reasons to set the locking-mode to EXCLUSIVE. One - is if the application actually wants to prevent other processes from - accessing the database file. The other is that a small number of - filesystem operations are saved by optimizations enabled in this - mode. This may be significant in embedded environments.

      - -

      When the locking_mode pragma specifies a particular database, - for example:

      - -
      -PRAGMA main.locking_mode=EXCLUSIVE; -
      - -

      Then the locking mode applies only to the named database. If no - database name qualifier preceeds the "locking_mode" keyword then - the locking mode is applied to all databases, including any new - databases added by subsequent ATTACH - commands.

      - -

      The "temp" database (in which TEMP tables and indices are stored) - always uses exclusive locking mode. The locking mode of temp cannot - be changed. All other databases use the normal locking mode by default - and are effected by this pragma.

      -
    • - - -
    • PRAGMA page_size; -
      PRAGMA page_size =
      bytes;

      -

      Query or set the page-size of the database. The page-size - may only be set if the database has not yet been created. The page - size must be a power of two greater than or equal to 512 and less - than or equal to 8192. The upper limit may be modified by setting - the value of macro SQLITE_MAX_PAGE_SIZE during compilation. The - maximum upper bound is 32768. -

      -
    • - - -
    • PRAGMA max_page_count; -
      PRAGMA max_page_count =
      N;

      -

      Query or set the maximum number of pages in the database file. - Both forms of the pragma return the maximum page count. The second - form attempts to modify the maximum page count. The maximum page - count cannot be reduced below the current database size. -

      -
    • - - -
    • PRAGMA read_uncommitted; -
      PRAGMA read_uncommitted =
      0 | 1;

      -

      Query, set, or clear READ UNCOMMITTED isolation. The default isolation - level for SQLite is SERIALIZABLE. Any process or thread can select - READ UNCOMMITTED isolation, but SERIALIZABLE will still be used except - between connections that share a common page and schema cache. - Cache sharing is enabled using the - - sqlite3_enable_shared_cache() API and is only available between - connections running the same thread. Cache sharing is off by default. -

      -
    • - - -
    • PRAGMA short_column_names; -
      PRAGMA short_column_names =
      0 | 1;

      -

      Query or change the short-column-names flag. This flag affects - the way SQLite names columns of data returned by SELECT statements - when the expression for the column is a table-column name or the - wildcard "*". Normally, such result columns are named - <table-name/alias>lt;column-name> if the SELECT statement - joins two or more tables together, or simply <column-name> if - the SELECT statement queries a single table. When the short-column-names - flag is set, such columns are always named <column-name> - regardless of whether or not a join is performed. -

      -

      If both the short-column-names and full-column-names are set, - then the behaviour associated with the full-column-names flag is - exhibited. -

      -
    • - - -
    • PRAGMA synchronous; -
      PRAGMA synchronous = FULL;
      (2) -
      PRAGMA synchronous = NORMAL;
      (1) -
      PRAGMA synchronous = OFF;
      (0)

      -

      Query or change the setting of the "synchronous" flag. - The first (query) form will return the setting as an - integer. When synchronous is FULL (2), the SQLite database engine will - pause at critical moments to make sure that data has actually been - written to the disk surface before continuing. This ensures that if - the operating system crashes or if there is a power failure, the database - will be uncorrupted after rebooting. FULL synchronous is very - safe, but it is also slow. - When synchronous is NORMAL, the SQLite database - engine will still pause at the most critical moments, but less often - than in FULL mode. There is a very small (though non-zero) chance that - a power failure at just the wrong time could corrupt the database in - NORMAL mode. But in practice, you are more likely to suffer - a catastrophic disk failure or some other unrecoverable hardware - fault. - With synchronous OFF (0), SQLite continues without pausing - as soon as it has handed data off to the operating system. - If the application running SQLite crashes, the data will be safe, but - the database might become corrupted if the operating system - crashes or the computer loses power before that data has been written - to the disk surface. On the other hand, some - operations are as much as 50 or more times faster with synchronous OFF. -

      -

      In SQLite version 2, the default value is NORMAL. For version 3, the - default was changed to FULL. -

      -
    • - - - -
    • PRAGMA temp_store; -
      PRAGMA temp_store = DEFAULT;
      (0) -
      PRAGMA temp_store = FILE;
      (1) -
      PRAGMA temp_store = MEMORY;
      (2)

      -

      Query or change the setting of the "temp_store" parameter. - When temp_store is DEFAULT (0), the compile-time C preprocessor macro - TEMP_STORE is used to determine where temporary tables and indices - are stored. When - temp_store is MEMORY (2) temporary tables and indices are kept in memory. - When temp_store is FILE (1) temporary tables and indices are stored - in a file. The - temp_store_directory pragma can be used to specify the directory - containing this file. - FILE is specified. When the temp_store setting is changed, - all existing temporary tables, indices, triggers, and views are - immediately deleted.

      - -

      It is possible for the library compile-time C preprocessor symbol - TEMP_STORE to override this pragma setting. The following table summarizes - the interaction of the TEMP_STORE preprocessor macro and the - temp_store pragma:

      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      TEMP_STOREPRAGMA
      temp_store
      Storage used for
      TEMP tables and indices
      0anyfile
      10file
      11file
      12memory
      20memory
      21file
      22memory
      3anymemory
      -
      -
    • -
      - - -
    • PRAGMA temp_store_directory; -
      PRAGMA temp_store_directory = 'directory-name';

      -

      Query or change the setting of the "temp_store_directory" - the - directory where files used for storing temporary tables and indices - are kept. This setting lasts for the duration of the current connection - only and resets to its default value for each new connection opened. - -

      When the temp_store_directory setting is changed, all existing temporary - tables, indices, triggers, and viewers are immediately deleted. In - practice, temp_store_directory should be set immediately after the - database is opened.

      - -

      The value directory-name should be enclosed in single quotes. - To revert the directory to the default, set the directory-name to - an empty string, e.g., PRAGMA temp_store_directory = ''. An - error is raised if directory-name is not found or is not - writable.

      - -

      The default directory for temporary files depends on the OS. For - Unix/Linux/OSX, the default is the is the first writable directory found - in the list of: /var/tmp, /usr/tmp, /tmp, and - current-directory. For Windows NT, the default - directory is determined by Windows, generally - C:\Documents and Settings\user-name\Local Settings\Temp\. - Temporary files created by SQLite are unlinked immediately after - opening, so that the operating system can automatically delete the - files when the SQLite process exits. Thus, temporary files are not - normally visible through ls or dir commands.

      - -
    • -
    -} - -Section {Pragmas to query the database schema} schema - -puts { -
      - -
    • PRAGMA database_list;

      -

      For each open database, invoke the callback function once with - information about that database. Arguments include the index and - the name the database was attached with. The first row will be for - the main database. The second row will be for the database used to - store temporary tables.

    • - - -
    • PRAGMA foreign_key_list(table-name);

      -

      For each foreign key that references a column in the argument - table, invoke the callback function with information about that - foreign key. The callback function will be invoked once for each - column in each foreign key.

    • - - -
    • PRAGMA [database].freelist_count;

      -

      Return the number of unused pages in the database file. Running - a "PRAGMA incremental_vaccum(N);" - command with a large value of N will shrink the database file by this - number of pages.

    • - - -
    • PRAGMA index_info(index-name);

      -

      For each column that the named index references, invoke the - callback function - once with information about that column, including the column name, - and the column number.

    • - - -
    • PRAGMA index_list(table-name);

      -

      For each index on the named table, invoke the callback function - once with information about that index. Arguments include the - index name and a flag to indicate whether or not the index must be - unique.

    • - - -
    • PRAGMA table_info(table-name);

      -

      For each column in the named table, invoke the callback function - once with information about that column, including the column name, - data type, whether or not the column can be NULL, and the default - value for the column.

    • -
    -} - -Section {Pragmas to query/modify version values} version - -puts { - -
      - - -
    • PRAGMA [database.]schema_version; -
      PRAGMA [database.]schema_version =
      integer ; -
      PRAGMA [database.]user_version; -
      PRAGMA [database.]user_version =
      integer ; - - -

      The pragmas schema_version and user_version are used to set or get - the value of the schema-version and user-version, respectively. Both - the schema-version and the user-version are 32-bit signed integers - stored in the database header.

      - -

      The schema-version is usually only manipulated internally by SQLite. - It is incremented by SQLite whenever the database schema is modified - (by creating or dropping a table or index). The schema version is - used by SQLite each time a query is executed to ensure that the - internal cache of the schema used when compiling the SQL query matches - the schema of the database against which the compiled query is actually - executed. Subverting this mechanism by using "PRAGMA schema_version" - to modify the schema-version is potentially dangerous and may lead - to program crashes or database corruption. Use with caution!

      - -

      The user-version is not used internally by SQLite. It may be used by - applications for any purpose.

      -
    • -
    -} - -Section {Pragmas to debug the library} debug - -puts { -
      - -
    • PRAGMA integrity_check; -
      PRAGMA integrity_check(
      integer)

      -

      The command does an integrity check of the entire database. It - looks for out-of-order records, missing pages, malformed records, and - corrupt indices. - If any problems are found, then strings are returned (as multiple - rows with a single column per row) which describe - the problems. At most integer errors will be reported - before the analysis quits. The default value for integer - is 100. If no errors are found, a single row with the value "ok" is - returned.

    • - - -
    • PRAGMA parser_trace = ON; (1) -
      PRAGMA parser_trace = OFF;
      (0)

      -

      Turn tracing of the SQL parser inside of the - SQLite library on and off. This is used for debugging. - This only works if the library is compiled without the NDEBUG macro. -

    • - - -
    • PRAGMA vdbe_trace = ON; (1) -
      PRAGMA vdbe_trace = OFF;
      (0)

      -

      Turn tracing of the virtual database engine inside of the - SQLite library on and off. This is used for debugging. See the - VDBE documentation for more - information.

    • - - -
    • PRAGMA vdbe_listing = ON; (1) -
      PRAGMA vdbe_listing = OFF;
      (0)

      -

      Turn listings of virtual machine programs on and off. - With listing is on, the entire content of a program is printed - just prior to beginning execution. This is like automatically - executing an EXPLAIN prior to each statement. The statement - executes normally after the listing is printed. - This is used for debugging. See the - VDBE documentation for more - information.

    • -
    - -} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/pressrelease-20071212.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/pressrelease-20071212.html --- sqlite3-3.4.2/www/pressrelease-20071212.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/pressrelease-20071212.html 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,123 @@ + + +

    FOR IMMEDIATE RELEASE:

    + +

    SQLite Consortium Launches With Mozilla And Symbian As Charter Members

    + + +

    Ensures independent technical direction for world's +most deployed SQL database engine

    + +

    Charlotte, North Carolina - December 12, 2007 - The SQLite Consortium, +a new membership association dedicated to maintaining SQLite as a fully open +and independent product, was formally announced today. Mozilla and Symbian Ltd. +have joined the SQLite Consortium as charter members.

    + +

    SQLite is a compact, high efficiency, high reliability, embeddable SQL database +engine. The source code to SQLite is in the public domain and is available with +no associated fees. SQLite is the most deployed SQL database engine in the world +and is currently used in a wide range of commercial software products and electronic +devices from leading manufacturers. SQLite is found today in many mobile phones, +MP3 players, set-top boxes, and PCs.

    + +

    The mission of the SQLite Consortium is to continue developing and enhancing +SQLite as a product that anyone may use without paying royalties or licensing fees. +Members of the SQLite Consortium provide funding to enable this mission and in +return receive enterprise-level technical support. Technical control and direction +of SQLite remains entirely with the SQLite developers.

    + +

    Mozilla, developer of the popular open-source Firefox web browser, and Symbian, +the market-leading open operating system for advanced data-enabled smartphones, +both deploy the SQLite database engine in their products. As charter members of +the Consortium, Mozilla and Symbian are ensuring the development and support of +SQLite as a freely accessible and public domain software asset.

    + +

    "SQLite has become a popular embedded database because it is lightweight, fast, +and open source," said Michael Schroepfer, Vice President of Engineering, Mozilla. +"As a charter member of the SQLite Consortium, Mozilla is excited to help ensure +SQLite remains a vibrant and open technology, in line with our mission to promote +choice and innovation on the Internet."

    + +

    "The SQLite Consortium will help set the standards for database management +which are essential in smartphone functionality and will also help create a pool +of developers, highly-skilled in SQLite for future mobile phone development, " +said Bruce Carney, Director, Developer Programmes & Services, Symbian. +"Our involvement with the SQL Consortium not only demonstrates Symbian's commitment +to open standards in the industry, but as mobile phones become increasingly powerful +and smartphones become increasingly popular, we are focused on ensuring that +desktop developers, who move to the mobile space, have the easiest and most +productive experience possible."

    + +

    SQLite is a winner of the 2005 Google/O'Reilly Open Source Award. Additional +information regarding the SQLite Consortium is available at the SQLite website, +http://www.sqlite.org/.

    + +
    +# # # +
    + +

    About SQLite

    + +

    SQLite is a software library that implements a self-contained, embeddable, +serverless, zero-configuration, transactional SQL database engine. The code for +SQLite is in the public domain and is free for any use, commercial or private. +SQLite is currently found in countless software titles and electronic devices.

    + +

SQLite was originally developed and released in 2000 by Dr. D. Richard Hipp. +The code continues to be maintained and enhanced by an international team +of developers under Hipp's direction.

    + + +

    +CONTACT INFORMATION:
    +SQLite Consortium
    +Dr. D. Richard Hipp
    +6200 Maple Cove Lane
    +Charlotte, NC 28269
    +Email: drh@sqlite.org
    +http://www.sqlite.org/
    +Tel: +1.704.948.4565 +

    + + +

    About Mozilla

    + +

    Mozilla is a global community dedicated to building free, open source products +and technologies that improve the online experience for people everywhere. +Mozilla works in the open with a highly disciplined, transparent and cooperative +development process, under the umbrella of the non-profit Mozilla Foundation. +As a wholly owned subsidiary, the Mozilla Corporation organizes the development +and marketing of Mozilla products. This unique structure has enabled Mozilla to +financially support and cultivate competitive, viable community innovation. +For more information, visit www.mozilla.com.

    + +

    CONTACT INFORMATION:
    +Mozilla Corporation
    +Jessica Waight
    +Tel: +1.415.345.4754
    +Email: jwaight@outcastpr.com +

    + +

    About Symbian Limited

    + +

    Symbian is a software licensing company that develops and licenses Symbian OS, +the market-leading open operating system for advanced, data-enabled mobile phones +known as smartphones.

    + +

    Symbian licenses Symbian OS to the world's leading handset manufacturers +and has built close co-operative business relationships with leading companies +across the mobile industry. During Q3 2007, 20.4 million Symbian smartphones +were sold worldwide to over 250 major network operators, bringing the total +number of Symbian smartphones shipped up to 30 September 2007 to 165 million.

    + +

    Symbian has its headquarters in London, United Kingdom, with offices in the +United States, United Kingdom, Asia (India, P.R. China, and Korea) and Japan. +For more information, please visit www.symbian.com.

    + +

    CONTACT INFORMATION:
    +Symbian Global Press Office
    +Karen Hamblen
    ++44 207 154 1707
    +press@symbian.com +

    + diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/privatebranch.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/privatebranch.html --- sqlite3-3.4.2/www/privatebranch.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/privatebranch.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,424 @@ + + +Private Branches Of SQLite + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    +Maintaining Private Branches Of SQLite +

    + +

    1.0 Introduction

    + +

SQLite is designed to meet most developers' needs without any +changes or customization. When changes are needed, they can normally +be accomplished using start-time (1) +or runtime +(2) +(3) +(4) configuration methods +or via compile-time options. It is very rare that an application +developer will need to edit the SQLite source code in order to +incorporate SQLite into a product.

    + +

    We call custom modifications to the SQLite source code that are held +for the use of a single application a "private branch". When a private +branch becomes necessary, the application developer must take on the +task of keeping the private branch in synchronization with the public +SQLite sources. This is tedious. It can also be tricky, since while +the SQLite file format and published interfaces are very stable, the +internal implementation of SQLite changes quite rapidly. Hundreds or +thousands of lines of code might change for any given SQLite point release. +

    + +

This article outlines one possible method for keeping a private branch +of SQLite in sync with the public SQLite source code. +There are many ways of maintaining a private branch, of course. +Nobody is compelled to use the method described here. +This article is not trying to impose a particular procedure on +maintainers of private branches. The point of this article is to offer +an example of one process for maintaining a private branch which can +be used as a template for designing processes best suited for the +circumstances of each individual project.

    + + +

    2.0 The Basic Idea

    + + +

    We propose to use the +fossil software configuration management +system to set up two branches. One branch (the "public branch" or "trunk") +contains the published SQLite sources and the other branch is the +private branch which contains the code that is customized for the project. +Whenever a new public release of SQLite is made, that release is added to +the public branch and then the changes are merged into the private branch.

    + +

    This document proposes to use +fossil, +but any other distributed software configuration management system such as +monotone or +mercurial (a.k.a. "hg"), or +git could serve just as well. +The concept will be the same, +though the specifics of the procedure will vary.

    + +

The diagram at the right illustrates the concept. +One begins with a standard SQLite release. For the +sake of example, suppose that one intends to create a +private branch off of SQLite version 3.6.15. In the +diagram this is version (1). The +maintainer makes an exact copy of the baseline +SQLite into the branch space, shown as version (2). +Note that (1) and (2) are exactly the same. Then +the maintainer applies the private changes to +version (2) resulting in version (3). In other words, +version (3) is SQLite version 3.6.15 plus edits.

    + +

Later, SQLite version 3.6.16 is released, as shown +by circle (4) in the diagram. At that point, the private +branch maintainer does a merge which takes all of the +changes going from (1) to (4) and applies those changes to +(3). The result is version (5), which is SQLite 3.6.16 +plus edits.

    + +

    There might be merge conflicts. In other words, it might +be that the changes from (2) to (3) are incompatible with the +changes from (1) to (4). In that case, the maintainer will +have to manually resolve the conflicts. Hopefully conflicts +will not come up that often. Conflicts are less likely to +occur when the private edits are kept to a minimum.

    + +

    The cycle above can be repeated many times. The +diagram shows a third SQLite release, 3.6.17 in +circle (6). The private branch maintainer can do +another merge in order to incorporate the changes +moving from (4) to (6) into the private branch, resulting +in version (7).

    + +

    3.0 The Procedure

    + +

    The remainder of this document will guide the reader through +the steps needed to maintain a private branch. The general idea +is the same as outlined above. This section merely provides more +detail.

    + +

We emphasize again that these steps are not intended to be the only +acceptable method for maintaining a private branch. This approach +is one of many. Use this document as a baseline for preparing +project-specific procedures. Do not be afraid to experiment.

    + +

    3.1 Obtain The Software

    + +

    Fossil is a computer program +that must be installed on your machine before you use it. +Fortunately, installing fossil is very easy. Fossil is a single +"*.exe" file that you simply download and run. To uninstall fossil, +simply delete the exe file. + +Detailed instructions for installing and getting started with +fossil are available on the +fossil website.

    + +

    3.2 Create A Project Repository

    + +

    Create a fossil repository to host the private branch using the +following command:

    + +
    +fossil new private-project.fossil
    +
    + +

    You can call your project anything you like. The ".fossil" +suffix is optional. For this document, we will continue to call the +project "private-project.fossil". Note that +private-project.fossil is an ordinary disk file (actually an +SQLite database) that will contain your complete project history. +You can make a backup of the project simply by making a copy of that +one file.

    + +

    If you want to configure the new project, type:

    + +
    +fossil ui private-project.fossil
    +
    + +

    The "ui" command will cause fossil to run a miniature built-in webserver +and to launch your web-browser pointing +at that webserver. You can use your web-browser to configure your project +in various ways. See the instructions on the fossil website for additional +information.

    + +

    Once the project repository is created, create an open checkout of the +project by moving to the directory where you want to keep all of the +project source code and typing:

    + +
    +fossil open private-project.fossil
    +
    + +

    You can have multiple checkouts of the same project if you want. +And you can "clone" the repository to different machines so that multiple +developers can use it. See the fossil website for further information.

    + +

    3.3 Installing The SQLite Baseline In Fossil

    + +

The repository created in the previous step is initially empty. The +next step is to load the baseline SQLite release - circle (1) in the diagram +above.

    + +

Begin by obtaining a copy of SQLite in whatever form you use it. +The public SQLite you obtain should be as close to your private edited +copy as possible. If your project uses the SQLite amalgamation, then +get a copy of the amalgamation. If you use the preprocessed separate +source files, get those instead. Put all the source files in the +checkout directory created in the previous step.

    + +

    The source code in public SQLite releases uses unix line endings +(ASCII code 10: "newline" only, NL) and spaces instead of tabs. If you will +be changing the line ending to windows-style line endings +(ASCII codes 13, 10: "carriage-return" and "newline"; CR-NL) or if you will be +changing space indents into tab indents, make that change now +before you check in the baseline. The merging process will only work +well if the differences between the public and the private branches are +minimal. If every single line of the source file is changed in the +private branch because you changed from NL to CR-NL line endings, then +the merge steps will not work correctly.

    + +

    Let us assume that you are using the amalgamation source code. +Add the baseline to your project as follows:

    + +
    +fossil add sqlite3.c sqlite3.h
    +
    + +

    If you are using separate source files, name all of the source files instead +of just the two amalgamation source files. Once this is done, commit your +changes as follows:

    + +
    +fossil commit
    +
    + +

You will be prompted for a check-in comment. Say whatever you like. +After the commit completes, your baseline will be part of the repository. +Run the following command, if you like, to see this on the "timeline": +

    + +
    +fossil ui
    +
    + +

    That last command is the same "ui" command that we ran before. It +starts a mini-webserver running and points your web browser at it. But +this time we didn't have to specify the repository file because we are +located inside a checkout and so fossil can figure out the repository for +itself. If you want to type in the repository filename as the second +argument, you can. But it is optional.

    + +

    If you do not want to use your web browser to view the new check-in, +you can get some information from the command-line using commands like +these: + +

    +fossil timeline
    +fossil info
    +fossil status
    +
    + +

    3.4 Creating The Private Branch

    + +

    The previous step created circle (1) in the diagram above. +This step will create circle (2). Run the following command:

    + +
     
    +fossil branch new private trunk -bgcolor "#add8e8"
    +
    + +

    This command will create a new branch named "private" (you can use +a different name if you like) and assign it a background color +of light blue ("#add8e8"). You can omit the background color if you want, +though having a distinct background does make it easier to tell the +branch from the "trunk" (the public branch) on timeline displays. You +can change the background color of the private branch or of the public +branch (the "trunk") using the web interface if you like.

    + +

    The command above created the new branch. But your checkout is +still on the trunk - a fact you can see by running the command:

    + +
    +fossil info
    +
    + +

    To change your check-out to the private branch, type:

    + +
    +fossil update private
    +
    + +

    You can run the "info" command again to verify that you are on the +private branch. To go back to the public branch, type:

    + +
    +fossil update trunk
    +
    + +

Normally, fossil will modify all the files in your checkout when switching +between the private and the public branches. But at this point, the files +are identical in both branches so no modifications need to be made.

    + +

    3.5 Adding Customizations To The Code In The Private Branch

    + +

    Now it is time to make the private, custom modifications to SQLite +which are the whole point of this exercise. Switch to the private branch +(if you are not already there) using the "fossil update private" +command, then bring up the source files in your text editor and make +whatever changes you want to make. Once you have finished making +changes, commit those changes using this command:

    + +
    +fossil commit
    +
    + +

You will be prompted once again to enter a commit comment describing your +changes. Then the commit will occur. The commit creates a new checkin +in the repository that corresponds to circle (3) in the diagram above.

    + +

    Now that the public and private branches are different, you can run +the "fossil update trunk" and "fossil update private" +commands and see that fossil really does change the files in the checkout +as you switch back and forth between branches.

    + +

    Note that in the diagram above, we showed the private edits as a single +commit. This was for clarity of presentation only. There is nothing to stop +you from doing dozens or hundreds of separate tiny changes and committing +each separately. In fact, making many small changes is the preferred way +to work. The only reason for doing all the changes in a single commit +is that it makes the diagram easier to draw.

    + +

    3.6 Incorporating New Public SQLite Releases

    + +

    Suppose that after a while (about a month, usually) a new version of +SQLite is released: 3.6.16. You will want to incorporate this new +public version of SQLite into your repository in the public branch (the +trunk). To do this, first change your repository over to the trunk:

    + +
    +fossil update trunk
    +
    + +

    Then download the new version of the SQLite sources and overwrite the +files that are in the checkout.

    + +

    If you made NL to CR-NL line ending changes or space to tab +indentation changes in the original baseline, make the same changes +to the new source file.

    + +

    Once everything is ready, run the "fossil commit" command to +check in the changes. This creates circle (4) in the diagram above.

    + +

    3.7 Merging Public SQLite Updates Into The Private Branch

    + +

    The next step is to move the changes in the public branch over into +the private branch. In other words, we want to create circle (5) in the +diagram above. Begin by changing to the private branch using +"fossil update private". Then type this command:

    + +
    +fossil merge trunk
    +
    + +

    The "merge" command attempts to apply all the changes between +circles (1) and (4) to the files in the local checkout. Note that +circle (5) has not been created yet. You will need to run the +"commit" to create circle (5).

    + +

    It might be that there are conflicts in the merge. Conflicts +occur when the same line of code was changed in different ways between +circles (1) and (4) versus circles (2) and (3). The merge command will +announce any conflicts and will include both versions of the conflicting +lines in the output. You will need to bring up the files that contain +conflicts and manually resolve the conflicts.

    + +

    After resolving conflicts, many users like to compile and test the +new version before committing it to the repository. Or you can commit +first and test later. Either way, run the "fossil commit" +command to check-in the circle (5) version. + +

    3.8 Further Updates

    + +

    As new versions of SQLite are released, repeat steps 3.6 and 3.7 to +add changes in the new release to the private branch. +Additional private changes can be +made on the private branch in between releases if desired.

    +
    +This page last modified 2009/06/08 13:14:40 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/quickstart.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/quickstart.html --- sqlite3-3.4.2/www/quickstart.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/quickstart.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,184 @@ + + +SQLite In 5 Minutes Or Less + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    Here is what you do to start experimenting with SQLite without having +to do a lot of tedious reading and configuration:

    + +

    Download The Code

    + +
      +
    • Get a copy of the prebuilt binaries for your machine, or get a copy +of the sources and compile them yourself. Visit +the download page for more information.

    • +
    + +

    Create A New Database

    + +
      +
    • At a shell or DOS prompt, enter: "sqlite3 test.db". This will +create a new database named "test.db". (You can use a different name if +you like.)

    • +
    • Enter SQL commands at the prompt to create and populate the +new database.

    • +
    • Additional documentation is available here

    • +
    + +

    Write Programs That Use SQLite

    + +
      +
    • Below is a simple +TCL program that demonstrates how to use +the TCL interface to SQLite. The program executes the SQL statements +given as the second argument on the database defined by the first +argument. The commands to watch for are the sqlite3 command +on line 7 which opens an SQLite database and creates +a new TCL command named "db" to access that database, the +invocation of the db command on line 8 to execute +SQL commands against the database, and the closing of the database connection +on the last line of the script.

      + +
      +#!/usr/bin/tclsh
      +if {$argc!=2} {
      +  puts stderr "Usage: %s DATABASE SQL-STATEMENT"
      +  exit 1
      +}
      +load /usr/lib/tclsqlite3.so Sqlite3
      +sqlite3 db [lindex $argv 0]
      +db eval [lindex $argv 1] x {
      +  foreach v $x(*) {
      +    puts "$v = $x($v)"
      +  }
      +  puts ""
      +}
      +db close
      +
      +
    • + +
    • Below is a simple C program that demonstrates how to use +the C/C++ interface to SQLite. The name of a database is given by +the first argument and the second argument is one or more SQL statements +to execute against the database. The function calls to pay attention +to here are the call to sqlite3_open() on line 22 which opens +the database, sqlite3_exec() on line 27 that executes SQL +commands against the database, and sqlite3_close() on line 31 +that closes the database connection.

      + +

      See also the Introduction To The SQLite C/C++ Interface for +an introductory overview and roadmap to the dozens of SQLite interface +functions.

      + +
      +#include <stdio.h>
      +#include <sqlite3.h>
      +
      +static int callback(void *NotUsed, int argc, char **argv, char **azColName){
      +  int i;
      +  for(i=0; i<argc; i++){
      +    printf("%s = %s\n", azColName[i], argv[i] ? argv[i] : "NULL");
      +  }
      +  printf("\n");
      +  return 0;
      +}
      +
      +int main(int argc, char **argv){
      +  sqlite3 *db;
      +  char *zErrMsg = 0;
      +  int rc;
      +
      +  if( argc!=3 ){
      +    fprintf(stderr, "Usage: %s DATABASE SQL-STATEMENT\n", argv[0]);
      +    exit(1);
      +  }
      +  rc = sqlite3_open(argv[1], &db);
      +  if( rc ){
      +    fprintf(stderr, "Can't open database: %s\n", sqlite3_errmsg(db));
      +    sqlite3_close(db);
      +    exit(1);
      +  }
      +  rc = sqlite3_exec(db, argv[2], callback, 0, &zErrMsg);
      +  if( rc!=SQLITE_OK ){
      +    fprintf(stderr, "SQL error: %s\n", zErrMsg);
      +    sqlite3_free(zErrMsg);
      +  }
      +  sqlite3_close(db);
      +  return 0;
      +}
      +
      +
    • +
    +
    +This page last modified 2008/05/12 13:08:44 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/quickstart.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/quickstart.tcl --- sqlite3-3.4.2/www/quickstart.tcl 2007-03-29 19:39:37.000000000 +0100 +++ sqlite3-3.6.16/www/quickstart.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,110 +0,0 @@ -# -# Run this TCL script to generate HTML for the quickstart.html file. -# -set rcsid {$Id: quickstart.tcl,v 1.8 2006/06/13 11:27:22 drh Exp $} -source common.tcl -header {SQLite In 5 Minutes Or Less} -puts { -

    Here is what you do to start experimenting with SQLite without having -to do a lot of tedious reading and configuration:

    - -

    Download The Code

    - -
      -
    • Get a copy of the prebuilt binaries for your machine, or get a copy -of the sources and compile them yourself. Visit -the download page for more information.

    • -
    - -

    Create A New Database

    - -
      -
    • At a shell or DOS prompt, enter: "sqlite3 test.db". This will -create a new database named "test.db". (You can use a different name if -you like.)

    • -
    • Enter SQL commands at the prompt to create and populate the -new database.

    • -
    • Additional documentation is available here

    • -
    - -

    Write Programs That Use SQLite

    - -
      -
    • Below is a simple TCL program that demonstrates how to use -the TCL interface to SQLite. The program executes the SQL statements -given as the second argument on the database defined by the first -argument. The commands to watch for are the sqlite3 command -on line 7 which opens an SQLite database and creates -a new TCL command named "db" to access that database, the -invocation of the db command on line 8 to execute -SQL commands against the database, and the closing of the database connection -on the last line of the script.

      - -
      -#!/usr/bin/tclsh
      -if {$argc!=2} {
      -  puts stderr "Usage: %s DATABASE SQL-STATEMENT"
      -  exit 1
      -}
      -load /usr/lib/tclsqlite3.so Sqlite3
      -sqlite3 db [lindex $argv 0]
      -db eval [lindex $argv 1] x {
      -  foreach v $x(*) {
      -    puts "$v = $x($v)"
      -  }
      -  puts ""
      -}
      -db close
      -
      -
    • - -
    • Below is a simple C program that demonstrates how to use -the C/C++ interface to SQLite. The name of a database is given by -the first argument and the second argument is one or more SQL statements -to execute against the database. The function calls to pay attention -to here are the call to sqlite3_open() on line 22 which opens -the database, sqlite3_exec() on line 27 that executes SQL -commands against the database, and sqlite3_close() on line 31 -that closes the database connection.

      - -
      -#include <stdio.h>
      -#include <sqlite3.h>
      -
      -static int callback(void *NotUsed, int argc, char **argv, char **azColName){
      -  int i;
      -  for(i=0; i<argc; i++){
      -    printf("%s = %s\n", azColName[i], argv[i] ? argv[i] : "NULL");
      -  }
      -  printf("\n");
      -  return 0;
      -}
      -
      -int main(int argc, char **argv){
      -  sqlite3 *db;
      -  char *zErrMsg = 0;
      -  int rc;
      -
      -  if( argc!=3 ){
      -    fprintf(stderr, "Usage: %s DATABASE SQL-STATEMENT\n", argv[0]);
      -    exit(1);
      -  }
      -  rc = sqlite3_open(argv[1], &db);
      -  if( rc ){
      -    fprintf(stderr, "Can't open database: %s\n", sqlite3_errmsg(db));
      -    sqlite3_close(db);
      -    exit(1);
      -  }
      -  rc = sqlite3_exec(db, argv[2], callback, 0, &zErrMsg);
      -  if( rc!=SQLITE_OK ){
      -    fprintf(stderr, "SQL error: %s\n", zErrMsg);
      -    sqlite3_free(zErrMsg);
      -  }
      -  sqlite3_close(db);
      -  return 0;
      -}
      -
      -
    • -
    -} -footer {$Id: quickstart.tcl,v 1.8 2006/06/13 11:27:22 drh Exp $} diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_0.html --- sqlite3-3.4.2/www/releaselog/3_0_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_0.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.0.0 On 2004 June 18 (3.0.0 alpha) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.0 On 2004 June 18 (3.0.0 alpha)

    Changes associated with this release include the following:

      +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Support for internationalization including UTF-8, UTF-16, and + user defined collating sequences.
    • +
    • New file format that is 25% to 35% smaller for typical use.
    • +
    • Improved concurrency.
    • +
    • Atomic commits for ATTACHed databases.
    • +
    • Remove cruft from the APIs.
    • +
    • BLOB support.
    • +
    • 64-bit rowids.
    • +
    • More information. +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_1.html --- sqlite3-3.4.2/www/releaselog/3_0_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_1.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.0.1 On 2004 June 22 (3.0.1 alpha) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.1 On 2004 June 22 (3.0.1 alpha)

    Changes associated with this release include the following:

      +
    • + *** Alpha Release - Research And Testing Use Only *** +
    • Lots of bug fixes.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_2.html --- sqlite3-3.4.2/www/releaselog/3_0_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_2.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,83 @@ + + +SQLite Release 3.0.2 On 2004 June 30 (3.0.2 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.2 On 2004 June 30 (3.0.2 beta)

    Changes associated with this release include the following:

      +
    • The first beta release for SQLite 3.0.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_3.html --- sqlite3-3.4.2/www/releaselog/3_0_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_3.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.0.3 On 2004 July 22 (3.0.3 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.3 On 2004 July 22 (3.0.3 beta)

    Changes associated with this release include the following:

      +
    • The second beta release for SQLite 3.0.
    • +
    • Add support for "PRAGMA page_size" to adjust the page size of +the database.
    • +
    • Various bug fixes and documentation updates.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_4.html --- sqlite3-3.4.2/www/releaselog/3_0_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_4.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.0.4 On 2004 August 8 (3.0.4 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.4 On 2004 August 8 (3.0.4 beta)

    Changes associated with this release include the following:

      +
    • CREATE TABLE and DROP TABLE now work correctly as prepared statements.
    • +
    • Fix a bug in VACUUM and UNIQUE indices.
    • +
    • Add the ".import" command to the command-line shell.
    • +
    • Fix a bug that could cause index corruption when an attempt to + delete rows of a table is blocked by a pending query.
    • +
    • Library size optimizations.
    • +
    • Other minor bug fixes.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_5.html --- sqlite3-3.4.2/www/releaselog/3_0_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_5.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,94 @@ + + +SQLite Release 3.0.5 On 2004 August 29 (3.0.5 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.5 On 2004 August 29 (3.0.5 beta)

    Changes associated with this release include the following:

      +
    • Support for ":AAA" style bind parameter names.
    • +
• Added the new sqlite3_bind_parameter_name() interface. (A C sketch using named parameters follows this list.)
    • +
    • Support for TCL variable names embedded in SQL statements in the + TCL bindings.
    • +
    • The TCL bindings transfer data without necessarily doing a conversion + to a string.
    • +
    • The database for TEMP tables is not created until it is needed.
    • +
    • Add the ability to specify an alternative temporary file directory + using the "sqlite_temp_directory" global variable.
    • +
    • A compile-time option (SQLITE_BUSY_RESERVED_LOCK) causes the busy + handler to be called when there is contention for a RESERVED lock.
    • +
    • Various bug fixes and optimizations
    • +
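A hedged C sketch of the ":AAA"-style parameters and sqlite3_bind_parameter_name() interface mentioned above; the in-memory database, the table t, and the bound values are made up for illustration.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  int i, n;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(a,b);", 0, 0, 0);

  if( sqlite3_prepare_v2(db, "INSERT INTO t(a,b) VALUES(:name, :value);",
                         -1, &stmt, 0)!=SQLITE_OK ) return 1;

  /* List the parameter names, then bind each parameter by its index. */
  n = sqlite3_bind_parameter_count(stmt);
  for(i=1; i<=n; i++){
    printf("parameter %d is %s\n", i, sqlite3_bind_parameter_name(stmt, i));
  }
  sqlite3_bind_text(stmt, 1, "example", -1, SQLITE_STATIC);
  sqlite3_bind_int(stmt, 2, 42);
  sqlite3_step(stmt);

  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}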

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_6.html --- sqlite3-3.4.2/www/releaselog/3_0_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_6.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,88 @@ + + +SQLite Release 3.0.6 On 2004 September 02 (3.0.6 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.6 On 2004 September 02 (3.0.6 beta)

    Changes associated with this release include the following:

      +
    • Better detection and handling of corrupt database files.
    • +
    • The sqlite3_step() interface returns SQLITE_BUSY if it is unable + to commit a change because of a lock
    • +
    • Combine the implementations of LIKE and GLOB into a single + pattern-matching subroutine.
    • +
    • Miscellaneous code size optimizations and bug fixes
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_7.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_7.html --- sqlite3-3.4.2/www/releaselog/3_0_7.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_7.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,95 @@ + + +SQLite Release 3.0.7 On 2004 September 18 (3.0.7) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.7 On 2004 September 18 (3.0.7)

    Changes associated with this release include the following:

      +
    • The BTree module allocates large buffers using malloc() instead of + off of the stack, in order to play better on machines with limited + stack space.
    • +
    • Fixed naming conflicts so that versions 2.8 and 3.0 can be + linked and used together in the same ANSI-C source file.
    • +
    • New interface: sqlite3_bind_parameter_index()
    • +
    • Add support for wildcard parameters of the form: "?nnn"
    • +
    • Fix problems found on 64-bit systems.
    • +
    • Removed encode.c file (containing unused routines) from the + version 3.0 source tree.
    • +
    • The sqlite3_trace() callbacks occur before each statement + is executed, not when the statement is compiled.
    • +
    • Makefile updates and miscellaneous bug fixes.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_0_8.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_0_8.html --- sqlite3-3.4.2/www/releaselog/3_0_8.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_0_8.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,94 @@ + + +SQLite Release 3.0.8 On 2004 October 11 (3.0.8) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.0.8 On 2004 October 11 (3.0.8)

    Changes associated with this release include the following:

      +
• Add support for DEFERRED, IMMEDIATE, and EXCLUSIVE transactions. (A C sketch follows this list.)
    • +
    • Allow new user-defined functions to be created when there are +already one or more precompiled SQL statements.
    • +
    • Fix portability problems for Mingw/MSYS.
    • +
    • Fix a byte alignment problem on 64-bit Sparc machines.
    • +
    • Fix the ".import" command of the shell so that it ignores \r +characters at the end of lines.
    • +
    • The "csv" mode option in the shell puts strings inside double-quotes.
    • +
    • Fix typos in documentation.
    • +
    • Convert array constants in the code to have type "const".
    • +
• Numerous code optimizations, especially optimizations designed to +make the code footprint smaller.
    • +
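A small C sketch of starting one of these transaction types through sqlite3_exec(); the file name "test.db" and the statement inside the transaction are placeholders for illustration only.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *err = 0;
  if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;

  /* IMMEDIATE reserves the database for writing as soon as the
  ** transaction starts; DEFERRED (the default) or EXCLUSIVE could
  ** be used here instead. */
  if( sqlite3_exec(db, "BEGIN IMMEDIATE;", 0, 0, &err)==SQLITE_OK ){
    sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS log(msg);", 0, 0, 0);
    sqlite3_exec(db, "COMMIT;", 0, 0, 0);
  }else{
    fprintf(stderr, "could not start transaction: %s\n", err);
    sqlite3_free(err);
  }
  sqlite3_close(db);
  return 0;
}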

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_0.html --- sqlite3-3.4.2/www/releaselog/3_1_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_0.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,95 @@ + + +SQLite Release 3.1.0 On 2005 January 21 (3.1.0 ALPHA) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.0 On 2005 January 21 (3.1.0 ALPHA)

    Changes associated with this release include the following:

      +
    • Autovacuum support added
    • +
    • CURRENT_TIME, CURRENT_DATE, and CURRENT_TIMESTAMP added
    • +
    • Support for the EXISTS clause added.
    • +
    • Support for correlated subqueries added.
    • +
    • Added the ESCAPE clause on the LIKE operator.
    • +
    • Support for ALTER TABLE ... RENAME TABLE ... added
    • +
    • AUTOINCREMENT keyword supported on INTEGER PRIMARY KEY
    • +
• Many SQLITE_OMIT_ macros inserted to omit features at compile-time + and reduce the library footprint.
    • +
    • The REINDEX command was added.
    • +
    • The engine no longer consults the main table if it can get + all the information it needs from an index.
    • +
    • Many nuisance bugs fixed.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_1.html --- sqlite3-3.4.2/www/releaselog/3_1_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_1.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.1.1 On 2005 February 1 (3.1.1 BETA) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.1 On 2005 February 1 (3.1.1 BETA)

    Changes associated with this release include the following:

      +
    • Automatic caching of prepared statements in the TCL interface
    • +
    • ATTACH and DETACH as well as some other operations cause existing + prepared statements to expire.
    • +
• Numerous minor bug fixes
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_2.html --- sqlite3-3.4.2/www/releaselog/3_1_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_2.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,90 @@ + + +SQLite Release 3.1.2 On 2005 February 15 (3.1.2) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.2 On 2005 February 15 (3.1.2)

    Changes associated with this release include the following:

      +
    • Fix a bug that can lead to database corruption if there are two +open connections to the same database and one connection does a VACUUM +and the second makes some change to the database.
    • +
    • Allow "?" parameters in the LIMIT clause.
    • +
    • Fix VACUUM so that it works with AUTOINCREMENT.
    • +
    • Fix a race condition in AUTOVACUUM that can lead to corrupt databases
    • +
    • Add a numeric version number to the sqlite3.h include file.
    • +
    • Other minor bug fixes and performance enhancements.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_3.html --- sqlite3-3.4.2/www/releaselog/3_1_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_3.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,87 @@ + + +SQLite Release 3.1.3 On 2005 February 19 (3.1.3) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.3 On 2005 February 19 (3.1.3)

    Changes associated with this release include the following:

      +
    • Fix a problem with VACUUM on databases from which tables containing +AUTOINCREMENT have been dropped.
    • +
    • Add forward compatibility to the future version 3.2 database file +format.
    • +
    • Documentation updates
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_4.html --- sqlite3-3.4.2/www/releaselog/3_1_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_4.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,95 @@ + + +SQLite Release 3.1.4 On 2005 March 10 (3.1.4) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.4 On 2005 March 10 (3.1.4)

    Changes associated with this release include the following:

      +
    • Fix a bug in autovacuum that could cause database corruption if +a CREATE UNIQUE INDEX fails because of a constraint violation. +This problem only occurs if the new autovacuum feature introduced in +version 3.1 is turned on.
    • +
    • The F_FULLSYNC ioctl (currently only supported on OS-X) is disabled +if the synchronous pragma is set to something other than "full".
    • +
    • Add additional forward compatibility to the future version 3.2 database +file format.
    • +
    • Fix a bug in WHERE clauses of the form (rowid<'2')
    • +
    • New SQLITE_OMIT_... compile-time options added
    • +
    • Updates to the man page
    • +
    • Remove the use of strcasecmp() from the shell
    • +
    • Windows DLL exports symbols Tclsqlite_Init and Sqlite_Init
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_5.html --- sqlite3-3.4.2/www/releaselog/3_1_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_5.html 2009-06-27 15:07:38.000000000 +0100 @@ -0,0 +1,84 @@ + + +SQLite Release 3.1.5 On 2005 March 11 (3.1.5) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.5 On 2005 March 11 (3.1.5)

    Changes associated with this release include the following:

      +
    • The ioctl on OS-X to control syncing to disk is F_FULLFSYNC, + not F_FULLSYNC. The previous release had it wrong.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_1_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_1_6.html --- sqlite3-3.4.2/www/releaselog/3_1_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_1_6.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +SQLite Release 3.1.6 On 2005 March 16 (3.1.6) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.1.6 On 2005 March 16 (3.1.6)

    Changes associated with this release include the following:

      +
• Fix a bug that could cause database corruption when inserting + records into tables with around 125 columns.
    • +
    • sqlite3_step() is now much more likely to invoke the busy handler + and less likely to return SQLITE_BUSY.
    • +
    • Fix memory leaks that used to occur after a malloc() failure.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_0.html --- sqlite3-3.4.2/www/releaselog/3_2_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_0.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.2.0 On 2005 March 21 (3.2.0) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.0 On 2005 March 21 (3.2.0)

    Changes associated with this release include the following:

      +
    • Added support for ALTER TABLE ADD COLUMN.
    • +
    • Added support for the "T" separator in ISO-8601 date/time strings.
    • +
    • Improved support for Cygwin.
    • +
    • Numerous bug fixes and documentation updates.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_1.html --- sqlite3-3.4.2/www/releaselog/3_2_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,84 @@ + + +SQLite Release 3.2.1 On 2005 March 29 (3.2.1) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.1 On 2005 March 29 (3.2.1)

    Changes associated with this release include the following:

      +
• Fix a memory allocation error in the new ADD COLUMN command.
    • +
    • Documentation updates
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_2.html --- sqlite3-3.4.2/www/releaselog/3_2_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,90 @@ + + +SQLite Release 3.2.2 On 2005 June 13 (3.2.2) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.2 On 2005 June 13 (3.2.2)

    Changes associated with this release include the following:

      +
    • Added the sqlite3_db_handle() API
    • +
    • Added the sqlite3_get_autocommit() API
    • +
• Added a REGEXP operator to the parser. There is no function to back +up this operator in the standard build but users can add their own using +sqlite3_create_function() (a sketch follows this list)
    • +
    • Speed improvements and library footprint reductions.
    • +
    • Fix byte alignment problems on 64-bit architectures.
    • +
    • Many, many minor bug fixes and documentation updates.
    • +
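A sketch of backing the REGEXP operator with sqlite3_create_function(), as the note above suggests. The substring test used here is a stand-in rather than a real regular-expression engine; an application would call into its regex library of choice inside regexp_func().

#include <string.h>
#include <sqlite3.h>

/* Called for "X REGEXP Y"; SQLite passes the pattern Y as argv[0] and
** the string X as argv[1].  This stand-in only checks whether the
** pattern occurs as a plain substring. */
static void regexp_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *pattern = (const char*)sqlite3_value_text(argv[0]);
  const char *string  = (const char*)sqlite3_value_text(argv[1]);
  (void)argc;
  if( pattern==0 || string==0 ){
    sqlite3_result_null(ctx);
  }else{
    sqlite3_result_int(ctx, strstr(string, pattern)!=0);
  }
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* A 2-argument scalar function named "regexp" backs the REGEXP operator. */
  sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, 0, regexp_func, 0, 0);
  /* Queries can now use:  SELECT * FROM t WHERE col REGEXP 'abc'; */
  sqlite3_close(db);
  return 0;
}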

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_3.html --- sqlite3-3.4.2/www/releaselog/3_2_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_3.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,109 @@ + + +SQLite Release 3.2.3 On 2005 August 21 (3.2.3) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.3 On 2005 August 21 (3.2.3)

    Changes associated with this release include the following:

      +
    • Added support for the CAST operator
    • +
    • Tcl interface allows BLOB values to be transferred to user-defined +functions
    • +
    • Added the "transaction" method to the Tcl interface
    • +
    • Allow the DEFAULT value of a column to call functions that have constant +operands
    • +
    • Added the ANALYZE command for gathering statistics on indices and +using those statistics when picking an index in the optimizer
    • +
    • Remove the limit (formerly 100) on the number of terms in the +WHERE clause
    • +
    • The right-hand side of the IN operator can now be a list of expressions +instead of just a list of constants
    • +
    • Rework the optimizer so that it is able to make better use of indices
    • +
    • The order of tables in a join is adjusted automatically to make +better use of indices
    • +
    • The IN operator is now a candidate for optimization even if the left-hand +side is not the left-most term of the index. Multiple IN operators can be +used with the same index.
    • +
    • WHERE clause expressions using BETWEEN and OR are now candidates +for optimization
    • +
    • Added the "case_sensitive_like" pragma and the SQLITE_CASE_SENSITIVE_LIKE +compile-time option to set its default value to "on".
    • +
    • Use indices to help with GLOB expressions and LIKE expressions too +when the case_sensitive_like pragma is enabled
    • +
    • Added support for grave-accent quoting for compatibility with MySQL
    • +
    • Improved test coverage
    • +
    • Dozens of minor bug fixes
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_4.html --- sqlite3-3.4.2/www/releaselog/3_2_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_4.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.2.4 On 2005 August 24 (3.2.4) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.4 On 2005 August 24 (3.2.4)

    Changes associated with this release include the following:

      +
    • Fix a bug introduced in the previous release +that can cause a segfault while generating code +for complex WHERE clauses.
    • +
    • Allow floating point literals to begin or end with a decimal point.
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_5.html --- sqlite3-3.4.2/www/releaselog/3_2_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_5.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,90 @@ + + +SQLite Release 3.2.5 On 2005 August 27 (3.2.5) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.5 On 2005 August 27 (3.2.5)

    Changes associated with this release include the following:

      +
• Fix a bug affecting DELETE and UPDATE statements that changed +more than 40960 rows.
    • +
    • Change the makefile so that it no longer requires GNUmake extensions
    • +
    • Fix the --enable-threadsafe option on the configure script
    • +
    • Fix a code generator bug that occurs when the left-hand side of an IN +operator is constant and the right-hand side is a SELECT statement
    • +
    • The PRAGMA synchronous=off statement now disables syncing of the +master journal file in addition to the normal rollback journals
    • +

    +

    A complete list of SQLite releases + in a single pages is also available. A detailed history of every + check-in is available at + + http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_6.html --- sqlite3-3.4.2/www/releaselog/3_2_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_6.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,96 @@ + + +SQLite Release 3.2.6 On 2005 September 17 (3.2.6) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.6 On 2005 September 17 (3.2.6)

    Changes associated with this release include the following:

      +
    • Fix a bug that can cause database corruption if a VACUUM (or + autovacuum) fails and is rolled back on a database that is + larger than 1GiB
    • +
• The LIKE optimization now works for columns with COLLATE NOCASE
    • +
    • ORDER BY and GROUP BY now use bounded memory
    • +
    • Added support for COUNT(DISTINCT expr)
    • +
    • Change the way SUM() handles NULL values in order to comply with + the SQL standard
    • +
    • Use fdatasync() instead of fsync() where possible in order to speed + up commits slightly
    • +
    • Use of the CROSS keyword in a join turns off the table reordering + optimization
    • +
    • Added the experimental and undocumented EXPLAIN QUERY PLAN capability
    • +
    • Use the unicode API in Windows
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_7.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_7.html --- sqlite3-3.4.2/www/releaselog/3_2_7.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_7.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +SQLite Release 3.2.7 On 2005 September 24 (3.2.7) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.7 On 2005 September 24 (3.2.7)

    Changes associated with this release include the following:

      +
    • GROUP BY now considers NULLs to be equal again, as it should +
    • +
    • Now compiles on Solaris and OpenBSD and other Unix variants +that lack the fdatasync() function
    • +
    • Now compiles on MSVC++6 again
    • +
    • Fix uninitialized variables causing malfunctions for various obscure +queries
    • +
• Correctly compute LEFT OUTER JOINs that are constrained on the left table only
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_2_8.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_2_8.html --- sqlite3-3.4.2/www/releaselog/3_2_8.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_2_8.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.2.8 On 2005 December 19 (3.2.8) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.2.8 On 2005 December 19 (3.2.8)

    Changes associated with this release include the following:

      +
• Fix an obscure bug that can cause database corruption under the following unusual circumstances: a large INSERT or UPDATE statement which is part of an even larger transaction fails due to a uniqueness constraint, but the containing transaction commits.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_0.html --- sqlite3-3.4.2/www/releaselog/3_3_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_0.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +SQLite Release 3.3.0 On 2006 January 10 (3.3.0 alpha) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.0 On 2006 January 10 (3.3.0 alpha)

    Changes associated with this release include the following:

      +
    • CHECK constraints
    • +
    • IF EXISTS and IF NOT EXISTS clauses on CREATE/DROP TABLE/INDEX.
    • +
    • DESC indices
    • +
    • More efficient encoding of boolean values resulting in smaller database +files
    • +
    • More aggressive SQLITE_OMIT_FLOATING_POINT
    • +
    • Separate INTEGER and REAL affinity
    • +
    • Added a virtual function layer for the OS interface
    • +
    • "exists" method added to the TCL interface
    • +
    • Improved response to out-of-memory errors
    • +
    • Database cache can be optionally shared between connections +in the same thread
    • +
    • Optional READ UNCOMMITTED isolation (instead of the default +isolation level of SERIALIZABLE) and table level locking when +database connections share a common cache.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_10.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_10.html --- sqlite3-3.4.2/www/releaselog/3_3_10.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_10.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,96 @@ + + +SQLite Release 3.3.10 On 2007 January 9 (3.3.10) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.10 On 2007 January 9 (3.3.10)

    Changes associated with this release include the following:

      +
    • Fix bugs in the implementation of the new +sqlite3_prepare_v2() API +that can lead to segfaults.
    • +
    • Fix 1-second round-off errors in the + +strftime() function
    • +
    • Enhance the Windows OS layer to provide detailed error codes
    • +
    • Work around a win2k problem so that SQLite can use single-character +database file names
    • +
    • The +user_version and +schema_version pragmas +correctly set their column names in the result set
    • +
    • Documentation updates
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_11.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_11.html --- sqlite3-3.4.2/www/releaselog/3_3_11.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_11.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +SQLite Release 3.3.11 On 2007 January 22 (3.3.11) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.11 On 2007 January 22 (3.3.11)

    Changes associated with this release include the following:

      +
    • Fix another bug in the implementation of the new +sqlite3_prepare_v2() API. +We'll get it right eventually...
    • +
    • Fix a bug in the IS NULL optimization that was added in version 3.3.9 - +the bug was causing incorrect results on certain LEFT JOINs that included +in the WHERE clause an IS NULL constraint for the right table of the +LEFT JOIN.
    • +
    • Make AreFileApisANSI() a no-op macro in WinCE since WinCE does not +support this function.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_12.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_12.html --- sqlite3-3.4.2/www/releaselog/3_3_12.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_12.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.3.12 On 2007 January 27 (3.3.12) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.12 On 2007 January 27 (3.3.12)

    Changes associated with this release include the following:

      +
    • Fix another bug in the IS NULL optimization that was added in +version 3.3.9.
    • +
• Fix an assertion fault that occurred on deeply nested views.
    • +
    • Limit the amount of output that +PRAGMA integrity_check +generates.
    • +
    • Minor syntactic changes to support a wider variety of compilers.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_13.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_13.html --- sqlite3-3.4.2/www/releaselog/3_3_13.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_13.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +SQLite Release 3.3.13 On 2007 February 13 (3.3.13) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.13 On 2007 February 13 (3.3.13)

    Changes associated with this release include the following:

      +
    • Add a "fragmentation" measurement in the output of sqlite3_analyzer.
    • +
    • Add the COLLATE operator used to explicitly set the collating sequence +used by an expression. This feature is considered experimental pending +additional testing.
    • +
    • Allow up to 64 tables in a join - the old limit was 32.
    • +
    • Added two new experimental functions: +randomBlob() and +hex(). +Their intended use is to facilitate generating +UUIDs. +
    • +
    • Fix a problem where +PRAGMA count_changes was +causing incorrect results for updates on tables with triggers
    • +
    • Fix a bug in the ORDER BY clause optimizer for joins where the +left-most table in the join is constrained by a UNIQUE index.
    • +
    • Fixed a bug in the "copy" method of the TCL interface.
    • +
    • Bug fixes in fts1 and fts2 modules.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_14.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_14.html --- sqlite3-3.4.2/www/releaselog/3_3_14.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_14.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,121 @@ + + +SQLite Release 3.3.14 On 2007 April 2 (3.3.14) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.14 On 2007 April 2 (3.3.14)

    Changes associated with this release include the following:

      +
• Fix a bug (ticket #2273) that could cause a segfault when the IN operator is used on one term of a two-column index and the right-hand side of the IN operator contains a NULL.
    • +
    • Added a new OS interface method for determining the sector size + of underlying media: sqlite3OsSectorSize().
    • +
    • A new algorithm for statements of the form + INSERT INTO table1 SELECT * FROM table2 + is faster and reduces fragmentation. VACUUM uses statements of + this form and thus runs faster and defragments better.
    • +
    • Performance enhancements through reductions in disk I/O: +
        +
      • Do not read the last page of an overflow chain when + deleting the row - just add that page to the freelist.
      • +
      • Do not store pages being deleted in the + rollback journal.
      • +
      • Do not read in the (meaningless) content of + pages extracted from the freelist.
      • +
      • Do not flush the page cache (and thus avoiding + a cache refill) unless another process changes the underlying + database file.
      • +
      • Truncate rather than delete the rollback journal when committing + a transaction in exclusive access mode, or when committing the TEMP + database.
      • +
    • +
• Added support for exclusive access mode using "PRAGMA locking_mode=EXCLUSIVE" (see the sketch following these notes)
    • +
    • Use heap space instead of stack space for large buffers in the + pager - useful on embedded platforms with stack-space + limitations.
    • +
    • Add a makefile target "sqlite3.c" that builds an amalgamation containing + the core SQLite library C code in a single file.
    • +
    • Get the library working correctly when compiled + with GCC option "-fstrict-aliasing".
    • +
• Removed the vestigial SQLITE_PROTOCOL error.
    • +
• Improvements to test coverage, other minor bugs fixed, memory leaks plugged, code refactored and/or re-commented in places for easier reading.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
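A rough sketch of the exclusive access mode listed for 3.3.14 above. The file name demo.db is a placeholder; the point is simply that the pragma is an ordinary statement issued through sqlite3_exec(), and while it is in effect SQLite keeps its file locks between transactions and may truncate the rollback journal instead of deleting it.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void) {
      sqlite3 *db;
      char *err = 0;
      if (sqlite3_open("demo.db", &db) != SQLITE_OK) return 1;

      /* Request exclusive access mode for this connection. */
      if (sqlite3_exec(db, "PRAGMA locking_mode=EXCLUSIVE;", 0, 0, &err) != SQLITE_OK) {
        fprintf(stderr, "pragma failed: %s\n", err);
        sqlite3_free(err);
      }

      /* The first write acquires the exclusive lock, which is then retained. */
      sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS log(msg TEXT);"
                       "INSERT INTO log VALUES('hello');", 0, 0, 0);

      sqlite3_close(db);
      return 0;
    }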
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_15.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_15.html --- sqlite3-3.4.2/www/releaselog/3_3_15.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_15.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +SQLite Release 3.3.15 On 2007 April 9 (3.3.15) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.15 On 2007 April 9 (3.3.15)

    Changes associated with this release include the following:

      +
    • Fix a bug introduced in 3.3.14 that caused a rollback of + CREATE TEMP TABLE to leave the database connection wedged.
    • +
    • Fix a bug that caused an extra NULL row to be returned when + a descending query was interrupted by a change to the database.
    • +
    • The FOR EACH STATEMENT clause on a trigger now causes a syntax + error. It used to be silently ignored.
    • +
    • Fix an obscure and relatively harmless problem that might have caused + a resource leak following an I/O error.
    • +
• Many improvements to the test suite. Test coverage now exceeds 98%.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_16.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_16.html --- sqlite3-3.4.2/www/releaselog/3_3_16.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_16.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,92 @@ + + +SQLite Release 3.3.16 On 2007 April 18 (3.3.16) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.16 On 2007 April 18 (3.3.16)

    Changes associated with this release include the following:

      +
    • Fix a bug that caused VACUUM to fail if NULLs appeared in a + UNIQUE column.
    • +
    • Reinstate performance improvements that were added in + Version 3.3.14 + but regressed in Version 3.3.15.
    • +
    • Fix problems with the handling of ORDER BY expressions on + compound SELECT statements in subqueries.
    • +
    • Fix a potential segfault when destroying locks on WinCE in + a multi-threaded environment.
    • +
    • Documentation updates.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_17.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_17.html --- sqlite3-3.4.2/www/releaselog/3_3_17.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_17.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.3.17 On 2007 April 25 (3.3.17) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.17 On 2007 April 25 (3.3.17)

    Changes associated with this release include the following:

      +
    • When the "write_version" value of the database header is larger than + what the library understands, make the database read-only instead of + unreadable.
    • +
    • Other minor bug fixes
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_1.html --- sqlite3-3.4.2/www/releaselog/3_3_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.3.1 On 2006 January 16 (3.3.1 alpha) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.1 On 2006 January 16 (3.3.1 alpha)

    Changes associated with this release include the following:

      +
    • Countless bug fixes
    • +
    • Speed improvements
    • +
    • Database connections can now be used by multiple threads, not just +the thread in which they were created.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_2.html --- sqlite3-3.4.2/www/releaselog/3_3_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.3.2 On 2006 January 24 (3.3.2 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.2 On 2006 January 24 (3.3.2 beta)

    Changes associated with this release include the following:

      +
    • Bug fixes and speed improvements. Improved test coverage.
    • +
    • Changes to the OS-layer interface: mutexes must now be recursive.
    • +
    • Discontinue the use of thread-specific data for out-of-memory +exception handling
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_3.html --- sqlite3-3.4.2/www/releaselog/3_3_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_3.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +SQLite Release 3.3.3 On 2006 January 31 (3.3.3) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.3 On 2006 January 31 (3.3.3)

    Changes associated with this release include the following:

      +
    • Removed support for an ON CONFLICT clause on CREATE INDEX - it never +worked correctly so this should not present any backward compatibility +problems.
    • +
    • Authorizer callback now notified of ALTER TABLE ADD COLUMN commands
    • +
    • After any changes to the TEMP database schema, all prepared statements +are invalidated and must be recreated using a new call to +sqlite3_prepare()
    • +
    • Other minor bug fixes in preparation for the first stable release +of version 3.3
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_4.html --- sqlite3-3.4.2/www/releaselog/3_3_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_4.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.3.4 On 2006 February 11 (3.3.4) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.4 On 2006 February 11 (3.3.4)

    Changes associated with this release include the following:

      +
    • Fix a blunder in the Unix mutex implementation that can lead to +deadlock on multithreaded systems.
    • +
    • Fix an alignment problem on 64-bit machines
    • +
    • Added the fullfsync pragma.
    • +
    • Fix an optimizer bug that could have caused some unusual LEFT OUTER JOINs +to give incorrect results.
    • +
    • The SUM function detects integer overflow and converts to accumulating +an approximate result using floating point numbers
    • +
    • Host parameter names can begin with '@' for compatibility with SQL Server. +
    • +
    • Other miscellaneous bug fixes
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_5.html --- sqlite3-3.4.2/www/releaselog/3_3_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_5.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +SQLite Release 3.3.5 On 2006 April 5 (3.3.5) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.5 On 2006 April 5 (3.3.5)

    Changes associated with this release include the following:

      +
    • CHECK constraints use conflict resolution algorithms correctly.
    • +
    • The SUM() function throws an error on integer overflow.
    • +
    • Choose the column names in a compound query from the left-most SELECT + instead of the right-most.
    • +
    • The sqlite3_create_collation() function + honors the SQLITE_UTF16_ALIGNED flag.
    • +
    • SQLITE_SECURE_DELETE compile-time option causes deletes to overwrite + old data with zeros.
    • +
    • Detect integer overflow in abs().
    • +
    • The random() function provides 64 bits of randomness instead of + only 32 bits.
    • +
    • Parser detects and reports automaton stack overflow.
    • +
    • Change the round() function to return REAL instead of TEXT.
    • +
    • Allow WHERE clause terms on the left table of a LEFT OUTER JOIN to + contain aggregate subqueries.
    • +
    • Skip over leading spaces in text to numeric conversions.
    • +
    • Various minor bug and documentation typo fixes and + performance enhancements.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_6.html --- sqlite3-3.4.2/www/releaselog/3_3_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_6.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.3.6 On 2006 June 6 (3.3.6) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.6 On 2006 June 6 (3.3.6)

    Changes associated with this release include the following:

      +
    • Plays better with virus scanners on Windows
    • +
    • Faster :memory: databases
    • +
    • Fix an obscure segfault in UTF-8 to UTF-16 conversions
    • +
    • Added driver for OS/2
    • +
    • Correct column meta-information returned for aggregate queries
    • +
    • Enhanced output from EXPLAIN QUERY PLAN
    • +
    • LIMIT 0 now works on subqueries
    • +
    • Bug fixes and performance enhancements in the query optimizer
    • +
    • Correctly handle NULL filenames in ATTACH and DETACH
    • +
• Improved syntax error messages in the parser
    • +
    • Fix type coercion rules for the IN operator
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_7.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_7.html --- sqlite3-3.4.2/www/releaselog/3_3_7.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_7.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.3.7 On 2006 August 12 (3.3.7) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.7 On 2006 August 12 (3.3.7)

    Changes associated with this release include the following:

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_8.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_8.html --- sqlite3-3.4.2/www/releaselog/3_3_8.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_8.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,94 @@ + + +SQLite Release 3.3.8 On 2006 October 9 (3.3.8) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.8 On 2006 October 9 (3.3.8)

    Changes associated with this release include the following:

      +
    • Support for full text search using the +FTS1 module +(beta)
    • +
    • Added OS-X locking patches (beta - disabled by default)
    • +
    • Introduce extended error codes and add error codes for various +kinds of I/O errors.
    • +
    • Added support for IF EXISTS on CREATE/DROP TRIGGER/VIEW
    • +
    • Fix the regression test suite so that it works with Tcl8.5
    • +
    • Enhance sqlite3_set_authorizer() to provide notification of calls to + SQL functions.
    • +
    • Added experimental API: sqlite3_auto_extension()
    • +
    • Various minor bug fixes
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_3_9.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_3_9.html --- sqlite3-3.4.2/www/releaselog/3_3_9.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_3_9.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,112 @@ + + +SQLite Release 3.3.9 On 2007 January 4 (3.3.9) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.3.9 On 2007 January 4 (3.3.9)

    Changes associated with this release include the following:

      +
    • Fix bugs in pager.c that could lead to database corruption if two +processes both try to recover a hot journal at the same instant
    • +
• Added the sqlite3_prepare_v2() API (a usage sketch follows these notes).
    • +
    • Fixed the ".dump" command in the command-line shell to show +indices, triggers and views again.
    • +
    • Change the table_info pragma so that it returns NULL for the default +value if there is no default value
    • +
    • Support for non-ASCII characters in win95 filenames
    • +
    • Query optimizer enhancements: +
        +
      • Optimizer does a better job of using indices to satisfy ORDER BY +clauses that sort on the integer primary key
      • +
      • Use an index to satisfy an IS NULL operator in the WHERE clause
      • +
      • Fix a bug that was causing the optimizer to miss an OR optimization +opportunity
      • +
• The optimizer has more freedom to reorder tables in the FROM clause even when there are LEFT joins.
      • +
      +
• Extension loading support added to WinCE
    • +
    • Allow constraint names on the DEFAULT clause in a table definition
    • +
    • Added the ".bail" command to the command-line shell
    • +
• Make CSV (comma-separated value) output from the command-line shell more closely aligned with accepted practice
    • +
    • Experimental FTS2 module added
    • +
    • Use sqlite3_mprintf() instead of strdup() to avoid libc dependencies
    • +
    • VACUUM uses a temporary file in the official TEMP folder, not in the +same directory as the original database
    • +
    • The prefix on temporary filenames on Windows is changed from "sqlite" +to "etilqs".
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
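The sqlite3_prepare_v2() interface added in 3.3.9 above differs from the older sqlite3_prepare() mainly in error reporting: sqlite3_step() returns the specific error code rather than a generic SQLITE_ERROR, and prepared statements recover from schema changes. A minimal error-handling sketch, assuming an already-open connection and an arbitrary single-column query supplied by the caller:

    #include <stdio.h>
    #include <sqlite3.h>

    static int dump_first_column(sqlite3 *db, const char *sql) {
      sqlite3_stmt *stmt = 0;
      int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, 0);
      if (rc != SQLITE_OK) {
        fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
        return rc;
      }
      while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
        const unsigned char *v = sqlite3_column_text(stmt, 0);
        printf("%s\n", v ? (const char*)v : "NULL");
      }
      if (rc != SQLITE_DONE)   /* the real error code, courtesy of the _v2 interface */
        fprintf(stderr, "step failed: %s\n", sqlite3_errmsg(db));
      sqlite3_finalize(stmt);
      return rc == SQLITE_DONE ? SQLITE_OK : rc;
    }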
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_4_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_4_0.html --- sqlite3-3.4.2/www/releaselog/3_4_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_4_0.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,141 @@ + + +SQLite Release 3.4.0 On 2007 June 18 (3.4.0) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.4.0 On 2007 June 18 (3.4.0)

    Changes associated with this release include the following:

      +
    • Fix a bug that can lead to database corruption if an SQLITE_BUSY error + occurs in the middle of an explicit transaction and that transaction + is later committed. Ticket #2409. + See the + + CorruptionFollowingBusyError wiki page for details. +
    • Fix a bug that can lead to database corruption if autovacuum mode is + on and a malloc() failure follows a CREATE TABLE or CREATE INDEX statement + which itself follows a cache overflow inside a transaction. See + ticket #2418. +
    • +
    • Added explicit upper bounds on the sizes and + quantities of things SQLite can process. This change might cause + compatibility problems for + applications that use SQLite in the extreme, which is why the current + release is 3.4.0 instead of 3.3.18.
    • +
    • Added support for Incremental BLOB I/O.
    • +
• Added the sqlite3_bind_zeroblob() API and the zeroblob() SQL function (illustrated after these notes).
    • +
    • Added support for + Incremental Vacuum.
    • +
    • Added the SQLITE_MIXED_ENDIAN_64BIT_FLOAT compile-time option to support + ARM7 processors with goofy endianness.
    • +
    • Removed all instances of sprintf() and strcpy() from the core library.
    • +
    • Added support for + International Components for Unicode (ICU) to the full-text search + extensions. +

    +

      +
    • In the Windows OS driver, reacquire a SHARED lock if an attempt to + acquire an EXCLUSIVE lock fails. Ticket #2354
    • +
    • Fix the REPLACE() function so that it returns NULL if the second argument + is an empty string. Ticket #2324.
    • +
    • Document the hazards of type conversions in + sqlite3_column_blob() + and related APIs. Fix unnecessary type conversions. Ticket #2321.
    • +
    • Internationalization of the TRIM() function. Ticket #2323
    • +
    • Use memmove() instead of memcpy() when moving between memory regions + that might overlap. Ticket #2334
    • +
    • Fix an optimizer bug involving subqueries in a compound SELECT that has + both an ORDER BY and a LIMIT clause. Ticket #2339.
    • +
    • Make sure the sqlite3_snprintf() + interface does not zero-terminate the buffer if the buffer size is + less than 1. Ticket #2341
    • +
    • Fix the built-in printf logic so that it prints "NaN" not "Inf" for + floating-point NaNs. Ticket #2345
    • +
    • When converting BLOB to TEXT, use the text encoding of the main database. + Ticket #2349
    • +
    • Keep the full precision of integers (if possible) when casting to + NUMERIC. Ticket #2364
    • +
    • Fix a bug in the handling of UTF16 codepoint 0xE000
    • +
    • Consider explicit collate clauses when matching WHERE constraints + to indices in the query optimizer. Ticket #2391
    • +
    • Fix the query optimizer to correctly handle constant expressions in + the ON clause of a LEFT JOIN. Ticket #2403
    • +
    • Fix the query optimizer to handle rowid comparisons to NULL + correctly. Ticket #2404
    • +
    • Fix many potential segfaults that could be caused by malicious SQL + statements.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
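A hedged sketch of how the zeroblob()/incremental BLOB I/O features listed for 3.4.0 above fit together: reserve space with sqlite3_bind_zeroblob(), then write the payload in place with the sqlite3_blob_* routines. The files table and its columns are invented for illustration.

    #include <sqlite3.h>

    static int store_blob(sqlite3 *db, const unsigned char *data, int n) {
      sqlite3_stmt *stmt;
      sqlite3_blob *blob;
      int rc;

      sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS files(id INTEGER PRIMARY KEY, body BLOB);",
                   0, 0, 0);

      rc = sqlite3_prepare_v2(db, "INSERT INTO files(body) VALUES(?)", -1, &stmt, 0);
      if (rc != SQLITE_OK) return rc;
      sqlite3_bind_zeroblob(stmt, 1, n);      /* reserve n zero-filled bytes */
      rc = sqlite3_step(stmt);
      sqlite3_finalize(stmt);
      if (rc != SQLITE_DONE) return rc;

      /* Open the new row's BLOB read/write and fill it incrementally. */
      rc = sqlite3_blob_open(db, "main", "files", "body",
                             sqlite3_last_insert_rowid(db), 1, &blob);
      if (rc != SQLITE_OK) return rc;
      rc = sqlite3_blob_write(blob, data, n, 0);
      sqlite3_blob_close(blob);
      return rc;
    }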
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_4_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_4_1.html --- sqlite3-3.4.2/www/releaselog/3_4_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_4_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,109 @@ + + +SQLite Release 3.4.1 On 2007 July 20 (3.4.1) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.4.1 On 2007 July 20 (3.4.1)

    Changes associated with this release include the following:

      +
• Fix a bug in VACUUM that can lead to database corruption if two processes are connected to the database at the same time and one VACUUMs and the other then modifies the database.
    • +
    • The expression "+column" is now considered the same as "column" + when computing the collating sequence to use on the expression.
    • +
    • In the TCL language interface, + "@variable" instead of "$variable" always binds as a blob.
    • +
    • Added PRAGMA freelist_count + for determining the current size of the freelist.
    • +
    • The + PRAGMA auto_vacuum=incremental setting is now persistent.
    • +
    • Add FD_CLOEXEC to all open files under Unix.
    • +
    • Fix a bug in the + min()/max() optimization when applied to + descending indices.
    • +
    • Make sure the TCL language interface works correctly with 64-bit + integers on 64-bit machines.
    • +
    • Allow the value -9223372036854775808 as an integer literal in SQL + statements.
    • +
    • Add the capability of "hidden" columns in virtual tables.
    • +
    • Use the macro SQLITE_PRIVATE (defaulting to "static") on all + internal functions in the amalgamation.
    • +
    • Add pluggable tokenizers and ICU + tokenization support to FTS2
    • +
    • Other minor bug fixes and documentation enhancements
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_4_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_4_2.html --- sqlite3-3.4.2/www/releaselog/3_4_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_4_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,99 @@ + + +SQLite Release 3.4.2 On 2007 August 13 (3.4.2) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.4.2 On 2007 August 13 (3.4.2)

    Changes associated with this release include the following:

      +
    • Fix a database corruption bug that might occur if a ROLLBACK command +is executed in auto-vacuum mode +and a very small sqlite3_soft_heap_limit is set. +Ticket #2565. + +
    • Add the ability to run a full regression test with a small +sqlite3_soft_heap_limit. + +
    • Fix other minor problems with using small soft heap limits. + +
    • Work-around for +GCC bug 32575. + +
    • Improved error detection of misused aggregate functions. + +
    • Improvements to the amalgamation generator script so that all symbols +are prefixed with either SQLITE_PRIVATE or SQLITE_API. +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_0.html --- sqlite3-3.4.2/www/releaselog/3_5_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_0.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +SQLite Release 3.5.0 On 2007 Sep 04 (3.5.0) alpha + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.0 On 2007 Sep 04 (3.5.0) alpha

    Changes associated with this release include the following:

      +
    • Redesign the OS interface layer. See + 34to35.html for details. + *** Potentially incompatible change *** +
• The sqlite3_release_memory(), sqlite3_soft_heap_limit(), and sqlite3_enable_shared_cache() interfaces now work across all threads in the process, not just the single thread in which they are invoked. *** Potentially incompatible change ***
    • Added the sqlite3_open_v2() interface. +
    • Reimplemented the memory allocation subsystem and made it + replaceable at compile-time. +
• Created a new mutex subsystem and made it replaceable at compile-time.
    • The same database connection may now be used simultaneously by + separate threads. +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_1.html --- sqlite3-3.4.2/www/releaselog/3_5_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +SQLite Release 3.5.1 On 2007 Oct 04 (3.5.1) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.1 On 2007 Oct 04 (3.5.1)

    Changes associated with this release include the following:

      +
• Nota Bene: We are not using the terms "alpha" or "beta" on this release because the code is stable and because if we use those terms, nobody will upgrade. However, we still reserve the right to make incompatible changes to the new VFS interface in future releases.
    • + +
    • Fix a bug in the handling of SQLITE_FULL errors that could lead + to database corruption. Ticket #2686. +
• The test_async.c driver now does full file locking and works correctly when used simultaneously by multiple processes on the same database.
    • The CLI ignores whitespace (including comments) at the end of lines +
    • Make sure the query optimizer checks dependencies on all terms of + a compound SELECT statement. Ticket #2640. +
    • Add demonstration code showing how to build a VFS for a raw + mass storage without a filesystem. +
    • Added an output buffer size parameter to the xGetTempname() method + of the VFS layer. +
    • Sticky SQLITE_FULL or SQLITE_IOERR errors in the pager are reset + when a new transaction is started. +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_2.html --- sqlite3-3.4.2/www/releaselog/3_5_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.5.2 On 2007 Nov 05 (3.5.2) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.2 On 2007 Nov 05 (3.5.2)

    Changes associated with this release include the following:

      +
    • Dropped support for the SQLITE_OMIT_MEMORY_ALLOCATION compile-time +option. +
    • Always open files using FILE_FLAG_RANDOM_ACCESS under Windows. +
    • The 3rd parameter of the built-in SUBSTR() function is now optional. +
    • Bug fix: do not invoke the authorizer when reparsing the schema after +a schema change. +
    • Added the experimental malloc-free memory allocator in mem3.c. +
    • Virtual machine stores 64-bit integer and floating point constants +in binary instead of text for a performance boost. +
    • Fix a race condition in test_async.c. +
    • Added the ".timer" command to the CLI +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_3.html --- sqlite3-3.4.2/www/releaselog/3_5_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_3.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,101 @@ + + +SQLite Release 3.5.3 On 2007 Nov 27 (3.5.3) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.3 On 2007 Nov 27 (3.5.3)

    Changes associated with this release include the following:

      +
    • Move website and documentation files out of the source tree into +a separate CM system. +
    • Fix a long-standing bug in INSERT INTO ... SELECT ... statements +where the SELECT is compound. +
    • Fix a long-standing bug in RAISE(IGNORE) as used in BEFORE triggers. +
    • Fixed the operator precedence for the ~ operator. +
    • On Win32, do not return an error when attempting to delete a file +that does not exist. +
    • Allow collating sequence names to be quoted. +
    • Modify the TCL interface to use sqlite3_prepare_v2(). +
    • Fix multiple bugs that can occur following a malloc() failure. +
    • sqlite3_step() returns SQLITE_MISUSE instead of crashing when +called with a NULL parameter. +
    • FTS3 now uses the SQLite memory allocator exclusively. The +FTS3 amalgamation can now be appended to the SQLite amalgamation to +generate a super-amalgamation containing both. +
    • The DISTINCT keyword now will sometimes use an INDEX if an +appropriate index is available and the optimizer thinks its use +might be advantageous. +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_4.html --- sqlite3-3.4.2/www/releaselog/3_5_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_4.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,106 @@ + + +SQLite Release 3.5.4 On 2007 Dec 14 (3.5.4) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.4 On 2007 Dec 14 (3.5.4)

    Changes associated with this release include the following:

      +
    • Fix a critical bug in UPDATE or DELETE that occurs when an +OR REPLACE clause or a trigger causes rows in the same table to +be deleted as side effects. (See ticket #2832.) The most likely +result of this bug is a segmentation fault, though database +corruption is a possibility.
    • +
• Bring the processing of ORDER BY into compliance with the SQL standard for the case where a result alias and a table column name are in conflict. The correct behavior is to prefer the result alias. Older versions of SQLite incorrectly picked the table column. (See ticket #2822.)
    • +
    • The VACUUM command preserves +the setting of the +legacy_file_format pragma. +(Ticket #2804.)
    • +
    • Productize and officially support the group_concat() SQL function.
    • +
    • Better optimization of some IN operator expressions.
    • +
• Add the ability to change the auto_vacuum status of a database by setting the auto_vacuum pragma and VACUUMing the database.
    • +
    • Prefix search in FTS3 is much more efficient.
    • +
    • Relax the SQL statement length restriction in the CLI so that +the ".dump" output of databases with very large BLOBs and strings can +be played back to recreate the database.
    • +
    • Other small bug fixes and optimizations.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_5.html --- sqlite3-3.4.2/www/releaselog/3_5_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_5.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.5.5 On 2008 Jan 31 (3.5.5) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.5 On 2008 Jan 31 (3.5.5)

    Changes associated with this release include the following:

      +
    • Convert the underlying virtual machine to be a register-based machine +rather than a stack-based machine. The only user-visible change +is in the output of EXPLAIN.
    • +
• Add the built-in RTRIM collating sequence.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_6.html --- sqlite3-3.4.2/www/releaselog/3_5_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_6.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.5.6 On 2008 Feb 6 (3.5.6) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.6 On 2008 Feb 6 (3.5.6)

    Changes associated with this release include the following:

      +
• Fix a bug (ticket #2913) that prevented virtual tables from working in a LEFT JOIN. The problem was introduced shortly before the 3.5.5 release.
    • +
    • Bring the OS/2 porting layer up-to-date.
    • +
    • Add the new sqlite3_result_error_code() API and use it in the +implementation of ATTACH so that proper error codes are returned +when an ATTACH fails.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_7.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_7.html --- sqlite3-3.4.2/www/releaselog/3_5_7.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_7.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,103 @@ + + +SQLite Release 3.5.7 On 2008 Mar 17 (3.5.7) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.7 On 2008 Mar 17 (3.5.7)

    Changes associated with this release include the following:

      +
    • Fix a bug (ticket #2927) in the register allocation for +compound selects - introduced by the new VM code in version 3.5.5.
    • +
    • ALTER TABLE uses double-quotes instead of single-quotes for quoting +filenames.
    • +
    • Use the WHERE clause to reduce the size of a materialized VIEW in +an UPDATE or DELETE statement. (Optimization)
    • +
    • Do not apply the flattening optimization if the outer query is an +aggregate and the inner query contains ORDER BY. (Ticket #2943)
    • +
    • Additional OS/2 updates
    • +
    • Added an experimental power-of-two, first-fit memory allocator.
    • +
    • Remove all instances of sprintf() from the code
    • +
    • Accept "Z" as the zulu timezone at the end of date strings
    • +
    • Fix a bug in the LIKE optimizer that occurs when the last character +before the first wildcard is an upper-case "Z"
    • +
    • Added the "bitvec" object for keeping track of which pages have +been journalled. Improves speed and reduces memory consumption, especially +for large database files.
    • +
    • Get the SQLITE_ENABLE_LOCKING_STYLE macro working again on Mac OS X.
    • +
    • Store the statement journal in the temporary file directory instead of +colocated with the database file.
    • +
    • Many improvements and cleanups to the configure script
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_8.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_8.html --- sqlite3-3.4.2/www/releaselog/3_5_8.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_8.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,107 @@ + + +SQLite Release 3.5.8 On 2008 Apr 16 (3.5.8) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.8 On 2008 Apr 16 (3.5.8)

    Changes associated with this release include the following:

      +
    • Expose SQLite's internal pseudo-random number generator (PRNG) + via the sqlite3_randomness() interface
    • +
    • New interface sqlite3_context_db_handle() that returns the + database connection handle that has invoked an application-defined + SQL function.
    • +
• New interface sqlite3_limit() allows size and length limits to be set on a per-connection basis and at run-time (see the sketch following these notes).
    • +
    • Improved crash-robustness: write the database page size into the rollback + journal header.
    • +
    • Allow the VACUUM command to change the page size of a database file.
    • +
    • The xAccess() method of the VFS is allowed to return -1 to signal + a memory allocation error.
    • +
    • Performance improvement: The OP_IdxDelete opcode uses unpacked records, + obviating the need for one OP_MakeRecord opcode call for each index + record deleted.
    • +
    • Performance improvement: Constant subexpressions are factored out of + loops.
    • +
    • Performance improvement: Results of OP_Column are reused rather than + issuing multiple OP_Column opcodes.
    • +
    • Fix a bug in the RTRIM collating sequence.
    • +
    • Fix a bug in the SQLITE_SECURE_DELETE option that was causing + Firefox crashes. Make arrangements to always test SQLITE_SECURE_DELETE + prior to each release.
    • +
    • Other miscellaneous performance enhancements.
    • +
    • Other miscellaneous minor bug fixes.
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
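The per-connection limits added in 3.5.8 above are read and written with sqlite3_limit(); the call returns the previous value, and a negative argument queries without changing anything. The limit values below are arbitrary examples, not recommendations.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void) {
      sqlite3 *db;
      if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

      /* Cap string/BLOB length and SQL statement length for this connection only. */
      int old_len = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);
      int old_sql = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);
      printf("previous limits: LENGTH=%d SQL_LENGTH=%d\n", old_len, old_sql);

      /* A negative value just reports the current setting. */
      printf("current LENGTH limit: %d\n", sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1));

      sqlite3_close(db);
      return 0;
    }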
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_5_9.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_5_9.html --- sqlite3-3.4.2/www/releaselog/3_5_9.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_5_9.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,101 @@ + + +SQLite Release 3.5.9 On 2008 May 14 (3.5.9) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.5.9 On 2008 May 14 (3.5.9)

    Changes associated with this release include the following:

      +
    • Added experimental + support for the journal_mode PRAGMA and persistent journal.
    • +
    • Journal mode PERSIST is the default behavior in + exclusive locking mode.
    • +
    • Fix a performance regression on LEFT JOIN (see ticket #3015) + that was mistakenly introduced in version 3.5.8.
    • +
    • Performance enhancement: Reengineer the internal routines used + to interpret and render variable-length integers.
    • +
    • Fix a buffer-overrun problem in sqlite3_mprintf() which occurs + when a string without a zero-terminator is passed to "%.*s".
    • +
    • Always convert IEEE floating point NaN values into NULL during + processing. (Ticket #3060)
    • +
• Make sure that when a connection blocks on a RESERVED lock it is able to continue after the lock is released. (Ticket #3093)
    • +
    • The "configure" scripts should now automatically configure Unix + systems for large file support. Improved error messages for + when large files are encountered and large file support is disabled.
    • +
• Avoid cache page leaks following disk-full or I/O errors
    • +
    • And, many more minor bug fixes and performance enhancements....
    • +

    +

A complete list of SQLite releases in a single page is also available. A detailed history of every check-in is available at http://www.sqlite.org/cvstrac/timeline.

    +
    +This page last modified 2009/06/27 13:40:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_0.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_0.html --- sqlite3-3.4.2/www/releaselog/3_6_0.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_0.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,114 @@ + + +SQLite Release 3.6.0 On 2008 July 16 (3.6.0 beta) + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + +

    SQLite Release 3.6.0 On 2008 July 16 (3.6.0 beta)

    Changes associated with this release include the following:

    • Modifications to the virtual file system interface to support a wider range of embedded systems. See 35to36.html for additional information. *** Potentially incompatible change ***
    • All C-preprocessor macros used to control compile-time options now begin with the prefix "SQLITE_". This may require changes to applications that compile SQLite using their own makefiles and with custom compile-time options, hence we mark this as a *** Potentially incompatible change ***
    • The SQLITE_MUTEX_APPDEF compile-time option is no longer supported. Alternative mutex implementations can now be added at run-time using the sqlite3_config() interface with the SQLITE_CONFIG_MUTEX verb. *** Potentially incompatible change ***
    • The handling of IN and NOT IN operators that contain a NULL on their right-hand side expression is brought into compliance with the SQL standard and with other SQL database engines. This is a bug fix, but as it has the potential to break legacy applications that depend on the older buggy behavior, we mark it as a *** Potentially incompatible change ***
    • The result column names generated for compound subqueries have been simplified to show only the name of the column of the original table and omit the table name. This makes SQLite operate more like other SQL database engines.
    • Added the sqlite3_config() interface for doing run-time configuration of the entire SQLite library. (A brief initialization sketch follows this list.)
    • Added the sqlite3_status() interface used for querying run-time status information about the overall SQLite library and its subsystems.
    • Added the sqlite3_initialize() and sqlite3_shutdown() interfaces.
    • The SQLITE_OPEN_NOMUTEX option was added to sqlite3_open_v2().
    • Added the PRAGMA page_count command.
    • Added the sqlite3_next_stmt() interface.
    • Added a new R*Tree virtual table.
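    A minimal sketch of the new configuration, initialization, and status interfaces; SQLITE_CONFIG_SERIALIZED is used only as an example verb, and the surrounding helper function is hypothetical:

    #include <sqlite3.h>

    int library_setup(void){
      int nCur = 0, nHi = 0;
      /* Library-wide configuration must happen before initialization. */
      if( sqlite3_config(SQLITE_CONFIG_SERIALIZED)!=SQLITE_OK ) return 1;
      if( sqlite3_initialize()!=SQLITE_OK ) return 1;
      /* ... open database connections and run SQL here ... */
      /* Query run-time status of the library's memory usage. */
      sqlite3_status(SQLITE_STATUS_MEMORY_USED, &nCur, &nHi, 0);
      return sqlite3_shutdown();
    }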

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_10.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_10.html --- sqlite3-3.4.2/www/releaselog/3_6_10.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_10.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,84 @@ + + +SQLite Release 3.6.10 On 2009 Jan 15 (3.6.10) + + + + + +

    SQLite Release 3.6.10 On 2009 Jan 15 (3.6.10)

    Changes associated with this release include the following:

    • Fix a cache coherency problem that could lead to database corruption. Ticket #3584.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_11.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_11.html --- sqlite3-3.4.2/www/releaselog/3_6_11.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_11.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.6.11 On 2009 Feb 18 (3.6.11) + + + + + +

    SQLite Release 3.6.11 On 2009 Feb 18 (3.6.11)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_12.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_12.html --- sqlite3-3.4.2/www/releaselog/3_6_12.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_12.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.6.12 On 2009 March 31 (3.6.12) + + + + + +

    SQLite Release 3.6.12 On 2009 March 31 (3.6.12)

    Changes associated with this release include the following:

    • Fixed a bug that caused database corruption when an incremental_vacuum is rolled back in an in-memory database. Ticket #3761.
    • Added the sqlite3_unlock_notify() interface. (A short sketch follows this list.)
    • Added the reverse_unordered_selects pragma.
    • The default page size on Windows is automatically adjusted to match the capabilities of the underlying filesystem.
    • Add the new ".genfkey" command in the CLI for generating triggers to implement foreign key constraints.
    • Performance improvements for "count(*)" queries.
    • Reduce the amount of heap memory used, especially by TRIGGERs.
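    A rough sketch of how sqlite3_unlock_notify() might be used when a statement returns SQLITE_LOCKED in shared-cache mode. The callback body and its signalling mechanism are placeholders, and the interface is normally only present when SQLite is built with SQLITE_ENABLE_UNLOCK_NOTIFY:

    #include <sqlite3.h>

    /* Invoked by the blocking connection once it releases its locks. */
    static void unlock_notify_cb(void **apArg, int nArg){
      int i;
      for(i=0; i<nArg; i++){
        /* signal the waiting thread associated with apArg[i] */
      }
    }

    int wait_for_unlock(sqlite3 *db){
      /* Ask to be notified when the blocking transaction finishes. */
      return sqlite3_unlock_notify(db, unlock_notify_cb, 0);
    }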

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_13.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_13.html --- sqlite3-3.4.2/www/releaselog/3_6_13.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_13.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.6.13 On 2009 April 13 (3.6.13) + + + + + +

    SQLite Release 3.6.13 On 2009 April 13 (3.6.13)

    Changes associated with this release include the following:

    • Fix a bug in version 3.6.12 that causes a segfault when running a count(*) on the sqlite_master table of an empty database. Ticket #3774.
    • Fix a bug in version 3.6.12 that causes a segfault when inserting into a table using a DEFAULT value where there is a function as part of the DEFAULT value expression. Ticket #3791.
    • Fix data structure alignment issues on Sparc. Ticket #3777.
    • Other minor bug fixes.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_14_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_14_1.html --- sqlite3-3.4.2/www/releaselog/3_6_14_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_14_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.6.14.1 On 2009 May 19 (3.6.14.1) + + + + + +

    SQLite Release 3.6.14.1 On 2009 May 19 (3.6.14.1)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_14_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_14_2.html --- sqlite3-3.4.2/www/releaselog/3_6_14_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_14_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.14.2 On 2009 May 25 (3.6.14.2) + + + + + +

    SQLite Release 3.6.14.2 On 2009 May 25 (3.6.14.2)

    Changes associated with this release include the following:

    • Fix a code generator bug introduced in version 3.6.14. This bug can cause incorrect query results under obscure circumstances. Ticket #3879.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_14.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_14.html --- sqlite3-3.4.2/www/releaselog/3_6_14.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_14.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,91 @@ + + +SQLite Release 3.6.14 On 2009 May 7 (3.6.14) + + + + + +

    SQLite Release 3.6.14 On 2009 May 7 (3.6.14)

    Changes associated with this release include the following:

    • Added the optional asynchronous VFS module.
    • Enhanced the query optimizer so that virtual tables are able to make use of OR and IN operators in the WHERE clause.
    • Speed improvements in the btree and pager layers.
    • Added the SQLITE_HAVE_ISNAN compile-time option which will cause the isnan() function from the standard math library to be used instead of SQLite's own home-brew NaN checker.
    • Countless minor bug fixes, documentation improvements, new and improved test cases, and code simplifications and cleanups.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_15.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_15.html --- sqlite3-3.4.2/www/releaselog/3_6_15.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_15.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +SQLite Release 3.6.15 On 2009 June 15 (3.6.15) + + + + + +

    SQLite Release 3.6.15 On 2009 June 15 (3.6.15)

    Changes associated with this release include the following:

    • Refactor the internal representation of SQL expressions so that they use less memory on embedded platforms.
    • Reduce the amount of stack space used.
    • Fix a 64-bit alignment bug on HP/UX and Sparc.
    • The sqlite3_create_function() family of interfaces now returns SQLITE_MISUSE instead of SQLITE_ERROR when passed invalid parameter combinations. (A short registration sketch follows this list.)
    • When new tables are created using CREATE TABLE ... AS SELECT ..., the datatype of the columns is the simplified SQLite datatype (TEXT, INT, REAL, NUMERIC, or BLOB) instead of a copy of the original datatype from the source table.
    • Resolve race conditions when checking for a hot rollback journal.
    • The sqlite3_shutdown() interface frees all mutexes under Windows.
    • Enhanced robustness against corrupt database files.
    • Continuing improvements to the test suite and fixes to obscure bugs and inconsistencies that the test suite improvements are uncovering.
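    A minimal registration sketch for sqlite3_create_function(); the function name "half" and its implementation are illustrative only:

    #include <sqlite3.h>

    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
    }

    int register_half(sqlite3 *db){
      /* As of 3.6.15, an invalid parameter combination here (for example,
      ** an out-of-range argument count) yields SQLITE_MISUSE rather than
      ** SQLITE_ERROR. */
      return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                     halfFunc, 0, 0);
    }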

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_16.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_16.html --- sqlite3-3.4.2/www/releaselog/3_6_16.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_16.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.16 On 2009 June 27 (3.6.16) + + + + + +

    SQLite Release 3.6.16 On 2009 June 27 (3.6.16)

    Changes associated with this release include the following:

    • Fix a bug (ticket #3929) that occasionally causes INSERT or UPDATE operations to fail on an indexed table that has a self-modifying trigger.
    • Other minor bug fixes and performance optimizations.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_1.html --- sqlite3-3.4.2/www/releaselog/3_6_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,95 @@ + + +SQLite Release 3.6.1 On 2008 Aug 6 (3.6.1) + + + + + +

    SQLite Release 3.6.1 On 2008 Aug 6 (3.6.1)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_2.html --- sqlite3-3.4.2/www/releaselog/3_6_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.2 On 2008 Aug 30 (3.6.2) + + + + + +

    SQLite Release 3.6.2 On 2008 Aug 30 (3.6.2)

    Changes associated with this release include the following:

    • Split the pager subsystem into separate pager and pcache subsystems.
    • Factor out identifier resolution procedures into separate files.
    • Bug fixes.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_3.html --- sqlite3-3.4.2/www/releaselog/3_6_3.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_3.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.3 On 2008 Sep 22 (3.6.3) + + + + + +

    SQLite Release 3.6.3 On 2008 Sep 22 (3.6.3)

    Changes associated with this release include the following:

    • Fix for a bug in the SELECT DISTINCT logic that was introduced by the prior version.
    • Other minor bug fixes.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_4.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_4.html --- sqlite3-3.4.2/www/releaselog/3_6_4.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_4.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,100 @@ + + +SQLite Release 3.6.4 On 2008 Oct 15 (3.6.4) + + + + + +

    SQLite Release 3.6.4 On 2008 Oct 15 (3.6.4)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_5.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_5.html --- sqlite3-3.4.2/www/releaselog/3_6_5.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_5.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,97 @@ + + +SQLite Release 3.6.5 On 2008 Nov 12 (3.6.5) + + + + + +

    SQLite Release 3.6.5 On 2008 Nov 12 (3.6.5)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_6_1.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_6_1.html --- sqlite3-3.4.2/www/releaselog/3_6_6_1.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_6_1.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.6.1 On 2008 Nov 22 (3.6.6.1) + + + + + +

    SQLite Release 3.6.6.1 On 2008 Nov 22 (3.6.6.1)

    Changes associated with this release include the following:

    • Fix a bug in the page cache that can lead to database corruption following a rollback. This bug was first introduced in version 3.6.4.
    • Two other very minor bug fixes.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_6_2.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_6_2.html --- sqlite3-3.4.2/www/releaselog/3_6_6_2.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_6_2.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,86 @@ + + +SQLite Release 3.6.6.2 On 2008 Nov 26 (3.6.6.2) + + + + + +

    SQLite Release 3.6.6.2 On 2008 Nov 26 (3.6.6.2)

    Changes associated with this release include the following:

    • Fix a bug in the b-tree delete algorithm that seems like it might be able to cause database corruption. The bug was first introduced in version 3.6.6 by check-in [5899] on 2008-11-13.
    • Fix a memory leak that can occur following a disk I/O error.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_6.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_6.html --- sqlite3-3.4.2/www/releaselog/3_6_6.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_6.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,87 @@ + + +SQLite Release 3.6.6 On 2008 Nov 19 (3.6.6) + + + + + +

    SQLite Release 3.6.6 On 2008 Nov 19 (3.6.6)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_7.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_7.html --- sqlite3-3.4.2/www/releaselog/3_6_7.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_7.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Release 3.6.7 On 2008 Dec 16 (3.6.7) + + + + + +

    SQLite Release 3.6.7 On 2008 Dec 16 (3.6.7)

    Changes associated with this release include the following:

    • Reorganize the Unix interface in os_unix.c.
    • Added support for "Proxy Locking" on MacOSX.
    • Changed the prototype of the sqlite3_auto_extension() interface in a way that is backwards compatible but which might cause warnings in new builds of applications that use that interface.
    • Changed the signature of the xDlSym method of the sqlite3_vfs object in a way that is backwards compatible but which might cause compiler warnings.
    • Added superfluous casts and variable initializations in order to suppress nuisance compiler warnings.
    • Fixes for various minor bugs.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_8.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_8.html --- sqlite3-3.4.2/www/releaselog/3_6_8.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_8.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,89 @@ + + +SQLite Release 3.6.8 On 2009 Jan 12 (3.6.8) + + + + + +

    SQLite Release 3.6.8 On 2009 Jan 12 (3.6.8)

    Changes associated with this release include the following:

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/releaselog/3_6_9.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/releaselog/3_6_9.html --- sqlite3-3.4.2/www/releaselog/3_6_9.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/releaselog/3_6_9.html 2009-06-27 15:07:37.000000000 +0100 @@ -0,0 +1,85 @@ + + +SQLite Release 3.6.9 On 2009 Jan 14 (3.6.9) + + + + + +

    SQLite Release 3.6.9 On 2009 Jan 14 (3.6.9)

    Changes associated with this release include the following:

    • Fix two bugs, which when combined might result in incorrect query results. Both bugs were harmless by themselves; only when they team up do they cause problems. Ticket #3581.

    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/reqmatrix.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/reqmatrix.html --- sqlite3-3.4.2/www/reqmatrix.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/reqmatrix.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,10942 @@ + + +Requirements Derivation Matrix + + + + + +

    Requirements Derivation Matrix

    S10000 +The SQLite library shall translate high-level SQL statements into +low-level I/O calls to persistent storage. +

    No parents

    +

    Children: S10100 + S10200 + S10300 + S10500 + S10600 + S10700 + H12101 + H12102 + H12104 + H12105 + H12107 + H12110 + H12113 + H12116 + H12119 + H12122 + H12125 + H12131 + H12134 + H12137 + H12138 + H12371 + H12373 + H12374 + H12376 + H12379 + H12382 + H13011 + H13012 + H13013 + H13014 + H13015 + H13016 + H13019 + H13021 + H13202 + H15304 + H15306 + H15308 + H15310 +

    +
    S10100 The SQLite library shall accept a well-defined dialect of SQL that conforms to published SQL standards.

    Parents: S10000 +

    +

    Children: S10110 + S10120 +

    +
    S10110 +The SQLite library shall support BLOB, CLOB, integer, and floating-point +datatypes. +

    Parents: S10100 +

    +

    Children: H10201 + H10202 +

    +
    S10120 +The SQLite library shall implement the standard SQL interpretation +of NULL values. +

    Parents: S10100 +

    +

    No children

    +
    S10200 +The SQLite library shall communicate directly with database files +in persistent storage. +

    Parents: S10000 +

    +

    No children

    +
    S10300 +The SQLite library shall implement ACID transactions. +

    Parents: S10000 +

    +

    No children

    +
    S10500 +The SQLite library shall implement transactions that are robust +across application crashes, operating-system crashes, and power +failures. +

    Parents: S10000 +

    +

    No children

    +
    S10600 +The SQLite library shall support simultaneous access to multiple +database files on the same database connection. +

    Parents: S10000 +

    +

    Children: H12241 + H12243 + H12261 + H12263 +

    +
    S10700 +The SQLite library shall provide interfaces that allow the application +to obtain the status and results of SQL operations. +

    Parents: S10000 +

    +

    Children: H12201 + H12202 + H12221 + H12223 + H13711 + H13721 + H13723 + H13724 + H13725 + H13726 + H13727 + H13741 + H13742 + H13743 + H13744 + H13745 + H13746 + H13748 + H13761 + H13762 + H13763 + H13771 + H13772 + H13803 + H13806 + H13809 + H13812 + H13815 + H13818 + H13821 + H13824 + H13827 + H13830 +

    +
    S20000 +The SQLite library shall be extensible and configurable. +

    No parents

    +

    Children: S20100 + S20200 + S20300 + S20400 + S20500 + S20600 + H14103 + H14106 + H14120 + H14123 + H14126 + H14129 + H14132 + H14135 + H14138 + H14141 + H14144 + H14147 + H14150 + H14153 + H14156 + H14159 + H14162 + H14165 + H14168 + H14203 + H14206 + H14209 + H14212 + H14215 + H17303 + H17304 + H17305 + H17306 + H17310 + H17312 + H17315 + H17318 + H17321 + H17322 + H17323 + H17392 +

    +
    S20100 +The SQLite library shall provide interfaces that permit the application +to override interfaces to the platform on which the application is running. +

    Parents: S20000 +

    +

    Children: S20110 + S20120 + S20130 + H11203 + H11206 + H11209 + H11212 + H11215 + H11218 +

    +
    S20110 +The SQLite library shall provide interfaces that permit the application +to override the interfaces used to read and write persistent storage. +

    Parents: S20100 +

    +

    No children

    +
    S20120 +The SQLite library shall provide interfaces that permit the application +to override the interfaces used for memory allocation. +

    Parents: S20100 +

    +

    No children

    +
    S20130 +The SQLite library shall provide interfaces that permit the application +to override the interfaces used for controlling mutexes. +

    Parents: S20100 +

    +

    No children

    +
    S20200 +The SQLite library shall provide interfaces that permit the application +to create new SQL functions. +

    Parents: S20000 +

    +

    Children: S60600 + H15103 + H15106 + H15109 + H15112 + H15115 + H15118 + H15121 + H15124 + H15127 + H15130 + H15133 + H15136 + H16103 + H16106 + H16109 + H16112 + H16118 + H16121 + H16127 + H16130 + H16133 + H16136 + H16139 + H16142 + H16211 + H16213 + H16215 + H16217 + H16243 + H16272 + H16274 + H16276 + H16277 + H16278 + H16279 + H16403 + H16406 + H16409 + H16412 + H16415 + H16418 + H16421 + H16424 + H16427 + H16430 + H16433 + H16436 + H16439 + H16442 + H16445 + H16448 + H16451 + H16454 + H16457 + H16460 + H16463 +

    +
    S20300 +The SQLite library shall provide interfaces that permit the application +to create new text collating sequences. +

    Parents: S20000 +

    +

    Children: H16603 + H16604 + H16606 + H16609 + H16612 + H16615 + H16618 + H16621 + H16624 + H16627 + H16630 + H16702 + H16704 + H16706 +

    +
    S20400 +The SQLite library shall provide interfaces that permit the application +to create new classes of virtual SQL tables. +

    Parents: S20000 +

    +

    No children

    +
    S20500 +The SQLite library shall provide interfaces that permit the application +to load extensions at run-time using shared libraries. +

    Parents: S20000 +

    +

    No children

    +
    S20600 +The SQLite library shall provide interfaces that permit the application +to dynamically query and modify size limits. +

    Parents: S20000 +

    +

    Children: H12762 + H12766 + H12769 +

    +
    S30000 +The SQLite library shall be safe for use in long-running, +low-resource, high-reliability applications. +

    No parents

    +

    Children: S30100 + S30200 + S30300 + S30400 + S30500 + S30600 + S30700 + S30800 + S30900 + S70300 +

    +
    S30100 +The SQLite library shall release all system resources it holds +when it is properly shutdown. +

    Parents: S30000 +

    +

    Children: H12011 + H12012 + H12013 + H12014 + H12015 + H12019 +

    +
    S30200 +The SQLite library shall be configurable so that it is guaranteed +to never fail a memory allocation as long as the application does +not request resources in excess of reasonable and published limits. +

    Parents: S30000 +

    +

    Children: S30210 + S30220 + S30230 +

    +
    S30210 The SQLite library shall provide instrumentation that can alert the application when its resource usage nears or exceeds the limits of the memory breakdown guarantee.

    Parents: S30200 +

    +

    Children: H17371 + H17373 + H17374 + H17375 +

    +
    S30220 The SQLite library shall provide facilities to automatically recycle memory when usage nears preset limits.

    Parents: S30200 +

    +

    Children: H16351 + H16352 + H16353 + H16354 + H16355 + H16358 + H17341 + H17342 +

    +
    S30230 The SQLite library shall permit BLOB and CLOB objects to be read and written incrementally using small memory buffers.

    Parents: S30200 +

    +

    Children: H17813 + H17814 + H17816 + H17819 + H17821 + H17824 + H17833 + H17836 + H17839 + H17843 + H17853 + H17856 + H17859 + H17862 + H17863 + H17865 + H17868 + H17873 + H17874 + H17875 + H17876 + H17877 + H17879 + H17882 + H17885 + H17888 +

    +
    S30300 When a memory allocation fails, SQLite shall either silently make do without the requested memory or else it shall report the error back to the application.

    Parents: S30000 +

    +

    No children

    +
    S30400 When an I/O operation fails, SQLite shall either silently recover or else it shall report the error back to the application.

    Parents: S30000 +

    +

    No children

    +
    S30500 +SQLite shall provide the capability to monitor +the progress and interrupt the evaluation of a long-running query. +

    Parents: S30000 +

    +

    Children: H12271 + H12272 + H12918 +

    +
    S30600 +All unused portions of a well-formed SQLite database file shall +be available for reuse. +

    Parents: S30000 +

    +

    No children

    +
    S30700 +SQLite shall provide the capability to incrementally decrease the +size of the persistent storage file as information is removed from +the database. +

    Parents: S30000 +

    +

    No children

    +
    S30800 +SQLite shall provide the interfaces that support testing and +validation of the library code in an as-delivered configuration. +

    Parents: S30000 +

    +

    No children

    +
    S30900 +SQLite shall provide the ability for separate database connections +within the same process to share resources. +

    Parents: S30000 +

    +

    Children: H10331 + H10336 + H10337 + H10339 +

    +
    S40000 +The SQLite library shall be safe for use in applications that +make concurrent access to the underlying database from different +threads and/or processes. +

    No parents

    +

    Children: S40100 + S40200 + S40300 + S40400 + S40410 +

    +
    S40100 +The SQLite library shall be configurable to operate correctly in +a multi-threaded application. +

    Parents: S40000 +

    +

    No children

    +
    S40200 +The SQLite library shall support multiple independent database +connections per thread and per process. +

    Parents: S40000 +

    +

    Children: H12701 + H12702 + H12703 + H12704 + H12706 + H12707 + H12709 + H12711 + H12712 + H12713 + H12714 + H12717 + H12719 + H12721 + H12723 +

    +
    S40300 +The SQLite library shall automatically control access to common +databases from different connections in different threads or processes. +

    Parents: S40000 +

    +

    No children

    +
    S40400 +The SQLite library shall notify the application if an operation can +not be completed due to concurrent access constraints. +

    Parents: S40000 +

    +

    Children: H12311 + H12312 + H12314 + H12316 + H12318 +

    +
    S40410 +The SQLite library shall provide interfaces to assist the application +in responding appropriately when an operation can +not be completed due to concurrent access constraints. +

    Parents: S40000 +

    +

    Children: H10533 + H10536 + H12341 + H12343 + H12344 +

    +
    S50000 +The SQLite library shall be cross-platform. +

    No parents

    +

    Children: S50100 + S50200 + S50300 +

    +
    S50100 +The SQLite library shall be implemented in ANSI-C. +

    Parents: S50000 +

    +

    No children

    +
    S50200 +The SQLite library shall support text encoded as UTF-8, +UTF-16le, or UTF-16be. +

    Parents: S50000 +

    +

    No children

    +
    S50300 +SQLite database files shall be processor and byte-order independent. +

    Parents: S50000 +

    +

    No children

    +
    S60000 +The SQLite library shall provide introspection capabilities to the +application. +

    No parents

    +

    Children: S60100 + S60200 + S60300 + S60400 + S60500 + S60600 +

    +
    S60100 +The SQLite library shall provide interfaces that an application can +use to discover fixed, compile-time characteristics of the +SQLite library. +

    Parents: S60000 +

    +

    Children: H10011 + H10014 + H10021 + H10022 + H10023 + H10101 + H10102 +

    +
    S60200 +The SQLite library shall provide interfaces that an application can +use to find run-time performance characteristics and status of the +SQLite library. +

    Parents: S60000 +

    +

    Children: H12801 + H12802 + H12803 + H12807 + H12808 + H12809 + H12931 + H12932 + H12933 + H12934 +

    +
    S60300 +The SQLite library shall provide interfaces that permit an application +to query the schema of a database. +

    Parents: S60000 +

    +

    No children

    +
    S60400 The SQLite library shall provide interfaces that allow an application to monitor the sequence and progress of queries submitted to SQLite.

    Parents: S60000 +

    +

    Children: H12281 + H12282 + H12283 + H12284 + H12285 + H12287 + H12288 + H12289 + H12290 + H12911 + H12912 + H12913 + H12914 + H12915 + H12916 + H12917 + H12918 + H12951 + H12952 + H12953 + H12954 + H12955 + H12961 + H12962 + H12963 + H12964 + H12971 + H12973 + H12975 + H12977 + H12979 + H12981 + H12983 + H12986 +

    +
    S60500 +The SQLite library shall provide interfaces that allow an application +to discover the algorithms that SQLite has chosen to implement specific +SQL statements. +

    Parents: S60000 +

    +

    No children

    +
    S60600 +The SQLite library shall provide interfaces that allow an application +to discover relationships between SQLite objects. +

    Parents: S60000 + S20200 +

    +

    Children: H13123 + H13143 + H13146 + H13149 + H13152 + H16253 +

    +
    S70000 +The SQLite library shall provide interfaces that promote the safe +construction and processing of SQL statements and data from +untrusted sources. +

    No parents

    +

    Children: S70100 + S70200 + S70300 + H17403 + H17406 + H17407 +

    +
    S70100 +The SQLite library shall provide the application means by which the +application can test and enforce compliance with database access +policies for any particular SQL statement. +

    Parents: S70000 +

    +

    Children: H12501 + H12502 + H12503 + H12504 + H12505 + H12506 + H12507 + H12510 + H12511 + H12512 + H12520 + H12521 + H12522 +

    +
    S70200 +The SQLite library shall provide interfaces that test to see if an +SQL statement being received incrementally is complete. +

    Parents: S70000 +

    +

    Children: H10511 + H10512 +

    +
    S70300 +The SQLite library shall support prepared statement objects with +late parameter binding +

    Parents: S70000 + S30000 +

    +

    Children: H11302 + H11304 + H13506 + H13509 + H13512 + H13515 + H13518 + H13521 + H13524 + H13527 + H13530 + H13533 + H13536 + H13539 + H13542 + H13545 + H13548 + H13551 + H13601 + H13621 + H13641 + H13661 +

    +
    S80000 +SQLite shall exhibit ductile failure characteristics +

    No parents

    +

    Children: S80100 +

    +
    S80100 +SQLite shall make anomalies visible to the application +

    Parents: S80000 +

    +

    No children

    +
    H10011 +The SQLITE_VERSION #define in the sqlite3.h header file shall +evaluate to a string literal that is the SQLite version +with which the header file is associated. +

    Parents: S60100 +

    +

    No children

    +
    H10014 +The SQLITE_VERSION_NUMBER #define shall resolve to an integer +with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z +are the major version, minor version, and release number. +

    Parents: S60100 +

    +

    No children

    +
    H10021 +The sqlite3_libversion_number() interface shall return +an integer equal to SQLITE_VERSION_NUMBER. +

    Parents: S60100 +

    +

    No children

    +
    H10022 +The sqlite3_version string constant shall contain +the text of the SQLITE_VERSION string. +

    Parents: S60100 +

    +

    No children

    +
    H10023 +The sqlite3_libversion() function shall return +a pointer to the sqlite3_version string constant. +

    Parents: S60100 +

    +

    No children
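    A small sketch of the version requirements above (H10011 through H10023), comparing the header the application was compiled against with the library it is running against:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      /* Compile-time values come from the header; run-time values from
      ** the linked library. */
      printf("header:  %s (%d)\n", SQLITE_VERSION, SQLITE_VERSION_NUMBER);
      printf("library: %s (%d)\n", sqlite3_libversion(), sqlite3_libversion_number());
      return 0;
    }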

    +
    H10101 +The sqlite3_threadsafe() function shall return zero if +and only if SQLite was compiled with mutexing code omitted. +

    Parents: S60100 +

    +

    No children

    +
    H10102 +The value returned by sqlite3_threadsafe() shall remain the same +across calls to sqlite3_config(). +

    Parents: S60100 +

    +

    No children

    +
    H10201 +The sqlite_int64 and sqlite3_int64 types shall specify +a 64-bit signed integer. +

    Parents: S10110 +

    +

    No children

    +
    H10202 +The sqlite_uint64 and sqlite3_uint64 types shall specify +a 64-bit unsigned integer. +

    Parents: S10110 +

    +

    No children

    +
    H10331 A successful invocation of sqlite3_enable_shared_cache(B) will enable or disable shared cache mode for subsequently created database connections in the same process.

    Parents: S30900 +

    +

    No children

    +
    H10336 +When shared cache is enabled, the sqlite3_create_module() +interface will always return an error. +

    Parents: S30900 +

    +

    No children

    +
    H10337 +The sqlite3_enable_shared_cache(B) interface returns +SQLITE_OK if shared cache was enabled or disabled successfully. +

    Parents: S30900 +

    +

    No children

    +
    H10339 +Shared cache is disabled by default. +

    Parents: S30900 +

    +

    No children
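    A minimal sketch of H10331 through H10339; the database file name is a placeholder:

    #include <sqlite3.h>

    int open_with_shared_cache(sqlite3 **ppDb){
      /* Shared-cache mode is off by default; enabling it affects only
      ** connections opened after this call in the same process. */
      if( sqlite3_enable_shared_cache(1)!=SQLITE_OK ) return SQLITE_ERROR;
      return sqlite3_open("test.db", ppDb);
    }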

    +
    H10511 A successful evaluation of the sqlite3_complete() or sqlite3_complete16() functions shall return a numeric 1 if and only if the input string contains one or more non-whitespace tokens and the last non-whitespace token is a semicolon that is not in the middle of a CREATE TRIGGER statement.

    Parents: S70200 +

    +

    No children

    +
    H10512 +If a memory allocation error occurs during an invocation +of sqlite3_complete() or sqlite3_complete16() then the +routine shall return SQLITE_NOMEM. +

    Parents: S70200 +

    +

    No children
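    A rough sketch of how an interactive shell might use sqlite3_complete() per H10511, accumulating input lines until a complete statement has been entered; the buffer management here is deliberately simplistic:

    #include <stdio.h>
    #include <string.h>
    #include <sqlite3.h>

    /* Read lines from stdin until the buffer ends with a statement-
    ** terminating semicolon, as judged by sqlite3_complete(). */
    int read_statement(char *zBuf, int nBuf){
      size_t n;
      zBuf[0] = 0;
      while( !sqlite3_complete(zBuf) ){
        n = strlen(zBuf);
        if( n+2>=(size_t)nBuf ) return 1;                    /* out of space */
        if( fgets(zBuf+n, nBuf-(int)n, stdin)==0 ) return 1; /* end of input */
      }
      return 0;
    }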

    +
    H10533 +The sqlite3_sleep(M) interface invokes the xSleep +method of the default VFS in order to +suspend execution of the current thread for at least +M milliseconds. +

    Parents: S40410 +

    +

    No children

    +
    H10536 +The sqlite3_sleep(M) interface returns the number of +milliseconds of sleep actually requested of the operating +system, which might be larger than the parameter M. +

    Parents: S40410 +

    +

    No children

    +
    H11203 +The sqlite3_vfs_find(N) interface returns a pointer to the +registered sqlite3_vfs object whose name exactly matches +the zero-terminated UTF-8 string N, or it returns NULL if +there is no match. +

    Parents: S20100 +

    +

    No children

    +
    H11206 +If the N parameter to sqlite3_vfs_find(N) is NULL then +the function returns a pointer to the default sqlite3_vfs +object if there is one, or NULL if there is no default +sqlite3_vfs object. +

    Parents: S20100 +

    +

    No children

    +
    H11209 +The sqlite3_vfs_register(P,F) interface registers the +well-formed sqlite3_vfs object P using the name given +by the zName field of the object P. +

    Parents: S20100 +

    +

    No children

    +
    H11212 +Using the sqlite3_vfs_register(P,F) interface to register +the same sqlite3_vfs object multiple times is a harmless no-op. +

    Parents: S20100 +

    +

    No children

    +
    H11215 +The sqlite3_vfs_register(P,F) interface makes the sqlite3_vfs +object P the default sqlite3_vfs object if F is non-zero. +

    Parents: S20100 +

    +

    No children

    +
    H11218 +The sqlite3_vfs_unregister(P) interface unregisters the +sqlite3_vfs object P so that it is no longer returned by +subsequent calls to sqlite3_vfs_find(). +

    Parents: S20100 +

    +

    No children
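    A small sketch of sqlite3_vfs_find() per H11203 and H11206:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      /* A NULL name requests the default VFS; a UTF-8 name requests a
      ** specific registered VFS, with NULL returned if there is no match. */
      sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
      if( pVfs ) printf("default VFS: %s\n", pVfs->zName);
      return 0;
    }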

    +
    H11302 +The sqlite3_finalize(S) interface destroys the +prepared statement S and releases all +memory and file resources held by that object. +

    Parents: S70300 +

    +

    No children

    +
    H11304 +If the most recent call to sqlite3_step(S) for the +prepared statement S returned an error, +then sqlite3_finalize(S) returns that same error. +

    Parents: S70300 +

    +

    No children

    +
    H12011 +A successful call to sqlite3_close(C) shall destroy the +database connection object C. +

    Parents: S30100 +

    +

    No children

    +
    H12012 +A successful call to sqlite3_close(C) shall return SQLITE_OK. +

    Parents: S30100 +

    +

    No children

    +
    H12013 +A successful call to sqlite3_close(C) shall release all +memory and system resources associated with database connection C. +

    Parents: S30100 +

    +

    No children

    +
    H12014 +A call to sqlite3_close(C) on a database connection C that +has one or more open prepared statements shall fail with +an SQLITE_BUSY error code. +

    Parents: S30100 +

    +

    No children

    +
    H12015 +A call to sqlite3_close(C) where C is a NULL pointer shall +be a harmless no-op returning SQLITE_OK. +

    Parents: S30100 +

    +

    No children

    +
    H12019 +When sqlite3_close(C) is invoked on a database connection C +that has a pending transaction, the transaction shall be +rolled back. +

    Parents: S30100 +

    +

    No children
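    A minimal sketch combining H12014 with the sqlite3_next_stmt() interface mentioned in the 3.6.0 notes: finalize any outstanding prepared statements so that sqlite3_close() does not fail with SQLITE_BUSY. The helper name is hypothetical:

    #include <sqlite3.h>

    int close_all(sqlite3 *db){
      sqlite3_stmt *pStmt;
      /* sqlite3_close() returns SQLITE_BUSY while prepared statements
      ** remain open, so finalize them all first. */
      while( (pStmt = sqlite3_next_stmt(db, 0))!=0 ){
        sqlite3_finalize(pStmt);
      }
      return sqlite3_close(db);
    }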

    +
    H12101 +A successful invocation of sqlite3_exec(D,S,C,A,E) +shall sequentially evaluate all of the UTF-8 encoded, +semicolon-separated SQL statements in the zero-terminated +string S within the context of the database connection D. +

    Parents: S10000 +

    +

    No children

    +
    H12102 +If the S parameter to sqlite3_exec(D,S,C,A,E) is NULL then +the actions of the interface shall be the same as if the +S parameter were an empty string. +

    Parents: S10000 +

    +

    No children

    +
    H12104 +The return value of sqlite3_exec() shall be SQLITE_OK if all +SQL statements run successfully and to completion. +

    Parents: S10000 +

    +

    No children

    +
    H12105 +The return value of sqlite3_exec() shall be an appropriate +non-zero error code if any SQL statement fails. +

    Parents: S10000 +

    +

    No children

    +
    H12107 +If one or more of the SQL statements handed to sqlite3_exec() +return results and the 3rd parameter is not NULL, then +the callback function specified by the 3rd parameter shall be +invoked once for each row of result. +

    Parents: S10000 +

    +

    No children

    +
    H12110 +If the callback returns a non-zero value then sqlite3_exec() +shall abort the SQL statement it is currently evaluating, +skip all subsequent SQL statements, and return SQLITE_ABORT. +

    Parents: S10000 +

    +

    No children

    +
    H12113 +The sqlite3_exec() routine shall pass its 4th parameter through +as the 1st parameter of the callback. +

    Parents: S10000 +

    +

    No children

    +
    H12116 +The sqlite3_exec() routine shall set the 2nd parameter of its +callback to be the number of columns in the current row of +result. +

    Parents: S10000 +

    +

    No children

    +
    H12119 +The sqlite3_exec() routine shall set the 3rd parameter of its +callback to be an array of pointers to strings holding the +values for each column in the current result set row as +obtained from sqlite3_column_text(). +

    Parents: S10000 +

    +

    No children

    +
    H12122 +The sqlite3_exec() routine shall set the 4th parameter of its +callback to be an array of pointers to strings holding the +names of result columns as obtained from sqlite3_column_name(). +

    Parents: S10000 +

    +

    No children

    +
    H12125 +If the 3rd parameter to sqlite3_exec() is NULL then +sqlite3_exec() shall silently discard query results. +

    Parents: S10000 +

    +

    No children

    +
    H12131 +If an error occurs while parsing or evaluating any of the SQL +statements in the S parameter of sqlite3_exec(D,S,C,A,E) and if +the E parameter is not NULL, then sqlite3_exec() shall store +in *E an appropriate error message written into memory obtained +from sqlite3_malloc(). +

    Parents: S10000 +

    +

    No children

    +
    H12134 +The sqlite3_exec(D,S,C,A,E) routine shall set the value of +*E to NULL if E is not NULL and there are no errors. +

    Parents: S10000 +

    +

    No children

    +
    H12137 +The sqlite3_exec(D,S,C,A,E) function shall set the error code +and message accessible via sqlite3_errcode(), +sqlite3_extended_errcode(), +sqlite3_errmsg(), and sqlite3_errmsg16(). +

    Parents: S10000 +

    +

    No children

    +
    H12138 +If the S parameter to sqlite3_exec(D,S,C,A,E) is NULL or an +empty string or contains nothing other than whitespace, comments, +and/or semicolons, then results of sqlite3_errcode(), +sqlite3_extended_errcode(), +sqlite3_errmsg(), and sqlite3_errmsg16() +shall reset to indicate no errors. +

    Parents: S10000 +

    +

    No children
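    A minimal sketch of the sqlite3_exec() callback contract described in H12107 through H12125; the table name "t" is a placeholder:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Called once per result row: argv holds column values as text,
    ** azCol holds column names, pArg is the 4th argument to sqlite3_exec(). */
    static int print_row(void *pArg, int nCol, char **argv, char **azCol){
      int i;
      (void)pArg;
      for(i=0; i<nCol; i++){
        printf("%s = %s\n", azCol[i], argv[i] ? argv[i] : "NULL");
      }
      return 0;   /* a non-zero return would abort with SQLITE_ABORT */
    }

    int dump_table(sqlite3 *db){
      char *zErr = 0;
      int rc = sqlite3_exec(db, "SELECT * FROM t;", print_row, 0, &zErr);
      if( rc!=SQLITE_OK && zErr ){
        fprintf(stderr, "error: %s\n", zErr);
        sqlite3_free(zErr);
      }
      return rc;
    }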

    +
    H12201 +Each new database connection shall have the +extended result codes feature disabled by default. +

    Parents: S10700 +

    +

    No children

    +
    H12202 +The sqlite3_extended_result_codes(D,F) interface shall enable +extended result codes for the database connection D +if the F parameter is true, or disable them if F is false. +

    Parents: S10700 +

    +

    No children

    +
    H12221 +The sqlite3_last_insert_rowid() function shall return the rowid +of the most recent successful INSERT performed on the same +database connection and within the same or higher level +trigger context, or zero if there have been no qualifying +INSERT statements. +

    Parents: S10700 +

    +

    No children

    +
    H12223 +The sqlite3_last_insert_rowid() function shall return the +same value when called from the same trigger context +immediately before and after a ROLLBACK. +

    Parents: S10700 +

    +

    No children

    +
    H12241 +The sqlite3_changes() function shall return the number of +row changes caused by the most recent INSERT, UPDATE, +or DELETE statement on the same database connection and +within the same or higher trigger context, or zero if there have +not been any qualifying row changes. +

    Parents: S10600 +

    +

    No children

    +
    H12243 +Statements of the form "DELETE FROM tablename" with no +WHERE clause shall cause subsequent calls to +sqlite3_changes() to return zero, regardless of the +number of rows originally in the table. +

    Parents: S10600 +

    +

    No children

    +
    H12261 +The sqlite3_total_changes() returns the total number +of row changes caused by INSERT, UPDATE, and/or DELETE +statements on the same database connection, in any +trigger context, since the database connection was created. +

    Parents: S10600 +

    +

    No children

    +
    H12263 +Statements of the form "DELETE FROM tablename" with no +WHERE clause shall not change the value returned +by sqlite3_total_changes(). +

    Parents: S10600 +

    +

    No children

    +
    H12271 +The sqlite3_interrupt() interface will force all running +SQL statements associated with the same database connection +to halt after processing at most one additional row of data. +

    Parents: S30500 +

    +

    No children

    +
    H12272 +Any SQL statement that is interrupted by sqlite3_interrupt() +will return SQLITE_INTERRUPT. +

    Parents: S30500 +

    +

    No children

    +
    H12281 +The callback function registered by sqlite3_trace() +shall be invoked +whenever an SQL statement first begins to execute and +whenever a trigger subprogram first begins to run. +

    Parents: S60400 +

    +

    No children

    +
    H12282 +Each call to sqlite3_trace() shall override the previously +registered trace callback. +

    Parents: S60400 +

    +

    No children

    +
    H12283 +A NULL trace callback shall disable tracing. +

    Parents: S60400 +

    +

    No children

    +
    H12284 +The first argument to the trace callback shall be a copy of +the pointer which was the 3rd argument to sqlite3_trace(). +

    Parents: S60400 +

    +

    No children

    +
    H12285 +The second argument to the trace callback is a +zero-terminated UTF-8 string containing the original text +of the SQL statement as it was passed into sqlite3_prepare_v2() +or the equivalent, or an SQL comment indicating the beginning +of a trigger subprogram. +

    Parents: S60400 +

    +

    No children

    +
    H12287 +The callback function registered by sqlite3_profile() is invoked +as each SQL statement finishes. +

    Parents: S60400 +

    +

    No children

    +
    H12288 +The first parameter to the profile callback is a copy of +the 3rd parameter to sqlite3_profile(). +

    Parents: S60400 +

    +

    No children

    +
    H12289 +The second parameter to the profile callback is a +zero-terminated UTF-8 string that contains the complete text of +the SQL statement as it was processed by sqlite3_prepare_v2() +or the equivalent. +

    Parents: S60400 +

    +

    No children

    +
    H12290 +The third parameter to the profile callback is an estimate +of the number of nanoseconds of wall-clock time required to +run the SQL statement from start to finish. +

    Parents: S60400 +

    +

    No children

    +
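
    The trace and profile requirements above (H12281 through H12290) correspond to the
    callbacks sketched below; the use of stderr as the context pointer is just an
    example choice.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Trace callback: fired when a statement (or trigger subprogram) starts
        ** running (H12281); the void* is the pointer given to sqlite3_trace()
        ** (H12284) and the string is the SQL text (H12285). */
        static void trace_cb(void *ctx, const char *sql){
          fprintf((FILE*)ctx, "TRACE: %s\n", sql);
        }

        /* Profile callback: fired as a statement finishes (H12287); the last
        ** argument is an estimate of the wall-clock run time in nanoseconds
        ** (H12290). */
        static void profile_cb(void *ctx, const char *sql, sqlite3_uint64 ns){
          fprintf((FILE*)ctx, "PROFILE: %llu ns for %s\n",
                  (unsigned long long)ns, sql);
        }

        void install_tracing(sqlite3 *db){
          sqlite3_trace(db, trace_cb, stderr);
          sqlite3_profile(db, profile_cb, stderr);
        }
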
    H12311 +The sqlite3_busy_handler(D,C,A) function shall replace +the busy handler callback in the database connection D with +a new busy handler C and application data pointer A. +

    Parents: S40400 +

    +

    No children

    +
    H12312 +Newly created database connections shall have a busy +handler of NULL. +

    Parents: S40400 +

    +

    No children

    +
    H12314 +When two or more database connections share a +common cache, +the busy handler for the database connection currently using +the cache shall be invoked when the cache encounters a lock. +

    Parents: S40400 +

    +

    No children

    +
    H12316 +If a busy handler callback returns zero, then the SQLite interface +that provoked the locking event shall return SQLITE_BUSY. +

    Parents: S40400 +

    +

    No children

    +
    H12318 +SQLite shall invoke the busy handler with two arguments which +are a copy of the pointer supplied by the 3rd parameter to +sqlite3_busy_handler() and a count of the number of prior +invocations of the busy handler for the same locking event. +

    Parents: S40400 +

    +

    No children

    +
    H12341 +The sqlite3_busy_timeout() function shall override any prior +sqlite3_busy_timeout() or sqlite3_busy_handler() setting +on the same database connection. +

    Parents: S40410 +

    +

    No children

    +
    H12343 +If the 2nd parameter to sqlite3_busy_timeout() is less than +or equal to zero, then the busy handler shall be cleared so that +all subsequent locking events immediately return SQLITE_BUSY. +

    Parents: S40410 +

    +

    No children

    +
    H12344 +If the 2nd parameter to sqlite3_busy_timeout() is a positive +number N, then a busy handler shall be set that repeatedly calls +the xSleep() method in the VFS interface until +either the lock clears or until the cumulative sleep time +reported back by xSleep() exceeds N milliseconds. +

    Parents: S40410 +

    +

    No children

    +
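
    A minimal sketch of the busy-handler requirements (H12311 through H12344); the
    retry count of ten and the 2000 ms timeout are arbitrary example values.

        #include <sqlite3.h>

        /* Busy handler per H12311/H12318: the first argument is the pointer from
        ** the 3rd parameter of sqlite3_busy_handler(), the second is the number
        ** of prior invocations for this locking event.  Returning 0 makes the
        ** blocked call fail with SQLITE_BUSY (H12316). */
        static int busy_cb(void *arg, int prior){
          (void)arg;
          return prior < 10;           /* give up after ten attempts */
        }

        void install_busy_handling(sqlite3 *db){
          sqlite3_busy_handler(db, busy_cb, 0);
          /* Alternatively, a simple timeout; this overrides the handler above
          ** (H12341) and sleeps via the VFS xSleep() method until roughly
          ** 2000 ms have accumulated (H12344). */
          sqlite3_busy_timeout(db, 2000);
        }
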
    H12371 +If a sqlite3_get_table() fails a memory allocation, then +it shall free the result table under construction, abort the +query in process, skip any subsequent queries, set the +*pazResult output pointer to NULL and return SQLITE_NOMEM. +

    Parents: S10000 +

    +

    No children

    +
    H12373 +If the pnColumn parameter to sqlite3_get_table() is not NULL +then a successful invocation of sqlite3_get_table() shall +write the number of columns in the +result set of the query into *pnColumn. +

    Parents: S10000 +

    +

    No children

    +
    H12374 +If the pnRow parameter to sqlite3_get_table() is not NULL +then a successful invocation of sqlite3_get_table() shall +write the number of rows in the +result set of the query into *pnRow. +

    Parents: S10000 +

    +

    No children

    +
    H12376 +A successful invocation of sqlite3_get_table() that computes +N rows of result with C columns per row shall make *pazResult +point to an array of pointers to (N+1)*C strings where the first +C strings are column names as obtained from +sqlite3_column_name() and the rest are column result values +obtained from sqlite3_column_text(). +

    Parents: S10000 +

    +

    No children

    +
    H12379 +The values in the pazResult array returned by sqlite3_get_table() +shall remain valid until cleared by sqlite3_free_table(). +

    Parents: S10000 +

    +

    No children

    +
    H12382 +When an error other than SQLITE_NOMEM occurs during evaluation +of sqlite3_get_table() the function shall set *pazResult to NULL, +write an error message into memory obtained from sqlite3_malloc(), make +*pzErrmsg point to that error message, and return an +appropriate error code. +

    Parents: S10000 +

    +

    No children

    +
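
    A sketch of the sqlite3_get_table() result layout described by H12371 through
    H12382; the query and table name tbl are invented for the example.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Per H12376: the first nCol entries of azResult are column names,
        ** followed by nRow rows of nCol values each. */
        void dump_table(sqlite3 *db){
          char **azResult = 0;
          char *zErr = 0;
          int nRow = 0, nCol = 0;
          int rc = sqlite3_get_table(db, "SELECT * FROM tbl",
                                     &azResult, &nRow, &nCol, &zErr);
          if( rc==SQLITE_OK ){
            for(int i=0; i<(nRow+1)*nCol; i++){
              printf("%s%c", azResult[i] ? azResult[i] : "NULL",
                     (i+1)%nCol ? '|' : '\n');
            }
            sqlite3_free_table(azResult);  /* H12379: values valid until freed here */
          }else{
            fprintf(stderr, "error %d: %s\n", rc, zErr ? zErr : "?");
            sqlite3_free(zErr);
          }
        }
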
    H12501 +The sqlite3_set_authorizer(D,...) interface registers an +authorizer callback with database connection D. +

    Parents: S70100 +

    +

    No children

    +
    H12502 +The authorizer callback is invoked as SQL statements are +being parsed and compiled. +

    Parents: S70100 +

    +

    No children

    +
    H12503 +If the authorizer callback returns any value other than +SQLITE_IGNORE, SQLITE_OK, or SQLITE_DENY, then +the application interface call that caused +the authorizer callback to run shall fail with an +SQLITE_ERROR error code and an appropriate error message. +

    Parents: S70100 +

    +

    No children

    +
    H12504 +When the authorizer callback returns SQLITE_OK, the operation +described is processed normally. +

    Parents: S70100 +

    +

    No children

    +
    H12505 +When the authorizer callback returns SQLITE_DENY, the +application interface call that caused the +authorizer callback to run shall fail +with an SQLITE_ERROR error code and an error message +explaining that access is denied. +

    Parents: S70100 +

    +

    No children

    +
    H12506 +If the authorizer code (the 2nd parameter to the authorizer +callback) is SQLITE_READ and the authorizer callback returns +SQLITE_IGNORE, then the prepared statement is constructed to +insert a NULL value in place of the table column that would have +been read if SQLITE_OK had been returned. +

    Parents: S70100 +

    +

    No children

    +
    H12507 +If the authorizer code (the 2nd parameter to the authorizer +callback) is anything other than SQLITE_READ, then +a return of SQLITE_IGNORE has the same effect as SQLITE_DENY. +

    Parents: S70100 +

    +

    No children

    +
    H12510 +The first parameter to the authorizer callback is a copy of +the third parameter to the sqlite3_set_authorizer() interface. +

    Parents: S70100 +

    +

    No children

    +
    H12511 +The second parameter to the callback is an integer +action code that specifies the particular action +to be authorized. +

    Parents: S70100 +

    +

    No children

    +
    H12512 +The third through sixth parameters to the callback are +zero-terminated strings that contain +additional details about the action to be authorized. +

    Parents: S70100 +

    +

    No children

    +
    H12520 +Each call to sqlite3_set_authorizer() overrides +any previously installed authorizer. +

    Parents: S70100 +

    +

    No children

    +
    H12521 +A NULL authorizer means that no authorization +callback is invoked. +

    Parents: S70100 +

    +

    No children

    +
    H12522 +The default authorizer is NULL. +

    Parents: S70100 +

    +

    No children

    +
    H12551 +The second parameter to an +authorizer callback shall be an integer +authorizer code that specifies what action +is being authorized. +

    Parents: H12500 +

    +

    No children

    +
    H12552 +The 3rd and 4th parameters to the +authorization callback +shall be parameters or NULL depending on which +authorizer code is used as the second parameter. +

    Parents: H12500 +

    +

    No children

    +
    H12553 +The 5th parameter to the +authorizer callback shall be the name +of the database (example: "main", "temp", etc.) if applicable. +

    Parents: H12500 +

    +

    No children

    +
    H12554 +The 6th parameter to the +authorizer callback shall be the name +of the inner-most trigger or view that is responsible for +the access attempt or NULL if this access attempt is directly from +top-level SQL code. +

    Parents: H12500 +

    +

    No children

    +
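
    A sketch of an authorizer that exercises H12504 through H12506; the column name
    "salary" and the policy choices are invented for the example.

        #include <string.h>
        #include <sqlite3.h>

        /* Authorizer per H12510-H12512 and H12551-H12554: arg is the pointer
        ** passed to sqlite3_set_authorizer(), code is the action, z3/z4 carry
        ** action details (for SQLITE_READ: table and column name), zDb is the
        ** database name and zTrigger the innermost trigger or view, or NULL
        ** for top-level SQL. */
        static int auth_cb(void *arg, int code, const char *z3, const char *z4,
                           const char *zDb, const char *zTrigger){
          (void)arg; (void)z3; (void)zDb; (void)zTrigger;
          /* Hide a column: H12506 makes reads of it come back as NULL. */
          if( code==SQLITE_READ && z4 && strcmp(z4, "salary")==0 ){
            return SQLITE_IGNORE;
          }
          /* Refuse deletes outright: compilation fails per H12505. */
          if( code==SQLITE_DELETE ) return SQLITE_DENY;
          return SQLITE_OK;       /* everything else proceeds normally (H12504) */
        }

        void install_authorizer(sqlite3 *db){
          sqlite3_set_authorizer(db, auth_cb, 0);  /* NULL callback removes it (H12521) */
        }
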
    H12701 +The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces create a new +database connection associated with +the database file given in their first parameter. +

    Parents: S40200 +

    +

    No children

    +
    H12702 +The filename argument is interpreted as UTF-8 +for sqlite3_open() and sqlite3_open_v2() and as UTF-16 +in the native byte order for sqlite3_open16(). +

    Parents: S40200 +

    +

    No children

    +
    H12703 +A successful invocation of sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2() writes a pointer to a new +database connection into *ppDb. +

    Parents: S40200 +

    +

    No children

    +
    H12704 +The sqlite3_open(), sqlite3_open16(), and +sqlite3_open_v2() interfaces return SQLITE_OK upon success, +or an appropriate error code on failure. +

    Parents: S40200 +

    +

    No children

    +
    H12706 +The default text encoding for a new database created using +sqlite3_open() or sqlite3_open_v2() will be UTF-8. +

    Parents: S40200 +

    +

    No children

    +
    H12707 +The default text encoding for a new database created using +sqlite3_open16() will be UTF-16. +

    Parents: S40200 +

    +

    No children

    +
    H12709 +The sqlite3_open(F,D) interface is equivalent to +sqlite3_open_v2(F,D,G,0) where the G parameter is +SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE. +

    Parents: S40200 +

    +

    No children

    +
    H12711 +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_READONLY then the database is opened +for reading only. +

    Parents: S40200 +

    +

    No children

    +
    H12712 +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_READWRITE then the database is opened +for reading and writing if possible, or for reading only if the +file is write protected by the operating system. +

    Parents: S40200 +

    +

    No children

    +
    H12713 +If the G parameter to sqlite3_open_v2(F,D,G,V) omits the +bit value SQLITE_OPEN_CREATE and the database does not +previously exist, an error is returned. +

    Parents: S40200 +

    +

    No children

    +
    H12714 +If the G parameter to sqlite3_open_v2(F,D,G,V) contains the +bit value SQLITE_OPEN_CREATE and the database does not +previously exist, then an attempt is made to create and +initialize the database. +

    Parents: S40200 +

    +

    No children

    +
    H12717 +If the filename argument to sqlite3_open(), sqlite3_open16(), +or sqlite3_open_v2() is ":memory:", then a private, +ephemeral, in-memory database is created for the connection. +Is SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE required +in sqlite3_open_v2()? +

    Parents: S40200 +

    +

    No children

    +
    H12719 +If the filename is NULL or an empty string, then a private, +ephemeral on-disk database will be created. +Is SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE required +in sqlite3_open_v2()? +

    Parents: S40200 +

    +

    No children

    +
    H12721 +The database connection created by sqlite3_open_v2(F,D,G,V) +will use the sqlite3_vfs object identified by the V parameter, +or the default sqlite3_vfs object if V is a NULL pointer. +

    Parents: S40200 +

    +

    No children

    +
    H12723 +Two database connections will share a common cache if both were +opened with the same VFS while shared cache mode was enabled and +if both filenames compare equal using memcmp() after having been +processed by the xFullPathname method of the VFS. +

    Parents: S40200 +

    +

    No children

    +
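
    A minimal sketch of H12709 through H12714 using sqlite3_open_v2(); the filename
    example.db is invented.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Open a database read/write, creating it if needed (H12714), with the
        ** default VFS (H12721). */
        int open_example(sqlite3 **pDb){
          int rc = sqlite3_open_v2("example.db", pDb,
                                   SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
          if( rc!=SQLITE_OK ){
            /* H12704: an error code is returned; if a handle was still produced,
            ** its error message can be read before closing. */
            fprintf(stderr, "open failed: %s\n",
                    *pDb ? sqlite3_errmsg(*pDb) : "out of memory");
            sqlite3_close(*pDb);
          }
          return rc;
        }
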
    H12762 +A successful call to sqlite3_limit(D,C,V) where V is +positive changes the limit on the size of construct C in the +database connection D to the lesser of V and the hard upper +bound on the size of C that is set at compile-time. +

    Parents: S20600 +

    +

    No children

    +
    H12766 +A successful call to sqlite3_limit(D,C,V) where V is negative +leaves the state of the database connection D unchanged. +

    Parents: S20600 +

    +

    No children

    +
    H12769 +A successful call to sqlite3_limit(D,C,V) returns the +value of the limit on the size of construct C in the +database connection D as it was prior to the call. +

    Parents: S20600 +

    +

    No children

    +
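
    A minimal sketch of H12762 through H12769; the limit category and the value
    100000 are example choices.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Lower the maximum SQL statement length for connection db and read
        ** back the previous limit (H12762/H12769); a negative value only
        ** queries the current setting (H12766). */
        void shrink_sql_limit(sqlite3 *db){
          int prev = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);
          int now  = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1);
          printf("SQL length limit: was %d, now %d\n", prev, now);
        }
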
    H12801 +The sqlite3_errcode(D) interface returns the numeric +result code or extended result code for the most recently +failed interface call associated with the database connection D. +

    Parents: S60200 +

    +

    No children

    +
    H12802 +The sqlite3_extended_errcode(D) interface returns the numeric +extended result code for the most recently +failed interface call associated with the database connection D. +

    Parents: S60200 +

    +

    No children

    +
    H12803 +The sqlite3_errmsg(D) and sqlite3_errmsg16(D) +interfaces return English-language text that describes +the error in the most recently failed interface call, +encoded as either UTF-8 or UTF-16 respectively. +

    Parents: S60200 +

    +

    No children

    +
    H12807 +The strings returned by sqlite3_errmsg() and sqlite3_errmsg16() +are valid until the next SQLite interface call. +

    Parents: S60200 +

    +

    No children

    +
    H12808 +Calls to API routines that do not return an error code +(example: sqlite3_data_count()) do not +change the error code or message returned by +sqlite3_errcode(), sqlite3_extended_errcode(), +sqlite3_errmsg(), or sqlite3_errmsg16(). +

    Parents: S60200 +

    +

    No children

    +
    H12809 +Interfaces that are not associated with a specific +database connection (examples: +sqlite3_mprintf() or sqlite3_enable_shared_cache()) +do not change the values returned by +sqlite3_errcode(), sqlite3_extended_errcode(), +sqlite3_errmsg(), or sqlite3_errmsg16(). +

    Parents: S60200 +

    +

    No children

    +
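
    A minimal sketch of H12801 through H12803, assuming the most recent interface
    call on db has failed:

        #include <stdio.h>
        #include <sqlite3.h>

        /* Report both the basic and extended result codes and the
        ** English-language message for the last failed call. */
        void report_error(sqlite3 *db){
          fprintf(stderr, "error %d (extended %d): %s\n",
                  sqlite3_errcode(db),
                  sqlite3_extended_errcode(db),
                  sqlite3_errmsg(db));
        }
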
    H12911 +The callback function registered by sqlite3_progress_handler() +is invoked periodically during long running calls to +sqlite3_step(). +

    Parents: S60400 +

    +

    No children

    +
    H12912 +The progress callback is invoked once for every N virtual +machine opcodes, where N is the second argument to +the sqlite3_progress_handler() call that registered +the callback. If N is less than 1, sqlite3_progress_handler() +acts as if a NULL progress handler had been specified. +

    Parents: S60400 +

    +

    No children

    +
    H12913 +The progress callback itself is identified by the third +argument to sqlite3_progress_handler(). +

    Parents: S60400 +

    +

    No children

    +
    H12914 +The fourth argument to sqlite3_progress_handler() is a +void pointer passed to the progress callback +function each time it is invoked. +

    Parents: S60400 +

    +

    No children

    +
    H12915 +If a call to sqlite3_step() results in fewer than N opcodes +being executed, then the progress callback is never invoked. +

    Parents: S60400 +

    +

    No children

    +
    H12916 +Every call to sqlite3_progress_handler() +overwrites any previously registered progress handler. +

    Parents: S60400 +

    +

    No children

    +
    H12917 +If the progress handler callback is NULL then no progress +handler is invoked. +

    Parents: S60400 +

    +

    No children

    +
    H12918 +If the progress callback returns a result other than 0, then +the behavior is as if sqlite3_interrupt() had been called. +

    Parents: S30500 + S60400 +

    +

    No children

    +
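
    A minimal sketch of H12911 through H12918; the opcode interval of 1000 and the
    cancel-flag mechanism are example choices.

        #include <sqlite3.h>

        /* Progress handler: invoked every ~1000 VM opcodes during
        ** sqlite3_step() (H12912); returning non-zero behaves like
        ** sqlite3_interrupt(), so the pending statement fails with
        ** SQLITE_INTERRUPT (H12918, H12272). */
        static int progress_cb(void *cancel_flag){
          return *(volatile int*)cancel_flag;   /* non-zero cancels the query */
        }

        void install_progress(sqlite3 *db, volatile int *cancel_flag){
          sqlite3_progress_handler(db, 1000, progress_cb, (void*)cancel_flag);
        }
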
    H12931 +The sqlite3_get_autocommit(D) interface returns non-zero or +zero if the database connection D is or is not in autocommit +mode, respectively. +

    Parents: S60200 +

    +

    No children

    +
    H12932 +Autocommit mode is on by default. +

    Parents: S60200 +

    +

    No children

    +
    H12933 +Autocommit mode is disabled by a successful BEGIN statement. +

    Parents: S60200 +

    +

    No children

    +
    H12934 +Autocommit mode is enabled by a successful COMMIT or ROLLBACK +statement. +

    Parents: S60200 +

    +

    No children

    +
    H12951 +The sqlite3_commit_hook(D,F,P) interface registers the +callback function F to be invoked with argument P whenever +a transaction commits on the database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12952 +The sqlite3_commit_hook(D,F,P) interface returns the P argument +from the previous call with the same database connection D, +or NULL on the first call for a particular database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12953 +Each call to sqlite3_commit_hook() overwrites the callback +registered by prior calls. +

    Parents: S60400 +

    +

    No children

    +
    H12954 +If the F argument to sqlite3_commit_hook(D,F,P) is NULL +then the commit hook callback is canceled and no callback +is invoked when a transaction commits. +

    Parents: S60400 +

    +

    No children

    +
    H12955 +If the commit callback returns non-zero then the commit is +converted into a rollback. +

    Parents: S60400 +

    +

    No children

    +
    H12961 +The sqlite3_rollback_hook(D,F,P) interface registers the +callback function F to be invoked with argument P whenever +a transaction rolls back on the database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12962 +The sqlite3_rollback_hook(D,F,P) interface returns the P +argument from the previous call with the same +database connection D, or NULL on the first call +for a particular database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12963 +Each call to sqlite3_rollback_hook() overwrites the callback +registered by prior calls. +

    Parents: S60400 +

    +

    No children

    +
    H12964 +If the F argument to sqlite3_rollback_hook(D,F,P) is NULL +then the rollback hook callback is canceled and no callback +is invoked when a transaction rolls back. +

    Parents: S60400 +

    +

    No children

    +
    H12971 +The sqlite3_update_hook(D,F,P) interface causes the callback +function F to be invoked with first parameter P whenever +a table row is modified, inserted, or deleted on +the database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12973 +The sqlite3_update_hook(D,F,P) interface returns the value +of P for the previous call on the same database connection D, +or NULL for the first call. +

    Parents: S60400 +

    +

    No children

    +
    H12975 +If the update hook callback F in sqlite3_update_hook(D,F,P) +is NULL then no update callbacks are made. +

    Parents: S60400 +

    +

    No children

    +
    H12977 +Each call to sqlite3_update_hook(D,F,P) overrides prior calls +to the same interface on the same database connection D. +

    Parents: S60400 +

    +

    No children

    +
    H12979 +The update hook callback is not invoked when internal system +tables such as sqlite_master and sqlite_sequence are modified. +

    Parents: S60400 +

    +

    No children

    +
    H12981 +The second parameter to the update callback +is one of SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE, +depending on the operation that caused the callback to be invoked. +

    Parents: S60400 +

    +

    No children

    +
    H12983 +The third and fourth arguments to the callback contain pointers +to zero-terminated UTF-8 strings which are the names of the +database and table that is being updated. +

    Parents: S60400 +

    +

    No children

    +
    H12986 +The final callback parameter is the rowid of the row after +the change occurs. +

    Parents: S60400 +

    +

    No children

    +
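
    A sketch of the commit and update hook requirements (H12951, H12955, H12971,
    H12981 through H12986); the callbacks here are illustrative and do nothing
    beyond printing.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Commit hook (H12951/H12955): returning non-zero turns the COMMIT
        ** into a ROLLBACK. */
        static int commit_cb(void *arg){ (void)arg; return 0; }

        /* Update hook (H12971, H12981-H12986): invoked for each row INSERT,
        ** UPDATE, or DELETE with the database name, table name and rowid. */
        static void update_cb(void *arg, int op, const char *zDb,
                              const char *zTbl, sqlite3_int64 rowid){
          (void)arg;
          printf("%s on %s.%s rowid %lld\n",
                 op==SQLITE_INSERT ? "INSERT" :
                 op==SQLITE_DELETE ? "DELETE" : "UPDATE",
                 zDb, zTbl, (long long)rowid);
        }

        void install_hooks(sqlite3 *db){
          sqlite3_commit_hook(db, commit_cb, 0);
          sqlite3_update_hook(db, update_cb, 0);
        }
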
    H13011 +The sqlite3_prepare(db,zSql,...) and +sqlite3_prepare_v2(db,zSql,...) interfaces interpret the +text in their zSql parameter as UTF-8. +

    Parents: S10000 +

    +

    No children

    +
    H13012 +The sqlite3_prepare16(db,zSql,...) and +sqlite3_prepare16_v2(db,zSql,...) interfaces interpret the +text in their zSql parameter as UTF-16 in the native byte order. +

    Parents: S10000 +

    +

    No children

    +
    H13013 +If the nByte argument to sqlite3_prepare_v2(db,zSql,nByte,...) +and its variants is less than zero, then the SQL text is +read from zSql up to the first zero terminator. +

    Parents: S10000 +

    +

    No children

    +
    H13014 +If the nByte argument to sqlite3_prepare_v2(db,zSql,nByte,...) +and its variants is non-negative, then at most nByte bytes of +SQL text are read from zSql. +

    Parents: S10000 +

    +

    No children

    +
    H13015 +In sqlite3_prepare_v2(db,zSql,N,P,pzTail) and its variants +if the zSql input text contains more than one SQL statement +and pzTail is not NULL, then *pzTail is made to point to the +first byte past the end of the first SQL statement in zSql. +What does *pzTail point to if there is one statement? +

    Parents: S10000 +

    +

    No children

    +
    H13016 +A successful call to sqlite3_prepare_v2(db,zSql,N,ppStmt,...) +or one of its variants writes into *ppStmt a pointer to a new +prepared statement or a pointer to NULL if zSql contains +nothing other than whitespace or comments. +

    Parents: S10000 +

    +

    No children

    +
    H13019 +The sqlite3_prepare_v2() interface and its variants return +SQLITE_OK upon success or an appropriate error code upon failure. +

    Parents: S10000 +

    +

    No children

    +
    H13021 +Before sqlite3_prepare(db,zSql,nByte,ppStmt,pzTail) or its +variants returns an error (any value other than SQLITE_OK), +they first set *ppStmt to NULL. +

    Parents: S10000 +

    +

    No children

    +
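
    A sketch of a prepare/step/finalize loop built on H13011 through H13021, using
    the pzTail output to walk a script containing several statements:

        #include <sqlite3.h>

        /* Prepare and run each statement in zSql in turn.  nByte of -1 means
        ** "read up to the zero terminator" (H13013). */
        int run_script(sqlite3 *db, const char *zSql){
          while( zSql && zSql[0] ){
            sqlite3_stmt *pStmt = 0;
            const char *zTail = 0;
            int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
            if( rc!=SQLITE_OK ) return rc;      /* H13021: *ppStmt is NULL here */
            if( pStmt ){                        /* H13016: NULL for pure whitespace */
              while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* consume rows */ }
              rc = sqlite3_finalize(pStmt);
              if( rc!=SQLITE_OK ) return rc;
            }
            zSql = zTail;                       /* H13015: next statement */
          }
          return SQLITE_OK;
        }
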
    H13101 +If the prepared statement passed as the argument to +sqlite3_sql() was compiled using either sqlite3_prepare_v2() or +sqlite3_prepare16_v2(), then sqlite3_sql() returns +a pointer to a zero-terminated string containing a UTF-8 rendering +of the original SQL statement. +

    Parents: H13000 +

    +

    No children

    +
    H13102 +If the prepared statement passed as the argument to +sqlite3_sql() was compiled using either sqlite3_prepare() or +sqlite3_prepare16(), then sqlite3_sql() returns a NULL pointer. +

    Parents: H13000 +

    +

    No children

    +
    H13103 +The string returned by sqlite3_sql(S) is valid until the +prepared statement S is deleted using sqlite3_finalize(S). +

    Parents: H13000 +

    +

    No children

    +
    H13123 +The sqlite3_db_handle(S) interface returns a pointer +to the database connection associated with the +prepared statement S. +

    Parents: S60600 +

    +

    No children

    +
    H13143 +If D is a database connection that holds one or more +unfinalized prepared statements and S is a NULL pointer, +then the sqlite3_next_stmt(D, S) routine shall return a pointer +to one of the prepared statements associated with D. +

    Parents: S60600 +

    +

    No children

    +
    H13146 +If D is a database connection that holds no unfinalized +prepared statements and S is a NULL pointer, then the +sqlite3_next_stmt(D, S) routine shall return a NULL pointer. +

    Parents: S60600 +

    +

    No children

    +
    H13149 +If S is a prepared statement in the database connection D +and S is not the last prepared statement in D, then the +sqlite3_next_stmt(D, S) routine shall return a pointer +to the next prepared statement in D after S. +

    Parents: S60600 +

    +

    No children

    +
    H13152 +If S is the last prepared statement in the +database connection D then the sqlite3_next_stmt(D, S) +routine shall return a NULL pointer. +

    Parents: S60600 +

    +

    No children

    +
    H13202 +If the prepared statement S is ready to be run, then +sqlite3_step(S) advances that prepared statement until +completion or until it is ready to return another row of the +result set, or until an interrupt +or a run-time error occurs. +

    Parents: S10000 +

    +

    No children

    +
    H13506 +The SQL statement compiler recognizes tokens of the forms +"?", "?NNN", "$VVV", ":VVV", and "@VVV" as SQL parameters, +where NNN is any sequence of one or more digits +and where VVV is any sequence of one or more alphanumeric +characters or "::" optionally followed by a string containing +no spaces and contained within parentheses. +

    Parents: S70300 +

    +

    No children

    +
    H13509 +The initial value of an SQL parameter is NULL. +

    Parents: S70300 +

    +

    No children

    +
    H13512 +The index of an "?" SQL parameter is one larger than the +largest index of SQL parameter to the left, or 1 if +the "?" is the leftmost SQL parameter. +

    Parents: S70300 +

    +

    No children

    +
    H13515 +The index of an "?NNN" SQL parameter is the integer NNN. +

    Parents: S70300 +

    +

    No children

    +
    H13518 +The index of an ":VVV", "$VVV", or "@VVV" SQL parameter is +the same as the index of the leftmost occurrence of the same +parameter, or one more than the largest index over all +parameters to the left if this is the first occurrence +of this parameter, or 1 if this is the leftmost parameter. +

    Parents: S70300 +

    +

    No children

    +
    H13521 +The SQL statement compiler fails with an SQLITE_RANGE +error if the index of an SQL parameter is less than 1 +or greater than the compile-time SQLITE_MAX_VARIABLE_NUMBER +parameter. +

    Parents: S70300 +

    +

    No children

    +
    H13524 +Calls to sqlite3_bind(S,N,V,...) +associate the value V with all SQL parameters having an +index of N in the prepared statement S. +

    Parents: S70300 +

    +

    No children

    +
    H13527 +Calls to sqlite3_bind(S,N,...) +override prior calls with the same values of S and N. +

    Parents: S70300 +

    +

    No children

    +
    H13530 +Bindings established by sqlite3_bind(S,...) +persist across calls to sqlite3_reset(S). +

    Parents: S70300 +

    +

    No children

    +
    H13533 +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) SQLite binds the first L +bytes of the BLOB or string pointed to by V, when L +is non-negative. +

    Parents: S70300 +

    +

    No children

    +
    H13536 +In calls to sqlite3_bind_text(S,N,V,L,D) or +sqlite3_bind_text16(S,N,V,L,D) SQLite binds characters +from V through the first zero character when L is negative. +

    Parents: S70300 +

    +

    No children

    +
    H13539 +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is the special +constant SQLITE_STATIC, SQLite assumes that the value V +is held in static unmanaged space that will not change +during the lifetime of the binding. +

    Parents: S70300 +

    +

    No children

    +
    H13542 +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is the special +constant SQLITE_TRANSIENT, the routine makes a +private copy of the value V before it returns. +

    Parents: S70300 +

    +

    No children

    +
    H13545 +In calls to sqlite3_bind_blob(S,N,V,L,D), +sqlite3_bind_text(S,N,V,L,D), or +sqlite3_bind_text16(S,N,V,L,D) when D is a pointer to +a function, SQLite invokes that function to destroy the +value V after it has finished using the value V. +

    Parents: S70300 +

    +

    No children

    +
    H13548 +In calls to sqlite3_bind_zeroblob(S,N,V,L) the value bound +is a BLOB of L bytes, or a zero-length BLOB if L is negative. +

    Parents: S70300 +

    +

    No children

    +
    H13551 +In calls to sqlite3_bind_value(S,N,V) the V argument may +be either a protected sqlite3_value object or an +unprotected sqlite3_value object. +

    Parents: S70300 +

    +

    No children

    +
    H13601 +The sqlite3_bind_parameter_count(S) interface returns +the largest index of all SQL parameters in the +prepared statement S, or 0 if S contains no SQL parameters. +

    Parents: S70300 +

    +

    No children

    +
    H13621 +The sqlite3_bind_parameter_name(S,N) interface returns +a UTF-8 rendering of the name of the SQL parameter in +the prepared statement S having index N, or +NULL if there is no SQL parameter with index N or if the +parameter with index N is an anonymous parameter "?". +

    Parents: S70300 +

    +

    No children

    +
    H13641 +The sqlite3_bind_parameter_index(S,N) interface returns +the index of SQL parameter in the prepared statement +S whose name matches the UTF-8 string N, or 0 if there is +no match. +

    Parents: S70300 +

    +

    No children

    +
    H13661 +The sqlite3_clear_bindings(S) interface resets all SQL +parameter bindings in the prepared statement S back to NULL. +

    Parents: S70300 +

    +

    No children

    +
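
    A sketch of parameter binding per H13506, H13524, and H13542; the table tbl and
    its columns are invented for the example.

        #include <sqlite3.h>

        /* Bind a named and a positional parameter; SQLITE_TRANSIENT makes
        ** SQLite take a private copy of the string before returning (H13542). */
        int insert_row(sqlite3 *db, const char *zName, int age){
          sqlite3_stmt *pStmt = 0;
          int rc = sqlite3_prepare_v2(db,
                      "INSERT INTO tbl(name, age) VALUES(:name, ?2)",
                      -1, &pStmt, 0);
          if( rc!=SQLITE_OK ) return rc;
          sqlite3_bind_text(pStmt,
                            sqlite3_bind_parameter_index(pStmt, ":name"),
                            zName, -1, SQLITE_TRANSIENT);
          sqlite3_bind_int(pStmt, 2, age);
          rc = sqlite3_step(pStmt);
          sqlite3_finalize(pStmt);
          return rc==SQLITE_DONE ? SQLITE_OK : rc;
        }
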
    H13711 +The sqlite3_column_count(S) interface returns the number of +columns in the result set generated by the prepared statement S, +or 0 if S does not generate a result set. +

    Parents: S10700 +

    +

    No children

    +
    H13721 +A successful invocation of the sqlite3_column_name(S,N) +interface returns the name of the Nth column (where 0 is +the leftmost column) for the result set of the +prepared statement S as a zero-terminated UTF-8 string. +

    Parents: S10700 +

    +

    No children

    +
    H13723 +A successful invocation of the sqlite3_column_name16(S,N) +interface returns the name of the Nth column (where 0 is +the leftmost column) for the result set of the +prepared statement S as a zero-terminated UTF-16 string +in the native byte order. +

    Parents: S10700 +

    +

    No children

    +
    H13724 +The sqlite3_column_name() and sqlite3_column_name16() +interfaces return a NULL pointer if they are unable to +allocate memory to hold their normal return strings. +

    Parents: S10700 +

    +

    No children

    +
    H13725 +If the N parameter to sqlite3_column_name(S,N) or +sqlite3_column_name16(S,N) is out of range, then the +interfaces return a NULL pointer. +

    Parents: S10700 +

    +

    No children

    +
    H13726 +The strings returned by sqlite3_column_name(S,N) and +sqlite3_column_name16(S,N) are valid until the next +call to either routine with the same S and N parameters +or until sqlite3_finalize(S) is called. +

    Parents: S10700 +

    +

    No children

    +
    H13727 +When a result column of a SELECT statement contains +an AS clause, the name of that column is the identifier +to the right of the AS keyword. +

    Parents: S10700 +

    +

    No children

    +
    H13741 +The sqlite3_column_database_name(S,N) interface returns either +the UTF-8 zero-terminated name of the database from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13742 +The sqlite3_column_database_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the database +from which the Nth result column of the prepared statement S is +extracted, or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13743 +The sqlite3_column_table_name(S,N) interface returns either +the UTF-8 zero-terminated name of the table from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13744 +The sqlite3_column_table_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the table +from which the Nth result column of the prepared statement S is +extracted, or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13745 +The sqlite3_column_origin_name(S,N) interface returns either +the UTF-8 zero-terminated name of the table column from which the +Nth result column of the prepared statement S is extracted, +or NULL if the Nth column of S is a general expression +or if unable to allocate memory to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13746 +The sqlite3_column_origin_name16(S,N) interface returns either +the UTF-16 native byte order zero-terminated name of the table +column from which the Nth result column of the +prepared statement S is extracted, or NULL if the Nth column +of S is a general expression or if unable to allocate memory +to store the name. +

    Parents: S10700 +

    +

    No children

    +
    H13748 +The return values from +column metadata interfaces +are valid for the lifetime of the prepared statement +or until the encoding is changed by another metadata +interface call for the same prepared statement and column. +

    Parents: S10700 +

    +

    No children

    +
    H13761 +A successful call to sqlite3_column_decltype(S,N) returns a +zero-terminated UTF-8 string containing the declared datatype +of the table column that appears as the Nth column (numbered +from 0) of the result set to the prepared statement S. +

    Parents: S10700 +

    +

    No children

    +
    H13762 +A successful call to sqlite3_column_decltype16(S,N) +returns a zero-terminated UTF-16 native byte order string +containing the declared datatype of the table column that appears +as the Nth column (numbered from 0) of the result set to the +prepared statement S. +

    Parents: S10700 +

    +

    No children

    +
    H13763 +If N is less than 0 or N is greater than or equal to +the number of columns in the prepared statement S, +or if the Nth column of S is an expression or subquery rather +than a table column, or if a memory allocation failure +occurs during encoding conversions, then +calls to sqlite3_column_decltype(S,N) or +sqlite3_column_decltype16(S,N) return NULL. +

    Parents: S10700 +

    +

    No children

    +
    H13771 +After a call to sqlite3_step(S) that returns SQLITE_ROW, +the sqlite3_data_count(S) routine will return the same value +as the sqlite3_column_count(S) function. +

    Parents: S10700 +

    +

    No children

    +
    H13772 +After sqlite3_step(S) has returned any value other than +SQLITE_ROW or before sqlite3_step(S) has been called on the +prepared statement for the first time since it was +prepared or reset, +the sqlite3_data_count(S) routine returns zero. +

    Parents: S10700 +

    +

    No children

    +
    H13803 +The sqlite3_column_blob(S,N) interface converts the +Nth column in the current row of the result set for +the prepared statement S into a BLOB and then returns a +pointer to the converted value. +

    Parents: S10700 +

    +

    No children

    +
    H13806 +The sqlite3_column_bytes(S,N) interface returns the +number of bytes in the BLOB or string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_column_blob(S,N) or +sqlite3_column_text(S,N). +

    Parents: S10700 +

    +

    No children

    +
    H13809 +The sqlite3_column_bytes16(S,N) interface returns the +number of bytes in the string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_column_text16(S,N). +

    Parents: S10700 +

    +

    No children

    +
    H13812 +The sqlite3_column_double(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a floating point value and +returns a copy of that value. +

    Parents: S10700 +

    +

    No children

    +
    H13815 +The sqlite3_column_int(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a 64-bit signed integer and +returns the lower 32 bits of that integer. +

    Parents: S10700 +

    +

    No children

    +
    H13818 +The sqlite3_column_int64(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a 64-bit signed integer and +returns a copy of that integer. +

    Parents: S10700 +

    +

    No children

    +
    H13821 +The sqlite3_column_text(S,N) interface converts the +Nth column in the current row of the result set for +the prepared statement S into a zero-terminated UTF-8 +string and returns a pointer to that string. +

    Parents: S10700 +

    +

    No children

    +
    H13824 +The sqlite3_column_text16(S,N) interface converts the +Nth column in the current row of the result set for the +prepared statement S into a zero-terminated 2-byte +aligned UTF-16 native byte order string and returns +a pointer to that string. +

    Parents: S10700 +

    +

    No children

    +
    H13827 +The sqlite3_column_type(S,N) interface returns +one of SQLITE_NULL, SQLITE_INTEGER, SQLITE_FLOAT, +SQLITE_TEXT, or SQLITE_BLOB as appropriate for +the Nth column in the current row of the result set for +the prepared statement S. +

    Parents: S10700 +

    +

    No children

    +
    H13830 +The sqlite3_column_value(S,N) interface returns a +pointer to an unprotected sqlite3_value object for the +Nth column in the current row of the result set for +the prepared statement S. +

    Parents: S10700 +

    +

    No children

    +
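
    A sketch of result extraction per H13771, H13821, and H13827; the query and
    table name tbl are invented for the example.

        #include <stdio.h>
        #include <sqlite3.h>

        /* Walk a result set, using sqlite3_column_type() to decide how to
        ** fetch each value. */
        int print_rows(sqlite3 *db){
          sqlite3_stmt *pStmt = 0;
          int rc = sqlite3_prepare_v2(db, "SELECT * FROM tbl", -1, &pStmt, 0);
          if( rc!=SQLITE_OK ) return rc;
          while( sqlite3_step(pStmt)==SQLITE_ROW ){
            int nCol = sqlite3_data_count(pStmt);   /* == column_count here (H13771) */
            for(int i=0; i<nCol; i++){
              if( sqlite3_column_type(pStmt, i)==SQLITE_INTEGER ){
                printf("%lld ", (long long)sqlite3_column_int64(pStmt, i));
              }else{
                const unsigned char *z = sqlite3_column_text(pStmt, i);  /* H13821 */
                printf("%s ", z ? (const char*)z : "NULL");
              }
            }
            printf("\n");
          }
          return sqlite3_finalize(pStmt);
        }
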
    H14103 +A successful invocation of sqlite3_config() shall return +SQLITE_OK. +

    Parents: S20000 +

    +

    No children

    +
    H14106 +The sqlite3_config() interface shall return SQLITE_MISUSE +if it is invoked in between calls to sqlite3_initialize() and +sqlite3_shutdown(). +

    Parents: S20000 +

    +

    No children

    +
    H14120 +A successful call to sqlite3_config(SQLITE_CONFIG_SINGLETHREAD) +shall set the default threading mode to Single-thread. +

    Parents: S20000 +

    +

    No children

    +
    H14123 +A successful call to sqlite3_config(SQLITE_CONFIG_MULTITHREAD) +shall set the default threading mode to Multi-thread. +

    Parents: S20000 +

    +

    No children

    +
    H14126 +A successful call to sqlite3_config(SQLITE_CONFIG_SERIALIZED) +shall set the default threading mode to Serialized. +

    Parents: S20000 +

    +

    No children

    +
    H14129 +A successful call to sqlite3_config(SQLITE_CONFIG_MUTEX,X) +where X is a pointer to an initialized sqlite3_mutex_methods +object shall cause all subsequent mutex operations performed +by SQLite to use the mutex methods that were present in X +during the call to sqlite3_config(). +

    Parents: S20000 +

    +

    No children

    +
    H14132 +A successful call to sqlite3_config(SQLITE_CONFIG_GETMUTEX,X) +where X is a pointer to an sqlite3_mutex_methods object +shall overwrite the content of that sqlite3_mutex_methods object +with the mutex methods currently in use by SQLite. +

    Parents: S20000 +

    +

    No children

    +
    H14135 +A successful call to sqlite3_config(SQLITE_CONFIG_MALLOC,M) +where M is a pointer to an initialized sqlite3_mem_methods +object shall cause all subsequent memory allocation operations +performed by SQLite to use the methods that were present in +M during the call to sqlite3_config(). +

    Parents: S20000 +

    +

    No children

    +
    H14138 +A successful call to sqlite3_config(SQLITE_CONFIG_GETMALLOC,M) +where M is a pointer to an sqlite3_mem_methods object shall +overwrite the content of that sqlite3_mem_methods object with +the memory allocation methods currently in use by +SQLite. +

    Parents: S20000 +

    +

    No children

    +
    H14141 +A successful call to sqlite3_config(SQLITE_CONFIG_MEMSTATUS,1) +shall enable the memory allocation status collection logic. +

    Parents: S20000 +

    +

    No children

    +
    H14144 +A successful call to sqlite3_config(SQLITE_CONFIG_MEMSTATUS,0) +shall disable the memory allocation status collection logic. +

    Parents: S20000 +

    +

    No children

    +
    H14147 +The memory allocation status collection logic shall be +enabled by default. +

    Parents: S20000 +

    +

    No children

    +
    H14150 +A successful call to sqlite3_config(SQLITE_CONFIG_SCRATCH,S,Z,N) +where Z and N are non-negative integers and +S is a pointer to an aligned memory buffer not less than +Z*N bytes in size shall cause S to be used by the +scratch memory allocator for as many as N simultaneous +allocations each of size Z. +

    Parents: S20000 +

    +

    No children

    +
    H14153 +A successful call to sqlite3_config(SQLITE_CONFIG_SCRATCH,S,Z,N) +where S is a NULL pointer shall disable the +scratch memory allocator. +

    Parents: S20000 +

    +

    No children

    +
    H14156 +A successful call to +sqlite3_config(SQLITE_CONFIG_PAGECACHE,S,Z,N) +where Z and N are non-negative integers and +S is a pointer to an aligned memory buffer not less than +Z*N bytes in size shall cause S to be used by the +pagecache memory allocator for as many as N simultaneous +allocations each of size Z. +

    Parents: S20000 +

    +

    No children

    +
    H14159 +A successful call to +sqlite3_config(SQLITE_CONFIG_PAGECACHE,S,Z,N) +where S is a NULL pointer shall disable the +pagecache memory allocator. +

    Parents: S20000 +

    +

    No children

    +
    H14162 +A successful call to sqlite3_config(SQLITE_CONFIG_HEAP,H,Z,N) +where Z and N are non-negative integers and +H is a pointer to an aligned memory buffer not less than +Z bytes in size shall enable the memsys5 memory allocator +and cause it to use buffer H as its memory source and to use +a minimum allocation size of N. +

    Parents: S20000 +

    +

    No children

    +
    H14165 +A successful call to sqlite3_config(SQLITE_CONFIG_HEAP,H,Z,N) +where H is a NULL pointer shall disable the +memsys5 memory allocator. +

    Parents: S20000 +

    +

    No children

    +
    H14168 +A successful call to sqlite3_config(SQLITE_CONFIG_LOOKASIDE,Z,N) +shall cause the default lookaside memory allocator configuration +for new database connections to be N slots of Z bytes each. +

    Parents: S20000 +

    +

    No children

    +
    H14203 +A call to sqlite3_db_config(D,V,...) shall return SQLITE_OK +if and only if the call is successful. +

    Parents: S20000 +

    +

    No children

    +
    H14206 +If one or more slots of the lookaside memory allocator for +database connection D are in use, then a call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,...) shall +fail with an SQLITE_BUSY return code. +

    Parents: S20000 +

    +

    No children

    +
    H14209 +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are positive +integers and B is an aligned buffer at least Z*N bytes in size +shall cause the lookaside memory allocator for D to use buffer B +with N slots of Z bytes each. +

    Parents: S20000 +

    +

    No children

    +
    H14212 +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are positive +integers and B is a NULL pointer shall cause the +lookaside memory allocator for D to obtain a Z*N byte buffer +from the primary memory allocator and use that buffer +with N lookaside slots of Z bytes each. +

    Parents: S20000 +

    +

    No children

    +
    H14215 +A successful call to +sqlite3_db_config(D,SQLITE_DBCONFIG_LOOKASIDE,B,Z,N) where +D is an open database connection and Z and N are zero shall +disable the lookaside memory allocator for D. +

    Parents: S20000 +

    +

    No children

    +
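
    A sketch of global and per-connection configuration per H14106, H14126, and
    H14209 through H14215; the slot size of 256 bytes and count of 64 are example
    values.

        #include <sqlite3.h>

        /* Process-wide configuration has to happen while the library is not
        ** initialized (H14106): select the Serialized threading mode (H14126)
        ** and then initialize. */
        int configure(void){
          int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
          if( rc!=SQLITE_OK ) return rc;
          return sqlite3_initialize();
        }

        /* Per-connection lookaside tuning: a NULL buffer means SQLite obtains
        ** the Z*N bytes itself (H14212); passing 0,0 would disable lookaside
        ** entirely (H14215). */
        int tune_connection(sqlite3 *db){
          return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 256, 64);
        }
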
    H15103 +The sqlite3_value_blob(V) interface converts the +protected sqlite3_value object V into a BLOB and then +returns a pointer to the converted value. +

    Parents: S20200 +

    +

    No children

    +
    H15106 +The sqlite3_value_bytes(V) interface returns the +number of bytes in the BLOB or string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_value_blob(V) or +sqlite3_value_text(V). +

    Parents: S20200 +

    +

    No children

    +
    H15109 +The sqlite3_value_bytes16(V) interface returns the +number of bytes in the string (exclusive of the +zero terminator on the string) that was returned by the +most recent call to sqlite3_value_text16(V), +sqlite3_value_text16be(V), or sqlite3_value_text16le(V). +

    Parents: S20200 +

    +

    No children

    +
    H15112 +The sqlite3_value_double(V) interface converts the +protected sqlite3_value object V into a floating point value and +returns a copy of that value. +

    Parents: S20200 +

    +

    No children

    +
    H15115 +The sqlite3_value_int(V) interface converts the +protected sqlite3_value object V into a 64-bit signed integer and +returns the lower 32 bits of that integer. +

    Parents: S20200 +

    +

    No children

    +
    H15118 +The sqlite3_value_int64(V) interface converts the +protected sqlite3_value object V into a 64-bit signed integer and +returns a copy of that integer. +

    Parents: S20200 +

    +

    No children

    +
    H15121 +The sqlite3_value_text(V) interface converts the +protected sqlite3_value object V into a zero-terminated UTF-8 +string and returns a pointer to that string. +

    Parents: S20200 +

    +

    No children

    +
    H15124 +The sqlite3_value_text16(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 native byte order +string and returns a pointer to that string. +

    Parents: S20200 +

    +

    No children

    +
    H15127 +The sqlite3_value_text16be(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 big-endian +string and returns a pointer to that string. +

    Parents: S20200 +

    +

    No children

    +
    H15130 +The sqlite3_value_text16le(V) interface converts the +protected sqlite3_value object V into a zero-terminated 2-byte +aligned UTF-16 little-endian +string and returns a pointer to that string. +

    Parents: S20200 +

    +

    No children

    +
    H15133 +The sqlite3_value_type(V) interface returns +one of SQLITE_NULL, SQLITE_INTEGER, SQLITE_FLOAT, +SQLITE_TEXT, or SQLITE_BLOB as appropriate for +the sqlite3_value object V. +

    Parents: S20200 +

    +

    No children

    +
    H15136 +The sqlite3_value_numeric_type(V) interface converts +the protected sqlite3_value object V into either an integer or +a floating point value if it can do so without loss of +information, and returns one of SQLITE_NULL, +SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, or +SQLITE_BLOB as appropriate for the +protected sqlite3_value object V after the conversion attempt. +

    Parents: S20200 +

    +

    No children

    +
    H15304 +When a call to sqlite3_step(S) causes the prepared statement +S to run to completion, the function returns SQLITE_DONE. +

    Parents: S10000 +

    +

    No children

    +
    H15306 +When a call to sqlite3_step(S) stops because it is ready to +return another row of the result set, it returns SQLITE_ROW. +

    Parents: S10000 +

    +

    No children

    +
    H15308 +If a call to sqlite3_step(S) encounters an +interrupt or a run-time error, +it returns an appropriate error code that is not one of +SQLITE_OK, SQLITE_ROW, or SQLITE_DONE. +

    Parents: S10000 +

    +

    No children

    +
    H15310 +If an interrupt or a run-time error +occurs during a call to sqlite3_step(S) +for a prepared statement S created using +legacy interfaces sqlite3_prepare() or +sqlite3_prepare16(), then the function returns either +SQLITE_ERROR, SQLITE_BUSY, or SQLITE_MISUSE. +

    Parents: S10000 +

    +

    No children

    +
    H16103 +The sqlite3_create_function16(D,X,...) interface shall behave +as sqlite3_create_function(D,X,...) in every way except that it +interprets the X argument as zero-terminated UTF-16 +native byte order instead of as zero-terminated UTF-8. +

    Parents: S20200 +

    +

    No children

    +
    H16106 +A successful invocation of the +sqlite3_create_function(D,X,N,E,...) interface shall register +or replace callback functions in the database connection D +used to implement the SQL function named X with N parameters +and having a preferred text encoding of E. +

    Parents: S20200 +

    +

    No children

    +
    H16109 +A successful call to sqlite3_create_function(D,X,N,E,P,F,S,L) +shall replace the P, F, S, and L values from any prior calls with +the same D, X, N, and E values. +

    Parents: S20200 +

    +

    No children

    +
    H16112 +The sqlite3_create_function(D,X,...) interface shall fail +if the SQL function name X is +longer than 255 bytes exclusive of the zero terminator. +

    Parents: S20200 +

    +

    No children

    +
    H16118 +The sqlite3_create_function(D,X,N,E,P,F,S,L) interface +shall fail unless either F is NULL and S and L are non-NULL or +F is non-NULL and S and L are NULL. +

    Parents: S20200 +

    +

    No children

    +
    H16121 +The sqlite3_create_function(D,...) interface shall fail with an +error code of SQLITE_BUSY if there exist prepared statements +associated with the database connection D. +

    Parents: S20200 +

    +

    No children

    +
    H16127 +When N is non-negative, the sqlite3_create_function(D,X,N,...) +interface shall register callbacks to be invoked for the +SQL function +named X when the number of arguments to the SQL function is +exactly N. +

    Parents: S20200 +

    +

    No children

    +
    H16130 +When N is -1, the sqlite3_create_function(D,X,N,...) +interface shall register callbacks to be invoked for the SQL +function named X with any number of arguments. +

    Parents: S20200 +

    +

    No children

    +
    H16133 +When calls to sqlite3_create_function(D,X,N,...) +specify multiple implementations of the same function X +and when one implementation has N>=0 and the other has N=(-1) +the implementation with a non-negative N shall be preferred. +

    Parents: S20200 +

    +

    No children

    +
    H16136 +When calls to sqlite3_create_function(D,X,N,E,...) +specify multiple implementations of the same function X with +the same number of arguments N but with different +encodings E, then the implementation where E matches the +database encoding shall be preferred. +

    Parents: S20200 +

    +

    No children

    +
    H16139 +For an aggregate SQL function created using +sqlite3_create_function(D,X,N,E,P,0,S,L) the finalizer +function L shall always be invoked exactly once if the +step function S is called one or more times. +

    Parents: S20200 +

    +

    No children

    +
    H16142 +When SQLite invokes either the xFunc or xStep function of +an application-defined SQL function or aggregate created +by sqlite3_create_function() or sqlite3_create_function16(), +then the array of sqlite3_value objects passed as the +third parameter shall be protected sqlite3_value objects. +

    Parents: S20200 +

    +

    No children

    +
    H16211 +The first invocation of sqlite3_aggregate_context(C,N) for +a particular instance of an aggregate function (for a particular +context C) causes SQLite to allocate N bytes of memory, +zero that memory, and return a pointer to the allocated memory. +

    Parents: S20200 +

    +

    No children

    +
    H16213 +If a memory allocation error occurs during +sqlite3_aggregate_context(C,N) then the function returns 0. +

    Parents: S20200 +

    +

    No children

    +
    H16215 +Second and subsequent invocations of +sqlite3_aggregate_context(C,N) for the same context pointer C +ignore the N parameter and return a pointer to the same +block of memory returned by the first invocation. +

    Parents: S20200 +

    +

    No children

    +
    H16217 +The memory allocated by sqlite3_aggregate_context(C,N) is +automatically freed on the next call to sqlite3_reset() +or sqlite3_finalize() for the prepared statement containing +the aggregate function associated with context C. +

    Parents: S20200 +

    +

    No children

    +
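
    A sketch of an aggregate SQL function built on H16118, H16139, H16211, and
    H16215; the function name countnull is invented for the example.

        #include <sqlite3.h>

        /* A tiny aggregate "countnull(X)" that counts NULL arguments.  The
        ** per-group counter lives in sqlite3_aggregate_context() memory, which
        ** is zeroed on first use (H16211) and shared by later calls (H16215). */
        static void countnull_step(sqlite3_context *ctx, int argc,
                                   sqlite3_value **argv){
          int *pCount = (int*)sqlite3_aggregate_context(ctx, sizeof(int));
          (void)argc;
          if( pCount && sqlite3_value_type(argv[0])==SQLITE_NULL ) (*pCount)++;
        }

        static void countnull_final(sqlite3_context *ctx){
          int *pCount = (int*)sqlite3_aggregate_context(ctx, sizeof(int));
          sqlite3_result_int(ctx, pCount ? *pCount : 0);
        }

        int register_countnull(sqlite3 *db){
          /* xFunc is NULL while xStep/xFinal are set: the aggregate form
          ** required by H16118. */
          return sqlite3_create_function(db, "countnull", 1, SQLITE_UTF8, 0,
                                         0, countnull_step, countnull_final);
        }
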
    H16243 +The sqlite3_user_data(C) interface returns a copy of the +P pointer from the sqlite3_create_function(D,X,N,E,P,F,S,L) +or sqlite3_create_function16(D,X,N,E,P,F,S,L) call that +registered the SQL function associated with sqlite3_context C. +

    Parents: S20200 +

    +

    No children

    +
    H16253 +The sqlite3_context_db_handle(C) interface returns a copy of the +D pointer from the sqlite3_create_function(D,X,N,E,P,F,S,L) +or sqlite3_create_function16(D,X,N,E,P,F,S,L) call that +registered the SQL function associated with sqlite3_context C. +

    Parents: S60600 +

    +

    No children

    +
    H16272 +The sqlite3_get_auxdata(C,N) interface returns a pointer +to metadata associated with the Nth parameter of the SQL function +whose context is C, or NULL if there is no metadata associated +with that parameter. +

    Parents: S20200 +

    +

    No children

    +
    H16274 +The sqlite3_set_auxdata(C,N,P,D) interface assigns a metadata +pointer P to the Nth parameter of the SQL function with context C. +

    Parents: S20200 +

    +

    No children

    +
    H16276 +SQLite will invoke the destructor D with a single argument +which is the metadata pointer P following a call to +sqlite3_set_auxdata(C,N,P,D) when SQLite ceases to hold +the metadata. +

    Parents: S20200 +

    +

    No children

    +
    H16277 +SQLite ceases to hold metadata for an SQL function parameter +when the value of that parameter changes. +

    Parents: S20200 +

    +

    No children

    +
    H16278 +When sqlite3_set_auxdata(C,N,P,D) is invoked, the destructor +is called for any prior metadata associated with the same function +context C and parameter N. +

    Parents: S20200 +

    +

    No children

    +
    H16279 +SQLite will call destructors for any metadata it is holding +in a particular prepared statement S when either +sqlite3_reset(S) or sqlite3_finalize(S) is called. +

    Parents: S20200 +

    +

    No children

    +
    H16351 +The sqlite3_soft_heap_limit(N) interface places a soft limit +of N bytes on the amount of heap memory that may be allocated +using sqlite3_malloc() or sqlite3_realloc() at any point +in time. +

    Parents: S30220 +

    +

    No children

    +
    H16352 +If a call to sqlite3_malloc() or sqlite3_realloc() would +cause the total amount of allocated memory to exceed the +soft heap limit, then sqlite3_release_memory() is invoked +in an attempt to reduce the memory usage prior to proceeding +with the memory allocation attempt. +

    Parents: S30220 +

    +

    No children

    +
    H16353 +Calls to sqlite3_malloc() or sqlite3_realloc() that trigger +attempts to reduce memory usage through the soft heap limit +mechanism continue even if the attempt to reduce memory +usage is unsuccessful. +

    Parents: S30220 +

    +

    No children

    +
    H16354 +A negative or zero value for N in a call to +sqlite3_soft_heap_limit(N) means that there is no soft +heap limit and sqlite3_release_memory() will only be +called when memory is completely exhausted. +

    Parents: S30220 +

    +

    No children

    +
    H16355 +The default value for the soft heap limit is zero. +

    Parents: S30220 +

    +

    No children

    +
    H16358 +Each call to sqlite3_soft_heap_limit(N) overrides the +values set by all prior calls. +

    Parents: S30220 +

    +

    No children

    +
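
    A minimal sketch of H16351 through H16355; the 8 MB figure is an example value.

        #include <sqlite3.h>

        /* Ask SQLite to try to keep heap usage under roughly 8 MB (H16351);
        ** zero or a negative value removes the soft limit again
        ** (H16354/H16355). */
        void set_soft_limit(void){
          sqlite3_soft_heap_limit(8 * 1024 * 1024);
        }
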
    H16403 +The default return value from any SQL function is NULL. +

    Parents: S20200 +

    +

    No children

    +
    H16406 +The sqlite3_result_blob(C,V,N,D) interface changes the +return value of function C to be a BLOB that is N bytes +in length and with content pointed to by V. +

    Parents: S20200 +

    +

    No children

    +
    H16409 +The sqlite3_result_double(C,V) interface changes the +return value of function C to be the floating point value V. +

    Parents: S20200 +

    +

    No children

    +
    H16412 +The sqlite3_result_error(C,V,N) interface changes the return +value of function C to be an exception with error code +SQLITE_ERROR and a UTF-8 error message copied from V up to the +first zero byte or until N bytes are read if N is positive. +

    Parents: S20200 +

    +

    No children

    +
    H16415 +The sqlite3_result_error16(C,V,N) interface changes the return +value of function C to be an exception with error code +SQLITE_ERROR and a UTF-16 native byte order error message +copied from V up to the first zero terminator or until N bytes +are read if N is positive. +

    Parents: S20200 +

    +

    No children

    +
    H16418 +The sqlite3_result_error_toobig(C) interface changes the return +value of the function C to be an exception with error code +SQLITE_TOOBIG and an appropriate error message. +

    Parents: S20200 +

    +

    No children

    +
    H16421 +The sqlite3_result_error_nomem(C) interface changes the return +value of the function C to be an exception with error code +SQLITE_NOMEM and an appropriate error message. +

    Parents: S20200 +

    +

    No children

    +
    H16424 +The sqlite3_result_error_code(C,E) interface changes the return +value of the function C to be an exception with error code E. +The error message text is unchanged. +

    Parents: S20200 +

    +

    No children

    +
    H16427 +The sqlite3_result_int(C,V) interface changes the +return value of function C to be the 32-bit integer value V. +

    Parents: S20200 +

    +

    No children

    +
    H16430 +The sqlite3_result_int64(C,V) interface changes the +return value of function C to be the 64-bit integer value V. +

    Parents: S20200 +

    +

    No children

    +
    H16433 +The sqlite3_result_null(C) interface changes the +return value of function C to be NULL. +

    Parents: S20200 +

    +

    No children

    +
    H16436 +The sqlite3_result_text(C,V,N,D) interface changes the +return value of function C to be the UTF-8 string +V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. +

    Parents: S20200 +

    +

    No children

    +
    H16439 +The sqlite3_result_text16(C,V,N,D) interface changes the +return value of function C to be the UTF-16 native byte order +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. +

    Parents: S20200 +

    +

    No children

    +
    H16442 +The sqlite3_result_text16be(C,V,N,D) interface changes the +return value of function C to be the UTF-16 big-endian +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. +

    Parents: S20200 +

    +

    No children

    +
    H16445 +The sqlite3_result_text16le(C,V,N,D) interface changes the +return value of function C to be the UTF-16 little-endian +string V up to the first zero if N is negative +or the first N bytes of V if N is non-negative. +

    Parents: S20200 +

    +

    No children

    +
    H16448 +The sqlite3_result_value(C,V) interface changes the +return value of function C to be the unprotected sqlite3_value +object V. +

    Parents: S20200 +

    +

    No children

    +
    H16451 +The sqlite3_result_zeroblob(C,N) interface changes the +return value of function C to be an N-byte BLOB of all zeros. +

    Parents: S20200 +

    +

    No children

    +
    H16454 +The sqlite3_result_error() and sqlite3_result_error16() +interfaces make a copy of their error message strings before +returning. +

    Parents: S20200 +

    +

    No children

    +
    H16457 +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is the constant SQLITE_STATIC +then no destructor is ever called on the pointer V and SQLite +assumes that V is immutable. +

    Parents: S20200 +

    +

    No children

    +
    H16460 +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is the constant +SQLITE_TRANSIENT then the interface makes a copy of the +content of V and retains the copy. +

    Parents: S20200 +

    +

    No children

    +
    H16463 +If the D destructor parameter to sqlite3_result_blob(C,V,N,D), +sqlite3_result_text(C,V,N,D), sqlite3_result_text16(C,V,N,D), +sqlite3_result_text16be(C,V,N,D), or +sqlite3_result_text16le(C,V,N,D) is some value other than +the constants SQLITE_STATIC and SQLITE_TRANSIENT then +SQLite will invoke the destructor D with V as its only argument +when it has finished with the V value. +

    Parents: S20200 +

    +

    No children
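
    To illustrate the sqlite3_result_*() requirements above, a sketch of a
    scalar function implementation; GREETING(name) and the 100-byte length cap
    are hypothetical, and the choice of destructor constant follows H16457
    through H16463:

        #include <sqlite3.h>
        #include <string.h>

        static void greetingFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
          const char *zName = (const char*)sqlite3_value_text(argv[0]);
          (void)argc;
          if( zName==0 ){
            return;                            /* default return value is NULL (H16403) */
          }
          if( strlen(zName)>100 ){
            sqlite3_result_error_toobig(ctx);  /* SQLITE_TOOBIG exception (H16418) */
            return;
          }
          char *z = sqlite3_mprintf("hello, %s", zName);
          if( z==0 ){
            sqlite3_result_error_nomem(ctx);   /* SQLITE_NOMEM exception (H16421) */
            return;
          }
          /* SQLITE_TRANSIENT makes SQLite copy z before returning (H16460).
          ** Passing sqlite3_free instead would hand ownership of z to SQLite
          ** (H16463); SQLITE_STATIC would promise z is immutable (H16457). */
          sqlite3_result_text(ctx, z, -1, SQLITE_TRANSIENT);
          sqlite3_free(z);
        }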

    +
    H16603 +A successful call to the +sqlite3_create_collation_v2(B,X,E,P,F,D) interface +registers function F as the comparison function used to +implement collation X on the database connection B for +databases having encoding E. +

    Parents: S20300 +

    +

    No children

    +
    H16604 +SQLite understands the X parameter to +sqlite3_create_collation_v2(B,X,E,P,F,D) as a zero-terminated +UTF-8 string in which case is ignored for ASCII characters and +is significant for non-ASCII characters. +

    Parents: S20300 +

    +

    No children

    +
    H16606 +Successive calls to sqlite3_create_collation_v2(B,X,E,P,F,D) +with the same values for B, X, and E, override prior values +of P, F, and D. +

    Parents: S20300 +

    +

    No children

    +
    H16609 +If the destructor D in sqlite3_create_collation_v2(B,X,E,P,F,D) +is not NULL then it is called with argument P when the +collating function is dropped by SQLite. +

    Parents: S20300 +

    +

    No children

    +
    H16612 +A collating function is dropped when it is overloaded. +

    Parents: S20300 +

    +

    No children

    +
    H16615 +A collating function is dropped when the database connection +is closed using sqlite3_close(). +

    Parents: S20300 +

    +

    No children

    +
    H16618 +The pointer P in sqlite3_create_collation_v2(B,X,E,P,F,D) +is passed through as the first parameter to the comparison +function F for all subsequent invocations of F. +

    Parents: S20300 +

    +

    No children

    +
    H16621 +A call to sqlite3_create_collation(B,X,E,P,F) is exactly +the same as a call to sqlite3_create_collation_v2() with +the same parameters and a NULL destructor. +

    Parents: S20300 +

    +

    No children

    +
    H16624 +Following a sqlite3_create_collation_v2(B,X,E,P,F,D), +SQLite uses the comparison function F for all text comparison +operations on the database connection B on text values that +use the collating sequence named X. +

    Parents: S20300 +

    +

    No children

    +
    H16627 +The sqlite3_create_collation16(B,X,E,P,F) works the same +as sqlite3_create_collation(B,X,E,P,F) except that the +collation name X is understood as UTF-16 in native byte order +instead of UTF-8. +

    Parents: S20300 +

    +

    No children

    +
    H16630 +When multiple comparison functions are available for the same +collating sequence, SQLite chooses the one whose text encoding +requires the least amount of conversion from the default +text encoding of the database. +

    Parents: S20300 +

    +

    No children
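
    A sketch of registering a collating sequence per H16603 through H16624;
    the name "mynocase" and the byte-wise case folding are hypothetical and
    only meant to show the shape of the comparison function F and the P and D
    parameters:

        #include <sqlite3.h>
        #include <ctype.h>

        /* Comparison function F; the registered P pointer arrives as the
        ** first argument on every invocation (H16618) and is unused here. */
        static int nocaseCmp(void *pArg, int n1, const void *z1,
                             int n2, const void *z2){
          const unsigned char *a = z1, *b = z2;
          int i, n = n1<n2 ? n1 : n2;
          (void)pArg;
          for(i=0; i<n; i++){
            int c = tolower(a[i]) - tolower(b[i]);
            if( c ) return c;
          }
          return n1 - n2;
        }

        /* Register "mynocase" for UTF-8 text on connection db (H16603).
        ** With a NULL destructor this is equivalent to calling
        ** sqlite3_create_collation() (H16621). */
        int registerNocase(sqlite3 *db){
          return sqlite3_create_collation_v2(db, "mynocase", SQLITE_UTF8,
                                             0, nocaseCmp, 0);
        }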

    +
    H16702 +A successful call to sqlite3_collation_needed(D,P,F) +or sqlite3_collation_needed16(D,P,F) causes +the database connection D to invoke callback F with first +parameter P whenever it needs a comparison function for a +collating sequence that it does not know about. +

    Parents: S20300 +

    +

    No children

    +
    H16704 +Each successful call to sqlite3_collation_needed() or +sqlite3_collation_needed16() overrides the callback registered +on the same database connection by prior calls to either +interface. +

    Parents: S20300 +

    +

    No children

    +
    H16706 +The name of the requested collating function passed in the +4th parameter to the callback is in UTF-8 if the callback +was registered using sqlite3_collation_needed() and +is in UTF-16 native byte order if the callback was +registered using sqlite3_collation_needed16(). +

    Parents: S20300 +

    +

    No children

    +
    H17303 +The sqlite3_malloc(N) interface returns either a pointer to +a newly checked-out block of at least N bytes of memory +that is 8-byte aligned, or it returns NULL if it is unable +to fulfill the request. +

    Parents: S20000 +

    +

    No children

    +
    H17304 +The sqlite3_malloc(N) interface returns a NULL pointer if +N is less than or equal to zero. +

    Parents: S20000 +

    +

    No children

    +
    H17305 +The sqlite3_free(P) interface releases memory previously +returned from sqlite3_malloc() or sqlite3_realloc(), +making it available for reuse. +

    Parents: S20000 +

    +

    No children

    +
    H17306 +A call to sqlite3_free(NULL) is a harmless no-op. +

    Parents: S20000 +

    +

    No children

    +
    H17310 +A call to sqlite3_realloc(0,N) is equivalent to a call +to sqlite3_malloc(N). +

    Parents: S20000 +

    +

    No children

    +
    H17312 +A call to sqlite3_realloc(P,0) is equivalent to a call +to sqlite3_free(P). +

    Parents: S20000 +

    +

    No children

    +
    H17315 +The SQLite core uses sqlite3_malloc(), sqlite3_realloc(), +and sqlite3_free() for all of its memory allocation and +deallocation needs. +

    Parents: S20000 +

    +

    No children

    +
    H17318 +The sqlite3_realloc(P,N) interface returns either a pointer +to a block of checked-out memory of at least N bytes in size +that is 8-byte aligned, or a NULL pointer. +

    Parents: S20000 +

    +

    No children

    +
    H17321 +When sqlite3_realloc(P,N) returns a non-NULL pointer, it first +copies the first K bytes of content from P into the newly +allocated block, where K is the lesser of N and the size of +the buffer P. +

    Parents: S20000 +

    +

    No children

    +
    H17322 +When sqlite3_realloc(P,N) returns a non-NULL pointer, it first +releases the buffer P. +

    Parents: S20000 +

    +

    No children

    +
    H17323 +When sqlite3_realloc(P,N) returns NULL, the buffer P is +not modified or released. +

    Parents: S20000 +

    +

    No children
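
    The allocator requirements H17303 through H17323 can be exercised with a
    few lines of application code; the sizes and the "hello" payload are
    arbitrary:

        #include <sqlite3.h>
        #include <string.h>
        #include <stdio.h>

        int main(void){
          /* At least 16 bytes, 8-byte aligned, or NULL (H17303); a
          ** non-positive request would return NULL (H17304). */
          char *p = sqlite3_malloc(16);
          if( p==0 ) return 1;
          memcpy(p, "hello", 6);

          /* The old content is preserved up to the smaller of the old and
          ** new sizes (H17321) and the old buffer is released on success
          ** (H17322); on failure the old buffer is untouched (H17323). */
          char *q = sqlite3_realloc(p, 64);
          if( q==0 ){ sqlite3_free(p); return 1; }
          printf("%s\n", q);

          sqlite3_free(q);   /* release for reuse (H17305) */
          sqlite3_free(0);   /* harmless no-op (H17306) */
          return 0;
        }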

    +
    H17341 +The sqlite3_release_memory(N) interface attempts to +free N bytes of heap memory by deallocating non-essential +memory allocations held by the database library. +

    Parents: S30220 +

    +

    No children

    +
    H17342 +The sqlite3_release_memory(N) returns the number +of bytes actually freed, which might be more or less +than the amount requested. +

    Parents: S30220 +

    +

    No children

    +
    H17371 +The sqlite3_memory_used() routine returns the number of bytes +of memory currently outstanding (malloced but not freed). +

    Parents: S30210 +

    +

    No children

    +
    H17373 +The sqlite3_memory_highwater() routine returns the maximum +value of sqlite3_memory_used() since the high-water mark +was last reset. +

    Parents: S30210 +

    +

    No children

    +
    H17374 +The values returned by sqlite3_memory_used() and +sqlite3_memory_highwater() include any overhead +added by SQLite in its implementation of sqlite3_malloc(), +but not overhead added by any underlying system library +routines that sqlite3_malloc() may call. +

    Parents: S30210 +

    +

    No children

    +
    H17375 +The memory high-water mark is reset to the current value of +sqlite3_memory_used() if and only if the parameter to +sqlite3_memory_highwater() is true. The value returned +by sqlite3_memory_highwater(1) is the high-water mark +prior to the reset. +

    Parents: S30210 +

    +

    No children
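
    A small sketch showing the memory statistics interfaces described by
    H17371 through H17375; the 100000-byte allocation is arbitrary:

        #include <sqlite3.h>
        #include <stdio.h>

        int main(void){
          void *p = sqlite3_malloc(100000);

          /* Bytes currently outstanding (H17371) and the peak since the
          ** last reset (H17373); a false argument leaves the mark alone. */
          printf("used=%lld high=%lld\n",
                 (long long)sqlite3_memory_used(),
                 (long long)sqlite3_memory_highwater(0));

          sqlite3_free(p);

          /* A true argument resets the high-water mark to the current
          ** usage and returns the pre-reset peak (H17375). */
          printf("peak before reset=%lld\n",
                 (long long)sqlite3_memory_highwater(1));
          return 0;
        }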

    +
    H17392 +The sqlite3_randomness(N,P) interface writes N bytes of +high-quality pseudo-randomness into buffer P. +

    Parents: S20000 +

    +

    No children

    +
    H17403 +The sqlite3_mprintf() and sqlite3_vmprintf() interfaces +return either pointers to zero-terminated UTF-8 strings held in +memory obtained from sqlite3_malloc() or NULL pointers if +a call to sqlite3_malloc() fails. +

    Parents: S70000 +

    +

    No children

    +
    H17406 +The sqlite3_snprintf() interface writes a zero-terminated +UTF-8 string into the buffer pointed to by the second parameter +provided that the first parameter is greater than zero. +

    Parents: S70000 +

    +

    No children

    +
    H17407 +The sqlite3_snprintf() interface does not write slots of +its output buffer (the second parameter) outside the range +of 0 through N-1 (where N is the first parameter) +regardless of the length of the string +requested by the format specification. +

    Parents: S70000 +

    +

    No children
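
    A sketch of the formatting interfaces covered by H17403 through H17407;
    the SQL text and values are arbitrary:

        #include <sqlite3.h>
        #include <stdio.h>

        int main(void){
          /* The result lives in sqlite3_malloc() memory, or is NULL on
          ** allocation failure (H17403). */
          char *zSql = sqlite3_mprintf("SELECT * FROM t WHERE name=%Q", "O'Brien");
          if( zSql ){
            printf("%s\n", zSql);
            sqlite3_free(zSql);
          }

          /* sqlite3_snprintf(N,buf,...) writes only buf[0..N-1] and
          ** zero-terminates when N>0 (H17406, H17407). Unlike C snprintf(),
          ** the buffer size is the first parameter. */
          char zBuf[10];
          sqlite3_snprintf(sizeof(zBuf), zBuf, "value=%d", 123456789);
          printf("%s\n", zBuf);
          return 0;
        }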

    +
    H17813 +A successful invocation of the sqlite3_blob_open(D,B,T,C,R,F,P) +interface shall open an sqlite3_blob object P on the BLOB +in column C of the table T in the database B on +the database connection D. +

    Parents: S30230 +

    +

    No children

    +
    H17814 +A successful invocation of sqlite3_blob_open(D,...) shall start +a new transaction on the database connection D if that +connection is not already in a transaction. +

    Parents: S30230 +

    +

    No children

    +
    H17816 +The sqlite3_blob_open(D,B,T,C,R,F,P) interface shall open +the BLOB for read and write access if and only if the F +parameter is non-zero. +

    Parents: S30230 +

    +

    No children

    +
    H17819 +The sqlite3_blob_open() interface shall return SQLITE_OK on +success and an appropriate error code on failure. +

    Parents: S30230 +

    +

    No children

    +
    H17821 +If an error occurs during evaluation of sqlite3_blob_open(D,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error. +

    Parents: S30230 +

    +

    No children

    +
    H17824 +If any column in the row that a sqlite3_blob has open is +changed by a separate UPDATE or DELETE statement or by +an ON CONFLICT side effect, then the sqlite3_blob shall +be marked as invalid. +

    Parents: S30230 +

    +

    No children

    +
    H17833 +The sqlite3_blob_close(P) interface closes an sqlite3_blob +object P previously opened using sqlite3_blob_open(). +

    Parents: S30230 +

    +

    No children

    +
    H17836 +Closing an sqlite3_blob object using +sqlite3_blob_close() shall cause the current transaction to +commit if there are no other open sqlite3_blob objects +or prepared statements on the same database connection and +the database connection is in autocommit mode. +

    Parents: S30230 +

    +

    No children

    +
    H17839 +The sqlite3_blob_close(P) interface shall close the +sqlite3_blob object P unconditionally, even if +sqlite3_blob_close(P) returns something other than SQLITE_OK. +

    Parents: S30230 +

    +

    No children

    +
    H17843 +The sqlite3_blob_bytes(P) interface returns the size +in bytes of the BLOB that the sqlite3_blob object P +refers to. +

    Parents: S30230 +

    +

    No children

    +
    H17853 +A successful invocation of sqlite3_blob_read(P,Z,N,X) +shall read N bytes of data out of the BLOB referenced by +BLOB handle P beginning at offset X and store those bytes +into buffer Z. +

    Parents: S30230 +

    +

    No children

    +
    H17856 +In sqlite3_blob_read(P,Z,N,X) if the size of the BLOB +is less than N+X bytes, then the function shall leave the +Z buffer unchanged and return SQLITE_ERROR. +

    Parents: S30230 +

    +

    No children

    +
    H17859 +In sqlite3_blob_read(P,Z,N,X) if X or N is less than zero +then the function shall leave the Z buffer unchanged +and return SQLITE_ERROR. +

    Parents: S30230 +

    +

    No children

    +
    H17862 +The sqlite3_blob_read(P,Z,N,X) interface shall return SQLITE_OK +if N bytes are successfully read into buffer Z. +

    Parents: S30230 +

    +

    No children

    +
    H17863 +If the BLOB handle P is expired and X and N are within bounds +then sqlite3_blob_read(P,Z,N,X) shall leave the Z buffer +unchanged and return SQLITE_ABORT. +

    Parents: S30230 +

    +

    No children

    +
    H17865 +If the requested read could not be completed, +the sqlite3_blob_read(P,Z,N,X) interface shall return an +appropriate error code or extended error code. +

    Parents: S30230 +

    +

    No children

    +
    H17868 +If an error occurs during evaluation of sqlite3_blob_read(P,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error, where D is the +database connection that was used to open the BLOB handle P. +

    Parents: S30230 +

    +

    No children

    +
    H17873 +A successful invocation of sqlite3_blob_write(P,Z,N,X) +shall write N bytes of data from buffer Z into the BLOB +referenced by BLOB handle P beginning at offset X into +the BLOB. +

    Parents: S30230 +

    +

    No children

    +
    H17874 +In the absence of other overriding changes, the changes +written to a BLOB by sqlite3_blob_write() shall +remain in effect after the associated BLOB handle expires. +

    Parents: S30230 +

    +

    No children

    +
    H17875 +If the BLOB handle P was opened for reading only then +an invocation of sqlite3_blob_write(P,Z,N,X) shall leave +the referenced BLOB unchanged and return SQLITE_READONLY. +

    Parents: S30230 +

    +

    No children

    +
    H17876 +If the size of the BLOB referenced by BLOB handle P is +less than N+X bytes then sqlite3_blob_write(P,Z,N,X) shall +leave the BLOB unchanged and return SQLITE_ERROR. +

    Parents: S30230 +

    +

    No children

    +
    H17877 +If the BLOB handle P is expired and X and N are within bounds +then sqlite3_blob_write(P,Z,N,X) shall leave the BLOB +unchanged and return SQLITE_ABORT. +

    Parents: S30230 +

    +

    No children

    +
    H17879 +If X or N are less than zero then sqlite3_blob_write(P,Z,N,X) +shall leave the BLOB referenced by BLOB handle P unchanged +and return SQLITE_ERROR. +

    Parents: S30230 +

    +

    No children

    +
    H17882 +The sqlite3_blob_write(P,Z,N,X) interface shall return +SQLITE_OK if N bytes were successfully written into the BLOB. +

    Parents: S30230 +

    +

    No children

    +
    H17885 +If the requested write could not be completed, +the sqlite3_blob_write(P,Z,N,X) interface shall return an +appropriate error code or extended error code. +

    Parents: S30230 +

    +

    No children

    +
    H17888 +If an error occurs during evaluation of sqlite3_blob_write(P,...) +then subsequent calls to sqlite3_errcode(D), +sqlite3_extended_errcode(), +sqlite3_errmsg(D), and sqlite3_errmsg16(D) shall return +information appropriate for that error, where D is the +database connection that was used to open the BLOB handle P. +

    Parents: S30230 +

    +

    No children
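
    To illustrate the incremental BLOB I/O requirements above, a sketch that
    patches the first four bytes of a BLOB in place; the table "docs", column
    "data" and the "NEW!" payload are hypothetical:

        #include <sqlite3.h>
        #include <stdio.h>
        #include <string.h>

        int patchBlob(sqlite3 *db, sqlite3_int64 iRow){
          sqlite3_blob *pBlob = 0;
          /* A non-zero flags parameter opens the BLOB for read/write (H17816). */
          int rc = sqlite3_blob_open(db, "main", "docs", "data", iRow, 1, &pBlob);
          if( rc!=SQLITE_OK ){
            fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));   /* H17821 */
            return rc;
          }
          char zHdr[4];
          rc = sqlite3_blob_bytes(pBlob)>=4 ? SQLITE_OK : SQLITE_ERROR; /* H17843 */
          if( rc==SQLITE_OK ) rc = sqlite3_blob_read(pBlob, zHdr, 4, 0); /* H17853 */
          if( rc==SQLITE_OK ){
            memcpy(zHdr, "NEW!", 4);
            rc = sqlite3_blob_write(pBlob, zHdr, 4, 0);                  /* H17873 */
          }
          /* The handle is closed even when an error code is returned (H17839). */
          sqlite3_blob_close(pBlob);
          return rc;
        }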

    +
    H30010 +The system shall ensure that at the successful conclusion of a +database transaction the contents of the database file constitute +a well-formed SQLite database file. +

    No parents

    +

    No children

    +
    H30020 +The system shall ensure that at the successful conclusion of a +database transaction the contents of the database file are a valid +serialization of the contents of the logical SQL database produced +by the transaction. +

    No parents

    +

    No children

    +
    H30030 +The first 16 bytes of a well-formed database file shall contain +the UTF-8 encoding of the string "SQLite format 3" followed by a +single nul-terminator byte. +

    No parents

    +

    No children

    +
    H30040 +The 6 bytes beginning at byte offset 18 of a well-formed database +image shall contain the values 0x01, 0x01, 0x00, 0x40, 0x20 and +0x20, respectively. +

    No parents

    +

    No children

    +
    H30120 +The 4-byte big-endian signed integer field at byte offset 44 of a +well-formed database image, the schema layer file format field, +shall be set to an integer value between 1 and 4, inclusive. +

    No parents

    +

    No children

    +
    H30130 +The 4-byte big-endian unsigned integer field at byte offset 48 of a +well-formed database image shall be set to the value of the +database default page-cache size. +

    No parents

    +

    No children

    +
    H30140 +If the database is not an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the value 0. +

    No parents

    +

    No children

    +
    H30141 +If the database is an auto-vacuum capable database, then the 4 byte +big-endian unsigned integer field at byte offset 52 of a well-formed +database image shall contain the numerically largest root-page number +of any table or index B-Tree within the database image. +

    No parents

    +

    No children

    +
    H30150 +The 4 byte big-endian unsigned integer field at byte offset 56 of a +well-formed database image shall be set to 1 if the database text-encoding +is UTF-8, 2 if the database text-encoding is little-endian UTF-16, and 3 +if the database text-encoding is big-endian UTF-16. +

    No parents

    +

    No children

    +
    H30160 +The 4-byte big-endian unsigned integer field at byte offset 60 of a +well-formed database image shall be set to the value of the +database user-cookie. +

    No parents

    +

    No children

    +
    H30190 +The 2-byte big-endian unsigned integer field at byte offset 16 of a +well-formed database image shall be set to the value of the database +page-size. +

    No parents

    +

    No children

    +
    H30191 +The page-size of an SQLite database in bytes shall be an integer power +of 2 between 512 and 32768, inclusive. +

    No parents

    +

    No children
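
    The header-field requirements above translate directly into code that
    inspects the first 100 bytes of a database file; this sketch assumes the
    caller has already read those bytes into aHdr[]:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        /* Decode an n-byte big-endian unsigned integer. */
        static uint32_t beUint(const unsigned char *p, int n){
          uint32_t v = 0;
          int i;
          for(i=0; i<n; i++) v = (v<<8) | p[i];
          return v;
        }

        void printHeader(const unsigned char aHdr[100]){
          /* 16-byte magic string including its nul terminator (H30030). */
          printf("magic ok:      %d\n", memcmp(aHdr, "SQLite format 3", 16)==0);
          printf("page size:     %u\n", beUint(&aHdr[16], 2)); /* H30190, H30191 */
          printf("file format:   %u\n", beUint(&aHdr[44], 4)); /* H30120: 1..4 */
          printf("text encoding: %u\n", beUint(&aHdr[56], 4)); /* H30150: 1, 2 or 3 */
        }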

    +
    H30170 +The 4-byte big-endian unsigned integer field at byte offset 64 of a +well-formed database image shall be set to the value of the database +incremental-vacuum flag. +

    No parents

    +

    No children

    +
    H30171 +The value of the incremental-vacuum flag of an SQLite database shall be +either 0 or 1. +

    No parents

    +

    No children

    +
    H30180 +In a well-formed non-autovacuum database (one with a zero stored +in the 4-byte big-endian integer value beginning at byte offset +52 of the database file header), the incremental vacuum flag is +set to 0. +

    No parents

    +

    No children

    +
    H30200 +The size of a well formed database image shall be an integer +multiple of the database page size. +

    No parents

    +

    No children

    +
    H30210 +Each page of a well formed database image shall be exactly one of a +B-Tree page, an overflow page, a free page, a +pointer-map page or the locking page. +

    No parents

    +

    No children

    +
    H30220 +The database page that starts at byte offset 2^30 (1073741824), the +locking page, shall never be used for any purpose. +

    No parents

    +

    No children

    +
    H30230 +In a well-formed database file, the portion of the first +database page not consumed by the database file-header (all but the +first 100 bytes) contains the root node of a table B-Tree, +the schema table. +

    No parents

    +

    No children

    +
    H30240 +All records stored in the schema table contain exactly five +fields. +

    No parents

    +

    No children

    +
    H30250 +For each SQL table in the database apart from the schema table itself +("sqlite_master"), the schema table of a well-formed +database file contains an associated record. +

    No parents

    +

    No children

    +
    H30260 +The first field of each schema table record associated with an +SQL table shall be the text value "table". +

    No parents

    +

    No children

    +
    H30270 +The second field of each schema table record associated with an +SQL table shall be a text value set to the name of the SQL table. +

    No parents

    +

    No children

    +
    H30280 +In a well-formed database file, the third field of all +schema table records associated with SQL tables shall contain +the same value as the second field. +

    No parents

    +

    No children

    +
    H30290 +In a well-formed database file, the fourth field of all +schema table records associated with SQL tables that are not +virtual tables contains the page number (an integer value) of the root +page of the associated table B-Tree structure within the +database file. +

    No parents

    +

    No children

    +
    H30300 +If the associated database table is a virtual table, the fourth +field of the schema table record shall contain the integer +value 0 (zero). +

    No parents

    +

    No children

    +
    H30310 +In a well-formed database, the fifth field of all schema table +records associated with SQL tables shall contain a "CREATE TABLE" +or "CREATE VIRTUAL TABLE" statement (a text value). The details +of the statement shall be such that executing the statement +would create a table of precisely the same name and schema as the +existing database table. +

    No parents

    +

    No children

    +
    H30320 +For each PRIMARY KEY or UNIQUE constraint present in the definition +of each SQL table in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index", and the second field set to a text value containing a +string of the form "sqlite_autoindex_<name>_<idx>", where +<name> is the name of the SQL table and <idx> is an +integer value. +

    No parents

    +

    No children

    +
    H30330 +In a well-formed database, the third field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the name of the table to which the constraint applies (a +text value). +

    No parents

    +

    No children

    +
    H30340 +In a well-formed database, the fourth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain the page number (an integer value) of the root page of the +associated index B-Tree structure. +

    No parents

    +

    No children

    +
    H30350 +In a well-formed database, the fifth field of all schema table +records associated with SQL PRIMARY KEY or UNIQUE constraints shall +contain an SQL NULL value. +

    No parents

    +

    No children

    +
    H30360 +For each SQL index in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "index" and the second field set to a text value containing the +name of the SQL index. +

    No parents

    +

    No children

    +
    H30370 +In a well-formed database, the third field of all schema table +records associated with SQL indexes shall contain the name of the +SQL table that the index applies to. +

    No parents

    +

    No children

    +
    H30380 +In a well-formed database, the fourth field of all schema table +records associated with SQL indexes shall contain the page number +(an integer value) of the root page of the associated index B-Tree +structure. +

    No parents

    +

    No children

    +
    H30390 +In a well-formed database, the fifth field of all schema table +records associated with SQL indexes shall contain an SQL "CREATE +INDEX" statement (a text value). The details of the statement shall +be such that executing the statement would create an index of +precisely the same name and content as the existing database index. +

    No parents

    +

    No children

    +
    H30400 +For each SQL view in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "view" and the second field set to a text value containing the +name of the SQL view. +

    No parents

    +

    No children

    +
    H30410 +In a well-formed database, the third field of all schema table +records associated with SQL views shall contain the same value as +the second field. +

    No parents

    +

    No children

    +
    H30420 +In a well-formed database, the fourth field of all schema table +records associated with SQL views shall contain the integer value 0. +

    No parents

    +

    No children

    +
    H30430 +In a well-formed database, the fifth field of all schema table +records associated with SQL views shall contain an SQL "CREATE +VIEW" statement (a text value). The details of the statement shall +be such that executing the statement would create a view of +precisely the same name and definition as the existing database view. +

    No parents

    +

    No children

    +
    H30440 +For each SQL trigger in the database, the schema table of a well-formed +database shall contain a record with the first field set to the text +value "trigger" and the second field set to a text value containing the +name of the SQL trigger. +

    No parents

    +

    No children

    +
    H30450 +In a well-formed database, the third field of all schema table +records associated with SQL triggers shall contain the name of the +database table or view to which the trigger applies. +

    No parents

    +

    No children

    +
    H30460 +In a well-formed database, the fourth field of all schema table +records associated with SQL triggers shall contain the integer value 0. +

    No parents

    +

    No children

    +
    H30470 +In a well-formed database, the fifth field of all schema table +records associated with SQL triggers shall contain an SQL "CREATE +TRIGGER" statement (a text value). The details of the statement shall +be such that executing the statement would create a trigger of +precisely the same name and definition as the existing database trigger. +

    No parents

    +

    No children

    +
    H30480 +In an auto-vacuum database, all pages that occur before the page +number stored in the auto-vacuum last root-page field +of the database file header (see H30140) must be either B-Tree root +pages, pointer-map pages or the locking page. +

    No parents

    +

    No children

    +
    H30490 +In an auto-vacuum database, no B-Tree root pages may occur +on or after the page number stored in the auto-vacuum last root-page field +of the database file header (see H30140). +

    No parents

    +

    No children

    +
    H30500 +As well as the schema table, a well-formed database file +contains N table B-Tree structures, where N is the +number of non-virtual tables in the logical database, excluding the +sqlite_master table but including sqlite_sequence and other system +tables. +

    No parents

    +

    No children

    +
    H30510 +A well-formed database file contains N index B-Tree structures, +where N is the number of indexes in the logical database, +including indexes created by UNIQUE or PRIMARY KEY clauses in the +declaration of SQL tables. +

    No parents

    +

    No children

    +
    H30520 +A 64-bit signed integer value stored in variable length integer +format consumes from 1 to 9 bytes of space. +

    No parents

    +

    No children

    +
    H30530 +The most significant bit of all bytes except the last in a serialized +variable length integer is always set. Unless the serialized +form consumes the maximum 9 bytes available, the most significant +bit of the final byte of the representation is always cleared. +

    No parents

    +

    No children

    +
    H30540 +The eight least significant bits of the 64-bit twos-complement +representation of a value stored in a 9 byte variable length +integer are stored in the final byte (byte offset 8) of the +serialized variable length integer. The other 56 bits are +stored in the 7 least significant bits of each of the first 8 bytes +of the serialized variable length integer, in order from +most significant to least significant. +

    No parents

    +

    No children

    +
    H30550 +A variable length integer that consumes less than 9 bytes of +space contains a value represented as an N-bit unsigned +integer, where N is equal to the number of bytes consumed by +the serial representation (between 1 and 8) multiplied by 7. The +N bits are stored in the 7 least significant bits of each +byte of the serial representation, from most to least significant. +

    No parents

    +

    No children
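
    The variable length integer format described by H30520 through H30550
    decodes with a short routine; this is a sketch, not the library's own
    implementation:

        #include <stdint.h>

        /* Decode a varint from p; store the value in *pVal and return the
        ** number of bytes consumed (1 to 9, per H30520). */
        int decodeVarint(const unsigned char *p, int64_t *pVal){
          uint64_t v = 0;
          int i;
          for(i=0; i<8; i++){
            v = (v<<7) | (p[i] & 0x7f);
            if( (p[i] & 0x80)==0 ){   /* high bit clear marks the last byte (H30530) */
              *pVal = (int64_t)v;
              return i+1;
            }
          }
          /* Nine-byte form: the final byte contributes all eight of its
          ** bits (H30540). */
          v = (v<<8) | p[8];
          *pVal = (int64_t)v;
          return 9;
        }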

    +
    H30560 +A database record consists of a database record header, +followed by database record data. The first part of the +database record header is a variable length integer +containing the total size (including itself) of the header in bytes. +

    No parents

    +

    No children

    +
    H30570 +Following the length field, the remainder of the database record +header is populated with N variable length integer +fields, where N is the number of database values stored in +the record. +

    No parents

    +

    No children

    +
    H30580 +Following the database record header, the database record +data is made up of N variable length blobs of data, where +N is again the number of database values stored in the record. +The nth blob contains the data for the nth value in +the database record. The size and format of each blob of data is +encoded in the corresponding variable length integer field +in the database record header. +

    No parents

    +

    No children

    +
    H30590 +A value of 0 stored within the database record header indicates +that the corresponding database value is an SQL NULL. In this case +the blob of data in the data area is 0 bytes in size. +

    No parents

    +

    No children

    +
    H30600 +A value of 1 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 1-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30610 +A value of 2 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 2-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30620 +A value of 3 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 3-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30630 +A value of 4 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 4-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30640 +A value of 5 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 6-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30650 +A value of 6 stored within the database record header indicates +that the corresponding database value is an SQL integer. In this case +the blob of data contains the integer value, formatted as a 8-byte +big-endian signed integer. +

    No parents

    +

    No children

    +
    H30660 +A value of 7 stored within the database record header indicates +that the corresponding database value is an SQL real (floating +point number). In this case the blob of data contains an 8-byte +IEEE floating point number, stored in big-endian byte order. +

    No parents

    +

    No children

    +
    H30670 +A value of 8 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 0. +In this case the blob of data in the data area is 0 bytes in size. +

    No parents

    +

    No children

    +
    H30680 +A value of 9 stored within the database record header indicates +that the corresponding database value is an SQL integer, value 1. +In this case the blob of data in the data area is 0 bytes in size. +

    No parents

    +

    No children

    +
    H30690 +An even value greater than or equal to 12 stored within the +database record header indicates that the corresponding +database value is an SQL blob field. The blob of data contains the +value data. The blob of data is exactly (n-12)/2 bytes +in size, where n is the integer value stored in the +database record header. +

    No parents

    +

    No children

    +
    H30700 +An odd value greater than or equal to 13 stored within the +database record header indicates that the corresponding +database value is an SQL text field. The blob of data contains the +value text stored using the database encoding, with no +nul-terminator. The blob of data is exactly (n-13)/2 bytes +in size, where n is the integer value stored in the +database record header. +

    No parents

    +

    No children

    +
    H30710 +In a well-formed database file, if the values 8 or 9 appear within +any database record header within the database, then the +schema-layer file format (stored at byte offset 44 of the +database file header) must be set to 4. +

    No parents

    +

    No children

    +
    H30720 +In a well-formed database file, the values 10 and 11, and all +negative values may not appear within any database record header +in the database. +

    No parents

    +

    No children
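
    The mapping from record header values (serial types) to data sizes in
    H30590 through H30720 is summarized by the following sketch:

        #include <stdint.h>

        /* Size in bytes of the data blob for a given serial type; -1 for
        ** the illegal values 10, 11 and negatives (H30720). */
        int64_t serialTypeSize(int64_t t){
          switch( t ){
            case 0: return 0;   /* NULL                 (H30590) */
            case 1: return 1;   /* 1-byte integer       (H30600) */
            case 2: return 2;   /* 2-byte integer       (H30610) */
            case 3: return 3;   /* 3-byte integer       (H30620) */
            case 4: return 4;   /* 4-byte integer       (H30630) */
            case 5: return 6;   /* 6-byte integer       (H30640) */
            case 6: return 8;   /* 8-byte integer       (H30650) */
            case 7: return 8;   /* 8-byte IEEE real     (H30660) */
            case 8: return 0;   /* integer constant 0   (H30670) */
            case 9: return 0;   /* integer constant 1   (H30680) */
          }
          if( t>=12 && t%2==0 ) return (t-12)/2;   /* BLOB (H30690) */
          if( t>=13 && t%2==1 ) return (t-13)/2;   /* text (H30700) */
          return -1;
        }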

    +
    H30730 +The pages in an index B-Tree structure are arranged into a tree +structure such that all leaf pages are at the same depth. +

    No parents

    +

    No children

    +
    H30740 +Each leaf node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a database record. +

    No parents

    +

    No children

    +
    H30750 +Each internal node page in an index B-Tree contains one or more +B-Tree cells, where each cell contains a child page number, C, +and a database record R. All database records stored within +the sub-tree headed by page C are smaller than record R, +according to the index sort order (see below). Additionally, unless +R is the smallest database record stored on the internal node +page, all database records within the sub-tree headed by C are +greater than R-1, where R-1 is the +largest database record on the internal node page that is smaller +than R. +

    No parents

    +

    No children

    +
    H30760 +As well as child page numbers associated with B-Tree cells, each +internal node page in an index B-Tree contains the page number +of an extra child page, the right-child page. All database +records stored in all B-Tree cells within the sub-tree headed by the +right-child page are greater than all database records +stored within B-Tree cells on the internal node page. +

    No parents

    +

    No children

    +
    H30770 +In a well-formed database, each index B-Tree contains a single entry +for each row in the indexed logical database table. +

    No parents

    +

    No children

    +
    H30780 +Each database record (key) stored by an index B-Tree in a +well-formed database contains the same number of values, the number +of indexed columns plus one. +

    No parents

    +

    No children

    +
    H30790 +The final value in each database record (key) stored by an +index B-Tree in a well-formed database contains the rowid (an integer +value) of the corresponding logical database row. +

    No parents

    +

    No children

    +
    H30800 +The first N values in each database record (key) +stored in an index B-Tree where N is the number of indexed +columns, contain the values of the indexed columns from the +corresponding logical database row, in the order specified for the +index. +

    No parents

    +

    No children

    +
    H30810 +The b-tree page flags field (the first byte) of each database +page used as an internal node of an index B-Tree structure is set to +0x02. +

    No parents

    +

    No children

    +
    H30820 +The b-tree page flags field (the first byte) of each database +page used as a leaf node of an index B-Tree structure is set to 0x0A. +

    No parents

    +

    No children

    +
    H30830 +The first byte of each database page used as a B-Tree page contains +the b-tree page flags field. On page 1, the b-tree page +flags field is stored directly after the 100 byte file header +at byte offset 100. +

    No parents

    +

    No children

    +
    H30840 +The number of B-Tree cells stored on a B-Tree page is stored as a +2-byte big-endian integer starting at byte offset 3 of the B-Tree +page. On page 1, this field is stored at byte offset 103. +

    No parents

    +

    No children

    +
    H30850 +The 2-byte big-endian integer starting at byte offset 5 of each +B-Tree page contains the byte-offset from the start of the page +to the start of the cell content area, which consumes all space +from this offset to the end of the usable region of the page. +On page 1, this field is stored at byte offset 105. All B-Tree +cells on the page are stored within the cell-content area. +

    No parents

    +

    No children

    +
    H30860 +On each page used as an internal node of a B-Tree structure, the +page number of the rightmost child node in the B-Tree structure is +stored as a 4-byte big-endian unsigned integer beginning at byte +offset 8 of the database page, or byte offset 108 on page 1. +

    No parents

    +

    No children

    +
    H30870 +Immediately following the page header on each B-Tree page is the +cell offset array, consisting of N 2-byte big-endian +unsigned integers, where N is the number of cells stored +on the B-Tree page (H30840). On an internal node B-Tree page, +the cell offset array begins at byte offset 12, or on a leaf +page, byte offset 8. For the B-Tree node on page 1, these +offsets are 112 and 108, respectively. +

    No parents

    +

    No children

    +
    H30880 +The cell offset array and the cell content area (H30850) +may not overlap. +

    No parents

    +

    No children

    +
    H30890 +Each value stored in the cell offset array must be greater +than or equal to the offset to the cell content area (H30850), +and less than the database page size. +

    No parents

    +

    No children

    +
    H30900 +The N values stored within the cell offset array are the +byte offsets from the start of the B-Tree page to the beginning of +each of the N cells stored on the page. +

    No parents

    +

    No children

    +
    H30910 +No two B-Tree cells may overlap. +

    No parents

    +

    No children

    +
    H30920 +Within the cell content area, all blocks of contiguous +free-space (space not used by B-Tree cells) greater than 3 bytes in +size are linked together into a linked list, the free block list. +Such blocks of free space are known as free blocks. +

    No parents

    +

    No children

    +
    H30930 +The first two bytes of each free block contain the offset +of the next free block in the free block list formatted +as a 2-byte big-endian integer, relative to the start of the database +page. If there is no next free block, then the first two +bytes are set to 0x00. +

    No parents

    +

    No children

    +
    H30940 +The second two bytes (byte offsets 2 and 3) of each free block +contain the total size of the free block, formatted as a 2-byte +big-endian integer. +

    No parents

    +

    No children

    +
    H30950 +On all B-Tree pages, the offset of the first free block in the +free block list, relative to the start of the database page, +is stored as a 2-byte big-endian integer starting at byte offset +1 of the database page. If there is no first free block +(because the free block list is empty), then the two bytes +at offsets 1 and 2 of the database page are set to 0x00. On page 1, +this field is stored at byte offset 101 of the page. +

    No parents

    +

    No children

    +
    H30960 +Within the cell-content area, all blocks of contiguous free-space +(space not used by B-Tree cells) less than or equal to 3 bytes in +size are known as fragments. The total size of all +fragments on a B-Tree page is stored as a 1-byte unsigned +integer at byte offset 7 of the database page. On page 1, this +field is stored at byte offset 107. +

    No parents

    +

    No children

    +
    H30970 +Each B-Tree cell belonging to an internal node page of an index +B-Tree consists of a 4-byte big-endian unsigned integer, the +child page number, followed by a variable length integer +field, followed by a database record. The +variable length integer field contains the length of the +database record in bytes. +

    No parents

    +

    No children

    +
    H30980 +Each B-Tree cell belonging to a leaf page of an index B-Tree +consists of a variable length integer field, followed by +a database record. The variable length integer field +contains the length of the database record in bytes. +

    No parents

    +

    No children

    +
    H30990 +If the database record stored in an index B-Tree page is +sufficiently small, then the entire cell is stored within the +index B-Tree page. Sufficiently small is defined as equal to or +less than max-local, where: + +max-local := (usable-size - 12) * 64 / 255 - 23 +

    No parents

    +

    No children

    +
    H31000 +If the database record stored as part of an index B-Tree cell is too +large to be stored entirely within the B-Tree page (as defined by +H30990), then only a prefix of the database record is stored +within the B-Tree page and the remainder stored in an overflow +chain. In this case, the database record prefix is immediately +followed by the page number of the first page of the +overflow chain, formatted as a 4-byte big-endian unsigned +integer. +

    No parents

    +

    No children

    +
    H31010 +When a database record belonging to an index B-Tree cell is +stored partially within an overflow page chain, the size +of the prefix stored within the index B-Tree page is N bytes, +where N is calculated using the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 12) * 64 / 255 - 23 +N := min-local + ((record-size - min-local) % (usable-size - 4)) +if( N > max-local ) N := min-local +

    No parents

    +

    No children
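
    The H30990/H31010 formulas for how much of an index B-Tree cell's record
    stays on the page can be written out as a small helper; this is a sketch
    using the names from the requirements:

        /* Bytes of an index B-Tree record stored locally on the page. */
        int indexLocalSize(int usableSize, int recordSize){
          int maxLocal = (usableSize-12)*64/255 - 23;    /* H30990 */
          int minLocal = (usableSize-12)*32/255 - 23;
          int n;
          if( recordSize<=maxLocal ) return recordSize;  /* fits entirely */
          n = minLocal + (recordSize-minLocal) % (usableSize-4);
          if( n>maxLocal ) n = minLocal;                 /* H31010 */
          return n;
        }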

    +
    H31020 +The pages in a table B-Tree structure are arranged into a tree +structure such that all leaf pages are at the same depth. +

    No parents

    +

    No children

    +
    H31030 +Each leaf page in a table B-Tree structure contains one or more +B-Tree cells, where each cell contains a 64-bit signed integer key +value and a database record. +

    No parents

    +

    No children

    +
    H31040 +Each internal node page in a table B-Tree structure contains one or +more B-Tree cells, where each cell contains a 64-bit signed integer +key value, K, and a child page number, C. All integer key +values in all B-Tree cells within the sub-tree headed by page C +are less than or equal to K. Additionally, unless K +is the smallest integer key value stored on the internal node page, +all integer keys within the sub-tree headed by C are greater +than K-1, where K-1 is the largest +integer key on the internal node page that is smaller than K. +

    No parents

    +

    No children

    +
    H31050 +As well as child page numbers associated with B-Tree cells, each +internal node page in a table B-Tree contains the page number +of an extra child page, the right-child page. All key values +in all B-Tree cells within the sub-tree headed by the right-child +page are greater than all key values stored within B-Tree cells +on the internal node page. +

    No parents

    +

    No children

    +
    H31060 +In a well-formed database, each table B-Tree contains a single entry +for each row in the corresponding logical database table. +

    No parents

    +

    No children

    +
    H31070 +The key value (a 64-bit signed integer) for each B-Tree entry is +the same as the value of the rowid field of the corresponding +logical database row. +

    No parents

    +

    No children

    +
    H31080 +The SQL values serialized to make up each database record +stored as ancillary data in a table B-Tree shall be equal to the +values taken by the N leftmost columns of the corresponding +logical database row, where N is the number of values in the +database record. +

    No parents

    +

    No children

    +
    H31090 +If a logical database table column is declared as an "INTEGER +PRIMARY KEY", then instead of its integer value, an SQL NULL +shall be stored in its place in any database records used as +ancillary data in a table B-Tree. +

    No parents

    +

    No children

    +
    H31100 +If the database schema layer file-format (the value stored +as a 4-byte integer at byte offset 44 of the file header) is 1, +then all database records stored as ancillary data in a table +B-Tree structure have the same number of fields as there are +columns in the corresponding logical database table. +

    No parents

    +

    No children

    +
    H31110 +If the database schema layer file-format value is two or +greater and the rightmost M columns of a row contain SQL NULL +values, then the corresponding record stored as ancillary data in +the table B-Tree has between N-M and N fields, +where N is the number of columns in the logical database +table. +

    No parents

    +

    No children

    +
    H31120 +If the database schema layer file-format value is three or +greater and the rightmost M columns of a row contain their +default values according to the logical table declaration, then the +corresponding record stored as ancillary data in the table B-Tree +may have as few as N-M fields, where N is the +number of columns in the logical database table. +

    No parents

    +

    No children

    +
    H31130 +In a well-formed database file, the first byte of each page used +as an internal node of a table B-Tree structure is set to 0x05. +

    No parents

    +

    No children

    +
    H31140 +In a well-formed database file, the first byte of each page used +as a leaf node of a table B-Tree structure is set to 0x0D. +

    No parents

    +

    No children

    +
    H31150 +B-Tree cells belonging to table B-Tree internal node pages consist +of exactly two fields, a 4-byte big-endian unsigned integer +immediately followed by a variable length integer. These +fields contain the child page number and key value respectively +(see H31030). +

    No parents

    +

    No children

    +
    H31160 +B-Tree cells belonging to table B-Tree leaf node pages consist +of three fields, two variable length integer values +followed by a database record. The size of the database record +in bytes is stored in the first of the two +variable length integer fields. The second of the two +variable length integer fields contains the 64-bit signed +integer key (see H31030). +

    No parents

    +

    No children

    +
    H31170 +If the size of the record stored in a table B-Tree leaf page cell +is less than or equal to (usable page size-35) bytes, then +the entire cell is stored on the B-Tree leaf page. In a well-formed +database, usable page size is the same as the database +page size. +

    No parents

    +

    No children

    +
    H31180 +If a table B-Tree cell is too large to be stored entirely on +a leaf page (as defined by H31170), then a prefix of the cell +is stored on the leaf page, and the remainder stored in an +overflow page chain. In this case the cell prefix +stored on the B-Tree leaf page is immediately followed by a +4-byte big-endian unsigned integer containing the page number +of the first overflow page in the chain. +

    No parents

    +

    No children

    +
    H31190 +When a table B-Tree cell is stored partially in an +overflow page chain, the prefix stored on the B-Tree +leaf page consists of the two variable length integer fields, +followed by the first N bytes of the database record, where +N is determined by the following algorithm: + +min-local := (usable-size - 12) * 32 / 255 - 23 +max-local := (usable-size - 35) +N := min-local + (record-size - min-local) % (usable-size - 4) +if( N > max-local ) N := min-local + +

    No parents

    +

    No children
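
    The analogous calculation for table B-Tree leaf cells, per H31170 and
    H31190, uses a different max-local threshold; again a sketch using the
    requirement's names:

        /* Bytes of a table B-Tree leaf cell's record stored locally. */
        int tableLocalSize(int usableSize, int recordSize){
          int maxLocal = usableSize - 35;                /* H31170 */
          int minLocal = (usableSize-12)*32/255 - 23;
          int n;
          if( recordSize<=maxLocal ) return recordSize;  /* whole cell on the leaf */
          n = minLocal + (recordSize-minLocal) % (usableSize-4);
          if( n>maxLocal ) n = minLocal;                 /* H31190 */
          return n;
        }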

    +
    H31200 +A single overflow page may store up to available-space +bytes of database record data, where available-space is equal +to (usable-size - 4). +

    No parents

    +

    No children

    +
    H31210 +When a database record is too large to store within a B-Tree page +(see H31170 and H31000), a prefix of the record is stored within +the B-Tree page and the remainder stored across N overflow +pages. In this case N is the minimum number of pages required +to store the portion of the record not stored on the B-Tree page, +given the maximum payload per overflow page defined by H31200. +

    No parents

    +

    No children

    +
    H31220 +The list of overflow pages used to store a single database record +are linked together in a singly linked list known as an +overflow chain. The first four bytes of each page except the +last in an overflow chain are used to store the page number +of the next page in the linked list, formatted as an unsigned +big-endian integer. The first four bytes of the last page in an +overflow chain are set to 0x00. +

    No parents

    +

    No children

    +
    H31230 +Each overflow page except the last in an overflow chain +contains N bytes of record data starting at byte offset 4 of +the page, where N is the maximum payload per overflow page, +as defined by H31200. The final page in an overflow chain +contains the remaining data, also starting at byte offset 4. +

    No parents

    +

    No children

    +
    H31240 +All free pages in a well-formed database file are part of +the database free page list. +

    No parents

    +

    No children

    +
    H31250 +Each free page is either a free list trunk page or a +free list leaf page. +

    No parents

    +

    No children

    +
    H31260 +All free list trunk pages are linked together into a singly +linked list. The first 4 bytes of each page in the linked list +contains the page number of the next page in the list, formatted +as an unsigned big-endian integer. The first 4 bytes of the last +page in the linked list are set to 0x00. +

    No parents

    +

    No children

    +
    H31270 +The second 4 bytes of each free list trunk page contains +the number of free list leaf page numbers stored on the free list +trunk page, formatted as an unsigned big-endian integer. +

    No parents

    +

    No children

    +
    H31280 +Beginning at byte offset 8 of each free list trunk page are +N page numbers, each formatted as a 4-byte unsigned big-endian +integer, where N is the value described in requirement H31270. +

    No parents

    +

    No children

    +
    H31290 +All page numbers stored on all free list trunk pages refer to +database pages that are free list leaves. +

    No parents

    +

    No children

    +
    H31300 +The page number of each free list leaf page in a well-formed +database file appears exactly once within the set of page numbers +stored on free list trunk pages. +

    No parents

    +

    No children

    +
    H31310 +The total number of pages in the free list, including all free list +trunk and free list leaf pages, is stored as a 4-byte unsigned +big-endian integer at offset 36 of the database file header. +

    No parents

    +

    No children

    +
    H31320 +The page number of the first page in the linked list of free list +trunk pages is stored as a 4-byte big-endian unsigned integer at +offset 32 of the database file header. If there are no free list +trunk pages in the database file, then the value stored at +offset 32 of the database file header is 0. +

    No parents

    +

    No children
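
    Walking the free page list described by H31260 through H31320 needs only
    the file header and the trunk pages; the PageReader callback is a
    hypothetical stand-in for whatever I/O layer the caller uses:

        #include <stdint.h>

        /* Load 1-based page pgno into aPage[] (page-size bytes); return 0 on success. */
        typedef int (*PageReader)(void *ctx, uint32_t pgno, unsigned char *aPage);

        static uint32_t be32(const unsigned char *p){
          return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
        }

        /* Count free-list leaf pages by following the trunk page chain. */
        int64_t countFreeLeaves(PageReader xRead, void *ctx,
                                unsigned char *aPage, const unsigned char aHdr[100]){
          int64_t nLeaf = 0;
          uint32_t trunk = be32(&aHdr[32]);   /* first trunk page, 0 if none (H31320) */
          while( trunk!=0 ){
            if( xRead(ctx, trunk, aPage) ) return -1;
            nLeaf += be32(&aPage[4]);         /* leaf page-number count (H31270, H31280) */
            trunk = be32(&aPage[0]);          /* next trunk; 0 terminates (H31260) */
          }
          return nLeaf;
        }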

    +
    H31330 +Non auto-vacuum databases do not contain pointer map pages. +

    No parents

    +

    No children

    +
    H31340 +In an auto-vacuum database file, every (num-entries + 1)th +page beginning with page 2 is designated a pointer-map page, where +num-entries is calculated as: + +num-entries := database-usable-page-size / 5 + +

    No parents

    +

    No children

    +
    H31350 +In an auto-vacuum database file, each pointer-map page contains +a pointer map entry for each of the num-entries (defined by +H31340) pages that follow it, if they exist. +

    No parents

    +

    No children

    +
    H31360 +Each pointer-map page entry consists of a 1-byte page type and a +4-byte page parent number, 5 bytes in total. +

    No parents

    +

    No children

    +
    H31370 +Pointer-map entries are packed into the pointer-map page in order, +starting at offset 0. The entry associated with the database +page that immediately follows the pointer-map page is located at +offset 0. The entry for the following page is located at offset 5, +and so on. +

    No parents

    +

    No children

    +
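
    Requirements H31340 through H31370 together determine which pages of an
    auto-vacuum database are pointer-map pages and where the entry for a given
    page is found. The following C fragment is a non-normative sketch; the
    names ptrmap_entries, ptrmap_pageno and ptrmap_entry_offset are invented
    here, and usable_size stands for the database usable page size of H31340.

        #include <stdint.h>

        /* Number of 5-byte entries on each pointer-map page (H31340, H31360). */
        static uint32_t ptrmap_entries(uint32_t usable_size){
          return usable_size / 5;
        }

        /* Page number of the pointer-map page covering page pgno (pgno > 1).
        ** Pointer-map pages occur every (num-entries + 1) pages, starting
        ** with page 2 (H31340, H31350). */
        static uint32_t ptrmap_pageno(uint32_t usable_size, uint32_t pgno){
          uint32_t nPerMap = ptrmap_entries(usable_size) + 1;
          return 2 + ((pgno - 2) / nPerMap) * nPerMap;
        }

        /* Byte offset of the entry for page pgno within its pointer-map page:
        ** entries are packed from offset 0 in page-number order (H31370). */
        static uint32_t ptrmap_entry_offset(uint32_t ptrmap_pgno, uint32_t pgno){
          return 5 * (pgno - ptrmap_pgno - 1);
        }
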
    H31380 +For each page except page 1 in an auto-vacuum database file that is +the root page of a B-Tree structure, the page type of the +corresponding pointer-map entry is set to the value 0x01 and the +parent page number is zero. +

    No parents

    +

    No children

    +
    H31390 +For each page that is a part of an auto-vacuum database file free-list, +the page type of the corresponding pointer-map entry is set to the +value 0x02 and the parent page number is zero. +

    No parents

    +

    No children

    +
    H31400 +For each page in a well-formed auto-vacuum database that is the first +page in an overflow chain, the page type of the corresponding +pointer-map entry is set to 0x03 and the parent page number field +is set to the page number of the B-Tree page that contains the start +of the B-Tree cell stored in the overflow-chain. +

    No parents

    +

    No children

    +
    H31410 +For each page that is the second or a subsequent page in an overflow +chain, the page type of the corresponding pointer-map entry is set to +0x04 and the parent page number field is set to the page number of the +preceding page in the overflow chain. +

    No parents

    +

    No children

    +
    H31420 +For each page that is not a root page but is a part of a B-Tree tree +structure (not part of an overflow chain), the page type of the +corresponding pointer-map entry is set to the value 0x05 and the parent +page number field is set to the page number of the parent node in the +B-Tree structure. +

    No parents

    +

    No children

    +
    H32000 +If a journal file contains a well-formed master-journal pointer and the +named master-journal file does not exist then the journal file shall be +considered invalid. +

    No parents

    +

    No children

    +
    H32010 +If the first 28 bytes of a journal file do not contain a well-formed +journal header, then the journal file shall be considered +invalid. +

    No parents

    +

    No children

    +
    H32020 +If the journal file exists within the file-system and none of H32000, +H32010 or H33080 applies, then the journal file shall be considered valid. +

    No parents

    +

    No children

    +
    H32030 +If there exists a valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 4-byte big-endian unsigned integer at byte +offset 24 of the journal file. +

    No parents

    +

    No children

    +
    H32040 +If there exists a valid journal file in the file-system, then the +number of pages in the database image shall be the value stored as +a 4-byte big-endian unsigned integer at byte offset 24 of the +journal file. +

    No parents

    +

    No children

    +
    H32050 +If there is no valid journal file in the file-system, then the +database page-size in bytes used to interpret the database image +shall be the value stored as a 2-byte big-endian unsigned integer at byte +offset 16 of the database file. +

    No parents

    +

    No children

    +
    H32060 +If there is no valid journal file in the file-system, then the +number of pages in the database image shall be calculated by dividing +the size of the database file in bytes by the database page-size. +

    No parents

    +

    No children

    +
    H32070 +If there exists a valid journal file in the file-system, then the +contents of each page of the database image for which there is a valid +journal record in the journal file shall be read from the +corresponding journal record. +

    No parents

    +

    No children

    +
    H32080 +The contents of all database image pages for which there is no valid +journal record shall be read from the database file. +

    No parents

    +

    No children

    +
    H32090 +A buffer of 28 bytes shall be considered a well-formed journal +header if it is not excluded by requirements H32180, H32190 or H32200. +

    No parents

    +

    No children

    +
    H32180 +A buffer of 28 bytes shall only be considered a well-formed journal +header if the first eight bytes of the buffer contain the values 0xd9, +0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively. +

    No parents

    +

    No children

    +
    H32190 +A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the sector size field (the 4-byte big-endian +unsigned integer at offset 20 of the buffer) contains a value that +is an integer power of two greater than 512. +

    No parents

    +

    No children

    +
    H32200 +A buffer of 28 bytes shall only be considered a well-formed journal +header if the value stored in the page size field (the 4-byte big-endian +unsigned integer at offset 24 of the buffer) contains a value that +is an integer power of two greater than 512. +

    No parents

    +

    No children

    +
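
    Requirements H32090, H32180, H32190 and H32200 amount to a simple predicate
    over a 28-byte buffer. A possible, non-normative rendering in C follows;
    journal_header_wellformed, get4byte and power_of_two_gt_512 are names
    chosen for this sketch only.

        #include <stdint.h>
        #include <string.h>

        static uint32_t get4byte(const unsigned char *p){
          return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
        }

        /* Non-zero iff x is an integer power of two greater than 512. */
        static int power_of_two_gt_512(uint32_t x){
          return x>512 && (x & (x-1))==0;
        }

        static int journal_header_wellformed(const unsigned char *aBuf){
          static const unsigned char aMagic[8] =
              { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };
          if( memcmp(aBuf, aMagic, 8)!=0 ) return 0;                /* H32180 */
          if( !power_of_two_gt_512(get4byte(&aBuf[20])) ) return 0; /* H32190 */
          if( !power_of_two_gt_512(get4byte(&aBuf[24])) ) return 0; /* H32200 */
          return 1;                                                 /* H32090 */
        }
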
    H32100 +A buffer of (8 + page size) bytes shall be considered a well-formed journal +record if it is not excluded by requirements H32110 or H32120. +

    No parents

    +

    No children

    +
    H32110 +A journal record shall only be considered to be well-formed if the page number +field contains a value other than zero and other than the locking-page number, calculated +using the page size found in the first journal header of the journal file that +contains the journal record. +

    No parents

    +

    No children

    +
    H32120 +A journal record shall only be considered to be well-formed if the checksum +field contains a value equal to the sum of the value stored in the +checksum-initializer field of the journal header that precedes the record +and the value stored in every 200th byte of the page data field (interpreted +as an 8-bit unsigned integer), starting with byte offset (page-size % 200) and +ending with the byte at byte offset (page-size - 200). +

    No parents

    +

    No children

    +
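
    The checksum rule of H32120 (and of H35300, which describes the same value
    from the writer's side) can be expressed as a short sum. This is a
    non-normative sketch; journal_record_cksum and its parameter names are
    invented here.

        #include <stdint.h>

        /* Sum the checksum-initializer and every 200th byte of the page data,
        ** starting at offset (page-size % 200) and ending at offset
        ** (page-size - 200), each byte taken as an 8-bit unsigned integer. */
        static uint32_t journal_record_cksum(uint32_t cksumInit,
                                             const unsigned char *aData,
                                             uint32_t nPageSize){
          uint32_t sum = cksumInit;
          uint32_t i;
          for(i = nPageSize % 200; i <= nPageSize - 200; i += 200){
            sum += aData[i];
          }
          return sum;
        }

    For a 1024-byte page, for example, the bytes at offsets 24, 224, 424, 624
    and 824 are summed.
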
    H32130 +A buffer shall be considered to contain a well-formed master journal pointer +record if it is not excluded from this category by requirements H32140, +H32150, H32160 or H32170. +

    No parents

    +

    No children

    +
    H32140 +A buffer shall only be considered to be a well-formed master journal pointer +if the final eight bytes of the buffer contain the values 0xd9, 0xd5, 0x05, +0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively. +

    No parents

    +

    No children

    +
    H32150 +A buffer shall only be considered to be a well-formed master journal pointer +if the size of the buffer in bytes is equal to the value stored as a 4-byte +big-endian unsigned integer starting 16 bytes before the end of the buffer. +

    No parents

    +

    No children

    +
    H32160 +A buffer shall only be considered to be a well-formed master journal pointer +if the first four bytes of the buffer, interpreted as a big-endian unsigned +integer, contain the page number of the locking page (the value +(1 + 2^30 / page-size), where page-size is the value stored in +the page-size field of the first journal header of the journal file). +

    No parents

    +

    No children

    +
    H32170 +A buffer shall only be considered to be a well-formed master journal pointer +if the value stored as a 4-byte big-endian integer starting 12 bytes before +the end of the buffer is equal to the sum of all bytes, each interpreted +as an 8-bit unsigned integer, starting at offset 4 of the buffer and continuing +up to but not including offset (buffer-size - 16) (that is, ending with the 17th-last byte of the buffer). +

    No parents

    +

    No children

    +
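
    Two pieces of arithmetic appear in H32160 and H32170: the locking-page
    number and the checksum over the master-journal name. The C sketch below is
    non-normative; locking_page_number and master_journal_cksum are invented
    names.

        #include <stdint.h>

        /* Page number of the locking page (H32160): the page that contains
        ** byte offset 2^30 of the database file. */
        static uint32_t locking_page_number(uint32_t nPageSize){
          return 1 + (((uint32_t)1 << 30) / nPageSize);
        }

        /* Checksum field of a master-journal pointer record (H32170): the sum
        ** of the bytes from offset 4 up to and including the 17th-last byte
        ** of the buffer, each taken as an 8-bit unsigned integer. */
        static uint32_t master_journal_cksum(const unsigned char *aBuf,
                                             uint32_t nBuf){
          uint32_t sum = 0;
          uint32_t i;
          for(i = 4; i <= nBuf - 17; i++){
            sum += aBuf[i];
          }
          return sum;
        }
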
    H32210 +A buffer shall be considered to contain a well-formed journal section +if it is not excluded from this category by requirements H32220, H32230 or +H32240. +

    No parents

    +

    No children

    +
    H32220 +A buffer shall only be considered to contain a well-formed journal section +if the first 28 bytes of it contain a well-formed journal header. +

    No parents

    +

    No children

    +
    H32230 +A buffer shall only be considered to contain a well-formed journal section +if, beginning at byte offset sector-size, it contains a sequence of +record-count well-formed journal records. In this case sector-size and +record-count are the integer values stored in the sector size and record +count fields of the journal section's journal header. +

    No parents

    +

    No children

    +
    H32240 +A buffer shall only be considered to contain a well-formed journal section +if it is an integer multiple of sector-size bytes in size, where sector-size +is the value stored in the sector size field of the journal section's journal +header. +

    No parents

    +

    No children

    +
    H32250 +A journal record found within a valid journal file shall be considered a valid +journal record if it is not excluded from this category by requirement H32260, +H32270 or H32280. +

    No parents

    +

    No children

    +
    H32260 +A journal record shall only be considered a valid journal record if it and any +other journal records that occur before it within the same journal section are +well-formed. +

    No parents

    +

    No children

    +
    H32270 +A journal record shall only be considered a valid journal record if the journal +section to which it belongs begins with a well-formed journal header. +

    No parents

    +

    No children

    +
    H32280 +A journal record shall only be considered a valid journal record if all journal +sections that occur before the journal section containing the journal record +are well-formed journal sections. +

    No parents

    +

    No children

    +
    H32290 +Two database images shall be considered to be equivalent if they (a) have the +same page size, (b) contain the same number of pages and (c) the content of +each page of the first database image that is not a free-list leaf page is +the same as the content of the corresponding page in the second database image. +

    No parents

    +

    No children

    +
    H32300 +If, while writing to an SQLite database file-system representation in +order to replace database image A with database image B, a failure that +should be handled gracefully occurs, then following recovery the database +file-system representation shall contain a database image equivalent to +either A or B. +

    No parents

    +

    No children

    +
    H32320 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before the size of +the database file is modified, the first 28 bytes of the journal file contain a +stable valid journal header with the page-size and page-count fields set to +values corresponding to the original database image. +

    No parents

    +

    No children

    +
    H32330 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that the first 28 bytes +of the journal file do not become unstable at any point after the size of the +database file is modified until the journal file is invalidated to commit the +transaction. +

    No parents

    +

    No children

    +
    H32340 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before any part of +the database file that contained a page of the original database image that was +not a free-list leaf page is overwritten or made unstable, the journal file +contains a valid and stable journal record containing the original page data. +

    No parents

    +

    No children

    +
    H32350 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that after any part of +the database file that contained a page of the original database image that was +not a free-list leaf page has been overwritten or made unstable, the corresponding +journal record (see H32340) is not modified or made unstable. +

    No parents

    +

    No children

    +
    H32360 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that before the database +file is truncated, the journal file contains stable valid journal records +corresponding to all pages of the original database image that were part of the +region being discarded by the truncate operation and were not free-list leaf +pages. +

    No parents

    +

    No children

    +
    H32370 +When using the rollback-journal method to modify the file-system representation +of a database image, the database writer shall ensure that after the database +file has been truncated, the journal records corresponding to pages from the +original database image that were part of the truncated region and were not +free-list leaf pages are not modified or made unstable. +

    No parents

    +

    No children

    +
    H33000 +Before reading from a database file, a database reader shall establish a +SHARED or greater lock on the database file-system representation. +

    No parents

    +

    No children

    +
    H33010 +Before writing to a database file, a database writer shall establish +an EXCLUSIVE lock on the database file-system representation. +

    No parents

    +

    No children

    +
    H33020 +Before writing to a journal file, a database writer shall establish +a RESERVED, PENDING or EXCLUSIVE lock on the database file-system +representation. +

    No parents

    +

    No children

    +
    H33030 +Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that the database file contains a valid +database image. +

    No parents

    +

    No children

    +
    H33060 +Before establishing a RESERVED or PENDING lock on a database file, a +database writer shall ensure that any journal file that may be present +is not a valid journal file. +

    No parents

    +

    No children

    +
    H33080 +If another database client holds either a RESERVED or PENDING lock on the +database file-system representation, then any journal file that exists within +the file system shall be considered invalid. +

    No parents

    +

    No children

    +
    H33040 +A database writer shall increment the value of the database header change +counter field, a 4-byte big-endian unsigned integer field stored at byte offset 24 +of the database header, as part of the first database image modification +that it performs after obtaining an EXCLUSIVE lock. +

    No parents

    +

    No children

    +
    H33050 +A database writer shall increment the value of the database schema version +field, a 4-byte big-endian unsigned integer field stored at byte offset 40 +of the database header, as part of the first database image modification that +includes a schema change that it performs after obtaining an EXCLUSIVE lock. +

    No parents

    +

    No children

    +
    H33070 +If a database writer is required by either H33050 or H33040 to increment a +database header field, and that header field already contains the maximum +value possible (0xFFFFFFFF, or 4294967295 for 32-bit unsigned integer +fields), "incrementing" the field shall be interpreted to mean setting it to +zero. +

    No parents

    +

    No children

    +
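
    The wrap-around rule of H33070 can be stated in one line of C. This is a
    non-normative sketch; increment_header_field is an invented name.

        #include <stdint.h>

        /* "Increment" a 4-byte header field per H33040/H33050, wrapping to
        ** zero when the field already holds 0xFFFFFFFF (H33070).  Unsigned
        ** 32-bit arithmetic wraps this way naturally; the test is written out
        ** only to mirror the requirement text. */
        static uint32_t increment_header_field(uint32_t v){
          return (v == 0xFFFFFFFFu) ? 0 : v + 1;
        }
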
    H35010 +Except for the read operation required by H35070 and those reads made +as part of opening a read-only transaction, SQLite shall ensure that +a database connection has an open read-only or read/write +transaction when any data is read from the database file. +

    No parents

    +

    No children

    +
    H35020 +Aside from those read operations described by H35070 and H21XXX, SQLite +shall read data from the database file in aligned blocks of +page-size bytes, where page-size is the database page size +used by the database file. +

    No parents

    +

    No children

    +
    H35030 +While opening a read-only transaction, after successfully +obtaining a shared lock on the database file, SQLite shall +attempt to detect and roll back a hot journal file associated +with the same database file. +

    No parents

    +

    No children

    +
    H35040 +Assuming no errors have occurred, then after attempting to detect and +roll back a hot journal file, if the page cache contains +any entries associated with the current database connection, +then SQLite shall validate the contents of the page cache by +testing the file change counter. This procedure is known as +cache validation. +

    No parents

    +

    No children

    +
    H35050 +If the cache validation procedure prescribed by H35040 is required and +does not prove that the page cache entries associated with the +current database connection are valid, then SQLite shall discard +all entries associated with the current database connection from +the page cache. +

    No parents

    +

    No children

    +
    H35060 +When a new database connection is required, SQLite shall attempt +to open a file-handle on the database file. If the attempt fails, then +no new database connection is created and an error returned. +

    No parents

    +

    No children

    +
    H35070 +When a new database connection is required, after opening the +new file-handle, SQLite shall attempt to read the first 100 bytes +of the database file. If the attempt fails for any reason other than +that the opened file is less than 100 bytes in size, then +the file-handle is closed, no new database connection is created +and an error returned instead. +

    No parents

    +

    No children

    +
    H35080 +If the database file header is successfully read from a newly +opened database file, the connection's expected page-size shall +be set to the value stored in the page-size field of the +database header. +

    No parents

    +

    No children

    +
    H35090 +If the database file header cannot be read from a newly opened +database file (because the file is less than 100 bytes in size), the +connection's expected page-size shall be set to the compile-time +value of the SQLITE_DEFAULT_PAGESIZE option. +

    No parents

    +

    No children

    +
    H35100 +When required to open a read-only transaction using a +database connection, SQLite shall first attempt to obtain +a shared-lock on the file-handle open on the database file. +

    No parents

    +

    No children

    +
    H35110 +If, while opening a read-only transaction, SQLite fails to obtain +the shared-lock on the database file, then the process is +abandoned, no transaction is opened and an error returned to the user. +

    No parents

    +

    No children

    +
    H35120 +If, while opening a read-only transaction, SQLite encounters +an error while attempting to detect or roll back a hot journal +file, then the shared-lock on the database file is released, +no transaction is opened and an error returned to the user. +

    No parents

    +

    No children

    +
    H35130 +When required to end a read-only transaction, SQLite shall +relinquish the shared lock held on the database file by +calling the xUnlock() method of the file-handle. +

    No parents

    +

    No children

    +
    H35140 +When required to attempt to detect a hot-journal file, SQLite +shall first use the xAccess() method of the VFS layer to check if a +journal file exists in the file-system. +

    No parents

    +

    No children

    +
    H35150 +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file does +not exist, then SQLite shall conclude that there is no hot-journal +file in the file system and therefore that no hot journal +rollback is required. +

    No parents

    +

    No children

    +
    H35160 +When required to attempt to detect a hot-journal file, if the +call to xAccess() required by H35140 indicates that a journal file +is present, then the xCheckReservedLock() method of the database file +file-handle is invoked to determine whether or not some other +process is holding a reserved or greater lock on the database +file. +

    No parents

    +

    No children

    +
    H35170 +If the call to xCheckReservedLock() required by H35160 indicates that +some other database connection is holding a reserved +or greater lock on the database file, then SQLite shall conclude that +there is no hot journal file. In this case the attempt to detect +a hot journal file is concluded. +

    No parents

    +

    No children

    +
    H35180 +When a file-handle open on a database file is unlocked, if the +page cache contains one or more entries belonging to the +associated database connection, SQLite shall store the value +of the file change counter internally. +

    No parents

    +

    No children

    +
    H35190 +When required to perform cache validation as part of opening +a read transaction, SQLite shall read a 16-byte block +starting at byte offset 24 of the database file using the xRead() +method of the database connection's file handle. +

    No parents

    +

    No children

    +
    H35200 +While performing cache validation, after loading the 16 byte +block as required by H35190, SQLite shall compare the 32-bit big-endian +integer stored in the first 4 bytes of the block to the most +recently stored value of the file change counter (see H35180). +If the values are not the same, then SQLite shall conclude that +the contents of the cache are invalid. +

    No parents

    +

    No children

    +
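
    Cache validation (H35190, H35200) reduces to comparing the change counter
    in a freshly read 16-byte block against the value saved when the database
    file was last unlocked (H35180). A non-normative sketch, with invented
    names, follows.

        #include <stdint.h>

        static uint32_t get4byte(const unsigned char *p){
          return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
        }

        /* aBlock holds the 16 bytes read from byte offset 24 of the database
        ** file (H35190); savedChangeCounter is the value remembered per
        ** H35180.  Returns non-zero if the cached pages may still be used. */
        static int cache_still_valid(const unsigned char aBlock[16],
                                     uint32_t savedChangeCounter){
          return get4byte(&aBlock[0]) == savedChangeCounter;   /* H35200 */
        }
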
    H35210 +During the conclusion of a read transaction, before unlocking +the database file, SQLite shall set the connection's +expected page size to the current database page-size. +

    No parents

    +

    No children

    +
    H35220 +As part of opening a new read transaction, immediately after +performing cache validation, if there is no data for database +page 1 in the page cache, SQLite shall read N bytes from +the start of the database file using the xRead() method of the +connection's file handle, where N is the connection's current +expected page size value. +

    No parents

    +

    No children

    +
    H35230 +If page 1 data is read as required by H35220, and the value of the +page-size field that appears in the database file header that +consumes the first 100 bytes of the read block is not the same as the +connection's current expected page size, then the +expected page size is set to this value, the database file is +unlocked and the entire procedure to open a read transaction +is repeated. +

    No parents

    +

    No children

    +
    H35240 +If page 1 data is read as required by H35220, and the value of the +page-size field that appears in the database file header that +consumes the first 100 bytes of the read block is the same as the +connection's current expected page size, then the block of data +read is stored in the page cache as page 1. +

    No parents

    +

    No children

    +
    H35270 +When required to journal a database page, SQLite shall first +append the page number of the page being journalled to the +journal file, formatted as a 4-byte big-endian unsigned integer, +using a single call to the xWrite method of the file-handle opened +on the journal file. +

    No parents

    +

    No children

    +
    H35280 +When required to journal a database page, if the attempt to +append the page number to the journal file is successful, +then the current page data (page-size bytes) shall be appended +to the journal file, using a single call to the xWrite method of the +file-handle opened on the journal file. +

    No parents

    +

    No children

    +
    H35290 +When required to journal a database page, if the attempt to +append the current page data to the journal file is successful, +then SQLite shall append a 4-byte big-endian integer checksum value +to the journal file, using a single call to the xWrite method +of the file-handle opened on the journal file. +

    No parents

    +

    No children

    +
    H35300 +The checksum value written to the journal file by the write +required by H35290 shall be equal to the sum of the checksum +initializer field stored in the journal header (H35700) and +every 200th byte of the page data, beginning with the +(page-size % 200)th byte. +

    No parents

    +

    No children

    +
    H35350 +When required to open a write transaction on the database, +SQLite shall first open a read transaction, if the database +connection in question has not already opened one. +

    No parents

    +

    No children

    +
    H35360 +When required to open a write transaction on the database, after +ensuring a read transaction has already been opened, SQLite +shall obtain a reserved lock on the database file by calling +the xLock method of the file-handle open on the database file. +

    No parents

    +

    No children

    +
    H35370 +When required to open a write transaction on the database, after +obtaining a reserved lock on the database file, SQLite shall +open a read/write file-handle on the corresponding journal file. +

    No parents

    +

    No children

    +
    H35380 +When required to open a write transaction on the database, after +opening a file-handle on the journal file, SQLite shall append +a journal header to the (currently empty) journal file. +

    No parents

    +

    No children

    +
    H35400 +When a database connection is closed, SQLite shall close the +associated file handle at the VFS level. +

    No parents

    +

    No children

    +
    H35420 +SQLite shall ensure that a database connection has an open +read-only or read/write transaction before using data stored in the page +cache to satisfy user queries. +

    No parents

    +

    No children

    +
    H35430 +When a database connection is closed, all associated page +cache entries shall be discarded. +

    No parents

    +

    No children

    +
    H35440 +If, while attempting to detect a hot-journal file, the call to +xCheckReservedLock() indicates that no process holds a reserved +or greater lock on the database file, then SQLite shall open +a file handle on the potentially hot journal file using the VFS xOpen() +method. +

    No parents

    +

    No children

    +
    H35450 +After successfully opening a file-handle on a potentially hot journal +file, SQLite shall query the file for its size in bytes using the +xFileSize() method of the open file handle. +

    No parents

    +

    No children

    +
    H35460 +If the size of a potentially hot journal file is revealed to be zero +bytes by a query required by H35450, then SQLite shall close the +file handle opened on the journal file and delete the journal file using +a call to the VFS xDelete() method. In this case SQLite shall conclude +that there is no hot journal file. +

    No parents

    +

    No children

    +
    H35470 +If the size of a potentially hot journal file is revealed to be greater +than zero bytes by a query required by H35450, then SQLite shall attempt +to upgrade the shared lock held by the database connection +on the database file directly to an exclusive lock. +

    No parents

    +

    No children

    +
    H35480 +If an attempt to upgrade to an exclusive lock prescribed by +H35470 fails for any reason, then SQLite shall release all locks held by +the database connection and close the file handle opened on the +journal file. The attempt to open a read-only transaction +shall be deemed to have failed and an error returned to the user. +

    No parents

    +

    No children

    +
    H35490 +If, as part of the hot journal file detection process, the +attempt to upgrade to an exclusive lock mandated by H35470 is +successful, then SQLite shall query the file-system using the xAccess() +method of the VFS implementation to test whether or not the journal +file is still present in the file-system. +

    No parents

    +

    No children

    +
    H35500 +If the xAccess() query required by H35490 reveals that the journal +file is still present in the file system, then SQLite shall conclude +that the journal file is a hot journal file that needs to +be rolled back. SQLite shall immediately begin hot journal +rollback. +

    No parents

    +

    No children

    +
    H35510 +If the call to xAccess() required by H35140 fails (due to an IO error or +similar), then SQLite shall abandon the attempt to open a read-only +transaction, relinquish the shared lock held on the database +file and return an error to the user. +

    No parents

    +

    No children

    +
    H35520 +If the call to xCheckReservedLock() required by H35160 fails (due to an +IO or other internal VFS error), then SQLite shall abandon the attempt +to open a read-only transaction, relinquish the shared lock +held on the database file and return an error to the user. +

    No parents

    +

    No children

    +
    H35530 +If the call to xOpen() required by H35440 fails (due to an IO or other +internal VFS error), then SQLite shall abandon the attempt to open a +read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. +

    No parents

    +

    No children

    +
    H35540 +If the call to xFileSize() required by H35450 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file, close the file handle opened on the journal file and +return an error to the user. +

    No parents

    +

    No children

    +
    H35550 +If the call to xDelete() required by H35450 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the shared lock held on +the database file and return an error to the user. +

    No parents

    +

    No children

    +
    H35560 +If the call to xAccess() required by H35490 fails (due to an IO or +other internal VFS error), then SQLite shall abandon the attempt to open +a read-only transaction, relinquish the lock held on the +database file, close the file handle opened on the journal file and +return an error to the user. +

    No parents

    +

    No children

    +
    H35570 +If the call to xAccess() required by H35490 reveals that the journal +file is no longer present in the file system, then SQLite shall abandon +the attempt to open a read-only transaction, relinquish the +lock held on the database file, close the file handle opened on the +journal file and return an SQLITE_BUSY error to the user. +

    No parents

    +

    No children

    +
    H35580 +If an attempt to acquire a reserved lock prescribed by +requirement H35360 fails, then SQLite shall deem the attempt to +open a write transaction to have failed and return an error +to the user. +

    No parents

    +

    No children

    +
    H35590 +When required to modify the contents of an existing database page that +existed and was not a free-list leaf page when the write +transaction was opened, SQLite shall journal the page if it has not +already been journalled within the current write transaction. +

    No parents

    +

    No children

    +
    H35600 +When required to modify the contents of an existing database page, +SQLite shall update the cached version of the database page content +stored as part of the page cache entry associated with the page. +

    No parents

    +

    No children

    +
    H35610 +When required to append a new database page to the database file, +SQLite shall create a new page cache entry corresponding to +the new page and insert it into the page cache. The dirty +flag of the new page cache entry shall be set. +

    No parents

    +

    No children

    +
    H35620 +When required to truncate (remove), from the end of the database file, a +database page that existed and was not a free-list leaf page when the write +transaction was opened, SQLite shall journal the page if it has not already +been journalled within the current write transaction. +

    No parents

    +

    No children

    +
    H35630 +When required to truncate a database page from the end of the database +file, SQLite shall discard the associated page cache entry +from the page cache. +

    No parents

    +

    No children

    +
    H35640 +When required to purge a non-writable dirty page from the +page cache, SQLite shall sync the journal file before +proceeding with the write operation required by H35670. +

    No parents

    +

    No children

    +
    H35660 +After syncing the journal file as required by H35640, SQLite +shall append a new journal header to the journal file +before proceeding with the write operation required by H35670. +

    No parents

    +

    No children

    +
    H35670 +When required to purge a page cache entry that is a +dirty page, SQLite shall write the page data into the database +file, using a single call to the xWrite method of the database +connection file handle. +

    No parents

    +

    No children

    +
    H35680 +When required to append a journal header to the journal +file, SQLite shall do so by writing a block of sector-size +bytes using a single call to the xWrite method of the file-handle +open on the journal file. The block of data written shall begin +at the smallest sector-size aligned offset at or following the current +end of the journal file. +

    No parents

    +

    No children

    +
    H35690 +The first 8 bytes of the journal header required to be written +by H35680 shall contain the following values, in order from byte offset 0 +to 7: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63 and 0xd7. +

    No parents

    +

    No children

    +
    H35700 +Bytes 8-11 of the journal header required to be written by +H35680 shall contain 0x00. +

    No parents

    +

    No children

    +
    H35710 +Bytes 12-15 of the journal header required to be written by +H35680 shall contain the number of pages that the database file +contained when the current write-transaction was started, +formatted as a 4-byte big-endian unsigned integer. +

    No parents

    +

    No children

    +
    H35720 +Bytes 16-19 of the journal header required to be written by +H35680 shall contain pseudo-randomly generated values. +

    No parents

    +

    No children

    +
    H35730 +Bytes 20-23 of the journal header required to be written by +H35680 shall contain the sector size used by the VFS layer, +formatted as a 4-byte big-endian unsigned integer. +

    No parents

    +

    No children

    +
    H35740 +Bytes 24-27 of the journal header required to be written by +H35680 shall contain the page size used by the database at +the start of the write transaction, formatted as a 4-byte +big-endian unsigned integer. +

    No parents

    +

    No children

    +
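
    Requirements H35690 through H35740 fix the first 28 bytes of every journal
    header. The C sketch below serializes those bytes exactly as the
    requirements state; put4byte, build_journal_header and the parameter names
    are invented for this illustration, and padding of the header out to a
    full sector (H35680) is left to the caller.

        #include <stdint.h>
        #include <string.h>

        static void put4byte(unsigned char *p, uint32_t v){
          p[0]=(unsigned char)(v>>24); p[1]=(unsigned char)(v>>16);
          p[2]=(unsigned char)(v>>8);  p[3]=(unsigned char)(v);
        }

        static void build_journal_header(unsigned char *aHdr,
                                         uint32_t nOrigPages,  /* H35710 */
                                         uint32_t randomInit,  /* H35720 */
                                         uint32_t sectorSize,  /* H35730 */
                                         uint32_t pageSize){   /* H35740 */
          static const unsigned char aMagic[8] =
              { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7 };
          memcpy(&aHdr[0], aMagic, 8);     /* bytes 0-7:   magic (H35690)      */
          put4byte(&aHdr[8],  0);          /* bytes 8-11:  zero (H35700)       */
          put4byte(&aHdr[12], nOrigPages); /* bytes 12-15: original page count */
          put4byte(&aHdr[16], randomInit); /* bytes 16-19: pseudo-random value */
          put4byte(&aHdr[20], sectorSize); /* bytes 20-23: sector size         */
          put4byte(&aHdr[24], pageSize);   /* bytes 24-27: page size           */
        }
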
    H35750 +When required to sync the journal file, SQLite shall invoke the +xSync method of the file handle open on the journal file. +

    No parents

    +

    No children

    +
    H35760 +When required to sync the journal file, after invoking the +xSync method as required by H35750, SQLite shall update the record +count of the journal header most recently written to the +journal file. The 4-byte field shall be updated to contain +the number of journal records that have been written to the +journal file since the journal header was written, +formatted as a 4-byte big-endian unsigned integer. +

    No parents

    +

    No children

    +
    H35770 +When required to sync the journal file, after updating the +record count field of a journal header as required by +H35760, SQLite shall invoke the xSync method of the file handle open +on the journal file. +

    No parents

    +

    No children

    +
    H35780 +When required to upgrade to an exclusive lock as part of a write +transaction, SQLite shall first attempt to obtain a pending lock +on the database file if one is not already held by invoking the xLock +method of the file handle opened on the database file. +

    No parents

    +

    No children

    +
    H35790 +When required to upgrade to an exclusive lock as part of a write +transaction, after successfully obtaining a pending lock SQLite +shall attempt to obtain an exclusive lock by invoking the +xLock method of the file handle opened on the database file. +

    No parents

    +

    No children

    +
    H35800 +When required to commit a write-transaction, SQLite shall +modify page 1 to increment the value stored in the change counter +field of the database file header. +

    No parents

    +

    No children

    +
    H35810 +When required to commit a write-transaction, after incrementing +the change counter field, SQLite shall sync the journal +file. +

    No parents

    +

    No children

    +
    H35820 +When required to commit a write-transaction, after syncing +the journal file as required by H35810, if an exclusive lock +on the database file is not already held, SQLite shall attempt to +upgrade to an exclusive lock. +

    No parents

    +

    No children

    +
    H35830 +When required to commit a write-transaction, after syncing +the journal file as required by H35810 and ensuring that an +exclusive lock is held on the database file as required by +H35820, SQLite shall copy the contents of all dirty pages +stored in the page cache into the database file using +calls to the xWrite method of the database connection file +handle. Each call to xWrite shall write the contents of a single +dirty page (page-size bytes of data) to the database +file. Dirty pages shall be written in order of page number, +from lowest to highest. +

    No parents

    +

    No children

    +
    H35840 +When required to commit a write-transaction, after copying the +contents of any dirty pages to the database file as required +by H35830, SQLite shall sync the database file by invoking the xSync +method of the database connection file handle. +

    No parents

    +

    No children

    +
    H35850 +When required to commit a write-transaction, after syncing +the database file as required by H35840, SQLite shall close the +file-handle opened on the journal file and delete the +journal file from the file system via a call to the VFS +xDelete method. +

    No parents

    +

    No children

    +
    H35860 +When required to commit a write-transaction, after deleting +the journal file as required by H35850, SQLite shall relinquish +all locks held on the database file by invoking the xUnlock +method of the database connection file handle. +

    No parents

    +

    No children

    +
    H41010 +SQLite shall divide input SQL text into tokens, working from left to +right. +

    No parents

    +

    No children

    +
    H41020 +At each step in the SQL tokenization process, SQLite shall extract +the longest possible token from the remaining input text. +

    No parents

    +

    No children

    +
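
    H41020 is the usual "longest match" (maximal munch) rule: when the
    remaining input starts with ">=", the two-character GE token is produced
    rather than GT followed by EQ. The fragment below is a non-normative
    illustration over a small operator table; match_op and the table itself
    are invented for this sketch and ordered so that longer operators are
    tried first.

        #include <string.h>

        struct OpToken { const char *zOp; const char *zName; };

        /* Longer spellings listed before their prefixes. */
        static const struct OpToken aOps[] = {
          { ">=", "GE" }, { ">>", "RSHIFT" }, { ">", "GT" },
          { "<=", "LE" }, { "<>", "NE" }, { "<<", "LSHIFT" }, { "<", "LT" },
          { "==", "EQ" }, { "=",  "EQ" }, { "!=", "NE" },
        };

        /* Return the token name for the operator at the start of zIn and the
        ** number of characters consumed, or 0 if none matches. */
        static const char *match_op(const char *zIn, int *pnUsed){
          size_t i;
          for(i=0; i<sizeof(aOps)/sizeof(aOps[0]); i++){
            size_t n = strlen(aOps[i].zOp);
            if( strncmp(zIn, aOps[i].zOp, n)==0 ){
              *pnUsed = (int)n;
              return aOps[i].zName;  /* ">=" yields GE, never GT then EQ */
            }
          }
          *pnUsed = 0;
          return 0;
        }
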
    H41030 +The tokenizer shall pass each non-WHITESPACE token seen on to the +parser in the order in which the tokens are seen. +

    No parents

    +

    No children

    +
    H41040 +When the tokenizer reaches the end of input and the last token sent +to the parser was not a SEMI token, it shall +send a SEMI token to the parser. +

    No parents

    +

    No children

    +
    H41050 +When the tokenizer encounters text that is not a valid token, it shall +cause an error to be returned to the application. +

    No parents

    +

    No children

    +
    H41100 +SQLite shall recognize a sequence of one or more WHITESPACE characters +as a WHITESPACE token. +

    No parents

    +

    No children

    +
    H41110 +SQLite shall recognize as a WHITESPACE token the two-character sequence "--" +(u002d, u002d) followed by any sequence of non-zero characters up through and +including the first u000a character or until end of input. +

    No parents

    +

    No children

    +
    H41120 +SQLite shall recognize as a WHITESPACE token the two-character sequence "/*" +(u002f, u002a) followed by any sequence of zero or more +non-zero characters up to and including the first "*/" (u002a, u002f) sequence or +until end of input. +

    No parents

    +

    No children

    +
    H41130 +SQLite shall recognize as an ID token +any sequence of characters that begins with +an ALPHABETIC character and continues with zero or more +ALPHANUMERIC characters and/or "$" (u0024) characters and which is +not a keyword token. +

    No parents

    +

    No children

    +
    H41140 +SQLite shall recognize as an ID token +any sequence of non-zero characters that begins with "[" (u005b) and +continues through the first "]" (u005d) character. +

    No parents

    +

    No children

    +
    H41150 +SQLite shall recognize as an ID token +any sequence of characters +that begins with a double-quote (u0022), is followed by zero or +more non-zero characters and/or pairs of double-quotes (u0022) +and terminates with a double-quote (u0022) that +is not part of a pair. +

    No parents

    +

    No children

    +
    H41160 +SQLite shall recognize as an ID token +any sequence of characters +that begins with a grave accent (u0060), is followed by zero or +more non-zero characters and/or pairs of grave accents (u0060) +and terminates with a grave accent (u0060) that +is not part of a pair. +

    No parents

    +

    No children

    +
    H41200 +SQLite shall recognize as a STRING token a sequence of characters +that begins with a single-quote (u0027), is followed by zero or +more non-zero characters and/or pairs of single-quotes (u0027) +and terminates with a single-quote (u0027) that +is not part of a pair. +

    No parents

    +

    No children

    +
    H41210 +SQLite shall recognize as a BLOB token an upper or lower-case "X" +(u0058 or u0078) followed by a single-quote (u0027) followed by +a number of HEXADECIMAL characters that is a multiple of two and +terminated by a single-quote (u0027). +

    No parents

    +

    No children

    +
    H41220 +SQLite shall recognize as an INTEGER token any sequence of +one or more NUMERIC characters. +

    No parents

    +

    No children

    +
    H41230 +SQLite shall recognize as a FLOAT token a sequence of one +or more NUMERIC characters together with zero or one period +(u002e) and followed by an exponentiation suffix. +

    No parents

    +

    No children

    +
    H41240 +SQLite shall recognize as a FLOAT token a sequence of one +or more NUMERIC characters that includes exactly one period +(u002e) character. +

    No parents

    +

    No children

    +
    H41403 +SQLite shall recognize the 1-character sequence "-" (u002d) as token MINUS +

    No parents

    +

    No children

    +
    H41406 +SQLite shall recognize the 1-character sequence "(" (u0028) as token LP +

    No parents

    +

    No children

    +
    H41409 +SQLite shall recognize the 1-character sequence ")" (u0029) as token RP +

    No parents

    +

    No children

    +
    H41412 +SQLite shall recognize the 1-character sequence ";" (u003b) as token SEMI +

    No parents

    +

    No children

    +
    H41415 +SQLite shall recognize the 1-character sequence "+" (u002b) as token PLUS +

    No parents

    +

    No children

    +
    H41418 +SQLite shall recognize the 1-character sequence "*" (u002a) as token STAR +

    No parents

    +

    No children

    +
    H41421 +SQLite shall recognize the 1-character sequence "/" (u002f) as token SLASH +

    No parents

    +

    No children

    +
    H41424 +SQLite shall recognize the 1-character sequence "%" (u0025) as token REM +

    No parents

    +

    No children

    +
    H41427 +SQLite shall recognize the 1-character sequence "=" (u003d) as token EQ +

    No parents

    +

    No children

    +
    H41430 +SQLite shall recognize the 2-character sequence "==" (u003d u003d) as token EQ +

    No parents

    +

    No children

    +
    H41433 +SQLite shall recognize the 2-character sequence "<=" (u003c u003d) as token LE +

    No parents

    +

    No children

    +
    H41436 +SQLite shall recognize the 2-character sequence "<>" (u003c u003e) as token NE +

    No parents

    +

    No children

    +
    H41439 +SQLite shall recognize the 2-character sequence "<<" (u003c u003c) as token LSHIFT +

    No parents

    +

    No children

    +
    H41442 +SQLite shall recognize the 1-character sequence "<" (u003c) as token LT +

    No parents

    +

    No children

    +
    H41445 +SQLite shall recognize the 2-character sequence ">=" (u003e u003d) as token GE +

    No parents

    +

    No children

    +
    H41448 +SQLite shall recognize the 2-character sequence ">>" (u003e u003e) as token RSHIFT +

    No parents

    +

    No children

    +
    H41451 +SQLite shall recognize the 1-character sequence ">" (u003e) as token GT +

    No parents

    +

    No children

    +
    H41454 +SQLite shall recognize the 2-character sequence "!=" (u0021 u003d) as token NE +

    No parents

    +

    No children

    +
    H41457 +SQLite shall recognize the 1-character sequence "," (u002c) as token COMMA +

    No parents

    +

    No children

    +
    H41460 +SQLite shall recognize the 1-character sequence "&" (u0026) as token BITAND +

    No parents

    +

    No children

    +
    H41463 +SQLite shall recognize the 1-character sequence "~" (u007e) as token BITNOT +

    No parents

    +

    No children

    +
    H41466 +SQLite shall recognize the 1-character sequence "|" (u007c) as token BITOR +

    No parents

    +

    No children

    +
    H41469 +SQLite shall recognize the 2-character sequence "||" (u007c u007c) as token CONCAT +

    No parents

    +

    No children

    +
    H41472 +SQLite shall recognize the 1-character sequence "." (u002e) as token DOT +

    No parents

    +

    No children

    +
    H41503 +SQLite shall recognize the 5-character sequence "ABORT" in any combination of upper and lower case letters as the keyword token ABORT. +

    No parents

    +

    No children

    +
    H41506 +SQLite shall recognize the 3-character sequence "ADD" in any combination of upper and lower case letters as the keyword token ADD. +

    No parents

    +

    No children

    +
    H41509 +SQLite shall recognize the 5-character sequence "AFTER" in any combination of upper and lower case letters as the keyword token AFTER. +

    No parents

    +

    No children

    +
    H41512 +SQLite shall recognize the 3-character sequence "ALL" in any combination of upper and lower case letters as the keyword token ALL. +

    No parents

    +

    No children

    +
    H41515 +SQLite shall recognize the 5-character sequence "ALTER" in any combination of upper and lower case letters as the keyword token ALTER. +

    No parents

    +

    No children

    +
    H41518 +SQLite shall recognize the 7-character sequence "ANALYZE" in any combination of upper and lower case letters as the keyword token ANALYZE. +

    No parents

    +

    No children

    +
    H41521 +SQLite shall recognize the 3-character sequence "AND" in any combination of upper and lower case letters as the keyword token AND. +

    No parents

    +

    No children

    +
    H41524 +SQLite shall recognize the 2-character sequence "AS" in any combination of upper and lower case letters as the keyword token AS. +

    No parents

    +

    No children

    +
    H41527 +SQLite shall recognize the 3-character sequence "ASC" in any combination of upper and lower case letters as the keyword token ASC. +

    No parents

    +

    No children

    +
    H41530 +SQLite shall recognize the 6-character sequence "ATTACH" in any combination of upper and lower case letters as the keyword token ATTACH. +

    No parents

    +

    No children

    +
    H41533 +SQLite shall recognize the 13-character sequence "AUTOINCREMENT" in any combination of upper and lower case letters as the keyword token AUTOINCR. +

    No parents

    +

    No children

    +
    H41536 +SQLite shall recognize the 6-character sequence "BEFORE" in any combination of upper and lower case letters as the keyword token BEFORE. +

    No parents

    +

    No children

    +
    H41539 +SQLite shall recognize the 5-character sequence "BEGIN" in any combination of upper and lower case letters as the keyword token BEGIN. +

    No parents

    +

    No children

    +
    H41542 +SQLite shall recognize the 7-character sequence "BETWEEN" in any combination of upper and lower case letters as the keyword token BETWEEN. +

    No parents

    +

    No children

    +
    H41545 +SQLite shall recognize the 2-character sequence "BY" in any combination of upper and lower case letters as the keyword token BY. +

    No parents

    +

    No children

    +
    H41548 +SQLite shall recognize the 7-character sequence "CASCADE" in any combination of upper and lower case letters as the keyword token CASCADE. +

    No parents

    +

    No children

    +
    H41551 +SQLite shall recognize the 4-character sequence "CASE" in any combination of upper and lower case letters as the keyword token CASE. +

    No parents

    +

    No children

    +
    H41554 +SQLite shall recognize the 4-character sequence "CAST" in any combination of upper and lower case letters as the keyword token CAST. +

    No parents

    +

    No children

    +
    H41557 +SQLite shall recognize the 5-character sequence "CHECK" in any combination of upper and lower case letters as the keyword token CHECK. +

    No parents

    +

    No children

    +
    H41560 +SQLite shall recognize the 7-character sequence "COLLATE" in any combination of upper and lower case letters as the keyword token COLLATE. +

    No parents

    +

    No children

    +
    H41563 +SQLite shall recognize the 6-character sequence "COLUMN" in any combination of upper and lower case letters as the keyword token COLUMNKW. +

    No parents

    +

    No children

    +
    H41566 +SQLite shall recognize the 6-character sequence "COMMIT" in any combination of upper and lower case letters as the keyword token COMMIT. +

    No parents

    +

    No children

    +
    H41569 +SQLite shall recognize the 8-character sequence "CONFLICT" in any combination of upper and lower case letters as the keyword token CONFLICT. +

    No parents

    +

    No children

    +
    H41572 +SQLite shall recognize the 10-character sequence "CONSTRAINT" in any combination of upper and lower case letters as the keyword token CONSTRAINT. +

    No parents

    +

    No children

    +
    H41575 +SQLite shall recognize the 6-character sequence "CREATE" in any combination of upper and lower case letters as the keyword token CREATE. +

    No parents

    +

    No children

    +
    H41578 +SQLite shall recognize the 5-character sequence "CROSS" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41581 +SQLite shall recognize the 12-character sequence "CURRENT_DATE" in any combination of upper and lower case letters as the keyword token CTIME_KW. +

    No parents

    +

    No children

    +
    H41584 +SQLite shall recognize the 12-character sequence "CURRENT_TIME" in any combination of upper and lower case letters as the keyword token CTIME_KW. +

    No parents

    +

    No children

    +
    H41587 +SQLite shall recognize the 17-character sequence "CURRENT_TIMESTAMP" in any combination of upper and lower case letters as the keyword token CTIME_KW. +

    No parents

    +

    No children

    +
    H41590 +SQLite shall recognize the 8-character sequence "DATABASE" in any combination of upper and lower case letters as the keyword token DATABASE. +

    No parents

    +

    No children

    +
    H41593 +SQLite shall recognize the 7-character sequence "DEFAULT" in any combination of upper and lower case letters as the keyword token DEFAULT. +

    No parents

    +

    No children

    +
    H41596 +SQLite shall recognize the 8-character sequence "DEFERRED" in any combination of upper and lower case letters as the keyword token DEFERRED. +

    No parents

    +

    No children

    +
    H41599 +SQLite shall recognize the 10-character sequence "DEFERRABLE" in any combination of upper and lower case letters as the keyword token DEFERRABLE. +

    No parents

    +

    No children

    +
    H41602 +SQLite shall recognize the 6-character sequence "DELETE" in any combination of upper and lower case letters as the keyword token DELETE. +

    No parents

    +

    No children

    +
    H41605 +SQLite shall recognize the 4-character sequence "DESC" in any combination of upper and lower case letters as the keyword token DESC. +

    No parents

    +

    No children

    +
    H41608 +SQLite shall recognize the 6-character sequence "DETACH" in any combination of upper and lower case letters as the keyword token DETACH. +

    No parents

    +

    No children

    +
    H41611 +SQLite shall recognize the 8-character sequence "DISTINCT" in any combination of upper and lower case letters as the keyword token DISTINCT. +

    No parents

    +

    No children

    +
    H41614 +SQLite shall recognize the 4-character sequence "DROP" in any combination of upper and lower case letters as the keyword token DROP. +

    No parents

    +

    No children

    +
    H41617 +SQLite shall recognize the 3-character sequence "END" in any combination of upper and lower case letters as the keyword token END. +

    No parents

    +

    No children

    +
    H41620 +SQLite shall recognize the 4-character sequence "EACH" in any combination of upper and lower case letters as the keyword token EACH. +

    No parents

    +

    No children

    +
    H41623 +SQLite shall recognize the 4-character sequence "ELSE" in any combination of upper and lower case letters as the keyword token ELSE. +

    No parents

    +

    No children

    +
    H41626 +SQLite shall recognize the 6-character sequence "ESCAPE" in any combination of upper and lower case letters as the keyword token ESCAPE. +

    No parents

    +

    No children

    +
    H41629 +SQLite shall recognize the 6-character sequence "EXCEPT" in any combination of upper and lower case letters as the keyword token EXCEPT. +

    No parents

    +

    No children

    +
    H41632 +SQLite shall recognize the 9-character sequence "EXCLUSIVE" in any combination of upper and lower case letters as the keyword token EXCLUSIVE. +

    No parents

    +

    No children

    +
    H41635 +SQLite shall recognize the 6-character sequence "EXISTS" in any combination of upper and lower case letters as the keyword token EXISTS. +

    No parents

    +

    No children

    +
    H41638 +SQLite shall recognize the 7-character sequence "EXPLAIN" in any combination of upper and lower case letters as the keyword token EXPLAIN. +

    No parents

    +

    No children

    +
    H41641 +SQLite shall recognize the 4-character sequence "FAIL" in any combination of upper and lower case letters as the keyword token FAIL. +

    No parents

    +

    No children

    +
    H41644 +SQLite shall recognize the 3-character sequence "FOR" in any combination of upper and lower case letters as the keyword token FOR. +

    No parents

    +

    No children

    +
    H41647 +SQLite shall recognize the 7-character sequence "FOREIGN" in any combination of upper and lower case letters as the keyword token FOREIGN. +

    No parents

    +

    No children

    +
    H41650 +SQLite shall recognize the 4-character sequence "FROM" in any combination of upper and lower case letters as the keyword token FROM. +

    No parents

    +

    No children

    +
    H41653 +SQLite shall recognize the 4-character sequence "FULL" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41656 +SQLite shall recognize the 4-character sequence "GLOB" in any combination of upper and lower case letters as the keyword token LIKE_KW. +

    No parents

    +

    No children

    +
    H41659 +SQLite shall recognize the 5-character sequence "GROUP" in any combination of upper and lower case letters as the keyword token GROUP. +

    No parents

    +

    No children

    +
    H41662 +SQLite shall recognize the 6-character sequence "HAVING" in any combination of upper and lower case letters as the keyword token HAVING. +

    No parents

    +

    No children

    +
    H41665 +SQLite shall recognize the 2-character sequence "IF" in any combination of upper and lower case letters as the keyword token IF. +

    No parents

    +

    No children

    +
    H41668 +SQLite shall recognize the 6-character sequence "IGNORE" in any combination of upper and lower case letters as the keyword token IGNORE. +

    No parents

    +

    No children

    +
    H41671 +SQLite shall recognize the 9-character sequence "IMMEDIATE" in any combination of upper and lower case letters as the keyword token IMMEDIATE. +

    No parents

    +

    No children

    +
    H41674 +SQLite shall recognize the 2-character sequence "IN" in any combination of upper and lower case letters as the keyword token IN. +

    No parents

    +

    No children

    +
    H41677 +SQLite shall recognize the 5-character sequence "INDEX" in any combination of upper and lower case letters as the keyword token INDEX. +

    No parents

    +

    No children

    +
    H41680 +SQLite shall recognize the 9-character sequence "INITIALLY" in any combination of upper and lower case letters as the keyword token INITIALLY. +

    No parents

    +

    No children

    +
    H41683 +SQLite shall recognize the 5-character sequence "INNER" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41686 +SQLite shall recognize the 6-character sequence "INSERT" in any combination of upper and lower case letters as the keyword token INSERT. +

    No parents

    +

    No children

    +
    H41689 +SQLite shall recognize the 7-character sequence "INSTEAD" in any combination of upper and lower case letters as the keyword token INSTEAD. +

    No parents

    +

    No children

    +
    H41692 +SQLite shall recognize the 9-character sequence "INTERSECT" in any combination of upper and lower case letters as the keyword token INTERSECT. +

    No parents

    +

    No children

    +
    H41695 +SQLite shall recognize the 4-character sequence "INTO" in any combination of upper and lower case letters as the keyword token INTO. +

    No parents

    +

    No children

    +
    H41698 +SQLite shall recognize the 2-character sequence "IS" in any combination of upper and lower case letters as the keyword token IS. +

    No parents

    +

    No children

    +
    H41701 +SQLite shall recognize the 6-character sequence "ISNULL" in any combination of upper and lower case letters as the keyword token ISNULL. +

    No parents

    +

    No children

    +
    H41704 +SQLite shall recognize the 4-character sequence "JOIN" in any combination of upper and lower case letters as the keyword token JOIN. +

    No parents

    +

    No children

    +
    H41707 +SQLite shall recognize the 3-character sequence "KEY" in any combination of upper and lower case letters as the keyword token KEY. +

    No parents

    +

    No children

    +
    H41710 +SQLite shall recognize the 4-character sequence "LEFT" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41713 +SQLite shall recognize the 4-character sequence "LIKE" in any combination of upper and lower case letters as the keyword token LIKE_KW. +

    No parents

    +

    No children

    +
    H41716 +SQLite shall recognize the 5-character sequence "LIMIT" in any combination of upper and lower case letters as the keyword token LIMIT. +

    No parents

    +

    No children

    +
    H41719 +SQLite shall recognize the 5-character sequence "MATCH" in any combination of upper and lower case letters as the keyword token MATCH. +

    No parents

    +

    No children

    +
    H41722 +SQLite shall recognize the 7-character sequence "NATURAL" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41725 +SQLite shall recognize the 3-character sequence "NOT" in any combination of upper and lower case letters as the keyword token NOT. +

    No parents

    +

    No children

    +
    H41728 +SQLite shall recognize the 7-character sequence "NOTNULL" in any combination of upper and lower case letters as the keyword token NOTNULL. +

    No parents

    +

    No children

    +
    H41731 +SQLite shall recognize the 4-character sequence "NULL" in any combination of upper and lower case letters as the keyword token NULL. +

    No parents

    +

    No children

    +
    H41734 +SQLite shall recognize the 2-character sequence "OF" in any combination of upper and lower case letters as the keyword token OF. +

    No parents

    +

    No children

    +
    H41737 +SQLite shall recognize the 6-character sequence "OFFSET" in any combination of upper and lower case letters as the keyword token OFFSET. +

    No parents

    +

    No children

    +
    H41740 +SQLite shall recognize the 2-character sequence "ON" in any combination of upper and lower case letters as the keyword token ON. +

    No parents

    +

    No children

    +
    H41743 +SQLite shall recognize the 2-character sequence "OR" in any combination of upper and lower case letters as the keyword token OR. +

    No parents

    +

    No children

    +
    H41746 +SQLite shall recognize the 5-character sequence "ORDER" in any combination of upper and lower case letters as the keyword token ORDER. +

    No parents

    +

    No children

    +
    H41749 +SQLite shall recognize the 5-character sequence "OUTER" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41752 +SQLite shall recognize the 4-character sequence "PLAN" in any combination of upper and lower case letters as the keyword token PLAN. +

    No parents

    +

    No children

    +
    H41755 +SQLite shall recognize the 6-character sequence "PRAGMA" in any combination of upper and lower case letters as the keyword token PRAGMA. +

    No parents

    +

    No children

    +
    H41758 +SQLite shall recognize the 7-character sequence "PRIMARY" in any combination of upper and lower case letters as the keyword token PRIMARY. +

    No parents

    +

    No children

    +
    H41761 +SQLite shall recognize the 5-character sequence "QUERY" in any combination of upper and lower case letters as the keyword token QUERY. +

    No parents

    +

    No children

    +
    H41764 +SQLite shall recognize the 5-character sequence "RAISE" in any combination of upper and lower case letters as the keyword token RAISE. +

    No parents

    +

    No children

    +
    H41767 +SQLite shall recognize the 10-character sequence "REFERENCES" in any combination of upper and lower case letters as the keyword token REFERENCES. +

    No parents

    +

    No children

    +
    H41770 +SQLite shall recognize the 6-character sequence "REGEXP" in any combination of upper and lower case letters as the keyword token LIKE_KW. +

    No parents

    +

    No children

    +
    H41773 +SQLite shall recognize the 7-character sequence "REINDEX" in any combination of upper and lower case letters as the keyword token REINDEX. +

    No parents

    +

    No children

    +
    H41776 +SQLite shall recognize the 6-character sequence "RENAME" in any combination of upper and lower case letters as the keyword token RENAME. +

    No parents

    +

    No children

    +
    H41779 +SQLite shall recognize the 7-character sequence "REPLACE" in any combination of upper and lower case letters as the keyword token REPLACE. +

    No parents

    +

    No children

    +
    H41782 +SQLite shall recognize the 8-character sequence "RESTRICT" in any combination of upper and lower case letters as the keyword token RESTRICT. +

    No parents

    +

    No children

    +
    H41785 +SQLite shall recognize the 5-character sequence "RIGHT" in any combination of upper and lower case letters as the keyword token JOIN_KW. +

    No parents

    +

    No children

    +
    H41788 +SQLite shall recognize the 8-character sequence "ROLLBACK" in any combination of upper and lower case letters as the keyword token ROLLBACK. +

    No parents

    +

    No children

    +
    H41791 +SQLite shall recognize the 3-character sequence "ROW" in any combination of upper and lower case letters as the keyword token ROW. +

    No parents

    +

    No children

    +
    H41794 +SQLite shall recognize the 6-character sequence "SELECT" in any combination of upper and lower case letters as the keyword token SELECT. +

    No parents

    +

    No children

    +
    H41797 +SQLite shall recognize the 3-character sequence "SET" in any combination of upper and lower case letters as the keyword token SET. +

    No parents

    +

    No children

    +
    H41800 +SQLite shall recognize the 5-character sequence "TABLE" in any combination of upper and lower case letters as the keyword token TABLE. +

    No parents

    +

    No children

    +
    H41803 +SQLite shall recognize the 4-character sequence "TEMP" in any combination of upper and lower case letters as the keyword token TEMP. +

    No parents

    +

    No children

    +
    H41806 +SQLite shall recognize the 9-character sequence "TEMPORARY" in any combination of upper and lower case letters as the keyword token TEMP. +

    No parents

    +

    No children

    +
    H41809 +SQLite shall recognize the 4-character sequence "THEN" in any combination of upper and lower case letters as the keyword token THEN. +

    No parents

    +

    No children

    +
    H41812 +SQLite shall recognize the 2-character sequence "TO" in any combination of upper and lower case letters as the keyword token TO. +

    No parents

    +

    No children

    +
    H41815 +SQLite shall recognize the 11-character sequence "TRANSACTION" in any combination of upper and lower case letters as the keyword token TRANSACTION. +

    No parents

    +

    No children

    +
    H41818 +SQLite shall recognize the 7-character sequence "TRIGGER" in any combination of upper and lower case letters as the keyword token TRIGGER. +

    No parents

    +

    No children

    +
    H41821 +SQLite shall recognize the 5-character sequence "UNION" in any combination of upper and lower case letters as the keyword token UNION. +

    No parents

    +

    No children

    +
    H41824 +SQLite shall recognize the 6-character sequence "UNIQUE" in any combination of upper and lower case letters as the keyword token UNIQUE. +

    No parents

    +

    No children

    +
    H41827 +SQLite shall recognize the 6-character sequence "UPDATE" in any combination of upper and lower case letters as the keyword token UPDATE. +

    No parents

    +

    No children

    +
    H41830 +SQLite shall recognize the 5-character sequence "USING" in any combination of upper and lower case letters as the keyword token USING. +

    No parents

    +

    No children

    +
    H41833 +SQLite shall recognize the 6-character sequence "VACUUM" in any combination of upper and lower case letters as the keyword token VACUUM. +

    No parents

    +

    No children

    +
    H41836 +SQLite shall recognize the 6-character sequence "VALUES" in any combination of upper and lower case letters as the keyword token VALUES. +

    No parents

    +

    No children

    +
    H41839 +SQLite shall recognize the 4-character sequence "VIEW" in any combination of upper and lower case letters as the keyword token VIEW. +

    No parents

    +

    No children

    +
    H41842 +SQLite shall recognize the 7-character sequence "VIRTUAL" in any combination of upper and lower case letters as the keyword token VIRTUAL. +

    No parents

    +

    No children

    +
    H41845 +SQLite shall recognize the 4-character sequence "WHEN" in any combination of upper and lower case letters as the keyword token WHEN. +

    No parents

    +

    No children

    +
    H41848 +SQLite shall recognize the 5-character sequence "WHERE" in any combination of upper and lower case letters as the keyword token WHERE. +

    No parents

    +

    No children

    +
    H41900 +The preparation of an SQL statement that is not accepted by +the SQLite parser shall fail with an error. +

    No parents

    +

    No children

    +
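    (Illustration of H41900.) A minimal, hedged sketch using the public C API; the requirement names no particular interface, so sqlite3_prepare_v2 is used here only as a convenient harness for submitting a statement to the parser:

        #include <stdio.h>
        #include <sqlite3.h>

        int main(void){
          sqlite3 *db;
          sqlite3_stmt *pStmt = 0;
          sqlite3_open(":memory:", &db);

          /* "SELECT * FROM;" is not accepted by the parser, so preparation
          ** must fail with an error rather than return SQLITE_OK. */
          int rc = sqlite3_prepare_v2(db, "SELECT * FROM;", -1, &pStmt, 0);
          printf("rc=%d: %s\n", rc, sqlite3_errmsg(db));  /* rc != SQLITE_OK */

          sqlite3_finalize(pStmt);
          sqlite3_close(db);
          return 0;
        }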
    H41910 +SQLite shall use the built-in NOCASE collating sequence when comparing +identifiers and datatype names within SQL statements during +statement preparation. +

    No parents

    +

    No children

    +
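    (Illustration of H41910.) A hedged example of the observable effect through the public C API; the table and column names are invented for illustration. Because identifiers and datatype names are compared with the NOCASE collation during preparation, the second statement resolves to the same table and column as the first:

        /* Assumes an open database handle "db". */
        sqlite3_exec(db, "CREATE TABLE Widgets(Id Integer);", 0, 0, 0);
        int rc = sqlite3_exec(db, "SELECT ID FROM WIDGETS;", 0, 0, 0);
        /* rc is expected to be SQLITE_OK: "WIDGETS" and "ID" match
        ** "Widgets" and "Id" under the NOCASE comparison. */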
    H41920 +A token received by the parser shall be converted into an ID token +if the original token value would have resulted in a syntax error, +if a token value of ID will allow the parse to continue, +and if the original token value was one of: +ABORT +AFTER +ANALYZE +ASC +ATTACH +BEFORE +BEGIN +CASCADE +CAST +CONFLICT +CTIME_KW +DATABASE +DEFERRED +DESC +DETACH +EACH +END +EXCEPT +EXCLUSIVE +EXPLAIN +FAIL +FOR +IF +IGNORE +IMMEDIATE +INITIALLY +INSTEAD +INTERSECT +KEY +LIKE_KW +MATCH +OF +OFFSET +PLAN +PRAGMA +QUERY +RAISE +REINDEX +RENAME +REPLACE +RESTRICT +ROW +TEMP +TRIGGER +UNION +VACUUM +VIEW +VIRTUAL +

    No parents

    +

    No children

    +
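    (Illustration of H41920.) A hedged sketch of the fallback-to-ID behavior; the table names are invented. ABORT appears in the list above, so where it would otherwise cause a syntax error it is converted to an ID token and the statement parses; TABLE is not in the list, so the second statement fails:

        /* Assumes an open database handle "db". */
        int rc1 = sqlite3_exec(db, "CREATE TABLE abort(x);", 0, 0, 0);  /* accepted: ABORT falls back to ID */
        int rc2 = sqlite3_exec(db, "CREATE TABLE table(x);", 0, 0, 0);  /* syntax error: TABLE has no fallback */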
    H41930 +A token received by the parser shall be converted into an ANY token +if the original token value would have resulted in a syntax error +and if a token value of ANY will allow the parse to continue. +

    No parents

    +

    No children

    +
    H42000 +In the absence of semantic or other errors, the SQLite parser shall +accept a "sql-stmt-list" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42100 +In the absence of semantic or other errors, the SQLite parser shall +accept a "sql-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42200 +In the absence of semantic or other errors, the SQLite parser shall +accept an "alter-table-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42300 +In the absence of semantic or other errors, the SQLite parser shall +accept an "analyze-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42400 +In the absence of semantic or other errors, the SQLite parser shall +accept an "attach-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42500 +In the absence of semantic or other errors, the SQLite parser shall +accept a "begin-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42600 +In the absence of semantic or other errors, the SQLite parser shall +accept a "commit-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42700 +In the absence of semantic or other errors, the SQLite parser shall +accept a "rollback-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42800 +In the absence of semantic or other errors, the SQLite parser shall +accept a "savepoint-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H42900 +In the absence of semantic or other errors, the SQLite parser shall +accept a "release-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43000 +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-index-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43100 +In the absence of semantic or other errors, the SQLite parser shall +accept an "indexed-column" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43200 +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-table-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43300 +In the absence of semantic or other errors, the SQLite parser shall +accept a "column-def" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43400 +In the absence of semantic or other errors, the SQLite parser shall +accept a "type-name" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43500 +In the absence of semantic or other errors, the SQLite parser shall +accept a "column-constraint" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43600 +In the absence of semantic or other errors, the SQLite parser shall +accept a "signed-number" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43700 +In the absence of semantic or other errors, the SQLite parser shall +accept a "table-constraint" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43800 +In the absence of semantic or other errors, the SQLite parser shall +accept a "foreign-key-clause" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H43900 +In the absence of semantic or other errors, the SQLite parser shall +accept a "conflict-clause" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44000 +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-trigger-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44100 +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-view-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44200 +In the absence of semantic or other errors, the SQLite parser shall +accept a "create-virtual-table-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44300 +In the absence of semantic or other errors, the SQLite parser shall +accept a "delete-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44400 +In the absence of semantic or other errors, the SQLite parser shall +accept a "delete-stmt-limited" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44500 +In the absence of semantic or other errors, the SQLite parser shall +accept a "detach-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44600 +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-index-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44700 +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-table-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44800 +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-trigger-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H44900 +In the absence of semantic or other errors, the SQLite parser shall +accept a "drop-view-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45000 +In the absence of semantic or other errors, the SQLite parser shall +accept an "expr" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45100 +In the absence of semantic or other errors, the SQLite parser shall +accept a "raise-function" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45200 +In the absence of semantic or other errors, the SQLite parser shall +accept a "literal-value" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45300 +In the absence of semantic or other errors, the SQLite parser shall +accept an "insert-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45400 +In the absence of semantic or other errors, the SQLite parser shall +accept a "pragma-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45500 +In the absence of semantic or other errors, the SQLite parser shall +accept a "pragma-value" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45600 +In the absence of semantic or other errors, the SQLite parser shall +accept a "reindex-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45700 +In the absence of semantic or other errors, the SQLite parser shall +accept a "select-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45800 +In the absence of semantic or other errors, the SQLite parser shall +accept a "select-core" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H45900 +In the absence of semantic or other errors, the SQLite parser shall +accept a "result-column" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46000 +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-source" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46100 +In the absence of semantic or other errors, the SQLite parser shall +accept a "single-source" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46200 +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-op" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46300 +In the absence of semantic or other errors, the SQLite parser shall +accept a "join-constraint" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46400 +In the absence of semantic or other errors, the SQLite parser shall +accept an "ordering-term" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46500 +In the absence of semantic or other errors, the SQLite parser shall +accept a "compound-operator" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46600 +In the absence of semantic or other errors, the SQLite parser shall +accept an "update-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46700 +In the absence of semantic or other errors, the SQLite parser shall +accept an "update-stmt-limited" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46800 +In the absence of semantic or other errors, the SQLite parser shall +accept a "qualified-table-name" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H46900 +In the absence of semantic or other errors, the SQLite parser shall +accept a "vacuum-stmt" that conforms to the following syntax: +
    +

    No parents

    +

    No children

    +
    H50010 +The B-Tree module shall provide an interface to open a connection +to either a named persistent database file, or an anonymous temporary +database. +

    No parents

    +

    Children: H51001 +

    +
    H50020 +When opening a persistent database, the B-Tree module shall allow the user +to specify that the connection be opened for read-only access. +

    No parents

    +

    No children

    +
    H50030 +When opening a persistent database, the B-Tree module shall allow the user +to specify that the connection only be opened if the specified file exists. +

    No parents

    +

    No children

    +
    H50040 +If SQLite is configured to run in shared-cache mode, and a connection is opened +to a persistent database file for which there already exists a page-cache within +the current process's address space, then the connection opened shall be a +connection to the existing page-cache. +

    No parents

    +

    No children

    +
    H50050 +If a new B-Tree database connection is opened and requirement H50040 does not apply, +then a new page-cache shall be created within the process's address space. The +opened connection shall be a connection to the new page-cache. +

    No parents

    +

    No children

    +
    H50060 +The B-Tree module shall provide an interface to close a B-Tree database connection. +

    No parents

    +

    No children

    +
    H50070 +If a B-Tree database connection is closed and this causes the associated +page-cache to have zero connections to it, then the page-cache shall be closed +and all associated resources released. +

    No parents

    +

    No children

    +
    H50080 +The B-Tree module shall provide an interface to configure the page-size of a +new database image. +

    No parents

    +

    No children

    +
    H50090 +The B-Tree module shall provide an interface to configure whether or not a new +database image is auto-vacuum capable. +

    No parents

    +

    No children

    +
    H50100 +The B-Tree module shall provide an interface to open (start) a read-only transaction. +

    No parents

    +

    No children

    +
    H50101 +The B-Tree module shall provide an interface to close (finish) a read-only transaction. +

    No parents

    +

    No children

    +
    H50102 +The B-Tree module shall provide an interface to open a read/write transaction +or to upgrade from a read-only transaction to a read/write transaction. +

    No parents

    +

    No children

    +
    H50103 +The B-Tree module shall provide an interface to commit a read/write transaction. +

    No parents

    +

    No children

    +
    H50104 +The B-Tree module shall provide an interface to rollback a read/write transaction. +

    No parents

    +

    No children

    +
    H50105 +The B-Tree module shall provide an interface to open savepoint transactions. +

    No parents

    +

    No children

    +
    H50106 +The B-Tree module shall provide an interface to commit savepoint transactions. +

    No parents

    +

    No children

    +
    H50107 +The B-Tree module shall provide an interface to rollback savepoint transactions. +

    No parents

    +

    No children

    +
    H50108 +The B-Tree module shall provide an interface to query a B-Tree database +connection to determine if there is an open transaction and, if so, whether the open +transaction is read-only or read/write. +

    No parents

    +

    No children

    +
    H50109 +The B-Tree module shall provide an interface to read the value of any of the +4-byte unsigned big-endian integer fields beginning at byte offset 36 of the +database image. +

    No parents

    +

    Children: H51015 + H51016 +

    +
    H50110 +The B-Tree module shall provide an interface to open a B-Tree cursor on any table or +index b-tree within the database image, given its root page number. +

    No parents

    +

    No children

    +
    H50111 +The B-Tree module shall provide an interface to close a B-Tree cursor. +

    No parents

    +

    No children

    +
    H50112 +The B-Tree module shall provide an interface to move an open B-Tree cursor to +the entry associated with the largest key in the open b-tree structure. +

    No parents

    +

    No children

    +
    H50113 +The B-Tree module shall provide an interface to move an open B-Tree cursor to +the entry associated with the smallest key in the open b-tree structure. +

    No parents

    +

    No children

    +
    H50114 +The B-Tree module shall provide an interface to move an open B-Tree cursor that +currently points at a valid b-tree entry to the next entry in the b-tree +structure, sorted in order of key value, if any. +

    No parents

    +

    No children

    +
    H50115 +The B-Tree module shall provide an interface to move an open B-Tree cursor that +currently points at a valid b-tree entry to the previous entry in the b-tree +structure, sorted in order of key value, if any. +

    No parents

    +

    No children

    +
    H50116 +The B-Tree module shall provide an interface to retrieve the key value +associated with the b-tree structure entry that a B-Tree cursor is pointing to, +if any. +

    No parents

    +

    No children

    +
    H50117 +The B-Tree module shall provide an interface to retrieve the blob of data (the +database record) associated with the b-tree structure entry that a B-Tree +cursor open on a table b-tree is pointing to, if any. +

    No parents

    +

    No children

    +
    H50118 +The B-Tree module shall provide an interface to return the number of entries +currently stored in the b-tree structure that a B-Tree cursor is open on. +

    No parents

    +

    No children

    +
    H50119 +Given a key value, the B-Tree module shall provide an interface to move a +B-Tree cursor open on a b-tree structure to the B-Tree entry with the matching +key value, if such an entry exists. +

    No parents

    +

    No children

    +
    H50120 +If the interface required by H50119 is used to search for a key value that is +not present in the b-tree structure and the b-tree is not empty, the cursor shall +be moved to an existing entry that would be adjacent to a hypothetical +entry with the specified key value. +

    No parents

    +

    No children

    +
    H50121 +The interface required by H50119 shall provide an indication to the caller as +to whether the cursor is left pointing at an entry with a key value that is +smaller, larger or equal to the requested value, or if it is pointing to no +entry at all (because the b-tree structure is empty). +

    No parents

    +

    No children

    +
    H50122 +The B-Tree module shall provide an interface to write a value to any of the +4-byte unsigned big-endian integer fields beginning at byte offset 36 of the +database image. +

    No parents

    +

    No children

    +
    H50123 +The B-Tree module shall provide an interface to create a new index or table +b-tree structure within the database image. The interface shall automatically +assign a root-page to the new b-tree structure. +

    No parents

    +

    No children

    +
    H50124 +The B-Tree module shall provide an interface to remove an existing index or +table b-tree structure from the database image, given the root page number of +the b-tree to remove. +

    No parents

    +

    No children

    +
    H50125 +The B-Tree module shall provide an interface to remove all entries from (delete +the contents of) an index or table b-tree, given the root page number of the +b-tree to empty. +

    No parents

    +

    No children

    +
    H50126 +When opening a B-Tree cursor using the interface required by H50110, it shall +be possible to specify that the new cursor be a write cursor, or an ordinary +read-only cursor. +

    No parents

    +

    No children

    +
    H50127 +The B-Tree module shall provide an interface that allows the user to delete the +b-tree entry that a write cursor points to, if any. +

    No parents

    +

    Children: L50013 +

    +
    H50128 +The B-Tree module shall provide an interface to insert new entries into a table +or index B-Tree, given a write cursor open on the table or index b-tree the new +entry is to be inserted into. +

    No parents

    +

    Children: L50001 + L50012 + L50002 + L50003 + L50004 +

    +
    H50129 +The B-Tree module shall provide an interface allowing the application to query +a b-tree database connection open on a persistent database for the name of the +underlying database file within the file-system. +

    No parents

    +

    No children

    +
    H50130 +The B-Tree module shall provide an interface allowing the application to query +a b-tree database connection open on a persistent database for the name of the +underlying journal file within the file-system. +

    No parents

    +

    No children

    +
    H50131 +The B-Tree module shall provide an interface to query an open b-tree database +handle to determine if the underlying database is a persistent database or a +temporary database. +

    No parents

    +

    No children

    +
    H50132 +The B-Tree module shall provide an interface to query the current locking-mode +of a page-cache, given an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50133 +The B-Tree module shall provide an interface to query the current journal-mode +of a page-cache, given an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50134 +The B-Tree module shall provide an interface to query the current journal file +size-limit of a page-cache, given an open b-tree database connection to that +page-cache. +

    No parents

    +

    No children

    +
    H50135 +The B-Tree module shall provide an interface to query the current database file +size-limit of a page-cache, given an open b-tree database connection to that +page-cache. +

    No parents

    +

    No children

    +
    H50136 +The B-Tree module shall provide an interface to query the current cache-size +of a page-cache, given an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50137 +The B-Tree module shall provide an interface to query the current safety-level +of a page-cache, given an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50138 +The B-Tree module shall provide an interface allowing the application to set +the locking-mode of a page-cache to either "normal" or "exclusive", given an +open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50139 +If the locking-mode of a page-cache is set to "normal" when a read/write +or read-only transaction is ended, any locks held on the database file-system +representation by the page-cache shall be relinquished. +

    No parents

    +

    No children

    +
    H50140 +If the locking-mode of a page-cache is set to "exclusive" when a read/write +or read-only transaction is ended, any locks held on the database file-system +representation by the page-cache shall be retained. +

    No parents

    +

    No children

    +
    H50141 +The B-Tree module shall provide an interface allowing the application to set +the journal-mode of a page-cache to one of "off", "memory", "delete", +"persist", or "truncate", given an open b-tree database connection to that +page-cache. +

    No parents

    +

    No children

    +
    H50142 +If the journal-mode of a page-cache is set to "off" when a read/write +transaction is opened, then the transaction shall use no journal file. +

    No parents

    +

    No children

    +
    H50143 +If the journal-mode of a page-cache is set to "memory" when a read/write +transaction is opened, then instead of using the journal file located in the +file-system, journal-file data shall be stored in main-memory. +

    No parents

    +

    No children

    +
    H50144 +If the journal-mode of a page-cache is set to "delete" when a read/write +transaction is opened, then any journal file used by the transaction shall +be deleted at the conclusion of the transaction. +

    No parents

    +

    No children

    +
    H50145 +If the journal-mode of a page-cache is set to "truncate" when a read/write +transaction is opened, then any journal file used by the transaction shall +be truncated to zero bytes in size at the conclusion of the transaction. +

    No parents

    +

    No children

    +
    H50146 +If the journal-mode of a page-cache is set to "persist" when a read/write +transaction is opened, then any journal file used by the transaction shall +remain in the file-system at the conclusion of the transaction. +

    No parents

    +

    No children

    +
    H50147 +The B-Tree module shall provide an interface to set the value of the +journal-file size limit configuration parameter of a page-cache, given +an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50148 +The default value assigned to the journal-file size limit configuration of a +page-cache shall be -1. +

    No parents

    +

    No children

    +
    H50149 +If the journal-file size limit parameter is set to a non-negative value, and +the user executes a write operation that would otherwise require the journal +file to be extended to a size greater than the configured value in bytes, then +the operation shall fail and an error be returned to the user. +

    No parents

    +

    No children

    +
    H50150 +The B-Tree module shall provide an interface to set the value of the +database-image size limit configuration parameter of a page-cache, given +an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50151 +The default value assigned to the database-image size limit configuration of a +page-cache shall be the value of the compile time symbol SQLITE_MAX_PAGE_COUNT +(1073741823 by default). +

    No parents

    +

    No children

    +
    H50152 +If the database-image size limit parameter is set to a non-negative value, and +the user executes a write operation that would otherwise require the database +image to be extended to a size greater than the configured value, then +the operation shall fail and an error be returned to the user. +

    No parents

    +

    No children

    +
    H50153 +The B-Tree module shall provide an interface to set the value of the +cache-size configuration parameter of a page-cache, given an open b-tree +database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50154 +The B-Tree module shall provide an interface allowing the application to set +the safety-level of a page-cache to one of "off", "normal" or "full", +given an open b-tree database connection to that page-cache. +

    No parents

    +

    No children

    +
    H50155 +The default value assigned to the safety-level configuration parameter of a +page-cache shall be "full". +

    No parents

    +

    No children

    +
    H51001 +If successful, a call to the sqlite3BtreeOpen function shall return SQLITE_OK +and set the value of *ppBtree to contain a new B-Tree database connection +handle. +

    Parents: H50010 +

    +

    No children

    +
    H51002 +If unsuccessful, a call to the sqlite3BtreeOpen function shall return an SQLite +error code other than SQLITE_OK indicating the reason for the failure. The +value of *ppBtree shall not be modified in this case. +

    No parents

    +

    No children

    +
    H51003 +If the zFilename parameter to a call to sqlite3BtreeOpen is NULL or a pointer +to a buffer of which the first byte is a nul (0x00), then sqlite3BtreeOpen +shall attempt to open a connection to a temporary database. +

    No parents

    +

    No children

    +
    H51004 +If the zFilename parameter to a call to sqlite3BtreeOpen is a pointer to a +buffer containing a nul-terminated UTF-8 encoded string, sqlite3BtreeOpen shall +attempt to open a connection to a persistent database. +

    No parents

    +

    No children

    +
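    (Illustration of H51001-H51004.) sqlite3BtreeOpen is an internal interface, so the prototype sketched below is an assumption reconstructed from the parameter names used in these requirements (zFilename, ppBtree, flags, vfsFlags); only the behaviors stated above are relied upon, and "db" stands for an assumed open sqlite3* handle.

        /* Assumed prototype (not part of the public SQLite API):
        ** int sqlite3BtreeOpen(const char *zFilename, sqlite3 *db,
        **                      Btree **ppBtree, int flags, int vfsFlags);
        */
        Btree *pBt = 0;
        int rc;

        /* H51004: a nul-terminated UTF-8 filename requests a persistent database. */
        rc = sqlite3BtreeOpen("test.db", db, &pBt, 0,
                              SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE);
        /* H51001: on SQLITE_OK, *ppBtree (here pBt) holds the new connection. */

        /* H51003: a NULL or empty filename requests a temporary database. */
        rc = sqlite3BtreeOpen(0, db, &pBt, BTREE_OMIT_JOURNAL, 0);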
    H51005 +If the BTREE_OMIT_JOURNAL bit is set in the flags parameter passed to a +successful call to sqlite3BtreeOpen to open a temporary database, then the +page-cache created as a result shall not open or use a journal file for any +purpose. +

    No parents

    +

    No children

    +
    H51006 +If the BTREE_NO_READLOCK bit is set in the flags parameter passed to a +successful call to sqlite3BtreeOpen to open a persistent database and a +new page-cache is created as a result of the call, then the new page-cache +shall only lock the database file-system representation when writing to +it. +

    No parents

    +

    No children

    +
    H51007 +If the sqlite3BtreeOpen function is called to open a connection to a persistent +database, and the call causes a new page-cache to be created, when opening the +database file using the VFS interface xOpen method the 4th parameter passed to +xOpen (flags) shall be a copy of the vfsFlags value passed to sqlite3BtreeOpen. +

    No parents

    +

    No children

    +
    H51008 +If the sqlite3BtreeOpen function is called to open a connection to a temporary +database, if and when a temporary file is opened to use as secondary storage +using the VFS interface xOpen method the 4th parameter passed to xOpen (flags) +shall be a copy of the vfsFlags value passed to sqlite3BtreeOpen with the +SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_EXCLUSIVE and +SQLITE_OPEN_DELETEONCLOSE bits also set. +

    No parents

    +

    No children

    +
    H51009 +A call to the sqlite3BtreeClose function with a valid b-tree database +connection handle passed as the only argument shall invalidate the handle, +close the b-tree database connection and release all associated resources. +

    No parents

    +

    No children

    +
    H51010 +If a call to sqlite3BtreeClose is made to close a b-tree database connection +while there exist open B-Tree cursors that were opened using the specified +b-tree database connection, they shall be closed automatically from within +sqlite3BtreeClose, just as if their handles were passed to +sqlite3BtreeCloseCursor. +

    No parents

    +

    No children

    +
    H51011 +A call to the sqlite3BtreeGetFilename function with a valid B-Tree database +connection handle opened on a persistent database as the first argument shall +return a pointer to a buffer containing the full-path of the database file +formatted as a nul-terminated, UTF-8 string. +

    No parents

    +

    No children

    +
    H51012 +A call to the sqlite3BtreeGetFilename function with a valid B-Tree database +connection handle opened on a temporary database as the first argument shall +return a pointer to a buffer containing a nul-terminated string zero bytes in length +(i.e. the first byte of the buffer shall be 0x00). +

    No parents

    +

    No children

    +
    H51013 +A call to the sqlite3BtreeGetJournalname function with a valid B-Tree database +connection handle opened on a persistent database as the first argument shall +return a pointer to a buffer containing the full-path of the journal file +formatted as a nul-terminated, UTF-8 string. +

    No parents

    +

    No children

    +
    H51014 +A call to the sqlite3BtreeGetJournalname function with a valid B-Tree database +connection handle opened on a temporary database as the first argument shall +return a pointer to a buffer containing a nul-terminated string zero bytes in length +(i.e. the first byte of the buffer shall be 0x00). +

    No parents

    +

    No children

    +
    H51015 +If successful, a call to the sqlite3BtreeGetMeta function shall set the +value of *pValue to the current value of the specified 32-bit unsigned +integer field in the database header of the database image and return SQLITE_OK. +

    Parents: H50109 +

    +

    No children

    +
    H51016 +The database header field read from the database image by a call to +sqlite3BtreeGetMeta shall be the 32-bit unsigned integer header field stored at +byte offset (36 + 4 * idx) of the database header, where idx is the value of +the second parameter passed to sqlite3BtreeGetMeta. +

    Parents: H50109 +

    +

    No children

    +
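    (Illustration of H51015/H51016.) The offset arithmetic pinned down by these requirements can be shown without the internal API: a hypothetical helper (not part of SQLite) that decodes the same big-endian header field from a buffer holding page 1 of the database image.

        #include <stdint.h>

        /* Hypothetical helper: returns the 32-bit big-endian header field that
        ** sqlite3BtreeGetMeta(idx) is required to read, i.e. the field stored
        ** at byte offset (36 + 4*idx) of the database header. */
        static uint32_t meta_field(const unsigned char *aPage1, int idx){
          const unsigned char *p = &aPage1[36 + 4*idx];
          return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
               | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
        }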
    L50001 +A successful call to the sqlite3BtreeInsert function made with a read/write +b-tree cursor passed as the first argument shall insert a new entry into +the b-tree structure the b-tree cursor is open on. +

    Parents: H50128 +

    +

    No children

    +
    L50012 +If a call to sqlite3BtreeInsert is made to insert an entry specifying a key +value for which there already exists a matching key within the b-tree +structure, the entry with the matching key shall be removed from the b-tree +structure before the new entry is inserted. +

    Parents: H50128 +

    +

    No children

    +
    L50002 +If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is +open on a table b-tree, then the value passed as the second parameter (pKey) +shall be ignored. The value passed as the third parameter (nKey) shall be +used as the integer key for the new entry. +

    Parents: H50128 +

    +

    No children

    +
    L50003 +If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is +open on a table b-tree, then the database record associated with the new entry +shall consist of a copy of the first nData bytes of the buffer pointed to by pData +followed by nZero zero (0x00) bytes, where pData, nData and nZero are the +fourth, fifth and sixth parameters passed to sqlite3BtreeInsert, respectively. +

    Parents: H50128 +

    +

    No children

    +
    L50004 +If the b-tree cursor passed to sqlite3BtreeInsert as the first argument is +open on an index b-tree, then the values passed as the fourth, fifth and sixth +parameters shall be ignored. The key (a database record) used by the new entry +shall consist of the first nKey bytes of the buffer pointed to by pKey, where +pKey and nKey are the second and third parameters passed to sqlite3BtreeInsert, +respectively. +

    Parents: H50128 +

    +

    No children

    +
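    (Illustration of L50001-L50004.) sqlite3BtreeInsert is an internal interface; the prototype below is an assumption pieced together from the parameter positions named in these requirements, and the cursor and buffer variables are hypothetical.

        /* Assumed prototype (parameter order taken from L50002-L50006):
        ** int sqlite3BtreeInsert(BtCursor *pCur,
        **                        const void *pKey, i64 nKey,    -- 2nd, 3rd
        **                        const void *pData, int nData,  -- 4th, 5th
        **                        int nZero,                     -- 6th
        **                        int bias, int seekResult);     -- 7th, 8th
        */

        /* Table b-tree (L50002, L50003): pKey is ignored, nKey is the integer
        ** key, and the record is nData bytes of pData followed by nZero zeros. */
        rc = sqlite3BtreeInsert(pTabCursor, 0, iRowid, aRecord, nRecord, 0, 0, 0);

        /* Index b-tree (L50004): pData/nData/nZero are ignored and the key is
        ** the first nKey bytes of the record pointed to by pKey. */
        rc = sqlite3BtreeInsert(pIdxCursor, aKeyRecord, nKeyRecord, 0, 0, 0, 0, 0);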
    L50005 +If the value passed as the seventh parameter to a call to sqlite3BtreeInsert +is non-zero, sqlite3BtreeInsert shall interpret this to mean that it is likely +(but not certain) that the key belonging to the new entry is larger than the +largest key currently stored in the b-tree structure, and optimize accordingly. +

    No parents

    +

    No children

    +
    L50006 +If the value passed as the eighth parameter to a call to sqlite3BtreeInsert +is non-zero, then the B-Tree module shall interpret this to mean that the +b-tree cursor has already been positioned by a successful call to +sqlite3BtreeMovetoUnpacked specifying the same key value as is being inserted, +and that sqlite3BtreeMovetoUnpacked has set the output value required by L50011 to +this value. +

    No parents

    +

    No children

    +
    L50008 +If a call is made to sqlite3BtreeMovetoUnpacked specifying a key value for +which there exists an entry with a matching key value in the b-tree structure, +the b-tree cursor shall be moved to point to this entry. In this case *pRes +(the value of the "int" variable pointed to by the pointer passed as the +fifth parameter to sqlite3BtreeMovetoUnpacked) shall be set to 0 before +returning. +

    No parents

    +

    No children

    +
    L50009 +If a call is made to sqlite3BtreeMovetoUnpacked specifying a key value for +which there does not exist an entry with a matching key value in the b-tree +structure, the b-tree cursor shall be moved to point to an entry located +on the leaf page that would contain the requested entry, were it present. +

    No parents

    +

    No children

    +
    L50010 +If the condition specified in L50009 is met and the b-tree structure +contains one or more entries (is not empty), the b-tree cursor shall be left +pointing to an entry that would lie adjacent (immediately before or after in +order by key) to the requested entry on the leaf page, were it present. +

    No parents

    +

    No children

    +
    L50011 +If the condition specified in L50009 is met and the b-tree cursor is left +pointing to an entry with a smaller key than that requested, or the cursor +is left pointing at no entry at all because the b-tree structure is completely +empty, *pRes (the value of the "int" variable pointed to by the pointer passed +as the fifth parameter to sqlite3BtreeMovetoUnpacked) shall be set to -1. +Otherwise, if the b-tree cursor is left pointing to an entry with a larger key +than that requested, *pRes shall be set to 1. +

    No parents

    +

    No children

    +
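    (Illustration of L50008-L50011.) A hedged sketch of how a caller interprets *pRes, the "int" output passed as the fifth parameter to sqlite3BtreeMovetoUnpacked; the call itself and the variables pCur and pIdxKey are assumptions, since this is an internal interface.

        int res = 0;
        rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, 0, 0, &res);
        if( rc==SQLITE_OK ){
          if( res==0 ){
            /* L50008: the cursor points at the entry with the matching key. */
          }else if( res<0 ){
            /* L50011: the cursor points at an entry with a smaller key, or at
            ** no entry at all because the b-tree structure is empty. */
          }else{
            /* L50011: the cursor points at an entry with a larger key. */
          }
        }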
    L50013 +A successful call to the sqlite3BtreeDelete function made with a read/write +b-tree cursor passed as the first argument shall remove the entry pointed to by +the b-tree cursor from the b-tree structure. +

    Parents: H50127 +

    +

    No children

    +
    L51001 +The balance-siblings algorithm shall redistribute the b-tree cells currently +stored on an overfull or underfull page and up to two sibling pages, adding +or removing siblings as required, such that no sibling page is overfull and +the minimum possible number of sibling pages is used to store the +redistributed b-tree cells. +

    No parents

    +

    No children

    +
    +
    +This page last modified 2008/12/09 22:44:35 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/requirements.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/requirements.html --- sqlite3-3.4.2/www/requirements.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/requirements.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,143 @@ + + +SQLite Requirements + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    SQLite Requirements

    + +

    1.0 Introduction

    + +

    These requirements strive to describe the interfaces and operation of +SQLite in sufficient detail that a compatible implementation of SQLite +can be written solely from these requirements and without reference to +the canonical SQLite source code.

    + +

    Software development processes typically recognize a hierarchy of +requirements:

    + +
      +
    • System requirements
    • +
    • High-level requirements
    • +
    • Derived high-level requirements
    • +
    • Low-level requirements
    • +
    • Derived low-level requirements
    • +
    + +

    The usual distinction between high-level and low-level requirements is +that high-level requirements describe "what" the system does and the +low-level requirements describe "how" the system does it. Since the +requirements denoted here describe the +behavior of SQLite and not its implementation, they are best thought of +as high-level requirements. Consistent with that view, most of +the requirement numbers begin with the letter "H" (for "high-level"). +A few of the requirements presented here specify broad objectives that +SQLite strives to achieve. These broad requirements can be thought of +as system requirements and are numbered with an initial letter "S".

    + +

    These requirements are hierarchical in the sense that the more +specific requirements are derived from broader and more general +requirements. When requirement B is derived from requirement A, we say +that A is the parent of B and that B is a child of A. The parent/child +relationships of all requirements are tracked. All requirements presented +here ultimately derive from a single very broad, very high-level, and +very general system requirement called "S10000".

    + +

    Some behaviors of SQLite are undefined. For example, the order in +which result rows are returned from a SELECT statement is undefined if +there is no ORDER BY clause. As another example, many of the C interfaces +require a pointer to an open database connection as their first argument +and the behavior of those interfaces is undefined (and probably undesirable) +if a pointer to some other object is supplied instead. Some, but not all, +undefined behaviors are explicitly stated and numbered in this document +with numbers beginning with the "U" for "Undefined". Applications +that use SQLite should never depend on undefined behavior. If a behavior +is not explicitly defined by a requirement, then the behavior is undefined, +and so explicitly stating undefined behaviors in this document is +technically redundant. Nevertheless, we find that explicitly stating +some undefined behaviors helps application developers to better understand +the boundaries of operation of SQLite and to generate safer and more +accurate programs that use SQLite.

    + +

    2.0 System Requirements

    + +

    3.0 Application C-Language Interfaces

    + +

    4.0 C-Language Interfaces For +Extending SQLite

    + +

    5.0 Database File Format

    + +

    6.0 SQL Language Specification

    +
    +This page last modified 2009/02/19 14:17:41 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/robots.txt /tmp/3ARg2Grji7/sqlite3-3.6.16/www/robots.txt --- sqlite3-3.4.2/www/robots.txt 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/robots.txt 2009-06-27 15:07:33.000000000 +0100 @@ -0,0 +1,21 @@ +User-agent: * +Disallow: /cvstrac/attach_add +Disallow: /cvstrac/attach_get +Disallow: /cvstrac/chngedit +Disallow: /cvstrac/chngview +Disallow: /cvstrac/dir +Disallow: /cvstrac/filediff +Disallow: /cvstrac/getfile +Disallow: /cvstrac/msedit +Disallow: /cvstrac/msnew +Disallow: /cvstrac/rlog +Disallow: /cvstrac/rptedit +Disallow: /cvstrac/rptnew +Disallow: /cvstrac/rptsql +Disallow: /cvstrac/timeline +Disallow: /cvstrac/tktedit +Disallow: /cvstrac/tktview +Disallow: /cvstrac/wikiedit +Disallow: /cvstrac/honeypot +Disallow: /cvstrac/wiki/attach_get +Disallow: /contrib/download diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/rtree.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/rtree.html --- sqlite3-3.4.2/www/rtree.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/rtree.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,370 @@ + + +The SQLite R*Tree Module + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    1.0 Overview

    + +

    +An R-Tree is a special +index that is designed for doing range queries. R-Trees are most commonly +used in geospatial systems where each entry is a rectangle with minimum and +maximum X and Y coordinates. Given a query rectangle, an R-Tree is able +to quickly find all entries that are contained within the query rectangle +or which overlap the query rectangle. This idea is easily extended to +three dimensions for use in CAD systems. R-Trees also find use in time-domain +range look-ups. For example, suppose a database records the starting and +ending times for a large number of events. An R-Tree is able to quickly +find all events, for example, that were active at any time during a given +time interval, or all events that started during a particular time interval, +or all events that both started and ended within a given time interval. +And so forth. +

    + +

    +The R-Tree concept originated with +Toni Guttman: +R-Trees: A Dynamic Index Structure for Spatial Searching, +Proc. 1984 ACM SIGMOD International Conference on Management of Data, +pp. 47-57. +The implementation found in SQLite is a refinement of Guttman's original +idea, commonly called "R*Trees", that was described by +Norbert Beckmann, Hans-Peter Kriegel, Ralf Schneider, Bernhard Seeger: +The R*-Tree: An Efficient and Robust Access Method for Points +and Rectangles. SIGMOD Conference 1990: 322-331. +

    + +

    2.0 Compiling The R*Tree Module

    + +

    +The source code to the SQLite R*Tree module is included as part +of the amalgamation but is disabled by default. To enable the +R*Tree module, simply compile with the SQLITE_ENABLE_RTREE +C-preprocessor macro defined. With many compilers, this is accomplished +by adding the option "-DSQLITE_ENABLE_RTREE=1" to the compiler +command-line. +

    + +

    3.0 Using the R*Tree Module

    + +

    +The SQLite R*Tree module is implemented as a +virtual table. Each R*Tree index is a +virtual table with an odd number of columns between 3 and 11. +The first column is always a 64-bit signed integer primary key. +The other columns are minimum- and maximum-value pairs (stored as +32-bit floating point numbers) for each +dimension. A 1-dimensional R*Tree thus has 3 columns. +A 2-dimensional R*Tree (the most common case) has 5 columns. +A 5-dimensional R*Tree has 11 columns. The SQLite R*Tree implementation +does not support R*Trees wider than 5 dimensions. +

    + +

    +The first column of an SQLite R*Tree must always be an integer +primary key. +The min/max-value pair columns are always stored as +32-bit floating point values. Unlike regular SQLite tables which +can store data in a variety of datatypes and formats, the R*Tree +indices rigidly enforce these two storage types. Attempts to insert +something other than an integer into the first column, or something +other than a floating point value into the other columns, will result +in an error. +

    + +

    3.1 Creating An R*Tree Index

    + +

    +A new R*Tree index is created as follows: +

    + +
    +CREATE VIRTUAL TABLE <name> USING rtree(<column-names>);
    +
    + +

    +The <name> is the name your application chooses for the +R*Tree index and <column-names> is a comma separated list +of between 3 and 11 columns. +The virtual <name> table creates three "shadow" tables to actually +store its content. The names of these shadow tables are: +

    + +
    +<name>_node
    +<name>_rowid
    +<name>_parent +
    + +

+The shadow tables are ordinary SQLite data tables. You can query them +directly if you like, though this is unlikely to reveal anything particularly +useful. +And you can UPDATE, DELETE, INSERT or even DROP +the shadow tables, though doing so will corrupt your R*Tree index. +So it is best to simply ignore the shadow tables. Recognize that they +are there to hold your R*Tree index information and let it go at that. +

    + +

    +As an example, consider creating a two-dimensional R*Tree index for use in +spatial queries: +

    + +
    +CREATE VIRTUAL TABLE demo_index USING rtree(
    +   id,              -- Integer primary key
    +   minX, maxX,      -- Minimum and maximum X coordinate
    +   minY, maxY       -- Minimum and maximum Y coordinate
    +);
    +
    + +

    3.2 Populating An R*Tree Index

    + +

    +The usual INSERT, UPDATE, and DELETE commands work on an R*Tree +index just like on regular tables. So to insert some data into our sample +R*Tree index, we can do something like this: +

    + +
    +INSERT INTO demo_index VALUES(
    +    1,                   -- Primary key
    +    -80.7749, -80.7747,  -- Longitude range
    +    30.3776, 30.3778     -- Latitude range
    +);
    +INSERT INTO demo_index VALUES(
    +    2,
    +    -81.0, -79.6,
    +    35.0, 36.2
    +);
    +
    + +

+The entries above might represent (for example) a bounding box around +the offices for SQLite.org and a bounding box around the +12th Congressional District of North Carolina in which SQLite.org is located. +

    + +

    3.3 Querying An R*Tree Index

    + +

    +Any valid query will work against an R*Tree index. But the R*Tree +implementation is designed to make two kinds of queries especially +efficient. First, queries against the primary key are efficient: +

    + +
    +SELECT * FROM demo_index WHERE id=1;
    +
    + +

    +Of course, an ordinary SQLite table will do a query against its +integer primary key efficiently, so the previous is no big deal. +The real reason for using an R*Tree in the first place is so that +you can efficiently do inequality queries against the coordinate +ranges. To find all elements of the index that are contained within +the vicinity of Charlotte, North Carolina, one might do: +

    + +
    +SELECT id FROM demo_index
    + WHERE minX>=-81.08 AND maxX<=-80.58
    +   AND minY>=35.00  AND maxY<=35.44;
    +
    + +

    +The query above would very quickly locate id of 1 even if the +R*Tree contained millions of entries. The previous is an example +of a "contained-within" query. The R*Tree also supports "overlapping" +queries. For example, to find all bounding boxes that overlap the +Charlotte area: +

    + +
    +SELECT id FROM demo_index
    + WHERE maxX>=-81.08 AND minX<=-80.58
    +   AND maxY>=35.00  AND minY<=35.44;
    +
    + +

    +This second query would find both entry 1 (the SQLite.org office) which +is entirely contained within the query box and also +Mel Watt's Congressional District which extends well outside the +query box but still overlaps the query box. +

    + +

+Note that it is not necessary for all coordinates in an R*Tree index +to be constrained in order for the index search to be efficient. +One might, for example, want to query all objects that overlap with +the 35th parallel: +

    + +
    +SELECT id FROM demo_index
    + WHERE maxY>=35.0  AND minY<=35.0;
    +
    + +

    +But, generally speaking, the more constraints that the R*Tree module +has to work with, and the smaller the bounding box, the faster the +results will come back. +

    + +

    4.0 Using R*Trees Effectively

    + +

    +The only information that an R*Tree index stores about an object is +its integer ID and its bounding box. Additional information needs to +be stored in separate tables and related to the R*Tree index using +the primary key. For the example above, one might create an auxiliary +table as follows: +

    + +
    +CREATE TABLE demo_data(
    +  id INTEGER PRIMARY KEY,  -- primary key
    +  objname TEXT,            -- name of the object
    +  objtype TEXT,            -- object type
    +  boundary BLOB            -- detailed boundary of object
    +);
    +
    + +

    +In this example, the demo_data.boundary field is intended to hold some +kind of binary representation of the precise boundaries of the object. +The R*Tree index only holds an axis-aligned rectangular boundary for the +object. The R*Tree boundary is just an approximation of the true object +boundary. So what typically happens is that the R*Tree index is used to +narrow a search down to a list of candidate objects and then more detailed +and expensive computations are done on each candidate to find if the +candidate truly meets the search criteria. +

    + +

    +Key Point: +An R*Tree index does not normally provide the exact answer but merely +reduces the set of potential answers from millions to dozens. +

    + +

+Suppose the demo_data.boundary field holds some proprietary data description +of a complex two-dimensional boundary for an object and suppose that the +application has used the sqlite3_create_function() interface to +create application-defined functions "contained_in" and +"overlaps" that accept two demo_data.boundary objects and return true or false. +One may assume that "contained_in" and "overlaps" are relatively slow +functions that we do not want to invoke too frequently. +Then an efficient way to find the names of all objects located within +the North Carolina 12th District is to run a query like this: +

    + +
    +SELECT objname FROM demo_data, demo_index
    + WHERE demo_data.id=demo_index.id
    +   AND contained_in(demo_data.boundary, :boundary)
+   AND minX>=-81.0 AND maxX<=-79.6
    +   AND minY>=35.0 AND maxY<=36.2;
    +
    + +

    In the query above, one would presumably bind the binary BLOB +description of the precise boundary of the 12th district to the +":boundary" parameter.

    + +

    Notice how the query above works: The R*Tree index runs in the outer +loop to find entries that are contained within the bounding box +of longitude -81..-79.6 and latitude 35.0..36.2. +For each object identifier found, SQLite looks up +the corresponding entry in the demo_data table. It then uses the boundary +field from the demo_data table as a parameter to the contained_in() +function and if that function returns true, the objname field from +the demo_data table is returned as the next row of query result.
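The contained_in() function used in the query above is an ordinary application-defined SQL function. Below is a minimal, hypothetical sketch (an editor addition, not from the original page) of how such a function might be registered through sqlite3_create_function(); the geometry test itself is reduced to a stub because the boundary BLOB format is application-specific.

#include "sqlite3.h"

/* Placeholder geometry test (assumption): a real application would
** decode its proprietary boundary BLOBs here and perform an exact
** containment test. */
static int boundaryContainedIn(const void *pIn, int nIn,
                               const void *pOut, int nOut){
  (void)pIn; (void)nIn; (void)pOut; (void)nOut;
  return 0;  /* stub */
}

/* SQL: contained_in(inner_boundary, outer_boundary) -> 0 or 1 */
static void containedInFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_int(ctx, boundaryContainedIn(
      sqlite3_value_blob(argv[0]), sqlite3_value_bytes(argv[0]),
      sqlite3_value_blob(argv[1]), sqlite3_value_bytes(argv[1])));
}

/* Register the function on an open database connection. */
int registerContainedIn(sqlite3 *db){
  return sqlite3_create_function(db, "contained_in", 2, SQLITE_ANY,
                                 0, containedInFunc, 0, 0);
}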

    + +

    One would get the same answer without the use of the R*Tree index +using the following simpler query:

    + +
    +SELECT objname FROM demo_data
    + WHERE contained_in(demo_data.boundary, :boundary);
    +
    + +

The problem with this latter query is that it must apply the +contained_in() function to millions of entries in the demo_data table. +The use of the R*Tree in the penultimate query reduces the number of +calls to the contained_in() function to a small subset of the entire table. +The R*Tree index did not find the exact answer itself; it merely +limited the search space.

    +
    +This page last modified 2009/03/17 11:06:25 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/selfcontained.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/selfcontained.html --- sqlite3-3.4.2/www/selfcontained.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/selfcontained.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,142 @@ + + +SQLite Is Self-Contained + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Is Self-Contained

    + +

    +SQLite is largely self-contained. It requires very minimal +support from external libraries or from the operating system. +This makes it well suited for use in embedded devices that +lack the support infrastructure of a desktop computer. This +also makes SQLite appropriate for use within applications that +need to run without modification on a wide variety of computers +of varying configurations. +

    + +

    +SQLite is written in ANSI-C and should be easily compiled by +any standard C compiler. It makes minimal use of the standard C +library. The only required C library functions called are: +

    + +
      +
    • memset() +
    • memcpy() +
    • memcmp() +
    • strcmp() +
    • malloc(), free(), and realloc() +
    + +

+SQLite can be configured at start-time to +use a static buffer in place of calling malloc() +for the memory it needs. +The date and time SQL functions provided by SQLite require +some additional C library support, but those functions can +also be omitted from the build using compile-time options. +
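For illustration only, the following sketch shows what that start-time configuration might look like, assuming the library was built with one of the alternative memory allocators (for example SQLITE_ENABLE_MEMSYS5); the buffer size and minimum allocation size are arbitrary assumptions.

#include "sqlite3.h"

/* A static buffer handed to SQLite at start-time; subsequent
** allocations are carved out of this region instead of using
** malloc().  The 1 MB size and 64-byte minimum allocation are
** arbitrary assumptions. */
static char aMemBuffer[1024*1024];

int configureStaticHeap(void){
  /* Must be called before any database connections are opened. */
  return sqlite3_config(SQLITE_CONFIG_HEAP,
                        aMemBuffer, (int)sizeof(aMemBuffer), 64);
}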

    + +

+Communications between SQLite and the operating system and disk are +mediated through an interchangeable +Virtual File System (VFS) layer. +VFS modules for Unix (Linux and Mac OS X), OS/2, and Windows (Win32 and WinCE) +are provided in the source tree. It is a simple matter to devise an +alternative VFS for embedded devices. +

    + +

    +For safe operation in multi-threaded environments, SQLite requires +the use of mutexes. Appropriate mutex libraries are linked automatically +for Win32 and POSIX platforms. For other systems, mutex primitives +can be added at start-time using the +sqlite3_config(SQLITE_CONFIG_MUTEX,...) interface. +Mutexes are only required if SQLite is +used by more than one thread at a time. +

    + +

    +The SQLite source code is available as an +"amalgamation" - a single large C source code file. +Projects that want to include SQLite can do so simply +by dropping this one source file (named "sqlite3.c") and +its corresponding header ("sqlite3.h") into their source +tree and compiling it together with the rest of the +code. SQLite does not link against any external libraries +(other than the C library, as described above) and does +not require any special build support. +
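As a hypothetical illustration (the file names and compile command are assumptions, not from the original page), a minimal program built against the amalgamation might look like this:

/* Hypothetical compile command (assumption):
**   cc -o hello hello.c sqlite3.c
*/
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  if( sqlite3_open("hello.db", &db)!=SQLITE_OK ){
    fprintf(stderr, "cannot open database: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    return 1;
  }
  printf("linked against SQLite %s\n", sqlite3_libversion());
  sqlite3_close(db);
  return 0;
}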

    +
    +This page last modified 2009/02/16 16:58:59 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/serverless.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/serverless.html --- sqlite3-3.4.2/www/serverless.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/serverless.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,117 @@ + + +SQLite Is Serverless + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Is Serverless

    + +

    +Most SQL database engines are implemented as a separate server process. +Programs that want to access the database communicate with the server +using some kind of interprocess communication (typically TCP/IP) to send +requests to the server and to receive back results. +SQLite does not work this way. +With SQLite, the process that wants to access the database reads and +writes directly from the database files on disk. +There is no intermediary server process. +

    + +

    +There are advantages and disadvantages to being serverless. +The main advantage is that there is no separate server process +to install, setup, configure, initialize, manage, and troubleshoot. +This is one reason why SQLite is a +"zero-configuration" database engine. +Programs that use SQLite require no administrative support for +setting up the database engine before they are run. +Any program that is able to access the disk is able to use an SQLite database. +

    + +

+On the other hand, a database engine that uses a server can +provide better protection from bugs in the client +application - stray pointers in a client cannot corrupt memory +on the server. +And because a server is a single persistent process, +it is able to control database access with more precision, +allowing for finer-grained locking and better concurrency. +

    + +

    +Most SQL database engines are client/server based. +Of those that are serverless, SQLite is the only one +known to this author that allows multiple applications +to access the same database at the same time. +

    +
    +This page last modified 2008/03/03 13:41:48 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sharedcache.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sharedcache.html --- sqlite3-3.4.2/www/sharedcache.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/sharedcache.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,273 @@ + + +SQLite Shared-Cache Mode + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + + + +

    1.0 SQLite Shared-Cache Mode

    + +

    Starting with version 3.3.0, SQLite includes a special "shared-cache" +mode (disabled by default) intended for use in embedded servers. If +shared-cache mode is enabled and a thread establishes multiple connections +to the same database, the connections share a single data and schema cache. +This can significantly reduce the quantity of memory and IO required by +the system.

    + +

In version 3.5.0, shared-cache mode was modified so that the same +cache can be shared across an entire process rather than just within +a single thread. Prior to this change, there were restrictions on +passing database connections between threads. Those restrictions were +dropped in the 3.5.0 update. This document describes shared-cache mode +as of version 3.5.0.

    + +

    Shared-cache mode changes the semantics +of the locking model in some cases. The details are described by +this document. A basic understanding of the normal SQLite locking model (see +File Locking And Concurrency In SQLite Version 3 +for details) is assumed.

    + +

    2.0 Shared-Cache Locking Model

    + +

    Externally, from the point of view of another process or thread, two +or more database connections using a shared-cache appear as a single +connection. The locking protocol used to arbitrate between multiple +shared-caches or regular database users is described elsewhere. +

    + + +
    + + +
    +

    Figure 1

    + +

Figure 1 depicts an example runtime configuration where three +database connections have been established. Connection 1 is a normal +SQLite database connection. Connections 2 and 3 share a cache. +The normal locking +protocol is used to serialize database access between connection 1 and +the shared cache. The internal protocol used to serialize (or not, see +"Read-Uncommitted Isolation Mode" below) access to the shared-cache by +connections 2 and 3 is described in the remainder of this section. +

    + +

    There are three levels to the shared-cache locking model, +transaction level locking, table level locking and schema level locking. +They are described in the following three sub-sections.

    + +

    2.1 Transaction Level Locking

    + +

SQLite connections can open two kinds of transactions: read and write +transactions. This is not done explicitly; a transaction is implicitly a +read-transaction until it first writes to a database table, at which point +it becomes a write-transaction. +

    +

    At most one connection to a single shared cache may open a +write transaction at any one time. This may co-exist with any number of read +transactions. +

    + +

    2.2 Table Level Locking

    + +

    When two or more connections use a shared-cache, locks are used to +serialize concurrent access attempts on a per-table basis. Tables support +two types of locks, "read-locks" and "write-locks". Locks are granted to +connections - at any one time, each database connection has either a +read-lock, write-lock or no lock on each database table. +

    + +

At any one time, a single table may have any number of active read-locks +or a single active write lock. To read data from a table, a connection must +first obtain a read-lock. To write to a table, a connection must obtain a +write-lock on that table. If a required table lock cannot be obtained, +the query fails and SQLITE_LOCKED is returned to the caller. +

    + +

    Once a connection obtains a table lock, it is not released until the +current transaction (read or write) is concluded. +
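As an illustration of the SQLITE_LOCKED behaviour described above (an editor sketch, not part of the original page), a caller sharing a cache might simply retry a statement a few times when the required table lock cannot be obtained:

#include "sqlite3.h"

/* Run a statement, retrying a few times if a shared-cache table
** lock cannot be obtained.  The retry count and the 10 ms back-off
** are assumptions chosen only for illustration. */
int execWithRetry(sqlite3 *db, const char *zSql){
  int rc, i;
  for(i=0; i<5; i++){
    rc = sqlite3_exec(db, zSql, 0, 0, 0);
    if( rc!=SQLITE_LOCKED ) break;
    sqlite3_sleep(10);
  }
  return rc;
}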

    + +

    2.2.1 Read-Uncommitted Isolation Mode

    + +

    The behaviour described above may be modified slightly by using the +read_uncommitted pragma to change the isolation level from serialized +(the default), to read-uncommitted.

    + +

    A database connection in read-uncommitted mode does not attempt +to obtain read-locks before reading from database tables as described +above. This can lead to inconsistent query results if another database +connection modifies a table while it is being read, but it also means that +a read-transaction opened by a connection in read-uncommitted mode can +neither block nor be blocked by any other connection.

    + +

    Read-uncommitted mode has no effect on the locks required to write to +database tables (i.e. read-uncommitted connections must still obtain +write-locks and hence database writes may still block or be blocked). +Also, read-uncommitted mode has no effect on the sqlite_master +locks required by the rules enumerated below (see section +"Schema (sqlite_master) Level Locking"). +

    + +
    +  /* Set the value of the read-uncommitted flag:
    +  **
    +  **   True  -> Set the connection to read-uncommitted mode.
+  **   False -> Set the connection to serialized (the default) mode.
    +  */
    +  PRAGMA read_uncommitted = <boolean>;
    +
    +  /* Retrieve the current value of the read-uncommitted flag */
    +  PRAGMA read_uncommitted;
    +
    + +

    2.3 Schema (sqlite_master) Level Locking

    + +

    The sqlite_master table supports shared-cache read and write +locks in the same way as all other database tables (see description +above). The following special rules also apply: +

    + +
      +
    • A connection must obtain a read-lock on sqlite_master before +accessing any database tables or obtaining any other read or write locks.
    • +
    • Before executing a statement that modifies the database schema (i.e. +a CREATE or DROP TABLE statement), a connection must obtain a write-lock on +sqlite_master. +
    • +
    • A connection may not compile an SQL statement if any other connection +is holding a write-lock on the sqlite_master table of any attached +database (including the default database, "main"). +
    • +
    + +

    3.0 Thread Related Issues

    + +

    In SQLite versions 3.3.0 through 3.4.2 when shared-cache mode is enabled, +a database connection may only be +used by the thread that called sqlite3_open() to create it. +And a connection could only share cache with another connection in the +same thread. +These restrictions were dropped beginning with SQLite version 3.5.0. +

    + +

    4.0 Shared Cache And Virtual Tables

    + +

    Shared cache mode cannot be used together with virtual tables. +The reason for this is that virtual tables often make a copy of +the "sqlite3*" database handle that was used to originally open +the virtual table. The virtual table might use this handle to +prepare statements for recursive access to the database. But +a prepared statement only works for the database connection on +which it was originally created. If a virtual table is part of +a shared cache, it might be invoked by multiple database connections +but its prepared statements will only work on one of those +connections. To avoid problems sorting all of this out, and to +simplify the implementation of virtual tables, SQLite includes +checks that prohibit shared cache and virtual tables from being +used at the same time.

    + +

    5.0 Enabling Shared-Cache Mode

    + +

Shared-cache mode is enabled on a per-process basis. Using the C +interface, the following API can be used to enable or disable shared-cache +mode for the entire process: +

    + +
    +int sqlite3_enable_shared_cache(int);
    +
    + +

Each call to sqlite3_enable_shared_cache() affects subsequent database +connections created using sqlite3_open(), sqlite3_open16(), or +sqlite3_open_v2(). Database connections that already exist are +unaffected. Each call to sqlite3_enable_shared_cache() overrides +all previous calls within the same process. +
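A minimal sketch of the call sequence, under the assumption that the application enables shared-cache mode once before opening the connections that are meant to share a cache (the helper function name is made up):

#include "sqlite3.h"

/* Open two connections that share a single data and schema cache. */
int openSharedConnections(const char *zDb, sqlite3 **pDb1, sqlite3 **pDb2){
  int rc = sqlite3_enable_shared_cache(1);  /* affects later opens only */
  if( rc!=SQLITE_OK ) return rc;
  rc = sqlite3_open(zDb, pDb1);
  if( rc!=SQLITE_OK ) return rc;
  return sqlite3_open(zDb, pDb2);
}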

    +
    +This page last modified 2009/03/23 11:40:05 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sharedcache.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sharedcache.tcl --- sqlite3-3.4.2/www/sharedcache.tcl 2006-01-30 16:20:30.000000000 +0000 +++ sqlite3-3.6.16/www/sharedcache.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,221 +0,0 @@ -# -# Run this script to generated a sharedcache.html output file -# -set rcsid {$Id: } -source common.tcl -header {SQLite Shared-Cache Mode} - -proc HEADING {level title} { - global pnum - incr pnum($level) - foreach i [array names pnum] { - if {$i>$level} {set pnum($i) 0} - } - set h [expr {$level+1}] - if {$h>6} {set h 6} - set n $pnum(1).$pnum(2) - for {set i 3} {$i<=$level} {incr i} { - append n .$pnum($i) - } - puts "$n $title" -} -set pnum(1) 0 -set pnum(2) 0 -set pnum(3) 0 -set pnum(4) 0 -set pnum(5) 0 -set pnum(6) 0 -set pnum(7) 0 -set pnum(8) 0 - -HEADING 1 {SQLite Shared-Cache Mode} - -puts { -

    Starting with version 3.3.0, SQLite includes a special "shared-cache" -mode (disabled by default) intended for use in embedded servers. If -shared-cache mode is enabled and a thread establishes multiple connections -to the same database, the connections share a single data and schema cache. -This can significantly reduce the quantity of memory and IO required by -the system.

    - -

    Using shared-cache mode imposes some extra restrictions on -passing database handles between threads and changes the semantics -of the locking model in some cases. These details are described in full by -this document. A basic understanding of the normal SQLite locking model (see -File Locking And Concurrency In SQLite Version 3 -for details) is assumed.

    -} - -HEADING 1 {Shared-Cache Locking Model} - -puts { -

    Externally, from the point of view of another process or thread, two -or more database connections using a shared-cache appear as a single -connection. The locking protocol used to arbitrate between multiple -shared-caches or regular database users is described elsewhere. -

    - - -
    - - -
    -

    Figure 1

    - -

    Figure 1 depicts an example runtime configuration where three -database connections have been established. Connection 1 is a normal -SQLite database connection. Connections 2 and 3 share a cache (and so must -have been established by the same process thread). The normal locking -protocol is used to serialize database access between connection 1 and -the shared cache. The internal protocol used to serialize (or not, see -"Read-Uncommitted Isolation Mode" below) access to the shared-cache by -connections 2 and 3 is described in the remainder of this section. -

    - -

    There are three levels to the shared-cache locking model, -transaction level locking, table level locking and schema level locking. -They are described in the following three sub-sections.

    - -} - -HEADING 2 {Transaction Level Locking} - -puts { -

    SQLite connections can open two kinds of transactions, read and write -transactions. This is not done explicitly, a transaction is implicitly a -read-transaction until it first writes to a database table, at which point -it becomes a write-transaction. -

    -

    At most one connection to a single shared cache may open a -write transaction at any one time. This may co-exist with any number of read -transactions. -

    -} - -HEADING 2 {Table Level Locking} - -puts { -

    When two or more connections use a shared-cache, locks are used to -serialize concurrent access attempts on a per-table basis. Tables support -two types of locks, "read-locks" and "write-locks". Locks are granted to -connections - at any one time, each database connection has either a -read-lock, write-lock or no lock on each database table. -

    - -

    At any one time, a single table may have any number of active read-locks -or a single active write lock. To read data a table, a connection must -first obtain a read-lock. To write to a table, a connection must obtain a -write-lock on that table. If a required table lock cannot be obtained, -the query fails and SQLITE_LOCKED is returned to the caller. -

    - -

    Once a connection obtains a table lock, it is not released until the -current transaction (read or write) is concluded. -

    -} - -HEADING 3 {Read-Uncommitted Isolation Mode} - -puts { -

    The behaviour described above may be modified slightly by using the -read_uncommitted pragma to change the isolation level from serialized -(the default), to read-uncommitted.

    - -

    A database connection in read-uncommitted mode does not attempt -to obtain read-locks before reading from database tables as described -above. This can lead to inconsistent query results if another database -connection modifies a table while it is being read, but it also means that -a read-transaction opened by a connection in read-uncommitted mode can -neither block nor be blocked by any other connection.

    - -

    Read-uncommitted mode has no effect on the locks required to write to -database tables (i.e. read-uncommitted connections must still obtain -write-locks and hence database writes may still block or be blocked). -Also, read-uncommitted mode has no effect on the sqlite_master -locks required by the rules enumerated below (see section -"Schema (sqlite_master) Level Locking"). -

    - -
    -  /* Set the value of the read-uncommitted flag:
    -  **
    -  **   True  -> Set the connection to read-uncommitted mode.
    -  **   False -> Set the connectino to serialized (the default) mode.
    -  */
    -  PRAGMA read_uncommitted = <boolean>;
    -
    -  /* Retrieve the current value of the read-uncommitted flag */
    -  PRAGMA read_uncommitted;
    -
    -} - -HEADING 2 {Schema (sqlite_master) Level Locking} - -puts { -

    The sqlite_master table supports shared-cache read and write -locks in the same way as all other database tables (see description -above). The following special rules also apply: -

    - -
      -
    • A connection must obtain a read-lock on sqlite_master before -accessing any database tables or obtaining any other read or write locks.
    • -
    • Before executing a statement that modifies the database schema (i.e. -a CREATE or DROP TABLE statement), a connection must obtain a write-lock on -sqlite_master. -
    • -
    • A connection may not compile an SQL statement if any other connection -is holding a write-lock on the sqlite_master table of any attached -database (including the default database, "main"). -
    • -
    -} - -HEADING 1 {Thread Related Issues} - -puts { -

    When shared-cache mode is enabled, a database connection may only be -used by the thread that called sqlite3_open() to create it. If another -thread attempts to use the database connection, in most cases an -SQLITE_MISUSE error is returned. However this is not guaranteed and -programs should not depend on this behaviour, in some cases a segfault -may result. -

    -} - -HEADING 1 {Enabling Shared-Cache Mode} - -puts { -

    Shared-cache mode is enabled on a thread-wide basis. Using the C -interface, the following API can be used to enable or disable shared-cache -mode for the calling thread: -

    - -
    -int sqlite3_enable_shared_cache(int);
    -
    - -

    It is illegal to call sqlite3_enable_shared_cache() if one or more -open database connections were opened by the calling thread. If the argument -is non-zero, shared-cache mode is enabled. If the argument is zero, -shared-cache mode is disabled. The return value is either SQLITE_OK (if the -operation was successful), SQLITE_NOMEM (if a malloc() failed), or -SQLITE_MISUSE (if the thread has open database connections). -

    -} - -footer $rcsid Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/shared.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/shared.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sitemap.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sitemap.html --- sqlite3-3.4.2/www/sitemap.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/sitemap.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,142 @@ + + +SQLite Site Map + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    Site Map

    + + +
    +This page last modified 2009/04/22 12:10:38 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/specification.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/specification.html --- sqlite3-3.4.2/www/specification.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/specification.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,93 @@ + + +SQLite Requirements + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Specifications

    + +

    This document is a work in progress.

    + +

The goal of this document is to provide a precise and exact +definition of what SQLite does, how it works, and what to expect +from SQLite for any given input. When completed, this document +will become the authoritative reference for using SQLite.

    + +

    The C/C++ Interface

    + + + + + +
    +
    +This page last modified 2007/12/20 02:19:24 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/speed.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/speed.html --- sqlite3-3.4.2/www/speed.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/speed.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,561 @@ + + +SQLite Database Speed Comparison + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    Database Speed Comparison

    + + +Note: This document is old. It describes a speed comparison between +an older version of SQLite against archaic versions of MySQL and PostgreSQL. +Readers are invited to contribute more up-to-date speed comparisons +on the SQLite Wiki. +

    +The numbers here are old enough to be nearly meaningless. Until it is +updated, use this document only as proof that SQLite is not a +sluggard. + + +

    Executive Summary

    + +

    A series of tests were run to measure the relative performance of +SQLite 2.7.6, PostgreSQL 7.1.3, and MySQL 3.23.41. +The following are general +conclusions drawn from these experiments: +

    + +
      +
    • + SQLite 2.7.6 is significantly faster (sometimes as much as 10 or + 20 times faster) than the default PostgreSQL 7.1.3 installation + on RedHat 7.2 for most common operations. +

    • +
    • + SQLite 2.7.6 is often faster (sometimes + more than twice as fast) than MySQL 3.23.41 + for most common operations. +

    • +
    • + SQLite does not execute CREATE INDEX or DROP TABLE as fast as + the other databases. But this is not seen as a problem because + those are infrequent operations. +

    • +
    • + SQLite works best if you group multiple operations together into + a single transaction. +

    • +
    + +

    +The results presented here come with the following caveats: +

    + +
      +
    • + These tests did not attempt to measure multi-user performance or + optimization of complex queries involving multiple joins and subqueries. +

    • +
    • + These tests are on a relatively small (approximately 14 megabyte) database. + They do not measure how well the database engines scale to larger problems. +

    • +
    + +

    Test Environment

    + +

+The platform used for these tests is a 1.6GHz Athlon with 1GB of memory +and an IDE disk drive. The operating system is RedHat Linux 7.2 with +a stock kernel. +

    + +

+The PostgreSQL and MySQL servers used were as delivered by default on +RedHat 7.2. (PostgreSQL version 7.1.3 and MySQL version 3.23.41.) +No effort was made to tune these engines. Note in particular +that the default MySQL configuration on RedHat 7.2 does not support +transactions. Not having to support transactions gives MySQL a +big speed advantage, but SQLite is still able to hold its own on most +tests. +

    + +

    +I am told that the default PostgreSQL configuration in RedHat 7.3 +is unnecessarily conservative (it is designed to +work on a machine with 8MB of RAM) and that PostgreSQL could +be made to run a lot faster with some knowledgeable configuration +tuning. +Matt Sergeant reports that he has tuned his PostgreSQL installation +and rerun the tests shown below. His results show that +PostgreSQL and MySQL run at about the same speed. For Matt's +results, visit +

    + +
    + +http://www.sergeant.org/sqlite_vs_pgsync.html +
    + +

    +SQLite was tested in the same configuration that it appears +on the website. It was compiled with -O6 optimization and with +the -DNDEBUG=1 switch which disables the many "assert()" statements +in the SQLite code. The -DNDEBUG=1 compiler option roughly doubles +the speed of SQLite. +

    + +

    +All tests are conducted on an otherwise quiescent machine. +A simple Tcl script was used to generate and run all the tests. +A copy of this Tcl script can be found in the SQLite source tree +in the file tools/speedtest.tcl. +

    + +

    +The times reported on all tests represent wall-clock time +in seconds. Two separate time values are reported for SQLite. +The first value is for SQLite in its default configuration with +full disk synchronization turned on. With synchronization turned +on, SQLite executes +an fsync() system call (or the equivalent) at key points +to make certain that critical data has +actually been written to the disk drive surface. Synchronization +is necessary to guarantee the integrity of the database if the +operating system crashes or the computer powers down unexpectedly +in the middle of a database update. The second time reported for SQLite is +when synchronization is turned off. With synchronization off, +SQLite is sometimes much faster, but there is a risk that an +operating system crash or an unexpected power failure could +damage the database. Generally speaking, the synchronous SQLite +times are for comparison against PostgreSQL (which is also +synchronous) and the asynchronous SQLite times are for +comparison against the asynchronous MySQL engine. +
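For reference, and as an editor addition rather than part of the benchmark setup described here, turning synchronization off from an application in current SQLite is normally done with a pragma, roughly as sketched below:

#include "sqlite3.h"

/* Trade durability for speed: with synchronous=OFF, SQLite stops
** calling fsync() at transaction boundaries, so an OS crash or
** power failure can corrupt the database, as explained above. */
int disableSync(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA synchronous=OFF;", 0, 0, 0);
}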

    + +

    Test 1: 1000 INSERTs

    +
    +CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));
    +INSERT INTO t1 VALUES(1,13153,'thirteen thousand one hundred fifty three');
    +INSERT INTO t1 VALUES(2,75560,'seventy five thousand five hundred sixty');
    +... 995 lines omitted
    +INSERT INTO t1 VALUES(998,66289,'sixty six thousand two hundred eighty nine');
    +INSERT INTO t1 VALUES(999,24322,'twenty four thousand three hundred twenty two');
    +INSERT INTO t1 VALUES(1000,94142,'ninety four thousand one hundred forty two');
    + +
    + + + + +
    PostgreSQL:   4.373
    MySQL:   0.114
    SQLite 2.7.6:   13.061
    SQLite 2.7.6 (nosync):   0.223
    + +

    +Because it does not have a central server to coordinate access, +SQLite must close and reopen the database file, and thus invalidate +its cache, for each transaction. In this test, each SQL statement +is a separate transaction so the database file must be opened and closed +and the cache must be flushed 1000 times. In spite of this, the asynchronous +version of SQLite is still nearly as fast as MySQL. Notice how much slower +the synchronous version is, however. SQLite calls fsync() after +each synchronous transaction to make sure that all data is safely on +the disk surface before continuing. For most of the 13 seconds in the +synchronous test, SQLite was sitting idle waiting on disk I/O to complete.

    + + +

    Test 2: 25000 INSERTs in a transaction

    +
    +BEGIN;
    +CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));
    +INSERT INTO t2 VALUES(1,59672,'fifty nine thousand six hundred seventy two');
    +... 24997 lines omitted
    +INSERT INTO t2 VALUES(24999,89569,'eighty nine thousand five hundred sixty nine');
    +INSERT INTO t2 VALUES(25000,94666,'ninety four thousand six hundred sixty six');
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   4.900
    MySQL:   2.184
    SQLite 2.7.6:   0.914
    SQLite 2.7.6 (nosync):   0.757
    + +

+When all the INSERTs are put in a transaction, SQLite no longer has to +close and reopen the database or invalidate its cache between each statement. +It also does not +have to do any fsync()s until the very end. When unshackled in +this way, SQLite is much faster than either PostgreSQL or MySQL. +
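The same grouping can be done from C. The sketch below (an editor addition; the table, schema, and values are assumptions chosen to mirror the benchmark tables above) wraps a batch of INSERTs in one transaction so that at most one fsync() is needed, at COMMIT:

#include "sqlite3.h"

/* Insert n rows inside one transaction; only the COMMIT needs an
** fsync().  Table name and values are illustrative assumptions. */
int bulkInsert(sqlite3 *db, int n){
  char zSql[128];
  int i, rc;

  rc = sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;
  for(i=1; i<=n; i++){
    sqlite3_snprintf((int)sizeof(zSql), zSql,
        "INSERT INTO t2 VALUES(%d,%d,'row %d');", i, i*3, i);
    rc = sqlite3_exec(db, zSql, 0, 0, 0);
    if( rc!=SQLITE_OK ){
      sqlite3_exec(db, "ROLLBACK;", 0, 0, 0);
      return rc;
    }
  }
  return sqlite3_exec(db, "COMMIT;", 0, 0, 0);
}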

    + +

    Test 3: 25000 INSERTs into an indexed table

    +
    +BEGIN;
    +CREATE TABLE t3(a INTEGER, b INTEGER, c VARCHAR(100));
    +CREATE INDEX i3 ON t3(c);
    +... 24998 lines omitted
    +INSERT INTO t3 VALUES(24999,88509,'eighty eight thousand five hundred nine');
    +INSERT INTO t3 VALUES(25000,84791,'eighty four thousand seven hundred ninety one');
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   8.175
    MySQL:   3.197
    SQLite 2.7.6:   1.555
    SQLite 2.7.6 (nosync):   1.402
    + +

    +There were reports that SQLite did not perform as well on an indexed table. +This test was recently added to disprove those rumors. It is true that +SQLite is not as fast at creating new index entries as the other engines +(see Test 6 below) but its overall speed is still better. +

    + +

    Test 4: 100 SELECTs without an index

    +
    +BEGIN;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100;
    +... 96 lines omitted
    +SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900;
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   3.629
    MySQL:   2.760
    SQLite 2.7.6:   2.494
    SQLite 2.7.6 (nosync):   2.526
    + + +

    +This test does 100 queries on a 25000 entry table without an index, +thus requiring a full table scan. Prior versions of SQLite used to +be slower than PostgreSQL and MySQL on this test, but recent performance +enhancements have increased its speed so that it is now the fastest +of the group. +

    + +

    Test 5: 100 SELECTs on a string comparison

    +
    +BEGIN;
    +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%';
    +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%';
    +... 96 lines omitted
    +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety nine%';
    +SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%';
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   13.409
    MySQL:   4.640
    SQLite 2.7.6:   3.362
    SQLite 2.7.6 (nosync):   3.372
    + +

+This test still does 100 full table scans but it uses +string comparisons instead of numerical comparisons. +SQLite is over three times faster than PostgreSQL here and about 30% +faster than MySQL. +

    + +

    Test 6: Creating an index

    +
    +CREATE INDEX i2a ON t2(a);
    CREATE INDEX i2b ON t2(b); +
    + + + + +
    PostgreSQL:   0.381
    MySQL:   0.318
    SQLite 2.7.6:   0.777
    SQLite 2.7.6 (nosync):   0.659
    + +

    +SQLite is slower at creating new indices. This is not a huge problem +(since new indices are not created very often) but it is something that +is being worked on. Hopefully, future versions of SQLite will do better +here. +

    + +

    Test 7: 5000 SELECTs with an index

    +
    +SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<100;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<200;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<300;
    +... 4994 lines omitted
    +SELECT count(*), avg(b) FROM t2 WHERE b>=499700 AND b<499800;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=499800 AND b<499900;
    +SELECT count(*), avg(b) FROM t2 WHERE b>=499900 AND b<500000;
    + +
    + + + + +
    PostgreSQL:   4.614
    MySQL:   1.270
    SQLite 2.7.6:   1.121
    SQLite 2.7.6 (nosync):   1.162
    + +

    +All three database engines run faster when they have indices to work with. +But SQLite is still the fastest. +

    + +

    Test 8: 1000 UPDATEs without an index

    +
    +BEGIN;
    +UPDATE t1 SET b=b*2 WHERE a>=0 AND a<10;
    +UPDATE t1 SET b=b*2 WHERE a>=10 AND a<20;
    +... 996 lines omitted
    +UPDATE t1 SET b=b*2 WHERE a>=9980 AND a<9990;
    +UPDATE t1 SET b=b*2 WHERE a>=9990 AND a<10000;
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   1.739
    MySQL:   8.410
    SQLite 2.7.6:   0.637
    SQLite 2.7.6 (nosync):   0.638
    + +

    +For this particular UPDATE test, MySQL is consistently +five or ten times +slower than PostgreSQL and SQLite. I do not know why. MySQL is +normally a very fast engine. Perhaps this problem has been addressed +in later versions of MySQL. +

    + +

    Test 9: 25000 UPDATEs with an index

    +
    +BEGIN;
    +UPDATE t2 SET b=468026 WHERE a=1;
    +UPDATE t2 SET b=121928 WHERE a=2;
    +... 24996 lines omitted
    +UPDATE t2 SET b=35065 WHERE a=24999;
    +UPDATE t2 SET b=347393 WHERE a=25000;
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   18.797
    MySQL:   8.134
    SQLite 2.7.6:   3.520
    SQLite 2.7.6 (nosync):   3.104
    + +

+As recently as version 2.7.0, SQLite ran at about the same speed as +MySQL on this test. But recent optimizations to SQLite have more +than doubled the speed of UPDATEs. +

    + +

    Test 10: 25000 text UPDATEs with an index

    +
    +BEGIN;
    +UPDATE t2 SET c='one hundred forty eight thousand three hundred eighty two' WHERE a=1;
    +UPDATE t2 SET c='three hundred sixty six thousand five hundred two' WHERE a=2;
    +... 24996 lines omitted
    +UPDATE t2 SET c='three hundred eighty three thousand ninety nine' WHERE a=24999;
    +UPDATE t2 SET c='two hundred fifty six thousand eight hundred thirty' WHERE a=25000;
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   48.133
    MySQL:   6.982
    SQLite 2.7.6:   2.408
    SQLite 2.7.6 (nosync):   1.725
    + +

    +Here again, version 2.7.0 of SQLite used to run at about the same speed +as MySQL. But now version 2.7.6 is over two times faster than MySQL and +over twenty times faster than PostgreSQL. +

    + +

    +In fairness to PostgreSQL, it started thrashing on this test. A +knowledgeable administrator might be able to get PostgreSQL to run a lot +faster here by tweaking and tuning the server a little. +

    + +

    Test 11: INSERTs from a SELECT

    +
    +BEGIN;
    INSERT INTO t1 SELECT b,a,c FROM t2;
    INSERT INTO t2 SELECT b,a,c FROM t1;
    COMMIT; +
    + + + + +
    PostgreSQL:   61.364
    MySQL:   1.537
    SQLite 2.7.6:   2.787
    SQLite 2.7.6 (nosync):   1.599
    + +

    +The asynchronous SQLite is just a shade slower than MySQL on this test. +(MySQL seems to be especially adept at INSERT...SELECT statements.) +The PostgreSQL engine is still thrashing - most of the 61 seconds it used +were spent waiting on disk I/O. +

    + +

    Test 12: DELETE without an index

    +
    +DELETE FROM t2 WHERE c LIKE '%fifty%'; +
    + + + + +
    PostgreSQL:   1.509
    MySQL:   0.975
    SQLite 2.7.6:   4.004
    SQLite 2.7.6 (nosync):   0.560
    + +

    +The synchronous version of SQLite is the slowest of the group in this test, +but the asynchronous version is the fastest. +The difference is the extra time needed to execute fsync(). +

    + +

    Test 13: DELETE with an index

    +
    +DELETE FROM t2 WHERE a>10 AND a<20000; +
    + + + + +
    PostgreSQL:   1.316
    MySQL:   2.262
    SQLite 2.7.6:   2.068
    SQLite 2.7.6 (nosync):   0.752
    + +

+This test is significant because it is one of the few where +PostgreSQL is faster than MySQL. The asynchronous SQLite is, +however, faster than both of the other two. +

    + +

    Test 14: A big INSERT after a big DELETE

    +
    +INSERT INTO t2 SELECT * FROM t1; +
    + + + + +
    PostgreSQL:   13.168
    MySQL:   1.815
    SQLite 2.7.6:   3.210
    SQLite 2.7.6 (nosync):   1.485
    + +

    +Some older versions of SQLite (prior to version 2.4.0) +would show decreasing performance after a +sequence of DELETEs followed by new INSERTs. As this test shows, the +problem has now been resolved. +

    + +

    Test 15: A big DELETE followed by many small INSERTs

    +
    +BEGIN;
    +DELETE FROM t1;
    +INSERT INTO t1 VALUES(1,10719,'ten thousand seven hundred nineteen');
    +... 11997 lines omitted
    +INSERT INTO t1 VALUES(11999,72836,'seventy two thousand eight hundred thirty six');
    +INSERT INTO t1 VALUES(12000,64231,'sixty four thousand two hundred thirty one');
    +COMMIT;
    + +
    + + + + +
    PostgreSQL:   4.556
    MySQL:   1.704
    SQLite 2.7.6:   0.618
    SQLite 2.7.6 (nosync):   0.406
    + +

    +SQLite is very good at doing INSERTs within a transaction, which probably +explains why it is so much faster than the other databases at this test. +

    + +

    Test 16: DROP TABLE

    +
    +DROP TABLE t1;
    DROP TABLE t2;
    DROP TABLE t3; +
    + + + + +
    PostgreSQL:   0.135
    MySQL:   0.015
    SQLite 2.7.6:   0.939
    SQLite 2.7.6 (nosync):   0.254
    + +

    +SQLite is slower than the other databases when it comes to dropping tables. +This probably is because when SQLite drops a table, it has to go through and +erase the records in the database file that deal with that table. MySQL and +PostgreSQL, on the other hand, use separate files to represent each table +so they can drop a table simply by deleting a file, which is much faster. +

    + +

    +On the other hand, dropping tables is not a very common operation +so if SQLite takes a little longer, that is not seen as a big problem. +

    +
    +This page last modified 2007/11/12 15:00:07 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/speed.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/speed.tcl --- sqlite3-3.4.2/www/speed.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/speed.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,495 +0,0 @@ -# -# Run this Tcl script to generate the speed.html file. -# -set rcsid {$Id: speed.tcl,v 1.17 2005/03/12 15:55:11 drh Exp $ } -source common.tcl -header {SQLite Database Speed Comparison} - -puts { -

    Database Speed Comparison

    - - -Note: This document is old. It describes a speed comparison between -an older version of SQLite against archaic versions of MySQL and PostgreSQL. -Readers are invited to contribute more up-to-date speed comparisons -on the SQLite Wiki. -

    -The numbers here are old enough to be nearly meaningless. Until it is -updated, use this document only as proof that SQLite is not a -sluggard. - - -

    Executive Summary

    - -

    A series of tests were run to measure the relative performance of -SQLite 2.7.6, PostgreSQL 7.1.3, and MySQL 3.23.41. -The following are general -conclusions drawn from these experiments: -

    - -
      -
    • - SQLite 2.7.6 is significantly faster (sometimes as much as 10 or - 20 times faster) than the default PostgreSQL 7.1.3 installation - on RedHat 7.2 for most common operations. -

    • -
    • - SQLite 2.7.6 is often faster (sometimes - more than twice as fast) than MySQL 3.23.41 - for most common operations. -

    • -
    • - SQLite does not execute CREATE INDEX or DROP TABLE as fast as - the other databases. But this is not seen as a problem because - those are infrequent operations. -

    • -
    • - SQLite works best if you group multiple operations together into - a single transaction. -

    • -
    - -

    -The results presented here come with the following caveats: -

    - -
      -
    • - These tests did not attempt to measure multi-user performance or - optimization of complex queries involving multiple joins and subqueries. -

    • -
    • - These tests are on a relatively small (approximately 14 megabyte) database. - They do not measure how well the database engines scale to larger problems. -

    • -
    - -

    Test Environment

    - -

    -The platform used for these tests is a 1.6GHz Athlon with 1GB or memory -and an IDE disk drive. The operating system is RedHat Linux 7.2 with -a stock kernel. -

    - -

    -The PostgreSQL and MySQL servers used were as delivered by default on -RedHat 7.2. (PostgreSQL version 7.1.3 and MySQL version 3.23.41.) -No effort was made to tune these engines. Note in particular -the the default MySQL configuration on RedHat 7.2 does not support -transactions. Not having to support transactions gives MySQL a -big speed advantage, but SQLite is still able to hold its own on most -tests. -

    - -

    -I am told that the default PostgreSQL configuration in RedHat 7.3 -is unnecessarily conservative (it is designed to -work on a machine with 8MB of RAM) and that PostgreSQL could -be made to run a lot faster with some knowledgeable configuration -tuning. -Matt Sergeant reports that he has tuned his PostgreSQL installation -and rerun the tests shown below. His results show that -PostgreSQL and MySQL run at about the same speed. For Matt's -results, visit -

    - -
    - -http://www.sergeant.org/sqlite_vs_pgsync.html -
    - -

    -SQLite was tested in the same configuration that it appears -on the website. It was compiled with -O6 optimization and with -the -DNDEBUG=1 switch which disables the many "assert()" statements -in the SQLite code. The -DNDEBUG=1 compiler option roughly doubles -the speed of SQLite. -

    - -

    -All tests are conducted on an otherwise quiescent machine. -A simple Tcl script was used to generate and run all the tests. -A copy of this Tcl script can be found in the SQLite source tree -in the file tools/speedtest.tcl. -

    - -

    -The times reported on all tests represent wall-clock time -in seconds. Two separate time values are reported for SQLite. -The first value is for SQLite in its default configuration with -full disk synchronization turned on. With synchronization turned -on, SQLite executes -an fsync() system call (or the equivalent) at key points -to make certain that critical data has -actually been written to the disk drive surface. Synchronization -is necessary to guarantee the integrity of the database if the -operating system crashes or the computer powers down unexpectedly -in the middle of a database update. The second time reported for SQLite is -when synchronization is turned off. With synchronization off, -SQLite is sometimes much faster, but there is a risk that an -operating system crash or an unexpected power failure could -damage the database. Generally speaking, the synchronous SQLite -times are for comparison against PostgreSQL (which is also -synchronous) and the asynchronous SQLite times are for -comparison against the asynchronous MySQL engine. -

    - -

    Test 1: 1000 INSERTs

    -
    -CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));
    -INSERT INTO t1 VALUES(1,13153,'thirteen thousand one hundred fifty three');
    -INSERT INTO t1 VALUES(2,75560,'seventy five thousand five hundred sixty');
    -... 995 lines omitted
    -INSERT INTO t1 VALUES(998,66289,'sixty six thousand two hundred eighty nine');
    -INSERT INTO t1 VALUES(999,24322,'twenty four thousand three hundred twenty two');
    -INSERT INTO t1 VALUES(1000,94142,'ninety four thousand one hundred forty two');
    - -
    - - - - -
    PostgreSQL:   4.373
    MySQL:   0.114
    SQLite 2.7.6:   13.061
    SQLite 2.7.6 (nosync):   0.223
    - -

    -Because it does not have a central server to coordinate access, -SQLite must close and reopen the database file, and thus invalidate -its cache, for each transaction. In this test, each SQL statement -is a separate transaction so the database file must be opened and closed -and the cache must be flushed 1000 times. In spite of this, the asynchronous -version of SQLite is still nearly as fast as MySQL. Notice how much slower -the synchronous version is, however. SQLite calls fsync() after -each synchronous transaction to make sure that all data is safely on -the disk surface before continuing. For most of the 13 seconds in the -synchronous test, SQLite was sitting idle waiting on disk I/O to complete.

    - - -

    Test 2: 25000 INSERTs in a transaction

    -
    -BEGIN;
    -CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));
    -INSERT INTO t2 VALUES(1,59672,'fifty nine thousand six hundred seventy two');
    -... 24997 lines omitted
    -INSERT INTO t2 VALUES(24999,89569,'eighty nine thousand five hundred sixty nine');
    -INSERT INTO t2 VALUES(25000,94666,'ninety four thousand six hundred sixty six');
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   4.900
    MySQL:   2.184
    SQLite 2.7.6:   0.914
    SQLite 2.7.6 (nosync):   0.757
    - -

    -When all the INSERTs are put in a transaction, SQLite no longer has to -close and reopen the database or invalidate its cache between each statement. -It also does not -have to do any fsync()s until the very end. When unshackled in -this way, SQLite is much faster than either PostgreSQL and MySQL. -

    - -

    Test 3: 25000 INSERTs into an indexed table

    -
    -BEGIN;
    -CREATE TABLE t3(a INTEGER, b INTEGER, c VARCHAR(100));
    -CREATE INDEX i3 ON t3(c);
    -... 24998 lines omitted
    -INSERT INTO t3 VALUES(24999,88509,'eighty eight thousand five hundred nine');
    -INSERT INTO t3 VALUES(25000,84791,'eighty four thousand seven hundred ninety one');
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   8.175
    MySQL:   3.197
    SQLite 2.7.6:   1.555
    SQLite 2.7.6 (nosync):   1.402
    - -

    -There were reports that SQLite did not perform as well on an indexed table. -This test was recently added to disprove those rumors. It is true that -SQLite is not as fast at creating new index entries as the other engines -(see Test 6 below) but its overall speed is still better. -

    - -

    Test 4: 100 SELECTs without an index

    -
    -BEGIN;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100;
    -... 96 lines omitted
    -SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900;
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   3.629
    MySQL:   2.760
    SQLite 2.7.6:   2.494
    SQLite 2.7.6 (nosync):   2.526
    - - -

    -This test does 100 queries on a 25000 entry table without an index, -thus requiring a full table scan. Prior versions of SQLite used to -be slower than PostgreSQL and MySQL on this test, but recent performance -enhancements have increased its speed so that it is now the fastest -of the group. -

    - -

    Test 5: 100 SELECTs on a string comparison

    -
    -BEGIN;
    -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%';
    -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%';
    -... 96 lines omitted
    -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety nine%';
    -SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%';
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   13.409
    MySQL:   4.640
    SQLite 2.7.6:   3.362
    SQLite 2.7.6 (nosync):   3.372
    - -

    -This test still does 100 full table scans but it uses -uses string comparisons instead of numerical comparisons. -SQLite is over three times faster than PostgreSQL here and about 30% -faster than MySQL. -

    - -

    Test 6: Creating an index

    -
    -CREATE INDEX i2a ON t2(a);
    CREATE INDEX i2b ON t2(b); -
    - - - - -
    PostgreSQL:   0.381
    MySQL:   0.318
    SQLite 2.7.6:   0.777
    SQLite 2.7.6 (nosync):   0.659
    - -

    -SQLite is slower at creating new indices. This is not a huge problem -(since new indices are not created very often) but it is something that -is being worked on. Hopefully, future versions of SQLite will do better -here. -

    - -

    Test 7: 5000 SELECTs with an index

    -
    -SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<100;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<200;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<300;
    -... 4994 lines omitted
    -SELECT count(*), avg(b) FROM t2 WHERE b>=499700 AND b<499800;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=499800 AND b<499900;
    -SELECT count(*), avg(b) FROM t2 WHERE b>=499900 AND b<500000;
    - -
    - - - - -
    PostgreSQL:   4.614
    MySQL:   1.270
    SQLite 2.7.6:   1.121
    SQLite 2.7.6 (nosync):   1.162
    - -

    -All three database engines run faster when they have indices to work with. -But SQLite is still the fastest. -

    - -

    Test 8: 1000 UPDATEs without an index

    -
    -BEGIN;
    -UPDATE t1 SET b=b*2 WHERE a>=0 AND a<10;
    -UPDATE t1 SET b=b*2 WHERE a>=10 AND a<20;
    -... 996 lines omitted
    -UPDATE t1 SET b=b*2 WHERE a>=9980 AND a<9990;
    -UPDATE t1 SET b=b*2 WHERE a>=9990 AND a<10000;
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   1.739
    MySQL:   8.410
    SQLite 2.7.6:   0.637
    SQLite 2.7.6 (nosync):   0.638
    - -

    -For this particular UPDATE test, MySQL is consistently -five or ten times -slower than PostgreSQL and SQLite. I do not know why. MySQL is -normally a very fast engine. Perhaps this problem has been addressed -in later versions of MySQL. -

    - -

    Test 9: 25000 UPDATEs with an index

    -
    -BEGIN;
    -UPDATE t2 SET b=468026 WHERE a=1;
    -UPDATE t2 SET b=121928 WHERE a=2;
    -... 24996 lines omitted
    -UPDATE t2 SET b=35065 WHERE a=24999;
    -UPDATE t2 SET b=347393 WHERE a=25000;
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   18.797
    MySQL:   8.134
    SQLite 2.7.6:   3.520
    SQLite 2.7.6 (nosync):   3.104
    - -

    -As recently as version 2.7.0, SQLite ran at about the same speed as -MySQL on this test. But recent optimizations to SQLite have more -than doubled the speed of UPDATEs. -

    - -

    Test 10: 25000 text UPDATEs with an index

    -
    -BEGIN;
    -UPDATE t2 SET c='one hundred forty eight thousand three hundred eighty two' WHERE a=1;
    -UPDATE t2 SET c='three hundred sixty six thousand five hundred two' WHERE a=2;
    -... 24996 lines omitted
    -UPDATE t2 SET c='three hundred eighty three thousand ninety nine' WHERE a=24999;
    -UPDATE t2 SET c='two hundred fifty six thousand eight hundred thirty' WHERE a=25000;
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   48.133
    MySQL:   6.982
    SQLite 2.7.6:   2.408
    SQLite 2.7.6 (nosync):   1.725
    - -

    -Here again, version 2.7.0 of SQLite used to run at about the same speed -as MySQL. But now version 2.7.6 is over two times faster than MySQL and -over twenty times faster than PostgreSQL. -

    - -

    -In fairness to PostgreSQL, it started thrashing on this test. A -knowledgeable administrator might be able to get PostgreSQL to run a lot -faster here by tweaking and tuning the server a little. -

    - -

    Test 11: INSERTs from a SELECT

    -
    -BEGIN;
    INSERT INTO t1 SELECT b,a,c FROM t2;
    INSERT INTO t2 SELECT b,a,c FROM t1;
    COMMIT; -
    - - - - -
    PostgreSQL:   61.364
    MySQL:   1.537
    SQLite 2.7.6:   2.787
    SQLite 2.7.6 (nosync):   1.599
    - -

    -The asynchronous SQLite is just a shade slower than MySQL on this test. -(MySQL seems to be especially adept at INSERT...SELECT statements.) -The PostgreSQL engine is still thrashing - most of the 61 seconds it used -were spent waiting on disk I/O. -

    - -

    Test 12: DELETE without an index

    -
    -DELETE FROM t2 WHERE c LIKE '%fifty%'; -
    - - - - -
    PostgreSQL:   1.509
    MySQL:   0.975
    SQLite 2.7.6:   4.004
    SQLite 2.7.6 (nosync):   0.560
    - -

    -The synchronous version of SQLite is the slowest of the group in this test, -but the asynchronous version is the fastest. -The difference is the extra time needed to execute fsync(). -

    - -

    Test 13: DELETE with an index

    -
    -DELETE FROM t2 WHERE a>10 AND a<20000; -
    - - - - -
    PostgreSQL:   1.316
    MySQL:   2.262
    SQLite 2.7.6:   2.068
    SQLite 2.7.6 (nosync):   0.752
    - -

    -This test is significant because it is one of the few where -PostgreSQL is faster than MySQL. The asynchronous SQLite is, -however, faster than both the other two. -

    - -

    Test 14: A big INSERT after a big DELETE

    -
    -INSERT INTO t2 SELECT * FROM t1; -
    - - - - -
    PostgreSQL:   13.168
    MySQL:   1.815
    SQLite 2.7.6:   3.210
    SQLite 2.7.6 (nosync):   1.485
    - -

    -Some older versions of SQLite (prior to version 2.4.0) -would show decreasing performance after a -sequence of DELETEs followed by new INSERTs. As this test shows, the -problem has now been resolved. -

    - -

    Test 15: A big DELETE followed by many small INSERTs

    -
    -BEGIN;
    -DELETE FROM t1;
    -INSERT INTO t1 VALUES(1,10719,'ten thousand seven hundred nineteen');
    -... 11997 lines omitted
    -INSERT INTO t1 VALUES(11999,72836,'seventy two thousand eight hundred thirty six');
    -INSERT INTO t1 VALUES(12000,64231,'sixty four thousand two hundred thirty one');
    -COMMIT;
    - -
    - - - - -
    PostgreSQL:   4.556
    MySQL:   1.704
    SQLite 2.7.6:   0.618
    SQLite 2.7.6 (nosync):   0.406
    - -

    -SQLite is very good at doing INSERTs within a transaction, which probably -explains why it is so much faster than the other databases at this test. -

    - -

    Test 16: DROP TABLE

    -
    -DROP TABLE t1;
    DROP TABLE t2;
    DROP TABLE t3; -
    - - - - -
    PostgreSQL:   0.135
    MySQL:   0.015
    SQLite 2.7.6:   0.939
    SQLite 2.7.6 (nosync):   0.254
    - -

    -SQLite is slower than the other databases when it comes to dropping tables. -This probably is because when SQLite drops a table, it has to go through and -erase the records in the database file that deal with that table. MySQL and -PostgreSQL, on the other hand, use separate files to represent each table -so they can drop a table simply by deleting a file, which is much faster. -

    - -

    -On the other hand, dropping tables is not a very common operation -so if SQLite takes a little longer, that is not seen as a big problem. -

    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sqlite.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sqlite.html --- sqlite3-3.4.2/www/sqlite.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/sqlite.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,600 @@ + + +sqlite3: A command-line access program for SQLite databases + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    sqlite3: A command-line access program for SQLite databases

    + +

    The SQLite library includes a simple command-line utility named +sqlite3 that allows the user to manually enter and execute SQL +commands against an SQLite database. This document provides a brief +introduction on how to use sqlite3. + +

    Getting Started

    + +

    To start the sqlite3 program, just type "sqlite3" followed by +the name of the file that holds the SQLite database. If the file does +not exist, a new one is created automatically. +The sqlite3 program will +then prompt you to enter SQL. Type in SQL statements (terminated by a +semicolon), press "Enter", and the SQL will be executed.

    + +

    For example, to create a new SQLite database named "ex1" +with a single table named "tbl1", you might do this:

    + +
    sqlite3 ex1
    +SQLite version 3.6.11
    +Enter ".help" for instructions
    +Enter SQL statements terminated with a ";"
    +sqlite> create table tbl1(one varchar(10), two smallint);
    +sqlite> insert into tbl1 values('hello!',10);
    +sqlite> insert into tbl1 values('goodbye', 20);
    +sqlite> select * from tbl1;
    +hello!|10
    +goodbye|20
    +sqlite>
    + +

    You can terminate the sqlite3 program by typing your system's +End-Of-File character (usually a Control-D). Use the interrupt +character (usually a Control-C) to stop a long-running SQL statement.

    + +

    Make sure you type a semicolon at the end of each SQL command! +The sqlite3 program looks for a semicolon to know when your SQL command is +complete. If you omit the semicolon, sqlite3 will give you a +continuation prompt and wait for you to enter more text to be +added to the current SQL command. This feature allows you to +enter SQL commands that span multiple lines. For example:

    + + +
    sqlite> CREATE TABLE tbl2 (
    +   ...>   f1 varchar(30) primary key,
    +   ...>   f2 text,
    +   ...>   f3 real
    +   ...> );
    +sqlite>
    + +

    Aside: Querying the SQLITE_MASTER table

    + +

    The database schema in an SQLite database is stored in +a special table named "sqlite_master". +You can execute "SELECT" statements against the +special sqlite_master table just like any other table +in an SQLite database. For example:

    + +
    sqlite3 ex1
    +SQLite version 3.6.11
    +Enter ".help" for instructions
    +sqlite> select * from sqlite_master;
    +    type = table
    +    name = tbl1
    +tbl_name = tbl1
    +rootpage = 3
    +     sql = create table tbl1(one varchar(10), two smallint)
    +sqlite>
    + +

    +But you cannot execute DROP TABLE, UPDATE, INSERT or DELETE against +the sqlite_master table. The sqlite_master +table is updated automatically as you create or drop tables and +indices from the database. You can not make manual changes +to the sqlite_master table. +

    + +

    +The schema for TEMPORARY tables is not stored in the "sqlite_master" table +since TEMPORARY tables are not visible to applications other than the +application that created the table. The schema for TEMPORARY tables +is stored in another special table named "sqlite_temp_master". The +"sqlite_temp_master" table is temporary itself. +

    + +

    Special commands to sqlite3

    + +

    +Most of the time, sqlite3 just reads lines of input and passes them +on to the SQLite library for execution. +But if an input line begins with a dot ("."), then +that line is intercepted and interpreted by the sqlite3 program itself. +These "dot commands" are typically used to change the output format +of queries, or to execute certain prepackaged query statements. +

    + +

    +For a listing of the available dot commands, you can enter ".help" +at any time. For example: +

    + +
    sqlite> .help
    +.backup ?DB? FILE      Backup DB (default "main") to FILE
    +.bail ON|OFF           Stop after hitting an error.  Default OFF
    +.databases             List names and files of attached databases
    +.dump ?TABLE? ...      Dump the database in an SQL text format
    +.echo ON|OFF           Turn command echo on or off
    +.exit                  Exit this program
    +.explain ON|OFF        Turn output mode suitable for EXPLAIN on or off.
    +.genfkey ?OPTIONS?     Options are:
    +                         --no-drop: Do not drop old fkey triggers.
    +                         --ignore-errors: Ignore tables with fkey errors
    +                         --exec: Execute generated SQL immediately
    +                       See file tool/genfkey.README in the source 
    +                       distribution for further information.
    +.header(s) ON|OFF      Turn display of headers on or off
    +.help                  Show this message
    +.import FILE TABLE     Import data from FILE into TABLE
    +.indices TABLE         Show names of all indices on TABLE
    +.iotrace FILE          Enable I/O diagnostic logging to FILE
    +.load FILE ?ENTRY?     Load an extension library
    +.mode MODE ?TABLE?     Set output mode where MODE is one of:
    +                         csv      Comma-separated values
    +                         column   Left-aligned columns.  (See .width)
    +                         html     HTML <table> code
    +                         insert   SQL insert statements for TABLE
    +                         line     One value per line
    +                         list     Values delimited by .separator string
    +                         tabs     Tab-separated values
    +                         tcl      TCL list elements
    +.nullvalue STRING      Print STRING in place of NULL values
    +.output FILENAME       Send output to FILENAME
    +.output stdout         Send output to the screen
    +.prompt MAIN CONTINUE  Replace the standard prompts
    +.quit                  Exit this program
    +.read FILENAME         Execute SQL in FILENAME
    +.restore ?DB? FILE     Restore content of DB (default "main") from FILE
    +.schema ?TABLE?        Show the CREATE statements
    +.separator STRING      Change separator used by output mode and .import
    +.show                  Show the current values for various settings
    +.tables ?PATTERN?      List names of tables matching a LIKE pattern
    +.timeout MS            Try opening locked tables for MS milliseconds
    +.timer ON|OFF          Turn the CPU timer measurement on or off
    +.width NUM NUM ...     Set column widths for "column" mode
    +sqlite>
    + +

    Changing Output Formats

    + +

    The sqlite3 program is able to show the results of a query +in eight different formats: "csv", "column", "html", "insert", +"line", "list", "tabs", and "tcl". +You can use the ".mode" dot command to switch between these output +formats.

    + +

    The default output mode is "list". In +list mode, each record of a query result is written on one line of +output and each column within that record is separated by a specific +separator string. The default separator is a pipe symbol ("|"). +List mode is especially useful when you are going to send the output +of a query to another program (such as AWK) for additional processing.

    + +
    sqlite> .mode list
    +sqlite> select * from tbl1;
    +hello|10
    +goodbye|20
    +sqlite>
    + +

    You can use the ".separator" dot command to change the separator +for list mode. For example, to change the separator to a comma and +a space, you could do this:

    + +
    sqlite> .separator ", "
    +sqlite> select * from tbl1;
    +hello, 10
    +goodbye, 20
    +sqlite>
    + +

    In "line" mode, each column in a row of the database +is shown on a line by itself. Each line consists of the column +name, an equal sign and the column data. Successive records are +separated by a blank line. Here is an example of line mode +output:

    + +
    sqlite> .mode line
    +sqlite> select * from tbl1;
    +one = hello
    +two = 10
    +
    +one = goodbye
    +two = 20
    +sqlite>
    + +

    In column mode, each record is shown on a separate line with the +data aligned in columns. For example:

    + +
    sqlite> .mode column
    +sqlite> select * from tbl1;
    +one         two       
    +----------  ----------
    +hello       10        
    +goodbye     20        
    +sqlite>
    + +

    By default, each column is at least 10 characters wide. +Data that is too wide to fit in a column is truncated. You can +adjust the column widths using the ".width" command. Like this:

    + +
    sqlite> .width 12 6
    +sqlite> select * from tbl1;
    +one           two   
    +------------  ------
    +hello         10    
    +goodbye       20    
    +sqlite>
    + +

    The ".width" command in the example above sets the width of the first +column to 12 and the width of the second column to 6. All other column +widths were unaltered. You can gives as many arguments to ".width" as +necessary to specify the widths of as many columns as are in your +query results.

    + +

    If you specify a width of 0 for a column, then the column +width is automatically adjusted to be the maximum of three +numbers: 10, the width of the header, and the width of the +first row of data. This makes the column width self-adjusting. +The default width setting for every column is this +auto-adjusting 0 value.
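    +For instance, the following (illustrative) commands give the first column the self-adjusting width of 0 while keeping the second column at 6:

    +sqlite> .width 0 6
    +sqlite> select * from tbl1;

    +With the small values in tbl1, neither the header "one" nor the first data value "hello" is longer than 10 characters, so the first column simply falls back to the default 10-character width.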

    + +

    The column labels that appear on the first two lines of output +can be turned on and off using the ".header" dot command. In the +examples above, the column labels are on. To turn them off you +could do this:

    + +
    sqlite> .header off
    +sqlite> select * from tbl1;
    +hello         10    
    +goodbye       20    
    +sqlite>
    + +

    Another useful output mode is "insert". In insert mode, the output +is formatted to look like SQL INSERT statements. You can use insert +mode to generate text that can later be used to input data into a +different database.

    + +

    When specifying insert mode, you have to give an extra argument +which is the name of the table to be inserted into. For example:

    + +
    sqlite> .mode insert new_table
    +sqlite> select * from tbl1;
    +INSERT INTO 'new_table' VALUES('hello',10);
    +INSERT INTO 'new_table' VALUES('goodbye',20);
    +sqlite>
    + + +

    The last output mode is "html". In this mode, sqlite3 writes +the results of the query as an XHTML table. The beginning +<TABLE> and the ending </TABLE> are not written, but +all of the intervening <TR>s, <TH>s, and <TD>s +are. The html output mode is envisioned as being useful for +CGI.
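    +A short, illustrative session follows, assuming the same tbl1 as above; headers are off by default and the exact line breaks in the generated markup may vary from version to version:

    +sqlite> .mode html
    +sqlite> select * from tbl1;
    +<TR><TD>hello</TD>
    +<TD>10</TD>
    +</TR>
    +<TR><TD>goodbye</TD>
    +<TD>20</TD>
    +</TR>
    +sqlite>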

    + + + +

    Writing results to a file

    + +

    By default, sqlite3 sends query results to standard output. You +can change this using the ".output" command. Just put the name of +an output file as an argument to the .output command and all subsequent +query results will be written to that file. Use ".output stdout" to +begin writing to standard output again. For example:

    + +
    sqlite> .mode list
    +sqlite> .separator |
    +sqlite> .output test_file_1.txt
    +sqlite> select * from tbl1;
    +sqlite> .exit
    +$ cat test_file_1.txt
    +hello|10
    +goodbye|20
    +$
    + + +

    Querying the database schema

    + +

    The sqlite3 program provides several convenience commands that +are useful for looking at the schema of the database. There is +nothing that these commands do that cannot be done by some other +means. These commands are provided purely as a shortcut.

    + +

    For example, to see a list of the tables in the database, you +can enter ".tables".

    + + +
    sqlite> .tables
    +tbl1
    +tbl2
    +sqlite>
    + + +

    The ".tables" command is similar to setting list mode then +executing the following query:

    + +
    +SELECT name FROM sqlite_master 
    +WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%'
    +UNION ALL 
    +SELECT name FROM sqlite_temp_master 
    +WHERE type IN ('table','view') 
    +ORDER BY 1
    +
    + +

    In fact, if you look at the source code to the sqlite3 program +(found in the source tree in the file src/shell.c) you'll find +exactly the above query.

    + +

    The ".indices" command works in a similar way to list all of +the indices for a particular table. The ".indices" command takes +a single argument which is the name of the table for which the +indices are desired. Last, but not least, is the ".schema" command. +With no arguments, the ".schema" command shows the original CREATE TABLE +and CREATE INDEX statements that were used to build the current database. +If you give the name of a table to ".schema", it shows the original +CREATE statement used to make that table and all if its indices. +We have:

    + +
    sqlite> .schema
    +create table tbl1(one varchar(10), two smallint)
    +CREATE TABLE tbl2 (
    +  f1 varchar(30) primary key,
    +  f2 text,
    +  f3 real
    +)
    +sqlite> .schema tbl2
    +CREATE TABLE tbl2 (
    +  f1 varchar(30) primary key,
    +  f2 text,
    +  f3 real
    +)
    +sqlite>
    + + +

    The ".schema" command accomplishes the same thing as setting +list mode, then entering the following query:

    + +
    +SELECT sql FROM 
    +   (SELECT * FROM sqlite_master UNION ALL
    +    SELECT * FROM sqlite_temp_master)
    +WHERE type!='meta'
    +ORDER BY tbl_name, type DESC, name
    +
    + +

    Or, if you give an argument to ".schema" because you only +want the schema for a single table, the query looks like this:

    + +
    +SELECT sql FROM
    +   (SELECT * FROM sqlite_master UNION ALL
    +    SELECT * FROM sqlite_temp_master)
    +WHERE type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
    +ORDER BY substr(type,2,1), name
    +
    + +

    +You can supply an argument to the .schema command. If you do, the +query looks like this: +

    + +
    +SELECT sql FROM
    +   (SELECT * FROM sqlite_master UNION ALL
    +    SELECT * FROM sqlite_temp_master)
    +WHERE tbl_name LIKE '%s'
    +  AND type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
    +ORDER BY substr(type,2,1), name
    +
    + +

    The "%s" in the query is replace by your argument. This allows you +to view the schema for some subset of the database.

    + + +
    sqlite> .schema %abc%
    + + +

    +Along these same lines, +the ".table" command also accepts a pattern as its first argument. +If you give an argument to the .table command, a "%" is both +appended and prepended and a LIKE clause is added to the query. +This allows you to list only those tables that match a particular +pattern.
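    +For example, because the pattern is wrapped in "%" on both sides, asking for "2" lists every table whose name contains a "2". The output below is illustrative and assumes only the tbl1 and tbl2 tables created earlier exist:

    +sqlite> .tables 2
    +tbl2
    +sqlite>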

    + +

    The ".databases" command shows a list of all databases open in +the current connection. There will always be at least 2. The first +one is "main", the original database opened. The second is "temp", +the database used for temporary tables. There may be additional +databases listed for databases attached using the ATTACH statement. +The first output column is the name the database is attached with, +and the second column is the filename of the external file.

    + +
    sqlite> .databases
    + + +

    Converting An Entire Database To An ASCII Text File

    + +

    Use the ".dump" command to convert the entire contents of a +database into a single ASCII text file. This file can be converted +back into a database by piping it back into sqlite3.

    + +

    A good way to make an archival copy of a database is this:

    + + +
    echo '.dump' | sqlite3 ex1 | gzip -c >ex1.dump.gz
    + + +

    This generates a file named ex1.dump.gz that contains everything +you need to reconstruct the database at a later time, or on another +machine. To reconstruct the database, just type:

    + + +
    zcat ex1.dump.gz | sqlite3 ex2
    + + +

    The text format is pure SQL so you +can also use the .dump command to export an SQLite database +into other popular SQL database engines. Like this:

    + + +
    createdb ex2
    +$ sqlite3 ex1 .dump | psql ex2
    + + +

    Other Dot Commands

    + +

    The ".explain" dot command can be used to set the output mode +to "column" and to set the column widths to values that are reasonable +for looking at the output of an EXPLAIN command. The EXPLAIN command +is an SQLite-specific SQL extension that is useful for debugging. If any +regular SQL is prefaced by EXPLAIN, then the SQL command is parsed and +analyzed but is not executed. Instead, the sequence of virtual machine +instructions that would have been used to execute the SQL command are +returned like a query result. For example:

    + +
    sqlite> .explain
    +sqlite> explain delete from tbl1 where two<20;
    +addr  opcode        p1     p2     p3          
    +----  ------------  -----  -----  -------------------------------------   
    +0     ListOpen      0      0                  
    +1     Open          0      1      tbl1        
    +2     Next          0      9                  
    +3     Field         0      1                  
    +4     Integer       20     0                  
    +5     Ge            0      2                  
    +6     Key           0      0                  
    +7     ListWrite     0      0                  
    +8     Goto          0      2                  
    +9     Noop          0      0                  
    +10    ListRewind    0      0                  
    +11    ListRead      0      14                 
    +12    Delete        0      0                  
    +13    Goto          0      11                 
    +14    ListClose     0      0
    + + + +

    The ".timeout" command sets the amount of time that the sqlite3 +program will wait for locks to clear on files it is trying to access +before returning an error. The default value of the timeout is zero so +that an error is returned immediately if any needed database table or +index is locked.

    + +

    And finally, we mention the ".exit" command which causes the +sqlite3 program to exit.

    + +

    Using sqlite3 in a shell script

    + +

    +One way to use sqlite3 in a shell script is to use "echo" or +"cat" to generate a sequence of commands in a file, then invoke sqlite3 +while redirecting input from the generated command file. This +works fine and is appropriate in many circumstances. But as +an added convenience, sqlite3 allows a single SQL command to be +entered on the command line as a second argument after the +database name. When the sqlite3 program is launched with two +arguments, the second argument is passed to the SQLite library +for processing, the query results are printed on standard output +in list mode, and the program exits. This mechanism is designed +to make sqlite3 easy to use in conjunction with programs like +"awk". For example:

    + +
    sqlite3 ex1 'select * from tbl1' |
    +>  awk '{printf "<tr><td>%s<td>%s\n",$1,$2 }'
    +<tr><td>hello<td>10
    +<tr><td>goodbye<td>20
    +$
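    +For comparison, the generated-command-file approach mentioned at the start of this section might look like the following sketch; the file name "cmds.sql" is just an example:

    +$ echo 'select * from tbl1;' >cmds.sql   # "cmds.sql" is an arbitrary example name
    +$ sqlite3 ex1 <cmds.sql
    +hello|10
    +goodbye|20
    +$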
    + + +

    Ending shell commands

    + +

    +SQLite commands are normally terminated by a semicolon. In the shell +you can also use the word "GO" (case-insensitive) or a slash character +"/" on a line by itself to end a command. These are used by SQL Server +and Oracle, respectively. These won't work in sqlite3_exec(), +because the shell translates these into a semicolon before passing them +to that function.
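    +For example, either of the following illustrative forms runs the same statement without a trailing semicolon:

    +sqlite> select * from tbl1
    +   ...> /
    +hello|10
    +goodbye|20
    +sqlite> select * from tbl1
    +   ...> GO
    +hello|10
    +goodbye|20
    +sqlite>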

    + + + +

    Compiling the sqlite3 program from sources

    + +

    +The source code to the sqlite3 command line interface is in a single +file named "shell.c" which you can + +download from the SQLite website. Compile this file (together +with the sqlite3 library source code) to generate +the executable. For example:

    + +
    +gcc -o sqlite3 shell.c sqlite3.c -ldl -lpthread
    +
    +
    +This page last modified 2009/03/16 18:35:57 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sqlite.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sqlite.tcl --- sqlite3-3.4.2/www/sqlite.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/sqlite.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,582 +0,0 @@ -# -# Run this Tcl script to generate the sqlite.html file. -# -set rcsid {$Id: sqlite.tcl,v 1.25 2007/01/08 14:31:36 drh Exp $} -source common.tcl -header {sqlite3: A command-line access program for SQLite databases} -puts { -

    sqlite3: A command-line access program for SQLite databases

    - -

    The SQLite library includes a simple command-line utility named -sqlite3 that allows the user to manually enter and execute SQL -commands against an SQLite database. This document provides a brief -introduction on how to use sqlite3. - -

    Getting Started

    - -

    To start the sqlite3 program, just type "sqlite3" followed by -the name the file that holds the SQLite database. If the file does -not exist, a new one is created automatically. -The sqlite3 program will -then prompt you to enter SQL. Type in SQL statements (terminated by a -semicolon), press "Enter" and the SQL will be executed.

    - -

    For example, to create a new SQLite database named "ex1" -with a single table named "tbl1", you might do this:

    -} - -proc Code {body} { - puts {
    } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
    \n body - puts $body - puts {
    } -} - -Code { -$ (((sqlite3 ex1))) -SQLite version 3.3.10 -Enter ".help" for instructions -sqlite> (((create table tbl1(one varchar(10), two smallint);))) -sqlite> (((insert into tbl1 values('hello!',10);))) -sqlite> (((insert into tbl1 values('goodbye', 20);))) -sqlite> (((select * from tbl1;))) -hello!|10 -goodbye|20 -sqlite> -} - -puts { -

    You can terminate the sqlite3 program by typing your systems -End-Of-File character (usually a Control-D) or the interrupt -character (usually a Control-C).

    - -

    Make sure you type a semicolon at the end of each SQL command! -The sqlite3 program looks for a semicolon to know when your SQL command is -complete. If you omit the semicolon, sqlite3 will give you a -continuation prompt and wait for you to enter more text to be -added to the current SQL command. This feature allows you to -enter SQL commands that span multiple lines. For example:

    -} - -Code { -sqlite> (((CREATE TABLE tbl2 ())) - ...> ((( f1 varchar(30) primary key,))) - ...> ((( f2 text,))) - ...> ((( f3 real))) - ...> ((();))) -sqlite> -} - -puts { - -

    Aside: Querying the SQLITE_MASTER table

    - -

    The database schema in an SQLite database is stored in -a special table named "sqlite_master". -You can execute "SELECT" statements against the -special sqlite_master table just like any other table -in an SQLite database. For example:

    -} - -Code { -$ (((sqlite3 ex1))) -SQlite vresion 3.3.10 -Enter ".help" for instructions -sqlite> (((select * from sqlite_master;))) - type = table - name = tbl1 -tbl_name = tbl1 -rootpage = 3 - sql = create table tbl1(one varchar(10), two smallint) -sqlite> -} - -puts { -

    -But you cannot execute DROP TABLE, UPDATE, INSERT or DELETE against -the sqlite_master table. The sqlite_master -table is updated automatically as you create or drop tables and -indices from the database. You can not make manual changes -to the sqlite_master table. -

    - -

    -The schema for TEMPORARY tables is not stored in the "sqlite_master" table -since TEMPORARY tables are not visible to applications other than the -application that created the table. The schema for TEMPORARY tables -is stored in another special table named "sqlite_temp_master". The -"sqlite_temp_master" table is temporary itself. -

    - -

    Special commands to sqlite3

    - -

    -Most of the time, sqlite3 just reads lines of input and passes them -on to the SQLite library for execution. -But if an input line begins with a dot ("."), then -that line is intercepted and interpreted by the sqlite3 program itself. -These "dot commands" are typically used to change the output format -of queries, or to execute certain prepackaged query statements. -

    - -

    -For a listing of the available dot commands, you can enter ".help" -at any time. For example: -

    } - -Code { -sqlite> (((.help))) -.bail ON|OFF Stop after hitting an error. Default OFF -.databases List names and files of attached databases -.dump ?TABLE? ... Dump the database in an SQL text format -.echo ON|OFF Turn command echo on or off -.exit Exit this program -.explain ON|OFF Turn output mode suitable for EXPLAIN on or off. -.header(s) ON|OFF Turn display of headers on or off -.help Show this message -.import FILE TABLE Import data from FILE into TABLE -.indices TABLE Show names of all indices on TABLE -.load FILE ?ENTRY? Load an extension library -.mode MODE ?TABLE? Set output mode where MODE is one of: - csv Comma-separated values - column Left-aligned columns. (See .width) - html HTML code - insert SQL insert statements for TABLE - line One value per line - list Values delimited by .separator string - tabs Tab-separated values - tcl TCL list elements -.nullvalue STRING Print STRING in place of NULL values -.output FILENAME Send output to FILENAME -.output stdout Send output to the screen -.prompt MAIN CONTINUE Replace the standard prompts -.quit Exit this program -.read FILENAME Execute SQL in FILENAME -.schema ?TABLE? Show the CREATE statements -.separator STRING Change separator used by output mode and .import -.show Show the current values for various settings -.tables ?PATTERN? List names of tables matching a LIKE pattern -.timeout MS Try opening locked tables for MS milliseconds -.width NUM NUM ... Set column widths for "column" mode -sqlite> -} - -puts { -

    Changing Output Formats

    - -

    The sqlite3 program is able to show the results of a query -in eight different formats: "csv", "column", "html", "insert", -"line", "tabs", and "tcl". -You can use the ".mode" dot command to switch between these output -formats.

    - -

    The default output mode is "list". In -list mode, each record of a query result is written on one line of -output and each column within that record is separated by a specific -separator string. The default separator is a pipe symbol ("|"). -List mode is especially useful when you are going to send the output -of a query to another program (such as AWK) for additional processing.

    } - -Code { -sqlite> (((.mode list))) -sqlite> (((select * from tbl1;))) -hello|10 -goodbye|20 -sqlite> -} - -puts { -

    You can use the ".separator" dot command to change the separator -for list mode. For example, to change the separator to a comma and -a space, you could do this:

    } - -Code { -sqlite> (((.separator ", "))) -sqlite> (((select * from tbl1;))) -hello, 10 -goodbye, 20 -sqlite> -} - -puts { -

    In "line" mode, each column in a row of the database -is shown on a line by itself. Each line consists of the column -name, an equal sign and the column data. Successive records are -separated by a blank line. Here is an example of line mode -output:

    } - -Code { -sqlite> (((.mode line))) -sqlite> (((select * from tbl1;))) -one = hello -two = 10 - -one = goodbye -two = 20 -sqlite> -} - -puts { -

    In column mode, each record is shown on a separate line with the -data aligned in columns. For example:

    } - -Code { -sqlite> (((.mode column))) -sqlite> (((select * from tbl1;))) -one two ----------- ---------- -hello 10 -goodbye 20 -sqlite> -} - -puts { -

    By default, each column is at least 10 characters wide. -Data that is too wide to fit in a column is truncated. You can -adjust the column widths using the ".width" command. Like this:

    } - -Code { -sqlite> (((.width 12 6))) -sqlite> (((select * from tbl1;))) -one two ------------- ------ -hello 10 -goodbye 20 -sqlite> -} - -puts { -

    The ".width" command in the example above sets the width of the first -column to 12 and the width of the second column to 6. All other column -widths were unaltered. You can gives as many arguments to ".width" as -necessary to specify the widths of as many columns as are in your -query results.

    - -

    If you specify a column a width of 0, then the column -width is automatically adjusted to be the maximum of three -numbers: 10, the width of the header, and the width of the -first row of data. This makes the column width self-adjusting. -The default width setting for every column is this -auto-adjusting 0 value.

    - -

    The column labels that appear on the first two lines of output -can be turned on and off using the ".header" dot command. In the -examples above, the column labels are on. To turn them off you -could do this:

    } - -Code { -sqlite> (((.header off))) -sqlite> (((select * from tbl1;))) -hello 10 -goodbye 20 -sqlite> -} - -puts { -

    Another useful output mode is "insert". In insert mode, the output -is formatted to look like SQL INSERT statements. You can use insert -mode to generate text that can later be used to input data into a -different database.

    - -

    When specifying insert mode, you have to give an extra argument -which is the name of the table to be inserted into. For example:

    -} - -Code { -sqlite> (((.mode insert new_table))) -sqlite> (((select * from tbl1;))) -INSERT INTO 'new_table' VALUES('hello',10); -INSERT INTO 'new_table' VALUES('goodbye',20); -sqlite> -} - -puts { -

    The last output mode is "html". In this mode, sqlite3 writes -the results of the query as an XHTML table. The beginning -<TABLE> and the ending </TABLE> are not written, but -all of the intervening <TR>s, <TH>s, and <TD>s -are. The html output mode is envisioned as being useful for -CGI.

    -} - -puts { -

    Writing results to a file

    - -

    By default, sqlite3 sends query results to standard output. You -can change this using the ".output" command. Just put the name of -an output file as an argument to the .output command and all subsequent -query results will be written to that file. Use ".output stdout" to -begin writing to standard output again. For example:

    } - -Code { -sqlite> (((.mode list))) -sqlite> (((.separator |))) -sqlite> (((.output test_file_1.txt))) -sqlite> (((select * from tbl1;))) -sqlite> (((.exit))) -$ (((cat test_file_1.txt))) -hello|10 -goodbye|20 -$ -} - -puts { -

    Querying the database schema

    - -

    The sqlite3 program provides several convenience commands that -are useful for looking at the schema of the database. There is -nothing that these commands do that cannot be done by some other -means. These commands are provided purely as a shortcut.

    - -

    For example, to see a list of the tables in the database, you -can enter ".tables".

    -} - -Code { -sqlite> (((.tables))) -tbl1 -tbl2 -sqlite> -} - -puts { -

    The ".tables" command is similar to setting list mode then -executing the following query:

    - -
    -SELECT name FROM sqlite_master 
    -WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%'
    -UNION ALL 
    -SELECT name FROM sqlite_temp_master 
    -WHERE type IN ('table','view') 
    -ORDER BY 1
    -
    - -

    In fact, if you look at the source code to the sqlite3 program -(found in the source tree in the file src/shell.c) you'll find -exactly the above query.

    - -

    The ".indices" command works in a similar way to list all of -the indices for a particular table. The ".indices" command takes -a single argument which is the name of the table for which the -indices are desired. Last, but not least, is the ".schema" command. -With no arguments, the ".schema" command shows the original CREATE TABLE -and CREATE INDEX statements that were used to build the current database. -If you give the name of a table to ".schema", it shows the original -CREATE statement used to make that table and all if its indices. -We have:

    } - -Code { -sqlite> (((.schema))) -create table tbl1(one varchar(10), two smallint) -CREATE TABLE tbl2 ( - f1 varchar(30) primary key, - f2 text, - f3 real -) -sqlite> (((.schema tbl2))) -CREATE TABLE tbl2 ( - f1 varchar(30) primary key, - f2 text, - f3 real -) -sqlite> -} - -puts { -

    The ".schema" command accomplishes the same thing as setting -list mode, then entering the following query:

    - -
    -SELECT sql FROM 
    -   (SELECT * FROM sqlite_master UNION ALL
    -    SELECT * FROM sqlite_temp_master)
    -WHERE type!='meta'
    -ORDER BY tbl_name, type DESC, name
    -
    - -

    Or, if you give an argument to ".schema" because you only -want the schema for a single table, the query looks like this:

    - -
    -SELECT sql FROM
    -   (SELECT * FROM sqlite_master UNION ALL
    -    SELECT * FROM sqlite_temp_master)
    -WHERE type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
    -ORDER BY substr(type,2,1), name
    -
    - -

    -You can supply an argument to the .schema command. If you do, the -query looks like this: -

    - -
    -SELECT sql FROM
    -   (SELECT * FROM sqlite_master UNION ALL
    -    SELECT * FROM sqlite_temp_master)
    -WHERE tbl_name LIKE '%s'
    -  AND type!='meta' AND sql NOT NULL AND name NOT LIKE 'sqlite_%'
    -ORDER BY substr(type,2,1), name
    -
    - -

    The "%s" in the query is replace by your argument. This allows you -to view the schema for some subset of the database.

    -} - -Code { -sqlite> (((.schema %abc%))) -} - -puts { -

    -Along these same lines, -the ".table" command also accepts a pattern as its first argument. -If you give an argument to the .table command, a "%" is both -appended and prepended and a LIKE clause is added to the query. -This allows you to list only those tables that match a particular -pattern.

    - -

    The ".databases" command shows a list of all databases open in -the current connection. There will always be at least 2. The first -one is "main", the original database opened. The second is "temp", -the database used for temporary tables. There may be additional -databases listed for databases attached using the ATTACH statement. -The first output column is the name the database is attached with, -and the second column is the filename of the external file.

    } - -Code { -sqlite> (((.databases))) -} - -puts { -

    Converting An Entire Database To An ASCII Text File

    - -

    Use the ".dump" command to convert the entire contents of a -database into a single ASCII text file. This file can be converted -back into a database by piping it back into sqlite3.

    - -

    A good way to make an archival copy of a database is this:

    -} - -Code { -$ (((echo '.dump' | sqlite3 ex1 | gzip -c >ex1.dump.gz))) -} - -puts { -

    This generates a file named ex1.dump.gz that contains everything -you need to reconstruct the database at a later time, or on another -machine. To reconstruct the database, just type:

    -} - -Code { -$ (((zcat ex1.dump.gz | sqlite3 ex2))) -} - -puts { -

    The text format is pure SQL so you -can also use the .dump command to export an SQLite database -into other popular SQL database engines. Like this:

    -} - -Code { -$ (((createdb ex2))) -$ (((sqlite3 ex1 .dump | psql ex2))) -} - -puts { -

    Other Dot Commands

    - -

    The ".explain" dot command can be used to set the output mode -to "column" and to set the column widths to values that are reasonable -for looking at the output of an EXPLAIN command. The EXPLAIN command -is an SQLite-specific SQL extension that is useful for debugging. If any -regular SQL is prefaced by EXPLAIN, then the SQL command is parsed and -analyzed but is not executed. Instead, the sequence of virtual machine -instructions that would have been used to execute the SQL command are -returned like a query result. For example:

    } - -Code { -sqlite> (((.explain))) -sqlite> (((explain delete from tbl1 where two<20;))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ------------------------------------- -0 ListOpen 0 0 -1 Open 0 1 tbl1 -2 Next 0 9 -3 Field 0 1 -4 Integer 20 0 -5 Ge 0 2 -6 Key 0 0 -7 ListWrite 0 0 -8 Goto 0 2 -9 Noop 0 0 -10 ListRewind 0 0 -11 ListRead 0 14 -12 Delete 0 0 -13 Goto 0 11 -14 ListClose 0 0 -} - -puts { - -

    The ".timeout" command sets the amount of time that the sqlite3 -program will wait for locks to clear on files it is trying to access -before returning an error. The default value of the timeout is zero so -that an error is returned immediately if any needed database table or -index is locked.

    - -

    And finally, we mention the ".exit" command which causes the -sqlite3 program to exit.

    - -

    Using sqlite3 in a shell script

    - -

    -One way to use sqlite3 in a shell script is to use "echo" or -"cat" to generate a sequence of commands in a file, then invoke sqlite3 -while redirecting input from the generated command file. This -works fine and is appropriate in many circumstances. But as -an added convenience, sqlite3 allows a single SQL command to be -entered on the command line as a second argument after the -database name. When the sqlite3 program is launched with two -arguments, the second argument is passed to the SQLite library -for processing, the query results are printed on standard output -in list mode, and the program exits. This mechanism is designed -to make sqlite3 easy to use in conjunction with programs like -"awk". For example:

    } - -Code { -$ (((sqlite3 ex1 'select * from tbl1' |))) -> ((( awk '{printf "
    %s%s\n",$1,$2 }'))) -
    hello10 -
    goodbye20 -$ -} - -puts { -

    Ending shell commands

    - -

    -SQLite commands are normally terminated by a semicolon. In the shell -you can also use the word "GO" (case-insensitive) or a slash character -"/" on a line by itself to end a command. These are used by SQL Server -and Oracle, respectively. These won't work in sqlite3_exec(), -because the shell translates these into a semicolon before passing them -to that function.

    -} - -puts { -

    Compiling the sqlite3 program from sources

    - -

    -The sqlite3 program is built automatically when you compile the -SQLite library. Just get a copy of the source tree, run -"configure" and then "make".

    -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/support.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/support.html --- sqlite3-3.4.2/www/support.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/support.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,173 @@ + + +SQLite Support Options + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite Support Options

    + +

    Professional Support

    + +

    +If you would like professional support for SQLite +or if you want custom modifications performed by the +original author of SQLite, these services are available for a modest fee. +For additional information, visit + +http://www.hwaci.com/sw/sqlite/prosupport.html or contact:

    + +
    +D. Richard Hipp
    +Hwaci - Applied Software Research
    +704.948.4565
    +drh@hwaci.com +
    + +

    Proprietary SQLite Extensions

    + +

    The core SQLite library found on this website is in the +public domain. But there also exist +proprietary, licensed extensions to SQLite, written and maintained +by the original developer.

    + + + + +

    Mailing Lists

    +

    Three separate mailing lists have been established to help support +SQLite:

    + +
      +
    • +sqlite-announce - announcements of new +releases or significant developments.
    • +
    • +sqlite-users - general user discussion; most postings belong here.
    • +
    • +sqlite-dev - developer conversations; for people who have or aspire to +have write permission on the SQLite CVS repository.
    • +
    + +

    +Most users of SQLite will want to join the + +sqlite-announce list and many will want to join the + +sqlite-users list. The + +sqlite-dev list is more specialized and appeals to a narrower audience. +Off-site archives of the + +sqlite-users list are available at: +

    + +
    + +http://www.mail-archive.com/sqlite-users%40sqlite.org
    + +http://marc.info/?l=sqlite-users&r=1&w=2
    + +http://news.gmane.org/gmane.comp.db.sqlite.general +
    + +

    + + +

    Direct E-Mail To The Author

    + +

    +Use the mailing list. +Please do not send email directly to the author of SQLite +unless: +

      +
    • You have or intend to acquire a professional support contract +as described above, or
    • +
    • You are working on an open source project.
    • +
    +You are welcome to use SQLite in closed source, proprietary, and/or +commercial projects and to ask questions about such use on the public +mailing list. But please do not ask to receive free direct technical +support. The software is free; direct technical support is not. +

    +
    +This page last modified 2009/01/03 16:35:33 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/support.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/support.tcl --- sqlite3-3.4.2/www/support.tcl 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/www/support.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -set rcsid {$Id: support.tcl,v 1.7 2007/06/21 13:30:40 drh Exp $} -source common.tcl -header {SQLite Support Options} -puts { -

    SQLite Support Options

    - - -

    Mailing List

    -

    -A mailing list has been set up for asking questions and -for open discussion of problems -and issues by the SQLite user community. -To subscribe to the mailing list, send an email to - -sqlite-users-subscribe@sqlite.org. -If you would prefer to get digests rather than individual -emails, send a message to - -sqlite-users-digest-subscribe@sqlite.org. -For additional information about operating and using this -mailing list, send a message to - -sqlite-users-help@sqlite.org and instructions will be -sent to you by return email. -

    - -

    -There are multiple archives of the mailing list: -

    - -
    - -http://www.mail-archive.com/sqlite-users%40sqlite.org
    - -http://marc.info/?l=sqlite-users&r=1&w=2
    - -http://news.gmane.org/gmane.comp.db.sqlite.general -
    - -

    - - -

    Direct E-Mail To The Author

    - -

    -Use the mailing list. -Please do not send email directly to the author of SQLite -unless: -

      -
    • You have or intend to acquire a professional support contract -as described below, or
    • -
    • You are working on an open source project.
    • -
    -You are welcome to use SQLite in closed source, proprietary, and/or -commercial projects and to ask questions about such use on the public -mailing list. But please do not ask to receive free direct technical -support. The software is free; direct technical support is not. -

    - - -

    Professional Support

    - -

    -If you would like professional support for SQLite -or if you want custom modifications to SQLite performed by the -original author, these services are available for a modest fee. -For additional information visit - -http://www.hwaci.com/sw/sqlite/prosupport.html or contact:

    - -
    -D. Richard Hipp
    -Hwaci - Applied Software Research
    -704.948.4565
    -drh@hwaci.com -
    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/syntaxdiagrams.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/syntaxdiagrams.html --- sqlite3-3.4.2/www/syntaxdiagrams.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/syntaxdiagrams.html 2009-06-27 15:07:44.000000000 +0100 @@ -0,0 +1,409 @@ + + +SQLite SQL Language Syntax Diagrams + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + +

    SQLite SQL Language Syntax Diagrams

    + + +

    sql-stmt-list:

    +



    +References:   sql-stmt +
    + + +

    sql-stmt:

    +

    +Used by:   sql-stmt-list

    +References:   alter-table-stmt   analyze-stmt   attach-stmt   begin-stmt   commit-stmt   create-index-stmt   create-table-stmt   create-trigger-stmt   create-view-stmt   create-virtual-table-stmt   delete-stmt   delete-stmt-limited   detach-stmt   drop-index-stmt   drop-table-stmt   drop-trigger-stmt   drop-view-stmt   insert-stmt   pragma-stmt   reindex-stmt   release-stmt   rollback-stmt   savepoint-stmt   select-stmt   update-stmt   update-stmt-limited   vacuum-stmt +
    + + +

    alter-table-stmt:

    +

    +Used by:   sql-stmt

    +References:   column-def +
    + + +

    analyze-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    attach-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    begin-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    commit-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    rollback-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    savepoint-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    release-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    create-index-stmt:

    +

    +Used by:   sql-stmt

    +References:   indexed-column +
    + + +

    indexed-column:

    +

    +Used by:   create-index-stmt   table-constraint +
    + + +

    create-table-stmt:

    +

    +Used by:   sql-stmt

    +References:   column-def   select-stmt   table-constraint +
    + + +

    column-def:

    +

    +Used by:   alter-table-stmt   create-table-stmt

    +References:   column-constraint   type-name +
    + + +

    type-name:

    +

    +Used by:   column-def   expr

    +References:   signed-number +
    + + +

    column-constraint:

    +

    +Used by:   column-def

    +References:   conflict-clause   expr   foreign-key-clause   literal-value   signed-number +
    + + +

    signed-number:

    +

    +Used by:   column-constraint   pragma-value   type-name +
    + + +

    table-constraint:

    +

    +Used by:   create-table-stmt

    +References:   conflict-clause   expr   foreign-key-clause   indexed-column +
    + + +

    foreign-key-clause:

    +

    +Used by:   column-constraint   table-constraint +
    + + +

    conflict-clause:

    +

    +Used by:   column-constraint   table-constraint +
    + + +

    create-trigger-stmt:

    +

    +Used by:   sql-stmt

    +References:   delete-stmt   expr   insert-stmt   select-stmt   update-stmt +
    + + +

    create-view-stmt:

    +

    +Used by:   sql-stmt

    +References:   select-stmt +
    + + +

    create-virtual-table-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    delete-stmt:

    +

    +Used by:   create-trigger-stmt   sql-stmt

    +References:   expr   qualified-table-name +
    + + +

    delete-stmt-limited:

    +

    +Used by:   sql-stmt

    +References:   expr   ordering-term   qualified-table-name +
    + + +

    detach-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    drop-index-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    drop-table-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    drop-trigger-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    drop-view-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    expr:

    +

    +Used by:   column-constraint   create-trigger-stmt   delete-stmt   delete-stmt-limited   insert-stmt   join-constraint   ordering-term   result-column   select-core   table-constraint   update-stmt   update-stmt-limited

    +References:   literal-value   raise-function   select-stmt   type-name +
    + + +

    raise-function:

    +

    +Used by:   expr +
    + + +

    literal-value:

    +

    +Used by:   column-constraint   expr +
    + + +

    insert-stmt:

    +

    +Used by:   create-trigger-stmt   sql-stmt

    +References:   expr   select-stmt +
    + + +

    pragma-stmt:

    +

    +Used by:   sql-stmt

    +References:   pragma-value +
    + + +

    pragma-value:

    +

    +Used by:   pragma-stmt

    +References:   signed-number +
    + + +

    reindex-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    select-stmt:

    +

    +Used by:   create-table-stmt   create-trigger-stmt   create-view-stmt   expr   insert-stmt   single-source   sql-stmt

    +References:   compound-operator   ordering-term   select-core +
    + + +

    select-core:

    +

    +Used by:   select-stmt

    +References:   expr   join-source   ordering-term   result-column +
    + + +

    result-column:

    +

    +Used by:   select-core

    +References:   expr +
    + + +

    join-source:

    +

    +Used by:   select-core   single-source

    +References:   join-constraint   join-op   single-source +
    + + +

    single-source:

    +

    +Used by:   join-source

    +References:   join-source   select-stmt +
    + + +

    join-op:

    +

    +Used by:   join-source +
    + + +

    join-constraint:

    +

    +Used by:   join-source

    +References:   expr +
    + + +

    ordering-term:

    +

    +Used by:   delete-stmt-limited   select-core   select-stmt   update-stmt-limited

    +References:   expr +
    + + +

    compound-operator:

    +

    +Used by:   select-stmt +
    + + +

    update-stmt:

    +

    +Used by:   create-trigger-stmt   sql-stmt

    +References:   expr   qualified-table-name +
    + + +

    update-stmt-limited:

    +

    +Used by:   sql-stmt

    +References:   expr   ordering-term   qualified-table-name +
    + + +

    qualified-table-name:

    +

    +Used by:   delete-stmt   delete-stmt-limited   update-stmt   update-stmt-limited +
    + + +

    vacuum-stmt:

    +

    +Used by:   sql-stmt +
    + + +

    comment-syntax:

    +

    +
    + +
    +This page last modified 2008/10/09 15:16:24 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/sysreq.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/sysreq.html --- sqlite3-3.4.2/www/sysreq.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/sysreq.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,789 @@ + + +SQLite System Requirements + + + + + +
    + + + +
    +
    Small. Fast. Reliable.
    Choose any three.
    + +
    + +
    + + + + +

    System Requirements For SQLite

    + +

    This document outlines the chief goals and objectives of the SQLite +library. All of the features and capabilities of SQLite (that is to say, +the requirements of SQLite) can +ultimately be traced back to one of the broad and general +requirements specified here.

    + +

    SQLite is not a complete system and so the requirements +provided by this document are not system requirements in the +strict sense. Perhaps "sub-system requirements" would be a better +term, since SQLite is always a sub-component of a larger system.

    + +

    This document is targeted primarily at developers who are +working within a waterfall development model that uses detailed +requirements written with the modal auxiliary verb "shall". +However, this document is also useful as a general-purpose, high-level +description of SQLite for developers who are working under +different development paradigms.

    + + +

    S10000: +The SQLite library shall translate high-level SQL statements into +low-level I/O calls to persistent storage. +

    +SQLite is an SQL database engine. And the fundamental task of + every SQL database engine is to translate the abstract SQL statements + readily understood by humans into sequences of I/O operations readily + understood by computer hardware. This requirement expresses the + essence of SQLite. +
    + + +

S10100: +The SQLite library shall accept a well-defined dialect of SQL +that conforms to published SQL standards. +

+SQL is one of the world's most widely known programming languages, + but it is also one of the most ill-defined. There are various SQL + standards documents available. However the SQL standards documents are + obtuse to the point of being incomprehensible. And the standards + allow for so much "implementation defined" behavior that no + two SQL database engines understand exactly the same language.

    + +

    SQLite does not attempt to obtain strict compliance with any + one of the various SQL standards. + Instead, SQLite tries to be as compatible as possible with other SQL + database engines. SQLite attempts to operate on the principle of + least surprise. That is to say, experienced SQL programmers should + find SQLite's dialect intuitive and natural.

    + +

    SQLite may omit some obscure features of SQL. And the SQL + dialect that SQLite understands might contain some enhancements not + found in some standards documents. Nevertheless, applications + written for other SQL database engines should be portable to SQLite + with little to no change. And programmers writing code for SQLite + should not encounter anything unexpected. +

    + + +

    S10110: +The SQLite library shall support BLOB, CLOB, integer, and floating-point +datatypes. + + +

    S10120: +The SQLite library shall implement the standard SQL interpretation +of NULL values. +

    +In cases where + published standards are ambiguous, SQLite will follow the practice of + other popular database engines. +
    + + +

    S10200: +The SQLite library shall communicate directly with database files +in persistent storage. +

+Most other database + engines implement a client/server model in which a small client library + is linked with the application and the client communicates with a separate + server process using interprocess communication (IPC). SQLite avoids + the complication of having a separate server process by doing + I/O directly to the underlying filesystem. +
    + + +

    S10300: +The SQLite library shall implement ACID transactions. +

+In the database world, "ACID" is an acronym for Atomic, Consistent, + Isolated, and Durable. Atomic means that a change to the database + happens either entirely or not at all. Consistent means that if the + database file is well-formed before the start of a transaction then + it is guaranteed to be well-formed after the transaction commits. + Isolated means that when two or more threads or + processes are working with the same database, uncommitted changes + made by one are not visible to the others. Durable means that once + a transaction commits, it stays committed even if there is a subsequent + software crash or power failure. +
    + + +

    S10500: +The SQLite library shall implement transactions that are robust +across application crashes, operating-system crashes, and power +failures. +

+An operating system crash or an unexpected power loss can + sometimes damage + the underlying persistent storage in ways that no software can defend + against. (For example, the content of a disk drive might be completely + erased and become unrecoverable.) + Nevertheless, software can take steps to defend against the kinds + of damage that typically occur following operating system crashes and + power failures. The usual damage is that some writes are missing + or incomplete and that writes have occurred out of order. We say + that software is "robust" if it defends against the common kinds of + damage seen following an operating system crash or power loss.

    +
    + + +

    S10600: +The SQLite library shall support simultaneous access to multiple +database files on the same database connection. +

+Many applications benefit from being able to access multiple + database files using the same database connection, so that + information can be transferred from one database to another + atomically, or so that queries can join data across multiple + databases. +
    + + +

    S10700: +The SQLite library shall provide interfaces that allow the application +to obtain the status and results of SQL operations. + + +

    S20000: +The SQLite library shall be extensible and configurable. +

+SQLite is intended to be an embedded database that functions well + in resource-limited systems. For that reason we desire to keep the + size of the library small. That choice argues against a large + default function set. Instead of having many built-in features, SQLite is + designed to be extensible at compile-time and run-time with new + application-defined functions and behaviors. +
    + + +

    S20100: +The SQLite library shall provide interfaces that permit the application +to override interfaces to the platform on which the application is running. +

    +SQLite works on common workstations and in embedded systems. + Sometimes these devices, particularly embedded systems, + have odd and unusual operating systems. In order to support + this level of portability, SQLite allows the interface to the operating + system to be defined at run-time. +
    + + +

    S20110: +The SQLite library shall provide interfaces that permit the application +to override the interfaces used to read and write persistent storage. + + +

    S20120: +The SQLite library shall provide interfaces that permit the application +to override the interfaces used for memory allocation. + + +

    S20130: +The SQLite library shall provide interfaces that permit the application +to override the interfaces used for controlling mutexes. + + +

    S20200: +The SQLite library shall provide interfaces that permit the application +to create new SQL functions. +

+Most SQL database engines support a rich set of SQL functions. + SQLite, in contrast, supports only a select few SQL functions. + But SQLite makes up for its dearth of built-in SQL functions by + allowing the application to create new SQL functions easily. +
    + + +

    S20300: +The SQLite library shall provide interfaces that permit the application +to create new text collating sequences. +

+By default, SQLite only understands ASCII text. The tables needed + to do proper comparisons and case folding + of full unicode text are huge - much larger + than the SQLite library itself. And, any application that is dealing + with unicode probably already has those tables built in. For + SQLite to include unicode comparison tables would be redundant and wasteful. + As a compromise, SQLite allows the application to specify alternative + collating sequences for things such as unicode text, + so that applications that need such collating sequences can have + them easily while other applications that are content with ASCII are + not burdened with unnecessary tables. +
    + + +

    S20400: +The SQLite library shall provide interfaces that permit the application +to create new classes of virtual SQL tables. +

    +A virtual table is an SQL object that appears to be an ordinary + SQL table for the purposes of INSERT, UPDATE, DELETE, and SELECT statements. + But instead of being backed by persistent storage, the virtual table is + an object that responds programmatically to INSERT, UPDATE, DELETE, and + SELECT requests. Virtual tables have been used to implement full-text + search and R-Tree indices, among other things. +
    + + +

    S20500: +The SQLite library shall provide interfaces that permit the application +to load extensions at run-time using shared libraries. +

    +Some applications choose to package extensions in separate + shared library files and load those extensions at run-time on + an as-needed basis. Depending on the nature of the application, + this can be an aid to configuration management, since it allows + the extension to be updated without having to replace the core + application. +
    + + +

    S20600: +The SQLite library shall provide interfaces that permit the application +to dynamically query and modify size limits. +

    +SQLite has finite limits. For example, there is a maximum size BLOB + or CLOB that SQLite will store, a maximum size to a database file, + a maximum number of columns in a table or query, and a maximum depth + of an expression parse tree. All of these have default values that + are sufficiently large that a typical application is very unlikely to + ever reach the limits. But some applications (for example, applications + that process content from untrusted and possibly hostile sources) + might want to define much lower limits on some database connections for + the purpose of preventing denial-of-service attacks. Or, an application + might want to select much lower limits in order to prevent over-utilization + of limited resources on an embedded device. Whatever the rationale, SQLite + permits limits to be queried and set at run-time. +
    + + +

    S30000: +The SQLite library shall be safe for use in long-running, +low-resource, high-reliability applications. +

    +SQLite is designed to work well within embedded devices with + very limited resources. To this end, it expects to confront situations + where memory is unavailable and where I/O operations fail and it is designed + to handle such situations with ease and grace. SQLite also avoids aggravating + low-resource situations by correctly freeing rather than leaking + resources it uses itself. +
    + + +

    S30100: +The SQLite library shall release all system resources it holds +when it is properly shutdown. +

    +A "Proper shutdown" means that all resources that the application + has allocated from SQLite have been released by the application. + The leak-free operation guarantee of SQLite applies even if there + have been memory allocation errors or I/O errors during operation. +
    + + +

    S30200: +The SQLite library shall be configurable so that it is guaranteed +to never fail a memory allocation as long as the application does +not request resources in excess of reasonable and published limits. +

    +Safety-critical systems typically disallow the use of malloc() and + free() because one never knows when they might fail due to memory + fragmentation. However, SQLite makes extensive use of dynamic objects + and so it must be able to allocate and deallocate memory + to hold those objects.

    + +

    In order to be acceptable for use in safety critical systems, + SQLite can be configured to use its own internal memory allocator + which, subject to proper usage by the application, guarantees that + memory allocation will never fail either due to memory fragmentation + or any other cause. The proof of correctness is due to J. M. Robson: + "Bounds for Some Functions Concerning Dynamic Storage Allocations", + Journal of the ACM, Volume 21, Number 3, July 1974.

    + +

    The internal memory allocator is seeded with a large contiguous + block of memory at application start. SQLite makes all of its + internal memory allocations from this initial seed. + The Robson proof depends on SQLite being coupled to a well-behaved + application. The application must not try to use more than a + precomputed fraction of the available memory - that fraction depending + on the size ratio between the largest and smallest memory allocations. + Additional details are provided elsewhere. +

    + + +

S30210: +The SQLite library shall provide instrumentation that can alert +the application when its resource usage nears or exceeds the limits +of the memory allocation guarantee. +

    +To help insure that an + application never fails a memory allocation call, SQLite provides + interfaces that can inform the application if its memory usage + is growing close to or has exceeded the critical Robson limits. + In practice, the memory used by an application can exceed the + limits of the Robson proof by a wide margin with no harmful effect. + There is plenty of safety margin. But the Robson proof does break + down once the limits are exceeded + and the guarantee that no memory allocation will fail is lost. Hence + it is important to be able to track how close an application has come + to reaching critical limits. +
    + + +

S30220: +The SQLite library shall provide facilities to automatically +recycle memory when usage nears preset limits. +

    +When SQLite comes under memory pressure, it can be configured to + recycle memory from one use to another, thus helping to reduce the + pressure. "Memory pressure" means that memory available for + allocation is becoming less plentiful. In a safety-critical application, + memory pressure might mean that the amount of allocated memory is + getting close to the point where the Robson proof + breaks down. On a workstation, memory pressure might mean that + available virtual memory is running low. +
    + + +

S30230: +The SQLite library shall permit BLOB and CLOB objects to be +read and written incrementally using small memory buffers. +

+SQLite provides the ability to read and write megabyte + or gigabyte blobs and text strings without having to allocate + enough memory to hold the entire blob or string in memory all + at once. This enables SQLite to read and write BLOBs that + are actually larger than the available memory on the device. + It also helps reduce the size of the maximum memory allocation + which helps keep memory usage below Robson limits and thus helps + to guarantee failure-free memory allocation. +
    + + +

S30300: +When a memory allocation fails, SQLite shall either silently make +do without the requested memory or else it shall report the error +back to the application. +

    +Memory allocation problems do not cause SQLite to fail + catastrophically. + SQLite recognizes all memory allocation failures and either works + around them, or + cleanly aborts what it is doing and returns to the application + with an error that indicates insufficient memory was available. + Assuming new memory becomes available, SQLite is able to continue + operating normally after a memory allocation failure. +
    + + +

S30400: +When an I/O operation fails, SQLite shall either silently +recover or else it shall report the error +back to the application. +

+SQLite responds sanely to disk I/O errors. If it is unable + to work around the problem, SQLite might have to report the error + back up to the application. In either case, SQLite is able to + continue functioning, assuming of course that the I/O error was + transient. +
    + + +

    S30500: +SQLite shall provide the capability to monitor +the progress and interrupt the evaluation of a long-running query. +

+SQLite is able to cleanly abort an operation in progress and + afterwards continue functioning normally without any memory or + other resource leaks. An example of where this functionality is + used occurs in the command-line interface (CLI) program for SQLite. + If the user enters a query that has millions of result rows, those + rows begin pouring out onto the screen. The operator can then + hit the interrupt key sequence (which varies from one operating + system to another but is often Control-C) which causes the query + to be aborted. +
    + + +

    S30600: +All unused portions of a well-formed SQLite database file shall +be available for reuse. +

+When information is deleted from an SQLite database, the default + action is for SQLite to mark the space as unused and then to reuse + the space at the next opportune INSERT. On devices where persistent + storage is scarce, however, it is sometimes desirable to return the + unused space back to the operating system. SQLite supports this. +
    + + +

    S30700: +SQLite shall provide the capability to incrementally decrease the +size of the persistent storage file as information is removed from +the database. + + +

    S30800: +SQLite shall provide the interfaces that support testing and +validation of the library code in an as-delivered configuration. +

+In consumer-grade software, it is often acceptable to run tests + on an instrumented version of the code. But for high-reliability + systems, it is better to test the code exactly as it is deployed. + The saying at NASA is "test what you fly and fly what you test." + In support of this goal, SQLite includes interfaces whose only purpose + is to observe internal state and to place SQLite into particular internal states + for testing. +
    + + +

    S30900: +SQLite shall provide the ability for separate database connections +within the same process to share resources. +

    +On resource-constrained devices, it is desirable to get double-duty + out of resources where possible. +
    + + +

    S40000: +The SQLite library shall be safe for use in applications that +make concurrent access to the underlying database from different +threads and/or processes. +

    +In nearly all modern digital systems, there are many things happening + at once. And many of those things involve SQLite. +
    + + +

    S40100: +The SQLite library shall be configurable to operate correctly in +a multi-threaded application. +

+The developers of SQLite believe that "thread-safe" is a + self-contradiction. No application that includes multiple threads + of control within the same address space is ever truly "safe". + And yet it is recognized that many developers want to + create multithreaded applications and to use SQLite in those + applications. Therefore, SQLite is engineered to be "thread-safe". +
    + + +

    S40200: +The SQLite library shall support multiple independent database +connections per thread and per process. + + +

    S40300: +The SQLite library shall automatically control access to common +databases from different connections in different threads or processes. +

    +SQLite uses both internal mutexes and external file locking to + ensure that two or more threads or processes working + on the same database file play nicely with one another. +
    + + +

    S40400: +The SQLite library shall notify the application if an operation can +not be completed due to concurrent access constraints. + + +

    S40410: +The SQLite library shall provide interfaces to assist the application +in responding appropriately when an operation can +not be completed due to concurrent access constraints. +

+If an SQL statement cannot be completed because another process is + holding a lock on the database, then the application needs to be able + to take corrective action, such as waiting for the lock to clear. +
    + + +

    S50000: +The SQLite library shall be cross-platform. +

+Cross-platform in this context means that SQLite + can be used on a wide variety of operating systems and processors, + ranging from small, special-purpose embedded systems, to workstations, + to servers. Platforms can be 32- or 64-bit, big-endian or little-endian. + Cross-platform refers to the source code. Obviously SQLite would + need to be recompiled in order to run on processors with different + instruction sets. +
    + + +

    S50100: +The SQLite library shall be implemented in ANSI-C. +

+C has been called the "universal assembly language". + Nearly all computer systems accept code written in C. + Thus, to help make SQLite cross-platform, it is written in ANSI-C. +
    + + +

    S50200: +The SQLite library shall support text encoded as UTF-8, +UTF-16le, or UTF-16be. + + +

    S50300: +SQLite database files shall be processor and byte-order independent. +

+An SQLite database file can be freely moved between machines + with different operating systems, different processors, + different size integers, and different byte orders. The same + database file should work on any machine. +
    + + +

    S60000: +The SQLite library shall provide introspection capabilities to the +application. +

    +Some applications need to be able to discover characteristics of + their environment at run-time and to make appropriate adjustments to + their processing to accommodate the environment they find themselves in. + SQLite attempts to support this need. +
    + + +

    S60100: +The SQLite library shall provide interfaces that an application can +use to discover fixed, compile-time characteristics of the +SQLite library. +

    +Some applications are designed to work with different versions + of SQLite which may or may not enable selected features. For example, + SQLite can be compiled to be threadsafe or not. The threadsafe version + works in multi-threaded applications. The non-threadsafe build runs + faster. When an application is using an unknown version of SQLite + it is important that it be able to determine the characteristics of + the particular SQLite build it is using. +
    + + +

    S60200: +The SQLite library shall provide interfaces that an application can +use to find run-time performance characteristics and status of the +SQLite library. + + +

    S60300: +The SQLite library shall provide interfaces that permit an application +to query the schema of a database. + + +

S60400: +The SQLite library shall provide interfaces that allow an application +to monitor the sequence and progress of queries submitted to SQLite. + + +

    S60500: +The SQLite library shall provide interfaces that allow an application +to discover the algorithms that SQLite has chosen to implement specific +SQL statements. + + +

    S60600: +The SQLite library shall provide interfaces that allow an application +to discover relationships between SQLite objects. +

    +SQLite objects are often related. For example, every prepared + statement is associated with a database connection. And every + function context is associated with a prepared statement. + Applications and extensions frequently find it useful to be able + to discover these relationships at runtime. +
    + + +

    S70000: +The SQLite library shall provide interfaces that promote the safe +construction and processing of SQL statements and data from +untrusted sources. +

    +Many applications need to be able to safely process data or + even SQL statements that are received from untrusted sources. + An "SQL Injection Attack" occurs when an adversary intentionally + introduces data that is designed to have undesirable side effects + on the database files. For example, suppose an application generates + an INSERT statement as follows:

    + +
    +  snprintf(z, n, "INSERT INTO table1 VALUES('%s')", zUserData);
    +  
    + +

    If a hostile user supplies data that reads:

    + +
    +  beginning'); DELETE FROM table1; INSERT INTO table1 VALUES('
    +  
    + +

    Then the constructed INSERT statement would be transformed into + three statements, the second of which is an undesired deletion of + all prior content from the table. SQLite contains interfaces that + are designed to help applications avoid SQL injection attacks and + similar problems. +

    + + +

    S70100: +The SQLite library shall provide the application means by which the +application can test and enforce compliance with database access +policies for any particular SQL statement. +

+Some applications (for example + CVSTrac and + Fossil) will run SELECT + statements entered by anonymous users on the internet. Such + applications want to be able to guarantee that a hostile user does + not access restricted tables (such as the PASSWORD column of the USER + table) or modify the database in any way. SQLite supports the ability + to analyze an arbitrary SQL statement to insure that it does not + perform undesired operations. +
    + + +

    S70200: +The SQLite library shall provide interfaces that test to see if an +SQL statement being received incrementally is complete. +

    +

    Applications such as the command-line interface (CLI) for SQLite + will prompt the user to enter SQL statements and will evaluate those + statements as they are entered. But sometimes an SQL statement spans + multiple lines. The CLI needs to know to issue a continuation prompt + and await additional input if the input received so far is incomplete. + SQLite supports interfaces that allow the CLI and similar applications + to know if the input it has gathered so far is complete or if it needs + to await additional input before processing the SQL. +

    + + +

    S70300: +The SQLite library shall support prepared statement objects with +late parameter binding +

    +The concept of a "prepared statement" allows an SQL statement to be + parsed and compiled once and then reused many times. This is a performance + advantage in many applications. In addition, binding values to variables + in the prepared statement is safer than embedding values as literals because + bound values do not need to be quoted in order to avoid an SQL injection + attack. +
    + + +

    S80000: +SQLite shall exhibit ductile failure characteristics +

    +A common characteristic of digital systems (as opposed to analog + systems) is that digital systems tend to be brittle. In other words, + digital systems tend to work perfectly with no sign of stress until + they fail utterly and completely. The behavior is like a + physical object that holds its shape as external + loads increase, until it shatters without warning.

    + +

    In most circumstances, ductile failure is preferred over brittle + failure. A ductile device begins showing signs of + trouble well in advance of failure. Physical objects bend and/or crack, + providing operators with warnings of overload and an opportunity + to take corrective action, while continuing to function for as long + as possible.

    + +

+Digital systems have a reputation for being brittle, yet brittleness + is not an intrinsic property of digital systems. Digital systems can + be designed to continue functioning outside their design parameters + while providing operators with warning of possible trouble. But there + must be focused effort on the part of the designers to make digital + systems ductile. With analog systems, the ductileness tends to be + inherent in the medium, but with digital systems ductileness needs + to be explicitly added. +

    + + +

    S80100: +SQLite shall make anomalies visible to the application +

    +SQLite strives to deal gracefully with anomalous behavior by + the application or by its own internal subsystems. Yet graceful + handling of out-of-band inputs is of no value if the anomaly goes + unreported. The problems must be visible to the + application so that warnings and alarms can be propagated to operators. + The useful aspect of ductile failure is that it gives advance warning. + Ductile behavior is of no use to anyone if nobody can see the part + bending. +
    + + + +
    +This page last modified 2009/02/19 14:22:46 UTC +
    Binary files /tmp/wcSK23PnjP/sqlite3-3.4.2/www/table-ex1b2.gif and /tmp/3ARg2Grji7/sqlite3-3.6.16/www/table-ex1b2.gif differ diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/tclsqlite.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/tclsqlite.html --- sqlite3-3.4.2/www/tclsqlite.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/tclsqlite.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,749 @@ + + +The Tcl interface to the SQLite library + + + + + +

    The Tcl interface to the SQLite library

    + +

    The SQLite library is designed to be very easy to use from +a Tcl or Tcl/Tk script. SQLite +began as a Tcl extension +and the primary test suite for SQLite is written in TCL. SQLite +can be used with any programming language, but its connections to +TCL run deep.

    + +

    This document gives an overview of the Tcl +programming interface for SQLite.

    + +

    The API

    + +

The interface to the SQLite library consists of a single +Tcl command named sqlite3. +Because there is only this +one command, the interface is not placed in a separate +namespace.

    + +

    The sqlite3 command is used as follows:

    + +
    +sqlite3  dbcmd  database-name +
    + +

    +The sqlite3 command opens the database named in the second +argument. If the database does not already exist, it is +automatically created. +The sqlite3 command also creates a new Tcl +command to control the database. The name of the new Tcl command +is given by the first argument. This approach is similar to the +way widgets are created in Tk. +

    + +

    +The name of the database is just the name of a disk file in which +the database is stored. If the name of the database is +the special name ":memory:" then a new database is created +in memory. If the name of the database is an empty string, then +the database is created in an empty file that is automatically deleted +when the database connection closes. +
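+As an illustrative sketch of the three cases just described (the command
+names db1, memdb, and tempdb are arbitrary):
+
+sqlite3 db1 ./testdb       ;# open (or create) the disk file ./testdb
+sqlite3 memdb :memory:     ;# a new database held entirely in memory
+sqlite3 tempdb {}          ;# a temporary database, deleted when closed
+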

    + +

    +Once an SQLite database is open, it can be controlled using +methods of the dbcmd. There are currently 22 methods +defined.

    + +

    +

    +

    + +

    The use of each of these methods will be explained in the sequel, though +not in the order shown above.

    + + +

    The "eval" method

    + +

    +The most useful dbcmd method is "eval". The eval method is used +to execute SQL on the database. The syntax of the eval method looks +like this:

    + +
    +dbcmd  eval  sql +    ?array-name ? ?script? +
    + +

    +The job of the eval method is to execute the SQL statement or statements +given in the second argument. For example, to create a new table in +a database, you can do this:

    + +
    +sqlite3 db1 ./testdb
    +db1 eval {CREATE TABLE t1(a int, b text)}
    +
    + +

    The above code creates a new table named t1 with columns +a and b. What could be simpler?

    + +

    Query results are returned as a list of column values. If a +query requests 2 columns and there are 3 rows matching the query, +then the returned list will contain 6 elements. For example:

    + +
    +db1 eval {INSERT INTO t1 VALUES(1,'hello')}
    +db1 eval {INSERT INTO t1 VALUES(2,'goodbye')}
    +db1 eval {INSERT INTO t1 VALUES(3,'howdy!')}
    +set x [db1 eval {SELECT * FROM t1 ORDER BY a}]
    +
    + +

    The variable $x is set by the above code to

    + +
    +1 hello 2 goodbye 3 howdy! +
    + +

    You can also process the results of a query one row at a time +by specifying the name of an array variable and a script following +the SQL code. For each row of the query result, the values of all +columns will be inserted into the array variable and the script will +be executed. For instance:

    + +
    +db1 eval {SELECT * FROM t1 ORDER BY a} values {
    +    parray values
    +    puts ""
    +}
    +
    + +

    This last code will give the following output:

    + +
    +values(*) = a b
    +values(a) = 1
    +values(b) = hello

    + +values(*) = a b
    +values(a) = 2
    +values(b) = goodbye

    + +values(*) = a b
    +values(a) = 3
    +values(b) = howdy!
    +

    + +

+For each column in a row of the result, the name of that column +is used as an index into the array. The value of the column is stored +in the corresponding array entry. The special array index * is +used to store a list of column names in the order that they appear. +

    + +

    +If the array variable name is omitted or is the empty string, then the value of +each column is stored in a variable with the same name as the column +itself. For example: +

    + +
    +db1 eval {SELECT * FROM t1 ORDER BY a} {
    +    puts "a=$a b=$b"
    +}
    +
    + +

    +From this we get the following output +

    + +
    +a=1 b=hello
    +a=2 b=goodbye
    +a=3 b=howdy!
    +
    + +

+Tcl variable names can appear in the SQL statement of the second argument +in any position where it is legal to put a string or number literal. The +value of the variable is substituted for the variable name. If the +variable does not exist a NULL value is used. For example: +

    + +
    +db1 eval {INSERT INTO t1 VALUES(5,$bigstring)} +
    + +

    +Note that it is not necessary to quote the $bigstring value. That happens +automatically. If $bigstring is a large string or binary object, this +technique is not only easier to write, it is also much more efficient +since it avoids making a copy of the content of $bigstring. +

    + +

+If the $bigstring variable has both a string and a "bytearray" representation, +then TCL inserts the value as a string. If it has only a "bytearray" +representation, then the value is inserted as a BLOB. To force a +value to be inserted as a BLOB even if it also has a text representation, +use a "@" character in place of the "$". Like this: +

    + +
    +db1 eval {INSERT INTO t1 VALUES(5,@bigstring)} +
    + +

    +If the variable does not have a bytearray representation, then "@" works +just like "$". +

    + + +

    The "close" method

    + + +

    +As its name suggests, the "close" method to an SQLite database just +closes the database. This has the side-effect of deleting the +dbcmd Tcl command. Here is an example of opening and then +immediately closing a database: +

    + +
    +sqlite3 db1 ./testdb
    +db1 close
    +
    + +

    +If you delete the dbcmd directly, that has the same effect +as invoking the "close" method. So the following code is equivalent +to the previous:

    + +
    +sqlite3 db1 ./testdb
    +rename db1 {}
    +
    + +

    The "transaction" method

    + + +

    +The "transaction" method is used to execute a TCL script inside an SQLite +database transaction. The transaction is committed when the script completes, +or it rolls back if the script fails. If the transaction occurs within +another transaction (even one that is started manually using BEGIN) it +is a no-op. +

    + +

    +The transaction command can be used to group together several SQLite +commands in a safe way. You can always start transactions manually using +BEGIN, of +course. But if an error occurs so that the COMMIT or ROLLBACK are never +run, then the database will remain locked indefinitely. Also, BEGIN +does not nest, so you have to make sure no other transactions are active +before starting a new one. The "transaction" method takes care of +all of these details automatically. +

    + +

    +The syntax looks like this: +

    + +
+dbcmd  transaction  ?transaction-type? +  SCRIPT +
    + + +

    +The transaction-type can be one of deferred, +exclusive or immediate. The default is deferred. +
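+For example, a minimal sketch that wraps two inserts in a single transaction
+(it assumes the t1 table created earlier in this document; the values are
+illustrative):
+
+db1 transaction {
+    db1 eval {INSERT INTO t1 VALUES(10,'ten')}
+    db1 eval {INSERT INTO t1 VALUES(11,'eleven')}
+}
+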

    + +

    The "cache" method

    + + +

    +The "eval" method described above keeps a cache of +prepared statements +for recently evaluated SQL commands. +The "cache" method is used to control this cache. +The first form of this command is:

    + +
    +dbcmd  cache size  N +
    + +

    This sets the maximum number of statements that can be cached. +The upper limit is 100. The default is 10. If you set the cache size +to 0, no caching is done.

    + +

    The second form of the command is this:

    + + +
    +dbcmd  cache flush +
    + +

    The cache-flush method +finalizes +all prepared statements currently +in the cache.

    + + +

    The "complete" method

    + + +

    +The "complete" method takes a string of supposed SQL as its only argument. +It returns TRUE if the string is a complete statement of SQL and FALSE if +there is more to be entered.

    + +

    The "complete" method is useful when building interactive applications +in order to know when the user has finished entering a line of SQL code. +This is really just an interface to the +sqlite3_complete() C +function. + +

    The "copy" method

    + + +

    +The "copy" method copies data from a file into a table. +It returns the number of rows processed successfully from the file. +The syntax of the copy method looks like this:

    + +
    +dbcmd  copy  conflict-algorithm +  table-name   file-name  +    ?column-separator ? +  ?null-indicator? +
    + +

Conflict-algorithm must be one of the SQLite conflict algorithms for +the INSERT statement: rollback, abort, +fail, ignore, or replace. See the SQLite Language +section for ON CONFLICT for more information. +The conflict-algorithm must be specified in lower case. +

    + +

Table-name must already exist as a table. File-name must exist, and +each row must contain the same number of columns as defined in the table. +If a line in the file contains more or fewer than the number of columns defined, +the copy method rolls back any inserts, and returns an error.

    + +

    Column-separator is an optional column separator string. The default is +the ASCII tab character \t.

    + +

Null-indicator is an optional string that indicates a column value is null. +The default is an empty string. Note that column-separator and +null-indicator are optional positional arguments; if null-indicator +is specified, a column-separator argument must be specified and +precede the null-indicator argument.

    + +

    The copy method implements similar functionality to the .import +SQLite shell command. +The SQLite 2.x COPY statement +(using the PostgreSQL COPY file format) +can be implemented with this method as:

    + +
    +dbcmd  copy  $conflictalgo +  $tablename   $filename  +    \t  +  \\N +
    + + +

    The "timeout" method

    + + +

    The "timeout" method is used to control how long the SQLite library +will wait for locks to clear before giving up on a database transaction. +The default timeout is 0 millisecond. (In other words, the default behavior +is not to wait at all.)

    + +

The SQLite database allows multiple simultaneous +readers or a single writer but not both. If any process is writing to +the database no other process is allowed to read or write. If any process +is reading the database other processes are allowed to read but not write. +The entire database shares a single lock.

    + +

    When SQLite tries to open a database and finds that it is locked, it +can optionally delay for a short while and try to open the file again. +This process repeats until the query times out and SQLite returns a +failure. The timeout is adjustable. It is set to 0 by default so that +if the database is locked, the SQL statement fails immediately. But you +can use the "timeout" method to change the timeout value to a positive +number. For example:

    + +
    db1 timeout 2000
    + +

    The argument to the timeout method is the maximum number of milliseconds +to wait for the lock to clear. So in the example above, the maximum delay +would be 2 seconds.

    + +

    The "busy" method

    + + +

    The "busy" method, like "timeout", only comes into play when the +database is locked. But the "busy" method gives the programmer much more +control over what action to take. The "busy" method specifies a callback +Tcl procedure that is invoked whenever SQLite tries to open a locked +database. This callback can do whatever is desired. Presumably, the +callback will do some other useful work for a short while (such as service +GUI events) then return +so that the lock can be tried again. The callback procedure should +return "0" if it wants SQLite to try again to open the database and +should return "1" if it wants SQLite to abandon the current operation. + +

    The "enable_load_extension" method

    + + +

    The extension loading mechanism of SQLite (accessed using the +load_extension() SQL function) is turned off by default. This is +a security precaution. If an application wants to make use of the +load_extension() function it must first turn the capability on using +this method.

    + +

    This method takes a single boolean argument which will turn the +extension loading functionality on or off.

    + +

    This method maps to the sqlite3_enable_load_extension() C/C++ +interface.

    + +

    The "exists" method

    + + +

    The "exists" method is similar to "onecolumn" and "eval" in that +it executes SQL statements. The difference is that the "exists" method +always returns a boolean value which is TRUE if a query in the SQL +statement it executes returns one or more rows and FALSE if the SQL +returns an empty set.

    + +

    The "exists" method is often used to test for the existance of +rows in a table. For example:

    + +
    +if {[db exists {SELECT 1 FROM table1 WHERE user=$user}]} {
    +   # Processing if $user exists
    +} else {
    +   # Processing if $user does not exist
    +} +
    + +

    The "last_insert_rowid" method

    + + +

    The "last_insert_rowid" method returns an integer which is the ROWID +of the most recently inserted database row.

    + +

    The "function" method

    + + +

    The "function" method registers new SQL functions with the SQLite engine. +The arguments are the name of the new SQL function and a TCL command that +implements that function. Arguments to the function are appended to the +TCL command before it is invoked.

    + +

+The following example creates a new SQL function named "hex" that converts +its numeric argument into a hexadecimal encoded string: +

    + +
    +db function hex {format 0x%X} +
    + + +

    The "nullvalue" method

    + + +

    +The "nullvalue" method changes the representation for NULL returned +as result of the "eval" method.

    + +
    +db1 nullvalue NULL +
    + +

    The "nullvalue" method is useful to differ between NULL and empty +column values as Tcl lacks a NULL representation. The default +representation for NULL values is an empty string.

    + +

    The "onecolumn" method

    + + +

    The "onecolumn" method works like +"eval" in that it evaluates the +SQL query statement given as its argument. The difference is that +"onecolumn" returns a single element which is the first column of the +first row of the query result.

    + +

    This is a convenience method. It saves the user from having to +do a "[lindex ... 0]" on the results of an "eval" +in order to extract a single column result.

    + +

    The "changes" method

    + + +

    The "changes" method returns an integer which is the number of rows +in the database that were inserted, deleted, and/or modified by the most +recent "eval" method.

    + +

    The "total_changes" method

    + + +

    The "total_changes" method returns an integer which is the number of rows +in the database that were inserted, deleted, and/or modified since the +current database connection was first opened.

    + +

    The "authorizer" method

    + + +

    The "authorizer" method provides access to the +sqlite3_set_authorizer +C/C++ interface. The argument to authorizer is the name of a procedure that +is called when SQL statements are being compiled in order to authorize +certain operations. The callback procedure takes 5 arguments which describe +the operation being coded. If the callback returns the text string +"SQLITE_OK", then the operation is allowed. If it returns "SQLITE_IGNORE", +then the operation is silently disabled. If the return is "SQLITE_DENY" +then the compilation fails with an error. +

    + +

    If the argument is an empty string then the authorizer is disabled. +If the argument is omitted, then the current authorizer is returned.
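+A sketch of an authorizer that blocks reads of a hypothetical password
+column. The argument meanings shown in the comment follow the usual
+sqlite3_set_authorizer() convention for SQLITE_READ and are an assumption
+here; the table, column, and procedure names are illustrative:
+
+proc auth_callback {op arg1 arg2 dbname trigger} {
+    # For a read, arg1 is assumed to be the table and arg2 the column.
+    if {$op eq "SQLITE_READ" && $arg1 eq "user" && $arg2 eq "password"} {
+        return SQLITE_DENY
+    }
+    return SQLITE_OK
+}
+db authorizer auth_callback
+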

    + +

    The "progress" method

    + + +

    This method registers a callback that is invoked periodically during +query processing. There are two arguments: the number of SQLite virtual +machine opcodes between invocations, and the TCL command to invoke. +Setting the progress callback to an empty string disables it.

    + +

    The progress callback can be used to display the status of a lengthy +query or to process GUI events during a lengthy query.
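+For example, a sketch that services pending GUI events every 1000 opcodes
+(the interval and the procedure name are arbitrary choices):
+
+proc progress_tick {} {
+    update     ;# let pending Tk/GUI events run during a long query
+}
+db progress 1000 progress_tick
+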

    + +

    The "collate" method

    + + +

    This method registers new text collating sequences. There are +two arguments: the name of the collating sequence and the name of a +TCL procedure that implements a comparison function for the collating +sequence. +

    + +

    For example, the following code implements a collating sequence called +"NOCASE" that sorts in text order without regard to case: +

    + +
    +proc nocase_compare {a b} {
    +    return [string compare [string tolower $a] [string tolower $b]]
    +}
    +db collate NOCASE nocase_compare
    +
    + +

    The "collation_needed" method

    + + +

    This method registers a callback routine that is invoked when the SQLite +engine needs a particular collating sequence but does not have that +collating sequence registered. The callback can register the collating +sequence. The callback is invoked with a single parameter which is the +name of the needed collating sequence.
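+A sketch that registers a collation on demand, reusing the nocase_compare
+procedure from the "collate" example above (the NOCASE2 name is hypothetical):
+
+proc need_collation {name} {
+    if {$name eq "NOCASE2"} {
+        db collate NOCASE2 nocase_compare
+    }
+}
+db collation_needed need_collation
+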

    + +

    The "commit_hook" method

    + + +

    This method registers a callback routine that is invoked just before +SQLite tries to commit changes to a database. If the callback throws +an exception or returns a non-zero result, then the transaction rolls back +rather than commit.
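+For example, a sketch of a commit hook that vetoes commits while a
+hypothetical application flag is set:
+
+proc veto_commit {} {
+    # A non-zero return (or an error) causes the transaction to roll back.
+    return [expr {$::readonly_mode ? 1 : 0}]
+}
+db commit_hook veto_commit
+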

    + +

    The "rollback_hook" method

    + + +

    This method registers a callback routine that is invoked just before +SQLite tries to do a rollback. The script argument is run without change.

    + +

    The "status" method

    + +

    This method returns status information from the most recently evaluated +SQL statement. The status method takes a single argument which should be +either "steps" or "sorts". If the argument is "steps", then the method +returns the number of full table scan steps that the previous SQL statement +evaluated. If the argument is "sorts", the method returns the number of +sort operations. This information can be used to detect queries that are +not using indices to speed search or sorting.

    + +

    The status method is basically a wrapper on the +sqlite3_stmt_status() C-language interface.
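+For example (a sketch assuming the t1 table from earlier in this document
+is open on the db command):
+
+db eval {SELECT * FROM t1 WHERE b='hello'}
+puts "full-scan steps: [db status steps]  sorts: [db status sorts]"
+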

    + +

    The "update_hook" method

    + + +

    This method registers a callback routine that is invoked just before +each row is modified by an UPDATE, INSERT, or DELETE statement. Four +arguments are appended to the callback before it is invoked:

    + +
      +
    • The keyword "INSERT", "UPDATE", or "DELETE", as appropriate
    • +
    • The name of the database which is being changed
    • +
    • The table that is being changed
    • +
    • The rowid of the row in the table being changed
    • +
    + +
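+A sketch of a logging hook built from the four appended arguments listed
+above (the procedure name is arbitrary):
+
+proc log_change {op dbname tbl rowid} {
+    puts "change: $op on $dbname.$tbl rowid=$rowid"
+}
+db update_hook log_change
+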

    The "incrblob" method

    + + +

    This method opens a TCL channel that can be used to read or write +into a preexisting BLOB in the database. The syntax is like this:

    + +
+dbcmd  incrblob  ?-readonly? +  ?DB?  TABLE  COLUMN  ROWID +
    + +

+The command returns a new TCL channel for reading or writing to the BLOB. +The channel is opened using the underlying +sqlite3_blob_open() C-language +interface. Close the channel using the close command of TCL. +
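+A sketch that reads an existing value through such a channel (the table,
+column, and rowid are illustrative):
+
+set ch [db incrblob -readonly main t1 b 1]
+set content [read $ch]
+close $ch
+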

    + +

    The "errorcode" method

    + + +

    This method returns the numeric error code that resulted from the most +recent SQLite operation.

    + +

    The "trace" method

    + + +

    The "trace" method registers a callback that is invoked as each SQL +statement is compiled. The text of the SQL is appended as a single string +to the command before it is invoked. This can be used (for example) to +keep a log of all SQL operations that an application performs. +

    + +

    The "backup" method

    + + +

    The "backup" method makes a backup copy of a live database. The +command syntax is like this:

    + +
    +dbcmd  backup  ?source-database?  backup-filename +
    + +

The optional source-database argument tells which database in +the current connection should be backed up. The default value is main +(or, in other words, the primary database file). To back up TEMP tables +use temp. To back up an auxiliary database added to the connection +using the ATTACH command, use the name of that database as it was assigned +in the ATTACH command.

    + +

    The backup-filename is the name of a file into which the backup is +written. Backup-filename does not have to exist ahead of time, but if +it does, it must be a well-formed SQLite database.
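+For example, a sketch that backs up the primary database to a hypothetical
+file name:
+
+db backup main ./backup.db
+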

    + +

    The "restore" method

    + + +

    The "restore" method copies the content a separate database file +into the current database connection, overwriting any preexisting content. +The command syntax is like this:

    + +
    +dbcmd  restore  ?target-database?  source-filename +
    + +

The optional target-database argument tells which database in +the current connection should be overwritten with new content. +The default value is main +(or, in other words, the primary database file). To repopulate the TEMP tables +use temp. To overwrite an auxiliary database added to the connection +using the ATTACH command, use the name of that database as it was assigned +in the ATTACH command.

    + +

The source-filename is the name of an existing well-formed SQLite +database file from which the content is extracted.
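+For example, a sketch that overwrites the primary database with the
+hypothetical backup file created in the "backup" example above:
+
+db restore main ./backup.db
+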

    + +
    +This page last modified 2009/05/14 23:24:56 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/tclsqlite.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/tclsqlite.tcl --- sqlite3-3.4.2/www/tclsqlite.tcl 2007-06-28 13:46:20.000000000 +0100 +++ sqlite3-3.6.16/www/tclsqlite.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,666 +0,0 @@ -# -# Run this Tcl script to generate the tclsqlite.html file. -# -set rcsid {$Id: tclsqlite.tcl,v 1.17 2007/06/19 17:48:57 drh Exp $} -source common.tcl -header {The Tcl interface to the SQLite library} -proc METHOD {name text} { - puts "\n

    The \"$name\" method

    \n" - puts $text -} -puts { -

    The Tcl interface to the SQLite library

    - -

    The SQLite library is designed to be very easy to use from -a Tcl or Tcl/Tk script. This document gives an overview of the Tcl -programming interface.

    - -

    The API

    - -

    The interface to the SQLite library consists of single -tcl command named sqlite3 -Because there is only this -one command, the interface is not placed in a separate -namespace.

    - -

    The sqlite3 command is used as follows:

    - -
    -sqlite3  dbcmd  database-name -
    - -

    -The sqlite3 command opens the database named in the second -argument. If the database does not already exist, it is -automatically created. -The sqlite3 command also creates a new Tcl -command to control the database. The name of the new Tcl command -is given by the first argument. This approach is similar to the -way widgets are created in Tk. -

    - -

    -The name of the database is just the name of a disk file in which -the database is stored. If the name of the database is an empty -string or the special name ":memory:" then a new database is created -in memory. -

    - -

    -Once an SQLite database is open, it can be controlled using -methods of the dbcmd. There are currently 22 methods -defined.

    - -

    -

    -

    - -

    The use of each of these methods will be explained in the sequel, though -not in the order shown above.

    - -} - -############################################################################## -METHOD eval { -

    -The most useful dbcmd method is "eval". The eval method is used -to execute SQL on the database. The syntax of the eval method looks -like this:

    - -
    -dbcmd  eval  sql -    ?array-name ? ?script? -
    - -

    -The job of the eval method is to execute the SQL statement or statements -given in the second argument. For example, to create a new table in -a database, you can do this:

    - -
    -sqlite3 db1 ./testdb
    -db1 eval {CREATE TABLE t1(a int, b text)}
    -
    - -

    The above code creates a new table named t1 with columns -a and b. What could be simpler?

    - -

    Query results are returned as a list of column values. If a -query requests 2 columns and there are 3 rows matching the query, -then the returned list will contain 6 elements. For example:

    - -
    -db1 eval {INSERT INTO t1 VALUES(1,'hello')}
    -db1 eval {INSERT INTO t1 VALUES(2,'goodbye')}
    -db1 eval {INSERT INTO t1 VALUES(3,'howdy!')}
    -set x [db1 eval {SELECT * FROM t1 ORDER BY a}]
    -
    - -

    The variable $x is set by the above code to

    - -
    -1 hello 2 goodbye 3 howdy! -
    - -

    You can also process the results of a query one row at a time -by specifying the name of an array variable and a script following -the SQL code. For each row of the query result, the values of all -columns will be inserted into the array variable and the script will -be executed. For instance:

    - -
    -db1 eval {SELECT * FROM t1 ORDER BY a} values {
    -    parray values
    -    puts ""
    -}
    -
    - -

    This last code will give the following output:

    - -
    -values(*) = a b
    -values(a) = 1
    -values(b) = hello

    - -values(*) = a b
    -values(a) = 2
    -values(b) = goodbye

    - -values(*) = a b
    -values(a) = 3
    -values(b) = howdy!
    -

    - -

    -For each column in a row of the result, the name of that column -is used as an index in to array. The value of the column is stored -in the corresponding array entry. The special array index * is -used to store a list of column names in the order that they appear. -

    - -

    -If the array variable name is omitted or is the empty string, then the value of -each column is stored in a variable with the same name as the column -itself. For example: -

    - -
    -db1 eval {SELECT * FROM t1 ORDER BY a} {
    -    puts "a=$a b=$b"
    -}
    -
    - -

    -From this we get the following output -

    - -
    -a=1 b=hello
    -a=2 b=goodbye
    -a=3 b=howdy!
    -
    - -

    -Tcl variable names can appear in the SQL statement of the second argument -in any position where it is legal to put a string or number literal. The -value of the variable is substituted for the variable name. If the -variable does not exist a NULL values is used. For example: -

    - -
    -db1 eval {INSERT INTO t1 VALUES(5,$bigstring)} -
    - -

    -Note that it is not necessary to quote the $bigstring value. That happens -automatically. If $bigstring is a large string or binary object, this -technique is not only easier to write, it is also much more efficient -since it avoids making a copy of the content of $bigstring. -

    - -

    -If the $bigstring variable has both a string and a "bytearray" representation, -then TCL inserts the value as a string. If it has only a "bytearray" -representation, then the value is inserted as a BLOB. To force a -value to be inserted as a BLOB even if it also has a text representation, -us a "@" character to in place of the "$". Like this: -

    - -
    -db1 eval {INSERT INTO t1 VALUES(5,@bigstring)} -
    - -

    -If the variable does not have a bytearray representation, then "@" works -just like "$". -

    - -} - -############################################################################## -METHOD close { - -

    -As its name suggests, the "close" method to an SQLite database just -closes the database. This has the side-effect of deleting the -dbcmd Tcl command. Here is an example of opening and then -immediately closing a database: -

    - -
    -sqlite3 db1 ./testdb
    -db1 close
    -
    - -

    -If you delete the dbcmd directly, that has the same effect -as invoking the "close" method. So the following code is equivalent -to the previous:

    - -
    -sqlite3 db1 ./testdb
    -rename db1 {}
    -
    -} - -############################################################################## -METHOD transaction { - -

    -The "transaction" method is used to execute a TCL script inside an SQLite -database transaction. The transaction is committed when the script completes, -or it rolls back if the script fails. If the transaction occurs within -another transaction (even one that is started manually using BEGIN) it -is a no-op. -

    - -

    -The transaction command can be used to group together several SQLite -commands in a safe way. You can always start transactions manually using -BEGIN, of -course. But if an error occurs so that the COMMIT or ROLLBACK are never -run, then the database will remain locked indefinitely. Also, BEGIN -does not nest, so you have to make sure no other transactions are active -before starting a new one. The "transaction" method takes care of -all of these details automatically. -

    - -

    -The syntax looks like this: -

    - -
    -dbcmd  transaction  ?transaction-type? -  SCRIPT, -
    - - -

    -The transaction-type can be one of deferred, -exclusive or immediate. The default is deferred. -

    -} - -############################################################################## -METHOD cache { - -

    -The "eval" method described above keeps a cache of -prepared statements -for recently evaluated SQL commands. -The "cache" method is used to control this cache. -The first form of this command is:

    - -
    -dbcmd  cache size  N -
    - -

    This sets the maximum number of statements that can be cached. -The upper limit is 100. The default is 10. If you set the cache size -to 0, no caching is done.

    - -

    The second form of the command is this:

    - - -
    -dbcmd  cache flush -
    - -

    The cache-flush method -finalizes -all prepared statements currently -in the cache.

    - -} - -############################################################################## -METHOD complete { - -

    -The "complete" method takes a string of supposed SQL as its only argument. -It returns TRUE if the string is a complete statement of SQL and FALSE if -there is more to be entered.

    - -

    The "complete" method is useful when building interactive applications -in order to know when the user has finished entering a line of SQL code. -This is really just an interface to the -sqlite3_complete() C -function. -} - -############################################################################## -METHOD copy { - -

    -The "copy" method copies data from a file into a table. -It returns the number of rows processed successfully from the file. -The syntax of the copy method looks like this:

    - -
-dbcmd  copy  conflict-algorithm  table-name  file-name  ?column-separator?  ?null-indicator?
    - -

Conflict-algorithm must be one of the SQLite conflict algorithms for the INSERT statement: rollback, abort, fail, ignore, or replace. See the SQLite Language section on ON CONFLICT for more information. The conflict-algorithm must be specified in lower case.

    - -

Table-name must already exist as a table. File-name must exist, and each row must contain the same number of columns as defined in the table. If a line in the file contains more or fewer than the number of columns defined, the copy method rolls back any inserts and returns an error.

    - -

    Column-separator is an optional column separator string. The default is -the ASCII tab character \t.

    - -

Null-indicator is an optional string that indicates a column value is null. The default is an empty string. Note that column-separator and null-indicator are optional positional arguments; if null-indicator is specified, a column-separator argument must be specified and must precede the null-indicator argument.

    - -

    The copy method implements similar functionality to the .import -SQLite shell command. -The SQLite 2.x COPY statement -(using the PostgreSQL COPY file format) -can be implemented with this method as:

    - -
-dbcmd  copy  $conflictalgo  $tablename  $filename  \t  \\N
    - -} - -############################################################################## -METHOD timeout { - -

    The "timeout" method is used to control how long the SQLite library -will wait for locks to clear before giving up on a database transaction. -The default timeout is 0 millisecond. (In other words, the default behavior -is not to wait at all.)

    - -

The SQLite database allows multiple simultaneous readers or a single writer, but not both. If any process is writing to the database, no other process is allowed to read or write. If any process is reading the database, other processes are allowed to read but not write. The entire database shares a single lock.

    - -

    When SQLite tries to open a database and finds that it is locked, it -can optionally delay for a short while and try to open the file again. -This process repeats until the query times out and SQLite returns a -failure. The timeout is adjustable. It is set to 0 by default so that -if the database is locked, the SQL statement fails immediately. But you -can use the "timeout" method to change the timeout value to a positive -number. For example:

    - -
    db1 timeout 2000
    - -

    The argument to the timeout method is the maximum number of milliseconds -to wait for the lock to clear. So in the example above, the maximum delay -would be 2 seconds.

    -} - -############################################################################## -METHOD busy { - -

    The "busy" method, like "timeout", only comes into play when the -database is locked. But the "busy" method gives the programmer much more -control over what action to take. The "busy" method specifies a callback -Tcl procedure that is invoked whenever SQLite tries to open a locked -database. This callback can do whatever is desired. Presumably, the -callback will do some other useful work for a short while (such as service -GUI events) then return -so that the lock can be tried again. The callback procedure should -return "0" if it wants SQLite to try again to open the database and -should return "1" if it wants SQLite to abandon the current operation. -} - -############################################################################## -METHOD exists { - -

    The "exists" method is similar to "onecolumn" and "eval" in that -it executes SQL statements. The difference is that the "exists" method -always returns a boolean value which is TRUE if a query in the SQL -statement it executes returns one or more rows and FALSE if the SQL -returns an empty set.

    - -

    The "exists" method is often used to test for the existance of -rows in a table. For example:

    - -
    -if {[db exists {SELECT 1 FROM table1 WHERE user=$user}]} {
    -   # Processing if $user exists
    -} else {
    -   # Processing if $user does not exist
    -} -
    -} - - -############################################################################## -METHOD last_insert_rowid { - -

    The "last_insert_rowid" method returns an integer which is the ROWID -of the most recently inserted database row.

    -} - -############################################################################## -METHOD function { - -

    The "function" method registers new SQL functions with the SQLite engine. -The arguments are the name of the new SQL function and a TCL command that -implements that function. Arguments to the function are appended to the -TCL command before it is invoked.

    - -

-The following example creates a new SQL function named "hex" that converts its numeric argument into a hexadecimal-encoded string:

    - -
    -db function hex {format 0x%X} -
    - -} - -############################################################################## -METHOD nullvalue { - -

    -The "nullvalue" method changes the representation for NULL returned -as result of the "eval" method.

    - -
    -db1 nullvalue NULL -
    - -

    The "nullvalue" method is useful to differ between NULL and empty -column values as Tcl lacks a NULL representation. The default -representation for NULL values is an empty string.

    -} - - - -############################################################################## -METHOD onecolumn { - -

    The "onecolumn" method works like -"eval" in that it evaluates the -SQL query statement given as its argument. The difference is that -"onecolumn" returns a single element which is the first column of the -first row of the query result.

    - -

    This is a convenience method. It saves the user from having to -do a "[lindex ... 0]" on the results of an "eval" -in order to extract a single column result.
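A minimal sketch, again using the example table t1:

# Fetch a single scalar value without any [lindex ... 0] post-processing
set rowcount [db1 onecolumn {SELECT count(*) FROM t1}]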

    -} - -############################################################################## -METHOD changes { - -

    The "changes" method returns an integer which is the number of rows -in the database that were inserted, deleted, and/or modified by the most -recent "eval" method.

    -} - -############################################################################## -METHOD total_changes { - -

    The "total_changes" method returns an integer which is the number of rows -in the database that were inserted, deleted, and/or modified since the -current database connection was first opened.

    -} - -############################################################################## -METHOD authorizer { - -

    The "authorizer" method provides access to the -sqlite3_set_authorizer -C/C++ interface. The argument to authorizer is the name of a procedure that -is called when SQL statements are being compiled in order to authorize -certain operations. The callback procedure takes 5 arguments which describe -the operation being coded. If the callback returns the text string -"SQLITE_OK", then the operation is allowed. If it returns "SQLITE_IGNORE", -then the operation is silently disabled. If the return is "SQLITE_DENY" -then the compilation fails with an error. -

    - -

    If the argument is an empty string then the authorizer is disabled. -If the argument is omitted, then the current authorizer is returned.
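The sketch below registers a hypothetical policy that blocks DELETE statements. It assumes, based on the description above, that the callback receives five strings and that the first of them names the operation (for example "SQLITE_DELETE"); the return value is one of the three strings listed above:

proc deny_deletes {op arg1 arg2 dbname trigger} {
  # Hypothetical policy: refuse to compile any DELETE statement
  if {$op eq "SQLITE_DELETE"} {return SQLITE_DENY}
  return SQLITE_OK
}
db1 authorizer deny_deletes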

    -} - -############################################################################## -METHOD progress { - -

    This method registers a callback that is invoked periodically during -query processing. There are two arguments: the number of SQLite virtual -machine opcodes between invocations, and the TCL command to invoke. -Setting the progress callback to an empty string disables it.

    - -

    The progress callback can be used to display the status of a lengthy -query or to process GUI events during a lengthy query.
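For example, a GUI application might service pending events every thousand opcodes (a minimal sketch; "update" is the standard Tcl event-processing command):

db1 progress 1000 update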

    -} - - -############################################################################## -METHOD collate { - -

    This method registers new text collating sequences. There are -two arguments: the name of the collating sequence and the name of a -TCL procedure that implements a comparison function for the collating -sequence. -

    - -

    For example, the following code implements a collating sequence called -"NOCASE" that sorts in text order without regard to case: -

    - -
    -proc nocase_compare {a b} {
    -    return [string compare [string tolower $a] [string tolower $b]]
    -}
    -db collate NOCASE nocase_compare
    -
    -} - -############################################################################## -METHOD collation_needed { - -

    This method registers a callback routine that is invoked when the SQLite -engine needs a particular collating sequence but does not have that -collating sequence registered. The callback can register the collating -sequence. The callback is invoked with a single parameter which is the -name of the needed collating sequence.

    -} - -############################################################################## -METHOD commit_hook { - -

This method registers a callback routine that is invoked just before SQLite tries to commit changes to a database. If the callback throws an exception or returns a non-zero result, then the transaction rolls back rather than committing.
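A minimal sketch of a commit hook that merely logs the commit; returning a non-zero value (or raising an error) from the procedure would cause a rollback instead:

proc on_commit {} {
  puts "about to commit"
  return 0   ;# zero allows the commit to proceed
}
db1 commit_hook on_commit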

    -} - -############################################################################## -METHOD rollback_hook { - -

    This method registers a callback routine that is invoked just before -SQLite tries to do a rollback. The script argument is run without change.

    -} - -############################################################################## -METHOD update_hook { - -

This method registers a callback routine that is invoked just before each row is modified by an UPDATE, INSERT, or DELETE statement. Four arguments are appended to the callback before it is invoked (see the sketch after this list):

    - -
• The keyword "INSERT", "UPDATE", or "DELETE", as appropriate
• The name of the database which is being changed
• The table that is being changed
• The rowid of the row in the table being changed
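A minimal sketch that logs every change using the four arguments listed above:

proc log_change {op dbname tbl rowid} {
  puts "$op on $dbname.$tbl rowid=$rowid"
}
db1 update_hook log_change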
    -} - -############################################################################## -METHOD incrblob { - -

    This method opens a TCL channel that can be used to read or write -into a preexisting BLOB in the database. The syntax is like this:

    - -
-dbcmd  incrblob  ?-readonly?  ?DB?  TABLE  COLUMN  ROWID
    - -

-The command returns a new TCL channel for reading or writing to the BLOB. The channel is opened using the underlying sqlite3_blob_open() C-language interface. Close the channel using the close command of TCL.
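A minimal sketch; the table name t1, column b, and rowid 5 are illustrative only, and the BLOB being overwritten must already exist and be large enough to hold the data written:

set ch [db1 incrblob t1 b 5]
puts -nonewline $ch "replacement bytes"
close $ch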

    -} - -############################################################################## -METHOD errorcode { - -

    This method returns the numeric error code that resulted from the most -recent SQLite operation.

    -} - -############################################################################## -METHOD trace { - -

    The "trace" method registers a callback that is invoked as each SQL -statement is compiled. The text of the SQL is appended as a single string -to the command before it is invoked. This can be used (for example) to -keep a log of all SQL operations that an application performs. -

    -} - - -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/tempfiles.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/tempfiles.html --- sqlite3-3.4.2/www/tempfiles.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/tempfiles.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,667 @@ + + +Temporary Files Used By SQLite + + + + + +

    SQLite's Use Of Temporary Disk Files

    + +

    1.0 Introduction

    + +

+One of the distinctive features of SQLite is that a database consists of a single disk file. This simplifies the use of SQLite since moving or backing up a database is as simple as copying a single file. It also makes SQLite appropriate for use as an application file format. But while a complete database is held in a single disk file, SQLite does make use of many temporary files during the course of processing a database.

    + +

    +This article describes the various temporary files that SQLite +creates and uses. It describes when the files are created, when +they are deleted, what they are used for, why they are important, +and how to avoid them on systems where creating temporary files is +expensive. +

    + +

    +The manner in which SQLite uses temporary files is not considered +part of the contract that SQLite makes with applications. The +information in this document is a correct description of how +SQLite operates at the time that this document was written or last +updated. But there is no guarantee that future versions of SQLite +will use temporary files in the same way. New kinds of temporary +files might be employed and some of +the current temporary file uses might be discontinued +in future releases of SQLite. +

    + +

    2.0 Seven Kinds Of Temporary Files

    + +

    +SQLite currently uses seven distinct types of temporary files: +

    + +
      +
1. Rollback journals
2. Master journals
3. Statement journals
4. TEMP databases
5. Materializations of views and subqueries
6. Transient indices
7. Transient databases used by VACUUM
    + +

    +Additional information about each of these temporary file types +is in the sequel. +

    + +

    2.1 Rollback Journals

    + +

    +A rollback journal is a temporary file used to implement +atomic commit and rollback capabilities in SQLite. +(For a detailed discussion of how this works, see +the separate document titled +Atomic Commit In SQLite.) +The rollback journal is always located in the same directory +as the database file and has the same name as the database +file except with the 8 characters "-journal" appended. +The rollback journal is usually created when a transaction +is first started and is usually deleted when a transaction +commits or rolls back. +The rollback journal file is essential for implementing the +atomic commit and rollback capabilities of SQLite. Without +a rollback journal, SQLite would be unable to rollback an +incomplete transaction, and if a crash or power loss occurred +in the middle of a transaction the entire database would likely +go corrupt without a rollback journal. +

    + +

    +The rollback journal is usually created and destroyed at the +start and end of a transaction, respectively. But there are exceptions +to this rule. +

    + +

    +If a crash or power loss occurs in the middle of a transaction, +then the rollback journal file is left on disk. The next time +another application attempts to open the database file, it notices +the presence of the abandoned rollback journal (we call it a "hot +journal" in this circumstance) and uses the information in the +journal to restore the database to its state prior to the start +of the incomplete transaction. This is how SQLite implements +atomic commit. +

    + +

    +If an application puts SQLite in +exclusive locking mode using +the pragma: +

    + +
    +PRAGMA locking_mode=EXCLUSIVE;
    +
    + +

    +SQLite creates a new rollback journal at the start of the first +transaction within an exclusive locking mode session. But at the +conclusion of the transaction, it does not delete the rollback +journal. The rollback journal might be truncated, or its header +might be zeroed (depending on what version of SQLite you are using) +but the rollback journal is not deleted. The rollback journal is +not deleted until exclusive access mode is exited.

    + +

    +Rollback journal creation and deletion is also changed by the +journal_mode pragma. +The default journaling mode is DELETE, which is the default behavior +of deleting the rollback journal file at the end of each transaction, +as described above. The PERSIST journal mode foregoes the deletion of +the journal file and instead overwrites the rollback journal header +with zeros, which prevents other processes from rolling back the +journal and thus has the same effect as deleting the journal file, though +without the expense of actually removing the file from disk. In other +words, journal mode PERSIST exhibits the same behavior as is seen +in EXCLUSIVE locking mode. The +OFF journal mode causes SQLite to forego creating a rollback journal +in the first place. The OFF journal mode disables the atomic +commit and rollback capabilities of SQLite. The ROLLBACK command +is not available when OFF journal mode is set. And if a crash or power +loss occurs in the middle of a transaction that uses the OFF journal +mode, no recovery is possible and the database file will likely +go corrupt. +
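For illustration, the journal mode can be changed like any other pragma, for example through the TCL interface described earlier (a minimal sketch; the file name ./testdb is hypothetical):

sqlite3 db ./testdb
db eval {PRAGMA journal_mode=PERSIST}   ;# returns the journal mode now in effect
db close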

    + + +

    2.2 Master Journal Files

    + +

    +The master journal file is used as part of the atomic commit +process when a single transaction makes changes to multiple +databases that have been added to a single database connection +using the ATTACH statement. The master journal file is always +located in the same directory as the main database file +(the main database file is the database that is identified +in the original sqlite3_open(), sqlite3_open16(), or +sqlite3_open_v2() call that created the database connection) +with a randomized suffix. The master journal file contains +the names of all of the various attached auxiliary databases +that were changed during the transaction. The multi-database +transaction commits when the master journal file is deleted. +See the documentation titled +Atomic Commit In SQLite for +additional detail. +

    + +

+The master journal file is only created in cases where a single database connection is talking with two or more database files as a result of using ATTACH to connect to auxiliary databases, and where a single transaction modifies more than one of those database files. Without the master journal, the transaction commit on a multi-database transaction would be atomic for each database individually, but it would not be atomic across all databases. In other words, if the commit were interrupted in the middle by a crash or power loss, then the changes to one of the databases might complete while the changes to another database might roll back. The master journal causes all changes in all databases to either roll back or commit together.

    + +

    2.3 Statement Journal Files

    + +

    +A statement journal file is used to rollback partial results of +a single statement within a larger transaction. For example, suppose +an UPDATE statement will attempt to modify 100 rows in the database. +But after modifying the first 50 rows, the UPDATE hits +a constraint violation which should block the entire statement. +The statement journal is used to undo the first 50 row changes +so that the database is restored to the state it was in at the start +of the statement. +

    + +

+A statement journal is only created for an UPDATE or INSERT statement that might change multiple rows of a database and which might hit a constraint or a RAISE exception within a trigger and thus need to undo partial results. If the UPDATE or INSERT is not contained within BEGIN...COMMIT and if there are no other active statements on the same database connection, then no statement journal is created since the ordinary rollback journal can be used instead. The statement journal is also omitted if an alternative conflict resolution algorithm is used. For example:

    + +
    +UPDATE OR FAIL ...
    +UPDATE OR IGNORE ...
    +UPDATE OR REPLACE ...
    +INSERT OR FAIL ...
    +INSERT OR IGNORE ...
    +INSERT OR REPLACE ...
    +REPLACE INTO ....
    +
    + +

    +The statement journal is given a randomized name, not necessarily +in the same directory as the main database, and is automatically +deleted at the conclusion of the transaction. The size of the +statement journal is proportional to the size of the change implemented +by the UPDATE or INSERT statement that caused the statement journal +to be created. +

    + +

    2.4 TEMP Databases

    + +

    Tables created using the "CREATE TEMP TABLE" syntax are only +visible to the database connection in which the "CREATE TEMP TABLE" +statement is originally evaluated. These TEMP tables, together +with any associated indices, triggers, and views, are collectively +stored in a separate temporary database file that is created as +soon as the first "CREATE TEMP TABLE" statement is seen. +This separate temporary database file also has an associated +rollback journal. +The temporary database file used to store TEMP tables is deleted +automatically when the database connection is closed +using sqlite3_close(). +

    + +

    +The TEMP database file is very similar to auxiliary database +files added using the ATTACH statement, though with a few +special properties. +The TEMP database is always automatically deleted when the +database connection is closed. +The TEMP database always uses the +synchronous=OFF and journal_mode=PERSIST +PRAGMA settings. +And, the TEMP database cannot be used with DETACH nor can +another process ATTACH the TEMP database. +

    + +

    +The temporary files associated with the TEMP database and its +rollback journal are only created if the application makes use +of the "CREATE TEMP TABLE" statement. +

    + +

    2.5 Materializations Of Views And Subqueries

    + +

Queries that contain subqueries must sometimes evaluate the subqueries separately and store the results in a temporary table, then use the content of the temporary table to evaluate the outer query. We call this "materializing" the subquery. The query optimizer in SQLite attempts to avoid materializing, but sometimes it is not easily avoidable. The temporary tables created by materialization are each stored in their own separate temporary file, which is automatically deleted at the conclusion of the query. The size of these temporary tables depends on the amount of data in the materialization of the subquery, of course.

    + +

+A subquery on the right-hand side of the IN operator must often be materialized. For example:

    + +
    +SELECT * FROM ex1 WHERE ex1.a IN (SELECT b FROM ex2);
    +
    + +

    +In the query above, the subquery "SELECT b FROM ex2" is evaluated +and its results are stored in a temporary table (actually a temporary +index) that allows one to determine whether or not a value ex2.b +exists using a simple binary search. Once this table is constructed, +the outer query is run and for each prospective result row a check +is made to see if ex1.a is contained within the temporary table. +The row is output only if the check is true. +

    + +

    +To avoid creating the temporary table, the query might be rewritten +as follows: +

    + +
    +SELECT * FROM ex1 WHERE EXISTS(SELECT 1 FROM ex2 WHERE ex2.b=ex1.a);
    +
    + +

    +Recent versions of SQLite (version 3.5.4 and later) +will do this rewrite automatically +if an index exists on the column ex2.b. +

    + +

+The right-hand side of an IN operator can also be a list of values, as in the following:

    +
    +SELECT * FROM ex1 WHERE a IN (1,2,3);
    +
    +

    +List values on the right-hand side of IN are treated as a +subquery that must be materialized. In other words, the +previous statement acts as if it were: +

    +
    +SELECT * FROM ex1 WHERE a IN (SELECT 1 UNION ALL
    +                              SELECT 2 UNION ALL
    +                              SELECT 3);
    +
    +

    +A temporary index is always used to hold the values of the +right-hand side of an IN operator when that right-hand side +is a list of values. +

    + +

    +Subqueries might also need to be materialized when they appear +in the FROM clause of a SELECT statement. For example: +

    + +
    +SELECT * FROM ex1 JOIN (SELECT b FROM ex2) AS t ON t.b=ex1.a;
    +
    + +

    +Depending on the query, SQLite might need to materialize the +"(SELECT b FROM ex2)" subquery into a temporary table, then +perform the join between ex1 and the temporary table. The +query optimizer tries to avoid this by "flattening" the +query. In the previous example the query can be flattened, +and SQLite will automatically transform the query into +

    + +
    +SELECT ex1.*, ex2.b FROM ex1 JOIN ex2 ON ex2.b=ex1.a;
    +
    + +

+More complex queries may or may not be able to employ query flattening to avoid the temporary table. Whether or not the query can be flattened depends on such factors as whether or not the subquery or outer query contains aggregate functions, ORDER BY or GROUP BY clauses, LIMIT clauses, and so forth. The rules for when a query can and cannot be flattened are very complex and are beyond the scope of this document.

    + +

    2.6 Transient Indices

    + +

    +SQLite may make use of transient indices to +implement SQL language features such as: +

    + +
      +
• An ORDER BY or GROUP BY clause
• The DISTINCT keyword in an aggregate query
• Compound SELECT statements joined by UNION, EXCEPT, or INTERSECT
    + +

    +Each transient index is stored in its own temporary file. +The temporary file for a transient index is automatically deleted +at the end of the statement that uses it. +

    + +

    +SQLite strives to implement ORDER BY clauses using a preexisting +index. If an appropriate index already exists, SQLite will walk +the index, rather than the underlying table, to extract the +requested information, and thus cause the rows to come out in +the desired order. But if SQLite cannot find an appropriate index +it will evaluate the query and store each row in a transient index +whose data is the row data and whose key is the ORDER BY terms. +After the query is evaluated, SQLite goes back and walks the +transient index from beginning to end in order to output the +rows in the desired order. +

    + +

    +SQLite implements GROUP BY by ordering the output rows in the +order suggested by the GROUP BY terms. Each output row is +compared to the previous to see if it starts a new "group". +The ordering by GROUP BY terms is done in exactly the same way +as the ordering by ORDER BY terms. A preexisting index is used +if possible, but if no suitable index is available, a transient +index is created. +

    + +

+The previous two paragraphs describe the implementation of SQLite as of version 3.5.8. There are known problems with this approach for very large result sets - result sets that are larger than the available disk cache. Future versions of SQLite will likely address this deficiency by completely reworking the sort algorithm for cases when no suitable preexisting sort index is available. The new sort algorithm will also use temporary files, but not in the same way as the current implementation, and the temporary files for the new implementation will probably not be index files.

    + +

+The DISTINCT keyword on an aggregate query is implemented by creating a transient index in a temporary file and storing each result row in that index. As new result rows are computed, a check is made to see if they already exist in the transient index and if they do, the new result row is discarded.

    + +

    +The UNION operator for compound queries is implemented by creating +a transient index in a temporary file and storing the results +of the left and right subquery in the transient index, discarding +duplicates. After both subqueries have been evaluated, the +transient index is walked from beginning to end to generate the final output. +

    + +

+The EXCEPT operator for compound queries is implemented by creating a transient index in a temporary file, storing the results of the left subquery in this transient index, then removing the results of the right subquery from the transient index, and finally walking the index from beginning to end to obtain the final output.

    + +

    +The INTERSECT operator for compound queries is implemented by +creating two separate transient indices, each in a separate +temporary file. The left and right subqueries are evaluated +each into a separate transient index. Then the two indices +are walked together and entries that appear in both indices +are output. +

    + +

    +Note that the UNION ALL operator for compound queries does not +use transient indices by itself (though of course the right +and left subqueries of the UNION ALL might use transient indices +depending on how they are composed.) + +

    2.7 Transient Database Used By VACUUM

    + +

    +The VACUUM command works by creating a temporary file +and then rebuilding the entire database into that temporary +file. Then the content of the temporary file is copied back +into the original database file and the temporary file is +deleted. +

    + +

    +The temporary file created by the VACUUM command exists only +for the duration of the command itself. The size of the temporary +file will be no larger than the original database. +

    + + + +

    3.0 The SQLITE_TEMP_STORE Compile-Time Parameter and Pragma

    + +

    +The rollback journal, master journal, +and statement journal files are always written +to disk. +But the other kinds of temporary files might be stored in memory +only and never written to disk. +Whether or not temporary files other than the rollback, +master, and statement journals are written to disk or stored only in memory +depends on the SQLITE_TEMP_STORE compile-time parameter, the +temp_store pragma, +and on the size of the temporary file. +

    + +

    +The SQLITE_TEMP_STORE compile-time parameter is a #define whose value is +an integer between 0 and 3, inclusive. The meaning of the +SQLITE_TEMP_STORE compile-time parameter is as follows: +

    + +
      +
0. Temporary files are always stored on disk regardless of the setting of the temp_store pragma.
1. Temporary files are stored on disk by default but this can be overridden by the temp_store pragma.
2. Temporary files are stored in memory by default but this can be overridden by the temp_store pragma.
3. Temporary files are always stored in memory regardless of the setting of the temp_store pragma.
    + +

    +The default value of the SQLITE_TEMP_STORE compile-time parameter is 1, +which means to store temporary files on disk but provide the option +of overriding the behavior using the temp_store pragma. +

    + +

+The temp_store pragma has an integer value which also influences the decision of where to store temporary files. The values of the temp_store pragma have the following meanings:

    + +
      +
0. Use either disk or memory storage for temporary files as determined by the SQLITE_TEMP_STORE compile-time parameter.
1. If the SQLITE_TEMP_STORE compile-time parameter specifies memory storage for temporary files, then override that decision and use disk storage instead. Otherwise follow the recommendation of the SQLITE_TEMP_STORE compile-time parameter.
2. If the SQLITE_TEMP_STORE compile-time parameter specifies disk storage for temporary files, then override that decision and use memory storage instead. Otherwise follow the recommendation of the SQLITE_TEMP_STORE compile-time parameter.
    + +

+The default setting for the temp_store pragma is 0, which means to follow the recommendation of the SQLITE_TEMP_STORE compile-time parameter.
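For illustration, an application built with the default SQLITE_TEMP_STORE=1 could request in-memory temporary files like this (a sketch using the TCL interface; the file name ./testdb is hypothetical):

sqlite3 db ./testdb
db eval {PRAGMA temp_store = 2}   ;# 2 means memory, subject to SQLITE_TEMP_STORE
db close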

    + +

+To reiterate, the SQLITE_TEMP_STORE compile-time parameter and the temp_store pragma only influence the temporary files other than the rollback journal, the master journal, and the statement journal. The rollback journal, the master journal, and the statement journal are always written to disk regardless of the settings of the SQLITE_TEMP_STORE compile-time parameter and the temp_store pragma.

    + +

    4.0 Other Temporary File Optimizations

    + +

+SQLite uses a page cache of recently read and written database pages. This page cache is used not just for the main database file but also for transient indices and tables stored in temporary files. If SQLite needs to use a temporary index or table, and the SQLITE_TEMP_STORE compile-time parameter and the temp_store pragma are set to store temporary tables and indices on disk, the information is still initially stored in memory in the page cache. The temporary file is not opened and the information is not truly written to disk until the page cache is full.

    + +

    +This means that for many common cases where the temporary tables +and indices are small (small enough to fit into the page cache) +no temporary files are created and no disk I/O occurs. Only +when the temporary data becomes too large to fit in RAM does +the information spill to disk. +

    + +

    +Each temporary table and index is given its own page cache +which can store a maximum number of database pages determined +by the SQLITE_DEFAULT_TEMP_CACHE_SIZE compile-time parameter. +(The default value is 500 pages.) +The maximum number of database pages in the page cache is the +same for every temporary table and index. The value cannot +be changed at run-time or on a per-table or per-index basis. +Each temporary file gets its own private page cache with its +own SQLITE_DEFAULT_TEMP_CACHE_SIZE page limit. +

    +
    +This page last modified 2008/06/26 15:44:09 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/testing.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/testing.html --- sqlite3-3.4.2/www/testing.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/testing.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,624 @@ + + +How SQLite Is Tested + + + + + +

    How SQLite Is Tested

    + +

    1.0 Introduction

    + +

    The reliability and robustness of SQLite is achieved in part +by thorough and careful testing.

    + +

    As of version 3.6.16 (all statistics in the report are against that +release of SQLite), +the SQLite library consists of approximately +63.9 KSLOC of C code. +(KSLOC means thousands of "Source Lines Of Code" or, in other words, +lines of code excluding blank lines and comments.) +By comparison, the project has +709 times as much +test code and test scripts - +45385.9 KSLOC.

    + +

    2.0 Test Harnesses

    + +

    There are three independent test harnesses used for testing the +core SQLite library. +Each test harness is designed, maintained, and managed separately +from the others. +

    + +
      +
1. The TCL Tests are the oldest and most complete set of tests for SQLite. The TCL tests are contained in the same source tree as the SQLite core and, like the SQLite core, are in the public domain. The TCL tests are the primary tests used during development. The TCL tests are written using the TCL scripting language. The TCL test harness itself consists of 17.3 KSLOC of C code used to create the TCL interface. The test scripts are contained in 495 files totaling 7.7MB in size. There are 24228 distinct test cases, but many of the test cases are parameterized and run multiple times (with different parameters) so that on a full test run, about 2.2 million separate tests are performed.

2. The TH3 test harness is a set of proprietary tests, written in C. The impetus for TH3 was the need to have a set of tests that ran on embedded and specialized platforms that would not easily support TCL or other workstation services. TH3 tests use only the published SQLite interfaces. These tests are free to SQLite Consortium members and are available by license to others. TH3 consists of about 22.2 MB or 319.5 KSLOC of C code implementing 10991 distinct test cases. TH3 tests are heavily parameterized, though, so a full test run executes about 3.9 million different test instances.

3. The SQL Logic Test or SLT test harness is used to run huge numbers of SQL statements against both SQLite and several other SQL database engines and verify that they all get the same answers. SLT currently compares SQLite against PostgreSQL, MySQL, and Microsoft SQL Server. SLT runs 7.2 million queries comprising 1.12GB of test data.
    + +

    All of the tests above must run successfully, on multiple platforms +and under multiple compile-time configurations, +before each release of SQLite.

    + +

Prior to each check-in to the SQLite source tree, developers typically run a subset (called "veryquick") of the Tcl tests consisting of about 41.7 thousand test cases and covering 96.96% of the core SQLite source code. The veryquick tests cover everything except the anomaly, fuzz, and soak tests. The idea behind the veryquick tests is that they are sufficient to catch most errors, but they run in only a few minutes instead of a few hours.

    + +

    3.0 Anomaly Testing

    + +

Anomaly tests are tests designed to verify the correct behavior of SQLite when something goes wrong. It is (relatively) easy to build an SQL database engine that behaves correctly on well-formed inputs on a fully functional computer. It is more difficult to build a system that responds sanely to invalid inputs and continues to function following system malfunctions. The anomaly tests are designed to verify the latter behavior.

    + +

    3.1 Out-Of-Memory Testing

    + +

SQLite, like all SQL database engines, makes extensive use of malloc(). (See the separate report on dynamic memory allocation in SQLite for additional detail.) On workstations, malloc() never fails in practice and so correct handling of out-of-memory (OOM) errors is not particularly important. But on embedded devices, OOM errors are frighteningly common and since SQLite is frequently used on embedded devices, it is important that SQLite be able to gracefully handle OOM errors.

    + +

    OOM testing is accomplished by simulating OOM errors. +SQLite allows an application to substitute an alternative malloc() +implementation using the sqlite3_config(SQLITE_CONFIG_MALLOC,...) +interface. The TCL and TH3 test harnesses are both capable of +inserting a modified version of malloc() that can be rigged to fail +after a certain number of allocations. These instrumented mallocs +can be set to fail only once and then start working again, or to +continue failing after the first failure. OOM tests are done in a +loop. On the first iteration of the loop, the instrumented malloc +is rigged to fail on the first allocation. Then some SQLite operation +is carried out and checks are done to make sure SQLite handled the +OOM error correctly. Then the time-to-failure counter +on the instrumented malloc is increased by one and the test is +repeated. The loop continues until the entire operation runs to +completion without ever encountering a simulated OOM failure. +Tests like this are run twice, once with the instrumented malloc +set to fail only once, and again with the instrumented malloc set +to fail continuously after the first failure.

    + +

    3.2 I/O Error Testing

    + +

    I/O error testing seeks to verify that SQLite responds sanely +to failed I/O operations. I/O errors might result from a full disk drive, +malfunctioning disk hardware, network outages when using a network +file system, system configuration or permission changes that occur in the +middle of an SQL operation, or other hardware or operating system +malfunctions. Whatever the cause, it is important that SQLite be able +to respond correctly to these errors and I/O error testing seeks to +verify that it does.

    + +

    I/O error testing is similar in concept to OOM testing; I/O errors +are simulated and checks are made to verify that SQLite responds +correctly to the simulated errors. I/O errors are simulated in both +the TCL and TH3 test harnesses by inserting a new +Virtual File System object that is specially rigged +to simulate an I/O error after a set number of I/O operations. +As with OOM error testing, the I/O error simulators can be set to +fail just once, or to fail continuously after the first failure. +Tests are run in a loop, slowly increasing the point of failure until +the test case runs to completion without error. The loop is run twice, +once with the I/O error simulator set to simulate only a single failure +and a second time with it set to fail all I/O operations after the first +failure.

    + +

    In I/O error tests, after the I/O error simulation failure mechanism +is disabled, the database is examined using +PRAGMA integrity_check to make sure that the I/O error has not +introduced database corruption.

    + +

3.3 Crash Testing

    + +

Crash testing seeks to demonstrate that an SQLite database will not go corrupt if the application or operating system crashes or if there is a power failure in the middle of a database update. A separate white-paper titled Atomic Commit in SQLite describes the defensive measures SQLite takes to prevent database corruption following a crash. Crash tests strive to verify that those defensive measures are working correctly.

    + +

    It is impractical to do crash testing using real power failures, of +course, and so crash testing is done in simulation. An alternative +Virtual File System is inserted that allows the test +harness to simulate the state of the database file following a crash.

    + +

    In the TCL test harness, the crash simulation is done in a separate +process. The main testing process spawns a child process which runs +some SQLite operation and randomly crashes somewhere in the middle of +a write operation. A special VFS randomly reorders and corrupts +the unsynchronized +write operations to simulate the effect of buffered filesystems. After +the child dies, the original test process opens and reads the test +database and verifies that the changes attempted by the child either +completed successfully or else were completely rolled back. The +integrity_check PRAGMA is used to make sure no database corruption +occurs.

    + +

The TH3 test harness needs to run on embedded systems that do not necessarily have the ability to spawn child processes, so it uses an in-memory VFS to simulate crashes. The in-memory VFS can be rigged to make a snapshot of the entire filesystem after a set number of I/O operations. Crash tests run in a loop. On each iteration of the loop, the point at which a snapshot is made is advanced until the SQLite operations being tested run to completion without ever hitting a snapshot. Within the loop, after the SQLite operation under test has completed, the filesystem is reverted to the snapshot and random file damage is introduced that is characteristic of the kinds of damage one expects to see following a power loss. Then the database is opened and checks are made to ensure that it is well-formed and that the transaction either ran to completion or was completely rolled back. The interior of the loop is repeated multiple times for each snapshot with different random damage each time.

    + +

    4.0 Fuzz Testing

    + +

Fuzz testing seeks to establish that SQLite responds correctly to invalid, out-of-range, or malformed inputs.

    + +

    4.1 SQL Fuzz

    + +

    SQL fuzz testing consists of creating syntactically correct yet +wildly nonsensical SQL statements and feeding them to SQLite to see +what it will do with them. Usually some kind of error is returned +(such as "no such table"). Sometimes, purely by chance, the SQL +statement also happens to be semantically correct. In that case, the +resulting prepared statement is run to make sure it gives a reasonable +result.

    + +

    The SQL fuzz generator tests are part of the TCL test suite. +During a full test run, about 108.7 +thousand fuzz SQL statements are +generated and tested.

    + +

    4.2 Malformed Database Files

    + +

There are numerous test cases that verify that SQLite is able to deal with malformed database files. These tests first build a well-formed database file, then add corruption by changing one or more bytes in the file by some means other than SQLite. Then SQLite is used to read the database. In some cases, the byte changes are in the middle of data. This causes the content to change, but does not otherwise impact the operation of SQLite. In other cases, unused bytes of the file are modified. The interesting cases are when bytes of the file that define database structure get changed. The malformed database tests verify that SQLite finds the file format errors and reports them using the SQLITE_CORRUPT return code without overflowing buffers, dereferencing NULL pointers, or performing other unwholesome actions.

    + +

    4.3 Boundary Value Tests

    + +

    SQLite defines certain limits on its operation, such as the +maximum number of columns in a table, the maximum length of an +SQL statement, or the maximum value of an integer. The TCL test +suite contains numerous tests that push SQLite right to the edge +of its defined limits and verify that it performs correctly for +all allowed values. Additional tests go beyond the defined limits +and verify that SQLite correctly returns errors.

    + +

    5.0 Regression Testing

    + +

    Whenever a bug is reported against SQLite, that bug is not considered +fixed until new test cases have been added to the TCL test suite which +would exhibit the bug in an unpatched version of SQLite. Over the years, +this has resulted in thousands and thousands of new tests being added +to the TCL test suite. These regression tests insure that bugs that have +been fixed in the past are never reintroduced into future versions of +SQLite.

    + +

    6.0 Automatic Resource Leak Detection

    + +

A resource leak occurs when system resources are allocated and never freed. The most troublesome resource leaks in many applications are memory leaks - when memory is allocated using malloc() but never released using free(). But other kinds of resources can also be leaked: file descriptors, threads, mutexes, etc.

    + +

    Both the TCL and TH3 test harnesses automatically track system +resources and report resources leaks on every test run. +No special configuration or setup is required. The test harnesses +are especially vigilant with regard to memory leaks. If a change +causes a memory leak, the test harnesses will recognize this +quickly. SQLite is designed to never leak memory, even after +an exception such as an OOM error or disk I/O error. The test +harnesses are zealous to enforce this.

    + +

    7.0 Test Coverage

    + +

The gcov utility is used to measure the "test coverage" of the SQLite test suite. SQLite strives for but does not yet obtain 100% test coverage. A major goal of the SQLite project is to obtain 100% branch coverage during 2009.

Test coverage can be measured in several ways. "Statement coverage" measures (as a percentage of the whole) how many lines of code are exercised by the test cases. Statement coverage for the SQLite core is 99.36% for the full TCL test suite, 96.96% for the "veryquick.test" abbreviated TCL test suite and 98.58% for the TH3 test suite. (The SQLite core, in this case, excludes the operating-system dependent VFS backends.) "Branch" coverage measures (again, as a percentage of the whole) how many machine-code branch instructions are taken at least once in both directions. Branch coverage is 96.84% for the full TCL test suite, 91.67% for the abbreviated TCL test suite, and 96.26% for the TH3 test harness.

    + +

    To illustrate the difference between statement coverage and +branch coverage, consider the following hypothetical +line of C code:

    + +
    +if( a>b && c!=25 ){ d++; }
    +
    + +

    Such a line of C code might generate a dozen separate machine code +instructions. If any one of those instructions is ever evaluated, then +we say that the statement has been tested. So, for example, it might +be the case that the conditional expression is +always false and the "d" variable is +never incremented. Even so, statement coverage counts this line of +code as having been tested.

    + +

    Branch coverage is more strict. With branch coverage, each test and +each subblock within the statement is considered separately. In order +to achieve 100% branch coverage in the example above, there must be at +least three test cases:

    + +

      +
    • a<=b +
    • a>b && c==25 +
    • a>b && c!=25 +

    + +

Branch test coverage is normally less than statement coverage since a C program will typically contain some defensive tests which in practice are always true or always false. For testing purposes, the SQLite source code defines macros called ALWAYS() and NEVER(). The ALWAYS() macro surrounds conditions which are expected to always evaluate to true and NEVER() surrounds conditions that are expected to always evaluate to false. These macros serve as comments to indicate that the conditions are defensive code. For standard builds, these macros are pass-throughs:

    + +
    +#define ALWAYS(X)  (X)
    +#define NEVER(X)   (X)
    +
    + +

    During most testing, however, these macros will throw an assertion +fault if their argument does not have the expected truth value. This +alerts the developers quickly to incorrect design assumptions. + +

+#define ALWAYS(X)  ((X)?1:(assert(0),0))
+#define NEVER(X)   ((X)?(assert(0),1):0)
    +
    + +

    When measuring test coverage, these macros are defined to be constant +truth values so that they do not generate assembly language branch +instructions, and hence do not come into play when calculating the +branch coverage level:

    + +
    +#define ALWAYS(X)  (1)
    +#define NEVER(X)   (0)
    +
    + +

Another macro used in conjunction with test coverage measurement is the testcase() macro. The argument is a condition for which we want test cases that evaluate to both true and false. In non-coverage builds (that is to say, in release builds) the testcase() macro is a no-op:

    + +
    +#define testcase(X)
    +
    + +

But in a coverage measuring build, the testcase() macro generates code that evaluates the conditional expression in its argument. Then during analysis, a check is made to insure tests exist that evaluate the conditional to both true and false. Testcase() macros are used, for example, to help verify that boundary values are tested. For example:

    + +
    +testcase( a==b );
    +testcase( a==b+1 );
    +if( a>b && c!=25 ){ d++; }
    +
    + +

    Testcase macros are also used when two or more cases of a switch +statement go to the same block of code, to make sure that the code was +reached for all cases:

    + +
    +switch( op ){
    +  case OP_Add:
    +  case OP_Subtract: {
    +    testcase( op==OP_Add );
    +    testcase( op==OP_Subtract );
    +    /* ... */
    +    break;
    +  }
    +  /* ... */
    +}
    +
    + +

For bitmask tests, testcase() macros are used to verify that every bit of the bitmask affects the test. For example, in the following block of code, the condition is true if the mask contains either of two bits indicating either a MAIN_DB or a TEMP_DB is being opened. The testcase() macros that precede the if statement verify that both cases are tested:

    + +
    +testcase( mask & SQLITE_OPEN_MAIN_DB );
    +testcase( mask & SQLITE_OPEN_TEMP_DB );
    +if( (mask & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB))!=0 ){ ... }
    +
    + +

The developers of SQLite have found that coverage testing is an extremely productive method for finding bugs. Because such a high percentage of SQLite core code is covered by test cases, the developers can have confidence that changes they make in one part of the code do not have unintended consequences in other parts of the code. It would not be possible to maintain the quality of SQLite without coverage testing.

    + +

    8.0 Dynamic Analysis

    + +

    Dynamic analysis refers to internal and external checks on the +SQLite code which are performed while the code is live and running. +Dynamic analysis has proven to be a great help in maintaining the +quality of SQLite.

    + +

    8.1 Assert

    + +

    The SQLite core contains 2717 assert() +statements that verify function preconditions and postconditions and +loop invariants. Assert() is a macro which is a standard part of +ANSI-C. The argument is a boolean value that is assumed to always be +true. If the assertion is false, the program prints an error message +and halts.

    + +

    Assert() macros are disabled by compiling with the NDEBUG macro defined. +In most systems, asserts are enabled by default. But in SQLite, the +asserts are so numerous and are in such performance critical places, that +the database engine runs about three times slower when asserts are enabled. +Hence, the default (production) build of SQLite disables asserts. +Assert statements are only enabled when SQLite is compiled with the +SQLITE_DEBUG preprocessor macro defined.

    + +

    8.2 Valgrind

    + +

Valgrind is perhaps the most amazing and useful developer tool in the world. Valgrind is a simulator - it simulates an x86 running a linux binary. (Ports of valgrind for platforms other than linux are in development, but as of this writing, valgrind only works reliably on linux, which in the opinion of the SQLite developers means that linux should be the preferred platform for all software development.) As valgrind runs a linux binary, it looks for all kinds of interesting errors such as array overruns, reading from uninitialized memory, stack overflows, memory leaks, and so forth. Valgrind finds problems that can easily slip through all of the other tests run against SQLite. And, when valgrind does find an error, it can dump the developer directly into a symbolic debugger at the exact point where the error occurs, to facilitate a quick fix.

    + +

    Because it is a simulator, running a binary in valgrind is slower than +running it on native hardware. So it is impractical to run the full +SQLite test suite through valgrind. However, the veryquick tests and +a subset of the TH3 tests are run through valgrind prior to every release.

    + +

    8.3 Memsys2

    + +

    SQLite contains a pluggable memory allocation subsystem. +The default implementation uses system malloc() and free(). +However, if SQLite is compiled with SQLITE_MEMDEBUG, an alternative +memory allocation wrapper (memsys2) +is inserted that looks for memory allocation +errors at run-time. The memsys2 wrapper checks for memory leaks, of +course, but also looks for buffer overruns, uses of uninitialized memory, +and attempts to use memory after it has been freed. These same checks +are also done by valgrind (and, indeed, valgrind does them better) +but memsys2 has the advantage of being much faster than valgrind, which +means the checks can be done more often and for longer tests.

    + +

    8.4 Mutex Asserts

    + +

    SQLite contains a pluggable mutex subsystem. Depending on +compile-time options, the default mutex system contains interfaces +sqlite3_mutex_held() and sqlite3_mutex_notheld() that detect +whether or not a particular mutex is held by the calling thread. +These two interfaces are used extensively within assert() statements +in SQLite to verify mutexes are held and released at all the right +moments, in order to double-check that SQLite does work correctly +in multi-threaded applications.

    + +

    8.5 Journal Tests

    + +

One of the things that SQLite does to ensure that transactions +are atomic across system crashes and power failures is to write +all changes into the rollback journal file prior to changing the +database. The TCL test harness contains an alternative +Virtual File System implementation that helps to +verify this is occurring correctly. The "journal-test VFS" monitors +all disk I/O traffic between the database file and rollback journal, +checking to make sure that nothing is written into the database +file which has not first been written and synced to the rollback journal. +If any discrepancies are found, an assertion fault is raised.

    + +

    The journal tests are an additional double-check over and above +the crash tests to make sure that SQLite transactions will be atomic +across system crashes and power failures.

    + +

    9.0 Static Analysis

    + +

    Static analysis means analyzing code at or before compile-time to +check for correctness. Static analysis consists mostly of making +sure SQLite compiles without warnings, even when all warnings are +enabled. SQLite is developed primarily using GCC and it does +compile without warnings on GCC using the -Wall and -Wextra flags. +There are occasional reports of warnings coming from VC++, however.

    + +

    Static analysis has not proven to be helpful in finding +bugs. We cannot call to mind a single problem in SQLite that +was detected by static analysis that was not first seen by one +of the other testing methods described above. On the other hand, +we have on occasion introduced new bugs in our efforts to get SQLite +to compile without warnings.

    + +

    Our experience, then, is that static analysis is counter-productive +to quality. In other words, focusing on static analysis (being +concerned with compiler warnings) actually reduces the quality of the +code. Nevertheless, we developers have capitulated to pressure from +users and actively work to eliminate compiler warnings. We are +willing to do this because the other tests described above do an +excellent job of finding the bugs that are often introduced when +removing compiler warnings, so that product quality is probably not +decreased as a result.

    + +

    10.0 Summary

    + +

SQLite is open source. This gives many people the idea that +it is not as well tested as commercial software and is perhaps unreliable. +But that impression is false. +SQLite has exhibited very high reliability in the field and +a very low defect rate, especially considering how rapidly it is evolving. +The quality of SQLite is achieved in part by careful code design and +implementation. But extensive testing also plays a vital role in +maintaining and improving the quality of SQLite. This document has +summarized the testing procedures that every release of SQLite undergoes +in the hope of inspiring the reader to understand that SQLite is +suitable for use in mission-critical applications.

    +
    +This page last modified 2009/06/27 14:04:58 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/threadsafe.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/threadsafe.html --- sqlite3-3.4.2/www/threadsafe.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/threadsafe.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,174 @@ + + +Using SQLite In Multi-Threaded Applications + + + + + +

    SQLite And Multiple Threads

    + +

SQLite supports three different threading modes:

    + +
      +
1. Single-thread. +In this mode, all mutexes are disabled and SQLite is unsafe to use in +more than a single thread at once.

2. Multi-thread. +In this mode, SQLite can be safely used by multiple threads provided that +no single database connection is used simultaneously in two or more threads.

3. Serialized. +In serialized mode, SQLite can be safely used by multiple threads with no +restriction.
    + +

+The threading mode can be selected at compile-time (when the SQLite +library is being compiled from source code) or at start-time (when the +application that intends to use SQLite is initializing) or at +run-time (when a new SQLite database connection is being created). +Generally speaking, run-time overrides start-time and start-time +overrides compile-time, except that single-thread mode cannot be +overridden once selected.

    + +

    +The default mode is serialized. +

    + +

    Compile-time selection of threading mode

    + +

+Use the SQLITE_THREADSAFE compile-time parameter to select the +threading mode. If no SQLITE_THREADSAFE compile-time parameter is +present, then serialized mode is used. +This can be made explicit with +-DSQLITE_THREADSAFE=1. +With +-DSQLITE_THREADSAFE=0 the threading mode is +single-thread. With +-DSQLITE_THREADSAFE=2 the threading mode is +multi-thread.

    + +

    +The return value of the sqlite3_threadsafe() interface is determined +by the compile-time threading mode selection. If single-thread mode +is selected at compile-time, then sqlite3_threadsafe() returns false. +If either the multi-thread or serialized modes are selected, then +sqlite3_threadsafe() returns true. The sqlite3_threadsafe() +interface predates the multi-thread mode and start-time and run-time +mode selection and so is unable to distinguish +between multi-thread and serialized mode nor is it able to report start-time +or run-time mode changes. +
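For example, an application that requires thread safety can verify at start-up that the library was not built single-thread (a minimal sketch; as described above, the result reflects the compile-time setting only):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  if( sqlite3_threadsafe()==0 ){
    /* Built with -DSQLITE_THREADSAFE=0: no mutexes are present. */
    fprintf(stderr, "this SQLite build is single-thread only\n");
    return 1;
  }
  /* Mutexes are available, but this says nothing about whether the mode
  ** is multi-thread or serialized, nor about start-time or run-time
  ** overrides. */
  printf("SQLite was built with mutexing support\n");
  return 0;
}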

    + +

    +If single-thread mode is selected at compile-time, then critical +mutexing logic is omitted from the build and it is impossible to +enable either multi-thread or serialized modes at start-time or +run-time. +

    + +

    Start-time selection of threading mode

    + +

    +Assuming that the compile-time threading mode is not single-thread, then +the threading mode can be changed during initialization using the +sqlite3_config() interface. The SQLITE_CONFIG_SINGLETHREAD verb +puts SQLite into single-thread mode, the SQLITE_CONFIG_MULTITHREAD +verb sets multi-thread mode, and the SQLITE_CONFIG_SERIALIZED verb +sets serialized mode. +
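A minimal sketch of start-time selection (the function name is illustrative): sqlite3_config() may only be called before the library has been initialized and before any database connection is opened; called later it fails with SQLITE_MISUSE.

#include "sqlite3.h"

/* Put the whole process into multi-thread mode before first use. */
int configure_threading(void){
  int rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
  if( rc!=SQLITE_OK ) return rc;     /* e.g. SQLITE_MISUSE if called too late */
  return sqlite3_initialize();
}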

    + +

    Run-time selection of threading mode

    + +

    If single-thread mode has not been selected at compile-time or start-time, +then individual database connections can be created as either multi-thread +or serialized. It is not possible to downgrade an individual database +connection to single-thread mode. Nor is it possible to escalate an +individual database connection if the compile-time or start-time mode +is single-thread.

    + +

    The threading mode for an individual database connection is determined +by flags given as the third argument to sqlite3_open_v2(). The +SQLITE_OPEN_NOMUTEX flag causes the database connection to be in the +multi-thread mode and the SQLITE_OPEN_FULLMUTEX flag causes the connection +to be in serialized mode. If neither flag is specified or if +sqlite3_open() or sqlite3_open16() are used instead of +sqlite3_open_v2(), then the default +mode determined by the compile-time and start-time settings is used. +
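A sketch of run-time selection with sqlite3_open_v2() (the filenames and function name are placeholders): one connection is opened in multi-thread mode, the other in serialized mode.

#include "sqlite3.h"

int open_two_connections(sqlite3 **ppNoMutex, sqlite3 **ppFullMutex){
  int rc;
  /* Multi-thread mode: this connection must never be used by two
  ** threads at the same time. */
  rc = sqlite3_open_v2("data.db", ppNoMutex,
          SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_NOMUTEX, 0);
  if( rc!=SQLITE_OK ) return rc;
  /* Serialized mode: safe to share among threads without restriction. */
  rc = sqlite3_open_v2("log.db", ppFullMutex,
          SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_FULLMUTEX, 0);
  return rc;
}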

    +
    +This page last modified 2008/10/10 16:11:25 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/transactional.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/transactional.html --- sqlite3-3.4.2/www/transactional.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/transactional.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,112 @@ + + +SQLite Is Transactional + + + + + +

    SQLite is Transactional

    + +

    A transactional database is one in which all changes and queries +appear to be +Atomic, Consistent, Isolated, and Durable +(ACID). +SQLite implements +serializable +transactions that are atomic, consistent, isolated, and durable, +even if the transaction is interrupted by a program crash, an +operating system crash, or a power failure to the computer. +

    + +

    +We here restate and amplify the previous sentence for emphasis: +All changes within a single transaction in SQLite either occur +completely or not at all, even if the act of writing the change +out to the disk is interrupted by +

      +
• a program crash,
• an operating system crash, or
• a power failure.
    +

    + +

    +The claim of the previous paragraph is extensively checked in the +SQLite regression test suite using a special test harness that +simulates the effects on a database file of operating system crashes +and power failures. +

    + +

+Additional information

    +
    +This page last modified 2007/12/11 15:54:00 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/unlock_notify.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/unlock_notify.html --- sqlite3-3.4.2/www/unlock_notify.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/unlock_notify.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,449 @@ + + +SQLite Unlock-Notify API + + + + + +

    Using the sqlite3_unlock_notify() API

    + +
+/* This example uses the pthreads API */
+#include <pthread.h>
+#include <assert.h>     /* for the assert() in wait_for_unlock_notify() */
+#include "sqlite3.h"    /* SQLite API declarations */
    +
    +/*
    +** A pointer to an instance of this structure is passed as the user-context
    +** pointer when registering for an unlock-notify callback.
    +*/
    +typedef struct UnlockNotification UnlockNotification;
    +struct UnlockNotification {
+  int fired;                           /* True after unlock event has occurred */
    +  pthread_cond_t cond;                 /* Condition variable to wait on */
    +  pthread_mutex_t mutex;               /* Mutex to protect structure */
    +};
    +
    +/*
    +** This function is an unlock-notify callback registered with SQLite.
    +*/
    +static void unlock_notify_cb(void **apArg, int nArg){
    +  int i;
    +  for(i=0; i<nArg; i++){
    +    UnlockNotification *p = (UnlockNotification *)apArg[i];
    +    pthread_mutex_lock(&p->mutex);
    +    p->fired = 1;
    +    pthread_cond_signal(&p->cond);
    +    pthread_mutex_unlock(&p->mutex);
    +  }
    +}
    +
    +/*
    +** This function assumes that an SQLite API call (either sqlite3_prepare_v2() 
    +** or sqlite3_step()) has just returned SQLITE_LOCKED. The argument is the
    +** associated database connection.
    +**
    +** This function calls sqlite3_unlock_notify() to register for an 
    +** unlock-notify callback, then blocks until that callback is delivered 
    +** and returns SQLITE_OK. The caller should then retry the failed operation.
    +**
    +** Or, if sqlite3_unlock_notify() indicates that to block would deadlock 
    +** the system, then this function returns SQLITE_LOCKED immediately. In 
    +** this case the caller should not retry the operation and should roll 
    +** back the current transaction (if any).
    +*/
    +static int wait_for_unlock_notify(sqlite3 *db){
    +  int rc;
    +  UnlockNotification un;
    +
    +  /* Initialize the UnlockNotification structure. */
    +  un.fired = 0;
    +  pthread_mutex_init(&un.mutex, 0);
    +  pthread_cond_init(&un.cond, 0);
    +
    +  /* Register for an unlock-notify callback. */
    +  rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un);
    +  assert( rc==SQLITE_LOCKED || rc==SQLITE_OK );
    +
    +  /* The call to sqlite3_unlock_notify() always returns either SQLITE_LOCKED 
    +  ** or SQLITE_OK. 
    +  **
    +  ** If SQLITE_LOCKED was returned, then the system is deadlocked. In this
    +  ** case this function needs to return SQLITE_LOCKED to the caller so 
    +  ** that the current transaction can be rolled back. Otherwise, block
    +  ** until the unlock-notify callback is invoked, then return SQLITE_OK.
    +  */
    +  if( rc==SQLITE_OK ){
    +    pthread_mutex_lock(&un.mutex);
    +    if( !un.fired ){
    +      pthread_cond_wait(&un.cond, &un.mutex);
    +    }
    +    pthread_mutex_unlock(&un.mutex);
    +  }
    +
    +  /* Destroy the mutex and condition variables. */
    +  pthread_cond_destroy(&un.cond);
    +  pthread_mutex_destroy(&un.mutex);
    +
    +  return rc;
    +}
    +
    +/*
    +** This function is a wrapper around the SQLite function sqlite3_step().
    +** It functions in the same way as step(), except that if a required
    +** shared-cache lock cannot be obtained, this function may block waiting for
    +** the lock to become available. In this scenario the normal API step()
    +** function always returns SQLITE_LOCKED.
    +**
    +** If this function returns SQLITE_LOCKED, the caller should rollback
    +** the current transaction (if any) and try again later. Otherwise, the
    +** system may become deadlocked.
    +*/
    +int sqlite3_blocking_step(sqlite3_stmt *pStmt){
    +  int rc;
    +  while( SQLITE_LOCKED==(rc = sqlite3_step(pStmt)) ){
    +    rc = wait_for_unlock_notify(sqlite3_db_handle(pStmt));
    +    if( rc!=SQLITE_OK ) break;
    +    sqlite3_reset(pStmt);
    +  }
    +  return rc;
    +}
    +
    +/*
    +** This function is a wrapper around the SQLite function sqlite3_prepare_v2().
    +** It functions in the same way as prepare_v2(), except that if a required
    +** shared-cache lock cannot be obtained, this function may block waiting for
    +** the lock to become available. In this scenario the normal API prepare_v2()
    +** function always returns SQLITE_LOCKED.
    +**
    +** If this function returns SQLITE_LOCKED, the caller should rollback
    +** the current transaction (if any) and try again later. Otherwise, the
    +** system may become deadlocked.
    +*/
    +int sqlite3_blocking_prepare_v2(
    +  sqlite3 *db,              /* Database handle. */
    +  const char *zSql,         /* UTF-8 encoded SQL statement. */
    +  int nSql,                 /* Length of zSql in bytes. */
    +  sqlite3_stmt **ppStmt,    /* OUT: A pointer to the prepared statement */
    +  const char **pz           /* OUT: End of parsed string */
    +){
    +  int rc;
    +  while( SQLITE_LOCKED==(rc = sqlite3_prepare_v2(db, zSql, nSql, ppStmt, pz)) ){
    +    rc = wait_for_unlock_notify(db);
    +    if( rc!=SQLITE_OK ) break;
    +  }
    +  return rc;
    +}
    +
    + + +

    + When two or more connections access the same database in shared-cache + mode, read and write (shared and exclusive) locks on individual tables + are used to ensure that concurrently executing transactions are kept + isolated. Before writing to a table, a write (exclusive) lock must be + obtained on that table. Before reading, a read (shared) lock must be + obtained. A connection releases all held table locks when it concludes + its transaction. If a connection cannot obtain a required lock, then + the call to sqlite3_step() returns SQLITE_LOCKED. + +

    + Although it is less common, a call to sqlite3_prepare() or + sqlite3_prepare_v2() may also return SQLITE_LOCKED if it cannot obtain + a read-lock on the sqlite_master table of each attached database. These + APIs need to read the schema data contained in the sqlite_master table + in order to compile SQL statements to sqlite3_stmt* objects. + +

+ This page presents a technique using the SQLite sqlite3_unlock_notify() + API to create versions of sqlite3_step() and sqlite3_prepare_v2() that + block until the required locks are available instead of returning + SQLITE_LOCKED immediately, for use in multi-threaded applications. If the + sqlite3_blocking_step() or sqlite3_blocking_prepare_v2() functions presented + to the left return SQLITE_LOCKED, this indicates that to block would + deadlock the system.
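A hypothetical caller of those wrappers might look like the sketch below (the run_query() helper and the bare ROLLBACK are illustrative, not part of the technique itself): when either wrapper reports SQLITE_LOCKED, blocking would deadlock, so the current transaction is abandoned instead of retried.

static int run_query(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_blocking_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    while( (rc = sqlite3_blocking_step(pStmt))==SQLITE_ROW ){
      /* ... read the current row with sqlite3_column_xxx() ... */
    }
    sqlite3_finalize(pStmt);
  }
  if( rc==SQLITE_LOCKED ){
    /* Blocking would have deadlocked: roll back and give up (or retry later). */
    sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
  }
  return rc;
}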

    + The sqlite3_unlock_notify() API, which is only available if the library is + compiled with the pre-processor symbol SQLITE_ENABLE_UNLOCK_NOTIFY defined, + is documented here. This page is no substitute for + reading the full API documentation! + +

    + The sqlite3_unlock_notify() interface is designed for use in systems + that have a separate thread assigned to each database connection. There + is nothing in the implementation that prevents a single thread from running + multiple database connections. However, the sqlite3_unlock_notify() + interface only works on a single connection at a time, so the lock + resolution logic presented here will only work for a single + database connection per thread. + +

    The sqlite3_unlock_notify() API + +

    + After a call to sqlite3_step() or sqlite3_prepare_v2() returns + SQLITE_LOCKED, the sqlite3_unlock_notify() API may be invoked to register + for an unlock-notify callback. The unlock-notify callback is invoked by + SQLite after the database connection holding the table-lock that prevented + the call to sqlite3_step() or sqlite3_prepare_v2() from succeeding has + finished its transaction and released all locks. For example, if a call to + sqlite3_step() is an attempt to read from table X, and some other connection + Y is holding a write-lock on table X, then sqlite3_step() will return + SQLITE_LOCKED. If sqlite3_unlock_notify() is then called, the unlock-notify + callback will be invoked after connection Y's transaction is concluded. The + connection that the unlock-notify callback is waiting on, in this case + connection Y, is known as the "blocking connection". + +

    + If a call to sqlite3_step() that attempts to write to a database table + returns SQLITE_LOCKED, then more than one other connection may be holding + a read-lock on the database table in question. In this case SQLite simply + selects one of those other connections at random and issues the + unlock-notify callback when that connection's transaction is finished. + Whether the call to sqlite3_step() was blocked by one or many connections, + when the corresponding unlock-notify callback is issued it is not + guaranteed that the required lock is available, only that it may be. + +

+ When the unlock-notify callback is issued, it is issued from within a + call to sqlite3_step() (or sqlite3_close()) associated with the blocking + connection. It is illegal to invoke any sqlite3_XXX() API functions from + within an unlock-notify callback. The expected use is that the unlock-notify + callback will signal some other waiting thread or schedule some action + to take place later.

    + The algorithm used by the sqlite3_blocking_step() function is as follows: + +

      +
    1. Call sqlite3_step() on the supplied statement handle. If the call + returns anything other than SQLITE_LOCKED, then return this value + to the caller. Otherwise, continue. + +

    2. Invoke sqlite3_unlock_notify() on the database connection handle + associated with the supplied statement handle to register for an + unlock-notify callback. If the call to unlock_notify() returns + SQLITE_LOCKED, then return this value to the caller. + +

    3. Block until the unlock-notify callback is invoked by another thread. + +

    4. Call sqlite3_reset() on the statement handle. Since an + SQLITE_LOCKED error may only occur on the first call to sqlite3_step() + (it is not possible for one call to sqlite3_step() to return + SQLITE_ROW and then the next SQLITE_LOCKED), the statement handle may + be reset at this point without affecting the results of the query + from the point of view of the caller. If sqlite3_reset() were not + called at this point, the next call to sqlite3_step() would return + SQLITE_MISUSE. + +

    5. Return to step 1. + +

+ The algorithm used by the sqlite3_blocking_prepare_v2() function is similar, + except that step 4 (resetting the statement handle) is omitted.

    + +

    Writer Starvation + +

+ Based on the description above, it could be concluded that if there are + sufficient database readers reading the same table often enough, it is + possible that the table will never become unlocked and that a connection + waiting for a write-lock on the table will wait indefinitely. This + phenomenon is known as writer-starvation.

    + SQLite helps applications avoid this scenario. After any attempt to + obtain a write-lock on a table fails (because one or more other + connections are holding read-locks), all attempts to open new transactions + on the shared-cache fail until one of the following is true: + +

      +
    • The current writer concludes its transaction, OR +
    • The number of open read-transactions on the shared-cache drops to zero. +
    + +

    + Failed attempts to open new read-transactions return SQLITE_LOCKED to the + caller. If the caller then calls sqlite3_unlock_notify() to register for + an unlock-notify callback, the blocking connection is the connection that + currently has an open write-transaction on the shared-cache. This prevents + writer-starvation as, if no new read-transactions may be opened and + assuming all existing read-transactions are eventually concluded, the + writer will eventually have an opportunity to obtain the required + write-lock. + +

    The pthreads API + +

    By the time sqlite3_unlock_notify() is invoked by + wait_for_unlock_notify(), it is possible that the blocking connection + that prevented the sqlite3_step() or sqlite3_prepare_v2() call from + succeeding has already finished its transaction. In this case, the + unlock-notify callback is invoked immediately, before + sqlite3_unlock_notify() returns. Or, it is possible that the + unlock-notify callback is invoked by a second thread after + sqlite3_unlock_notify() is called but before the thread starts waiting + to be asynchronously signalled. + +

    Exactly how such a potential race-condition is handled depends on the + threads and synchronization primitives interface used by the application. + This example uses pthreads, the interface provided by modern UNIX-like + systems, including Linux. + +

    The pthreads interface provides the pthread_cond_wait() function. + This function allows the caller to simultaneously release a mutex + and start waiting for an asynchronous signal. Using this function, + a "fired" flag and a mutex, the race-condition described above may + be eliminated as follows: + +

    When the unlock-notify callback is invoked, which may be before the + thread that called sqlite3_unlock_notify() begins waiting for the + asynchronous signal, it does the following: + +

      +
    1. Obtains the mutex. +
    2. Sets the "fired" flag to true. +
    3. Attempts to signal a waiting thread. +
    4. Releases the mutex. +
    + +

    When the wait_for_unlock_notify() thread is ready to begin waiting for + the unlock-notify callback to arrive, it: + +

      +
    1. Obtains the mutex. +
    2. Checks if the "fired" flag has been set. If so, the unlock-notify + callback has already been invoked. Release the mutex and continue. +
3. Atomically releases the mutex and begins waiting for the + asynchronous signal. When the signal arrives, continue.
    + +

    This way, it doesn't matter if the unlock-notify callback has already + been invoked, or is being invoked, when the wait_for_unlock_notify() + thread begins blocking. + +

    Possible Enhancements + +

    The code on this page could be improved in at least two ways: + +

      +
    • It could manage thread priorities. +
    • It could handle a special case of SQLITE_LOCKED that can occur + when dropping a table or index. +
    + +

+ Even though the sqlite3_unlock_notify() function only allows the caller + to specify a single user-context pointer, an unlock-notify callback + is passed an array of such context pointers. This is because, when + a blocking connection concludes its transaction, if there is more + than one unlock-notify registered to call the same C function, the + context-pointers are marshalled into an array and a single callback is + issued. If each thread were assigned a priority, then instead of just + signalling the threads in arbitrary order as this implementation does, + higher priority threads could be signalled before lower priority threads.

    + If a "DROP TABLE" or "DROP INDEX" SQL command is executed, and the + associated database connection currently has one or more actively + executing SELECT statements, then SQLITE_LOCKED is returned. If + sqlite3_unlock_notify() is called in this case, then the specified + callback will be invoked immediately. Unless the other running SELECT + statements complete execution in the meantime, re-attempting the "DROP + TABLE" or "DROP INDEX" statement will return another SQLITE_LOCKED + error. In the implementation of sqlite3_blocking_step() shown to the + left, this could cause an infinite loop. + +

    + The caller could distinguish between this special "DROP TABLE|INDEX" + case and other cases by using extended error codes. When it is appropriate + to call sqlite3_unlock_notify(), the extended error code is + SQLITE_LOCKED_SHAREDCACHE. Otherwise, in the "DROP TABLE|INDEX" case, + it is just plain SQLITE_LOCKED. Another solution might be to limit + the number of times that any single query could be reattempted (to say + 100). Although this might be less efficient than one might wish, the + situation in question is not likely to occur often. + +
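A sketch of the first suggestion (the helper name is hypothetical; it assumes a build recent enough to provide sqlite3_extended_errcode() and the SQLITE_LOCKED_SHAREDCACHE extended code):

#include "sqlite3.h"

/* Return true if the most recent SQLITE_LOCKED came from a shared-cache
** table lock, i.e. a case where calling sqlite3_unlock_notify() and
** waiting is appropriate. */
static int is_sharedcache_lock(sqlite3 *db){
  return sqlite3_extended_errcode(db)==SQLITE_LOCKED_SHAREDCACHE;
}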

    +
    +This page last modified 2009/03/28 13:01:54 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/vdbe.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/vdbe.html --- sqlite3-3.4.2/www/vdbe.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/vdbe.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,1766 @@ + + +The Virtual Database Engine of SQLite + + + + + +

    The Virtual Database Engine of SQLite

    + +
    +This document describes the virtual machine used in SQLite version 2.8.0. +The virtual machine in SQLite version 3.0 and 3.1 is very similar in +concept but many of the opcodes have changed and the algorithms are +somewhat different. Use this document as a rough guide to the idea +behind the virtual machine in SQLite version 3, not as a reference on +how the virtual machine works. +
    + +

    If you want to know how the SQLite library works internally, +you need to begin with a solid understanding of the Virtual Database +Engine or VDBE. The VDBE occurs right in the middle of the +processing stream (see the architecture diagram) +and so it seems to touch most parts of the library. Even +parts of the code that do not directly interact with the VDBE +are usually in a supporting role. The VDBE really is the heart of +SQLite.

    + +

This article is a brief introduction to how the VDBE +works and in particular how the various VDBE instructions +(documented here) work together +to do useful things with the database. The style is tutorial, +beginning with simple tasks and working toward solving more +complex problems. Along the way we will visit most +submodules in the SQLite library. After completing this tutorial, +you should have a pretty good understanding of how SQLite works +and will be ready to begin studying the actual source code.

    + +

    Preliminaries

    + +

    The VDBE implements a virtual computer that runs a program in +its virtual machine language. The goal of each program is to +interrogate or change the database. Toward this end, the machine +language that the VDBE implements is specifically designed to +search, read, and modify databases.

    + +

    Each instruction of the VDBE language contains an opcode and +three operands labeled P1, P2, and P3. Operand P1 is an arbitrary +integer. P2 is a non-negative integer. P3 is a pointer to a data +structure or null-terminated string, possibly null. Only a few VDBE +instructions use all three operands. Many instructions use only +one or two operands. A significant number of instructions use +no operands at all but instead take their data and store their results +on the execution stack. The details of what each instruction +does and which operands it uses are described in the separate +opcode description document.

    + +

    A VDBE program begins +execution on instruction 0 and continues with successive instructions +until it either (1) encounters a fatal error, (2) executes a +Halt instruction, or (3) advances the program counter past the +last instruction of the program. When the VDBE completes execution, +all open database cursors are closed, all memory is freed, and +everything is popped from the stack. +So there are never any worries about memory leaks or +undeallocated resources.

    + +

    If you have done any assembly language programming or have +worked with any kind of abstract machine before, all of these +details should be familiar to you. So let's jump right in and +start looking as some code.

    + + +

    Inserting Records Into The Database

    + +

    We begin with a problem that can be solved using a VDBE program +that is only a few instructions long. Suppose we have an SQL +table that was created like this:

    + +
    +CREATE TABLE examp(one text, two int);
    +
    + +

    In words, we have a database table named "examp" that has two +columns of data named "one" and "two". Now suppose we want to insert a single +record into this table. Like this:

    + +
    +INSERT INTO examp VALUES('Hello, World!',99);
    +
    + +

    We can see the VDBE program that SQLite uses to implement this +INSERT using the sqlite command-line utility. First start +up sqlite on a new, empty database, then create the table. +Next change the output format of sqlite to a form that +is designed to work with VDBE program dumps by entering the +".explain" command. +Finally, enter the [INSERT] statement shown above, but precede the +[INSERT] with the special keyword [EXPLAIN]. The [EXPLAIN] keyword +will cause sqlite to print the VDBE program rather than +execute it. We have:

    +
    sqlite test_database_1
    +sqlite> CREATE TABLE examp(one text, two int);
    +sqlite> .explain
    +sqlite> EXPLAIN INSERT INTO examp VALUES('Hello, World!',99);
    +addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     Transaction   0      0                                         
    +1     VerifyCookie  0      81                                        
    +2     Transaction   1      0                                         
    +3     Integer       0      0                                         
    +4     OpenWrite     0      3      examp                              
    +5     NewRecno      0      0                                         
    +6     String        0      0      Hello, World!                      
    +7     Integer       99     0      99                                 
    +8     MakeRecord    2      0                                         
    +9     PutIntKey     0      1                                         
    +10    Close         0      0                                         
    +11    Commit        0      0                                         
    +12    Halt          0      0

    As you can see above, our simple insert statement is +implemented in 12 instructions. The first 3 and last 2 instructions are +a standard prologue and epilogue, so the real work is done in the middle +7 instructions. There are no jumps, so the program executes once through +from top to bottom. Let's now look at each instruction in detail.

    +

    0     Transaction   0      0                                         
    +1     VerifyCookie  0      81                                        
    +2     Transaction   1      0
    +

    The instruction Transaction +begins a transaction. The transaction ends when a Commit or Rollback +opcode is encountered. P1 is the index of the database file on which +the transaction is started. Index 0 is the main database file. A write +lock is obtained on the database file when a transaction is started. +No other process can read or write the file while the transaction is +underway. Starting a transaction also creates a rollback journal. A +transaction must be started before any changes can be made to the +database.

    + +

    The instruction VerifyCookie +checks cookie 0 (the database schema version) to make sure it is equal +to P2 (the value obtained when the database schema was last read). +P1 is the database number (0 for the main database). This is done to +make sure the database schema hasn't been changed by another thread, in +which case it has to be reread.

    + +

    The second Transaction +instruction begins a transaction and starts a rollback journal for +database 1, the database used for temporary tables.

    +
    3     Integer       0      0                                    
    +4     OpenWrite     0      3      examp
    +

    The instruction Integer pushes +the integer value P1 (0) onto the stack. Here 0 is the number of the +database to use in the following OpenWrite instruction. If P3 is not +NULL then it is a string representation of the same integer. Afterwards +the stack looks like this:

    +
    (integer) 0
    +

    The instruction OpenWrite opens +a new read/write cursor with handle P1 (0 in this case) on table "examp", +whose root page is P2 (3, in this database file). Cursor handles can be +any non-negative integer. But the VDBE allocates cursors in an array +with the size of the array being one more than the largest cursor. So +to conserve memory, it is best to use handles beginning with zero and +working upward consecutively. Here P3 ("examp") is the name of the +table being opened, but this is unused, and only generated to make the +code easier to read. This instruction pops the database number to use +(0, the main database) from the top of the stack, so afterwards the +stack is empty again.

    +
    5     NewRecno      0      0
    +

    The instruction NewRecno creates +a new integer record number for the table pointed to by cursor P1. The +record number is one not currently used as a key in the table. The new +record number is pushed onto the stack. Afterwards the stack looks like +this:

    +
    (integer) new record key
    6     String        0      0      Hello, World!
    +

    The instruction String pushes its +P3 operand onto the stack. Afterwards the stack looks like this:

    +
    (string) "Hello, World!"
    (integer) new record key
    7     Integer       99     0      99
    +

    The instruction Integer pushes +its P1 operand (99) onto the stack. Afterwards the stack looks like +this:

    +
    (integer) 99
    (string) "Hello, World!"
    (integer) new record key
    8     MakeRecord    2      0
    +

    The instruction MakeRecord pops +the top P1 elements off the stack (2 in this case) and converts them into +the binary format used for storing records in a database file. +(See the file format description for +details.) The new record generated by the MakeRecord instruction is +pushed back onto the stack. Afterwards the stack looks like this:

    + +
    (record) "Hello, World!", 99
    (integer) new record key
    9     PutIntKey     0      1
    +

    The instruction PutIntKey uses +the top 2 stack entries to write an entry into the table pointed to by +cursor P1. A new entry is created if it doesn't already exist or the +data for an existing entry is overwritten. The record data is the top +stack entry, and the key is the next entry down. The stack is popped +twice by this instruction. Because operand P2 is 1 the row change count +is incremented and the rowid is stored for subsequent return by the +sqlite_last_insert_rowid() function. If P2 is 0 the row change count is +unmodified. This instruction is where the insert actually occurs.

    +
    10    Close         0      0
    +

    The instruction Close closes a +cursor previously opened as P1 (0, the only open cursor). If P1 is not +currently open, this instruction is a no-op.

    +
    11    Commit        0      0
    +

    The instruction Commit causes all +modifications to the database that have been made since the last +Transaction to actually take effect. No additional modifications are +allowed until another transaction is started. The Commit instruction +deletes the journal file and releases the write lock on the database. +A read lock continues to be held if there are still cursors open.

    +
    12    Halt          0      0
    +

    The instruction Halt causes the VDBE +engine to exit immediately. All open cursors, Lists, Sorts, etc are +closed automatically. P1 is the result code returned by sqlite_exec(). +For a normal halt, this should be SQLITE_OK (0). For errors, it can be +some other value. The operand P2 is only used when there is an error. +There is an implied "Halt 0 0 0" instruction at the end of every +program, which the VDBE appends when it prepares a program to run.

    + + + +

    Tracing VDBE Program Execution

    + +

    If the SQLite library is compiled without the NDEBUG preprocessor +macro, then the PRAGMA vdbe_trace + causes the VDBE to trace the execution of programs. Though this +feature was originally intended for testing and debugging, it can also +be useful in learning about how the VDBE operates. +Use "PRAGMA vdbe_trace=ON;" to turn tracing on and +"PRAGMA vdbe_trace=OFF" to turn tracing back off. +Like this:

    +
    sqlite> PRAGMA vdbe_trace=ON;
    +   0 Halt            0    0
    +sqlite> INSERT INTO examp VALUES('Hello, World!',99);
    +   0 Transaction     0    0
    +   1 VerifyCookie    0   81
    +   2 Transaction     1    0
    +   3 Integer         0    0
    +Stack: i:0
    +   4 OpenWrite       0    3 examp
    +   5 NewRecno        0    0
    +Stack: i:2
    +   6 String          0    0 Hello, World!
    +Stack: t[Hello,.World!] i:2
    +   7 Integer        99    0 99
    +Stack: si:99 t[Hello,.World!] i:2
    +   8 MakeRecord      2    0
    +Stack: s[...Hello,.World!.99] i:2
    +   9 PutIntKey       0    1
    +  10 Close           0    0
    +  11 Commit          0    0
    +  12 Halt            0    0
    +

    With tracing mode on, the VDBE prints each instruction prior +to executing it. After the instruction is executed, the top few +entries in the stack are displayed. The stack display is omitted +if the stack is empty.

    + +

On the stack display, most entries are shown with a prefix +that tells the datatype of that stack entry. Integers begin +with "i:". Floating point values begin with "r:". +(The "r" stands for "real-number".) Strings begin with either +"s:", "t:", "e:" or "z:". +The difference among the string prefixes is caused by how their +memory is allocated. The z: strings are stored in memory obtained +from malloc(). The t: strings are statically allocated. +The e: strings are ephemeral. All other strings have the s: prefix. +This doesn't make any difference to you, +the observer, but it is vitally important to the VDBE since the +z: strings need to be passed to free() when they are +popped to avoid a memory leak. Note that only the first 10 +characters of string values are displayed and that binary +values (such as the result of the MakeRecord instruction) are +treated as strings. The only other datatype that can be stored +on the VDBE stack is a NULL, which is displayed without a prefix +as simply "NULL". If an integer has been placed on the +stack as both an integer and a string, its prefix is "si:".

    Simple Queries

    + +

    At this point, you should understand the basics of how the VDBE +writes to a database. Now let's look at how it does queries. +We will use the following simple SELECT statement as our example:

    + +
    +SELECT * FROM examp;
    +
    + +

    The VDBE program generated for this SQL statement is as follows:

    +
    sqlite> EXPLAIN SELECT * FROM examp;
    +addr  opcode        p1     p2     p3                                 
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      one                                
    +1     ColumnName    1      0      two                                
    +2     Integer       0      0                                         
    +3     OpenRead      0      3      examp                              
    +4     VerifyCookie  0      81                                        
    +5     Rewind        0      10                                        
    +6     Column        0      0                                         
    +7     Column        0      1                                         
    +8     Callback      2      0                                         
    +9     Next          0      6                                         
    +10    Close         0      0                                         
    +11    Halt          0      0
    +

    Before we begin looking at this problem, let's briefly review +how queries work in SQLite so that we will know what we are trying +to accomplish. For each row in the result of a query, +SQLite will invoke a callback function with the following +prototype:

    + +
    +int Callback(void *pUserData, int nColumn, char *azData[], char *azColumnName[]);
    +
    + +

    The SQLite library supplies the VDBE with a pointer to the callback function +and the pUserData pointer. (Both the callback and the user data were +originally passed in as arguments to the sqlite_exec() API function.) +The job of the VDBE is to +come up with values for nColumn, azData[], +and azColumnName[]. +nColumn is the number of columns in the results, of course. +azColumnName[] is an array of strings where each string is the name +of one of the result columns. azData[] is an array of strings holding +the actual data.
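As an illustration (a hypothetical example, not code from the SQLite library), a callback matching the prototype above might simply print each result row:

#include <stdio.h>

/* Hypothetical callback: print one result row per invocation.
** Returning non-zero would abort the query. */
static int print_row(void *pUserData, int nColumn,
                     char *azData[], char *azColumnName[]){
  int i;
  (void)pUserData;                  /* user data is unused in this example */
  for(i=0; i<nColumn; i++){
    printf("%s = %s\n", azColumnName[i], azData[i] ? azData[i] : "NULL");
  }
  printf("\n");
  return 0;
}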

    +
    0     ColumnName    0      0      one                                
    +1     ColumnName    1      0      two
    +

    The first two instructions in the VDBE program for our query are +concerned with setting up values for azColumn. +The ColumnName instructions tell +the VDBE what values to fill in for each element of the azColumnName[] +array. Every query will begin with one ColumnName instruction for each +column in the result, and there will be a matching Column instruction for +each one later in the query. +

    +
    2     Integer       0      0                                         
    +3     OpenRead      0      3      examp                              
    +4     VerifyCookie  0      81
    +

    Instructions 2 and 3 open a read cursor on the database table that is +to be queried. This works the same as the OpenWrite instruction in the +INSERT example except that the cursor is opened for reading this time +instead of for writing. Instruction 4 verifies the database schema as +in the INSERT example.

    +
    5     Rewind        0      10
    +

The Rewind instruction initializes +a loop that iterates over the "examp" table. It rewinds the cursor P1 +to the first entry in its table. This is required by the Column and +Next instructions, which use the cursor to iterate through the table. +If the table is empty, then jump to P2 (10), which is the instruction just +past the loop. If the table is not empty, fall through to the following +instruction at 6, which is the beginning of the loop body.

    +
    6     Column        0      0                                         
    +7     Column        0      1                                         
    +8     Callback      2      0
    +

    The instructions 6 through 8 form the body of the loop that will +execute once for each record in the database file. + +The Column instructions at addresses 6 +and 7 each take the P2-th column from the P1-th cursor and push it onto +the stack. In this example, the first Column instruction is pushing the +value for the column "one" onto the stack and the second Column +instruction is pushing the value for column "two". + +The Callback instruction at address 8 +invokes the callback() function. The P1 operand to Callback becomes the +value for nColumn. The Callback instruction pops P1 values from +the stack and uses them to fill the azData[] array.

    +
    9     Next          0      6
    +

    The instruction at address 9 implements the branching part of the +loop. Together with the Rewind at address 5 it forms the loop logic. +This is a key concept that you should pay close attention to. +The Next instruction advances the cursor +P1 to the next record. If the cursor advance was successful, then jump +immediately to P2 (6, the beginning of the loop body). If the cursor +was at the end, then fall through to the following instruction, which +ends the loop.

    +
    10    Close         0      0                                         
    +11    Halt          0      0
    +

    The Close instruction at the end of the program closes the +cursor that points into the table "examp". It is not really necessary +to call Close here since all cursors will be automatically closed +by the VDBE when the program halts. But we needed an instruction +for the Rewind to jump to so we might as well go ahead and have that +instruction do something useful. +The Halt instruction ends the VDBE program.

    + +

    Note that the program for this SELECT query didn't contain the +Transaction and Commit instructions used in the INSERT example. Because +the SELECT is a read operation that doesn't alter the database, it +doesn't require a transaction.

    + + +

    A Slightly More Complex Query

    + +

    The key points of the previous example were the use of the Callback +instruction to invoke the callback function, and the use of the Next +instruction to implement a loop over all records of the database file. +This example attempts to drive home those ideas by demonstrating a +slightly more complex query that involves more columns of +output, some of which are computed values, and a WHERE clause that +limits which records actually make it to the callback function. +Consider this query:

    + +
    +SELECT one, two, one || two AS 'both'
    +FROM examp
    +WHERE one LIKE 'H%'
    +
    + +

This query is perhaps a bit contrived, but it does serve to +illustrate our points. The result will have three columns with +names "one", "two", and "both". The first two columns are direct +copies of the two columns in the table and the third result +column is a string formed by concatenating the first and +second columns of the table. +Finally, the +WHERE clause says that we will only choose rows for the +results where the "one" column begins with an "H". +Here is what the VDBE program looks like for this query:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      one
    +1     ColumnName    1      0      two
    +2     ColumnName    2      0      both
    +3     Integer       0      0
    +4     OpenRead      0      3      examp
    +5     VerifyCookie  0      81
    +6     Rewind        0      18
    +7     String        0      0      H%                                      
    +8     Column        0      0
    +9     Function      2      0      ptr(0x7f1ac0)
    +10    IfNot         1      17
    +11    Column        0      0
    +12    Column        0      1
    +13    Column        0      0
    +14    Column        0      1
    +15    Concat        2      0
    +16    Callback      3      0
    +17    Next          0      7
    +18    Close         0      0
    +19    Halt          0      0
    +

    Except for the WHERE clause, the structure of the program for +this example is very much like the prior example, just with an +extra column. There are now 3 columns, instead of 2 as before, +and there are three ColumnName instructions. +A cursor is opened using the OpenRead instruction, just like in the +prior example. The Rewind instruction at address 6 and the +Next at address 17 form a loop over all records of the table. +The Close instruction at the end is there to give the +Rewind instruction something to jump to when it is done. All of +this is just like in the first query demonstration.

    + +

    The Callback instruction in this example has to generate +data for three result columns instead of two, but is otherwise +the same as in the first query. When the Callback instruction +is invoked, the left-most column of the result should be +the lowest in the stack and the right-most result column should +be the top of the stack. We can see the stack being set up +this way at addresses 11 through 15. The Column instructions at +11 and 12 push the values for the first two columns in the result. +The two Column instructions at 13 and 14 pull in the values needed +to compute the third result column and the Concat instruction at +15 joins them together into a single entry on the stack.

    + +

The only thing that is really new about the current example +is the WHERE clause which is implemented by instructions at +addresses 7 through 10. Instructions at address 7 and 8 push +onto the stack the value of the "one" column from the table +and the literal string "H%". +The Function instruction at address 9 +pops these two values from the stack and pushes the result of the LIKE() +function back onto the stack. +The IfNot instruction pops the top stack +value and causes an immediate jump forward to the Next instruction if the +top value was false (that is, if the "one" column is not LIKE the literal string "H%"). +Taking this jump effectively skips the callback, which is the whole point +of the WHERE clause. If the result +of the comparison is true, the jump is not taken and control +falls through to the Callback instruction below.

    + +

    Notice how the LIKE operator is implemented. It is a user-defined +function in SQLite, so the address of its function definition is +specified in P3. The operand P1 is the number of function arguments for +it to take from the stack. In this case the LIKE() function takes 2 +arguments. The arguments are taken off the stack in reverse order +(right-to-left), so the pattern to match is the top stack element, and +the next element is the data to compare. The return value is pushed +onto the stack.

    + + + +

    A Template For SELECT Programs

    + +

    The first two query examples illustrate a kind of template that +every SELECT program will follow. Basically, we have:

    + +

    +

      +
1. Initialize the azColumnName[] array for the callback.

2. Open a cursor into the table to be queried.

3. For each record in the table, do:

   1. If the WHERE clause evaluates to FALSE, then skip the steps that follow and continue to the next record.

   2. Compute all columns for the current row of the result.

   3. Invoke the callback function for the current row of the result.

4. Close the cursor.
    +

    + +

    This template will be expanded considerably as we consider +additional complications such as joins, compound selects, using +indices to speed the search, sorting, and aggregate functions +with and without GROUP BY and HAVING clauses. +But the same basic ideas will continue to apply.

    + +

    UPDATE And DELETE Statements

    + +

    The UPDATE and DELETE statements are coded using a template +that is very similar to the SELECT statement template. The main +difference, of course, is that the end action is to modify the +database rather than invoke a callback function. Because it modifies +the database it will also use transactions. Let's begin +by looking at a DELETE statement:

    + +
    +DELETE FROM examp WHERE two<50;
    +
    + +

    This DELETE statement will remove every record from the "examp" +table where the "two" column is less than 50. +The code generated to do this is as follows:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     Transaction   1      0
    +1     Transaction   0      0
    +2     VerifyCookie  0      178
    +3     Integer       0      0
    +4     OpenRead      0      3      examp
    +5     Rewind        0      12
    +6     Column        0      1
    +7     Integer       50     0      50
    +8     Ge            1      11
    +9     Recno         0      0
    +10    ListWrite     0      0
    +11    Next          0      6
    +12    Close         0      0
    +13    ListRewind    0      0
    +14    Integer       0      0
    +15    OpenWrite     0      3
    +16    ListRead      0      20
    +17    NotExists     0      19
    +18    Delete        0      1
    +19    Goto          0      16
    +20    ListReset     0      0
    +21    Close         0      0
    +22    Commit        0      0
    +23    Halt          0      0
    +

    Here is what the program must do. First it has to locate all of +the records in the table "examp" that are to be deleted. This is +done using a loop very much like the loop used in the SELECT examples +above. Once all records have been located, then we can go back through +and delete them one by one. Note that we cannot delete each record +as soon as we find it. We have to locate all records first, then +go back and delete them. This is because the SQLite database +backend might change the scan order after a delete operation. +And if the scan +order changes in the middle of the scan, some records might be +visited more than once and other records might not be visited at all.

    + +

So the implementation of DELETE is really in two loops. The first loop +(instructions 5 through 11) locates the records that are to be deleted +and saves their keys onto a temporary list, and the second loop +(instructions 16 through 19) uses the key list to delete the records one +by one.

    +
    0     Transaction   1      0
    +1     Transaction   0      0
    +2     VerifyCookie  0      178
    +3     Integer       0      0
    +4     OpenRead      0      3      examp
    +

Instructions 0 through 4 are as in the INSERT example. They start +transactions for the main and temporary databases, verify the database +schema for the main database, and open a read cursor on the table +"examp". Notice that the cursor is opened for reading, not writing. At +this stage of the program we are only going to be scanning the table, +not changing it. We will reopen the same table for writing later, at +instruction 15.

    +
    5     Rewind        0      12
    +

    As in the SELECT example, the Rewind +instruction rewinds the cursor to the beginning of the table, readying +it for use in the loop body.

    +
    6     Column        0      1
    +7     Integer       50     0      50
    +8     Ge            1      11
    +

    The WHERE clause is implemented by instructions 6 through 8. +The job of the where clause is to skip the ListWrite if the WHERE +condition is false. To this end, it jumps ahead to the Next instruction +if the "two" column (extracted by the Column instruction) is +greater than or equal to 50.

    + +

    As before, the Column instruction uses cursor P1 and pushes the data +record in column P2 (1, column "two") onto the stack. The Integer +instruction pushes the value 50 onto the top of the stack. After these +two instructions the stack looks like:

    +
    (integer) 50
    (record) current record for column "two"
    +

    The Ge operator compares the top two +elements on the stack, pops them, and then branches based on the result +of the comparison. If the second element is >= the top element, then +jump to address P2 (the Next instruction at the end of the loop). +Because P1 is true, if either operand is NULL (and thus the result is +NULL) then take the jump. If we don't jump, just advance to the next +instruction.

    +
    9     Recno         0      0
    +10    ListWrite     0      0
    +

The Recno instruction pushes onto the +stack an integer which is the first 4 bytes of the key to the current +entry in a sequential scan of the table pointed to by cursor P1. +The ListWrite instruction writes the +integer on the top of the stack into a temporary storage list and pops +the top element. This is the important work of this loop, to store the +keys of the records to be deleted so we can delete them in the second +loop. After this ListWrite instruction the stack is empty again.

    +
    11    Next          0      6
    +12    Close         0      0
    +

    The Next instruction increments the cursor to point to the next +element in the table pointed to by cursor P0, and if it was successful +branches to P2 (6, the beginning of the loop body). The Close +instruction closes cursor P1. It doesn't affect the temporary storage +list because it isn't associated with cursor P1; it is instead a global +working list (which can be saved with ListPush).

    +
    13    ListRewind    0      0
    +

    The ListRewind instruction +rewinds the temporary storage list to the beginning. This prepares it +for use in the second loop.

    +
    14    Integer       0      0
    +15    OpenWrite     0      3
    +

    As in the INSERT example, we push the database number P1 (0, the main +database) onto the stack and use OpenWrite to open the cursor P1 on table +P2 (base page 3, "examp") for modification.

    +
    16    ListRead      0      20
    +17    NotExists     0      19
    +18    Delete        0      1
    +19    Goto          0      16
    +

    This loop does the actual deleting. It is organized differently from +the one in the UPDATE example. The ListRead instruction plays the role +that the Next did in the INSERT loop, but because it jumps to P2 on +failure, and Next jumps on success, we put it at the start of the loop +instead of the end. This means that we have to put a Goto at the end of +the loop to jump back to the loop test at the beginning. So this +loop has the form of a C while(){...} loop, while the loop in the INSERT +example had the form of a do{...}while() loop. The Delete instruction +fills the role that the callback function did in the preceding examples. +

    +

    The ListRead instruction reads an +element from the temporary storage list and pushes it onto the stack. +If this was successful, it continues to the next instruction. If this +fails because the list is empty, it branches to P2, which is the +instruction just after the loop. Afterwards the stack looks like:

    +
    (integer) key for current record
    +

    Notice the similarity between the ListRead and Next instructions. +Both operations work according to this rule: +

    +
    +Push the next "thing" onto the stack and fall through OR jump to P2, +depending on whether or not there is a next "thing" to push. +
    +

    One difference between Next and ListRead is their idea of a "thing". +The "things" for the Next instruction are records in a database file. +"Things" for ListRead are integer keys in a list. Another difference +is whether to jump or fall through if there is no next "thing". In this +case, Next falls through, and ListRead jumps. Later on, we will see +other looping instructions (NextIdx and SortNext) that operate using the +same principle.

    + +

    The NotExists instruction pops +the top stack element and uses it as an integer key. If a record with +that key does not exist in table P1, then jump to P2. If a record does +exist, then fall through to the next instruction. In this case P2 takes +us to the Goto at the end of the loop, which jumps back to the ListRead +at the beginning. This could have been coded to have P2 be 16, the +ListRead at the start of the loop, but the SQLite parser which generated +this code didn't make that optimization.

    +

    The Delete does the work of this +loop; it pops an integer key off the stack (placed there by the +preceding ListRead) and deletes the record of cursor P1 that has that key. +Because P2 is true, the row change counter is incremented.

    +

    The Goto jumps back to the beginning +of the loop. This is the end of the loop.

    +
    20    ListReset     0      0
    +21    Close         0      0
    +22    Commit        0      0
    +23    Halt          0      0
    +

    This block of instructions cleans up the VDBE program. Three of these +instructions aren't really required, but are generated by the SQLite +parser from its code templates, which are designed to handle more +complicated cases.

    +

    The ListReset instruction empties +the temporary storage list. This list is emptied automatically when the +VDBE program terminates, so it isn't necessary in this case. The Close +instruction closes the cursor P1. Again, this is done by the VDBE +engine when it is finished running this program. The Commit ends the +current transaction successfully, and causes all changes that occurred +in this transaction to be saved to the database. The final Halt is also +unnecessary, since it is added to every VDBE program when it is +prepared to run.

    + + +

    UPDATE statements work very much like DELETE statements except +that instead of deleting the record they replace it with a new one. +Consider this example: +

    + +
    +UPDATE examp SET one= '(' || one || ')' WHERE two < 50;
    +
    + +

    Instead of deleting records where the "two" column is less than +50, this statement just puts the "one" column in parentheses. +The VDBE program to implement this statement follows:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     Transaction   1      0                                         
    +1     Transaction   0      0                                         
    +2     VerifyCookie  0      178                                            
    +3     Integer       0      0                                         
    +4     OpenRead      0      3      examp                              
    +5     Rewind        0      12                                        
    +6     Column        0      1                                         
    +7     Integer       50     0      50                                 
    +8     Ge            1      11                                        
    +9     Recno         0      0                                         
    +10    ListWrite     0      0                                         
    +11    Next          0      6                                              
    +12    Close         0      0                                         
    +13    Integer       0      0                                         
    +14    OpenWrite     0      3                                              
    +15    ListRewind    0      0                                         
    +16    ListRead      0      28                                             
    +17    Dup           0      0                                         
    +18    NotExists     0      16                                             
    +19    String        0      0      (                                  
    +20    Column        0      0                                         
    +21    Concat        2      0                                         
    +22    String        0      0      )                                  
    +23    Concat        2      0                                         
    +24    Column        0      1                                         
    +25    MakeRecord    2      0                                         
    +26    PutIntKey     0      1                                         
    +27    Goto          0      16                                             
    +28    ListReset     0      0                                         
    +29    Close         0      0                                         
    +30    Commit        0      0                                         
    +31    Halt          0      0
    +

    This program is essentially the same as the DELETE program except +that the body of the second loop has been replaced by a sequence of +instructions (at addresses 17 through 26) that update the record rather +than delete it. Most of this instruction sequence should already be +familiar to you, but there are a couple of minor twists so we will go +over it briefly. Also note that the order of some of the instructions +before and after the second loop has changed. This is just the way the +SQLite parser chose to output the code using a different template.

    + +

    As we enter the interior of the second loop (at instruction 17) +the stack contains a single integer which is the key of the +record we want to modify. We are going to need to use this +key twice: once to fetch the old value of the record and +a second time to write back the revised record. So the first instruction +is a Dup to make a duplicate of the key on the top of the stack. The +Dup instruction will duplicate any element of the stack, not just the top +element. You specify which element to duplicate using the +P1 operand. When P1 is 0, the top of the stack is duplicated. +When P1 is 1, the next element down on the stack is duplicated. +And so forth.
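
    Conceptually, Dup simply copies an element at a given depth back onto the top of the operand stack. A minimal sketch, assuming a plain Python list stands in for the stack (not SQLite's actual stack code):

      def dup(stack, p1):
          # Copy the element P1 entries down from the top onto the top of
          # the stack: P1 == 0 duplicates the top element, P1 == 1 the one
          # below it, and so on.
          stack.append(stack[-1 - p1])

      stack = ["new record", 42]       # the integer key 42 is on top
      dup(stack, 0)                    # stack is now ["new record", 42, 42]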

    + +

    After duplicating the key, the next instruction, NotExists, +pops the stack once and uses the value popped as a key to +check the existence of a record in the database file. If there is no record +for this key, it jumps back to the ListRead to get another key.

    + +

    Instructions 19 through 25 construct a new database record +that will be used to replace the existing record. This is +the same kind of code that we saw +in the description of INSERT and will not be described further. +After instruction 25 executes, the stack looks like this:

    +
    (record) new data record
    (integer) key
    +

    The PutIntKey instruction (also described +during the discussion about INSERT) writes an entry into the +database file whose data is the top of the stack and whose key +is the next on the stack, and then pops the stack twice. The +PutIntKey instruction will overwrite the data of an existing record +with the same key, which is what we want here. Overwriting was not +an issue with INSERT because with INSERT the key was generated +by the NewRecno instruction which is guaranteed to provide a key +that has not been used before.
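
    The overwrite-on-equal-key behaviour can be pictured with an ordinary dictionary; the following Python fragment is only an analogy (put_int_key and new_recno are made-up names that mirror the opcodes, not SQLite functions):

      table = {5: "old data"}                       # integer key -> record data

      def put_int_key(table, key, data):
          # Like PutIntKey: write (key, data), silently replacing any existing
          # record that has the same key -- exactly what UPDATE needs.
          table[key] = data

      def new_recno(table):
          # Like NewRecno: hand out a key that is not yet in use, so a
          # following put_int_key() can never overwrite an existing row.
          return max(table, default=0) + 1

      put_int_key(table, 5, "revised data")         # overwrites the old row
      put_int_key(table, new_recno(table), "new")   # inserts without overwriting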

    + +

    CREATE and DROP

    + +

    Using CREATE or DROP to create or destroy a table or index is +really the same as doing an INSERT or DELETE from the special +"sqlite_master" table, at least from the point of view of the VDBE. +The sqlite_master table is a special table that is automatically +created for every SQLite database. It looks like this:

    + +
    +CREATE TABLE sqlite_master (
    +  type      TEXT,    -- either "table" or "index"
    +  name      TEXT,    -- name of this table or index
    +  tbl_name  TEXT,    -- for indices: name of associated table
    +  sql       TEXT     -- SQL text of the original CREATE statement
    +)
    +
    + +

    Every table (except the "sqlite_master" table itself) +and every named index in an SQLite database has an entry +in the sqlite_master table. You can query this table using +a SELECT statement just like any other table. But you are +not allowed to directly change the table using UPDATE, INSERT, +or DELETE. Changes to sqlite_master have to occur using +the CREATE and DROP commands because SQLite also has to update +some of its internal data structures when tables and indices +are added or destroyed.

    + +

    But from the point of view of the VDBE, a CREATE works +pretty much like an INSERT and a DROP works like a DELETE. +When the SQLite library opens an existing database, +the first thing it does is a SELECT to read the "sql" +column from all entries of the sqlite_master table. +The "sql" column contains the complete SQL text of the +CREATE statement that originally generated the index or +table. This text is fed back into the SQLite parser +and used to reconstruct the +internal data structures describing the index or table.
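
    If you want to look at this table yourself, a query of the named columns works from any SQLite client. The snippet below uses Python's bundled sqlite3 module against a current SQLite build (the file name "test.db" is just a placeholder) and simply prints the stored schema text:

      import sqlite3

      conn = sqlite3.connect("test.db")    # any existing SQLite database file
      for type_, name, tbl_name, sql in conn.execute(
              "SELECT type, name, tbl_name, sql FROM sqlite_master"):
          print(type_, name, tbl_name)
          print("    ", sql)               # the original CREATE statement text
      conn.close()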

    + +

    Using Indexes To Speed Searching

    + +

    In the example queries above, every row of the table being +queried must be loaded off of the disk and examined, even if only +a small percentage of the rows end up in the result. This can +take a long time on a big table. To speed things up, SQLite +can use an index.

    + +

    An SQLite file associates a key with some data. For an SQLite +table, the database file is set up so that the key is an integer +and the data is the information for one row of the table. +Indices in SQLite reverse this arrangement. The index key +is (some of) the information being stored and the index data +is an integer. +To access a table row that has some particular +content, we first look up the content in the index table to find +its integer index, then we use that integer to look up the +complete record in the table.
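
    In other words, the table maps an integer key to row content and the index maps (part of) the row content back to integer keys. A toy model in Python, with dictionaries standing in for the two b-tree files (purely illustrative):

      # table: integer key -> row          index: column value -> set of keys
      examp = {1: ("Hello!", 99), 2: ("one", 1), 3: ("two", 2)}
      examp_idx1 = {}
      for key, (one, two) in examp.items():
          examp_idx1.setdefault(two, set()).add(key)

      def rows_where_two_equals(value):
          # Indexed lookup: find the integer keys first, then fetch full rows.
          return [examp[key] for key in examp_idx1.get(value, ())]

      print(rows_where_two_equals(2))      # -> [('two', 2)]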

    + +

    Note that SQLite uses b-trees, which are a sorted data structure, +so indices can be used when the WHERE clause of the SELECT statement +contains tests for equality or inequality. Queries like the following +can use an index if it is available:

    + +
    +SELECT * FROM examp WHERE two==50;
    +SELECT * FROM examp WHERE two<50;
    +SELECT * FROM examp WHERE two IN (50, 100);
    +
    + +

    If there exists an index that maps the "two" column of the "examp" +table into integers, then SQLite will use that index to find the integer +keys of all rows in examp that have a value of 50 for column two, or +all rows that are less than 50, etc. +But the following queries cannot use the index:

    + +
    +SELECT * FROM examp WHERE two%50 == 10;
    +SELECT * FROM examp WHERE two&127 == 3;
    +
    + +

    Note that the SQLite parser will not always generate code to use an +index, even if it is possible to do so. The following queries will not +currently use the index:

    + +
    +SELECT * FROM examp WHERE two+10 == 50;
    +SELECT * FROM examp WHERE two==50 OR two==100;
    +
    + +

    To understand better how indices work, let's first look at how +they are created. Let's go ahead and put an index on the "two" +column of the examp table. We have:

    + +
    +CREATE INDEX examp_idx1 ON examp(two);
    +
    + +

    The VDBE code generated by the above statement looks like the +following:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     Transaction   1      0                                         
    +1     Transaction   0      0                                         
    +2     VerifyCookie  0      178                                            
    +3     Integer       0      0                                         
    +4     OpenWrite     0      2                                         
    +5     NewRecno      0      0                                         
    +6     String        0      0      index                              
    +7     String        0      0      examp_idx1                         
    +8     String        0      0      examp                              
    +9     CreateIndex   0      0      ptr(0x791380)                      
    +10    Dup           0      0                                         
    +11    Integer       0      0                                         
    +12    OpenWrite     1      0                                         
    +13    String        0      0      CREATE INDEX examp_idx1 ON examp(tw
    +14    MakeRecord    5      0                                         
    +15    PutIntKey     0      0                                         
    +16    Integer       0      0                                         
    +17    OpenRead      2      3      examp                              
    +18    Rewind        2      24                                             
    +19    Recno         2      0                                         
    +20    Column        2      1                                         
    +21    MakeIdxKey    1      0      n                                  
    +22    IdxPut        1      0      indexed columns are not unique     
    +23    Next          2      19                                             
    +24    Close         2      0                                         
    +25    Close         1      0                                         
    +26    Integer       333    0                                         
    +27    SetCookie     0      0                                         
    +28    Close         0      0                                         
    +29    Commit        0      0                                         
    +30    Halt          0      0
    +

    Remember that every table (except sqlite_master) and every named +index has an entry in the sqlite_master table. Since we are creating +a new index, we have to add a new entry to sqlite_master. This is +handled by instructions 3 through 15. Adding an entry to sqlite_master +works just like any other INSERT statement so we will not say any more +about it here. In this example, we want to focus on populating the +new index with valid data, which happens in instructions 16 through +23.

    +
    16    Integer       0      0                                         
    +17    OpenRead      2      3      examp
    +

    The first thing that happens is that we open the table being +indexed for reading. In order to construct an index for a table, +we have to know what is in that table. The index itself has already been +opened for writing using cursor 1 by instructions 11 and 12.

    +
    18    Rewind        2      24                                             
    +19    Recno         2      0                                         
    +20    Column        2      1                                         
    +21    MakeIdxKey    1      0      n                                  
    +22    IdxPut        1      0      indexed columns are not unique     
    +23    Next          2      19
    +

    Instructions 18 through 23 implement a loop over every row of the +table being indexed. For each table row, we first extract the integer +key for that row using Recno in instruction 19, then get the value of +the "two" column using Column in instruction 20. +The MakeIdxKey instruction at 21 +converts data from the "two" column (which is on the top of the stack) +into a valid index key. For an index on a single column, this is +basically a no-op. But if the P1 operand to MakeIdxKey had been +greater than one, multiple entries would have been popped from the stack +and converted into a single index key. +The IdxPut instruction at 22 is what +actually creates the index entry. IdxPut pops two elements from the +stack. The top of the stack is used as a key to fetch an entry from the +index table. Then the integer which was second on the stack is added to the +set of integers for that index entry and the new record is written back to the +database file. Note +that the same index entry can store multiple integers if there +are two or more table entries with the same value for the "two" +column. +

    + +

    Now let's look at how this index will be used. Consider the +following query:

    + +
    +SELECT * FROM examp WHERE two==50;
    +
    + +

    SQLite generates the following VDBE code to handle this query:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      one                                
    +1     ColumnName    1      0      two                                
    +2     Integer       0      0                                         
    +3     OpenRead      0      3      examp                              
    +4     VerifyCookie  0      256                                            
    +5     Integer       0      0                                         
    +6     OpenRead      1      4      examp_idx1                         
    +7     Integer       50     0      50                            
    +8     MakeKey       1      0      n                                  
    +9     MemStore      0      0                                         
    +10    MoveTo        1      19                                             
    +11    MemLoad       0      0                                         
    +12    IdxGT         1      19                                             
    +13    IdxRecno      1      0                                         
    +14    MoveTo        0      0                                         
    +15    Column        0      0                                         
    +16    Column        0      1                                         
    +17    Callback      2      0                                         
    +18    Next          1      11                                        
    +19    Close         0      0                                         
    +20    Close         1      0                                         
    +21    Halt          0      0
    +

    The SELECT begins in a familiar fashion. First the column +names are initialized and the table being queried is opened. +Things become different beginning with instructions 5 and 6 where +the index file is also opened. Instructions 7 and 8 make +a key with the value of 50. +The MemStore instruction at 9 stores +the index key in VDBE memory location 0. The VDBE memory is used to +avoid having to fetch a value from deep in the stack, which can be done, +but makes the program harder to generate. The following instruction +MoveTo at address 10 pops the key off +the stack and moves the index cursor to the first row of the index with +that key. This initializes the cursor for use in the following loop.

    + +

    Instructions 11 through 18 implement a loop over all index records +with the key that was fetched by instruction 8. All of the index +records with this key will be contiguous in the index table, so we walk +through them and fetch the corresponding table key from the index. +This table key is then used to move the cursor to that row in the table. +The rest of the loop is the same as the loop for the non-indexed SELECT +query.

    + +

    The loop begins with the MemLoad +instruction at 11 which pushes a copy of the index key back onto the +stack. The instruction IdxGT at 12 +compares the key to the key in the current index record pointed to by +cursor P1. If the index key at the current cursor location is greater +than the the index we are looking for, then jump out of the loop.

    + +

    The instruction IdxRecno at 13 +pushes onto the stack the table record number from the index. The +following MoveTo pops it and moves the table cursor to that row. The +next 3 instructions select the column data the same way as in the non- +indexed case. The Column instructions fetch the column data and the +callback function is invoked. The final Next instruction advances the +index cursor, not the table cursor, to the next row, and then branches +back to the start of the loop if there are any index records left.
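
    Because the index is sorted, all of the entries for a given key sit next to one another, so the loop can start at the first match and stop as soon as the stored key grows past the one being sought. Here is a rough Python equivalent of that control flow, using a sorted list of (value, rowid) pairs and the standard bisect module (an illustration of the idea, not the b-tree code):

      import bisect

      # The index: a sorted list of (column value, table key) pairs.
      examp_idx1 = [(30, 7), (50, 2), (50, 9), (60, 4)]
      examp = {2: ("Hello!", 50), 4: ("high", 60), 7: ("low", 30), 9: ("again", 50)}

      def select_where_two_equals(value):
          pos = bisect.bisect_left(examp_idx1, (value,))   # MoveTo: first entry >= key
          results = []
          while pos < len(examp_idx1):
              entry_value, rowid = examp_idx1[pos]
              if entry_value > value:                      # IdxGT: past the last match
                  break
              results.append(examp[rowid])                 # IdxRecno, MoveTo, Column
              pos += 1                                     # Next on the index cursor
          return results

      print(select_where_two_equals(50))   # both rows whose "two" column is 50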

    + +

    Since the index is used to look up values in the table, +it is important that the index and table be kept consistent. +Now that there is an index on the examp table, we will have +to update that index whenever data is inserted, deleted, or +changed in the examp table. Remember the first example above +where we were able to insert a new row into the "examp" table using +12 VDBE instructions. Now that this table is indexed, 19 +instructions are required. The SQL statement is this:

    + +
    +INSERT INTO examp VALUES('Hello, World!',99);
    +
    + +

    And the generated code looks like this:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     Transaction   1      0                                         
    +1     Transaction   0      0                                         
    +2     VerifyCookie  0      256                                            
    +3     Integer       0      0                                         
    +4     OpenWrite     0      3      examp                              
    +5     Integer       0      0                                         
    +6     OpenWrite     1      4      examp_idx1                         
    +7     NewRecno      0      0                                         
    +8     String        0      0      Hello, World!                      
    +9     Integer       99     0      99                                 
    +10    Dup           2      1                                         
    +11    Dup           1      1                                         
    +12    MakeIdxKey    1      0      n                                  
    +13    IdxPut        1      0                                         
    +14    MakeRecord    2      0                                         
    +15    PutIntKey     0      1                                         
    +16    Close         0      0                                         
    +17    Close         1      0                                         
    +18    Commit        0      0                                         
    +19    Halt          0      0
    +

    At this point, you should understand the VDBE well enough to +figure out on your own how the above program works. So we will +not discuss it further in this text.

    + +

    Joins

    + +

    In a join, two or more tables are combined to generate a single +result. The result table consists of every possible combination +of rows from the tables being joined. The easiest and most natural +way to implement this is with nested loops.

    + +

    Recall the query template discussed above where there was a +single loop that searched through every record of the table. +In a join we have basically the same thing except that there +are nested loops. For example, to join two tables, the query +template might look something like this:

    + +

    +

    1. Initialize the azColumnName[] array for the callback.
    2. Open two cursors, one to each of the two tables being queried.
    3. For each record in the first table, do:
         1. For each record in the second table, do:
              1. If the WHERE clause evaluates to FALSE, then skip the
                 steps that follow and continue to the next record.
              2. Compute all columns for the current row of the result.
              3. Invoke the callback function for the current row of the result.
    4. Close both cursors.
    +

    + +

    This template will work, but it is likely to be slow since we +are now dealing with an O(N²) loop. But it often works +out that the WHERE clause can be factored into terms and that one or +more of those terms will involve only columns in the first table. +When this happens, we can factor part of the WHERE clause test out of +the inner loop and gain a lot of efficiency. So a better template +would be something like this (a short code sketch of the same idea appears after the template):

    + +

    +

    1. Initialize the azColumnName[] array for the callback.
    2. Open two cursors, one to each of the two tables being queried.
    3. For each record in the first table, do:
         1. Evaluate the terms of the WHERE clause that involve only columns
            from the first table.  If any term is false (meaning that the
            whole WHERE clause must be false) then skip the rest of this
            loop and continue to the next record.
         2. For each record in the second table, do:
              1. If the WHERE clause evaluates to FALSE, then skip the
                 steps that follow and continue to the next record.
              2. Compute all columns for the current row of the result.
              3. Invoke the callback function for the current row of the result.
    4. Close both cursors.
    +
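
    The improved template corresponds to two nested Python loops with the first-table-only test hoisted out of the inner loop. The sketch below only models the control flow for a query of the form used in the example further down (rows are plain tuples; none of this is SQLite code):

      def join(examp, examp2, callback):
          # SELECT * FROM examp, examp2 WHERE two<50 AND four==two
          for one, two in examp:                # outer loop over the left table
              if not (two < 50):                # factored term: involves examp only
                  continue
              for three, four in examp2:        # inner loop over the right table
                  if four == two:               # remaining WHERE term
                      callback([one, two, three, four])

      examp = [("a", 10), ("b", 70)]
      examp2 = [(1, 10), (2, 99)]
      join(examp, examp2, print)                # prints ['a', 10, 1, 10]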

    + +

    Additional speed-up can occur if an index can be used to speed +the search of either of the two loops.

    + +

    SQLite always constructs the loops in the same order as the +tables appear in the FROM clause of the SELECT statement. The +left-most table becomes the outer loop and the right-most table +becomes the inner loop. It is possible, in theory, to reorder +the loops in some circumstances to speed the evaluation of the +join. But SQLite does not attempt this optimization.

    + +

    You can see how SQLite constructs nested loops in the following +example:

    + +
    +CREATE TABLE examp2(three int, four int);
    +SELECT * FROM examp, examp2 WHERE two<50 AND four==two;
    +
    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      examp.one                          
    +1     ColumnName    1      0      examp.two                          
    +2     ColumnName    2      0      examp2.three                       
    +3     ColumnName    3      0      examp2.four                        
    +4     Integer       0      0                                         
    +5     OpenRead      0      3      examp                              
    +6     VerifyCookie  0      909                                            
    +7     Integer       0      0                                         
    +8     OpenRead      1      5      examp2                             
    +9     Rewind        0      24                                             
    +10    Column        0      1                                         
    +11    Integer       50     0      50                                 
    +12    Ge            1      23                                             
    +13    Rewind        1      23                                             
    +14    Column        1      1                                         
    +15    Column        0      1                                         
    +16    Ne            1      22                                        
    +17    Column        0      0                                         
    +18    Column        0      1                                         
    +19    Column        1      0                                         
    +20    Column        1      1                                         
    +21    Callback      4      0                                         
    +22    Next          1      14                                             
    +23    Next          0      10                                        
    +24    Close         0      0                                         
    +25    Close         1      0                                         
    +26    Halt          0      0
    +

    The outer loop over table examp is implemented by instructions +7 through 23. The inner loop is instructions 13 through 22. +Notice that the "two<50" term of the WHERE expression involves +only columns from the first table and can be factored out of +the inner loop. SQLite does this and implements the "two<50" +test in instructions 10 through 12. The "four==two" test is +implemented by instructions 14 through 16 in the inner loop.

    + +

    SQLite does not impose any arbitrary limits on the tables in +a join. It also allows a table to be joined with itself.

    + +

    The ORDER BY clause

    + +

    For historical reasons, and for efficiency, all sorting is currently +done in memory.

    + +

    SQLite implements the ORDER BY clause using a special +set of instructions to control an object called a sorter. In the +inner-most loop of the query, where there would normally be +a Callback instruction, instead a record is constructed that +contains both callback parameters and a key. This record +is added to the sorter (in a linked list). After the query loop +finishes, the list of records is sorted and this list is walked. For +each record on the list, the callback is invoked. Finally, the sorter +is closed and memory is deallocated.
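
    In effect the sorter replaces the Callback in the query loop with "append to a list", then sorts that list and drains it. A compact Python model of the same flow for the query shown next (the tuple sort key here merely stands in for what SortMakeKey builds):

      def sorted_query(rows, callback):
          # SELECT * FROM examp ORDER BY one DESC, two  -- conceptually.
          sorter = []
          for one, two in rows:                        # query loop: one SortPut per row
              sorter.append(((one, two), [one, two]))  # (sort key, callback arguments)
          # Two stable sorts give "one DESC, two": minor key first, major key last.
          sorter.sort(key=lambda rec: rec[0][1])                 # 'two' ascending
          sorter.sort(key=lambda rec: rec[0][0], reverse=True)   # 'one' descending
          for _key, args in sorter:                    # the SortNext/SortCallback loop
              callback(args)

      sorted_query([("pear", 2), ("apple", 1), ("pear", 1)], print)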

    + +

    We can see the process in action in the following query:

    + +
    +SELECT * FROM examp ORDER BY one DESC, two;
    +
    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      one                                
    +1     ColumnName    1      0      two                                
    +2     Integer       0      0                                         
    +3     OpenRead      0      3      examp                              
    +4     VerifyCookie  0      909                                            
    +5     Rewind        0      14                                             
    +6     Column        0      0                                         
    +7     Column        0      1                                         
    +8     SortMakeRec   2      0                                              
    +9     Column        0      0                                         
    +10    Column        0      1                                         
    +11    SortMakeKey   2      0      D+                                 
    +12    SortPut       0      0                                              
    +13    Next          0      6                                              
    +14    Close         0      0                                              
    +15    Sort          0      0                                              
    +16    SortNext      0      19                                             
    +17    SortCallback  2      0                                              
    +18    Goto          0      16                                             
    +19    SortReset     0      0                                         
    +20    Halt          0      0
    +

    There is only one sorter object, so there are no instructions to open +or close it. It is opened automatically when needed, and it is closed +when the VDBE program halts.

    + +

    The query loop is built from instructions 5 through 13. Instructions +6 through 8 build a record that contains the azData[] values for a single +invocation of the callback. A sort key is generated by instructions +9 through 11. Instruction 12 combines the invocation record and the +sort key into a single entry and puts that entry on the sort list.

    + +

    The P3 argument of instruction 11 is of particular interest. The +sort key is formed by prepending one character from P3 to each string +and concatenating all the strings. The sort comparison function will +look at this character to determine whether the sort order is +ascending or descending, and whether to sort as a string or number. +In this example, the first column should be sorted as a string +in descending order so its prefix is "D" and the second column should +be sorted numerically in ascending order so its prefix is "+". Ascending +string sorting uses "A", and descending numeric sorting uses "-".
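
    The effect of those prefix characters can be modelled column by column: each one selects text versus numeric comparison and ascending versus descending order. The sketch below captures that intent with repeated stable sorts; it is not the byte-level key encoding SQLite actually builds:

      def apply_order_by(records, prefixes):
          # 'prefixes' holds one character per column, as in the P3 string above:
          # 'A' = string ascending, 'D' = string descending,
          # '+' = numeric ascending, '-' = numeric descending.
          # Stable sorts are applied from the least significant column to the most.
          for col in range(len(prefixes) - 1, -1, -1):
              prefix = prefixes[col]
              numeric = prefix in "+-"
              records.sort(key=lambda row: float(row[col]) if numeric else row[col],
                           reverse=prefix in "D-")
          return records

      apply_order_by([["pear", 2], ["apple", 1], ["pear", 1]], "D+")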

    + +

    After the query loop ends, the table being queried is closed at +instruction 14. This is done early in order to allow other processes +or threads to access that table, if desired. The list of records +that was built up inside the query loop is sorted by the instruction +at 15. Instructions 16 through 18 walk through the record list +(which is now in sorted order) and invoke the callback once for +each record. Finally, the sorter is closed at instruction 19.

    + +

    Aggregate Functions And The GROUP BY and HAVING Clauses

    + +

    To compute aggregate functions, the VDBE implements a special +data structure and instructions for controlling that data structure. +The data structure is an unordered set of buckets, where each bucket +has a key and one or more memory locations. Within the query +loop, the GROUP BY clause is used to construct a key and the bucket +with that key is brought into focus. A new bucket is created with +the key if one did not previously exist. Once the bucket is in +focus, the memory locations of the bucket are used to accumulate +the values of the various aggregate functions. After the query +loop terminates, each bucket is visited once to generate a +single row of the results.

    + +

    An example will help to clarify this concept. Consider the +following query:

    + +
    +SELECT three, min(three+four)+avg(four) 
    +FROM examp2
    +GROUP BY three;
    +
    + + +

    The VDBE code generated for this query is as follows:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      three                              
    +1     ColumnName    1      0      min(three+four)+avg(four)          
    +2     AggReset      0      3                                              
    +3     AggInit       0      1      ptr(0x7903a0)                      
    +4     AggInit       0      2      ptr(0x790700)                      
    +5     Integer       0      0                                         
    +6     OpenRead      0      5      examp2                             
    +7     VerifyCookie  0      909                                            
    +8     Rewind        0      23                                             
    +9     Column        0      0                                         
    +10    MakeKey       1      0      n                                  
    +11    AggFocus      0      14                                             
    +12    Column        0      0                                         
    +13    AggSet        0      0                                         
    +14    Column        0      0                                         
    +15    Column        0      1                                         
    +16    Add           0      0                                         
    +17    Integer       1      0                                         
    +18    AggFunc       0      1      ptr(0x7903a0)                      
    +19    Column        0      1                                         
    +20    Integer       2      0                                         
    +21    AggFunc       0      1      ptr(0x790700)                      
    +22    Next          0      9                                              
    +23    Close         0      0                                              
    +24    AggNext       0      31                                        
    +25    AggGet        0      0                                              
    +26    AggGet        0      1                                              
    +27    AggGet        0      2                                         
    +28    Add           0      0                                         
    +29    Callback      2      0                                         
    +30    Goto          0      24                                             
    +31    Noop          0      0                                         
    +32    Halt          0      0
    +

    The first instruction of interest is the +AggReset at 2. +The AggReset instruction initializes the set of buckets to be the +empty set and specifies the number of memory slots available in each +bucket as P2. In this example, each bucket will hold 3 memory slots. +It is not obvious, but if you look closely at the rest of the program +you can figure out what each of these slots is intended for.

    + +
    + + + + +
    Memory Slot   Intended Use Of This Memory Slot
    -----------   ----------------------------------------------------------
         0        The "three" column -- the key to the bucket
         1        The minimum "three+four" value
         2        The sum of all "four" values.  This is used to compute
                  "avg(four)".
    + +

    The query loop is implemented by instructions 8 through 22. +The aggregate key specified by the GROUP BY clause is computed +by instructions 9 and 10. Instruction 11 causes the appropriate +bucket to come into focus. If a bucket with the given key does +not already exist, a new bucket is created and control falls +through to instructions 12 and 13 which initialize the bucket. +If the bucket does already exist, then a jump is made to instruction +14. The values of aggregate functions are updated by the instructions +between 11 and 21. Instructions 14 through 18 update memory +slot 1 to hold the next value "min(three+four)". Then the sum of the +"four" column is updated by instructions 19 through 21.

    + +

    After the query loop is finished, the table "examp2" is closed at +instruction 23 so that its lock will be released and it can be +used by other threads or processes. The next step is to loop +over all aggregate buckets and output one row of the result for +each bucket. This is done by the loop at instructions 24 +through 30. The AggNext instruction at 24 brings the next bucket +into focus, or jumps to the end of the loop if all buckets have +been examined already. The three values needed for the result are fetched from +the aggregator bucket in order at instructions 25 through 27. +Finally, the callback is invoked at instruction 29.

    + +

    In summary then, any query with aggregate functions is implemented +by two loops. The first loop scans the input table and computes +aggregate information into buckets and the second loop scans through +all the buckets to compute the final result.
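
    The two-loop structure maps naturally onto a dictionary keyed by the GROUP BY value. The following Python model of the example query is only conceptual; in particular it keeps an explicit running count for avg(), which the real program hides inside the avg() aggregator function:

      def group_query(rows, callback):
          # SELECT three, min(three+four)+avg(four) FROM examp2 GROUP BY three
          buckets = {}     # key -> [three, min(three+four), sum(four), count]
          for three, four in rows:                    # loop 1: scan the input table
              b = buckets.setdefault(three, [three, None, 0, 0])
              b[1] = three + four if b[1] is None else min(b[1], three + four)
              b[2] += four
              b[3] += 1
          for three, mn, total, count in buckets.values():  # loop 2: one row per bucket
              callback([three, mn + total / count])

      group_query([(1, 4), (1, 8), (3, 2)], print)    # -> [1, 11.0] and [3, 7.0]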

    + +

    The realization that an aggregate query is really two consecutive +loops makes it much easier to understand the difference between +a WHERE clause and a HAVING clause in an SQL query. The +WHERE clause is a restriction on the first loop and the HAVING +clause is a restriction on the second loop. You can see this +by adding both a WHERE and a HAVING clause to our example query:

    + + +
    +SELECT three, min(three+four)+avg(four) 
    +FROM examp2
    +WHERE three>four
    +GROUP BY three
    +HAVING avg(four)<10;
    +
    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     ColumnName    0      0      three                              
    +1     ColumnName    1      0      min(three+four)+avg(four)          
    +2     AggReset      0      3                                              
    +3     AggInit       0      1      ptr(0x7903a0)                      
    +4     AggInit       0      2      ptr(0x790700)                      
    +5     Integer       0      0                                         
    +6     OpenRead      0      5      examp2                             
    +7     VerifyCookie  0      909                                            
    +8     Rewind        0      26                                             
    +9     Column        0      0                                         
    +10    Column        0      1                                         
    +11    Le            1      25                                             
    +12    Column        0      0                                         
    +13    MakeKey       1      0      n                                  
    +14    AggFocus      0      17                                             
    +15    Column        0      0                                         
    +16    AggSet        0      0                                         
    +17    Column        0      0                                         
    +18    Column        0      1                                         
    +19    Add           0      0                                         
    +20    Integer       1      0                                         
    +21    AggFunc       0      1      ptr(0x7903a0)                      
    +22    Column        0      1                                         
    +23    Integer       2      0                                         
    +24    AggFunc       0      1      ptr(0x790700)                      
    +25    Next          0      9                                              
    +26    Close         0      0                                              
    +27    AggNext       0      37                                             
    +28    AggGet        0      2                                         
    +29    Integer       10     0      10                                 
    +30    Ge            1      27                                             
    +31    AggGet        0      0                                         
    +32    AggGet        0      1                                         
    +33    AggGet        0      2                                         
    +34    Add           0      0                                         
    +35    Callback      2      0                                         
    +36    Goto          0      27                                             
    +37    Noop          0      0                                         
    +38    Halt          0      0
    +

    The code generated in this last example is the same as the +previous except for the addition of two conditional jumps used +to implement the extra WHERE and HAVING clauses. The WHERE +clause is implemented by instructions 9 through 11 in the query +loop. The HAVING clause is implemented by instructions 28 through +30 in the output loop.
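
    In the dictionary model used earlier, the two clauses land in exactly those two places: the WHERE test guards the input scan and the HAVING test guards the output loop. A sketch, again purely illustrative:

      def group_query_filtered(rows, where, having, callback):
          buckets = {}     # key -> [three, min(three+four), sum(four), count]
          for three, four in rows:
              if not where(three, four):              # WHERE: restricts the first loop
                  continue
              b = buckets.setdefault(three, [three, None, 0, 0])
              b[1] = three + four if b[1] is None else min(b[1], three + four)
              b[2] += four
              b[3] += 1
          for three, mn, total, count in buckets.values():
              if not having(total / count):           # HAVING: restricts the second loop
                  continue
              callback([three, mn + total / count])

      group_query_filtered([(9, 4), (9, 8), (3, 20)],
                           where=lambda three, four: three > four,
                           having=lambda avg_four: avg_four < 10,
                           callback=print)            # -> [9, 19.0]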

    + +

    Using SELECT Statements As Terms In An Expression

    + +

    The very name "Structured Query Language" tells us that SQL should +support nested queries. And, in fact, two different kinds of nesting +are supported. Any SELECT statement that returns a single-row, single-column +result can be used as a term in an expression of another SELECT statement. +And, a SELECT statement that returns a single-column, multi-row result +can be used as the right-hand operand of the IN and NOT IN operators. +We will begin this section with an example of the first kind of nesting, +where a single-row, single-column SELECT is used as a term in an expression +of another SELECT. Here is our example:

    + +
    +SELECT * FROM examp
    +WHERE two!=(SELECT three FROM examp2
    +            WHERE four=5);
    +
    + +

    The way SQLite deals with this is to first run the inner SELECT +(the one against examp2) and store its result in a private memory +cell. SQLite then substitutes the value of this private memory +cell for the inner SELECT when it evaluates the outer SELECT. +The code looks like this:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     String        0      0                                         
    +1     MemStore      0      1                                         
    +2     Integer       0      0                                         
    +3     OpenRead      1      5      examp2                             
    +4     VerifyCookie  0      909                                            
    +5     Rewind        1      13                                             
    +6     Column        1      1                                         
    +7     Integer       5      0      5                                  
    +8     Ne            1      12                                        
    +9     Column        1      0                                         
    +10    MemStore      0      1                                         
    +11    Goto          0      13                                             
    +12    Next          1      6                                              
    +13    Close         1      0                                         
    +14    ColumnName    0      0      one                                
    +15    ColumnName    1      0      two                                
    +16    Integer       0      0                                         
    +17    OpenRead      0      3      examp                              
    +18    Rewind        0      26                                             
    +19    Column        0      1                                         
    +20    MemLoad       0      0                                         
    +21    Eq            1      25                                             
    +22    Column        0      0                                         
    +23    Column        0      1                                         
    +24    Callback      2      0                                         
    +25    Next          0      19                                             
    +26    Close         0      0                                         
    +27    Halt          0      0
    +

    The private memory cell is initialized to NULL by the first +two instructions. Instructions 2 through 13 implement the inner +SELECT statement against the examp2 table. Notice that instead of +sending the result to a callback or storing the result on a sorter, +the result of the query is pushed into the memory cell by instruction +10 and the loop is abandoned by the jump at instruction 11, so only +the first matching row of examp2 is ever used.

    + +

    The outer SELECT is implemented by instructions 14 through 25. +In particular, the WHERE clause that contains the nested select +is implemented by instructions 19 through 21. You can see that +the result of the inner select is loaded onto the stack by instruction +20 and used by the conditional jump at 21.

    + +

    When the result of a sub-select is a scalar, a single private memory +cell can be used, as shown in the previous +example. But when the result of a sub-select is a vector, such +as when the sub-select is the right-hand operand of IN or NOT IN, +a different approach is needed. In this case, +the result of the sub-select is +stored in a transient table and the contents of that table +are tested using the Found or NotFound operators. Consider this +example:

    + +
    +SELECT * FROM examp
    +WHERE two IN (SELECT three FROM examp2);
    +
    + +

    The code generated to implement this last query is as follows:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     OpenTemp      1      1                                         
    +1     Integer       0      0                                         
    +2     OpenRead      2      5      examp2                             
    +3     VerifyCookie  0      909                                            
    +4     Rewind        2      10                                        
    +5     Column        2      0                                         
    +6     IsNull        -1     9                                              
    +7     String        0      0                                         
    +8     PutStrKey     1      0                                         
    +9     Next          2      5                                              
    +10    Close         2      0                                         
    +11    ColumnName    0      0      one                                
    +12    ColumnName    1      0      two                                
    +13    Integer       0      0                                         
    +14    OpenRead      0      3      examp                              
    +15    Rewind        0      25                                             
    +16    Column        0      1                                         
    +17    NotNull       -1     20                                        
    +18    Pop           1      0                                         
    +19    Goto          0      24                                             
    +20    NotFound      1      24                                             
    +21    Column        0      0                                         
    +22    Column        0      1                                         
    +23    Callback      2      0                                         
    +24    Next          0      16                                             
    +25    Close         0      0                                         
    +26    Halt          0      0
    +

    The transient table in which the results of the inner SELECT are +stored is created by the OpenTemp +instruction at 0. This opcode is used for tables that exist for the +duration of a single SQL statement only. The transient cursor is always +opened read/write even if the main database is read-only. The transient +table is deleted automatically when the cursor is closed. The P2 value +of 1 means the cursor points to a BTree index, which has no data but can +have an arbitrary key.

    + +

    The inner SELECT statement is implemented by instructions 1 through 10. +All this code does is make an entry in the temporary table for each +row of the examp2 table with a non-NULL value for the "three" column. +The key for each temporary table entry is the "three" column of examp2 +and the data is an empty string since it is never used.

    + +

    The outer SELECT is implemented by instructions 11 through 25. In +particular, the WHERE clause containing the IN operator is implemented +by instructions at 16, 17, and 20. Instruction 16 pushes the value of +the "two" column for the current row onto the stack and instruction 17 +checks to see that it is non-NULL. If this is successful, execution +jumps to 20, where it tests to see if top of the stack matches any key +in the temporary table. The rest of the code is the same as what has +been shown before.
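
    Since only the keys of the transient table matter, it behaves exactly like a set, and the whole IN pattern can be modelled in a few lines of Python (illustrative only; the NULL handling mirrors the IsNull/NotNull tests above):

      def in_subselect(examp, examp2, callback):
          # SELECT * FROM examp WHERE two IN (SELECT three FROM examp2)
          transient = set()                     # the OpenTemp table: keys only
          for three, four in examp2:            # inner SELECT fills the transient table
              if three is not None:             # IsNull: NULL values are never added
                  transient.add(three)
          for one, two in examp:                # outer SELECT
              if two is None:                   # NotNull: NULL never matches IN
                  continue
              if two in transient:              # the NotFound test, inverted
                  callback([one, two])

      in_subselect([("a", 5), ("b", 7), ("c", None)], [(5, 0), (9, 0)], print)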

    + +

    Compound SELECT Statements

    + +

    SQLite also allows two or more SELECT statements to be joined as +peers using operators UNION, UNION ALL, INTERSECT, and EXCEPT. These +compound select statements are implemented using transient tables. +The implementation is slightly different for each operator, but the +basic ideas are the same. For an example we will use the EXCEPT +operator.

    + +
    +SELECT two FROM examp
    +EXCEPT
    +SELECT four FROM examp2;
    +
    + +

    The result of this last example should be every unique value +of the "two" column in the examp table, except that any value also +appearing in the "four" column of examp2 is removed. The code to implement +this query is as follows:

    +
    addr  opcode        p1     p2     p3                                      
    +----  ------------  -----  -----  -----------------------------------
    +0     OpenTemp      0      1                                         
    +1     KeyAsData     0      1                                              
    +2     Integer       0      0                                         
    +3     OpenRead      1      3      examp                              
    +4     VerifyCookie  0      909                                            
    +5     Rewind        1      11                                        
    +6     Column        1      1                                         
    +7     MakeRecord    1      0                                         
    +8     String        0      0                                         
    +9     PutStrKey     0      0                                         
    +10    Next          1      6                                              
    +11    Close         1      0                                         
    +12    Integer       0      0                                         
    +13    OpenRead      2      5      examp2                             
    +14    Rewind        2      20                                        
    +15    Column        2      1                                         
    +16    MakeRecord    1      0                                         
    +17    NotFound      0      19                                             
    +18    Delete        0      0                                         
    +19    Next          2      15                                             
    +20    Close         2      0                                         
    +21    ColumnName    0      0      four                               
    +22    Rewind        0      26                                             
    +23    Column        0      0                                         
    +24    Callback      1      0                                         
    +25    Next          0      23                                             
    +26    Close         0      0                                         
    +27    Halt          0      0
    +

    The transient table in which the result is built is created by +instruction 0. Three loops then follow. The loop at instructions +5 through 10 implements the first SELECT statement. The second +SELECT statement is implemented by the loop at instructions 14 through +19. Finally, a loop at instructions 22 through 25 reads the transient +table and invokes the callback once for each row in the result.

    + +

    Instruction 1 is of particular importance in this example. Normally, +the Column instruction extracts the value of a column from a larger +record in the data of an SQLite file entry. Instruction 1 sets a flag on +the transient table so that Column will instead treat the key of the +SQLite file entry as if it were data and extract column information from +the key.

    + +

    Here is what is going to happen: The first SELECT statement +will construct rows of the result and save each row as the key of +an entry in the transient table. The data for each entry in the +transient table is never used, so we fill it in with an empty string. +The second SELECT statement also constructs rows, but the rows +constructed by the second SELECT are removed from the transient table. +That is why we want the rows to be stored in the key of the SQLite file +instead of in the data -- so they can be easily located and deleted.
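    In other words, the effect of the program is close to what the following rewritten query would compute. This is offered only as a rough, illustrative equivalent (it is not how SQLite actually compiles EXCEPT, and NOT IN treats NULLs differently than EXCEPT does):

    SELECT DISTINCT two FROM examp
    WHERE two NOT IN (SELECT four FROM examp2);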

    + +

    Let's look more closely at what is happening here. The first +SELECT is implemented by the loop at instructions 5 through 10. +Instruction 5 initializes the loop by rewinding its cursor. +Instruction 6 extracts the value of the "two" column from "examp" +and instruction 7 converts this into a row. Instruction 8 pushes +an empty string onto the stack. Finally, instruction 9 writes the +row into the temporary table. But remember, the PutStrKey opcode uses +the top of the stack as the record data and the next on stack as the +key. For an INSERT statement, the row generated by the +MakeRecord opcode is the record data and the record key is an integer +created by the NewRecno opcode. But here the roles are reversed and +the row created by MakeRecord is the record key and the record data is +just an empty string.

    + +

    The second SELECT is implemented by instructions 14 through 19. +Instruction 14 initializes the loop by rewinding its cursor. +A new result row is created from the "four" column of table "examp2" +by instructions 15 and 16. But instead of using PutStrKey to write this +new row into the temporary table, we instead call Delete to remove +it from the temporary table if it exists.

    + +

    The result of the compound select is sent to the callback routine +by the loop at instructions 22 through 25. There is nothing new +or remarkable about this loop, except for the fact that the Column +instruction at 23 will be extracting a column out of the record key +rather than the record data.

    + +

    Summary

    + +

    This article has reviewed all of the major techniques used by +SQLite's VDBE to implement SQL statements. What has not been shown +is that most of these techniques can be used in combination to +generate code for an arbitrarily complex query statement. For +example, we have shown how sorting is accomplished on a simple query +and we have shown how to implement a compound query. But we did +not give an example of sorting in a compound query. This is because +sorting a compound query does not introduce any new concepts: it +merely combines two previous ideas (sorting and compounding) +in the same VDBE program.
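    For instance, a sorted compound query such as the following (offered only as an illustration; its VDBE listing is not reproduced here) would presumably wrap the EXCEPT program shown earlier in the same sorter instructions that were used for the ORDER BY example:

    SELECT two FROM examp
    EXCEPT
    SELECT four FROM examp2
    ORDER BY two;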

    + +

    For additional information on how the SQLite library +functions, the reader is directed to look at the SQLite source +code directly. If you understand the material in this article, +you should not have much difficulty in following the sources. +Serious students of the internals of SQLite will probably +also want to make a careful study of the VDBE opcodes +as documented here. Most of the +opcode documentation is extracted from comments in the source +code using a script so you can also get information about the +various opcodes directly from the vdbe.c source file. +If you have successfully read this far, you should have little +difficulty understanding the rest.

    + +

    If you find errors in either the documentation or the code, +feel free to fix them and/or contact the author at +drh@hwaci.com. Your bug fixes or +suggestions are always welcomed.

    + +
    +This page last modified 2008/10/03 02:14:51 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/vdbe.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/vdbe.tcl --- sqlite3-3.4.2/www/vdbe.tcl 2006-08-07 12:31:13.000000000 +0100 +++ sqlite3-3.6.16/www/vdbe.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,1988 +0,0 @@ -# -# Run this Tcl script to generate the vdbe.html file. -# -set rcsid {$Id: vdbe.tcl,v 1.14 2005/03/12 15:55:11 drh Exp $} -source common.tcl -header {The Virtual Database Engine of SQLite} -puts { -

    The Virtual Database Engine of SQLite

    - -
    -This document describes the virtual machine used in SQLite version 2.8.0. -The virtual machine in SQLite version 3.0 and 3.1 is very similar in -concept but many of the opcodes have changed and the algorithms are -somewhat different. Use this document as a rough guide to the idea -behind the virtual machine in SQLite version 3, not as a reference on -how the virtual machine works. -
    -} - -puts { -

    If you want to know how the SQLite library works internally, -you need to begin with a solid understanding of the Virtual Database -Engine or VDBE. The VDBE occurs right in the middle of the -processing stream (see the architecture diagram) -and so it seems to touch most parts of the library. Even -parts of the code that do not directly interact with the VDBE -are usually in a supporting role. The VDBE really is the heart of -SQLite.

    - -

    This article is a brief introduction to how the VDBE -works and in particular how the various VDBE instructions -(documented here) work together -to do useful things with the database. The style is tutorial, -beginning with simple tasks and working toward solving more -complex problems. Along the way we will visit most -submodules in the SQLite library. After completing this tutorial, -you should have a pretty good understanding of how SQLite works -and will be ready to begin studying the actual source code.

    - -

    Preliminaries

    - -

    The VDBE implements a virtual computer that runs a program in -its virtual machine language. The goal of each program is to -interrogate or change the database. Toward this end, the machine -language that the VDBE implements is specifically designed to -search, read, and modify databases.

    - -

    Each instruction of the VDBE language contains an opcode and -three operands labeled P1, P2, and P3. Operand P1 is an arbitrary -integer. P2 is a non-negative integer. P3 is a pointer to a data -structure or null-terminated string, possibly null. Only a few VDBE -instructions use all three operands. Many instructions use only -one or two operands. A significant number of instructions use -no operands at all but instead take their data and store their results -on the execution stack. The details of what each instruction -does and which operands it uses are described in the separate -opcode description document.

    - -

    A VDBE program begins -execution on instruction 0 and continues with successive instructions -until it either (1) encounters a fatal error, (2) executes a -Halt instruction, or (3) advances the program counter past the -last instruction of the program. When the VDBE completes execution, -all open database cursors are closed, all memory is freed, and -everything is popped from the stack. -So there are never any worries about memory leaks or -undeallocated resources.

    - -

    If you have done any assembly language programming or have -worked with any kind of abstract machine before, all of these -details should be familiar to you. So let's jump right in and -start looking at some code.

    - - -

    Inserting Records Into The Database

    - -

    We begin with a problem that can be solved using a VDBE program -that is only a few instructions long. Suppose we have an SQL -table that was created like this:

    - -
    -CREATE TABLE examp(one text, two int);
    -
    - -

    In words, we have a database table named "examp" that has two -columns of data named "one" and "two". Now suppose we want to insert a single -record into this table. Like this:

    - -
    -INSERT INTO examp VALUES('Hello, World!',99);
    -
    - -

    We can see the VDBE program that SQLite uses to implement this -INSERT using the sqlite command-line utility. First start -up sqlite on a new, empty database, then create the table. -Next change the output format of sqlite to a form that -is designed to work with VDBE program dumps by entering the -".explain" command. -Finally, enter the INSERT statement shown above, but precede the -INSERT with the special keyword "EXPLAIN". The EXPLAIN keyword -will cause sqlite to print the VDBE program rather than -execute it. We have:

    -} -proc Code {body} { - puts {
    } - regsub -all {&} [string trim $body] {\&} body - regsub -all {>} $body {\>} body - regsub -all {<} $body {\<} body - regsub -all {\(\(\(} $body {} body - regsub -all {\)\)\)} $body {} body - regsub -all { } $body {\ } body - regsub -all \n $body
    \n body - puts $body - puts {
    } -} - -Code { -$ (((sqlite test_database_1))) -sqlite> (((CREATE TABLE examp(one text, two int);))) -sqlite> (((.explain))) -sqlite> (((EXPLAIN INSERT INTO examp VALUES('Hello, World!',99);))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 0 0 -1 VerifyCookie 0 81 -2 Transaction 1 0 -3 Integer 0 0 -4 OpenWrite 0 3 examp -5 NewRecno 0 0 -6 String 0 0 Hello, World! -7 Integer 99 0 99 -8 MakeRecord 2 0 -9 PutIntKey 0 1 -10 Close 0 0 -11 Commit 0 0 -12 Halt 0 0 -} - -puts {

    As you can see above, our simple insert statement is -implemented in 12 instructions. The first 3 and last 2 instructions are -a standard prologue and epilogue, so the real work is done in the middle -7 instructions. There are no jumps, so the program executes once through -from top to bottom. Let's now look at each instruction in detail.

    -} - -Code { -0 Transaction 0 0 -1 VerifyCookie 0 81 -2 Transaction 1 0 -} -puts { -

    The instruction Transaction -begins a transaction. The transaction ends when a Commit or Rollback -opcode is encountered. P1 is the index of the database file on which -the transaction is started. Index 0 is the main database file. A write -lock is obtained on the database file when a transaction is started. -No other process can read or write the file while the transaction is -underway. Starting a transaction also creates a rollback journal. A -transaction must be started before any changes can be made to the -database.

    - -

    The instruction VerifyCookie -checks cookie 0 (the database schema version) to make sure it is equal -to P2 (the value obtained when the database schema was last read). -P1 is the database number (0 for the main database). This is done to -make sure the database schema hasn't been changed by another thread, in -which case it has to be reread.

    - -

    The second Transaction -instruction begins a transaction and starts a rollback journal for -database 1, the database used for temporary tables.

    -} - -proc stack args { - puts "
    " - foreach elem $args { - puts "" - } - puts "
    $elem
    " -} - -Code { -3 Integer 0 0 -4 OpenWrite 0 3 examp -} -puts { -

    The instruction Integer pushes -the integer value P1 (0) onto the stack. Here 0 is the number of the -database to use in the following OpenWrite instruction. If P3 is not -NULL then it is a string representation of the same integer. Afterwards -the stack looks like this:

    -} -stack {(integer) 0} - -puts { -

    The instruction OpenWrite opens -a new read/write cursor with handle P1 (0 in this case) on table "examp", -whose root page is P2 (3, in this database file). Cursor handles can be -any non-negative integer. But the VDBE allocates cursors in an array -with the size of the array being one more than the largest cursor. So -to conserve memory, it is best to use handles beginning with zero and -working upward consecutively. Here P3 ("examp") is the name of the -table being opened, but this is unused, and only generated to make the -code easier to read. This instruction pops the database number to use -(0, the main database) from the top of the stack, so afterwards the -stack is empty again.

    -} - -Code { -5 NewRecno 0 0 -} -puts { -

    The instruction NewRecno creates -a new integer record number for the table pointed to by cursor P1. The -record number is one not currently used as a key in the table. The new -record number is pushed onto the stack. Afterwards the stack looks like -this:

    -} -stack {(integer) new record key} - -Code { -6 String 0 0 Hello, World! -} -puts { -

    The instruction String pushes its -P3 operand onto the stack. Afterwards the stack looks like this:

    -} -stack {(string) "Hello, World!"} \ - {(integer) new record key} - -Code { -7 Integer 99 0 99 -} -puts { -

    The instruction Integer pushes -its P1 operand (99) onto the stack. Afterwards the stack looks like -this:

    -} -stack {(integer) 99} \ - {(string) "Hello, World!"} \ - {(integer) new record key} - -Code { -8 MakeRecord 2 0 -} -puts { -

    The instruction MakeRecord pops -the top P1 elements off the stack (2 in this case) and converts them into -the binary format used for storing records in a database file. -(See the file format description for -details.) The new record generated by the MakeRecord instruction is -pushed back onto the stack. Afterwards the stack looks like this:

    - -} -stack {(record) "Hello, World!", 99} \ - {(integer) new record key} - -Code { -9 PutIntKey 0 1 -} -puts { -

    The instruction PutIntKey uses -the top 2 stack entries to write an entry into the table pointed to by -cursor P1. A new entry is created if it doesn't already exist or the -data for an existing entry is overwritten. The record data is the top -stack entry, and the key is the next entry down. The stack is popped -twice by this instruction. Because operand P2 is 1 the row change count -is incremented and the rowid is stored for subsequent return by the -sqlite_last_insert_rowid() function. If P2 is 0 the row change count is -unmodified. This instruction is where the insert actually occurs.

    -} - -Code { -10 Close 0 0 -} -puts { -

    The instruction Close closes a -cursor previously opened as P1 (0, the only open cursor). If P1 is not -currently open, this instruction is a no-op.

    -} - -Code { -11 Commit 0 0 -} -puts { -

    The instruction Commit causes all -modifications to the database that have been made since the last -Transaction to actually take effect. No additional modifications are -allowed until another transaction is started. The Commit instruction -deletes the journal file and releases the write lock on the database. -A read lock continues to be held if there are still cursors open.

    -} - -Code { -12 Halt 0 0 -} -puts { -

    The instruction Halt causes the VDBE -engine to exit immediately. All open cursors, Lists, Sorts, etc are -closed automatically. P1 is the result code returned by sqlite_exec(). -For a normal halt, this should be SQLITE_OK (0). For errors, it can be -some other value. The operand P2 is only used when there is an error. -There is an implied "Halt 0 0 0" instruction at the end of every -program, which the VDBE appends when it prepares a program to run.

    - - - -

    Tracing VDBE Program Execution

    - -

    If the SQLite library is compiled without the NDEBUG preprocessor -macro, then the PRAGMA vdbe_trace - causes the VDBE to trace the execution of programs. Though this -feature was originally intended for testing and debugging, it can also -be useful in learning about how the VDBE operates. -Use "PRAGMA vdbe_trace=ON;" to turn tracing on and -"PRAGMA vdbe_trace=OFF" to turn tracing back off. -Like this:

    -} - -Code { -sqlite> (((PRAGMA vdbe_trace=ON;))) - 0 Halt 0 0 -sqlite> (((INSERT INTO examp VALUES('Hello, World!',99);))) - 0 Transaction 0 0 - 1 VerifyCookie 0 81 - 2 Transaction 1 0 - 3 Integer 0 0 -Stack: i:0 - 4 OpenWrite 0 3 examp - 5 NewRecno 0 0 -Stack: i:2 - 6 String 0 0 Hello, World! -Stack: t[Hello,.World!] i:2 - 7 Integer 99 0 99 -Stack: si:99 t[Hello,.World!] i:2 - 8 MakeRecord 2 0 -Stack: s[...Hello,.World!.99] i:2 - 9 PutIntKey 0 1 - 10 Close 0 0 - 11 Commit 0 0 - 12 Halt 0 0 -} - -puts { -

    With tracing mode on, the VDBE prints each instruction prior -to executing it. After the instruction is executed, the top few -entries in the stack are displayed. The stack display is omitted -if the stack is empty.

    - -

    On the stack display, most entries are shown with a prefix -that tells the datatype of that stack entry. Integers begin -with "i:". Floating point values begin with "r:". -(The "r" stands for "real-number".) Strings begin with either -"s:", "t:", "e:" or "z:". -The difference among the string prefixes is caused by how their -memory is allocated. The z: strings are stored in memory obtained -from malloc(). The t: strings are statically allocated. -The e: strings are ephemeral. All other strings have the s: prefix. -This doesn't make any difference to you, -the observer, but it is vitally important to the VDBE since the -z: strings need to be passed to free() when they are -popped to avoid a memory leak. Note that only the first 10 -characters of string values are displayed and that binary -values (such as the result of the MakeRecord instruction) are -treated as strings. The only other datatype that can be stored -on the VDBE stack is a NULL, which is displayed without a prefix -as simply "NULL". If an integer has been placed on the -stack as both an integer and a string, its prefix is "si:". - - - -

    Simple Queries

    - -

    At this point, you should understand the basics of how the VDBE -writes to a database. Now let's look at how it does queries. -We will use the following simple SELECT statement as our example:

    - -
    -SELECT * FROM examp;
    -
    - -

    The VDBE program generated for this SQL statement is as follows:

    -} - -Code { -sqlite> (((EXPLAIN SELECT * FROM examp;))) -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 81 -5 Rewind 0 10 -6 Column 0 0 -7 Column 0 1 -8 Callback 2 0 -9 Next 0 6 -10 Close 0 0 -11 Halt 0 0 -} - -puts { -

    Before we begin looking at this problem, let's briefly review -how queries work in SQLite so that we will know what we are trying -to accomplish. For each row in the result of a query, -SQLite will invoke a callback function with the following -prototype:

    - -
    -int Callback(void *pUserData, int nColumn, char *azData[], char *azColumnName[]);
    -
    - -

    The SQLite library supplies the VDBE with a pointer to the callback function -and the pUserData pointer. (Both the callback and the user data were -originally passed in as arguments to the sqlite_exec() API function.) -The job of the VDBE is to -come up with values for nColumn, azData[], -and azColumnName[]. -nColumn is the number of columns in the results, of course. -azColumnName[] is an array of strings where each string is the name -of one of the result columns. azData[] is an array of strings holding -the actual data.

    -} - -Code { -0 ColumnName 0 0 one -1 ColumnName 1 0 two -} -puts { -

    The first two instructions in the VDBE program for our query are -concerned with setting up values for azColumnName[]. -The ColumnName instructions tell -the VDBE what values to fill in for each element of the azColumnName[] -array. Every query will begin with one ColumnName instruction for each -column in the result, and there will be a matching Column instruction for -each one later in the query. -

    -} - -Code { -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 81 -} -puts { -

    Instructions 2 and 3 open a read cursor on the database table that is -to be queried. This works the same as the OpenWrite instruction in the -INSERT example except that the cursor is opened for reading this time -instead of for writing. Instruction 4 verifies the database schema as -in the INSERT example.

    -} - -Code { -5 Rewind 0 10 -} -puts { -

    The Rewind instruction initializes -a loop that iterates over the "examp" table. It rewinds the cursor P1 -to the first entry in its table. This is required by the Column and -Next instructions, which use the cursor to iterate through the table. -If the table is empty, then jump to P2 (10), which is the instruction just -past the loop. If the table is not empty, fall through to the following -instruction at 6, which is the beginning of the loop body.

    -} - -Code { -6 Column 0 0 -7 Column 0 1 -8 Callback 2 0 -} -puts { -

    The instructions 6 through 8 form the body of the loop that will -execute once for each record in the database file. - -The Column instructions at addresses 6 -and 7 each take the P2-th column from the P1-th cursor and push it onto -the stack. In this example, the first Column instruction is pushing the -value for the column "one" onto the stack and the second Column -instruction is pushing the value for column "two". - -The Callback instruction at address 8 -invokes the callback() function. The P1 operand to Callback becomes the -value for nColumn. The Callback instruction pops P1 values from -the stack and uses them to fill the azData[] array.

    -} - -Code { -9 Next 0 6 -} -puts { -

    The instruction at address 9 implements the branching part of the -loop. Together with the Rewind at address 5 it forms the loop logic. -This is a key concept that you should pay close attention to. -The Next instruction advances the cursor -P1 to the next record. If the cursor advance was successful, then jump -immediately to P2 (6, the beginning of the loop body). If the cursor -was at the end, then fall through to the following instruction, which -ends the loop.

    -} - -Code { -10 Close 0 0 -11 Halt 0 0 -} -puts { -

    The Close instruction at the end of the program closes the -cursor that points into the table "examp". It is not really necessary -to call Close here since all cursors will be automatically closed -by the VDBE when the program halts. But we needed an instruction -for the Rewind to jump to so we might as well go ahead and have that -instruction do something useful. -The Halt instruction ends the VDBE program.

    - -

    Note that the program for this SELECT query didn't contain the -Transaction and Commit instructions used in the INSERT example. Because -the SELECT is a read operation that doesn't alter the database, it -doesn't require a transaction.

    -} - - -puts { - -

    A Slightly More Complex Query

    - -

    The key points of the previous example were the use of the Callback -instruction to invoke the callback function, and the use of the Next -instruction to implement a loop over all records of the database file. -This example attempts to drive home those ideas by demonstrating a -slightly more complex query that involves more columns of -output, some of which are computed values, and a WHERE clause that -limits which records actually make it to the callback function. -Consider this query:

    - -
    -SELECT one, two, one || two AS 'both'
    -FROM examp
    -WHERE one LIKE 'H%'
    -
    - -

    This query is perhaps a bit contrived, but it does serve to -illustrate our points. The result will have three columns with -names "one", "two", and "both". The first two columns are direct -copies of the two columns in the table and the third result -column is a string formed by concatenating the first and -second columns of the table. -Finally, the -WHERE clause says that we will only choose rows for the -results where the "one" column begins with an "H". -Here is what the VDBE program looks like for this query:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 ColumnName 2 0 both -3 Integer 0 0 -4 OpenRead 0 3 examp -5 VerifyCookie 0 81 -6 Rewind 0 18 -7 String 0 0 H% -8 Column 0 0 -9 Function 2 0 ptr(0x7f1ac0) -10 IfNot 1 17 -11 Column 0 0 -12 Column 0 1 -13 Column 0 0 -14 Column 0 1 -15 Concat 2 0 -16 Callback 3 0 -17 Next 0 7 -18 Close 0 0 -19 Halt 0 0 -} - -puts { -

    Except for the WHERE clause, the structure of the program for -this example is very much like the prior example, just with an -extra column. There are now 3 columns, instead of 2 as before, -and there are three ColumnName instructions. -A cursor is opened using the OpenRead instruction, just like in the -prior example. The Rewind instruction at address 6 and the -Next at address 17 form a loop over all records of the table. -The Close instruction at the end is there to give the -Rewind instruction something to jump to when it is done. All of -this is just like in the first query demonstration.

    - -

    The Callback instruction in this example has to generate -data for three result columns instead of two, but is otherwise -the same as in the first query. When the Callback instruction -is invoked, the left-most column of the result should be -the lowest in the stack and the right-most result column should -be the top of the stack. We can see the stack being set up -this way at addresses 11 through 15. The Column instructions at -11 and 12 push the values for the first two columns in the result. -The two Column instructions at 13 and 14 pull in the values needed -to compute the third result column and the Concat instruction at -15 joins them together into a single entry on the stack.

    - -

    The only thing that is really new about the current example -is the WHERE clause which is implemented by instructions at -addresses 7 through 10. Instructions at address 7 and 8 push -onto the stack the value of the "one" column from the table -and the literal string "H%". -The Function instruction at address 9 -pops these two values from the stack and pushes the result of the LIKE() -function back onto the stack. -The IfNot instruction pops the top stack -value and causes an immediate jump forward to the Next instruction if the -top value was false (not like the literal string "H%"). -Taking this jump effectively skips the callback, which is the whole point -of the WHERE clause. If the result -of the comparison is true, the jump is not taken and control -falls through to the Callback instruction below.

    - -

    Notice how the LIKE operator is implemented. It is a user-defined -function in SQLite, so the address of its function definition is -specified in P3. The operand P1 is the number of function arguments for -it to take from the stack. In this case the LIKE() function takes 2 -arguments. The arguments are taken off the stack in reverse order -(right-to-left), so the pattern to match is the top stack element, and -the next element is the data to compare. The return value is pushed -onto the stack.

    - - - -

    A Template For SELECT Programs

    - -

    The first two query examples illustrate a kind of template that -every SELECT program will follow. Basically, we have:

    - -

    -

      -
    1. Initialize the azColumnName[] array for the callback.
    2. Open a cursor into the table to be queried.
    3. For each record in the table, do:
       1. If the WHERE clause evaluates to FALSE, then skip the steps that
          follow and continue to the next record.
       2. Compute all columns for the current row of the result.
       3. Invoke the callback function for the current row of the result.
    4. Close the cursor.
    -

    - -

    This template will be expanded considerably as we consider -additional complications such as joins, compound selects, using -indices to speed the search, sorting, and aggregate functions -with and without GROUP BY and HAVING clauses. -But the same basic ideas will continue to apply.
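    As a preview of where this is heading, a single statement can draw on several of those complications at once. A query like the following (shown only as an illustration, without its VDBE listing) combines a WHERE clause, aggregate functions, GROUP BY, HAVING, and ORDER BY, yet it is still compiled into one VDBE program built around the same basic template:

    SELECT one, count(*), avg(two)
    FROM examp
    WHERE two < 50
    GROUP BY one
    HAVING count(*) > 1
    ORDER BY one;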

    - -

    UPDATE And DELETE Statements

    - -

    The UPDATE and DELETE statements are coded using a template -that is very similar to the SELECT statement template. The main -difference, of course, is that the end action is to modify the -database rather than invoke a callback function. Because it modifies -the database it will also use transactions. Let's begin -by looking at a DELETE statement:

    - -
    -DELETE FROM examp WHERE two<50;
    -
    - -

    This DELETE statement will remove every record from the "examp" -table where the "two" column is less than 50. -The code generated to do this is as follows:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -5 Rewind 0 12 -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -9 Recno 0 0 -10 ListWrite 0 0 -11 Next 0 6 -12 Close 0 0 -13 ListRewind 0 0 -14 Integer 0 0 -15 OpenWrite 0 3 -16 ListRead 0 20 -17 NotExists 0 19 -18 Delete 0 1 -19 Goto 0 16 -20 ListReset 0 0 -21 Close 0 0 -22 Commit 0 0 -23 Halt 0 0 -} - -puts { -

    Here is what the program must do. First it has to locate all of -the records in the table "examp" that are to be deleted. This is -done using a loop very much like the loop used in the SELECT examples -above. Once all records have been located, then we can go back through -and delete them one by one. Note that we cannot delete each record -as soon as we find it. We have to locate all records first, then -go back and delete them. This is because the SQLite database -backend might change the scan order after a delete operation. -And if the scan -order changes in the middle of the scan, some records might be -visited more than once and other records might not be visited at all.

    - -

    So the implementation of DELETE is really in two loops. The first loop -(instructions 5 through 11) locates the records that are to be deleted -and saves their keys onto a temporary list, and the second loop -(instructions 16 through 19) uses the key list to delete the records one -by one.

    -} - - -Code { -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -} -puts { -

    Instructions 0 through 4 are as in the INSERT example. They start -transactions for the main and temporary databases, verify the database -schema for the main database, and open a read cursor on the table -"examp". Notice that the cursor is opened for reading, not writing. At -this stage of the program we are only going to be scanning the table, -not changing it. We will reopen the same table for writing later, at -instruction 15.

    -} - -Code { -5 Rewind 0 12 -} -puts { -

    As in the SELECT example, the Rewind -instruction rewinds the cursor to the beginning of the table, readying -it for use in the loop body.

    -} - -Code { -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -} -puts { -

    The WHERE clause is implemented by instructions 6 through 8. -The job of the where clause is to skip the ListWrite if the WHERE -condition is false. To this end, it jumps ahead to the Next instruction -if the "two" column (extracted by the Column instruction) is -greater than or equal to 50.

    - -

    As before, the Column instruction uses cursor P1 and pushes the data -record in column P2 (1, column "two") onto the stack. The Integer -instruction pushes the value 50 onto the top of the stack. After these -two instructions the stack looks like:

    -} -stack {(integer) 50} \ - {(record) current record for column "two" } - -puts { -

    The Ge operator compares the top two -elements on the stack, pops them, and then branches based on the result -of the comparison. If the second element is >= the top element, then -jump to address P2 (the Next instruction at the end of the loop). -Because P1 is true, if either operand is NULL (and thus the result is -NULL) then take the jump. If we don't jump, just advance to the next -instruction.

    -} - -Code { -9 Recno 0 0 -10 ListWrite 0 0 -} -puts { -

    The Recno instruction pushes onto the -stack an integer which is the first 4 bytes of the key to the current -entry in a sequential scan of the table pointed to by cursor P1. -The ListWrite instruction writes the -integer on the top of the stack into a temporary storage list and pops -the top element. This is the important work of this loop, to store the -keys of the records to be deleted so we can delete them in the second -loop. After this ListWrite instruction the stack is empty again.

    -} - -Code { -11 Next 0 6 -12 Close 0 0 -} -puts { -

    The Next instruction increments the cursor to point to the next -element in the table pointed to by cursor P1, and if it was successful -branches to P2 (6, the beginning of the loop body). The Close -instruction closes cursor P1. It doesn't affect the temporary storage -list because it isn't associated with cursor P1; it is instead a global -working list (which can be saved with ListPush).

    -} - -Code { -13 ListRewind 0 0 -} -puts { -

    The ListRewind instruction -rewinds the temporary storage list to the beginning. This prepares it -for use in the second loop.

    -} - -Code { -14 Integer 0 0 -15 OpenWrite 0 3 -} -puts { -

    As in the INSERT example, we push the database number P1 (0, the main -database) onto the stack and use OpenWrite to open the cursor P1 on table -P2 (base page 3, "examp") for modification.

    -} - -Code { -16 ListRead 0 20 -17 NotExists 0 19 -18 Delete 0 1 -19 Goto 0 16 -} -puts { -

    This loop does the actual deleting. It is organized differently from -the one in the UPDATE example. The ListRead instruction plays the role -that the Next did in the INSERT loop, but because it jumps to P2 on -failure, and Next jumps on success, we put it at the start of the loop -instead of the end. This means that we have to put a Goto at the end of -the loop to jump back to the loop test at the beginning. So this -loop has the form of a C while(){...} loop, while the loop in the INSERT -example had the form of a do{...}while() loop. The Delete instruction -fills the role that the callback function did in the preceding examples. -

    -

    The ListRead instruction reads an -element from the temporary storage list and pushes it onto the stack. -If this was successful, it continues to the next instruction. If this -fails because the list is empty, it branches to P2, which is the -instruction just after the loop. Afterwards the stack looks like:

    -} -stack {(integer) key for current record} - -puts { -

    Notice the similarity between the ListRead and Next instructions. -Both operations work according to this rule: -

    -
    -Push the next "thing" onto the stack and fall through OR jump to P2, -depending on whether or not there is a next "thing" to push. -
    -

    One difference between Next and ListRead is their idea of a "thing". -The "things" for the Next instruction are records in a database file. -"Things" for ListRead are integer keys in a list. Another difference -is whether to jump or fall through if there is no next "thing". In this -case, Next falls through, and ListRead jumps. Later on, we will see -other looping instructions (NextIdx and SortNext) that operate using the -same principle.

    - -

    The NotExists instruction pops -the top stack element and uses it as an integer key. If a record with -that key does not exist in table P1, then jump to P2. If a record does -exist, then fall thru to the next instruction. In this case P2 takes -us to the Goto at the end of the loop, which jumps back to the ListRead -at the beginning. This could have been coded to have P2 be 16, the -ListRead at the start of the loop, but the SQLite parser which generated -this code didn't make that optimization.

    -

    The Delete does the work of this -loop; it pops an integer key off the stack (placed there by the -preceding ListRead) and deletes the record of cursor P1 that has that key. -Because P2 is true, the row change counter is incremented.

    -

    The Goto jumps back to the beginning -of the loop. This is the end of the loop.

    -} - -Code { -20 ListReset 0 0 -21 Close 0 0 -22 Commit 0 0 -23 Halt 0 0 -} -puts { -

    This block of instructions cleans up the VDBE program. Three of these -instructions aren't really required, but are generated by the SQLite -parser from its code templates, which are designed to handle more -complicated cases.

    -

    The ListReset instruction empties -the temporary storage list. This list is emptied automatically when the -VDBE program terminates, so it isn't necessary in this case. The Close -instruction closes the cursor P1. Again, this is done by the VDBE -engine when it is finished running this program. The Commit ends the -current transaction successfully, and causes all changes that occurred -in this transaction to be saved to the database. The final Halt is also -unnecessary, since it is added to every VDBE program when it is -prepared to run.

    - - -

    UPDATE statements work very much like DELETE statements except -that instead of deleting the record they replace it with a new one. -Consider this example: -

    - -
    -UPDATE examp SET one= '(' || one || ')' WHERE two < 50;
    -
    - -

    Instead of deleting records where the "two" column is less than -50, this statement just puts the "one" column in parentheses. -The VDBE program to implement this statement follows:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenRead 0 3 examp -5 Rewind 0 12 -6 Column 0 1 -7 Integer 50 0 50 -8 Ge 1 11 -9 Recno 0 0 -10 ListWrite 0 0 -11 Next 0 6 -12 Close 0 0 -13 Integer 0 0 -14 OpenWrite 0 3 -15 ListRewind 0 0 -16 ListRead 0 28 -17 Dup 0 0 -18 NotExists 0 16 -19 String 0 0 ( -20 Column 0 0 -21 Concat 2 0 -22 String 0 0 ) -23 Concat 2 0 -24 Column 0 1 -25 MakeRecord 2 0 -26 PutIntKey 0 1 -27 Goto 0 16 -28 ListReset 0 0 -29 Close 0 0 -30 Commit 0 0 -31 Halt 0 0 -} - -puts { -

    This program is essentially the same as the DELETE program except -that the body of the second loop has been replaced by a sequence of -instructions (at addresses 17 through 26) that update the record rather -than delete it. Most of this instruction sequence should already be -familiar to you, but there are a couple of minor twists so we will go -over it briefly. Also note that the order of some of the instructions -before and after the 2nd loop has changed. This is just the way the -SQLite parser chose to output the code using a different template.

    - -

    As we enter the interior of the second loop (at instruction 17) -the stack contains a single integer which is the key of the -record we want to modify. We are going to need to use this -key twice: once to fetch the old value of the record and -a second time to write back the revised record. So the first instruction -is a Dup to make a duplicate of the key on the top of the stack. The -Dup instruction will duplicate any element of the stack, not just the top -element. You specify which element to duplicate using the -P1 operand. When P1 is 0, the top of the stack is duplicated. -When P1 is 1, the next element down on the stack is duplicated. -And so forth.

    - -

    After duplicating the key, the next instruction, NotExists, -pops the stack once and uses the value popped as a key to -check the existence of a record in the database file. If there is no record -for this key, it jumps back to the ListRead to get another key.

    - -

    Instructions 19 through 25 construct a new database record -that will be used to replace the existing record. This is -the same kind of code that we saw -in the description of INSERT and will not be described further. -After instruction 25 executes, the stack looks like this:

    -} - -stack {(record) new data record} {(integer) key} - -puts { -

    The PutIntKey instruction (also described -during the discussion about INSERT) writes an entry into the -database file whose data is the top of the stack and whose key -is the next on the stack, and then pops the stack twice. The -PutIntKey instruction will overwrite the data of an existing record -with the same key, which is what we want here. Overwriting was not -an issue with INSERT because with INSERT the key was generated -by the NewRecno instruction which is guaranteed to provide a key -that has not been used before.

    -} - -if 0 {

    (By the way, since keys must -all be unique and each key is a 32-bit integer, a single -SQLite database table can have no more than 2^32 -rows. Actually, the Key instruction starts to become -very inefficient as you approach this upper bound, so it -is best to keep the number of entries below 2^31 -or so. Surely a couple billion records will be enough for -most applications!)

    -} - -puts { -

    CREATE and DROP

    - -

    Using CREATE or DROP to create or destroy a table or index is -really the same as doing an INSERT or DELETE from the special -"sqlite_master" table, at least from the point of view of the VDBE. -The sqlite_master table is a special table that is automatically -created for every SQLite database. It looks like this:

    - -
    -CREATE TABLE sqlite_master (
    -  type      TEXT,    -- either "table" or "index"
    -  name      TEXT,    -- name of this table or index
    -  tbl_name  TEXT,    -- for indices: name of associated table
    -  sql       TEXT     -- SQL text of the original CREATE statement
    -)
    -
    - -

    Every table (except the "sqlite_master" table itself) -and every named index in an SQLite database has an entry -in the sqlite_master table. You can query this table using -a SELECT statement just like any other table. But you are -not allowed to directly change the table using UPDATE, INSERT, -or DELETE. Changes to sqlite_master have to occur using -the CREATE and DROP commands because SQLite also has to update -some of its internal data structures when tables and indices -are added or destroyed.
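    For example, a query such as the following is perfectly legal and is compiled into an ordinary SELECT program of the kind shown above (the exact rows returned depend, of course, on what has been created in the database):

    SELECT name, sql FROM sqlite_master WHERE type='index';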

    - -

    But from the point of view of the VDBE, a CREATE works -pretty much like an INSERT and a DROP works like a DELETE. -When the SQLite library opens an existing database, -the first thing it does is a SELECT to read the "sql" -columns from all entries of the sqlite_master table. -The "sql" column contains the complete SQL text of the -CREATE statement that originally generated the index or -table. This text is fed back into the SQLite parser -and used to reconstruct the -internal data structures describing the index or table.

    - -

    Using Indexes To Speed Searching

    - -

    In the example queries above, every row of the table being -queried must be loaded off of the disk and examined, even if only -a small percentage of the rows end up in the result. This can -take a long time on a big table. To speed things up, SQLite -can use an index.

    - -

    An SQLite file associates a key with some data. For an SQLite -table, the database file is set up so that the key is an integer -and the data is the information for one row of the table. -Indices in SQLite reverse this arrangement. The index key -is (some of) the information being stored and the index data -is an integer. -To access a table row that has some particular -content, we first look up the content in the index table to find -its integer index, then we use that integer to look up the -complete record in the table.

    - -

    Note that SQLite uses b-trees, which are a sorted data structure, -so indices can be used when the WHERE clause of the SELECT statement -contains tests for equality or inequality. Queries like the following -can use an index if it is available:

    - -
    -SELECT * FROM examp WHERE two==50;
    -SELECT * FROM examp WHERE two<50;
    -SELECT * FROM examp WHERE two IN (50, 100);
    -
    - -

    If there exists an index that maps the "two" column of the "examp" -table into integers, then SQLite will use that index to find the integer -keys of all rows in examp that have a value of 50 for column two, or -all rows that are less than 50, etc. -But the following queries cannot use the index:

    - -
    -SELECT * FROM examp WHERE two%50 == 10;
    -SELECT * FROM examp WHERE two&127 == 3;
    -
    - -

    Note that the SQLite parser will not always generate code to use an -index, even if it is possible to do so. The following queries will not -currently use the index:

    - -
    -SELECT * FROM examp WHERE two+10 == 50;
    -SELECT * FROM examp WHERE two==50 OR two==100;
    -
    - -

    To understand better how indices work, let's first look at how -they are created. Let's go ahead and put an index on the two -column of the examp table. We have:

    - -
    -CREATE INDEX examp_idx1 ON examp(two);
    -
    - -

    The VDBE code generated by the above statement looks like the -following:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 178 -3 Integer 0 0 -4 OpenWrite 0 2 -5 NewRecno 0 0 -6 String 0 0 index -7 String 0 0 examp_idx1 -8 String 0 0 examp -9 CreateIndex 0 0 ptr(0x791380) -10 Dup 0 0 -11 Integer 0 0 -12 OpenWrite 1 0 -13 String 0 0 CREATE INDEX examp_idx1 ON examp(tw -14 MakeRecord 5 0 -15 PutIntKey 0 0 -16 Integer 0 0 -17 OpenRead 2 3 examp -18 Rewind 2 24 -19 Recno 2 0 -20 Column 2 1 -21 MakeIdxKey 1 0 n -22 IdxPut 1 0 indexed columns are not unique -23 Next 2 19 -24 Close 2 0 -25 Close 1 0 -26 Integer 333 0 -27 SetCookie 0 0 -28 Close 0 0 -29 Commit 0 0 -30 Halt 0 0 -} - -puts { -

    Remember that every table (except sqlite_master) and every named -index has an entry in the sqlite_master table. Since we are creating -a new index, we have to add a new entry to sqlite_master. This is -handled by instructions 3 through 15. Adding an entry to sqlite_master -works just like any other INSERT statement so we will not say anymore -about it here. In this example, we want to focus on populating the -new index with valid data, which happens on instructions 16 through -23.

    -} - -Code { -16 Integer 0 0 -17 OpenRead 2 3 examp -} -puts { -

    The first thing that happens is that we open the table being -indexed for reading. In order to construct an index for a table, -we have to know what is in that table. The index has already been -opened for writing using cursor 0 by instructions 3 and 4.

    -} - -Code { -18 Rewind 2 24 -19 Recno 2 0 -20 Column 2 1 -21 MakeIdxKey 1 0 n -22 IdxPut 1 0 indexed columns are not unique -23 Next 2 19 -} -puts { -

    Instructions 18 through 23 implement a loop over every row of the -table being indexed. For each table row, we first extract the integer -key for that row using Recno in instruction 19, then get the value of -the "two" column using Column in instruction 20. -The MakeIdxKey instruction at 21 -converts data from the "two" column (which is on the top of the stack) -into a valid index key. For an index on a single column, this is -basically a no-op. But if the P1 operand to MakeIdxKey had been -greater than one multiple entries would have been popped from the stack -and converted into a single index key. -The IdxPut instruction at 22 is what -actually creates the index entry. IdxPut pops two elements from the -stack. The top of the stack is used as a key to fetch an entry from the -index table. Then the integer which was second on stack is added to the -set of integers for that index and the new record is written back to the -database file. Note -that the same index entry can store multiple integers if there -are two or more table entries with the same value for the two -column. -

    - -

    Now let's look at how this index will be used. Consider the -following query:

    - -
    -SELECT * FROM examp WHERE two==50;
    -
    - -

    SQLite generates the following VDBE code to handle this query:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 256 -5 Integer 0 0 -6 OpenRead 1 4 examp_idx1 -7 Integer 50 0 50 -8 MakeKey 1 0 n -9 MemStore 0 0 -10 MoveTo 1 19 -11 MemLoad 0 0 -12 IdxGT 1 19 -13 IdxRecno 1 0 -14 MoveTo 0 0 -15 Column 0 0 -16 Column 0 1 -17 Callback 2 0 -18 Next 1 11 -19 Close 0 0 -20 Close 1 0 -21 Halt 0 0 -} - -puts { -

    The SELECT begins in a familiar fashion. First the column -names are initialized and the table being queried is opened. -Things become different beginning with instructions 5 and 6 where -the index file is also opened. Instructions 7 and 8 make -a key with the value of 50. -The MemStore instruction at 9 stores -the index key in VDBE memory location 0. The VDBE memory is used to -avoid having to fetch a value from deep in the stack, which can be done, -but makes the program harder to generate. The following instruction -MoveTo at address 10 pops the key off -the stack and moves the index cursor to the first row of the index with -that key. This initializes the cursor for use in the following loop.

    - -

    Instructions 11 through 18 implement a loop over all index records -with the key that was fetched by instruction 8. All of the index -records with this key will be contiguous in the index table, so we walk -through them and fetch the corresponding table key from the index. -This table key is then used to move the cursor to that row in the table. -The rest of the loop is the same as the loop for the non-indexed SELECT -query.

    - -

    The loop begins with the MemLoad -instruction at 11 which pushes a copy of the index key back onto the -stack. The instruction IdxGT at 12 -compares the key to the key in the current index record pointed to by -cursor P1. If the index key at the current cursor location is greater -than the index key we are looking for, then jump out of the loop.

    - -

    The instruction IdxRecno at 13 -pushes onto the stack the table record number from the index. The -following MoveTo pops it and moves the table cursor to that row. The -next 3 instructions select the column data the same way as in the non- -indexed case. The Column instructions fetch the column data and the -callback function is invoked. The final Next instruction advances the -index cursor, not the table cursor, to the next row, and then branches -back to the start of the loop if there are any index records left.

    - -

    Since the index is used to look up values in the table, -it is important that the index and table be kept consistent. -Now that there is an index on the examp table, we will have -to update that index whenever data is inserted, deleted, or -changed in the examp table. Remember the first example above -where we were able to insert a new row into the "examp" table using -12 VDBE instructions. Now that this table is indexed, 19 -instructions are required. The SQL statement is this:

    - -
    -INSERT INTO examp VALUES('Hello, World!',99);
    -
    - -

    And the generated code looks like this:

    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 Transaction 1 0 -1 Transaction 0 0 -2 VerifyCookie 0 256 -3 Integer 0 0 -4 OpenWrite 0 3 examp -5 Integer 0 0 -6 OpenWrite 1 4 examp_idx1 -7 NewRecno 0 0 -8 String 0 0 Hello, World! -9 Integer 99 0 99 -10 Dup 2 1 -11 Dup 1 1 -12 MakeIdxKey 1 0 n -13 IdxPut 1 0 -14 MakeRecord 2 0 -15 PutIntKey 0 1 -16 Close 0 0 -17 Close 1 0 -18 Commit 0 0 -19 Halt 0 0 -} - -puts { -

    At this point, you should understand the VDBE well enough to -figure out on your own how the above program works. So we will -not discuss it further in this text.

    - -

    Joins

    - -

    In a join, two or more tables are combined to generate a single -result. The result table consists of every possible combination -of rows from the tables being joined. The easiest and most natural -way to implement this is with nested loops.

    - -

    Recall the query template discussed above where there was a -single loop that searched through every record of the table. -In a join we have basically the same thing except that there -are nested loops. For example, to join two tables, the query -template might look something like this:

    - -

    -

      -
    1. Initialize the azColumnName[] array for the callback.
    2. Open two cursors, one to each of the two tables being queried.
    3. For each record in the first table, do:
       1. For each record in the second table do:
          1. If the WHERE clause evaluates to FALSE, then skip the steps that
             follow and continue to the next record.
          2. Compute all columns for the current row of the result.
          3. Invoke the callback function for the current row of the result.
    4. Close both cursors.
    -

    - -

    This template will work, but it is likely to be slow since we -are now dealing with an O(N^2) loop. But it often works -out that the WHERE clause can be factored into terms and that one or -more of those terms will involve only columns in the first table. -When this happens, we can factor part of the WHERE clause test out of -the inner loop and gain a lot of efficiency. So a better template -would be something like this:

    - -

    -

      -
    1. Initialize the azColumnName[] array for the callback.
    2. Open two cursors, one to each of the two tables being queried.
    3. For each record in the first table, do:
       1. Evaluate terms of the WHERE clause that only involve columns from
          the first table. If any term is false (meaning that the whole
          WHERE clause must be false) then skip the rest of this loop and
          continue to the next record.
       2. For each record in the second table do:
          1. If the WHERE clause evaluates to FALSE, then skip the steps that
             follow and continue to the next record.
          2. Compute all columns for the current row of the result.
          3. Invoke the callback function for the current row of the result.
    4. Close both cursors.
    -

    - -

    Additional speed-up can occur if an index can be used to speed -the search of either of the two loops.

    - -

    SQLite always constructs the loops in the same order as the -tables appear in the FROM clause of the SELECT statement. The -left-most table becomes the outer loop and the right-most table -becomes the inner loop. It is possible, in theory, to reorder -the loops in some circumstances to speed the evaluation of the -join. But SQLite does not attempt this optimization.

    - -

    You can see how SQLite constructs nested loops in the following -example:

    - -
    -CREATE TABLE examp2(three int, four int);
    -SELECT * FROM examp, examp2 WHERE two<50 AND four==two;
    -
    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 examp.one -1 ColumnName 1 0 examp.two -2 ColumnName 2 0 examp2.three -3 ColumnName 3 0 examp2.four -4 Integer 0 0 -5 OpenRead 0 3 examp -6 VerifyCookie 0 909 -7 Integer 0 0 -8 OpenRead 1 5 examp2 -9 Rewind 0 24 -10 Column 0 1 -11 Integer 50 0 50 -12 Ge 1 23 -13 Rewind 1 23 -14 Column 1 1 -15 Column 0 1 -16 Ne 1 22 -17 Column 0 0 -18 Column 0 1 -19 Column 1 0 -20 Column 1 1 -21 Callback 4 0 -22 Next 1 14 -23 Next 0 10 -24 Close 0 0 -25 Close 1 0 -26 Halt 0 0 -} - -puts { -

    The outer loop over table examp is implemented by instructions -7 through 23. The inner loop is instructions 13 through 22. -Notice that the "two<50" term of the WHERE expression involves -only columns from the first table and can be factored out of -the inner loop. SQLite does this and implements the "two<50" -test in instructions 10 through 12. The "four==two" test is -implemented by instructions 14 through 16 in the inner loop.
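    As noted above, an index can sometimes speed up one of the loops. If an index on the "four" column of examp2 existed, created for instance like this (the name examp2_idx1 is used purely for illustration):

    CREATE INDEX examp2_idx1 ON examp2(four);

    then the inner scan of examp2 could, in principle, be replaced by an index lookup of just the rows whose "four" value equals the current value of "two", using the same MakeKey/MoveTo pattern shown in the indexed SELECT earlier, though as noted above the parser does not always choose to use an available index.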

    - -

    SQLite does not impose any arbitrary limits on the tables in -a join. It also allows a table to be joined with itself.
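    A self-join presumably uses exactly the same nested-loop template; the only difference is that both cursors are opened on the same table. For example (an illustrative query, not taken from the listings above, with "a" and "b" as arbitrary aliases):

    SELECT a.one, b.one
    FROM examp AS a, examp AS b
    WHERE a.two < b.two;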

    - -

    The ORDER BY clause

    - -

    For historical reasons, and for efficiency, all sorting is currently -done in memory.

    - -

    SQLite implements the ORDER BY clause using a special -set of instructions to control an object called a sorter. In the -inner-most loop of the query, where there would normally be -a Callback instruction, instead a record is constructed that -contains both callback parameters and a key. This record -is added to the sorter (in a linked list). After the query loop -finishes, the list of records is sorted and this list is walked. For -each record on the list, the callback is invoked. Finally, the sorter -is closed and memory is deallocated.

    - -

    We can see the process in action in the following query:

    - -
    -SELECT * FROM examp ORDER BY one DESC, two;
    -
    -} - -Code { -addr opcode p1 p2 p3 ----- ------------ ----- ----- ----------------------------------- -0 ColumnName 0 0 one -1 ColumnName 1 0 two -2 Integer 0 0 -3 OpenRead 0 3 examp -4 VerifyCookie 0 909 -5 Rewind 0 14 -6 Column 0 0 -7 Column 0 1 -8 SortMakeRec 2 0 -9 Column 0 0 -10 Column 0 1 -11 SortMakeKey 2 0 D+ -12 SortPut 0 0 -13 Next 0 6 -14 Close 0 0 -15 Sort 0 0 -16 SortNext 0 19 -17 SortCallback 2 0 -18 Goto 0 16 -19 SortReset 0 0 -20 Halt 0 0 -} - -puts { -

    There is only one sorter object, so there are no instructions to open -or close it. It is opened automatically when needed, and it is closed -when the VDBE program halts.

    - -

    The query loop is built from instructions 5 through 13. Instructions -6 through 8 build a record that contains the azData[] values for a single -invocation of the callback. A sort key is generated by instructions -9 through 11. Instruction 12 combines the invocation record and the -sort key into a single entry and puts that entry on the sort list.

    - -

    The P3 argument of instruction 11 is of particular interest. The sort key is formed by prepending one character from P3 to each string and concatenating all the strings. The sort comparison function will look at this character to determine whether the sort order is ascending or descending, and whether to sort as a string or number. In this example, the first column should be sorted as a string in descending order so its prefix is "D" and the second column should be sorted numerically in ascending order so its prefix is "+". Ascending string sorting uses "A", and descending numeric sorting uses "-".

    - -

    After the query loop ends, the table being queried is closed at -instruction 14. This is done early in order to allow other processes -or threads to access that table, if desired. The list of records -that was built up inside the query loop is sorted by the instruction -at 15. Instructions 16 through 18 walk through the record list -(which is now in sorted order) and invoke the callback once for -each record. Finally, the sorter is closed at instruction 19.

    - -

    Aggregate Functions And The GROUP BY and HAVING Clauses

    - -

    To compute aggregate functions, the VDBE implements a special data structure and instructions for controlling that data structure. The data structure is an unordered set of buckets, where each bucket has a key and one or more memory locations. Within the query loop, the GROUP BY clause is used to construct a key and the bucket with that key is brought into focus. A new bucket is created with the key if one did not previously exist. Once the bucket is in focus, the memory locations of the bucket are used to accumulate the values of the various aggregate functions. After the query loop terminates, each bucket is visited once to generate a single row of the results.
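    A small C sketch of this bucket technique, specialized to the example query discussed below, may help. The bucket structure, its fixed-size array, and the sample data are invented for the illustration.

  /* Sketch of the "bucket" aggregator: GROUP BY three, computing
  ** min(three+four) and the running sum/count needed for avg(four). */
  #include <stdio.h>

  typedef struct {
    int key;        /* slot 0: the "three" value (the GROUP BY key)  */
    int minVal;     /* slot 1: minimum of three+four seen so far     */
    int sum, count; /* slot 2: running sum of "four" (for avg(four)) */
  } Bucket;

  static Bucket buckets[100];
  static int nBucket = 0;

  static Bucket *focus(int key){          /* like AggFocus: find or create a bucket */
    for(int i=0; i<nBucket; i++) if( buckets[i].key==key ) return &buckets[i];
    Bucket *p = &buckets[nBucket++];
    p->key = key; p->minVal = 0x7fffffff; p->sum = 0; p->count = 0;
    return p;
  }

  int main(void){
    int three[] = {1, 2, 1};              /* made-up examp2 contents */
    int four[]  = {10, 20, 30};
    for(int i=0; i<3; i++){               /* first loop: scan the input table */
      Bucket *p = focus(three[i]);
      int v = three[i] + four[i];
      if( v < p->minVal ) p->minVal = v;  /* accumulate min()  */
      p->sum += four[i]; p->count++;      /* accumulate avg()  */
    }
    for(int i=0; i<nBucket; i++){         /* second loop: one result row per bucket */
      Bucket *p = &buckets[i];
      printf("%d|%g\n", p->key, p->minVal + (double)p->sum/p->count);
    }
    return 0;
  }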

    - -

    An example will help to clarify this concept. Consider the -following query:

    - -
    -SELECT three, min(three+four)+avg(four) 
    -FROM examp2
    -GROUP BY three;
    -
    - - -

    The VDBE code generated for this query is as follows:

    -}
    -
    -Code {
    -addr  opcode        p1     p2     p3
    -----  ------------  -----  -----  -----------------------------------
    -0     ColumnName    0      0      three
    -1     ColumnName    1      0      min(three+four)+avg(four)
    -2     AggReset      0      3
    -3     AggInit       0      1      ptr(0x7903a0)
    -4     AggInit       0      2      ptr(0x790700)
    -5     Integer       0      0
    -6     OpenRead      0      5      examp2
    -7     VerifyCookie  0      909
    -8     Rewind        0      23
    -9     Column        0      0
    -10    MakeKey       1      0      n
    -11    AggFocus      0      14
    -12    Column        0      0
    -13    AggSet        0      0
    -14    Column        0      0
    -15    Column        0      1
    -16    Add           0      0
    -17    Integer       1      0
    -18    AggFunc       0      1      ptr(0x7903a0)
    -19    Column        0      1
    -20    Integer       2      0
    -21    AggFunc       0      1      ptr(0x790700)
    -22    Next          0      9
    -23    Close         0      0
    -24    AggNext       0      31
    -25    AggGet        0      0
    -26    AggGet        0      1
    -27    AggGet        0      2
    -28    Add           0      0
    -29    Callback      2      0
    -30    Goto          0      24
    -31    Noop          0      0
    -32    Halt          0      0
    -}
    -
    -puts {
    -

    The first instruction of interest is the -AggReset at 2. -The AggReset instruction initializes the set of buckets to be the -empty set and specifies the number of memory slots available in each -bucket as P2. In this example, each bucket will hold 3 memory slots. -It is not obvious, but if you look closely at the rest of the program -you can figure out what each of these slots is intended for.

    - -
    Memory Slot   Intended Use Of This Memory Slot
    -----------   ------------------------------------------------------
    0             The "three" column -- the key to the bucket
    1             The minimum "three+four" value
    2             The sum of all "four" values. This is used to compute
                  "avg(four)".
    - -

    The query loop is implemented by instructions 8 through 22. The aggregate key specified by the GROUP BY clause is computed by instructions 9 and 10. Instruction 11 causes the appropriate bucket to come into focus. If a bucket with the given key does not already exist, a new bucket is created and control falls through to instructions 12 and 13 which initialize the bucket. If the bucket does already exist, then a jump is made to instruction 14. The values of aggregate functions are updated by the instructions between 11 and 21. Instructions 14 through 18 update memory slot 1 to hold the next value of "min(three+four)". Then the sum of the "four" column is updated by instructions 19 through 21.

    - -

    After the query loop is finished, the table "examp2" is closed at -instruction 23 so that its lock will be released and it can be -used by other threads or processes. The next step is to loop -over all aggregate buckets and output one row of the result for -each bucket. This is done by the loop at instructions 24 -through 30. The AggNext instruction at 24 brings the next bucket -into focus, or jumps to the end of the loop if all buckets have -been examined already. The 3 columns of the result are fetched from -the aggregator bucket in order at instructions 25 through 27. -Finally, the callback is invoked at instruction 29.

    - -

    In summary then, any query with aggregate functions is implemented -by two loops. The first loop scans the input table and computes -aggregate information into buckets and the second loop scans through -all the buckets to compute the final result.

    - -

    The realization that an aggregate query is really two consecutive loops makes it much easier to understand the difference between a WHERE clause and a HAVING clause in an SQL query statement. The WHERE clause is a restriction on the first loop and the HAVING clause is a restriction on the second loop. You can see this by adding both a WHERE and a HAVING clause to our example query:

    - - -
    -SELECT three, min(three+four)+avg(four) 
    -FROM examp2
    -WHERE three>four
    -GROUP BY three
    -HAVING avg(four)<10;
    -
    -}
    -
    -Code {
    -addr  opcode        p1     p2     p3
    -----  ------------  -----  -----  -----------------------------------
    -0     ColumnName    0      0      three
    -1     ColumnName    1      0      min(three+four)+avg(four)
    -2     AggReset      0      3
    -3     AggInit       0      1      ptr(0x7903a0)
    -4     AggInit       0      2      ptr(0x790700)
    -5     Integer       0      0
    -6     OpenRead      0      5      examp2
    -7     VerifyCookie  0      909
    -8     Rewind        0      26
    -9     Column        0      0
    -10    Column        0      1
    -11    Le            1      25
    -12    Column        0      0
    -13    MakeKey       1      0      n
    -14    AggFocus      0      17
    -15    Column        0      0
    -16    AggSet        0      0
    -17    Column        0      0
    -18    Column        0      1
    -19    Add           0      0
    -20    Integer       1      0
    -21    AggFunc       0      1      ptr(0x7903a0)
    -22    Column        0      1
    -23    Integer       2      0
    -24    AggFunc       0      1      ptr(0x790700)
    -25    Next          0      9
    -26    Close         0      0
    -27    AggNext       0      37
    -28    AggGet        0      2
    -29    Integer       10     0      10
    -30    Ge            1      27
    -31    AggGet        0      0
    -32    AggGet        0      1
    -33    AggGet        0      2
    -34    Add           0      0
    -35    Callback      2      0
    -36    Goto          0      27
    -37    Noop          0      0
    -38    Halt          0      0
    -}
    -
    -puts {
    -

    The code generated in this last example is the same as the -previous except for the addition of two conditional jumps used -to implement the extra WHERE and HAVING clauses. The WHERE -clause is implemented by instructions 9 through 11 in the query -loop. The HAVING clause is implemented by instruction 28 through -30 in the output loop.

    - -

    Using SELECT Statements As Terms In An Expression

    - -

    The very name "Structured Query Language" tells us that SQL should -support nested queries. And, in fact, two different kinds of nesting -are supported. Any SELECT statement that returns a single-row, single-column -result can be used as a term in an expression of another SELECT statement. -And, a SELECT statement that returns a single-column, multi-row result -can be used as the right-hand operand of the IN and NOT IN operators. -We will begin this section with an example of the first kind of nesting, -where a single-row, single-column SELECT is used as a term in an expression -of another SELECT. Here is our example:

    - -
    -SELECT * FROM examp
    -WHERE two!=(SELECT three FROM examp2
    -            WHERE four=5);
    -
    - -

    The way SQLite deals with this is to first run the inner SELECT -(the one against examp2) and store its result in a private memory -cell. SQLite then substitutes the value of this private memory -cell for the inner SELECT when it evaluates the outer SELECT. -The code looks like this:

    -}
    -
    -Code {
    -addr  opcode        p1     p2     p3
    -----  ------------  -----  -----  -----------------------------------
    -0     String        0      0
    -1     MemStore      0      1
    -2     Integer       0      0
    -3     OpenRead      1      5      examp2
    -4     VerifyCookie  0      909
    -5     Rewind        1      13
    -6     Column        1      1
    -7     Integer       5      0      5
    -8     Ne            1      12
    -9     Column        1      0
    -10    MemStore      0      1
    -11    Goto          0      13
    -12    Next          1      6
    -13    Close         1      0
    -14    ColumnName    0      0      one
    -15    ColumnName    1      0      two
    -16    Integer       0      0
    -17    OpenRead      0      3      examp
    -18    Rewind        0      26
    -19    Column        0      1
    -20    MemLoad       0      0
    -21    Eq            1      25
    -22    Column        0      0
    -23    Column        0      1
    -24    Callback      2      0
    -25    Next          0      19
    -26    Close         0      0
    -27    Halt          0      0
    -}
    -
    -puts {
    -

    The private memory cell is initialized to NULL by the first two instructions. Instructions 2 through 13 implement the inner SELECT statement against the examp2 table. Notice that instead of sending the result to a callback or storing the result on a sorter, the result of the query is pushed into the memory cell by instruction 10 and the loop is abandoned by the jump at instruction 11. The jump at instruction 11 is vestigial and never executes.
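    The same evaluate-once strategy can be mimicked from application code using the ordinary SQLite C interface, as in the sketch below: run the inner SELECT one time, remember its value, and bind that value into the outer statement. The database file name is hypothetical, and the sketch ignores the NULL case that arises when the sub-select returns no rows.

  /* Sketch: pre-evaluate the sub-select, then reuse its value in the outer query. */
  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    sqlite3 *db;
    sqlite3_stmt *pInner, *pOuter;
    if( sqlite3_open("test.db", &db) ) return 1;   /* hypothetical database file */

    /* "Private memory cell": evaluate the inner SELECT exactly once. */
    sqlite3_prepare_v2(db, "SELECT three FROM examp2 WHERE four=5", -1, &pInner, 0);
    int innerVal = 0;
    if( sqlite3_step(pInner)==SQLITE_ROW ) innerVal = sqlite3_column_int(pInner, 0);
    sqlite3_finalize(pInner);

    /* The outer query reuses the stored value instead of re-running the sub-select. */
    sqlite3_prepare_v2(db, "SELECT one, two FROM examp WHERE two!=?", -1, &pOuter, 0);
    sqlite3_bind_int(pOuter, 1, innerVal);
    while( sqlite3_step(pOuter)==SQLITE_ROW ){
      printf("%d|%d\n", sqlite3_column_int(pOuter,0), sqlite3_column_int(pOuter,1));
    }
    sqlite3_finalize(pOuter);
    sqlite3_close(db);
    return 0;
  }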

    - -

    The outer SELECT is implemented by instructions 14 through 25. -In particular, the WHERE clause that contains the nested select -is implemented by instructions 19 through 21. You can see that -the result of the inner select is loaded onto the stack by instruction -20 and used by the conditional jump at 21.

    - -

    When the result of a sub-select is a scalar, a single private memory -cell can be used, as shown in the previous -example. But when the result of a sub-select is a vector, such -as when the sub-select is the right-hand operand of IN or NOT IN, -a different approach is needed. In this case, -the result of the sub-select is -stored in a transient table and the contents of that table -are tested using the Found or NotFound operators. Consider this -example:

    - -
    -SELECT * FROM examp
    -WHERE two IN (SELECT three FROM examp2);
    -
    - -

    The code generated to implement this last query is as follows:

    -}
    -
    -Code {
    -addr  opcode        p1     p2     p3
    -----  ------------  -----  -----  -----------------------------------
    -0     OpenTemp      1      1
    -1     Integer       0      0
    -2     OpenRead      2      5      examp2
    -3     VerifyCookie  0      909
    -4     Rewind        2      10
    -5     Column        2      0
    -6     IsNull        -1     9
    -7     String        0      0
    -8     PutStrKey     1      0
    -9     Next          2      5
    -10    Close         2      0
    -11    ColumnName    0      0      one
    -12    ColumnName    1      0      two
    -13    Integer       0      0
    -14    OpenRead      0      3      examp
    -15    Rewind        0      25
    -16    Column        0      1
    -17    NotNull       -1     20
    -18    Pop           1      0
    -19    Goto          0      24
    -20    NotFound      1      24
    -21    Column        0      0
    -22    Column        0      1
    -23    Callback      2      0
    -24    Next          0      16
    -25    Close         0      0
    -26    Halt          0      0
    -}
    -
    -puts {
    -

    The transient table in which the results of the inner SELECT are -stored is created by the OpenTemp -instruction at 0. This opcode is used for tables that exist for the -duration of a single SQL statement only. The transient cursor is always -opened read/write even if the main database is read-only. The transient -table is deleted automatically when the cursor is closed. The P2 value -of 1 means the cursor points to a BTree index, which has no data but can -have an arbitrary key.

    - -

    The inner SELECT statement is implemented by instructions 1 through 10. -All this code does is make an entry in the temporary table for each -row of the examp2 table with a non-NULL value for the "three" column. -The key for each temporary table entry is the "three" column of examp2 -and the data is an empty string since it is never used.

    - -

    The outer SELECT is implemented by instructions 11 through 25. In particular, the WHERE clause containing the IN operator is implemented by the instructions at 16, 17, and 20. Instruction 16 pushes the value of the "two" column for the current row onto the stack and instruction 17 checks to see that it is non-NULL. If this is successful, execution jumps to 20, where it tests to see if the top of the stack matches any key in the temporary table. The rest of the code is the same as what has been shown before.
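    Conceptually, the transient table behaves like a simple membership set. The C sketch below models the same steps with an in-memory array: fill the set from the inner SELECT, then probe it for each outer row, skipping NULLs just as the NotNull test does. The data and helper names are invented for the illustration.

  /* Sketch of the transient-table technique behind IN. */
  #include <stdio.h>

  static int set[100];            /* values of examp2.three (NULLs skipped) */
  static int nSet = 0;

  static void setAdd(int v){ set[nSet++] = v; }   /* like PutStrKey          */
  static int  setFound(int v){                    /* like the NotFound probe */
    for(int i=0; i<nSet; i++) if( set[i]==v ) return 1;
    return 0;
  }

  int main(void){
    int three[] = {22, 44};                       /* inner SELECT results    */
    for(int i=0; i<2; i++) setAdd(three[i]);

    struct { int one, two, twoIsNull; } examp[] = { {11,22,0}, {33,99,0}, {55,0,1} };
    for(int i=0; i<3; i++){                       /* outer SELECT loop       */
      if( examp[i].twoIsNull ) continue;          /* NULL is never IN anything    */
      if( !setFound(examp[i].two) ) continue;     /* not found: skip the callback */
      printf("%d|%d\n", examp[i].one, examp[i].two);
    }
    return 0;
  }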

    - -

    Compound SELECT Statements

    - -

    SQLite also allows two or more SELECT statements to be joined as -peers using operators UNION, UNION ALL, INTERSECT, and EXCEPT. These -compound select statements are implemented using transient tables. -The implementation is slightly different for each operator, but the -basic ideas are the same. For an example we will use the EXCEPT -operator.

    - -
    -SELECT two FROM examp
    -EXCEPT
    -SELECT four FROM examp2;
    -
    - -

    The result of this last example should be every unique value of the "two" column in the examp table, except that any value also found in the "four" column of examp2 is removed. The code to implement this query is as follows:

    -}
    -
    -Code {
    -addr  opcode        p1     p2     p3
    -----  ------------  -----  -----  -----------------------------------
    -0     OpenTemp      0      1
    -1     KeyAsData     0      1
    -2     Integer       0      0
    -3     OpenRead      1      3      examp
    -4     VerifyCookie  0      909
    -5     Rewind        1      11
    -6     Column        1      1
    -7     MakeRecord    1      0
    -8     String        0      0
    -9     PutStrKey     0      0
    -10    Next          1      6
    -11    Close         1      0
    -12    Integer       0      0
    -13    OpenRead      2      5      examp2
    -14    Rewind        2      20
    -15    Column        2      1
    -16    MakeRecord    1      0
    -17    NotFound      0      19
    -18    Delete        0      0
    -19    Next          2      15
    -20    Close         2      0
    -21    ColumnName    0      0      four
    -22    Rewind        0      26
    -23    Column        0      0
    -24    Callback      1      0
    -25    Next          0      23
    -26    Close         0      0
    -27    Halt          0      0
    -}
    -
    -puts {
    -

    The transient table in which the result is built is created by -instruction 0. Three loops then follow. The loop at instructions -5 through 10 implements the first SELECT statement. The second -SELECT statement is implemented by the loop at instructions 14 through -19. Finally, a loop at instructions 22 through 25 reads the transient -table and invokes the callback once for each row in the result.

    - -

    Instruction 1 is of particular importance in this example. Normally, -the Column instruction extracts the value of a column from a larger -record in the data of an SQLite file entry. Instruction 1 sets a flag on -the transient table so that Column will instead treat the key of the -SQLite file entry as if it were data and extract column information from -the key.

    - -

    Here is what is going to happen: The first SELECT statement will construct rows of the result and save each row as the key of an entry in the transient table. The data for each entry in the transient table is never used, so we fill it in with an empty string. The second SELECT statement also constructs rows, but the rows constructed by the second SELECT are removed from the transient table. That is why we want the rows to be stored in the key of the SQLite file instead of in the data -- so they can be easily located and deleted.

    - -

    Let's look more closely at what is happening here. The first SELECT is implemented by the loop at instructions 5 through 10. Instruction 5 initializes the loop by rewinding its cursor. Instruction 6 extracts the value of the "two" column from "examp" and instruction 7 converts this into a row. Instruction 8 pushes an empty string onto the stack. Finally, instruction 9 writes the row into the temporary table. But remember, the PutStrKey opcode uses the top of the stack as the record data and the next on stack as the key. For an INSERT statement, the row generated by the MakeRecord opcode is the record data and the record key is an integer created by the NewRecno opcode. But here the roles are reversed and the row created by MakeRecord is the record key and the record data is just an empty string.

    - -

    The second SELECT is implemented by instructions 14 through 19. Instruction 14 initializes the loop by rewinding its cursor. A new result row is created from the "four" column of table "examp2" by instructions 15 and 16. But instead of using PutStrKey to write this new row into the temporary table, we instead call Delete to remove it from the temporary table if it exists.

    - -

    The result of the compound select is sent to the callback routine -by the loop at instructions 22 through 25. There is nothing new -or remarkable about this loop, except for the fact that the Column -instruction at 23 will be extracting a column out of the record key -rather than the record data.
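    The whole EXCEPT strategy can be summarized by the following C sketch, which substitutes a small in-memory set of strings for the transient table: insert every row of the first SELECT, delete every row of the second, then walk what remains. The data and helper names are invented for the illustration.

  /* Sketch of EXCEPT: insert, delete, then emit what is left. */
  #include <stdio.h>
  #include <string.h>

  static char set[100][32];
  static int  inUse[100];

  static void setInsert(const char *z){              /* first SELECT: PutStrKey */
    for(int i=0; i<100; i++)
      if( inUse[i] && strcmp(set[i], z)==0 ) return; /* already present         */
    for(int i=0; i<100; i++)
      if( !inUse[i] ){ strcpy(set[i], z); inUse[i]=1; return; }
  }
  static void setDelete(const char *z){              /* second SELECT: Delete   */
    for(int i=0; i<100; i++)
      if( inUse[i] && strcmp(set[i], z)==0 ) inUse[i]=0;
  }

  int main(void){
    const char *two[]  = { "22", "44", "66" };       /* made-up examp.two values  */
    const char *four[] = { "44" };                   /* made-up examp2.four value */
    for(int i=0; i<3; i++) setInsert(two[i]);
    for(int i=0; i<1; i++) setDelete(four[i]);
    for(int i=0; i<100; i++)                         /* final loop: callbacks     */
      if( inUse[i] ) printf("%s\n", set[i]);
    return 0;
  }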

    - -

    Summary

    - -

    This article has reviewed all of the major techniques used by -SQLite's VDBE to implement SQL statements. What has not been shown -is that most of these techniques can be used in combination to -generate code for an appropriately complex query statement. For -example, we have shown how sorting is accomplished on a simple query -and we have shown how to implement a compound query. But we did -not give an example of sorting in a compound query. This is because -sorting a compound query does not introduce any new concepts: it -merely combines two previous ideas (sorting and compounding) -in the same VDBE program.

    - -

    For additional information on how the SQLite library functions, the reader is directed to look at the SQLite source code directly. If you understand the material in this article, you should not have much difficulty in following the sources. Serious students of the internals of SQLite will probably also want to make a careful study of the VDBE opcodes as documented here. Most of the opcode documentation is extracted from comments in the source code using a script so you can also get information about the various opcodes directly from the vdbe.c source file. If you have successfully read this far, you should have little difficulty understanding the rest.

    - -

    If you find errors in either the documentation or the code, -feel free to fix them and/or contact the author at -drh@hwaci.com. Your bug fixes or -suggestions are always welcomed.

    -}
    -footer $rcsid
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/version3.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/version3.html
    --- sqlite3-3.4.2/www/version3.html 1970-01-01 01:00:00.000000000 +0100
    +++ sqlite3-3.6.16/www/version3.html 2009-06-27 15:07:45.000000000 +0100
    @@ -0,0 +1,362 @@
    +
    +SQLite Version 3 Overview
    +

    SQLite Version 3 Overview

    + +

    +SQLite version 3.0 introduces important changes to the library, including: +

    + +
    • A more compact format for database files.
    • Manifest typing and BLOB support.
    • Support for both UTF-8 and UTF-16 text.
    • User-defined text collating sequences.
    • 64-bit ROWIDs.
    • Improved Concurrency.
    + +

    +This document is a quick introduction to the changes for SQLite 3.0 +for users who are already familiar with SQLite version 2.8. +

    + +

    Naming Changes

    + +

    +SQLite version 2.8 will continue to be supported with bug fixes +for the foreseeable future. In order to allow SQLite version 2.8 +and SQLite version 3.0 to peacefully coexist, the names of key files +and APIs in SQLite version 3.0 have been changed to include the +character "3". For example, the include file used by C programs +has been changed from "sqlite.h" to "sqlite3.h". And the name of +the shell program used to interact with databases has been changed +from "sqlite.exe" to "sqlite3.exe". With these changes, it is possible +to have both SQLite 2.8 and SQLite 3.0 installed on the same system at +the same time. And it is possible for the same C program to link +against both SQLite 2.8 and SQLite 3.0 at the same time and to use +both libraries at the same time. +

    + +

    New File Format

    + +

    +The format used by SQLite database files has been completely revised. The old version 2.1 format and the new 3.0 format are incompatible with one another. Version 2.8 of SQLite will not read a version 3.0 database file and version 3.0 of SQLite will not read a version 2.8 database file.

    + +

    +To convert an SQLite 2.8 database into an SQLite 3.0 database, have +ready the command-line shells for both version 2.8 and 3.0. Then +enter a command like the following: +

    + +
    +sqlite OLD.DB .dump | sqlite3 NEW.DB
    +
    + +

    +The new database file format uses B+trees for tables. In a B+tree, all +data is stored in the leaves of the tree instead of in both the leaves and +the intermediate branch nodes. The use of B+trees for tables allows for +better scalability and the storage of larger data fields without the use of +overflow pages. Traditional B-trees are still used for indices.

    + +

    +The new file format also supports variable page sizes between 512 and 32768 bytes. The size of a page is stored in the file header so the same library can read databases with different page sizes, in theory, though this feature has not yet been implemented in practice.

    + +

    +The new file format omits unused fields from its disk images. For example, +indices use only the key part of a B-tree record and not the data. So +for indices, the field that records the length of the data is omitted. +Integer values such as the length of key and data are stored using +a variable-length encoding so that only one or two bytes are required to +store the most common cases but up to 64-bits of information can be encoded +if needed. +Integer and floating point data is stored on the disk in binary rather +than being converted into ASCII as in SQLite version 2.8. +These changes taken together result in database files that are typically +25% to 35% smaller than the equivalent files in SQLite version 2.8. +
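    As a rough illustration of how a variable-length integer encoding can use only one or two bytes for small values, consider the C sketch below. It packs seven payload bits per byte with a continuation flag; the real SQLite on-disk varint format differs in its details, so this is only a sketch of the idea.

  /* Simplified variable-length integer encoder (illustrative only). */
  #include <stdio.h>

  static int putVarint(unsigned char *p, unsigned long long v){
    int n = 0;
    do{
      unsigned char byte = v & 0x7f;      /* low 7 bits of payload            */
      v >>= 7;
      p[n++] = byte | (v ? 0x80 : 0);     /* continuation bit if more follows */
    }while( v );
    return n;                             /* number of bytes written          */
  }

  int main(void){
    unsigned char buf[10];
    unsigned long long samples[] = { 100, 16000, 2000000, 123456789012345ULL };
    for(int i=0; i<4; i++){
      int n = putVarint(buf, samples[i]);
      printf("%llu encodes in %d byte(s)\n", samples[i], n);
    }
    return 0;
  }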

    + +

    +Details of the low-level B-tree format used in SQLite version 3.0 can +be found in header comments to the +btree.c +source file. +

    + +

    Manifest Typing and BLOB Support

    + +

    +SQLite version 2.8 will deal with data in various formats internally, +but when writing to the disk or interacting through its API, SQLite 2.8 +always converts data into ASCII text. SQLite 3.0, in contrast, exposes +its internal data representations to the user and stores binary representations +to disk when appropriate. The exposing of non-ASCII representations was +added in order to support BLOBs. +

    + +

    +SQLite version 2.8 had the feature that any type of data could be stored in any table column regardless of the declared type of that column. This feature is retained in version 3.0, though in a slightly modified form. Each table column will store any type of data, though columns have an affinity for the format of data defined by their declared datatype. When data is inserted into a column, that column will make an attempt to convert the data format into the column's declared type. All SQL database engines do this. The difference is that SQLite 3.0 will still store the data even if a format conversion is not possible.

    + +

    +For example, if you have a table column declared to be of type "INTEGER" +and you try to insert a string, the column will look at the text string +and see if it looks like a number. If the string does look like a number +it is converted into a number and into an integer if the number does not +have a fractional part, and stored that way. But if the string is not +a well-formed number it is still stored as a string. A column with a +type of "TEXT" tries to convert numbers into an ASCII-Text representation +before storing them. But BLOBs are stored in TEXT columns as BLOBs because +you cannot in general convert a BLOB into text. +
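    This behavior can be observed directly through the C interface. The short program below, which uses only the documented sqlite3_open(), sqlite3_exec(), and the built-in typeof() SQL function, inserts a numeric-looking string into an INTEGER column and a number into a TEXT column, then prints the storage class actually used for each value.

  /* Demonstrates column affinity: what typeof() reports after the inserts. */
  #include <stdio.h>
  #include "sqlite3.h"

  static int print_row(void *unused, int argc, char **argv, char **colName){
    for(int i=0; i<argc; i++) printf("%s=%s  ", colName[i], argv[i] ? argv[i] : "NULL");
    printf("\n");
    return 0;
  }

  int main(void){
    sqlite3 *db;
    if( sqlite3_open(":memory:", &db) ) return 1;
    sqlite3_exec(db, "CREATE TABLE t(i INTEGER, x TEXT)", 0, 0, 0);
    sqlite3_exec(db, "INSERT INTO t VALUES('123', 456)", 0, 0, 0);   /* both converted */
    sqlite3_exec(db, "INSERT INTO t VALUES('xyz', 'abc')", 0, 0, 0); /* 'xyz' stays text */
    sqlite3_exec(db, "SELECT typeof(i) AS ti, typeof(x) AS tx FROM t",
                 print_row, 0, 0);
    sqlite3_close(db);
    return 0;
  }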

    + +

    +In most other SQL database engines the datatype is associated with +the table column that holds the data - with the data container. +In SQLite 3.0, the datatype is associated with the data itself, not +with its container. +Paul Graham in his book +ANSI Common Lisp +calls this property "Manifest Typing". +Other writers have other definitions for the term "manifest typing", +so beware of confusion. But by whatever name, that is the datatype +model supported by SQLite 3.0. +

    + +

    +Additional information about datatypes in SQLite version 3.0 is +available +separately. +

    + +

    Support for UTF-8 and UTF-16

    + +

    +The new API for SQLite 3.0 contains routines that accept text as +both UTF-8 and UTF-16 in the native byte order of the host machine. +Each database file manages text as either UTF-8, UTF-16BE (big-endian), +or UTF-16LE (little-endian). Internally and in the disk file, the +same text representation is used everywhere. If the text representation +specified by the database file (in the file header) does not match +the text representation required by the interface routines, then text +is converted on-the-fly. +Constantly converting text from one representation to another can be +computationally expensive, so it is suggested that programmers choose a +single representation and stick with it throughout their application. +

    + +

    +In the current implementation of SQLite, the SQL parser only works +with UTF-8 text. So if you supply UTF-16 text it will be converted. +This is just an implementation issue and there is nothing to prevent +future versions of SQLite from parsing UTF-16 encoded SQL natively. +

    + +

    +When creating new user-defined SQL functions and collating sequences, each function or collating sequence can specify whether it works with UTF-8, UTF-16be, or UTF-16le. Separate implementations can be registered for each encoding. If an SQL function or collating sequence is required but a version for the current text encoding is not available, then the text is automatically converted. As before, this conversion takes computation time, so programmers are advised to pick a single encoding and stick with it in order to minimize the amount of unnecessary format juggling.

    + +

    +SQLite is not particular about the text it receives and is more than happy to process text strings that are not normalized or even well-formed UTF-8 or UTF-16. Thus, programmers who want to store ISO8859 data can do so using the UTF-8 interfaces. As long as no attempts are made to use a UTF-16 collating sequence or SQL function, the byte sequence of the text will not be modified in any way.

    + +

    User-defined Collating Sequences

    + +

    +A collating sequence is just a defined order for text. When SQLite 3.0 +sorts (or uses a comparison operator like "<" or ">=") the sort +order is first determined by the data type. +

    + +
    • NULLs sort first
    • Numeric values sort next in numerical order
    • Text values come after numerics
    • BLOBs sort last
    + +

    +Collating sequences are used for comparing two text strings. +The collating sequence does not change the ordering of NULLs, numbers, +or BLOBs, only text. +

    + +

    +A collating sequence is implemented as a function that takes the +two strings being compared as inputs and returns negative, zero, or +positive if the first string is less than, equal to, or greater than +the second. +SQLite 3.0 comes with a single built-in collating sequence named "BINARY" +which is implemented using the memcmp() routine from the standard C library. +The BINARY collating sequence works well for English text. For other +languages or locales, alternative collating sequences may be preferred. +
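    A user-defined collating sequence is registered with sqlite3_create_collation(). The sketch below registers a simple case-insensitive ordering for ASCII text; the collation name "NOCASEISH" and the comparison rule are just examples.

  /* Register a hypothetical case-insensitive collating sequence. */
  #include <ctype.h>
  #include "sqlite3.h"

  static int nocaseCompare(void *unused, int n1, const void *z1,
                                         int n2, const void *z2){
    const unsigned char *a = z1, *b = z2;
    int n = n1<n2 ? n1 : n2;
    for(int i=0; i<n; i++){
      int c = tolower(a[i]) - tolower(b[i]);
      if( c ) return c;                  /* negative, zero, or positive */
    }
    return n1 - n2;                      /* shorter string sorts first  */
  }

  int register_collation(sqlite3 *db){
    /* SQLITE_UTF8 says the two strings will be handed to us as UTF-8. */
    return sqlite3_create_collation(db, "NOCASEISH", SQLITE_UTF8, 0, nocaseCompare);
  }

    Once registered, such a collation can then be selected with a COLLATE clause, for example ORDER BY name COLLATE NOCASEISH.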

    + +

    +The decision of which collating sequence to use is controlled by the COLLATE clause in SQL. A COLLATE clause can occur on a table definition, to define a default collating sequence for a table column, on a field of an index, or in the ORDER BY clause of a SELECT statement. Planned enhancements to SQLite are to include standard CAST() syntax to allow the collating sequence of an expression to be defined.

    + +

    64-bit ROWIDs

    + +

    +Every row of a table has a unique rowid. +If the table defines a column with the type "INTEGER PRIMARY KEY" then that +column becomes an alias for the rowid. But with or without an INTEGER PRIMARY +KEY column, every row still has a rowid. +

    + +

    +In SQLite version 3.0, the rowid is a 64-bit signed integer. +This is an expansion of SQLite version 2.8 which only permitted +rowids of 32-bits. +

    + +

    +To minimize storage space, the 64-bit rowid is stored as a variable-length integer. Rowids between 0 and 127 use only a single byte. Rowids between 0 and 16383 use just 2 bytes. Rowids up to 2097151 use three bytes. And so forth. Negative rowids are allowed but they always use nine bytes of storage and so their use is discouraged. When rowids are generated automatically by SQLite, they will always be non-negative.

    + +

    Improved Concurrency

    + +

    +SQLite version 2.8 allowed multiple simultaneous readers or a single +writer but not both. SQLite version 3.0 allows one process to begin +writing the database while other processes continue to read. The +writer must still obtain an exclusive lock on the database for a brief +interval in order to commit its changes, but the exclusive lock is no +longer required for the entire write operation. +A more detailed report on the locking +behavior of SQLite version 3.0 is available separately. +

    + +

    +A limited form of table-level locking is now also available in SQLite. If each table is stored in a separate database file, those separate files can be attached to the main database (using the ATTACH command) and the combined databases will function as one. But locks will only be acquired on individual files as needed. So if you redefine "database" to mean two or more database files, then it is entirely possible for two processes to be writing to the same database at the same time. To further support this capability, commits of transactions involving two or more ATTACHed databases are now atomic.

    + +

    Credits

    + +

    +SQLite version 3.0 is made possible in part by AOL developers +supporting and embracing great Open-Source Software. +

    +
    +This page last modified 2008/06/26 02:51:28 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/version3.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/version3.tcl
    --- sqlite3-3.4.2/www/version3.tcl 2007-03-29 19:39:37.000000000 +0100
    +++ sqlite3-3.6.16/www/version3.tcl 1970-01-01 01:00:00.000000000 +0100
    @@ -1,293 +0,0 @@
    -#!/usr/bin/tclsh
    -source common.tcl
    -header {SQLite Version 3 Overview}
    -puts {
    -

    SQLite Version 3 Overview

    - -

    -SQLite version 3.0 introduces important changes to the library, including: -

    - -
      -
    • A more compact format for database files.
    • -
    • Manifest typing and BLOB support.
    • -
    • Support for both UTF-8 and UTF-16 text.
    • -
    • User-defined text collating sequences.
    • -
    • 64-bit ROWIDs.
    • -
    • Improved Concurrency.
    • -
    - -

    -This document is a quick introduction to the changes for SQLite 3.0 -for users who are already familiar with SQLite version 2.8. -

    - -

    Naming Changes

    - -

    -SQLite version 2.8 will continue to be supported with bug fixes -for the foreseeable future. In order to allow SQLite version 2.8 -and SQLite version 3.0 to peacefully coexist, the names of key files -and APIs in SQLite version 3.0 have been changed to include the -character "3". For example, the include file used by C programs -has been changed from "sqlite.h" to "sqlite3.h". And the name of -the shell program used to interact with databases has been changed -from "sqlite.exe" to "sqlite3.exe". With these changes, it is possible -to have both SQLite 2.8 and SQLite 3.0 installed on the same system at -the same time. And it is possible for the same C program to link -against both SQLite 2.8 and SQLite 3.0 at the same time and to use -both libraries at the same time. -

    - -

    New File Format

    - -

    -The format used by SQLite database files has been completely revised. -The old version 2.1 format and the new 3.0 format are incompatible with -one another. Version 2.8 of SQLite will not read a version 3.0 database -files and version 3.0 of SQLite will not read a version 2.8 database file. -

    - -

    -To convert an SQLite 2.8 database into an SQLite 3.0 database, have -ready the command-line shells for both version 2.8 and 3.0. Then -enter a command like the following: -

    - -
    -sqlite OLD.DB .dump | sqlite3 NEW.DB
    -
    - -

    -The new database file format uses B+trees for tables. In a B+tree, all -data is stored in the leaves of the tree instead of in both the leaves and -the intermediate branch nodes. The use of B+trees for tables allows for -better scalability and the storage of larger data fields without the use of -overflow pages. Traditional B-trees are still used for indices.

    - -

    -The new file format also supports variable pages sizes between 512 and -32768 bytes. The size of a page is stored in the file header so the -same library can read databases with different pages sizes, in theory, -though this feature has not yet been implemented in practice. -

    - -

    -The new file format omits unused fields from its disk images. For example, -indices use only the key part of a B-tree record and not the data. So -for indices, the field that records the length of the data is omitted. -Integer values such as the length of key and data are stored using -a variable-length encoding so that only one or two bytes are required to -store the most common cases but up to 64-bits of information can be encoded -if needed. -Integer and floating point data is stored on the disk in binary rather -than being converted into ASCII as in SQLite version 2.8. -These changes taken together result in database files that are typically -25% to 35% smaller than the equivalent files in SQLite version 2.8. -

    - -

    -Details of the low-level B-tree format used in SQLite version 3.0 can -be found in header comments to the -btree.c -source file. -

    - -

    Manifest Typing and BLOB Support

    - -

    -SQLite version 2.8 will deal with data in various formats internally, -but when writing to the disk or interacting through its API, SQLite 2.8 -always converts data into ASCII text. SQLite 3.0, in contrast, exposes -its internal data representations to the user and stores binary representations -to disk when appropriate. The exposing of non-ASCII representations was -added in order to support BLOBs. -

    - -

    -SQLite version 2.8 had the feature that any type of data could be stored -in any table column regardless of the declared type of that column. This -feature is retained in version 3.0, though in a slightly modified form. -Each table column will store any type of data, though columns have an -affinity for the format of data defined by their declared datatype. -When data is inserted into a column, that column will make at attempt -to convert the data format into the columns declared type. All SQL -database engines do this. The difference is that SQLite 3.0 will -still store the data even if a format conversion is not possible. -

    - -

    -For example, if you have a table column declared to be of type "INTEGER" -and you try to insert a string, the column will look at the text string -and see if it looks like a number. If the string does look like a number -it is converted into a number and into an integer if the number does not -have a fractional part, and stored that way. But if the string is not -a well-formed number it is still stored as a string. A column with a -type of "TEXT" tries to convert numbers into an ASCII-Text representation -before storing them. But BLOBs are stored in TEXT columns as BLOBs because -you cannot in general convert a BLOB into text. -

    - -

    -In most other SQL database engines the datatype is associated with -the table column that holds the data - with the data container. -In SQLite 3.0, the datatype is associated with the data itself, not -with its container. -Paul Graham in his book -ANSI Common Lisp -calls this property "Manifest Typing". -Other writers have other definitions for the term "manifest typing", -so beware of confusion. But by whatever name, that is the datatype -model supported by SQLite 3.0. -

    - -

    -Additional information about datatypes in SQLite version 3.0 is -available -separately. -

    - -

    Support for UTF-8 and UTF-16

    - -

    -The new API for SQLite 3.0 contains routines that accept text as -both UTF-8 and UTF-16 in the native byte order of the host machine. -Each database file manages text as either UTF-8, UTF-16BE (big-endian), -or UTF-16LE (little-endian). Internally and in the disk file, the -same text representation is used everywhere. If the text representation -specified by the database file (in the file header) does not match -the text representation required by the interface routines, then text -is converted on-the-fly. -Constantly converting text from one representation to another can be -computationally expensive, so it is suggested that programmers choose a -single representation and stick with it throughout their application. -

    - -

    -In the current implementation of SQLite, the SQL parser only works -with UTF-8 text. So if you supply UTF-16 text it will be converted. -This is just an implementation issue and there is nothing to prevent -future versions of SQLite from parsing UTF-16 encoded SQL natively. -

    - -

    -When creating new user-defined SQL functions and collating sequences, -each function or collating sequence can specify it if works with -UTF-8, UTF-16be, or UTF-16le. Separate implementations can be registered -for each encoding. If an SQL function or collating sequences is required -but a version for the current text encoding is not available, then -the text is automatically converted. As before, this conversion takes -computation time, so programmers are advised to pick a single -encoding and stick with it in order to minimize the amount of unnecessary -format juggling. -

    - -

    -SQLite is not particular about the text it receives and is more than -happy to process text strings that are not normalized or even -well-formed UTF-8 or UTF-16. Thus, programmers who want to store -IS08859 data can do so using the UTF-8 interfaces. As long as no -attempts are made to use a UTF-16 collating sequence or SQL function, -the byte sequence of the text will not be modified in any way. -

    - -

    User-defined Collating Sequences

    - -

    -A collating sequence is just a defined order for text. When SQLite 3.0 -sorts (or uses a comparison operator like "<" or ">=") the sort order -is first determined by the data type. -

    - -
      -
    • NULLs sort first
    • -
    • Numeric values sort next in numerical order
    • -
    • Text values come after numerics
    • -
    • BLOBs sort last
    • -
    - -

    -Collating sequences are used for comparing two text strings. -The collating sequence does not change the ordering of NULLs, numbers, -or BLOBs, only text. -

    - -

    -A collating sequence is implemented as a function that takes the -two strings being compared as inputs and returns negative, zero, or -positive if the first string is less than, equal to, or greater than -the second. -SQLite 3.0 comes with a single built-in collating sequence named "BINARY" -which is implemented using the memcmp() routine from the standard C library. -The BINARY collating sequence works well for English text. For other -languages or locales, alternative collating sequences may be preferred. -

    - -

    -The decision of which collating sequence to use is controlled by the -COLLATE clause in SQL. A COLLATE clause can occur on a table definition, -to define a default collating sequence to a table column, or on field -of an index, or in the ORDER BY clause of a SELECT statement. -Planned enhancements to SQLite are to include standard CAST() syntax -to allow the collating sequence of an expression to be defined. -

    - -

    64-bit ROWIDs

    - -

    -Every row of a table has a unique rowid. -If the table defines a column with the type "INTEGER PRIMARY KEY" then that -column becomes an alias for the rowid. But with or without an INTEGER PRIMARY -KEY column, every row still has a rowid. -

    - -

    -In SQLite version 3.0, the rowid is a 64-bit signed integer. -This is an expansion of SQLite version 2.8 which only permitted -rowids of 32-bits. -

    - -

    -To minimize storage space, the 64-bit rowid is stored as a variable length -integer. Rowids between 0 and 127 use only a single byte. -Rowids between 0 and 16383 use just 2 bytes. Up to 2097152 uses three -bytes. And so forth. Negative rowids are allowed but they always use -nine bytes of storage and so their use is discouraged. When rowids -are generated automatically by SQLite, they will always be non-negative. -

    - -

    Improved Concurrency

    - -

    -SQLite version 2.8 allowed multiple simultaneous readers or a single -writer but not both. SQLite version 3.0 allows one process to begin -writing the database while other processes continue to read. The -writer must still obtain an exclusive lock on the database for a brief -interval in order to commit its changes, but the exclusive lock is no -longer required for the entire write operation. -A more detailed report on the locking -behavior of SQLite version 3.0 is available separately. -

    - -

    -A limited form of table-level locking is now also available in SQLite. -If each table is stored in a separate database file, those separate -files can be attached to the main database (using the ATTACH command) -and the combined databases will function as one. But locks will only -be acquired on individual files as needed. So if you redefine "database" -to mean two or more database files, then it is entirely possible for -two processes to be writing to the same database at the same time. -To further support this capability, commits of transactions involving -two or more ATTACHed database are now atomic. -

    - -

    Credits

    - -

    -SQLite version 3.0 is made possible in part by AOL developers -supporting and embracing great Open-Source Software. -

    - -
    -}
    -footer {$Id: version3.tcl,v 1.6 2006/03/03 21:39:54 drh Exp $}
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/vtab.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/vtab.html
    --- sqlite3-3.4.2/www/vtab.html 1970-01-01 01:00:00.000000000 +0100
    +++ sqlite3-3.6.16/www/vtab.html 2009-06-27 15:07:45.000000000 +0100
    @@ -0,0 +1,1155 @@
    +
    +The Virtual Table Mechanism Of SQLite
    +

    The Virtual Table Mechanism Of SQLite

    + + +

    1.0 Introduction

    + +

    A virtual table is an object that is registered with an open SQLite +database connection. From the perspective of an SQL statement, +the virtual table object looks like any other table or view. +But behind the scenes, queries from and updates to a virtual table +invoke callback methods on the virtual table object instead of +reading and writing to the database file. + +

    The virtual table mechanism allows an application to publish +interfaces that are accessible from SQL statements as if they were +tables. SQL statements can in general do anything to a +virtual table that they can do to a real table, with the following +exceptions: + +

    +

      +
    • One cannot create a trigger on a virtual table. +
    • One cannot create additional indices on a virtual table. + (Virtual tables can have indices but that must be built into + the virtual table implementation. Indices cannot be added + separately using CREATE INDEX statements.) +
    • One cannot run ALTER TABLE ... ADD COLUMN + commands against a virtual table. +
    • Virtual tables cannot be used in a database that makes use of + the shared cache feature. +
    + +

    Particular virtual table implementations might impose additional constraints. For example, some virtual table implementations might provide read-only tables. Or some virtual table implementations might allow INSERT or DELETE but not UPDATE. Or some virtual table implementations might limit the kinds of UPDATEs that can be made.

    A virtual table might represent an in-memory data structure. Or it might represent a view of data on disk that is not in the SQLite format. Or the application might compute the content of the virtual table on demand.

    Here are some postulated uses for virtual tables: + +

      +
    • A full-text search interface +
    • Spatial indices using R-Trees +
    • Read and/or write the content of a comma-separated value (CSV) + file +
    • Access to the filesystem of the host computer +
    • Enabling SQL manipulation of data in statistics packages like R +
    + +

    1.1 Usage

    + +

    A virtual table is created using a CREATE VIRTUAL TABLE statement. This statement creates a table with a particular name and associates the table with a "module".

    +   CREATE VIRTUAL TABLE tablename USING modulename;
    +
    + +

    One can also provide comma-separated arguments to the module following +the module name: + +

    +   CREATE VIRTUAL TABLE tablename USING modulename(arg1, arg2, ...);
    +
    + +

    The format of the arguments to the module is very general. Each argument +can consist of keywords, string literals, identifiers, numbers, and +punctuation. The arguments are passed as written (as text) into the +constructor method of the virtual table implementation +when the virtual +table is created and that constructor is responsible for parsing and +interpreting the arguments. The argument syntax is sufficiently general +that a virtual table implementation can, if it wants to, interpret its +arguments as column definitions in an ordinary CREATE TABLE statement. +The implementation could also impose some other interpretation on the +arguments. + +

    Once a virtual table has been created, it can be used like any other +table with the exceptions noted above and imposed by specific virtual +table implementations. A virtual table is destroyed using the ordinary +DROP TABLE syntax. + +

    2.2 Implementation

    + +

    Several new C-level objects are used by the virtual table implementation: + +

    +  typedef struct sqlite3_vtab sqlite3_vtab;
    +  typedef struct sqlite3_index_info sqlite3_index_info;
    +  typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor;
    +  typedef struct sqlite3_module sqlite3_module;
    +
    + +

    The sqlite3_module structure defines a module object used to implement +a virtual table. Think of a module as a class from which one can +construct multiple virtual tables having similar properties. For example, +one might have a module that provides read-only access to +comma-separated-value (CSV) files on disk. That one module can then be +used to create several virtual tables where each virtual table refers +to a different CSV file. + +

    The module structure contains methods that are invoked by SQLite to +perform various actions on the virtual table such as creating new +instances of a virtual table or destroying old ones, reading and +writing data, searching for and deleting, updating, or inserting rows. +The module structure is explained in more detail below. + +

    Each virtual table instance is represented by an sqlite3_vtab structure. +The sqlite3_vtab structure looks like this: + +

    +  struct sqlite3_vtab {
    +    const sqlite3_module *pModule;
    +    int nRef;
    +    char *zErrMsg;
    +  };
    +
    + +

    Virtual table implementations will normally subclass this structure +to add additional private and implementation-specific fields. +The nRef field is used internally by the SQLite core and should not +be altered by the virtual table implementation. The virtual table +implementation may pass error message text to the core by putting +an error message string in zErrMsg. +Space to hold this error message string must be obtained from an +SQLite memory allocation function such as sqlite3_mprintf() or +sqlite3_malloc(). +Prior to assigning a new value to zErrMsg, the virtual table +implementation must free any preexisting content of zErrMsg using +sqlite3_free(). Failure to do this will result in a memory leak. +The SQLite core will free and zero the content of zErrMsg when it +delivers the error message text to the client application or when +it destroys the virtual table. The virtual table implementation only +needs to worry about freeing the zErrMsg content when it overwrites +the content with a new, different error message. + +
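    A small helper like the following C sketch keeps that discipline in one place; the helper name is hypothetical.

  /* Set zErrMsg following the rules above: free any previous message with
  ** sqlite3_free() and allocate the new one with an SQLite allocator so the
  ** core can free it later.  (sqlite3_free() of a NULL pointer is a no-op.) */
  #include "sqlite3.h"

  static void vtabSetError(sqlite3_vtab *pVtab, const char *zMsg){
    sqlite3_free(pVtab->zErrMsg);                  /* drop any previous message */
    pVtab->zErrMsg = sqlite3_mprintf("%s", zMsg);  /* the core frees this later */
  }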

    The sqlite3_vtab_cursor structure represents a pointer to a specific +row of a virtual table. This is what an sqlite3_vtab_cursor looks like: + +

    +  struct sqlite3_vtab_cursor {
    +    sqlite3_vtab *pVtab;
    +  };
    +
    + +

    Once again, practical implementations will likely subclass this +structure to add additional private fields. + +
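    For example, a hypothetical CSV module might declare its subclassed structures as in the sketch below, with the base structure placed first so that a pointer to the derived structure can also serve as a pointer to the base.

  /* Hypothetical subclasses of sqlite3_vtab and sqlite3_vtab_cursor. */
  #include <stdio.h>
  #include "sqlite3.h"

  typedef struct csv_vtab {
    sqlite3_vtab base;        /* must come first: this is the part the core sees */
    char *zFilename;          /* implementation-specific: name of the CSV file   */
  } csv_vtab;

  typedef struct csv_cursor {
    sqlite3_vtab_cursor base; /* must come first                                 */
    FILE *in;                 /* implementation-specific: open file handle       */
    long  rowid;              /* current row number                              */
  } csv_cursor;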

    The sqlite3_index_info structure is used to pass information into +and out of the xBestIndex method of the module that implements a +virtual table. + +

    Before a CREATE VIRTUAL TABLE statement can be run, the module +specified in that statement must be registered with the database +connection. This is accomplished using either of the sqlite3_create_module() +or sqlite3_create_module_v2() interfaces: + +

    +  int sqlite3_create_module(
    +    sqlite3 *db,               /* SQLite connection to register module with */
    +    const char *zName,         /* Name of the module */
    +    const sqlite3_module *,    /* Methods for the module */
    +    void *                     /* Client data for xCreate/xConnect */
    +  );
    +  int sqlite3_create_module_v2(
    +    sqlite3 *db,               /* SQLite connection to register module with */
    +    const char *zName,         /* Name of the module */
    +    const sqlite3_module *,    /* Methods for the module */
    +    void *,                    /* Client data for xCreate/xConnect */
    +    void(*xDestroy)(void*)     /* Client data destructor function */
    +  );
    +
    + +

    The sqlite3_create_module() and sqlite3_create_module_v2() routines associate a module name with an sqlite3_module structure and a separate client data pointer that is specific to each module. The only difference between the two create_module methods is that the _v2 method includes an extra parameter that specifies a destructor for the client data pointer. The module structure is what defines the behavior of a virtual table. The module structure looks like this:

      
    +  struct sqlite3_module {
    +    int iVersion;
    +    int (*xCreate)(sqlite3*, void *pAux,
    +                 int argc, char **argv,
    +                 sqlite3_vtab **ppVTab,
    +                 char **pzErr);
    +    int (*xConnect)(sqlite3*, void *pAux,
    +                 int argc, char **argv,
    +                 sqlite3_vtab **ppVTab,
    +                 char **pzErr);
    +    int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
    +    int (*xDisconnect)(sqlite3_vtab *pVTab);
    +    int (*xDestroy)(sqlite3_vtab *pVTab);
    +    int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
    +    int (*xClose)(sqlite3_vtab_cursor*);
    +    int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
    +                  int argc, sqlite3_value **argv);
    +    int (*xNext)(sqlite3_vtab_cursor*);
    +    int (*xEof)(sqlite3_vtab_cursor*);
    +    int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
    +    int (*xRowid)(sqlite3_vtab_cursor*, sqlite_int64 *pRowid);
    +    int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite_int64 *);
    +    int (*xBegin)(sqlite3_vtab *pVTab);
    +    int (*xSync)(sqlite3_vtab *pVTab);
    +    int (*xCommit)(sqlite3_vtab *pVTab);
    +    int (*xRollback)(sqlite3_vtab *pVTab);
    +    int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
    +                       void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
    +                       void **ppArg);
    +    int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
    +  };
    +
    + +

    The module structure defines all of the methods for each virtual +table object. The module structure also contains the iVersion field which +defines the particular edition of the module table structure. Currently, +iVersion is always 1, but in future releases of SQLite the module structure +definition might be extended with additional methods and in that case +the iVersion value will be increased. + +

    The rest of the module structure consists of methods used to implement +various features of the virtual table. Details on what each of these +methods do are provided in the sequel. + +

    1.3 Virtual Tables And Shared Cache

    + +

    The virtual table mechanism assumes that each database connection keeps +its own copy of the database schema. Hence, the virtual table mechanism +cannot be used in a database that has shared cache enabled. +The sqlite3_create_module() interface will return an error if +the shared cache feature is enabled. + +

    1.4 Creating New Virtual Table Implementations

    + +

    Follow these steps to create your own virtual table: + +

    +

      +
    1. Write all necessary methods. +
    2. Create an instance of the sqlite3_module structure containing pointers + to all the methods from step 1. +
    3. Register your sqlite3_module structure using one of the + sqlite3_create_module() or sqlite3_create_module_v2() interfaces. +
    4. Run a CREATE VIRTUAL TABLE command that specifies the new module in + the USING clause. +
    + +

    The only really hard part is step 1. You might want to start with an +existing virtual table implementation and modify it to suit your needs. +There are several virtual table implementations in the SQLite source tree +(for testing purposes). You might use one of those as a guide. Locate +these test virtual table implementations by searching +for "sqlite3_create_module". + +

    You might also want to implement your new virtual table as a +loadable extension. + +
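    Steps 3 and 4 might look like the following C sketch, where csvModule stands for a hypothetical sqlite3_module filled in with the methods written in step 1.

  /* Register a hypothetical module and create a virtual table that uses it. */
  #include "sqlite3.h"

  extern const sqlite3_module csvModule;   /* hypothetical, defined elsewhere */

  int setup_vtab(sqlite3 *db){
    int rc = sqlite3_create_module(db, "csv", &csvModule, 0);
    if( rc!=SQLITE_OK ) return rc;
    return sqlite3_exec(db,
        "CREATE VIRTUAL TABLE books USING csv(books.csv)", 0, 0, 0);
  }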

    2.0 Virtual Table Methods

    + + + +

    2.1 The xCreate Method

    + +
    +  int (*xCreate)(sqlite3 *db, void *pAux,
    +               int argc, char **argv,
    +               sqlite3_vtab **ppVTab,
    +               char **pzErr);
    +
    + +

    This method is called to create a new instance of a virtual table in response to a CREATE VIRTUAL TABLE statement. The db parameter is a pointer to the SQLite database connection that is executing the CREATE VIRTUAL TABLE statement. The pAux argument is the copy of the client data pointer that was the fourth argument to the sqlite3_create_module() or sqlite3_create_module_v2() call that registered the virtual table module. The argv parameter is an array of argc pointers to null terminated strings. The first string, argv[0], is the name of the module being invoked. The module name is the name provided as the second argument to sqlite3_create_module() and as the argument to the USING clause of the CREATE VIRTUAL TABLE statement that is running. The second, argv[1], is the name of the database in which the new virtual table is being created. The database name is "main" for the primary database, or "temp" for the TEMP database, or the name given at the end of the ATTACH statement for attached databases. The third element of the array, argv[2], is the name of the new virtual table, as specified following the TABLE keyword in the CREATE VIRTUAL TABLE statement. If present, the fourth and subsequent strings in the argv[] array report the arguments to the module name in the CREATE VIRTUAL TABLE statement.

    The job of this method is to construct the new virtual table object +(an sqlite3_vtab object) and return a pointer to it in *ppVTab. + +

    As part of the task of creating a new sqlite3_vtab structure, this +method must invoke sqlite3_declare_vtab() to tell the SQLite +core about the columns and datatypes in the virtual table. +The sqlite3_declare_vtab() API has the following prototype: + +

    +    int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable)
    +
    + +

    The first argument to sqlite3_declare_vtab() must be the same database connection pointer as the first parameter to this method. The second argument to sqlite3_declare_vtab() must be a zero-terminated UTF-8 string that contains a well-formed CREATE TABLE statement that defines the columns in the virtual table and their data types. The name of the table in this CREATE TABLE statement is ignored, as are all constraints. Only the column names and datatypes matter. The CREATE TABLE statement string need not be held in persistent memory. The string can be deallocated and/or reused as soon as the sqlite3_declare_vtab() routine returns.

    If a column datatype contains the special keyword "HIDDEN" +(in any combination of upper and lower case letters) then that keyword +is omitted from the column datatype name and the column is marked +as a hidden column internally. +A hidden column differs from a normal column in three respects: + +

    +

      +
    • Hidden columns are not listed in the dataset returned by + "PRAGMA table_info", +
    • Hidden columns are not included in the expansion of a "*" + expression in the result set of a SELECT, and +
    • Hidden columns are not included in the implicit column-list + used by an INSERT statement that lacks an explicit column-list. +
    + +

    For example, if the following SQL is passed to sqlite3_declare_vtab(): + +

    +   CREATE TABLE x(a HIDDEN VARCHAR(12), b INTEGER, c INTEGER Hidden);
    +
    + +

    Then the virtual table would be created with two hidden columns, +and with datatypes of "VARCHAR(12)" and "INTEGER". + +

    The xCreate method need not initialize the pModule, nRef, and zErrMsg +fields of the sqlite3_vtab object. The SQLite core will take care of +that chore. + +

    The xCreate method must return SQLITE_OK if it is successful in +creating the new virtual table, or SQLITE_ERROR if it is not successful. +If not successful, the sqlite3_vtab structure must not be allocated. +An error message may optionally be returned in *pzErr if unsuccessful. +Space to hold the error message string must be allocated using +an SQLite memory allocation function like +sqlite3_malloc() or sqlite3_mprintf() as the SQLite core will +attempt to free the space using sqlite3_free() after the error has +been reported up to the application. + +
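For illustration, the following is a minimal sketch of an xCreate implementation for a hypothetical read-only "series" module; the series_vtab structure and its single "value" column are inventions of this example, not part of the SQLite API:

    #include <sqlite3.h>
    #include <string.h>

    typedef struct series_vtab series_vtab;
    struct series_vtab {
      sqlite3_vtab base;       /* Base class - must appear first */
      /* module-specific fields would go here */
    };

    static int seriesCreate(
      sqlite3 *db, void *pAux,
      int argc, char **argv,
      sqlite3_vtab **ppVTab, char **pzErr
    ){
      series_vtab *pNew;
      int rc;

      /* Tell the SQLite core about the columns of the virtual table. */
      rc = sqlite3_declare_vtab(db, "CREATE TABLE x(value INTEGER)");
      if( rc!=SQLITE_OK ) return rc;

      /* Allocate and zero the new sqlite3_vtab subclass. */
      pNew = sqlite3_malloc(sizeof(*pNew));
      if( pNew==0 ) return SQLITE_NOMEM;
      memset(pNew, 0, sizeof(*pNew));
      *ppVTab = &pNew->base;
      return SQLITE_OK;
    }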

    The xCreate method is required for every virtual table implementation, +though the xCreate and xConnect pointers of the sqlite3_module object +may point to the same function if the virtual table does not need to initialize +backing store. + + +

    2.2 The xConnect Method

    + +
    +  int (*xConnect)(sqlite3*, void *pAux,
    +               int argc, char **argv,
    +               sqlite3_vtab **ppVTab,
    +               char **pzErr);
    +
    + +

    The xConnect method is very similar to xCreate. +It has the same parameters and constructs a new sqlite3_vtab structure +just like xCreate. +And it must also call sqlite3_declare_vtab() like xCreate. + +

    The difference is that xConnect is called to establish a new +connection to an existing virtual table whereas xCreate is called +to create a new virtual table from scratch. + +

    The xCreate and xConnect methods are only different when the +virtual table has some kind of backing store that must be initialized +the first time the virtual table is created. The xCreate method creates +and initializes the backing store. The xConnect method just connects +to an existing backing store. + +

    As an example, consider a virtual table implementation that +provides read-only access to existing comma-separated-value (CSV) +files on disk. There is no backing store that needs to be created +or initialized for such a virtual table (since the CSV files already +exist on disk) so the xCreate and xConnect methods will be identical +for that module. + +
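In such a case the sqlite3_module definition can simply name the same function in both slots; csvConnect below is a hypothetical method name:

    static int csvConnect(sqlite3*, void*, int, char**, sqlite3_vtab**, char**);

    static sqlite3_module csvModule = {
      0,            /* iVersion */
      csvConnect,   /* xCreate - no backing store to initialize */
      csvConnect,   /* xConnect */
      /* ... remaining method pointers ... */
    };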

    Another example is a virtual table that implements a full-text index. +The xCreate method must create and initialize data structures to hold +the dictionary and posting lists for that index. The xConnect method, +on the other hand, only has to locate and use an existing dictionary +and posting lists that were created by a prior xCreate call. + +

    The xConnect method must return SQLITE_OK if it is successful +in creating the new virtual table, or SQLITE_ERROR if it is not +successful. If not successful, the sqlite3_vtab structure must not be +allocated. An error message may optionally be returned in *pzErr if +unsuccessful. +Space to hold the error message string must be allocated using +an SQLite memory allocation function like +sqlite3_malloc() or sqlite3_mprintf() as the SQLite core will +attempt to free the space using sqlite3_free() after the error has +been reported up to the application. + +

    The xConnect method is required for every virtual table implementation, +though the xCreate and xConnect pointers of the sqlite3_module object +may point to the same function if the virtual table does not need to initialize +backing store. + + +

    2.3 The xBestIndex Method

    + +

    SQLite uses the xBestIndex method of a virtual table module to determine +the best way to access the virtual table. +The xBestIndex method has a prototype like this: + +

    +  int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
    +
    + +

    The SQLite core communicates with the xBestIndex method by filling +in certain fields of the sqlite3_index_info structure and passing a +pointer to that structure into xBestIndex as the second parameter. +The xBestIndex method fills out other fields of this structure which +forms the reply. The sqlite3_index_info structure looks like this: + +

    +  struct sqlite3_index_info {
    +    /* Inputs */
    +    const int nConstraint;     /* Number of entries in aConstraint */
    +    const struct sqlite3_index_constraint {
    +       int iColumn;              /* Column on left-hand side of constraint */
    +       unsigned char op;         /* Constraint operator */
    +       unsigned char usable;     /* True if this constraint is usable */
    +       int iTermOffset;          /* Used internally - xBestIndex should ignore */
    +    } *const aConstraint;      /* Table of WHERE clause constraints */
    +    const int nOrderBy;        /* Number of terms in the ORDER BY clause */
    +    const struct sqlite3_index_orderby {
    +       int iColumn;              /* Column number */
    +       unsigned char desc;       /* True for DESC.  False for ASC. */
    +    } *const aOrderBy;         /* The ORDER BY clause */
    +
    +    /* Outputs */
    +    struct sqlite3_index_constraint_usage {
    +      int argvIndex;           /* if >0, constraint is part of argv to xFilter */
    +      unsigned char omit;      /* Do not code a test for this constraint */
    +    } *const aConstraintUsage;
    +    int idxNum;                /* Number used to identify the index */
    +    char *idxStr;              /* String, possibly obtained from sqlite3_malloc */
    +    int needToFreeIdxStr;      /* Free idxStr using sqlite3_free() if true */
    +    int orderByConsumed;       /* True if output is already ordered */
    +    double estimatedCost;      /* Estimated cost of using this index */
    +  };
    +
    + +

    In addition, there are some defined constants: + +

    +  #define SQLITE_INDEX_CONSTRAINT_EQ    2
    +  #define SQLITE_INDEX_CONSTRAINT_GT    4
    +  #define SQLITE_INDEX_CONSTRAINT_LE    8
    +  #define SQLITE_INDEX_CONSTRAINT_LT    16
    +  #define SQLITE_INDEX_CONSTRAINT_GE    32
    +  #define SQLITE_INDEX_CONSTRAINT_MATCH 64
    +
    + +

    The SQLite core calls the xBestIndex method when it is compiling a query +that involves a virtual table. In other words, SQLite calls this method +when it is running sqlite3_prepare() or the equivalent. +By calling this method, the +SQLite core is saying to the virtual table that it needs to access +some subset of the rows in the virtual table and it wants to know the +most efficient way to do that access. The xBestIndex method replies +with information that the SQLite core can then use to conduct an +efficient search of the virtual table. + +

    While compiling a single SQL query, the SQLite core might call +xBestIndex multiple times with different settings in sqlite3_index_info. +The SQLite core will then select the combination that appears to +give the best performance. + +

    Before calling this method, the SQLite core initializes an instance +of the sqlite3_index_info structure with information about the +query that it is currently trying to process. This information +derives mainly from the WHERE clause and ORDER BY or GROUP BY clauses +of the query, but also from any ON or USING clauses if the query is a +join. The information that the SQLite core provides to the xBestIndex +method is held in the part of the structure that is marked as "Inputs". +The "Outputs" section is initialized to zero. + +

    The information in the sqlite3_index_info structure is ephemeral +and may be overwritten or deallocated as soon as the xBestIndex method +returns. If the xBestIndex method needs to remember any part of the +sqlite3_index_info structure, it should make a copy. Care must be +taken to store the copy in a place where it will be deallocated, such +as in the idxStr field with needToFreeIdxStr set to 1. + +

    Note that xBestIndex will always be called before xFilter, since +the idxNum and idxStr outputs from xBestIndex are required inputs to +xFilter. However, there is no guarantee that xFilter will be called +following a successful xBestIndex. + +

    The xBestIndex method is required for every virtual table implementation. + +

    2.3.1 Inputs

    + +

    The main thing that the SQLite core is trying to communicate to +the virtual table is the constraints that are available to limit +the number of rows that need to be searched. The aConstraint[] array +contains one entry for each constraint. There will be exactly +nConstraint entries in that array. + +

    Each constraint will correspond to a term in the WHERE clause +or in a USING or ON clause that is of the form + +

    + column OP EXPR +
    + +

    Where "column" is a column in the virtual table, OP is an operator +like "=" or "<", and EXPR is an arbitrary expression. So, for example, +if the WHERE clause contained a term like this: + +

    +     a = 5
    +
    + +

    Then one of the constraints would be on the "a" column with +operator "=" and an expression of "5". Constraints need not have a +literal representation of the WHERE clause. The query optimizer might +make transformations to the +WHERE clause in order to extract as many constraints +as it can. So, for example, if the WHERE clause contained something +like this: + +

    +     x BETWEEN 10 AND 100 AND 999>y
    +
    + +

    The query optimizer might translate this into three separate constraints: + +

    +     x >= 10
    +     x <= 100
    +     y < 999
    +
    + +

    For each constraint, the aConstraint[].iColumn field indicates which +column appears on the left-hand side of the constraint. +The first column of the virtual table is column 0. +The rowid of the virtual table is column -1. +The aConstraint[].op field indicates which operator is used. +The SQLITE_INDEX_CONSTRAINT_* constants define the allowed values +for the op field. +Columns occur in the order they were defined by the call to +sqlite3_declare_vtab() in the xCreate or xConnect method. +Hidden columns are counted when determining the column index. + +

    The aConstraint[] array contains information about all constraints +that apply to the virtual table. But some of the constraints might +not be usable because of the way tables are ordered in a join. +The xBestIndex method must therefore only consider constraints +whose aConstraint[].usable flag is true. + +

    In addition to WHERE clause constraints, the SQLite core also +tells the xBestIndex method about the ORDER BY clause. +(In an aggregate query, the SQLite core might put GROUP BY clause +information in place of the ORDER BY clause information, but this fact +should not make any difference to the xBestIndex method.) +If all terms of the ORDER BY clause are columns in the virtual table, +then nOrderBy will be the number of terms in the ORDER BY clause +and the aOrderBy[] array will identify the column for each term +in the ORDER BY clause and whether that column is ASC or DESC. + +

    2.3.2 Outputs

    + +

    Given all of the information above, the job of the xBestIndex +method is to figure out the best way to search the virtual table. + +

    The xBestIndex method fills the idxNum and idxStr fields with +information that communicates an indexing strategy to the xFilter +method. The information in idxNum and idxStr is arbitrary as far +as the SQLite core is concerned. The SQLite core just copies the +information through to the xFilter method. Any desired meaning can +be assigned to idxNum and idxStr as long as xBestIndex and xFilter +agree on what that meaning is. + +

    The idxStr value may be a string obtained from an SQLite +memory allocation function such as sqlite3_mprintf(). +If this is the case, then the needToFreeIdxStr flag must be set to +true so that the SQLite core will know to call sqlite3_free() on +that string when it has finished with it, and thus avoid a memory leak. + +

    If the virtual table will output rows in the order specified by +the ORDER BY clause, then the orderByConsumed flag may be set to +true. If the output is not automatically in the correct order +then orderByConsumed must be left in its default false setting. +This will indicate to the SQLite core that it will need to do a +separate sorting pass over the data after it comes out of the virtual table. + +

    The estimatedCost field should be set to the estimated number +of disk access operations required to execute this query against +the virtual table. The SQLite core will often call xBestIndex +multiple times with different constraints, obtain multiple cost +estimates, then choose the query plan that gives the lowest estimate. + +

    The aConstraintUsage[] array contains one element for each of +the nConstraint constraints in the inputs section of the +sqlite3_index_info structure. +The aConstraintUsage[] array is used by xBestIndex to tell the +core how it is using the constraints. + +

    The xBestIndex method may set aConstraintUsage[].argvIndex +entries to values greater than zero. +Exactly one entry should be set to 1, another to 2, another to 3, +and so forth up to as many or as few as the xBestIndex method wants. +The EXPR of the corresponding constraints will then be passed +in as the argv[] parameters to xFilter. + +

    For example, if aConstraintUsage[3].argvIndex is set to 1, then +when xFilter is called, the argv[0] passed to xFilter will have +the EXPR value of the aConstraint[3] constraint. + +

    By default, the SQLite core double checks all constraints on +each row of the virtual table that it receives. If such a check +is redundant, the xBestIndex method can suppress that double-check by +setting aConstraintUsage[].omit. + + +
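Tying the inputs and outputs together, the sketch below shows an xBestIndex for a hypothetical table that can do a fast lookup when an equality constraint on column 0 is usable. The idxNum values 0 and 1 and the cost figures are arbitrary conventions shared only with this module's own xFilter:

    static int seriesBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
      int i;
      pInfo->idxNum = 0;                 /* 0 means "full scan" to our xFilter */
      pInfo->estimatedCost = 1000000.0;  /* rough guess for a full scan */
      for(i=0; i<pInfo->nConstraint; i++){
        if( pInfo->aConstraint[i].usable
         && pInfo->aConstraint[i].iColumn==0
         && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ
        ){
          /* Ask the core to pass the right-hand side of "column0 = EXPR"
          ** to xFilter as argv[0], and skip its own redundant check. */
          pInfo->aConstraintUsage[i].argvIndex = 1;
          pInfo->aConstraintUsage[i].omit = 1;
          pInfo->idxNum = 1;             /* 1 means "equality lookup" */
          pInfo->estimatedCost = 10.0;
          break;
        }
      }
      return SQLITE_OK;
    }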

    2.4 The xDisconnect Method

    + +
    +  int (*xDisconnect)(sqlite3_vtab *pVTab);
    +
    + +

    This method releases a connection to a virtual table. +Only the sqlite3_vtab object is destroyed. +The virtual table is not destroyed and any backing store +associated with the virtual table persists. +This method undoes the work of xConnect. + +

    This method is a destructor for a connection to the virtual table. +Contrast this method with xDestroy, which is a destructor +for the entire virtual table. + +

    The xDisconnect method is required for every virtual table implementation, +though it is acceptable for the xDisconnect and xDestroy methods to be +the same function if that makes sense for the particular virtual table. + + +

    2.5 The xDestroy Method

    + +
    +  int (*xDestroy)(sqlite3_vtab *pVTab);
    +
    + +

    This method releases a connection to a virtual table, just like +the xDisconnect method, and it also destroys the underlying +table implementation. This method undoes the work of xCreate. + +

    The xDisconnect method is called whenever a database connection +that uses a virtual table is closed. The xDestroy method is only +called when a DROP TABLE statement is executed against the virtual table. + +

    The xDestroy method is required for every virtual table implementation, +though it is acceptable for the xDisconnect and xDestroy methods to be +the same function if that makes sense for the particular virtual table. + + +

    2.6 The xOpen Method

    + +
    +  int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
    +
    + +

    The xOpen method creates a new cursor used for reading and/or +writing a virtual table. A successful invocation of this method +will allocate the memory for the sqlite3_vtab_cursor (or a subclass), +initialize the new object, and make *ppCursor point to the new object. +The successful call then returns SQLITE_OK. + +

    For every successful call to this method, the SQLite core will +later invoke the xClose method to destroy +the allocated cursor. + +

    The xOpen method need not initialize the pVtab field of the +sqlite3_vtab_cursor structure. The SQLite core will take care +of that chore automatically. + +

    A virtual table implementation must be able to support an arbitrary +number of simultaneously open cursors. + +

    When initially opened, the cursor is in an undefined state. +The SQLite core will invoke the xFilter method +on the cursor prior to any attempt to position or read from the cursor. + +

    The xOpen method is required for every virtual table implementation. + + + +

    2.7 The xClose Method

    + +
    +  int (*xClose)(sqlite3_vtab_cursor*);
    +
    + +

    The xClose method closes a cursor previously opened by +xOpen. +The SQLite core will always call xClose once for each cursor opened +using xOpen. + +

    This method must release all resources allocated by the +corresponding xOpen call. The routine will not be called again even if it +returns an error. The SQLite core will not use the +sqlite3_vtab_cursor again after it has been closed. + +
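A minimal matching xOpen/xClose pair for the hypothetical "series" module sketched earlier might look like this; the series_cursor structure and its fields are invented for the example:

    typedef struct series_cursor series_cursor;
    struct series_cursor {
      sqlite3_vtab_cursor base;  /* Base class - must appear first */
      sqlite3_int64 iValue;      /* Current value (example state) */
      sqlite3_int64 iLast;       /* Last value in the current scan */
    };

    static int seriesOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
      series_cursor *pCur = sqlite3_malloc(sizeof(*pCur));
      if( pCur==0 ) return SQLITE_NOMEM;
      memset(pCur, 0, sizeof(*pCur));
      *ppCursor = &pCur->base;
      return SQLITE_OK;
    }

    static int seriesClose(sqlite3_vtab_cursor *cur){
      sqlite3_free(cur);   /* release everything allocated by seriesOpen */
      return SQLITE_OK;
    }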

    The xClose method is required for every virtual table implementation. + + + +

    2.8 The xEof Method

    + +
    +  int (*xEof)(sqlite3_vtab_cursor*);
    +
    + +

    The xEof method must return false (zero) if the specified cursor +currently points to a valid row of data, or true (non-zero) otherwise. +This method is called by the SQL engine immediately after each +xFilter and xNext invocation. + +
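For the hypothetical series_cursor sketched under xClose above, xEof can be as simple as:

    static int seriesEof(sqlite3_vtab_cursor *cur){
      series_cursor *pCur = (series_cursor*)cur;
      return pCur->iValue > pCur->iLast;   /* non-zero once the scan is exhausted */
    }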

    The xEof method is required for every virtual table implementation. + + + +

    2.9 The xFilter Method

    + +
    +  int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
    +                int argc, sqlite3_value **argv);
    +
    + +

    This method begins a search of a virtual table. +The first argument is a cursor opened by xOpen. +The next two arguments define a particular search index previously +chosen by xBestIndex. The specific meanings of idxNum and idxStr +are unimportant as long as xFilter and xBestIndex agree on what +that meaning is. + +

    The xBestIndex function may have requested the values of +certain expressions using the aConstraintUsage[].argvIndex values +of the sqlite3_index_info structure. +Those values are passed to xFilter using the argc and argv parameters. + +

    If the virtual table contains one or more rows that match the +search criteria, then the cursor must be left pointing at the first row. +Subsequent calls to xEof must return false (zero). +If no rows match, then the cursor must be left in a state +that will cause xEof to return true (non-zero). +The SQLite engine will use +the xColumn and xRowid methods to access that row content. +The xNext method will be used to advance to the next row. + +
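Continuing the hypothetical series example, the sketch below follows the idxNum convention chosen by the earlier xBestIndex sketch (0 for a full scan of the example range, 1 for an equality lookup on the value passed in argv[0]):

    static int seriesFilter(
      sqlite3_vtab_cursor *cur, int idxNum, const char *idxStr,
      int argc, sqlite3_value **argv
    ){
      series_cursor *pCur = (series_cursor*)cur;
      if( idxNum==1 && argc>0 ){
        /* Equality lookup: position on the single requested value. */
        pCur->iValue = sqlite3_value_int64(argv[0]);
        pCur->iLast = pCur->iValue;
      }else{
        /* Full scan over the example range 1..10. */
        pCur->iValue = 1;
        pCur->iLast = 10;
      }
      return SQLITE_OK;
    }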

    This method must return SQLITE_OK if successful, or an SQLite +error code if an error occurs. + +

    The xFilter method is required for every virtual table implementation. + + + +

    2.10 The xNext Method

    + +
    +  int (*xNext)(sqlite3_vtab_cursor*);
    +
    + +

    The xNext method advances a virtual table cursor +to the next row of a result set initiated by xFilter. +If the cursor is already pointing at the last row when this +routine is called, then the cursor no longer points to valid +data and a subsequent call to the xEof method must return true (non-zero). +If the cursor is successfully advanced to another row of content, then +subsequent calls to xEof must return false (zero). + +
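For the series example, advancing the cursor is a single increment; once iValue passes iLast, the xEof sketch above reports the end of the scan:

    static int seriesNext(sqlite3_vtab_cursor *cur){
      series_cursor *pCur = (series_cursor*)cur;
      pCur->iValue++;
      return SQLITE_OK;
    }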

    This method must return SQLITE_OK if successful, or an SQLite +error code if an error occurs. + +

    The xNext method is required for every virtual table implementation. + + + +

    2.11 The xColumn Method

    + +
    +  int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int N);
    +
    + +

    The SQLite core invokes this method in order to find the value for +the N-th column of the current row. N is zero-based so the first column +is numbered 0. +The xColumn method may return its result back to SQLite using one of the +following interfaces: + +

      +
    • sqlite3_result_blob() +
    • sqlite3_result_double() +
    • sqlite3_result_int() +
    • sqlite3_result_int64() +
    • sqlite3_result_null() +
    • sqlite3_result_text() +
    • sqlite3_result_text16() +
    • sqlite3_result_text16le() +
    • sqlite3_result_text16be() +
    • sqlite3_result_value() +
    • sqlite3_result_zeroblob() +
    + +

    If the xColumn method implementation calls none of the functions above, +then the value of the column defaults to an SQL NULL. + +

    To raise an error, the xColumn method should use one of the result_text() +methods to set the error message text, then return an appropriate +error code. The xColumn method must return SQLITE_OK on success. + +
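For the single-column series example, an xColumn sketch might look like this:

    static int seriesColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int N){
      series_cursor *pCur = (series_cursor*)cur;
      if( N==0 ){
        sqlite3_result_int64(ctx, pCur->iValue);   /* the "value" column */
      }else{
        sqlite3_result_null(ctx);                  /* unknown column -> SQL NULL */
      }
      return SQLITE_OK;
    }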

    The xColumn method is required for every virtual table implementation. + + + +

    2.12 The xRowid Method

    + +
    +  int (*xRowid)(sqlite3_vtab_cursor *pCur, sqlite_int64 *pRowid);
    +
    + +

    A successful invocation of this method will cause *pRowid to be +filled with the rowid of the row that the +virtual table cursor pCur is currently pointing at. +This method returns SQLITE_OK on success. +It returns an appropriate error code on failure.
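In the series example the row value itself can double as the rowid:

    static int seriesRowid(sqlite3_vtab_cursor *pCur, sqlite_int64 *pRowid){
      *pRowid = ((series_cursor*)pCur)->iValue;
      return SQLITE_OK;
    }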

    + +

    The xRowid method is required for every virtual table implementation. + + + +

    2.13 The xUpdate Method

    + +
    +  int (*xUpdate)(
    +    sqlite3_vtab *pVTab,
    +    int argc,
    +    sqlite3_value **argv,
    +    sqlite_int64 *pRowid
    +  );
    +
    + +

    All changes to a virtual table are made using the xUpdate method. +This one method can be used to insert, delete, or update. + +

    The argc parameter specifies the number of entries in the argv array. +The value of argc will be 1 for a pure delete operation or N+2 for an insert +or replace or update where N is the number of columns in the table. +In the previous sentence, N includes any hidden columns. + +

    Every argv entry will have a non-NULL value in C but may contain the +SQL value NULL. In other words, it is always true that +argv[i]!=0 for i between 0 and argc-1. +However, it might be the case that +sqlite3_value_type(argv[i])==SQLITE_NULL. + +

    The argv[0] parameter is the rowid of a row in the virtual table +to be deleted. If argv[0] is an SQL NULL, then no deletion occurs. + +

    The argv[1] parameter is the rowid of a new row to be inserted +into the virtual table. If argv[1] is an SQL NULL, then the implementation +must choose a rowid for the newly inserted row. Subsequent argv[] +entries contain values of the columns of the virtual table, in the +order that the columns were declared. The number of columns will +match the table declaration that the xConnect or xCreate method made +using the sqlite3_declare_vtab() call. All hidden columns are included. + +

    When doing an insert without a rowid (argc>1, argv[1] is an SQL NULL), the +implementation must set *pRowid to the rowid of the newly inserted row; +this will become the value returned by the sqlite3_last_insert_rowid() +function. Setting this value in all the other cases is a harmless no-op; +the SQLite engine ignores the *pRowid return value if argc==1 or +argv[1] is not an SQL NULL. + +

    Each call to xUpdate will fall into one of the cases shown below. +Note that references to argv[i] mean the SQL value +held within the argv[i] object, not the argv[i] +object itself. + +

    +
    +
    argc = 1 +

    The single row with rowid equal to argv[0] is deleted. No insert occurs. + +

    argc > 1
    argv[0] = NULL
    +

    A new row is inserted with rowid argv[1] and column values in + argv[2] and following. If argv[1] is an SQL NULL, + then a new unique rowid is generated automatically. + +

    argc > 1
    argv[0] ≠ NULL
    argv[0] = argv[1]
    +

    The row with rowid argv[0] is updated with new values + in argv[2] and following parameters. + +

    argc > 1
    argv[0] ≠ NULL
    argv[0] ≠ argv[1]
    +

    The row with rowid argv[0] is updated with rowid argv[1] +and new values in argv[2] and following parameters. This will occur +when an SQL statement updates a rowid, as in the statement: +

    + UPDATE table SET rowid=rowid+1 WHERE ...; +
    +
    +
    + +

    The xUpdate method must return SQLITE_OK if and only if it is +successful. If a failure occurs, the xUpdate must return an appropriate +error code. On a failure, the pVTab->zErrMsg element may optionally +be replaced with error message text stored in memory allocated from SQLite +using functions such as sqlite3_mprintf() or sqlite3_malloc(). + +

    If the xUpdate method violates some constraint of the virtual table +(including, but not limited to, attempting to store a value of the wrong +datatype, attempting to store a value that is too +large or too small, or attempting to change a read-only value) then the +xUpdate must fail with an appropriate error code. + +

    There might be one or more sqlite3_vtab_cursor objects open and in use +on the virtual table instance and perhaps even on the row of the virtual +table when the xUpdate method is invoked. The implementation of +xUpdate must be prepared for attempts to delete or modify rows of the table +out from under other existing cursors. If the virtual table cannot accommodate +such changes, the xUpdate method must return an error code. + +
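The sketch below shows the usual shape of an xUpdate implementation that dispatches among the cases listed above. The helper routines (exampleDeleteRow, exampleInsertRow, exampleUpdateRow) are hypothetical storage functions that a real module would have to supply:

    static int exampleUpdate(
      sqlite3_vtab *pVTab, int argc, sqlite3_value **argv, sqlite_int64 *pRowid
    ){
      if( argc==1 ){
        /* DELETE the row whose rowid is argv[0]. */
        return exampleDeleteRow(pVTab, sqlite3_value_int64(argv[0]));
      }
      if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
        /* INSERT: argv[1] is the requested rowid (possibly NULL),
        ** argv[2] and following hold the column values.  The helper
        ** must set *pRowid when it generates a new rowid. */
        return exampleInsertRow(pVTab, argc, argv, pRowid);
      }
      /* UPDATE: the row with rowid argv[0] receives rowid argv[1]
      ** and the new column values in argv[2] and following. */
      return exampleUpdateRow(pVTab, argc, argv);
    }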

    The xUpdate method is optional. +If the xUpdate pointer in the sqlite3_module for a virtual table +is a NULL pointer, then the virtual table is read-only. + + + + +

    2.14 The xFindFunction Method

    + +
    +  int (*xFindFunction)(
    +    sqlite3_vtab *pVtab,
    +    int nArg,
    +    const char *zName,
    +    void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
    +    void **ppArg
    +  );
    +
    + +

    This method is called during sqlite3_prepare() to give the virtual +table implementation an opportunity to overload functions. +This method may be set to NULL in which case no overloading occurs. + +

    When a function uses a column from a virtual table as its first +argument, this method is called to see if the virtual table would +like to overload the function. The first three parameters are inputs: +the virtual table, the number of arguments to the function, and the +name of the function. If no overloading is desired, this method +returns 0. To overload the function, this method writes the new +function implementation into *pxFunc and writes user data into *ppArg +and returns 1. + +
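As a sketch, a module that wants to supply its own MATCH implementation for its columns might do something like the following; exampleMatchFunc is a hypothetical replacement function, and a real module would probably want a case-insensitive name comparison:

    #include <string.h>

    static void exampleMatchFunc(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
      sqlite3_result_int(ctx, 1);   /* placeholder: treat every row as a match */
    }

    static int exampleFindFunction(
      sqlite3_vtab *pVtab, int nArg, const char *zName,
      void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
      void **ppArg
    ){
      if( nArg==2 && strcmp(zName, "match")==0 ){
        *pxFunc = exampleMatchFunc;   /* overload MATCH for this table */
        *ppArg = 0;
        return 1;
      }
      return 0;   /* no overloading for anything else */
    }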

    Note that infix functions (LIKE, GLOB, REGEXP, and MATCH) reverse +the order of their arguments. So "like(A,B)" is equivalent to "B like A". +For the form "B like A" the B term is considered the first argument +to the function. But for "like(A,B)" the A term is considered the +first argument. + +

    The function pointer returned by this routine must be valid for +the lifetime of the sqlite3_vtab object given in the first parameter. + + + +

    2.15 The xBegin Method

    + +
    +  int (*xBegin)(sqlite3_vtab *pVTab);
    +
    + +

    This method begins a transaction on a virtual table. +This method is optional. The xBegin pointer of sqlite3_module +may be NULL. + +

    This method is always followed by one call to either the +xCommit or xRollback method. Virtual table transactions do +not nest, so the xBegin method will not be invoked more than once +on a single virtual table +without an intervening call to either xCommit or xRollback. +Multiple calls to other methods can and likely will occur in between +the xBegin and the corresponding xCommit or xRollback. + + + +

    2.16 The xSync Method

    + +
    +  int (*xSync)(sqlite3_vtab *pVTab);
    +
    + + +

    This method signals the start of a two-phase commit on a virtual +table. +This method is optional. The xSync pointer of sqlite3_module +may be NULL. + +

    This method is only invoked after a call to the xBegin method and +prior to an xCommit or xRollback. In order to implement two-phase +commit, the xSync method on all virtual tables is invoked prior to +invoking the xCommit method on any virtual table. If any of the +xSync methods fail, the entire transaction is rolled back. + + +

    2.17 The xCommit Method

    + +
    +  int (*xCommit)(sqlite3_vtab *pVTab);
    +
    + +

    This method causes a virtual table transaction to commit. +This method is optional. The xCommit pointer of sqlite3_module +may be NULL. + +

    A call to this method always follows a prior call to xBegin and +xSync. + + + + +

    2.18 The xRollback Method

    + +
    +  int (*xRollback)(sqlite3_vtab *pVTab);
    +
    + +

    This method causes a virtual table transaction to roll back. +This method is optional. The xRollback pointer of sqlite3_module +may be NULL. + +

    A call to this method always follows a prior call to xBegin. + + + + +

    2.19 The xRename Method

    + +
    +  int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
    +
    + +

    This method provides notification to the virtual table implementation +that the virtual table will be given a new name. +If this method returns SQLITE_OK then SQLite renames the table. +If this method returns an error code then the renaming is prevented. + +

    The xRename method is required for every virtual table implementation. +


    +This page last modified 2009/04/15 12:50:06 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/whentouse.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/whentouse.html --- sqlite3-3.4.2/www/whentouse.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/whentouse.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,331 @@ + + +Appropriate Uses For SQLite + + + + + +

    Appropriate Uses For SQLite

    + +

    +SQLite is different from most other SQL database engines in that its +primary design goal is to be simple: +

    + +
      +
    • Simple to administer
    • +
    • Simple to operate
    • +
    • Simple to embed in a larger program
    • +
    • Simple to maintain and customize
    • +
    + +

    +Many people like SQLite because it is small and fast. But those +qualities are just happy accidents. +Users also find that SQLite is very reliable. Reliability is +a consequence of simplicity. With less complication, there is +less to go wrong. So, yes, SQLite is small, fast, and reliable, +but first and foremost, SQLite strives to be simple. +

    + +

    +Simplicity in a database engine can be either a strength or a +weakness, depending on what you are trying to do. In order to +achieve simplicity, SQLite has had to sacrifice other characteristics +that some people find useful, such as high concurrency, fine-grained +access control, a rich set of built-in functions, stored procedures, +esoteric SQL language features, XML and/or Java extensions, +tera- or peta-byte scalability, and so forth. If you need some of these +features and do not mind the added complexity that they +bring, then SQLite is probably not the database for you. +SQLite is not intended to be an enterprise database engine. It is +not designed to compete with Oracle or PostgreSQL. +

    + +

    +The basic rule of thumb for when it is appropriate to use SQLite is +this: Use SQLite in situations where simplicity of administration, +implementation, and maintenance are more important than the countless +complex features that enterprise database engines provide. +As it turns out, situations where simplicity is the better choice +are more common than many people realize. +

    + +

    +Another way to look at SQLite is this: SQLite is not designed +to replace Oracle. +It is designed to replace +fopen(). +

    + +

    Situations Where SQLite Works Well

    + +
      + + +
    • Application File Format

      + +

      +SQLite has been used with great success as the on-disk file format +for desktop applications such as financial analysis tools, CAD +packages, record keeping programs, and so forth. The traditional +File/Open operation does an sqlite3_open() and executes a +BEGIN TRANSACTION to get exclusive access to the content. File/Save +does a COMMIT followed by another BEGIN TRANSACTION. The use +of transactions guarantees that updates to the application file are atomic, +durable, isolated, and consistent. +

      + +

      +Temporary triggers can be added to the database to record all +changes into a (temporary) undo/redo log table. These changes can then +be played back when the user presses the Undo and Redo buttons. Using +this technique, an unlimited depth undo/redo implementation can be written +in surprisingly little code. +

      +
    • + +
    • Embedded devices and applications

      + +

      Because an SQLite database requires little or no administration, +SQLite is a good choice for devices or services that must work +unattended and without human support. SQLite is a good fit for +use in cellphones, PDAs, set-top boxes, and/or appliances. It also +works well as an embedded database in downloadable consumer applications. +

      +
    • + +
    • Websites

      + +

      SQLite usually will work great as the database engine for low to +medium traffic websites (which is to say, 99.9% of all websites). +The amount of web traffic that SQLite can handle depends, of course, +on how heavily the website uses its database. Generally +speaking, any site that gets fewer than 100K hits/day should work +fine with SQLite. +The 100K hits/day figure is a conservative estimate, not a +hard upper bound. +SQLite has been demonstrated to work with 10 times that amount +of traffic.

      +
    • + +
    • Replacement for ad hoc disk files

      + +

      Many programs use +fopen(), +fread(), and +fwrite() to create and +manage files of data in home-grown formats. SQLite works +particularly well as a +replacement for these ad hoc data files.

      +
    • + +
    • Internal or temporary databases

      + +

      +For programs that have a lot of data that must be sifted and sorted +in diverse ways, it is often easier and quicker to load the data into +an in-memory SQLite database and use queries with joins and ORDER BY +clauses to extract the data in the form and order needed rather than +to try to code the same operations manually. +Using an SQL database internally in this way also gives the program +greater flexibility since new columns and indices can be added without +having to recode every query. +

      +
    • + +
    • Command-line dataset analysis tool

      + +

      +Experienced SQL users can employ +the command-line sqlite program to analyze miscellaneous +datasets. Raw data can be imported from CSV files, then that +data can be sliced and diced to generate a myriad of summary +reports. Possible uses include website log analysis, sports +statistics analysis, compilation of programming metrics, and +analysis of experimental results. +

      + +

      +You can also do the same thing with an enterprise client/server +database, of course. The advantages to using SQLite in this situation +are that SQLite is much easier to set up and the resulting database +is a single file that you can store on a floppy disk or flash-memory stick +or email to a colleague. +

      +
    • + +
    • Stand-in for an enterprise database during demos or testing

      + +

      +If you are writing a client application for an enterprise database engine, +it makes sense to use a generic database backend that allows you to connect +to many different kinds of SQL database engines. It makes even better +sense to +go ahead and include SQLite in the mix of supported databases and to statically +link the SQLite engine in with the client. That way the client program +can be used standalone with an SQLite data file for testing or for +demonstrations. +

      +
    • + +
    • Database Pedagogy

      + +

      +Because it is simple to set up and use (installation is trivial: just +copy the sqlite or sqlite.exe executable to the target machine +and run it) SQLite makes a good database engine for use in teaching SQL. +Students can easily create as many databases as they like and can +email databases to the instructor for comments or grading. For more +advanced students who are interested in studying how an RDBMS is +implemented, the modular and well-commented and documented SQLite code +can serve as a good basis. This is not to say that SQLite is an accurate +model of how other database engines are implemented, but rather a student who +understands how SQLite works can more quickly comprehend the operational +principles of other systems. +

      +
    • + +
    • Experimental SQL language extensions

      + +

      The simple, modular design of SQLite makes it a good platform for +prototyping new, experimental database language features or ideas. +

      +
    • + + +
    + +

    Situations Where Another RDBMS May Work Better

    + +
      +
    • Client/Server Applications

      + +

      If you have many client programs accessing a common database +over a network, you should consider using a client/server database +engine instead of SQLite. SQLite will work over a network filesystem, +but because of the latency associated with most network filesystems, +performance will not be great. Also, the file locking logic of +many network filesystem implementations contains bugs (on both Unix +and Windows). If file locking does not work like it should, +it might be possible for two or more client programs to modify the +same part of the same database at the same time, resulting in +database corruption. Because this problem results from bugs in +the underlying filesystem implementation, there is nothing SQLite +can do to prevent it.

      + +

      A good rule of thumb is that you should avoid using SQLite +in situations where the same database will be accessed simultaneously +from many computers over a network filesystem.

      +
    • + +
    • High-volume Websites

      + +

      SQLite will normally work fine as the database backend to a website. +But if your website is so busy that you are thinking of splitting the +database component off onto a separate machine, then you should +definitely consider using an enterprise-class client/server database +engine instead of SQLite.

      +
    • + +
    • Very large datasets

      + +

      With the default page size of 1024 bytes, an SQLite database is +limited in size to 2 tebibytes (2^41 bytes). +And even if it could handle larger databases, SQLite stores the entire +database in a single disk file and many filesystems limit the maximum +size of files to something less than this. So if you are contemplating +databases of this magnitude, you would do well to consider using a +client/server database engine that spreads its content across multiple +disk files, and perhaps across multiple volumes. +

      +
    • + +
    • High Concurrency

      + +

      +SQLite uses reader/writer locks on the entire database file. That means +if any process is reading from any part of the database, all other +processes are prevented from writing any other part of the database. +Similarly, if any one process is writing to the database, +all other processes are prevented from reading any other part of the +database. +For many situations, this is not a problem. Each application +does its database work quickly and moves on, and no lock lasts for more +than a few dozen milliseconds. But there are some applications that require +more concurrency, and those applications may need to seek a different +solution. +

      +
    • + +
    +
    +This page last modified 2009/04/14 11:42:59 UTC +
    diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/whentouse.tcl /tmp/3ARg2Grji7/sqlite3-3.6.16/www/whentouse.tcl --- sqlite3-3.4.2/www/whentouse.tcl 2007-06-12 13:18:03.000000000 +0100 +++ sqlite3-3.6.16/www/whentouse.tcl 1970-01-01 01:00:00.000000000 +0100 @@ -1,254 +0,0 @@ -# -# Run this TCL script to generate HTML for the goals.html file. -# -set rcsid {$Id: whentouse.tcl,v 1.7 2007/04/14 12:04:39 drh Exp $} -source common.tcl -header {Appropriate Uses For SQLite} - -puts { -

    -SQLite is different from most other SQL database engines in that its -primary design goal is to be simple: -

    - -
      -
    • Simple to administer
    • -
    • Simple to operate
    • -
    • Simple to embed in a larger program
    • -
    • Simple to maintain and customize
    • -
    - -

    -Many people like SQLite because it is small and fast. But those -qualities are just happy accidents. -Users also find that SQLite is very reliable. Reliability is -a consequence of simplicity. With less complication, there is -less to go wrong. So, yes, SQLite is small, fast, and reliable, -but first and foremost, SQLite strives to be simple. -

    - -

    -Simplicity in a database engine can be either a strength or a -weakness, depending on what you are trying to do. In order to -achieve simplicity, SQLite has had to sacrifice other characteristics -that some people find useful, such as high concurrency, fine-grained -access control, a rich set of built-in functions, stored procedures, -esoteric SQL language features, XML and/or Java extensions, -tera- or peta-byte scalability, and so forth. If you need some of these -features and do not mind the added complexity that they -bring, then SQLite is probably not the database for you. -SQLite is not intended to be an enterprise database engine. It -not designed to compete with Oracle or PostgreSQL. -

    - -

    -The basic rule of thumb for when it is appropriate to use SQLite is -this: Use SQLite in situations where simplicity of administration, -implementation, and maintenance are more important than the countless -complex features that enterprise database engines provide. -As it turns out, situations where simplicity is the better choice -are more common than many people realize. -

    - -

    Situations Where SQLite Works Well

    - -
      -
    • Websites

      - -

      SQLite usually will work great as the database engine for low to -medium traffic websites (which is to say, 99.9% of all websites). -The amount of web traffic that SQLite can handle depends, of course, -on how heavily the website uses its database. Generally -speaking, any site that gets fewer than a 100000 hits/day should work -fine with SQLite. -The 100000 hits/day figure is a conservative estimate, not a -hard upper bound. -SQLite has been demonstrated to work with 10 times that amount -of traffic.

      -
    • - -
    • Embedded devices and applications

      - -

      Because an SQLite database requires little or no administration, -SQLite is a good choice for devices or services that must work -unattended and without human support. SQLite is a good fit for -use in cellphones, PDAs, set-top boxes, and/or appliances. It also -works well as an embedded database in downloadable consumer applications. -

      -
    • - -
    • Application File Format

      - -

      -SQLite has been used with great success as the on-disk file format -for desktop applications such as financial analysis tools, CAD -packages, record keeping programs, and so forth. The traditional -File/Open operation does an sqlite3_open() and executes a -BEGIN TRANSACTION to get exclusive access to the content. File/Save -does a COMMIT followed by another BEGIN TRANSACTION. The use -of transactions guarantees that updates to the application file are atomic, -durable, isolated, and consistent. -

      - -

      -Temporary triggers can be added to the database to record all -changes into a (temporary) undo/redo log table. These changes can then -be played back when the user presses the Undo and Redo buttons. Using -this technique, a unlimited depth undo/redo implementation can be written -in surprising little code. -

      -
    • - -
    • Replacement for ad hoc disk files

      - -

      Many programs use fopen(), fread(), and fwrite() to create and -manage files of data in home-grown formats. SQLite works -particularly well as a -replacement for these ad hoc data files.

      -
    • - -
    • Internal or temporary databases

      - -

      -For programs that have a lot of data that must be sifted and sorted -in diverse ways, it is often easier and quicker to load the data into -an in-memory SQLite database and use queries with joins and ORDER BY -clauses to extract the data in the form and order needed rather than -to try to code the same operations manually. -Using an SQL database internally in this way also gives the program -greater flexibility since new columns and indices can be added without -having to recode every query. -

      -
    • - -
    • Command-line dataset analysis tool

      - -

      -Experienced SQL users can employ -the command-line sqlite program to analyze miscellaneous -datasets. Raw data can be imported from CSV files, then that -data can be sliced and diced to generate a myriad of summary -reports. Possible uses include website log analysis, sports -statistics analysis, compilation of programming metrics, and -analysis of experimental results. -

      - -

      -You can also do the same thing with a enterprise client/server -database, of course. The advantages to using SQLite in this situation -are that SQLite is much easier to set up and the resulting database -is a single file that you can store on a floppy disk or flash-memory stick -or email to a colleague. -

      -
    • - -
    • Stand-in for an enterprise database during demos or testing

      - -

      -If you are writing a client application for an enterprise database engine, -it makes sense to use a generic database backend that allows you to connect -to many different kinds of SQL database engines. It makes even better -sense to -go ahead and include SQLite in the mix of supported database and to statically -link the SQLite engine in with the client. That way the client program -can be used standalone with an SQLite data file for testing or for -demonstrations. -

      -
    • - -
    • Database Pedagogy

      - -

      -Because it is simple to setup and use (installation is trivial: just -copy the sqlite or sqlite.exe executable to the target machine -and run it) SQLite makes a good database engine for use in teaching SQL. -Students can easily create as many databases as they like and can -email databases to the instructor for comments or grading. For more -advanced students who are interested in studying how an RDBMS is -implemented, the modular and well-commented and documented SQLite code -can serve as a good basis. This is not to say that SQLite is an accurate -model of how other database engines are implemented, but rather a student who -understands how SQLite works can more quickly comprehend the operational -principles of other systems. -

      -
    • - -
    • Experimental SQL language extensions

      - -

      The simple, modular design of SQLite makes it a good platform for -prototyping new, experimental database language features or ideas. -

      -
    • - - -
    - -

    Situations Where Another RDBMS May Work Better

    - -
      -
    • Client/Server Applications

      - -

      If you have many client programs accessing a common database -over a network, you should consider using a client/server database -engine instead of SQLite. SQLite will work over a network filesystem, -but because of the latency associated with most network filesystems, -performance will not be great. Also, the file locking logic of -many network filesystems implementation contains bugs (on both Unix -and windows). If file locking does not work like it should, -it might be possible for two or more client programs to modify the -same part of the same database at the same time, resulting in -database corruption. Because this problem results from bugs in -the underlying filesystem implementation, there is nothing SQLite -can do to prevent it.

      - -

      A good rule of thumb is that you should avoid using SQLite -in situations where the same database will be accessed simultaneously -from many computers over a network filesystem.

      -
    • - -
    • High-volume Websites

      - -

      SQLite will normally work fine as the database backend to a website. -But if you website is so busy that your are thinking of splitting the -database component off onto a separate machine, then you should -definitely consider using an enterprise-class client/server database -engine instead of SQLite.

      -
    • - -
    • Very large datasets

      - -

      When you start a transaction in SQLite (which happens automatically -before any write operation that is not within an explicit BEGIN...COMMIT) -the engine has to allocate a bitmap of dirty pages in the disk file to -help it manage its rollback journal. SQLite needs 256 bytes of RAM for -every 1MiB of database (assuming a 1024-byte page size: less memory is -used with larger page sizes, of course). -For smaller databases, the amount of memory -required is not a problem, but when database begin to grow into the -multi-gigabyte range, the size of the bitmap can get quite large. If -you need to store and modify more than a few dozen GB of data, you should -consider using a different database engine. -

      -
    • - -
    • High Concurrency

      - -

      -SQLite uses reader/writer locks on the entire database file. That means -if any process is reading from any part of the database, all other -processes are prevented from writing any other part of the database. -Similarly, if any one process is writing to the database, -all other processes are prevented from reading any other part of the -database. -For many situations, this is not a problem. Each application -does its database work quickly and moves on, and no lock lasts for more -than a few dozen milliseconds. But there are some applications that require -more concurrency, and those applications may need to seek a different -solution. -

      -
    • - -
    - -} -footer $rcsid diff -Nru /tmp/wcSK23PnjP/sqlite3-3.4.2/www/zeroconf.html /tmp/3ARg2Grji7/sqlite3-3.6.16/www/zeroconf.html --- sqlite3-3.4.2/www/zeroconf.html 1970-01-01 01:00:00.000000000 +0100 +++ sqlite3-3.6.16/www/zeroconf.html 2009-06-27 15:07:45.000000000 +0100 @@ -0,0 +1,102 @@ + + +Zero-Configuration + + + + + +

    SQLite Is A Zero-Configuration Database

    + +

    +SQLite does not need to be "installed" before it is used. +There is no "setup" procedure. There is no +server process that needs to be started, stopped, or configured. +There is +no need for an administrator to create a new database instance or assign +access permissions to users. +SQLite uses no configuration files. +Nothing needs to be done to tell the system that SQLite is running. +No actions are required to recover after a system crash or power failure. +There is nothing to troubleshoot. +

    + + +

    +SQLite just works. +

    + +

    +Other database engines may run great once you get them going. +But doing the initial installation and configuration can often +be intimidating. +

    +
    +This page last modified 2007/11/13 00:53:43 UTC +
    "
    -  set t2 [string map {& & < < > >} $text]
    -  regsub -all "/(\[^\n/\]+)/" $t2 {\1} t3
    -  puts "$t3"
    -  puts "